hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
113e530a45b13fe1f2e094349e6bebfb8b5e53dd | 15,137 | py | Python | autoprocess/utils/misc.py | michel4j/auto-process | 9c011cef3cdc2fc55df31f9cac88c3e1074aa366 | [
"BSD-3-Clause"
] | null | null | null | autoprocess/utils/misc.py | michel4j/auto-process | 9c011cef3cdc2fc55df31f9cac88c3e1074aa366 | [
"BSD-3-Clause"
] | null | null | null | autoprocess/utils/misc.py | michel4j/auto-process | 9c011cef3cdc2fc55df31f9cac88c3e1074aa366 | [
"BSD-3-Clause"
] | null | null | null | import functools
import gzip
import json
import math
import os
import pwd
import shutil
import msgpack
import numpy
import msgpack_numpy
from prettytable import PrettyTable
# Physical Constants
_h = 4.13566733e-15  # eV.s (Planck constant)
_c = 299792458e10  # A/s (speed of light in angstroms per second)
# direction vector of kappa axis on 08B1-1 when omega is at 0.0 deg
KAPPA_AXIS = numpy.array([0.91354546, 0.3468, 0.21251931])
def code_matches_all(code, *args):
    """Return True when every bit-flag in *args* is contained in *code*."""
    return all(code | flag == code for flag in args)
def code_matches_any(code, *args):
    """Return True when at least one bit-flag in *args* is contained in *code*."""
    return any(code | flag == code for flag in args)
def code_matches_only(code, *args):
    """Return True when *code* equals exactly the union of the given flags."""
    union = functools.reduce(lambda acc, flag: acc | flag, args)
    return code == union
def code_matches_none(code, *args):
    """Return True when none of the bit-flags in *args* are contained in *code*."""
    return not any(code | flag == code for flag in args)
def get_cpu_count():
    """Return the number of CPUs currently online.

    Uses os.sysconf, so this is POSIX-only (unavailable on Windows).
    """
    return os.sysconf('SC_NPROCESSORS_ONLN')
def energy_to_wavelength(energy):
    """Convert photon energy in keV to wavelength in angstroms.

    Zero energy maps to zero wavelength rather than dividing by zero.
    """
    if energy == 0.0:
        return 0.0
    return (_h * _c) / (1000.0 * energy)
def wavelength_to_energy(wavelength):
    """Convert wavelength in angstroms to photon energy in keV.

    Zero wavelength maps to zero energy rather than dividing by zero.
    """
    if wavelength == 0.0:
        return 0.0
    return (_h * _c) / (1000.0 * wavelength)
def air(e):
    """Empirical air transmission correction as a function of energy *e*.

    Evaluates 1 - a * exp(b * e**c) with fitted coefficients.
    """
    coeff_a = 1.00000857e+00
    coeff_b = -3.10243288e-04
    exponent = 3.01020914e+00
    return 1.0 - coeff_a * math.exp(coeff_b * (e ** exponent))
def get_project_name():
    """Return the login name of the effective user (POSIX only)."""
    return pwd.getpwuid(os.geteuid()).pw_name
def get_home_dir():
    """Return the home directory of the effective user (POSIX only)."""
    return pwd.getpwuid(os.geteuid()).pw_dir
def backup_files(*args):
    """Copy each existing file to the next free '<name>.<index>' slot.

    Missing files are skipped; existing backups are never overwritten.
    """
    for path in args:
        if not os.path.exists(path):
            continue
        index = 0
        while os.path.exists('%s.%0d' % (path, index)):
            index += 1
        shutil.copy(path, '%s.%0d' % (path, index))
def backup_special_file(filename, suffix):
    """Copy *filename* to '<filename>.<suffix>' when it exists; no-op otherwise."""
    if not os.path.exists(filename):
        return
    shutil.copy(filename, '%s.%s' % (filename, suffix))
def file_requirements(*args):
    """Return True only if every given path exists on disk.

    Replaces the manual flag-and-break loop with the idiomatic `all()`,
    which short-circuits on the first missing path exactly like the
    original.  Vacuously True when called with no arguments (unchanged).
    """
    return all(os.path.exists(path) for path in args)
def combine_names(names):
    """
    Build a single representative name for a set of related names.

    The common prefix of *names* (when non-empty) is joined with the
    literal 'combined' by a dash.
    """
    prefix = os.path.commonprefix(names)
    parts = [prefix, 'combined'] if prefix else ['combined']
    return '-'.join(parts)
def prepare_dir(workdir, backup=False):
    """
    Ensure *workdir* exists for autoprocess to run in.

    When *backup* is set and the directory already exists, it is moved
    aside to the first free '<workdir>-bkNN' name and a fresh directory
    is created in its place.
    """
    if not os.path.isdir(workdir):
        os.makedirs(workdir)
    elif backup:
        count = 1
        while os.path.isdir("%s-bk%02d" % (workdir, count)):
            count += 1
        shutil.move(workdir, "%s-bk%02d" % (workdir, count))
        os.makedirs(workdir)
def calc_angle(v1, v2):
    """Return the angle in radians between two vectors, folded into
    (-pi/2, pi/2] so that anti-parallel directions count as aligned."""
    u = numpy.array(v1, dtype=numpy.float64) / numpy.linalg.norm(v1)
    w = numpy.array(v2, dtype=numpy.float64) / numpy.linalg.norm(v2)
    cos_part = numpy.dot(u, w)
    sin_part = numpy.linalg.norm(numpy.cross(u, w))
    angle = numpy.arctan2(sin_part, cos_part)
    if angle > numpy.pi / 2.0:
        angle -= numpy.pi
    return angle
def make_rot_matrix(direction, angle):
    """
    Rotation matrix for a rotation of *angle* degrees about *direction*.

    Implements R = dd^T + cos(a)(I - dd^T) + sin(a) skew(d) with d the
    normalized axis.  The skew term follows this module's sign convention
    (transpose of the conventional cross-product matrix).

    Parameters:
        direction : 3-vector rotation axis (need not be normalized)
        angle : rotation angle in degrees
    """
    theta = numpy.radians(angle)
    axis = numpy.array(direction, dtype=numpy.float64)
    axis = axis / numpy.linalg.norm(axis)
    outer = numpy.outer(axis, axis)
    skew = numpy.array([
        [0.0, axis[2], -axis[1]],
        [-axis[2], 0.0, axis[0]],
        [axis[1], -axis[0], 0.0],
    ], dtype=numpy.float64)
    identity = numpy.eye(3, dtype=numpy.float64)
    return outer + numpy.cos(theta) * (identity - outer) + numpy.sin(theta) * skew
def rotate_vector(vec, mtxa):
    """
    Apply rotation matrix *mtxa* to row vector(s) *vec*.

    Accepts a single 3-vector (or a 1x3 array), returning a flat 1-D
    array, or an (n, 3) stack of row vectors, returning an (n, 3) array.

    Rewritten with plain ndarrays: the original used ``numpy.matrix``,
    which is deprecated; results are numerically identical
    ((M @ v.T).T == v @ M.T).
    """
    mtx = numpy.asarray(mtxa, dtype=numpy.float64)
    rows = numpy.atleast_2d(numpy.asarray(vec, dtype=numpy.float64))
    rotated = rows.dot(mtx.T)
    if rotated.shape == (1, 3):
        # single-vector input: return a flat 3-vector like the original getA1()
        return rotated.ravel()
    return rotated
def optimize_xtal_offset(info, kappa_axis=KAPPA_AXIS):
    """Optimize the kappa and Phi rotations required to align the
    longest cell axis closest to the spindle axis.

    Brute-force search over a kappa x phi grid in STEP-degree increments.

    input:
    - info is a dictionary produced by parser.xds.parse_xparm
    - kappa_axis is the direction vector of the kappa axis at zero spindle rotation

    Returns a dict with the best (kappa, phi) pair, the resulting chi
    offset, the index of the longest cell axis, the original and
    optimized offsets (degrees), the full search grid as a numpy array,
    and the grid's (kappa, phi) shape.
    """
    STEP = 5  # How coarse should the brute force search be in degrees?
    axis_names = ['cell_a_axis', 'cell_b_axis', 'cell_c_axis']
    # pick the axis name paired with the largest unit-cell length
    longest_axis = max(list(zip(info['unit_cell'], axis_names)))[1]
    # NOTE(review): this kmat is recomputed inside the loop below, so this
    # initial assignment is never read (dead code).
    kmat = make_rot_matrix(kappa_axis, STEP)
    # misalignment (degrees) of the longest axis before any motion
    orig_offset = abs(calc_angle(info[longest_axis], info['rotation_axis'])) * 180.0 / numpy.pi
    offsets = []
    cell_axis = info[longest_axis]
    rot_axis = info['rotation_axis']
    for kappa in range(0, 180, STEP):
        for phi in range(0, 360, STEP):
            pmat = make_rot_matrix(rot_axis, phi)
            nc_axis = rotate_vector(cell_axis, pmat)  # first apply phi rotation to cell axis
            kmat = make_rot_matrix(kappa_axis, kappa)
            nc_axis = rotate_vector(nc_axis, kmat)  # then add kappa rotation to cell axis
            offset = abs(calc_angle(nc_axis, rot_axis)) * 180.0 / numpy.pi
            # chi offset: how far the kappa rotation tilts the spindle axis itself
            p_ax = rotate_vector(rot_axis, kmat)
            chi_offset = abs(calc_angle(p_ax, rot_axis)) * 180.0 / numpy.pi
            offsets.append((offset, kappa, phi, chi_offset))

    # offset dimensions
    ks = len(list(range(0, 180, STEP)))
    ps = len(list(range(0, 360, STEP)))

    # min() on the tuples selects the smallest offset (first element)
    opt_offset, opt_kappa, opt_phi, chi_offset = min(offsets)
    _out = {
        'kappa': opt_kappa,
        'phi': opt_phi,
        'chi': chi_offset,
        'longest_axis': axis_names.index(longest_axis),
        'offset': orig_offset,
        'best_offset': opt_offset,
        'data': numpy.array(offsets),
        'shape': (ks, ps),
    }
    return _out
class Table(object):
    """
    Row-oriented table: a thin wrapper over a list of per-row dicts with
    PrettyTable rendering, column hiding and simple slicing/sorting.
    """

    def __init__(self, t):
        # t: list of dicts (one per row); all rows are assumed to share keys
        self._table = t
        self.size = len(self._table)
        self.hidden_columns = []

    def __repr__(self):
        return "<Table (%d rows)\n%s\n>" % (self.size, str(self))

    def __str__(self):
        return self.get_text()

    def get_text(self, full=False):
        """Render the table; with 7+ rows only the first and last three are
        shown around an ellipsis row, unless *full* is True."""
        x = PrettyTable(list(self.keys()))
        if self.size < 7 or full:
            for i in range(self.size):
                x.add_row(self.row(i))
        else:
            for i in range(3):
                x.add_row(self.row(i))
            x.add_row(['...'] * len(list(self.keys())))
            for i in range(self.size - 3, self.size):
                x.add_row(self.row(i))
        return x.get_string()

    def keys(self):
        """Visible column names (taken from the first row, minus hidden)."""
        return [k for k in list(self._table[0].keys()) if k not in self.hidden_columns]

    def hide(self, *args):
        """Hide the named columns from keys()/row() output and rendering."""
        self.hidden_columns.extend(args)

    def show_all(self):
        """Undo all previous hide() calls."""
        self.hidden_columns = []

    def row(self, i):
        """Visible values of row *i*; returns None when *i* is out of range."""
        if i < len(self._table):
            return [v for k, v in list(self._table[i].items()) if k not in self.hidden_columns]

    def rows(self, slice=":"):
        """Rows selected by a 'start:stop' string (negative stop allowed).

        NOTE(review): the parameter name shadows the builtin slice().
        """
        pre, post = slice.split(':')
        if pre.strip() == '':
            pre = 0
        else:
            pre = int(pre)
        if post.strip() == '':
            post = self.size
        else:
            post = int(post)
        if post < 0: post = self.size + post
        return [self.row(i) for i in range(self.size) if i >= pre and i < post]

    def column(self, key):
        """All values of column *key*, including hidden columns."""
        return [r[key] for r in self._table]

    def sort(self, key, reverse=False):
        """Sort rows in place by column *key* (descending when *reverse*)."""
        self._table.sort(key=lambda x: x[key])
        if reverse:
            self._table = self._table[::-1]

    def __getitem__(self, s):
        # column access: table['colname'] -> list of values for that column
        vals = [r[s] for r in self._table]
        return vals
class rTable(Table):
    """Table built from column-oriented input: a dict mapping each column
    name to a list of values, converted to per-row dicts."""

    def __init__(self, t):
        column_names = list(t.keys())
        row_count = len(t[column_names[0]])
        self._table = [
            {name: t[name][i] for name in column_names}
            for i in range(row_count)
        ]
        self.size = len(self._table)
        self.hidden_columns = []
class sTable(Table):
    """Table built directly from a list of row-lists; the first row gives
    the column headers.

    NOTE(review): unlike Table/rTable, this does not populate _table/size
    and delegates rendering entirely to an internal PrettyTable.
    """

    def __init__(self, t):
        self.table = PrettyTable()
        self.table.field_names = t[0]
        for row in t[1:]:
            self.table.add_row(row)
        # left-align the first column (PrettyTable centers by default)
        self.table.align[t[0][0]] = "l"

    def __str__(self):
        return self.table.get_string()
# Ordered Dict from Django
class SortedDict(dict):
    """
    A dictionary that keeps its keys in the order in which they're inserted.

    Insertion order is tracked in the separate `keyOrder` list; every
    mutating method keeps `keyOrder` in sync with the underlying dict.
    (Predates Python's insertion-ordered builtin dict.)
    """

    def __new__(cls, *args, **kwargs):
        # keyOrder must exist even before __init__ runs (e.g. unpickling)
        instance = super(SortedDict, cls).__new__(cls, *args, **kwargs)
        instance.keyOrder = []
        return instance

    def __init__(self, data=None):
        if data is None:
            data = {}
        super(SortedDict, self).__init__(data)
        if isinstance(data, dict):
            self.keyOrder = list(data.keys())
        else:
            # data is an iterable of (key, value) pairs; keep first occurrence
            self.keyOrder = []
            for key, value in data:
                if key not in self.keyOrder:
                    self.keyOrder.append(key)

    def __deepcopy__(self, memo):
        from copy import deepcopy
        return self.__class__([(key, deepcopy(value, memo))
                               for key, value in self.items()])

    def __setitem__(self, key, value):
        super(SortedDict, self).__setitem__(key, value)
        if key not in self.keyOrder:
            self.keyOrder.append(key)

    def __delitem__(self, key):
        super(SortedDict, self).__delitem__(key)
        self.keyOrder.remove(key)

    def __iter__(self):
        # iterate keys in insertion order
        for k in self.keyOrder:
            yield k

    def pop(self, k, *args):
        result = super(SortedDict, self).pop(k, *args)
        try:
            self.keyOrder.remove(k)
        except ValueError:
            # Key wasn't in the dictionary in the first place. No problem.
            pass
        return result

    def popitem(self):
        result = super(SortedDict, self).popitem()
        self.keyOrder.remove(result[0])
        return result

    def items(self):
        return list(zip(self.keyOrder, list(self.values())))

    def iteritems(self):
        # legacy Python-2-style lazy iterator over (key, value) pairs
        for key in self.keyOrder:
            yield key, super(SortedDict, self).__getitem__(key)

    def keys(self):
        # return a copy so callers cannot mutate the internal order list
        return self.keyOrder[:]

    def iterkeys(self):
        return iter(self.keyOrder)

    def values(self):
        return list(map(super(SortedDict, self).__getitem__, self.keyOrder))

    def itervalues(self):
        for key in self.keyOrder:
            yield super(SortedDict, self).__getitem__(key)

    def update(self, dict_):
        # route through __setitem__ so keyOrder stays consistent
        for k, v in list(dict_.items()):
            self.__setitem__(k, v)

    def setdefault(self, key, default):
        if key not in self.keyOrder:
            self.keyOrder.append(key)
        return super(SortedDict, self).setdefault(key, default)

    def value_for_index(self, index):
        """Returns the value of the item at the given zero-based index."""
        return self[self.keyOrder[index]]

    def insert(self, index, key, value):
        """Inserts the key, value pair before the item with the given index."""
        if key in self.keyOrder:
            n = self.keyOrder.index(key)
            del self.keyOrder[n]
            if n < index:
                # removal above shifted the target position left by one
                index -= 1
        self.keyOrder.insert(index, key)
        super(SortedDict, self).__setitem__(key, value)

    def copy(self):
        """Returns a copy of this object."""
        # This way of initializing the copy means it works for subclasses, too.
        obj = self.__class__(self)
        obj.keyOrder = self.keyOrder[:]
        return obj

    def __repr__(self):
        """
        Replaces the normal dict.__repr__ with a version that returns the keys
        in their sorted order.
        """
        return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in list(self.items())])

    def clear(self):
        super(SortedDict, self).clear()
        self.keyOrder = []
class DotDict(dict):
    """Dict whose keys are also readable/writable/deletable as attributes.

    Reading a missing attribute yields None instead of raising.
    """

    def __getattr__(self, name):
        # only invoked when normal attribute lookup fails
        try:
            return self[name]
        except KeyError:
            return None

    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__
def save_pid(file_path):
    """Write the current process id (newline-terminated) to *file_path*."""
    pid_line = '{}\n'.format(os.getpid())
    with open(file_path, 'w') as handle:
        handle.write(pid_line)
def load_json(filename):
    """Parse and return the JSON document stored in *filename*."""
    with open(filename, 'r') as handle:
        return json.load(handle)
def load_chkpt(filename='process.chkpt'):
    """Load a gzip-compressed msgpack checkpoint file.

    Numpy arrays inside the payload are reconstructed via
    msgpack_numpy's decode hook.
    """
    with gzip.open(filename, 'rb') as handle:
        info = msgpack.load(handle, object_hook=msgpack_numpy.decode)
    return info
def savgol_filter(data, window_length, polyorder, deriv=0):
    """
    applies a Savitzky-Golay filter

    input parameters:
    - data => data as a 1D numpy array
    - window_length => window size; forced odd and must be > polyorder + 2
    - polyorder => order of the fitted polynomial
    - deriv => derivative to return, 0..2; default 0 (smooth only)

    returns smoothed data as a numpy array
    """
    try:
        window_length = abs(int(window_length))
        polyorder = abs(int(polyorder))
    except ValueError:
        raise ValueError("kernel and order have to be of type int (floats will be converted).")
    # force an odd-sized window of at least 1
    if window_length % 2 != 1: window_length += 1
    if window_length < 1: window_length = 1
    if window_length < polyorder + 2:
        raise TypeError("kernel is to small for the polynomals\nshould be > order + 2")

    # a second order polynomal has 3 coefficients
    order_range = list(range(polyorder + 1))
    half_window = (window_length - 1) // 2
    # Vandermonde-style design matrix over the window positions.
    # NOTE(review): numpy.mat is deprecated in favour of plain arrays.
    b = numpy.mat([[k ** i for i in order_range] for k in range(-half_window, half_window + 1)])
    # row [deriv] of the pseudo-inverse gives the convolution weights;
    # since we don't want the derivative, else choose [1] or [2], respectively
    assert deriv <= 2
    m = numpy.linalg.pinv(b).A[deriv]
    window_size = len(m)
    half_window = (window_size - 1) // 2
    # precompute the offset values for better performance
    offsets = list(range(-half_window, half_window + 1))
    offset_data = list(zip(offsets, m))
    smooth_data = list()
    # temporary data, extended with a mirror image to the left and right
    firstval = data[0]
    lastval = data[len(data) - 1]
    # left extension: f(x0-x) = f(x0)-(f(x)-f(x0)) = 2f(x0)-f(x)
    # right extension: f(xl+x) = f(xl)+(f(xl)-f(xl-x)) = 2f(xl)-f(xl-x)
    leftpad = numpy.zeros(half_window) + 2 * firstval
    rightpad = numpy.zeros(half_window) + 2 * lastval
    leftchunk = data[1:1 + half_window]
    leftpad = leftpad - leftchunk[::-1]
    rightchunk = data[len(data) - half_window - 1:len(data) - 1]
    rightpad = rightpad - rightchunk[::-1]
    data = numpy.concatenate((leftpad, data))
    data = numpy.concatenate((data, rightpad))
    # weighted moving window over the mirror-padded data
    for i in range(half_window, len(data) - half_window):
        value = 0.0
        for offset, weight in offset_data:
            value += weight * data[i + offset]
        smooth_data.append(value)
    return numpy.array(smooth_data)
def uniquify(seq):
    """Return *seq* with duplicates removed, preserving first-seen order."""
    seen = set()
    result = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
| 29.109615 | 96 | 0.595561 |
3cdd63016e996d35b1f0238a5f40aca79f14b29c | 1,082 | py | Python | conans/__init__.py | deadash/conan | 6c0a71ebe0f1b3d00701d9138a5905673921eb11 | [
"MIT"
] | null | null | null | conans/__init__.py | deadash/conan | 6c0a71ebe0f1b3d00701d9138a5905673921eb11 | [
"MIT"
] | null | null | null | conans/__init__.py | deadash/conan | 6c0a71ebe0f1b3d00701d9138a5905673921eb11 | [
"MIT"
] | null | null | null | from conans.client.build.autotools_environment import AutoToolsBuildEnvironment
from conans.client.build.cmake import CMake
from conans.client.build.meson import Meson
from conans.client.build.msbuild import MSBuild
from conans.client.build.visual_environment import VisualStudioBuildEnvironment
from conans.client.run_environment import RunEnvironment
from conans.model.conan_file import ConanFile
from conans.model.options import Options
from conans.model.settings import Settings
from conans.util.files import load
# Server capability flags exchanged with Conan remotes.
# complex_search: With ORs and not filtering by not restricted settings
COMPLEX_SEARCH_CAPABILITY = "complex_search"
CHECKSUM_DEPLOY = "checksum_deploy"  # Only when v2
REVISIONS = "revisions"  # Only when enabled in config, not by default look at server_launcher.py
ONLY_V2 = "only_v2"  # Remotes and virtuals from Artifactory returns this capability
MATRIX_PARAMS = "matrix_params"
OAUTH_TOKEN = "oauth_token"
SERVER_CAPABILITIES = [COMPLEX_SEARCH_CAPABILITY, REVISIONS]  # Server is always with revisions
DEFAULT_REVISION_V1 = "0"
# Package version string ('-dev' marker until release).
__version__ = '1.43.0-dev'
| 45.083333 | 97 | 0.832717 |
0ed5250582572f28bb8fdef80cd4931bdef26d6c | 618 | py | Python | importer/migrations/0014_auto_20180924_1943.py | juliecentofanti172/juliecentofanti.github.io | 446ea8522b9f4a6709124ebb6e0f675acf7fe205 | [
"CC0-1.0"
] | 134 | 2018-05-23T14:00:29.000Z | 2022-03-10T15:47:53.000Z | importer/migrations/0014_auto_20180924_1943.py | ptrourke/concordia | 56ff364dbf38cb8a763df489479821fe43b76d69 | [
"CC0-1.0"
] | 1,104 | 2018-05-22T20:18:22.000Z | 2022-03-31T17:28:40.000Z | importer/migrations/0014_auto_20180924_1943.py | ptrourke/concordia | 56ff364dbf38cb8a763df489479821fe43b76d69 | [
"CC0-1.0"
] | 32 | 2018-05-22T20:22:38.000Z | 2021-12-21T14:11:44.000Z | # Generated by Django 2.0.8 on 2018-09-24 19:43
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration adding unique_together constraints to the
    importer's ImportItem and ImportItemAsset models."""

    dependencies = [
        ("concordia", "0024_auto_20180924_1529"),
        ("importer", "0013_auto_20180924_1318"),
    ]

    operations = [
        # an item may appear at most once per import job
        migrations.AlterUniqueTogether(
            name="importitem", unique_together={("job", "item")}
        ),
        # assets are unique per import item, both by sequence number and by asset
        migrations.AlterUniqueTogether(
            name="importitemasset",
            unique_together={
                ("import_item", "sequence_number"),
                ("import_item", "asset"),
            },
        ),
    ]
| 24.72 | 64 | 0.566343 |
2f70314373eeb8eca2616cdd468a55e871bd5ee7 | 2,861 | py | Python | tasks.py | singingwolfboy/invoke | ccd2bae21a94ceef93cba34441cf575d4ddb7ff9 | [
"BSD-2-Clause"
] | null | null | null | tasks.py | singingwolfboy/invoke | ccd2bae21a94ceef93cba34441cf575d4ddb7ff9 | [
"BSD-2-Clause"
] | null | null | null | tasks.py | singingwolfboy/invoke | ccd2bae21a94ceef93cba34441cf575d4ddb7ff9 | [
"BSD-2-Clause"
] | null | null | null | import sys
import time
from invocations.docs import docs, www
from invocations.testing import test, coverage
from invocations.packaging import vendorize, release
from invoke import ctask as task, Collection, Context
@task(help=test.help)
def integration(c, module=None, runner=None, opts=None):
    """
    Run the integration test suite. May be slow!

    Delegates to the shared `test` task, appending a --tests flag that
    restricts collection to the integration/ directory.
    """
    opts = opts or ""
    opts += " --tests=integration/"
    test(c, module, runner, opts)
@task
def sites(c):
    """
    Build both doc sites w/ maxed nitpicking.

    Runs the `docs` and `www` build tasks with Sphinx options -W -n -T.
    """
    # Turn warnings into errors, emit warnings about missing references.
    # This gives us a maximally noisy docs build.
    # Also enable tracebacks for easier debuggage.
    opts = "-W -n -T"
    # This is super lolzy but we haven't actually tackled nontrivial in-Python
    # task calling yet, so...
    # Each site gets its own Context carrying its own configuration.
    docs_c = Context(config=c.config.clone())
    www_c = Context(config=c.config.clone())
    docs_c.update(**docs.configuration())
    www_c.update(**www.configuration())
    docs['build'](docs_c, opts=opts)
    www['build'](www_c, opts=opts)
@task
def watch(c):
    """
    Watch both doc trees & rebuild them if files change.

    This includes e.g. rebuilding the API docs if the source code changes;
    rebuilding the WWW docs if the README changes; etc.

    Requires the optional `watchdog` package; exits with a hint otherwise.
    """
    try:
        from watchdog.observers import Observer
        from watchdog.events import RegexMatchingEventHandler
    except ImportError:
        sys.exit("If you want to use this, 'pip install watchdog' first.")

    class APIBuildHandler(RegexMatchingEventHandler):
        # Rebuild the API docs on any matching filesystem event.
        def on_any_event(self, event):
            my_c = Context(config=c.config.clone())
            my_c.update(**docs.configuration())
            docs['build'](my_c)

    class WWWBuildHandler(RegexMatchingEventHandler):
        # Rebuild the WWW site on any matching filesystem event.
        def on_any_event(self, event):
            my_c = Context(config=c.config.clone())
            my_c.update(**www.configuration())
            www['build'](my_c)

    # Readme & WWW triggers WWW.
    # Fix: patterns are now raw strings — '\.' in a plain string literal is
    # an invalid escape sequence (DeprecationWarning, later SyntaxWarning);
    # the pattern text itself is unchanged.
    www_handler = WWWBuildHandler(
        regexes=[r'\./README.rst', r'\./sites/www'],
        ignore_regexes=[r'.*/\..*\.swp', r'\./sites/www/_build'],
    )
    # Code and docs trigger API
    api_handler = APIBuildHandler(
        regexes=[r'\./invoke/', r'\./sites/docs'],
        ignore_regexes=[r'.*/\..*\.swp', r'\./sites/docs/_build'],
    )

    # Run observer loop
    observer = Observer()
    # TODO: Find parent directory of tasks.py and use that.
    for x in (www_handler, api_handler):
        observer.schedule(x, '.', recursive=True)
    observer.start()
    try:
        # spin until interrupted; the observer threads do the actual work
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
# Root task collection exposed to the `invoke` CLI.
ns = Collection(
    test, coverage, integration, vendorize, release, www, docs, sites, watch
)
# Point the coverage task at this project's package.
ns.configure({'coverage': {'package': 'invoke'}})
| 30.115789 | 78 | 0.642083 |
aa60771a13307580966a143228982d3c29d00bff | 1,350 | py | Python | siml/networks/pyg/gin.py | ricosjp/siml | 8fc07d798cdedd77622c16221ee44a575d36bad0 | [
"Apache-2.0"
] | 11 | 2020-12-28T16:22:33.000Z | 2021-11-14T17:09:27.000Z | siml/networks/pyg/gin.py | ricosjp/siml | 8fc07d798cdedd77622c16221ee44a575d36bad0 | [
"Apache-2.0"
] | null | null | null | siml/networks/pyg/gin.py | ricosjp/siml | 8fc07d798cdedd77622c16221ee44a575d36bad0 | [
"Apache-2.0"
] | 2 | 2021-04-28T09:41:47.000Z | 2021-07-01T21:18:51.000Z |
import torch
import torch_geometric
from . import abstract_pyg_gcn
from .. import identity
from .. import mlp
from ... import setting
class GIN(abstract_pyg_gcn.AbstractPyGGCN):
    """Graph Isomorphism Network based on https://arxiv.org/abs/1810.00826 ."""

    def __init__(self, block_setting):
        """Build one GINConv + MLP pair per subchain.

        Optional block settings:
        - 'epsilon': initial eps for GINConv (default 0.)
        - 'train_epsilon': make eps a learnable parameter (default False)
        """
        super().__init__(
            block_setting, create_subchain=True, residual=False)

        self.epsilon = block_setting.optional.get('epsilon', 0.)
        self.train_epsilon = block_setting.optional.get('train_epsilon', False)
        # The conv's inner network is an Identity because the MLP is
        # applied separately in _forward_single_core.
        self.gins = torch.nn.ModuleList([
            torch_geometric.nn.GINConv(
                identity.Identity(setting.BlockSetting()),
                eps=self.epsilon, train_eps=self.train_epsilon)
            for _ in self.subchains])
        # MLPs mirror this block's node/activation/dropout configuration.
        block_setting_for_mlp = setting.BlockSetting(
            type='mlp', nodes=self.block_setting.nodes,
            activations=self.block_setting.activations,
            dropouts=self.block_setting.dropouts)
        self.mlps = torch.nn.ModuleList([
            mlp.MLP(block_setting_for_mlp) for _ in self.subchains])
        return

    def _forward_single_core(self, x, subchain_index, support):
        """Apply the subchain's GINConv, then its MLP.

        # NOTE(review): _remove_self_loop_if_exists comes from the base
        # class; presumably it strips self-loops from *support* — confirm.
        """
        edge_index = self._remove_self_loop_if_exists(support)
        return self.mlps[subchain_index](
            self.gins[subchain_index](x, edge_index))
| 34.615385 | 79 | 0.674815 |
a4be16d899ce61fb742b295fba400967bcbdc207 | 55,340 | py | Python | igramscraper/instagram.py | sczerniawski/instagram-scraper | 1f85474182667edc1ce530fd63b985ae8997ce04 | [
"MIT"
] | null | null | null | igramscraper/instagram.py | sczerniawski/instagram-scraper | 1f85474182667edc1ce530fd63b985ae8997ce04 | [
"MIT"
] | null | null | null | igramscraper/instagram.py | sczerniawski/instagram-scraper | 1f85474182667edc1ce530fd63b985ae8997ce04 | [
"MIT"
] | null | null | null | import time
import requests
import re
import json
import hashlib
import os
from slugify import slugify
import random
from .session_manager import CookieSessionManager
from .exception.instagram_auth_exception import InstagramAuthException
from .exception.instagram_exception import InstagramException
from .exception.instagram_not_found_exception import InstagramNotFoundException
from .model.account import Account
from .model.comment import Comment
from .model.location import Location
from .model.media import Media
from .model.story import Story
from .model.user_stories import UserStories
from .model.tag import Tag
from . import endpoints
from .two_step_verification.console_verification import ConsoleVerification
class Instagram:
HTTP_NOT_FOUND = 404
HTTP_OK = 200
HTTP_FORBIDDEN = 403
HTTP_BAD_REQUEST = 400
MAX_COMMENTS_PER_REQUEST = 300
MAX_LIKES_PER_REQUEST = 50
# 30 mins time limit on operations that require multiple self.__req
PAGING_TIME_LIMIT_SEC = 1800
PAGING_DELAY_MINIMUM_MICROSEC = 1000000 # 1 sec min delay to simulate browser
PAGING_DELAY_MAXIMUM_MICROSEC = 3000000 # 3 sec max delay to simulate browser
instance_cache = None
    def __init__(self, sleep_between_requests=0):
        """
        :param sleep_between_requests: seconds to sleep before each HTTP call
        """
        self.__req = requests.session()
        self.paging_time_limit_sec = Instagram.PAGING_TIME_LIMIT_SEC
        self.paging_delay_minimum_microsec = Instagram.PAGING_DELAY_MINIMUM_MICROSEC
        self.paging_delay_maximum_microsec = Instagram.PAGING_DELAY_MAXIMUM_MICROSEC

        # login state; populated later by with_credentials()/login
        self.session_username = None
        self.session_password = None
        self.user_session = None
        # cached rhx_gis token used to sign GraphQL requests
        self.rhx_gis = None
        self.sleep_between_requests = sleep_between_requests
        # default desktop Chrome user agent; override via set_user_agent()
        self.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) ' \
                          'AppleWebKit/537.36 (KHTML, like Gecko) ' \
                          'Chrome/66.0.3359.139 Safari/537.36'
        # per-account-id map of the last pagination cursor used
        self.user_media_progress = dict()
    def with_credentials(self, username, password, session_folder=None):
        """
        Store login credentials and initialize the cookie/session cache.

        :param username: instagram login name
        :param password: instagram password
        :param session_folder: directory for saved cookies (string) or a
            session-manager object; defaults to '<cwd>/sessions/'
        """
        Instagram.instance_cache = None

        if not session_folder:
            cwd = os.getcwd()
            session_folder = cwd + os.path.sep + 'sessions' + os.path.sep

        if isinstance(session_folder, str):
            # cookie file named after the slugified username
            Instagram.instance_cache = CookieSessionManager(
                session_folder, slugify(username) + '.txt')
        else:
            Instagram.instance_cache = session_folder

        # start from a clean slate; cookies are re-acquired on login
        Instagram.instance_cache.empty_saved_cookies()

        self.session_username = username
        self.session_password = password
def set_proxies(self, proxy):
if proxy and isinstance(proxy, dict):
self.__req.proxies = proxy
    def disable_verify(self):
        """Disable TLS certificate verification on the underlying session."""
        self.__req.verify = False
    def disable_proxies(self):
        """Remove any proxies previously configured on the session."""
        self.__req.proxies = {}
    def get_user_agent(self):
        """Return the user-agent string sent with requests."""
        return self.user_agent
    def set_user_agent(self, user_agent):
        """Override the user-agent string sent with requests."""
        self.user_agent = user_agent
    @staticmethod
    def set_account_medias_request_count(count):
        """
        Set how many media objects should be retrieved in a single request

        Mutates the module-level endpoints configuration, so it affects
        all Instagram instances.

        param int count
        """
        endpoints.request_media_count = count
def get_account_by_id(self, id):
"""
:param id: account id
:return: Account
"""
username = self.get_username_by_id(id)
return self.get_account(username)
    def get_username_by_id(self, id):
        """
        Resolve a numeric account id to its username via the private info
        endpoint (requires an authenticated session).

        :param id: account id
        :return: username string from response
        :raises InstagramNotFoundException: when the id does not resolve
        :raises InstagramException: on non-200 responses, non-JSON bodies
            or a non-'ok' status in the payload
        """
        time.sleep(self.sleep_between_requests)
        response = self.__req.get(
            endpoints.get_account_json_private_info_link_by_account_id(
                id), headers=self.generate_headers(self.user_session))

        if Instagram.HTTP_NOT_FOUND == response.status_code:
            raise InstagramNotFoundException(
                'Failed to fetch account with given id')

        if Instagram.HTTP_OK != response.status_code:
            raise InstagramException.default(response.text,
                                             response.status_code)

        json_response = response.json()
        if not json_response:
            raise InstagramException('Response does not JSON')

        if json_response['status'] != 'ok':
            # surface the server-provided message when available
            message = json_response['message'] if (
                    'message' in json_response.keys()) else 'Unknown Error'
            raise InstagramException(message)

        return json_response['user']['username']
    def generate_headers(self, session, gis_token=None):
        """
        Build request headers for a (possibly authenticated) request.

        :param session: user session dict (cookie name -> value), or None
            for anonymous requests
        :param gis_token: optional value for the x-instagram-gis header
        :return: header dict
        """
        headers = {}
        if session is not None:
            cookies = ''
            for key in session.keys():
                cookies += f"{key}={session[key]}; "

            # prefer the csrftoken cookie; fall back to x-csrftoken when unset
            csrf = session['x-csrftoken'] if session['csrftoken'] is None else \
                session['csrftoken']

            headers = {
                'cookie': cookies,
                'referer': endpoints.BASE_URL + '/',
                'x-csrftoken': csrf
            }

        if self.user_agent is not None:
            headers['user-agent'] = self.user_agent

            # NOTE(review): gis header is only attached when a user agent is
            # set (nested under the user-agent check) — confirm intent.
            if gis_token is not None:
                headers['x-instagram-gis'] = gis_token

        return headers
    def __generate_gis_token(self, variables):
        """
        Build the x-instagram-gis header value: md5 of "<rhx_gis>:<variables>".

        :param variables: dict (JSON-encoded with compact separators) or a
            plain string combined with the rhx_gis value
        :return: hex digest string for the x-instagram-gis header
        """
        # fall back to the literal 'NULL' when no rhx_gis could be scraped
        rhx_gis = self.__get_rhx_gis() if self.__get_rhx_gis() is not None else 'NULL'
        string_to_hash = ':'.join([rhx_gis, json.dumps(variables, separators=(',', ':')) if isinstance(variables, dict) else variables])
        return hashlib.md5(string_to_hash.encode('utf-8')).hexdigest()
    def __get_rhx_gis(self):
        """
        Return the cached rhx_gis token, scraping it from the base page's
        shared data on first use.

        :return: rhx_gis string, or None when the page does not expose one
        :raises InstagramException: when the shared data cannot be fetched
        """
        if self.rhx_gis is None:
            try:
                shared_data = self.__get_shared_data_from_page()
            except Exception as _:
                raise InstagramException('Could not extract gis from page')

            if 'rhx_gis' in shared_data.keys():
                self.rhx_gis = shared_data['rhx_gis']
            else:
                self.rhx_gis = None

        return self.rhx_gis
    def __get_mid(self):
        """manually fetches the machine id from graphQL

        :return: raw response body (the machine id string)
        :raises InstagramException: on non-200 responses
        """
        time.sleep(self.sleep_between_requests)
        response = self.__req.get('https://www.instagram.com/web/__mid/')

        if response.status_code != Instagram.HTTP_OK:
            raise InstagramException.default(response.text,
                                             response.status_code)

        return response.text
    def __get_shared_data_from_page(self, url=endpoints.BASE_URL):
        """
        Fetch *url* and return its parsed window._sharedData payload.

        :param url: the requested url (trailing slash is normalized)
        :return: dict extracted from the page, or None when absent
        :raises InstagramNotFoundException: on HTTP 404
        :raises InstagramException: on other non-200 responses
        """
        url = url.rstrip('/') + '/'
        time.sleep(self.sleep_between_requests)
        response = self.__req.get(url, headers=self.generate_headers(
            self.user_session))

        if Instagram.HTTP_NOT_FOUND == response.status_code:
            raise InstagramNotFoundException(f"Page {url} not found")

        if not Instagram.HTTP_OK == response.status_code:
            raise InstagramException.default(response.text,
                                             response.status_code)

        return Instagram.extract_shared_data_from_body(response.text)
@staticmethod
def extract_shared_data_from_body(body):
"""
:param body: html string from a page
:return: a dict extract from page
"""
array = re.findall(r'_sharedData = .*?;</script>', body)
if len(array) > 0:
raw_json = array[0][len("_sharedData ="):-len(";</script>")]
return json.loads(raw_json)
return None
    def search_tags_by_tag_name(self, tag):
        """
        Search hashtags matching *tag* via the general search endpoint.

        :param tag: tag string
        :return: list of Tag (empty when nothing matches)
        :raises InstagramNotFoundException: on HTTP 404
        :raises InstagramException: on other errors or non-'ok' status
        """
        # TODO: Add tests and auth
        time.sleep(self.sleep_between_requests)
        response = self.__req.get(endpoints.get_general_search_json_link(tag))

        if Instagram.HTTP_NOT_FOUND == response.status_code:
            raise InstagramNotFoundException(
                'Account with given username does not exist.')

        if not Instagram.HTTP_OK == response.status_code:
            raise InstagramException.default(response.text,
                                             response.status_code)

        json_response = response.json()
        try:
            status = json_response['status']
            if status != 'ok':
                raise InstagramException(
                    'Response code is not equal 200. '
                    'Something went wrong. Please report issue.')
        except KeyError:
            raise InstagramException('Response code is not equal 200. Something went wrong. Please report issue.')

        try:
            hashtags_raw = json_response['hashtags']
            if len(hashtags_raw) == 0:
                return []
        except KeyError:
            # no 'hashtags' key at all -> treat as an empty result
            return []

        hashtags = []
        for json_hashtag in hashtags_raw:
            hashtags.append(Tag(json_hashtag['hashtag']))

        return hashtags
def get_medias(self, username, count=20, maxId=''):
"""
:param username: instagram username
:param count: the number of how many media you want to get
:param maxId: used to paginate
:return: list of Media
"""
account = self.get_account(username)
return self.get_medias_by_user_id(account.identifier, count, maxId)
def get_medias_by_code(self, media_code):
"""
:param media_code: media code
:return: Media
"""
url = endpoints.get_media_page_link(media_code)
return self.get_media_by_url(url)
    def get_medias_by_user_id(self, id, count=12, max_id=''):
        """
        Fetch up to *count* medias for an account id, following pagination.

        The last-used cursor is recorded per account id in
        self.user_media_progress so interrupted fetches can be resumed.

        :param id: instagram account id
        :param count: the number of medias to fetch
        :param max_id: pagination cursor to resume from
        :return: list of Media
        """
        index = 0
        medias = []
        is_more_available = True

        while index < count and is_more_available:
            # GraphQL query variables; 'after' carries the paging cursor
            variables = {
                'id': str(id),
                'first': str(count),
                'after': str(max_id)
            }
            self.user_media_progress[str(id)] = str(max_id)

            headers = self.generate_headers(self.user_session,
                                            self.__generate_gis_token(
                                                variables))

            time.sleep(self.sleep_between_requests)
            response = self.__req.get(
                endpoints.get_account_medias_json_link(variables),
                headers=headers)

            if not Instagram.HTTP_OK == response.status_code:
                raise InstagramException.default(response.text,
                                                 response.status_code)

            arr = json.loads(response.text)

            try:
                nodes = arr['data']['user']['edge_owner_to_timeline_media'][
                    'edges']
            except KeyError:
                # NOTE(review): returns an empty dict here, unlike every
                # other path which returns a list — callers should treat it
                # as "no medias"
                return {}

            for mediaArray in nodes:
                if index == count:
                    return medias
                media = Media(mediaArray['node'])
                medias.append(media)
                index += 1

            if not nodes or nodes == '':
                return medias

            # advance the cursor from the page_info block
            max_id = \
                arr['data']['user']['edge_owner_to_timeline_media'][
                    'page_info'][
                    'end_cursor']
            is_more_available = \
                arr['data']['user']['edge_owner_to_timeline_media'][
                    'page_info'][
                    'has_next_page']
            self.user_media_progress[str(id)] = str(max_id)

        return medias
def get_media_by_id(self, media_id):
"""
:param media_id: media id
:return: list of Media
"""
media_link = Media.get_link_from_id(media_id)
return self.get_media_by_url(media_link)
def get_media_by_url(self, media_url):
"""
:param media_url: media url
:return: Media
"""
url_regex = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
if len(re.findall(url_regex, media_url)) <= 0:
raise ValueError('Malformed media url')
url = media_url.rstrip('/') + '/?__a=1'
time.sleep(self.sleep_between_requests)
response = self.__req.get(url, headers=self.generate_headers(
self.user_session))
if Instagram.HTTP_NOT_FOUND == response.status_code:
raise InstagramNotFoundException(
'Media with given code does not exist or account is private.')
if Instagram.HTTP_OK != response.status_code:
raise InstagramException.default(response.text,
response.status_code)
media_array = response.json()
try:
media_in_json = media_array['graphql']['shortcode_media']
except KeyError:
raise InstagramException('Media with this code does not exist')
return Media(media_in_json)
def get_medias_from_feed(self, username, count=20):
"""
:param username: instagram username
:param count: the number of how many media you want to get
:return: list of Media
"""
medias = []
index = 0
time.sleep(self.sleep_between_requests)
response = self.__req.get(endpoints.get_account_json_link(username),
headers=self.generate_headers(
self.user_session))
if Instagram.HTTP_NOT_FOUND == response.status_code:
raise InstagramNotFoundException(
'Account with given username does not exist.')
if Instagram.HTTP_OK != response.status_code:
raise InstagramException.default(response.text,
response.status_code)
user_array = response.json()
try:
user = user_array['graphql']['user']
except KeyError:
raise InstagramNotFoundException(
'Account with this username does not exist')
try:
nodes = user['edge_owner_to_timeline_media']['edges']
if len(nodes) == 0:
return []
except Exception:
return []
for media_array in nodes:
if index == count:
return medias
medias.append(Media(media_array['node']))
index += 1
return medias
    def get_medias_by_tag(self, tag, count=12, max_id='', min_timestamp=None):
        """Fetch media for a hashtag, newest first, stopping early on
        duplicates or once posts fall before *min_timestamp*.

        :param tag: tag string
        :param count: the number of how many media you want to get
        :param max_id: used to paginate
        :param min_timestamp: limit the time you want to start from
        :return: list of Media
        """
        index = 0
        medias = []
        media_ids = []
        has_next_page = True
        while index < count and has_next_page:
            time.sleep(self.sleep_between_requests)
            response = self.__req.get(
                endpoints.get_medias_json_by_tag_link(tag, max_id),
                headers=self.generate_headers(self.user_session))
            if response.status_code != Instagram.HTTP_OK:
                raise InstagramException.default(response.text,
                                                 response.status_code)
            arr = response.json()
            try:
                # Probe the payload shape; missing keys mean no usable data.
                arr['graphql']['hashtag']['edge_hashtag_to_media']['count']
            except KeyError:
                return []
            nodes = arr['graphql']['hashtag']['edge_hashtag_to_media']['edges']
            for media_array in nodes:
                if index == count:
                    return medias
                media = Media(media_array['node'])
                # Stop if the API starts repeating items we already collected.
                if media.identifier in media_ids:
                    return medias
                # Stop once we page past the requested time window.
                if min_timestamp is not None \
                        and media.created_time < min_timestamp:
                    return medias
                media_ids.append(media.identifier)
                medias.append(media)
                index += 1
            if len(nodes) == 0:
                return medias
            # Advance the cursor for the next page.
            max_id = \
                arr['graphql']['hashtag']['edge_hashtag_to_media']['page_info'][
                    'end_cursor']
            has_next_page = \
                arr['graphql']['hashtag']['edge_hashtag_to_media']['page_info'][
                    'has_next_page']
        return medias
def get_medias_by_location_id(self, facebook_location_id, count=24,
max_id=''):
"""
:param facebook_location_id: facebook location id
:param count: the number of how many media you want to get
:param max_id: used to paginate
:return: list of Media
"""
index = 0
medias = []
has_next_page = True
while index < count and has_next_page:
time.sleep(self.sleep_between_requests)
response = self.__req.get(
endpoints.get_medias_json_by_location_id_link(
facebook_location_id, max_id),
headers=self.generate_headers(self.user_session))
if response.status_code != Instagram.HTTP_OK:
raise InstagramException.default(response.text,
response.status_code)
arr = response.json()
nodes = arr['graphql']['location']['edge_location_to_media'][
'edges']
for media_array in nodes:
if index == count:
return medias
medias.append(Media(media_array['node']))
index += 1
if len(nodes) == 0:
return medias
has_next_page = \
arr['graphql']['location']['edge_location_to_media'][
'page_info'][
'has_next_page']
max_id = \
arr['graphql']['location']['edge_location_to_media'][
'page_info'][
'end_cursor']
return medias
def get_current_top_medias_by_tag_name(self, tag_name):
"""
:param tag_name: tag string
:return: list of the top Media
"""
time.sleep(self.sleep_between_requests)
response = self.__req.get(
endpoints.get_medias_json_by_tag_link(tag_name, ''),
headers=self.generate_headers(self.user_session))
if response.status_code == Instagram.HTTP_NOT_FOUND:
raise InstagramNotFoundException(
'Account with given username does not exist.')
if response.status_code is not Instagram.HTTP_OK:
raise InstagramException.default(response.text,
response.status_code)
json_response = response.json()
medias = []
nodes = \
json_response['graphql']['hashtag']['edge_hashtag_to_top_posts'][
'edges']
for media_array in nodes:
medias.append(Media(media_array['node']))
return medias
def get_current_top_medias_by_location_id(self, facebook_location_id):
"""
:param facebook_location_id: facebook location id
:return: list of the top Media
"""
time.sleep(self.sleep_between_requests)
response = self.__req.get(
endpoints.get_medias_json_by_location_id_link(facebook_location_id),
headers=self.generate_headers(self.user_session))
if response.status_code == Instagram.HTTP_NOT_FOUND:
raise InstagramNotFoundException(
"Location with this id doesn't exist")
if response.status_code != Instagram.HTTP_OK:
raise InstagramException.default(response.text,
response.status_code)
json_response = response.json()
nodes = \
json_response['graphql']['location']['edge_location_to_top_posts'][
'edges']
medias = []
for media_array in nodes:
medias.append(Media(media_array['node']))
return medias
def get_paginate_medias(self, username, max_id=''):
"""
:param username: instagram user name
:param max_id: used to paginate next time
:return: dict that contains Media list, maxId, hasNextPage
"""
account = self.get_account(username)
return self.get_paginate_medias_by_user_id(account.identifier, max_id=max_id)
def get_paginate_medias_by_user_id(self, user_id, max_id=''):
"""
:param user_id: instagram user id
:param max_id: used to paginate next time
:return: dict that contains Media list, maxId, hasNextPage
"""
has_next_page = True
medias = []
to_return = {
'medias': medias,
'maxId': max_id,
'hasNextPage': has_next_page,
}
variables = {
'id': str(user_id),
'first': str(endpoints.request_media_count),
'after': str(max_id)
}
time.sleep(self.sleep_between_requests)
response = self.__req.get(
endpoints.get_account_medias_json_link(variables),
headers=self.generate_headers(self.user_session,
self.__generate_gis_token(variables))
)
if not Instagram.HTTP_OK == response.status_code:
raise InstagramException.default(response.text,
response.status_code)
arr = response.json()
try:
nodes = arr['data']['user']['edge_owner_to_timeline_media']['edges']
except KeyError:
return to_return
for mediaArray in nodes:
print('received raw:', mediaArray['node'])
medias.append(Media(mediaArray['node']))
max_id = \
arr['data']['user']['edge_owner_to_timeline_media']['page_info']['end_cursor']
has_next_page = \
arr['data']['user']['edge_owner_to_timeline_media']['page_info']['has_next_page']
to_return = {
'medias': medias,
'maxId': max_id,
'hasNextPage': has_next_page,
}
return to_return
    def get_paginate_medias_by_tag(self, tag, max_id=''):
        """Fetch a single page of media for a hashtag.

        :param tag: tag name
        :param max_id: used to paginate next time
        :return: dict that contains Media list, maxId, hasNextPage
        """
        has_next_page = True
        medias = []
        # Fallback payload, returned when the response lacks expected keys.
        to_return = {
            'medias': medias,
            'maxId': max_id,
            'hasNextPage': has_next_page,
        }
        time.sleep(self.sleep_between_requests)
        response = self.__req.get(
            endpoints.get_medias_json_by_tag_link(tag, max_id),
            headers=self.generate_headers(self.user_session))
        if response.status_code != Instagram.HTTP_OK:
            raise InstagramException.default(response.text,
                                             response.status_code)
        arr = response.json()
        try:
            nodes = arr['graphql']['hashtag']['edge_hashtag_to_media']['edges']
        except KeyError:
            return to_return
        for media_array in nodes:
            medias.append(Media(media_array['node']))
        # Cursor and flag the caller needs to request the next page.
        max_id = \
            arr['graphql']['hashtag']['edge_hashtag_to_media']['page_info'][
                'end_cursor']
        has_next_page = \
            arr['graphql']['hashtag']['edge_hashtag_to_media']['page_info'][
                'has_next_page']
        try:
            media_count = arr['graphql']['hashtag']['edge_hashtag_to_media'][
                'count']
        except KeyError:
            return to_return
        to_return = {
            'medias': medias,
            'count': media_count,
            'maxId': max_id,
            'hasNextPage': has_next_page,
        }
        return to_return
def get_location_by_id(self, facebook_location_id):
"""
:param facebook_location_id: facebook location id
:return: Location
"""
time.sleep(self.sleep_between_requests)
response = self.__req.get(
endpoints.get_medias_json_by_location_id_link(facebook_location_id),
headers=self.generate_headers(self.user_session))
if response.status_code == Instagram.HTTP_NOT_FOUND:
raise InstagramNotFoundException(
'Location with this id doesn\'t exist')
if response.status_code != Instagram.HTTP_OK:
raise InstagramException.default(response.text,
response.status_code)
json_response = response.json()
return Location(json_response['graphql']['location'])
    def get_media_likes_by_code(self, code, count=10, max_id=None):
        """Fetch accounts that liked the media with the given shortcode.

        :param code: media shortcode
        :param count: maximum number of likers to fetch
        :param max_id: pagination cursor to resume from
        :return: dict with 'accounts' (list of Account) and 'next_page' cursor
        """
        remain = count
        likes = []
        index = 0
        has_previous = True
        #TODO: $index < $count (bug index getting to high since max_likes_per_request gets sometimes changed by instagram)
        while (has_previous and index < count):
            # Request at most MAX_LIKES_PER_REQUEST likers per round trip and
            # keep a running tally of how many are still outstanding.
            if (remain > self.MAX_LIKES_PER_REQUEST):
                number_of_likes_to_receive = self.MAX_LIKES_PER_REQUEST
                remain -= self.MAX_LIKES_PER_REQUEST
                index += self.MAX_LIKES_PER_REQUEST
            else:
                number_of_likes_to_receive = remain
                index += remain
                remain = 0
            variables = {
                "shortcode": str(code),
                "first": str(number_of_likes_to_receive),
                "after": '' if not max_id else max_id
            }
            time.sleep(self.sleep_between_requests)
            response = self.__req.get(
                endpoints.get_last_likes_by_code(variables),
                headers=self.generate_headers(self.user_session))
            if not response.status_code == Instagram.HTTP_OK:
                raise InstagramException.default(response.text,response.status_code)
            jsonResponse = response.json()
            nodes = jsonResponse['data']['shortcode_media']['edge_liked_by']['edges']
            for likesArray in nodes:
                like = Account(likesArray['node'])
                likes.append(like)
            has_previous = jsonResponse['data']['shortcode_media']['edge_liked_by']['page_info']['has_next_page']
            # Clamp the target to the real number of likes on the media.
            number_of_likes = jsonResponse['data']['shortcode_media']['edge_liked_by']['count']
            if count > number_of_likes:
                count = number_of_likes
            if len(nodes) == 0:
                # Empty page: return what we have plus the current cursor.
                data = {}
                data['next_page'] = max_id
                data['accounts'] = likes
                return data
            max_id = jsonResponse['data']['shortcode_media']['edge_liked_by']['page_info']['end_cursor']
        data = {}
        data['next_page'] = max_id
        data['accounts'] = likes
        return data
def get_followers(self, account_id, count=20, page_size=20, end_cursor='',
delayed=True):
"""
:param account_id:
:param count:
:param page_size:
:param end_cursor:
:param delayed:
:return:
"""
# TODO set time limit
# if ($delayed) {
# set_time_limit($this->pagingTimeLimitSec);
# }
index = 0
accounts = []
next_page = end_cursor
if count < page_size:
raise InstagramException(
'Count must be greater than or equal to page size.')
while True:
time.sleep(self.sleep_between_requests)
variables = {
'id': str(account_id),
'first': str(count),
'after': next_page
}
headers = self.generate_headers(self.user_session)
response = self.__req.get(
endpoints.get_followers_json_link(variables),
headers=headers)
if not response.status_code == Instagram.HTTP_OK:
raise InstagramException.default(response.text,
response.status_code)
jsonResponse = response.json()
if jsonResponse['data']['user']['edge_followed_by']['count'] == 0:
return accounts
edgesArray = jsonResponse['data']['user']['edge_followed_by'][
'edges']
if len(edgesArray) == 0:
InstagramException(
f'Failed to get followers of account id {account_id}.'
f' The account is private.',
Instagram.HTTP_FORBIDDEN)
pageInfo = jsonResponse['data']['user']['edge_followed_by'][
'page_info']
if pageInfo['has_next_page']:
next_page = pageInfo['end_cursor']
for edge in edgesArray:
accounts.append(Account(edge['node']))
index += 1
if index >= count:
#since break 2 not in python, looking for better solution since duplicate code
data = {}
data['next_page'] = next_page
data['accounts'] = accounts
return data
#must be below here
if not pageInfo['has_next_page']:
break
if delayed != None:
# Random wait between 1 and 3 sec to mimic browser
microsec = random.uniform(1.0, 3.0)
time.sleep(microsec)
data = {}
data['next_page'] = next_page
data['accounts'] = accounts
return data
def get_following(self, account_id, count=20, page_size=20, end_cursor='',
delayed=True):
"""
:param account_id:
:param count:
:param page_size:
:param end_cursor:
:param delayed:
:return:
"""
#TODO
# if ($delayed) {
# set_time_limit($this->pagingTimeLimitSec);
# }
index = 0
accounts = []
next_page = end_cursor
if count < page_size:
raise InstagramException('Count must be greater than or equal to page size.')
while True:
variables = {
'id': str(account_id),
'first': str(count),
'after': next_page
}
headers = self.generate_headers(self.user_session)
response = self.__req.get(
endpoints.get_following_json_link(variables),
headers=headers)
if not response.status_code == Instagram.HTTP_OK:
raise InstagramException.default(response.text,response.status_code)
jsonResponse = response.json()
if jsonResponse['data']['user']['edge_follow']['count'] == 0:
return accounts
edgesArray = jsonResponse['data']['user']['edge_follow'][
'edges']
if len(edgesArray) == 0:
raise InstagramException(
f'Failed to get follows of account id {account_id}.'
f' The account is private.',
Instagram.HTTP_FORBIDDEN)
pageInfo = jsonResponse['data']['user']['edge_follow']['page_info']
if pageInfo['has_next_page']:
next_page = pageInfo['end_cursor']
for edge in edgesArray:
accounts.append(Account(edge['node']))
index += 1
if index >= count:
#since no break 2, looking for better solution since duplicate code
data = {}
data['next_page'] = next_page
data['accounts'] = accounts
return data
#must be below here
if not pageInfo['has_next_page']:
break
if delayed != None:
# Random wait between 1 and 3 sec to mimic browser
microsec = random.uniform(1.0, 3.0)
time.sleep(microsec)
data = {}
data['next_page'] = next_page
data['accounts'] = accounts
return data
def get_media_comments_by_id(self, media_id, count=10, max_id=None):
"""
:param media_id: media id
:param count: the number of how many comments you want to get
:param max_id: used to paginate
:return: Comment List
"""
code = Media.get_code_from_id(media_id)
return self.get_media_comments_by_code(code, count, max_id)
    def get_media_comments_by_code(self, code, count=10, max_id=''):
        """Fetch comments on a media item, paging through comment threads.

        :param code: media code
        :param count: the number of how many comments you want to get
        :param max_id: used to paginate
        :return: dict with 'comments' (list of Comment) and 'next_page' cursor
        """
        comments = []
        index = 0
        has_previous = True
        while has_previous and index < count:
            # Never ask for more than the API allows in a single request.
            number_of_comments_to_receive = 0
            if count - index > Instagram.MAX_COMMENTS_PER_REQUEST:
                number_of_comments_to_receive = Instagram.MAX_COMMENTS_PER_REQUEST
            else:
                number_of_comments_to_receive = count - index
            variables = {
                "shortcode": str(code),
                "first": str(number_of_comments_to_receive),
                "after": '' if not max_id else max_id
            }
            comments_url = endpoints.get_comments_before_comments_id_by_code(
                variables)
            time.sleep(self.sleep_between_requests)
            response = self.__req.get(comments_url,
                                      headers=self.generate_headers(
                                          self.user_session,
                                          self.__generate_gis_token(variables)))
            if not response.status_code == Instagram.HTTP_OK:
                raise InstagramException.default(response.text,
                                                 response.status_code)
            jsonResponse = response.json()
            nodes = jsonResponse['data']['shortcode_media']['edge_media_to_parent_comment']['edges']
            for commentArray in nodes:
                comment = Comment(commentArray['node'])
                comments.append(comment)
                index += 1
            has_previous = jsonResponse['data']['shortcode_media']['edge_media_to_parent_comment']['page_info']['has_next_page']
            # Clamp the target to the real number of comments on the media.
            number_of_comments = jsonResponse['data']['shortcode_media']['edge_media_to_parent_comment']['count']
            if count > number_of_comments:
                count = number_of_comments
            max_id = jsonResponse['data']['shortcode_media']['edge_media_to_parent_comment']['page_info']['end_cursor']
            if len(nodes) == 0:
                break
        data = {}
        data['next_page'] = max_id
        data['comments'] = comments
        return data
def get_account(self, username):
"""
:param username: username
:return: Account
"""
time.sleep(self.sleep_between_requests)
response = self.__req.get(endpoints.get_account_page_link(
username), headers=self.generate_headers(self.user_session))
if Instagram.HTTP_NOT_FOUND == response.status_code:
raise InstagramNotFoundException(
'Account with given username does not exist.')
if Instagram.HTTP_OK != response.status_code:
raise InstagramException.default(response.text,
response.status_code)
user_array = Instagram.extract_shared_data_from_body(response.text)
if user_array['entry_data']['ProfilePage'][0]['graphql']['user'] is None:
raise InstagramNotFoundException(
'Account with this username does not exist')
return Account(
user_array['entry_data']['ProfilePage'][0]['graphql']['user'])
    def get_stories(self, reel_ids=None):
        """Fetch current stories, either for the given reels or the whole
        tray of the logged-in user.

        :param reel_ids: reel ids; when empty/None the user's story tray is
            queried first to discover them
        :return: UserStories List
        """
        variables = {'precomposed_overlay': False, 'reel_ids': []}
        if reel_ids is None or len(reel_ids) == 0:
            # No reels given: discover them from the user's story tray.
            time.sleep(self.sleep_between_requests)
            response = self.__req.get(endpoints.get_user_stories_link(),
                                      headers=self.generate_headers(
                                          self.user_session))
            if not Instagram.HTTP_OK == response.status_code:
                raise InstagramException.default(response.text,
                                                 response.status_code)
            json_response = response.json()
            try:
                edges = json_response['data']['user']['feed_reels_tray'][
                    'edge_reels_tray_to_reel']['edges']
            except KeyError:
                return []
            for edge in edges:
                variables['reel_ids'].append(edge['node']['id'])
        else:
            variables['reel_ids'] = reel_ids
        time.sleep(self.sleep_between_requests)
        response = self.__req.get(endpoints.get_stories_link(variables),
                                  headers=self.generate_headers(
                                      self.user_session))
        if not Instagram.HTTP_OK == response.status_code:
            raise InstagramException.default(response.text,
                                             response.status_code)
        json_response = response.json()
        try:
            reels_media = json_response['data']['reels_media']
            if len(reels_media) == 0:
                return []
        except KeyError:
            return []
        stories = []
        # Group the raw story items into one UserStories bundle per owner.
        for user in reels_media:
            user_stories = UserStories()
            user_stories.owner = Account(user['user'])
            for item in user['items']:
                story = Story(item)
                user_stories.stories.append(story)
            stories.append(user_stories)
        return stories
    def search_accounts_by_username(self, username):
        """Search accounts whose username matches the given string.

        :param username: user name
        :return: Account List
        """
        time.sleep(self.sleep_between_requests)
        response = self.__req.get(
            endpoints.get_general_search_json_link(username),
            headers=self.generate_headers(self.user_session))
        if Instagram.HTTP_NOT_FOUND == response.status_code:
            raise InstagramNotFoundException(
                'Account with given username does not exist.')
        if not Instagram.HTTP_OK == response.status_code:
            raise InstagramException.default(response.text,
                                             response.status_code)
        json_response = response.json()
        try:
            # The body carries its own status field; anything other than
            # 'ok' (or a missing field) is treated as a failed search.
            status = json_response['status']
            if not status == 'ok':
                raise InstagramException(
                    'Response code is not equal 200.'
                    ' Something went wrong. Please report issue.')
        except KeyError:
            raise InstagramException(
                'Response code is not equal 200.'
                ' Something went wrong. Please report issue.')
        try:
            users = json_response['users']
            if len(users) == 0:
                return []
        except KeyError:
            return []
        accounts = []
        for json_account in json_response['users']:
            accounts.append(Account(json_account['user']))
        return accounts
# TODO not optimal separate http call after getMedia
def get_media_tagged_users_by_code(self, code):
"""
:param code: media short code
:return: list contains tagged_users dict
"""
url = endpoints.get_media_json_link(code)
time.sleep(self.sleep_between_requests)
response = self.__req.get(url, headers=self.generate_headers(
self.user_session))
if not Instagram.HTTP_OK == response.status_code:
raise InstagramException.default(response.text,
response.status_code)
json_response = response.json()
try:
tag_data = json_response['graphql']['shortcode_media'][
'edge_media_to_tagged_user']['edges']
except KeyError:
return []
tagged_users = []
for tag in tag_data:
x_pos = tag['node']['x']
y_pos = tag['node']['y']
user = tag['node']['user']
# TODO: add Model and add Data to it instead of Dict
tagged_user = dict()
tagged_user['x_pos'] = x_pos
tagged_user['y_pos'] = y_pos
tagged_user['user'] = user
tagged_users.append(tagged_user)
return tagged_users
def is_logged_in(self, session):
"""
:param session: session dict
:return: bool
"""
if session is None or 'sessionid' not in session.keys():
return False
session_id = session['sessionid']
csrf_token = session['csrftoken']
headers = {
'cookie': f"ig_cb=1; csrftoken={csrf_token}; sessionid={session_id};",
'referer': endpoints.BASE_URL + '/',
'x-csrftoken': csrf_token,
'X-CSRFToken': csrf_token,
'user-agent': self.user_agent,
}
time.sleep(self.sleep_between_requests)
response = self.__req.get(endpoints.BASE_URL, headers=headers)
if not response.status_code == Instagram.HTTP_OK:
return False
cookies = response.cookies.get_dict()
if cookies is None or not 'ds_user_id' in cookies.keys():
return False
return True
def login(self, force=False, two_step_verificator=None):
"""support_two_step_verification true works only in cli mode - just run login in cli mode - save cookie to file and use in any mode
:param force: true will refresh the session
:param two_step_verificator: true will need to do verification when an account goes wrong
:return: headers dict
"""
if self.session_username is None or self.session_password is None:
raise InstagramAuthException("User credentials not provided")
if two_step_verificator:
two_step_verificator = ConsoleVerification()
session = json.loads(
Instagram.instance_cache.get_saved_cookies()) if Instagram.instance_cache.get_saved_cookies() != None else None
if force or not self.is_logged_in(session):
time.sleep(self.sleep_between_requests)
response = self.__req.get(endpoints.BASE_URL)
if not response.status_code == Instagram.HTTP_OK:
raise InstagramException.default(response.text,
response.status_code)
match = re.findall(r'"csrf_token":"(.*?)"', response.text)
if len(match) > 0:
csrfToken = match[0]
cookies = response.cookies.get_dict()
# cookies['mid'] doesnt work at the moment so fetch it with function
mid = self.__get_mid()
headers = {
'cookie': f"ig_cb=1; csrftoken={csrfToken}; mid={mid};",
'referer': endpoints.BASE_URL + '/',
'x-csrftoken': csrfToken,
'X-CSRFToken': csrfToken,
'user-agent': self.user_agent,
}
payload = {'username': self.session_username,
'password': self.session_password}
response = self.__req.post(endpoints.LOGIN_URL, data=payload,
headers=headers)
if not response.status_code == Instagram.HTTP_OK:
if (
response.status_code == Instagram.HTTP_BAD_REQUEST
and response.text is not None
and response.json()['message'] == 'checkpoint_required'
and two_step_verificator is not None):
response = self.__verify_two_step(response, cookies,
two_step_verificator)
print('checkpoint required')
elif response.status_code is not None and response.text is not None:
raise InstagramAuthException(
f'Response code is {response.status_code}. Body: {response.text} Something went wrong. Please report issue.',
response.status_code)
else:
raise InstagramAuthException(
'Something went wrong. Please report issue.',
response.status_code)
if not response.json()['authenticated']:
raise InstagramAuthException('User credentials are wrong.')
cookies = response.cookies.get_dict()
cookies['mid'] = mid
Instagram.instance_cache.set_saved_cookies(json.dumps(cookies, separators=(',', ':')))
self.user_session = cookies
else:
self.user_session = session
return self.generate_headers(self.user_session)
    def __verify_two_step(self, response, cookies, two_step_verificator):
        """Walk Instagram's checkpoint (two-step verification) flow.

        :param response: Response object returned by Request
        :param cookies: user cookies
        :param two_step_verificator: two_step_verification instance
        :return: Response
        """
        # Merge cookies from the failed login response over the originals.
        new_cookies = response.cookies.get_dict()
        cookies = {**cookies, **new_cookies}
        cookie_string = ''
        for key in cookies.keys():
            cookie_string += f'{key}={cookies[key]};'
        headers = {
            'cookie': cookie_string,
            'referer': endpoints.LOGIN_URL,
            'x-csrftoken': cookies['csrftoken'],
            'user-agent': self.user_agent,
        }
        url = endpoints.BASE_URL + response.json()['checkpoint_url']
        time.sleep(self.sleep_between_requests)
        response = self.__req.get(url, headers=headers)
        data = Instagram.extract_shared_data_from_body(response.text)
        if data is not None:
            # Collect the verification channels Instagram offers (email /
            # phone); the payload layout varies, hence the nested fallbacks.
            try:
                choices = \
                    data['entry_data']['Challenge'][0]['extraData']['content'][
                        3][
                        'fields'][0]['values']
            except KeyError:
                choices = dict()
                try:
                    fields = data['entry_data']['Challenge'][0]['fields']
                    try:
                        choices.update({'label': f"Email: {fields['email']}",
                                        'value': 1})
                    except KeyError:
                        pass
                    try:
                        choices.update(
                            {'label': f"Phone: {fields['phone_number']}",
                             'value': 0})
                    except KeyError:
                        pass
                except KeyError:
                    pass
            if len(choices) > 0:
                # Ask the verificator which channel to use, then request that
                # the security code be sent there.
                selected_choice = two_step_verificator.get_verification_type(
                    choices)
                response = self.__req.post(url,
                                           data={'choice': selected_choice},
                                           headers=headers)
        # The challenge page must now present a security-code input form.
        if len(re.findall('name="security_code"', response.text)) <= 0:
            raise InstagramAuthException(
                'Something went wrong when try '
                'two step verification. Please report issue.',
                response.status_code)
        security_code = two_step_verificator.get_security_code()
        post_data = {
            'csrfmiddlewaretoken': cookies['csrftoken'],
            'verify': 'Verify Account',
            'security_code': security_code,
        }
        response = self.__req.post(url, data=post_data, headers=headers)
        if not response.status_code == Instagram.HTTP_OK \
                or 'Please check the code we sent you and try again' in response.text:
            raise InstagramAuthException(
                'Something went wrong when try two step'
                ' verification and enter security code. Please report issue.',
                response.status_code)
        return response
def add_comment(self, media_id, text, replied_to_comment_id=None):
"""
:param media_id: media id
:param text: the content you want to post
:param replied_to_comment_id: the id of the comment you want to reply
:return: Comment
"""
media_id = media_id.identifier if isinstance(media_id, Media) else media_id
replied_to_comment_id = replied_to_comment_id._data['id'] if isinstance(replied_to_comment_id, Comment) else replied_to_comment_id
body = {'comment_text': text,
'replied_to_comment_id': replied_to_comment_id
if replied_to_comment_id is not None else ''}
response = self.__req.post(endpoints.get_add_comment_url(media_id),
data=body, headers=self.generate_headers(
self.user_session))
if not Instagram.HTTP_OK == response.status_code:
raise InstagramException.default(response.text,
response.status_code)
json_response = response.json()
if json_response['status'] != 'ok':
status = json_response['status']
raise InstagramException(
f'Response status is {status}. '
f'Body: {response.text} Something went wrong.'
f' Please report issue.',
response.status_code)
return Comment(json_response)
def delete_comment(self, media_id, comment_id):
"""
:param media_id: media id
:param comment_id: the id of the comment you want to delete
"""
media_id = media_id.identifier if isinstance(media_id,
Media) else media_id
comment_id = comment_id._data['id'] if isinstance(comment_id,
Comment) else comment_id
response = self.__req.post(
endpoints.get_delete_comment_url(media_id, comment_id),
headers=self.generate_headers(self.user_session))
if not Instagram.HTTP_OK == response.status_code:
raise InstagramException.default(response.text,
response.status_code)
json_response = response.json()
if json_response['status'] != 'ok':
status = json_response['status']
raise InstagramException(
f'Response status is {status}. '
f'Body: {response.text} Something went wrong.'
f' Please report issue.',
response.status_code)
def like(self, media_id):
"""
:param media_id: media id
"""
media_id = media_id.identifier if isinstance(media_id,
Media) else media_id
response = self.__req.post(endpoints.get_like_url(media_id),
headers=self.generate_headers(
self.user_session))
if not Instagram.HTTP_OK == response.status_code:
raise InstagramException.default(response.text,
response.status_code)
json_response = response.json()
if json_response['status'] != 'ok':
status = json_response['status']
raise InstagramException(
f'Response status is {status}. '
f'Body: {response.text} Something went wrong.'
f' Please report issue.',
response.status_code)
def unlike(self, media_id):
"""
:param media_id: media id
"""
media_id = media_id.identifier if isinstance(media_id,
Media) else media_id
response = self.__req.post(endpoints.get_unlike_url(media_id),
headers=self.generate_headers(
self.user_session))
if not Instagram.HTTP_OK == response.status_code:
raise InstagramException.default(response.text,
response.status_code)
json_response = response.json()
if json_response['status'] != 'ok':
status = json_response['status']
raise InstagramException(
f'Response status is {status}. '
f'Body: {response.text} Something went wrong.'
f' Please report issue.',
response.status_code)
def follow(self, user_id):
"""
:param user_id: user id
:return: bool
"""
if self.is_logged_in(self.user_session):
url = endpoints.get_follow_url(user_id)
try:
follow = self.__req.post(url,
headers=self.generate_headers(
self.user_session))
if follow.status_code == Instagram.HTTP_OK:
return True
except:
raise InstagramException("Except on follow!")
return False
def unfollow(self, user_id):
"""
:param user_id: user id
:return: bool
"""
if self.is_logged_in(self.user_session):
url_unfollow = endpoints.get_unfollow_url(user_id)
try:
unfollow = self.__req.post(url_unfollow)
if unfollow.status_code == Instagram.HTTP_OK:
return unfollow
except:
raise InstagramException("Exept on unfollow!")
return False
| 34.914826 | 139 | 0.55627 |
c9a0c8250ccb513ff9db05ca3a4e4354d19d5cca | 869 | py | Python | a2ml/api/auger/a2ml.py | gitter-badger/a2ml | 1d9ef6657645b61c64090284ed8fadb1a68b932c | [
"Apache-2.0"
] | 2 | 2020-04-09T16:59:22.000Z | 2020-04-09T17:01:10.000Z | a2ml/api/auger/a2ml.py | arita37/a2ml | 3e92bede2c2ef6e63be74560cc6b904d3ec9d931 | [
"Apache-2.0"
] | null | null | null | a2ml/api/auger/a2ml.py | arita37/a2ml | 3e92bede2c2ef6e63be74560cc6b904d3ec9d931 | [
"Apache-2.0"
] | null | null | null | from a2ml.api.auger.model import AugerModel
from a2ml.api.auger.dataset import AugerDataset
from a2ml.api.auger.experiment import AugerExperiment
class AugerA2ML(object):
    """Auger A2ML implementation."""
    def __init__(self, ctx):
        # ctx carries provider configuration shared by all operations.
        super(AugerA2ML, self).__init__()
        self.ctx = ctx
    def import_data(self):
        # Create the dataset on Auger from the context's data source.
        return AugerDataset(self.ctx).create()
    def train(self):
        # Start an Auger experiment (training run).
        return AugerExperiment(self.ctx).start()
    def evaluate(self, run_id = None):
        # Fetch the leaderboard for a run (latest when run_id is None).
        return AugerExperiment(self.ctx).leaderboard(run_id)
    def deploy(self, model_id, locally=False):
        # Deploy a trained model, optionally to the local runtime.
        return AugerModel(self.ctx).deploy(model_id, locally)
    def predict(self, filename, model_id, threshold=None, locally=False):
        # Run predictions with a deployed model over the given file.
        return AugerModel(self.ctx).predict(
            filename, model_id, threshold, locally)
    def review(self):
        # Not implemented for the Auger provider.
        pass
d659e68cfcf05a923d53159a475c96ec6a324eca | 1,035 | py | Python | src/pywink/devices/types.py | vickyg3/python-wink | 1b9f4acd22a6784023ae57c2ff0ef4e26b9a38f7 | [
"MIT"
] | 78 | 2017-08-19T03:46:13.000Z | 2020-02-19T04:29:45.000Z | src/pywink/devices/types.py | vickyg3/python-wink | 1b9f4acd22a6784023ae57c2ff0ef4e26b9a38f7 | [
"MIT"
] | 5 | 2017-08-21T16:33:08.000Z | 2018-06-21T18:37:18.000Z | src/pywink/devices/types.py | vickyg3/python-wink | 1b9f4acd22a6784023ae57c2ff0ef4e26b9a38f7 | [
"MIT"
] | 13 | 2017-08-19T16:46:08.000Z | 2018-11-05T23:11:34.000Z | """
These are all the devices we currently support.
"""
# One string identifier per supported device type.
LIGHT_BULB = 'light_bulb'
BINARY_SWITCH = 'binary_switch'
SENSOR_POD = 'sensor_pod'
LOCK = 'lock'
EGGTRAY = 'eggtray'
GARAGE_DOOR = 'garage_door'
POWERSTRIP = 'powerstrip'
SHADE = 'shade'
SIREN = 'siren'
KEY = 'key'
PIGGY_BANK = 'piggy_bank'
SMOKE_DETECTOR = 'smoke_detector'
THERMOSTAT = 'thermostat'
HUB = 'hub'
FAN = 'fan'
DOOR_BELL = 'door_bell'
REMOTE = 'remote'
SPRINKLER = 'sprinkler'
BUTTON = 'button'
GANG = 'gang'
CAMERA = 'camera'
AIR_CONDITIONER = 'air_conditioner'
PROPANE_TANK = 'propane_tank'
ROBOT = 'robot'
SCENE = 'scene'
GROUP = 'group'
WATER_HEATER = 'water_heater'

# Every identifier above, in declaration order.
ALL_SUPPORTED_DEVICES = [
    LIGHT_BULB,
    BINARY_SWITCH,
    SENSOR_POD,
    LOCK,
    EGGTRAY,
    GARAGE_DOOR,
    POWERSTRIP,
    SHADE,
    SIREN,
    KEY,
    PIGGY_BANK,
    SMOKE_DETECTOR,
    THERMOSTAT,
    HUB,
    FAN,
    DOOR_BELL,
    REMOTE,
    SPRINKLER,
    BUTTON,
    GANG,
    CAMERA,
    AIR_CONDITIONER,
    PROPANE_TANK,
    ROBOT,
    SCENE,
    GROUP,
    WATER_HEATER,
]
| 27.236842 | 81 | 0.672464 |
97acb3d7a73364f96b82348b0c2346850cef3867 | 251 | py | Python | honda/honda/doctype/supplier/supplier.py | umapulkurte/honda | 1c2ac3fb177e04bad7df8cdcbffcbd376591dc76 | [
"MIT"
] | null | null | null | honda/honda/doctype/supplier/supplier.py | umapulkurte/honda | 1c2ac3fb177e04bad7df8cdcbffcbd376591dc76 | [
"MIT"
] | null | null | null | honda/honda/doctype/supplier/supplier.py | umapulkurte/honda | 1c2ac3fb177e04bad7df8cdcbffcbd376591dc76 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Wayzon and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Supplier(Document):
	"""DocType controller for Supplier; no custom behavior beyond frappe's Document."""
	pass
| 22.818182 | 49 | 0.776892 |
7785d70fcd21a59d4a0b7f842173c96513f3157f | 11,174 | py | Python | env/lib/python3.8/site-packages/plotly/graph_objs/scatter/_textfont.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | env/lib/python3.8/site-packages/plotly/graph_objs/scatter/_textfont.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | env/lib/python3.8/site-packages/plotly/graph_objs/scatter/_textfont.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
    """Text font styling for scatter trace text labels.

    Generated-style plotly graph object: each public property ("color",
    "family", "size" and their "*src" grid-column references) proxies the
    matching key of the underlying "scatter.textfont" data dict through
    ``self[...]`` item access on the base hierarchy type.
    """

    # class properties
    # --------------------
    _parent_path_str = "scatter"
    _path_str = "scatter.textfont"
    _valid_props = {"color", "colorsrc", "family", "familysrc", "size", "sizesrc"}
    # color
    # -----
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
          - A list or array of any of the above
        Returns
        -------
        str|numpy.ndarray
        """
        return self["color"]
    @color.setter
    def color(self, val):
        self["color"] = val
    # colorsrc
    # --------
    @property
    def colorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for color .
        The 'colorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object
        Returns
        -------
        str
        """
        return self["colorsrc"]
    @colorsrc.setter
    def colorsrc(self, val):
        self["colorsrc"] = val
    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
        One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
        Narrow", "Raleway", "Times New Roman".
        The 'family' property is a string and must be specified as:
          - A non-empty string
          - A tuple, list, or one-dimensional numpy array of the above
        Returns
        -------
        str|numpy.ndarray
        """
        return self["family"]
    @family.setter
    def family(self, val):
        self["family"] = val
    # familysrc
    # ---------
    @property
    def familysrc(self):
        """
        Sets the source reference on Chart Studio Cloud for family .
        The 'familysrc' property must be specified as a string or
        as a plotly.grid_objs.Column object
        Returns
        -------
        str
        """
        return self["familysrc"]
    @familysrc.setter
    def familysrc(self, val):
        self["familysrc"] = val
    # size
    # ----
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]
          - A tuple, list, or one-dimensional numpy array of the above
        Returns
        -------
        int|float|numpy.ndarray
        """
        return self["size"]
    @size.setter
    def size(self, val):
        self["size"] = val
    # sizesrc
    # -------
    @property
    def sizesrc(self):
        """
        Sets the source reference on Chart Studio Cloud for size .
        The 'sizesrc' property must be specified as a string or
        as a plotly.grid_objs.Column object
        Returns
        -------
        str
        """
        return self["sizesrc"]
    @sizesrc.setter
    def sizesrc(self, val):
        self["sizesrc"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            family .
        size
        sizesrc
            Sets the source reference on Chart Studio Cloud for
            size .
        """
    def __init__(
        self,
        arg=None,
        color=None,
        colorsrc=None,
        family=None,
        familysrc=None,
        size=None,
        sizesrc=None,
        **kwargs
    ):
        """
        Construct a new Textfont object
        Sets the text font.
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.scatter.Textfont`
        color
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            family .
        size
        sizesrc
            Sets the source reference on Chart Studio Cloud for
            size .
        Returns
        -------
        Textfont
        """
        super(Textfont, self).__init__("textfont")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scatter.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter.Textfont`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("colorsrc", None)
        _v = colorsrc if colorsrc is not None else _v
        if _v is not None:
            self["colorsrc"] = _v
        _v = arg.pop("family", None)
        _v = family if family is not None else _v
        if _v is not None:
            self["family"] = _v
        _v = arg.pop("familysrc", None)
        _v = familysrc if familysrc is not None else _v
        if _v is not None:
            self["familysrc"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v
        _v = arg.pop("sizesrc", None)
        _v = sizesrc if sizesrc is not None else _v
        if _v is not None:
            self["sizesrc"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
| 33.758308 | 82 | 0.557186 |
3d3febad416c678088daf5d8889290e773722a23 | 1,059 | py | Python | tests/test_models/test_base_model.py | Jian137/mmediting-1 | e1ac6c93441ec96696d0b530f040b91b809015b6 | [
"Apache-2.0"
] | 1,884 | 2020-07-09T18:53:43.000Z | 2022-03-31T12:06:18.000Z | tests/test_models/test_base_model.py | Jian137/mmediting-1 | e1ac6c93441ec96696d0b530f040b91b809015b6 | [
"Apache-2.0"
] | 622 | 2020-07-09T18:52:27.000Z | 2022-03-31T14:41:09.000Z | tests/test_models/test_base_model.py | Jian137/mmediting-1 | e1ac6c93441ec96696d0b530f040b91b809015b6 | [
"Apache-2.0"
] | 361 | 2020-07-09T19:21:47.000Z | 2022-03-31T09:58:27.000Z | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest.mock import patch
import pytest
import torch
from mmedit.models import BaseModel
class TestBaseModel(unittest.TestCase):
    # Clearing __abstractmethods__ lets the abstract BaseModel be
    # instantiated directly for this test.
    @patch.multiple(BaseModel, __abstractmethods__=set())
    def test_parse_losses(self):
        """Check BaseModel.parse_losses on invalid and tensor-valued losses."""
        self.base_model = BaseModel()
        # A plain float is not a valid loss value and must raise TypeError.
        with pytest.raises(TypeError):
            losses = dict(loss=0.5)
            self.base_model.parse_losses(losses)
        # Losses may be a list of tensors or a single tensor.
        a_loss = [torch.randn(5, 5), torch.randn(5, 5)]
        b_loss = torch.randn(5, 5)
        losses = dict(a_loss=a_loss, b_loss=b_loss)
        # Expected values asserted below: each loss reduces to its mean
        # (summed over list entries), total loss is the sum of all of them.
        r_a_loss = sum(_loss.mean() for _loss in a_loss)
        r_b_loss = b_loss.mean()
        r_loss = [r_a_loss, r_b_loss]
        r_loss = sum(r_loss)
        loss, log_vars = self.base_model.parse_losses(losses)
        assert r_loss == loss
        assert set(log_vars.keys()) == set(['a_loss', 'b_loss', 'loss'])
        assert log_vars['a_loss'] == r_a_loss
        assert log_vars['b_loss'] == r_b_loss
        assert log_vars['loss'] == r_loss
| 30.257143 | 72 | 0.644004 |
b21e385938d0b40830e7ecc917d77c7ec77f1fe5 | 4,909 | py | Python | examples/native_contacts_and_free_energy_of_folding/helical_contacts_fixed_tolerance/Q_vs_T_helical_fixed_tolerance.py | shirtsgroup/cg_openmm | f71dbd7393c83386a73c4cee4b059bd17a56f12a | [
"MIT"
] | 8 | 2020-05-26T23:07:13.000Z | 2021-12-20T21:42:22.000Z | examples/native_contacts_and_free_energy_of_folding/helical_contacts_fixed_tolerance/Q_vs_T_helical_fixed_tolerance.py | shirtsgroup/cg_openmm | f71dbd7393c83386a73c4cee4b059bd17a56f12a | [
"MIT"
] | 122 | 2019-11-01T18:39:32.000Z | 2022-03-21T19:55:54.000Z | examples/native_contacts_and_free_energy_of_folding/helical_contacts_fixed_tolerance/Q_vs_T_helical_fixed_tolerance.py | shirtsgroup/cg_openmm | f71dbd7393c83386a73c4cee4b059bd17a56f12a | [
"MIT"
] | 5 | 2019-10-04T14:25:16.000Z | 2021-04-09T05:45:48.000Z | import os
import pickle
import time
from cg_openmm.parameters.free_energy import *
from cg_openmm.parameters.secondary_structure import *
from simtk import unit
# Using a helical backbone native contact scheme with fixed native contact tolerance,
# calculate native contact fraction and free energy of folding as functions of T.
# Specify location of output .nc files
data_directory = '../../run_replica_exchange/output'
# Load in cgmodel:
cgmodel = pickle.load(open("../../run_replica_exchange/stored_cgmodel.pkl","rb"))
# Load in trajectory stats:
analysis_stats = pickle.load(open("../../run_replica_exchange/analysis_stats_discard_20ns.pkl","rb"))
# Specify native structure file:
native_structure_file = "../native_medoid_min.dcd"
# Create list of replica trajectory files:
dcd_file_list_rep = []
number_replicas = 12
for rep in range(number_replicas):
    dcd_file_list_rep.append(f"{data_directory}/replica_{rep+1}.dcd")
# Get the native contact list and distances:
native_contact_list, native_contact_distances, opt_seq_spacing = get_helix_contacts(
    cgmodel,
    native_structure_file,
    backbone_type_name='bb',
)
# Set a fixed native contact multiplicative tolerance (relative to native distances)
native_contact_tol = 1.3
print(f'Optimal native contacts are i to i+{opt_seq_spacing}')
print(f'Using fixed tolerance factor of {native_contact_tol}')
# Bootstrap native contacts to get Q_folded and uncertainties
temp_list, Q_values, Q_uncertainty, sigmoid_results_boot = bootstrap_native_contacts_expectation(
    cgmodel,
    dcd_file_list_rep,
    native_contact_list,
    native_contact_distances,
    output_data=f"{data_directory}/output.nc",
    frame_begin=analysis_stats["production_start"],
    sample_spacing=analysis_stats["energy_decorrelation"],
    native_contact_tol=native_contact_tol,
    num_intermediate_states=3,
    n_trial_boot=200,
    conf_percent='sigma',
    plotfile='native_contacts_boot200_fixed_tol1_3.pdf',
)
# Q_folded is taken from the bootstrapped sigmoid fit results.
Q_folded = sigmoid_results_boot['Q_folded_value']
print(f'Q_folded: {Q_folded}')
#------------------------------------#
# Free energy of folding calculation #
#------------------------------------#
# Using optimized parameters, compute the free energy of folding
# Determine native contact fraction of current trajectories:
Q, Q_avg, Q_stderr, decorrelation_time = fraction_native_contacts(
    cgmodel,
    dcd_file_list_rep,
    native_contact_list,
    native_contact_distances,
    frame_begin=analysis_stats["production_start"],
    native_contact_tol=native_contact_tol,
)
plot_native_contact_timeseries(
    Q,
    frame_begin=analysis_stats["production_start"],
    time_interval=1*unit.picosecond,
    plot_per_page=3,
    plotfile="Q_vs_time_fixed_tol1_3.pdf",
    figure_title="Native contact fraction",
)
# Compute free energy/entropy/enthalpy of folding curves
G_unit = unit.kilojoule / unit.mole
S_unit = G_unit / unit.kelvin
H_unit = G_unit
# From bootstrapping:
(full_T_list_boot, deltaG_values_boot, deltaG_uncertainty_boot, \
    deltaS_values_boot, deltaS_uncertainty_boot, \
    deltaH_values_boot, deltaH_uncertainty_boot) = bootstrap_free_energy_folding(
    Q,
    Q_folded,
    frame_begin=analysis_stats["production_start"],
    sample_spacing=analysis_stats["energy_decorrelation"],
    output_data=f"{data_directory}/output.nc",
    num_intermediate_states=3,
    n_trial_boot=200,
    conf_percent='sigma',
    plotfile_dir=os.getcwd(),
)
# Strip simtk units for printing the state0->state1 transition curves.
# NOTE(review): the [1] index appears to select one side of the uncertainty
# interval returned by bootstrap_free_energy_folding -- confirm against its API.
deltaG_values_boot = deltaG_values_boot['state0_state1'].value_in_unit(G_unit)
deltaG_uncertainty_boot = deltaG_uncertainty_boot['state0_state1'][1].value_in_unit(G_unit)
deltaS_values_boot = deltaS_values_boot['state0_state1'].value_in_unit(S_unit)
deltaS_uncertainty_boot = deltaS_uncertainty_boot['state0_state1'][1].value_in_unit(S_unit)
deltaH_values_boot = deltaH_values_boot['state0_state1'].value_in_unit(H_unit)
deltaH_uncertainty_boot = deltaH_uncertainty_boot['state0_state1'][1].value_in_unit(H_unit)
print(f"T (K), deltaG (kJ/mol), deltaG_uncertainty (kJ/mol)")
for i in range(len(full_T_list_boot)):
    print(f"{full_T_list_boot[i].value_in_unit(unit.kelvin):>6.4f}, \
        {deltaG_values_boot[i]:>6.8f}, \
        {deltaG_uncertainty_boot[i]:>6.8f}")
print(f"\nT (K), deltaS (kJ/mol/K), deltaS_uncertainty (kJ/mol/K)")
for i in range(len(full_T_list_boot)):
    print(f"{full_T_list_boot[i].value_in_unit(unit.kelvin):>6.4f}, \
        {deltaS_values_boot[i]:>6.8f}, \
        {deltaS_uncertainty_boot[i]:>6.8f}")
print(f"\nT (K), deltaH (kJ/mol), deltaH_uncertainty_boot (kJ/mol)")
for i in range(len(full_T_list_boot)):
    print(f"{full_T_list_boot[i].value_in_unit(unit.kelvin):>6.4f}, \
        {deltaH_values_boot[i]:>6.8f}, \
        {deltaH_uncertainty_boot[i]:>6.8f}")
653a539f8f6965b29a8d488b1c704fa05fc12ccc | 665 | py | Python | manage.py | junydania/lostsalesapp | 116fe3cf24106d172b4e75d38a2ab97f60168125 | [
"MIT"
] | 2 | 2020-10-11T13:49:36.000Z | 2020-10-22T13:27:12.000Z | manage.py | heyrun/lostsales | 95cc9c388838b8de26b534fd53ff9a836ccb6f11 | [
"MIT"
] | null | null | null | manage.py | heyrun/lostsales | 95cc9c388838b8de26b534fd53ff9a836ccb6f11 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lostsales.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.913043 | 73 | 0.679699 |
ab405693bf86d46b1e3437bc2cf11a12593b2b78 | 1,086 | py | Python | pyaestro/bases.py | FrankD412/pyaestro | fb5797fe260c4285b40667a1e82337824c30f0ff | [
"MIT"
] | 2 | 2020-05-20T02:19:49.000Z | 2020-09-13T03:30:38.000Z | pyaestro/bases.py | FrankD412/pyaestro | fb5797fe260c4285b40667a1e82337824c30f0ff | [
"MIT"
] | 12 | 2020-09-24T03:43:00.000Z | 2021-08-31T13:53:40.000Z | pyaestro/bases.py | FrankD412/pyaestro | fb5797fe260c4285b40667a1e82337824c30f0ff | [
"MIT"
] | 1 | 2021-02-09T22:35:34.000Z | 2021-02-09T22:35:34.000Z | """Package-wide utility base classes that all submodules can use."""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Dict, Type, TypeVar
T = TypeVar("T", bound="Specifiable")
class Serializable(ABC):
"""An abstract API denoting a class' ability to be serialized."""
@abstractmethod
def serialize(self) -> dict:
"""Generate a serialized dictionary of the class instance.
Returns:
dict: A specification dictionary representing the object instance.
"""
raise NotImplementedError
class Specifiable(ABC):
"""An abstract API for classes that can be specified by a dictionary."""
@classmethod
@abstractmethod
def from_specification(cls: Type[T], specification: Dict) -> T:
"""Creates an instance of a class from a specification dictionary.
Args:
specification (Dict): A specification describing the new instance.
Returns:
Specifiable: An instance of the Specifiable class.
"""
raise NotImplementedError
| 27.846154 | 78 | 0.677716 |
632f1f9847559cdd90e4454db97eb1ec28816217 | 1,955 | py | Python | wcics/server/routes/admin/news_create.py | CS-Center/CS-Center | 3cd09f29d214406e6618fc67b9faf59a18f3f11b | [
"MIT"
] | null | null | null | wcics/server/routes/admin/news_create.py | CS-Center/CS-Center | 3cd09f29d214406e6618fc67b9faf59a18f3f11b | [
"MIT"
] | 6 | 2019-12-06T18:06:28.000Z | 2021-12-01T20:19:05.000Z | wcics/server/routes/admin/news_create.py | CS-Center/CS-Center | 3cd09f29d214406e6618fc67b9faf59a18f3f11b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from wcics import app, db, md
from wcics.auth.manage_user import assert_login, organization_page, user
from wcics.database.models import News, NewsAuthors, Users, Organizations, OrganizationUsers
from wcics.database.models.roles import NewsRoles
from wcics.database.utils import db_commit
from wcics.mail.utils import send_many
from wcics.server.forms import NewsSudoCreateForm, flash_form_errors
from wcics.utils.time import get_time
from wcics.utils.url import get_org_id
from flask import abort, flash, redirect, render_template
import time
@app.route("/organization/<org>/admin/news-create/", methods = ["GET", "POST"])
@organization_page
@assert_login
def serve_news_sudo_create_request(org):
  """Render the admin news-creation page and handle its form submission.

  Requires a logged-in user whose news role is above the default; on a valid
  POST the news item is created and the browser is redirected to the
  organization's news admin listing.
  """
  # Only users with an elevated news role may create news items.
  if user.organization_roles.news <= NewsRoles.default:
    abort(403)
  form = NewsSudoCreateForm()
  if form.validate_on_submit():
    flash("Successfully created news item!", category = "SUCCESS")
    news_sudo_create(form, org)
    # 303 forces the follow-up request to be a GET (post/redirect/get).
    return redirect("/organization/%s/admin/news/" % org, code = 303)
  else:
    flash_form_errors(form)
  return render_template("adminpages/news-create.html", sudo = True, active = "news", form = form)
def news_sudo_create(form, oid):
  """Create a news item (and its author links) from a validated form.

  NOTE(review): the `oid` parameter is only used to look up `org` for the
  email subject; the created rows are keyed on get_org_id() instead --
  confirm this mismatch is intentional.
  """
  org = Organizations.query.filter_by(oid = oid).first()
  article = News.add(oid = get_org_id(), nid = form.nid.data, title = form.title.data, body = form.body.data, time = get_time())
  # Commit first so the new article row has its primary key (article.id).
  db_commit()
  # Link each whitespace-separated author uid to the new article.
  for uid in form.authors.data.split():
    NewsAuthors.add(nid = article.id, uid = int(uid), oid = get_org_id())
  if form.email.data:
    # Email every subscribed user of the current organization.
    co = Organizations.query.filter_by(id = get_org_id()).first()
    send_many([
      tup[0]
      for tup in db.session.query(Users.email).\
        join(OrganizationUsers).\
        filter(OrganizationUsers.oid == co.id, Users.subscribed == True).all()
    ],
      "%s Announcement - %s" % ("CS Center" if org.id == 1 else org.name, form.title.data), md.render(form.body.data))
db_commit() | 33.135593 | 128 | 0.715601 |
7326e602a4d4f1732af092db550378eeb996fa19 | 9,681 | bzl | Python | tools/osx/xcode_configure.bzl | juhalindfors/bazel-patches | d915827cd9db2fd5e81abda9cc3c63b2fe4663f7 | [
"Apache-2.0"
] | null | null | null | tools/osx/xcode_configure.bzl | juhalindfors/bazel-patches | d915827cd9db2fd5e81abda9cc3c63b2fe4663f7 | [
"Apache-2.0"
] | null | null | null | tools/osx/xcode_configure.bzl | juhalindfors/bazel-patches | d915827cd9db2fd5e81abda9cc3c63b2fe4663f7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Repository rule to generate host xcode_config and xcode_version targets.
The xcode_config and xcode_version targets are configured for xcodes/SDKs
installed on the local host.
"""
def _search_string(fullstring, prefix, suffix):
  """Returns the substring between two given substrings of a larger string.

  Args:
    fullstring: The larger string to search.
    prefix: The substring that should occur directly before the returned string.
    suffix: The substring that should occur directly after the returned string.

  Returns:
    A string occurring in fullstring exactly prefixed by prefix, and exactly
    terminated by suffix. For example, ("hello goodbye", "lo ", " bye") will
    return "good". If there is no such string, returns the empty string.
  """
  _, found_prefix, after_prefix = fullstring.partition(prefix)
  if not found_prefix:
    return ""
  result, found_suffix, _ = after_prefix.partition(suffix)
  if not found_suffix:
    return ""
  return result
def _search_sdk_output(output, sdkname):
  """Returns the sdk version given xcodebuild stdout and an sdkname."""
  return _search_string(output, "(" + sdkname, ")")
def _xcode_version_output(repository_ctx, name, version, aliases, developer_dir):
  """Returns a string containing an xcode_version build target."""
  build_contents = ""
  decorated_aliases = []
  error_msg = ""
  for alias in aliases:
    decorated_aliases.append("'%s'" % alias)
  # Query the SDKs of this toolchain; DEVELOPER_DIR selects which installed
  # Xcode the xcrun invocation should use.
  xcodebuild_result = repository_ctx.execute(["xcrun", "xcodebuild", "-version", "-sdk"], 30,
                                             {"DEVELOPER_DIR": developer_dir})
  if (xcodebuild_result.return_code != 0):
    error_msg = (
        "Invoking xcodebuild failed, developer dir: {devdir} ," +
        "return code {code}, stderr: {err}, stdout: {out}").format(
            devdir=developer_dir,
            code=xcodebuild_result.return_code,
            err=xcodebuild_result.stderr,
            out=xcodebuild_result.stdout)
  # On failure these lookups run against whatever stdout exists and yield "",
  # so the corresponding attributes are simply omitted below.
  ios_sdk_version = _search_sdk_output(xcodebuild_result.stdout, "iphoneos")
  tvos_sdk_version = _search_sdk_output(xcodebuild_result.stdout, "appletvos")
  macosx_sdk_version = _search_sdk_output(xcodebuild_result.stdout, "macosx")
  watchos_sdk_version = _search_sdk_output(xcodebuild_result.stdout, "watchos")
  build_contents += "xcode_version(\n  name = '%s'," % name
  build_contents += "\n  version = '%s'," % version
  if aliases:
    build_contents += "\n  aliases = [%s]," % " ,".join(decorated_aliases)
  if ios_sdk_version:
    build_contents += "\n  default_ios_sdk_version = '%s'," % ios_sdk_version
  if tvos_sdk_version:
    build_contents += "\n  default_tvos_sdk_version = '%s'," % tvos_sdk_version
  if macosx_sdk_version:
    build_contents += "\n  default_macosx_sdk_version = '%s'," % macosx_sdk_version
  if watchos_sdk_version:
    build_contents += "\n  default_watchos_sdk_version = '%s'," % watchos_sdk_version
  build_contents += "\n)\n"
  if error_msg:
    # Record the failure as a comment in the generated BUILD content and
    # surface it in the repository rule's log.
    build_contents += "\n# Error: " + error_msg.replace("\n", " ") + "\n"
    print(error_msg)
  return build_contents
# Fallback: an empty xcode_config target for hosts with no usable Xcode.
VERSION_CONFIG_STUB = "xcode_config(name = 'host_xcodes')"
def run_xcode_locator(repository_ctx, xcode_locator_src_label):
  """Generates xcode-locator from source and runs it.
  Builds xcode-locator in the current repository directory.
  Returns the standard output of running xcode-locator with -v, which will
  return information about locally installed Xcode toolchains and the versions
  they are associated with.
  This should only be invoked on a darwin OS, as xcode-locator cannot be built
  otherwise.
  Args:
    repository_ctx: The repository context.
    xcode_locator_src_label: The label of the source file for xcode-locator.
  Returns:
    A 2-tuple containing:
    output: A list representing installed xcode toolchain information. Each
        element of the list is a struct containing information for one installed
        toolchain. This is instead None if there was an error building or
        running xcode-locator.
    err: An error string describing the error that occurred when attempting
        to build and run xcode-locator, or None if the run was successful.
  """
  xcodeloc_src_path = str(repository_ctx.path(xcode_locator_src_label))
  # Compile the Objective-C locator with a clean environment ("env -i") so the
  # host shell configuration cannot interfere with the build.
  xcrun_result = repository_ctx.execute(["env", "-i", "xcrun", "clang", "-fobjc-arc", "-framework",
                                         "CoreServices", "-framework", "Foundation", "-o",
                                         "xcode-locator-bin", xcodeloc_src_path], 30)
  if (xcrun_result.return_code != 0):
    error_msg = (
        "Generating xcode-locator-bin failed, " +
        "return code {code}, stderr: {err}, stdout: {out}").format(
            code=xcrun_result.return_code,
            err=xcrun_result.stderr,
            out=xcrun_result.stdout)
    return (None, error_msg.replace("\n", " "))
  xcode_locator_result = repository_ctx.execute(["./xcode-locator-bin", "-v"], 30)
  if (xcode_locator_result.return_code != 0):
    error_msg = (
        "Invoking xcode-locator failed, " +
        "return code {code}, stderr: {err}, stdout: {out}").format(
            code=xcode_locator_result.return_code,
            err=xcode_locator_result.stderr,
            out=xcode_locator_result.stdout)
    return (None, error_msg.replace("\n", " "))
  xcode_toolchains = []
  # xcode_dump is comprised of newlines with different installed xcode versions,
  # each line of the form <version>:<comma_separated_aliases>:<developer_dir>.
  xcode_dump = xcode_locator_result.stdout
  for xcodeversion in xcode_dump.split("\n"):
    if ":" in xcodeversion:
      infosplit = xcodeversion.split(":")
      toolchain = struct(
          version = infosplit[0],
          aliases = infosplit[1].split(","),
          developer_dir = infosplit[2]
      )
      xcode_toolchains.append(toolchain)
  return (xcode_toolchains, None)
def _darwin_build_file(repository_ctx):
  """Evaluates local system state to create xcode_config and xcode_version targets."""
  xcodebuild_result = repository_ctx.execute(["env", "-i", "xcrun", "xcodebuild", "-version"], 30)
  # "xcodebuild -version" failing may be indicative of no versions of xcode
  # installed, which is an acceptable machine configuration to have for using
  # bazel. Thus no print warning should be emitted here.
  if (xcodebuild_result.return_code != 0):
    error_msg = (
        "Running xcodebuild -version failed, " +
        "return code {code}, stderr: {err}, stdout: {out}").format(
            code=xcodebuild_result.return_code,
            err=xcodebuild_result.stderr,
            out=xcodebuild_result.stdout)
    return VERSION_CONFIG_STUB + "\n# Error: " + error_msg.replace("\n", " ") + "\n"
  (toolchains, xcodeloc_err) = run_xcode_locator(repository_ctx,
                                                 Label(repository_ctx.attr.xcode_locator))
  if xcodeloc_err:
    return VERSION_CONFIG_STUB + "\n# Error: " + xcodeloc_err + "\n"
  # The default version is whatever "xcodebuild -version" reports.
  default_xcode_version = _search_string(xcodebuild_result.stdout, "Xcode ", "\n")
  default_xcode_target = ""
  target_names = []
  buildcontents = ""
  # Emit one xcode_version target per discovered toolchain; remember which
  # target matches the default version (by version string or alias).
  for toolchain in toolchains:
    version = toolchain.version
    aliases = toolchain.aliases
    developer_dir = toolchain.developer_dir
    target_name = "version%s" % version.replace(".", "_")
    buildcontents += _xcode_version_output(repository_ctx, target_name, version, aliases, developer_dir)
    target_names.append("':%s'" % target_name)
    if (version == default_xcode_version or default_xcode_version in aliases):
      default_xcode_target = target_name
  buildcontents += "xcode_config(name = 'host_xcodes',"
  if target_names:
    buildcontents += "\n  versions = [%s]," % ", ".join(target_names)
  if default_xcode_target:
    buildcontents += "\n  default = ':%s'," % default_xcode_target
  buildcontents += "\n)\n"
  return buildcontents
def _impl(repository_ctx):
"""Implementation for the local_config_xcode repository rule.
Generates a BUILD file containing a root xcode_config target named 'host_xcodes',
which points to an xcode_version target for each version of xcode installed on
the local host machine. If no versions of xcode are present on the machine
(for instance, if this is a non-darwin OS), creates a stub target.
Args:
repository_ctx: The repository context.
"""
os_name = repository_ctx.os.name.lower()
build_contents = "package(default_visibility = ['//visibility:public'])\n\n"
if (os_name.startswith("mac os")):
build_contents += _darwin_build_file(repository_ctx)
else:
build_contents += VERSION_CONFIG_STUB
repository_ctx.file("BUILD", build_contents)
xcode_autoconf = repository_rule(
implementation=_impl,
local=True,
attrs={
"xcode_locator": attr.string(),
}
)
def xcode_configure(xcode_locator_label):
"""Generates a repository containing host xcode version information."""
xcode_autoconf(
name="local_config_xcode",
xcode_locator=xcode_locator_label
)
| 41.549356 | 104 | 0.700754 |
44e1829e2412a7e7dd1d780c6240e8f47abcdd83 | 4,625 | py | Python | tests/test_db_user_collection_perms.py | rjw57/componentsdb | 7e5fd96d3afbbcde09d2f7fba1d6c86975e41272 | [
"MIT"
] | null | null | null | tests/test_db_user_collection_perms.py | rjw57/componentsdb | 7e5fd96d3afbbcde09d2f7fba1d6c86975e41272 | [
"MIT"
] | null | null | null | tests/test_db_user_collection_perms.py | rjw57/componentsdb | 7e5fd96d3afbbcde09d2f7fba1d6c86975e41272 | [
"MIT"
] | null | null | null | # pylint: disable=redefined-outer-name
import pytest
from componentsdb.model import (
Collection, User, UserCollectionPermission, Permission,
)
@pytest.fixture
def collections(mixer):
return mixer.cycle(5).blend(Collection, name=mixer.FAKE)
@pytest.fixture
def users(mixer):
return mixer.cycle(5).blend(User, name=mixer.FAKE)
@pytest.fixture
def perms(mixer, users, collections):
# pylint: disable=unused-argument
perms = mixer.cycle(15).blend(
UserCollectionPermission, user=mixer.SELECT, collection=mixer.SELECT,
)
return perms
def test_has_permission(user, collection):
# User should have no permissions on the new collection
assert not collection.has_permission(user, Permission.CREATE)
assert not collection.has_permission(user, Permission.READ)
assert not collection.has_permission(user, Permission.UPDATE)
assert not collection.has_permission(user, Permission.DELETE)
def test_add_permission(user, collection):
# User cannot read or update initially
assert not collection.has_permission(user, Permission.READ)
assert not collection.has_permission(user, Permission.UPDATE)
# Allow user permission to read
collection.add_permission(user, Permission.READ)
# User can now read but still not update
assert collection.has_permission(user, Permission.READ)
assert not collection.has_permission(user, Permission.UPDATE)
def test_add_all_permissions(user, collection):
# User cannot do anything initially
for p in Permission:
assert not collection.has_permission(user, p)
# Allow user permission to do anything
collection.add_all_permissions(user)
# Check permissions
for p in Permission:
assert collection.has_permission(user, p)
def test_remove_permission(user, collection):
# Allow user permission to read
collection.add_permission(user, Permission.READ)
# User can now read
assert collection.has_permission(user, Permission.READ)
# Revoke permission
collection.remove_permission(user, Permission.READ)
# User cannot now read
assert not collection.has_permission(user, Permission.READ)
def test_remove_non_existant_permission(user, collection):
"""It should always be possible to remove a permission a user does not
have."""
assert not collection.has_permission(user, Permission.READ)
collection.remove_permission(user, Permission.READ)
assert not collection.has_permission(user, Permission.READ)
def test_add_duplicate_permission(user, collection):
"""It should always be possible to add a permission a user already has."""
assert not collection.has_permission(user, Permission.READ)
collection.add_permission(user, Permission.READ)
assert collection.has_permission(user, Permission.READ)
collection.add_permission(user, Permission.READ)
assert collection.has_permission(user, Permission.READ)
def test_removes_all_permissions(user, collection):
"""Duplicate permissions are removed."""
assert not collection.has_permission(user, Permission.READ)
collection.add_permission(user, Permission.READ)
collection.add_permission(user, Permission.READ)
assert collection.has_permission(user, Permission.READ)
collection.remove_permission(user, Permission.READ)
assert not collection.has_permission(user, Permission.READ)
def test_can_create(current_user, collection):
"""Test can_create helper."""
assert not collection.can_create
collection.add_permission(current_user, Permission.CREATE)
assert collection.can_create
collection.remove_permission(current_user, Permission.CREATE)
assert not collection.can_create
def test_can_read(current_user, collection):
"""Test can_read helper."""
assert not collection.can_read
collection.add_permission(current_user, Permission.READ)
assert collection.can_read
collection.remove_permission(current_user, Permission.READ)
assert not collection.can_read
def test_can_update(current_user, collection):
"""Test can_update helper."""
assert not collection.can_update
collection.add_permission(current_user, Permission.UPDATE)
assert collection.can_update
collection.remove_permission(current_user, Permission.UPDATE)
assert not collection.can_update
def test_can_delete(current_user, collection):
"""Test can_delete helper."""
assert not collection.can_delete
collection.add_permission(current_user, Permission.DELETE)
assert collection.can_delete
collection.remove_permission(current_user, Permission.DELETE)
assert not collection.can_delete
| 37 | 78 | 0.771676 |
25e89b1973b35e149814364f458a0c75267a185b | 269 | py | Python | z42/z42/web/boot/index.py | jumploop/collection_python | f66f18dc5ae50fce95679e0f4aee5e28b2543432 | [
"MIT"
] | null | null | null | z42/z42/web/boot/index.py | jumploop/collection_python | f66f18dc5ae50fce95679e0f4aee5e28b2543432 | [
"MIT"
] | null | null | null | z42/z42/web/boot/index.py | jumploop/collection_python | f66f18dc5ae50fce95679e0f4aee5e28b2543432 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#coding:utf-8
import _env
from z42.web.mongo import mongo
import sys
APPNAME = sys.argv[1]
__import__("zapp.%s.view._url"%APPNAME)
for k,v in mongo._registered_documents.iteritems():
print "indexing", k
v.generate_index(v._collection)
| 19.214286 | 51 | 0.736059 |
6143683a2ef845243e153f3136601f1d9aac409a | 3,527 | py | Python | prepare_data/loader.py | jimeffry/MTCNN-TF | 4d41c5fd2dc13008d39b868aa2e921a7ff731e10 | [
"MIT"
] | 8 | 2018-08-15T11:07:03.000Z | 2019-12-05T10:05:41.000Z | prepare_data/loader.py | jimeffry/MTCNN-TF | 4d41c5fd2dc13008d39b868aa2e921a7ff731e10 | [
"MIT"
] | 2 | 2018-12-04T07:16:02.000Z | 2019-11-04T09:42:03.000Z | prepare_data/loader.py | jimeffry/MTCNN-TF | 4d41c5fd2dc13008d39b868aa2e921a7ff731e10 | [
"MIT"
] | 6 | 2018-08-07T01:09:12.000Z | 2021-08-13T07:19:47.000Z | import numpy as np
import minibatch
import sys
import cv2
sys.path.append("../")
from train_models.MTCNN_config import config
class TestLoader:
#imdb image_path(list)
def __init__(self, imdb, batch_size=1, shuffle=False):
self.imdb = imdb
self.batch_size = batch_size
self.shuffle = shuffle
self.size = len(imdb)#num of data
#self.index = np.arange(self.size)
self.cur = 0
self.data = None
self.label = None
self.img_path = None
self.reset()
self.get_batch()
def reset(self):
self.cur = 0
if self.shuffle:
#shuffle test image
np.random.shuffle(self.imdb)
def iter_next(self):
return self.cur + self.batch_size <= self.size
#realize __iter__() and next()--->iterator
#return iter object
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
if self.iter_next():
self.get_batch()
self.cur += self.batch_size
return self.data
else:
raise StopIteration
def getindex(self):
return self.cur / self.batch_size
def getpad(self):
if self.cur + self.batch_size > self.size:
return self.cur + self.batch_size - self.size
else:
return 0
def get_batch(self):
imdb = self.imdb[self.cur]
'''
cur_from = self.cur
cur_to = min(cur_from + self.batch_size, self.size)
#picked image
imdb = [self.imdb[self.index[i]] for i in range(cur_from, cur_to)]
#print(imdb)
'''
#print type(imdb)
#print len(imdb)
#assert len(imdb) == 1, "Single batch only"
#print("image path :",imdb)
self.img_path = imdb
im = cv2.imread(imdb)
self.data = im
class ImageLoader:
def __init__(self, imdb, im_size, batch_size=config.BATCH_SIZE, shuffle=False):
self.imdb = imdb
self.batch_size = batch_size
self.im_size = im_size
self.shuffle = shuffle
self.cur = 0
self.size = len(imdb)
self.index = np.arange(self.size)
self.num_classes = 2
self.batch = None
self.data = None
self.label = None
self.label_names = ['label', 'bbox_target']
self.reset()
self.get_batch()
def reset(self):
self.cur = 0
if self.shuffle:
np.random.shuffle(self.index)
def iter_next(self):
return self.cur + self.batch_size <= self.size
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
if self.iter_next():
self.get_batch()
self.cur += self.batch_size
return self.data, self.label
else:
raise StopIteration
def getindex(self):
return self.cur / self.batch_size
def getpad(self):
if self.cur + self.batch_size > self.size:
return self.cur + self.batch_size - self.size
else:
return 0
def get_batch(self):
cur_from = self.cur
cur_to = min(cur_from + self.batch_size, self.size)
imdb = [self.imdb[self.index[i]] for i in range(cur_from, cur_to)]
data, label = minibatch.get_minibatch(imdb, self.num_classes, self.im_size)
self.data = data['data']
self.label = [label[name] for name in self.label_names]
| 26.125926 | 83 | 0.570456 |
24802cb0c932c4638c57f4d596209304495d2fda | 889 | py | Python | servicesAlert/function.py | afourdraine/AmbariMonitor | 2ebab38ef200e25ff2c22ddd4803c84060b67457 | [
"MIT"
] | 1 | 2019-01-14T11:33:32.000Z | 2019-01-14T11:33:32.000Z | servicesAlert/function.py | afourdraine/AmbariMonitor | 2ebab38ef200e25ff2c22ddd4803c84060b67457 | [
"MIT"
] | null | null | null | servicesAlert/function.py | afourdraine/AmbariMonitor | 2ebab38ef200e25ff2c22ddd4803c84060b67457 | [
"MIT"
] | null | null | null | import requests
import json
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def getServicesAlert(nodeName, port, clusterName, auth_values, https, verify):
if https == "n":
url = "http://" + nodeName + ":" + port + "/api/v1/clusters/" + clusterName + "/alerts"
elif https == "y":
url = "https://" + nodeName + ":" + port + "/api/v1/clusters/" + clusterName + "/alerts"
else:
exit("[servicesAlert] could not run because of : bad value provided. Please check the https var")
querystring = {"fields": "*"}
headers = {
'cache-control': "no-cache",
'Postman-Token': "7eb64f13-6076-4c96-a5c3-d0de174b5df9"
}
response = requests.request("GET", url, headers=headers, params=querystring, auth=auth_values, verify=verify)
result = json.loads(response.text)
return result
| 26.147059 | 113 | 0.64342 |
592bf40a4d37562ea6612cd71ee9a8c3776fdf87 | 4,313 | py | Python | trainer/srresnet_task.py | jason-zl190/sisr_medical | 570d638a2e5a3301176af671d6b233deb507564f | [
"Apache-2.0"
] | 1 | 2020-12-08T11:23:28.000Z | 2020-12-08T11:23:28.000Z | trainer/srresnet_task.py | jason-zl190/sisr_ct | 570d638a2e5a3301176af671d6b233deb507564f | [
"Apache-2.0"
] | null | null | null | trainer/srresnet_task.py | jason-zl190/sisr_ct | 570d638a2e5a3301176af671d6b233deb507564f | [
"Apache-2.0"
] | 1 | 2020-04-13T06:56:19.000Z | 2020-04-13T06:56:19.000Z | """
Copyright Zisheng Liang 2019
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tensorflow as tf
from trainer import utils, callbacks, config
from trainer.datasets.sisr_ct import deeplesion_lr_hr_pair
from trainer.models.sisr_ct import MySRResNet
import os
from pathlib import Path
_DATA_DIR = '/datacommons/plusds/team10-zl190/Spring20/tensorflow_datasets' # data_dir
# dataset
train_dataset, train_count = deeplesion_lr_hr_pair(split='train',
size=(config.im_h, config.im_w, 1),
downsampling_factor=config.upsampling_rate,
batch_size=config.batch_size,
augment=False,
data_dir = config.data_dir)
validation_dataset, validation_count = deeplesion_lr_hr_pair(split='validation',
size=(config.im_h, config.im_w, 1),
downsampling_factor=config.upsampling_rate,
batch_size=config.batch_size,
augment=False,
data_dir = config.data_dir)
# model
if config.g_weight:
model = tf.keras.models.load_model(config.g_weight, custom_objects={"ssim": utils.ssim, "psnr": utils.psnr})
else:
model = MySRResNet(shape=(config.im_h, config.im_w, 1), upsampling_rate=config.upsampling_rate)()
optimizer = tf.keras.optimizers.Adam(learning_rate=config.lr, beta_1=0.9)
loss = ['mse']
metrics = [utils.ssim, utils.psnr]
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
# callbacks -- save model
Path(config.model_dir).mkdir(parents=True, exist_ok=True)
model_path = os.path.join(config.model_dir, 'model.{epoch:02d}-{val_loss:.5f}.h5')
saving = tf.keras.callbacks.ModelCheckpoint(model_path,
monitor='val_loss',
verbose=1,
save_freq='epoch',
save_best_only=True,
save_weights_only=True)
# callbacks -- log training
write_freq = int(train_count / config.batch_size / 10)
tensorboard = tf.keras.callbacks.TensorBoard(log_dir=config.job_dir,
write_graph=True,
update_freq=write_freq)
image_gen_val = callbacks.GenerateImages(model,
validation_dataset,
config.job_dir,
interval=write_freq,
postfix='val')
image_gen = callbacks.GenerateImages(model,
train_dataset,
config.job_dir,
interval=write_freq,
postfix='train')
# callbacks -- start tensorboard
start_tensorboard = callbacks.StartTensorBoard(config.job_dir)
# callbacks -- log code and trained models
log_code = callbacks.LogCode(config.job_dir, './trainer')
copy_keras = callbacks.CopyKerasModel(config.model_dir, config.job_dir)
model.fit(train_dataset,
steps_per_epoch=int(train_count / config.batch_size),
validation_data=validation_dataset,
validation_steps=int(validation_count / config.batch_size),
epochs=config.num_epochs,
callbacks=[
saving, tensorboard, start_tensorboard, log_code,
copy_keras, image_gen, image_gen_val
])
| 45.4 | 110 | 0.572687 |
050449ef9822feeac7bb72dfde0e3a025067a58b | 6,603 | py | Python | bindings/python/dbr_module/dbr.py | kant/data-broker | 0a1efd3cf1fac2c5eb17c12279609c444fb34228 | [
"Apache-2.0"
] | null | null | null | bindings/python/dbr_module/dbr.py | kant/data-broker | 0a1efd3cf1fac2c5eb17c12279609c444fb34228 | [
"Apache-2.0"
] | null | null | null | bindings/python/dbr_module/dbr.py | kant/data-broker | 0a1efd3cf1fac2c5eb17c12279609c444fb34228 | [
"Apache-2.0"
] | null | null | null | #
# Copyright © 2018 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from _dbr_interface import ffi
libtransport = ffi.dlopen("libdbbe_transport.so", ffi.RTLD_GLOBAL|ffi.RTLD_NOW)
libbackend = ffi.dlopen("libdbbe_redis.so", ffi.RTLD_GLOBAL|ffi.RTLD_NOW)
libdatabroker = ffi.dlopen("libdatabroker.so")
import _cffi_backend
from dbr_module.dbr_errorcodes import Errors
ERRORTABLE = Errors()
# Copy for direct access
DBR_SUCCESS = ERRORTABLE.DBR_SUCCESS # no error, clean result, operation successful
DBR_ERR_GENERIC = ERRORTABLE.DBR_ERR_GENERIC # a general or unknown error has occurred
DBR_ERR_INVALID = ERRORTABLE.DBR_ERR_INVALID # an invalid parameter was passed into a function or other general error
DBR_ERR_HANDLE = ERRORTABLE.DBR_ERR_HANDLE # an invalid handle was encountered
DBR_ERR_INPROGRESS = ERRORTABLE.DBR_ERR_INPROGRESS # a request is still in progress, check again later
DBR_ERR_TIMEOUT = ERRORTABLE.DBR_ERR_TIMEOUT # a timeout occurred
DBR_ERR_UBUFFER = ERRORTABLE.DBR_ERR_UBUFFER # provided user buffer problem (too small, not available)
DBR_ERR_UNAVAIL = ERRORTABLE.DBR_ERR_UNAVAIL # the requested tuple or namespace is not available in the backing storage
DBR_ERR_EXISTS = ERRORTABLE.DBR_ERR_EXISTS # Entry already exists
DBR_ERR_NSBUSY = ERRORTABLE.DBR_ERR_NSBUSY # there are still clients attached to a namespace
DBR_ERR_NSINVAL = ERRORTABLE.DBR_ERR_NSINVAL # invalid name space
DBR_ERR_NOMEMORY = ERRORTABLE.DBR_ERR_NOMEMORY # the amount of memory or storage was insufficient to
DBR_ERR_TAGERROR = ERRORTABLE.DBR_ERR_TAGERROR # the returned tag is an error
DBR_ERR_NOFILE = ERRORTABLE.DBR_ERR_NOFILE # a file was not found
DBR_ERR_NOAUTH = ERRORTABLE.DBR_ERR_NOAUTH # access authorization required or failed
DBR_ERR_NOCONNECT = ERRORTABLE.DBR_ERR_NOCONNECT # connection to a storage backend failed
DBR_ERR_CANCELLED = ERRORTABLE.DBR_ERR_CANCELLED # operation was cancelled
DBR_ERR_NOTIMPL = ERRORTABLE.DBR_ERR_NOTIMPL # operation not implemented
DBR_ERR_INVALIDOP = ERRORTABLE.DBR_ERR_INVALIDOP # invalid operation
DBR_ERR_BE_POST = ERRORTABLE.DBR_ERR_BE_POST # posting request to back-end failed
DBR_ERR_BE_GENERAL = ERRORTABLE.DBR_ERR_BE_GENERAL # Unspecified back-end error
DBR_ERR_MAXERROR = ERRORTABLE.DBR_ERR_MAXERROR
# Tuple persist level
DBR_PERST_VOLATILE_SIMPLE = libdatabroker.DBR_PERST_VOLATILE_SIMPLE
DBR_PERST_VOLATILE_FT = libdatabroker.DBR_PERST_VOLATILE_FT
DBR_PERST_TEMPORARY_SIMPLE = libdatabroker.DBR_PERST_TEMPORARY_SIMPLE
DBR_PERST_TEMPORARY_FT = libdatabroker.DBR_PERST_TEMPORARY_FT
DBR_PERST_PERMANENT_SIMPLE = libdatabroker.DBR_PERST_PERMANENT_SIMPLE
DBR_PERST_PERMANENT_FT = libdatabroker.DBR_PERST_PERMANENT_FT
DBR_PERST_MAX = libdatabroker.DBR_PERST_MAX
DBR_FLAGS_NONE = libdatabroker.DBR_FLAGS_NONE
DBR_FLAGS_NOWAIT = libdatabroker.DBR_FLAGS_NOWAIT
DBR_FLAGS_MAX = libdatabroker.DBR_FLAGS_MAX
# Mask
DBR_STATE_MASK_ALL = libdatabroker.DBR_STATE_MASK_ALL
def getErrorCode(error_code):
return ERRORTABLE.getErrorCode(error_code)
def getErrorMessage(error_code):
if error_code < DBR_ERR_MAXERROR:
return ERRORTABLE.getErrorMessage(error_code)
return "Unknown Error"
def createBuf(buftype, bufsize):
retval = ffi.buffer(ffi.new(buftype, bufsize))
return retval
def dbrCreate(dbrname, level, groups):
dbr_hdl = libdatabroker.dbrCreate(dbrname, level, groups)
return dbr_hdl
def dbrAttach(dbr_name):
dbr_hdl = libdatabroker.dbrAttach(dbr_name)
return dbr_hdl
def dbrDelete(dbr_name):
retval = libdatabroker.dbrDelete(dbr_name)
return retval
def dbrDetach(dbr_handle):
retval = libdatabroker.dbrDetach(dbr_handle)
return retval
def dbrQuery(dbr_handle, dbr_state, state_mask):
retval = libdatabroker.dbrQuery(dbr_handle, dbr_state, state_mask)
return retval
def dbrAddUnits(dbr_handle, units):
retval = libdatabroker.dbrAddUnits(dbr_handle, units)
return retval
def dbrRemoveUnits(dbr_handle, units):
retval = libdatabroker.dbrRemoveUnits(dbr_handle, unitss)
return retval
def dbrPut(dbr_hdl, tuple_val, tuple_name, group):
retval = libdatabroker.dbrPut(dbr_hdl, tuple_val, len(tuple_val), tuple_name, group)
return retval
def dbrRead(dbr_hdl, out_buffer, buffer_size, tuple_name, match_template, group, flag):
retval = libdatabroker.dbrRead(dbr_hdl, ffi.from_buffer(out_buffer), buffer_size, tuple_name, match_template, group, flag)
return retval
def dbrGet(dbr_hdl, out_buffer, buffer_size, tuple_name, match_template, group, flag):
retval = libdatabroker.dbrGet(dbr_hdl, ffi.from_buffer(out_buffer), buffer_size, tuple_name, match_template, group, flag)
return retval
def dbrReadA(dbr_hdl, out_buffer, buffer_size, tuple_name, match_template, group):
tag = libdatabroker.dbrReadA(dbr_hdl, ffi.from_buffer(out_buffer), buffer_size, tuple_name, match_template, group)
return tag
def dbrPutA(dbr_hdl, tuple_val, tuple_name, group):
tag = libdatabroker.dbrPutA(dbr_hdl, tuple_val, len(tuple_val), tuple_name, group)
return tag
def dbrGetA(dbr_hdl, out_buffer, buffer_size, tuple_name, match_template, group):
tag = libdatabroker.dbrGetA(dbr_hdl, ffi.from_buffer(out_buffer), buffer_size, tuple_name, match_template, group)
return tag
def dbrTestKey(dbr_hdl, tuple_name):
retval = libdatabroker.dbrTestKey(dbr_hdl, tuple_name)
return retval
def dbrMove(src_DBRHandle, src_group, tuple_name, match_template, dest_DBRHandle, dest_group):
retval = libdatabroker.dbrMove(src_DBRHandle, src_group, tuple_name, match_template, dest_DBRHandle, dest_group)
return retval
def dbrRemove(dbr_hdl, group, tuple_name, match_template):
retval = libdatabroker.dbrRemove(dbr_hdl, group, tuple_name, match_template)
return retval
def dbrTest(tag):
retval = libdatabroker.dbrTest(tag)
return retval
def dbrCancel(tag):
retval = libdatabroker.dbrCancel(tag)
return retval
def dbrEval(dbr_hdl, tuple_val, tuple_name, group, fn_ptr):
retval = libdatabroker.dbrEval(dbr_hdl, tuple_val, len(tuple_val), tuple_name, group, fn_ptr)
return retval
| 42.6 | 126 | 0.802211 |
07331e15506ed33cd15a399ade517bed668cc9a6 | 68,233 | py | Python | python/ccxt/async_support/delta.py | ChristianCoenen/ccxt | 261e3549b4cfe9fa4ecf1a00feb0450337eab686 | [
"MIT"
] | null | null | null | python/ccxt/async_support/delta.py | ChristianCoenen/ccxt | 261e3549b4cfe9fa4ecf1a00feb0450337eab686 | [
"MIT"
] | null | null | null | python/ccxt/async_support/delta.py | ChristianCoenen/ccxt | 261e3549b4cfe9fa4ecf1a00feb0450337eab686 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TICK_SIZE
class delta(Exchange):
def describe(self):
return self.deep_extend(super(delta, self).describe(), {
'id': 'delta',
'name': 'Delta Exchange',
'countries': ['VC'], # Saint Vincent and the Grenadines
'rateLimit': 300,
'version': 'v2',
# new metainfo interface
'has': {
'CORS': None,
'spot': True,
'margin': None,
'swap': None,
'future': None,
'option': None,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDeposit': None,
'fetchDepositAddress': True,
'fetchDeposits': None,
'fetchLedger': True,
'fetchLeverageTiers': False, # An infinite number of tiers, see examples/js/delta-maintenance-margin-rate-max-leverage.js
'fetchMarketLeverageTiers': False,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrderBook': True,
'fetchPosition': True,
'fetchPositions': True,
'fetchStatus': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTransfer': None,
'fetchTransfers': None,
'fetchWithdrawal': None,
'fetchWithdrawals': None,
'transfer': False,
'withdraw': False,
},
'timeframes': {
'1m': '1m',
'3m': '3m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'1d': '1d',
'7d': '7d',
'1w': '1w',
'2w': '2w',
'1M': '30d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/99450025-3be60a00-2931-11eb-9302-f4fd8d8589aa.jpg',
'test': {
'public': 'https://testnet-api.delta.exchange',
'private': 'https://testnet-api.delta.exchange',
},
'api': {
'public': 'https://api.delta.exchange',
'private': 'https://api.delta.exchange',
},
'www': 'https://www.delta.exchange',
'doc': [
'https://docs.delta.exchange',
],
'fees': 'https://www.delta.exchange/fees',
'referral': 'https://www.delta.exchange/app/signup/?code=IULYNB',
},
'api': {
'public': {
'get': [
'assets',
'settings',
'indices',
'products',
'tickers',
'tickers/{symbol}',
'l2orderbook/{symbol}',
'trades/{symbol}',
'history/candles',
'history/sparklines',
],
},
'private': {
'get': [
'orders',
'orders/leverage',
'positions',
'positions/margined',
'orders/history',
'fills',
'fills/history/download/csv',
'wallet/balances',
'wallet/transactions',
'wallet/transactions/download',
'deposits/address',
],
'post': [
'orders',
'orders/batch',
'orders/leverage',
'positions/change_margin',
],
'put': [
'orders',
'orders/batch',
],
'delete': [
'orders',
'orders/all',
'orders/batch',
],
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': 0.15 / 100,
'maker': 0.10 / 100,
'tiers': {
'taker': [
[0, 0.15 / 100],
[100, 0.13 / 100],
[250, 0.13 / 100],
[1000, 0.1 / 100],
[5000, 0.09 / 100],
[10000, 0.075 / 100],
[20000, 0.065 / 100],
],
'maker': [
[0, 0.1 / 100],
[100, 0.1 / 100],
[250, 0.09 / 100],
[1000, 0.075 / 100],
[5000, 0.06 / 100],
[10000, 0.05 / 100],
[20000, 0.05 / 100],
],
},
},
},
'precisionMode': TICK_SIZE,
'requiredCredentials': {
'apiKey': True,
'secret': False,
},
'exceptions': {
'exact': {
# Margin required to place order with selected leverage and quantity is insufficient.
'insufficient_margin': InsufficientFunds, # {"error":{"code":"insufficient_margin","context":{"available_balance":"0.000000000000000000","required_additional_balance":"1.618626000000000000000000000"}},"success":false}
'order_size_exceed_available': InvalidOrder, # The order book doesn't have sufficient liquidity, hence the order couldnt be filled, for example, ioc orders
'risk_limits_breached': BadRequest, # orders couldn't be placed as it will breach allowed risk limits.
'invalid_contract': BadSymbol, # The contract/product is either doesn't exist or has already expired.
'immediate_liquidation': InvalidOrder, # Order will cause immediate liquidation.
'out_of_bankruptcy': InvalidOrder, # Order prices are out of position bankruptcy limits.
'self_matching_disrupted_post_only': InvalidOrder, # Self matching is not allowed during auction.
'immediate_execution_post_only': InvalidOrder, # orders couldn't be placed as it includes post only orders which will be immediately executed
'bad_schema': BadRequest, # {"error":{"code":"bad_schema","context":{"schema_errors":[{"code":"validation_error","message":"id is required","param":""}]}},"success":false}
'invalid_api_key': AuthenticationError, # {"success":false,"error":{"code":"invalid_api_key"}}
'invalid_signature': AuthenticationError, # {"success":false,"error":{"code":"invalid_signature"}}
'open_order_not_found': OrderNotFound, # {"error":{"code":"open_order_not_found"},"success":false}
'unavailable': ExchangeNotAvailable, # {"error":{"code":"unavailable"},"success":false}
},
'broad': {
},
},
})
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the delta api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetSettings(params)
# full response sample under `fetchStatus`
result = self.safe_value(response, 'result', {})
return self.safe_integer_product(result, 'server_time', 0.001)
async def fetch_status(self, params={}):
"""
the latest known information on the availability of the exchange API
:param dict params: extra parameters specific to the delta api endpoint
:returns dict: a `status structure <https://docs.ccxt.com/en/latest/manual.html#exchange-status-structure>`
"""
response = await self.publicGetSettings(params)
#
# {
# "result": {
# "deto_liquidity_mining_daily_reward": "40775",
# "deto_msp": "1.0",
# "deto_staking_daily_reward": "23764.08",
# "enabled_wallets": [
# "BTC",
# ...
# ],
# "portfolio_margin_params": {
# "enabled_portfolios": {
# ".DEAVAXUSDT": {
# "asset_id": 5,
# "futures_contingency_margin_percent": "1",
# "interest_rate": "0",
# "maintenance_margin_multiplier": "0.8",
# "max_price_shock": "20",
# "max_short_notional_limit": "2000",
# "options_contingency_margin_percent": "1",
# "options_discount_range": "10",
# "options_liq_band_range_percentage": "25",
# "settling_asset": "USDT",
# "sort_priority": 5,
# "underlying_asset": "AVAX",
# "volatility_down_shock": "30",
# "volatility_up_shock": "45"
# },
# ...
# },
# "portfolio_enabled_contracts": [
# "futures",
# "perpetual_futures",
# "call_options",
# "put_options"
# ]
# },
# "server_time": 1650640673500273,
# "trade_farming_daily_reward": "100000",
# "circulating_supply": "140000000",
# "circulating_supply_update_time": "1636752800",
# "deto_referral_mining_daily_reward": "0",
# "deto_total_reward_pool": "100000000",
# "deto_trade_mining_daily_reward": "0",
# "kyc_deposit_limit": "20",
# "kyc_withdrawal_limit": "10000",
# "maintenance_start_time": "1650387600000000",
# "msp_deto_commission_percent": "25",
# "under_maintenance": "false"
# },
# "success": True
# }
#
result = self.safe_value(response, 'result', {})
underMaintenance = self.safe_string(result, 'under_maintenance')
status = 'maintenance' if (underMaintenance == 'true') else 'ok'
updated = self.safe_integer_product(result, 'server_time', 0.001, self.milliseconds())
return {
'status': status,
'updated': updated,
'eta': None,
'url': None,
'info': response,
}
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the delta api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.publicGetAssets(params)
#
# {
# "result":[
# {
# "base_withdrawal_fee":"0.0005",
# "deposit_status":"enabled",
# "id":2,
# "interest_credit":true,
# "interest_slabs":[
# {"limit":"0.1","rate":"0"},
# {"limit":"1","rate":"0.05"},
# {"limit":"5","rate":"0.075"},
# {"limit":"10","rate":"0.1"},
# {"limit":"9999999999999999","rate":"0"}
# ],
# "kyc_deposit_limit":"10",
# "kyc_withdrawal_limit":"2",
# "min_withdrawal_amount":"0.001",
# "minimum_precision":4,
# "name":"Bitcoin",
# "precision":8,
# "sort_priority":1,
# "symbol":"BTC",
# "variable_withdrawal_fee":"0",
# "withdrawal_status":"enabled"
# },
# ],
# "success":true
# }
#
currencies = self.safe_value(response, 'result', [])
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
id = self.safe_string(currency, 'symbol')
numericId = self.safe_integer(currency, 'id')
code = self.safe_currency_code(id)
depositStatus = self.safe_string(currency, 'deposit_status')
withdrawalStatus = self.safe_string(currency, 'withdrawal_status')
depositsEnabled = (depositStatus == 'enabled')
withdrawalsEnabled = (withdrawalStatus == 'enabled')
active = depositsEnabled and withdrawalsEnabled
precision = self.safe_integer(currency, 'precision')
result[code] = {
'id': id,
'numericId': numericId,
'code': code,
'name': self.safe_string(currency, 'name'),
'info': currency, # the original payload
'active': active,
'deposit': depositsEnabled,
'withdraw': withdrawalsEnabled,
'fee': self.safe_number(currency, 'base_withdrawal_fee'),
'precision': 1 / math.pow(10, precision),
'limits': {
'amount': {'min': None, 'max': None},
'withdraw': {
'min': self.safe_number(currency, 'min_withdrawal_amount'),
'max': None,
},
},
}
return result
async def load_markets(self, reload=False, params={}):
markets = await super(delta, self).load_markets(reload, params)
currenciesByNumericId = self.safe_value(self.options, 'currenciesByNumericId')
if (currenciesByNumericId is None) or reload:
self.options['currenciesByNumericId'] = self.index_by(self.currencies, 'numericId')
marketsByNumericId = self.safe_value(self.options, 'marketsByNumericId')
if (marketsByNumericId is None) or reload:
self.options['marketsByNumericId'] = self.index_by(self.markets, 'numericId')
return markets
    async def fetch_markets(self, params={}):
        """
        retrieves data on all markets for delta
        :param dict params: extra parameters specific to the exchange api endpoint
        :returns [dict]: an array of objects representing market data
        """
        response = await self.publicGetProducts(params)
        #
        # {
        #     "meta":{
        #         "after":null,
        #         "before":null,
        #         "limit":100,
        #         "total_count":81
        #     },
        #     "result":[
        #         {
        #             "annualized_funding":"5.475000000000000000",
        #             "is_quanto":false,
        #             "ui_config":{
        #                 "default_trading_view_candle":"15",
        #                 "leverage_slider_values":[1,3,5,10,25,50],
        #                 "price_clubbing_values":[0.001,0.005,0.05,0.1,0.5,1,5],
        #                 "show_bracket_orders":false,
        #                 "sort_priority":29,
        #                 "tags":[]
        #             },
        #             "basis_factor_max_limit":"0.15",
        #             "symbol":"P-LINK-D-151120",
        #             "id":1584,
        #             "default_leverage":"5.000000000000000000",
        #             "maker_commission_rate":"0.0005",
        #             "contract_unit_currency":"LINK",
        #             "strike_price":"12.507948",
        #             "settling_asset":{
        #                 # asset structure
        #             },
        #             "auction_start_time":null,
        #             "auction_finish_time":null,
        #             "settlement_time":"2020-11-15T12:00:00Z",
        #             "launch_time":"2020-11-14T11:55:05Z",
        #             "spot_index":{
        #                 # index structure
        #             },
        #             "trading_status":"operational",
        #             "tick_size":"0.001",
        #             "position_size_limit":100000,
        #             "notional_type":"vanilla",  # vanilla, inverse
        #             "price_band":"0.4",
        #             "barrier_price":null,
        #             "description":"Daily LINK PUT options quoted in USDT and settled in USDT",
        #             "insurance_fund_margin_contribution":"1",
        #             "quoting_asset":{
        #                 # asset structure
        #             },
        #             "liquidation_penalty_factor":"0.2",
        #             "product_specs":{"max_volatility":3,"min_volatility":0.3,"spot_price_band":"0.40"},
        #             "initial_margin_scaling_factor":"0.0001",
        #             "underlying_asset":{
        #                 # asset structure
        #             },
        #             "state":"live",
        #             "contract_value":"1",
        #             "initial_margin":"2",
        #             "impact_size":5000,
        #             "settlement_price":null,
        #             "contract_type":"put_options",  # put_options, call_options, move_options, perpetual_futures, interest_rate_swaps, futures, spreads
        #             "taker_commission_rate":"0.0005",
        #             "maintenance_margin":"1",
        #             "short_description":"LINK Daily PUT Options",
        #             "maintenance_margin_scaling_factor":"0.00005",
        #             "funding_method":"mark_price",
        #             "max_leverage_notional":"20000"
        #         },
        #     ],
        #     "success":true
        # }
        #
        markets = self.safe_value(response, 'result', [])
        result = []
        for i in range(0, len(markets)):
            market = markets[i]
            type = self.safe_string(market, 'contract_type')
            # settlingAsset = self.safe_value(market, 'settling_asset', {})
            quotingAsset = self.safe_value(market, 'quoting_asset', {})
            underlyingAsset = self.safe_value(market, 'underlying_asset', {})
            settlingAsset = self.safe_value(market, 'settling_asset')
            baseId = self.safe_string(underlyingAsset, 'symbol')
            quoteId = self.safe_string(quotingAsset, 'symbol')
            settleId = self.safe_string(settlingAsset, 'symbol')
            id = self.safe_string(market, 'symbol')
            numericId = self.safe_integer(market, 'id')
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            settle = self.safe_currency_code(settleId)
            callOptions = (type == 'call_options')
            putOptions = (type == 'put_options')
            moveOptions = (type == 'move_options')
            spot = (type == 'spot')
            swap = (type == 'perpetual_futures')
            future = (type == 'futures')
            option = (callOptions or putOptions or moveOptions)
            strike = self.safe_string(market, 'strike_price')
            expiryDatetime = self.safe_string(market, 'settlement_time')
            expiry = self.parse8601(expiryDatetime)
            contractSize = self.safe_number(market, 'contract_value')
            # a contract is flagged linear when it settles in its base currency
            linear = (settle == base)
            optionType = None
            symbol = base + '/' + quote
            # contract symbols embed the settle currency, and futures/options also the expiry date
            if swap or future or option:
                symbol = symbol + ':' + settle
                if future or option:
                    symbol = symbol + '-' + self.yymmdd(expiry)
                    if option:
                        type = 'option'
                        letter = 'C'
                        optionType = 'call'
                        if putOptions:
                            letter = 'P'
                            optionType = 'put'
                        elif moveOptions:
                            letter = 'M'
                            optionType = 'move'
                        # option symbols additionally carry the strike and a C/P/M letter
                        symbol = symbol + ':' + strike + ':' + letter
                    else:
                        type = 'future'
                else:
                    type = 'swap'
            else:
                # spot markets(and any unrecognized contract types) fall back to the raw exchange id
                symbol = id
            state = self.safe_string(market, 'state')
            result.append({
                'id': id,
                'numericId': numericId,  # used by load_markets() to build the marketsByNumericId lookup
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'settle': settle,
                'baseId': baseId,
                'quoteId': quoteId,
                'settleId': settleId,
                'type': type,
                'spot': spot,
                'margin': None if spot else False,
                'swap': swap,
                'future': future,
                'option': option,
                'active': (state == 'live'),
                'contract': not spot,
                'linear': None if spot else linear,
                'inverse': None if spot else not linear,
                'taker': self.safe_number(market, 'taker_commission_rate'),
                'maker': self.safe_number(market, 'maker_commission_rate'),
                'contractSize': contractSize,
                'expiry': expiry,
                'expiryDatetime': expiryDatetime,
                'strike': self.parse_number(strike),
                'optionType': optionType,
                'precision': {
                    'amount': self.parse_number('1'),  # number of contracts
                    'price': self.safe_number(market, 'tick_size'),
                },
                'limits': {
                    'leverage': {
                        'min': None,
                        'max': None,
                    },
                    'amount': {
                        'min': self.parse_number('1'),
                        'max': self.safe_number(market, 'position_size_limit'),
                    },
                    'price': {
                        'min': None,
                        'max': None,
                    },
                    'cost': {
                        'min': self.safe_number(market, 'min_size'),
                        'max': None,
                    },
                },
                'info': market,
            })
        return result
    def parse_ticker(self, ticker, market=None):
        """
        Convert a raw exchange ticker payload into a unified ticker structure.
        Numeric fields are passed through as strings to safe_ticker for precise parsing.
        """
        #
        # fetchTicker, fetchTickers
        #
        # {
        #     "close":15837.5,
        #     "high":16354,
        #     "low":15751.5,
        #     "mark_price":"15820.100867",
        #     "open":16140.5,
        #     "product_id":139,
        #     "size":640552,
        #     "spot_price":"15827.050000000001",
        #     "symbol":"BTCUSDT",
        #     "timestamp":1605373550208262,
        #     "turnover":10298630.3735,
        #     "turnover_symbol":"USDT",
        #     "turnover_usd":10298630.3735,
        #     "volume":640.5520000000001
        # }
        #
        # the exchange timestamp is in microseconds - scale down to milliseconds
        timestamp = self.safe_integer_product(ticker, 'timestamp', 0.001)
        marketId = self.safe_string(ticker, 'symbol')
        symbol = self.safe_symbol(marketId, market)
        last = self.safe_string(ticker, 'close')
        open = self.safe_string(ticker, 'open')
        baseVolume = self.safe_string(ticker, 'volume')
        quoteVolume = self.safe_string(ticker, 'turnover')
        return self.safe_ticker({
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': self.safe_string(ticker, 'high'),
            'low': self.safe_string(ticker, 'low'),
            'bid': None,
            'bidVolume': None,
            'ask': None,
            'askVolume': None,
            'vwap': None,
            'open': open,
            'close': last,
            'last': last,
            'previousClose': None,
            'change': None,
            'percentage': None,
            'average': None,
            'baseVolume': baseVolume,
            'quoteVolume': quoteVolume,
            'info': ticker,
        }, market)
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the delta api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = await self.publicGetTickersSymbol(self.extend(request, params))
#
# {
# "result":{
# "close":15837.5,
# "high":16354,
# "low":15751.5,
# "mark_price":"15820.100867",
# "open":16140.5,
# "product_id":139,
# "size":640552,
# "spot_price":"15827.050000000001",
# "symbol":"BTCUSDT",
# "timestamp":1605373550208262,
# "turnover":10298630.3735,
# "turnover_symbol":"USDT",
# "turnover_usd":10298630.3735,
# "volume":640.5520000000001
# },
# "success":true
# }
#
result = self.safe_value(response, 'result', {})
return self.parse_ticker(result, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the delta api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTickers(params)
#
# {
# "result":[
# {
# "close":0.003966,
# "high":0.004032,
# "low":0.003606,
# "mark_price":"0.00396328",
# "open":0.003996,
# "product_id":1327,
# "size":6242,
# "spot_price":"0.0039555",
# "symbol":"AAVEBTC",
# "timestamp":1605374143864107,
# "turnover":23.997904999999996,
# "turnover_symbol":"BTC",
# "turnover_usd":387957.4544782897,
# "volume":6242
# },
# ],
# "success":true
# }
#
tickers = self.safe_value(response, 'result', [])
result = {}
for i in range(0, len(tickers)):
ticker = self.parse_ticker(tickers[i])
symbol = ticker['symbol']
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the delta api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
await self.load_markets()
request = {
'symbol': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetL2orderbookSymbol(self.extend(request, params))
#
# {
# "result":{
# "buy":[
# {"price":"15814.0","size":912},
# {"price":"15813.5","size":1279},
# {"price":"15813.0","size":1634},
# ],
# "sell":[
# {"price":"15814.5","size":625},
# {"price":"15815.0","size":982},
# {"price":"15815.5","size":1328},
# ],
# "symbol":"BTCUSDT"
# },
# "success":true
# }
#
result = self.safe_value(response, 'result', {})
return self.parse_order_book(result, symbol, None, 'buy', 'sell', 'price', 'size')
    def parse_trade(self, trade, market=None):
        """
        Convert a raw trade(public trade or private fill) into a unified trade structure.
        Public trades and private fills have different shapes - see the examples below.
        """
        #
        # public fetchTrades
        #
        # {
        #     "buyer_role":"maker",
        #     "price":"15896.5",
        #     "seller_role":"taker",
        #     "size":241,
        #     "symbol":"BTCUSDT",
        #     "timestamp":1605376684714595
        # }
        #
        # private fetchMyTrades
        #
        # {
        #     "commission":"0.008335000000000000",
        #     "created_at":"2020-11-16T19:07:19Z",
        #     "fill_type":"normal",
        #     "id":"e7ff05c233a74245b72381f8dd91d1ce",
        #     "meta_data":{
        #         "effective_commission_rate":"0.0005",
        #         "order_price":"16249",
        #         "order_size":1,
        #         "order_type":"market_order",
        #         "order_unfilled_size":0,
        #         "trading_fee_credits_used":"0"
        #     },
        #     "order_id":"152999629",
        #     "price":"16669",
        #     "product":{
        #         "contract_type":"perpetual_futures",
        #         "contract_unit_currency":"BTC",
        #         "contract_value":"0.001",
        #         "id":139,
        #         "notional_type":"vanilla",
        #         "quoting_asset":{"minimum_precision":2,"precision":6,"symbol":"USDT"},
        #         "settling_asset":{"minimum_precision":2,"precision":6,"symbol":"USDT"},
        #         "symbol":"BTCUSDT",
        #         "tick_size":"0.5",
        #         "underlying_asset":{"minimum_precision":4,"precision":8,"symbol":"BTC"}
        #     },
        #     "product_id":139,
        #     "role":"taker",
        #     "side":"sell",
        #     "size":1
        # }
        #
        id = self.safe_string(trade, 'id')
        orderId = self.safe_string(trade, 'order_id')
        # private fills carry an ISO8601 'created_at'; public trades a microsecond 'timestamp'
        # the second assignment falls back to the first when 'timestamp' is absent
        timestamp = self.parse8601(self.safe_string(trade, 'created_at'))
        timestamp = self.safe_integer_product(trade, 'timestamp', 0.001, timestamp)
        priceString = self.safe_string(trade, 'price')
        amountString = self.safe_string(trade, 'size')
        product = self.safe_value(trade, 'product', {})
        marketId = self.safe_string(product, 'symbol')
        symbol = self.safe_symbol(marketId, market)
        sellerRole = self.safe_string(trade, 'seller_role')
        side = self.safe_string(trade, 'side')
        if side is None:
            # public trades have no explicit side - infer it from the seller's role
            if sellerRole == 'taker':
                side = 'sell'
            elif sellerRole == 'maker':
                side = 'buy'
        takerOrMaker = self.safe_string(trade, 'role')
        metaData = self.safe_value(trade, 'meta_data', {})
        type = self.safe_string(metaData, 'order_type')
        if type is not None:
            # strip the suffix, e.g. 'market_order' -> 'market'
            type = type.replace('_order', '')
        feeCostString = self.safe_string(trade, 'commission')
        fee = None
        if feeCostString is not None:
            # fees are charged in the settling asset of the product
            settlingAsset = self.safe_value(product, 'settling_asset', {})
            feeCurrencyId = self.safe_string(settlingAsset, 'symbol')
            feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
            fee = {
                'cost': feeCostString,
                'currency': feeCurrencyCode,
            }
        return self.safe_trade({
            'id': id,
            'order': orderId,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': symbol,
            'type': type,
            'side': side,
            'price': priceString,
            'amount': amountString,
            'cost': None,
            'takerOrMaker': takerOrMaker,
            'fee': fee,
            'info': trade,
        }, market)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the delta api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = await self.publicGetTradesSymbol(self.extend(request, params))
#
# {
# "result":[
# {
# "buyer_role":"maker",
# "price":"15896.5",
# "seller_role":"taker",
# "size":241,
# "symbol":"BTCUSDT",
# "timestamp":1605376684714595
# }
# ],
# "success":true
# }
#
result = self.safe_value(response, 'result', [])
return self.parse_trades(result, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "time":1605393120,
# "open":15989,
# "high":15989,
# "low":15987.5,
# "close":15987.5,
# "volume":565
# }
#
return [
self.safe_timestamp(ohlcv, 'time'),
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number(ohlcv, 'volume'),
]
    async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """
        fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
        :param str symbol: unified symbol of the market to fetch OHLCV data for
        :param str timeframe: the length of time each candle represents
        :param int|None since: timestamp in ms of the earliest candle to fetch
        :param int|None limit: the maximum amount of candles to fetch
        :param dict params: extra parameters specific to the delta api endpoint
        :returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
        """
        await self.load_markets()
        market = self.market(symbol)
        request = {
            'symbol': market['id'],
            'resolution': self.timeframes[timeframe],
        }
        # candle duration in seconds, used to size the requested time window
        duration = self.parse_timeframe(timeframe)
        limit = limit if limit else 2000  # max 2000
        # the API wants an explicit [start, end] window in seconds
        if since is None:
            # no start given: request the last `limit` candles ending now
            end = self.seconds()
            request['end'] = end
            request['start'] = end - limit * duration
        else:
            # start given in milliseconds: convert to seconds and extend forward by `limit` candles
            start = int(since / 1000)
            request['start'] = start
            request['end'] = self.sum(start, limit * duration)
        response = await self.publicGetHistoryCandles(self.extend(request, params))
        #
        # {
        #     "success":true,
        #     "result":[
        #         {"time":1605393120,"open":15989,"high":15989,"low":15987.5,"close":15987.5,"volume":565},
        #         {"time":1605393180,"open":15966,"high":15966,"low":15959,"close":15959,"volume":24},
        #         {"time":1605393300,"open":15973,"high":15973,"low":15973,"close":15973,"volume":1288},
        #     ]
        # }
        #
        result = self.safe_value(response, 'result', [])
        return self.parse_ohlcvs(result, market, timeframe, since, limit)
def parse_balance(self, response):
balances = self.safe_value(response, 'result', [])
result = {'info': response}
currenciesByNumericId = self.safe_value(self.options, 'currenciesByNumericId', {})
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'asset_id')
currency = self.safe_value(currenciesByNumericId, currencyId)
code = currencyId if (currency is None) else currency['code']
account = self.account()
account['total'] = self.safe_string(balance, 'balance')
account['free'] = self.safe_string(balance, 'available_balance')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the delta api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetWalletBalances(params)
#
# {
# "result":[
# {
# "asset_id":1,
# "available_balance":"0",
# "balance":"0",
# "commission":"0",
# "id":154883,
# "interest_credit":"0",
# "order_margin":"0",
# "pending_referral_bonus":"0",
# "pending_trading_fee_credit":"0",
# "position_margin":"0",
# "trading_fee_credit":"0",
# "user_id":22142
# },
# ],
# "success":true
# }
#
return self.parse_balance(response)
async def fetch_position(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'product_id': market['numericId'],
}
response = await self.privateGetPositions(self.extend(request, params))
#
# {
# "result":{
# "entry_price":null,
# "size":0,
# "timestamp":1605454074268079
# },
# "success":true
# }
#
result = self.safe_value(response, 'result', {})
return result
async def fetch_positions(self, symbols=None, params={}):
"""
fetch all open positions
:param [str]|None symbols: list of unified market symbols
:param dict params: extra parameters specific to the delta api endpoint
:returns [dict]: a list of `position structure <https://docs.ccxt.com/en/latest/manual.html#position-structure>`
"""
await self.load_markets()
response = await self.privateGetPositionsMargined(params)
#
# {
# "success": True,
# "result": [
# {
# "user_id": 0,
# "size": 0,
# "entry_price": "string",
# "margin": "string",
# "liquidation_price": "string",
# "bankruptcy_price": "string",
# "adl_level": 0,
# "product_id": 0
# }
# ]
# }
#
result = self.safe_value(response, 'result', [])
return result
def parse_order_status(self, status):
statuses = {
'open': 'open',
'pending': 'open',
'closed': 'closed',
'cancelled': 'canceled',
}
return self.safe_string(statuses, status, status)
    def parse_order(self, order, market=None):
        """
        Convert a raw order payload into a unified order structure.
        """
        #
        # createOrder, cancelOrder, editOrder, fetchOpenOrders, fetchClosedOrders
        #
        # {
        #     "average_fill_price":null,
        #     "bracket_order":null,
        #     "bracket_stop_loss_limit_price":null,
        #     "bracket_stop_loss_price":null,
        #     "bracket_take_profit_limit_price":null,
        #     "bracket_take_profit_price":null,
        #     "bracket_trail_amount":null,
        #     "cancellation_reason":null,
        #     "client_order_id":null,
        #     "close_on_trigger":"false",
        #     "commission":"0",
        #     "created_at":"2020-11-16T02:38:26Z",
        #     "id":152870626,
        #     "limit_price":"10000",
        #     "meta_data":{"source":"api"},
        #     "order_type":"limit_order",
        #     "paid_commission":"0",
        #     "product_id":139,
        #     "reduce_only":false,
        #     "side":"buy",
        #     "size":0,
        #     "state":"open",
        #     "stop_order_type":null,
        #     "stop_price":null,
        #     "stop_trigger_method":"mark_price",
        #     "time_in_force":"gtc",
        #     "trail_amount":null,
        #     "unfilled_size":0,
        #     "user_id":22142
        # }
        #
        id = self.safe_string(order, 'id')
        clientOrderId = self.safe_string(order, 'client_order_id')
        timestamp = self.parse8601(self.safe_string(order, 'created_at'))
        # 'product_id' is numeric - resolve it through the marketsByNumericId table built in load_markets()
        marketId = self.safe_string(order, 'product_id')
        marketsByNumericId = self.safe_value(self.options, 'marketsByNumericId', {})
        market = self.safe_value(marketsByNumericId, marketId, market)
        # fall back to the raw numeric id when the market is unknown
        symbol = marketId if (market is None) else market['symbol']
        status = self.parse_order_status(self.safe_string(order, 'state'))
        side = self.safe_string(order, 'side')
        type = self.safe_string(order, 'order_type')
        # strip the suffix, e.g. 'limit_order' -> 'limit'
        type = type.replace('_order', '')
        price = self.safe_string(order, 'limit_price')
        amount = self.safe_string(order, 'size')
        remaining = self.safe_string(order, 'unfilled_size')
        average = self.safe_string(order, 'average_fill_price')
        fee = None
        feeCostString = self.safe_string(order, 'paid_commission')
        if feeCostString is not None:
            # fees are charged in the settling asset of the product
            feeCurrencyCode = None
            if market is not None:
                settlingAsset = self.safe_value(market['info'], 'settling_asset', {})
                feeCurrencyId = self.safe_string(settlingAsset, 'symbol')
                feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
            fee = {
                'cost': feeCostString,
                'currency': feeCurrencyCode,
            }
        return self.safe_order({
            'info': order,
            'id': id,
            'clientOrderId': clientOrderId,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'symbol': symbol,
            'type': type,
            'side': side,
            'price': price,
            'amount': amount,
            'cost': None,
            'average': average,
            'filled': None,
            'remaining': remaining,
            'status': status,
            'fee': fee,
            'trades': None,
        }, market)
    async def create_order(self, symbol, type, side, amount, price=None, params={}):
        """
        create a trade order
        :param str symbol: unified symbol of the market to create an order in
        :param str type: 'market' or 'limit'
        :param str side: 'buy' or 'sell'
        :param float amount: how much of currency you want to trade in units of base currency
        :param float price: the price at which the order is to be fullfilled, in units of the quote currency, ignored in market orders
        :param dict params: extra parameters specific to the delta api endpoint
        :returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
        """
        await self.load_markets()
        # the exchange expects 'limit_order' / 'market_order'
        orderType = type + '_order'
        market = self.market(symbol)
        request = {
            'product_id': market['numericId'],
            # 'limit_price': self.price_to_precision(symbol, price),
            'size': self.amount_to_precision(symbol, amount),
            'side': side,
            'order_type': orderType,
            # 'client_order_id': 'string',
            # 'time_in_force': 'gtc',  # gtc, ioc, fok
            # 'post_only': 'false',  # 'true',
            # 'reduce_only': 'false',  # 'true',
        }
        # only limit orders carry a price
        if type == 'limit':
            request['limit_price'] = self.price_to_precision(symbol, price)
        # accept both the unified and the exchange-specific param name for the client order id
        clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client_order_id')
        params = self.omit(params, ['clientOrderId', 'client_order_id'])
        if clientOrderId is not None:
            request['client_order_id'] = clientOrderId
        response = await self.privatePostOrders(self.extend(request, params))
        #
        # {
        #     "result":{
        #         "average_fill_price":null,
        #         "bracket_order":null,
        #         "bracket_stop_loss_limit_price":null,
        #         "bracket_stop_loss_price":null,
        #         "bracket_take_profit_limit_price":null,
        #         "bracket_take_profit_price":null,
        #         "bracket_trail_amount":null,
        #         "cancellation_reason":null,
        #         "client_order_id":null,
        #         "close_on_trigger":"false",
        #         "commission":"0",
        #         "created_at":"2020-11-16T02:38:26Z",
        #         "id":152870626,
        #         "limit_price":"10000",
        #         "meta_data":{"source":"api"},
        #         "order_type":"limit_order",
        #         "paid_commission":"0",
        #         "product_id":139,
        #         "reduce_only":false,
        #         "side":"buy",
        #         "size":0,
        #         "state":"open",
        #         "stop_order_type":null,
        #         "stop_price":null,
        #         "stop_trigger_method":"mark_price",
        #         "time_in_force":"gtc",
        #         "trail_amount":null,
        #         "unfilled_size":0,
        #         "user_id":22142
        #     },
        #     "success":true
        # }
        #
        result = self.safe_value(response, 'result', {})
        return self.parse_order(result, market)
    async def edit_order(self, id, symbol, type, side, amount, price=None, params={}):
        """
        edit an open order's price and/or size
        :param str id: order id
        :param str symbol: unified symbol of the market the order was made in
        :param str type: 'market' or 'limit'
        :param str side: 'buy' or 'sell'
        :param float|None amount: how much of currency you want to trade in units of base currency
        :param float|None price: the price at which the order is to be fullfilled, in units of the quote currency
        :param dict params: extra parameters specific to the delta api endpoint
        :returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`

        NOTE(review): `type` and `side` are accepted for interface compatibility but are not
        sent to the exchange - the endpoint only edits limit_price and size.
        """
        await self.load_markets()
        market = self.market(symbol)
        request = {
            'id': int(id),  # the API expects a numeric order id
            'product_id': market['numericId'],
            # 'limit_price': self.price_to_precision(symbol, price),
            # 'size': self.amount_to_precision(symbol, amount),
        }
        if amount is not None:
            request['size'] = int(self.amount_to_precision(symbol, amount))
        if price is not None:
            request['limit_price'] = self.price_to_precision(symbol, price)
        response = await self.privatePutOrders(self.extend(request, params))
        #
        # {
        #     "success": True,
        #     "result": {
        #         "id": "ashb1212",
        #         "product_id": 27,
        #         "limit_price": "9200",
        #         "side": "buy",
        #         "size": 100,
        #         "unfilled_size": 50,
        #         "user_id": 1,
        #         "order_type": "limit_order",
        #         "state": "open",
        #         "created_at": "..."
        #     }
        # }
        #
        result = self.safe_value(response, 'result')
        return self.parse_order(result, market)
    async def cancel_order(self, id, symbol=None, params={}):
        """
        cancels an open order
        :param str id: order id
        :param str symbol: unified symbol of the market the order was made in
        :param dict params: extra parameters specific to the delta api endpoint
        :returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
        :raises ArgumentsRequired: if no symbol is given - the endpoint needs the product id
        """
        if symbol is None:
            raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
        await self.load_markets()
        market = self.market(symbol)
        request = {
            'id': int(id),  # the API expects a numeric order id
            'product_id': market['numericId'],
        }
        response = await self.privateDeleteOrders(self.extend(request, params))
        #
        # {
        #     "result":{
        #         "average_fill_price":null,
        #         "bracket_order":null,
        #         "bracket_stop_loss_limit_price":null,
        #         "bracket_stop_loss_price":null,
        #         "bracket_take_profit_limit_price":null,
        #         "bracket_take_profit_price":null,
        #         "bracket_trail_amount":null,
        #         "cancellation_reason":"cancelled_by_user",
        #         "client_order_id":null,
        #         "close_on_trigger":"false",
        #         "commission":"0",
        #         "created_at":"2020-11-16T02:38:26Z",
        #         "id":152870626,
        #         "limit_price":"10000",
        #         "meta_data":{"source":"api"},
        #         "order_type":"limit_order",
        #         "paid_commission":"0",
        #         "product_id":139,
        #         "reduce_only":false,
        #         "side":"buy",
        #         "size":0,
        #         "state":"cancelled",
        #         "stop_order_type":null,
        #         "stop_price":null,
        #         "stop_trigger_method":"mark_price",
        #         "time_in_force":"gtc",
        #         "trail_amount":null,
        #         "unfilled_size":0,
        #         "user_id":22142
        #     },
        #     "success":true
        # }
        #
        result = self.safe_value(response, 'result')
        return self.parse_order(result, market)
async def cancel_all_orders(self, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelAllOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'product_id': market['numericId'],
# 'cancel_limit_orders': 'true',
# 'cancel_stop_orders': 'true',
}
response = self.privateDeleteOrdersAll(self.extend(request, params))
#
# {
# "result":{},
# "success":true
# }
#
return response
    async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
        """
        fetch all unfilled currently open orders
        :param str|None symbol: unified market symbol
        :param int|None since: the earliest time in ms to fetch open orders for
        :param int|None limit: the maximum number of open order structures to retrieve
        :param dict params: extra parameters specific to the delta api endpoint
        :returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
        """
        # delegates to the shared helper with the live-orders endpoint
        return await self.fetch_orders_with_method('privateGetOrders', symbol, since, limit, params)
    async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
        """
        fetches information on multiple closed orders made by the user
        :param str|None symbol: unified market symbol
        :param int|None since: the earliest time in ms to fetch orders for
        :param int|None limit: the maximum number of order structures to retrieve
        :param dict params: extra parameters specific to the delta api endpoint
        :returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
        """
        # delegates to the shared helper with the order-history endpoint
        return await self.fetch_orders_with_method('privateGetOrdersHistory', symbol, since, limit, params)
    async def fetch_orders_with_method(self, method, symbol=None, since=None, limit=None, params={}):
        """
        shared implementation for fetchOpenOrders / fetchClosedOrders
        :param str method: name of the private endpoint method to call
        :param str|None symbol: unified market symbol to filter orders by
        :param int|None since: the earliest time in ms to fetch orders for
        :param int|None limit: the maximum number of order structures to retrieve
        :param dict params: extra parameters specific to the delta api endpoint
        :returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
        """
        await self.load_markets()
        request = {
            # 'product_ids': market['id'],  # comma-separated
            # 'contract_types': types,  # comma-separated, futures, perpetual_futures, call_options, put_options, interest_rate_swaps, move_options, spreads
            # 'order_types': types,  # comma-separated, market, limit, stop_market, stop_limit, all_stop
            # 'start_time': since * 1000,
            # 'end_time': self.microseconds(),
            # 'after': string,  # after cursor for pagination
            # 'before': string,  # before cursor for pagination
            # 'page_size': limit,  # number of records per page
        }
        market = None
        if symbol is not None:
            market = self.market(symbol)
            request['product_ids'] = market['numericId']  # accepts a comma-separated list of ids
        if since is not None:
            # the API expects microseconds - append three zeros to the millisecond value
            request['start_time'] = str(since) + '000'
        if limit is not None:
            request['page_size'] = limit
        # dynamic dispatch on the endpoint method name passed by the wrappers
        response = await getattr(self, method)(self.extend(request, params))
        #
        # {
        #     "success": True,
        #     "result": [
        #         {
        #             "id": "ashb1212",
        #             "product_id": 27,
        #             "limit_price": "9200",
        #             "side": "buy",
        #             "size": 100,
        #             "unfilled_size": 50,
        #             "user_id": 1,
        #             "order_type": "limit_order",
        #             "state": "open",
        #             "created_at": "..."
        #         }
        #     ],
        #     "meta": {
        #         "after": "string",
        #         "before": "string"
        #     }
        # }
        #
        result = self.safe_value(response, 'result', [])
        return self.parse_orders(result, market, since, limit)
    async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
        """
        fetch all trades made by the user
        :param str|None symbol: unified market symbol
        :param int|None since: the earliest time in ms to fetch trades for
        :param int|None limit: the maximum number of trade structures to retrieve
        :param dict params: extra parameters specific to the delta api endpoint
        :returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
        """
        await self.load_markets()
        request = {
            # 'product_ids': market['id'],  # comma-separated
            # 'contract_types': types,  # comma-separated, futures, perpetual_futures, call_options, put_options, interest_rate_swaps, move_options, spreads
            # 'start_time': since * 1000,
            # 'end_time': self.microseconds(),
            # 'after': string,  # after cursor for pagination
            # 'before': string,  # before cursor for pagination
            # 'page_size': limit,  # number of records per page
        }
        market = None
        if symbol is not None:
            market = self.market(symbol)
            request['product_ids'] = market['numericId']  # accepts a comma-separated list of ids
        if since is not None:
            # the API expects microseconds - append three zeros to the millisecond value
            request['start_time'] = str(since) + '000'
        if limit is not None:
            request['page_size'] = limit
        response = await self.privateGetFills(self.extend(request, params))
        #
        # {
        #     "meta":{
        #         "after":null,
        #         "before":null,
        #         "limit":10,
        #         "total_count":2
        #     },
        #     "result":[
        #         {
        #             "commission":"0.008335000000000000",
        #             "created_at":"2020-11-16T19:07:19Z",
        #             "fill_type":"normal",
        #             "id":"e7ff05c233a74245b72381f8dd91d1ce",
        #             "meta_data":{
        #                 "effective_commission_rate":"0.0005",
        #                 "order_price":"16249",
        #                 "order_size":1,
        #                 "order_type":"market_order",
        #                 "order_unfilled_size":0,
        #                 "trading_fee_credits_used":"0"
        #             },
        #             "order_id":"152999629",
        #             "price":"16669",
        #             "product":{
        #                 "contract_type":"perpetual_futures",
        #                 "contract_unit_currency":"BTC",
        #                 "contract_value":"0.001",
        #                 "id":139,
        #                 "notional_type":"vanilla",
        #                 "quoting_asset":{"minimum_precision":2,"precision":6,"symbol":"USDT"},
        #                 "settling_asset":{"minimum_precision":2,"precision":6,"symbol":"USDT"},
        #                 "symbol":"BTCUSDT",
        #                 "tick_size":"0.5",
        #                 "underlying_asset":{"minimum_precision":4,"precision":8,"symbol":"BTC"}
        #             },
        #             "product_id":139,
        #             "role":"taker",
        #             "side":"sell",
        #             "size":1
        #         }
        #     ],
        #     "success":true
        # }
        #
        result = self.safe_value(response, 'result', [])
        return self.parse_trades(result, market, since, limit)
    async def fetch_ledger(self, code=None, since=None, limit=None, params={}):
        """
        fetch the history of changes, actions done by the user or operations that altered the balance of the user
        :param str|None code: unified currency code
        :param int|None since: timestamp in ms of the earliest ledger entry(only used for local filtering - it is not sent to the API)
        :param int|None limit: max number of ledger entries to return
        :param dict params: extra parameters specific to the delta api endpoint
        :returns dict: a `ledger structure <https://docs.ccxt.com/en/latest/manual.html#ledger-structure>`
        """
        await self.load_markets()
        request = {
            # 'asset_id': currency['numericId'],
            # 'end_time': self.seconds(),
            # 'after': 'string',  # after cursor for pagination
            # 'before': 'string',  # before cursor for pagination
            # 'page_size': limit,
        }
        currency = None
        if code is not None:
            currency = self.currency(code)
            # the API filters by numeric asset id, not by currency code
            request['asset_id'] = currency['numericId']
        if limit is not None:
            request['page_size'] = limit
        response = await self.privateGetWalletTransactions(self.extend(request, params))
        #
        # {
        #     "meta":{"after":null,"before":null,"limit":10,"total_count":1},
        #     "result":[
        #         {
        #             "amount":"29.889184",
        #             "asset_id":5,
        #             "balance":"29.889184",
        #             "created_at":"2020-11-15T21:25:01Z",
        #             "meta_data":{
        #                 "deposit_id":3884,
        #                 "transaction_id":"0x41a60174849828530abb5008e98fc63c9b598288743ec4ba9620bcce900a3b8d"
        #             },
        #             "transaction_type":"deposit",
        #             "user_id":22142,
        #             "uuid":"70bb5679da3c4637884e2dc63efaa846"
        #         }
        #     ],
        #     "success":true
        # }
        #
        result = self.safe_value(response, 'result', [])
        return self.parse_ledger(result, currency, since, limit)
def parse_ledger_entry_type(self, type):
types = {
'pnl': 'pnl',
'deposit': 'transaction',
'withdrawal': 'transaction',
'commission': 'fee',
'conversion': 'trade',
# 'perpetual_futures_funding': 'perpetual_futures_funding',
# 'withdrawal_cancellation': 'withdrawal_cancellation',
'referral_bonus': 'referral',
'commission_rebate': 'rebate',
# 'promo_credit': 'promo_credit',
}
return self.safe_string(types, type, type)
    def parse_ledger_entry(self, item, currency=None):
        """
        Convert a raw wallet transaction into a unified ledger entry.
        """
        #
        # {
        #     "amount":"29.889184",
        #     "asset_id":5,
        #     "balance":"29.889184",
        #     "created_at":"2020-11-15T21:25:01Z",
        #     "meta_data":{
        #         "deposit_id":3884,
        #         "transaction_id":"0x41a60174849828530abb5008e98fc63c9b598288743ec4ba9620bcce900a3b8d"
        #     },
        #     "transaction_type":"deposit",
        #     "user_id":22142,
        #     "uuid":"70bb5679da3c4637884e2dc63efaa846"
        # }
        #
        id = self.safe_string(item, 'uuid')
        direction = None
        account = None
        metaData = self.safe_value(item, 'meta_data', {})
        referenceId = self.safe_string(metaData, 'transaction_id')
        referenceAccount = None
        type = self.safe_string(item, 'transaction_type')
        # classify the raw transaction type as an inflow or an outflow
        if (type == 'deposit') or (type == 'commission_rebate') or (type == 'referral_bonus') or (type == 'pnl') or (type == 'withdrawal_cancellation') or (type == 'promo_credit'):
            direction = 'in'
        elif (type == 'withdrawal') or (type == 'commission') or (type == 'conversion') or (type == 'perpetual_futures_funding'):
            direction = 'out'
        type = self.parse_ledger_entry_type(type)
        # 'asset_id' is numeric - resolve through the currenciesByNumericId table built in load_markets()
        currencyId = self.safe_integer(item, 'asset_id')
        currenciesByNumericId = self.safe_value(self.options, 'currenciesByNumericId')
        currency = self.safe_value(currenciesByNumericId, currencyId, currency)
        code = None if (currency is None) else currency['code']
        amount = self.safe_number(item, 'amount')
        timestamp = self.parse8601(self.safe_string(item, 'created_at'))
        after = self.safe_number(item, 'balance')
        # reconstruct the pre-transaction balance from the post-balance, clamped at zero
        # NOTE(review): assumes 'amount' is reported with a sign consistent with this
        # subtraction for outflows as well - verify against the API docs
        before = max(0, after - amount)
        status = 'ok'
        return {
            'info': item,
            'id': id,
            'direction': direction,
            'account': account,
            'referenceId': referenceId,
            'referenceAccount': referenceAccount,
            'type': type,
            'currency': code,
            'amount': amount,
            'before': before,
            'after': after,
            'status': status,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'fee': None,
        }
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'asset_symbol': currency['id'],
}
response = await self.privateGetDepositsAddress(self.extend(request, params))
#
# {
# "success":true,
# "result":{
# "id":19628,
# "user_id":22142,
# "address":"0x0eda26523397534f814d553a065d8e46b4188e9a",
# "status":"active",
# "updated_at":"2020-11-15T20:25:53.000Z",
# "created_at":"2020-11-15T20:25:53.000Z",
# "asset_symbol":"USDT",
# "custodian":"onc"
# }
# }
#
result = self.safe_value(response, 'result', {})
address = self.safe_string(result, 'address')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': None,
'network': None,
'info': response,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
requestPath = '/' + self.version + '/' + self.implode_params(path, params)
url = self.urls['api'][api] + requestPath
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
elif api == 'private':
self.check_required_credentials()
timestamp = str(self.seconds())
headers = {
'api-key': self.apiKey,
'timestamp': timestamp,
}
auth = method + timestamp + requestPath
if (method == 'GET') or (method == 'DELETE'):
if query:
queryString = '?' + self.urlencode(query)
auth += queryString
url += queryString
else:
body = self.json(query)
auth += body
headers['Content-Type'] = 'application/json'
signature = self.hmac(self.encode(auth), self.encode(self.secret))
headers['signature'] = signature
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
#
# {"error":{"code":"insufficient_margin","context":{"available_balance":"0.000000000000000000","required_additional_balance":"1.618626000000000000000000000"}},"success":false}
#
error = self.safe_value(response, 'error', {})
errorCode = self.safe_string(error, 'code')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
| 43.295051 | 238 | 0.474653 |
f1eb9fadb1be11221aa72b1cef5457f6b96db7a7 | 38,520 | py | Python | eggs/boto-2.27.0-py2.7.egg/boto/sqs/connection2.py | psnehal/MethylSig | 5efad71e71ff2515feff2e49579c856ef9a1bbd8 | [
"CC-BY-3.0"
] | 1 | 2017-11-08T08:15:45.000Z | 2017-11-08T08:15:45.000Z | eggs/boto-2.27.0-py2.7.egg/boto/sqs/connection2.py | psnehal/MethylSig | 5efad71e71ff2515feff2e49579c856ef9a1bbd8 | [
"CC-BY-3.0"
] | null | null | null | eggs/boto-2.27.0-py2.7.egg/boto/sqs/connection2.py | psnehal/MethylSig | 5efad71e71ff2515feff2e49579c856ef9a1bbd8 | [
"CC-BY-3.0"
] | 4 | 2016-10-12T23:54:55.000Z | 2020-07-25T23:28:25.000Z | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
try:
import json
except ImportError:
import simplejson as json
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.sqs import exceptions
class SQSConnection(AWSQueryConnection):
"""
Welcome to the Amazon Simple Queue Service API Reference . This
section describes who should read this guide, how the guide is
organized, and other resources related to the Amazon Simple Queue
Service (Amazon SQS).
Amazon SQS offers reliable and scalable hosted queues for storing
messages as they travel between computers. By using Amazon SQS,
you can move data between distributed components of your
applications that perform different tasks without losing messages
or requiring each component to be always available.
Helpful Links:
+ `Current WSDL (2012-11-05)`_
+ `Making API Requests`_
+ `Amazon SQS product page`_
+ `Regions and Endpoints`_
We also provide SDKs that enable you to access Amazon SQS from
your preferred programming language. The SDKs contain
functionality that automatically takes care of tasks such as:
+ Cryptographically signing your service requests
+ Retrying requests
+ Handling error responses
For a list of available SDKs, go to `Tools for Amazon Web
Services`_.
"""
APIVersion = "2012-11-05"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "sqs.us-east-1.amazonaws.com"
ResponseError = JSONResponseError
_faults = {
"QueueDoesNotExist": exceptions.QueueDoesNotExist,
"BatchEntryIdsNotDistinct": exceptions.BatchEntryIdsNotDistinct,
"EmptyBatchRequest": exceptions.EmptyBatchRequest,
"OverLimit": exceptions.OverLimit,
"QueueNameExists": exceptions.QueueNameExists,
"InvalidMessageContents": exceptions.InvalidMessageContents,
"TooManyEntriesInBatchRequest": exceptions.TooManyEntriesInBatchRequest,
"QueueDeletedRecently": exceptions.QueueDeletedRecently,
"InvalidBatchEntryId": exceptions.InvalidBatchEntryId,
"BatchRequestTooLong": exceptions.BatchRequestTooLong,
"InvalidIdFormat": exceptions.InvalidIdFormat,
"ReceiptHandleIsInvalid": exceptions.ReceiptHandleIsInvalid,
"InvalidAttributeName": exceptions.InvalidAttributeName,
"MessageNotInflight": exceptions.MessageNotInflight,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs:
kwargs['host'] = region.endpoint
super(SQSConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def add_permission(self, queue_url, label, aws_account_ids, actions):
"""
Adds a permission to a queue for a specific `principal`_. This
allows for sharing access to the queue.
When you create a queue, you have full control access rights
for the queue. Only you (as owner of the queue) can grant or
deny permissions to the queue. For more information about
these permissions, see `Shared Queues`_ in the Amazon SQS
Developer Guide .
`AddPermission` writes an Amazon SQS-generated policy. If you
want to write your own policy, use SetQueueAttributes to
upload your policy. For more information about writing your
own policy, see `Using The Access Policy Language`_ in the
Amazon SQS Developer Guide .
Some API actions take lists of parameters. These lists are
specified using the `param.n` notation. Values of `n` are
integers starting from 1. For example, a parameter list with
two elements looks like this:
`&Attribute.1=this`
`&Attribute.2=that`
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type label: string
:param label: The unique identification of the permission you're
setting (e.g., `AliceSendMessage`). Constraints: Maximum 80
characters; alphanumeric characters, hyphens (-), and underscores
(_) are allowed.
:type aws_account_ids: list
:param aws_account_ids: The AWS account number of the `principal`_ who
will be given permission. The principal must have an AWS account,
but does not need to be signed up for Amazon SQS. For information
about locating the AWS account identification, see `Your AWS
Identifiers`_ in the Amazon SQS Developer Guide .
:type actions: list
:param actions: The action the client wants to allow for the specified
principal. The following are valid values: `* | SendMessage |
ReceiveMessage | DeleteMessage | ChangeMessageVisibility |
GetQueueAttributes | GetQueueUrl`. For more information about these
actions, see `Understanding Permissions`_ in the Amazon SQS
Developer Guide .
Specifying `SendMessage`, `DeleteMessage`, or `ChangeMessageVisibility`
for the `ActionName.n` also grants permissions for the
corresponding batch versions of those actions: `SendMessageBatch`,
`DeleteMessageBatch`, and `ChangeMessageVisibilityBatch`.
"""
params = {'QueueUrl': queue_url, 'Label': label, }
self.build_list_params(params,
aws_account_ids,
'AWSAccountIds.member')
self.build_list_params(params,
actions,
'Actions.member')
return self._make_request(
action='AddPermission',
verb='POST',
path='/', params=params)
def change_message_visibility(self, queue_url, receipt_handle,
visibility_timeout):
"""
Changes the visibility timeout of a specified message in a
queue to a new value. The maximum allowed timeout value you
can set the value to is 12 hours. This means you can't extend
the timeout of a message in an existing queue to more than a
total visibility timeout of 12 hours. (For more information
visibility timeout, see `Visibility Timeout`_ in the Amazon
SQS Developer Guide .)
For example, let's say you have a message and its default
message visibility timeout is 30 minutes. You could call
`ChangeMessageVisiblity` with a value of two hours and the
effective timeout would be two hours and 30 minutes. When that
time comes near you could again extend the time out by calling
ChangeMessageVisiblity, but this time the maximum allowed
timeout would be 9 hours and 30 minutes.
If you attempt to set the `VisibilityTimeout` to an amount
more than the maximum time left, Amazon SQS returns an error.
It will not automatically recalculate and increase the timeout
to the maximum time remaining. Unlike with a queue, when you
change the visibility timeout for a specific message, that
timeout value is applied immediately but is not saved in
memory for that message. If you don't delete a message after
it is received, the visibility timeout for the message the
next time it is received reverts to the original timeout
value, not the value you set with the
`ChangeMessageVisibility` action.
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type receipt_handle: string
:param receipt_handle: The receipt handle associated with the message
whose visibility timeout should be changed. This parameter is
returned by the ReceiveMessage action.
:type visibility_timeout: integer
:param visibility_timeout: The new value (in seconds - from 0 to 43200
- maximum 12 hours) for the message's visibility timeout.
"""
params = {
'QueueUrl': queue_url,
'ReceiptHandle': receipt_handle,
'VisibilityTimeout': visibility_timeout,
}
return self._make_request(
action='ChangeMessageVisibility',
verb='POST',
path='/', params=params)
def change_message_visibility_batch(self, queue_url, entries):
"""
Changes the visibility timeout of multiple messages. This is a
batch version of ChangeMessageVisibility. The result of the
action on each message is reported individually in the
response. You can send up to 10 ChangeMessageVisibility
requests with each `ChangeMessageVisibilityBatch` action.
Because the batch request can result in a combination of
successful and unsuccessful actions, you should check for
batch errors even when the call returns an HTTP status code of
200. Some API actions take lists of parameters. These lists
are specified using the `param.n` notation. Values of `n` are
integers starting from 1. For example, a parameter list with
two elements looks like this:
`&Attribute.1=this`
`&Attribute.2=that`
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type entries: list
:param entries: A list of receipt handles of the messages for which the
visibility timeout must be changed.
"""
params = {'QueueUrl': queue_url, }
self.build_complex_list_params(
params, entries,
'Entries.member',
('Id', 'ReceiptHandle', 'VisibilityTimeout'))
return self._make_request(
action='ChangeMessageVisibilityBatch',
verb='POST',
path='/', params=params)
def create_queue(self, queue_name, attributes=None):
"""
Creates a new queue, or returns the URL of an existing one.
When you request `CreateQueue`, you provide a name for the
queue. To successfully create a new queue, you must provide a
name that is unique within the scope of your own queues.
If you delete a queue, you must wait at least 60 seconds
before creating a queue with the same name.
You may pass one or more attributes in the request. If you do
not provide a value for any attribute, the queue will have the
default value for that attribute. Permitted attributes are the
same that can be set using SetQueueAttributes.
Use GetQueueUrl to get a queue's URL. GetQueueUrl requires
only the `QueueName` parameter.
If you provide the name of an existing queue, along with the
exact names and values of all the queue's attributes,
`CreateQueue` returns the queue URL for the existing queue. If
the queue name, attribute names, or attribute values do not
match an existing queue, `CreateQueue` returns an error.
Some API actions take lists of parameters. These lists are
specified using the `param.n` notation. Values of `n` are
integers starting from 1. For example, a parameter list with
two elements looks like this:
`&Attribute.1=this`
`&Attribute.2=that`
:type queue_name: string
:param queue_name: The name for the queue to be created.
:type attributes: map
:param attributes: A map of attributes with their corresponding values.
The following lists the names, descriptions, and values of the special
request parameters the `CreateQueue` action uses:
+ `DelaySeconds` - The time in seconds that the delivery of all
messages in the queue will be delayed. An integer from 0 to 900 (15
minutes). The default for this attribute is 0 (zero).
+ `MaximumMessageSize` - The limit of how many bytes a message can
contain before Amazon SQS rejects it. An integer from 1024 bytes (1
KiB) up to 262144 bytes (256 KiB). The default for this attribute
is 262144 (256 KiB).
+ `MessageRetentionPeriod` - The number of seconds Amazon SQS retains a
message. Integer representing seconds, from 60 (1 minute) to
1209600 (14 days). The default for this attribute is 345600 (4
days).
+ `Policy` - The queue's policy. A valid form-url-encoded policy. For
more information about policy structure, see `Basic Policy
Structure`_ in the Amazon SQS Developer Guide . For more
information about form-url-encoding, see `http://www.w3.org/MarkUp
/html-spec/html-spec_8.html#SEC8.2.1`_.
+ `ReceiveMessageWaitTimeSeconds` - The time for which a ReceiveMessage
call will wait for a message to arrive. An integer from 0 to 20
(seconds). The default for this attribute is 0.
+ `VisibilityTimeout` - The visibility timeout for the queue. An
integer from 0 to 43200 (12 hours). The default for this attribute
is 30. For more information about visibility timeout, see
`Visibility Timeout`_ in the Amazon SQS Developer Guide .
"""
params = {'QueueName': queue_name, }
if attributes is not None:
params['Attributes'] = attributes
return self._make_request(
action='CreateQueue',
verb='POST',
path='/', params=params)
def delete_message(self, queue_url, receipt_handle):
"""
Deletes the specified message from the specified queue. You
specify the message by using the message's `receipt handle`
and not the `message ID` you received when you sent the
message. Even if the message is locked by another reader due
to the visibility timeout setting, it is still deleted from
the queue. If you leave a message in the queue for longer than
the queue's configured retention period, Amazon SQS
automatically deletes it.
The receipt handle is associated with a specific instance of
receiving the message. If you receive a message more than
once, the receipt handle you get each time you receive the
message is different. When you request `DeleteMessage`, if you
don't provide the most recently received receipt handle for
the message, the request will still succeed, but the message
might not be deleted.
It is possible you will receive a message even after you have
deleted it. This might happen on rare occasions if one of the
servers storing a copy of the message is unavailable when you
request to delete the message. The copy remains on the server
and might be returned to you again on a subsequent receive
request. You should create your system to be idempotent so
that receiving a particular message more than once is not a
problem.
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type receipt_handle: string
:param receipt_handle: The receipt handle associated with the message
to delete.
"""
params = {
'QueueUrl': queue_url,
'ReceiptHandle': receipt_handle,
}
return self._make_request(
action='DeleteMessage',
verb='POST',
path='/', params=params)
def delete_message_batch(self, queue_url, entries):
"""
Deletes multiple messages. This is a batch version of
DeleteMessage. The result of the delete action on each message
is reported individually in the response.
Because the batch request can result in a combination of
successful and unsuccessful actions, you should check for
batch errors even when the call returns an HTTP status code of
200.
Some API actions take lists of parameters. These lists are
specified using the `param.n` notation. Values of `n` are
integers starting from 1. For example, a parameter list with
two elements looks like this:
`&Attribute.1=this`
`&Attribute.2=that`
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type entries: list
:param entries: A list of receipt handles for the messages to be
deleted.
"""
params = {'QueueUrl': queue_url, }
self.build_complex_list_params(
params, entries,
'Entries.member',
('Id', 'ReceiptHandle'))
return self._make_request(
action='DeleteMessageBatch',
verb='POST',
path='/', params=params)
def delete_queue(self, queue_url):
"""
Deletes the queue specified by the **queue URL**, regardless
of whether the queue is empty. If the specified queue does not
exist, Amazon SQS returns a successful response.
Use `DeleteQueue` with care; once you delete your queue, any
messages in the queue are no longer available.
When you delete a queue, the deletion process takes up to 60
seconds. Requests you send involving that queue during the 60
seconds might succeed. For example, a SendMessage request
might succeed, but after the 60 seconds, the queue and that
message you sent no longer exist. Also, when you delete a
queue, you must wait at least 60 seconds before creating a
queue with the same name.
We reserve the right to delete queues that have had no
activity for more than 30 days. For more information, see `How
Amazon SQS Queues Work`_ in the Amazon SQS Developer Guide .
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
"""
params = {'QueueUrl': queue_url, }
return self._make_request(
action='DeleteQueue',
verb='POST',
path='/', params=params)
def get_queue_attributes(self, queue_url, attribute_names=None):
"""
Gets attributes for the specified queue. The following
attributes are supported:
+ `All` - returns all values.
+ `ApproximateNumberOfMessages` - returns the approximate
number of visible messages in a queue. For more information,
see `Resources Required to Process Messages`_ in the Amazon
SQS Developer Guide .
+ `ApproximateNumberOfMessagesNotVisible` - returns the
approximate number of messages that are not timed-out and not
deleted. For more information, see `Resources Required to
Process Messages`_ in the Amazon SQS Developer Guide .
+ `VisibilityTimeout` - returns the visibility timeout for the
queue. For more information about visibility timeout, see
`Visibility Timeout`_ in the Amazon SQS Developer Guide .
+ `CreatedTimestamp` - returns the time when the queue was
created (epoch time in seconds).
+ `LastModifiedTimestamp` - returns the time when the queue
was last changed (epoch time in seconds).
+ `Policy` - returns the queue's policy.
+ `MaximumMessageSize` - returns the limit of how many bytes a
message can contain before Amazon SQS rejects it.
+ `MessageRetentionPeriod` - returns the number of seconds
Amazon SQS retains a message.
+ `QueueArn` - returns the queue's Amazon resource name (ARN).
+ `ApproximateNumberOfMessagesDelayed` - returns the
approximate number of messages that are pending to be added to
the queue.
+ `DelaySeconds` - returns the default delay on the queue in
seconds.
+ `ReceiveMessageWaitTimeSeconds` - returns the time for which
a ReceiveMessage call will wait for a message to arrive.
+ `RedrivePolicy` - returns the parameters for dead letter
queue functionality of the source queue. For more information
about RedrivePolicy and dead letter queues, see `Using Amazon
SQS Dead Letter Queues`_ in the Amazon SQS Developer Guide .
Going forward, new attributes might be added. If you are
writing code that calls this action, we recommend that you
structure your code so that it can handle new attributes
gracefully. Some API actions take lists of parameters. These
lists are specified using the `param.n` notation. Values of
`n` are integers starting from 1. For example, a parameter
list with two elements looks like this:
`&Attribute.1=this`
`&Attribute.2=that`
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type attribute_names: list
:param attribute_names: A list of attributes to retrieve information
for.
"""
params = {'QueueUrl': queue_url, }
if attribute_names is not None:
self.build_list_params(params,
attribute_names,
'AttributeNames.member')
return self._make_request(
action='GetQueueAttributes',
verb='POST',
path='/', params=params)
def get_queue_url(self, queue_name, queue_owner_aws_account_id=None):
"""
Returns the URL of an existing queue. This action provides a
simple way to retrieve the URL of an Amazon SQS queue.
To access a queue that belongs to another AWS account, use the
`QueueOwnerAWSAccountId` parameter to specify the account ID
of the queue's owner. The queue's owner must grant you
permission to access the queue. For more information about
shared queue access, see AddPermission or go to `Shared
Queues`_ in the Amazon SQS Developer Guide .
:type queue_name: string
:param queue_name: The name of the queue whose URL must be fetched.
Maximum 80 characters; alphanumeric characters, hyphens (-), and
underscores (_) are allowed.
:type queue_owner_aws_account_id: string
:param queue_owner_aws_account_id: The AWS account ID of the account
that created the queue.
"""
params = {'QueueName': queue_name, }
if queue_owner_aws_account_id is not None:
params['QueueOwnerAWSAccountId'] = queue_owner_aws_account_id
return self._make_request(
action='GetQueueUrl',
verb='POST',
path='/', params=params)
def list_dead_letter_source_queues(self, queue_url):
"""
Returns a list of your queues that have the RedrivePolicy
queue attribute configured with a dead letter queue.
:type queue_url: string
:param queue_url: The queue URL of a dead letter queue.
"""
params = {'QueueUrl': queue_url, }
return self._make_request(
action='ListDeadLetterSourceQueues',
verb='POST',
path='/', params=params)
def list_queues(self, queue_name_prefix=None):
"""
Returns a list of your queues. The maximum number of queues
that can be returned is 1000. If you specify a value for the
optional `QueueNamePrefix` parameter, only queues with a name
beginning with the specified value are returned.
:type queue_name_prefix: string
:param queue_name_prefix: A string to use for filtering the list
results. Only those queues whose name begins with the specified
string are returned.
"""
params = {}
if queue_name_prefix is not None:
params['QueueNamePrefix'] = queue_name_prefix
return self._make_request(
action='ListQueues',
verb='POST',
path='/', params=params)
def receive_message(self, queue_url, attribute_names=None,
max_number_of_messages=None, visibility_timeout=None,
wait_time_seconds=None):
"""
Retrieves one or more messages from the specified queue. Long
poll support is enabled by using the `WaitTimeSeconds`
parameter. For more information, see `Amazon SQS Long Poll`_
in the Amazon SQS Developer Guide .
Short poll is the default behavior where a weighted random set
of machines is sampled on a `ReceiveMessage` call. This means
only the messages on the sampled machines are returned. If the
number of messages in the queue is small (less than 1000), it
is likely you will get fewer messages than you requested per
`ReceiveMessage` call. If the number of messages in the queue
is extremely small, you might not receive any messages in a
particular `ReceiveMessage` response; in which case you should
repeat the request.
For each message returned, the response includes the
following:
+ Message body
+ MD5 digest of the message body. For information about MD5,
go to `http://www.faqs.org/rfcs/rfc1321.html`_.
+ Message ID you received when you sent the message to the
queue.
+ Receipt handle.
The receipt handle is the identifier you must provide when
deleting the message. For more information, see `Queue and
Message Identifiers`_ in the Amazon SQS Developer Guide .
You can provide the `VisibilityTimeout` parameter in your
request, which will be applied to the messages that Amazon SQS
returns in the response. If you do not include the parameter,
the overall visibility timeout for the queue is used for the
returned messages. For more information, see `Visibility
Timeout`_ in the Amazon SQS Developer Guide .
Going forward, new attributes might be added. If you are
writing code that calls this action, we recommend that you
structure your code so that it can handle new attributes
gracefully.
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type attribute_names: list
:param attribute_names:
A list of attributes that need to be returned along with each message.
The following lists the names and descriptions of the attributes that
can be returned:
+ `All` - returns all values.
+ `ApproximateFirstReceiveTimestamp` - returns the time when the
message was first received (epoch time in milliseconds).
+ `ApproximateReceiveCount` - returns the number of times a message has
been received but not deleted.
+ `SenderId` - returns the AWS account number (or the IP address, if
anonymous access is allowed) of the sender.
+ `SentTimestamp` - returns the time when the message was sent (epoch
time in milliseconds).
:type max_number_of_messages: integer
:param max_number_of_messages: The maximum number of messages to
return. Amazon SQS never returns more messages than this value but
may return fewer.
All of the messages are not necessarily returned.
:type visibility_timeout: integer
:param visibility_timeout: The duration (in seconds) that the received
messages are hidden from subsequent retrieve requests after being
retrieved by a `ReceiveMessage` request.
:type wait_time_seconds: integer
:param wait_time_seconds: The duration (in seconds) for which the call
will wait for a message to arrive in the queue before returning. If
a message is available, the call will return sooner than
WaitTimeSeconds.
"""
params = {'QueueUrl': queue_url, }
if attribute_names is not None:
self.build_list_params(params,
attribute_names,
'AttributeNames.member')
if max_number_of_messages is not None:
params['MaxNumberOfMessages'] = max_number_of_messages
if visibility_timeout is not None:
params['VisibilityTimeout'] = visibility_timeout
if wait_time_seconds is not None:
params['WaitTimeSeconds'] = wait_time_seconds
return self._make_request(
action='ReceiveMessage',
verb='POST',
path='/', params=params)
def remove_permission(self, queue_url, label):
"""
Revokes any permissions in the queue policy that matches the
specified `Label` parameter. Only the owner of the queue can
remove permissions.
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type label: string
:param label: The identification of the permission to remove. This is
the label added with the AddPermission action.
"""
params = {'QueueUrl': queue_url, 'Label': label, }
return self._make_request(
action='RemovePermission',
verb='POST',
path='/', params=params)
def send_message(self, queue_url, message_body, delay_seconds=None):
"""
Delivers a message to the specified queue. With Amazon SQS,
you now have the ability to send large payload messages that
are up to 256KB (262,144 bytes) in size. To send large
payloads, you must use an AWS SDK that supports SigV4 signing.
To verify whether SigV4 is supported for an AWS SDK, check the
SDK release notes.
The following list shows the characters (in Unicode) allowed
in your message, according to the W3C XML specification. For
more information, go to `http://www.w3.org/TR/REC-
xml/#charsets`_ If you send any characters not included in the
list, your request will be rejected.
#x9 | #xA | #xD | [#x20 to #xD7FF] | [#xE000 to #xFFFD] |
[#x10000 to #x10FFFF]
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type message_body: string
:param message_body: The message to send. String maximum 256 KB in
size. For a list of allowed characters, see the preceding important
note.
:type delay_seconds: integer
:param delay_seconds: The number of seconds (0 to 900 - 15 minutes) to
delay a specific message. Messages with a positive `DelaySeconds`
value become available for processing after the delay time is
finished. If you don't specify a value, the default value for the
queue applies.
"""
params = {
'QueueUrl': queue_url,
'MessageBody': message_body,
}
if delay_seconds is not None:
params['DelaySeconds'] = delay_seconds
return self._make_request(
action='SendMessage',
verb='POST',
path='/', params=params)
def send_message_batch(self, queue_url, entries):
    """
    Deliver up to ten messages to a queue in a single request.

    Batch version of SendMessage: the outcome of every entry is
    reported individually in the response, so callers must check for
    per-message failures even when the call returns HTTP 200.  Each
    message -- and the total payload -- may be at most 256 KB
    (262,144 bytes).  Entries without a `DelaySeconds` value use the
    queue default.

    :type queue_url: string
    :param queue_url: The URL of the Amazon SQS queue to take action on.

    :type entries: list
    :param entries: A list of SendMessageBatchRequestEntry items.
    """
    params = {'QueueUrl': queue_url}
    # Each batch entry is flattened into `Entries.member.N.<field>`
    # query parameters by the shared helper.
    entry_fields = ('Id', 'MessageBody', 'DelaySeconds')
    self.build_complex_list_params(
        params, entries, 'Entries.member', entry_fields)
    return self._make_request(
        action='SendMessageBatch', verb='POST', path='/', params=params)
def set_queue_attributes(self, queue_url, attributes):
    """
    Sets the value of one or more queue attributes.

    Recognized attribute names include `DelaySeconds`,
    `MaximumMessageSize`, `MessageRetentionPeriod`, `Policy`,
    `ReceiveMessageWaitTimeSeconds`, `VisibilityTimeout` and
    `RedrivePolicy` (see the Amazon SQS Developer Guide for their
    value ranges and defaults).  New attributes might be added over
    time, so structure calling code to handle unknown names
    gracefully.

    :type queue_url: string
    :param queue_url: The URL of the Amazon SQS queue to take action on.

    :type attributes: map
    :param attributes: A map of attribute names to the values to set.
    """
    params = {'QueueUrl': queue_url}
    # BUG FIX: the `attributes` argument was previously dropped on the
    # floor (see the old "TODO: NEED TO PROCESS COMPLEX ARG" comment),
    # so this call never changed anything.  Encode the map using the
    # Query API convention Attribute.N.Name / Attribute.N.Value with N
    # starting at 1; sorting keeps the parameter order deterministic.
    for index, name in enumerate(sorted(attributes), 1):
        params['Attribute.%d.Name' % index] = name
        params['Attribute.%d.Value' % index] = attributes[name]
    return self._make_request(
        action='SetQueueAttributes', verb='POST', path='/', params=params)
def _make_request(self, action, verb, path, params):
    """Issue a JSON-encoded service request and decode the reply.

    Returns the decoded JSON body on HTTP 200.  Otherwise raises the
    fault class registered in ``self._faults`` for the service error
    code, falling back to ``self.ResponseError``.
    """
    params['ContentType'] = 'JSON'
    response = self.make_request(action=action, verb='POST',
                                 path='/', params=params)
    body = response.read()
    boto.log.debug(body)
    decoded = json.loads(body)
    if response.status == 200:
        return decoded
    # Map the service error code to the corresponding fault class.
    fault_code = decoded.get('Error', {}).get('Code', None)
    exception_class = self._faults.get(fault_code, self.ResponseError)
    raise exception_class(response.status, response.reason, body=decoded)
| 44.326812 | 81 | 0.652518 |
56b854f387f4cabfaff693407133dbeba63b696b | 1,716 | py | Python | groot-backend/django/code/groot/models/room.py | ayushmantripathy9/We-Are-Groot | db919dbe24363d02ee08ea79520813e9dc9af174 | [
"MIT"
] | null | null | null | groot-backend/django/code/groot/models/room.py | ayushmantripathy9/We-Are-Groot | db919dbe24363d02ee08ea79520813e9dc9af174 | [
"MIT"
] | null | null | null | groot-backend/django/code/groot/models/room.py | ayushmantripathy9/We-Are-Groot | db919dbe24363d02ee08ea79520813e9dc9af174 | [
"MIT"
] | null | null | null | from django.db import models
from django.conf import settings
class Room(models.Model):
    """
    The Room Model.

    Represents a room created by users for interaction.

    Attributes:
        - room_name : CharField
            - The name of the room
        - room_code : CharField
            - The unique 9-character room code
        - participants: ManyToManyField
            - Participants (users) currently present in the room
        - participants_history: ManyToManyField
            - Any participant (user) that ever attended the room
        - start_time: DateTimeField
            - The creation time of the room
        - end_time: DateTimeField
            - The end time of the room (null while the room is live)
    """
    # stores the name of the room
    room_name = models.CharField(
        max_length=255,
        null=False,
        blank=False
    )
    # stores the unique 9-character room_code
    room_code = models.CharField(
        max_length=9,
        null=False,
        blank=False
    )
    # the participants who are currently part of the room
    participants = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='rooms_of_user',
        blank=True
    )
    # every participant that has ever joined the room
    participants_history = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='past_rooms_of_user',
        blank=True
    )
    # creation time of the room (set automatically on insert)
    start_time = models.DateTimeField(
        auto_now_add=True
    )
    # set when the room ends; nullable so live rooms can leave it unset
    end_time = models.DateTimeField(
        null=True,
        blank=True
    )

    def __str__(self) :
        return f"{self.room_name}, code : {self.room_code}"
| 24.514286 | 70 | 0.588578 |
da608ae174c2eca19a2e02bebbdb05fef6020444 | 15,920 | py | Python | project/vin_fake_ocr/car_exam.py | zylo117/Mask_RCNN | c484f5fa9136ca68a4076833f99dd56bef8d5c7e | [
"MIT"
] | null | null | null | project/vin_fake_ocr/car_exam.py | zylo117/Mask_RCNN | c484f5fa9136ca68a4076833f99dd56bef8d5c7e | [
"MIT"
] | null | null | null | project/vin_fake_ocr/car_exam.py | zylo117/Mask_RCNN | c484f5fa9136ca68a4076833f99dd56bef8d5c7e | [
"MIT"
] | 1 | 2020-11-19T01:35:22.000Z | 2020-11-19T01:35:22.000Z | """
Mask R-CNN
Train on the toy Balloon dataset and implement color splash effect.
Copyright (c) 2018 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from pre-trained COCO weights
python3 car_exam.py train --dataset=/path/to/car_exam/dataset --weights=coco
# Resume training a model that you had trained earlier
python3 car_exam.py train --dataset=/path/to/car_exam/dataset --weights=last
# Train a new model starting from ImageNet weights
python3 car_exam.py train --dataset=/path/to/car_exam/dataset --weights=imagenet
# Apply color splash to an image
python3 car_exam.py splash --weights=/path/to/weights/file.h5 --image=<URL or path to file>
# Apply color splash to video using the last weights you trained
python3 car_exam.py splash --weights=last --video=<URL or path to file>
"""
import os
from keras.callbacks import EarlyStopping
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'
import sys
import json
import datetime
import numpy as np
import skimage.draw
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from callbacks.trainingmonitor import TrainMonitor
from mrcnn.config import Config
from mrcnn import model as modellib, utils
# Path to trained weights file
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
############################################################
# Configurations
############################################################
class CarExamConfig(Config):
    """Configuration for training on the car-exam dataset.

    Derives from the base Config class and overrides some values.
    """
    # Give the configuration a recognizable name
    NAME = "car_exam"
    # NUMBER OF GPUs to use. For CPU training, use 1
    GPU_COUNT = 4
    # We use a GPU with 12GB memory, which can fit two images.
    # Adjust down if you use a smaller GPU.
    IMAGES_PER_GPU = 2
    # Number of classes (including background); must match len(OBJ_LIST) + 1
    NUM_CLASSES = 1 + 2  # Background + car_exam...
    # Number of training steps per epoch
    STEPS_PER_EPOCH = 100
    # Skip detections with < 90% confidence
    DETECTION_MIN_CONFIDENCE = 0.9
    # custom history
    EPOCH = 100  # train till EPOCH xx, doesn't mean training another xx EPOCHs
    # previously-used values kept for reference (1-8):
    # LEARNING_RATE = 0.001
    LEARNING_RATE = 0.0005
    # EPOCH = 100
############################################################
# Dataset
############################################################
# Remember, class id of BG(BackGround) is always 0!!! Remember to add 1 to class ids.
# Full six-class label set kept for reference:
# OBJ_LIST = ['license_plate', 'tripod', 'car_light', 'car_logo', 'safe_belt', 'people']
OBJ_LIST = ['license_plate', 'tripod']
class CarExamDataset(utils.Dataset):
    """Mask R-CNN dataset for car-exam images annotated with the VGG
    Image Annotator (VIA); each polygon region carries an 'obj'
    attribute naming one of the classes in OBJ_LIST."""

    def load_car_exam(self, dataset_dir, subset):
        """Load a subset of the CarExam dataset.
        dataset_dir: Root directory of the dataset.
        subset: Subset to load: train or val
        """
        # Register one class per entry in OBJ_LIST (ids start at 1; 0 is BG).
        # self.add_class("car_exam", 1, "license_plate")
        for i, obj in enumerate(OBJ_LIST):
            self.add_class('car_exam', i + 1, obj)
        # Train or validation dataset?
        assert subset in ["train", "val"]
        dataset_dir = os.path.join(dataset_dir, subset)
        # Load annotations
        # VGG Image Annotator saves each image in the form:
        # { 'filename': '28503151_5b5b7ec140_b.jpg',
        #   'regions': {
        #       '0': {
        #           'region_attributes': {},
        #           'shape_attributes': {
        #               'all_points_x': [...],
        #               'all_points_y': [...],
        #               'name': 'polygon'}},
        #       ... more regions ...
        #   },
        #   'size': 100202
        # }
        # We mostly care about the x and y coordinates of each region.
        annotations = json.load(open(os.path.join(dataset_dir, "via_region_data.json")))
        annotations = list(annotations.values())  # don't need the dict keys
        # The VIA tool saves images in the JSON even if they don't have any
        # annotations. Skip unannotated images.
        annotations = [a for a in annotations if a['regions']]
        # Add images
        for a in annotations:
            # Get the x, y coordinates of the points of the polygons that make
            # up the outline of each object instance. They are stored in the
            # shape_attributes (see json format above).
            # VIA 2.0 changed 'regions' from a dict to a list; support both.
            try:
                # old version, dict
                polygons = [r['shape_attributes'] for r in a['regions'].values()]
                polygons_labels = [r['region_attributes']['obj'] for r in a['regions'].values()]
            except AttributeError:
                # new version, list
                polygons = [r['shape_attributes'] for r in a['regions']]
                polygons_labels = [r['region_attributes']['obj'] for r in a['regions']]
            # load_mask() needs the image size to convert polygons to masks.
            # Unfortunately, VIA doesn't include it in JSON, so we must read
            # the image. This is only manageable since the dataset is tiny.
            image_path = os.path.join(dataset_dir, a['filename'])
            image = skimage.io.imread(image_path)
            height, width = image.shape[:2]
            self.add_image(
                "car_exam",
                image_id=a['filename'],  # use file name as a unique image id
                path=image_path,
                width=width, height=height,
                polygons=polygons,
                polygons_labels=polygons_labels)

    def load_mask(self, image_id):
        """Generate instance masks for an image.
        Returns:
        masks: A bool array of shape [height, width, instance count] with
            one mask per instance.
        class_ids: a 1D array of class IDs of the instance masks.
        """
        # If not a car_exam dataset image, delegate to parent class.
        image_info = self.image_info[image_id]
        if image_info["source"] != "car_exam":
            return super(self.__class__, self).load_mask(image_id)
        # Convert polygons to a bitmap mask of shape
        # [height, width, instance_count]
        info = self.image_info[image_id]
        mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
                        dtype=np.uint8)
        class_ids = []
        for i, p in enumerate(info["polygons"]):
            try:
                # Get indexes of pixels inside the polygon and set them to 1
                rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
                mask[rr, cc, i] = 1
                try:
                    class_ids.append(OBJ_LIST.index(info['polygons_labels'][i]) + 1)
                except ValueError:
                    class_ids.append(0)  # 0 is BG (label not found in OBJ_LIST)
            except IndexError:
                # NOTE(review): silently skips polygons whose coordinates fall
                # outside the mask bounds; channel i stays empty and no class
                # id is appended, so masks and class_ids can get out of sync
                # -- confirm this is intended.
                pass
        # Return mask, and array of class IDs of each instance.
        return mask.astype(np.bool), np.asarray(class_ids, dtype=np.int32)

    def image_reference(self, image_id):
        """Return the path of the image."""
        info = self.image_info[image_id]
        if info["source"] == "car_exam":
            return info["path"]
        else:
            # NOTE(review): the parent's result is not returned, so this
            # branch yields None -- presumably a missing `return`.
            super(self.__class__, self).image_reference(image_id)
def train(model):
    """Train the model on the car-exam dataset (network heads only).

    NOTE(review): relies on the module-level globals ``args`` (for the
    dataset path) and ``config`` (for learning rate / epochs), both set
    in the ``__main__`` block -- confirm before reusing elsewhere.
    """
    # Training dataset.
    dataset_train = CarExamDataset()
    dataset_train.load_car_exam(args.dataset, "train")
    dataset_train.prepare()
    # Validation dataset
    dataset_val = CarExamDataset()
    dataset_val.load_car_exam(args.dataset, "val")
    dataset_val.prepare()
    # *** This training schedule is an example. Update to your needs ***
    # Since we're using a very small dataset, and starting from
    # COCO trained weights, we don't need to train too long. Also,
    # no need to train all layers, just the heads should do it.
    print("Training network heads")
    # training monitor plots/records progress; EarlyStopping halts after
    # 5 epochs without improvement
    callbacks = [TrainMonitor(model.log_dir + '/monitor.png',
                              jsonPath=model.log_dir + '/monitor.json',
                              startAt=model.epoch), EarlyStopping(patience=5)]
    model.train(dataset_train, dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=config.EPOCH,
                custom_callbacks=callbacks,
                layers='heads')
    # layers='5+')
def color_splash(image, mask):
    """Apply color splash effect.

    Keeps the original colors wherever any instance mask is set and
    renders everything else in grayscale.

    image: RGB image [height, width, 3]
    mask: instance segmentation mask [height, width, instance count]

    Returns result image.
    """
    # Grayscale copy of the image; still 3 RGB channels, rescaled to 0-255.
    grayscale = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255
    if mask.shape[-1] == 0:
        # No instances detected: the whole frame is grayscale.
        return grayscale.astype(np.uint8)
    # Treat all instances as one by collapsing the mask into a single layer.
    any_instance = np.sum(mask, -1, keepdims=True) >= 1
    return np.where(any_instance, image, grayscale).astype(np.uint8)
def detect_and_color_splash(model, image_path=None, video_path=None):
    """Run detection and save a color-splash rendering.

    Exactly one of ``image_path`` / ``video_path`` should be given;
    output is written to the working directory with a timestamped name.
    """
    assert image_path or video_path

    # Image or video?
    if image_path:
        # Run model detection and generate the color splash effect.
        # NOTE(review): this branch reads `args.image` (a module-level
        # global) instead of the `image_path` parameter -- presumably a
        # copy-paste slip; confirm before calling with a different path.
        print("Running on {}".format(args.image))
        # Read image
        image = skimage.io.imread(args.image)
        # Detect objects
        r = model.detect([image], verbose=1)[0]
        # Color splash
        splash = color_splash(image, r['masks'])
        # Save output
        file_name = "splash_{:%Y%m%dT%H%M%S}.png".format(datetime.datetime.now())
        skimage.io.imsave(file_name, splash)
    elif video_path:
        import cv2
        # Video capture
        vcapture = cv2.VideoCapture(video_path)
        width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = vcapture.get(cv2.CAP_PROP_FPS)
        # Define codec and create video writer
        file_name = "splash_{:%Y%m%dT%H%M%S}.avi".format(datetime.datetime.now())
        vwriter = cv2.VideoWriter(file_name,
                                  cv2.VideoWriter_fourcc(*'MJPG'),
                                  fps, (width, height))
        count = 0
        success = True
        while success:
            print("frame: ", count)
            # Read next image
            success, image = vcapture.read()
            if success:
                # OpenCV returns images as BGR, convert to RGB
                image = image[..., ::-1]
                # Detect objects
                r = model.detect([image], verbose=0)[0]
                # Color splash
                splash = color_splash(image, r['masks'])
                # RGB -> BGR to save image to video
                splash = splash[..., ::-1]
                # Add image to video writer
                vwriter.write(splash)
                count += 1
        vwriter.release()
    print("Saved to ", file_name)
############################################################
# Training
############################################################
if __name__ == '__main__':
    import argparse

    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description='Train Mask R-CNN to detect car_exams.')
    parser.add_argument("command",
                        metavar="<command>",
                        help="'train' or 'splash'")
    parser.add_argument('-d', '--dataset', required=False,
                        metavar="/path/to/car_exam/dataset/",
                        help='Directory of the CarExam dataset')
    parser.add_argument('-w', '--weights', required=True,
                        metavar="/path/to/weights.h5",
                        help="Path to weights .h5 file or 'coco'")
    parser.add_argument('-l', '--logs', required=False,
                        default=DEFAULT_LOGS_DIR,
                        metavar="/path/to/logs/",
                        help='Logs and checkpoints directory (default=logs/)')
    parser.add_argument('-i', '--image', required=False,
                        metavar="path or URL to image",
                        help='Image to apply the color splash effect on')
    parser.add_argument('-v', '--video', required=False,
                        metavar="path or URL to video",
                        help='Video to apply the color splash effect on')
    args = parser.parse_args()

    # Validate arguments: each command has its own required options.
    if args.command == "train":
        assert args.dataset, "Argument --dataset is required for training"
    elif args.command == "splash":
        assert args.image or args.video, \
            "Provide --image or --video to apply color splash"

    print("Weights: ", args.weights)
    print("Dataset: ", args.dataset)
    print("Logs: ", args.logs)

    # Configurations: inference uses batch size 1 (one image at a time).
    if args.command == "train":
        config = CarExamConfig()
    else:
        class InferenceConfig(CarExamConfig):
            # Set batch size to 1 since we'll be running inference on
            # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
            GPU_COUNT = 1
            IMAGES_PER_GPU = 1
        config = InferenceConfig()
    config.display()

    # Create model ("resume_training" when continuing from last weights)
    if args.weights.lower() == "last":
        model = modellib.MaskRCNN(mode="resume_training", config=config,
                                  model_dir=args.logs)
    elif args.command == "train":
        model = modellib.MaskRCNN(mode="training", config=config,
                                  model_dir=args.logs)
    else:
        model = modellib.MaskRCNN(mode="inference", config=config,
                                  model_dir=args.logs)

    # Select weights file to load
    if args.weights.lower() == "coco":
        weights_path = COCO_WEIGHTS_PATH
        # Download weights file if missing
        if not os.path.exists(weights_path):
            utils.download_trained_weights(weights_path)
    elif args.weights.lower() == "last":
        # Find last trained weights
        weights_path = model.find_last()
    elif args.weights.lower() == "imagenet":
        # Start from ImageNet trained weights
        weights_path = model.get_imagenet_weights()
    else:
        weights_path = args.weights

    # Load weights
    print("Loading weights ", weights_path)
    if args.weights.lower() == "coco":
        # Exclude the last layers because they require a matching
        # number of classes
        model.load_weights(weights_path, by_name=True, exclude=[
            "mrcnn_class_logits", "mrcnn_bbox_fc",
            "mrcnn_bbox", "mrcnn_mask"])
    else:
        model.load_weights(weights_path, by_name=True)

    # Train or evaluate
    if args.command == "train":
        train(model)
    elif args.command == "splash":
        detect_and_color_splash(model, image_path=args.image,
                                video_path=args.video)
    else:
        print("'{}' is not recognized. "
              "Use 'train' or 'splash'".format(args.command))
| 37.904762 | 96 | 0.58593 |
7f2ac45c2825e894a10e33bce3c049eeb4607735 | 7,436 | py | Python | utils.py | carlos-bologna/kaggle_diabetic_retinopathy | 6c283fa906a94652965999f29d13d2b866ba8e7c | [
"MIT"
] | null | null | null | utils.py | carlos-bologna/kaggle_diabetic_retinopathy | 6c283fa906a94652965999f29d13d2b866ba8e7c | [
"MIT"
] | null | null | null | utils.py | carlos-bologna/kaggle_diabetic_retinopathy | 6c283fa906a94652965999f29d13d2b866ba8e7c | [
"MIT"
] | null | null | null | import re
import glob
import os
import sys
import skimage
import numpy as np
import theano.tensor as T
from sklearn.cross_validation import StratifiedShuffleSplit
import string
import lasagne as nn
def padtosquare(im):
    """Pad a 2-D grayscale image to a square with constant value 1.

    The shorter axis is padded on both sides, centering the original
    image; when the size difference is odd the extra pixel goes at the
    end (floor before, ceil after).

    Note: ``skimage.util.pad`` was only a deprecated alias of
    ``numpy.pad`` and has been removed from modern scikit-image, so we
    call ``numpy.pad`` directly -- the result is identical.
    """
    w, l = im.shape
    if w < l:
        pad_size = (l - w) / 2.0
        widths = ((int(np.floor(pad_size)), int(np.ceil(pad_size))), (0, 0))
    else:
        pad_size = (w - l) / 2.0
        widths = ((0, 0), (int(np.floor(pad_size)), int(np.ceil(pad_size))))
    return np.pad(im, pad_width=widths, mode='constant',
                  constant_values=(1, 1))
def one_hot(vec, m=None):
    """Encode integer labels as one-hot rows of an int32 matrix.

    When ``m`` (the number of classes) is omitted it is inferred as
    ``max(vec) + 1``.
    """
    num_classes = int(np.max(vec)) + 1 if m is None else m
    # Indexing the identity matrix by label picks out one-hot rows.
    return np.identity(num_classes)[vec].astype('int32')
def hms(seconds):
    """Format a duration in seconds as an ``HH:MM:SS`` string."""
    total = int(np.floor(seconds))
    hours, remainder = divmod(total, 3600)
    minutes, secs = divmod(remainder, 60)
    return "%02d:%02d:%02d" % (hours, minutes, secs)
def rms(x, axis=None, epsilon=1e-12):
    """Root-mean-square of a Theano tensor expression.

    ``epsilon`` keeps the sqrt argument strictly positive so the
    expression (and its gradient) stays defined when the mean square
    is zero.
    """
    return T.sqrt(T.mean(T.sqr(x), axis=axis) + epsilon)
# TODO clean this mess up
def split_data(train_labels, labels_split, valid_size=20,
               SEED=42, stratified=True, pairs=False):
    """Split labelled images into train / validation id and label sets.

    train_labels : DataFrame with 'image' and 'level' columns.
    labels_split : DataFrame with 'id', 'eye' and 'level' columns --
        apparently two rows per patient, one per eye (see the `eye`
        asserts below); only used when ``pairs`` is True.
    valid_size : validation share in percent; returns None if >= 100.
    stratified : stratify the split on the grade with sklearn's
        StratifiedShuffleSplit (legacy pre-0.18 API).
    pairs : treat the left/right eye images of one patient as a unit.

    Returns (id_train, y_train, id_valid, y_valid).
    """
    if valid_size >= 100:
        return None
    num_all = len(train_labels)
    np.random.seed(SEED)
    if stratified:
        if pairs:
            # TODO: Taking max level to stratify for now.
            label_pairs = labels_split.groupby('id')['level'].max()
            label_pairs.index = map(int, label_pairs.index)
            label_pairs = label_pairs.sort_index(ascending=True)
            sss = StratifiedShuffleSplit(label_pairs.values, n_iter=1,
                                         test_size=0.01 * valid_size,
                                         indices=None, random_state=SEED)
        else:
            sss = StratifiedShuffleSplit(train_labels.level, n_iter=1, test_size=0.01 * valid_size, indices=None, random_state=SEED)
        # n_iter=1, so this loop just binds the single (train, test) split.
        for ix_train, ix_test in sss:
            pass
        # TODO: has no next(), need to figure this out
    else:
        shuffled_index = np.random.permutation(np.arange(num_all))
        num_valid = num_all // (100 / valid_size)
        num_train = num_all - num_valid
        ix_train = shuffled_index[:num_train]
        ix_test = shuffled_index[num_train:]
    if pairs:
        # NOTE(review): `label_pairs` is only defined when stratified=True;
        # pairs=True with stratified=False would raise NameError here --
        # confirm that combination is never used.
        id_train = np.sort(np.asarray(label_pairs.index[ix_train]))
        y_train_left = labels_split[
            labels_split.id.isin(id_train)].level.values[::2]
        y_train_right = labels_split[
            labels_split.id.isin(id_train)].level.values[1::2]
        y_train = np.vstack([y_train_left, y_train_right]).T
        # TODO are they sorted
        assert labels_split[
            labels_split.id.isin(id_train)].eye[::2].unique().shape[0] == 1
        assert labels_split[
            labels_split.id.isin(id_train)].eye[1::2].unique().shape[0] == 1
        id_valid = np.sort(np.asarray(label_pairs.index[ix_test]))
        y_valid_left = labels_split[
            labels_split.id.isin(id_valid)].level.values[::2]
        y_valid_right = labels_split[
            labels_split.id.isin(id_valid)].level.values[1::2]
        y_valid = np.vstack([y_valid_left, y_valid_right]).T
        # TODO are they sorted
        assert labels_split[
            labels_split.id.isin(id_valid)].eye[::2].unique().shape[0] == 1
        assert labels_split[
            labels_split.id.isin(id_valid)].eye[1::2].unique().shape[0] == 1
    else:
        id_train = train_labels.ix[ix_train].image.values
        y_train = train_labels.ix[ix_train].level.values
        id_valid = train_labels.ix[ix_test].image.values
        y_valid = train_labels.ix[ix_test].level.values
    return id_train, y_train, id_valid, y_valid
def oversample_set(id_train, y_train, coefs):
    """Oversample grades 1-4 by replicating their rows.

    For each grade ``level`` in 1..4, every row of ``y_train`` that
    contains ``level`` is appended ``coefs[level]`` additional times
    (together with its identifier from ``id_train``).

    Returns (ids_oversampled, labels_oversampled) where the ids are a
    plain list and the labels a stacked numpy array.
    """
    def _rows_containing(level):
        # Indices of label rows in which `level` appears.
        hits = np.apply_along_axis(lambda row: level in row, 1, y_train)
        return list(np.where(hits)[0])

    ids_oversampled = list(id_train)
    labels_oversampled = np.array(y_train)
    for level in (1, 2, 3, 4):
        # List repetition replicates each matching index coefs[level] times.
        replicated = coefs[level] * _rows_containing(level)
        ids_oversampled += list(id_train[replicated])
        labels_oversampled = np.vstack([labels_oversampled,
                                        y_train[replicated]])
    return ids_oversampled, labels_oversampled
def get_img_ids_from_iter(ar):
    """Extract the integer image id from filenames like '123_left.jpeg'.

    Parameters
    ----------
    ar : iterable of str
        Filenames (or paths) whose basenames start with '<id>_<side>'.

    Returns
    -------
    list of int
        One id per input filename, in order.
    """
    prog = re.compile(r'\b(\d+)_(\w+)')
    test_ids = []
    for img_fn in ar:
        match = prog.search(img_fn)
        if match is None:
            # Report the offending filename and abort.  The original code
            # used a Python 2 print statement and sys.exit(0), which
            # signalled *success* to the shell on failure; exit non-zero.
            print(img_fn)
            sys.exit(1)
        test_id, _test_side = match.groups()
        test_ids.append(int(test_id))
    return test_ids
def get_img_ids_from_dir(img_dir):
    """Return the image ids of every ``*.jpeg`` file inside ``img_dir``."""
    pattern = os.path.join(img_dir, "*.jpeg")
    return get_img_ids_from_iter(glob.glob(pattern))
def softmax(ar, temp=1):
    """Temperature-scaled softmax over the rows of a 2-D array.

    Subtracting the per-row maximum before exponentiating keeps the
    computation numerically stable (no overflow for large logits)
    without changing the result, since softmax is shift-invariant.
    ``float(temp)`` forces true division even for integer inputs
    (the original ``ar / temp`` floor-divided int arrays on Python 2).
    """
    scaled = ar / float(temp)
    shifted = scaled - scaled.max(axis=1, keepdims=True)
    e = np.exp(shifted)
    return e / e.sum(axis=1)[:, None]
def architecture_string(layer):
    """Render a human-readable summary of a Lasagne network.

    Walks every layer reachable from ``layer`` and emits one line per
    layer: index, class name, output shape, plus filter/pool size,
    dropout probability, stride, learning-rate scale and a '[NT]'
    marker for non-trainable parameters where applicable.
    """
    model_arch = ''
    for i, layer in enumerate(nn.layers.get_all_layers(layer)):
        # Left-pad the class name to a 28-char column.
        # NOTE(review): `string.ljust` is Python-2-only; `str.ljust`
        # would be the portable spelling.
        name = string.ljust(layer.__class__.__name__, 28)
        model_arch += " %2i %s %s " % (i, name,
                                       nn.layers.get_output_shape(layer))
        # Convolution filter size (first dimension only).
        if hasattr(layer, 'filter_size'):
            model_arch += str(layer.filter_size[0])
            model_arch += ' //'
        # Pooling size; may be an int or a tuple depending on the layer.
        elif hasattr(layer, 'pool_size'):
            if isinstance(layer.pool_size, int):
                model_arch += str(layer.pool_size)
            else:
                model_arch += str(layer.pool_size[0])
            model_arch += ' //'
        # Dropout probability.
        if hasattr(layer, 'p'):
            model_arch += ' [%.2f]' % layer.p
        # Stride (first dimension only).
        if hasattr(layer, 'stride'):
            model_arch += str(layer.stride[0])
        # Non-default learning-rate scaling.
        if hasattr(layer, 'learning_rate_scale'):
            if layer.learning_rate_scale != 1.0:
                model_arch += ' [lr_scale=%.2f]' % layer.learning_rate_scale
        # Mark parameters that are excluded from training.
        if hasattr(layer, 'params'):
            for param in layer.params:
                if 'trainable' not in layer.params[param]:
                    model_arch += ' [NT] '
        model_arch += '\n'
    return model_arch
| 33.345291 | 132 | 0.572754 |
eda82c3da8c1dd921bc4db8b173ca6777e22836c | 344 | py | Python | products/utilities.py | endonte/quotation2 | ddb9bf2f98ea8b45c9c43f272e61bb85fb563613 | [
"MIT"
] | null | null | null | products/utilities.py | endonte/quotation2 | ddb9bf2f98ea8b45c9c43f272e61bb85fb563613 | [
"MIT"
] | null | null | null | products/utilities.py | endonte/quotation2 | ddb9bf2f98ea8b45c9c43f272e61bb85fb563613 | [
"MIT"
] | null | null | null | from django.urls import reverse_lazy
from jqgrid import JqGrid
from .models import Product
class ProductGrid(JqGrid):
    # jqGrid definition for the Product model.
    model = Product
    # Columns exposed in the grid, in display order.
    fields = ['id', 'product_name', 'category', 'uom']
    # Endpoint that serves the grid data.
    url = reverse_lazy('grid_handler')
    caption = 'Product Grid'
    # Per-column overrides: 'id' is read-only and kept narrow.
    colmodel_overrides = {
        'id': { 'editable':False, 'width': 10 },
    }
| 26.461538 | 54 | 0.659884 |
17459b17e5209741d9ac469493e24478ee5ad2ed | 27,350 | py | Python | pyannote-parser-develop/tests/pyannote/metrics/diarization.py | cvossos2046/visual_speaker_diarization_svm_project | bdc57d893ad9c04145568310c068f9c3e2305cf8 | [
"MIT"
] | null | null | null | pyannote-parser-develop/tests/pyannote/metrics/diarization.py | cvossos2046/visual_speaker_diarization_svm_project | bdc57d893ad9c04145568310c068f9c3e2305cf8 | [
"MIT"
] | null | null | null | pyannote-parser-develop/tests/pyannote/metrics/diarization.py | cvossos2046/visual_speaker_diarization_svm_project | bdc57d893ad9c04145568310c068f9c3e2305cf8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2012-2019 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
"""Metrics for diarization"""
import numpy as np
from matcher import HungarianMapper
from matcher import GreedyMapper
from base import BaseMetric, f_measure
from utils import UEMSupportMixin
from identification import IdentificationErrorRate
DER_NAME = 'diarization error rate'
class DiarizationErrorRate(IdentificationErrorRate):
    """Diarization error rate

    First, the optimal mapping between reference and hypothesis labels
    is obtained using the Hungarian algorithm. Then, the actual diarization
    error rate is computed as the identification error rate with each hypothesis
    label translated into the corresponding reference label.

    Parameters
    ----------
    collar : float, optional
        Duration (in seconds) of collars removed from evaluation around
        boundaries of reference segments.
    skip_overlap : bool, optional
        Set to True to not evaluate overlap regions.
        Defaults to False (i.e. keep overlap regions).

    Usage
    -----
    * Diarization error rate between `reference` and `hypothesis` annotations

        >>> metric = DiarizationErrorRate()
        >>> reference = Annotation(...)           # doctest: +SKIP
        >>> hypothesis = Annotation(...)          # doctest: +SKIP
        >>> value = metric(reference, hypothesis) # doctest: +SKIP

    * Compute global diarization error rate and confidence interval
      over multiple documents

        >>> for reference, hypothesis in ...      # doctest: +SKIP
        ...     metric(reference, hypothesis)     # doctest: +SKIP
        >>> global_value = abs(metric)            # doctest: +SKIP
        >>> mean, (lower, upper) = metric.confidence_interval() # doctest: +SKIP

    * Get diarization error rate detailed components

        >>> components = metric(reference, hypothesis, detailed=True) #doctest +SKIP

    * Get accumulated components

        >>> components = metric[:]                # doctest: +SKIP
        >>> metric['confusion']                   # doctest: +SKIP

    See Also
    --------
    :class:`pyannote.metric.base.BaseMetric`: details on accumulation
    :class:`pyannote.metric.identification.IdentificationErrorRate`: identification error rate
    """

    @classmethod
    def metric_name(cls):
        # Human-readable identifier used by the metric base class.
        return DER_NAME

    def __init__(self, collar=0.0, skip_overlap=False, **kwargs):
        super(DiarizationErrorRate, self).__init__(
            collar=collar, skip_overlap=skip_overlap, **kwargs)
        # Hungarian algorithm computes the optimal hypothesis->reference
        # label assignment.
        self.mapper_ = HungarianMapper()

    def optimal_mapping(self, reference, hypothesis, uem=None):
        """Optimal label mapping

        Parameters
        ----------
        reference : Annotation
        hypothesis : Annotation
            Reference and hypothesis diarization
        uem : Timeline
            Evaluation map

        Returns
        -------
        mapping : dict
            Mapping between hypothesis (key) and reference (value) labels
        """
        # NOTE that this 'uemification' will not be called when
        # 'optimal_mapping' is called from 'compute_components' as it
        # has already been done in 'compute_components'
        if uem:
            reference, hypothesis = self.uemify(reference, hypothesis, uem=uem)
        # call hungarian mapper
        return self.mapper_(hypothesis, reference)

    def compute_components(self, reference, hypothesis, uem=None, **kwargs):
        # crop reference and hypothesis to evaluated regions (uem)
        # remove collars around reference segment boundaries
        # remove overlap regions (if requested)
        reference, hypothesis, uem = self.uemify(
            reference, hypothesis, uem=uem,
            collar=self.collar, skip_overlap=self.skip_overlap,
            returns_uem=True)
        # NOTE that this 'uemification' must be done here because it
        # might have an impact on the search for the optimal mapping.
        # make sure reference only contains string labels ('A', 'B', ...)
        reference = reference.rename_labels(generator='string')
        # make sure hypothesis only contains integer labels (1, 2, ...)
        # so the two label spaces cannot collide before mapping
        hypothesis = hypothesis.rename_labels(generator='int')
        # optimal (int --> str) mapping
        mapping = self.optimal_mapping(reference, hypothesis)
        # compute identification error rate based on mapped hypothesis
        # NOTE that collar is set to 0.0 because 'uemify' has already
        # been applied (same reason for setting skip_overlap to False)
        mapped = hypothesis.rename_labels(mapping=mapping)
        return super(DiarizationErrorRate, self)\
            .compute_components(reference, mapped, uem=uem,
                                collar=0.0, skip_overlap=False,
                                **kwargs)
class GreedyDiarizationErrorRate(IdentificationErrorRate):
    """Greedy diarization error rate.

    A greedy mapping between reference and hypothesis labels is computed
    first; the diarization error rate is then the identification error rate
    obtained after translating every hypothesis label into its mapped
    reference label.

    Parameters
    ----------
    collar : float, optional
        Duration (in seconds) of collars removed from evaluation around
        boundaries of reference segments.
    skip_overlap : bool, optional
        Set to True to not evaluate overlap regions.
        Defaults to False (i.e. keep overlap regions).

    Usage
    -----
    * Greedy diarization error rate between `reference` and `hypothesis`
    >>> metric = GreedyDiarizationErrorRate()
    >>> reference = Annotation(...)                          # doctest: +SKIP
    >>> hypothesis = Annotation(...)                         # doctest: +SKIP
    >>> value = metric(reference, hypothesis)                # doctest: +SKIP
    * Accumulate over multiple documents, then query the global value,
      its confidence interval, or the detailed components
    >>> for reference, hypothesis in ...                     # doctest: +SKIP
    ...     metric(reference, hypothesis)                    # doctest: +SKIP
    >>> global_value = abs(metric)                           # doctest: +SKIP
    >>> mean, (lower, upper) = metric.confidence_interval()  # doctest: +SKIP
    >>> components = metric(reference, hypothesis, detailed=True) #doctest +SKIP
    >>> components = metric[:]                               # doctest: +SKIP
    >>> metric['confusion']                                  # doctest: +SKIP

    See Also
    --------
    :class:`pyannote.metric.base.BaseMetric`: details on accumulation
    """

    @classmethod
    def metric_name(cls):
        # reported under the same name as the optimal diarization error rate
        return DER_NAME

    def __init__(self, collar=0.0, skip_overlap=False, **kwargs):
        super().__init__(collar=collar, skip_overlap=skip_overlap, **kwargs)
        # greedy label mapper (hypothesis --> reference)
        self.mapper_ = GreedyMapper()

    def greedy_mapping(self, reference, hypothesis, uem=None):
        """Greedily map hypothesis labels onto reference labels.

        Parameters
        ----------
        reference : Annotation
        hypothesis : Annotation
            Reference and hypothesis diarization
        uem : Timeline
            Evaluation map

        Returns
        -------
        mapping : dict
            Mapping between hypothesis (key) and reference (value) labels
        """
        # restrict to evaluated regions when an evaluation map is given
        if uem:
            uemified = self.uemify(reference, hypothesis, uem=uem)
            reference, hypothesis = uemified
        return self.mapper_(hypothesis, reference)

    def compute_components(self, reference, hypothesis, uem=None, **kwargs):
        """Translate hypothesis via the greedy mapping, then score as IER."""
        # Crop both annotations to the evaluated regions, remove collars
        # around reference boundaries and (optionally) overlap regions.
        # This must happen *before* searching for the greedy mapping,
        # because it can change the outcome of the search.
        reference, hypothesis, uem = self.uemify(
            reference, hypothesis, uem=uem, collar=self.collar,
            skip_overlap=self.skip_overlap, returns_uem=True)
        # Force disjoint label spaces: strings for reference ('A', 'B', ...)
        # and integers for hypothesis (1, 2, ...).
        reference = reference.rename_labels(generator='string')
        hypothesis = hypothesis.rename_labels(generator='int')
        # Translate hypothesis labels through the greedy (int --> str)
        # mapping and score the translated hypothesis.
        translated = hypothesis.rename_labels(
            mapping=self.greedy_mapping(reference, hypothesis))
        # Collar / overlap handling has already been applied above, hence
        # collar=0.0 and skip_overlap=False here.
        return super(GreedyDiarizationErrorRate, self).compute_components(
            reference, translated, uem=uem, collar=0.0,
            skip_overlap=False, **kwargs)
# metric name and component keys for the Jaccard error rate
JER_NAME = 'jaccard error rate'
JER_SPEAKER_ERROR = 'speaker error'  # accumulated sum of per-speaker error rates
JER_SPEAKER_COUNT = 'speaker count'  # number of reference speakers scored
class JaccardErrorRate(DiarizationErrorRate):
    """Jaccard error rate (JER).

    Reference
    ---------
    Second DIHARD Challenge Evaluation Plan. Version 1.1
    N. Ryant, K. Church, C. Cieri, A. Cristia, J. Du, S. Ganapathy, M. Liberman
    https://coml.lscp.ens.fr/dihard/2019/second_dihard_eval_plan_v1.1.pdf

    An optimal speaker mapping between reference and hypothesis is obtained
    with the Hungarian algorithm (each reference speaker paired with at most
    one system speaker and vice versa). For each reference speaker ref, the
    speaker-specific error rate is

        JERref = (FA + MISS) / TOTAL

    where
    * TOTAL is the duration of the union of reference and system speaker
      segments (or of all reference speaker segments when the reference
      speaker was not paired with a system speaker);
    * FA is the total system speaker time not attributed to the reference
      speaker (0 when unpaired);
    * MISS is the total reference speaker time not attributed to the system
      speaker (equal to TOTAL when unpaired).

    JER is the average of these speaker-specific error rates: every speaker
    contributes equally, regardless of how much speech they produced, and
    JER never exceeds 100%.

    Parameters
    ----------
    collar : float, optional
        Duration (in seconds) of collars removed from evaluation around
        boundaries of reference segments.
    skip_overlap : bool, optional
        Set to True to not evaluate overlap regions.
        Defaults to False (i.e. keep overlap regions).

    Usage
    -----
    >>> metric = JaccardErrorRate()
    >>> reference = Annotation(...)          # doctest: +SKIP
    >>> hypothesis = Annotation(...)         # doctest: +SKIP
    >>> jer = metric(reference, hypothesis)  # doctest: +SKIP
    """

    @classmethod
    def metric_name(cls):
        return JER_NAME

    @classmethod
    def metric_components(cls):
        return [
            JER_SPEAKER_COUNT,
            JER_SPEAKER_ERROR,
        ]

    def __init__(self, collar=0.0, skip_overlap=False, **kwargs):
        super().__init__(
            collar=collar, skip_overlap=skip_overlap, **kwargs)
        self.mapper_ = HungarianMapper()

    def compute_components(self, reference, hypothesis, uem=None, **kwargs):
        """Accumulate per-speaker Jaccard error rates.

        Returns a components dict holding the number of reference speakers
        and the sum of their speaker-specific error rates.
        """
        # Crop reference and hypothesis to evaluated regions (uem), remove
        # collars around reference segment boundaries and (optionally)
        # overlap regions. This must be done before searching for the
        # optimal mapping, as it may have an impact on that search.
        reference, hypothesis, uem = self.uemify(
            reference, hypothesis, uem=uem,
            collar=self.collar, skip_overlap=self.skip_overlap,
            returns_uem=True)
        # force disjoint label spaces: strings for reference ('A', 'B', ...)
        # and integers for hypothesis (1, 2, ...)
        reference = reference.rename_labels(generator='string')
        hypothesis = hypothesis.rename_labels(generator='int')
        # optimal (str --> int) mapping from reference to hypothesis labels
        mapping = self.optimal_mapping(hypothesis, reference)
        detail = self.init_components()
        for ref_speaker in reference.labels():
            hyp_speaker = mapping.get(ref_speaker, None)
            if hyp_speaker is None:
                # unpaired reference speaker: fa = 0 and miss = total,
                # hence jer = (0 + total) / total = 1
                jer = 1.
            else:
                r = reference.label_timeline(ref_speaker)
                h = hypothesis.label_timeline(hyp_speaker)
                # total duration of the union of reference and system
                # speaker segments
                total = r.union(h).support().duration()
                # total system speaker time not attributed to the
                # reference speaker
                fa = h.duration() - h.crop(r).duration()
                # total reference speaker time not attributed to the
                # system speaker
                miss = r.duration() - r.crop(h).duration()
                jer = (fa + miss) / total
            detail[JER_SPEAKER_COUNT] += 1
            detail[JER_SPEAKER_ERROR] += jer
        return detail

    def compute_metric(self, detail):
        """Average of speaker-specific error rates.

        BUG FIX: previously raised ZeroDivisionError when the (uemified)
        reference contained no speaker at all; no speaker means no error,
        so 0. is returned in that case.
        """
        if detail[JER_SPEAKER_COUNT] == 0:
            return 0.
        return detail[JER_SPEAKER_ERROR] / detail[JER_SPEAKER_COUNT]
# metric name and component keys for cluster purity
PURITY_NAME = 'purity'
PURITY_TOTAL = 'total'      # denominator: total duration (weighted) or cluster count
PURITY_CORRECT = 'correct'  # numerator: correctly clustered duration / ratio sum
class DiarizationPurity(UEMSupportMixin, BaseMetric):
    """Cluster purity.

    Purity is perfect (1.0) when every hypothesized cluster only overlaps
    segments that belong to a single reference label.

    Parameters
    ----------
    weighted : bool, optional
        When True (default), each cluster is weighted by its overall duration.
    collar : float, optional
        Duration (in seconds) of collars removed from evaluation around
        boundaries of reference segments.
    skip_overlap : bool, optional
        Set to True to not evaluate overlap regions.
        Defaults to False (i.e. keep overlap regions).
    """

    @classmethod
    def metric_name(cls):
        return PURITY_NAME

    @classmethod
    def metric_components(cls):
        return [PURITY_TOTAL, PURITY_CORRECT]

    def __init__(self, collar=0.0, skip_overlap=False,
                 weighted=True, **kwargs):
        super(DiarizationPurity, self).__init__(**kwargs)
        self.weighted = weighted
        self.collar = collar
        self.skip_overlap = skip_overlap

    def compute_components(self, reference, hypothesis, uem=None, **kwargs):
        detail = self.init_components()
        # restrict evaluation to the provided uem / collar settings
        reference, hypothesis = self.uemify(
            reference, hypothesis, uem=uem,
            collar=self.collar, skip_overlap=self.skip_overlap)
        # nothing to score against an empty reference
        if not reference:
            return detail
        # cooccurrence matrix between reference labels and clusters
        matrix = reference * hypothesis
        # per cluster: duration of its largest class, and total duration
        top_class = matrix.max(axis=0)
        cluster_duration = matrix.sum(axis=0)
        if self.weighted:
            # duration-weighted purity: sum of largest classes over
            # total cluster duration (only filled for a non-empty matrix)
            detail[PURITY_CORRECT] = 0.
            if np.prod(matrix.shape):
                detail[PURITY_CORRECT] = top_class.sum()
                detail[PURITY_TOTAL] = cluster_duration.sum()
        else:
            # unweighted: average of per-cluster purity ratios
            detail[PURITY_CORRECT] = (top_class / cluster_duration).sum()
            detail[PURITY_TOTAL] = len(top_class)
        return detail

    def compute_metric(self, detail):
        # perfect purity by convention when nothing was evaluated
        if not detail[PURITY_TOTAL] > 0.:
            return 1.
        return detail[PURITY_CORRECT] / detail[PURITY_TOTAL]
COVERAGE_NAME = 'coverage'  # metric name for cluster coverage
class DiarizationCoverage(DiarizationPurity):
    """Cluster coverage.

    Coverage is perfect (1.0) when all segments of any given reference
    label end up in the same cluster. It is the dual of purity, computed
    by swapping the roles of reference and hypothesis.

    Parameters
    ----------
    weighted : bool, optional
        When True (default), each cluster is weighted by its overall duration.
    collar : float, optional
        Duration (in seconds) of collars removed from evaluation around
        boundaries of reference segments.
    skip_overlap : bool, optional
        Set to True to not evaluate overlap regions.
        Defaults to False (i.e. keep overlap regions).
    """

    @classmethod
    def metric_name(cls):
        return COVERAGE_NAME

    def __init__(self, collar=0.0, skip_overlap=False,
                 weighted=True, **kwargs):
        super(DiarizationCoverage, self).__init__(
            collar=collar, skip_overlap=skip_overlap,
            weighted=weighted, **kwargs)

    def compute_components(self, reference, hypothesis, uem=None, **kwargs):
        # coverage(reference, hypothesis) == purity(hypothesis, reference)
        return super(DiarizationCoverage, self).compute_components(
            hypothesis, reference, uem=uem, **kwargs)
# metric name and component keys for the purity/coverage F-measure
PURITY_COVERAGE_NAME = 'F[purity|coverage]'
PURITY_COVERAGE_LARGEST_CLASS = 'largest_class'      # purity numerator
PURITY_COVERAGE_TOTAL_CLUSTER = 'total_cluster'      # purity denominator
PURITY_COVERAGE_LARGEST_CLUSTER = 'largest_cluster'  # coverage numerator
PURITY_COVERAGE_TOTAL_CLASS = 'total_class'          # coverage denominator
class DiarizationPurityCoverageFMeasure(UEMSupportMixin, BaseMetric):
    """Compute diarization purity and coverage, and return their F-score.
    Parameters
    ----------
    weighted : bool, optional
        When True (default), each cluster/class is weighted by its overall
        duration.
    collar : float, optional
        Duration (in seconds) of collars removed from evaluation around
        boundaries of reference segments.
    skip_overlap : bool, optional
        Set to True to not evaluate overlap regions.
        Defaults to False (i.e. keep overlap regions).
    beta : float, optional
        When beta > 1, greater importance is given to coverage.
        When beta < 1, greater importance is given to purity.
        Defaults to 1.
    See also
    --------
    pyannote.metrics.diarization.DiarizationPurity
    pyannote.metrics.diarization.DiarizationCoverage
    pyannote.metrics.base.f_measure
    """
    @classmethod
    def metric_name(cls):
        return PURITY_COVERAGE_NAME
    @classmethod
    def metric_components(cls):
        # purity = largest_class / total_cluster
        # coverage = largest_cluster / total_class
        return [PURITY_COVERAGE_LARGEST_CLASS,
                PURITY_COVERAGE_TOTAL_CLUSTER,
                PURITY_COVERAGE_LARGEST_CLUSTER,
                PURITY_COVERAGE_TOTAL_CLASS]
    def __init__(self, collar=0.0, skip_overlap=False,
                 weighted=True, beta=1., **kwargs):
        super(DiarizationPurityCoverageFMeasure, self).__init__(**kwargs)
        self.collar = collar
        self.skip_overlap = skip_overlap
        self.weighted = weighted
        self.beta = beta
    def compute_components(self, reference, hypothesis, uem=None, **kwargs):
        """Accumulate purity and coverage numerators/denominators."""
        detail = self.init_components()
        # crop reference and hypothesis to evaluated regions (uem)
        reference, hypothesis = self.uemify(
            reference, hypothesis, uem=uem,
            collar=self.collar, skip_overlap=self.skip_overlap)
        # cooccurrence matrix (rows: reference classes, columns: clusters)
        matrix = reference * hypothesis
        # duration of largest class in each cluster
        largest_class = matrix.max(axis=0)
        # duration of clusters
        duration_cluster = matrix.sum(axis=0)
        # duration of largest cluster in each class
        largest_cluster = matrix.max(axis=1)
        # duration of classes
        duration_class = matrix.sum(axis=1)
        if self.weighted:
            # compute purity components
            # (left at 0. when the cooccurrence matrix is empty)
            detail[PURITY_COVERAGE_LARGEST_CLASS] = 0.
            if np.prod(matrix.shape):
                detail[PURITY_COVERAGE_LARGEST_CLASS] = largest_class.sum()
                detail[PURITY_COVERAGE_TOTAL_CLUSTER] = duration_cluster.sum()
            # compute coverage components
            detail[PURITY_COVERAGE_LARGEST_CLUSTER] = 0.
            if np.prod(matrix.shape):
                detail[PURITY_COVERAGE_LARGEST_CLUSTER] = largest_cluster.sum()
                detail[PURITY_COVERAGE_TOTAL_CLASS] = duration_class.sum()
        else:
            # compute purity components
            # (unweighted: average of per-cluster / per-class ratios)
            detail[PURITY_COVERAGE_LARGEST_CLASS] = (largest_class / duration_cluster).sum()
            detail[PURITY_COVERAGE_TOTAL_CLUSTER] = len(largest_class)
            # compute coverage components
            detail[PURITY_COVERAGE_LARGEST_CLUSTER] = (largest_cluster / duration_class).sum()
            detail[PURITY_COVERAGE_TOTAL_CLASS] = len(largest_cluster)
        # compute purity
        # (stored in detail for reporting; 1. by convention on empty input)
        detail[PURITY_NAME] = \
            1. if detail[PURITY_COVERAGE_TOTAL_CLUSTER] == 0. \
            else detail[PURITY_COVERAGE_LARGEST_CLASS] / detail[PURITY_COVERAGE_TOTAL_CLUSTER]
        # compute coverage
        detail[COVERAGE_NAME] = \
            1. if detail[PURITY_COVERAGE_TOTAL_CLASS] == 0. \
            else detail[PURITY_COVERAGE_LARGEST_CLUSTER] / detail[PURITY_COVERAGE_TOTAL_CLASS]
        return detail
    def compute_metric(self, detail):
        """Return the F-measure of purity and coverage for `detail`."""
        _, _, value = self.compute_metrics(detail=detail)
        return value
    def compute_metrics(self, detail=None):
        """Return (purity, coverage, f_measure), using accumulated
        components when `detail` is None."""
        detail = self.accumulated_ if detail is None else detail
        purity = \
            1. if detail[PURITY_COVERAGE_TOTAL_CLUSTER] == 0. \
            else detail[PURITY_COVERAGE_LARGEST_CLASS] / detail[PURITY_COVERAGE_TOTAL_CLUSTER]
        coverage = \
            1. if detail[PURITY_COVERAGE_TOTAL_CLASS] == 0. \
            else detail[PURITY_COVERAGE_LARGEST_CLUSTER] / detail[PURITY_COVERAGE_TOTAL_CLASS]
        return purity, coverage, f_measure(purity, coverage, beta=self.beta)
# metric name and component keys for cluster homogeneity
HOMOGENEITY_NAME = 'homogeneity'
HOMOGENEITY_ENTROPY = 'entropy'              # entropy of the reference
HOMOGENEITY_CROSS_ENTROPY = 'cross-entropy'  # reference/hypothesis cross-entropy
class DiarizationHomogeneity(UEMSupportMixin, BaseMetric):
    """Cluster homogeneity
    Computed as 1 - cross_entropy / entropy, from the reference/hypothesis
    cooccurrence matrix.
    Parameters
    ----------
    collar : float, optional
        Duration (in seconds) of collars removed from evaluation around
        boundaries of reference segments.
    skip_overlap : bool, optional
        Set to True to not evaluate overlap regions.
        Defaults to False (i.e. keep overlap regions).
    """
    @classmethod
    def metric_name(cls):
        return HOMOGENEITY_NAME
    @classmethod
    def metric_components(cls):
        return [HOMOGENEITY_ENTROPY, HOMOGENEITY_CROSS_ENTROPY]
    def __init__(self, collar=0.0, skip_overlap=False, **kwargs):
        super(DiarizationHomogeneity, self).__init__(**kwargs)
        self.collar = collar
        self.skip_overlap = skip_overlap
    def compute_components(self, reference, hypothesis, uem=None, **kwargs):
        """Accumulate reference entropy and ref/hyp cross-entropy."""
        detail = self.init_components()
        # crop reference and hypothesis to evaluated regions (uem)
        reference, hypothesis = self.uemify(
            reference, hypothesis, uem=uem,
            collar=self.collar, skip_overlap=self.skip_overlap)
        # cooccurrence matrix
        matrix = reference * hypothesis
        duration = np.sum(matrix)                # total evaluated duration
        rduration = np.sum(matrix, axis=1)       # duration per reference label
        hduration = np.sum(matrix, axis=0)       # duration per hypothesis label
        # reference entropy and reference/hypothesis cross-entropy
        # (np.ma masks invalid divisions/logs; masked entries become 0,
        # implementing the 0 * log(0) = 0 convention)
        ratio = np.ma.divide(rduration, duration).filled(0.)
        detail[HOMOGENEITY_ENTROPY] = \
            -np.sum(ratio * np.ma.log(ratio).filled(0.))
        ratio = np.ma.divide(matrix, duration).filled(0.)
        hratio = np.ma.divide(matrix, hduration).filled(0.)
        detail[HOMOGENEITY_CROSS_ENTROPY] = \
            -np.sum(ratio * np.ma.log(hratio).filled(0.))
        return detail
    def compute_metric(self, detail):
        # homogeneity = 1 - cross_entropy / entropy; the degenerate
        # zero-entropy case is defined as perfect (1.) when the
        # cross-entropy is also zero, worst (0.) otherwise
        numerator = 1. * detail[HOMOGENEITY_CROSS_ENTROPY]
        denominator = 1. * detail[HOMOGENEITY_ENTROPY]
        if denominator == 0.:
            if numerator == 0:
                return 1.
            else:
                return 0.
        else:
            return 1. - numerator / denominator
COMPLETENESS_NAME = 'completeness'  # metric name for cluster completeness
class DiarizationCompleteness(DiarizationHomogeneity):
    """Cluster completeness.

    Completeness is the dual of homogeneity: it is obtained by computing
    homogeneity with the roles of reference and hypothesis swapped.

    Parameters
    ----------
    collar : float, optional
        Duration (in seconds) of collars removed from evaluation around
        boundaries of reference segments.
    skip_overlap : bool, optional
        Set to True to not evaluate overlap regions.
        Defaults to False (i.e. keep overlap regions).
    """

    @classmethod
    def metric_name(cls):
        return COMPLETENESS_NAME

    def compute_components(self, reference, hypothesis, uem=None, **kwargs):
        # completeness(reference, hypothesis) is
        # homogeneity(hypothesis, reference)
        return super(DiarizationCompleteness, self).compute_components(
            hypothesis, reference, uem=uem, **kwargs)
| 36.515354 | 94 | 0.656782 |
42ec5735f838bdd5d0177a6c2c1e02839f96a634 | 1,524 | py | Python | modules/player.py | ddealer/avacity-2.0 | 756b9877c6df2bdc8b1368dfd5fdf5ca96179516 | [
"MIT"
] | 1 | 2019-10-20T14:09:23.000Z | 2019-10-20T14:09:23.000Z | modules/player.py | ddealer/avacity-2.0 | 756b9877c6df2bdc8b1368dfd5fdf5ca96179516 | [
"MIT"
] | null | null | null | modules/player.py | ddealer/avacity-2.0 | 756b9877c6df2bdc8b1368dfd5fdf5ca96179516 | [
"MIT"
] | null | null | null | from modules.base_module import Module
from modules.location import gen_plr
class_name = "Player"
class Player(Module):
prefix = "pl"
def __init__(self, server):
self.server = server
self.commands = {"gid": self.players_by_id, "flw": self.follow,
"gos": self.get_online_statuses}
def players_by_id(self, msg, client):
players = []
for uid in msg[2]["uids"]:
plr = gen_plr(uid, self.server)
if not plr:
continue
players.append(plr)
client.send(["pl.get", {"plrs": players, "clid": msg[2]["clid"]}])
def follow(self, msg, client):
user = None
for tmp in self.server.online.copy():
if tmp.uid == msg[2]["uid"]:
user = tmp
break
if not user:
scs = "userOffline"
locinfo = None
else:
scs = "success"
locinfo = {"st": 0, "s": "127.0.0.1", "at": None, "d": 0, "x": -1.0,
"y": -1.0, "shlc": True, "pl": "", "l": tmp.room}
client.send(["pl.flw", {"scs": scs, "locinfo": locinfo}])
def get_online_statuses(self, msg, client):
online = {}
for uid in msg[2]["uids"]:
online[uid] = False
for tmp in self.server.online.copy():
if tmp.uid == uid:
online[uid] = True
break
client.send(["pl.gos", {"clid": msg[2]["clid"], "onl": online}])
| 32.425532 | 80 | 0.48622 |
1d738070529b26dbca264956935e34e4c7f99c65 | 354 | py | Python | models/apps/game/unit/unit_stat/models.py | metinberkkaratas/ProjectMagic-MightofHeroes | 578697e637aba0f18b4f83762bf1c87fb20db2ee | [
"MIT"
] | null | null | null | models/apps/game/unit/unit_stat/models.py | metinberkkaratas/ProjectMagic-MightofHeroes | 578697e637aba0f18b4f83762bf1c87fb20db2ee | [
"MIT"
] | 4 | 2021-03-19T02:37:45.000Z | 2022-02-10T11:18:04.000Z | PMMH/apps/game/unit/unit_stat/models.py | metinberkkaratas/ProjectMagic-MightofHeroes | 578697e637aba0f18b4f83762bf1c87fb20db2ee | [
"MIT"
] | 1 | 2019-10-21T20:32:20.000Z | 2019-10-21T20:32:20.000Z | from django.db import models
class UnitStat(models.Model):
vitality = models.IntegerField()
strength = models.IntegerField()
agility = models.IntegerField()
stamina = models.IntegerField()
intelligence = models.IntegerField()
armor = models.IntegerField()
damage = models.IntegerField()
available = models.IntegerField() | 29.5 | 40 | 0.717514 |
3e5d7a35df83ee80d9d37e28aeb3d1dd024f49cd | 3,255 | py | Python | scripts/delay_filter_run.py | LBJ-Wade/hera_cal | 868122b04b8e7f627aa72317427f89ca3eaf7d60 | [
"MIT"
] | 10 | 2017-06-22T22:14:23.000Z | 2022-03-08T17:33:45.000Z | scripts/delay_filter_run.py | LBJ-Wade/hera_cal | 868122b04b8e7f627aa72317427f89ca3eaf7d60 | [
"MIT"
] | 610 | 2017-06-22T22:16:27.000Z | 2022-03-31T16:11:34.000Z | scripts/delay_filter_run.py | LBJ-Wade/hera_cal | 868122b04b8e7f627aa72317427f89ca3eaf7d60 | [
"MIT"
] | 8 | 2017-10-30T18:16:19.000Z | 2021-04-01T09:20:18.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 the HERA Project
# Licensed under the MIT License
"Command-line drive script for hera_cal.delay_filter with baseline parallelization. Only performs filtering for DAYENU"
from hera_cal import delay_filter
import sys
parser = delay_filter.delay_filter_argparser()
ap = parser.parse_args()
# set kwargs
if ap.mode == 'clean':
filter_kwargs = {'window': ap.window,
'maxiter': ap.maxiter, 'edgecut_hi': ap.edgecut_hi,
'edgecut_low': ap.edgecut_low, 'gain': ap.gain}
if ap.window == 'tukey':
filter_kwargs['alpha'] = ap.alpha
avg_red_bllens = False
skip_flagged_edges = False
flag_model_rms_outliers = False
elif ap.mode == 'dayenu':
filter_kwargs = {}
avg_red_bllens = True
filter_kwargs['skip_contiguous_flags'] = False
filter_kwargs['max_contiguous_edge_flags'] = 10000
filter_kwargs['flag_model_rms_outliers'] = False
elif ap.mode == 'dpss_leastsq':
filter_kwargs = {}
avg_red_bllens = True
filter_kwargs['skip_contiguous_flags'] = True
skip_flagged_edges = True
filter_kwargs['max_contiguous_edge_flags'] = 1
filter_kwargs['flag_model_rms_outliers'] = True
else:
raise ValueError(f"mode {mode} not supported.")
if ap.cornerturnfile is not None:
baseline_list = io.baselines_from_filelist_position(filename=ap.cornerturnfile, filelist=ap.datafilelist)
else:
baseline_list = None
# allow none string to be passed through to ap.calfile
if isinstance(ap.calfilelist, str) and ap.calfilelist.lower() == 'none':
ap.calfilelist = None
# Run Delay Filter
delay_filter.load_delay_filter_and_write(ap.datafilelist, calfile_list=ap.calfilelist, avg_red_bllens=avg_red_bllens,
baseline_list=baseline_list, spw_range=ap.spw_range,
cache_dir=ap.cache_dir, res_outfilename=ap.res_outfilename,
clobber=ap.clobber, write_cache=ap.write_cache, external_flags=ap.external_flags,
read_cache=ap.read_cache, mode=ap.mode, overwrite_flags=ap.overwrite_flags,
factorize_flags=ap.factorize_flags, time_thresh=ap.time_thresh,
add_to_history=' '.join(sys.argv), polarizations=ap.polarizations,
verbose=ap.verbose, skip_if_flag_within_edge_distance=ap.skip_if_flag_within_edge_distance,
flag_yaml=ap.flag_yaml, Nbls_per_load=ap.Nbls_per_load,
skip_flagged_edges=skip_flagged_edges,
filled_outfilename=ap.filled_outfilename,
CLEAN_outfilename=ap.CLEAN_outfilename,
standoff=ap.standoff, horizon=ap.horizon, tol=ap.tol,
skip_wgt=ap.skip_wgt, min_dly=ap.min_dly, zeropad=ap.zeropad,
filter_spw_ranges=ap.filter_spw_ranges,
clean_flags_in_resid_flags=True, **filter_kwargs)
| 50.076923 | 132 | 0.626114 |
9f71194ee91b096b1450eea2024e35caad021706 | 530 | py | Python | src/account/models.py | masteritua/currency_exchange | 9555e96fd66a72b1aee9d0d269e5d21fefe6bf9c | [
"MIT"
] | null | null | null | src/account/models.py | masteritua/currency_exchange | 9555e96fd66a72b1aee9d0d269e5d21fefe6bf9c | [
"MIT"
] | 11 | 2021-03-19T08:38:20.000Z | 2022-03-12T00:18:51.000Z | src/account/models.py | masteritua/currency_exchange | 9555e96fd66a72b1aee9d0d269e5d21fefe6bf9c | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import AbstractUser
from uuid import uuid4
def avatar_path(instance, filename):
return "/".join(["avatar", str(instance.id), str(uuid4()), filename])
class User(AbstractUser):
avatar = models.ImageField(upload_to=avatar_path, null=True, blank=True, default=None)
class Contact(models.Model):
email = models.EmailField()
title = models.CharField(max_length=256)
body = models.TextField()
created = models.DateTimeField(auto_now_add=True)
| 25.238095 | 90 | 0.739623 |
ef1037cf2adf7776d6f74879677fc35de3b0a69e | 3,462 | py | Python | modules/transformer_layer.py | NLPInBLCU/BiaffineDependencyParsing | 40b133648c747957dacd59916add0403371fe680 | [
"MIT"
] | 67 | 2019-10-29T12:00:07.000Z | 2022-02-16T10:18:19.000Z | module/transformer_layer.py | LiangsLi/Pytorch_Project_Template | d6779351cf3b2f3069f601b7c4d8508316e5ed83 | [
"MIT"
] | 8 | 2019-12-11T12:16:14.000Z | 2021-09-03T11:03:30.000Z | module/transformer_layer.py | LiangsLi/Pytorch_Project_Template | d6779351cf3b2f3069f601b7c4d8508316e5ed83 | [
"MIT"
] | 17 | 2019-10-29T02:42:36.000Z | 2021-11-01T05:09:31.000Z | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
import torch.nn as nn
import torch.nn.functional as F
import fairseq.utils as utils
from fairseq.modules import (
LayerNorm,
MultiheadAttention,
)
from utils.model.initialization import init_bert_params
class TransformerSentenceEncoderLayer(nn.Module):
    """
    Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
    models.

    Residual connections and layer normalizations can each be disabled via
    the ``use_residual`` / ``use_norm`` flags.
    """
    def __init__(
        self,
        embedding_dim: int = 768,
        ffn_embedding_dim: int = 3072,
        num_attention_heads: int = 8,
        dropout: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        activation_fn: str = 'relu',
        add_bias_kv: bool = False,
        add_zero_attn: bool = False,
        export: bool = False,
        use_residual: bool = True,
        use_norm: bool = True,
    ) -> None:
        super().__init__()
        # flags controlling residual connections and layer norms
        self.use_residual = use_residual
        self.use_norm = use_norm
        # Initialize parameters
        self.embedding_dim = embedding_dim
        self.dropout = dropout
        self.activation_dropout = activation_dropout
        # Initialize blocks
        self.activation_fn = utils.get_activation_fn(activation_fn)
        self.self_attn = MultiheadAttention(
            self.embedding_dim,
            num_attention_heads,
            dropout=attention_dropout,
            add_bias_kv=add_bias_kv,
            add_zero_attn=add_zero_attn,
            self_attention=True
        )
        # layer norm associated with the self attention layer
        self.self_attn_layer_norm = LayerNorm(self.embedding_dim, export=export)
        # position-wise feed-forward network: fc1 expands, fc2 projects back
        self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
        self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
        # layer norm associated with the position wise feed-forward NN
        self.final_layer_norm = LayerNorm(self.embedding_dim, export=export)
        # BERT-style parameter initialization for all submodules
        self.apply(init_bert_params)
    def forward(
        self,
        x: torch.Tensor,
        self_attn_mask: torch.Tensor = None,
        self_attn_padding_mask: torch.Tensor = None,
    ):
        """
        Apply self-attention then the position-wise feed-forward block.

        LayerNorm is applied after the self-attention/ffn modules
        (post-norm), similar to the original Transformer implementation.
        Returns the transformed tensor and the attention output of
        MultiheadAttention (need_weights=False is passed).
        """
        residual = x
        x, attn = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=self_attn_padding_mask,
            need_weights=False,
            attn_mask=self_attn_mask,
        )
        x = F.dropout(x, p=self.dropout, training=self.training)
        if self.use_residual:
            x = residual + x
        if self.use_norm:
            x = self.self_attn_layer_norm(x)
        residual = x
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        if self.use_residual:
            x = residual + x
        if self.use_norm:
            x = self.final_layer_norm(x)
        return x, attn
2d460abaffa52c27bc7279a00c9d6535e4d92508 | 3,686 | py | Python | samples/publish_workbook.py | essentia-team/server-client-python | c9375204c581c5288fe4b6abc3c84fea41a887b5 | [
"CC0-1.0",
"MIT"
] | 1 | 2019-10-24T02:00:52.000Z | 2019-10-24T02:00:52.000Z | samples/publish_workbook.py | essentia-team/server-client-python | c9375204c581c5288fe4b6abc3c84fea41a887b5 | [
"CC0-1.0",
"MIT"
] | null | null | null | samples/publish_workbook.py | essentia-team/server-client-python | c9375204c581c5288fe4b6abc3c84fea41a887b5 | [
"CC0-1.0",
"MIT"
] | 1 | 2022-03-29T23:19:43.000Z | 2022-03-29T23:19:43.000Z | ####
# This script demonstrates how to use the Tableau Server Client
# to publish a workbook to a Tableau server. It will publish
# a specified workbook to the 'default' project of the given server.
#
# Note: The REST API publish process cannot automatically include
# extracts or other resources that the workbook uses. Therefore,
# a .twb file with data from a local computer cannot be published,
# unless packaged into a .twbx file.
#
# For more information, refer to the documentations on 'Publish Workbook'
# (https://onlinehelp.tableau.com/current/api/rest_api/en-us/help.htm)
#
# To run the script, you must have installed Python 2.7.X or 3.3 and later.
####
import argparse
import getpass
import logging
import tableauserverclient as TSC
from tableauserverclient import ConnectionCredentials, ConnectionItem
def main():
    """Parse CLI arguments, sign in, and publish the workbook to the
    server's default project (optionally as an asynchronous job)."""
    parser = argparse.ArgumentParser(description='Publish a workbook to server.')
    parser.add_argument('--server', '-s', required=True, help='server address')
    parser.add_argument('--username', '-u', required=True, help='username to sign into server')
    parser.add_argument('--filepath', '-f', required=True, help='filepath to the workbook to publish')
    parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
                        help='desired logging level (set to error by default)')
    parser.add_argument('--as-job', '-a', help='Publishing asynchronously', action='store_true')
    args = parser.parse_args()
    password = getpass.getpass("Password: ")
    # Set logging level based on user input, or error by default
    logging_level = getattr(logging, args.logging_level.upper())
    logging.basicConfig(level=logging_level)
    # Step 1: Sign in to server.
    tableau_auth = TSC.TableauAuth(args.username, password)
    server = TSC.Server(args.server)
    overwrite_true = TSC.Server.PublishMode.Overwrite
    with server.auth.sign_in(tableau_auth):
        # Step 2: Get all the projects on server, then look for the default one.
        all_projects, pagination_item = server.projects.get()
        default_project = next((project for project in all_projects if project.is_default()), None)
        # Sample embedded data-source connections with placeholder
        # credentials (this is demo code — do not hard-code real secrets).
        connection1 = ConnectionItem()
        connection1.server_address = "mssql.test.com"
        connection1.connection_credentials = ConnectionCredentials("test", "password", True)
        connection2 = ConnectionItem()
        connection2.server_address = "postgres.test.com"
        connection2.server_port = "5432"
        connection2.connection_credentials = ConnectionCredentials("test", "password", True)
        all_connections = list()
        all_connections.append(connection1)
        all_connections.append(connection2)
        # Step 3: If default project is found, form a new workbook item and publish.
        if default_project is not None:
            new_workbook = TSC.WorkbookItem(default_project.id)
            if args.as_job:
                # asynchronous publish: returns a job to poll
                new_job = server.workbooks.publish(new_workbook, args.filepath, overwrite_true,
                                                   connections=all_connections, as_job=args.as_job)
                print("Workbook published. JOB ID: {0}".format(new_job.id))
            else:
                # synchronous publish: returns the published workbook
                new_workbook = server.workbooks.publish(new_workbook, args.filepath, overwrite_true,
                                                        connections=all_connections, as_job=args.as_job)
                print("Workbook published. ID: {0}".format(new_workbook.id))
        else:
            error = "The default project could not be found."
            raise LookupError(error)
# standard script entry point
if __name__ == '__main__':
    main()
| 42.860465 | 104 | 0.680955 |
51941609a90c550b100604bc4e24745647a17f7b | 1,816 | py | Python | lotto/downloader.py | rmayherr/eurojackpot | 87ce9a73e074db1947c99c91ff3064528e9721a8 | [
"MIT"
] | null | null | null | lotto/downloader.py | rmayherr/eurojackpot | 87ce9a73e074db1947c99c91ff3064528e9721a8 | [
"MIT"
] | null | null | null | lotto/downloader.py | rmayherr/eurojackpot | 87ce9a73e074db1947c99c91ff3064528e9721a8 | [
"MIT"
] | null | null | null | from configparser import ConfigParser
import requests
import os
import sys
from datetime import datetime
def get_from_config(name):
    """Return the value stored under *name* in the ``[default]`` section of config.txt.

    ``config.txt`` is expected to live in the same directory as this module.

    :param name: key inside the ``[default]`` section
    :return: the configured value, as a string
    :raises KeyError: if the section or the key is missing
    """
    cfg = ConfigParser()
    base_path = os.path.dirname(os.path.abspath(__file__))
    # os.path.join is portable across platforms, unlike the previous "/".join
    cfg.read(os.path.join(base_path, 'config.txt'))
    return cfg['default'][name]
def download(url):
    """Download the CSV file at *url* and store it under the configured file name.

    The target file name is read from config.txt via ``get_from_config``.

    :param url: full URL of the CSV file to fetch
    :return: 0 on success, 1 on any failure (network or file I/O) — the
        original error-code contract is preserved for callers.
    """
    try:
        # Single GET request; the original fetched the URL twice (once before
        # printing the size, and again before writing), doubling the transfer.
        r = requests.get(url, allow_redirects=True, timeout=10)
        try:
            print(f"Downloading file {url.split('/')[-1]} with size"
                  f"{int(r.headers['Content-Length']) / 1024 : .2f}"
                  f"kbyte...")
            # Context manager guarantees the file is closed even on write errors.
            with open(get_from_config('csv_file_name'), 'w') as f:
                f.write(r.text)
            print(f'Done.')
            return 0
        except Exception as e:
            print(f"Error occured while writing to file! \n\t{e}")
            return 1
    except Exception as e:
        print(f'Error occured! \n\t{e}')
        return 1
| 35.607843 | 79 | 0.536894 |
a42a2c3dec66eb959a68c8ee4b709c5bc22c01b7 | 29,121 | py | Python | Sticky-Notes/tests/test_inputs/checksForPossibleTitle.py | v2thegreat/sticky-notes | e79dd10b8fb88e0195ac1ca90d6b8dcb0f56e002 | [
"Apache-2.0"
] | null | null | null | Sticky-Notes/tests/test_inputs/checksForPossibleTitle.py | v2thegreat/sticky-notes | e79dd10b8fb88e0195ac1ca90d6b8dcb0f56e002 | [
"Apache-2.0"
] | null | null | null | Sticky-Notes/tests/test_inputs/checksForPossibleTitle.py | v2thegreat/sticky-notes | e79dd10b8fb88e0195ac1ca90d6b8dcb0f56e002 | [
"Apache-2.0"
] | null | null | null | import random
nouns = [ #Source: https://gist.github.com/bergantine/2390284
'people',
'history',
'way',
'art',
'world',
'information',
'map',
'two',
'family',
'government',
'health',
'system',
'computer',
'meat',
'year',
'thanks',
'music',
'person',
'reading',
'method',
'data',
'food',
'understanding',
'theory',
'law',
'bird',
'literature',
'problem',
'software',
'control',
'knowledge',
'power',
'ability',
'economics',
'love',
'internet',
'television',
'science',
'library',
'nature',
'fact',
'product',
'idea',
'temperature',
'investment',
'area',
'society',
'activity',
'story',
'industry',
'media',
'thing',
'oven',
'community',
'definition',
'safety',
'quality',
'development',
'language',
'management',
'player',
'variety',
'video',
'week',
'security',
'country',
'exam',
'movie',
'organization',
'equipment',
'physics',
'analysis',
'policy',
'series',
'thought',
'basis',
'boyfriend',
'direction',
'strategy',
'technology',
'army',
'camera',
'freedom',
'paper',
'environment',
'child',
'instance',
'month',
'truth',
'marketing',
'university',
'writing',
'article',
'department',
'difference',
'goal',
'news',
'audience',
'fishing',
'growth',
'income',
'marriage',
'user',
'combination',
'failure',
'meaning',
'medicine',
'philosophy',
'teacher',
'communication',
'night',
'chemistry',
'disease',
'disk',
'energy',
'nation',
'road',
'role',
'soup',
'advertising',
'location',
'success',
'addition',
'apartment',
'education',
'math',
'moment',
'painting',
'politics',
'attention',
'decision',
'event',
'property',
'shopping',
'student',
'wood',
'competition',
'distribution',
'entertainment',
'office',
'population',
'president',
'unit',
'category',
'cigarette',
'context',
'introduction',
'opportunity',
'performance',
'driver',
'flight',
'length',
'magazine',
'newspaper',
'relationship',
'teaching',
'cell',
'dealer',
'debate',
'finding',
'lake',
'member',
'message',
'phone',
'scene',
'appearance',
'association',
'concept',
'customer',
'death',
'discussion',
'housing',
'inflation',
'insurance',
'mood',
'woman',
'advice',
'blood',
'effort',
'expression',
'importance',
'opinion',
'payment',
'reality',
'responsibility',
'situation',
'skill',
'statement',
'wealth',
'application',
'city',
'county',
'depth',
'estate',
'foundation',
'grandmother',
'heart',
'perspective',
'photo',
'recipe',
'studio',
'topic',
'collection',
'depression',
'imagination',
'passion',
'percentage',
'resource',
'setting',
'ad',
'agency',
'college',
'connection',
'criticism',
'debt',
'description',
'memory',
'patience',
'secretary',
'solution',
'administration',
'aspect',
'attitude',
'director',
'personality',
'psychology',
'recommendation',
'response',
'selection',
'storage',
'version',
'alcohol',
'argument',
'complaint',
'contract',
'emphasis',
'highway',
'loss',
'membership',
'possession',
'preparation',
'steak',
'union',
'agreement',
'cancer',
'currency',
'employment',
'engineering',
'entry',
'interaction',
'limit',
'mixture',
'preference',
'region',
'republic',
'seat',
'tradition',
'virus',
'actor',
'classroom',
'delivery',
'device',
'difficulty',
'drama',
'election',
'engine',
'football',
'guidance',
'hotel',
'match',
'owner',
'priority',
'protection',
'suggestion',
'tension',
'variation',
'anxiety',
'atmosphere',
'awareness',
'bread',
'climate',
'comparison',
'confusion',
'construction',
'elevator',
'emotion',
'employee',
'employer',
'guest',
'height',
'leadership',
'mall',
'manager',
'operation',
'recording',
'respect',
'sample',
'transportation',
'boring',
'charity',
'cousin',
'disaster',
'editor',
'efficiency',
'excitement',
'extent',
'feedback',
'guitar',
'homework',
'leader',
'mom',
'outcome',
'permission',
'presentation',
'promotion',
'reflection',
'refrigerator',
'resolution',
'revenue',
'session',
'singer',
'tennis',
'basket',
'bonus',
'cabinet',
'childhood',
'church',
'clothes',
'coffee',
'dinner',
'drawing',
'hair',
'hearing',
'initiative',
'judgment',
'lab',
'measurement',
'mode',
'mud',
'orange',
'poetry',
'police',
'possibility',
'procedure',
'queen',
'ratio',
'relation',
'restaurant',
'satisfaction',
'sector',
'signature',
'significance',
'song',
'tooth',
'town',
'vehicle',
'volume',
'wife',
'accident',
'airport',
'appointment',
'arrival',
'assumption',
'baseball',
'chapter',
'committee',
'conversation',
'database',
'enthusiasm',
'error',
'explanation',
'farmer',
'gate',
'girl',
'hall',
'historian',
'hospital',
'injury',
'instruction',
'maintenance',
'manufacturer',
'meal',
'perception',
'pie',
'poem',
'presence',
'proposal',
'reception',
'replacement',
'revolution',
'river',
'son',
'speech',
'tea',
'village',
'warning',
'winner',
'worker',
'writer',
'assistance',
'breath',
'buyer',
'chest',
'chocolate',
'conclusion',
'contribution',
'cookie',
'courage',
'dad',
'desk',
'drawer',
'establishment',
'examination',
'garbage',
'grocery',
'honey',
'impression',
'improvement',
'independence',
'insect',
'inspection',
'inspector',
'king',
'ladder',
'menu',
'penalty',
'piano',
'potato',
'profession',
'professor',
'quantity',
'reaction',
'requirement',
'salad',
'sister',
'supermarket',
'tongue',
'weakness',
'wedding',
'affair',
'ambition',
'analyst',
'apple',
'assignment',
'assistant',
'bathroom',
'bedroom',
'beer',
'birthday',
'celebration',
'championship',
'cheek',
'client',
'consequence',
'departure',
'diamond',
'dirt',
'ear',
'fortune',
'friendship',
'funeral',
'gene',
'girlfriend',
'hat',
'indication',
'intention',
'lady',
'midnight',
'negotiation',
'obligation',
'passenger',
'pizza',
'platform',
'poet',
'pollution',
'recognition',
'reputation',
'shirt',
'sir',
'speaker',
'stranger',
'surgery',
'sympathy',
'tale',
'throat',
'trainer',
'uncle',
'youth',
'time',
'work',
'film',
'water',
'money',
'example',
'while',
'business',
'study',
'game',
'life',
'form',
'air',
'day',
'place',
'number',
'part',
'field',
'fish',
'back',
'process',
'heat',
'hand',
'experience',
'job',
'book',
'end',
'point',
'type',
'home',
'economy',
'value',
'body',
'market',
'guide',
'interest',
'state',
'radio',
'course',
'company',
'price',
'size',
'card',
'list',
'mind',
'trade',
'line',
'care',
'group',
'risk',
'word',
'fat',
'force',
'key',
'light',
'training',
'name',
'school',
'top',
'amount',
'level',
'order',
'practice',
'research',
'sense',
'service',
'piece',
'web',
'boss',
'sport',
'fun',
'house',
'page',
'term',
'test',
'answer',
'sound',
'focus',
'matter',
'kind',
'soil',
'board',
'oil',
'picture',
'access',
'garden',
'range',
'rate',
'reason',
'future',
'site',
'demand',
'exercise',
'image',
'case',
'cause',
'coast',
'action',
'age',
'bad',
'boat',
'record',
'result',
'section',
'building',
'mouse',
'cash',
'class',
'nothing',
'period',
'plan',
'store',
'tax',
'side',
'subject',
'space',
'rule',
'stock',
'weather',
'chance',
'figure',
'man',
'model',
'source',
'beginning',
'earth',
'program',
'chicken',
'design',
'feature',
'head',
'material',
'purpose',
'question',
'rock',
'salt',
'act',
'birth',
'car',
'dog',
'object',
'scale',
'sun',
'note',
'profit',
'rent',
'speed',
'style',
'war',
'bank',
'craft',
'half',
'inside',
'outside',
'standard',
'bus',
'exchange',
'eye',
'fire',
'position',
'pressure',
'stress',
'advantage',
'benefit',
'box',
'frame',
'issue',
'step',
'cycle',
'face',
'item',
'metal',
'paint',
'review',
'room',
'screen',
'structure',
'view',
'account',
'ball',
'discipline',
'medium',
'share',
'balance',
'bit',
'black',
'bottom',
'choice',
'gift',
'impact',
'machine',
'shape',
'tool',
'wind',
'address',
'average',
'career',
'culture',
'morning',
'pot',
'sign',
'table',
'task',
'condition',
'contact',
'credit',
'egg',
'hope',
'ice',
'network',
'north',
'square',
'attempt',
'date',
'effect',
'link',
'post',
'star',
'voice',
'capital',
'challenge',
'friend',
'self',
'shot',
'brush',
'couple',
'exit',
'front',
'function',
'lack',
'living',
'plant',
'plastic',
'spot',
'summer',
'taste',
'theme',
'track',
'wing',
'brain',
'button',
'click',
'desire',
'foot',
'gas',
'influence',
'notice',
'rain',
'wall',
'base',
'damage',
'distance',
'feeling',
'pair',
'savings',
'staff',
'sugar',
'target',
'text',
'animal',
'author',
'budget',
'discount',
'file',
'ground',
'lesson',
'minute',
'officer',
'phase',
'reference',
'register',
'sky',
'stage',
'stick',
'title',
'trouble',
'bowl',
'bridge',
'campaign',
'character',
'club',
'edge',
'evidence',
'fan',
'letter',
'lock',
'maximum',
'novel',
'option',
'pack',
'park',
'plenty',
'quarter',
'skin',
'sort',
'weight',
'baby',
'background',
'carry',
'dish',
'factor',
'fruit',
'glass',
'joint',
'master',
'muscle',
'red',
'strength',
'traffic',
'trip',
'vegetable',
'appeal',
'chart',
'gear',
'ideal',
'kitchen',
'land',
'log',
'mother',
'net',
'party',
'principle',
'relative',
'sale',
'season',
'signal',
'spirit',
'street',
'tree',
'wave',
'belt',
'bench',
'commission',
'copy',
'drop',
'minimum',
'path',
'progress',
'project',
'sea',
'south',
'status',
'stuff',
'ticket',
'tour',
'angle',
'blue',
'breakfast',
'confidence',
'daughter',
'degree',
'doctor',
'dot',
'dream',
'duty',
'essay',
'father',
'fee',
'finance',
'hour',
'juice',
'luck',
'milk',
'mouth',
'peace',
'pipe',
'stable',
'storm',
'substance',
'team',
'trick',
'afternoon',
'bat',
'beach',
'blank',
'catch',
'chain',
'consideration',
'cream',
'crew',
'detail',
'gold',
'interview',
'kid',
'mark',
'mission',
'pain',
'pleasure',
'score',
'screw',
'sex',
'shop',
'shower',
'suit',
'tone',
'window',
'agent',
'band',
'bath',
'block',
'bone',
'calendar',
'candidate',
'cap',
'coat',
'contest',
'corner',
'court',
'cup',
'district',
'door',
'east',
'finger',
'garage',
'guarantee',
'hole',
'hook',
'implement',
'layer',
'lecture',
'lie',
'manner',
'meeting',
'nose',
'parking',
'partner',
'profile',
'rice',
'routine',
'schedule',
'swimming',
'telephone',
'tip',
'winter',
'airline',
'bag',
'battle',
'bed',
'bill',
'bother',
'cake',
'code',
'curve',
'designer',
'dimension',
'dress',
'ease',
'emergency',
'evening',
'extension',
'farm',
'fight',
'gap',
'grade',
'holiday',
'horror',
'horse',
'host',
'husband',
'loan',
'mistake',
'mountain',
'nail',
'noise',
'occasion',
'package',
'patient',
'pause',
'phrase',
'proof',
'race',
'relief',
'sand',
'sentence',
'shoulder',
'smoke',
'stomach',
'string',
'tourist',
'towel',
'vacation',
'west',
'wheel',
'wine',
'arm',
'aside',
'associate',
'bet',
'blow',
'border',
'branch',
'breast',
'brother',
'buddy',
'bunch',
'chip',
'coach',
'cross',
'document',
'draft',
'dust',
'expert',
'floor',
'god',
'golf',
'habit',
'iron',
'judge',
'knife',
'landscape',
'league',
'mail',
'mess',
'native',
'opening',
'parent',
'pattern',
'pin',
'pool',
'pound',
'request',
'salary',
'shame',
'shelter',
'shoe',
'silver',
'tackle',
'tank',
'trust',
'assist',
'bake',
'bar',
'bell',
'bike',
'blame',
'boy',
'brick',
'chair',
'closet',
'clue',
'collar',
'comment',
'conference',
'devil',
'diet',
'fear',
'fuel',
'glove',
'jacket',
'lunch',
'monitor',
'mortgage',
'nurse',
'pace',
'panic',
'peak',
'plane',
'reward',
'row',
'sandwich',
'shock',
'spite',
'spray',
'surprise',
'till',
'transition',
'weekend',
'welcome',
'yard',
'alarm',
'bend',
'bicycle',
'bite',
'blind',
'bottle',
'cable',
'candle',
'clerk',
'cloud',
'concert',
'counter',
'flower',
'grandfather',
'harm',
'knee',
'lawyer',
'leather',
'load',
'mirror',
'neck',
'pension',
'plate',
'purple',
'ruin',
'ship',
'skirt',
'slice',
'snow',
'specialist',
'stroke',
'switch',
'trash',
'tune',
'zone',
'anger',
'award',
'bid',
'bitter',
'boot',
'bug',
'camp',
'candy',
'carpet',
'cat',
'champion',
'channel',
'clock',
'comfort',
'cow',
'crack',
'engineer',
'entrance',
'fault',
'grass',
'guy',
'hell',
'highlight',
'incident',
'island',
'joke',
'jury',
'leg',
'lip',
'mate',
'motor',
'nerve',
'passage',
'pen',
'pride',
'priest',
'prize',
'promise',
'resident',
'resort',
'ring',
'roof',
'rope',
'sail',
'scheme',
'script',
'sock',
'station',
'toe',
'tower',
'truck',
'witness',
'a',
'you',
'it',
'can',
'will',
'if',
'one',
'many',
'most',
'other',
'use',
'make',
'good',
'look',
'help',
'go',
'great',
'being',
'few',
'might',
'still',
'public',
'read',
'keep',
'start',
'give',
'human',
'local',
'general',
'she',
'specific',
'long',
'play',
'feel',
'high',
'tonight',
'put',
'common',
'set',
'change',
'simple',
'past',
'big',
'possible',
'particular',
'today',
'major',
'personal',
'current',
'national',
'cut',
'natural',
'physical',
'show',
'try',
'check',
'second',
'call',
'move',
'pay',
'let',
'increase',
'single',
'individual',
'turn',
'ask',
'buy',
'guard',
'hold',
'main',
'offer',
'potential',
'professional',
'international',
'travel',
'cook',
'alternative',
'following',
'special',
'working',
'whole',
'dance',
'excuse',
'cold',
'commercial',
'low',
'purchase',
'deal',
'primary',
'worth',
'fall',
'necessary',
'positive',
'produce',
'search',
'present',
'spend',
'talk',
'creative',
'tell',
'cost',
'drive',
'green',
'support',
'glad',
'remove',
'return',
'run',
'complex',
'due',
'effective',
'middle',
'regular',
'reserve',
'independent',
'leave',
'original',
'reach',
'rest',
'serve',
'watch',
'beautiful',
'charge',
'active',
'break',
'negative',
'safe',
'stay',
'visit',
'visual',
'affect',
'cover',
'report',
'rise',
'walk',
'white',
'beyond',
'junior',
'pick',
'unique',
'anything',
'classic',
'final',
'lift',
'mix',
'private',
'stop',
'teach',
'western',
'concern',
'familiar',
'fly',
'official',
'broad',
'comfortable',
'gain',
'maybe',
'rich',
'save',
'stand',
'young',
'heavy',
'hello',
'lead',
'listen',
'valuable',
'worry',
'handle',
'leading',
'meet',
'release',
'sell',
'finish',
'normal',
'press',
'ride',
'secret',
'spread',
'spring',
'tough',
'wait',
'brown',
'deep',
'display',
'flow',
'hit',
'objective',
'shoot',
'touch',
'cancel',
'chemical',
'cry',
'dump',
'extreme',
'push',
'conflict',
'eat',
'fill',
'formal',
'jump',
'kick',
'opposite',
'pass',
'pitch',
'remote',
'total',
'treat',
'vast',
'abuse',
'beat',
'burn',
'deposit',
'print',
'raise',
'sleep',
'somewhere',
'advance',
'anywhere',
'consist',
'dark',
'double',
'draw',
'equal',
'fix',
'hire',
'internal',
'join',
'kill',
'sensitive',
'tap',
'win',
'attack',
'claim',
'constant',
'drag',
'drink',
'guess',
'minor',
'pull',
'raw',
'soft',
'solid',
'wear',
'weird',
'wonder',
'annual',
'count',
'dead',
'doubt',
'feed',
'forever',
'impress',
'nobody',
'repeat',
'round',
'sing',
'slide',
'strip',
'whereas',
'wish',
'combine',
'command',
'dig',
'divide',
'equivalent',
'hang',
'hunt',
'initial',
'march',
'mention',
'spiritual',
'survey',
'tie',
'adult',
'brief',
'crazy',
'escape',
'gather',
'hate',
'prior',
'repair',
'rough',
'sad',
'scratch',
'sick',
'strike',
'employ',
'external',
'hurt',
'illegal',
'laugh',
'lay',
'mobile',
'nasty',
'ordinary',
'respond',
'royal',
'senior',
'split',
'strain',
'struggle',
'swim',
'train',
'upper',
'wash',
'yellow',
'convert',
'crash',
'dependent',
'fold',
'funny',
'grab',
'hide',
'miss',
'permit',
'quote',
'recover',
'resolve',
'roll',
'sink',
'slip',
'spare',
'suspect',
'sweet',
'swing',
'twist',
'upstairs',
'usual',
'abroad',
'brave',
'calm',
'concentrate',
'estimate',
'grand',
'male',
'mine',
'prompt',
'quiet',
'refuse',
'regret',
'reveal',
'rush',
'shake',
'shift',
'shine',
'steal',
'suck',
'surround',
'anybody',
'bear',
'brilliant',
'dare',
'dear',
'delay',
'drunk',
'female',
'hurry',
'inevitable',
'invite',
'kiss',
'neat',
'pop',
'punch',
'quit',
'reply',
'representative',
'resist',
'rip',
'rub',
'silly',
'smile',
'spell',
'stretch',
'stupid',
'tear',
'temporary',
'tomorrow',
'wake',
'wrap',
'yesterday',
]
def getRandomDaysTimeStamp(n):
    """Return *n* random timestamp strings in the project's default format.

    Each timestamp is built from independently-drawn calendar fields; the
    ranges are conservative (days 1-28, months 1-12, ...) so every tuple is a
    valid ``struct_time`` argument for ``time.strftime``.

    :param n: number of timestamp strings to generate
    :return: list of ``n`` formatted timestamp strings
    """
    import sys
    sys.path.append('../..')  # make the project root importable when run from the tests dir
    from time import strftime  # unused datetime/sleep/gmtime imports removed
    from Defaults.base_functions import DFLT_TIME_STAMP_FORMAT

    years = [random.choice(range(1950, 2019)) for x in range(n)]
    months = [random.choice(range(1, 13)) for x in range(n)]
    days = [random.choice(range(1, 29)) for x in range(n)]
    hours = [random.choice(range(1, 24)) for x in range(n)]
    mins = [random.choice(range(1, 60)) for x in range(n)]
    sec = [random.choice(range(1, 60)) for x in range(n)]
    wday = [random.choice(range(1, 7)) for x in range(n)]
    yday = [random.choice(range(1, 365)) for x in range(n)]
    isdst = [1 for x in range(n)]
    # Assemble one 9-field struct_time-like tuple per entry and format it.
    dateTime_Inputs = [
        strftime(DFLT_TIME_STAMP_FORMAT,
                 (years[x], months[x], days[x], hours[x], mins[x], sec[x],
                  wday[x], yday[x], isdst[x]))
        for x in range(n)
    ]
    return dateTime_Inputs
def getRandomTitleContent(n):
    """Build *n* random pseudo-sentences of 0-10 words drawn from ``nouns``.

    A sentence may be empty (zero words), matching the original behaviour.

    :param n: number of strings to generate
    :return: list of ``n`` space-joined word sequences
    """
    contents = []
    for _ in range(n):
        word_total = random.randint(0, 10)
        words = (nouns[random.randrange(0, len(nouns))] for _ in range(word_total))
        contents.append(' '.join(words))
    return contents
def getRandomTitles(n):
    """Return *n* title-like strings: random content prefixed by a '#' variant.

    :param n: number of titles to generate
    :return: list of ``n`` strings, each beginning with '#', '# ' or ' #'
    """
    prefix_choices = ('#', '# ', ' #')
    titles = []
    for content in getRandomTitleContent(n):
        titles.append(random.choice(prefix_choices) + content)
    return titles
# Module-level fixture generation: runs at import time.
# 100 timestamps + 100 '#'-prefixed titles + 100 plain word strings.
randomDaysTimeStamp = getRandomDaysTimeStamp(100)
actualTitles = getRandomTitles(100)
notActualTitles = getRandomTitleContent(100)
# Combined pool of inputs for the title-detection tests; only `actualTitles`
# entries carry a title-indicator prefix.
test_inputs = randomDaysTimeStamp + actualTitles + notActualTitles
ef2389fc1ad527a98937354b76b2803c89a71ade | 13,785 | py | Python | elegantrl/agents/AgentBase.py | XFFXFF/ElegantRL | d31d3a55f85d7301a028302bf56212a03f4ffb8c | [
"Apache-2.0"
] | 1 | 2022-03-19T09:29:59.000Z | 2022-03-19T09:29:59.000Z | elegantrl/agents/AgentBase.py | XFFXFF/ElegantRL | d31d3a55f85d7301a028302bf56212a03f4ffb8c | [
"Apache-2.0"
] | null | null | null | elegantrl/agents/AgentBase.py | XFFXFF/ElegantRL | d31d3a55f85d7301a028302bf56212a03f4ffb8c | [
"Apache-2.0"
] | null | null | null | import os
from copy import deepcopy
import numpy as np
import numpy.random as rd
import torch
from torch.nn.utils import clip_grad_norm_
class AgentBase:  # [ElegantRL.2021.11.11]
    """Base class for ElegantRL DRL agents.

    Holds the actor/critic networks, their (optional) target copies and
    optimizers, and the exploration loops for single and vectorized
    environments. Subclasses set ``ClassCri``/``ClassAct`` and implement
    ``update_net``.
    """

    def __init__(
        self,
        net_dim=256,
        state_dim=8,
        action_dim=2,
        reward_scale=1.0,
        gamma=0.99,
        learning_rate=1e-4,
        if_per_or_gae=False,
        env_num=1,
        gpu_id=0,
    ):
        """initialize
        replace by different DRL algorithms
        explict call self.init() for multiprocessing.
        :param net_dim: the dimension of networks (the width of neural networks)
        :param state_dim: the dimension of state (the number of state vector)
        :param action_dim: the dimension of action (the number of discrete action)
        :param reward_scale: scale the reward to get a appropriate scale Q value
        :param gamma: the discount factor of Reinforcement Learning
        :param learning_rate: learning rate of optimizer
        :param if_per_or_gae: PER (off-policy) or GAE (on-policy) for sparse reward
        :param env_num: the env number of VectorEnv. env_num == 1 means don't use VectorEnv
        :param gpu_id: the gpu_id of the training device. Use CPU when cuda is not available.
        """
        # Most attributes stay None until self.init() is called explicitly
        # (deferred so the agent can be constructed before multiprocessing fork).
        self.gamma = None
        self.states = None
        self.device = None
        self.action_dim = None
        self.reward_scale = None
        self.if_off_policy = True
        self.env_num = env_num
        self.explore_rate = 1.0  # probability of adding exploration noise
        self.explore_noise = 0.1  # stddev of the Gaussian exploration noise
        self.clip_grad_norm = 4.0  # max gradient norm used in optim_update
        # self.amp_scale = None  # automatic mixed precision
        """attribute"""
        self.explore_env = None
        self.get_obj_critic = None
        self.criterion = torch.nn.SmoothL1Loss()
        # cri/act and their target/optimizer slots are filled in init();
        # ClassCri/ClassAct are set by subclasses.
        self.cri = (
            self.cri_target
        ) = self.if_use_cri_target = self.cri_optim = self.ClassCri = None
        self.act = (
            self.act_target
        ) = self.if_use_act_target = self.act_optim = self.ClassAct = None
        assert isinstance(gpu_id, int)
        assert isinstance(env_num, int)
        assert isinstance(net_dim, int)
        assert isinstance(state_dim, int)
        assert isinstance(action_dim, int)
        assert isinstance(if_per_or_gae, bool)
        assert isinstance(gamma, float)
        assert isinstance(reward_scale, float)
        assert isinstance(learning_rate, float)

    def init(
        self,
        net_dim=256,
        state_dim=8,
        action_dim=2,
        reward_scale=1.0,
        gamma=0.99,
        learning_rate=1e-4,
        if_per_or_gae=False,
        env_num=1,
        gpu_id=0,
    ):
        """initialize the self.object in `__init__()`
        replace by different DRL algorithms
        explict call self.init() for multiprocessing.
        :param net_dim: the dimension of networks (the width of neural networks)
        :param state_dim: the dimension of state (the number of state vector)
        :param action_dim: the dimension of action (the number of discrete action)
        :param reward_scale: scale the reward to get a appropriate scale Q value
        :param gamma: the discount factor of Reinforcement Learning
        :param learning_rate: learning rate of optimizer
        :param if_per_or_gae: PER (off-policy) or GAE (on-policy) for sparse reward
        :param env_num: the env number of VectorEnv. env_num == 1 means don't use VectorEnv
        :param gpu_id: the gpu_id of the training device. Use CPU when cuda is not available.
        """
        self.gamma = gamma
        self.action_dim = action_dim
        self.reward_scale = reward_scale
        # self.amp_scale = torch.cuda.amp.GradScaler()
        self.device = torch.device(
            f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu"
        )
        # Critic gets a slightly wider network (x1.25) than the actor.
        self.cri = self.ClassCri(int(net_dim * 1.25), state_dim, action_dim).to(
            self.device
        )
        # If there is no separate actor class, the actor IS the critic
        # (e.g. value-based algorithms such as DQN).
        self.act = (
            self.ClassAct(net_dim, state_dim, action_dim).to(self.device)
            if self.ClassAct
            else self.cri
        )
        self.cri_target = deepcopy(self.cri) if self.if_use_cri_target else self.cri
        self.act_target = deepcopy(self.act) if self.if_use_act_target else self.act
        self.cri_optim = torch.optim.Adam(self.cri.parameters(), learning_rate)
        self.act_optim = (
            torch.optim.Adam(self.act.parameters(), learning_rate)
            if self.ClassAct
            else self.cri
        )

        def get_optim_param(
            optim,
        ):  # optim = torch.optim.Adam(network_param, learning_rate)
            # Collect every tensor in the optimizer state (e.g. Adam moments)
            # so it can be serialized/shared like network parameters.
            params_list = []
            for params_dict in optim.state_dict()["state"].values():
                params_list.extend(
                    [t for t in params_dict.values() if isinstance(t, torch.Tensor)]
                )
            return params_list

        from types import MethodType

        # Monkey-patch `.parameters()` onto the optimizers so they expose their
        # state tensors through the same interface as an nn.Module.
        self.act_optim.parameters = MethodType(get_optim_param, self.act_optim)
        self.cri_optim.parameters = MethodType(get_optim_param, self.cri_optim)
        assert isinstance(if_per_or_gae, bool)
        # Pick the exploration loop matching the environment layout.
        if env_num == 1:
            self.explore_env = self.explore_one_env
        else:
            self.explore_env = self.explore_vec_env

    def select_action(self, state: np.ndarray) -> np.ndarray:
        """Select an action via a given state.
        :param state: a state in a shape (state_dim, ).
        :return: action [array], action.shape == (action_dim, ) where each action is clipped into range(-1, 1).
        """
        # Add a batch dimension for the network, strip it implicitly on return.
        s_tensor = torch.as_tensor(state[np.newaxis], device=self.device)
        a_tensor = self.act(s_tensor)
        return a_tensor.detach().cpu().numpy()

    def select_actions(self, state: torch.Tensor) -> torch.Tensor:
        """Select continuous actions for exploration
        :param state: states.shape==(batch_size, state_dim, )
        :return: actions.shape==(batch_size, action_dim, ), -1 < action < +1
        """
        action = self.act(state.to(self.device))
        if rd.rand() < self.explore_rate:  # epsilon-greedy
            # Gaussian exploration noise applied to the whole batch, then clipped.
            action = (action + torch.randn_like(action) * self.explore_noise).clamp(
                -1, 1
            )
        return action.detach().cpu()

    def explore_one_env(self, env, target_step: int) -> list:
        """actor explores in single Env, then returns the trajectory (env transitions) for ReplayBuffer
        :param env: RL training environment. env.reset() env.step()
        :param target_step: explored target_step number of step in env
        :return: `[traj_env_0, ]`
        `traj_env_0 = [(state, reward, mask, action, noise), ...]` for on-policy
        `traj_env_0 = [(state, other), ...]` for off-policy
        """
        state = self.states[0]
        traj = []
        for _ in range(target_step):
            ten_state = torch.as_tensor(state, dtype=torch.float32)
            ten_action = self.select_actions(ten_state.unsqueeze(0))[0]
            action = ten_action.numpy()
            next_s, reward, done, _ = env.step(action)
            # Pack (reward, done, action) into one flat "other" tensor.
            ten_other = torch.empty(2 + self.action_dim)
            ten_other[0] = reward
            ten_other[1] = done
            ten_other[2:] = ten_action
            traj.append((ten_state, ten_other))
            state = env.reset() if done else next_s
        self.states[0] = state  # persist the final state for the next call
        traj_state = torch.stack([item[0] for item in traj])
        traj_other = torch.stack([item[1] for item in traj])
        traj_list = [
            (traj_state, traj_other),
        ]
        return self.convert_trajectory(traj_list)  # [traj_env_0, ]

    def explore_vec_env(self, env, target_step: int) -> list:
        """actor explores in VectorEnv, then returns the trajectory (env transitions) for ReplayBuffer
        :param env: RL training environment. env.reset() env.step(). It should be a vector env.
        :param target_step: explored target_step number of step in env
        :return: `[traj_env_0, ]`
        `traj_env_0 = [(state, reward, mask, action, noise), ...]` for on-policy
        `traj_env_0 = [(state, other), ...]` for off-policy
        """
        ten_states = self.states
        traj = []
        for _ in range(target_step):
            ten_actions = self.select_actions(ten_states)
            ten_next_states, ten_rewards, ten_dones = env.step(ten_actions)
            ten_others = torch.cat(
                (ten_rewards.unsqueeze(0), ten_dones.unsqueeze(0), ten_actions)
            )
            traj.append((ten_states, ten_others))
            ten_states = ten_next_states
        self.states = ten_states  # persist the final states for the next call
        # traj = [(env_ten, ...), ...], env_ten = (env1_ten, env2_ten, ...)
        traj_state = torch.stack([item[0] for item in traj])
        traj_other = torch.stack([item[1] for item in traj])
        # Split the stacked batch back into one (state, other) pair per env.
        traj_list = [
            (traj_state[:, env_i, :], traj_other[:, env_i, :])
            for env_i in range(len(self.states))
        ]
        # traj_list = [traj_env_0, ...], traj_env_0 = (ten_state, ten_other)
        return self.convert_trajectory(traj_list)  # [traj_env_0, ...]

    def update_net(
        self, buffer, batch_size: int, repeat_times: float, soft_update_tau: float
    ) -> tuple:
        """update the neural network by sampling batch data from ReplayBuffer
        :param buffer: Experience replay buffer
        :param batch_size: sample batch_size of data for Stochastic Gradient Descent
        :param repeat_times: `batch_sampling_times = int(target_step * repeat_times / batch_size)`
        :param soft_update_tau: soft target update: `target_net = target_net * (1-tau) + current_net * tau`,
        """
        # Intentionally a stub: each concrete agent overrides this method.
        # NOTE(review): the `-> tuple` annotation is not honored here (returns None).

    def optim_update(self, optimizer, objective):  # [ElegantRL 2021.11.11]
        """minimize the optimization objective via update the network parameters
        :param optimizer: `optimizer = torch.optim.SGD(net.parameters(), learning_rate)`
        :param objective: `objective = net(...)` the optimization objective, sometimes is a loss function.
        """
        optimizer.zero_grad()
        objective.backward()
        # Clip gradients of the optimizer's first param group before stepping.
        clip_grad_norm_(
            parameters=optimizer.param_groups[0]["params"], max_norm=self.clip_grad_norm
        )
        optimizer.step()

    # def optim_update_amp(self, optimizer, objective):  # automatic mixed precision
    #     """minimize the optimization objective via update the network parameters
    #
    #     amp: Automatic Mixed Precision
    #
    #     :param optimizer: `optimizer = torch.optim.SGD(net.parameters(), learning_rate)`
    #     :param objective: `objective = net(...)` the optimization objective, sometimes is a loss function.
    #     :param params: `params = net.parameters()` the network parameters which need to be updated.
    #     """
    #     # self.amp_scale = torch.cuda.amp.GradScaler()
    #
    #     optimizer.zero_grad()
    #     self.amp_scale.scale(objective).backward()  # loss.backward()
    #     self.amp_scale.unscale_(optimizer)  # amp
    #
    #     # from torch.nn.utils import clip_grad_norm_
    #     # clip_grad_norm_(model.parameters(), max_norm=3.0)  # amp, clip_grad_norm_
    #     self.amp_scale.step(optimizer)  # optimizer.step()
    #     self.amp_scale.update()  # optimizer.step()

    @staticmethod
    def soft_update(target_net, current_net, tau):
        """soft update target network via current network
        :param target_net: update target network via current network to make training more stable.
        :param current_net: current network update via an optimizer
        :param tau: tau of soft target update: `target_net = target_net * (1-tau) + current_net * tau`
        """
        for tar, cur in zip(target_net.parameters(), current_net.parameters()):
            tar.data.copy_(cur.data * tau + tar.data * (1.0 - tau))

    def save_or_load_agent(self, cwd: str, if_save: bool):
        """save or load training files for Agent
        :param cwd: Current Working Directory. ElegantRL save training files in CWD.
        :param if_save: True: save files. False: load files.
        """

        def load_torch_file(model_or_optim, _path):
            # map_location keeps CPU loading possible for GPU-saved checkpoints
            state_dict = torch.load(_path, map_location=lambda storage, loc: storage)
            model_or_optim.load_state_dict(state_dict)

        name_obj_list = [
            ("actor", self.act),
            ("act_target", self.act_target),
            ("act_optim", self.act_optim),
            ("critic", self.cri),
            ("cri_target", self.cri_target),
            ("cri_optim", self.cri_optim),
        ]
        # Drop any slots that were never initialized (still None).
        name_obj_list = [(name, obj) for name, obj in name_obj_list if obj is not None]
        if if_save:
            for name, obj in name_obj_list:
                save_path = f"{cwd}/{name}.pth"
                torch.save(obj.state_dict(), save_path)
        else:
            # Missing files are silently skipped (partial checkpoints allowed).
            for name, obj in name_obj_list:
                save_path = f"{cwd}/{name}.pth"
                load_torch_file(obj, save_path) if os.path.isfile(save_path) else None

    def convert_trajectory(self, traj_list: list) -> list:  # off-policy
        """convert trajectory (env exploration type) to trajectory (replay buffer type)
        convert `other = concat(( reward, done, ...))`
        to `other = concat((scale_reward, mask, ...))`
        :param traj_list: `traj_list = [(tensor_state, other_state), ...]`
        :return: `traj_list = [(tensor_state, other_state), ...]`
        """
        # In-place rewrite: column 0 becomes the scaled reward, column 1 the
        # discount mask (0 at terminal steps, gamma otherwise).
        for ten_state, ten_other in traj_list:
            ten_other[:, 0] = ten_other[:, 0] * self.reward_scale  # ten_reward
            ten_other[:, 1] = (
                1.0 - ten_other[:, 1]
            ) * self.gamma  # ten_mask = (1.0 - ary_done) * gamma
        return traj_list
| 40.42522 | 111 | 0.615742 |
c281569fd7bd9996748769613fb40a317ffadf26 | 2,754 | py | Python | game/Estructuras.py | GeinerGV/TS1_ProyectoFinal | b433a9b7a9bafe82186fa6b90b37fbbe35cbf12d | [
"MIT"
] | null | null | null | game/Estructuras.py | GeinerGV/TS1_ProyectoFinal | b433a9b7a9bafe82186fa6b90b37fbbe35cbf12d | [
"MIT"
] | null | null | null | game/Estructuras.py | GeinerGV/TS1_ProyectoFinal | b433a9b7a9bafe82186fa6b90b37fbbe35cbf12d | [
"MIT"
] | null | null | null | Fondos_de_escenas = ["#006CFF", "blue"]
Estructuras_Escenarios = [
{
"color": {"1": ("#00E8FF", "#00FFB9"), "-1": "#AB6000"}
},
{
"celdas":[
[["1-34"]],
[(1,2,33,34)],
[(1,2,33,34)],
[(1,2,33,34),(24,25)],
[(1,2,33,34)],
[(1,2,33,34)],
[(1,2,33,34)],
[(1,2,33,34),(14,15)],
[(1,2,33,34)],
[(1,2,33,34)],
[(1,2,33,34)],
[(1,2,33,34)],
[(1,2,33,34)],
[(1,2,33,34)],
[(1,2,33,34)],
[(1,2,33,34),(19,20)],
[(1,2,33,34)],
[(1,2,33,34)],
[(1,2,33,34)],
[(1,2,33,34)],
[["1-34"]]
]
},
{
"bgcolor": "red",
"estructuras": [
[(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34),()],
[(1,14,15,16,34),()],
[(1,34),()],
[(1,34),(24,25)],
[(1,34),()],
[(1,34),()],
[(1,34),()],
[(1,34),(14,15)],
[(1,17,18,19,20,34),()],
[(1,17,18,19,20,34),()],
[(1,17,18,19,20,34),()],
[(1,34),()],
[(1,34),()],
[(1,34),()],
[(1,34),()],
[(1,34),(20,21,22)],
[(1,34),(20,21,22)],
[(1,34),(20,21,22)],
[(1,34),()],
[(1,34),()],
[(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34),()]
]
},
{
"bgcolor":"green",
"estructuras":[
[(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34),()],
[(1,2,3,4,5,6,7,8,27,28,29,30,31,32,33,34),()],
[(1,2,3,4,5,6,7,28,29,30,31,32,33,34),()],
[(1,2,3,4,5,6,29,30,31,32,33,34),()],
[(1,2,3,4,5,30,31,32,33,34),()],
[(1,2,3,4,5,30,31,32,33,34),()],
[(1,2,3,4,5,30,31,32,33,34),()],
[(1,2,3,4,5,6,29,30,31,32,33,34),()],
[(1,2,3,4,5,6,29,30,31,32,33,34),()],
[(1,2,3,4,5,30,31,32,33,34),()],
[(1,2,3,4,5,30,31,32,33,34),()],
[(1,2),()],
[(1,2),()],
[(1,2,3),()],
[(1,2),()],
[(1),()],
[(1),()],
[(1,2,3,4,5,6,29,30,31,32,33,34),()],
[(1,2,3,4,5,6,7,28,29,30,31,32,33,34),()],
[(1,2,3,4,5,6,7,8,27,28,29,30,31,32,33,34),()],
[(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34),()]
]
}
]
| 28.989474 | 112 | 0.309731 |
945730b39510f0268f163d7175cd7c244c0ec34a | 9,050 | py | Python | test/augmentation/test_augmentation_mix.py | EStorm21/kornia | b2bba7950d748ba0b8ce0cc68035a248799a1044 | [
"ECL-2.0",
"Apache-2.0"
] | 4,894 | 2019-10-24T15:51:39.000Z | 2022-03-30T22:58:33.000Z | test/augmentation/test_augmentation_mix.py | EStorm21/kornia | b2bba7950d748ba0b8ce0cc68035a248799a1044 | [
"ECL-2.0",
"Apache-2.0"
] | 912 | 2019-10-24T16:08:42.000Z | 2022-03-31T19:07:09.000Z | test/augmentation/test_augmentation_mix.py | EStorm21/kornia | b2bba7950d748ba0b8ce0cc68035a248799a1044 | [
"ECL-2.0",
"Apache-2.0"
] | 557 | 2019-10-24T16:02:43.000Z | 2022-03-28T07:33:33.000Z | import torch
from kornia.augmentation import RandomCutMix, RandomMixUp
from kornia.testing import assert_close
class TestRandomMixUp:
    """Unit tests for kornia's RandomMixUp augmentation.
    The ``device``/``dtype`` parameters are injected by the test harness
    (presumably pytest fixtures from the suite's conftest -- confirm there).
    The expected ``lam`` values are tied to the fixed RNG seeds, so the exact
    sequence of torch RNG calls must not change.
    """
    def test_smoke(self, device, dtype):
        # The default-constructed transform must expose its parameters via repr().
        f = RandomMixUp()
        repr = "RandomMixUp(lambda_val=None, p=1.0, p_batch=1.0, same_on_batch=False)"
        assert str(f) == repr
    def test_random_mixup_p1(self, device, dtype):
        # p=1.0 forces mixup on every batch element.
        torch.manual_seed(0)
        f = RandomMixUp(p=1.0)
        # Batch of two 1x3x4 images: all-ones and all-zeros.
        input = torch.stack(
            [torch.ones(1, 3, 4, device=device, dtype=dtype), torch.zeros(1, 3, 4, device=device, dtype=dtype)]
        )
        label = torch.tensor([1, 0], device=device)
        # Mixing coefficients produced for seed 0.
        lam = torch.tensor([0.1320, 0.3074], device=device, dtype=dtype)
        expected = torch.stack(
            [
                torch.ones(1, 3, 4, device=device, dtype=dtype) * (1 - lam[0]),
                torch.ones(1, 3, 4, device=device, dtype=dtype) * lam[1],
            ]
        )
        out_image, out_label = f(input, label)
        assert_close(out_image, expected, rtol=1e-4, atol=1e-4)
        # out_label columns: original label, permuted label, lambda.
        assert (out_label[:, 0] == label).all()
        assert (out_label[:, 1] == torch.tensor([0, 1], device=device, dtype=dtype)).all()
        assert_close(out_label[:, 2], lam, rtol=1e-4, atol=1e-4)
    def test_random_mixup_p0(self, device, dtype):
        # p=0.0 disables the augmentation: images and labels pass through untouched.
        torch.manual_seed(0)
        f = RandomMixUp(p=0.0)
        input = torch.stack(
            [torch.ones(1, 3, 4, device=device, dtype=dtype), torch.zeros(1, 3, 4, device=device, dtype=dtype)]
        )
        label = torch.tensor([1, 0], device=device)
        # TODO(jian): where is it used ?
        # lam = torch.tensor([0.0, 0.0], device=device, dtype=dtype)
        expected = input.clone()
        out_image, out_label = f(input, label)
        assert_close(out_image, expected, rtol=1e-4, atol=1e-4)
        assert (out_label == label).all()
    def test_random_mixup_lam0(self, device, dtype):
        # lambda fixed at 0 keeps the images unchanged even with p=1.0.
        torch.manual_seed(0)
        f = RandomMixUp(lambda_val=(0.0, 0.0), p=1.0)
        input = torch.stack(
            [torch.ones(1, 3, 4, device=device, dtype=dtype), torch.zeros(1, 3, 4, device=device, dtype=dtype)]
        )
        label = torch.tensor([1, 0], device=device)
        lam = torch.tensor([0.0, 0.0], device=device, dtype=dtype)
        expected = input.clone()
        out_image, out_label = f(input, label)
        assert_close(out_image, expected, rtol=1e-4, atol=1e-4)
        assert (out_label[:, 0] == label).all()
        assert (out_label[:, 1] == torch.tensor([0, 1], device=device)).all()
        assert_close(out_label[:, 2], lam, rtol=1e-4, atol=1e-4)
    def test_random_mixup_same_on_batch(self, device, dtype):
        # same_on_batch=True shares one lambda across the whole batch.
        torch.manual_seed(0)
        f = RandomMixUp(same_on_batch=True, p=1.0)
        input = torch.stack(
            [torch.ones(1, 3, 4, device=device, dtype=dtype), torch.zeros(1, 3, 4, device=device, dtype=dtype)]
        )
        label = torch.tensor([1, 0], device=device)
        # Both entries carry the same coefficient for seed 0.
        lam = torch.tensor([0.0885, 0.0885], device=device, dtype=dtype)
        expected = torch.stack(
            [
                torch.ones(1, 3, 4, device=device, dtype=dtype) * (1 - lam[0]),
                torch.ones(1, 3, 4, device=device, dtype=dtype) * lam[1],
            ]
        )
        out_image, out_label = f(input, label)
        assert_close(out_image, expected, rtol=1e-4, atol=1e-4)
        assert (out_label[:, 0] == label).all()
        assert (out_label[:, 1] == torch.tensor([0, 1], device=device, dtype=dtype)).all()
        assert_close(out_label[:, 2], lam, rtol=1e-4, atol=1e-4)
class TestRandomCutMix:
    """Unit tests for kornia's RandomCutMix augmentation.
    The ``device``/``dtype`` parameters are injected by the test harness
    (presumably pytest fixtures -- confirm in conftest). Expected patch
    positions are tied to the fixed RNG seeds used per test.
    """
    def test_smoke(self, device, dtype):
        # The transform must expose its configuration via repr().
        f = RandomCutMix(width=3, height=3)
        repr = (
            "RandomCutMix(num_mix=1, beta=None, cut_size=None, height=3, width=3, p=1.0, "
            "p_batch=1.0, same_on_batch=False)"
        )
        assert str(f) == repr
    def test_random_mixup_p1(self, device, dtype):
        # p=1.0 always cuts a patch from the permuted image into each sample.
        torch.manual_seed(76)
        f = RandomCutMix(width=4, height=3, p=1.0)
        input = torch.stack(
            [torch.ones(1, 3, 4, device=device, dtype=dtype), torch.zeros(1, 3, 4, device=device, dtype=dtype)]
        )
        label = torch.tensor([1, 0], device=device)
        # TODO(jian): where is it used ?
        # lam = torch.tensor([0.1320, 0.3074], device=device, dtype=dtype)
        # Patch locations below are fixed by seed 76.
        expected = torch.tensor(
            [
                [[[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0], [1.0, 1.0, 1.0, 1.0]]],
                [[[1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0]]],
            ],
            device=device,
            dtype=dtype,
        )
        out_image, out_label = f(input, label)
        assert_close(out_image, expected, rtol=1e-4, atol=1e-4)
        # out_label dims: (num_mix, batch, [orig label, permuted label, cut ratio]).
        assert (out_label[0, :, 0] == label).all()
        assert (out_label[0, :, 1] == torch.tensor([0, 1], device=device, dtype=dtype)).all()
        assert (out_label[0, :, 2] == torch.tensor([0.5, 0.5], device=device, dtype=dtype)).all()
    def test_random_mixup_p0(self, device, dtype):
        # p=0.0 disables the augmentation entirely.
        torch.manual_seed(76)
        f = RandomCutMix(p=0.0, width=4, height=3)
        input = torch.stack(
            [torch.ones(1, 3, 4, device=device, dtype=dtype), torch.zeros(1, 3, 4, device=device, dtype=dtype)]
        )
        label = torch.tensor([1, 0], device=device)
        expected = input.clone()
        out_image, out_label = f(input, label)
        assert_close(out_image, expected, rtol=1e-4, atol=1e-4)
        assert (out_label == label).all()
    def test_random_mixup_beta0(self, device, dtype):
        torch.manual_seed(76)
        # beta 0 => resample 0.5 area
        # beta cannot be 0 after torch 1.8.0
        f = RandomCutMix(beta=1e-7, width=4, height=3, p=1.0)
        input = torch.stack(
            [torch.ones(1, 3, 4, device=device, dtype=dtype), torch.zeros(1, 3, 4, device=device, dtype=dtype)]
        )
        label = torch.tensor([1, 0], device=device)
        expected = torch.tensor(
            [
                [[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]],
                [[[1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]],
            ],
            device=device,
            dtype=dtype,
        )
        out_image, out_label = f(input, label)
        assert_close(out_image, expected, rtol=1e-4, atol=1e-4)
        assert (out_label[0, :, 0] == label).all()
        assert (out_label[0, :, 1] == torch.tensor([0, 1], device=device, dtype=dtype)).all()
        # cut area = 4 / 12
        assert_close(out_label[0, :, 2], torch.tensor([0.33333, 0.33333], device=device, dtype=dtype))
    def test_random_mixup_num2(self, device, dtype):
        # num_mix=5 applies five successive cutmix passes to the same batch.
        torch.manual_seed(76)
        f = RandomCutMix(width=4, height=3, num_mix=5, p=1.0)
        input = torch.stack(
            [torch.ones(1, 3, 4, device=device, dtype=dtype), torch.zeros(1, 3, 4, device=device, dtype=dtype)]
        )
        label = torch.tensor([1, 0], device=device)
        expected = torch.tensor(
            [
                [[[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0], [1.0, 1.0, 1.0, 1.0]]],
                [[[1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]],
            ],
            device=device,
            dtype=dtype,
        )
        out_image, out_label = f(input, label)
        assert_close(out_image, expected, rtol=1e-4, atol=1e-4)
        # One (label, permutation, ratio) triple per mixing pass.
        assert (out_label[:, :, 0] == label).all()
        assert (out_label[:, :, 1] == torch.tensor([[1, 0], [1, 0], [1, 0], [1, 0], [0, 1]], device=device)).all()
        assert_close(
            out_label[:, :, 2],
            torch.tensor(
                [[0.0833, 0.3333], [0.0, 0.1667], [0.5, 0.0833], [0.0833, 0.0], [0.5, 0.3333]],
                device=device,
                dtype=dtype,
            ),
            rtol=1e-4,
            atol=1e-4,
        )
    def test_random_mixup_same_on_batch(self, device, dtype):
        # same_on_batch=True uses the same cut region for every batch element.
        torch.manual_seed(42)
        f = RandomCutMix(same_on_batch=True, width=4, height=3, p=1.0)
        input = torch.stack(
            [torch.ones(1, 3, 4, device=device, dtype=dtype), torch.zeros(1, 3, 4, device=device, dtype=dtype)]
        )
        label = torch.tensor([1, 0], device=device)
        # TODO(jian): where is it used ?
        # lam = torch.tensor([0.0885, 0.0885], device=device, dtype=dtype)
        expected = torch.tensor(
            [
                [[[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0], [1.0, 1.0, 1.0, 1.0]]],
                [[[1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0]]],
            ],
            device=device,
            dtype=dtype,
        )
        out_image, out_label = f(input, label)
        assert_close(out_image, expected, rtol=1e-4, atol=1e-4)
        assert (out_label[0, :, 0] == label).all()
        assert (out_label[0, :, 1] == torch.tensor([0, 1], device=device, dtype=dtype)).all()
        assert_close(
            out_label[0, :, 2], torch.tensor([0.5000, 0.5000], device=device, dtype=dtype), rtol=1e-4, atol=1e-4
        )
| 37.708333 | 114 | 0.538564 |
6a0237b1c50faf7c7c35167396832ae7b1883850 | 204 | py | Python | scratch.py | pxp920/Stat-Tracker | a50af101c186d09f6b6c12d48e34e1e4475e916b | [
"Apache-2.0"
] | null | null | null | scratch.py | pxp920/Stat-Tracker | a50af101c186d09f6b6c12d48e34e1e4475e916b | [
"Apache-2.0"
] | null | null | null | scratch.py | pxp920/Stat-Tracker | a50af101c186d09f6b6c12d48e34e1e4475e916b | [
"Apache-2.0"
] | null | null | null | import pandas as pd
# Load the score-tracking sheet; row 0 of the CSV is the header.
# NOTE(review): encoding='ANSI' is a Windows-only codec alias -- this script
# will fail on other platforms; confirm the file's real encoding (likely cp1252).
df = pd.read_csv("C:/Users/Panagiotis.Pantazis/Documents/Python Scripts/Stat Tracker/ScoreTracking.csv", header=0,
                 encoding='ANSI')
# Show the column names.
print(list(df))
# Bug fix: .loc is an indexer and must be subscripted. The original
# `df.loc(1)` merely *called* the indexer (binding axis=1) and printed the
# indexer object instead of the row labelled 1.
print(df.loc[1])
b7316dc04fc1daed528255d8dcbb0e7d4e7db6c6 | 1,403 | py | Python | script.py | dariobaron/MAPD-A-fir-filter | 0b5ab1134999909cc9350b2db847cabc90d8e3f2 | [
"BSD-2-Clause"
] | null | null | null | script.py | dariobaron/MAPD-A-fir-filter | 0b5ab1134999909cc9350b2db847cabc90d8e3f2 | [
"BSD-2-Clause"
] | null | null | null | script.py | dariobaron/MAPD-A-fir-filter | 0b5ab1134999909cc9350b2db847cabc90d8e3f2 | [
"BSD-2-Clause"
] | null | null | null | import os
import serial
import numpy as np
from numpy import log2
import time
def computeQ(y, taps):
	"""Pick the fixed-point Q factor for an 8-bit signed representation.

	The scale is chosen so that the largest magnitude found in either the
	signal ``y`` or the filter ``taps`` maps just inside the signed-byte
	range (127 = 2**Q * M).
	"""
	peak = max(np.abs(y).max(), np.abs(taps).max())
	return log2(127) - log2(peak)
def double_to_signed(a, Q):
	"""Quantize float values to the unsigned image of 8-bit two's complement.

	Each value is scaled by 2**Q, rounded, and negatives are shifted into
	the 128..255 range (two's-complement byte encoding).
	"""
	quantized = np.round(a * 2 ** Q)
	return np.where(quantized < 0, quantized + 256, quantized).astype(int)
def signed_to_double(a, Q):
	"""Invert double_to_signed: decode byte values back to scaled floats.

	Bytes >= 128 are interpreted as negative two's-complement values, and
	the result is rescaled by 2**(-Q).
	"""
	decoded = np.where(a < 128, a, a - 256)
	return decoded * 2 ** (-Q)
# Open the FPGA/board link used by the FIR-filter demo and give it time to settle.
ser = serial.Serial('/dev/ttyUSB5', baudrate=115200)
time.sleep(5)
####### CHECK
# Low-pass FIR coefficients (symmetric, 6 taps).
taps = np.array([0.02856076, 0.14288273, 0.32855651, 0.32855651, 0.14288273, 0.02856076])
# count/countout track how many samples were sent vs. received back.
countout=0
count=0
with open("signal.txt") as f, open("filtered_signal.txt", "w") as out:
	signal = np.loadtxt(f)
	# Shared Q factor so samples and taps fit in a signed byte.
	Q = computeQ(signal,taps)
	for sig in signal:
		#print("Processing : "+str(100*i//signal.size)+"%", end='\r', flush=True)
		# NOTE(review): under Python 3 pyserial's write() expects bytes, not a
		# str from chr(); confirm this script runs on Python 2 or switch to
		# bytes([...]) before reuse.
		ser.write(chr(double_to_signed(sig,Q=Q)))
		#print(sig)
		count+=1
		time.sleep(0.1)
		# Drain one filtered sample whenever the device has replied.
		if(ser.in_waiting>0):
		#while True:
			d= ser.read(1)
			# Decode the byte and rescale by the input's peak amplitude.
			res=signed_to_double(ord(d),Q=Q)*max(signal)
			countout+=1
			print(countout)
			out.write(str(res)+'\n')
		#if(ser.in_waiting>0):
		#    d = ser.read()
		#    res = signed_to_double(ord(d),Q=Q) * max(signal)
		#    countout+=1
		#    out.write(str(res) + '\n' )
		#    time.sleep(0.1)
# Redundant closes: the with-statement already closed f and out.
f.close()
out.close()
ser.close() # close port
print(count,countout)
| 21.584615 | 89 | 0.617962 |
1f5c01a3ed17302edd5756d9ef6849c7a8c71ba2 | 951 | py | Python | profiles_api/migrations/0002_auto_20200501_0658.py | ravikishorechitakani/profiles-rest-api | ef27db59fe034eb84f1cf1a53350c2e739ffef4c | [
"MIT"
] | null | null | null | profiles_api/migrations/0002_auto_20200501_0658.py | ravikishorechitakani/profiles-rest-api | ef27db59fe034eb84f1cf1a53350c2e739ffef4c | [
"MIT"
] | 5 | 2020-06-06T01:47:16.000Z | 2021-06-10T20:06:55.000Z | profiles_api/migrations/0002_auto_20200501_0658.py | ravikishorechitakani/profiles-rest-api | ef27db59fe034eb84f1cf1a53350c2e739ffef4c | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2020-05-01 06:58
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration for the profiles_api app.
    Makes UserProfile.is_staff default to False and introduces the
    ProfileFeedItem model. Do not edit the operations by hand: migration
    files must stay in sync with what has already been applied to databases.
    """
    # Must be applied after the app's initial migration.
    dependencies = [
        ('profiles_api', '0001_initial'),
    ]
    operations = [
        # Give is_staff an explicit default instead of whatever 0001 declared.
        migrations.AlterField(
            model_name='userprofile',
            name='is_staff',
            field=models.BooleanField(default=False),
        ),
        # New feed-item table: a status text owned by a user profile.
        migrations.CreateModel(
            name='ProfileFeedItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status_text', models.CharField(max_length=255)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('user_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 31.7 | 126 | 0.617245 |
84c46ea7c1049b4ac109044f0215ed5e4296681a | 5,875 | py | Python | Spyder/DecryptLogin_note/DecryptLogin_modules/core/sohu.py | Lightblues/10-playground | 68e48be2f14897e70d0a37ba863246138cbb45d0 | [
"Apache-2.0"
] | 2,268 | 2019-03-02T16:17:17.000Z | 2022-03-31T17:10:42.000Z | Spyder/DecryptLogin_note/DecryptLogin_modules/core/sohu.py | Lightblues/10-playground | 68e48be2f14897e70d0a37ba863246138cbb45d0 | [
"Apache-2.0"
] | 68 | 2020-02-13T07:47:33.000Z | 2022-03-29T13:48:52.000Z | Spyder/DecryptLogin_note/DecryptLogin_modules/core/sohu.py | Lightblues/10-playground | 68e48be2f14897e70d0a37ba863246138cbb45d0 | [
"Apache-2.0"
] | 683 | 2019-03-05T14:18:44.000Z | 2022-03-31T03:25:18.000Z | '''
Function:
搜狐模拟登录
Author:
Charles
微信公众号:
Charles的皮卡丘
更新日期:
2020-10-29
'''
import time
import requests
from hashlib import md5
'''PC端登录搜狐'''
class sohuPC():
    """Simulated Sohu login through the desktop (PC) web endpoints."""
    # Dispatcher flag: sohu.login() raises NotImplementedError when False.
    is_callable = False
    def __init__(self, **kwargs):
        # Copy arbitrary keyword options straight onto the instance.
        for key, value in kwargs.items(): setattr(self, key, value)
        self.info = 'login in sohu in pc mode'
        self.session = requests.Session()
        self.__initialize()
    '''登录函数'''
    def login(self, username, password, crack_captcha_func=None, **kwargs):
        """Log in with username/password.
        Returns ({'username': username}, session) on success, raises
        RuntimeError on failure. crack_captcha_func is accepted for API
        symmetry but not used by this backend.
        """
        # Install proxies for the whole session, if the caller supplied any.
        self.session.proxies.update(kwargs.get('proxies', {}))
        # Fetch the home page first -- presumably to obtain baseline cookies.
        self.session.get(self.home_url)
        # POST the credentials; the password is sent as an MD5 hex digest.
        data = {
            'userid': username,
            'password': md5(password.encode(encoding='utf-8')).hexdigest(),
            'persistentCookie': '1',
            'appid': '116005',
        }
        response = self.session.post(self.login_url, data=data)
        response_json = response.json()
        # Login succeeded.
        if response_json.get('status') == 200 and response_json.get('message') == 'Success':
            print('[INFO]: Account -> %s, login successfully' % username)
            infos_return = {'username': username}
            return infos_return, self.session
        # Wrong username or password.
        elif response_json.get('status') in [404, 459]:
            raise RuntimeError('Account -> %s, fail to login, username or password error' % username)
        # Any other failure: surface the server-provided message.
        else:
            raise RuntimeError(response_json.get('message'))
    '''初始化'''
    def __initialize(self):
        """Set the default headers and the endpoint URLs used by login()."""
        # NOTE(review): 'origin' appears twice in this dict literal; the
        # second occurrence silently wins in Python.
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
            'origin': 'https://www.sohu.com',
            'upgrade-insecure-requests': '1',
            'referer': 'https://www.sohu.com/',
            'origin': 'https://www.sohu.com'
        }
        self.home_url = 'http://www.sohu.com/'
        self.login_url = 'https://v4.passport.sohu.com/i/login/116005'
        self.session.headers.update(self.headers)
'''移动端登录搜狐'''
class sohuMobile():
    """Simulated Sohu login through the mobile (m.*) web endpoints."""
    # This backend is the working default used by sohu.login().
    is_callable = True
    def __init__(self, **kwargs):
        # Copy arbitrary keyword options straight onto the instance.
        for key, value in kwargs.items(): setattr(self, key, value)
        self.info = 'login in sohu in mobile mode'
        self.session = requests.Session()
        self.__initialize()
    '''登录函数'''
    def login(self, username, password, crack_captcha_func=None, **kwargs):
        """Log in with username/password via the mobile passport endpoints.
        Returns ({'username': username}, session) on success, raises
        RuntimeError on failure. crack_captcha_func is accepted but unused.
        """
        # Install proxies for the whole session, if the caller supplied any.
        self.session.proxies.update(kwargs.get('proxies', {}))
        # Visit the app login page first (presumably to seed cookies).
        params = {
            'appid': 116001,
            'r': 'https://m.sohu.com/ucenter?_from=passport'
        }
        self.session.get(self.app_login_url, params=params)
        # Build the credential payload; the password is an MD5 hex digest.
        data = {
            'userid': username,
            'password': md5(password.encode(encoding='utf-8')).hexdigest(),
            'appid': 116001
        }
        self.session.headers.update({
            'Accept': 'application/json',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Origin': 'https://m.passport.sohu.com',
            'Referer': 'https://m.passport.sohu.com/app/login?appid=116001&r=https%3A%2F%2Fm.sohu.com%2Fucenter%3F_from%3Dpassport'
        })
        # The login URL carries a millisecond timestamp query parameter.
        response = self.session.post(self.security_login_url.format(int(time.time()*1000)), data=data)
        response_json = response.json()
        # Login succeeded.
        if response_json.get('status') == 200 and response_json.get('message') == 'Success':
            print('[INFO]: Account -> %s, login successfully' % username)
            infos_return = {'username': username}
            return infos_return, self.session
        # Wrong username or password.
        elif response_json.get('status') in [404, 459]:
            raise RuntimeError('Account -> %s, fail to login, username or password error' % username)
        # Any other failure: surface the server-provided message.
        else:
            raise RuntimeError(response_json.get('message'))
    '''初始化'''
    def __initialize(self):
        """Set the default headers and the endpoint URLs used by login()."""
        self.headers = {
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1'
        }
        self.app_login_url = 'https://m.passport.sohu.com/app/login'
        self.security_login_url = 'https://m.passport.sohu.com/security/login?t={}'
        self.session.headers.update(self.headers)
'''扫码登录搜狐'''
class sohuScanqr():
    """Placeholder backend for QR-code login to Sohu; not implemented yet."""
    # Dispatcher flag: sohu.login() refuses backends where this is False.
    is_callable = False
    def __init__(self, **kwargs):
        """Copy every keyword option onto the instance, then set the info tag."""
        for option_name in kwargs:
            setattr(self, option_name, kwargs[option_name])
        self.info = 'login in sohu in scanqr mode'
'''
Function:
搜狐模拟登录
Detail:
-login:
Input:
--username: 用户名
--password: 密码
--mode: mobile/pc/scanqr
--crack_captcha_func: 若提供验证码接口, 则利用该接口来实现验证码的自动识别
--proxies: 为requests.Session()设置代理
Return:
--infos_return: 用户名等信息
--session: 登录后的requests.Session()
'''
class sohu():
    """Facade that dispatches Sohu login to a mode-specific backend."""
    def __init__(self, **kwargs):
        self.info = 'login in sohu'
        # One backend instance per supported mode; kwargs are forwarded to each.
        self.supported_modes = {
            'pc': sohuPC(**kwargs),
            'mobile': sohuMobile(**kwargs),
            'scanqr': sohuScanqr(**kwargs),
        }
    '''登录函数'''
    def login(self, username, password, mode='mobile', crack_captcha_func=None, **kwargs):
        """Dispatch login to the backend selected by `mode` ('pc'/'mobile'/'scanqr')."""
        assert mode in self.supported_modes, 'unsupport mode %s in sohu.login' % mode
        selected_api = self.supported_modes[mode]
        # Stub backends (is_callable == False) cannot actually log in.
        if not selected_api.is_callable: raise NotImplementedError('not be implemented for mode %s in sohu.login' % mode)
        args = {
            'username': username,
            'password': password,
            'crack_captcha_func': crack_captcha_func,
        }
        args.update(kwargs)
        # NOTE(review): the trailing "| 36.71875 | ..." is dataset-export
        # residue fused onto this line, not part of the original source.
        return selected_api.login(**args) | 36.71875 | 163 | 0.587404 |
c0160d5ec70b1857522cde7f4cbd4865ef197073 | 10,508 | py | Python | subforms/tests/test_subforms.py | urbanairship/django-subforms | 87efa7526352f21acfb3f5902817b3763be36509 | [
"Apache-2.0"
] | 1 | 2019-09-09T00:20:55.000Z | 2019-09-09T00:20:55.000Z | subforms/tests/test_subforms.py | urbanairship/django-subforms | 87efa7526352f21acfb3f5902817b3763be36509 | [
"Apache-2.0"
] | null | null | null | subforms/tests/test_subforms.py | urbanairship/django-subforms | 87efa7526352f21acfb3f5902817b3763be36509 | [
"Apache-2.0"
] | null | null | null | # (c) 2013 Urban Airship and Contributors
import unittest
from django import forms
from subforms.decorators import ClassProperty
from subforms.mapper import Mapper
class SubFormWithMapping(forms.Form):
    """Test fixture: a sub form whose fields map 1:1 onto model attributes.
    form_to_model / model_to_form are (source, destination) pairs consumed
    by subforms.mapper.Mapper.
    """
    fake_title = forms.CharField(required=False)
    fake_field = forms.CharField(required=False)
    form_to_model = (
        ('fake_field', 'fake_field'),
        ('fake_title', 'fake_title'),
    )
    model_to_form = (
        ('fake_field', 'fake_field'),
        ('fake_title', 'fake_title'),
    )
class OtherSubFormWithMapping(forms.Form):
    """Test fixture: a second mapped sub form, with a distinct field name."""
    fake_data = forms.CharField(required=False)
    form_to_model = (
        ('fake_data', 'fake_data'),
    )
    model_to_form = (
        ('fake_data', 'fake_data'),
    )
class SubFormWithNoMapping(forms.Form):
    """Test fixture: a sub form with no mapping, so Mapper never applies it."""
    fake_field = forms.CharField(required=False)
class FakeForm(forms.Form):
    """Test fixture: parent form with two sub forms and a renaming mapping.
    'name' on the form maps to 'item_name' on the model and back.
    """
    name = forms.CharField(required=False)
    # Ordered (prefix, form class) pairs; order matters to the Mapper tests.
    subform_config = (
        ('test_mapping', SubFormWithMapping),
        ('test_no_mapping', SubFormWithNoMapping),
    )
    @ClassProperty
    def subforms(cls):
        # Mapper consumes subforms as a dict keyed by prefix.
        return dict(cls.subform_config)
    form_to_model = (
        ('name', 'item_name'),
    )
    model_to_form = (
        ('item_name', 'name'),
    )
class FakeModel(object):
    """Minimal model stand-in; tests set further attributes on it ad hoc."""
    item_name = ''
class TestFormMapper(unittest.TestCase):
    """Tests for subforms.mapper.Mapper: building sub-form maps, reading
    initial form data from a model, and applying form data back to models."""
    def test_create_subform_mappings(self):
        """Make sure that mapper properly creates subforms at init."""
        # Ensure that sub maps are created for sub forms.
        fake_mapper = Mapper(FakeForm)
        self.assertEqual(len(fake_mapper.sub_maps), 2)
        # Order is preserved
        self.assertEqual(
            fake_mapper.sub_maps[0].form_class.__name__,
            'SubFormWithMapping'
        )
        self.assertEqual(
            fake_mapper.sub_maps[1].form_class.__name__,
            'SubFormWithNoMapping'
        )
        # Recursion has only gone one level.
        for sub_map in fake_mapper.sub_maps:
            self.assertEqual(
                len(sub_map.sub_maps),
                0
            )
        # If we create a mapping of a form without subforms,
        # ensure that no sub_maps are created.
        fake_mapper2 = Mapper(SubFormWithMapping)
        self.assertEqual(len(fake_mapper2.sub_maps), 0)
    def test_get_form_data(self):
        """Get model data for form fields using our mapping."""
        # Sub-form initial data is keyed '<prefix>-initial'; attributes the
        # model lacks come back as None.
        expected_output = {
            'name': 'Database',
            'test_no_mapping-initial': {},
            'test_mapping-initial': {
                'fake_field': 'Something unexpected',
                'fake_title': None,
            },
        }
        fake_mapper = Mapper(FakeForm)
        fake_model = FakeModel()
        fake_model.item_name = 'Database'
        fake_model.fake_field = 'Something unexpected'
        fake_initial_form_data = fake_mapper.get_form_data(fake_model)
        # Our form field name (as we expect it) equals our model attribute.
        self.assertEqual(fake_initial_form_data, expected_output)
    def test_get_form_data_as_callable(self):
        """Use callable instead of static definition of form field."""
        fake_model_value = 'Fun Test Data'
        def get_model_attr(model_inst):
            """Pretend we're looking up model data from another model."""
            return fake_model_value
        class FakeFormWithCallable(forms.Form):
            something = forms.CharField(required=False)
            model_to_form = ((get_model_attr, 'something'),)
            form_to_model = (('something', get_model_attr),)
        fake_mapper = Mapper(FakeFormWithCallable)
        fake_model = FakeModel()
        fake_model.database = fake_model_value
        fake_form_data = fake_mapper.get_form_data(fake_model)
        self.assertEqual(
            fake_form_data['something'],
            fake_model_value
        )
    def test_apply_form_data(self):
        """Make sure that form data changes our model inst. data."""
        fake_mapper = Mapper(FakeForm)
        fake_model = FakeModel()
        fake_model.item_name = ''
        fake_mapped_field = 'Something fake'
        fake_mapped_title = 'A Terrible Beginning'
        fake_unmapped_field = 'Will never see the light of day.'
        fake_form_data = {
            # Parent form
            'name': 'Something new!',
            # Sub form with mapping
            'test_mapping': {
                'fake_field': fake_mapped_field,
                'fake_title': fake_mapped_title,
            },
            # Sub form without mapping (doesn't get applied)
            'test_no_mapping': {
                'fake_field': fake_unmapped_field,
            },
        }
        instances = fake_mapper.apply_form_data(fake_form_data, fake_model)
        # We should have only modified the model which we passed in.
        self.assertEqual(len(instances), 1)
        # Test that these reference the same object
        self.assertEqual(instances[0], fake_model)
        # Parent form field's mapping updates the model:
        self.assertEqual(fake_model.item_name, fake_form_data['name'])
        # Sub form's mapping updates the model:
        self.assertEqual(fake_model.fake_field, fake_mapped_field)
        self.assertEqual(fake_model.fake_title, fake_mapped_title)
    def test_apply_form_data_as_callable(self):
        """Make sure form values sent to callable update model."""
        fake_form_value = 'This is some new data'
        def set_model_value(model_inst, value):
            """Pretend we're looking up "form" data from another model."""
            model_inst.database = value
        class FakeFormWithCallable(forms.Form):
            something = forms.CharField(required=False)
            model_to_form = ((set_model_value, 'something'),)
            form_to_model = (('something', set_model_value),)
        fake_mapper = Mapper(FakeFormWithCallable)
        fake_model = FakeModel()
        fake_model.database = ''
        form_data = {'something': fake_form_value}
        fake_instances = fake_mapper.apply_form_data(form_data, fake_model)
        self.assertEqual(len(fake_instances), 1)
        fake_instance = fake_instances[0]
        self.assertEqual(fake_instance.database, fake_form_value)
    def test_multiple_sub_forms_one_model(self):
        """Ensure two sub forms modifying the same 'model' don't drop data."""
        fake_form_value = 'This is some new data'
        class FakeFormWithSubForms(forms.Form):
            something = forms.CharField(required=False)
            model_to_form = (('something_else', 'something'),)
            form_to_model = (('something', 'something_else'),)
            # The same form validation mapped onto two separate
            # subforms.
            subform_config = (
                ('subform1', SubFormWithMapping),
                ('subform2', OtherSubFormWithMapping),
            )
            @ClassProperty
            def subforms(cls):
                return dict(cls.subform_config)
        fake_mapper = Mapper(FakeFormWithSubForms)
        fake_model = FakeModel()
        fake_field = 'Whaaaat'
        fake_title = 'Crrrrrrazy'
        fake_data = 'Yellow'
        form_data = {
            'something': fake_form_value,
            'subform2': {'fake_data': fake_data},
            'subform1': {'fake_field': fake_field, 'fake_title': fake_title},
        }
        fake_instances = fake_mapper.apply_form_data(form_data, fake_model)
        # Make sure we're still working with the same model instance.
        self.assertEqual(fake_instances[0], fake_model)
        # fake_field arrived via 'subform1' (SubFormWithMapping).
        self.assertEqual(fake_model.fake_field, fake_field)
        # fake_data arrived via 'subform2'; fake_title via 'subform1'.
        self.assertEqual(fake_model.fake_data, fake_data)
        self.assertEqual(fake_model.fake_title, fake_title)
    def test_instance_for_behavior(self):
        """Ensure transitioning between models from form -> subform works.
        Note: we also test what happens when two subforms specify the same
        model (they should create two separate instances of the model). This
        behavior is probably rarely desired, but is how the system works as
        constructed.
        """
        class OtherModel(object):
            """Model to be transitioned to from FakeModel."""
            pass
        def return_other_model(parent_model_inst):
            # Reuse an existing other_model if the parent has one, else
            # build a fresh OtherModel instance.
            try:
                result = parent_model_inst.other_model
            except AttributeError:
                result = OtherModel()
            return result
        class FakeFormWithSubForms(forms.Form):
            something = forms.CharField(required=False)
            model_to_form = (('something_else', 'something'),)
            form_to_model = (('something', 'something_else'),)
            # The same form validation mapped onto two separate
            # subforms.
            subform_config = (
                ('subform1', SubFormWithMapping),
                ('subform2', SubFormWithMapping),
            )
            # model to test that we don't stomp on our changes.
            @staticmethod
            def instance_for_subform1(parent_model_inst):
                return return_other_model(parent_model_inst)
            @staticmethod
            def instance_for_subform2(parent_model_inst):
                return return_other_model(parent_model_inst)
            @ClassProperty
            def subforms(cls):
                return dict(cls.subform_config)
        fake_mapper = Mapper(FakeFormWithSubForms)
        fake_model = FakeModel()
        fake_field = 'What is the meaning of this!?'
        fake_title = 'Canterbury Tales'
        form_data = {
            'subform2': {'fake_title': fake_title},
            'subform1': {'fake_field': fake_field},
        }
        # This call should result in other_model being created and set as an
        # attr on our fake_model
        fake_instances = fake_mapper.apply_form_data(form_data, fake_model)
        # There'll be one for each form in this case.
        self.assertEqual(len(fake_instances), 3)
        fake_sub1 = fake_instances[1]
        fake_sub2 = fake_instances[2]
        # As nice as it would be to have these data merged into one
        # instance, it's not easily doable, so we save into separate
        # instances and leave implementation to the caller.
        self.assertEqual(fake_sub1.fake_field, fake_field)
        self.assertEqual(fake_sub2.fake_title, fake_title)
if __name__ == '__main__':
unittest.main()
| 34.228013 | 78 | 0.624286 |
324e3e12313c34f9d0627f201452a21ce364967a | 1,508 | py | Python | test.py | Pilpur/twitter-bot-fan | 9609baac144c81080c337469e87bf70ffeae8f9f | [
"MIT"
] | null | null | null | test.py | Pilpur/twitter-bot-fan | 9609baac144c81080c337469e87bf70ffeae8f9f | [
"MIT"
] | null | null | null | test.py | Pilpur/twitter-bot-fan | 9609baac144c81080c337469e87bf70ffeae8f9f | [
"MIT"
] | 1 | 2021-03-07T03:31:46.000Z | 2021-03-07T03:31:46.000Z | # import random
# with open ("daily_tweets.txt", "r") as daily_tweets_file:
# daily_tweets = daily_tweets_file.readlines()
# daily_tweets = [s.rstrip("\n") for s in daily_tweets]
# random.shuffle(daily_tweets)
# with open ("daily_tweets.txt", "w") as daily_tweets_file:
# for item in daily_tweets:
# daily_tweets_file.write(item + "\n")
import logging
import time
from datetime import datetime
class Bot:
    """Tweet-cycling bot: each call emits one line from daily_tweets.txt."""
    def __init__(self):
        # Position of the next tweet to emit within daily_tweets.txt.
        self.daily_index = 0
    def post_daily_tweet(self):
        """Print (and log) the next tweet, wrapping around at end of file."""
        # Re-read the file on every call so external edits are picked up;
        # strip the trailing newline from each entry.
        with open("daily_tweets.txt", "r") as tweet_file:
            tweets = [entry.rstrip("\n") for entry in tweet_file.readlines()]
            logging.debug("daily_tweets.txt loaded")
        message = tweets[self.daily_index]
        self.daily_index += 1
        logging.debug("found random daily_tweet")
        logging.info(message)
        print(message)
        # Wrap around once the whole file has been emitted.
        if self.daily_index == len(tweets):
            print("\n\n\n")
            self.daily_index = 0
    def run(self):
        """Post tweets forever, pausing briefly between each."""
        while True:
            self.post_daily_tweet()
            time.sleep(0.1)
# Script entry point: start the bot's endless posting loop when run directly.
# NOTE(review): the triple-quoted string below is a no-op expression
# statement (not a docstring) -- kept byte-identical to avoid code changes.
if __name__ == "__main__":
    """Launch the bot when running the program."""
    Bot().run()
| 29.568627 | 100 | 0.619363 |
33f8700b11bc2b61891a9053e35ad402c7150fd7 | 4,382 | py | Python | core/retry/api.py | 1v1expert/SmartLighting | e5f4e39869cb7fa382dce6b2184d181b87fe79ef | [
"MIT"
] | null | null | null | core/retry/api.py | 1v1expert/SmartLighting | e5f4e39869cb7fa382dce6b2184d181b87fe79ef | [
"MIT"
] | 7 | 2020-06-05T21:53:37.000Z | 2021-09-22T18:46:32.000Z | core/retry/api.py | 1v1expert/SmartLighting | e5f4e39869cb7fa382dce6b2184d181b87fe79ef | [
"MIT"
] | null | null | null | import logging
import random
import time
from functools import partial
from .compat import decorator
# Module-level default logger for all retry helpers.
logging_logger = logging.getLogger(__name__)
def __retry_internal(f, exceptions=Exception, tries=-1, delay=0, max_delay=None, backoff=1, jitter=0,
                     logger=logging_logger):
    """Call ``f`` repeatedly until it succeeds or the attempts run out.

    :param f: zero-argument callable to execute.
    :param exceptions: exception (or tuple of exceptions) that triggers a retry.
    :param tries: maximum number of attempts; -1 means retry forever.
    :param delay: initial sleep between attempts, in seconds.
    :param max_delay: upper bound for the sleep; None means unbounded.
    :param backoff: multiplier applied to the sleep after each failure.
    :param jitter: extra seconds added after each failure; a (min, max)
                   tuple draws a uniform random value instead of a constant.
    :param logger: logger.warning(fmt, error, delay) is called per failure;
                   None disables logging.
    :returns: whatever ``f`` returns.
    """
    attempts_left, pause = tries, delay
    while attempts_left:
        try:
            return f()
        except exceptions as err:
            attempts_left -= 1
            # Out of attempts: let the last error propagate to the caller.
            if not attempts_left:
                raise
            if logger is not None:
                logger.warning('%s, retrying in %s seconds...', err, pause)
            time.sleep(pause)
            # Grow the pause (backoff), add jitter, then clamp to max_delay.
            pause *= backoff
            if isinstance(jitter, tuple):
                pause += random.uniform(*jitter)
            else:
                pause += jitter
            if max_delay is not None:
                pause = min(pause, max_delay)
def retry(exceptions=Exception, tries=-1, delay=0, max_delay=None, backoff=1, jitter=0, logger=logging_logger):
    """Build a decorator that retries the wrapped callable on failure.

    :param exceptions: exception (or tuple) that triggers a retry. default: Exception.
    :param tries: maximum number of attempts; -1 retries forever.
    :param delay: initial sleep between attempts, in seconds.
    :param max_delay: upper bound for the sleep; None means unbounded.
    :param backoff: multiplier applied to the sleep after each failure.
    :param jitter: extra seconds added per failure; a (min, max) tuple draws
                   a uniform random value instead of a constant.
    :param logger: logger.warning(fmt, error, delay) is called per failure;
                   None disables logging.
    :returns: a decorator that applies this retry policy to its target.
    """
    @decorator
    def retry_decorator(f, *fargs, **fkwargs):
        # Normalize missing argument packs to empty containers, then bind
        # them up front so the retry loop re-invokes the same call.
        call_args = fargs if fargs else list()
        call_kwargs = fkwargs if fkwargs else dict()
        bound = partial(f, *call_args, **call_kwargs)
        return __retry_internal(bound, exceptions, tries, delay, max_delay, backoff, jitter, logger)
    return retry_decorator
def retry_call(f, fargs=None, fkwargs=None, exceptions=Exception, tries=-1, delay=0, max_delay=None, backoff=1,
               jitter=0,
               logger=logging_logger):
    """
    Calls a function and re-executes it if it failed.
    :param f: the function to execute.
    :param fargs: the positional arguments of the function to execute.
    :param fkwargs: the named arguments of the function to execute.
    :param exceptions: an exception or a tuple of exceptions to catch. default: Exception.
    :param tries: the maximum number of attempts. default: -1 (infinite).
    :param delay: initial delay between attempts. default: 0.
    :param max_delay: the maximum value of delay. default: None (no limit).
    :param backoff: multiplier applied to delay between attempts. default: 1 (no backoff).
    :param jitter: extra seconds added to delay between attempts. default: 0.
                   fixed if a number, random if a range tuple (min, max)
    :param logger: logger.warning(fmt, error, delay) will be called on failed attempts.
                   default: retry.logging_logger. if None, logging is disabled.
    :returns: the result of the f function.
    """
    # Normalize absent argument packs to empty containers before binding.
    args = fargs if fargs else list()
    kwargs = fkwargs if fkwargs else dict()
    # NOTE(review): the trailing "| 44.714286 | ..." is dataset-export
    # residue fused onto this line, not part of the original source.
    return __retry_internal(partial(f, *args, **kwargs), exceptions, tries, delay, max_delay, backoff, jitter, logger) | 44.714286 | 118 | 0.657462 |
dbe8333dc2199ffa00bc2ec105445e3af7ab1bb6 | 39,921 | py | Python | rpython/translator/c/node.py | kantai/passe-pypy-taint-tracking | b60a3663f8fe89892dc182c8497aab97e2e75d69 | [
"MIT"
] | 2 | 2016-07-06T23:30:20.000Z | 2017-05-30T15:59:31.000Z | rpython/translator/c/node.py | kantai/passe-pypy-taint-tracking | b60a3663f8fe89892dc182c8497aab97e2e75d69 | [
"MIT"
] | null | null | null | rpython/translator/c/node.py | kantai/passe-pypy-taint-tracking | b60a3663f8fe89892dc182c8497aab97e2e75d69 | [
"MIT"
] | 2 | 2020-07-09T08:14:22.000Z | 2021-01-15T18:01:25.000Z | from rpython.rtyper.lltypesystem.lltype import (Struct, Array, FixedSizeArray,
FuncType, typeOf, GcStruct, GcArray, RttiStruct, ContainerType, parentlink,
Ptr, Void, OpaqueType, Float, RuntimeTypeInfo, getRuntimeTypeInfo, Char,
_subarray)
from rpython.rtyper.lltypesystem import llmemory, llgroup
from rpython.translator.c.funcgen import FunctionCodeGenerator
from rpython.translator.c.external import CExternalFunctionCodeGenerator
from rpython.translator.c.support import USESLOTS # set to False if necessary while refactoring
from rpython.translator.c.support import cdecl, forward_cdecl, somelettersfrom
from rpython.translator.c.support import c_char_array_constant, barebonearray
from rpython.translator.c.primitive import PrimitiveType, name_signed
from rpython.rlib import exports
from rpython.rlib.rfloat import isfinite, isinf
from rpython.rlib.rstackovf import _StackOverflow
from rpython.translator.c import extfunc
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from py.builtin import BaseException
def needs_gcheader(T):
    """Return True if container type T must carry an explicit GC header
    field of its own."""
    # Only GC-managed container types ever get a header.
    is_gc_container = isinstance(T, ContainerType) and T._gckind == 'gc'
    if not is_gc_container:
        return False
    # A GcStruct whose first field is itself a structure inherits the
    # gcheader from that substructure, so no extra header is needed.
    if isinstance(T, GcStruct) and T._first_struct() != (None, None):
        return False
    return True
class defaultproperty(object):
    """Non-data descriptor: compute an attribute lazily via *fget*.

    Unlike ``property``, this has no ``__set__``, so a value stored in the
    instance ``__dict__`` under the same name shadows the descriptor on
    later lookups.  Class-level access returns the descriptor itself.
    """
    def __init__(self, fget):
        # Callable invoked with the instance to produce the value.
        self.fget = fget

    def __get__(self, obj, cls=None):
        return self if obj is None else self.fget(obj)
class StructDefNode:
typetag = 'struct'
extra_union_for_varlength = True
def __init__(self, db, STRUCT, varlength=1):
self.db = db
self.STRUCT = STRUCT
self.LLTYPE = STRUCT
self.varlength = varlength
if varlength == 1:
basename = STRUCT._name
with_number = True
else:
basename = db.gettypedefnode(STRUCT).barename
basename = '%s_len%d' % (basename, varlength)
with_number = False
if STRUCT._hints.get('union'):
self.typetag = 'union'
assert STRUCT._gckind == 'raw' # not supported: "GcUnion"
if STRUCT._hints.get('typedef'):
self.typetag = ''
assert STRUCT._hints.get('external')
if self.STRUCT._hints.get('external'): # XXX hack
self.forward_decl = None
if STRUCT._hints.get('c_name'):
self.barename = self.name = STRUCT._hints['c_name']
self.c_struct_field_name = self.verbatim_field_name
else:
(self.barename,
self.name) = db.namespace.uniquename(basename,
with_number=with_number,
bare=True)
self.prefix = somelettersfrom(STRUCT._name) + '_'
self.dependencies = {}
#
self.fieldnames = STRUCT._names
if STRUCT._hints.get('typeptr', False):
if db.gcpolicy.need_no_typeptr():
assert self.fieldnames == ('typeptr',)
self.fieldnames = ()
#
self.fulltypename = '%s %s @' % (self.typetag, self.name)
def setup(self):
# this computes self.fields
if self.STRUCT._hints.get('external'): # XXX hack
self.fields = None # external definition only
return
self.fields = []
db = self.db
STRUCT = self.STRUCT
if self.varlength != 1:
self.normalizedtypename = db.gettype(STRUCT, who_asks=self)
if needs_gcheader(self.STRUCT):
HDR = db.gcpolicy.struct_gcheader_definition(self)
if HDR is not None:
gc_field = ("_gcheader", db.gettype(HDR, who_asks=self))
self.fields.append(gc_field)
for name in self.fieldnames:
T = self.c_struct_field_type(name)
if name == STRUCT._arrayfld:
typename = db.gettype(T, varlength=self.varlength,
who_asks=self)
else:
typename = db.gettype(T, who_asks=self)
self.fields.append((self.c_struct_field_name(name), typename))
self.gcinfo # force it to be computed
def computegcinfo(self):
# let the gcpolicy do its own setup
self.gcinfo = None # unless overwritten below
rtti = None
STRUCT = self.STRUCT
if isinstance(STRUCT, RttiStruct):
try:
rtti = getRuntimeTypeInfo(STRUCT)
except ValueError:
pass
if self.varlength == 1:
self.db.gcpolicy.struct_setup(self, rtti)
return self.gcinfo
gcinfo = defaultproperty(computegcinfo)
def gettype(self):
return self.fulltypename
def c_struct_field_name(self, name):
# occasionally overridden in __init__():
# self.c_struct_field_name = self.verbatim_field_name
return self.prefix + name
def verbatim_field_name(self, name):
assert name.startswith('c_') # produced in this way by rffi
return name[2:]
def c_struct_field_type(self, name):
return self.STRUCT._flds[name]
def access_expr(self, baseexpr, fldname):
fldname = self.c_struct_field_name(fldname)
return '%s.%s' % (baseexpr, fldname)
def ptr_access_expr(self, baseexpr, fldname, baseexpr_is_const=False):
fldname = self.c_struct_field_name(fldname)
if baseexpr_is_const:
return '%s->%s' % (baseexpr, fldname)
return 'RPyField(%s, %s)' % (baseexpr, fldname)
def definition(self):
if self.fields is None: # external definition only
return
yield '%s %s {' % (self.typetag, self.name)
is_empty = True
for name, typename in self.fields:
line = '%s;' % cdecl(typename, name)
if typename == PrimitiveType[Void]:
line = '/* %s */' % line
else:
is_empty = False
yield '\t' + line
if is_empty:
yield '\t' + 'char _dummy; /* this struct is empty */'
yield '};'
if self.varlength != 1:
assert self.typetag == 'struct'
yield 'union %su {' % self.name
yield ' struct %s a;' % self.name
yield ' %s;' % cdecl(self.normalizedtypename, 'b')
yield '};'
def visitor_lines(self, prefix, on_field):
for name in self.fieldnames:
FIELD_T = self.c_struct_field_type(name)
cname = self.c_struct_field_name(name)
for line in on_field('%s.%s' % (prefix, cname),
FIELD_T):
yield line
def debug_offsets(self):
# generate number exprs giving the offset of the elements in the struct
assert self.varlength == 1
for name in self.fieldnames:
FIELD_T = self.c_struct_field_type(name)
if FIELD_T is Void:
yield '-1'
else:
try:
cname = self.c_struct_field_name(name)
except ValueError:
yield '-1'
else:
yield 'offsetof(%s %s, %s)' % (self.typetag,
self.name, cname)
class ArrayDefNode:
typetag = 'struct'
extra_union_for_varlength = True
def __init__(self, db, ARRAY, varlength=1):
self.db = db
self.ARRAY = ARRAY
self.LLTYPE = ARRAY
self.gcfields = []
self.varlength = varlength
if varlength == 1:
basename = 'array'
with_number = True
else:
basename = db.gettypedefnode(ARRAY).barename
basename = '%s_len%d' % (basename, varlength)
with_number = False
(self.barename,
self.name) = db.namespace.uniquename(basename, with_number=with_number,
bare=True)
self.dependencies = {}
self.fulltypename = '%s %s @' % (self.typetag, self.name)
self.fullptrtypename = '%s %s *@' % (self.typetag, self.name)
def setup(self):
if hasattr(self, 'itemtypename'):
return # setup() was already called, likely by __init__
db = self.db
ARRAY = self.ARRAY
self.gcinfo # force it to be computed
if self.varlength != 1:
self.normalizedtypename = db.gettype(ARRAY, who_asks=self)
if needs_gcheader(ARRAY):
HDR = db.gcpolicy.array_gcheader_definition(self)
if HDR is not None:
gc_field = ("_gcheader", db.gettype(HDR, who_asks=self))
self.gcfields.append(gc_field)
self.itemtypename = db.gettype(ARRAY.OF, who_asks=self)
def computegcinfo(self):
# let the gcpolicy do its own setup
self.gcinfo = None # unless overwritten below
if self.varlength == 1:
self.db.gcpolicy.array_setup(self)
return self.gcinfo
gcinfo = defaultproperty(computegcinfo)
def gettype(self):
return self.fulltypename
def getptrtype(self):
return self.fullptrtypename
def access_expr(self, baseexpr, index):
return '%s.items[%s]' % (baseexpr, index)
access_expr_varindex = access_expr
def ptr_access_expr(self, baseexpr, index, dummy=False):
assert 0 <= index <= sys.maxint, "invalid constant index %r" % (index,)
return self.itemindex_access_expr(baseexpr, index)
def itemindex_access_expr(self, baseexpr, indexexpr):
if self.ARRAY._hints.get('nolength', False):
return 'RPyNLenItem(%s, %s)' % (baseexpr, indexexpr)
else:
return 'RPyItem(%s, %s)' % (baseexpr, indexexpr)
def definition(self):
gcpolicy = self.db.gcpolicy
yield 'struct %s {' % self.name
for fname, typename in self.gcfields:
yield '\t' + cdecl(typename, fname) + ';'
if not self.ARRAY._hints.get('nolength', False):
yield '\tlong length;'
line = '%s;' % cdecl(self.itemtypename, 'items[%d]'% self.varlength)
if self.ARRAY.OF is Void: # strange
line = '/* array of void */'
if self.ARRAY._hints.get('nolength', False):
line = 'char _dummy; ' + line
yield '\t' + line
yield '};'
if self.varlength != 1:
yield 'union %su {' % self.name
yield ' struct %s a;' % self.name
yield ' %s;' % cdecl(self.normalizedtypename, 'b')
yield '};'
def visitor_lines(self, prefix, on_item):
assert self.varlength == 1
ARRAY = self.ARRAY
# we need a unique name for this C variable, or at least one that does
# not collide with the expression in 'prefix'
i = 0
varname = 'p0'
while prefix.find(varname) >= 0:
i += 1
varname = 'p%d' % i
body = list(on_item('(*%s)' % varname, ARRAY.OF))
if body:
yield '{'
yield '\t%s = %s.items;' % (cdecl(self.itemtypename, '*' + varname),
prefix)
yield '\t%s = %s + %s.length;' % (cdecl(self.itemtypename,
'*%s_end' % varname),
varname,
prefix)
yield '\twhile (%s != %s_end) {' % (varname, varname)
for line in body:
yield '\t\t' + line
yield '\t\t%s++;' % varname
yield '\t}'
yield '}'
def debug_offsets(self):
# generate three offsets for debugging inspection
assert self.varlength == 1
if not self.ARRAY._hints.get('nolength', False):
yield 'offsetof(struct %s, length)' % (self.name,)
else:
yield '-1'
if self.ARRAY.OF is not Void:
yield 'offsetof(struct %s, items[0])' % (self.name,)
yield 'offsetof(struct %s, items[1])' % (self.name,)
else:
yield '-1'
yield '-1'
class BareBoneArrayDefNode:
"""For 'simple' array types which don't need a length nor GC headers.
Implemented directly as a C array instead of a struct with an items field.
rffi kind of expects such arrays to be 'bare' C arrays.
"""
gcinfo = None
name = None
forward_decl = None
extra_union_for_varlength = False
def __init__(self, db, ARRAY, varlength=1):
self.db = db
self.ARRAY = ARRAY
self.LLTYPE = ARRAY
self.varlength = varlength
self.dependencies = {}
contained_type = ARRAY.OF
# There is no such thing as an array of voids:
# we use a an array of chars instead; only the pointer can be void*.
self.itemtypename = db.gettype(contained_type, who_asks=self)
self.fulltypename = self.itemtypename.replace('@', '(@)[%d]' %
(self.varlength,))
if ARRAY._hints.get("render_as_void"):
self.fullptrtypename = 'void *@'
else:
self.fullptrtypename = self.itemtypename.replace('@', '*@')
def setup(self):
"""Array loops are forbidden by ForwardReference.become() because
there is no way to declare them in C."""
def gettype(self):
return self.fulltypename
def getptrtype(self):
return self.fullptrtypename
def access_expr(self, baseexpr, index):
return '%s[%d]' % (baseexpr, index)
access_expr_varindex = access_expr
def ptr_access_expr(self, baseexpr, index, dummy=False):
assert 0 <= index <= sys.maxint, "invalid constant index %r" % (index,)
return self.itemindex_access_expr(baseexpr, index)
def itemindex_access_expr(self, baseexpr, indexexpr):
if self.ARRAY._hints.get("render_as_void"):
return 'RPyBareItem((char*)%s, %s)' % (baseexpr, indexexpr)
else:
return 'RPyBareItem(%s, %s)' % (baseexpr, indexexpr)
def definition(self):
return [] # no declaration is needed
def visitor_lines(self, prefix, on_item):
raise Exception("cannot visit C arrays - don't know the length")
def debug_offsets(self):
# generate three offsets for debugging inspection,
yield '-1' # no length
yield '0' # first element is immediately at the start of the array
yield 'sizeof(%s)' % (cdecl(self.itemtypename, ''),)
class FixedSizeArrayDefNode:
gcinfo = None
name = None
typetag = 'struct'
extra_union_for_varlength = False
def __init__(self, db, FIXEDARRAY):
self.db = db
self.FIXEDARRAY = FIXEDARRAY
self.LLTYPE = FIXEDARRAY
self.dependencies = {}
self.itemtypename = db.gettype(FIXEDARRAY.OF, who_asks=self)
self.fulltypename = self.itemtypename.replace('@', '(@)[%d]' %
FIXEDARRAY.length)
self.fullptrtypename = self.itemtypename.replace('@', '*@')
def setup(self):
"""Loops are forbidden by ForwardReference.become() because
there is no way to declare them in C."""
def gettype(self):
return self.fulltypename
def getptrtype(self):
return self.fullptrtypename
def access_expr(self, baseexpr, index, dummy=False):
if not isinstance(index, int):
assert index.startswith('item')
index = int(index[4:])
if not (0 <= index < self.FIXEDARRAY.length):
raise IndexError("refusing to generate a statically out-of-bounds"
" array indexing")
return '%s[%d]' % (baseexpr, index)
ptr_access_expr = access_expr
def access_expr_varindex(self, baseexpr, index):
return '%s[%s]' % (baseexpr, index)
def itemindex_access_expr(self, baseexpr, indexexpr):
return 'RPyFxItem(%s, %s, %d)' % (baseexpr, indexexpr,
self.FIXEDARRAY.length)
def definition(self):
return [] # no declaration is needed
def visitor_lines(self, prefix, on_item):
FIXEDARRAY = self.FIXEDARRAY
# we need a unique name for this C variable, or at least one that does
# not collide with the expression in 'prefix'
i = 0
varname = 'p0'
while prefix.find(varname) >= 0:
i += 1
varname = 'p%d' % i
body = list(on_item('(*%s)' % varname, FIXEDARRAY.OF))
if body:
yield '{'
yield '\t%s = %s;' % (cdecl(self.itemtypename, '*' + varname),
prefix)
yield '\t%s = %s + %d;' % (cdecl(self.itemtypename,
'*%s_end' % varname),
varname,
FIXEDARRAY.length)
yield '\twhile (%s != %s_end) {' % (varname, varname)
for line in body:
yield '\t\t' + line
yield '\t\t%s++;' % varname
yield '\t}'
yield '}'
def debug_offsets(self):
# XXX not implemented
return []
class ExtTypeOpaqueDefNode:
    """Type-definition node for OpaqueTypes created with the
    'render_structure' hint; the struct itself is declared in hand-written
    C headers, so no definition is generated here."""
    typetag = 'struct'

    def __init__(self, db, T):
        self.db = db
        self.T = T
        self.dependencies = {}
        # The C-level name is derived from the opaque type's tag.
        self.name = 'RPyOpaque_%s' % (T.tag,)

    def setup(self):
        # Nothing to compute: the structure is defined externally.
        pass

    def definition(self):
        # No C lines are emitted for externally-defined opaque structs.
        return []
# ____________________________________________________________
class ContainerNode(object):
if USESLOTS: # keep the number of slots down!
__slots__ = """db obj
typename implementationtypename
name
_funccodegen_owner
globalcontainer""".split()
eci_name = '_compilation_info'
def __init__(self, db, T, obj):
self.db = db
self.obj = obj
#self.dependencies = {}
self.typename = db.gettype(T) #, who_asks=self)
self.implementationtypename = db.gettype(T, varlength=self.getlength())
parent, parentindex = parentlink(obj)
if obj in exports.EXPORTS_obj2name:
self.name = exports.EXPORTS_obj2name[obj]
self.globalcontainer = True
elif parent is None:
self.name = db.namespace.uniquename('g_' + self.basename())
self.globalcontainer = True
else:
self.globalcontainer = False
parentnode = db.getcontainernode(parent)
defnode = db.gettypedefnode(parentnode.getTYPE())
self.name = defnode.access_expr(parentnode.name, parentindex)
if self.typename != self.implementationtypename:
if db.gettypedefnode(T).extra_union_for_varlength:
self.name += '.b'
self._funccodegen_owner = None
def getptrname(self):
return '(&%s)' % self.name
def getTYPE(self):
return typeOf(self.obj)
def is_thread_local(self):
T = self.getTYPE()
return hasattr(T, "_hints") and T._hints.get('thread_local')
def compilation_info(self):
return getattr(self.obj, self.eci_name, None)
def get_declaration(self):
if self.name[-2:] == '.b':
# xxx fish fish
assert self.implementationtypename.startswith('struct ')
assert self.implementationtypename.endswith(' @')
uniontypename = 'union %su @' % self.implementationtypename[7:-2]
return uniontypename, self.name[:-2]
else:
return self.implementationtypename, self.name
def forward_declaration(self):
if llgroup.member_of_group(self.obj):
return
type, name = self.get_declaration()
yield '%s;' % (
forward_cdecl(type, name, self.db.standalone,
self.is_thread_local()))
def implementation(self):
if llgroup.member_of_group(self.obj):
return []
lines = list(self.initializationexpr())
type, name = self.get_declaration()
if name != self.name:
lines[0] = '{ ' + lines[0] # extra braces around the 'a' part
lines[-1] += ' }' # of the union
lines[0] = '%s = %s' % (
cdecl(type, name, self.is_thread_local()),
lines[0])
lines[-1] += ';'
return lines
def startupcode(self):
return []
def getlength(self):
return 1
assert not USESLOTS or '__dict__' not in dir(ContainerNode)
class StructNode(ContainerNode):
nodekind = 'struct'
if USESLOTS:
__slots__ = ()
def basename(self):
T = self.getTYPE()
return T._name
def enum_dependencies(self):
T = self.getTYPE()
for name in T._names:
yield getattr(self.obj, name)
def getlength(self):
T = self.getTYPE()
if T._arrayfld is None:
return 1
else:
array = getattr(self.obj, T._arrayfld)
return len(array.items)
def initializationexpr(self, decoration=''):
T = self.getTYPE()
is_empty = True
yield '{'
defnode = self.db.gettypedefnode(T)
data = []
if needs_gcheader(T):
gc_init = self.db.gcpolicy.struct_gcheader_initdata(self)
data.append(('gcheader', gc_init))
for name in defnode.fieldnames:
data.append((name, getattr(self.obj, name)))
# Reasonably, you should only initialise one of the fields of a union
# in C. This is possible with the syntax '.fieldname value' or
# '.fieldname = value'. But here we don't know which of the
# fields need initialization, so XXX we pick the first one
# arbitrarily.
if hasattr(T, "_hints") and T._hints.get('union'):
data = data[0:1]
if 'get_padding_drop' in T._hints:
d = {}
for name, _ in data:
T1 = defnode.c_struct_field_type(name)
typename = self.db.gettype(T1)
d[name] = cdecl(typename, '')
padding_drop = T._hints['get_padding_drop'](d)
else:
padding_drop = []
for name, value in data:
if name in padding_drop:
continue
c_expr = defnode.access_expr(self.name, name)
lines = generic_initializationexpr(self.db, value, c_expr,
decoration + name)
for line in lines:
yield '\t' + line
if not lines[0].startswith('/*'):
is_empty = False
if is_empty:
yield '\t%s' % '0,'
yield '}'
assert not USESLOTS or '__dict__' not in dir(StructNode)
class GcStructNodeWithHash(StructNode):
    # for the outermost level of nested structures, if it has a _hash_cache_.
    # The prebuilt object is wrapped in a C union so that the precomputed
    # identity hash can be stored at a fixed offset past the object data.
    nodekind = 'struct'
    if USESLOTS:
        __slots__ = ()
    def get_hash_typename(self):
        # Name of the wrapper struct that holds {object data, hash}.
        return 'struct _hashT_%s @' % self.name
    def forward_declaration(self):
        # Emit the wrapper struct declaration: a union of the real object
        # ('head') and a pad sized to the hash offset, followed by the hash
        # slot itself.  A #define makes the plain name alias the 'head'
        # member so the rest of the generated code is unchanged.
        T = self.getTYPE()
        assert self.typename == self.implementationtypename # no array part
        hash_typename = self.get_hash_typename()
        hash_offset = self.db.gctransformer.get_hash_offset(T)
        yield '%s {' % cdecl(hash_typename, '')
        yield '\tunion {'
        yield '\t\t%s;' % cdecl(self.implementationtypename, 'head')
        yield '\t\tchar pad[%s];' % name_signed(hash_offset, self.db)
        yield '\t} u;'
        yield '\tlong hash;'
        yield '};'
        yield '%s;' % (
            forward_cdecl(hash_typename, '_hash_' + self.name,
                          self.db.standalone, self.is_thread_local()),)
        yield '#define %s _hash_%s.u.head' % (self.name, self.name)
    def implementation(self):
        # Wrap the normal initializer in the union/hash wrapper and append
        # the precomputed hash value as the final member.
        hash_typename = self.get_hash_typename()
        hash = self.db.gcpolicy.get_prebuilt_hash(self.obj)
        assert hash is not None
        lines = list(self.initializationexpr())
        lines.insert(0, '%s = { {' % (
            cdecl(hash_typename, '_hash_' + self.name,
                  self.is_thread_local()),))
        lines.append('}, %s /* hash */ };' % name_signed(hash, self.db))
        return lines
def gcstructnode_factory(db, T, obj):
    """Pick the container-node class for a prebuilt GcStruct: objects that
    carry a precomputed identity hash get the hash-aware variant."""
    if db.gcpolicy.get_prebuilt_hash(obj) is None:
        return StructNode(db, T, obj)
    return GcStructNodeWithHash(db, T, obj)
class ArrayNode(ContainerNode):
nodekind = 'array'
if USESLOTS:
__slots__ = ()
def getptrname(self):
if barebonearray(self.getTYPE()):
return self.name
return ContainerNode.getptrname(self)
def basename(self):
return 'array'
def enum_dependencies(self):
return self.obj.items
def getlength(self):
return len(self.obj.items)
def initializationexpr(self, decoration=''):
T = self.getTYPE()
defnode = self.db.gettypedefnode(T)
yield '{'
if needs_gcheader(T):
gc_init = self.db.gcpolicy.array_gcheader_initdata(self)
lines = generic_initializationexpr(self.db, gc_init, 'gcheader',
'%sgcheader' % (decoration,))
for line in lines:
yield line
if T._hints.get('nolength', False):
length = ''
else:
length = '%d, ' % len(self.obj.items)
if T.OF is Void or len(self.obj.items) == 0:
yield '\t%s' % length.rstrip(', ')
yield '}'
elif T.OF == Char:
if len(self.obj.items) and self.obj.items[0] is None:
s = ''.join([self.obj.getitem(i) for i in range(len(self.obj.items))])
else:
s = ''.join(self.obj.items)
array_constant = c_char_array_constant(s)
if array_constant.startswith('{') and barebonearray(T):
assert array_constant.endswith('}')
array_constant = array_constant[1:-1].strip()
yield '\t%s%s' % (length, array_constant)
yield '}'
else:
barebone = barebonearray(T)
if not barebone:
yield '\t%s{' % length
for j in range(len(self.obj.items)):
value = self.obj.items[j]
basename = self.name
if basename.endswith('.b'):
basename = basename[:-2] + '.a'
lines = generic_initializationexpr(self.db, value,
'%s.items[%d]' % (basename, j),
'%s%d' % (decoration, j))
for line in lines:
yield '\t' + line
if not barebone:
yield '} }'
else:
yield '}'
assert not USESLOTS or '__dict__' not in dir(ArrayNode)
class FixedSizeArrayNode(ContainerNode):
nodekind = 'array'
if USESLOTS:
__slots__ = ()
def getptrname(self):
if not isinstance(self.obj, _subarray): # XXX hackish
return self.name
return ContainerNode.getptrname(self)
def basename(self):
T = self.getTYPE()
return T._name
def enum_dependencies(self):
for i in range(self.obj.getlength()):
yield self.obj.getitem(i)
def getlength(self):
return 1 # not variable-sized!
def initializationexpr(self, decoration=''):
T = self.getTYPE()
assert self.typename == self.implementationtypename # not var-sized
is_empty = True
yield '{'
# _names == ['item0', 'item1', ...]
for j, name in enumerate(T._names):
value = getattr(self.obj, name)
lines = generic_initializationexpr(self.db, value,
'%s[%d]' % (self.name, j),
'%s%d' % (decoration, j))
for line in lines:
yield '\t' + line
yield '}'
def generic_initializationexpr(db, value, access_expr, decoration):
    # Return the C static-initializer expression for 'value' as a list of
    # source lines.  'access_expr' is the C l-value used when the value must
    # be patched at startup; 'decoration' is a debugging label emitted as a
    # comment on the first line.
    if isinstance(typeOf(value), ContainerType):
        # Nested container: delegate to its own node and append the
        # separating comma to the last line.
        node = db.getcontainernode(value)
        lines = list(node.initializationexpr(decoration+'.'))
        lines[-1] += ','
        return lines
    else:
        comma = ','
        if typeOf(value) == Float and not isfinite(value):
            # C has no portable literal for inf/NaN: emit 0.0 in the static
            # initializer and record a late initialization that patches the
            # real value in at program startup.
            db.late_initializations.append(('%s' % access_expr, db.get(value)))
            if isinf(value):
                name = '-+'[value > 0] + 'inf'
            else:
                name = 'NaN'
            expr = '0.0 /* patched later with %s */' % (name,)
        else:
            expr = db.get(value)
            if typeOf(value) is Void:
                # Void fields expand to a comment only, so no comma either.
                comma = ''
        expr += comma
        # Insert the decoration comment at the end of the first line of the
        # (possibly multi-line) expression.
        i = expr.find('\n')
        if i<0: i = len(expr)
        expr = '%s\t/* %s */%s' % (expr[:i], decoration, expr[i:])
        return expr.split('\n')
# ____________________________________________________________
class FuncNode(ContainerNode):
nodekind = 'func'
eci_name = 'compilation_info'
# there not so many node of this kind, slots should not
# be necessary
def __init__(self, db, T, obj, forcename=None):
self.globalcontainer = True
self.db = db
self.T = T
self.obj = obj
callable = getattr(obj, '_callable', None)
if (callable is not None and
getattr(callable, 'c_name', None) is not None):
self.name = forcename or obj._callable.c_name
elif getattr(obj, 'external', None) == 'C' and not db.need_sandboxing(obj):
self.name = forcename or self.basename()
else:
self.name = (forcename or
db.namespace.uniquename('g_' + self.basename()))
self.make_funcgens()
#self.dependencies = {}
self.typename = db.gettype(T) #, who_asks=self)
def getptrname(self):
return self.name
def make_funcgens(self):
self.funcgens = select_function_code_generators(self.obj, self.db, self.name)
if self.funcgens:
argnames = self.funcgens[0].argnames() #Assume identical for all funcgens
self.implementationtypename = self.db.gettype(self.T, argnames=argnames)
self._funccodegen_owner = self.funcgens[0]
else:
self._funccodegen_owner = None
def basename(self):
return self.obj._name
def enum_dependencies(self):
if not self.funcgens:
return []
return self.funcgens[0].allconstantvalues() #Assume identical for all funcgens
def forward_declaration(self):
for funcgen in self.funcgens:
yield '%s;' % (
forward_cdecl(self.implementationtypename,
funcgen.name(self.name), self.db.standalone))
def implementation(self):
for funcgen in self.funcgens:
for s in self.funcgen_implementation(funcgen):
yield s
def graphs_to_patch(self):
for funcgen in self.funcgens:
for i in funcgen.graphs_to_patch():
yield i
def funcgen_implementation(self, funcgen):
funcgen.implementation_begin()
# recompute implementationtypename as the argnames may have changed
argnames = funcgen.argnames()
implementationtypename = self.db.gettype(self.T, argnames=argnames)
yield '%s {' % cdecl(implementationtypename, funcgen.name(self.name))
#
# declare the local variables
#
localnames = list(funcgen.cfunction_declarations())
lengths = [len(a) for a in localnames]
lengths.append(9999)
start = 0
while start < len(localnames):
# pack the local declarations over as few lines as possible
total = lengths[start] + 8
end = start+1
while total + lengths[end] < 77:
total += lengths[end] + 1
end += 1
yield '\t' + ' '.join(localnames[start:end])
start = end
#
# generate the body itself
#
bodyiter = funcgen.cfunction_body()
for line in bodyiter:
# performs some formatting on the generated body:
# indent normal lines with tabs; indent labels less than the rest
if line.endswith(':'):
if line.startswith('err'):
try:
nextline = bodyiter.next()
except StopIteration:
nextline = ''
# merge this 'err:' label with the following line
line = '\t%s\t%s' % (line, nextline)
else:
line = ' ' + line
elif line:
line = '\t' + line
yield line
yield '}'
del bodyiter
funcgen.implementation_end()
def sandbox_stub(fnobj, db):
    """Replace an unexpected external function, under --sandbox translation,
    with a "Not Implemented" stub.  To really support such functions, port
    them to the new-style registry (e.g. rpython.module.ll_os.RegisterOs)."""
    from rpython.translator.sandbox import rsandbox
    stub_graph = rsandbox.get_external_function_sandbox_graph(
        fnobj, db, force_stub=True)
    return [FunctionCodeGenerator(stub_graph, db)]
def sandbox_transform(fnobj, db):
    """Under --sandbox, replace a function such as os_open_llimpl() with a
    graph that asks the controlling external process to perform the
    operation on the program's behalf."""
    from rpython.translator.sandbox import rsandbox
    proxy_graph = rsandbox.get_external_function_sandbox_graph(fnobj, db)
    return [FunctionCodeGenerator(proxy_graph, db)]
def select_function_code_generators(fnobj, db, functionname):
    # Decide how C code is produced for the function object 'fnobj':
    # returns a (possibly empty) list of code-generator objects.  An empty
    # list means the C function is provided externally (hand-written
    # wrappers, plain C externals, or functions with a fixed c_name).
    # XXX this logic is completely broken nowadays
    # _external_name does not mean that this is done oldstyle
    sandbox = db.need_sandboxing(fnobj)
    if hasattr(fnobj, '_external_name'):
        # Old-style external: registered in db.externalfuncs, no graph.
        if sandbox:
            return sandbox_stub(fnobj, db)
        db.externalfuncs[fnobj._external_name] = fnobj
        return []
    elif fnobj._callable in extfunc.EXTERNALS:
        # -- deprecated case --
        # 'fnobj' is one of the ll_xyz() functions with the suggested_primitive
        # flag in rpython.rtyper.module.*. The corresponding C wrappers are
        # written by hand in src/ll_*.h, and declared in extfunc.EXTERNALS.
        if sandbox and not fnobj._name.startswith('ll_stack_'): # XXX!!! Temporary
            return sandbox_stub(fnobj, db)
        db.externalfuncs[fnobj._callable] = fnobj
        return []
    elif hasattr(fnobj, 'graph'):
        # Normal RPython function with a flow graph: generate C from it.
        if sandbox and sandbox != "if_external":
            # apply the sandbox transformation
            return sandbox_transform(fnobj, db)
        exception_policy = getattr(fnobj, 'exception_policy', None)
        return [FunctionCodeGenerator(fnobj.graph, db, exception_policy,
                                      functionname)]
    elif getattr(fnobj, 'external', None) is not None:
        # Declared-external function: either plain C (nothing to emit) or
        # a CPython-level external needing a generated wrapper.
        if sandbox:
            return sandbox_stub(fnobj, db)
        elif fnobj.external == 'C':
            return []
        else:
            assert fnobj.external == 'CPython'
            return [CExternalFunctionCodeGenerator(fnobj, db)]
    elif hasattr(fnobj._callable, "c_name"):
        # The callable maps directly onto an existing C symbol.
        return []
    else:
        raise ValueError, "don't know how to generate code for %r" % (fnobj,)
class ExtType_OpaqueNode(ContainerNode):
    # Container node for prebuilt instances of externally-rendered opaque
    # types.  They cannot be statically initialized in C; instead a setup
    # macro is invoked at program startup.
    nodekind = 'rpyopaque'
    def enum_dependencies(self):
        # Opaque objects reference no other prebuilt containers.
        return []
    def initializationexpr(self, decoration=''):
        # There is no C static initializer for opaque externals.
        T = self.getTYPE()
        raise NotImplementedError(
            'seeing an unexpected prebuilt object: %s' % (T.tag,))
    def startupcode(self):
        # Emit the RPyOpaque_SETUP_<tag>() call that initializes the object
        # at runtime, passing extra arguments for known tags.
        T = self.getTYPE()
        args = [self.getptrname()]
        # XXX how to make this code more generic?
        if T.tag == 'ThreadLock':
            # Recreate the lock in the same locked/unlocked state as the
            # prebuilt Python-level lock object.
            lock = self.obj.externalobj
            if lock.locked():
                args.append('1')
            else:
                args.append('0')
        yield 'RPyOpaque_SETUP_%s(%s);' % (T.tag, ', '.join(args))
def opaquenode_factory(db, T, obj):
    """Dispatch a prebuilt opaque object to the proper container-node class."""
    if T == RuntimeTypeInfo:
        # RTTI objects are handled by a GC-policy-specific node class.
        factory = db.gcpolicy.rtti_node_factory()
        return factory(db, T, obj)
    if T.hints.get("render_structure", False):
        return ExtType_OpaqueNode(db, T, obj)
    raise Exception("don't know about %r" % (T,))
def weakrefnode_factory(db, T, obj):
    """Build the container node for a prebuilt weak reference by converting
    it into the GC-policy-specific wrapper structure."""
    assert isinstance(obj, llmemory._wref)
    target = obj._dereference()
    wrapper = db.gcpolicy.convert_weakref_to(target)
    #obj._converted_weakref = wrapper._obj # hack for genllvm :-/
    return db.getcontainernode(wrapper._obj, _dont_write_c_code=False)
class GroupNode(ContainerNode):
nodekind = 'group'
count_members = None
def __init__(self, *args):
ContainerNode.__init__(self, *args)
self.implementationtypename = 'struct group_%s_s @' % self.name
def basename(self):
return self.obj.name
def enum_dependencies(self):
# note: for the group used by the GC, it can grow during this phase,
# which means that we might not return all members yet. This is fixed
# by get_finish_tables() in rpython.memory.gctransform.framework.
for member in self.obj.members:
yield member._as_ptr()
def _fix_members(self):
if self.obj.outdated:
raise Exception(self.obj.outdated)
if self.count_members is None:
self.count_members = len(self.obj.members)
else:
# make sure no new member showed up, because it's too late
assert len(self.obj.members) == self.count_members
def forward_declaration(self):
self._fix_members()
yield ''
ctype = ['%s {' % cdecl(self.implementationtypename, '')]
for i, member in enumerate(self.obj.members):
structtypename = self.db.gettype(typeOf(member))
ctype.append('\t%s;' % cdecl(structtypename, 'member%d' % i))
ctype.append('} @')
ctype = '\n'.join(ctype)
yield '%s;' % (
forward_cdecl(ctype, self.name, self.db.standalone,
self.is_thread_local()))
yield '#include "src/llgroup.h"'
yield 'PYPY_GROUP_CHECK_SIZE(%s)' % (self.name,)
for i, member in enumerate(self.obj.members):
structnode = self.db.getcontainernode(member)
yield '#define %s %s.member%d' % (structnode.name,
self.name, i)
yield ''
def initializationexpr(self):
self._fix_members()
lines = ['{']
lasti = len(self.obj.members) - 1
for i, member in enumerate(self.obj.members):
structnode = self.db.getcontainernode(member)
lines1 = list(structnode.initializationexpr())
lines1[0] += '\t/* member%d: %s */' % (i, structnode.name)
if i != lasti:
lines1[-1] += ','
lines.extend(lines1)
lines.append('}')
return lines
# Maps each lltype container-type class to the factory (class or function)
# that builds its ContainerNode.  Consulted by the database when a prebuilt
# container of that type is first encountered.
ContainerNodeFactory = {
    Struct: StructNode,
    GcStruct: gcstructnode_factory,
    Array: ArrayNode,
    GcArray: ArrayNode,
    FixedSizeArray: FixedSizeArrayNode,
    FuncType: FuncNode,
    OpaqueType: opaquenode_factory,
    llmemory._WeakRefType: weakrefnode_factory,
    llgroup.GroupType: GroupNode,
    }
| 36.624771 | 95 | 0.567897 |
5c3537ec9b875bcc114b7f034c93eda3d518bee7 | 1,089 | py | Python | dwetl/reader/mpf_file_reader.py | ThisIsNima/dwetl | 12e06148929ec3ff5946345251c955cb4277d167 | [
"Apache-2.0"
] | 1 | 2021-04-08T11:58:51.000Z | 2021-04-08T11:58:51.000Z | dwetl/reader/mpf_file_reader.py | tsboom/dwetl | b137b8ad3fa36fcabb6a0de33c23e1328b6e3a19 | [
"Apache-2.0"
] | 1 | 2019-12-17T16:41:25.000Z | 2019-12-17T16:41:25.000Z | dwetl/reader/mpf_file_reader.py | ThisIsNima/dwetl | 12e06148929ec3ff5946345251c955cb4277d167 | [
"Apache-2.0"
] | 3 | 2019-04-26T11:47:30.000Z | 2022-03-05T06:11:57.000Z | import csv
class MpfFileReader:
    """
    Reads an "mpf" TSV file, yielding a Dictionary of key/value pairs for
    each data row.

    This implementation assumes that the first line in the file is a
    tab-separated list of the keys to use.  Rows with fewer values than
    headers simply omit the trailing keys; surplus values are ignored.
    The reader can also be used as a context manager.
    """
    def __init__(self, file_path):
        """
        Constructs a new MpfFileReader.
        :param file_path: the fully-qualified path to the file
        """
        self.file_path = file_path
        # newline='' lets the csv module do its own line-ending handling,
        # as recommended by the csv documentation.
        self.fd = open(self.file_path, newline='')
        self.reader = csv.reader(self.fd, delimiter='\t')
        # Assume first line is the header line
        self.headers = next(self.reader)

    def __iter__(self):
        for line in self.reader:
            # zip() pairs headers with values and stops at the shorter of
            # the two, matching the historical behavior of skipping
            # headers that have no corresponding value (and vice versa).
            yield dict(zip(self.headers, line))

    def close(self):
        """Close the underlying file; safe to call more than once."""
        if getattr(self, 'fd', None):
            self.fd.close()
            self.fd = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
        return False

    def __del__(self):
        # Best-effort cleanup if the caller never closed the reader.
        if hasattr(self, 'fd') and self.fd:
            self.fd.close()
6aac1cb32ce7977efc33f93beb1fd0745c63b7a7 | 1,314 | py | Python | tests/functional/legacy_api/test_removed.py | fairhopeweb/warehouse | 7d8ef742e8fe6b401190c28ce56761848041c89f | [
"Apache-2.0"
] | 3,103 | 2015-01-30T00:24:10.000Z | 2022-03-31T23:21:39.000Z | tests/functional/legacy_api/test_removed.py | fairhopeweb/warehouse | 7d8ef742e8fe6b401190c28ce56761848041c89f | [
"Apache-2.0"
] | 6,709 | 2015-01-05T01:23:20.000Z | 2022-03-31T14:49:46.000Z | tests/functional/legacy_api/test_removed.py | fairhopeweb/warehouse | 7d8ef742e8fe6b401190c28ce56761848041c89f | [
"Apache-2.0"
] | 959 | 2015-01-12T22:22:40.000Z | 2022-03-31T22:21:51.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
@pytest.mark.parametrize("action", ["submit", "submit_pkg_info"])
def test_removed_upload_apis(webtest, action):
    # Both legacy pre-registration actions must answer 410 Gone.
    response = webtest.post("/legacy/?:action={}".format(action), status=410)
    expected_status = (
        "410 Project pre-registration is no longer required or supported, "
        "upload your files instead."
    )
    assert response.status == expected_status
def test_remove_doc_upload(webtest):
    # Documentation uploads were retired in favour of readthedocs.org.
    response = webtest.post("/legacy/?:action=doc_upload", status=410)
    expected_status = (
        "410 Uploading documentation is no longer supported, we recommend "
        "using https://readthedocs.org/."
    )
    assert response.status == expected_status
def test_doap(webtest):
    # The legacy DOAP metadata endpoint must answer 410 Gone.
    response = webtest.get("/pypi?:action=doap&name=foo&version=1.0", status=410)
    assert response.status == "410 DOAP is no longer supported."
| 36.5 | 77 | 0.717656 |
bda5057116f8317f3660d28ca6bb9f850137443b | 115 | py | Python | tensorflow-check.py | srish28/tensorflow-windows-build-tutorial | 78252dd769929b3154a8c9a39321694aee8c0269 | [
"Apache-2.0"
] | 6 | 2019-01-22T16:41:46.000Z | 2021-04-08T17:28:47.000Z | tensorflow-check.py | srish28/tensorflow-windows-build-tutorial | 78252dd769929b3154a8c9a39321694aee8c0269 | [
"Apache-2.0"
] | 1 | 2019-01-21T08:40:29.000Z | 2020-03-15T11:40:57.000Z | tensorflow-check.py | srish28/tensorflow-windows-build-tutorial | 78252dd769929b3154a8c9a39321694aee8c0269 | [
"Apache-2.0"
] | 57 | 2018-09-25T15:49:44.000Z | 2021-08-16T12:48:26.000Z | import tensorflow as tf
# Smoke-test the TensorFlow installation by running a trivial graph.
greeting = tf.constant('Hello, TensorFlow!')
sess = tf.Session()
print(sess.run(greeting))
| 23 | 41 | 0.747826 |
d912140d969ea76f785ebb01515978671be28c5b | 6,960 | py | Python | instrumentation/opentelemetry-instrumentation-starlette/src/opentelemetry/instrumentation/starlette/__init__.py | brett-bim/opentelemetry-python-contrib | 22cc215c6cf1adddca6fa04d7d68b45bbe5b6bf3 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2021-11-30T09:54:59.000Z | 2021-11-30T09:54:59.000Z | instrumentation/opentelemetry-instrumentation-starlette/src/opentelemetry/instrumentation/starlette/__init__.py | brett-bim/opentelemetry-python-contrib | 22cc215c6cf1adddca6fa04d7d68b45bbe5b6bf3 | [
"Apache-2.0",
"BSD-3-Clause"
] | 2 | 2021-09-05T07:58:39.000Z | 2021-09-14T06:34:42.000Z | instrumentation/opentelemetry-instrumentation-starlette/src/opentelemetry/instrumentation/starlette/__init__.py | brett-bim/opentelemetry-python-contrib | 22cc215c6cf1adddca6fa04d7d68b45bbe5b6bf3 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage
-----
.. code-block:: python
from opentelemetry.instrumentation.starlette import StarletteInstrumentor
from starlette import applications
from starlette.responses import PlainTextResponse
from starlette.routing import Route
def home(request):
return PlainTextResponse("hi")
app = applications.Starlette(
routes=[Route("/foobar", home)]
)
StarletteInstrumentor.instrument_app(app)
Configuration
-------------
Exclude lists
*************
To exclude certain URLs from being tracked, set the environment variable ``OTEL_PYTHON_STARLETTE_EXCLUDED_URLS``
(or ``OTEL_PYTHON_EXCLUDED_URLS`` as fallback) with comma delimited regexes representing which URLs to exclude.
For example,
::
export OTEL_PYTHON_STARLETTE_EXCLUDED_URLS="client/.*/info,healthcheck"
will exclude requests such as ``https://site/client/123/info`` and ``https://site/xyz/healthcheck``.
Request/Response hooks
**********************
Utilize request/response hooks to execute custom logic to be performed before/after performing a request. The server request hook takes in a server span and ASGI
scope object for every incoming request. The client request hook is called with the internal span and an ASGI scope which is sent as a dictionary for when the method receive is called.
The client response hook is called with the internal span and an ASGI event which is sent as a dictionary for when the method send is called.
.. code-block:: python
def server_request_hook(span: Span, scope: dict):
if span and span.is_recording():
span.set_attribute("custom_user_attribute_from_request_hook", "some-value")
def client_request_hook(span: Span, scope: dict):
if span and span.is_recording():
span.set_attribute("custom_user_attribute_from_client_request_hook", "some-value")
def client_response_hook(span: Span, message: dict):
if span and span.is_recording():
span.set_attribute("custom_user_attribute_from_response_hook", "some-value")
StarletteInstrumentor().instrument(server_request_hook=server_request_hook, client_request_hook=client_request_hook, client_response_hook=client_response_hook)
API
---
"""
import typing
from typing import Collection
from starlette import applications
from starlette.routing import Match
from opentelemetry.instrumentation.asgi import OpenTelemetryMiddleware
from opentelemetry.instrumentation.asgi.package import _instruments
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from opentelemetry.semconv.trace import SpanAttributes
from opentelemetry.trace import Span
from opentelemetry.util.http import get_excluded_urls
_excluded_urls = get_excluded_urls("STARLETTE")
_ServerRequestHookT = typing.Optional[typing.Callable[[Span, dict], None]]
_ClientRequestHookT = typing.Optional[typing.Callable[[Span, dict], None]]
_ClientResponseHookT = typing.Optional[typing.Callable[[Span, dict], None]]
class StarletteInstrumentor(BaseInstrumentor):
    """An instrumentor for starlette

    See `BaseInstrumentor`
    """

    _original_starlette = None

    @staticmethod
    def instrument_app(
        app: applications.Starlette,
        server_request_hook: _ServerRequestHookT = None,
        client_request_hook: _ClientRequestHookT = None,
        client_response_hook: _ClientResponseHookT = None,
        tracer_provider=None,
    ):
        """Instrument an uninstrumented Starlette application."""
        if getattr(app, "is_instrumented_by_opentelemetry", False):
            # Already wrapped; adding the middleware again would duplicate it.
            return
        app.add_middleware(
            OpenTelemetryMiddleware,
            excluded_urls=_excluded_urls,
            default_span_details=_get_route_details,
            server_request_hook=server_request_hook,
            client_request_hook=client_request_hook,
            client_response_hook=client_response_hook,
            tracer_provider=tracer_provider,
        )
        app.is_instrumented_by_opentelemetry = True

    def instrumentation_dependencies(self) -> Collection[str]:
        return _instruments

    def _instrument(self, **kwargs):
        # Remember the pristine class so _uninstrument can restore it.
        self._original_starlette = applications.Starlette
        _InstrumentedStarlette._tracer_provider = kwargs.get("tracer_provider")
        # Copy each optional hook from kwargs onto the instrumented class.
        for option in (
            "server_request_hook",
            "client_request_hook",
            "client_response_hook",
        ):
            setattr(_InstrumentedStarlette, "_" + option, kwargs.get(option))
        applications.Starlette = _InstrumentedStarlette

    def _uninstrument(self, **kwargs):
        applications.Starlette = self._original_starlette
class _InstrumentedStarlette(applications.Starlette):
    """Starlette application that installs the OTel ASGI middleware on init.

    The class-level hook/tracer attributes are populated by
    StarletteInstrumentor._instrument before this class replaces
    applications.Starlette.
    """

    _tracer_provider = None
    _server_request_hook: _ServerRequestHookT = None
    _client_request_hook: _ClientRequestHookT = None
    _client_response_hook: _ClientResponseHookT = None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        cls = _InstrumentedStarlette
        # Every instance gets the middleware, configured from the class-level
        # settings captured at instrumentation time.
        self.add_middleware(
            OpenTelemetryMiddleware,
            excluded_urls=_excluded_urls,
            default_span_details=_get_route_details,
            server_request_hook=cls._server_request_hook,
            client_request_hook=cls._client_request_hook,
            client_response_hook=cls._client_response_hook,
            tracer_provider=cls._tracer_provider,
        )
def _get_route_details(scope):
    """Callback to retrieve the starlette route being served.

    TODO: there is currently no way to retrieve http.route from
    a starlette application from scope.
    See: https://github.com/encode/starlette/pull/804
    """
    app = scope["app"]
    route = None
    for candidate in app.routes:
        match, _ = candidate.matches(scope)
        if match == Match.FULL:
            # An exact match wins immediately.
            route = candidate.path
            break
        elif match == Match.PARTIAL:
            # Remember the partial match, but keep scanning for a full one.
            route = candidate.path
    # "method" only exists for http scopes; websockets have none, so the
    # span name falls back to an empty string.
    span_name = route or scope.get("method", "")
    attributes = {SpanAttributes.HTTP_ROUTE: route} if route else {}
    return span_name, attributes
| 37.219251 | 184 | 0.728161 |
c5860c414fff254820d54a870c5f629feb7997ca | 4,100 | py | Python | python/GafferSceneTest/ObjectToSceneTest.py | cwmartin/gaffer | 1f8a0f75522105c9d5efefac6d55cb61c1038909 | [
"BSD-3-Clause"
] | null | null | null | python/GafferSceneTest/ObjectToSceneTest.py | cwmartin/gaffer | 1f8a0f75522105c9d5efefac6d55cb61c1038909 | [
"BSD-3-Clause"
] | null | null | null | python/GafferSceneTest/ObjectToSceneTest.py | cwmartin/gaffer | 1f8a0f75522105c9d5efefac6d55cb61c1038909 | [
"BSD-3-Clause"
] | null | null | null | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import IECore
import Gaffer
import GafferTest
import GafferScene
import GafferSceneTest
class ObjectToSceneTest( GafferSceneTest.SceneTestCase ) :
def testFileInput( self ) :
fileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/cobs/pSphereShape1.cob" )
read = Gaffer.ObjectReader()
read["fileName"].setValue( fileName )
object = IECore.Reader.create( fileName ).read()
objectToScene = GafferScene.ObjectToScene()
objectToScene["object"].setInput( read["out"] )
self.assertEqual( objectToScene["out"].bound( "/" ), object.bound() )
self.assertEqual( objectToScene["out"].transform( "/" ), IECore.M44f() )
self.assertEqual( objectToScene["out"].object( "/" ), IECore.NullObject() )
self.assertEqual( objectToScene["out"].childNames( "/" ), IECore.InternedStringVectorData( [ "object" ] ) )
self.assertEqual( objectToScene["out"].bound( "/object" ), object.bound() )
self.assertEqual( objectToScene["out"].transform( "/object" ), IECore.M44f() )
self.assertEqual( objectToScene["out"].object( "/object" ), object )
self.assertEqual( objectToScene["out"].childNames( "/object" ), IECore.InternedStringVectorData() )
self.assertSceneValid( objectToScene["out"] )
def testMeshInput( self ) :
p = GafferScene.ObjectToScene()
p["object"].setValue( IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -1 ), IECore.V2f( 1 ) ) ) )
self.assertSceneValid( p["out"] )
self.assertEqual( p["out"].object( "/object" ), IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -1 ), IECore.V2f( 1 ) ) ) )
p["object"].setValue( IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -2 ), IECore.V2f( 2 ) ) ) )
self.assertSceneValid( p["out"] )
self.assertEqual( p["out"].object( "/object" ), IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -2 ), IECore.V2f( 2 ) ) ) )
def testProceduralInput( self ) :
p = IECore.ReadProcedural()
s = GafferScene.ObjectToScene()
s["object"].setValue( p, _copy = False )
self.failUnless( isinstance( s["out"].object( "/object" ), IECore.ParameterisedProcedural ) )
self.assertTrue( s["out"].object( "/object", _copy=False ).isSame( p ) )
if __name__ == "__main__":
unittest.main()
| 41.836735 | 138 | 0.692195 |
cea54c128fcf69aea0223b6e35fd5792b5612759 | 2,707 | py | Python | src/utils/split_dir.py | gregbugaj/marie-ai | f51a74f19ab5d7231c9f8a426284feff1671b974 | [
"MIT"
] | 4 | 2021-09-23T22:38:48.000Z | 2022-01-19T12:03:02.000Z | src/utils/split_dir.py | gregbugaj/marie-icr | f51a74f19ab5d7231c9f8a426284feff1671b974 | [
"MIT"
] | 17 | 2021-12-22T16:37:21.000Z | 2022-03-16T16:07:34.000Z | src/utils/split_dir.py | gregbugaj/marie-ai | f51a74f19ab5d7231c9f8a426284feff1671b974 | [
"MIT"
] | null | null | null | import os
from utils.utils import ensure_exists
def split_dir(dir_src, dir_dest):
import math
import random
import shutil
print("dir_src : %s" % (dir_src))
print("dir_dest : %s" % (dir_dest))
# expecting two directories [image, masked]
image_dir_src = os.path.join(dir_src, "image")
mask_dir_src = os.path.join(dir_src, "mask")
mask_filenames = os.listdir(mask_dir_src)
mask_filenames = random.sample(mask_filenames, len(mask_filenames))
size = len(mask_filenames)
validation_size = math.ceil(size * 0.0) # 5 percent validation size
test_size = math.ceil(size * 0.20) # 25 percent testing size
training_size = size - validation_size - test_size # 70 percent training
print(
"Class >> size = {} training = {} validation = {} test = {} ".format(
size, training_size, validation_size, test_size
)
)
validation_files = mask_filenames[:validation_size]
testing_files = mask_filenames[validation_size : validation_size + test_size]
training_files = mask_filenames[validation_size + test_size :]
print("Number of training images : {}".format(len(training_files)))
print("Number of validation images : {}".format(len(validation_files)))
print("Number of testing images : {}".format(len(testing_files)))
# prepare output directories
test_image_dir_out = os.path.join(dir_dest, "test", "image")
test_mask_dir_out = os.path.join(dir_dest, "test", "mask")
train_image_dir_out = os.path.join(dir_dest, "train", "image")
train_mask_dir_out = os.path.join(dir_dest, "train", "mask")
validation_image_dir_out = os.path.join(dir_dest, "validation", "image")
validation_mask_dir_out = os.path.join(dir_dest, "validation", "mask")
ensure_exists(test_image_dir_out)
ensure_exists(test_mask_dir_out)
ensure_exists(train_image_dir_out)
ensure_exists(train_mask_dir_out)
ensure_exists(validation_image_dir_out)
ensure_exists(validation_mask_dir_out)
def copyfiles(files, srcDir, destDir):
if not os.path.exists(destDir):
os.makedirs(destDir)
for filename in files:
src = os.path.join(srcDir, filename)
dest = os.path.join(destDir, filename)
shutil.copy(src, dest)
copyfiles(training_files, image_dir_src, train_image_dir_out)
copyfiles(training_files, mask_dir_src, train_mask_dir_out)
copyfiles(testing_files, image_dir_src, test_image_dir_out)
copyfiles(testing_files, mask_dir_src, test_mask_dir_out)
copyfiles(validation_files, image_dir_src, validation_image_dir_out)
copyfiles(validation_files, mask_dir_src, validation_mask_dir_out)
| 35.618421 | 81 | 0.708903 |
c19c59d1aaa452fa5c868884d5eaceb01848cd67 | 1,560 | py | Python | kolibri/core/webpack/test/base.py | nucleogenesis/kolibri | 7b653a28f014ed9d0f29d116e120d1a02eb62b4c | [
"MIT"
] | null | null | null | kolibri/core/webpack/test/base.py | nucleogenesis/kolibri | 7b653a28f014ed9d0f29d116e120d1a02eb62b4c | [
"MIT"
] | null | null | null | kolibri/core/webpack/test/base.py | nucleogenesis/kolibri | 7b653a28f014ed9d0f29d116e120d1a02eb62b4c | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import copy
import json
import tempfile
from ..hooks import WebpackBundleHook
TEST_STATS_FILE_DATA = {
"status": "done",
"chunks": {
"untitled": [
{
"name": "non_default_frontend-2c4fb3d6a29238b06f84.js",
"publicPath": "non_default_frontend/non_default_frontend-2c4fb3d6a29238b06f84.js",
"path": "kolibri/core/static/non_default_frontend/non_default_frontend-2c4fb3d6a29238b06f84.js",
}
]
},
"publicPath": "default_frontend/",
"messages": "true",
}
class TestHookMixin(object):
"""
This hook will automatically create a stats file (normally created by npm)
and populate it with test data according to the uniqe_slug of the hook
that it's mixed into.
"""
src_file = "assets/src/kolibri_core_app.js"
@property
def _stats_file(self):
self.TEST_STATS_FILE = tempfile.NamedTemporaryFile(mode="w+", delete=False)
self.TEST_STATS_FILE_DATA = copy.deepcopy(TEST_STATS_FILE_DATA)
self.TEST_STATS_FILE_DATA["chunks"][
self.unique_slug
] = self.TEST_STATS_FILE_DATA["chunks"].pop("untitled")
json.dump(self.TEST_STATS_FILE_DATA, self.TEST_STATS_FILE)
self.TEST_STATS_FILE.close()
print(self.unique_slug)
return self.TEST_STATS_FILE.name
class TestHook(TestHookMixin, WebpackBundleHook):
unique_slug = "non_default_frontend"
| 30 | 112 | 0.689103 |
d2e25e90f8ac7ef8b70fd4768d4b51aed160aaa8 | 513 | py | Python | mock_base/mock_messaging.py | SpinaNico/python-mock-base | 2a53b54e9157160733a6e16a5ad7e6e129bd5c04 | [
"MIT"
] | null | null | null | mock_base/mock_messaging.py | SpinaNico/python-mock-base | 2a53b54e9157160733a6e16a5ad7e6e129bd5c04 | [
"MIT"
] | null | null | null | mock_base/mock_messaging.py | SpinaNico/python-mock-base | 2a53b54e9157160733a6e16a5ad7e6e129bd5c04 | [
"MIT"
] | null | null | null | from . import get_mock_app, MockApp
from ._messaging.messaging import Message, Notification
def send(message: Message, dry_run=False, app: MockApp = None):
if app is None:
_a: MockApp = get_mock_app()
if _a is None:
raise Exception("default app not initialized ")
_a.notify_listeners(message)
else:
_a = get_mock_app(app.name)
if _a is None:
raise Exception("{} app not initialized ".format(app.name))
_a.notify_listeners(message)
| 32.0625 | 71 | 0.649123 |
63777fdb7a5aec2a563b02c27135f72e42c2abe0 | 25,748 | py | Python | 2/code/circuit/full_circuit.py | mkeshita/6006psets_ng | f3cdbd978a2d8d4c5f3e419e75b3d8e166d94a17 | [
"MIT"
] | 1 | 2021-10-12T03:54:57.000Z | 2021-10-12T03:54:57.000Z | 2/code/circuit/full_circuit.py | mkeshita/6006psets_ng | f3cdbd978a2d8d4c5f3e419e75b3d8e166d94a17 | [
"MIT"
] | null | null | null | 2/code/circuit/full_circuit.py | mkeshita/6006psets_ng | f3cdbd978a2d8d4c5f3e419e75b3d8e166d94a17 | [
"MIT"
] | 2 | 2021-10-12T03:54:50.000Z | 2022-03-29T15:32:15.000Z | #!/usr/bin/env python
import heapq # Only used when QUEUE=lib
import json # Used when TRACE=jsonp
import os # Used to get the QUEUE and TRACE environment variables
import re # Used when TRACE=jsonp
import sys # Used to smooth over the range / xrange issue.
# Python 3 doesn't have xrange, and range behaves like xrange.
# Alias xrange to range on Python 3 so the py2-style loops below run
# unchanged on both interpreter lines.
if sys.version_info >= (3,):
    xrange = range
# Circuit simulation library.
class TruthTable:
    """Truth table representation of the logic inside a gate."""

    def __init__(self, name, output_list):
        """Creates a truth table from a list representation.

        Args:
            name: User-friendly name for the truth table.
            output_list: The entries in the truth table, in the standard order
                (the inputs should look like an incrementing counter).

        Raises:
            ValueError: If the list cannot be split evenly in half at every
                nesting level (i.e. its length is not a power of two).
            TypeError: If a table entry is not 0 or 1.
        """
        self.name = name
        self.table = self._build_table(output_list)
        self.input_count = self._table_depth(self.table)

    def output(self, inputs):
        """Computes the output for this truth table, given a list of inputs."""
        if len(inputs) != self.input_count:
            raise ValueError('Inputs list is incorrectly sized')
        # Walk down one nesting level of the table per input bit.
        node = self.table
        for bit in inputs:
            node = node[bit]
        return node

    def _build_table(self, output_list):
        # Recursively converts the flat output list into nested pair lists,
        # one nesting level per input variable.
        if len(output_list) == 2:
            for entry in output_list:
                if entry != 0 and entry != 1:
                    raise TypeError('Invalid value in truth output list')
            return output_list
        if len(output_list) % 2 != 0:
            raise ValueError('Invalid truth output list length')
        half = len(output_list) // 2
        return [self._build_table(output_list[:half]),
                self._build_table(output_list[half:])]

    def _table_depth(self, table):
        # The depth (number of inputs) of a truth table: count nesting
        # levels until a scalar 0/1 leaf is reached.
        depth = 0
        node = table
        while node != 0 and node != 1:
            depth += 1
            node = node[0]
        return depth
class GateType:
    """A type of gate, e.g. 2-input NAND with 60ps delay."""

    def __init__(self, name, truth_table, delay):
        """Creates a gate type with a truth table and output delay.

        Args:
            name: User-friendly name for the gate type.
            truth_table: TruthTable instance containing the gate's logic.
            delay: The time it takes an input transition to cause an output
                transition.

        Raises:
            ValueError: An exception if the delay is negative.
        """
        if delay < 0:
            raise ValueError('Invalid delay')
        self.name = name
        self.truth_table = truth_table
        # Cached from the truth table for convenient access.
        self.input_count = truth_table.input_count
        self.delay = delay

    def output(self, inputs):
        """The gate's output value, given a list of inputs."""
        # The logic is fully delegated to the truth table.
        return self.truth_table.output(inputs)

    def output_time(self, input_time):
        """The time of the gate's output transition.

        Computes the time of the output transition given an input transition
        time.

        Args:
            input_time: Time of the input transition.
        """
        return input_time + self.delay
class Gate:
    """A gate in a circuit."""

    def __init__(self, name, gate_type):
        """Creates an unconnected gate whose initial output is false.

        Args:
            name: User-friendly name for the gate.
            gate_type: GateType instance specifying the gate's behavior.
        """
        self.name = name
        self.gate_type = gate_type
        # One slot per input terminal; None means "not yet connected".
        self.in_gates = [None] * gate_type.input_count
        self.out_gates = []
        self.probed = False
        self.output = 0

    def connect_input(self, gate, terminal):
        """Connects one of this gate's input terminals to another gate's output.

        Args:
            gate: The gate whose output terminal will be connected.
            terminal: The number of this gate's input terminal that will be
                connected (using 0-based indexing)

        Raises:
            RuntimeError: An exception if the terminal is already connected.
        """
        if self.in_gates[terminal] is not None:
            raise RuntimeError('Input terminal already connected')
        self.in_gates[terminal] = gate
        gate.out_gates.append(self)

    def probe(self):
        """Marks this gate as probed.

        So the simulator will record its transitions.

        Raises:
            RuntimeError: An exception if the gate is already probed.
        """
        if self.probed:
            raise RuntimeError('Gate already probed')
        self.probed = True

    def has_inputs_connected(self):
        """True if all the gate's input terminals are connected to other gates.
        """
        # Fixed: use `is None` instead of `== None`, and avoid shadowing
        # the builtin `input`.
        return all(in_gate is not None for in_gate in self.in_gates)

    def has_output_connected(self):
        """True if the gate's output terminal is connected to another gate."""
        # BUG FIX: the original used JavaScript-style `.length`, which
        # raises AttributeError on a Python list.
        return len(self.out_gates) > 0

    def is_connected(self):
        """True if all the gate's inputs and outputs are connected."""
        # BUG FIX: the original referenced the bound methods without calling
        # them, so the expression was always truthy.
        return self.has_inputs_connected() and self.has_output_connected()

    def transition_output(self):
        """The value that the gate's output will have after transition.

        The gate's output will not reflect this value right away. Each gate has
        a delay from its inputs' transitions to the output's transition. The
        circuit simulator is responsible for setting the appropriate time.
        """
        return self.gate_type.output([gate.output for gate in self.in_gates])

    def transition_time(self, input_time):
        """The time that the gate's output will reflect a change in its inputs.

        Args:
            input_time: The last time when the gate's inputs changed.
        """
        return self.gate_type.output_time(input_time)

    def as_json(self):
        """A hash that obeys the JSON format, representing the gate."""
        return {'id': self.name, 'table': self.gate_type.truth_table.name,
                'type': self.gate_type.name, 'probed': self.probed,
                'inputs': [g and g.name for g in self.in_gates],
                'outputs': [g and g.name for g in self.out_gates]}
class Circuit:
    """The topology of a combinational circuit, and a snapshot of its state.

    This class contains topological information about a circuit (how the gates
    are connected to each other) as well as information about the gates' states
    (values at their output terminals) at an instance of time.
    """

    def __init__(self):
        """Creates an empty circuit."""
        self.truth_tables = {}
        self.gate_types = {}
        self.gates = {}

    def add_truth_table(self, name, output_list):
        """Adds a truth table that can be later attached to gate types.

        Args:
            name: A unique string used to identify the truth table.
            output_list: A list of outputs for the truth table.

        Returns:
            A newly created TruthTable instance.

        Raises:
            ValueError: An exception if the name is already in use.
        """
        if name in self.truth_tables:
            raise ValueError('Truth table name already used')
        # BUG FIX: the docstring promises the new instance, but nothing was
        # returned before.
        table = TruthTable(name, output_list)
        self.truth_tables[name] = table
        return table

    def add_gate_type(self, name, truth_table_name, delay):
        """Adds a gate type that can be later attached to gates.

        Args:
            name: A unique string used to identify the gate type.
            truth_table_name: The name of the gate's truth table.
            delay: The gate's delay from an input transition to an output
                transition.

        Returns:
            The newly created GateType instance.

        Raises:
            ValueError: An exception if the name is already in use, or if the
                delay is negative.
        """
        if name in self.gate_types:
            raise ValueError('Gate type name already used')
        truth_table = self.truth_tables[truth_table_name]
        if delay < 0:
            raise ValueError('Invalid delay')
        # BUG FIX: return the instance, as documented.
        gate_type = GateType(name, truth_table, delay)
        self.gate_types[name] = gate_type
        return gate_type

    def add_gate(self, name, type_name, input_names):
        """Adds a gate and connects it to other gates.

        Args:
            name: A unique string used to identify the gate.
            type_name: The name of the gate's type.
            input_names: List of the names of gates whose outputs are connected
                to this gate's inputs.

        Returns:
            The newly created Gate instance.

        Raises:
            ValueError: An exception if the name is already in use.
        """
        if name in self.gates:
            raise ValueError('Gate name already used')
        gate_type = self.gate_types[type_name]
        self.gates[name] = new_gate = Gate(name, gate_type)
        # enumerate() instead of xrange keeps the class independent of the
        # module-level py2/py3 shim.
        for terminal, input_name in enumerate(input_names):
            new_gate.connect_input(self.gates[input_name], terminal)
        return new_gate

    def add_probe(self, gate_name):
        """Adds a gate to the list of outputs."""
        self.gates[gate_name].probe()

    def as_json(self):
        """A hash that obeys the JSON format, representing the circuit."""
        # BUG FIX: dict.itervalues() is Python 2-only and crashed under
        # Python 3; .values() works on both.  Also avoid shadowing the
        # module-level `json` import with a local variable.
        return {'gates': [gate.as_json() for gate in self.gates.values()]}
class Transition:
    """A transition in a gate's output."""

    # Counter backing next_object_id(); incremented on each call.
    _next_id = 0

    def __init__(self, gate, new_output, time):
        """Creates a potential transition of a gate's output to a new value.

        Args:
            gate: The Gate whose output might transition.
            new_output: The new output value that the gate will take.
            time: The time at which the Gate's output will match the new value.

        Raises:
            ValueError: An exception if the output is not 0 or 1.
        """
        if new_output != 0 and new_output != 1:
            raise ValueError('Invalid output value')
        self.gate = gate
        self.new_output = new_output
        self.time = time
        self.object_id = Transition.next_object_id()

    def _sort_key(self):
        # Order transitions by time, breaking ties by creation order so the
        # ordering is total (no two transitions ever compare equal).
        return (self.time, self.object_id)

    def __lt__(self, other):
        # :nodoc: Transitions should be comparable.
        return self._sort_key() < other._sort_key()

    def __le__(self, other):
        # :nodoc: Transitions should be comparable.
        return self._sort_key() <= other._sort_key()

    def __gt__(self, other):
        # :nodoc: Transitions should be comparable.
        return self._sort_key() > other._sort_key()

    def __ge__(self, other):
        # :nodoc: Transitions should be comparable.
        return self._sort_key() >= other._sort_key()

    # NOTE: Due to the comparisons' use of object_id, no two Transitions will
    # be equal. So we don't need to override __eq__, __ne__, or __hash__.

    def is_valid(self):
        """True if the transition would cause an actual change in the gate's
        output.
        """
        return self.new_output != self.gate.output

    def apply(self):
        """Makes this transition effective by changing the gate's output.

        Raises:
            ValueError: An exception if applying the transition wouldn't cause
                an actual change in the gate's output.
        """
        if not self.is_valid():
            raise ValueError('Gate output should not transition to the same '
                             'value')
        self.gate.output = self.new_output

    def __repr__(self):
        # :nodoc: debug output
        return ('<Transition at t=' + str(self.time) + ', gate ' +
                self.gate.name + ' -> ' + str(self.new_output) + '>')

    @staticmethod
    def next_object_id():
        """Returns a unique numerical ID to be used as a Transition's object_id.
        """
        assigned = Transition._next_id
        Transition._next_id = assigned + 1
        return assigned
class PriorityQueue:
    """Array-based priority queue implementation.

    append is O(1); min and pop are O(n) scans of the (unsorted) backing
    array, with the scan result cached in min_index until the queue changes.
    """

    def __init__(self):
        """Initially empty priority queue."""
        self.queue = []
        # Cached index of the minimum element; None when the cache is stale.
        self.min_index = None

    def __len__(self):
        # Number of elements in the queue.
        return len(self.queue)

    def append(self, key):
        """Inserts an element in the priority queue.

        Raises:
            ValueError: An exception if key is None.
        """
        if key is None:
            raise ValueError('Cannot insert None in the queue')
        self.queue.append(key)
        self.min_index = None

    def min(self):
        """The smallest element in the queue, or None if the queue is empty."""
        # BUG FIX: removed a copy-pasted comment claiming the array is
        # "sorted in reverse order" -- this implementation keeps the array
        # unsorted and scans for the minimum.
        if len(self.queue) == 0:
            return None
        self._find_min()
        return self.queue[self.min_index]

    def pop(self):
        """Removes the minimum element in the queue.

        Returns:
            The value of the removed element, or None if the queue is empty.
        """
        if len(self.queue) == 0:
            return None
        self._find_min()
        popped_key = self.queue.pop(self.min_index)
        self.min_index = None
        return popped_key

    def _find_min(self):
        # Computes the index of the minimum element in the queue.
        #
        # This method may crash if called when the queue is empty.
        if self.min_index is not None:
            return
        min_key = self.queue[0]
        self.min_index = 0
        # range() instead of xrange keeps the class independent of the
        # module-level py2/py3 shim.
        for i in range(1, len(self.queue)):
            if self.queue[i] < min_key:
                min_key = self.queue[i]
                self.min_index = i
class BlitPriorityQueue:
    """Priority queue implementation that maintains a sorted array of keys.

    This is a devil's-advocate N^2 implementation. It takes advantage of the
    fact that array operations are done in native code (most likely in a
    memmove call), and so they're much faster than high-level statements in a
    log(N) implementation.
    """

    def __init__(self):
        """Initially empty priority queue."""
        self.queue = []

    def __len__(self):
        # Number of elements in the queue.
        return len(self.queue)

    def append(self, key):
        """Inserts an element in the priority queue."""
        if key is None:
            raise ValueError('Cannot insert None in the queue')
        # Binary-search the descending array for the insertion point.
        lo, hi = 0, len(self.queue) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            probe = self.queue[mid]
            if key == probe:
                # An equal key overwrites in place instead of inserting.
                self.queue[mid] = key
                return
            if key < probe:
                # Smaller keys live towards the end (descending order).
                lo = mid + 1
            else:
                hi = mid - 1
        self.queue.insert(hi + 1, key)

    def min(self):
        """The smallest element in the queue."""
        # Descending order: the minimum is the last element.
        return self.queue[-1]

    def pop(self):
        """Removes the minimum element in the queue.

        Returns:
            The value of the removed element.
        """
        # Popping from the end of the descending array is O(1).
        return self.queue.pop()
class HeapPriorityQueue(PriorityQueue):
    """Heap-based priority queue implementation.

    The heap lives in ``self.heap`` with a ``None`` sentinel at index 0, so
    the children of node ``i`` sit at ``2*i`` and ``2*i + 1``.
    """

    def __init__(self):
        """Initially empty priority queue."""
        self.heap = [None]

    def __len__(self):
        # Number of elements in the queue (index-0 sentinel excluded).
        return len(self.heap) - 1

    def append(self, key):
        """Inserts an element in the priority queue."""
        if key is None:
            raise ValueError('Cannot insert None in the queue')
        i = len(self.heap)
        self.heap.append(key)
        # Sift the new key up until its parent is no larger.
        while i > 1:
            parent = i // 2
            if key < self.heap[parent]:
                self.heap[i], self.heap[parent] = self.heap[parent], key
                i = parent
            else:
                break

    def min(self):
        """The smallest element in the queue."""
        return self.heap[1]

    def pop(self):
        """Removes the minimum element in the queue.

        Returns:
            The value of the removed element.
        """
        heap = self.heap
        popped_key = heap[1]
        if len(heap) == 2:
            return heap.pop()
        heap[1] = key = heap.pop()
        # Sift the relocated key down until both children are no smaller.
        i = 1
        while True:
            left = i * 2
            if left >= len(heap):
                break
            # BUG FIX: the old code computed
            #   right_key = right < len(heap) and heap[right]
            # and then tested "if right_key and ...", which silently skipped
            # the right child whenever its key was falsy (e.g. the int 0),
            # corrupting the heap order. Use explicit bounds checks instead.
            child = left
            right = left + 1
            if right < len(heap) and heap[right] < heap[left]:
                child = right
            child_key = heap[child]
            if key <= child_key:
                break
            heap[i], heap[child] = child_key, key
            i = child
        return popped_key
class LibPriorityQueue(PriorityQueue):
    """Priority queue that delegates to Python's stdlib ``heapq`` module."""

    def __init__(self):
        """Initially empty priority queue."""
        self.heap = []

    def __len__(self):
        # Number of elements currently stored.
        return len(self.heap)

    def append(self, key):
        """Inserts an element in the priority queue."""
        if key is None:
            raise ValueError('Cannot insert None in the queue')
        heapq.heappush(self.heap, key)

    def min(self):
        """The smallest element in the queue."""
        # heapq maintains the heap invariant with the minimum at index 0.
        return self.heap[0]

    def pop(self):
        """Removes the minimum element in the queue.

        Returns:
            The value of the removed element.
        """
        return heapq.heappop(self.heap)
class Simulation:
    """State needed to compute a circuit's state as it evolves over time."""

    def __init__(self, circuit):
        """Creates a simulation that will run on a pre-built circuit.

        The Circuit instance does not need to be completely built before it is
        given to the class constructor. However, it does need to be complete
        before the run method is called.

        Args:
            circuit: The circuit whose state transitions will be simulated.
        """
        self.circuit = circuit
        self.in_transitions = []
        # The QUEUE environment variable selects the priority queue
        # implementation, mainly for benchmarking the alternatives.
        queue_type = os.environ.get('QUEUE')
        if queue_type == 'heap':
            self.queue = HeapPriorityQueue()
        elif queue_type == 'lib':
            self.queue = LibPriorityQueue()
        elif queue_type == 'blit':
            self.queue = BlitPriorityQueue()
        else:
            self.queue = PriorityQueue()
        self.probes = []
        self.probe_all_undo_log = []

    def add_transition(self, gate_name, output_value, output_time):
        """Adds a transition to the simulation's initial conditions.

        The transition should involve one of the circuit's input gates.
        """
        gate = self.circuit.gates[gate_name]
        self.in_transitions.append([output_time, gate_name, output_value, gate])

    def step(self):
        """Runs the simulation for one time slice.

        A step does not equal one unit of time. The simulation logic ignores
        time units where nothing happens, and bundles all the transitions that
        happen at the same time in a single step.

        Returns:
            The simulation time after the step occurred.
        """
        step_time = self.queue.min().time
        # Need to apply all the transitions at the same time before
        # propagating their effects to downstream gates.
        transitions = []
        while len(self.queue) > 0 and self.queue.min().time == step_time:
            transition = self.queue.pop()
            if not transition.is_valid():
                continue
            transition.apply()
            if transition.gate.probed:
                self.probes.append([transition.time, transition.gate.name,
                                    transition.new_output])
            transitions.append(transition)
        # Propagate the transition effects.
        for transition in transitions:
            for gate in transition.gate.out_gates:
                output = gate.transition_output()
                time = gate.transition_time(step_time)
                self.queue.append(Transition(gate, output, time))
        return step_time

    def run(self):
        """Runs the simulation to completion."""
        for in_transition in sorted(self.in_transitions):
            self.queue.append(Transition(in_transition[3], in_transition[2],
                                         in_transition[0]))
        while len(self.queue) > 0:
            self.step()
        self.probes.sort()

    def probe_all_gates(self):
        """Turns on probing for all gates in the simulation."""
        for gate in self.circuit.gates.itervalues():
            if not gate.probed:
                # Remember which gates we flipped so the change is reversible.
                self.probe_all_undo_log.append(gate)
                gate.probe()

    def undo_probe_all_gates(self):
        """Reverts the effects of calling probe_all_gates."""
        for gate in self.probe_all_undo_log:
            gate.probed = False
        self.probe_all_undo_log = []

    @staticmethod
    def from_file(file):
        """Builds a simulation by reading a textual description from a file.

        Args:
            file: A File object supplying the input.

        Returns: A new Simulation instance.
        """
        circuit = Circuit()
        simulation = Simulation(circuit)
        while True:
            line = file.readline()
            if not line:
                # BUG FIX: readline() returns '' at end of file; the old code
                # treated EOF like a blank line and spun forever whenever the
                # input lacked a trailing 'done' command.
                break
            command = line.split()
            if len(command) < 1:
                continue
            if command[0] == 'table':
                outputs = [int(token) for token in command[2:]]
                circuit.add_truth_table(command[1], outputs)
            elif command[0] == 'type':
                if len(command) != 4:
                    raise ValueError('Invalid number of arguments for gate type'
                                     ' command')
                circuit.add_gate_type(command[1], command[2], int(command[3]))
            elif command[0] == 'gate':
                circuit.add_gate(command[1], command[2], command[3:])
            elif command[0] == 'probe':
                if len(command) != 2:
                    raise ValueError('Invalid number of arguments for gate '
                                     'probe command')
                circuit.add_probe(command[1])
            elif command[0] == 'flip':
                if len(command) != 4:
                    raise ValueError('Invalid number of arguments for flip '
                                     'command')
                simulation.add_transition(command[1], int(command[2]),
                                          int(command[3]))
            elif command[0] == 'done':
                break
        return simulation

    def layout_from_file(self, file):
        """Reads the simulation's visual layout from a file.

        Args:
            file: A File-like object supplying the input.

        Returns:
            self.
        """
        while True:
            line = file.readline()
            if len(line) == 0:
                raise ValueError('Input lacks circuit layout information')
            if line.strip() == 'layout':
                svg = file.read()
                # Get rid of the XML doctype.
                svg = re.sub('\\<\\?xml.*\\?\\>', '', svg)
                svg = re.sub('\\<\\!DOCTYPE[^>]*\\>', '', svg)
                self.layout_svg = svg.strip()
                break
        # BUG FIX: the original ended with a bare "self" expression (a no-op),
        # so the documented "Returns: self" contract was never honored.
        return self

    def trace_as_json(self):
        """A hash that obeys the JSON format, containing simulation data."""
        # NOTE(review): self.layout_svg is only set by layout_from_file, so
        # calling this without a loaded layout raises AttributeError.
        return {'circuit': self.circuit.as_json(), 'trace': self.probes,
                'layout': self.layout_svg}

    def outputs_to_line_list(self):
        """Probe results rendered as 'time gate_name value' strings."""
        return [' '.join([str(probe[0]), probe[1], str(probe[2])])
                for probe in self.probes]

    def outputs_to_file(self, file):
        """Writes a textual description of the simulation's probe results to a
        file.

        Args:
            file: A File object that receives the probe results.
        """
        for line in self.outputs_to_line_list():
            file.write(line)
            file.write("\n")

    def jsonp_to_file(self, file):
        """Writes a JSONP description of the simulation's probe results to a
        file.

        Args:
            file: A File object that receives the probe results.
        """
        file.write('onJsonp(')
        json.dump(self.trace_as_json(), file)
        file.write(');\n')
# Command-line controller: builds a Simulation from stdin, runs it, and
# writes either plain probe output or a JSONP trace to stdout depending on
# the TRACE environment variable.
if __name__ == '__main__':
    import sys
    trace_mode = os.environ.get('TRACE')
    sim = Simulation.from_file(sys.stdin)
    if trace_mode == 'jsonp':
        sim.layout_from_file(sys.stdin)
        sim.probe_all_gates()
    sim.run()
    if trace_mode == 'jsonp':
        sim.undo_probe_all_gates()
        sim.jsonp_to_file(sys.stdout)
    else:
        sim.outputs_to_file(sys.stdout)
| 35.031293 | 92 | 0.569132 |
67674e789e708fff5f472823574d3accc6bb317a | 264 | py | Python | blaze/dispatch.py | quantopian-enterprise/blaze | 6b686bed87993494b11676ed25e7b30f18ca2248 | [
"BSD-3-Clause"
] | 2,106 | 2015-08-20T11:53:30.000Z | 2022-03-30T19:42:11.000Z | blaze/dispatch.py | quantopian-enterprise/blaze | 6b686bed87993494b11676ed25e7b30f18ca2248 | [
"BSD-3-Clause"
] | 479 | 2015-08-20T06:09:38.000Z | 2020-10-21T13:44:57.000Z | blaze/dispatch.py | quantopian-enterprise/blaze | 6b686bed87993494b11676ed25e7b30f18ca2248 | [
"BSD-3-Clause"
] | 280 | 2015-08-20T08:42:01.000Z | 2022-03-16T08:05:19.000Z | from __future__ import absolute_import, division, print_function
from datashape.dispatch import namespace
from multipledispatch import dispatch
from functools import partial
__all__ = 'dispatch', 'namespace'
dispatch = partial(dispatch, namespace=namespace)
| 20.307692 | 64 | 0.825758 |
529fa3d2cccef04dc5d948a27faea82feb935051 | 816 | py | Python | user/tests.py | vivianrdu/gfi-project | 3d322e263a153f2f1eb624f8f4f33837d2af362c | [
"MIT"
] | 1 | 2021-06-22T22:50:04.000Z | 2021-06-22T22:50:04.000Z | user/tests.py | vivianrdu/gfi-project | 3d322e263a153f2f1eb624f8f4f33837d2af362c | [
"MIT"
] | null | null | null | user/tests.py | vivianrdu/gfi-project | 3d322e263a153f2f1eb624f8f4f33837d2af362c | [
"MIT"
] | 1 | 2021-05-28T05:39:20.000Z | 2021-05-28T05:39:20.000Z | from django.test import TestCase
from django.contrib.auth.models import User
from django.contrib.auth import SESSION_KEY
# Create your tests here.
class UserModelTests(TestCase):
def setUp(self):
# Every test needs a client.
self.client = Client()
self.credentials = {
'username': 'testuser',
'password': 'secret'}
User.objects.create_user(**self.credentials)
def test_response(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def test_login(self):
response = self.client.post('/login/', **self.credentials)
self.assertTrue(response.context['user'].is_active)
def test_create_user(self):
c = Client()
c.login(username='fred', password='secret')
| 28.137931 | 72 | 0.63848 |
a6e3ee2b84cf44014ae798c51c949ba416a54a98 | 1,347 | py | Python | proxyspiders/ipool/middlewares.py | seaify/ipool | fb12a8db260240d4ca51b065f4219fe9a58f93c4 | [
"MIT"
] | 27 | 2015-05-27T10:10:31.000Z | 2021-11-16T12:44:35.000Z | proxyspiders/ipool/middlewares.py | seaify/ipool | fb12a8db260240d4ca51b065f4219fe9a58f93c4 | [
"MIT"
] | 14 | 2015-04-28T10:13:59.000Z | 2015-06-02T07:19:38.000Z | proxyspiders/ipool/middlewares.py | seaify/ipool | fb12a8db260240d4ca51b065f4219fe9a58f93c4 | [
"MIT"
] | 15 | 2015-09-19T17:46:26.000Z | 2019-11-08T15:11:16.000Z | from scrapy import log
from scrapy.http import Request
import random
import time
class InitRequest(object):
def __init__(self, settings):
self.proxy_dict = {}
with open('books_spider/proxy.list') as fd:
for x in fd:
self.proxy_dict[x.strip()] = 100.0
self.proxy_dict['http://localhost:8087/'] = 10.0
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.settings)
def process_request(self, request, spider):
print('number of proxys is %d' % len(self.proxy_dict))
proxys = sorted(self.proxy_dict.items(), key=lambda x: x[1])
request.meta['proxy'] = random.choice(proxys[0:60])[0]
print('current proxy is %s' % request.meta['proxy'])
def process_response(self, request, response, spider):
if response.status != 200:
print(response.status)
self.proxy_dict[request.meta['proxy']] = 1000.0
return Request(request.url)
print(request.meta.get('download_latency'))
self.proxy_dict[request.meta['proxy']] = \
request.meta['download_latency']
return response
def process_exception(self, request, exception, spider):
print('hello ' + str(exception))
del self.proxy_dict[request.meta['proxy']]
return Request(request.url)
| 33.675 | 68 | 0.628805 |
4e4f6066370e464ab1e78fbf8943f1076749d79d | 5,846 | py | Python | airflow/contrib/auth/backends/google_auth.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | [
"Apache-2.0"
] | 8 | 2017-04-20T16:15:44.000Z | 2020-10-11T13:44:10.000Z | airflow/contrib/auth/backends/google_auth.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | [
"Apache-2.0"
] | 219 | 2017-03-15T18:40:16.000Z | 2022-02-28T22:52:43.000Z | airflow/contrib/auth/backends/google_auth.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | [
"Apache-2.0"
] | 3 | 2016-07-14T21:51:10.000Z | 2020-10-12T13:26:36.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import flask_login
from flask import redirect, request, url_for
# Need to expose these downstream
# flake8: noqa: F401
from flask_login import current_user, login_required, login_user, logout_user
from flask_oauthlib.client import OAuth
from airflow import models
from airflow.configuration import conf
from airflow.utils.session import provide_session
log = logging.getLogger(__name__)
def get_config_param(param):
return str(conf.get('google', param))
class GoogleUser(models.User):
def __init__(self, user):
self.user = user
@property
def is_active(self):
"""Required by flask_login"""
return True
@property
def is_authenticated(self):
"""Required by flask_login"""
return True
@property
def is_anonymous(self):
"""Required by flask_login"""
return False
def get_id(self):
"""Returns the current user id as required by flask_login"""
return self.user.get_id()
def data_profiling(self):
"""Provides access to data profiling tools"""
return True
def is_superuser(self):
"""Access all the things"""
return True
class AuthenticationError(Exception):
pass
class GoogleAuthBackend:
def __init__(self):
# self.google_host = get_config_param('host')
self.login_manager = flask_login.LoginManager()
self.login_manager.login_view = 'airflow.login'
self.flask_app = None
self.google_oauth = None
self.api_rev = None
def init_app(self, flask_app):
self.flask_app = flask_app
self.login_manager.init_app(self.flask_app)
self.google_oauth = OAuth(self.flask_app).remote_app(
'google',
consumer_key=get_config_param('client_id'),
consumer_secret=get_config_param('client_secret'),
request_token_params={'scope': [
'https://www.googleapis.com/auth/userinfo.profile',
'https://www.googleapis.com/auth/userinfo.email']},
base_url='https://www.google.com/accounts/',
request_token_url=None,
access_token_method='POST',
access_token_url='https://accounts.google.com/o/oauth2/token',
authorize_url='https://accounts.google.com/o/oauth2/auth')
self.login_manager.user_loader(self.load_user)
self.flask_app.add_url_rule(get_config_param('oauth_callback_route'),
'google_oauth_callback',
self.oauth_callback)
def login(self, request):
log.debug('Redirecting user to Google login')
return self.google_oauth.authorize(callback=url_for(
'google_oauth_callback',
_external=True),
state=request.args.get('next') or request.referrer or None)
def get_google_user_profile_info(self, google_token):
resp = self.google_oauth.get(
'https://www.googleapis.com/oauth2/v1/userinfo',
token=(google_token, ''))
if not resp or resp.status != 200:
raise AuthenticationError(
'Failed to fetch user profile, status ({0})'.format(
resp.status if resp else 'None'))
return resp.data['name'], resp.data['email']
def domain_check(self, email):
domain = email.split('@')[1]
domains = get_config_param('domain').split(',')
if domain in domains:
return True
return False
@provide_session
def load_user(self, userid, session=None):
if not userid or userid == 'None':
return None
user = session.query(models.User).filter(
models.User.id == int(userid)).first()
return GoogleUser(user)
@provide_session
def oauth_callback(self, session=None):
log.debug('Google OAuth callback called')
next_url = request.args.get('state') or url_for('admin.index')
resp = self.google_oauth.authorized_response()
try:
if resp is None:
raise AuthenticationError(
'Null response from Google, denying access.'
)
google_token = resp['access_token']
username, email = self.get_google_user_profile_info(google_token)
if not self.domain_check(email):
return redirect(url_for('airflow.noaccess'))
except AuthenticationError:
return redirect(url_for('airflow.noaccess'))
user = session.query(models.User).filter(
models.User.username == username).first()
if not user:
user = models.User(
username=username,
email=email,
is_superuser=False)
session.merge(user)
session.commit()
login_user(GoogleUser(user))
session.commit()
return redirect(next_url)
login_manager = GoogleAuthBackend()
def login(self, request):
return login_manager.login(request)
| 30.768421 | 77 | 0.641977 |
93a51ffd5441e114f95221cbcc716a3380fe3c4f | 185 | py | Python | harshad_no.py | Akshara2820/Python_WhileLoop | d525b547bc8c8236cb2cd1881080ec4e6604fffc | [
"MIT"
] | 1 | 2021-09-15T03:42:15.000Z | 2021-09-15T03:42:15.000Z | harshad_no.py | Akshara2820/Python_WhileLoop | d525b547bc8c8236cb2cd1881080ec4e6604fffc | [
"MIT"
] | null | null | null | harshad_no.py | Akshara2820/Python_WhileLoop | d525b547bc8c8236cb2cd1881080ec4e6604fffc | [
"MIT"
] | null | null | null | num=int(input("enter no."))
sum=0
num1=num
while num>0:
rem=num%10
sum=sum+rem
num//=10
if num1%sum==0:
print("harshad no.",num1)
else:
print("not harshad no.",num1) | 16.818182 | 33 | 0.610811 |
9b48ef99dc531e07244a4d31dcc140f38fd34092 | 616 | py | Python | ebl/bibliography/domain/reference.py | ElectronicBabylonianLiterature/dictionary | 5977a57314cf57f94f75cd12520f178b1d6a6555 | [
"MIT"
] | 4 | 2020-04-12T14:24:51.000Z | 2020-10-15T15:48:15.000Z | ebl/bibliography/domain/reference.py | ElectronicBabylonianLiterature/dictionary | 5977a57314cf57f94f75cd12520f178b1d6a6555 | [
"MIT"
] | 200 | 2019-12-04T09:53:20.000Z | 2022-03-30T20:11:31.000Z | ebl/bibliography/domain/reference.py | ElectronicBabylonianLiterature/dictionary | 5977a57314cf57f94f75cd12520f178b1d6a6555 | [
"MIT"
] | 1 | 2021-09-06T16:22:39.000Z | 2021-09-06T16:22:39.000Z | from enum import Enum, auto
from typing import NewType, Optional, Sequence
import attr
BibliographyId = NewType("BibliographyId", str)
class ReferenceType(Enum):
EDITION = auto()
DISCUSSION = auto()
COPY = auto()
PHOTO = auto()
TRANSLATION = auto()
@attr.s(auto_attribs=True, frozen=True)
class Reference:
id: BibliographyId
type: ReferenceType
pages: str = ""
notes: str = ""
lines_cited: Sequence[str] = tuple()
document: Optional[dict] = None
def set_document(self, new_document: dict) -> "Reference":
return attr.evolve(self, document=new_document)
| 22 | 62 | 0.676948 |
023d94fdfca9ee90bbede6959eac50f0c017e7aa | 6,579 | py | Python | src/network.py | anthonywittemann/neural-networks-and-deep-learning-implementations | 08eea066662f65264ac99711dfbedd8f5fde12b7 | [
"Unlicense"
] | null | null | null | src/network.py | anthonywittemann/neural-networks-and-deep-learning-implementations | 08eea066662f65264ac99711dfbedd8f5fde12b7 | [
"Unlicense"
] | null | null | null | src/network.py | anthonywittemann/neural-networks-and-deep-learning-implementations | 08eea066662f65264ac99711dfbedd8f5fde12b7 | [
"Unlicense"
] | null | null | null | """
network.py
~~~~~~~~~~
A module to implement the stochastic gradient descent learning
algorithm for a feedforward neural network. Gradients are calculated
using backpropagation. Note that I have focused on making the code
simple, easily readable, and easily modifiable. It is not optimized,
and omits many desirable features.
"""
#### Libraries
# Standard library
import random
# Third-party libraries
import numpy as np
class Network():
def __init__(self, sizes):
"""The list ``sizes`` contains the number of neurons in the
respective layers of the network. For example, if the list
was [2, 3, 1] then it would be a three-layer network, with the
first layer containing 2 neurons, the second layer 3 neurons,
and the third layer 1 neuron. The biases and weights for the
network are initialized randomly, using a Gaussian
distribution with mean 0, and variance 1. Note that the first
layer is assumed to be an input layer, and by convention we
won't set any biases for those neurons, since biases are only
ever used in computing the outputs from later layers."""
self.num_layers = len(sizes)
self.sizes = sizes
self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
self.weights = [np.random.randn(y, x)
for x, y in zip(sizes[:-1], sizes[1:])]
def feedforward(self, a):
"""Return the output of the network if ``a`` is input."""
for b, w in zip(self.biases, self.weights):
a = sigmoid_vec(np.dot(w, a)+b)
return a
def SGD(self, training_data, epochs, mini_batch_size, eta,
test_data=None):
"""Train the neural network using mini-batch stochastic
gradient descent. The ``training_data`` is a list of tuples
``(x, y)`` representing the training inputs and the desired
outputs. The other non-optional parameters are
self-explanatory. If ``test_data`` is provided then the
network will be evaluated against the test data after each
epoch, and partial progress printed out. This is useful for
tracking progress, but slows things down substantially."""
if test_data: n_test = len(test_data)
n = len(training_data)
for j in xrange(epochs):
random.shuffle(training_data)
mini_batches = [
training_data[k:k+mini_batch_size]
for k in xrange(0, n, mini_batch_size)]
for mini_batch in mini_batches:
self.update_mini_batch(mini_batch, eta)
if test_data:
print "Epoch {0}: {1} / {2}".format(
j, self.evaluate(test_data), n_test)
else:
print "Epoch {0} complete".format(j)
def update_mini_batch(self, mini_batch, eta):
"""Update the network's weights and biases by applying
gradient descent using backpropagation to a single mini batch.
The ``mini_batch`` is a list of tuples ``(x, y)``, and ``eta``
is the learning rate."""
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
for x, y in mini_batch:
delta_nabla_b, delta_nabla_w = self.backprop(x, y)
nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
self.weights = [w-(eta/len(mini_batch))*nw
for w, nw in zip(self.weights, nabla_w)]
self.biases = [b-(eta/len(mini_batch))*nb
for b, nb in zip(self.biases, nabla_b)]
def backprop(self, x, y):
"""Return a tuple ``(nabla_b, nabla_w)`` representing the
gradient for the cost function C_x. ``nabla_b`` and
``nabla_w`` are layer-by-layer lists of numpy arrays, similar
to ``self.biases`` and ``self.weights``."""
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
# feedforward
activation = x
activations = [x] # list to store all the activations, layer by layer
zs = [] # list to store all the z vectors, layer by layer
for b, w in zip(self.biases, self.weights):
z = np.dot(w, activation)+b
zs.append(z)
activation = sigmoid_vec(z)
activations.append(activation)
# backward pass - aka backpropigation
delta = self.cost_derivative(activations[-1], y) * \
sigmoid_prime_vec(zs[-1])
nabla_b[-1] = delta
nabla_w[-1] = np.dot(delta, activations[-2].transpose())
# Note that the variable l in the loop below is used a little
# differently to the notation in Chapter 2 of the book. Here,
# l = 1 means the last layer of neurons, l = 2 is the
# second-last layer, and so on. It's a renumbering of the
# scheme in the book, used here to take advantage of the fact
# that Python can use negative indices in lists.
for l in xrange(2, self.num_layers):
z = zs[-l]
spv = sigmoid_prime_vec(z)
delta = np.dot(self.weights[-l+1].transpose(), delta) * spv
nabla_b[-l] = delta
nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
return (nabla_b, nabla_w)
def evaluate(self, test_data):
"""Return the number of test inputs for which the neural
network outputs the correct result. Note that the neural
network's output is assumed to be the index of whichever
neuron in the final layer has the highest activation."""
test_results = [(np.argmax(self.feedforward(x)), y)
for (x, y) in test_data]
return sum(int(x == y) for (x, y) in test_results)
def cost_derivative(self, output_activations, y):
"""Return the vector of partial derivatives \partial C_x /
\partial a for the output activations."""
return (output_activations-y)
#### Miscellaneous functions
def sigmoid(z):
"""The sigmoid function."""
return 1.0/(1.0+np.exp(-z))
sigmoid_vec = np.vectorize(sigmoid)
def sigmoid_prime(z):
"""Derivative of the sigmoid function."""
return sigmoid(z)*(1-sigmoid(z))
sigmoid_prime_vec = np.vectorize(sigmoid_prime)
| 44.452703 | 78 | 0.604651 |
8714757e73312c9de899806062da802daebaa3cc | 4,428 | py | Python | modules/setup.py | MrSpaar/Dpy-PolyBot | 7eb46ddd6fc63f46365a2c7b1cf6eb623b30877d | [
"BSD-3-Clause"
] | 1 | 2021-06-29T23:47:48.000Z | 2021-06-29T23:47:48.000Z | modules/setup.py | MrSpaar/polybot | 7eb46ddd6fc63f46365a2c7b1cf6eb623b30877d | [
"BSD-3-Clause"
] | null | null | null | modules/setup.py | MrSpaar/polybot | 7eb46ddd6fc63f46365a2c7b1cf6eb623b30877d | [
"BSD-3-Clause"
] | 1 | 2022-01-16T21:41:24.000Z | 2022-01-16T21:41:24.000Z | from discord import Role, TextChannel, Embed, Guild, Member
from discord.ext.commands import Context
from discord.ext import commands
from discord.utils import get
from typing import Union
from core.cls import Bot
from os import listdir
class SetupCommands(commands.Cog, name='Configuration', description='admin'):
def __init__(self, bot: Bot):
self.bot = bot
@commands.command(
name='set',
brief='channel #🧙-polybot',
usage='<mute, logs ou channel> <@role ou #channel>',
description='Modifier les paramètres du bot'
)
@commands.guild_only()
@commands.has_permissions(administrator=True)
async def _set(self, ctx: Context, key: str, value: Union[Role, TextChannel], *, message=None):
settings = {
'mute': 'Rôle des muets',
'logs': 'Channel de logs',
'channel': 'Channel du bot',
'role': 'Rôle des nouveaux membres',
'welcome': 'Message de bienvenu'
}
if key not in settings:
embed = Embed(color=0xe74c3c, description=f"❌ Catégorie invalide : {', '.join(settings.keys())}")
return await ctx.send(embed=embed)
if key == 'welcome':
await self.bot.db.setup.update({'_id': ctx.guild.id}, {'$set': {key: {'id': value.id, 'txt': message}}})
else:
await self.bot.db.setup.update({'_id': ctx.guild.id}, {'$set': {key: value.id}})
embed = Embed(color=0x2ecc71, description=f"{settings[key]} modifié ({value.mention})")
await ctx.send(embed=embed)
@commands.command(
brief='',
usage='',
description='Afficher les paramètres du bot'
)
@commands.guild_only()
@commands.has_permissions(administrator=True)
async def settings(self, ctx: Context):
settings = await self.bot.db.setup.find({'_id': ctx.guild.id})
channel = getattr(get(ctx.guild.text_channels, id=settings['channel']), 'mention', 'pas défini')
logs = getattr(get(ctx.guild.text_channels, id=settings['logs']), 'mention', 'pas défini')
mute = getattr(get(ctx.guild.roles, id=settings['mute']), 'mention', 'pas défini')
embed = Embed(color=0x3498db, description=f"💬 Bot : {channel}\n📟 Logs : {logs}\n🔇 Mute : {mute}")
await ctx.send(embed=embed)
@commands.command()
@commands.is_owner()
async def reload(self, ctx: Context):
for file in listdir('modules'):
if file != '__pycache__' and not (file in ['errors.py', 'logs.py'] and self.bot.debug):
self.bot.reload_extension(f'modules.{file[:-3]}')
embed = Embed(color=0x2ecc71, description='✅ Tous les modules ont été relancé')
await ctx.send(embed=embed)
@commands.Cog.listener()
async def on_guild_join(self, guild):
await self.bot.db.setup.insert({'_id': guild.id, 'mute': None, 'logs': None, 'channel': None, 'new': []})
for member in filter(lambda m: not m.bot, guild.members):
await self.bot.db.members.update({'_id': member.id}, {'$addToSet': {'guilds': {'id': guild.id, 'level': 0, 'xp':0}}}, True)
await guild.owner.send("Merci beaucoup de m'avoir ajouté 👍" +
"\n\nPour certaines de mes commandes, quelques réglages sont nécessaires :" +
"\n • `!set channel <#channel>` pour indiquer au bot ou faire les annonces de level up" +
"\n • `!set logs <#channel>` pour indiquer au bot où envoyer les messages de logs" +
"\n\nCes **commandes sont à faire sur ton serveur**, pas ici, en privé ⚠️")
@commands.Cog.listener()
async def on_guild_remove(self, guild: Guild):
await self.bot.db.setup.delete({'_id': guild.id})
await self.bot.db.members.collection.update_many({'_id': {'$in': [member.id for member in guild.members]}}, {'$pull': {'guilds': {'id': guild.id}}})
@commands.Cog.listener()
async def on_member_join(self, member: Member):
await self.bot.db.members.update({'_id': member.id}, {'$addToSet': {'guilds': {'id': member.guild.id, 'level': 0, 'xp': 0}}}, True)
@commands.Cog.listener()
async def on_member_remove(self, member: Member):
await self.bot.db.members.update({'_id': member.id}, {'$pull': {'guilds': {'id': member.guild.id}}})
def setup(bot):
bot.add_cog(SetupCommands(bot))
| 44.727273 | 156 | 0.607724 |
f555a95cf8cc88b32120c07434fa96dce7ed8ac0 | 91 | py | Python | openbook/apps.py | VisheshJain112/universe | 699968263b632edee59d481c66756950c2054f0e | [
"MIT"
] | null | null | null | openbook/apps.py | VisheshJain112/universe | 699968263b632edee59d481c66756950c2054f0e | [
"MIT"
] | null | null | null | openbook/apps.py | VisheshJain112/universe | 699968263b632edee59d481c66756950c2054f0e | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class OpenbookConfig(AppConfig):
name = 'openbook'
| 15.166667 | 33 | 0.758242 |
cc8b284b9740aa740c0040c03703475fd50309da | 5,646 | py | Python | homeassistant/components/notify/telegram.py | smilepc/Home-assistant | db3bfad0b5e0815ba1e255d4d646af7c99caef8b | [
"MIT"
] | null | null | null | homeassistant/components/notify/telegram.py | smilepc/Home-assistant | db3bfad0b5e0815ba1e255d4d646af7c99caef8b | [
"MIT"
] | null | null | null | homeassistant/components/notify/telegram.py | smilepc/Home-assistant | db3bfad0b5e0815ba1e255d4d646af7c99caef8b | [
"MIT"
] | null | null | null | """
Telegram platform for notify component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.telegram/
"""
import io
import logging
import urllib
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.notify import (
ATTR_TITLE, ATTR_DATA, PLATFORM_SCHEMA, BaseNotificationService)
from homeassistant.const import (CONF_API_KEY, ATTR_LOCATION, ATTR_LATITUDE,
ATTR_LONGITUDE)
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['python-telegram-bot==5.0.0']
ATTR_PHOTO = "photo"
ATTR_DOCUMENT = "document"
ATTR_CAPTION = "caption"
ATTR_URL = 'url'
ATTR_FILE = 'file'
ATTR_USERNAME = 'username'
ATTR_PASSWORD = 'password'
CONF_CHAT_ID = 'chat_id'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_CHAT_ID): cv.string,
})
def get_service(hass, config):
"""Get the Telegram notification service."""
import telegram
try:
chat_id = config.get(CONF_CHAT_ID)
api_key = config.get(CONF_API_KEY)
bot = telegram.Bot(token=api_key)
username = bot.getMe()['username']
_LOGGER.info("Telegram bot is '%s'.", username)
except urllib.error.HTTPError:
_LOGGER.error("Please check your access token.")
return None
return TelegramNotificationService(api_key, chat_id)
def load_data(url=None, file=None, username=None, password=None):
    """Load photo/document into ByteIO/File container from a source.

    Exactly one of ``url`` or ``file`` should be given; ``username`` and
    ``password`` enable HTTP basic auth for the URL case.  Returns a
    readable binary object, or ``None`` on failure.
    """
    try:
        if url is not None:
            # Fetch the resource over HTTP, optionally with basic auth.
            if username is not None and password is not None:
                response = requests.get(url, auth=(username, password),
                                        timeout=15)
            else:
                response = requests.get(url, timeout=15)
            return io.BytesIO(response.content)
        if file is not None:
            # Hand back an open binary file handle; the caller consumes it.
            return open(file, "rb")
        _LOGGER.warning("Can't load photo no photo found in params!")
    except (OSError, IOError, requests.exceptions.RequestException):
        _LOGGER.error("Can't load photo into ByteIO")
    return None
# pylint: disable=too-few-public-methods
class TelegramNotificationService(BaseNotificationService):
    """Implement the notification service for Telegram.

    All messages go to the single chat configured at setup time; errors
    from the Telegram API are logged, never raised to the caller.
    """

    def __init__(self, api_key, chat_id):
        """Initialize the service.

        api_key: Telegram bot API token.
        chat_id: chat that receives every outgoing notification.
        """
        import telegram
        self._api_key = api_key
        self._chat_id = chat_id
        self.bot = telegram.Bot(token=self._api_key)

    def send_message(self, message="", **kwargs):
        """Send a message to a user.

        If ``kwargs['data']`` carries a photo, location or document
        payload, that rich payload is sent instead of plain text.
        """
        import telegram
        title = kwargs.get(ATTR_TITLE)
        data = kwargs.get(ATTR_DATA)
        # Rich payloads win over plain text; precedence is photo,
        # then location, then document.
        if data is not None and ATTR_PHOTO in data:
            photos = data.get(ATTR_PHOTO, None)
            # Accept a single photo dict or a list of them.
            photos = photos if isinstance(photos, list) else [photos]
            for photo_data in photos:
                self.send_photo(photo_data)
            return
        elif data is not None and ATTR_LOCATION in data:
            return self.send_location(data.get(ATTR_LOCATION))
        elif data is not None and ATTR_DOCUMENT in data:
            return self.send_document(data.get(ATTR_DOCUMENT))
        # Plain text: prefix with the title when one was supplied.
        if title:
            text = '{} {}'.format(title, message)
        else:
            text = message
        parse_mode = telegram.parsemode.ParseMode.MARKDOWN
        # Send the message; API failures are logged, not propagated.
        try:
            self.bot.sendMessage(chat_id=self._chat_id,
                                 text=text,
                                 parse_mode=parse_mode)
        except telegram.error.TelegramError:
            _LOGGER.exception("Error sending message.")
            return

    def send_photo(self, data):
        """Send a photo loaded from a URL or a local file path."""
        import telegram
        caption = data.get(ATTR_CAPTION)
        # Load the photo, then push it to the configured chat.
        try:
            photo = load_data(
                url=data.get(ATTR_URL),
                file=data.get(ATTR_FILE),
                username=data.get(ATTR_USERNAME),
                password=data.get(ATTR_PASSWORD),
            )
            self.bot.sendPhoto(chat_id=self._chat_id,
                               photo=photo, caption=caption)
        except telegram.error.TelegramError:
            _LOGGER.exception("Error sending photo.")
            return

    def send_document(self, data):
        """Send a document loaded from a URL or a local file path."""
        import telegram
        caption = data.get(ATTR_CAPTION)
        # Load the document, then push it to the configured chat.
        try:
            document = load_data(
                url=data.get(ATTR_URL),
                file=data.get(ATTR_FILE),
                username=data.get(ATTR_USERNAME),
                password=data.get(ATTR_PASSWORD),
            )
            self.bot.sendDocument(chat_id=self._chat_id,
                                  document=document, caption=caption)
        except telegram.error.TelegramError:
            _LOGGER.exception("Error sending document.")
            return

    def send_location(self, gps):
        """Send a location; missing coordinates default to 0.0."""
        import telegram
        latitude = float(gps.get(ATTR_LATITUDE, 0.0))
        longitude = float(gps.get(ATTR_LONGITUDE, 0.0))
        # Push the coordinates to the configured chat.
        try:
            self.bot.sendLocation(chat_id=self._chat_id,
                                  latitude=latitude, longitude=longitude)
        except telegram.error.TelegramError:
            _LOGGER.exception("Error sending location.")
            return
| 31.541899 | 78 | 0.611406 |
7da7cc5b87eb62202adf4b658706461fcda03fb6 | 25,084 | py | Python | main.py | aerfalwl/xiaomisport | c50b183ba8122258aea4f43e31b280f424446ad4 | [
"Apache-2.0"
] | null | null | null | main.py | aerfalwl/xiaomisport | c50b183ba8122258aea4f43e31b280f424446ad4 | [
"Apache-2.0"
] | null | null | null | main.py | aerfalwl/xiaomisport | c50b183ba8122258aea4f43e31b280f424446ad4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf8 -*-
# python >=3.8
import requests,time,re,json,random
# Timestamp captured once at import time; reused in all console/push output.
now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
# Default request headers; the User-Agent mimics an Android Mi Fit client.
headers = {
    'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 9; MI 6 MIUI/20.6.18)'
}
# Extract the login access code.
def get_code(location):
    """Pull the ``access`` parameter value out of a redirect Location URL.

    Raises IndexError when no ``access=...&`` parameter is present.
    """
    matches = re.findall("(?<=access=).*?(?=&)", location)
    return matches[0]
# Log in to the Huami account API.
def login(user,password):
    """Log in to the Huami/Mi Fit account API.

    user: mainland-China phone number (the +86 prefix is added here).
    password: account password.

    Returns (login_token, user_id) on success, or (0, 0) when the first
    request yields no redirect Location containing an access code
    (e.g. wrong credentials).
    """
    url1 = "https://api-user.huami.com/registrations/+86" + user + "/tokens"
    headers = {
        "Content-Type":"application/x-www-form-urlencoded;charset=UTF-8",
        "User-Agent":"MiFit/4.6.0 (iPhone; iOS 14.0.1; Scale/2.00)"
    }
    data1 = {
        "client_id":"HuaMi",
        "password":f"{password}",
        "redirect_uri":"https://s3-us-west-2.amazonaws.com/hm-registration/successsignin.html",
        "token":"access"
    }
    # Step 1: password login. On success the server responds with a
    # redirect whose Location URL carries the access code.
    r1 = requests.post(url1,data=data1,headers=headers,allow_redirects=False)
    try:
        location = r1.headers["Location"]
        code = get_code(location)
    except:  # NOTE(review): bare except also masks unrelated errors
        return 0,0
    url2 = "https://account.huami.com/v2/client/login"
    data2 = {
        "app_name":"com.xiaomi.hm.health",
        "app_version":"4.6.0",
        "code":f"{code}",
        "country_code":"CN",
        "device_id":"2C8B4939-0CCD-4E94-8CBA-CB8EA6E613A1",
        "device_model":"phone",
        "grant_type":"access_token",
        "third_name":"huami_phone",
    }
    # Step 2: exchange the access code for a login token and user id.
    r2 = requests.post(url2,data=data2,headers=headers).json()
    login_token = r2["token_info"]["login_token"]
    userid = r2["token_info"]["user_id"]
    return login_token,userid
#主函数
def main(user, passwd, step):
user = str(user)
password = str(passwd)
step = str(step)
if user == '' or password == '':
print ("用户名或密码不能为空!")
return "user and passwd not empty!"
if step == '':
print ("已设置为随机步数(24000-25000)")
step = str(random.randint(24000,25000))
login_token = 0
login_token,userid = login(user,password)
if login_token == 0:
print("登陆失败!")
return "login fail!"
t = get_time()
app_token = get_app_token(login_token)
today = time.strftime("%F")
data_json = '%5B%7B%22data_hr%22%3A%22%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F9L%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2FVv%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F0v%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F9e%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F0n%5C%2Fa%5C%2F%5C%2F%5C%2FS%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F0b%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F1FK%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2FR%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F9PTFFpaf9L%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2FR%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F0j%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F9K%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2FOv%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2Fzf%5C%2F%5C%2F%5C%2F86%5C%2Fzr%5C%2FOv88%5C%2Fzf%5C%2FPf%5C%2F%5C%2F%5C%2F0v%5C%2FS%5C%2F8%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2FSf%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2Fz3%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F0r%5C%2FOv%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2FS%5C%2F9L%5C%2Fzb%5C%2FSf9K%5C%2F0v%5C%2FRf9H%5C%2Fzj%5C%2FSf9K%5C%2F0%5C%2F%5C%2FN%5C%2F%5C%2F%5C%2F%5C%2F0D%5C%2FSf83%5C%2Fzr%5C%2FPf9M%5C%2F0v%5C%2FOv9e%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2FS%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2Fzv%5C%2F%5C%2Fz7%5C%2FO%5C%2F83%5C%2Fzv%5C%2FN%5C%2F83%5C%2Fzr%5C%2FN%5C%2F86%5C%2Fz%5C%2F%5C%2FNv83%5C%2Fzn%5C%2FXv84%5C%2Fzr%5C%2FPP84%5C%2Fzj%5C%2FN%5C%2F9e%5C%2Fzr%5C%2FN%5C%2F89%5C%2F03%5C%2FP%5C%2F89%5C%2Fz3%5C%2FQ%5C%2F9N%5C%2F0v%5C%2FTv9C%5C%2F0H%5C%2FOf9D%5C%2Fzz%5C%2FOf88%5C%2Fz%5C%2F%5C%2FPP9A%5C%2Fzr%5C%2FN%5C%2F86%5C%2Fzz%5C%2FNv87%5C%2F0D%5C%2
FOv84%5C%2F0v%5C%2FO%5C%2F84%5C%2Fzf%5C%2FMP83%5C%2FzH%5C%2FNv83%5C%2Fzf%5C%2FN%5C%2F84%5C%2Fzf%5C%2FOf82%5C%2Fzf%5C%2FOP83%5C%2Fzb%5C%2FMv81%5C%2FzX%5C%2FR%5C%2F9L%5C%2F0v%5C%2FO%5C%2F9I%5C%2F0T%5C%2FS%5C%2F9A%5C%2Fzn%5C%2FPf89%5C%2Fzn%5C%2FNf9K%5C%2F07%5C%2FN%5C%2F83%5C%2Fzn%5C%2FNv83%5C%2Fzv%5C%2FO%5C%2F9A%5C%2F0H%5C%2FOf8%5C%2F%5C%2Fzj%5C%2FPP83%5C%2Fzj%5C%2FS%5C%2F87%5C%2Fzj%5C%2FNv84%5C%2Fzf%5C%2FOf83%5C%2Fzf%5C%2FOf83%5C%2Fzb%5C%2FNv9L%5C%2Fzj%5C%2FNv82%5C%2Fzb%5C%2FN%5C%2F85%5C%2Fzf%5C%2FN%5C%2F9J%5C%2Fzf%5C%2FNv83%5C%2Fzj%5C%2FNv84%5C%2F0r%5C%2FSv83%5C%2Fzf%5C%2FMP%5C%2F%5C%2F%5C%2Fzb%5C%2FMv82%5C%2Fzb%5C%2FOf85%5C%2Fz7%5C%2FNv8%5C%2F%5C%2F0r%5C%2FS%5C%2F85%5C%2F0H%5C%2FQP9B%5C%2F0D%5C%2FNf89%5C%2Fzj%5C%2FOv83%5C%2Fzv%5C%2FNv8%5C%2F%5C%2F0f%5C%2FSv9O%5C%2F0ZeXv%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F1X%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F9B%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2FTP%5C%2F%5C%2F%5C%2F1b%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F0%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F9N%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2F
v7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%
2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%22%2C%22date%22%3A%222021-08-07%22%2C%22data%22%3A%5B%7B%22start%22%3A0%2C%22stop%22%3A1439%2C%22value%22%3A%22UA8AUBQAUAwAUBoAUAEAYCcAUBkAUB4AUBgAUCAAUAEAUBkAUAwAYAsAYB8AYB0AYBgAYCoAYBgAYB4AUCcAUBsAUB8AUBwAUBIAYBkAYB8AUBoAUBMAUCEAUCIAYBYAUBwAUCAAUBgAUCAAUBcAYBsAYCUAATIPYD0KECQAYDMAYB0AYAsAYCAAYDwAYCIAYB0AYBcAYCQAYB0AYBAAYCMAYAoAYCIAYCEAYCYAYBsAYBUAYAYAYCIAYCMAUB0AUCAAUBYAUCoAUBEAUC8AUB0AUBYAUDMAUDoAUBkAUC0AUBQAUBwAUA0AUBsAUAoAUCEAUBYAUAwAUB4AUAwAUCcAUCYAUCwKYDUAAUUlEC8IYEMAYEgAYDoAYBAAUAMAUBkAWgAAWgAAWgAAWgAAWgAAUAgAWgAAUBAAUAQAUA4AUA8AUAkAUAIAUAYAUAcAUAIAWgAAUAQAUAkAUAEAUBkAUCUAWgAAUAYAUBEAWgAAUBYAWgAAUAYAWgAAWgAAWgAAWgAAUBcAUAcAWgAAUBUAUAoAUAIAWgAAUAQAUAYAUCgAWgAAUAgAWgAAWgAAUAwAWwAAXCMAUBQAWwAAUAIAWgAAWgAAWgAAWgAAWgAAWgAAWgAAWgAAWREAWQIAUAMAWSEAUDoAUDIAUB8AUCEAUC4AXB4AUA4AWgAAUBIAUA8AUBAAUCUAUCIAUAMAUAEAUAsAUAMAUCwAUBYAWgAAWgAAWgAAWgAAWgAAWgAAUAYAWgAAWgAAWgAAUAYAWwAAWgAAUAYAXAQAUAMAUBsAUBcAUCAAWwAAWgAAWgAAWgAAWgAAUBgAUB4AWgAAUAcAUAwAWQIAWQkAUAEAUAIAWgAAUAoAWgAAUAYAUB0AWgAAWgAAUAkAWgAAWSwAUBIAWgAAUC4AWSYAWgAAUAYAUAoAUAkAUAIAUAcAWgAAUAEAUBEAUBgAUBcAWRYAUA0AWSgAUB4AUDQAUBoAXA4AUA8AUBwAUA8AUA4AUA4AWgAAUAIAUCMAWgAAUCwAUBgAUAYAUAAAUAAAUAAAUAAAUAAAUAAAUAAAUAAAUAAAWwAAUAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAeSEAeQ8AcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcBcAcAAAcAAAcCYOcBUAUAAAUAAAUAAAUAAAUAUAUAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcCgAeQAAcAAAcAAAcAAAcAAAcAAAcAYAcAAAcBgAeQAAcAAAcAAAegAAegAAcAAAcAcAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcCkAeQAAcAcAcAAAcAAAcAwAcAAAcAAAcAIAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcCIAeQAAcAAAcAAAcAAAcAAAcAAAeRwAeQAAWgAAUAAAUAAAUAAAUAAAUAAAcAAAcAAAcBoAeScAeQAAegAAcBkAeQAAUAAAUAAAUAAAUAAAUAAAUAAAcAAAcAAAcAAAcAAAcAAAcAAAegAAegAAcAAAcAAAcBgAeQAAcAAAcAAAcAAAcAAAcAAAcAkAegAAegAAcAcAcAAAcAcAcAAAcAAAcAAAcAAAcA8AeQAAcAAAcAAAeRQAcAwAUAAAUAAAUAAAUAAAUAAAUAAAcAAA
cBEAcA0AcAAAWQsAUAAAUAAAUAAAUAAAUAAAcAAAcAoAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAYAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcBYAegAAcAAAcAAAegAAcAcAcAAAcAAAcAAAcAAAcAAAeRkAegAAegAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAEAcAAAcAAAcAAAcAUAcAQAcAAAcBIAeQAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcBsAcAAAcAAAcBcAeQAAUAAAUAAAUAAAUAAAUAAAUBQAcBYAUAAAUAAAUAoAWRYAWTQAWQAAUAAAUAAAUAAAcAAAcAAAcAAAcAAAcAAAcAMAcAAAcAQAcAAAcAAAcAAAcDMAeSIAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcBQAeQwAcAAAcAAAcAAAcAMAcAAAeSoAcA8AcDMAcAYAeQoAcAwAcFQAcEMAeVIAaTYAbBcNYAsAYBIAYAIAYAIAYBUAYCwAYBMAYDYAYCkAYDcAUCoAUCcAUAUAUBAAWgAAYBoAYBcAYCgAUAMAUAYAUBYAUA4AUBgAUAgAUAgAUAsAUAsAUA4AUAMAUAYAUAQAUBIAASsSUDAAUDAAUBAAYAYAUBAAUAUAUCAAUBoAUCAAUBAAUAoAYAIAUAQAUAgAUCcAUAsAUCIAUCUAUAoAUA4AUB8AUBkAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAA
fgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAA%22%2C%22tz%22%3A32%2C%22did%22%3A%22DA932FFFFE8816E7%22%2C%22src%22%3A24%7D%5D%2C%22sum
mary%22%3A%22%7B%5C%22v%5C%22%3A6%2C%5C%22slp%5C%22%3A%7B%5C%22st%5C%22%3A1628296479%2C%5C%22ed%5C%22%3A1628296479%2C%5C%22dp%5C%22%3A0%2C%5C%22lt%5C%22%3A0%2C%5C%22wk%5C%22%3A0%2C%5C%22usrSt%5C%22%3A-1440%2C%5C%22usrEd%5C%22%3A-1440%2C%5C%22wc%5C%22%3A0%2C%5C%22is%5C%22%3A0%2C%5C%22lb%5C%22%3A0%2C%5C%22to%5C%22%3A0%2C%5C%22dt%5C%22%3A0%2C%5C%22rhr%5C%22%3A0%2C%5C%22ss%5C%22%3A0%7D%2C%5C%22stp%5C%22%3A%7B%5C%22ttl%5C%22%3A18272%2C%5C%22dis%5C%22%3A10627%2C%5C%22cal%5C%22%3A510%2C%5C%22wk%5C%22%3A41%2C%5C%22rn%5C%22%3A50%2C%5C%22runDist%5C%22%3A7654%2C%5C%22runCal%5C%22%3A397%2C%5C%22stage%5C%22%3A%5B%7B%5C%22start%5C%22%3A327%2C%5C%22stop%5C%22%3A341%2C%5C%22mode%5C%22%3A1%2C%5C%22dis%5C%22%3A481%2C%5C%22cal%5C%22%3A13%2C%5C%22step%5C%22%3A680%7D%2C%7B%5C%22start%5C%22%3A342%2C%5C%22stop%5C%22%3A367%2C%5C%22mode%5C%22%3A3%2C%5C%22dis%5C%22%3A2295%2C%5C%22cal%5C%22%3A95%2C%5C%22step%5C%22%3A2874%7D%2C%7B%5C%22start%5C%22%3A368%2C%5C%22stop%5C%22%3A377%2C%5C%22mode%5C%22%3A4%2C%5C%22dis%5C%22%3A1592%2C%5C%22cal%5C%22%3A88%2C%5C%22step%5C%22%3A1664%7D%2C%7B%5C%22start%5C%22%3A378%2C%5C%22stop%5C%22%3A386%2C%5C%22mode%5C%22%3A3%2C%5C%22dis%5C%22%3A1072%2C%5C%22cal%5C%22%3A51%2C%5C%22step%5C%22%3A1245%7D%2C%7B%5C%22start%5C%22%3A387%2C%5C%22stop%5C%22%3A393%2C%5C%22mode%5C%22%3A4%2C%5C%22dis%5C%22%3A1036%2C%5C%22cal%5C%22%3A57%2C%5C%22step%5C%22%3A1124%7D%2C%7B%5C%22start%5C%22%3A394%2C%5C%22stop%5C%22%3A398%2C%5C%22mode%5C%22%3A3%2C%5C%22dis%5C%22%3A488%2C%5C%22cal%5C%22%3A19%2C%5C%22step%5C%22%3A607%7D%2C%7B%5C%22start%5C%22%3A399%2C%5C%22stop%5C%22%3A414%2C%5C%22mode%5C%22%3A4%2C%5C%22dis%5C%22%3A2220%2C%5C%22cal%5C%22%3A120%2C%5C%22step%5C%22%3A2371%7D%2C%7B%5C%22start%5C%22%3A415%2C%5C%22stop%5C%22%3A427%2C%5C%22mode%5C%22%3A3%2C%5C%22dis%5C%22%3A1268%2C%5C%22cal%5C%22%3A59%2C%5C%22step%5C%22%3A1489%7D%2C%7B%5C%22start%5C%22%3A428%2C%5C%22stop%5C%22%3A433%2C%5C%22mode%5C%22%3A1%2C%5C%22dis%5C%22%3A152%2C%5C%22cal%5C%22%3A4%2C%5C%22step%5C%22%3A238%7D%2C%7B%5C%22star
t%5C%22%3A434%2C%5C%22stop%5C%22%3A444%2C%5C%22mode%5C%22%3A3%2C%5C%22dis%5C%22%3A2295%2C%5C%22cal%5C%22%3A95%2C%5C%22step%5C%22%3A2874%7D%2C%7B%5C%22start%5C%22%3A445%2C%5C%22stop%5C%22%3A455%2C%5C%22mode%5C%22%3A4%2C%5C%22dis%5C%22%3A1592%2C%5C%22cal%5C%22%3A88%2C%5C%22step%5C%22%3A1664%7D%2C%7B%5C%22start%5C%22%3A456%2C%5C%22stop%5C%22%3A466%2C%5C%22mode%5C%22%3A3%2C%5C%22dis%5C%22%3A1072%2C%5C%22cal%5C%22%3A51%2C%5C%22step%5C%22%3A1245%7D%2C%7B%5C%22start%5C%22%3A467%2C%5C%22stop%5C%22%3A477%2C%5C%22mode%5C%22%3A4%2C%5C%22dis%5C%22%3A1036%2C%5C%22cal%5C%22%3A57%2C%5C%22step%5C%22%3A1124%7D%2C%7B%5C%22start%5C%22%3A478%2C%5C%22stop%5C%22%3A488%2C%5C%22mode%5C%22%3A3%2C%5C%22dis%5C%22%3A488%2C%5C%22cal%5C%22%3A19%2C%5C%22step%5C%22%3A607%7D%2C%7B%5C%22start%5C%22%3A489%2C%5C%22stop%5C%22%3A499%2C%5C%22mode%5C%22%3A4%2C%5C%22dis%5C%22%3A2220%2C%5C%22cal%5C%22%3A120%2C%5C%22step%5C%22%3A2371%7D%2C%7B%5C%22start%5C%22%3A500%2C%5C%22stop%5C%22%3A511%2C%5C%22mode%5C%22%3A3%2C%5C%22dis%5C%22%3A1268%2C%5C%22cal%5C%22%3A59%2C%5C%22step%5C%22%3A1489%7D%2C%7B%5C%22start%5C%22%3A512%2C%5C%22stop%5C%22%3A522%2C%5C%22mode%5C%22%3A1%2C%5C%22dis%5C%22%3A152%2C%5C%22cal%5C%22%3A4%2C%5C%22step%5C%22%3A238%7D%5D%7D%2C%5C%22goal%5C%22%3A8000%2C%5C%22tz%5C%22%3A%5C%2228800%5C%22%7D%22%2C%22source%22%3A24%2C%22type%22%3A0%7D%5D'
finddate = re.compile(r'.*?date%22%3A%22(.*?)%22%2C%22data.*?')
findstep = re.compile(r'.*?ttl%5C%22%3A(.*?)%2C%5C%22dis.*?')
data_json = re.sub(finddate.findall(data_json)[0], today, str(data_json))
data_json = re.sub(findstep.findall(data_json)[0], step, str(data_json))
url = f'https://api-mifit-cn.huami.com/v1/data/band_data.json?&t={t}'
head = {
"apptoken": app_token,
"Content-Type": "application/x-www-form-urlencoded"
}
data = f'userid={userid}&last_sync_data_time=1597306380&device_type=0&last_deviceid=DA932FFFFE8816E7&data_json={data_json}'
response = requests.post(url, data=data, headers=head).json()
#print(response)
result = f"{user[:4]}****{user[-4:]}: [{now}] 修改步数({step})"+ response['message']
print(result)
return result
# Fetch a server-side timestamp.
def get_time():
    """Return a millisecond timestamp string from Taobao's public time API."""
    api = 'http://api.m.taobao.com/rest/api3.do?api=mtop.common.getTimestamp'
    payload = requests.get(api, headers=headers).json()
    return payload['data']['t']
# Exchange the login token for an app token.
def get_app_token(login_token):
    """Return the Huami app_token corresponding to ``login_token``.

    The app_token is required by the band-data upload endpoint.
    """
    url = ("https://account-cn.huami.com/v1/client/app_tokens"
           "?app_name=com.xiaomi.hm.health"
           "&dn=api-user.huami.com%2Capi-mifit.huami.com%2Capp-analytics.huami.com"
           f"&login_token={login_token}")
    payload = requests.get(url, headers=headers).json()
    return payload['token_info']['app_token']
# Push via the legacy ServerChan API.
def push_wx(sckey, desp=""):
    """Push the result text to WeChat through the legacy ServerChan API.

    Does nothing (beyond a console note) when ``sckey`` is empty.
    """
    if sckey == '':
        print("[注意] 未提供sckey,不进行推送!")
        return
    resp = requests.get(f"https://sc.ftqq.com/{sckey}.send",
                        params={"text": '小米运动 步数修改', "desp": desp})
    body = resp.json()
    if body['errno'] == 0:
        print(f"[{now}] 推送成功。")
    else:
        print(f"[{now}] 推送失败:{body['errno']}({body['errmsg']})")
# Push via the ServerChan Turbo API.
def push_server(sckey, desp=""):
    """Push the result text to WeChat through the ServerChan Turbo API.

    Does nothing (beyond a console note) when ``sckey`` is empty.
    """
    if sckey == '':
        print("[注意] 未提供sckey,不进行微信推送!")
        return
    resp = requests.get(f"https://sctapi.ftqq.com/{sckey}.send",
                        params={"title": '小米运动 步数修改', "desp": desp})
    body = resp.json()
    if body['code'] == 0:
        print(f"[{now}] 推送成功。")
    else:
        print(f"[{now}] 推送失败:{body['code']}({body['message']})")
# Push via the pushplus service.
def push_pushplus(token, content=""):
    """Push the result text through the pushplus service.

    Does nothing (beyond a console note) when ``token`` is empty.
    """
    if token == '':
        print("[注意] 未提供token,不进行pushplus推送!")
        return
    resp = requests.get("http://www.pushplus.plus/send",
                        params={"token": token,
                                "title": '小米运动 步数修改',
                                "content": content})
    body = resp.json()
    if body['code'] == 200:
        print(f"[{now}] 推送成功。")
    else:
        print(f"[{now}] 推送失败:{body['code']}({body['message']})")
# Push via the Telegram Bot API.
def push_tg(token, chat_id, desp=""):
    """Push the result text to Telegram via the Bot API sendmessage call.

    Does nothing (beyond a console note) when ``token`` or ``chat_id``
    is empty.
    """
    if token == '':
        print("[注意] 未提供token,不进行tg推送!")
        return
    if chat_id == '':
        print("[注意] 未提供chat_id,不进行tg推送!")
        return
    resp = requests.get(f"https://api.telegram.org/bot{token}/sendmessage",
                        params={"text": '小米运动 步数修改\n\n' + desp,
                                "chat_id": chat_id})
    body = resp.json()
    if body['ok'] == True:
        print(f"[{now}] 推送成功。")
    else:
        print(f"[{now}] 推送失败:{body['error_code']}({body['description']})")
# Push via WeChat Work (enterprise WeChat) app messages.
def wxpush(msg, usr, corpid, corpsecret, agentid=1000002):
    """Push ``msg`` to ``usr`` through a WeChat Work application.

    corpid/corpsecret identify the enterprise; agentid selects the app
    (0 is treated as the default 1000002).  Does nothing beyond a
    console note when corpid or corpsecret is empty.
    """
    token_url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken?'
    send_url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token='
    if agentid == 0:
        agentid = 1000002

    if corpid == '':
        print("[注意] 未提供corpid,不进行企业微信推送!")
        return
    if corpsecret == '':
        print("[注意] 未提供corpsecret,不进行企业微信推送!")
        return

    # The access_token is short-lived, so it is requested on every run.
    urls = token_url + 'corpid=' + corpid + '&corpsecret=' + corpsecret
    access_token = requests.get(urls).json()['access_token']

    # Text-message payload in the shape the message/send endpoint expects.
    payload = json.dumps({
        "touser": usr,
        "toparty": "@all",
        "totag": "@all",
        "msgtype": "text",
        "agentid": agentid,
        "text": {
            "content": msg
        },
        "safe": 0,
        "enable_id_trans": 0,
        "enable_duplicate_check": 0,
        "duplicate_check_interval": 1800
    })
    ret = requests.post(url=send_url + access_token, data=payload).json()
    if ret["errcode"] == 0:
        print(f"[{now}] 企业微信推送成功")
    else:
        print(f"[{now}] 推送失败:{ret['errcode']} 错误信息:{ret['errmsg']}")
if __name__ == "__main__":
    # All parameters are read from stdin, one per line, in this order:
    # push mode, push credentials, username(s), password(s), step value.
    # Push Mode: wx / nwx / tg / qwx / pp / off
    Pm = input()
    if Pm == 'wx' or Pm == 'nwx':
        # ServerChan key; '0' means "no push".
        sckey = input()
        if str(sckey) == '0':
            sckey = ''
    elif Pm == 'tg':
        # Telegram credentials in the form "bot_token@chat_id".
        token = input()
        sl = token.split('@')
        if len(sl) != 2:
            # NOTE(review): prints an error but does NOT exit; a malformed
            # value crashes later at push_tg(sl[0], sl[1]) -- confirm intent.
            print('tg推送参数有误!')
    elif Pm == 'qwx':
        # WeChat Work credentials in the form "usr-corpid-corpsecret[-agentid]".
        token = input()
        sl = token.split('-')
        if len(sl) < 3:
            # NOTE(review): same as tg above -- error is printed but not fatal.
            print('企业微信推送参数有误!')
    elif Pm == 'pp':
        # pushplus token.
        token = input()
        if token == '':
            print('pushplus token错误')
    elif Pm == 'off':
        # Consume the credentials line even though it is unused.
        input()
        print('不推送')
    else:
        print('推送选项有误!')
        exit(0)
    # Username(s): phone number(s), e.g. 13800138000; multiple accounts
    # are separated by '#'.
    user = input()
    # Password(s), '#'-separated, matching the usernames one-to-one.
    passwd = input()
    # Step count: a fixed value, 'min-max' for a random range, '0'/empty
    # for the default random range.  Square brackets are stripped.
    step = input().replace('[', '').replace(']', '')
    user_list = user.split('#')
    passwd_list = passwd.split('#')
    setp_array = step.split('-')
    if len(user_list) == len(passwd_list):
        push = ''
        for line in range(0,len(user_list)):
            # 'min-max' input: pick a fresh random step for each account.
            if len(setp_array) == 2:
                step = str(random.randint(int(setp_array[0]),int(setp_array[1])))
                print (f"已设置为随机步数({setp_array[0]}-{setp_array[1]})")
            elif str(step) == '0':
                step = ''
            push += main(user_list[line], passwd_list[line], step) + '\n'
        # Deliver the accumulated per-account results via the chosen channel.
        if Pm == 'wx':
            push_wx(sckey, push)
        elif Pm == 'nwx':
            push_server(sckey, push)
        elif Pm == 'tg':
            push_tg(sl[0], sl[1], push)
        elif Pm == 'qwx':
            if len(sl) == 4:
                wxpush(push, sl[0], sl[1], sl[2], int(sl[3]))
            else:
                wxpush(push, sl[0], sl[1], sl[2])
        elif Pm == 'pp':
            push_pushplus(token, push)
        elif Pm == 'off':
            pass
    else:
        print('用户名和密码数量不对')
| 75.554217 | 15,335 | 0.719981 |
c689adbe1bf89f016967df9dff654c75238b606f | 977 | py | Python | _lab/lab09/lab09_student_tests.py | ucsb-cs8/s20-ccs | 38ab65bcbb427264cf7817c57a591c8892912331 | [
"MIT"
] | null | null | null | _lab/lab09/lab09_student_tests.py | ucsb-cs8/s20-ccs | 38ab65bcbb427264cf7817c57a591c8892912331 | [
"MIT"
] | 2 | 2020-02-26T14:40:50.000Z | 2021-09-28T00:35:51.000Z | _lab/lab09/lab09_student_tests.py | ucsb-cs8/s20-ccs | 38ab65bcbb427264cf7817c57a591c8892912331 | [
"MIT"
] | null | null | null | # Student(s): (insert name here)
# Make sure to read the comments for each function.
import pytest
'''
You will write your own test cases (3 - 5 tests per function).
There's an example for each function to test:
'''
####################
from lab09 import recursiveDigitSum
def test_recursiveDigitSum_0():
    """9999 -> 9 + 9 + 9 + 9 == 36."""
    assert recursiveDigitSum(9999) == 36
# Your tests for recursiveDigitSum...
####################
from lab09 import recursiveSubstring
def test_recursiveSubstring_0():
    """"CS" is a substring of "CS8"."""
    assert recursiveSubstring("CS8", "CS") == True
# Your tests for recursiveSubstring...
####################
from lab09 import recursiveReverseList
def test_recursiveReverseList_0():
    """A three-element list comes back in reverse order."""
    assert recursiveReverseList([1,2,3]) == [3,2,1]
# Your tests for recursiveReverseList...
####################
from lab09 import recursiveAccumulateVowels
def test_recursiveAccumulateVowels_0():
    """Only the vowels of "apple" are kept, in order: "ae"."""
    assert recursiveAccumulateVowels("apple") == "ae"
# Your tests for recursiveAccumulateVowels...
| 23.829268 | 65 | 0.691914 |
0cf1b88b0884282fe09c5f8e528f2467260179ea | 835 | py | Python | sagemaker-serverless-huggingface-endpoint/app.py | philschmid/sample-huggingface-lambda-sagemaker-cdk | 96a107b8f3f11a3f3d63071854214da8cbb000ad | [
"Apache-2.0"
] | null | null | null | sagemaker-serverless-huggingface-endpoint/app.py | philschmid/sample-huggingface-lambda-sagemaker-cdk | 96a107b8f3f11a3f3d63071854214da8cbb000ad | [
"Apache-2.0"
] | null | null | null | sagemaker-serverless-huggingface-endpoint/app.py | philschmid/sample-huggingface-lambda-sagemaker-cdk | 96a107b8f3f11a3f3d63071854214da8cbb000ad | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import os
# For consistency with TypeScript code, `cdk` is the preferred import name for
# the CDK's core module. The following line also imports it as `core` for use
# with examples from the CDK Developer's Guide, which are in the process of
# being updated to use `cdk`. You may delete this import if you don't need it.
from aws_cdk import core as cdk
from huggingface_sagemaker.huggingface_stack import HuggingfaceSagemaker
# Deployment environment taken from the active AWS profile:
# CDK_DEFAULT_ACCOUNT and CDK_DEFAULT_REGION are set by the CDK CLI based on
# the AWS profile specified with the --profile option.  Accessing them with
# os.environ[...] raises KeyError when they are not set.
my_environment = cdk.Environment(account=os.environ["CDK_DEFAULT_ACCOUNT"], region=os.environ["CDK_DEFAULT_REGION"])
# Root CDK app with a single stack provisioning the serverless
# Hugging Face SageMaker endpoint.
app = cdk.App()
sagemaker = HuggingfaceSagemaker(app, "HuggingfaceServerlessSagemakerEndpoint", env=my_environment)
# Synthesize the CloudFormation template(s) for all stacks in the app.
app.synth()
| 37.954545 | 116 | 0.791617 |
54b001e50767bd7ffaec14084c60d2a01773ec83 | 43,039 | py | Python | keras_preprocessing/image/image_data_generator.py | jnphilipp/keras-preprocessing | 6f679b06d10d39edcb066142eec9e3bcd6d9de4b | [
"MIT"
] | null | null | null | keras_preprocessing/image/image_data_generator.py | jnphilipp/keras-preprocessing | 6f679b06d10d39edcb066142eec9e3bcd6d9de4b | [
"MIT"
] | null | null | null | keras_preprocessing/image/image_data_generator.py | jnphilipp/keras-preprocessing | 6f679b06d10d39edcb066142eec9e3bcd6d9de4b | [
"MIT"
] | null | null | null | """Utilities for real-time data augmentation on image data.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
from six.moves import range
import numpy as np
try:
import scipy
# scipy.linalg cannot be accessed until explicitly imported
from scipy import linalg
# scipy.ndimage cannot be accessed until explicitly imported
except ImportError:
scipy = None
from .dataframe_iterator import DataFrameIterator
from .directory_iterator import DirectoryIterator
from .numpy_array_iterator import NumpyArrayIterator
from .affine_transformations import (apply_affine_transform,
apply_brightness_shift,
apply_channel_shift,
flip_axis)
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats
in the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats
in the interval [-1.0, +1.0).
brightness_range: Tuple or list of two floats. Range for picking
a brightness shift value from.
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(after applying all other transformations).
preprocessing_function: function that will be applied on each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
dtype: Dtype to use for the generated arrays.
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
Example of using ```.flow_from_dataframe(dataframe, directory,
x_col, y_col)```:
```python
train_df = pandas.read_csv("./train.csv")
valid_df = pandas.read_csv("./valid.csv")
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_dataframe(
dataframe=train_df,
directory='data/train',
x_col="filename",
y_col="class",
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_dataframe(
dataframe=valid_df,
directory='data/validation',
x_col="filename",
y_col="class",
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format='channels_last',
validation_split=0.0,
interpolation_order=1,
dtype='float32'):
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
self.dtype = dtype
self.interpolation_order = interpolation_order
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
' Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % (zoom_range,))
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
'which overrides setting of'
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
if brightness_range is not None:
if (not isinstance(brightness_range, (tuple, list)) or
len(brightness_range) != 2):
raise ValueError(
'`brightness_range should be tuple or list of two floats. '
'Received: %s' % (brightness_range,))
self.brightness_range = brightness_range
def flow(self,
x,
y=None,
batch_size=32,
shuffle=True,
sample_weight=None,
seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
subset=None):
"""Takes data & label arrays, generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, in case
of RGB data, it should have value 3, and in case
of RGBA data, it should have value 4.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x,
y,
self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset
)
def flow_from_directory(self,
directory,
target_size=(256, 256),
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=32,
shuffle=True,
seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: string, path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](
https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb".
Whether the images will be converted to
have 1, 3, or 4 channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
If set to False, sorts the data in alphanumeric order.
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory,
self,
target_size=target_size,
color_mode=color_mode,
classes=classes,
class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation
)
def flow_from_dataframe(self,
dataframe,
directory=None,
x_col="filename",
y_col="class",
weight_col=None,
target_size=(256, 256),
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=32,
shuffle=True,
seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
subset=None,
interpolation='nearest',
drop_duplicates=True,
validate_filenames=True,
**kwargs):
"""Takes the dataframe and the path to a directory
and generates batches of augmented/normalized data.
**A simple tutorial can be found **[here](
http://bit.ly/keras_flow_from_dataframe).
# Arguments
dataframe: Pandas dataframe containing the filepaths relative to
`directory` (or absolute paths if `directory` is None) of the
images in a string column. It should include other column/s
depending on the `class_mode`:
- if `class_mode` is `"categorical"` (default value) it must
include the `y_col` column with the class/es of each image.
Values in column can be string/list/tuple if a single class
or list/tuple if multiple classes.
- if `class_mode` is `"binary"` or `"sparse"` it must include
the given `y_col` column with class values as strings.
- if `class_mode` is `"other"` it should contain the columns
specified in `y_col`.
- if `class_mode` is `"input"` or `None` no extra column is
needed.
directory: string, path to the directory to read images from. If `None`,
data in `x_col` column should be absolute paths.
x_col: string, column in `dataframe` that contains the filenames (or
absolute paths if `directory` is `None`).
y_col: string or list, column/s in `dataframe` that has the target data.
weight_col: string, column in `dataframe` that contains the sample
weights. Default: `None`.
target_size: tuple of integers `(height, width)`, default: `(256, 256)`.
The dimensions to which all images found will be resized.
color_mode: one of "grayscale", "rgb", "rgba". Default: "rgb".
Whether the images will be converted to have 1 or 3 color channels.
classes: optional list of classes (e.g. `['dogs', 'cats']`).
Default: None. If not provided, the list of classes will be
automatically inferred from the `y_col`,
which will map to the label indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: one of "categorical", "binary", "sparse", "input",
"other" or None. Default: "categorical".
Mode for yielding the targets:
- `"binary"`: 1D numpy array of binary labels,
- `"categorical"`: 2D numpy array of one-hot encoded labels.
Supports multi-label output.
- `"sparse"`: 1D numpy array of integer labels,
- `"input"`: images identical to input images (mainly used to
work with autoencoders),
- `"other"`: numpy array of `y_col` data,
- `None`, no targets are returned (the generator will only yield
batches of image data, which is useful to use in
`model.predict_generator()`).
batch_size: size of the batches of data (default: 32).
shuffle: whether to shuffle the data (default: True)
seed: optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: whether to follow symlinks inside class subdirectories
(default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`, and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed, `"box"` and
`"hamming"` are also supported. By default, `"nearest"` is used.
drop_duplicates: Boolean, whether to drop duplicate rows
based on filename.
validate_filenames: Boolean, whether to validate image filenames in
`x_col`. If `True`, invalid images will be ignored. Disabling this
option can lead to speed-up in the execution of this function.
Default: `True`.
# Returns
A `DataFrameIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
if 'has_ext' in kwargs:
warnings.warn('has_ext is deprecated, filenames in the dataframe have '
'to match the exact filenames in disk.',
DeprecationWarning)
if 'sort' in kwargs:
warnings.warn('sort is deprecated, batches will be created in the'
'same order than the filenames provided if shuffle'
'is set to False.')
return DataFrameIterator(
dataframe,
directory,
self,
x_col=x_col,
y_col=y_col,
weight_col=weight_col,
target_size=target_size,
color_mode=color_mode,
classes=classes,
class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset,
interpolation=interpolation,
drop_duplicates=drop_duplicates,
validate_filenames=validate_filenames
)
def standardize(self, x):
"""Applies the normalization configuration in-place to a batch of inputs.
`x` is changed in-place since the function is mainly used internally
to standarize images and feed them to your network. If a copy of `x`
would be created instead it would have a significant performance cost.
If you want to apply this method without changing the input in-place
you can call the method creating a copy before:
standarize(np.copy(x))
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + 1e-6)
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + 1e-6)
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
    def get_random_transform(self, img_shape, seed=None):
        """Generates random parameters for a transformation.

        NOTE: the order of the `np.random` calls below is part of the
        behavior (it determines reproducibility for a given seed); do not
        reorder them.

        # Arguments
            seed: Random seed.
            img_shape: Tuple of integers.
                Shape of the image that is transformed.
        # Returns
            A dictionary containing randomly chosen parameters describing the
            transformation.
        """
        # Convert batch-axis indices to single-image axis indices.
        img_row_axis = self.row_axis - 1
        img_col_axis = self.col_axis - 1

        if seed is not None:
            np.random.seed(seed)

        # Rotation angle in degrees (0 when the range is unset/zero).
        if self.rotation_range:
            theta = np.random.uniform(
                -self.rotation_range,
                self.rotation_range)
        else:
            theta = 0

        # tx is the shift along the row axis (vertical), drawn from
        # height_shift_range; ints/arrays go through np.random.choice,
        # scalar floats raise ValueError there and fall through to uniform.
        if self.height_shift_range:
            try:  # 1-D array-like or int
                tx = np.random.choice(self.height_shift_range)
                tx *= np.random.choice([-1, 1])
            except ValueError:  # floating point
                tx = np.random.uniform(-self.height_shift_range,
                                       self.height_shift_range)
            # Values < 1 are interpreted as fractions of the image size.
            if np.max(self.height_shift_range) < 1:
                tx *= img_shape[img_row_axis]
        else:
            tx = 0

        # ty is the shift along the column axis (horizontal), drawn from
        # width_shift_range, with the same int/float handling as tx.
        if self.width_shift_range:
            try:  # 1-D array-like or int
                ty = np.random.choice(self.width_shift_range)
                ty *= np.random.choice([-1, 1])
            except ValueError:  # floating point
                ty = np.random.uniform(-self.width_shift_range,
                                       self.width_shift_range)
            if np.max(self.width_shift_range) < 1:
                ty *= img_shape[img_col_axis]
        else:
            ty = 0

        # Shear angle in degrees.
        if self.shear_range:
            shear = np.random.uniform(
                -self.shear_range,
                self.shear_range)
        else:
            shear = 0

        # Zoom factors per axis; [1, 1] range short-circuits to no zoom.
        if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
            zx, zy = 1, 1
        else:
            zx, zy = np.random.uniform(
                self.zoom_range[0],
                self.zoom_range[1],
                2)

        # 50% coin flips, gated by the configured flip switches.
        flip_horizontal = (np.random.random() < 0.5) * self.horizontal_flip
        flip_vertical = (np.random.random() < 0.5) * self.vertical_flip

        channel_shift_intensity = None
        if self.channel_shift_range != 0:
            channel_shift_intensity = np.random.uniform(-self.channel_shift_range,
                                                        self.channel_shift_range)

        brightness = None
        if self.brightness_range is not None:
            brightness = np.random.uniform(self.brightness_range[0],
                                           self.brightness_range[1])

        transform_parameters = {'theta': theta,
                                'tx': tx,
                                'ty': ty,
                                'shear': shear,
                                'zx': zx,
                                'zy': zy,
                                'flip_horizontal': flip_horizontal,
                                'flip_vertical': flip_vertical,
                                'channel_shift_intensity': channel_shift_intensity,
                                'brightness': brightness}

        return transform_parameters
    def apply_transform(self, x, transform_parameters):
        """Applies a transformation to an image according to given parameters.

        Transformations are applied in a fixed order: affine (rotation,
        shift, shear, zoom), then channel shift, then flips, then brightness.

        # Arguments
            x: 3D tensor, single image.
            transform_parameters: Dictionary with string - parameter pairs
                describing the transformation.
                Currently, the following parameters
                from the dictionary are used:
                - `'theta'`: Float. Rotation angle in degrees.
                - `'tx'`: Float. Shift in the x direction.
                - `'ty'`: Float. Shift in the y direction.
                - `'shear'`: Float. Shear angle in degrees.
                - `'zx'`: Float. Zoom in the x direction.
                - `'zy'`: Float. Zoom in the y direction.
                - `'flip_horizontal'`: Boolean. Horizontal flip.
                - `'flip_vertical'`: Boolean. Vertical flip.
                - `'channel_shift_intensity'`: Float. Channel shift intensity.
                - `'brightness'`: Float. Brightness shift intensity.
        # Returns
            A transformed version of the input (same shape).
        """
        # x is a single image, so it doesn't have image number at index 0
        img_row_axis = self.row_axis - 1
        img_col_axis = self.col_axis - 1
        img_channel_axis = self.channel_axis - 1

        # Missing affine keys default to identity (0 angle/shift, 1 zoom).
        x = apply_affine_transform(x, transform_parameters.get('theta', 0),
                                   transform_parameters.get('tx', 0),
                                   transform_parameters.get('ty', 0),
                                   transform_parameters.get('shear', 0),
                                   transform_parameters.get('zx', 1),
                                   transform_parameters.get('zy', 1),
                                   row_axis=img_row_axis,
                                   col_axis=img_col_axis,
                                   channel_axis=img_channel_axis,
                                   fill_mode=self.fill_mode,
                                   cval=self.cval,
                                   order=self.interpolation_order)

        if transform_parameters.get('channel_shift_intensity') is not None:
            x = apply_channel_shift(x,
                                    transform_parameters['channel_shift_intensity'],
                                    img_channel_axis)

        # Horizontal flip reverses columns; vertical flip reverses rows.
        if transform_parameters.get('flip_horizontal', False):
            x = flip_axis(x, img_col_axis)

        if transform_parameters.get('flip_vertical', False):
            x = flip_axis(x, img_row_axis)

        if transform_parameters.get('brightness') is not None:
            x = apply_brightness_shift(x, transform_parameters['brightness'])

        return x
def random_transform(self, x, seed=None):
"""Applies a random transformation to an image.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
params = self.get_random_transform(x.shape, seed)
return self.apply_transform(x, params)
    def fit(self, x,
            augment=False,
            rounds=1,
            seed=None):
        """Fits the data generator to some sample data.

        This computes the internal data stats related to the
        data-dependent transformations, based on an array of sample data.

        Only required if `featurewise_center` or
        `featurewise_std_normalization` or `zca_whitening` are set to True.

        # Arguments
            x: Sample data. Should have rank 4.
             In case of grayscale data,
             the channels axis should have value 1, in case
             of RGB data, it should have value 3, and in case
             of RGBA data, it should have value 4.
            augment: Boolean (default: False).
                Whether to fit on randomly augmented samples.
            rounds: Int (default: 1).
                If using data augmentation (`augment=True`),
                this is how many augmentation passes over the data to use.
            seed: Int (default: None). Random seed.
        """
        x = np.asarray(x, dtype=self.dtype)
        if x.ndim != 4:
            raise ValueError('Input to `.fit()` should have rank 4. '
                             'Got array with shape: ' + str(x.shape))
        # Unexpected channel counts are only warned about, not rejected.
        if x.shape[self.channel_axis] not in {1, 3, 4}:
            warnings.warn(
                'Expected input to be images (as Numpy array) '
                'following the data format convention "' +
                self.data_format + '" (channels on axis ' +
                str(self.channel_axis) + '), i.e. expected '
                'either 1, 3 or 4 channels on axis ' +
                str(self.channel_axis) + '. '
                'However, it was passed an array with shape ' +
                str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
                ' channels).')

        if seed is not None:
            np.random.seed(seed)

        # Work on a copy: the featurewise steps below mutate x in place.
        x = np.copy(x)
        if augment:
            # Build `rounds` augmented copies of every sample and fit on those.
            ax = np.zeros(
                tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
                dtype=self.dtype)
            for r in range(rounds):
                for i in range(x.shape[0]):
                    ax[i + r * x.shape[0]] = self.random_transform(x[i])
            x = ax

        if self.featurewise_center:
            # Per-channel mean over samples, rows and columns, reshaped so
            # it broadcasts against a single image.
            self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
            broadcast_shape = [1, 1, 1]
            broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
            self.mean = np.reshape(self.mean, broadcast_shape)
            x -= self.mean

        if self.featurewise_std_normalization:
            # Per-channel std, same broadcasting scheme as the mean.
            self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
            broadcast_shape = [1, 1, 1]
            broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
            self.std = np.reshape(self.std, broadcast_shape)
            x /= (self.std + 1e-6)

        if self.zca_whitening:
            if scipy is None:
                raise ImportError('Using zca_whitening requires SciPy. '
                                  'Install SciPy.')
            # Whitening matrix from the SVD of the (already centered,
            # see above) flattened-sample covariance.
            flat_x = np.reshape(
                x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
            sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
            u, s, _ = linalg.svd(sigma)
            s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
            self.principal_components = (u * s_inv).dot(u.T)
| 44.278807 | 84 | 0.554056 |
fd8079ddab507b16654dc21bdab760fc31099b34 | 7,702 | py | Python | octavia/api/v2/types/pool.py | sajuptpm/octavia | fde4ebe822072a79bb74497b504ca3f0a6a6518d | [
"Apache-2.0"
] | null | null | null | octavia/api/v2/types/pool.py | sajuptpm/octavia | fde4ebe822072a79bb74497b504ca3f0a6a6518d | [
"Apache-2.0"
] | null | null | null | octavia/api/v2/types/pool.py | sajuptpm/octavia | fde4ebe822072a79bb74497b504ca3f0a6a6518d | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from wsme import types as wtypes
from octavia.api.common import types
from octavia.api.v2.types import health_monitor
from octavia.api.v2.types import member
from octavia.common import constants
class SessionPersistenceResponse(types.BaseType):
    """Defines which attributes are to be shown on any response."""
    # Session persistence type; presumably one of
    # constants.SUPPORTED_SP_TYPES (enforced on the POST/PUT types below).
    type = wtypes.wsattr(wtypes.text)
    # Cookie name; only meaningful for cookie-based persistence types.
    cookie_name = wtypes.wsattr(wtypes.text)
class SessionPersistencePOST(types.BaseType):
    """Defines mandatory and optional attributes of a POST request."""
    # Type is required on create and restricted to the supported set.
    type = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_SP_TYPES),
                         mandatory=True)
    # Optional cookie name, capped at 255 characters.
    cookie_name = wtypes.wsattr(wtypes.StringType(max_length=255),
                                default=None)
class SessionPersistencePUT(types.BaseType):
    """Defines attributes that are acceptable of a PUT request."""
    # Unlike POST, type is optional on update (no mandatory=True).
    type = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_SP_TYPES))
    # Optional cookie name, capped at 255 characters.
    cookie_name = wtypes.wsattr(wtypes.StringType(max_length=255),
                                default=None)
class BasePoolType(types.BaseType):
    """Common base for pool API types.

    Maps API attribute names to the differing data-model field names so
    from_data_model()/to_dict() conversions line up.
    """
    _type_to_model_map = {'admin_state_up': 'enabled',
                          'healthmonitor': 'health_monitor'}
    _child_map = {}
class PoolResponse(BasePoolType):
    """Defines which attributes are to be shown on any response."""
    id = wtypes.wsattr(wtypes.UuidType())
    name = wtypes.wsattr(wtypes.StringType())
    description = wtypes.wsattr(wtypes.StringType())
    provisioning_status = wtypes.wsattr(wtypes.StringType())
    operating_status = wtypes.wsattr(wtypes.StringType())
    admin_state_up = wtypes.wsattr(bool)
    protocol = wtypes.wsattr(wtypes.text)
    lb_algorithm = wtypes.wsattr(wtypes.text)
    session_persistence = wtypes.wsattr(SessionPersistenceResponse)
    project_id = wtypes.wsattr(wtypes.StringType())
    loadbalancers = wtypes.wsattr([types.IdOnlyType])
    listeners = wtypes.wsattr([types.IdOnlyType])
    created_at = wtypes.wsattr(wtypes.datetime.datetime)
    updated_at = wtypes.wsattr(wtypes.datetime.datetime)
    healthmonitor_id = wtypes.wsattr(wtypes.UuidType())
    members = wtypes.wsattr([types.IdOnlyType])

    @classmethod
    def from_data_model(cls, data_model, children=False):
        """Convert a pool data model into this response type.

        When ``cls._full_response()`` is true (see PoolFullResponse) the
        health monitor and members are embedded as full objects and the
        loadbalancers list is omitted; otherwise only related IDs are
        returned.
        """
        pool = super(PoolResponse, cls).from_data_model(
            data_model, children=children)
        if data_model.session_persistence:
            pool.session_persistence = (
                SessionPersistenceResponse.from_data_model(
                    data_model.session_persistence))
        if cls._full_response():
            # Full responses carry embedded children, not backrefs.
            del pool.loadbalancers
            member_model = member.MemberFullResponse
            # pool.healthmonitor was populated by the base conversion via
            # _type_to_model_map ('healthmonitor' -> 'health_monitor').
            if pool.healthmonitor:
                pool.healthmonitor = (
                    health_monitor.HealthMonitorFullResponse
                    .from_data_model(data_model.health_monitor))
        else:
            if data_model.load_balancer:
                pool.loadbalancers = [
                    types.IdOnlyType.from_data_model(data_model.load_balancer)]
            else:
                pool.loadbalancers = []
            member_model = types.IdOnlyType
            if data_model.health_monitor:
                pool.healthmonitor_id = data_model.health_monitor.id
        pool.listeners = [
            types.IdOnlyType.from_data_model(i) for i in data_model.listeners]
        pool.members = [
            member_model.from_data_model(i) for i in data_model.members]
        return pool
class PoolFullResponse(PoolResponse):
    """Pool response that embeds full member/health-monitor objects."""
    @classmethod
    def _full_response(cls):
        # Switches PoolResponse.from_data_model() into full-embed mode.
        return True

    members = wtypes.wsattr([member.MemberFullResponse])
    healthmonitor = wtypes.wsattr(health_monitor.HealthMonitorFullResponse)
class PoolRootResponse(types.BaseType):
    """Wrapper that nests a single pool under the 'pool' root key."""
    pool = wtypes.wsattr(PoolResponse)
class PoolsRootResponse(types.BaseType):
    """Wrapper for pool collections plus pagination links."""
    pools = wtypes.wsattr([PoolResponse])
    pools_links = wtypes.wsattr([types.PageType])
class PoolPOST(BasePoolType):
    """Defines mandatory and optional attributes of a POST request."""
    name = wtypes.wsattr(wtypes.StringType(max_length=255))
    description = wtypes.wsattr(wtypes.StringType(max_length=255))
    admin_state_up = wtypes.wsattr(bool, default=True)
    listener_id = wtypes.wsattr(wtypes.UuidType())
    loadbalancer_id = wtypes.wsattr(wtypes.UuidType())
    # Protocol and algorithm are required and limited to supported values.
    protocol = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_PROTOCOLS),
                             mandatory=True)
    lb_algorithm = wtypes.wsattr(
        wtypes.Enum(str, *constants.SUPPORTED_LB_ALGORITHMS),
        mandatory=True)
    session_persistence = wtypes.wsattr(SessionPersistencePOST)
    # TODO(johnsom) Remove after deprecation (R series)
    project_id = wtypes.wsattr(wtypes.StringType(max_length=36))
    # Optional children created in the same request (single-call graph create).
    healthmonitor = wtypes.wsattr(health_monitor.HealthMonitorSingleCreate)
    members = wtypes.wsattr([member.MemberSingleCreate])
class PoolRootPOST(types.BaseType):
    """Wrapper that nests the POST payload under the 'pool' root key."""
    pool = wtypes.wsattr(PoolPOST)
class PoolPUT(BasePoolType):
    """Defines attributes that are acceptable of a PUT request."""
    name = wtypes.wsattr(wtypes.StringType())
    description = wtypes.wsattr(wtypes.StringType())
    admin_state_up = wtypes.wsattr(bool)
    lb_algorithm = wtypes.wsattr(
        wtypes.Enum(str, *constants.SUPPORTED_LB_ALGORITHMS))
    session_persistence = wtypes.wsattr(SessionPersistencePUT)
class PoolRootPut(types.BaseType):
    """Wrapper that nests the PUT payload under the 'pool' root key."""
    pool = wtypes.wsattr(PoolPUT)
class PoolSingleCreate(BasePoolType):
    """Defines mandatory and optional attributes of a POST request.

    Variant used when the pool is created nested inside another object
    (no listener/loadbalancer IDs; protocol/algorithm not mandatory here).
    """
    name = wtypes.wsattr(wtypes.StringType(max_length=255))
    description = wtypes.wsattr(wtypes.StringType(max_length=255))
    admin_state_up = wtypes.wsattr(bool, default=True)
    protocol = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_PROTOCOLS))
    lb_algorithm = wtypes.wsattr(
        wtypes.Enum(str, *constants.SUPPORTED_LB_ALGORITHMS))
    session_persistence = wtypes.wsattr(SessionPersistencePOST)
    healthmonitor = wtypes.wsattr(health_monitor.HealthMonitorSingleCreate)
    members = wtypes.wsattr([member.MemberSingleCreate])
class PoolStatusResponse(BasePoolType):
    """Defines which attributes are to be shown on status response."""
    id = wtypes.wsattr(wtypes.UuidType())
    name = wtypes.wsattr(wtypes.StringType())
    provisioning_status = wtypes.wsattr(wtypes.StringType())
    operating_status = wtypes.wsattr(wtypes.StringType())
    health_monitor = wtypes.wsattr(
        health_monitor.HealthMonitorStatusResponse)
    members = wtypes.wsattr([member.MemberStatusResponse])

    @classmethod
    def from_data_model(cls, data_model, children=False):
        """Build one node of the status tree, embedding monitor and members."""
        pool = super(PoolStatusResponse, cls).from_data_model(
            data_model, children=children)
        member_model = member.MemberStatusResponse
        if data_model.health_monitor:
            pool.health_monitor = (
                health_monitor.HealthMonitorStatusResponse.from_data_model(
                    data_model.health_monitor))
        pool.members = [
            member_model.from_data_model(i) for i in data_model.members]
        return pool
| 39.497436 | 79 | 0.705531 |
edcf9aa28d1c3c3d5196c042058f5cf736bf2e88 | 309 | py | Python | data/multilingual/Latn.MOS/Sun-ExtA_8/pdf_to_json_test_Latn.MOS_Sun-ExtA_8.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | 1 | 2021-09-19T19:47:35.000Z | 2021-09-19T19:47:35.000Z | data/multilingual/Latn.MOS/Sun-ExtA_8/pdf_to_json_test_Latn.MOS_Sun-ExtA_8.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | data/multilingual/Latn.MOS/Sun-ExtA_8/pdf_to_json_test_Latn.MOS_Sun-ExtA_8.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | import pdf_to_json as p2j
import json
# Convert the UDHR sample PDF (Mossi, Latin script, Sun-ExtA font, size 8)
# to JSON and print it deterministically (sorted keys, UTF-8 preserved).
url = "file:data/multilingual/Latn.MOS/Sun-ExtA_8/udhr_Latn.MOS_Sun-ExtA_8.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
# Only record image hashes instead of embedding image data in the output.
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.9 | 79 | 0.809061 |
5456007885cbaa89731c79da6ee855d1d0aac057 | 686 | py | Python | bluebottle/activities/migrations/0022_activity_video_url.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 10 | 2015-05-28T18:26:40.000Z | 2021-09-06T10:07:03.000Z | bluebottle/activities/migrations/0022_activity_video_url.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 762 | 2015-01-15T10:00:59.000Z | 2022-03-31T15:35:14.000Z | bluebottle/activities/migrations/0022_activity_video_url.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 9 | 2015-02-20T13:19:30.000Z | 2022-03-08T14:09:17.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2020-05-20 12:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``video_url`` field to the Activity model."""

    dependencies = [
        ('activities', '0021_auto_20200415_1501'),
    ]

    operations = [
        migrations.AddField(
            model_name='activity',
            name='video_url',
            # NOTE(review): default=b'' is a *bytes* default on a URLField --
            # looks like a Python 2 artifact; confirm before editing a
            # historical migration that has already been applied.
            field=models.URLField(blank=True, default=b'', help_text="Do you have a video pitch or a short movie that explains your activity? Cool! We can't wait to see it! You can paste the link to YouTube or Vimeo video here", max_length=100, null=True, verbose_name='video'),
        ),
    ]
| 32.666667 | 278 | 0.666181 |
9c0f3e743d7313516cbae507595263813b495b92 | 480 | py | Python | setup.py | mehta-lab/waveorder | 9892c20955d3487778fd440a0d7f4f86334e7b8e | [
"Unlicense"
] | 2 | 2020-12-19T02:55:09.000Z | 2022-02-24T19:40:26.000Z | setup.py | mehta-lab/waveorder | 9892c20955d3487778fd440a0d7f4f86334e7b8e | [
"Unlicense"
] | 42 | 2021-01-20T22:34:14.000Z | 2022-03-31T00:13:37.000Z | setup.py | mehta-lab/waveorder | 9892c20955d3487778fd440a0d7f4f86334e7b8e | [
"Unlicense"
] | null | null | null | from setuptools import setup, find_packages
# Package metadata for waveorder.  find_packages() discovers all packages
# under the repo root; runtime dependencies are listed explicitly.
setup(
    name='waveorder',
    version='0.0.1',
    # Fixed typo in the user-visible description: "birefrigence" -> "birefringence".
    description='functions for reconstructing and visualizing phase and birefringence',
    author='Li-Hao Yeh',
    author_email='lihao.yeh@czbiohub.org',
    license='BSD License',
    packages=find_packages(),
    install_requires=['numpy', 'matplotlib', 'scipy', 'ipywidgets'],
)
| 40 | 97 | 0.554167 |
2bcf27c9def145289c5fbe1e854d3ce8dd3422ec | 3,699 | py | Python | deepdanbooru/data/dataset_wrapper.py | fumiama/DeepDanbooru | 17abe1ba7d656334006b8dc66d78fa0ece084d93 | [
"MIT"
] | null | null | null | deepdanbooru/data/dataset_wrapper.py | fumiama/DeepDanbooru | 17abe1ba7d656334006b8dc66d78fa0ece084d93 | [
"MIT"
] | null | null | null | deepdanbooru/data/dataset_wrapper.py | fumiama/DeepDanbooru | 17abe1ba7d656334006b8dc66d78fa0ece084d93 | [
"MIT"
] | null | null | null | import random
import numpy as np
import tensorflow as tf
import tensorflow_io as tfio
import deepdanbooru as dd
class DatasetWrapper:
    """
    Wrapper class for data pipelining/augmentation.

    Builds a tf.data pipeline that loads images from disk, applies random
    scale/rotation/shift augmentation, and converts a space-separated tag
    string into a multi-hot float label vector over the full tag list.
    """

    def __init__(
        self, inputs, tags, width, height, scale_range, rotation_range, shift_range
    ):
        # inputs: pairs of (image_path, tag_string) fed to
        # tf.data.Dataset.from_tensor_slices -- presumably; verify caller.
        self.inputs = inputs
        self.width = width
        self.height = height
        self.scale_range = scale_range          # (min, max) or falsy to disable
        self.rotation_range = rotation_range    # (min, max) or falsy to disable
        self.shift_range = shift_range          # (min, max) or falsy to disable
        # Full tag vocabulary in a fixed order; defines label vector layout.
        self.tag_all_array = np.array(tags)

    def get_dataset(self, minibatch_size):
        """Return a batched, prefetched tf.data.Dataset of (image, labels)."""
        dataset = tf.data.Dataset.from_tensor_slices(self.inputs)
        dataset = dataset.map(
            self.map_load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE
        )
        # Drop elements whose file read/decode failed instead of aborting.
        dataset = dataset.apply(tf.data.experimental.ignore_errors())
        dataset = dataset.map(
            self.map_transform_image_and_label,
            num_parallel_calls=tf.data.experimental.AUTOTUNE,
        )
        dataset = dataset.batch(minibatch_size)
        dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
        # dataset = dataset.apply(
        #     tf.data.experimental.prefetch_to_device('/device:GPU:0'))
        return dataset

    def map_load_image(self, image_path, tag_string):
        """Read and decode one image, pre-scaled so augmentation can zoom in."""
        image_raw = tf.io.read_file(image_path)
        try:
            image = tf.io.decode_png(image_raw, channels=3)
        # FIX: was a bare `except:` which also swallowed KeyboardInterrupt /
        # SystemExit; narrowed to Exception (covers tf decode errors).
        except Exception:
            # Fall back to WebP decoding (RGBA -> RGB).
            image = tfio.image.decode_webp(image_raw)
            image = tfio.experimental.color.rgba_to_rgb(image)
        # Pre-scale by the max zoom so a later scale-down never upsamples.
        if self.scale_range:
            pre_scale = self.scale_range[1]
        else:
            pre_scale = 1.0
        size = (int(self.height * pre_scale), int(self.width * pre_scale))
        image = tf.image.resize(
            image,
            size=size,
            method=tf.image.ResizeMethod.AREA,
            preserve_aspect_ratio=True,
        )
        return (image, tag_string)

    def map_transform_image_and_label(self, image, tag_string):
        """Graph-side wrapper delegating to the eager-Python transform."""
        return tf.py_function(
            self.map_transform_image_and_label_py,
            (image, tag_string),
            (tf.float32, tf.float32),
        )

    def map_transform_image_and_label_py(self, image, tag_string):
        """Augment one image and build its multi-hot label vector (eager)."""
        # transform image
        image = image.numpy()
        if self.scale_range:
            # Normalize so the sampled scale is relative to the pre-scale
            # applied in map_load_image (max scale maps to 1.0).
            scale = random.uniform(self.scale_range[0], self.scale_range[1]) * (
                1.0 / self.scale_range[1]
            )
        else:
            scale = None
        if self.rotation_range:
            rotation = random.uniform(self.rotation_range[0], self.rotation_range[1])
        else:
            rotation = None
        if self.shift_range:
            shift_x = random.uniform(self.shift_range[0], self.shift_range[1])
            shift_y = random.uniform(self.shift_range[0], self.shift_range[1])
            shift = (shift_x, shift_y)
        else:
            shift = None
        image = dd.image.transform_and_pad_image(
            image=image,
            target_width=self.width,
            target_height=self.height,
            rotation=rotation,
            scale=scale,
            shift=shift,
        )
        image = image / 255.0  # normalize to 0~1
        # image = image.astype(np.float32)

        # transform tag: multi-hot encode against the full vocabulary.
        tag_string = tag_string.numpy().decode()
        tag_array = np.array(tag_string.split(" "))
        labels = np.where(np.isin(self.tag_all_array, tag_array), 1, 0).astype(
            np.float32
        )
        return (image, labels)
| 31.347458 | 86 | 0.583942 |
91e9b4ad06a129c5c51bcafa6c3895489cda88b9 | 36,773 | py | Python | generate.py | joaor96/BLADE | 6f0cd0e7e5fe8f7d200a63719ecb347987fd9a1e | [
"Apache-2.0"
] | 5 | 2020-04-12T22:05:14.000Z | 2021-09-29T08:52:05.000Z | generate.py | joaor96/RBM-tDBN | 6f0cd0e7e5fe8f7d200a63719ecb347987fd9a1e | [
"Apache-2.0"
] | null | null | null | generate.py | joaor96/RBM-tDBN | 6f0cd0e7e5fe8f7d200a63719ecb347987fd9a1e | [
"Apache-2.0"
] | 1 | 2020-09-24T15:57:15.000Z | 2020-09-24T15:57:15.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 1 10:26:50 2019
@author: joaor
"""
import numpy as np
import pandas as pd
# Default experiment dimensions.  Note: the generate_* functions below take
# their own n_instances/n_time_points parameters, which shadow these names.
n_instances = 400
n_time_points = 5
def generate_binomial_1(n_instances,n_time_points):
    """Simulate a 2-feature binary time series and dump it to CSV.

    For each subject a fair-coin label is drawn; X1 at t=0 copies X0 with
    10% noise, and at each later slice both features are driven by the
    agreement of the previous slice, with the direction flipped by the
    label.  Writes 'binomial_1_<T>_parsed.csv' and
    'binomial_1_<T>_target.csv' (index column 'subject_id').
    """
    n_features = 2
    data = np.zeros([n_instances, n_features * n_time_points])
    # X0 at the first time slice is a fair coin flip for every subject.
    data[:, 0] = np.random.binomial(1, 0.5, n_instances)
    labels = np.zeros([n_instances, 1])
    for i in range(n_instances):
        labels[i] = np.random.binomial(1, 0.5, 1)
        positive = bool(labels[i, 0])
        # X1__0 follows X0__0 with 10% noise -- identical for both classes.
        p_first = 0.1 if data[i, 0] == 0 else 0.9
        data[i, 1] = np.random.binomial(1, p_first, 1)
        for t in range(n_time_points - 1):
            base = t * n_features
            agree = data[i, base] + data[i, base + 1]
            if agree == 0:          # previous slice was (0, 0)
                p_next = 0.9 if positive else 0.1
            elif agree == 2:        # previous slice was (1, 1)
                p_next = 0.1 if positive else 0.9
            else:                   # disagreement: uninformative
                p_next = 0.5
            # Both next-slice features share the same conditional probability.
            data[i, base + 2] = np.random.binomial(1, p_next, 1)
            data[i, base + 3] = np.random.binomial(1, p_next, 1)
    # Column layout: X<feature>__<time>, time-major.
    col = [f"X{f}__{t}" for t in range(n_time_points) for f in range(n_features)]
    df = pd.DataFrame(data=data, index=list(range(n_instances)), columns=col)
    df.index.name = 'subject_id'
    labels_df = pd.DataFrame(data=labels, index=list(range(n_instances)),
                             columns=['label'])
    labels_df.index.name = 'subject_id'
    df.to_csv('binomial_1_' + str(n_time_points) + '_parsed.csv', quoting=1)
    labels_df.to_csv('binomial_1_' + str(n_time_points) + '_target.csv', quoting=1)
def generate_binomial_2(n_instances,n_time_points):
    """Simulate a 5-feature binary chain X0->...->X4 over time, save to CSV.

    X0 is latent: it evolves and drives the chain, but its columns are
    dropped before saving, so the saved file has features X1..X4.  The
    label flips the X0 transition and the X2/X3/X4 dependency pattern.
    Writes 'binomial_2_<T>_parsed.csv' and 'binomial_2_<T>_target.csv'.
    """
    n_features=5
    data = np.zeros([n_instances, n_features*n_time_points])
    # X0 and X1 at t=0 are fair coin flips.
    data[:,0] = np.random.binomial(1, 0.5, n_instances)
    data[:,1] = np.random.binomial(1, 0.5, n_instances)
    labels = np.zeros([n_instances, 1])
    for i in range(0,n_instances):
        labels[i] = np.random.binomial(1, 0.5, 1)
        #LABEL 0: X2 anti-follows X1, X3 follows X1; X4 follows X2 (vs X3).
        if labels[i] == 0:
            if data[i,1] == 0:
                data[i,2] = np.random.binomial(1, 0.9, 1)
                data[i,3] = np.random.binomial(1, 0.1, 1)
            else:
                data[i,2] = np.random.binomial(1, 0.1, 1)
                data[i,3] = np.random.binomial(1, 0.9, 1)
            if data[i,2] == 0 and data[i,3] == 1:
                data[i,4] = np.random.binomial(1, 0.1, 1)
            elif data[i,2] == 1 and data[i,3] == 0:
                data[i,4] = np.random.binomial(1, 0.9, 1)
            else:
                data[i,4] = np.random.binomial(1, 0.5, 1)
            # Transitions: X0 tends to flip (p=0.7 toward 1 from 0), then the
            # chain X0 -> X1 -> (X2, X3) -> X4 repeats within slice t+1.
            for t in range(n_time_points-1):
                if data[i,t*n_features] == 0:
                    data[i,t*n_features+5] = np.random.binomial(1, 0.7, 1)
                else:
                    data[i,t*n_features+5] = np.random.binomial(1, 0.3, 1)
                if data[i,t*n_features+5] == 0:
                    data[i,t*n_features+6] = np.random.binomial(1, 0.1, 1)
                else:
                    data[i,t*n_features+6] = np.random.binomial(1, 0.9, 1)
                if data[i,t*n_features+6] == 0:
                    data[i,t*n_features+7] = np.random.binomial(1, 0.9, 1)
                    data[i,t*n_features+8] = np.random.binomial(1, 0.1, 1)
                else:
                    data[i,t*n_features+7] = np.random.binomial(1, 0.1, 1)
                    data[i,t*n_features+8] = np.random.binomial(1, 0.9, 1)
                if data[i,t*n_features+7] == 0 and data[i,t*n_features+8] == 1:
                    data[i,t*n_features+9] = np.random.binomial(1, 0.1, 1)
                elif data[i,t*n_features+7] == 1 and data[i,t*n_features+8] == 0:
                    data[i,t*n_features+9] = np.random.binomial(1, 0.9, 1)
                else:
                    data[i,t*n_features+9] = np.random.binomial(1, 0.5, 1)
        #LABEL 1: roles of X3 and X4 are swapped and X0's drift is reversed.
        elif labels[i] == 1:
            if data[i,1] == 0:
                data[i,2] = np.random.binomial(1, 0.1, 1)
                data[i,4] = np.random.binomial(1, 0.9, 1)
            else:
                data[i,2] = np.random.binomial(1, 0.9, 1)
                data[i,4] = np.random.binomial(1, 0.1, 1)
            if data[i,2] == 1 and data[i,4] == 0:
                data[i,3] = np.random.binomial(1, 0.1, 1)
            elif data[i,2] == 0 and data[i,4] == 1:
                data[i,3] = np.random.binomial(1, 0.9, 1)
            else:
                data[i,3] = np.random.binomial(1, 0.5, 1)
            for t in range(n_time_points-1):
                if data[i,t*n_features] == 0:
                    data[i,t*n_features+5] = np.random.binomial(1, 0.3, 1)
                else:
                    data[i,t*n_features+5] = np.random.binomial(1, 0.7, 1)
                if data[i,t*n_features+5] == 0:
                    data[i,t*n_features+6] = np.random.binomial(1, 0.1, 1)
                else:
                    data[i,t*n_features+6] = np.random.binomial(1, 0.9, 1)
                if data[i,t*n_features+6] == 0:
                    data[i,t*n_features+7] = np.random.binomial(1, 0.1, 1)
                    data[i,t*n_features+9] = np.random.binomial(1, 0.9, 1)
                else:
                    data[i,t*n_features+7] = np.random.binomial(1, 0.9, 1)
                    data[i,t*n_features+9] = np.random.binomial(1, 0.1, 1)
                if data[i,t*n_features+7] == 1 and data[i,t*n_features+9] == 0:
                    data[i,t*n_features+8] = np.random.binomial(1, 0.1, 1)
                elif data[i,t*n_features+7] == 0 and data[i,t*n_features+9] == 1:
                    data[i,t*n_features+8] = np.random.binomial(1, 0.9, 1)
                else:
                    data[i,t*n_features+8] = np.random.binomial(1, 0.5, 1)
    col = []
    for t in range(n_time_points):
        for f in range(n_features):
            col.append("X"+str(f)+"__"+str(t))
    df = pd.DataFrame(data=data, # values
                      index=list(range(n_instances)), # 1st column as index
                      columns=col)
    df.index.name = 'subject_id'
    # Drop the latent driver X0 from the observed dataset.
    for t in range(n_time_points):
        df.drop(columns=["X0__"+str(t)], inplace=True)
    labels_df = pd.DataFrame(data=labels, # values
                             index=list(range(n_instances)), # 1st column as index
                             columns=['label'])
    labels_df.index.name = 'subject_id'
    df.to_csv('binomial_2_'+str(n_time_points)+'_parsed.csv',quoting=1)
    labels_df.to_csv('binomial_2_'+str(n_time_points)+'_target.csv',quoting=1)
def generate_binomial_3(n_instances,n_time_points):
    """Simulate 5 binary features with two latent parents X0 and X1.

    X0 and X1 drive X2/X3/X4 and their columns are dropped before saving,
    so the observed file contains X2..X4 only.  The label changes which
    latent drives which observed feature.  Writes
    'binomial_3_<T>_parsed.csv' and 'binomial_3_<T>_target.csv'.
    """
    n_features=5
    data = np.zeros([n_instances, n_features*n_time_points])
    # Latent parents at t=0 are fair coin flips.
    data[:,0] = np.random.binomial(1, 0.5, n_instances)
    data[:,1] = np.random.binomial(1, 0.5, n_instances)
    labels = np.zeros([n_instances, 1])
    for i in range(0,n_instances):
        labels[i] = np.random.binomial(1, 0.5, 1)
        #LABEL 0: X0 -> (X2 strongly, X3 weakly); X1 -> X4 (inverted).
        if labels[i] == 0:
            if data[i,0] == 0:
                data[i,2] = np.random.binomial(1, 0.9, 1)
                data[i,3] = np.random.binomial(1, 0.7, 1)
            else:
                data[i,2] = np.random.binomial(1, 0.1, 1)
                data[i,3] = np.random.binomial(1, 0.3, 1)
            if data[i,1] == 0:
                data[i,4] = np.random.binomial(1, 0.9, 1)
            else:
                data[i,4] = np.random.binomial(1, 0.1, 1)
            for t in range(n_time_points-1):
                if data[i,t*n_features] == 0:
                    data[i,t*n_features+5] = np.random.binomial(1, 0.9, 1)
                else:
                    data[i,t*n_features+5] = np.random.binomial(1, 0.1, 1)
                if data[i,t*n_features+5] == 0:
                    data[i,t*n_features+7] = np.random.binomial(1, 0.9, 1)
                    data[i,t*n_features+8] = np.random.binomial(1, 0.7, 1)
                else:
                    data[i,t*n_features+7] = np.random.binomial(1, 0.1, 1)
                    data[i,t*n_features+8] = np.random.binomial(1, 0.3, 1)
                # NOTE(review): X1 at slice t+1 (offset +6) is never sampled
                # in this branch, so it stays at its zero initialization and
                # this test always takes the first arm.  Inert for the saved
                # data (X1 columns are dropped below), but looks unintended.
                if data[i,t*n_features+6] == 0:
                    data[i,t*n_features+9] = np.random.binomial(1, 0.9, 1)
                else:
                    data[i,t*n_features+9] = np.random.binomial(1, 0.1, 1)
        #LABEL 1: couplings are rewired (X0 -> X2/X4, X1 -> X3).
        elif labels[i] == 1:
            if data[i,0] == 0:
                data[i,2] = np.random.binomial(1, 0.1, 1)
                data[i,4] = np.random.binomial(1, 0.7, 1)
            else:
                data[i,2] = np.random.binomial(1, 0.9, 1)
                data[i,4] = np.random.binomial(1, 0.3, 1)
            if data[i,1] == 0:
                data[i,3] = np.random.binomial(1, 0.1, 1)
            else:
                data[i,3] = np.random.binomial(1, 0.9, 1)
            for t in range(n_time_points-1):
                if data[i,t*n_features] == 0:
                    data[i,t*n_features+5] = np.random.binomial(1, 0.9, 1)
                else:
                    data[i,t*n_features+5] = np.random.binomial(1, 0.1, 1)
                if data[i,t*n_features+1] == 0:
                    data[i,t*n_features+6] = np.random.binomial(1, 0.6, 1)
                else:
                    data[i,t*n_features+6] = np.random.binomial(1, 0.4, 1)
                if data[i,t*n_features+5] == 0:
                    data[i,t*n_features+7] = np.random.binomial(1, 0.1, 1)
                    data[i,t*n_features+9] = np.random.binomial(1, 0.7, 1)
                else:
                    data[i,t*n_features+7] = np.random.binomial(1, 0.9, 1)
                    data[i,t*n_features+9] = np.random.binomial(1, 0.3, 1)
                if data[i,t*n_features+6] == 0:
                    data[i,t*n_features+8] = np.random.binomial(1, 0.1, 1)
                else:
                    data[i,t*n_features+8] = np.random.binomial(1, 0.9, 1)
    col = []
    for t in range(n_time_points):
        for f in range(n_features):
            col.append("X"+str(f)+"__"+str(t))
    df = pd.DataFrame(data=data, # values
                      index=list(range(n_instances)), # 1st column as index
                      columns=col)
    df.index.name = 'subject_id'
    # Drop both latent drivers from the observed dataset.
    for t in range(n_time_points):
        df.drop(columns=["X0__"+str(t)], inplace=True)
        df.drop(columns=["X1__"+str(t)], inplace=True)
    labels_df = pd.DataFrame(data=labels, # values
                             index=list(range(n_instances)), # 1st column as index
                             columns=['label'])
    labels_df.index.name = 'subject_id'
    df.to_csv('binomial_3_'+str(n_time_points)+'_parsed.csv',quoting=1)
    labels_df.to_csv('binomial_3_'+str(n_time_points)+'_target.csv',quoting=1)
def generate_multinomial_1(n_instances,n_time_points):
    """Simulate 3 ternary features whose couplings depend on the label.

    Under label 0, X1 depends on X0 and X2 is uniform noise; under label 1
    the roles of X1 and X2 are swapped.  The same swap applies to the
    transition structure between consecutive time slices.  Writes
    'multinomial_1_<T>_parsed.csv' and 'multinomial_1_<T>_target.csv'.
    """
    n_features=3
    values=np.arange(3)
    data = np.zeros([n_instances, n_features*n_time_points])
    uniform=np.ones(len(values))/len(values)
    # X0 at t=0 is uniform over {0, 1, 2}.
    data[:,0] = np.random.choice(values,p=uniform, size=n_instances)
    labels = np.zeros([n_instances, 1])
    for i in range(0,n_instances):
        labels[i] = np.random.binomial(1, 0.5, 1)
        #LABEL 0: X1 tracks X0 (2->0, 0->2, 1->1 with 90% mass); X2 is noise.
        if labels[i] == 0:
            if data[i,0] == 2:
                data[i,1] = np.random.choice(values,p=[0.9,0.05,0.05])
            elif data[i,0] == 0:
                data[i,1] = np.random.choice(values,p=[0.05,0.05,0.9])
            else:
                data[i,1] = np.random.choice(values,p=[0.05,0.9,0.05])
            # All three arms below draw from the same uniform distribution;
            # the branch is kept only for symmetry with the label-1 case.
            if data[i,0] == 2:
                data[i,2] = np.random.choice(values,p=uniform)
            elif data[i,0] == 0:
                data[i,2] = np.random.choice(values,p=uniform)
            else:
                data[i,2] = np.random.choice(values,p=uniform)
            #THIS FOR TIME SLICE
            for t in range(n_time_points-1):
                if data[i,t*n_features] == 2 and data[i,t*n_features+1] == 0:
                    data[i,t*n_features+3] = np.random.choice(values,p=[0.9,0.05,0.05])
                    data[i,t*n_features+4] = np.random.choice(values,p=[0.05,0.05,0.9])
                elif data[i,t*n_features] == 0 and data[i,t*n_features+1] == 2:
                    data[i,t*n_features+3] = np.random.choice(values,p=[0.05,0.9,0.05])
                    data[i,t*n_features+4] = np.random.choice(values,p=[0.05,0.9,0.05])
                elif data[i,t*n_features] == 1 and data[i,t*n_features+1] == 1:
                    data[i,t*n_features+3] = np.random.choice(values,p=[0.05,0.05,0.9])
                    data[i,t*n_features+4] = np.random.choice(values,p=[0.9,0.05,0.05])
                else:
                    data[i,t*n_features+3] = np.random.choice(values,p=uniform)
                    data[i,t*n_features+4] = np.random.choice(values,p=uniform)
                # X2 at t+1 is uniform regardless of the branch taken.
                if data[i,t*n_features+3] == 2:
                    data[i,t*n_features+5] = np.random.choice(values,p=uniform)
                elif data[i,t*n_features+3] == 0:
                    data[i,t*n_features+5] = np.random.choice(values,p=uniform)
                else:
                    data[i,t*n_features+5] = np.random.choice(values,p=uniform)
        #LABEL 1: same pattern with X1 and X2 exchanged.
        elif labels[i] == 1:
            if data[i,0] == 2:
                data[i,2] = np.random.choice(values,p=[0.9,0.05,0.05])
            elif data[i,0] == 0:
                data[i,2] = np.random.choice(values,p=[0.05,0.05,0.9])
            else:
                data[i,2] = np.random.choice(values,p=[0.05,0.9,0.05])
            if data[i,0] == 2:
                data[i,1] = np.random.choice(values,p=uniform)
            elif data[i,0] == 0:
                data[i,1] = np.random.choice(values,p=uniform)
            else:
                data[i,1] = np.random.choice(values,p=uniform)
            #THIS FOR TIME SLICE 1
            for t in range(n_time_points-1):
                if data[i,t*n_features] == 2 and data[i,t*n_features+2] == 0:
                    data[i,t*n_features+3] = np.random.choice(values,p=[0.9,0.05,0.05])
                    data[i,t*n_features+5] = np.random.choice(values,p=[0.05,0.05,0.9])
                elif data[i,t*n_features+0] == 0 and data[i,t*n_features+2] == 2:
                    data[i,t*n_features+3] = np.random.choice(values,p=[0.05,0.9,0.05])
                    data[i,t*n_features+5] = np.random.choice(values,p=[0.05,0.9,0.05])
                elif data[i,t*n_features] == 1 and data[i,t*n_features+2] == 1:
                    data[i,t*n_features+3] = np.random.choice(values,p=[0.05,0.05,0.9])
                    data[i,t*n_features+5] = np.random.choice(values,p=[0.9,0.05,0.05])
                else:
                    data[i,t*n_features+3] = np.random.choice(values,p=uniform)
                    data[i,t*n_features+5] = np.random.choice(values,p=uniform)
                if data[i,t*n_features+3] == 2:
                    data[i,t*n_features+4] = np.random.choice(values,p=uniform)
                # NOTE(review): this condition reads offset +4 (the value
                # about to be assigned) instead of +3 like the arm above --
                # looks like a typo, but it is inert because every arm draws
                # from the same uniform distribution.
                elif data[i,t*n_features+4] == 0:
                    data[i,t*n_features+4] = np.random.choice(values,p=uniform)
                else:
                    data[i,t*n_features+4] = np.random.choice(values,p=uniform)
    col = []
    for t in range(n_time_points):
        for f in range(n_features):
            col.append("X"+str(f)+"__"+str(t))
    df = pd.DataFrame(data=data, # values
                      index=list(range(n_instances)), # 1st column as index
                      columns=col)
    df.index.name = 'subject_id'
    labels_df = pd.DataFrame(data=labels, # values
                             index=list(range(n_instances)), # 1st column as index
                             columns=['label'])
    labels_df.index.name = 'subject_id'
    df.to_csv('multinomial_1_'+str(n_time_points)+'_parsed.csv',quoting=1)
    labels_df.to_csv('multinomial_1_'+str(n_time_points)+'_target.csv',quoting=1)
def generate_multinomial_2(n_instances,n_time_points):
    """Simulate 4 ternary features with a label-dependent active pair.

    Under label 0 the informative pair is (X0, X2); under label 1 it is
    (X1, X3).  The remaining two features are uniform noise in each case,
    at t=0 and in every transition.  Writes
    'multinomial_2_<T>_parsed.csv' and 'multinomial_2_<T>_target.csv'.
    """
    n_features=4
    values=np.arange(3)
    data = np.zeros([n_instances, n_features*n_time_points])
    uniform=np.ones(len(values))/len(values)
    labels = np.zeros([n_instances, 1])
    for i in range(0,n_instances):
        labels[i] = np.random.binomial(1, 0.5, 1)
        #LABEL 0: X2 depends on X0; X1 and X3 are pure noise.
        if labels[i] == 0:
            data[i,0] = np.random.choice(values,p=uniform)
            if data[i,0] == 2:
                data[i,2] = np.random.choice(values,p=[0.9,0.05,0.05])
            elif data[i,0] == 0:
                data[i,2] = np.random.choice(values,p=[0.05,0.05,0.9])
            else:
                data[i,2] = np.random.choice(values,p=uniform)
            data[i,1] = np.random.choice(values,p=uniform)
            data[i,3] = np.random.choice(values,p=uniform)
            #THIS FOR TIME SLICE 1
            for t in range(n_time_points-1):
                if data[i,t*n_features] == 2 and data[i,t*n_features+2] == 0:
                    data[i,t*n_features+4] = np.random.choice(values,p=[0.9,0.05,0.05])
                    data[i,t*n_features+6] = np.random.choice(values,p=[0.05,0.05,0.9])
                elif data[i,t*n_features] == 0 and data[i,t*n_features+2] == 2:
                    data[i,t*n_features+4] = np.random.choice(values,p=[0.05,0.05,0.9])
                    data[i,t*n_features+6] = np.random.choice(values,p=[0.9,0.05,0.05])
                else:
                    data[i,t*n_features+4] = np.random.choice(values,p=uniform)
                    data[i,t*n_features+6] = np.random.choice(values,p=uniform)
                data[i,t*n_features+5] = np.random.choice(values,p=uniform)
                data[i,t*n_features+7] = np.random.choice(values,p=uniform)
        #LABEL 1: mirrored -- X3 depends on X1; X0 and X2 are noise.
        elif labels[i] == 1:
            data[i,1] = np.random.choice(values,p=uniform)
            if data[i,1] == 2:
                data[i,3] = np.random.choice(values,p=[0.9,0.05,0.05])
            elif data[i,1] == 0:
                data[i,3] = np.random.choice(values,p=[0.05,0.05,0.9])
            else:
                data[i,3] = np.random.choice(values,p=uniform)
            data[i,0] = np.random.choice(values,p=uniform)
            data[i,2] = np.random.choice(values,p=uniform)
            #THIS FOR TIME SLICE 1
            for t in range(n_time_points-1):
                if data[i,t*n_features+1] == 2 and data[i,t*n_features+3] == 0:
                    data[i,t*n_features+5] = np.random.choice(values,p=[0.9,0.05,0.05])
                    data[i,t*n_features+7] = np.random.choice(values,p=[0.05,0.05,0.9])
                elif data[i,t*n_features+1] == 0 and data[i,t*n_features+3] == 2:
                    data[i,t*n_features+5] = np.random.choice(values,p=[0.05,0.05,0.9])
                    data[i,t*n_features+7] = np.random.choice(values,p=[0.9,0.05,0.05])
                else:
                    data[i,t*n_features+5] = np.random.choice(values,p=uniform)
                    data[i,t*n_features+7] = np.random.choice(values,p=uniform)
                data[i,t*n_features+4] = np.random.choice(values,p=uniform)
                data[i,t*n_features+6] = np.random.choice(values,p=uniform)
    col = []
    for t in range(n_time_points):
        for f in range(n_features):
            col.append("X"+str(f)+"__"+str(t))
    df = pd.DataFrame(data=data, # values
                      index=list(range(n_instances)), # 1st column as index
                      columns=col)
    df.index.name = 'subject_id'
    labels_df = pd.DataFrame(data=labels, # values
                             index=list(range(n_instances)), # 1st column as index
                             columns=['label'])
    labels_df.index.name = 'subject_id'
    df.to_csv('multinomial_2_'+str(n_time_points)+'_parsed.csv',quoting=1)
    labels_df.to_csv('multinomial_2_'+str(n_time_points)+'_target.csv',quoting=1)
def generate_multiclass(n_instances,n_time_points):
n_features=10
n_values = 4
values=np.arange(n_values)
classes=np.arange(6)
data = np.zeros([n_instances, n_features*n_time_points])
uniform=np.ones(n_values)/n_values
uniform_class=np.ones(len(classes))/len(classes)
for i in range(n_instances):
for j in range(n_features*n_time_points):
data[i,j] = np.random.choice(values,p=uniform)
labels = np.zeros([n_instances, 1])
for i in range(0,n_instances):
labels[i] = np.random.choice(classes,p=uniform_class)
#LABEL 0
if labels[i] == 0:
data[i,0] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,1] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,2] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
data[i,3] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
#THIS FOR TIME SLICE 1
for t in range(n_time_points-1):
data[i,t*n_features+n_features+0] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,t*n_features+n_features+1] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,t*n_features+n_features+2] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
data[i,t*n_features+n_features+3] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
#LABEL 1
elif labels[i] == 1:
data[i,0] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,1] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,2] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,3] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
#THIS FOR TIME SLICE 1
for t in range(n_time_points-1):
data[i,t*n_features+n_features+0] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,t*n_features+n_features+1] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,t*n_features+n_features+2] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,t*n_features+n_features+3] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
#LABEL 2
elif labels[i] == 2:
data[i,2] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,3] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,4] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,5] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
#THIS FOR TIME SLICE 1
for t in range(n_time_points-1):
data[i,t*n_features+n_features+2] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,t*n_features+n_features+3] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,t*n_features+n_features+4] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,t*n_features+n_features+5] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
#LABEL 3
elif labels[i] == 3:
data[i,2] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,3] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,4] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,5] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
#THIS FOR TIME SLICE 1
for t in range(n_time_points-1):
data[i,t*n_features+n_features+2] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,t*n_features+n_features+3] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,t*n_features+n_features+4] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,t*n_features+n_features+5] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
#LABEL 4
elif labels[i] == 4:
data[i,4] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,5] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,6] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
data[i,7] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
#THIS FOR TIME SLICE 1
for t in range(n_time_points-1):
data[i,t*n_features+n_features+4] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,t*n_features+n_features+5] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,t*n_features+n_features+6] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
data[i,t*n_features+n_features+7] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
#LABEL 5
elif labels[i] == 5:
data[i,4] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,5] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,6] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,7] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
#THIS FOR TIME SLICE 1
for t in range(n_time_points-1):
data[i,t*n_features+n_features+4] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,t*n_features+n_features+5] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,t*n_features+n_features+6] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,t*n_features+n_features+7] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
#LABEL 6
elif labels[i] == 6:
data[i,6] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,7] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,8] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
data[i,9] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
#THIS FOR TIME SLICE 1
for t in range(n_time_points-1):
data[i,t*n_features+n_features+6] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,t*n_features+n_features+7] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,t*n_features+n_features+8] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
data[i,t*n_features+n_features+9] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
#LABEL 7
elif labels[i] == 7:
data[i,7] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,6] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,8] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,9] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
#THIS FOR TIME SLICE 1
for t in range(n_time_points-1):
data[i,t*n_features+n_features+6] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,t*n_features+n_features+7] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,t*n_features+n_features+8] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,t*n_features+n_features+9] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
#LABEL 8
elif labels[i] == 8:
data[i,0] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,1] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,8] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,9] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
#THIS FOR TIME SLICE 1
for t in range(n_time_points-1):
data[i,t*n_features+n_features+0] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,t*n_features+n_features+1] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,t*n_features+n_features+8] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,t*n_features+n_features+9] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
#LABEL 9
elif labels[i] == 9:
data[i,0] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,1] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,8] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,9] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
#THIS FOR TIME SLICE 1
for t in range(n_time_points-1):
data[i,t*n_features+n_features+0] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,t*n_features+n_features+1] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,t*n_features+n_features+8] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,t*n_features+n_features+9] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
col = []
for t in range(n_time_points):
for f in range(n_features):
col.append("X"+str(f)+"__"+str(t))
df = pd.DataFrame(data=data, # values
index=list(range(n_instances)), # 1st column as index
columns=col)
df.index.name = 'subject_id'
labels_df = pd.DataFrame(data=labels, # values
index=list(range(n_instances)), # 1st column as index
columns=['label'])
labels_df.index.name = 'subject_id'
df.to_csv('multiclass_'+str(len(classes))+'_parsed.csv',quoting=1)
labels_df.to_csv('multiclass_'+str(len(classes))+'_target.csv',quoting=1)
def generate_binomial_4(n_instances,n_time_points):
    """Generate a synthetic longitudinal binary dataset with two classes.

    Each subject gets ``n_features`` (10) binary features per time point,
    flattened row-wise into ``n_features * n_time_points`` columns named
    ``X<f>__<t>``.  A Bernoulli(0.5) label is drawn per subject, then
    class-specific correlations are injected: within time slice 0 and between
    consecutive time slices, different feature pairs track or oppose each
    other depending on the label.  Results are written to
    ``binomial_joao_parsed.csv`` (features) and ``binomial_joao_target.csv``
    (labels); nothing is returned.

    Args:
        n_instances: number of subjects (rows) to generate.
        n_time_points: number of time slices per subject.
    """
    n_features=10
    # Start from i.i.d. Bernoulli(0.5) noise in every feature/time column.
    data = np.zeros([n_instances, n_features*n_time_points])
    labels = np.zeros([n_instances, 1])
    for j in range(n_features*n_time_points):
        data[:,j] = np.random.binomial(1, 0.5, n_instances)
    for i in range(0,n_instances):
        labels[i] = np.random.binomial(1, 0.5, 1)
        #LABEL 0
        if labels[i] == 0:
            # Time slice 0: feature 1 tends to copy feature 0; feature 3
            # tends to oppose feature 2.
            if data[i,0] == 0:
                data[i,1] = np.random.binomial(1, 0.1, 1)
            else:
                data[i,1] = np.random.binomial(1, 0.9, 1)
            if data[i,2] == 0:
                data[i,3] = np.random.binomial(1, 0.9, 1)
            else:
                data[i,3] = np.random.binomial(1, 0.1, 1)
            # Transitions: slice t+1 (offset t*n_features+n_features) is
            # biased by the values of features 0-3 in slice t.
            for t in range(n_time_points-1):
                if data[i,t*n_features+0] == 0 and data[i,t*n_features+1] == 0:
                    data[i,t*n_features+n_features+0] = np.random.binomial(1, 0.9, 1)
                    data[i,t*n_features+n_features+1] = np.random.binomial(1, 0.9, 1)
                elif data[i,t*n_features+0] == 1 and data[i,t*n_features+1] == 1:
                    data[i,t*n_features+n_features+0] = np.random.binomial(1, 0.1, 1)
                    data[i,t*n_features+n_features+1] = np.random.binomial(1, 0.1, 1)
                else:
                    data[i,t*n_features+n_features+0] = np.random.binomial(1, 0.5, 1)
                    data[i,t*n_features+n_features+1] = np.random.binomial(1, 0.5, 1)
                if data[i,t*n_features+2] == 0 and data[i,t*n_features+3] == 1:
                    data[i,t*n_features+n_features+2] = np.random.binomial(1, 0.9, 1)
                    data[i,t*n_features+n_features+3] = np.random.binomial(1, 0.1, 1)
                elif data[i,t*n_features+2] == 1 and data[i,t*n_features+3] == 0:
                    data[i,t*n_features+n_features+2] = np.random.binomial(1, 0.1, 1)
                    data[i,t*n_features+n_features+3] = np.random.binomial(1, 0.9, 1)
                else:
                    data[i,t*n_features+n_features+2] = np.random.binomial(1, 0.5, 1)
                    data[i,t*n_features+n_features+3] = np.random.binomial(1, 0.5, 1)
        #LABEL 1
        elif labels[i] == 1:
            # Time slice 0: cross-pair couplings (0 with 3, 1 with 2),
            # mirroring label 0 but with swapped partners.
            if data[i,0] == 0:
                data[i,3] = np.random.binomial(1, 0.9, 1)
            else:
                data[i,3] = np.random.binomial(1, 0.1, 1)
            if data[i,1] == 0:
                data[i,2] = np.random.binomial(1, 0.9, 1)
            else:
                data[i,2] = np.random.binomial(1, 0.1, 1)
            for t in range(n_time_points-1):
                if data[i,t*n_features+0] == 0 and data[i,t*n_features+3] == 1:
                    data[i,t*n_features+n_features+0] = np.random.binomial(1, 0.9, 1)
                    data[i,t*n_features+n_features+3] = np.random.binomial(1, 0.1, 1)
                elif data[i,t*n_features+0] == 1 and data[i,t*n_features+3] == 0:
                    data[i,t*n_features+n_features+0] = np.random.binomial(1, 0.1, 1)
                    data[i,t*n_features+n_features+3] = np.random.binomial(1, 0.9, 1)
                else:
                    data[i,t*n_features+n_features+0] = np.random.binomial(1, 0.5, 1)
                    data[i,t*n_features+n_features+3] = np.random.binomial(1, 0.5, 1)
                if data[i,t*n_features+1] == 0 and data[i,t*n_features+2] == 1:
                    data[i,t*n_features+n_features+1] = np.random.binomial(1, 0.9, 1)
                    data[i,t*n_features+n_features+2] = np.random.binomial(1, 0.1, 1)
                elif data[i,t*n_features+1] == 1 and data[i,t*n_features+2] == 0:
                    data[i,t*n_features+n_features+1] = np.random.binomial(1, 0.1, 1)
                    data[i,t*n_features+n_features+2] = np.random.binomial(1, 0.9, 1)
                else:
                    data[i,t*n_features+n_features+1] = np.random.binomial(1, 0.5, 1)
                    data[i,t*n_features+n_features+2] = np.random.binomial(1, 0.5, 1)
    # Column names follow the "X<feature>__<time>" convention.
    col = []
    for t in range(n_time_points):
        for f in range(n_features):
            col.append("X"+str(f)+"__"+str(t))
    df = pd.DataFrame(data=data, # values
                      index=list(range(n_instances)), # 1st column as index
                      columns=col)
    df.index.name = 'subject_id'
    labels_df = pd.DataFrame(data=labels, # values
                             index=list(range(n_instances)), # 1st column as index
                             columns=['label'])
    labels_df.index.name = 'subject_id'
    df.to_csv('binomial_joao_parsed.csv',quoting=1)
    labels_df.to_csv('binomial_joao_target.csv',quoting=1)
866c5b3b07acc6def5733aa05b165a19a42e13ed | 13,673 | py | Python | atomate/vasp/builders/tasks_materials.py | srshivani/atomate | 1e851d70a5f107736e3b9c6775e2e9e4a2de7a5d | [
"BSD-3-Clause-LBNL"
] | 3 | 2021-08-02T09:19:20.000Z | 2022-03-28T17:37:47.000Z | atomate/vasp/builders/tasks_materials.py | srshivani/atomate | 1e851d70a5f107736e3b9c6775e2e9e4a2de7a5d | [
"BSD-3-Clause-LBNL"
] | 4 | 2020-10-14T08:25:24.000Z | 2020-10-16T01:05:12.000Z | atomate/vasp/builders/tasks_materials.py | srshivani/atomate | 1e851d70a5f107736e3b9c6775e2e9e4a2de7a5d | [
"BSD-3-Clause-LBNL"
] | 2 | 2020-09-23T13:10:00.000Z | 2020-10-21T17:47:04.000Z | # coding: utf-8
import os
from datetime import datetime
from pymongo import ReturnDocument
from tqdm import tqdm
from atomate.utils.utils import get_mongolike, get_logger
from atomate.vasp.builders.base import AbstractBuilder
from atomate.vasp.builders.utils import dbid_to_str, dbid_to_int
from atomate.utils.utils import get_database
from monty.serialization import loadfn
from pymatgen import Structure
from pymatgen.analysis.structure_matcher import StructureMatcher, ElementComparator
logger = get_logger(__name__)
__author__ = 'Anubhav Jain <ajain@lbl.gov>'
module_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)))
"""
This class collects all "tasks" (individual calculations) on a single compound and produces a
summary report in a new collection ("materials"). The tasks are matched based on having the same
crystal structure, with some options for overriding this.
There is lots of config for this builder in the accompanying "tasks_materials_settings.yaml" file.
"""
class TasksMaterialsBuilder(AbstractBuilder):
    """Collect all "tasks" on a single compound into a "materials" summary doc.

    Tasks are matched to a material by crystal structure (StructureMatcher),
    with an optional "parent_structure" override.  Configuration lives in
    ``tasks_materials_settings.yaml``.
    """

    def __init__(self, materials_write, counter_write, tasks_read, tasks_prefix="t",
                 materials_prefix="m", query=None, settings_file=None):
        """
        Create a materials collection from a tasks collection.

        Args:
            materials_write (pymongo.collection): mongodb collection for materials (write access needed)
            counter_write (pymongo.collection): mongodb collection for counter (write access needed)
            tasks_read (pymongo.collection): mongodb collection for tasks (suggest read-only for safety)
            tasks_prefix (str): a string prefix for tasks, e.g. "t" gives a task_id like "t-132"
            materials_prefix (str): a string prefix to prepend to material_ids
            query (dict): a pymongo query on tasks_read for which tasks to include in the builder
            settings_file (str): filepath to a custom settings path
        """
        settings_file = settings_file or os.path.join(
            module_dir, "tasks_materials_settings.yaml")
        x = loadfn(settings_file)
        self.supported_task_labels = x['supported_task_labels']
        self.property_settings = x['property_settings']
        self.indexes = x.get('indexes', [])
        self.properties_root = x.get('properties_root', [])

        self._materials = materials_write
        # NOTE(review): Collection.count()/Cursor.count() are deprecated in
        # modern pymongo (count_documents replaces them); kept here for
        # compatibility with the pymongo version this project pins.
        if self._materials.count() == 0:
            self._build_indexes()

        self._counter = counter_write
        if self._counter.find({"_id": "materialid"}).count() == 0:
            self._counter.insert_one({"_id": "materialid", "c": 0})

        self._tasks = tasks_read
        self._t_prefix = tasks_prefix
        self._m_prefix = materials_prefix
        self.query = query

    def run(self):
        """Process all new successful tasks and fold them into materials."""
        logger.info("MaterialsTaskBuilder starting...")
        logger.info("Initializing list of all new task_ids to process ...")

        # Use a set: membership is tested once per candidate task below, and a
        # list made that loop quadratic in the number of known tasks.
        previous_task_ids = set()
        for m in self._materials.find({}, {"_tasksbuilder.all_task_ids": 1}):
            previous_task_ids.update(m["_tasksbuilder"]["all_task_ids"])

        q = {"state": "successful", "task_label": {"$in": self.supported_task_labels}}
        if self.query:
            # Refuse user queries that would silently override the builder's
            # own filter keys.
            common_keys = [k for k in q.keys() if k in self.query.keys()]
            if common_keys:
                raise ValueError("User query parameter cannot contain key(s): {}".
                                 format(common_keys))
            q.update(self.query)

        all_task_ids = [dbid_to_str(self._t_prefix, t["task_id"]) for t in
                        self._tasks.find(q, {"task_id": 1})]
        task_ids = [t_id for t_id in all_task_ids if t_id not in previous_task_ids]

        logger.info("There are {} new task_ids to process.".format(len(task_ids)))

        pbar = tqdm(task_ids)
        for t_id in pbar:
            pbar.set_description("Processing task_id: {}".format(t_id))
            try:
                taskdoc = self._tasks.find_one({"task_id": dbid_to_int(t_id)})
                m_id = self._match_material(taskdoc)
                if not m_id:
                    m_id = self._create_new_material(taskdoc)
                self._update_material(m_id, taskdoc)
            except Exception:
                # Was a bare ``except:`` that also swallowed KeyboardInterrupt /
                # SystemExit and logged the traceback four separate times.
                # logger.exception() already appends the current traceback.
                logger.exception("There was an error processing task_id: {}".format(t_id))

        logger.info("TasksMaterialsBuilder finished processing.")

    def reset(self):
        """Wipe the materials collection and restart the material_id counter."""
        logger.info("Resetting TasksMaterialsBuilder")
        self._materials.delete_many({})
        self._counter.delete_one({"_id": "materialid"})
        self._counter.insert_one({"_id": "materialid", "c": 0})
        self._build_indexes()
        logger.info("Finished resetting TasksMaterialsBuilder.")

    @classmethod
    def from_file(cls, db_file, m="materials", c="counter", t="tasks", **kwargs):
        """
        Get a TaskMaterialsBuilder using only a db file.

        Args:
            db_file (str): path to db file
            m (str): name of "materials" collection
            c (str): name of "counter" collection
            t (str): name of "tasks" collection
            **kwargs: other params to put into TasksMaterialsBuilder
        """
        db_write = get_database(db_file, admin=True)
        try:
            db_read = get_database(db_file, admin=False)
            db_read.collection_names()  # throw error if auth failed
        except Exception:
            logger.warning("Warning: could not get read-only database; using write creds")
            db_read = get_database(db_file, admin=True)
        return cls(db_write[m], db_write[c], db_read[t], **kwargs)

    def _build_indexes(self):
        """
        Create indexes for faster searching
        """
        self._materials.create_index("material_id", unique=True)
        for index in self.indexes:
            self._materials.create_index(index)

    def _match_material(self, taskdoc, ltol=0.2, stol=0.3, angle_tol=5):
        """
        Returns the material_id that has the same structure as this task as
        determined by the structure matcher. Returns None if no match.

        Args:
            taskdoc (dict): a JSON-like task document
            ltol (float): StructureMatcher tuning parameter
            stol (float): StructureMatcher tuning parameter
            angle_tol (float): StructureMatcher tuning parameter

        Returns:
            (int) matching material_id or None
        """
        formula = taskdoc["formula_reduced_abc"]

        # handle the "parent structure" option, which is used to intentionally
        # force slightly different structures to contribute to the same
        # "material", e.g. from an ordering scheme
        if "parent_structure" in taskdoc:
            t_struct = Structure.from_dict(taskdoc["parent_structure"]["structure"])
            q = {"formula_reduced_abc": formula, "parent_structure.spacegroup.number": taskdoc[
                "parent_structure"]["spacegroup"]["number"]}
        else:
            sgnum = taskdoc["output"]["spacegroup"]["number"]
            t_struct = Structure.from_dict(taskdoc["output"]["structure"])
            q = {"formula_reduced_abc": formula, "sg_number": sgnum}

        # Candidates share formula and spacegroup; confirm with a full
        # structure match before declaring them the same material.
        for m in self._materials.find(q, {"parent_structure": 1, "structure": 1, "material_id": 1}):
            s_dict = m["parent_structure"]["structure"] if "parent_structure" in m else m[
                "structure"]
            m_struct = Structure.from_dict(s_dict)
            sm = StructureMatcher(ltol=ltol, stol=stol, angle_tol=angle_tol,
                                  primitive_cell=True, scale=True,
                                  attempt_supercell=False, allow_subset=False,
                                  comparator=ElementComparator())
            if sm.fit(m_struct, t_struct):
                return m["material_id"]

        return None

    def _create_new_material(self, taskdoc):
        """
        Create a new material document.

        Args:
            taskdoc (dict): a JSON-like task document

        Returns:
            (int) - material_id of the new document
        """
        doc = {"created_at": datetime.utcnow()}
        # "energies" is included up front for symmetry with the $set paths in
        # _update_material (mongo would create it lazily otherwise).
        doc["_tasksbuilder"] = {"all_task_ids": [], "prop_metadata":
            {"labels": {}, "task_ids": {}, "energies": {}},
            "updated_at": datetime.utcnow()}
        doc["spacegroup"] = taskdoc["output"]["spacegroup"]
        doc["structure"] = taskdoc["output"]["structure"]
        # material_id comes from an atomic counter increment so ids are unique
        # even with concurrent builders.
        doc["material_id"] = dbid_to_str(
            self._m_prefix, self._counter.find_one_and_update(
                {"_id": "materialid"}, {"$inc": {"c": 1}},
                return_document=ReturnDocument.AFTER)["c"])

        doc["sg_symbol"] = doc["spacegroup"]["symbol"]
        doc["sg_number"] = doc["spacegroup"]["number"]

        for x in ["formula_anonymous", "formula_pretty", "formula_reduced_abc", "elements",
                  "nelements", "chemsys"]:
            doc[x] = taskdoc[x]

        if "parent_structure" in taskdoc:
            doc["parent_structure"] = taskdoc["parent_structure"]
            t_struct = Structure.from_dict(taskdoc["parent_structure"]["structure"])
            # NOTE(review): stores reduced_formula under the
            # "formula_reduced_abc" key — confirm this mismatch is intended.
            doc["parent_structure"]["formula_reduced_abc"] = t_struct.composition.reduced_formula

        self._materials.insert_one(doc)

        return doc["material_id"]

    def _update_material(self, m_id, taskdoc):
        """
        Update a material document based on a new task and using complex logic

        Args:
            m_id (int): material_id for material document to update
            taskdoc (dict): a JSON-like task document
        """
        # For each materials property, figure out what kind of task the data is currently based on
        # as defined by the task label. This is used to decide if the new taskdoc is a type of
        # calculation that provides higher quality data for that property
        prop_tlabels = self._materials.find_one(
            {"material_id": m_id}, {"_tasksbuilder.prop_metadata.labels": 1})[
            "_tasksbuilder"]["prop_metadata"]["labels"]

        task_label = taskdoc["task_label"]  # task label of new doc that updates this material

        # figure out what materials properties need to be updated based on new task
        for x in self.property_settings:
            for p in x["properties"]:
                # check if this is a valid task for getting the property
                if task_label in x["quality_scores"]:
                    # assert: this is a valid task for the property
                    # but is it the "best" task for that property (highest quality score)?
                    t_quality = x["quality_scores"][task_label]
                    m_quality = x["quality_scores"].get(prop_tlabels.get(p, None), None)
                    # check if this task's quality is better than existing data
                    # 3 possibilities:
                    # i) materials property data not present, so this is best
                    # ii) task quality higher based on task label
                    # iii) task quality equal to materials; use lowest energy task
                    if not m_quality or t_quality > m_quality \
                            or (t_quality == m_quality
                                and taskdoc["output"]["energy_per_atom"] <
                                self._materials.find_one({"material_id": m_id}, {
                                    "_tasksbuilder": 1})["_tasksbuilder"]["prop_metadata"][
                                    "energies"][p]):
                        # this task has better quality data
                        # figure out where the property data lives in the materials doc and
                        # in the task doc
                        materials_key = "{}.{}".format(x["materials_key"], p) \
                            if x.get("materials_key") else p
                        tasks_key = "{}.{}".format(x["tasks_key"], p) \
                            if x.get("tasks_key") else p

                        # insert property data AND metadata about this task
                        self._materials.\
                            update_one({"material_id": m_id},
                                       {"$set": {materials_key: get_mongolike(taskdoc, tasks_key),
                                                 "_tasksbuilder.prop_metadata.labels.{}".format(p): task_label,
                                                 "_tasksbuilder.prop_metadata.task_ids.{}".format(p): dbid_to_str(
                                                     self._t_prefix, taskdoc["task_id"]),
                                                 "_tasksbuilder.prop_metadata.energies.{}".format(p): taskdoc["output"]["energy_per_atom"],
                                                 "_tasksbuilder.updated_at": datetime.utcnow()}})

                        # copy property to document root if in properties_root
                        # i.e., intentionally duplicate some data to the root level
                        if p in self.properties_root:
                            self._materials.\
                                update_one({"material_id": m_id},
                                           {"$set": {p: get_mongolike(taskdoc, tasks_key)}})

        # update the database to reflect that this task_id was already processed
        self._materials.update_one({"material_id": m_id},
                                   {"$push": {"_tasksbuilder.all_task_ids": dbid_to_str(
                                       self._t_prefix, taskdoc["task_id"])}})
| 46.506803 | 139 | 0.590653 |
cd15aca487428ee8a9d1e4a87af466017fc5bbf4 | 3,504 | py | Python | hoomd/md/pytest/test_special_pair.py | USF-GT-Molecular-Modeling/hoomd-blue | 2ba2f9e60b0320746d21aa8219bfc9df119c053f | [
"BSD-3-Clause"
] | null | null | null | hoomd/md/pytest/test_special_pair.py | USF-GT-Molecular-Modeling/hoomd-blue | 2ba2f9e60b0320746d21aa8219bfc9df119c053f | [
"BSD-3-Clause"
] | null | null | null | hoomd/md/pytest/test_special_pair.py | USF-GT-Molecular-Modeling/hoomd-blue | 2ba2f9e60b0320746d21aa8219bfc9df119c053f | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2009-2022 The Regents of the University of Michigan.
# Part of HOOMD-blue, released under the BSD 3-Clause License.
import hoomd
import pytest
import numpy
# Separation between the two particles in the test snapshot (snapshot_factory
# builds the pair with d=R).
R = 0.9
# Per-particle charges written into the snapshot; used by the Coulomb rows.
CHARGE = [-2.5, 2.5]

# test parameters include the class, special pair params, r_cut, force, and
# energy
special_pair_test_parameters = [
    (
        hoomd.md.special_pair.LJ,
        dict(epsilon=1.5, sigma=0.5),
        2.5,
        # Analytic LJ force and energy evaluated at separation R.
        24 * 0.5**6 * 1.5 * (R**6 - 2 * 0.5**6) / R**13,
        4 * 1.5 * ((0.5 / R)**12 - (0.5 / R)**6),
    ),
    (
        hoomd.md.special_pair.Coulomb,
        dict(alpha=1.5),
        2.5,
        # Analytic Coulomb force and energy evaluated at separation R.
        -1.5 * CHARGE[0] * CHARGE[1] / R**2,
        1.5 * CHARGE[0] * CHARGE[1] / R,
    ),
]
@pytest.mark.parametrize("special_pair_cls, params, r_cut, force, energy",
                         special_pair_test_parameters)
def test_before_attaching(special_pair_cls, params, r_cut, force, energy):
    """Parameters set before attaching must read back unchanged."""
    pair_force = special_pair_cls()
    pair_force.params['A-A'] = params
    pair_force.r_cut['A-A'] = r_cut
    assert pair_force.r_cut['A-A'] == r_cut
    for name, value in params.items():
        assert pair_force.params['A-A'][name] == pytest.approx(value)
@pytest.fixture(scope='session')
def snapshot_factory(two_particle_snapshot_factory):
    """Session fixture producing a two-particle snapshot joined by one 'A-A' pair."""

    def _build():
        snap = two_particle_snapshot_factory(d=R, L=R * 10)
        # Snapshot contents are only written on rank 0.
        if snap.communicator.rank == 0:
            snap.particles.charge[:] = CHARGE
            snap.pairs.N = 1
            snap.pairs.types = ['A-A']
            snap.pairs.typeid[0] = 0
            snap.pairs.group[0] = (0, 1)
        return snap

    return _build
@pytest.mark.parametrize("special_pair_cls, params, r_cut, force, energy",
                         special_pair_test_parameters)
def test_after_attaching(snapshot_factory, simulation_factory, special_pair_cls,
                         params, r_cut, force, energy):
    """Parameters must survive attaching the force to a simulation."""
    sim = simulation_factory(snapshot_factory())

    pair_force = special_pair_cls()
    pair_force.params['A-A'] = params
    pair_force.r_cut['A-A'] = r_cut

    integrator = hoomd.md.Integrator(dt=0.005, forces=[pair_force])
    sim.operations.integrator = integrator
    sim.run(0)

    pair_force.r_cut['A-A'] = r_cut
    for name, value in params.items():
        assert pair_force.params['A-A'][name] == pytest.approx(value)
@pytest.mark.parametrize("special_pair_cls, params, r_cut, force, energy",
                         special_pair_test_parameters)
def test_forces_and_energies(snapshot_factory, simulation_factory,
                             special_pair_cls, params, r_cut, force, energy):
    """Computed forces/energies must match the analytic reference values."""
    sim = simulation_factory(snapshot_factory())

    pair_force = special_pair_cls()
    pair_force.params['A-A'] = params
    pair_force.r_cut['A-A'] = r_cut
    sim.operations.integrator = hoomd.md.Integrator(dt=0.005,
                                                    forces=[pair_force])
    sim.run(0)

    energies = pair_force.energies
    forces = pair_force.forces
    if sim.device.communicator.rank == 0:
        assert sum(energies) == pytest.approx(energy)
        # The reference force acts along x; the two particles feel equal and
        # opposite forces.
        numpy.testing.assert_allclose(forces[0], [force, 0.0, 0.0],
                                      rtol=1e-6,
                                      atol=1e-5)
        numpy.testing.assert_allclose(forces[1], [-1 * force, 0.0, 0.0],
                                      rtol=1e-6,
                                      atol=1e-5)
| 33.371429 | 80 | 0.589041 |
8400d66d6d86c9ddc800d5b67e30cfb2c7c70f55 | 366 | py | Python | HASHING/Anagrams.py | rajansh87/Algorithms-Implementations | 1f3dd1bc2decf10638fe0fdeeede47a650a9057b | [
"MIT"
] | 1 | 2020-05-10T19:01:51.000Z | 2020-05-10T19:01:51.000Z | HASHING/Anagrams.py | rajansh87/Algorithms-Implementations | 1f3dd1bc2decf10638fe0fdeeede47a650a9057b | [
"MIT"
] | 9 | 2021-03-17T18:10:18.000Z | 2021-03-29T19:35:06.000Z | HASHING/Anagrams.py | rajansh87/Data-Structures-and-Algorithms-Implementations | 0529079fbcd4d1a047210e9f2ff42c194c0818fe | [
"MIT"
# Group the words in ``arr`` into anagram classes.
# (The original first line was corrupted by fused dataset metadata; the
# ``arr`` assignment is restored here.)
arr = ["cat", "dog", "god", "tca"]

# Key each word by its letter-frequency signature (26 lowercase counts);
# words sharing a signature are anagrams.  Values are 1-based word indices.
dic = {}
for i, word in enumerate(arr, start=1):
    count = [0] * 26  # assumes words are lowercase a-z
    for ch in word:
        count[ord(ch) - 97] += 1
    dic.setdefault(tuple(count), []).append(i)

# One list of indices per anagram group, in first-seen order.
brr = list(dic.values())
print(brr)
| 18.3 | 38 | 0.497268 |
34049a0328ca19438c54205b3ee1973ac2d5c205 | 785 | py | Python | semana-02/lista-exercicio/lista-3/exercicio_8.py | larissajusten/ufsc-object-oriented-programming | 839e6abcc20580ea1a47479232c3ed3cb0153e4b | [
"MIT"
] | 6 | 2021-11-29T05:43:19.000Z | 2022-03-15T21:54:54.000Z | semana-02/lista-exercicio/lista-3/exercicio_8.py | larissajusten/ufsc-object-oriented-programming | 839e6abcc20580ea1a47479232c3ed3cb0153e4b | [
"MIT"
] | 3 | 2021-11-21T03:44:03.000Z | 2021-11-21T03:44:05.000Z | semana-02/lista-exercicio/lista-3/exercicio_8.py | larissajusten/ufsc-object-oriented-programming | 839e6abcc20580ea1a47479232c3ed3cb0153e4b | [
"MIT"
] | null | null | null | """
Exercício 8. Crie uma classe MatrizEsparsa que pode ser construida das duas formas abaixo,
e contenha métodos para mostrar a matriz no formato String e também no formato de dicionário.
1) receber uma dupla (tupla com 2 elementos) indicando quantas linhas e colunas tem a matriz,
e um dicionário com duplas como chave (linha, coluna) e um valor numérico.
2) Receber uma string no seguinte formato:
matriz = '''0 8 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0
0 2 0 0 0 0 1 0 0
0 0 0 0 0 0 0 0 0
0 0 0 7 0 0 0 0 0
0 0 0 0 0 0 4 0 0'''
*dica: use os métodos splitlines() e split da classe String, e o método get() da classe dict
""" | 46.176471 | 102 | 0.569427 |
195c0b7bfcf18e78a9ae8612acf4d19806b32de4 | 5,560 | py | Python | .github/scripts/cover2cover.py | ocihangir/aws-greengrass-fleet-provisioning-by-claim | bb6773822edc31b93ee729f0e3c0fe7fa4a9a3d3 | [
"Apache-2.0"
] | 11 | 2020-12-15T18:23:51.000Z | 2022-03-26T16:40:30.000Z | .github/scripts/cover2cover.py | ocihangir/aws-greengrass-fleet-provisioning-by-claim | bb6773822edc31b93ee729f0e3c0fe7fa4a9a3d3 | [
"Apache-2.0"
] | 50 | 2020-12-15T15:45:48.000Z | 2022-03-29T19:44:50.000Z | .github/scripts/cover2cover.py | ocihangir/aws-greengrass-fleet-provisioning-by-claim | bb6773822edc31b93ee729f0e3c0fe7fa4a9a3d3 | [
"Apache-2.0"
] | 6 | 2020-12-16T20:14:34.000Z | 2021-04-27T17:23:21.000Z | #!/usr/bin/env python
# Copyright Amazon.com Inc. or its affiliates.
# SPDX-License-Identifier: Apache-2.0
import sys
import xml.etree.ElementTree as ET
import re
import os.path
# branch-rate="0.0" complexity="0.0" line-rate="1.0"
# branch="true" hits="1" number="86"
def find_lines(j_package, filename):
    """Collect every <line> element from the package's <sourcefile> matching *filename*."""
    wanted = os.path.basename(filename)
    matched = []
    for src in j_package.findall("sourcefile"):
        if src.attrib.get("name") == wanted:
            matched.extend(src.findall("line"))
    return matched
def line_is_after(jm, start_line):
    """True when method element *jm* starts strictly after *start_line*."""
    method_start = int(jm.attrib.get('line', 0))
    return method_start > start_line
def method_lines(jmethod, jmethods, jlines):
    """Yield the <line> elements of *jlines* that fall within *jmethod*'s span.

    The span runs from the method's own start line up to (but excluding) the
    smallest start line of any later method, or to a large sentinel when
    *jmethod* is the last method.
    """
    start_line = int(jmethod.attrib.get('line', 0))
    later_starts = [int(jm.attrib.get('line', 0))
                    for jm in jmethods if line_is_after(jm, start_line)]
    end_line = min(later_starts) if later_starts else 99999999
    for jl in jlines:
        if start_line <= int(jl.attrib['nr']) < end_line:
            yield jl
def convert_lines(j_lines, into):
    """Translate JaCoCo <line> elements into Cobertura <line> children of *into*."""
    container = ET.SubElement(into, 'lines')
    for src in j_lines:
        missed_b = int(src.attrib['mb'])
        covered_b = int(src.attrib['cb'])
        covered_i = int(src.attrib['ci'])
        out = ET.SubElement(container, 'line')
        out.set('number', src.attrib['nr'])
        # JaCoCo reports covered/missed only, not hit counts, so fake 0/1 hits.
        out.set('hits', '1' if covered_i > 0 else '0')
        total_b = covered_b + missed_b
        if total_b > 0:
            pct = str(int(100 * (float(covered_b) / float(total_b)))) + '%'
            out.set('branch', 'true')
            out.set('condition-coverage',
                    '{} ({}/{})'.format(pct, covered_b, total_b))
            cond = ET.SubElement(ET.SubElement(out, 'conditions'), 'condition')
            cond.set('number', '0')
            cond.set('type', 'jump')
            cond.set('coverage', pct)
        else:
            out.set('branch', 'false')
def guess_filename(path_to_class):
    """Map a JaCoCo class path to a .java filename, dropping any $Inner suffix."""
    base, _, _ = path_to_class.partition('$')
    return base + '.java'
def add_counters(source, target):
    """Copy line-rate, branch-rate and complexity from *source*'s counters onto *target*.

    ``sum`` below is the module-level total helper defined later in this file
    (covered + missed), which deliberately shadows the builtin.
    """
    target.set('line-rate', counter(source, 'LINE'))
    target.set('branch-rate', counter(source, 'BRANCH'))
    target.set('complexity', counter(source, 'COMPLEXITY', sum))
def fraction(covered, missed):
    """Return covered / (covered + missed), or 0.0 when both are zero.

    The zero-total guard prevents a ZeroDivisionError on a counter element
    that reports 0 covered and 0 missed.
    """
    total = covered + missed
    return covered / total if total else 0.0
def sum(covered, missed):
    # Total of the two counter fields.  Intentionally shadows the builtin
    # ``sum``: this helper is passed by name as the ``operation`` argument of
    # ``counter`` (see add_counters), so renaming it would require changing
    # those call sites as well.
    return covered + missed
def counter(source, type, operation=fraction):
    """Find the <counter> of the given type under *source* and reduce it.

    Returns str(operation(covered, missed)) for the first matching counter,
    or '0.0' when no counter of that type exists.  (``type`` shadows the
    builtin, but it is part of the existing signature.)
    """
    for ct in source.findall('counter'):
        if ct.attrib.get('type') == type:
            covered = float(ct.attrib['covered'])
            missed = float(ct.attrib['missed'])
            return str(operation(covered, missed))
    return '0.0'
def convert_method(j_method, j_lines):
    """Build a Cobertura <method> element from a JaCoCo <method> element.

    *j_lines* is the iterable of JaCoCo <line> elements belonging to this
    method (see method_lines); they become the method's <lines> children.
    """
    c_method = ET.Element('method')
    c_method.set('name', j_method.attrib['name'])
    c_method.set('signature', j_method.attrib['desc'])
    add_counters(j_method, c_method)
    convert_lines(j_lines, c_method)
    return c_method
def convert_class(j_class, j_package):
    """Build a Cobertura <class> element from a JaCoCo <class> element.

    JaCoCo stores line data on the package's <sourcefile> elements, so the
    enclosing *j_package* is needed to look the lines up.
    """
    c_class = ET.Element('class')
    c_class.set('name', j_class.attrib['name'].replace('/', '.'))
    # Source filename is not recorded per class; derive it from the class
    # path (outer-class name + ".java").
    c_class.set('filename', guess_filename(j_class.attrib['name']))
    all_j_lines = list(find_lines(j_package, c_class.attrib['filename']))
    c_methods = ET.SubElement(c_class, 'methods')
    all_j_methods = list(j_class.findall('method'))
    for j_method in all_j_methods:
        # Slice the class's lines into per-method ranges.
        j_method_lines = method_lines(j_method, all_j_methods, all_j_lines)
        c_methods.append(convert_method(j_method, j_method_lines))
    add_counters(j_class, c_class)
    convert_lines(all_j_lines, c_class)
    return c_class
def convert_package(j_package):
    """Build a Cobertura <package> element from a JaCoCo <package> element."""
    c_package = ET.Element('package')
    c_package.attrib['name'] = j_package.attrib['name'].replace('/', '.')
    c_classes = ET.SubElement(c_package, 'classes')
    for j_class in j_package.findall('class'):
        # Only output the class if it has methods to be covered
        if j_class.findall('method'):
            c_classes.append(convert_class(j_class, j_package))
    add_counters(j_package, c_package)
    return c_package
def convert_root(source, target, source_roots):
    """Populate the Cobertura <coverage> root *target* from the JaCoCo report root.

    Args:
        source: parsed JaCoCo <report> root element.
        target: empty <coverage> element, filled in place.
        source_roots: iterable of source directory paths for the <sources> block.
    """
    # JaCoCo session start is in milliseconds; Cobertura wants seconds.
    # NOTE(review): true division produces a float string (e.g. "1234.567"),
    # not an integer timestamp — confirm downstream consumers accept this.
    target.set('timestamp', str(int(source.find('sessioninfo').attrib['start']) / 1000))
    sources = ET.SubElement(target, 'sources')
    for s in source_roots:
        ET.SubElement(sources, 'source').text = s
    packages = ET.SubElement(target, 'packages')
    for package in source.findall('package'):
        packages.append(convert_package(package))
    add_counters(source, target)
def jacoco2cobertura(filename, source_roots):
    """Read a JaCoCo XML report and print the Cobertura XML to stdout.

    Args:
        filename: path to the JaCoCo XML report, or '-' to read from stdin.
        source_roots: iterable of source directory paths for the <sources> block.
    """
    if filename == '-':
        root = ET.fromstring(sys.stdin.read())
    else:
        tree = ET.parse(filename)
        root = tree.getroot()
    into = ET.Element('coverage')
    convert_root(root, into, source_roots)
    # Emit the XML declaration manually since tostring(encoding='unicode')
    # omits it.
    print('<?xml version="1.0" ?>')
    print(ET.tostring(into, encoding='unicode'))
if __name__ == '__main__':
    # CLI entry point: cover2cover.py FILENAME [SOURCE_ROOTS...]
    # (The final line of this block was corrupted by fused dataset metadata,
    # which made the file unparseable; the clean call is restored here.)
    if len(sys.argv) < 2:
        print("Usage: cover2cover.py FILENAME [SOURCE_ROOTS]")
        sys.exit(1)
    filename = sys.argv[1]
    # Default to the current directory.  Use a one-element list rather than
    # the bare string '.', which only worked because iterating a one-character
    # string happens to yield that character.
    source_roots = sys.argv[2:] if 2 < len(sys.argv) else ['.']
    jacoco2cobertura(filename, source_roots)
b91c73c9f2adbd49a713a195525f34bb8e2380fb | 1,146 | py | Python | ciphers/transposition_cipher_encrypt_decrypt_file.py | NavpreetDevpuri/Python | 7ef5ae66d777e8ed702993c6aa9270e0669cb0c6 | [
"MIT"
] | 145,614 | 2016-07-21T05:40:05.000Z | 2022-03-31T22:17:22.000Z | ciphers/transposition_cipher_encrypt_decrypt_file.py | Agha-Muqarib/Python | 04f156a8973d6156a4357e0717d9eb0aa264d086 | [
"MIT"
] | 3,987 | 2016-07-28T17:31:25.000Z | 2022-03-30T23:07:46.000Z | ciphers/transposition_cipher_encrypt_decrypt_file.py | Agha-Muqarib/Python | 04f156a8973d6156a4357e0717d9eb0aa264d086 | [
"MIT"
] | 40,014 | 2016-07-26T15:14:41.000Z | 2022-03-31T22:23:03.000Z | import os
import sys
import time
from . import transposition_cipher as transCipher
def main() -> None:
    """Interactively encrypt or decrypt a file with the transposition cipher.

    Prompts for a key and a mode, refuses to clobber an existing output file
    without confirmation, and reports the elapsed time.
    """
    inputFile = "Prehistoric Men.txt"
    outputFile = "Output.txt"
    key = int(input("Enter key: "))
    mode = input("Encrypt/Decrypt [e/d]: ")

    if not os.path.exists(inputFile):
        print("File %s does not exist. Quitting..." % inputFile)
        sys.exit()
    if os.path.exists(outputFile):
        # Ask before overwriting a previous run's output.
        print("Overwrite %s? [y/n]" % outputFile)
        response = input("> ")
        if not response.lower().startswith("y"):
            sys.exit()

    startTime = time.time()
    if mode.lower().startswith("e"):
        with open(inputFile) as f:
            content = f.read()
        translated = transCipher.encryptMessage(key, content)
    elif mode.lower().startswith("d"):
        # NOTE(review): decryption reads the previously written OUTPUT file,
        # not the input file — confirm this round-trip workflow is intended.
        with open(outputFile) as f:
            content = f.read()
        translated = transCipher.decryptMessage(key, content)
    else:
        # Previously an unrecognized mode fell through and crashed with a
        # NameError on ``translated``; fail with a clear message instead.
        print("Unknown mode %r; expected 'e' or 'd'. Quitting..." % mode)
        sys.exit()

    with open(outputFile, "w") as outputObj:
        outputObj.write(translated)

    totalTime = round(time.time() - startTime, 2)
    # The old code printed a tuple literal, e.g. "('Done (', 0.5, 'seconds )')".
    print("Done (%s seconds)" % totalTime)
if __name__ == "__main__":
main()
| 27.285714 | 64 | 0.604712 |
cb823a94e9eecf708c23800c7927bd73c6937f93 | 972 | py | Python | src/search/serializers/combined.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 18 | 2021-05-20T13:20:16.000Z | 2022-02-11T02:40:18.000Z | src/search/serializers/combined.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 109 | 2021-05-21T20:14:23.000Z | 2022-03-31T20:56:10.000Z | src/search/serializers/combined.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 4 | 2021-05-17T13:47:53.000Z | 2022-02-12T10:48:21.000Z | from rest_framework import serializers
from django.utils.html import strip_tags
from search.serializers import (
PersonDocumentSerializer,
HubDocumentSerializer,
PaperDocumentSerializer,
PostDocumentSerializer,
)
class CombinedSerializer(serializers.BaseSerializer):
    """Dispatch Elasticsearch hits to the serializer for their source index."""

    # Maps an ES index name to the serializer class that renders its hits.
    index_serializers = {
        'person': PersonDocumentSerializer,
        'hub': HubDocumentSerializer,
        'paper': PaperDocumentSerializer,
        'post': PostDocumentSerializer,
    }

    def __init__(self, *args, **kwargs):
        # Default to serializing a collection unless the caller opts out.
        many = kwargs.pop('many', True)
        super().__init__(many=many, *args, **kwargs)

    def to_representation(self, obj):
        return self.get_hit(obj)

    def get_hit(self, obj):
        """Serialize *obj* with its index's serializer; None for unknown indices."""
        serializer_cls = self.index_serializers.get(obj.meta.index)
        if serializer_cls is None:
            return None
        return serializer_cls(obj).data
| 27.771429 | 76 | 0.688272 |
66faa05e0ea9eb4f3e32d250cb773026187b34ce | 2,728 | py | Python | Python/CheckiO/Home/Pawn Brotherhood.py | bhupendpatil/Practice | 9663b3f41e359787cbbd04aedb3db3c605c6ec8e | [
"MIT"
] | 1 | 2020-12-23T06:22:29.000Z | 2020-12-23T06:22:29.000Z | Python/CheckiO/Home/Pawn Brotherhood.py | bhupendpatil/Practice | 9663b3f41e359787cbbd04aedb3db3c605c6ec8e | [
"MIT"
] | 8 | 2020-06-18T19:32:39.000Z | 2022-03-11T11:37:07.000Z | Python/CheckiO/Home/Pawn Brotherhood.py | bhupendpatil/Practice | 9663b3f41e359787cbbd04aedb3db3c605c6ec8e | [
"MIT"
] | 1 | 2021-01-19T00:16:34.000Z | 2021-01-19T00:16:34.000Z | """
Almost everyone in the world knows about the ancient game Chess and has at
least a basic understanding of its rules. It has various units with a wide
range of movement patterns allowing for a huge number of possible different
game positions (for example Number of possible chess games at the end of the
n-th plies.) For this mission, we will examine the movements and behavior of
chess pawns.
Chess is a two-player strategy game played on a checkered game board laid out
in eight rows (called ranks and denoted with numbers 1 to 8) and eight
columns (called files and denoted with letters a to h) of squares. Each
square of the chessboard is identified by a unique coordinate pair — a
letter and a number (e.g., "a1", "h8", "d6"). For this mission we only need
to concern ourselves with pawns. A pawn may capture an opponent's piece on
a square diagonally in front of it on an adjacent file, by moving to that
square. For white pawns the front squares are squares with a greater row than
theirs.
A pawn is generally a weak unit, but we have 8 of them which we can use to
build a pawn defense wall. With this strategy, one pawn defends the others.
A pawn is safe if another pawn can capture a unit on that square. We have
several white pawns on the chess board and only white pawns. You should design
your code to find how many pawns are safe.
pawns
You are given a set of square coordinates where we have placed white pawns.
You should count how many pawns are safe.
Input: Placed pawns coordinates as a set of strings.
Output: The number of safe pawns as a integer.
Example:
safe_pawns({"b4", "d4", "f4", "c3", "e3", "g5", "d2"}) == 6
safe_pawns({"b4", "c4", "d4", "e4", "f4", "g4", "e5"}) == 1
1
2
How it is used: For a game AI one of the important tasks is the
ability to estimate game state. This concept will show how you can
do this on the simple chess figures positions.
Precondition:
0 < pawns ≤ 8
"""
def safe_pawns(pawns):
    """Count white pawns that are defended by another white pawn.

    A white pawn on square (x, y) defends the squares (x - 1, y + 1) and
    (x + 1, y + 1). A pawn is "safe" if at least one other pawn defends
    its square.

    :param pawns: set of algebraic coordinates, e.g. {"b4", "d4"}
    :return: number of safe pawns
    """
    def get_coord(square):
        # "b4" -> (2, 4): file letter maps to 1..8, rank digit to int.
        return ord(square[0]) - 96, int(square[1])

    def defended_squares(x, y):
        # Squares this pawn protects, restricted to the board (files/ranks 1..8).
        # Fix: the original tested ``x - 1 <= 8`` (always true) instead of
        # keeping the square on the board; harmless only because of board
        # padding, but logically wrong.
        return [
            (nx, y + 1)
            for nx in (x - 1, x + 1)
            if 1 <= nx <= 8 and y + 1 <= 8
        ]

    defended = set()
    for pawn in pawns:
        defended.update(defended_squares(*get_coord(pawn)))
    return sum(1 for pawn in pawns if get_coord(pawn) in defended)
def test_function():
    """Smoke-test safe_pawns against the examples from the module docstring."""
    assert safe_pawns({"b4", "d4", "f4", "c3", "e3", "g5", "d2"}) == 6
    assert safe_pawns({"b4", "c4", "d4", "e4", "f4", "g4", "e5"}) == 1
    # A lone pawn can never be defended.
    assert safe_pawns({"e8"}) == 0
| 34.974359 | 78 | 0.677786 |
1c2e3f6cda81575e2f211c0c19990a5adcb75fb9 | 2,608 | py | Python | src/third_party/wiredtiger/test/suite/test_upgrade.py | hgGeorg/mongo | b5bea92504b2612f433b55e7b901f9ae276d11ec | [
"Apache-2.0"
] | 1 | 2020-01-01T06:16:58.000Z | 2020-01-01T06:16:58.000Z | src/third_party/wiredtiger/test/suite/test_upgrade.py | Man1029/CMONGO | c40380caa14e05509f46993aa8b8da966b09b0b5 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/third_party/wiredtiger/test/suite/test_upgrade.py | Man1029/CMONGO | c40380caa14e05509f46993aa8b8da966b09b0b5 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Public Domain 2014-2016 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import os, time
import wiredtiger, wttest
from helper import complex_populate, simple_populate
from wtscenario import make_scenarios
# test_upgrade.py
# session level upgrade operation
class test_upgrade(wttest.WiredTigerTestCase):
    """Exercise session-level upgrade on both file and table data sources."""

    name = 'test_upgrade'

    # Run every test once per data-source URI scheme.
    scenarios = make_scenarios([
        ('file', dict(uri='file:')),
        ('table', dict(uri='table:'))
    ])

    # Populate an object, then upgrade it.
    def upgrade(self, populate, with_cursor):
        """Populate ``self.uri + self.name`` via *populate*, then upgrade and drop it.

        When *with_cursor* is true, first verify that dropping the object
        fails while a cursor is still open on it.
        """
        uri = self.uri + self.name
        populate(self, uri, 'key_format=S', 10)

        # Open cursors should cause failure.
        if with_cursor:
            cursor = self.session.open_cursor(uri, None, None)
            self.assertRaises(wiredtiger.WiredTigerError,
                lambda: self.session.drop(uri, None))
            cursor.close()

        self.session.upgrade(uri, None)
        self.session.drop(uri)

    # Test upgrade of an object.
    def test_upgrade(self):
        """Upgrade simple objects, and complex multi-file objects for tables."""
        # Simple file or table object.
        self.upgrade(simple_populate, False)
        self.upgrade(simple_populate, True)

        # A complex, multi-file table object.
        if self.uri == "table:":
            self.upgrade(complex_populate, False)
            self.upgrade(complex_populate, True)
if __name__ == '__main__':
wttest.run()
| 36.222222 | 73 | 0.705521 |
0a042108a7cfc503a99198641738ac7b96a4d8e5 | 1,536 | py | Python | GUI.py | Babar-Awan/CP19_05 | 5d852cc4bac724aba3acec6bcefc2e3a1d3b0a58 | [
"MIT"
] | null | null | null | GUI.py | Babar-Awan/CP19_05 | 5d852cc4bac724aba3acec6bcefc2e3a1d3b0a58 | [
"MIT"
] | null | null | null | GUI.py | Babar-Awan/CP19_05 | 5d852cc4bac724aba3acec6bcefc2e3a1d3b0a58 | [
"MIT"
] | null | null | null | from tkinter import *
root = Tk()
root.geometry("1000x600")
root.title("Face Detection In Real Time")
def testing():
print("TESTING")
def save():
print ("YOUR DATA IS SAVE")
f1 = Frame(root, bg = "black", borderwidth = 1 , relief = GROOVE)
f1.pack(side = TOP, fill="x")
f2 = Frame(root, bg = "black", borderwidth = 5 , relief = GROOVE)
f2.pack(side = BOTTOM, fill="x")
f3 = Frame(root, bg = "black", borderwidth = 1 , relief = GROOVE)
f3.pack(side = TOP, fill="y")
l2 = Label(f1, text = " FACE DETECTON ",bg = "black" , fg = "white" , font = ("Arial",30,"bold"))
l2.pack()
l3 = Label(f2, text = "Teachers: Sir Asim Imdad, Sir Adil Rao, Miss Ishrat Fatima ", bg = "black", fg="white" , font = ("Arial",10,"bold") )
l3.pack(side=LEFT)
l3 = Label(f2, text = "Members: Hassan Ali, Arsalan Ahmed, Haziq Ahmed, Babar Awan, Farooq Arman ", bg = "black", fg="white" , font = ("Arial",10,"bold") )
l3.pack(side=RIGHT)
B3 = Button(root, text ="DATA SAVE", bg = "Black", fg="white", height = 2, width = 10 ,font = ("Arial",10,"bold"), command=save )
B3.pack(side=RIGHT, anchor="sw", padx=20, pady=20)
B2 = Button(root, text ="CLOSE", bg = "Black", fg="white", height = 2, width = 10 ,font = ("Arial",10,"bold"), command=root.destroy )
B2.pack(side=RIGHT, anchor="sw", padx=20, pady=20)
B1 = Button(root, text ="START", bg = "Black", fg="white", height = 2, width = 10,font = ("Arial",10,"bold"), command=testing )
B1.pack(side=RIGHT, anchor="sw", padx=20, pady=20)
root.mainloop()
| 37.463415 | 162 | 0.610677 |
adb866e58b58cb14a0569bfa260224046f323049 | 4,589 | py | Python | napari/utils/io.py | SaraLatif99/napari | b17235ee77d30e58492368a73d7c8d8189397fa4 | [
"BSD-3-Clause"
] | null | null | null | napari/utils/io.py | SaraLatif99/napari | b17235ee77d30e58492368a73d7c8d8189397fa4 | [
"BSD-3-Clause"
] | null | null | null | napari/utils/io.py | SaraLatif99/napari | b17235ee77d30e58492368a73d7c8d8189397fa4 | [
"BSD-3-Clause"
] | null | null | null | import os
from glob import glob
from pathlib import Path
import numpy as np
from skimage import io
from skimage.io.collection import alphanumeric_key
from dask import delayed
from dask import array as da
def magic_imread(filenames, *, use_dask=None, stack=True):
    """Dispatch the appropriate reader given some files.

    The files are assumed to all have the same shape.

    Parameters
    ----------
    filenames : list
        List of filenames or directories to be opened.
        A list of `pathlib.Path` objects and a single filename or `Path` object
        are also accepted.
    use_dask : bool
        Whether to use dask to create a lazy array, rather than NumPy.
        Default of None will resolve to True if filenames contains more than
        one image, False otherwise.
    stack : bool
        Whether to stack the images in multiple files into a single array. If
        False, a list of arrays will be returned.

    Returns
    -------
    image : array-like
        Array or list of images
    """
    # cast Path to string
    if isinstance(filenames, Path):
        filenames = filenames.as_posix()

    if len(filenames) == 0:
        return None
    if isinstance(filenames, str):
        filenames = [filenames]  # ensure list

    # replace folders with their contents
    filenames_expanded = []
    for filename in filenames:
        ext = os.path.splitext(filename)[-1]
        # zarr files are folders, but should be read as 1 file
        if os.path.isdir(filename) and not ext == '.zarr':
            dir_contents = sorted(
                glob(os.path.join(filename, '*.*')), key=alphanumeric_key
            )
            # remove subdirectories
            dir_contents_files = filter(
                lambda f: not os.path.isdir(f), dir_contents
            )
            filenames_expanded.extend(dir_contents_files)
        else:
            filenames_expanded.append(filename)

    # Lazy loading defaults to on only when there are multiple files.
    if use_dask is None:
        use_dask = len(filenames_expanded) > 1

    # then, read in images
    images = []
    shape = None  # shape of the first image read; reused to wrap later reads
    for filename in filenames_expanded:
        ext = os.path.splitext(filename)[-1]
        if ext == '.zarr':
            image, zarr_shape = read_zarr_dataset(filename)
            if shape is None:
                shape = zarr_shape
        else:
            if shape is None:
                # Read the first image eagerly to learn shape and dtype.
                image = io.imread(filename)
                shape = image.shape
                dtype = image.dtype
            # NOTE(review): if the first file is a .zarr (which sets ``shape``
            # but never ``dtype``) and a later file reaches this dask branch,
            # ``dtype`` is unbound -- verify mixed zarr/image inputs.
            if use_dask:
                image = da.from_delayed(
                    delayed(io.imread)(filename), shape=shape, dtype=dtype
                )
            elif len(images) > 0:  # not read by shape clause
                image = io.imread(filename)
        images.append(image)
    if len(images) == 1:
        image = images[0]
    else:
        if stack:
            if use_dask:
                image = da.stack(images)
            else:
                try:
                    image = np.stack(images)
                except ValueError as e:
                    if 'input arrays must have the same shape' in str(e):
                        msg = (
                            'To stack multiple files into a single array with '
                            'numpy, all input arrays must have the same shape.'
                            ' Set `use_dask` to True to stack arrays with '
                            'different shapes.'
                        )
                        raise ValueError(msg) from e
                    # NOTE(review): other ValueErrors are swallowed here,
                    # leaving ``image`` as the last file read -- confirm
                    # whether a bare ``raise`` was intended.
        else:
            image = images  # return a list
    return image
def read_zarr_dataset(path):
    """Read a zarr dataset: a single array, or a group of arrays (pyramid).

    Parameters
    ----------
    path : str
        Path to a directory ending in '.zarr' containing either an array
        (a ``.zarray`` marker) or a group of arrays (a ``.zgroup`` marker).

    Returns
    -------
    image : array-like
        Array or list of arrays
    shape : tuple
        Shape of the array, or of the first array in the list
    """
    if os.path.exists(os.path.join(path, '.zarray')):
        # Single zarr array.
        data = da.from_zarr(path)
        return data, data.shape
    if os.path.exists(os.path.join(path, '.zgroup')):
        # Group: recursively load every non-hidden child (pyramid levels).
        arrays = [
            read_zarr_dataset(os.path.join(path, child))[0]
            for child in sorted(os.listdir(path))
            if not child.startswith('.')
        ]
        return arrays, arrays[0].shape
    raise ValueError(f"Not a zarr dataset or group: {path}")
| 32.778571 | 79 | 0.568969 |
130347d21271b025a1f9976bad75573c476c5a0c | 51 | py | Python | tests/naif_pds4_bundler/functional/__init__.py | NASA-PDS/naif-pds4-bundler | bd7207d157ec9cae60f42cb9ea387ac194b1671c | [
"Apache-2.0"
] | null | null | null | tests/naif_pds4_bundler/functional/__init__.py | NASA-PDS/naif-pds4-bundler | bd7207d157ec9cae60f42cb9ea387ac194b1671c | [
"Apache-2.0"
] | null | null | null | tests/naif_pds4_bundler/functional/__init__.py | NASA-PDS/naif-pds4-bundler | bd7207d157ec9cae60f42cb9ea387ac194b1671c | [
"Apache-2.0"
] | null | null | null | """NAIF PDS4 Bundle Functional Tests Namespace."""
| 25.5 | 50 | 0.745098 |
b4e2198cc81da4801731a61be781984e21111e42 | 4,525 | py | Python | npipes/triggers/sqs.py | praxik/nPipes | 4edf8fa0d0467e3455941c46e960fdf3f43e2d31 | [
"Apache-2.0"
] | null | null | null | npipes/triggers/sqs.py | praxik/nPipes | 4edf8fa0d0467e3455941c46e960fdf3f43e2d31 | [
"Apache-2.0"
] | null | null | null | npipes/triggers/sqs.py | praxik/nPipes | 4edf8fa0d0467e3455941c46e960fdf3f43e2d31 | [
"Apache-2.0"
] | null | null | null | # -*- mode: python;-*-
from ..message.header import Message
from ..outcome import Outcome, Success, Failure
from ..message.header import Encoding, EncodingPlainText, EncodingGzB64, S3Asset, AssetSettings, Decompression, BodyInString, BodyInAsset
from ..assethandlers.assets import randomName
from ..assethandlers.s3utils import uploadData
from ..assethandlers.s3path import S3Path
from ..message.ezqconverter import toEzqOrJsonLines
from typing import Generator, List
import boto3
import hashlib
import gzip
from base64 import b64encode
def sendMessage(queuename:str, overflowPath:str, message:Message) -> Outcome[str, None]:
    """Sends *message* to SQS queue *queuename*

    If *message* is larger than max size permitted by SQS, the *Body* is
    sent to *overflowPath* in S3 and *message* is altered to reflect the
    change.

    **overflowPath** should be of the form "s3://bucket/my/prefix". The
    actual message body will then be written to
    "s3://bucket/my/prefix/some_random_name.gz"

    Returns Success(None) when SQS confirms the body MD5, else Failure(str).
    """
    try:
        sqs = boto3.resource("sqs")
        queue = sqs.get_queue_by_name(QueueName=queuename)
        # Fix: overflow() exactly once. The original computed
        # ``messageAutoflowed`` and then called overflow() a second time to
        # build the body; since overflow() can upload the body to S3 under a
        # random name, the duplicate call could upload two copies.
        messageAutoflowed = overflow(message, overflowPath)
        messageBody = toEzqOrJsonLines(messageAutoflowed)
        # Probably want to maintain an md5 of the overflowed body in
        # the message as well so the receiving side can check that it
        # has everything.
        md5 = hashlib.md5(messageBody.encode("utf-8")).hexdigest()
        response = queue.send_message(MessageBody=messageBody)
        if response.get("MD5OfMessageBody") == md5:
            return Success(None)
        else:
            return Failure("Enqueued message MD5 does not match what was sent")
    except Exception as err:
        return Failure("Unable to send SQS message: {}".format(err))
def overflow(message:Message, overflowPath:str) -> Message:
    """Return *message*, rewritten if necessary so it fits in an SQS message.

    Oversized string bodies are first gzip+base64 encoded in place; if that
    is still too large, the gzipped body is uploaded to S3 under
    *overflowPath* and the message is rewritten to reference it via a
    ``BodyInAsset``. Messages whose body is already an asset are returned
    unchanged.
    """
    # If body is already in an asset, there's nothing we can do here.
    # Fix: read the body from the *instance*; the original referenced the
    # class attribute ``Message.body``, so the isinstance check below never
    # matched and the overflow logic never ran.
    body = message.body
    if isinstance(body, BodyInString):
        # SQS accepts messages up to 256kB (262,144 B), *including* the SQS
        # header data. The size of the SQS header is unspecified, but is
        # unlikely to be > 2,144B ... probably maybe. Hence the choice of 260000 here:
        overAmount = len(message.toJsonLines().encode()) - 260000
        if overAmount > 0:
            # Message is too big to fit in SQS. Try two things:
            # 1. gzip the base64encode the body string. If that gets us
            #    under the bar, then we go with that. NOTE: gzip, not plain zlib,
            #    so bytes can be written to file as proper .gz
            # 2. If the above fails, we take the gzip bytestring (no b64 stuff), send
            #    it to overflowPath in S3, then re-jigger the Message to reference
            #    a BodyInAsset.
            bodyBytes = body.string.encode()
            gzBodyBytes = gzip.compress(bodyBytes, compresslevel=9)
            b64BodyBytes = b64encode(gzBodyBytes)
            # So...did the compression get us under the threshold?
            # NOTE(review): this compares against ``overAmount - 23`` (23 being
            # the extra bytes of encoding info); verify the intended inequality
            # -- shrinking by ``overAmount`` requires
            # len(b64) < len(body) - overAmount.
            if len(b64BodyBytes) < (overAmount - 23):
                newBody = BodyInString(b64BodyBytes.decode(), encoding=EncodingGzB64())
                return message._with([(".body", newBody)])
            else:
                # Have to overflow to S3
                fname = randomName()
                s3Path = S3Path(overflowPath).add(fname)
                uploadData(gzBodyBytes, s3Path)
                asset = S3Asset(s3Path, AssetSettings(id="AutoOverflow",
                                                      decompression=Decompression(True)))
                # Attach the new asset to the current (first) step and point
                # the header's body at it.
                oldsteps = message.header.steps
                oldstep = oldsteps[0]
                newstep = oldstep._with([(".assets", oldstep.assets + [asset])])
                newsteps = [newstep] + oldsteps[1:]
                return message._with([(".header.steps", newsteps),
                                      (".header.body", BodyInAsset(assetId="AutoOverflow"))])
        # We don't check the message at this point to see if we're truly under
        # size now. That's because we're not going to put header information into
        # S3. If someone has dreamed up a workflow that results in a *Header* that
        # is 256kiB...good grief.
        else:
            return message
    else:
        return message
| 48.655914 | 137 | 0.621878 |
a64e4680a0cbeba652da09c0d4fb9a1524203da3 | 781 | py | Python | x_com/evacuation_priorities.py | SashaNullptr/X-COM-UFO-Defense | 9ae394c9c593428d072b51a5e1cedad47876a452 | [
"MIT"
] | null | null | null | x_com/evacuation_priorities.py | SashaNullptr/X-COM-UFO-Defense | 9ae394c9c593428d072b51a5e1cedad47876a452 | [
"MIT"
] | null | null | null | x_com/evacuation_priorities.py | SashaNullptr/X-COM-UFO-Defense | 9ae394c9c593428d072b51a5e1cedad47876a452 | [
"MIT"
] | null | null | null | import psycopg2
from x_com import config
def evac_priorties():
    """Return the top ten US cities ranked by UFO-sighting count.

    Queries the ``ufo_data`` table and builds
    ``{"sightings": [{"city": ..., "count": ...}, ...]}``; the list is
    empty when the query fails or returns no rows.
    """
    sightings_by_city_query = "SELECT city, COUNT(*) FROM ufo_data WHERE country LIKE \'us\' GROUP BY city ORDER BY COUNT(*) DESC;"
    sightings = []
    conn = None  # Fix: guarantee ``conn`` exists for the finally block below.
    try:
        params = config()
        conn = psycopg2.connect(**params)
        cur = conn.cursor()
        cur.execute(sightings_by_city_query)
        # Fix: fetchmany(10) tolerates result sets with fewer than 10 rows;
        # the original called fetchone() ten times and crashed on None.
        sightings = [
            {"city": city, "count": count} for city, count in cur.fetchmany(10)
        ]
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()
return {"sightings":sightings} | 24.40625 | 131 | 0.597951 |
45482879576e9c88a0eabe1c0d9bdeabd68f4f53 | 5,780 | py | Python | examples/test_slack.py | q0w/snug | a9de335b48d96190a2bfe5e606830c4a60cb5705 | [
"MIT"
] | 123 | 2018-01-23T17:29:29.000Z | 2022-02-11T06:57:57.000Z | examples/test_slack.py | q0w/snug | a9de335b48d96190a2bfe5e606830c4a60cb5705 | [
"MIT"
] | 274 | 2018-01-25T07:17:55.000Z | 2022-01-20T07:37:10.000Z | examples/test_slack.py | q0w/snug | a9de335b48d96190a2bfe5e606830c4a60cb5705 | [
"MIT"
] | 5 | 2017-11-26T21:31:12.000Z | 2021-11-28T10:19:57.000Z | import json
from pathlib import Path
import aiohttp
import pytest
import slack
import snug
from gentools import sendreturn
live = pytest.config.getoption('--live')
CRED_PATH = Path('~/.snug/slack.json').expanduser()
token = CRED_PATH.read_text().strip()
@pytest.fixture(scope='module')
async def exec():
    """Yield an async snug executor authenticated with the slack token."""
    async with aiohttp.ClientSession() as client:
        yield snug.async_executor(auth=slack.token_auth(token),
                                  client=client)
@pytest.mark.asyncio
async def test_channel_list(exec):
    """channels.list_: request shape and parsing of a canned response."""
    lookup = slack.channels.list_(exclude_archived=True)

    # Only hit the real API when --live was passed.
    if live:
        result = await exec(lookup)
        assert isinstance(result.content[0], slack.Channel)

    # Drive the query generator by hand with a canned HTTP response.
    query = iter(lookup)
    req = next(query)
    assert req.url.endswith('channels.list')
    assert req.params['exclude_archived'] == 'true'
    outcome = sendreturn(query, snug.Response(200, CHANNEL_LIST_RESULT))
    assert isinstance(outcome[0], slack.Channel)
    assert len(outcome[0].members) == 2
    assert outcome.next_query.cursor == "dGVhbTpDMUg5UkVTR0w="
@pytest.mark.asyncio
async def test_channel_create(exec):
    """channels.create: POST request shape and parsing of a canned response."""
    create = slack.channels.create('test channel')

    query = iter(create)
    req = next(query)
    assert req.method == 'POST'
    assert req.url.endswith('channels.create')
    assert req.headers['Content-Type'] == 'application/json'
    assert json.loads(req.content) == {
        'name': 'test channel'
    }
    channel = sendreturn(query, snug.Response(200, CREATE_CHANNEL_RESPONSE))
    assert isinstance(channel, slack.Channel)
    assert channel.id == 'C0DEL09A5'
@pytest.mark.asyncio
async def test_post_chat_message(exec):
    """chat.postMessage: POST request shape and parsing of a canned response."""
    post = slack.chat.post_message('#python', 'test message')

    query = iter(post)
    req = next(query)
    assert req.method == 'POST'
    assert req.url.endswith('chat.postMessage')
    assert req.headers['Content-Type'] == 'application/json'
    assert json.loads(req.content) == {
        'channel': '#python',
        'text': 'test message'
    }
    msg = sendreturn(query, snug.Response(200, POST_MESSAGE_RESPONSE))
    assert isinstance(msg, slack.Message)
    assert msg.text == 'Here\'s a message for you'
CHANNEL_LIST_RESULT = b'''\
{
"ok": true,
"channels": [
{
"id": "C0G9QF9GW",
"name": "random",
"is_channel": true,
"created": 1449709280,
"creator": "U0G9QF9C6",
"is_archived": false,
"is_general": false,
"name_normalized": "random",
"is_shared": false,
"is_org_shared": false,
"is_member": true,
"is_private": false,
"is_mpim": false,
"members": [
"U0G9QF9C6",
"U0G9WFXNZ"
],
"topic": {
"value": "Other stuff",
"creator": "U0G9QF9C6",
"last_set": 1449709352
},
"purpose": {
"value": "A place for non-work-related flimflam, faffing, \
hodge-podge or jibber-jabber you'd prefer to keep out of more focused \
work-related channels.",
"creator": "",
"last_set": 0
},
"previous_names": [],
"num_members": 2
},
{
"id": "C0G9QKBBL",
"name": "general",
"is_channel": true,
"created": 1449709280,
"creator": "U0G9QF9C6",
"is_archived": false,
"is_general": true,
"name_normalized": "general",
"is_shared": false,
"is_org_shared": false,
"is_member": true,
"is_private": false,
"is_mpim": false,
"members": [
"U0G9QF9C6",
"U0G9WFXNZ"
],
"topic": {
"value": "Talk about anything!",
"creator": "U0G9QF9C6",
"last_set": 1449709364
},
"purpose": {
"value": "To talk about anything!",
"creator": "U0G9QF9C6",
"last_set": 1449709334
},
"previous_names": [],
"num_members": 2
}
],
"response_metadata": {
"next_cursor": "dGVhbTpDMUg5UkVTR0w="
}
}
'''
CREATE_CHANNEL_RESPONSE = b'''\
{
"ok": true,
"channel": {
"id": "C0DEL09A5",
"name": "endeavor",
"is_channel": true,
"created": 1502833204,
"creator": "U061F7AUR",
"is_archived": false,
"is_general": false,
"name_normalized": "endeavor",
"is_shared": false,
"is_org_shared": false,
"is_member": true,
"is_private": false,
"is_mpim": false,
"last_read": "0000000000.000000",
"latest": null,
"unread_count": 0,
"unread_count_display": 0,
"members": [
"U061F7AUR"
],
"topic": {
"value": "",
"creator": "",
"last_set": 0
},
"purpose": {
"value": "",
"creator": "",
"last_set": 0
},
"previous_names": []
}
}
'''
POST_MESSAGE_RESPONSE = b'''\
{
"ok": true,
"channel": "C1H9RESGL",
"ts": "1503435956.000247",
"message": {
"text": "Here's a message for you",
"username": "ecto1",
"bot_id": "B19LU7CSY",
"attachments": [
{
"text": "This is an attachment",
"id": 1,
"fallback": "This is an attachment's fallback"
}
],
"type": "message",
"subtype": "bot_message",
"ts": "1503435956.000247"
}
}
'''
| 26.759259 | 76 | 0.520761 |
ef92e34df321c4752b1147b9c0374a0ab069b226 | 26,874 | py | Python | setup.py | zclimes/airflow | 2fb68342b01da4cb5d79ac9e5c0f7687d74351f3 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | setup.py | zclimes/airflow | 2fb68342b01da4cb5d79ac9e5c0f7687d74351f3 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | setup.py | zclimes/airflow | 2fb68342b01da4cb5d79ac9e5c0f7687d74351f3 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Setup.py for the Airflow project."""
import logging
import os
import subprocess
import unittest
from os.path import dirname
from textwrap import wrap
from typing import Dict, List, Set, Tuple
from setuptools import Command, Distribution, find_namespace_packages, setup
logger = logging.getLogger(__name__)
version = '2.1.0.dev0'
my_dir = dirname(__file__)
def airflow_test_suite():
    """Discover and return the suite of Airflow unit tests."""
    loader = unittest.TestLoader()
    return loader.discover(os.path.join(my_dir, 'tests'), pattern='test_*.py')
class CleanCommand(Command):
    """
    Command to tidy up the project root.

    Registered as cmdclass in setup() so it can be called with ``python setup.py extra_clean``.
    """

    description = "Tidy up the project root"
    user_options = []  # type: List[str]

    def initialize_options(self):
        """Set default values for options."""

    def finalize_options(self):
        """Set final values for options."""

    def run(self):  # noqa
        """Run command to remove temporary files and directories."""
        # Work from the repo root so the relative globs below match.
        os.chdir(my_dir)
        os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
class CompileAssets(Command):
    """
    Compile and build the frontend assets using yarn and webpack.

    Registered as cmdclass in setup() so it can be called with ``python setup.py compile_assets``.
    """

    description = "Compile and build the frontend assets"
    user_options = []  # type: List[str]

    def initialize_options(self):
        """Set default values for options."""

    def finalize_options(self):
        """Set final values for options."""

    def run(self):  # noqa
        """Run a command to compile and build assets."""
        # Delegates the yarn/webpack build to the shell script in the webserver tree.
        subprocess.check_call('./airflow/www/compile_assets.sh')
class ListExtras(Command):
    """
    List all available extras

    Registered as cmdclass in setup() so it can be called with ``python setup.py list_extras``.
    """

    description = "List available extras"
    user_options = []  # type: List[str]

    def initialize_options(self):
        """Set default values for options."""

    def finalize_options(self):
        """Set final values for options."""

    def run(self):  # noqa
        """List extras."""
        # EXTRAS_REQUIREMENTS is the extras mapping defined later in this module;
        # wrap the comma-joined names to 100 columns for readable output.
        print("\n".join(wrap(", ".join(EXTRAS_REQUIREMENTS.keys()), 100)))
def git_version(version_: str) -> str:
    """Describe the git state backing this working tree.

    Produces ``.release:{version_}+{sha}`` when the checked-out commit is
    clean, ``.dev0+{sha}.dirty`` when uncommitted changes are present, and
    an empty string when gitpython or the repository is unavailable.

    :param str version_: Semver version
    :return: Found Airflow version in Git repo
    :rtype: str
    """
    try:
        import git
    except ImportError:
        logger.warning('gitpython not found: Cannot compute the git version.')
        return ''
    try:
        repo = git.Repo(os.path.join(my_dir, '.git'))
    except git.NoSuchPathError:
        logger.warning('.git directory not found: Cannot compute the git version')
        return ''
    except git.InvalidGitRepositoryError:
        logger.warning('Invalid .git directory not found: Cannot compute the git version')
        return ''
    if not repo:
        return 'no_git_version'
    sha = repo.head.commit.hexsha
    if repo.is_dirty():
        return f'.dev0+{sha}.dirty'
    # Working tree matches the commit exactly.
    return f'.release:{version_}+{sha}'
def write_version(filename: str = os.path.join(*[my_dir, "airflow", "git_version"])):
    """
    Write the Semver version + git hash to file, e.g. ".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65".

    :param str filename: Destination file to write
    """
    with open(filename, 'w') as file:
        # git_version() already returns the fully formatted suffix string.
        file.write("{}".format(git_version(version)))
# Use the sphinx theme wheel published on GitHub only when explicitly
# requested through the USE_THEME_FROM_GIT environment variable; otherwise
# the requirement below carries no direct-URL suffix.
_SPHINX_AIRFLOW_THEME_URL = (
    "@ https://github.com/apache/airflow-site/releases/download/0.0.4/"
    "sphinx_airflow_theme-0.0.4-py3-none-any.whl"
    if os.environ.get('USE_THEME_FROM_GIT')
    else ''
)
# 'Start dependencies group' and 'End dependencies group' are marks for ./scripts/ci/check_order_setup.py
# If you change this mark you should also change ./scripts/ci/check_order_setup.py
# Start dependencies group
# Each list below holds the third-party requirements of one provider or extra;
# they are wired into PROVIDERS_REQUIREMENTS / EXTRAS_REQUIREMENTS further down.
amazon = [
    'boto3>=1.15.0,<1.16.0',
    'botocore>=1.18.0,<1.19.0',
    'watchtower~=0.7.3',
]
apache_beam = [
    'apache-beam[gcp]',
]
async_packages = [
    'eventlet>= 0.9.7',
    'gevent>=0.13',
    'greenlet>=0.4.9',
]
atlas = [
    'atlasclient>=0.1.2',
]
azure = [
    'azure-batch>=8.0.0',
    'azure-cosmos>=3.0.1,<4',
    'azure-datalake-store>=0.0.45',
    'azure-identity>=1.3.1',
    'azure-keyvault>=4.1.0',
    'azure-kusto-data>=0.0.43,<0.1',
    'azure-mgmt-containerinstance>=1.5.0,<2.0',
    'azure-mgmt-datalake-store>=0.5.0',
    'azure-mgmt-resource>=2.2.0',
    'azure-storage>=0.34.0, <0.37.0',
]
cassandra = [
    'cassandra-driver>=3.13.0,<3.21.0',
]
celery = [
    'celery~=4.4.2',
    'flower>=0.7.3, <1.0',
    'vine~=1.3',  # https://stackoverflow.com/questions/32757259/celery-no-module-named-five
]
cgroups = [
    'cgroupspy>=0.1.4',
]
cloudant = [
    'cloudant>=2.0',
]
dask = ['cloudpickle>=1.4.1, <1.5.0', 'distributed>=2.11.1, <2.20']
databricks = [
    'requests>=2.20.0, <3',
]
datadog = [
    'datadog>=0.14.0',
]
doc = [
    'sphinx>=2.1.2',
    f'sphinx-airflow-theme{_SPHINX_AIRFLOW_THEME_URL}',
    'sphinx-argparse>=0.1.13',
    'sphinx-autoapi==1.0.0',
    'sphinx-copybutton',
    'sphinx-jinja~=1.1',
    'sphinx-rtd-theme>=0.1.6',
    'sphinxcontrib-httpdomain>=1.7.0',
    'sphinxcontrib-redoc>=1.6.0',
    'sphinxcontrib-spelling==5.2.1',
]
docker = [
    'docker~=3.0',
]
druid = [
    'pydruid>=0.4.1',
]
elasticsearch = [
    'elasticsearch>7, <7.6.0',
    'elasticsearch-dbapi==0.1.0',
    'elasticsearch-dsl>=5.0.0',
]
exasol = [
    'pyexasol>=0.5.1,<1.0.0',
]
facebook = [
    'facebook-business>=6.0.2',
]
flask_oauth = [
    'Flask-OAuthlib>=0.9.1,<0.9.6',  # Flask OAuthLib 0.9.6 requires Flask-Login 0.5.0 - breaks FAB
    'oauthlib!=2.0.3,!=2.0.4,!=2.0.5,<3.0.0,>=1.1.2',
    'requests-oauthlib<1.2.0',
]
google = [
    'PyOpenSSL',
    'google-ads>=4.0.0,<8.0.0',
    'google-api-python-client>=1.6.0,<2.0.0',
    'google-auth>=1.0.0,<2.0.0',
    'google-auth-httplib2>=0.0.1',
    'google-cloud-automl>=0.4.0,<2.0.0',
    'google-cloud-bigquery-datatransfer>=3.0.0,<4.0.0',
    'google-cloud-bigtable>=1.0.0,<2.0.0',
    'google-cloud-container>=0.1.1,<2.0.0',
    'google-cloud-datacatalog>=1.0.0,<2.0.0',
    'google-cloud-dataproc>=1.0.1,<2.0.0',
    'google-cloud-dlp>=0.11.0,<2.0.0',
    'google-cloud-kms>=2.0.0,<3.0.0',
    'google-cloud-language>=1.1.1,<2.0.0',
    'google-cloud-logging>=1.14.0,<2.0.0',
    'google-cloud-memcache>=0.2.0',
    'google-cloud-monitoring>=0.34.0,<2.0.0',
    'google-cloud-os-login>=2.0.0,<3.0.0',
    'google-cloud-pubsub>=2.0.0,<3.0.0',
    'google-cloud-redis>=2.0.0,<3.0.0',
    'google-cloud-secret-manager>=0.2.0,<2.0.0',
    'google-cloud-spanner>=1.10.0,<2.0.0',
    'google-cloud-speech>=0.36.3,<2.0.0',
    'google-cloud-storage>=1.30,<2.0.0',
    'google-cloud-tasks>=1.2.1,<2.0.0',
    'google-cloud-texttospeech>=0.4.0,<2.0.0',
    'google-cloud-translate>=1.5.0,<2.0.0',
    'google-cloud-videointelligence>=1.7.0,<2.0.0',
    'google-cloud-vision>=0.35.2,<2.0.0',
    'grpcio-gcp>=0.2.2',
    'json-merge-patch~=0.2',
    'pandas-gbq',
]
grpc = [
    'google-auth>=1.0.0, <2.0.0dev',
    'google-auth-httplib2>=0.0.1',
    'grpcio>=1.15.0',
]
hashicorp = [
    'hvac~=0.10',
]
hdfs = [
    'snakebite-py3',
]
hive = [
    'hmsclient>=0.1.0',
    'pyhive[hive]>=0.6.0',
    'thrift>=0.9.2',
]
jdbc = [
    'jaydebeapi>=1.1.1',
]
jenkins = [
    'python-jenkins>=1.0.0',
]
jira = [
    'JIRA>1.0.7',
]
kerberos = [
    'pykerberos>=1.1.13',
    'requests_kerberos>=0.10.0',
    'thrift_sasl>=0.2.0',
]
kubernetes = [
    'cryptography>=2.0.0',
    'kubernetes>=3.0.0, <12.0.0',
]
kylin = ['kylinpy>=2.6']
ldap = [
    'ldap3>=2.5.1',
    'python-ldap',
]
mongo = [
    'dnspython>=1.13.0,<2.0.0',
    'pymongo>=3.6.0',
]
mssql = [
    'pymssql~=2.1,>=2.1.5',
]
mysql = [
    'mysql-connector-python>=8.0.11, <=8.0.22',
    'mysqlclient>=1.3.6,<1.4',
]
odbc = [
    'pyodbc',
]
oracle = [
    'cx_Oracle>=5.1.2',
]
pagerduty = [
    'pdpyras>=4.1.2,<5',
]
papermill = [
    'nteract-scrapbook[all]>=0.3.1',
    'papermill[all]>=1.2.1',
]
password = [
    'bcrypt>=2.0.0',
    'flask-bcrypt>=0.7.1',
]
pinot = [
    # pinotdb v0.1.1 may still work with older versions of Apache Pinot, but we've confirmed that it
    # causes a problem with newer versions.
    'pinotdb>0.1.2,<1.0.0',
]
plexus = [
    'arrow>=0.16.0',
]
postgres = [
    'psycopg2-binary>=2.7.4',
]
presto = ['presto-python-client>=0.7.0,<0.8']
qubole = [
    'qds-sdk>=1.10.4',
]
rabbitmq = [
    'amqp<5.0.0',
]
redis = [
    'redis~=3.2',
]
salesforce = [
    'simple-salesforce>=1.0.0',
]
samba = [
    'pysmbclient>=0.1.3',
]
segment = [
    'analytics-python>=1.2.9',
]
sendgrid = [
    'sendgrid>=6.0.0,<7',
]
sentry = [
    'blinker>=1.1',
    'sentry-sdk>=0.8.0',
]
singularity = ['spython>=0.0.56']
slack = [
    'slackclient>=2.0.0,<3.0.0',
]
snowflake = [
    # The `azure` provider uses legacy `azure-storage` library, where `snowflake` uses the
    # newer and more stable versions of those libraries. Most of `azure` operators and hooks work
    # fine together with `snowflake` because the deprecated library does not overlap with the
    # new libraries except the `blob` classes. So while `azure` works fine for most cases
    # blob is the only exception
    # Solution to that is being worked on in https://github.com/apache/airflow/pull/12188
    # once it is merged, we can move those two back to `azure` extra.
    'azure-storage-blob',
    'azure-storage-common',
    # snowflake is not compatible with latest version.
    # This library monkey patches the requests library, so SSL is broken globally.
    # See: https://github.com/snowflakedb/snowflake-connector-python/issues/324
    'requests<2.24.0',
    # Newest version drop support for old version of azure-storage-blob
    # Until #12188 is solved at least we need to limit maximum version.
    # https://github.com/apache/airflow/pull/12188
    'snowflake-connector-python>=1.5.2,<=2.3.6',
    'snowflake-sqlalchemy>=1.1.0',
]
spark = [
    'pyspark',
]
ssh = [
    'paramiko>=2.6.0',
    'pysftp>=0.2.9',
    'sshtunnel>=0.1.4,<0.2',
]
statsd = [
    'statsd>=3.3.0, <4.0',
]
tableau = [
    'tableauserverclient~=0.12',
]
telegram = [
    'python-telegram-bot==13.0',
]
vertica = [
    'vertica-python>=0.5.1',
]
virtualenv = [
    'virtualenv',
]
webhdfs = [
    'hdfs[avro,dataframe,kerberos]>=2.0.4',
]
winrm = [
    'pywinrm~=0.4',
]
yandex = [
    'yandexcloud>=0.22.0',
]
zendesk = [
    'zdesk',
]
# End dependencies group
# End dependencies group
############################################################################################################
# IMPORTANT NOTE!!!!!!!!!!!!!!!
# IF you are removing dependencies from this list, please make sure that you also increase
# DEPENDENCIES_EPOCH_NUMBER in the Dockerfile.ci
############################################################################################################
devel = [
'beautifulsoup4~=4.7.1',
'black',
'blinker',
'bowler',
'click~=7.1',
'coverage',
'docutils',
'flake8>=3.6.0',
'flake8-colors',
'flaky',
'freezegun',
'github3.py',
'gitpython',
'importlib-resources~=1.4',
'ipdb',
'jira',
'jsonpath-ng',
# HACK: Moto is not compatible with newer versions
# See: https://github.com/spulec/moto/issues/3535
'mock<4.0.3',
'mongomock',
'moto',
'mypy==0.770',
'parameterized',
'paramiko',
'pipdeptree',
'pre-commit',
'pylint==2.6.0',
'pysftp',
'pytest',
'pytest-cov',
'pytest-instafail',
'pytest-rerunfailures',
'pytest-timeouts',
'pytest-xdist',
'pywinrm',
'qds-sdk>=1.9.6',
'requests_mock',
'testfixtures',
'wheel',
'yamllint',
]
############################################################################################################
# IMPORTANT NOTE!!!!!!!!!!!!!!!
# If you are removing dependencies from the above list, please make sure that you also increase
# DEPENDENCIES_EPOCH_NUMBER in the Dockerfile.ci
############################################################################################################
devel_minreq = cgroups + devel + doc + kubernetes + mysql + password
devel_hadoop = devel_minreq + hdfs + hive + kerberos + presto + webhdfs
############################################################################################################
# IMPORTANT NOTE!!!!!!!!!!!!!!!
# If you have a 'pip check' problem with dependencies, it might be because some dependency has been
# installed via 'install_requires' in setup.cfg in higher version than required in one of the options below.
# For example pip check was failing with requests=2.25.1 installed even if in some dependencies below
# < 2.24.0 was specified for it. Solution in such case is to add such limiting requirement to
# install_requires in setup.cfg (we've added requests<2.24.0 there to limit requests library).
# This should be done with appropriate comment explaining why the requirement was added.
############################################################################################################
# Dict of all providers which are part of the Apache Airflow repository together with their requirements
PROVIDERS_REQUIREMENTS: Dict[str, List[str]] = {
    'amazon': amazon,
    'apache.cassandra': cassandra,
    'apache.druid': druid,
    'apache.hdfs': hdfs,
    'apache.hive': hive,
    'apache.kylin': kylin,
    'apache.livy': [],
    'apache.pig': [],
    'apache.pinot': pinot,
    'apache.spark': spark,
    'apache.sqoop': [],
    'celery': celery,
    'cloudant': cloudant,
    'cncf.kubernetes': kubernetes,
    'databricks': databricks,
    'datadog': datadog,
    'dingding': [],
    'discord': [],
    'docker': docker,
    'elasticsearch': elasticsearch,
    'exasol': exasol,
    'facebook': facebook,
    'ftp': [],
    'google': google,
    'grpc': grpc,
    'hashicorp': hashicorp,
    'http': [],
    'imap': [],
    'jdbc': jdbc,
    'jenkins': jenkins,
    'jira': jira,
    'microsoft.azure': azure,
    'microsoft.mssql': mssql,
    'microsoft.winrm': winrm,
    'mongo': mongo,
    'mysql': mysql,
    'odbc': odbc,
    'openfaas': [],
    'opsgenie': [],
    'oracle': oracle,
    'pagerduty': pagerduty,
    'papermill': papermill,
    'plexus': plexus,
    'postgres': postgres,
    'presto': presto,
    'qubole': qubole,
    'redis': redis,
    'salesforce': salesforce,
    'samba': samba,
    'segment': segment,
    'sendgrid': sendgrid,
    'sftp': ssh,
    'singularity': singularity,
    'slack': slack,
    'snowflake': snowflake,
    'sqlite': [],
    'ssh': ssh,
    'telegram': telegram,
    'vertica': vertica,
    'yandex': yandex,
    'zendesk': zendesk,
}
# Those are all extras which do not have own 'providers'
EXTRAS_REQUIREMENTS: Dict[str, List[str]] = {
    'apache.atlas': atlas,
    'apache.beam': apache_beam,
    'apache.webhdfs': webhdfs,
    'async': async_packages,
    'cgroups': cgroups,
    'dask': dask,
    'github_enterprise': flask_oauth,
    'google_auth': flask_oauth,
    'kerberos': kerberos,
    'ldap': ldap,
    'password': password,
    'rabbitmq': rabbitmq,
    'sentry': sentry,
    'statsd': statsd,
    'tableau': tableau,
    'virtualenv': virtualenv,
}
# Add extras for all providers. For all providers the extras name = providers name
for provider_name, provider_requirement in PROVIDERS_REQUIREMENTS.items():
    EXTRAS_REQUIREMENTS[provider_name] = provider_requirement
#############################################################################################################
# The whole section can be removed in Airflow 3.0 as those old aliases are deprecated in 2.* series
#############################################################################################################
# Dictionary of aliases from 1.10 - deprecated in Airflow 2.*
# Maps each old extra name to the new extra it now resolves to.
EXTRAS_DEPRECATED_ALIASES: Dict[str, str] = {
    'atlas': 'apache.atlas',
    'aws': 'amazon',
    'azure': 'microsoft.azure',
    'cassandra': 'apache.cassandra',
    'crypto': '',  # All crypto requirements are installation requirements of core Airflow
    'druid': 'apache.druid',
    'gcp': 'google',
    'gcp_api': 'google',
    'hdfs': 'apache.hdfs',
    'hive': 'apache.hive',
    'kubernetes': 'cncf.kubernetes',
    'mssql': 'microsoft.mssql',
    'pinot': 'apache.pinot',
    'qds': 'qubole',
    's3': 'amazon',
    'spark': 'apache.spark',
    'webhdfs': 'apache.webhdfs',
    'winrm': 'microsoft.winrm',
}
def find_requirements_for_alias(alias_to_look_for: Tuple[str, str]) -> List[str]:
    """
    Finds requirements for an alias.

    :param alias_to_look_for: pair of (deprecated extra name, new extra name)
    :return: requirement list of the extra the alias points to; empty when the
        alias maps to no extra (e.g. ``crypto``, which is a core requirement)
    :raises Exception: when the aliased extra does not exist
    """
    # Unpack the pair instead of indexing it -- clearer and fails fast on a bad shape.
    deprecated_extra, new_extra = alias_to_look_for
    if new_extra == '':  # Handle case for crypto
        return []
    try:
        return EXTRAS_REQUIREMENTS[new_extra]
    except KeyError as err:
        # Chain the original KeyError so the traceback shows the root cause.
        raise Exception(f"The extra {new_extra} is missing for alias {deprecated_extra}") from err
# Add extras for all deprecated aliases. Requirements for those deprecated aliases are the same
# as the extras they are replaced with
for alias, extra in EXTRAS_DEPRECATED_ALIASES.items():
    requirements = EXTRAS_REQUIREMENTS.get(extra) if extra != '' else []
    if requirements is None:
        raise Exception(f"The extra {extra} is missing for deprecated alias {alias}")
    # Note the requirements are not copies - those are the same lists as for the new extras. This is intended.
    # Thanks to that if the original extras are later extended with providers, aliases are extended as well.
    EXTRAS_REQUIREMENTS[alias] = requirements
#############################################################################################################
# End of deprecated section
#############################################################################################################
# This is list of all providers. It's a shortcut for anyone who would like to easily get list of
# All providers. It is used by pre-commits.
ALL_PROVIDERS = list(PROVIDERS_REQUIREMENTS.keys())
# Providers that talk to an external database; used to build the 'all_dbs' extra.
ALL_DB_PROVIDERS = [
    'apache.cassandra',
    'apache.druid',
    'apache.hdfs',
    'apache.hive',
    'apache.pinot',
    'cloudant',
    'exasol',
    'microsoft.mssql',
    'mongo',
    'mysql',
    'postgres',
    'presto',
    'vertica',
]
# Special requirements for all database-related providers. They are de-duplicated.
all_dbs = list({req for db_provider in ALL_DB_PROVIDERS for req in PROVIDERS_REQUIREMENTS[db_provider]})
# Requirements for all "user" extras (no devel). They are de-duplicated. Note that we do not need
# to separately add providers requirements - they have been already added as 'providers' extras above
_all_requirements = list({req for extras_reqs in EXTRAS_REQUIREMENTS.values() for req in extras_reqs})
# All user extras here
EXTRAS_REQUIREMENTS["all"] = _all_requirements
# All db user extras here
EXTRAS_REQUIREMENTS["all_dbs"] = all_dbs
# This can be simplified to devel_hadoop + _all_requirements due to inclusions
# but we keep it for explicit sake. We are de-duplicating it anyway.
devel_all = list(set(_all_requirements + doc + devel_minreq + devel_hadoop))
# Those are packages excluded for "all" dependencies
PACKAGES_EXCLUDED_FOR_ALL = []
PACKAGES_EXCLUDED_FOR_ALL.extend(
    [
        'snakebite',
    ]
)
# Those packages are excluded because they break tests (downgrading mock) and they are
# not needed to run our test suite. This can be removed as soon as we get non-conflicting
# requirements for the apache-beam as well. This waits for azure + snowflake fixes:
#
# * Azure: https://github.com/apache/airflow/issues/11968
# * Snowflake: https://github.com/apache/airflow/issues/12881
#
PACKAGES_EXCLUDED_FOR_CI = [
    'apache-beam',
]
def is_package_excluded(package: str, exclusion_list: List[str]):
    """
    Checks if package should be excluded.

    :param package: package name (beginning of it)
    :param exclusion_list: list of excluded packages
    :return: true if package should be excluded
    """
    for excluded_package in exclusion_list:
        if package.startswith(excluded_package):
            return True
    return False
# Drop the globally-excluded packages from the full developer set.
devel_all = [
    package
    for package in devel_all
    if not is_package_excluded(package=package, exclusion_list=PACKAGES_EXCLUDED_FOR_ALL)
]
# The CI set additionally drops packages known to break the test suite.
devel_ci = [
    package
    for package in devel_all
    if not is_package_excluded(
        package=package, exclusion_list=PACKAGES_EXCLUDED_FOR_CI + PACKAGES_EXCLUDED_FOR_ALL
    )
]
# Those are extras that we have to add for development purposes
# They can be use to install some predefined set of dependencies.
EXTRAS_REQUIREMENTS["doc"] = doc
EXTRAS_REQUIREMENTS["devel"] = devel_minreq  # devel_minreq already includes doc
EXTRAS_REQUIREMENTS["devel_hadoop"] = devel_hadoop  # devel_hadoop already includes devel_minreq
EXTRAS_REQUIREMENTS["devel_all"] = devel_all
EXTRAS_REQUIREMENTS["devel_ci"] = devel_ci
# For Python 3.6+ the dictionary order remains when keys() are retrieved.
# Sort both: extras and list of dependencies to make it easier to analyse problems
# external packages will be first, then if providers are added they are added at the end of the lists.
EXTRAS_REQUIREMENTS = dict(sorted(EXTRAS_REQUIREMENTS.items()))  # noqa
for extra_list in EXTRAS_REQUIREMENTS.values():
    extra_list.sort()
# A set that keeps all extras that install some providers.
# It is used by pre-commit that verifies if documentation in docs/apache-airflow/extra-packages-ref.rst
# are synchronized.
EXTRAS_WITH_PROVIDERS: Set[str] = set()
# Those providers are pre-installed always when airflow is installed.
# Those providers do not have dependency on airflow2.0 because that would lead to circular dependencies.
# This is not a problem for PIP but some tools (pipdeptree) show that as a warning.
PREINSTALLED_PROVIDERS = [
    'ftp',
    'http',
    'imap',
    'sqlite',
]
def get_provider_package_from_package_id(package_id: str):
    """
    Builds the name of provider package out of the package id provided.

    :param package_id: id of the package (like amazon or microsoft.azure)
    :return: full name of package in PyPI
    """
    # Dots in the package id ("microsoft.azure") become dashes in the PyPI name.
    return "apache-airflow-providers-" + package_id.replace(".", "-")
class AirflowDistribution(Distribution):
    """setuptools.Distribution subclass with Airflow specific behaviour"""

    # https://github.com/PyCQA/pylint/issues/3737
    def parse_config_files(self, *args, **kwargs):  # pylint: disable=signature-differs
        """
        Ensure that when we have been asked to install providers from sources
        that we don't *also* try to install those providers from PyPI
        """
        super().parse_config_files(*args, **kwargs)
        if os.getenv('INSTALL_PROVIDERS_FROM_SOURCES') == 'true':
            # Sources are used instead: drop every provider requirement.
            remaining = [
                requirement
                for requirement in self.install_requires
                if not requirement.startswith('apache-airflow-providers-')
            ]
            self.install_requires = remaining  # noqa pylint: disable=attribute-defined-outside-init
        else:
            # Regular install: the always-needed providers must come from PyPI.
            preinstalled = [
                get_provider_package_from_package_id(package_id) for package_id in PREINSTALLED_PROVIDERS
            ]
            self.install_requires.extend(preinstalled)
def add_provider_packages_to_requirements(extra_with_providers: str, providers: List[str]):
    """
    Adds provider packages to requirements

    :param extra_with_providers: Name of the extra to add providers to
    :param providers: list of provider names
    """
    # Remember that this extra pulls in providers (consumed by pre-commit checks).
    EXTRAS_WITH_PROVIDERS.add(extra_with_providers)
    provider_packages = [get_provider_package_from_package_id(name) for name in providers]
    EXTRAS_REQUIREMENTS[extra_with_providers].extend(provider_packages)
def add_all_provider_packages():
    """
    In case of regular installation (when INSTALL_PROVIDERS_FROM_SOURCES is false), we should
    add extra dependencies to Airflow - to get the providers automatically installed when
    those extras are installed.
    """
    # Every provider gets its own extra pulling in exactly that provider package.
    for provider_id in ALL_PROVIDERS:
        add_provider_packages_to_requirements(provider_id, [provider_id])
    # The aggregate extras pull in the full (or database-only) provider set.
    for aggregate_extra in ("all", "devel_ci", "devel_all"):
        add_provider_packages_to_requirements(aggregate_extra, ALL_PROVIDERS)
    add_provider_packages_to_requirements("all_dbs", ALL_DB_PROVIDERS)
    add_provider_packages_to_requirements("devel_hadoop", ["apache.hdfs", "apache.hive", "presto"])
def do_setup():
    """Perform the Airflow package setup."""
    install_from_sources = os.getenv('INSTALL_PROVIDERS_FROM_SOURCES') == 'true'
    setup_kwargs = {}
    if install_from_sources:
        # Only specify this if we need this option, otherwise let default from
        # setup.cfg control this (kwargs in setup() call take priority)
        setup_kwargs['packages'] = find_namespace_packages(include=['airflow*'])
    else:
        add_all_provider_packages()
    write_version()
    setup(
        distclass=AirflowDistribution,
        # Most values come from setup.cfg -- see
        # https://setuptools.readthedocs.io/en/latest/userguide/declarative_config.html
        version=version,
        extras_require=EXTRAS_REQUIREMENTS,
        download_url='https://archive.apache.org/dist/airflow/' + version,
        cmdclass={
            'extra_clean': CleanCommand,
            'compile_assets': CompileAssets,
            'list_extras': ListExtras,
        },
        test_suite='setup.airflow_test_suite',
        **setup_kwargs,
    )
# Standard script entry point: run the packaging logic only when executed
# directly (``python setup.py ...``), not when imported as a module.
if __name__ == "__main__":
    do_setup()
| 31.176334 | 110 | 0.633214 |
2d14d8959f7d7cc52613f0ea50b222821d99a8bc | 14,300 | py | Python | pybind/nos/v7_1_0/rbridge_id/router/router_bgp/address_family/ipv6/ipv6_unicast/af_ipv6_vrf/af_ipv6_uc_and_vrf_cmds_call_point_holder/redistribute/static/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/nos/v7_1_0/rbridge_id/router/router_bgp/address_family/ipv6/ipv6_unicast/af_ipv6_vrf/af_ipv6_uc_and_vrf_cmds_call_point_holder/redistribute/static/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/nos/v7_1_0/rbridge_id/router/router_bgp/address_family/ipv6/ipv6_unicast/af_ipv6_vrf/af_ipv6_uc_and_vrf_cmds_call_point_holder/redistribute/static/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class static(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-rbridge - based on the path /rbridge-id/router/router-bgp/address-family/ipv6/ipv6-unicast/af-ipv6-vrf/af-ipv6-uc-and-vrf-cmds-call-point-holder/redistribute/static. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__redistribute_static','__unicast_static_metric','__static_route_map',)
_yang_name = 'static'
_rest_name = 'static'
_pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    # Resolve the XPath helper: an explicit kwarg wins; an explicit False
    # disables it; otherwise inherit the parent's helper when attached.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False
    # Resolve extmethods with the same precedence: kwarg, then parent, then disabled.
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False
    # Default-construct the three leaves of this container (generated from the YANG model).
    self.__unicast_static_metric = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4294967295']}), is_leaf=True, yang_name="unicast-static-metric", rest_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Metric for redistributed routes', u'alt-name': u'metric'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='conn-metric', is_config=True)
    self.__static_route_map = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="static-route-map", rest_name="route-map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Route map reference', u'alt-name': u'route-map'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='rmap-type', is_config=True)
    self.__redistribute_static = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="redistribute-static", rest_name="redistribute-static", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-run-template': u'$(.?$(../unicast-static-metric?\\r:$(../static-route-map?\\r:redistribute static\n)):\\r)', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
    # Optional positional argument: another object with the same elements to copy from.
    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      # The supplied object must expose every element of this container.
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      # Copy only elements the source object actually changed from defaults.
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'rbridge-id', u'router', u'router-bgp', u'address-family', u'ipv6', u'ipv6-unicast', u'af-ipv6-vrf', u'af-ipv6-uc-and-vrf-cmds-call-point-holder', u'redistribute', u'static']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'rbridge-id', u'router', u'bgp', u'address-family', u'ipv6', u'unicast', u'vrf', u'redistribute', u'static']
def _get_redistribute_static(self):
"""
Getter method for redistribute_static, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv6/ipv6_unicast/af_ipv6_vrf/af_ipv6_uc_and_vrf_cmds_call_point_holder/redistribute/static/redistribute_static (empty)
"""
return self.__redistribute_static
  def _set_redistribute_static(self, v, load=False):
    """
    Setter method for redistribute_static, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv6/ipv6_unicast/af_ipv6_vrf/af_ipv6_uc_and_vrf_cmds_call_point_holder/redistribute/static/redistribute_static (empty)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_redistribute_static is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_redistribute_static() directly.
    """
    # Unwrap values that carry their own pyangbind underlying type first.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Coerce the supplied value into the generated YANG 'empty' leaf type.
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="redistribute-static", rest_name="redistribute-static", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-run-template': u'$(.?$(../unicast-static-metric?\\r:$(../static-route-map?\\r:redistribute static\n)):\\r)', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
    except (TypeError, ValueError):
      # Re-raise with a structured error describing the expected generated type.
      raise ValueError({
          'error-string': """redistribute_static must be of a type compatible with empty""",
          'defined-type': "empty",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="redistribute-static", rest_name="redistribute-static", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-run-template': u'$(.?$(../unicast-static-metric?\\r:$(../static-route-map?\\r:redistribute static\n)):\\r)', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)""",
        })
    self.__redistribute_static = t
    # Give hook implementations a chance to react to the change.
    if hasattr(self, '_set'):
      self._set()
  def _unset_redistribute_static(self):
    # Reset the leaf to a fresh default-constructed 'empty' value (mirrors __init__).
    self.__redistribute_static = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="redistribute-static", rest_name="redistribute-static", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-run-template': u'$(.?$(../unicast-static-metric?\\r:$(../static-route-map?\\r:redistribute static\n)):\\r)', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
def _get_unicast_static_metric(self):
"""
Getter method for unicast_static_metric, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv6/ipv6_unicast/af_ipv6_vrf/af_ipv6_uc_and_vrf_cmds_call_point_holder/redistribute/static/unicast_static_metric (conn-metric)
"""
return self.__unicast_static_metric
  def _set_unicast_static_metric(self, v, load=False):
    """
    Setter method for unicast_static_metric, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv6/ipv6_unicast/af_ipv6_vrf/af_ipv6_uc_and_vrf_cmds_call_point_holder/redistribute/static/unicast_static_metric (conn-metric)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_unicast_static_metric is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_unicast_static_metric() directly.
    """
    # Unwrap values that carry their own pyangbind underlying type first.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Coerce the supplied value into the generated range-restricted uint32 type.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4294967295']}), is_leaf=True, yang_name="unicast-static-metric", rest_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Metric for redistributed routes', u'alt-name': u'metric'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='conn-metric', is_config=True)
    except (TypeError, ValueError):
      # Re-raise with a structured error describing the expected generated type.
      raise ValueError({
          'error-string': """unicast_static_metric must be of a type compatible with conn-metric""",
          'defined-type': "brocade-bgp:conn-metric",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4294967295']}), is_leaf=True, yang_name="unicast-static-metric", rest_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Metric for redistributed routes', u'alt-name': u'metric'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='conn-metric', is_config=True)""",
        })
    self.__unicast_static_metric = t
    # Give hook implementations a chance to react to the change.
    if hasattr(self, '_set'):
      self._set()
  def _unset_unicast_static_metric(self):
    # Reset the leaf to a fresh default-constructed conn-metric value (mirrors __init__).
    self.__unicast_static_metric = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4294967295']}), is_leaf=True, yang_name="unicast-static-metric", rest_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Metric for redistributed routes', u'alt-name': u'metric'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='conn-metric', is_config=True)
def _get_static_route_map(self):
"""
Getter method for static_route_map, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv6/ipv6_unicast/af_ipv6_vrf/af_ipv6_uc_and_vrf_cmds_call_point_holder/redistribute/static/static_route_map (rmap-type)
"""
return self.__static_route_map
def _set_static_route_map(self, v, load=False):
    """
    Setter method for static_route_map, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv6/ipv6_unicast/af_ipv6_vrf/af_ipv6_uc_and_vrf_cmds_call_point_holder/redistribute/static/static_route_map (rmap-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_static_route_map is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_static_route_map() directly.
    """
    # pyangbind-generated setter: wrap *v* in a typed YANGDynClass and reject
    # values that violate the rmap-type restriction (string of length 1..63).
    if hasattr(v, "_utype"):
        # Union types expose a _utype coercion hook; normalise before validating.
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="static-route-map", rest_name="route-map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Route map reference', u'alt-name': u'route-map'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='rmap-type', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured payload describing the expected YANG type.
        raise ValueError({
            'error-string': """static_route_map must be of a type compatible with rmap-type""",
            'defined-type': "brocade-bgp:rmap-type",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="static-route-map", rest_name="route-map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Route map reference', u'alt-name': u'route-map'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='rmap-type', is_config=True)""",
        })
    self.__static_route_map = t
    if hasattr(self, '_set'):
        # Notify the parent container (when present) that a child changed.
        self._set()
def _unset_static_route_map(self):
    # pyangbind-generated reset: restore the leaf to an empty typed wrapper
    # (rmap-type: string of length 1..63). Keep in sync with the setter above.
    self.__static_route_map = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="static-route-map", rest_name="route-map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Route map reference', u'alt-name': u'route-map'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='rmap-type', is_config=True)
# Expose each generated getter/setter pair as a plain attribute.
# __builtin__.property is used because pyangbind targets Python 2 output.
redistribute_static = __builtin__.property(_get_redistribute_static, _set_redistribute_static)
unicast_static_metric = __builtin__.property(_get_unicast_static_metric, _set_unicast_static_metric)
static_route_map = __builtin__.property(_get_static_route_map, _set_static_route_map)

# Registry pyangbind's serialisers walk to enumerate this container's leaves.
_pyangbind_elements = {'redistribute_static': redistribute_static, 'unicast_static_metric': unicast_static_metric, 'static_route_map': static_route_map, }
| 74.093264 | 637 | 0.741049 |
4c7dfe2d666362ce27671963362fdca70f2e3bd4 | 2,499 | py | Python | conversations/callbacks/root_handler_callbacks.py | dattatreya303/round_robin_tasker | 74b031322ef1ca59072486c5c7d0d98e59bc62c3 | [
"MIT"
] | null | null | null | conversations/callbacks/root_handler_callbacks.py | dattatreya303/round_robin_tasker | 74b031322ef1ca59072486c5c7d0d98e59bc62c3 | [
"MIT"
] | 4 | 2020-08-03T15:50:48.000Z | 2020-11-01T06:05:38.000Z | conversations/callbacks/root_handler_callbacks.py | dattatreya303/round_robin_tasker | 74b031322ef1ca59072486c5c7d0d98e59bc62c3 | [
"MIT"
] | null | null | null | from typing import Dict
from telegram import Update, MessageEntity
from telegram.ext import CallbackContext, Handler
from Constants import logger
from conversations.commands import MainCommands
from conversations.handlers import ADD_TASK_CONVERSATION_HANDLER, CHECK_TASK_CONVERSATION_HANDLER, \
LIST_TASKS_CONVERSATION_HANDLER, DELETE_TASK_CONVERSATION_HANDLER
from conversations.handlers.common import ROOT_CANCEL_HANDLER, HELP_HANDLER, INVALID_COMMAND_HANDLER
from conversations.handlers.start import START_CONVERSATION_HANDLER
from entities.ChatData import ChatData
# Maps each top-level bot command to the conversation handler that services it.
# Unknown commands are covered by the INVALID_COMMAND entry.
switcher_v2: Dict[str, Handler] = {
    MainCommands.START.value: START_CONVERSATION_HANDLER,
    MainCommands.ADD_TASK.value: ADD_TASK_CONVERSATION_HANDLER,
    MainCommands.CHECK_TASK.value: CHECK_TASK_CONVERSATION_HANDLER,
    MainCommands.LIST_TASKS.value: LIST_TASKS_CONVERSATION_HANDLER,
    MainCommands.DELETE_TASK.value: DELETE_TASK_CONVERSATION_HANDLER,
    MainCommands.INVALID_COMMAND.value: INVALID_COMMAND_HANDLER,
    MainCommands.HELP.value: HELP_HANDLER,
    MainCommands.CANCEL.value: ROOT_CANCEL_HANDLER,
}
def root_router_v2(update: Update, context: CallbackContext):
    """Route an incoming update to the conversation handler for its command.

    Falls back to the invalid-command handler when the resolved handler
    rejects the update.
    """
    chat_id = update.effective_chat.id
    logger.info("[root_router] Entered conv again {}".format(chat_id))
    # Drop any '@botname' suffix and the leading '/' to get the bare command.
    command = update.message.text.split('@')[0].lstrip('/')
    logger.info('[root_router] command: {}'.format(command))
    matched = find_handler(command, chat_id, context.chat_data)
    check = matched.check_update(update)
    logger.info('[root_router] check: {}'.format(check))
    if check is None or check is False:
        matched = INVALID_COMMAND_HANDLER
        check = matched.check_update(update)
        logger.error('[root_router] check is false or none')
    matched.handle_update(update, context.dispatcher, check, context)
def find_handler(command: str, chat_id: int, chat_data: Dict[int, ChatData]):
    """Return the handler for *command*, honouring any ongoing conversation.

    :param command: bare command name extracted from the message text.
    :param chat_id: Telegram chat identifier.
    :param chat_data: dispatcher's per-chat state, keyed by chat id.
        (The original annotation said ``ChatData``, but the code indexes it
        by ``chat_id``, so it is a mapping of chat id -> ChatData.)
    :return: the handler for the chat's ongoing conversation when one exists,
        otherwise the handler registered for *command*.
    """
    if chat_id in chat_data:
        # Use a distinct local instead of shadowing the parameter.
        data: ChatData = chat_data[chat_id]
        ongoing_conv: MainCommands = data.ongoing_conversation
        if ongoing_conv is not None:
            logger.info('[find_handler] ongoing_conv: {}'.format(ongoing_conv.value))
            return find_in_command_switcher(ongoing_conv.value)
    logger.info('[find_handler] switcher key: {}'.format(command))
    return find_in_command_switcher(command)
def find_in_command_switcher(command):
    """Look up *command* in the switcher, defaulting to the invalid-command handler."""
    fallback = switcher_v2[MainCommands.INVALID_COMMAND.value]
    return switcher_v2.get(command, fallback)
| 44.625 | 100 | 0.777911 |
258dccb3cc2a13f6837f6cf408790a8359537f99 | 293 | py | Python | crys3d/command_line/HKLviewer.py | mphancock/cctbx_project | ec8a239c5bcee9c9b2d1c6c95dc3fff2580bbb85 | [
"BSD-3-Clause-LBNL"
] | null | null | null | crys3d/command_line/HKLviewer.py | mphancock/cctbx_project | ec8a239c5bcee9c9b2d1c6c95dc3fff2580bbb85 | [
"BSD-3-Clause-LBNL"
] | 1 | 2020-05-26T17:46:17.000Z | 2020-05-26T17:55:19.000Z | crys3d/command_line/HKLviewer.py | mphancock/cctbx_project | ec8a239c5bcee9c9b2d1c6c95dc3fff2580bbb85 | [
"BSD-3-Clause-LBNL"
] | 1 | 2021-03-26T12:52:30.000Z | 2021-03-26T12:52:30.000Z | # LIBTBX_SET_DISPATCHER_NAME phenix.HKLviewer
# LIBTBX_SET_DISPATCHER_NAME cctbx.HKLviewer
# LIBTBX_SET_DISPATCHER_NAME phasertng.HKLviewer
from __future__ import absolute_import, division, print_function
from crys3d.hklview import HKLviewer
# Launch the HKL viewer when this dispatcher module is run as a script.
if __name__ == "__main__":
    HKLviewer.run()
| 26.636364 | 64 | 0.832765 |
cd8a1c4bccfe16876b10eaff24b086348f9968a6 | 143 | py | Python | tests/web_platform/css_flexbox_1/test_auto_margins.py | jonboland/colosseum | cbf974be54fd7f6fddbe7285704cfaf7a866c5c5 | [
"BSD-3-Clause"
] | 71 | 2015-04-13T09:44:14.000Z | 2019-03-24T01:03:02.000Z | tests/web_platform/css_flexbox_1/test_auto_margins.py | jonboland/colosseum | cbf974be54fd7f6fddbe7285704cfaf7a866c5c5 | [
"BSD-3-Clause"
] | 35 | 2019-05-06T15:26:09.000Z | 2022-03-28T06:30:33.000Z | tests/web_platform/css_flexbox_1/test_auto_margins.py | jonboland/colosseum | cbf974be54fd7f6fddbe7285704cfaf7a866c5c5 | [
"BSD-3-Clause"
] | 139 | 2015-05-30T18:37:43.000Z | 2019-03-27T17:14:05.000Z | from tests.utils import W3CTestCase
class TestAutoMargins(W3CTestCase):
    # W3CTestCase.find_tests builds a mapping of generated test methods for
    # every reference file whose name starts with 'auto-margins-'. Inside a
    # class body, vars() is the class namespace, so updating it attaches each
    # generated test method to this class.
    vars().update(W3CTestCase.find_tests(__file__, 'auto-margins-'))
| 23.833333 | 68 | 0.783217 |
85cd82ad6359f04307ad63f28f66595bd5121383 | 7,878 | py | Python | hkm/tests/test_clean_unused_data.py | City-of-Helsinki/kuvaselaamo | 3fa9b69e3f5496620852d8b138129d0069339fcd | [
"MIT"
] | 1 | 2017-05-07T10:46:24.000Z | 2017-05-07T10:46:24.000Z | hkm/tests/test_clean_unused_data.py | City-of-Helsinki/kuvaselaamo | 3fa9b69e3f5496620852d8b138129d0069339fcd | [
"MIT"
] | 60 | 2016-10-18T11:18:48.000Z | 2022-02-13T20:04:18.000Z | hkm/tests/test_clean_unused_data.py | City-of-Helsinki/kuvaselaamo | 3fa9b69e3f5496620852d8b138129d0069339fcd | [
"MIT"
] | 9 | 2017-04-18T13:26:26.000Z | 2020-02-13T20:05:13.000Z | import pytest
from factories import FeedbackFactory, TmpImageFactory, ProductOrderFactory, UserFactory
from hkm.models.models import Feedback, TmpImage, ProductOrder, UserProfile, User, Collection, Record
from django.core.management import call_command
from freezegun import freeze_time
from datetime import datetime, timedelta
from hkm.management.commands.clean_unused_data import DEFAULT_DAYS_UNTIL_REMOVAL, DEFAULT_DAYS_UNTIL_NOTIFICATION
# Override values used to exercise the command's CLI parameters, as opposed
# to the DEFAULT_* values imported from the management command itself.
CUSTOM_DAYS_UNTIL_REMOVAL = 10
CUSTOM_DAYS_UNTIL_NOTIFICATION = 5
@pytest.fixture
def day_older_than_removal_date():
    """Timestamp one day past the default removal threshold."""
    age = timedelta(days=DEFAULT_DAYS_UNTIL_REMOVAL + 1)
    return datetime.today() - age
@pytest.fixture
def day_newer_than_removal_date():
    """Timestamp one day short of the default removal threshold."""
    age = timedelta(days=DEFAULT_DAYS_UNTIL_REMOVAL - 1)
    return datetime.today() - age
@pytest.fixture
def day_within_grace_period():
    """Notification timestamp still inside the default grace period."""
    age = timedelta(days=DEFAULT_DAYS_UNTIL_REMOVAL - DEFAULT_DAYS_UNTIL_NOTIFICATION - 1)
    return datetime.today() - age
@pytest.fixture
def day_outside_grace_period():
    """Notification timestamp whose default grace period has elapsed."""
    age = timedelta(days=DEFAULT_DAYS_UNTIL_REMOVAL - DEFAULT_DAYS_UNTIL_NOTIFICATION + 1)
    return datetime.today() - age
@pytest.fixture
def day_older_than_custom_removal_date():
    """Timestamp one day past the custom removal threshold."""
    age = timedelta(days=CUSTOM_DAYS_UNTIL_REMOVAL + 1)
    return datetime.today() - age
@pytest.fixture
def day_newer_than_custom_removal_date():
    """Timestamp one day short of the custom removal threshold."""
    age = timedelta(days=CUSTOM_DAYS_UNTIL_REMOVAL - 1)
    return datetime.today() - age
@pytest.fixture
def day_within_custom_grace_period():
    """Notification timestamp still inside the custom grace period."""
    age = timedelta(days=CUSTOM_DAYS_UNTIL_REMOVAL - CUSTOM_DAYS_UNTIL_NOTIFICATION - 1)
    return datetime.today() - age
@pytest.fixture
def day_outside_custom_grace_period():
    """Notification timestamp whose custom grace period has elapsed."""
    age = timedelta(days=CUSTOM_DAYS_UNTIL_REMOVAL - CUSTOM_DAYS_UNTIL_NOTIFICATION + 1)
    return datetime.today() - age
@pytest.mark.django_db
def test_that_old_anonymous_data_is_removed_using_default_date(day_older_than_removal_date,
                                                               day_newer_than_removal_date):
    """Only anonymous records older than the default threshold are purged."""
    for stamp in (day_older_than_removal_date, day_newer_than_removal_date):
        _create_anonymous_data(stamp)
    _assert_anonymous_data_counts(2)
    call_command('clean_unused_data')
    _assert_anonymous_data_counts(1)
def _create_anonymous_data(modified_date):
    """Create one of each anonymous record stamped with *modified_date*."""
    with freeze_time(modified_date):
        for factory in (FeedbackFactory, TmpImageFactory, ProductOrderFactory):
            factory()
@pytest.mark.django_db
def test_that_old_anonymous_data_is_removed_using_custom_date(day_older_than_custom_removal_date,
                                                              day_newer_than_custom_removal_date):
    """Only anonymous records older than the overridden threshold are purged."""
    for stamp in (day_older_than_custom_removal_date, day_newer_than_custom_removal_date):
        _create_anonymous_data(stamp)
    _assert_anonymous_data_counts(2)
    call_command('clean_unused_data', days_until_removal=CUSTOM_DAYS_UNTIL_REMOVAL,
                 days_until_notification=CUSTOM_DAYS_UNTIL_NOTIFICATION)
    _assert_anonymous_data_counts(1)
def _assert_anonymous_data_counts(count):
    """Assert each anonymous model holds exactly *count* rows."""
    for model in (Feedback, TmpImage, ProductOrder):
        assert model.objects.count() == count
@pytest.mark.django_db
def test_that_old_user_gets_deleted_using_default_date(day_older_than_removal_date, day_newer_than_removal_date,
                                                       day_outside_grace_period, day_within_grace_period):
    # Recently-logged-in users survive regardless of notification state.
    not_deleted = [
        UserFactory(last_login=day_newer_than_removal_date, profile__removal_notification_sent=None),
        UserFactory(last_login=day_newer_than_removal_date, profile__removal_notification_sent=day_within_grace_period)
    ]
    # Stale user already notified and past the grace period -> removed.
    UserFactory(last_login=day_older_than_removal_date, profile__removal_notification_sent=day_outside_grace_period)
    _assert_user_data_counts(3)
    call_command('clean_unused_data')
    _assert_user_data_counts(2)
    _assert_users_exist(not_deleted)
@pytest.mark.django_db
def test_that_old_user_gets_deleted_using_custom_date(day_older_than_custom_removal_date,
                                                      day_newer_than_custom_removal_date,
                                                      day_outside_custom_grace_period):
    # Recent login within the custom window -> must survive.
    not_deleted = [UserFactory(last_login=day_newer_than_custom_removal_date)]
    # Stale user notified and past the custom grace period -> removed.
    UserFactory(last_login=day_older_than_custom_removal_date,
                profile__removal_notification_sent=day_outside_custom_grace_period)
    _assert_user_data_counts(2)
    call_command('clean_unused_data', days_until_removal=CUSTOM_DAYS_UNTIL_REMOVAL,
                 days_until_notification=CUSTOM_DAYS_UNTIL_NOTIFICATION)
    _assert_user_data_counts(1)
    _assert_users_exist(not_deleted)
def _assert_user_data_counts(count):
    """Assert every user-related model holds exactly *count* rows."""
    models = (User, UserProfile, Feedback, TmpImage, ProductOrder,
              Collection, Record)
    for model in models:
        assert model.objects.count() == count
@pytest.mark.django_db
def test_that_staff_users_dont_get_deleted(day_older_than_removal_date):
    # Privileged accounts must survive even when long inactive; one user per
    # privilege flag (Django superuser/staff and profile museum/admin flags).
    not_deleted = [
        UserFactory(last_login=day_older_than_removal_date, is_superuser=True),
        UserFactory(last_login=day_older_than_removal_date, is_staff=True),
        UserFactory(last_login=day_older_than_removal_date, profile__is_museum=True),
        UserFactory(last_login=day_older_than_removal_date, profile__is_admin=True)
    ]
    _assert_user_data_counts(4)
    call_command('clean_unused_data')
    # Counts unchanged: nothing was deleted.
    _assert_user_data_counts(4)
    _assert_users_exist(not_deleted)
def _assert_users_exist(users):
    """Assert each given user can still be fetched by primary key."""
    for expected in users:
        fetched = User.objects.get(pk=expected.id)
        assert fetched
@pytest.mark.django_db
def test_that_user_is_not_deleted_without_notification(day_older_than_removal_date, day_within_grace_period,
                                                       day_outside_grace_period):
    # Even stale users survive when never notified, or notified so recently
    # that the grace period has not elapsed.
    not_deleted = [
        UserFactory(last_login=day_older_than_removal_date, profile__removal_notification_sent=None),
        UserFactory(last_login=day_older_than_removal_date, profile__removal_notification_sent=day_within_grace_period)
    ]
    # Notified and past the grace period -> removed.
    UserFactory(last_login=day_older_than_removal_date, profile__removal_notification_sent=day_outside_grace_period)
    _assert_user_data_counts(3)
    call_command('clean_unused_data')
    _assert_user_data_counts(2)
    _assert_users_exist(not_deleted)
@pytest.mark.django_db
def test_that_user_is_not_deleted_without_notification_using_custom_dates(
        day_older_than_custom_removal_date, day_within_custom_grace_period,
        day_outside_custom_grace_period):
    # Same scenario as the default-dates test above, but with the thresholds
    # supplied through the command's CLI parameters.
    not_deleted = [
        UserFactory(last_login=day_older_than_custom_removal_date, profile__removal_notification_sent=None),
        UserFactory(last_login=day_older_than_custom_removal_date,
                    profile__removal_notification_sent=day_within_custom_grace_period)
    ]
    # Notified and past the custom grace period -> removed.
    UserFactory(last_login=day_older_than_custom_removal_date,
                profile__removal_notification_sent=day_outside_custom_grace_period)
    _assert_user_data_counts(3)
    call_command('clean_unused_data', days_until_removal=CUSTOM_DAYS_UNTIL_REMOVAL,
                 days_until_notification=CUSTOM_DAYS_UNTIL_NOTIFICATION)
    _assert_user_data_counts(2)
    _assert_users_exist(not_deleted)
def test_negative_days_until_removal(capsys):
    """A negative removal window must be rejected with an error message."""
    call_command('clean_unused_data', days_until_removal=-1)
    captured_out = capsys.readouterr()[0]
    assert "Invalid parameters given." in captured_out
def test_negative_days_until_notification(capsys):
    """A negative notification window must be rejected with an error message."""
    call_command('clean_unused_data', days_until_notification=-1)
    captured_out = capsys.readouterr()[0]
    assert "Invalid parameters given." in captured_out
def test_days_until_notification_cant_be_more_than_days_until_removal(capsys):
    """Notification window larger than the removal window must be rejected."""
    call_command('clean_unused_data', days_until_notification=10, days_until_removal=9)
    captured_out = capsys.readouterr()[0]
    assert "Invalid parameters given." in captured_out
| 36.304147 | 119 | 0.777101 |
c105e156ca9c4e0ed539f73c6429af7b15e31c6c | 21,787 | py | Python | tests/dbobject/test_privs.py | reedstrm/Pyrseas | 1ca23a906718bca36c12763fde54f40053dc3f81 | [
"BSD-3-Clause"
] | null | null | null | tests/dbobject/test_privs.py | reedstrm/Pyrseas | 1ca23a906718bca36c12763fde54f40053dc3f81 | [
"BSD-3-Clause"
] | null | null | null | tests/dbobject/test_privs.py | reedstrm/Pyrseas | 1ca23a906718bca36c12763fde54f40053dc3f81 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Test object privileges
The majority of the other tests exclude access privileges. The tests
here explicitly request them. In addition, the roles 'user1' and 'user2'
are created if they don't exist.
"""
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase
# SQL snippets shared by the test cases below.
CREATE_TABLE = "CREATE TABLE t1 (c1 integer, c2 text)"
SOURCE1 = "SELECT 'dummy'::text"
# The body is wrapped in $_$ dollar-quoting so the quote inside SOURCE1 survives.
CREATE_FUNC = "CREATE FUNCTION f1() RETURNS text LANGUAGE sql IMMUTABLE AS " \
    "$_$%s$_$" % SOURCE1
CREATE_FDW = "CREATE FOREIGN DATA WRAPPER fdw1"
CREATE_FS = "CREATE SERVER fs1 FOREIGN DATA WRAPPER fdw1"
# The %s placeholders below take the grantee role name.
GRANT_SELECT = "GRANT SELECT ON TABLE t1 TO %s"
GRANT_INSUPD = "GRANT INSERT, UPDATE ON TABLE t1 TO %s"
def check_extra_users(db):
    """Create the extra test roles 'user1'/'user2' when they are missing."""
    for user in ('user1', 'user2'):
        row = db.fetchone("SELECT 1 FROM pg_roles WHERE rolname = %s", (user,))
        if row is not None:
            continue
        db.execute_commit("CREATE ROLE %s" % user)
class PrivilegeToMapTestCase(DatabaseToMapTestCase):
    """Test mapping of object privilege information"""

    def setUp(self):
        # NOTE(review): super() is anchored at DatabaseToMapTestCase itself,
        # so DatabaseToMapTestCase.setUp is deliberately skipped and the next
        # class in the MRO runs instead — confirm this is intentional.
        super(DatabaseToMapTestCase, self).setUp()
        check_extra_users(self.db)

    def test_map_schema(self):
        "Map a schema with some GRANTs"
        stmts = ["CREATE SCHEMA s1", "GRANT USAGE ON SCHEMA s1 TO PUBLIC",
                 "GRANT CREATE, USAGE ON SCHEMA s1 TO user1"]
        # no_privs=False asks to_map to include access-privilege information.
        dbmap = self.to_map(stmts, no_privs=False)
        expmap = {'privileges': [{self.db.user: ['all']},
                                 {'PUBLIC': ['usage']}, {'user1': ['all']}]}
        assert dbmap['schema s1'] == expmap

    def test_map_table(self):
        "Map a table with various GRANTs"
        stmts = [CREATE_TABLE, GRANT_SELECT % 'PUBLIC', GRANT_INSUPD % 'user1',
                 "GRANT REFERENCES, TRIGGER ON t1 TO user2 WITH GRANT OPTION"]
        dbmap = self.to_map(stmts, no_privs=False)
        expmap = {'columns': [{'c1': {'type': 'integer'}},
                              {'c2': {'type': 'text'}}],
                  'privileges': [{self.db.user: ['all']},
                                 {'PUBLIC': ['select']},
                                 {'user1': ['insert', 'update']},
                                 {'user2': [{'trigger': {'grantable': True}},
                                            {'references': {
                                                'grantable': True}}]}]}
        assert dbmap['schema public']['table t1'] == expmap

    def test_map_column(self):
        "Map a table with GRANTs on column"
        self.maxDiff = None
        stmts = [CREATE_TABLE, GRANT_SELECT % 'PUBLIC',
                 "GRANT INSERT (c1, c2) ON t1 TO user1",
                 "GRANT INSERT (c2), UPDATE (c2) ON t1 TO user2"]
        dbmap = self.to_map(stmts, no_privs=False)
        expmap = {'columns': [
            {'c1': {'type': 'integer', 'privileges': [{'user1': ['insert']}]}},
            {'c2': {'type': 'text', 'privileges': [
                {'user1': ['insert']}, {'user2': ['insert', 'update']}]}}],
            'privileges': [{self.db.user: ['all']}, {'PUBLIC': ['select']}]}
        assert dbmap['schema public']['table t1'] == expmap

    def test_map_sequence(self):
        "Map a sequence with various GRANTs"
        stmts = ["CREATE SEQUENCE seq1",
                 "GRANT SELECT ON SEQUENCE seq1 TO PUBLIC",
                 "GRANT USAGE, UPDATE ON SEQUENCE seq1 TO user1"]
        dbmap = self.to_map(stmts, no_privs=False)
        expmap = {'start_value': 1, 'increment_by': 1, 'max_value': None,
                  'min_value': None, 'cache_value': 1,
                  'privileges': [{self.db.user: ['all']},
                                 {'PUBLIC': ['select']},
                                 {'user1': ['usage', 'update']}]}
        assert dbmap['schema public']['sequence seq1'] == expmap

    def test_map_view(self):
        "Map a view with various GRANTs"
        stmts = ["CREATE VIEW v1 AS SELECT now()::date AS today",
                 "GRANT SELECT ON v1 TO PUBLIC",
                 "GRANT REFERENCES ON v1 TO user1"]
        dbmap = self.to_map(stmts, no_privs=False)
        expmap = {'definition': " SELECT now()::date AS today;",
                  'privileges': [{self.db.user: ['all']},
                                 {'PUBLIC': ['select']},
                                 {'user1': ['references']}]}
        assert dbmap['schema public']['view v1'] == expmap

    def test_map_function(self):
        "Map a function with a GRANT and REVOKE from PUBLIC"
        stmts = [CREATE_FUNC, "REVOKE ALL ON FUNCTION f1() FROM PUBLIC",
                 "GRANT EXECUTE ON FUNCTION f1() TO user1"]
        dbmap = self.to_map(stmts, no_privs=False)
        expmap = {'language': 'sql', 'returns': 'text',
                  'source': SOURCE1, 'volatility': 'immutable',
                  'privileges': [{self.db.user: ['execute']},
                                 {'user1': ['execute']}]}
        assert dbmap['schema public']['function f1()'] == expmap

    def test_map_language(self):
        "Map a language but REVOKE default privilege"
        # Language privilege mapping only applies before PostgreSQL 9.1.
        if self.db.version >= 90100:
            self.skipTest('Only available before PG 9.1')
        stmts = ["DROP LANGUAGE IF EXISTS plperl CASCADE",
                 "CREATE LANGUAGE plperl",
                 "REVOKE USAGE ON LANGUAGE plperl FROM PUBLIC"]
        dbmap = self.to_map(stmts, no_privs=False)
        # Clean up the language before asserting so a failure does not leak it.
        self.db.execute_commit("DROP LANGUAGE plperl")
        expmap = {'trusted': True, 'privileges': [{self.db.user: ['usage']}]}
        assert dbmap['language plperl'] == expmap

    def test_map_fd_wrapper(self):
        "Map a foreign data wrapper with a GRANT"
        stmts = [CREATE_FDW,
                 "GRANT USAGE ON FOREIGN DATA WRAPPER fdw1 TO PUBLIC"]
        # Creating FDWs requires superuser rights.
        dbmap = self.to_map(stmts, no_privs=False, superuser=True)
        expmap = {'privileges': [{self.db.user: ['usage']},
                                 {'PUBLIC': ['usage']}]}
        assert dbmap['foreign data wrapper fdw1'] == expmap

    def test_map_server(self):
        "Map a foreign server with a GRANT"
        stmts = [CREATE_FDW, CREATE_FS,
                 "GRANT USAGE ON FOREIGN SERVER fs1 TO user1"]
        dbmap = self.to_map(stmts, no_privs=False, superuser=True)
        expmap = {'privileges': [{self.db.user: ['usage']},
                                 {'user1': ['usage']}]}
        assert dbmap['foreign data wrapper fdw1']['server fs1'] == expmap

    def test_map_foreign_table(self):
        "Map a foreign table with various GRANTs"
        # Foreign tables were introduced in PostgreSQL 9.1.
        if self.db.version < 90100:
            self.skipTest('Only available on PG 9.1')
        stmts = [CREATE_FDW, CREATE_FS,
                 "CREATE FOREIGN TABLE ft1 (c1 integer, c2 text) SERVER fs1",
                 "GRANT SELECT ON ft1 TO PUBLIC",
                 "GRANT INSERT, UPDATE ON ft1 TO user1"]
        dbmap = self.to_map(stmts, no_privs=False, superuser=True)
        expmap = {'columns': [{'c1': {'type': 'integer'}},
                              {'c2': {'type': 'text'}}], 'server': 'fs1',
                  'privileges': [{self.db.user: ['all']},
                                 {'PUBLIC': ['select']},
                                 {'user1': ['insert', 'update']}]}
        assert dbmap['schema public']['foreign table ft1'] == expmap
class PrivilegeToSqlTestCase(InputMapToSqlTestCase):
"""Test SQL generation of privilege information (GRANTs)"""
def setUp(self):
super(InputMapToSqlTestCase, self).setUp()
check_extra_users(self.db)
def test_create_schema(self):
"Create a schema with various privileges"
inmap = self.std_map()
inmap.update({'schema s1': {
'owner': self.db.user, 'privileges': [{
self.db.user: ['all']}, {'PUBLIC': ['usage', 'create']}]}})
sql = self.to_sql(inmap)
# sql[0] = CREATE SCHEMA
# sql[1] = ALTER SCHEMA OWNER
assert sql[2] == "GRANT ALL ON SCHEMA s1 TO %s" % self.db.user
assert sql[3] == "GRANT ALL ON SCHEMA s1 TO PUBLIC"
def test_schema_new_grant(self):
"Grant privileges on an existing schema"
inmap = self.std_map()
inmap.update({'schema s1': {
'owner': self.db.user, 'privileges': [{self.db.user: ['all']},
{'PUBLIC': ['create']}]}})
sql = sorted(self.to_sql(inmap, ["CREATE SCHEMA s1"]))
assert len(sql) == 2
assert sql[0] == "GRANT ALL ON SCHEMA s1 TO %s" % self.db.user
assert sql[1] == "GRANT CREATE ON SCHEMA s1 TO PUBLIC"
def test_create_table(self):
"Create a table with various privileges"
inmap = self.std_map()
inmap['schema public'].update({'table t1': {
'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}}],
'owner': self.db.user,
'privileges': [{self.db.user: ['all']}, {'PUBLIC': ['select']},
{'user1': ['insert', 'update']},
{'user2': [{'trigger': {'grantable': True}},
{'references': {'grantable': True}}]}]}})
sql = self.to_sql(inmap)
# sql[0] = CREATE TABLE
# sql[1] = ALTER TABLE OWNER
assert sql[2] == "GRANT ALL ON TABLE t1 TO %s" % self.db.user
assert sql[3] == GRANT_SELECT % 'PUBLIC'
assert sql[4] == GRANT_INSUPD % 'user1'
assert sql[5] == "GRANT TRIGGER, REFERENCES ON TABLE t1 " \
"TO user2 WITH GRANT OPTION"
def test_create_column_grants(self):
"Create a table with colum-level privileges"
inmap = self.std_map()
inmap['schema public'].update({'table t1': {
'columns': [{'c1': {'type': 'integer', 'privileges': [{'user1': [
'insert']}]}}, {'c2': {'type': 'text', 'privileges': [{'user1': [
'insert']}, {'user2': ['insert', 'update']}]}}],
'owner': self.db.user, 'privileges': [{self.db.user: ['all']},
{'PUBLIC': ['select']}]}})
sql = self.to_sql(inmap)
assert len(sql) == 7
# sql[0] = CREATE TABLE
# sql[1] = ALTER TABLE OWNER
assert sql[2] == "GRANT ALL ON TABLE t1 TO %s" % self.db.user
assert sql[3] == GRANT_SELECT % 'PUBLIC'
assert sql[4] == "GRANT INSERT (c1) ON TABLE t1 TO user1"
assert sql[5] == "GRANT INSERT (c2) ON TABLE t1 TO user1"
assert sql[6] == "GRANT INSERT (c2), UPDATE (c2) ON TABLE t1 TO user2"
def test_table_new_grant(self):
"Grant select privileges on an existing table"
inmap = self.std_map()
inmap['schema public'].update({'table t1': {
'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}}],
'owner': self.db.user,
'privileges': [{self.db.user: ['all']}, {'user1': ['select']}]}})
sql = self.to_sql(inmap, [CREATE_TABLE])
assert len(sql) == 2
sql = sorted(sql)
assert sql[0] == "GRANT ALL ON TABLE t1 TO %s" % self.db.user
assert sql[1] == GRANT_SELECT % 'user1'
def test_table_change_grant(self):
"Grant select privileges on an existing table"
inmap = self.std_map()
inmap['schema public'].update({'table t1': {
'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}}],
'owner': self.db.user,
'privileges': [{self.db.user: ['all']}, {'PUBLIC': ['select']},
{'user1': ['insert', 'update']}]}})
sql = self.to_sql(inmap, [CREATE_TABLE, GRANT_SELECT % 'user1'])
assert len(sql) == 3
assert sorted(sql) == [GRANT_INSUPD % 'user1', GRANT_SELECT % 'PUBLIC',
"REVOKE SELECT ON TABLE t1 FROM user1"]
def test_column_change_grants(self):
"Change existing colum-level privileges"
inmap = self.std_map()
inmap['schema public'].update(
{'table t1': {'columns': [{'c1': {
'type': 'integer', 'privileges': [{
'user1': ['insert']}, {'user2': ['insert', 'update']}]}},
{'c2': {'type': 'text', 'privileges': [{'user1': ['insert']}]}}],
'owner': self.db.user, 'privileges': [{self.db.user: ['all']},
{'PUBLIC': ['select']}]}})
stmts = [CREATE_TABLE, GRANT_SELECT % 'PUBLIC',
"GRANT INSERT (c1, c2) ON t1 TO user1",
"GRANT INSERT (c2), UPDATE (c2) ON t1 TO user2"]
sql = self.to_sql(inmap, stmts)
assert len(sql) == 2
assert sql[0] == "GRANT INSERT (c1), UPDATE (c1) ON TABLE t1 TO user2"
assert sql[1] == "REVOKE INSERT (c2), UPDATE (c2) ON TABLE t1 " \
"FROM user2"
def test_table_revoke_all(self):
"Revoke all privileges on an existing table"
inmap = self.std_map()
inmap['schema public'].update({'table t1': {
'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}}],
'owner': self.db.user}})
stmts = [CREATE_TABLE, GRANT_SELECT % 'PUBLIC', GRANT_INSUPD % 'user1']
sql = sorted(self.to_sql(inmap, stmts))
assert len(sql) == 3
assert sql[0] == "REVOKE ALL ON TABLE t1 FROM %s" % self.db.user
assert sql[1] == "REVOKE INSERT, UPDATE ON TABLE t1 FROM user1"
assert sql[2] == "REVOKE SELECT ON TABLE t1 FROM PUBLIC"
def test_create_sequence(self):
"Create a sequence with some privileges"
inmap = self.std_map()
inmap['schema public'].update({'sequence seq1': {
'start_value': 1, 'increment_by': 1, 'max_value': None,
'min_value': None, 'cache_value': 1, 'owner': self.db.user,
'privileges': [{self.db.user: ['all']}, {'PUBLIC': ['select']}]}})
sql = self.to_sql(inmap)
# sql[0] = CREATE SEQUENCE
# sql[1] = ALTER SEQUENCE OWNER
assert sql[2] == "GRANT ALL ON SEQUENCE seq1 TO %s" % self.db.user
assert sql[3] == "GRANT SELECT ON SEQUENCE seq1 TO PUBLIC"
def test_sequence_new_grant(self):
"Grant privileges on an existing sequence"
inmap = self.std_map()
inmap['schema public'].update({'sequence seq1': {
'start_value': 1, 'increment_by': 1, 'max_value': None,
'min_value': None, 'cache_value': 1, 'owner': self.db.user,
'privileges': [{self.db.user: ['all']}, {'PUBLIC': ['select']}]}})
sql = sorted(self.to_sql(inmap, ["CREATE SEQUENCE seq1"]))
assert len(sql) == 2
assert sql[0] == "GRANT ALL ON SEQUENCE seq1 TO %s" % self.db.user
assert sql[1] == "GRANT SELECT ON SEQUENCE seq1 TO PUBLIC"
def test_create_view(self):
"Create a view with some privileges"
inmap = self.std_map()
inmap['schema public'].update({'view v1': {
'definition': " SELECT now()::date AS today;",
'owner': self.db.user,
'privileges': [{self.db.user: ['all']}, {'user1': ['select']}]}})
sql = self.to_sql(inmap)
# sql[0] = CREATE VIEW
# sql[1] = ALTER VIEW OWNER
assert sql[2] == "GRANT ALL ON TABLE v1 TO %s" % self.db.user
assert sql[3] == "GRANT SELECT ON TABLE v1 TO user1"
def test_view_new_grant(self):
"Grant privileges on an existing view"
inmap = self.std_map()
inmap['schema public'].update({'view v1': {
'definition': " SELECT now()::date AS today;",
'owner': self.db.user,
'privileges': [{self.db.user: ['all']}, {'user1': ['select']}]}})
sql = sorted(self.to_sql(inmap, ["CREATE VIEW v1 AS "
"SELECT now()::date AS today"]))
assert len(sql) == 2
assert sql[0] == "GRANT ALL ON TABLE v1 TO %s" % self.db.user
assert sql[1] == "GRANT SELECT ON TABLE v1 TO user1"
def test_create_function(self):
"Create a function with some privileges"
inmap = self.std_map()
inmap['schema public'].update({'function f1()': {
'language': 'sql', 'returns': 'text', 'source': SOURCE1,
'volatility': 'immutable', 'owner': self.db.user,
'privileges': [{self.db.user: ['all']}, {'PUBLIC': ['execute']}]}})
sql = self.to_sql(inmap)
# sql[0] = SET check_function_bodies
# sql[1] = CREATE FUNCTION
# sql[2] = ALTER FUNCTION OWNER
assert sql[3] == "GRANT EXECUTE ON FUNCTION f1() TO %s" % self.db.user
assert sql[4] == "GRANT EXECUTE ON FUNCTION f1() TO PUBLIC"
def test_function_new_grant(self):
"Grant privileges on an existing function"
inmap = self.std_map()
inmap['schema public'].update({'function f1()': {
'language': 'sql', 'returns': 'text', 'source': SOURCE1,
'volatility': 'immutable', 'owner': self.db.user,
'privileges': [{self.db.user: ['all']}, {'PUBLIC': ['execute']}]}})
sql = self.to_sql(inmap, [CREATE_FUNC])
assert len(sql) == 2
sql = sorted(sql)
# assumes self.db.user > PUBLIC
assert sql[0] == "GRANT EXECUTE ON FUNCTION f1() TO PUBLIC"
assert sql[1] == "GRANT EXECUTE ON FUNCTION f1() TO %s" % self.db.user
def test_create_fd_wrapper(self):
"Create a foreign data wrapper with some privileges"
inmap = self.std_map()
inmap.update({'foreign data wrapper fdw1': {
'owner': self.db.user,
'privileges': [{self.db.user: ['all']}, {'PUBLIC': ['usage']}]}})
sql = self.to_sql(inmap)
# sql[0] = CREATE FDW
# sql[1] = ALTER FDW OWNER
assert sql[2] == "GRANT USAGE ON FOREIGN DATA WRAPPER fdw1 " \
"TO %s" % self.db.user
assert sql[3] == "GRANT USAGE ON FOREIGN DATA WRAPPER fdw1 TO PUBLIC"
def test_fd_wrapper_new_grant(self):
"Grant privileges on an existing foreign data wrapper"
inmap = self.std_map()
inmap.update({'foreign data wrapper fdw1': {
'owner': self.db.user,
'privileges': [{self.db.user: ['all']}, {'PUBLIC': ['usage']}]}})
sql = sorted(self.to_sql(inmap, [CREATE_FDW], superuser=True))
assert len(sql) == 2
# assumes self.db.user > PUBLIC
assert sql[0] == "GRANT USAGE ON FOREIGN DATA WRAPPER fdw1 TO PUBLIC"
assert sql[1] == "GRANT USAGE ON FOREIGN DATA WRAPPER fdw1 " \
"TO %s" % self.db.user
def test_create_server(self):
"Create a foreign server with some privileges"
inmap = self.std_map()
inmap.update({'foreign data wrapper fdw1': {'server fs1': {
'owner': self.db.user,
'privileges': [{self.db.user: ['all']}, {'user2': ['usage']}]}}})
sql = self.to_sql(inmap, [CREATE_FDW], superuser=True)
# sql[0] = CREATE SERVER
# sql[1] = ALTER SERVER OWNER
assert sql[2] == "GRANT USAGE ON FOREIGN SERVER fs1 TO %s" % \
self.db.user
assert sql[3] == "GRANT USAGE ON FOREIGN SERVER fs1 TO user2"
def test_server_new_grant(self):
"Grant privileges on an existing foreign server"
inmap = self.std_map()
inmap.update({'foreign data wrapper fdw1': {'server fs1': {
'owner': self.db.user,
'privileges': [{self.db.user: ['all']}, {'user2': ['usage']}]}}})
sql = sorted(self.to_sql(inmap, [CREATE_FDW, CREATE_FS],
superuser=True))
assert len(sql) == 2
assert sql[0] == "GRANT USAGE ON FOREIGN SERVER fs1 TO %s" % \
self.db.user
assert sql[1] == "GRANT USAGE ON FOREIGN SERVER fs1 TO user2"
def test_create_foreign_table(self):
"Create a foreign table with some privileges"
if self.db.version < 90100:
self.skipTest('Only available on PG 9.1')
inmap = self.std_map()
inmap.update({'foreign data wrapper fdw1': {'server fs1': {}}})
inmap['schema public'].update({'foreign table ft1': {
'columns': [{'c1': {'type': 'integer'}},
{'c2': {'type': 'text'}}], 'server': 'fs1',
'owner': self.db.user,
'privileges': [{self.db.user: ['all']}, {'PUBLIC': ['select']},
{'user1': ['insert', 'update']}]}})
sql = self.to_sql(inmap, [CREATE_FDW, CREATE_FS], superuser=True)
# sql[0] = CREATE TABLE
# sql[1] = ALTER TABLE OWNER
assert sql[2] == "GRANT ALL ON TABLE ft1 TO %s" % self.db.user
assert sql[3] == "GRANT SELECT ON TABLE ft1 TO PUBLIC"
assert sql[4] == "GRANT INSERT, UPDATE ON TABLE ft1 TO user1"
def test_foreign_table_new_grant(self):
    "Grant privileges on an existing foreign table"
    if self.db.version < 90100:
        self.skipTest('Only available on PG 9.1')
    # table already exists in the database; the map adds only privileges
    table_spec = {
        'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}}],
        'server': 'fs1', 'owner': self.db.user,
        'privileges': [{self.db.user: ['all']}, {'PUBLIC': ['select']},
                       {'user1': ['insert', 'update']}]}
    inmap = self.std_map()
    inmap.update({'foreign data wrapper fdw1': {'server fs1': {}}})
    inmap['schema public'].update({'foreign table ft1': table_spec})
    stmts = sorted(self.to_sql(inmap, [
        CREATE_FDW, CREATE_FS,
        "CREATE FOREIGN TABLE ft1 (c1 integer, c2 text) SERVER fs1"],
        superuser=True))
    # only the three GRANT statements should be generated
    assert len(stmts) == 3
    assert stmts[0] == "GRANT ALL ON TABLE ft1 TO %s" % self.db.user
    assert stmts[1] == "GRANT INSERT, UPDATE ON TABLE ft1 TO user1"
    assert stmts[2] == "GRANT SELECT ON TABLE ft1 TO PUBLIC"
| 48.094923 | 79 | 0.542984 |
2be4fab8dcce45333d66ee0dd227bacbd1f4276a | 74,817 | py | Python | mac_easy_manager/mac_easy_manager.py | DallogFheir/mac-easy-manager | 20531efa504e2b752a1aa6154a87169be501ddd4 | [
"MIT"
] | null | null | null | mac_easy_manager/mac_easy_manager.py | DallogFheir/mac-easy-manager | 20531efa504e2b752a1aa6154a87169be501ddd4 | [
"MIT"
] | null | null | null | mac_easy_manager/mac_easy_manager.py | DallogFheir/mac-easy-manager | 20531efa504e2b752a1aa6154a87169be501ddd4 | [
"MIT"
] | null | null | null | from configparser import ConfigParser
from copy import deepcopy
from datetime import datetime
import json
from pathlib import Path
from PIL import ImageTk
import re
import shutil
import tkinter as tk
from tkinter import ttk, messagebox
import traceback
#region EXCEPTION HANDLER
class TkErrorCatcher:
    """Wrapper for Tk callbacks that reports uncaught exceptions in a
    warning message box instead of letting them die silently on stderr.

    Installed as ``tk.CallWrapper`` below so Tkinter routes every
    callback invocation through it.  Based on:
    https://mail.python.org/pipermail/python-list/2001-March/104202.html
    """

    def __init__(self, func, subst, widget):
        self.func = func      # the actual callback to invoke
        self.subst = subst    # optional argument-substitution hook from Tk
        self.widget = widget  # widget the callback belongs to (kept for Tk's API)

    def __call__(self, *args):
        try:
            if self.subst:
                args = self.subst(*args)
            return self.func(*args)
        # FIX: was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; ``except Exception`` lets those propagate while
        # still reporting ordinary callback errors to the user.
        except Exception:
            messagebox.showwarning(
                "Error!", f"Unknown error happened!\n{traceback.format_exc()}")


# Make Tkinter use the catcher for all callback invocations.
tk.CallWrapper = TkErrorCatcher
#endregion
class MACEasyManager:
def __init__(self):
    """Create the root window, load the GUI configuration and start the app.

    Side effects: builds the Tk root, reads config/gui_config.json and
    icons/icon.ico, shows the profile-selection screen, then enters the
    Tk main loop (blocks until the window is closed).
    """
    # path to Firefox data folder on Windows
    # C:\Users\{user}\AppData\Roaming\Mozilla\Firefox
    self.folder_path = Path(Path.home() / Path("AppData", "Roaming", "Mozilla", "Firefox"))
    # Tkinter init
    self.root = tk.Tk()
    self.root.title("MAC Easy Manager")
    icon = ImageTk.PhotoImage(file="icons/icon.ico")
    self.root.iconphoto(True,icon) # True to be default for all toplevels
    # get system resolution to position window
    self.screen_width=self.root.winfo_screenwidth()
    self.screen_height=self.root.winfo_screenheight()
    # load GUI vars from file
    with open("config/gui_config.json",encoding="utf-8") as f:
        self.gui_vars = json.load(f)
    self.profile_select_window()
    self.root.focus_force() # to focus after reinitialization
    self.root.mainloop()
#region GUI METHODS
def profile_select_window(self):
    """Build the initial profile-selection screen.

    Lays out: one radiobutton per Firefox profile (default profile in
    bold), the Select button, the ignore-container options and the
    container-name language picker, then centers the window on screen.
    Side effects: calls self.get_profiles() and reads
    config/container_translations.json.
    """
    # frame
    self.profile_select_frame = tk.Frame(self.root)
    self.profile_select_frame.pack(padx=self.gui_vars["pad"]["x"],pady=self.gui_vars["pad"]["y"])
    # header
    tk.Label(self.profile_select_frame,text=self.gui_vars["text"]["profile_select_window"]["header"],font=self.gui_vars["font"]["header"]).pack(pady=self.gui_vars["pad"]["y"])
    # info about default profile
    tk.Label(self.profile_select_frame,text=self.gui_vars["text"]["profile_select_window"]["subheader"],font=self.gui_vars["font"]["small_text"],fg=self.gui_vars["color"]["small_text"]).pack()
    # separator
    ttk.Separator(self.profile_select_frame, orient="horizontal").pack(fill="x",pady=self.gui_vars["pad"]["y"])
    #region RADIOBUTTONS
    self.get_profiles()
    # before if because used by get_def_languages
    self.profile_radiobtn_var = tk.StringVar()
    # if no profiles found
    if not self.prof_dict:
        tk.Label(self.profile_select_frame,text=self.gui_vars["text"]["profile_select_window"]["no_profs"],font=self.gui_vars["font"]["normal"],fg=self.gui_vars["color"]["warn"]).pack()
    # if there are profiles
    else:
        # in case default profile is empty
        # make default button the 1st one
        default_button = list(self.prof_dict.keys())[0]
        # creates buttons
        for profile in self.prof_dict:
            cur_btn = tk.Radiobutton(self.profile_select_frame,variable=self.profile_radiobtn_var,value=profile,text=profile,font=self.gui_vars["font"]["normal"],command=self.get_def_language)
            # if profile is default, makes text bold
            if self.prof_dict[profile]["is_default"]:
                cur_btn.config(font=self.gui_vars["font"]["default_profile"])
                default_button = profile
            cur_btn.pack()
        # invokes default button
        self.profile_radiobtn_var.set(default_button)
    #endregion
    # separator
    ttk.Separator(self.profile_select_frame, orient="horizontal").pack(fill="x",pady=self.gui_vars["pad"]["y"])
    # Select button
    # continues to main window
    # is focused on start
    select_button = tk.Button(self.profile_select_frame,text=self.gui_vars["text"]["button"]["select"],font=self.gui_vars["font"]["normal"],command=self.main_window)
    select_button.pack()
    # if there are no profiles, disable the button
    if not self.profile_radiobtn_var.get():
        select_button.config(state="disabled")
    else:
        select_button.focus()
    # binds Enter to button action
    self.root.bind("<Return>",lambda *_: select_button.invoke())
    #region IGNORE CONTAINERS OPTION
    # frame
    ignore_frame = tk.Frame(self.profile_select_frame)
    ignore_frame.pack()
    # checkbutton
    self.if_ignored = tk.IntVar()
    ignore_button = tk.Checkbutton(ignore_frame,text=self.gui_vars["text"]["button"]["ignore"],font=self.gui_vars["font"]["normal"],variable=self.if_ignored)
    ignore_button.pack(side="left",pady=self.gui_vars["pad"]["y"])
    ignore_button.invoke()
    # entrybox
    self.ignore_entrybox = tk.Entry(ignore_frame,font=self.gui_vars["font"]["normal"])
    self.ignore_entrybox.pack(side="right",pady=self.gui_vars["pad"]["y"])
    # default ignored container: "tmp"
    # from Temporary Containers extension
    self.ignore_entrybox.insert(0,"tmp")
    # regex checkbutton
    self.if_regex = tk.IntVar()
    tk.Checkbutton(self.profile_select_frame,text=self.gui_vars["text"]["button"]["regex"],font=self.gui_vars["font"]["small_text"],variable=self.if_regex).pack()
    # ignore case checkbutton
    self.if_ignore_case = tk.IntVar()
    tk.Checkbutton(self.profile_select_frame,text=self.gui_vars["text"]["button"]["ignore_case"],font=self.gui_vars["font"]["small_text"],variable=self.if_ignore_case).pack()
    # info about ignore
    tk.Label(self.profile_select_frame,text=self.gui_vars["text"]["profile_select_window"]["ignore_info"],font=self.gui_vars["font"]["small_text"],fg=self.gui_vars["color"]["small_text"]).pack(pady=self.gui_vars["pad"]["y"])
    #endregion
    #region LANGUAGE SELECT OPTION
    # separator
    ttk.Separator(self.profile_select_frame, orient="horizontal").pack(fill="x",pady=self.gui_vars["pad"]["y"])
    # option menu
    with open("config/container_translations.json",encoding="utf-8") as f:
        self.translation_data = json.load(f)
    self.language_select_var = tk.StringVar()
    self.language_select = ttk.Combobox(self.profile_select_frame,textvariable=self.language_select_var,values=list(self.translation_data["by_name"].keys()),state="readonly")
    self.language_select.config(font=self.gui_vars["font"]["normal"])
    self.language_select.pack(pady=self.gui_vars["pad"]["y"])
    # perform function on load
    self.get_def_language()
    # info
    tk.Label(self.profile_select_frame,text=self.gui_vars["text"]["profile_select_window"]["language_select_info_1"],font=self.gui_vars["font"]["small_text"],fg=self.gui_vars["color"]["small_text"]).pack(pady=self.gui_vars["pad"]["y"])
    tk.Label(self.profile_select_frame,text=self.gui_vars["text"]["profile_select_window"]["language_select_info_2"],font=self.gui_vars["font"]["small_text"],fg=self.gui_vars["color"]["small_text"]).pack()
    #endregion
    #region WINDOW POSITION
    # get window size
    self.root.update_idletasks() #update idletasks to get correct size
    win_width=self.root.winfo_width()
    win_height=self.root.winfo_height()
    # calculate position
    # half screen resolution - half window size
    half_width=int(self.screen_width/2-win_width/2)
    half_height=int(self.screen_height/2-win_height/2)
    # position window
    # "+X_position+Y_position"
    self.root.geometry(f"+{str(half_width)}+{str(half_height)}")
    #endregion
def main_window(self):
    """Build and lay out the main container-editing window.

    Replaces the profile-selection frame with the scrollable main view:
    the container list, the three sorting-option panels, the colour and
    icon order panels, and the edit/delete/add/save controls; then sizes
    and centers the window.

    Side effects: reads default_order.json, config/original_order.json
    and every image under icons/container_icons, calls
    self.get_containers() and the refresh_* methods, and installs the
    WM_DELETE_WINDOW handler.
    """
    # unbinds Enter from Select button
    self.root.unbind("<Return>")
    # removes profile selection window
    self.profile_select_frame.pack_forget()
    # gets path of selected profile
    self.sel_prof_path = self.prof_dict[self.profile_radiobtn_var.get()]["path"]
    # gets ignored container name
    self.ignored_str = self.ignore_entrybox.get()
    # loads container icons
    self.icon_imgs = {}
    icon_path = Path("icons", "container_icons")
    # icons > container_icons > {color} > {icon}.png
    for icon_folder in icon_path.iterdir():
        for icon in icon_folder.iterdir():
            # name of icon = {color}{icon}
            self.icon_imgs[f"{icon_folder.name}{icon.stem}"] = ImageTk.PhotoImage(file=icon)
    # loads default order
    with open("default_order.json",encoding="utf-8") as f:
        self.default_order = json.load(f)
    # gets current order from default order
    self.current_order = deepcopy(self.default_order)
    with open("config/original_order.json",encoding="utf-8") as f:
        self.orig_order = json.load(f)
    #region GUI
    # supermain frame to center content
    self.super_frame = tk.Frame(self.root)
    self.super_frame.pack()
    # main frames
    # canvas to add scrollbar
    # highlightthickness to remove border
    self.main_frame = tk.Canvas(self.super_frame,highlightthickness=0)
    self.main_frame.pack(side="left")
    # wrapper frame
    # needed a wrapper to draw inside of canvas to be scrollable
    self.wrapper_frame = tk.Frame(self.main_frame)
    self.main_frame.create_window((0,0),window=self.wrapper_frame)
    # 0 y-padding at bottom
    self.top_frame = tk.Frame(self.wrapper_frame)
    self.top_frame.pack(padx=self.gui_vars["pad"]["main_x"],pady=(self.gui_vars["pad"]["main_y"],0),fill="x")
    ttk.Separator(self.wrapper_frame,orient="horizontal").pack(fill="x")
    self.bottom_frame = tk.Frame(self.wrapper_frame)
    self.bottom_frame.pack(padx=self.gui_vars["pad"]["main_x"],pady=self.gui_vars["pad"]["main_y"],fill="x")
    # main scrollbar
    self.main_scrollbar = tk.Scrollbar(self.super_frame)
    self.main_scrollbar.pack(side="left",fill="y")
    self.main_scrollbar.config(command=self.main_frame.yview)
    self.main_frame.config(yscrollcommand=self.main_scrollbar.set)
    # bind <Configure> (= changing size) to change scroll region of canvas
    self.wrapper_frame.bind("<Configure>",lambda *_: self.main_frame.configure(scrollregion=self.main_frame.bbox("all")))
    #region TOP FRAME
    #region LIST OF CONTAINERS
    # frame
    self.containers_frame = tk.Frame(self.top_frame)
    self.containers_frame.pack(side="left",padx=self.gui_vars["pad"]["main_x"],pady=self.gui_vars["pad"]["main_y"],anchor="n")
    # title
    tk.Label(self.containers_frame,text=self.gui_vars["text"]["main_window"]["containers"],font=self.gui_vars["font"]["header"]).grid(**self.gui_vars["grid"]["containers_frame"]["title"])
    # treeview
    # selectmode:
    #   extended = multiple selections
    # show:
    #   tree = without header
    self.cont_treeview = ttk.Treeview(self.containers_frame,height=13,selectmode="extended",show="tree")
    self.cont_treeview.grid(**self.gui_vars["grid"]["containers_frame"]["treeview"])
    # binds selecting item in treeview to enable current container edit box and disable move up/down button if 1st or last item selected
    self.cont_treeview.bind("<<TreeviewSelect>>",self.cont_handle_select)
    # binds toggle_bind_treeview on hovering over/out to allow scrolling
    self.cont_treeview.bind("<Enter>",self.toggle_bind_treeview)
    self.cont_treeview.bind("<Leave>",self.toggle_bind_treeview)
    # fixes rowheight to be readable
    # once for all treeviews
    ttk.Style().configure("Treeview",rowheight=30)
    # scrollbar
    self.cont_treeview_scrollbar=tk.Scrollbar(self.containers_frame)
    self.cont_treeview_scrollbar.grid(**self.gui_vars["grid"]["containers_frame"]["scrollbar"])
    self.cont_treeview.config(yscrollcommand = self.cont_treeview_scrollbar.set)
    self.cont_treeview_scrollbar.config(command = self.cont_treeview.yview)
    # move up and down buttons
    # disabled at start
    self.cont_move_up_btn=tk.Button(self.containers_frame,text=self.gui_vars["text"]["button"]["move_up"],font=self.gui_vars["font"]["normal"],command=self.cont_move_up)
    self.cont_move_up_btn.config(state="disabled")
    self.cont_move_up_btn.grid(**self.gui_vars["grid"]["containers_frame"]["move_up"])
    self.cont_move_down_btn=tk.Button(self.containers_frame,text=self.gui_vars["text"]["button"]["move_down"],font=self.gui_vars["font"]["normal"],command=self.cont_move_down)
    self.cont_move_down_btn.config(state="disabled")
    self.cont_move_down_btn.grid(**self.gui_vars["grid"]["containers_frame"]["move_down"])
    # Restore original order button
    tk.Button(self.containers_frame,text=self.gui_vars["text"]["button"]["restore_cont"],font=self.gui_vars["font"]["normal"],command=self.cont_restore).grid(**self.gui_vars["grid"]["containers_frame"]["restore"])
    # Restored label (shown for 1 second when restored)
    self.cont_restored_label = tk.Label(self.containers_frame,font=self.gui_vars["font"]["normal"])
    self.cont_restored_label.grid(**self.gui_vars["grid"]["containers_frame"]["saved"])
    #endregion
    #region SORTING OPTIONS
    # frame
    self.sorting_options_frame = tk.Frame(self.top_frame)
    self.sorting_options_frame.pack(side="left",anchor="n")
    # PRIMARY SORTING
    # frame
    self.prim_sort_options_frame = tk.Frame(self.sorting_options_frame)
    self.prim_sort_options_frame.pack(padx=self.gui_vars["pad"]["main_x"],pady=self.gui_vars["pad"]["main_y"])
    # title
    tk.Label(self.prim_sort_options_frame,text=self.gui_vars["text"]["main_window"]["primary_sort"],font=self.gui_vars["font"]["header"]).pack()
    # radiobuttons
    self.prim_sort = tk.StringVar()
    self.prim_sort_lst = []
    for i, v in enumerate(["name", "color", "icon"]):
        self.prim_sort_lst.append(tk.Radiobutton(self.prim_sort_options_frame,text=v,font=self.gui_vars["font"]["normal"],variable=self.prim_sort,value=i,command=self.handle_sorting_options))
        self.prim_sort_lst[i].pack()
    # select no button at start
    self.prim_sort.set(None)
    # reverse checkbutton
    # reverse list for all 3 reverse checkbuttons
    self.reverse_lst = [tk.IntVar() for _ in range(3)]
    tk.Checkbutton(self.prim_sort_options_frame,text=self.gui_vars["text"]["main_window"]["reverse"],font=self.gui_vars["font"]["normal"],variable=self.reverse_lst[0],command=self.sort).pack()
    # SECONDARY SORTING
    # frame
    self.sec_sort_options_frame = tk.Frame(self.sorting_options_frame)
    self.sec_sort_options_frame.pack(padx=self.gui_vars["pad"]["main_x"],pady=self.gui_vars["pad"]["main_y"])
    # title
    tk.Label(self.sec_sort_options_frame,text=self.gui_vars["text"]["main_window"]["secondary_sort"],font=self.gui_vars["font"]["header"]).pack(side="top")
    # radiobuttons
    self.sec_sort = tk.StringVar()
    # has to be StringVar because None for IntVar is 0, which messes up sort
    self.sec_sort_lst = []
    for i, v in enumerate(["name", "color", "icon"]):
        self.sec_sort_lst.append(tk.Radiobutton(self.sec_sort_options_frame,text=v,font=self.gui_vars["font"]["normal"],variable=self.sec_sort,value=i,state="disabled",command=self.handle_sorting_options))
        self.sec_sort_lst[i].pack()
    # select no button at start
    self.sec_sort.set(None)
    # reverse checkbutton
    tk.Checkbutton(self.sec_sort_options_frame,text=self.gui_vars["text"]["main_window"]["reverse"],font=self.gui_vars["font"]["normal"],variable=self.reverse_lst[1],command=self.sort).pack()
    # TERTIARY SORTING
    # frame
    self.tert_sort_options_frame = tk.Frame(self.sorting_options_frame)
    self.tert_sort_options_frame.pack(padx=self.gui_vars["pad"]["main_x"],pady=self.gui_vars["pad"]["main_y"])
    # title
    tk.Label(self.tert_sort_options_frame,text=self.gui_vars["text"]["main_window"]["tertiary_sort"],font=self.gui_vars["font"]["header"]).pack(side="top")
    # reverse checkbutton
    tk.Checkbutton(self.tert_sort_options_frame,text=self.gui_vars["text"]["main_window"]["reverse"],font=self.gui_vars["font"]["normal"],variable=self.reverse_lst[2],command=self.sort).pack()
    # SAVE & RESTORE
    # frame
    self.save_sorting_frame = tk.Frame(self.sorting_options_frame)
    self.save_sorting_frame.pack(padx=self.gui_vars["pad"]["main_x"],pady=self.gui_vars["pad"]["main_y"])
    tk.Button(self.save_sorting_frame,text=self.gui_vars["text"]["button"]["save_options"],font=self.gui_vars["font"]["normal"],command=self.save_sorting_options).pack(pady=self.gui_vars["pad"]["y"])
    tk.Button(self.save_sorting_frame,text=self.gui_vars["text"]["button"]["load_options"],font=self.gui_vars["font"]["normal"],command=self.sort_opts_restore).pack(pady=self.gui_vars["pad"]["y"])
    self.sort_saved_label = tk.Label(self.save_sorting_frame,font=self.gui_vars["font"]["normal"])
    self.sort_saved_label.pack(pady=self.gui_vars["pad"]["y"])
    #endregion
    #region COLOR SORT
    # frame
    self.color_frame = tk.Frame(self.top_frame)
    self.color_frame.pack(side="left",padx=self.gui_vars["pad"]["main_x"],pady=self.gui_vars["pad"]["main_y"],anchor="n")
    # title
    tk.Label(self.color_frame,text=self.gui_vars["text"]["main_window"]["color"],font=self.gui_vars["font"]["header"]).grid(**self.gui_vars["grid"]["containers_frame"]["title"])
    # treeview
    # selectmode:
    #   browse = single selection
    #   extended = multiple selections
    # show:
    #   tree = without header
    self.color_treeview = ttk.Treeview(self.color_frame,height=13,selectmode="extended",show="tree")
    self.color_treeview.grid(**self.gui_vars["grid"]["containers_frame"]["treeview"])
    # binds selecting item in treeview to enable current container edit box and disable move up/down button if 1st or last item selected
    self.color_treeview.bind("<<TreeviewSelect>>",self.color_handle_select)
    # move up and down buttons
    # disabled at start
    self.color_move_up_btn=tk.Button(self.color_frame,text=self.gui_vars["text"]["button"]["move_up"],font=self.gui_vars["font"]["normal"],command=self.color_move_up)
    self.color_move_up_btn.config(state="disabled")
    self.color_move_up_btn.grid(**self.gui_vars["grid"]["containers_frame"]["move_up"])
    self.color_move_down_btn=tk.Button(self.color_frame,text=self.gui_vars["text"]["button"]["move_down"],font=self.gui_vars["font"]["normal"],command=self.color_move_down)
    self.color_move_down_btn.config(state="disabled")
    self.color_move_down_btn.grid(**self.gui_vars["grid"]["containers_frame"]["move_down"])
    # Reset to original order button
    tk.Button(self.color_frame,text=self.gui_vars["text"]["button"]["reset_cont"],font=self.gui_vars["font"]["normal"],command=self.color_reset).grid(**self.gui_vars["grid"]["containers_frame"]["reset"])
    # Restore default order button
    tk.Button(self.color_frame,text=self.gui_vars["text"]["button"]["restore_cont"],font=self.gui_vars["font"]["normal"],command=self.color_restore).grid(**self.gui_vars["grid"]["containers_frame"]["restore"])
    # Save as default button
    tk.Button(self.color_frame,text=self.gui_vars["text"]["button"]["save_default"],font=self.gui_vars["font"]["normal"],command=self.color_save_order).grid(**self.gui_vars["grid"]["containers_frame"]["save"])
    # Saved/Restored label (shown for 1 second when saved/restored)
    self.color_saved_label = tk.Label(self.color_frame,font=self.gui_vars["font"]["normal"])
    self.color_saved_label.grid(**self.gui_vars["grid"]["containers_frame"]["saved"])
    #endregion
    #region ICON SORT
    # frame
    self.icon_frame = tk.Frame(self.top_frame)
    self.icon_frame.pack(side="left",padx=self.gui_vars["pad"]["main_x"],pady=self.gui_vars["pad"]["main_y"],anchor="n")
    # title
    tk.Label(self.icon_frame,text=self.gui_vars["text"]["main_window"]["icon"],font=self.gui_vars["font"]["header"]).grid(**self.gui_vars["grid"]["containers_frame"]["title"])
    # treeview
    # selectmode:
    #   browse = single selection
    #   extended = multiple selections
    # show:
    #   tree = without header
    self.icon_treeview = ttk.Treeview(self.icon_frame,height=13,selectmode="extended",show="tree")
    self.icon_treeview.grid(**self.gui_vars["grid"]["containers_frame"]["treeview"])
    # binds selecting item in treeview to enable current container edit box and disable move up/down button if 1st or last item selected
    self.icon_treeview.bind("<<TreeviewSelect>>",self.icon_handle_select)
    # move up and down buttons
    # disabled at start
    self.icon_move_up_btn=tk.Button(self.icon_frame,text=self.gui_vars["text"]["button"]["move_up"],font=self.gui_vars["font"]["normal"],command=self.icon_move_up)
    self.icon_move_up_btn.config(state="disabled")
    self.icon_move_up_btn.grid(**self.gui_vars["grid"]["containers_frame"]["move_up"])
    self.icon_move_down_btn=tk.Button(self.icon_frame,text=self.gui_vars["text"]["button"]["move_down"],font=self.gui_vars["font"]["normal"],command=self.icon_move_down)
    self.icon_move_down_btn.config(state="disabled")
    self.icon_move_down_btn.grid(**self.gui_vars["grid"]["containers_frame"]["move_down"])
    # Reset to original order button
    tk.Button(self.icon_frame,text=self.gui_vars["text"]["button"]["reset_cont"],font=self.gui_vars["font"]["normal"],command=self.icon_reset).grid(**self.gui_vars["grid"]["containers_frame"]["reset"])
    # Restore original order button
    tk.Button(self.icon_frame,text=self.gui_vars["text"]["button"]["restore_cont"],font=self.gui_vars["font"]["normal"],command=self.icon_restore).grid(**self.gui_vars["grid"]["containers_frame"]["restore"])
    # Save as default button
    tk.Button(self.icon_frame,text=self.gui_vars["text"]["button"]["save_default"],font=self.gui_vars["font"]["normal"],command=self.icon_save_order).grid(**self.gui_vars["grid"]["containers_frame"]["save"])
    # Saved/Restored label (shown for 1 second when saved/restored)
    self.icon_saved_label = tk.Label(self.icon_frame,font=self.gui_vars["font"]["normal"])
    self.icon_saved_label.grid(**self.gui_vars["grid"]["containers_frame"]["saved"])
    #endregion
    #endregion
    #region BOTTOM FRAME
    #region CONTAINER EDIT
    # frame
    self.cont_edit_frame = tk.Frame(self.bottom_frame)
    self.cont_edit_frame.pack(side="left")
    #region CURRENT CONTAINER
    # frame
    self.cur_cont_frame = tk.Frame(self.cont_edit_frame)
    self.cur_cont_frame.pack(pady=self.gui_vars["pad"]["y"],anchor="w")
    # label
    tk.Label(self.cur_cont_frame,text=self.gui_vars["text"]["main_window"]["cur_cont"],font=self.gui_vars["font"]["header"]).pack(side="left")
    # icon
    # none at start
    self.cur_cont_icon = tk.Label(self.cur_cont_frame)
    self.cur_cont_icon.pack(side="left")
    # name
    # "none" at start
    self.cur_cont_name = tk.Label(self.cur_cont_frame,text=self.gui_vars["text"]["main_window"]["cur_cont_name"],font=self.gui_vars["font"]["normal"])
    self.cur_cont_name.pack(side="left")
    #endregion
    #region CHANGE NAME
    # frame
    self.change_name_frame = tk.Frame(self.cont_edit_frame)
    self.change_name_frame.pack(pady=self.gui_vars["pad"]["y"],anchor="w")
    # label
    tk.Label(self.change_name_frame,text=self.gui_vars["text"]["main_window"]["cur_cont_change_name"],font=self.gui_vars["font"]["normal"]).pack(side="left",padx=self.gui_vars["pad"]["x"])
    # entrybox
    self.change_name_var = tk.StringVar()
    self.change_name_entry = tk.Entry(self.change_name_frame,state="disabled",font=self.gui_vars["font"]["normal"],textvariable=self.change_name_var)
    self.change_name_entry.pack(side="left",padx=self.gui_vars["pad"]["x"])
    # bind change of input to black font
    # font is grey when entry is "(multiple)"
    self.change_name_var.trace_add("write",lambda *_: self.change_name_entry.config(fg="black"))
    # bind clicking on input so that "(multiple)"" disappears when clicked
    self.change_name_entry.bind("<Button-1>",lambda *_: self.change_name_var.get() == "(multiple)" and self.change_name_var.set(""))
    # "Press Enter" info
    tk.Label(self.change_name_frame,text=self.gui_vars["text"]["main_window"]["cur_cont_change_name_info"],font=self.gui_vars["font"]["small_text"],fg=self.gui_vars["color"]["small_text"]).pack(side="left")
    #endregion
    #region CHANGE COLOR
    # frame
    self.change_color_frame = tk.Frame(self.cont_edit_frame)
    self.change_color_frame.pack(pady=self.gui_vars["pad"]["y"],anchor="w")
    # label
    tk.Label(self.change_color_frame,text=self.gui_vars["text"]["main_window"]["cur_cont_change_color"],font=self.gui_vars["font"]["normal"]).pack(side="left",padx=self.gui_vars["pad"]["x"])
    # color images
    self.change_color_lst = []
    for i, color in enumerate(self.orig_order["color"]):
        # disabled at start
        # image name = {color}circle
        self.change_color_lst.append(tk.Button(self.change_color_frame,image=self.icon_imgs[color+"circle"],borderwidth=0,state="disabled",command=lambda i=color: self.change_color(i)))
        self.change_color_lst[i].pack(side="left",padx=self.gui_vars["pad"]["small_icon_x"])
    #endregion
    #region CHANGE ICON
    # frame
    self.change_icon_frame = tk.Frame(self.cont_edit_frame)
    self.change_icon_frame.pack(pady=self.gui_vars["pad"]["y"],anchor="w")
    # label
    tk.Label(self.change_icon_frame,text=self.gui_vars["text"]["main_window"]["cur_cont_change_icon"],font=self.gui_vars["font"]["normal"]).pack(side="left",padx=self.gui_vars["pad"]["x"])
    # icon images
    self.change_icon_lst = []
    for i, icon in enumerate(self.orig_order["icon"]):
        # disabled at start
        # image name = toolbar{icon}
        self.change_icon_lst.append(tk.Button(self.change_icon_frame,image=self.icon_imgs["toolbar"+icon],borderwidth=0,state="disabled",command=lambda i=icon : self.change_icon(i)))
        self.change_icon_lst[i].pack(side="left",padx=self.gui_vars["pad"]["small_icon_x"])
    #endregion
    #region DELETE & ADD BUTTONS
    # frame
    self.del_add_btns_frame = tk.Frame(self.cont_edit_frame)
    self.del_add_btns_frame.pack(pady=self.gui_vars["pad"]["y"])
    # Delete button
    # disabled at start
    self.del_button = tk.Button(self.del_add_btns_frame,text=self.gui_vars["text"]["button"]["delete"],font=self.gui_vars["font"]["normal"],state="disabled",command=self.delete_cont)
    self.del_button.pack(side="left",padx=self.gui_vars["pad"]["x"])
    # Add a new container
    self.add_button = tk.Button(self.del_add_btns_frame,text=self.gui_vars["text"]["button"]["add"],font=self.gui_vars["font"]["normal"],command=self.add_cont)
    self.add_button.pack(side="left",padx=self.gui_vars["pad"]["x"])
    # sets focus to button
    self.add_button.focus()
    #endregion
    #endregion
    #region SAVE & BACK BUTTON
    # frame
    self.save_back_frame = tk.Frame(self.bottom_frame)
    self.save_back_frame.pack(side="right",padx=self.gui_vars["pad"]["x"])
    # Save button
    tk.Button(self.save_back_frame,text=self.gui_vars["text"]["button"]["save"],font=self.gui_vars["font"]["normal"],command=self.save).pack(pady=self.gui_vars["pad"]["y"])
    # Back to profile selection button
    tk.Button(self.save_back_frame,text=self.gui_vars["text"]["button"]["back"],font=self.gui_vars["font"]["normal"],command=self.back_to_profile).pack(pady=self.gui_vars["pad"]["y"])
    #endregion
    #endregion
    #endregion
    # binds mouse click to set focus on widget
    # to toggle Enter bind to Add a container button & changing name of current container
    self.root.bind_all("<Button-1>",lambda event: event.widget.focus_set())
    self.change_name_entry.bind("<FocusIn>",self.toggle_bind_entrybox)
    self.change_name_entry.bind("<FocusOut>",self.toggle_bind_entrybox)
    # calls initial toggle_bind_entrybox/treeview
    self.if_toggled_entrybox = True
    self.toggle_bind_entrybox()
    self.if_toggled_treeview = False
    self.toggle_bind_treeview()
    # perform start-up methods
    self.get_containers()
    self.refresh_conts()
    self.refresh_colors()
    self.refresh_icons()
    # removes selection from container treeview
    # (because at start if_added gets triggered because start length = 0)
    self.cont_treeview.selection_remove(self.cont_treeview.selection())
    # scroll to top
    self.cont_treeview.yview_moveto(0)
    # marks as saved
    self.if_saved = True
    #region WINDOW POSITION
    # get size of wrapper frame
    self.wrapper_frame.update_idletasks()
    win_width = self.wrapper_frame.winfo_width()
    win_height = self.wrapper_frame.winfo_height()
    # if frame size is bigger than 80% of screen size, uses screen size instead
    if win_width > self.screen_width or win_height > self.screen_height:
        win_width=int(self.screen_width * 0.8)
        win_height=int(self.screen_height * 0.8)
    # calculate position
    # half screen resolution - half window size
    half_width=int(self.screen_width/2-win_width/2)
    half_height=int(self.screen_height/2-win_height/2)
    # position window
    # "{width}x{height}+{X_position}+{Y_position}"
    # +20 for scrollbar
    self.root.geometry(f"{win_width+20}x{win_height}+{half_width}+{half_height}")
    # config canvas to fit window
    self.main_frame.config(width=win_width,height=win_height)
    #endregion
    # handle quitting program
    self.root.protocol("WM_DELETE_WINDOW",self.close)
def toggle_bind_entrybox(self, *_):
    """Swap keyboard shortcuts depending on rename-entry focus.

    Bound to <FocusIn>/<FocusOut> of the Change-name entrybox: while the
    entry is focused, Enter renames the container; while it is not,
    Enter/Ctrl+A/Delete act as global shortcuts.
    """
    if not self.if_toggled_entrybox:
        # entry just gained focus: global shortcuts would interfere with
        # typing, so drop them and let Enter commit the new name
        self.root.unbind("<Return>")
        self.root.unbind("<Control-a>")
        self.root.unbind("<Delete>")
        self.change_name_entry.bind("<Return>", self.change_name)
        self.if_toggled_entrybox = True
        return
    # entry just lost focus: restore the global shortcuts
    self.change_name_entry.unbind("<Return>")
    # Enter triggers the Add button (which also takes keyboard focus)
    self.root.bind("<Return>", lambda *_: self.add_button.invoke())
    self.add_button.focus()
    # Ctrl+A selects every container in the treeview
    self.root.bind("<Control-a>", lambda *_: self.cont_treeview.selection_set(self.cont_treeview.get_children()))
    # Delete triggers the Delete button (harmless while it is disabled)
    self.root.bind("<Delete>", lambda *_: self.del_button.invoke())
    self.if_toggled_entrybox = False
def toggle_bind_treeview(self, *_):
    """Toggle the global mouse-wheel binding around the container treeview.

    Bound to <Enter>/<Leave> of the treeview: while the pointer hovers
    the treeview its own wheel scrolling must win, so the window-level
    wheel binding is removed; otherwise the wheel scrolls the main canvas.
    """
    if not self.if_toggled_treeview:
        # pointer outside the treeview: wheel scrolls the whole window
        # (idea from https://stackoverflow.com/a/17457843)
        self.root.bind_all("<MouseWheel>", lambda event: self.main_frame.yview_scroll(int(-1*event.delta/120),"units"))
        self.if_toggled_treeview = True
    else:
        # pointer over the treeview: release the wheel for the treeview
        self.root.unbind_all("<MouseWheel>")
        self.if_toggled_treeview = False
def back_to_profile(self):
    """Return to profile selection, prompting first about unsaved changes."""
    if not self.check_if_saved():
        return
    # tear down the whole window and rebuild the app from scratch
    self.root.destroy()
    self.__init__()
def close(self):
    """Quit the application, prompting first about unsaved changes."""
    if not self.check_if_saved():
        return
    # destroy the window and stop the main loop
    self.root.destroy()
    self.root.quit()
#endregion
#region SORTING METHODS
def handle_sorting_options(self):
    """Keep the secondary-sort choice consistent with the primary one.

    Invoked by the primary/secondary radiobuttons: the secondary option
    must differ from the primary, so the matching secondary button is
    disabled (and the selection bumped if needed), then the containers
    are re-sorted.
    """
    primary = self.prim_sort.get()
    # re-enable the whole secondary group before disabling the conflict
    for button in self.sec_sort_lst:
        button.config(state="normal")
    # if secondary equals primary, or nothing is selected yet,
    # advance to the next option (wrapping around the three choices)
    if self.sec_sort.get() in (primary, "None"):
        self.sec_sort.set((int(primary) + 1) % 3)
    self.sec_sort_lst[int(primary)].config(state="disabled")
    # apply the new ordering
    self.sort()
def sort(self):
    """Sort self.ready_conts by the selected primary/secondary/tertiary
    criteria (name / color / icon) honouring the per-criterion Reverse
    checkbuttons, then refresh the container treeview.

    No-op until a primary sorting option has been chosen.
    """
    # has to check if sorting option is selected
    # because moving colors/icon up/down calls sort too
    if self.prim_sort.get() != "None":
        # dictionary to map sorting options to functions
        # 0 : name (case insensitive)
        # 1 : color = turns color name into number, given by order in sorting order
        # 2 : icon = turns icon name into number, given by order in sorting order
        sort_func_dict = {
            0 : lambda cont: cont["name"].lower(),
            1 : lambda cont: self.current_order["color"].index(cont["color"]),
            2 : lambda cont: self.current_order["icon"].index(cont["icon"])
        }
        # gets 1st and 2nd sorting option from buttons
        # 3rd sorting option from set difference
        # (tuple to get value as int)
        first_sort = int(self.prim_sort.get())
        second_sort = int(self.sec_sort.get())
        third_sort = tuple({0, 1, 2}.difference({first_sort, second_sort}))[0]
        sort_options_lst = [first_sort, second_sort, third_sort]
        # turns ints from sort_options_lst to functions in sort_func_dict
        sort_func_lst = [sort_func_dict[option] for option in sort_options_lst]
        # handles reverse sorting
        # reverse sorting names has to be handled by built-in reverse keyword
        # sets reverse to True if Reverse checkbutton for sorting name (0 in options list) is checked
        # checkbutton IntVar = 1 if selected, 0 if not
        reverse = self.reverse_lst[sort_options_lst.index(0)].get()
        for index, option in enumerate(sort_options_lst):
            # if option is not sorting by name (0)
            if option:
                # if button is unchecked (0) and reverse==True
                # (because reverse will be handled by reverse keyword in sort, so unchecked options have to be reversed to be sorted normally)
                # or button is checked (1) and reverse==False
                if self.reverse_lst[index].get() != reverse:
                    # makes function return opposite number to sort number
                    # so that it will sort in opposite direction
                    # (func=... default binds the current function, avoiding
                    # the late-binding closure pitfall)
                    sort_func_lst[index] = lambda cont, func=sort_func_lst[index]: -func(cont)
        # performs sort with key being list of sorting indexes/names
        self.ready_conts.sort(key=lambda cont: [func(cont) for func in sort_func_lst],reverse=reverse)
        # refreshes container treeview
        self.refresh_conts()
#endregion
#region GET METHODS
def get_profiles(self):
# gets profiles.ini file from Firefox data folder
# C:\Users\{user}\AppData\Roaming\Mozilla\Firefox
# if file/folder doesn't exist, ConfigParser handles it internally
config = ConfigParser()
config.read(self.folder_path / "profiles.ini")
#region FILE STRUCTURE
# [Install208046BA024A39CB]
# Default=Profiles/asd213.default-release (< PATH TO DEFAULT PROFILE)
# Locked=1
# [Profile2]
# Name=something
# IsRelative=0
# Path=C:\Users\User\1231sad.something
# [Profile1]
# Name=default
# IsRelative=1
# Path=Profiles/12321asd.default
# Default=1 (! THIS DOES NOT CHANGE WITH DEFAULT PROFILE CHANGE)
#endregion
# prevents unbound
default = None
# gets default profile from [Install] section
for section in config.sections():
if re.match("Install",section):
default = config[section]["Default"]
break
# gets all profiles names and paths from [ProfileN] sections
profile_dict = {}
for section in config.sections():
if re.match("Profile",section):
# gets path
path = config[section]["Path"]
# if path is relative, adds full folder path
full_path = self.folder_path / path if config[section]["isRelative"] else Path(path)
# checks if profile is not empty
if Path.exists(full_path / "containers.json"):
name = config[section]["Name"]
is_default = path==default
# profile name is unique
# creates profile_dict
profile_dict[name] = {}
profile_dict[name]["path"] = full_path
profile_dict[name]["is_default"] = is_default
self.prof_dict = profile_dict
    def get_containers(self):
        """Load containers.json for the selected profile and split its identities.

        Fills self.ready_conts (editable containers), self.ignored_conts
        (non-public/internal identities plus name-filtered ones), resolves
        localized names for Firefox's built-in containers, and snapshots the
        loaded state in self.orig_conts for later "unsaved changes" checks.
        """
        # loads whole containers.json file
        with open(self.sel_prof_path / "containers.json",encoding="utf-8") as f:
            self.raw_conts = json.load(f)
        #region FILE STRUCTURE
        # {
        #     "version": 4,
        #     "lastUserContextId": 6,
        #     "identities": [
        #         {
        #             "userContextId": 1,
        #             "public": true,
        #             "icon": "fingerprint",
        #             "color": "blue",
        #             "l10nID": "userContextPersonal.label",
        #             "accessKey": "userContextPersonal.accesskey",
        #             "telemetryId": 1
        #         },
        #         {
        #             "userContextId": 2,
        #             "public": true,
        #             "icon": "briefcase",
        #             "color": "orange",
        #             "l10nID": "userContextWork.label",
        #             "accessKey": "userContextWork.accesskey",
        #             "telemetryId": 2
        #         },
        #         {
        #             "userContextId": 3,
        #             "public": true,
        #             "icon": "dollar",
        #             "color": "green",
        #             "l10nID": "userContextBanking.label",
        #             "accessKey": "userContextBanking.accesskey",
        #             "telemetryId": 3
        #         },
        #         {
        #             "userContextId": 4,
        #             "public": true,
        #             "icon": "cart",
        #             "color": "pink",
        #             "l10nID": "userContextShopping.label",
        #             "accessKey": "userContextShopping.accesskey",
        #             "telemetryId": 4
        #         },
        #         {
        #             "userContextId": 5,
        #             "public": false,
        #             "icon": "",
        #             "color": "",
        #             "name": "userContextIdInternal.thumbnail",
        #             "accessKey": ""
        #         },
        #         {
        #             "userContextId": 4294967295,
        #             "public": false,
        #             "icon": "",
        #             "color": "",
        #             "name": "userContextIdInternal.webextStorageLocal",
        #             "accessKey": ""
        #         },
        #         {
        #             "userContextId": 6,
        #             "public": true,
        #             "icon": "dollar",
        #             "color": "green",
        #             "name": "Custom"
        #         }
        #     ]
        # }
        #endregion
        # builds the ignored-name pattern:
        # default = {user input} followed by digits (or nothing);
        # when the user chose raw-regex mode, the input is used as-is
        # NOTE(review): self.if_regex is read without .get() unlike the tk
        # variables below — presumably a plain bool; confirm.
        added_regex = "" if self.if_regex else r"(\d+|$)"
        # re.I flag makes the match case-insensitive when requested
        pattern_str = self.ignored_str + added_regex
        pattern = re.compile(pattern_str,re.I) if self.if_ignore_case.get() else re.compile(pattern_str)
        self.ready_conts = []
        self.ignored_conts = []
        for identity in self.raw_conts["identities"]:
            # non-public identities are Firefox-internal and never editable
            if identity["public"]:
                # built-in containers have no "name", only localization keys like
                # userContextPersonal.accessKey — derive a display name from them
                if "accessKey" in identity:
                    # translates the built-in name to the selected language
                    lang = self.language_select_var.get()
                    # NOTE(review): the "." before "accesskey" is an unescaped
                    # regex dot and "(.*)?" is redundant — works here, but
                    # r"userContext(.*)\.accesskey" would be the strict form.
                    cont = re.search(r"userContext(.*)?.accesskey",identity["accessKey"]).group(1)
                    identity["name"] = self.translation_data["by_name"][lang][cont]
                # names matching the ignore pattern go to ignored_conts
                if self.if_ignored.get() and re.fullmatch(pattern,identity["name"]):
                    self.ignored_conts.append(identity)
                else:
                    # everything else is editable
                    self.ready_conts.append(identity)
            else:
                # internal identity: kept aside so it is written back unchanged
                self.ignored_conts.append(identity)
        # deepcopy snapshot used later to detect unsaved changes
        self.orig_conts = deepcopy(self.ready_conts)
        # highest container ID handed out so far; new containers increment it
        self.last_id = self.raw_conts["lastUserContextId"]
def get_def_language(self):
# if any profile exists
# otherwise default to en-US
main_lang = "en-US"
if self.profile_radiobtn_var.get():
# get Firefox's language for selected profile
# in user.js (may not exist) or prefs.js
# user_pref("intl.locale.requested", "en-US,ast")
cur_prof_path = self.prof_dict[self.profile_radiobtn_var.get()]["path"]
user_path = cur_prof_path / "user.js"
pref_path = user_path if user_path.exists() else cur_prof_path / "prefs.js"
lang_pref_pattern = re.compile(r'user_pref\("intl\.locale\.requested", "(.*?)"\)')
with open(pref_path,encoding="utf-8") as f:
lang_settings = re.search(lang_pref_pattern,f.read())
# the setting may not exist if there's only 1 language
if lang_settings is not None:
all_langs = lang_settings.group(1)
main_lang = all_langs.split(",")[0]
self.language_select_var.set(self.translation_data["by_code"][main_lang])
#endregion
#region REFRESH METHODS
    def refresh_conts(self,if_deleted=None):
        """Rebuild the container treeview from self.ready_conts.

        Restores the previous selection, selects a newly added container, or
        selects the item after the last deleted one. Also toggles the
        Current-container widgets and updates self.if_saved.

        if_deleted: pass True when called right after a deletion; when None,
        deletion is inferred from the item-count difference.
        """
        selections = self.cont_treeview.selection()
        # snapshot of current items: used to detect additions/deletions
        # and to find the item following a deleted one
        orig_items = self.cont_treeview.get_children()
        next_item = None
        if selections:
            # gets highest index among selected items
            max_index = max([self.cont_treeview.index(selection) for selection in selections])
            # remembers the item after the last selected one (if it's not last)
            # to select it if the selected items get deleted
            if max_index != len(orig_items)-1:
                next_item = (self.cont_treeview.get_children()[max_index+1],)
            # else next_item stays None and is resolved to the last item below
        # clears treeview
        self.cont_treeview.delete(*self.cont_treeview.get_children())
        # repopulates treeview
        # args = parent ("" = new toplevel entry), index, id (= userContextId)
        for container in self.ready_conts:
            self.cont_treeview.insert("","end",container["userContextId"],text=container["name"],image=self.icon_imgs[container["color"]+container["icon"]])
        # detects additions/deletions by comparing item counts
        new_items = self.cont_treeview.get_children()
        if_added = len(orig_items) < len(new_items)
        if if_deleted is None:
            if_deleted = len(orig_items) > len(new_items)
        # unsaved-changes flag: current list differs from the loaded snapshot
        # (a restore back to the originals also flips this to True again)
        self.if_saved = self.ready_conts==self.orig_conts
        # with no containers left, disable everything that needs a selection
        if len(new_items) == 0:
            # clears Current container icon and name
            self.cur_cont_icon.config(image="")
            self.cur_cont_name.config(text="none")
            # empties and disables the Change-name entrybox
            self.change_name_entry.delete(0,"end")
            self.change_name_entry.config(state="disabled")
            # disables Change color/icon buttons
            for btn in self.change_color_lst:
                btn.config(state="disabled")
            for btn in self.change_icon_lst:
                btn.config(state="disabled")
            # disables Delete button
            self.del_button.config(state="disabled")
        # otherwise restore a sensible selection (deletion also implies a prior selection)
        elif selections or if_added:
            # original IDs as strings, to compare against treeview item ids
            orig_conts_items = tuple(str(cont["userContextId"]) for cont in self.orig_conts)
            # originals were restored: keep only previously selected items that
            # still exist; ignore ids that belonged to since-discarded additions
            if new_items==orig_conts_items and not if_deleted:
                selections = tuple(selection for selection in selections if selection in orig_conts_items)
            elif if_added and not self.if_saved:
                # the added container's ID is the one item not present before
                # (tuple(...)[0] extracts the single element)
                added_item = tuple(set(new_items).difference(set(orig_items)))[0]
                # select only the newly added item
                selections = (added_item,)
            elif if_deleted:
                # deleted items were at the end: fall back to the new last item
                if next_item is None:
                    next_item = (self.cont_treeview.get_children()[-1],)
                selections = next_item
            # applies previous selection / added item / item after the deletion
            self.cont_treeview.selection_set(selections)
            # scrolls the (first) selected item into view
            # (guard handles first load, where selection is empty)
            if selections:
                self.cont_treeview.see(selections[0])
def refresh_colors(self):
selections = self.color_treeview.selection()
# clears treeview
self.color_treeview.delete(*self.color_treeview.get_children())
# repopulates treeview
# args = parent ("" = new toplevel entry), index, id (= color)
for color in self.current_order["color"]:
self.color_treeview.insert("","end",color,text=color,image=self.icon_imgs[color+"circle"])
# restores selection
# checks if there is selection to not trigger color_handle_select
if selections:
self.color_treeview.selection_set(selections)
def refresh_icons(self):
selections = self.icon_treeview.selection()
# clears treeview
self.icon_treeview.delete(*self.icon_treeview.get_children())
# repopulates treeview
# args = parent ("" = new toplevel entry), index, id (= icon)
for icon in self.current_order["icon"]:
self.icon_treeview.insert("","end",icon,text=icon,image=self.icon_imgs["toolbar"+icon])
# restore selection
# checks if there is selection to not trigger icon_handle_select
if selections:
self.icon_treeview.selection_set(selections)
#endregion
#region SELECT METHODS
    def cont_handle_select(self,*_):
        """React to a container-treeview selection change.

        Updates the Current-container preview (name/icon), fills the
        Change-name entry, enables the edit/delete widgets, and toggles the
        move-up/down buttons depending on the selection's position.
        """
        selections = self.cont_treeview.selection()
        # defaults, also used when nothing is selected yet at startup
        cont_name = "none"
        fg = "black"
        # handles start case when there is no selection
        if selections:
            # row indexes: needed to decide whether the selection can move up/down
            indices = [self.cont_treeview.index(selection) for selection in selections]
            # single selection: mirror that container directly
            if len(selections) == 1:
                # finds the container with the selected ID in ready_conts
                for container in self.ready_conts:
                    if container["userContextId"] == int(selections[0]):
                        # shows the container's name and icon in the preview
                        cont_name = container["name"]
                        self.cur_cont_name.config(text=cont_name)
                        self.cur_cont_icon.config(image=self.icon_imgs[container["color"]+container["icon"]])
                        # normal (black) font for a concrete name
                        fg = "black"
                        break
            # multiple selection: show shared attributes, grey out mixed ones
            else:
                # sets collapse duplicates, so len()==1 means "all identical"
                name_set = set()
                color_set = set()
                icon_set = set()
                for item_id in selections:
                    for container in self.ready_conts:
                        if container["userContextId"] == int(item_id):
                            name_set.add(container["name"])
                            color_set.add(container["color"])
                            icon_set.add(container["icon"])
                # all names identical: show that name
                if len(name_set) == 1:
                    cont_name = tuple(name_set)[0]
                    fg = "black"
                # mixed names: show "(multiple)" in grey
                else:
                    cont_name = f'({self.gui_vars["text"]["main_window"]["cur_cont_name_multiple"]})'
                    fg = "grey"
                # arbitrary set element; only used below when the set has 1 member
                only_color = tuple(color_set)[0]
                only_icon = tuple(icon_set)[0]
                # neutral defaults meaning "attributes differ"
                color_name = "toolbar"
                icon_name = "default"
                # all colors identical: show the shared color
                if len(color_set)==1:
                    color_name = only_color
                # all icons identical: show the shared icon
                if len(icon_set)==1:
                    icon_name = only_icon
                # updates the preview name
                self.cur_cont_name.config(text=cont_name)
                # both attributes mixed: hide the preview icon entirely
                if [color_name, icon_name] == ["toolbar", "default"]:
                    self.cur_cont_icon.config(image="")
                else:
                    self.cur_cont_icon.config(image=self.icon_imgs[color_name+icon_name])
            # enables Change color/icon buttons
            for button in self.change_color_lst:
                button.config(state="normal")
            for button in self.change_icon_lst:
                button.config(state="normal")
            # enables the Change-name entrybox preloaded with the shown name
            self.change_name_var.set(cont_name)
            self.change_name_entry.config(state="normal",fg=fg)
            # enables Delete button
            self.del_button.config(state="normal")
            # move-up is impossible when the topmost row is part of the selection
            if min(indices)==0:
                self.cont_move_up_btn.config(state="disabled")
            else:
                self.cont_move_up_btn.config(state="normal")
            # move-down is impossible when the bottom row is part of the selection
            if max(indices)==len(self.cont_treeview.get_children())-1:
                self.cont_move_down_btn.config(state="disabled")
            else:
                self.cont_move_down_btn.config(state="normal")
def color_handle_select(self,*_):
selections = self.color_treeview.selection()
# gets indexes to check if selection can be moved up and down
indices = [self.color_treeview.index(selection) for selection in selections]
# checks if can be moved up and down
# compares minimal index to 0
if min(indices)==0:
self.color_move_up_btn.config(state="disabled")
else:
self.color_move_up_btn.config(state="normal")
# compares maximal index to length of treeview
if max(indices)==len(self.color_treeview.get_children())-1:
self.color_move_down_btn.config(state="disabled")
else:
self.color_move_down_btn.config(state="normal")
def icon_handle_select(self,_):
selections = self.icon_treeview.selection()
# gets indexes to check if selection can be moved up and down
indices = [self.icon_treeview.index(selection) for selection in selections]
# checks if can be moved up and down
# compares minimal index to 0
if min(indices)==0:
self.icon_move_up_btn.config(state="disabled")
else:
self.icon_move_up_btn.config(state="normal")
# compares maximal index to length of treeview
# compares maximal index to length of treeview
# compares maximal index to length of treeview
if max(indices)==len(self.icon_treeview.get_children())-1:
self.icon_move_down_btn.config(state="disabled")
else:
self.icon_move_down_btn.config(state="normal")
#endregion
#region MOVE UP & DOWN METHODS
def cont_move_up(self):
selections = self.cont_treeview.selection()
# finds selected container and exchanges it with container below
for ind, container in enumerate(self.ready_conts):
if str(container["userContextId"]) in selections:
self.ready_conts[ind], self.ready_conts[ind-1] = self.ready_conts[ind-1], self.ready_conts[ind]
# refreshes treeview
self.refresh_conts()
# deselects sorting radiobuttons
self.prim_sort.set(None)
self.sec_sort.set(None)
# disables Secondary radiobuttons
for btn in self.sec_sort_lst:
btn.config(state="disabled")
def cont_move_down(self):
selections = self.cont_treeview.selection()
# finds selected container and exchanges it with container below
# has to go through list in reverse
for container in self.ready_conts[::-1]:
if str(container["userContextId"]) in selections:
ind = self.ready_conts.index(container)
self.ready_conts[ind], self.ready_conts[ind+1] = self.ready_conts[ind+1], self.ready_conts[ind]
self.refresh_conts()
# deselect sorting radiobuttons
self.prim_sort.set(None)
self.sec_sort.set(None)
for btn in self.sec_sort_lst:
btn.config(state="disabled")
selection_item = self.cont_treeview.selection()
selection_id = int(selection_item[0])
def color_move_up(self):
selections = self.color_treeview.selection()
# finds selected color and exchanges it with color below
for ind, color in enumerate(self.current_order["color"]):
if color in selections:
self.current_order["color"][ind], self.current_order["color"][ind-1] = self.current_order["color"][ind-1], self.current_order["color"][ind]
# refreshes color treeview and sorts
self.refresh_colors()
self.sort()
def color_move_down(self):
selections = self.color_treeview.selection()
# finds selected color and exchanges it with color below
# has to go through list in reverse
for color in self.current_order["color"][::-1]:
if color in selections:
ind = self.current_order["color"].index(color)
self.current_order["color"][ind], self.current_order["color"][ind+1] = self.current_order["color"][ind+1], self.current_order["color"][ind]
# refreshes color treeview and sorts
self.refresh_colors()
self.sort()
def icon_move_up(self):
selections = self.icon_treeview.selection()
# finds selected icon and exchanges it with icon below
for ind, icon in enumerate(self.current_order["icon"]):
if icon in selections:
self.current_order["icon"][ind], self.current_order["icon"][ind-1] = self.current_order["icon"][ind-1], self.current_order["icon"][ind]
# refreshes icon treeview and sorts
self.refresh_icons()
self.sort()
def icon_move_down(self):
selections = self.icon_treeview.selection()
# finds selected icon and exchanges it with icon below
# has to go through list in reverse
for icon in self.current_order["icon"][::-1]:
if icon in selections:
ind = self.current_order["icon"].index(icon)
self.current_order["icon"][ind], self.current_order["icon"][ind+1] = self.current_order["icon"][ind+1], self.current_order["icon"][ind]
# refreshes color treeview and sorts
# refreshes color treeview and sorts
# refreshes color treeview and sorts
# refreshes icon treeview and sorts
self.refresh_icons()
self.sort()
#endregion
#region RESET ORIGINAL ORDER METHODS
def color_reset(self):
# makes current order a deepcopy of original order
self.current_order["color"] = deepcopy(self.orig_order["color"])
# refreshes color treeview
self.sort()
self.refresh_colors()
# shows Reset! label
self.color_saved_label.config(text=self.gui_vars["text"]["main_window"]["reset"])
# after 1 second, removes text
self.color_saved_label.after(self.gui_vars["timer"],lambda: self.color_saved_label.config(text=""))
def icon_reset(self):
# makes current order a deepcopy of original order
self.current_order["icon"] = deepcopy(self.orig_order["icon"])
# refreshes icon treeview
self.sort()
self.refresh_icons()
# shows Reset! label
self.icon_saved_label.config(text=self.gui_vars["text"]["main_window"]["reset"])
# after 1 second, removes text
self.icon_saved_label.after(self.gui_vars["timer"],lambda: self.icon_saved_label.config(text=""))
#endregion
#region RESTORE DEFAULT ORDER METHODS
def cont_restore(self):
# check if new containers have been added/containers have been edited by simulated set difference
# (sets can't be used because dictionaries are not hashable)
changed_conts = [cont for cont in self.ready_conts if cont not in self.orig_conts]
# gets deleted containers too
deleted_conts = [cont for cont in self.orig_conts if cont not in self.ready_conts]
# if added/changed/deleted, show a warning message
if changed_conts or deleted_conts:
# messagebox returns true or false
# if user answers No, stops function
if not messagebox.askyesno(title=self.gui_vars["text"]["added_warning"]["title"],message=self.gui_vars["text"]["added_warning"]["message"]):
return
self.get_containers()
self.refresh_conts()
# show Restored! label
self.cont_restored_label.config(text=self.gui_vars["text"]["main_window"]["restored"])
# after 1 second, removes text
self.cont_restored_label.after(self.gui_vars["timer"],lambda: self.cont_restored_label.config(text=""))
# deselects sorting radiobuttons
self.prim_sort.set(None)
self.sec_sort.set(None)
# disables Secondary radiobuttons
for btn in self.sec_sort_lst:
btn.config(state="disabled")
def sort_opts_restore(self):
if not Path("sorting_options.json").exists():
return messagebox.showwarning(**self.gui_vars["text"]["no_sort_options"])
with open("sorting_options.json") as f:
opts = json.load(f)
prim, prim_rev = opts["primary"]
self.prim_sort.set(prim)
self.reverse_lst[0].set(prim_rev)
sec, sec_rev = opts["secondary"]
self.sec_sort.set(sec)
self.reverse_lst[1].set(sec_rev)
self.reverse_lst[2].set(opts["tertiary"])
self.sort()
# shows Restored! label
self.sort_saved_label.config(text=self.gui_vars["text"]["main_window"]["loaded"])
# after 1 second, removes text
self.sort_saved_label.after(self.gui_vars["timer"],lambda: self.sort_saved_label.config(text=""))
def color_restore(self):
# makes current order a deepcopy of default order
self.current_order["color"] = deepcopy(self.default_order["color"])
# refreshes color treeview
self.refresh_colors()
self.sort()
# shows Restored! label
self.color_saved_label.config(text=self.gui_vars["text"]["main_window"]["restored"])
# after 1 second, removes text
self.color_saved_label.after(self.gui_vars["timer"],lambda: self.color_saved_label.config(text=""))
def icon_restore(self):
# makes current order a deepcopy of default order
self.current_order["icon"] = deepcopy(self.default_order["icon"])
# refreshes icon treeview
self.refresh_icons()
self.sort()
# shows Restored! label
self.icon_saved_label.config(text=self.gui_vars["text"]["main_window"]["restored"])
# after 1 second, removes text
self.icon_saved_label.after(self.gui_vars["timer"],lambda: self.icon_saved_label.config(text=""))
#endregion
#region SAVE DEFAULT ORDER METHODS
def save_sorting_options(self):
opts = {
"primary" : [
self.prim_sort.get(),
self.reverse_lst[0].get()
],
"secondary" : [
self.sec_sort.get(),
self.reverse_lst[1].get()
],
"tertiary" : self.reverse_lst[2].get()
}
with open("sorting_options.json","w",encoding="utf-8") as f:
json.dump(opts,f)
# shows Saved! label
self.sort_saved_label.config(text=self.gui_vars["text"]["main_window"]["saved"])
# after 1 second, removes text
self.sort_saved_label.after(self.gui_vars["timer"],lambda: self.sort_saved_label.config(text=""))
def color_save_order(self):
# deepcopies current order to default order
self.default_order["color"] = deepcopy(self.current_order["color"])
# saves default order to config file
with open('default_order.json','w',encoding="utf-8") as f:
json.dump(self.default_order,f)
# shows Saved! label
self.color_saved_label.config(text=self.gui_vars["text"]["main_window"]["saved"])
# after 1 second, removes text
self.color_saved_label.after(self.gui_vars["timer"],lambda: self.color_saved_label.config(text=""))
def icon_save_order(self):
# deepcopies current order to default order
self.default_order["icon"] = deepcopy(self.current_order["icon"])
# saves default order to config file
with open('default_order.json','w',encoding="utf-8") as f:
json.dump(self.default_order,f)
# shows Saved! label
self.icon_saved_label.config(text=self.gui_vars["text"]["main_window"]["saved"])
# after 1 second, removes text
self.icon_saved_label.after(self.gui_vars["timer"],lambda: self.icon_saved_label.config(text=""))
#endregion
#region EDIT CONTAINER METHODS
def change_name(self,*_):
selections = self.cont_treeview.selection()
# gets name from Change name entrybox
name = self.change_name_entry.get()
for container in self.ready_conts:
if str(container["userContextId"]) in selections:
# if container is default, removes "l10nID" and "accessKey" keys
if "accessKey" in container:
del container["accessKey"]
del container["l10nID"]
container["name"] = name
self.sort()
def change_color(self,color):
selections = self.cont_treeview.selection()
for container in self.ready_conts:
if str(container["userContextId"]) in selections:
container["color"] = color
self.sort()
def change_icon(self,icon):
selections = self.cont_treeview.selection()
for container in self.ready_conts:
if str(container["userContextId"]) in selections:
container["icon"] = icon
self.sort()
def delete_cont(self):
selections = self.cont_treeview.selection()
# temp_lst to avoid iterating over list and deleting its items
temp_lst = deepcopy(self.ready_conts)
for container in temp_lst:
if str(container["userContextId"]) in selections:
self.ready_conts.remove(container)
# doesn't need to sort, order same
self.refresh_conts(if_deleted=True)
#endregion
#region ADD CONTAINER POPUP
    def add_cont(self):
        """Open the centered "Add container" popup.

        Builds the name entry, the color/icon picker buttons and the Add
        button (disabled until name, color and icon are all set via
        add_check), and initializes self.add_color/self.add_icon to None.
        """
        # popup window
        self.add_popup=tk.Toplevel()
        self.add_popup.title(self.gui_vars["text"]["add_popup"]["title"])
        # main frame
        self.add_popup_frame = tk.Frame(self.add_popup)
        self.add_popup_frame.pack(padx=self.gui_vars["pad"]["x"],pady=self.gui_vars["pad"]["y"])
        #region NAME
        # frame
        self.add_name_frame = tk.Frame(self.add_popup_frame)
        self.add_name_frame.pack(pady=self.gui_vars["pad"]["y"],anchor="w")
        # label
        tk.Label(self.add_name_frame,text=self.gui_vars["text"]["add_popup"]["name"],font=self.gui_vars["font"]["normal"]).pack(side="left",padx=self.gui_vars["pad"]["x"])
        # entrybox; the write-trace re-validates the Add button on every keystroke
        self.add_name_var = tk.StringVar()
        self.add_name_var.trace_add("write",self.add_check)
        self.add_name_entry = tk.Entry(self.add_name_frame,font=self.gui_vars["font"]["normal"],textvariable=self.add_name_var)
        self.add_name_entry.pack(side="left",padx=self.gui_vars["pad"]["x"])
        # sets keyboard focus to the entrybox
        self.add_name_entry.focus()
        #endregion
        #region COLOR
        # frame
        self.add_color_frame = tk.Frame(self.add_popup_frame)
        self.add_color_frame.pack(pady=self.gui_vars["pad"]["y"],anchor="w")
        # label
        tk.Label(self.add_color_frame,text=self.gui_vars["text"]["add_popup"]["color"],font=self.gui_vars["font"]["normal"]).pack(side="left",padx=self.gui_vars["pad"]["x"])
        # one button per color; defaults in the lambda freeze the loop values
        self.add_color_lst = []
        for index, color in enumerate(self.orig_order["color"]):
            # command gets index to modify button
            self.add_color_lst.append(tk.Button(self.add_color_frame,image=self.icon_imgs[color+"circle"],borderwidth=0,command=lambda i=index, j=color: self.add_color_update(i,j)))
            self.add_color_lst[index].pack(side="left",padx=self.gui_vars["pad"]["small_icon_x"])
        #endregion
        #region ICON
        # frame
        self.add_icon_frame = tk.Frame(self.add_popup_frame)
        self.add_icon_frame.pack(pady=self.gui_vars["pad"]["y"],anchor="w")
        # label
        tk.Label(self.add_icon_frame,text=self.gui_vars["text"]["add_popup"]["icon"],font=self.gui_vars["font"]["normal"]).pack(side="left",padx=self.gui_vars["pad"]["x"])
        # one button per icon; defaults in the lambda freeze the loop values
        self.add_icon_lst = []
        for index, icon in enumerate(self.orig_order["icon"]):
            # command gets index to modify button
            self.add_icon_lst.append(tk.Button(self.add_icon_frame,image=self.icon_imgs["toolbar"+icon],borderwidth=0,command=lambda i=index, j=icon : self.add_icon_update(i,j)))
            self.add_icon_lst[index].pack(side="left",padx=self.gui_vars["pad"]["small_icon_x"])
        #endregion
        #region ADD BUTTON
        # frame
        self.add_add_button_frame = tk.Frame(self.add_popup_frame)
        self.add_add_button_frame.pack(pady=self.gui_vars["pad"]["y"])
        # button starts disabled; add_check enables it once the form is complete
        self.add_add_button = tk.Button(self.add_add_button_frame,text=self.gui_vars["text"]["add_popup"]["add"],state="disabled",command=self.add_cont_save)
        self.add_add_button.pack(pady=self.gui_vars["pad"]["y"])
        # bind Enter to the Add button
        self.add_popup.bind("<Return>",lambda *_: self.add_add_button.invoke())
        #endregion
        #region WINDOW POSITION
        # get window size
        self.add_popup.update_idletasks() #update idletasks to get correct size
        add_width=self.add_popup.winfo_width()
        add_height=self.add_popup.winfo_height()
        # calculate position: half screen resolution - half window size
        # (the -40 presumably compensates for the taskbar — TODO confirm)
        half_width=int(self.screen_width/2-add_width/2)
        half_height=int((self.screen_height-40)/2-add_height/2)
        # makes window not resizable above max size
        # self.add_popup.maxsize(add_width,add_height)
        # makes window not resizable at all
        self.add_popup.resizable(False,False)
        # position window: "+X_position+Y_position"
        self.add_popup.geometry("+"+str(half_width)+"+"+str(half_height))
        # sets focus to window
        self.add_popup.focus()
        #endregion
        # no color/icon chosen yet; add_check keeps Add disabled until both are set
        self.add_icon = None
        self.add_color = None
def add_color_update(self,index,color):
self.add_color = color
# unsinks and unborders all buttons
for btn in self.add_color_lst:
btn.config(relief="flat",borderwidth=0)
# sinks and borders selected button
self.add_color_lst[index].config(relief="sunken",borderwidth=1)
# checks if entry can be added (name and icons are selected)
self.add_check()
def add_icon_update(self,index,icon):
self.add_icon = icon
# unsinks and unborders all buttons
for btn in self.add_icon_lst:
btn.config(relief="flat",borderwidth=0)
# sinks and borders selected button
self.add_icon_lst[index].config(relief="sunken",borderwidth=1)
# checks if entry can be added (name and color are selected)
self.add_check()
def add_check(self,*_):
# gets name from Name entrybox
self.add_name = self.add_name_entry.get()
# if color & icon are selected and name is not empty, enables Add button
if self.add_color and self.add_icon and self.add_name:
self.add_add_button.config(state="normal")
else:
# else disables Add button
self.add_add_button.config(state="disabled")
def add_cont_save(self):
self.last_id += 1
temp_con = {}
temp_con["userContextId"] = self.last_id
temp_con["public"] = True
temp_con["icon"] = self.add_icon
temp_con["color"] = self.add_color
temp_con["name"] = self.add_name_entry.get()
self.ready_conts.append(deepcopy(temp_con))
self.sort()
self.add_popup.destroy()
#endregion
#region SAVE METHODS
def check_if_saved(self):
if self.if_saved:
return True
else:
# messagebox returns True or False
return messagebox.askyesno(**self.gui_vars["text"]["save_warning"])
def save(self):
ready_output = deepcopy(self.ready_conts)
# removes "name" property if "accessKey" is present
for container in ready_output:
if "accessKey" in container:
del container["name"]
# appends ignored containers to ready containers
identities_output = ready_output + self.ignored_conts
# gets entire raw file
output = deepcopy(self.raw_conts)
# updates containers
output["identities"] = deepcopy(identities_output)
# updates lastUserContextId
output["lastUserContextId"] = self.last_id
# creates backup with name:
# {profile}_{DD-MM-YY_HH-MM-SS}.json
backup_filename = f'{self.profile_radiobtn_var.get()}_{datetime.now().strftime("%d-%m-%Y_%H-%M-%S")}.json'
backup_path = Path("backups")
if not backup_path.exists():
backup_path.mkdir()
shutil.copy(self.sel_prof_path / "containers.json", backup_path / backup_filename)
# saves file
with open(self.sel_prof_path / "containers.json","w",encoding="utf-8") as f:
json.dump(output,f)
# shows Success message
messagebox.showinfo(**self.gui_vars["text"]["save_success"])
self.if_saved = True
#endregion
| 43.803864 | 239 | 0.629122 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.