#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Created by Kelly Hwong on 2018-09-18 16:16:19
# Part A: House Hunting
import math
annual_salary = int(input("Enter your annual salary: "))
portion_saved = float(input("Enter the percent of your salary to save, as a decimal: "))
total_cost = int(input("Enter the cost of your dream home: "))
# salary, saving, current_savings*r/12, annual_salary/12
# initialize some example inputs for tests
# annual_salary = 120000
# portion_saved = .10 # eg: 0.10
# total_cost = 1000000
# annual_salary = 80000
# portion_saved = .15 # eg: 0.10
# total_cost = 500000
# constants
r = 0.04
portion_down_payment = 0.25
monthly_salary = annual_salary/12
down_payment = total_cost * portion_down_payment
current_savings = 0
month_count = 0
while current_savings < down_payment:
    # interest is credited on the existing balance first
    additional_interest = current_savings*r/12
    current_savings += additional_interest
    # then the monthly saving is added
    current_savings += monthly_salary * portion_saved
    month_count += 1
print("Number of months: " + str(month_count))
# alternative: current_savings *= 1 + r/12 applied after the deposit (which would also pay interest on this month's saving)
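
# ---------------------------------------------------------------------------
# A minimal refactor sketch (not part of the original submission): the same
# saving loop wrapped in a function so the commented example inputs above can
# be checked without typing them interactively. For the first example
# (120000, 0.10, 1000000) this loop yields 183 months.
def months_to_down_payment(annual_salary, portion_saved, total_cost,
                           r=0.04, portion_down_payment=0.25):
    monthly_salary = annual_salary / 12
    down_payment = total_cost * portion_down_payment
    current_savings = 0.0
    month_count = 0
    while current_savings < down_payment:
        current_savings *= 1 + r/12              # credit monthly interest first
        current_savings += monthly_salary * portion_saved
        month_count += 1
    return month_count

# months_to_down_payment(120000, 0.10, 1000000) -> 183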
|
'''
A Pythagorean triplet is a set of three natural numbers, a < b < c, for which
a^2 + b^2 = c^2. For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
There exists exactly one Pythagorean triplet for which a + b + c = 1000.
Find the product abc.
Euclid's formula: for n > m > 0,
a = n^2 - m^2
b = 2nm
c = n^2 + m^2
form a Pythagorean triple.
'''
import time
def triplete_base(num):
for n in range(25):
for m in range(n):
a = n**2 - m**2
b = 2 * n * m
c = n**2 + m**2
if a + b + c == num:
return "{} * {} * {} = {}".format(a, b, c, a*b*c)
start = time.time()
# for i in range(100):
print(triplete_base(1000))
print("Tiempo total {} seg".format(time.time() - start))
# 375 * 200 * 425 = 31875000
# Tiempo total 0.0008294582366943359 seg
# Very Pythonic, but slower (roughly 2x)
def triplete(num):
    # note: the original compared against a hardcoded 1000 instead of num
    return [[(n**2 - m**2, 2*m*n, n**2 + m**2)
             for m in range(1, n)
             if (n**2 - m**2) + 2*m*n + (n**2 + m**2) == num]
            for n in range(1, 25)]
start = time.time()
# for i in range(100):
triplete(1000)
print("Tiempo total {} seg".format(time.time() - start))
# Tiempo total 0.001071929931640625 seg
# time.clock() was removed in Python 3.8; perf_counter is the replacement
for i in range(10):
    t1 = time.perf_counter()
    triplete_base(1000)
    print(time.perf_counter() - t1)
print("------")
for i in range(10):
    t1 = time.perf_counter()
    triplete(1000)
    print(time.perf_counter() - t1)
# 0.0002599999999972624
# 0.0004559999999997899
# 0.0004930000000058499
# 0.0004729999999995016
# 0.0004930000000058499
# 0.00047399999999697684
# 0.00047399999999697684
# 0.0004540000000048394
# 0.00048300000000267573
# 0.0004989999999907013
# ------
# 0.0007220000000103255
# 0.0006529999999997926
# 0.0007109999999954653
# 0.0007580000000046994
# 0.00041899999999372994
# 0.00039799999998990643
# 0.0003889999999984184
# 0.0003930000000025302
# 0.0003659999999996444
# 0.0004729999999995016
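
# ---------------------------------------------------------------------------
# A hedged alternative sketch (not in the original): Euclid's parametrization
# gives a + b + c = 2*n*(n + m), so for a perimeter P it suffices to find
# n > m > 0 with n*(n + m) == P // 2.  For P = 1000 this yields n=20, m=5.
def triplet_by_divisors(perimeter):
    if perimeter % 2:
        return None
    half = perimeter // 2                 # n * (n + m) must equal this
    n = 2
    while n * n < half:                   # m > 0 requires n**2 < half
        if half % n == 0:
            m = half // n - n             # from n + m == half // n
            if 0 < m < n:
                a, b, c = n*n - m*m, 2*n*m, n*n + m*m
                return a * b * c
        n += 1
    return None

# triplet_by_divisors(1000) -> 31875000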
|
from logging import Handler
from queue import Queue
from threading import Thread
import logging.config
import logging
import asyncio
import datetime
import yaml
import sys
import os
from git import Repo
from functools import partial, wraps
from pythonjsonlogger import jsonlogger
RED = '\033[91m'
BLUE = '\033[94m'
BOLD = '\033[1m'
END = '\033[0m'
_BRANCH_NAME = None
http_pings_logs_disabled = True
def get_current_working_repo():
branch_name = None
current_tag = None
try:
repo = Repo(os.getcwd())
branch = repo.active_branch
branch_name = branch.name
tags = repo.tags
if tags and isinstance(tags, list):
current_tag = tags[-1].name
except:
pass
return (branch_name, current_tag)
def http_ping_filter(record):
if "GET /ping/" in record.getMessage():
return 0
return 1
class LogFormatHelper:
LogFormat = '%a %l %u %t "%r" %s %b %D "%{Referrer}i" "%{User-Agent}i" %{X-Request-ID}o'
class CustomTimeLoggingFormatter(logging.Formatter):
def formatTime(self, record, datefmt=None): # noqa
"""
Overrides formatTime method to use datetime module instead of time module
to display time in microseconds. Time module by default does not resolve
time to microseconds.
"""
record.branchname = _BRANCH_NAME
if datefmt:
s = datetime.datetime.now().strftime(datefmt)
else:
t = datetime.datetime.now().strftime(self.default_time_format)
s = self.default_msec_format % (t, record.msecs)
return s
class CustomJsonFormatter(jsonlogger.JsonFormatter):
def __init__(self, *args, **kwargs):
self.extrad = kwargs.pop('extrad', {})
super().__init__(*args, **kwargs)
def add_fields(self, log_record, record, message_dict):
message_dict.update(self.extrad)
record.branchname = _BRANCH_NAME
super().add_fields(log_record, record, message_dict)
def patch_async_emit(handler: Handler):
base_emit = handler.emit
queue = Queue()
def loop():
while True:
record = queue.get()
try:
base_emit(record)
except:
print(sys.exc_info())
def async_emit(record):
queue.put(record)
thread = Thread(target=loop)
thread.daemon = True
thread.start()
handler.emit = async_emit
return handler
def patch_add_handler(logger):
base_add_handler = logger.addHandler
def async_add_handler(handler):
async_handler = patch_async_emit(handler)
base_add_handler(async_handler)
return async_add_handler
DEFAULT_CONFIG_YAML = """
# logging config
version: 1
disable_existing_loggers: False
handlers:
stream:
class: logging.StreamHandler
level: INFO
formatter: ctf
stream: ext://sys.stdout
stats:
class: logging.FileHandler
level: INFO
formatter: cjf
filename: logs/vyked_stats.log
exceptions:
class: logging.FileHandler
level: INFO
formatter: cjf
filename: logs/vyked_exceptions.log
service:
class: logging.FileHandler
level: INFO
formatter: ctf
filename: logs/vyked_service.log
formatters:
ctf:
(): vyked.utils.log.CustomTimeLoggingFormatter
format: '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
datefmt: '%Y-%m-%d %H:%M:%S,%f'
cjf:
(): vyked.utils.log.CustomJsonFormatter
format: '{ "timestamp":"%(asctime)s", "message":"%(message)s"}'
datefmt: '%Y-%m-%d %H:%M:%S,%f'
root:
handlers: [stream, service]
level: INFO
loggers:
registry:
handlers: [service,]
level: INFO
stats:
handlers: [stats]
level: INFO
exceptions:
handlers: [exceptions]
level: INFO
"""
def setup_logging(_):
try:
with open('config_log.json', 'r') as f:
            config_dict = yaml.safe_load(f.read())  # safe_load: yaml.load needs an explicit Loader on PyYAML >= 6
    except Exception:
        config_dict = yaml.safe_load(DEFAULT_CONFIG_YAML)
logging.getLogger('asyncio').setLevel(logging.WARNING)
logger = logging.getLogger()
logger.handlers = []
logger.addHandler = patch_add_handler(logger)
global _BRANCH_NAME
(branch_name, current_tag) = get_current_working_repo()
_BRANCH_NAME = branch_name
if 'handlers' in config_dict:
for handler in config_dict['handlers']:
if 'branch_name' in config_dict['handlers'][handler] and config_dict['handlers'][handler]['branch_name'] == True:
config_dict['handlers'][handler]['release'] = current_tag if current_tag else None
if 'tags' in config_dict['handlers'][handler] and isinstance(config_dict['handlers'][handler]['tags'], dict):
config_dict['handlers'][handler]['tags']['branch'] = branch_name if branch_name else None
logging.config.dictConfig(config_dict)
if http_pings_logs_disabled:
for handler in logging.root.handlers:
handler.addFilter(http_ping_filter)
def log(fn=None, logger=logging.getLogger(), debug_level=logging.DEBUG):
"""
logs parameters and result - takes no arguments
"""
if fn is None:
return partial(log, logger=logger, debug_level=debug_level)
@wraps(fn)
def func(*args, **kwargs):
arg_string = ""
for i in range(0, len(args)):
var_name = fn.__code__.co_varnames[i]
if var_name not in ['self', 'cls']:
arg_string += var_name + ":" + str(args[i]) + ","
arg_string = arg_string[0:len(arg_string) - 1]
string = (RED + BOLD + '>> ' + END + 'Calling {0}({1})'.format(fn.__name__, arg_string))
if len(kwargs):
string = (
RED + BOLD + '>> ' + END + 'Calling {0} with args {1} and kwargs {2}'.format(fn.__name__, arg_string,
kwargs))
logger.log(debug_level, string)
wrapped_fn = fn
if not asyncio.iscoroutine(fn):
wrapped_fn = asyncio.coroutine(fn)
try:
result = yield from wrapped_fn(*args, **kwargs)
string = BLUE + BOLD + '<< ' + END + 'Return {0} with result :{1}'.format(fn.__name__, result)
logger.log(debug_level, string)
return result
except Exception as e:
string = (RED + BOLD + '>> ' + END + '{0} raised exception :{1}'.format(fn.__name__, str(e)))
logger.log(debug_level, string)
raise e
return func
def logx(supress_args=[], supress_all_args=False, supress_result=False, logger=logging.getLogger(),
debug_level=logging.DEBUG):
"""
logs parameters and result
takes arguments
supress_args - list of parameter names to supress
supress_all_args - boolean to supress all arguments
supress_result - boolean to supress result
    logger - logger instance used for output; defaults to the root logger
"""
def decorator(fn):
def func(*args, **kwargs):
if not supress_all_args:
arg_string = ""
for i in range(0, len(args)):
var_name = fn.__code__.co_varnames[i]
if var_name != "self" and var_name not in supress_args:
arg_string += var_name + ":" + str(args[i]) + ","
arg_string = arg_string[0:len(arg_string) - 1]
string = (RED + BOLD + '>> ' + END + 'Calling {0}({1})'.format(fn.__name__, arg_string))
if len(kwargs):
string = (
RED + BOLD + '>> ' + END + 'Calling {0} with args {1} and kwargs {2}'.format(
fn.__name__,
arg_string, kwargs))
logger.log(debug_level, string)
wrapped_fn = fn
if not asyncio.iscoroutine(fn):
wrapped_fn = asyncio.coroutine(fn)
result = yield from wrapped_fn(*args, **kwargs)
if not supress_result:
string = BLUE + BOLD + '<< ' + END + 'Return {0} with result : {1}'.format(fn.__name__, result)
logger.log(debug_level, string)
return result
return func
return decorator
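
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). The YAML fallback
# config writes into ./logs and resolves the formatters by the dotted path
# vyked.utils.log.*, so it only works when this module is importable under
# that name and the logs directory exists:
#
#   import os
#   os.makedirs('logs', exist_ok=True)
#   setup_logging(None)          # the argument is unused
#   logging.getLogger('stats').info('stats handler smoke test')
#
# The @log / @logx decorators wrap callables as old-style (yield from)
# coroutines, so they are driven with an asyncio event loop on Python < 3.11.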
|
"""
Tools to put CP2K orbitals on a real space grid
"""
import os
import numpy as np
import scipy
import scipy.io
import scipy.interpolate
import scipy.ndimage
import time
import copy
import sys
import re
import io
import ase
import ase.io
from .cube import Cube
from .cp2k_wfn_file import Cp2kWfnFile
from mpi4py import MPI
ang_2_bohr = 1.0/0.52917721067
hart_2_ev = 27.21138602
class Cp2kGridOrbitals:
"""
Class to load and put CP2K orbitals on a discrete real-space grid.
The orbitals will be equally divided between the mpi processes.
"""
def __init__(self, mpi_rank=0, mpi_size=1, mpi_comm=None, single_precision=True):
self.mpi_rank = mpi_rank
self.mpi_size = mpi_size
self.mpi_comm = mpi_comm
if single_precision:
self.dtype = np.float32
else:
self.dtype = np.float64
# geometry
self.cell = None # Bohr radii / [au]
self.ase_atoms = None
self.atom_kinds = None # saves the kind for each atom
# Basis set
        self.kind_elem_basis = None  # (element, basis set name) tuple for each kind
self.basis_sets = None
# The global energy limits when loading the orbitals
self.emin = None
self.emax = None
# Object to deal with loading molecular orbitals from .wfn file
self.cwf = Cp2kWfnFile(self.mpi_rank, self.mpi_size, self.mpi_comm)
# Set by cwf:
self.morb_composition = None
self.morb_energies = None
self.i_homo_loc = None
self.i_homo_glob = None
self.nspin = None
self.ref_energy = None
self.global_morb_energies = None
# Orbitals on discrete grid
self.morb_grids = None
self.dv = None # [dx, dy, dz] in [au]
self.origin = None
self.eval_cell = None
self.eval_cell_n = None
self.last_calc_iz = None # last directly calculated z plane (others extrapolated)
### -----------------------------------------
### General cp2k routines
### -----------------------------------------
def read_cp2k_input(self, cp2k_input_file):
"""
Reads from the cp2k input file:
* Basis set names for all kinds
* Cell size
"""
self.kind_elem_basis = {}
self.cell = np.zeros(3)
with open(cp2k_input_file) as f:
lines = f.readlines()
for i in range(len(lines)):
parts = lines[i].split()
if len(parts) == 0:
continue
# Have we found the basis set info?
if parts[0] == "&KIND":
kind = parts[1]
elem = None
basis_name = None
subsec_count = 0
## ---------------------------------------------------------------------
                ## Loop over the following lines to find the BASIS_SET and ELEMENT
for j in range(1, 100):
line = lines[i+j]
                    if line.strip().startswith('&') and not line.strip().startswith("&END"):
# We entered into a subsection of kind
subsec_count += 1
if line.strip().startswith("&END"):
# We are either leaving &KIND or a subsection
if subsec_count == 0:
break
else:
subsec_count -= 1
parts = line.split()
if parts[0] == "ELEMENT":
elem = parts[1]
if parts[0] == "BASIS_SET":
basis_name = parts[1]
## ---------------------------------------------------------------------
if elem is None:
# if ELEMENT was not explicitly stated
if kind in ase.data.chemical_symbols:
# kind itself is the element
elem = kind
else:
# remove numbers
kind_no_nr = ''.join([i for i in kind if not i.isdigit()])
# remove anything appended by '_' or '-'
kind_processed = kind_no_nr.replace("_", ' ').replace("-", ' ').split()[0]
if kind_processed in ase.data.chemical_symbols:
elem = kind_processed
else:
print("Error: couldn't determine element for kind '%s'" % kind)
exit(1)
self.kind_elem_basis[kind] = (elem, basis_name)
# Have we found the CELL info?
if parts[0] == "ABC":
if parts[1] == "[angstrom]":
self.cell[0] = float(parts[2])
self.cell[1] = float(parts[3])
self.cell[2] = float(parts[4])
else:
self.cell[0] = float(parts[1])
self.cell[1] = float(parts[2])
self.cell[2] = float(parts[3])
if parts[0] == "A" or parts[0] == "B" or parts[0] == "C":
prim_vec = np.array([float(x) for x in parts[1:]])
if np.sum(prim_vec > 0.0) > 1:
raise ValueError("Cell is not rectangular")
ind = np.argmax(prim_vec > 0.0)
self.cell[ind] = prim_vec[ind]
self.cell *= ang_2_bohr
if any(self.cell < 1e-3):
raise ValueError("Cell " + str(self.cell) + " is invalid")
if self.ase_atoms is not None:
self.ase_atoms.cell = self.cell / ang_2_bohr
def read_xyz(self, file_xyz):
""" Read atomic positions from .xyz file (in Bohr radiuses) """
with open(file_xyz) as f:
fxyz_contents = f.readlines()
self.atom_kinds = []
for i_line, line in enumerate(fxyz_contents):
if i_line >= 2:
kind = line.split()[0]
self.atom_kinds.append(kind)
# Replace custom kinds with their corresponding element (e.g. for spin-pol calcs)
fxyz_contents[i_line] = self.kind_elem_basis[kind][0] + " " + " ".join(line.split()[1:]) + "\n"
self.ase_atoms = ase.io.read(io.StringIO("".join(fxyz_contents)), format="xyz")
if self.cell is not None:
self.ase_atoms.cell = self.cell / ang_2_bohr
def center_atoms_to_cell(self):
self.ase_atoms.center()
### -----------------------------------------
### Basis set routines
### -----------------------------------------
def _magic_basis_normalization(self, basis_sets_):
""" Normalizes basis sets to be compatible with cp2k """
basis_sets = copy.deepcopy(basis_sets_)
for kind, bsets in basis_sets.items():
for bset in bsets:
for shell in bset:
l = shell[0]
exps = shell[1]
coefs = shell[2]
nexps = len(exps)
norm_factor = 0
for i in range(nexps-1):
for j in range(i+1, nexps):
norm_factor += 2*coefs[i]*coefs[j]*(2*np.sqrt(exps[i]*exps[j])/(exps[i]+exps[j]))**((2*l+3)/2)
for i in range(nexps):
norm_factor += coefs[i]**2
for i in range(nexps):
coefs[i] = coefs[i]*exps[i]**((2*l+3)/4)/np.sqrt(norm_factor)
return basis_sets
def read_basis_functions(self, basis_set_file):
""" Reads the basis sets from basis_set_file specified in kind_elem_basis
returns:
basis_sets["kind"] =
"""
self.basis_sets = {}
used_elems_bases = list(self.kind_elem_basis.values())
corresp_kinds = list(self.kind_elem_basis.keys())
with open(basis_set_file) as f:
lines = f.readlines()
for i in range(len(lines)):
parts = lines[i].split()
if len(parts) <= 1:
continue
elem = parts[0]
trial_1 = (elem, parts[1])
trial_2 = None
if len(parts) > 2:
trial_2 = (elem, parts[2])
if trial_1 in used_elems_bases or trial_2 in used_elems_bases:
# We have a basis set we're using
# find all kinds using this basis set:
kinds = [corresp_kinds[i] for i, e_b in enumerate(used_elems_bases) if e_b == trial_1 or e_b == trial_2]
basis_functions = []
nsets = int(lines[i+1])
cursor = 2
for j in range(nsets):
basis_functions.append([])
comp = [int(x) for x in lines[i+cursor].split()]
n_princ, l_min, l_max, n_exp = comp[:4]
l_arr = np.arange(l_min, l_max+1, 1)
n_basisf_for_l = comp[4:]
assert len(l_arr) == len(n_basisf_for_l)
exps = []
coeffs = []
for k in range(n_exp):
exp_c = [float(x) for x in lines[i+cursor+k+1].split()]
exps.append(exp_c[0])
coeffs.append(exp_c[1:])
exps = np.array(exps)
coeffs = np.array(coeffs)
indx = 0
for l, nl in zip(l_arr, n_basisf_for_l):
for il in range(nl):
basis_functions[-1].append([l, exps, coeffs[:, indx]])
indx += 1
cursor += n_exp + 1
for kind in kinds:
self.basis_sets[kind] = basis_functions
self.basis_sets = self._magic_basis_normalization(self.basis_sets)
### -----------------------------------------
### WFN file routines
### -----------------------------------------
def load_restart_wfn_file(self, restart_file, emin=None, emax=None, n_occ=None, n_virt=None):
"""
Reads the specified molecular orbitals from cp2k restart wavefunction file
        If both the energy limits and the counts are given, the more extreme of the two is used.
Note that the energy range is in eV and with respect to HOMO energy.
"""
self.cwf.load_restart_wfn_file(restart_file, emin=emin, emax=emax, n_occ=n_occ, n_virt=n_virt)
self.cwf.convert_readable()
self.morb_composition = self.cwf.morb_composition
self.morb_energies = self.cwf.morb_energies
self.i_homo_loc = self.cwf.i_homo_loc
self.i_homo_glob = self.cwf.i_homo_glob
self.nspin = self.cwf.nspin
self.ref_energy = self.cwf.ref_energy
self.global_morb_energies = self.cwf.glob_morb_energies
### ---------------------------------------------------------------------------
### Methods directly related to putting stuff on grids
### ---------------------------------------------------------------------------
def _spherical_harmonic_grid(self, l, m, x_grid, y_grid, z_grid):
"""
Evaluates the spherical harmonics (times r^l) with some unknown normalization
(source: Carlo's Fortran code)
"""
c = (2.0/np.pi)**(3.0/4.0)
# s orbitals
if (l, m) == (0, 0):
return c
# p orbitals
elif (l, m) == (1, -1):
return c*2.0*y_grid
elif (l, m) == (1, 0):
return c*2.0*z_grid
elif (l, m) == (1, 1):
return c*2.0*x_grid
# d orbitals
elif (l, m) == (2, -2):
return c*4.0*x_grid*y_grid
elif (l, m) == (2, -1):
return c*4.0*y_grid*z_grid
elif (l, m) == (2, 0):
return c*2.0/np.sqrt(3)*(2*z_grid**2-x_grid**2-y_grid**2)
elif (l, m) == (2, 1):
return c*4.0*z_grid*x_grid
elif (l, m) == (2, 2):
return c*2.0*(x_grid**2-y_grid**2)
# f orbitals
elif (l, m) == (3, -3):
return c*np.sqrt(8/3)*y_grid*(3*x_grid**2-y_grid**2)
elif (l, m) == (3, -2):
return c*8.0*x_grid*y_grid*z_grid
elif (l, m) == (3, -1):
return c*np.sqrt(8/5)*y_grid*(4*z_grid**2-x_grid**2-y_grid**2)
elif (l, m) == (3, 0):
return c*4.0/np.sqrt(15.0)*z_grid*(2.0*z_grid**2-3.0*x_grid**2-3.0*y_grid**2)
elif (l, m) == (3, 1):
return c*np.sqrt(8/5)*x_grid*(4*z_grid**2-x_grid**2-y_grid**2)
elif (l, m) == (3, 2):
return c*4.0*z_grid*(x_grid**2-y_grid**2)
elif (l, m) == (3, 3):
return c*np.sqrt(8/3)*x_grid*(x_grid**2-3.0*y_grid**2)
print("No spherical harmonic found for l=%d, m=%d" % (l, m))
return 0
def _add_local_to_global_grid(self, loc_grid, glob_grid, origin_diff, wrap=(True, True, True)):
"""
Method to add a grid to another one
Arguments:
loc_grid -- grid that will be added to the glob_grid
glob_grid -- defines "wrapping" boundaries
origin_diff -- difference of origins between the grids; ignored for directions without wrapping
wrap -- specifies in which directions to wrap and take PBC into account
"""
loc_n = np.shape(loc_grid)
glob_n = np.shape(glob_grid)
od = origin_diff
inds = []
l_inds = []
for i in range(len(glob_n)):
if wrap[i]:
# Move the origin_diff vector to the main global cell if wrapping is enabled
od[i] = od[i] % glob_n[i]
ixs = [[od[i], od[i] + loc_n[i]]]
l_ixs = [0]
while ixs[-1][1] > glob_n[i]:
overshoot = ixs[-1][1]-glob_n[i]
ixs[-1][1] = glob_n[i]
l_ixs.append(l_ixs[-1]+glob_n[i]-ixs[-1][0])
ixs.append([0, overshoot])
l_ixs.append(loc_n[i])
inds.append(ixs)
l_inds.append(l_ixs)
else:
inds.append([-1])
l_inds.append([-1])
l_ixs = l_inds[0]
l_iys = l_inds[1]
l_izs = l_inds[2]
for i, ix in enumerate(inds[0]):
for j, iy in enumerate(inds[1]):
for k, iz in enumerate(inds[2]):
if wrap[0]:
i_gl_x = slice(ix[0], ix[1])
i_lc_x = slice(l_ixs[i], l_ixs[i+1])
else:
i_gl_x = slice(None)
i_lc_x = slice(None)
if wrap[1]:
i_gl_y = slice(iy[0], iy[1])
i_lc_y = slice(l_iys[j], l_iys[j+1])
else:
i_gl_y = slice(None)
i_lc_y = slice(None)
if wrap[2]:
i_gl_z = slice(iz[0], iz[1])
i_lc_z = slice(l_izs[k], l_izs[k+1])
else:
i_gl_z = slice(None)
i_lc_z = slice(None)
glob_grid[i_gl_x, i_gl_y, i_gl_z] += loc_grid[i_lc_x, i_lc_y, i_lc_z]
def calc_morbs_in_region(self, dr_guess,
x_eval_region = None,
y_eval_region = None,
z_eval_region = None,
eval_cutoff = 14.0,
reserve_extrap = 0.0,
print_info = True):
"""
Puts the molecular orbitals onto a specified grid
Arguments:
        dr_guess -- spatial discretization step [ang]; the real step differs per axis due to rounding
        x_eval_region -- x evaluation range (min, max) in [au]. If min == max, evaluation is done on a single plane.
                         If set, no PBCs are applied in that direction and eval_cutoff is not used.
                         If left at None, the whole range of the cell is taken and PBCs are applied.
        eval_cutoff -- cutoff in [ang] for orbital evaluation if eval_region is None
"""
time1 = time.time()
dr_guess *= ang_2_bohr
eval_cutoff *= ang_2_bohr
reserve_extrap *= ang_2_bohr
global_cell_n = (np.round(self.cell/dr_guess)).astype(int)
self.dv = self.cell / global_cell_n
# Define local grid for orbital evaluation
# and convenient PBC implementation
eval_regions = [x_eval_region, y_eval_region, z_eval_region]
loc_cell_arrays = []
mid_ixs = np.zeros(3, dtype=int)
loc_cell_n = np.zeros(3, dtype=int)
eval_cell_n = np.zeros(3, dtype=int)
self.origin = np.zeros(3)
for i in range(3):
if eval_regions[i] is None:
# Define range in i direction with 0.0 at index mid_ixs[i]
loc_arr = np.arange(0, eval_cutoff, self.dv[i])
mid_ixs[i] = int(len(loc_arr)/2)
loc_arr -= loc_arr[mid_ixs[i]]
loc_cell_arrays.append(loc_arr)
eval_cell_n[i] = global_cell_n[i]
self.origin[i] = 0.0
else:
# Define the specified range in direction i
v_min, v_max = eval_regions[i]
### TODO: Probably should use np.arange to have exactly matching dv in the local grid... ###
loc_cell_arrays.append(np.linspace(v_min, v_max, int(np.round((v_max-v_min)/self.dv[i]))+1))
mid_ixs[i] = -1
eval_cell_n[i] = len(loc_cell_arrays[i])
self.origin[i] = v_min
loc_cell_n[i] = len(loc_cell_arrays[i])
loc_cell_grids = np.meshgrid(loc_cell_arrays[0], loc_cell_arrays[1], loc_cell_arrays[2], indexing='ij')
# Some info
if print_info:
print("Global cell: ", global_cell_n)
print("Eval cell: ", eval_cell_n)
print("local cell: ", loc_cell_n)
print("---- Setup: %.4f" % (time.time() - time1))
time_radial_calc = 0.0
time_spherical = 0.0
time_loc_glob_add = 0.0
time_loc_lmorb_add = 0.0
nspin = len(self.morb_composition)
num_morbs = []
morb_grids_local = []
self.morb_grids = []
ext_z_n = int(np.round(reserve_extrap/self.dv[2]))
for ispin in range(nspin):
num_morbs.append(len(self.morb_composition[ispin][0][0][0][0]))
self.morb_grids.append(np.zeros((num_morbs[ispin], eval_cell_n[0], eval_cell_n[1], eval_cell_n[2] + ext_z_n), dtype=self.dtype))
morb_grids_local.append(np.zeros((num_morbs[ispin], loc_cell_n[0], loc_cell_n[1], loc_cell_n[2]), dtype=self.dtype))
self.eval_cell_n = np.array([eval_cell_n[0], eval_cell_n[1], eval_cell_n[2] + ext_z_n])
self.eval_cell = self.eval_cell_n * self.dv
self.last_calc_iz = eval_cell_n[2] - 1
for i_at in range(len(self.ase_atoms)):
#elem = self.ase_atoms[i_at].symbol
kind = self.atom_kinds[i_at]
pos = self.ase_atoms[i_at].position * ang_2_bohr
# how does the position match with the grid?
int_shift = (pos/self.dv).astype(int)
frac_shift = pos/self.dv - int_shift
origin_diff = int_shift - mid_ixs
# Shift the local grid such that origin is on the atom
rel_loc_cell_grids = []
for i, loc_grid in enumerate(loc_cell_grids):
if eval_regions[i] is None:
rel_loc_cell_grids.append(loc_grid - frac_shift[i]*self.dv[i])
else:
rel_loc_cell_grids.append(loc_grid - pos[i])
r_vec_2 = rel_loc_cell_grids[0]**2 + \
rel_loc_cell_grids[1]**2 + \
rel_loc_cell_grids[2]**2
for ispin in range(nspin):
morb_grids_local[ispin].fill(0.0)
for i_set, bset in enumerate(self.basis_sets[kind]):
for i_shell, shell in enumerate(bset):
l = shell[0]
es = shell[1]
cs = shell[2]
# Calculate the radial part of the atomic orbital
time2 = time.time()
radial_part = np.zeros(loc_cell_n)
for e, c in zip(es, cs):
radial_part += c*np.exp(-1.0*e*r_vec_2)
time_radial_calc += time.time() - time2
for i_orb, m in enumerate(range(-l, l+1, 1)):
time2 = time.time()
atomic_orb = radial_part*self._spherical_harmonic_grid(l, m,
rel_loc_cell_grids[0],
rel_loc_cell_grids[1],
rel_loc_cell_grids[2])
time_spherical += time.time() - time2
time2 = time.time()
for i_spin in range(nspin):
#print("---------------")
#print(i_spin, len(self.morb_composition))
#print(i_at, len(self.morb_composition[i_spin]))
#print(i_set, len(self.morb_composition[i_spin][i_at]))
#print(i_shell, len(self.morb_composition[i_spin][i_at][i_set]))
#print(i_orb, len(self.morb_composition[i_spin][i_at][i_set][i_shell]))
#print("---------------")
coef_arr = self.morb_composition[i_spin][i_at][i_set][i_shell][i_orb]
for i_mo in range(num_morbs[i_spin]):
morb_grids_local[i_spin][i_mo] += coef_arr[i_mo]*atomic_orb
# slow:
#morb_grids_local += np.outer(coef_arr, atomic_orb).reshape(
# num_morbs, loc_cell_n[0], loc_cell_n[1], loc_cell_n[2])
time_loc_lmorb_add += time.time() - time2
time2 = time.time()
for i_spin in range(nspin):
for i_mo in range(num_morbs[i_spin]):
if ext_z_n == 0:
self._add_local_to_global_grid(
morb_grids_local[i_spin][i_mo],
self.morb_grids[i_spin][i_mo],
origin_diff,
wrap=(mid_ixs != -1))
else:
self._add_local_to_global_grid(
morb_grids_local[i_spin][i_mo],
self.morb_grids[i_spin][i_mo][:, :, :-ext_z_n],
origin_diff,
wrap=(mid_ixs != -1))
time_loc_glob_add += time.time() - time2
if print_info:
print("---- Radial calc time : %4f" % time_radial_calc)
print("---- Spherical calc time : %4f" % time_spherical)
print("---- Loc -> loc_morb time : %4f" % time_loc_lmorb_add)
print("---- loc_morb -> glob time : %4f" % time_loc_glob_add)
print("---- Total time: %.4f"%(time.time() - time1))
### -----------------------------------------
### Extrapolate wavefunctions
### -----------------------------------------
def _resize_2d_arr_with_interpolation(self, array, new_shape):
x_arr = np.linspace(0, 1, array.shape[0])
y_arr = np.linspace(0, 1, array.shape[1])
rgi = scipy.interpolate.RegularGridInterpolator(points=[x_arr, y_arr], values=array)
x_arr_new = np.linspace(0, 1, new_shape[0])
y_arr_new = np.linspace(0, 1, new_shape[1])
x_coords = np.repeat(x_arr_new, len(y_arr_new))
y_coords = np.tile(y_arr_new, len(x_arr_new))
return rgi(np.array([x_coords, y_coords]).T).reshape(new_shape)
def extrapolate_morbs(self, vacuum_pot=None, hart_plane=None, use_weighted_avg=True):
for ispin in range(self.nspin):
self.extrapolate_morbs_spin(ispin, vacuum_pot=vacuum_pot, hart_plane=hart_plane, use_weighted_avg=use_weighted_avg)
def extrapolate_morbs_spin(self, ispin, vacuum_pot=None, hart_plane=None, use_weighted_avg=True):
"""
Extrapolate molecular orbitals from a specified plane to a box or another plane
in case of "single_plane = True", the orbitals will be only extrapolated on
a plane "extent" distance away
Extent in bohr !!!
Either the vacuum potential or the hartree plane is needed!
Both are assumed to be in hartree units wrt to Fermi/Homo.
NB: everything in hartree units!
"""
time1 = time.time()
if vacuum_pot is None and hart_plane is None:
print("You must specify either the vac pot or the hartree plane.")
return None
morb_planes = self.morb_grids[ispin][:, :, :, self.last_calc_iz]
morb_energies = self.morb_energies[ispin]
num_morbs = np.shape(morb_planes)[0]
for morb_index in range(num_morbs):
morb_plane = morb_planes[morb_index]
            if vacuum_pot is not None:
hartree_avg = vacuum_pot
else:
if use_weighted_avg:
# weigh the hartree potential by the molecular orbital
density_plane = morb_plane**2
density_plane /= np.sum(density_plane)
weighted_hartree = density_plane * self._resize_2d_arr_with_interpolation(hart_plane, density_plane.shape)
hartree_avg = np.sum(weighted_hartree)
else:
hartree_avg = np.mean(hart_plane)
energy = morb_energies[morb_index]/hart_2_ev
if energy > hartree_avg:
print("Warning: unbound state, can't extrapolate! index: %d. Constant extrapolation." % morb_index)
energy = hartree_avg
fourier = np.fft.rfft2(morb_plane)
# NB: rfft2 takes REAL fourier transform over last (y) axis and COMPLEX over other (x) axes
# dv in BOHR, so k is in 1/bohr
kx_arr = 2*np.pi*np.fft.fftfreq(morb_plane.shape[0], self.dv[0])
ky_arr = 2*np.pi*np.fft.rfftfreq(morb_plane.shape[1], self.dv[1])
kx_grid, ky_grid = np.meshgrid(kx_arr, ky_arr, indexing='ij')
prefactors = np.exp(-np.sqrt(kx_grid**2 + ky_grid**2 - 2*(energy - hartree_avg))*self.dv[2])
for iz in range(self.last_calc_iz + 1, self.eval_cell_n[2]):
fourier *= prefactors
self.morb_grids[ispin][morb_index, :, :, iz] = np.fft.irfft2(fourier, morb_plane.shape)
print("Extrapolation time: %.3f s"%(time.time()-time1))
### -----------------------------------------
### Export data
### -----------------------------------------
def write_cube(self, filename, orbital_nr, spin=0, square=False):
local_ind = self.i_homo_loc[spin] + orbital_nr
if local_ind >= 0 and local_ind < self.morb_grids[spin].shape[0]:
print("R%d/%d is writing HOMO%+d cube" %(self.mpi_rank, self.mpi_size, orbital_nr))
energy = self.morb_energies[spin][local_ind]
comment = "E=%.8f eV (wrt HOMO)" % energy
if not square:
c = Cube(title="HOMO%+d"%orbital_nr, comment=comment, ase_atoms=self.ase_atoms,
origin=self.origin, cell=self.eval_cell*np.eye(3), data=self.morb_grids[spin][local_ind])
else:
c = Cube(title="HOMO%+d square"%orbital_nr, comment=comment, ase_atoms=self.ase_atoms,
origin=self.origin, cell=self.eval_cell*np.eye(3), data=self.morb_grids[spin][local_ind]**2)
c.write_cube_file(filename)
def calculate_and_save_charge_density(self, filename="./charge_density.cube"):
charge_dens = np.zeros(self.eval_cell_n)
for i_spin in range(self.nspin):
for i_mo, grid in enumerate(self.morb_grids[i_spin]):
if i_mo > self.i_homo_loc[i_spin]:
break
charge_dens += grid**2
if self.nspin == 1:
charge_dens *= 2
total_charge_dens = np.zeros(self.eval_cell_n)
self.mpi_comm.Reduce(charge_dens, total_charge_dens, op=MPI.SUM)
if self.mpi_rank == 0:
vol_elem = np.prod(self.dv)
integrated_charge = np.sum(total_charge_dens)*vol_elem
comment = "Integrated charge: %.6f" % integrated_charge
c = Cube(title="charge density", comment=comment, ase_atoms=self.ase_atoms,
origin=self.origin, cell=self.eval_cell*np.eye(3), data=total_charge_dens)
c.write_cube_file(filename)
def calculate_and_save_spin_density(self, filename="./spin_density.cube"):
if self.nspin == 1:
return
spin_dens = np.zeros(self.eval_cell_n)
for i_spin in range(self.nspin):
for i_mo, grid in enumerate(self.morb_grids[i_spin]):
if i_mo > self.i_homo_loc[i_spin]:
break
if i_spin == 0:
spin_dens += grid**2
else:
spin_dens -= grid**2
total_spin_dens = np.zeros(self.eval_cell_n)
self.mpi_comm.Reduce(spin_dens, total_spin_dens, op=MPI.SUM)
if self.mpi_rank == 0:
vol_elem = np.prod(self.dv)
integrated = np.sum(np.abs(total_spin_dens))*vol_elem
comment = "Integrated abs spin: %.6f" % integrated
c = Cube(title="spin density", comment=comment, ase_atoms=self.ase_atoms,
origin=self.origin, cell=self.eval_cell*np.eye(3), data=total_spin_dens)
c.write_cube_file(filename)
def calculate_and_save_charge_density_artif_core(self, filename="./charge_density_artif.cube"):
charge_dens = np.zeros(self.eval_cell_n)
for i_spin in range(self.nspin):
for i_mo, grid in enumerate(self.morb_grids[i_spin]):
if i_mo > self.i_homo_loc[i_spin]:
break
charge_dens += grid**2
if self.nspin == 1:
charge_dens *= 2
total_charge_dens = np.zeros(self.eval_cell_n)
self.mpi_comm.Reduce(charge_dens, total_charge_dens, op=MPI.SUM)
# free memory
charge_dens = None
if self.mpi_rank == 0:
def gaussian3d(r_arr, sigma):
#sigma = fwhm/2.355
return 1/(sigma**3*(2*np.pi)**(3/2))*np.exp(-(r_arr**2/(2*sigma**2)))
x = np.linspace(0.0, self.eval_cell[0], self.eval_cell_n[0]) + self.origin[0]
y = np.linspace(0.0, self.eval_cell[1], self.eval_cell_n[1]) + self.origin[1]
z = np.linspace(0.0, self.eval_cell[2], self.eval_cell_n[2]) + self.origin[2]
for at in self.ase_atoms:
if at.number == 1:
# No core density for H
continue
p = at.position * ang_2_bohr
if (p[0] < np.min(x) - 0.5 or p[0] > np.max(x) + 0.5 or
p[1] < np.min(y) - 0.5 or p[1] > np.max(y) + 0.5 or
p[2] < np.min(z) - 0.5 or p[2] > np.max(z) + 0.5):
continue
# Distance of the **Center** of each voxel to the atom
x_grid, y_grid, z_grid = np.meshgrid(x - p[0] - self.dv[0]/2, y - p[1] - self.dv[1]/2, z - p[2] - self.dv[2]/2, indexing='ij')
r_grid = np.sqrt(x_grid**2 + y_grid**2 + z_grid**2)
x_grid = None
y_grid = None
z_grid = None
core_charge = at.number - at.number % 8 # not exact...
#r_cut = 0.5
#hat_func = (1.0-r_grid/r_cut)
#hat_func[r_grid > r_cut] = 0.0
#total_charge_dens = hat_func*core_charge*gaussian3d(r_grid, 1.0*r_cut) + (1.0-hat_func)*total_charge_dens
# EMPIRICAL PARAMETER 1
#fwhm = 0.5 # ang
#total_charge_dens = core_charge*gaussian3d(r_grid, fwhm) + total_charge_dens
r_hat = 0.7
h_hat = 20.0
hat_func = (h_hat-h_hat*r_grid/r_hat)
hat_func[r_grid > r_hat] = 0.0
total_charge_dens = np.maximum(hat_func, total_charge_dens)
# EMPIRICAL PARAMETER 2
#total_charge_dens = scipy.ndimage.gaussian_filter(total_charge_dens, sigma = 0.4, mode='nearest')
c = Cube(title="charge density", comment="modif. cube", ase_atoms=self.ase_atoms,
origin=self.origin, cell=self.eval_cell*np.eye(3), data=total_charge_dens)
c.write_cube_file(filename)
# def _orb_plane_above_atoms(self, grid, height):
# """
# Returns the 2d plane above topmost atom in z direction
# height in [angstrom]
# """
# topmost_atom_z = np.max(self.ase_atoms.positions[:, 2]) # Angstrom
# plane_z = (height + topmost_atom_z) * ang_2_bohr
# plane_z_wrt_orig = plane_z - self.origin[2]
#
# plane_index = int(np.round(plane_z_wrt_orig/self.eval_cell[2]*self.eval_cell_n[2]))
# return grid[:, :, plane_index]
#
# def collect_and_save_ch_orbitals(self, orbital_list, height_list, path = "./orb.npz"):
# """
# Save constant-height planes of selected orbitals at selected heights
# orbital list wrt to HOMO
# """
# slice_list = []
#
# for i_spin in range(self.nspin):
# slice_list.append([])
# for h in height_list:
# slice_list[i_spin].append([])
#
# for i_mo in range(len(self.morb_energies[i_spin])):
# i_mo_wrt_homo = i_mo - self.i_homo_loc[i_spin]
#
# if i_mo_wrt_homo in orbital_list:
# for i_h, h in enumerate(height_list):
# orb_plane = self._orb_plane_above_atoms(self.morb_grids[i_spin][i_mo], h)
# slice_list[i_spin][i_h].append(orb_plane)
#
# # indexes of the slice_list: [i_spin], [i_h], [i_mo], [nx x ny]
# # gather to rank_0
# final_list = []
#
# for i_spin in range(self.nspin):
# final_list.append([]) # i_spin
# for i_h in range(len(height_list)):
# plane_gather = self.mpi_comm.gather(slice_list[i_spin][i_h], root = 0)
#
# if self.mpi_rank == 0:
# # flatten the list of lists to numpy
# flat_array = np.array([item for sublist in plane_gather for item in sublist])
# final_list[i_spin].append(flat_array)
#
# # select energy ranges
# #self.gather_global_energies()
#
# if self.mpi_rank == 0:
# # energy array
# # for rank 0, the homo index is given by loc_homo_ind
# save_energy_arr = []
# for i_spin in range(self.nspin):
# global_orb_list = [ind + self.i_homo_loc[i_spin] for ind in orbital_list]
# save_energy_arr.append(self.global_morb_energies[i_spin][global_orb_list])
#
# # turn the spin and height dimensions to numpy as well
# final_numpy = np.array([np.array(list_h) for list_h in final_list])
# save_data = {}
# save_data['orbitals'] = final_numpy
# save_data['heights'] = np.array(height_list)
# save_data['orb_list'] = np.array(orbital_list)
# save_data['x_arr'] = np.arange(0.0, self.eval_cell_n[0]*self.dv[0] + self.dv[0]/2, self.dv[0]) + self.origin[0]
# save_data['y_arr'] = np.arange(0.0, self.eval_cell_n[1]*self.dv[1] + self.dv[1]/2, self.dv[1]) + self.origin[1]
# save_data['energies'] = np.array(save_energy_arr)
# np.savez_compressed(path, **save_data)
### -----------------------------------------
### mpi communication
### -----------------------------------------
#def gather_global_energies(self):
# self.global_morb_energies = []
# for ispin in range(self.nspin):
# morb_en_gather = self.mpi_comm.allgather(self.morb_energies[ispin])
# self.global_morb_energies.append(np.hstack(morb_en_gather))
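
if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module: file names below
    # are placeholders for a typical CP2K output set, and the call sequence
    # simply mirrors the public methods defined above.
    comm = MPI.COMM_WORLD
    cgo = Cp2kGridOrbitals(mpi_rank=comm.Get_rank(), mpi_size=comm.Get_size(),
                           mpi_comm=comm, single_precision=True)
    cgo.read_cp2k_input("cp2k.inp")                 # cell size and basis set names per kind
    cgo.read_xyz("geom.xyz")                        # atomic positions
    cgo.read_basis_functions("BASIS_MOLOPT")        # basis set file used in the calculation
    cgo.load_restart_wfn_file("PROJ-RESTART.wfn", emin=-2.0, emax=2.0)
    cgo.calc_morbs_in_region(0.2, z_eval_region=(10.0, 20.0))
    cgo.write_cube("homo.cube", orbital_nr=0)       # HOMO of spin channel 0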
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from google.appengine.ext import deferred
from dashboard import update_test_suites
from dashboard.common import datastore_hooks
from dashboard.common import descriptor
from dashboard.common import namespaced_stored_object
from dashboard.common import request_handler
from dashboard.common import stored_object
from dashboard.common import utils
from dashboard.models import graph_data
def CacheKey(test_suite):
return 'test_suite_descriptor_' + test_suite
def FetchCachedTestSuiteDescriptor(test_suite):
return namespaced_stored_object.Get(CacheKey(test_suite))
class UpdateTestSuiteDescriptorsHandler(request_handler.RequestHandler):
def get(self):
self.post()
def post(self):
namespace = datastore_hooks.EXTERNAL
if self.request.get('internal_only') == 'true':
namespace = datastore_hooks.INTERNAL
UpdateTestSuiteDescriptors(namespace)
def UpdateTestSuiteDescriptors(namespace):
key = namespaced_stored_object.NamespaceKey(
update_test_suites.TEST_SUITES_2_CACHE_KEY, namespace)
for test_suite in stored_object.Get(key):
ScheduleUpdateDescriptor(test_suite, namespace)
def ScheduleUpdateDescriptor(test_suite, namespace):
deferred.defer(_UpdateDescriptor, test_suite, namespace)
def _UpdateDescriptor(test_suite, namespace):
logging.info('%s %s', test_suite, namespace)
# This function always runs in the taskqueue as an anonymous user.
if namespace == datastore_hooks.INTERNAL:
datastore_hooks.SetPrivilegedRequest()
desc = descriptor.Descriptor(test_suite=test_suite, bot='place:holder')
test_path = list(desc.ToTestPathsSync())[0].split('/')
measurements = set()
bots = set()
cases = set()
# TODO(4549) Tagmaps.
query = graph_data.TestMetadata.query()
query = query.filter(graph_data.TestMetadata.suite_name == test_path[2])
if len(test_path) > 3:
# test_suite is composite.
query = query.filter(
graph_data.TestMetadata.test_part1_name == test_path[3])
query = query.filter(graph_data.TestMetadata.deprecated == False)
query = query.filter(graph_data.TestMetadata.has_rows == True)
# Use an iterator because some test suites have more keys than can fit in
# memory.
for key in query.iter(keys_only=True):
desc = descriptor.Descriptor.FromTestPathSync(utils.TestPath(key))
bots.add(desc.bot)
if desc.measurement:
measurements.add(desc.measurement)
if desc.test_case:
cases.add(desc.test_case)
logging.info('%d measurements, %d bots, %d cases',
len(measurements), len(bots), len(cases))
desc = {
'measurements': list(sorted(measurements)),
'bots': list(sorted(bots)),
'cases': list(sorted(cases)),
}
key = namespaced_stored_object.NamespaceKey(
CacheKey(test_suite), namespace)
stored_object.Set(key, desc)
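
# ---------------------------------------------------------------------------
# Hedged illustration, not part of the original module: the object cached per
# test suite by _UpdateDescriptor and returned by
# FetchCachedTestSuiteDescriptor(test_suite) has this shape:
#
#   {
#       'measurements': [...],   # sorted measurement names
#       'bots': [...],           # sorted bot descriptors
#       'cases': [...],          # sorted test case names
#   }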
|
from api.models import Api, Profile, ConnectLog
from api.serializers import ApiSerializer, ProfileSerializer, \
UserSerializer, ConnectLogSerializer
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status, generics
from django_filters.rest_framework import DjangoFilterBackend
from django.contrib.auth.models import User
from rest_framework import renderers
from rest_framework.decorators import api_view
from rest_framework.reverse import reverse
@api_view(['GET'])
def api_root(request, format=None):
return Response({
'users': reverse('user-list', request=request, format=format),
'api': reverse('api-list', request=request, format=format),
'profile': reverse('profile-list', request=request, format=format),
'connectlog': reverse('connectlog-list',
request=request, format=format),
})
class ApiHighlight(generics.GenericAPIView):
queryset = Api.objects.all()
renderer_classes = (renderers.StaticHTMLRenderer,)
def get(self, request, *args, **kwargs):
api = self.get_object()
return Response(api.highlighted)
class UserList(generics.ListAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
class UserDetail(generics.RetrieveAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
class ApiList(generics.ListCreateAPIView):
queryset = Api.objects.all()
serializer_class = ApiSerializer
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
class ApiDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Api.objects.all()
serializer_class = ApiSerializer
class ProfileList(generics.ListCreateAPIView):
queryset = Profile.objects.all()
serializer_class = ProfileSerializer
def get_queryset(self):
"""
Optionally restricts the returned purchases to a given user,
by filtering against a `username` query parameter in the URL.
"""
queryset = Profile.objects.all()
badgenum = self.request.query_params.get('badge', None)
username = self.request.query_params.get('user', None)
if badgenum is not None:
queryset = queryset.filter(badge=badgenum)
elif username is not None:
queryset = queryset.filter(user__username=username)
return queryset
class ProfileDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Profile.objects.all()
serializer_class = ProfileSerializer
# filter_backends = (DjangoFilterBackend,)
# filter_fields = ('badge')
class ConnectLogList(generics.ListCreateAPIView):
queryset = ConnectLog.objects.all()
serializer_class = ConnectLogSerializer
class ConnectLogDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = ConnectLog.objects.all()
serializer_class = ConnectLogSerializer
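
# ---------------------------------------------------------------------------
# Hedged wiring sketch, not part of the original module: api_root above
# reverses the names 'user-list', 'api-list', 'profile-list' and
# 'connectlog-list', so a matching urls.py would look roughly like this
# (paths are placeholders):
#
#   from django.urls import path
#   from api import views
#
#   urlpatterns = [
#       path('', views.api_root),
#       path('users/', views.UserList.as_view(), name='user-list'),
#       path('api/', views.ApiList.as_view(), name='api-list'),
#       path('profile/', views.ProfileList.as_view(), name='profile-list'),
#       path('connectlog/', views.ConnectLogList.as_view(), name='connectlog-list'),
#   ]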
|
from flask import Blueprint
from flask_restful import Resource
from atp.utils.common import get_request_json, make_response, username_to_nickname
from atp.api.mysql_manager import UITestCaseInfoManage, UICasePageInfoManager, UICasePageInfo, BaseSystemInfoManager
from atp.engine.exceptions import LoadCaseError
from atp.utils.tools import json_dumps, json_loads
ui_testcase = Blueprint('ui_testcase_interface', __name__)
class UiTestCase(Resource):
def __init__(self):
self.utcim = UITestCaseInfoManage()
self.data = get_request_json()
def post(self, action):
if action == 'add':
try:
self.handle_ui_testcase(action, **self.data)
except LoadCaseError:
return make_response({"code": "200", "desc": "新增用例时出错"})
return make_response({"code": "000", "desc": "用例新增成功"})
elif action == "edit":
try:
self.handle_ui_testcase(action, **self.data)
except LoadCaseError:
return make_response({"code": "200", "desc": "编辑用例时出错"})
return make_response({"code": "000", "desc": "编辑用例成功"})
elif action == "delete":
try:
id_ = self.data.pop("id")
except KeyError:
return make_response({"code": "100", "desc": "入参校验失败"})
self.utcim.delete_ui_testcase(id_)
return make_response({"code": "000", "desc": "测试用例{}删除成功".format(id_)})
def handle_ui_testcase(self, action, **kwargs):
        '''
        Build the ui_request payload from the posted case data and
        insert or update the UI test case record.
        :param action: 'add' or 'edit'
        :param kwargs: parsed request JSON
        '''
base = kwargs.pop("base")
module_id = base.pop("moduleId")
system_id = base.pop("systemId")
testcase_name = base.pop("testcaseName")
simple_desc = base.pop("testcaseDesc")
setup_info = kwargs.pop("setupInfo")
variable_info = kwargs.pop("variableInfo")
validate_Info = kwargs.pop("validateInfo")
include = kwargs.pop("include")
steps = kwargs.pop("steps")
setup_case_list = []
        # configure the base URL
# system_obj =BaseSystemInfoManager.query_system(id=system_id)
# system_url = system_obj.base_host
# setup_info
for setup in setup_info:
if setup["setup_type"] == 'setupcase':
setup_case_list.append(setup["setup_args"])
elif setup["setup_type"] == 'setup_db_operation':
# sql = setup["args"]["sql"]
pass
        # steps: the operation steps of the test case
for step in steps:
            '''look up the page name from the page id'''
if step["page_id"]:
page_id = step["page_id"]
obj = UICasePageInfoManager.query_ui_page(id=page_id)
page_name = obj.page_name
step["page_name"] = page_name
# ui_request = {
# "systemId":system_id,
# "testcases": [
# {
# "name": testcase_name,
# "teststeps": steps,
# "variables": variable_info,
# "validates": validate_Info,
# }
# ]
# }
        # result validation
if validate_Info:
for validate in validate_Info:
page_id = validate["page_id"]
obj = UICasePageInfoManager.query_ui_page(id=page_id)
page_name = obj.page_name
validate["page_name"] = page_name
ui_request = {
"systemId": system_id,
"testcases": [
{
"name": testcase_name,
"teststeps": steps,
"variables": variable_info,
"validates": validate_Info,
}
]
}
        '''public (shared) variables'''
if not isinstance(include, list):
include = [{"public_variables": []}]
include.append({"setup_cases": setup_case_list})
if action == 'add':
UITestCaseInfoManage.insert_ui_testcase(
testcase_name=testcase_name,
simple_desc=simple_desc,
request=json_dumps(ui_request),
inlude=json_dumps(include),
module_id=module_id
)
elif action == 'edit':
testcase_id = base.pop("id", None)
UITestCaseInfoManage.update_ui_testcase(
id_=testcase_id,
testcase_name=testcase_name,
inlude=json_dumps(include),
request=json_dumps(ui_request),
simple_desc=simple_desc,
module_id=module_id
)
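
# ---------------------------------------------------------------------------
# Hedged wiring sketch, not part of the original module: how this Resource
# would typically be attached to the blueprint declared above. The URL rule
# is a placeholder; post(self, action) expects the <action> segment to be
# 'add', 'edit' or 'delete'.
#
#   from flask_restful import Api
#
#   api = Api(ui_testcase)
#   api.add_resource(UiTestCase, '/uitestcase/<string:action>')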
|
"""
Copyright 2019 Paul T. Grogan, Stevens Institute of Technology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import division
from scipy.linalg import orth
import numpy as np
class Session(object):
"""
An experimental session. Includes settings and the
list of tasks (training and experimental).
"""
def __init__(self, name='', num_designers=4, error_tol=0.05, training = [], rounds = []):
"""
Initializes this session.
@param name: the session name
@type name: str
@param num_designers: the number of designers
@type num_designers: int
@param error_tol: the error tolerance for solutions
@type error_tol: float
@param training: the training rounds
@type training: array(Round)
@param rounds: the experimental rounds
@type rounds: array(Round)
"""
self.name = name
self.num_designers = num_designers
self.error_tol = error_tol
self.training = training
self.rounds = rounds
@staticmethod
def parse(json):
return Session(
name = json.get('name', ''),
num_designers = json.get('num_designers', 4),
error_tol = json.get('error_tol', 0.05),
training = list(map(lambda r: Round.parse(r), json.get('training', []))),
rounds = list(map(lambda r: Round.parse(r), json.get('rounds', [])))
)
class Round(object):
"""
An experimental round with a set of technical tasks.
"""
def __init__(self, name, assignments, tasks, max_time):
"""
Initializes this round.
@param name: the name
@type name: str
@param assignments: the task assignments: list of lists of designers
@type assignments: list(list(int))
        @param tasks: the technical tasks
        @type tasks: array(Task)
        @param max_time: the maximum allowable time (milliseconds)
        @type max_time: number
"""
self.name = name
self.assignments = assignments
self.tasks = tasks
self.max_time = max_time
def getDesignerTask(self, designer):
return next((t for t in self.tasks if designer in t.designers))
@staticmethod
def parse(json):
return Round(
name = json.get('name'),
assignments = json.get('assignments'),
tasks = list(map(lambda t: Task.parse(t), json.get('tasks'))),
max_time = json.get('max_time')
)
@staticmethod
def generate(name, size, assignments, is_coupled=True, max_time=None, random=np.random):
return Round(
name = name,
assignments = assignments,
tasks = [Task.generate(designers, size, is_coupled=is_coupled, random=random) for designers in assignments],
max_time = max_time*1000 if max_time is not None else None
)
class Task(object):
"""
An experimental task.
"""
def __init__(self, designers, num_inputs, num_outputs, coupling, target, inputs, outputs):
"""
Initializes this task.
        @param designers: the designers assigned to this task
@type designers: list(int)
@param num_inputs: the number of inputs per designer
@type num_inputs: list(int)
@param num_outputs: the number of outputs per designer
@type num_outputs: list(int)
@param coupling: the coupling matrix (M)
@type coupling: list(float)
@param target: the target vector (y_star)
@type target: list(float)
@param inputs: the input assignments
@type inputs: list(int)
@param outputs: the output assignments
@type outputs: list(int)
"""
self.designers = designers
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.coupling = coupling
self.target = target
self.inputs = inputs
self.outputs = outputs
self.time_start = None # set by post-processor
self.time_complete = None # set by post-processor
self.actions = None # set by post-processor
self.score = None # set by post-processor
def getSolution(self):
"""
Gets the zero-error solution for this task.
@returns: the solution vector
@rtype numpy.Array(float)
"""
return np.matmul(np.array(self.coupling).T, self.target)
def getDuration(self):
"""
Gets the duration of this task.
@returns: the elapsed time (milliseconds)
@rtype: long
"""
return (self.time_complete - self.time_start) if self.time_complete else -1
def getCountActions(self, designer=None):
return np.sum([1 if i > 0 and not np.array_equal(a.getInput(self, designer), self.actions[i-1].getInput(self, designer)) else 0 for i,a in enumerate(self.actions)])
def getCountProductiveActions(self, designer=None):
return np.sum([1 if i > 0 and a.getErrorNorm(self, designer) < self.actions[i-1].getErrorNorm(self, designer) else 0 for i,a in enumerate(self.actions)])
def getCumulativeInputDistanceNorm(self, designer=None):
return np.sum([np.linalg.norm(a.getInput(self, designer) - self.actions[i-1].getInput(self, designer)) if i > 0 else 0 for i,a in enumerate(self.actions)])
def getCumulativeErrorNorm(self, designer=None):
return np.sum([a.getErrorNorm(self, designer) for a in self.actions])
@staticmethod
def parse(json):
return Task(
designers = json.get('designers'),
num_inputs = json.get('num_inputs'),
num_outputs = json.get('num_outputs'),
coupling = json.get('coupling'),
target = json.get('target'),
inputs = json.get('inputs'),
outputs = json.get('outputs')
)
@staticmethod
def generate(designers, size, inputs=None, outputs=None, is_coupled=True, random=np.random):
if inputs is None:
# try to assign equally among designers
inputs = [designers[int(i//(size/len(designers)))] for i in range(size)]
num_inputs = [np.sum(np.array(inputs) == designer).item() for designer in designers];
if outputs is None:
# try to assign equally among designers
outputs = [designers[int(i//(size/len(designers)))] for i in range(size)]
num_outputs = [np.sum(np.array(outputs) == designer).item() for designer in designers];
coupling = np.zeros((size, size))
if is_coupled:
# coupling matrix is orthonormal basis of random matrix
coupling = orth(random.rand(size, size))
else:
# coupling matrix has random 1/-1 along diagonal
coupling = np.diag(2*random.randint(0,2,size)-1)
# find a target with no solution values "close" to initial condition
solution = np.zeros(size)
while np.any(np.abs(solution) <= 0.20):
target = orth(2*random.rand(size,1)-1)
# solve using dot product of coupling transpose and target
solution = np.matmul(coupling.T, target)
return Task(designers, num_inputs, num_outputs, coupling.tolist(), target[:,0].tolist(), inputs, outputs)
class Action(object):
"""
An experimental action.
"""
def __init__(self, time, input):
"""
Initializes this action.
@param time: the action time (milliseconds)
@type time: long
@param input: the resulting input vector
@type input: np.Array(float)
"""
self.time = time
self.input = input
def getError(self, task, designer = None):
"""
Gets the error in design after this action for a task.
@param task: the task
@type task: Task
@param designer: the designer (optional, default = None)
@type designer: int
@returns: the error
@rtype: numpy.Array(float)
"""
# compute error as outputs - targets
if designer is None:
return self.getOutput(task, designer) - task.target
else:
return (self.getOutput(task, designer)[np.array(task.outputs) == designer]
- np.array(task.target)[np.array(task.outputs) == designer])
def getErrorNorm(self, task, designer = None):
"""
Gets the error norm in design after this action for a task.
@param task: the task
@type task: Task
@param designer: the designer (optional, default = None)
@type designer: int
@returns: the error norm
@rtype: float
"""
# compute 2-norm of error
return np.linalg.norm(self.getError(task, designer))
def getElapsedTime(self, task):
"""
Gets the elapsed time of this action.
@param task: the task
@type task: Task
@returns: the elapsed time (milliseconds)
@rtype: long
"""
return self.time - task.time_start
def isSolved(self, session, task):
"""
Checks if the task is solved.
@param session: the experimental session
@type session: Session
@param task: the task
@type task: Task
@returns: true, if this action solves the task
@rtype: bool
"""
# all errors must be less than tolerance values
return all(abs(e) < session.error_tol for e in self.getError(task))
def getInputDesignerIndex(self, task):
"""
Gets the designer index associated with this action. Returns -1 for
the first action (initialization).
@param task: the task
@type task: Task
@returns: the designer index who performed this action
@rtype: int
"""
action_id = task.actions.index(self)
if action_id > 0:
return task.inputs[self.getInputIndex(task)]
else:
return -1
def getInputDeltaDesignerIndex(self, task):
"""
Gets the change in designer index associated with this action. Returns 0 for
the first and second actions (initialization).
@param task: the task
@type task: Task
@returns: the index of the changed input
@rtype: int
"""
action_id = task.actions.index(self)
if action_id > 1:
return self.getInputDesignerIndex(task) - task.actions[action_id-1].getInputDesignerIndex(task)
else:
return 0
def getInputIndex(self, task, designer = None):
"""
Gets the design variable input index modified with this action. Returns
-1 for the first action (initialization).
@param task: the task
@type task: Task
@param designer: the designer (optional, default = None)
@type designer: int
@returns: the index of the changed input
@rtype: int
"""
changed_id = np.argwhere(self.getInputDelta(task, designer) != 0)
if len(changed_id) == 0:
return -1
else:
return changed_id[0][0]
def getInputDeltaIndex(self, task, designer = None):
"""
Gets the change in design variable input index between this action
and the previous action. Returns 0 for the first and second actions
(initialization).
@param task: the task
@type task: Task
@param designer: the designer (optional, default = None)
@type designer: int
@returns: the change in input index relative to the previous action
@rtype: int
"""
action_id = task.actions.index(self)
if action_id > 1:
return self.getInputIndex(task, designer) - task.actions[action_id-1].getInputIndex(task, designer)
else:
return 0
def getInputDeltaSize(self, task, designer = None):
"""
Gets the magnitude (norm) of the change in design input relative to
the previous action.
@param task: the task
@type task: Task
@param designer: the designer (optional, default = None)
@type designer: int
@returns: the size of the input change relative to the previous action
@rtype: int
"""
return np.linalg.norm(self.getInputDelta(task, designer))
def getInputDelta(self, task, designer = None):
"""
Gets the difference in input vector after versus before this action.
@param task: the task
@type task: Task
@param designer: the designer (optional, default = None)
@type designer: int
@returns: the difference in input
@rtype: int
"""
action_id = task.actions.index(self)
if action_id > 0:
return self.getInput(task, designer) - task.actions[action_id-1].getInput(task, designer)
else:
return np.zeros(np.shape(self.getInput(task, designer)))
def getInput(self, task, designer = None):
"""
Gets the input for a designer.
@param task: the task
@type task: Task
@param designer: the designer (optional, default = None)
@type designer: int
@returns: the input vector
@rtype: numpy.Array(float)
"""
if designer is None:
return np.array(self.input)
else:
return np.array(self.input)[np.array(task.inputs) == designer]
def getOutput(self, task, designer = None):
"""
Gets the output for a designer.
@param task: the task
@type task: Task
@param designer: the designer (optional, default = None)
@type designer: int
@returns: the output vector
@rtype: numpy.Array(float)
"""
if designer is None:
return np.matmul(task.coupling, self.input)
else:
return np.matmul(task.coupling, self.input)[np.array(task.outputs) == designer]
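# A minimal sketch (hypothetical values, not part of the original module) of the
# designer masking used by getInput/getOutput above: each design variable has an
# owning designer recorded in task.inputs, and a boolean mask selects that
# designer's slice of the full vector.
#
#     import numpy as np
#     full_input = np.array([0.2, -0.1, 0.4])   # action's full input vector
#     owners = np.array([0, 1, 1])              # task.inputs: owner of each variable
#     print(full_input[owners == 1])            # -> [-0.1  0.4]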
|
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from .models import EmailUser
class EmailUserCreationForm(UserCreationForm):
    class Meta(UserCreationForm.Meta):
model = EmailUser
fields = ('email', 'name')
class EmailUserChangeForm(UserChangeForm):
class Meta:
model = EmailUser
fields = ('email', 'name')
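# A hedged sketch of how these forms are typically wired into the Django admin
# (the admin class and list_display fields below are illustrative, not taken
# from this app):
#
#     from django.contrib import admin
#     from django.contrib.auth.admin import UserAdmin
#
#     class EmailUserAdmin(UserAdmin):
#         add_form = EmailUserCreationForm
#         form = EmailUserChangeForm
#         model = EmailUser
#         list_display = ('email', 'name', 'is_staff')
#
#     admin.site.register(EmailUser, EmailUserAdmin)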
|
# File: S (Python 2.4)
from direct.fsm.StatePush import AttrSetter, FunctionCall, StateVar
from pirates.pvp import PVPGlobals
class ShipRepairSpotMgrBase:
def __init__(self):
self._state = DestructiveScratchPad(health = StateVar(0), speed = StateVar(0), armor = StateVar(0), modelClass = StateVar(0), pvpTeam = StateVar(0), siegeTeam = StateVar(0), fullHealth = StateVar(False), willBeFullHealth = StateVar(False), validShipClass = StateVar(False), hasTeam = StateVar(False))
self._statePushes = []
def destroy(self):
for statePush in self._statePushes:
statePush.destroy()
del self._statePushes
self._state.destroy()
def _onShipReady(self):
self._statePushes.extend([
FunctionCall(self._evalFullHealth, self._state.health, self._state.speed, self._state.armor, self._state.willBeFullHealth).pushCurrentState(),
FunctionCall(self._evalValidShipClass, self._state.modelClass).pushCurrentState(),
FunctionCall(self._evalHasTeam, self._state.pvpTeam, self._state.siegeTeam).pushCurrentState()])
def updateHealth(self, health):
self._state.health.set(health)
def updateSpeed(self, speed):
self._state.speed.set(speed)
def updateArmor(self, armor):
self._state.armor.set(armor)
def updateWillBeFullHealth(self, willBeFullHealth):
self._state.willBeFullHealth.set(willBeFullHealth)
def updateShipClass(self, modelClass):
self._state.modelClass.set(modelClass)
def updatePVPTeam(self, team):
self._state.pvpTeam.set(team)
def updateSiegeTeam(self, team):
self._state.siegeTeam.set(team)
    def _evalFullHealth(self, health, speed, armor, willBeFullHealth):
        # decompiler "if ...: pass" artifact folded back into a single boolean expression
        self._state.fullHealth.set(willBeFullHealth and health > 99.9 and speed > 99.9 and armor > 99.9)
def _evalValidShipClass(self, modelClass):
self._state.validShipClass.set(modelClass in PVPGlobals.ShipClass2repairLocators)
    def _evalHasTeam(self, pvpTeam, siegeTeam):
        # decompiler "if not ...: pass" artifact folded back into an "or"
        self._state.hasTeam.set(pvpTeam or siegeTeam)
|
# -*- test-case-name: twisted.application.runner.test.test_pidfile -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
PID file.
"""
import errno
from os import getpid, kill, name as SYSTEM_NAME
from types import TracebackType
from typing import Optional, Type
from zope.interface import Interface, implementer
from twisted.logger import Logger
from twisted.python.filepath import FilePath
class IPIDFile(Interface):
"""
Manages a file that remembers a process ID.
"""
def read() -> int:
"""
Read the process ID stored in this PID file.
@return: The contained process ID.
@raise NoPIDFound: If this PID file does not exist.
@raise EnvironmentError: If this PID file cannot be read.
@raise ValueError: If this PID file's content is invalid.
"""
def writeRunningPID() -> None:
"""
Store the PID of the current process in this PID file.
@raise EnvironmentError: If this PID file cannot be written.
"""
def remove() -> None:
"""
Remove this PID file.
@raise EnvironmentError: If this PID file cannot be removed.
"""
def isRunning() -> bool:
"""
Determine whether there is a running process corresponding to the PID
in this PID file.
@return: True if this PID file contains a PID and a process with that
PID is currently running; false otherwise.
@raise EnvironmentError: If this PID file cannot be read.
@raise InvalidPIDFileError: If this PID file's content is invalid.
@raise StalePIDFileError: If this PID file's content refers to a PID
for which there is no corresponding running process.
"""
def __enter__() -> "IPIDFile":
"""
Enter a context using this PIDFile.
Writes the PID file with the PID of the running process.
@raise AlreadyRunningError: A process corresponding to the PID in this
PID file is already running.
"""
def __exit__(
excType: Optional[Type[BaseException]],
excValue: Optional[BaseException],
traceback: Optional[TracebackType],
) -> Optional[bool]:
"""
Exit a context using this PIDFile.
Removes the PID file.
"""
@implementer(IPIDFile)
class PIDFile:
"""
Concrete implementation of L{IPIDFile}.
This implementation is presently not supported on non-POSIX platforms.
Specifically, calling L{PIDFile.isRunning} will raise
L{NotImplementedError}.
"""
_log = Logger()
@staticmethod
def _format(pid: int) -> bytes:
"""
Format a PID file's content.
@param pid: A process ID.
@return: Formatted PID file contents.
"""
return f"{int(pid)}\n".encode()
def __init__(self, filePath: FilePath) -> None:
"""
@param filePath: The path to the PID file on disk.
"""
self.filePath = filePath
def read(self) -> int:
pidString = b""
try:
with self.filePath.open() as fh:
for pidString in fh:
break
except OSError as e:
if e.errno == errno.ENOENT: # No such file
raise NoPIDFound("PID file does not exist")
raise
try:
return int(pidString)
except ValueError:
raise InvalidPIDFileError(
f"non-integer PID value in PID file: {pidString!r}"
)
def _write(self, pid: int) -> None:
"""
Store a PID in this PID file.
@param pid: A PID to store.
@raise EnvironmentError: If this PID file cannot be written.
"""
self.filePath.setContent(self._format(pid=pid))
def writeRunningPID(self) -> None:
self._write(getpid())
def remove(self) -> None:
self.filePath.remove()
def isRunning(self) -> bool:
try:
pid = self.read()
except NoPIDFound:
return False
if SYSTEM_NAME == "posix":
return self._pidIsRunningPOSIX(pid)
else:
raise NotImplementedError(f"isRunning is not implemented on {SYSTEM_NAME}")
@staticmethod
def _pidIsRunningPOSIX(pid: int) -> bool:
"""
POSIX implementation for running process check.
Determine whether there is a running process corresponding to the given
PID.
@param pid: The PID to check.
@return: True if the given PID is currently running; false otherwise.
@raise EnvironmentError: If this PID file cannot be read.
@raise InvalidPIDFileError: If this PID file's content is invalid.
@raise StalePIDFileError: If this PID file's content refers to a PID
for which there is no corresponding running process.
"""
try:
kill(pid, 0)
except OSError as e:
if e.errno == errno.ESRCH: # No such process
raise StalePIDFileError("PID file refers to non-existing process")
elif e.errno == errno.EPERM: # Not permitted to kill
return True
else:
raise
else:
return True
def __enter__(self) -> "PIDFile":
try:
if self.isRunning():
raise AlreadyRunningError()
except StalePIDFileError:
self._log.info("Replacing stale PID file: {log_source}")
self.writeRunningPID()
return self
def __exit__(
self,
excType: Optional[Type[BaseException]],
excValue: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
self.remove()
return None
@implementer(IPIDFile)
class NonePIDFile:
"""
PID file implementation that does nothing.
    This is meant to be used as an "active None" object in place of a PID file
when no PID file is desired.
"""
def __init__(self) -> None:
pass
def read(self) -> int:
raise NoPIDFound("PID file does not exist")
def _write(self, pid: int) -> None:
"""
Store a PID in this PID file.
@param pid: A PID to store.
@raise EnvironmentError: If this PID file cannot be written.
@note: This implementation always raises an L{EnvironmentError}.
"""
raise OSError(errno.EPERM, "Operation not permitted")
def writeRunningPID(self) -> None:
self._write(0)
def remove(self) -> None:
raise OSError(errno.ENOENT, "No such file or directory")
def isRunning(self) -> bool:
return False
def __enter__(self) -> "NonePIDFile":
return self
def __exit__(
self,
excType: Optional[Type[BaseException]],
excValue: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
return None
nonePIDFile = NonePIDFile()
class AlreadyRunningError(Exception):
"""
Process is already running.
"""
class InvalidPIDFileError(Exception):
"""
PID file contents are invalid.
"""
class StalePIDFileError(Exception):
"""
PID file contents are valid, but there is no process with the referenced
PID.
"""
class NoPIDFound(Exception):
"""
No PID found in PID file.
"""
|
from manimlib.imports import *
class Dragon(MovingCameraScene):
CONFIG = {
"iterations":5,
"colors":[DARK_RED, YELLOW],
}
def construct(self):
path = VGroup()
line = Line(ORIGIN,UP / 10).set_color(DARK_RED)
path.add(line)
self.camera_frame.set_height(line.get_height() * 1.2)
self.camera_frame.move_to(line)
self.play(ShowCreation(line))
self.target_path = self.allP(path,self.iterations)
for i in range(self.iterations):
path.set_color_by_gradient(*self.colors)
self.dup(path,i)
#####
## Have to add flipper for twinDragon
####
self.wait()
def dup(self,path,i):
set_paths = self.target_path[:2**(i + 1)]
height = set_paths.get_height() * 1.1
new_P = path.copy()
self.add(new_P)
point = self.getP(path)
self.play(
Rotating(
new_P,
radians=PI/2,
about_point=path[-1].points[point],
rate_func=linear
),
self.camera_frame.move_to,set_paths,
self.camera_frame.set_height,height,
run_time=1, rate_func=smooth
)
newP = reversed([*new_P])
path.add(*newP)
def allP(self, path, iterations):
target_path = path.copy()
for _ in range(iterations):
new_P = target_path.copy()
point = self.getP(new_P)
new_P.rotate(
PI/2,
about_point=target_path[-1].points[point],
)
newP = reversed([*new_P])
target_path.add(*newP)
return target_path
def getP(self, path):
return 0 if len(path) > 1 else -1
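# Rendered with the manimlib CLI, e.g. (file name and flags depend on the local
# manim version; shown only as an illustration):
#
#     manim dragon.py Dragon -pl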
|
# -*- coding: utf-8 -*-
"""Console script for alphashape."""
import os
import sys
import click
import click_log
import logging
import shapely
import geopandas
import alphashape
# Setup Logging
LOGGER = logging.getLogger(__name__)
click_log.basic_config(LOGGER)
@click.command()
@click.argument('source', type=click.Path(exists=True))
@click.argument('target', type=click.Path())
@click.option('--alpha', '-a', type=float, help='Alpha parameter')
@click.option('--epsg', '-e', type=int,
help='EPSG code to create alpha shape in')
@click_log.simple_verbosity_option()
def main(source, target, alpha, epsg):
"""
    Example console application using the alphashape toolbox.
    Given a SOURCE shapefile or GeoJSON file with point geometry, write out a
    new TARGET file containing the geometries produced by the alpha shape
    toolbox.
    The alpha parameter is optional. If it is provided, the alpha shape for
    that value is returned; if it is not, the tightest-fitting alpha shape
    that contains all input points is solved for.
    An EPSG code may also be given to specify the coordinate system in which
    to conduct the alpha shape analysis. If none is given, the coordinate
    system of the input data is used.
The output file will always have the same coordinate system as the
source file.
The output file format will be determined by the extension of the provided
target filename and can be written out in shapefile format or GeoJSON.
"""
# Read in source data
source_filename = click.format_filename(source)
target_filename = click.format_filename(target)
LOGGER.info('Reading source file: %s', source_filename)
try:
gdf = geopandas.read_file(source_filename)
except: # noqa: E722
LOGGER.error('Could not read source file')
return 10
# Source data type checking
if not any([isinstance(
p, shapely.geometry.Point) for p in gdf['geometry']]):
        LOGGER.error('Source file does not contain point features')
return 20
# Project data if given an EPSG code
if epsg:
LOGGER.info('Projecting source data to EPSG=%s', epsg)
try:
gdf_input = gdf.to_crs({'init': 'epsg:%s' % epsg})
except: # noqa: E722
LOGGER.error('Could not project source data')
return 30
else:
gdf_input = gdf
# Generate the alpha shape
    LOGGER.info('Creating alpha shape')
try:
alpha_shape = alphashape.alphashape(gdf_input, alpha)
except: # noqa: E722
LOGGER.error('Could not generate alpha shape')
return 40
# Project back to the input coordinate system if an EPSG code was given
if epsg:
LOGGER.info('Projecting alpha shape data to source projection')
try:
alpha_shape = alpha_shape.to_crs(gdf.crs)
except: # noqa: E722
LOGGER.error('Could not project alpha shape')
return 50
# Write out the target file
LOGGER.info('Writing target file: %s', target_filename)
try:
if os.path.splitext(target)[1].lower() == '.geojson':
alpha_shape.to_file(target, driver='GeoJSON')
else:
alpha_shape.to_file(target)
except: # noqa: E722
LOGGER.error('Could not write target file')
return 60
return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
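# Example invocations (file names are placeholders; the "alphashape" entry point
# assumes the package's console script is installed):
#
#     alphashape points.shp hull.geojson
#     alphashape points.shp hull.shp --alpha 2.0 --epsg 32756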
|
# Minimum number of operations needed to reduce x to 1, where each operation is
# subtract 1, divide by 2 (if divisible), or divide by 3 (if divisible).
x = int(input())
count = [0, 0, 1, 1]  # base cases for 0..3
for i in range(4, x + 1):
    count.append(count[i - 1] + 1)                               # subtract 1
    if i % 2 == 0: count[i] = min(count[i], count[i // 2] + 1)   # divide by 2
    if i % 3 == 0: count[i] = min(count[i], count[i // 3] + 1)   # divide by 3
print(count[x])  # e.g. x = 10 -> 3 (10 -> 9 -> 3 -> 1)
|
# coding=utf-8
"""
The number, 1406357289, is a 0 to 9 pandigital number because it is made up of each of the digits 0 to 9 in some order, but it also has a rather interesting sub-string divisibility property.
Let d1 be the 1st digit, d2 be the 2nd digit, and so on. In this way, we note the following:
d2d3d4=406 is divisible by 2
d3d4d5=063 is divisible by 3
d4d5d6=635 is divisible by 5
d5d6d7=357 is divisible by 7
d6d7d8=572 is divisible by 11
d7d8d9=728 is divisible by 13
d8d9d10=289 is divisible by 17
Find the sum of all 0 to 9 pandigital numbers with this property.
Solution comment:
Iterate over all 0-9 pandigitals. First digit must be one of
1-9, rest can be any of the 9 remaining digits.
Then check that each window of 3 digits is divisible by its
corresponding prime.
"""
from itertools import permutations, izip
def slidingWindow(sequence, winSize, step=1):
"""Returns a generator that will iterate through
the defined chunks of input sequence. Input sequence
must be iterable."""
numOfChunks = ((len(sequence)-winSize)/step)+1
for i in xrange(0, numOfChunks*step, step):
yield sequence[i:i+winSize]
digits = set(str(d) for d in xrange(10))
primes = [2, 3, 5, 7, 11, 13, 17]
results = []
for d1 in xrange(1, 10):
for perm in permutations(digits ^ {str(d1)}, 9):
n = str(d1) + ''.join(perm)
for part, p in izip(slidingWindow(n[1:], 3), primes):
if int(part) % p != 0:
break
else:
print n
results.append(int(n))
print results, sum(results)
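# A hedged Python 3 port of the same brute-force idea (not part of the original
# Python 2 script; it iterates permutations of all ten digits directly and
# checks the seven divisibility windows with a generator expression):
#
#     from itertools import permutations
#     primes = [2, 3, 5, 7, 11, 13, 17]
#     total = 0
#     for perm in permutations('0123456789'):
#         if perm[0] == '0':
#             continue
#         n = ''.join(perm)
#         if all(int(n[i + 1:i + 4]) % p == 0 for i, p in enumerate(primes)):
#             total += int(n)
#     print(total)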
|
# Generated by Django 2.2 on 2019-04-26 01:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Acao',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome_acao', models.CharField(max_length=500)),
],
options={
'verbose_name_plural': 'Ações',
},
),
migrations.CreateModel(
name='Alocacao',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantidade_referencia', models.IntegerField(blank=True)),
('capacidade_caixa', models.IntegerField(blank=True)),
('quantidade_caixas', models.IntegerField(blank=True)),
('pdf', models.FileField(blank=True, upload_to='pdf/')),
],
options={
'verbose_name_plural': 'Alocacoes',
},
),
migrations.CreateModel(
name='Cliente',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome_cliente', models.CharField(max_length=500)),
('cnpj', models.CharField(max_length=14)),
('telefone', models.CharField(max_length=15)),
('email', models.CharField(max_length=500)),
('data_criacao', models.DateTimeField(auto_now_add=True)),
],
options={
'verbose_name_plural': 'Clientes',
},
),
migrations.CreateModel(
name='Funcionario',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome_funcionario', models.CharField(max_length=500)),
],
options={
'verbose_name_plural': 'Funcionarios',
},
),
migrations.CreateModel(
name='Maquina',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome_maquina', models.CharField(max_length=500)),
],
),
migrations.CreateModel(
name='Pedido',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('op', models.CharField(blank=True, max_length=5000)),
('data_criacao', models.DateTimeField(auto_now_add=True)),
('cliente', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Cliente')),
],
options={
'verbose_name_plural': 'Pedidos',
},
),
migrations.CreateModel(
name='Sequencia',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome_sequencia', models.CharField(max_length=500)),
],
options={
'verbose_name_plural': 'Sequencias',
},
),
migrations.CreateModel(
name='Pistola',
fields=[
('funcionario', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='core.Funcionario')),
('nome_pistola', models.CharField(max_length=500)),
],
options={
'verbose_name_plural': 'Pistolas',
},
),
migrations.CreateModel(
name='SequenciaAcao',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ordem_execucao', models.CharField(max_length=500)),
('tempo_meta', models.CharField(max_length=500)),
('acao', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Acao')),
('sequencia', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Sequencia')),
],
options={
'verbose_name_plural': 'Sequencias e Ações',
},
),
migrations.CreateModel(
name='Referencia',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome_referencia', models.CharField(max_length=500)),
('cliente', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Cliente')),
],
options={
'verbose_name_plural': 'Referencias',
},
),
migrations.CreateModel(
name='PedidoSKU',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantidade', models.IntegerField()),
('data_criacao', models.DateTimeField(auto_now_add=True)),
('pedido', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Pedido')),
('referencia', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Referencia')),
],
options={
'verbose_name_plural': 'Pedido_SKUs',
},
),
migrations.CreateModel(
name='Caixa',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pdf', models.FileField(blank=True, upload_to='pdf/')),
('alocacao', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Alocacao')),
],
options={
'verbose_name_plural': 'Caixas',
},
),
migrations.CreateModel(
name='BackLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pedido_data_criacao', models.DateTimeField(auto_now_add=True)),
('pedido_op', models.TextField(blank=True)),
('nome_cliente', models.TextField(blank=True)),
('nome_referencia', models.TextField(blank=True)),
('nome_acao', models.TextField(blank=True)),
('ordem_execucao', models.IntegerField(null=True)),
('alocacao', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Alocacao')),
('caixa', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Caixa')),
('cliente', models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='core.Cliente')),
('pedido', models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='core.Pedido')),
('referencia', models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='core.Referencia')),
('sequencia_acao', models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='core.SequenciaAcao')),
],
options={
'verbose_name_plural': 'BackLogs',
},
),
migrations.AddField(
model_name='alocacao',
name='pedido',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Pedido'),
),
migrations.AddField(
model_name='alocacao',
name='referencia',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Referencia'),
),
migrations.AddField(
model_name='alocacao',
name='sequencia',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Sequencia'),
),
migrations.AddField(
model_name='acao',
name='maquina',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Maquina'),
),
migrations.CreateModel(
name='LogTrabalho',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('id_leitura', models.IntegerField()),
('cod_barras', models.CharField(max_length=500)),
('data_criacao', models.DateTimeField(auto_now_add=True)),
('funcionario', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Funcionario')),
('pistola', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Pistola')),
],
options={
'verbose_name_plural': 'Log_Trabalhos',
},
),
]
|
grafo = {}
grafo[1] = [7, 6]
grafo[2] = [3, 7]
grafo[3] = [2, 4, 6, 7]
grafo[4] = [3, 7]
grafo[5] = [7]
grafo[6] = [1, 3]
grafo[7] = [1, 2, 3, 4, 5]
c = 0
aux = grafo.copy()
tam = sorted(grafo.values(),key = len)[0]
lista = []
while c < len(grafo):
for i in tam:
for e in grafo:
if i in grafo[e]:
aux[e].remove(i)
lista.append(e)
del aux[e]
c+=1
'''for e in grafo:
if len(grafo[e])<=1:
lista.append(e)'''
print(lista)
|
def method(method_class):
"""Decorator to use to mark an API method.
When invoking L{Registry.scan} the classes marked with this decorator
will be added to the registry.
@param method_class: The L{Method} class to register.
"""
def callback(scanner, name, method_class):
if method_class.actions is not None:
actions = method_class.actions
else:
actions = [name]
if method_class.versions is not None:
versions = method_class.versions
else:
versions = [None]
for action in actions:
for version in versions:
scanner.registry.add(method_class,
action=action,
version=version)
from venusian import attach
attach(method_class, callback, category="method")
return method_class
class Method(object):
"""Handle a single HTTP request to an API resource.
@cvar actions: List of actions that the Method can handle, if C{None}
the class name will be used as only supported action.
@cvar versions: List of versions that the Method can handle, if C{None}
all versions will be supported.
"""
actions = None
versions = None
def invoke(self, call):
"""Invoke this method for executing the given C{call}."""
        raise NotImplementedError("Sub-classes have to implement the invoke method")
def is_available(self):
"""Return a boolean indicating wether this method is available.
Override this to dynamically decide at run-time whether specific
methods are available or not.
"""
return True
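# A hedged usage sketch (the action/version values and the Registry.scan call
# are illustrative, not defined in this module):
#
#     @method
#     class DescribeThings(Method):
#         actions = ["DescribeThings"]
#         versions = ["2010-01-01"]
#
#         def invoke(self, call):
#             return "ok"
#
# The decorated class is picked up when the venusian scanner runs with
# category="method" and is registered once per (action, version) pair.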
|
"""
Modeling Relational Data with Graph Convolutional Networks
Paper: https://arxiv.org/abs/1703.06103
Code: https://github.com/tkipf/relational-gcn
Differences compared to tkipf/relational-gcn
* l2norm applied to all weights
* remove nodes that won't be touched
"""
import argparse
import numpy as np
import time
import tensorflow as tf
from tensorflow.keras import layers
from dgl import DGLGraph
from dgl.nn.tensorflow import RelGraphConv
from dgl.contrib.data import load_data
from functools import partial
from model import BaseRGCN
class EntityClassify(BaseRGCN):
def create_features(self):
features = tf.range(self.num_nodes)
return features
def build_input_layer(self):
return RelGraphConv(self.num_nodes, self.h_dim, self.num_rels, "basis",
self.num_bases, activation=tf.nn.relu, self_loop=self.use_self_loop,
dropout=self.dropout)
def build_hidden_layer(self, idx):
return RelGraphConv(self.h_dim, self.h_dim, self.num_rels, "basis",
self.num_bases, activation=tf.nn.relu, self_loop=self.use_self_loop,
dropout=self.dropout)
def build_output_layer(self):
return RelGraphConv(self.h_dim, self.out_dim, self.num_rels, "basis",
self.num_bases, activation=partial(tf.nn.softmax, axis=1),
self_loop=self.use_self_loop)
def acc(logits, labels, mask):
logits = tf.gather(logits, mask)
labels = tf.gather(labels, mask)
indices = tf.math.argmax(logits, axis=1)
acc = tf.reduce_mean(tf.cast(indices == labels, dtype=tf.float32))
return acc
def main(args):
# load graph data
data = load_data(args.dataset, bfs_level=args.bfs_level, relabel=args.relabel)
num_nodes = data.num_nodes
num_rels = data.num_rels
num_classes = data.num_classes
labels = data.labels
train_idx = data.train_idx
test_idx = data.test_idx
# split dataset into train, validate, test
if args.validation:
val_idx = train_idx[:len(train_idx) // 5]
train_idx = train_idx[len(train_idx) // 5:]
else:
val_idx = train_idx
# since the nodes are featureless, the input feature is then the node id.
feats = tf.range(num_nodes, dtype=tf.int64)
# edge type and normalization factor
edge_type = tf.convert_to_tensor(data.edge_type)
edge_norm = tf.expand_dims(tf.convert_to_tensor(data.edge_norm), 1)
labels = tf.reshape(tf.convert_to_tensor(labels), (-1, ))
# check cuda
if args.gpu < 0:
device = "/cpu:0"
use_cuda = False
else:
device = "/gpu:{}".format(args.gpu)
use_cuda = True
with tf.device(device):
# create graph
g = DGLGraph()
g.add_nodes(num_nodes)
g.add_edges(data.edge_src, data.edge_dst)
# create model
model = EntityClassify(len(g),
args.n_hidden,
num_classes,
num_rels,
num_bases=args.n_bases,
num_hidden_layers=args.n_layers - 2,
dropout=args.dropout,
use_self_loop=args.use_self_loop,
use_cuda=use_cuda)
# optimizer
optimizer = tf.keras.optimizers.Adam(
learning_rate=args.lr)
# training loop
print("start training...")
forward_time = []
backward_time = []
loss_fcn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=False)
for epoch in range(args.n_epochs):
t0 = time.time()
with tf.GradientTape() as tape:
logits = model(g, feats, edge_type, edge_norm)
loss = loss_fcn(tf.gather(labels, train_idx), tf.gather(logits, train_idx))
                # Manual weight decay
                # TensorFlow's Adam(W) optimizer implements weight decay differently
                # from PyTorch's, which leads to worse results here. Adding an explicit
                # L2 penalty on the weights to the loss reproduces the intended decay.
for weight in model.trainable_weights:
loss = loss + \
args.l2norm * tf.nn.l2_loss(weight)
t1 = time.time()
grads = tape.gradient(loss, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
t2 = time.time()
forward_time.append(t1 - t0)
backward_time.append(t2 - t1)
print("Epoch {:05d} | Train Forward Time(s) {:.4f} | Backward Time(s) {:.4f}".
format(epoch, forward_time[-1], backward_time[-1]))
train_acc = acc(logits, labels, train_idx)
val_loss = loss_fcn(tf.gather(labels, val_idx), tf.gather(logits, val_idx))
val_acc = acc(logits, labels, val_idx)
print("Train Accuracy: {:.4f} | Train Loss: {:.4f} | Validation Accuracy: {:.4f} | Validation loss: {:.4f}".
format(train_acc, loss.numpy().item(), val_acc, val_loss.numpy().item()))
print()
logits = model(g, feats, edge_type, edge_norm)
test_loss = loss_fcn(tf.gather(labels, test_idx), tf.gather(logits, test_idx))
test_acc = acc(logits, labels, test_idx)
print("Test Accuracy: {:.4f} | Test loss: {:.4f}".format(test_acc, test_loss.numpy().item()))
print()
print("Mean forward time: {:4f}".format(np.mean(forward_time[len(forward_time) // 4:])))
print("Mean backward time: {:4f}".format(np.mean(backward_time[len(backward_time) // 4:])))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='RGCN')
parser.add_argument("--dropout", type=float, default=0,
help="dropout probability")
parser.add_argument("--n-hidden", type=int, default=16,
help="number of hidden units")
parser.add_argument("--gpu", type=int, default=-1,
help="gpu")
parser.add_argument("--lr", type=float, default=1e-2,
help="learning rate")
parser.add_argument("--n-bases", type=int, default=-1,
help="number of filter weight matrices, default: -1 [use all]")
parser.add_argument("--n-layers", type=int, default=2,
help="number of propagation rounds")
parser.add_argument("-e", "--n-epochs", type=int, default=50,
help="number of training epochs")
parser.add_argument("-d", "--dataset", type=str, required=True,
help="dataset to use")
parser.add_argument("--l2norm", type=float, default=0,
help="l2 norm coef")
parser.add_argument("--relabel", default=False, action='store_true',
help="remove untouched nodes and relabel")
parser.add_argument("--use-self-loop", default=False, action='store_true',
help="include self feature as a special relation")
fp = parser.add_mutually_exclusive_group(required=False)
fp.add_argument('--validation', dest='validation', action='store_true')
fp.add_argument('--testing', dest='validation', action='store_false')
parser.set_defaults(validation=True)
args = parser.parse_args()
print(args)
args.bfs_level = args.n_layers + 1 # pruning used nodes for memory
main(args)
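# Example run (the script name is illustrative; "aifb" is one of the datasets
# shipped with dgl.contrib.data):
#
#     python entity_classify.py -d aifb --testing --gpu 0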
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Suite.cc_version'
db.add_column('library_suite', 'cc_version', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Adding field 'SuiteCase.cc_version'
db.add_column('library_suitecase', 'cc_version', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Adding field 'CaseVersion.cc_version'
db.add_column('library_caseversion', 'cc_version', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Adding field 'CaseAttachment.cc_version'
db.add_column('library_caseattachment', 'cc_version', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Adding field 'Case.cc_version'
db.add_column('library_case', 'cc_version', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Adding field 'CaseStep.cc_version'
db.add_column('library_casestep', 'cc_version', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
def backwards(self, orm):
# Deleting field 'Suite.cc_version'
db.delete_column('library_suite', 'cc_version')
# Deleting field 'SuiteCase.cc_version'
db.delete_column('library_suitecase', 'cc_version')
# Deleting field 'CaseVersion.cc_version'
db.delete_column('library_caseversion', 'cc_version')
# Deleting field 'CaseAttachment.cc_version'
db.delete_column('library_caseattachment', 'cc_version')
# Deleting field 'Case.cc_version'
db.delete_column('library_case', 'cc_version')
# Deleting field 'CaseStep.cc_version'
db.delete_column('library_casestep', 'cc_version')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.product': {
'Meta': {'ordering': "['name']", 'object_name': 'Product'},
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 190426)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'has_team': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 190624)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'own_team': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
},
'core.productversion': {
'Meta': {'ordering': "['product', 'order']", 'object_name': 'ProductVersion'},
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 185878)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'productversion'", 'symmetrical': 'False', 'to': "orm['environments.Environment']"}),
'has_team': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 186074)'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'own_team': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': "orm['core.Product']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'", '_ormbases': ['auth.User'], 'proxy': 'True'}
},
'environments.category': {
'Meta': {'ordering': "['name']", 'object_name': 'Category'},
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 196774)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 196972)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'environments.element': {
'Meta': {'ordering': "['name']", 'object_name': 'Element'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'elements'", 'to': "orm['environments.Category']"}),
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 189436)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 189627)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'environments.environment': {
'Meta': {'object_name': 'Environment'},
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 200292)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'elements': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'environments'", 'symmetrical': 'False', 'to': "orm['environments.Element']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 200493)'}),
'profile': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'environments'", 'null': 'True', 'to': "orm['environments.Profile']"})
},
'environments.profile': {
'Meta': {'object_name': 'Profile'},
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 197684)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 197880)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'library.case': {
'Meta': {'object_name': 'Case'},
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 192679)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 192871)'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cases'", 'to': "orm['core.Product']"})
},
'library.caseattachment': {
'Meta': {'object_name': 'CaseAttachment'},
'attachment': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'caseversion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': "orm['library.CaseVersion']"}),
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 187537)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 187745)'})
},
'library.casestep': {
'Meta': {'ordering': "['caseversion', 'number']", 'object_name': 'CaseStep'},
'caseversion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'steps'", 'to': "orm['library.CaseVersion']"}),
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 191525)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'expected': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instruction': ('django.db.models.fields.TextField', [], {}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 191712)'}),
'number': ('django.db.models.fields.IntegerField', [], {})
},
'library.caseversion': {
'Meta': {'ordering': "['case', 'productversion__order']", 'object_name': 'CaseVersion'},
'case': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': "orm['library.Case']"}),
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 198592)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'caseversion'", 'symmetrical': 'False', 'to': "orm['environments.Environment']"}),
'envs_narrowed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 198795)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'productversion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'caseversions'", 'to': "orm['core.ProductVersion']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'draft'", 'max_length': '30', 'db_index': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'caseversions'", 'blank': 'True', 'to': "orm['tags.Tag']"})
},
'library.suite': {
'Meta': {'object_name': 'Suite'},
'cases': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'suites'", 'symmetrical': 'False', 'through': "orm['library.SuiteCase']", 'to': "orm['library.Case']"}),
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 194131)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 194340)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'suites'", 'to': "orm['core.Product']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'draft'", 'max_length': '30', 'db_index': 'True'})
},
'library.suitecase': {
'Meta': {'ordering': "['order']", 'object_name': 'SuiteCase'},
'case': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'suitecases'", 'to': "orm['library.Case']"}),
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 195643)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 195852)'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'suite': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'suitecases'", 'to': "orm['library.Suite']"})
},
'tags.tag': {
'Meta': {'object_name': 'Tag'},
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 188495)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 1, 12, 188686)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Product']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['library']
|
import snap
import os
filename = "facebook_combined.txt"
G = snap.LoadEdgeList(snap.PUNGraph,filename,0,1) #3rd and 4th parameters are column indices of source and destination nodes
#G is now the required undirected graph
n = G.GetNodes()
cls = snap.TIntFltH() # closeness centrality dict containing (node id, closeness centrality) as a (key,value) pair
for itr1 in G.Nodes():
cc = 0
s = 0
sp = snap.TIntH() # A hash table containing shortest paths from itr1.node to all other nodes
a = snap.GetShortPath(G,itr1.GetId(),sp)
for j in sp:
s += sp[j]
cc = (n-1)/s
cc = round(cc,6)
cls[itr1.GetId()] = cc
cls.SortByDat(False) #sorting in descending order by value. parameter Asc == False
# now have to output cls in a text file
os.makedirs("centralities", exist_ok=True)
os.chdir("centralities")
filename = "closeness.txt"
f = open(filename,"w")
for key in cls:
f.write(str(key) + " " + str(cls[key]) + "\n")
f.close()
pr = snap.TIntFltH() # pagerank dict containing (node id, pagerank) as a (key,value) pair
n_pr = 0 #no of nodes with Id divisible by 4
for it in G.Nodes():
if( (it.GetId() %4) == 0):
n_pr += 1
# n_pr slots of the non-uniform (biased) teleport vector get a non-zero entry; the rest are zero
a = 0.8 # damping factor
for itr in G.Nodes(): #initializing pageranks for all nodes
pr[itr.GetId()] = 1
iteration_MAX = 128 # total number of iterations to be done. a number more than 100 and a power of 2 has been chosen.
for i in range(iteration_MAX):
sum = 0
for itr in G.Nodes():
t = 0
for Id in itr.GetOutEdges():
itr2 = G.GetNI(Id)
t += ( pr[Id]/itr2.GetOutDeg() )
pr[itr.GetId()] = a * t
if( (itr.GetId() % 4) == 0 ):
            pr[itr.GetId()] += (1-a) * (1.0/n_pr)  # 1.0/n_pr so the teleport term is not truncated under integer division
sum += pr[itr.GetId()]
for key in pr: #normalization
pr[key] = pr[key]/sum
for key in pr: #normalization
pr[key] = round(pr[key],6)
pr.SortByDat(False)
filename = "pagerank.txt"
f = open(filename,"w")
for key in pr:
f.write(str(key) + " " + str(pr[key]) + "\n")
f.close()
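# Update rule implemented above (a biased/personalised PageRank over the
# undirected graph), written out in plain form:
#
#     pr(v) = a * sum over neighbours u of pr(u) / deg(u)
#             + (1 - a) * b(v),   where b(v) = 1/n_pr if v's id % 4 == 0, else 0
#
# followed by dividing every entry by the sum of all entries so the ranks add up to 1.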
|
import os
from distutils.core import setup
from pathlib import Path
def read_requirements():
"Read requirements file and returns those lines corresponding to library dependencies"
pwd = Path(os.path.dirname(os.path.abspath(__file__)))
lines = []
with open(pwd / 'requirements.txt', 'rt') as fh:
for line in fh:
line = line.strip()
pos = line.find('#')
if pos >= 0:
line = line[:pos].strip()
if not line:
continue
lines.append(line)
return lines
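# For example (hypothetical requirements.txt content), a line such as
# "numpy>=1.15  # linear algebra" is returned as "numpy>=1.15", while
# comment-only or blank lines are skipped entirely.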
requirements = read_requirements()
setup(
name='pywde',
version='0.1',
packages=['pywde'],
url='',
license='',
author='Carlos Aya',
author_email='',
description='Wavelet density estimation in Python',
py_modules=['pywde'],
install_requires=requirements,
setup_requires=[]
)
|
"""Provides formatted logging handler for distributey."""
import logging
import sys
from flask import has_request_context, session
import config
from splunk_handler import SplunkHandler
log_level = config.get_config_by_keypath('LOG_LEVEL')
splunk_enabled = config.get_config_by_keypath('SPLUNK_ENABLED')
if log_level == 'debug':
__LOGLVL = logging.DEBUG
else:
__LOGLVL = logging.INFO
class __RequestFormatter(logging.Formatter):
def format(self, record):
if has_request_context():
try:
record.user_agent = session['header_args']['user-agent']
record.tenant = session['view_args']['tenant']
record.x_real_ip = session['header_args']['x-real-ip']
except KeyError:
# input validation failed
record.user_agent = 'N/A'
record.tenant = 'N/A'
record.x_real_ip = 'N/A'
else:
record.tenant = 'system'
record.x_real_ip = 'localhost'
record.user_agent = 'n/a'
return super().format(record)
__stream_handler = logging.StreamHandler(stream=sys.stderr)
__stream_handler.setFormatter(
__RequestFormatter(
'[%(asctime)s] distributey {%(pathname)s:%(lineno)d} %(levelname)s - '
'tenant: %(tenant)s, origin: %(x_real_ip)s, '
'ua: %(user_agent)s - %(message)s'))
logger = logging.getLogger()
logger.setLevel(__LOGLVL)
logger.addHandler(__stream_handler)
if splunk_enabled:
__splunk = SplunkHandler(
host=config.get_config_by_keypath('SPLUNK_HOST'),
port=config.get_config_by_keypath('SPLUNK_PORT'),
protocol=config.get_config_by_keypath('SPLUNK_PROTOCOL'),
verify=config.get_config_by_keypath('SPLUNK_VERIFY'),
token=config.get_config_by_keypath('SPLUNK_TOKEN'),
index=config.get_config_by_keypath('SPLUNK_INDEX'))
logger.addHandler(__splunk)
|
import logging
import smtplib
from email.message import EmailMessage
class EmailConfig():
def __init__(self, account, passwd, server_addr, server_port):
self.account = account
self.passwd = passwd
self.server_addr = server_addr
self.server_port = server_port
def send_code(config, email_addr, code):
server = smtplib.SMTP_SSL(config.server_addr, config.server_port)
server.ehlo()
server.login(config.account, config.passwd)
msg = EmailMessage()
msg.set_content(code)
msg['Subject'] = "UBC Discord Access Code"
msg['From'] = config.account
msg['To'] = email_addr
server.send_message(msg, config.account, email_addr)
server.close()
logging.info(f"sent access code to {email_addr}")
|
from http import HTTPStatus
from flask_restplus import Resource, abort
from app.api.models import AttachmentModel
from app.api.namespaces import attachments
from app.authorization.permissions import EditThesisPermission, UserNeedPermission
from database import db
from database.models import Attachment
@attachments.route('/<int:{}>'.format('attachment_id'))
@attachments.param('attachment_id', description='Attachment identifier')
class AttachmentItem(Resource):
@attachments.marshal_with(AttachmentModel)
def get(self, attachment_id):
"""
Get attachment info
"""
attachment = Attachment.query.get(attachment_id)
if attachment is None:
return abort(HTTPStatus.NOT_FOUND, message='Attachment is not found')
return attachment
@attachments.response(HTTPStatus.FORBIDDEN, description="User is not authorized to delete the attachment")
@attachments.response(HTTPStatus.OK, description="Attachment successfully deleted")
def delete(self, attachment_id):
"""
Delete attachment
* User can delete **their attachment** if discussion is not frozen
* User with permission to **"edit theses"** can delete the attachment
"""
attachment = Attachment.query.get(attachment_id)
if attachment is None:
return abort(HTTPStatus.NOT_FOUND, message='Attachment is not found')
if not UserNeedPermission(attachment.thesis.author_id):
if not EditThesisPermission.can():
return abort(HTTPStatus.FORBIDDEN, message="User is not authorized to delete the attachment")
elif attachment.thesis.argument_thesis.argument.discussion.is_frozen:
return abort(HTTPStatus.FORBIDDEN, message="Discussion is frozen")
db.session.delete(attachment)
db.session.commit()
return "Attachment successfully deleted"
|
import json
import logging
import os
import torch
import requests
from typing import Dict, Optional
from torch import nn
from torch import Tensor
logger = logging.getLogger(__name__)
from huggingface_hub.constants import CONFIG_NAME, PYTORCH_WEIGHTS_NAME
from huggingface_hub.file_download import (
cached_download,
hf_hub_url,
is_torch_available,
)
from huggingface_hub.hf_api import HfApi, HfFolder
from huggingface_hub.repository import Repository
StateDict = Dict[str, Tensor]
class HFModelHub:
@staticmethod
def save_pretrained(
model: nn.Module,
save_directory: str,
config: Optional[dict] = None,
push_to_hub: bool = False,
**kwargs,
):
"""
Saving weights in local directory.
Parameters:
save_directory (:obj:`str`):
Specify directory in which you want to save weights.
config (:obj:`dict`, `optional`):
                Specify the config (must be a dict) in case you want to save it.
push_to_hub (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set it to `True` in case you want to push your weights to huggingface_hub
model_id (:obj:`str`, `optional`, defaults to :obj:`save_directory`):
Repo name in huggingface_hub. If not specified, repo name will be same as `save_directory`
kwargs (:obj:`Dict`, `optional`):
kwargs will be passed to `push_to_hub`
"""
os.makedirs(save_directory, exist_ok=True)
# saving config
if isinstance(config, dict):
path = os.path.join(save_directory, CONFIG_NAME)
with open(path, "w") as f:
json.dump(config, f)
# saving model weights
path = os.path.join(save_directory, PYTORCH_WEIGHTS_NAME)
torch.save(model.state_dict(), path)
if push_to_hub:
return HFModelHub.push_to_hub(save_directory, **kwargs)
@staticmethod
def from_pretrained(
pretrained_model_name_or_path: Optional[str],
strict: bool = True,
map_location: Optional[str] = "cpu",
force_download: bool = False,
resume_download: bool = False,
        proxies: Optional[Dict] = None,
use_auth_token: Optional[str] = None,
cache_dir: Optional[str] = None,
local_files_only: bool = False,
) -> StateDict:
r"""
        Download (or locate locally) pretrained PyTorch weights from the Hugging Face Hub and
        return them as a state dict, which can then be loaded into a model via
        ``model.load_state_dict()``.
Parameters:
pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`, `optional`):
Can be either:
- A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
a user or organization name, like ``dbmdz/bert-base-german-cased``.
- You can add `revision` by appending `@` at the end of model_id simply like this: ``dbmdz/bert-base-german-cased@main``
Revision is the specific model version to use. It can be a branch name, a tag name, or a commit id,
since we use a git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any identifier allowed by git.
- A path to a `directory` containing model weights saved using
:func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
- :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword
arguments ``config`` and ``state_dict``).
cache_dir (:obj:`Union[str, os.PathLike]`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
            proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to only look at local files (i.e., do not try to download the model).
use_auth_token (:obj:`str` or `bool`, `optional`):
The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`).
            model_kwargs (:obj:`Dict`, `optional`):
model_kwargs will be passed to the model during initialization
.. note::
Passing :obj:`use_auth_token=True` is required when you want to use a private model.
"""
model_id = pretrained_model_name_or_path
map_location = torch.device(map_location)
revision = None
if len(model_id.split("@")) == 2:
model_id, revision = model_id.split("@")
if model_id in os.listdir() and CONFIG_NAME in os.listdir(model_id):
config_file = os.path.join(model_id, CONFIG_NAME)
else:
try:
config_url = hf_hub_url(
model_id, filename=CONFIG_NAME, revision=revision
)
config_file = cached_download(
config_url,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
)
except requests.exceptions.RequestException:
logger.warning("config.json NOT FOUND in HuggingFace Hub")
config_file = None
if model_id in os.listdir():
print("LOADING weights from local directory")
model_file = os.path.join(model_id, PYTORCH_WEIGHTS_NAME)
else:
model_url = hf_hub_url(
model_id, filename=PYTORCH_WEIGHTS_NAME, revision=revision
)
model_file = cached_download(
model_url,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
)
logger.debug(model_file)
if config_file is not None:
with open(config_file, "r", encoding="utf-8") as f:
config = json.load(f)
# we are not using config
state_dict = torch.load(model_file, map_location=map_location)
return state_dict
@staticmethod
def push_to_hub(
save_directory: Optional[str],
model_id: Optional[str] = None,
repo_url: Optional[str] = None,
commit_message: Optional[str] = "add model",
organization: Optional[str] = None,
private: bool = None,
) -> str:
"""
Parameters:
save_directory (:obj:`Union[str, os.PathLike]`):
Directory having model weights & config.
model_id (:obj:`str`, `optional`, defaults to :obj:`save_directory`):
Repo name in huggingface_hub. If not specified, repo name will be same as `save_directory`
repo_url (:obj:`str`, `optional`):
Specify this in case you want to push to existing repo in hub.
organization (:obj:`str`, `optional`):
Organization in which you want to push your model.
private (:obj:`bool`, `optional`):
private: Whether the model repo should be private (requires a paid huggingface.co account)
commit_message (:obj:`str`, `optional`, defaults to :obj:`add model`):
Message to commit while pushing
Returns:
url to commit on remote repo.
"""
if model_id is None:
model_id = save_directory
token = HfFolder.get_token()
if repo_url is None:
repo_url = HfApi().create_repo(
token,
model_id,
organization=organization,
private=private,
repo_type=None,
exist_ok=True,
)
repo = Repository(save_directory, clone_from=repo_url, use_auth_token=token)
return repo.push_to_hub(commit_message=commit_message)
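# Minimal local round-trip sketch; the directory name "tmp-linear" and the config dict
# are illustrative, and no Hub access happens unless push_to_hub is used.
if __name__ == "__main__":
    _model = nn.Linear(4, 2)
    HFModelHub.save_pretrained(
        _model, "tmp-linear", config={"in_features": 4, "out_features": 2})
    _model.load_state_dict(HFModelHub.from_pretrained("tmp-linear"))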
|
"""Resource for SecurityGroups"""
import ipaddress
from typing import Type
from botocore.client import BaseClient
from altimeter.aws.resource.resource_spec import ListFromAWSResult
from altimeter.aws.resource.ec2 import EC2ResourceSpec
from altimeter.core.graph.field.dict_field import EmbeddedDictField
from altimeter.core.graph.field.list_field import ListField
from altimeter.core.graph.field.resource_link_field import ResourceLinkField
from altimeter.core.graph.field.scalar_field import ScalarField
from altimeter.core.graph.field.tags_field import TagsField
from altimeter.core.graph.schema import Schema
class SecurityGroupResourceSpec(EC2ResourceSpec):
"""Resource for SecurityGroups"""
type_name = "security-group"
schema = Schema(
ScalarField("GroupName", "name"),
ListField(
"IpPermissions",
EmbeddedDictField(
ScalarField("IpProtocol"),
ScalarField("FromPort", default_value=0),
ScalarField("ToPort", default_value=65535),
ListField(
"IpRanges",
EmbeddedDictField(
ScalarField("CidrIp"), ScalarField("FirstIp"), ScalarField("LastIp")
),
alti_key="ip_range",
optional=True,
),
ListField(
"Ipv6Ranges",
EmbeddedDictField(
ScalarField("CidrIpv6"), ScalarField("FirstIp"), ScalarField("LastIp")
),
alti_key="ipv6_range",
optional=True,
),
ListField(
"PrefixListIds", EmbeddedDictField(ScalarField("PrefixListId")), optional=True
),
ListField(
"UserIdGroupPairs",
EmbeddedDictField(
ResourceLinkField("GroupId", "SecurityGroupResourceSpec"),
ScalarField("UserId", alti_key="account_id"),
ScalarField("PeeringStatus", optional=True),
ScalarField("VpcId", optional=True),
ScalarField("VpcPeeringConnectionId", optional=True),
),
alti_key="user_id_group_pairs",
),
),
alti_key="ingress_rule",
),
ListField(
"IpPermissionsEgress",
EmbeddedDictField(
ScalarField("IpProtocol"),
ScalarField("FromPort", default_value=0),
ScalarField("ToPort", default_value=65535),
ListField(
"IpRanges",
EmbeddedDictField(
ScalarField("CidrIp"), ScalarField("FirstIp"), ScalarField("LastIp")
),
alti_key="ip_range",
optional=True,
),
ListField(
"Ipv6Ranges",
EmbeddedDictField(
ScalarField("CidrIpv6"), ScalarField("FirstIp"), ScalarField("LastIp")
),
alti_key="ipv6_range",
optional=True,
),
ListField(
"PrefixListIds", EmbeddedDictField(ScalarField("PrefixListId")), optional=True
),
ListField(
"UserIdGroupPairs",
EmbeddedDictField(
ResourceLinkField("GroupId", "SecurityGroupResourceSpec"),
ScalarField("UserId", alti_key="account_id"),
ScalarField("PeeringStatus", optional=True),
ScalarField("VpcId", optional=True),
ScalarField("VpcPeeringConnectionId", optional=True),
),
alti_key="user_id_group_pairs",
),
),
alti_key="egress_rule",
),
TagsField(),
)
@classmethod
def list_from_aws(
cls: Type["SecurityGroupResourceSpec"], client: BaseClient, account_id: str, region: str
) -> ListFromAWSResult:
"""Return a dict of dicts of the format:
{'security_group_1_arn': {security_group_1_dict},
'security_group_2_arn': {security_group_2_dict},
...}
        Where the dicts represent results from describe_security_groups."""
security_groups = {}
paginator = client.get_paginator("describe_security_groups")
for resp in paginator.paginate():
for security_group in resp.get("SecurityGroups", []):
resource_arn = cls.generate_arn(account_id, region, security_group["GroupId"])
for ingress_rule in security_group.get("IpPermissions", []):
for ip_range in ingress_rule.get("IpRanges", []):
cidr = ip_range["CidrIp"]
ipv4_network = ipaddress.IPv4Network(cidr, strict=False)
first_ip, last_ip = int(ipv4_network[0]), int(ipv4_network[-1])
ip_range["FirstIp"] = first_ip
ip_range["LastIp"] = last_ip
for ip_range in ingress_rule.get("Ipv6Ranges", []):
cidr = ip_range["CidrIpv6"]
ipv6_network = ipaddress.IPv6Network(cidr, strict=False)
first_ip, last_ip = int(ipv6_network[0]), int(ipv6_network[-1])
ip_range["FirstIp"] = first_ip
ip_range["LastIp"] = last_ip
for egress_rule in security_group.get("IpPermissionsEgress", []):
for ip_range in egress_rule.get("IpRanges", []):
cidr = ip_range["CidrIp"]
ipv4_network = ipaddress.IPv4Network(cidr, strict=False)
first_ip, last_ip = int(ipv4_network[0]), int(ipv4_network[-1])
ip_range["FirstIp"] = first_ip
ip_range["LastIp"] = last_ip
for ip_range in egress_rule.get("Ipv6Ranges", []):
cidr = ip_range["CidrIpv6"]
ipv6_network = ipaddress.IPv6Network(cidr, strict=False)
first_ip, last_ip = int(ipv6_network[0]), int(ipv6_network[-1])
ip_range["FirstIp"] = first_ip
ip_range["LastIp"] = last_ip
security_groups[resource_arn] = security_group
return ListFromAWSResult(resources=security_groups)
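# Small illustration of the FirstIp/LastIp derivation used above, standard library only
# (the CIDR value is arbitrary):
if __name__ == "__main__":
    _net = ipaddress.IPv4Network("10.0.0.0/24", strict=False)
    assert (int(_net[0]), int(_net[-1])) == (167772160, 167772415)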
|
from cmndr import Middleware
import logging
class LoggingMiddleware(Middleware):
def execute(self, command, next_callable):
cmd_dict = command.__dict__
splitted_module = command.__module__.split('.')
domain = splitted_module[1]
log_string = '{} {}'.format(' '.join(splitted_module[-2:]), cmd_dict if cmd_dict else '')
logging.getLogger(domain).info(log_string)
ret = next_callable(command)
return ret
|
from sfc_models import register_standard_logs
from sfc_models.objects import *
from sfc_models.sector import Sector
from sfc_models.utils import Logger
from sfc_gui.chart_plotter import ChartPlotterWindow2
class BusinessWithInvestment(FixedMarginBusiness):
def __init__(self, country, code, long_name=''):
if long_name == '':
long_name = 'Business with Investment {0} in Country {1}'.format(code, country.Code)
labour_input_name = 'LAB'
output_name = 'GOOD'
profit_margin=0.0
FixedMarginBusiness.__init__(self, country, code, long_name, profit_margin,
labour_input_name, output_name)
self.AddVariable('INV', 'Inventory', 'LAG_INV + PROD - SUP_GOOD - INVEST')
self.AddVariable('LAG_INV', 'Lagged Inventory', 'INV(k-1)')
# Target inventory = 1*market demand
# Production = (expected demand) + (desired change in inventory)
# expected demand = investment + previous market demand
# desired change in inventory = .5*(previous market demand - existing inventory).
self.AddVariable('PROD', 'Production Level', 'max(0,INVEST + LAG_SUP_GOOD + .1*(LAG_SUP_GOOD - LAG_INV))')
self.AddVariable('INVSALES', 'Inventory/Sales Ratio', 'INV/SUP_GOOD')
self.AddVariable('CAPITAL', 'Stock of Capital', '0.95*LAG_CAP + INVEST')
# Production function: Y = K(t-1)^.5 N^.5
# N = Number of hours = P^2 / (K(t-1)
self.AddVariable('NUMHOURS', 'Number of Hours Demanded', '(PROD*PROD)/LAG_CAP')
self.AddVariable('WAGERATE', 'Wage rate', 'DEM_LAB/NUMHOURS')
self.AddVariable('LAG_HOURS', 'Lagged Number of Hours', 'NUMHOURS(k-1)')
self.AddVariable('LAG_CAP', 'Lagged Capital', 'CAPITAL(k-1)')
self.AddVariable('LAG_PROD', 'Lagged Production', 'PROD(k-1)')
self.AddVariable('LAG_SUP_GOOD', 'Lagged Supply to market', 'SUP_GOOD(k-1)')
# Target capital = (alpha_capital)*(previous # of hours worked)
self.AddVariable('ALPHA_CAPITAL', 'Multiplier for target capital', '1.')
self.AddVariable('CAPITAL_TARGET', 'Target Level for capital', 'ALPHA_CAPITAL*LAG_HOURS')
self.AddVariable('INVEST', 'Gross Investment', 'max(0., .05*LAG_CAP + .1*(CAPITAL_TARGET-LAG_CAP))')
self.AddVariable('CAPITAL_RATIO', 'Ratio of Capital to target', 'CAPITAL/CAPITAL_TARGET')
# Pro-Forma Profits: 20% of sales - depreciation.
self.AddVariable('PROFORMA', 'Pro-Forma Profits (Lagged)', '0.20*LAG_SUP_GOOD - .05*LAG_CAP')
self.AddVariable('DIVPAID', 'Dividends Paid', '.055* LAG_F + 0.8*PROFORMA')
self.AddVariable('DEM_LAB', 'Demand For Labour', '0.8*PROD')
def _GenerateEquations(self):
# self.AddVariable('SUP_GOOD', 'Supply of goods', '<TO BE DETERMINED>')
wage_share = 1.0 - self.ProfitMargin
Logger('Searching for Market Sector with Code {0} in parent country', priority=4,
data_to_format=(self.OutputName,))
# Since we have inventories, profits are volume of sales * profit margin
self.SetEquationRightHandSide('PROF', '.1 * SUP_GOOD')
for s in self.Parent.SectorList:
if s.Code == 'HH':
Logger('Adding dividend flow', priority=5)
self.AddCashFlow('-DIV', 'DIVPAID', 'Dividends paid', is_income=False)
s.AddCashFlow('DIV', self.GetVariableName('DIVPAID'), 'Dividends received', is_income=True)
break
def get_description():
return "ModelInvestment"
def build_model():
# Create model, which holds all entities
mod = Model()
mod.EquationSolver.TraceStep = 10
mod.EquationSolver.MaxTime = 100
# Create first country - Canada. (This model only has one country.)
can = Country(mod, 'CA', 'Canada')
# Create sectors
gov = ConsolidatedGovernment(can, 'GOV', 'Government')
hh = Household(can, 'HH', 'Household')
# A literally non-profit business sector
bus = BusinessWithInvestment(can, 'BUS', 'Business Sector')
# Create the linkages between sectors - tax flow, markets - labour ('LAB'), goods ('GOOD')
tax = TaxFlow(can, 'TF', 'TaxFlow', .2)
labour = Market(can, 'LAB', 'Labour market')
goods = Market(can, 'GOOD', 'Goods market')
# Initial conditions
bus.AddInitialCondition('CAPITAL', 100.)
bus.AddInitialCondition('PROD', 100.)
bus.AddInitialCondition('INV', 95.)
# bus.AddInitialCondition('LAG_INV', 100.)
# bus.AddInitialCondition('LAG_SUP_GOOD', 95.)
bus.AddInitialCondition('SUP_GOOD', 95.)
bus.AddInitialCondition('NUMHOURS', 100.)
# Initialise money holdings
bizcash = 69
hhcash = 76.
gov.AddInitialCondition('F', -(bizcash+hhcash))
hh.AddInitialCondition('F', hhcash)
bus.AddInitialCondition('F', bizcash)
# Need to set the exogenous variable - Government demand for Goods ("G" in economist symbology)
#mod.AddExogenous('GOV', 'DEM_GOOD', '[19.,] * 20 + [25.]*100')
mod.AddExogenous('GOV', 'DEM_GOOD', '[19.,] * 105')
mod.AddExogenous('BUS', 'ALPHA_CAPITAL', '[1.,] * 20 + [1.1] * 100')
return mod
if __name__ == '__main__':
# register_standard_logs('output', __file__)
mod2 = build_model()
mod2.main()
window = ChartPlotterWindow2(None, mod2)
window.mainloop()
|
#!/usr/bin/env python3
""" Generate performance graphs. """
import collections
import contextlib
import errno
import os
import platform
import subprocess
import sys
import tempfile
import timeit
FILE_SIZES_MB = tuple(2 ** x for x in range(12))
def generate_file(parent_dir, size_mb):
""" Generate a file, write random data to it, and return its filepath. """
fd, filepath = tempfile.mkstemp(dir=parent_dir)
os.close(fd)
print("Generating %u MB file to '%s'..." % (size_mb, filepath))
cmd = ("dd", "if=/dev/frandom", "of=%s" % (filepath), "bs=1M", "count=%u" % (size_mb))
subprocess.check_call(cmd,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
return filepath
def read_file(filepath):
""" Read a file to fill OS filesystem cache. """
cmd = ("dd", "if=%s" % (filepath), "of=/dev/null", "bs=1M")
subprocess.check_call(cmd,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if __name__ == "__main__":
with tempfile.TemporaryDirectory() as tmp_dir:
# measure
data = collections.OrderedDict()
for file_size_mb in FILE_SIZES_MB:
data_point = []
try:
filepath_src = generate_file(tmp_dir, file_size_mb)
read_file(filepath_src) # warm up filesystem cache
filepath_dst = "%s.dst" % (filepath_src)
for use_fast_copy in (False, True):
print("Measuring with%s pyfastcopy..." % ("" if use_fast_copy else "out"))
v = timeit.repeat(setup="import shutil; import pyfastcopy; p1 = %s; p2 = %s" % (repr(filepath_src),
repr(filepath_dst)),
stmt="shutil.%s(p1, p2)" % ("copyfile" if use_fast_copy else "_orig_copyfile"),
number=10 if file_size_mb >= 64 else 100,
repeat=5)
v = min(v)
data_point.append(str(v / (10 if file_size_mb >= 64 else 100)))
os.remove(filepath_dst)
os.remove(filepath_src)
data[file_size_mb] = tuple(data_point)
except OSError as e:
if e.errno == errno.ENOSPC:
print("Not enough free space")
break
else:
raise
with tempfile.TemporaryDirectory() as tmp_dir:
# write data files
data_filepaths = []
data_fds = []
for graph in range(3):
fd, data_filepath = tempfile.mkstemp(suffix=".csv", dir=tmp_dir)
data_filepaths.append(data_filepath)
data_fds.append(fd)
with contextlib.ExitStack() as cm:
files = [cm.enter_context(os.fdopen(fd, "wt")) for fd in data_fds]
for file_size_mb, data_point in data.items():
f = files[0 if file_size_mb < 64 else 1]
line = "%u,%s\n" % (file_size_mb, ",".join(data_point))
f.write(line)
files[-1].write(line)
# plot
sysinfo_line = "(%s %s, on %s %s %s)\"" % (sys.implementation.name.capitalize(),
".".join(map(str, sys.implementation.version[0:3])),
platform.system(),
platform.release(),
platform.processor())
for graph, data_filepath in enumerate(data_filepaths[:2], 1):
gnuplot_code = ["set terminal png size 1024,600 font 'M+ 1c bold,12'",
"set title \"Time to copy file using shutil.copyfile: standard Python vs pyfastcopy\\n" + sysinfo_line,
"set output '%u.png'" % (graph),
"set key left samplen 3 spacing 1.75",
"set xlabel 'File size (MB)'",
"set xtics rotate out",
"set ylabel 'Time to copy (ms)'",
"set format y '%.0f'",
"set style data histogram",
"set style histogram clustered",
"set style fill solid",
"set boxwidth 0.95 relative",
"set datafile separator ','",
"plot '%s' using ($2*1000):xtic(1) title 'standard', "
"'%s' using ($3*1000):xtic(1) title 'pyfastcopy'" % (data_filepath, data_filepath)]
gnuplot_code = ";\n".join(gnuplot_code) + ";"
subprocess.check_output(("gnuplot",),
input=gnuplot_code,
stderr=None,
universal_newlines=True)
gnuplot_code = ["set terminal png size 1024,600 font 'M+ 1c bold,12'",
"set title \"shutil.copyfile performance gain of pyfastcopy vs stock Python\\n" + sysinfo_line,
"set output '3.png'",
"set xlabel 'File size (MB)'",
"set xtics rotate out",
"set ylabel 'Performance gain (%)'",
"set yrange[20:50]",
"set format y '%.0f'",
"set mytics 5",
"set datafile separator ','",
"plot '%s' using (100-100*$3/$2):xtic(1) with lines title '' lw 2" % (data_filepaths[2])]
gnuplot_code = ";\n".join(gnuplot_code) + ";"
subprocess.check_output(("gnuplot",),
input=gnuplot_code,
stderr=None,
universal_newlines=True)
|
#!/usr/bin/env python3
from collections import OrderedDict
ud = dict([('a', 1), ('b', 1), ('c', 1)])
print(ud)
od = OrderedDict([('a', 1), ('b', 1), ('c', 1)])
print(od)
|
from os import path, mkdir
from data import DATA_PATH
# DATA_PATH = "."
class SolverBase(object):
def __init__(self, name):
self.name = name
class_name = self.__class__.__name__
ind = class_name.find("Solver")
self.data_dir = path.join(DATA_PATH, class_name[:ind].lower())
if not path.exists(self.data_dir):
mkdir(self.data_dir)
if self.name == "sample":
open(self.get_filename("in"), "w").close()
open(self.get_filename("out"), "w").close()
open(self.get_filename("out.ref"), "w").close()
self.n_sample_ = 0
def _get_file_handler(self, ext):
op = 'w' if ext == 'out' else 'r'
return open(self.get_filename(ext), op)
def get_filename(self, ext):
return path.join(self.data_dir, self.name + "." + ext)
def _get_input_file(self):
f = self._get_file_handler('in')
self.n_sample_ = int(f.readline())
return f
def _iter_input(self):
with self._get_input_file() as f:
return iter(f.readlines())
@staticmethod
def _split_line_to_list(line, fmt=int):
return [fmt(s) for s in line.split(" ")]
def _write_result(self, result, sep=" "):
assert len(result) == self.n_sample_, \
"length of result '{}' should be equal to the number of samples " \
"{} for {}.".format(len(result), self.n_sample_, self.__class__)
f = self._get_file_handler('out')
for i, line in enumerate(result, 1):
f.write("Case #{}:{}{}\n".format(i, sep, line))
f.close()
def _mod(x, mod):
"""mod is useful for large numbers"""
return x if mod is None else x % mod
def sum_of_int(n, mod=None):
x = int(n * (n + 1) / 2)
return _mod(x, mod)
def sum_of_int_square(n, mod=None):
x = int(n * (n + 1) * (n * 2 + 1) / 6)
return _mod(x, mod)
def sum_of_int_cube(n, mod=None):
x = int(n * (n + 1) / 2)
x = _mod(x, mod)
return _mod(x * x, mod)
def reflex(cond):
def reflex_cond(p1, p2):
return cond(p1, p2) or cond(p2, p1)
return reflex_cond
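# Quick sanity check of the closed-form helpers above; uses only functions defined in
# this file.
if __name__ == "__main__":
    assert sum_of_int(10) == sum(range(11))                        # 1 + ... + 10 = 55
    assert sum_of_int_square(10) == sum(i * i for i in range(11))  # = 385
    assert sum_of_int_cube(10) == sum(i ** 3 for i in range(11))   # = 55 ** 2 = 3025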
|
import importlib
from logging import getLogger
from imblearn.ensemble import BalancedBaggingClassifier, RUSBoostClassifier
from imblearn.over_sampling import RandomOverSampler, SMOTE
from imblearn.pipeline import Pipeline
from imblearn.under_sampling import RandomUnderSampler
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
import numpy as np
import scipy.sparse as sp
from sklearn.cluster import KMeans
from sklearn.decomposition import NMF, PCA, TruncatedSVD
from sklearn.impute import SimpleImputer
from sklearn.metrics import get_scorer
from sklearn.model_selection import KFold
from sklearn.multiclass import OneVsOneClassifier, OneVsRestClassifier
from sklearn.preprocessing import MaxAbsScaler, StandardScaler
logger = getLogger('predict').getChild('SingleTrainer')
if 'Flattener' not in globals():
from ..commons.Flattener import Flattener
if 'Reshaper' not in globals():
from ..commons.Reshaper import Reshaper
if 'BaseTrainer' not in globals():
from .BaseTrainer import BaseTrainer
if 'EnsembleTrainer' not in globals():
from .EnsembleTrainer import EnsembleTrainer
if 'Augmentor' not in globals():
from .Augmentor import Augmentor
if 'Outputer' not in globals():
from ..Outputer import Outputer
class SingleTrainer(BaseTrainer):
def __init__(
self, X_train, Y_train, X_test, feature_columns, configs, kernel=False
):
self.X_train = X_train
self.Y_train = Y_train
self.X_test = X_test
self.feature_columns = feature_columns
self.configs = configs
self.kernel = kernel
        # To work around keras/torch scoping issues, load at instance creation time.
        # Only needed when keras/torch models are used, so resolve via eval to avoid errors otherwise.
if self.kernel:
self.create_nn_model = eval('create_nn_model')
def _to_pipeline_params(self, pre, params):
return {f'{pre}__{k}': v for k, v in params.items()}
def _get_model_params(self, model_config):
# model
model = model_config['model']
logger.info('model: %s' % model)
self.model = model
modelname = model_config.get('modelname')
if modelname:
logger.info('modelname: %s' % modelname)
# params
params = model_config.get('params', {})
self.params = self._to_pipeline_params(self.model, params)
# fit_params
fit_params = model_config.get('fit_params', {})
if self.model in ['keras_clf', 'keras_reg']:
fit_params['callbacks'] = []
if fit_params.get('reduce_lr'):
fit_params['callbacks'].append(
ReduceLROnPlateau(**fit_params['reduce_lr']))
del fit_params['reduce_lr']
if fit_params.get('early_stopping'):
fit_params['callbacks'].append(
EarlyStopping(**fit_params['early_stopping']))
del fit_params['early_stopping']
self.fit_params = self._to_pipeline_params(self.model, fit_params)
# cv
self.cv_select = model_config.get('cv_select', 'min')
self.n_trials = model_config.get('n_trials')
# multiclass
        # added to the final estimator after fitting
multiclass = model_config.get('multiclass')
if multiclass:
logger.info('multiclass: %s' % multiclass)
if multiclass == 'onevsone':
multiclass = OneVsOneClassifier
elif multiclass == 'onevsrest':
multiclass = OneVsRestClassifier
else:
logger.error(
f'NOT IMPLEMENTED MULTICLASS: {multiclass}')
raise Exception('NOT IMPLEMENTED')
self.multiclass = multiclass
return
def _add_sampling_to_pipeline(self, pipeline, model_config, X_train):
undersampling = model_config.get('undersampling')
oversampling = model_config.get('oversampling')
        # avoid input-format errors from the samplers: flatten before sampling
if (undersampling and undersampling == 'random') or oversampling:
pipeline.append(('flattener', Flattener()))
# pipeline
undersampling_clf = None
if undersampling:
logger.info(f'undersampling: {undersampling}')
if undersampling == 'random':
pipeline.append(
('undersampling', RandomUnderSampler(random_state=42)))
            # added to the final estimator after fitting
elif undersampling == 'bagging':
undersampling_clf = BalancedBaggingClassifier
elif undersampling == 'adaboost':
undersampling_clf = RUSBoostClassifier
else:
logger.error(
f'NOT IMPLEMENTED UNDERSAMPLING: {undersampling}')
raise Exception('NOT IMPLEMENTED')
if oversampling:
logger.info(f'oversampling: {oversampling}')
if oversampling == 'random':
pipeline.append(
('oversampling', RandomOverSampler(random_state=42)))
elif oversampling == 'smote':
pipeline.append(
('oversampling', SMOTE(random_state=42)))
else:
logger.error(
f'NOT IMPLEMENTED OVERSAMPLING: {oversampling}')
raise Exception('NOT IMPLEMENTED')
        # avoid input-format errors from the samplers: restore the original shape after sampling
if (undersampling and undersampling == 'random') or oversampling:
_is_categorical = (self.model == 'keras_clf')
pipeline.append(
('reshaper', Reshaper(X_train.shape[1:], _is_categorical)))
return pipeline, undersampling_clf
def _get_base_pipeline(self, model_config, nn_func, X_train):
_pipeline = []
# imputation
imputation = model_config.get('missing_imputation')
if imputation:
logger.info(f'missing_imputation: {imputation}')
_pipeline.append((
'missing_imputation',
SimpleImputer(missing_values=np.nan, strategy=imputation)
))
# x_scaler
x_scaler = model_config.get('x_scaler')
if x_scaler:
logger.info(f'x_scaler: {x_scaler}')
if x_scaler == 'standard':
                # to-do: consider limiting to the 1-99% range to handle outliers, e.g.
                # winsorize(X_train, limits=[0.01, 0.01]).tolist()
_x_scaler = StandardScaler(with_mean=False)
elif x_scaler == 'maxabs':
_x_scaler = MaxAbsScaler()
_pipeline.append(('x_scaler', _x_scaler))
# dimension_reduction
di_reduction = model_config.get('dimension_reduction')
if di_reduction:
n = di_reduction['n']
model = di_reduction['model']
if n == 'all':
n = X_train.shape[1]
logger.info(
'dimension_reduction: %s to %s with %s'
% (X_train.shape[1], n, model))
if model == 'pca':
                # if the PCA explained-variance ratios are not unique, results may not be reproducible
_pipeline.append((
'dimension_reduction',
PCA(n_components=n, random_state=42)
))
elif model == 'svd':
_pipeline.append((
'dimension_reduction',
TruncatedSVD(n_components=n, random_state=42)
))
elif model == 'kmeans':
_pipeline.append((
'dimension_reduction',
KMeans(n_clusters=n, random_state=42, n_jobs=-1)
))
elif model == 'nmf':
_pipeline.append((
'dimension_reduction',
NMF(n_components=n, random_state=42)
))
else:
logger.error(
'NOT IMPLEMENTED DIMENSION REDUCTION MODEL: %s' % model)
raise Exception('NOT IMPLEMENTED')
self.feature_columns = list(map(
lambda x: '%s_%d' % (model, x), range(n)))
# sampling
_pipeline, self.undersampling = \
self._add_sampling_to_pipeline(_pipeline, model_config, X_train)
# augmentation
augmentation = model_config.get('augmentation')
if augmentation:
logger.info(f'augmentation: {augmentation}')
_pipeline.append(
('augmentation', Augmentor(**augmentation)))
# model
create_nn_model = None
if self.model in ['keras_clf', 'keras_reg', 'torch_clf', 'torch_reg']:
if self.kernel:
create_nn_model = self.create_nn_model
else:
myfunc = importlib.import_module(
'modules.myfuncs.%s' % nn_func)
create_nn_model = myfunc.create_nn_model
_pipeline.append((
self.model,
self.get_base_estimator(
self.model, create_nn_model=create_nn_model)
))
return Pipeline(_pipeline)
def _fit(self, scorer, train_cv, val_cv, X_train, Y_train):
# for param tuning, use train_cv
best_params = self.calc_best_params(
self.base_pipeline, X_train, Y_train, self.params,
scorer, train_cv, self.fit_params, self.n_trials,
self.multiclass, self.undersampling)
logger.info('best params: %s' % best_params)
pipeline = self.base_pipeline
pipeline.set_params(**best_params)
pipeline.steps[-1] = (pipeline.steps[-1][0], self.to_second_estimator(
pipeline.steps[-1][1], self.multiclass, self.undersampling))
# to create model, use val_cv
logger.info(f'get estimator with cv_select: {self.cv_select}')
if self.cv_select == 'train_all':
scores, pipelines = self.calc_cv_scores_pipelines(
pipeline, X_train, Y_train, scorer,
cv=1, fit_params=self.fit_params, with_importances=True)
score = scores[0]
pipeline = pipelines[0]
estimator = pipeline
elif self.cv_select in ['min', 'all_folds']:
scores, pipelines = self.calc_cv_scores_pipelines(
pipeline, X_train, Y_train, scorer,
cv=val_cv, fit_params=self.fit_params, with_importances=True)
if self.cv_select == 'min':
_min_index = np.array(scores).argmin()
score = scores[_min_index]
pipeline = pipelines[_min_index]
estimator = pipeline
elif self.cv_select == 'all_folds':
_single_pipelines = []
for i, _pipeline in enumerate(pipelines):
_single_pipelines.append(
(f'{i}_fold', _pipeline))
weights = EnsembleTrainer.get_weights(scores)
score = np.average(scores, weights=weights)
ensemble_trainer_obj = EnsembleTrainer(
X_train, Y_train, self.X_test, self.configs)
estimator = ensemble_trainer_obj.calc_ensemble_estimator(
_single_pipelines, ensemble_config={'mode': 'average'},
weights=weights, scorer=scorer)
else:
logger.error(f'NOT IMPLEMENTED CV SELECT: {self.cv_select}')
raise Exception('NOT IMPLEMENTED')
return score, estimator
def _calc_pseudo_label_data(
self, X_train, Y_train, estimator, classes, threshold
):
_, Y_pred_proba = Outputer.predict_like(
train_mode=self.configs['fit']['train_mode'],
estimator=estimator, X_train=X_train, Y_train=Y_train,
X_target=self.X_test)
data_index, label_index = np.where(Y_pred_proba > threshold)
pseudo_X_train = self.X_test[data_index]
pseudo_Y_train = classes[label_index].reshape(-1, 1)
return pseudo_X_train, pseudo_Y_train
def _fit_with_pseudo_labeling(
self,
scorer, train_cv, val_cv, estimator,
X_train, Y_train, classes, threshold
):
logger.info('fit with pseudo labeling')
pseudo_X_train, pseudo_Y_train = self._calc_pseudo_label_data(
X_train, Y_train, estimator, classes, threshold)
new_X_train = sp.vstack((X_train, pseudo_X_train), format='csr')
new_Y_train = np.concatenate([Y_train, pseudo_Y_train])
logger.info(
'with threshold %s, train data added %s => %s'
% (threshold, len(Y_train), len(new_Y_train)))
return self._fit(scorer, train_cv, val_cv, new_X_train, new_Y_train)
def _sample_with_error(self, X_train, Y_train, estimator):
Y_pred, _ = Outputer.predict_like(
train_mode=self.configs['fit']['train_mode'],
estimator=estimator, X_train=X_train, Y_train=Y_train,
X_target=X_train)
data_index = np.where(Y_pred != self.ravel_like(Y_train))
error_X_train = X_train[data_index]
error_Y_train = Y_train[data_index]
return error_X_train, error_Y_train
def _fit_with_error_sampling(
self, scorer, train_cv, val_cv, estimator, X_train, Y_train, score
):
logger.info('fit with error_sampling')
new_X_train, new_Y_train = self._sample_with_error(
X_train, Y_train, estimator)
logger.info(
'with error_sampling, error train data is %s'
% len(new_Y_train))
_score, _estimator = \
self._fit(scorer, train_cv, val_cv, new_X_train, new_Y_train)
_single_estimators = [
('base', estimator),
('error', _estimator),
]
weights = EnsembleTrainer.get_weights(
np.array([len(Y_train), len(new_Y_train)]))
score = np.average(np.array([score, _score]), weights=weights)
ensemble_trainer_obj = EnsembleTrainer(
X_train, Y_train, self.X_test, configs=self.configs)
estimator = ensemble_trainer_obj.calc_ensemble_estimator(
_single_estimators, ensemble_config={'mode': 'average'},
weights=weights, scorer=scorer)
return score, estimator
def calc_single_estimator(
self,
model_config, scorer=get_scorer('accuracy'),
train_cv=KFold(n_splits=3, shuffle=True, random_state=42),
val_cv=KFold(n_splits=3, shuffle=True, random_state=43),
nn_func=None, X_train=None, Y_train=None
):
if X_train is None:
X_train = self.X_train
if Y_train is None:
Y_train = self.Y_train
self._get_model_params(model_config)
self.base_pipeline = \
self._get_base_pipeline(model_config, nn_func, X_train)
logger.info(f'base_pipeline: {self.base_pipeline}')
logger.info(f'fit_params: {self.fit_params}')
# fit
logger.info('fit')
score, estimator = \
self._fit(scorer, train_cv, val_cv, X_train, Y_train)
logger.info(f'score: {score}')
logger.info(f'estimator: {estimator}')
# pseudo labeling
pseudo_config = model_config.get('pseudo_labeling')
if pseudo_config:
if self.configs['fit']['train_mode'] == 'reg':
logger.error('NOT IMPLEMENTED PSEUDO LABELING WITH REGRESSION')
raise Exception('NOT IMPLEMENTED')
threshold = pseudo_config.get('threshold')
if not threshold and int(threshold) != 0:
threshold = 0.8
if hasattr(estimator, 'classes_'):
classes = estimator.classes_
else:
classes = np.sort(np.unique(Y_train))
score, estimator = self._fit_with_pseudo_labeling(
scorer, train_cv, val_cv, estimator,
X_train, Y_train, classes, threshold)
logger.info(f'score: {score}')
logger.info(f'estimator: {estimator}')
# error sampling
if model_config.get('error_sampling'):
if self.configs['fit']['train_mode'] == 'reg':
logger.error('NOT IMPLEMENTED ERROR SAMPLING WITH REGRESSION')
raise Exception('NOT IMPLEMENTED')
score, estimator = self._fit_with_error_sampling(
scorer, train_cv, val_cv, estimator, X_train, Y_train, score)
logger.info(f'score: {score}')
logger.info(f'estimator: {estimator}')
return score, estimator
|
import cv2
import glob
'''Cut images into 256*256 pixel tiles.'''
i = 0
def crop(img, outdir):
    img = cv2.imread(img)  # read the input image
#Row1
    cropped = img[0:256, 0:256]  # crop coordinates are [y0:y1, x0:x1]
    cv2.imwrite("../corpped_PNG/{}.png".format(i+11000), cropped)  # crop and save into the target folder
    # File names follow the pattern 110xx: row 1, column 1 of photo number xx.
    # For example, 11035 is the row-1, column-1 tile cropped from the 35th photo in the folder.
cropped = img[0:256, 256:512]
cv2.imwrite("../corpped_PNG/{}.png".format(i+12000), cropped)
cropped = img[0:256, 512:768]
cv2.imwrite("../corpped_PNG/{}.png".format(i + 13000), cropped)
cropped = img[0:256, 768:1024]
cv2.imwrite("../corpped_PNG/{}.png".format(i + 14000), cropped)
    cropped = img[0:256, 1024:1280]  # keep this column only if the photo is wide enough
cv2.imwrite("../corpped_PNG/{}.png".format(i + 15000), cropped)
# Row2
cropped = img[256:512, 0:256]
cv2.imwrite("../corpped_PNG/{}.png".format(i+21000), cropped)
cropped = img[256:512, 256:512]
cv2.imwrite("../corpped_PNG/{}.png".format(i+22000), cropped)
cropped = img[256:512, 512:768]
cv2.imwrite("../corpped_PNG/{}.png".format(i + 23000), cropped)
cropped = img[256:512, 768:1024]
cv2.imwrite("../corpped_PNG/{}.png".format(i + 24000), cropped)
    cropped = img[256:512, 1024:1280]  # keep this column only if the photo is wide enough
cv2.imwrite("../corpped_PNG/{}.png".format(i + 25000), cropped)
# Row3
cropped = img[512:768, 0:256]
cv2.imwrite("../corpped_PNG/{}.png".format(i+31000), cropped)
cropped = img[512:768, 256:512]
cv2.imwrite("../corpped_PNG/{}.png".format(i+32000), cropped)
cropped = img[512:768, 512:768]
cv2.imwrite("../corpped_PNG/{}.png".format(i + 33000), cropped)
cropped = img[512:768, 768:1024]
cv2.imwrite("../corpped_PNG/{}.png".format(i + 34000), cropped)
    cropped = img[512:768, 1024:1280]  # keep this column only if the photo is wide enough
cv2.imwrite("../corpped_PNG/{}.png".format(i + 35000), cropped)
    # Row 4: keep this row only if the photo is tall enough
cropped = img[768:1024, 0:256]
cv2.imwrite("../corpped_PNG/{}.png".format(i+41000), cropped)
cropped = img[768:1024, 256:512]
cv2.imwrite("../corpped_PNG/{}.png".format(i+42000), cropped)
cropped = img[768:1024, 512:768]
cv2.imwrite("../corpped_PNG/{}.png".format(i + 43000), cropped)
cropped = img[768:1024, 768:1024]
cv2.imwrite("../corpped_PNG/{}.png".format(i + 44000), cropped)
cropped = img[768:1024, 1024:1280]
cv2.imwrite("../corpped_PNG/{}.png".format(i + 45000), cropped)
for img in glob.glob("Your Folder address/*.png"):  # loop over the folder of images to crop
crop(img, "Your Folder address/")
i = i + 1
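# The fixed slices above can also be generated from the image size; a sketch
# (variable names illustrative, intended for use inside crop()):
#   h, w = img.shape[:2]
#   tiles = [img[r:r + 256, c:c + 256]
#            for r in range(0, h - 255, 256)
#            for c in range(0, w - 255, 256)]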
|
#!/usr/bin/env python
# Copyright 2001 by Brad Chapman. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Test for graphics things that don't really deserve there own test module."""
import os
import random
import unittest
from Bio import MissingExternalDependencyError
try:
# Skip the test if reportlab is not installed
import reportlab as r
del r
except ImportError:
raise MissingExternalDependencyError(
"Install reportlab if you want to use Bio.Graphics."
) from None
# the stuff we're testing
from Bio.Graphics.Comparative import ComparativeScatterPlot
class ComparativeTest(unittest.TestCase):
"""Do tests for modules involved with comparing data."""
def setUp(self):
self.min_num_points = 1
self.max_num_points = 500
self.min_point_num = 0
self.max_point_num = 200
def _make_random_points(self, num_two_d_lists):
"""Make a bunch of random points for testing plots."""
plot_info = []
random.seed(num_two_d_lists) # for reproducibility
for two_d_list in range(num_two_d_lists):
cur_list = []
num_points = random.randrange(self.min_num_points, self.max_num_points)
for point in range(num_points):
x_point = random.randrange(self.min_point_num, self.max_point_num)
y_point = random.randrange(self.min_point_num, self.max_point_num)
cur_list.append((x_point, y_point))
plot_info.append(cur_list)
return plot_info
def test_simple_scatter_plot_1(self):
"""Test creation of a simple ScatterPlot with one list."""
compare_plot = ComparativeScatterPlot()
compare_plot.display_info = self._make_random_points(1)
output_file = os.path.join(os.getcwd(), "Graphics", "scatter_test_1.pdf")
compare_plot.draw_to_file(output_file, "Testing Scatter Plots")
def test_simple_scatter_plot_7(self):
"""Test creation of a simple ScatterPlot with more lists."""
compare_plot = ComparativeScatterPlot()
# There are 6 pre-defined colors and symbols, doing more:
compare_plot.display_info = self._make_random_points(7)
output_file = os.path.join(os.getcwd(), "Graphics", "scatter_test_7.pdf")
compare_plot.draw_to_file(output_file, "Testing Scatter Plots")
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
import scrapy
class PokemonSpider(scrapy.Spider):
name = "poke_spider"
start_urls = ['https://serebii.net/pokedex-sm/001.shtml']
def parse(self, response):
once_standard = True
once_alola = True
once_pre = True
file = open("pre_moves.json", "a+", encoding="utf-8")
for table in response.xpath('//*[@class="dextable"]'):
if (table.xpath('tr[1]/td/text()').get() == "Ultra Sun/Ultra Moon Level Up" or
table.xpath('tr[1]/td/text()').get() == "Generation VII Level Up" or
table.xpath('tr[1]/td/font/text()').get() == "Standard Level Up") and once_standard:
once_standard = False
file.write(response.xpath('//*[@class="dextab"]//b/text()').get() + " [")
for row in table.xpath('tr'):
if row.xpath('td[2]//text()').get() is not None and \
"Other" not in row.xpath('td[4]/img/@alt').get():
file.write("'" + row.xpath('td[2]/a/text()').get() + "' ")
file.write("]\n")
elif table.xpath('tr[1]/td/font/text()').get() == "Alola Form Level Up" and once_alola:
once_alola = False
file.write(response.xpath('//*[@class="dextab"]//b/text()').get() + " Alola [")
for row in table.xpath('tr'):
if row.xpath('td[2]//text()').get() is not None and \
"Other" not in row.xpath('td[4]/img/@alt').get():
file.write("'" + row.xpath('td[2]/a/text()').get() + "' ")
file.write("]\n")
elif table.xpath('tr[1]/td/text()').get() == "Pre-Evolution Only Moves" and once_pre:
once_pre = False
file.write(response.xpath('//*[@class="dextab"]//b/text()').get() + " Pre [")
for row in table.xpath('tr'):
if row.xpath('td[1]//text()').get() is not None and \
row.xpath('td[1]//@colspan').get() is None and \
"Other" not in row.xpath('td[3]/img/@alt').get():
file.write("'" + row.xpath('td[1]/a/text()').get() + "' ")
file.write("]\n")
        # close the output file before (possibly) returning the next-page request
        file.close()
        navi_table = '//*[@align="right"]/table[@border="0"]//tr'
        next_page = response.xpath(navi_table + '/td[2]/a/@href').get()
        if "--->" in response.xpath(navi_table + '/td[3]/text()').get():
            return scrapy.Request(
                response.urljoin('https://serebii.net' + next_page),
                callback=self.parse
            )
|
#!/usr/bin/env python
"""
Run this from the project root directory to perform tests.
There should be some music files in the music directory (source_dir) before
testing.
TODO: Everything.
"""
import os
import unittest
import shutil
import sys
import numpy as np
# I literally cannot believe this is how Python handles relative imports
sys.path.insert(0, './')
from lib.config import config as cf
# Set appropriate testing directories
test_target_dir = 'tests/data'
test_model_dir = 'tests/models'
test_output_dir = 'tests/output'
cf.data.target_dir = test_target_dir
cf.state.model_dir = test_model_dir
cf.output.output_dir = test_output_dir
# Now load other project modules - this is because we are setting config vars
# locally in, particularly, utils.py --- this should be fixed.
from lib.net import Net
import lib.utils as utils
# This all needs to be fleshed out a lot more; is a placeholder for now.
class ScBasicTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
test_dirs = [test_target_dir, test_model_dir, test_output_dir]
for test_dir in test_dirs:
test_files = os.listdir(test_dir)
for file in test_files:
if file[0] == '.':
continue
filepath = os.path.join(test_dir, file)
os.remove(filepath)
def test_01_convert_files(self):
# Convert source to wavs
utils.convert_source_dir()
# Convert wavs to freq timesteps
utils.convert_wavs_to_freqs()
def test__02_data_load(self):
z = utils.load_blocks()
w = utils.load_wavs()
testslice = max(len(z), 10)
t = z[:testslice]
u = utils.dechannel(t)
u = utils.com2pol(u)
# Test polar forms
self.assertGreaterEqual(u.T[0].min(), 0)
self.assertLessEqual(u.T[1].max(), 3.15)
self.assertGreaterEqual(u.T[1].min(), -3.15)
v = utils.pol2com(u)
v = utils.enchannel(v)
# Should be close to original
diff = t - v
self.assertLess(diff.max(), 1e-4)
def test_03_model(self):
n = Net(
Net.ALL,
training_epochs=25,
epochs_per_save=5,
epochs_per_archve=10,
save_m=True,
gen_steps=60,
kwargs={'hidden_size':32})
# Should not load - no files
self.assertFalse(n.load())
# Should train and gen output
n.train()
y = n.gen()
filepath = os.path.join(test_output_dir, 'test1.wav')
utils.write_output(filepath, y)
# Build new model and load weights
m = Net(
Net.ALL,
build=False,
gen_steps=60,
kwargs={'hidden_size':32})
self.assertTrue(m.load())
y = m.gen()
filepath = os.path.join(test_output_dir, 'test2.wav')
utils.write_output(filepath, y)
# Let us confirm our files were saved
        files = list(filter(lambda e: e[0] != '.', os.listdir(test_output_dir)))  # materialize so len() works
self.assertEqual(len(files), 2)
if __name__ == '__main__':
unittest.main()
|
# Time complexity: roughly O(n^4), from O(n^3) memoized substring pairs with O(n) splits per pair (n = length of string)
# Approach: Recursion with memoizing boolean values.
class Solution:
def __init__(self):
self.mp = {}
def isScramble(self, s1: str, s2: str) -> bool:
if len(s1)!=len(s2):
return False
n = len(s1)
if not n or s1==s2:
return True
if sorted(s1)!=sorted(s2):
return False
if s1+' '+s2 in self.mp:
return self.mp[s1+' '+s2]
        for i in range(1, n):
            if (self.isScramble(s1[:i], s2[:i]) and self.isScramble(s1[i:], s2[i:])) or \
                    (self.isScramble(s1[:i], s2[-i:]) and self.isScramble(s1[i:], s2[:-i])):
                # memoize positive results too, not only failures
                self.mp[s1+' '+s2] = True
                return True
        self.mp[s1+' '+s2] = False
        return False
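# Quick check with the classic examples from the problem statement:
# "great"/"rgeat" is a scramble, "abcde"/"caebd" is not.
if __name__ == "__main__":
    assert Solution().isScramble("great", "rgeat")
    assert not Solution().isScramble("abcde", "caebd")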
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Correlation(nn.Module):
def __init__(self, max_displacement=4, *args, **kwargs):
super(Correlation, self).__init__()
self.max_displacement = max_displacement
self.output_dim = 2 * self.max_displacement + 1
self.pad_size = self.max_displacement
def forward(self, x1, x2):
B, C, H, W = x1.size()
x2 = F.pad(x2, [self.pad_size] * 4)
cv = []
for i in range(self.output_dim):
for j in range(self.output_dim):
cost = x1 * x2[:, :, i:(i + H), j:(j + W)]
cost = torch.mean(cost, 1, keepdim=True)
cv.append(cost)
return torch.cat(cv, 1)
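# Note: for max_displacement d, forward() returns a cost volume with
# (2 * d + 1) ** 2 channels (81 for d = 4), the same channel count produced by the
# CUDA Correlation layer benchmarked below.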
if __name__ == '__main__':
import time
import random
from correlation_package.correlation import Correlation as Correlation_cuda
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
corr1 = Correlation(max_displacement=4, kernel_size=1, stride1=1,
stride2=1, corr_multiply=1).to(device)
corr2 = Correlation_cuda(pad_size=4, kernel_size=1, max_displacement=4, stride1=1,
stride2=1, corr_multiply=1)
t1_sum = 0
t2_sum = 0
for i in range(50):
C = random.choice([128, 256])
H = random.choice([128, 256]) # , 512
W = random.choice([64, 128]) # , 256
x1 = torch.randn(4, C, H, W, requires_grad=True).to(device)
x2 = torch.randn(4, C, H, W).to(device)
end = time.time()
y2 = corr2(x1, x2)
t2_f = time.time() - end
end = time.time()
y2.sum().backward()
t2_b = time.time() - end
end = time.time()
y1 = corr1(x1, x2)
t1_f = time.time() - end
end = time.time()
y1.sum().backward()
t1_b = time.time() - end
assert torch.allclose(y1, y2, atol=1e-7)
        # t1_* times the pure-PyTorch Correlation above, t2_* the CUDA extension
        print('Forward: cuda: {:.3f}ms, pytorch: {:.3f}ms'.format(t2_f * 1000, t1_f * 1000))
        print(
            'Backward: cuda: {:.3f}ms, pytorch: {:.3f}ms'.format(t2_b * 1000, t1_b * 1000))
if i < 3:
continue
t1_sum += t1_b + t1_f
t2_sum += t2_b + t2_f
    print('cuda: {:.3f}s, pytorch: {:.3f}s'.format(t2_sum, t1_sum))
...
|
from launch import LaunchDescription
from launch_ros.actions import Node
from launch.actions import DeclareLaunchArgument, SetEnvironmentVariable
from launch.substitutions import LaunchConfiguration, ThisLaunchFileDir
def generate_launch_description():
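    # NOTE: `config` referenced below must be defined before launching; a common
    # pattern is to point it at a YAML parameter file, e.g. (path and package layout
    # hypothetical):
    #   config = os.path.join(
    #       get_package_share_directory('roomba_600_driver'), 'config', 'params.yaml')
    # which additionally requires:
    #   import os
    #   from ament_index_python.packages import get_package_share_directory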
return LaunchDescription([
Node(
package='roomba_600_driver',
node_executable='tf_broadcast',
output='screen',
parameters=[config]),
])
|
# Generated by Django 3.0.8 on 2020-09-06 10:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0014_auto_20200906_0956'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='image',
field=models.FileField(blank=True, default='/home/zedway/zedway/media/dummy.png', upload_to='profile_pictures'),
),
]
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
class Thresholds():
def __init__(self,img):
self._img = img
def _abs_sobel_thresh(self, img, orient='x', sobel_kernel=3, thresh=(0, 255)):
# Calculate directional gradient
# Apply the following steps to img
# 1) Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# 2) Take the derivative in x or y given orient = 'x' or 'y'
        if orient == 'x':
            sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
        else:
            sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# 3) Take the absolute value of the derivative or gradient
abs_sobel = np.absolute(sobel)
# 4) Scale to 8-bit (0 - 255) then convert to type = np.uint8
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
# 5) Create a mask of 1's where the scaled gradient magnitude
# is > thresh_min and < thresh_max
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
# 6) Return this mask as your binary_output image
#binary_output = np.copy(img) # Remove this line
grad_binary = sxbinary
return grad_binary
def _mag_thresh(self, img, sobel_kernel=3, mag_thresh=(0, 255)):
# Calculate gradient magnitude
# Apply the following steps to img
# 1) Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# 2) Take the gradient in x and y separately
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# 3) Calculate the magnitude
gradmag = np.sqrt(sobelx**2 + sobely**2)
# 4) Scale to 8-bit (0 - 255) and convert to type = np.uint8
scaled_sobel = np.uint8(255*gradmag/np.max(gradmag))
# 5) Create a binary mask where mag thresholds are met
mag_binary = np.zeros_like(scaled_sobel)
mag_binary[(scaled_sobel >= mag_thresh[0]) & (scaled_sobel <= mag_thresh[1])] = 1
return mag_binary
def _dir_threshold(self, img, sobel_kernel=3, thresh=(0, np.pi/2)):
# Calculate gradient direction
# Apply the following steps to img
# 1) Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# 2) Take the gradient in x and y separately
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# 3) Take the absolute value of the x and y gradients
abs_sobelx = np.absolute(sobelx)
abs_sobely= np.absolute(sobely)
# 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient
absgraddir = np.arctan2(abs_sobely, abs_sobelx)
# 5) Create a binary mask where direction thresholds are met
dir_binary = np.zeros_like(absgraddir)
dir_binary[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1
# 6) Return this mask as your binary_output image
#binary_output = np.copy(img) # Remove this line
return dir_binary
# This function thresholds the S-channel of HLS
# Use exclusive lower bound (>) and inclusive upper (<=)
def _hls_select(self,img, thresh=(0, 255)):
# 1) Convert to HLS color space
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
# 2) Apply a threshold to the S channel
S = hls[:,:,2]
binary_output = np.zeros_like(S)
binary_output[(S > thresh[0]) & (S <= thresh[1])] = 1
# 3) Return a binary image of threshold result
#binary_output = np.copy(img) # placeholder line
return binary_output
# Edit this function to create your own pipeline.
def pipeline(self, s_thresh=(170, 255), sx_thresh=(20, 100)):
# Sobel x
sxbinary = self._abs_sobel_thresh(self._img, 'x', 3, sx_thresh)
# Threshold color channel
s_binary = self._hls_select(self._img, s_thresh)
# Stack each channel
color_binary = np.dstack(( np.zeros_like(sxbinary), sxbinary, s_binary)) * 255
# Combine thresholds
combined = np.zeros_like(sxbinary)
combined[((s_binary == 1) | (sxbinary == 1))] = 1
#combined[((s_binary == 1) & (sxbinary == 1)) | ((mag_binary == 1) & (dir_binary == 1))] = 1
return combined
if __name__ == '__main__':
test = 5
image = mpimg.imread('..\\test_images\\test' +str(test)+'.jpg')
threshold = Thresholds(image)
result = threshold.pipeline()
# Plot the result
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(image)
ax1.set_title('Original Image', fontsize=40)
ax2.imshow(result)
ax2.set_title('Pipeline Result', fontsize=40)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
plt.savefig('..\\output_images\\P2output_test' +str(test)+'.png')
|
#!/usr/bin/env python
"""
Models for info if needed
"""
class Product:
"""
A class for
ProductId – an integer uniquely identifying the Product
Name – a string name for the Product
Price – a floating point price at which the Product is sold (per unit, not per lot)
LotSize – an integer representing how many of the product are sold in a single lot
The numerical fields may be assumed to be positive.
The string fields are not quoted and may be assumed not
to contain commas or non-ASCII characters.
The file does not contain a header. An example file is as follows:
"""
def __init__(self, product_id, name, price, lot_size):
self.product_id = product_id
self.name = name
self.price = price
self.lot_size = lot_size
class Sales:
"""
The Sales file is a comma-separated text file where
each line contains information about a unique sale.
The fields of the file are as follows:
SaleId – an integer uniquely identifying the sale
ProductId – an integer identifying the Product (matches the ProductId from the Product Master)
TeamId – an integer identifying the Sales Team (matches the TeamId from the Team Map)
Quantity – an integer representing how many lots of the product were sold
All of the numerical fields may be assumed to be positive.
The file does not contain a header with the field names. An example file is as follows:
"""
def __init__(self, sale_id, product_id, team_id, quantity):
self.sale_id = sale_id
self.product_id = product_id
self.team_id = team_id
self.quantity = quantity
class Report:
"""
Product Report
    The Product Report file is a comma-separated text file where each line summarizes the
    sales of a single Product and contains three values as follows:
Name – name of the Product
GrossRevenue – gross revenue of sales of the Product
TotalUnits – total number of units sold in the Product
The file should contain a header with the field names, and the products
should be provided in descending order of their gross revenue.
"""
def __init__(self, name, gross_revenue, total_units):
self.name = name
self.gross_revenue = gross_revenue
self.total_units = total_units
|
'''
Copyright (c) 2015, Salesforce.com, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Salesforce.com nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from sqlalchemy.engine import create_engine
import config
configuration = config.Configuration()
def get_engine():
credentials_id = configuration.get(('postgresql','credential-identifier'))
server_name = configuration.get(('postgresql','server'))
database_name = configuration.get(('postgresql','database'))
cred_url_part = ""
if credentials_id is not None:
creds = config.credential_manager.get_or_create_credentials_for(credentials_id, "password")
cred_url_part = "%s:%s@" % (creds.username, creds.password)
connectionurl = 'postgresql://%s%s/%s' % (cred_url_part, server_name, database_name)
return create_engine(connectionurl)
engine = get_engine()
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.exc import IntegrityError
Session = sessionmaker(bind=engine)
Session.configure(bind=engine)
import copy
def get_one_or_create(session,
model,
create_method='',
create_method_kwargs=None,
**kwargs):
try:
return session.query(model).filter_by(**kwargs).one(), False
except NoResultFound:
kwargs.update(create_method_kwargs or {})
created = getattr(model, create_method, model)(**kwargs)
try:
session.add(created)
session.flush()
return created, True
except IntegrityError:
session.rollback()
return session.query(model).filter_by(**kwargs).one(), False
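# Illustrative usage sketch (the `Tag` model and the commit policy below are
# assumptions, not part of this module):
#
#   session = Session()
#   tag, created = get_one_or_create(session, Tag, name='python')
#   if created:
#       session.commit()  # persist the row that was just flushed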
|
# empty init file
|
'''
Several test cases around <Transformer> and its arguments.
'''
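# Note on the expected values asserted below (added for clarity): they appear to
# be, respectively, the Base64 encoding of "12345" ('MTIzNDU='), the
# Base64-encoded MD5 digest of "12345" ('gnzLDuqKcGxMNKFokfhOew=='), and the hex
# MD5 digest of "12345" ('827ccb0eea8a706c4c34a16891f84e7b').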
import unittest
import utils
def suite():
suite = unittest.TestSuite()
suite.addTest(TransformerEncodeTestCase())
suite.addTest(TransformerEncode2TestCase())
suite.addTest(TransformerEncode3TestCase())
return suite
class TransformerEncodeTestCase(utils.PeachTcpTestCase):
def runTest(self):
# Test
self.peachUtils.RunPeachXml("transformersEncode.xml")
ret = self.peachUtils.GetListenerData()
assert ret == 'MTIzNDU=', 'transformersEncode.xml failed, instead [%s]' % repr(ret)
class TransformerEncode2TestCase(utils.PeachTcpTestCase):
def runTest(self):
# Test
self.peachUtils.RunPeachXml("transformersEncode2.xml")
ret = self.peachUtils.GetListenerData()
assert ret == 'gnzLDuqKcGxMNKFokfhOew==', 'transformersEncode2.xml failed, instead [%s]' % repr(ret)
class TransformerEncode3TestCase(utils.PeachTcpTestCase):
def runTest(self):
# Test
self.peachUtils.RunPeachXml("transformersEncode3.xml")
ret = self.peachUtils.GetListenerData()
assert ret == '827ccb0eea8a706c4c34a16891f84e7b', 'transformersEncode3.xml failed, instead [%s]' % repr(ret)
if __name__ == "__main__":
unittest.main()
# end
|
import unittest
from visuanalytics.tests.analytics.transform.transform_test_helper import prepare_test
class TestTransformCalculate(unittest.TestCase):
def setUp(self):
self.data = {
"testvalue1": 5,
"testvalue2": 3.7,
"testarray1": [5, 4, 7, 1, 3, 6],
"testarray2": [9, 4, 12, 7.6, 1.75, 500],
"icon": ["und", "und", "wie", "viel", "wie", "wie", "wir"],
"array": [{"left": 4.5, "right": 2.7}, {"left": 2.7, "right": 8.5}, {"left": 1.8, "right": 3},
{"left": 3.3, "right": 3}]
}
def test_transform_calculate_multiply_value_right(self):
values = [
{
"type": "calculate",
"keys": [
"_req|testvalue1"
],
"action": "multiply",
"value_right": 3.6,
"new_keys": [
"_req|result"
]
}
]
expected_data = {
"_req": {
"testvalue1": 5,
"result": 18,
"testvalue2": 3.7,
"testarray1": [5, 4, 7, 1, 3, 6],
"testarray2": [9, 4, 12, 7.6, 1.75, 500],
"icon": ["und", "und", "wie", "viel", "wie", "wie", "wir"],
"array": [{"left": 4.5, "right": 2.7}, {"left": 2.7, "right": 8.5}, {"left": 1.8, "right": 3},
{"left": 3.3, "right": 3}]
}
}
exp, out = prepare_test(values, self.data, expected_data)
self.assertDictEqual(exp, out, "calculate multiply value right Failed")
def test_transform_calculate_multiply_data_value_right(self):
values = [
{
"type": "calculate",
"keys": [
"_req|testvalue1"
],
"action": "multiply",
"value_right": "_req|testvalue2",
"new_keys": [
"_req|result"
]
}
]
expected_data = {
"_req": {
"testvalue1": 5,
"result": 18.5,
"testvalue2": 3.7,
"testarray1": [5, 4, 7, 1, 3, 6],
"testarray2": [9, 4, 12, 7.6, 1.75, 500],
"icon": ["und", "und", "wie", "viel", "wie", "wie", "wir"],
"array": [{"left": 4.5, "right": 2.7}, {"left": 2.7, "right": 8.5}, {"left": 1.8, "right": 3},
{"left": 3.3, "right": 3}]
}
}
exp, out = prepare_test(values, self.data, expected_data)
self.assertDictEqual(exp, out, "calculate multiply data value right Failed")
def test_transform_calculate_multiply_value_left(self):
values = [
{
"type": "calculate",
"keys": [
"_req|testvalue1"
],
"action": "multiply",
"value_left": 3.6,
"new_keys": [
"_req|result"
]
}
]
expected_data = {
"_req": {
"testvalue1": 5,
"result": 18,
"testvalue2": 3.7,
"testarray1": [5, 4, 7, 1, 3, 6],
"testarray2": [9, 4, 12, 7.6, 1.75, 500],
"icon": ["und", "und", "wie", "viel", "wie", "wie", "wir"],
"array": [{"left": 4.5, "right": 2.7}, {"left": 2.7, "right": 8.5}, {"left": 1.8, "right": 3},
{"left": 3.3, "right": 3}]
}
}
exp, out = prepare_test(values, self.data, expected_data)
self.assertDictEqual(exp, out, "calculate multiply value Failed")
def test_transform_calculate_multiply_data_value_left(self):
values = [
{
"type": "calculate",
"keys": [
"_req|testvalue1"
],
"action": "multiply",
"value_left": "_req|testvalue2",
"new_keys": [
"_req|result"
]
}
]
expected_data = {
"_req": {
"testvalue1": 5,
"result": 18.5,
"testvalue2": 3.7,
"testarray1": [5, 4, 7, 1, 3, 6],
"testarray2": [9, 4, 12, 7.6, 1.75, 500],
"icon": ["und", "und", "wie", "viel", "wie", "wie", "wir"],
"array": [{"left": 4.5, "right": 2.7}, {"left": 2.7, "right": 8.5}, {"left": 1.8, "right": 3},
{"left": 3.3, "right": 3}]
}
}
exp, out = prepare_test(values, self.data, expected_data)
self.assertDictEqual(exp, out, "calculate multiply data value Failed")
def test_transform_calculate_multiply_array_value_right(self):
values = [
{
"type": "transform_array",
"array_key": "_req|array",
"transform": [
{
"type": "calculate",
"keys": [
"_loop|left"
],
"action": "multiply",
"value_right": "_loop|right",
"new_keys": [
"_req|result|{_idx}|result"
],
"decimal": 2
}
]
}
]
expected_data = {
"_req": {
"testvalue1": 5,
"testvalue2": 3.7,
"testarray1": [5, 4, 7, 1, 3, 6],
"testarray2": [9, 4, 12, 7.6, 1.75, 500],
"icon": ["und", "und", "wie", "viel", "wie", "wie", "wir"],
"array": [{"left": 4.5, "right": 2.7}, {"left": 2.7, "right": 8.5}, {"left": 1.8, "right": 3},
{"left": 3.3, "right": 3}],
"result": {0: {"result": 12.15}, 1: {"result": 22.95}, 2: {"result": 5.4}, 3: {"result": 9.9}}
}
        }
        exp, out = prepare_test(values, self.data, expected_data)
        self.assertDictEqual(exp, out, "calculate multiply array value right Failed")
def test_transform_calculate_multiply_array_keys_right(self):
values = [
{
"type": "transform_array",
"array_key": "_req|array",
"transform": [
{
"type": "calculate",
"keys": [
"_loop|left"
],
"action": "multiply",
"keys_right": ["_loop|right"],
"new_keys": [
"_req|result|{_idx}|result"
],
"decimal": 2
}
]
}
]
expected_data = {
"_req": {
"testvalue1": 5,
"testvalue2": 3.7,
"testarray1": [5, 4, 7, 1, 3, 6],
"testarray2": [9, 4, 12, 7.6, 1.75, 500],
"icon": ["und", "und", "wie", "viel", "wie", "wie", "wir"],
"array": [{"left": 4.5, "right": 2.7}, {"left": 2.7, "right": 8.5}, {"left": 1.8, "right": 3},
{"left": 3.3, "right": 3}],
"result": {0: {"result": 12.15}, 1: {"result": 22.95}, 2: {"result": 5.4}, 3: {"result": 9.9}}
}
}
exp, out = prepare_test(values, self.data, expected_data)
self.assertDictEqual(exp, out, "calculate multiply array keys right Failed")
def test_transform_calculate_multiply_array_value_left(self):
values = [
{
"type": "transform_array",
"array_key": "_req|array",
"transform": [
{
"type": "calculate",
"keys": [
"_loop|left"
],
"action": "multiply",
"value_left": "_loop|right",
"new_keys": [
"_req|result|{_idx}|result"
],
"decimal": 2
}
]
}
]
expected_data = {
"_req": {
"testvalue1": 5,
"testvalue2": 3.7,
"testarray1": [5, 4, 7, 1, 3, 6],
"testarray2": [9, 4, 12, 7.6, 1.75, 500],
"icon": ["und", "und", "wie", "viel", "wie", "wie", "wir"],
"array": [{"left": 4.5, "right": 2.7}, {"left": 2.7, "right": 8.5}, {"left": 1.8, "right": 3},
{"left": 3.3, "right": 3}],
"result": {0: {"result": 12.15}, 1: {"result": 22.95}, 2: {"result": 5.4}, 3: {"result": 9.9}}
}
}
exp, out = prepare_test(values, self.data, expected_data)
self.assertDictEqual(exp, out, "calculate multiply array value left Failed")
|
###
# Copyright 2008-2011 Diamond Light Source Ltd.
# This file is part of Diffcalc.
#
# Diffcalc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Diffcalc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Diffcalc. If not, see <http://www.gnu.org/licenses/>.
###
import math
from math import degrees
from unittest.mock import Mock
from diffcalc.hkl.calc import HklCalculation
from diffcalc.hkl.geometry import Position
from diffcalc.ub.calc import UBCalculation
from diffcalc.util import I
from numpy import array
from tests.tools import assert_almost_equal, assert_matrix_almost_equal
x = array([[1], [0], [0]])
y = array([[0], [1], [0]])
z = array([[0], [0], [1]])
def isnan(n):
# math.isnan was introduced only in python 2.6 and is not in Jython (2.5.2)
try:
return math.isnan(n)
except AttributeError:
return n != n # for Jython
class Test_position_to_virtual_angles:
def setup_method(self):
constraints = Mock()
constraints.is_fully_constrained.return_value = True
self.ubcalc = UBCalculation()
self.ubcalc.set_lattice("xtal", 1)
self.ubcalc.set_u(I)
self.ubcalc.n_phi = (0, 0, 1)
self.calc = HklCalculation(self.ubcalc, constraints)
def check_angle(
self, name, expected, mu=-99, delta=99, nu=99, eta=99, chi=99, phi=99
):
"""All in degrees"""
pos = Position(mu=mu, delta=delta, nu=nu, eta=eta, chi=chi, phi=phi)
calculated = self.calc.get_virtual_angles(pos, False)[name]
assert_almost_equal(degrees(calculated), expected)
# theta
def test_theta0(self):
self.check_angle("theta", 0, delta=0, nu=0)
def test_theta1(self):
self.check_angle("theta", 1, delta=2, nu=0)
def test_theta2(self):
self.check_angle("theta", 1, delta=0, nu=2)
def test_theta3(self):
self.check_angle("theta", 1, delta=-2, nu=0)
def test_theta4(self):
self.check_angle("theta", 1, delta=0, nu=-2)
# qaz
def test_qaz0_degenerate_case(self):
self.check_angle("qaz", 0, delta=0, nu=0)
def test_qaz1(self):
self.check_angle("qaz", 90, delta=2, nu=0)
def test_qaz2(self):
self.check_angle("qaz", 90, delta=90, nu=0)
def test_qaz3(self):
self.check_angle(
"qaz",
0,
delta=0,
nu=1,
)
# Can't see one by eye
# def test_qaz4(self):
# pos = Pos(delta=20*TORAD, nu=20*TORAD)#.inRadians()
# assert_almost_equal(
# self.calc._anglesToVirtualAngles(pos, None)['qaz']*TODEG, 45)
# alpha
def test_defaultReferenceValue(self):
        # The following tests depend on this
assert_matrix_almost_equal(self.calc.ubcalc.n_phi, array([[0], [0], [1]]))
def test_alpha0(self):
self.check_angle("alpha", 0, mu=0, eta=0, chi=0, phi=0)
def test_alpha1(self):
self.check_angle("alpha", 0, mu=0, eta=0, chi=0, phi=10)
def test_alpha2(self):
self.check_angle("alpha", 0, mu=0, eta=0, chi=0, phi=-10)
def test_alpha3(self):
self.check_angle("alpha", 2, mu=2, eta=0, chi=0, phi=0)
def test_alpha4(self):
self.check_angle("alpha", -2, mu=-2, eta=0, chi=0, phi=0)
def test_alpha5(self):
self.check_angle("alpha", 2, mu=0, eta=90, chi=2, phi=0)
# beta
def test_beta0(self):
self.check_angle("beta", 0, delta=0, nu=0, mu=0, eta=0, chi=0, phi=0)
def test_beta1(self):
self.check_angle("beta", 0, delta=10, nu=0, mu=0, eta=6, chi=0, phi=5)
def test_beta2(self):
self.check_angle("beta", 10, delta=0, nu=10, mu=0, eta=0, chi=0, phi=0)
def test_beta3(self):
self.check_angle("beta", -10, delta=0, nu=-10, mu=0, eta=0, chi=0, phi=0)
def test_beta4(self):
self.check_angle("beta", 5, delta=0, nu=10, mu=5, eta=0, chi=0, phi=0)
# azimuth
def test_naz0(self):
self.check_angle("naz", 0, mu=0, eta=0, chi=0, phi=0)
def test_naz1(self):
self.check_angle("naz", 0, mu=0, eta=0, chi=0, phi=10)
def test_naz3(self):
self.check_angle("naz", 0, mu=10, eta=0, chi=0, phi=10)
def test_naz4(self):
self.check_angle("naz", 2, mu=0, eta=0, chi=2, phi=0)
def test_naz5(self):
self.check_angle("naz", -2, mu=0, eta=0, chi=-2, phi=0)
# tau
def test_tau0(self):
self.check_angle("tau", 0, mu=0, delta=0, nu=0, eta=0, chi=0, phi=0)
# self.check_angle('tau_from_dot_product', 90, mu=0, delta=0,
# nu=0, eta=0, chi=0, phi=0)
def test_tau1(self):
self.check_angle("tau", 90, mu=0, delta=20, nu=0, eta=10, chi=0, phi=0)
# self.check_angle('tau_from_dot_product', 90, mu=0, delta=20,
# nu=0, eta=10, chi=0, phi=0)
def test_tau2(self):
self.check_angle("tau", 90, mu=0, delta=20, nu=0, eta=10, chi=0, phi=3)
# self.check_angle('tau_from_dot_product', 90, mu=0, delta=20,
# nu=0, eta=10, chi=0, phi=3)
def test_tau3(self):
self.check_angle("tau", 88, mu=0, delta=20, nu=0, eta=10, chi=2, phi=0)
# self.check_angle('tau_from_dot_product', 88, mu=0, delta=20,
# nu=0, eta=10, chi=2, phi=0)
def test_tau4(self):
self.check_angle("tau", 92, mu=0, delta=20, nu=0, eta=10, chi=-2, phi=0)
# self.check_angle('tau_from_dot_product', 92, mu=0, delta=20,
# nu=0, eta=10, chi=-2, phi=0)
def test_tau5(self):
self.check_angle("tau", 10, mu=0, delta=0, nu=20, eta=0, chi=0, phi=0)
# self.check_angle('tau_from_dot_product', 10, mu=0, delta=0,
# nu=20, eta=0, chi=0, phi=0)
# psi
def test_psi0(self):
pos = Position()
assert isnan(self.calc.get_virtual_angles(pos)["psi"])
def test_psi1(self):
self.check_angle("psi", 90, mu=0, delta=11, nu=0, eta=0, chi=0, phi=0)
def test_psi2(self):
self.check_angle("psi", 100, mu=10, delta=0.001, nu=0, eta=0, chi=0, phi=0)
def test_psi3(self):
self.check_angle("psi", 80, mu=-10, delta=0.001, nu=0, eta=0, chi=0, phi=0)
def test_psi4(self):
self.check_angle("psi", 90, mu=0, delta=11, nu=0, eta=0, chi=0, phi=12.3)
def test_psi5(self):
# self.check_angle('psi', 0, mu=10, delta=.00000001,
# nu=0, eta=0, chi=90, phi=0)
pos = Position(mu=0, delta=0, nu=0, eta=0, chi=90, phi=0)
assert isnan(self.calc.get_virtual_angles(pos)["psi"])
def test_psi6(self):
self.check_angle("psi", 90, mu=0, delta=0.001, nu=0, eta=90, chi=0, phi=0)
def test_psi7(self):
self.check_angle("psi", 92, mu=0, delta=0.001, nu=0, eta=90, chi=2, phi=0)
def test_psi8(self):
self.check_angle("psi", 88, mu=0, delta=0.001, nu=0, eta=90, chi=-2, phi=0)
|
import sys, os
from faster.fastlist import fastlist
import cProfile
from pstats import SortKey
l = fastlist.from_sequence(list(range(10)))
l.insert(0, 4)
print(l)
print(l.count(5))
print(l)
del l
# mult = 2 ** 24
# length = 15
# print('mult:', mult)
# print('size:', mult * length)
# print('\nfastlist:')
# cProfile.runctx('l *= m', {'l': fastlist.from_sequence(list(range(length))), 'm': mult}, {})
# print('\nlist:')
# cProfile.runctx('l *= m', {'l': list(range(15)), 'm': mult}, {})
print('no segfault')
|
from CTFe.views.auth_view import router as auth_router
from CTFe.views.user_view import router as user_router
from CTFe.views.team_view import router as team_router
from CTFe.views.challenge_view import router as challenge_router
from CTFe.views.attempt_view import router as attempt_router
from CTFe.views.player_view import router as player_router
from CTFe.views.contributor_view import router as contributor_router
|
"""
Unit tests for the User Controller.
"""
from datetime import datetime
from unittest.mock import patch
from app.controllers.user_controller import UserController
from app.models import User, Role
from app.models.user_role import UserRole
from app.repositories import UserRepo, RoleRepo, UserRoleRepo
from tests.base_test_case import BaseTestCase
from factories.user_factory import UserFactory
from factories import RoleFactory, UserRoleFactory, PermissionFactory
from factories.location_factory import LocationFactory
from app.utils.auth import Auth
class TestUserController(BaseTestCase):
"""
UserController test class.
"""
def setUp(self):
self.BaseSetUp()
self.mock_role = Role(
id=1,
name="Pass",
help="help",
)
self.mock_user_role = UserRole(
id=1,
created_at=datetime.now(),
updated_at=datetime.now(),
role_id=1,
role=self.mock_role,
user_id=1,
is_active=True,
is_deleted=False,
)
self.mock_user = User(
id=1,
first_name="test",
last_name="test",
gender="male",
password="test",
email="user1@user.com",
is_active=True,
is_deleted=False,
created_at=datetime.now(),
updated_at=datetime.now(),
)
self.mock_user2 = User(
id=1,
first_name="test",
last_name="test",
gender="male",
password="test",
email="user2@user.com",
is_active=True,
is_deleted=False,
created_at=datetime.now(),
updated_at=datetime.now(),
)
def tearDown(self):
self.BaseTearDown()
@patch.object(UserController, "pagination_meta")
@patch("app.repositories.user_role_repo.UserRoleRepo.filter_by")
@patch.object(UserRepo, "find_first")
def test_list_admin_users_ok_response(
self, mock_user_repo_find_first, mock_filter_by, mock_pagination_meta
):
"""
Test list_admin_users OK response.
"""
# Arrange
with self.app.app_context():
mock_filter_by.return_value.items = [
self.mock_user_role,
]
mock_user_repo_find_first.return_value = self.mock_user
mock_pagination_meta.return_value = {
"total_rows": 1,
"total_pages": 1,
"current_page": 1,
"next_page": None,
"prev_page": None,
}
user_controller = UserController(self.request_context)
# Act
result = user_controller.list_admin_users()
# Assert
assert result.status_code == 200
assert result.get_json()["msg"] == "OK"
assert result.get_json()["payload"]["meta"]["current_page"] == 1
assert result.get_json()["payload"]["meta"]["next_page"] is None
@patch.object(Auth, "get_location")
@patch.object(UserController, "request_params")
@patch.object(RoleRepo, "find_first")
@patch.object(UserRepo, "exists")
@patch.object(UserRepo, "new_user")
@patch.object(UserRoleRepo, "new_user_role")
def test_create_user_succeeds(
self,
mock_user_role_repo_new_user_role,
mock_user_repo_new_user,
mock_user_repo_exists,
mock_role_repo_find_first,
mock_request_params,
mock_get_location,
):
location = LocationFactory()
role = RoleFactory(name="test_role")
with self.app.app_context():
mock_get_location.return_value = location.id
mock_role_repo_find_first.return_value = self.mock_role
mock_user_repo_exists.return_value = None
mock_user_repo_new_user.return_value = self.mock_user2
mock_user_role_repo_new_user_role.return_value = self.mock_user_role
mock_request_params.return_value = [
"Joseph",
"Serunjogi",
"tst@tst.com",
role.id,
"male",
str(datetime.now()),
1,
"password",
]
user_controller = UserController(self.request_context)
# Act
result = user_controller.create_user()
# Assert
assert result.status_code == 201
assert result.get_json()["msg"] == "OK"
@patch.object(UserController, "request_params")
def test_create_user_method_handles_user_creation_with_non_existent_role_id(
self, mock_request_params
):
with self.app.app_context():
user = UserFactory()
role = RoleFactory(name="test_role")
UserRoleFactory(role_id=role.id, user_id=user.id)
non_existent_role_id = 100
mock_request_params.return_value = [
"Joseph",
"Serunjogi",
"tst@tst.com",
non_existent_role_id,
"male",
str(datetime.now()),
1,
"password",
]
user_controller = UserController(self.request_context)
response = user_controller.create_user()
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.get_json()["msg"],
"Role with userTypeId(roleId) {} does not exist".format(
non_existent_role_id
),
)
@patch.object(UserRepo, "find_first")
def test_list_user_succeeds(
self,
mock_user_repo_find_first,
):
with self.app.app_context():
role = RoleFactory()
UserRoleFactory(user_id=self.mock_user.id, role_id=role.id)
PermissionFactory.create(keyword="view_users", role=role)
mock_user_repo_find_first.return_value = self.mock_user
user_controller = UserController(self.request_context)
response = user_controller.list_user(id=self.mock_user.id)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get_json()["msg"], "OK")
self.assertEqual(
response.get_json()["payload"]["user"]["first_name"],
self.mock_user.first_name,
)
self.assertEqual(
response.get_json()["payload"]["user"]["last_name"],
self.mock_user.last_name,
)
@patch.object(UserRepo, "find_first")
def test_list_user_when_user_found_succeeds(
self,
mock_user_repo_find_first,
):
with self.app.app_context():
user_controller = UserController(self.request_context)
mock_user_repo_find_first.return_value = self.mock_user
response = user_controller.list_user(id=1)
self.assertEqual(response.status_code, 200)
@patch.object(Auth, "get_location")
@patch.object(UserController, "request_params")
@patch.object(RoleRepo, "find_first")
@patch.object(UserRepo, "exists")
# @patch.object(UserRepo, "new_user")
@patch.object(UserRoleRepo, "new_user_role")
def test_create_user_fails_for_existing_user(
self,
mock_user_role_repo_new_user_role,
# mock_user_repo_new_user,
mock_user_repo_exists,
mock_role_repo_find_first,
mock_request_params,
mock_get_location,
):
location = LocationFactory()
role = RoleFactory(name="test_role")
with self.app.app_context():
mock_get_location.return_value = location.id
mock_role_repo_find_first.return_value = self.mock_role
mock_user_repo_exists.return_value = self.mock_user2
# mock_user_repo_new_user.return_value = None
mock_user_role_repo_new_user_role.return_value = self.mock_user_role
mock_request_params.return_value = [
"Joseph",
"Serunjogi",
self.mock_user2.email,
role.id,
"male",
str(datetime.now()),
1,
"password",
]
user_controller = UserController(self.request_context)
# Act
result = user_controller.create_user()
print(result)
print(result.get_json())
# Assert
assert result.status_code == 400
assert (
result.get_json()["msg"]
== f"User with email '{self.mock_user2.email}' already exists"
)
|
from django import forms
class ContactForm(forms.Form):
""" Create form for contact page """
email = forms.EmailField(
required=True,
label='',
widget=forms.EmailInput(attrs={
'class': 'form-control border-black rounded-0 mb-3',
'placeholder': 'Email',
})
)
message = forms.CharField(
required=True,
label='',
widget=forms.Textarea(attrs={
'rows': 5,
'class': 'form-control border-black rounded-0 mb-3',
'placeholder': 'Type your message here...',
})
)
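# Illustrative usage sketch (hypothetical view, not part of this app): the form
# would typically be validated in a view and the cleaned data used to send mail.
#
#   from django.shortcuts import render
#
#   def contact(request):
#       form = ContactForm(request.POST or None)
#       if request.method == 'POST' and form.is_valid():
#           email = form.cleaned_data['email']
#           message = form.cleaned_data['message']
#           # e.g. send the message with django.core.mail.send_mail here
#       return render(request, 'contact/contact.html', {'form': form})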
|
def select_rows(df, where):
"""Performs a series of rows selection in a DataFrame
Pandas provides several methods to select rows.
Using lambdas allows to select rows in a uniform and
more flexible way.
Parameters
----------
df: DataFrame
DataFrame whose rows should be selected
where: dict
Dictionary with DataFrame columns name as keys
and predicates (as lambdas) as values.
For instance: {'a': lambda d: d == 1, 'b': lambda d: d == 3}
Returns
-------
Pandas DataFrame
New DataFrame with selected rows
"""
df = df.copy()
for col, f in where.items():
df = df[df[col].apply(f)]
return df
def chunk(len_array, nb_chunks=3):
"""Chunks an array in a list of several equal (when odd) length chunks
Parameters
----------
len_array: int
Length of the array to be chunked
nb_chunks: int
Number of chunks
Returns
-------
Iterator
e.g list(chunk(10, 3)) would return [(0, 3), (3, 6), (6, 10)]
"""
    assert nb_chunks <= len_array, "nb_chunks should be less than or equal to len_array"
step = len_array // nb_chunks
bounds = [x*step for x in range(nb_chunks)] + [len_array]
return zip(bounds, bounds[1:])
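if __name__ == "__main__":
    # Illustrative demo (an assumption, not part of the original module):
    # select_rows keeps only rows whose columns satisfy the given lambdas, and
    # chunk yields (start, end) index bounds that cover the whole length.
    import pandas as pd
    df = pd.DataFrame({"a": [1, 1, 2], "b": [3, 4, 3]})
    print(select_rows(df, {"a": lambda d: d == 1, "b": lambda d: d == 3}))
    print(list(chunk(10, 3)))  # [(0, 3), (3, 6), (6, 10)]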
|
def fibonacci(n):
if n <= 1:
return 1
else:
return fibonacci(n-2) + fibonacci(n-1)
def factorial(n):
    if n == 0:
        return 1
    else:
        return n * factorial(n - 1)
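if __name__ == "__main__":
    # Quick sanity checks (added for illustration): with this convention
    # fibonacci(0) == fibonacci(1) == 1, so the sequence runs 1, 1, 2, 3, 5, 8, ...
    print(fibonacci(5))   # 8
    print(factorial(5))   # 120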
|
import sys
import math
import scipy as sp
import scipy.stats
from scipy.stats import norm
from scipy.stats import poisson, binom, hypergeom
from permute.utils import binom_conf_interval, hypergeom_conf_interval
from numpy.random import choice
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sprt
from joblib import Parallel, delayed
import multiprocessing
import csv
num_cores = multiprocessing.cpu_count()
if len(sys.argv) != 2:
print('usage: python plot_data.py [data_file].csv')
sys.exit()
def main():
data = {}
for line in csv.DictReader(open(sys.argv[1])):
alpha = float(line['alpha'])
prop_w = float(line['prop_w'])
total = int(line['total'])
to_set = {
'bpa_75': float(line['bpa_75']),
'bpa_90': float(line['bpa_90']),
'bpa_99': float(line['bpa_99']),
'bbp_75': float(line['bbp_75']),
'bbp_90': float(line['bbp_90']),
'bbp_99': float(line['bbp_99']),
'bbp_seq_75': float(line['bbp_seq_75']),
'bbp_seq_90': float(line['bbp_seq_90']),
'bbp_seq_99': float(line['bbp_seq_99']),
}
if alpha in data:
if prop_w in data[alpha]:
data[alpha][prop_w][total] = to_set
else:
data[alpha][prop_w] = {total: to_set}
else:
data[alpha] = {prop_w: {total: to_set}}
if data[alpha][prop_w][total]['bbp_90'] == None:
data[alpha][prop_w][total]['bbp_90'] = .01
prop_ws = []
for i in range(1, 50):
prop_ws.append((100-i)/100.0)
prop_ws.reverse()
cols = ['Alpha {}'.format(col) for col in sorted(data.keys(), reverse=True)]
rows = ['Quant {}'.format(row) for row in ['75', '90', '99']]
fig, axes = plt.subplots(nrows=3, ncols=3)
i = 1
tots = [10000, 1000000]
for quant in ['75', '90', '99']:
#ax1 = plt.subplot(5, 3, 0)
for alpha in data:
bpa = []
bbp = []
bbp_seq = []
bpa_1m = []
bbp_1m = []
bbp_seq_1m = []
for prop_w in prop_ws:
bpa.append(data[alpha][prop_w][10000]['bpa_' + quant])
bbp.append(data[alpha][prop_w][10000]['bbp_' + quant])
bbp_seq.append(data[alpha][prop_w][10000]['bbp_seq_' + quant])
bpa_1m.append(data[alpha][prop_w][1000000]['bpa_' + quant])
bbp_1m.append(data[alpha][prop_w][1000000]['bbp_' + quant])
bbp_seq_1m.append(data[alpha][prop_w][1000000]['bbp_seq_' + quant])
ax1 = plt.subplot(3, 3, i)
ax1.invert_xaxis()
#plt.title('Quant: ' + str(quant) + ' Alpha: ' + str(alpha), fontsize=12)
l1 = ax1.semilogy(prop_ws, bpa, label='BPA10k')
l2 = ax1.semilogy(prop_ws, bbp, label='BBP10k')
l3 = ax1.semilogy(prop_ws, bbp_seq, label='BBPSEQ10k')
l3 = ax1.semilogy(prop_ws, bpa_1m, label='BPA1M')
l4 = ax1.semilogy(prop_ws, bbp_1m, label='BBP1M')
l4 = ax1.semilogy(prop_ws, bbp_seq_1m, label='BBPSEQ1M')
if i%3 != 0:
ax1.set_yticklabels([])
else:
ax1.yaxis.tick_right()
if i%3 == 1:
                ax1.set_ylabel(rows[i // 3], rotation=90)  # integer index into rows (i is 1, 4 or 7 here)
if i < 7:
ax1.set_xticklabels([])
if i < 4:
ax1.set_title(cols[i-1])
if i == 8:
ax1.set_xlabel('Margin', size='large')
i += 1
fig.text(0.04, 0.5, '% ballots', va='center', rotation='vertical', size='large')
handles, labels = ax1.get_legend_handles_labels()
art = []
lgd = ax1.legend(handles, labels, loc=9, ncol=6, fontsize=10, bbox_to_anchor=(-.75, -.35))
art.append(lgd)
plt.savefig('quant_plot.png', additional_artists=art, bbox_inches="tight")
if __name__ == "__main__":
main()
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyMpi4py(PythonPackage):
"""This package provides Python bindings for the Message Passing
Interface (MPI) standard. It is implemented on top of the
MPI-1/MPI-2 specification and exposes an API which grounds on the
standard MPI-2 C++ bindings.
"""
homepage = "https://pypi.python.org/pypi/mpi4py"
url = "https://pypi.io/packages/source/m/mpi4py/mpi4py-3.0.0.tar.gz"
git = "https://github.com/mpi4py/mpi4py.git"
version('develop', branch='master')
version('3.0.1', sha256='6549a5b81931303baf6600fa2e3bc04d8bd1d5c82f3c21379d0d64a9abcca851')
version('3.0.0', sha256='b457b02d85bdd9a4775a097fac5234a20397b43e073f14d9e29b6cd78c68efd7')
version('2.0.0', sha256='6543a05851a7aa1e6d165e673d422ba24e45c41e4221f0993fe1e5924a00cb81')
version('1.3.1', sha256='e7bd2044aaac5a6ea87a87b2ecc73b310bb6efe5026031e33067ea3c2efc3507')
depends_on('python@2.7:2.8,3.3:')
depends_on('py-setuptools', type='build')
depends_on('mpi')
depends_on('py-cython', when='@develop', type='build')
def build_args(self, spec, prefix):
return ['--mpicc=%s -shared' % spec['mpi'].mpicc]
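# Illustrative install sketch (assumed Spack CLI usage, not part of this
# package definition): a version and MPI provider can be selected explicitly,
# e.g. `spack install py-mpi4py@3.0.1 ^openmpi`.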
|
from core.advbase import *
import adv.g_cleo
def module():
return Gala_Cleo
class Gala_Cleo(adv.g_cleo.Gala_Cleo):
def prerun(self):
super().prerun()
self.a1_zones = []
self.gleo_count = 4
self.wide = 'self'
self.comment = '{} GCleo'.format(self.gleo_count)
def fs_proc_alt(self, e):
if self.a1_buffed:
while len(self.a1_zones) > 0 and not self.a1_zones[0].get():
self.a1_zones.pop(0)
for _ in range(self.gleo_count):
if self.wide == 'team':
buff = Teambuff('a1_str',0.25,10)
else:
buff = Selfbuff('a1_str',0.25,10)
buff.bufftime = buff.nobufftime
self.a1_zones.append(buff)
if len(self.a1_zones) > 4:
self.a1_zones.pop(0).off()
buff.on()
def s2_proc(self, e):
for _ in range(self.gleo_count):
if self.wide == 'team':
Debuff('s2', 0.10, 20).on()
else:
Selfbuff('s2', -0.10, 20, mtype='def').on()
if __name__ == '__main__':
    import sys  # needed for sys.argv below
    from core.simulate import test_with_argv
    test_with_argv(Gala_Cleo, *sys.argv)
|
#!/usr/bin/python3
# This program is the basic form of the sumobot program
# 1. Connect sensors and initialise motors
# 2. Wait 3 seconds before starting
from time import sleep
import sys, os
from ev3dev.ev3 import *
#Connect motors
rightMotor = LargeMotor(OUTPUT_B)
leftMotor = LargeMotor(OUTPUT_C)
# Connect touch sensors.
ts1 = TouchSensor(INPUT_1); assert ts1.connected
ts4 = TouchSensor(INPUT_4); assert ts4.connected
us = UltrasonicSensor(); assert us.connected
gs = GyroSensor(); assert gs.connected
gs.mode = 'GYRO-RATE' # Changing the mode resets the gyro
gs.mode = 'GYRO-ANG' # Set gyro mode to return compass angle
# We will need to check EV3 buttons state.
btn = Button()
def start():
#delay
sleep(3);
#scan for the robot by spinning CLOCKWISE for 90 DEGREES
direction = gs.value();
while (direction < 90):
rightMotor.run_direct(duty_cycle_sp = -75)
leftMotor.run_direct(duty_cycle_sp = 75)
direction = gs.value();
start()
while not btn.any():
# Keep the robot going in the same direction
direction = gs.value();
# print direction
if direction > 5:
# print('right')
rightMotor.duty_cycle_sp = 5
elif direction < -5:
# print('left')
leftMotor.duty_cycle_sp = 5
else:
leftMotor.duty_cycle_sp = 75
rightMotor.duty_cycle_sp = 75
rightMotor.stop()
leftMotor.stop()
|
"""
MIT License
Copyright (c) 2021 Visperi
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from discord.ext import commands
class HelpCog(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@staticmethod
async def send_help(ctx: commands.Context, invoked_with: str, help_dict: dict):
description = help_dict["description"]
additional = help_dict["additional"] or "-"
example = help_dict["example"] or "-"
command_help = f"**{invoked_with}**\n" \
f"{description}\n\n" \
f"**Additional:** {additional}\n" \
f"**Example:** {example}"
await ctx.send(command_help)
@commands.group(name="help")
async def command_help(self, ctx: commands.Context):
message_content = ctx.message.content
if ctx.invoked_subcommand is None and message_content != "!help":
await ctx.send("No help found for such command.")
return
elif message_content == "!help":
await ctx.send("`!info`: Basic info about the bot and latest updates\n"
"`!commands`: Get a list of all available commands\n"
"`!scommands`: Get a list of all custom commands for this server\n"
"`!help <command name>`: Get instructions for one command")
@command_help.command(name="me")
async def get_user_info(self, ctx: commands.Context):
help_dict = {"description": "Fetch some data related to the command invoker and represent it in a nice "
"embed.",
"additional": None,
"example": "`!me`"}
await self.send_help(ctx, ctx.invoked_with, help_dict)
@command_help.command(name="info")
async def get_bot_info(self, ctx: commands.Context):
help_dict = {"description": "Get some basic information and latest updates related to this bot.",
"additional": None,
"example": "`!info`"}
await self.send_help(ctx, ctx.invoked_with, help_dict)
def setup(bot: commands.Bot):
bot.add_cog(HelpCog(bot))
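# Illustrative wiring sketch (discord.py 1.x-style loading; the module path and
# prefix are assumptions): the cog is normally loaded as an extension with the
# built-in help command disabled so the custom "!help" group above takes over.
#
#   from discord.ext import commands
#
#   bot = commands.Bot(command_prefix="!", help_command=None)
#   bot.load_extension("cogs.help_cog")  # hypothetical module path
#   bot.run("TOKEN")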
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/proto/spanner/v1/query_plan.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/cloud/proto/spanner/v1/query_plan.proto',
package='google.spanner.v1',
syntax='proto3',
serialized_pb=_b('\n.google/cloud/proto/spanner/v1/query_plan.proto\x12\x11google.spanner.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xf8\x04\n\x08PlanNode\x12\r\n\x05index\x18\x01 \x01(\x05\x12.\n\x04kind\x18\x02 \x01(\x0e\x32 .google.spanner.v1.PlanNode.Kind\x12\x14\n\x0c\x64isplay_name\x18\x03 \x01(\t\x12:\n\x0b\x63hild_links\x18\x04 \x03(\x0b\x32%.google.spanner.v1.PlanNode.ChildLink\x12M\n\x14short_representation\x18\x05 \x01(\x0b\x32/.google.spanner.v1.PlanNode.ShortRepresentation\x12)\n\x08metadata\x18\x06 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x30\n\x0f\x65xecution_stats\x18\x07 \x01(\x0b\x32\x17.google.protobuf.Struct\x1a@\n\tChildLink\x12\x13\n\x0b\x63hild_index\x18\x01 \x01(\x05\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x10\n\x08variable\x18\x03 \x01(\t\x1a\xb2\x01\n\x13ShortRepresentation\x12\x13\n\x0b\x64\x65scription\x18\x01 \x01(\t\x12S\n\nsubqueries\x18\x02 \x03(\x0b\x32?.google.spanner.v1.PlanNode.ShortRepresentation.SubqueriesEntry\x1a\x31\n\x0fSubqueriesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"8\n\x04Kind\x12\x14\n\x10KIND_UNSPECIFIED\x10\x00\x12\x0e\n\nRELATIONAL\x10\x01\x12\n\n\x06SCALAR\x10\x02\"<\n\tQueryPlan\x12/\n\nplan_nodes\x18\x01 \x03(\x0b\x32\x1b.google.spanner.v1.PlanNodeB}\n\x15\x63om.google.spanner.v1B\x0eQueryPlanProtoP\x01Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\xaa\x02\x17Google.Cloud.Spanner.V1b\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_PLANNODE_KIND = _descriptor.EnumDescriptor(
name='Kind',
full_name='google.spanner.v1.PlanNode.Kind',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='KIND_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RELATIONAL', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SCALAR', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=706,
serialized_end=762,
)
_sym_db.RegisterEnumDescriptor(_PLANNODE_KIND)
_PLANNODE_CHILDLINK = _descriptor.Descriptor(
name='ChildLink',
full_name='google.spanner.v1.PlanNode.ChildLink',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='child_index', full_name='google.spanner.v1.PlanNode.ChildLink.child_index', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='google.spanner.v1.PlanNode.ChildLink.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='variable', full_name='google.spanner.v1.PlanNode.ChildLink.variable', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=459,
serialized_end=523,
)
_PLANNODE_SHORTREPRESENTATION_SUBQUERIESENTRY = _descriptor.Descriptor(
name='SubqueriesEntry',
full_name='google.spanner.v1.PlanNode.ShortRepresentation.SubqueriesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='google.spanner.v1.PlanNode.ShortRepresentation.SubqueriesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='google.spanner.v1.PlanNode.ShortRepresentation.SubqueriesEntry.value', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=655,
serialized_end=704,
)
_PLANNODE_SHORTREPRESENTATION = _descriptor.Descriptor(
name='ShortRepresentation',
full_name='google.spanner.v1.PlanNode.ShortRepresentation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='description', full_name='google.spanner.v1.PlanNode.ShortRepresentation.description', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='subqueries', full_name='google.spanner.v1.PlanNode.ShortRepresentation.subqueries', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_PLANNODE_SHORTREPRESENTATION_SUBQUERIESENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=526,
serialized_end=704,
)
_PLANNODE = _descriptor.Descriptor(
name='PlanNode',
full_name='google.spanner.v1.PlanNode',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='index', full_name='google.spanner.v1.PlanNode.index', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='kind', full_name='google.spanner.v1.PlanNode.kind', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='display_name', full_name='google.spanner.v1.PlanNode.display_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='child_links', full_name='google.spanner.v1.PlanNode.child_links', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='short_representation', full_name='google.spanner.v1.PlanNode.short_representation', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='metadata', full_name='google.spanner.v1.PlanNode.metadata', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='execution_stats', full_name='google.spanner.v1.PlanNode.execution_stats', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_PLANNODE_CHILDLINK, _PLANNODE_SHORTREPRESENTATION, ],
enum_types=[
_PLANNODE_KIND,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=130,
serialized_end=762,
)
_QUERYPLAN = _descriptor.Descriptor(
name='QueryPlan',
full_name='google.spanner.v1.QueryPlan',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='plan_nodes', full_name='google.spanner.v1.QueryPlan.plan_nodes', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=764,
serialized_end=824,
)
_PLANNODE_CHILDLINK.containing_type = _PLANNODE
_PLANNODE_SHORTREPRESENTATION_SUBQUERIESENTRY.containing_type = _PLANNODE_SHORTREPRESENTATION
_PLANNODE_SHORTREPRESENTATION.fields_by_name['subqueries'].message_type = _PLANNODE_SHORTREPRESENTATION_SUBQUERIESENTRY
_PLANNODE_SHORTREPRESENTATION.containing_type = _PLANNODE
_PLANNODE.fields_by_name['kind'].enum_type = _PLANNODE_KIND
_PLANNODE.fields_by_name['child_links'].message_type = _PLANNODE_CHILDLINK
_PLANNODE.fields_by_name['short_representation'].message_type = _PLANNODE_SHORTREPRESENTATION
_PLANNODE.fields_by_name['metadata'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_PLANNODE.fields_by_name['execution_stats'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_PLANNODE_KIND.containing_type = _PLANNODE
_QUERYPLAN.fields_by_name['plan_nodes'].message_type = _PLANNODE
DESCRIPTOR.message_types_by_name['PlanNode'] = _PLANNODE
DESCRIPTOR.message_types_by_name['QueryPlan'] = _QUERYPLAN
PlanNode = _reflection.GeneratedProtocolMessageType('PlanNode', (_message.Message,), dict(
ChildLink = _reflection.GeneratedProtocolMessageType('ChildLink', (_message.Message,), dict(
DESCRIPTOR = _PLANNODE_CHILDLINK,
__module__ = 'google.cloud.proto.spanner.v1.query_plan_pb2'
# @@protoc_insertion_point(class_scope:google.spanner.v1.PlanNode.ChildLink)
))
,
ShortRepresentation = _reflection.GeneratedProtocolMessageType('ShortRepresentation', (_message.Message,), dict(
SubqueriesEntry = _reflection.GeneratedProtocolMessageType('SubqueriesEntry', (_message.Message,), dict(
DESCRIPTOR = _PLANNODE_SHORTREPRESENTATION_SUBQUERIESENTRY,
__module__ = 'google.cloud.proto.spanner.v1.query_plan_pb2'
# @@protoc_insertion_point(class_scope:google.spanner.v1.PlanNode.ShortRepresentation.SubqueriesEntry)
))
,
DESCRIPTOR = _PLANNODE_SHORTREPRESENTATION,
__module__ = 'google.cloud.proto.spanner.v1.query_plan_pb2'
# @@protoc_insertion_point(class_scope:google.spanner.v1.PlanNode.ShortRepresentation)
))
,
DESCRIPTOR = _PLANNODE,
__module__ = 'google.cloud.proto.spanner.v1.query_plan_pb2'
# @@protoc_insertion_point(class_scope:google.spanner.v1.PlanNode)
))
_sym_db.RegisterMessage(PlanNode)
_sym_db.RegisterMessage(PlanNode.ChildLink)
_sym_db.RegisterMessage(PlanNode.ShortRepresentation)
_sym_db.RegisterMessage(PlanNode.ShortRepresentation.SubqueriesEntry)
QueryPlan = _reflection.GeneratedProtocolMessageType('QueryPlan', (_message.Message,), dict(
DESCRIPTOR = _QUERYPLAN,
__module__ = 'google.cloud.proto.spanner.v1.query_plan_pb2'
# @@protoc_insertion_point(class_scope:google.spanner.v1.QueryPlan)
))
_sym_db.RegisterMessage(QueryPlan)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\025com.google.spanner.v1B\016QueryPlanProtoP\001Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\252\002\027Google.Cloud.Spanner.V1'))
_PLANNODE_SHORTREPRESENTATION_SUBQUERIESENTRY.has_options = True
_PLANNODE_SHORTREPRESENTATION_SUBQUERIESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
|
# Copyright 2017 University of Maryland.
#
# This file is part of Sesame. It is subject to the license terms in the file
# LICENSE.rst found in the top-level directory of this distribution.
|
import os
from setuptools import setup, find_packages
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
if os.path.exists('README.txt'):
README = open('README.txt').read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='wagtail-simple-math-captcha',
version='0.1.2',
packages=find_packages(exclude=[]),
install_requires=['wagtail', 'django-simple-math-captcha'],
include_package_data=True,
license='BSD License',
description='A simple math captcha field for Wagtail Form Pages based on Django Simple Math Captcha.',
long_description=README,
url='https://bitbucket.org/jordanmarkov/wagtail-simple-math-captcha',
download_url='https://bitbucket.org/jordanmarkov/wagtail-simple-math-captcha/get/0.1.2.tar.gz',
keywords=['django', 'wagtail', 'cms', 'captcha'],
author='Jordan Markov',
author_email='jmarkov@gmail.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
# Generated by Django 4.0.3 on 2022-03-09 03:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('username', models.CharField(default='user0001', max_length=30, unique=True)),
('email', models.CharField(max_length=30, unique=True)),
('password', models.CharField(max_length=30)),
('is_active', models.BooleanField(default=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('context', models.CharField(blank=True, max_length=256, null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.CharField(max_length=255)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='posts.post')),
],
),
]
|
import logging
import random
import time
import requests
from stem import Signal
from stem.control import Controller
from stem.util.log import get_logger
logger = get_logger()
logger.propagate = False
# site to get ip
IP_CHECK_SERVICE = 'http://icanhazip.com/'
class TorController:
def __init__(self, allow_reuse_ip_after: int = 10):
"""Creates a new instance of TorController.
        Keyword arguments:
        allow_reuse_ip_after -- Number of other IPs that must be used before an already used IP can be reused. If 0, there is no IP reuse control (default 10).
"""
self.allow_reuse_ip_after = allow_reuse_ip_after
self.used_ips = list()
self.proxies = {'http': 'socks5://127.0.0.1:9050',
'https': 'socks5://127.0.0.1:9050'}
self.renew_ip()
    def get_ip(self) -> str:
        """Returns the current IP used by Tor, or '' if it cannot be retrieved."""
        with requests.Session() as session:
            r = session.get(IP_CHECK_SERVICE, proxies=self.proxies)
            # The context manager closes the session, so no explicit close is needed.
            if r.ok:
                return r.text.replace('\n', '')
        return ''
def change_ip(self) -> None:
"""Send IP change signal to Tor."""
with Controller.from_port(port=9051) as controller:
controller.authenticate()
controller.signal(Signal.NEWNYM)
    def renew_ip(self) -> str:
        """Change Tor's IP.
        Returns the new IP, or '' if it is not possible to change the IP.
        """
new_ip = None
# Try to change the IP 10 times
for _ in range(10):
self.change_ip()
new_ip = self.get_ip()
# Waits for possible IP change
waiting = 0
while waiting <= 30:
if new_ip in self.used_ips:
waiting += 2.5
time.sleep(2.5)
new_ip = self.get_ip()
if not new_ip:
break
else:
break
# If we can recover the IP, check if it has already been used
if new_ip:
# Controls IP reuse
if self.allow_reuse_ip_after > 0:
if len(self.used_ips) == self.allow_reuse_ip_after:
del self.used_ips[0]
self.used_ips.append(new_ip)
return new_ip
# Wait a random time to try again
time.sleep(random.randint(5,30))
# Could not change IP
return ''
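# Illustrative usage sketch (assumes a local Tor service with SOCKS on port 9050
# and the control port on 9051, as hard-coded above):
#
#   tor = TorController(allow_reuse_ip_after=10)
#   print('Current exit IP:', tor.get_ip())
#   tor.renew_ip()  # ask Tor for a new circuit and wait for a fresh IP
#   requests.get('https://example.com', proxies=tor.proxies)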
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
input params
"""
__author__ = "Saeed Moghimi"
__copyright__ = "Copyright 2017, UCAR/NOAA"
__license__ = "GPL"
__version__ = "1.0"
__email__ = "moghimis@gmail.com"
from pynmd.plotting.vars_param import *
from collections import defaultdict
import datetime
cases = defaultdict(dict)
#### INPUTS ####
#storm_name = 'SAN'
storm_name = 'SANDY'
storm_year = '2012'
# map_plot_options
plot_adc_fort = True
plot_adc_maxele = False
plot_nems_fields = False
plot_forcing_files = False
plot_nwm_files = False
plot_mesh = False
local_bias_cor = True
#HWM proximity limit
prox_max = 0.004
base_dir_sm = '/scratch2/COASTAL/coastal/save/Saeed.Moghimi/noscrub/01_stmp_ca/stmp10_sandy_re/'
hwm_fname = '/scratch2/COASTAL/coastal/save/Saeed.Moghimi/models/NEMS/NEMS_inps/01_data/hwm/events/hwm_san.csv'
base_dir_coops_piks = '/scratch2/COASTAL/coastal/save/Saeed.Moghimi/models/NEMS/NEMS_inps/01_data/ccxxxxxxxx/'
base_dir_obs = '/scratch2/COASTAL/coastal/save/Saeed.Moghimi/models/NEMS/NEMS_inps/01_data/coops_ndbc_data/'
nwm_channel_pnts = '/scratch2/COASTAL/coastal/save/Saeed.Moghimi/models/NEMS/NEMS_inps/01_data/nwm_base_info/channel_points/NWM_v1.1_nc_tools_v1/spatialMetadataFiles/nwm_v1.1_geospatial_data_template_channel_point.nc'
nwm_channel_geom = '/scratch2/COASTAL/coastal/save/Saeed.Moghimi/models/NEMS/NEMS_inps/01_data/nwm_base_info/channel_geom/nwm_v1.1/nwm_fcst_points_comid_lat_lon-v1-1_ALL.csv'
nwm_results_dir = '/scratch2/COASTAL/coastal/save/Saeed.Moghimi/models/wrfhydro/test_data/nwm.20180226/'
#ftypes = ['png','pdf']
ftypes = ['png']
san_before_update = False
san_hwrf_update = True
if True:
#Base run only tide
key = 'atm:n-tid:y-wav:n'
cases[key]['dir'] = base_dir_sm + '/a21_SAN_TIDE_v1.1/rt_20180420_h17_m53_s52r550/'
cases[key]['label'] = 'Only tide'
cases[key]['hsig_file'] = None
cases[key]['wdir_file'] = None
key0 = key
if san_before_update:
#ATM2OCN
key = '01-atm:y-tid:y-wav:n'
cases[key]['dir'] = base_dir_sm + '/a52_SAN_ATM2OCN_v1.1/rt_20180423_h14_m04_s14r624/'
cases[key]['label'] = 'ATM2OCN Pre HWRF'
cases[key]['hsig_file'] = None
cases[key]['wdir_file'] = None
#ATM&WAV2OCN
wav_inp_dir = '/scratch4/COASTAL/coastal/save/NAMED_STORMS/SANDY/WW3/'
key = '02-atm:y-tid:y-wav:y-try01'
cases[key]['dir'] = base_dir_sm + '/a53_SAN_ATM_WAV2OCN_v1.0/rt_20180423_h14_m09_s08r130/'
cases[key]['label'] = 'ATM&WAV2OCN Pre HWRF'
cases[key]['hsig_file'] = wav_inp_dir + 'ww3.HWRF.3DVar.2012_hs.nc'
cases[key]['wdir_file'] = wav_inp_dir + 'ww3.HWRF.3DVar.2012_dir.nc'
key1 = key
if san_hwrf_update:
#ATM2OCN
key = '03-atm:y-tid:y-wav:n'
cases[key]['dir'] = base_dir_sm + '/a50_SAN_ATM2OCN_v2.1_new_hwrf_land_mask/rt_20190710_h21_m15_s48r723/'
cases[key]['label'] = 'ATM2OCN Upd HWRF'
cases[key]['hsig_file'] = None
cases[key]['wdir_file'] = None
#key0 = key
#ATM&WAV2OCN
wav_inp_dir = '/scratch4/COASTAL/coastal/save/NAMED_STORMS/SANDY/WW3/'
key = '04-atm:y-tid:y-wav:y-try01'
cases[key]['dir'] = base_dir_sm + '/a70_SAN_ATM_WAV2OCN_v2.1_new_hwrf_land_mask/rt_20190710_h21_m17_s23r169/'
cases[key]['label'] = 'ATM&WAV2OCN Upd HWRF'
cases[key]['hsig_file'] = wav_inp_dir + 'ww3.HWRF.3DVar.2012_hs.nc'
cases[key]['wdir_file'] = wav_inp_dir + 'ww3.HWRF.3DVar.2012_dir.nc'
key1 = key
#out_dir = cases[key1]['dir']+'/../01_post_atm2ocn_wav_nwm/'
out_dir = cases[key1]['dir']+'/../02_post_2019_shachak3/'
#######
defs['elev']['label'] = 'Elev [m]'
dif = False
vec = True
if dif:
if True:
defs['elev']['cmap'] = maps.jetMinWi
defs['elev']['label'] = 'Surge [m]'
defs['elev']['vmin'] = 0
defs['elev']['vmax'] = 5
else:
defs['elev']['label'] = 'Wave set-up [m]'
defs['elev']['cmap'] = maps.jetWoGn()
defs['elev']['vmin'] = -0.5
defs['elev']['vmax'] = 0.5
else:
defs['elev']['cmap'] = maps.jetWoGn()
defs['elev']['label'] = 'Elev [m]'
defs['elev']['vmin'] = -2
defs['elev']['vmax'] = 2
defs['rad']['vmin'] = 0.0
defs['rad']['vmax'] = 0.01
defs['rad']['cmap'] = maps.jetMinWi
if False:
#wind-stress
defs['wind']['vmin'] = 0.0
defs['wind']['vmax'] = 0.01
defs['wind']['label'] = 'Wind force [m$^ \\mathrm{-2}$ s$^ \\mathrm{-2}$] '
else:
#wind vel
defs['wind']['vmin'] = 10
defs['wind']['vmax'] = 30
defs['wind']['label'] = 'Wind Speed [m$^ \\mathrm{}$ s$^ \\mathrm{-1}$] '
defs['wind']['cmap'] = maps.jetMinWi
#added defs
defs['rad' ]['fname'] = 'rads.64.nc'
defs['elev']['fname'] = 'fort.63.nc'
defs['wind']['fname'] = 'fort.74.nc'
varname = 'pmsl'
defs[varname]['vmin'] = 9.2000
defs[varname]['vmax'] = 10.5000
defs[varname]['fname'] = 'fort.73.nc'
defs[varname]['label'] = 'Pressure [mH2O] '
defs[varname]['var'] = 'pressure'
varname = 'hs'
defs[varname]['vmin'] = 1
defs[varname]['vmax'] = 12
track_fname = '/scratch4/COASTAL/coastal/save/Saeed.Moghimi/models/NEMS/NEMS_inps/01_data/tracks/sandy_bal182012.dat'
#For liverpool and andre report
#key = 'atm:y-tid:y-wav:n-main-sm24'
#key1 = key
#cases[key]['dir'] = base_dir_sm + '/a52_IKE_ATM2OCN_v1.1/rt_20170801_h20_m24_s42r389/'
#cases[key]['label'] = 'IKE_HWRF_GFS05d_OC_DA_HSOFS'
#out_dir0 = base_dir_sm + '/a52_IKE_ATM2OCN_v1.1/'+'01_post/maps/'
#with Gustav
#key = 'atm:y-tid:y-wav:n-main-sm60'
#cases[key]['dir'] = base_dir_sm + '/a61_IKE_ATM2OCN_v2.0/rt_20170815_h17_m46_s10r529/'
#cases[key]['label'] = 'IKE_HWRF_GFS05d_OC_HSOFS_Gustav'
#key1 = key
#out_dir0 = base_dir_sm + '/a61_IKE_ATM2OCN_v2.0/'+'01_post/'+cases[key1]['label'] +'/maps/'
#without gustav
#key = 'atm:y-tid:y-wav:n-main-sm20'
#cases[key]['dir'] = base_dir_sm + '/a52_IKE_ATM2OCN_v1.1/rt_20170801_h20_m15_s00r670//'
#cases[key]['label'] = 'IKE_HWRF_GFS05d_OC_HSOFS'
#key1 = key
#############################
#defs['maxele'] = {}
#defs['maxele'] = defs['elev']
#defs['maxele']['fname'] = 'maxele.63.nc'
#
tim_lim = {}
tim_lim['xmin'] = datetime.datetime(2012, 10, 9, 18, 0) + datetime.timedelta(12.5)
tim_lim['xmax'] = datetime.datetime(2012, 10, 9, 18, 0) + datetime.timedelta(25.125)
if False:
#plot maps track
tim_lim['xmin'] = datetime.datetime(2012, 10, 23,1)
#plot map area2
tim_lim['xmin'] = datetime.datetime(2012, 10, 26,23)
tim_lim['xmax'] = datetime.datetime(2012, 10, 31,0)
varnames = ['elev']#,'rad'] #,'maxele']#
#varnames = ['rad'] #,'maxele']
#varnames = ['rad','wind']#,'elev'] #,'maxele']
#varnames = ['elev'] #,'maxele']
#varnames = ['wind']#,'elev'] #,'maxele']
#varnames = ['hs']#,'elev'] #,'maxele']
#varnames = ['elev'] #,'maxele']
#
regions = ['san_area2','san_area','san_newyork','san_track','san_delaware','san_jamaica_bay']
#regions = ['hsofs_region','san_track','san_area2','san_delaware','san_area']
#regions = ['san_jamaica_bay']
#regions = ['san_delaware']#,'san_area2']
#regions = ['san_area'] #anim
#regions = ['san_area'] # HWM
regions = ['san_newyork']
####################
#tim_lim['xmin'] = datetime.datetime(2017, 9, 5,23)
#tim_lim['xmax'] = datetime.datetime(2017, 9, 7,12)
#regions = ['burbuda_zoom' ,'puertorico_shore'] #,'carib_irma']
#####################
#tim_lim['xmin'] = datetime.datetime(2017, 9, 7,11)
#tim_lim['xmax'] = datetime.datetime(2017, 9, 9,12)
#tim_lim['xmin'] = datetime.datetime(2017, 9, 9,10)
#tim_lim['xmax'] = datetime.datetime(2017, 9, 12,12)
#regions = ['cuba_zoom','key_west_zoom']
#####################
#regions = ['ike_region']
#
#tim_lim['xmin'] = datetime.datetime(2017, 9, 6 )
#tim_lim['xmax'] = datetime.datetime(2017, 9, 12 )
#
#latp = 29.2038
#lonp = -92.2285
#i,prox = find_nearest1d(xvec = lon,yvec = lat,xp = lonp,yp = latp)
# for COOPS time series plot
station_selected_list = None
"""
if False:
#ATM2OCN
key = '01-atm:y-tid:y-wav:n'
cases[key]['dir'] = base_dir_sm + '/a52_SAN_ATM2OCN_v2.0/rt_20180510_h12_m53_s33r830/'
cases[key]['label'] = '3DVar'
#key0 = key
key = '03-atm:y-tid:y-wav:n'
cases[key]['dir'] = base_dir_sm + '/a52_SAN_ATM2OCN_v2.0/rt_20180510_h13_m02_s21r175/'
cases[key]['label'] = 'Hybrid'
key = '05-atm:y-tid:y-wav:n'
cases[key]['dir'] = base_dir_sm + '/a52_SAN_ATM2OCN_v2.0/rt_20180510_h13_m06_s43r786/'
cases[key]['label'] = 'Operational'
#key1 = key
key = '07-atm:y-tid:y-wav:n'
cases[key]['dir'] = base_dir_sm + '/a52_SAN_ATM2OCN_v2.0/rt_20180510_h12_m57_s57r209/'
cases[key]['label'] = 'ENS_ch75'
#key1 = key
#ATM&WAV2OCN
wav_inp_dir = '/scratch4/COASTAL/coastal/save/NAMED_STORMS/SANDY/WW3/'
key = '02-atm:y-tid:y-wav:y'
cases[key]['dir'] = base_dir_sm + '/a53_SAN_ATM_WAV2OCN_v2.0/rt_20180510_h13_m15_s13r719/'
cases[key]['label'] = '3DVar WAV'
cases[key]['hsig_file'] = wav_inp_dir + 'ww3.HWRF.3DVar.2012_hs.nc'
cases[key]['wdir_file'] = wav_inp_dir + 'ww3.HWRF.3DVar.2012_dir.nc'
key1 = key
key = '04-atm:y-tid:y-wav:y'
cases[key]['dir'] = base_dir_sm + '/a53_SAN_ATM_WAV2OCN_v2.0/rt_20180510_h13_m23_s44r792/'
cases[key]['label'] = 'Hybrid WAV'
cases[key]['hsig_file'] = wav_inp_dir + 'ww3.HWRF.Hybrid.2012_hs.nc'
cases[key]['wdir_file'] = wav_inp_dir + 'ww3.HWRF.Hybrid.2012_dir.nc'
key = '06-atm:y-tid:y-wav:y'
cases[key]['dir'] = base_dir_sm + '/a53_SAN_ATM_WAV2OCN_v2.0/rt_20180510_h13_m28_s11r309/'
cases[key]['label'] = 'Operational WAV'
cases[key]['hsig_file'] = wav_inp_dir + 'ww3.HWRF.Operational.2012_hs.nc'
cases[key]['wdir_file'] = wav_inp_dir + 'ww3.HWRF.Operational.2012_dir.nc'
key = '08-atm:y-tid:y-wav:y'
cases[key]['dir'] = base_dir_sm + '/a53_SAN_ATM_WAV2OCN_v2.0/rt_20180510_h13_m19_s27r209/'
cases[key]['label'] = 'ENS_ch75 WAV'
cases[key]['hsig_file'] = wav_inp_dir + 'ww3.HWRF.ENS_CH75.2012_hs.nc'
cases[key]['wdir_file'] = wav_inp_dir + 'ww3.HWRF.ENS_CH75.2012_dir.nc'
if False:
#ATM&WAV2OCN
wav_inp_dir = '/scratch4/COASTAL/coastal/save/NAMED_STORMS/SANDY/WW3/'
key = '02-atm:y-tid:y-wav:y'
cases[key]['dir'] = base_dir_sm + '/a53_SAN_ATM_WAV2OCN_v2.0/rt_20180510_h13_m15_s13r719/'
cases[key]['label'] = '3DVar WAV'
cases[key]['hsig_file'] = wav_inp_dir + 'ww3.HWRF.3DVar.2012_hs.nc'
cases[key]['wdir_file'] = wav_inp_dir + 'ww3.HWRF.3DVar.2012_dir.nc'
key1 = key
"""
|
import numpy as np
from sklearn import svm
try:
    import joblib
except ImportError:  # scikit-learn < 0.23 bundled joblib under sklearn.externals
    from sklearn.externals import joblib
from dynamic_programming import neural_classif_usage as nn
class DynamicProgramming:
def __init__(self, model_path, model_file, offline_file):
self.model_path = model_path
self.model_file = model_file
self.offline_file = offline_file
self.nControls = None
self.activationCost = None
self.nn = None
self.nStages = 24
self.regr = joblib.load(model_path+'regr_model.pkl')
# if os.path.isfile(offline_file+'.p'):
# offline_phase = pickle.load(open(offline_file+'.p', "rb"))
# self.controlSpace = offline_phase['controlSpace']
# self.nControls = offline_phase['nControls']
# self.J = offline_phase['J']
# self.activationCost = offline_phase['activationCost']
# else:
# self.offline_phase()
self.offline_phase()
def offline_phase(self):
print('Executing offline phase.')
UEgenerationFactor = 10000
UEgenerationbias = 150
averageTrafficPerHour = - UEgenerationbias + UEgenerationFactor * np.array([0.025, 0.07, 0.085, 0.0964, 0.1, 0.105, 0.11, 0.115, 0.12, 0.1225, 0.125, 0.13, 0.14, 0.16, 0.155, 0.15, 0.125, 0.1, 0.075, 0.05, 0.03, 0.024, 0.024, 0.027])
nPicos = 6
nActivePicosVal = np.arange(0, (nPicos+1))
nABSvals = 8
ABSval = np.linspace(0, 7/8, nABSvals)
CREval = np.array([0, 6, 9, 12, 18])
controlSpace = np.array(np.meshgrid(nActivePicosVal, ABSval, CREval)).T.reshape(-1,3)
nControls = len(controlSpace[:, 0])
ABS_CRE_bench = np.array([0, 0])
nControls_bench = nPicos + 1
controlSpace_bench = np.append(np.expand_dims(nActivePicosVal, axis=0).transpose(), np.tile(ABS_CRE_bench, (nControls_bench, 1)), axis=1)
C = np.zeros((nControls, self.nStages))
P = np.zeros((nControls, self.nStages))
self.nn = nn.Classification(self.model_path, self.model_file)
print('Creating matrices C and P...')
for k in range(self.nStages):
for s in range(nControls):
state = np.concatenate([np.array([averageTrafficPerHour[k]]), controlSpace[s, :]])
C[s, k] = self.nn.getConsumption(np.expand_dims(state, axis=0))[0]
P[s, k] = self.nn.getQoS(np.expand_dims(state, axis=0))[0]
J = np.zeros((nControls, self.nStages))
p0Pico = 56
beta = .1
self.activationCost = beta * p0Pico
print('Computing matrix J...')
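        # Backward dynamic-programming pass over the daily cycle: for each stage k and
        # each control s, J[s, k] is the minimum over QoS-feasible actions of the
        # consumption C, plus the pico-activation cost, plus the cost-to-go
        # (J[:, k+1], or 0 at the final stage).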
stageLoop = np.arange(0, self.nStages)[::-1]
for k in stageLoop:
if k == stageLoop[0]: # if last stage
costToGo = 0
validActions = P[:, 0] > 0
if sum(validActions) == 0:
validActions[np.argmax(P[:, 0])] = True
else:
costToGo = J[:, k+1]
validActions = P[:, k+1] > 0
if sum(validActions) == 0:
validActions[np.argmax(P[:, 0])] = True
for s in range(nControls):
currentActivePicos = controlSpace[s, 0]
activationMatrix = np.maximum(controlSpace[:, 0] - currentActivePicos, 0)
activationCostMatrix = activationMatrix * self.activationCost
cost_in_k = C[:, k] + activationCostMatrix + costToGo
J[s, k] = np.min(cost_in_k[validActions])
self.J = J
self.controlSpace = controlSpace
self.nControls = nControls
#offline phase for benchmark algorithm
C = np.zeros((nControls_bench, self.nStages))
P = np.zeros((nControls_bench, self.nStages))
print('Creating matrices C and P for benchmark algorithm...')
for k in range(self.nStages):
for s in range(nControls_bench):
state = np.concatenate([np.array([averageTrafficPerHour[k]]), controlSpace_bench[s, :]])
C[s, k] = self.nn.getConsumption(np.expand_dims(state, axis=0))[0]
P[s, k] = self.nn.getQoS(np.expand_dims(state, axis=0))[0]
J_bench = np.zeros((nControls_bench, self.nStages))
p0Pico = 56
beta = .1
self.activationCost = beta * p0Pico
print('Computing matrix J for benchmark algorithm...')
stageLoop = np.arange(0, self.nStages)[::-1]
for k in stageLoop:
if k == stageLoop[0]: # if last stage
costToGo = 0
validActions = P[:, 0] > 0
if sum(validActions) == 0:
validActions[np.argmax(P[:, 0])] = True
else:
costToGo = J_bench[:, k+1]
validActions = P[:, k+1] > 0
if sum(validActions) == 0:
validActions[np.argmax(P[:, 0])] = True
for s in range(nControls_bench):
currentActivePicos = controlSpace_bench[s, 0]
activationMatrix = np.maximum(controlSpace_bench[:, 0] - currentActivePicos, 0)
activationCostMatrix = activationMatrix * self.activationCost
cost_in_k = C[:, k] + activationCostMatrix + costToGo
J_bench[s, k] = np.min(cost_in_k[validActions])
self.controlSpace_bench = controlSpace_bench
self.nControls_bench = nControls_bench
self.J_bench = J_bench
# offline_phase = dict(controlSpace=controlSpace, nControls=nControls, J=J, activationCost=self.activationCost)
# pickle.dump(offline_phase, open(self.offline_file+'.p', "wb"))
print('Offline phase finished.')
def online_getNextControl(self, traffic, currentControl, k): #regresion and NN
if self.nn is None:
self.nn = nn.Classification(self.model_path, self.model_file)
states = np.append(np.tile(traffic, (self.nControls, 1)), self.controlSpace.copy(), axis=1)
states[:, 2] = states[:, 2] * 8
consumption_next_stage = self.regr.predict(states)
QoS_class = self.nn.getQoS(states).transpose()[0]
validActions = QoS_class > 0
if not validActions.any():
validActions[np.argmax(QoS_class)] = True
print('There is no action satisfying the threshold!')
currentActivePicos = currentControl[0]
activationMatrix = np.maximum(self.controlSpace[:, 0] - currentActivePicos, 0)
activationCostMatrix = activationMatrix * self.activationCost
totalConsumption = consumption_next_stage + activationCostMatrix + self.J[:, np.mod(k+1, self.nStages)]
totalConsumption[np.logical_not(validActions)] = np.inf
control = self.controlSpace[np.argmin(totalConsumption), :]
if control[0] == 0: #if 0 active picos, ABS and CRE deactivated
control[2] = 0
return control
def online_getNextControl_noeICIC(self, traffic, currentControl, k): #no eICIC regr and NN
if self.nn is None:
self.nn = nn.Classification(self.model_path, self.model_file)
states = np.append(np.tile(traffic, (self.nControls_bench, 1)), self.controlSpace_bench.copy(), axis=1)
consumption_next_stage = self.regr.predict(states)
QoS_class = self.nn.getQoS(states).transpose()[0]
validActions = QoS_class > 0
if not validActions.any():
validActions[np.argmax(QoS_class)] = True
print('There is no action satisfying the threshold!')
currentActivePicos = currentControl[0]
activationMatrix = np.maximum(self.controlSpace_bench[:, 0] - currentActivePicos, 0)
activationCostMatrix = activationMatrix * self.activationCost
totalConsumption = consumption_next_stage + activationCostMatrix + self.J_bench[:, np.mod(k+1, self.nStages)]
totalConsumption[np.logical_not(validActions)] = np.inf
control = self.controlSpace_bench[np.argmin(totalConsumption), :]
return control
def closeAlgorithm(self):
self.nn.closeModel()
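# Illustrative usage sketch: the paths and file names below are placeholders and
# assume a trained 'regr_model.pkl' plus the neural-classifier files exist under
# model_path; adjust them to your own setup.
if __name__ == '__main__':
    dp = DynamicProgramming(model_path='models/', model_file='classif_model',
                            offline_file='offline_phase')
    traffic = 500.0                # forecast traffic for the next hour
    current_control = [3, 0.0, 0]  # [active picos, ABS ratio, CRE bias]
    next_control = dp.online_getNextControl(traffic, current_control, k=10)
    print('Suggested control for the next stage:', next_control)
    dp.closeAlgorithm()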
|
"""Entry point"""
from domination.process import app
def main():
"""Run faust main"""
app.main()
if __name__ == '__main__':
main()
|
__author__ = 'Owais Lone'
__version__ = '0.7.0'
default_app_config = 'webpack_loader.apps.WebpackLoaderConfig'
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from onnx import numpy_helper
def get_parameter_name(name):
"""
Gets parameter name from parameter key.
"""
return name[name.rfind(".") + 1 :]
def get_attribute_value(attr):
"""
Retrieves value from attribute in ONNX graph.
"""
if attr.HasField("f"): # floating-point attribute
return attr.f
elif attr.HasField("i"): # integer attribute
return attr.i
elif attr.HasField("s"): # string attribute
return attr.s # TODO: Sanitize string.
elif attr.HasField("t"): # tensor attribute
return torch.from_numpy(numpy_helper.to_array(attr.t))
elif len(attr.ints) > 0:
return list(attr.ints)
elif len(attr.floats) > 0:
return list(attr.floats)
else:
raise ValueError("Unknown attribute type for attribute %s." % attr.name)
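# A minimal self-check sketch: build a couple of AttributeProto objects with
# onnx.helper.make_attribute (the names and values here are arbitrary examples) and
# read them back through the helpers above.
if __name__ == "__main__":
    from onnx import helper
    print(get_parameter_name("encoder.layer0.weight"))                    # -> weight
    print(get_attribute_value(helper.make_attribute("alpha", 0.5)))       # float attribute
    print(get_attribute_value(helper.make_attribute("axes", [0, 2, 3])))  # repeated ints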
|
# -*- coding: utf-8 -*-
# This code shows an example of text translation from Japanese to Simplified Chinese.
# This code runs on Python 2.7.x and Python 3.x.
# You may install `requests` to run this code: pip install requests
# Please refer to `https://api.fanyi.baidu.com/doc/21` for complete api document
import requests
import random
import json
from hashlib import md5
# Set your own appid/appkey.
appid = '20210704000879756'
appkey = 'WeD6UT8byhVe2p2WEilH'
# For list of language codes, please refer to `https://api.fanyi.baidu.com/doc/21`
from_lang = 'jp'
to_lang = 'zh'
endpoint = 'http://api.fanyi.baidu.com'
path = '/api/trans/vip/translate'
url = endpoint + path
query = 'トイレがいっぱいです'
# Generate salt and sign
def make_md5(s, encoding='utf-8'):
return md5(s.encode(encoding)).hexdigest()
salt = random.randint(32768, 65536)
sign = make_md5(appid + query + str(salt) + appkey)
# Build request
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
payload = {'appid': appid, 'q': query, 'from': from_lang, 'to': to_lang, 'salt': salt, 'sign': sign}
# Send request
r = requests.post(url, params=payload, headers=headers)
result = r.json()
# Show response
print(json.dumps(result, indent=4, ensure_ascii=False))
|
import json
from compare import expect
from django.test import TransactionTestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from lxml import etree
from tardis.tardis_portal.models import Experiment, License, ObjectACL, User
def _create_user_and_login(username='testuser', password='testpass'):
user = User.objects.create_user(username, '', password)
user.save()
client = Client()
client.login(username=username, password=password)
return user, client
class RifCSTestCase(TransactionTestCase):
def setUp(self):
self.ns = {'r': 'http://ands.org.au/standards/rif-cs/registryObjects',
'o': 'http://www.openarchives.org/OAI/2.0/'}
user, client = _create_user_and_login()
license_ = License(name='Creative Commons Attribution-NoDerivs '
'2.5 Australia',
url='http://creativecommons.org/licenses/by-nd/'
'2.5/au/',
internal_description='CC BY 2.5 AU',
allows_distribution=True)
license_.save()
experiment = Experiment(title='Norwegian Blue',
description='Parrot + 40kV',
created_by=user)
experiment.public_access = Experiment.PUBLIC_ACCESS_FULL
experiment.license = license_
experiment.save()
acl = ObjectACL(content_object=experiment,
pluginId='django_user',
entityId=str(user.id),
isOwner=False,
canRead=True,
canWrite=True,
canDelete=False,
aclOwnershipType=ObjectACL.OWNER_OWNED)
acl.save()
params = {'type': 'website',
'identifier': 'https://www.google.com/',
'title': 'Google',
'notes': 'This is a note.'}
response = client.post(\
reverse('tardis.apps.related_info.views.' +
'list_or_create_related_info',
args=[experiment.id]),
data=json.dumps(params),
content_type='application/json')
# Check related info was created
expect(response.status_code).to_equal(201)
self.acl = acl
self.client = client
self.experiment = experiment
self.params = params
def testExistsInOaipmh(self):
ns = self.ns
args = {
'verb': 'GetRecord',
'metadataPrefix': 'rif',
'identifier': 'experiment/%d' % self.experiment.id
}
response = self.client.get('/apps/oaipmh/?%s' %
'&'.join(['%s=%s' % (k, v)
for k, v in args.items()]))
self.assertEqual(response.status_code, 200)
# Check the response content is good
xml = etree.fromstring(response.content)
        print(response.content)
assert xml.xpath('/o:OAI-PMH', namespaces=ns)
assert not xml.xpath('o:error', namespaces=ns)
assert xml.xpath('/o:OAI-PMH/o:GetRecord/o:record', namespaces=ns)
header, metadata = xml.xpath('/o:OAI-PMH/o:GetRecord/o:record/o:*',
namespaces=ns)[0:2]
exp_id = Experiment.objects.first().id
expect(header.xpath('o:identifier/text()',namespaces=ns)[0]) \
.to_equal('experiment/%d' % exp_id)
# <registryObject group="MyTARDIS Default Group">
registryObject = metadata.xpath('r:registryObjects/r:registryObject',
namespaces=ns)[0]
# <collection type="dataset">
expect(registryObject.xpath('r:collection/@type',
namespaces=ns)[0]).to_equal('dataset')
collection = registryObject.xpath('r:collection', namespaces=ns)[0]
expect(collection.xpath('r:relatedInfo/@type', namespaces=ns)) \
.to_equal([self.params['type']])
relatedInfo = collection.xpath('r:relatedInfo', namespaces=ns)[0]
for k, v in self.params.items():
if k == 'type':
continue
expect(relatedInfo.xpath('r:'+k+'/text()', namespaces=ns)) \
.to_equal([v])
expect(relatedInfo.xpath('r:identifier/@type', namespaces=ns)) \
.to_equal(['uri'])
|
import os
from flask import (
Blueprint, flash, g, redirect, render_template, request, url_for, current_app
)
from .db import Database
bp = Blueprint('portal', __name__)
@bp.route('/admin', methods=['GET'])
def admin():
return render_template("admin/admin_home.html")
@bp.route('/admin/table/<string:table_type>', methods=['GET'])
def render_table(table_type):
    database_server = Database(host=current_app.config["MYSQL_HOST"],
                               port=current_app.config["MYSQL_PORT"],
                               user=current_app.config["MYSQL_USER"],
                               password=current_app.config["MYSQL_PASSWORD"],
                               database=current_app.config["MYSQL_DATABASE"])
    # Map each supported table type to its accessor on the database layer.
    table_getters = {
        'professor': database_server.get_professor_table_all,
        'gender': database_server.get_gender_table_all,
        'concentration': database_server.get_concentration_table_all,
        'job-merged-table': database_server.get_job_merged_table_all,
        'company': database_server.get_company_table_all,
        'alumni': database_server.get_alumni_table_all,
    }
    getter = table_getters.get(table_type)
    if getter is None:
        return None
    data_tuple = getter()
    return render_template("admin/table_render.html",
                           table_type=data_tuple[0],
                           table_columns=data_tuple[1],
                           table_dict=data_tuple[2])
@bp.route('/admin/table/insert/<string:table_type>', methods=["GET"])
def insert_new_record(table_type):
database_server = Database(host=current_app.config["MYSQL_HOST"],
port=current_app.config["MYSQL_PORT"],
user=current_app.config["MYSQL_USER"],
password=current_app.config["MYSQL_PASSWORD"],
database=current_app.config["MYSQL_DATABASE"])
if table_type == 'professor':
return render_template("admin/table_insert.html",
table_type=table_type)
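# The AJAX endpoint below expects a JSON body; for the 'professor' table the handler
# reads the keys shown in this illustrative payload:
#   {"firstName": "Ada", "lastName": "Lovelace"}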
@bp.route('/admin/table/insert/ajax/<string:table_type>', methods=["POST"])
def ajax_insert_new_record(table_type):
database_server = Database(host=current_app.config["MYSQL_HOST"],
port=current_app.config["MYSQL_PORT"],
user=current_app.config["MYSQL_USER"],
password=current_app.config["MYSQL_PASSWORD"],
database=current_app.config["MYSQL_DATABASE"])
if table_type == 'professor':
request.get_data()
json_data = request.json
flat_list = [json_data["firstName"], json_data["lastName"]]
inserted_id = database_server.insert_into_professor_table(flat_list)
return {
"database_status": "Success",
"inserted_id": inserted_id
}
|
from cx_Freeze import setup, Executable
base = None
executables = [Executable("lzss3.py", base=base)]
packages = ["idna"]
options = {
'build_exe': {
'packages':packages,
},
}
setup(
name = "Decompress",
options = options,
version = "1.0.0.0",
description = "Decompress a file that uses Nintendo's lz11 compression.",
executables = executables
)
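# Typical usage (with cx_Freeze installed): run `python setup.py build` to produce a
# frozen executable of lzss3.py under the build/ directory.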
|
from collections import defaultdict
from datetime import timedelta
import re
from typing import Dict, List
from django.core.exceptions import SuspiciousOperation
from django.db.models import Q, query, Sum
from django.http import Http404
from django.urls import reverse_lazy
from django.utils import timezone
from django.views import generic as generic_views
from djangoql.exceptions import DjangoQLError
from djangoql.queryset import apply_search
import structlog
from . import forms, models
from ..auditing.models import Issue
from ..checklists.steps import STEPS
log = structlog.get_logger()
class ServiceMixin:
def get_object(self, queryset=None):
"""Return the service based on owner and name from the URL."""
if queryset is None:
queryset = self.get_queryset()
try:
return queryset.get(
owner_slug=self.kwargs["owner_slug"], name_slug=self.kwargs["name_slug"]
)
except queryset.model.DoesNotExist:
raise Http404("Service.DoesNotExist")
class ServiceCreate(generic_views.CreateView):
form_class = forms.ServiceForm
model = form_class.Meta.model
class ServiceDelete(generic_views.DeleteView):
model = models.Service
success_url = reverse_lazy("service_list")
def get_object(self, queryset=None):
owner_slug = self.kwargs.get("owner_slug")
name_slug = self.kwargs.get("name_slug")
if queryset is None:
queryset = self.get_queryset()
if owner_slug is None or name_slug is None:
raise SuspiciousOperation(
f"ServiceDelete view must be called with owner_slug and name_slug"
)
try:
return queryset.get(owner_slug=owner_slug, name_slug=name_slug)
except self.model.DoesNotExist:
raise Http404(f"Service {owner_slug}/{name_slug} does not exist")
class ServiceDetail(ServiceMixin, generic_views.DetailView):
model = models.Service
def get_queryset(self):
return (
super()
.get_queryset()
.select_related("repository")
.prefetch_related("checkmarks")
)
def generate_sentry_histogram(
self, sentry_issues: query.QuerySet
) -> Dict[int, List[int]]:
result = defaultdict(list)
for issue in sentry_issues:
last_two_weeks = sorted(issue.stats.all(), key=lambda x: x.timestamp)[:14]
max_events_count = max([day.count for day in last_two_weeks])
for day in last_two_weeks:
bar_height = (
max(day.count / max_events_count, 0.175) if day.count else 0
)
result[issue.id].append(
{
"value": bar_height,
"tooltip": f"""
<h4>{day.timestamp.strftime('%d/%m/%Y')}</h4>
<strong>{day.count}</strong> events
""",
}
)
return dict(result)
def calculate_weekly_sentry_stats(
self, sentry_issues: query.QuerySet
) -> Dict[str, int]:
one_week_ago = timezone.now().date() - timedelta(days=7)
last_week_issues = sentry_issues.filter(last_seen__gte=one_week_ago).annotate(
freq=Sum("stats__count", filter=Q(stats__timestamp__gte=one_week_ago))
)
weekly_events = last_week_issues.aggregate(Sum("freq"))["freq__sum"] or 0
weekly_users = last_week_issues.aggregate(Sum("users"))["users__sum"] or 0
return {"events": weekly_events, "users": weekly_users}
def get_sentry_context(self):
ISSUE_ORDER = {
models.SentryIssueCategory.STALE.value: 0,
models.SentryIssueCategory.DECAYING.value: 1,
models.SentryIssueCategory.SPOILED.value: 2,
}
sentry_issues_queryset = self.object.sentry_issues.prefetch_related(
"stats"
).all()
if not sentry_issues_queryset.exists():
return None
all_sentry_issues = sentry_issues_queryset.order_by("-last_seen")
problematic_sentry_issues = sorted(
sentry_issues_queryset.exclude(
category=models.SentryIssueCategory.FRESH.value
).all(),
key=lambda k: (ISSUE_ORDER[k.category], k.last_seen),
)
issue_histogram = self.generate_sentry_histogram(all_sentry_issues)
weekly_stats = self.calculate_weekly_sentry_stats(all_sentry_issues)
return {
"weekly_events": weekly_stats["events"],
"weekly_users": weekly_stats["users"],
"issues": [
{
"id": issue.id,
"instance": issue,
"histogram": issue_histogram[issue.id],
}
for issue in problematic_sentry_issues
],
}
def get_checklists_context(self):
if self.object.status != models.Status.BETA.value:
return None
return {
"total": sum(
[len(steps) for tag, steps in STEPS.items() if tag in self.object.tags]
),
"completed": self.object.checkmarks.count(),
}
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if self.object.repository:
context["issue_count"] = self.object.repository.issues.filter(
status=Issue.Status.NEW.value
).count()
context["sentry_data"] = self.get_sentry_context()
context["checklist"] = self.get_checklists_context()
return context
class ServiceList(generic_views.ListView):
model = models.Service
paginate_by = 50
def get_queryset(self):
queryset = self.model.objects.select_related("repository")
queryterm = self.request.GET.get("q", None)
SIMPLE_QUERY_PATTERN = r"^[\w-]+$"
URL_QUERY_PATTERN = r"^https?[:][/][/]\S+$"
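        # Illustrative query terms: a bare slug such as "billing-api" hits the simple
        # icontains filters, "https://docs.example.com" hits the URL filters, and
        # anything else, e.g. 'status = "beta" and owner = "platform"', is handed to
        # DjangoQL via apply_search.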
if queryterm:
if re.match(SIMPLE_QUERY_PATTERN, queryterm):
queryset = queryset.filter(
Q(name__icontains=queryterm)
| Q(owner__icontains=queryterm)
| Q(status__icontains=queryterm)
| Q(impact__icontains=queryterm)
)
elif re.match(URL_QUERY_PATTERN, queryterm):
queryset = queryset.filter(
Q(docs_url__icontains=queryterm)
| Q(service_url__icontains=queryterm)
| Q(health_check_url__icontains=queryterm)
)
else:
try:
queryset = apply_search(queryset, queryterm, models.ServiceQLSchema)
except DjangoQLError:
log.exception("services.query_error", queryterm=queryterm)
return self.model.objects.none()
return queryset.order_by("name")
class ServiceUpdate(ServiceMixin, generic_views.UpdateView):
form_class = forms.ServiceForm
model = form_class.Meta.model
|
"""
AWGs
"""
from atom.api import Atom, List, Int, Float, Range, Enum, Bool, Constant, Str
from .Instrument import Instrument
import enaml
from enaml.qt.qt_application import QtApplication
import glob
import copy
class AWGDriver:
naming_convention = []
def get_naming_convention(self):
        # return a copy of naming_convention so different AWGs will not share the same list object
return copy.copy(self.naming_convention)
class AWGChannel(Atom):
label = Str()
amplitude = Float(default=1.0).tag(desc="Scaling applied to channel amplitude")
offset = Float(default=0.0).tag(desc='D.C. offset applied to channel')
enabled = Bool(True).tag(desc='Whether the channel output is enabled.')
class AWG(Instrument, AWGDriver):
isMaster = Bool(False).tag(desc='Whether this AWG is master')
triggerSource = Enum('Internal', 'External').tag(desc='Source of trigger')
triggerInterval = Float(1e-4).tag(desc='Internal trigger interval')
samplingRate = Float(1200000000).tag(desc='Sampling rate in Hz')
numChannels = Int()
channels = List(AWGChannel)
seqFile = Str().tag(desc='Path to sequence file.')
seqFileExt = Constant('')
seqForce = Bool(True).tag(desc='Whether to reload the sequence')
delay = Float(0.0).tag(desc='time shift to align multiple AWGs')
translator = Constant('')
def __init__(self, **traits):
super(AWG, self).__init__(**traits)
if not self.channels:
for ct in range(self.numChannels):
self.channels.append(AWGChannel())
def json_encode(self, matlabCompatible=False):
jsonDict = super(AWG, self).json_encode(matlabCompatible)
# Skip encoding of constants
del jsonDict["seqFileExt"]
del jsonDict["translator"]
if matlabCompatible:
channels = jsonDict.pop('channels', None)
for ct,chan in enumerate(channels):
jsonDict['chan_{}'.format(ct+1)] = chan
return jsonDict
def update_from_jsondict(self, params):
for ct in range(self.numChannels):
channelParams = params['channels'][ct]
# if this is still a raw dictionary convert to object
if isinstance(channelParams, dict):
channelParams.pop('x__class__', None)
channelParams.pop('x__module__', None)
channelParams = AWGChannel(**channelParams)
self.channels[ct].label = channelParams.label
self.channels[ct].amplitude = channelParams.amplitude
self.channels[ct].offset = channelParams.offset
self.channels[ct].enabled = channelParams.enabled
for p in ['label', 'enabled', 'address', 'isMaster', 'triggerSource', 'triggerInterval', 'samplingRate', 'seqFile', 'seqForce', 'delay']:
setattr(self, p, params[p])
|
from numpy import *
from matplotlib.pyplot import *
x=loadtxt('cluster_test.txt')
N=x.shape[0]
M=zeros((N,N))
# critical distance
dc=7
for i in range(N):
for j in range(i+1, N):
M[j, i] = M[i, j] = (sum((x[i, :]-x[j, :])**2))**0.5
print('matrix of distances is ready')
l1=[]
l2=[]
l3=[]
res=[]
Q=range(N)
# set the zero diagonal (self-distances) to the maximum value so the matrix minimum
# is found between two distinct points
maxi=M.max()
for i in range(N):
M[i,i]=maxi
# NNS algorithm
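# Outline of the loop below: seed a new cluster with the closest remaining pair,
# then repeatedly absorb every unassigned point within the critical distance dc of
# the current frontier; once the frontier stops growing, store the finished cluster
# in res, drop its members from Q, and repeat until no points remain.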
fv=True
while fv:
minim = M[Q[0], Q[1]]
iz = Q[0]
jz = Q[1]
for i in Q:
for j in Q:
if M[i, j] < minim:
iz = i
jz = j
minim = M[i, j]
l1 = []
l2 = [iz, jz]
l3 = []
f = True
while f:
for el in l2:
for i in Q:
if M[el, i] < dc:
if (i not in l1) and (i not in l2) and (i not in l3):
l3.append(i)
if not l3:
f = False
l1.extend(l2)
l2 = l3
l3 = []
res.append(l1)
Q = [el for el in Q if el not in l1]
if not Q:
fv = False
x2 = x.take(l1,0)
plot(x2[:,0], x2[:,1], '.')
grid()
show()
# savefig('NNS_cluster.png')
|
"""
Purpose: Summarizes the alternatives of the bluebooks
Status: Final -- 06/27/2019
@author: olivergiesecke
"""
###############################################################################
### Set packages ###
import pandas as pd
import re
import os
import matplotlib.pyplot as plt
import numpy as np
###############################################################################
def main():
dataffr=construct_dataset(1988,2008)
plot_target(dataffr)
data=load_bluebook_data(1988,2008)
create_totstat(data,'tab_sumstats_menu')
#create_sumstat_byyear(data,'tab_sumstats_menu_byyear')
#turning_points=['1989-06-01','1993-06-01','1995-04-01','2000-11-01','2004-01-01','2007-02-01']
#create_sumstat_byperiod(data,turning_points,'tab_sumstats_menu_byperiod')
def create_totstat(data,name):
sum_menu=pd.pivot_table(data,values='end_date',index='treatment_options',aggfunc=np.count_nonzero,fill_value=0)
sum_menu=sum_menu.reset_index()
sum_menu.rename(columns={"end_date":"count"},inplace=True)
sum_menu.loc[:,'len_count']=sum_menu["treatment_options"].apply(lambda x:len(x))
sum_menu=sum_menu.sort_values(by='len_count')
sum_menu=sum_menu.append({'treatment_options':'Total','count':sum_menu['count'].sum()},ignore_index=True)
### Export the dataframe to latex
# Dictionary for column headers
headers={"treatment_options":"Policy Options","count":"Number of Meetings"}
sum_menu.rename(columns=headers,inplace=True)
sum_menu.drop(columns="len_count",inplace=True)
create_table_df(sum_menu,name)
#print("Table",name,"is written." )
def create_table_df(data,name):
columnheaders=list(data.columns)
numbercolumns=len(columnheaders)
with open("../output/overleaf_files/"+name+".tex", "w") as f:
f.write(r"\begin{tabular}{"+"l" + "".join("c" * (numbercolumns-1)) + "}\n")
f.write("\\hline\\hline \n")
f.write("\\addlinespace"+" \n")
f.write(" & ".join([str(x) for x in columnheaders]) + " \\\ \n")
f.write("\\hline \n")
# write data
for idx,row in data.iterrows():
# Do formatting for specific tables
if row.iloc[0]=="Total":
f.write("\\addlinespace"+" \n")
f.write(" & ".join([str(x) for x in row.values]) + " \\\\\n")
f.write("\\hline \n")
f.write(r"\end{tabular}")
def construct_dataset(startyear,endyear):
ffr=pd.read_excel("../../../collection/python/data/FRED_DFEDTAR.xls",skiprows=10)
ffr.rename(columns={"observation_date":"date","DFEDTAR":"ffrtarget"},inplace=True)
ffr['year']=ffr['date'].apply(lambda x: x.year)
ffr=ffr[(ffr['year']>=startyear) & (ffr['year']<=endyear)]
return ffr
def plot_target(dataffr):
dataffr[dataffr['year']==1990]
### Get turning points
dataffr.reset_index(inplace=True)
t_point=dataffr[dataffr['ffrtarget'].diff(1)!=0]
t_point[t_point['year']==2007]
plt.rc('text', usetex=True)
fig = plt.figure(figsize=(10, 3))
ax = fig.add_subplot(1, 1, 1)
ax.plot(dataffr['date'],dataffr['ffrtarget'], color='blue', ls='solid')
turning_points=['1989-06-01','1993-06-01','1995-04-01','2000-11-01','2004-01-01','2007-02-01']
for datestring in turning_points:
ax.axvline(x=pd.to_datetime(datestring), color='gray', linestyle='--')
plt.legend(['Federal Funds Target'],frameon=False)
plt.savefig('../output/fig_fed_target.png', dpi=300,bbox_inches='tight')
#print('fig_fed_target.png is written')
def load_bluebook_data(startyear,endyear):
data=pd.read_excel("../data/bluebook_manual_data_online_WORKING.xlsx")
data['year']=data['start_date'].apply(lambda x : x.year)
data=data[(data['year']>=startyear) & (data['year']<=endyear)]
data=data.reset_index()
treatments=[]
for alt in ['a','b','c','d','e']:
try:
treatments+=data['C_TREATMENT_alt_'+alt].unique().tolist()
except:
pass
#print('No option found')
treatments=list(set(treatments))
data.loc[:,'treatment_options']=np.nan
for idx,row in data.iterrows():
treatments=[]
for alt in ['a','b','c','d','e']:
try:
treatments+=row['C_TREATMENT_alt_'+alt]
except:
pass
#print('No option found')
notvalid_treatments=['N']
treatments=[x for x in treatments if not x in notvalid_treatments]
treatment_tuple=tuple(set(treatments))
treatments=",".join(list(set(treatments)))
#print(treatments)
#print(idx)
if not len(treatment_tuple)==0:
            data.loc[idx, 'treatment_options'] = treatments
else:
pass
return data
def create_sumstat_byyear(data,name):
sum_menu_byyear=pd.pivot_table(data,values='end_date',index='treatment_options',columns='year',aggfunc=np.count_nonzero,fill_value=0)
sum_menu_byyear=sum_menu_byyear.reset_index()
addline={'treatment_options':'Total'}
for item in list(sum_menu_byyear.columns)[1:]:
addline.update({item:sum_menu_byyear[item].sum() })
sum_menu_byyear=sum_menu_byyear.append(addline,ignore_index=True)
### Export the dataframe to latex
# Dictionary for column headers
headers={"treatment_options":"Policy Options"}
sum_menu_byyear.rename(columns=headers,inplace=True)
create_table_df(sum_menu_byyear,name)
#print("Table",name,"is written." )
def create_sumstat_byperiod(data,turning_points,name):
data.loc[:,'period']=""
data.loc[data['start_date']<=pd.to_datetime(turning_points[0]),'period']='pre '+turning_points[0]
for i in range(len(turning_points)-1):
data.loc[(data['start_date']>pd.to_datetime(turning_points[i])) & (data['start_date']<=pd.to_datetime(turning_points[i+1])),'period']='\\shortstack{'+turning_points[i]+'- \\\ '+turning_points[i+1]+'}'
data.loc[data['start_date']>=pd.to_datetime(turning_points[-1]),'period']='post '+turning_points[-1]
sum_menu_period=pd.pivot_table(data,values='end_date',index='treatment_options',columns='period',aggfunc=np.count_nonzero,fill_value=0)
sum_menu_period=sum_menu_period.reset_index()
addline={'treatment_options':'Total'}
for item in list(sum_menu_period.columns)[1:]:
addline.update({item:sum_menu_period[item].sum() })
cols = list(sum_menu_period)
# move the column to head of list using index, pop and insert
cols.insert(1, cols.pop(cols.index(cols[-1])))
sum_menu_period=sum_menu_period[cols]
sum_menu_period=sum_menu_period.append(addline,ignore_index=True)
### Export the dataframe to latex
# Dictionary for column headers
headers={"treatment_options":"Policy Options"}
sum_menu_period.rename(columns=headers,inplace=True)
create_table_df(sum_menu_period,name)
#print("Table",name,"is written." )
if __name__ == "__main__":
main()
|
# Copyright (c) 2018-2021, Texas Instruments
# All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import numpy as np
import mmcv
import onnx
import torch
from mmdet.utils import save_model_proto
__all__ = ['pytorch2proto']
def prepare_model_for_layer_outputs(model_name, model_name_new, export_layer_types=None):
onnx_model = onnx.load(model_name)
intermediate_layer_value_info = onnx.helper.ValueInfoProto()
intermediate_layer_value_info.name = ''
for i in range(len(onnx_model.graph.node)):
for j in range(len(onnx_model.graph.node[i].output)):
if export_layer_types is None or onnx_model.graph.node[i].op_type in export_layer_types:
intermediate_layer_value_info.name = onnx_model.graph.node[i].output[0]
onnx_model.graph.output.append(intermediate_layer_value_info)
#
#
#
layer_output_names = [out.name for out in onnx_model.graph.output]
onnx.save(onnx_model, model_name_new)
return layer_output_names
def similar_tensor(t1, t2, rtol=1.e-5, atol=1.e-5):
if len(t1.shape) != len(t2.shape):
return False
if not np.allclose(t1.shape, t2.shape):
return False
if np.isnan(t1).all() or np.isnan(t2).all():
return False
max_t1 = abs(np.nanmax(t1))
atol = max_t1*atol
is_close = np.allclose(t1, t2, rtol=rtol, atol=atol, equal_nan=True)
if not is_close:
eps = max_t1 / rtol
diff = np.nanmax(np.abs((t1-t2)))
ratio = np.nanmax(np.abs((t1-t2)/(t1+eps)))
is_close = diff < atol and ratio < rtol
print(f'{t1.shape} - max difference: {diff} vs {atol}, max ratio: {ratio} vs {rtol}')
#
return is_close
def retrieve_onnx_names(input_data, partial_model_path, full_model_path):
import onnxruntime
full_model_path_tmp = f'{full_model_path}.tmp'
full_output_names = prepare_model_for_layer_outputs(full_model_path, full_model_path_tmp, export_layer_types=None)
full_infer = onnxruntime.InferenceSession(full_model_path_tmp)
full_input_name = full_infer.get_inputs()[0].name
partial_infer = onnxruntime.InferenceSession(partial_model_path)
partial_input_name = partial_infer.get_inputs()[0].name
partial_output_names = [o.name for o in partial_infer.get_outputs()]
input_numpy = input_data.detach().numpy() if isinstance(input_data, torch.Tensor) else input_data
full_outputs = full_infer.run(full_output_names, {full_input_name:input_numpy})
partial_outputs = partial_infer.run(partial_output_names, {partial_input_name:input_numpy})
matched_names = {}
for pname, po in zip(partial_output_names, partial_outputs):
matched_name = None
for fname, fo in zip(full_output_names, full_outputs):
if similar_tensor(po, fo):
matched_name = fname
#
#
if matched_name is None:
return None
#
matched_names[pname] = matched_name
#
os.remove(full_model_path_tmp)
return matched_names
def pytorch2proto(cfg, model, input_data, output_onnx_file, out_proto_file, output_names, opset_version):
    input_data = input_data[0] if isinstance(input_data, (list, tuple)) and \
        isinstance(input_data[0], (torch.Tensor, np.ndarray)) else input_data
save_model_proto(cfg, model, input_data, output_filename=out_proto_file, output_names=output_names, opset_version=opset_version)
matched_names = None
if output_onnx_file is not None:
matched_names = retrieve_onnx_names(input_data, out_proto_file, output_onnx_file)
if matched_names is not None:
proto_names = list(matched_names.values())
save_model_proto(cfg, model, input_data, output_filename=output_onnx_file,
opset_version=opset_version, proto_names=proto_names, output_names=output_names,
save_onnx=False)
#
#
if not matched_names:
print('Tensor names could not be located; prototxt file corresponding to full model '
              "(model.onnx) won't be written")
|
from django.db import models
from utils.models import BaseModel
# Create your models here.
class Link(BaseModel):
    """ Friendly links (blogroll) """
    name = models.CharField(max_length=32, verbose_name="name")
    url = models.URLField(max_length=64, verbose_name="URL address")
def __str__(self):
return self.name
class Meta:
db_table = "link"
        verbose_name = 'Friendly links'
verbose_name_plural = verbose_name
|
"""Reference implementations of graph algorithms."""
import numpy as np
import scipy.sparse as sparse
def bellman_ford_reference(A, c):
"""Reference implementation of Bellman-Ford.
    Parameters
    ----------
    A : coo sparse matrix
        n x n directed graph with positive weights
    c : array_like
        list of cluster centers
    Returns
    -------
    d : ndarray
        distance to cluster center
    m : ndarray
        cluster index
See Also
--------
amg_core.graph.bellman_ford
"""
Nnode = A.shape[0]
Ncluster = len(c)
d = np.full((Nnode,), np.inf)
    m = np.full((Nnode,), -1, dtype=int)
d[c] = 0 # distance
m[c] = c # index
done = False
while not done:
done = True
for i, j, Aij in zip(A.row, A.col, A.data):
if Aij > 0 and d[i] + Aij < d[j]:
d[j] = d[i] + Aij
m[j] = m[i]
done = False
return (d, m)
if __name__ == '__main__':
Edges = np.array([[1, 4],
[3, 1],
[1, 3],
[0, 1],
[0, 2],
[3, 2],
[1, 2],
[4, 3]])
w = np.array([2, 1, 2, 1, 4, 5, 3, 1], dtype=float)
A = sparse.coo_matrix((w, (Edges[:, 0], Edges[:, 1])))
c = np.array([0,1,2,3,4])
print('\nreference--')
for cc in c:
d, m = bellman_ford_reference(A, [cc])
print(d, m)
print('\npyamg--')
from pyamg.graph import bellman_ford
for cc in c:
d, m = bellman_ford(A, [cc])
print(d, m)
print('\ncsgraph.bellman_ford')
from scipy.sparse import csgraph
for cc in c:
d, p = csgraph.bellman_ford(A, directed=True, indices=[cc], return_predecessors=True)
print(d.ravel(), p.ravel())
|
import psycopg2
import psycopg2.extras
from Infrastructure import log
logger = log.get_logger("Postgres")
class Connector:
def __init__(self, config):
self.host = config['hostname']
self.database = config['database']
self.user = config['username']
self.password = config['password']
self.connection = None
def connect(self):
i = 1
while not self.connection:
try:
self.connection = psycopg2.connect(host=self.host,
database=self.database,
user=self.user,
password=self.password)
except Exception as e:
i += 1
logger.info("Error postgres connection " + str(e))
logger.info("Connect postgres " + str(i))
if i > 10:
break
def execute_with_results(self, query, params={}, as_dict=False):
query = query.format(**params)
self.connect()
if as_dict:
cursor = self.connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
else:
cursor = self.connection.cursor()
cursor.execute(query)
data = cursor.fetchall()
self.connection.commit()
cursor.close()
self.close()
if as_dict:
data = list(map(lambda r: dict(r), data))
return data
def execute_with_results_generic(self, query):
self.connect()
cursor = self.connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
cursor.execute(query)
rowcount = cursor.rowcount
try:
data = list(cursor.fetchall())
except Exception as ex:
data = []
self.connection.commit()
cursor.close()
return [data, rowcount]
def execute_multiple_queries_select_dict_response(self, store_procedure, params={}):
procedure = open(store_procedure, 'r').read()
sql_command = procedure.format(**params)
sqllist = sql_command.split(";")[:-1]
selects = []
for sql_c in sqllist:
selected = self.execute_with_results_generic(sql_c)
selects.append(selected)
return selects
def close(self):
if self.connection:
self.connection.close()
self.connection = None
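# Illustrative usage sketch; the credentials below are placeholders.
if __name__ == "__main__":
    config = {"hostname": "localhost", "database": "mydb",
              "username": "user", "password": "secret"}
    connector = Connector(config)
    rows = connector.execute_with_results("SELECT 1 AS answer", as_dict=True)
    print(rows)  # e.g. [{'answer': 1}]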
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2005 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
def test():
from .Class import example
from .CCMill import CCMill
klass = example()
from .installationInfo import tmp
from .factory import createHHandCC
hhfn, ccfn = createHHandCC( klass, tmp )
import os
os.system( "cd %s && g++ -c %s" % (tmp, ccfn) )
return
if __name__ == "__main__": test()
# version
__id__ = "$Id$"
# End of file
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 18 20:10:30 2021
@author: Mohamed Salah
"""
# Import libraries and methods
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.impute import SimpleImputer
import pandas as pd
import numpy as np
# Data Preparation
## Data Selection
dataset = pd.read_csv("houses.csv")
#print(dataset)
X = dataset.iloc[ : , :-1]
y = dataset.iloc[ : , -1]
# print(y)
# print(X)
## Data Cleaning
# imputed_module = SimpleImputer(missing_values=np.nan, strategy='mean')
# imputed_X = imputed_module.fit(X)
# X = imputed_X.transform(X)
# imputed_module = SimpleImputer(missing_values='nan', strategy='mean')
# imputed_y = imputed_module.fit(y)
# y = imputed_y.transform(y)
## Data Scaling
X = StandardScaler(copy= True, with_mean=True, with_std=True).fit_transform(X)
# print(X)
## Data Splitting
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=44, shuffle =True)
# Model Building
regression_model = LinearRegression()
# Fit Model
regression_model.fit(X_train, y_train)
# Get Attribute Values
train_score = regression_model.score(X_train, y_train)
test_score = regression_model.score(X_test, y_test)
thetas = regression_model.coef_
y_intercept = regression_model.intercept_
# Predict
y_pred = regression_model.predict(X_test)
# Get Accuracy
MAEValue = mean_absolute_error(y_test, y_pred, multioutput='uniform_average') # it can be raw_values
MSEValue = mean_squared_error(y_test, y_pred, multioutput='uniform_average') # it can be raw_values
MdSEValue = median_absolute_error(y_test, y_pred)
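# Illustrative summary of the fit, reusing the quantities computed above.
print("R^2 train/test: {:.3f} / {:.3f}".format(train_score, test_score))
print("Coefficients:", thetas, "Intercept:", y_intercept)
print("MAE: {:.3f} | MSE: {:.3f} | Median AE: {:.3f}".format(MAEValue, MSEValue, MdSEValue))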
|
## @ingroupMethods-Noise-Fidelity_One-Propeller
# compute_broadband_noise.py
#
# Created: Mar 2021, M. Clarke
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import SUAVE
from SUAVE.Core import Units , Data
import numpy as np
from SUAVE.Methods.Noise.Fidelity_One.Noise_Tools.dbA_noise import A_weighting
from SUAVE.Methods.Noise.Fidelity_One.Noise_Tools import SPL_harmonic_to_third_octave
# ----------------------------------------------------------------------
# Frequency Domain Broadband Noise Computation
# ----------------------------------------------------------------------
## @ingroupMethods-Noise-Fidelity_One-Propeller
def compute_broadband_noise(freestream,angle_of_attack,position_vector,
velocity_vector,network,auc_opts,settings,res,source):
'''This computes the broadband noise of a propeller or rotor in the frequency domain
Assumptions:
    Correlations are adjusted from reference altitude of 300 ft
Source:
Schlegel, Ronald, Robert King, and Harold Mull. Helicopter rotor noise generation
and propagation. UNITED TECHNOLOGIES CORP STRATFORD CT SIKORSKY AIRCRAFT DIV, 1966.
Inputs:
freestream - freestream data structure [m/s]
angle_of_attack - aircraft angle of attack [rad]
position_vector - position vector of aircraft [m]
velocity_vector - velocity vector of aircraft [m/s]
network - energy network object [None]
auc_opts - data structure of acoustic data [None]
        settings                      - acoustic settings                            [None]
res.
SPL_prop_bb_spectrum - SPL of Frequency Spectrum [dB]
Outputs
*acoustic data is stored and passed in data structures*
Properties Used:
N/A
'''
num_cpt = len(angle_of_attack)
num_mic = len(position_vector[0,:,0,1])
num_prop = len(position_vector[0,0,:,1])
if source == 'lift_rotors':
propellers = network.lift_rotors
propeller = network.lift_rotors[list(propellers.keys())[0]]
else:
propellers = network.propellers
propeller = network.propellers[list(propellers.keys())[0]]
# ----------------------------------------------------------------------------------
# Broadband (Vortex) Noise
# ----------------------------------------------------------------------------------
# [number control points, number of mics, number of props, positions]
x = position_vector[:,:,:,0]
y = position_vector[:,:,:,1]
z = position_vector[:,:,:,2]
omega = auc_opts.omega[:,0] # angular velocity
R = propeller.radius_distribution # radial location
c = propeller.chord_distribution # blade chord
R_tip = propeller.tip_radius
beta = propeller.twist_distribution # twist distribution
t = propeller.max_thickness_distribution # thickness distribution
n = len(R)
S = np.sqrt(x**2 + y**2 + z**2) # distance between rotor and the observer
V_tip = R_tip*omega # blade_tip_speed
V_07 = V_tip*0.70/(Units.feet) # blade velocity at r/R_tip = 0.7
St = 0.28 # Strouhal number
t_avg = np.mean(t)/(Units.feet) # thickness
c_avg = np.mean(c)/(Units.feet) # average chord
beta_07 = beta[round(n*0.70)] # blade angle of attack at r/R = 0.7
h_val = t_avg*np.cos(beta_07) + c_avg*np.sin(beta_07) # projected blade thickness
f_peak = (V_07*St)/h_val # V - blade velocity at a radial location of 0.7
A_blade = (np.trapz(c, x = R))/(Units.feet**2) # area of blade
CL_07 = 2*np.pi*beta_07
S_feet = S/(Units.feet)
SPL_300ft = np.atleast_2d(10*np.log10(((6.1E-27)*A_blade*V_07**6)/(10**-16)) + 20*np.log(CL_07/0.4))
SPL_300ft = np.repeat(np.repeat(SPL_300ft.T,num_mic, axis = 1)[:,:,np.newaxis],num_prop, axis = 2)
    SPL_vals = SPL_300ft - 20*np.log10(S_feet/300)
# estimation of A-Weighting for Vortex Noise
f_v = np.array([0.5*f_peak,1*f_peak,2*f_peak,4*f_peak,8*f_peak,16*f_peak]).T # spectrum
fr = np.array([0.5,1,2,4,8,16]) # frequency ratio
weights = np.atleast_2d(np.array([7.92 , 4.17 , 8.33 , 8.75 ,12.92 , 13.33]))
SPL_weight = np.repeat(np.repeat(np.repeat(weights, num_prop, axis = 0)[np.newaxis,:,:], num_mic, axis = 0)[np.newaxis,:,:,:], num_cpt, axis = 0) # SPL weight
SPL_v = np.repeat(SPL_vals[:,:,:,np.newaxis], 6 , axis = 3) - SPL_weight # SPL correction
dim = len(fr)
C = np.zeros((num_cpt,num_mic,num_prop,dim))
p_pref_bb_dBA = np.zeros((num_cpt,num_mic,num_prop,dim-1))
SPL_bb_dbAi = np.zeros((num_cpt,num_mic,num_prop,dim))
for i in range(num_cpt):
for j in range(dim):
SPL_bb_dbAi[i,:,:,j] = A_weighting(SPL_v[i,:,:,j],f_v[i,j])
for j in range(dim-1):
C[:,:,:,j] = (SPL_bb_dbAi[:,:,:,j+1] - SPL_bb_dbAi[:,:,:,j])/(np.log10(fr[j+1]) - np.log10(fr[j]))
C[:,:,:,j+1] = SPL_bb_dbAi[:,:,:,j+1] - C[:,:,:,j]*np.log10(fr[j+1])
p_pref_bb_dBA[:,:,:,j] = (10**(0.1*C[:,:,:,j+1]))*(((fr[j+1]**(0.1*C[:,:,:,j]+ 1))/(0.1*C[:,:,:,j]+ 1))-((fr[j]**(0.1*C[:,:,:,j]+ 1))/(0.1*C[:,:,:,j]+ 1)))
p_pref_bb_dBA[np.isnan(p_pref_bb_dBA)] = 0
res.p_pref_bb_dBA = p_pref_bb_dBA
# convert to 1/3 octave spectrum
res.SPL_prop_bb_spectrum = SPL_harmonic_to_third_octave(SPL_v,f_v,settings)
return
|
from collections import namedtuple
from board import Player, Point
def compute_game_result(game_state):
num_b, num_w = 0, 0
for r in range(1, 8):
for c in range(1, 8):
player = game_state.board._grid.get(Point(r, c))
if player is None:
if game_state.add_player is None: continue
elif game_state.add_player == Player.black: num_b += 1
elif game_state.add_player == Player.white: num_w += 1
elif player == Player.black: num_b += 1
elif player == Player.white: num_w += 1
if num_b > num_w: return Player.black
else: return Player.white
def get_more_num(game_state):
num_my, num_op = 0, 0
for r in range(1, 8):
for c in range(1, 8):
player = game_state.board._grid.get(Point(r, c))
if player is None: continue
            elif player == game_state.next_player: num_op += 1
            else: num_my += 1
return num_my - num_op
|
#! /usr/bin/env python
from __future__ import print_function, unicode_literals
import sys
import pyATP
dat_lines = '''BEGIN NEW DATA CASE
LINE CONSTANTS
$ERASE
BRANCH IN___AOUT__AIN___BOUT__BIN___COUT__C
ENGLISH
3 0.0 .1357 0 .3959 1.18 5. 42. 30.
1 0.0 .1357 0 .3959 1.18 -5. 49. 37.
2 0.0 .1357 0 .3959 1.18 5. 56. 44.
0 0.0 .6609 0 .4883 .551 -.5 65. 58.
BLANK CARD ENDING CONDUCTOR CARDS
50. 60. 000001 001000 0 5.98 0 44
$PUNCH
BLANK CARD ENDING FREQUENCY CARDS
BLANK CARD ENDING LINE CONSTANT
BEGIN NEW DATA CASE
BLANK CARD
'''.split('\n')
'''
conductor_card = pyATP.DataCard('(I3, F5.4, F8.5, I2, F8.5, F8.5, F8.3, F8.3, F8.3)',
['IP', 'SKIN', 'RESIS', 'IX', 'REACT', 'DIAM', 'HORIZ', 'VTOWER', 'VMID'])
end_conductors = pyATP.DataCardFixedText('BLANK CARD ENDING CONDUCTOR CARDS')
conductor_cards = pyATP.DataCardRepeat(conductor_card, end_record = end_conductors)
line_cards = pyATP.DataCardStack([
pyATP.DataCardFixedText('BEGIN NEW DATA CASE'),
pyATP.DataCardFixedText('LINE CONSTANTS'),
pyATP.DataCardFixedText('$ERASE'),
pyATP.DataCard('(A8,6A6)', ['branch_card', 'in1', 'out1', 'in2', 'out2', 'in3', 'out3']),
pyATP.DataCard('(A80)', ['units']),
conductor_cards,
pyATP.DataCard('(F8.2, F10.2, A10, A1, 6I1, A1, 6I1, A1, I1, F8.3, A1, 4I1, I1, A7, I3)',
['RHO', 'FREQ', 'FCAR', '_1',
'inv_C', 'inv_Ce', 'inv_Cs',
'C', 'Ce', 'Cs', '_2',
'Z', 'Ze', 'Zs',
'inv_Z', 'inv_Ze', 'inv_Zg', '_3',
'ICAP',
'DIST', '_4',
'pi_Y', 'pi_Ys', 'pi_Z', 'pi_Zs',
'ISEG', '_5',
'PUN']),
pyATP.DataCardFixedText('$PUNCH'),
pyATP.DataCardFixedText('BLANK CARD ENDING FREQUENCY CARDS'),
pyATP.DataCardFixedText('BLANK CARD ENDING LINE CONSTANT'),
pyATP.DataCardFixedText('BEGIN NEW DATA CASE'),
pyATP.DataCardFixedText('BLANK CARD')
])
'''
line_cards = pyATP.LineConstCards()
print('Match? ', line_cards.match(dat_lines))
line_cards.read(dat_lines)
print(line_cards.data)
print('-'*80)
print(line_cards.write())
|
from typing import Union, Sequence, Tuple
import numpy as np
import torch
from torch.utils.data import Dataset
from torchio import IMAGE, LOCATION
from torchio.utils import to_tuple
class GridSampler(Dataset):
"""
Adapted from NiftyNet.
See https://niftynet.readthedocs.io/en/dev/window_sizes.html
"""
def __init__(
self,
data: Union[np.ndarray, torch.Tensor],
patch_size: Union[int, Sequence[int]],
patch_overlap: Union[int, Sequence[int]],
):
self.array = data
patch_size = to_tuple(patch_size)
patch_overlap = to_tuple(patch_overlap)
self.locations = self._grid_spatial_coordinates(
self.array,
patch_size,
patch_overlap,
)
def __len__(self):
return len(self.locations)
def __getitem__(self, index):
# Assume 3D
location = self.locations[index]
i_ini, j_ini, k_ini, i_fin, j_fin, k_fin = location
window = self.array[i_ini:i_fin, j_ini:j_fin, k_ini:k_fin]
window = window[np.newaxis, ...] # add channels dimension
sample = {IMAGE: window, LOCATION: location}
return sample
@staticmethod
def _enumerate_step_points(
starting: int,
ending: int,
win_size: int,
step_size: int,
) -> np.ndarray:
starting = max(int(starting), 0)
ending = max(int(ending), 0)
win_size = max(int(win_size), 1)
step_size = max(int(step_size), 1)
if starting > ending:
starting, ending = ending, starting
sampling_point_set = []
while (starting + win_size) <= ending:
sampling_point_set.append(starting)
starting = starting + step_size
additional_last_point = ending - win_size
sampling_point_set.append(max(additional_last_point, 0))
sampling_point_set = np.unique(sampling_point_set).flatten()
if len(sampling_point_set) == 2:
sampling_point_set = np.append(
sampling_point_set, np.round(np.mean(sampling_point_set)))
_, uniq_idx = np.unique(sampling_point_set, return_index=True)
return sampling_point_set[np.sort(uniq_idx)]
@staticmethod
def _grid_spatial_coordinates(
array: np.ndarray,
window_shape: Tuple[int],
border: Tuple[int],
):
shape = array.shape
num_dims = len(shape)
grid_size = [
max(win_size - 2 * border, 0)
for (win_size, border)
in zip(window_shape, border)
]
steps_along_each_dim = [
GridSampler._enumerate_step_points(
starting=0,
ending=shape[i],
win_size=window_shape[i],
step_size=grid_size[i],
)
for i in range(num_dims)
]
starting_coords = np.asanyarray(np.meshgrid(*steps_along_each_dim))
starting_coords = starting_coords.reshape((num_dims, -1)).T
n_locations = starting_coords.shape[0]
# prepare the output coordinates matrix
spatial_coords = np.zeros((n_locations, num_dims * 2), dtype=np.int32)
spatial_coords[:, :num_dims] = starting_coords
for idx in range(num_dims):
spatial_coords[:, num_dims + idx] = (
starting_coords[:, idx]
+ window_shape[idx]
)
max_coordinates = np.max(spatial_coords, axis=0)[num_dims:]
assert np.all(max_coordinates <= shape[:num_dims]), \
"window size greater than the spatial coordinates {} : {}".format(
max_coordinates, shape)
return spatial_coords
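# Illustrative usage sketch: sample overlapping 3D patches from a random volume and
# batch them with a DataLoader (the shapes in the comment are what this setup yields).
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    volume = np.random.rand(64, 64, 64).astype(np.float32)
    sampler = GridSampler(volume, patch_size=(32, 32, 32), patch_overlap=(4, 4, 4))
    loader = DataLoader(sampler, batch_size=4)
    for batch in loader:
        print(batch[IMAGE].shape, batch[LOCATION].shape)  # (4, 1, 32, 32, 32) and (4, 6)
        break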
|
from .quiz import Quiz
def setup(bot):
bot.add_cog(Quiz(bot))
|
# palabras.py
# prints words that do not start with "f"
# and are not shorter than 4 characters
palabras = ['python', 'academy', 'fundamentals', 'ibm', 'kyndryl']
for p in palabras:
    if len(p) < 4: # if <p> has fewer than 4 characters
        continue
    if p.startswith('f'): # if <p> starts with "f"
        continue
    print(p) # if neither condition matched, print <p>
|
import requests
import arrow
import os
import csv
from fuzzywuzzy import fuzz
SENSITIVITY = 60
def download_data():
    '''Downloads and decodes the OFAC SDN dataset (cached per month)
    and returns it as a list of CSV rows.'''
dataset = []
timenow = arrow.now('GMT').format('MMYY')
filename = "{}paydata.csv".format(timenow)
if filename in os.listdir('.'):
print("\nOpening {}\n".format(filename))
with open(filename, 'r') as olddata:
readdata = olddata.read()
csv_read = csv.reader(readdata.splitlines(), delimiter=",")
else:
print("\nDeleting old files...")
for i in os.listdir('.'):
if 'sdn' in i:
print("\nDeleting: {}\n".format(i))
os.remove(i)
print("\nRetrieving {}\n".format(filename))
with open(filename, 'w+') as data_file:
data = 'https://www.treasury.gov/ofac/downloads/sdn.csv'
with requests.Session() as sesh:
download = sesh.get(data)
decoded_content = download.content.decode('utf-8')
data_file.write(decoded_content)
csv_read = csv.reader(decoded_content.splitlines(), delimiter=",")
my_list = list(csv_read)
for i in (my_list[1::]):
dataset.append(i)
return dataset
datas = download_data()
user_input = input("Enter name: ")
for i in datas:
try:
if fuzz.ratio(user_input.upper(), i[1].upper()) > SENSITIVITY:
print(i[1])
except IndexError:
pass
|
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
import torch
torch.rand(10)
import torch.nn as nn
import torch.nn.functional as F
import glob
from tqdm import tqdm, trange
print(torch.cuda.is_available())
print(torch.cuda.get_device_name())
print(torch.cuda.current_device())
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device:', device)
print()
#Additional Info when using cuda
if device.type == 'cuda':
print(torch.cuda.get_device_name(0))
print('Memory Usage:')
print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3,1), 'GB')
print('Cached: ', round(torch.cuda.memory_reserved(0)/1024**3,1), 'GB')
import torch.backends.cudnn as cudnn
import numpy as np
import os, cv2
from tqdm import tqdm, trange
import seaborn as sns
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import (
check_img_size, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, plot_one_box, strip_optimizer)
from utils.torch_utils import select_device, load_classifier, time_synchronized
from my_utils import xyxy_2_xyxyo, draw_boxes
# Initialize
device = select_device('')
half = device.type != 'cpu' # half precision only supported on CUDA
def prepare_input(img1, img_size=416, half=True):
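    """Resize an RGB image to (img_size, img_size), convert it from HWC layout
    to a 1 x C x H x W tensor on `device` with the requested precision, and
    scale pixel values to [0, 1]."""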
img2 = cv2.resize(img1, (img_size, img_size)) # W x H
img2 = img2.transpose(2,0,1)
img2 = img2[np.newaxis, ...]
img2 = torch.from_numpy(img2).to(device) # torch image is ch x H x W
    img2 = img2.half() if half else img2.float()  # FP16 when half=True, else FP32
img2 /= 255.0
return img2
#%%
# Directories
out = '/home/user01/data_ssd/Talha/yolo/op/'
weights = '/home/user01/data_ssd/Talha/yolo/ScaledYOLOv4/runs/exp2_yolov4-csp-results/weights/best_yolov4-csp-results.pt'
source = '/home/user01/data_ssd/Talha/yolo/paprika_y5/valid/images/'
imgsz = 416
conf_thres = 0.4
iou_thres = 0.5
classes = [0,1,2,3,4,5]
class_names = ["blossom_end_rot", "graymold","powdery_mildew","spider_mite",
"spotting_disease", "snails_and_slugs"]
# deleting files in op_dir
filelist = [f for f in os.listdir(out)]  # optionally filter: if f.endswith(".png")
for f in tqdm(filelist, desc='Deleting old files from directory'):
os.remove(os.path.join(out, f))
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
if half:
model.half() # to FP16
img_paths = glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.png') + \
glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.jpg')
# Run inference
if device.type != 'cpu':
model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
#%%
for i in trange(len(img_paths)):
path = img_paths[i]
img1 = cv2.imread(path)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
img_h, img_w, _ = img1.shape
img2 = prepare_input(img1, 416, half)
# get file name
name = os.path.basename(path)[:-4]
# Inference
t1 = time_synchronized()
pred = model(img2, augment=False)[0]
# Apply NMS
pred = non_max_suppression(pred, conf_thres, iou_thres, classes=classes, agnostic=True)
if pred[0] is not None:
boxes = pred[0].cpu().detach().numpy() # <xmin><ymin><xmax><ymax><confd><class_id>
else:
boxes = np.array([10.0, 20.0, 30.0, 50.0, 0.75, 0]).reshape(1,6) # dummy values
    coords_minmax = np.zeros((boxes.shape[0], 4))  # drop the confidence and class columns
confd = np.zeros((boxes.shape[0], 1))
class_ids = np.zeros((boxes.shape[0], 1))
# assign
coords_minmax = boxes[:,0:4] # coords
confd = boxes[:,4] # confidence
class_ids = boxes[:,5] # class id
coords_xyminmax = []
det_classes = []
for i in range(boxes.shape[0]):
coords_xyminmax.append(xyxy_2_xyxyo(img_w, img_h, coords_minmax[i]))
det_classes.append(class_names[int(class_ids[i])])
all_bounding_boxnind = []
for i in range(boxes.shape[0]):
bounding_box = [0.0] * 6
bounding_box[0] = det_classes[i]
bounding_box[1] = confd[i]
bounding_box[2] = coords_xyminmax[i][0]
bounding_box[3] = coords_xyminmax[i][1]
bounding_box[4] = coords_xyminmax[i][2]
bounding_box[5] = coords_xyminmax[i][3]
bounding_box = str(bounding_box)[1:-1]# remove square brackets
bounding_box = bounding_box.replace("'",'')# removing inverted commas around class name
bounding_box = "".join(bounding_box.split())# remove spaces in between **here dont give space inbetween the inverted commas "".
all_bounding_boxnind.append(bounding_box)
all_bounding_boxnind = ' '.join(map(str, all_bounding_boxnind))# convert list to string
    all_bounding_boxnind = list(all_bounding_boxnind.split(' '))  # convert string back to a list
# replacing commas with spaces
for i in range(len(all_bounding_boxnind)):
all_bounding_boxnind[i] = all_bounding_boxnind[i].replace(',',' ')
for i in range(len(all_bounding_boxnind)):
        # open the file if it exists, otherwise create it
with open(out +'{}.txt'.format(name), "a+") as file_object:
# Move read cursor to the start of file.
file_object.seek(0)
# If file is not empty then append '\n'
data = file_object.read(100)
if len(data) > 0 :
file_object.write("\n")
# Append text at the end of file
file_object.write(all_bounding_boxnind[i])
#%%
import glob, random
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 300
img_paths = glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.png') + \
glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.jpg')
img_path = random.choice(img_paths)
img1 = cv2.imread(img_path)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
img_h, img_w, _ = img1.shape
img2 = prepare_input(img1, 416, half)
pred = model(img2, augment=False)[0]
# Apply NMS
pred = non_max_suppression(pred, conf_thres, iou_thres, classes=classes, agnostic=True)
boxes = pred[0].cpu().detach().numpy() # <xmin><ymin><xmax><ymax><confd><class_id>
coords_minmax = np.zeros((boxes.shape[0], 4))  # drop the confidence and class columns
confd = np.zeros((boxes.shape[0], 1))
class_ids = np.zeros((boxes.shape[0], 1))
# assign
coords_minmax = boxes[:,0:4] # coords
confd = boxes[:,4] # confidence
class_ids = boxes[:,5] # class id
coords_xyminmax = []
det_classes = []
for i in range(boxes.shape[0]):
coords_xyminmax.append(xyxy_2_xyxyo(img_w, img_h, coords_minmax[i]))
det_classes.append(class_names[int(class_ids[i])])
t = np.asarray(coords_xyminmax)
op = draw_boxes(img1, confd, t, det_classes, class_names, order='xy_minmax', analysis=False)
plt.imshow(op)
print('='*50)
print('Image Name: ', os.path.basename(img_path),img1.shape)
print('\nClass_name ', '| B_box Coords ', '| Confidence')
print('_'*50)
for k in range(len(det_classes)):
print(det_classes[k], t[k], confd[k])
print('='*50)
|
from selenium import webdriver
from time import sleep
from random import randint
from secrets import *
class GithubBot:
def __init__(self, username, pw, copyfollowers):
self.username = username
self.copyfollowers = copyfollowers
# START LOGIN PROCESS
self.driver = webdriver.Chrome("/home/hkatwijk/repo/GitHub-Follow-Bot/chromedriver")
self.driver.get("https://github.com/login")
# wait for the page to load the DOM
sleep(2)
self.driver.find_element_by_xpath('//*[@id="login_field"]')\
.send_keys(username)
self.driver.find_element_by_xpath('//*[@id="password"]')\
.send_keys(pw)
self.driver.find_element_by_xpath('//*[@id="login"]/form/div[4]/input[12]')\
.click()
# wait for the page to load the DOM
sleep(randint(4, 12))
# END LOGIN PROCESS
def copy_followers(self):
        #here we go to our target and make a list of all their followers
#go to profile
self.driver.get("https://github.com/{}?tab=followers".format(self.copyfollowers))
sleep(randint(3, 10))
#get all the followers from the first page
users = self.driver.find_elements_by_xpath("//a[@data-hovercard-type='user']")
temp = []
for follower in users:
temp.append(follower.get_attribute("href"))
list_set = set(temp)
self.followers = (list(list_set))
print(self.followers)
def followersRatio(self):
        #ratio of followers to following; if this user does not follow many users back there is no point in following
        #here we will determine if this is a good person to follow from the above list
temp = []
for follower in self.followers:
self.driver.get(follower)
sleep(randint(8, 14))
print('--------------------------------------')
try:
#get the number of followers
numFollowers = self.driver.find_elements_by_xpath('//*[@id="js-pjax-container"]/div[2]/div/div[1]/div/div[4]/div[2]/div/div/a[1]/span')
numFollowing = self.driver.find_elements_by_xpath('//*[@id="js-pjax-container"]/div[2]/div/div[1]/div/div[4]/div[2]/div/div/a[2]/span')
print('Followers : ' + numFollowers[0].text)
print('Following : ' + numFollowing[0].text)
                #convert to numbers and check whether the following count exceeds half the follower count
                halfFollowers = int(numFollowers[0].text) / 2
                if int(numFollowing[0].text) > halfFollowers:
print(follower)
print('follower ratio good')
isActive = self.activeOnGithub(follower)
print(isActive)
if isActive == True:
print('User is Active')
self.followThisUser(follower)
temp.append(follower)
except:
print("can't get the total amount of followers")
list_set = set(temp)
        self.good_followers = list(list_set)
def activeOnGithub(self,follower):
        #check if the users from the ratio are active on github; if not, there is really no point in following
sleep(randint(4, 14))
try:
#get the number of followers
numberOfPinned = len(self.driver.find_elements_by_xpath('//*[@id="js-pjax-container"]/div[2]/div/div[2]/div[2]/div/div[1]/div/ol/li[1]'))
numberOfActivities = len(self.driver.find_elements_by_class_name('profile-rollup-wrapper'))
print(numberOfPinned)
print(numberOfActivities)
#if number of pinned projects 1 or more
if numberOfPinned >= 1:
#if activites for the month greater than 1
if numberOfActivities >= 1:
return True
print('User is not Active')
return False
except:
print("can't get activities and pinned projects")
return False
def followThisUser(self,follower):
#click the follow button if this is a good match
sleep(randint(4, 14))
try:
print("Following......")
follow_button = self.driver.find_element_by_xpath('//input[@value="Follow"]')
# follow_button = self.driver.find_elements_by_xpath("//input[@aria-label='Follow this person']")
inputParent = follow_button.find_element_by_xpath('..')
inputParent.submit()
# checking if you already follow does not work
# print('Follow button : '+inputParent.get_attribute('hidden') )
# if inputParent.get_attribute('hidden') == None:
# print('Submit clicked !')
# inputParent.submit()
# else:
# print('you are already folloing them')
return True
except:
print("can't click follow button")
return False
my_bot = GithubBot(gituser,gitpw,gitCopyFollowers)
my_bot.copy_followers()
my_bot.followersRatio()
|
# -*- coding: utf-8 -*-
from flask_oauthlib.contrib.apps import RemoteAppFactory
mit = RemoteAppFactory('mit', {
'base_url': 'https://oidc.mit.edu',
'access_token_url': '/token',
'authorize_url': '/authorize',
'request_token_url': None,
'request_token_params': {'scope': 'openid,email,profile'},
})
|
from scipy.integrate import odeint
import numpy as np
import matplotlib.pyplot as plt
# Let's run a basic SEIR model extended with deaths (SEIRD)
# describe the model
def deriv(y, t, N, beta, k, delta, m ):
S, E, I, R, D = y
dSdt = -beta * S * I / N
dEdt = (beta * S * I / N - k * E)
dIdt = delta * E - k * I
dRdt = k * I
dDdt = m * I
return dSdt, dEdt, dIdt, dRdt, dDdt
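# The system of ODEs implemented in deriv() above and integrated with odeint below:
#   dS/dt = -beta*S*I/N
#   dE/dt =  beta*S*I/N - k*E
#   dI/dt =  delta*E - k*I
#   dR/dt =  k*I
#   dD/dt =  m*I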
# describe the parameters
N = 1000000 # population
m = 0.02  # mortality rate among the infected
delta = 0.2
P = 0.9  # effect of restrictions as a fraction: 1 = no effect, 0 = full effect
beta = P * 2.5
k = 1/6
S0, E0, I0, R0, D0= N-1, 1, 0, 0, 0 # initial conditions: one infected, rest susceptible
t = np.linspace(0, 99, 100) # Grid of time points (in days)
y0 = S0, E0, I0, R0, D0 # Initial conditions vector
# Integrate the SIR equations over the time grid, t.
ret = odeint(deriv, y0, t, args=(N, beta, k, delta, m, ))
S, E, I, R , D= ret.T
def plotsir(t, S, E, I, R, D):
f, ax = plt.subplots(1,1,figsize=(10,4))
ax.plot(t, S, 'b', alpha=0.7, linewidth=2, label='Susceptible')
ax.plot(t, E, 'y', alpha=0.7, linewidth=2, label='Exposed')
ax.plot(t, I, 'r', alpha=0.7, linewidth=2, label='Infected')
ax.plot(t, R, 'g', alpha=0.7, linewidth=2, label='Recovered')
ax.plot(t, D , 'k', alpha=0.7, linewidth=2, label='Deceased')
ax.set_xlabel('Time (days)')
ax.yaxis.set_tick_params(length=0)
ax.xaxis.set_tick_params(length=0)
ax.grid(b=True, which='major', c='w', lw=2, ls='-')
legend = ax.legend()
legend.get_frame().set_alpha(0.5)
for spine in ('top', 'right', 'bottom', 'left'):
ax.spines[spine].set_visible(False)
#plt.savefig('C:/Users/albin/OneDrive/Dokument/GitHub/SIR_model_SEE070/SIR_model_SEE070/Lecture2dec.png')
plt.show()
plotsir(t, S, E, I, R, D)
|
#!/usr/bin/python
"""
Jamf Pro extension attribute to return a list of authorization rights
enabled on a Mac. The returned levels are...
- allow - unlocks the associated preference pane without admin rights
- authenticate-session-owner-or-admin - requires credentials, but
allows standard users to authenticate.
- None - default preference where admin credentials are required
Authorization rights reference: https://www.dssw.co.uk/reference/authorization-rights/index.html
Add more to RIGHTS list as needed.
Partially cribbed from https://gist.github.com/haircut/20bc1b3f9ef0cec7d869a87b0db92fd3
"""
import subprocess
import plistlib
# List of authorizations to be checked
RIGHTS = [
"system.preferences",
"system.preferences.datetime",
"system.preferences.printing",
]
def get_auth_right(right, format="string"):
"""Gets the specified authorization right in plist format."""
try:
cmd = ["/usr/bin/security", "authorizationdb", "read", right]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, _ = proc.communicate()
if stdout:
return plistlib.readPlistFromString(stdout)
except (IOError, OSError):
pass
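# Note: plistlib.readPlistFromString (used above) was removed in Python 3.9;
# plistlib.loads(stdout) is the equivalent call if this extension attribute is
# ever run under a newer Python 3 interpreter.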
def main():
# Loop through rights and get associated rule. Append to list.
results = []
for right in RIGHTS:
        try:
            rule = get_auth_right(right)["rule"][0]
        except (KeyError, TypeError):
            rule = None
a = "%s: %s" % (right, rule)
results.append(a)
# Format list and print to EA
results = "\n".join(results)
print("<result>%s</result>" % results)
if __name__ == "__main__":
main()
|
import Snake as s
import Fruit as f
snake = s.Snake()
fruit = f.Fruit()
start = True
#processing option for screen
def setup():
size(400,400)
frameRate(20)
def draw():
global snake, fruit, start
#home screen
if start == True:
background(0)
fill(255)
textAlign(CENTER)
textSize(40)
text('SNAKE', width/2, height/2)
textSize(14)
text('version 1.0', width/2, height/2 + 20)
textSize(20)
text('CLICK TO START', width/2, height - 50)
textSize(10)
text('developed by: Matheus de Moncada Assis', width/2, 20)
#starting the game if mouse is pressed
if mousePressed:
if mouseX < width and mouseY < height:
start = False
#drawing fruit and snake
if not start:
background(0)
snake.draw_snake()
snake.coordinates()
fruit.draw_fruit()
        if [fruit.x, fruit.y] in snake.hist:
            fruit.new_pos()
#determine if it has eaten a fruit or not
if snake.eat(fruit.x, fruit.y):
fruit.new_pos()
fruit.draw_fruit()
#determine if it is dead or not
if snake.x < 0 or snake.x > width or snake.y < 0 or snake.y > height or snake.death():
fill(255,255,255,200)
rect(0,0,width,height)
textSize(40)
fill(0)
textAlign(CENTER)
text('GAME OVER', width/2, height/2)
textSize(20)
text('LENGTH: {}'.format(snake.total),width/2, height/2 + 50)
noLoop()
#changing snake directions
def keyPressed():
if keyCode == UP and snake.dy != 1:
snake.direction(0,-1)
elif keyCode == DOWN and snake.dy != -1:
snake.direction(0,1)
elif keyCode == RIGHT and snake.dx != -1:
snake.direction(1,0)
elif keyCode == LEFT and snake.dx != 1:
snake.direction(-1,0)
|
from __future__ import annotations
import os
from base64 import b64decode
from collections import defaultdict
from collections.abc import Mapping, Sequence
from dataclasses import dataclass, replace
from enum import Enum
from ipaddress import IPv4Address, IPv4Network
from pathlib import Path
from typing import Any
import kopf
from neuro_config_client import (
ACMEEnvironment,
ARecord,
Cluster,
DNSConfig,
DockerRegistryConfig,
IdleJobConfig,
OrchestratorConfig,
ResourcePoolType,
)
from yarl import URL
@dataclass(frozen=True)
class Certificate:
private_key: str
certificate: str
class KubeClientAuthType(str, Enum):
NONE = "none"
TOKEN = "token"
CERTIFICATE = "certificate"
@dataclass(frozen=True)
class KubeConfig:
version: str
url: URL
cert_authority_path: Path | None = None
cert_authority_data_pem: str | None = None
auth_type: KubeClientAuthType = KubeClientAuthType.NONE
auth_cert_path: Path | None = None
auth_cert_key_path: Path | None = None
auth_token_path: Path | None = None
auth_token: str | None = None
conn_timeout_s: int = 300
read_timeout_s: int = 100
conn_pool_size: int = 100
@classmethod
def load_from_env(cls, env: Mapping[str, str] | None = None) -> KubeConfig:
env = env or os.environ
return cls(
version=env["NP_KUBE_VERSION"].lstrip("v"),
url=URL(env["NP_KUBE_URL"]),
cert_authority_path=cls._convert_to_path(
env.get("NP_KUBE_CERT_AUTHORITY_PATH")
),
cert_authority_data_pem=env.get("NP_KUBE_CERT_AUTHORITY_DATA_PEM"),
auth_type=KubeClientAuthType(env["NP_KUBE_AUTH_TYPE"]),
auth_cert_path=cls._convert_to_path(env.get("NP_KUBE_AUTH_CERT_PATH")),
auth_cert_key_path=cls._convert_to_path(
env.get("NP_KUBE_AUTH_CERT_KEY_PATH")
),
auth_token_path=cls._convert_to_path(env.get("NP_KUBE_AUTH_TOKEN_PATH")),
auth_token=env.get("NP_KUBE_AUTH_TOKEN"),
)
@classmethod
def _convert_to_path(cls, value: str | None) -> Path | None:
return Path(value) if value else None
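# Hedged illustration (not part of the original module): the environment keys
# KubeConfig.load_from_env reads, taken from the method above. NP_KUBE_VERSION,
# NP_KUBE_URL and NP_KUBE_AUTH_TYPE are required; the rest are optional. The
# values below are placeholders, e.g.:
#
#   KubeConfig.load_from_env({
#       "NP_KUBE_VERSION": "v1.24.0",
#       "NP_KUBE_URL": "https://kubernetes.default.svc",
#       "NP_KUBE_AUTH_TYPE": "token",
#       "NP_KUBE_AUTH_TOKEN": "<service-account-token>",
#   })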
@dataclass(frozen=True)
class LabelsConfig:
job: str = "platform.neuromation.io/job"
node_pool: str = "platform.neuromation.io/nodepool"
accelerator: str = "platform.neuromation.io/accelerator"
preemptible: str = "platform.neuromation.io/preemptible"
@dataclass(frozen=True)
class HelmRepo:
url: URL
name: str = ""
username: str = ""
password: str = ""
@dataclass
class HelmReleaseNames:
platform: str
@dataclass(frozen=True)
class HelmChartNames:
docker_registry: str = "docker-registry"
minio: str = "minio"
traefik: str = "traefik"
adjust_inotify: str = "adjust-inotify"
nvidia_gpu_driver: str = "nvidia-gpu-driver"
nvidia_gpu_driver_gcp: str = "nvidia-gpu-driver-gcp"
platform: str = "platform"
platform_storage: str = "platform-storage"
platform_registry: str = "platform-registry"
platform_monitoring: str = "platform-monitoring"
platform_container_runtime: str = "platform-container-runtime"
platform_secrets: str = "platform-secrets"
platform_reports: str = "platform-reports"
platform_disks: str = "platform-disks"
platform_api_poller: str = "platform-api-poller"
platform_buckets: str = "platform-buckets"
@dataclass(frozen=True)
class HelmChartVersions:
platform: str
@dataclass(frozen=True)
class Config:
node_name: str
log_level: str
retries: int
backoff: int
kube_config: KubeConfig
helm_release_names: HelmReleaseNames
helm_chart_names: HelmChartNames
helm_chart_versions: HelmChartVersions
platform_auth_url: URL
platform_ingress_auth_url: URL
platform_config_url: URL
platform_admin_url: URL
platform_config_watch_interval_s: float
platform_api_url: URL
platform_namespace: str
platform_lock_secret_name: str
acme_ca_staging_path: str
is_standalone: bool
@classmethod
def load_from_env(cls, env: Mapping[str, str] | None = None) -> Config:
env = env or os.environ
return cls(
node_name=env["NP_NODE_NAME"],
log_level=(env.get("NP_CONTROLLER_LOG_LEVEL") or "INFO").upper(),
retries=int(env.get("NP_CONTROLLER_RETRIES") or "3"),
backoff=int(env.get("NP_CONTROLLER_BACKOFF") or "60"),
kube_config=KubeConfig.load_from_env(env),
helm_release_names=HelmReleaseNames(platform="platform"),
helm_chart_names=HelmChartNames(),
helm_chart_versions=HelmChartVersions(
platform=env["NP_HELM_PLATFORM_CHART_VERSION"],
),
platform_auth_url=URL(env["NP_PLATFORM_AUTH_URL"]),
platform_ingress_auth_url=URL(env["NP_PLATFORM_INGRESS_AUTH_URL"]),
platform_config_url=URL(env["NP_PLATFORM_CONFIG_URL"]),
platform_admin_url=URL(env["NP_PLATFORM_ADMIN_URL"]),
platform_config_watch_interval_s=float(
env.get("NP_PLATFORM_CONFIG_WATCH_INTERVAL_S", "15")
),
platform_api_url=URL(env["NP_PLATFORM_API_URL"]),
platform_namespace=env["NP_PLATFORM_NAMESPACE"],
platform_lock_secret_name=env["NP_PLATFORM_LOCK_SECRET_NAME"],
acme_ca_staging_path=env["NP_ACME_CA_STAGING_PATH"],
is_standalone=env.get("NP_STANDALONE", "false").lower() == "true",
)
def _spec_default_factory() -> dict[str, Any]:
return defaultdict(_spec_default_factory)
class IamSpec(dict[str, Any]):
def __init__(self, spec: dict[str, Any]) -> None:
super().__init__(spec)
self._spec = defaultdict(_spec_default_factory, spec)
@property
def aws_region(self) -> str:
return self._spec["aws"].get("region", "")
@property
def aws_role_arn(self) -> str:
return self._spec["aws"].get("roleArn", "")
@property
def aws_s3_role_arn(self) -> str:
return self._spec["aws"].get("s3RoleArn", "")
@property
def gcp_service_account_key_base64(self) -> str:
return self._spec["gcp"].get("serviceAccountKeyBase64", "")
class KubernetesSpec(dict[str, Any]):
def __init__(self, spec: dict[str, Any]) -> None:
super().__init__(spec)
self._spec = defaultdict(_spec_default_factory, spec)
@property
def provider(self) -> str:
return self["provider"]
@property
def standard_storage_class_name(self) -> str:
return self.get("standardStorageClassName", "")
@property
def node_label_job(self) -> str:
return self._spec["nodeLabels"].get("job", "")
@property
def node_label_node_pool(self) -> str:
return self._spec["nodeLabels"].get("nodePool", "")
@property
def node_label_accelerator(self) -> str:
return self._spec["nodeLabels"].get("accelerator", "")
@property
def node_label_preemptible(self) -> str:
return self._spec["nodeLabels"].get("preemptible", "")
@property
def kubelet_port(self) -> int | None:
return self.get("kubeletPort")
@property
def docker_config_secret_create(self) -> bool:
return self._spec["dockerConfigSecret"].get("create", True)
@property
def docker_config_secret_name(self) -> str:
return self._spec["dockerConfigSecret"].get("name", "")
@property
def tpu_network(self) -> IPv4Network | None:
return IPv4Network(self["tpuIPv4CIDR"]) if self.get("tpuIPv4CIDR") else None
class StorageSpec(dict[str, Any]):
def __init__(self, spec: dict[str, Any]) -> None:
super().__init__(spec)
@property
def path(self) -> str:
return self.get("path", "")
@property
def storage_size(self) -> str:
return self["kubernetes"]["persistence"].get("size", "")
@property
def storage_class_name(self) -> str:
return self["kubernetes"]["persistence"].get("storageClassName", "")
@property
def nfs_server(self) -> str:
return self["nfs"].get("server", "")
@property
def nfs_export_path(self) -> str:
return self["nfs"].get("path", "/")
@property
def smb_server(self) -> str:
return self["smb"].get("server", "")
@property
def smb_share_name(self) -> str:
return self["smb"].get("shareName", "")
@property
def smb_username(self) -> str:
return self["smb"].get("username", "")
@property
def smb_password(self) -> str:
return self["smb"].get("password", "")
@property
def gcs_bucket_name(self) -> str:
return self["gcs"].get("bucket", "")
@property
def azure_storage_account_name(self) -> str:
return self["azureFile"].get("storageAccountName", "")
@property
def azure_storage_account_key(self) -> str:
return self["azureFile"].get("storageAccountKey", "")
@property
def azure_share_name(self) -> str:
return self["azureFile"].get("shareName", "")
class BlobStorageSpec(dict[str, Any]):
def __init__(self, spec: dict[str, Any]) -> None:
super().__init__(spec)
@property
def aws_region(self) -> str:
return self["aws"]["region"]
@property
def gcp_project(self) -> str:
return self["gcp"]["project"]
@property
def azure_storrage_account_name(self) -> str:
return self["azure"]["storageAccountName"]
@property
def azure_storrage_account_key(self) -> str:
return self["azure"]["storageAccountKey"]
@property
def emc_ecs_access_key_id(self) -> str:
return self["emcEcs"]["accessKeyId"]
@property
def emc_ecs_secret_access_key(self) -> str:
return self["emcEcs"]["secretAccessKey"]
@property
def emc_ecs_s3_role(self) -> str:
return self["emcEcs"]["s3Role"]
@property
def emc_ecs_s3_endpoint(self) -> str:
return self["emcEcs"]["s3Endpoint"]
@property
def emc_ecs_management_endpoint(self) -> str:
return self["emcEcs"]["managementEndpoint"]
@property
def open_stack_region(self) -> str:
return self["openStack"]["region"]
@property
def open_stack_username(self) -> str:
return self["openStack"]["username"]
@property
def open_stack_password(self) -> str:
return self["openStack"]["password"]
@property
def open_stack_endpoint(self) -> str:
return self["openStack"]["endpoint"]
@property
def open_stack_s3_endpoint(self) -> str:
return self["openStack"]["s3Endpoint"]
@property
def minio_url(self) -> str:
return self["minio"]["url"]
@property
def minio_region(self) -> str:
return self["minio"]["region"]
@property
def minio_access_key(self) -> str:
return self["minio"]["accessKey"]
@property
def minio_secret_key(self) -> str:
return self["minio"]["secretKey"]
@property
def kubernetes_storage_class_name(self) -> str:
return self["kubernetes"]["persistence"].get("storageClassName", "")
@property
def kubernetes_storage_size(self) -> str:
return self["kubernetes"]["persistence"].get("size", "")
class RegistrySpec(dict[str, Any]):
def __init__(self, spec: dict[str, Any]) -> None:
super().__init__(spec)
@property
def aws_account_id(self) -> str:
return self["aws"]["accountId"]
@property
def aws_region(self) -> str:
return self["aws"]["region"]
@property
def gcp_project(self) -> str:
return self["gcp"]["project"]
@property
def azure_url(self) -> str:
return self["azure"]["url"]
@property
def azure_username(self) -> str:
return self["azure"]["username"]
@property
def azure_password(self) -> str:
return self["azure"]["password"]
@property
def docker_url(self) -> str:
return self["docker"]["url"]
@property
def docker_username(self) -> str:
return self["docker"].get("username", "")
@property
def docker_password(self) -> str:
return self["docker"].get("password", "")
@property
def kubernetes_storage_class_name(self) -> str:
return self["kubernetes"]["persistence"].get("storageClassName", "")
@property
def kubernetes_storage_size(self) -> str:
return self["kubernetes"]["persistence"].get("size", "")
class MonitoringSpec(dict[str, Any]):
def __init__(self, spec: dict[str, Any]) -> None:
super().__init__(spec)
@property
def logs_region(self) -> str:
return self["logs"]["blobStorage"].get("region", "")
@property
def logs_bucket(self) -> str:
return self["logs"]["blobStorage"]["bucket"]
@property
def metrics(self) -> dict[str, Any]:
return self["metrics"]
@property
def metrics_region(self) -> str:
return self["metrics"].get("region", "")
@property
def metrics_retention_time(self) -> str:
return self["metrics"].get("retentionTime", "")
@property
def metrics_bucket(self) -> str:
return self["metrics"]["blobStorage"].get("bucket", "")
@property
def metrics_storage_size(self) -> str:
return self["metrics"]["kubernetes"]["persistence"].get("size", "")
@property
def metrics_storage_class_name(self) -> str:
return self["metrics"]["kubernetes"]["persistence"].get("storageClassName", "")
class DisksSpec(dict[str, Any]):
def __init__(self, spec: dict[str, Any]) -> None:
super().__init__(spec)
self._spec = defaultdict(_spec_default_factory, spec)
@property
def storage_class_name(self) -> str:
return self._spec["kubernetes"]["persistence"].get("storageClassName", "")
class IngressControllerSpec(dict[str, Any]):
def __init__(self, spec: dict[str, Any]) -> None:
super().__init__(spec)
self._spec = defaultdict(_spec_default_factory, spec)
@property
def enabled(self) -> bool:
return self._spec.get("enabled", True)
@property
def replicas(self) -> int | None:
return self._spec.get("replicas")
@property
def namespaces(self) -> Sequence[str]:
return self._spec.get("namespaces", ())
@property
def service_type(self) -> str:
return self._spec.get("serviceType", "")
@property
def public_ips(self) -> Sequence[IPv4Address]:
return [IPv4Address(ip) for ip in self._spec.get("publicIPs", [])]
@property
def node_port_http(self) -> int | None:
return self._spec["nodePorts"].get("http")
@property
def node_port_https(self) -> int | None:
return self._spec["nodePorts"].get("https")
@property
def host_port_http(self) -> int | None:
return self._spec["hostPorts"].get("http")
@property
def host_port_https(self) -> int | None:
return self._spec["hostPorts"].get("https")
@property
def ssl_cert_data(self) -> str:
return self._spec["ssl"].get("certificateData", "")
@property
def ssl_cert_key_data(self) -> str:
return self._spec["ssl"].get("certificateKeyData", "")
class Spec(dict[str, Any]):
def __init__(self, spec: dict[str, Any]) -> None:
super().__init__(spec)
spec = defaultdict(_spec_default_factory, spec)
self._token = spec.get("token", "")
self._iam = IamSpec(spec["iam"])
self._kubernetes = KubernetesSpec(spec["kubernetes"])
self._ingress_controller = IngressControllerSpec(spec["ingressController"])
self._registry = RegistrySpec(spec["registry"])
self._storages = [StorageSpec(s) for s in spec["storages"]]
self._blob_storage = BlobStorageSpec(spec["blobStorage"])
self._disks = DisksSpec(spec["disks"])
self._monitoring = MonitoringSpec(spec["monitoring"])
@property
def token(self) -> str:
return self._token
@property
def iam(self) -> IamSpec:
return self._iam
@property
def kubernetes(self) -> KubernetesSpec:
return self._kubernetes
@property
def ingress_controller(self) -> IngressControllerSpec:
return self._ingress_controller
@property
def registry(self) -> RegistrySpec:
return self._registry
@property
def storages(self) -> Sequence[StorageSpec]:
return self._storages
@property
def blob_storage(self) -> BlobStorageSpec:
return self._blob_storage
@property
def disks(self) -> DisksSpec:
return self._disks
@property
def monitoring(self) -> MonitoringSpec:
return self._monitoring
class Metadata(dict[str, Any]):
def __init__(self, spec: dict[str, Any]) -> None:
super().__init__(spec)
@property
def name(self) -> str:
return self["name"]
@dataclass(frozen=True)
class DockerConfig:
url: URL
email: str = ""
username: str = ""
password: str = ""
secret_name: str = ""
create_secret: bool = False
class IngressServiceType(str, Enum):
LOAD_BALANCER = "LoadBalancer"
NODE_PORT = "NodePort"
class StorageType(str, Enum):
KUBERNETES = "kubernetes"
NFS = "nfs"
SMB = "smb"
GCS = "gcs"
AZURE_fILE = "azureFile"
@dataclass(frozen=True)
class StorageConfig:
type: StorageType
path: str = ""
storage_size: str = "10Gi"
storage_class_name: str = ""
nfs_export_path: str = ""
nfs_server: str = ""
smb_server: str = ""
smb_share_name: str = ""
smb_username: str = ""
smb_password: str = ""
azure_storage_account_name: str = ""
azure_storage_account_key: str = ""
azure_share_name: str = ""
gcs_bucket_name: str = ""
class RegistryProvider(str, Enum):
AWS = "aws"
AZURE = "azure"
GCP = "gcp"
DOCKER = "docker"
@dataclass(frozen=True)
class RegistryConfig:
provider: RegistryProvider
aws_account_id: str = ""
aws_region: str = ""
gcp_project: str = ""
azure_url: URL | None = None
azure_username: str = ""
azure_password: str = ""
docker_registry_install: bool = False
docker_registry_url: URL | None = None
docker_registry_username: str = ""
docker_registry_password: str = ""
docker_registry_storage_class_name: str = ""
docker_registry_storage_size: str = ""
class BucketsProvider(str, Enum):
AWS = "aws"
AZURE = "azure"
GCP = "gcp"
EMC_ECS = "emcEcs"
OPEN_STACK = "openStack"
MINIO = "minio"
@dataclass(frozen=True)
class BucketsConfig:
provider: BucketsProvider
disable_creation: bool = False
aws_region: str = ""
gcp_project: str = ""
gcp_location: str = "us" # default GCP location
azure_storage_account_name: str = ""
azure_storage_account_key: str = ""
minio_install: bool = False
minio_url: URL | None = None
minio_public_url: URL | None = None
minio_region: str = ""
minio_access_key: str = ""
minio_secret_key: str = ""
minio_storage_class_name: str = ""
minio_storage_size: str = ""
emc_ecs_access_key_id: str = ""
emc_ecs_secret_access_key: str = ""
emc_ecs_s3_endpoint: URL | None = None
emc_ecs_management_endpoint: URL | None = None
emc_ecs_s3_assumable_role: str = ""
open_stack_username: str = ""
open_stack_password: str = ""
open_stack_endpoint: URL | None = None
open_stack_s3_endpoint: URL | None = None
open_stack_region_name: str = ""
class MetricsStorageType(Enum):
BUCKETS = 1
KUBERNETES = 2
@dataclass(frozen=True)
class MonitoringConfig:
logs_bucket_name: str
logs_region: str = ""
metrics_enabled: bool = True
metrics_storage_type: MetricsStorageType = MetricsStorageType.BUCKETS
metrics_bucket_name: str = ""
metrics_storage_class_name: str = ""
metrics_storage_size: str = ""
metrics_retention_time: str = "3d"
metrics_region: str = ""
@dataclass(frozen=True)
class PlatformConfig:
release_name: str
auth_url: URL
ingress_auth_url: URL
config_url: URL
admin_url: URL
api_url: URL
token: str
cluster_name: str
service_account_name: str
image_pull_secret_names: Sequence[str]
pre_pull_images: Sequence[str]
standard_storage_class_name: str | None
kubernetes_provider: str
kubernetes_version: str
kubernetes_tpu_network: IPv4Network | None
node_labels: LabelsConfig
kubelet_port: int
nvidia_dcgm_port: int
namespace: str
ingress_dns_name: str
ingress_url: URL
ingress_registry_url: URL
ingress_metrics_url: URL
ingress_acme_enabled: bool
ingress_acme_environment: ACMEEnvironment
ingress_controller_install: bool
ingress_controller_replicas: int
ingress_public_ips: Sequence[IPv4Address]
ingress_cors_origins: Sequence[str]
ingress_node_port_http: int | None
ingress_node_port_https: int | None
ingress_host_port_http: int | None
ingress_host_port_https: int | None
ingress_service_type: IngressServiceType
ingress_service_name: str
ingress_namespaces: Sequence[str]
ingress_ssl_cert_data: str
ingress_ssl_cert_key_data: str
disks_storage_limit_per_user_gb: int
disks_storage_class_name: str | None
jobs_namespace: str
jobs_resource_pool_types: Sequence[ResourcePoolType]
jobs_priority_class_name: str
jobs_internal_host_template: str
jobs_fallback_host: str
idle_jobs: Sequence[IdleJobConfig]
storages: Sequence[StorageConfig]
buckets: BucketsConfig
registry: RegistryConfig
monitoring: MonitoringConfig
helm_repo: HelmRepo
docker_config: DockerConfig
grafana_username: str | None = None
grafana_password: str | None = None
sentry_dsn: URL | None = None
sentry_sample_rate: float | None = None
docker_hub_config: DockerConfig | None = None
aws_region: str = ""
aws_role_arn: str = ""
aws_s3_role_arn: str = ""
gcp_service_account_key: str = ""
gcp_service_account_key_base64: str = ""
def get_storage_claim_name(self, path: str) -> str:
name = f"{self.release_name}-storage"
if path:
name += path.replace("/", "-")
return name
def get_image(self, name: str) -> str:
url = str(self.docker_config.url / name)
return url.replace("http://", "").replace("https://", "")
def create_dns_config(
self,
ingress_service: dict[str, Any] | None = None,
aws_ingress_lb: dict[str, Any] | None = None,
) -> DNSConfig | None:
if not ingress_service and not self.ingress_public_ips:
return None
a_records: list[ARecord] = []
if self.ingress_public_ips:
ips = [str(ip) for ip in self.ingress_public_ips]
a_records.extend(
(
ARecord(name=f"{self.ingress_dns_name}.", ips=ips),
ARecord(name=f"*.jobs.{self.ingress_dns_name}.", ips=ips),
ARecord(name=f"registry.{self.ingress_dns_name}.", ips=ips),
ARecord(name=f"metrics.{self.ingress_dns_name}.", ips=ips),
)
)
if self.buckets.provider == BucketsProvider.MINIO:
a_records.append(
ARecord(name=f"blob.{self.ingress_dns_name}.", ips=ips)
)
elif aws_ingress_lb and ingress_service:
ingress_host = ingress_service["status"]["loadBalancer"]["ingress"][0][
"hostname"
]
ingress_zone_id = aws_ingress_lb["CanonicalHostedZoneNameID"]
a_records.extend(
(
ARecord(
name=f"{self.ingress_dns_name}.",
dns_name=ingress_host,
zone_id=ingress_zone_id,
),
ARecord(
name=f"*.jobs.{self.ingress_dns_name}.",
dns_name=ingress_host,
zone_id=ingress_zone_id,
),
ARecord(
name=f"registry.{self.ingress_dns_name}.",
dns_name=ingress_host,
zone_id=ingress_zone_id,
),
ARecord(
name=f"metrics.{self.ingress_dns_name}.",
dns_name=ingress_host,
zone_id=ingress_zone_id,
),
)
)
elif ingress_service and ingress_service["spec"]["type"] == "LoadBalancer":
ingress_host = ingress_service["status"]["loadBalancer"]["ingress"][0]["ip"]
a_records.extend(
(
ARecord(
name=f"{self.ingress_dns_name}.",
ips=[ingress_host],
),
ARecord(
name=f"*.jobs.{self.ingress_dns_name}.",
ips=[ingress_host],
),
ARecord(
name=f"registry.{self.ingress_dns_name}.",
ips=[ingress_host],
),
ARecord(
name=f"metrics.{self.ingress_dns_name}.",
ips=[ingress_host],
),
)
)
if self.buckets.provider == BucketsProvider.MINIO:
a_records.append(
ARecord(name=f"blob.{self.ingress_dns_name}.", ips=[ingress_host])
)
else:
return None
return DNSConfig(name=self.ingress_dns_name, a_records=a_records)
def create_orchestrator_config(self, cluster: Cluster) -> OrchestratorConfig | None:
assert cluster.orchestrator
orchestrator = replace(
cluster.orchestrator,
job_internal_hostname_template=self.jobs_internal_host_template,
)
if self.kubernetes_tpu_network:
orchestrator = replace(
orchestrator,
resource_pool_types=self._update_tpu_network(
orchestrator.resource_pool_types,
self.kubernetes_tpu_network,
),
)
if cluster.orchestrator == orchestrator:
return None
return orchestrator
@classmethod
def _update_tpu_network(
cls,
resource_pools_types: Sequence[ResourcePoolType],
tpu_network: IPv4Network,
) -> Sequence[ResourcePoolType]:
result = []
for rpt in resource_pools_types:
if rpt.tpu:
result.append(
replace(rpt, tpu=replace(rpt.tpu, ipv4_cidr_block=str(tpu_network)))
)
else:
result.append(rpt)
return result
class PlatformConfigFactory:
def __init__(self, config: Config) -> None:
self._config = config
def create(self, platform_body: kopf.Body, cluster: Cluster) -> PlatformConfig:
assert cluster.credentials
assert cluster.orchestrator
assert cluster.disks
assert cluster.dns
assert cluster.ingress
metadata = Metadata(platform_body["metadata"])
spec = Spec(platform_body["spec"])
release_name = self._config.helm_release_names.platform
docker_config = self._create_neuro_docker_config(
cluster,
(
spec.kubernetes.docker_config_secret_name
or f"{release_name}-docker-config"
),
spec.kubernetes.docker_config_secret_create,
)
docker_hub_config = self._create_docker_hub_config(
cluster, f"{release_name}-docker-hub-config"
)
jobs_namespace = self._config.platform_namespace + "-jobs"
return PlatformConfig(
release_name=release_name,
auth_url=self._config.platform_auth_url,
ingress_auth_url=self._config.platform_ingress_auth_url,
config_url=self._config.platform_config_url,
admin_url=self._config.platform_admin_url,
api_url=self._config.platform_api_url,
token=spec.token,
cluster_name=metadata.name,
namespace=self._config.platform_namespace,
service_account_name="default",
image_pull_secret_names=self._create_image_pull_secret_names(
docker_config, docker_hub_config
),
pre_pull_images=cluster.orchestrator.pre_pull_images,
standard_storage_class_name=(
spec.kubernetes.standard_storage_class_name or None
),
kubernetes_provider=spec.kubernetes.provider,
kubernetes_version=self._config.kube_config.version,
kubernetes_tpu_network=spec.kubernetes.tpu_network,
kubelet_port=int(spec.kubernetes.kubelet_port or 10250),
nvidia_dcgm_port=9400,
node_labels=LabelsConfig(
job=spec.kubernetes.node_label_job or LabelsConfig.job,
node_pool=(
spec.kubernetes.node_label_node_pool or LabelsConfig.node_pool
),
accelerator=(
spec.kubernetes.node_label_accelerator or LabelsConfig.accelerator
),
preemptible=(
spec.kubernetes.node_label_preemptible or LabelsConfig.preemptible
),
),
ingress_dns_name=cluster.dns.name,
ingress_url=URL(f"https://{cluster.dns.name}"),
ingress_registry_url=URL(f"https://registry.{cluster.dns.name}"),
ingress_metrics_url=URL(f"https://metrics.{cluster.dns.name}"),
ingress_acme_enabled=(
not spec.ingress_controller.ssl_cert_data
or not spec.ingress_controller.ssl_cert_key_data
),
ingress_acme_environment=cluster.ingress.acme_environment,
ingress_controller_install=spec.ingress_controller.enabled,
ingress_controller_replicas=spec.ingress_controller.replicas or 2,
ingress_public_ips=spec.ingress_controller.public_ips,
ingress_cors_origins=cluster.ingress.cors_origins,
ingress_service_type=IngressServiceType(
spec.ingress_controller.service_type or IngressServiceType.LOAD_BALANCER
),
ingress_service_name="traefik",
ingress_namespaces=sorted(
{
self._config.platform_namespace,
jobs_namespace,
*spec.ingress_controller.namespaces,
}
),
ingress_node_port_http=spec.ingress_controller.node_port_http,
ingress_node_port_https=spec.ingress_controller.node_port_https,
ingress_host_port_http=spec.ingress_controller.host_port_http,
ingress_host_port_https=spec.ingress_controller.host_port_https,
ingress_ssl_cert_data=spec.ingress_controller.ssl_cert_data,
ingress_ssl_cert_key_data=spec.ingress_controller.ssl_cert_key_data,
jobs_namespace=jobs_namespace,
jobs_resource_pool_types=cluster.orchestrator.resource_pool_types,
jobs_priority_class_name=f"{self._config.helm_release_names.platform}-job",
jobs_internal_host_template=f"{{job_id}}.{jobs_namespace}",
jobs_fallback_host=cluster.orchestrator.job_fallback_hostname,
idle_jobs=cluster.orchestrator.idle_jobs,
storages=[self._create_storage(s) for s in spec.storages],
buckets=self._create_buckets(spec.blob_storage, cluster),
registry=self._create_registry(spec.registry),
monitoring=self._create_monitoring(spec.monitoring),
disks_storage_limit_per_user_gb=cluster.disks.storage_limit_per_user_gb,
disks_storage_class_name=spec.disks.storage_class_name or None,
helm_repo=self._create_helm_repo(cluster),
docker_config=docker_config,
docker_hub_config=docker_hub_config,
grafana_username=cluster.credentials.grafana.username
if cluster.credentials.grafana
else None,
grafana_password=cluster.credentials.grafana.password
if cluster.credentials.grafana
else None,
sentry_dsn=cluster.credentials.sentry.public_dsn
if cluster.credentials.sentry
else None,
sentry_sample_rate=cluster.credentials.sentry.sample_rate
if cluster.credentials.sentry
else None,
aws_region=spec.iam.aws_region,
aws_role_arn=spec.iam.aws_role_arn,
aws_s3_role_arn=spec.iam.aws_s3_role_arn,
gcp_service_account_key=self._base64_decode(
spec.iam.gcp_service_account_key_base64
),
gcp_service_account_key_base64=spec.iam.gcp_service_account_key_base64,
)
def _create_helm_repo(self, cluster: Cluster) -> HelmRepo:
assert cluster.credentials
assert cluster.credentials.neuro_helm
return HelmRepo(
url=cluster.credentials.neuro_helm.url,
username=cluster.credentials.neuro_helm.username or "",
password=cluster.credentials.neuro_helm.password or "",
)
def _create_neuro_docker_config(
self, cluster: Cluster, secret_name: str, create_secret: bool
) -> DockerConfig:
assert cluster.credentials
assert cluster.credentials.neuro_registry
return self._create_docker_config(
cluster.credentials.neuro_registry, secret_name, create_secret
)
def _create_docker_hub_config(
self, cluster: Cluster, secret_name: str
) -> DockerConfig | None:
assert cluster.credentials
if cluster.credentials.docker_hub is None:
return None
return self._create_docker_config(
cluster.credentials.docker_hub, secret_name, True
)
def _create_docker_config(
self, registry: DockerRegistryConfig, secret_name: str, create_secret: bool
) -> DockerConfig:
if not registry.username or not registry.password:
secret_name = ""
create_secret = False
return DockerConfig(
url=registry.url,
email=registry.email or "",
username=registry.username or "",
password=registry.password or "",
secret_name=secret_name,
create_secret=create_secret,
)
def _create_image_pull_secret_names(
self, *docker_config: DockerConfig | None
) -> Sequence[str]:
result: list[str] = []
for config in docker_config:
if config and config.secret_name:
result.append(config.secret_name)
return result
def _create_storage(self, spec: StorageSpec) -> StorageConfig:
if not spec:
raise ValueError("Storage spec is empty")
if StorageType.KUBERNETES in spec:
return StorageConfig(
type=StorageType.KUBERNETES,
path=spec.path,
storage_class_name=spec.storage_class_name,
storage_size=spec.storage_size,
)
elif StorageType.NFS in spec:
return StorageConfig(
type=StorageType.NFS,
path=spec.path,
nfs_server=spec.nfs_server,
nfs_export_path=spec.nfs_export_path,
)
elif StorageType.SMB in spec:
return StorageConfig(
type=StorageType.SMB,
path=spec.path,
smb_server=spec.smb_server,
smb_share_name=spec.smb_share_name,
smb_username=spec.smb_username,
smb_password=spec.smb_password,
)
elif StorageType.AZURE_fILE in spec:
return StorageConfig(
type=StorageType.AZURE_fILE,
path=spec.path,
azure_storage_account_name=spec.azure_storage_account_name,
azure_storage_account_key=spec.azure_storage_account_key,
azure_share_name=spec.azure_share_name,
)
elif StorageType.GCS in spec:
return StorageConfig(
type=StorageType.GCS, gcs_bucket_name=spec.gcs_bucket_name
)
else:
raise ValueError("Storage type is not supported")
def _create_buckets(self, spec: BlobStorageSpec, cluster: Cluster) -> BucketsConfig:
if not spec:
raise ValueError("Blob storage spec is empty")
assert cluster.credentials
assert cluster.buckets
assert cluster.dns
if BucketsProvider.AWS in spec:
return BucketsConfig(
provider=BucketsProvider.AWS,
disable_creation=cluster.buckets.disable_creation,
aws_region=spec.aws_region,
)
elif BucketsProvider.GCP in spec:
return BucketsConfig(
provider=BucketsProvider.GCP,
gcp_project=spec.gcp_project,
disable_creation=cluster.buckets.disable_creation,
)
elif BucketsProvider.AZURE in spec:
return BucketsConfig(
provider=BucketsProvider.AZURE,
disable_creation=cluster.buckets.disable_creation,
azure_storage_account_name=spec.azure_storrage_account_name,
azure_storage_account_key=spec.azure_storrage_account_key,
)
elif BucketsProvider.EMC_ECS in spec:
return BucketsConfig(
provider=BucketsProvider.EMC_ECS,
disable_creation=cluster.buckets.disable_creation,
emc_ecs_access_key_id=spec.emc_ecs_access_key_id,
emc_ecs_secret_access_key=spec.emc_ecs_secret_access_key,
emc_ecs_s3_assumable_role=spec.emc_ecs_s3_role,
emc_ecs_s3_endpoint=URL(spec.emc_ecs_s3_endpoint),
emc_ecs_management_endpoint=URL(spec.emc_ecs_management_endpoint),
)
elif BucketsProvider.OPEN_STACK in spec:
return BucketsConfig(
provider=BucketsProvider.OPEN_STACK,
disable_creation=cluster.buckets.disable_creation,
open_stack_region_name=spec.open_stack_region,
open_stack_username=spec.open_stack_username,
open_stack_password=spec.open_stack_password,
open_stack_endpoint=URL(spec.open_stack_endpoint),
open_stack_s3_endpoint=URL(spec.open_stack_s3_endpoint),
)
elif BucketsProvider.MINIO in spec:
return BucketsConfig(
provider=BucketsProvider.MINIO,
disable_creation=cluster.buckets.disable_creation,
minio_url=URL(spec.minio_url),
# Ingress should be configured manually in this case
minio_public_url=URL(f"https://blob.{cluster.dns.name}"),
minio_region=spec.minio_region,
minio_access_key=spec.minio_access_key,
minio_secret_key=spec.minio_secret_key,
)
elif "kubernetes" in spec:
assert cluster.credentials.minio
return BucketsConfig(
provider=BucketsProvider.MINIO,
disable_creation=cluster.buckets.disable_creation,
minio_install=True,
minio_url=URL.build(
scheme="http",
host=f"{self._config.helm_release_names.platform}-minio",
port=9000,
),
# Ingress should be configured manually in this case
minio_public_url=URL(f"https://blob.{cluster.dns.name}"),
minio_region="minio",
minio_access_key=cluster.credentials.minio.username,
minio_secret_key=cluster.credentials.minio.password,
minio_storage_class_name=spec.kubernetes_storage_class_name,
minio_storage_size=spec.kubernetes_storage_size or "10Gi",
)
else:
raise ValueError("Bucket provider is not supported")
def _create_registry(self, spec: RegistrySpec) -> RegistryConfig:
if not spec:
raise ValueError("Registry spec is empty")
if RegistryProvider.AWS in spec:
return RegistryConfig(
provider=RegistryProvider.AWS,
aws_account_id=spec.aws_account_id,
aws_region=spec.aws_region,
)
elif RegistryProvider.GCP in spec:
return RegistryConfig(
provider=RegistryProvider.GCP,
gcp_project=spec.gcp_project,
)
elif RegistryProvider.AZURE in spec:
url = URL(spec.azure_url)
if not url.scheme:
url = URL(f"https://{url!s}")
return RegistryConfig(
provider=RegistryProvider.AZURE,
azure_url=url,
azure_username=spec.azure_username,
azure_password=spec.azure_password,
)
elif RegistryProvider.DOCKER in spec:
return RegistryConfig(
provider=RegistryProvider.DOCKER,
docker_registry_url=URL(spec.docker_url),
docker_registry_username=spec.docker_username,
docker_registry_password=spec.docker_password,
)
elif "kubernetes" in spec:
return RegistryConfig(
provider=RegistryProvider.DOCKER,
docker_registry_install=True,
docker_registry_url=URL.build(
scheme="http",
host=f"{self._config.helm_release_names.platform}-docker-registry",
port=5000,
),
docker_registry_storage_class_name=spec.kubernetes_storage_class_name,
docker_registry_storage_size=spec.kubernetes_storage_size or "10Gi",
)
else:
raise ValueError("Registry provider is not supported")
def _create_monitoring(self, spec: MonitoringSpec) -> MonitoringConfig:
if not spec:
raise ValueError("Monitoring spec is empty")
metrics_enabled = not self._config.is_standalone
if not metrics_enabled:
return MonitoringConfig(
logs_region=spec.logs_region,
logs_bucket_name=spec.logs_bucket,
metrics_enabled=False,
)
elif "blobStorage" in spec.metrics:
return MonitoringConfig(
logs_region=spec.logs_region,
logs_bucket_name=spec.logs_bucket,
metrics_enabled=True,
metrics_storage_type=MetricsStorageType.BUCKETS,
metrics_region=spec.metrics_region,
metrics_bucket_name=spec.metrics_bucket,
metrics_retention_time=(
spec.metrics_retention_time
or MonitoringConfig.metrics_retention_time
),
)
elif "kubernetes" in spec.metrics:
return MonitoringConfig(
logs_bucket_name=spec.logs_bucket,
metrics_enabled=True,
metrics_storage_type=MetricsStorageType.KUBERNETES,
metrics_storage_class_name=spec.metrics_storage_class_name,
metrics_storage_size=spec.metrics_storage_size or "10Gi",
metrics_retention_time=(
spec.metrics_retention_time
or MonitoringConfig.metrics_retention_time
),
metrics_region=spec.metrics_region,
)
else:
raise ValueError("Metrics storage type is not supported")
@classmethod
def _base64_decode(cls, value: str | None) -> str:
if not value:
return ""
return b64decode(value.encode("utf-8")).decode("utf-8")
|
import os, pymorphy2, string, math
import re
# Create pymorphy2 instance for methods that use pymorphy2
morph = pymorphy2.MorphAnalyzer(lang = 'uk')
ukrainian_letters = ['й','ц','у','к','е','н','г','ш','щ',
                     'з','х','ї','ґ','є','ж','д','л','о','р','п','а','в','і','i',  # Cyrillic and Latin i
'ф','я','ч','с','м','и','т','ь','б','ю',"'","’","-"]
weed_out = ['без', 'у', 'в', 'від', 'для', 'по', 'через', 'при', 'над', 'на', 'під', 'до',
'з', 'із', 'як', 'за', 'задля', 'з-під', 'із-за', 'поза', 'щодо', 'і', 'в', 'та', 'й', 'але', 'а', 'й', 'або', 'чи', 'як', 'коли', 'що',
'як', 'би', 'наскільки', 'хоч', 'мов', 'наче', 'адже', 'аніж', 'втім', 'зате',
'мовби', 'мовбито', 'начеб', 'начебто', 'немов', 'немовби', 'немовбито', 'неначе',
'неначебто', 'ніби', 'нібито', 'ніж', 'отже', 'отож', 'притім', 'притому', 'причім',
'причому', 'проте', 'себто', 'тобто', 'цебто', 'щоб', 'якби', 'якщо', 'отож-то',
'тим-то', 'тільки-но', 'тому-то', 'т', 'д', 'так', 'є', 'це','ще', 'уже', 'не', 'де']
vowels = ['а', 'е', 'є', 'и', 'і', 'ї', 'о', 'у', 'ю', 'я']
class Text(object):
"""docstring for Text"""
def __init__(self, abs_path=None):
"""At initialization creates list of ukraninian words and list of sentences. Takes absolute path to the file as an argument"""
self.path = abs_path
        #Set current directory
os.curdir = self.path
def is_ukr_word(word):
            #Check if the word is Ukrainian
            #Returns a bool value
letters = list(word)
for letter in letters:
if letter.lower() in ukrainian_letters:
pass
else:
return False
return True
def word_syllab_number(word):
vowelsN = 0
for letter in word:
if letter.lower() in vowels:
vowelsN += 1
return vowelsN
# Open file for reading
if abs_path:
with open(self.path, mode='r', encoding='utf-8') as f:
file_content = f.read()
#Split file into sentences
sentences = re.split('\.\s|\n|\?|\!',file_content)
self.sentences = [sentence for sentence in sentences if sentence != '']
#Split file into words
self.all_words = []
for sentence in self.sentences:
words = sentence.split(' ')
self.all_words.extend(words)
# Creates a new list of cleaned words
self.ukr_words = []
# Num of long words
self.num_long_words = 0
for word in self.all_words:
word = word.strip(string.punctuation+'«»')
if is_ukr_word(word) and len(word) > 2 and word.lower() not in weed_out:
self.ukr_words.append(word)
if len(word) >= 6:
self.num_long_words += 1
# Num of syllables (vowels)
self.num_syllab = 0
for word in self.all_words:
if is_ukr_word(word):
self.num_syllab += word_syllab_number(word)
#Num of words
self.num_words = len(self.all_words)
#Num of sentences
self.num_sent = len(self.sentences)
#LIX coefficient
self.lix_coef = 0
            #OVIX coefficient
self.ovix_coef = 0
#Flesch readability index
self.flesch_ind = 0
else:
self.all_words = []
self.ukr_words = []
self.sentences = []
self.num_words = 0
self.num_sent = 0
self.num_long_words = 0
self.num_syllab = 0
self.lix_coef = 0
self.ovix_coef = 0
self.flesch_ind = 0
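    # Readability measures computed in analyse() below, as implemented there:
    #   LIX    = words/sentences + 100 * long_words/words   (long word: >= 6 characters)
    #   OVIX   = ln(words) / ln(2 - ln(unique_words)/ln(words))
    #   Flesch = 206.835 - 1.015*(words/sentences) - 84*(syllables/words)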
def analyse(self):
"""Returns number of words, number of sentences, average word length and average sentence length in tuple """
#Average length of word
total_length = 0
for word in self.ukr_words:
total_length += len(word)
        av_word_len = total_length/len(self.ukr_words)  # average over the cleaned Ukrainian words
#Average length of sentence
total_length = 0
for sentence in self.sentences:
total_length += len(sentence)
        av_sent_len = total_length/self.num_sent
unique_words = set(self.ukr_words)
unique_words_num = len(unique_words)
#lix coefficient computing
self.lix_coef = self.num_words/self.num_sent + (self.num_long_words * 100) / self.num_words
#ovix coefficient computing
if unique_words_num:
k = math.log(self.num_words)
t = math.log(unique_words_num)
b = math.log(self.num_words)
self.ovix_coef = k / math.log(2 - (t / b))
#flesch coefficient computing
self.flesch_ind = 206.835 - 1.015*(self.num_words/self.num_sent) - 84*(self.num_syllab/self.num_words)
return self.num_words, self.num_sent, av_word_len, av_sent_len, self.lix_coef, self.ovix_coef, self.flesch_ind
def freq_dict(self, list_of_words = None):
"""Returns key value pair of all ukrainian words and relative frequency if no other list is given
{key(str):value(float)}
"""
if not list_of_words:
list_of_words = self.ukr_words
unique_words = set(list_of_words)
# num_of_un_words = len(unique_words)
freq_dict = {}
for un_word in unique_words:
freq_dict[un_word.lower()] = list_of_words.count(un_word)
# Sort by frequency
sorted_words = sorted(freq_dict, key=freq_dict.get)
sorted_words.reverse()
new_freq_dict = {}
for word in sorted_words:
new_freq_dict[word] = freq_dict[word]
return new_freq_dict
def get_relative_lemma_freq(self):
"""Returns key value pair of lemmas and relative frequency
{key(str):value(float)}
"""
lemmas = []
for word in self.ukr_words:
p = morph.parse(word)[0]
lemmas.append(p.normal_form)
lemmas_dict = self.freq_dict(list_of_words = lemmas)
return lemmas_dict
def get_parts_of_speach_freq(self):
"""Returns key value pair of parts of speach frequency"""
parts_of_speech = {}
for word in self.ukr_words:
p = morph.parse(word)[0]
if p.tag.POS in parts_of_speech:
parts_of_speech[p.tag.POS] += 1
else:
                parts_of_speech[p.tag.POS] = 1
# for keyvalue in parts_of_speech:
# parts_of_speech[keyvalue] = (parts_of_speech[keyvalue]/self.num_words)*100
return parts_of_speech
if __name__ == '__main__':
# Helper functions
def dict_to_text(dictionary, add_POS_col = False):
""" Transform dictionary to str for readability and add parts of speach """
text = ''
if add_POS_col == True:
for key in dictionary:
p = morph.parse(key)[0]
text += f'{key} - {p.tag.POS} - {dictionary[key]} \n'
return text
else:
for key in dictionary:
text += f'{key} - {dictionary[key]} \n'
return text
def create_report(root_path, files):
"""Creates reports in the folder"""
def write_report(num_words, num_sent, av_word_len, av_sent_len, lix_coef, ovix_coef, flesch_coef, text, file, f_path):
report = ''
report += '='*100+'\n'
report += f'{file}\n'
report += '='*100+'\n'*2
report += '='*100+'\n'
if 'Easy' in f_path:
report += 'Text type: Easy\n'
elif 'Moderate' in f_path:
report += 'Text type: Moderate\n'
elif 'Complex' in f_path:
report += 'Text type: Complex\n'
else:
report += 'Text type: Unknown\n'
report += '='*100+'\n'*2
report += f"Number of words: {num_words}, number of sentences: {num_sent}, average word length: {av_word_len}, average sentence length: {av_sent_len}, words after cleaning: {len(text.ukr_words)}\n"
report += f"LIX coefficient: {lix_coef}, OVIX coefficient: {ovix_coef}, Flesch readability coefficient {flesch_coef} \n "
# Uncomment to add frequencies
# report += f'============================= Frequencies ================================= \n'
# report += dict_to_text(text.freq_dict()) + '\n'
            report += '============================= Lemma Frequencies ================================= \n\n'
            report += dict_to_text(text.get_relative_lemma_freq(), add_POS_col=True) + '\n'
            report += '============================= Parts of speech ================================= \n\n'
            report += dict_to_text(text.get_parts_of_speach_freq()) + '\n'
            with open(os.path.join(root_path, 'report.txt'), 'a', encoding='utf-8') as f:
f.write(report)
        mega_text = Text()
for file in files:
            if file.lower().startswith('readme'):
continue
print(file)
# Path to the file
            f_path = os.path.join(root_path, file)
            text = Text(f_path)
            # Get the main characteristics of the text
num_words, num_sent, av_word_len, av_sent_len, lix_coef, ovix_coef, flesch_coef = text.analyse()
            # Add this text's data to the aggregate folder text
            mega_text.all_words += text.all_words
            mega_text.ukr_words += text.ukr_words
            mega_text.sentences += text.sentences
            mega_text.num_words += text.num_words
            mega_text.num_sent += text.num_sent
            mega_text.num_long_words += text.num_long_words
            mega_text.num_syllab += text.num_syllab
#Create report
write_report(num_words, num_sent, av_word_len, av_sent_len, lix_coef, ovix_coef, flesch_coef, text, file, f_path)
        # Write an aggregate report for the whole folder
        num_words, num_sent, av_word_len, av_sent_len, lix_coef, ovix_coef, flesch_coef = mega_text.analyse()
        write_report(num_words, num_sent, av_word_len, av_sent_len, lix_coef, ovix_coef, flesch_coef, mega_text, file='Summary', f_path=os.curdir)
path = os.getcwd()
# Walk through all leaves in the root tree
tree = os.walk(path)
# Create report for each text
    for path, folders, files in tree:
        # Keep only .txt source files (and skip any previously generated report.txt)
        txt_files = [f for f in files if f.endswith('.txt') and f != 'report.txt']
        if txt_files:
            create_report(path, txt_files)
|
import dash
import dash_html_components as html
import json
import dash_leaflet as dl
from examples import geojson_csf
from dash_extensions.transpile import inject_js, module_to_props
# Create geojson.
with open("assets/us-states.json", 'r') as f:
data = json.load(f)
js = module_to_props(geojson_csf)  # transcrypt the module so its Python functions can be passed as props
geojson = dl.GeoJSON(data=data, id="geojson", options=dict(style=geojson_csf.style),
hoverStyle=geojson_csf.hover_style)
# Create app.
app = dash.Dash(prevent_initial_callbacks=True)
app.layout = html.Div([dl.Map(children=[dl.TileLayer(), geojson], center=[39, -98], zoom=4, id="map")],
style={'width': '100%', 'height': '50vh', 'margin': "auto", "display": "block"})
# Inject transcrypted javascript.
inject_js(app, js)
if __name__ == '__main__':
app.run_server(port=7777, debug=True)
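
# For reference, a rough sketch of what a transpilable `geojson_csf` module could
# contain (an illustrative guess, not the actual examples/geojson_csf.py): plain
# Python functions that Transcrypt compiles to JavaScript, each taking a GeoJSON
# feature and returning a Leaflet path style dict, e.g.
#
#   def style(feature):
#       return dict(weight=2, opacity=1, color='white', fillOpacity=0.7)
#
#   def hover_style(feature):
#       return dict(weight=5, color='#666', dashArray='')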
|
from .fetcher import ResourceFetcher
|