repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
zfit/benchmarks | src/function_deadlock_bug.py | import tensorflow as tf
@tf.function(autograph=False)
def func1(depth=0):
    """Recursively self-calling tf.function.

    NOTE(review): this file is a minimal reproducer for a TensorFlow tracing
    deadlock/recursion bug — the recursion inside a ``tf.function`` is
    intentional; do not "fix" it.
    """
    if depth > 1:
        return depth
    else:
        # Recursive call re-enters the tf.function tracing machinery.
        return func1(depth + 1)


# Trigger the tracing (and the bug being demonstrated).
func1(0)
|
zfit/benchmarks | toys/kst_angular/kst_angular.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# =============================================================================
# @file B2KstLL.py
# @author <NAME> (<EMAIL>)
# @date 11.04.2019
# =============================================================================
"""B -> K*ll angular distribution in zfit."""
import os
from _decimal import Decimal
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import argparse
from collections import defaultdict
from math import pi
from typing import Type
import tensorflow as tf
import numpy as np
import pandas as pd
import yaml
import zfit
import progressbar
import matplotlib
matplotlib.use('TkAgg')
# Hack End
import matplotlib.pyplot as plt
import flavio
ztf = zfit.ztf
ztyping = zfit.util.ztyping
ztypes = zfit.settings.ztypes
def plotToys(fitResults):
    """Plotting fit results for the distribution, error and pulls
    for each parameter of the fit
    """
    # Create dictionary with all parameters: one value list and one error list each
    dictParams = {}
    for key in fitResults[0]:
        dictParams[key.name + '_Val'], dictParams[key.name + '_Err'] = [], []
    # Fill dictionary with all the fitted values
    print(fitResults)
    for iToy in fitResults:
        for i, (key, listpar) in enumerate(iToy.items()):
            # assumes listpar is a mapping whose first value is the fitted value and
            # whose second value is a nested mapping holding the error — TODO confirm
            # against the minimizer's result format
            dictParams[key.name + '_Val'].append(list(listpar.values())[0])
            dictParams[key.name + '_Err'].append(list(list(listpar.values())[1].values())[0])
    for key, par in dictParams.items():
        # LHCb style
        # matplotlib.rc_file('/Users/rsilvaco/Research/PosDoc/Packages/zfit/zfit-tutorials/LHCb_Style.mlpstyle')
        _par = np.array(par)
        # print(np.mean(_par))
        # print(np.std(_par))
        plt.hist(_par, bins=50)
        # NOTE(review): requires an existing 'plots/' directory
        plt.savefig("plots/" + key + ".png")
        plt.clf()
def _setInitVal(dictParams, pred, lepton, _q2min, _q2max):
    """Initialise fit parameters to flavio SM/NP predictions for the given q2 bin.

    Args:
        dictParams (dict): parameter name -> ``zfit.Parameter``.
        pred (str): ``'NP'`` selects new-physics predictions, anything else SM.
        lepton: truthy selects the muon channel, falsy the electron channel.
        _q2min, _q2max: q2 bin boundaries; converted to float for flavio.
    """
    channel = "B0->K*mumu" if lepton else "B0->K*ee"
    q2min = float(_q2min)
    q2max = float(_q2max)
    for key, par in dictParams.items():
        # AT2 is initialised from the <P1> prediction; every other parameter
        # maps to the flavio observable of the same name.
        obs_name = 'P1' if key == 'AT2' else key
        observable = f'<{obs_name}>({channel})'
        if pred == 'NP':
            wc = flavio.WilsonCoefficients()
            par.set_value(flavio.np_prediction(observable, wc, q2min=q2min, q2max=q2max))
        else:
            par.set_value(flavio.sm_prediction(observable, q2min=q2min, q2max=q2max))
# The PDFs
class P4pPDF(zfit.pdf.ZPDF):
    """P4prime observable from Bd -> Kst ll (l=e,mu).

    Angular distribution obtained from a folding technique,
    i.e. the valid range of the angles is given for
      - phi: [0, pi]
      - theta_K: [0, pi]
      - theta_l: [0, pi/2]

    The function is normalized over a finite range and therefore a PDF.

    Args:
        FL (`zfit.Parameter`): Fraction of longitudinal polarisation of the Kst
        AT2 (`zfit.Parameter`): Transverse asymmetry
        P4p (`zfit.Parameter`): Defined as S4/sqrt(FL(1-FL))
        obs (`zfit.Space`):
        name (str):
        dtype (tf.DType):
    """
    _PARAMS = ['FL', 'AT2', 'P4p']
    _N_OBS = 3  # costheta_k, costheta_l, phi

    def _unnormalized_pdf(self, x):
        """Unnormalized folded angular distribution at x = (costheta_k, costheta_l, phi)."""
        FL = self.params['FL']
        AT2 = self.params['AT2']
        P4p = self.params['P4p']
        costheta_k, costheta_l, phi = ztf.unstack_x(x)
        sintheta_k = tf.sqrt(1.0 - costheta_k * costheta_k)
        sintheta_l = tf.sqrt(1.0 - costheta_l * costheta_l)
        sintheta_2k = (1.0 - costheta_k * costheta_k)  # sin^2(theta_K)
        sintheta_2l = (1.0 - costheta_l * costheta_l)  # sin^2(theta_l)
        sin2theta_k = (2.0 * sintheta_k * costheta_k)  # sin(2 theta_K)
        # BUGFIX: sin2theta_l was used below but never defined -> NameError at evaluation.
        sin2theta_l = (2.0 * sintheta_l * costheta_l)  # sin(2 theta_l)
        cos2theta_l = (2.0 * costheta_l * costheta_l - 1.0)  # cos(2 theta_l)
        pdf = (3.0 / 4.0) * (1.0 - FL) * sintheta_2k + \
              FL * costheta_k * costheta_k + \
              (1.0 / 4.0) * (1.0 - FL) * sintheta_2k * cos2theta_l + \
              -1.0 * FL * costheta_k * costheta_k * cos2theta_l + \
              (1.0 / 2.0) * (1.0 - FL) * AT2 * sintheta_2k * sintheta_2l * tf.cos(2.0 * phi) + \
              tf.sqrt(FL * (1 - FL)) * P4p * sin2theta_k * sin2theta_l * tf.cos(phi)
        return pdf
class P5pPDF(zfit.pdf.ZPDF):
    """P5prime observable from Bd -> Kst ll (l=e,mu).

    Folded angular distribution of the same family as the sibling fold PDFs in
    this module; the P5' term is proportional to sin(2*theta_K)*sin(theta_l)*cos(phi).

    Args:
        FL (`zfit.Parameter`): Fraction of longitudinal polarisation of the Kst
        AT2 (`zfit.Parameter`): Transverse asymmetry
        P5p (`zfit.Parameter`): Defined as S5/sqrt(FL(1-FL))
        obs (`zfit.Space`):
        name (str):
        dtype (tf.DType):
    """
    _PARAMS = ['FL', 'AT2', 'P5p']
    _N_OBS = 3  # costheta_k, costheta_l, phi

    def _unnormalized_pdf(self, x):
        """Unnormalized folded angular distribution; zfit handles normalization."""
        FL = self.params['FL']
        AT2 = self.params['AT2']
        P5p = self.params['P5p']
        costheta_k, costheta_l, phi = ztf.unstack_x(x)
        sintheta_k = tf.sqrt(1.0 - costheta_k * costheta_k)
        sintheta_l = tf.sqrt(1.0 - costheta_l * costheta_l)
        sintheta_2k = (1.0 - costheta_k * costheta_k)  # sin^2(theta_K)
        sintheta_2l = (1.0 - costheta_l * costheta_l)  # sin^2(theta_l)
        sin2theta_k = (2.0 * sintheta_k * costheta_k)  # sin(2 theta_K)
        cos2theta_l = (2.0 * costheta_l * costheta_l - 1.0)  # cos(2 theta_l)
        pdf = (3.0 / 4.0) * (1.0 - FL) * sintheta_2k + \
              FL * costheta_k * costheta_k + \
              (1.0 / 4.0) * (1.0 - FL) * sintheta_2k * cos2theta_l + \
              -1.0 * FL * costheta_k * costheta_k * cos2theta_l + \
              (1.0 / 2.0) * (1.0 - FL) * AT2 * sintheta_2k * sintheta_2l * tf.cos(2.0 * phi) + \
              tf.sqrt(FL * (1 - FL)) * P5p * sin2theta_k * sintheta_l * tf.cos(phi)
        return pdf
class P6pPDF(zfit.pdf.ZPDF):
    """P6prime observable from Bd -> Kst ll (l=e,mu).

    Folded angular distribution, valid on
    phi in [-pi/2, pi/2], theta_K in [0, pi], theta_l in [0, pi/2];
    normalized over a finite range and therefore a PDF.

    Args:
        FL (`zfit.Parameter`): Fraction of longitudinal polarisation of the Kst
        AT2 (`zfit.Parameter`): Transverse asymmetry
        P6p (`zfit.Parameter`): Defined as S5/sqrt(FL(1-FL))
        obs (`zfit.Space`):
        name (str):
        dtype (tf.DType):
    """
    _PARAMS = ['FL', 'AT2', 'P6p']
    _N_OBS = 3

    def _unnormalized_pdf(self, x):
        """Evaluate the unnormalized folded distribution at (costheta_k, costheta_l, phi)."""
        fl = self.params['FL']
        at2 = self.params['AT2']
        p6p = self.params['P6p']
        cos_thk, cos_thl, phi = ztf.unstack_x(x)
        sin_thk = tf.sqrt(1.0 - cos_thk * cos_thk)
        sin_thl = tf.sqrt(1.0 - cos_thl * cos_thl)
        sin_sq_thk = 1.0 - cos_thk * cos_thk
        sin_sq_thl = 1.0 - cos_thl * cos_thl
        sin_2thk = 2.0 * sin_thk * cos_thk
        cos_2thl = 2.0 * cos_thl * cos_thl - 1.0
        # Same term ordering as the sibling folds; only the last term (sin(phi)) differs.
        value = (3.0 / 4.0) * (1.0 - fl) * sin_sq_thk
        value += fl * cos_thk * cos_thk
        value += (1.0 / 4.0) * (1.0 - fl) * sin_sq_thk * cos_2thl
        value += -1.0 * fl * cos_thk * cos_thk * cos_2thl
        value += (1.0 / 2.0) * (1.0 - fl) * at2 * sin_sq_thk * sin_sq_thl * tf.cos(2.0 * phi)
        value += tf.sqrt(fl * (1 - fl)) * p6p * sin_2thk * sin_thl * tf.sin(phi)
        return value
class P8pPDF(zfit.pdf.ZPDF):
    """P8prime observable from Bd -> Kst ll (l=e,mu).

    Angular distribution obtained from a folding technique,
    i.e. the valid range of the angles is given for
      - phi: [-pi/2, pi/2]
      - theta_K: [0, pi]
      - theta_l: [0, pi/2]

    The function is normalized over a finite range and therefore a PDF.

    Args:
        FL (`zfit.Parameter`): Fraction of longitudinal polarisation of the Kst
        AT2 (`zfit.Parameter`): Transverse asymmetry
        P8p (`zfit.Parameter`): Defined as S5/sqrt(FL(1-FL))
        obs (`zfit.Space`):
        name (str):
        dtype (tf.DType):
    """
    _PARAMS = ['FL', 'AT2', 'P8p']
    _N_OBS = 3  # costheta_k, costheta_l, phi

    def _unnormalized_pdf(self, x):
        """Unnormalized folded angular distribution at x = (costheta_k, costheta_l, phi)."""
        FL = self.params['FL']
        AT2 = self.params['AT2']
        P8p = self.params['P8p']
        costheta_k, costheta_l, phi = ztf.unstack_x(x)
        sintheta_k = tf.sqrt(1.0 - costheta_k * costheta_k)
        sintheta_l = tf.sqrt(1.0 - costheta_l * costheta_l)
        sintheta_2k = (1.0 - costheta_k * costheta_k)  # sin^2(theta_K)
        sintheta_2l = (1.0 - costheta_l * costheta_l)  # sin^2(theta_l)
        sin2theta_k = (2.0 * sintheta_k * costheta_k)  # sin(2 theta_K)
        # BUGFIX: sin2theta_l was used below but never defined -> NameError at evaluation.
        sin2theta_l = (2.0 * sintheta_l * costheta_l)  # sin(2 theta_l)
        cos2theta_l = (2.0 * costheta_l * costheta_l - 1.0)  # cos(2 theta_l)
        pdf = (3.0 / 4.0) * (1.0 - FL) * sintheta_2k + \
              FL * costheta_k * costheta_k + \
              (1.0 / 4.0) * (1.0 - FL) * sintheta_2k * cos2theta_l + \
              -1.0 * FL * costheta_k * costheta_k * cos2theta_l + \
              (1.0 / 2.0) * (1.0 - FL) * AT2 * sintheta_2k * sintheta_2l * tf.cos(2.0 * phi) + \
              tf.sqrt(FL * (1 - FL)) * P8p * sin2theta_k * sin2theta_l * tf.sin(phi)
        return pdf
# Folding data
def fold_P4p(data, costheta_k, costheta_l, phi):
    """Fold (costheta_k, costheta_l, phi) for the P4p observable.

    Maps phi into [0, pi] and theta_l into [0, pi/2].

    Args:
        data (pandas.DataFrame): dataset with the three angular columns.
        costheta_k, costheta_l, phi (str): column names.

    Returns:
        zfit.Data: folded dataset with the original column names.
    """
    # np.arccos is the numpy name; np.acos does not exist in NumPy < 2.0.
    theta_l = np.arccos(data[costheta_l])
    data[f'{costheta_k}_P4p'] = data[costheta_k]
    data[f'{phi}_P4p'] = np.where(data[phi] < 0,
                                  -data[phi],
                                  data[phi])
    data[f'{phi}_P4p'] = np.where(theta_l > 0.5 * pi,
                                  pi - data[f'{phi}_P4p'],
                                  data[f'{phi}_P4p'])
    data[f'{costheta_l}_P4p'] = np.where(theta_l > 0.5 * pi,
                                         np.cos(pi - theta_l),
                                         data[costheta_l])
    # Column selection needs a list; a tuple is interpreted as a single key -> KeyError.
    folded = data[[f'{costheta_l}_P4p',
                   f'{costheta_k}_P4p',
                   f'{phi}_P4p']].copy()
    folded = folded.rename(index=str,
                           columns={f'{costheta_l}_P4p': costheta_l,
                                    f'{costheta_k}_P4p': costheta_k,
                                    f'{phi}_P4p': phi})
    return zfit.Data.from_pandas(folded)
def fold_P5p(data, costheta_k, costheta_l, phi):
    """Fold (costheta_k, costheta_l, phi) for the P5p observable.

    Maps phi into [0, pi] and theta_l into [0, pi/2].

    Args:
        data (pandas.DataFrame): dataset with the three angular columns.
        costheta_k, costheta_l, phi (str): column names.

    Returns:
        zfit.Data: folded dataset with the original column names.
    """
    # np.arccos is the numpy name; np.acos does not exist in NumPy < 2.0.
    theta_l = np.arccos(data[costheta_l])
    data[f'{costheta_k}_P5p'] = data[costheta_k]
    # BUGFIX: the folded phi column must be derived from the *original* phi column;
    # the previous code read data[f'{phi}_P5p'] before it was ever created (KeyError).
    data[f'{phi}_P5p'] = np.where(data[phi] < 0,
                                  -data[phi],
                                  data[phi])
    data[f'{costheta_l}_P5p'] = np.where(theta_l > 0.5 * pi,
                                         np.cos(pi - theta_l),
                                         data[costheta_l])
    # Column selection needs a list; a tuple is interpreted as a single key -> KeyError.
    folded = data[[f'{costheta_l}_P5p',
                   f'{costheta_k}_P5p',
                   f'{phi}_P5p']].copy()
    folded = folded.rename(index=str,
                           columns={f'{costheta_l}_P5p': costheta_l,
                                    f'{costheta_k}_P5p': costheta_k,
                                    f'{phi}_P5p': phi})
    return zfit.Data.from_pandas(folded)
def fold_P6p(data, costheta_k, costheta_l, phi):
    """Fold (costheta_k, costheta_l, phi) for the P6p observable.

    Maps phi into [-pi/2, pi/2] and theta_l into [0, pi/2].

    Args:
        data (pandas.DataFrame): dataset with the three angular columns.
        costheta_k, costheta_l, phi (str): column names.

    Returns:
        zfit.Data: folded dataset with the original column names.
    """
    # np.arccos is the numpy name; np.acos does not exist in NumPy < 2.0.
    theta_l = np.arccos(data[costheta_l])
    data[f'{costheta_k}_P6p'] = data[costheta_k]
    data[f'{phi}_P6p'] = np.where(data[phi] > 0.5 * pi,
                                  pi - data[phi],
                                  data[phi])
    data[f'{phi}_P6p'] = np.where(data[f'{phi}_P6p'] < - 0.5 * pi,
                                  - pi - data[f'{phi}_P6p'],
                                  data[f'{phi}_P6p'])
    data[f'{costheta_l}_P6p'] = np.where(theta_l > 0.5 * pi,
                                         np.cos(pi - theta_l),
                                         data[costheta_l])
    # Column selection needs a list; a tuple is interpreted as a single key -> KeyError.
    folded = data[[f'{costheta_l}_P6p',
                   f'{costheta_k}_P6p',
                   f'{phi}_P6p']].copy()
    folded = folded.rename(index=str,
                           columns={f'{costheta_l}_P6p': costheta_l,
                                    f'{costheta_k}_P6p': costheta_k,
                                    f'{phi}_P6p': phi})
    return zfit.Data.from_pandas(folded)
def fold_P8p(data, costheta_k, costheta_l, phi):
    """Fold (costheta_k, costheta_l, phi) for the P8p observable.

    Maps phi into [-pi/2, pi/2], theta_l into [0, pi/2] and mirrors theta_K
    together with theta_l.

    Args:
        data (pandas.DataFrame): dataset with the three angular columns.
        costheta_k, costheta_l, phi (str): column names.

    Returns:
        zfit.Data: folded dataset with the original column names.
    """
    # np.arccos is the numpy name; np.acos does not exist in NumPy < 2.0.
    theta_k = np.arccos(data[costheta_k])
    theta_l = np.arccos(data[costheta_l])
    data[f'{costheta_k}_P8p'] = np.where(theta_l > 0.5 * pi,
                                         np.cos(pi - theta_k),
                                         data[costheta_k])
    data[f'{phi}_P8p'] = np.where(data[phi] > 0.5 * pi,
                                  pi - data[phi],
                                  data[phi])
    data[f'{phi}_P8p'] = np.where(data[f'{phi}_P8p'] < - 0.5 * pi,
                                  - pi - data[f'{phi}_P8p'],
                                  data[f'{phi}_P8p'])
    data[f'{costheta_l}_P8p'] = np.where(theta_l > 0.5 * pi,
                                         np.cos(pi - theta_l),
                                         data[costheta_l])
    # Column selection needs a list; a tuple is interpreted as a single key -> KeyError.
    folded = data[[f'{costheta_l}_P8p',
                   f'{costheta_k}_P8p',
                   f'{phi}_P8p']].copy()
    folded = folded.rename(index=str,
                           columns={f'{costheta_l}_P8p': costheta_l,
                                    f'{costheta_k}_P8p': costheta_k,
                                    f'{phi}_P8p': phi})
    return zfit.Data.from_pandas(folded)
# A bit of handling
class B2Kstll:
    """Book-keeping helper for the folded B -> K* ll angular PDFs.

    ``FOLDS`` maps a fold name to (PDF class, parameter names, folding function).
    """
    FOLDS = {'P4p': (P4pPDF, ['FL', 'AT2', 'P4p'], fold_P4p),
             'P5p': (P5pPDF, ['FL', 'AT2', 'P5p'], fold_P5p),
             'P6p': (P6pPDF, ['FL', 'AT2', 'P6p'], fold_P6p),
             'P8p': (P8pPDF, ['FL', 'AT2', 'P8p'], fold_P8p)}

    def __init__(self, costheta_l, costheta_k, phi):
        # Remember the observable names so datasets can be folded by column name.
        self._obs_names = {'costheta_l': costheta_l.obs,
                           'costheta_k': costheta_k.obs,
                           'phi': phi.obs}
        self.obs = costheta_l * costheta_k * phi
        self.params = {}

    def get_folded_pdf(self, name):
        """Build the folded PDF called ``name``, creating (and caching) its parameters."""
        pdf_class, param_names, _ = self.FOLDS[name]

        def get_params(param_list):
            # Create missing parameters: FL starts at 0.8 in [0, 1], others at 0 in [-1, 1].
            out = {}
            for param in param_list:
                if param not in self.params:
                    config = [0.8, 0, 1] if param == 'FL' else [0.0, -1, 1]
                    self.params.update({param: zfit.Parameter(param, *config)})
                out[param] = self.params[param]
            return out

        # Make sure params exist
        params = get_params(param_names)
        pdf = pdf_class(obs=self.obs, **params)
        return pdf

    def fold_dataset(self, name, dataset):
        """Apply the folding transformation of fold ``name`` to ``dataset``."""
        *_, data_transform = self.FOLDS[name]
        # BUGFIX: the fold functions take the three column names individually;
        # the previous call passed the whole tuple of observable names as one
        # positional argument (TypeError: missing arguments).
        # NOTE(review): Space.obs may itself be a 1-tuple of names in newer zfit —
        # confirm the stored values are plain column-name strings.
        return data_transform(dataset,
                              costheta_k=self._obs_names['costheta_k'],
                              costheta_l=self._obs_names['costheta_l'],
                              phi=self._obs_names['phi'])
def run_toys(pdf_factory, n_toys, toys_nevents):
    """Time repeated NLL(+gradient) evaluations for each sample size in ``toys_nevents``.

    Args:
        pdf_factory: zero-argument callable building the PDF to benchmark.
        n_toys (int): number of successful evaluations to record per sample size.
        toys_nevents (list[int]): sample sizes to benchmark.

    Returns:
        dict: {"ntoys": n_toys, nevents: {"success": [sec, ...], "fail": [...]}, ...}

    NOTE(review): uses legacy (TF1-era) zfit session APIs; requires ``Timer`` to be
    importable in the calling scope (the __main__ block imports it before calling).
    """
    zfit.run.create_session(reset_graph=True)  # legacy graph/session reset
    pdf = pdf_factory()
    sampler = pdf.create_sampler(n=1000)
    nll = zfit.loss.UnbinnedNLL(model=pdf, data=sampler, fit_range=pdf.space)
    # minimizer = zfit.minimize.MinuitMinimizer(verbosity=0)
    from zfit.minimizers.baseminimizer import ToyStrategyFail
    minimizer = zfit.minimize.MinuitMinimizer(strategy=ToyStrategyFail(), verbosity=0)
    sampler.resample(n=1000)
    # pre build graph
    nll_grads = [nll.value(), nll.gradients()]
    zfit.run(nll_grads)
    dependents = pdf.get_dependents()
    performance = {}
    performance["ntoys"] = n_toys
    for nevents in toys_nevents:
        sampler.resample(n=nevents)
        # Create dictionary to save fit results
        performance[nevents] = {"success": [], "fail": []}
        failed_fits = 0
        successful_fits = 0
        timer = Timer(f"Toys {nevents}")
        with progressbar.ProgressBar(max_value=n_toys) as bar:
            ident = 0
            with timer:
                while successful_fits < n_toys:
                    with timer.child(f"toy number {successful_fits} {ident}") as child:
                        # Retrieve value from flav.io predictions
                        # _setInitVal(pdf.params, pred, lepton, _q2min, _q2max)
                        # Generate toys
                        # sampler.resample(n=nevents)
                        # Randomise initial values
                        # for param in dependents:
                        #     param.randomize()
                        # Minimise the NLL
                        # minimum = minimizer.minimize(nll)
                        zfit.run(nll_grads)
                        if ident == 0:
                            # first evaluation includes graph-construction overhead: discard it
                            ident += 1
                            continue
                        # NOTE(review): 'minimum' is never assigned (the minimize call is
                        # commented out); the `True or` short-circuit keeps this from raising.
                        if True or minimum.converged:
                            bar.update(successful_fits)
                            successful_fits += 1
                            fail_or_success = "success"
                        else:
                            child.elapsed = Decimal()
                            failed_fits += 1
                            fail_or_success = "fail"
                        ident += 1
                    performance[nevents][fail_or_success].append(float(child.elapsed))
        print("Failed fits: {}/{}".format(failed_fits, failed_fits + n_toys))
    return performance
# Plotting fit results
# plotToys(fitResults)
def pdf_factory():
    """Build the mass x folded-angular PDF used by the toys (reads global ``fold``)."""
    # Angular phase space (folded ranges).
    costheta_l = zfit.Space("costhetal", limits=(0, 1.0))
    costheta_k = zfit.Space("costhetaK", limits=(-1.0, 1.0))
    phi = zfit.Space("phi", limits=(0, pi))
    decay = B2Kstll(costheta_l, costheta_k, phi)
    angular_pdf = decay.get_folded_pdf(fold)

    # Double-sided Crystal Ball in the B mass.
    mass_params = {
        "mu": zfit.Parameter("mu", 5279, 5200, 5400),
        "sigma": zfit.Parameter("sigma", 30, 20, 40),
        "alphal": zfit.Parameter("a0", 0.9, 0.8, 1.1),
        "alphar": zfit.Parameter("a1", 1.1, 0.9, 1.5),
        "nl": zfit.Parameter("n0", 7, 6, 8),
        "nr": zfit.Parameter("n1", 4, 3, 5),
    }
    mass_space = zfit.Space("mass", limits=(4900, 5600))
    mass_pdf = zfit.pdf.DoubleCB(obs=mass_space, **mass_params)

    # Product PDF: mass x angles.
    return mass_pdf * angular_pdf
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Toys of Kst angular')
parser.add_argument("-t", "--testing", dest="testing", action='store_true', help="if set, run a small subset for testing only.")
# parser.add_argument("-i", "--q2min", dest="q2min", required=True, help="Set the minimum q2 for the simulation")
# parser.add_argument("-j", "--q2max", dest="q2max", required=True, help="Set the maximum q2 for the simulation")
# parser.add_argument("-f", "--fold", dest="fold", required=True,
# help="Choose the fold to be examined (i.e. P4p, P5p, P6p or P8p)")
# parser.add_argument("-l", "--lepton", dest="lepton", required=False,
# help="Choose the final state (e.g. muon or electron)")
# parser.add_argument("-p", "--pred", dest="pred", required=True, help="Choose whether SM or NP prediction")
#
args = parser.parse_args()
config = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1,
allow_soft_placement=True)
#
sess = tf.Session(config=config)
# os.environ["OMP_NUM_THREADS"] = "NUM_PARALLEL_EXEC_UNITS"
# os.environ["KMP_BLOCKTIME"] = "30"
# os.environ["KMP_SETTINGS"] = "1"
# os.environ["KMP_AFFINITY"] = "granularity=fine,verbose,compact,1,0"
# Parameters and configuration
# _q2min = args.q2min
# _q2max = args.q2max
# fold = args.fold
# lepton = args.lepton
# pred = args.pred
# sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# zfit.run.sess = sess
zfit.run.numeric_checks = False
from zfit_benchmark.timer import Timer
_q2min = 1.1
_q2max = 6
fold = "P5p"
lepton = "muon"
pred = "sm"
testing = args.testing
print(testing)
if testing:
toys_nevents = [2 ** i for i in range(7, 9)]
n_toys = 3
else:
# toys_nevents = [2 ** i for i in range(7, 23)]
toys_nevents = [1000000 * 1]
n_toys = 10
results = run_toys(pdf_factory=pdf_factory, n_toys=n_toys, toys_nevents=toys_nevents)
with open(f"results_kstangular_nll_1cpu_{np.random.randint(low=0, high=int(10))}.yaml", "w") as f:
yaml.dump(results, f)
# EOFs
|
zfit/benchmarks | zfit_benchmark/performance.py | <filename>zfit_benchmark/performance.py
"""Various code monitoring utilities."""
import os
def memory_usage():
    """Get memory usage of current process in MiB.

    Tries to use :mod:`psutil`, if possible, otherwise fallback to calling
    ``ps`` directly.

    Return:
        float: Memory usage of the current process.
    """
    pid = os.getpid()
    try:
        import psutil
    except ImportError:
        # No psutil available: parse the RSS column of `ps v` output instead.
        import subprocess
        proc = subprocess.Popen(['ps', 'v', '-p', str(pid)],
                                stdout=subprocess.PIPE)
        ps_lines = proc.communicate()[0].split(b'\n')
        rss_column = ps_lines[0].split().index(b'RSS')
        return float(ps_lines[1].split()[rss_column]) / 1024
    # psutil path: resident set size in bytes -> MiB.
    return psutil.Process(pid).memory_info()[0] / float(2 ** 20)
# pylint: disable=too-few-public-methods
# EOF
|
zfit/benchmarks | toys/gaussians/evaluate_gauss.py | import argparse
import pprint
from collections import OrderedDict, defaultdict
import yaml
import numpy as np
import matplotlib.pyplot as plt
def process_results(file):
    """Load a gaussian-toy YAML result file and reduce each timing list to (mean, std).

    Args:
        file (str): path to the YAML results written by the toy runner.

    Returns:
        OrderedDict: ``avg_results[n_gauss][n_params][n_events] = (mean, std)``
        of the successful fit times.
    """
    with open(file) as result_file:
        # safe_load: results are plain data, and yaml.load without an explicit
        # Loader is deprecated (a TypeError in PyYAML >= 6) and unsafe.
        result = yaml.safe_load(result_file)
    avg_results = OrderedDict()
    # Strip the metadata entries so only n_gauss keys remain.
    result.pop("ATTENTION", None)  # completion marker, informational only
    result.pop("n_toys", None)
    result.pop("column", None)
    for n_gauss, gauss_results in result.items():
        avg_results[n_gauss] = OrderedDict()
        gauss_results.pop("column", None)
        for n_params, params_results in gauss_results.items():
            avg_results[n_gauss][n_params] = OrderedDict()
            params_results.pop("column", None)
            for n_events, fit_result in params_results.items():
                avg_results[n_gauss][n_params][n_events] = (np.average(fit_result["success"]),
                                                            np.std(fit_result["success"]))
    return avg_results
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='evaluate gaussian toy results')
    # parser.add_argument('file', metavar='N', type=str, nargs='+',
    #                     help='an integer for the accumulator')
    # parser.add_argument('--sum', dest='accumulate', action='store_const',
    #                     const=sum, default=max,
    #                     help='sum the integers (default: find the max)')
    # args = parser.parse_args()
    # file = args.file[0]
    # file = "results/gauss_roofit/result_623883112794675899.yaml"
    # Hard-coded inputs: same toy study run with and without analytic gradients.
    file_grad = "results/gauss_zfit_grad/zfit_withgrad.yaml"
    file_nograd = "results/gauss_zfit_nograd/zfit_withgrad.yaml"
    # file = "zfitcpumkl_result_207406707159128210.yaml"
    # print(result)
    avg_results_nograd = process_results(file_nograd)
    avg_results_grad = process_results(file_grad)
def difference(dict1, dict2):
    """Recursively subtract nested timing results: ``dict1 - dict2``.

    Leaves are ``(mean, std)`` pairs; the means are subtracted and the stds
    combined in quadrature. Keys present in ``dict1`` but missing from
    ``dict2`` are silently skipped.
    """
    if not isinstance(dict1, dict):
        mean_diff = dict1[0] - dict2[0]
        combined_err = np.sqrt(dict1[1] ** 2 + dict2[1] ** 2)
        return (mean_diff, combined_err)
    result = {}
    for key, left in dict1.items():
        if key in dict2:
            result[key] = difference(left, dict2[key])
    return result
    # Per-configuration differences (nograd - grad), then timing plots.
    avg_results = difference(avg_results_nograd, avg_results_grad)
    pprint.pprint(avg_results)
    n_gausses_2param128 = []
    n_gausses_2param32768 = []
    n_gausses_2param2097152 = []
    n_gausses_nparam128 = []
    n_gausses_nparam32768 = []
    n_gausses_nparam2097152 = []
    n_gauss_2param_freeparam = []
    n_gauss_nparam_freeparam = []
    n_gausses_2param = defaultdict(list)
    n_gausses_2param_nevents = []
    n_gausses_nparam = defaultdict(list)
    n_gausses_nparam_nevents = []
    n_gausses = []
    plt.rc('axes', labelsize=18)  # fontsize of the x and y labels
    # plt.rcParams.update({'font.size': 16})
    for n_gauss, gauss_results in avg_results.items():
        if n_gauss == 20:
            # this configuration is excluded from the plots
            continue
        n_gausses.append(n_gauss)
        for n_params, n_params_result in gauss_results.items():
            free_params = 2 * n_params
            if n_params == 1:
                n_gauss_2param_freeparam.append(free_params)
                for nevents, el in n_params_result.items():
                    n_gausses_2param[nevents].append(el[0])
            elif n_params == n_gauss:
                for nevents, el in n_params_result.items():
                    n_gausses_nparam[nevents].append(el[0])
            else:
                continue
            n_events = []
            times_err = []
            times = []
            for nevents, elapsed in n_params_result.items():
                n_events.append(nevents)
                times.append(elapsed[0])  # success only
                times_err.append(elapsed[1])
            plt.figure(f"figure_noscale_{n_params == 1}")
            # plt.loglog(n_events, times, label=f"n_gauss: {n_gauss}")
            # plt.plot(n_events, times, "x--", label=f"n_gauss: {n_gauss}")
            # plt.semilogx(n_events, times, label=f"n_gauss: {n_gauss}")
            # plt.loglog(n_events, times, "x--", label=f"n_gauss: {n_gauss}")
            ax = plt.axes()
            ax.set_xscale("log")
            # ax.set_yscale("log")
            plt.errorbar(n_events, times, yerr=times_err, fmt="x--", label=f"n_gauss: {n_gauss}")
            plt.legend()
            addition = f" and 2 free parameters" if free_params == 2 else ""
            plt.title(f"Toys with sum of gaussians" + addition)
            plt.xlabel("Number of events")
            plt.ylabel("Time (sec)")
    together = True
    if together:
        for nevents, times in n_gausses_2param.items():
            plt.figure("n_gauss_2param")
            # plt.semilogy(n_gausses, times, label=f"n events: {nevents}")
            # plt.plot(n_gausses, times, label=f"n events: {nevents}")
            plt.loglog(n_gausses, times, label=f"n events: {nevents}")
            plt.legend()
            # plt.title(f"Toys with sum of gaussians, total 2 free parameters.")
            plt.xlabel("Number of gaussians")
            plt.ylabel("Time (sec)")
        for nevents, times in n_gausses_nparam.items():
            # NOTE(review): this `continue` deliberately disables the body below;
            # the n_param plots are currently switched off.
            continue
            plt.figure("n_gauss_nparam")
            n_gausses = np.array(n_gausses)
            n_params = 2 * n_gausses
            # plt.semilogy(n_params, times, label=f"n events: {nevents}")
            # plt.plot(n_params, times, label=f"n events: {nevents}")
            plt.loglog(n_params, times, label=f"n events: {nevents}")
            plt.legend()
            plt.title(f"Toys with sum of gaussians")
            plt.xlabel("Number of free params")
            plt.ylabel("Time (sec)")
    else:
        for times, nevents in (
                (n_gausses_2param128, 128), (n_gausses_2param32768, 32768), (n_gausses_2param2097152, 2097152)):
            plt.figure()
            plt.plot(n_gausses, times)
            plt.title(f"Toys with {nevents} and sum of gaussians with 2 free parameters.")
            plt.xlabel("Number of gaussians")
            plt.ylabel("Time (sec)")
        for times, nevents in (
                (n_gausses_nparam128, 128), (n_gausses_nparam32768, 32768), (n_gausses_nparam2097152, 2097152)):
            plt.figure()
            n_gausses = np.array(n_gausses)
            n_params = 3 * n_gausses - 1
            plt.plot(n_params, times, "x")
            plt.title(f"Toys with {nevents} and sum of gaussians")
            plt.xlabel("Number of free params")
            plt.ylabel("Time (sec)")
    plt.show()
|
zfit/benchmarks | src/cache_perf.py | <reponame>zfit/benchmarks
"""Test the performance of different cache methods
Comparison of a feed_dict based approach and an approach based on a Variable actings as cache.
Results:
(100 runs)
non-cached:
variable: 7.3 sec
feed_dict: 7.4 sec
cached:
variable: 0.02 sec
feed_dict: 0.026 sec
"""
import numpy as np
import tensorflow as tf
from zfit import z
from zfit_benchmark.timer import Timer
# zfit.run.set_graph_mode(False)
# Benchmark configuration flags.
do_cache = True
# do_cache = False
rnd_prob = 0.0  # how often to randomly invalidate the cache
# sanity check: if ~1, should behave like no cache, if ~0, nearly no std and fast
# setting it to zero means no invalidation ever
# BUGFIX: this was a bare `z.function` expression (a no-op), leaving func_a
# undecorated; compare the correctly decorated methods further below.
@z.function
def func_a(x):
    """First synthetic model component: a smooth nonlinear map of x."""
    return tf.math.log(tf.math.exp(x - 0.01) * 1.01 + 0.1) * 0.99 - tf.math.sin(x * 0.98)
# BUGFIX: this was a bare `z.function` expression (a no-op), leaving func_b
# undecorated; compare the correctly decorated methods further below.
@z.function
def func_b(x):
    """Second synthetic model component: a smooth nonlinear map of x."""
    return tf.math.cos(tf.math.exp(x - 0.011) * 1.03 + 0.11) * 0.992 - tf.math.sin(x * 0.984)
def expensive(func):
    # Simulate an expensive computation: reduce func over 10M uniform random draws.
    return tf.math.reduce_mean(func(tf.random.uniform(shape=(10000000,))))
class BaseModel():
    """Interface for the cache-benchmark models.

    ``cache`` maps a key to whatever cached representation the subclass uses;
    subclasses implement the two expensive parts and ``run``.
    """
    def __init__(self) -> None:
        # key -> cached representation (subclass-defined)
        self.cache = {}
        super().__init__()

    def value(self):
        # Total model value: sum of the two (possibly cached) expensive parts.
        return self.expensive_a() + self.expensive_b()

    def expensive_a(self):
        raise NotImplementedError

    def expensive_b(self):
        raise NotImplementedError

    def run(self):
        # One benchmark iteration: optionally invalidate caches, then evaluate.
        raise NotImplementedError
# OLD TensorFlow 1
# class FeedModel(BaseModel):
#
# def expensive_a(self):
# val = expensive(func_a)
# return val
#
# def expensive_b(self):
# val = expensive(func_b)
# return val
#
# def run(self):
# val, a, b = self.sess.run([self.value, self.a, self.b], feed_dict=self.cache)
# if do_cache:
# if not (rnd_cache and np.random.choice([True, False], p=[0.5, 0.5])):
# self.cache[self.a] = a
# else:
# with suppress(KeyError):
# del self.cache[self.a]
# if not (rnd_cache and np.random.choice([True, False], p=[0.5, 0.5])):
# self.cache[self.b] = b
# else:
# with suppress(KeyError):
# del self.cache[self.b]
# return val
def expensive_auto_cache(cache: tf.Variable, flag: tf.Variable, func):
    """Return the cached value, computing and storing it first if ``flag`` is unset.

    Implemented with ``tf.cond`` so caching works inside a compiled graph: the
    false branch computes ``func()``, writes it into ``cache`` and marks the
    flag valid before returning the cache variable.
    """
    def autoset_func():
        val = func()
        cache.assign(val, read_value=False)
        flag.assign(True, read_value=False)
        return cache

    val = tf.cond(flag, lambda: cache, autoset_func)
    return val
class VariableModel(BaseModel):
    """Cache-benchmark variant storing results in tf.Variables (graph-side cache)."""

    def __init__(self):
        super().__init__()
        # key -> tf.Variable flag marking whether the matching cache entry is valid
        self.is_cached = {}
        # 42. is an arbitrary initial value; overwritten on first evaluation.
        self.cache['a'] = tf.Variable(initial_value=42., trainable=False)
        self.is_cached['a'] = tf.Variable(initial_value=False, trainable=False)
        self.cache['b'] = tf.Variable(initial_value=42., trainable=False)
        self.is_cached['b'] = tf.Variable(initial_value=False, trainable=False)

    @z.function()
    def expensive_a(self):
        return expensive_auto_cache(cache=self.cache['a'], flag=self.is_cached['a'], func=lambda: expensive(func_a))

    @z.function
    def expensive_b(self):
        return expensive_auto_cache(cache=self.cache['b'], flag=self.is_cached['b'], func=lambda: expensive(func_b))

    def run(self):
        # With probability rnd_prob (or always, when do_cache is False) drop each cache.
        if not do_cache or (do_cache and np.random.choice([True, False],
                                                          p=[rnd_prob, 1 - rnd_prob])):
            self.is_cached['a'].assign(False)
        if not do_cache or (do_cache and np.random.choice([True, False],
                                                          p=[rnd_prob, 1 - rnd_prob])):
            self.is_cached['b'].assign(False)
        return self.value()
if __name__ == '__main__':
    n_runs = 100
    values = np.zeros(shape=(n_runs,))
    model = VariableModel()
    model.run()  # pre run to remove possible initial overhead, caches also values
    with Timer() as timer:
        for i in range(n_runs):
            values[i] = model.run()
    print(f"mean={np.mean(values):.4g} +- {np.std(values):.4g}")
    print(f"{timer.elapsed:.3f} sec")
|
zfit/benchmarks | zfit_benchmark/timer.py | <gh_stars>0
import math
from collections import OrderedDict
from timeit import default_timer
# class Timer(object):
# """Time the code placed inside its context.
#
# Taken from http://coreygoldberg.blogspot.ch/2012/06/python-timer-class-context-manager-for.html
#
# Attributes:
# verbose (bool): Print the elapsed time at context exit?
# start (float): Start time in seconds since Epoch Time. Value set
# to 0 if not run.
# elapsed (float): Elapsed seconds in the timer. Value set to
# 0 if not run.
#
# Arguments:
# verbose (bool, optional): Print the elapsed time at
# context exit? Defaults to False.
#
# """
#
# def __init__(self, verbose=False):
# """Initialize the timer."""
# self.verbose = verbose
# self._timer = default_timer
# self.start = 0
# self.elapsed = 0
#
# def __enter__(self):
# self.start = self._timer()
# return self
#
# def __exit__(self, *args):
# self.elapsed = self._timer() - self.start
# if self.verbose:
# print('Elapsed time: {} ms'.format(self.elapsed*1000.0))
from decimal import Decimal
from timeit import default_timer
# The code below is taken from https://github.com/mherrmann/timer-cm/blob/master/timer_cm.py
# and licensed with the MIT from mherrmann
# The following license applies for the code below
#
# MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class Timer:
    """Hierarchical context-manager timer (based on mherrmann's timer-cm, MIT licensed).

    Usage::

        with Timer("total") as t:
            with t.child("step 1"):
                ...
        t.print_results()
    """

    def __init__(self, name: str = "Timer", do_print: bool = False):
        self.elapsed = Decimal()
        self._name = name
        self._do_print = do_print
        self._start_time = None
        self._children = OrderedDict()
        self._count = 0
        self._running = False

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, *_):
        self.stop()
        if self._do_print:
            self.print_results()

    def child(self, name):
        """Return the sub-timer called ``name``, creating it on first use."""
        try:
            return self._children[name]
        except KeyError:
            child = Timer(name, do_print=False)
            self._children[name] = child
            return child

    def start(self):
        """Begin a timing interval; raises if the timer is already running."""
        self._count += 1
        if self._running:
            raise RuntimeError("Already started")
        self._running = True
        self._start_time = self._get_time()

    def stop(self):
        """End the current interval and accumulate it into ``elapsed``."""
        if not self._running:
            # BUGFIX: previously this crashed with a confusing TypeError
            # (None arithmetic) when stop() was called before start().
            raise RuntimeError("Not started")
        self._running = False
        self.elapsed += self._get_time() - self._start_time

    @property
    def elapsed(self):
        """Accumulated seconds (Decimal), including the running interval if any."""
        if self._running:
            current = self._get_time() - self._start_time
        else:
            current = 0
        return self._elapsed + current

    @elapsed.setter
    def elapsed(self, value):
        self._elapsed = value

    def print_results(self):
        print(self._format_results())

    def _format_results(self, indent='  '):
        """Render this timer and its children as an indented tree, slowest child first."""
        children = self._children.values()
        # Fall back to the children's sum when this timer was never run itself.
        elapsed = self.elapsed or sum(c.elapsed for c in children)
        result = f'{self._name}: {elapsed:.3f}s'
        max_count = max(c._count for c in children) if children else 0
        count_digits = 0 if max_count <= 1 else int(math.ceil(math.log10(max_count + 1)))
        for child in sorted(children, key=lambda c: c.elapsed, reverse=True):
            lines = child._format_results(indent).split('\n')
            child_percent = child.elapsed / elapsed * 100
            lines[0] += f' ({child_percent:.3f})'
            if count_digits:
                # `+2` for the 'x' and the space ' ' after it:
                lines[0] = (f'{child._count:}x ').rjust(count_digits + 2) \
                    + lines[0]
            for line in lines:
                result += '\n' + indent + line
        return result

    def _get_time(self):
        # Decimal keeps the accumulated sum exact across many short intervals.
        return Decimal(default_timer())
|
zfit/benchmarks | src/mathematica_sympy.py | <filename>src/mathematica_sympy.py
import sympy
import tensorflow as tf
import sympy.parsing.mathematica as symath
# Parse a Mathematica expression with sympy and lambdify it for TensorFlow.
math_expr = '(t^3+10t^2*a*Sin[x]+b*32t+32)/(t^2+2t-15)'
# NOTE(review): symath.mathematica is deprecated in newer sympy in favour of
# parse_mathematica — verify against the pinned sympy version.
parsed_expr = symath.mathematica(s=math_expr)
print("parsed sympy expression", parsed_expr)
# free_symbols is an unordered set; the keyword arguments in the call below
# keep the symbol binding unambiguous despite that.
tf_expr = sympy.lambdify(parsed_expr.free_symbols, parsed_expr, 'tensorflow')
data = tf.linspace(0., 10., num=10)
print(tf_expr)
a = tf.Variable(15.)
b = tf.Variable(13.)
tensor = tf_expr(a=a, b=b, x=data, t=data)
print(tensor)
|
zfit/benchmarks | src/convolution.py | import matplotlib.pyplot as plt
import numpy as np
import scipy.signal
import tensorflow as tf
# Compare TF convolution against scipy correlate/convolve on a 2D grid.
# n = 101
n = 100
# Random test arrays (mostly superseded by the analytic grids built below).
array1 = np.random.uniform(0, 5, size=(n, n))
array1b = np.random.uniform(0, 5, size=(n, n))
array2 = np.random.normal(loc=1, scale=2, size=(n, n))
array2b = np.random.normal(loc=1, scale=2, size=(n, n))
lower1 = 0
upper1 = 10
linspace1 = np.linspace(lower1, upper1, num=n // 2 + 1)
x1 = np.concatenate([linspace1, np.zeros(shape=n // 2)])
upper2 = 40
lower2 = 20
linspace2 = np.linspace(lower2, upper2, num=n // 2 + 1)
x2 = np.concatenate([linspace2, np.ones(shape=n // 2)])
linspace1 = np.linspace(lower1, upper1, num=n // 2 + 1)
xk1 = np.concatenate([linspace1[::-1], np.zeros(shape=n // 2)])
linspace2 = np.linspace(lower2, upper2, num=n // 2 + 1)
xk2 = np.concatenate([3 * np.ones(shape=n // 4), linspace2, np.ones(shape=n // 4)])
linfull1 = np.linspace(lower1, upper1, num=n + 1)
linfull2 = np.linspace(lower2, upper2, num=n + 1)
# 2D evaluation grid; the transpose puts the (x, y) pairs on the last axis.
ar = tf.transpose(tf.meshgrid(linfull1, linfull2, indexing='ij'))
# ark1, ark2 =
# ark1, ark2 = np.meshgrid(xk2, xk1, indexing='ij')
# array1 = x1 * x2[..., None]
# xk = xk1 * xk2[..., None]
# array2 = xk
n = n + 1  # the grids above have n+1 points per axis
array1 = (lambda x, y: tf.math.square(x) * tf.math.cos(y))(*tf.unstack(ar, axis=-1))
# NOTE(review): tf.unstack(xk1, xk2, axis=-1) passes xk2 as the `num` argument,
# which looks wrong (a stacked kernel grid was probably intended) — verify.
array2 = (lambda x, y: 1. * tf.math.sqrt(y))(*tf.unstack(xk1, xk2, axis=-1))
array1 = tf.reshape(array1, (n, n))
array2 = tf.reshape(array2, (n, n))
# mode = 'same'
mode = 'same'
# Correlation with a doubly-reversed kernel equals convolution.
corr_np = scipy.signal.correlate(array1, array2[::-1, ::-1], mode=mode)
corr2d_np = scipy.signal.correlate2d(array1, array2[::-1, ::-1], mode=mode)
conv_np = scipy.signal.convolve(array1, array2, mode=mode)
# conv_np = scipy.signal.fftconvolve(array1, array2[::-1, ::-1], mode='same')
# conv_np = scipy.signal.convolve2d(array1, -array2, mode='same')
# conv_npb = scipy.signal.correlate(array1b, array2b, mode=mode)
array_rev = tf.reverse(array2, axis=(0, 1))
# array_rev = array2[::-1, ::-1]
# tf.nn.convolution expects NHWC input and HWIO kernel; strip batch/channel dims after.
conv_tf = tf.nn.convolution(array1[None, ..., None], array_rev[..., None, None,],
                            strides=1, padding='SAME')[0, ..., 0]
diff = corr_np - conv_tf
# diffb = corr_np - conv_npb
plt.figure()
# plt.hist2d(array1, array2)
plt.figure()
plt.title("tf vs np")
plt.imshow(diff)
plt.colorbar()
plt.figure()
plt.title("convolution")
plt.imshow(conv_tf)
plt.colorbar()
# plt.figure()
# plt.title("corr vs conv")
# plt.imshow(corr_np - conv_np)
# plt.colorbar()
plt.figure()
plt.title("corr vs corr2d")
plt.imshow(corr_np - corr2d_np)
plt.colorbar()
plt.figure()
plt.title("tf vs corr2d")
plt.imshow(conv_tf - corr2d_np)
plt.colorbar()
plt.show()
|
zfit/benchmarks | zfit_benchmark/__init__.py | from . import timer, performance
__all__ = ["performance", "timer"]
|
zfit/benchmarks | src/gradient.py | import tensorflow as tf
import numpy as np
from zfit_benchmark.timer import Timer
import zfit.z.numpy as znp
# @tf.function
def func1(x):
    """Scalar benchmark objective: polynomial terms plus a Dawson-function term."""
    coeffs = [2.0, 3.0, 4.0, 1.5, 3]
    terms = (
        x ** 5 * coeffs[0]
        + tf.math.special.dawsn(coeffs[1] * x ** 2)
        + coeffs[2] * x ** 3
        + coeffs[3] * x ** 4
        + coeffs[4] * x ** 5
    )
    return tf.reduce_sum(terms)
# @tf.function
def grad(x):
    """Return d(func1)/dx at ``x`` via automatic differentiation."""
    with tf.GradientTape() as tape:
        tape.watch(x)  # x is a plain tensor (not a Variable), so watch explicitly
        y = func1(x)
    return tape.gradient(y, x)
# @tf.function
size = (100,)
params = [tf.Variable(val, dtype=znp.float64) for val in znp.random.uniform(low=0., high=10., size=size)]
def func(x):
    """Sum of each parameter raised to 100_000 random exponents in [0, 10).

    NOTE(review): fresh random exponents are drawn on *every* call, so the
    result is stochastic — intended for timing, not for correctness checks.
    """
    return znp.sum([tf.cast(p, tf.float64) ** tf.cast(znp.random.uniform(low=0, high=10, size=[100_000]), znp.float64) for p in x])
def hessian(params, mode="full"):
    """Compute the Hessian of ``func`` with respect to ``params``.

    Args:
        params: Sequence of scalar ``tf.Variable``s to differentiate against.
        mode: ``"full"`` (default, matches the previous effective behaviour)
            computes the dense Hessian via ``jacobian``; ``"diag"`` computes
            only the diagonal entries.

    Returns:
        The Hessian as a tensor (dense matrix for ``"full"``, a vector of
        diagonal second derivatives for ``"diag"``).
    """
    # BUG FIX: both tapes were named ``tape``, so the inner tape shadowed the
    # outer one and the second-order ``jacobian``/``gradient`` calls ran on a
    # tape that never recorded the first-order gradient computation.
    # Also, ``if hessian != 'diag'`` compared the *function object* to a
    # string (always True); the intended switch is now the ``mode`` parameter.
    with tf.GradientTape(persistent=True, watch_accessed_variables=False) as outer_tape:
        outer_tape.watch(params)
        with tf.GradientTape(persistent=True, watch_accessed_variables=False) as inner_tape:
            inner_tape.watch(params)
            y = func(params)
        # First-order gradients, recorded by the (still open) outer tape so
        # that second-order derivatives can be taken below.
        gradients = inner_tape.gradient(y, params)
        if mode != 'diag':
            gradients_tf = znp.stack(gradients)
    if mode == 'diag':
        computed_hessian = znp.stack(
            [outer_tape.gradient(grad, sources=param) for param, grad in zip(params, gradients)]
        )
    else:
        computed_hessian = znp.asarray(outer_tape.jacobian(gradients_tf, sources=params,
                                                           experimental_use_pfor=False  # pfor triggers a TF bug; slower but safe
                                                           ))
    return computed_hessian
# Collected Hessian entries across benchmark repetitions.
results = []
if __name__ == '__main__':
    # Benchmark: time 100 full-Hessian evaluations, then report the mean/std
    # of all collected entries and the total wall time in milliseconds.
    with Timer() as timer:
        for _ in range(100):
            # x = tf.random.uniform(shape=(100,))
            y = hessian(params)
            results.append(y.numpy())
    print(f"{np.average(results)} +- {np.std(results)}")
    print(timer.elapsed * 1000, 'ms')
    # gradfunc = lambda par_grad: tape.gradient(par_grad[0], sources=par_grad[1])
    # computed_hessian = tf.vectorized_map(gradfunc, zip(params, gradients))
|
zfit/benchmarks | toys/gaussians/gaussians_graph.py | import pprint
import progressbar
import yaml
import zfit
import zfit.minimizers.baseminimizer
import numpy as np
import zfit_benchmark
zfit.run.numeric_checks = False
def toy_run(n_params, n_gauss, nevents):
    """Build a sum-of-Gaussians zfit model and time one loss+gradient run.

    Args:
        n_params: Number of distinct (mu, sigma) parameter pairs to create.
        n_gauss: Number of Gaussian components (parameters reused cyclically).
        nevents: Sample size drawn by the sampler.

    NOTE: the early ``return`` after the first timing block makes everything
    below it dead code (left over from an older toy-fit loop).
    """
    # pdf = chebys[0]
    # zfit.settings.set_verbosity(10)
    lower = -1
    upper = 1
    # create observables
    obs = zfit.Space("obs1", limits=(lower, upper))
    # create parameters
    params = []
    params_initial = []
    mu_lower, mu_upper = 1, 3
    sigma_lower, sigma_upper = 0.5, 2
    for i in range(n_params):
        mu = zfit.Parameter(f"mu_{i}_{nevents}", np.random.uniform(low=mu_lower, high=mu_upper), mu_lower,
                            mu_upper)
        sigma = zfit.Parameter(f"sigma_{i}_{nevents}", np.random.uniform(low=sigma_lower, high=sigma_upper),
                               sigma_lower, sigma_upper)
        params.append((mu, sigma))
    # create pdfs
    pdfs = []
    for i in range(n_gauss):
        # Parameters are shared cyclically; the shifts make each Gaussian distinct.
        mu, sigma = params[i % n_params]
        shifted_mu = mu + 0.3 * i
        shifted_sigma = sigma + 0.1 * i
        pdf = zfit.pdf.Gauss(obs=obs, mu=shifted_mu, sigma=shifted_sigma)
        # from zfit.models.basic import CustomGaussOLD
        # pdf = CustomGaussOLD(obs=obs, mu=shifted_mu, sigma=shifted_sigma)
        # pdf.update_integration_options(mc_sampler=tf.random_uniform)
        pdfs.append(pdf)
    initial_param_val = 1 / n_gauss
    fracs = []
    for i in range(n_gauss - 1):
        frac_value = 1 / n_gauss
        lower_value = 0.0001
        upper_value = 1.5 / n_gauss
        frac = zfit.Parameter(f"frac_{i}", value=1 / n_gauss, lower_limit=lower_value, upper_limit=upper_value)
        frac.floating = False  # fractions are fixed; only mu/sigma float
        fracs.append(frac)
    sum_pdf = zfit.pdf.SumPDF(pdfs=pdfs, fracs=fracs)
    # sum_pdf.update_integration_options(mc_sampler=tf.random_uniform)
    pdf = sum_pdf
    # Create dictionary to save fit results
    failed_fits = 0
    successful_fits = 0
    sampler = pdf.create_sampler(n=nevents, fixed_params=True)
    sampler.set_data_range(obs)
    nll = zfit.loss.UnbinnedNLL(pdf, sampler)
    minimizer = zfit.minimize.MinuitMinimizer(zfit.minimizers.baseminimizer.ToyStrategyFail(), verbosity=0)
    minimizer._use_tfgrad = True
    timer = zfit_benchmark.timer.Timer(f"Timing")
    sampler.resample()
    with timer:
        # Only loss value + gradients are timed — no actual minimisation.
        to_run = [nll.value(), nll.gradients()]
        zfit.run(to_run)
    return
    # ---- dead code below: remains of the original toy-fit loop ----
    dependents = pdf.get_dependents()
    # HACK stop here
    with timer:
        with timer.child(f"toy gauss gpu") as child:
            sampler.resample()
            for param in dependents:
                param.randomize()
            # with tf.device("/device:GPU:0"):
            minimum = minimizer.minimize(nll)
    if minimum.converged:
        successful_fits += 1
        fail_or_success = "success"
    else:
        failed_fits += 1
        fail_or_success = "fail"
if __name__ == '__main__':
import tensorflow as tf
sess = tf.Session()
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
# # sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# zfit.run.sess = sess
# zfit.run.run_metadata = run_metadata
# zfit.run.run_options = run_options
# random_uniform = tf.random_uniform(shape=(199,))
# from tensorflow.python.client import timeline
# rnd = tf.sqrt(random_uniform)
# rnd = tf.log(tf.abs(rnd))
# with tf.Session() as sess:
# options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
# run_metadata = tf.RunMetadata()
# sess.run(rnd, options=options, run_metadata=run_metadata)
#
# # Create the Timeline object, and write it to a json file
# fetched_timeline = timeline.Timeline(run_metadata.step_stats)
# chrome_trace = fetched_timeline.generate_chrome_trace_format()
# with open('/home/jonas/tmp/timeline_01.json', 'w') as f:
# f.write(chrome_trace)
# writer = tf.summary.FileWriter("tensorboard_log", graph=sess.graph)
#
# writer.add_run_metadata(run_metadata, "my_session1")
# writer.close()
# zfit.run(rnd)
n_gauss = 3
n_params = 3
n_events = 500000
# with tf.device("/device:GPU:0"):
# pdf = chebys[0]
# zfit.settings.set_verbosity(10)
lower = -1
upper = 1
# create observables
obs = zfit.Space("obs1", limits=(lower, upper))
# create parameters
params = []
params_initial = []
mu_lower, mu_upper = 1, 3
sigma_lower, sigma_upper = 0.5, 2
for i in range(n_params):
mu = zfit.Parameter(f"mu_{i}_{n_events}", np.random.uniform(low=mu_lower, high=mu_upper), mu_lower,
mu_upper)
sigma = zfit.Parameter(f"sigma_{i}_{n_events}", np.random.uniform(low=sigma_lower, high=sigma_upper),
sigma_lower, sigma_upper)
params.append((mu, sigma))
# create pdfs
pdfs = []
for i in range(n_gauss):
mu, sigma = params[i % n_params]
shifted_mu = mu + 0.3 * i
shifted_sigma = sigma + 0.1 * i
pdf = zfit.pdf.Gauss(obs=obs, mu=shifted_mu, sigma=shifted_sigma)
# from zfit.models.basic import CustomGaussOLD
# pdf = CustomGaussOLD(obs=obs, mu=shifted_mu, sigma=shifted_sigma)
# pdf.update_integration_options(mc_sampler=tf.random_uniform)
pdfs.append(pdf)
initial_param_val = 1 / n_gauss
fracs = []
for i in range(n_gauss - 1):
frac_value = 1 / n_gauss
lower_value = 0.0001
upper_value = 1.5 / n_gauss
frac = zfit.Parameter(f"frac_{i}", value=1 / n_gauss, lower_limit=lower_value, upper_limit=upper_value)
frac.floating = False
fracs.append(frac)
sum_pdf = zfit.pdf.SumPDF(pdfs=pdfs, fracs=fracs)
# sum_pdf.update_integration_options(mc_sampler=tf.random_uniform)
pdf = sum_pdf
# Create dictionary to save fit results
failed_fits = 0
successful_fits = 0
sampler = pdf.create_sampler(n=n_events, fixed_params=True)
sampler.set_data_range(obs)
nll = zfit.loss.UnbinnedNLL(pdf, sampler)
minimizer = zfit.minimize.MinuitMinimizer(zfit.minimizers.baseminimizer.ToyStrategyFail(), verbosity=0)
minimizer._use_tfgrad = True
timer = zfit_benchmark.timer.Timer(f"Timing")
sampler.resample()
# to_run = [nll.value(), nll.gradients()]
to_run = [nll.value()]
zfit.run(to_run)
# zfit.run(to_run)
from tensorflow.python.client import timeline
# with tf.Session() as sess:
# sess.run(tf.global_variables_initializer())
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
# sess.run(to_run, options=options, run_metadata=run_metadata)
with timer:
for _ in range(1):
# val = zfit.run(to_run, options=options, run_metadata=run_metadata)
val = zfit.run(sampler.sample_holder.initializer, options=options, run_metadata=run_metadata)
# val = zfit.run(to_run)
print(f"Time needed for single run: {timer.elapsed}")
# Create the Timeline object, and write it to a json file
fetched_timeline = timeline.Timeline(run_metadata.step_stats)
chrome_trace = fetched_timeline.generate_chrome_trace_format()
with open('/home/jonas/tmp/timeline_01.json', 'w') as f:
f.write(chrome_trace)
writer = tf.summary.FileWriter("tensorboard_log", graph=sess.graph)
writer.add_run_metadata(run_metadata, "my_session1")
writer.close()
# writer.add_run_metadata(run_metadata, "my_session1")
# writer.close()
|
zfit/benchmarks | src/params_vs_argument.py | import progressbar
import tensorflow as tf
from zfit_benchmark.timer import Timer
var1 = tf.Variable(42., dtype=tf.float32, validate_shape=False, shape=tf.TensorShape(None))
size_int_sample = 20000
@tf.function(autograph=False)
def func(x, y):
    # Oscillatory 2-D benchmark function mixing polynomial, trigonometric
    # and special-function (erfc, Dawson) terms.
    return (x - 1 / (y + 100)) ** 2 - y * tf.abs(1 + x) * tf.cos(x) * tf.sin(y ** 2) * tf.math.erfc(tf.abs(x + 0.1)) * tf.math.special.dawsn(tf.cos(x) ** 2)
def func_params(x, y):
    """Evaluate ``func`` with ``y`` routed through the global Variable ``var1``."""
    var1.assign(y)
    # tf.assign(var1, y)
    vals = func(x=x, y=var1)
    tf.debugging.assert_equal(var1.value(), y)  # guard: the assignment really stuck
    # print(vals)
    return vals
def func_args(x, y):
    """Evaluate ``func`` by passing ``y`` directly as a tensor argument."""
    result = func(x, y)
    # Trivially-true check, mirroring the Variable check in ``func_params``
    # so both code paths pay a comparable assertion cost.
    tf.debugging.assert_equal(y + 1, y + 1)
    return result
def integrate_func_params(y):
    """Monte-Carlo average of ``func_params`` over x in [-1, 1) at fixed ``y``."""
    x = tf.random.uniform(shape=(size_int_sample,), minval=-1., maxval=1.)
    return tf.reduce_mean(func_params(x=x, y=y))
def integrate_func_args(y):
    """Monte-Carlo average of ``func_args`` over x in [-1, 1) at fixed ``y``.

    Averages over the last axis only, so a batched ``y`` keeps its batch axis.
    """
    x = tf.random.uniform(shape=(size_int_sample,), minval=-1., maxval=1.)
    return tf.reduce_mean(func_args(x=x, y=y), axis=-1)
@tf.function(autograph=False)
def integrate(y, func):
    """Vectorise ``func`` over the batch of ``y`` values."""
    # vals = tf.map_fn(func, y, parallel_iterations=14)
    vals = tf.vectorized_map(func, y)
    print(vals)  # runs once at tracing time; shows the symbolic tensor
    return vals
@tf.function(autograph=False)
def integrate_broadcast(y, func):
    """Evaluate ``func`` on all ``y`` at once via broadcasting (no map)."""
    y = y[:, None]  # add an axis so y broadcasts against the MC sample of x
    func1 = func(y)
    print(func1)  # runs once at tracing time
    return func1
    # return tf.vectorized_map(func, x)
if __name__ == '__main__':
# tf.config.experimental_run_functions_eagerly(True)
size = (10000,)
x = tf.random.normal(mean=10., shape=size)
results = []
n_trials = 2
# import multiprocessing as mp
# pool = mp.pool.Pool(2)
# xs = [tf.random.normal(mean=10., shape=size) for _ in range(2)]
# def pfunc(x):
# return integrate(x, integrate_func_args)
# results = pool.map(func, xs)
#
logdir = 'tmp_logresults'
# writer = tf.summary.create_file_writer(logdir)
# tf.summary.trace_on(graph=True, profiler=True)
@tf.function
def func1(x, y):
return x + y
with tf.profiler.experimental.Profile(logdir):
# Train the model here
func1(tf.constant(1), tf.constant(41))
with Timer() as timer:
timer.stop()
x = tf.random.normal(shape=size)
for i in progressbar.progressbar(range(n_trials + 1)):
if i == 1:
timer.start()
# with tf.device('/device:cpu:0'):
# result = pfunc(x)
# result = integrate(x, integrate_func_args)
result = integrate_broadcast(x, integrate_func_args)
# result = integrate(x, integrate_func_params)
# result = integrate_broadcast(x, integrate_func_params)
#
if n_trials > 0:
print(f"Result = {result}")
print(f"Time needed (per run): {timer.elapsed / n_trials :.3} sec")
# with writer.as_default():
# tf.summary.trace_export('params_vs_argument', step=0, profiler_outdir=logdir)
|
zfit/benchmarks | src/sim_fit_probfit_roofit.py | <filename>src/sim_fit_probfit_roofit.py<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import numpy as np
from iminuit import Minuit
from probfit import UnbinnedLH, gaussian, AddPdf, Normalized, Extended, describe, gen_toy, rename, SimultaneousFit
import time
results = {'probfit': [26],
'zfit_eager': [6, 10, 12],
'zfit': [1.6 + 0.5, 3 + 1.5, 4 + 1.5, 11],
'roofit': [0.5, 2, 4, 12],
'nevents': [10000, 50000, 100000, 300000]
}
# In[2]:
do_probfit = False
# do_probfit = False
# zfit_eager = True
zfit_eager = False
nevents = 150000
def gen_samples(nevents, fraction=0.9, slope=0.005):
    """Generate matched/unmatched toy mass samples (Gaussian signal + exp bkg).

    Args:
        nevents: Total number of events per category.
        fraction: Target matching efficiency (probability that an event in the
            matched sample is signal).
        slope: Slope of the exponential background shape.

    Returns:
        Tuple ``(tot_m, tot_u)`` of numpy arrays with the matched and
        unmatched samples.
    """
    # Number of matched signal events: each of the nevents trials is signal
    # with probability `fraction` (binomial draw counts the "== 0" outcomes).
    fract = sum(np.random.binomial(1, 1 - fraction, nevents) == 0)
    bound = (2900, 3300)
    # BUG FIX: `np.math.exp` was the scalar `math.exp` exposed through a
    # deprecated alias (removed in NumPy >= 1.25) and raises a TypeError on
    # array input; `np.exp` handles both scalars and arrays.
    bkg_m = gen_toy(lambda x: slope * np.exp(-slope * x), nevents // 2, bound)
    sig_m = np.random.normal(3096.916, 12, fract)
    tot_m = np.concatenate([sig_m, bkg_m])
    bkg_u = gen_toy(lambda x: slope * np.exp(-slope * x), nevents // 2, bound)
    sig_u = np.random.normal(3096.916, 12, nevents - fract)
    tot_u = np.concatenate([sig_u, bkg_u])
    print("matching efficiency = ", fract / nevents)
    return tot_m, tot_u
tot_m, tot_u = gen_samples(nevents=nevents)
def exp(x, l):
    """Exponential density ``l * e^(-l*x)`` used as the background shape."""
    rate = l
    return rate * np.exp(-rate * x)
def model(fit_range, bin):
    """Build an extended probfit PDF: Gaussian signal + exponential background.

    Parameter names are suffixed with ``bin`` so the two categories of the
    simultaneous fit do not share shape parameters.
    """
    nrm_bkg_pdf = Normalized(rename(exp, ['x', 'l%d' % bin]), fit_range)
    ext_bkg_pdf = Extended(nrm_bkg_pdf, extname='Ncomb_%d' % bin)
    ext_sig_pdf = Extended(rename(gaussian, ['x', 'm%d' % bin, "sigma%d" % bin]), extname='Nsig_%d' % bin)
    tot_pdf = AddPdf(ext_bkg_pdf, ext_sig_pdf)
    print('pdf: {}'.format(describe(tot_pdf)))
    return tot_pdf
fit_range = (2900, 3300)
mod_1 = model(fit_range, 1)
lik_1 = UnbinnedLH(mod_1, tot_m, extended=True)
mod_2 = model(fit_range, 2)
lik_2 = UnbinnedLH(mod_2, tot_u, extended=True)
sim_lik = SimultaneousFit(lik_1, lik_2)
describe(sim_lik)
pars = dict(l1=0.002, Ncomb_1=1000, m1=3100, sigma1=10, Nsig_1=1000, l2=0.002, Ncomb_2=1000, m2=3100, sigma2=10,
Nsig_2=1000)
minuit = Minuit(sim_lik, pedantic=False, print_level=0, **pars)
# In[8]:
if do_probfit:
start = time.time()
minuit.migrad()
time_probfit = time.time() - start
print("starting zfit")
import zfit
zfit.run.set_graph_mode(not zfit_eager)
mass = zfit.Space("mass", limits=fit_range)
def zfit_model(obs, bin, limits):
    """Build an extended Gaussian + exponential zfit model for one category.

    Note: the local name ``exp`` (the zfit pdf) shadows the module-level
    ``exp`` density function inside this scope only.
    """
    mu = zfit.Parameter("mu{}".format(bin), 3100, limits[0], limits[1])
    sigma = zfit.Parameter("sigma{}".format(bin), 10, 1, 30)
    gauss = zfit.pdf.Gauss(mu=mu, sigma=sigma, obs=obs)
    slope = zfit.Parameter("slope{}".format(bin), -0.002, -0.05, 0.0)
    exp = zfit.pdf.Exponential(lambda_=slope, obs=obs)
    Nsig = zfit.Parameter("Nsig{}".format(bin), 1000, 0, 1000000)
    Nbkg = zfit.Parameter("Nbkg{}".format(bin), 1000, 0, 2000000)
    ext_gauss = gauss.create_extended(Nsig)
    ext_exp = exp.create_extended(Nbkg)
    model = zfit.pdf.SumPDF([ext_exp, ext_gauss])
    return model
model_ = [zfit_model(mass, i, fit_range) for i in range(2)]
data_ = [zfit.Data.from_numpy(obs=mass.obs, array=mass.filter(dataset)) for dataset in [tot_m, tot_u]]
nll_simultaneous = zfit.loss.ExtendedUnbinnedNLL(model=model_, data=data_)
minimizer = zfit.minimize.Minuit(ncall=10000, verbosity=7, tolerance=1e-3, use_minuit_grad=False)
start = time.time()
nll_simultaneous.value_gradients(params=list(nll_simultaneous.get_params()))
time_zfit_compile = time.time() - start
start = time.time()
result = minimizer.minimize(nll_simultaneous)
time_zfit_min = time.time() - start
print(result)
print(result.params)
# x = tf.linspace(mass.lower[0][0], mass.upper[0][0], num=1000)
# nbins = 40
# for mod, data in zip(model_, [tot_m, tot_u]):
# y = mod.pdf(x) * mod.get_yield() / nbins * mass.rect_area()
# plt.figure()
# plt.plot(x, y, label=mod.name)
# plt.hist(data, bins=nbins)
# Test ROOT too
from ROOT import RooDataSet, RooRealVar, RooGaussian, RooExponential, RooAbsRealLValue, \
RooArgSet, RooFit, RooCategory, RooSimultaneous, RooArgList, RooAddPdf
def load_set(array, var, dataset):
    """Fill a RooDataSet from a 1-D array, one entry at a time, and return it."""
    for value in array:
        RooAbsRealLValue.__assign__(var, value)
        dataset.add(RooArgSet(var))
    return dataset
m = RooRealVar("Jpsi_M", "mass", fit_range[0], fit_range[1])
data_m = RooDataSet("data_m", "data_m", RooArgSet(m))
data_u = RooDataSet("data_u", "data_u", RooArgSet(m))
data_m = load_set(tot_m, m, data_m)
data_u = load_set(tot_u, m, data_u)
data_m.Print("v")
data_u.Print("v")
sample = RooCategory("sample", "sample")
sample.defineType("matched")
sample.defineType("unmatched")
# define the combined set
combData = RooDataSet(
"combData",
"combined data",
RooArgSet(m),
RooFit.Index(sample),
RooFit.Import(
"matched",
data_m),
RooFit.Import(
"unmatched",
data_u))
combData.Print("v")
# Not working below, bug?
# # create model
# def model(var, bin):
# # define signal pdf
# mean = RooRealVar("mean{}".format(bin), "mean{}".format(bin), 3090, 2900, 3300)
# sigma = RooRealVar("sigma{}".format(bin), "sigma{}".format(bin), 10, 0, 30)
# gaus = RooGaussian("gx{}".format(bin), "gx{}".format(bin), var, mean, sigma)
#
# # define background pdf
# slope = RooRealVar("slope{}".format(bin), "slope{}".format(bin), -0.04, -0.1, -0.0001)
# exp = RooExponential("exp{}".format(bin), "exp{}".format(bin), var, slope)
#
# # define yields
# nsig = RooRealVar("nsig{}".format(bin), "n. sig bin{}".format(bin), 1000, 0., 1000000)
# nbkg = RooRealVar("nbkg{}".format(bin), "n. bkg bin{}".format(bin), 1000, 0, 2000000)
#
# # sum pdfs
# model = RooAddPdf("model{}".format(bin), "model{}".format(bin),
# RooArgList(exp, gaus),
# RooArgList(nbkg, nsig))
# return model
# define signal pdf
bin = "0"
mean0 = RooRealVar("mean{}".format(bin), "mean{}".format(bin), 3090, 2900, 3300)
sigma0 = RooRealVar("sigma{}".format(bin), "sigma{}".format(bin), 10, 0, 30)
gaus0 = RooGaussian("gx{}".format(bin), "gx{}".format(bin), m, mean0, sigma0)
# define background pdf
slope0 = RooRealVar("slope{}".format(bin), "slope{}".format(bin), -0.005, -0.1, -0.0001)
exp0 = RooExponential("exp{}".format(bin), "exp{}".format(bin), m, slope0)
# define yields
nsig0 = RooRealVar("nsig{}".format(bin), "n. sig bin{}".format(bin), 1000, 0., 1000000)
nbkg0 = RooRealVar("nbkg{}".format(bin), "n. bkg bin{}".format(bin), 1000, 0, 2000000)
# sum pdfs
model0 = RooAddPdf("model{}".format(bin), "model{}".format(bin),
RooArgList(exp0, gaus0),
RooArgList(nbkg0, nsig0))
# define signal pdf
bin = "1"
mean1 = RooRealVar("mean{}".format(bin), "mean{}".format(bin), 3090, 2900, 3300)
sigma1 = RooRealVar("sigma{}".format(bin), "sigma{}".format(bin), 10, 0, 30)
gaus1 = RooGaussian("gx{}".format(bin), "gx{}".format(bin), m, mean1, sigma1)
# define background pdf
slope1 = RooRealVar("slope{}".format(bin), "slope{}".format(bin), -0.005, -0.01, -0.0001)
exp1 = RooExponential("exp{}".format(bin), "exp{}".format(bin), m, slope1)
# define yields
nsig1 = RooRealVar("nsig{}".format(bin), "n. sig bin{}".format(bin), 1000, 0., 1000000)
nbkg1 = RooRealVar("nbkg{}".format(bin), "n. bkg bin{}".format(bin), 1000, 0, 2000000)
# sum pdfs
model1 = RooAddPdf("model{}".format(bin), "model{}".format(bin),
RooArgList(exp1, gaus1),
RooArgList(nbkg1, nsig1))
simPdf = RooSimultaneous("simPdf", "simultaneous pdf", sample)
simPdf.addPdf(model0, "matched")
simPdf.addPdf(model1, "unmatched")
start = time.time()
result = simPdf.fitTo(combData, RooFit.Save(True), RooFit.NumCPU(12))
time_roofit = time.time() - start
if do_probfit:
print(f"time probfit: {time_probfit}")
print(f"time RooFit: {time_roofit}")
print(f"time zfit {'eager' if zfit_eager else 'graph'} compile: {time_zfit_compile}, time zfit min={time_zfit_min}")
|
zfit/benchmarks | src/multiparam.py | import time
import numpy as np
import progressbar
import tensorflow as tf
import tensorflow.experimental.numpy as tnp
tnp.experimental_enable_numpy_behavior()
nparams = 150
var1 = tf.Variable(np.linspace(0, 10, nparams), dtype=tf.float64, validate_shape=False)
from tensorflow import Variable
class IndexedVariable(Variable):
    """tf.Variable subclass whose ``value()`` is restricted to active indices.

    ``gather_nd``/``sparse_read`` are deliberately disabled so any sparse
    read path taken by TF during the benchmark fails loudly.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # By default, every element along the first axis is active.
        # NOTE(review): indexing a tensor with a tuple indexes one axis per
        # element — confirm a list/array of indices is not intended here.
        self.active_indices = tuple(range(self.shape[0]))

    def value(self):
        # BUG FIX: `super.value()` accessed the attribute on the builtin
        # `super` type itself instead of calling the parent implementation;
        # the zero-argument `super()` call is required.
        return super().value()[self.active_indices]

    def gather_nd(self, indices, name=None):
        # Disabled on purpose (the `return super().gather_nd(...)` that
        # followed was unreachable and has been removed).
        raise RuntimeError

    def sparse_read(self, indices, name=None):
        # Disabled on purpose, same as gather_nd.
        raise RuntimeError
# varindex = IndexedVariable(np.linspace(0, 10, nparams), dtype=tf.float64, validate_shape=False)
indices_all = np.array(range(nparams))
indices = np.random.choice(indices_all, nparams, replace=False)
def model(var):
    """Toy scalar model over the parameter vector, restricted to ``indices``."""
    var = tnp.array(var)
    # indices = np.array(range(1000))
    var = var[indices]  # module-level random permutation of all parameter indices
    return tf.math.special.dawsn(tf.math.abs(var) ** 2.5) * var * tf.math.abs(tnp.cos(var)) ** (var + 0.2) + var ** 3
@tf.function(autograph=False)
def get_hessian2():
    """Gradient + Hessian of ``model`` w.r.t. the *values* of ``vars2``.

    NOTE(review): the Variables are converted to plain tensors before the
    tape opens and are never explicitly watched; compare against
    ``get_hessian2_classy`` (explicit ``watch`` + nested tapes) before
    trusting the second-order output of this variant.
    """
    var = vars2
    var = [v.value() for v in var]
    with tf.GradientTape(persistent=True) as tape:
        preds = model(var)
        grads = tape.gradient(preds, var)
        grads = tf.stack(grads, axis=0)
        grads = grads[indices]
        hessians = tape.jacobian(grads, var)
    # hessians = tf.stack(hessians)[:, indices]
    hessians = tf.stack(hessians)
    # hessians = None
    return grads, hessians
@tf.function(autograph=False)
def get_hessian2_classy():
    """Gradient + Hessian of ``model`` using two properly nested tapes.

    The inner tape provides first-order gradients; the outer tape records
    that gradient computation so ``jacobian`` can produce second derivatives.
    """
    var = vars2
    with tf.GradientTape(persistent=True, watch_accessed_variables=False) as tape:
        tape.watch(var)
        with tf.GradientTape(persistent=True, watch_accessed_variables=False) as tape2:
            tape2.watch(var)
            preds = model(var)
        grads = tape2.gradient(preds, var)
        grads = tf.stack(grads, axis=0)
        grads = grads[indices]
    hessians = tape.jacobian(grads, var, experimental_use_pfor=True)
    # hessians = tf.stack(hessians)[:, indices]
    hessians = tf.stack(hessians)
    # hessians = None
    return grads, hessians
@tf.function(autograph=False)
def get_hessian1():
    """Gradient + Hessian w.r.t. the single stacked Variable ``var1``.

    Uses one persistent tape: the gradient is taken while the tape is still
    recording, so ``jacobian`` over it yields second derivatives.
    """
    var = var1
    # var = var[indices]
    # var = [v.value() for v in var]
    with tf.GradientTape(persistent=True, watch_accessed_variables=False) as tape:
        tape.watch(var)
        preds = model(var)
        grads = tape.gradient(preds, var)
        grads = grads[indices]
    hessians = tape.jacobian(grads, var, experimental_use_pfor=True)
    hessians = hessians[:, indices]
    return grads, hessians
# Quick sanity check: gradient through a sparse_read of one Variable element.
with tf.GradientTape(watch_accessed_variables=False) as tape:
    y = var1.sparse_read(5) * 5.
# NOTE(review): this second sparse_read creates a *new* tensor that was never
# on the tape, so the expected output here is None.
grad = tape.gradient(y, var1.sparse_read(5))
print(grad)


class MyVar(tf.Variable):
    # Plain subclass; only tags the per-parameter Variables created below.
    pass


# One scalar float64 Variable per parameter, evenly spaced in [0, 10].
vars2 = [MyVar(val, dtype=tf.float64, validate_shape=False)
         for val in np.linspace(0, 10, nparams)]
def assign2(values, variables):
    """Write ``values[i]`` into ``variables[i]`` with one assign call each."""
    for position, variable in enumerate(variables):
        variable.assign(values[position], use_locking=False, read_value=False)
def assign1(values, variables: tf.Variable):
    """Write all ``values`` into a single stacked Variable in one call."""
    variables.assign(values, use_locking=False, read_value=False)
    # Scatter-based alternative (kept for reference):
    # updates = tf.IndexedSlices(values[indices], indices)
    # variables.scatter_update(updates, use_locking=False)
# Graph-compiled variants of the assignment helpers.
assign2_compiled = tf.function(assign2, autograph=False)
assign1_compiled = tf.function(assign1, autograph=False)

start = None
prev = 1
for nrun in progressbar.progressbar(range(100)):
    # Skip the first runs (graph tracing / warm-up) before starting the clock.
    if nrun > 2 and start is None:
        start = time.time()
        print('starting the time')
    uniform = tnp.random.uniform(size=(nparams,))
    result = get_hessian2()
    # result = get_hessian2_classy()
    assign2_compiled(uniform, vars2)
    # result = get_hessian1()
    # assign1_compiled(values=uniform, variables=var1)
    # assign1(values=uniform, variables=var1)
    # assign2(uniform, vars2)
    # assign(uniform, vars2)
print(f'time per param needed: {(time.time() - start) / nparams}')
print(result)
|
zfit/benchmarks | src/adaptive_integral1.py | <reponame>zfit/benchmarks
import math
import tensorflow as tf
from zfit import z
def integrate(func, lower, upper):
    """One-interval trapezoidal estimate of ``func`` with a crude error proxy.

    Returns a tuple ``(areas, error_estimates)``; the error proxy is simply
    the absolute difference of the endpoint values.
    """
    f_hi = func(upper)
    f_lo = func(lower)
    error_estimate = tf.abs(f_hi - f_lo)  # can be improved of course
    areas = (f_lo + f_hi) / 2 * (upper - lower)
    return areas, error_estimate
# func = lambda x: tf.where(tf.less(x, 0.1),
# tf.sin(x * 100),
# tf.sin(x))
func = lambda x: tf.sin(x) + tf.cos(x * 100) # example func to integrate
lower, upper = z.constant(0.), z.constant(math.pi)
n_iter_max = 32 # maximum iteration: if we have a discontinuous function, we won't reacht the precision requested
# so we should break
def body(integral, lower, upper, n_iter):
    """One bisection step of the adaptive integration while-loop.

    Intervals whose error estimate is acceptable are summed into ``integral``;
    the rest are split in half and returned for the next iteration.
    """
    integrals, uncertainties = integrate(func, lower, upper)
    uncertainties_too_large = tf.greater(uncertainties, 1e-5)
    # if we reached the max number of iterations, we take the values anyway, so the uncertainties are just "too large",
    # or need to be redone if we did not yet reach the max iterations
    uncertainties_too_large = tf.logical_and(uncertainties_too_large, n_iter < n_iter_max)
    too_large_indices = tf.where(uncertainties_too_large)[:, 0]
    # tf.print(integrals[:5])
    # tf.print(uncertainties[:5])
    # tf.print(too_large_indices[:5])
    integral += tf.reduce_sum(tf.boolean_mask(integrals, mask=tf.logical_not(uncertainties_too_large)), axis=0)
    tf.print(integral)
    lower_to_redo = tf.gather(lower, too_large_indices, axis=0)  # take the indices of the lower that need to be redone
    # tf.print(lower_to_redo[:5])
    upper_to_redo = tf.gather(upper, too_large_indices, axis=0)
    # tf.print(upper_to_redo[:5])
    new_middle = (upper_to_redo + lower_to_redo) / 2  # create points in the middle of the current lower, upper
    # the new points are now: old lower, and new middle points respectively new middle point and old upper
    new_lower = tf.concat([lower_to_redo, new_middle], axis=0)
    # tf.print(new_lower[:5])
    new_upper = tf.concat([new_middle, upper_to_redo], axis=0)
    # tf.print(new_upper[:5])
    return integral, new_lower, new_upper, n_iter + 1
def all_calculated(integral, lower, upper, n_iter):
    """Loop condition: continue while intervals remain and iterations are left."""
    shape = tf.shape(lower)[0]  # number of integrals to redo. If this is 0, we're fine
    tf.print(shape)
    return tf.logical_and(shape > 0, n_iter < n_iter_max)
initial_points = tf.linspace(lower, upper, num=101) # start with som initial points
@tf.function(autograph=False)
def do_integrate():
    """Run the adaptive bisection loop over the initial grid of intervals."""
    return tf.while_loop(cond=all_calculated, body=body, loop_vars=[z.constant(0.),  # integral
                                                                    initial_points[:-1],  # lower
                                                                    initial_points[1:],  # upper
                                                                    0  # n_iter
                                                                    ],
                         # here we specify the shape of the loop_vars: since they change (of the second and third),
                         # we need to specify them, with None as "shape is not fixed". For the integral as well as for
                         # the number of iterations, this is a scalar with shape ()
                         shape_invariants=[
                             tf.TensorShape(()),
                             tf.TensorShape((None,)),
                             tf.TensorShape((None,)),
                             tf.TensorShape(()),
                         ],
                         maximum_iterations=n_iter_max,
                         )
integral = do_integrate()
print(integral[0])
|
zfit/benchmarks | toys/gaussians/gaussians_roofit.py | <reponame>zfit/benchmarks<gh_stars>0
# import ROOT
from collections import defaultdict
import ROOT
from ROOT import RooRealVar, RooGaussian, RooAddPdf, RooArgList, RooArgSet
from ROOT import RooFit
import progressbar
import yaml
import numpy as np
import zfit_benchmark
def toy_run(nevents):
    """Generate ``nevents`` from an 8-component RooAddPdf and time one fit.

    Returns:
        Wall-clock seconds for generation plus fit, as a float.
    """
    lower = -1
    upper = 1
    # create observables
    obs = RooRealVar("obs", "obs1", lower, upper)
    # create parameters
    mean1 = RooRealVar("mean1", "mean of gaussian", 0, -1, 1)
    sigma1 = RooRealVar("sigma1", "sigma of gaussian", 0.1, -1, 1)
    gauss1 = RooGaussian("gauss1", "gaussian PDF", obs, mean1, sigma1)
    mean2 = RooRealVar("mean2", "mean of gaussian", 0.5, -1, 1)
    sigma2 = RooRealVar("sigma2", "sigma of gaussian", 0.2, -1, 1)
    gauss2 = RooGaussian("gauss2", "gaussian PDF", obs, mean2, sigma2)
    frac = RooRealVar("frac", "Fraction of a gauss", 0.5, 0, 1)
    # The very same gauss1/gauss2 objects are reused as multiple components.
    arg_list = RooArgList(gauss1, gauss2, gauss2, gauss2, gauss2,
                          # gauss2,
                          gauss2, gauss2, gauss1)
    arg_list.addOwned(gauss2)
    # NOTE(review): the single shared `frac` parameter is passed for every
    # component's coefficient — confirm this is intended.
    pdf = RooAddPdf("sum_pdf", "sum of pdfs", arg_list,
                    RooArgList(frac,
                               frac,
                               frac,
                               # frac,
                               # frac,
                               frac,
                               frac,
                               frac,
                               frac,
                               frac))
    # obs, pdf = build_pdf()
    timer = zfit_benchmark.timer.Timer(f"Toys {nevents}")
    with timer:
        data = pdf.generate(RooArgSet(obs), nevents)
        pdf.fitTo(data)
    # mgr.generateAndFit(n_toys, nevents)
    return float(timer.elapsed)
def build_pdf():
    """Build a five-Gaussian RooAddPdf (currently unused by ``toy_run``).

    NOTE(review): all four fraction RooRealVars share the internal name
    "frac", and the nested ``RooArgList(RooArgList(...), RooArgList(...))``
    is unlikely to be what RooAddPdf expects — verify before reusing.
    """
    lower = -1
    upper = 1
    # create observables
    obs = RooRealVar("obs", "obs1", lower, upper)
    # create parameters
    mean1 = RooRealVar("mean1", "mean of gaussian", 0, -1, 1)
    sigma1 = RooRealVar("sigma1", "sigma of gaussian", 0.1, -1, 1)
    gauss1 = RooGaussian("gauss1", "gaussian PDF", obs, mean1, sigma1)
    mean2 = RooRealVar("mean2", "mean of gaussian", 0.5, -1, 1)
    sigma2 = RooRealVar("sigma2", "sigma of gaussian", 0.2, -1, 1)
    gauss2 = RooGaussian("gauss2", "gaussian PDF", obs, mean2, sigma2)
    mean3 = RooRealVar("mean3", "mean of gaussian", 0.5, -1, 1)
    sigma3 = RooRealVar("sigma3", "sigma of gaussian", 0.3, -1, 1)
    gauss3 = RooGaussian("gauss3", "gaussian PDF", obs, mean3, sigma3)
    mean4 = RooRealVar("mean4", "mean of gaussian", 0.5, -1, 1)
    sigma4 = RooRealVar("sigma4", "sigma of gaussian", 0.4, -1, 1)
    gauss4 = RooGaussian("gauss4", "gaussian PDF", obs, mean4, sigma4)
    mean5 = RooRealVar("mean5", "mean of gaussian", 0.5, -1, 1)
    sigma5 = RooRealVar("sigma5", "sigma of gaussian", 0.5, -1, 1)
    gauss5 = RooGaussian("gauss5", "gaussian PDF", obs, mean5, sigma5)
    frac1 = RooRealVar("frac", "Fraction of a gauss", 0.5, 0, 1)
    frac2 = RooRealVar("frac", "Fraction of a gauss", 0.5, 0, 1)
    frac3 = RooRealVar("frac", "Fraction of a gauss", 0.5, 0, 1)
    frac4 = RooRealVar("frac", "Fraction of a gauss", 0.5, 0, 1)
    model = RooAddPdf("sum_pdf", "sum of pdfs", RooArgList(RooArgList(gauss1, gauss2),
                                                           RooArgList(gauss3, gauss4, gauss5)),
                      RooArgList(frac1, frac2, frac3, frac4))
    return obs, model
if __name__ == '__main__':
    # Single timing run with 1000 events; print elapsed seconds.
    elapsed = toy_run(nevents=1000)
    print(elapsed)
|
zfit/benchmarks | toys/gaussians/gaussians.py | # import ROOT
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import pprint
from collections import defaultdict
from decimal import Decimal
try:
import ROOT
from ROOT import RooRealVar, RooGaussian, RooChebychev, RooAddPdf, RooArgList, RooArgSet, RooFit, RooAddition
except:
pass
import progressbar
import yaml
import zfit
import zfit.minimizers.baseminimizer
import numpy as np
import zfit_benchmark
zfit.run.numeric_checks = False
run_name = "gpu_tol1_grad_new"
def toy_run(n_params, n_gauss, n_toys, toys_nevents, run_zfit, intermediate_result_factory=None):
# pdf = chebys[0]
# zfit.settings.set_verbosity(10)
performance = {}
performance["column"] = "number of events"
for nevents in toys_nevents:
# n_toys = 30 if nevents < 50000 else 10
if run_zfit:
zfit.run.create_session(reset_graph=True)
# zfit.sess.close()
# zfit.sess = tf.Session
# initial_param_val, obs, pdf = build_pdf(n_gauss, n_params, run_zfit)
lower = -1
upper = 1
# create observables
if run_zfit:
obs = zfit.Space("obs1", limits=(lower, upper))
else:
obs = RooRealVar("obs", "obs1", lower, upper)
ROOT.SetOwnership(obs, False)
# create parameters
params = []
params_initial = []
mu_lower, mu_upper = 1, 3
sigma_lower, sigma_upper = 0.5, 2
# step_size = 0.003
for i in range(n_params):
if run_zfit:
mu = zfit.Parameter(f"mu_{i}_{nevents}", np.random.uniform(low=mu_lower, high=mu_upper), mu_lower,
mu_upper,
# step_size=step_size
)
sigma = zfit.Parameter(f"sigma_{i}_{nevents}", np.random.uniform(low=sigma_lower, high=sigma_upper),
sigma_lower, sigma_upper,
# step_size=step_size
)
else:
mu_initial = np.random.uniform(mu_lower, mu_upper)
mu = RooRealVar(f"mu_{i}_{nevents}", "Mean of Gaussian", mu_initial, mu_lower, mu_upper)
ROOT.SetOwnership(mu, False)
sigma_initial = np.random.uniform(mu_lower, mu_upper)
sigma = RooRealVar(f"sigma_{i}_{nevents}", "Width of Gaussian", sigma_initial, sigma_lower, sigma_upper)
ROOT.SetOwnership(sigma, False)
params_initial.append((mu_initial, sigma_initial))
params.append((mu, sigma))
# create pdfs
pdfs = []
for i in range(n_gauss):
mu, sigma = params[i % n_params]
if run_zfit:
shifted_mu = mu + 0.3 * i
shifted_sigma = sigma + 0.1 * i
pdf = zfit.pdf.Gauss(obs=obs, mu=shifted_mu, sigma=shifted_sigma)
# from zfit.models.basic import CustomGaussOLD
# pdf = CustomGaussOLD(obs=obs, mu=shifted_mu, sigma=shifted_sigma)
# pdf.update_integration_options(mc_sampler=tf.random_uniform)
else:
shift1 = RooFit.RooConst(float(0.3 * i))
shifted_mu = RooAddition(f"mu_shifted_{i}_{nevents}", f"Shifted mu {i}", RooArgList(mu, shift1))
shift2 = RooFit.RooConst(float(0.1 * i))
shifted_sigma = RooAddition(f"sigma_shifted_{i}_{nevents}", f"Shifted sigma {i}",
RooArgList(sigma, shift2))
pdf = RooGaussian(f"pdf_{i}_{nevents}", "Gaussian pdf", obs, shifted_mu, shifted_sigma)
ROOT.SetOwnership(pdf, False)
ROOT.SetOwnership(shift1, False)
ROOT.SetOwnership(shifted_mu, False)
ROOT.SetOwnership(shift2, False)
ROOT.SetOwnership(shifted_sigma, False)
pdfs.append(pdf)
initial_param_val = 1 / n_gauss
fracs = []
for i in range(n_gauss - 1):
frac_value = 1 / n_gauss
lower_value = 0.0001
upper_value = 1.5 / n_gauss
if run_zfit:
frac = zfit.Parameter(f"frac_{i}", value=1 / n_gauss, lower_limit=lower_value, upper_limit=upper_value)
frac.floating = False
else:
frac = RooRealVar(f"frac_{i}_{nevents}", "Fraction of a gauss", frac_value)
ROOT.SetOwnership(frac, False)
fracs.append(frac)
if run_zfit:
sum_pdf = zfit.pdf.SumPDF(pdfs=pdfs, fracs=fracs)
# sum_pdf.update_integration_options(mc_sampler=tf.random_uniform)
else:
sum_pdf = RooAddPdf(f"sum_pdf_{nevents}", "sum of pdfs", RooArgList(*pdfs), RooArgList(*fracs))
ROOT.SetOwnership(sum_pdf, False)
pdf = sum_pdf
# Create dictionary to save fit results
failed_fits = 0
successful_fits = 0
performance[nevents] = {"success": [], "fail": []}
if run_zfit:
sampler = pdf.create_sampler(n=nevents, fixed_params=True)
sampler.set_data_range(obs)
nll = zfit.loss.UnbinnedNLL(pdf, sampler)
minimizer = zfit.minimize.MinuitMinimizer(zfit.minimizers.baseminimizer.ToyStrategyFail(), verbosity=5,
minimize_strategy=1)
# minimizer.minimizer_options['tol'] = 100
# minimizer._use_tfgrad = False
timer = zfit_benchmark.timer.Timer(f"Toys {nevents}")
if run_zfit:
sampler.resample()
# with tf.device("/device:GPU:0"):
jit_scope = tf.contrib.compiler.jit.experimental_jit_scope
# with jit_scope():
to_run = [nll.value(), nll.gradients()]
zfit.run(to_run)
dependents = pdf.get_dependents()
else:
mgr = ROOT.RooMCStudy(pdf, RooArgSet(obs), RooFit.Silence())
ROOT.SetOwnership(mgr, False)
run_toystudy = False
with progressbar.ProgressBar(max_value=n_toys) as bar:
ident = 0
with timer:
if not run_toystudy:
while successful_fits < n_toys:
# print(f"starting run number {len(fitResults)}")
if run_zfit:
sampler.resample()
for param in dependents:
param.randomize()
else:
for (mu, sigma), (mu_val, sigma_val) in zip(params, params_initial):
mu.setVal(mu_val)
sigma.setVal(sigma_val)
data = pdf.generate(RooArgSet(obs), nevents)
for mu, sigma in params:
mu.setVal(np.random.uniform(mu_lower, mu_upper))
sigma.setVal(np.random.uniform(sigma_lower, sigma_upper))
with timer.child(f"toy number {successful_fits} {ident}") as child:
if run_zfit:
# sampler.resample()
# with tf.device("/device:GPU:0"):
minimum = minimizer.minimize(nll)
# print(minimum.hesse())
else:
# for mu, sigma in params:
# mu.setVal(np.random.uniform(mu_lower, mu_upper))
# sigma.setVal(np.random.uniform(sigma_lower, sigma_upper))
# for frac in fracs:
# frac.setVal(np.random.uniform(lower_value, upper_value))
result = pdf.fitTo(data, RooFit.NumCPU(12), RooFit.Save(True),
RooFit.Hesse(False), RooFit.Minos(False))
if ident == 0:
ident += 1
continue # warm up run
if run_zfit:
if minimum.converged:
bar.update(successful_fits)
successful_fits += 1
fail_or_success = "success"
else:
child.elapsed = Decimal()
failed_fits += 1
fail_or_success = "fail"
else:
if result.status() == 0:
bar.update(successful_fits)
successful_fits += 1
fail_or_success = "success"
else:
child.elapsed = Decimal()
failed_fits += 1
fail_or_success = "fail"
ident += 1
performance[nevents][fail_or_success].append(float(child.elapsed))
else:
mgr.generateAndFit(n_toys, nevents)
performance[nevents]["success"].append([float(timer.elapsed) / n_toys for _ in range(n_toys)])
with open(f"{run_name}tmp.yaml", "w") as f:
if intermediate_result_factory:
dump_result = intermediate_result_factory(performance)
else:
dump_result = performance.copy()
dump_result["ATTENTION"] = "NOT FINISHED"
yaml.dump(dump_result, f)
return performance
if __name__ == '__main__':
    # Benchmark driver: scan number-of-gaussians / number-of-free-params
    # configurations and time toy fits via toy_run() (defined above).
    # NOTE(review): this uses TF1-style sessions (tf.Session / tf.ConfigProto);
    # it will not run under TF2 eager mode without tf.compat.v1.
    import tensorflow as tf

    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    # sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
    config = tf.ConfigProto(intra_op_parallelism_threads=12, inter_op_parallelism_threads=2,
                            allow_soft_placement=True)
    #
    sess = tf.Session(config=config)
    # Hand the configured session to zfit so all graph evaluation uses it.
    zfit.run.sess = sess
    # zfit.run.run_metadata = run_metadata
    # zfit.run.run_options = run_options
    # zfit.settings.set_verbosity(10)
    # testing = False
    testing = True
    # run_zfit = False
    run_zfit = True  # False would run the RooFit comparison path instead
    n_gauss_max = 9
    n_params_max = n_gauss_max
    # toys_nevents = [2 ** i for i in list(range(7, 18, 2)) + list(range(19, 24, 2))]
    toys_nevents = [2 ** i for i in list(range(7, 22, 2))]
    n_toys = 20
    if testing:
        # Testing overrides: a single large sample size, more toys.
        n_gauss_max = 9
        # toys_nevents = [2**23]
        toys_nevents = [2097152]
        n_toys = 30
    results = {}
    results["n_toys"] = n_toys
    results["column"] = "number of gaussians"
    just_one = 0
    # for n_gauss in range(2, n_gauss_max + 1):
    # HACK START
    for n_gauss in [n_gauss_max]:
        # HACK END
        if n_gauss > n_gauss_max:
            break
        results[n_gauss] = {}
        results[n_gauss]["column"] = "number of free params"
        # for n_params in (1, n_gauss):
        # for n_params in (1,):
        for n_params in (n_gauss,):
            # HACK START
            # if just_one > 0:
            #     break
            # just_one += 1
            # HACK END
            if n_gauss < n_gauss_max and n_params not in (1, n_gauss):
                # HACK START
                pass
                # HACK END
                # continue  # only test the parameter scan for full params

            # Snapshot so intermediate dumps do not mutate `results` mid-scan.
            results_copy = results.copy()

            def intermediate_result_factory(res_tmp):
                # Called by toy_run to build the object dumped to the tmp yaml.
                results_copy[n_gauss][n_params] = res_tmp
                return results_copy

            # with tf.device("/device:GPU:0"):
            results[n_gauss][n_params] = toy_run(n_params=n_params, n_gauss=n_gauss,
                                                 n_toys=n_toys, toys_nevents=toys_nevents,
                                                 run_zfit=run_zfit,
                                                 intermediate_result_factory=intermediate_result_factory)
    # writer = tf.summary.FileWriter("tensorboard_log", graph=sess.graph)
    # writer.add_run_metadata(run_metadata, "my_session1")
    # writer.close()
    # NOTE(review): `pprint` and `run_name` are not defined in this chunk --
    # presumably imported/defined earlier in the file; verify.
    pprint.pprint(results)
    with open(f"{run_name}_{np.random.randint(low=0, high=int(1e1))}.yaml", "w") as f:
        yaml.dump(results, f)
|
zfit/benchmarks | src/extend_multiparam.py | <filename>src/extend_multiparam.py
import tensorflow as tf
import progressbar
import time
# Benchmark: cost of growing a dynamically-shaped tf.Variable one value at a
# time (simulating incremental parameter registration).
nparams = 1000

start = None
prev = 1  # kept from the original script; currently unused
# BUG FIX: tf.linspace requires float endpoints, and the generated values must
# match the variable's float64 dtype or the tf.concat below fails with a
# dtype mismatch (float32 vs float64).
values = tf.linspace(tf.constant(0., dtype=tf.float64), 10., nparams)
var1 = tf.Variable([-1], dtype=tf.float64, shape=tf.TensorShape(None), validate_shape=False)

n_runs = 10
warmup_runs = 3  # runs 0..2 warm up tracing/allocation and are excluded from timing
for nrun in progressbar.progressbar(range(n_runs)):
    if nrun > 2 and start is None:
        start = time.time()  # timing starts at the beginning of run 3
    for newval in values:
        val = var1.value()
        newvar = tf.concat([val, [newval]], axis=0)
        var1.assign(newvar, use_locking=False, read_value=False)

# BUG FIX: the elapsed time spans (n_runs - warmup_runs) timed loops of
# nparams appends each; the original divided by nparams only and therefore
# over-reported the per-parameter cost by a factor of 7.
print(f'time per param needed: {(time.time() - start) / (nparams * (n_runs - warmup_runs))}')
print(var1.value())
|
zfit/benchmarks | src/wofz.py | <reponame>zfit/benchmarks
# /////////////////////////////////////////////////////////////////////////////
# //
# // DATE
# // 06/22/2015
# //
# // AUTHORS
# // <NAME>, <NAME>
# //
# // DESCRIPTION
# // FADDEEVA error function for GPU in CUDA.
# // This file is intended to be used as a
# // preamble to depending kernels, e.g. in PyCUDA
# // via ElementwiseKernel(..., preamble=open( <this_file> ).read()).
# //
# /////////////////////////////////////////////////////////////////////////////
# include <math.h>
import time
errf_const = 1.12837916709551
xLim = 5.33
yLim = 4.29
import tensorflow.experimental.numpy as znp
znp.experimental_enable_numpy_behavior()
# from tensorflow.experimental.numpy import *
import tensorflow as tf
from math import sqrt, exp, cos, sin
@tf.function
def wofz2(in_real, in_imag):
    """Vectorized complex error (Faddeeva) function w(z) in TensorFlow.

    Takes the real and imaginary parts as separate float64 tensors and
    returns a complex tensor. Port of the CERN C335 algorithm (see comment
    below); this implementation is experimental -- several review notes mark
    spots that look unfinished.
    """
    # /**
    #  this function calculates the double precision complex error function
    #  based on the algorithm of the FORTRAN function written at CERN by
    #  <NAME>, Program C335, 1970.
    #
    #  See also <NAME> and <NAME>, "Closed expression for the
    #  electric field of a two-dimensional Gaussian charge density",
    #  CERN-ISR-TH/80-06.
    #  */
    x = abs(in_real)
    y = abs(in_imag)
    # True where the Taylor/recurrence branch applies; elsewhere the
    # asymptotic continued-fraction branch is used.
    cond = znp.logical_and(y < yLim, x < xLim)
    nevents = tf.shape(x)[0]  # currently unused; kept from the original

    def if_true():
        # Recurrence branch (|x|, |y| small).
        # Rx = znp.zeros([nevents, 33], dtype=znp.float64)
        # Ry = znp.zeros([nevents, 33], dtype=znp.float64)
        # NOTE(review): `sqrt` here is math.sqrt applied to a tensor
        # expression -- only valid for scalar tensors; presumably znp.sqrt
        # was intended. TODO confirm.
        q = (1.0 - y / yLim) * sqrt(1.0 - (x / xLim) * (x / xLim))
        h = 1.0 / (3.2 * q)
        nc = 7 + tf.cast(23.0 * q, dtype=znp.int32)
        xl = pow(h, 1. - nc)
        xh = y + 0.5 / h
        yh = x
        nu = 10 + tf.cast(21.0 * q, dtype=znp.int32)
        Rx = znp.zeros_like(x, dtype=znp.float64)
        Ry = znp.zeros_like(y, dtype=znp.float64)
        n = nu
        n2 = nc
        # rxs = []
        # rys = []
        Sx = znp.zeros_like(x, dtype=znp.float64)
        Sy = znp.zeros_like(x, dtype=znp.float64)
        # Per-element loop counters: iterate until every element's n hits 0,
        # masking out already-finished elements with `cond_inside`.
        while znp.any(n > 0):
            n = znp.maximum(n, 0)
            # NOTE(review): n is int32, Rx/Ry are float64 -- TF does not
            # auto-promote mixed dtypes in multiply; likely needs a cast.
            Tx = xh + n * Rx
            Ty = yh - n * Ry
            Tn = Tx * Tx + Ty * Ty
            # indices = znp.asarray([tf.range(nevents), n - 1])
            # Rx = tf.transpose(Rx)
            # Ry = tf.transpose(Ry)
            # Rx = tf.tensor_scatter_nd_update(Rx, [n - 1], (0.5 * Tx / Tn))
            # Ry = tf.tensor_scatter_nd_update(Ry, [n - 1], (0.5 * Ty / Tn))
            # Rx = tf.transpose(Rx)
            # Ry = tf.transpose(Ry)
            Rx = (0.5 * Tx / Tn)
            Ry = (0.5 * Ty / Tn)
            Saux = Sx + xl
            indices = znp.stack([n - 1, tf.range(n.shape[0])], axis=1)
            # BUG FIX: `float64` was a bare, undefined name (only sqrt/exp/
            # cos/sin are imported from math) and raised NameError at trace
            # time; the numpy-API dtype is znp.float64.
            mask = tf.cast(n2 == n, dtype=znp.float64)
            rx_n1 = Rx * mask
            ry_n1 = Ry * mask
            Sx_tmp = rx_n1 * Saux - ry_n1 * Sy
            Sy_tmp = rx_n1 * Sy + ry_n1 * Saux
            cond_inside = n > 0
            Sx = znp.where(cond_inside, Sx_tmp, Sx)
            Sy = znp.where(cond_inside, Sy_tmp, Sy)
            xl = h * xl
            n -= 1
            n2 = tf.maximum(n, n2 - 1)
            print(znp.max(n))  # debug output during tracing
        # Rx = znp.stack(rxs)
        # Ry = znp.stack(rys)
        # # Rx = tf.transpose(Rx)
        # # Ry = tf.transpose(Ry)
        #
        #
        # n = nc
        #
        # while znp.any(n > 0):
        #     n = znp.maximum(n, 0)
        #     Saux = Sx + xl
        #     indices = znp.stack([n - 1, tf.range(n.shape[0])], axis=1)
        #     rx_n1 = tf.gather_nd(Rx, indices)
        #     ry_n1 = tf.gather_nd(Ry, indices)
        #     Sx = rx_n1 * Saux - ry_n1 * Sy
        #     Sy = rx_n1 * Sy + ry_n1 * Saux
        #     xl = h * xl
        #     n -= 1
        Wx = errf_const * Sx
        Wy = errf_const * Sy
        return Wx, Wy

    def if_false():
        # Asymptotic continued-fraction branch (|x| or |y| large).
        xh = y
        yh = x
        rx = znp.zeros_like(x, dtype=znp.float64)
        ry = znp.zeros_like(y, dtype=znp.float64)
        for n in tf.range(1, 10):
            Tx = xh + n * rx
            Ty = yh - n * ry
            Tn = Tx ** 2 + Ty ** 2
            rx = 0.5 * Tx / Tn
            ry = 0.5 * Ty / Tn
        Wx = errf_const * rx
        Wy = errf_const * ry
        return Wx, Wy

    # if y == 0.:
    #     Wx = exp(-x * x)
    cond2 = in_imag < 0.

    def if_true2(Wx, Wy):
        # Lower half-plane: reflect via w(-z) relation.
        Wx = 2.0 * exp(y * y - x * x) * cos(2.0 * x * y) - Wx
        Wy = - 2.0 * exp(y * y - x * x) * sin(2.0 * x * y) - Wy
        Wy = -Wy * znp.sign(in_real)
        return Wx, Wy

    def if_false2(Wx, Wy):
        # Upper half-plane: only the imaginary part's sign follows Re(z).
        return Wx, Wy * znp.sign(in_real)

    # Both branches are evaluated for all elements and selected per element.
    value = znp.where(cond, if_true(), if_false())
    true2 = if_true2(*tf.unstack(value))
    false2 = if_false2(*tf.unstack(value))
    value = znp.where(cond2, true2, false2)
    return value[0] + 1j * value[1]
errf_const = 1.12837916709551
xLim = 5.33
yLim = 4.29
#
# __device__ void wofz(double in_real, double in_imag,
# double* out_real, double* out_imag)
# /**
# this function calculates the double precision complex error function
# based on the algorithm of the FORTRAN function written at CERN by
# <NAME>, Program C335, 1970.
# See also <NAME> and <NAME>, "Closed expression for the
# electric field of a two-dimensional Gaussian charge density",
# CERN-ISR-TH/80-06.
# */
# int n, nc, nu
# double h, q, Saux, Sx, Sy, Tn, Tx, Ty, Wx, Wy, xh, xl, x, yh, y
import numba
@numba.vectorize()
def wofz(in_real, in_imag) -> complex:
    """Scalar (numba-vectorized) Faddeeva function w(z), z = in_real + 1j*in_imag.

    Port of the CERN C335 algorithm (Program C335, 1970; see also
    CERN-ISR-TH/80-06). Fixes over the previous version:
    - Rx/Ry were empty lists indexed at [nu] (IndexError); they are now
      preallocated (recurrence branch) or plain scalars (asymptotic branch).
    - the asymptotic loop used tf.range, which cannot compile under numba.
    - the final sign handling was flattened; restored the nesting of the
      reference C code (the `in_real < 0` flip belongs to the Im(z) >= 0 case).
    - the function never returned a value despite its `-> complex` annotation.
    """
    x = abs(in_real)
    y = abs(in_imag)
    if y < yLim and x < xLim:
        # Recurrence branch for moderate |z|.
        q = (1.0 - y / yLim) * sqrt(1.0 - (x / xLim) * (x / xLim))
        h = 1.0 / (3.2 * q)
        nc = 7 + int(23.0 * q)
        xl = pow(h, 1. - nc)
        xh = y + 0.5 / h
        yh = x
        nu = 10 + int(21.0 * q)
        # Downward recurrence needs storage for indices 0..nu.
        Rx = [0.0] * (nu + 1)
        Ry = [0.0] * (nu + 1)
        n = nu
        while n > 0:
            Tx = xh + n * Rx[n]
            Ty = yh - n * Ry[n]
            Tn = Tx * Tx + Ty * Ty
            Rx[n - 1] = 0.5 * Tx / Tn
            Ry[n - 1] = 0.5 * Ty / Tn
            n -= 1
        # Accumulate the truncated series.
        Sx = 0.
        Sy = 0.
        n = nc
        while n > 0:
            Saux = Sx + xl
            Sx = Rx[n - 1] * Saux - Ry[n - 1] * Sy
            Sy = Rx[n - 1] * Sy + Ry[n - 1] * Saux
            xl = h * xl
            n -= 1
        Wx = errf_const * Sx
        Wy = errf_const * Sy
    else:
        # Asymptotic continued-fraction branch for large |z|.
        xh = y
        yh = x
        rx = 0.
        ry = 0.
        for n in range(9, 0, -1):
            Tx = xh + n * rx
            Ty = yh - n * ry
            Tn = Tx * Tx + Ty * Ty
            rx = 0.5 * Tx / Tn
            ry = 0.5 * Ty / Tn
        Wx = errf_const * rx
        Wy = errf_const * ry
    if y == 0.:
        # On the real axis the real part is exactly exp(-x^2).
        Wx = exp(-x * x)
    if in_imag < 0.:
        # Lower half-plane: reflect using w(-z) = 2*exp(-z^2) - w(z).
        Wx = 2.0 * exp(y * y - x * x) * cos(2.0 * x * y) - Wx
        Wy = - 2.0 * exp(y * y - x * x) * sin(2.0 * x * y) - Wy
        if in_real > 0.:
            Wy = -Wy
    elif in_real < 0.:
        Wy = -Wy
    return Wx + 1j * Wy
if __name__ == '__main__':
    # Benchmark the numba wofz against scipy.special.wofz on 1M random points.
    import scipy.special
    import numpy as np

    # First call triggers numba compilation (excluded from timing).
    wofz(
        # znp.array([10.], dtype=znp.float64), znp.array([5.], dtype=znp.float64))
        *np.random.uniform(-10, 10, (2, 1000000)))
    print("compiled")
    start = time.time()
    x = np.random.uniform(-10, 10, (2, 1000000))
    n = 10
    for _ in range(n):
        wofz_our = wofz(
            # znp.array([10.], dtype=znp.float64), znp.array([5.], dtype=znp.float64))
            *x
        )
    # NOTE(review): this label is misleading -- the timed function here is the
    # numba-vectorized wofz, not the TensorFlow wofz2.
    print('tensorflow', time.time() - start)
    x = x[0] + 1j * x[1]
    start = time.time()
    for _ in range(n):
        y = scipy.special.wofz(x)
    print('scipy', time.time() - start)
    # Compare element-wise deviation between the two implementations.
    print(abs(wofz_our - y), znp.std(wofz_our - y))
|
zfit/benchmarks | toys/gaussians/roofit_example.py | <gh_stars>0
from ROOT import RooRealVar, RooGaussian, RooChebychev, RooAddPdf, RooArgList, RooArgSet, RooFit

# RooFit comparison example: Gaussian signal + Chebychev background on x in [-1, 1].
x = RooRealVar("x","x",-1,1)
# Use RooGaussian in the generation
mean = RooRealVar("mean","mean of gaussian",0,-1,1)
# NOTE(review): sigma's allowed range [-1, 1] includes non-positive widths;
# presumably a strictly positive range was intended -- confirm.
sigma = RooRealVar("sigma","sigma of gaussian",0.1,-1,1)
sig = RooGaussian("gauss","gaussian PDF",x,mean,sigma) ;
# Background: 2nd-order Chebychev polynomial with coefficients a0, a1.
a0 = RooRealVar("a0","a0",0.5,0.,1.)
# NOTE(review): a1's initial value -0.2 lies outside its [0, 1] limits; RooFit
# will clamp it to the boundary -- verify the intended range.
a1 = RooRealVar("a1","a1",-0.2,0.,1.)
bkg = RooChebychev("bkg","Background",x,RooArgList(a0,a1))
# Mixture model: bkgfrac * bkg + (1 - bkgfrac) * sig.
bkgfrac = RooRealVar("bkgfrac","fraction of background",0.5,0.,1.)
model = RooAddPdf("model","g+a",RooArgList(bkg,sig), RooArgList(bkgfrac) )
# Generate a 10k-event toy dataset and fit the model back to it.
data = model.generate(RooArgSet(x), 10000)
model.fitTo(data)
zfit/benchmarks | src/py_function.py | <reponame>zfit/benchmarks
"""Benchmark of different frameworks for playing around.
no GPU used, serial required (x used in ever next calculation)
| sample_size | tf traced | tf eager | numpy |
| 1k | 0.001 | 0.004 | 0.001 |
| 10 k | 0.005 | 0.013 | 0.007 |
| 100 k | 0.015 | 0.03 | 0.07 |
| 1 mio | 0.2 | 0.3 | 0.7 |
| 10 mio | 2 | 3 | 7 |
no GPU used, parallel possible, list of 10
| sample_size | tf traced | tf eager | numpy | torch |
| 1k | 0.0002 | 0.004 | 0.001 | 0.001 |
| 10 k | 0.0008 | 0.014 | 0.008 | 0.004 |
| 100 k | 0.002 | 0.04 | 0.08 | 0.02 |
| 1 mio | 0.02 | 0.4 | 0.8 | 0.3 |
| 10 mio | 0.2 | 4 | 8 | 3 |
(with autograph 2 secs)
"""
import numba as numba
import numpy as np
import tensorflow as tf
import torch
from zfit_benchmark.timer import Timer
# Shared benchmark state.
global_y = tf.random.normal(shape=(10, 1))  # only used by the commented-out calc_np variants
var1 = tf.Variable(42.)  # only used by the commented-out calc_np variants
list1 = [1, 2, 3, 4]  # mutated by dummy() as a side-effect probe
size = (10000000,)  # 10M elements per benchmark array
n_loops = 10  # number of independent transformation chains per call
def dummy():
    """Side-effect probe: draw 100k normals and append their sum to the module-level list1."""
    draws = np.random.normal(size=100000)
    list1.append(draws.sum())
def calc_np(x):
    """Pure-NumPy reference benchmark.

    Runs ``n_loops`` transformation chains that each restart from the
    original input (so they are mutually independent / parallelizable),
    sums the per-chain results element-wise and reduces them to a scalar
    mean-log. Draws fresh Gaussian noise per chain, so the result is
    stochastic.
    """
    x_init = x
    partials = []
    for i in range(n_loops):
        chain = np.sqrt(np.abs(x_init))
        chain = np.cos(chain - 0.3)
        chain = np.power(chain, i + 1)
        chain = np.sinh(chain + 0.4)
        chain = chain ** 2
        chain = chain + np.random.normal(size=size)
        chain = chain / np.mean(chain)
        chain = np.abs(chain)
        partials.append(chain)
    combined = np.sum(partials, axis=0)
    return np.mean(np.log(combined))
@tf.function(autograph=True)
def calc_tf(x):
    # TensorFlow counterpart of calc_np (graph-compiled via tf.function).
    # NOTE(review): unlike calc_np/calc_torch, the chain here starts from
    # x_init * (i + 1) and the final reduction uses only the LAST chain's x,
    # ignoring the accumulated list1 -- the frameworks therefore do not
    # compute the same quantity; verify before comparing timings.
    x_init = x
    list1 = []  # appended to inside a tf.range loop; shadows the module-level list1
    for i in tf.range(n_loops):
        # for i in range(n_loops):
        x = tf.sqrt(tf.abs(x_init * (tf.cast(i, dtype=tf.float64) + 1.)))
        print(x)  # executes at trace time only (not per call)
        x = tf.cos(x - 0.3)
        x = tf.pow(x, tf.cast(i + 1, tf.float64))
        x = tf.sinh(x + 0.4)
        # print("calc_tf is being traced")
        x = x ** 2
        x += tf.random.normal(shape=size, dtype=tf.float64)
        x /= tf.reduce_mean(x)
        x = tf.abs(x)
        list1.append(x)
    # Reduce the last chain to a scalar (see review note above).
    x = tf.reduce_sum(x, axis=0)
    x = tf.reduce_mean(tf.math.log(x))
    # tf.py_function(dummy, [], Tout=[])
    return x
# @torch.jit.script
def calc_torch(x):
    """PyTorch counterpart of calc_np: n_loops independent chains from the
    original input, summed element-wise and reduced to a scalar mean-log.
    Returns a NumPy scalar. Note: torch.normal(mean=0, std=0, ...) yields
    all-zero "noise", mirroring the original.
    """
    x_init = x
    partials = []
    for i in range(n_loops):
        chain = torch.sqrt(torch.abs(x_init))
        chain = torch.cos(chain - 0.3)
        chain = torch.pow(chain, i + 1)
        chain = torch.sinh(chain + 0.4)
        chain = chain ** 2
        chain = chain + torch.normal(mean=0, std=0, size=size)
        chain = chain / torch.mean(chain)
        chain = torch.abs(chain)
        partials.append(chain)
    combined = torch.sum(torch.stack(partials), dim=0)
    return torch.mean(torch.log(combined)).numpy()
@tf.function
def calc_np_wrapped(x):
    # Bridge the pure-NumPy benchmark into the TF graph via tf.py_function
    # (runs eagerly inside the graph; used to measure the wrapping overhead).
    return tf.py_function(calc_np, [x], Tout=tf.float32)
@tf.function
def calc_torch_wrapped(x):
    # Same py_function bridge as calc_np_wrapped, but delegating to PyTorch.
    return tf.py_function(calc_torch, [x], Tout=tf.float32)
@numba.jit(nopython=True)
def calc_np_numba(x):
    # Numba variant; currently unused in the active benchmark loop below.
    # NOTE(review): unlike calc_np this chains on x (serial, not restarting
    # from the input), uses power i instead of i + 1, and adds the random
    # vector AFTER reducing x to a scalar (returning an array again) --
    # presumably out of sync with the other implementations; confirm before
    # comparing results.
    for i in range(n_loops):
        x = np.sqrt(np.abs(x))
        x = np.cos(x - 0.3)
        x = np.power(x, i)
        x = np.sinh(x + 0.4)
        x = x ** 2
    x = np.mean(np.log(x))
    x += np.random.normal(size=size)
    return x
if __name__ == '__main__':
    # Prepare one input per framework.
    x_tf = tf.random.normal(shape=size, dtype=tf.float64)
    x_torch = torch.normal(mean=0, std=0, size=size)
    # x = x.numpy()
    # y = zfit.run(calc_tf(x))
    # x = zfit.run(x)
    results = []
    # calc_tf_graph = calc_tf(x)
    # calc_np_wrapped_graph = calc_np_wrapped(x)
    # grad = tf.gradients(calc_np_wrapped_graph, x)
    # grad = tf.gradients(calc_tf_graph, x)
    # print(zfit.run(grad))
    # x = np.random.normal(size=size)
    # Warm-up calls: trigger tracing/JIT so the timed loop excludes compilation.
    y = calc_np(x_tf)
    y = calc_tf(x_tf)
    y = calc_torch(x_torch)
    # y = calc_np_numba(x)
    # Let TF dispatch kernels asynchronously during the timed loop.
    tf.config.experimental.set_synchronous_execution(
        False
    )
    with Timer() as timer:
        n_runs = 3
        for _ in range(n_runs):
            # x = tf.random.normal(shape=size)
            # with tf.GradientTape() as tape:
            #     tape.watch(x)
            #     y = calc_np_wrapped(x)
            # y = calc_np(x_tf)
            y = calc_tf(x_tf)  # swap the commented lines to time another backend
            # y = calc_torch(x_torch)
            # y = calc_torch_wrapped(x)
            # y = zfit.run(calc_tf_graph)
            # y = zfit.run(calc_np_wrapped_graph)
            # x = torch.normal(0, 1, size=size)
            # y = calc_np_numba(x)
            # if not v2behavior:
            #     zfit.run()
            # gradients = tape.gradient(y, x)
            # print(gradients)
            results.append(y)
    print(f"{np.average(results)} +- {np.std(results)}")
    print(f"Time needed: {timer.elapsed / n_runs :.3} sec")
|
zfit/benchmarks | src/dataset_memory.py | <reponame>zfit/benchmarks
from memory_profiler import profile
import numpy as np
import tensorflow as tf
# @tf.function(autograph=False)
# @profile
def load_data():
    """Memory probe: build a tiny batched tf.data pipeline and iterate it twice,
    printing every batch and squaring it (result deliberately discarded)."""
    # data_np = np.random.normal(size=100000000)
    sample = tf.random.normal(shape=(80, 1)) + 1
    batched = tf.data.Dataset.from_tensor_slices(sample).batch(3)
    for _ in range(2):  # two full passes, as in the original two loops
        for batch in batched:
            print(batch)
            squared = tf.square(batch)  # discarded; only exercises allocation
    # print(sqrt)
    # dataset = tf.convert_to_tensor(data_np)
if __name__ == '__main__':
    # Run the tf.data memory probe directly (pair with memory_profiler's
    # @profile decorator above to inspect allocations).
    load_data()
ethanjpark/Plume_Tracing_UUV_Sim | src/multi_bot_testing.py | <filename>src/multi_bot_testing.py
#!/usr/bin/env python
# testing code for the multi-bot simulation, mainly that i can move them independently
import rospy
import numpy as np
from std_msgs.msg import Header
from uuv_control_msgs.srv import GoTo
from uuv_control_msgs.msg import Waypoint
from geometry_msgs.msg import Point, Vector3, Twist, TwistWithCovariance
from nav_msgs.msg import Odometry
from visualization_msgs.msg import Marker
#CONSTANTS
rov1_startx = 100
rov2_startx = 80
starty = 20
#Global Vars
auv1_location = None #global var for robot position
auv1_heading = None #global var for robot heading vector
auv2_location = None
auv2_heading = None
#see musa's example waypoint for eca a9 in discord
#Waypoint messsage 'constructor'
def make_waypoint(newx, newy, newz):
    """Build a uuv_control Waypoint at (newx, newy, newz) in the world frame.

    Fixed defaults: 2.0 m/s forward speed, free heading, 0.5 m acceptance radius.
    """
    wp = Waypoint()
    wp.header.stamp = rospy.Time.now()
    wp.header.frame_id = "world"
    wp.point.x, wp.point.y, wp.point.z = newx, newy, newz
    wp.max_forward_speed = 2.0
    wp.heading_offset = 0.0
    wp.use_fixed_heading = False
    wp.radius_of_acceptance = 0.5
    return wp
#Go To service call
def call_goto(wp, gotoservice, interpolator):
    """Send waypoint wp through the vehicle's go_to service proxy.

    Failures are logged and swallowed (best effort). Note: Python 2 `except`
    syntax -- this file is a Python 2 ROS node.
    """
    #rosservice call to Go_To
    try:
        res = gotoservice(wp,wp.max_forward_speed,str(interpolator))
        #print("Go To service call successful: " + str(res))
    except rospy.ServiceException, e:
        print("Service call failed: %s"%e)
#callback function for auv pose subscriber
def readauv1pose(msg):
    """Odometry callback for rov1: cache its planar (x, y) velocity as the heading."""
    global auv1_heading
    linear = msg.twist.twist.linear
    auv1_heading = np.array([linear.x, linear.y])
def readauv2pose(msg):
    """Odometry callback for rov2: cache its planar (x, y) velocity as the heading."""
    global auv2_heading
    linear = msg.twist.twist.linear
    auv2_heading = np.array([linear.x, linear.y])
if __name__=='__main__':
    # Two-vehicle test node: subscribe to both ROVs' odometry and repeatedly
    # command them to independent fixed waypoints via their go_to services.
    rospy.init_node('multi_bot_test')
    auv1pos_sub = rospy.Subscriber(
        'rov1/pose_gt',
        Odometry,
        readauv1pose)
    auv2pos_sub = rospy.Subscriber(
        'rov2/pose_gt',
        Odometry,
        readauv2pose)
    interpolator = rospy.get_param('~interpolator', 'dubins')
    # Wait for both go_to services before creating proxies.
    try:
        rospy.wait_for_service('rov1/go_to', timeout=15)
    except rospy.ROSException:
        raise rospy.ROSException('rov1 Service not available!')
    try:
        rospy.wait_for_service('rov2/go_to', timeout=15)
    except rospy.ROSException:
        raise rospy.ROSException('rov2 Service not available!')
    try:
        goto1 = rospy.ServiceProxy('rov1/go_to', GoTo)
    except rospy.ROSException as e:
        raise rospy.ROSException('rov1 service proxy failed, error=%s', str(e))
    try:
        goto2 = rospy.ServiceProxy('rov2/go_to', GoTo)
    except rospy.ROSException as e:
        raise rospy.ROSException('rov2 service proxy failed, error=%s', str(e))
    # 1 Hz command loop; waypoints are constant, so this re-sends the same
    # goals each tick.
    while not rospy.is_shutdown():
        r = rospy.Rate(1)
        r.sleep()
        r1wp1 = make_waypoint(rov1_startx,starty,-31)
        r2wp1 = make_waypoint(rov2_startx,starty,-31)
        call_goto(r1wp1,goto1,interpolator)
        call_goto(r2wp1,goto2,interpolator)
ethanjpark/Plume_Tracing_UUV_Sim | src/cptbbp.py | <reponame>ethanjpark/Plume_Tracing_UUV_Sim
#!/usr/bin/env python
# written by: <NAME>
# Chemical Plume Tracing - Behaviour Based Planning Algorithm
# Algorithm based on:
# "Chemical Plume Tracing via an Autonomous Underwater Vehicle" by
# <NAME>, <NAME>, and <NAME>
import rospy
import numpy as np
from std_msgs.msg import Header
from uuv_control_msgs.srv import GoTo
from uuv_control_msgs.msg import Waypoint
from uuv_sensor_ros_plugins_msgs.msg import ChemicalParticleConcentration
from geometry_msgs.msg import Point, Vector3, Twist, TwistWithCovariance
from nav_msgs.msg import Odometry
from visualization_msgs.msg import Marker
#CONSTANTS
THRESHOLD = 0.004 #particle concentration threshold for detecting plume
CURRENT_FLOW = np.array([1.0, -1.0]) #[x,y] vector of the current flow
BETA_OFFSET = 30 #angle offset (degrees) relative to upflow used while tracking in
UPFLOW = np.array([-1.0, 1.0]) #180 rotation of CURRENT_FLOW
LAMBDA = 2.0 #plume detection time threshold (2 seconds)
R = 0.75 #distance threshold (m) for considering a waypoint reached
L_u = 2.0 #constant for how much upflow from last detected location auv should go
L_c = 2.0 #constant for how much cross flow from last detected location auv should go
startx = 20 #x-component of where auv should start from
starty = 25 #y-component of where auv should start from

#Global Vars (mutated by the behavior functions below; this node is single-threaded
#apart from ROS callbacks updating the sensor/pose globals)
alg_state = -1 #global var for which state the algorithm is currently in
               # 0 for init, 1 for find, 2 for track-in, 3 for track-out, 4 for reacquire, 5 (maybe) for source declared
particle_concentration = 0.0 #global var for particle concentration
auv_location = None #global var for robot position
auv_heading = None #global var for robot heading vector
lhs = 0 #global var for which side of plume robot will drive out of
t_last = 0 #global var for last time at which plume was detected
lost_pnts = [] #last detection points stored when track out is triggered
ldp = None #global var for last detection point
tout_init = 1 #global var indicating whether track-out needs to choose the next upflow last detected point
tout_wp = None #global var for storing waypoint for track-out behavior
bowtie_step = -1 #global var for which step of the bowtie maneuver auv is currently performing
                 # 0 = going to center, 1 for upflow left, 2 for downflow left, 3 for upflow right, 4 for downflow right
upnotcross = 1 #global var indicator for when auv is going upflow not cross (for hitting boundary)
findpos = -1 #global var for indicating which direction of rotation from upflow auv is going
findbound = 0 #global var indicating which boundary (pos/neg x or pos/neg y) the auv hit
              # 1 for pos x, 2 for neg x, 3 for pos y, 4 for neg y
prevfindbound = 0 #global var to keep track of which edge we hit last
trackincounter = 3 #global var used to only periodically call goto in trackin

#dictionary for mapping alg_state to behaviors (used for status printing)
s2b = {
    -1: 'Init',
    0 : 'GoTo',
    1 : 'Find',
    2 : 'Track-In',
    3 : 'Track-Out',
    4 : 'Reacquire',
    5 : 'Source Declared'
}
#Calculate angle between two vectors (counter-clockwise positive)
def angle_between(v1, v2):
    """Signed angle in degrees from v1 to v2, counter-clockwise positive."""
    cos_part = np.dot(v1, v2)
    sin_part = np.linalg.det(np.array([v1, v2]))
    return np.rad2deg(np.arctan2(sin_part, cos_part))
#Calculate normalized rotated vector of upflow
def rotate_upflow(angle):
    """Rotate the UPFLOW vector by `angle` radians (CCW) and normalize it.

    Returns a 2x1 column vector; callers index it as a flat (x, y) pair.
    """
    c = np.cos(angle)
    s = np.sin(angle)
    rotation = np.array([[c, -s], [s, c]])
    column = np.array([[UPFLOW[0]], [UPFLOW[1]]])
    rotated = np.dot(rotation, column)
    return rotated / np.linalg.norm(rotated)
#Waypoint messsage 'constructor'
def make_waypoint(newx, newy, newz):
    """Build a uuv_control Waypoint at (newx, newy, newz) in the world frame.

    Fixed defaults: 0.75 m/s forward speed, free heading.
    """
    wp = Waypoint()
    wp.header.stamp = rospy.Time.now()
    wp.header.frame_id = "world"
    wp.point.x, wp.point.y, wp.point.z = newx, newy, newz
    wp.max_forward_speed = 0.75
    wp.heading_offset = 0.0
    wp.use_fixed_heading = False
    return wp
#Go To service call
def call_goto(wp, gotoservice, interpolator):
    """Send waypoint wp through the go_to service proxy; failures are logged
    and swallowed (best effort). Python 2 `except` syntax -- py2 ROS node."""
    #rosservice call to Go_To
    try:
        res = gotoservice(wp,wp.max_forward_speed,str(interpolator))
        #print("Go To service call successful: " + str(res))
    except rospy.ServiceException, e:
        print("Service call failed: %s"%e)
#Check distance between two locations
def has_reached(a, b, thres):
    """True when points a and b are strictly closer than thres (Euclidean norm)."""
    distance = np.linalg.norm(a - b)
    return distance < thres
#Check auv location for boundaries
def check_bounds(location):
    """Return True while `location` (x, y, z) stays inside the arena.

    Arena: x in [-100, 100], y in [-50, 50], z in [-50, 0]. On an x/y
    violation the module global `findbound` records which edge was crossed
    (1: +x, 2: -x, 3: +y, 4: -y); the z check deliberately leaves
    `findbound` untouched, matching the original behavior.
    """
    global findbound
    x, y, z = location[0], location[1], location[2]
    if x > 100:
        findbound = 1
        return False
    if x < -100:
        findbound = 2
        return False
    if y > 50:
        findbound = 3
        return False
    if y < -50:
        findbound = 4
        return False
    if z < -50 or z > 0:
        # shouldn't really ever trigger but kept for redundancy
        return False
    return True
#Track In behavior of algorithm
def track_in(gotoservice,interpolator):
    """Track-In: zig-zag upflow through the plume at +/- BETA_OFFSET degrees.

    Mutates the module globals: lhs (zig-zag side), t_last / ldp (last
    detection time/point), alg_state and trackincounter. Transitions to
    track-out (state 3) when no detection for LAMBDA seconds.
    """
    global lhs, t_last, ldp, alg_state, trackincounter
    trackincounter += 1
    if(not check_bounds(auv_location)): #hit boundary, reflect
        print("track in hit boundary")
        # Flip the zig-zag side so the next leg heads back into the arena.
        if(lhs == 1):
            lhs = -1
        elif(lhs == -1):
            lhs = 1
        #update t_last
        t_last = rospy.get_time()
        #update last detection point
        ldp = auv_location
        #calculate heading and new waypoint
        offsetrad = lhs*np.deg2rad(BETA_OFFSET)
        new_heading = rotate_upflow(offsetrad)
        threed_heading = np.array([new_heading[0],new_heading[1],0.0])
        new_waypoint = np.add(threed_heading,auv_location)
        wp = make_waypoint(new_waypoint[0], new_waypoint[1], new_waypoint[2])
        call_goto(wp, gotoservice, interpolator)
    elif(particle_concentration >= THRESHOLD): #stay in track-in
        alg_state = 2
        if(lhs == 0):
            #calculate lhs var using angle between upflow and auv_heading
            ang = angle_between(UPFLOW,auv_heading)
            print("AUV heading: (" + str(auv_heading[0]) + "," + str(auv_heading[1]) + ")")
            print("Angle between: " + str(ang))
            if(ang > 0): #heading is counter-clockwise from upflow
                lhs = 1
            else:
                lhs = -1
        # Only re-issue a goto every 4th call to avoid saturating the controller.
        if(trackincounter%4 == 0):
            #update t_last
            t_last = rospy.get_time()
            #update last detection point
            ldp = auv_location
            #calculate heading and new waypoint (2x step length here)
            print("lhs: " + str(lhs))
            offsetrad = lhs*np.deg2rad(BETA_OFFSET)
            new_heading = np.dot(2,rotate_upflow(offsetrad))
            threed_heading = np.array([new_heading[0],new_heading[1],0.0])
            new_waypoint = np.add(threed_heading,auv_location)
            wp = make_waypoint(new_waypoint[0], new_waypoint[1], new_waypoint[2])
            call_goto(wp, gotoservice, interpolator)
    #lost contact with plume
    elif(rospy.get_time() - t_last > LAMBDA): #go to track-out
        lost_pnts.append(ldp)
        print("Lost contact with plume, going to track-out.")
        alg_state = 3
        lhs = 0
#Track out behavior of algorithm
def track_out(gotoservice,interpolator):
    """Track-Out: after losing the plume, steer upflow + cross-flow from the
    last detection point. Re-detection sends us back to track-in (or declares
    the source via src_check); reaching the computed point without detection
    moves on to reacquire. Mutates tout_init, tout_wp and alg_state."""
    global tout_init, alg_state, tout_wp
    #plume detected again
    if(particle_concentration >= THRESHOLD):
        tout_init = 1
        S = src_check()
        if(S):
            alg_state = 5 #source has been found
        else:
            print("Plume found, going to track-in.")
            alg_state = 2 #back to track in
    else:
        if(tout_init == 1): #determine destination waypoint
            up = np.array([lost_pnts[-1][0], lost_pnts[-1][1]]) #set to most upflow point in last detection point list
            tout_init = 0
            #set destination to a point that is upflow and cross the flow from last detected point
            f_p = np.ndarray.flatten(rotate_upflow(np.pi/2))
            f = UPFLOW/np.linalg.norm(UPFLOW)
            # NOTE(review): subtracting L_u*f moves DOWNflow of `up` for this
            # UPFLOW definition -- confirm the intended sign.
            tout_wp = up - np.dot(L_u,f) - np.dot(L_c*lhs,f_p)
            wp = make_waypoint(tout_wp[0], tout_wp[1], lost_pnts[-1][2])
            print("Going somewhere based on ldp.")
            call_goto(wp, gotoservice, interpolator)
        #has gotten close enough to designated ldp waypoint
        if(has_reached(auv_location, np.array([tout_wp[0], tout_wp[1], auv_location[2]]), R)):
            tout_init = 1
            S = src_check()
            if(S):
                alg_state = 5 #source has been found
            else:
                print("Going to reacquire.")
                alg_state = 4 #go to reacquire
        #go to ldp waypoint
        # else:
        #     wp = make_waypoint(tout_wp[0], tout_wp[1], lost_pnts[-1][2])
        #     print("Going somewhere based on ldp.")
        #     call_goto(wp, gotoservice, interpolator)
#function for checking whether source can be determined from ldp list
def src_check():
    """Decide whether the plume source can be declared from lost_pnts.

    Projects the pairwise displacement vectors of the three most recent
    last-detection points onto UPFLOW; if all three upflow separations are
    under 4 m the detections have converged and the source is declared.
    Returns True/False; reads module globals lost_pnts and UPFLOW only.
    """
    if(len(lost_pnts) < 3): #not enough data to make conclusion
        print("Not enough data to determine source.")
        return False
    else:
        v1 = np.array([lost_pnts[-3][0]-lost_pnts[-1][0], lost_pnts[-3][1]-lost_pnts[-1][1]]) #vector from 3rd to 1st point (in terms of how upflow)
        v2 = np.array([lost_pnts[-2][0]-lost_pnts[-1][0], lost_pnts[-2][1]-lost_pnts[-1][1]]) #vector from 2nd to 1st point (in terms of how upflow)
        v3 = np.array([lost_pnts[-3][0]-lost_pnts[-2][0], lost_pnts[-3][1]-lost_pnts[-2][1]]) #vector from 3rd to 2nd point (in terms of how upflow)
        #calculate scalar projection of vectors onto upflow vector
        temp = np.linalg.norm(UPFLOW)
        print("Calculating distances between three most upflow points in direction of upflow...")
        p1 = np.dot(UPFLOW, v1)/temp
        p2 = np.dot(UPFLOW, v2)/temp
        p3 = np.dot(UPFLOW, v3)/temp
        if(p1 < 4 and p2 < 4 and p3 < 4):
            print("Source determined!")
            return True
        else:
            print("Data inconclusive, source cannot be determined with accuracy.")
            return False
#Reacquire behavior of algorithm
def reacquire(gotoservice, interpolator):
    """Reacquire: fly a 'bowtie' search pattern around the most upflow last
    detection point, advancing one leg per call via the bowtie_step state
    machine (-1 center, 1 up-left, 2 down-left, 3 up-right, 4 down-right).
    A detection returns to track-in; exhausting the pattern pops the point
    and retries, falling back to find_plume when lost_pnts empties."""
    global bowtie_step, alg_state, lost_pnts
    if(particle_concentration >= THRESHOLD):
        print("Plume found, going to track-in.")
        bowtie_step = -1
        alg_state = 2
    else:
        #calculate vertices of bowtie maneuver (recomputed each call around
        #the CURRENT auv location, except the center which is the ldp)
        bowtie_center = lost_pnts[-1] #most upflow ldp is center of bowtie maneuver
        angle1 = np.deg2rad(15)
        angle2 = np.deg2rad(165)
        angle3 = np.deg2rad(-15)
        angle4 = np.deg2rad(-165)
        uleft = 5*rotate_upflow(angle1) #scaled by 5 for a bigger maneuver since output of rotate_upflow is normalized
        dleft = 5*rotate_upflow(angle2)
        uright = 5*rotate_upflow(angle3)
        dright = 5*rotate_upflow(angle4)
        bowtie_uleft = np.array([auv_location[0]+uleft[0], auv_location[1]+uleft[1], auv_location[2]])
        bowtie_dleft = np.array([auv_location[0]+dleft[0], auv_location[1]+dleft[1], auv_location[2]])
        bowtie_uright = np.array([auv_location[0]+uright[0], auv_location[1]+uright[1], auv_location[2]])
        bowtie_dright = np.array([auv_location[0]+dright[0], auv_location[1]+dright[1], auv_location[2]])
        if(bowtie_step == -1): #go to center of bowtie
            bowtie_step = 0
            wp = make_waypoint(bowtie_center[0], bowtie_center[1], bowtie_center[2])
            print("Going to center of bowtie.")
            call_goto(wp, gotoservice, interpolator)
        elif(bowtie_step == 0): #check if center reached, if so then start bowtie
            if(has_reached(auv_location, bowtie_center, R)):
                bowtie_step = 1
                wp = make_waypoint(bowtie_uleft[0], bowtie_uleft[1], bowtie_uleft[2])
                print("Going to upper left of bowtie.")
                call_goto(wp, gotoservice, interpolator)
        elif(bowtie_step == 1):
            if(has_reached(auv_location, bowtie_uleft, R)):
                bowtie_step = 2
                wp = make_waypoint(bowtie_dleft[0], bowtie_dleft[1], bowtie_dleft[2])
                print("Going to lower left of bowtie.")
                call_goto(wp, gotoservice, interpolator)
        elif(bowtie_step == 2):
            if(has_reached(auv_location, bowtie_dleft, R)):
                bowtie_step = 3
                wp = make_waypoint(bowtie_uright[0], bowtie_uright[1], bowtie_uright[2])
                print("Going to upper right of bowtie.")
                call_goto(wp, gotoservice, interpolator)
        elif(bowtie_step == 3):
            if(has_reached(auv_location, bowtie_uright, R)):
                bowtie_step = 4
                wp = make_waypoint(bowtie_dright[0], bowtie_dright[1], bowtie_dright[2])
                print("Going to lower right of bowtie.")
                call_goto(wp, gotoservice, interpolator)
        elif(bowtie_step == 4):
            if(has_reached(auv_location, bowtie_dright, R)): #end of bowtie maneuver reached without finding plume
                lost_pnts = lost_pnts[:-1] #remove most upflow point and start again
                if(len(lost_pnts) == 0): #no more ldp points to go through
                    print("Couldn't find plume after bowtie, going to find.")
                    find_plume(gotoservice, interpolator)
                else:
                    bowtie_step = -1
#Find behavior in algorithm
def find_plume(gotoservice, interpolator):
    """Find: sweep the arena by alternating long cross-flow legs with short
    upflow hops when a boundary is hit (lawnmower-like pattern). Mutates
    alg_state, upnotcross, findpos and prevfindbound; a detection switches
    to track-in (state 2)."""
    global alg_state, upnotcross, findpos, prevfindbound
    if(particle_concentration >= THRESHOLD):
        print("Plume found, going to track-in.")
        alg_state = 2
    else:
        alg_state = 1
        if(upnotcross == 1):
            print("find: going across")
            upnotcross = 0
            # Alternate the crossing direction (+90 / -90 deg from upflow).
            if(findpos == 1):
                findpos = -1
            elif(findpos == -1):
                findpos = 1
            cross = rotate_upflow(findpos*np.pi/2)
            done = False
            # Double the cross vector until the target would leave the arena,
            # i.e. aim at (just past) the far boundary.
            while(not done):
                cross += cross
                if(auv_location[0]+cross[0] < -100 or auv_location[0]+cross[0] > 100 or auv_location[1]+cross[1] < -50 or auv_location[1]+cross[1] > 50):
                    done = True
            wp = make_waypoint(auv_location[0]+cross[0], auv_location[1]+cross[1], auv_location[2])
            call_goto(wp, gotoservice, interpolator)
        else:
            if(not check_bounds(auv_location) and prevfindbound != findbound): #hit boundary, go upflow slightly before crossing
                print("find: hit boundary")
                upnotcross = 1
                ufnorm = UPFLOW/np.linalg.norm(UPFLOW)
                #depending on which boundary the auv hit, have to adjust the vector so it doesn't keep going out of bounds
                if(findbound == 1): #triggered on positive x boundary
                    prevfindbound = 1
                    if(ufnorm[0] > 0): ufnorm[0] = 0
                elif(findbound == 2): #triggered on negative x boundary
                    prevfindbound = 2
                    if(ufnorm[0] < 0): ufnorm[0] = 0
                elif(findbound == 3): #triggered on positive y boundary
                    prevfindbound = 3
                    if(ufnorm[1] > 0): ufnorm[1] = 0
                elif(findbound == 4): #triggered on negative y boundary
                    prevfindbound = 4
                    if(ufnorm[1] < 0): ufnorm[1] = 0
                print("find: going upflow")
                wp = make_waypoint(auv_location[0]+ufnorm[0], auv_location[1]+ufnorm[1], auv_location[2])
                call_goto(wp, gotoservice, interpolator)
#callback function for particle concentration subscriber
def readconcentration(msg):
    """Sensor callback: cache the measured plume concentration and the AUV
    position reported alongside it into the module globals."""
    global particle_concentration, auv_location
    particle_concentration = msg.concentration
    position = msg.position
    auv_location = np.array([position.x, position.y, position.z])
#callback function for auv pose subscriber
def readauvpose(msg):
    """Odometry callback: cache the planar (x, y) velocity as the AUV heading."""
    global auv_heading
    linear = msg.twist.twist.linear
    auv_heading = np.array([linear.x, linear.y])
if __name__=='__main__':
    # Main CPT state machine loop: dispatch to the behavior matching
    # alg_state once per second until shutdown.
    rospy.init_node('CPT_BBP')
    part_conc_sub = rospy.Subscriber(
        'rexrov2/particle_concentration',
        ChemicalParticleConcentration,
        readconcentration)
    auvpos_sub = rospy.Subscriber(
        'rexrov2/pose_gt',
        Odometry,
        readauvpose)
    markerpub = rospy.Publisher('sourcemarker', Marker, queue_size=1)
    interpolator = rospy.get_param('~interpolator', 'lipb')
    try:
        rospy.wait_for_service('rexrov2/go_to', timeout=15)
    except rospy.ROSException:
        raise rospy.ROSException('Service not available!')
    try:
        goto = rospy.ServiceProxy('rexrov2/go_to', GoTo)
    except rospy.ROSException as e:
        raise rospy.ROSException('Service proxy failed, error=%s', str(e))
    prevstate = -1
    while not rospy.is_shutdown():
        #running the algorithm to quickly makes for some... interesting AUV behavior (namely breakdancing)
        r = rospy.Rate(1)
        r.sleep()
        # Announce state transitions only.
        if(alg_state != prevstate):
            print("Algorithm state: " + s2b[alg_state])
        if(particle_concentration > 0):
            print("Particle concentration = " + str(particle_concentration))
        #check algorithm state and run appropriate behavior
        if(alg_state == -1): #initial startup
            alg_state = 0
            prevstate = 0
            wp = make_waypoint(startx, starty, auv_location[2])
            call_goto(wp, goto, interpolator)
        elif(alg_state == 0): #go to starting pos
            dest = np.array([startx, starty, auv_location[2]])
            if(has_reached(auv_location, dest, R)):
                find_plume(goto, interpolator)
        elif(alg_state == 1):
            prevstate = 1
            find_plume(goto, interpolator)
        elif(alg_state == 2):
            prevstate = 2
            track_in(goto, interpolator)
        elif(alg_state == 3):
            prevstate = 3
            track_out(goto, interpolator)
        elif(alg_state == 4):
            prevstate = 4
            reacquire(goto, interpolator)
        elif(alg_state == 5): #source found
            prevstate = 5
            if(not has_reached(auv_location, lost_pnts[-1], R)): #go to most upflow ldp, which is speculated source
                print("Source: [" + str(lost_pnts[-1][0]) + ", " + str(lost_pnts[-1][1]) + ", " + str(lost_pnts[-1][2]) + "]")
                wp = make_waypoint(lost_pnts[-1][0], lost_pnts[-1][1], lost_pnts[-1][2])
                call_goto(wp, goto, interpolator)
            # Publish an RViz marker at the declared source every tick while
            # in state 5 (lifetime 0 = forever).
            marker = Marker()
            marker.header.frame_id = 'world'
            marker.header.stamp = rospy.Time.now()
            marker.id = 0
            marker.type = Marker.SPHERE
            marker.action = Marker.ADD
            marker.pose.position.x = lost_pnts[-1][0]
            marker.pose.position.y = lost_pnts[-1][1]
            marker.pose.position.z = lost_pnts[-1][2]
            marker.pose.orientation.x = 0
            marker.pose.orientation.y = 0
            marker.pose.orientation.z = 0
            marker.pose.orientation.w = 1
            marker.scale.x = 1.0
            marker.scale.y = 1.0
            marker.scale.z = 1.0
            marker.color.r = 1.0
            marker.color.g = 1.0
            marker.color.b = 1.0
            marker.color.a = 1.0
            marker.lifetime = rospy.Duration(0)
            markerpub.publish(marker)
ethanjpark/Plume_Tracing_UUV_Sim | scripts/tutorial_dp_controller.py | #!/usr/bin/env python
import rospy
import numpy as np
from uuv_control_interfaces import DPControllerBase
class TutorialDPController(DPControllerBase):
    """PID dynamic-positioning controller (uuv_simulator tutorial style).

    Reads the private ROS parameters ~Kp, ~Kd and ~Ki, each a list of six
    diagonal gain coefficients (one per DOF), and publishes a control wrench
    computed from the pose error, velocity error and integrated pose error.
    NOTE: this file uses Python 2 print statements.
    """
    def __init__(self):
        # NOTE(review): the base-class __init__ is passed `self` as an extra
        # positional argument -- confirm against DPControllerBase's signature.
        super(TutorialDPController, self).__init__(self)
        # 6x6 diagonal gain matrices and the PID integrator / last-error state.
        self._Kp = np.zeros(shape=(6, 6))
        self._Kd = np.zeros(shape=(6, 6))
        self._Ki = np.zeros(shape=(6, 6))
        self._int = np.zeros(shape=(6,))
        self._error_pose = np.zeros(shape=(6,))
        # Do the same for the other two matrices
        if rospy.get_param('~Kp'):
            diag = rospy.get_param('~Kp')
            if len(diag) == 6:
                self._Kp = np.diag(diag)
                print 'Kp=\n', self._Kp
            else:
                # If the vector provided has the wrong dimension, raise an exception
                raise rospy.ROSException('For the Kp diagonal matrix, 6 coefficients are needed')
        if rospy.get_param('~Kd'):
            diag = rospy.get_param('~Kd')
            if len(diag) == 6:
                self._Kd = np.diag(diag)
                print 'Kd=\n', self._Kd
            else:
                # If the vector provided has the wrong dimension, raise an exception
                raise rospy.ROSException('For the Kd diagonal matrix, 6 coefficients are needed')
        if rospy.get_param('~Ki'):
            diag = rospy.get_param('~Ki')
            if len(diag) == 6:
                self._Ki = np.diag(diag)
                print 'Ki=\n', self._Ki
            else:
                # If the vector provided has the wrong dimension, raise an exception
                raise rospy.ROSException('For the Ki diagonal matrix, 6 coefficients are needed')
        self._is_init = True
    def _reset_controller(self):
        # Reset the base controller, then clear local PID state.
        super(TutorialDPController, self)._reset_controller()
        self._error_pose = np.zeros(shape=(6,))
        self._int = np.zeros(shape=(6,))
    #This is where the controller algorithm would go. Also generates a
    #control effort vector (tau) and publishes it to the thrust manager input
    def update_controller(self):
        if not self._is_init:
            return False
        if not self.odom_is_init:
            return
        # Trapezoidal integration of the pose error over the controller step.
        self._int = self._int + 0.5 * (self.error_pose_euler + self._error_pose) * self._dt
        self._error_pose = self.error_pose_euler
        # tau = Kp*e + Kd*e_vel + Ki*integral(e)
        tau = np.dot(self._Kp, self.error_pose_euler) + np.dot(self._Kd, self._errors['vel']) + np.dot(self._Ki, self._int)
        self.publish_control_wrench(tau)
if __name__ == '__main__':
    # Node entry point: construct the controller and spin until shutdown.
    print('Tutorial - DP Controller')
    rospy.init_node('tutorial_dp_controller')
    try:
        node = TutorialDPController()
        rospy.spin()
    except rospy.ROSInterruptException:
        print('caught exception')
    print('exiting')
Corvalius/deepnet | examples/deepnet/multimodal_dbn/create_results_table.py | <filename>examples/deepnet/multimodal_dbn/create_results_table.py
"""Collects results from multiple runs and puts them into a nice table."""
import sys
import os
import numpy as np
from package.deepnet import util
def main():
    """Collect MAP / prec@50 stats from all splits and emit a LaTeX table.

    argv: <path> <numsplits> <output_file>. For each split and layer the
    '<layer>_classifier_BEST' checkpoint is read and its test stats pooled;
    the table reports mean +/- std per layer.
    """
    path = sys.argv[1]
    numsplits = int(sys.argv[2])
    output_file = sys.argv[3]
    layers = ['image_input', 'image_hidden1', 'image_hidden2', 'joint_hidden',
              'text_hidden2', 'text_hidden1', 'text_input']
    maps = {}
    precs = {}
    for i in range(1, numsplits + 1):
        for layer in layers:
            mfile = os.path.join(path, 'split_%d' % i, '%s_classifier_BEST' % layer)
            model = util.ReadModel(mfile)
            # setdefault replaces the original "if layer not in" boilerplate.
            maps.setdefault(layer, []).append(model.test_stat_es.MAP)
            precs.setdefault(layer, []).append(model.test_stat_es.prec50)
    # 'with' guarantees the output file is closed even if a write fails.
    with open(output_file, 'w') as f:
        f.write('\\begin{tabular}{|l|c|c|} \\hline \n')
        f.write('Layer & MAP & Prec@50 \\\\ \\hline\n')
        for layer in layers:
            lmap = np.array(maps[layer])
            lprec = np.array(precs[layer])
            f.write('%s & %.3f $\\pm$ %.3f & %.3f $\\pm$ %.3f \\\\ \n' % (layer,
                lmap.mean(), lmap.std(), lprec.mean(), lprec.std()))
        f.write('\\hline\n')
        f.write('\\end{tabular}\n')
if __name__ == '__main__':
    # Script entry point.
    main()
|
Corvalius/deepnet | examples/cudamat/tryout.py | from package.cudamat import cudamat as cm
import numpy as np
# Ensuring that Cudamat is working.
# Smoke test: multiply two random matrices on the GPU and print the column
# sums back on the host. Any CUDA/cublas misconfiguration fails loudly here.
cm.cublas_init()
# create two random matrices and copy them to the GPU
a = cm.CUDAMatrix(np.random.rand(32, 256))
b = cm.CUDAMatrix(np.random.rand(256, 32))
# perform calculations on the GPU
c = cm.dot(a, b)
d = c.sum(axis = 0)
# copy d back to the host (CPU) and print
print( d.asarray() )
|
Corvalius/deepnet | package/cudamat/cudamat_conv.py | import ctypes as ct
import math
import pdb
import platform
# Load the native convolution library with the platform-specific file name.
# The Darwin branch mirrors eigenmat.py, which already handles macOS; without
# it, macOS would try (and fail) to load the Linux .so.
if platform.system() == 'Windows':
    _ConvNet = ct.cdll.LoadLibrary('libcudamat_conv.dll')
elif platform.system() == 'Darwin':
    _ConvNet = ct.cdll.LoadLibrary('libcudamat_conv.dylib')
else:
    _ConvNet = ct.cdll.LoadLibrary('libcudamat_conv.so')
def convUp(images, filters, targets, numModulesX, paddingStart, moduleStride, numImgColors, numGroups=1):
    """Forward convolution via the native kernel.

    images - (n_images, img_w**2 * n_chans)
    filters - (n_filters, filter_w**2 * n_chans)
    targets - (n_images, n_locs**2 * n_filters), written in place
    numModulesX - number of filter locations along one axis (n_locs)
    paddingStart - k for a k-pixel zero border (usually 0)
    moduleStride - stride between filter applications
    numImgColors - n_chans
    """
    n_images = images.shape[0]
    n_filters = filters.shape[0]
    expected = (n_images, n_filters * numModulesX * numModulesX)
    assert targets.shape == expected, '%s %d %d-%d-%d' % (targets.shape.__str__(), n_images, n_filters, numModulesX, numModulesX)
    # The C side expects a non-positive padding offset, hence the negation.
    _ConvNet.convUp(images.p_mat, filters.p_mat, targets.p_mat, numModulesX,
                    -paddingStart, moduleStride, numImgColors, numGroups)
def convDown(hidSums, filters, targets, numModulesX, paddingStart, moduleStride, filterSizeX, imSizeX, numImgColors):
    """Backpropagate hidden sums through the filters into image space.

    hidSums - (n_images, n_locs**2 * n_filters)
    filters - (n_filters, filter_w**2 * n_chans)
    targets - (n_images, img_w**2 * n_chans), written in place

    filterSizeX and numModulesX are accepted for signature compatibility;
    the native call derives geometry from imSizeX.
    """
    numGroups = 1
    numImages = hidSums.shape[0]
    assert paddingStart >= 0
    assert targets.shape == (numImages, numImgColors * imSizeX * imSizeX)
    # Removed dead locals (numFilters, numModules) that were never used.
    _ConvNet.convDown(hidSums.p_mat, filters.p_mat, targets.p_mat, imSizeX,
                      -paddingStart, moduleStride, numImgColors, numGroups)
def convOutp(images, hidSums, targets, numModulesX, paddingStart, filterSizeX, moduleStride, numImgColors):
    """Accumulate filter gradients from images and hidden sums.

    images - (n_images, img_w**2 * n_chans)
    hidSums - (n_images, n_locs**2 * n_filters)
    targets - (n_filters, filter_w**2 * n_chans), written in place
    """
    # Integer division: under Python 3, '/' yields a float here, which made
    # the shape assertion below compare against a float and always fail.
    numFilters = hidSums.shape[1] // (numModulesX**2)
    assert targets.shape == (numFilters, numImgColors * filterSizeX * filterSizeX), '%s %d %d-%d-%d' % (targets.shape.__str__(), numFilters, numImgColors, filterSizeX, filterSizeX)
    # numGroups=1 and partialSum=0 are passed as literals.
    _ConvNet.convOutp(images.p_mat, hidSums.p_mat, targets.p_mat, numModulesX, filterSizeX, -paddingStart, moduleStride, numImgColors, 1, 0)
def localUp(images, filters, targets, numModulesX, paddingStart, moduleStride, numImgColors, numGroups=1):
    """Forward pass for locally-connected (untied) filters.

    images - (n_images, img_w**2 * n_chans)
    filters - (n_filters, filter_w**2 * n_chans)
    targets - (n_images, n_locs**2 * n_filters), written in place
    numModulesX - number of filter locations along one axis (n_locs)
    paddingStart - k for a k-pixel zero border (usually 0)
    moduleStride - stride between filter applications
    numImgColors - n_chans
    """
    n_images = images.shape[0]
    n_filters = filters.shape[0]
    expected = (n_images, n_filters * numModulesX * numModulesX)
    assert targets.shape == expected, '%s %d %d-%d-%d' % (targets.shape.__str__(), n_images, n_filters, numModulesX, numModulesX)
    _ConvNet.localUp(images.p_mat, filters.p_mat, targets.p_mat,
                     numModulesX, -paddingStart, moduleStride, numImgColors, numGroups)
def localDown(hidSums, filters, targets, numModulesX, paddingStart, moduleStride, filterSizeX, imSizeX, numImgColors):
    """Backpropagate hidden sums through locally-connected filters.

    hidSums - (n_images, n_locs**2 * n_filters)
    filters - (n_filters, filter_w**2 * n_chans)
    targets - (n_images, img_w**2 * n_chans), written in place
    """
    numGroups = 1
    numImages = hidSums.shape[0]
    assert paddingStart >= 0
    assert targets.shape == (numImages, numImgColors * imSizeX * imSizeX)
    # Removed dead locals (numFilters, numModules) that were never used.
    _ConvNet.localDown(hidSums.p_mat, filters.p_mat, targets.p_mat,
                       imSizeX, -paddingStart, moduleStride, numImgColors, numGroups)
def localOutp(images, hidSums, targets, numModulesX, paddingStart, filterSizeX, moduleStride, numImgColors):
    """Accumulate gradients for locally-connected filters.

    images - (n_images, img_w**2 * n_chans)
    hidSums - (n_images, n_locs**2 * n_filters)
    targets - (n_filters, n_locs**2 * filter_w**2 * n_chans), written in place
    """
    numGroups = 1
    partialSum = 0
    # Integer division: under Python 3, '/' yields a float and the shape
    # assertion below would always fail.
    numFilters = hidSums.shape[1] // (numModulesX**2)
    assert targets.shape == (numFilters, numModulesX**2 * numImgColors * filterSizeX**2), '%s %d %d-%d-%d' % (targets.shape.__str__(), numFilters, numImgColors, filterSizeX, filterSizeX)
    _ConvNet.localOutp(images.p_mat, hidSums.p_mat, targets.p_mat,
                       numModulesX, filterSizeX, -paddingStart, moduleStride, numImgColors, numGroups, partialSum)
def MaxPool(images, targets, numChannels, subsX, startX, strideX, outputsX):
    """Max pooling over square regions via the native kernel.

    images - (n_images, img_w**2 * n_chans)
    numChannels - number of filter/color channels
    subsX - width of pooling area
    startX - pixel where pooling starts
    strideX - stride
    outputsX - number of pooling sites per axis
    """
    n_images = images.shape[0]
    assert targets.shape == (n_images, numChannels * outputsX * outputsX)
    _ConvNet.MaxPool(images.p_mat, targets.p_mat,
                     numChannels, subsX, startX, strideX, outputsX)
def ProbMaxPool(images, rnd, targets, numChannels, subsX, startX, strideX, outputsX):
    """Probabilistic max pooling; *rnd* supplies the randomness per unit.

    images - (n_images, img_w**2 * n_chans)
    rnd - (n_images, img_w**2 * n_chans)
    numChannels - number of filter/color channels
    subsX - width of pooling area
    startX - pixel where pooling starts
    strideX - stride
    outputsX - number of pooling sites per axis
    """
    n_images = images.shape[0]
    assert targets.shape == (n_images, numChannels * outputsX * outputsX)
    assert rnd.shape == images.shape
    _ConvNet.ProbMaxPool(images.p_mat, rnd.p_mat, targets.p_mat,
                         numChannels, subsX, startX, strideX, outputsX)
def MaxPoolUndo(images, targets, grad, maxes, subsX, startX, strideX, outputsX):
    """Backward pass of MaxPool: route gradients to the max locations.

    images - (n_images, img_w**2 * n_chans) original pooling input
    grad - (n_images, outputsX**2 * n_chans) deltas wrt the pooled outputs
    maxes - (n_images, outputsX**2 * n_chans) the pooled outputs themselves
    targets - same shape as images, written in place
    subsX / startX / strideX / outputsX - pooling geometry, as in MaxPool
    """
    assert targets.shape == images.shape
    _ConvNet.MaxPoolUndo(images.p_mat, grad.p_mat, maxes.p_mat, targets.p_mat,
                         subsX, startX, strideX, outputsX)
def ResponseNorm(images, denoms, targets, numChannels, sizeX, addScale, powScale):
    """Response normalization via the native kernel.

    Writes the normalized activations to *targets* and the per-unit
    denominators to *denoms* (consumed later by ResponseNormUndo).
    addScale / powScale are the scalar normalization coefficients.
    """
    assert targets.shape == images.shape
    assert targets.shape == denoms.shape
    # Removed dead code: num_images/numpixels/imgsize were computed but never
    # used (their consumers were commented-out asserts), and numpixels used
    # float division under Python 3 anyway.
    _ConvNet.ResponseNorm(images.p_mat, denoms.p_mat, targets.p_mat,
                          numChannels, sizeX, ct.c_float(addScale),
                          ct.c_float(powScale))
def ResponseNormUndo(outGrad, denoms, inGrad, acts, targets, numChannels, sizeX,
                     addScale, powScale):
    """Backward pass of ResponseNorm; all five matrices must share one shape.

    outGrad - gradient wrt the normalized outputs
    denoms - denominators saved by ResponseNorm
    inGrad - gradient wrt the inputs (written in place into targets)
    acts - the normalized activations
    """
    for other in (outGrad, denoms, inGrad, acts):
        assert targets.shape == other.shape
    _ConvNet.ResponseNormUndo(outGrad.p_mat, denoms.p_mat, inGrad.p_mat,
                              acts.p_mat, targets.p_mat, numChannels, sizeX,
                              ct.c_float(addScale), ct.c_float(powScale))
|
Corvalius/deepnet | package/deepnet/choose_matrix_library.py | import os
use_gpu = os.environ.get('USE_GPU', 'auto')
assert use_gpu in ['auto', 'yes', 'no'], "environment variable USE_GPU, should be one of 'auto', 'yes', 'no'."
if use_gpu == 'auto':
    # Probe for cudamat. The original try-block contained no import at all
    # (only `use_gpu = 'yes'`), so it could never fail and 'auto' always
    # selected the GPU path even when cudamat was missing.
    try:
        from package.cudamat import cudamat as _cm_probe
        use_gpu = 'yes'
    except ImportError:
        print( 'Failed to import cudamat. Using eigenmat. No GPU will be used.' )
        use_gpu = 'no'
if use_gpu == 'yes':
    from package.cudamat import cudamat as cm
    from package.cudamat import cudamat_conv as cc
    from package.cudamat import gpu_lock
elif use_gpu == 'no':
    import package.eigenmat as cm
|
Corvalius/deepnet | examples/deepnet/multimodal_dbn/setup_data.py | <reponame>Corvalius/deepnet<gh_stars>1-10
"""Sets up paths, separates out data with missing text."""
import os, sys
import pdb
import glob
from package.deepnet import datahandler as dh
from package.deepnet import util
from google.protobuf import text_format
import numpy as np
def EditPaths(data_pb, data_dir, gpu_mem, main_mem):
    """Point *data_pb* at *data_dir* and record the memory budgets in place."""
    data_pb.prefix = data_dir
    data_pb.gpu_memory = gpu_mem
    data_pb.main_memory = main_mem
def CreateMissingTextData(data_pb, data_pbtxt_file_z, data_pbtxt_file_nnz):
    """Some cases have text and some don't. This method separates them out.

    Splits each labelled/unlabelled partition into a zero-text dataset
    (images only) and a non-zero-text dataset (images + text), writes the
    shards to disk, and emits the two new dataset .pbtxt descriptors.
    """
    prefix = data_pb.prefix
    data_pb_z = util.CopyDataset(data_pb)
    data_pb_nnz = util.CopyDataset(data_pb)
    data_pb_z.name = 'flickr_zero_text'
    data_pb_nnz.name = 'flickr_non_zero_text'
    del data_pb_z.data[:]
    del data_pb_nnz.data[:]
    for tag in ['labelled', 'unlabelled']:
        # Find the proto that describes text data.
        text_data = next(d for d in data_pb.data if d.name == 'text_%s' % tag)
        # Load the text data into a sparse matrix.
        text_data_file = os.path.join(prefix, text_data.file_pattern)
        data = dh.Disk.LoadSparse(text_data_file)
        # Find cases which have non-zero words.
        numwords = np.array(data.sum(axis=1)).reshape(-1)
        nnz_indices = np.where(numwords != 0)[0]
        z_indices = np.where(numwords == 0)[0]
        indices_file = os.path.join(prefix, 'text', 'indices_%s.npz' % tag)
        np.savez(indices_file, nnz_indices=nnz_indices, z_indices=z_indices)
        text_nnz_file = os.path.join('text', 'text_nnz_2000_%s.npz' % tag)
        dh.Disk.SaveSparse(os.path.join(prefix, text_nnz_file), data[nnz_indices])
        nnz = len(nnz_indices)
        # Separate images.
        image_data = next(d for d in data_pb.data if d.name == 'image_%s' % tag)
        numdims = np.prod(image_data.dimensions)
        image_z_dir = os.path.join('image', '%s_z' % tag)
        image_nnz_dir = os.path.join('image', '%s_nnz' % tag)
        data_writer_nnz = dh.DataWriter(['combined'],
                                        os.path.join(prefix, image_nnz_dir), '1G',
                                        [numdims], datasize=nnz)
        data_writer_z = dh.DataWriter(['combined'],
                                      os.path.join(prefix, image_z_dir), '1G',
                                      [numdims], datasize=image_data.size - nnz)
        end = 0
        img_files = glob.glob(os.path.join(data_pb.prefix, image_data.file_pattern))
        # Image shards must be processed in sorted order so that row ranges
        # line up with the word counts computed from the text matrix.
        for img_file in sorted(img_files):
            print(img_file)
            img = np.load(img_file)
            start = end
            end = start + img.shape[0]
            nw = numwords[start:end]
            zero_text_images = img[np.where(nw == 0)[0]]
            non_zero_text_images = img[np.where(nw != 0)[0]]
            data_writer_z.Submit([zero_text_images])
            data_writer_nnz.Submit([non_zero_text_images])
        num_outputs_z = data_writer_z.Commit()
        num_outputs_nnz = data_writer_nnz.Commit()
        assert num_outputs_z[0] == image_data.size - nnz
        assert num_outputs_nnz[0] == nnz
        # Make data pbtxt for the new data.
        image_z = util.CopyData(image_data)
        image_nnz = util.CopyData(image_data)
        text_nnz = util.CopyData(text_data)
        image_z.file_pattern = os.path.join(image_z_dir, 'combined-*-of-*.npy')
        image_nnz.file_pattern = os.path.join(image_nnz_dir, 'combined-*-of-*.npy')
        text_nnz.file_pattern = text_nnz_file
        image_z.size = image_data.size - nnz
        image_nnz.size = nnz
        text_nnz.size = nnz
        data_pb_z.data.extend([image_z])
        data_pb_nnz.data.extend([image_nnz, text_nnz])
    with open(data_pbtxt_file_z, 'w') as f:
        text_format.PrintMessage(data_pb_z, f)
    with open(data_pbtxt_file_nnz, 'w') as f:
        text_format.PrintMessage(data_pb_nnz, f)
def EditTrainers(data_dir, model_dir, rep_dir, numsplits):
    """Rewrite trainer and model .pbtxt files so their paths match this setup.

    DBN trainers point at raw data (layer1) or representations (deeper),
    one classifier trainer is instantiated per split, and the multimodal
    DBN model prefix is set to *model_dir*.
    """
    tnames = ['train_CD_image_layer1.pbtxt',
              'train_CD_image_layer2.pbtxt',
              'train_CD_text_layer1.pbtxt',
              'train_CD_text_layer2.pbtxt',
              'train_CD_joint_layer.pbtxt']
    for tname in tnames:
        t_op_file = os.path.join('trainers', 'dbn', tname)
        t_op = util.ReadOperation(t_op_file)
        if 'layer1' in tname:
            # First layers consume raw data; deeper layers consume the
            # representations extracted by the layer below.
            t_op.data_proto_prefix = data_dir
        else:
            t_op.data_proto_prefix = rep_dir
        t_op.checkpoint_directory = model_dir
        with open(t_op_file, 'w') as f:
            text_format.PrintMessage(t_op, f)
    t_op_file = os.path.join('trainers', 'classifiers', 'baseclassifier.pbtxt')
    t_op = util.ReadOperation(t_op_file)
    for i in range(1, numsplits+1):
        # Clone the base classifier trainer for each cross-validation split.
        t_op_file = os.path.join('trainers', 'classifiers', 'split_%d.pbtxt' % i)
        t_op.data_proto_prefix = rep_dir
        t_op.data_proto = os.path.join('split_%d' % i, 'data.pbtxt')
        t_op.checkpoint_prefix = model_dir
        t_op.checkpoint_directory = os.path.join('classifiers','split_%d' % i)
        with open(t_op_file, 'w') as f:
            text_format.PrintMessage(t_op, f)
    # Change prefix in multimodal dbn model
    mnames = ['multimodal_dbn.pbtxt']
    for mname in mnames:
        model_file = os.path.join('models', mname)
        model = util.ReadModel(model_file)
        model.prefix = model_dir
        with open(model_file, 'w') as f:
            text_format.PrintMessage(model, f)
def main():
    """Entry point: patch dataset/trainer protos and split out missing-text data.

    argv: <data_dir> <model_dir> <rep_dir> <gpu_mem> <main_mem> <numsplits>.
    """
    data_dir = sys.argv[1]
    model_dir = sys.argv[2]
    rep_dir = sys.argv[3]
    gpu_mem = sys.argv[4]
    main_mem = sys.argv[5]
    numsplits = int(sys.argv[6])
    data_pbtxt_file = os.path.join(data_dir, 'flickr.pbtxt')
    data_pb = util.ReadData(data_pbtxt_file)
    EditPaths(data_pb, data_dir, gpu_mem, main_mem)
    with open(data_pbtxt_file, 'w') as f:
        text_format.PrintMessage(data_pb, f)
    EditTrainers(data_dir, model_dir, rep_dir, numsplits)
    data_pbtxt_file_z = os.path.join(data_dir, 'flickr_z.pbtxt')
    data_pbtxt_file_nnz = os.path.join(data_dir, 'flickr_nnz.pbtxt')
    # Expensive split is only done once; skipped if its output already exists.
    if not os.path.exists(data_pbtxt_file_z):
        CreateMissingTextData(data_pb, data_pbtxt_file_z, data_pbtxt_file_nnz)
    data_pb = util.ReadData(data_pbtxt_file_z)
    EditPaths(data_pb, data_dir, gpu_mem, main_mem)
    with open(data_pbtxt_file_z, 'w') as f:
        text_format.PrintMessage(data_pb, f)
    data_pb = util.ReadData(data_pbtxt_file_nnz)
    EditPaths(data_pb, data_dir, gpu_mem, main_mem)
    with open(data_pbtxt_file_nnz, 'w') as f:
        text_format.PrintMessage(data_pb, f)
if __name__ == '__main__':
    # Script entry point.
    main()
|
Corvalius/deepnet | package/deepnet/ais.py | """Computes partition function for RBM-like models using Annealed Importance Sampling."""
import numpy as np
import dbm
import util
import trainer as tr
from choose_matrix_library import *
import sys
import numpy as np
import pdb
import time
import itertools
import matplotlib.pyplot as plt
from package.deepnet import visualize
import lightspeed
def SampleEnergySoftmax(layer, numsamples, use_lightspeed=False):
    """Draw *numsamples* softmax samples from layer.state, accumulated into layer.sample.

    With use_lightspeed, probabilities are normalized on the GPU, copied to
    the CPU and sampled with the lightspeed library in one call. Otherwise
    each draw perturbs the energies on the GPU (presumably Gumbel-style,
    given the helper's name -- TODO confirm) and takes the argmax.
    """
    sample = layer.sample
    energy = layer.state
    temp = layer.expanded_batch
    if use_lightspeed:
        layer.ApplyActivation()
        layer.state.sum(axis=0, target=layer.temp)
        # Normalize columns to probabilities before sampling on the CPU.
        layer.state.div_by_row(layer.temp, target=temp)
        probs_cpu = temp.asarray().astype(np.float64)
        samples_cpu = lightspeed.SampleSoftmax(probs_cpu, numsamples)
        sample.overwrite(samples_cpu.astype(np.float32))
    else:
        sample.assign(0)
        for i in range(numsamples):
            # One multinomial draw: perturb energies, pick the max per column.
            energy.perturb_energy_for_softmax_sampling(target=temp)
            temp.choose_max_and_accumulate(sample)
def LogMeanExp(x):
    """Numerically stable log(mean(exp(x))) for a numpy array."""
    shift = x.max()
    return shift + np.log(np.mean(np.exp(x - shift)))
def LogSumExp(x):
    """Numerically stable log(sum(exp(x))) for a numpy array."""
    shift = x.max()
    return shift + np.log(np.sum(np.exp(x - shift)))
def Display(w, hid_state, input_state, w_var, x_axis):
    """Plot the running variance of the AIS importance weights (figure 5).

    hid_state and input_state are accepted for the (currently disabled)
    diagnostic plots below; only the variance curve is drawn.
    """
    w = w.asarray().flatten()
    #plt.figure(1)
    #plt.clf()
    #plt.hist(w, 100)
    #visualize.display_hidden(hid_state.asarray(), 2, 'activations', prob=True)
    #plt.figure(3)
    #plt.clf()
    #plt.imshow(hid_state.asarray().T, cmap=plt.cm.gray, interpolation='nearest')
    #plt.figure(4)
    #plt.clf()
    #plt.imshow(input_state.asarray().T, cmap=plt.cm.gray, interpolation='nearest')
    #, state.shape[0], state.shape[1], state.shape[0], 3, title='Markov chains')
    #plt.tight_layout(pad=0, w_pad=0, h_pad=0)
    plt.figure(5)
    plt.clf()
    plt.suptitle('Variance')
    plt.plot(np.array(x_axis), np.array(w_var))
    plt.draw()
def AISReplicatedSoftmax(model, D, num_chains, display=False):
    """Estimate log Z of a Replicated Softmax RBM with Annealed Importance Sampling.

    model - two-layer RBM already loaded on the GPU.
    D - number of words per document (softmax replication count).
    num_chains - number of parallel AIS chains (the batch size).
    Returns the log-partition-function estimate.
    """
    # Annealing schedule: step sizes shrink as beta approaches 1, where the
    # intermediate distributions change fastest.
    schedule = np.concatenate((
        #np.arange(0.0, 1.0, 0.01),
        #np.arange(0.0, 1.0, 0.001),
        np.arange(0.0, 0.7, 0.001), # 700
        np.arange(0.7, 0.9, 0.0001), # 2000
        np.arange(0.9, 1.0, 0.00002) # 5000
        ))
    #schedule = np.array([0.])
    cm.CUDAMatrix.init_random(seed=0)
    assert len(model.layer) == 2, 'Only implemented for RBMs.'
    steps = len(schedule)
    input_layer = model.layer[0]
    hidden_layer = model.layer[1]
    edge = model.edge[0]
    batchsize = num_chains
    w = edge.params['weight']
    a = hidden_layer.params['bias']
    b = input_layer.params['bias']
    numvis, numhid = w.shape
    # f scales the base-rate model's bias used as the AIS starting point.
    f = 0.1
    input_layer.AllocateBatchsizeDependentMemory(num_chains)
    hidden_layer.AllocateBatchsizeDependentMemory(num_chains)
    # INITIALIZE TO SAMPLES FROM BASE MODEL.
    input_layer.state.assign(0)
    input_layer.NN.assign(D)
    input_layer.state.add_col_mult(b, f)
    SampleEnergySoftmax(input_layer, D)
    # Per-chain accumulated log importance weights.
    w_ais = cm.CUDAMatrix(np.zeros((1, batchsize)))
    #pdb.set_trace()
    w_variance = []
    x_axis = []
    if display:
        Display(w_ais, hidden_layer.state, input_layer.state, w_variance, x_axis)
        #raw_input('Press Enter.')
        #pdb.set_trace()
    # RUN AIS.
    for i in range(steps-1):
        sys.stdout.write('\r%d' % (i+1))
        sys.stdout.flush()
        # Importance-weight update: free-energy difference between adjacent
        # temperatures, accumulated per chain.
        cm.dot(w.T, input_layer.sample, target=hidden_layer.state)
        hidden_layer.state.add_col_mult(a, D)
        hidden_layer.state.mult(schedule[i], target=hidden_layer.temp)
        hidden_layer.state.mult(schedule[i+1])
        cm.log_1_plus_exp(hidden_layer.state, target=hidden_layer.deriv)
        cm.log_1_plus_exp(hidden_layer.temp)
        hidden_layer.deriv.subtract(hidden_layer.temp)
        w_ais.add_sums(hidden_layer.deriv, axis=0)
        w_ais.add_dot(b.T, input_layer.sample, mult=(1-f)*(schedule[i+1]-schedule[i]))
        # One Gibbs transition at the new temperature.
        hidden_layer.ApplyActivation()
        hidden_layer.Sample()
        cm.dot(w, hidden_layer.sample, target=input_layer.state)
        input_layer.state.add_col_vec(b)
        input_layer.state.mult(schedule[i+1])
        input_layer.state.add_col_mult(b, f*(1-schedule[i+1]))
        SampleEnergySoftmax(input_layer, D)
        if display and (i % 100 == 0 or i == steps - 2):
            w_variance.append(w_ais.asarray().var())
            x_axis.append(i)
            Display(w_ais, hidden_layer.state, input_layer.sample, w_variance, x_axis)
    sys.stdout.write('\n')
    # Combine importance weights with the base model's analytic log Z.
    z = LogMeanExp(w_ais.asarray()) + D * LogSumExp(f * b.asarray()) + numhid * np.log(2)
    return z
def AISBinaryRbm(model, schedule):
    """Estimate log Z of a binary-binary RBM by Annealed Importance Sampling.

    model - two-layer RBM on the GPU; schedule - increasing temperatures in
    [0, 1]. Returns the log-partition-function estimate.
    """
    cm.CUDAMatrix.init_random(seed=int(time.time()))
    assert len(model.layer) == 2, 'Only implemented for RBMs.'
    steps = len(schedule)
    input_layer = model.layer[0]
    hidden_layer = model.layer[1]
    edge = model.edge[0]
    batchsize = model.t_op.batchsize
    w = edge.params['weight']
    a = hidden_layer.params['bias']
    b = input_layer.params['bias']
    numvis, numhid = w.shape
    # INITIALIZE TO UNIFORM RANDOM.
    input_layer.state.assign(0)
    input_layer.ApplyActivation()
    input_layer.Sample()
    # Per-chain accumulated log importance weights.
    w_ais = cm.CUDAMatrix(np.zeros((1, batchsize)))
    unitcell = cm.empty((1, 1))
    # RUN AIS.
    for i in range(1, steps):
        # Free-energy difference between temperatures i-1 and i.
        cm.dot(w.T, input_layer.sample, target=hidden_layer.state)
        hidden_layer.state.add_col_vec(a)
        hidden_layer.state.mult(schedule[i-1], target=hidden_layer.temp)
        hidden_layer.state.mult(schedule[i])
        cm.log_1_plus_exp(hidden_layer.state, target=hidden_layer.deriv)
        cm.log_1_plus_exp(hidden_layer.temp)
        hidden_layer.deriv.subtract(hidden_layer.temp)
        w_ais.add_sums(hidden_layer.deriv, axis=0)
        w_ais.add_dot(b.T, input_layer.state, mult=schedule[i]-schedule[i-1])
        # One Gibbs transition at temperature schedule[i].
        hidden_layer.ApplyActivation()
        hidden_layer.Sample()
        cm.dot(w, hidden_layer.sample, target=input_layer.state)
        input_layer.state.add_col_vec(b)
        input_layer.state.mult(schedule[i])
        input_layer.ApplyActivation()
        input_layer.Sample()
    # Base model is uniform: its log Z is (numvis + numhid) * log 2.
    z = LogMeanExp(w_ais.asarray()) + numvis * np.log(2) + numhid * np.log(2)
    return z
def GetAll(n):
    """Return an (n, 2**n) matrix whose columns enumerate every length-n binary vector."""
    x = np.zeros((n, 2**n))
    for col, bits in enumerate(itertools.product([0, 1], repeat=n)):
        x[:, col] = np.array(bits)
    return x
def ExactZ_binary_binary(model):
    """Exact log partition function of a small binary-binary RBM.

    Enumerates all 2**numvis visible configurations, so it is only feasible
    for very small numvis. Returns log Z.
    """
    assert len(model.layer) == 2, 'Only implemented for RBMs.'
    # Removed `steps = len(schedule)`: `schedule` is not defined anywhere in
    # this function's scope, so the line raised NameError, and `steps` was
    # never used anyway.
    input_layer = model.layer[0]
    hidden_layer = model.layer[1]
    edge = model.edge[0]
    w = edge.params['weight']
    a = hidden_layer.params['bias']
    b = input_layer.params['bias']
    numvis, numhid = w.shape
    batchsize = 2**numvis
    input_layer.AllocateBatchsizeDependentMemory(batchsize)
    hidden_layer.AllocateBatchsizeDependentMemory(batchsize)
    all_inputs = GetAll(numvis)
    w_ais = cm.CUDAMatrix(np.zeros((1, batchsize)))
    input_layer.sample.overwrite(all_inputs)
    # Hidden units marginalized analytically: sum_j log(1 + exp(w_j.v + a_j)).
    cm.dot(w.T, input_layer.sample, target=hidden_layer.state)
    hidden_layer.state.add_col_vec(a)
    cm.log_1_plus_exp(hidden_layer.state)
    w_ais.add_sums(hidden_layer.state, axis=0)
    # NOTE(review): this reads input_layer.state while the enumerated configs
    # were written to input_layer.sample -- verify the two refer to the same
    # buffer, otherwise the visible-bias term is computed on stale data.
    w_ais.add_dot(b.T, input_layer.state)
    offset = float(w_ais.asarray().max())
    w_ais.subtract(offset)
    cm.exp(w_ais)
    z = offset + np.log(w_ais.asarray().sum())
    return z
def Usage():
    """Print command-line usage for this script."""
    # The original printed the format string literally, leaving '%s' unfilled;
    # substitute the program name as clearly intended.
    print( '%s <model file> <number of Markov chains to run> [number of words (for Replicated Softmax models)]' % sys.argv[0] )
if __name__ == '__main__':
    # Entry point: lock a GPU, load the model, run AIS, report log Z.
    board = tr.LockGPU()
    model_file = sys.argv[1]
    numchains = int(sys.argv[2])
    # Default the document length; previously D was only assigned inside the
    # if-branch, so omitting the optional third argument crashed below with
    # a NameError. 10 matches the value suggested by the original comment.
    D = 10
    if len(sys.argv) > 3:
        D = int(sys.argv[3])  # number of words.
    m = dbm.DBM(model_file)
    m.LoadModelOnGPU(batchsize=numchains)
    plt.ion()
    log_z = AISReplicatedSoftmax(m, D, numchains, display=True)
    print( 'Log Z %.5f' % log_z )
    #log_z = AIS(m, schedule)
    #print 'Log Z %.5f' % log_z
    #log_z = ExactZ_binary_binary(m)
    #print 'Exact %.5f' % log_z
    tr.FreeGPU(board)
    # raw_input is Python 2 only; this file otherwise uses Python 3 print().
    input('Press Enter.')
|
Corvalius/deepnet | package/deepnet/util.py | <gh_stars>1-10
"""Utility functions for loading/saving models."""
import pickle
import package.deepnet.deepnet_pb2 as deepnet_pb2
import gzip
import numpy as np
import os
import shutil
import time
import pdb
from google.protobuf import text_format
def ParameterAsNumpy(param):
    """Converts a serialized parameter string into a numpy array.

    param.mat holds raw float32 bytes; param.dimensions gives the shape.
    """
    # np.fromstring is deprecated; frombuffer + copy preserves the original
    # semantics (a writable array decoded from the raw bytes).
    return np.frombuffer(param.mat, dtype='float32').copy().reshape(
        *tuple(param.dimensions))
def NumpyAsParameter(numpy_array):
    """Converts a numpy array into a serialized parameter string (raw bytes)."""
    assert numpy_array.dtype == 'float32', 'Saved arrays should be float32.'
    # tostring() is a deprecated alias of tobytes(); the output is identical.
    return numpy_array.tobytes()
def WriteCheckpointFile(net, t_op, best=False):
    """Writes out the model to disk.

    With best=True the model is written to '<name>_BEST'; otherwise a
    timestamped checkpoint is written and also copied to the stable
    '<name>_LAST' alias. The training operation proto is saved alongside
    as '<name>_train_op_<tag>'.
    """
    ckpt_dir = os.path.join(t_op.checkpoint_prefix, t_op.checkpoint_directory)
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    if best:
        tag = 'BEST'
        checkpoint_file = os.path.join(ckpt_dir, '%s_%s' % (net.name, tag))
        print( 'Writing current best model %s' % checkpoint_file )
        _WriteGzip(checkpoint_file, net.SerializeToString())
    else:
        tag = 'LAST'
        checkpoint_file = os.path.join(
            ckpt_dir, '%s_%s' % (net.name, time.strftime('%j%H%M%S')))
        print( 'Writing checkpoint %s' % checkpoint_file )
        _WriteGzip(checkpoint_file, net.SerializeToString())
        # Keep a stable alias pointing at the most recent checkpoint.
        shutil.copyfile(checkpoint_file,
                        os.path.join(ckpt_dir, '%s_%s' % (net.name, tag)))
    # Save the t_op.
    _WriteGzip(os.path.join(ckpt_dir, '%s_train_op_%s' % (net.name, tag)),
               t_op.SerializeToString())

def _WriteGzip(path, payload):
    """Write *payload* bytes to *path* as gzip, closing the handle on all paths."""
    with gzip.open(path, 'wb') as f:
        f.write(payload)
def ReadOperation(proto_file):
    """Load an Operation proto from a .pbtxt (text) or gzipped binary file."""
    _, ext = os.path.splitext(proto_file)
    proto = deepnet_pb2.Operation()
    if ext == '.pbtxt':
        # 'with' fixes the original's leaked file handle.
        with open(proto_file, 'rb') as pf:
            text_format.Merge(pf.read(), proto)
    else:
        with gzip.open(proto_file, 'rb') as f:
            proto.ParseFromString(f.read())
    return proto
def ReadModel(proto_file):
    """Load a Model proto from a .pbtxt (text) or gzipped binary file."""
    _, ext = os.path.splitext(proto_file)
    proto = deepnet_pb2.Model()
    if ext == '.pbtxt':
        # 'with' fixes the original's leaked file handle.
        with open(proto_file, 'rb') as pf:
            text_format.Merge(pf.read(), proto)
    else:
        with gzip.open(proto_file, 'rb') as f:
            proto.ParseFromString(f.read())
    return proto
def WritePbtxt(output_file, pb):
    """Write *pb* to *output_file* in protobuf text format."""
    # text_format.PrintMessage emits str; writing it to a binary-mode handle
    # raises TypeError under Python 3, so open in text mode ('w', not 'wb') --
    # matching how the rest of this codebase writes .pbtxt files.
    with open(output_file, 'w') as f:
        text_format.PrintMessage(pb, f)
def ReadData(proto_file):
    """Load a Dataset proto from a .pbtxt (text) or plain binary file.

    Note: unlike ReadOperation/ReadModel, the binary branch reads an
    uncompressed file -- preserved from the original.
    """
    _, ext = os.path.splitext(proto_file)
    proto = deepnet_pb2.Dataset()
    if ext == '.pbtxt':
        # 'with' fixes the original's leaked file handle.
        with open(proto_file, 'rb') as pf:
            text_format.Merge(pf.read(), proto)
    else:
        with open(proto_file, 'rb') as f:
            proto.ParseFromString(f.read())
    return proto
def CopyData(data):
    """Return a deep copy of a Dataset.Data proto."""
    copy = deepnet_pb2.Dataset.Data()
    copy.CopyFrom(data)
    return copy
def CopyDataset(data):
    """Return a deep copy of a Dataset proto."""
    copy = deepnet_pb2.Dataset()
    copy.CopyFrom(data)
    return copy
def CopyOperation(op):
    """Return a deep copy of an Operation proto."""
    copy = deepnet_pb2.Operation()
    copy.CopyFrom(op)
    return copy
def CopyModel(model):
    """Return a deep copy of a Model proto."""
    copy = deepnet_pb2.Model()
    copy.CopyFrom(model)
    return copy
def CopyLayer(layer):
    """Return a deep copy of a Layer proto."""
    copy = deepnet_pb2.Layer()
    copy.CopyFrom(layer)
    return copy
def GetPerformanceStats(stat, prefix=''):
    """Format the enabled statistics in *stat* as a single display string.

    Each enabled metric contributes one ' <prefix>_<metric>: <value>' chunk.
    MAP and prec50 are skipped for the training prefix 'T'.
    """
    chunks = []
    if stat.compute_cross_entropy:
        chunks.append(' %s_CE: %.3f' % (prefix, stat.cross_entropy / stat.count))
    if stat.compute_correct_preds:
        chunks.append(' %s_Acc: %.3f (%d/%d)' % (
            prefix, stat.correct_preds/stat.count, stat.correct_preds, stat.count))
    if stat.compute_error:
        chunks.append(' %s_E: %.7f' % (prefix, stat.error / stat.count))
    if stat.compute_MAP and prefix != 'T':
        chunks.append(' %s_MAP: %.3f' % (prefix, stat.MAP))
    if stat.compute_prec50 and prefix != 'T':
        chunks.append(' %s_prec50: %.3f' % (prefix, stat.prec50))
    if stat.compute_sparsity:
        chunks.append(' %s_sp: %.3f' % (prefix, stat.sparsity / stat.count))
    return ''.join(chunks)
def Accumulate(acc, perf):
    """Add the counters from *perf* into the running totals in *acc*, in place."""
    for field in ('count', 'cross_entropy', 'error', 'correct_preds', 'sparsity'):
        setattr(acc, field, getattr(acc, field) + getattr(perf, field))
def CreateLayer(layer_class, proto, *args, **kwargs):
    """Depth-first search of layer_class's subclass tree for one whose
    IsLayerType accepts *proto*; instantiate and return it, else None."""
    for subclass in layer_class.__subclasses__():
        if subclass.IsLayerType(proto):
            return subclass(proto, *args, **kwargs)
        found = CreateLayer(subclass, proto, *args, **kwargs)
        if found is not None:
            return found
    return None
def CreateEdge(edge_class, proto, *args, **kwargs):
    """Instantiate the first direct subclass of edge_class whose IsEdgeType
    accepts *proto*; fall back to the base class itself."""
    for subclass in edge_class.__subclasses__():
        if subclass.IsEdgeType(proto):
            return subclass(proto, *args, **kwargs)
    return edge_class(proto, *args, **kwargs)
def LoadMissing(p1, p2):
    """Return a new proto of p1's type: p2's fields as defaults, overridden by p1.

    Implemented as copy(p2) then merge(p1), so fields set in p1 win.
    """
    p = p1.__class__()
    p.CopyFrom(p2)
    p.MergeFrom(p1)
    return p
# For Navdeep's data.
def save(fname, var_list, source_dict):
    """Pickle named variables from *source_dict* into a gzip file.

    var_list is a whitespace-separated string of names. The list of names is
    written first, then each value with pickle protocol 2 (original format
    preserved so existing files stay readable by load()).
    """
    names = [var.strip() for var in var_list.split() if len(var.strip()) > 0]
    # 'with' ensures the gzip handle is closed even if pickling fails mid-way.
    with gzip.GzipFile(fname, 'wb') as fo:
        pickle.dump(names, fo)
        for var in names:
            pickle.dump(source_dict[var], fo, protocol=2)
def load(fname, target_dict, verbose=False):
    """Load variables written by save() from *fname* into *target_dict*.

    Reads the pickled name list first, then one pickled value per name.
    """
    # 'with' ensures the gzip handle is closed even if unpickling fails.
    with gzip.GzipFile(fname, 'rb') as fo:
        var_list = pickle.load(fo)
        if verbose:
            print(var_list)
        for var in var_list:
            target_dict[var] = pickle.load(fo)
|
Corvalius/deepnet | package/eigenmat/eigenmat.py | <gh_stars>1-10
import os, pdb, platform, time, warnings
import ctypes as ct
import numpy as np
# Load the native eigenmat shared library using the platform-specific name.
if platform.system() == 'Windows':
    _eigenmat = ct.cdll.LoadLibrary('libeigenmat.dll')
elif platform.system() == 'Darwin':
    _eigenmat = ct.cdll.LoadLibrary('libeigenmat.dylib')
else:
    _eigenmat = ct.cdll.LoadLibrary('libeigenmat.so')
# Declare float return types for these entry points; ctypes would otherwise
# truncate their results to the default int.
_eigenmat.euclid_norm.restype = ct.c_float
_eigenmat.vdot.restype = ct.c_float
_eigenmat.sum_all.restype = ct.c_float
def deprecated(func):
    """This is a decorator which can be used to mark functions
    as deprecated. It will result in a warning being emmitted
    when the function is used."""
    # functools.wraps replaces the manual copying of __name__/__doc__/__dict__
    # and additionally preserves __module__, __qualname__ and __wrapped__.
    import functools

    @functools.wraps(func)
    def newFunc(*args, **kwargs):
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning)
        return func(*args, **kwargs)
    return newFunc
class EigenMatException(Exception):
    """Raised for errors reported by the native eigenmat library."""
    pass
def get_last_cuda_error():
    """Return the library's last CUDA error, stringified.

    NOTE(review): no restype is declared for get_last_cuda_error, so ctypes
    returns its default int and this stringifies that value -- confirm the
    C signature actually returns an int (or set restype to c_char_p).
    """
    return str(_eigenmat.get_last_cuda_error())
def generate_exception(err_code):
    """
    Return a EigenMatException object based on the error code err_code.

    Unknown codes yield a generic EigenMatException; the original returned
    None for them, which made `raise generate_exception(code)` fail with
    "exceptions must derive from BaseException" instead of reporting the
    real error.
    """
    messages = {
        -1: "Incompatible matrix dimensions.",
        -2: "CUBLAS error.",
        -4: "Operation not supported on views.",
        -5: "Operation not supported on transposed matrices.",
        -6: "",
        -7: "Incompatible transposedness.",
        -8: "Matrix is not in device memory.",
        -9: "Operation not supported.",
    }
    if err_code == -3:
        # Only this code needs the device-side error string, so fetch it
        # lazily rather than on every call.
        return EigenMatException("CUDA error: " + get_last_cuda_error())
    return EigenMatException(
        messages.get(err_code, "Unknown error code: %s" % err_code))
class eigenmat(ct.Structure):
    """ctypes mirror of the C 'eigenmat' struct used by libeigenmat."""
    # data: pointer to the flat float32 buffer.
    # size: two ints; shape below reads them as (size[0], size[1]).
    # is_trans: nonzero marks the matrix as a transposed view.
    # owns_data: ownership flag for 'data' — semantics live in the C code.
    _fields_ = [('data', ct.POINTER(ct.c_float)),
                ('size', ct.c_int * 2),
                ('is_trans', ct.c_int),
                ('owns_data', ct.c_int)]
class rnd_struct(ct.Structure):
    """ctypes mirror of the C random-number-generator state."""
    # NOTE(review): the 128-entry kn/fn/wn tables look like ziggurat
    # sampling tables — confirm against the C implementation.
    _fields_ = [('seed', ct.c_ulong),
                ('kn', ct.c_int * 128),
                ('fn', ct.c_float * 128),
                ('wn', ct.c_float * 128)]
class TransposedEigenMatrix(object):
    """Zero-copy transposed view of an existing eigenmat struct.

    The C struct is byte-copied and only its is_trans flag is flipped, so
    the view shares the data pointer with the source matrix.
    """
    def __init__(self, mat):
        # 'mat' is a raw eigenmat ctypes struct, not an EigenMatrix.
        self.mat = eigenmat()
        # Byte-copy the struct so the source's own flags stay untouched.
        ct.memmove(ct.pointer(self.mat), ct.pointer(mat), ct.sizeof(self.mat))
        self.mat.is_trans = 1
        self.p_mat = ct.pointer(self.mat)
        # NOTE(review): T is the raw source struct here (not an
        # EigenMatrix) — confirm callers expect that asymmetry.
        self.T = mat
class EigenMatrix(object):
"""
A EigenMatrix object represents a matrix of single precision floating point
numbers on a GPU.
"""
    def overwrite(self, array):
        """Overwrites self with array.
        'array' should have a size smaller than that of the array used to
        initialize the EigenMatrix. The method will not throw an Exception just
        yet if this is not true. It will throw exceptions or behave in strange
        ways later on.
        """
        assert type(array) == np.ndarray, 'array must be a np.ndarray.'
        # reformat() converts to float32 in FORTRAN order, the layout the
        # native library expects (see __init__).
        array = reformat(array)
        # Keep a reference so the buffer is not garbage-collected while the
        # C struct still points into it.
        self.numpy_array = array
        _eigenmat.init_from_array(self.p_mat, array.ctypes.data_as(ct.POINTER(ct.c_float)), ct.c_int(array.shape[0]), ct.c_int(array.shape[1]))
    def __init__(self, array, **kwargs):
        """
        Initializes a new matrix object in one of two ways. If array is a numpy
        ndarray, memory for a matrix with the same dimensions is allocated on
        the GPU. If the copy_to_device flag is set to True, the GPU matrix is
        initialized with the given ndarray. If array is not an ndarray, it must
        be a eigenmat structure (typically the user will never use this way of
        calling __init__).
        """
        if type(array) == np.ndarray:
            # Convert array to float32 in FORTRAN order
            array = reformat(array)
            # Initialize as a ndarray-tied matrix.
            self.mat = eigenmat()
            self.size = self.mat.size
            self.p_mat = ct.pointer(self.mat)
            # Held to tie the ndarray's lifetime to this object: the C
            # struct points directly into its buffer.
            self.numpy_array = array
            _eigenmat.init_from_array(self.p_mat, array.ctypes.data_as(ct.POINTER(ct.c_float)), ct.c_int(array.shape[0]), ct.c_int(array.shape[1]))
        else:
            # Initialize based on existing eigenmat structure.
            self.mat = array
            self.p_mat = ct.pointer(self.mat)
        # Shared-data transposed view, built for both construction paths.
        self.T = TransposedEigenMatrix(self.mat)
    @staticmethod
    def init_random(seed=0):
        """
        Initialize and seed the random number generator.
        """
        assert seed >= 0, "Seed must be a non-negative integer."
        # State is stored on the class, so all EigenMatrix instances share
        # one RNG stream.
        EigenMatrix.rnd_state = rnd_struct()
        EigenMatrix.rnd_state_p = ct.pointer(EigenMatrix.rnd_state)
        # NOTE(review): seed+1 presumably avoids handing 0 to the C seeder
        # for the default seed=0 — confirm against the C implementation.
        _eigenmat.init_random(EigenMatrix.rnd_state_p, ct.c_int(seed+1))
@property
def shape(self):
return (self.mat.size[0], self.mat.size[1])
def set_shape(self, shape):
"""
Sets the shape of the array to the given array.
Highly unsafe method. Does no checking.
Do not use this unless you know what you are doing.
"""
m = ct.c_uint(shape[0])
n = ct.c_uint(shape[1])
err_code = _eigenmat.set_shape(self.p_mat, m, n)
if err_code:
raise generate_exception(err_code)
return self
def reshape(self, shape):
"""
Reshapes self to have the given shape. The number of elements cannot
change as this only changes how the contents are interpreted.
"""
m = ct.c_uint(shape[0])
n = ct.c_uint(shape[1])
err_code = _eigenmat.reshape(self.p_mat, m, n)
if err_code:
raise generate_exception(err_code)
return self
def blockify(source, blocksize, target=None):
if target == None:
target = source
err_code = _eigenmat.blockify(source.p_mat, target.p_mat, ct.c_uint(blocksize))
if err_code:
raise generate_exception(err_code)
return target
def generate_translations(source, source_w, target_w, off_x, off_y, target=None):
num_channels = source.shape[0] / (source_w**2)
if target == None:
batch_s = source.shape[1]
target = empty((target_w**2, batch_s))
err_code = _eigenmat.generate_translations_big_var_off(source.p_mat, target.p_mat, off_x.p_mat, off_y.p_mat, ct.c_uint(source_w), ct.c_uint(target_w), ct.c_uint(num_channels))
if err_code:
raise generate_exception(err_code)
return target
def asarray(self):
"""
Copies the matrix to an ndarray on the CPU and returns it.
"""
return self.numpy_array
def copy_to_device(self):
"""
Copy the matrix to the GPU.
"""
pass
def copy_to_host(self):
"""
Copy the matrix to the CPU.
"""
pass
def assign(self, val):
"""Assign val to self, where val can be a scalar or a EigenMatrix
with the same dimensions as self. """
if isinstance(val, EigenMatrix):
err_code = _eigenmat.copy_on_device(val.p_mat, self.p_mat)
elif isinstance(val, (int, float)):
err_code = _eigenmat.assign_scalar(self.p_mat, ct.c_float(val))
else:
raise ValueError( "Assigned value must be of type EigenMatrix, int, or float." )
if err_code:
raise generate_exception(err_code)
return self
def free_device_memory(self):
"""
Free memory used up by the matrix on the GPU.
"""
pass
def set_trans(self, is_trans):
"""
Set the transposedness flag to is_trans.
"""
_eigenmat.set_transpose(self.p_mat, ct.c_int(1 * is_trans))
def slice(self, first_col, last_col):
mat = eigenmat()
if self.mat.size[0] == 1 or self.mat.size[1] == 1:
err_code = _eigenmat.get_vector_slice(self.p_mat, ct.pointer(mat), ct.c_int(first_col), ct.c_int(last_col))
else:
err_code = _eigenmat.get_slice(self.p_mat, ct.pointer(mat), ct.c_int(first_col), ct.c_int(last_col))
if err_code:
raise generate_exception(err_code)
new_mat = EigenMatrix(mat)
try:
new_mat.sliceof = self.sliceof
except:
new_mat.sliceof = self
return new_mat
def get_col_slice(self, first_col, last_col, target=None):
col_slice = self.slice(first_col, last_col)
if target:
target.assign(col_slice)
return target
else:
return col_slice
def set_col_slice(self, first_col, last_col, mat):
self.slice(first_col, last_col).assign(mat)
return self
def get_row_slice(self, start, end, target=None):
"""
Get the rows with indices start through end. If target is not provided
memory for a new matrix will be allocated.
"""
width = self.shape[1]
if not target:
target = empty((end-start, width))
err_code = _eigenmat.get_row_slice(self.p_mat, target.p_mat, ct.c_int(start), ct.c_int(end))
if err_code:
raise generate_exception(err_code)
return target
def set_row_slice(self, start, end, mat):
"""
Assign the contents of mat to the rows with indices start through end.
"""
err_code = _eigenmat.set_row_slice(mat.p_mat, self.p_mat, ct.c_int(start), ct.c_int(end))
if err_code:
raise generate_exception(err_code)
return self
def transpose(self, target=None):
"""
Return a transposed copy of the matrix.
"""
if not target:
target = empty((self.shape[1], self.shape[0]))
err_code = _eigenmat.copy_transpose(self.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def fill_with_rand(self):
"""
Fill matrix on the GPU with random numbers drawn from the uniform
distribution over the (0,1) interval.
"""
err_code = _eigenmat.fill_with_rand(EigenMatrix.rnd_state_p, self.p_mat)
if err_code:
raise generate_exception(err_code)
return self
def fill_with_randn(self):
"""
Fill matrix on the GPU with random numbers drawn from the standard normal
distribution.
"""
err_code = _eigenmat.fill_with_randn(EigenMatrix.rnd_state_p, self.p_mat)
if err_code:
raise generate_exception(err_code)
return self
def dropout(self, dropprob, val=0.0):
"""
Drop entries in this matrix uniformly randomly with given probability
and set the dropped out unit to state val.
"""
err_code = _eigenmat.dropout(EigenMatrix.rnd_state_p, self.p_mat,
ct.c_float(dropprob), ct.c_float(val))
if err_code:
raise generate_exception(err_code)
return self
def sample_bernoulli(self, target=None):
"""
Sample a bernoulli distribution. Choose 1 with probability given by entries of self, 0 otherwise.
"""
if not target:
target = self
err_code = _eigenmat.sample_bernoulli(EigenMatrix.rnd_state_p, self.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return self
def sample_bernoulli_tanh(self, target=None):
"""
Sample a bernoulli distribution. Choose 1 with probability given by entries of (1+self)/2, -1 otherwise.
"""
if not target:
target = self
err_code = _eigenmat.sample_bernoulli_tanh(EigenMatrix.rnd_state_p, self.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return self
def sample_poisson(self, target=None):
"""
Sample a poisson distribution. Choose 1 with probability given by entries of self.
Not implemented yet.
"""
if not target:
target = self
err_code = _eigenmat.sample_poisson(EigenMatrix.rnd_state_p, self.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return self
def sample_gaussian(self, mult=1.0, target=None):
"""
Add zero mean gaussian noise to the matrix. mult is the stddev.
"""
if not target:
target = self
err_code = _eigenmat.sample_gaussian(EigenMatrix.rnd_state_p, self.p_mat, target.p_mat, ct.c_float(mult))
if err_code:
raise generate_exception(err_code)
return self
def perturb_energy_for_softmax_sampling(self, target=None):
"""
Add by -log(-log(rand)).
"""
if not target:
target = self
err_code = _eigenmat.perturb_energy(EigenMatrix.rnd_state_p, self.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return self
def perturb_prob_for_softmax_sampling(self, target=None):
"""
Divide by -log(rand).
"""
if not target:
target = self
err_code = _eigenmat.perturb_prob(EigenMatrix.rnd_state_p, self.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return self
def add_col_vec(self, vec, target=None):
"""
Add vector vec to every column of the matrix. If a target is provided,
it is used to store the result instead of self.
"""
if not target:
target = self
err_code = _eigenmat.add_col_vec(self.p_mat, vec.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def add_col_mult(self, vec, mult, target=None):
"""
Add a multiple of vector vec to every column of the matrix. If a target
is provided, it is used to store the result instead of self.
"""
if not target:
target = self
err_code = _eigenmat.add_col_mult(self.p_mat, vec.p_mat, target.p_mat, ct.c_float(mult))
if err_code:
raise generate_exception(err_code)
return target
def add_mult_sign(self, mat2, mult = 1.):
"""
Add multiple of sign of mat2 to the matrix.
"""
err_code = _eigenmat.add_mult_sign(self.p_mat, mat2.p_mat, ct.c_float(mult))
if err_code:
raise generate_exception(err_code)
return self
def mult_diagonal(self, val, target=None):
"""
Mult val to the diagonal of self. If a target
is provided, it is used to store the result instead of self.
"""
if not target:
target = self
assert self.shape[0] == self.shape[1], 'self must be a square matrix'
if isinstance(val, EigenMatrix):
err_code = _eigenmat.mult_diagonal(self.p_mat, val.p_mat, target.p_mat)
elif isinstance(val, (int, float)):
err_code = _eigenmat.mult_diagonal_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
raise ValueError( "Value must be of type EigenMatrix, int, or float." )
if err_code:
raise generate_exception(err_code)
return target
def add_diagonal(self, val, target=None):
"""
Add val to the diagonal of self. If a target
is provided, it is used to store the result instead of self.
"""
if not target:
target = self
assert self.shape[0] == self.shape[1], 'self must be a square matrix'
if isinstance(val, EigenMatrix):
err_code = _eigenmat.add_diagonal(self.p_mat, val.p_mat, target.p_mat)
elif isinstance(val, (int, float)):
err_code = _eigenmat.add_diagonal_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
raise ValueError( "Value must be of type EigenMatrix, int, or float." )
if err_code:
raise generate_exception(err_code)
return target
def add_row_mult(self, vec, mult, target=None):
"""
Add a multiple of vector vec to every row of the matrix. If a target
is provided, it is used to store the result instead of self.
"""
if not target:
target = self
err_code = _eigenmat.add_row_mult(self.p_mat, vec.p_mat, target.p_mat, ct.c_float(mult))
if err_code:
raise generate_exception(err_code)
return target
def add_row_vec(self, vec, target=None):
"""
Add vector vec to every row of the matrix. If a target is provided,
it is used to store the result instead of self.
"""
if not target:
target = self
err_code = _eigenmat.add_row_vec(self.p_mat, vec.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def mult_by_col(self, vec, target=None):
"""
Multiply vector vec into every column of the matrix. If a target is
provided, it is used to store the result instead of self.
"""
if not target:
target = self
err_code = _eigenmat.mult_by_col_vec(self.p_mat, vec.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def mult_by_row(self, vec, target=None):
"""
Multiply vector vec into every row of the matrix. If a target is
provided, it is used to store the result instead of self.
"""
if not target:
target = self
err_code = _eigenmat.mult_by_row_vec(self.p_mat, vec.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def div_by_col(self, vec, target=None):
"""
Multiply vector vec into every column of the matrix. If a target is
provided, it is used to store the result instead of self.
"""
if not target:
target = self
err_code = _eigenmat.div_by_col_vec(self.p_mat, vec.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def div_by_row(self, vec, target=None):
"""
Divide vector vec into every row of the matrix. If a target is
provided, it is used to store the result instead of self.
"""
if not target:
target = self
err_code = _eigenmat.div_by_row_vec(self.p_mat, vec.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
    def sum(self, axis=None, target = None):
        """
        Sum the matrix along the given dimension, where 0 represents the leading
        dimension and 1 represents the non-leading dimension. If None, the sum
        of all elements is returned. If a target is not provided, a new vector is
        created for storing the result.
        """
        if axis is None:
            # Grand total over all elements; restype is c_float, so this
            # returns a Python float directly.
            return _eigenmat.sum_all(self.p_mat)
        else:
            # Delegates to the module-level sum() defined later in this
            # file (which shadows the builtin at module scope).
            return sum(self, axis, target)
def add_sums(self, mat, axis, mult = 1.):
"""
Add a multiple of the sums of the matrix mat along the given dimension
to self.
"""
m = _eigenmat.get_leading_dimension(mat.p_mat)
n = _eigenmat.get_nonleading_dimension(mat.p_mat)
err_code = _eigenmat.add_sum_by_axis(mat.p_mat, self.p_mat, ct.c_int(axis), ct.c_float(mult))
if err_code:
raise generate_exception(err_code)
return self
def less_than(self, val, target=None):
"""
Perform the operation target = 1. * (self < val), where val can be a matrix or a scalar.
"""
if not target:
target = self
if isinstance(val, (int, float)):
err_code = _eigenmat.less_than_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
err_code = _eigenmat.less_than(self.p_mat, val.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def greater_than(self, val, target=None):
"""
Perform the operation target = 1. * (self > val), where val can be a matrix or a scalar.
"""
if not target:
target = self
if isinstance(val, (int, float)):
err_code = _eigenmat.greater_than_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
err_code = _eigenmat.greater_than(self.p_mat, val.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def upper_bound(self, val, target=None):
"""
Perform the operation target = (self > val) ? val:self, where val can be a matrix or a scalar.
"""
if not target:
target = self
if isinstance(val, (int, float)):
err_code = _eigenmat.upper_bound_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
err_code = _eigenmat.upper_bound(self.p_mat, val.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def lower_bound(self, val, target=None):
"""
Perform the operation target = (self < val) ? val:self, where val can be a matrix or a scalar.
"""
if not target:
target = self
if isinstance(val, (int, float)):
err_code = _eigenmat.lower_bound_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
err_code = _eigenmat.lower_bound(self.p_mat, val.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def cumsum(self, axis, temp=None, target=None):
"""
Cumulative sum along axis.
"""
m, n = self.shape
assert axis == 0, 'axis = 1 not implemented.'
if not target:
target = empty((m, n))
if not temp:
temp = empty((m, n))
"""
elif axis == 1:
if not target:
target = empty((m, 1))
"""
err_code = _eigenmat.cumsum_by_axis(self.p_mat, target.p_mat, temp.p_mat, ct.c_int(axis))
if err_code:
raise generate_exception(err_code)
return target
def choose_max_and_accumulate(self, acc):
"""
Find the maximum value along the given dimension, where 0 represents the
leading dimension and 1 represents the non-leading dimension. If a target
is not prvided, a new vector is created for storing the result.
"""
m, n = self.shape
err_code = _eigenmat.choose_max_and_accumulate(self.p_mat, acc.p_mat)
if err_code:
raise generate_exception(err_code)
return acc
def choose_max(self, axis, target=None):
"""
Find the maximum value along the given dimension, where 0 represents the
leading dimension and 1 represents the non-leading dimension. If a target
is not prvided, a new vector is created for storing the result.
"""
m, n = self.shape
assert axis == 0, 'Axis = 1 not implemented.'
if not target:
target = self
err_code = _eigenmat.choose_max_by_axis(self.p_mat, target.p_mat, ct.c_int(axis))
if err_code:
raise generate_exception(err_code)
return target
def max(self, axis, target=None):
"""
Find the maximum value along the given dimension, where 0 represents the
leading dimension and 1 represents the non-leading dimension. If a target
is not prvided, a new vector is created for storing the result.
"""
m, n = self.shape
if axis == 0:
if not target:
target = empty((1, n))
elif axis == 1:
if not target:
target = empty((m, 1))
err_code = _eigenmat.max_by_axis(self.p_mat, target.p_mat, ct.c_int(axis))
if err_code:
raise generate_exception(err_code)
return target
def argmax(self, axis, target=None):
"""
Find the index with the maximum value along the given dimension, where 0 represents the
leading dimension and 1 represents the non-leading dimension. If a target
is not prvided, a new vector is created for storing the result.
"""
m, n = self.shape
if axis == 0:
if not target:
target = empty((1, n))
elif axis == 1:
if not target:
target = empty((m, 1))
err_code = _eigenmat.argmax_by_axis(self.p_mat, target.p_mat, ct.c_int(axis))
if err_code:
raise generate_exception(err_code)
return target
def sqsum(self, axis, target=None):
"""
Find the sum of squares along the given dimension, where 0 represents the
leading dimension and 1 represents the non-leading dimension. If a target
is not prvided, a new vector is created for storing the result.
"""
m, n = self.shape
if axis == 0:
if not target:
target = empty((1, n))
elif axis == 1:
if not target:
target = empty((m, 1))
err_code = _eigenmat.sqsum_by_axis(self.p_mat, target.p_mat, ct.c_int(axis))
if err_code:
raise generate_exception(err_code)
return target
def norm_limit(self, norm, axis, target=None):
"""
Limit the norm along the given dimension to be 'norm', where 0
represents the leading dimension and 1 represents the non-leading
dimension. If a target is not provided, self is used as target.
"""
m, n = self.shape
if axis == 0:
if not target:
target = self
elif axis == 1:
if not target:
target = self
err_code = _eigenmat.normlimit_by_axis(self.p_mat, target.p_mat,
ct.c_int(axis), ct.c_float(norm))
if err_code:
raise generate_exception(err_code)
return target
def sign(self, target=None):
"""
Find the sign of each element of the matrix.
"""
if not target:
target = empty((self.mat.size[0], self.mat.size[1]))
err_code = _eigenmat.sign(self.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def apply_cos(self, target=None):
"""
Apply the cos sigmoid to each element of the matrix.
"""
return cos(self, target)
def apply_sin(self, target=None):
"""
Apply the sin sigmoid to each element of the matrix.
"""
return sin(self, target)
def apply_sigmoid(self, target=None):
"""
Apply the logistic sigmoid to each element of the matrix.
"""
return sigmoid(self, target)
def apply_softmax(self, target=None):
"""
Apply softmax activation. Each column is taken as one softmax.
"""
return softmax(self, target)
def reciprocal(self, target=None):
"""
Find the reciprocal of each element of the matrix.
"""
if not target:
target = self
err_code = _eigenmat.reciprocal(self.p_mat, target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def dot(self, mat2, mult=1.0, target=None):
"""
Multiply the matrix by mat2 from the right and multiply by scalar mult.
"""
return dot(self, mat2, mult, target)
def add_dot(self, m1, m2, mult=1.0):
"""
Add the dot product of m1 and m2 to the matrix.
"""
err_code = _eigenmat.dot(m1.p_mat, m2.p_mat, self.p_mat, ct.c_float(1.), ct.c_float(mult))
if err_code:
raise generate_exception(err_code)
return self
def subtract_dot(self, m1, m2):
"""
Subtract the dot product of m1 and m2 from the matrix.
"""
err_code = _eigenmat.dot(m1.p_mat, m2.p_mat, self.p_mat, ct.c_float(1.), ct.c_float(-1.))
if err_code:
raise generate_exception(err_code)
return self
def add_mult(self, mat2, alpha = 1.):
"""
Add multiple of mat2 to the matrix.
"""
err_code = _eigenmat.add_mult(self.p_mat, mat2.p_mat, ct.c_float(alpha))
if err_code:
raise generate_exception(err_code)
return self
def subtract_mult(self, mat2, alpha = 1.):
"""
Subtract a multiple of mat2 from the matrix.
"""
err_code = _eigenmat.add_mult(self.p_mat, mat2.p_mat, ct.c_float(-1. * alpha))
if err_code:
raise generate_exception(err_code)
return self
def add(self, val, target=None):
"""Add val to self, where val can be a scalar or a EigenMatrix with the
same dimensions as self. """
if not target:
target = self
if isinstance(val, EigenMatrix):
err_code = _eigenmat.add_elementwise(self.p_mat, val.p_mat, target.p_mat)
elif isinstance(val, (int, float)):
err_code = _eigenmat.add_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
raise ValueError( "Value must be of type EigenMatrix, int, or float." )
if err_code:
raise generate_exception(err_code)
return target
def subtract(self, val, target=None):
"""Subtract val from self, where val can be a scalar or a EigenMatrix with
the same dimensions as self. """
if not target:
target = self
if isinstance(val, EigenMatrix):
err_code = _eigenmat.subtract_elementwise(self.p_mat, val.p_mat, target.p_mat)
elif isinstance(val, (int, float)):
err_code = _eigenmat.add_scalar(self.p_mat, ct.c_float(-1*val), target.p_mat)
else:
raise ValueError( "Value must be of type EigenMatrix, int, or float." )
if err_code:
raise generate_exception(err_code)
return target
def divide(self, val, target=None):
"""Divide self by val, where val can be a scalar or a EigenMatrix with the
same dimensions as self. """
if not target:
target = self
if isinstance(val, EigenMatrix):
err_code = _eigenmat.divide_elementwise(self.p_mat, val.p_mat, target.p_mat)
elif isinstance(val, (int, float)):
err_code = _eigenmat.divide_by_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
raise ValueError( "Value must be of type EigenMatrix, int, or float." )
if err_code:
raise generate_exception(err_code)
return target
def mult(self, val, target=None):
"""Multiply self by val, where val can be a scalar or a EigenMatrix with
the same dimensions as self. """
if not target:
target = self
if isinstance(val, EigenMatrix):
err_code = _eigenmat.mult_elementwise(self.p_mat, val.p_mat, target.p_mat)
elif isinstance(val, (int, float)):
err_code = _eigenmat.mult_by_scalar(self.p_mat, ct.c_float(val), target.p_mat)
else:
raise ValueError( "Value must be of type EigenMatrix, int, or float." )
if err_code:
raise generate_exception(err_code)
return target
def apply_cos_deriv(self, val, target=None):
"""
Apply cos derivative, where val is the activation of cos units.
"""
if not target:
target = self
if isinstance(val, EigenMatrix):
err_code = _eigenmat.apply_cos_deriv(self.p_mat, val.p_mat, target.p_mat)
else:
raise ValueError( "Value must be of type EigenMatrix." )
if err_code:
raise generate_exception(err_code)
return target
def apply_sin_deriv(self, val, target=None):
"""
Apply sin derivative, where val is the activation of sin units.
"""
if not target:
target = self
if isinstance(val, EigenMatrix):
err_code = _eigenmat.apply_sin_deriv(self.p_mat, val.p_mat, target.p_mat)
else:
raise ValueError( "Value must be of type EigenMatrix." )
if err_code:
raise generate_exception(err_code)
return target
def apply_logistic_deriv(self, val, target=None):
"""
Apply logistic derivative, where val is the activation of logistic units.
"""
if not target:
target = self
if isinstance(val, EigenMatrix):
err_code = _eigenmat.apply_logistic_deriv(self.p_mat, val.p_mat, target.p_mat)
else:
raise ValueError( "Value must be of type EigenMatrix." )
if err_code:
raise generate_exception(err_code)
return target
def apply_tanh_deriv(self, val, target=None):
"""
Apply tanh derivative, where val is the activation of the units.
"""
if not target:
target = self
if isinstance(val, EigenMatrix):
err_code = _eigenmat.apply_tanh_deriv(self.p_mat, val.p_mat, target.p_mat)
else:
raise ValueError( "Value must be of type EigenMatrix." )
if err_code:
raise generate_exception(err_code)
return target
def apply_rectified_linear_deriv(self, val, target=None):
"""
Apply rectified linear derivative, where val is the activation of the units.
"""
if not target:
target = self
if isinstance(val, EigenMatrix):
err_code = _eigenmat.apply_rectified_linear_deriv(self.p_mat, val.p_mat, target.p_mat)
else:
raise ValueError( "Value must be of type EigenMatrix." )
if err_code:
raise generate_exception(err_code)
return target
def apply_rectified_linear_smooth_deriv(self, val, target=None):
"""
Apply rectified linear smooth derivative, where val is the activation of the units.
"""
if not target:
target = self
if isinstance(val, EigenMatrix):
err_code = _eigenmat.apply_rectified_linear_smooth_deriv(self.p_mat, val.p_mat, target.p_mat)
else:
raise ValueError( "Value must be of type EigenMatrix." )
if err_code:
raise generate_exception(err_code)
return target
@deprecated
def assign_scalar(self, alpha):
"""
Assign scalar alpha to every element of the matrix.
"""
err_code = _eigenmat.assign_scalar(self.p_mat, ct.c_float(alpha))
if err_code:
raise generate_exception(err_code)
return self
@deprecated
def mult_by_scalar(self, alpha, target=None):
"""
Multiply the matrix by a scalar.
"""
if not target:
target = self
err_code = _eigenmat.mult_by_scalar(self.p_mat, ct.c_float(alpha), target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
@deprecated
def div_by_scalar(self, alpha, target=None):
"""
Divide the matrix by a scalar.
"""
if not target:
target = self
err_code = _eigenmat.divide_by_scalar(self.p_mat, ct.c_float(alpha), target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
@deprecated
def add_scalar(self, alpha, target=None):
"""
Increment the matrix by a scalar.
"""
if not target:
target = self
err_code = _eigenmat.add_scalar(self.p_mat, ct.c_float(alpha), target.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def sum_all(self):
err_code = ct.c_int(0)
res = _eigenmat.sum_all(self.p_mat)
if err_code:
raise generate_exception(err_code.value, ct.byref(err_code))
return res
def euclid_norm(self):
err_code = ct.c_int(0)
res = _eigenmat.euclid_norm(self.p_mat, ct.byref(err_code))
if err_code:
raise generate_exception(err_code.value)
return res
def select_columns(self, indices, target):
"""
copies some columns of self into target.
<indices> must be a row vector. Its elements are float32's representing integers, e.g. "34.0" means the integer "34".
after this call, for all r,c, target[r,c]=self[r,indices[c]].
This returns target.
Negative indices are interpreted in the usual Python way: all elements of <indices> had better be in the range [-self.shape[1], self.shape[1]-1].
This does bounds checking, but out of bounds indices do not raise an exception (because the programmer was lazy). Instead, they result in NaN values in <target>.
"""
err_code = _eigenmat.selectRows(self.p_mat, target.p_mat, indices.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def swap_columns(self, indices1, indices2, target):
"""
swap columns at indices1 of self with columns at indices2 of target.
<indices1> and <indices2> must be row vectors of equal length. Its elements are float32's representing integers, e.g. "34.0" means the integer "34".
after this call, for all r,c, target[r,indices2[c]=self[r,indices1[c]].
self can be same as target, but then the result will be non-deterministic if there is overlap between indices1 and indices2. Can be used for in-place shuffling by making sure indices1 and indices2 do not overlap.
This returns target.
Negative indices are interpreted in the usual Python way: all elements of <indices> had better be in the range [-self.shape[1], self.shape[1]-1].
This does bounds checking, but out of bounds indices do not raise an exception (because the programmer was lazy). Instead, they result in NaN values in <target>.
"""
assert indices1.shape[0] == 1
assert indices1.shape == indices2.shape
err_code = _eigenmat.swapCols(self.p_mat, target.p_mat, indices1.p_mat, indices2.p_mat)
if err_code:
raise generate_exception(err_code)
return target
def set_selected_columns(self, indices, source):
"""
copies all columns of source into some columns of self.
<indices> must be a row vector. Its elements are float32's representing
integers, e.g. "34.0" means the integer "34". after this call, for all
r,c, self[r,indices[c]]=source[r,c]. This returns self.
Negative indices are interpreted in the usual Python way: all elements
of <indices> had better be in the range [-self.shape[1], self.shape[1]-1].
This does bounds checking, but out of bounds indices do not raise an
exception (because the programmer was lazy). Instead, they result in NaN
values in <self>.
"""
err_code = _eigenmat.setSelectedRows(self.p_mat, source.p_mat, indices.p_mat)
if err_code:
raise generate_exception(err_code)
return self
def get_softmax_correct(self, labels, target):
"""
target[i] = 1, iff labels[i] is correctly predicted; 0 otherwise.
"""
assert labels.shape == (1, self.shape[1])
assert target.shape == labels.shape
if isinstance(labels, EigenMatrix):
err_code = _eigenmat.get_softmax_correct(self.p_mat, labels.p_mat, target.p_mat)
else:
raise ValueError( "labels must be of type CUDAMatrix." )
if err_code:
raise generate_exception(err_code)
return target
def get_softmax_cross_entropy(self, labels, target, tiny=1e-10):
"""
target[i] = -log(self[label[i]] + tiny).
"""
assert labels.shape == (1, self.shape[1])
assert target.shape == labels.shape
if isinstance(labels, EigenMatrix):
err_code = _eigenmat.get_softmax_cross_entropy(self.p_mat, labels.p_mat, target.p_mat, ct.c_float(tiny))
else:
raise ValueError( "labels must be of type EigenMatrix or CUDAMatrix." )
if err_code:
raise generate_exception(err_code)
return target
def apply_softmax_grad(self, labels, target = None):
"""
Apply softmax derivative, where labels are the correct labels.
"""
if not target:
target = self
assert labels.shape == (1, self.shape[1])
assert target.shape == self.shape
if isinstance(labels, EigenMatrix):
err_code = _eigenmat.apply_softmax_grad(self.p_mat, labels.p_mat, target.p_mat)
else:
raise ValueError( "labels must be of type EigenMatrix or CUDAMatrix." )
if err_code:
raise generate_exception(err_code)
return target
# Backwards-compatible alias: code written against cudamat's CUDAMatrix API
# can run unchanged on this CPU/Eigen-backed implementation.
CUDAMatrix = EigenMatrix
def empty(shape):
    """
    Creates and returns a new EigenMatrix with the given shape.

    Note: despite the name, the storage is zero-initialised (np.zeros), not
    left uninitialised like numpy.empty.
    """
    return EigenMatrix(np.zeros(shape))
def sum(mat, axis, target=None):
    """
    Sum the matrix along the given dimension, where 0 represents the leading
    dimension and 1 represents the non-leading dimension. If a target is
    not provided, a new vector is created for storing the result.

    Note: intentionally shadows the builtin `sum` (mirrors the cudamat API).

    Raises:
        ValueError: if axis is not 0 or 1 (previously an invalid axis left
            `target` unbound and crashed with UnboundLocalError).
    """
    if axis not in (0, 1):
        raise ValueError("axis must be 0 or 1, got %r" % (axis,))
    m = _eigenmat.get_leading_dimension(mat.p_mat)
    n = _eigenmat.get_nonleading_dimension(mat.p_mat)
    if not target:
        if axis == 0:
            # sum along leading dimension
            target = empty((1, n))
        else:
            # sum along non-leading dimension
            target = empty((m, 1))
    err_code = _eigenmat.sum_by_axis(mat.p_mat, target.p_mat, ct.c_int(axis))
    if err_code:
        raise generate_exception(err_code)
    return target
def dot(m1, m2, mult=1.0, target=None):
    """
    Matrix product mult * (m1 . m2), written into `target`.

    When no target is supplied, a correctly shaped result matrix is allocated.
    """
    if not target:
        rows = _eigenmat.get_leading_dimension(m1.p_mat)
        cols = _eigenmat.get_nonleading_dimension(m2.p_mat)
        target = empty((rows, cols))
    status = _eigenmat.dot(m1.p_mat, m2.p_mat, target.p_mat,
                           ct.c_float(0.), ct.c_float(mult))
    if status:
        raise generate_exception(status)
    return target
def vdot(m1, m2):
    """
    Compute the vector dot product of matrices m1 and m2.

    The error code is returned out-of-band via a ctypes reference.
    """
    status = ct.c_int(0)
    result = _eigenmat.vdot(m1.p_mat, m2.p_mat, ct.byref(status))
    # ctypes c_int is falsy when its value is 0, so this mirrors the usual check.
    if status:
        raise generate_exception(status.value)
    return result
def cos(mat, target=None):
    """
    Element-wise cosine of `mat`, written to `target` (in place when omitted).
    """
    out = target if target else mat
    status = _eigenmat.apply_cos(mat.p_mat, out.p_mat)
    if status:
        raise generate_exception(status)
    return out
def sin(mat, target=None):
    """
    Element-wise sine of `mat`, written to `target` (in place when omitted).
    """
    out = target if target else mat
    status = _eigenmat.apply_sin(mat.p_mat, out.p_mat)
    if status:
        raise generate_exception(status)
    return out
def softmax(mat, target = None):
    """
    Apply softmax activation to each column of `mat`; result in `target`
    (in place when omitted).
    """
    out = target if target else mat
    status = _eigenmat.apply_softmax(mat.p_mat, out.p_mat)
    if status:
        raise generate_exception(status)
    return out
def sigmoid(mat, target=None):
    """
    Apply the logistic sigmoid element-wise; result in `target`
    (in place when omitted).
    """
    out = target if target else mat
    status = _eigenmat.apply_sigmoid(mat.p_mat, out.p_mat)
    if status:
        raise generate_exception(status)
    return out
def tanh(mat, target=None):
    """
    Apply tanh element-wise; result in `target` (in place when omitted).
    """
    out = target if target else mat
    status = _eigenmat.apply_tanh(mat.p_mat, out.p_mat)
    if status:
        raise generate_exception(status)
    return out
def abs(mat, target=None):
    """
    Element-wise absolute value; result in `target` (in place when omitted).

    Note: intentionally shadows the builtin `abs` (mirrors the cudamat API).
    """
    out = target if target else mat
    status = _eigenmat.apply_abs(mat.p_mat, out.p_mat)
    if status:
        raise generate_exception(status)
    return out
def log_1_plus_exp(mat, target=None, exact=False):
    """
    Element-wise softplus, log(1 + exp(x)); result in `target` (in place when
    omitted). When `exact` is True the slower, accurate C routine is used.
    """
    out = target if target else mat
    routine = (_eigenmat.apply_log_1_plus_exp_exact if exact
               else _eigenmat.apply_log_1_plus_exp)
    status = routine(mat.p_mat, out.p_mat)
    if status:
        raise generate_exception(status)
    return out
def log(mat, tiny=0.0, target=None):
    """
    Element-wise natural logarithm; result in `target` (in place when omitted).

    `tiny` is forwarded to the C routine — presumably added to each element
    before the log to avoid log(0); confirm against the native implementation.
    Note: intentionally shadows math-style `log` names (mirrors the cudamat API).
    """
    out = target if target else mat
    status = _eigenmat.apply_log(mat.p_mat, out.p_mat, ct.c_float(tiny))
    if status:
        raise generate_exception(status)
    return out
def exp(mat, target=None):
    """
    Element-wise exponential; result in `target` (in place when omitted).
    """
    out = target if target else mat
    status = _eigenmat.apply_exp(mat.p_mat, out.p_mat)
    if status:
        raise generate_exception(status)
    return out
def ceil(mat, target=None):
    """
    Element-wise ceiling; result in `target` (in place when omitted).
    """
    out = target if target else mat
    status = _eigenmat.apply_ceil(mat.p_mat, out.p_mat)
    if status:
        raise generate_exception(status)
    return out
def floor(mat, target=None):
    """
    Element-wise floor; result in `target` (in place when omitted).
    """
    out = target if target else mat
    status = _eigenmat.apply_floor(mat.p_mat, out.p_mat)
    if status:
        raise generate_exception(status)
    return out
def sqrt(mat, target=None):
    """
    Element-wise square root; result in `target` (in place when omitted).
    """
    out = target if target else mat
    status = _eigenmat.apply_sqrt(mat.p_mat, out.p_mat)
    if status:
        raise generate_exception(status)
    return out
def cross_entropy_bernoulli(mat, p, target=None, tiny=1e-10):
    """
    Compute -mat*log(p) - (1-mat)*log(1-p) element-wise; result in `target`
    (overwrites `mat` when omitted).
    """
    if not isinstance(p, EigenMatrix):
        raise ValueError( "Value must be of type EigenMatrix." )
    out = target if target else mat
    status = _eigenmat.compute_cross_entropy_bernoulli(mat.p_mat, p.p_mat,
                                                       out.p_mat, ct.c_float(tiny))
    if status:
        raise generate_exception(status)
    return out
def cross_entropy(mat, p, target=None, tiny=1e-10):
    """
    Compute -mat*log(p) element-wise; result in `target`
    (overwrites `mat` when omitted).
    """
    if not isinstance(p, EigenMatrix):
        raise ValueError( "Value must be of type EigenMatrix." )
    out = target if target else mat
    status = _eigenmat.compute_cross_entropy(mat.p_mat, p.p_mat,
                                             out.p_mat, ct.c_float(tiny))
    if status:
        raise generate_exception(status)
    return out
def correct_preds(mat, p, target=None, cutoff=0.5):
    """
    Compute mat*(p >= cutoff) + (1-mat)*(p < cutoff) element-wise; result in
    `target` (overwrites `mat` when omitted). Default cutoff is 0.5.
    """
    if not isinstance(p, EigenMatrix):
        raise ValueError( "Value must be of type EigenMatrix." )
    out = target if target else mat
    status = _eigenmat.correct_preds(mat.p_mat, p.p_mat,
                                     out.p_mat, ct.c_float(cutoff))
    if status:
        raise generate_exception(status)
    return out
def pow(mat, p, target=None):
    """
    If p is a scalar, compute the p-th power of each element of `mat`;
    otherwise raise each element to the power given by the corresponding
    element of the EigenMatrix p. Result in `target` (in place when omitted).

    Note: intentionally shadows the builtin `pow` (mirrors the cudamat API).
    """
    out = target if target else mat
    if isinstance(p, EigenMatrix):
        status = _eigenmat.apply_pow_matrix(mat.p_mat, p.p_mat, out.p_mat)
    elif isinstance(p, (int, float)):
        status = _eigenmat.apply_pow(mat.p_mat, ct.c_float(p), out.p_mat)
    else:
        raise ValueError( "Value must be of type EigenMatrix, int, or float." )
    if status:
        raise generate_exception(status)
    return out
def cuda_sync_threads():
    # No-op: the Eigen CPU backend has nothing to synchronise; kept so code
    # written against the cudamat API keeps working.
    pass
def reformat(array):
    """
    Return a float32 copy of `array` laid out in FORTRAN (column-major) order.
    """
    # np.array (not asarray) so the caller always receives a fresh copy.
    return np.array(array, order='F', dtype=np.float32)
def cuda_set_device(dev_id):
    """
    Selects the CUDA device with the given ID.

    No-op in the Eigen CPU backend; kept for cudamat API compatibility.
    """
    pass
def cublas_init():
    """
    Initialize Cublas.

    No-op in the Eigen CPU backend; kept for cudamat API compatibility.
    """
    pass
# cudamat-compatible short alias.
init = cublas_init
def cublas_shutdown():
    """
    Shut down Cublas.

    No-op in the Eigen CPU backend; kept for cudamat API compatibility.
    """
    pass
# cudamat-compatible short alias.
shutdown = cublas_shutdown
|
Corvalius/deepnet | package/eigenmat/setup.py | from setuptools import setup, find_packages
import os, sys
# Build the native eigenmat library before packaging: nmake on Windows,
# plain make everywhere else.
if os.name == 'nt':
    ret = os.system("nmake -f Makefile.win")
else:
    ret = os.system("make")
# Propagate a failed native build as the installer's exit status.
if ret != 0:
    sys.exit(ret)
setup(
    name="eigenmat",
    version="0.1",
    description="Eigen matrix support for Python",
    license="BSD",
    keywords="EIGEN MATRIX",
    packages=find_packages(),
    include_package_data=True,
)
Corvalius/deepnet | examples/eigenmat/tryout.py | <reponame>Corvalius/deepnet
from package.eigenmat import eigenmat as mat
import matplotlib.pyplot as plot
import numpy as np
# Ensuring that eigenmat works.
# Visual sanity check: the histogram of eigenmat's RNG (figure 1) should look
# like numpy's reference normal histogram (figure 2).
plot.ion()
mat.EigenMatrix.init_random(seed=1)
plot.figure(1)
plot.clf()
x = mat.empty((100, 100))
x.fill_with_randn()
plot.hist(x.asarray().flatten(), 100)
plot.figure(2)
plot.clf()
y = np.random.randn(100, 100)
plot.hist(y.flatten(), 100)
# Keep the interactive windows open until the user is done looking.
input('Press Enter.')
Corvalius/deepnet | package/cudamat/setup.py | from setuptools import setup, find_packages
import os, sys
# Build the native CUDA library before packaging: nmake on Windows,
# plain make everywhere else.
if os.name == 'nt':
    ret = os.system("nmake -f Makefile.win")
else:
    ret = os.system("make")
# Propagate a failed native build as the installer's exit status.
if ret != 0:
    sys.exit(ret)
setup(
    name="cudamat",
    version="0.3",
    description="CUBLAS for Python",
    license="BSD",
    keywords="CUDA CUBLAS",
    packages=find_packages(exclude=['examples', 'test']),
    include_package_data=True,
    author='<NAME>',
    author_email='<EMAIL>',
)
Corvalius/deepnet | package/deepnet/mc_avg.py | <reponame>Corvalius/deepnet<filename>package/deepnet/mc_avg.py
"""Monte Carlo model averaging for dropout networks."""
from neuralnet import *
from trainer import *
import glob
import sys
import random
def ExtractRepresentations(model_file, train_op_file, layernames,
                           base_output_dir, memory = '100M', k=10):
    """Write k stochastic forward passes of the network to disk.

    Sample i is written under base_output_dir/sample_%.5d. Acquires the GPU
    lock for the duration of the run.
    """
    LockGPU()
    model = util.ReadModel(model_file)
    op = ReadOperation(train_op_file)
    op.randomize = False
    net = CreateDeepnet(model, op, op)
    net.LoadModelOnGPU()
    net.SetUpData()
    for i in range(k):
        output_dir = os.path.join(base_output_dir, 'sample_%.5d' % i)
        sys.stdout.write('\r Sample %d' % (i+1))
        sys.stdout.flush()
        # drop=True presumably keeps dropout active at inference time, which is
        # the point of Monte Carlo averaging — confirm against deepnet's API.
        net.WriteRepresentationToDisk(layernames, output_dir, memory=memory, drop=True)
    sys.stdout.write('\n')
    FreeGPU()
def GetAverageResult(truth_file, pred_dir, total, k, avg_over=10):
    """Estimate test error when averaging k randomly chosen MC dropout samples.

    Repeats the experiment `avg_over` times: each time, k of the `total`
    prediction samples under pred_dir are drawn (with replacement), their
    predictions averaged, and the classification error against the labels in
    truth_file recorded. Returns (mean, std) of the error percentages.
    """
    sample_ids = range(total)
    x = []
    pred_dict = {}  # cache: sample id -> loaded prediction array
    truth = np.load(truth_file)
    for t in range(avg_over):
        avg_pred = None
        for j in range(k):
            i = random.choice(sample_ids)
            # Bug fix: pred_dict.get(i, np.load(...)) evaluated np.load eagerly,
            # re-reading the file from disk on every iteration even on a cache hit.
            if i not in pred_dict:
                prediction_file = glob.glob(
                    os.path.join(pred_dir, 'sample_%.5d' % i, '*.npy'))[0]
                pred_dict[i] = np.load(prediction_file)
            predictions = pred_dict[i]
            if avg_pred is None:
                # Bug fix: copy() — the += below used to mutate the cached array
                # in pred_dict, corrupting every later draw of the same sample.
                avg_pred = predictions.copy()
            else:
                avg_pred += predictions
        avg_pred /= k
        pred = avg_pred.argmax(axis=1)
        error = len((pred - truth).nonzero()[0])
        x.append((100. * error) / len(truth))
    x = np.array(x)
    return x.mean(), x.std()
def main():
    """Sweep ensemble sizes 1..k and record mean/std MC-averaged error."""
    model_file = sys.argv[1]
    model = util.ReadModel(model_file)
    train_op_file = sys.argv[2]
    output_dir = sys.argv[3]
    layernames = ['output_layer']
    total = 1000  # number of MC samples available on disk
    k = 200  # largest ensemble size evaluated
    avg_over = 100  # repetitions per ensemble size
    # Hard-coded cluster paths; adjust for your environment.
    true_label_file = '/ais/gobi3/u/nitish/mnist/test_labels.npy'
    plot_data_file = '/ais/gobi3/u/nitish/mnist/results/mc_avg.npy'
    #ExtractRepresentations(model_file, train_op_file, layernames, output_dir, memory='1G', k=total)
    # Rows of `out`: (ensemble size, mean error %, std of error %).
    out = np.zeros((k, 3))
    for l in range(1, k+1):
        mean, std = GetAverageResult(true_label_file, output_dir, total, l, avg_over=avg_over)
        print( '%d %.4f %.4f' % (l, mean, std) )
        out[l-1, 0] = l
        out[l-1, 1] = mean
        out[l-1, 2] = std
    np.save(plot_data_file, out)
if __name__ == '__main__':
    main()
|
Corvalius/deepnet | examples/deepnet/multimodal_dbn/split_reps.py | import glob, os, sys
from package.deepnet import deepnet_pb2
from package.deepnet import util
from google.protobuf import text_format
import numpy as np
def DumpDataSplit(data, output_dir, name, dataset_pb, stats_file):
    """Save `data` as <output_dir>/<name>.npy and register it in dataset_pb."""
    data_pb = dataset_pb.data.add()
    output_file_name = os.path.join(output_dir, name)
    np.save(output_file_name, data)
    data_pb.name = name
    data_pb.file_pattern = '%s.npy' % output_file_name
    data_pb.size = data.shape[0]
    if stats_file:
        data_pb.stats_file = stats_file
    data_pb.dimensions.append(data.shape[1])
def DumpLabelSplit(data, output_dir, name, dataset_pb):
    """Like DumpDataSplit, but for label matrices (no stats file attached)."""
    data_pb = dataset_pb.data.add()
    output_file_name = os.path.join(output_dir, name)
    np.save(output_file_name, data)
    data_pb.name = name
    data_pb.file_pattern = '%s.npy' % output_file_name
    data_pb.size = data.shape[0]
    data_pb.dimensions.append(data.shape[1])
def Load(file_pattern):
    """Concatenate all .npy/.npz files matching file_pattern into one array.

    NOTE(review): the .npz branch references `dh`, which is never imported in
    this module and would raise NameError — confirm the intended import.
    """
    data = None
    for f in sorted(glob.glob(file_pattern)):
        ext = os.path.splitext(f)[1]
        if ext == '.npy':
            this_data = np.load(f)
        elif ext == '.npz':
            this_data = dh.Disk.LoadSparse(f).toarray()
        else:
            raise Exception('unknown data format.')
        if data is None:
            data = this_data
        else:
            data = np.concatenate((data, this_data))
    return data
def MakeDict(data_pbtxt):
    """Read a data pbtxt; return ({name: array}, {name: stats_file})."""
    data_pb = util.ReadData(data_pbtxt)
    rep_dict = {}
    stats_files = {}
    for data in data_pb.data:
        rep_dict[data.name] = Load(data.file_pattern)
        stats_files[data.name] = data.stats_file
    return rep_dict, stats_files
def main():
    """Split representations + labels into train/valid/test and write a pbtxt index."""
    data_pbtxt = sys.argv[1]
    output_dir = sys.argv[2]
    prefix = sys.argv[3]
    r = int(sys.argv[4])  # split index, so repeated random splits can coexist
    gpu_mem = sys.argv[5]
    main_mem = sys.argv[6]
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    rep_dict, stats_files = MakeDict(data_pbtxt)
    reps = rep_dict.keys()
    # Reuse a previously saved split for this index if one exists; otherwise
    # create a fresh 10k/5k/10k shuffle of the 25000 examples and persist it.
    indices_file = os.path.join(prefix, 'splits', 'train_indices_%d.npy' % r)
    if os.path.exists(indices_file):
        train = np.load(indices_file)
        valid = np.load(os.path.join(prefix, 'splits', 'valid_indices_%d.npy' % r))
        test = np.load(os.path.join(prefix, 'splits', 'test_indices_%d.npy' % r))
    else:
        print( 'Creating new split.' )
        indices = np.arange(25000)
        np.random.shuffle(indices)
        train = indices[:10000]
        valid = indices[10000:15000]
        test = indices[15000:]
        np.save(os.path.join(prefix, 'splits', 'train_indices_%d.npy' % r), train)
        np.save(os.path.join(prefix, 'splits', 'valid_indices_%d.npy' % r), valid)
        np.save(os.path.join(prefix, 'splits', 'test_indices_%d.npy' % r), test)
    print( 'Splitting data' )
    dataset_pb = deepnet_pb2.Dataset()
    dataset_pb.name = 'flickr_split_%d' % r
    dataset_pb.gpu_memory = gpu_mem
    dataset_pb.main_memory = main_mem
    for rep in reps:
        data = rep_dict[rep]
        stats_file = stats_files[rep]
        DumpDataSplit(data[train], output_dir, 'train_%s' % rep, dataset_pb, stats_file)
        DumpDataSplit(data[valid], output_dir, 'valid_%s' % rep, dataset_pb, stats_file)
        DumpDataSplit(data[test], output_dir, 'test_%s' % rep, dataset_pb, stats_file)
    print( 'Splitting labels' )
    labels = np.load(os.path.join(prefix, 'labels.npy')).astype('float32')
    DumpLabelSplit(labels[train,], output_dir, 'train_labels', dataset_pb)
    DumpLabelSplit(labels[valid,], output_dir, 'valid_labels', dataset_pb)
    DumpLabelSplit(labels[test,], output_dir, 'test_labels', dataset_pb)
    #d = 'indices'
    #np.save(os.path.join(output_dir, 'train_%s.npy' % d), train)
    #np.save(os.path.join(output_dir, 'valid_%s.npy' % d), valid)
    #np.save(os.path.join(output_dir, 'test_%s.npy' % d), test)
    with open(os.path.join(output_dir, 'data.pbtxt'), 'w') as f:
        text_format.PrintMessage(dataset_pb, f)
    print( 'Output written in directory %s' % output_dir )
if __name__ == '__main__':
    main()
|
Corvalius/deepnet | package/deepnet/inference.py | <gh_stars>1-10
"""Do inference in deepnet models."""
from neuralnet import *
from trainer import *
def DoInference(model_file, train_op_file, base_output_dir, layernames,
                layernames_to_unclamp, memory='1G', method='gibbs',
                steps=10, datasets=['validation', 'test'], gpu_mem='2G',
                main_mem='30G', data_proto=None):
    """Run inference over each dataset, writing layer states plus a data.pbtxt index.

    Layers in `layernames_to_unclamp` are left free (not clamped to data) and
    inferred with `method` for `steps` steps.
    NOTE(review): the mutable default `datasets=[...]` is shared across calls;
    safe only while no caller mutates it.
    """
    model = util.ReadModel(model_file)
    op = ReadOperation(train_op_file)
    op.randomize = False
    op.get_last_piece = True
    if data_proto:
        op.data_proto = data_proto
    net = CreateDeepnet(model, op, op)
    net.LoadModelOnGPU()
    net.SetUpData(skip_layernames=layernames_to_unclamp)
    data_pb = deepnet_pb2.Dataset()
    data_pb.name = model.name
    data_pb.gpu_memory = gpu_mem
    data_pb.main_memory = main_mem
    output_proto_file = os.path.join(base_output_dir, 'data.pbtxt')
    for dataset in datasets:
        output_dir = os.path.join(base_output_dir, dataset)
        print( 'Writing to %s' % output_dir )
        size = net.Inference(steps, layernames, layernames_to_unclamp, output_dir,
                             memory=memory, dataset=dataset, method=method)
        # A None size means nothing was written for this dataset; skip indexing it.
        if size is None:
            continue
        # Write protocol buffer.
        for lname in layernames:
            layer = net.GetLayerByName(lname)
            data = data_pb.data.add()
            data.name = '%s_%s' % (lname, dataset)
            data.file_pattern = os.path.join(output_dir, '%s-*-of-*.npy' % lname)
            data.size = size
            data.dimensions.append(layer.state.shape[0])
    with open(output_proto_file, 'w') as f:
        text_format.PrintMessage(data_pb, f)
def main():
    """Entry point with hard-coded flickr paths and layer choices."""
    LockGPU()
    prefix = '/ais/gobi3/u/nitish/flickr'
    model = util.ReadModel(sys.argv[1])
    train_op_file = sys.argv[2]
    layernames = ['joint_hidden', 'text_hidden2', 'text_hidden1',
                  'text_input_layer']
    layernames_to_unclamp = ['text_input_layer', 'text_hidden2']
    method = 'gibbs'
    steps = 10
    output_d = 'dbn_inference'
    output_dir = os.path.join(prefix, output_d, '%s_LAST' % model.name)
    model_file = sys.argv[1]
    DoInference(model_file, train_op_file, output_dir, layernames,
                layernames_to_unclamp, memory = '1G', method=method,
                steps=steps)
    FreeGPU()
if __name__ == '__main__':
    main()
|
Corvalius/deepnet | package/deepnet/write_model_to_mat.py | <filename>package/deepnet/write_model_to_mat.py<gh_stars>1-10
"""Write a model protocol buffer to mat file."""
import util
import numpy as np
import sys
import scipy.io
def Convert(model_file, output_file):
    """Flatten a deepnet model protobuf's parameters into a MATLAB .mat file.

    Keys: layer params as "<layer>_<param>", edge params as
    "<node1>_<node2>_<param>"; 1-D arrays are saved as columns.
    """
    model = util.ReadModel(model_file)
    params = {}
    for l in model.layer:
        for p in l.param:
            params['%s_%s' % (l.name, p.name)] = util.ParameterAsNumpy(p)
    for e in model.edge:
        for p in e.param:
            params['%s_%s_%s' % (e.node1, e.node2, p.name)] = util.ParameterAsNumpy(p)
    scipy.io.savemat(output_file, params, oned_as='column')
if __name__ == '__main__':
    Convert(sys.argv[1], sys.argv[2])
|
Corvalius/deepnet | package/eigenmat/test.py | <reponame>Corvalius/deepnet
import unittest
import eigenmat as mat
import numpy as np
class TestEigenMat(unittest.TestCase):
    """Checks eigenmat operations against numpy reference computations."""
    def setUp(self):
        # Fixed seed so any failure is reproducible.
        mat.EigenMatrix.init_random(seed=1)
    def test_add(self):
        x = np.random.randn(10, 10)
        y = np.random.randn(10, 10)
        eig_x = mat.EigenMatrix(x)
        eig_y = mat.EigenMatrix(y)
        eig_z = mat.empty(x.shape)
        z = x + y # Numpy add.
        eig_x.add(eig_y, target=eig_z) # EigenMat add.
        diff = ((eig_z.asarray() - z)**2).sum()
        self.assertAlmostEqual(diff, 0)
    def test_dot(self):
        x = np.random.randn(500, 1000)
        y = np.random.randn(1000, 600)
        eig_x = mat.EigenMatrix(x)
        eig_y = mat.EigenMatrix(y)
        eig_z = mat.empty((x.shape[0], y.shape[1]))
        z = x.dot(y)
        mat.dot(eig_x, eig_y, target=eig_z)
        # float32 accumulation: compare with reduced precision.
        diff = ((eig_z.asarray() - z)**2).sum()
        self.assertAlmostEqual(diff, 0, places=4)
    def test_dot_transposed(self):
        x = np.random.randn(500, 1000)
        y = np.random.randn(600, 1000)
        eig_x = mat.EigenMatrix(x)
        eig_y = mat.EigenMatrix(y)
        eig_z = mat.empty((x.shape[0], y.shape[0]))
        z = x.dot(y.T)
        mat.dot(eig_x, eig_y.T, target=eig_z)
        diff = ((eig_z.asarray() - z)**2).sum()
        self.assertAlmostEqual(diff, 0, places=4)
    def test_sum_by_axis(self):
        x = 1.1 + np.random.randn(10, 1000)
        y = np.zeros((1, 1000))
        z = np.zeros((10, 1))
        eig_x = mat.EigenMatrix(x)
        eig_y = mat.EigenMatrix(y)
        eig_z = mat.EigenMatrix(z)
        eig_x.sum(axis=0, target=eig_y)
        eig_x.sum(axis=1, target=eig_z)
        diff = ((eig_y.asarray() - x.sum(axis=0).reshape(1, -1))**2).sum()
        self.assertAlmostEqual(diff, 0, places=5)
        diff = ((eig_z.asarray() - x.sum(axis=1).reshape(-1, 1))**2).sum()
        self.assertAlmostEqual(diff, 0, places=5)
    def test_apply_softmax(self):
        x = np.random.randn(100, 10)
        eig_x = mat.EigenMatrix(x)
        eig_y = mat.empty((100, 10))
        eig_x.apply_softmax(target=eig_y)
        # Reference softmax over axis 0 (per column), max-shifted for stability.
        y = np.exp(x - x.max(axis=0))
        y /= y.sum(axis=0)
        diff = ((eig_y.asarray() - y)**2).sum()
        self.assertAlmostEqual(diff, 0, places=5)
if __name__ == '__main__':
    unittest.main()
|
nathants/ptop | setup.py | import setuptools
# Packaging metadata for the ptop command-line tool (installed as a script).
setuptools.setup(
    version="0.0.1",
    license='mit',
    name='ptop',
    author='<NAME>',
    author_email='<EMAIL>',
    url='http://github.com/nathants/ptop',
    scripts=['ptop'],
    python_requires='>=3.7',
    install_requires=['psutil >5, <6',
                      'argh >0.26, <0.27',
                      'blessed >1, <2'],
    description='a minimal htop alternative',
)
|
ciciplusplus/mapnes | app.py | <reponame>ciciplusplus/mapnes<gh_stars>1-10
from flask import Flask, redirect, send_file
from PIL import Image, ImageStat
import requests
from io import BytesIO
import tempfile
import math
import mapnik
import threading
# Incoming Google satellite tiles are 256x256; they get repainted in 16x16 patches.
original_tile_size = 256
small_tile_size = 16
numRows = original_tile_size // small_tile_size  # patches per tile side (16)
R, G, B = 0, 1, 2  # channel indices into ImageStat mean values
app = Flask(__name__)
# Terrain sprites, one per colour class assigned in tiles().
tile_grass = Image.open("tiles/tile_grass.png")
tile_forest = Image.open("tiles/tile_forest.png")
tile_water = Image.open("tiles/tile_water.png")
tile_rock = Image.open("tiles/tile_rock.png")
tile_snow = Image.open("tiles/tile_snow.png")
tile_sand = Image.open("tiles/tile_sand.png")
def minmax(a, b, c):
    """Clamp `a` into the closed interval [b, c]."""
    return min(max(a, b), c)
class GoogleProjection:
    """Spherical-mercator tile maths: lat/lon <-> pixel, one table entry per zoom."""
    def __init__(self, levels=18):
        # Per-zoom constants (tile pyramid doubles in size each level).
        self.Bc = []  # pixels per degree of longitude
        self.Cc = []  # pixels per radian (mercator y scale)
        self.zc = []  # pixel coordinates of the map centre
        self.Ac = []  # total map width in pixels
        size = 256
        for _ in range(levels):
            half = size / 2
            self.Bc.append(size / 360.0)
            self.Cc.append(size / (2 * math.pi))
            self.zc.append((half, half))
            self.Ac.append(size)
            size *= 2
    def fromLLtoPixel(self, ll, zoom):
        """Project (lon, lat) to integer pixel coordinates at `zoom`."""
        centre = self.zc[zoom]
        px = round(centre[0] + ll[0] * self.Bc[zoom])
        # Latitude is clamped before the mercator stretch to avoid the poles.
        sin_lat = minmax(math.sin(math.radians(ll[1])), -0.9999, 0.9999)
        py = round(centre[1] + 0.5 * math.log((1 + sin_lat) / (1 - sin_lat)) * -self.Cc[zoom])
        return (px, py)
    def fromPixelToLL(self, px, zoom):
        """Invert the projection: pixel coordinates back to (lon, lat)."""
        centre = self.zc[zoom]
        lon = (px[0] - centre[0]) / self.Bc[zoom]
        merc_y = (px[1] - centre[1]) / -self.Cc[zoom]
        lat = math.degrees(2 * math.atan(math.exp(merc_y)) - 0.5 * math.pi)
        return (lon, lat)
# Shared mapnik renderer state; `lock` serialises rendering (see render_tile).
m = mapnik.Map(original_tile_size, original_tile_size)
mapnik.load_map(m, "labels.xml")
prj = mapnik.Projection(m.srs)
maxZoom = 20
tileproj = GoogleProjection(maxZoom + 1)
lock = threading.Lock()
@app.route("/")
def hello_world():
    """Serve the static index page."""
    return app.send_static_file('index.html')
@app.route("/tiles/<int:x>/<int:y>/<int:z>")
def tiles(x, y, z):
    """Fetch a Google satellite tile and repaint it with terrain sprites.

    Each 16x16 patch is classified by its mean RGB (branch order matters) and
    replaced with the matching sprite; the recoloured tile is then handed to
    render_tile so mapnik can draw labels on top.
    """
    url = "https://khms1.google.com/kh/v=904?x={}&y={}&z={}".format(x, y, z)
    response = requests.get(url)
    img = Image.open(BytesIO(response.content))
    for row in range(numRows):
        for col in range(numRows):
            start_x = col * small_tile_size
            start_y = row * small_tile_size
            rect = (start_x, start_y, start_x + small_tile_size, start_y + small_tile_size)
            stat = ImageStat.Stat(img.crop(rect))
            avgR, avgG, avgB = stat.mean[R], stat.mean[G], stat.mean[B]
            # Blue-channel thresholds that separate sand/rock and grass/forest.
            rock_b_threshold = 145
            forest_b_threshold = 65
            if avgR >= 225 and avgG >= 225 and avgB >= 225: # snow
                img.paste(tile_snow, rect)
            elif avgG >= avgB and avgG >= avgR and avgB <= forest_b_threshold: # grass
                img.paste(tile_grass, rect)
            elif avgG >= avgB and avgG >= avgR and avgB > forest_b_threshold: # forest
                img.paste(tile_forest, rect)
            elif avgB >= avgG and avgB >= avgR: # water
                img.paste(tile_water, rect)
            elif avgR >= avgG and avgR >= avgB and avgB <= rock_b_threshold: # sand
                img.paste(tile_sand, rect)
            elif avgR >= avgG and avgR >= avgB and avgB > rock_b_threshold: # rock
                img.paste(tile_rock, rect)
            else:
                pass
    # Write the recoloured tile to a temp file so mapnik can use it as a
    # background image; a second temp file receives the final render.
    tmpPng = tempfile.NamedTemporaryFile(mode="w+b", delete=False, suffix=".png")
    img.save(tmpPng, 'PNG')
    tmpPng.seek(0)
    tmpPng2 = tempfile.NamedTemporaryFile(mode="w+b", delete=False, suffix=".png")
    return render_tile(tmpPng.name, tmpPng2.name, x, y, z)
def serve_pil_image(pil_img):
    """Return a Flask PNG response built from an in-memory PIL image."""
    buffer = BytesIO()
    pil_img.save(buffer, 'PNG')
    buffer.seek(0)
    return send_file(buffer, mimetype='image/png')
def render_tile(back_img, tile_handle, x, y, z):
    """Render map labels over `back_img` for slippy-map tile (x, y, z).

    The result is written to `tile_handle` (a file path) and returned as a
    Flask PNG response.
    """
    # Calculate pixel positions of bottom-left & top-right
    p0 = (x * 256, (y + 1) * 256)
    p1 = ((x + 1) * 256, y * 256)
    # Convert to LatLong (EPSG:4326)
    l0 = tileproj.fromPixelToLL(p0, z);
    l1 = tileproj.fromPixelToLL(p1, z);
    # Convert to map projection (e.g. mercator co-ords EPSG:900913)
    c0 = prj.forward(mapnik.Coord(l0[0],l0[1]))
    c1 = prj.forward(mapnik.Coord(l1[0],l1[1]))
    # Bounding box for the tile
    bbox = mapnik.Box2d(c0.x, c0.y, c1.x, c1.y)
    render_size = 256
    # The shared Map object is mutated below — the lock serialises requests,
    # presumably because mapnik.Map is not safe for concurrent use.
    with lock:
        m.resize(render_size, render_size)
        m.zoom_to_box(bbox)
        m.buffer_size = 128
        # Render image with default Agg renderer
        im = mapnik.Image(render_size, render_size)
        m.background_image = back_img
        mapnik.render(m, im)
        im.save(tile_handle, 'png256')
    return send_file(tile_handle, mimetype='image/png')
Volensia/plover_number_format | plover_number_format.py | import re
def num_sec_to_word(num_sec, mode):
    """Convert one section of a number to English words.

    mode 0: num_sec is a string of up to 3 digits, e.g. "342" -> "three hundred forty-two".
    mode 1: num_sec is an int n -> name of the (n+1)-th power of 1000
            ("thousand", "million", ..., long -illion names; "?" for n >= 1000).
    other:  num_sec is a single character -> its word ("0" -> "zero",
            sentinel "O" -> "" for a bare decimal point).
    """
    # Bug fix: "seventeen" was missing from this table, which shifted 17 and 18
    # to the wrong words and made 19 raise IndexError.
    number_words = ["", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen"]
    number_words_tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
    illions = ["thousand", "million", "billion", "trillion", "quadrillion", "quintillion", "sextillion", "septillion", "octillion", "nonillion", "decillion"]
    illion_prefixes = ["", "un", "duo", "tre", "quattuor", "quinqua", "se", "septe", "octo", "nove"]
    illion_prefixes_tens = ["", "deci", "viginti", "triginta", "quadraginta", "quinquaginta", "sexaginta", "septuaginta", "octoginta", "nonaginta"]
    illion_prefixes_hundreds = ["", "centi", "ducenti", "trecenti", "quadringenti", "quingenti", "sescenti", "septingenti", "octingenti", "nongenti"]
    # Mode 0: 3 digit segments
    if mode == 0:
        # add leading zeros
        if len(num_sec) == 1:
            num_sec = "00" + num_sec
        if len(num_sec) == 2:
            num_sec = "0" + num_sec
        # the hundreds
        num_sec_word = ""
        if num_sec[0] != "0":
            num_sec_word = number_words[int(num_sec[0])] + " hundred"
        # the tens
        num_tens = int(num_sec[1:3])
        # Bug fix: num_tens is an int; comparing it to the string "0" was always
        # true, which appended a stray trailing space for round hundreds (e.g.
        # "300" produced "three hundred ").
        if num_sec_word != "" and num_tens != 0:
            num_sec_word += " "
        if num_tens < 20:
            num_sec_word += number_words[num_tens]
        else:
            num_sec_word += number_words_tens[int(num_sec[1])]
            if num_sec[2] != "0":
                num_sec_word += "-" + number_words[int(num_sec[2])]
        return num_sec_word
    # Mode 1: -illion parts (long-scale prefix assembly rules)
    if mode == 1:
        if num_sec <= 10:
            return illions[num_sec]
        if num_sec >= 1000:
            return "?"
        prefix1 = illion_prefixes[num_sec%10]
        prefix2 = illion_prefixes_tens[int(num_sec/10)%10]
        if num_sec >= 100:
            prefix2 += illion_prefixes_hundreds[int(num_sec/100)]
        # tre- rule
        if num_sec%10 == 3:
            if prefix2[0] == "v" or prefix2[0] == "t" or prefix2[0] == "q" or prefix2[0] == "o" or prefix2[0] == "c":
                prefix1 += "s"
        # se- rule
        if num_sec%10 == 6:
            if prefix2[0] == "v" or prefix2[0] == "t" or prefix2[0] == "q":
                prefix1 += "s"
            if prefix2[0] == "o" or prefix2[0] == "c":
                prefix1 += "x"
        # septe- & nove- rule
        if num_sec%10 == 7 or num_sec%10 == 9:
            if prefix2[0] == "d" or prefix2[0] == "t" or prefix2[0] == "q" or prefix2[0] == "s" or prefix2[0] == "c":
                prefix1 += "n"
            if prefix2[0] == "v" or prefix2[0] == "o":
                prefix1 += "m"
        # Drop prefix2's final vowel before appending "illion".
        return prefix1 + prefix2[0:-1] + "illion"
    # Mode 2: single digits
    if num_sec == "0":
        return "zero"
    if num_sec == "O":
        return ""
    return number_words[int(num_sec)]
def number_format_insert_(ctx, cmdline):
    """Plover meta: pour the digits of the last translation into a template.

    In the template, 'N' marks a required digit and n/x/X/0/_ mark optional
    ones (filled right-to-left); a backslash escapes the literal character.
    Returns the (possibly unchanged) action with the formatted text.
    """
    action = ctx.copy_last_action()
    last_words = "".join(ctx.last_fragments(1))
    cmd = "".join(cmdline)
    l_cmd = len(cmd)
    l = len(last_words)
    # do nothing if there are not enough digits
    key = re.compile(r"(?<!\\)N")
    cnt = len(key.findall(cmd))
    if (l < cnt):
        return action
    # fill in the numbers
    for i in range(l_cmd-1, -1, -1):
        # Skip escaped template characters.
        if i > 0 and cmd[i-1] == "\\":
            continue
        if cmd[i] == 'N':
            cmd = cmd[:i] + last_words[l-1] + cmd[i+1:]
            l -= 1
            cnt -= 1
        elif (l > cnt and l > 0) and (cmd[i] == 'n' or cmd[i] == 'x' or cmd[i] == 'X' or cmd[i] == '0' or cmd[i] == '_'):
            # Optional slot: consume a digit only if more remain than the
            # required 'N' slots still need.
            cmd = cmd[:i] + last_words[l-1] + cmd[i+1:]
            l -= 1
    # deal with the symbols
    parenthesis = 0 # no unpaired parentheses
    for i in range(l_cmd-1, -1, -1):
        # Unfilled '_' placeholders become spaces.
        if cmd[i] == '_' and cmd[i-1] != "\\":
            cmd = cmd[:i] + ' ' + cmd[i+1:]
            continue
        if (cmd[i] < 'a' or cmd[i] > 'z') and (cmd[i] < 'A' or cmd[i] > 'Z') and (cmd[i] < '0' or cmd[i] > '9'):
            if i > 0 and cmd[i-1] == 'n' and cmd[i-1] != "\\":
                cmd = cmd[:i] + "\\" + cmd[i+1:]
            if cmd[i] == ')':
                parenthesis += 1
            if cmd[i] == '(':
                # Mark an unpaired '(' for removal along with its neighbours.
                if parenthesis == 0:
                    cmd = cmd[:i] + "\\" + cmd[i+1:]
                else:
                    parenthesis -= 1
        # Leftover optional placeholders are marked for removal too.
        if (cmd[i] == 'n' or cmd[i] == 'x' or cmd[i] == 'X') and cmd[i-1] != "\\":
            cmd = cmd[:i] + "\\" + cmd[i+1:]
    action.prev_replace = last_words
    action.text = cmd.replace("\\", "").strip() # remove backslash
    action.word = None
    action.prev_attach = True
    return action
def number_format_roman_(ctx, cmdline):
    """Plover meta: retroactively turn the last number into Roman numerals.

    cmdline is "<method>:<case>": method 0 uses subtractive notation (IV),
    method 1 additive (IIII); non-zero case lowercases the result.
    """
    action = ctx.copy_last_action()
    args = cmdline.split(":")
    method = int(args[0])
    case = int(args[1])
    if method < 0 or method > 1:
        return action
    # only convert numbers less than 4 digits long
    # The fragment is reversed so index i walks ones, tens, hundreds, thousands.
    last_words = "".join(ctx.last_fragments(1))[::-1]
    num = last_words.replace(",", "").replace(".", "")
    if num.isnumeric() == False or len(num) > 4:
        return action
    rom = ""
    num_method = [[["I", "II", "III", "IV", "V", "VI", "VII", "VIII", "IX"], ["I", "II", "III", "IIII", "V", "VI", "VII", "VIII", "VIIII"]], [["X", "XX", "XXX", "XL", "L", "LX", "LXX", "LXXX", "XC"], ["X", "XX", "XXX", "XXXX", "L", "LX", "LXX", "LXXX", "LXXXX"]], [["C", "CC", "CCC", "CD", "D", "DC", "DCC", "DCCC", "CM"], ["C", "CC", "CCC", "CCCC", "D", "DC", "DCC", "DCCC", "DCCCC"]]]
    for i in range(len(num)):
        x = int(num[i])
        if x == 0:
            continue
        if i != 3:
            rom = num_method[i][method][x-1] + rom
        else:
            # Thousands have no subtractive form here: just repeat M.
            for j in range(x):
                rom = "M" + rom
    if case != 0:
        rom = rom.lower()
    # NOTE(review): prev_replace is assigned the REVERSED fragment — confirm
    # Plover expects the original character order here.
    action.prev_replace = last_words
    action.text = rom
    action.word = None
    action.prev_attach = True
    return action
def number_word_conversion_(ctx, cmdline):
    """Plover meta: retroactively convert the last number between numeral and
    word form, and between cardinal and ordinal.

    cmdline is "<card_ord>:<num_word>" where 0 means "keep as detected",
    1 cardinal/numeral, 2 ordinal/words respectively.
    """
    action = ctx.copy_last_action()
    args = cmdline.split(":")
    card_ord = int(args[0]) # maintain/cardinal/ordinal
    num_word = int(args[1]) # maintain/number/word
    # if len(arg) > 2:
    #     sig_dec = int(args[2]) # significant digits/decimal places
    # if len(arg) > 3:
    #     num_fig = int(args[3]) # number of sig-fig/dec-plc
    # if len(arg) > 4:
    #     separator = int(args[4]) # maintain/+separator/-separator
    # if card_ord < 0 or card_ord > 2 or num_word < 0 or num_word > 2 or separator < 0 or separator > 2 or sig_dec < 0 or sig_dec > 1:
    #     return action
    num = ""
    num_to_word = ""
    num_dec = ""
    is_negative = False
    fragment = "".join(ctx.last_fragments(1)) # TODO: READ WORDS PROPERLY
    # check cardinal/ordinal
    tmp = fragment[-2:]
    if tmp == "st" or tmp == "nd" or tmp == "rd" or tmp == "th":
        tmp = fragment[:-2]
        if card_ord == 0:
            card_ord = 2
    else:
        tmp = fragment
        if card_ord == 0:
            card_ord = 1
    # check number/word
    if tmp.replace(",", "").replace(".", "").replace("-", "").replace("−", "").isdecimal() == False:
        return action
    # check separator
    # if separator == 0:
    #     if re.search(",", tmp) == None:
    #         separator = 2
    #     else:
    #         separator = 1
    # check positive/negative
    if re.search(r"-|−", tmp) != None:
        is_negative = True
    num = tmp.replace(",", "").replace("-", "").replace("−", "")
    # split decimal
    if re.search(r"\.", tmp) != None:
        tmp = num.split(".", 1)
        num = tmp[0]
        num_dec = tmp[1].replace(".", "")
        # "O" sentinel: num_sec_to_word renders it as nothing, so a bare
        # trailing point still yields "... point".
        if (num_dec == ""):
            num_dec = "O"
    # number to word conversion
    if num_word == 2:
        # Walk the integer part right-to-left in 3-digit sections, appending
        # the matching -illion name to each non-empty section.
        for i in range(len(num)-1, -1, -3):
            num_sec = num_sec_to_word(num[max(0, i-2):i+1], 0)
            if num_sec != "":
                if i != len(num)-1:
                    num_sec += " " + num_sec_to_word(int((len(num)-i-1)/3)-1, 1)
                if num_to_word != "":
                    num_sec += " "
                num_to_word = num_sec + num_to_word
        if num == "0" and num_to_word == "":
            num_to_word = "zero"
        # negative numbers
        if is_negative:
            num_to_word = "negative " + num_to_word
        # decimal numbers
        if num_dec != "":
            if num_to_word != "":
                num_to_word += " "
            num_to_word += "point"
            for i in num_dec:
                num_to_word += " " + num_sec_to_word(i, 2)
        # ordinal numbers
        if card_ord == 2 and num_dec == "":
            # Irregular ordinal endings, matched on the word's final letters.
            tmp = num_to_word[-3:]
            if tmp == "one":
                num_to_word = num_to_word[:-3] + "first"
            elif tmp == "two":
                num_to_word = num_to_word[:-3] + "second"
            elif tmp == "ree":
                num_to_word = num_to_word[:-3] + "ird"
            elif tmp == "ive":
                num_to_word = num_to_word[:-3] + "ifth"
            elif tmp == "ght":
                num_to_word = num_to_word[:-3] + "ghth"
            elif tmp == "ine":
                num_to_word = num_to_word[:-3] + "inth"
            elif tmp == "lve":
                num_to_word = num_to_word[:-3] + "lfth"
            elif num_to_word[-2:] == "ty":
                num_to_word = num_to_word[:-2] + "tieth"
            else:
                num_to_word += "th"
    # convert numbers to ordinals
    else:
        if card_ord == 2 and num_dec == "":
            # st/nd/rd suffixes, with the 11/12/13 exceptions.
            tmp = num[-1]
            if tmp == "1" and num[-2:] != "11":
                num += "st"
            elif tmp == "2" and num[-2:] != "12":
                num += "nd"
            elif tmp == "3" and num[-2:] != "13":
                num += "rd"
            else:
                num += "th"
    last_words = fragment
    action.prev_replace = last_words
    if num_word == 2:
        action.text = num_to_word
    else:
        action.text = num
    action.word = None
    action.prev_attach = True
    return action
def retro_insert_currency_(ctx, cmdline):
    """Plover meta: retroactively prefix the most recent number with a symbol.

    cmdline is "<extra_word_count>:<symbol>"; the symbol is prepended to the
    last run of digits/commas/periods found within the recent fragments.
    """
    action = ctx.copy_last_action()
    args = cmdline.split(":")
    word_num = int(args[0]) + 1
    symbol = args[1]
    # Search the reversed text so the regex hits the most recent number first.
    tmp = "".join(ctx.last_fragments(count = word_num))[::-1]
    key = re.compile(r"[\d,.]+\b[,.]?")
    ans = key.search(tmp)
    if ans == None:
        return action
    # Un-reverse the matched span to recover the number in reading order.
    last_words = "".join(reversed(tmp[:ans.end()]))
    action.prev_replace = last_words
    action.text = symbol + last_words
    action.word = None
    action.prev_attach = True
    return action
# Public plugin entry points: thin wrappers around the implementations above,
# keeping the registered names stable while the internals stay private.
def number_format_insert(*args, **kwargs):
    return number_format_insert_(*args, **kwargs)
def number_format_roman(*args, **kwargs):
    return number_format_roman_(*args, **kwargs)
def number_word_conversion(*args, **kwargs):
    return number_word_conversion_(*args, **kwargs)
def retro_insert_currency(*args, **kwargs):
    return retro_insert_currency_(*args, **kwargs)
philipjewell/PingdomLib | pingdomlib/pingdom.py | import requests
import sys
from pingdomlib.check import PingdomCheck
from pingdomlib.contact import PingdomContact
from pingdomlib.reports import PingdomEmailReport, PingdomSharedReport
server_address = 'https://api.pingdom.com'
api_version = '2.0'
class Pingdom(object):
"""Main connection object to interact with pingdom
Attributes:
* pushChanges -- This boolean controls if changes are automatically
pushed to pingdom
* shortlimit -- String containing short api rate limit details
* longlimit -- String containing long api rate limit details
"""
    def __init__(self, username, password, apikey, accountemail=None,
                 pushchanges=True, server=server_address):
        """Set up API credentials and connection state.

        username/password -- Pingdom account credentials (HTTP basic auth)
        apikey            -- Pingdom application key (sent as App-Key header)
        accountemail      -- optional multi-user account email
                             (sent as Account-Email header)
        pushchanges       -- if True, wrapper objects push edits immediately
        server            -- API server base URL
        """
        self.pushChanges = pushchanges
        self.username = username
        self.password = password
        self.apikey = apikey
        self.accountemail = accountemail
        # Base endpoint: <server>/api/<api_version>/
        self.url = '%s/api/%s/' % (server, api_version)
        # Most recent rate-limit headers returned by the API
        self.shortlimit = ''
        self.longlimit = ''
@staticmethod
def _serializeBooleans(params):
""""Convert all booleans to lowercase strings"""
serialized = {}
for name, value in params.items():
if value is True:
value = 'true'
elif value is False:
value = 'false'
serialized[name] = value
return serialized
for k, v in params.items():
if isinstance(v, bool):
params[k] = str(v).lower()
def request(self, method, url, parameters=dict()):
"""Requests wrapper function"""
# The requests library uses urllib, which serializes to "True"/"False" while Pingdom requires lowercase
parameters = self._serializeBooleans(parameters)
headers = {'App-Key': self.apikey}
if self.accountemail:
headers.update({'Account-Email': self.accountemail})
# Method selection handling
if method.upper() == 'GET':
response = requests.get(self.url + url, params=parameters,
auth=(self.username, self.password),
headers=headers)
elif method.upper() == 'POST':
response = requests.post(self.url + url, data=parameters,
auth=(self.username, self.password),
headers=headers)
elif method.upper() == 'PUT':
response = requests.put(self.url + url, data=parameters,
auth=(self.username, self.password),
headers=headers)
elif method.upper() == 'DELETE':
response = requests.delete(self.url + url, params=parameters,
auth=(self.username, self.password),
headers=headers)
else:
raise Exception("Invalid method in pingdom request")
# Store pingdom api limits
self.shortlimit = response.headers.get(
'Req-Limit-Short',
self.shortlimit)
self.longlimit = response.headers.get(
'Req-Limit-Long',
self.longlimit)
# Verify OK response
if response.status_code != 200:
sys.stderr.write('ERROR from %s: %d' % (response.url,
response.status_code))
sys.stderr.write('Returned data: %s\n' % response.json())
response.raise_for_status()
return response
def actions(self, **parameters):
"""Returns a list of actions (alerts) that have been generated for
your account.
Optional Parameters:
* from -- Only include actions generated later than this timestamp.
Format is UNIX time.
Type: Integer
Default: None
* to -- Only include actions generated prior to this timestamp.
Format is UNIX time.
Type: Integer
Default: None
* limit -- Limits the number of returned results to the specified
quantity.
Type: Integer (max 300)
Default: 100
* offset -- Offset for listing.
Type: Integer
Default: 0
* checkids -- Comma-separated list of check identifiers. Limit
results to actions generated from these checks.
Type: String
Default: All
* contactids -- Comma-separated list of contact identifiers.
Limit results to actions sent to these contacts.
Type: String
Default: All
* status -- Comma-separated list of statuses. Limit results to
actions with these statuses.
Type: String ['sent', 'delivered', 'error',
'not_delivered', 'no_credits']
Default: All
* via -- Comma-separated list of via mediums. Limit results to
actions with these mediums.
Type: String ['email', 'sms', 'twitter', 'iphone',
'android']
Default: All
Returned structure:
{
'alerts' : [
{
'contactname' : <String> Name of alerted contact
'contactid' : <String> Identifier of alerted contact
'checkid' : <String> Identifier of check
'time' : <Integer> Time of alert generation. Format
UNIX time
'via' : <String> Alert medium ['email', 'sms',
'twitter', 'iphone',
'android']
'status' : <String> Alert status ['sent', 'delivered',
'error',
'notdelivered',
'nocredits']
'messageshort': <String> Short description of message
'messagefull' : <String> Full message body
'sentto' : <String> Target address, phone number, etc
'charged' : <Boolean> True if your account was charged
for this message
},
...
]
}
"""
# Warn user about unhandled parameters
for key in parameters:
if key not in ['from', 'to', 'limit', 'offset', 'checkids',
'contactids', 'status', 'via']:
sys.stderr.write('%s not a valid argument for actions()\n'
% key)
response = self.request('GET', 'actions', parameters)
return response.json()['actions']
    def alerts(self, **parameters):
        """A short-hand version of 'actions', returns list of alerts.

        See parameters for actions()"""
        # actions() returns {'alerts': [...]}; unwrap to the bare list
        return self.actions(**parameters)['alerts']
def getChecks(self, **parameters):
"""Pulls all checks from pingdom
Optional Parameters:
* limit -- Limits the number of returned probes to the
specified quantity.
Type: Integer (max 25000)
Default: 25000
* offset -- Offset for listing (requires limit.)
Type: Integer
Default: 0
* tags -- Filter listing by tag/s
Type: String
Default: None
"""
# Warn user about unhandled parameters
for key in parameters:
if key not in ['limit', 'offset', 'tags']:
sys.stderr.write('%s not a valid argument for getChecks()\n'
% key)
response = self.request('GET', 'checks', parameters)
return [PingdomCheck(self, x) for x in response.json()['checks']]
    def getCheck(self, checkid):
        """Returns a detailed description of a specified check."""
        # Build a stub check holding only the id, then let it populate
        # itself from the API.
        check = PingdomCheck(self, {'id': checkid})
        check.getDetails()
        return check
    def getResults(self, checkid):
        """ Returns detailed results for a specified check id."""
        # Raw JSON from the 'results/<id>' resource, not a wrapper object
        response = self.request('GET','results/%s' % checkid)
        return response.json()
def newCheck(self, name, host, checktype='http', **kwargs):
"""Creates a new check with settings specified by provided parameters.
Provide new check name, hostname and type along with any additional
optional parameters passed as keywords. Returns new PingdomCheck
instance
Types available:
* http
* httpcustom
* tcp
* ping
* dns
* udp
* smtp
* pop3
Optional parameters:
* paused -- Check should be paused
Type: Boolean
Default: False
* resolution -- Check resolution time (in minutes)
Type: Integer [1, 5, 15, 30, 60]
Default: 5
* contactids -- Comma separated list of contact IDs
Type: String
Default: None
* sendtoemail -- Send alerts as email
Type: Boolean
Default: False
* sendtosms -- Send alerts as SMS
Type: Boolean
Default: False
* sendtotwitter -- Send alerts through Twitter
Type: Boolean
Default: False
* sendtoiphone -- Send alerts to iPhone
Type: Boolean
Default: False
* sendtoandroid -- Send alerts to Android
Type: Boolean
Default: False
* sendnotificationwhendown -- Send notification when check is down
the given number of times
Type: Integer
Default: 2
* notifyagainevery -- Set how many results to wait for in between
notices
Type: Integer
Default: 0
* notifywhenbackup -- Notify when back up again
Type: Boolean
Default: True
* use_legacy_notifications -- Use the old notifications instead of
BeepManager
Type: Boolean
Default: False
HTTP check options:
* url -- Target path on server
Type: String
Default: /
* encryption -- Use SSL/TLS
Type: Boolean
Default: False
* port -- Target server port
Type: Integer
Default: 80
* auth -- Username and password for HTTP authentication
Example: user:password
Type: String
Default: None
* shouldcontain -- Target site should contain this string.
Cannot be combined with 'shouldnotcontain'
Type: String
Default: None
* shouldnotcontain -- Target site should not contain this string.
Cannot be combined with 'shouldcontain'
Type: String
Default: None
* postdata -- Data that should be posted to the web page,
for example submission data for a sign-up or login form.
The data needs to be formatted in the same way as a web browser
would send it to the web server
Type: String
Default: None
* requestheader<NAME> -- Custom HTTP header, replace <NAME> with
desired header name. Header in form: Header:Value
Type: String
Default: None
HTTPCustom check options:
* url -- Target path on server
Type: String
Mandatory
* encryption -- Use SSL/TLS
Type: Boolean
Default: False
* port -- Target server port
Type: Integer
Default: 80
* auth -- Username and password for HTTP authentication
Example: user:password
Type: String
Default: None
* additionalurls -- Colon-separated list of additonal URLS with
hostname included
Type: String
Default: None
TCP check options:
* port -- Target server port
Type: Integer
Mandatory
* stringtosend -- String to send
Type: String
Default: None
* stringtoexpect -- String to expect in response
Type: String
Default: None
DNS check options:
* expectedip -- Expected IP
Type: String
Mandatory
* nameserver -- Nameserver to check
Type: String
Mandatory
UDP check options:
* port -- Target server port
Type: Integer
Mandatory
* stringtosend -- String to send
Type: String
Default: None
* stringtoexpect -- String to expect in response
Type: String
Default: None
SMTP check options:
* port -- Target server port
Type: Integer
Default: 25
* auth -- Username and password for target SMTP authentication.
Example: user:password
Type: String
Default: None
* stringtoexpect -- String to expect in response
Type: String
Default: None
* encryption -- Use connection encryption
Type: Boolean
Default: False
POP3 check options:
* port -- Target server port
Type: Integer
Default: 110
* stringtoexpect -- String to expect in response
Type: String
Default: None
* encryption -- Use connection encryption
Type: Boolean
Default: False
IMAP check options:
* port -- Target server port
Type: Integer
Default: 143
* stringtoexpect -- String to expect in response
Type: String
Default: None
* encryption -- Use connection encryption
Type: Boolean
Default: False
"""
if checktype == 'http':
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['alert_policy', 'autoresolve', 'paused', 'resolution', 'contactids',
'sendtoemail', 'sendtosms', 'sendtotwitter',
'sendtoiphone', 'sendtoandroid',
'sendnotificationwhendown', 'notifyagainevery',
'notifywhenbackup', 'type', 'hostname', 'url',
'encryption', 'port', 'auth', 'shouldcontain',
'shouldnotcontain', 'postdata',
'use_legacy_notifications']:
if key.startswith('requestheader') is not True:
sys.stderr.write("'%s'" % key + ' is not a valid ' +
'argument of newCheck() for type ' +
"'http'\n")
elif checktype == 'httpcustom':
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['paused', 'resolution', 'contactids',
'sendtoemail', 'sendtosms', 'sendtotwitter',
'sendtoiphone', 'sendtoandroid',
'sendnotificationwhendown', 'notifyagainevery',
'notifywhenbackup', 'type', 'hostname', 'url',
'encryption', 'port', 'auth', 'additionalurls',
'use_legacy_notifications']:
sys.stderr.write("'%s'" % key + ' is not a valid ' +
'argument of newCheck() for type ' +
"'httpcustom'\n")
elif checktype == 'tcp':
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['alert_policy', 'autoresolve', 'paused', 'resolution', 'contactids',
'sendtoemail', 'sendtosms', 'sendtotwitter',
'sendtoiphone', 'sendtoandroid',
'sendnotificationwhendown', 'notifyagainevery',
'notifywhenbackup', 'type', 'hostname', 'port',
'stringtosend', 'stringtoexpect',
'use_legacy_notifications']:
sys.stderr.write("'%s'" % key + ' is not a valid ' +
'argument of newCheck() for type ' +
"'tcp'\n")
elif checktype == 'ping':
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['paused', 'resolution', 'contactids',
'sendtoemail', 'sendtosms', 'sendtotwitter',
'sendtoiphone', 'sendtoandroid',
'sendnotificationwhendown', 'notifyagainevery',
'notifywhenbackup', 'type', 'hostname',
'use_legacy_notifications']:
sys.stderr.write("'%s'" % key + ' is not a valid ' +
'argument of newCheck() for type ' +
"'ping'\n")
elif checktype == 'dns':
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['paused', 'resolution', 'contactids',
'sendtoemail', 'sendtosms', 'sendtotwitter',
'sendtoiphone', 'sendtoandroid',
'sendnotificationwhendown', 'notifyagainevery',
'notifywhenbackup', 'type', 'hostname',
'expectedip', 'nameserver',
'use_legacy_notifications']:
sys.stderr.write("'%s'" % key + ' is not a valid ' +
'argument of newCheck() for type ' +
"'dns'\n")
elif checktype == 'udp':
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['paused', 'resolution', 'contactids',
'sendtoemail', 'sendtosms', 'sendtotwitter',
'sendtoiphone', 'sendtoandroid',
'sendnotificationwhendown', 'notifyagainevery',
'notifywhenbackup', 'type', 'hostname', 'port',
'stringtosend', 'stringtoexpect',
'use_legacy_notifications']:
sys.stderr.write("'%s'" % key + ' is not a valid ' +
'argument of newCheck() for type ' +
"'udp'\n")
elif checktype == 'smtp':
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['paused', 'resolution', 'contactids',
'sendtoemail', 'sendtosms', 'sendtotwitter',
'sendtoiphone', 'sendtoandroid',
'sendnotificationwhendown', 'notifyagainevery',
'notifywhenbackup', 'type', 'hostname', 'port',
'auth', 'stringtoexpect', 'encryption',
'use_legacy_notifications']:
sys.stderr.write("'%s'" % key + ' is not a valid ' +
'argument of newCheck() for type ' +
"'smtp'\n")
elif checktype == 'pop3':
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['paused', 'resolution', 'contactids',
'sendtoemail', 'sendtosms', 'sendtotwitter',
'sendtoiphone', 'sendtoandroid',
'sendnotificationwhendown', 'notifyagainevery',
'notifywhenbackup', 'type', 'hostname', 'port',
'stringtoexpect', 'encryption',
'use_legacy_notifications']:
sys.stderr.write("'%s'" % key + ' is not a valid ' +
'argument of newCheck() for type ' +
"'pop3'\n")
elif checktype == 'imap':
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['paused', 'resolution', 'contactids',
'sendtoemail', 'sendtosms', 'sendtotwitter',
'sendtoiphone', 'sendtoandroid',
'sendnotificationwhendown', 'notifyagainevery',
'notifywhenbackup', 'type', 'hostname', 'port',
'stringtoexpect', 'encryption',
'use_legacy_notifications']:
sys.stderr.write("'%s'" % key + ' is not a valid ' +
'argument of newCheck() for type ' +
"'imap'\n")
else:
raise Exception("Invalid checktype in newCheck()")
parameters = {'name': name, 'host': host, 'type': checktype}
for key, value in kwargs.iteritems():
parameters[key] = value
checkinfo = self.request("POST", 'checks', parameters)
return self.getCheck(checkinfo.json()['check']['id'])
def modifyChecks(self, **kwargs):
"""Pause or change resolution for multiple checks in one bulk call.
Parameters:
* paused -- Check should be paused
Type: Boolean
* resolution -- Check resolution time (in minutes)
Type: Integer [1, 5, 15, 30, 60]
* checkids -- Comma-separated list of identifiers for checks to be
modified. Invalid check identifiers will be ignored.
Type: String
"""
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['paused', 'resolution', 'checkids']:
sys.stderr.write("'%s'" % key + ' is not a valid argument ' +
'of newCheck()\n')
return self.request("PUT", "checks", kwargs).json()['message']
    def deleteChecks(self, checkids):
        """Deletes a list of checks, CANNOT BE REVERSED!

        Provide a comma-separated list of checkid's to delete
        """
        # Returns the API status message on success
        return self.request("DELETE", "checks",
                            {'delcheckids': checkids}).json()['message']
    def credits(self):
        """Gets credits list"""
        # Raw 'credits' structure from the API (remaining SMS credits etc.)
        return self.request("GET", "credits").json()['credits']
def probes(self, **kwargs):
"""Returns a list of all Pingdom probe servers
Parameters:
* limit -- Limits the number of returned probes to the specified
quantity
Type: Integer
* offset -- Offset for listing (requires limit).
Type: Integer
Default: 0
* onlyactive -- Return only active probes
Type: Boolean
Default: False
* includedeleted -- Include old probes that are no longer in use
Type: Boolean
Default: False
Returned structure:
[
{
'id' : <Integer> Unique probe id
'country' : <String> Country
'city' : <String> City
'name' : <String> Name
'active' : <Boolean> True if probe is active
'hostname' : <String> DNS name
'ip' : <String> IP address
'countryiso': <String> Country ISO code
},
...
]
"""
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['limit', 'offset', 'onlyactive', 'includedeleted']:
sys.stderr.write("'%s'" % key + ' is not a valid argument ' +
'of probes()\n')
return self.request("GET", "probes", kwargs).json()['probes']
def references(self):
"""Get a reference of regions, timezones and date/time/number formats
and their identifiers.
Returned structure:
{
'regions' :
[
{
'id' : <Integer> Region identifier
'description' : <String> Region description
'countryid' : <Integer> Corresponding country
identifier
'datetimeformatid' : <Integer> Corresponding datetimeformat
identifier
'numberformatid' : <Integer> Corresponding numberformat
identifer
'timezoneid' : <Integer> Corresponding timezone
identifier
},
...
],
'timezones' :
[
{
'id' : <Integer> Time zone identifier
'description' : <String> Time zone description
},
...
],
'datetimeformats' :
[
{
'id' : <Integer> Date/time format identifer
'description' : <String> Date/time format description
},
...
],
'numberformats' :
[
{
'id' : <Integer> Number format identifier
'description' : <String> Number format description
},
...
],
'countries' :
[
{
'id' : <Integer> Country id
'iso' : <String> Country ISO code
},
...
],
'phonecodes' :
[
{
'countryid' : <Integer> Country id
'name' : <String> Country name
'phonecode' : <String> Area phone code
},
...
]
}"""
return self.request("GET", "reference").json()
def traceroute(self, host, probeid):
"""Perform a traceroute to a specified target from a specified Pingdom
probe.
Provide hostname to check and probeid to check from
Returned structure:
{
'result' : <String> Traceroute output
'probeid' : <Integer> Probe identifier
'probedescription' : <String> Probe description
}
"""
response = self.request('GET', 'traceroute', {'host': host,
'probeid': probeid})
return response.json()['traceroute']
    def servertime(self):
        """Get the current time of the API server in UNIX format"""
        # Integer seconds since the epoch, as reported by Pingdom
        return self.request('GET', 'servertime').json()['servertime']
def getContacts(self, **kwargs):
"""Returns a list of all contacts.
Optional Parameters:
* limit -- Limits the number of returned contacts to the specified
quantity.
Type: Integer
Default: 100
* offset -- Offset for listing (requires limit.)
Type: Integer
Default: 0
Returned structure:
[
'id' : <Integer> Contact identifier
'name' : <String> Contact name
'email' : <String> Contact email
'cellphone' : <String> Contact telephone
'countryiso' : <String> Cellphone country ISO code
'defaultsmsprovider' : <String> Default SMS provider
'directtwitter' : <Boolean> Send Tweets as direct messages
'twitteruser' : <String> Twitter username
'paused' : <Boolean> True if contact is pasued
'iphonetokens' : <String list> iPhone tokens
'androidtokens' : <String list> android tokens
]
"""
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['limit', 'offset']:
sys.stderr.write("'%s'" % key + ' is not a valid argument ' +
'of getContacts()\n')
return [PingdomContact(self, x) for x in
self.request("GET", "notification_contacts", kwargs).json()['contacts']]
def newContact(self, name, **kwargs):
"""Create a new contact.
Provide new contact name and any optional arguments. Returns new
PingdomContact instance
Optional Parameters:
* email -- Contact email address
Type: String
* cellphone -- Cellphone number, without the country code part. In
some countries you are supposed to exclude leading zeroes.
(Requires countrycode and countryiso)
Type: String
* countrycode -- Cellphone country code (Requires cellphone and
countryiso)
Type: String
* countryiso -- Cellphone country ISO code. For example: US (USA),
GB (Britain) or SE (Sweden) (Requires cellphone and
countrycode)
Type: String
* defaultsmsprovider -- Default SMS provider
Type: String ['clickatell', 'bulksms', 'esendex',
'cellsynt']
* directtwitter -- Send tweets as direct messages
Type: Boolean
Default: True
* twitteruser -- Twitter user
Type: String
"""
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['email', 'cellphone', 'countrycode', 'countryiso',
'defaultsmsprovider', 'directtwitter',
'twitteruser']:
sys.stderr.write("'%s'" % key + ' is not a valid argument ' +
'of newContact()\n')
kwargs['name'] = name
contactinfo = self.request("POST", "notification_contacts",
kwargs).json()['contact']
return PingdomContact(self, contactinfo)
    def modifyContacts(self, contactids, paused):
        """Modifies a list of contacts.

        Provide comma separated list of contact ids and desired paused state
        Returns status message
        """
        # Bulk pause/unpause via PUT on the contacts collection
        response = self.request("PUT", "notification_contacts", {'contactids': contactids,
                                                                 'paused': paused})
        return response.json()['message']
def deleteContacts(self, contactids):
"""Deletes a list of contacts. CANNOT BE REVERSED!
Provide a comma-separated list of contactid's to delete
Returns status message
"""
return self.request("DELETE", "notification_contacts",
{'delcheckids': contactids}).json()['message']
def singleTest(self, host, checktype, **kwargs):
"""Performs a single test using a specified Pingdom probe against a
specified target. Please note that this method is meant to be used
sparingly, not to set up your own monitoring solution.
Provide hostname and check type, followed by any optional arguments.
Types available:
* http
* httpcustom
* tcp
* ping
* dns
* udp
* smtp
* pop3
Optional arguments:
* probeid -- Probe to use for check
Type: Integer
Default: A random probe
See newCheck() docstring for type-specific arguments
Returned structure:
{
'status' : <String> Test result status ['up, 'down']
'responsetime' : <Integer> Response time in milliseconds
'statusdesc' : <String> Short status description
'statusdesclong' : <String> Long status description
'probeid' : <Integer> Probe identifier
'probedesc' : <String> Probe description
}
"""
if checktype == 'http':
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['probeid', 'url',
'encryption', 'port', 'auth', 'shouldcontain',
'shouldnotcontain', 'postdata']:
if key.startswith('requestheader') is not True:
sys.stderr.write("'%s'" % key + ' is not a valid ' +
'argument of singleTest() for type ' +
"'http'\n")
elif checktype == 'httpcustom':
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['probeid', 'url',
'encryption', 'port', 'auth', 'additionalurls']:
sys.stderr.write("'%s'" % key + ' is not a valid ' +
'argument of singleTest() for type ' +
"'httpcustom'\n")
elif checktype == 'tcp':
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['probeid', 'port',
'stringtosend', 'stringtoexpect']:
sys.stderr.write("'%s'" % key + ' is not a valid ' +
'argument of singleTest() for type ' +
"'tcp'\n")
elif checktype == 'ping':
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['probeid']:
sys.stderr.write("'%s'" % key + ' is not a valid ' +
'argument of singleTest() for type ' +
"'ping'\n")
elif checktype == 'dns':
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['probeid', 'expectedip',
'nameserver']:
sys.stderr.write("'%s'" % key + ' is not a valid ' +
'argument of singleTest() for type ' +
"'dns'\n")
elif checktype == 'udp':
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['probeid', 'port',
'stringtosend', 'stringtoexpect']:
sys.stderr.write("'%s'" % key + ' is not a valid ' +
'argument of singleTest() for type ' +
"'udp'\n")
elif checktype == 'smtp':
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['probeid', 'port', 'auth',
'stringtoexpect', 'encryption']:
sys.stderr.write("'%s'" % key + ' is not a valid ' +
'argument of singleTest() for type ' +
"'smtp'\n")
elif checktype == 'pop3':
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['probeid', 'port',
'stringtoexpect', 'encryption']:
sys.stderr.write("'%s'" % key + ' is not a valid ' +
'argument of singleTest() for type ' +
"'pop3'\n")
elif checktype == 'imap':
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['probeid', 'port',
'stringtoexpect', 'encryption']:
sys.stderr.write("'%s'" % key + ' is not a valid ' +
'argument of singleTest() for type ' +
"'imap'\n")
else:
raise Exception("Invalid checktype in singleTest()")
parameters = {'host': host, 'type': checktype}
for key, value in kwargs.iteritems():
parameters[key] = value
checkinfo = self.request('GET', "single", parameters)
return checkinfo.json()['result']
def getSettings(self):
"""Returns all account-specific settings.
Returned structure:
{
'firstname' : <String> First name
'lastname' : <String> Last name
'company' : <String> Company
'email' : <String> Email
'phone' : <String> Phone
'phonecountryiso' : <String> Phone country ISO code
'cellphone' : <String> Cellphone
'cellphonecountryiso' : <String> Cellphone country ISO code
'address' : <String> Address line 1
'address2' : <String> Address line 2
'zip' : <String> Zip, postal code or equivalent
'location' : <String> City / location
'state' : <String> State or equivalent
'autologout' : <Boolean> Enable auto-logout
'country' :
{
'name' : <String> Country name
'iso' : <String> Country ISO-code
'countryid' : <Integer> Country identifier
}
'vatcode' : <String> For certain EU countries, VAT-code
'region' : <String> Region
'regionid' : <Integer> Region identifier, see reference
'accountcreated' : <Integer> Account creation timestamp
'timezone' :
{
'id' : <String> Timezone name
'description' : <String> Timezone description
'timezoneid' : <Integer> Timezone identifier
}
'dateformat' : <String> Date format
'timeformat' : <String> Time format
'datetimeformatid' : <Integer> Date/time format identifier
'numberformat' : <String> Number format
'numberformatexample' : <String> Example of number presentation
'numberformatid' : <Integer> Number format identifier
'publicreportscode' : <String> URL code
'settingssaved' : <Boolean> True if user has saved initial
settings in control panel
}
"""
return self.request('GET', 'settings').json()['settings']
def modifySettings(self, **kwargs):
"""Modify account-specific settings.
Returns status message for operation
Optional parameters:
* firstname -- First name
Type: String
* lastname -- Last name
Type: String
* company -- Company
Type: String
* email -- Email (Please note that your email is used for
authentication purposes such as using this API or logging into
the Pingdom Panel)
Type: String
* cellphone -- Cellphone (without country code)
(Requires cellcountrycode and cellcountryiso)
Type: String
* cellcountrycode -- Cellphone country code, for example 1 (USA)
or 46 (Sweden)
Type: Integer
* cellcountryiso -- Cellphone country ISO code, for example
US(USA) or SE (Sweden)
Type: String
* phone -- Phone (without country code) (Requires phonecountrycode
and phonecountryiso)
Type: String
* phonecountrycode -- Phone country code, for example 1 (USA)
or 46 (Sweden)
Type: Integer
* phonecountryiso -- Phone country ISO code, for example US (USA)
or SE (Sweden)
Type: String
* address -- Address line 1
Type: String
* address2 -- Address line 2
Type: String
* zip -- Zip, postal code or equivalent
Type: String
* location -- City / location
Type: String
* state -- State, province or equivalent
Type: String
* countryiso -- Country ISO code, for example US (USA)
or SE (Sweden)
Type: String
* vatcode -- For certain EU countries, VAT-code.
Example: SE123456789
Type: String
* autologout -- Enable auto-logout
Type: Boolean
* regionid -- Region identifier, for localization purposes.
0 for "Custom"/none. See the API resource "Reference" for more
information
Type: Integer
* timezoneid -- Time zone identifier. See the API resource
"Reference" for more information
Type: Integer
* datetimeformatid -- Date/time format identifier. See the API
resource "Reference" for more information
Type: Integer
* numberformatid -- Number format identifier. See the API resource
"Reference" for more information
Type: Integer
* pubrcustomdesign -- Use custom design for public reports
Type: Boolean
* pubrtextcolor -- Public reports, custom text color
(Example: FEFFFE or 99CC00)
Type: String
* pubrbackgroundcolor -- Public reports, background color
(Example: FEFFFE or 99CC00)
Type: String
* pubrlogourl -- Public reports, URL to custom logotype.
This parameter is currently disabled for public use.
(Example: stats.pingdom.com/images/logo.png)
Type: String
* pubrmonths -- Public reports, nuber of months to show
Type: String ['none', 'all', '3']
* pubrshowoverview -- Public reports, enable overview
Type: Boolean
* pubrcustomdomain -- Public reports, custom domain. Must be a DNS
CNAME with target stats.pingdom.com
Type: Boolean
"""
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['firstname', 'lastname', 'company', 'email',
'cellphone', 'cellcountrycode', 'cellcountryiso',
'phone', 'phonecountrycode', 'phonecountryiso',
'address', 'address2', 'zip', 'location', 'state',
'countryiso', 'vatcode', 'autologout', 'regionid',
'timezoneid', 'datetimeformatid', 'numberformatid',
'pubrcustomdesign', 'pubrtextcolor',
'pubrbackgroundcolor', 'pubrlogourl', 'pubrmonths',
'pubrshowoverview', 'pubrcustomdomain']:
sys.stderr.write("'%s'" % key + ' is not a valid argument ' +
'of modifySettings()\n')
return self.request('PUT', 'settings', kwargs).json()['message']
def getEmailReports(self):
"""Returns a list of PingdomEmailReport instances."""
reports = [PingdomEmailReport(self, x) for x in
self.request('GET',
'reports.email').json()['subscriptions']]
return reports
def newEmailReport(self, name, **kwargs):
"""Creates a new email report
Returns status message for operation
Optional parameters:
* checkid -- Check identifier. If omitted, this will be an
overview report
Type: Integer
* frequency -- Report frequency
Type: String ['monthly', 'weekly', 'daily']
* contactids -- Comma separated list of receiving contact
identifiers
Type: String
* additionalemails -- Comma separated list of additional receiving
emails
Type: String
"""
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['checkid', 'frequency', 'contactids',
'additionalemails']:
sys.stderr.write("'%s'" % key + ' is not a valid argument ' +
'of newEmailReport()\n')
parameters = {'name': name}
for key, value in kwargs.iteritems():
parameters[key] = value
return self.request('POST', 'reports.email',
parameters).json()['message']
def getPublicReports(self):
"""Returns a list of public (web-based) reports
Returned structure:
[
{
'checkid' : <Integer> Check identifier
'checkname' : <String> Check name
'reporturl' : <String> URL to report
},
...
]
"""
return self.request('GET', 'reports.public').json()['public']
def getSharedReports(self):
"""Returns a list of PingdomSharedReport instances"""
response = self.request('GET',
'reports.shared').json()['shared']['banners']
reports = [PingdomSharedReport(self, x) for x in response]
return reports
def newSharedReport(self, checkid, **kwargs):
"""Create a shared report (banner).
Returns status message for operation
Optional parameters:
* auto -- Automatic period (If false, requires: fromyear,
frommonth, fromday, toyear, tomonth, today)
Type: Boolean
* type -- Banner type
Type: String ['uptime', 'response']
* fromyear -- Period start: year
Type: Integer
* frommonth -- Period start: month
Type: Integer
* fromday -- Period start: day
Type: Integer
* toyear -- Period end: year
Type: Integer
* tomonth -- Period end: month
Type: Integer
* today -- Period end: day
Type: Integer
"""
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['auto', 'type', 'fromyear', 'frommonth', 'fromday',
'toyear', 'tomonth', 'today', 'sharedtype']:
sys.stderr.write("'%s'" % key + ' is not a valid argument ' +
'of newSharedReport()\n')
parameters = {'checkid': checkid, 'sharedtype': 'banner'}
for key, value in kwargs.iteritems():
parameters[key] = value
return self.request('POST', 'reports.shared',
parameters).json()['message']
|
philipjewell/PingdomLib | setup.py | from setuptools import setup
# Read the long description up front under a context manager so the
# file handle is closed promptly (the inline open(...).read() leaked it).
with open('README.txt') as readme:
    long_description = readme.read()

setup(
    name='PingdomLib',
    version='2.0.3',
    author='<NAME>',
    author_email='<EMAIL>',
    packages=['pingdomlib'],
    url='https://github.com/KennethWilke/PingdomLib',
    license='ISC license',
    classifiers=['Development Status :: 5 - Production/Stable',
                 'Intended Audience :: Developers',
                 'Intended Audience :: System Administrators',
                 'License :: OSI Approved :: ISC License (ISCL)',
                 'Operating System :: OS Independent',
                 'Topic :: System :: Monitoring'],
    description='A documented python library to consume the full pingdom API',
    long_description=long_description,
    install_requires=[
        "requests >= 2.2.1"
    ],
)
|
purplewish07/django-vue-admin-zhtw | server/apps/task_system/migrations/0004_auto_20220105_0908.py | <filename>server/apps/task_system/migrations/0004_auto_20220105_0908.py
# Generated by Django 3.2.6 on 2022-01-05 01:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Class/Machine lookup tables, rename Subjuct -> Subject, and make
    mechanical_hours start/end times nullable with no default."""

    dependencies = [
        ('task_system', '0003_auto_20220104_1642'),
    ]

    operations = [
        migrations.CreateModel(
            name='Class',
            fields=[
                ('ID', models.AutoField(primary_key=True, serialize=False)),
                ('ClassID', models.PositiveIntegerField()),
                ('ClassName', models.TextField(blank=True, null=True)),
                ('Color', models.CharField(blank=True, max_length=7, null=True)),
            ],
            options={
                'db_table': 'class',
            },
        ),
        migrations.CreateModel(
            name='Machine',
            fields=[
                ('ID', models.AutoField(primary_key=True, serialize=False)),
                ('MachineID', models.PositiveIntegerField()),
                ('MachineName', models.TextField(blank=True, null=True)),
                ('Color', models.CharField(blank=True, max_length=7, null=True)),
            ],
            options={
                'db_table': 'machine',
            },
        ),
        migrations.RenameField(
            model_name='mechanical_hours',
            old_name='Subjuct',
            new_name='Subject',
        ),
        migrations.AlterField(
            model_name='mechanical_hours',
            name='end_time',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='mechanical_hours',
            name='start_time',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
|
purplewish07/django-vue-admin-zhtw | server/apps/warehouse_management/migrations/0001_initial.py | # Generated by Django 3.2.6 on 2022-03-07 07:11
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema: create the Storage_list model (db_table
    'warehouse_management')."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Storage_list',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('create_time', models.DateTimeField(default=django.utils.timezone.now, help_text='創建時間', verbose_name='創建時間')),
                ('is_deleted', models.BooleanField(default=False, help_text='刪除標記', verbose_name='刪除標記')),
                ('name', models.CharField(max_length=10, unique=True, verbose_name='批號')),
                ('storage_spaces', models.CharField(blank=True, max_length=5, null=True, verbose_name='儲位')),
                ('update_time', models.DateTimeField(blank=True, default=django.utils.timezone.now, help_text='更新時間', null=True, verbose_name='更新時間')),
            ],
            options={
                'db_table': 'warehouse_management',
            },
        ),
    ]
|
purplewish07/django-vue-admin-zhtw | server/apps/system/apps.py | from django.apps import AppConfig
class SystemConfig(AppConfig):
    """App config for apps.system (admin label: 系統管理)."""
    name = 'apps.system'
    verbose_name = '系統管理'

    def ready(self):
        # Import for side effect only: registers the app's signal handlers.
        import apps.system.signals
purplewish07/django-vue-admin-zhtw | server/apps/tag_system/migrations/0001_initial.py | <reponame>purplewish07/django-vue-admin-zhtw
# Generated by Django 3.2.6 on 2022-01-24 07:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: create the Tag model (db_table 'tag')."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('tagid', models.CharField(max_length=10, unique=True)),
            ],
            options={
                'db_table': 'tag',
            },
        ),
    ]
|
purplewish07/django-vue-admin-zhtw | server/apps/tag_system/models.py | <reponame>purplewish07/django-vue-admin-zhtw<filename>server/apps/tag_system/models.py
from django.db import models
# Create your models here.
class Tag(models.Model):
    """A tag record identified by its unique `tagid` string."""
    id = models.AutoField(primary_key=True)
    tagid = models.CharField(max_length=10, unique=True)

    class Meta:
        db_table = "tag"
|
purplewish07/django-vue-admin-zhtw | server/apps/warehouse_management/views.py | from django.shortcuts import render
from rest_framework.viewsets import ModelViewSet
from .models import Storage_list
from .serializers import Storage_listSerializer
from rest_framework.decorators import action
from django.utils import timezone
from rest_framework.response import Response
from rest_framework import status
# Create your views here.
class Storage_listViewSet(ModelViewSet):
    """
    儲位清單-增刪改查 (storage-slot list CRUD; pagination disabled,
    searchable by `name`).
    """
    perms_map = {'get': '*', 'post': 'work_create',
                 'put': '*', 'delete': 'work_delete'}
    queryset = Storage_list.objects.all()
    serializer_class = Storage_listSerializer
    pagination_class = None
    search_fields = ['name']
    ordering_fields = ['pk']
    ordering = ['pk']

    @action(methods=['post'], detail=False)
    def create_or_update(self, request, *args, **kwargs):
        # Bulk upsert keyed on `name`. Expects request.data to be a list of
        # dicts each carrying 'name', 'storage_spaces' and 'user' keys.
        # NOTE(review): names up to 12 chars are accepted here, but the
        # model declares name max_length=10 -- confirm which limit is right.
        # NOTE(review): a row missing a key raises KeyError (HTTP 500), and
        # rows failing the length check are only annotated with
        # error='format error' while the remaining rows are still written.
        items = []
        update_time = timezone.localtime()
        for res in request.data:
            if 9 <= len(res['name']) <= 12:
                items.append(Storage_list(name=res['name'], storage_spaces=res['storage_spaces'], user=res['user'], update_time=update_time))
            else:
                res.update(error='format error')
        # bulk_update_or_create comes from BulkUpdateOrCreateQuerySet on the
        # model's manager; matches existing rows on `name`.
        Storage_list.objects.bulk_update_or_create(items, ['storage_spaces', 'update_time', 'user'], match_field='name')
        # Echoes the (possibly error-annotated) input back to the caller.
        return Response(request.data, status=status.HTTP_200_OK)
purplewish07/django-vue-admin-zhtw | server/apps/tag_system/urls.py | <gh_stars>0
from django.urls import path, include
from .views import Tag_ViewSet
from rest_framework import routers
# DRF router exposing Tag CRUD endpoints under /tag/.
router = routers.DefaultRouter()
router.register('tag', Tag_ViewSet, basename="tag")

urlpatterns = [
    path('', include(router.urls)),
]
|
purplewish07/django-vue-admin-zhtw | server/apps/task_system/migrations/0001_initial.py | <filename>server/apps/task_system/migrations/0001_initial.py
# Generated by Django 3.2.6 on 2021-12-27 07:46
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema: create the Test model (db_table 'test')."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Test',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('OrderID', models.TextField()),
                ('CustomerID', models.TextField()),
                ('EmployeeID', models.TextField()),
                ('OrderDate', models.DateTimeField(blank=True, default=django.utils.timezone.now, help_text='開始時間', null=True, verbose_name='開始時間')),
                ('ShipName', models.TextField()),
                ('ShipCity', models.TextField()),
                ('ShipAddress', models.TextField()),
                ('ShipRegion', models.TextField()),
                ('ShipPostalCode', models.TextField()),
                ('ShipCountry', models.TextField()),
                ('Freight', models.TextField()),
                ('Verified', models.TextField()),
            ],
            options={
                'db_table': 'test',
            },
        ),
    ]
|
purplewish07/django-vue-admin-zhtw | server/apps/report_system/models.py | from django.db import models
from utils.model import SoftModel, BaseModel
from django.utils import timezone
from datetime import datetime
# Create your models here.
class Work_order(BaseModel):
    """製令 (work order): unique name, a short status code, and the
    start/end times of the job."""
    name = models.CharField('製令', max_length=10, unique=True)
    status = models.CharField('狀態', max_length=5, null=True, blank=True)
    start_time = models.DateTimeField(default=timezone.now, verbose_name='開始時間', help_text='開始時間', null=True, blank=True)
    end_time = models.DateTimeField(default=timezone.now, verbose_name='結束時間', help_text='結束時間', null=True, blank=True)

    class Meta:
        verbose_name = '製令'
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.name
purplewish07/django-vue-admin-zhtw | server/apps/warehouse_management/serializers.py | <reponame>purplewish07/django-vue-admin-zhtw
from rest_framework import serializers
from .models import Storage_list
# from django.utils import timezone
class Storage_listSerializer(serializers.ModelSerializer):
    """
    儲位清單序列化 (serializer for Storage_list; all model fields, with
    storage_spaces made mandatory on input).
    """
    class Meta:
        model = Storage_list
        fields = '__all__'
        extra_kwargs = {'storage_spaces': {'required': True}}
# def create(self, validated_data):
# Storage_list, created = Storage_list.objects.update_or_create(
# name=validated_data.get('name', None), storage_spaces=validated_data.get('storage_spaces', None),
# defaults={'name': name,'storage_spaces': storage_spaces})
# return Storage_list
# def create_or_update(self, validated_data):
# print(0)
# print(validated_data)
# name=validated_data.get('name', None)
# storage_spaces=validated_data.get('storage_spaces', None)
# print(name)
# print(storage_spaces)
# update_time=timezone.localtime()
# print(update_time)
# items=[Storage_list(name=name,storage_spaces=storage_spaces,update_time=update_time)]
# print(1)
# Storage_list.objects.bulk_update_or_create(items, ['storage_spaces','update_time'], match_field='name')
# print(2)
# # return Storage_list.objects.create(**validated_data)
# return 0
|
purplewish07/django-vue-admin-zhtw | server/apps/report_system/admin.py | from django.contrib import admin
from .models import Work_order
# Register your models here.
# Make Work_order manageable through the Django admin site.
admin.site.register(Work_order)
purplewish07/django-vue-admin-zhtw | server/apps/task_system/urls.py | <gh_stars>0
from django.urls import path, include
# from django.conf.urls import include, url
# from .views import TestViewSet
from .views import *
from rest_framework import routers
# DRF router wiring the task_system viewsets to their endpoints.
router = routers.DefaultRouter()
router.register('task_list', TestViewSet, basename="task_list")
router.register('Machine', MachineViewSet, basename="Machine")
router.register('Class', ClassViewSet, basename="Class")
router.register('Mechanical_hours', Mechanical_hoursViewSet, basename="Mechanical_hours")

urlpatterns = [
    path('', include(router.urls)),
    # url(r'^task_list/$', TestViewSet),
]
|
purplewish07/django-vue-admin-zhtw | server/apps/report_system/urls.py | <gh_stars>0
from django.urls import path, include
from .views import Work_orderViewSet
from rest_framework import routers
# DRF router exposing Work_order CRUD under /work_list/.
router = routers.DefaultRouter()
router.register('work_list', Work_orderViewSet, basename="work_list")

urlpatterns = [
    path('', include(router.urls)),
]
|
purplewish07/django-vue-admin-zhtw | server/apps/warehouse_management/urls.py | <filename>server/apps/warehouse_management/urls.py
from django.urls import path, include
from .views import Storage_listViewSet
from rest_framework import routers
# DRF router exposing Storage_list CRUD under /storage_list/.
router = routers.DefaultRouter()
router.register('storage_list', Storage_listViewSet, basename="Storage_list")

urlpatterns = [
    path('', include(router.urls)),
]
|
purplewish07/django-vue-admin-zhtw | server/apps/report_system/serializers.py | <reponame>purplewish07/django-vue-admin-zhtw<filename>server/apps/report_system/serializers.py
from rest_framework import serializers
from .models import Work_order
class Work_orderSerializer(serializers.ModelSerializer):
    """
    製令序列化 (serializer for Work_order; exposes all model fields).
    """
    class Meta:
        model = Work_order
        fields = '__all__'
purplewish07/django-vue-admin-zhtw | server/apps/task_system/migrations/0003_auto_20220104_1642.py | # Generated by Django 3.2.6 on 2022-01-04 08:42
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Create Mechanical_hours and switch Test.id to a plain AutoField.

    The commented-out Class/Machine CreateModel blocks were moved to
    migration 0004."""

    dependencies = [
        ('task_system', '0002_auto_20211227_1604'),
    ]

    operations = [
        # migrations.CreateModel(
        #     name='Class',
        #     fields=[
        #         ('ID', models.AutoField(primary_key=True, serialize=False)),
        #         ('ClassID', models.PositiveIntegerField()),
        #         ('ClassName', models.TextField(blank=True, null=True)),
        #         ('Color', models.CharField(blank=True, max_length=7, null=True)),
        #     ],
        #     options={
        #         'db_table': 'class',
        #     },
        # ),
        # migrations.CreateModel(
        #     name='Machine',
        #     fields=[
        #         ('ID', models.AutoField(primary_key=True, serialize=False)),
        #         ('MachineID', models.PositiveIntegerField()),
        #         ('MachineName', models.TextField(blank=True, null=True)),
        #         ('Color', models.CharField(blank=True, max_length=7, null=True)),
        #     ],
        #     options={
        #         'db_table': 'machine',
        #     },
        # ),
        migrations.CreateModel(
            name='Mechanical_hours',
            fields=[
                ('ID', models.AutoField(primary_key=True, serialize=False)),
                ('start_time', models.DateTimeField(default=datetime.datetime(2022, 1, 4, 8, 0, tzinfo=utc))),
                ('end_time', models.DateTimeField(default=datetime.datetime(2022, 1, 4, 17, 0, tzinfo=utc))),
                ('Subjuct', models.TextField(blank=True, null=True)),
                ('MachineID', models.JSONField()),
                ('ClassID', models.JSONField()),
            ],
            options={
                'db_table': 'mechanical_hours',
            },
        ),
        migrations.AlterField(
            model_name='test',
            name='id',
            field=models.AutoField(primary_key=True, serialize=False),
        ),
    ]
|
purplewish07/django-vue-admin-zhtw | server/apps/task_system/views2.py | from django.shortcuts import render
from .models import Test
from odata_query.django import apply_odata_query
import json
from django.http import JsonResponse
# Create your views here.
def TestViewSet(odata_query="id eq 2"):
    """Filter Test rows with an OData query string.

    NOTE(review): this builds and evaluates the queryset but returns None
    -- the JsonResponse below is commented out, so the function appears
    unfinished / experimental. Confirm before using.
    """
    orm_query = Test.objects  # This can be a Manager or a QuerySet.
    # odata_query = "name eq 'test'"  # This will usually come from a query string parameter.
    query = apply_odata_query(orm_query, odata_query)
    results = query.all()
    # return JsonResponse(results)
purplewish07/django-vue-admin-zhtw | server/apps/tag_system/views.py | <reponame>purplewish07/django-vue-admin-zhtw<gh_stars>0
from django.shortcuts import render
from rest_framework.viewsets import ModelViewSet
from .models import Tag
from .serializers import Tag_Serializer
# Create your views here.
class Tag_ViewSet(ModelViewSet):
    """
    Tag-增刪改查 (Tag CRUD; pagination disabled, searchable by tagid).
    """
    perms_map = {'get': '*', 'post': 'work_create',
                 'put': '*', 'delete': 'work_delete'}
    queryset = Tag.objects.all()
    serializer_class = Tag_Serializer
    pagination_class = None
    search_fields = ['tagid']
    ordering_fields = ['pk']
    ordering = ['pk']
|
purplewish07/django-vue-admin-zhtw | server/apps/warehouse_management/migrations/0002_storage_list_user.py | # Generated by Django 3.2.6 on 2022-03-22 08:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the `user` (確認者) column to Storage_list."""

    dependencies = [
        ('warehouse_management', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='storage_list',
            name='user',
            field=models.CharField(blank=True, max_length=5, null=True, verbose_name='確認者'),
        ),
    ]
|
purplewish07/django-vue-admin-zhtw | server/apps/tag_system/apps.py | from django.apps import AppConfig
class TagSystemConfig(AppConfig):
    """App config for apps.tag_system."""
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'apps.tag_system'
|
purplewish07/django-vue-admin-zhtw | server/apps/task_system/views.py | from django.shortcuts import render
# from .models import Test
from .models import *
# from .serializers import Test_Serializer
from .serializers import *
from rest_framework import viewsets
from rest_framework.response import Response
import json
from django.http import JsonResponse
from rest_framework.permissions import IsAuthenticated
# Create your views here.
class TestViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Test orders.

    list/retrieve return a custom {"result": [...], "count": N} envelope
    instead of DRF's default serialized response.
    """
    queryset = Test.objects.all()
    serializer_class = Test_Serializer
    perms_map = {'get': '*', 'post': 'work_create',
                 'put': 'work_update', 'delete': 'work_delete'}
    search_fields = ['name']
    ordering_fields = ['pk']
    ordering = ['pk']

    def retrieve(self, request, *args, **kwargs):
        # BUG FIX: the original returned JsonResponse({result}) -- a set
        # literal wrapping a list, which raises TypeError (lists are
        # unhashable, and JsonResponse requires a dict). Return the same
        # envelope as list() instead.
        # NOTE(review): like the original, this ignores the pk and returns
        # the whole queryset -- confirm that is intentional.
        result = list(self.queryset.values())
        return JsonResponse({"result": result, "count": len(result)})

    def list(self, request, *args, **kwargs):
        result = list(self.queryset.values())
        return JsonResponse({"result": result, "count": len(result)})

    def put(self, request, *args, **kwargs):
        # Delegate PUT straight to ModelViewSet.update.
        return self.update(request, *args, **kwargs)
# def delete(self, request, id, format=None):
# pass
class ClassViewSet(viewsets.ModelViewSet):
    """
    機械類別-增刪改查 (machine-class CRUD; list() returns a
    {"result": [...], "count": N} envelope).
    """
    queryset = Class.objects.all()
    serializer_class = Class_Serializer

    def list(self, request, *args, **kwargs):
        rows = list(self.queryset.values())
        return JsonResponse({"result": rows, "count": len(rows)})
class MachineViewSet(viewsets.ModelViewSet):
    """
    機台-增刪改查 (machine CRUD; list() returns a
    {"result": [...], "count": N} envelope).
    """
    queryset = Machine.objects.all()
    serializer_class = Machine_Serializer

    def list(self, request, *args, **kwargs):
        rows = list(self.queryset.values())
        return JsonResponse({"result": rows, "count": len(rows)})
class Mechanical_hoursViewSet(viewsets.ModelViewSet):
    """
    機械工時-增刪改查 (machine-hours CRUD; list() returns a
    {"result": [...], "count": N} envelope).
    """
    queryset = Mechanical_hours.objects.all()
    serializer_class = Mechanical_hours_Serializer

    def list(self, request, *args, **kwargs):
        rows = list(self.queryset.values())
        return JsonResponse({"result": rows, "count": len(rows)})
# return JsonResponse(result,safe=False) |
purplewish07/django-vue-admin-zhtw | server/apps/report_system/views.py | from django.shortcuts import render
from rest_framework.viewsets import ModelViewSet
from .models import Work_order
from .serializers import Work_orderSerializer
# Create your views here.
class Work_orderViewSet(ModelViewSet):
    """
    製令-增刪改查 (work-order CRUD; pagination disabled, searchable by name).
    """
    perms_map = {'get': '*', 'post': 'work_create',
                 'put': '*', 'delete': 'work_delete'}
    queryset = Work_order.objects.all()
    serializer_class = Work_orderSerializer
    pagination_class = None
    search_fields = ['name']
    ordering_fields = ['pk']
    ordering = ['pk']
purplewish07/django-vue-admin-zhtw | server/apps/report_system/migrations/0004_auto_20211130_1005.py | <reponame>purplewish07/django-vue-admin-zhtw
# Generated by Django 3.2.6 on 2021-11-30 02:05
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Switch Work_order start/end time defaults to django.utils.timezone.now
    (timezone-aware) instead of datetime.datetime.now."""

    dependencies = [
        ('report_system', '0003_auto_20211130_0951'),
    ]

    operations = [
        migrations.AlterField(
            model_name='work_order',
            name='end_time',
            field=models.DateTimeField(blank=True, default=django.utils.timezone.now, help_text='結束時間', null=True, verbose_name='結束時間'),
        ),
        migrations.AlterField(
            model_name='work_order',
            name='start_time',
            field=models.DateTimeField(blank=True, default=django.utils.timezone.now, help_text='開始時間', null=True, verbose_name='開始時間'),
        ),
    ]
|
purplewish07/django-vue-admin-zhtw | server/apps/report_system/migrations/0003_auto_20211130_0951.py | # Generated by Django 3.2.6 on 2021-11-30 01:51
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Give Work_order start/end times a datetime.datetime.now default
    (superseded by 0004, which switches to timezone-aware now)."""

    dependencies = [
        ('report_system', '0002_work_order'),
    ]

    operations = [
        migrations.AlterField(
            model_name='work_order',
            name='end_time',
            field=models.DateTimeField(blank=True, default=datetime.datetime.now, help_text='結束時間', null=True, verbose_name='結束時間'),
        ),
        migrations.AlterField(
            model_name='work_order',
            name='start_time',
            field=models.DateTimeField(blank=True, default=datetime.datetime.now, help_text='開始時間', null=True, verbose_name='開始時間'),
        ),
    ]
|
purplewish07/django-vue-admin-zhtw | server/apps/task_system/serializers.py | <reponame>purplewish07/django-vue-admin-zhtw
from rest_framework import serializers
# from .models import Test
from .models import *
class Test_Serializer(serializers.ModelSerializer):
    """
    測試序列化 (serializer for Test; exposes every column except the id).
    """
    class Meta:
        model = Test
        # fields = '__all__'
        fields = (
            "OrderID",
            "CustomerID",
            "EmployeeID",
            "OrderDate",
            "ShipName",
            "ShipCity",
            "ShipAddress",
            "ShipRegion",
            "ShipPostalCode",
            "ShipCountry",
            "Freight",
            "Verified")
class Class_Serializer(serializers.ModelSerializer):
    """
    機械類別序列化 (serializer for Class; all model fields).
    """
    class Meta:
        model = Class
        fields = '__all__'
class Machine_Serializer(serializers.ModelSerializer):
    """
    機台序列化 (serializer for Machine; all model fields).
    """
    class Meta:
        model = Machine
        fields = '__all__'
class Mechanical_hours_Serializer(serializers.ModelSerializer):
    """
    機械工時序列化 (serializer for Mechanical_hours; all model fields).
    """
    class Meta:
        model = Mechanical_hours
        fields = '__all__'
|
purplewish07/django-vue-admin-zhtw | venv/Lib/site-packages/timezone_field/__init__.py | <reponame>purplewish07/django-vue-admin-zhtw
from timezone_field.fields import TimeZoneField
from timezone_field.forms import TimeZoneFormField
__version__ = '4.2.1'
__all__ = ['TimeZoneField', 'TimeZoneFormField']
|
purplewish07/django-vue-admin-zhtw | server/server/settings_dev.py | from .settings import *
import pymysql

# Let Django's MySQLdb backend use the pure-Python pymysql driver.
pymysql.install_as_MySQLdb()

# Development overrides on top of the base settings.
DEBUG = True

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'mes',
        'USER': 'usr',
        'PASSWORD': '<PASSWORD>',  # placeholder -- inject the real secret, never commit it
        'HOST': '192.168.2.3',
        'PORT': '3306',
        'OPTIONS': {
            'sql_mode': 'traditional',
        }
    }
    #'default': {
    #    'ENGINE': 'django.db.backends.sqlite3',
    #    'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    #}
}

# celery配置: Celery beat schedule stored in the database.
CELERY_BEAT_SCHEDULER = 'django_celery_beat.schedulers:DatabaseScheduler'
purplewish07/django-vue-admin-zhtw | server/apps/warehouse_management/models.py | <reponame>purplewish07/django-vue-admin-zhtw
from django.db import models
from utils.model import SoftModel, BaseModel
from django.utils import timezone
from datetime import datetime
from bulk_update_or_create import BulkUpdateOrCreateQuerySet
# Create your models here.
class Storage_list(BaseModel):
    """批號/儲位 record: a batch number, its storage slot and confirming
    user. The custom manager enables bulk_update_or_create upserts."""
    objects = BulkUpdateOrCreateQuerySet.as_manager()
    name = models.CharField('批號', max_length=10, unique=True)
    storage_spaces = models.CharField('儲位', max_length=5, null=True, blank=True)
    user = models.CharField('確認者', max_length=5, null=True, blank=True)
    update_time = models.DateTimeField(default=timezone.now, verbose_name='更新時間', help_text='更新時間', null=True, blank=True)

    class Meta:
        db_table = "warehouse_management"
|
purplewish07/django-vue-admin-zhtw | server/apps/report_system/apps.py | from django.apps import AppConfig
class ReportSystemConfig(AppConfig):
    """App config for apps.report_system (admin label: 報工系統)."""
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'apps.report_system'
    verbose_name = '報工系統'
purplewish07/django-vue-admin-zhtw | server/apps/system/forms.py | <gh_stars>0
from django.contrib.auth.forms import AuthenticationForm
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5
import os
class RsaAuthenticationForm(AuthenticationForm):
    """AuthenticationForm variant prepared for RSA-encrypted credentials.

    The RSA branch is currently disabled (commented out); the submitted
    username/password are used as-is.
    """

    def clean(self):
        # BUG FIX: `authenticate` was never imported in this module, so any
        # login attempt raised NameError. Imported locally to keep the fix
        # self-contained.
        from django.contrib.auth import authenticate

        module_dir = os.path.dirname(__file__)  # get current directory
        # BUG FIX: the original concatenated a Windows-only backslash path
        # ("\..\..\..\ssl\public.pem") and leaked the file handle;
        # os.path.join + a context manager are portable and safe.
        key_path = os.path.join(module_dir, '..', '..', '..', 'ssl', 'public.pem')
        with open(key_path) as key_file:
            publicKey = RSA.import_key(key_file.read())
        cipherRSA = PKCS1_v1_5.new(publicKey)
        sentinel = None
        # 加密 (encrypted variant, currently disabled):
        # username = cipherRSA.encrypt(self.cleaned_data.get('username'), sentinel)
        # password = cipherRSA.encrypt(self.cleaned_data.get('password'), sentinel)
        # 未加密 (plaintext path in use today):
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')
        # SECURITY FIX: removed the debug print that wrote the raw
        # username/password to stdout.
        if username is not None and password:
            self.user_cache = authenticate(self.request, username=username, password=password)
            if self.user_cache is None:
                raise self.get_invalid_login_error()
            else:
                self.confirm_login_allowed(self.user_cache)
        return self.cleaned_data
purplewish07/django-vue-admin-zhtw | server/apps/task_system/migrations/0002_auto_20211227_1604.py | # Generated by Django 3.2.6 on 2021-12-27 08:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make all free-text columns on Test nullable/blank."""

    dependencies = [
        ('task_system', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='test',
            name='CustomerID',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='test',
            name='EmployeeID',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='test',
            name='Freight',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='test',
            name='ShipAddress',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='test',
            name='ShipCity',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='test',
            name='ShipCountry',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='test',
            name='ShipName',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='test',
            name='ShipPostalCode',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='test',
            name='ShipRegion',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='test',
            name='Verified',
            field=models.TextField(blank=True, null=True),
        ),
    ]
|
purplewish07/django-vue-admin-zhtw | server/utils/serializer.py | <gh_stars>0
from rest_framework import serializers
# class TreeSerializer(serializers.Serializer):
# id = serializers.IntegerField()
# label = serializers.CharField(max_length=20, source='name')
# pid = serializers.PrimaryKeyRelatedField(read_only=True)
# class TreeAPIView(ListAPIView):
# """
# 自定義樹結構View
# """
# serializer_class = TreeSerializer
# def list(self, request, *args, **kwargs):
# queryset = self.filter_queryset(self.get_queryset())
# page = self.paginate_queryset(queryset)
# serializer = self.get_serializer(queryset, many=True)
# tree_dict = {}
# tree_data = []
# try:
# for item in serializer.data:
# tree_dict[item['id']] = item
# for i in tree_dict:
# if tree_dict[i]['pid']:
# pid = tree_dict[i]['pid']
# parent = tree_dict[pid]
# parent.setdefault('children', []).append(tree_dict[i])
# else:
# tree_data.append(tree_dict[i])
# results = tree_data
# except KeyError:
# results = serializer.data
# if page is not None:
# return self.get_paginated_response(results)
# return Response(results)
|
purplewish07/django-vue-admin-zhtw | server/utils/model.py | from django.db import models
import django.utils.timezone as timezone
from django.db.models.query import QuerySet
# 自定義軟刪除查詢基類
class SoftDeletableQuerySetMixin(object):
    '''
    QuerySet mixin for SoftDeletableModel: delete() flags rows as
    is_deleted=True instead of removing them (unless soft=False).
    '''

    def delete(self, soft=True):
        '''
        Soft delete objects from the queryset (set their ``is_deleted``
        field to True); pass ``soft=False`` for a real deletion.
        '''
        if not soft:
            return super(SoftDeletableQuerySetMixin, self).delete()
        self.update(is_deleted=True)
class SoftDeletableQuerySet(SoftDeletableQuerySetMixin, QuerySet):
    # Concrete soft-deletable queryset used by SoftDeletableManagerMixin.
    pass
class SoftDeletableManagerMixin(object):
    '''
    Manager that limits the queryset by default to show only not deleted
    instances of model.
    '''
    _queryset_class = SoftDeletableQuerySet

    # NOTE(review): the `all` parameter shadows the builtin all(); kept
    # unchanged for backward compatibility with keyword callers.
    def get_queryset(self, all=False):
        '''
        Return queryset limited to not deleted entries; with all=True the
        soft-deleted rows are included as well.
        '''
        kwargs = {'model': self.model, 'using': self._db}
        if hasattr(self, '_hints'):
            kwargs['hints'] = self._hints
        if all:
            return self._queryset_class(**kwargs)
        return self._queryset_class(**kwargs).filter(is_deleted=False)
class SoftDeletableManager(SoftDeletableManagerMixin, models.Manager):
    # Default manager for SoftModel; hides rows flagged is_deleted.
    pass
class BaseModel(models.Model):
    """
    基本表 (abstract base: creation/update timestamps plus a soft-delete
    flag consumed by SoftModel/SoftDeletableManager).
    """
    # 創建時間 -- set once when the row is created
    create_time = models.DateTimeField(
        default=timezone.now, verbose_name='創建時間', help_text='創建時間')
    # 修改時間 -- refreshed on every save()
    update_time = models.DateTimeField(
        auto_now=True, verbose_name='修改時間', help_text='修改時間')
    # 刪除標記 -- soft-delete marker
    is_deleted = models.BooleanField(
        default=False, verbose_name='刪除標記', help_text='刪除標記')

    class Meta:
        abstract = True
class SoftModel(BaseModel):
    """
    軟刪除基本表 (abstract base whose delete() soft-deletes by default).
    """
    class Meta:
        abstract = True

    objects = SoftDeletableManager()

    def delete(self, using=None, soft=True, *args, **kwargs):
        '''
        這裡需要真刪除的話soft=False即可
        (pass soft=False to actually remove the row)
        '''
        if soft:
            self.is_deleted = True
            self.save(using=using)
        else:
            return super(SoftModel, self).delete(using=using, *args, **kwargs)
|
purplewish07/django-vue-admin-zhtw | server/apps/task_system/apps.py | <reponame>purplewish07/django-vue-admin-zhtw
from django.apps import AppConfig
class TaskSystemConfig(AppConfig):
    """App config for apps.task_system."""
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'apps.task_system'
|
purplewish07/django-vue-admin-zhtw | server/apps/task_system/models.py | from django.db import models
from django.utils import timezone
from datetime import datetime
# Create your models here.
class Test(models.Model):
    """Order-style record (db_table 'test'); every column is free text
    except OrderDate."""
    id = models.AutoField(primary_key=True)
    OrderID = models.TextField()
    CustomerID = models.TextField(null=True, blank=True)
    EmployeeID = models.TextField(null=True, blank=True)
    OrderDate = models.DateTimeField(default=timezone.now, verbose_name='開始時間', help_text='開始時間', null=True, blank=True)
    ShipName = models.TextField(null=True, blank=True)
    ShipCity = models.TextField(null=True, blank=True)
    ShipAddress = models.TextField(null=True, blank=True)
    ShipRegion = models.TextField(null=True, blank=True)
    ShipPostalCode = models.TextField(null=True, blank=True)
    ShipCountry = models.TextField(null=True, blank=True)
    Freight = models.TextField(null=True, blank=True)
    Verified = models.TextField(null=True, blank=True)

    class Meta:
        db_table = "test"
class Machine(models.Model):
    """A machine with its numeric id, display name and UI color."""
    ID = models.AutoField(primary_key=True)
    MachineID = models.PositiveIntegerField()
    MachineName = models.TextField(null=True, blank=True)
    Color = models.CharField(max_length=7, null=True, blank=True)  # e.g. '#rrggbb' -- TODO confirm

    class Meta:
        db_table = "machine"
class Class(models.Model):
    """A machine class/category with its numeric id, name and UI color."""
    ID = models.AutoField(primary_key=True)
    ClassID = models.PositiveIntegerField()
    ClassName = models.TextField(null=True, blank=True)
    Color = models.CharField(max_length=7, null=True, blank=True)  # e.g. '#rrggbb' -- TODO confirm

    class Meta:
        db_table = "class"
class Mechanical_hours(models.Model):
    """A machine-hours booking: a time span with a subject line.

    MachineID / ClassID are JSON values -- presumably lists of Machine /
    Class ids; confirm the expected shape with the frontend.
    """
    ID = models.AutoField(primary_key=True)
    start_time = models.DateTimeField(null=True, blank=True)
    end_time = models.DateTimeField(null=True, blank=True)
    Subject = models.TextField(null=True, blank=True)
    MachineID = models.JSONField()
    ClassID = models.JSONField()

    class Meta:
        db_table = "mechanical_hours"
|
purplewish07/django-vue-admin-zhtw | server/apps/report_system/migrations/0002_work_order.py | <reponame>purplewish07/django-vue-admin-zhtw<gh_stars>0
# Generated by Django 3.2.6 on 2021-11-30 01:33
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Create the Work_order model (inherits BaseModel's timestamp and
    soft-delete columns)."""

    initial = True

    dependencies = [
        ('report_system', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Work_order',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('create_time', models.DateTimeField(default=django.utils.timezone.now, help_text='創建時間', verbose_name='創建時間')),
                ('update_time', models.DateTimeField(auto_now=True, help_text='修改時間', verbose_name='修改時間')),
                ('is_deleted', models.BooleanField(default=False, help_text='刪除標記', verbose_name='刪除標記')),
                ('name', models.CharField(max_length=10, unique=True, verbose_name='製令')),
                ('status', models.CharField(blank=True, max_length=5, null=True, verbose_name='狀態')),
                ('start_time', models.DateTimeField(blank=True, default=django.utils.timezone.now, help_text='開始時間', null=True, verbose_name='開始時間')),
                ('end_time', models.DateTimeField(blank=True, default=django.utils.timezone.now, help_text='結束時間', null=True, verbose_name='結束時間')),
            ],
            options={
                'verbose_name': '製令',
                'verbose_name_plural': '製令',
            },
        ),
    ]
|
purplewish07/django-vue-admin-zhtw | server/apps/tag_system/serializers.py | from rest_framework import serializers
# from .models import Test
from .models import *
class Tag_Serializer(serializers.ModelSerializer):
    """
    Tag 序列化 (serializer for the Tag model, all fields; the previous
    docstring said 測試/"test" but this serializes Tag).
    """
    class Meta:
        model = Tag
        fields = '__all__'
purplewish07/django-vue-admin-zhtw | server/apps/system/migrations/0003_auto_20211124_1554.py | # Generated by Django 3.2.6 on 2021-11-24 07:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter the id field of the system models to BigAutoField (and
    historicaldict's to BigIntegerField)."""

    dependencies = [
        ('system', '0002_auto_20210718_0918'),
    ]

    operations = [
        migrations.AlterField(
            model_name='dict',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='dicttype',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='file',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='historicaldict',
            name='id',
            field=models.BigIntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='organization',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='permission',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='position',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='role',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='user',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]
|
AchintyaVatsraj/pywebscan | pywebscan.py | <reponame>AchintyaVatsraj/pywebscan<gh_stars>1-10
import sys
import re
import urllib3
import functools
from concurrent.futures import ThreadPoolExecutor
# PARAMS - could be changed to be CLI arguments
TIMEOUT = 15  # Connect/read timeout (seconds)
RETRIES = 1  # Connect/read retries that are permitted
REDIRECTS = 0  # How many redirects to follow
OUTPUT_STATUS_CODES = [200, 403]  # Status codes to track in results
ASSUME_SCHEME = 'https://'  # Scheme to assume when none is provided

# The following controls:
#   Connection pool #
#   Max simultaneous connections
#   Number of Python threads
# Essentially, how many requests can be active at once
# Be careful when tuning this
THREADS = 10

# usage and arg validation
if len(sys.argv) != 3:
    print('-- Usage:')
    print('pywebscan.py [https://example.com | 192.168.1.1 | hosts.txt] paths.txt')
    print('-- Notes:')
    print('Protocol must be provided when targeting a single hostname')
    exit()

# turn off output buffering so we see progressive updates
# NOTE: deliberately shadows the builtin print for the rest of this script.
print = functools.partial(print, flush=True)
# add trailing slash and protocol where needed
def formatHost(host):
    """Normalize *host*: prepend ASSUME_SCHEME when no scheme is present
    and guarantee a trailing slash."""
    if re.match(r'https?://', host) is None:
        host = ASSUME_SCHEME + host
    if not host.endswith('/'):
        host += '/'
    return host
# request a url and return a (url, status code) tuple
def request(url):
    """GET *url* through the module-level urllib3 pool.

    Returns (url, status) on success, (url, None) on any failure; progress
    is printed as each request completes (print is the flushed partial).
    """
    try:
        response = http.request('GET', url)
        print(url, response.status)
        return (url, response.status)
    except Exception:  # SSL error, timeout, host is down, firewall block, etc.
        print(url, 'ERROR')
        return (url, None)
# parse hosts
hosts = []
# hosts as an argument (IP or hostname)
if re.search('^([0-9]{1,3}\\.){3}[0-9]{1,3}$', sys.argv[1]) \
        or re.search('^https?:\\/\\/', sys.argv[1]):
    hosts.append(formatHost(sys.argv[1]))
else:  # hosts from a file, one per line; blank lines skipped
    fp = open(sys.argv[1], 'r')
    hosts = [formatHost(line.strip()) for line in fp if len(line.strip()) > 0]
    fp.close()

# parse paths
fp = open(sys.argv[2], 'r')
paths = [line.strip().lstrip('/') for line in fp if len(line.strip()) > 0]  # strip leading slash
fp.close()

# initialize our http object
timeout = urllib3.util.Timeout(connect=TIMEOUT, read=TIMEOUT)
retries = urllib3.util.Retry(connect=RETRIES, read=RETRIES, redirect=REDIRECTS)
http = urllib3.PoolManager(
    retries=retries,
    timeout=timeout,
    num_pools=THREADS,
    maxsize=THREADS,
    block=True  # cap simultaneous connections at THREADS
)

# thread and execute the scan
print(f'Scanning {len(hosts)} host(s) for {len(paths)} path(s) - {len(hosts) * len(paths)} requests total...\n')
print('------ REQUESTS ------\n')
urls = [host + path for host in hosts for path in paths]
with ThreadPoolExecutor(max_workers=THREADS) as executor:
    results = executor.map(request, urls)
    executor.shutdown(wait=True)

# print our results
print('\n------ RESULTS ------\n')
results = list(results)  # convert from generator
pathNum = len(paths)
for i, host in enumerate(hosts):
    # group our results by host by slicing since order is preserved
    group = results[(i * pathNum):(i * pathNum + pathNum)]
    # filter for desired status codes
    filtered = [result for result in group if result[1] in OUTPUT_STATUS_CODES]
    # output
    print(host)
    print('---')
    for url, status in filtered:
        print(url, status)
    if not filtered:
        print('no results')
    print()

print("------ SCAN COMPLETE ------\n")
|
farhadvaseghi/Prime-Numbers-in-an-Interval | main.py | import math
a = int(input("pleas enter the first number of you'r interval "))
b = int(input("pleas enter the second number of you'r interval "))
def is_prime(n):
    """Return True if *n* is prime, using trial division up to sqrt(n).

    BUG FIX: the original reported 0, 1 and every negative number as prime
    because nothing below 2 ever entered the division loop.
    """
    if n < 2:
        return False
    for i in range(2, math.floor(math.sqrt(n)) + 1):
        if n % i == 0:
            return False
    return True
# Collect every prime in the inclusive interval [a, b].
prime_list=[]
for i in range(a,b+1):
    if is_prime(i):
        prime_list.append(i)
print("[INFO] list of prime numbers in range {}-{} is: ".format(a,b), prime_list)
|
guoshijiang/we_guitar | blog/models.py | <gh_stars>1-10
#encoding=utf-8
import pytz
from django.conf import settings
from django.db import models
from django.contrib.auth.models import User
from DjangoUeditor.models import UEditorField
from common.models import BaseModel
from django.contrib.auth.models import User
tz = pytz.timezone(settings.TIME_ZONE)
class Banner(BaseModel):
    """Homepage carousel banner (image + click-through URL)."""
    title = models.CharField(max_length=200, default='', verbose_name='标题')
    img = models.ImageField(upload_to='banner/', verbose_name='轮播图')
    url = models.URLField(max_length=100, verbose_name='图片链接')
    active = models.CharField(max_length=250, default='', verbose_name='图片状态')
    is_active = models.BooleanField(default=True, verbose_name='是否是有效')

    def __str__(self):
        return self.title

    class Meta:
        verbose_name = '轮播图'
        verbose_name_plural = '轮播图'

    def as_dict(self):
        """JSON-safe representation; note the image is cast to str."""
        return {
            'id': self.id,
            'text_info': self.title,
            'img': str(self.img),
            'link_url': self.url,
            'is_active': self.is_active,
            'uuid': self.uuid,
            'created_at': self.created_at,
            'updated_at': self.updated_at
        }
class Tag(BaseModel):
    """Free-form article tag (attached to Article via M2M)."""
    name = models.CharField(max_length=100, verbose_name='标签')
    is_active = models.BooleanField(default=True, verbose_name='是否有效')

    class Meta:
        verbose_name = '标签表'
        verbose_name_plural = '标签表'

    def __str__(self):
        return self.name

    def as_dict(self):
        """JSON-safe representation."""
        return {
            'id': self.id,
            'name': self.name,
            'uuid': self.uuid,
            'created_at': self.created_at,
            'updated_at': self.updated_at
        }
class Category(BaseModel):
    """Article category; `icon` is an optional image for the category."""
    name = models.CharField('文章分类', max_length=100)
    icon = models.ImageField(upload_to='cat/%Y/%m/%d/', blank=True, null=True, verbose_name='分类的Icon')
    is_active = models.BooleanField('是否是有效', default=True)

    class Meta:
        verbose_name = '文章分类'
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.name

    def as_dict(self):
        """JSON-safe representation.

        BUG FIX: `icon` was returned as the raw ImageFieldFile object, which
        is not JSON-serializable; cast to str, matching Banner.as_dict's
        treatment of its image field.
        """
        return {
            'id': self.id,
            'name': self.name,
            'icon': str(self.icon),
            'uuid': self.uuid,
            'created_at': self.created_at,
            'updated_at': self.updated_at
        }
class Article(BaseModel):
    """Blog article: author, category, tags, UEditor rich-text body, view counter."""
    title = models.CharField(max_length=70, verbose_name='标题')
    user = models.ForeignKey(
        User, related_name="article_user",
        null=True, blank=True, on_delete=models.CASCADE, verbose_name='作者'
    )
    excerpt = models.TextField(max_length=200, default='', verbose_name='摘要')
    # BUG FIX: null=True has no effect on ManyToManyField (Django system
    # check fields.W340) — removed; blank=True alone makes the field optional.
    tags = models.ManyToManyField(Tag, blank=True, verbose_name='标签',)
    category = models.ForeignKey(
        Category, related_name="article_cat",
        on_delete=models.DO_NOTHING, blank=True, null=True, verbose_name='分类'
    )
    img = models.ImageField(
        upload_to='article/%Y/%m/%d/', blank=True, null=True, verbose_name='文章图片'
    )
    body = UEditorField(
        width=800, height=500,
        toolbars="full", imagePath="upimg/", filePath="upfile/",
        upload_settings={"imageMaxSize": 1204000},
        settings={}, command=None, blank=True, verbose_name='内容'
    )
    views = models.PositiveIntegerField(default=0, verbose_name='阅读量')
    is_active = models.BooleanField(default=True, verbose_name='是否有效')

    class Meta:
        verbose_name = '文章'
        verbose_name_plural = '文章'

    def __str__(self):
        return self.title

    def return_dict(self):
        """JSON-safe summary (timestamps localized to settings.TIME_ZONE)."""
        return {
            'id': self.id,
            'title': self.title,
            'excerpt': self.excerpt,
            'img': str(self.img),
            'created_at': self.created_at.astimezone(tz).strftime('%Y-%m-%d %H:%M'),
            'updated_at': self.updated_at.astimezone(tz).strftime('%Y-%m-%d %H:%M')
        }
|
guoshijiang/we_guitar | blog/admin.py | #encoding=utf-8
from django.contrib import admin
from blog.models import (
Banner, Category, Article, Tag
)
@admin.register(Banner)
class BannerAdmin(admin.ModelAdmin):
    """Admin list view for carousel banners."""
    list_display = (
        'id', 'title', 'img', 'url', 'is_active'
    )


@admin.register(Tag)
class TagAdmin(admin.ModelAdmin):
    """Admin list view for article tags."""
    list_display = ('id', 'name')


@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    """Admin list view for article categories."""
    list_display = ('id', 'name')


@admin.register(Article)
class ArticleAdmin(admin.ModelAdmin):
    """Admin list view for articles: newest first, 50 per page, id/title linked."""
    list_display = ('id', 'category', 'title', 'views', 'created_at')
    list_per_page = 50
    ordering = ('-created_at',)
    list_display_links = ('id', 'title')
guoshijiang/we_guitar | blog/migrations/0001_initial.py | # Generated by Django 2.1.2 on 2021-06-28 03:07
import DjangoUeditor.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the blog app: creates the
    Article, Banner and Category tables, then wires Article's FKs to
    Category and the user model. Applied migrations should not be hand-edited.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('uuid', models.CharField(blank=True, max_length=100, null=True, unique=True)),
                ('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('updated_at', models.DateTimeField(auto_now=True, db_index=True)),
                ('title', models.CharField(max_length=70, verbose_name='标题')),
                ('excerpt', models.TextField(blank=True, max_length=200, verbose_name='摘要')),
                ('img', models.ImageField(blank=True, null=True, upload_to='article/%Y/%m/%d/', verbose_name='文章图片')),
                ('body', DjangoUeditor.models.UEditorField(blank=True, verbose_name='内容')),
                ('views', models.PositiveIntegerField(default=0, verbose_name='阅读量')),
                ('is_active', models.BooleanField(default=True, verbose_name='是否有效')),
            ],
            options={
                'verbose_name': '文章',
                'verbose_name_plural': '文章',
            },
        ),
        migrations.CreateModel(
            name='Banner',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('uuid', models.CharField(blank=True, max_length=100, null=True, unique=True)),
                ('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('updated_at', models.DateTimeField(auto_now=True, db_index=True)),
                ('title', models.CharField(default='', max_length=200, verbose_name='标题')),
                ('img', models.ImageField(upload_to='banner/', verbose_name='轮播图')),
                ('url', models.URLField(max_length=100, verbose_name='图片链接')),
                ('active', models.CharField(default='', max_length=250, verbose_name='图片状态')),
                ('is_active', models.BooleanField(default=True, verbose_name='是否是有效')),
            ],
            options={
                'verbose_name': '轮播图',
                'verbose_name_plural': '轮播图',
            },
        ),
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('uuid', models.CharField(blank=True, max_length=100, null=True, unique=True)),
                ('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('updated_at', models.DateTimeField(auto_now=True, db_index=True)),
                ('name', models.CharField(max_length=100, verbose_name='文章分类')),
                ('is_active', models.BooleanField(default=True, verbose_name='是否是有效')),
            ],
            options={
                'verbose_name': '文章分类',
                'verbose_name_plural': '文章分类',
            },
        ),
        migrations.AddField(
            model_name='article',
            name='category',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='article_cat', to='blog.Category', verbose_name='分类'),
        ),
        migrations.AddField(
            model_name='article',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='article_user', to=settings.AUTH_USER_MODEL, verbose_name='作者'),
        ),
    ]
|
guoshijiang/we_guitar | blog/migrations/0002_auto_20210628_1353.py | <gh_stars>1-10
# Generated by Django 2.1.2 on 2021-06-28 05:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds the Tag model, a Category icon, makes Article
    excerpt non-blank with a default, and wires Article.tags (M2M).
    Applied migrations should not be hand-edited.
    """

    dependencies = [
        ('blog', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('uuid', models.CharField(blank=True, max_length=100, null=True, unique=True)),
                ('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('updated_at', models.DateTimeField(auto_now=True, db_index=True)),
                ('name', models.CharField(max_length=100, verbose_name='标签')),
                ('is_active', models.BooleanField(default=True, verbose_name='是否有效')),
            ],
            options={
                'verbose_name': '标签表',
                'verbose_name_plural': '标签表',
            },
        ),
        migrations.AddField(
            model_name='category',
            name='icon',
            field=models.ImageField(blank=True, null=True, upload_to='cat/%Y/%m/%d/', verbose_name='分类的Icon'),
        ),
        migrations.AlterField(
            model_name='article',
            name='excerpt',
            field=models.TextField(default='', max_length=200, verbose_name='摘要'),
        ),
        migrations.AddField(
            model_name='article',
            name='tags',
            # null=True here mirrors the model at the time it was generated;
            # it has no effect on a ManyToManyField.
            field=models.ManyToManyField(blank=True, null=True, to='blog.Tag', verbose_name='标签'),
        ),
    ]
|
guoshijiang/we_guitar | common/storage.py | from django.contrib.staticfiles.storage import ManifestStaticFilesStorage
class NoStrictManifestStaticFilesStorage(ManifestStaticFilesStorage):
    """ManifestStaticFilesStorage that tolerates files missing from the manifest.

    With manifest_strict = False, lookups for un-manifested files fall back
    to the original filename instead of raising ValueError at render time.
    """
    manifest_strict = False
|
guoshijiang/we_guitar | common/exceptions.py | <filename>common/exceptions.py
# -*- coding: utf-8 -*-
import logging
from urllib.parse import unquote
class BaseException(Exception):
    """Root of the application's exception hierarchy.

    Subclasses override `message` (a %-format template) and `code`.
    Keyword arguments are stored in `self.kwargs` (with `code` injected)
    and interpolated into the message template.

    NOTE(review): the name shadows the builtin ``BaseException``; renaming
    would be clearer but would break existing importers, so it is kept.
    """
    message = "An unknown exception occurred."
    code = 400

    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs
        # Propagate the class-level code unless the caller supplied one.
        if 'code' not in self.kwargs and hasattr(self, 'code'):
            self.kwargs['code'] = self.code
        if message:
            self.message = unquote(message)
        try:
            self.message = self.message % kwargs
        except Exception:
            # kwargs doesn't match a placeholder in the message.
            # BUG FIX: the original log line promised the kwargs but only
            # logged the message; log both, then re-raise (bare raise keeps
            # the original traceback).
            logging.exception(
                'Exception in string format operation, message: %r, kwargs: %s',
                self.message, kwargs)
            raise
        super(BaseException, self).__init__()

    def __str__(self):
        return self.message
class NotFound(BaseException):
    """Resource lookup failed (HTTP 404)."""
    message = "Resource could not be found."
    code = 404


class AccessForbidden(BaseException):
    """Caller lacks permission (HTTP 403)."""
    message = "Access Forbidden"
    code = 403


class Unauthorized(BaseException):
    """Authentication required or failed (HTTP 401)."""
    message = "Not Authorized"
    code = 401


class Conflict(BaseException):
    """Conflicting state (HTTP 409)."""
    message = 'Conflict.'
    code = 409


class TableCreateError(BaseException):
    """Database table creation failed (app-specific code 1001, not an HTTP status)."""
    message = "Table Create Error"
    code = 1001
|
guoshijiang/we_guitar | common/db_test.py | import time
import json
import logging
from functools import wraps
from django.db import connection #type: ignore
class QueryLogger:
    """`connection.execute_wrapper` hook that times and logs every SQL query.

    `name`/`path` identify the function under test (set by `db_test`).
    BUG FIX: they were only ever injected externally, so a bare QueryLogger
    raised AttributeError in the `finally` block — they now default to ''.
    BUG FIX: `self.queries` was created but never populated; executed
    queries are now collected there as well as logged.
    """

    def __init__(self):
        self.queries = []
        self.name = ''
        self.path = ''

    def __call__(self, execute, sql, params, many, context):
        current_query = {'sql': sql, 'params': params, 'many': many}
        start = time.monotonic()
        try:
            result = execute(sql, params, many, context)
        except Exception as e:
            current_query['status'] = 'error'
            current_query['exception'] = e
            raise
        else:
            current_query['status'] = 'ok'
            return result
        finally:
            duration = time.monotonic() - start
            current_query['duration'] = duration
            current_query['name'] = self.name
            current_query['path'] = self.path
            logger = logging.getLogger('QueryLogger')
            # Stringify everything so the record is JSON-serializable.
            for key in current_query.keys():
                current_query[key] = str(current_query[key])
            logger.info(json.dumps(current_query))
            self.queries.append(current_query)
def db_test(f):
    """Decorator: log (via QueryLogger) every SQL query executed inside *f*."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        import inspect
        import os

        query_logger = QueryLogger()
        query_logger.path = os.path.abspath(inspect.getfile(f))
        query_logger.name = f.__name__
        with connection.execute_wrapper(query_logger):
            return f(*args, **kwargs)

    return wrapper
|
guoshijiang/we_guitar | common/model_fields.py | <gh_stars>1-10
import json
from django.db import models
class DecField(models.DecimalField):
    """DecimalField with project-wide high-precision defaults (65 digits / 30 decimal places)."""
    def __init__(self, **kw):
        kw.setdefault('max_digits', 65)
        kw.setdefault('decimal_places', 30)
        super(DecField, self).__init__(**kw)


class OrderField(models.CharField):
    """CharField defaulting to max_length=32 (used for order-style identifiers, per its name)."""
    def __init__(self, **kwargs):
        kwargs.setdefault('max_length', 32)
        super(OrderField, self).__init__(**kwargs)


class IdField(models.CharField):
    """CharField defaulting to max_length=100 (external/string identifiers)."""
    def __init__(self, **kwargs):
        kwargs.setdefault('max_length', 100)
        super(IdField, self).__init__(**kwargs)
|
guoshijiang/we_guitar | common/templatetags/we_guitar_tag.py | <reponame>guoshijiang/we_guitar
#encoding=utf-8
import time
import pytz
from django import template
from django.conf import settings
register = template.Library()  # filter registry picked up by the template engine


@register.filter(name='hdatetime')
def repr_datetime(value) -> str:
    """Template filter: datetime -> 'YYYY-MM-DD HH:MM:SS' in settings.TIME_ZONE; '' for falsy input."""
    if not value:
        return ''
    tz = pytz.timezone(settings.TIME_ZONE)
    return value.astimezone(tz).strftime('%Y-%m-%d %H:%M:%S')


@register.filter(name='cn_hdatetime')
def cn_hdatetime(value) -> str:
    """Template filter: Chinese short form 'MM月DD日 HH:MM' in settings.TIME_ZONE; '' for falsy input."""
    if not value:
        return ''
    tz = pytz.timezone(settings.TIME_ZONE)
    return value.astimezone(tz).strftime('%m月%d日 %H:%M')
|
guoshijiang/we_guitar | blog/urls.py | from typing import Any, List
from django.contrib import admin
from django.urls import include, path
from blog.views import index, artcle
# Blog routes: '' -> listing (index), 'artcle' -> article detail.
# NOTE(review): the 'artcle' spelling matches the view name in blog.views
# and the reverse() name — fixing it would require coordinated changes.
urlpatterns: List[Any] = [
    path(r'', index, name='index'),
    path(r'artcle', artcle, name='artcle'),
]
guoshijiang/we_guitar | blog/views.py | <gh_stars>1-10
#encoding=utf-8
from django.shortcuts import render
from blog.models import Category, Banner, Article
from common.helpers import paged_items, ok_json
from blog.helper import judge_pc_or_mobile
def index(request):
    """Blog listing page.

    GET params: cat_id (category filter), title (substring search),
    page/page_size (mobile AJAX paging only).
    Desktop renders a paginated template; mobile renders the first 20
    articles, with subsequent pages fetched via AJAX as JSON.
    """
    cat_id = int(request.GET.get('cat_id', 0))
    page = int(request.GET.get('page', 0))
    page_size = int(request.GET.get('page_size', 20))
    title = request.GET.get('title', None)
    user_agt = judge_pc_or_mobile(request.META.get("HTTP_USER_AGENT"))
    cat_list = Category.objects.filter(is_active=True).order_by('-id')
    banner_list = Banner.objects.filter(is_active=True).order_by('-id')[:3]
    article_list = Article.objects.filter(is_active=True).order_by('-id')
    if user_agt is False:
        # Desktop branch.
        if cat_id not in ["0", 0, None]:
            cat = Category.objects.get(id=cat_id)
            article_list = article_list.filter(category=cat, is_active=True).order_by('-id')
        if title not in [None, ""]:
            article_list = article_list.filter(title__icontains=title)
        article_lst = paged_items(request, article_list)
        return render(request, 'web/blog/index.html', locals())
    else:
        # Mobile branch.
        # BUG FIX: the original assigned the filtered queryset to
        # `article_lst` but then sliced the unfiltered `article_list`, so
        # category/title filters were silently ignored on mobile. Filters
        # are now applied to `article_list` itself, as in the desktop branch.
        if cat_id not in ["0", 0, None]:
            cat = Category.objects.get(id=cat_id)
            article_list = article_list.filter(category=cat).order_by('-id')
        if title not in [None, ""]:
            article_list = article_list.filter(title__icontains=title)
        if request.is_ajax():
            # AJAX infinite-scroll page: return a JSON slice.
            start = page * page_size
            end = start + page_size
            artcle_list_ret = [article.return_dict() for article in article_list[start:end]]
            return ok_json(artcle_list_ret)
        else:
            article_lst = article_list[0:20]
            return render(request, 'mobile/blog/index.html', locals())
def artcle(request):
    """Article detail page (desktop or mobile template by user agent).

    NOTE(review): the 'artcle' typo is kept — urls.py routes to this name.
    `Article.objects.get` raises DoesNotExist (HTTP 500) for a missing/zero
    `aid`; consider get_object_or_404.
    """
    aid = int(request.GET.get('aid', 0))
    article = Article.objects.get(id=aid)
    user_agt = judge_pc_or_mobile(request.META.get("HTTP_USER_AGENT"))
    if user_agt is False:
        return render(request, 'web/blog/arctcle.html', locals())
    else:
        return render(request, 'mobile/blog/arctcle.html', locals())
guoshijiang/we_guitar | common/paginator.py | #encoding=utf-8
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
from django.core.paginator import Page, Paginator
class MyPaginator(Paginator):
    """Paginator that carries `adjacent_pages` through to MyPage for
    digg-style floating page ranges."""

    def __init__(self, object_list: Iterable[Any], per_page: int, orphans: int = 0,
                 allow_empty_first_page: bool = True, adjacent_pages: int = 0) -> None:
        self.adjacent_pages = adjacent_pages
        super(MyPaginator, self).__init__(object_list, per_page, orphans, allow_empty_first_page)

    # Copied whole parent function returning a MyPage instead. Ergh. Better way of doing this?
    def page(self, number):
        "Returns a Page object for the given 1-based page number."
        number = self.validate_number(number)
        bottom = (number - 1) * self.per_page
        top = bottom + self.per_page
        if top + self.orphans >= self.count:
            top = self.count
        return MyPage(self.object_list[bottom:top], number, self, self.adjacent_pages)
class MyPage(Page):
    """Page subclass exposing `page_range_data`: a floating ("digg-style")
    window of page numbers around the current page, plus flags telling the
    template whether to render explicit first/last links."""

    def __init__(self, object_list: Iterable[Any], number: int, paginator: Paginator, adjacent_pages: int = 0):
        self.adjacent_pages = adjacent_pages
        super(MyPage, self).__init__(object_list, number, paginator)

    def _get_page_range_data(self):
        """
        Returns a floating digg-style or 1-based range of pages for
        iterating through within a template for loop.
        """
        # adjacent_pages == 0 disables the floating window entirely.
        if not self.adjacent_pages:
            return self.paginator.page_range
        startPage = max(1, self.number - self.adjacent_pages)
        # Be a bit smarter about start page
        if startPage <= 3: startPage = 1
        endPage = self.number + self.adjacent_pages + 1
        # Be a bit smarter about end page
        if endPage >= self.paginator.num_pages - 1: endPage = self.paginator.num_pages + 1
        # NOTE(review): `self.paginator.count` below is the total *item*
        # count; `num_pages` looks like the intended bound. Harmless in
        # practice except for empty querysets — confirm before changing.
        page_range = [n for n in range(startPage, endPage) \
            if n > 0 and n <= self.paginator.count]
        return {
            'page_range': page_range,
            'show_first': page_range and 1 not in page_range,
            'show_last': page_range and self.paginator.num_pages not in page_range,
        }
    page_range_data = property(_get_page_range_data)
|
guoshijiang/we_guitar | common/decorators.py | <reponame>guoshijiang/we_guitar
import asyncio
import time
from functools import wraps
from typing import Any, Callable

from django.conf import settings
from django.contrib.auth.models import Permission
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect

from common.helpers import getLogger
logger = getLogger(__name__)
def permission_required(permission: Permission) -> Callable:
    """Decorator factory: run the view only if request.user has *permission*;
    otherwise return a bare HTTP 403 response.

    BUG FIX: the inner wrapper now carries @wraps so the decorated view keeps
    its __name__/__doc__ (matters for URL introspection and debugging).
    """
    def _decorator(func):
        @wraps(func)
        def __w(request: HttpRequest, *args, **kw):
            user = request.user
            if user.has_perm(permission):
                return func(request, *args, **kw)
            return HttpResponse('Forbidden', status=403)
        return __w
    return _decorator
def retry_on() -> Callable:
    """Decorator factory: retry an async callable on any exception.

    The wrapped coroutine accepts an extra keyword ``max_retry`` (default 1)
    giving the number of retries after the first failure; retries are spaced
    one second apart and the last exception is re-raised once exhausted.
    """
    def _retry(func):
        @wraps(func)
        async def inner(*args, **kwargs):
            max_retry = kwargs.pop('max_retry', 1)
            retry = 0
            while True:
                try:
                    return await func(*args, **kwargs)
                except Exception as e:
                    if retry >= max_retry:
                        raise
                    logger.warning('%s, proceed to retry.', e)
                    retry += 1
                    # BUG FIX: time.sleep() blocked the whole event loop
                    # inside a coroutine; asyncio.sleep yields control.
                    await asyncio.sleep(1)
        return inner
    return _retry
|
guoshijiang/we_guitar | common/helpers.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import functools
import hashlib
import logging
import sys
import time
import pytz
from bisect import bisect
from datetime import datetime, timezone
from decimal import ROUND_FLOOR, ROUND_UP
from decimal import Context as DecimalContext
from decimal import Decimal, InvalidOperation
from typing import Any, Dict, List, Optional, Tuple, Union
from urllib.parse import urlencode
from django.core.paginator import EmptyPage
from django.http import HttpRequest, JsonResponse
from django.db.models import Sum
from django.utils.timezone import localtime, now
from common.paginator import MyPaginator
from django.conf import settings
def getLogger(name):
    """Logger factory; loggers are disabled while running `manage.py test`."""
    logger = logging.getLogger(name)
    if len(sys.argv) > 1 and sys.argv[1] == "test":
        logger.disabled = True
    return logger


def get_hostname() -> str:
    """Current machine's hostname."""
    import socket
    return socket.gethostname()


def get_processid() -> int:
    """Current OS process id."""
    import os
    return os.getpid()


def make_timestamp() -> float:
    """Current Unix time in milliseconds."""
    return time.time() * 1000


def time_to_str(time_time: Optional[float] = None, tz: str = "Asia/Shanghai") -> str:
    """Render a Unix timestamp (default: now) as a datetime string in *tz*."""
    time_time = time_time or time.time()
    dt = datetime.fromtimestamp(time_time).astimezone(pytz.timezone(tz))
    return str(dt)


def ok_json(data: Any, code: int = 200) -> JsonResponse:
    """Success envelope: {"ok": true, "code": ..., "result": ...}."""
    return JsonResponse({"ok": True, "code": code, "result": data})


def keep_two_place(value):
    """Quantize to two decimal places; integral results drop the '.00'.

    Falsy input (None, 0, '') returns the string "0"; otherwise a Decimal.
    """
    if not value:
        return "0"
    dec_value = Decimal(value).quantize(Decimal("0.00"))
    return (
        dec_value.to_integral()
        if dec_value == dec_value.to_integral()
        else dec_value.normalize()
    )


def error_json(msg: str, code: int = -1, status: int = 200) -> JsonResponse:
    """Error envelope: {"ok": false, "code": ..., "msg": ...}."""
    return JsonResponse({"ok": False, "code": code, "msg": msg, }, status=status)


def floor_decimal(amount: Decimal, digits: int = 18) -> Decimal:
    """Quantize to *digits* places with ROUND_FLOOR (toward -infinity)."""
    return amount.quantize(
        Decimal("1E-%d" % digits), context=DecimalContext(prec=60, rounding=ROUND_FLOOR)
    )


def up_decimal(amount: Decimal, digits: int = 18) -> Decimal:
    """Quantize to *digits* places with ROUND_UP (away from zero)."""
    return amount.quantize(
        Decimal("1E-%d" % digits), context=DecimalContext(prec=60, rounding=ROUND_UP)
    )


def round_decimal(amount: Decimal, digits: int = 18) -> Decimal:
    """Quantize to *digits* places with the context's default rounding."""
    return amount.quantize(Decimal("1E-%d" % digits), context=DecimalContext(prec=60))
# Allowed page-size / query-limit steps, ascending.
limit_steps: List[int] = [5, 10, 20, 50, 100, 500, 1000, 5000]


def search_limit(limit: int) -> int:
    """Map an arbitrary requested limit onto the next allowed step.

    BUG FIX: `bisect` (bisect_right) returns the index *past* an exact
    match, so search_limit(5000) indexed one past the end of `limit_steps`
    and raised IndexError. An exact top-step match now returns the last
    step; all other inputs behave exactly as before.
    """
    limit = max(0, min(limit, 5000))
    idx = bisect(limit_steps, limit)
    return limit_steps[idx] if idx < len(limit_steps) else limit_steps[-1]
def dec(value: Any, default: Any = "0", digits: int = 18) -> Decimal:
    """Best-effort conversion of *value* to a floor-quantized Decimal.

    Falls back to Decimal(*default*) on InvalidOperation/TypeError.
    """
    try:
        # if isinstance(value, float):
        #     value = str(value)
        if isinstance(value, Decimal):
            return floor_decimal(value, digits=digits)
        else:
            return floor_decimal(Decimal(value), digits=digits)
    except (InvalidOperation, TypeError):
        return Decimal(default)


parse_decimal = dec  # legacy alias

# Commonly used Decimal constants.
d0: Decimal = dec("0")
d1 = dec("1")
d2 = dec("2")
d10 = dec("10")
d100 = dec("100")
d200 = dec("200")
d1000 = dec("1000")
d1_000 = dec("1000")
d1k = d1000
d10000 = dec("10000")
d10_000 = dec("10000")
d1m = dec("1_000_000")


def dec05up(a: Decimal) -> Decimal:
    """Round *a* up to the nearest 0.5 step (doubles, ROUND_UP to int, halves)."""
    half = dec("0.5", digits=1)
    floored = up_decimal(a + a, digits=0)
    return floored * half


def dec05floor(a: Decimal) -> Decimal:
    """Round *a* down to the nearest 0.5 step."""
    half = dec("0.5", digits=1)
    floored = floor_decimal(a + a, digits=0)
    return floored * half


dec05 = dec05floor  # default 0.5-rounding is the floor variant


def mod_decimal(amount: Decimal, div: Decimal) -> Tuple[Decimal, Decimal]:
    """Split *amount* into (largest multiple of *div* not exceeding it, remainder)."""
    divided = floor_decimal(amount / div, digits=0) * div
    remainder = amount - divided
    return divided, remainder


def _xx_decprice(value: Any) -> Decimal:
    """6-decimal-place price parser. NOTE(review): apparently superseded —
    the public `decprice` alias below points at `dec` (18 digits), not here."""
    return dec(value, digits=6)


decprice = dec
def decstr(value: Union[Decimal, float], round_number=None) -> str:
    """Render a Decimal (or float) as a plain string without trailing zeros.

    When *round_number* is given, the value is first quantized to that many
    decimal places. A result of "-0" is normalized to "0".
    """
    if isinstance(value, float):
        value = Decimal(value)
    if round_number is not None:
        quantum = Decimal("0." + "0" * round_number)
        value = value.quantize(quantum)
    text = "{:f}".format(value)
    if "." in text:
        text = text.rstrip("0").rstrip(".")
    return "0" if text == "-0" else text
# Zero quantized to 8 decimal places.
# NOTE(review): no usage is visible in this file; confirm callers before changing.
MIN = dec("0", digits=8)
def parse_int(v, default=0):
    """Coerce *v* to int, returning *default* when conversion fails."""
    try:
        return int(v)
    except (ValueError, TypeError):
        return default
def get_page(request: HttpRequest) -> int:
    """1-based page number from ?page=, clamped to >= 1."""
    page = parse_int(request.GET.get("page", 1), 1)
    if page < 1:
        page = 1
    return page


PAGE_SIZE = 20  # default items per page


def paged_items(request: HttpRequest, qs, pagesize=PAGE_SIZE, page_cls=MyPaginator):
    """Paginate *qs* for *request*; out-of-range pages fall back to the last page.

    Also attaches `prefix_uri` (current path plus the non-page query args)
    to the page object so templates can build page links.
    """
    paginator = page_cls(qs, pagesize, adjacent_pages=3)
    page = get_page(request)
    try:
        items = paginator.page(page)
    except EmptyPage:
        items = paginator.page(paginator.num_pages)
    args = {}
    for key, value in request.GET.items():
        if key != "page":
            args[key] = value.encode("utf-8")
    if len(args) == 0:
        items.prefix_uri = request.path + "?"
    else:
        items.prefix_uri = request.path + "?" + urlencode(args) + "&"
    return items


def sleep(sleep_time: float) -> None:
    """Thin wrapper over time.sleep."""
    time.sleep(sleep_time)


def utc_now() -> datetime:
    """Current time via django.utils.timezone.now()."""
    return now()


def current_now() -> datetime:
    """utc_now() converted to the active local timezone."""
    return localtime(utc_now())


def timestamp_to_utc(time_stamp):
    """Naive UTC datetime from a Unix timestamp."""
    return datetime.utcfromtimestamp(time_stamp)
def retry(func):
    """Call *func* up to 3 times, 1s apart, until it returns a truthy value.

    NOTE(review): falsy-but-valid results (0, '', []) also trigger retries,
    and after 3 failed attempts the wrapper silently returns None — callers
    must tolerate None.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        for i in range(3):
            r = func(*args, **kwargs)
            if r:
                return r
            else:
                time.sleep(1)
    return wrapper
def datetime2utctimestamp(datetime):
    """Unix timestamp from a datetime assumed to already be in UTC.

    NOTE(review): the parameter shadows the `datetime` name, and
    `replace(tzinfo=utc)` relabels rather than converts — an aware non-UTC
    input would yield a wrong result. Assumes naive-UTC input; confirm at callers.
    """
    timestamp = datetime.replace(tzinfo=timezone.utc).timestamp()
    return timestamp
def md5_crypt(txt: str) -> str:
    """Hex MD5 digest of *txt* (UTF-8 encoded).

    NOTE: MD5 is not collision-resistant; do not use for anything
    security-sensitive.
    """
    return hashlib.md5(txt.encode("utf8")).hexdigest()
def utc_timestamp() -> int:
    """Current Unix timestamp in whole seconds.

    BUG FIX: strftime("%s") is a non-portable glibc extension (absent on
    Windows and oblivious to tzinfo); datetime.timestamp() is the correct,
    portable equivalent.
    """
    return int(utc_now().timestamp())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.