content stringlengths 5 1.05M |
|---|
# Copyright (c) 2015, Zhenwen Dai
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import abc
import os
import numpy as np
import scipy.io
class AutoregTask(object):
    """Abstract base class for system-identification autoregression tasks.

    Subclasses implement :meth:`load_data`, which must populate the four
    train/test input/output arrays together with the autoregression window
    sizes ``win_in`` and ``win_out``.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, datapath=os.path.join(os.path.dirname(__file__),'../../datasets/system_identification')):
        # Directory where the raw dataset files are expected to live.
        self.datapath = datapath

    def _enforce_2d(self):
        # Promote any 1-D data array to a column vector of shape (n, 1).
        for attr in ('data_in_train', 'data_out_train',
                     'data_in_test', 'data_out_test'):
            arr = getattr(self, attr)
            if arr.ndim == 1:
                setattr(self, attr, arr[:, None])

    @abc.abstractmethod
    def load_data(self):
        """Download the dataset if not exist. Return True if successful"""
        return True

    def get_training_data(self):
        """Return the training split as (inputs, outputs)."""
        return self.data_in_train, self.data_out_train

    def get_test_data(self):
        """Return the test split as (inputs, outputs)."""
        return self.data_in_test, self.data_out_test
class IdentificationExample(AutoregTask):
    """Toy identification dataset: one series, first 150 samples train."""
    name='IdentificationExample'
    filename = 'identificationExample.mat'

    def load_data(self):
        """Load the .mat file and split into train/test halves."""
        mat = scipy.io.loadmat(os.path.join(self.datapath, self.filename))
        u, y = mat['u'], mat['y']
        self.data_in, self.data_out = u, y
        self.win_in, self.win_out = 1, 1
        self.split_point = split = 150
        self.data_in_train, self.data_in_test = u[:split], u[split:]
        self.data_out_train, self.data_out_test = y[:split], y[split:]
        self._enforce_2d()
        return True
class IdentificationExample1(AutoregTask):
    """Toy identification dataset with pre-split train/test series."""
    name='IdentificationExample1'
    filename = 'identificationExample1.mat'

    def load_data(self):
        """Load the .mat file; 'u'/'y' are train, 'uNew'/'yNew' are test."""
        mat = scipy.io.loadmat(os.path.join(self.datapath, self.filename))
        self.data_in_train, self.data_out_train = mat['u'], mat['y']
        self.data_in_test, self.data_out_test = mat['uNew'], mat['yNew']
        self.win_in, self.win_out = 1, 1
        self._enforce_2d()
        return True
class IdentificationExample2(AutoregTask):
    """Toy identification dataset with pre-split train/test series."""
    name='IdentificationExample2'
    filename = 'identificationExample2.mat'

    def load_data(self):
        """Load the .mat file; 'u'/'y' are train, 'uNew'/'yNew' are test."""
        mat = scipy.io.loadmat(os.path.join(self.datapath, self.filename))
        self.data_in_train, self.data_out_train = mat['u'], mat['y']
        self.data_in_test, self.data_out_test = mat['uNew'], mat['yNew']
        self.win_in, self.win_out = 1, 2
        self._enforce_2d()
        return True
class IdentificationExample3(AutoregTask):
    """Toy identification dataset with pre-split train/test series."""
    name='IdentificationExample3'
    filename = 'identificationExample3.mat'

    def load_data(self):
        """Load the .mat file; 'u'/'y' are train, 'uNew'/'yNew' are test."""
        mat = scipy.io.loadmat(os.path.join(self.datapath, self.filename))
        self.data_in_train, self.data_out_train = mat['u'], mat['y']
        self.data_in_test, self.data_out_test = mat['uNew'], mat['yNew']
        self.win_in, self.win_out = 1, 1
        self._enforce_2d()
        return True
class IdentificationExample4(AutoregTask):
    """Toy identification dataset with pre-split train/test series."""
    name='IdentificationExample4'
    filename = 'identificationExample4.mat'

    def load_data(self):
        """Load the .mat file; 'u'/'y' are train, 'uNew'/'yNew' are test."""
        mat = scipy.io.loadmat(os.path.join(self.datapath, self.filename))
        self.data_in_train, self.data_out_train = mat['u'], mat['y']
        self.data_in_test, self.data_out_test = mat['uNew'], mat['yNew']
        self.win_in, self.win_out = 1, 2
        self._enforce_2d()
        return True
class IdentificationExample5(AutoregTask):
    """Toy identification dataset with pre-split train/test series."""
    name='IdentificationExample5'
    filename = 'identificationExample5.mat'

    def load_data(self):
        """Load the .mat file; 'u'/'y' are train, 'uNew'/'yNew' are test."""
        mat = scipy.io.loadmat(os.path.join(self.datapath, self.filename))
        self.data_in_train, self.data_out_train = mat['u'], mat['y']
        self.data_in_test, self.data_out_test = mat['uNew'], mat['yNew']
        self.win_in, self.win_out = 5, 5
        self._enforce_2d()
        return True
class Actuator(AutoregTask):
    """Hydraulic actuator benchmark: input 'u', output 'p', 512 train samples."""
    name='actuator'
    filename = 'actuator.mat'

    def load_data(self):
        """Load the .mat file and split into train/test at sample 512."""
        mat = scipy.io.loadmat(os.path.join(self.datapath, self.filename))
        u, y = mat['u'], mat['p']
        self.data_in, self.data_out = u, y
        self.win_in, self.win_out = 10, 10
        self.split_point = split = 512
        self.data_in_train, self.data_in_test = u[:split], u[split:]
        self.data_out_train, self.data_out_test = y[:split], y[split:]
        self._enforce_2d()
        return True
class Ballbeam(AutoregTask):
    """Ball-and-beam benchmark: two-column text file, 500 train samples."""
    name='ballbeam'
    filename = 'ballbeam.dat'

    def load_data(self):
        """Load the whitespace-delimited file and split at sample 500."""
        raw = np.loadtxt(os.path.join(self.datapath, self.filename))
        u, y = raw[:, 0], raw[:, 1]
        self.data_in, self.data_out = u, y
        self.win_in, self.win_out = 10, 10
        self.split_point = split = 500
        self.data_in_train, self.data_in_test = u[:split], u[split:]
        self.data_out_train, self.data_out_test = y[:split], y[split:]
        self._enforce_2d()
        return True
class Drive(AutoregTask):
    """Electrical drive benchmark: input 'u1', output 'z1', 250 train samples."""
    name='drive'
    filename = 'drive.mat'

    def load_data(self):
        """Load the .mat file and split into train/test at sample 250."""
        mat = scipy.io.loadmat(os.path.join(self.datapath, self.filename))
        u, y = mat['u1'], mat['z1']
        self.data_in, self.data_out = u, y
        self.win_in, self.win_out = 10, 10
        self.split_point = split = 250
        self.data_in_train, self.data_in_test = u[:split], u[split:]
        self.data_out_train, self.data_out_test = y[:split], y[split:]
        self._enforce_2d()
        return True
class Gas_furnace(AutoregTask):
    """Box-Jenkins gas furnace benchmark: CSV with header, 148 train samples."""
    name='gas_furnace'
    filename = 'gas_furnace.csv'

    def load_data(self):
        """Load the CSV (skipping the header row) and split at sample 148."""
        raw = np.loadtxt(os.path.join(self.datapath, self.filename),skiprows=1,delimiter=',')
        u, y = raw[:, 0], raw[:, 1]
        self.data_in, self.data_out = u, y
        self.win_in, self.win_out = 3, 3
        self.split_point = split = 148
        self.data_in_train, self.data_in_test = u[:split], u[split:]
        self.data_out_train, self.data_out_test = y[:split], y[split:]
        self._enforce_2d()
        return True
class Flutter(AutoregTask):
    """Wing flutter benchmark: two-column text file, 512 train samples."""
    name='flutter'
    filename = 'flutter.dat'

    def load_data(self):
        """Load the whitespace-delimited file and split at sample 512."""
        raw = np.loadtxt(os.path.join(self.datapath, self.filename))
        u, y = raw[:, 0], raw[:, 1]
        self.data_in, self.data_out = u, y
        self.win_in, self.win_out = 5, 5
        self.split_point = split = 512
        self.data_in_train, self.data_in_test = u[:split], u[split:]
        self.data_out_train, self.data_out_test = y[:split], y[split:]
        self._enforce_2d()
        return True
class Dryer(AutoregTask):
    """Hair dryer benchmark: two-column text file, 500 train samples."""
    name='dryer'
    filename = 'dryer.dat'

    def load_data(self):
        """Load the whitespace-delimited file and split at sample 500."""
        raw = np.loadtxt(os.path.join(self.datapath, self.filename))
        u, y = raw[:, 0], raw[:, 1]
        self.data_in, self.data_out = u, y
        self.win_in, self.win_out = 2, 2
        self.split_point = split = 500
        self.data_in_train, self.data_in_test = u[:split], u[split:]
        self.data_out_train, self.data_out_test = y[:split], y[split:]
        self._enforce_2d()
        return True
class Tank (AutoregTask):
    """Cascaded tanks benchmark: .mat stores series row-wise, hence the .T."""
    name='tank'
    filename = 'tank.mat'

    def load_data(self):
        """Load the .mat file (transposing to column series), split at 1250."""
        mat = scipy.io.loadmat(os.path.join(self.datapath, self.filename))
        u, y = mat['u'].T, mat['y'].T
        self.data_in, self.data_out = u, y
        self.win_in, self.win_out = 1, 3
        self.split_point = split = 1250
        self.data_in_train, self.data_in_test = u[:split], u[split:]
        self.data_out_train, self.data_out_test = y[:split], y[split:]
        self._enforce_2d()
        return True
# Registry of benchmark tasks for batch experiments.
# NOTE(review): Tank is defined above but not listed here — confirm whether
# its omission is intentional.
all_tasks = [IdentificationExample, IdentificationExample1, IdentificationExample2, IdentificationExample3, IdentificationExample4, IdentificationExample5,
             Actuator, Ballbeam, Drive, Gas_furnace, Flutter, Dryer]
|
from scripts.ssc.models.TopoAE import test_grid_euler
from src.models.TopoAE.train_engine import simulator_TopoAE
from joblib import Parallel, delayed
if __name__ == "__main__":
    # Launch four identical TopoAE simulations in parallel worker processes.
    configs = [test_grid_euler] * 4
    Parallel(n_jobs=4)(delayed(simulator_TopoAE)(cfg) for cfg in configs)
import psutil
def kill_process_with_children(parent_pid):
    """Kill the process with PID ``parent_pid`` and all of its descendants.

    Children are discovered recursively and killed before the parent.
    Processes that exit on their own between enumeration and the kill call
    are silently skipped, so the function does not crash on that race.
    """
    try:
        parent = psutil.Process(parent_pid)
    except psutil.NoSuchProcess:
        return  # Parent already gone: nothing to do.
    for child in parent.children(recursive=True):
        try:
            child.kill()
        except psutil.NoSuchProcess:
            pass  # Child exited before we could kill it.
    try:
        parent.kill()
    except psutil.NoSuchProcess:
        pass
#
# Copyright (c) 2013, Centre National de la Recherche Scientifique (CNRS)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import requests
import unittest
map_view2 = {"views":
{"by_runningvms":
{"map":
'''function (doc, meta) {
if (meta.id.indexOf("Accounting") == 0 && doc.state == "running")
{
emit(doc.uuid, null);
}
}'''
},
}
}
map_view = {"views":
{"by_id":
{"map":
'''function (doc, meta) {
if (meta.id.indexOf("Accounting") == 0)
{
emit(meta.id, null);
}
}'''
},
}
}
class ConsolidationPublishTest(unittest.TestCase):
def setUp(self):
self.create_view('dev_byid', map_view)
self.create_view('dev_byuuid',map_view2)
def tearDown(self):
pass
def create_view(self, design_doc,mapview,host='localhost',bucket='default'):
"""
Create view using REST API calls
"""
view_url='http://%s:8092/%s/_design/%s' % (host,bucket,design_doc)
print "view_url=", view_url
data=json.dumps(mapview)
headers = {'content-type': 'application/json'}
r = requests.put(view_url, data=data, headers=headers)
print r.text
def delete_view(self, design_doc,host='localhost',bucket='default'):
"""
Delete view using REST API calls
"""
view_url='http://%s:8092/%s/_design/%s' % (self.host,self.bucket,self.design_doc)
headers = {'content-type': 'application/json'}
r = requests.delete(view_url, headers=headers)
print r.text
|
import tensorflow as tf
from slim import ops
from slim import scopes
import numpy as np
import np_helper
import losses
import models.model_helper as model_helper
from models.model_helper import convolve, read_vgg_init
FLAGS = tf.app.flags.FLAGS
def inference(inputs, is_training=True):
  """Build the multi-scale, shared-weight VGG-style segmentation network.

  The input image is resized to four scales, each scale is pushed through
  the same VGG-16 convolutional trunk (shared variables), the per-scale
  feature maps are upsampled to the largest scale and concatenated, and a
  small head produces per-pixel class scores.

  Args:
    inputs: 4-D image tensor (batch, height, width, channels).
    is_training: Python bool. When True, VGG initialization weights are
      loaded and batch norm / dropout run in training mode.

  Returns:
    logits_up: per-pixel class scores bilinearly upsampled back to
    (FLAGS.img_height, FLAGS.img_width).
  """
  if is_training:
    vgg_layers, vgg_layer_names = read_vgg_init(FLAGS.vgg_init_dir)
  else:
    vgg_layers = None
    vgg_layer_names = None
  conv1_sz = 64
  conv2_sz = 128
  conv3_sz = 256
  conv4_sz = 512
  conv5_sz = 512
  k = 3
  bn_params = {
      # Decay for the moving averages.
      'decay': 0.999,
      # epsilon to prevent 0s in variance.
      'epsilon': 0.001,
      'center': False,
      'scale': False,
  }

  def build_shared_net(inputs):
    # VGG-16 convolutional trunk; variables are shared across all scales.
    net = convolve(inputs, conv1_sz, k, 'conv1_1', vgg_layers)
    net = convolve(net, conv1_sz, k, 'conv1_2', vgg_layers)
    net = ops.max_pool(net, [2, 2], scope='pool1')
    net = convolve(net, conv2_sz, k, 'conv2_1', vgg_layers)
    net = convolve(net, conv2_sz, k, 'conv2_2', vgg_layers)
    net = ops.max_pool(net, [2, 2], scope='pool2')
    net = convolve(net, conv3_sz, k, 'conv3_1', vgg_layers)
    net = convolve(net, conv3_sz, k, 'conv3_2', vgg_layers)
    conv3_3 = convolve(net, conv3_sz, k, 'conv3_3', vgg_layers)
    net = ops.max_pool(conv3_3, [2, 2], scope='pool3')
    net = convolve(net, conv4_sz, k, 'conv4_1', vgg_layers)
    net = convolve(net, conv4_sz, k, 'conv4_2', vgg_layers)
    net = convolve(net, conv4_sz, k, 'conv4_3', vgg_layers)
    net = ops.max_pool(net, [2, 2], scope='pool4')
    net = convolve(net, conv5_sz, k, 'conv5_1', vgg_layers)
    net = convolve(net, conv5_sz, k, 'conv5_2', vgg_layers)
    net = convolve(net, conv5_sz, k, 'conv5_3', vgg_layers)
    return net

  # best so far = 0.0005
  with scopes.arg_scope([ops.conv2d, ops.fc], stddev=0.01, weight_decay=0.0005):
    with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
                          is_training=is_training):
      scale_factors = [1.2, 0.9, 0.6, 0.3]
      resolutions = model_helper.get_multiscale_resolutions(
          FLAGS.img_width, FLAGS.img_height, scale_factors)
      print(resolutions)
      input0 = tf.image.resize_bilinear(inputs, resolutions[0], name='resize_level0')
      input1 = tf.image.resize_bilinear(inputs, resolutions[1], name='resize_level1')
      input2 = tf.image.resize_bilinear(inputs, resolutions[2], name='resize_level2')
      # BUG FIX: level 3 previously reused resolutions[2], so net3 was an
      # exact duplicate of net2 (same input, shared weights). Use the
      # fourth scale as intended by scale_factors.
      input3 = tf.image.resize_bilinear(inputs, resolutions[3], name='resize_level3')
      with tf.variable_scope('shared') as scope:
        net0 = build_shared_net(input0)
        scope.reuse_variables()
        net1 = build_shared_net(input1)
        net2 = build_shared_net(input2)
        net3 = build_shared_net(input3)
      # Upsample the smaller scales to the level-0 feature resolution and
      # concatenate along channels.
      level0_shape = net0.get_shape()
      level0_shape = [level0_shape[1].value, level0_shape[2].value]
      print(level0_shape)
      net1 = tf.image.resize_nearest_neighbor(net1, level0_shape, name='upsample_level1')
      net2 = tf.image.resize_nearest_neighbor(net2, level0_shape, name='upsample_level2')
      net3 = tf.image.resize_nearest_neighbor(net3, level0_shape, name='upsample_level3')
      net = tf.concat(3, [net0, net1, net2, net3])
      # Classification head with batch norm on the fused features.
      with scopes.arg_scope([ops.conv2d, ops.fc], batch_norm_params=bn_params, weight_decay=0.0005):
        net = convolve(net, 1024, 7, 'conv6_1')
        net = convolve(net, 512, 3, 'conv6_2')
        net = convolve(net, 512, 3, 'conv6_3')
        net = convolve(net, 512, 1, 'fc7')
      net = convolve(net, FLAGS.num_classes, 1, 'score', activation=None)
      logits_up = tf.image.resize_bilinear(net, [FLAGS.img_height, FLAGS.img_width],
                                           name='resize_scores')
  return logits_up
def loss(logits, labels, weights, num_labels, is_training=True):
  """Build the total training loss: weighted cross-entropy + regularization.

  During training the returned tensor also depends on the loss-averaging
  summary op, so evaluating it updates the loss summaries.
  """
  data_loss = losses.weighted_cross_entropy_loss(logits, labels, weights, num_labels)
  # Sum the data term with the collected regularization losses.
  total_loss = losses.total_loss_sum([data_loss])
  if not is_training:
    return total_loss
  loss_averages_op = losses.add_loss_summaries(total_loss)
  with tf.control_dependencies([loss_averages_op]):
    return tf.identity(total_loss)
|
'''
Functions for working with generic dual-input bi-exponential models (DIBEM)
This general function form can be used by the two-compartment exchange model
(2CXM), two compartment filtration module and the active-uptake and efflux model (AUEM).
In each case the specific model needs to define how to convert its physiological
parameters into the 4 parameters F+, F_, K+, K_ of a model IRF:
I(t) = F+ . exp(-t . K+) + F_ . exp(-t . K_)
This model IRF is then convolved with a vascular input function Cp(t), where
Cp(t) can either be single supply, typically assumed to be arterial only,
Cp(t) = Ca(t). Or as a mix of two supplies, typically assumed to be
arterial and venous (eg for the liver, supplied by the hepatic portal vein)
Cp(t) = fa.Ca(t) + (1- fa).Cv(t) where fa is the arterial fraction (0 <= fa <= 1).
'''
import numpy as np
from QbiPy.dce_models import dce_aif
from QbiPy import helpers
#
#-------------------------------------------------------------------------------
def concentration_from_model(aif:dce_aif.Aif,
    F_pos: np.array, F_neg: np.array, K_pos: np.array, K_neg: np.array,
    f_a: np.array, tau_a: np.array, tau_v: np.array)->np.array:
    '''
    Compute concentration time-series from DIBEM model parameters.

    The IRF is I(t) = F_pos.exp(-t.K_pos) + F_neg.exp(-t.K_neg), convolved
    with the mixed plasma input Cp(t) = f_a*Ca(t) + (1-f_a)*Cv(t).

    Inputs:
        aif (Aif object):
            object to store and resample arterial input function values (1 for each time point)
        F_pos, F_neg, K_pos, K_neg: np.array (1D n_samples)
            bi-exponential IRF parameters
        f_a: np.array (1D n_samples)
            Arterial mixing fraction, final plasma input is Cp(t) = f_a*Ca(t) + (1-f_a)*Cv(t)
        tau_a: np.array (1D n_samples)
            offset times of arrival for concentration for Ca_t
        tau_v: np.array (1D n_samples)
            offset times of arrival for concentration for Cv_t
    Outputs:
        C_model (2D numpy array, n_samples x n_t) - Model concentrations at each time point for each
        voxel computed from model parameters
    '''
    #We allow the model parameters to be scalar, whilst also accepting higher dimension arrays
    n_vox,F_pos,F_neg,K_pos,K_neg,f_a,tau_a,tau_v = helpers.check_param_size(
        F_pos=F_pos,F_neg=F_neg,K_pos=K_pos,K_neg=K_neg,
        f_a=f_a,tau_a=tau_a,tau_v=tau_v
    )
    t = aif.times_
    n_t = t.size
    #Resample the AIF only when some sample has a non-zero arterial
    #fraction (the original code performed one redundant unconditional
    #resample_AIF call here, immediately discarded or duplicated below)
    use_arterial = np.any(f_a)
    if use_arterial:
        Ca_t = aif.resample_AIF(tau_a) #nv x nt
    else:
        Ca_t = np.zeros((n_vox,n_t))
    f_v = 1 - f_a
    if np.any(f_v):
        Cv_t = aif.resample_PIF(tau_v, ~use_arterial, True)
    else:
        Cv_t = np.zeros((n_vox,n_t))
    #Irf is of form: I(t) = F_pos.exp(-tK_pos) + F_neg.exp(-tK_neg)
    #C(t) = I(t) ** Cp(t), computed incrementally by exponential convolution
    C_t = np.zeros((n_vox,n_t))
    Ft_pos = 0
    Ft_neg = 0
    Cp_t0 = f_a*Ca_t[:,0] + f_v * Cv_t[:,0]
    for i_t in range(1, n_t):
        #Compute combined arterial and venous input for this time
        Cp_t1 = f_a*Ca_t[:,i_t] + f_v * Cv_t[:,i_t] #n_v,1
        delta_t = t[i_t] - t[i_t-1]
        Ft_pos = helpers.exp_conv(K_pos, delta_t, Cp_t1, Cp_t0, Ft_pos)
        Ft_neg = helpers.exp_conv(K_neg, delta_t, Cp_t1, Cp_t0, Ft_neg)
        #Combine the two exponentials in the final concentration
        C = F_neg * Ft_neg / K_neg + F_pos * Ft_pos / K_pos
        #Guard against 0/0 when a rate constant is zero
        C[np.isnan(C)] = 0
        C_t[:,i_t] = C
        Cp_t0 = Cp_t1
    return C_t
#
#---------------------------------------------------------------------------
def construct_LLS_matrix(Ctis_t:np.array, aif:dce_aif.Aif, f_a:float, tau_a:float, tau_v:float):
    '''
    Make a matrix for linear least-squares (LLS) solving for a single tissue time-series.

    Columns are the negated first and second integrals of the tissue
    concentration and the second and first integrals of the plasma input.

    Inputs:
        Ctis_t: np.array (num_times)
            time-series of signal derived CA concentration
        aif (Aif object):
            object to store and resample arterial input function values (1 for each time point)
        f_a: float
            Arterial mixing fraction, final plasma input is Cp(t) = f_a*Ca(t) + (1-f_a)*Cv(t)
        tau_a: float
            offset times of arrival for concentration for Ca_t
        tau_v: float
            offset times of arrival for concentration for Cv_t
    Outputs:
        A_:np.array (num_times x 4)
            Matrix for LLS solver
    '''
    t = aif.times_
    f_v = 1.0 - f_a
    # Only resample the input(s) that actually contribute to Cp(t).
    if not f_v:
        Cp_t = aif.resample_AIF(tau_a)[0,]
    elif not f_a:
        Cp_t = aif.resample_PIF(tau_v, True, True)[0,]
    else:
        Ca_t = aif.resample_AIF(tau_a)[0,]
        Cv_t = aif.resample_PIF(tau_v, False, True)[0,]
        Cp_t = f_a*Ca_t + (1 - f_a)*Cv_t
    n_t = aif.num_times()
    A_ = np.zeros((n_t,4))
    # First and second cumulative trapezoidal integrals of input and tissue.
    Cp_t_int = helpers.trapz_integral(Cp_t, t)
    Cp_t_int2 = helpers.trapz_integral(Cp_t_int, t)
    Ctis_t_int = helpers.trapz_integral(Ctis_t, t)
    Ctis_t_int2 = helpers.trapz_integral(Ctis_t_int, t)
    A_[:,0] = -Ctis_t_int2
    A_[:,1] = -Ctis_t_int
    A_[:,2] = Cp_t_int2
    A_[:,3] = Cp_t_int
    return A_
|
r"""
Classical Invariant Theory
This module lists classical invariants and covariants of homogeneous
polynomials (also called algebraic forms) under the action of the
special linear group. That is, we are dealing with polynomials of
degree `d` in `n` variables. The special linear group `SL(n,\CC)` acts
on the variables `(x_1,\dots, x_n)` linearly,
.. math::
(x_1,\dots, x_n)^t \to A (x_1,\dots, x_n)^t
,\qquad
A \in SL(n,\CC)
The linear action on the variables transforms a polynomial `p`
generally into a different polynomial `gp`. We can think of it as an
action on the space of coefficients in `p`. An invariant is a
polynomial in the coefficients that is invariant under this action. A
covariant is a polynomial in the coefficients and the variables
`(x_1,\dots, x_n)` that is invariant under the combined action.
For example, the binary quadratic `p(x,y) = a x^2 + b x y + c y^2`
has as its invariant the discriminant `\mathop{disc}(p) = b^2 - 4 a
c`. This means that for any `SL(2,\CC)` coordinate change
.. math::
\begin{pmatrix} x' \\ y' \end{pmatrix}
=
\begin{pmatrix} \alpha & \beta \\ \gamma & \delta \end{pmatrix}
\begin{pmatrix} x \\ y \end{pmatrix}
\qquad
\alpha\delta-\beta\gamma=1
the discriminant is invariant, `\mathop{disc}\big(p(x',y')\big) =
\mathop{disc}\big(p(x,y)\big)`.
To use this module, you should use the factory object
:class:`invariant_theory <InvariantTheoryFactory>`. For example, take
the quartic::
sage: R.<x,y> = QQ[]
sage: q = x^4 + y^4
sage: quartic = invariant_theory.binary_quartic(q); quartic
Binary quartic with coefficients (1, 0, 0, 0, 1)
One invariant of a quartic is known as the Eisenstein
D-invariant. Since it is an invariant, it is a polynomial in the
coefficients (which are integers in this example)::
sage: quartic.EisensteinD()
1
One example of a covariant of a quartic is the so-called g-covariant
(actually, the Hessian). As with all covariants, it is a polynomial in
`x`, `y` and the coefficients::
sage: quartic.g_covariant()
-x^2*y^2
As usual, use tab completion and the online help to discover the
implemented invariants and covariants.
In general, the variables of the defining polynomial cannot be
guessed. For example, the zero polynomial can be thought of as a
homogeneous polynomial of any degree. Also, since we also want to
allow polynomial coefficients we cannot just take all variables of the
polynomial ring as the variables of the form. This is why you will
have to specify the variables explicitly if there is any potential
ambiguity. For example::
sage: invariant_theory.binary_quartic(R.zero(), [x,y])
Binary quartic with coefficients (0, 0, 0, 0, 0)
sage: invariant_theory.binary_quartic(x^4, [x,y])
Binary quartic with coefficients (0, 0, 0, 0, 1)
sage: R.<x,y,t> = QQ[]
sage: invariant_theory.binary_quartic(x^4 + y^4 + t*x^2*y^2, [x,y])
Binary quartic with coefficients (1, 0, t, 0, 1)
Finally, it is often convenient to use inhomogeneous polynomials where
it is understood that one wants to homogenize them. This is also
supported, just define the form with an inhomogeneous polynomial and
specify one less variable::
sage: R.<x,t> = QQ[]
sage: invariant_theory.binary_quartic(x^4 + 1 + t*x^2, [x])
Binary quartic with coefficients (1, 0, t, 0, 1)
REFERENCES:
.. [WpInvariantTheory]
http://en.wikipedia.org/wiki/Glossary_of_invariant_theory
"""
#*****************************************************************************
# Copyright (C) 2012 Volker Braun <vbraun.name@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.rings.all import QQ
from sage.misc.functional import is_odd
from sage.matrix.constructor import matrix
from sage.structure.sage_object import SageObject
from sage.misc.cachefunc import cached_method
######################################################################
def _guess_variables(polynomial, *args):
"""
Return the polynomial variables.
INPUT:
- ``polynomial`` -- a polynomial, or a list/tuple of polynomials
in the same polynomial ring.
- ``*args`` -- the variables. If none are specified, all variables
in ``polynomial`` are returned. If a list or tuple is passed,
the content is returned. If multiple arguments are passed, they
are returned.
OUTPUT:
A tuple of variables in the parent ring of the polynomial(s).
EXAMPLES::
sage: from sage.rings.invariant_theory import _guess_variables
sage: R.<x,y> = QQ[]
sage: _guess_variables(x^2+y^2)
(x, y)
sage: _guess_variables([x^2, y^2])
(x, y)
sage: _guess_variables(x^2+y^2, x)
(x,)
sage: _guess_variables(x^2+y^2, x,y)
(x, y)
sage: _guess_variables(x^2+y^2, [x,y])
(x, y)
"""
if isinstance(polynomial, (list, tuple)):
R = polynomial[0].parent()
if not all(p.parent() is R for p in polynomial):
raise ValueError('All input polynomials must be in the same ring.')
if len(args)==0 or (len(args)==1 and args[0] is None):
if isinstance(polynomial, (list, tuple)):
variables = set()
for p in polynomial:
variables.update(p.variables())
variables = list(variables)
variables.reverse() # to match polynomial.variables() behavior
return tuple(variables)
else:
return polynomial.variables()
elif len(args) == 1 and isinstance(args[0], (tuple, list)):
return tuple(args[0])
else:
return tuple(args)
######################################################################
class FormsBase(SageObject):
    """
    The common base class of :class:`AlgebraicForm` and
    :class:`SeveralAlgebraicForms`.

    This is an abstract base class to provide common methods. It does
    not make much sense to instantiate it.

    TESTS::

        sage: from sage.rings.invariant_theory import FormsBase
        sage: FormsBase(None, None, None, None)
        <class 'sage.rings.invariant_theory.FormsBase'>
    """

    def __init__(self, n, homogeneous, ring, variables):
        """
        The Python constructor.

        TESTS::

            sage: from sage.rings.invariant_theory import FormsBase
            sage: FormsBase(None, None, None, None)
            <class 'sage.rings.invariant_theory.FormsBase'>
        """
        # Number of variables, homogeneity flag, coefficient ring and the
        # tuple of form variables (last entry None in inhomogeneous notation).
        self._n = n
        self._homogeneous = homogeneous
        self._ring = ring
        self._variables = variables

    def _jacobian_determinant(self, *args):
        """
        Return the Jacobian determinant.

        INPUT:

        - ``*args`` -- list of pairs of a polynomial and its
          homogeneous degree. Must be a covariant, that is, polynomial
          in the given :meth:`variables`

        OUTPUT:

        The Jacobian determinant with respect to the variables.

        EXAMPLES::

            sage: R.<x,y> = QQ[]
            sage: from sage.rings.invariant_theory import FormsBase
            sage: f = FormsBase(2, True, R, (x, y))
            sage: f._jacobian_determinant((x^2+y^2, 2), (x*y, 2))
            2*x^2 - 2*y^2
            sage: f = FormsBase(2, False, R, (x, y))
            sage: f._jacobian_determinant((x^2+1, 2), (x, 2))
            2*x^2 - 2
        """
        if self._homogeneous:
            def gradient(p, d):
                return [p.derivative(v) for v in self._variables]
        else:
            def gradient(p, d):
                xs = self._variables[0:-1]
                partials = [p.derivative(v) for v in xs]
                # Derivative w.r.t. the implicit homogenizing variable,
                # recovered from the degree: z*dp/dz = d*p - sum x_i*dp/dx_i.
                dp_dz = d*p - sum(v*dv for v, dv in zip(xs, partials))
                partials.append(dp_dz)
                return partials
        rows = [gradient(p, d) for p, d in args]
        return matrix(self._ring, rows).det()

    def ring(self):
        """
        Return the polynomial ring.

        OUTPUT:

        A polynomial ring. This is where the defining polynomial(s)
        live. Note that the polynomials may be homogeneous or
        inhomogeneous, depending on how the user constructed the
        object.

        EXAMPLES::

            sage: R.<x,y,t> = QQ[]
            sage: quartic = invariant_theory.binary_quartic(x^4+y^4+t*x^2*y^2, [x,y])
            sage: quartic.ring()
            Multivariate Polynomial Ring in x, y, t over Rational Field
        """
        return self._ring

    def variables(self):
        """
        Return the variables of the form.

        OUTPUT:

        A tuple of variables. If inhomogeneous notation is used for the
        defining polynomial then the last entry will be ``None``.

        EXAMPLES::

            sage: R.<x,y,t> = QQ[]
            sage: quartic = invariant_theory.binary_quartic(x^4+y^4+t*x^2*y^2, [x,y])
            sage: quartic.variables()
            (x, y)
        """
        return self._variables

    def is_homogeneous(self):
        """
        Return whether the forms were defined by homogeneous polynomials.

        OUTPUT:

        Boolean. Whether the user originally defined the form via
        homogeneous variables.

        EXAMPLES::

            sage: R.<x,y,t> = QQ[]
            sage: quartic = invariant_theory.binary_quartic(x^4+y^4+t*x^2*y^2, [x,y])
            sage: quartic.is_homogeneous()
            True
        """
        return self._homogeneous
######################################################################
class AlgebraicForm(FormsBase):
"""
The base class of algebraic forms (i.e. homogeneous polynomials).
You should only instantiate the derived classes of this base
class.
Derived classes must implement ``coeffs()`` and
``scaled_coeffs()``
INPUT:
- ``n`` -- The number of variables.
- ``d`` -- The degree of the polynomial.
- ``polynomial`` -- The polynomial.
- ``*args`` -- The variables, as a single list/tuple, multiple
arguments, or ``None`` to use all variables of the polynomial.
Derived classes must implement the same arguments for the
constructor.
EXAMPLES::
sage: from sage.rings.invariant_theory import AlgebraicForm
sage: R.<x,y> = QQ[]
sage: p = x^2 + y^2
sage: AlgebraicForm(2, 2, p).variables()
(x, y)
sage: AlgebraicForm(2, 2, p, None).variables()
(x, y)
sage: AlgebraicForm(3, 2, p).variables()
(x, y, None)
sage: AlgebraicForm(3, 2, p, None).variables()
(x, y, None)
sage: from sage.rings.invariant_theory import AlgebraicForm
sage: R.<x,y,s,t> = QQ[]
sage: p = s*x^2 + t*y^2
sage: AlgebraicForm(2, 2, p, [x,y]).variables()
(x, y)
sage: AlgebraicForm(2, 2, p, x,y).variables()
(x, y)
sage: AlgebraicForm(3, 2, p, [x,y,None]).variables()
(x, y, None)
sage: AlgebraicForm(3, 2, p, x,y,None).variables()
(x, y, None)
sage: AlgebraicForm(2, 1, p, [x,y]).variables()
Traceback (most recent call last):
...
ValueError: Polynomial is of the wrong degree.
sage: AlgebraicForm(2, 2, x^2+y, [x,y]).variables()
Traceback (most recent call last):
...
ValueError: Polynomial is not homogeneous.
"""
def __init__(self, n, d, polynomial, *args, **kwds):
"""
The Python constructor.
INPUT:
See the class documentation.
TESTS::
sage: from sage.rings.invariant_theory import AlgebraicForm
sage: R.<x,y> = QQ[]
sage: form = AlgebraicForm(2, 2, x^2 + y^2)
"""
self._d = d
self._polynomial = polynomial
variables = _guess_variables(polynomial, *args)
if len(variables) == n:
pass
elif len(variables) == n-1:
variables = variables + (None,)
else:
raise ValueError('Need '+str(n)+' or '+
str(n-1)+' variables, got '+str(variables))
ring = polynomial.parent()
homogeneous = variables[-1] is not None
super(AlgebraicForm, self).__init__(n, homogeneous, ring, variables)
self._check()
def _check(self):
"""
Check that the input is of the correct degree and number of
variables.
EXAMPLES::
sage: from sage.rings.invariant_theory import AlgebraicForm
sage: R.<x,y,t> = QQ[]
sage: p = x^2 + y^2
sage: inv = AlgebraicForm(3, 2, p, [x,y,None])
sage: inv._check()
"""
degrees = set()
R = self._ring
if R.ngens() == 1:
degrees.update(self._polynomial.exponents())
else:
for e in self._polynomial.exponents():
deg = sum([ e[R.gens().index(x)]
for x in self._variables if x is not None ])
degrees.add(deg)
if self._homogeneous and len(degrees)>1:
raise ValueError('Polynomial is not homogeneous.')
if degrees == set() or \
(self._homogeneous and degrees == set([self._d])) or \
(not self._homogeneous and max(degrees) <= self._d):
return
else:
raise ValueError('Polynomial is of the wrong degree.')
def _check_covariant(self, method_name, g=None, invariant=False):
"""
Test whether ``method_name`` actually returns a covariant.
INPUT:
- ``method_name`` -- string. The name of the method that
returns the invariant / covariant to test.
- ``g`` -- an `SL(n,\CC)` matrix or ``None`` (default). The
test will be to check that the covariant transforms
corrently under this special linear group element acting on
the homogeneous variables. If ``None``, a random matrix will
be picked.
- ``invariant`` -- boolean. Whether to additionaly test that
it is an invariant.
EXAMPLES::
sage: R.<a0, a1, a2, a3, a4, x0, x1> = QQ[]
sage: p = a0*x1^4 + a1*x1^3*x0 + a2*x1^2*x0^2 + a3*x1*x0^3 + a4*x0^4
sage: quartic = invariant_theory.binary_quartic(p, x0, x1)
sage: quartic._check_covariant('EisensteinE', invariant=True)
sage: quartic._check_covariant('h_covariant')
sage: quartic._check_covariant('h_covariant', invariant=True)
Traceback (most recent call last):
...
AssertionError: Not invariant.
"""
assert self._homogeneous
from sage.matrix.constructor import vector, random_matrix
if g is None:
F = self._ring.base_ring()
g = random_matrix(F, self._n, algorithm='unimodular')
v = vector(self.variables())
g_v = g*v
transform = dict( (v[i], g_v[i]) for i in range(self._n) )
# The covariant of the transformed polynomial
g_self = self.__class__(self._n, self._d, self.form().subs(transform), self.variables())
cov_g = getattr(g_self, method_name)()
# The transform of the covariant
g_cov = getattr(self, method_name)().subs(transform)
# they must be the same
assert (g_cov - cov_g).is_zero(), 'Not covariant.'
if invariant:
cov = getattr(self, method_name)()
assert (cov - cov_g).is_zero(), 'Not invariant.'
def __cmp__(self, other):
"""
Compare ``self`` with ``other``.
EXAMPLES::
sage: R.<x,y> = QQ[]
sage: quartic = invariant_theory.binary_quartic(x^4+y^4)
sage: cmp(quartic, 'foo') == 0
False
sage: cmp(quartic, quartic)
0
sage: quartic.__cmp__(quartic)
0
"""
c = cmp(type(self), type(other))
if c != 0:
return c
return cmp(self.coeffs(), other.coeffs())
def _repr_(self):
"""
Return a string representation.
OUTPUT:
String.
EXAMPLES::
sage: R.<x,y> = QQ[]
sage: quartic = invariant_theory.binary_quartic(x^4+y^4)
sage: quartic._repr_()
'Binary quartic with coefficients (1, 0, 0, 0, 1)'
"""
s = ''
ary = ['Unary', 'Binary', 'Ternary', 'Quaternary', 'Quinary',
'Senary', 'Septenary', 'Octonary', 'Nonary', 'Denary']
try:
s += ary[self._n-1]
except IndexError:
s += 'algebraic'
ic = ['monic', 'quadratic', 'cubic', 'quartic', 'quintic',
'sextic', 'septimic', 'octavic', 'nonic', 'decimic',
'undecimic', 'duodecimic']
s += ' '
try:
s += ic[self._d-1]
except IndexError:
s += 'form'
s += ' with coefficients ' + str(self.coeffs())
return s
    def form(self):
        """
        Return the defining polynomial.

        OUTPUT:

        The polynomial used to define the algebraic form.

        EXAMPLES::

            sage: R.<x,y> = QQ[]
            sage: quartic = invariant_theory.binary_quartic(x^4+y^4)
            sage: quartic.form()
            x^4 + y^4
            sage: quartic.polynomial()
            x^4 + y^4
        """
        return self._polynomial
    # ``polynomial()`` is an alias for ``form()``.
    polynomial = form
def homogenized(self, var='h'):
"""
Return form as defined by a homogeneous polynomial.
INPUT:
- ``var`` -- either a variable name, variable index or a
variable (default: ``'h'``).
OUTPUT:
The same algebraic form, but defined by a homogeneous
polynomial.
EXAMPLES::
sage: T.<t> = QQ[]
sage: quadratic = invariant_theory.binary_quadratic(t^2 + 2*t + 3)
sage: quadratic
Binary quadratic with coefficients (1, 3, 2)
sage: quadratic.homogenized()
Binary quadratic with coefficients (1, 3, 2)
sage: quadratic == quadratic.homogenized()
True
sage: quadratic.form()
t^2 + 2*t + 3
sage: quadratic.homogenized().form()
t^2 + 2*t*h + 3*h^2
sage: R.<x,y,z> = QQ[]
sage: quadratic = invariant_theory.ternary_quadratic(x^2 + 1, [x,y])
sage: quadratic.homogenized().form()
x^2 + h^2
"""
if self._homogeneous:
return self
try:
polynomial = self._polynomial.homogenize(var)
R = polynomial.parent()
variables = map(R, self._variables[0:-1]) + [R(var)]
except AttributeError:
from sage.rings.all import PolynomialRing
R = PolynomialRing(self._ring.base_ring(), [str(self._ring.gen(0)), str(var)])
polynomial = R(self._polynomial).homogenize(var)
variables = R.gens()
return self.__class__(self._n, self._d, polynomial, variables)
    def _extract_coefficients(self, monomials):
        """
        Return the coefficients of ``monomials``.

        INPUT:

        - ``monomials`` -- a list of monomials in the polynomial
          ring. It must include every monomial that actually occurs in
          the form; if fewer monomials are passed, an exception is thrown.

        OUTPUT:

        A tuple containing the coefficients of the monomials in the given
        polynomial.

        EXAMPLES::

            sage: from sage.rings.invariant_theory import AlgebraicForm
            sage: R.<x,y,z,a30,a21,a12,a03,a20,a11,a02,a10,a01,a00> = QQ[]
            sage: p = ( a30*x^3 + a21*x^2*y + a12*x*y^2 + a03*y^3 + a20*x^2*z +
            ...         a11*x*y*z + a02*y^2*z + a10*x*z^2 + a01*y*z^2 + a00*z^3 )
            sage: base = AlgebraicForm(3, 3, p, [x,y,z])
            sage: m = [x^3, y^3, z^3, x^2*y, x^2*z, x*y^2, y^2*z, x*z^2, y*z^2, x*y*z]
            sage: base._extract_coefficients(m)
            (a30, a03, a00, a21, a20, a12, a02, a10, a01, a11)
            sage: base = AlgebraicForm(3, 3, p.subs(z=1), [x,y])
            sage: m = [x^3, y^3, 1, x^2*y, x^2, x*y^2, y^2, x, y, x*y]
            sage: base._extract_coefficients(m)
            (a30, a03, a00, a21, a20, a12, a02, a10, a01, a11)
            sage: T.<t> = QQ[]
            sage: univariate = AlgebraicForm(2, 3, t^3+2*t^2+3*t+4)
            sage: m = [t^3, 1, t, t^2]
            sage: univariate._extract_coefficients(m)
            (1, 4, 3, 2)
            sage: univariate._extract_coefficients(m[1:])
            Traceback (most recent call last):
            ...
            ValueError: Less monomials were passed than the form actually has.
        """
        R = self._ring
        # Only the declared variables count towards a monomial's exponent
        # key; for an inhomogeneous form the ``None`` placeholder is dropped.
        if self._homogeneous:
            variables = self._variables
        else:
            variables = self._variables[0:-1]
        indices = [ R.gens().index(x) for x in variables ]
        coeffs = dict()
        if R.ngens() == 1:
            # Univariate polynomials
            assert indices == [0]
            coefficient_monomial_iter = [(c, R.gen(0)**i) for i,c in
                                         enumerate(self._polynomial.padded_list())]
            # ``index`` maps a monomial to its exponent tuple in the
            # declared variables; used as the dictionary key below.
            def index(monomial):
                if monomial in R.base_ring():
                    return (0,)
                return (monomial.exponents()[0],)
        else:
            # Multivariate polynomials
            coefficient_monomial_iter = self._polynomial
            def index(monomial):
                if monomial in R.base_ring():
                    return tuple(0 for i in indices)
                e = monomial.exponents()[0]
                return tuple(e[i] for i in indices)
        # Accumulate full terms c*m keyed by exponents in the declared
        # variables; coefficients in the remaining variables fold together.
        for c,m in coefficient_monomial_iter:
            i = index(m)
            coeffs[i] = c*m + coeffs.pop(i, R.zero())
        # Exact division by the requested monomial recovers the coefficient.
        result = tuple(coeffs.pop(index(m), R.zero()) // m for m in monomials)
        # Any leftover entries mean the form has monomials not covered by
        # the ``monomials`` argument.
        if len(coeffs):
            raise ValueError('Less monomials were passed than the form actually has.')
        return result
    def coefficients(self):
        """
        Alias for ``coeffs()``.

        See the documentation for ``coeffs()`` for details.

        EXAMPLES::

            sage: R.<a,b,c,d,e,f,g, x,y,z> = QQ[]
            sage: p = a*x^2 + b*y^2 + c*z^2 + d*x*y + e*x*z + f*y*z
            sage: q = invariant_theory.quadratic_form(p, x,y,z)
            sage: q.coefficients()
            (a, b, c, d, e, f)
            sage: q.coeffs()
            (a, b, c, d, e, f)
        """
        return self.coeffs()
    def transformed(self, g):
        r"""
        Return the image under a linear transformation of the variables.

        INPUT:

        - ``g`` -- a `GL(n,\CC)` matrix or a dictionary with the
          variables as keys. A matrix is used to define the linear
          transformation of homogeneous variables, a dictionary acts
          by substitution of the variables.

        OUTPUT:

        A new instance of a subclass of :class:`AlgebraicForm`
        obtained by replacing the variables of the homogeneous
        polynomial by their image under ``g``.

        EXAMPLES::

            sage: R.<x,y,z> = QQ[]
            sage: cubic = invariant_theory.ternary_cubic(x^3 + 2*y^3 + 3*z^3 + 4*x*y*z)
            sage: cubic.transformed({x:y, y:z, z:x}).form()
            3*x^3 + y^3 + 4*x*y*z + 2*z^3
            sage: cyc = matrix([[0,1,0],[0,0,1],[1,0,0]])
            sage: cubic.transformed(cyc) == cubic.transformed({x:y, y:z, z:x})
            True
            sage: g = matrix(QQ, [[1, 0, 0], [-1, 1, -3], [-5, -5, 16]])
            sage: cubic.transformed(g)
            Ternary cubic with coefficients (-356, -373, 12234, -1119, 3578, -1151,
            3582, -11766, -11466, 7360)
            sage: cubic.transformed(g).transformed(g.inverse()) == cubic
            True
        """
        # NOTE(review): ``form`` is computed but never used below; the
        # substitution acts on ``self.form()`` directly. Possibly the
        # homogenized form was meant to be used — confirm before changing.
        form = self.homogenized()
        if isinstance(g, dict):
            # A dictionary acts by direct substitution.
            transform = g
        else:
            # A matrix acts linearly on the column vector of variables.
            from sage.modules.all import vector
            v = vector(self._ring, self._variables)
            g_v = g*v
            transform = dict( (v[i], g_v[i]) for i in range(self._n) )
        # Substitute the transformed variables into the defining polynomial.
        return self.__class__(self._n, self._d, self.form().subs(transform), self.variables())
######################################################################
class QuadraticForm(AlgebraicForm):
    """
    Invariant theory of a multivariate quadratic form.

    You should use the :class:`invariant_theory
    <InvariantTheoryFactory>` factory object to construct instances
    of this class. See :meth:`~InvariantTheoryFactory.quadratic_form`
    for details.

    TESTS::

        sage: R.<a,b,c,d,e,f,g, x,y,z> = QQ[]
        sage: p = a*x^2 + b*y^2 + c*z^2 + d*x*y + e*x*z + f*y*z
        sage: invariant_theory.quadratic_form(p, x,y,z)
        Ternary quadratic with coefficients (a, b, c, d, e, f)
        sage: type(_)
        <class 'sage.rings.invariant_theory.TernaryQuadratic'>
        sage: R.<a,b,c,d,e,f,g, x,y,z> = QQ[]
        sage: p = a*x^2 + b*y^2 + c*z^2 + d*x*y + e*x*z + f*y*z
        sage: invariant_theory.quadratic_form(p, x,y,z)
        Ternary quadratic with coefficients (a, b, c, d, e, f)
        sage: type(_)
        <class 'sage.rings.invariant_theory.TernaryQuadratic'>

    Since we cannot always decide whether the form is homogeneous or
    not based on the number of variables, you need to explicitly
    specify it if you want the variables to be treated as
    inhomogeneous::

        sage: invariant_theory.inhomogeneous_quadratic_form(p.subs(z=1), x,y)
        Ternary quadratic with coefficients (a, b, c, d, e, f)
    """
    def __init__(self, n, d, polynomial, *args):
        """
        The Python constructor.

        TESTS::

            sage: R.<x,y> = QQ[]
            sage: from sage.rings.invariant_theory import QuadraticForm
            sage: form = QuadraticForm(2, 2, x^2+2*y^2+3*x*y)
            sage: form
            Binary quadratic with coefficients (1, 2, 3)
            sage: form._check_covariant('discriminant', invariant=True)
            sage: QuadraticForm(3, 2, x^2+y^2)
            Ternary quadratic with coefficients (1, 1, 0, 0, 0, 0)
        """
        # Any number of variables is allowed, but the degree must be 2.
        assert d == 2
        super(QuadraticForm, self).__init__(n, 2, polynomial, *args)
    @cached_method
    def monomials(self):
        """
        List the basis monomials in the form.

        OUTPUT:

        A tuple of monomials. They are in the same order as
        :meth:`coeffs`.

        EXAMPLES::

            sage: R.<x,y> = QQ[]
            sage: quadratic = invariant_theory.quadratic_form(x^2+y^2)
            sage: quadratic.monomials()
            (x^2, y^2, x*y)
            sage: quadratic = invariant_theory.inhomogeneous_quadratic_form(x^2+y^2)
            sage: quadratic.monomials()
            (x^2, y^2, 1, x*y, x, y)
        """
        var = self._variables
        # ``prod`` treats the ``None`` placeholder of an inhomogeneous form
        # as the constant 1, so the same code handles both cases.
        def prod(a,b):
            if a is None and b is None:
                return self._ring.one()
            elif a is None:
                return b
            elif b is None:
                return a
            else:
                return a*b
        squares = tuple( prod(x,x) for x in var )
        mixed = []
        for i in range(self._n):
            for j in range(i+1, self._n):
                mixed.append(prod(var[i], var[j]))
        mixed = tuple(mixed)
        # Squares first, then the mixed terms in lexicographic (i,j) order.
        return squares + mixed
    @cached_method
    def coeffs(self):
        r"""
        The coefficients of a quadratic form.

        Given

        .. math::

            f(x) = \sum_{0\leq i<n} a_i x_i^2 + \sum_{0\leq j <k<n}
            a_{jk} x_j x_k

        this function returns `a = (a_0, \dots, a_n, a_{00}, a_{01}, \dots, a_{n-1,n})`

        EXAMPLES::

            sage: R.<a,b,c,d,e,f,g, x,y,z> = QQ[]
            sage: p = a*x^2 + b*y^2 + c*z^2 + d*x*y + e*x*z + f*y*z
            sage: inv = invariant_theory.quadratic_form(p, x,y,z); inv
            Ternary quadratic with coefficients (a, b, c, d, e, f)
            sage: inv.coeffs()
            (a, b, c, d, e, f)
            sage: inv.scaled_coeffs()
            (a, b, c, 1/2*d, 1/2*e, 1/2*f)
        """
        return self._extract_coefficients(self.monomials())
    def scaled_coeffs(self):
        r"""
        The scaled coefficients of a quadratic form.

        Given

        .. math::

            f(x) = \sum_{0\leq i<n} a_i x_i^2 + \sum_{0\leq j <k<n}
            2 a_{jk} x_j x_k

        this function returns `a = (a_0, \cdots, a_n, a_{00}, a_{01}, \dots, a_{n-1,n})`

        EXAMPLES::

            sage: R.<a,b,c,d,e,f,g, x,y,z> = QQ[]
            sage: p = a*x^2 + b*y^2 + c*z^2 + d*x*y + e*x*z + f*y*z
            sage: inv = invariant_theory.quadratic_form(p, x,y,z); inv
            Ternary quadratic with coefficients (a, b, c, d, e, f)
            sage: inv.coeffs()
            (a, b, c, d, e, f)
            sage: inv.scaled_coeffs()
            (a, b, c, 1/2*d, 1/2*e, 1/2*f)
        """
        coeff = self.coeffs()
        squares = coeff[0:self._n]
        # The off-diagonal coefficients carry the factor of 2 coming from
        # the symmetry of the associated matrix.
        mixed = tuple( c/2 for c in coeff[self._n:] )
        return squares + mixed
    @cached_method
    def matrix(self):
        r"""
        Return the quadratic form as a symmetric matrix

        OUTPUT:

        This method returns a symmetric matrix `A` such that the
        quadratic `Q` equals

        .. math::

            Q(x,y,z,\dots) = (x,y,\dots) A (x,y,\dots)^t

        EXAMPLES::

            sage: R.<x,y,z> = QQ[]
            sage: quadratic = invariant_theory.ternary_quadratic(x^2+y^2+z^2+x*y)
            sage: matrix(quadratic)
            [  1 1/2   0]
            [1/2   1   0]
            [  0   0   1]
            sage: quadratic._matrix_() == matrix(quadratic)
            True
        """
        coeff = self.scaled_coeffs()
        A = matrix(self._ring, self._n)
        # Diagonal: coefficients of the squares.
        for i in range(self._n):
            A[i,i] = coeff[i]
        # Off-diagonal: fill both triangles so A is symmetric; ``ij`` walks
        # the mixed coefficients in the same (i,j) order as monomials().
        ij = self._n
        for i in range(self._n):
            for j in range(i+1, self._n):
                A[i,j] = coeff[ij]
                A[j,i] = coeff[ij]
                ij += 1
        return A
    _matrix_ = matrix
    def discriminant(self):
        """
        Return the discriminant of the quadratic form.

        Up to an overall constant factor, this is just the determinant
        of the defining matrix, see :meth:`matrix`. For a quadratic
        form in `n` variables, the overall constant is `2^{n-1}` if
        `n` is odd and `(-1)^{n/2} 2^n` if `n` is even.

        EXAMPLES::

            sage: R.<a,b,c, x,y> = QQ[]
            sage: p = a*x^2+b*x*y+c*y^2
            sage: quadratic = invariant_theory.quadratic_form(p, x,y)
            sage: quadratic.discriminant()
            b^2 - 4*a*c
            sage: R.<a,b,c,d,e,f,g, x,y,z> = QQ[]
            sage: p = a*x^2 + b*y^2 + c*z^2 + d*x*y + e*x*z + f*y*z
            sage: quadratic = invariant_theory.quadratic_form(p, x,y,z)
            sage: quadratic.discriminant()
            4*a*b*c - c*d^2 - b*e^2 + d*e*f - a*f^2
        """
        # det(2*A) = 2^n det(A); dividing by 2 (odd n) or multiplying by the
        # sign (even n) realizes the constants quoted in the docstring.
        A = 2*self._matrix_()
        if is_odd(self._n):
            return A.det() / 2
        else:
            return (-1)**(self._n//2) * A.det()
    @cached_method
    def dual(self):
        """
        Return the dual quadratic form.

        OUTPUT:

        A new quadratic form (with the same number of variables)
        defined by the adjoint matrix.

        EXAMPLES::

            sage: R.<a,b,c,x,y,z> = QQ[]
            sage: cubic = x^2+y^2+z^2
            sage: quadratic = invariant_theory.ternary_quadratic(a*x^2+b*y^2+c*z^2, [x,y,z])
            sage: quadratic.form()
            a*x^2 + b*y^2 + c*z^2
            sage: quadratic.dual().form()
            b*c*x^2 + a*c*y^2 + a*b*z^2
            sage: R.<x,y,z, t> = QQ[]
            sage: cubic = x^2+y^2+z^2
            sage: quadratic = invariant_theory.ternary_quadratic(x^2+y^2+z^2 + t*x*y, [x,y,z])
            sage: quadratic.dual()
            Ternary quadratic with coefficients (1, 1, -1/4*t^2 + 1, -t, 0, 0)
            sage: R.<x,y, t> = QQ[]
            sage: quadratic = invariant_theory.ternary_quadratic(x^2+y^2+1 + t*x*y, [x,y])
            sage: quadratic.dual()
            Ternary quadratic with coefficients (1, 1, -1/4*t^2 + 1, -t, 0, 0)

        TESTS::

            sage: R = PolynomialRing(QQ, 'a20,a11,a02,a10,a01,a00,x,y,z', order='lex')
            sage: R.inject_variables()
            Defining a20, a11, a02, a10, a01, a00, x, y, z
            sage: p = ( a20*x^2 + a11*x*y + a02*y^2 +
            ...         a10*x*z + a01*y*z + a00*z^2 )
            sage: quadratic = invariant_theory.ternary_quadratic(p, x,y,z)
            sage: quadratic.dual().dual().form().factor()
            (1/4) *
            (a20*x^2 + a11*x*y + a02*y^2 + a10*x*z + a01*y*z + a00*z^2) *
            (4*a20*a02*a00 - a20*a01^2 - a11^2*a00 + a11*a10*a01 - a02*a10^2)
            sage: R.<w,x,y,z> = QQ[]
            sage: q = invariant_theory.quaternary_quadratic(w^2+2*x^2+3*y^2+4*z^2+x*y+5*w*z)
            sage: q.form()
            w^2 + 2*x^2 + x*y + 3*y^2 + 5*w*z + 4*z^2
            sage: q.dual().dual().form().factor()
            (42849/256) * (w^2 + 2*x^2 + x*y + 3*y^2 + 5*w*z + 4*z^2)
            sage: R.<x,y,z> = QQ[]
            sage: q = invariant_theory.quaternary_quadratic(1+2*x^2+3*y^2+4*z^2+x*y+5*z)
            sage: q.form()
            2*x^2 + x*y + 3*y^2 + 4*z^2 + 5*z + 1
            sage: q.dual().dual().form().factor()
            (42849/256) * (2*x^2 + x*y + 3*y^2 + 4*z^2 + 5*z + 1)
        """
        A = self.matrix()
        Aadj = A.adjoint()
        if self._homogeneous:
            var = self._variables
        else:
            # Substitute 1 for the ``None`` placeholder variable.
            var = self._variables[0:-1] + (1, )
        n = self._n
        # v * Aadj * v^t, expanded; off-diagonal entries appear twice,
        # matching the symmetric-matrix convention of :meth:`matrix`.
        p = sum([ sum([ Aadj[i,j]*var[i]*var[j] for i in range(n) ]) for j in range(n)])
        return invariant_theory.quadratic_form(p, self.variables())
    def as_QuadraticForm(self):
        """
        Convert into a :class:`~sage.quadratic_forms.quadratic_form.QuadraticForm`.

        OUTPUT:

        Sage has a special quadratic forms subsystem. This method
        converts ``self`` into this
        :class:`~sage.quadratic_forms.quadratic_form.QuadraticForm`
        representation.

        EXAMPLES::

            sage: R.<x,y,z> = QQ[]
            sage: p = x^2+y^2+z^2+2*x*y+3*x*z
            sage: quadratic = invariant_theory.ternary_quadratic(p)
            sage: matrix(quadratic)
            [  1   1 3/2]
            [  1   1   0]
            [3/2   0   1]
            sage: quadratic.as_QuadraticForm()
            Quadratic form in 3 variables over Multivariate Polynomial
            Ring in x, y, z over Rational Field with coefficients:
            [ 1/2 1 3/2 ]
            [ * 1/2 0 ]
            [ * * 1/2 ]
            sage: _.polynomial('X,Y,Z')
            X^2 + 2*X*Y + Y^2 + 3*X*Z + Z^2
        """
        R = self._ring
        # The QuadraticForm constructor expects the Gram matrix convention
        # used by the quadratic forms subsystem (see doctest output above).
        B = self._matrix_()
        import sage.quadratic_forms.quadratic_form
        return sage.quadratic_forms.quadratic_form.QuadraticForm(R, B)
######################################################################
class BinaryQuartic(AlgebraicForm):
    """
    Invariant theory of a binary quartic.

    You should use the :class:`invariant_theory
    <InvariantTheoryFactory>` factory object to construct instances
    of this class. See :meth:`~InvariantTheoryFactory.binary_quartic`
    for details.

    TESTS::

        sage: R.<a0, a1, a2, a3, a4, x0, x1> = QQ[]
        sage: p = a0*x1^4 + a1*x1^3*x0 + a2*x1^2*x0^2 + a3*x1*x0^3 + a4*x0^4
        sage: quartic = invariant_theory.binary_quartic(p, x0, x1)
        sage: quartic._check_covariant('form')
        sage: quartic._check_covariant('EisensteinD', invariant=True)
        sage: quartic._check_covariant('EisensteinE', invariant=True)
        sage: quartic._check_covariant('g_covariant')
        sage: quartic._check_covariant('h_covariant')
        sage: TestSuite(quartic).run()
    """
    def __init__(self, n, d, polynomial, *args):
        """
        The Python constructor.

        TESTS::

            sage: R.<x,y> = QQ[]
            sage: from sage.rings.invariant_theory import BinaryQuartic
            sage: BinaryQuartic(2, 4, x^4+y^4)
            Binary quartic with coefficients (1, 0, 0, 0, 1)
        """
        assert n == 2 and d == 4
        super(BinaryQuartic, self).__init__(2, 4, polynomial, *args)
        # Cache the two variables (the second is ``None`` if inhomogeneous).
        self._x = self._variables[0]
        self._y = self._variables[1]
    @cached_method
    def monomials(self):
        """
        List the basis monomials in the form.

        OUTPUT:

        A tuple of monomials. They are in the same order as
        :meth:`coeffs`.

        EXAMPLES::

            sage: R.<x,y> = QQ[]
            sage: quartic = invariant_theory.binary_quartic(x^4+y^4)
            sage: quartic.monomials()
            (y^4, x*y^3, x^2*y^2, x^3*y, x^4)
        """
        x0 = self._x
        x1 = self._y
        if self._homogeneous:
            return (x1**4, x1**3*x0, x1**2*x0**2, x1*x0**3, x0**4)
        else:
            # Inhomogeneous form: increasing powers of the single variable.
            return (self._ring.one(), x0, x0**2, x0**3, x0**4)
    @cached_method
    def coeffs(self):
        r"""
        The coefficients of a binary quartic.

        Given

        .. math::

            f(x) = a_0 x_1^4 + a_1 x_0 x_1^3 + a_2 x_0^2 x_1^2 +
                   a_3 x_0^3 x_1 + a_4 x_0^4

        this function returns `a = (a_0, a_1, a_2, a_3, a_4)`

        EXAMPLES::

            sage: R.<a0, a1, a2, a3, a4, x0, x1> = QQ[]
            sage: p = a0*x1^4 + a1*x1^3*x0 + a2*x1^2*x0^2 + a3*x1*x0^3 + a4*x0^4
            sage: quartic = invariant_theory.binary_quartic(p, x0, x1)
            sage: quartic.coeffs()
            (a0, a1, a2, a3, a4)
            sage: R.<a0, a1, a2, a3, a4, x> = QQ[]
            sage: p = a0 + a1*x + a2*x^2 + a3*x^3 + a4*x^4
            sage: quartic = invariant_theory.binary_quartic(p, x)
            sage: quartic.coeffs()
            (a0, a1, a2, a3, a4)
        """
        return self._extract_coefficients(self.monomials())
    def scaled_coeffs(self):
        r"""
        The coefficients of a binary quartic.

        Given

        .. math::

            f(x) = a_0 x_1^4 + 4 a_1 x_0 x_1^3 + 6 a_2 x_0^2 x_1^2 +
                   4 a_3 x_0^3 x_1 + a_4 x_0^4

        this function returns `a = (a_0, a_1, a_2, a_3, a_4)`

        EXAMPLES::

            sage: R.<a0, a1, a2, a3, a4, x0, x1> = QQ[]
            sage: quartic = a0*x1^4 + 4*a1*x1^3*x0 + 6*a2*x1^2*x0^2 + 4*a3*x1*x0^3 + a4*x0^4
            sage: inv = invariant_theory.binary_quartic(quartic, x0, x1)
            sage: inv.scaled_coeffs()
            (a0, a1, a2, a3, a4)
            sage: R.<a0, a1, a2, a3, a4, x> = QQ[]
            sage: quartic = a0 + 4*a1*x + 6*a2*x^2 + 4*a3*x^3 + a4*x^4
            sage: inv = invariant_theory.binary_quartic(quartic, x)
            sage: inv.scaled_coeffs()
            (a0, a1, a2, a3, a4)
        """
        coeff = self.coeffs()
        # Divide out the binomial coefficients (1, 4, 6, 4, 1).
        return (coeff[0], coeff[1]/4, coeff[2]/6, coeff[3]/4, coeff[4])
    @cached_method
    def EisensteinD(self):
        r"""
        One of the Eisenstein invariants of a binary quartic.

        OUTPUT:

        The Eisenstein D-invariant of the quartic.

        .. math::

            f(x) = a_0 x_1^4 + 4 a_1 x_0 x_1^3 + 6 a_2 x_0^2 x_1^2 +
                   4 a_3 x_0^3 x_1 + a_4 x_0^4
            \\
            \Rightarrow
            D(f) = a_0 a_4+3 a_2^2-4 a_1 a_3

        EXAMPLES::

            sage: R.<a0, a1, a2, a3, a4, x0, x1> = QQ[]
            sage: f = a0*x1^4+4*a1*x0*x1^3+6*a2*x0^2*x1^2+4*a3*x0^3*x1+a4*x0^4
            sage: inv = invariant_theory.binary_quartic(f, x0, x1)
            sage: inv.EisensteinD()
            3*a2^2 - 4*a1*a3 + a0*a4
        """
        a = self.scaled_coeffs()
        assert len(a) == 5
        return a[0]*a[4]+3*a[2]**2-4*a[1]*a[3]
    @cached_method
    def EisensteinE(self):
        r"""
        One of the Eisenstein invariants of a binary quartic.

        OUTPUT:

        The Eisenstein E-invariant of the quartic.

        .. math::

            f(x) = a_0 x_1^4 + 4 a_1 x_0 x_1^3 + 6 a_2 x_0^2 x_1^2 +
                   4 a_3 x_0^3 x_1 + a_4 x_0^4
            \\ \Rightarrow
            E(f) = a_0 a_3^2 +a_1^2 a_4 -a_0 a_2 a_4
                   -2 a_1 a_2 a_3 + a_2^3

        EXAMPLES::

            sage: R.<a0, a1, a2, a3, a4, x0, x1> = QQ[]
            sage: f = a0*x1^4+4*a1*x0*x1^3+6*a2*x0^2*x1^2+4*a3*x0^3*x1+a4*x0^4
            sage: inv = invariant_theory.binary_quartic(f, x0, x1)
            sage: inv.EisensteinE()
            a2^3 - 2*a1*a2*a3 + a0*a3^2 + a1^2*a4 - a0*a2*a4
        """
        a = self.scaled_coeffs()
        assert len(a) == 5
        return a[0]*a[3]**2 +a[1]**2*a[4] -a[0]*a[2]*a[4] -2*a[1]*a[2]*a[3] +a[2]**3
    @cached_method
    def g_covariant(self):
        r"""
        The g-covariant of a binary quartic.

        OUTPUT:

        The g-covariant of the quartic, that is (see the doctest below)

        .. math::

            g = \frac{1}{144}
            \left[
            \left(\frac{\partial^2 f}{\partial x \partial y}\right)^2
            - \frac{\partial^2 f}{\partial x \partial x}
              \frac{\partial^2 f}{\partial y \partial y}
            \right]

        EXAMPLES::

            sage: R.<a0, a1, a2, a3, a4, x, y> = QQ[]
            sage: p = a0*x^4+4*a1*x^3*y+6*a2*x^2*y^2+4*a3*x*y^3+a4*y^4
            sage: inv = invariant_theory.binary_quartic(p, x, y)
            sage: g = inv.g_covariant(); g
            a1^2*x^4 - a0*a2*x^4 + 2*a1*a2*x^3*y - 2*a0*a3*x^3*y + 3*a2^2*x^2*y^2
            - 2*a1*a3*x^2*y^2 - a0*a4*x^2*y^2 + 2*a2*a3*x*y^3
            - 2*a1*a4*x*y^3 + a3^2*y^4 - a2*a4*y^4
            sage: inv_inhomogeneous = invariant_theory.binary_quartic(p.subs(y=1), x)
            sage: inv_inhomogeneous.g_covariant()
            a1^2*x^4 - a0*a2*x^4 + 2*a1*a2*x^3 - 2*a0*a3*x^3 + 3*a2^2*x^2
            - 2*a1*a3*x^2 - a0*a4*x^2 + 2*a2*a3*x - 2*a1*a4*x + a3^2 - a2*a4
            sage: g == 1/144 * (p.derivative(x,y)^2 - p.derivative(x,x)*p.derivative(y,y))
            True
        """
        # NOTE: the coefficients are deliberately unpacked in reverse
        # order; the doctest above pins the resulting expansion.
        a4, a3, a2, a1, a0 = self.scaled_coeffs()
        x0 = self._x
        x1 = self._y
        if self._homogeneous:
            xpow = [x0**4, x0**3 * x1, x0**2 * x1**2, x0 * x1**3, x1**4]
        else:
            xpow = [x0**4, x0**3, x0**2, x0, self._ring.one()]
        return (a1**2 - a0*a2)*xpow[0] + \
            (2*a1*a2 - 2*a0*a3)*xpow[1] + \
            (3*a2**2 - 2*a1*a3 - a0*a4)*xpow[2] + \
            (2*a2*a3 - 2*a1*a4)*xpow[3] + \
            (a3**2 - a2*a4)*xpow[4]
    @cached_method
    def h_covariant(self):
        r"""
        The h-covariant of a binary quartic.

        OUTPUT:

        The h-covariant of the quartic, that is (see the doctest below)

        .. math::

            h = \frac{1}{8}
            \left(
            \frac{\partial f}{\partial x} \frac{\partial g}{\partial y}
            - \frac{\partial f}{\partial y} \frac{\partial g}{\partial x}
            \right)

        where `g` is the :meth:`g_covariant`.

        EXAMPLES::

            sage: R.<a0, a1, a2, a3, a4, x, y> = QQ[]
            sage: p = a0*x^4+4*a1*x^3*y+6*a2*x^2*y^2+4*a3*x*y^3+a4*y^4
            sage: inv = invariant_theory.binary_quartic(p, x, y)
            sage: h = inv.h_covariant(); h
            -2*a1^3*x^6 + 3*a0*a1*a2*x^6 - a0^2*a3*x^6 - 6*a1^2*a2*x^5*y + 9*a0*a2^2*x^5*y
            - 2*a0*a1*a3*x^5*y - a0^2*a4*x^5*y - 10*a1^2*a3*x^4*y^2 + 15*a0*a2*a3*x^4*y^2
            - 5*a0*a1*a4*x^4*y^2 + 10*a0*a3^2*x^3*y^3 - 10*a1^2*a4*x^3*y^3
            + 10*a1*a3^2*x^2*y^4 - 15*a1*a2*a4*x^2*y^4 + 5*a0*a3*a4*x^2*y^4
            + 6*a2*a3^2*x*y^5 - 9*a2^2*a4*x*y^5 + 2*a1*a3*a4*x*y^5 + a0*a4^2*x*y^5
            + 2*a3^3*y^6 - 3*a2*a3*a4*y^6 + a1*a4^2*y^6
            sage: inv_inhomogeneous = invariant_theory.binary_quartic(p.subs(y=1), x)
            sage: inv_inhomogeneous.h_covariant()
            -2*a1^3*x^6 + 3*a0*a1*a2*x^6 - a0^2*a3*x^6 - 6*a1^2*a2*x^5 + 9*a0*a2^2*x^5
            - 2*a0*a1*a3*x^5 - a0^2*a4*x^5 - 10*a1^2*a3*x^4 + 15*a0*a2*a3*x^4
            - 5*a0*a1*a4*x^4 + 10*a0*a3^2*x^3 - 10*a1^2*a4*x^3 + 10*a1*a3^2*x^2
            - 15*a1*a2*a4*x^2 + 5*a0*a3*a4*x^2 + 6*a2*a3^2*x - 9*a2^2*a4*x
            + 2*a1*a3*a4*x + a0*a4^2*x + 2*a3^3 - 3*a2*a3*a4 + a1*a4^2
            sage: g = inv.g_covariant()
            sage: h == 1/8 * (p.derivative(x)*g.derivative(y)-p.derivative(y)*g.derivative(x))
            True
        """
        a0, a1, a2, a3, a4 = self.scaled_coeffs()
        x0 = self._x
        x1 = self._y
        if self._homogeneous:
            xpow = [x0**6, x0**5 * x1, x0**4 * x1**2, x0**3 * x1**3,
                    x0**2 * x1**4, x0 * x1**5, x1**6]
        else:
            xpow = [x0**6, x0**5, x0**4, x0**3, x0**2, x0, x0.parent().one()]
        return (-2*a3**3 + 3*a2*a3*a4 - a1*a4**2) * xpow[0] + \
            (-6*a2*a3**2 + 9*a2**2*a4 - 2*a1*a3*a4 - a0*a4**2) * xpow[1] + \
            5 * (-2*a1*a3**2 + 3*a1*a2*a4 - a0*a3*a4) * xpow[2] + \
            10 * (-a0*a3**2 + a1**2*a4) * xpow[3] + \
            5 * (2*a1**2*a3 - 3*a0*a2*a3 + a0*a1*a4) * xpow[4] + \
            (6*a1**2*a2 - 9*a0*a2**2 + 2*a0*a1*a3 + a0**2*a4) * xpow[5] + \
            (2*a1**3 - 3*a0*a1*a2 + a0**2*a3) * xpow[6]
######################################################################
def _covariant_conic(A_scaled_coeffs, B_scaled_coeffs, monomials):
"""
Helper function for :meth:`TernaryQuadratic.covariant_conic`
INPUT:
- ``A_scaled_coeffs``, ``B_scaled_coeffs`` -- The scaled
coefficients of the two ternary quadratics.
- ``monomials`` -- The monomials :meth:`~TernaryQuadratic.monomials`.
OUTPUT:
The so-called covariant conic, a ternary quadratic. It is
symmetric under exchange of ``A`` and ``B``.
EXAMPLES::
sage: ring.<x,y,z> = QQ[]
sage: A = invariant_theory.ternary_quadratic(x^2+y^2+z^2)
sage: B = invariant_theory.ternary_quadratic(x*y+x*z+y*z)
sage: from sage.rings.invariant_theory import _covariant_conic
sage: _covariant_conic(A.scaled_coeffs(), B.scaled_coeffs(), A.monomials())
-x*y - x*z - y*z
"""
a0, b0, c0, h0, g0, f0 = A_scaled_coeffs
a1, b1, c1, h1, g1, f1 = B_scaled_coeffs
return (
(b0*c1+c0*b1-2*f0*f1) * monomials[0] +
(a0*c1+c0*a1-2*g0*g1) * monomials[1] +
(a0*b1+b0*a1-2*h0*h1) * monomials[2] +
2*(f0*g1+g0*f1 -c0*h1-h0*c1) * monomials[3] +
2*(h0*f1+f0*h1 -b0*g1-g0*b1) * monomials[4] +
2*(g0*h1+h0*g1 -a0*f1-f0*a1) * monomials[5] )
######################################################################
class TernaryQuadratic(QuadraticForm):
    """
    Invariant theory of a ternary quadratic.

    You should use the :class:`invariant_theory
    <InvariantTheoryFactory>` factory object to construct instances
    of this class. See
    :meth:`~InvariantTheoryFactory.ternary_quadratic` for details.

    TESTS::

        sage: R.<x,y,z> = QQ[]
        sage: quadratic = invariant_theory.ternary_quadratic(x^2+y^2+z^2)
        sage: quadratic
        Ternary quadratic with coefficients (1, 1, 1, 0, 0, 0)
        sage: TestSuite(quadratic).run()
    """
    def __init__(self, n, d, polynomial, *args):
        """
        The Python constructor.

        INPUT:

        See :meth:`~InvariantTheoryFactory.ternary_quadratic`.

        TESTS::

            sage: R.<x,y,z> = QQ[]
            sage: from sage.rings.invariant_theory import TernaryQuadratic
            sage: TernaryQuadratic(3, 2, x^2+y^2+z^2)
            Ternary quadratic with coefficients (1, 1, 1, 0, 0, 0)
        """
        assert n == 3 and d == 2
        # Delegate to QuadraticForm.__init__. Previously this read
        # ``super(QuadraticForm, self)``, which skipped QuadraticForm in the
        # MRO; the net effect is identical since QuadraticForm.__init__ only
        # asserts d == 2 and delegates, but naming the wrong class is fragile.
        super(TernaryQuadratic, self).__init__(3, 2, polynomial, *args)
        self._x = self._variables[0]
        self._y = self._variables[1]
        self._z = self._variables[2]
    @cached_method
    def monomials(self):
        """
        List the basis monomials of the form.

        OUTPUT:

        A tuple of monomials. They are in the same order as
        :meth:`coeffs`.

        EXAMPLES::

            sage: R.<x,y,z> = QQ[]
            sage: quadratic = invariant_theory.ternary_quadratic(x^2+y*z)
            sage: quadratic.monomials()
            (x^2, y^2, z^2, x*y, x*z, y*z)
        """
        R = self._ring
        x,y,z = self._x, self._y, self._z
        if self._homogeneous:
            return (x**2, y**2, z**2, x*y, x*z, y*z)
        else:
            # Inhomogeneous case: the third variable is replaced by 1.
            return (x**2, y**2, R.one(), x*y, x, y)
    @cached_method
    def coeffs(self):
        r"""
        Return the coefficients of a quadratic.

        Given

        .. math::

            p(x,y) =&\;
            a_{20} x^{2} + a_{11} x y + a_{02} y^{2} +
            a_{10} x + a_{01} y + a_{00}

        this function returns
        `a = (a_{20}, a_{02}, a_{00}, a_{11}, a_{10}, a_{01} )`

        EXAMPLES::

            sage: R.<x,y,z,a20,a11,a02,a10,a01,a00> = QQ[]
            sage: p = ( a20*x^2 + a11*x*y + a02*y^2 +
            ...         a10*x*z + a01*y*z + a00*z^2 )
            sage: invariant_theory.ternary_quadratic(p, x,y,z).coeffs()
            (a20, a02, a00, a11, a10, a01)
            sage: invariant_theory.ternary_quadratic(p.subs(z=1), x, y).coeffs()
            (a20, a02, a00, a11, a10, a01)
        """
        return self._extract_coefficients(self.monomials())
    def scaled_coeffs(self):
        r"""
        Return the scaled coefficients of a quadratic.

        Given

        .. math::

            p(x,y) =&\;
            a_{20} x^{2} + a_{11} x y + a_{02} y^{2} +
            a_{10} x + a_{01} y + a_{00}

        this function returns
        `a = (a_{20}, a_{02}, a_{00}, a_{11}/2, a_{10}/2, a_{01}/2, )`

        EXAMPLES::

            sage: R.<x,y,z,a20,a11,a02,a10,a01,a00> = QQ[]
            sage: p = ( a20*x^2 + a11*x*y + a02*y^2 +
            ...         a10*x*z + a01*y*z + a00*z^2 )
            sage: invariant_theory.ternary_quadratic(p, x,y,z).scaled_coeffs()
            (a20, a02, a00, 1/2*a11, 1/2*a10, 1/2*a01)
            sage: invariant_theory.ternary_quadratic(p.subs(z=1), x, y).scaled_coeffs()
            (a20, a02, a00, 1/2*a11, 1/2*a10, 1/2*a01)
        """
        F = self._ring.base_ring()
        a200, a020, a002, a110, a101, a011 = self.coeffs()
        # Halve the mixed coefficients (symmetric-matrix convention).
        return (a200, a020, a002, a110/F(2), a101/F(2), a011/F(2))
    def covariant_conic(self, other):
        """
        Return the ternary quadratic covariant to ``self`` and ``other``.

        INPUT:

        - ``other`` -- Another ternary quadratic.

        OUTPUT:

        The so-called covariant conic, a ternary quadratic. It is
        symmetric under exchange of ``self`` and ``other``.

        EXAMPLES::

            sage: ring.<x,y,z> = QQ[]
            sage: Q = invariant_theory.ternary_quadratic(x^2+y^2+z^2)
            sage: R = invariant_theory.ternary_quadratic(x*y+x*z+y*z)
            sage: Q.covariant_conic(R)
            -x*y - x*z - y*z
            sage: R.covariant_conic(Q)
            -x*y - x*z - y*z

        TESTS::

            sage: R.<a,a_,b,b_,c,c_,f,f_,g,g_,h,h_,x,y,z> = QQ[]
            sage: p = ( a*x^2 + 2*h*x*y + b*y^2 +
            ...         2*g*x*z + 2*f*y*z + c*z^2 )
            sage: Q = invariant_theory.ternary_quadratic(p, [x,y,z])
            sage: Q.matrix()
            [a h g]
            [h b f]
            [g f c]
            sage: p = ( a_*x^2 + 2*h_*x*y + b_*y^2 +
            ...         2*g_*x*z + 2*f_*y*z + c_*z^2 )
            sage: Q_ = invariant_theory.ternary_quadratic(p, [x,y,z])
            sage: Q_.matrix()
            [a_ h_ g_]
            [h_ b_ f_]
            [g_ f_ c_]
            sage: QQ_ = Q.covariant_conic(Q_)
            sage: invariant_theory.ternary_quadratic(QQ_, [x,y,z]).matrix()
            [      b_*c + b*c_ - 2*f*f_  f_*g + f*g_ - c_*h - c*h_ -b_*g - b*g_ + f_*h + f*h_]
            [ f_*g + f*g_ - c_*h - c*h_       a_*c + a*c_ - 2*g*g_ -a_*f - a*f_ + g_*h + g*h_]
            [-b_*g - b*g_ + f_*h + f*h_ -a_*f - a*f_ + g_*h + g*h_       a_*b + a*b_ - 2*h*h_]
        """
        # Delegate the actual computation to the module-level helper.
        return _covariant_conic(self.scaled_coeffs(), other.scaled_coeffs(),
                                self.monomials())
######################################################################
class TernaryCubic(AlgebraicForm):
"""
Invariant theory of a ternary cubic.
You should use the :class:`invariant_theory
    <InvariantTheoryFactory>` factory object to construct instances
of this class. See :meth:`~InvariantTheoryFactory.ternary_cubic`
for details.
TESTS::
sage: R.<x,y,z> = QQ[]
sage: cubic = invariant_theory.ternary_cubic(x^3+y^3+z^3)
sage: cubic
Ternary cubic with coefficients (1, 1, 1, 0, 0, 0, 0, 0, 0, 0)
sage: TestSuite(cubic).run()
"""
def __init__(self, n, d, polynomial, *args):
"""
The Python constructor.
TESTS::
sage: R.<x,y,z> = QQ[]
sage: p = 2837*x^3 + 1363*x^2*y + 6709*x^2*z + \
... 5147*x*y^2 + 2769*x*y*z + 912*x*z^2 + 4976*y^3 + \
... 2017*y^2*z + 4589*y*z^2 + 9681*z^3
sage: cubic = invariant_theory.ternary_cubic(p)
sage: cubic._check_covariant('S_invariant', invariant=True)
sage: cubic._check_covariant('T_invariant', invariant=True)
sage: cubic._check_covariant('form')
sage: cubic._check_covariant('Hessian')
sage: cubic._check_covariant('Theta_covariant')
sage: cubic._check_covariant('J_covariant')
"""
assert n == d == 3
super(TernaryCubic, self).__init__(3, 3, polynomial, *args)
self._x = self._variables[0]
self._y = self._variables[1]
self._z = self._variables[2]
@cached_method
def monomials(self):
"""
List the basis monomials of the form.
OUTPUT:
A tuple of monomials. They are in the same order as
:meth:`coeffs`.
EXAMPLES::
sage: R.<x,y,z> = QQ[]
sage: cubic = invariant_theory.ternary_cubic(x^3+y*z^2)
sage: cubic.monomials()
(x^3, y^3, z^3, x^2*y, x^2*z, x*y^2, y^2*z, x*z^2, y*z^2, x*y*z)
"""
R = self._ring
x,y,z = self._x, self._y, self._z
if self._homogeneous:
return (x**3, y**3, z**3, x**2*y, x**2*z, x*y**2,
y**2*z, x*z**2, y*z**2, x*y*z)
else:
return (x**3, y**3, R.one(), x**2*y, x**2, x*y**2,
y**2, x, y, x*y)
@cached_method
def coeffs(self):
r"""
Return the coefficients of a cubic.
Given
.. math::
\begin{split}
p(x,y) =&\;
a_{30} x^{3} + a_{21} x^{2} y + a_{12} x y^{2} +
a_{03} y^{3} + a_{20} x^{2} +
\\ &\;
a_{11} x y +
a_{02} y^{2} + a_{10} x + a_{01} y + a_{00}
\end{split}
this function returns
`a = (a_{30}, a_{03}, a_{00}, a_{21}, a_{20}, a_{12}, a_{02}, a_{10}, a_{01}, a_{11})`
EXAMPLES::
sage: R.<x,y,z,a30,a21,a12,a03,a20,a11,a02,a10,a01,a00> = QQ[]
sage: p = ( a30*x^3 + a21*x^2*y + a12*x*y^2 + a03*y^3 + a20*x^2*z +
... a11*x*y*z + a02*y^2*z + a10*x*z^2 + a01*y*z^2 + a00*z^3 )
sage: invariant_theory.ternary_cubic(p, x,y,z).coeffs()
(a30, a03, a00, a21, a20, a12, a02, a10, a01, a11)
sage: invariant_theory.ternary_cubic(p.subs(z=1), x, y).coeffs()
(a30, a03, a00, a21, a20, a12, a02, a10, a01, a11)
"""
return self._extract_coefficients(self.monomials())
def scaled_coeffs(self):
r"""
Return the coefficients of a cubic.
Compared to :meth:`coeffs`, this method returns rescaled
coefficients that are often used in invariant theory.
Given
.. math::
\begin{split}
p(x,y) =&\;
a_{30} x^{3} + a_{21} x^{2} y + a_{12} x y^{2} +
a_{03} y^{3} + a_{20} x^{2} +
\\ &\;
a_{11} x y +
a_{02} y^{2} + a_{10} x + a_{01} y + a_{00}
\end{split}
this function returns
`a = (a_{30}, a_{03}, a_{00}, a_{21}/3, a_{20}/3, a_{12}/3, a_{02}/3, a_{10}/3, a_{01}/3, a_{11}/6)`
EXAMPLES::
sage: R.<x,y,z,a30,a21,a12,a03,a20,a11,a02,a10,a01,a00> = QQ[]
sage: p = ( a30*x^3 + a21*x^2*y + a12*x*y^2 + a03*y^3 + a20*x^2*z +
... a11*x*y*z + a02*y^2*z + a10*x*z^2 + a01*y*z^2 + a00*z^3 )
sage: invariant_theory.ternary_cubic(p, x,y,z).scaled_coeffs()
(a30, a03, a00, 1/3*a21, 1/3*a20, 1/3*a12, 1/3*a02, 1/3*a10, 1/3*a01, 1/6*a11)
"""
a = self.coeffs()
F = self._ring.base_ring()
return (a[0], a[1], a[2],
1/F(3)*a[3], 1/F(3)*a[4], 1/F(3)*a[5],
1/F(3)*a[6], 1/F(3)*a[7], 1/F(3)*a[8],
1/F(6)*a[9])
def S_invariant(self):
"""
Return the S-invariant.
EXAMPLES::
sage: R.<x,y,z> = QQ[]
sage: cubic = invariant_theory.ternary_cubic(x^2*y+y^3+z^3+x*y*z)
sage: cubic.S_invariant()
-1/1296
"""
a,b,c,a2,a3,b1,b3,c1,c2,m = self.scaled_coeffs()
S = ( a*b*c*m-(b*c*a2*a3+c*a*b1*b3+a*b*c1*c2)
-m*(a*b3*c2+b*c1*a3+c*a2*b1)
+(a*b1*c2**2+a*c1*b3**2+b*a2*c1**2+b*c2*a3**2+c*b3*a2**2+c*a3*b1**2)
-m**4+2*m**2*(b1*c1+c2*a2+a3*b3)
-3*m*(a2*b3*c1+a3*b1*c2)
-(b1**2*c1**2+c2**2*a2**2+a3**2*b3**2)
+(c2*a2*a3*b3+a3*b3*b1*c1+b1*c1*c2*a2) )
return S
def T_invariant(self):
"""
Return the T-invariant.
EXAMPLES::
sage: R.<x,y,z> = QQ[]
sage: cubic = invariant_theory.ternary_cubic(x^3+y^3+z^3)
sage: cubic.T_invariant()
1
sage: R.<x,y,z,t> = GF(7)[]
sage: cubic = invariant_theory.ternary_cubic(x^3+y^3+z^3+t*x*y*z, [x,y,z])
sage: cubic.T_invariant()
-t^6 - t^3 + 1
"""
a,b,c,a2,a3,b1,b3,c1,c2,m = self.scaled_coeffs()
T = ( a**2*b**2*c**2-6*a*b*c*(a*b3*c2+b*c1*a3+c*a2*b1)
-20*a*b*c*m**3+12*a*b*c*m*(b1*c1+c2*a2+a3*b3)
+6*a*b*c*(a2*b3*c1+a3*b1*c2)+
4*(a**2*b*c2**3+a**2*c*b3**3+b**2*c*a3**3+
b**2*a*c1**3+c**2*a*b1**3+c**2*b*a2**3)
+36*m**2*(b*c*a2*a3+c*a*b1*b3+a*b*c1*c2)
-24*m*(b*c*b1*a3**2+b*c*c1*a2**2+c*a*c2*b1**2+c*a*a2*b3**2+a*b*a3*c2**2+
a*b*b3*c1**2)
-3*(a**2*b3**2*c2**2+b**2*c1**2*a3**2+c**2*a2**2*b1**2)+
18*(b*c*b1*c1*a2*a3+c*a*c2*a2*b3*b1+a*b*a3*b3*c1*c2)
-12*(b*c*c2*a3*a2**2+b*c*b3*a2*a3**2+c*a*c1*b3*b1**2+
c*a*a3*b1*b3**2+a*b*a2*c1*c2**2+a*b*b1*c2*c1**2)
-12*m**3*(a*b3*c2+b*c1*a3+c*a2*b1)
+12*m**2*(a*b1*c2**2+a*c1*b3**2+b*a2*c1**2+
b*c2*a3**2+c*b3*a2**2+c*a3*b1**2)
-60*m*(a*b1*b3*c1*c2+b*c1*c2*a2*a3+c*a2*a3*b1*b3)
+12*m*(a*a2*b3*c2**2+a*a3*c2*b3**2+b*b3*c1*a3**2+
b*b1*a3*c1**2+c*c1*a2*b1**2+c*c2*b1*a2**2)
+6*(a*b3*c2+b*c1*a3+c*a2*b1)*(a2*b3*c1+a3*b1*c2)
+24*(a*b1*b3**2*c1**2+a*c1*c2**2*b1**2+b*c2*c1**2*a2**2
+b*a2*a3**2*c2**2+c*a3*a2**2*b3**2+c*b3*b1**2*a3**2)
-12*(a*a2*b1*c2**3+a*a3*c1*b3**3+b*b3*c2*a3**3+b*b1*a2*c1**3
+c*c1*a3*b1**3+c*c2*b3*a2**3)
-8*m**6+24*m**4*(b1*c1+c2*a2+a3*b3)-36*m**3*(a2*b3*c1+a3*b1*c2)
-12*m**2*(b1*c1*c2*a2+c2*a2*a3*b3+a3*b3*b1*c1)
-24*m**2*(b1**2*c1**2+c2**2*a2**2+a3**2*b3**2)
+36*m*(a2*b3*c1+a3*b1*c2)*(b1*c1+c2*a2+a3*b3)
+8*(b1**3*c1**3+c2**3*a2**3+a3**3*b3**3)
-27*(a2**2*b3**2*c1**2+a3**2*b1**2*c2**2)-6*b1*c1*c2*a2*a3*b3
-12*(b1**2*c1**2*c2*a2+b1**2*c1**2*a3*b3+c2**2*a2**2*a3*b3+
c2**2*a2**2*b1*c1+a3**2*b3**2*b1*c1+a3**2*b3**2*c2*a2) )
return T
@cached_method
def polar_conic(self):
"""
Return the polar conic of the cubic.
OUTPUT:
Given the ternary cubic `f(X,Y,Z)`, this method returns the
symmetric matrix `A(x,y,z)` defined by
.. math::
x f_X + y f_Y + z f_Z = (X,Y,Z) \cdot A(x,y,z) \cdot (X,Y,Z)^t
EXAMPLES::
sage: R.<x,y,z,X,Y,Z,a30,a21,a12,a03,a20,a11,a02,a10,a01,a00> = QQ[]
sage: p = ( a30*x^3 + a21*x^2*y + a12*x*y^2 + a03*y^3 + a20*x^2*z +
... a11*x*y*z + a02*y^2*z + a10*x*z^2 + a01*y*z^2 + a00*z^3 )
sage: cubic = invariant_theory.ternary_cubic(p, x,y,z)
sage: cubic.polar_conic()
[ 3*x*a30 + y*a21 + z*a20 x*a21 + y*a12 + 1/2*z*a11 x*a20 + 1/2*y*a11 + z*a10]
[x*a21 + y*a12 + 1/2*z*a11 x*a12 + 3*y*a03 + z*a02 1/2*x*a11 + y*a02 + z*a01]
[x*a20 + 1/2*y*a11 + z*a10 1/2*x*a11 + y*a02 + z*a01 x*a10 + y*a01 + 3*z*a00]
sage: polar_eqn = X*p.derivative(x) + Y*p.derivative(y) + Z*p.derivative(z)
sage: polar = invariant_theory.ternary_quadratic(polar_eqn, [x,y,z])
sage: polar.matrix().subs(X=x,Y=y,Z=z) == cubic.polar_conic()
True
"""
a30, a03, a00, a21, a20, a12, a02, a10, a01, a11 = self.coeffs()
if self._homogeneous:
x,y,z = self.variables()
else:
x,y,z = (self._x, self._y, 1)
F = self._ring.base_ring()
A00 = 3*x*a30 + y*a21 + z*a20
A11 = x*a12 + 3*y*a03 + z*a02
A22 = x*a10 + y*a01 + 3*z*a00
A01 = x*a21 + y*a12 + 1/F(2)*z*a11
A02 = x*a20 + 1/F(2)*y*a11 + z*a10
A12 = 1/F(2)*x*a11 + y*a02 + z*a01
polar = matrix(self._ring, [[A00, A01, A02],[A01, A11, A12],[A02, A12, A22]])
return polar
@cached_method
def Hessian(self):
"""
Return the Hessian covariant.
OUTPUT:
The Hessian matrix multiplied with the conventional
normalization factor `1/216`.
EXAMPLES::
sage: R.<x,y,z> = QQ[]
sage: cubic = invariant_theory.ternary_cubic(x^3+y^3+z^3)
sage: cubic.Hessian()
x*y*z
sage: R.<x,y> = QQ[]
sage: cubic = invariant_theory.ternary_cubic(x^3+y^3+1)
sage: cubic.Hessian()
x*y
"""
a30, a03, a00, a21, a20, a12, a02, a10, a01, a11 = self.coeffs()
if self._homogeneous:
x, y, z = self.variables()
else:
x, y, z = self._x, self._y, 1
Uxx = 6*x*a30 + 2*y*a21 + 2*z*a20
Uxy = 2*x*a21 + 2*y*a12 + z*a11
Uxz = 2*x*a20 + y*a11 + 2*z*a10
Uyy = 2*x*a12 + 6*y*a03 + 2*z*a02
Uyz = x*a11 + 2*y*a02 + 2*z*a01
Uzz = 2*x*a10 + 2*y*a01 + 6*z*a00
H = matrix(self._ring, [[Uxx, Uxy, Uxz],[Uxy, Uyy, Uyz],[Uxz, Uyz, Uzz]])
F = self._ring.base_ring()
return 1/F(216) * H.det()
def Theta_covariant(self):
"""
Return the `\Theta` covariant.
EXAMPLES::
sage: R.<x,y,z> = QQ[]
sage: cubic = invariant_theory.ternary_cubic(x^3+y^3+z^3)
sage: cubic.Theta_covariant()
-x^3*y^3 - x^3*z^3 - y^3*z^3
sage: R.<x,y> = QQ[]
sage: cubic = invariant_theory.ternary_cubic(x^3+y^3+1)
sage: cubic.Theta_covariant()
-x^3*y^3 - x^3 - y^3
sage: R.<x,y,z,a30,a21,a12,a03,a20,a11,a02,a10,a01,a00> = QQ[]
sage: p = ( a30*x^3 + a21*x^2*y + a12*x*y^2 + a03*y^3 + a20*x^2*z +
... a11*x*y*z + a02*y^2*z + a10*x*z^2 + a01*y*z^2 + a00*z^3 )
sage: cubic = invariant_theory.ternary_cubic(p, x,y,z)
sage: len(list(cubic.Theta_covariant()))
6952
"""
U_conic = self.polar_conic().adjoint()
U_coeffs = ( U_conic[0,0], U_conic[1,1], U_conic[2,2],
U_conic[0,1], U_conic[0,2], U_conic[1,2] )
H_conic = TernaryCubic(3, 3, self.Hessian(), self.variables()).polar_conic().adjoint()
H_coeffs = ( H_conic[0,0], H_conic[1,1], H_conic[2,2],
H_conic[0,1], H_conic[0,2], H_conic[1,2] )
quadratic = TernaryQuadratic(3, 2, self._ring.zero(), self.variables())
F = self._ring.base_ring()
return 1/F(9) * _covariant_conic(U_coeffs, H_coeffs, quadratic.monomials())
def J_covariant(self):
"""
Return the J-covariant of the ternary cubic.
EXAMPLES::
sage: R.<x,y,z> = QQ[]
sage: cubic = invariant_theory.ternary_cubic(x^3+y^3+z^3)
sage: cubic.J_covariant()
x^6*y^3 - x^3*y^6 - x^6*z^3 + y^6*z^3 + x^3*z^6 - y^3*z^6
sage: R.<x,y> = QQ[]
sage: cubic = invariant_theory.ternary_cubic(x^3+y^3+1)
sage: cubic.J_covariant()
x^6*y^3 - x^3*y^6 - x^6 + y^6 + x^3 - y^3
"""
F = self._ring.base_ring()
return 1 / F(9) * self._jacobian_determinant(
[self.form(), 3],
[self.Hessian(), 3],
[self.Theta_covariant(), 6])
def syzygy(self, U, S, T, H, Theta, J):
"""
Return the syzygy of the cubic evaluated on the invariants
and covariants.
INPUT:
- ``U``, ``S``, ``T``, ``H``, ``Theta``, ``J`` --
polynomials from the same polynomial ring.
OUTPUT:
0 if evaluated for the form, the S invariant, the T invariant,
the Hessian, the `\Theta` covariant and the J-covariant
of a ternary cubic.
EXAMPLES::
sage: R.<x,y,z> = QQ[]
sage: monomials = (x^3, y^3, z^3, x^2*y, x^2*z, x*y^2,
... y^2*z, x*z^2, y*z^2, x*y*z)
sage: random_poly = sum([ randint(0,10000) * m for m in monomials ])
sage: cubic = invariant_theory.ternary_cubic(random_poly)
sage: U = cubic.form()
sage: S = cubic.S_invariant()
sage: T = cubic.T_invariant()
sage: H = cubic.Hessian()
sage: Theta = cubic.Theta_covariant()
sage: J = cubic.J_covariant()
sage: cubic.syzygy(U, S, T, H, Theta, J)
0
"""
return ( -J**2 + 4*Theta**3 + T*U**2*Theta**2 +
Theta*(-4*S**3*U**4 + 2*S*T*U**3*H - 72*S**2*U**2*H**2
- 18*T*U*H**3 + 108*S*H**4)
-16*S**4*U**5*H - 11*S**2*T*U**4*H**2 -4*T**2*U**3*H**3
+54*S*T*U**2*H**4 -432*S**2*U*H**5 -27*T*H**6 )
######################################################################
class SeveralAlgebraicForms(FormsBase):
    """
    The base class of multiple algebraic forms (i.e. homogeneous polynomials).
    You should only instantiate the derived classes of this base
    class.
    See :class:`AlgebraicForm` for the base class of a single
    algebraic form.
    INPUT:
    - ``forms`` -- a list/tuple/iterable of at least one
      :class:`AlgebraicForm` object, all with the same number of
      variables. Interpreted as multiple homogeneous polynomials in a
      common polynomial ring.
    EXAMPLES::
        sage: from sage.rings.invariant_theory import AlgebraicForm, SeveralAlgebraicForms
        sage: R.<x,y> = QQ[]
        sage: p = AlgebraicForm(2, 2, x^2, (x,y))
        sage: q = AlgebraicForm(2, 2, y^2, (x,y))
        sage: pq = SeveralAlgebraicForms([p, q])
    """
    def __init__(self, forms):
        """
        The Python constructor.
        TESTS::
            sage: from sage.rings.invariant_theory import AlgebraicForm, SeveralAlgebraicForms
            sage: R.<x,y,z> = QQ[]
            sage: p = AlgebraicForm(2, 2, x^2 + y^2)
            sage: q = AlgebraicForm(2, 3, x^3 + y^3)
            sage: r = AlgebraicForm(3, 3, x^3 + y^3 + z^3)
            sage: pq = SeveralAlgebraicForms([p, q])
            sage: pr = SeveralAlgebraicForms([p, r])
            Traceback (most recent call last):
            ...
            ValueError: All forms must be in the same variables.
        """
        forms = tuple(forms)
        # The first form determines the common ring/variables.
        f = forms[0]
        super(SeveralAlgebraicForms, self).__init__(f._n, f._homogeneous, f._ring, f._variables)
        s = set(f._variables)
        # NOTE: the generator variable ``f`` deliberately shadows the outer
        # ``f``; every form is checked against the first form's variables.
        if not all(set(f._variables) == s for f in forms):
            raise ValueError('All forms must be in the same variables.')
        self._forms = forms
    def __cmp__(self, other):
        """
        Compare ``self`` with ``other``.
        EXAMPLES::
            sage: R.<x,y> = QQ[]
            sage: q1 = invariant_theory.quadratic_form(x^2 + y^2)
            sage: q2 = invariant_theory.quadratic_form(x*y)
            sage: from sage.rings.invariant_theory import SeveralAlgebraicForms
            sage: two_inv = SeveralAlgebraicForms([q1, q2])
            sage: cmp(two_inv, 'foo') == 0
            False
            sage: cmp(two_inv, two_inv)
            0
            sage: two_inv.__cmp__(two_inv)
            0
        """
        # NOTE(review): relies on the Python 2 three-way comparison
        # protocol and the ``cmp`` builtin; compare the types first, then
        # the tuples of forms.
        c = cmp(type(self), type(other))
        if c != 0:
            return c
        return cmp(self._forms, other._forms)
    def _repr_(self):
        """
        Return a string representation.
        EXAMPLES::
            sage: R.<x,y> = QQ[]
            sage: q1 = invariant_theory.quadratic_form(x^2 + y^2)
            sage: q2 = invariant_theory.quadratic_form(x*y)
            sage: q3 = invariant_theory.quadratic_form((x + y)^2)
            sage: from sage.rings.invariant_theory import SeveralAlgebraicForms
            sage: SeveralAlgebraicForms([q1])           # indirect doctest
            Binary quadratic with coefficients (1, 1, 0)
            sage: SeveralAlgebraicForms([q1, q2])       # indirect doctest
            Joint binary quadratic with coefficients (1, 1, 0) and binary
            quadratic with coefficients (0, 0, 1)
            sage: SeveralAlgebraicForms([q1, q2, q3])   # indirect doctest
            Joint binary quadratic with coefficients (1, 1, 0), binary
            quadratic with coefficients (0, 0, 1), and binary quadratic
            with coefficients (1, 1, 2)
        """
        # One form: no "Joint" prefix.  Two forms: "A and B".
        # Three or more: comma-separated with an Oxford comma.
        if self.n_forms() == 1:
            return self.get_form(0)._repr_()
        if self.n_forms() == 2:
            return 'Joint ' + self.get_form(0)._repr_().lower() + \
                ' and ' + self.get_form(1)._repr_().lower()
        s = 'Joint '
        for i in range(self.n_forms()-1):
            s += self.get_form(i)._repr_().lower() + ', '
        s += 'and ' + self.get_form(-1)._repr_().lower()
        return s
    def n_forms(self):
        """
        Return the number of forms.
        EXAMPLES::
            sage: R.<x,y> = QQ[]
            sage: q1 = invariant_theory.quadratic_form(x^2 + y^2)
            sage: q2 = invariant_theory.quadratic_form(x*y)
            sage: from sage.rings.invariant_theory import SeveralAlgebraicForms
            sage: q12 = SeveralAlgebraicForms([q1, q2])
            sage: q12.n_forms()
            2
            sage: len(q12) == q12.n_forms()   # syntactic sugar
            True
        """
        return len(self._forms)
    __len__ = n_forms
    def get_form(self, i):
        """
        Return the `i`-th form.
        EXAMPLES::
            sage: R.<x,y> = QQ[]
            sage: q1 = invariant_theory.quadratic_form(x^2 + y^2)
            sage: q2 = invariant_theory.quadratic_form(x*y)
            sage: from sage.rings.invariant_theory import SeveralAlgebraicForms
            sage: q12 = SeveralAlgebraicForms([q1, q2])
            sage: q12.get_form(0) is q1
            True
            sage: q12.get_form(1) is q2
            True
            sage: q12[0] is q12.get_form(0)   # syntactic sugar
            True
            sage: q12[1] is q12.get_form(1)   # syntactic sugar
            True
        """
        return self._forms[i]
    __getitem__ = get_form
    def homogenized(self, var='h'):
        """
        Return form as defined by a homogeneous polynomial.
        INPUT:
        - ``var`` -- either a variable name, variable index or a
          variable (default: ``'h'``).
        OUTPUT:
        The same algebraic form, but defined by a homogeneous
        polynomial.
        EXAMPLES::
            sage: R.<x,y,z> = QQ[]
            sage: q = invariant_theory.quaternary_biquadratic(x^2+1, y^2+1, [x,y,z])
            sage: q
            Joint quaternary quadratic with coefficients (1, 0, 0, 1, 0, 0, 0, 0, 0, 0)
            and quaternary quadratic with coefficients (0, 1, 0, 1, 0, 0, 0, 0, 0, 0)
            sage: q.homogenized()
            Joint quaternary quadratic with coefficients (1, 0, 0, 1, 0, 0, 0, 0, 0, 0)
            and quaternary quadratic with coefficients (0, 1, 0, 1, 0, 0, 0, 0, 0, 0)
            sage: type(q) is type(q.homogenized())
            True
        """
        if self._homogeneous:
            return self
        # Homogenize each constituent form and re-wrap in the same class.
        forms = [f.homogenized(var=var) for f in self._forms]
        return self.__class__(forms)
    def _check_covariant(self, method_name, g=None, invariant=False):
        r"""
        Test whether ``method_name`` actually returns a covariant.
        INPUT:
        - ``method_name`` -- string. The name of the method that
          returns the invariant / covariant to test.
        - ``g`` -- a `SL(n,\CC)` matrix or ``None`` (default). The
          test will be to check that the covariant transforms
          correctly under this special linear group element acting on
          the homogeneous variables. If ``None``, a random matrix will
          be picked.
        - ``invariant`` -- boolean. Whether to additionally test that
          it is an invariant.
        EXAMPLES::
            sage: R.<x,y,z,w> = QQ[]
            sage: q = invariant_theory.quaternary_biquadratic(x^2+y^2+z^2+w^2, x*y+y*z+z*w+x*w)
            sage: q._check_covariant('Delta_invariant', invariant=True)
            sage: q._check_covariant('T_prime_covariant')
            sage: q._check_covariant('T_prime_covariant', invariant=True)
            Traceback (most recent call last):
            ...
            AssertionError: Not invariant.
        """
        assert self._homogeneous
        from sage.matrix.constructor import vector, random_matrix
        if g is None:
            # Pick a random unimodular matrix over the base ring.
            F = self._ring.base_ring()
            g = random_matrix(F, self._n, algorithm='unimodular')
        v = vector(self.variables())
        g_v = g*v
        # Substitution dictionary implementing the variable transformation.
        transform = dict( (v[i], g_v[i]) for i in range(self._n) )
        # The covariant of the transformed form
        transformed = [f.transformed(transform) for f in self._forms]
        g_self = self.__class__(transformed)
        cov_g = getattr(g_self, method_name)()
        # The transform of the covariant
        g_cov = getattr(self, method_name)().subs(transform)
        # they must be the same
        assert (g_cov - cov_g).is_zero(), 'Not covariant.'
        if invariant:
            cov = getattr(self, method_name)()
            assert (cov - cov_g).is_zero(), 'Not invariant.'
######################################################################
class TwoAlgebraicForms(SeveralAlgebraicForms):
    """
    Base class for a pair of algebraic forms.
    This specialization of :class:`SeveralAlgebraicForms` adds the
    convenience accessors :meth:`first` and :meth:`second` for the two
    constituent forms.
    """
    def first(self):
        """
        Return the first of the two forms.
        OUTPUT:
        The first algebraic form used in the definition.
        EXAMPLES::
            sage: R.<x,y> = QQ[]
            sage: q0 = invariant_theory.quadratic_form(x^2 + y^2)
            sage: q1 = invariant_theory.quadratic_form(x*y)
            sage: from sage.rings.invariant_theory import TwoAlgebraicForms
            sage: q = TwoAlgebraicForms([q0, q1])
            sage: q.first() is q0
            True
            sage: q.get_form(0) is q0
            True
            sage: q.first().polynomial()
            x^2 + y^2
        """
        return self.get_form(0)

    def second(self):
        """
        Return the second of the two forms.
        OUTPUT:
        The second algebraic form used in the definition.
        EXAMPLES::
            sage: R.<x,y> = QQ[]
            sage: q0 = invariant_theory.quadratic_form(x^2 + y^2)
            sage: q1 = invariant_theory.quadratic_form(x*y)
            sage: from sage.rings.invariant_theory import TwoAlgebraicForms
            sage: q = TwoAlgebraicForms([q0, q1])
            sage: q.second() is q1
            True
            sage: q.get_form(1) is q1
            True
            sage: q.second().polynomial()
            x*y
        """
        return self.get_form(1)
######################################################################
class TwoQuaternaryQuadratics(TwoAlgebraicForms):
"""
Invariant theory of two quaternary quadratics.
You should use the :class:`invariant_theory
<InvariantTheoryFactory>` factory object to construct instances
of this class. See
:meth:`~InvariantTheoryFactory.quaternary_biquadratics` for
details.
REFERENCES:
.. [Salmon]
G. Salmon: "A Treatise on the Analytic Geometry of Three
Dimensions", section on "Invariants and Covariants of
Systems of Quadrics".
TESTS::
sage: R.<w,x,y,z> = QQ[]
sage: inv = invariant_theory.quaternary_biquadratic(w^2+x^2, y^2+z^2, w, x, y, z)
sage: inv
Joint quaternary quadratic with coefficients (1, 1, 0, 0, 0, 0, 0, 0, 0, 0) and
quaternary quadratic with coefficients (0, 0, 1, 1, 0, 0, 0, 0, 0, 0)
sage: TestSuite(inv).run()
sage: q1 = 73*x^2 + 96*x*y - 11*y^2 - 74*x*z - 10*y*z + 66*z^2 + 4*x + 63*y - 11*z + 57
sage: q2 = 61*x^2 - 100*x*y - 72*y^2 - 38*x*z + 85*y*z + 95*z^2 - 81*x + 39*y + 23*z - 7
sage: biquadratic = invariant_theory.quaternary_biquadratic(q1, q2, [x,y,z]).homogenized()
sage: biquadratic._check_covariant('Delta_invariant', invariant=True)
sage: biquadratic._check_covariant('Delta_prime_invariant', invariant=True)
sage: biquadratic._check_covariant('Theta_invariant', invariant=True)
sage: biquadratic._check_covariant('Theta_prime_invariant', invariant=True)
sage: biquadratic._check_covariant('Phi_invariant', invariant=True)
sage: biquadratic._check_covariant('T_covariant')
sage: biquadratic._check_covariant('T_prime_covariant')
sage: biquadratic._check_covariant('J_covariant')
"""
def Delta_invariant(self):
"""
Return the `\Delta` invariant.
EXAMPLES::
sage: R.<x,y,z,t,a0,a1,a2,a3,b0,b1,b2,b3,b4,b5,A0,A1,A2,A3,B0,B1,B2,B3,B4,B5> = QQ[]
sage: p1 = a0*x^2 + a1*y^2 + a2*z^2 + a3
sage: p1 += b0*x*y + b1*x*z + b2*x + b3*y*z + b4*y + b5*z
sage: p2 = A0*x^2 + A1*y^2 + A2*z^2 + A3
sage: p2 += B0*x*y + B1*x*z + B2*x + B3*y*z + B4*y + B5*z
sage: q = invariant_theory.quaternary_biquadratic(p1, p2, [x, y, z])
sage: coeffs = det(t * q[0].matrix() + q[1].matrix()).polynomial(t).coeffs()
sage: q.Delta_invariant() == coeffs[4]
True
"""
return self.get_form(0).matrix().det()
def Delta_prime_invariant(self):
r"""
Return the `\Delta'` invariant.
EXAMPLES::
sage: R.<x,y,z,t,a0,a1,a2,a3,b0,b1,b2,b3,b4,b5,A0,A1,A2,A3,B0,B1,B2,B3,B4,B5> = QQ[]
sage: p1 = a0*x^2 + a1*y^2 + a2*z^2 + a3
sage: p1 += b0*x*y + b1*x*z + b2*x + b3*y*z + b4*y + b5*z
sage: p2 = A0*x^2 + A1*y^2 + A2*z^2 + A3
sage: p2 += B0*x*y + B1*x*z + B2*x + B3*y*z + B4*y + B5*z
sage: q = invariant_theory.quaternary_biquadratic(p1, p2, [x, y, z])
sage: coeffs = det(t * q[0].matrix() + q[1].matrix()).polynomial(t).coeffs()
sage: q.Delta_prime_invariant() == coeffs[0]
True
"""
return self.get_form(1).matrix().det()
    def _Theta_helper(self, scaled_coeffs_1, scaled_coeffs_2):
        """
        Internal helper method for :meth:`Theta_invariant` and
        :meth:`Theta_prime_invariant`.
        INPUT:
        - ``scaled_coeffs_1``, ``scaled_coeffs_2`` -- the ten scaled
          coefficients of the two quadratic forms.
        OUTPUT:
        A closed-form polynomial in the coefficients; every term is
        linear in the second form's coefficients (one factor from
        ``A0``..``B5``).
        TESTS::
            sage: R.<w,x,y,z> = QQ[]
            sage: inv = invariant_theory.quaternary_biquadratic(w^2+x^2, y^2+z^2, w, x, y, z)
            sage: inv._Theta_helper([1]*10, [2]*10)
            0
        """
        a0, a1, a2, a3, b0, b1, b2, b3, b4, b5 = scaled_coeffs_1
        A0, A1, A2, A3, B0, B1, B2, B3, B4, B5 = scaled_coeffs_2
        return a1*a2*a3*A0 - a3*b3**2*A0 - a2*b4**2*A0 + 2*b3*b4*b5*A0 - a1*b5**2*A0 \
            + a0*a2*a3*A1 - a3*b1**2*A1 - a2*b2**2*A1 + 2*b1*b2*b5*A1 - a0*b5**2*A1 \
            + a0*a1*a3*A2 - a3*b0**2*A2 - a1*b2**2*A2 + 2*b0*b2*b4*A2 - a0*b4**2*A2 \
            + a0*a1*a2*A3 - a2*b0**2*A3 - a1*b1**2*A3 + 2*b0*b1*b3*A3 - a0*b3**2*A3 \
            - 2*a2*a3*b0*B0 + 2*a3*b1*b3*B0 + 2*a2*b2*b4*B0 - 2*b2*b3*b5*B0 \
            - 2*b1*b4*b5*B0 + 2*b0*b5**2*B0 - 2*a1*a3*b1*B1 + 2*a3*b0*b3*B1 \
            - 2*b2*b3*b4*B1 + 2*b1*b4**2*B1 + 2*a1*b2*b5*B1 - 2*b0*b4*b5*B1 \
            - 2*a1*a2*b2*B2 + 2*b2*b3**2*B2 + 2*a2*b0*b4*B2 - 2*b1*b3*b4*B2 \
            + 2*a1*b1*b5*B2 - 2*b0*b3*b5*B2 + 2*a3*b0*b1*B3 - 2*a0*a3*b3*B3 \
            + 2*b2**2*b3*B3 - 2*b1*b2*b4*B3 - 2*b0*b2*b5*B3 + 2*a0*b4*b5*B3 \
            + 2*a2*b0*b2*B4 - 2*b1*b2*b3*B4 - 2*a0*a2*b4*B4 + 2*b1**2*b4*B4 \
            - 2*b0*b1*b5*B4 + 2*a0*b3*b5*B4 + 2*a1*b1*b2*B5 - 2*b0*b2*b3*B5 \
            - 2*b0*b1*b4*B5 + 2*a0*b3*b4*B5 - 2*a0*a1*b5*B5 + 2*b0**2*b5*B5
def Theta_invariant(self):
r"""
Return the `\Theta` invariant.
EXAMPLES::
sage: R.<x,y,z,t,a0,a1,a2,a3,b0,b1,b2,b3,b4,b5,A0,A1,A2,A3,B0,B1,B2,B3,B4,B5> = QQ[]
sage: p1 = a0*x^2 + a1*y^2 + a2*z^2 + a3
sage: p1 += b0*x*y + b1*x*z + b2*x + b3*y*z + b4*y + b5*z
sage: p2 = A0*x^2 + A1*y^2 + A2*z^2 + A3
sage: p2 += B0*x*y + B1*x*z + B2*x + B3*y*z + B4*y + B5*z
sage: q = invariant_theory.quaternary_biquadratic(p1, p2, [x, y, z])
sage: coeffs = det(t * q[0].matrix() + q[1].matrix()).polynomial(t).coeffs()
sage: q.Theta_invariant() == coeffs[3]
True
"""
return self._Theta_helper(self.get_form(0).scaled_coeffs(), self.get_form(1).scaled_coeffs())
def Theta_prime_invariant(self):
r"""
Return the `\Theta'` invariant.
EXAMPLES::
sage: R.<x,y,z,t,a0,a1,a2,a3,b0,b1,b2,b3,b4,b5,A0,A1,A2,A3,B0,B1,B2,B3,B4,B5> = QQ[]
sage: p1 = a0*x^2 + a1*y^2 + a2*z^2 + a3
sage: p1 += b0*x*y + b1*x*z + b2*x + b3*y*z + b4*y + b5*z
sage: p2 = A0*x^2 + A1*y^2 + A2*z^2 + A3
sage: p2 += B0*x*y + B1*x*z + B2*x + B3*y*z + B4*y + B5*z
sage: q = invariant_theory.quaternary_biquadratic(p1, p2, [x, y, z])
sage: coeffs = det(t * q[0].matrix() + q[1].matrix()).polynomial(t).coeffs()
sage: q.Theta_prime_invariant() == coeffs[1]
True
"""
return self._Theta_helper(self.get_form(1).scaled_coeffs(), self.get_form(0).scaled_coeffs())
    def Phi_invariant(self):
        r"""
        Return the `\Phi` invariant.
        EXAMPLES::
            sage: R.<x,y,z,t,a0,a1,a2,a3,b0,b1,b2,b3,b4,b5,A0,A1,A2,A3,B0,B1,B2,B3,B4,B5> = QQ[]
            sage: p1 = a0*x^2 + a1*y^2 + a2*z^2 + a3
            sage: p1 += b0*x*y + b1*x*z + b2*x + b3*y*z + b4*y + b5*z
            sage: p2 = A0*x^2 + A1*y^2 + A2*z^2 + A3
            sage: p2 += B0*x*y + B1*x*z + B2*x + B3*y*z + B4*y + B5*z
            sage: q = invariant_theory.quaternary_biquadratic(p1, p2, [x, y, z])
            sage: coeffs = det(t * q[0].matrix() + q[1].matrix()).polynomial(t).coeffs()
            sage: q.Phi_invariant() == coeffs[2]
            True
        """
        # Closed-form polynomial in the scaled coefficients of both forms;
        # each term is quadratic in the second form's coefficients.
        a0, a1, a2, a3, b0, b1, b2, b3, b4, b5 = self.get_form(0).scaled_coeffs()
        A0, A1, A2, A3, B0, B1, B2, B3, B4, B5 = self.get_form(1).scaled_coeffs()
        return a2*a3*A0*A1 - b5**2*A0*A1 + a1*a3*A0*A2 - b4**2*A0*A2 + a0*a3*A1*A2 \
            - b2**2*A1*A2 + a1*a2*A0*A3 - b3**2*A0*A3 + a0*a2*A1*A3 - b1**2*A1*A3 \
            + a0*a1*A2*A3 - b0**2*A2*A3 - 2*a3*b0*A2*B0 + 2*b2*b4*A2*B0 - 2*a2*b0*A3*B0 \
            + 2*b1*b3*A3*B0 - a2*a3*B0**2 + b5**2*B0**2 - 2*a3*b1*A1*B1 + 2*b2*b5*A1*B1 \
            - 2*a1*b1*A3*B1 + 2*b0*b3*A3*B1 + 2*a3*b3*B0*B1 - 2*b4*b5*B0*B1 - a1*a3*B1**2 \
            + b4**2*B1**2 - 2*a2*b2*A1*B2 + 2*b1*b5*A1*B2 - 2*a1*b2*A2*B2 + 2*b0*b4*A2*B2 \
            + 2*a2*b4*B0*B2 - 2*b3*b5*B0*B2 - 2*b3*b4*B1*B2 + 2*a1*b5*B1*B2 - a1*a2*B2**2 \
            + b3**2*B2**2 - 2*a3*b3*A0*B3 + 2*b4*b5*A0*B3 + 2*b0*b1*A3*B3 - 2*a0*b3*A3*B3 \
            + 2*a3*b1*B0*B3 - 2*b2*b5*B0*B3 + 2*a3*b0*B1*B3 - 2*b2*b4*B1*B3 \
            + 4*b2*b3*B2*B3 - 2*b1*b4*B2*B3 - 2*b0*b5*B2*B3 - a0*a3*B3**2 + b2**2*B3**2 \
            - 2*a2*b4*A0*B4 + 2*b3*b5*A0*B4 + 2*b0*b2*A2*B4 - 2*a0*b4*A2*B4 \
            + 2*a2*b2*B0*B4 - 2*b1*b5*B0*B4 - 2*b2*b3*B1*B4 + 4*b1*b4*B1*B4 \
            - 2*b0*b5*B1*B4 + 2*a2*b0*B2*B4 - 2*b1*b3*B2*B4 - 2*b1*b2*B3*B4 \
            + 2*a0*b5*B3*B4 - a0*a2*B4**2 + b1**2*B4**2 + 2*b3*b4*A0*B5 - 2*a1*b5*A0*B5 \
            + 2*b1*b2*A1*B5 - 2*a0*b5*A1*B5 - 2*b2*b3*B0*B5 - 2*b1*b4*B0*B5 \
            + 4*b0*b5*B0*B5 + 2*a1*b2*B1*B5 - 2*b0*b4*B1*B5 + 2*a1*b1*B2*B5 \
            - 2*b0*b3*B2*B5 - 2*b0*b2*B3*B5 + 2*a0*b4*B3*B5 - 2*b0*b1*B4*B5 \
            + 2*a0*b3*B4*B5 - a0*a1*B5**2 + b0**2*B5**2
    def _T_helper(self, scaled_coeffs_1, scaled_coeffs_2):
        """
        Internal helper method for :meth:`T_covariant` and
        :meth:`T_prime_covariant`.
        INPUT:
        - ``scaled_coeffs_1``, ``scaled_coeffs_2`` -- the ten scaled
          coefficients of the two quadratic forms.
        OUTPUT:
        The quadratic polynomial in the variables obtained by evaluating
        the symmetric matrix ``T`` whose entries are defined below.
        TESTS::
            sage: R.<w,x,y,z> = QQ[]
            sage: inv = invariant_theory.quaternary_biquadratic(w^2+x^2, y^2+z^2, w, x, y, z)
            sage: inv._T_helper([1]*10, [2]*10)
            0
        """
        a0, a1, a2, a3, b0, b1, b2, b3, b4, b5 = scaled_coeffs_1
        A0, A1, A2, A3, B0, B1, B2, B3, B4, B5 = scaled_coeffs_2
        # Construct the entries of the 4x4 matrix T using symmetries:
        # cyclic: a0 -> a1 -> a2 -> a3 -> a0,  b0->b3->b5->b2->b0, b1->b4->b1
        # flip:   a0<->a1, b1<->b3, b2<->b4
        # Only T00 (a diagonal entry) and T01 (an off-diagonal entry) are
        # written out; the other entries are obtained by permuting the
        # arguments according to the symmetries above.
        def T00(a0, a1, a2, a3, b0, b1, b2, b3, b4, b5, A0, A1, A2, A3, B0, B1, B2, B3, B4, B5):
            # Diagonal entry of T in closed form.
            return a0*a3*A0*A1*A2 - b2**2*A0*A1*A2 + a0*a2*A0*A1*A3 - b1**2*A0*A1*A3 \
                + a0*a1*A0*A2*A3 - b0**2*A0*A2*A3 - a0*a3*A2*B0**2 + b2**2*A2*B0**2 \
                - a0*a2*A3*B0**2 + b1**2*A3*B0**2 - 2*b0*b1*A3*B0*B1 + 2*a0*b3*A3*B0*B1 \
                - a0*a3*A1*B1**2 + b2**2*A1*B1**2 - a0*a1*A3*B1**2 + b0**2*A3*B1**2 \
                - 2*b0*b2*A2*B0*B2 + 2*a0*b4*A2*B0*B2 - 2*b1*b2*A1*B1*B2 + 2*a0*b5*A1*B1*B2 \
                - a0*a2*A1*B2**2 + b1**2*A1*B2**2 - a0*a1*A2*B2**2 + b0**2*A2*B2**2 \
                + 2*b0*b1*A0*A3*B3 - 2*a0*b3*A0*A3*B3 + 2*a0*a3*B0*B1*B3 - 2*b2**2*B0*B1*B3 \
                + 2*b1*b2*B0*B2*B3 - 2*a0*b5*B0*B2*B3 + 2*b0*b2*B1*B2*B3 - 2*a0*b4*B1*B2*B3 \
                - 2*b0*b1*B2**2*B3 + 2*a0*b3*B2**2*B3 - a0*a3*A0*B3**2 + b2**2*A0*B3**2 \
                + 2*b0*b2*A0*A2*B4 - 2*a0*b4*A0*A2*B4 + 2*b1*b2*B0*B1*B4 - 2*a0*b5*B0*B1*B4 \
                - 2*b0*b2*B1**2*B4 + 2*a0*b4*B1**2*B4 + 2*a0*a2*B0*B2*B4 - 2*b1**2*B0*B2*B4 \
                + 2*b0*b1*B1*B2*B4 - 2*a0*b3*B1*B2*B4 - 2*b1*b2*A0*B3*B4 + 2*a0*b5*A0*B3*B4 \
                - a0*a2*A0*B4**2 + b1**2*A0*B4**2 + 2*b1*b2*A0*A1*B5 - 2*a0*b5*A0*A1*B5 \
                - 2*b1*b2*B0**2*B5 + 2*a0*b5*B0**2*B5 + 2*b0*b2*B0*B1*B5 - 2*a0*b4*B0*B1*B5 \
                + 2*b0*b1*B0*B2*B5 - 2*a0*b3*B0*B2*B5 + 2*a0*a1*B1*B2*B5 - 2*b0**2*B1*B2*B5 \
                - 2*b0*b2*A0*B3*B5 + 2*a0*b4*A0*B3*B5 - 2*b0*b1*A0*B4*B5 + 2*a0*b3*A0*B4*B5 \
                - a0*a1*A0*B5**2 + b0**2*A0*B5**2
        def T01(a0, a1, a2, a3, b0, b1, b2, b3, b4, b5, A0, A1, A2, A3, B0, B1, B2, B3, B4, B5):
            # Off-diagonal entry of T in closed form.
            return a3*b0*A0*A1*A2 - b2*b4*A0*A1*A2 + a2*b0*A0*A1*A3 - b1*b3*A0*A1*A3 \
                + a0*a1*A2*A3*B0 - b0**2*A2*A3*B0 - a3*b0*A2*B0**2 + b2*b4*A2*B0**2 \
                - a2*b0*A3*B0**2 + b1*b3*A3*B0**2 - b0*b1*A1*A3*B1 + a0*b3*A1*A3*B1 \
                - a1*b1*A3*B0*B1 + b0*b3*A3*B0*B1 - a3*b0*A1*B1**2 + b2*b4*A1*B1**2 \
                - b0*b2*A1*A2*B2 + a0*b4*A1*A2*B2 - a1*b2*A2*B0*B2 + b0*b4*A2*B0*B2 \
                - b2*b3*A1*B1*B2 - b1*b4*A1*B1*B2 + 2*b0*b5*A1*B1*B2 - a2*b0*A1*B2**2 \
                + b1*b3*A1*B2**2 + a1*b1*A0*A3*B3 - b0*b3*A0*A3*B3 + b0*b1*A3*B0*B3 \
                - a0*b3*A3*B0*B3 - a0*a1*A3*B1*B3 + b0**2*A3*B1*B3 + 2*a3*b0*B0*B1*B3 \
                - 2*b2*b4*B0*B1*B3 + b2*b3*B0*B2*B3 + b1*b4*B0*B2*B3 - 2*b0*b5*B0*B2*B3 \
                + a1*b2*B1*B2*B3 - b0*b4*B1*B2*B3 - a1*b1*B2**2*B3 + b0*b3*B2**2*B3 \
                - a3*b0*A0*B3**2 + b2*b4*A0*B3**2 + b0*b2*B2*B3**2 - a0*b4*B2*B3**2 \
                + a1*b2*A0*A2*B4 - b0*b4*A0*A2*B4 + b0*b2*A2*B0*B4 - a0*b4*A2*B0*B4 \
                + b2*b3*B0*B1*B4 + b1*b4*B0*B1*B4 - 2*b0*b5*B0*B1*B4 - a1*b2*B1**2*B4 \
                + b0*b4*B1**2*B4 - a0*a1*A2*B2*B4 + b0**2*A2*B2*B4 + 2*a2*b0*B0*B2*B4 \
                - 2*b1*b3*B0*B2*B4 + a1*b1*B1*B2*B4 - b0*b3*B1*B2*B4 - b2*b3*A0*B3*B4 \
                - b1*b4*A0*B3*B4 + 2*b0*b5*A0*B3*B4 - b0*b2*B1*B3*B4 + a0*b4*B1*B3*B4 \
                - b0*b1*B2*B3*B4 + a0*b3*B2*B3*B4 - a2*b0*A0*B4**2 + b1*b3*A0*B4**2 \
                + b0*b1*B1*B4**2 - a0*b3*B1*B4**2 + b2*b3*A0*A1*B5 + b1*b4*A0*A1*B5 \
                - 2*b0*b5*A0*A1*B5 - b2*b3*B0**2*B5 - b1*b4*B0**2*B5 + 2*b0*b5*B0**2*B5 \
                + b0*b2*A1*B1*B5 - a0*b4*A1*B1*B5 + a1*b2*B0*B1*B5 - b0*b4*B0*B1*B5 \
                + b0*b1*A1*B2*B5 - a0*b3*A1*B2*B5 + a1*b1*B0*B2*B5 - b0*b3*B0*B2*B5 \
                - a1*b2*A0*B3*B5 + b0*b4*A0*B3*B5 - b0*b2*B0*B3*B5 + a0*b4*B0*B3*B5 \
                + a0*a1*B2*B3*B5 - b0**2*B2*B3*B5 - a1*b1*A0*B4*B5 + b0*b3*A0*B4*B5 \
                - b0*b1*B0*B4*B5 + a0*b3*B0*B4*B5 + a0*a1*B1*B4*B5 - b0**2*B1*B4*B5 \
                - a0*a1*B0*B5**2 + b0**2*B0*B5**2
        # Remaining entries via the cyclic and flip symmetries above.
        t00 = T00(a0, a1, a2, a3, b0, b1, b2, b3, b4, b5, A0, A1, A2, A3, B0, B1, B2, B3, B4, B5)
        t11 = T00(a1, a2, a3, a0, b3, b4, b0, b5, b1, b2, A1, A2, A3, A0, B3, B4, B0, B5, B1, B2)
        t22 = T00(a2, a3, a0, a1, b5, b1, b3, b2, b4, b0, A2, A3, A0, A1, B5, B1, B3, B2, B4, B0)
        t33 = T00(a3, a0, a1, a2, b2, b4, b5, b0, b1, b3, A3, A0, A1, A2, B2, B4, B5, B0, B1, B3)
        t01 = T01(a0, a1, a2, a3, b0, b1, b2, b3, b4, b5, A0, A1, A2, A3, B0, B1, B2, B3, B4, B5)
        t12 = T01(a1, a2, a3, a0, b3, b4, b0, b5, b1, b2, A1, A2, A3, A0, B3, B4, B0, B5, B1, B2)
        t23 = T01(a2, a3, a0, a1, b5, b1, b3, b2, b4, b0, A2, A3, A0, A1, B5, B1, B3, B2, B4, B0)
        t30 = T01(a3, a0, a1, a2, b2, b4, b5, b0, b1, b3, A3, A0, A1, A2, B2, B4, B5, B0, B1, B3)
        t02 = T01(a0, a2, a3, a1, b1, b2, b0, b5, b3, b4, A0, A2, A3, A1, B1, B2, B0, B5, B3, B4)
        t13 = T01(a1, a3, a0, a2, b4, b0, b3, b2, b5, b1, A1, A3, A0, A2, B4, B0, B3, B2, B5, B1)
        if self._homogeneous:
            w, x, y, z = self._variables
        else:
            # Inhomogeneous case: the fourth variable is specialized to 1.
            w, x, y = self._variables[0:3]
            z = self._ring.one()
        # Evaluate (w,x,y,z) * T * (w,x,y,z)^t.
        return t00*w*w + 2*t01*w*x + 2*t02*w*y + 2*t30*w*z + t11*x*x + 2*t12*x*y \
            + 2*t13*x*z + t22*y*y + 2*t23*y*z + t33*z*z
def T_covariant(self):
"""
The $T$-covariant.
EXAMPLES::
sage: R.<x,y,z,t,a0,a1,a2,a3,b0,b1,b2,b3,b4,b5,A0,A1,A2,A3,B0,B1,B2,B3,B4,B5> = QQ[]
sage: p1 = a0*x^2 + a1*y^2 + a2*z^2 + a3
sage: p1 += b0*x*y + b1*x*z + b2*x + b3*y*z + b4*y + b5*z
sage: p2 = A0*x^2 + A1*y^2 + A2*z^2 + A3
sage: p2 += B0*x*y + B1*x*z + B2*x + B3*y*z + B4*y + B5*z
sage: q = invariant_theory.quaternary_biquadratic(p1, p2, [x, y, z])
sage: T = invariant_theory.quaternary_quadratic(q.T_covariant(), [x,y,z]).matrix()
sage: M = q[0].matrix().adjoint() + t*q[1].matrix().adjoint()
sage: M = M.adjoint().apply_map( # long time (4s on my thinkpad W530)
....: lambda m: m.coefficient(t))
sage: M == q.Delta_invariant()*T # long time
True
"""
return self._T_helper(self.get_form(0).scaled_coeffs(), self.get_form(1).scaled_coeffs())
def T_prime_covariant(self):
"""
The $T'$-covariant.
EXAMPLES::
sage: R.<x,y,z,t,a0,a1,a2,a3,b0,b1,b2,b3,b4,b5,A0,A1,A2,A3,B0,B1,B2,B3,B4,B5> = QQ[]
sage: p1 = a0*x^2 + a1*y^2 + a2*z^2 + a3
sage: p1 += b0*x*y + b1*x*z + b2*x + b3*y*z + b4*y + b5*z
sage: p2 = A0*x^2 + A1*y^2 + A2*z^2 + A3
sage: p2 += B0*x*y + B1*x*z + B2*x + B3*y*z + B4*y + B5*z
sage: q = invariant_theory.quaternary_biquadratic(p1, p2, [x, y, z])
sage: Tprime = invariant_theory.quaternary_quadratic(
....: q.T_prime_covariant(), [x,y,z]).matrix()
sage: M = q[0].matrix().adjoint() + t*q[1].matrix().adjoint()
sage: M = M.adjoint().apply_map( # long time (4s on my thinkpad W530)
....: lambda m: m.coefficient(t^2))
sage: M == q.Delta_prime_invariant() * Tprime # long time
True
"""
return self._T_helper(self.get_form(1).scaled_coeffs(), self.get_form(0).scaled_coeffs())
def J_covariant(self):
"""
The $J$-covariant.
This is the Jacobian determinant of the two biquadratics, the
$T$-covariant, and the $T'$-covariant with respect to the four
homogeneous variables.
EXAMPLES::
sage: R.<w,x,y,z,a0,a1,a2,a3,A0,A1,A2,A3> = QQ[]
sage: p1 = a0*x^2 + a1*y^2 + a2*z^2 + a3*w^2
sage: p2 = A0*x^2 + A1*y^2 + A2*z^2 + A3*w^2
sage: q = invariant_theory.quaternary_biquadratic(p1, p2, [w, x, y, z])
sage: q.J_covariant().factor()
z * y * x * w * (a3*A2 - a2*A3) * (a3*A1 - a1*A3) * (-a2*A1 + a1*A2)
* (a3*A0 - a0*A3) * (-a2*A0 + a0*A2) * (-a1*A0 + a0*A1)
"""
F = self._ring.base_ring()
return 1/F(16) * self._jacobian_determinant(
[self.first().form(), 2],
[self.second().form(), 2],
[self.T_covariant(), 4],
[self.T_prime_covariant(), 4])
    def syzygy(self, Delta, Theta, Phi, Theta_prime, Delta_prime, U, V, T, T_prime, J):
        """
        Return the syzygy evaluated on the invariants and covariants.

        INPUT:

        - ``Delta``, ``Theta``, ``Phi``, ``Theta_prime``,
          ``Delta_prime``, ``U``, ``V``, ``T``, ``T_prime``, ``J`` --
          polynomials from the same polynomial ring.

        OUTPUT:

        Zero if ``U`` is the first polynomial, ``V`` the second
        polynomial, and the remaining input are the invariants and
        covariants of a quaternary biquadratic.

        EXAMPLES::

            sage: R.<w,x,y,z> = QQ[]
            sage: monomials = [x^2, x*y, y^2, x*z, y*z, z^2, x*w, y*w, z*w, w^2]
            sage: def q_rnd(): return sum(randint(-1000,1000)*m for m in monomials)
            sage: biquadratic = invariant_theory.quaternary_biquadratic(q_rnd(), q_rnd())
            sage: Delta = biquadratic.Delta_invariant()
            sage: Theta = biquadratic.Theta_invariant()
            sage: Phi = biquadratic.Phi_invariant()
            sage: Theta_prime = biquadratic.Theta_prime_invariant()
            sage: Delta_prime = biquadratic.Delta_prime_invariant()
            sage: U = biquadratic.first().polynomial()
            sage: V = biquadratic.second().polynomial()
            sage: T = biquadratic.T_covariant()
            sage: T_prime = biquadratic.T_prime_covariant()
            sage: J = biquadratic.J_covariant()
            sage: biquadratic.syzygy(Delta, Theta, Phi, Theta_prime, Delta_prime, U, V, T, T_prime, J)
            0

        If the arguments are not the invariants and covariants then
        the output is some (generically non-zero) polynomial::

            sage: biquadratic.syzygy(1, 1, 1, 1, 1, 1, 1, 1, 1, x)
            -x^2 + 1
        """
        # The syzygy is a fixed polynomial identity in the ten arguments.
        # The terms below are grouped by total degree in U and V: first the
        # part free of U and V, then the coefficients of U, V, U^2, U*V,
        # V^2, U^3, U^2*V, U*V^2, V^3 and finally the quartic terms.
        # NOTE(review): the expression is transcribed verbatim; do not
        # "simplify" individual terms without re-running the doctest above.
        return -J**2 + \
            Delta * T**4 - Theta * T**3*T_prime + Phi * T**2*T_prime**2 \
            - Theta_prime * T*T_prime**3 + Delta_prime * T_prime**4 + \
            ( (Theta_prime**2 - 2*Delta_prime*Phi) * T_prime**3 -
              (Theta_prime*Phi - 3*Theta*Delta_prime) * T_prime**2*T +
              (Theta*Theta_prime - 4*Delta*Delta_prime) * T_prime*T**2 -
              (Delta*Theta_prime) * T**3
            ) * U + \
            ( (Theta**2 - 2*Delta*Phi)*T**3 -
              (Theta*Phi - 3*Theta_prime*Delta)*T**2*T_prime +
              (Theta*Theta_prime - 4*Delta*Delta_prime)*T*T_prime**2 -
              (Delta_prime*Theta)*T_prime**3
            )* V + \
            ( (Delta*Phi*Delta_prime) * T**2 +
              (3*Delta*Theta_prime*Delta_prime - Theta*Phi*Delta_prime) * T*T_prime +
              (2*Delta*Delta_prime**2 - 2*Theta*Theta_prime*Delta_prime
               + Phi**2*Delta_prime) * T_prime**2
            ) * U**2 + \
            ( (Delta*Theta*Delta_prime + 2*Delta*Phi*Theta_prime - Theta**2*Theta_prime) * T**2 +
              (4*Delta*Phi*Delta_prime - 3*Theta**2*Delta_prime
               - 3*Delta*Theta_prime**2 + Theta*Phi*Theta_prime) * T*T_prime +
              (Delta*Theta_prime*Delta_prime + 2*Delta_prime*Phi*Theta
               - Theta*Theta_prime**2) * T_prime**2
            ) * U*V + \
            ( (2*Delta**2*Delta_prime - 2*Delta*Theta*Theta_prime + Delta*Phi**2) * T**2 +
              (3*Delta*Theta*Delta_prime - Delta*Phi*Theta_prime) * T*T_prime +
              Delta*Phi*Delta_prime * T_prime**2
            ) * V**2 + \
            ( (-Delta*Theta*Delta_prime**2) * T +
              (-2*Delta*Phi*Delta_prime**2 + Theta**2*Delta_prime**2) * T_prime
            ) * U**3 + \
            ( (4*Delta**2*Delta_prime**2 - Delta*Theta*Theta_prime*Delta_prime
               - 2*Delta*Phi**2*Delta_prime + Theta**2*Phi*Delta_prime) * T +
              (-5*Delta*Theta*Delta_prime**2 + Delta*Phi*Theta_prime*Delta_prime
               + 2*Theta**2*Theta_prime*Delta_prime - Theta*Phi**2*Delta_prime) * T_prime
            ) * U**2*V + \
            ( (-5*Delta**2*Theta_prime*Delta_prime + Delta*Theta*Phi*Delta_prime
               + 2*Delta*Theta*Theta_prime**2 - Delta*Phi**2*Theta_prime) * T +
              (4*Delta**2*Delta_prime**2 - Delta*Theta*Theta_prime*Delta_prime
               - 2*Delta*Phi**2*Delta_prime + Delta*Phi*Theta_prime**2) * T_prime
            ) * U*V**2 + \
            ( (-2*Delta**2*Phi*Delta_prime + Delta**2*Theta_prime**2) * T +
              (-Delta**2*Theta_prime*Delta_prime) * T_prime
            ) * V**3 + \
            (Delta**2*Delta_prime**3) * U**4 + \
            (-3*Delta**2*Theta_prime*Delta_prime**2 + 3*Delta*Theta*Phi*Delta_prime**2
             - Theta**3*Delta_prime**2) * U**3*V + \
            (-3*Delta**2*Phi*Delta_prime**2 + 3*Delta*Theta**2*Delta_prime**2
             + 3*Delta**2*Theta_prime**2*Delta_prime
             - 3*Delta*Theta*Phi*Theta_prime*Delta_prime
             + Delta*Phi**3*Delta_prime) * U**2*V**2 + \
            (-3*Delta**2*Theta*Delta_prime**2 + 3*Delta**2*Phi*Theta_prime*Delta_prime
             - Delta**2*Theta_prime**3) * U*V**3 + \
            (Delta**3*Delta_prime**2) * V**4
######################################################################
class InvariantTheoryFactory(object):
    """
    Factory object for invariants of multilinear forms.

    Use the global instance ``invariant_theory`` to construct the
    appropriate algebraic-form wrapper for a given polynomial.

    EXAMPLES::

        sage: R.<x,y,z> = QQ[]
        sage: invariant_theory.ternary_cubic(x^3+y^3+z^3)
        Ternary cubic with coefficients (1, 1, 1, 0, 0, 0, 0, 0, 0, 0)
    """

    def __repr__(self):
        """
        Return a string representation.

        OUTPUT:

        String.

        EXAMPLES::

            sage: invariant_theory
            <BLANKLINE>
            Use the invariant_theory object to construct algebraic forms. These
            can then be queried for invariant and covariants. For example,
            <BLANKLINE>
            s...: R.<x,y,z> = QQ[]
            s...: invariant_theory.ternary_cubic(x^3+y^3+z^3)
            Ternary cubic with coefficients (1, 1, 1, 0, 0, 0, 0, 0, 0, 0)
            s...: invariant_theory.ternary_cubic(x^3+y^3+z^3).J_covariant()
            x^6*y^3 - x^3*y^6 - x^6*z^3 + y^6*z^3 + x^3*z^6 - y^3*z^6
        """
        return """
Use the invariant_theory object to construct algebraic forms. These
can then be queried for invariant and covariants. For example,

sage: R.<x,y,z> = QQ[]
sage: invariant_theory.ternary_cubic(x^3+y^3+z^3)
Ternary cubic with coefficients (1, 1, 1, 0, 0, 0, 0, 0, 0, 0)
sage: invariant_theory.ternary_cubic(x^3+y^3+z^3).J_covariant()
x^6*y^3 - x^3*y^6 - x^6*z^3 + y^6*z^3 + x^3*z^6 - y^3*z^6
"""

    def quadratic_form(self, polynomial, *args):
        """
        Invariants of a homogeneous quadratic form.

        INPUT:

        - ``polynomial`` -- a homogeneous or inhomogeneous quadratic form.

        - ``*args`` -- the variables as multiple arguments, or as a
          single list/tuple. If the last argument is ``None``, the
          quadratic is assumed to be inhomogeneous.

        EXAMPLES::

            sage: R.<x,y,z> = QQ[]
            sage: quadratic = x^2+y^2+z^2
            sage: inv = invariant_theory.quadratic_form(quadratic)
            sage: type(inv)
            <class 'sage.rings.invariant_theory.TernaryQuadratic'>

        If some of the ring variables are to be treated as coefficients
        you need to specify the polynomial variables::

            sage: R.<x,y,z, a,b> = QQ[]
            sage: quadratic = a*x^2+b*y^2+z^2+2*y*z
            sage: invariant_theory.quadratic_form(quadratic, x,y,z)
            Ternary quadratic with coefficients (a, b, 1, 0, 0, 2)
            sage: invariant_theory.quadratic_form(quadratic, [x,y,z])  # alternate syntax
            Ternary quadratic with coefficients (a, b, 1, 0, 0, 2)

        Inhomogeneous quadratic forms (see also
        :meth:`inhomogeneous_quadratic_form`) can be specified by
        passing ``None`` as the last variable::

            sage: inhom = quadratic.subs(z=1)
            sage: invariant_theory.quadratic_form(inhom, x,y,None)
            Ternary quadratic with coefficients (a, b, 1, 0, 0, 2)
        """
        variables = _guess_variables(polynomial, *args)
        n = len(variables)
        # Three variables get the specialized ternary wrapper; any other
        # arity falls back to the generic quadratic-form wrapper.
        if n == 3:
            return TernaryQuadratic(3, 2, polynomial, *args)
        else:
            return QuadraticForm(n, 2, polynomial, *args)

    def inhomogeneous_quadratic_form(self, polynomial, *args):
        """
        Invariants of an inhomogeneous quadratic form.

        INPUT:

        - ``polynomial`` -- an inhomogeneous quadratic form.

        - ``*args`` -- the variables as multiple arguments, or as a
          single list/tuple.

        EXAMPLES::

            sage: R.<x,y,z> = QQ[]
            sage: quadratic = x^2+2*y^2+3*x*y+4*x+5*y+6
            sage: inv3 = invariant_theory.inhomogeneous_quadratic_form(quadratic)
            sage: type(inv3)
            <class 'sage.rings.invariant_theory.TernaryQuadratic'>
            sage: inv4 = invariant_theory.inhomogeneous_quadratic_form(x^2+y^2+z^2)
            sage: type(inv4)
            <class 'sage.rings.invariant_theory.QuadraticForm'>
        """
        variables = _guess_variables(polynomial, *args)
        # One extra homogenizing variable is implied by "inhomogeneous".
        n = len(variables) + 1
        if n == 3:
            return TernaryQuadratic(3, 2, polynomial, *args)
        else:
            return QuadraticForm(n, 2, polynomial, *args)

    def binary_quadratic(self, quadratic, *args):
        """
        Invariant theory of a quadratic in two variables.

        INPUT:

        - ``quadratic`` -- a quadratic form.

        - ``x``, ``y`` -- the homogeneous variables. If ``y`` is
          ``None``, the quadratic is assumed to be inhomogeneous.

        REFERENCES:

        ..  http://en.wikipedia.org/wiki/Invariant_of_a_binary_form

        EXAMPLES::

            sage: R.<x,y> = QQ[]
            sage: invariant_theory.binary_quadratic(x^2+y^2)
            Binary quadratic with coefficients (1, 1, 0)

            sage: T.<t> = QQ[]
            sage: invariant_theory.binary_quadratic(t^2 + 2*t + 1, [t])
            Binary quadratic with coefficients (1, 1, 2)
        """
        return QuadraticForm(2, 2, quadratic, *args)

    def quaternary_quadratic(self, quadratic, *args):
        """
        Invariant theory of a quadratic in four variables.

        INPUT:

        - ``quadratic`` -- a quadratic form.

        - ``w``, ``x``, ``y``, ``z`` -- the homogeneous variables. If
          ``z`` is ``None``, the quadratic is assumed to be
          inhomogeneous.

        REFERENCES:

        ..  [WpBinaryForm]
            http://en.wikipedia.org/wiki/Invariant_of_a_binary_form

        EXAMPLES::

            sage: R.<w,x,y,z> = QQ[]
            sage: invariant_theory.quaternary_quadratic(w^2+x^2+y^2+z^2)
            Quaternary quadratic with coefficients (1, 1, 1, 1, 0, 0, 0, 0, 0, 0)

            sage: R.<x,y,z> = QQ[]
            sage: invariant_theory.quaternary_quadratic(1+x^2+y^2+z^2)
            Quaternary quadratic with coefficients (1, 1, 1, 1, 0, 0, 0, 0, 0, 0)
        """
        return QuadraticForm(4, 2, quadratic, *args)

    def binary_quartic(self, quartic, *args, **kwds):
        """
        Invariant theory of a quartic in two variables.

        The algebra of invariants of a quartic form is generated by
        invariants `i`, `j` of degrees 2, 3. This ring is naturally
        isomorphic to the ring of modular forms of level 1, with the
        two generators corresponding to the Eisenstein series `E_4`
        (see
        :meth:`~sage.rings.invariant_theory.BinaryQuartic.EisensteinD`)
        and `E_6` (see
        :meth:`~sage.rings.invariant_theory.BinaryQuartic.EisensteinE`). The
        algebra of covariants is generated by these two invariants
        together with the form `f` of degree 1 and order 4, the
        Hessian `g` (see :meth:`~BinaryQuartic.g_covariant`) of degree
        2 and order 4, and a covariant `h` (see
        :meth:`~BinaryQuartic.h_covariant`) of degree 3 and order
        6. They are related by a syzygy

        .. math::

            j f^3 - g f^2 i + 4 g^3 + h^2 = 0

        of degree 6 and order 12.

        INPUT:

        - ``quartic`` -- a quartic.

        - ``x``, ``y`` -- the homogeneous variables. If ``y`` is
          ``None``, the quartic is assumed to be inhomogeneous.

        REFERENCES:

        ..  http://en.wikipedia.org/wiki/Invariant_of_a_binary_form

        EXAMPLES::

            sage: R.<x,y> = QQ[]
            sage: quartic = invariant_theory.binary_quartic(x^4+y^4)
            sage: quartic
            Binary quartic with coefficients (1, 0, 0, 0, 1)
            sage: type(quartic)
            <class 'sage.rings.invariant_theory.BinaryQuartic'>
        """
        return BinaryQuartic(2, 4, quartic, *args, **kwds)

    def ternary_quadratic(self, quadratic, *args, **kwds):
        """
        Invariants of a quadratic in three variables.

        INPUT:

        - ``quadratic`` -- a homogeneous quadratic in 3 homogeneous
          variables, or an inhomogeneous quadratic in 2 variables.

        - ``x``, ``y``, ``z`` -- the variables. If ``z`` is ``None``,
          the quadratic is assumed to be inhomogeneous.

        REFERENCES:

        ..  http://en.wikipedia.org/wiki/Invariant_of_a_binary_form

        EXAMPLES::

            sage: R.<x,y,z> = QQ[]
            sage: invariant_theory.ternary_quadratic(x^2+y^2+z^2)
            Ternary quadratic with coefficients (1, 1, 1, 0, 0, 0)

            sage: T.<u, v> = QQ[]
            sage: invariant_theory.ternary_quadratic(1+u^2+v^2)
            Ternary quadratic with coefficients (1, 1, 1, 0, 0, 0)

            sage: quadratic = x^2+y^2+z^2
            sage: inv = invariant_theory.ternary_quadratic(quadratic)
            sage: type(inv)
            <class 'sage.rings.invariant_theory.TernaryQuadratic'>
        """
        return TernaryQuadratic(3, 2, quadratic, *args, **kwds)

    def ternary_cubic(self, cubic, *args, **kwds):
        r"""
        Invariants of a cubic in three variables.

        The algebra of invariants of a ternary cubic under `SL_3(\CC)`
        is a polynomial algebra generated by two invariants `S` (see
        :meth:`~sage.rings.invariant_theory.TernaryCubic.S_invariant`)
        and T (see
        :meth:`~sage.rings.invariant_theory.TernaryCubic.T_invariant`)
        of degrees 4 and 6, called Aronhold invariants.

        The ring of covariants is given as follows. The identity
        covariant U of a ternary cubic has degree 1 and order 3. The
        Hessian `H` (see
        :meth:`~sage.rings.invariant_theory.TernaryCubic.Hessian`)
        is a covariant of ternary cubics of degree 3 and order 3.
        There is a covariant `\Theta` (see
        :meth:`~sage.rings.invariant_theory.TernaryCubic.Theta_covariant`)
        of ternary cubics of degree 8 and order 6 that vanishes on
        points `x` lying on the Salmon conic of the polar of `x` with
        respect to the curve and its Hessian curve. The Brioschi
        covariant `J` (see
        :meth:`~sage.rings.invariant_theory.TernaryCubic.J_covariant`)
        is the Jacobian of `U`, `\Theta`, and `H` of degree 12, order
        9. The algebra of covariants of a ternary cubic is generated
        over the ring of invariants by `U`, `\Theta`, `H`, and `J`,
        with a relation

        .. math::

            \begin{split}
              J^2 =& 4 \Theta^3 + T U^2 \Theta^2 +
              \Theta (-4 S^3 U^4 + 2 S T U^3 H
              - 72 S^2 U^2 H^2
              \\ &
              - 18 T U H^3 +  108 S H^4)
              -16 S^4 U^5 H - 11 S^2 T U^4 H^2
              \\ &
              -4 T^2 U^3 H^3
              +54 S T U^2 H^4 -432 S^2 U H^5 -27 T H^6
            \end{split}

        REFERENCES:

        ..  [WpTernaryCubic]
            http://en.wikipedia.org/wiki/Ternary_cubic

        INPUT:

        - ``cubic`` -- a homogeneous cubic in 3 homogeneous variables,
          or an inhomogeneous cubic in 2 variables.

        - ``x``, ``y``, ``z`` -- the variables. If ``z`` is ``None``, the
          cubic is assumed to be inhomogeneous.

        EXAMPLES::

            sage: R.<x,y,z> = QQ[]
            sage: cubic = invariant_theory.ternary_cubic(x^3+y^3+z^3)
            sage: type(cubic)
            <class 'sage.rings.invariant_theory.TernaryCubic'>
        """
        return TernaryCubic(3, 3, cubic, *args, **kwds)

    def quaternary_biquadratic(self, quadratic1, quadratic2, *args, **kwds):
        """
        Invariants of two quadratics in four variables.

        INPUT:

        - ``quadratic1``, ``quadratic2`` -- two polynomials. Either
          homogeneous quadratic in 4 homogeneous variables, or
          inhomogeneous quadratic in 3 variables.

        - ``w``, ``x``, ``y``, ``z`` -- the variables. If ``z`` is
          ``None``, the quadratics are assumed to be inhomogeneous.

        EXAMPLES::

            sage: R.<w,x,y,z> = QQ[]
            sage: q1 = w^2+x^2+y^2+z^2
            sage: q2 = w*x + y*z
            sage: inv = invariant_theory.quaternary_biquadratic(q1, q2)
            sage: type(inv)
            <class 'sage.rings.invariant_theory.TwoQuaternaryQuadratics'>

        Distance between two spheres [Salmon]_ ::

            sage: R.<x,y,z, a,b,c, r1,r2> = QQ[]
            sage: S1 = -r1^2 + x^2 + y^2 + z^2
            sage: S2 = -r2^2 + (x-a)^2 + (y-b)^2 + (z-c)^2
            sage: inv = invariant_theory.quaternary_biquadratic(S1, S2, [x, y, z])
            sage: inv.Delta_invariant()
            -r1^2
            sage: inv.Delta_prime_invariant()
            -r2^2
            sage: inv.Theta_invariant()
            a^2 + b^2 + c^2 - 3*r1^2 - r2^2
            sage: inv.Theta_prime_invariant()
            a^2 + b^2 + c^2 - r1^2 - 3*r2^2
            sage: inv.Phi_invariant()
            2*a^2 + 2*b^2 + 2*c^2 - 3*r1^2 - 3*r2^2
            sage: inv.J_covariant()
            0
        """
        # Wrap each input as a quaternary quadratic, then combine.
        q1 = QuadraticForm(4, 2, quadratic1, *args, **kwds)
        q2 = QuadraticForm(4, 2, quadratic2, *args, **kwds)
        return TwoQuaternaryQuadratics([q1, q2])
# Singleton factory instance: the public entry point for this module.
invariant_theory = InvariantTheoryFactory()
|
# -*- coding: utf-8 -*-
"""Installer for the docxcompose package."""
from setuptools import find_packages
from setuptools import setup
# Test-only dependencies, exposed through both 'test' and 'tests' extras.
tests_require = [
    'pytest',
]


def _read(name):
    """Return the contents of *name*, closing the file handle promptly.

    The previous ``open(name).read()`` pattern left the handles to be
    closed by garbage collection.
    """
    with open(name) as f:
        return f.read()


setup(
    name='docxcompose',
    version='1.3.4.dev0',
    description="Compose .docx documents",
    long_description=(_read("README.rst") + "\n" +
                      _read("HISTORY.txt")),
    # Get more from https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent",
        "License :: OSI Approved :: MIT License",
    ],
    keywords='Python DOCX Word OOXML',
    author='Thomas Buchberger',
    author_email='t.buchberger@4teamwork.ch',
    url='https://github.com/4teamwork/docxcompose',
    license='MIT license',
    packages=find_packages(exclude=['ez_setup']),
    include_package_data=True,
    zip_safe=True,
    install_requires=[
        'lxml',
        'python-docx >= 0.8.8',
        'setuptools',
        'six',
    ],
    extras_require={
        'test': tests_require,
        'tests': tests_require,
    },
    entry_points={
        'console_scripts': [
            'docxcompose = docxcompose.command:main'
        ]
    },
)
|
# Generated by Django 3.1.7 on 2021-04-01 06:35
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
import nautobot.extras.models.statuses
import taggit.managers
class Migration(migrations.Migration):
    """Auto-generated migration (second part of dcim's initial schema).

    Adds relational fields (foreign keys, status, and tag managers) to the
    dcim models created in ``0001_initial_part_1`` — presumably split out
    so the referenced tables from other apps (tenancy, contenttypes,
    extras) exist first; see the ``dependencies`` list.
    """

    initial = True

    # Other apps whose initial tables must exist before these FKs are added.
    dependencies = [
        ("tenancy", "0001_initial"),
        ("contenttypes", "0002_remove_content_type_name"),
        ("extras", "0001_initial_part_1"),
        ("dcim", "0001_initial_part_1"),
    ]

    operations = [
        migrations.AddField(
            model_name="virtualchassis",
            name="tags",
            field=taggit.managers.TaggableManager(through="extras.TaggedItem", to="extras.Tag"),
        ),
        migrations.AddField(
            model_name="site",
            name="region",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="sites",
                to="dcim.region",
            ),
        ),
        migrations.AddField(
            model_name="site",
            name="status",
            field=nautobot.extras.models.statuses.StatusField(
                null=True,
                on_delete=django.db.models.deletion.PROTECT,
                related_name="dcim_site_related",
                to="extras.status",
            ),
        ),
        migrations.AddField(
            model_name="site",
            name="tags",
            field=taggit.managers.TaggableManager(through="extras.TaggedItem", to="extras.Tag"),
        ),
        migrations.AddField(
            model_name="site",
            name="tenant",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.PROTECT,
                related_name="sites",
                to="tenancy.tenant",
            ),
        ),
        # Regions form a tree (MPTT): parent points at the same table.
        migrations.AddField(
            model_name="region",
            name="parent",
            field=mptt.fields.TreeForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="children",
                to="dcim.region",
            ),
        ),
        migrations.AddField(
            model_name="rearporttemplate",
            name="device_type",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE, related_name="rearporttemplates", to="dcim.devicetype"
            ),
        ),
        migrations.AddField(
            model_name="rearport",
            name="_cable_peer_type",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="+",
                to="contenttypes.contenttype",
            ),
        ),
        migrations.AddField(
            model_name="rearport",
            name="cable",
            field=models.ForeignKey(
                blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="+", to="dcim.cable"
            ),
        ),
        migrations.AddField(
            model_name="rearport",
            name="device",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE, related_name="rearports", to="dcim.device"
            ),
        ),
        migrations.AddField(
            model_name="rearport",
            name="tags",
            field=taggit.managers.TaggableManager(through="extras.TaggedItem", to="extras.Tag"),
        ),
        migrations.AddField(
            model_name="rackreservation",
            name="rack",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE, related_name="reservations", to="dcim.rack"
            ),
        ),
        migrations.AddField(
            model_name="rackreservation",
            name="tags",
            field=taggit.managers.TaggableManager(through="extras.TaggedItem", to="extras.Tag"),
        ),
        migrations.AddField(
            model_name="rackreservation",
            name="tenant",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.PROTECT,
                related_name="rackreservations",
                to="tenancy.tenant",
            ),
        ),
    ]
|
def say_hello():
    """Return the greeting string ``"hello"``."""
    return "hello"
# Script entry point: print the greeting when executed directly.
if __name__ == "__main__":
    print(say_hello())
|
# -*- coding: utf-8 -*-
# file: __init__.py
# date: 2021-09-23
from .expt import *
|
# Script to make plots from the RJ-MCMC output
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.cm as cm
import numpy as np
from scipy import stats
from matplotlib.colors import LogNorm
import os
import sys
# Abort early with a helpful message if the RJ-MCMC input file is absent.
if not os.path.exists('input_file'):
    print('*'*50)
    print('Cannot find file: input_file')
    print('Check that you are running this Python code in the outputs directory \n E.g. cd Outputs \n python ../make_plots.py')
    print('*'*50)
    # Exit with a non-zero status so shells/CI can detect the failure
    # (the original sys.exit(0) reported success on this error path).
    sys.exit(1)
# Read some basic data from the input file.
# This can be overwritten by either altering this file, or simply hardwiring
# the various parameters: e.g.
# age_min, age_max = 0, 100
#
# Each non-comment line is "Keyword value [value ...]"; keyword matching is
# case-insensitive. The 'with' block guarantees the file handle is closed
# (the original iterated over a bare open() and leaked the handle), and the
# per-line split is done once instead of repeatedly.
with open('input_file', 'r') as cfg:
    for line in cfg:
        if line[0] == '#' or line == '\n':
            continue  # skip comments or blank lines
        tokens = line.split()
        keyword = tokens[0].upper()
        if keyword == 'INTENSITY_PRIOR':
            I_min, I_max = float(tokens[1]), float(tokens[2])
        elif keyword == 'AGE_BOUNDS':
            age_min, age_max = float(tokens[1]), float(tokens[2])
        elif keyword == 'NUM_CHANGE_POINTS':
            K_min, K_max = int(tokens[1]), int(tokens[2])
        elif keyword == 'CREDIBLE':
            credible = float(tokens[1])
        elif keyword == 'OUTPUT_MODEL':
            output_model_filename = tokens[1]
        elif keyword == 'TRUE_DATA':
            # NOTE: the file name is the *third* token, matching the
            # original code (tokens[2], not tokens[1]).
            true_behaviour_file = tokens[2]
            x_cts_true, y_cts_true = np.loadtxt(os.path.join(os.pardir, true_behaviour_file), unpack=True)
        elif keyword == 'PLOTTING_INTENSITY_RANGE':
            # Overrides any earlier Intensity_prior bounds for plotting.
            I_min, I_max = float(tokens[1]), float(tokens[2])
        elif keyword == 'BURN_IN':
            Burn_in = int(tokens[1])
        elif keyword == 'OUTPUTS_DIRECTORY':
            outputs_directory = tokens[1]
        elif keyword == 'DATA_TITLE':
            data_title = tokens[1]
# read in the various data files that were output by the RJ-MCMC script
# data.dat: one row per datum -- age, age error, intensity, intensity error,
# and a 0/1 stratification flag.
x, x_err, y, y_err, strat = np.loadtxt('data.dat', unpack=True)
strat = [int(a) for a in strat]  # flags are used as list indices below
# Credible-interval envelope, and the various posterior summary curves.
lx, ly = np.loadtxt('credible_lower.dat', unpack=True)
ux, uy = np.loadtxt('credible_upper.dat', unpack=True)
mode_x, mode_y = np.loadtxt('mode.dat', unpack=True)
median_x, median_y = np.loadtxt('median.dat', unpack=True)
av_x, av_y = np.loadtxt('average.dat', unpack=True)
best_x, best_y = np.loadtxt('best_fit.dat', unpack=True)
# Histogram of the number of change points sampled by the chain.
k_index, k_count = np.loadtxt('k_histogram.dat',unpack=True)
print('Building plot of data...')
# plot the data with the density binned by using the number of bins of "num_bins"
num_bins = 20
fig1, ax1 = plt.subplots(figsize=(14,6))
# Split indices by stratification flag: 0 = unstratified, 1 = stratified.
unstratified_index = [index for index,item in enumerate(strat) if item == 0]
stratified_index = [index for index,item in enumerate(strat) if item == 1]
if len(unstratified_index) > 0:
    (line, caps, bars) = ax1.errorbar(x[unstratified_index], y[unstratified_index],xerr=x_err[unstratified_index], yerr=y_err[unstratified_index],
        fmt='o',markerfacecolor='blue',markeredgecolor='k', markeredgewidth = 0.6, ecolor='k', elinewidth=1, capsize=4, markersize=7)
    plt.setp(line,label="Unstratified data") #give label to returned line
if len(stratified_index) > 0:
    (line2, caps, bars) = ax1.errorbar(x[stratified_index], y[stratified_index],xerr=x_err[stratified_index], yerr=y_err[stratified_index],
        fmt='o',markerfacecolor='red',markeredgecolor='k', markeredgewidth = 0.6, ecolor='k', elinewidth=1, capsize=4, markersize=7)
    plt.setp(line2,label="Stratified data") #give label to returned line
ax1.set_xlabel('Time/yr',fontsize=16)
ax1.set_ylabel('Intensity/$\mu$T',fontsize=16)
ax1.xaxis.set_tick_params(labelsize=16)
ax1.yaxis.set_tick_params(labelsize=16)
# Second y-axis: histogram of data ages, to show temporal sampling density.
count_colour = 'g'
ax2 = ax1.twinx()
ax2.hist(x,num_bins,alpha=0.2,color=count_colour,edgecolor='white')
ax2.set_ylabel('Count',fontsize=16,color=count_colour)
for tl in ax2.get_yticklabels():
    tl.set_color(count_colour)
ax1.set_xlim(age_min, age_max)
ax2.yaxis.set_tick_params(labelsize=16)
if 'data_title' in locals(): #check to see if data_title is specified in input file
    if data_title.upper() == 'Lubeck-Paris700'.upper():
        # Special-cased title so the u-umlaut renders via TeX.
        ax2.set_title(r"""$L\"ubeck$-Paris700""",fontsize=20);
    else:
        ax2.set_title(data_title,fontsize=20)
plt.savefig('Data.pdf', bbox_inches='tight',pad_inches=0.0)
plt.close(fig1)
# Make a single plot of the data with mean/mode/median/credible bounds for the posterior
print('Building plot of posterior...')
fig2, ax = plt.subplots (figsize=(14,5))
# Shaded band: the credible interval read from credible_lower/upper.dat.
ax.fill_between(lx, ly, uy, facecolor='orange', alpha=0.5, edgecolor='g', label='%i%% credible interval' % credible)
#a.errorbar(dx[black_pts_index], dy[black_pts_index],xerr=dx_err[black_pts_index], yerr=dn[black_pts_index],fmt='k.', label='Data', elinewidth=0.5)
(line, caps, bars) = ax.errorbar(x, y,xerr=x_err, yerr=y_err,fmt='o',color='blue',ecolor='k', elinewidth=1, capthick=0.7, capsize=4, markersize=5)
plt.setp(line,label="Data") #give label to returned line
# Posterior summary curves on top of the band.
ax.plot(av_x, av_y, 'r', label = 'Average', linewidth=2)
#ax.plot(best_x, best_y, 'b', linewidth=2, label = 'Best fit')
ax.plot(median_x, median_y, 'purple', linewidth=2, label = 'Median')
ax.plot(mode_x, mode_y, 'blue', linewidth=2, label = 'Mode')
if 'x_cts_true' in locals(): #see if "true" data are available to plot --- only for synthetic cases.
    plt.plot(x_cts_true,y_cts_true,'k', linewidth=2, label='Real')
ax.set_ylim(I_min,I_max)
ax.set_xlim(age_min, age_max)
ax.set_title('Posterior distribution of intensity',fontsize=20)
ax.set_xlabel('Time/yr',fontsize=16)
ax.set_ylabel('Intensity/$\mu$T',fontsize=16)
ax.legend(loc = 'upper right',fontsize=12,labelspacing=0.2)
ax.xaxis.set_tick_params(labelsize=16)
ax.yaxis.set_tick_params(labelsize=16)
plt.savefig('Posterior.pdf', bbox_inches='tight',pad_inches=0.4)
plt.close(fig2)
# Make a plot of the histogram of the number of change points
print('Building plot of change points...')
fig3, ax = plt.subplots (figsize=(8,5))
k_count = k_count/np.sum(k_count) #normalise
ax.bar(k_index,k_count,align='center')
#ax.set_xticks(k_index[::2])
ax.set_title('Vertices Histogram',fontsize=16)
ax.set_xlabel('Number of vertices',fontsize=16)
ax.set_ylabel('Discrete probability',fontsize=16)
ax.xaxis.set_tick_params(labelsize=16)
ax.yaxis.set_tick_params(labelsize=16)
plt.savefig('K_histogram.pdf', bbox_inches='tight',pad_inches=0.4)
plt.close(fig3)

# Make a plot of the age of the change points
# (vertices is also reused later by the composite plot)
num_bins = 500
vertices = np.loadtxt('changepoints.dat')
fig4, ax = plt.subplots (figsize=(14,3))
ax.hist(vertices, bins = num_bins)
ax.set_title('Vertex position Histogram',fontsize=20)
ax.set_xlabel('Time/yr',fontsize=16)
ax.set_ylabel('Count',fontsize=16)
ax.set_xlim(age_min, age_max)
ax.xaxis.set_tick_params(labelsize=16)
ax.yaxis.set_tick_params(labelsize=16)
plt.savefig('Change_point_histogram.pdf', bbox_inches='tight',pad_inches=0.4)
plt.close(fig4)
# Make a plot of the misfit
print('Building plot of misfit...')
iterations, misfit = np.loadtxt('misfit.dat',unpack=True)
fig5, ax = plt.subplots (figsize=(8,5) )
ax.plot(iterations, misfit,'k')
ax.set_yscale('log')
ax.set_title('Misfit against iteration count',fontsize=16)
ax.set_xlabel('Iteration count',fontsize=16)
ax.set_ylabel('Misfit',fontsize=16)
ax.xaxis.set_tick_params(labelsize=16)
ax.yaxis.set_tick_params(labelsize=16)
# add red bar to indicate the burn-in end
ax.bar(Burn_in,height=misfit.max(),width=iterations.max()/100,bottom = 0, align = 'center',color='red')
plt.savefig('Misfit.pdf', bbox_inches='tight',pad_inches=0.4)
plt.close(fig5)
# Make a plot of the density
print('Building plot of density...')
fig6, ax = plt.subplots ( figsize=(14,5))
ax.set_title('Intensity density')
ax.set_ylabel('Intensity/$\mu$T')
# intensity_density.dat: first line is "discretise_size NBINS", then one
# "x y density" triple per line for every grid cell.
f = open('intensity_density.dat', 'r')
discretise_size, NBINS = [int(x) for x in f.readline().split()]
density_data = [list(map(float, x.split())) for x in f.readlines()]
f.close()
x_density,y_density,intensity_density = list(zip(*density_data))
# Reshape the flat columns into 2-D grids, then transpose the density so
# intensity runs along the vertical axis for imshow.
int_density = np.reshape(intensity_density,[discretise_size,NBINS])
x_density = np.reshape(x_density,[discretise_size,NBINS])
y_density = np.reshape(y_density,[discretise_size,NBINS])
int_density = np.transpose(int_density)
plt.imshow(int_density, origin='lower',cmap = cm.jet,extent=(x_density[0,0],x_density[-1,0],y_density[0,0],y_density[0,-1]), aspect='auto')
plt.xlim(x_density[0,0],x_density[-1,0])
plt.ylim(y_density[0,0],y_density[0,-1])
plt.xlabel('Time/yr',fontsize=16)
plt.ylabel('Intensity/$\mu$T',fontsize=16)
cb = plt.colorbar(ticks=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6,0.7, 0.8,0.9, 1],
                  orientation='vertical')
cb.set_label('Probability',fontsize=16)
#plt.clim(0, 1)
ax.xaxis.set_tick_params(labelsize=16)
ax.yaxis.set_tick_params(labelsize=16)
#plt.plot(x_orig,y_orig,'white', linewidth=1, label='Real')
plt.savefig('density.pdf', bbox_inches='tight',pad_inches=0.4)
plt.close(fig6)
# Make a plot of the intensity error - to check against the prior assumption
print('Building plot of intensity error...')
fig, ax = plt.subplots ( figsize=(8,5) )
# interpolate the average curve at the data ages and form the
# standardised residuals (should be ~N(0,1) if the error model is right)
interp_data = np.interp(x,av_x,av_y)
weighted_errors = (interp_data - y) / y_err
# density=True replaces the long-deprecated normed=1 keyword, which was
# removed from matplotlib's hist in the 3.x series (same numerical result).
n, bins, patches = plt.hist(weighted_errors,bins=50,density=True,edgecolor='w',range=[-5,5])
x_smooth = np.linspace(-5,5,1000)
# Standard normal pdf computed directly with numpy; matplotlib.mlab.normpdf
# was removed in matplotlib 3.1, so the original call breaks there.
normal_distribution = np.exp(-0.5 * x_smooth**2) / np.sqrt(2.0 * np.pi)
ax.plot(x_smooth,normal_distribution,'r')
ax.set_title('Weighted intensity error',fontsize=16)
ax.set_xlabel('$\sigma^{-1} \; \Delta F$',fontsize=16)
ax.set_ylabel('Discrete probability', fontsize=16)
ax.xaxis.set_tick_params(labelsize=16)
ax.yaxis.set_tick_params(labelsize=16)
ax.set_xlim(-5,5)
plt.savefig('histogram_weighted_errors.pdf', bbox_inches='tight',pad_inches=0.4)
plt.close(fig)
# Make a 3-part joint plot
intensity_range = 70
threshold = 0.001  # density values below this are blanked before imaging
print('Building composite plot...')
plt.figure(figsize=(14,15))
#Part 1: data points, credible interval and posterior summary curves
ax1 = plt.subplot(311)
# Shaded credible-interval band (lx: times, ly/uy: lower/upper bounds).
ax1.fill_between(lx, ly, uy, facecolor='orange', alpha=0.5, edgecolor='g', label='%i%% credible interval' % credible)
# Split data indices by their stratification flag (0/1).
unstratified_index = [index for index,item in enumerate(strat) if item == 0]
stratified_index = [index for index,item in enumerate(strat) if item == 1]
if len(unstratified_index) > 0:
    (line, caps, bars) = ax1.errorbar(x[unstratified_index], y[unstratified_index],xerr=x_err[unstratified_index], yerr=y_err[unstratified_index],
        fmt='o',color='blue',ecolor='k', elinewidth=1, capthick=0.7, capsize=4, markersize=5)
    plt.setp(line,label="Unstratified data") #give label to returned line
if len(stratified_index) > 0:
    (line2, caps, bars) = ax1.errorbar(x[stratified_index], y[stratified_index],xerr=x_err[stratified_index], yerr=y_err[stratified_index],
        fmt='o',color='red',ecolor='k', elinewidth=1, capthick=0.7, capsize=4, markersize=5)
    plt.setp(line2,label="Stratified data") #give label to returned line
#for cap in caps:
#    cap.set_markeredgewidth(0.5)
# Posterior summary curves.
ax1.plot(av_x, av_y, 'r', label = 'Average', linewidth=2)
ax1.plot(median_x, median_y, 'purple', linewidth=2, label = 'Median')
ax1.plot(mode_x, mode_y, 'blue', linewidth=2, label = 'Mode')
if 'x_cts_true' in locals(): #see if "true" data are available to plot --- only for synthetic cases.
    ax1.plot(x_cts_true,y_cts_true,'k', linewidth=2, label='Real')
ax1.set_ylim(I_min,I_max)
ax1.set_title('Posterior distribution of intensity',fontsize=20)
#ax1.set_xlabel('Time/yr',fontsize=16)
ax1.set_ylabel('Intensity/$\mu$T',fontsize=16)
ax1.legend(numpoints =1, loc = 'upper right',fontsize=12,labelspacing=0.2)
ax1.yaxis.set_tick_params(labelsize=16)
# Make x-tick labels invisible.
plt.setp(ax1.get_xticklabels(), visible=False)
# check to see if any points are to be highlighted:
if len(sys.argv) > 1:
    if sys.argv[1].upper() == 'joint_highlight_points'.upper():
        for i in range(2,len(sys.argv)):
            # Command-line point indices are 1-based; convert to 0-based.
            print('Highlighting points in joint plot: ', x[int(sys.argv[i])-1], y[int(sys.argv[i])-1])
            ax1.plot(x[int(sys.argv[i])-1], y[int(sys.argv[i])-1], markerfacecolor='none', color='lime',marker='s',markersize=20, markeredgewidth=2)
# Part 2: posterior intensity density as an image
ax2 = plt.subplot(312,sharex=ax1)
ax2.set_title('Intensity density',fontsize=20)
# File format: first line "discretise_size NBINS", then one "x y density"
# triple per line.
f = open('intensity_density.dat', 'r')
discretise_size, NBINS = [int(x) for x in f.readline().split()]
density_data = [list(map(float, x.split())) for x in f.readlines()]
f.close()
x_density,y_density,intensity_density = list(zip(*density_data))
int_density = np.reshape(intensity_density,[discretise_size,NBINS])
x_density = np.reshape(x_density,[discretise_size,NBINS])
y_density = np.reshape(y_density,[discretise_size,NBINS])
# Transpose so rows index intensity bins and columns index time points.
int_density = np.transpose(int_density)
x_density = np.transpose(x_density)
y_density = np.transpose(y_density)
# Blank out very low densities; LogNorm then leaves them unpainted.
int_density_refined = int_density.copy()
int_density_refined[ int_density_refined < threshold] = 0.0
plt.imshow(int_density_refined, origin='lower',cmap = cm.jet,extent=(x_density[0,0],x_density[0,-1],y_density[0,0],y_density[-1,0]), aspect='auto', norm=LogNorm(vmin=0.001, vmax=0.6),interpolation="nearest")
ax2.set_ylabel('Intensity/$\mu$T',fontsize=16)
ax2.xaxis.set_tick_params(labelsize=16)
ax2.yaxis.set_tick_params(labelsize=16)
if 'x_cts_true' in locals(): #see if "true" data are available to plot --- only for synthetic cases.
    ax2.plot(x_cts_true,y_cts_true,'k', linewidth=2, label='Real')
ax2.set_ylim(I_min,I_max)
# Part 3: histogram of sampled vertex positions over time
num_bins = 200
ax3 = plt.subplot(313,sharex=ax1)
ax3.hist(vertices, bins = num_bins, histtype='bar', edgecolor='white', linewidth=0.1, color='b')
ax3.set_ylabel('Count',fontsize=16,color='b')
ax3.yaxis.set_tick_params(labelsize=16,colors='b')
# Make x-tick labels invisible.
plt.setp(ax2.get_xticklabels(), visible=False)
part3_title = 'Vertex position'
# KL divergence
# check to see if prior distribution exists (produced by a separate
# prior-sampling run in a sibling output directory)
priors_directory = os.path.join(os.pardir,outputs_directory+'_prior_sampling')
if os.path.exists(priors_directory):
    part3_title = part3_title + '/Kullback-Leibler divergence'
    # load priors distribution (same file format as intensity_density.dat above)
    f = open(priors_directory+'/intensity_density.dat', 'r')
    discretise_size, NBINS = [int(x) for x in f.readline().split()]
    density_data = [list(map(float, x.split())) for x in f.readlines()]
    f.close()
    x_density_prior,y_density_prior,intensity_density_prior = list(zip(*density_data))
    int_density_prior = np.reshape(intensity_density_prior,[discretise_size,NBINS])
    x_density_prior = np.reshape(x_density_prior,[discretise_size,NBINS])
    y_density_prior = np.reshape(y_density_prior,[discretise_size,NBINS])
    int_density_prior = np.transpose(int_density_prior)
    x_density_prior = np.transpose(x_density_prior)
    y_density_prior = np.transpose(y_density_prior)
    # The prior and posterior grids must coincide for a pointwise comparison.
    if not np.array_equal(y_density_prior, y_density) or not np.array_equal(x_density_prior,x_density):
        print('Prior distribution exists but x and y ranges differ.')
        sys.exit(0)
    # For each time, simply compare the posterior pdf (which integrates to 1) and the prior pdf (which integrates to 1).
    KL = np.zeros( discretise_size )
    for i in range(0, discretise_size):
        int_density_prior[ int_density_prior == 0] = 1e-8 #replace zero values with a small value to avoid dividing by zero.
        KL[i] = stats.entropy(int_density[:,i], qk=int_density_prior[:,i])
    # Plot the KL(t) curve on a twin axis over the vertex histogram.
    ax3_twin = ax3.twinx()
    ax3_twin.plot(x_density[1,:], KL, 'r')
    ax3_twin.set_ylabel('KL divergence', fontsize=16, color='r')
    ax3_twin.tick_params('y', colors='r',labelsize=16)
    ax3_twin.tick_params(direction='out', pad=5)
    # produce an image of the prior intensity:
    threshold_prior = 0.001
    int_density_prior_refined = int_density_prior.copy()
    int_density_prior_refined[ int_density_prior_refined < threshold_prior] = 0.0
    fig8, ax8 = plt.subplots ( figsize=(8,5) )
    plt.sca(ax8)
    plt.imshow(int_density_prior_refined, origin='lower',cmap = cm.jet,extent=(x_density_prior[0,0],x_density_prior[0,-1],y_density_prior[0,0],y_density_prior[-1,0]), aspect='auto', norm=LogNorm(vmin=0.001, vmax=0.1),interpolation="nearest")
    ax8.set_xlim(x_density_prior[0,0],x_density_prior[0,-1])
    ax8.set_ylim(y_density_prior[0,0],y_density_prior[-1,0])
    ax8.set_xlabel('Time/yr',fontsize=16)
    ax8.set_ylabel('Intensity/$\mu$T',fontsize=16)
    cb = plt.colorbar(ticks=[0.001, 0.002, 0.003, 0.004, 0.005, 0.006,0.007, 0.008,0.009, 0.01],orientation='vertical')
    cb.set_label('Probability',fontsize=16)
    ax8.xaxis.set_tick_params(labelsize=16)
    ax8.yaxis.set_tick_params(labelsize=16)
    plt.savefig('Prior_density.pdf', bbox_inches='tight',pad_inches=0.4)
    plt.close(fig8)
    # Return focus to the joint plot's first panel before final cosmetics.
    plt.sca(ax1)
    # The x-axis data keeps being updated with new curves - define at the end the x-range:
else:
    print('KL divergence ignored as no prior distribution found')
# Final axis cosmetics shared by all three panels, then save the figure.
ax3.set_xlim(age_min, age_max)
ax3.xaxis.set_tick_params(labelsize=16)
ax3.set_xlabel('Time/yr',fontsize=16)
ax3.xaxis.grid(True)
ax3.tick_params(top = 'off')
ax1.xaxis.grid(True)
ax2.xaxis.grid(True)
ax1.tick_params(top = 'off')
ax2.tick_params(top = 'off')
# Adjust plot sizes to place colour bar
plt.subplots_adjust(bottom=0.05, right=0.9, top=0.95)
cax = plt.axes([0.91, 0.367, 0.03, 0.265])
# The numbers in the square brackets of add_axes refer to [left, bottom, width, height], where the coordinates are just fractions that go from 0 to 1 of the plotting area.
cb = plt.colorbar(ticks=[0.001,0.01, 0.1, 0.2, 0.4, .6],
    orientation='vertical', format='$%.3f$',cax=cax)
cb.ax.tick_params(labelsize=16)
ax3.set_title(part3_title,fontsize=20)
plt.savefig('joint_plot.pdf', bbox_inches='tight',pad_inches=0.0)
plt.close()
# If the RJ-MCMC code has saved some models, make a plot of these
import os
if os.path.exists(output_model_filename):
    output_model = open(output_model_filename,'r')
    # File format: line 1 = discretise_size, line 2 = model ages, then one
    # value per line for each sampled model in sequence.
    discretise_size = int(output_model.readline().split()[0])
    x_ages = [float(x) for x in output_model.readline().split()]
    models = [float(x.split()[0]) for x in output_model.readlines()]
    num_models = int(len(models) / discretise_size)
    models = np.reshape(models,[num_models,discretise_size])
    output_model.close()
    print('Building plot of ' + str(num_models) + ' individual models...')
    fig, ax1 = plt.subplots ( figsize=(14,5))
    # Draw each sampled model faintly so the ensemble shows the spread.
    for i in range(0, num_models):
        ax1.plot( x_ages,models[i,:], color='grey',alpha=0.1)
    #ax1.plot( av_x, av_y, 'r', linewidth=2)
    ax1.plot(av_x, av_y, 'r', label = 'Average', linewidth=2)
    ax1.plot(median_x, median_y, 'purple', linewidth=2, label = 'Median')
    ax1.plot(mode_x, mode_y, 'blue', linewidth=2, label = 'Mode')
    ax1.legend(numpoints =1, loc = 'upper right',fontsize=12,labelspacing=0.2)
    ax1.tick_params(labelsize=16)
    ax1.set_xlabel('Time/yr',fontsize=16)
    ax1.set_ylabel('Intensity/$\mu$T',fontsize=16)
    ax1.set_xlim([age_min,age_max])
    ax1.set_ylim([I_min,I_max])
    ax1.tick_params(top = 'off')
    plt.savefig('individual_models.pdf', bbox_inches='tight',pad_inches=0.0)
    plt.close(fig)
else:
    print('Data for individual models not found - no plot made.')
|
from pyxb.bundles.opengis.citygml.raw.building import *
|
from random import randint
class Sorting:
    """Groups different sorting algorithms.

    All methods accept a list of mutually comparable items and return it
    sorted ascending.  ``qsort``, ``ssort``, ``isort`` and ``slow_isort``
    sort in place; ``msort`` returns a new list.
    """

    @classmethod
    def qsort(cls, arr, beggining=0, pivot=None):
        """Quick sort implementation.

        Sorts ``arr[beggining..pivot]`` in place (both bounds inclusive;
        ``pivot`` defaults to the last index) and returns ``arr``.
        """
        start = beggining
        if pivot is None:
            pivot = len(arr) - 1
        arr_lenght = pivot - start + 1
        # 0/1-element slices are sorted; 2 elements need at most one swap.
        if arr_lenght < 2:
            return arr
        elif arr_lenght == 2:
            if arr[start] > arr[pivot]:
                arr[start], arr[pivot] = arr[pivot], arr[start]
            return arr
        # From here on, arr_lenght > 2
        end = pivot - 1
        # swap pivot with a random entry to mitigate worst case performance
        rand_idx = randint(beggining, pivot - 1)
        arr[rand_idx], arr[pivot] = arr[pivot], arr[rand_idx]
        # Two-pointer partition: values <= pivot value end up left of
        # ``start``, values greater end up right of ``end``.
        while start <= end:
            if arr[end] <= arr[pivot] and arr[start] > arr[pivot]:
                arr[start], arr[end] = arr[end], arr[start]
                end -= 1
            elif arr[end] <= arr[pivot] and arr[start] <= arr[pivot]:
                start += 1
            elif arr[end] > arr[pivot]:
                end -= 1
            else:
                # NOTE: logically unreachable — the three branches above
                # already cover every combination; kept for safety.
                arr[start], arr[end] = arr[end], arr[start]
                start += 1
        # Move the pivot value into its final slot, recurse on both sides.
        if start != pivot:
            arr[start], arr[pivot] = arr[pivot], arr[start]
        Sorting.qsort(arr, beggining, start-1)
        Sorting.qsort(arr, start+1, pivot)
        return arr

    @classmethod
    def msort(cls, arr):
        """Merge sort implementation.

        Returns a new sorted list; ``arr`` itself is left unmodified.
        """
        if len(arr) < 2:
            return arr
        med = len(arr) // 2
        # Sort each half recursively, then merge the two sorted halves.
        m1 = Sorting.msort(arr[0:med])
        m2 = Sorting.msort(arr[med:])
        idx1 = 0; idx2 = 0; ans = []
        while len(ans) < len(arr):
            if idx1 < len(m1) and idx2 < len(m2):
                # Both halves non-exhausted: take the smaller head.
                if m1[idx1] < m2[idx2]:
                    ans.append(m1[idx1])
                    idx1 += 1
                else:
                    ans.append(m2[idx2])
                    idx2 += 1
            elif idx1 < len(m1):
                # Only the left half remains.
                ans.append(m1[idx1])
                idx1 += 1
            else:
                # Only the right half remains.
                ans.append(m2[idx2])
                idx2 += 1
        return ans

    @classmethod
    def ssort(cls, arr):
        """Selection sort implementation.

        Sorts ``arr`` in place and returns it.  Each outer pass moves the
        maximum of arr[0..target] into position ``target``.
        """
        if len(arr) < 2:
            return arr
        curr = 0; target = len(arr) - 1
        while target > 0:
            while curr < target:
                # Anything bigger than the end-slot value is swapped there.
                if arr[curr] > arr[target]:
                    arr[curr], arr[target] = arr[target], arr[curr]
                curr += 1
            target -= 1
            curr = 0
        return arr

    @classmethod
    def isort(cls, arr):
        """Insertion sort implementation.

        Sorts ``arr`` in place and returns it.  ``j`` scans the unsorted
        suffix; ``i`` walks back to find the insertion point and arr[j] is
        bubbled down with pairwise swaps.
        """
        if len(arr) < 2:
            return arr
        i = 0; j = 1
        while j < len(arr):
            if arr[j] > arr[i]:
                # Already in order: advance the sorted boundary.
                # BUG FIX: this used to be ``while arr[j] > arr[i]: i += 1;
                # j += 1; break`` followed by a fall-through that read
                # arr[j] — an IndexError once j reached len(arr) (e.g. on
                # already-sorted input such as [1, 2] or [1, 2, 3, 4]).
                # ``continue`` re-checks the bound and is otherwise
                # equivalent to the original fall-through.
                i += 1; j += 1
                continue
            # Walk i back to the first element not greater than arr[j].
            # NOTE(review): arr[-1] is read once when i reaches -1 because
            # the guard comes second; harmless (the loop still exits).
            while arr[j] < arr[i] and i >= 0:
                i -= 1
            last_ord = j
            # Bubble arr[j] down into the gap just after position i.
            while j - i > 1:
                arr[j], arr[j-1] = arr[j-1], arr[j]
                j -= 1
            i = last_ord
            j = i + 1
        return arr

    @classmethod
    def slow_isort(cls, arr):
        """Attempted implementation of insertion sort, turned out to be very slow."""
        if len(arr) < 2:
            return arr
        i = 0; j = 1
        while j < len(arr):
            if arr[i] > arr[j]:
                # Out of order: swap and rescan from the start.
                arr[i], arr[j] = arr[j], arr[i]
                j = i if i > 0 else 1
                i = 0
            elif j - i == 1:
                i += 1
                j += 1
            else:
                i += 1
        return arr
from mothra.settings import INSTALLED_APPS, INSTALLED_APPS_EXTERNAL_PACKAGES
appName = 'workflows'
def get_installed_apps():
    """Return the names of all installed workflow packages, local then external."""
    local = get_local_installed_apps()
    external = get_extern_installed_apps()
    return list(local) + list(external)
def get_local_installed_apps():
    """Return sub-app names of INSTALLED_APPS entries under the local app.

    An entry such as ``'workflows.foo'`` yields ``'foo'``; entries with no
    name after the prefix are skipped.
    """
    prefix = appName + '.'
    names = []
    for full_name in INSTALLED_APPS:
        if full_name.startswith(prefix) and len(full_name) > len(prefix):
            names.append(full_name[len(prefix):])
    return names
def get_extern_installed_apps():
    """Return the configured external (non-local) workflow packages."""
    return INSTALLED_APPS_EXTERNAL_PACKAGES
# Following functions deal with imports of libraries as dicts
def import_all_packages_libs_as_dict(libName):
    """Import module ``libName`` from every installed package.

    Returns a dict mapping package name -> imported module object (or None
    when that package's import failed).
    """
    return {package: import_package_lib_as_dict(package, libName)
            for package in get_installed_apps()}
def import_package_lib_as_dict(packageName, libName):
    """Import ``libName`` from one package, resolving local vs. external paths."""
    if packageName in get_local_installed_apps():
        # Local sub-apps live under the main app package.
        module_path = appName + "." + packageName + "." + libName
    else:
        module_path = packageName + "." + libName
    return dynamic_import_globals_as_dict(module_path, packageName)
def dynamic_import_globals_as_dict(name, package):
    """Best-effort import of the module named ``name``.

    Returns the imported (leaf) module object, or None when the import fails
    for any reason.  ``package`` is unused but kept for interface
    compatibility with callers.
    """
    try:
        # fromlist=['*'] makes __import__ return the leaf module rather than
        # the top-level package.
        return __import__(name, globals(), locals(), ['*'])
    except Exception:
        # Was a bare ``except:``; narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed while keeping the best-effort contract.
        return None
# The following functions import libraries as globals; however, a
# localSetAttrFunc must be provided.  That function should set a module-level
# global in the file into which the import is wanted, e.g.:
# def setattr_local(name, value, package):
#     setattr(sys.modules[__name__], name, value)
def import_all_packages_libs(libName, localSetAttrFunc):
    """Import ``libName`` from every installed package, publishing each
    module's public names through ``localSetAttrFunc``."""
    for package_name in get_installed_apps():
        import_package_lib(package_name, libName, localSetAttrFunc)
def import_package_lib(packageName, libName, localSetAttrFunc):
    """Import ``libName`` from one package and publish its public names
    through ``localSetAttrFunc``."""
    if packageName in get_local_installed_apps():
        module_path = ".".join((appName, packageName, libName))
    else:
        # External CF package
        module_path = ".".join((packageName, libName))
    dynamic_import_globals(module_path, packageName, localSetAttrFunc)
def dynamic_import_globals(name, package, localSetAttrFunc):
    """Import module ``name`` and publish its public names.

    Every attribute of the imported module whose name does not start with an
    underscore is handed to ``localSetAttrFunc(attr_name, value, package)``
    so the caller can install it into its own namespace.  Import failures
    are reported on stdout and otherwise swallowed (no names published).
    """
    m = None
    try:
        m = __import__(name, globals(), locals(), ['*'])
    except ImportError:
        import sys, traceback, io
        s = io.StringIO()
        traceback.print_exc(file=s)
        # Missing '<package>.urls' modules are an expected condition and are
        # suppressed; anything else gets a full traceback.
        if package + '.urls' not in s.getvalue():
            print(s.getvalue())
            print("ImportError in user code (module: '{}'):".format(package))
            print('-' * 60)
            traceback.print_exc(file=sys.stdout)
            print('-' * 60)
        # Intentionally no return: m stays None and the loop below is a
        # no-op, matching the original behavior.
    except Exception:
        # Was a bare ``except:``; narrowed so SystemExit/KeyboardInterrupt
        # still propagate.
        import sys, traceback
        print("Exception in user code (module: '{}'):".format(package))
        print('-' * 60)
        traceback.print_exc(file=sys.stdout)
        print('-' * 60)
        return
    # Publish only public names (leading-underscore names are skipped).
    all_names = [attr for attr in dir(m) if attr[0] != '_']
    for attr in all_names:
        localSetAttrFunc(attr, m.__dict__.get(attr), package)
|
"""The Robotarium class used to instantiate experimentation.
Written by: The Robotarium Team
Modified by: Zahi Kakish (zmk5)
"""
import functools
import math
import time
from typing import Tuple
import numpy as np
import rclpy
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from robotarium_node.robotarium_abc import RobotariumABC
from robotarium_node.utilities.coordinates import quaternion_to_yaw
from robotarium_node.utilities.coordinates import yaw_to_quaternion
class Robotarium(RobotariumABC):
    """Provides runtime routines to interface with the Robotarium.

    NOTE: THIS CLASS SHOULD NEVER BE MODIFIED OR SUBMITTED!
    """

    def __init__(
            self,
            number_of_robots: int = -1,
            show_figure: bool = True,
            sim_in_real_time: bool = True,
            initial_conditions=None) -> None:
        """Instantiate the Robotarium class.

        Parameters
        ----------
        number_of_robots:
            Number of agents; one publisher/subscriber pair is created each.
        show_figure:
            Whether a figure is rendered (gates the real-time throttle).
        sim_in_real_time:
            Throttle step() so simulated time tracks wall-clock time.
        initial_conditions:
            Optional array of initial poses.  This previously defaulted to a
            module-level ``np.array([])`` — a mutable default shared by every
            call; ``None`` is now the sentinel and is converted to a fresh
            empty array, which is backward compatible.
        """
        if initial_conditions is None:
            initial_conditions = np.array([])
        super().__init__(
            number_of_robots, show_figure, sim_in_real_time, initial_conditions)
        # Initialize some rendering variables
        self.previous_render_time = time.time()
        self.sim_in_real_time = sim_in_real_time
        # Initialize checks for step and get poses calls: get_poses() and
        # step() must strictly alternate, starting with get_poses().
        self._called_step_already = True
        self._checked_poses_already = False
        # Initialization of error collection.
        self._errors = {}
        # Initialize steps
        self._iterations = 0
        # ROS2 Nodes, Publishers, and Subscribers: one cmd_vel publisher and
        # one odometry subscription per robot.
        rclpy.init()
        self._node = rclpy.create_node('robotarium_cl')
        self._pub = {}
        self._sub = {}
        self._msg = {}
        for i in range(number_of_robots):
            self._pub[i] = self._node.create_publisher(
                Twist, f'/gb_{i}/cmd_vel', 10)
            self._msg[i] = Twist()
            self._sub[i] = self._node.create_subscription(
                Odometry, f'/gb_{i}/odom',
                functools.partial(self._odom_callback, robot_id=i), 10)

    def get_poses(self) -> np.ndarray:
        """Return the states of the agents.

        Returns
        -------
        A `3xN` numpy array (of robot poses).
        """
        # NOTE: assert is stripped under ``python -O``; kept as-is because
        # callers expect AssertionError on misuse.
        assert(not self._checked_poses_already), 'Can only call get_poses() once per call of step().'
        # Allow step() to be called again.
        self._called_step_already = False
        self._checked_poses_already = True
        return self.poses

    def call_at_scripts_end(self) -> None:
        """Call this function at the end of scripts to display potential errors.

        Even if you don't want to print the errors, calling this function at
        the end of your script will enable execution on the Robotarium testbed.
        """
        self._node.get_logger().warn('##### DEBUG OUTPUT #####')
        # 0.033 s is the per-iteration control period assumed for the
        # deployed-runtime estimate.
        self._node.get_logger().info(
            f'{math.ceil(self._iterations * 0.033)} real seconds when ' +
            'deployed on the Robotarium.')
        # Shutdown node and ROS2
        self._node.destroy_node()
        rclpy.shutdown()

    def step(self) -> None:
        """Increment the simulation by updating the dynamics."""
        assert(not self._called_step_already), 'Make sure to call get_poses before calling step() again.'
        # Allow get_poses function to be called again.
        self._called_step_already = True
        self._checked_poses_already = False
        # Validate before thresholding velocities
        self._errors = self._validate()
        self._iterations += 1
        # Update dynamics of agents: unicycle model integrated with forward
        # Euler (row 0: x, row 1: y, row 2: heading).
        self.poses[0, :] = self.poses[0, :] + self.time_step * np.cos(self.poses[2, :]) * self.velocities[0, :]
        self.poses[1, :] = self.poses[1, :] + self.time_step * np.sin(self.poses[2, :]) * self.velocities[0, :]
        self.poses[2, :] = self.poses[2, :] + self.time_step * self.velocities[1, :]
        # Ensure angles are wrapped into (-pi, pi].
        self.poses[2, :] = np.arctan2(
            np.sin(self.poses[2, :]), np.cos(self.poses[2, :]))
        # Update graphics
        if self.show_figure:  # TODO: Remove this.
            if self.sim_in_real_time:
                # Busy-wait so each step takes at least time_step wall-clock.
                t = time.time()
                while t - self.previous_render_time < self.time_step:
                    t = time.time()
                self.previous_render_time = t
        self._publish_cmd_vel()
        rclpy.spin_once(self._node)

    def _publish_cmd_vel(self) -> None:
        """Publish the new poses of the robots to the Gazebo simulator."""
        for i in range(self.number_of_robots):
            self._msg[i].linear.x = self.velocities[0, i]
            self._msg[i].angular.z = self.velocities[1, i]
            self._pub[i].publish(self._msg[i])

    def _odom_callback(self, msg: Odometry, robot_id: int) -> None:
        """Set the pose of the robot from msg."""
        self.poses[0, robot_id] = msg.pose.pose.position.x
        self.poses[1, robot_id] = msg.pose.pose.position.y
        self.poses[2, robot_id] = quaternion_to_yaw(
            msg.pose.pose.orientation.x, msg.pose.pose.orientation.y,
            msg.pose.pose.orientation.z, msg.pose.pose.orientation.w
        )
|
import numpy as np
class QuantizedNdarray(np.ndarray):
    """An ndarray subclass carrying a ``qtype`` attribute through
    construction, view casting, slicing, and ufunc calls."""

    def __new__(cls, input_array, qtype=None, **kwargs):
        # View the input as our subclass and attach the quantization type.
        instance = np.asarray(input_array).view(cls)
        instance.qtype = qtype
        return instance

    def __array_finalize__(self, obj):
        # Called for explicit construction (obj is None), view casting, and
        # new-from-template; inherit qtype from the source when present.
        if obj is None:
            return
        #pylint: disable=attribute-defined-outside-init
        self.qtype = getattr(obj, 'qtype', None)

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        """Run the ufunc on plain ndarrays and re-attach our attributes.

        Keeps all custom attributes alive across ufunc operations.  It
        should really be a bit more discriminating, since some operations
        will change the quantization.
        """
        # Downcast our subclass in inputs/outputs to plain ndarray so the
        # parent implementation does not recurse back into this method.
        plain_inputs = tuple(
            item.view(np.ndarray) if isinstance(item, QuantizedNdarray) else item
            for item in inputs)
        out_args = kwargs.pop('out', None)
        if out_args:
            kwargs['out'] = tuple(
                o.view(np.ndarray) if isinstance(o, QuantizedNdarray) else o
                for o in out_args)
        else:
            out_args = (None,) * ufunc.nout
        # Delegate the actual computation to numpy.
        raw_results = super().__array_ufunc__(ufunc, method, *plain_inputs, **kwargs)  # pylint: disable=no-member
        if raw_results is NotImplemented:
            return NotImplemented
        if method == 'at':
            # 'at' operates in place; there is nothing to wrap or return.
            return
        if ufunc.nout == 1:
            raw_results = (raw_results,)
        # Re-wrap fresh results; explicit 'out' arrays are returned as given.
        wrapped = tuple(
            self._copy_attrs_to(result) if output is None else output
            for result, output in zip(raw_results, out_args))
        return wrapped[0] if len(wrapped) == 1 else wrapped

    def _copy_attrs_to(self, target):
        """Copy all attributes of self onto ``target`` (an ndarray or
        subclass), returning it viewed as QuantizedNdarray."""
        target = target.view(QuantizedNdarray)
        try:
            target.__dict__.update(self.__dict__)
        except AttributeError:
            pass
        return target
from . import soot_foil, schlieren
__all__ = ["soot_foil", "schlieren"]
|
import os.path
from data.base_dataset import BaseDataset
from data.image_folder import make_dataset
import human36m_skeleton
from PIL import Image
import torchvision.transforms as transforms
import torchvision.transforms.functional as functional
import matplotlib.pyplot as plt
import skimage.io
import skimage.transform
import random
import numpy as np
from data import utils
import scipy.io
def bounding_box_from_mask(mask):
    """Return ``(top, bottom, left, right)`` bounds of the set pixels of a
    2-D mask.

    ``top``/``bottom`` are the min/max of the second index (column) and
    ``left``/``right`` the min/max of the first index (row), matching the
    original loop's convention.  When no pixel is set, the original
    degenerate result ``(inf, 0, inf, 0)`` is returned.
    """
    mask = np.asarray(mask)
    # Vectorised replacement for the original O(H*W) Python double loop.
    # ``== 1`` reproduces the original ``mask[x, y] == True`` comparison for
    # both boolean and float masks (True compares equal to 1).
    rows, cols = np.nonzero(mask == 1)
    if rows.size == 0:
        # Degenerate values kept identical to the loop's initial state.
        return (float('inf'), 0, float('inf'), 0)
    return (int(cols.min()), int(cols.max()), int(rows.min()), int(rows.max()))
def tight_bounding_box_to_square(top, bottom, left, right):
    """Expand a tight bounding box into a centred square padded by 25%.

    Returns ``(square_top, square_left, square_length)``.
    """
    height = bottom - top
    width = right - left
    # Side of the square: 1.25x the larger box dimension, truncated to int.
    side = int(max(height, width) * 1.25)
    # Shift top/left so the square is centred on the original box.
    pad_vertical = int((side - height) / 2.0)
    pad_horizontal = int((side - width) / 2.0)
    return (top - pad_vertical, left - pad_horizontal, side)
def crop_image_to_bounding_box(image, mask):
    """Crop ``image`` to a 128x128 square centred on the mask's bounding box."""
    # Bounding box of the set pixels in the boolean mask.
    top, bottom, left, right = bounding_box_from_mask(mask)
    # Grow it into a padded square, then crop + resize in one torchvision call.
    sq_top, sq_left, sq_len = tight_bounding_box_to_square(top, bottom, left, right)
    return functional.resized_crop(image, sq_top, sq_left, sq_len, sq_len, 128)
def proc_im(image, mask, apply_mask=True, crop_to_bounding_box=True):
    """Load an image and its mask from file paths and process them.

    With ``crop_to_bounding_box`` the PIL image is cropped to the mask's
    bounding box; otherwise the image is returned as an array, optionally
    multiplied by the mask (``apply_mask``).
    """
    pil_image = Image.open(image)
    mask_array = skimage.img_as_float(skimage.io.imread(mask)).astype(np.float32)
    if crop_to_bounding_box:
        return crop_image_to_bounding_box(pil_image, mask_array)
    pixels = np.array(pil_image)
    if apply_mask:
        # Broadcast the single-channel mask across the colour channels.
        return pixels * mask_array[..., None]
    return pixels
def get_transform(opt, channels=3):
    """Build the tensor-conversion + normalisation transform.

    Normalises every channel with mean 0.5 and std 0.5 (maps [0, 1] pixel
    values to [-1, 1]).  ``opt`` is unused but kept for interface
    compatibility.
    """
    mean, std = 0.5, 0.5
    return transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([mean] * channels, [std] * channels),
    ])
class SimpleHuman36mDatasetSingle(object):
    """Low-level access to one split of the Human3.6M frame data.

    Indexes frames by walking ``root/<actor>/BackgroudMask/<sequence>``
    directories ("Backgroud" is the on-disk spelling) and serves
    (source, target) frame pairs sampled within ``sample_window``.
    """

    def __init__(self, root, sample_window=[5, 30], activities=None,
                 actors=None, split_sequence='full', subsampled_size=None, subsample_seed=None):
        # NOTE(review): the mutable default ``sample_window=[5, 30]`` is
        # shared across calls; safe only as long as it is never mutated.
        self.root = root
        self.sample_window = sample_window
        self.ordered_stream = None  # built lazily by get_ordered_stream()
        # load dataset: index every (actor, activity) sequence and its frames
        self.sequences = []
        for actor in actors:
            sequences = os.listdir(os.path.join(root, actor, 'BackgroudMask'))
            sequences = sorted(sequences)
            for activity in activities:
                activity_sequences = [s for s in sequences if s.lower().startswith(activity.lower())]
                for seq in activity_sequences:
                    frames = os.listdir(os.path.join(root, actor, 'BackgroudMask', seq))
                    # Frame files are named "<number>.<ext>"; sort numerically.
                    frames = [int(os.path.splitext(x)[0]) for x in frames]
                    frames = sorted(frames)
                    # Optionally keep only one half of each sequence (used to
                    # build disjoint train/skeleton splits).
                    if split_sequence == 'full':
                        pass
                    elif split_sequence == 'first_half':
                        frames = frames[:len(frames) // 2]
                    elif split_sequence == 'second_half':
                        frames = frames[len(frames) // 2:]
                    else:
                        raise ValueError()
                    self.sequences.append({'frames': frames, 'actor': actor, 'activity_sequence': seq})
        if subsampled_size:
            # Replace the index with a fixed-size random subsample of
            # single-frame sequences (deterministic given subsample_seed).
            sequences_ = []
            rnd = random.Random(subsample_seed)
            for _ in range(subsampled_size):
                seq = rnd.choice(self.sequences).copy()
                seq['frames'] = [rnd.choice(seq['frames'])]
                sequences_.append(seq)
            self.sequences = sequences_

    def get_pair(self, sequence, frame1, frame2):
        """Return per-frame dicts (image path, mask path, landmarks) for two frames."""
        def get_single(sequence, frame):
            # Landmarks live in a .mat file per frame; values are stored
            # normalised and scaled here to 128-pixel image coordinates.
            mat_file = os.path.join(self.root, sequence['actor'], 'Landmarks', sequence['activity_sequence'], str(frame) + '.mat')
            mat = scipy.io.loadmat(mat_file)
            landmarks = mat['keypoints_2d'] * 128.0
            return {
                'image': os.path.join(self.root, sequence['actor'], 'WithBackground', sequence['activity_sequence'], str(frame) + '.jpg'),
                'mask': os.path.join(self.root, sequence['actor'], 'BackgroudMask', sequence['activity_sequence'], str(frame) + '.png'),
                'landmarks': landmarks
            }
        return get_single(sequence, frame1), get_single(sequence, frame2)

    def get_ordered_stream(self):
        """Return (and cache) a deterministic (sequence, frame) list,
        striding through each sequence by the window's upper bound."""
        if self.ordered_stream is None:
            self.ordered_stream = []
            for sequence in self.sequences:
                step = self.sample_window[1]
                for i in range(0, len(sequence['frames']), step):
                    frame = sequence['frames'][i]
                    self.ordered_stream.append((sequence, frame))
        return self.ordered_stream

    def get_item(self, index):
        """Deterministically fetch the pair at ``index`` of the ordered stream.

        The partner frame is ``sample_window[0]`` stream entries ahead; when
        that fails (end of stream, or a missing/mismatched .mat for the
        partner) the frame is paired with itself.
        """
        ordered_stream = self.get_ordered_stream()
        sequence, frame1 = ordered_stream[index]
        offset = self.sample_window[0]
        try:
            _, frame2 = ordered_stream[index + offset]
            pair = self.get_pair(sequence, frame1, frame2)
        except:
            # NOTE(review): bare except is deliberate best-effort fallback,
            # but it also hides real I/O errors — consider narrowing.
            pair = self.get_pair(sequence, frame1, frame1)
        return pair

    def sample_item(self):
        """Randomly sample a (source, target) pair from one sequence, with the
        target sample_window[0]..sample_window[1] frames after the source."""
        sequence = random.choice(self.sequences)
        length = len(sequence['frames'])
        start = random.randint(0, length - self.sample_window[0] - 1)
        end = random.randint(
            start + self.sample_window[0],
            min(start + self.sample_window[1], length - 1))
        return self.get_pair(
            sequence, sequence['frames'][start],
            sequence['frames'][end])

    def num_samples(self):
        # Number of deterministic samples available in the ordered stream.
        return len(self.get_ordered_stream())
class SimpleHuman36mDataset(BaseDataset):
    """Dataset pairing Human3.6M image frames with skeleton keypoints.

    Wraps two SimpleHuman36mDatasetSingle instances: one serving image pairs
    ('A' side) and one serving skeleton landmarks ('B' side — a disjoint,
    optionally subsampled split during training).
    """

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Register dataset-specific command-line options on ``parser``."""
        parser.add_argument('--sample_window', type=int, default=[5, 30], nargs=2, help='')
        parser.add_argument('--no_mask', action='store_true', help='')
        parser.add_argument('--skeleton_subset_size', type=int, default=0, help='')
        parser.add_argument('--skeleton_subset_seed', type=int, default=None, help='')
        parser.add_argument('--crop_to_bounding_box', action='store_true',
                            help='Crop the pose frame to a square centered around the bounding box')
        return parser

    def initialize(self, opt):
        """Build the underlying single-split datasets according to ``opt``.

        Raises ValueError for an unknown ``opt.subset`` or ``opt.phase``.
        """
        self.opt = opt
        self.root = opt.dataroot
        self.load_images = True
        if hasattr(opt, 'load_images'):
            self.load_images = opt.load_images
        self.use_mask = not self.opt.no_mask
        self.crop_to_bounding_box = self.opt.crop_to_bounding_box
        activities = ['directions', 'discussion', 'greeting', 'posing',
                      'waiting', 'walking']
        train_actors = ['S%d' % i for i in [1, 5, 6, 7, 8, 9]]
        val_actors = ['S%d' % i for i in [1, 5, 6, 7, 8, 9]]
        test_actors = val_actors
        if opt.subset == 'train':
            actors = train_actors
        elif opt.subset == 'val':
            actors = val_actors
        elif opt.subset == 'test':
            actors = test_actors
        else:
            raise ValueError()
        if 'train' in opt.phase:
            order_stream = False
            split_sequence = 'first_half'
            sample_window = opt.sample_window
        elif opt.phase == 'val':
            order_stream = True
            split_sequence = 'full'
            sample_window = opt.sample_window
        elif opt.phase == 'test':
            order_stream = True
            split_sequence = 'full'
            sample_window = opt.sample_window
        else:
            # BUG FIX: this was ``ValueError()`` without ``raise``, which
            # silently fell through and later crashed with NameError on
            # the unset local variables.
            raise ValueError()
        self.dataset = SimpleHuman36mDatasetSingle(
            self.root, sample_window=sample_window,
            activities=activities, actors=actors,
            split_sequence=split_sequence)
        if 'train' in opt.phase:
            # Skeletons come from the disjoint second-half split, optionally
            # subsampled to a fixed size with a fixed seed.
            self.skeleton_dataset = SimpleHuman36mDatasetSingle(
                self.root, sample_window=[0, 0],
                activities=activities, actors=actors,
                split_sequence='second_half',
                subsampled_size=opt.skeleton_subset_size,
                subsample_seed=opt.skeleton_subset_seed)
        else:
            self.skeleton_dataset = self.dataset
        if opt.phase == 'train':
            # Effectively infinite: training samples are drawn randomly.
            self.len = int(10e7)
        else:
            self.len = self.dataset.num_samples()
        self.ordered_stream = order_stream
        self.A_transform = get_transform(opt)
        self.B_transform = get_transform(opt, channels=opt.output_nc)

    def _get_sample(self, dataset, index, load_image=True):
        """Fetch one (source, target) sample, ordered or randomly drawn."""
        if self.ordered_stream:
            source, target = dataset.get_item(index)
            if self.opt.shuffle_identities:
                # Decouple identity from pose by re-drawing the source frame.
                source, _ = dataset.get_item(-1 * index)
        else:
            source, target = dataset.sample_item()
        landmarks = utils.swap_xy_points(source['landmarks'])
        future_landmarks = utils.swap_xy_points(target['landmarks'])
        landmarks = landmarks.astype('float32')
        future_landmarks = future_landmarks.astype('float32')
        if load_image:
            future_image = proc_im(source['image'], source['mask'], apply_mask=self.use_mask, crop_to_bounding_box=self.crop_to_bounding_box)
            source_image = proc_im(target['image'], target['mask'], apply_mask=self.use_mask, crop_to_bounding_box=False)
        else:
            future_image = None
            source_image = None
        return source_image, future_image, source['image'], target['image'], landmarks, future_landmarks,

    def __getitem__(self, index):
        """Assemble one item: images (optional), paths, normalised keypoints."""
        # sample A (image pair plus its paired keypoints)
        cond_A_img, A_img, cond_A_path, A_paths, paired_cond_B, paired_B = self._get_sample(self.dataset, index)
        # sample B (skeleton keypoints only; no image decoding)
        _, _, _, _, _, B = self._get_sample(self.skeleton_dataset, index, load_image=False)
        # normalize keypoints to the configured fine size
        paired_cond_B = utils.normalize_points(
            paired_cond_B, self.opt.fineSize, self.opt.fineSize)
        paired_B = utils.normalize_points(
            paired_B, self.opt.fineSize, self.opt.fineSize)
        B = utils.normalize_points(
            B, self.opt.fineSize, self.opt.fineSize)
        if self.load_images:
            A = self.A_transform(A_img)
            cond_A = self.A_transform(cond_A_img)
        data = {'B': B, 'paired_cond_B': paired_cond_B, 'paired_B': paired_B,
                'A_paths': A_paths, 'cond_A_path': cond_A_path}
        if self.load_images:
            data.update({'A': A, 'cond_A': cond_A})
        return data

    def __len__(self):
        return self.len

    def name(self):
        return 'UnalignedDataset'
|
from django.conf import settings
from django.utils import translation
import pytest
from mock import Mock, patch
from olympia.amo.tests import TestCase
from olympia.amo.utils import from_string
from olympia.addons.models import Addon
from olympia.translations.templatetags import jinja_helpers
from olympia.translations.fields import save_signal
from olympia.translations.models import PurifiedTranslation
from olympia.translations.tests.testapp.models import TranslatedModel
pytestmark = pytest.mark.django_db
def test_locale_html():
    """Test HTML attributes for languages different than the site language"""
    field = Mock()
    # Same language as the site: no extra attributes expected.
    field.locale = translation.get_language()
    assert not jinja_helpers.locale_html(field), \
        'no special HTML attributes for site language'
    # A left-to-right foreign language gets lang + dir attributes.
    field.locale = 'de'
    assert jinja_helpers.locale_html(field) == ' lang="de" dir="ltr"'
    # Right-to-left languages must be flagged dir="rtl".
    for lang in settings.LANGUAGES_BIDI:
        field.locale = lang
        assert jinja_helpers.locale_html(field) == ' lang="%s" dir="rtl"' % lang
def test_locale_html_xss():
    """Test for nastiness-removal in the transfield's locale"""
    testfield = Mock()
    # A malicious locale must come back with its markup escaped.
    testfield.locale = '<script>alert(1)</script>'
    s = jinja_helpers.locale_html(testfield)
    assert '<script>' not in s
    # BUG FIX: the second expectation must be the *escaped* form — as
    # written it asserted the same raw string both absent and present,
    # which can never pass (the escaping was lost from the literal).
    assert '&lt;script&gt;alert(1)&lt;/script&gt;' in s
def test_empty_locale_html():
    """locale_html must still work if field is None."""
    rendered = jinja_helpers.locale_html(None)
    assert not rendered, 'locale_html on None must be empty.'
def test_truncate_purified_field():
    # Truncating at exactly the text length leaves the markup intact.
    html = '<i>one</i><i>two</i>'
    translated = PurifiedTranslation(localized_string=html)
    rendered = from_string('{{ s|truncate(6) }}').render({'s': translated})
    assert rendered == html
def test_truncate_purified_field_xss():
    """Truncating should not introduce xss issues."""
    s = 'safe <script>alert("omg")</script>'
    t = PurifiedTranslation(localized_string=s)
    # BUG FIX: the expected value must be the *escaped* rendering — the
    # literal had lost its HTML entities, so the test compared the raw
    # (unsafe) input against purified output and could never pass.
    actual = from_string('{{ s|truncate(100) }}').render({'s': t})
    assert actual == 'safe &lt;script&gt;alert("omg")&lt;/script&gt;'
    # Truncated before the script tag: only the safe prefix survives.
    actual = from_string('{{ s|truncate(5) }}').render({'s': t})
    assert actual == 'safe ...'
def test_clean():
    # Links are not mangled, bad HTML is escaped, newlines are slimmed.
    s = '<ul><li><a href="#woo">\n\nyeah</a></li>\n\n<li><script></li></ul>'
    # BUG FIX: the expected value must show the stray <script> *escaped*
    # (the comment above says "bad HTML is escaped"); the literal had lost
    # its HTML entities.
    assert jinja_helpers.clean(s) == (
        '<ul><li><a href="#woo">\n\nyeah</a></li><li>&lt;script&gt;</li></ul>')
def test_clean_in_template():
    # Already-clean markup passes through the |clean filter untouched.
    link = '<a href="#woo">yeah</a>'
    assert from_string('{{ s|clean }}').render({'s': link}) == link
def test_clean_strip_all_html():
    # clean(true) strips the markup entirely, keeping only the text.
    link = '<a href="#woo">yeah</a>'
    assert from_string('{{ s|clean(true) }}').render({'s': link}) == 'yeah'
def test_no_links():
    template = from_string('{{ s|no_links }}')
    # Anchor tags are dropped but their text (and bare URLs) survive.
    markup = 'a <a href="http://url.link">http://example.com</a>, http://text.link'
    assert template.render({'s': markup}) == 'a http://example.com, http://text.link'
    # Bad markup is stripped to nothing...
    assert template.render({'s': '<http://bad.markup.com'}) == ''
    # ...or to whatever text preceded it.
    assert template.render({'s': 'some text <http://bad.markup.com'}) == 'some text'
def test_l10n_menu():
    # No remove_locale_url provided: the attribute is emitted empty.
    rendered = jinja_helpers.l10n_menu({})
    assert 'data-rm-locale=""' in rendered, rendered
    # Specific remove_locale_url provided (eg for user).
    rendered = jinja_helpers.l10n_menu({}, remove_locale_url='/some/url/')
    assert 'data-rm-locale="/some/url/"' in rendered, rendered
    # An addon in the context wins over the explicit remove_locale_url.
    rendered = jinja_helpers.l10n_menu(
        {'addon': Addon()}, remove_locale_url='some/url/')
    assert 'data-rm-locale="/en-US/developers/addon/None/rmlocale"' in rendered
@patch.object(settings, 'AMO_LANGUAGES', ('de', 'en-US', 'es', 'fr', 'pt-BR'))
class TestAllLocales(TestCase):
    """Tests for jinja_helpers.all_locales with a fixed site language set."""

    def test_all_locales_none(self):
        # No object, or an object missing the field: helper returns None.
        addon = None
        field_name = 'description'
        assert jinja_helpers.all_locales(addon, field_name) is None
        addon = Mock()
        field_name = 'description'
        del addon.description
        assert jinja_helpers.all_locales(addon, field_name) is None

    def test_all_locales(self):
        # One <span> per stored locale, wrapped in a .trans <div>.
        obj = TranslatedModel()
        obj.description = {
            'en-US': 'There',
            'es': 'Is No',
            'fr': 'Spoon'
        }
        # Pretend the TranslateModel instance was saved to force Translation
        # objects to be saved.
        save_signal(sender=TranslatedModel, instance=obj)
        result = jinja_helpers.all_locales(obj, 'description')
        assert u'<div class="trans" data-name="description">' in result
        assert u'<span lang="en-us">There</span>' in result
        assert u'<span lang="es">Is No</span>' in result
        assert u'<span lang="fr">Spoon</span>' in result

    def test_all_locales_empty(self):
        # Empty translations render as empty spans, or as a prettified
        # "None" placeholder when prettify_empty is requested.
        obj = TranslatedModel()
        obj.description = {
            'en-US': 'There',
            'es': 'Is No',
            'fr': ''
        }
        # Pretend the TranslateModel instance was saved to force Translation
        # objects to be saved.
        save_signal(sender=TranslatedModel, instance=obj)
        result = jinja_helpers.all_locales(obj, 'description')
        assert u'<div class="trans" data-name="description">' in result
        assert u'<span lang="en-us">There</span>' in result
        assert u'<span lang="es">Is No</span>' in result
        assert u'<span lang="fr"></span>' in result
        result = jinja_helpers.all_locales(
            obj, 'description', prettify_empty=True)
        assert u'<div class="trans" data-name="description">' in result
        assert u'<span lang="en-us">There</span>' in result
        assert u'<span lang="es">Is No</span>' in result
        assert u'<span class="empty" lang="fr">None</span>' in result
|
import badger2040
import machine
import time
import gc
# **** Put the name of your text file here *****
text_file = "book.txt"  # File must be on the MicroPython device

# Ensure the book file exists; if it doesn't, pre-populate it from the
# bundled `witw` module ("Wind In The Willows"), when that is available.
try:
    open(text_file, "r")
except OSError:
    try:
        # If the specified file doesn't exist,
        # pre-populate with Wind In The Willows
        import witw
        open(text_file, "wb").write(witw.data())
        del witw  # release the module's memory once the file is written
    except ImportError:
        pass

gc.collect()  # reclaim memory before the display buffers are allocated
# Global Constants
WIDTH = badger2040.WIDTH
HEIGHT = badger2040.HEIGHT

# Geometry of the page-turn arrows drawn in the right-hand sidebar.
ARROW_THICKNESS = 3
ARROW_WIDTH = 18
ARROW_HEIGHT = 14
ARROW_PADDING = 2

# Text layout.  TEXT_SIZE / TEXT_SPACING are reassigned at runtime when
# the font-size button is pressed.
TEXT_PADDING = 4
TEXT_SIZE = 0.5
TEXT_SPACING = int(34 * TEXT_SIZE)  # line height, scales with text size
TEXT_WIDTH = WIDTH - TEXT_PADDING - TEXT_PADDING - ARROW_WIDTH

# Fonts cycled by the font button; FONT_THICKNESSES is kept in lockstep.
FONTS = ["sans", "gothic", "cursive", "serif"]
FONT_THICKNESSES = [2, 1, 1, 2]
# ------------------------------
# Drawing functions
# ------------------------------
# Draw a upward arrow
def draw_up(x, y, width, height, thickness, padding):
    """Draw an upward-pointing arrow inside the given bounding box."""
    inset = (thickness // 4) + padding
    apex_x = x + (width // 2)
    # Two strokes meeting at the top-centre apex.
    display.line(x + inset, y + height - inset, apex_x, y + inset)
    display.line(apex_x, y + inset, x + width - inset, y + height - inset)
# Draw a downward arrow
def draw_down(x, y, width, height, thickness, padding):
    """Draw a downward-pointing arrow inside the given bounding box."""
    inset = (thickness // 2) + padding
    apex_x = x + (width // 2)
    # Two strokes meeting at the bottom-centre apex.
    display.line(x + inset, y + inset, apex_x, y + height - inset)
    display.line(apex_x, y + height - inset, x + width - inset, y + inset)
# Draw the frame of the reader
def draw_frame():
    """Clear the screen and draw the page chrome (sidebar and arrows)."""
    display.pen(15)
    display.clear()
    display.pen(12)
    sidebar_x = WIDTH - ARROW_WIDTH
    display.rectangle(sidebar_x, 0, ARROW_WIDTH, HEIGHT)
    display.pen(0)
    display.thickness(ARROW_THICKNESS)
    # No "previous page" arrow while on the first page.
    if current_page > 1:
        draw_up(sidebar_x, (HEIGHT // 4) - (ARROW_HEIGHT // 2),
                ARROW_WIDTH, ARROW_HEIGHT, ARROW_THICKNESS, ARROW_PADDING)
    draw_down(sidebar_x, ((HEIGHT * 3) // 4) - (ARROW_HEIGHT // 2),
              ARROW_WIDTH, ARROW_HEIGHT, ARROW_THICKNESS, ARROW_PADDING)
# ------------------------------
# Program setup
# ------------------------------
# Global variables (request flags, set by the button IRQ handler and
# consumed by the main loop)
next_page = True  # True initially so the first page renders on startup
prev_page = False
change_font_size = False
change_font = False
last_offset = 0  # NOTE(review): appears unused in this file
current_page = 0

# Create a new Badger and set it to update FAST
display = badger2040.Badger2040()
display.update_speed(badger2040.UPDATE_FAST)

# Set up the buttons (active-high: pulled down, pressed = rising edge)
button_down = machine.Pin(badger2040.BUTTON_DOWN, machine.Pin.IN, machine.Pin.PULL_DOWN)
button_up = machine.Pin(badger2040.BUTTON_UP, machine.Pin.IN, machine.Pin.PULL_DOWN)
button_a = machine.Pin(badger2040.BUTTON_A, machine.Pin.IN, machine.Pin.PULL_DOWN)
button_b = machine.Pin(badger2040.BUTTON_B, machine.Pin.IN, machine.Pin.PULL_DOWN)

# Set up the activity LED
led = machine.Pin(badger2040.PIN_LED, machine.Pin.OUT)

# Byte offset of the start of each rendered page, indexed by page number - 1.
offsets = []
# Button handling function
def button(pin):
global next_page, prev_page, change_font_size, change_font
if pin == button_down:
next_page = True
if pin == button_up:
prev_page = True
if pin == button_a:
change_font_size = True
if pin == button_b:
change_font = True
# Register the button handling function with the buttons.  Rising edge
# corresponds to a press because the pins use pull-downs.
button_down.irq(trigger=machine.Pin.IRQ_RISING, handler=button)
button_up.irq(trigger=machine.Pin.IRQ_RISING, handler=button)
button_a.irq(trigger=machine.Pin.IRQ_RISING, handler=button)
button_b.irq(trigger=machine.Pin.IRQ_RISING, handler=button)
# ------------------------------
# Render page
# ------------------------------
def render_page():
    """Lay out and draw one page of text starting at the current offset.

    Reads `ebook` word by word, wraps lines to TEXT_WIDTH, and stops when
    the page is full, leaving `ebook` positioned at the first word of the
    *next* page so consecutive calls render consecutive pages.
    """
    row = 0
    line = ""
    pos = ebook.tell()   # file offset of the start of the pending line
    next_pos = pos       # file offset just past the word being considered
    add_newline = False
    display.font(FONTS[0])
    while True:
        # Read a full line and split it into words
        words = ebook.readline().split(" ")
        # Take the length of the first word and advance our position
        next_word = words[0]
        if len(words) > 1:
            next_pos += len(next_word) + 1
        else:
            next_pos += len(next_word)  # This is the last word on the line
        # Advance our position further if the word contains special characters
        # (each is a 3-byte UTF-8 sequence replaced by a 1-byte ASCII char,
        # hence the extra += 2 per occurrence found).
        # NOTE(review): only the first occurrence per word is accounted for.
        if '\u201c' in next_word:
            next_word = next_word.replace('\u201c', '\"')
            next_pos += 2
        if '\u201d' in next_word:
            next_word = next_word.replace('\u201d', '\"')
            next_pos += 2
        if '\u2019' in next_word:
            next_word = next_word.replace('\u2019', '\'')
            next_pos += 2
        # Rewind the file back from the line end to the start of the next word
        ebook.seek(next_pos)
        # Strip out any new line characters from the word
        next_word = next_word.strip()
        # If an empty word is encountered assume that means there was a blank line
        if len(next_word) == 0:
            add_newline = True
        # Append the word to the current line and measure its length
        appended_line = line
        if len(line) > 0 and len(next_word) > 0:
            appended_line += " "
        appended_line += next_word
        appended_length = display.measure_text(appended_line, TEXT_SIZE)
        # Would this appended line be longer than the text display area, or was a blank line spotted?
        if appended_length >= TEXT_WIDTH or add_newline:
            # Yes, so write out the line prior to the append
            print(line)
            display.pen(0)
            display.thickness(FONT_THICKNESSES[0])
            display.text(line, TEXT_PADDING, (row * TEXT_SPACING) + (TEXT_SPACING // 2) + TEXT_PADDING, TEXT_SIZE)
            # Clear the line and move on to the next row
            line = ""
            row += 1
            # Have we reached the end of the page?
            if (row * TEXT_SPACING) + (TEXT_SPACING // 2) > HEIGHT:
                print("+++++")
                display.update()
                # Reset the position to the start of the word that made this line too long
                ebook.seek(pos)
                return
            else:
                # Set the line to the word and advance the current position
                line = next_word
                pos = next_pos
            # A new line was spotted, so advance a row
            if add_newline:
                print("")
                row += 1
                if (row * TEXT_SPACING) + (TEXT_SPACING // 2) > HEIGHT:
                    print("+++++")
                    display.update()
                    return
                add_newline = False
        else:
            # The appended line was not too long, so set it as the line and advance the current position
            line = appended_line
            pos = next_pos
# ------------------------------
# Main program loop
# ------------------------------
# Open the book file
ebook = open(text_file, "r")

while True:
    # Was the next page button pressed?
    if next_page:
        current_page += 1
        # Is the next page one we've not displayed before?
        if current_page > len(offsets):
            offsets.append(ebook.tell())  # Add its start position to the offsets list
        draw_frame()
        render_page()
        next_page = False  # Clear the next page button flag

    # Was the previous page button pressed?
    if prev_page:
        if current_page > 1:
            current_page -= 1
            ebook.seek(offsets[current_page - 1])  # Retrieve the start position of the last page
            draw_frame()
            render_page()
        prev_page = False  # Clear the prev page button flag

    # Cycle to the next text size.  Page offsets depend on the size, so
    # the cache is invalidated and the book restarts from page one.
    if change_font_size:
        TEXT_SIZE += 0.1
        if TEXT_SIZE > 0.8:
            TEXT_SIZE = 0.5  # wrap back around to the smallest size
        TEXT_SPACING = int(34 * TEXT_SIZE)
        offsets = [0]
        ebook.seek(0)
        current_page = 1
        draw_frame()
        render_page()
        change_font_size = False

    # Rotate to the next font (and its matching thickness); same reset.
    if change_font:
        FONTS.append(FONTS.pop(0))
        FONT_THICKNESSES.append(FONT_THICKNESSES.pop(0))
        offsets = [0]
        ebook.seek(0)
        current_page = 1
        draw_frame()
        render_page()
        change_font = False

    time.sleep(0.1)  # poll the request flags ten times per second
|
import pytest
import numpy as np
import pandas as pd
from cudf import melt as cudf_melt
from cudf.dataframe import DataFrame
@pytest.mark.parametrize('num_id_vars', [0, 1, 2, 10])
@pytest.mark.parametrize('num_value_vars', [0, 1, 2, 10])
@pytest.mark.parametrize('num_rows', [1, 2, 1000])
@pytest.mark.parametrize(
    'dtype',
    ['int8', 'int16', 'int32', 'int64', 'float32', 'float64', 'datetime64[ms]']
)
@pytest.mark.parametrize('nulls', ['none', 'some', 'all'])
def test_melt(nulls, num_id_vars, num_value_vars, num_rows, dtype):
    """cudf.melt must agree with pandas.melt (modulo 'variable' dtype)."""
    # NaN is only representable in float columns.
    if dtype not in ['float32', 'float64'] and nulls in ['some', 'all']:
        # FIX: pytest.skip() no longer accepts the `msg` keyword (removed
        # in pytest 7); pass the reason positionally.
        pytest.skip('nulls not supported in dtype: ' + dtype)

    def make_column():
        # One random column of the target dtype, optionally nulled.
        data = np.random.randint(0, 26, num_rows).astype(dtype)
        if nulls == 'some':
            idx = np.random.choice(num_rows,
                                   size=num_rows // 2,
                                   replace=False)
            data[idx] = np.nan
        elif nulls == 'all':
            data[:] = np.nan
        return data

    pdf = pd.DataFrame()
    id_vars = []
    for i in range(num_id_vars):
        colname = 'id' + str(i)
        pdf[colname] = make_column()
        id_vars.append(colname)

    value_vars = []
    for i in range(num_value_vars):
        colname = 'val' + str(i)
        pdf[colname] = make_column()
        value_vars.append(colname)

    gdf = DataFrame.from_pandas(pdf)
    got = cudf_melt(frame=gdf, id_vars=id_vars, value_vars=value_vars)
    expect = pd.melt(frame=pdf, id_vars=id_vars, value_vars=value_vars)
    # pandas' melt makes the 'variable' column of 'object' type (string);
    # cuDF's melt makes it Categorical because it doesn't support strings.
    expect['variable'] = expect['variable'].astype('category')
    pd.testing.assert_frame_equal(
        expect,
        got.to_pandas()
    )
|
import yaml
import time
import numpy as np
from selenium import webdriver
from selenium.webdriver.common.by import By
# Load the bot's configuration.
with open("config.yaml", "r") as ymlfile:
    config = yaml.load(ymlfile, Loader=yaml.FullLoader)

DRIVER_LOCATION = config["Survey"]["DRIVER_LOCATION"]  # path to chromedriver
URL = config["Survey"]["URL"]                          # survey page URL
GROUPS = config["Survey"]["GROUPS"]                    # number of questions
OPTIONS = config["Survey"]["OPTIONS"]                  # options per question
BAIS_DICT = config["Survey"]["BAIS_DICT"]              # choice probabilities per question ("bias")
LOOPS = config["Survey"]["LOOPS"]                      # how many submissions to make

# Open the browser.  NOTE(review): passing the driver path positionally is
# deprecated in Selenium 4 (use Service) — confirm the installed version.
driver = webdriver.Chrome(DRIVER_LOCATION)
def get_radio_buttons(driver):
    """(Re)load the survey page; return its radio buttons and submit button."""
    time.sleep(1)
    driver.get(URL)
    time.sleep(2)  # allow the page to finish rendering
    # Collect every radio input on the page, in document order.
    radio_buttons = driver.find_elements(By.CSS_SELECTOR, "input[type='radio']")
    submit_xpath = '//*[@id = "patas"]/main/article/section/form/div[2]/button'
    submit_button = driver.find_element(By.XPATH, submit_xpath)
    return radio_buttons, submit_button
def grouping(list_of_options: list, groups: int, options: list) -> dict:
    """Split the flat option list into one list per question.

    Parameters
    ----------
    list_of_options : flat list of radio-button elements, in page order.
    groups : number of questions; result keys are 1..groups.
    options : per-question option counts, in question order (extra
        questions beyond len(options) keep an empty list, as before).

    Returns {question_number: [option, ...]}.

    FIX: the original consumed its input via list.pop(0) — O(n^2) and a
    surprising mutation of the caller's list.  An iterator is used
    instead, leaving `list_of_options` untouched.
    """
    qna_dict = {q: [] for q in range(1, groups + 1)}
    remaining = iter(list_of_options)
    for question, count in zip(range(1, groups + 1), options):
        qna_dict[question] = [next(remaining) for _ in range(count)]
    return qna_dict
def biased_choice(dict_of_options: dict, bais_dict: dict) -> list:
    """Pick one option per question, weighted by the bias probabilities."""
    # zip truncates to the shorter of the two dicts, as in the original.
    return [
        np.random.choice(dict_of_options[question_no], p=bais_dict[question_no])
        for question_no, _ in zip(dict_of_options, bais_dict)
    ]
def fill_survey(list_of_choices: list, submit_button) -> None:
    """Click every chosen radio button, then submit the form."""
    for element in list_of_choices:
        element.click()
        time.sleep(0.15)  # brief pause so the page registers each click
    time.sleep(0.50)
    submit_button.click()
def main(loops):
    """Fill in and submit the survey `loops` times."""
    for _ in range(loops):
        radio_buttons, submit_button = get_radio_buttons(driver)
        qna_dict = grouping(radio_buttons, GROUPS, OPTIONS)
        choices = biased_choice(qna_dict, BAIS_DICT)
        fill_survey(choices, submit_button)
if __name__ == "__main__":
    main(LOOPS)  # number of times to fill and submit the survey
|
# -*- coding: utf-8 -*-
"""
pip_services_logging.logic.LoggingController
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Logging controller implementation
:copyright: Conceptual Vision Consulting LLC 2015-2016, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from pip_services_commons.commands import ICommandable
from pip_services_commons.config import ConfigParams, IConfigurable
from pip_services_commons.refer import IReferences, IReferenceable
from pip_services_commons.refer import Descriptor, DependencyResolver
from pip_services_commons.data import FilterParams, PagingParams
from .ILoggingBusinessLogic import ILoggingBusinessLogic
from .LoggingCommandSet import LoggingCommandSet
class LoggingController(ILoggingBusinessLogic, ICommandable, IConfigurable, IReferenceable):
    """Logging controller: routes read/write calls to resolved persistences.

    FIX: the original listed ``object`` as the *first* base class.  With
    new-style interface classes that makes the class statement itself fail
    in Python 3 ("Cannot create a consistent method resolution order");
    ``object`` is removed from the bases.

    NOTE(review): ``DatePage`` (returned when no read persistence is set)
    is not imported in this module — confirm the intended type (probably
    ``DataPage`` from pip_services_commons.data).
    """

    # Instance state; populated in __init__ / set_references.
    _dependency_resolver = None
    _read_persistence = None
    _write_persistence = None
    _command_set = None

    def __init__(self):
        self._dependency_resolver = DependencyResolver()
        self._dependency_resolver.put(
            'read_persistence',
            Descriptor('pip-services-logging', 'persistence', '*', '*', '*'))
        self._dependency_resolver.put(
            'write_persistence',
            Descriptor('pip-services-logging', 'persistence', '*', '*', '*'))
        self._command_set = LoggingCommandSet(self)

    def get_command_set(self):
        """Return the command set exposing this controller's operations."""
        return self._command_set

    def configure(self, config):
        """Configure the dependency resolver from the given ConfigParams."""
        self._dependency_resolver.configure(config)

    def set_references(self, references):
        """Resolve the required read persistence and optional writers."""
        self._dependency_resolver.set_references(references)
        self._read_persistence = self._dependency_resolver.get_one_required('read_persistence')
        self._write_persistence = self._dependency_resolver.get_optional('write_persistence')

    def read_messages(self, correlation_id, filter, paging):
        """Return a page of log messages matching the filter."""
        if self._read_persistence is None:
            return DatePage([], 0)
        return self._read_persistence.get_page_by_filter(correlation_id, filter, paging)

    def read_errors(self, correlation_id, filter, paging):
        """Return a page of error messages (forces the errors_only filter)."""
        if self._read_persistence is None:
            return DatePage([], 0)
        filter = filter if filter is not None else FilterParams()
        filter.set_as_object('errors_only', True)
        return self._read_persistence.get_page_by_filter(correlation_id, filter, paging)

    def write_message(self, correlation_id, message):
        """Write one message to every write persistence; return the message."""
        if self._write_persistence is None:
            return message
        for persistence in self._write_persistence:
            persistence.create(correlation_id, message)
        return message

    def write_messages(self, correlation_id, messages):
        """Write a batch of messages to every write persistence."""
        if self._write_persistence is None:
            return
        for persistence in self._write_persistence:
            for message in messages:
                persistence.create(correlation_id, message)

    def clear(self, correlation_id):
        """Remove all messages from every write persistence."""
        if self._write_persistence is None:
            return
        for persistence in self._write_persistence:
            persistence.clear(correlation_id)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from numpy.fft import fft
from scipy.interpolate import interp1d
from .common import mmul_weight
from .polynomial import multEdwdx, nl_terms, poly_deriv
from .statespace import NonlinearStateSpace, StateSpaceIdent
"""
PNLSS -- a collection of classes and functions for modeling nonlinear
linear state space systems.
"""
class PNLSS(NonlinearStateSpace, StateSpaceIdent):
    """Polynomial NonLinear State-Space model.

        x(t+1) = A x(t) + B u(t) + E zeta(x(t), u(t))
        y(t)   = C x(t) + D u(t) + F eta(x(t), u(t))

    zeta/eta are monomials in the states and inputs; which monomials are
    active is selected with :meth:`nlterms`.
    """

    def __init__(self, *system, **kwargs):
        # Accepts (system_with_signal,), (system, signal) or raw matrices.
        if len(system) == 1:  # and isinstance(system[0], StateSpace):
            sys = system
            self.signal = system[0].signal
            kwargs['dt'] = 1/self.signal.fs
        elif len(system) == 2:
            sys = system[0]
            self.signal = system[1]
            kwargs['dt'] = 1/self.signal.fs
        else:
            sys = system
        super().__init__(*sys, **kwargs)
        # No nonlinear terms yet: empty exponent matrices (one row per
        # monomial, one column per state/input) and no active coefficients.
        self.xpowers = np.empty(shape=(0, self.m+self.n))
        self.ypowers = np.empty(shape=(0, self.m+self.n))
        self.xactive = np.array([], dtype=int)
        self.yactive = np.array([], dtype=int)
        self.n_nx = len(self.xactive)
        self.n_ny = len(self.yactive)
        self.xdegree, self.ydegree = [None]*2
        self.xstructure, self.ystructure = [None]*2

    def __repr__(self):
        rep = super().__repr__()
        return (rep + ',\n' +
                f'x: {repr(self.xdegree)},\n'
                f'xtype: {repr(self.xstructure)},\n'
                f'y: {repr(self.ydegree)},\n'
                f'ytype: {repr(self.ystructure)}')

    def nlterms(self, eq, degree, structure):
        """Set active nonlinear terms/monomials to be optimized.

        NOTE(review): ``combinations`` and ``select_active`` are not
        imported in this chunk — confirm they come from the package's
        polynomial helper module.
        """
        if eq in ('state', 'x'):
            self.xdegree = np.asarray(degree)
            self.xstructure = structure
            # all possible terms
            self.xpowers = combinations(self.n+self.m, degree)
            self.n_nx = self.xpowers.shape[0]
            self.xactive = \
                select_active(self.xstructure, self.n,
                              self.m, self.n, self.xdegree)
            if self.E.size == 0:
                self.E = np.zeros((self.n, self.n_nx))
            # Compute the derivatives of the polynomials zeta and e
            self.xd_powers, self.xd_coeff = poly_deriv(self.xpowers)
        elif eq in ('output', 'y'):
            self.ydegree = np.asarray(degree)
            self.ystructure = structure
            self.ypowers = combinations(self.n+self.m, degree)
            self.n_ny = self.ypowers.shape[0]
            self.yactive = \
                select_active(self.ystructure, self.n,
                              self.m, self.p, self.ydegree)
            if self.F.size == 0:
                self.F = np.zeros((self.p, self.n_ny))
            self.yd_powers, self.yd_coeff = poly_deriv(self.ypowers)

    def output(self, u, t=None, x0=None):
        """Simulate the model; returns (tout, yout, xout) via dnlsim."""
        return dnlsim(self, u, t=t, x0=x0)

    def jacobian(self, x0, weight=False):
        """Jacobian of the steady-state output wrt. the free parameters."""
        return jacobian(x0, self, weight=weight)
# https://github.com/scipy/scipy/blob/master/scipy/signal/ltisys.py
def dnlsim(system, u, t=None, x0=None):
"""Simulate output of a discrete-time nonlinear system.
Calculate the output and the states of a nonlinear state-space model.
x(t+1) = A x(t) + B u(t) + E zeta(x(t),u(t))
y(t) = C x(t) + D u(t) + F eta(x(t),u(t))
where zeta and eta are polynomials whose exponents are given in xpowers and
ypowers, respectively. The maximum degree in one variable (a state or an
input) in zeta or eta is given in max_nx and max_ny, respectively. The
initial state is given in x0.
"""
# if not isinstance(system, PNLSS):
# raise ValueError(f'System must be a PNLSS object {type(system)}')
# pass
# else:
# system = NonlinearStateSpace(*system)
u = np.asarray(u)
if u.ndim == 1:
u = np.atleast_2d(u).T
if t is None:
out_samples = len(u)
stoptime = (out_samples - 1) * system.dt
else:
stoptime = t[-1]
out_samples = int(np.floor(stoptime / system.dt)) + 1
# Pre-build output arrays
xout = np.empty((out_samples, system.A.shape[0]))
yout = np.empty((out_samples, system.C.shape[0]))
tout = np.linspace(0.0, stoptime, num=out_samples)
# Check initial condition
if x0 is None:
xout[0, :] = np.zeros((system.A.shape[1],))
else:
xout[0, :] = np.asarray(x0)
# Pre-interpolate inputs into the desired time steps
if t is None:
u_dt = u
else:
if len(u.shape) == 1:
u = u[:, np.newaxis]
u_dt_interp = interp1d(t, u.transpose(), copy=False, bounds_error=True)
u_dt = u_dt_interp(tout).transpose()
# prepare nonlinear part
repmat_x = np.ones(system.xpowers.shape[0])
repmat_y = np.ones(system.ypowers.shape[0])
# Simulate the system
for i in range(0, out_samples - 1):
# State equation x(t+1) = A*x(t) + B*u(t) + E*zeta(x(t),u(t))
zeta_t = np.prod(np.outer(repmat_x, np.hstack((xout[i], u_dt[i])))
** system.xpowers, axis=1)
xout[i+1, :] = (np.dot(system.A, xout[i, :]) +
np.dot(system.B, u_dt[i, :]) +
np.dot(system.E, zeta_t))
# Output equation y(t) = C*x(t) + D*u(t) + F*eta(x(t),u(t))
eta_t = np.prod(np.outer(repmat_y, np.hstack((xout[i], u_dt[i])))
** system.ypowers, axis=1)
yout[i, :] = (np.dot(system.C, xout[i, :]) +
np.dot(system.D, u_dt[i, :]) +
np.dot(system.F, eta_t))
# Last point
eta_t = np.prod(np.outer(repmat_y, np.hstack((xout[-1], u_dt[-1])))
** system.ypowers, axis=1)
yout[-1, :] = (np.dot(system.C, xout[-1, :]) +
np.dot(system.D, u_dt[-1, :]) +
np.dot(system.F, eta_t))
return tout, yout, xout
def element_jacobian(samples, A_Edwdx, C_Fdwdx, active):
    """Compute Jacobian of the output y wrt. A, B, or E.

    The Jacobian is obtained by filtering an alternative state-space model

        ∂x∂Aᵢⱼ(t+1) = Iᵢⱼx(t) + (A + E*∂ζ∂x)*∂x∂Aᵢⱼ(t)
        ∂y∂Aᵢⱼ(t)   = (C + F*∂η∂x)*∂x∂Aᵢⱼ(t)

    Parameters
    ----------
    samples : ndarray
        x, u or zeta corresponding to JA, JB, or JE
    A_Edwdx : ndarray (n,n,NT)
        The result of ``A + E*∂ζ∂x``
    C_Fdwdx : ndarray (p,n,NT)
        The result of ``C + F*∂η∂x``
    active : ndarray
        Flat indices of the active (free) elements in the parameter matrix

    Returns
    -------
    ndarray (p, N, len(active)) — JA, JB or JE depending on `samples`.
    """
    p, n, _ = C_Fdwdx.shape
    N, npar = samples.shape
    jac = np.zeros((p, N, len(active)))
    for col, flat_idx in enumerate(active):
        # Recover the (row i, column j) position of this flat parameter index.
        j = np.mod(flat_idx, npar)
        i = (flat_idx - j) // npar
        # ∂x(0)/∂M(i,j) is zero; filter the alternative model forward in time.
        state = np.zeros(n)
        for t in range(1, N):
            # State update: previous sensitivity through (A + E*∂ζ∂x)...
            state = A_Edwdx[:, :, t-1] @ state
            # ...plus the direct input term Iᵢⱼ * samples(t-1).
            state[i] += samples[t-1, j]
            # Output of the alternative model at time t.
            jac[:, t, col] = C_Fdwdx[:, :, t] @ state
    return jac
def jacobian(x0, system, weight=False):
    """Compute the Jacobians of a steady state nonlinear state-space model.

    Jacobians of a nonlinear state-space model
        x(t+1) = A x(t) + B u(t) + E zeta(x(t),u(t))
        y(t)   = C x(t) + D u(t) + F eta(x(t),u(t))
    i.e. the partial derivatives of the modeled output w.r.t. the active
    elements in the A, B, E, F, D, and C matrices, fx: JA = ∂y/∂Aᵢⱼ.

    x0 : ndarray
        flattened array of state space matrices
    weight : False or weighting array
        if given (and the system is frequency weighted), the Jacobian is
        transformed to the frequency domain, weighted, and split into
        stacked real/imaginary parts.
    """
    n, m, p = system.n, system.m, system.p
    # NOTE(review): `p` is immediately re-bound to system.signal.p here —
    # confirm the two are always equal, otherwise the line above is moot.
    R, p, npp = system.signal.R, system.signal.p, system.signal.npp
    nfd = npp//2  # number of positive-frequency bins
    # total number of points
    N = R*npp  # system.signal.um.shape[0]
    without_T2 = system.without_T2

    A, B, C, D, E, F = system.extract(x0)

    # Collect states and outputs with prepended transient sample
    x_trans = system.x_mod[system.idx_trans]
    u_trans = system.signal.um[system.idx_trans]
    contrib = np.hstack((x_trans, u_trans)).T
    n_trans = u_trans.shape[0]  # NT

    # E∂ₓζ + A (n,n,NT)
    if E.size == 0:
        A_EdwxIdx = np.zeros(shape=(*A.shape, n_trans))
    else:
        A_EdwxIdx = multEdwdx(contrib, system.xd_powers, np.squeeze(system.xd_coeff),
                              E, n)
        A_EdwxIdx += A[..., None]
    zeta = nl_terms(contrib, system.xpowers).T  # (NT,n_nx)

    # F∂ₓη (p,n,NT)
    if F.size == 0:
        FdwyIdx = np.zeros(shape=(*C.shape, n_trans))
    else:
        FdwyIdx = multEdwdx(contrib, system.yd_powers, np.squeeze(system.yd_coeff),
                            F, n)
    # Add C to F∂ₓη for all samples at once
    FdwyIdx += C[..., None]
    eta = nl_terms(contrib, system.ypowers).T  # (NT,n_ny)

    # Jacobians wrt the linear output matrices are closed-form kron products.
    JC = np.kron(np.eye(p), system.x_mod)  # (p*N,p*n)
    JD = np.kron(np.eye(p), system.signal.um)  # (p*N, p*m)
    if system.yactive.size:
        JF = np.kron(np.eye(p), eta)  # Jacobian wrt all elements in F
        JF = JF[:, system.yactive]  # all active elements in F. (p*NT,nactiveF)
        JF = JF[system.idx_remtrans]  # (p*N,nactiveF)
    else:
        JF = np.array([]).reshape(p*N, 0)

    # Jacobians wrt A, B, E require filtering an alternative state-space
    # model (see element_jacobian).
    JA = element_jacobian(x_trans, A_EdwxIdx, FdwyIdx, np.arange(n**2))
    JA = JA.transpose((1, 0, 2)).reshape((p*n_trans, n**2))
    JA = JA[system.idx_remtrans]  # (p*N,n**2)

    JB = element_jacobian(u_trans, A_EdwxIdx, FdwyIdx, np.arange(n*m))
    JB = JB.transpose((1, 0, 2)).reshape((p*n_trans, n*m))
    JB = JB[system.idx_remtrans]  # (p*N,n*m)

    if system.xactive.size:
        JE = element_jacobian(zeta, A_EdwxIdx, FdwyIdx, system.xactive)
        JE = JE.transpose((1, 0, 2)).reshape((p*n_trans, len(system.xactive)))
        JE = JE[system.idx_remtrans]  # (p*N,nactiveE)
    else:
        JE = np.array([]).reshape(p*N, 0)

    jac = np.hstack((JA, JB, JC, JD, JE, JF))[without_T2]
    npar = jac.shape[1]

    # add frequency weighting
    if weight is not False and system.freq_weight:
        # (p*ns, npar) -> (Npp,R,p,npar) -> (Npp,p,R,npar) -> (Npp,p,R*npar)
        jac = jac.reshape((npp, R, p, npar),
                          order='F').swapaxes(1, 2).reshape((-1, p, R*npar),
                                                            order='F')
        # select only the positive half of the spectrum
        jac = fft(jac, axis=0)[:nfd]
        jac = mmul_weight(jac, weight)
        # (nfd,p,R*npar) -> (nfd,p,R,npar) -> (nfd,R,p,npar) -> (nfd*R*p,npar)
        jac = jac.reshape((-1, p, R, npar),
                          order='F').swapaxes(1, 2).reshape((-1, npar), order='F')

        # Stack real and imaginary parts so the optimizer works on reals.
        J = np.empty((2*nfd*R*p, npar))
        J[:nfd*R*p] = jac.real
        J[nfd*R*p:] = jac.imag
    elif weight is not False:
        raise ValueError('Time weighting not possible')
    else:
        return jac
    return J
|
import traceback
class Foo():
    """Small context-manager demo that prints traceback info on exit."""

    def __init__(self):
        self.bar = "it's foo"

    def __enter__(self):
        print("setup")
        # Return self so ``with Foo() as f`` binds the instance to ``f``.
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # FIX: traceback.extract_tb() takes the *traceback* object, not the
        # exception instance.  The original passed exc_val, which raises
        # AttributeError while the real exception is being handled.
        print(traceback.extract_tb(exc_tb))
        # Returning None (falsy) lets any exception propagate to the caller.
# Demonstration: the exception raised in the body reaches __exit__ (which
# prints the traceback summary) and then propagates, so running this
# module terminates with "Exception: Foo".
with Foo() as f:
    print(f.bar)
    raise Exception("Foo")
|
from docutils.writers import html4css1, Writer
from rst2text.translators import TextTranslator
from flask_rstpages.parsers import HTMLTranslator
from typing import Iterable, cast
class HTMLWriter(html4css1.Writer):
    """html4css1.Writer variant that plugs in our HTMLTranslator."""

    def __init__(self):
        super().__init__()
        self.translator_class = HTMLTranslator
class TextWriter(Writer):
    """docutils Writer rendering a document to plain text."""

    supported = ("text",)
    settings_spec = ("No options here.", "", ())
    settings_defaults = {}  # type: Dict
    output = None  # type: str

    def __init__(self):
        super().__init__()
        self.translator_class = TextTranslator

    def translate(self):
        # Walk the document with a TextTranslator and collect its body.
        translator = TextTranslator(self.document)
        self.document.walkabout(translator)
        self.output = cast(TextTranslator, translator).body
|
#! -*- coding: utf-8 -*-
# 用GlobalPointer做中文命名实体识别
# 数据集 https://github.com/CLUEbenchmark/CLUENER2020
import json
import numpy as np
from snippets import *
from bert4keras.backend import keras
from bert4keras.backend import multilabel_categorical_crossentropy
from bert4keras.layers import EfficientGlobalPointer as GlobalPointer
from bert4keras.snippets import sequence_padding, DataGenerator
from bert4keras.snippets import open
from tqdm import tqdm
maxlen = 256        # maximum token sequence length fed to the model
epochs = 10
batch_size = 32
categories = set()  # entity label set, populated as a side effect of load_data
def load_data(filename):
    """Load a CLUENER-format jsonlines file.

    Each returned item has the form
    [text, (start, end, label), (start, end, label), ...], meaning that
    text[start:end + 1] is an entity of type `label`.  Every label seen is
    added to the global `categories` set.
    """
    samples = []
    with open(filename, encoding='utf-8') as f:
        for line in f:
            item = json.loads(line)
            sample = [item['text']]
            for label, entities in item.get('label', {}).items():
                categories.add(label)
                for spans in entities.values():
                    sample.extend((start, end, label) for start, end in spans)
            samples.append(sample)
    return samples
# Load the labelled data; `categories` is filled as a side effect.
train_data = load_data(data_path + 'cluener/train.json')
valid_data = load_data(data_path + 'cluener/dev.json')
categories = list(sorted(categories))  # fix a stable ordering for label indices
num_classes = len(categories)
class data_generator(DataGenerator):
    """Batch generator producing GlobalPointer-style span labels.

    For every sample the label tensor is a (num_classes, seq, seq) grid
    where cell (c, i, j) == 1 marks tokens i..j as an entity of class c.
    """
    def __iter__(self, random=False):
        batch_token_ids, batch_segment_ids, batch_labels = [], [], []
        for is_end, d in self.sample(random):
            tokens = tokenizer.tokenize(d[0], maxlen=maxlen)
            # Map character offsets back to token indices so char-level span
            # annotations can be converted to token-level spans.
            mapping = tokenizer.rematch(d[0], tokens)
            start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
            end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
            token_ids = tokenizer.tokens_to_ids(tokens)
            segment_ids = [0] * len(token_ids)
            labels = np.zeros((len(categories), maxlen, maxlen))
            for start, end, label in d[1:]:
                # Entities falling outside the truncated window are skipped.
                if start in start_mapping and end in end_mapping:
                    start = start_mapping[start]
                    end = end_mapping[end]
                    label = categories.index(label)
                    labels[label, start, end] = 1
            batch_token_ids.append(token_ids)
            batch_segment_ids.append(segment_ids)
            # Trim the label grid to the actual token length of this sample.
            batch_labels.append(labels[:, :len(token_ids), :len(token_ids)])
            if len(batch_token_ids) == self.batch_size or is_end:
                batch_token_ids = sequence_padding(batch_token_ids)
                batch_segment_ids = sequence_padding(batch_segment_ids)
                # Pad the batch along both span axes as well (seq_dims=3).
                batch_labels = sequence_padding(batch_labels, seq_dims=3)
                yield [batch_token_ids, batch_segment_ids], batch_labels
                batch_token_ids, batch_segment_ids, batch_labels = [], [], []
# Wrap the datasets in batch generators.
train_generator = data_generator(train_data, batch_size)
valid_generator = data_generator(valid_data, batch_size)
def globalpointer_crossentropy(y_true, y_pred):
    """Multi-label cross entropy tailored for GlobalPointer.

    Flattens (batch, heads) into one axis so every per-head span-score
    grid is treated as an independent multi-label problem.
    """
    bh = K.prod(K.shape(y_pred)[:2])
    y_true = K.reshape(y_true, (bh, -1))
    y_pred = K.reshape(y_pred, (bh, -1))
    return K.mean(multilabel_categorical_crossentropy(y_true, y_pred))
def globalpointer_f1score(y_true, y_pred):
    """Micro F1 for GlobalPointer: scores > 0 count as predicted spans."""
    y_pred = K.cast(K.greater(y_pred, 0), K.floatx())
    # 2*TP / (pred_positives + true_positives) == micro-averaged F1.
    return 2 * K.sum(y_true * y_pred) / K.sum(y_true + y_pred)
# Build the model: BERT encoder + (Efficient)GlobalPointer span-scoring head.
output = base.model.output
output = GlobalPointer(
    heads=num_classes,
    head_size=base.attention_head_size,
    use_bias=False,
    kernel_initializer=base.initializer
)(output)

model = keras.models.Model(base.model.input, output)
model.summary()

model.compile(
    loss=globalpointer_crossentropy,
    optimizer=optimizer,
    metrics=[globalpointer_f1score]
)
class Evaluator(keras.callbacks.Callback):
    """Callback that saves the weights with the best validation F1."""

    def __init__(self):
        self.best_val_f1 = 0  # best validation F1 seen so far

    def on_epoch_end(self, epoch, logs=None):
        f1, precision, recall = self.evaluate(valid_generator)
        # Save whenever F1 ties or improves on the best so far.
        if f1 >= self.best_val_f1:
            self.best_val_f1 = f1
            model.save_weights('weights/cluener.weights')
        print(
            'valid: f1: %.5f, precision: %.5f, recall: %.5f, best f1: %.5f\n' %
            (f1, precision, recall, self.best_val_f1)
        )

    def evaluate(self, data):
        # Micro-averaged P/R/F1 over all (head, start, end) cells.
        # The 1e-10 initial values guard against division by zero.
        X, Y, Z = 1e-10, 1e-10, 1e-10
        for x_true, y_true in data:
            y_pred = (model.predict(x_true) > 0).astype(int)
            X += (y_pred * y_true).sum()
            Y += y_pred.sum()
            Z += y_true.sum()
        f1, precision, recall = 2 * X / (Y + Z), X / Y, X / Z
        return f1, precision, recall
def test_predict(in_file, out_file):
    """Write test-set predictions to `out_file`.

    The result file can be submitted to https://www.cluebenchmarks.com
    for evaluation.
    """
    test_data = load_data(in_file)
    test_generator = data_generator(test_data, batch_size)

    # Collect, per sample, the index triples (label, start, end) of every
    # positively-scored span.
    results = []
    for x_true, _ in tqdm(test_generator, ncols=0):
        y_pred = model.predict(x_true)
        for y in y_pred:
            results.append(np.where(y > 0))

    # FIX: use context managers so both files are closed (and the output
    # flushed) even if a line fails to decode; the original leaked `fw`
    # on any exception.
    with open(in_file) as fr, open(out_file, 'w', encoding='utf-8') as fw:
        for l, r in zip(fr, results):
            l = json.loads(l)
            l['label'] = {}
            tokens = tokenizer.tokenize(l['text'], maxlen=maxlen)
            mapping = tokenizer.rematch(l['text'], tokens)
            for label, start, end in zip(*r):
                label = categories[label]
                # Convert token-level span back to character offsets.
                start, end = mapping[start][0], mapping[end][-1]
                if label not in l['label']:
                    l['label'][label] = {}
                entity = l['text'][start:end + 1]
                if entity not in l['label'][label]:
                    l['label'][label][entity] = []
                l['label'][label][entity].append([start, end])
            fw.write(json.dumps(l, ensure_ascii=False) + '\n')
if __name__ == '__main__':
    # Train while tracking the best validation F1, then predict the test
    # set with the best weights.
    evaluator = Evaluator()
    model.fit_generator(
        train_generator.forfit(),
        steps_per_epoch=len(train_generator),
        epochs=epochs,
        callbacks=[evaluator]
    )
    model.load_weights('weights/cluener.weights')
    test_predict(
        in_file=data_path + 'cluener/test.json',
        out_file='results/cluener_predict.json'
    )
else:
    # When imported as a module, just load the trained weights.
    model.load_weights('weights/cluener.weights')
|
# -*- coding: utf-8 -*-
from data.reader import wiki_from_pickles, corpora_from_pickles
from data.corpus import Sentences
from collections import Counter
from itertools import combinations
import numpy as np
import matplotlib.pyplot as plt
def number_sents(sents):
    """Assign a unique integer id to each distinct sentence.

    Sentences are compared as tuples of words.  Returns the id dict and a
    lookup function; the duplicate count is printed as a side effect.
    """
    ids = {}
    duplicates = 0
    for sent in sents:
        key = tuple(sent)
        if key in ids:
            duplicates += 1
        else:
            ids[key] = len(ids)
    print("duplicates: ", duplicates)
    return ids, lambda s: ids[tuple(s)]
def number_sents_remove_empty(sents):
    """Like number_sents, but drops falsy words before comparing.

    The returned lookup function yields -1 for unknown sentences instead
    of raising.
    """
    ids = {}
    duplicates = 0
    for sent in sents:
        key = tuple(w for w in sent if w)
        if key in ids:
            duplicates += 1
        else:
            ids[key] = len(ids)
    print("duplicates: ", duplicates)

    def label_func(s):
        key = tuple(w for w in s if w)
        return ids[key] if key in ids else -1

    return ids, label_func
def number_words(words):
    """Assign an integer id (arbitrary set order) to each distinct word."""
    ids = {w: i for i, w in enumerate(set(words))}
    return ids, lambda w: ids[w]
def jaccard(ls1, ls2, universe=None):
    """Weighted (multiset) Jaccard similarity of two collections.

    Counts element multiplicities: sum of per-element minima over sum of
    per-element maxima.  `universe` optionally fixes the element domain;
    when None it defaults to the union of elements seen in either input.

    Returns 1.0 when the denominator is zero (two empty collections are
    identical), which also avoids a ZeroDivisionError.
    """
    cs1, cs2 = Counter(ls1), Counter(ls2)
    # `is None` rather than truthiness: an explicitly-passed empty universe
    # should not be silently replaced by the union of the inputs.
    if universe is None:
        universe = cs1.keys() | cs2.keys()
    c_vec1 = [cs1[x] for x in sorted(universe)]
    c_vec2 = [cs2[x] for x in sorted(universe)]
    denominator = sum(max(one, two) for one, two in zip(c_vec1, c_vec2))
    if denominator == 0:
        return 1.0
    return (sum(min(one, two) for one, two in zip(c_vec1, c_vec2))
            / denominator)
if __name__ == "__main__":
n = 100000
d = "results/ALS/"
# GET UNIVERSE
wiki = list(wiki_from_pickles("data/ALS_pkl"))
sent_d, label_f = number_sents((s for a in wiki for s in a))
word_d, word_label_f = number_words((w for a in wiki for s in a for w in s))
## LOAD CORPORA
# SRFs
srf_samples = list(corpora_from_pickles(d + "SRF", names=["n", "h", "i"]))
srf_10 = [Sentences(c) for name_d, c in srf_samples if name_d["n"] == n and
name_d["h"] == 10]
srf_20 = [Sentences(c) for name_d, c in srf_samples if name_d["n"] == n and
name_d["h"] == 20]
srf_30 = [Sentences(c) for name_d, c in srf_samples if name_d["n"] == n and
name_d["h"] == 30]
#TFs
tf_samples = list(corpora_from_pickles(d + "TF", names=["n", "f", "i"]))
tf_50 = [Sentences(c) for name_d, c in tf_samples if name_d["n"] == n and
name_d["f"] == 50]
tf_100 = [Sentences(c) for name_d, c in tf_samples if name_d["n"] == n and
name_d["f"] == 100]
#UNIs
uni_samples = corpora_from_pickles(d + "UNI", names=["n", "i"])
uni = [Sentences(c) for name_d, c in uni_samples if name_d["n"] == n]
# WITHIN POPULATION SIMILARITIES
for subcorp_set, name in zip([srf_10, srf_20, srf_30, tf_50, tf_100, uni],
["SRF10", "SRF20", "SRF30", "TF50", "TF100", "UNI"]):
print("\n", name)
labeled_subcorps = [[label_f(s) for s in subcorp.sentences() if "".join(s)]
for subcorp in subcorp_set]
combs = list(combinations(range(len(labeled_subcorps)), 2))
jaccards = [jaccard(labeled_subcorps[i], labeled_subcorps[j]) for i, j in combs]
print(np.mean(jaccards), np.var(jaccards)**.5)
plt.hist(jaccards, bins=5, label=name)
plt.title("Within Population Jaccard Similarities")
plt.legend()
plt.show()
# ACROSS POPULATION SIMILARITIES
cmp_pairs = [(srf_30, uni), (tf_100, uni), (srf_30, tf_100)]
cmp_names = ["SRF30 - UNI", "TF100 - UNI", "SRF30 - TF100"]
combs = list(combinations(range(10), 2))
for (corps1, corps2), name in zip(cmp_pairs, cmp_names):
labeled_corps1 = [[label_f(s) for s in subcorp.sentences() if "".join(s)]
for subcorp in corps1]
labeled_corps2 = [[label_f(s) for s in subcorp.sentences() if "".join(s)]
for subcorp in corps2]
cross_jccrds = [jaccard(labeled_corps1[i], labeled_corps2[j])
for i, j in combs]
print(name, " JCC: ", np.mean(cross_jccrds), np.var(cross_jccrds)**.5)
plt.hist(cross_jccrds, bins=5, label=name)
plt.title("Across Population Jaccard Similarities")
plt.legend()
plt.show()
# WORD LEVEL WITHIN POP. SIMILARITIES
for subcorp_set, name in zip([srf_10, srf_20, srf_30, tf_50, tf_100, uni],
["SRF10", "SRF20", "SRF30", "TF50", "TF100", "UNI"]):
print("\n", name)
labeled_subcorps = [[word_label_f(w) for s in subcorp.sentences() for w in s if w]
for subcorp in subcorp_set]
combs = list(combinations(range(len(labeled_subcorps)), 2))
jaccards = [jaccard(labeled_subcorps[i], labeled_subcorps[j]) for i, j in combs]
print(np.mean(jaccards), np.var(jaccards)**.5)
plt.hist(jaccards, bins=5, label=name)
plt.title("Word Level Within Population Jaccard SImilarities")
plt.legend()
plt.show()
|
# coding: utf-8
from __future__ import unicode_literals
import calendar
import re
import time
from .amp import AMPIE
from .common import InfoExtractor
from ..compat import compat_urlparse
class AbcNewsVideoIE(AMPIE):
    """Extractor for individual abcnews.go.com video pages (AMP feed based)."""
    IE_NAME = 'abcnews:video'
    _VALID_URL = r'https?://abcnews\.go\.com/[^/]+/video/(?P<display_id>[0-9a-z-]+)-(?P<id>\d+)'

    _TESTS = [{
        'url': 'http://abcnews.go.com/ThisWeek/video/week-exclusive-irans-foreign-minister-zarif-20411932',
        'info_dict': {
            'id': '20411932',
            'ext': 'mp4',
            'display_id': 'week-exclusive-irans-foreign-minister-zarif',
            'title': '\'This Week\' Exclusive: Iran\'s Foreign Minister Zarif',
            'description': 'George Stephanopoulos goes one-on-one with Iranian Foreign Minister Dr. Javad Zarif.',
            'duration': 180,
            'thumbnail': 're:^https?://.*\.jpg$',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'http://abcnews.go.com/2020/video/2020-husband-stands-teacher-jail-student-affairs-26119478',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Fetch the AMP item feed for the video id and return its info dict."""
        match = re.match(self._VALID_URL, url)
        video_id = match.group('id')
        feed_url = 'http://abcnews.go.com/video/itemfeed?id=%s' % video_id
        info_dict = self._extract_feed_info(feed_url)
        info_dict.update({
            'id': video_id,
            'display_id': match.group('display_id'),
        })
        return info_dict
class AbcNewsIE(InfoExtractor):
    """Extractor for abcnews.go.com story pages.

    A story page carries one abcnews video (delegated to AbcNewsVideoIE)
    and may additionally embed a YouTube video, in which case a playlist
    containing both is returned.
    """
    IE_NAME = 'abcnews'
    _VALID_URL = r'https?://abcnews\.go\.com/(?:[^/]+/)+(?P<display_id>[0-9a-z-]+)/story\?id=(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://abcnews.go.com/Blotter/News/dramatic-video-rare-death-job-america/story?id=10498713#.UIhwosWHLjY',
        'info_dict': {
            'id': '10498713',
            'ext': 'flv',
            'display_id': 'dramatic-video-rare-death-job-america',
            'title': 'Occupational Hazards',
            'description': 'Nightline investigates the dangers that lurk at various jobs.',
            'thumbnail': 're:^https?://.*\.jpg$',
            'upload_date': '20100428',
            'timestamp': 1272412800,
        },
        'add_ie': ['AbcNewsVideo'],
    }, {
        'url': 'http://abcnews.go.com/Entertainment/justin-timberlake-performs-stop-feeling-eurovision-2016/story?id=39125818',
        'info_dict': {
            'id': '39125818',
            'ext': 'mp4',
            'display_id': 'justin-timberlake-performs-stop-feeling-eurovision-2016',
            'title': 'Justin Timberlake Drops Hints For Secret Single',
            'description': 'Lara Spencer reports the buzziest stories of the day in "GMA" Pop News.',
            'upload_date': '20160515',
            'timestamp': 1463329500,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
            # The embedded YouTube video is blocked due to copyright issues
            'playlist_items': '1',
        },
        'add_ie': ['AbcNewsVideo'],
    }, {
        'url': 'http://abcnews.go.com/Technology/exclusive-apple-ceo-tim-cook-iphone-cracking-software/story?id=37173343',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        display_id = mobj.group('display_id')
        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)
        # Direct video URL assigned by the page's JS; may be relative,
        # so it is resolved against the page URL below.
        video_url = self._search_regex(
            r'window\.abcnvideo\.url\s*=\s*"([^"]+)"', webpage, 'video URL')
        full_video_url = compat_urlparse.urljoin(url, video_url)
        # Optional embedded YouTube player (None when absent).
        youtube_url = self._html_search_regex(
            r'<iframe[^>]+src="(https://www\.youtube\.com/embed/[^"]+)"',
            webpage, 'YouTube URL', default=None)
        timestamp = None
        date_str = self._html_search_regex(
            r'<span[^>]+class="timestamp">([^<]+)</span>',
            webpage, 'timestamp', fatal=False)
        if date_str:
            tz_offset = 0
            if date_str.endswith(' ET'):  # Eastern Time
                # NOTE(review): fixed -5 offset ignores daylight saving
                # (ET is UTC-4 in summer); timestamps may be off by 1h.
                tz_offset = -5
                date_str = date_str[:-3]
            date_formats = ['%b. %d, %Y', '%b %d, %Y, %I:%M %p']
            # Try each format; a failing format is skipped (ValueError),
            # leaving any previously successful parse intact.
            for date_format in date_formats:
                try:
                    timestamp = calendar.timegm(time.strptime(date_str.strip(), date_format))
                except ValueError:
                    continue
            if timestamp is not None:
                # timegm treated the local time as UTC; subtract the zone
                # offset to obtain the true UTC epoch.
                timestamp -= tz_offset * 3600
        entry = {
            '_type': 'url_transparent',
            'ie_key': AbcNewsVideoIE.ie_key(),
            'url': full_video_url,
            'id': video_id,
            'display_id': display_id,
            'timestamp': timestamp,
        }
        if youtube_url:
            entries = [entry, self.url_result(youtube_url, 'Youtube')]
            return self.playlist_result(entries)
        return entry
|
import numpy as np
from foolbox2.criteria import TargetClass
from foolbox2.models.wrappers2 import CompositeModel
# from fmodel3 import create_fmodel_combo
from fmodel import create_fmodel as create_fmodel_18
from fmodel2 import create_fmodel as create_fmodel_ALP
from foolbox2.attacks.iterative_projected_gradient import MomentumIterativeAttack
from foolbox2.distances import MeanSquaredDistance
from foolbox2.adversarial import Adversarial
from adversarial_vision_challenge import load_model
from adversarial_vision_challenge import read_images
from adversarial_vision_challenge import store_adversarial
from adversarial_vision_challenge import attack_complete
from iterative import SAIterativeAttack, RMSIterativeAttack, \
AdamIterativeAttack, AdagradIterativeAttack
import os
def run_attack(model, image, target_class):
    """Run a targeted Adam iterative attack against `model` on `image`.

    The composite model answers predictions with the black-box forward
    model and gradients with the substitute backward models.  Returns the
    adversarial image produced by the attack.
    """
    criterion = TargetClass(target_class)
    attack = AdamIterativeAttack(model, criterion)
    # Label the black-box model assigns to the clean image.
    clean_label = np.argmax(model.predictions(image))
    return attack(image, model_path=None, label=clean_label)
def main():
    """Attack every challenge image with the composite model and store results."""
    # tf.logging.set_verbosity(tf.logging.INFO)
    # instantiate blackbox and substitute models
    forward_model = load_model()
    backward_model1 = create_fmodel_18()
    backward_model2 = create_fmodel_ALP()
    # print(backward_model1[0])
    # instantiate differentiable composite model
    # (predictions from blackbox, gradients from substitutes)
    model = CompositeModel(
        forward_model=forward_model,
        backward_models=[backward_model1,backward_model2])
    # NOTE(review): the image's own label is passed as the attack's
    # target_class -- confirm this is the intended targeting scheme.
    for (file_name, image, label) in read_images():
        adversarial = run_attack(model, image, label)
        store_adversarial(file_name, adversarial)
    attack_complete()
# Script entry point.
if __name__ == '__main__':
    main()
|
import pandas as pd
import githubstats.database as db
class Reports:
    """Generate reports from the database.

    :param target_db: Full path with file name and extension to your SQLite3 database

    USAGE

    ```
    from githubstats import Reports

    # instantiate reports class
    rpt = Reports("<full path with file name and extension to your SQLite3 database>")

    # generate a CSV file containing the number of unique clones for each repository in the database
    df = rpt.unique_clones_by_repository('clones', output_csv="<full path with file name and extension to your output CSV file>")
    ```
    """

    def __init__(self, target_db):
        self.target_db = target_db

    def unique_clones_by_repository(self, table, output_csv=None):
        """Create an output data frame containing the total number of unique clones
        for all repositories in the database. Output as CSV as an option.

        :param table: target name of the table
        :param output_csv: Full path to file name with extension for the output CSV; None by default.
        :return: DataFrame with one row per repository: total uniques plus
                 first/last observation dates.
        """
        # NOTE(review): `table` is interpolated directly into the SQL text;
        # only pass trusted, internally-defined table names here.
        sql = """SELECT
                    DATE(date_time) as date, uniques, repo_name
                 FROM {};""".format(table)

        # create database connection; close it even if the query fails
        # (assumes db.create_connection returns an object with .close(),
        # e.g. a sqlite3.Connection -- confirm in githubstats.database)
        conn = db.create_connection(self.target_db)
        try:
            df = db.query_to_dataframe(sql, conn)
        finally:
            conn.close()

        # convert date field from string to datetime object
        df['date'] = pd.to_datetime(df['date'])

        # per-repository aggregates: observation date range and total uniques
        grp = df.groupby('repo_name')
        min_date_dict = grp.min()['date'].to_dict()
        max_date_dict = grp.max()['date'].to_dict()
        sum_dict = grp.sum()['uniques'].to_dict()
        df['min_date'] = df['repo_name'].map(min_date_dict)
        df['max_date'] = df['repo_name'].map(max_date_dict)
        df['uniques'] = df['repo_name'].map(sum_dict)
        df.drop(columns=['date'], inplace=True)

        # collapse to a single row per repository
        df = df.groupby('repo_name').min()
        df.reset_index(inplace=True)
        df.columns = ['repository', 'unique_{}'.format(table), 'min_date', 'max_date']

        if output_csv is not None:
            df.to_csv(output_csv, index=False)
        return df
|
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
#
import time
from botocore.exceptions import ClientError
from . import cleanup_utils
from . import exception_utils
def __user_pool_exists(cleaner, user_pool_id):
    """Return True if the given cognito user pool still exists.

    Stand-in for a proper boto3 Waiter, which cognito does not yet offer
    for user-pool deletion.

    :param cleaner: A Cleaner object from the main cleanup.py script
    :param user_pool_id: Can be retrieved from the boto3 list_user_pools() with response['UserPools']['Id']
    :return: True if the user pool exists, False if it doesn't exist or an error occurs.
    """
    try:
        cleaner.cognito_idp.describe_user_pool(UserPoolId=user_pool_id)
        return True
    except cleaner.cognito_idp.exceptions.ResourceNotFoundException:
        return False
    except ClientError as err:
        print("    ERROR: Unexpected error occurred when checking if user pool {0} exists due to {1}"
              .format(user_pool_id, exception_utils.message(err)))
        return False
def delete_user_pools(cleaner):
    """
    Deletes all cognito user pools with the specified prefix. After deletion, this function will poll the
    client until all specified pools are deleted. list_user_pools() is paginated here manually because it
    is not paginatable in the LY boto3 version (it is in the latest boto3).
    :param cleaner: A Cleaner object from the main cleanup.py script
    :return: None
    """
    print('\n\nlooking for cognito user pools with names starting with one of {}'.format(cleaner.describe_prefixes()))
    params = {'MaxResults': 60}
    user_id_list = []
    # Construct list of pool ids to delete; the 'PlayerAccess' prefix is
    # stripped before the configured prefixes are matched.
    try:
        for result in cleanup_utils.paginate(cleaner.cognito_idp.list_user_pools, params, 'NextToken', 'NextToken'):
            for pool in result['UserPools']:
                pool_name = pool['Name']
                if pool_name.startswith('PlayerAccess'):
                    pool_name = pool_name.replace('PlayerAccess', '')
                if cleaner.has_prefix(pool_name):
                    print('  found user pool {}'.format(pool_name))
                    user_id_list.append(pool['Id'])
    except KeyError as e:
        print("      ERROR: Unexpected KeyError while deleting cognito user pools. {}".format(
            exception_utils.message(e)))
        return
    except ClientError as e:
        print("      ERROR: Unexpected error for paginator for the cognito client. {}".format(
            exception_utils.message(e)))
        return
    # Delete users
    for user_id in user_id_list:
        print('  deleting user pool {}'.format(user_id))
        try:
            cleaner.cognito_idp.delete_user_pool(UserPoolId=user_id)
        except ClientError as e:
            print('      ERROR. Failed to delete user pool {0} due to {1}'.format(user_id, exception_utils.message(e)))
            cleaner.add_to_failed_resources("cognito user pools", user_id)
    # Wait for the list to be empty; cognito has no Waiter for this, so
    # poll __user_pool_exists until each pool is gone or we time out.
    for user_id in user_id_list:
        try:
            cleanup_utils.wait_for(lambda: not __user_pool_exists(cleaner, user_id),
                                   attempts=cleaner.wait_attempts,
                                   interval=cleaner.wait_interval,
                                   timeout_exception=cleanup_utils.WaitError)
        except cleanup_utils.WaitError:
            print('      ERROR. user pool {0} was not deleted after timeout'.format(user_id))
            cleaner.add_to_failed_resources("cognito user pools", user_id)
        except ClientError as e:
            if e.response["Error"]["Code"] == "TooManyRequestsException":
                print('      too many requests, sleeping...')
                time.sleep(cleaner.wait_interval)
            else:
                print("      ERROR: Unexpected error occurred waiting for pool {0} to delete due to {1}".format(
                    user_id, exception_utils.message(e)))
                cleaner.add_to_failed_resources("cognito user pools", user_id)
|
from flask.cli import FlaskGroup
from app import create_app, db
# Build the Flask app via the factory and expose a Flask CLI group so the
# custom commands below run inside the application context.
app = create_app()
cli = FlaskGroup(app)
@cli.command('recreate_db')
def recreate_db():
    """Drop and recreate all database tables (destroys all existing data)."""
    db.drop_all()
    db.create_all()
    db.session.commit()
if __name__ == '__main__':
    cli()
|
#!/usr/bin/env python
import os
import sys
from django_app.config import get_project_root_path, import_env_vars
if __name__ == "__main__":
import_env_vars(os.path.join(get_project_root_path(), 'envdir'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_app.config.settings.dev")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
''' Voici le module qui a une fonction de déboggage '''
from Game import window
import time
# Module-level singleton Debug instance; populated by init().
debugger = None
class Debug:
    """Runtime debugging helper: per-second FPS counter plus a blocking
    interactive command prompt (see debug_enter)."""
    def __init__(self, stage):
        self.fps = 0  # frames counted in the current one-second window
        self.t0Win = 0  # value returned by stage.stage_end() ("finish" command)
        self.stage = stage
        self.stop = False  # True while the console prompt is blocking
    def fps_add(self): # FPS counter: call once per rendered frame
        self.fps += 1
    def fps_reset(self):
        # Print and reset the frame count every second, rescheduling itself
        # via the canvas' tkinter after() timer.
        if not self.stop:
            print(self.fps)
        self.fps = 0
        get_can().after(1000, self.fps_reset)
    def debug_enter(self, event): # To enter commands (blocks on stdin)
        if not self.stop:
            self.stop = True
            command = input("Command > ")
            words = command.split()
            if words:
                if words[0] == "help":
                    print("-----------------------------------------------")
                    # print("* func <function>\n Pour appeller une fonction\n* var <name> <value> <int|float|str> [instance]\n Pour changer la valeur d'une variable")
                    print("* level <number>\n Pour lancer le niveau spécifié dans <number>\n* finish\n Pour automatiquement finir le niveau")
                    print("-----------------------------------------------")
                elif words[0] == "level":
                    # Jump straight to the requested level number.
                    self.stage.change_level(int(words[1]))
                elif words[0] == "finish":
                    # Immediately finish the current level.
                    self.t0Win = self.stage.stage_end()
            self.stop = False
def init(stage):
    """Create the module-level Debug instance and start its FPS loop."""
    global debugger
    debugger = Debug(stage)
    debugger.fps_reset()
def get_can():
    """Return the main canvas of the game window."""
    root = window.root
    return root.canvas
def get_winH():
    """Return the game window's height."""
    root = window.root
    return root.winH
def get_winW():
    """Return the game window's width."""
    root = window.root
    return root.winW
|
########################## FWMAV Simulation #########################
# Version 0.3
# Fan Fei Feb 2019
# Direct motor driven flapping wing MAV simulation
#######################################################################
import gym
import flappy
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.common.vec_env import SubprocVecEnv
from stable_baselines.common import set_global_seeds
from flappy.envs.fwmav.controllers.arc_xy_arc_z import ARCController
from flappy.envs.fwmav.controllers.pid_controller import PIDController
import time
import argparse
import importlib
import numpy as np
def make_env(env_id, rank, seed=0, random_init = True, randomize_sim = True, phantom_sensor = False):
    """Build a thunk that creates and configures one simulation env.

    Only the rank-0 worker enables visualization and console printing, so
    a vectorized env shows a single window.  Each worker is seeded with
    seed + rank for decorrelated rollouts.
    """
    def _init():
        env = gym.make(env_id)
        env.config(random_init, randomize_sim, phantom_sensor)
        if rank == 0:
            env.enable_visualization()
            env.enable_print()
        env.seed(seed + rank)
        return env
    return _init
class LazyModel:
    """Adapter exposing a classical controller (PID/ARC) through the
    stable-baselines predict() interface."""

    def __init__(self, env, model_type):
        self.action_lb = env.action_lb
        self.action_ub = env.action_ub
        self.observation_bound = env.observation_bound
        if model_type == 'PID':
            self.policy = PIDController(env.sim.dt_c)
        elif model_type == 'ARC':
            self.policy = ARCController(env.sim.dt_c)
        else:
            raise Exception('Error')

    def predict(self, obs):
        """Return (action, state); the action is normalized into [-1, 1]."""
        # De-normalize the observation before handing it to the controller.
        raw_action = self.policy.get_action(obs[0] * self.observation_bound)
        # Map [action_lb, action_ub] onto the symmetric [-1, 1] range,
        # since baselines does not support asymmetric action spaces.
        span = self.action_ub - self.action_lb
        scaled = (raw_action - self.action_lb) / span * 2 - 1
        return np.array([scaled]), None
def main(args):
    """Run the hover env with either a learned or a classical controller."""
    env_id = 'fwmav_hover-v0'
    env = DummyVecEnv([make_env(env_id, 0, random_init = args.rand_init, randomize_sim = args.rand_dynamics, phantom_sensor = args.phantom_sensor)])
    if args.model_type != 'PID' and args.model_type != 'ARC':
        # Learned policy: resolve the algorithm class from stable_baselines
        # by name (e.g. PPO2) and load the saved weights.
        try:
            model_cls = getattr(
                importlib.import_module('stable_baselines'), args.model_type)
        except AttributeError:
            print(args.model_type, "Error: wrong model type")
            return
        try:
            model = model_cls.load(args.model_path)
        # NOTE(review): bare except swallows every error here, and `model`
        # stays unbound afterwards -- the loop below would raise NameError.
        except:
            print(args.model_path, "Error: wrong model path")
    else:
        # Classical controller wrapped in the predict() interface.
        model = LazyModel(env.envs[0],args.model_type)
    obs = env.reset()
    # Run forever; the GUI toggles is_sim_on to pause/resume the loop.
    while True:
        if env.envs[0].is_sim_on == False:
            env.envs[0].gui.cv.wait()
        elif env.envs[0].is_sim_on:
            action, _ = model.predict(obs)
            obs, rewards, done, info = env.step(action)
        # if done:
        # 	obs = env.reset()
if __name__ == '__main__':
    # CLI: --model_type is 'PID', 'ARC', or a stable_baselines class name;
    # --model_path points at saved weights for learned policies.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_type', required=True)
    parser.add_argument('--model_path')
    parser.add_argument(
        '--policy_type', const='MlpPolicy', default='MlpPolicy', nargs='?')
    parser.add_argument('--rand_init', action='store_true', default=False)
    parser.add_argument('--rand_dynamics', action='store_true', default=False)
    parser.add_argument('--phantom_sensor', action='store_true', default=False)
    args = parser.parse_args()
    main(args)
import sublime
import sublime_plugin
import os
import re
from ..settings import get_setting
from ..utils import get_namespace, get_active_project_path
class ImportNamespaceCommand(sublime_plugin.TextCommand):
    """Insert (or replace) the PHP namespace declaration derived from the
    file's path inside the active project."""

    def run(self, edit):
        projectPath = get_active_project_path()
        file_name = self.view.file_name().replace(projectPath, '')
        if file_name.startswith('/') or file_name.startswith('\\'):
            file_name = file_name[1:]
        # Abort if the file is not PHP (Drupal .install/.module are PHP too)
        if not (file_name.endswith('.php') or file_name.endswith('.install') or file_name.endswith('.module')):
            sublime.error_message('No .php extension')
            return
        namespace_stmt = get_namespace(os.path.dirname(file_name))
        # Ensuring PHP tag presence
        php_tag = '<?php'
        # raw string: '\?' is an invalid escape sequence in normal strings
        php_regex = php_tag.replace('?', r'\?')
        php_region = self.view.find(php_regex, 0)
        if php_region.empty():
            self.view.insert(edit, 0, php_tag)
        # Removing existing namespace
        namespace_region = self.view.find(r'\s*namespace\s[\w\\]+;', 0)
        if not namespace_region.empty():
            self.view.replace(edit, namespace_region, '')
        # Adding namespace
        namespace_position = get_setting('namespace_position')
        namespace_contents = ' '
        if namespace_position != 'inline':
            namespace_contents = '\n' * get_setting('namespace_blank_lines', 2)
        namespace_contents += 'namespace ' + namespace_stmt + ';'
        # NOTE(review): when namespace_position == 'inline', the computed
        # namespace_contents is never inserted anywhere -- confirm whether
        # inline insertion was intended here.
        if namespace_position != 'inline':
            # Optionally skip past a file-level docblock right after <?php
            php_regex += r'(\s*\/\*(?:[^*]|\n|(?:\*(?:[^\/]|\n)))*\*\/)?'
            php_docblock_region = self.view.find(php_regex, 0)
            if not php_docblock_region.empty():
                line = self.view.line(php_docblock_region)
                self.view.insert(edit, line.end(), namespace_contents)
|
import os
from pathlib import Path
class Constants:
    """Canonical paths to the test suite's fixture directories and files."""
    # directories (all relative to this test module's location)
    test_dir: Path = Path(__file__).parent
    sample_data_dir: str = os.path.join(test_dir, 'sample_data')
    test_data_dir: str = os.path.join(test_dir, 'test_data')
    ref_data_dir: str = os.path.join(test_dir, 'reference_data')
    # files
    scrummyrc: str = os.path.join(sample_data_dir, 'scrummyrc')
    # actual outputs produced by the code under test
    christmas_2021_actual: str = os.path.join(test_data_dir, 'scrum', '101-christmas-2021.md')
    smart_apartment_actual: str = os.path.join(test_data_dir, 'scrum', '102-smart-apartment.md')
    scrummy_actual: str = os.path.join(test_data_dir, 'scrum', '105-scrummy.md')
    # expected (reference) outputs to compare against
    christmas_2021_expected: str = os.path.join(ref_data_dir, 'scrum', '101-christmas-2021.md')
    smart_apartment_expected: str = os.path.join(ref_data_dir, 'scrum', '102-smart-apartment.md')
    scrummy_expected: str = os.path.join(ref_data_dir, 'scrum', '105-scrummy.md')
|
import urllib3
import traversal_rule_identifier
from bs4 import BeautifulSoup
import json
import tldextract
import certifi
import ssl
from traversal_rule_identifier import TraversalRule
# Shared HTTP pool with TLS verification against certifi's CA bundle.
ssl_context = ssl.SSLContext()
ssl_context.load_verify_locations(certifi.where())
http = urllib3.PoolManager(ssl_context=ssl_context)
class OrgTraversalRules:
    """Per-domain organisation-extraction rules loaded from a JSON file."""

    persistence_type = "json"

    def __init__(self, filename):
        self.filename = filename
        self.org_traversal_rules = dict()
        self.load_org_traversal_rules()

    def load_org_traversal_rules(self):
        """(Re)load the {registered_domain: rule} mapping from disk."""
        with open(self.filename, "r") as fp:
            self.org_traversal_rules = json.load(fp)

    def get_org_traversal_for_url(self, url):
        """Return the rule for the url's registered domain, or None."""
        domain = tldextract.extract(url).registered_domain
        return self.org_traversal_rules.get(domain)
class FindOrgWithTraversal:
    """Extract the organisation from a page using a pre-computed traversal rule."""
    def __init__(self, url, org_traversal_rule_for_site):
        self.url = url
        self.org_traversal_rule = org_traversal_rule_for_site
        self.page_content = None  # raw page body, set by load_page_content()
    def load_page_content(self):
        # Fetch the raw page body over the module's shared urllib3 pool.
        self.page_content = http.request('GET', self.url).data
        #print(self.page_content)
    def get_org(self):
        self.load_page_content()
        soup = BeautifulSoup(self.page_content, 'lxml')
        # Re-parse the prettified markup -- presumably to normalise the tree
        # into the shape the traversal rules were recorded against; confirm
        # before removing this second parse.
        soup = BeautifulSoup(soup.prettify('utf-8'), 'lxml')
        t = TraversalRule(soup, None, self.org_traversal_rule)
        return t.get_org_from_traversal()
class FindOrg:
    """Facade: look up the traversal rule for a URL's domain and run it."""
    # Loaded once at class-creation time (i.e. at import) and shared.
    domain_traversal_file = "./resources/domain_traversal_rules-500.json"
    domain_traversal = OrgTraversalRules(domain_traversal_file)
    def __init__(self, url):
        self.url = url
        extracted = tldextract.extract(url)
        site = extracted.registered_domain
        # NOTE(review): direct indexing raises KeyError for domains without
        # a rule; get_org_traversal_for_url() would return None instead.
        self.find_org = FindOrgWithTraversal(self.url, self.domain_traversal.org_traversal_rules[site])
    def get_org(self):
        return self.find_org.get_org()
class AuthorTraversalRules:
    """Per-domain author-extraction rules loaded from a JSON file."""

    persistence_type = "json"

    def __init__(self, filename):
        self.filename = filename
        self.author_traversal_rules = dict()
        self.load_author_traversal_rules()

    def load_author_traversal_rules(self):
        """(Re)load the {registered_domain: rule} mapping from disk."""
        with open(self.filename, "r") as fp:
            self.author_traversal_rules = json.load(fp)

    def get_author_traversal_for_url(self, url):
        """Return the rule for the url's registered domain, or None."""
        domain = tldextract.extract(url).registered_domain
        return self.author_traversal_rules.get(domain)
class FindAuthorWithTraversal:
    """Extract the author from a page using a pre-computed traversal rule."""
    def __init__(self, url, author_traversal_rule_for_site):
        self.url = url
        self.author_traversal_rule = author_traversal_rule_for_site
        self.page_content = None  # raw page body, set by load_page_content()
    def load_page_content(self):
        # Fetch the raw page body over the module's shared urllib3 pool.
        self.page_content = http.request('GET', self.url).data
    def get_author(self):
        self.load_page_content()
        soup = BeautifulSoup(self.page_content, 'lxml')
        # Second parse over the prettified markup -- see FindOrgWithTraversal;
        # presumably normalises the tree for the recorded rules (confirm).
        soup = BeautifulSoup(soup.prettify('utf-8'), 'lxml')
        t = traversal_rule_identifier.TraversalRule(soup, None, self.author_traversal_rule)
        return t.get_author_from_traversal()
class FindAuthor:
    """Facade: look up the author-traversal rule for a URL's domain and run it."""
    # Loaded once at class-creation time (i.e. at import) and shared.
    domain_traversal_file = "./resources/domain_traversal_rules-500.json"
    domain_traversal = AuthorTraversalRules(domain_traversal_file)
    def __init__(self, url):
        self.url = url
        extracted = tldextract.extract(url)
        site = extracted.registered_domain
        # NOTE(review): direct indexing raises KeyError for domains without
        # a rule; get_author_traversal_for_url() would return None instead.
        self.find_author = FindAuthorWithTraversal(self.url, self.domain_traversal.author_traversal_rules[site])
    def get_author(self):
        return self.find_author.get_author()
if __name__ == "__main__":
print(FindAuthor("https://www.linkedin.com/pulse/automating-user-creation-aws-sftp-service-transfer-arjun-dandagi/").get_author())
print(FindOrg("https://www.facebook.com/TechRadar").get_org()) |
from nodes import Var, Term, Fact, BinOp, Goals, Rule, Rules
from knowledgeBase import KnowledgeBase
from visitors import make_expr
from unificationVisitor import Substitutions
import utils
# Knowledge base: loves/2 facts plus narcissist/1 derived via X loves X.
lovely_rules = KnowledgeBase({'loves/2': [Rule(Fact('loves/2', [Term('hanna'), Term('miles')]), None), Rule(Fact('loves/2', [Term('tom'), Term('tom')]), None), Rule(Fact('loves/2', [Term('simon'), Term('sara')]), None)], 'narcissist/1': [Rule(Fact('narcissist/1', [Var('X')]), Fact('loves/2', [Var('X'), Var('X')]))]})
# Enumerate every loves(M, Z) substitution.
for (_, sub) in lovely_rules.lookup(
        Fact('loves/2', [Var('M'), Var('Z')])):
    print(sub)
print('# Narcissist')
for (_, sub) in lovely_rules.lookup(
        Fact('narcissist/1', [Var('Y')])):
    print(sub)
print('---~~Living~~---')
# human/1 and male/1 facts; mortal/1, boy/1, father/2, sibiling/2 and
# child/2 are derived rules (Goals chains conjunctive subgoals).
life_rules = KnowledgeBase({'human/1': [Rule(Fact('human/1', [Term('miles')]), None), Rule(Fact('human/1', [Term('Samael')]), None), Rule(Fact('human/1', [Term('socrates')]), None)], 'male/1': [Rule(Fact('male/1', [Term('Samael')]), None), Rule(Fact('male/1', [Term('jordon')]), None), Rule(Fact('male/1', [Term('jim')]), None)], 'mortal/1': [Rule(Fact('mortal/1', [Var('X')]), Fact('human/1', [Var('X')])), Rule(Fact('mortal/1', [Var('Z')]), Fact('boy/1', [Var('Z')]))], 'boy/1': [Rule(Fact('boy/1', [Var('A')]), Goals([Fact('male/1', [Var('A')]), Fact('human/1', [Var('A')])]))], 'parent/2': [Rule(Fact('parent/2', [Term('miles'), Term('Samael')]), None), Rule(Fact('parent/2', [Term('sara'), Term('Samael')]), None), Rule(Fact('parent/2', [Term('amanda'), Term('Samael')]), None), Rule(Fact('parent/2', [Term('steven'), Term('jordon')]), None), Rule(Fact('parent/2', [Term('laura'), Term('jordon')]), None), Rule(Fact('parent/2', [Term('cassandra'), Term('jim')]), None)], 'father/2': [Rule(Fact('father/2', [Var('A'), Var('X')]), Goals([Fact('parent/2', [Var('A'), Var('X')]), Fact('male/1', [Var('X')])]))], 'sibiling/2': [Rule(Fact('sibiling/2', [Var('A'), Var('B')]), Goals([Fact('father/2', [Var('A'), Var('X')]), Fact('father/2', [Var('B'), Var('X')])]))], 'child/2': [Rule(Fact('child/2', [Var('X'), Var('A')]), Fact('father/2', [Var('A'), Var('X')]))]})
print('# Mortal')
for (_, sub) in life_rules.lookup(
        Fact('mortal/1', [Var('O')])):
    print(sub)
print('# Sibilings')
for (_, sub) in life_rules.lookup(
        Fact('sibiling/2', [Term('sara'), Var('M')])):
    print(sub)
print('# Children')
for (_, sub) in life_rules.lookup(
        Fact('child/2', [Term('jordon'), Var('C')])):
    print(sub)
print('---~~Addition~~---')
# Peano-style addition over successor terms s/1, with 0 as the base case.
addition_rules = KnowledgeBase({'add/3': [Rule(Fact('add/3', [Term(0), Var('X'), Var('X')]), None), Rule(Fact('add/3', [Fact('s/1', [Var('X')]), Var('Y'), Fact('s/1', [Var('Z')])]), Fact('add/3', [Var('X'), Var('Y'), Var('Z')]))]})
# Solve 2 + A = 5; also show the resolved unifier environment.
for (unifier, sub) in addition_rules.lookup(
        Fact('add/3', [Fact('s/1', [Fact('s/1', [Term(0)])]), Var('A'), Fact('s/1', [Fact('s/1', [Fact('s/1', [Fact('s/1', [Fact('s/1', [Term(0)])])])])])])):
    print(utils.str_dict(Substitutions.optionally_resolve(unifier.env)), 'results in', sub)
# Solve A + s(A) = 3.
for (_, sub) in addition_rules.lookup(
        Fact('add/3', [Var('A'), Fact('s/1', [Var('A')]), Fact('s/1', [Fact('s/1', [Fact('s/1', [Term(0)])])])])):
    print(sub)
print('---~~List~~---')
# cons/2 + nil list encoding: membership, length, concatenation, reverse.
lst_rules = KnowledgeBase({'lst/1': [Rule(Fact('lst/1', [Term('nil')]), None), Rule(Fact('lst/1', [Fact('cons/2', [Var('U-1'), Var('X')])]), Fact('lst/1', [Var('X')]))], 'lst_length/2': [Rule(Fact('lst_length/2', [Var('Xs'), Var('L')]), Fact('lst_length/3', [Var('Xs'), Term(0), Var('L')]))], 'lst_length/3': [Rule(Fact('lst_length/3', [Term('nil'), Var('L'), Var('L')]), None), Rule(Fact('lst_length/3', [Fact('cons/2', [Var('X'), Var('Xs')]), Var('T'), Var('L')]), Goals([BinOp(Var('L'), '>', Var('T')), BinOp(Var('T1'), '==', BinOp(Var('T'), '+', Term(1))), Fact('lst_length/3', [Var('Xs'), Var('T1'), Var('L')])]))], 'lst_member/2': [Rule(Fact('lst_member/2', [Var('X'), Fact('cons/2', [Var('X'), Var('U-2')])]), None), Rule(Fact('lst_member/2', [Var('X'), Fact('cons/2', [Var('U-3'), Var('TAIL')])]), Fact('lst_member/2', [Var('X'), Var('TAIL')]))], 'lst_concat/3': [Rule(Fact('lst_concat/3', [Term('nil'), Var('L'), Var('L')]), None), Rule(Fact('lst_concat/3', [Fact('cons/2', [Var('X1'), Var('L1')]), Var('L2'), Fact('cons/2', [Var('X1'), Var('L3')])]), Fact('lst_concat/3', [Var('L1'), Var('L2'), Var('L3')]))], 'lst_reverse/2': [Rule(Fact('lst_reverse/2', [Term('nil'), Term('nil')]), None), Rule(Fact('lst_reverse/2', [Fact('cons/2', [Var('H'), Var('T')]), Var('RevList')]), Goals([Fact('lst_reverse/2', [Var('T'), Var('RevT')]), Fact('lst_concat/3', [Var('RevT'), Fact('cons/2', [Var('H'), Term('nil')]), Var('RevList')])]))]})
# Arity-less terms: make_expr (below) presumably normalises these into the
# name/arity form used by the knowledge base -- confirm in visitors.py.
lst1 = Fact('cons', [Term('x'), Fact('cons', [Term('y'), Term('nil')])])
lst2 = Fact('cons', [Term('a'), Fact('cons', [Term('b'), Term('nil')])])  # built but unused below
for (_, sub) in lst_rules.lookup(make_expr(Fact('lst_member', [Term('x'), lst1]))):
    print(sub)
print('# Concatenation')
for (unifier, _) in lst_rules.lookup(
        Goals([Fact('lst_concat/3', [Fact('cons/2', [Term('x'), Fact('cons/2', [Term('y'), Term('nil')])]), Fact('cons/2', [Term('a'), Fact('cons/2', [Term('b'), Term('nil')])]), Var('Res')]), Fact('lst_reverse/2', [Var('Res'), Var('Rev')])])):
    print(utils.dict_as_eqs(Substitutions.optionally_resolve(unifier.env)))
print('# Count Members')
for (unifier, _) in lst_rules.lookup(
        Fact('lst_length/2', [Fact('cons/2', [Term('a'), Fact('cons/2', [Term('b'), Fact('cons/2', [Term('c'), Term('nil')])])]), Var('X')])):
    print(utils.dict_as_eqs(Substitutions.optionally_resolve(unifier.env)))
print('---~~Numbers~~---')
# Arithmetic rules using BinOp constraints: increment, factorial,
# guarded division, and "which pairs multiply to 50".
num_rules = KnowledgeBase({'semantic_trial/3': [Rule(Fact('semantic_trial/3', [Var('X'), Var('Y'), Var('Z')]), Goals([BinOp(Var('Y'), '==', BinOp(Var('X'), '+', Term(1))), Fact('inc/2', [Var('Y'), Var('Z')]), BinOp(Var('Z'), '==', BinOp(Var('X'), '+', Term(2)))]))], 'inc/2': [Rule(Fact('inc/2', [Var('X'), Var('Y')]), BinOp(Var('Y'), '==', BinOp(Var('X'), '+', Term(1))))], 'fac/2': [Rule(Fact('fac/2', [Term(0), Term(1)]), None), Rule(Fact('fac/2', [Var('N'), Var('F')]), Goals([BinOp(Var('N'), '>', Term(0)), BinOp(Var('F1'), '>', Term(0)), BinOp(Var('N1'), '==', BinOp(Var('N'), '-', Term(1))), BinOp(Var('F'), '==', BinOp(Var('N'), '*', Var('F1'))), Fact('fac/2', [Var('N1'), Var('F1')])]))], 'safediv/3': [Rule(Fact('safediv/3', [Var('A'), Var('B'), Var('X')]), Goals([BinOp(BinOp(Var('A'), '/', Var('B')), '==', Var('X')), BinOp(Var('B'), '>', Term(0)), BinOp(Var('X'), '>=', Term(1))]))], 'multiplyTo50/2': [Rule(Fact('multiplyTo50/2', [Var('A'), Var('B')]), BinOp(BinOp(Var('A'), '*', Var('B')), '==', Term(50)))]})
for (_, sub) in num_rules.lookup(
        Fact('semantic_trial/3', [Var('L'), Term(8), Var('N')])):
    print(sub)
print('# Numbers that multiply upto 50')
for (_, sub) in num_rules.lookup(
        Fact('multiplyTo50/2', [Var('X'), Var('Y')])):
    print(sub)
print('# Factorial')
for (unifier, _) in num_rules.lookup(
        Fact('fac/2', [Term(4), Var('F')])):
    print(utils.dict_as_eqs(Substitutions.optionally_resolve(unifier.env)))
|
# -*- coding:ISO-8859-1 -*-
'''
This file contains the class that is responsible for opening package.json,
removing all contents under the "dependencies" and "devDependencies" keys,
and adding all dependencies from the CSV file to the "dependencies" key.
'''
import json
class Package:
    """Wrapper around a package.json file.

    On load, the existing ``dependencies`` and ``devDependencies`` sections
    are cleared so that only dependencies recorded later via ``update()``
    (taken from the CSV file) end up installed.
    """

    def __init__(self, fileName='workspace/package/package.json'):
        """Load *fileName* and reset its dependency sections.

        Raises FileNotFoundError when the file does not exist.
        """
        self.fileExists = False
        self.fileName = fileName
        # Context manager closes the handle promptly; the original code
        # leaked the file object returned by open().
        with open(self.fileName, errors='ignore') as fp:
            self.fileJson = json.load(fp)
        self.fileExists = True
        # delete all dependencies to install only dependencies in csv file
        self.fileJson['dependencies'] = {}
        self.fileJson['devDependencies'] = {}

    def update(self, dependency, version):
        """Record *dependency* at *version*; dev dependencies are installed
        under the 'dependencies' key as well."""
        if not self.fileExists:
            return
        self.fileJson['dependencies'][dependency] = version

    def get(self, key):
        """Return the value stored under *key*, or None when no file loaded.

        Re-raises KeyError (after printing a message) when *key* is absent.
        """
        if not self.fileExists:
            return None
        try:
            return self.fileJson[key]
        except KeyError:
            print('Key {0} isn\'t in the JSON object.'.format(key))
            raise

    def print(self):
        """Dump the in-memory JSON object to stdout."""
        print(self.fileJson)

    def save(self):
        """Write the current JSON state back to the original file."""
        # with-statement guarantees the file is flushed and closed.
        with open(self.fileName, 'w') as fp:
            json.dump(self.fileJson, fp, indent=2)
#!/usr/bin/env python3
import argparse
from logging import getLogger
from os import path
from common import setup_log, list_csv_files, read_csv
from model import ParcelOffer
from place_resolver import MapQuestClient, PlaceResolver
def main(map_quest_api_key: str, csv_cache: str, offers_directory: str):
    """Resolve the place of every parcel offer through MapQuest, reusing and
    then persisting a CSV-backed cache of already-resolved addresses."""
    setup_log()
    log = getLogger()
    resolver = PlaceResolver(MapQuestClient(map_quest_api_key, log), log)
    # Warm the resolver from a previous run's cache, when present.
    if path.isfile(csv_cache):
        resolver.load(csv_cache)
        log.info(f"Loaded {csv_cache} with {len(resolver.cache.keys())} addresses")
    for csv_file in list_csv_files(offers_directory):
        log.info(f"Parsing CSV {csv_file}")
        for row in read_csv(csv_file):
            offer = ParcelOffer.from_csv_row(row)
            if not offer:
                log.warning(f"Could not parse into offer: {row}")
                continue
            # Only the side effect matters: the lookup populates the cache.
            resolver.get(offer)
    log.info(f"Storing cache with {len(resolver.cache.keys())} into {csv_cache}")
    resolver.save(csv_cache)
def _parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description='Populate places cache with locations resolved using MapQuest API')
parser.add_argument('cache', type=str, help='Path CSV file containing places cache to work on')
parser.add_argument('key', type=str, help='MapQuest API key')
parser.add_argument('offers', type=str, help='Path to directory containing CSV files with offers')
return parser.parse_args()
def cli_main():
    """Console-script entry point: parse arguments and run main()."""
    parsed = _parse_args()
    main(parsed.key, parsed.cache, parsed.offers)


if __name__ == '__main__':
    cli_main()
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
"""
list keys from inverted files (ZODB)
"""
from os.path import dirname, basename, exists, join
from ZODB import FileStorage, DB
from persistent.list import PersistentList
from persistent.dict import PersistentDict
import transaction
from BTrees.OOBTree import OOBTree
from time import time
import sys
class PostIndex(object):
    """One posting: the location of a single indexed-term occurrence."""

    def __init__(self, mfn, extraction_id, occ, offset, technique, fieldno):
        self.mfn = mfn
        self.extraction_id = extraction_id
        self.occ = occ
        self.offset = offset
        self.technique = technique
        self.fieldno = fieldno

    def __repr__(self):
        # One field per line, matching the on-disk dump format.
        template = """mfn:%s
extraction_id:%s
occ:%s
offset:%s
technique:%s
field:%s"""
        return template % (self.mfn, self.extraction_id, self.occ,
                           self.offset, self.technique, self.fieldno)
def listkeys(filename,verbose=0):
    # Print every key of the 'isis' inverted index stored in <filename>.idx.
    # With a truthy *verbose*, also print each posting of every key.
    # Python 2 only (print statements).
    try:
        storage = FileStorage.FileStorage(filename+".idx")
        db = DB(storage)
        connection = db.open()
        dbroot = connection.root()
        root = dbroot['isis']
        indices = root.keys()
        for idx in indices:
            if verbose:
                posts = root[idx]
                for pst in posts:
                    # key:^m<mfn>^o<occ>^i<extraction_id>^t<technique>^f<fieldno>
                    print '%s:^m%s^o%s^i%s^t%s^f%s' % (idx, pst.mfn, pst.occ, \
                            pst.extraction_id, \
                            pst.technique, \
                            pst.fieldno)
            else:
                print idx
    finally:
        # NOTE(review): if FileStorage() or db.open() raises, the later names
        # ('connection', 'db') are unbound here and this finally block itself
        # raises NameError, masking the original error.
        connection.close()
        db.close()
        storage.close()
if __name__ == '__main__':
    try:
        # NOTE(review): verbose is kept as the raw argv string, so any second
        # argument -- even "0" -- is truthy and enables verbose mode.
        verbose = sys.argv[2]
    except:
        verbose = 0
    try:
        listkeys(filename=sys.argv[1],verbose=verbose)
    except:
        # NOTE(review): this bare except also hides real failures inside
        # listkeys (e.g. a missing 'isis' root key), not just bad usage.
        print 'Use: listindex.py <database> [verbose]'
import sys
import requests
import getpass
from IPython.display import display, clear_output, HTML
# if Python2, prompt with raw_input instead of input
if sys.version_info.major==2:
input = raw_input
class EarthdataLogin(requests.Session):
    """
    Prompt user for Earthdata credentials repeatedly until auth success. Source:
    https://wiki.earthdata.nasa.gov/display/EL/How+To+Access+Data+With+Python
    """
    AUTH_HOST = "urs.earthdata.nasa.gov"  # URS login host
    ERROR = "Login failed ({0}). Retry or register."  # failure message template
    TEST = ("https://daac.ornl.gov/daacdata/")  # URL fetched to verify the login
    REGISTER = HTML(  # registration link shown before every attempt
        "<p style='font-weight:bold'><a href=https://urs.earth"
        "data.nasa.gov/users/new target='_blank'>Click here to"
        " register a NASA Earthdata account.</a></p>")
    def __init__(self):
        """Prompt for credentials until a GET of TEST succeeds.

        NOTE(review): there is no retry limit -- the loop prompts forever
        until a non-error HTTP status is returned.
        """
        fails = 0
        while True:
            display(self.REGISTER)                    # show registration link
            username = input("Username: ")            # username prompt
            password = getpass.getpass("Password: ")  # hidden password prompt
            # (Re)initialise the requests.Session base on every attempt.
            if sys.version_info.major==2:
                super(EarthdataLogin, self).__init__()  # for Python 2
            else:
                super().__init__()                      # for Python 3
            self.auth = (username, password)          # basic-auth credentials
            try:
                response = self.get(self.TEST)        # probe the protected URL
                response.raise_for_status()           # raise for status >= 400
                clear_output()
                display("Login successful. Download with: session.get(url)")
                break
            except requests.exceptions.HTTPError as e:
                clear_output()
                fails += 1                            # count failed attempts
                display(self.ERROR.format(str(fails)))
    def rebuild_auth(self, prepared_request, response):
        """
        Overrides from the library to keep headers when redirected to or
        from the NASA auth host.
        """
        headers = prepared_request.headers
        url = prepared_request.url
        if 'Authorization' in headers:
            original_parsed = requests.utils.urlparse(response.request.url)
            redirect_parsed = requests.utils.urlparse(url)
            # Strip credentials only when redirecting between two hosts of
            # which neither is the URS auth host.
            if (original_parsed.hostname != redirect_parsed.hostname) and \
                    redirect_parsed.hostname != self.AUTH_HOST and \
                    original_parsed.hostname != self.AUTH_HOST:
                del headers['Authorization']
                self.auth = None  # drop username/password attributes
        return  # requests.Session expects no return value
### RUN ON IMPORT
# NOTE(review): instantiating here prompts for credentials as a side effect
# of importing this module; import it only in interactive contexts.
session = EarthdataLogin()
|
# (C) Copyright 1996-2018 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
"""
A modification of the multiprocessing.pool functionality to give:
i) A sensible notion of "global" data
ii) A modified imap, which has a hook to report when each object has finished processing (even when
it has not been returned by the generator, as it is waiting for other objects to complete).
"""
from multiprocessing.pool import Pool, IMapIterator, RUN, mapstar
import sys
# The worker processes are only used for one task. We store the global data here to ensure that we
# don't have to worry about pickling the objects at any point other than initialisation, whilst retaining
# executable functions that are trivial.
_global_data = None
_processing_fn = None
# There is only one parent.
_global_parent = False
def _internal_initialiser(data, processing_fn):
    """Worker initialiser: record the shared data and processing function.

    Runs exactly once per worker process; the asserts guard against a
    double initialisation.
    """
    global _global_data, _processing_fn
    assert _global_data is None
    _global_data = data
    assert _processing_fn is None
    _processing_fn = processing_fn
def _internal_worker(obj):
    """Apply the configured processing function to *obj* with the global data."""
    try:
        return _processing_fn(obj, _global_data)
    except KeyboardInterrupt:
        # Swallow the interrupt in the worker; the master process receives
        # its own KeyboardInterrupt and is responsible for terminating the
        # pool cleanly.
        return None
class IMapIteratorLocal(IMapIterator):
    """
    IMapIterator variant that invokes a callback as soon as each work item
    completes, instead of waiting until the item is yielded back in order.
    """
    def __init__(self, callback, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.callback = callback
        self.element_count = 0

    def _set(self, i, obj):
        # obj is a (success, value) pair from the worker; report the value.
        self.element_count += 1
        self.callback(self.element_count, i, obj[1])
        super()._set(i, obj)
if sys.version_info < (3, 8):
    # Python 3.8 changed IMapIterator.__init__ to take the Pool itself
    # instead of the pool's result cache (bpo-35378).  On older versions,
    # adapt our (callback, pool) calling convention to (callback, pool._cache).
    class IMapIteratorLocalWrapper(IMapIteratorLocal):
        def __init__(self, callback, pool, *args, **kwargs):
            super(IMapIteratorLocalWrapper, self).__init__(callback, pool._cache, *args, **kwargs)
    IMapIteratorLocal = IMapIteratorLocalWrapper
class ProcessingPool(Pool):
    """Pool that applies *processing_fn* to items with shared global data,
    reporting per-item completion through *callback*."""

    def __init__(self, processing_fn, callback, processes=1, global_data=None):
        # Mark this process as the parent; workers never run this __init__.
        global _global_parent
        _global_parent = True
        self.callback = callback
        self.processes = processes
        # If we are only using one processor, then we don't need this machinery. Do things manually to
        # avoid the overhead, and keep error reporting/exceptions/assertions in line.
        # NOTE(review): in the processes == 1 branch the Pool base class is
        # never initialised, so inherited Pool methods other than imap()
        # will fail on this instance.
        if processes == 1:
            self.global_data = global_data
            self.processing_fn = processing_fn
        else:
            super(ProcessingPool, self).__init__(
                processes=processes,
                initializer=_internal_initialiser,
                initargs=(global_data, processing_fn))

    def imap_trivial(self, iterable):
        """
        If we are only using one process, we can use a trivial imap. This is only in a separate function
        as we cannot have a yield in the imap() function below.
        """
        for i, elem in enumerate(iterable):
            obj = self.processing_fn(elem, self.global_data)
            self.callback(i, i, obj)
            yield obj

    def imap(self, iterable, chunksize=1):
        """
        Equivalent of `itertools.imap()` -- can be MUCH slower than `Pool.map()`
        """
        if self.processes == 1:
            return self.imap_trivial(iterable)
        else:
            # This is derived from super().imap, but using IMapIteratorLocal instead of IMapIterator
            assert self._state == RUN
            if chunksize == 1:
                result = IMapIteratorLocal(self.callback, self)
                # Each task is (job id, index, fn, args, kwargs).
                self._taskqueue.put((((result._job, i, _internal_worker, (x,), {})
                                      for i, x in enumerate(iterable)), result._set_length))
                return result
            else:
                assert chunksize > 1
                # Batch items; mapstar applies _internal_worker to each batch.
                task_batches = Pool._get_tasks(_internal_worker, iterable, chunksize)
                result = IMapIteratorLocal(self.callback, self)
                self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                                      for i, x in enumerate(task_batches)), result._set_length))
                # Flatten the per-chunk results back into a single stream.
                return (item for chunk in result for item in chunk)
|
# encoding: utf8
from __future__ import unicode_literals
# Source: https://github.com/stopwords-iso/stopwords-hr
# Croatian stop words, one token per line inside the triple-quoted string.
STOP_WORDS = set("""
a
ah
aha
aj
ako
al
ali
arh
au
avaj
bar
baš
bez
bi
bih
bijah
bijahu
bijaše
bijasmo
bijaste
bila
bili
bilo
bio
bismo
biste
biti
brr
buć
budavši
bude
budimo
budite
budu
budući
bum
bumo
će
ćemo
ćeš
ćete
čijem
čijim
čijima
ću
da
daj
dakle
de
deder
dem
djelomice
djelomično
do
doista
dok
dokle
donekle
dosad
doskoro
dotad
dotle
dovečer
drugamo
drugdje
duž
e
eh
ehe
ej
eno
eto
evo
ga
gdjekakav
gdjekoje
gic
god
halo
hej
hm
hoće
hoćemo
hoćeš
hoćete
hoću
hop
htijahu
htijasmo
htijaste
htio
htjedoh
htjedoše
htjedoste
htjela
htjele
htjeli
hura
i
iako
ih
iju
ijuju
ikada
ikakav
ikakva
ikakve
ikakvi
ikakvih
ikakvim
ikakvima
ikakvo
ikakvog
ikakvoga
ikakvoj
ikakvom
ikakvome
ili
im
iz
ja
je
jedna
jedne
jedni
jedno
jer
jesam
jesi
jesmo
jest
jeste
jesu
jim
joj
još
ju
kada
kako
kao
koja
koje
koji
kojima
koju
kroz
lani
li
me
mene
meni
mi
mimo
moj
moja
moje
moji
moju
mu
na
nad
nakon
nam
nama
nas
naš
naša
naše
našeg
naši
ne
neće
nećemo
nećeš
nećete
neću
nego
neka
neke
neki
nekog
neku
nema
nešto
netko
ni
nije
nikoga
nikoje
nikoji
nikoju
nisam
nisi
nismo
niste
nisu
njega
njegov
njegova
njegovo
njemu
njezin
njezina
njezino
njih
njihov
njihova
njihovo
njim
njima
njoj
nju
no
o
od
odmah
on
ona
one
oni
ono
onu
onoj
onom
onim
onima
ova
ovaj
ovim
ovima
ovoj
pa
pak
pljus
po
pod
podalje
poimence
poizdalje
ponekad
pored
postrance
potajice
potrbuške
pouzdano
prije
s
sa
sam
samo
sasvim
sav
se
sebe
sebi
si
šic
smo
ste
što
šta
štogod
štagod
su
sva
sve
svi
svi
svog
svoj
svoja
svoje
svoju
svom
svu
ta
tada
taj
tako
te
tebe
tebi
ti
tim
tima
to
toj
tome
tu
tvoj
tvoja
tvoje
tvoji
tvoju
u
usprkos
utaman
uvijek
uz
uza
uzagrapce
uzalud
uzduž
valjda
vam
vama
vas
vaš
vaša
vaše
vašim
vašima
već
vi
vjerojatno
vjerovatno
vrh
vrlo
za
zaista
zar
zatim
zato
zbija
zbog
želeći
željah
željela
željele
željeli
željelo
željen
željena
željene
željeni
željenu
željeo
zimus
zum
""".split())
|
import random  # kept for backward compatibility with existing importers
import secrets


def generate_password(length, alphabet):
    """Return a password of *length* characters drawn uniformly from *alphabet*.

    Uses the ``secrets`` module (a CSPRNG) rather than ``random``, which is
    not suitable for security-sensitive values such as passwords.
    """
    return ''.join(secrets.choice(alphabet) for _ in range(length))


# Candidate characters: letters, two symbols, and digits.
chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ@&0123456789'

if __name__ == '__main__':
    print('''
Password Generator
==================
''')
    number = int(input('number of passwords?'))
    length = int(input('password length?'))
    print('\nhere are your passwords:')
    for _ in range(number):
        print(generate_password(length, chars))
#!/usr/bin/env python3
# A simple game without purpose (yet)
# version 0.1.1
# author: key999
# exit codes:
# 0 - correct exit
# anything else i will add here, i.e. -1 something
import pygame
import objects as obj
import menu
class Game:
    """Top-level pygame application: owns the window, clock, and sprites."""

    def __init__(self):
        self.running = True
        self.screen = None
        # size unpacks into width/height as a side effect of the assignment.
        self.size = self.width, self.height = 1024, 768
        self.clock = pygame.time.Clock()
        self.objects = pygame.sprite.Group()
        self.player = None
        self.screen_rect = None

    def on_init(self):
        """Initialise pygame, the window, and the player sprite."""
        pygame.init()
        self.screen = pygame.display.set_mode(self.size)
        self.screen_rect = self.screen.get_rect()
        self.player = obj.Car(self.screen, "car1.png", self.screen_rect.center)
        self.objects.add(self.player)
        self.running = True
        self.screen.fill((50, 50, 50))
        pygame.display.flip()
        # Pre-convert sprite surfaces for faster blitting.
        for i in self.objects:
            i.image.convert_alpha()

    def on_event(self, event):
        """Handle a single pygame event; returns 1 when the game should quit
        from the in-game menu."""
        if event.type == pygame.QUIT:
            print("quit")
            self.running = False
        elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
            print("esc down")
            # The in-game menu returns 1 when the user chose to exit.
            if menu.InGame(self.screen).init() == 1:
                return 1

    def on_loop(self):
        """Per-frame update: poll held keys, move the player, clamp to screen."""
        keys = pygame.key.get_pressed()
        if keys[pygame.K_w] or keys[pygame.K_UP]:
            self.player.drive("forward")
        if keys[pygame.K_s] or keys[pygame.K_DOWN]:
            self.player.drive("backward")
        if keys[pygame.K_a] or keys[pygame.K_LEFT]:
            self.player.drive("left")
        if keys[pygame.K_d] or keys[pygame.K_RIGHT]:
            self.player.drive("right")
        if keys[pygame.K_SPACE]:
            self.player.handbrake()
        if keys[pygame.K_r]:
            self.player.reset_position()
        self.player.move()
        # restrict to board: recentre the player when it leaves the screen
        if not self.screen_rect.contains(self.player.rect):
            self.player.rect.center = self.screen_rect.center

    def on_render(self):
        """Redraw the frame."""
        # screen background
        self.screen.fill((50, 50, 50))
        # draw objects
        self.objects.draw(self.screen)
        # refresh screen
        pygame.display.flip()

    @staticmethod
    def on_cleanup():
        """Shut pygame down."""
        pygame.quit()

    def on_execute(self):
        """Main loop: init, then event/update/render at 60 FPS until quit."""
        if self.on_init() is False:
            self.running = False
        while self.running:
            # Debug trace of the player's position and movement vector.
            print(self.player.rect.center, self.player.movement_vector, sep=";")
            for event in pygame.event.get():
                if self.on_event(event) == 1:
                    self.on_cleanup()
                    exit(0)
            self.on_loop()
            self.on_render()
            self.clock.tick(60)
        self.on_cleanup()
if __name__ == "__main__":
    # Run the game loop until the player quits.
    Game().on_execute()
|
__________________________________________________________________________________________________
sample 28 ms submission
class Solution:
    def count_indents(self, paths):
        """Map each entry to (depth, bare name), depth = count of leading tabs."""
        parsed = []
        for entry in paths:
            stripped = entry.lstrip('\t')
            parsed.append((len(entry) - len(stripped), stripped))
        return parsed

    def lengthLongestPath(self, s: str) -> int:
        """Length of the longest absolute path to a file in the serialized
        filesystem *s*; 0 when no file exists."""
        frames = []     # (depth, name) of the current directory chain
        best = 0
        running = 0     # length of '/'.join of frames, plus one per entry
        for depth, name in self.count_indents(s.split('\n')):
            # Unwind entries at the same or deeper level than the new one.
            while frames and frames[-1][0] >= depth:
                _, closed_name = frames.pop()
                running -= len(closed_name) + 1
            frames.append((depth, name))
            running += len(name) + 1  # +1 for the '/' before the name
            if '.' in name:
                # -1: the root component carries no leading '/'
                best = max(best, running - 1)
        return best
__________________________________________________________________________________________________
sample 13008 kb submission
class Solution:
    def lengthLongestPath(self, input: str) -> int:
        """Return the length of the longest path to a *file* in the
        serialized filesystem *input* ('\n'-separated entries whose depth is
        encoded as leading '\t's); 0 when there is no file.

        Rewritten to drop the dead ``is_file`` flag and commented-out code,
        and to replace the fragile pop-count arithmetic (which let the
        running length go negative) with an explicit prefix-length stack.
        """
        longest = 0
        # stack[d] = length of the directory prefix (including its trailing
        # '/') for the entry currently open at depth d.
        stack = [0]
        for line in input.split('\n'):
            name = line.lstrip('\t')
            # Clamp malformed depth jumps to the deepest open directory.
            depth = min(len(line) - len(name), len(stack) - 1)
            # Close directories at the same or deeper level.
            del stack[depth + 1:]
            if '.' in name:
                # A file terminates a path; no '/' follows it.
                longest = max(longest, stack[depth] + len(name))
            else:
                stack.append(stack[depth] + len(name) + 1)
        return longest
__________________________________________________________________________________________________
|
from pprint import pprint
def rule110CA(arr):
CARule = [0,1,1,0,1,1,1,0]
N = len(arr)
arrX = [[10 for i in range(N)] for k in range(N)]
arrX[0] = arr
for k in range(1,N):
arrX[k][0] = arrX[k-1][0]
arrX[k][N-1] = arrX[k-1][N-1]
for i in range(1,N-1):
arrX[k][i] = CARule[
4*arrX[k-1][(i-1)%N]+2*arrX[k-1][i]+arrX[k-1][(i+1)%N]
]
for k in range(1,N-1):
s,L = 0,0
for i in range(N-1,N-k-2,-1):
s,L = s + arrX[k][i], L+1
print(k, round(s/L,2))
# Initial condition: one live cell at the right edge of a 200-cell row.
size = 200
array = [0] * (size - 1) + [1]
rule110CA(array)
# Configuration for the MWAA (Airflow) -> Glue -> SageMaker customer-churn
# pipeline.  'XXXXXXXXXXXX' in bucket names is presumably an AWS account id
# placeholder -- replace before use (TODO confirm).
# COMMON
REGION_NAME="us-east-1"
# AIRFLOW
AIRFLOW_DAG_ID="mwaa-sm-customer-churn-dag"
# GLUE
GLUE_ROLE_NAME="AmazonMWAA-Glue-Role"
GLUE_JOB_NAME_PREFIX="mwaa-xgboost-preprocess"
GLUE_JOB_SCRIPT_S3_BUCKET="glue-scripts-XXXXXXXXXXXX-us-east-1"
GLUE_JOB_SCRIPT_S3_KEY="mwaa-xgboost/preprocess-data/glue_etl.py"
DATA_S3_SOURCE="s3://datalake-XXXXXXXXXXXX-us-east-1/customer-churn/customer-churn.csv"
DATA_S3_DEST="s3://mlops-XXXXXXXXXXXX-us-east-1/mwaa-xgboost/processed/"
# SAGEMAKER
SAGEMAKER_ROLE_NAME="AmazonMWAA-SageMaker-Role"
SAGEMAKER_TRAINING_JOB_NAME_PREFIX="mwaa-sm-training-job"
SAGEMAKER_TRAINING_DATA_S3_SOURCE="s3://mlops-XXXXXXXXXXXX-us-east-1/mwaa-xgboost/processed/train/"
SAGEMAKER_VALIDATION_DATA_S3_SOURCE="s3://mlops-XXXXXXXXXXXX-us-east-1/mwaa-xgboost/processed/validation/"
SAGEMAKER_CONTENT_TYPE="text/csv"
SAGEMAKER_MODEL_NAME_PREFIX="mwaa-sm-customer-churn-model"
SAGEMAKER_ENDPOINT_NAME_PREFIX="mwaa-sm-endpoint" # endpoint names have a 63 max char limit
SAGEMAKER_MODEL_S3_DEST="s3://mlops-XXXXXXXXXXXX-us-east-1/mwaa-xgboost/model/"
|
from typing import List
import numpy as np
import torch
import pyonmttok
from purano.annotator.processors import Processor
from purano.models import Document
from purano.proto.info_pb2 import Info as InfoPb
@Processor.register("elmo")
class ElmoProcessor(Processor):
    """Annotates documents with pooled ELMo embeddings."""

    def __init__(self, options_file: str, weight_file: str, cuda_device: int):
        # allennlp imports are deferred so the module can load without it.
        from allennlp.modules.elmo import _ElmoBiLm
        from allennlp.data.token_indexers.elmo_indexer import ELMoTokenCharactersIndexer
        self.indexer = ELMoTokenCharactersIndexer()
        self.elmo_bilm = _ElmoBiLm(options_file, weight_file)
        # cuda_device < 0 means CPU.
        if cuda_device >= 0:
            self.elmo_bilm = self.elmo_bilm.cuda(device=cuda_device)
        self.cuda_device = cuda_device
        self.tokenizer = pyonmttok.Tokenizer("conservative", joiner_annotate=False)

    def __call__(
        self,
        docs: List[Document],
        infos: List[InfoPb],
        input_fields: List[str],
        output_field: str,
        max_tokens_count: int
    ):
        """Embed each doc (concatenation of *input_fields*, truncated to
        *max_tokens_count* tokens) and append the pooled vector to
        getattr(info, output_field)."""
        from allennlp.modules.elmo import batch_to_ids
        from allennlp.nn.util import remove_sentence_boundaries
        batch = []
        for doc_num, doc in enumerate(docs):
            sample = " ".join([getattr(doc, input_field) for input_field in input_fields])
            tokens = self.preprocess(sample)[:max_tokens_count]
            batch.append(tokens)
        character_ids = batch_to_ids(batch)
        if self.cuda_device >= 0:
            character_ids = character_ids.cuda(device=self.cuda_device)
        bilm_output = self.elmo_bilm(character_ids)
        layer_activations = bilm_output['activations']
        mask_with_bos_eos = bilm_output['mask']
        # Strip the BOS/EOS positions that _ElmoBiLm adds.
        without_bos_eos = [
            remove_sentence_boundaries(layer, mask_with_bos_eos)
            for layer in layer_activations
        ]
        # Stack the 3 biLM layers along dim=1 (see the (3, 0, 1024) fallback
        # below): embeddings is (batch, layers, seq, dim).
        embeddings = torch.cat([pair[0].unsqueeze(1) for pair in without_bos_eos], dim=1)
        mask = without_bos_eos[0][1]
        for doc_num, info in enumerate(infos):
            length = int(mask[doc_num, :].sum())
            # Empty documents get an empty (3 layers, 0 tokens, 1024) tensor.
            doc_embeddings = np.zeros((3, 0, 1024))
            if length != 0:
                doc_embeddings = embeddings[doc_num, :, :length, :].detach().cpu().numpy()
            # (layers, seq, dim) -> (seq, layers*dim) before pooling.
            doc_embeddings = doc_embeddings.swapaxes(0, 1).reshape(doc_embeddings.shape[0], -1)
            mean_embeddings = doc_embeddings.mean(axis=0)
            max_embeddings = doc_embeddings.max(axis=0)
            final_embedding = np.concatenate((mean_embeddings, max_embeddings), axis=0)
            getattr(info, output_field).extend(final_embedding)

    def preprocess(self, text):
        """Normalise whitespace and tokenize *text*; returns a token list."""
        text = str(text).strip().replace("\n", " ").replace("\xa0", " ")
        tokens, _ = self.tokenizer.tokenize(text)
        return tokens
|
# Exception codes returned by Dcs_GetObjRefHdl.
odbc_Dcs_GetObjRefHdl_ASParamError_exn_ = 1
odbc_Dcs_GetObjRefHdl_ASTimeout_exn_ = 2
odbc_Dcs_GetObjRefHdl_ASNoSrvrHdl_exn_ = 3
odbc_Dcs_GetObjRefHdl_ASTryAgain_exn_ = 4
odbc_Dcs_GetObjRefHdl_ASNotAvailable_exn_ = 5
odbc_Dcs_GetObjRefHdl_DSNotAvailable_exn_ = 6
odbc_Dcs_GetObjRefHdl_PortNotAvailable_exn_ = 7
odbc_Dcs_GetObjRefHdl_InvalidUser_exn_ = 8
odbc_Dcs_GetObjRefHdl_LogonUserFailure_exn_ = 9
odbc_Dcs_GetObjRefHdl_TenantName_exn_ = 10
#
# out context: option bit flags (powers of two)
#
OUTCONTEXT_OPT1_ENFORCE_ISO88591 = 1 # (2^0)
OUTCONTEXT_OPT1_IGNORE_SQLCANCEL = 1073741824 # (2^30)
OUTCONTEXT_OPT1_EXTRA_OPTIONS = 2147483648 # (2^31)
OUTCONTEXT_OPT1_DOWNLOAD_CERTIFICATE = 536870912 # (2^29)
#
# InitializeDialogue exception codes
#
odbc_SQLSvc_InitializeDialogue_ParamError_exn_ = 1
odbc_SQLSvc_InitializeDialogue_InvalidConnection_exn_ = 2
odbc_SQLSvc_InitializeDialogue_SQLError_exn_ = 3
odbc_SQLSvc_InitializeDialogue_SQLInvalidHandle_exn_ = 4
odbc_SQLSvc_InitializeDialogue_SQLNeedData_exn_ = 5
odbc_SQLSvc_InitializeDialogue_InvalidUser_exn_ = 6
SQL_PASSWORD_EXPIRING = 8857
SQL_PASSWORD_GRACEPERIOD = 8837
#
# FETCH_REPLY: SQL type codes
#
SQLTYPECODE_CHAR = 1
# NUMERIC
SQLTYPECODE_NUMERIC = 2
SQLTYPECODE_NUMERIC_UNSIGNED = -201
# DECIMAL
SQLTYPECODE_DECIMAL = 3
SQLTYPECODE_DECIMAL_UNSIGNED = -301
SQLTYPECODE_DECIMAL_LARGE = -302
SQLTYPECODE_DECIMAL_LARGE_UNSIGNED = -303
# INTEGER / INT
SQLTYPECODE_INTEGER = 4
SQLTYPECODE_INTEGER_UNSIGNED = -401
SQLTYPECODE_LARGEINT = -402
SQLTYPECODE_LARGEINT_UNSIGNED = -405
# SMALLINT
SQLTYPECODE_SMALLINT = 5
SQLTYPECODE_SMALLINT_UNSIGNED = -502
SQLTYPECODE_BPINT_UNSIGNED = -503
# TINYINT
SQLTYPECODE_TINYINT = -403
SQLTYPECODE_TINYINT_UNSIGNED = -404
# FLOAT/REAL/DOUBLE, depending on precision
SQLTYPECODE_FLOAT = 6
SQLTYPECODE_REAL = 7
SQLTYPECODE_DOUBLE = 8
# DATE, TIME, TIMESTAMP
SQLTYPECODE_DATETIME = 9
# INTERVAL
SQLTYPECODE_INTERVAL = 10
# (no ANSI value 11)
# VARCHAR / CHARACTER VARYING
SQLTYPECODE_VARCHAR = 12
# SQL/MP style VARCHAR with length prefix:
SQLTYPECODE_VARCHAR_WITH_LENGTH = -601
SQLTYPECODE_BLOB = -602
SQLTYPECODE_CLOB = -603
# LONG VARCHAR / ODBC CHARACTER VARYING (legacy negative code)
SQLTYPECODE_VARCHAR_LONG = -1
# (no ANSI value 13)
# BIT
SQLTYPECODE_BIT = 14 # not supported
# BIT VARYING
SQLTYPECODE_BITVAR = 15 # not supported
# NCHAR -- CHAR(n) CHARACTER SET s -- where s uses two bytes per char
SQLTYPECODE_CHAR_DBLBYTE = 16
# NCHAR VARYING -- VARCHAR(n) CHARACTER SET s -- s uses 2 bytes per char
SQLTYPECODE_VARCHAR_DBLBYTE = 17
# BOOLEAN
SQLTYPECODE_BOOLEAN = -701
# Date/Time/Timestamp related constants
SQLDTCODE_DATE = 1
SQLDTCODE_TIME = 2
SQLDTCODE_TIMESTAMP = 3
SQLDTCODE_MPDATETIME = 4
#
# TerminateDialogue exception codes
#
odbc_SQLSvc_TerminateDialogue_ParamError_exn_ = 1
odbc_SQLSvc_TerminateDialogue_InvalidConnection_exn_ = 2
odbc_SQLSvc_TerminateDialogue_SQLError_exn_ = 3
#
# SetConnectionOption exception codes
#
odbc_SQLSvc_SetConnectionOption_ParamError_exn_ = 1
odbc_SQLSvc_SetConnectionOption_InvalidConnection_exn_ = 2
odbc_SQLSvc_SetConnectionOption_SQLError_exn_ = 3
odbc_SQLSvc_SetConnectionOption_SQLInvalidHandle_exn_ = 4
#
# EndTransaction exception codes
#
odbc_SQLSvc_EndTransaction_ParamError_exn_ = 1
odbc_SQLSvc_EndTransaction_InvalidConnection_exn_ = 2
odbc_SQLSvc_EndTransaction_SQLError_exn_ = 3
odbc_SQLSvc_EndTransaction_SQLInvalidHandle_exn_ = 4
odbc_SQLSvc_EndTransaction_TransactionError_exn_ = 5
|
# Build the BWA index (.amb/.ann/.bwt/.pac/.sa) for the reference genome.
rule map_bwa_index:
    input:
        RAW + "genome.fa"
    output:
        expand(
            RAW + "genome.fa.{extension}",
            extension="amb ann bwt pac sa".split(" ")
        )
    threads:
        1
    log:
        MAP + "bwa_index.log"
    benchmark:
        MAP + "bwa_index.time"
    conda:
        "map.yml"
    shell:
        "bwa index "
        "{input} "
        "2> {log}"
# Map one sample's reads with bwa mem (tagged with a read group) and pipe
# straight into samtools to produce an unsorted BAM.
rule map_bwa_sample:
    input:
        genome = RAW + "genome.fa",
        sample = lambda wildcards: config["samples"][wildcards.sample],
        ref_files = expand(
            RAW + "genome.fa.{extension}",
            extension="amb ann bwt pac sa".split(" ")
        )
    output:
        temp(MAP + "{sample}.unsorted.bam")
    params:
        # Read-group line: both ID and SM are the sample wildcard.
        rg="@RG\tID:{sample}\tSM:{sample}"
    log:
        MAP + "bwa_{sample}.log"
    benchmark:
        MAP + "bwa_{sample}.time"
    threads:
        MAX_THREADS
    conda:
        "map.yml"
    shell:
        "(bwa mem "
        "-R '{params.rg}' "
        "-t {threads} "
        "{input.genome} "
        "{input.sample} "
        "| samtools view "
        "-Sb "
        "- "
        "> {output}) "
        "2> {log}"
# Coordinate-sort a sample's BAM.
rule map_sort_sample:
    input:
        MAP + "{sample}.unsorted.bam"
    output:
        # Use the MAP prefix like every other rule so the path stays
        # consistent with map_index_sample's input (the original hard-coded
        # "results/map/", which silently breaks if MAP is changed).
        protected(MAP + "{sample}.sorted.bam")
    log:
        MAP + "sort_{sample}.log"
    benchmark:
        MAP + "sort_{sample}.time"
    conda:
        "map.yml"
    shell:
        "samtools sort "
        "-T $(mktemp --dry-run) "
        "-O bam {input} "
        "> {output} "
        "2> {log}"
# Create the .bai index for a sorted BAM.
rule map_index_sample:
    input:
        MAP + "{sample}.sorted.bam"
    output:
        protected(MAP + "{sample}.sorted.bam.bai")
    log:
        MAP + "index_{sample}.log"
    benchmark:
        MAP + "index_{sample}.time"
    conda:
        "map.yml"
    shell:
        "samtools index {input} > {log} 2>&1"
# Aggregate target: map, sort, and index every sample in the config.
rule map:
    input:
        expand(
            MAP + "{sample}.sorted.bam.bai",
            sample=config["samples"]
        )
|
#!/usr/bin/python
## @file MBGeneratedDefines.py
## @date Jul 30 2012
## @copyright
## 2012 Brandon LeBlanc <demosdemon@gmail.com>
## This program is made avaliable under the terms of the MIT License.
##
## @brief Generated MBGeneratedDefines at build time.
import sys
import os
import os.path
import subprocess
import time
import json
def git_sha():
    # Return the abbreviated commit hash of the 'master' ref.
    # NOTE(review): under Python 3 this returns bytes (this script targets
    # Python 2 -- see iteritems() below); also the conventional argument
    # order is '--verify --short master', though git accepts this form.
    return subprocess.check_output(['git','rev-parse','--verify','master','--short']).strip()
def def_header():
    # Build the full MBGeneratedDefines.h text: a banner followed by one
    # '#define KEY @"value"' line per environment variable.  Python 2 only
    # (iteritems, u'' literals).
    maxlen = max([len(key) for key in os.environ])
    # '@' + json.dumps(val) presumably yields an Objective-C string
    # literal (@"...") -- TODO confirm against the consuming Xcode project.
    DEFINE = u'#define %%-%ds @%%s' % maxlen
    # '_' is the shell's last-command variable; skip it.
    env = [DEFINE % (key.replace('/','_'), json.dumps(val)) for (key, val) in sorted(os.environ.iteritems()) if key != '_']
    HEADER = u"""//
// @file MBGeneratedDefines.h
// @author Joachim LeBlanc
// @date %(MB_BUILDTIME_STRING)s
// @copyright
// 2012 Joachim LeBlanc <demosdemon@gmail.com>
// This program is made available under the terms of the MIT License.
//
// DO NOT EDIT THIS FILE
// THIS FILE IS AUTOMATICALLY GENERATED BY XCODE DURING BUILD
// ANY CHANGES TO THIS FILE WILL BE ERASED AT NEXT BUILD
""" % os.environ
    return HEADER + '\n'.join(env)
def setup_environment():
    # Point git at the Xcode project tree and record version/build-time
    # values in the environment so def_header() emits them as #defines.
    os.environ['GIT_WORK_TREE'] = os.getenv('PROJECT_DIR', '.')
    os.environ['GIT_DIR'] = os.getenv('GIT_WORK_TREE') + '/.git'
    # NOTE(review): under Python 3 this would fail -- git_sha() returns
    # bytes and os.environ requires str; this script targets Python 2.
    os.environ['MB_VERSION'] = git_sha()
    os.environ['MB_BUILDTIME_STRING'] = time.ctime()
    os.environ['MB_BUILDTIME'] = str(int(time.time()))
def main(prog, args):
    """Generate MBGeneratedDefines.h inside $PROJECT_DIR and return 0.

    *prog* and *args* mirror argv but are currently unused.
    """
    setup_environment()
    project_dir = os.getenv('PROJECT_DIR', '.')
    header_file = project_dir + '/libmusicbrainz-objc/MBGeneratedDefines.h'
    with open(header_file, 'w') as fp:
        fp.write(def_header())
    return 0
if __name__ == '__main__':
    # Exit with main()'s status code.
    sys.exit(main(os.path.abspath(sys.argv[0]), sys.argv[1:]))
# Builds the URL routes for the API
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
from django.conf.urls import url
from .views import *
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# NOTE(review): django.conf.urls.url() is deprecated since Django 2.0 and
# removed in Django 4.0; migrate to django.urls.re_path (or path) when the
# project's Django version allows.
urlpatterns = [
    url(r'^set$', add_cash),
    url(r'^withdraw$', withdraw_cash),
    url(r'^status$', banc_status)
]
import json
import glob
import os
import yaml
from collections import defaultdict
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
import re
import requests
import textwrap
from datetime import datetime
from random import randint
from invoker import ReplyObject, Command
class Tournament:
@staticmethod
def toId(thing): return re.sub(r'[^a-zA-Z4e310-9,]', '', thing).lower()
    @staticmethod
    def buildRankingsTable(data, metagame, people=10):
        """Render an HTML leaderboard for *metagame* from *data*
        (mapping player id -> {'entered', 'won', optional 'gamewins'}),
        showing at most *people* rows; players with zero tour wins are
        skipped."""
        # Rank by tours won, ties broken by win rate.
        top10 = sorted(data.items(), key = lambda x: (x[1]['won'], x[1]['won'] / x[1]['entered']), reverse = True)[:people]
        # Only rows with at least one win are rendered; size the scroll box
        # accordingly (17px per row + 34px header, capped at 205px).
        withWins = sum(person[1]['won'] > 0 for person in top10)
        htmlString = '<h1 style="font-size:1em;">{}</h1>'.format(metagame)
        htmlString += '<div style="height: {height}px; overflow-y: auto;">'.format(height=min(34 + 17 * withWins, 205))
        htmlString += '<table style="border-collapse: collapse; margin: 0; border: 1px solid black;">'
        htmlString += '<tr><th style="border: 1px solid black;">Rank</th>'
        htmlString += '<th style="border: 1px solid black;">Name</th>'
        htmlString += '<th style="border: 1px solid black;">Tours</th>'
        htmlString += '<th style="border: 1px solid black;">Games Won</th>'
        htmlString += '<th style="border: 1px solid black;">Game Wins / Tour</th>'
        htmlString += '<th style="border: 1px solid black;">Tours Won</th>'
        htmlString += '<th style="border: 1px solid black;">Tour Win%</th></tr>'
        rank = 1
        for person in top10:
            wins = person[1]['won']
            if wins < 1: continue
            entered = person[1]['entered']
            # Older records may lack per-game win counts.
            try:
                gamewins = person[1]['gamewins']
            except KeyError:
                gamewins = 'N/A'
            # Alternate row colours by rank.
            htmlString += '<tr style="{style} text-align: center;">'.format(style = 'background-color: #333333; color: #AAAAAA;' if rank % 2 == 0 else 'background-color: #AAAAAA; color: #333333;')
            htmlString += '<td>{rank}</td>'.format(rank=rank)
            # The bot itself gets a highlighted display name.
            htmlString += '<td>{player}</td>'.format(player=person[0]) if not person[0] == 'bb8nu' else '<td style="color: #CD853F">BB-8-NU</td>'
            htmlString += '<td>{played}</td>'.format(played=entered)
            htmlString += '<td>{gameswon}</td>'.format(gameswon=gamewins)
            # 'N/A' gamewins triggers TypeError on division.
            try:
                htmlString += '<td>{percent:.1f}</td>'.format(percent=gamewins / entered)
            except TypeError:
                htmlString += '<td>N/A</td>'
            htmlString += '<td>{won}</td>'.format(won=wins)
            htmlString += '<td>{percent:.1f}</td></tr>'.format(percent=(wins / entered) * 100)
            rank += 1
        htmlString += '</table></div>'
        return htmlString
@staticmethod
def getTournamentData(room, formatName, official=False):
if official:
filePath = 'plugins/stats/{}/{}/official-rankings.yaml'.format(room, formatName)
else:
filePath = 'plugins/stats/{}/{}/tournament-rankings.yaml'.format(room, formatName)
with open(filePath, 'r+') as yf:
formatData = yaml.load(yf, Loader=Loader)
return formatData
@staticmethod
def alertTournamentString(room):
tourFormat = room.tour.format
gen = tourFormat[:4]
tier = tourFormat[4:]
gen = '[{}{} {}] '.format(gen[0].upper(), gen[1:3], gen[3])
tier = tier.title()
if len(tier) == 2: # OU, UU, RU, NU, PU, ZU...
tier = tier.upper()
tier = gen + tier
html = '<a href="/{room}" class="ilink" target="_blank" rel="noopener">'.format(room=room.title)
html += '<strong>{tour}</strong> tournament created in <strong>{room}</strong>.'.format(
tour=tier, room=room.formatedName)
html += '</a>'
return html
def __init__(self, ws, room, tourFormat, battleHandler, official=False):
self.ws = ws
self.room = room
self.format = tourFormat
self.title = self.format
self.official = official
self.players = []
self.gameWinners = defaultdict(int)
self.winner = None
self.runnerUp = None
self.finals = None
self.hasStarted = False
self.startTime = None
self.loggedParticipation = False
self.bh = battleHandler
def send(self, room, message):
print('{room}|{msg}'.format(room = room, msg = message))
self.ws.send('{room}|{msg}'.format(room = room, msg = message))
    def sendTourCmd(self, cmd):
        """Run a '/tour <cmd>' command in this tournament's room."""
        self.send(self.room.title, '/tour {}'.format(cmd))
    def join(self, room):
        """Join *room* (a global command, so it is sent with an empty room prefix)."""
        self.send('', '/join {}'.format(room))
    def joinTour(self):
        """Sign the bot up for the tournament."""
        self.sendTourCmd('join')
    def leaveTour(self):
        """Withdraw the bot from the tournament."""
        self.sendTourCmd('leave')
    def sendChallenge(self, opponent):
        """Challenge *opponent* for the bot's current tournament round."""
        self.sendTourCmd('challenge {opp}'.format(opp = opponent))
    def acceptChallenge(self):
        """Accept a pending tournament challenge."""
        self.sendTourCmd('acceptchallenge')
def pickTeam(self):
team = self.bh.getRandomTeam(self.format)
if team:
self.ws.send('|/utm {}'.format(team))
def onUpdate(self, msg):
    """React to one `|tournament|` protocol update.

    `msg` is the tuple of message parts after the 'tournament' token:
    msg[0] is the update type and msg[1:] its payload.
    """
    if 'updateEnd' in msg : return
    elif 'join' in msg:
        self.players.append(Tournament.toId(msg[1]))
    elif 'leave' in msg:
        self.players.remove(Tournament.toId(msg[1]))
    elif 'start' in msg:
        # Record when the bracket actually started and persist entrants.
        self.startTime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        self.logParticipation()
    elif 'update' in msg:
        info = json.loads(msg[1])
        if 'format' in info:
            # Server-provided display title (may differ from the format id).
            self.title = info['format']
        if 'challenges' in info and info['challenges']:
            # We have an opponent to challenge: pick a team first.
            self.pickTeam()
            self.sendChallenge(info['challenges'][0])
        elif 'challenged' in info and info['challenged']:
            self.pickTeam()
            self.acceptChallenge()
        elif 'isStarted' in info:
            self.hasStarted = info['isStarted']
        try:
            # The bracket's root node going 'inprogress' means the finals
            # battle has begun; join it and advertise it in the room.
            if info['bracketData']['rootNode']['state'] == 'inprogress':
                self.finals = info['bracketData']['rootNode']['room']
                self.join(self.finals)
                if not self.room.silent:
                    self.send(self.room.title, '/wall Come watch the finals: <<{}>>'.format(self.finals))
        except (KeyError, TypeError):
            pass # Expected to happen a lot
    elif 'battleend' in msg:
        winner, runnerUp = msg[1:3]
        # msg[3] tells whose perspective the result is from; swap if needed.
        if msg[3] != 'win':
            winner, runnerUp = runnerUp, winner
        # Count everyone's individual wins
        self.gameWinners[winner] += 1
        if self.finals:
            # The finals just ended: save the replay and remember its URL.
            self.runnerUp = runnerUp
            self.send(self.finals, '/savereplay')
            self.finals = 'https://replay.pokemonshowdown.com/{}'.format(self.finals[7:]) # len('battle-') == 7
def _logParticipationInner(self, fileDir, fileName):
    """Add one 'entered' to every current player's record in fileDir/fileName."""
    os.makedirs(fileDir, exist_ok=True)
    filePath = '{path}/{file}'.format(path=fileDir, file=fileName)
    # 'a+' creates the file if missing; seek(0) rewinds so the whole
    # existing document can be read back before rewriting it below.
    with open(filePath, 'a+') as yf:
        yf.seek(0, 0)
        data = yaml.load(yf, Loader=Loader)
        if not data: data = {}
        for player in self.players:
            player = Tournament.toId(player)
            if player not in data:
                # First time we see this player: seed a fresh record.
                data[player] = {'entered': 1, 'gamewins': 0, 'won': 0}
            else:
                data[player]['entered'] = data[player]['entered'] + 1
    with open(filePath, 'w') as yf:
        yaml.dump(data, yf, default_flow_style=False, explicit_start=True)
    # Remember that entrants were persisted so wins can be logged later.
    self.loggedParticipation = True
def logParticipation(self):
    """Record every entrant in the on-disk leaderboard file(s)."""
    if not self.room.logToFile:
        return
    fileDir = 'plugins/stats/{room}/{format}'.format(room=self.room.title, format=self.format)
    # Every tour counts towards the general leaderboard; official tours
    # are additionally tracked in their own file.
    fileNames = ['tournament-rankings.yaml']
    if self.official:
        fileNames.append('official-rankings.yaml')
    for fileName in fileNames:
        self._logParticipationInner(fileDir, fileName)
def _logWinsInner(self, winner, fileDir, fileName):
    """Credit tour wins and accumulated game wins in fileDir/fileName."""
    if not self.loggedParticipation: return # This may happen if the bot joins midway through a tournament
    os.makedirs(fileDir, exist_ok=True)
    filePath = '{path}/{file}'.format(path=fileDir, file=fileName)
    # 'a+' + seek(0): read the existing rankings document before rewriting.
    with open(filePath, 'a+') as yf:
        yf.seek(0, 0)
        data = yaml.load(yf, Loader = Loader)
        # Tournament winner
        for user in winner:
            data[Tournament.toId(user)]['won'] += 1
        # Game winners
        for user in self.gameWinners:
            userData = data[Tournament.toId(user)]
            # Older ranking files may predate the 'gamewins' field.
            if 'gamewins' not in userData:
                userData['gamewins'] = 0
            userData['gamewins'] += self.gameWinners[user]
    with open(filePath, 'w') as yf:
        yaml.dump(data, yf, default_flow_style = False, explicit_start = True)
def logWins(self, winner):
    """Record tournament and game wins in the on-disk leaderboard file(s)."""
    if not self.room.logToFile:
        return
    fileDir = 'plugins/stats/{room}/{format}'.format(room=self.room.title, format=self.format)
    # General leaderboard always; official leaderboard only for official tours.
    fileNames = ['tournament-rankings.yaml']
    if self.official:
        fileNames.append('official-rankings.yaml')
    for fileName in fileNames:
        self._logWinsInner(winner, fileDir, fileName)
def tourHandler(robot, room, *params):
    """Dispatch `|tournament|` protocol messages for `room`.

    params[0] is the subcommand ('create', 'end', 'forceend', or a generic
    update type); the remaining entries are its arguments.
    """
    if 'create' in params[0]:
        room.createTour(robot.ws, params[1], robot.bh)
        if room.loading: return
        # Tour was created, join it if in supported formats
        if room.joinTours and room.tour.format in robot.bh.supportedFormats:
            room.tour.joinTour()
        # Check if any other room we are in are watching for tournaments in the format
        for alertRoom in robot.rooms.values():
            if room.title == alertRoom.title: continue # No self-reports
            if room.tour.format in alertRoom.formatWatchlist:
                html = Tournament.alertTournamentString(room)
                if robot.canHtml(alertRoom):
                    robot.say(alertRoom, '/addhtmlbox {}'.format(html))
    elif 'end' == params[0]:
        if not room.loading and room.tour:
            winners, tier = room.getTourWinner(params[1])
            room.tour.winner = ', '.join(winners)
            if robot.name in winners:
                # The bot itself won; congratulate any co-winners.
                message = 'I won the {form} tournament :o'.format(form = tier)
                if len(winners) > 1:
                    winners.remove(robot.name)
                    message += '\nCongratulations to {others} for also winning :)'.format(others = ', '.join(winners))
                robot.say(room, message, False)
            else:
                robot.say(room, 'Congratulations to {name} for winning :)'.format(name = ', '.join(winners)), False)
            # This is a bit slow for large datasets, consider refactoring
            room.tour.logWins(winners)
            html = room.endTour()
            # HTML existing means we had an official tour
            if html:
                robot.say(room, '/addhtmlbox {}'.format(html))
    elif 'forceend' in params[0]:
        room.endTour()
    else:
        # This is for general tournament updates
        if not room.tour or room.loading: return
        room.tour.onUpdate(params)
def rawmessage(robot, room, *message):
    """Demote a tour to unofficial when its ruleset is modified mid-tour."""
    tour = room.tour
    if not tour or not tour.official:
        return
    text = '|'.join(message)
    # Custom bans mean the tour no longer matches the official ruleset.
    if 'Removed bans' in text or 'Added bans' in text:
        tour.official = False
def queryresponse(robot, room, query, *data):
    """Handle `|queryresponse|`: leave a battle room once its replay is saved."""
    payload = '|'.join(data)
    if query != 'savereplay':
        return
    battleId = json.loads(payload)['id']
    robot.leaveRoom('battle-{}'.format(battleId))
def oldgentour(bot, cmd, msg, user, room):
    """Post sample-team info (and a legality warning) for old-gen NU tours."""
    reply = ReplyObject('', True, True)
    if not room.tour:
        return reply.response('No tour is currently active, so this command is disabled.')
    if not room.tour.format.startswith('gen'):
        return reply.response("The current tour isn't a previous generation, so this command is disabled.")
    pastGens = {'gen1': 'RBY', 'gen2': 'GSC', 'gen3': 'RSE', 'gen4': 'DPP'}
    genKey = room.tour.format[0:4]
    warning = ''
    if genKey in pastGens:
        warning = "/wall Please note that bringing Pokemon that aren't **{gen} NU** will disqualify you\n".format(gen=pastGens[genKey])
    return reply.response(warning + "/wall Sample teams here: http://www.smogon.com/forums/threads/3562659/")
def tourhistory(bot, cmd, msg, user, room):
    """Upload a summary of a room's past tournaments to pastebin.

    With an argument, looks up that room instead of the current one.
    Replies with the paste URL, or pastebin's error text on failure.
    """
    reply = ReplyObject('', True)
    history = ''
    if msg:
        room = bot.getRoom(msg)
    for tour in room.pastTours:
        history += """
        Name: {name}
        Winner: {winner}
        Runner-Up: {runnerup}
        # of Participants: {players}
        Time: {starttime}
        Finals: {replay}\n""".format(
            name = tour.title,
            winner = tour.winner,
            runnerup = tour.runnerUp,
            players = len(tour.players),
            starttime = tour.startTime,
            replay = tour.finals)
    # Anonymous paste via the pastebin API; dedent strips the literal's
    # leading indentation before uploading.
    r = requests.post('https://pastebin.com/api/api_post.php',
                      data = {
                          'api_dev_key': bot.apikeys['pastebin'],
                          'api_option':'paste',
                          'api_paste_code': textwrap.dedent(history),
                          'api_paste_private': 0,
                          'api_paste_expire_date':'N'})
    if 'Bad API request' in r.text:
        return reply.response('Something went wrong ({error})'.format(error = r.text))
    return reply.response(r.text)
def getranking(bot, cmd, msg, user, room):
    """Show tour leaderboard data for a room/format, or one user's record.

    Expected message: ``[room,] format[, user]`` — the room falls back to
    the current room; omitting the user renders the full rankings table.
    """
    reply = ReplyObject('', True, True)
    # Full rankings can be spammy, so require staff outside of PMs.
    if not user.hasRank('%') and not room.isPM: return reply.response('Listing the rankings require Room Driver (%) or higher.')
    # format is room (optional), format, user (if ever, also optional)
    parts = list(map(Tournament.toId, msg.split(',')))
    roomTitle = ''
    if os.path.exists('plugins/stats/{room}'.format(room=parts[0])):
        roomTitle = parts.pop(0)
    elif os.path.exists('plugins/stats/{room}'.format(room=room.title)):
        roomTitle = room.title
    else:
        return reply.response('The room {} has no data about rankings'.format(msg.split(',')[0]))
    officialTour = cmd == 'officialleaderboard'
    if not parts and not officialTour:
        return reply.response('No format given')
    try:
        formatName = parts.pop(0)
    except IndexError:
        # Official tours has no format
        formatName = ''
    if os.path.exists('plugins/stats/{room}/{format}'.format(room=roomTitle, format=formatName)):
        formatData = Tournament.getTournamentData(roomTitle, formatName, officialTour)
    else:
        return reply.response('The room has no data about the format {}'.format(formatName))
    try:
        userData = formatData[parts[0]]
        try:
            gamewins = userData['gamewins']
        except KeyError:
            # Older ranking files predate per-game win tracking.
            gamewins = 'N/A'
        return reply.response('{user} has played {games}, won {ind} games, and {wins} tours ({winrate:.1f}% tour win rate)'.format(user = parts[0], games = userData['entered'], ind = gamewins, wins = userData['won'], winrate = (userData['won'] / userData['entered']) * 100))
    except IndexError:
        # No user was given: render the whole rankings table instead.
        rankingsTable = Tournament.buildRankingsTable(formatData, formatName)
        if bot.canHtml(room):
            return reply.response('/addhtmlbox {}'.format(rankingsTable))
        else:
            return reply.response('Cannot show full rankings in this room')
    except KeyError:
        # Bug fix: this previously interpolated the `format` BUILTIN
        # (rendering as '<built-in function format>') instead of the
        # requested format name.
        return reply.response('{user} has no data for {tier} in {room}'.format(user = parts[0], tier = formatName, room = roomTitle))
def excludetour(bot, cmd, msg, user, room):
    """Mark the running tour as unofficial so it skips the official leaderboard."""
    reply = ReplyObject('', True, True)
    if not user.hasRank('%'): return reply.response('Permission denied. Room Driver (%) or higher')
    if not room.tour: return reply.response('No tournament found')
    room.tour.official = False
    # Bug fix: a ReplyObject was prepared but never returned, so the
    # command previously gave no feedback at all.
    return reply.response('This tournament will not count towards the official leaderboard')
def resetofficials(bot, cmd, msg, user, room):
    """Delete every official-rankings file for this room (moderator only)."""
    reply = ReplyObject('', True, True)
    if not user.hasRank('@'):
        return reply.response('Permission denied. Room Mod (@) or higher')
    pattern = 'plugins/stats/{room}/*/official-rankings.yaml'.format(room=room.title)
    fileList = glob.glob(pattern)
    if not fileList:
        return reply.response('No official rankings to clear')
    try:
        for rankingFile in fileList:
            os.remove(rankingFile)
    except FileNotFoundError:
        return reply.response('Error while clearing official data')
    return reply.response('Official rankings reset')
# Exports
# Protocol-message handlers this plugin registers, keyed by message type.
handlers = {
    'tournament': tourHandler,
    'queryresponse': queryresponse,
    'raw': rawmessage,
}
# Chat commands provided by this plugin.
commands = [
    Command(['oldgentour'], oldgentour),
    Command(['tourhistory'], tourhistory),
    Command(['leaderboard', 'officialleaderboard'], getranking),
    Command(['excludetour'], excludetour),
    Command(['resetofficials'], resetofficials)
]
import pytest
from hw import ObjectDict
from hw.object_dict import NoneType
# ObjectDict should leave non-container values (including None) untouched.
@pytest.mark.parametrize(
    "typename, value",
    [("int", 42), ("float", 3.14159), ("string", "I'm a string"), ("NoneType", None)],
)
def test_passthrough(typename, value):
    # Identity, not just equality: the very same object must come back.
    assert ObjectDict(value) is value
def test_empty_list():
    # An empty list stays a plain list (no wrapping).
    value = ObjectDict([])
    assert type(value) is list
    assert value == []
def test_lists():
    # Lists of scalars keep both order and element identity.
    list_1 = [1, 2, 3]
    od = ObjectDict(list_1)
    assert od == list_1
    for i1, i2 in zip(list_1, od):
        assert i1 is i2
def test_list_iteration():
    # Dicts inside lists are wrapped so attribute access works per element.
    od = ObjectDict({"key1": [{"key": "value0"}, {"key": "value1"}, {"key": "value2"}]})
    assert (
        [o.key for o in od.key1]
        == [od.key1[i].key for i in range(3)]
        == [f"value{i}" for i in range(3)]
    )
def test_empty_object_dict():
    # An empty mapping wraps into an empty ObjectDict.
    od_1 = ObjectDict({})
    assert type(od_1) is ObjectDict
    assert len(od_1) == 0
def test_object_dict():
    # Attribute reads and writes mirror the underlying mapping.
    od_1 = ObjectDict({"top": "level"})
    assert type(od_1) is ObjectDict
    assert od_1.top == "level"
    od_1.new_string = "3456"
    assert set(od_1.keys()) == {"top", "new_string"}
    # Dicts nested inside assigned lists are wrapped too.
    od_1.new_list = [1, 2, {"key": "value"}]
    assert type(od_1.new_list[2]) == ObjectDict
    assert od_1.new_list[2].key is od_1.new_list[2]["key"]
    assert od_1.new_list[2].key == "value"
    assert set(od_1.keys()) == {"top", "new_string", "new_list"}
def test_recursive_object_dict():
    # Nested dicts are wrapped recursively on attribute access.
    od_1 = ObjectDict({"very": {"much": "smaller"}})
    assert od_1.very["much"] == "smaller"
    assert type(od_1.very) is ObjectDict, f"Wrong type: {type(od_1.very)}"
def test_absent_key_raises_correctly():
    # Attribute access raises AttributeError; item access raises KeyError.
    od_1 = ObjectDict({"very": {"much": "smaller"}})
    with pytest.raises(AttributeError):
        _ = od_1.no_such_key
    with pytest.raises(KeyError):
        _ = od_1["no_such_key"]
def test_dir_method():
    # dir() should expose the mapping's keys as attribute names.
    od_1 = ObjectDict(dict(a=1, b=2, c=3, d=4))
    assert set(dir(od_1)) == set("abcd")
def test_no_args():
    # No-argument construction behaves like an empty dict.
    assert dict(ObjectDict()) == {}
def test_invalid_value():
    # Arbitrary objects are rejected rather than silently wrapped.
    with pytest.raises(ValueError):
        ObjectDict(object())
if __name__ == "__main__":
    pytest.main()
|
class DiceRollResult:
    """Tracks the outcome of rolling two dice.

    Keeps the combined value of the latest roll and counts how many
    doubles (both dice showing the same face) have occurred so far.
    """

    def __init__(self):
        self.__dice_result = 0
        # Number of times both dice came up equal since the last reset.
        self.__double_value_counter = 0

    @property
    def dice_result(self):
        """Sum of the two dice from the most recent roll."""
        return self.__dice_result

    @property
    def double_value_counter(self):
        """How many doubles have been rolled since the last reset."""
        return self.__double_value_counter

    def update_dice_result(self, first_result: int, second_result: int):
        """Store the roll's sum; a double also bumps the doubles counter."""
        self.__dice_result = first_result + second_result
        if first_result == second_result:
            self.__double_value_counter += 1

    def reset(self):
        """Clear both the stored result and the doubles counter."""
        self.__dice_result = 0
        self.__double_value_counter = 0
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unique element dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import unique as experimental_unique
from tensorflow.python.util import deprecation
@deprecation.deprecated(None, "Use `tf.data.experimental.unique()`.")
def unique():
  """Deprecated alias for `tf.data.experimental.unique()`.

  Produces a dataset transformation that keeps one instance of each
  distinct element of its input, e.g.:

  ```python
  dataset = tf.data.Dataset.from_tensor_slices([1, 37, 2, 37, 2, 1])
  # Using `unique()` will drop the duplicate elements.
  dataset = dataset.apply(tf.data.experimental.unique())  # ==> { 1, 37, 2 }
  ```

  Returns:
    A `Dataset` transformation function, which can be passed to
    `tf.data.Dataset.apply`.
  """
  return experimental_unique.unique()
|
# -*- coding: utf-8 -*-
"""
greenbyteapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
from greenbyteapi.api_helper import APIHelper
import greenbyteapi.models.data_signal
import greenbyteapi.models.status_item
class StatusItem(object):
    """Implementation of the 'StatusItem' model.

    A status that may contain statuses of the same type as sub-statuses. Note
    that for sub-statuses the fields `categoryIec`, `categoryContract`, and
    `subStatus` will always be null.

    Attributes:
        turbine_status_id (int): The id of a turbine status.
        device_id (int): The id of the device affected by the status.
        timestamp_start (datetime): The timestamp when the status began. The
            timestamp is in the time zone configured in the Greenbyte Platform
            without UTC offset.
        timestamp_end (datetime): The timestamp when the status ended. The
            timestamp is in the time zone configured in the Greenbyte Platform
            without UTC offset.
        has_timestamp_end (bool): Indicates whether the status has a
            duration.
        category (StatusCategoryEnum): The category a status belongs to.
        code (float): The status code.
        message (string): A description of the status code.
        comment (string): A user comment associated with the status.
        lost_production_signal (DataSignal): A data signal.
        lost_production (float): The lost production in kWh associated with
            the status. This field will be null if the caller is not
            authorized for the system-configured lost production signal. The
            configured lost production signal is available via the
            `/configuration.json` endpoint (`DataSignalConfiguration`
            schema).
        category_iec (string): The status category as defined by the IEC.
        category_contract (object): The status category as defined the
            availability contract assigned to the site.
        sub_status (list of StatusItem): Statuses of the same type that have
            been grouped under this status.
        acknowledged (bool): Indicates whether the status has been
            acknowledged.
    """

    # Create a mapping from Model property names to API property names
    _names = {
        "turbine_status_id":'turbineStatusId',
        "device_id":'deviceId',
        "timestamp_start":'timestampStart',
        "timestamp_end":'timestampEnd',
        "has_timestamp_end":'hasTimestampEnd',
        "category":'category',
        "code":'code',
        "message":'message',
        "comment":'comment',
        "lost_production_signal":'lostProductionSignal',
        "lost_production":'lostProduction',
        "category_iec":'categoryIec',
        "category_contract":'categoryContract',
        "sub_status":'subStatus',
        "acknowledged":'acknowledged'
    }

    def __init__(self,
                 turbine_status_id=None,
                 device_id=None,
                 timestamp_start=None,
                 timestamp_end=None,
                 has_timestamp_end=None,
                 category=None,
                 code=None,
                 message=None,
                 comment=None,
                 lost_production_signal=None,
                 lost_production=None,
                 category_iec=None,
                 category_contract=None,
                 sub_status=None,
                 acknowledged=None):
        """Constructor for the StatusItem class"""
        # Initialize members of the class
        self.turbine_status_id = turbine_status_id
        self.device_id = device_id
        # Timestamps are wrapped in the RFC 3339 helper only when present.
        self.timestamp_start = APIHelper.RFC3339DateTime(timestamp_start) if timestamp_start else None
        self.timestamp_end = APIHelper.RFC3339DateTime(timestamp_end) if timestamp_end else None
        self.has_timestamp_end = has_timestamp_end
        self.category = category
        self.code = code
        self.message = message
        self.comment = comment
        self.lost_production_signal = lost_production_signal
        self.lost_production = lost_production
        self.category_iec = category_iec
        self.category_contract = category_contract
        self.sub_status = sub_status
        self.acknowledged = acknowledged

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object as
            obtained from the deserialization of the server's response. The keys
            MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        turbine_status_id = dictionary.get('turbineStatusId')
        device_id = dictionary.get('deviceId')
        timestamp_start = APIHelper.RFC3339DateTime.from_value(dictionary.get("timestampStart")).datetime if dictionary.get("timestampStart") else None
        timestamp_end = APIHelper.RFC3339DateTime.from_value(dictionary.get("timestampEnd")).datetime if dictionary.get("timestampEnd") else None
        has_timestamp_end = dictionary.get('hasTimestampEnd')
        category = dictionary.get('category')
        code = dictionary.get('code')
        message = dictionary.get('message')
        comment = dictionary.get('comment')
        lost_production_signal = greenbyteapi.models.data_signal.DataSignal.from_dictionary(dictionary.get('lostProductionSignal')) if dictionary.get('lostProductionSignal') else None
        lost_production = dictionary.get('lostProduction')
        category_iec = dictionary.get('categoryIec')
        category_contract = dictionary.get('categoryContract')
        sub_status = None
        # Fix: compare against None with `is not None` (PEP 8) instead of
        # `!= None`, and build the list with a comprehension.
        if dictionary.get('subStatus') is not None:
            sub_status = [
                greenbyteapi.models.status_item.StatusItem.from_dictionary(structure)
                for structure in dictionary.get('subStatus')
            ]
        acknowledged = dictionary.get('acknowledged')

        # Return an object of this model
        return cls(turbine_status_id,
                   device_id,
                   timestamp_start,
                   timestamp_end,
                   has_timestamp_end,
                   category,
                   code,
                   message,
                   comment,
                   lost_production_signal,
                   lost_production,
                   category_iec,
                   category_contract,
                   sub_status,
                   acknowledged)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-03 11:45
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the unused `force_third` field from the Scenario model."""

    dependencies = [
        ('calculator', '0002_auto_20161121_1804'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='scenario',
            name='force_third',
        ),
    ]
|
def bingo(ticket, win):
|
from setuptools import setup, find_packages
# Packaging metadata for the `appsecrets` distribution.
description = """
See `github repo <https://github.com/pior/appsecrets>`_ for information.
"""
VERSION = '0.7.0' # maintained by release tool
setup(
    name='appsecrets',
    version=VERSION,
    description='Manage your application secrets (with Google Cloud KMS)',
    long_description=description,
    classifiers=[
        "Intended Audience :: Developers",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "License :: OSI Approved :: MIT License",
    ],
    keywords='secrets kms crypto',
    author="Pior Bastida",
    author_email="pior@pbastida.net",
    url="https://github.com/pior/appsecrets",
    license="MIT",
    packages=find_packages(),
    # Ship the PEP 561 marker so type checkers use the inline annotations.
    package_data={'appsecrets': ['py.typed']},
    include_package_data=True,
    zip_safe=False,
    install_requires=[
        'google-api-python-client ~= 1.7.0',
    ],
    # Installs the `appsecrets` console command.
    entry_points={
        'console_scripts': ['appsecrets = appsecrets.cli:main'],
    },
)
|
import time, sys
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
# Headless-Firefox automation that submits a docking job to the pyDockWEB
# service for a receptor/ligand PDB pair, optionally restricted to specific
# chains given as 'PDBID:CHAINS' on the command line.
START_URL = 'https://life.bsc.es/pid/pydockweb/default/index'
EMAIL = 'ser499webscraper@gmail.com'
if len(sys.argv) != 3:
    print('Usage: -a receptor -a ligand')
    sys.exit(1)
# Split optional ':CHAINS' suffixes off both PDB arguments.
receptor = sys.argv[1]
receptor_chain = ''
if ':' in receptor:
    receptor_chain = receptor.split(':')[1].upper()
    receptor = receptor.split(':')[0]
ligand = sys.argv[2]
ligand_chain = ''
if ':' in ligand:
    ligand_chain = ligand.split(':')[1].upper()
    ligand = ligand.split(':')[0]
options = Options()
options.add_argument("--headless")
# NOTE(review): `firefox_options=` is deprecated in newer Selenium releases
# in favor of `options=` — confirm the pinned selenium version before upgrading.
browser = webdriver.Firefox(firefox_options=options)
# Give slow page loads up to a minute before element lookups fail.
browser.implicitly_wait(60)
# load job submit page
browser.get(START_URL)
# enter project name
project_name = browser.find_element_by_name('project_name')
project_name.send_keys('{}_{}'.format(sys.argv[1], sys.argv[2]))
# enter email
email = browser.find_element_by_name('email')
email.send_keys(EMAIL)
# select option to enter PDB codes
operation_list = browser.find_elements_by_name('operation')
for radio_but in operation_list:
    if 'pdb_code' == radio_but.get_attribute('value'):
        radio_but.click()
# enter receptor pdb
rec_tb = browser.find_element_by_name('receptor_pdb_code')
rec_tb.send_keys(receptor)
# enter ligand pdb
lig_tb = browser.find_element_by_name('ligand_pdb_code')
lig_tb.send_keys(ligand)
# select agreement statement
browser.find_element_by_name('agreement').click()
# submit form
browser.find_element_by_name('submit').click()
# select receptor chains
receptor_chains_list = browser.find_elements_by_name('chains_receptor')
if receptor_chain == '':
    # select all of the chains
    for chain in receptor_chains_list:
        chain.click()
else:
    # select only chains specified in chain list
    for chain in receptor_chains_list:
        if chain.get_attribute('value') in receptor_chain:
            chain.click()
# select ligand chains
ligand_chains_list = browser.find_elements_by_name('chains_ligand')
if ligand_chain == '':
    # select all of the chains
    for chain in ligand_chains_list:
        chain.click()
else:
    # select only chains specified in chain list
    for chain in ligand_chains_list:
        if chain.get_attribute('value') in ligand_chain:
            chain.click()
# continue to next page
browser.find_element_by_name('submit').click()
# skip restraints and continue to submission page
btns = browser.find_elements_by_class_name('btn')
for btn in btns:
    if btn.get_attribute('value') == 'Next Step':
        btn.click()
# submit job
btns = browser.find_elements_by_class_name('btn')
for btn in btns:
    if btn.get_attribute('value') == 'Submit job':
        btn.click()
print('job submitted')
browser.close()
import ctypes
import osgDB, osgViewer, osg, osgGA, osgAnimation
# Exercise the osgAnimation keyframe containers: one container per value
# type, each seeded with a single keyframe at t = 0.
floatKeys = osgAnimation.FloatKeyframeContainer()
key0 = osgAnimation.FloatKeyframe(0.0, 1.0)
floatKeys.push_back(key0)
vec3Keys = osgAnimation.Vec3KeyframeContainer()
# `key0` is rebound for each container; the earlier keyframes stay alive
# inside the containers that hold them.
key0 = osgAnimation.Vec3Keyframe(0.0, osg.Vec3(1,2,3))
vec3Keys.push_back(key0)
vec4Keys = osgAnimation.Vec4KeyframeContainer()
key0 = osgAnimation.Vec4Keyframe(0.0, osg.Vec4(1,2,3,4))
vec4Keys.push_back(key0)
|
import pathlib
import subprocess
from conductor.utils.git import Git
def test_detect_no_git(tmp_path: pathlib.Path):
    """A directory without a .git folder is not reported as a repository."""
    g = Git(tmp_path)
    assert not g.is_used()
def test_detect_empty_repo(tmp_path: pathlib.Path):
    """An initialized repository with no commits still counts as in use."""
    setup_git(tmp_path, initialize=False)
    g = Git(tmp_path)
    assert g.is_used()
def test_detect_with_commits(tmp_path: pathlib.Path):
    """A repository with at least one commit counts as in use."""
    setup_git(tmp_path, initialize=True)
    g = Git(tmp_path)
    assert g.is_used()
def test_get_current_commit(tmp_path: pathlib.Path):
    """current_commit() reports HEAD's hash and a dirty-worktree flag."""
    g = Git(tmp_path)
    # No repository
    assert g.current_commit() is None
    setup_git(tmp_path, initialize=False)
    # No commits
    assert g.current_commit() is None
    # Create a commit with an empty file
    open(tmp_path / "test.txt", "w", encoding="UTF-8").close()
    results = subprocess.run(["git", "add", "test.txt"], cwd=tmp_path, check=False)
    assert results.returncode == 0
    results = subprocess.run(
        ["git", "commit", "-m", "Test commit."], cwd=tmp_path, check=False
    )
    assert results.returncode == 0
    # Fetch the commit's hash
    get_hash_results = subprocess.run(
        ["git", "rev-parse", "HEAD"],
        cwd=tmp_path,
        capture_output=True,
        text=True,
        check=False,
    )
    assert get_hash_results.returncode == 0
    expected_hash = get_hash_results.stdout.strip()
    # We should retrieve the same hash
    current_commit = g.current_commit()
    assert current_commit is not None
    assert current_commit.hash == expected_hash
    assert not current_commit.has_changes
    # Making changes to the file should result in `has_changes` being set to
    # True.
    with open(tmp_path / "test.txt", "w", encoding="UTF-8") as f:
        f.write("Hello world!")
    current_commit = g.current_commit()
    assert current_commit is not None
    assert current_commit.hash == expected_hash
    assert current_commit.has_changes
def test_is_ancestor(tmp_path: pathlib.Path):
    """is_ancestor() follows git's ancestry semantics, including self-ancestry."""
    setup_git(tmp_path, initialize=True)
    g = Git(tmp_path)
    first_commit = g.current_commit()
    assert first_commit is not None
    # A commit is an ancestor of itself.
    assert g.is_ancestor(
        commit_hash=first_commit.hash, candidate_ancestor_hash=first_commit.hash
    )
    # Create a new commit, which should be a descendant of `first_commit`.
    new_commit_hash = create_commit(tmp_path, "Branch1")
    assert g.is_ancestor(
        commit_hash=new_commit_hash, candidate_ancestor_hash=first_commit.hash
    )
    # The new commit is not an ancestor of `first_commit`
    assert not g.is_ancestor(
        commit_hash=first_commit.hash, candidate_ancestor_hash=new_commit_hash
    )
    # Create a sibling commit and check that they are not ancestors of
    # eachother.
    results = subprocess.run(
        ["git", "checkout", first_commit.hash], cwd=tmp_path, check=False
    )
    assert results.returncode == 0
    sibling_commit = create_commit(tmp_path, "Branch2")
    assert not g.is_ancestor(
        commit_hash=sibling_commit, candidate_ancestor_hash=new_commit_hash
    )
    assert not g.is_ancestor(
        commit_hash=new_commit_hash, candidate_ancestor_hash=sibling_commit
    )
def test_distance(tmp_path: pathlib.Path):
    """get_distance() counts commits between an ancestor and a descendant."""
    setup_git(tmp_path, initialize=False)
    commit1 = create_commit(tmp_path, "C1")
    commit2 = create_commit(tmp_path, "C2")
    commit3 = create_commit(tmp_path, "C3")
    g = Git(tmp_path)
    assert g.get_distance(commit1, commit1) == 0
    assert g.get_distance(commit2, commit1) == 1
    assert g.get_distance(commit3, commit1) == 2
    assert g.get_distance(commit3, commit2) == 1
    assert g.is_ancestor(commit3, commit1)
    assert g.is_ancestor(commit2, commit1)
    assert g.is_ancestor(commit3, commit2)
# Git environment setup helpers
def setup_git(repository_root: pathlib.Path, initialize: bool):
    """Create a git repo in `repository_root`; optionally add an initial commit."""
    init_result = subprocess.run(["git", "init"], cwd=repository_root, check=False)
    assert init_result.returncode == 0
    if not initialize:
        return
    commit_result = subprocess.run(
        ["git", "commit", "--allow-empty", "-m", "Initial commit"],
        cwd=repository_root,
        check=False,
    )
    assert commit_result.returncode == 0
def create_commit(repository_root: pathlib.Path, message: str) -> str:
    """Add an empty commit carrying `message` and return its full hash."""
    commit_result = subprocess.run(
        ["git", "commit", "--allow-empty", "-m", message],
        cwd=repository_root,
        check=False,
    )
    assert commit_result.returncode == 0
    # Ask git for the hash of the commit that was just created.
    rev_parse = subprocess.run(
        ["git", "rev-parse", "HEAD"],
        cwd=repository_root,
        capture_output=True,
        text=True,
        check=False,
    )
    assert rev_parse.returncode == 0
    return rev_parse.stdout.strip()
|
### This program uses the tkinter library to draw egypt ###
from tkinter import *
# Creates a new window without a canvas
# Create the root window.
main = Tk()
# Canvas to draw on: a 2000x500 dark-red sky.
canvas = Canvas(main, bg="darkred", height=500, width=2000)
# Sun in the top-left corner.
canvas.create_oval(10,10,150,150, fill ="darkorange")
# Pyramid silhouette along the bottom-left edge.
canvas.create_polygon(0,500,250,250,500,500, fill="#fa7")
# White beam of light fanning out from the pyramid's apex.
canvas.create_polygon(1300,500,250,250,1200,500, fill="#FFF")
# Lay out the canvas inside the window.
canvas.pack()
|
import unittest
from gphotospy.album import Album
class TestAlbum(unittest.TestCase):
    """Placeholder test suite for gphotospy.album.Album."""

    def test_create_album(self):
        # TODO: implement once album-creation behavior can be exercised.
        pass
|
# Created by Xingyu Lin, 2019-09-18
import argparse
import torch
import numpy as np
from rlkit.torch.pytorch_util import set_gpu_mode
import rlkit.torch.pytorch_util as ptu
import copy
import cv2
def batch_chw_to_hwc(images):
    """Axis-swap and mirror a batch of CHW images.

    Each image is transposed with axes [2, 1, 0] and then reversed along
    its first and last axes (which also reverses the channel order,
    e.g. RGB <-> BGR).
    NOTE(review): despite the name, [2, 1, 0] yields (W, H, C) rather than
    (H, W, C) — confirm this is the intended orientation.
    """
    converted = [
        copy.copy(np.transpose(image, [2, 1, 0])[::-1, :, ::-1])
        for image in images
    ]
    return np.array(converted)
def visualize_vae(args):
    """Render a linear interpolation through a trained VAE's latent space.

    Loads an rlkit snapshot (`args.file`), encodes the env's current and
    goal images, interpolates between the two latent means, decodes each
    step, and writes the originals plus the interpolation strip as PNGs.
    Images are 3x48x48 CHW flattened vectors (see the reshape calls below).
    """
    data = torch.load(args.file)
    vae = data['vae']
    env = data['exploration/env']
    set_gpu_mode(True)
    obs = env.reset()
    curr_img = obs['image_achieved_goal']
    goal_img = obs['image_desired_goal']
    imgs = batch_chw_to_hwc([curr_img.reshape(3, 48, 48),
                             goal_img.reshape(3, 48, 48)])
    # Scale [0, 1] floats up to 8-bit range for cv2.
    save_img = np.hstack(imgs) * 256
    cv2.imwrite('./latent_space/original.png', save_img)
    latent_distribution_params = vae.encode(ptu.from_numpy(np.array([curr_img, goal_img]).reshape(2, -1)))
    latent_mean, logvar = ptu.get_numpy(latent_distribution_params[0]), \
                          ptu.get_numpy(latent_distribution_params[1])
    curr_latent = latent_mean[0, :]
    goal_latent = latent_mean[1, :]
    # reconstr_imgs = ptu.get_numpy(vae.decode(ptu.from_numpy(latent_mean)))
    # reconstr_curr_img = reconstr_imgs[0, :].reshape([3, 48, 48])
    # reconstr_goal_img = reconstr_imgs[1, :].reshape([3, 48, 48])
    # 30 evenly spaced blend weights from current (alpha=0) to goal (alpha=1).
    alphas = np.linspace(0, 1, 30)
    interpolate_latents = []
    for alpha in alphas:
        latent = (1 - alpha) * curr_latent + alpha * goal_latent
        interpolate_latents.append(copy.copy(latent))
    # print('debug:', vae.decode(ptu.from_numpy(np.array(interpolate_latents))))
    reconstr_imgs = ptu.get_numpy(vae.decode(ptu.from_numpy(np.array(interpolate_latents)))[0])
    reconstr_imgs = batch_chw_to_hwc(reconstr_imgs.reshape([-1, 3, 48, 48]))
    save_img = np.hstack(reconstr_imgs) * 256
    cv2.imwrite('./latent_space/latent_sapce.png', save_img)
if __name__ == '__main__':
    # CLI: visualize latent-space interpolation for a saved rlkit snapshot.
    parser = argparse.ArgumentParser()
    parser.add_argument('file', type=str,
                        help='path to the snapshot file')
    args = parser.parse_args()
    visualize_vae(args)
|
from typing import List
def digits(x: int) -> List[int]:
    """Return the decimal digits of non-negative `x`, most significant first."""
    return list(map(int, str(x)))
assert digits(103) == [1, 0, 3]
assert digits(5) == [5]
def new_recipes(recipe1: int, recipe2: int) -> List[int]:
    """Digits of the two recipes' combined score (one or two new recipes)."""
    combined = recipe1 + recipe2
    return [int(ch) for ch in str(combined)]
assert new_recipes(3, 7) == [1, 0]
def scoreboard(num_steps: int, recipe1: int = 3, recipe2: int = 7):
    """Grow the recipe scoreboard until it holds at least `num_steps` scores."""
    scores = [recipe1, recipe2]
    elf1, elf2 = 0, 1
    while len(scores) < num_steps:
        # Append the digit(s) of the two current recipes' sum, then each
        # elf steps forward by (its current score + 1), wrapping around.
        scores.extend(int(ch) for ch in str(scores[elf1] + scores[elf2]))
        elf1 = (elf1 + scores[elf1] + 1) % len(scores)
        elf2 = (elf2 + scores[elf2] + 1) % len(scores)
    return scores
assert scoreboard(20) == [3, 7, 1, 0, 1, 0, 1, 2, 4, 5, 1, 5, 8, 9, 1, 6, 7, 7, 9, 2]
def ten_after(n: int):
    """Return the ten recipe scores that follow the first `n`, as a string."""
    board = scoreboard(n + 10)
    return ''.join(map(str, board[n:n + 10]))
assert ten_after(9) == '5158916779'
assert ten_after(18) == '9251071085'
#print(ten_after(147061))
def recipes_to_the_left(n: int, recipe1: int = 3, recipe2: int = 7) -> int:
    """Count the recipes that precede the first occurrence of `n`'s digits."""
    target = [int(ch) for ch in str(n)]
    width = len(target)
    scores = [recipe1, recipe2]
    elf1, elf2 = 0, 1
    while True:
        for digit in (int(ch) for ch in str(scores[elf1] + scores[elf2])):
            scores.append(digit)
            # Check after every single append: the target may end on either
            # of the (up to two) digits produced this round.
            if scores[-width:] == target:
                return len(scores) - width
        elf1 = (elf1 + scores[elf1] + 1) % len(scores)
        elf2 = (elf2 + scores[elf2] + 1) % len(scores)
# Regression checks for part 2 (these execute at import time).
assert recipes_to_the_left(92510) == 18
assert recipes_to_the_left(59414) == 2018
# Puzzle answer; scans on the order of tens of millions of recipes, so
# expect a noticeable runtime.
print(recipes_to_the_left(147061))
|
from fabric.api import task, runs_once
from fabric.api import run, sudo, hide, settings, abort, execute, puts, env
from fabric import colors
from time import sleep
import json
import re
def _strip_bson(raw_output):
stripped = re.sub(r'(ISODate|ObjectId|NumberLong)\((.*?)\)', r'\2', raw_output)
return re.sub(r'Timestamp\((.*?)\)', r'"\1"', stripped)
def _run_mongo_command(command):
    """Run a command in the mongo shell and return its output parsed as JSON.

    Returns None (after printing the raw output for debugging) when the
    output is not valid JSON even after stripping BSON wrappers.
    """
    response = run("mongo --quiet --eval 'printjson(%s)'" % command)
    try:
        return json.loads(_strip_bson(response))
    except ValueError:
        # Fix: `print response` was Python-2-only syntax; with a single
        # argument, print(response) produces identical output on Python 2
        # and also works on Python 3.
        print(response)
def _i_am_primary(primary=None):
    """Return True when the current host is the replica-set primary.

    (The `primary` parameter is unused; kept for caller compatibility.)
    """
    master_info = _run_mongo_command("rs.isMaster()")
    return master_info["ismaster"]
def _wait_for_ok():
    """Block until the replica set reports a healthy state, polling every 5s."""
    while not _cluster_is_ok():
        sleep(5)
        print("Waiting for cluster to be okay")
def _cluster_is_ok():
    """True when every member is healthy, in PRIMARY/SECONDARY state,
    and exactly one member is the primary."""
    members = _run_mongo_command("rs.status()")["members"]
    primary_count = sum(1 for m in members if m['stateStr'] == 'PRIMARY')
    return (all(m['health'] == 1 for m in members)
            and all(m['stateStr'] in ['PRIMARY', 'SECONDARY'] for m in members)
            and primary_count == 1)
@task
def force_resync():
    """Force a mongo secondary to resync by removing all its data."""
    # Safety guards: resync targets exactly one host and never the primary.
    if len(env.hosts) > 1:
        abort("This task should only be run on one host at a time")
    if _i_am_primary():
        abort(colors.red("Refusing to force resync on primary", bold=True))
    # NOTE(review): presumably puppet is disabled so it cannot restart
    # mongodb mid-resync — confirm against the puppet.disable task.
    execute("puppet.disable", "Forcing mongodb resync")
    execute("app.stop", "mongodb")
    # wait for mongod process to stop
    while run("ps -C mongod", quiet=True).return_code == 0:
        puts("Waiting for mongod to stop")
        sleep(1)
    # With the data directory empty, mongod performs a full initial sync
    # from the replica set when it is started again.
    sudo("rm -rf /var/lib/mongodb/*")
    execute("app.start", "mongodb")
    execute("puppet.enable")
@task
def find_primary():
    """Find which mongo node is the master"""
    with hide("everything"):
        if not _i_am_primary():
            return
        print(colors.blue("%s is primary" % env["host_string"], bold=True))
@task
@runs_once
def status():
    """Check the status of the mongo cluster"""
    with hide("everything"):
        if _cluster_is_ok():
            print(colors.blue("Cluster is OK", bold=True))
            return
        # Cluster is degraded: dump the replication diagnostics.
        for diagnostic in ("db.printReplicationInfo()",
                           "db.printSlaveReplicationInfo()"):
            print(colors.blue(diagnostic, bold=True))
            print(_run_mongo_command(diagnostic))
        print(colors.blue("rs.status()", bold=True))
        print(json.dumps(_run_mongo_command("rs.status()"), indent=4))
@task
def step_down_primary(seconds='100'):
    """Step down as primary for a given number of seconds (default: 100)"""
    # Mongo returns an exit code of 252 when the primary steps down, as well
    # as disconnecting the current console session. We need to mark that as
    # okay so that run() won't error.
    with hide('output'), settings(ok_ret_codes=[0, 252]):
        if not _i_am_primary():
            print("I am not the primary")
            return
        _run_mongo_command("rs.stepDown(%s)" % seconds)
        if _i_am_primary():
            print("I am still the primary")
        else:
            print("I am no longer the primary")
@task
def safe_reboot():
    """Reboot a mongo machine, stepping down if it is the primary"""
    # Imported lazily: vm is another fabric task module.
    import vm
    if not vm.reboot_required():
        print("No reboot required")
        return
    with hide("everything"):
        # Never proceed while the cluster is degraded.
        _wait_for_ok()
    if _i_am_primary():
        execute(step_down_primary)
        with hide("everything"):
            # NOTE(review): presumably waits for a new primary to be elected
            # before rebooting this node — confirm.
            _wait_for_ok()
    execute(vm.reboot, hosts=[env['host_string']])
|
from typing import *
from mongoengine import StringField
from .commented_odm import CommentedODM
class Account(CommentedODM):
    """Mongo document for a user account."""
    # Login name; required by the schema.
    username = StringField(required=True)
    # Credential material; required. NOTE(review): presumably a password
    # hash — the hashing scheme is not visible in this file, confirm.
    hashdata = StringField(required=True)
# Register the document class with the ODM layer (behavior defined in
# CommentedODM, not visible here).
Account.setup_odm()
|
#!/usr/bin/env /usr/local/bin/python3
#tool thrown together to decode obfuscate strings in sample with sha256 586409e98ade6e754cfba0bd0aec2e8690a909c41cebac4085cad6f79d9676b4
import re
def decode_HojmKcMqFYaYKqxLdDAfSKDHMYMhyihZnH_numArray1():
    """Recover the obfuscated numArray1 string.

    Each index is assigned 5 times; decode_array (defined elsewhere in this
    file) collapses the blob, then the 'x' filler characters are stripped
    and the result reversed.
    """
    string_to_decode ='''numArray1[18] = 48;
numArray1[18] = 100;
numArray1[18] = 99;
numArray1[18] = 57;
numArray1[18] = 120;
numArray1[17] = 51;
numArray1[17] = 51;
numArray1[17] = 50;
numArray1[17] = 50;
numArray1[17] = 101;
numArray1[16] = 56;
numArray1[16] = 57;
numArray1[16] = 55;
numArray1[16] = 52;
numArray1[16] = 120;
numArray1[15] = 98;
numArray1[15] = 54;
numArray1[15] = 98;
numArray1[15] = 50;
numArray1[15] = 112;
numArray1[14] = 102;
numArray1[14] = 50;
numArray1[14] = 57;
numArray1[14] = 99;
numArray1[14] = 120;
numArray1[13] = 57;
numArray1[13] = 97;
numArray1[13] = 99;
numArray1[13] = 100;
numArray1[13] = 121;
numArray1[12] = 53;
numArray1[12] = 56;
numArray1[12] = 48;
numArray1[12] = 97;
numArray1[12] = 120;
numArray1[11] = 54;
numArray1[11] = 51;
numArray1[11] = 101;
numArray1[11] = 55;
numArray1[11] = 84;
numArray1[10] = 53;
numArray1[10] = 99;
numArray1[10] = 98;
numArray1[10] = 97;
numArray1[10] = 120;
numArray1[9] = 51;
numArray1[9] = 54;
numArray1[9] = 99;
numArray1[9] = 100;
numArray1[9] = 120;
numArray1[8] = 56;
numArray1[8] = 100;
numArray1[8] = 57;
numArray1[8] = 53;
numArray1[8] = 116;
numArray1[7] = 49;
numArray1[7] = 55;
numArray1[7] = 54;
numArray1[7] = 100;
numArray1[7] = 120;
numArray1[6] = 56;
numArray1[6] = 56;
numArray1[6] = 55;
numArray1[6] = 48;
numArray1[6] = 120;
numArray1[5] = 48;
numArray1[5] = 99;
numArray1[5] = 98;
numArray1[5] = 53;
numArray1[5] = 101;
numArray1[4] = 102;
numArray1[4] = 101;
numArray1[4] = 49;
numArray1[4] = 50;
numArray1[4] = 120;
numArray1[3] = 98;
numArray1[3] = 102;
numArray1[3] = 99;
numArray1[3] = 98;
numArray1[3] = 120;
numArray1[2] = 101;
numArray1[2] = 55;
numArray1[2] = 55;
numArray1[2] = 54;
numArray1[2] = 120;
numArray1[1] = 48;
numArray1[1] = 48;
numArray1[1] = 52;
numArray1[1] = 99;
numArray1[1] = 71;
numArray1[0] = 51;
numArray1[0] = 51;
numArray1[0] = 51;
numArray1[0] = 53;
numArray1[0] = 120;'''
    results = decode_array(string_to_decode, 5).replace("x","") [::-1]
    return(results)
def decode_HojmKcMqFYaYKqxLdDAfSKDHMYMhyihZnH_numArray2():
    """Recover the obfuscated numArray2 string (5 assignments per index;
    'x' filler stripped, result reversed)."""
    string_to_decode = '''numArray2[24] = 99;
numArray2[24] = 50;
numArray2[24] = 56;
numArray2[24] = 50;
numArray2[24] = 120;
numArray2[23] = 100;
numArray2[23] = 57;
numArray2[23] = 50;
numArray2[23] = 101;
numArray2[23] = 121;
numArray2[22] = 55;
numArray2[22] = 48;
numArray2[22] = 52;
numArray2[22] = 53;
numArray2[22] = 120;
numArray2[21] = 101;
numArray2[21] = 55;
numArray2[21] = 101;
numArray2[21] = 97;
numArray2[21] = 120;
numArray2[20] = 101;
numArray2[20] = 55;
numArray2[20] = 57;
numArray2[20] = 98;
numArray2[20] = 108;
numArray2[19] = 99;
numArray2[19] = 99;
numArray2[19] = 49;
numArray2[19] = 100;
numArray2[19] = 120;
numArray2[18] = 54;
numArray2[18] = 57;
numArray2[18] = 49;
numArray2[18] = 55;
numArray2[18] = 120;
numArray2[17] = 55;
numArray2[17] = 97;
numArray2[17] = 99;
numArray2[17] = 52;
numArray2[17] = 98;
numArray2[16] = 53;
numArray2[16] = 102;
numArray2[16] = 56;
numArray2[16] = 50;
numArray2[16] = 120;
numArray2[15] = 52;
numArray2[15] = 97;
numArray2[15] = 48;
numArray2[15] = 48;
numArray2[15] = 120;
numArray2[14] = 97;
numArray2[14] = 101;
numArray2[14] = 99;
numArray2[14] = 51;
numArray2[14] = 109;
numArray2[13] = 50;
numArray2[13] = 98;
numArray2[13] = 54;
numArray2[13] = 49;
numArray2[13] = 120;
numArray2[12] = 101;
numArray2[12] = 51;
numArray2[12] = 57;
numArray2[12] = 55;
numArray2[12] = 101;
numArray2[11] = 97;
numArray2[11] = 49;
numArray2[11] = 56;
numArray2[11] = 51;
numArray2[11] = 120;
numArray2[10] = 52;
numArray2[10] = 101;
numArray2[10] = 54;
numArray2[10] = 51;
numArray2[10] = 120;
numArray2[9] = 97;
numArray2[9] = 50;
numArray2[9] = 102;
numArray2[9] = 49;
numArray2[9] = 115;
numArray2[8] = 53;
numArray2[8] = 50;
numArray2[8] = 48;
numArray2[8] = 55;
numArray2[8] = 120;
numArray2[7] = 49;
numArray2[7] = 101;
numArray2[7] = 49;
numArray2[7] = 101;
numArray2[7] = 120;
numArray2[6] = 54;
numArray2[6] = 97;
numArray2[6] = 49;
numArray2[6] = 100;
numArray2[6] = 115;
numArray2[5] = 52;
numArray2[5] = 97;
numArray2[5] = 57;
numArray2[5] = 52;
numArray2[5] = 120;
numArray2[4] = 102;
numArray2[4] = 97;
numArray2[4] = 99;
numArray2[4] = 48;
numArray2[4] = 120;
numArray2[3] = 101;
numArray2[3] = 51;
numArray2[3] = 53;
numArray2[3] = 50;
numArray2[3] = 65;
numArray2[2] = 102;
numArray2[2] = 53;
numArray2[2] = 98;
numArray2[2] = 55;
numArray2[2] = 120;
numArray2[1] = 51;
numArray2[1] = 102;
numArray2[1] = 48;
numArray2[1] = 57;
numArray2[1] = 120;
numArray2[0] = 55;
numArray2[0] = 53;
numArray2[0] = 53;
numArray2[0] = 52;
numArray2[0] = 120;'''
    results = decode_array(string_to_decode, 5).replace("x", "")[::-1]
    return(results)
def decode_HojmKcMqFYaYKqxLdDAfSKDHMYMhyihZnH_numArray3():
    """Recover the obfuscated numArray3 string (5 assignments per index;
    'x' filler stripped, result reversed)."""
    string_to_decode = '''numArray3[18] = 51;
numArray3[18] = 51;
numArray3[18] = 49;
numArray3[18] = 100;
numArray3[18] = 120;
numArray3[17] = 50;
numArray3[17] = 54;
numArray3[17] = 51;
numArray3[17] = 52;
numArray3[17] = 120;
numArray3[16] = 97;
numArray3[16] = 57;
numArray3[16] = 102;
numArray3[16] = 100;
numArray3[16] = 120;
numArray3[15] = 55;
numArray3[15] = 98;
numArray3[15] = 51;
numArray3[15] = 98;
numArray3[15] = 100;
numArray3[14] = 56;
numArray3[14] = 55;
numArray3[14] = 99;
numArray3[14] = 52;
numArray3[14] = 120;
numArray3[13] = 52;
numArray3[13] = 102;
numArray3[13] = 98;
numArray3[13] = 99;
numArray3[13] = 120;
numArray3[12] = 56;
numArray3[12] = 48;
numArray3[12] = 100;
numArray3[12] = 56;
numArray3[12] = 97;
numArray3[11] = 102;
numArray3[11] = 53;
numArray3[11] = 57;
numArray3[11] = 102;
numArray3[11] = 120;
numArray3[10] = 55;
numArray3[10] = 54;
numArray3[10] = 48;
numArray3[10] = 100;
numArray3[10] = 120;
numArray3[9] = 98;
numArray3[9] = 100;
numArray3[9] = 49;
numArray3[9] = 99;
numArray3[9] = 120;
numArray3[8] = 102;
numArray3[8] = 52;
numArray3[8] = 57;
numArray3[8] = 100;
numArray3[8] = 120;
numArray3[7] = 99;
numArray3[7] = 102;
numArray3[7] = 52;
numArray3[7] = 52;
numArray3[7] = 120;
numArray3[6] = 100;
numArray3[6] = 101;
numArray3[6] = 97;
numArray3[6] = 57;
numArray3[6] = 111;
numArray3[5] = 56;
numArray3[5] = 48;
numArray3[5] = 49;
numArray3[5] = 48;
numArray3[5] = 120;
numArray3[4] = 54;
numArray3[4] = 51;
numArray3[4] = 55;
numArray3[4] = 57;
numArray3[4] = 120;
numArray3[3] = 54;
numArray3[3] = 101;
numArray3[3] = 50;
numArray3[3] = 57;
numArray3[3] = 76;
numArray3[2] = 55;
numArray3[2] = 48;
numArray3[2] = 51;
numArray3[2] = 102;
numArray3[2] = 120;
numArray3[1] = 49;
numArray3[1] = 101;
numArray3[1] = 55;
numArray3[1] = 100;
numArray3[1] = 120;
numArray3[0] = 50;
numArray3[0] = 101;
numArray3[0] = 101;
numArray3[0] = 99;
numArray3[0] = 120;'''
    results = decode_array(string_to_decode, 5).replace("x", "")[::-1]
    return(results)
def decode_HojmKcMqFYaYKqxLdDAfSKDHMYMhyihZnH_numArray4():
    """Recover the obfuscated numArray4 string (3 assignments per index;
    'x' filler stripped, result reversed)."""
    string_to_decode = '''numArray4[36] = 57;
numArray4[36] = 50;
numArray4[36] = 120;
numArray4[35] = 52;
numArray4[35] = 54;
numArray4[35] = 120;
numArray4[34] = 99;
numArray4[34] = 53;
numArray4[34] = 120;
numArray4[33] = 53;
numArray4[33] = 99;
numArray4[33] = 120;
numArray4[32] = 56;
numArray4[32] = 101;
numArray4[32] = 116;
numArray4[31] = 56;
numArray4[31] = 51;
numArray4[31] = 120;
numArray4[30] = 55;
numArray4[30] = 57;
numArray4[30] = 120;
numArray4[29] = 98;
numArray4[29] = 53;
numArray4[29] = 110;
numArray4[28] = 52;
numArray4[28] = 98;
numArray4[28] = 120;
numArray4[27] = 52;
numArray4[27] = 98;
numArray4[27] = 120;
numArray4[26] = 102;
numArray4[26] = 48;
numArray4[26] = 120;
numArray4[25] = 99;
numArray4[25] = 57;
numArray4[25] = 105;
numArray4[24] = 97;
numArray4[24] = 98;
numArray4[24] = 120;
numArray4[23] = 55;
numArray4[23] = 50;
numArray4[23] = 120;
numArray4[22] = 101;
numArray4[22] = 101;
numArray4[22] = 111;
numArray4[21] = 101;
numArray4[21] = 49;
numArray4[21] = 120;
numArray4[20] = 55;
numArray4[20] = 56;
numArray4[20] = 120;
numArray4[19] = 54;
numArray4[19] = 101;
numArray4[19] = 120;
numArray4[18] = 50;
numArray4[18] = 99;
numArray4[18] = 80;
numArray4[17] = 97;
numArray4[17] = 49;
numArray4[17] = 120;
numArray4[16] = 98;
numArray4[16] = 101;
numArray4[16] = 120;
numArray4[15] = 52;
numArray4[15] = 55;
numArray4[15] = 120;
numArray4[14] = 57;
numArray4[14] = 101;
numArray4[14] = 121;
numArray4[13] = 100;
numArray4[13] = 101;
numArray4[13] = 120;
numArray4[12] = 101;
numArray4[12] = 102;
numArray4[12] = 120;
numArray4[11] = 100;
numArray4[11] = 99;
numArray4[11] = 120;
numArray4[10] = 48;
numArray4[10] = 99;
numArray4[10] = 114;
numArray4[9] = 53;
numArray4[9] = 49;
numArray4[9] = 116;
numArray4[8] = 54;
numArray4[8] = 49;
numArray4[8] = 120;
numArray4[7] = 102;
numArray4[7] = 51;
numArray4[7] = 120;
numArray4[6] = 97;
numArray4[6] = 55;
numArray4[6] = 110;
numArray4[5] = 53;
numArray4[5] = 51;
numArray4[5] = 120;
numArray4[4] = 100;
numArray4[4] = 100;
numArray4[4] = 120;
numArray4[3] = 52;
numArray4[3] = 57;
numArray4[3] = 69;
numArray4[2] = 99;
numArray4[2] = 49;
numArray4[2] = 120;
numArray4[1] = 49;
numArray4[1] = 53;
numArray4[1] = 120;
numArray4[0] = 100;
numArray4[0] = 97;
numArray4[0] = 120;'''
    results = decode_array(string_to_decode, 3).replace("x", "")[::-1]
    return(results)
def decode_HojmKcMqFYaYKqxLdDAfSKDHMYMhyihZnH_numArray5():
    """Recover the obfuscated numArray5 string (3 assignments per index;
    'x' filler stripped, result reversed)."""
    string_to_decode = ''' numArray5[17] = 101;
numArray5[17] = 51;
numArray5[17] = 120;
numArray5[16] = 53;
numArray5[16] = 56;
numArray5[16] = 120;
numArray5[15] = 50;
numArray5[15] = 52;
numArray5[15] = 101;
numArray5[14] = 52;
numArray5[14] = 53;
numArray5[14] = 120;
numArray5[13] = 98;
numArray5[13] = 56;
numArray5[13] = 107;
numArray5[12] = 54;
numArray5[12] = 100;
numArray5[12] = 120;
numArray5[11] = 53;
numArray5[11] = 102;
numArray5[11] = 111;
numArray5[10] = 102;
numArray5[10] = 51;
numArray5[10] = 120;
numArray5[9] = 102;
numArray5[9] = 54;
numArray5[9] = 118;
numArray5[8] = 54;
numArray5[8] = 102;
numArray5[8] = 120;
numArray5[7] = 98;
numArray5[7] = 50;
numArray5[7] = 120;
numArray5[6] = 99;
numArray5[6] = 102;
numArray5[6] = 110;
numArray5[5] = 100;
numArray5[5] = 53;
numArray5[5] = 120;
numArray5[4] = 99;
numArray5[4] = 53;
numArray5[4] = 120;
numArray5[3] = 54;
numArray5[3] = 102;
numArray5[3] = 73;
numArray5[2] = 54;
numArray5[2] = 48;
numArray5[2] = 120;
numArray5[1] = 102;
numArray5[1] = 57;
numArray5[1] = 120;
numArray5[0] = 101;
numArray5[0] = 48;
numArray5[0] = 120;'''
    results = decode_array(string_to_decode, 3).replace("x", "")[::-1]
    return(results)
def decode_GEIKkGIvfNpHikmYeosNPoyAiuMORmesMcSBEcXYzasCDCDx_numARray1():
    """Recover the obfuscated numArray1 string for the second sample
    (no 'x' stripping, no reversal).

    NOTE(review): each index carries 4 assignments here, yet decode_array
    is called with 3 — verify against decode_array's semantics.
    """
    string_to_decode = '''numArray1[9] = 53;
numArray1[9] = 97;
numArray1[9] = 50;
numArray1[9] = 121;
numArray1[8] = 100;
numArray1[8] = 101;
numArray1[8] = 57;
numArray1[8] = 110;
numArray1[7] = 98;
numArray1[7] = 101;
numArray1[7] = 102;
numArray1[7] = 104;
numArray1[6] = 55;
numArray1[6] = 50;
numArray1[6] = 101;
numArray1[6] = 71;
numArray1[5] = 97;
numArray1[5] = 97;
numArray1[5] = 57;
numArray1[5] = 69;
numArray1[4] = 102;
numArray1[4] = 99;
numArray1[4] = 50;
numArray1[4] = 99;
numArray1[3] = 51;
numArray1[3] = 50;
numArray1[3] = 101;
numArray1[3] = 105;
numArray1[2] = 50;
numArray1[2] = 97;
numArray1[2] = 48;
numArray1[2] = 112;
numArray1[1] = 52;
numArray1[1] = 102;
numArray1[1] = 100;
numArray1[1] = 67;
numArray1[0] = 51;
numArray1[0] = 57;
numArray1[0] = 100;
numArray1[0] = 114;'''
    results = decode_array(string_to_decode, 3)
    return(results)
def decode_GEIKkGIvfNpHikmYeosNPoyAiuMORmesMcSBEcXYzasCDCDx_numARray2():
    """Recover the obfuscated numArray2 string for the second sample
    (5 assignments per index; result reversed, no 'x' stripping)."""
    string_to_decode = '''numArray2[68] = 55;
numArray2[68] = 99;
numArray2[68] = 51;
numArray2[68] = 97;
numArray2[68] = 108;
numArray2[67] = 56;
numArray2[67] = 99;
numArray2[67] = 57;
numArray2[67] = 99;
numArray2[67] = 109;
numArray2[66] = 52;
numArray2[66] = 56;
numArray2[66] = 48;
numArray2[66] = 48;
numArray2[66] = 116;
numArray2[65] = 50;
numArray2[65] = 52;
numArray2[65] = 51;
numArray2[65] = 102;
numArray2[65] = 104;
numArray2[64] = 48;
numArray2[64] = 48;
numArray2[64] = 97;
numArray2[64] = 100;
numArray2[64] = 46;
numArray2[63] = 102;
numArray2[63] = 48;
numArray2[63] = 53;
numArray2[63] = 53;
numArray2[63] = 54;
numArray2[62] = 98;
numArray2[62] = 55;
numArray2[62] = 48;
numArray2[62] = 99;
numArray2[62] = 57;
numArray2[61] = 52;
numArray2[61] = 52;
numArray2[61] = 49;
numArray2[61] = 50;
numArray2[61] = 65;
numArray2[60] = 97;
numArray2[60] = 49;
numArray2[60] = 102;
numArray2[60] = 97;
numArray2[60] = 57;
numArray2[59] = 52;
numArray2[59] = 98;
numArray2[59] = 97;
numArray2[59] = 50;
numArray2[59] = 70;
numArray2[58] = 102;
numArray2[58] = 50;
numArray2[58] = 49;
numArray2[58] = 97;
numArray2[58] = 67;
numArray2[57] = 51;
numArray2[57] = 54;
numArray2[57] = 53;
numArray2[57] = 101;
numArray2[57] = 52;
numArray2[56] = 55;
numArray2[56] = 51;
numArray2[56] = 56;
numArray2[56] = 55;
numArray2[56] = 51;
numArray2[55] = 97;
numArray2[55] = 49;
numArray2[55] = 52;
numArray2[55] = 49;
numArray2[55] = 49;
numArray2[54] = 97;
numArray2[54] = 101;
numArray2[54] = 56;
numArray2[54] = 55;
numArray2[54] = 53;
numArray2[53] = 98;
numArray2[53] = 101;
numArray2[53] = 49;
numArray2[53] = 98;
numArray2[53] = 48;
numArray2[52] = 49;
numArray2[52] = 99;
numArray2[52] = 98;
numArray2[52] = 54;
numArray2[52] = 65;
numArray2[51] = 55;
numArray2[51] = 100;
numArray2[51] = 98;
numArray2[51] = 48;
numArray2[51] = 51;
numArray2[50] = 49;
numArray2[50] = 99;
numArray2[50] = 56;
numArray2[50] = 54;
numArray2[50] = 50;
numArray2[49] = 50;
numArray2[49] = 53;
numArray2[49] = 101;
numArray2[49] = 49;
numArray2[49] = 48;
numArray2[48] = 56;
numArray2[48] = 53;
numArray2[48] = 53;
numArray2[48] = 97;
numArray2[48] = 49;
numArray2[47] = 57;
numArray2[47] = 55;
numArray2[47] = 48;
numArray2[47] = 102;
numArray2[47] = 51;
numArray2[46] = 56;
numArray2[46] = 49;
numArray2[46] = 101;
numArray2[46] = 99;
numArray2[46] = 49;
numArray2[45] = 51;
numArray2[45] = 54;
numArray2[45] = 101;
numArray2[45] = 99;
numArray2[45] = 50;
numArray2[44] = 54;
numArray2[44] = 51;
numArray2[44] = 99;
numArray2[44] = 49;
numArray2[44] = 66;
numArray2[43] = 50;
numArray2[43] = 54;
numArray2[43] = 55;
numArray2[43] = 56;
numArray2[43] = 54;
numArray2[42] = 101;
numArray2[42] = 100;
numArray2[42] = 50;
numArray2[42] = 97;
numArray2[42] = 48;
numArray2[41] = 99;
numArray2[41] = 50;
numArray2[41] = 97;
numArray2[41] = 102;
numArray2[41] = 57;
numArray2[40] = 98;
numArray2[40] = 102;
numArray2[40] = 54;
numArray2[40] = 55;
numArray2[40] = 69;
numArray2[39] = 52;
numArray2[39] = 102;
numArray2[39] = 52;
numArray2[39] = 56;
numArray2[39] = 49;
numArray2[38] = 51;
numArray2[38] = 101;
numArray2[38] = 49;
numArray2[38] = 54;
numArray2[38] = 49;
numArray2[37] = 48;
numArray2[37] = 49;
numArray2[37] = 48;
numArray2[37] = 53;
numArray2[37] = 52;
numArray2[36] = 54;
numArray2[36] = 56;
numArray2[36] = 53;
numArray2[36] = 50;
numArray2[36] = 54;
numArray2[35] = 49;
numArray2[35] = 49;
numArray2[35] = 99;
numArray2[35] = 53;
numArray2[35] = 52;
numArray2[34] = 55;
numArray2[34] = 54;
numArray2[34] = 102;
numArray2[34] = 56;
numArray2[34] = 50;
numArray2[33] = 57;
numArray2[33] = 98;
numArray2[33] = 50;
numArray2[33] = 52;
numArray2[33] = 55;
numArray2[32] = 98;
numArray2[32] = 101;
numArray2[32] = 48;
numArray2[32] = 53;
numArray2[32] = 49;
numArray2[31] = 97;
numArray2[31] = 50;
numArray2[31] = 51;
numArray2[31] = 56;
numArray2[31] = 47;
numArray2[30] = 101;
numArray2[30] = 57;
numArray2[30] = 99;
numArray2[30] = 97;
numArray2[30] = 101;
numArray2[29] = 49;
numArray2[29] = 98;
numArray2[29] = 49;
numArray2[29] = 55;
numArray2[29] = 115;
numArray2[28] = 52;
numArray2[28] = 98;
numArray2[28] = 101;
numArray2[28] = 50;
numArray2[28] = 97;
numArray2[27] = 101;
numArray2[27] = 54;
numArray2[27] = 54;
numArray2[27] = 100;
numArray2[27] = 98;
numArray2[26] = 56;
numArray2[26] = 49;
numArray2[26] = 55;
numArray2[26] = 101;
numArray2[26] = 47;
numArray2[25] = 56;
numArray2[25] = 55;
numArray2[25] = 50;
numArray2[25] = 55;
numArray2[25] = 109;
numArray2[24] = 52;
numArray2[24] = 49;
numArray2[24] = 102;
numArray2[24] = 50;
numArray2[24] = 111;
numArray2[23] = 51;
numArray2[23] = 53;
numArray2[23] = 98;
numArray2[23] = 52;
numArray2[23] = 99;
numArray2[22] = 99;
numArray2[22] = 56;
numArray2[22] = 97;
numArray2[22] = 48;
numArray2[22] = 46;
numArray2[21] = 53;
numArray2[21] = 98;
numArray2[21] = 50;
numArray2[21] = 49;
numArray2[21] = 114;
numArray2[20] = 102;
numArray2[20] = 100;
numArray2[20] = 98;
numArray2[20] = 50;
numArray2[20] = 111;
numArray2[19] = 52;
numArray2[19] = 52;
numArray2[19] = 102;
numArray2[19] = 57;
numArray2[19] = 122;
numArray2[18] = 57;
numArray2[18] = 50;
numArray2[18] = 101;
numArray2[18] = 54;
numArray2[18] = 111;
numArray2[17] = 52;
numArray2[17] = 54;
numArray2[17] = 57;
numArray2[17] = 50;
numArray2[17] = 114;
numArray2[16] = 51;
numArray2[16] = 51;
numArray2[16] = 53;
numArray2[16] = 99;
numArray2[16] = 111;
numArray2[15] = 48;
numArray2[15] = 102;
numArray2[15] = 48;
numArray2[15] = 101;
numArray2[15] = 120;
numArray2[14] = 50;
numArray2[14] = 99;
numArray2[14] = 52;
numArray2[14] = 99;
numArray2[14] = 111;
numArray2[13] = 98;
numArray2[13] = 50;
numArray2[13] = 98;
numArray2[13] = 51;
numArray2[13] = 98;
numArray2[12] = 56;
numArray2[12] = 102;
numArray2[12] = 100;
numArray2[12] = 100;
numArray2[12] = 111;
numArray2[11] = 56;
numArray2[11] = 57;
numArray2[11] = 97;
numArray2[11] = 101;
numArray2[11] = 108;
numArray2[10] = 102;
numArray2[10] = 101;
numArray2[10] = 52;
numArray2[10] = 98;
numArray2[10] = 111;
numArray2[9] = 51;
numArray2[9] = 48;
numArray2[9] = 50;
numArray2[9] = 53;
numArray2[9] = 114;
numArray2[8] = 97;
numArray2[8] = 102;
numArray2[8] = 48;
numArray2[8] = 57;
numArray2[8] = 111;
numArray2[7] = 97;
numArray2[7] = 56;
numArray2[7] = 98;
numArray2[7] = 49;
numArray2[7] = 99;
numArray2[6] = 50;
numArray2[6] = 99;
numArray2[6] = 102;
numArray2[6] = 102;
numArray2[6] = 47;
numArray2[5] = 56;
numArray2[5] = 102;
numArray2[5] = 50;
numArray2[5] = 56;
numArray2[5] = 47;
numArray2[4] = 97;
numArray2[4] = 98;
numArray2[4] = 56;
numArray2[4] = 52;
numArray2[4] = 58;
numArray2[3] = 98;
numArray2[3] = 54;
numArray2[3] = 51;
numArray2[3] = 57;
numArray2[3] = 112;
numArray2[2] = 97;
numArray2[2] = 97;
numArray2[2] = 101;
numArray2[2] = 98;
numArray2[2] = 116;
numArray2[1] = 97;
numArray2[1] = 55;
numArray2[1] = 55;
numArray2[1] = 50;
numArray2[1] = 116;
numArray2[0] = 102;
numArray2[0] = 55;
numArray2[0] = 53;
numArray2[0] = 51;
numArray2[0] = 104;'''
    results = decode_array(string_to_decode, 5) [::-1]
    return(results)
def decode_GEIKkGIvfNpHikmYeosNPoyAiuMORmesMcSBEcXYzasCDCDx_numARray3():
    """Recover the obfuscated numArray3 string for the second sample
    (5 assignments per index; result reversed, no 'x' stripping)."""
    string_to_decode = ''' numArray3[68] = 51;
numArray3[68] = 102;
numArray3[68] = 55;
numArray3[68] = 52;
numArray3[68] = 108;
numArray3[67] = 54;
numArray3[67] = 52;
numArray3[67] = 99;
numArray3[67] = 102;
numArray3[67] = 109;
numArray3[66] = 56;
numArray3[66] = 56;
numArray3[66] = 51;
numArray3[66] = 49;
numArray3[66] = 116;
numArray3[65] = 57;
numArray3[65] = 50;
numArray3[65] = 57;
numArray3[65] = 48;
numArray3[65] = 104;
numArray3[64] = 57;
numArray3[64] = 54;
numArray3[64] = 102;
numArray3[64] = 48;
numArray3[64] = 46;
numArray3[63] = 55;
numArray3[63] = 57;
numArray3[63] = 52;
numArray3[63] = 100;
numArray3[63] = 50;
numArray3[62] = 57;
numArray3[62] = 57;
numArray3[62] = 52;
numArray3[62] = 99;
numArray3[62] = 69;
numArray3[61] = 52;
numArray3[61] = 51;
numArray3[61] = 52;
numArray3[61] = 100;
numArray3[61] = 67;
numArray3[60] = 51;
numArray3[60] = 54;
numArray3[60] = 98;
numArray3[60] = 54;
numArray3[60] = 65;
numArray3[59] = 99;
numArray3[59] = 51;
numArray3[59] = 56;
numArray3[59] = 99;
numArray3[59] = 54;
numArray3[58] = 100;
numArray3[58] = 99;
numArray3[58] = 98;
numArray3[58] = 56;
numArray3[58] = 68;
numArray3[57] = 56;
numArray3[57] = 55;
numArray3[57] = 98;
numArray3[57] = 54;
numArray3[57] = 67;
numArray3[56] = 51;
numArray3[56] = 51;
numArray3[56] = 101;
numArray3[56] = 102;
numArray3[56] = 65;
numArray3[55] = 99;
numArray3[55] = 55;
numArray3[55] = 57;
numArray3[55] = 49;
numArray3[55] = 52;
numArray3[54] = 100;
numArray3[54] = 56;
numArray3[54] = 98;
numArray3[54] = 49;
numArray3[54] = 68;
numArray3[53] = 53;
numArray3[53] = 99;
numArray3[53] = 101;
numArray3[53] = 56;
numArray3[53] = 69;
numArray3[52] = 56;
numArray3[52] = 48;
numArray3[52] = 57;
numArray3[52] = 51;
numArray3[52] = 68;
numArray3[51] = 102;
numArray3[51] = 99;
numArray3[51] = 53;
numArray3[51] = 98;
numArray3[51] = 67;
numArray3[50] = 101;
numArray3[50] = 99;
numArray3[50] = 49;
numArray3[50] = 101;
numArray3[50] = 56;
numArray3[49] = 102;
numArray3[49] = 55;
numArray3[49] = 56;
numArray3[49] = 55;
numArray3[49] = 50;
numArray3[48] = 48;
numArray3[48] = 55;
numArray3[48] = 50;
numArray3[48] = 54;
numArray3[48] = 70;
numArray3[47] = 101;
numArray3[47] = 101;
numArray3[47] = 101;
numArray3[47] = 101;
numArray3[47] = 56;
numArray3[46] = 102;
numArray3[46] = 49;
numArray3[46] = 48;
numArray3[46] = 101;
numArray3[46] = 55;
numArray3[45] = 98;
numArray3[45] = 102;
numArray3[45] = 50;
numArray3[45] = 57;
numArray3[45] = 54;
numArray3[44] = 101;
numArray3[44] = 52;
numArray3[44] = 99;
numArray3[44] = 57;
numArray3[44] = 55;
numArray3[43] = 55;
numArray3[43] = 50;
numArray3[43] = 53;
numArray3[43] = 50;
numArray3[43] = 53;
numArray3[42] = 56;
numArray3[42] = 102;
numArray3[42] = 50;
numArray3[42] = 54;
numArray3[42] = 50;
numArray3[41] = 56;
numArray3[41] = 57;
numArray3[41] = 48;
numArray3[41] = 54;
numArray3[41] = 65;
numArray3[40] = 97;
numArray3[40] = 56;
numArray3[40] = 99;
numArray3[40] = 98;
numArray3[40] = 55;
numArray3[39] = 101;
numArray3[39] = 98;
numArray3[39] = 100;
numArray3[39] = 51;
numArray3[39] = 52;
numArray3[38] = 101;
numArray3[38] = 98;
numArray3[38] = 57;
numArray3[38] = 52;
numArray3[38] = 48;
numArray3[37] = 101;
numArray3[37] = 101;
numArray3[37] = 53;
numArray3[37] = 52;
numArray3[37] = 69;
numArray3[36] = 98;
numArray3[36] = 49;
numArray3[36] = 50;
numArray3[36] = 52;
numArray3[36] = 70;
numArray3[35] = 57;
numArray3[35] = 54;
numArray3[35] = 49;
numArray3[35] = 55;
numArray3[35] = 50;
numArray3[34] = 97;
numArray3[34] = 57;
numArray3[34] = 51;
numArray3[34] = 48;
numArray3[34] = 68;
numArray3[33] = 97;
numArray3[33] = 99;
numArray3[33] = 56;
numArray3[33] = 57;
numArray3[33] = 51;
numArray3[32] = 99;
numArray3[32] = 52;
numArray3[32] = 57;
numArray3[32] = 48;
numArray3[32] = 57;
numArray3[31] = 53;
numArray3[31] = 56;
numArray3[31] = 100;
numArray3[31] = 56;
numArray3[31] = 47;
numArray3[30] = 98;
numArray3[30] = 55;
numArray3[30] = 100;
numArray3[30] = 97;
numArray3[30] = 101;
numArray3[29] = 102;
numArray3[29] = 99;
numArray3[29] = 54;
numArray3[29] = 101;
numArray3[29] = 115;
numArray3[28] = 56;
numArray3[28] = 57;
numArray3[28] = 51;
numArray3[28] = 49;
numArray3[28] = 97;
numArray3[27] = 100;
numArray3[27] = 102;
numArray3[27] = 56;
numArray3[27] = 102;
numArray3[27] = 98;
numArray3[26] = 100;
numArray3[26] = 56;
numArray3[26] = 55;
numArray3[26] = 100;
numArray3[26] = 47;
numArray3[25] = 101;
numArray3[25] = 54;
numArray3[25] = 98;
numArray3[25] = 101;
numArray3[25] = 109;
numArray3[24] = 51;
numArray3[24] = 57;
numArray3[24] = 48;
numArray3[24] = 50;
numArray3[24] = 111;
numArray3[23] = 55;
numArray3[23] = 51;
numArray3[23] = 50;
numArray3[23] = 57;
numArray3[23] = 99;
numArray3[22] = 102;
numArray3[22] = 48;
numArray3[22] = 101;
numArray3[22] = 50;
numArray3[22] = 46;
numArray3[21] = 98;
numArray3[21] = 53;
numArray3[21] = 99;
numArray3[21] = 102;
numArray3[21] = 114;
numArray3[20] = 57;
numArray3[20] = 56;
numArray3[20] = 102;
numArray3[20] = 54;
numArray3[20] = 111;
numArray3[19] = 52;
numArray3[19] = 53;
numArray3[19] = 48;
numArray3[19] = 99;
numArray3[19] = 122;
numArray3[18] = 51;
numArray3[18] = 101;
numArray3[18] = 97;
numArray3[18] = 53;
numArray3[18] = 111;
numArray3[17] = 97;
numArray3[17] = 53;
numArray3[17] = 99;
numArray3[17] = 57;
numArray3[17] = 114;
numArray3[16] = 52;
numArray3[16] = 57;
numArray3[16] = 55;
numArray3[16] = 54;
numArray3[16] = 111;
numArray3[15] = 57;
numArray3[15] = 49;
numArray3[15] = 53;
numArray3[15] = 55;
numArray3[15] = 120;
numArray3[14] = 97;
numArray3[14] = 100;
numArray3[14] = 50;
numArray3[14] = 102;
numArray3[14] = 111;
numArray3[13] = 52;
numArray3[13] = 57;
numArray3[13] = 53;
numArray3[13] = 51;
numArray3[13] = 98;
numArray3[12] = 56;
numArray3[12] = 56;
numArray3[12] = 51;
numArray3[12] = 99;
numArray3[12] = 111;
numArray3[11] = 52;
numArray3[11] = 56;
numArray3[11] = 53;
numArray3[11] = 98;
numArray3[11] = 108;
numArray3[10] = 100;
numArray3[10] = 48;
numArray3[10] = 56;
numArray3[10] = 54;
numArray3[10] = 111;
numArray3[9] = 49;
numArray3[9] = 97;
numArray3[9] = 52;
numArray3[9] = 99;
numArray3[9] = 114;
numArray3[8] = 49;
numArray3[8] = 97;
numArray3[8] = 51;
numArray3[8] = 98;
numArray3[8] = 111;
numArray3[7] = 101;
numArray3[7] = 100;
numArray3[7] = 56;
numArray3[7] = 55;
numArray3[7] = 99;
numArray3[6] = 55;
numArray3[6] = 52;
numArray3[6] = 97;
numArray3[6] = 99;
numArray3[6] = 47;
numArray3[5] = 57;
numArray3[5] = 48;
numArray3[5] = 98;
numArray3[5] = 49;
numArray3[5] = 47;
numArray3[4] = 100;
numArray3[4] = 57;
numArray3[4] = 51;
numArray3[4] = 98;
numArray3[4] = 58;
numArray3[3] = 97;
numArray3[3] = 100;
numArray3[3] = 57;
numArray3[3] = 97;
numArray3[3] = 112;
numArray3[2] = 55;
numArray3[2] = 99;
numArray3[2] = 100;
numArray3[2] = 55;
numArray3[2] = 116;
numArray3[1] = 99;
numArray3[1] = 54;
numArray3[1] = 99;
numArray3[1] = 57;
numArray3[1] = 116;
numArray3[0] = 50;
numArray3[0] = 48;
numArray3[0] = 101;
numArray3[0] = 52;
numArray3[0] = 104;'''
    results = decode_array(string_to_decode, 5) [::-1]
    return(results)
def decode_GEIKkGIvfNpHikmYeosNPoyAiuMORmesMcSBEcXYzasCDCDx_numARray4():
    """Decode the obfuscated ``numArray4`` payload fragment.

    The embedded text is decompiled source in which every output character
    is written as five successive assignments to the same array slot; only
    the 5th value of each group is the real character code (see
    ``decode_array``).  The assignments run from index 68 down to 0, so the
    decoded string is reversed before being returned.
    """
    string_to_decode = '''numArray4[68] = 97;
numArray4[68] = 54;
numArray4[68] = 97;
numArray4[68] = 48;
numArray4[68] = 108;
numArray4[67] = 56;
numArray4[67] = 53;
numArray4[67] = 54;
numArray4[67] = 49;
numArray4[67] = 109;
numArray4[66] = 50;
numArray4[66] = 98;
numArray4[66] = 53;
numArray4[66] = 49;
numArray4[66] = 116;
numArray4[65] = 98;
numArray4[65] = 54;
numArray4[65] = 51;
numArray4[65] = 101;
numArray4[65] = 104;
numArray4[64] = 99;
numArray4[64] = 56;
numArray4[64] = 50;
numArray4[64] = 57;
numArray4[64] = 46;
numArray4[63] = 55;
numArray4[63] = 54;
numArray4[63] = 57;
numArray4[63] = 98;
numArray4[63] = 53;
numArray4[62] = 97;
numArray4[62] = 49;
numArray4[62] = 55;
numArray4[62] = 97;
numArray4[62] = 55;
numArray4[61] = 101;
numArray4[61] = 51;
numArray4[61] = 55;
numArray4[61] = 55;
numArray4[61] = 56;
numArray4[60] = 101;
numArray4[60] = 98;
numArray4[60] = 54;
numArray4[60] = 99;
numArray4[60] = 54;
numArray4[59] = 51;
numArray4[59] = 57;
numArray4[59] = 101;
numArray4[59] = 55;
numArray4[59] = 56;
numArray4[58] = 54;
numArray4[58] = 53;
numArray4[58] = 97;
numArray4[58] = 52;
numArray4[58] = 69;
numArray4[57] = 99;
numArray4[57] = 53;
numArray4[57] = 55;
numArray4[57] = 99;
numArray4[57] = 67;
numArray4[56] = 55;
numArray4[56] = 50;
numArray4[56] = 52;
numArray4[56] = 54;
numArray4[56] = 50;
numArray4[55] = 50;
numArray4[55] = 100;
numArray4[55] = 50;
numArray4[55] = 102;
numArray4[55] = 55;
numArray4[54] = 101;
numArray4[54] = 102;
numArray4[54] = 53;
numArray4[54] = 100;
numArray4[54] = 57;
numArray4[53] = 100;
numArray4[53] = 98;
numArray4[53] = 52;
numArray4[53] = 100;
numArray4[53] = 57;
numArray4[52] = 97;
numArray4[52] = 54;
numArray4[52] = 49;
numArray4[52] = 102;
numArray4[52] = 65;
numArray4[51] = 50;
numArray4[51] = 102;
numArray4[51] = 51;
numArray4[51] = 49;
numArray4[51] = 49;
numArray4[50] = 100;
numArray4[50] = 101;
numArray4[50] = 100;
numArray4[50] = 53;
numArray4[50] = 52;
numArray4[49] = 56;
numArray4[49] = 98;
numArray4[49] = 57;
numArray4[49] = 100;
numArray4[49] = 50;
numArray4[48] = 102;
numArray4[48] = 53;
numArray4[48] = 50;
numArray4[48] = 97;
numArray4[48] = 56;
numArray4[47] = 55;
numArray4[47] = 98;
numArray4[47] = 49;
numArray4[47] = 51;
numArray4[47] = 50;
numArray4[46] = 52;
numArray4[46] = 48;
numArray4[46] = 101;
numArray4[46] = 48;
numArray4[46] = 53;
numArray4[45] = 99;
numArray4[45] = 53;
numArray4[45] = 100;
numArray4[45] = 98;
numArray4[45] = 55;
numArray4[44] = 98;
numArray4[44] = 53;
numArray4[44] = 52;
numArray4[44] = 97;
numArray4[44] = 70;
numArray4[43] = 54;
numArray4[43] = 97;
numArray4[43] = 51;
numArray4[43] = 48;
numArray4[43] = 53;
numArray4[42] = 98;
numArray4[42] = 101;
numArray4[42] = 98;
numArray4[42] = 98;
numArray4[42] = 56;
numArray4[41] = 53;
numArray4[41] = 51;
numArray4[41] = 53;
numArray4[41] = 57;
numArray4[41] = 49;
numArray4[40] = 51;
numArray4[40] = 50;
numArray4[40] = 49;
numArray4[40] = 101;
numArray4[40] = 52;
numArray4[39] = 102;
numArray4[39] = 98;
numArray4[39] = 102;
numArray4[39] = 50;
numArray4[39] = 51;
numArray4[38] = 54;
numArray4[38] = 101;
numArray4[38] = 50;
numArray4[38] = 102;
numArray4[38] = 55;
numArray4[37] = 49;
numArray4[37] = 101;
numArray4[37] = 51;
numArray4[37] = 51;
numArray4[37] = 69;
numArray4[36] = 102;
numArray4[36] = 48;
numArray4[36] = 49;
numArray4[36] = 100;
numArray4[36] = 49;
numArray4[35] = 49;
numArray4[35] = 54;
numArray4[35] = 50;
numArray4[35] = 97;
numArray4[35] = 69;
numArray4[34] = 101;
numArray4[34] = 99;
numArray4[34] = 99;
numArray4[34] = 102;
numArray4[34] = 57;
numArray4[33] = 49;
numArray4[33] = 53;
numArray4[33] = 50;
numArray4[33] = 48;
numArray4[33] = 66;
numArray4[32] = 101;
numArray4[32] = 102;
numArray4[32] = 56;
numArray4[32] = 97;
numArray4[32] = 70;
numArray4[31] = 101;
numArray4[31] = 55;
numArray4[31] = 53;
numArray4[31] = 53;
numArray4[31] = 47;
numArray4[30] = 50;
numArray4[30] = 50;
numArray4[30] = 52;
numArray4[30] = 101;
numArray4[30] = 101;
numArray4[29] = 54;
numArray4[29] = 52;
numArray4[29] = 97;
numArray4[29] = 57;
numArray4[29] = 115;
numArray4[28] = 99;
numArray4[28] = 97;
numArray4[28] = 54;
numArray4[28] = 97;
numArray4[28] = 97;
numArray4[27] = 51;
numArray4[27] = 55;
numArray4[27] = 99;
numArray4[27] = 54;
numArray4[27] = 98;
numArray4[26] = 51;
numArray4[26] = 102;
numArray4[26] = 99;
numArray4[26] = 97;
numArray4[26] = 47;
numArray4[25] = 57;
numArray4[25] = 99;
numArray4[25] = 52;
numArray4[25] = 56;
numArray4[25] = 109;
numArray4[24] = 57;
numArray4[24] = 97;
numArray4[24] = 53;
numArray4[24] = 52;
numArray4[24] = 111;
numArray4[23] = 97;
numArray4[23] = 99;
numArray4[23] = 102;
numArray4[23] = 98;
numArray4[23] = 99;
numArray4[22] = 53;
numArray4[22] = 101;
numArray4[22] = 53;
numArray4[22] = 54;
numArray4[22] = 46;
numArray4[21] = 56;
numArray4[21] = 48;
numArray4[21] = 100;
numArray4[21] = 49;
numArray4[21] = 114;
numArray4[20] = 48;
numArray4[20] = 50;
numArray4[20] = 57;
numArray4[20] = 55;
numArray4[20] = 111;
numArray4[19] = 51;
numArray4[19] = 54;
numArray4[19] = 57;
numArray4[19] = 51;
numArray4[19] = 122;
numArray4[18] = 56;
numArray4[18] = 54;
numArray4[18] = 48;
numArray4[18] = 102;
numArray4[18] = 111;
numArray4[17] = 57;
numArray4[17] = 50;
numArray4[17] = 100;
numArray4[17] = 48;
numArray4[17] = 114;
numArray4[16] = 49;
numArray4[16] = 55;
numArray4[16] = 54;
numArray4[16] = 53;
numArray4[16] = 111;
numArray4[15] = 102;
numArray4[15] = 56;
numArray4[15] = 100;
numArray4[15] = 48;
numArray4[15] = 120;
numArray4[14] = 52;
numArray4[14] = 48;
numArray4[14] = 48;
numArray4[14] = 52;
numArray4[14] = 111;
numArray4[13] = 101;
numArray4[13] = 55;
numArray4[13] = 51;
numArray4[13] = 50;
numArray4[13] = 98;
numArray4[12] = 100;
numArray4[12] = 56;
numArray4[12] = 102;
numArray4[12] = 52;
numArray4[12] = 111;
numArray4[11] = 53;
numArray4[11] = 55;
numArray4[11] = 48;
numArray4[11] = 57;
numArray4[11] = 108;
numArray4[10] = 55;
numArray4[10] = 98;
numArray4[10] = 49;
numArray4[10] = 56;
numArray4[10] = 111;
numArray4[9] = 55;
numArray4[9] = 100;
numArray4[9] = 56;
numArray4[9] = 55;
numArray4[9] = 114;
numArray4[8] = 99;
numArray4[8] = 50;
numArray4[8] = 50;
numArray4[8] = 53;
numArray4[8] = 111;
numArray4[7] = 52;
numArray4[7] = 55;
numArray4[7] = 98;
numArray4[7] = 102;
numArray4[7] = 99;
numArray4[6] = 97;
numArray4[6] = 55;
numArray4[6] = 54;
numArray4[6] = 101;
numArray4[6] = 47;
numArray4[5] = 54;
numArray4[5] = 97;
numArray4[5] = 98;
numArray4[5] = 98;
numArray4[5] = 47;
numArray4[4] = 51;
numArray4[4] = 48;
numArray4[4] = 102;
numArray4[4] = 56;
numArray4[4] = 58;
numArray4[3] = 102;
numArray4[3] = 50;
numArray4[3] = 56;
numArray4[3] = 97;
numArray4[3] = 112;
numArray4[2] = 102;
numArray4[2] = 101;
numArray4[2] = 54;
numArray4[2] = 99;
numArray4[2] = 116;
numArray4[1] = 98;
numArray4[1] = 54;
numArray4[1] = 56;
numArray4[1] = 53;
numArray4[1] = 116;
numArray4[0] = 99;
numArray4[0] = 54;
numArray4[0] = 57;
numArray4[0] = 50;
numArray4[0] = 104;'''
    # Keep every 5th value, then reverse to restore reading order.
    results = decode_array(string_to_decode, 5) [::-1]
    return(results)
def decode_GEIKkGIvfNpHikmYeosNPoyAiuMORmesMcSBEcXYzasCDCDx_numARray5():
    """Decode the obfuscated ``numArray5`` payload fragment.

    Same 5-assignments-per-character scheme as the other numARray helpers,
    but this fragment additionally pads the decoded stream with literal
    'x' characters, which are stripped before the string is reversed.
    """
    string_to_decode = '''numArray5[42] = 98;
numArray5[42] = 49;
numArray5[42] = 55;
numArray5[42] = 56;
numArray5[42] = 120;
numArray5[41] = 53;
numArray5[41] = 48;
numArray5[41] = 57;
numArray5[41] = 54;
numArray5[41] = 103;
numArray5[40] = 100;
numArray5[40] = 56;
numArray5[40] = 98;
numArray5[40] = 51;
numArray5[40] = 120;
numArray5[39] = 101;
numArray5[39] = 50;
numArray5[39] = 100;
numArray5[39] = 55;
numArray5[39] = 120;
numArray5[38] = 48;
numArray5[38] = 50;
numArray5[38] = 100;
numArray5[38] = 100;
numArray5[38] = 110;
numArray5[37] = 97;
numArray5[37] = 52;
numArray5[37] = 98;
numArray5[37] = 56;
numArray5[37] = 120;
numArray5[36] = 56;
numArray5[36] = 100;
numArray5[36] = 97;
numArray5[36] = 52;
numArray5[36] = 120;
numArray5[35] = 49;
numArray5[35] = 49;
numArray5[35] = 102;
numArray5[35] = 100;
numArray5[35] = 105;
numArray5[34] = 97;
numArray5[34] = 53;
numArray5[34] = 54;
numArray5[34] = 52;
numArray5[34] = 120;
numArray5[33] = 57;
numArray5[33] = 55;
numArray5[33] = 55;
numArray5[33] = 97;
numArray5[33] = 114;
numArray5[32] = 51;
numArray5[32] = 50;
numArray5[32] = 97;
numArray5[32] = 51;
numArray5[32] = 120;
numArray5[31] = 100;
numArray5[31] = 55;
numArray5[31] = 50;
numArray5[31] = 49;
numArray5[31] = 116;
numArray5[30] = 97;
numArray5[30] = 55;
numArray5[30] = 102;
numArray5[30] = 98;
numArray5[30] = 120;
numArray5[29] = 101;
numArray5[29] = 99;
numArray5[29] = 55;
numArray5[29] = 55;
numArray5[29] = 83;
numArray5[28] = 97;
numArray5[28] = 97;
numArray5[28] = 53;
numArray5[28] = 49;
numArray5[28] = 120;
numArray5[27] = 52;
numArray5[27] = 56;
numArray5[27] = 100;
numArray5[27] = 100;
numArray5[27] = 100;
numArray5[26] = 52;
numArray5[26] = 52;
numArray5[26] = 51;
numArray5[26] = 97;
numArray5[26] = 120;
numArray5[25] = 55;
numArray5[25] = 54;
numArray5[25] = 100;
numArray5[25] = 102;
numArray5[25] = 97;
numArray5[24] = 49;
numArray5[24] = 99;
numArray5[24] = 56;
numArray5[24] = 99;
numArray5[24] = 120;
numArray5[23] = 53;
numArray5[23] = 49;
numArray5[23] = 52;
numArray5[23] = 54;
numArray5[23] = 120;
numArray5[22] = 48;
numArray5[22] = 50;
numArray5[22] = 101;
numArray5[22] = 52;
numArray5[22] = 111;
numArray5[21] = 102;
numArray5[21] = 99;
numArray5[21] = 102;
numArray5[21] = 53;
numArray5[21] = 120;
numArray5[20] = 48;
numArray5[20] = 101;
numArray5[20] = 57;
numArray5[20] = 57;
numArray5[20] = 120;
numArray5[19] = 97;
numArray5[19] = 99;
numArray5[19] = 98;
numArray5[19] = 48;
numArray5[19] = 108;
numArray5[18] = 50;
numArray5[18] = 55;
numArray5[18] = 50;
numArray5[18] = 52;
numArray5[18] = 120;
numArray5[17] = 54;
numArray5[17] = 51;
numArray5[17] = 97;
numArray5[17] = 48;
numArray5[17] = 120;
numArray5[16] = 57;
numArray5[16] = 55;
numArray5[16] = 99;
numArray5[16] = 49;
numArray5[16] = 120;
numArray5[15] = 55;
numArray5[15] = 51;
numArray5[15] = 100;
numArray5[15] = 102;
numArray5[15] = 110;
numArray5[14] = 51;
numArray5[14] = 100;
numArray5[14] = 101;
numArray5[14] = 52;
numArray5[14] = 120;
numArray5[13] = 50;
numArray5[13] = 101;
numArray5[13] = 101;
numArray5[13] = 97;
numArray5[13] = 120;
numArray5[12] = 97;
numArray5[12] = 98;
numArray5[12] = 57;
numArray5[12] = 97;
numArray5[12] = 120;
numArray5[11] = 100;
numArray5[11] = 49;
numArray5[11] = 102;
numArray5[11] = 52;
numArray5[11] = 119;
numArray5[10] = 97;
numArray5[10] = 101;
numArray5[10] = 98;
numArray5[10] = 53;
numArray5[10] = 120;
numArray5[9] = 56;
numArray5[9] = 54;
numArray5[9] = 102;
numArray5[9] = 97;
numArray5[9] = 120;
numArray5[8] = 50;
numArray5[8] = 51;
numArray5[8] = 99;
numArray5[8] = 52;
numArray5[8] = 120;
numArray5[7] = 55;
numArray5[7] = 48;
numArray5[7] = 102;
numArray5[7] = 53;
numArray5[7] = 111;
numArray5[6] = 48;
numArray5[6] = 97;
numArray5[6] = 101;
numArray5[6] = 98;
numArray5[6] = 120;
numArray5[5] = 101;
numArray5[5] = 102;
numArray5[5] = 97;
numArray5[5] = 99;
numArray5[5] = 120;
numArray5[4] = 50;
numArray5[4] = 50;
numArray5[4] = 54;
numArray5[4] = 54;
numArray5[4] = 120;
numArray5[3] = 49;
numArray5[3] = 54;
numArray5[3] = 51;
numArray5[3] = 54;
numArray5[3] = 68;
numArray5[2] = 101;
numArray5[2] = 53;
numArray5[2] = 51;
numArray5[2] = 52;
numArray5[2] = 120;
numArray5[1] = 53;
numArray5[1] = 98;
numArray5[1] = 102;
numArray5[1] = 101;
numArray5[1] = 120;
numArray5[0] = 52;
numArray5[0] = 56;
numArray5[0] = 50;
numArray5[0] = 54;
numArray5[0] = 120;
'''
    # Keep every 5th value, drop the 'x' padding, reverse to reading order.
    results = decode_array(string_to_decode, 5).replace("x", "")[::-1]
    return(results)
def decode_GEIKkGIvfNpHikmYeosNPoyAiuMORmesMcSBEcXYzasCDCDx_numARray6():
    """Decode the obfuscated ``numArray6`` fragment (3 characters).

    Only every 5th numeric value per slot is a real character code; the
    result is reversed because the source assigns indices high-to-low.
    The stray ``inumArray6`` typo on one data line is harmless because
    ``decode_array`` only extracts the trailing ``<number>;`` token.
    """
    string_to_decode = '''numArray6[2] = 99;
numArray6[2] = 54;
inumArray6[2] = 54;
numArray6[2] = 97;
numArray6[2] = 62;
numArray6[1] = 53;
numArray6[1] = 49;
numArray6[1] = 101;
numArray6[1] = 102;
numArray6[1] = 112;
numArray6[0] = 50;
numArray6[0] = 57;
numArray6[0] = 51;
numArray6[0] = 102;
numArray6[0] = 60;
'''
    # Keep every 5th value, then reverse to restore reading order.
    results = decode_array(string_to_decode, 5) [::-1]
    return(results)
def decode_GEIKkGIvfNpHikmYeosNPoyAiuMORmesMcSBEcXYzasCDCDx_numARray7():
    """Decode the obfuscated ``numArray7`` fragment.

    Every 5th numeric value per slot is the real character code; the
    decoded string is reversed because the source writes indices
    high-to-low.
    """
    payload = ''' numArray7[2] = 55;
numArray7[2] = 50;
numArray7[2] = 50;
numArray7[2] = 102;
numArray7[2] = 62;
numArray7[1] = 54;
numArray7[1] = 48;
numArray7[1] = 54;
numArray7[1] = 98;
numArray7[1] = 112;
numArray7[0] = 97;
numArray7[0] = 53;
numArray7[0] = 49;
numArray7[0] = 57;
numArray7[0] = 60;
'''
    return decode_array(payload, 5)[::-1]
def decode_GEIKkGIvfNpHikmYeosNPoyAiuMORmesMcSBEcXYzasCDCDx_numARray8():
    """Decode the obfuscated ``numArray8`` fragment.

    Every 5th numeric value per slot is the real character code; the
    decoded string is reversed because the source writes indices
    high-to-low.
    """
    payload = ''' numArray8[3] = 99;
numArray8[3] = 56;
numArray8[3] = 56;
numArray8[3] = 97;
numArray8[3] = 62;
numArray8[2] = 100;
numArray8[2] = 50;
numArray8[2] = 97;
numArray8[2] = 98;
numArray8[2] = 112;
numArray8[1] = 50;
numArray8[1] = 99;
numArray8[1] = 55;
numArray8[1] = 98;
numArray8[1] = 47;
numArray8[0] = 98;
numArray8[0] = 97;
numArray8[0] = 48;
numArray8[0] = 49;
numArray8[0] = 60;
'''
    decoded = decode_array(payload, 5)
    return decoded[::-1]
def decode_FnuTeIWrgiBQUDSJWU_numArray2():
    """Decode the obfuscated ``array3`` payload fragment.

    Here each character is written as three successive assignments to the
    same slot, so ``decode_array`` keeps every 3rd value.  Unlike the
    numARray* helpers the result is NOT reversed.
    """
    string_to_decode = '''((short[])(object)array3)[35] = 51;
((short[])(object)array3)[35] = 48;
((short[])(object)array3)[35] = 53;
((short[])(object)array3)[34] = 54;
((short[])(object)array3)[34] = 49;
((short[])(object)array3)[34] = 56;
((short[])(object)array3)[33] = 52;
((short[])(object)array3)[33] = 101;
((short[])(object)array3)[33] = 101;
((short[])(object)array3)[32] = 48;
((short[])(object)array3)[32] = 53;
((short[])(object)array3)[32] = 52;
((short[])(object)array3)[31] = 52;
((short[])(object)array3)[31] = 97;
((short[])(object)array3)[31] = 98;
((short[])(object)array3)[30] = 52;
((short[])(object)array3)[30] = 48;
((short[])(object)array3)[30] = 53;
((short[])(object)array3)[29] = 57;
((short[])(object)array3)[29] = 53;
((short[])(object)array3)[29] = 52;
((short[])(object)array3)[28] = 57;
((short[])(object)array3)[28] = 99;
((short[])(object)array3)[28] = 57;
((short[])(object)array3)[27] = 48;
((short[])(object)array3)[27] = 48;
((short[])(object)array3)[27] = 54;
((short[])(object)array3)[26] = 98;
((short[])(object)array3)[26] = 57;
((short[])(object)array3)[26] = 51;
((short[])(object)array3)[25] = 52;
((short[])(object)array3)[25] = 57;
((short[])(object)array3)[25] = 50;
((short[])(object)array3)[24] = 49;
((short[])(object)array3)[24] = 53;
((short[])(object)array3)[24] = 50;
((short[])(object)array3)[23] = 48;
((short[])(object)array3)[23] = 102;
((short[])(object)array3)[23] = 45;
((short[])(object)array3)[22] = 98;
((short[])(object)array3)[22] = 100;
((short[])(object)array3)[22] = 55;
((short[])(object)array3)[21] = 56;
((short[])(object)array3)[21] = 55;
((short[])(object)array3)[21] = 101;
((short[])(object)array3)[20] = 48;
((short[])(object)array3)[20] = 50;
((short[])(object)array3)[20] = 54;
((short[])(object)array3)[19] = 101;
((short[])(object)array3)[19] = 48;
((short[])(object)array3)[19] = 56;
((short[])(object)array3)[18] = 102;
((short[])(object)array3)[18] = 50;
((short[])(object)array3)[18] = 45;
((short[])(object)array3)[17] = 55;
((short[])(object)array3)[17] = 56;
((short[])(object)array3)[17] = 53;
((short[])(object)array3)[16] = 53;
((short[])(object)array3)[16] = 101;
((short[])(object)array3)[16] = 101;
((short[])(object)array3)[15] = 53;
((short[])(object)array3)[15] = 100;
((short[])(object)array3)[15] = 56;
((short[])(object)array3)[14] = 48;
((short[])(object)array3)[14] = 98;
((short[])(object)array3)[14] = 52;
((short[])(object)array3)[13] = 99;
((short[])(object)array3)[13] = 57;
((short[])(object)array3)[13] = 45;
((short[])(object)array3)[12] = 99;
((short[])(object)array3)[12] = 56;
((short[])(object)array3)[12] = 50;
((short[])(object)array3)[11] = 97;
((short[])(object)array3)[11] = 102;
((short[])(object)array3)[11] = 49;
((short[])(object)array3)[10] = 98;
((short[])(object)array3)[10] = 53;
((short[])(object)array3)[10] = 55;
((short[])(object)array3)[9] = 50;
((short[])(object)array3)[9] = 51;
((short[])(object)array3)[9] = 100;
((short[])(object)array3)[8] = 99;
((short[])(object)array3)[8] = 98;
((short[])(object)array3)[8] = 45;
((short[])(object)array3)[7] = 50;
((short[])(object)array3)[7] = 56;
((short[])(object)array3)[7] = 54;
((short[])(object)array3)[6] = 57;
((short[])(object)array3)[6] = 48;
((short[])(object)array3)[6] = 56;
((short[])(object)array3)[5] = 100;
((short[])(object)array3)[5] = 53;
((short[])(object)array3)[5] = 53;
((short[])(object)array3)[4] = 100;
((short[])(object)array3)[4] = 51;
((short[])(object)array3)[4] = 51;
((short[])(object)array3)[3] = 53;
((short[])(object)array3)[3] = 49;
((short[])(object)array3)[3] = 99;
((short[])(object)array3)[2] = 53;
((short[])(object)array3)[2] = 56;
((short[])(object)array3)[2] = 99;
((short[])(object)array3)[1] = 97;
((short[])(object)array3)[1] = 53;
((short[])(object)array3)[1] = 53;
((short[])(object)array3)[0] = 97;
((short[])(object)array3)[0] = 51;
((short[])(object)array3)[0] = 53;'''
    # Keep every 3rd value; no reversal for this fragment.
    results = decode_array(string_to_decode, 3)
    return(results)
def decode_FnuTeIWrgiBQUDSJWU_numArray1():
    """Decode the obfuscated ``array2`` payload fragment.

    Each character is written as three successive assignments to the same
    slot, so ``decode_array`` keeps every 3rd value.  No reversal here.

    Fix: removed the dead ``array_of_strings = []`` local that the
    original created and never used (it shadowed nothing and leaked
    nothing, it was simply noise).

    :return: the decoded string
    """
    string_to_decode = '''((short[])(object)array2)[35] = 54;
((short[])(object)array2)[35] = 97;
((short[])(object)array2)[35] = 56;
((short[])(object)array2)[34] = 101;
((short[])(object)array2)[34] = 99;
((short[])(object)array2)[34] = 101;
((short[])(object)array2)[33] = 53;
((short[])(object)array2)[33] = 99;
((short[])(object)array2)[33] = 49;
((short[])(object)array2)[32] = 54;
((short[])(object)array2)[32] = 49;
((short[])(object)array2)[32] = 52;
((short[])(object)array2)[31] = 50;
((short[])(object)array2)[31] = 102;
((short[])(object)array2)[31] = 56;
((short[])(object)array2)[30] = 100;
((short[])(object)array2)[30] = 50;
((short[])(object)array2)[30] = 101;
((short[])(object)array2)[29] = 54;
((short[])(object)array2)[29] = 53;
((short[])(object)array2)[29] = 52;
((short[])(object)array2)[28] = 97;
((short[])(object)array2)[28] = 97;
((short[])(object)array2)[28] = 57;
((short[])(object)array2)[27] = 53;
((short[])(object)array2)[27] = 98;
((short[])(object)array2)[27] = 50;
((short[])(object)array2)[26] = 56;
((short[])(object)array2)[26] = 52;
((short[])(object)array2)[26] = 97;
((short[])(object)array2)[25] = 99;
((short[])(object)array2)[25] = 49;
((short[])(object)array2)[25] = 55;
((short[])(object)array2)[24] = 100;
((short[])(object)array2)[24] = 51;
((short[])(object)array2)[24] = 99;
((short[])(object)array2)[23] = 56;
((short[])(object)array2)[23] = 100;
((short[])(object)array2)[23] = 45;
((short[])(object)array2)[22] = 52;
((short[])(object)array2)[22] = 49;
((short[])(object)array2)[22] = 99;
((short[])(object)array2)[21] = 102;
((short[])(object)array2)[21] = 102;
((short[])(object)array2)[21] = 99;
((short[])(object)array2)[20] = 55;
((short[])(object)array2)[20] = 50;
((short[])(object)array2)[20] = 98;
((short[])(object)array2)[19] = 100;
((short[])(object)array2)[19] = 52;
((short[])(object)array2)[19] = 97;
((short[])(object)array2)[18] = 52;
((short[])(object)array2)[18] = 102;
((short[])(object)array2)[18] = 45;
((short[])(object)array2)[17] = 97;
((short[])(object)array2)[17] = 57;
((short[])(object)array2)[17] = 48;
((short[])(object)array2)[16] = 100;
((short[])(object)array2)[16] = 56;
((short[])(object)array2)[16] = 99;
((short[])(object)array2)[15] = 100;
((short[])(object)array2)[15] = 99;
((short[])(object)array2)[15] = 53;
((short[])(object)array2)[14] = 52;
((short[])(object)array2)[14] = 48;
((short[])(object)array2)[14] = 52;
((short[])(object)array2)[13] = 50;
((short[])(object)array2)[13] = 101;
((short[])(object)array2)[13] = 45;
((short[])(object)array2)[12] = 54;
((short[])(object)array2)[12] = 56;
((short[])(object)array2)[12] = 49;
((short[])(object)array2)[11] = 52;
((short[])(object)array2)[11] = 51;
((short[])(object)array2)[11] = 52;
((short[])(object)array2)[10] = 49;
((short[])(object)array2)[10] = 54;
((short[])(object)array2)[10] = 49;
((short[])(object)array2)[9] = 53;
((short[])(object)array2)[9] = 51;
((short[])(object)array2)[9] = 57;
((short[])(object)array2)[8] = 54;
((short[])(object)array2)[8] = 54;
((short[])(object)array2)[8] = 45;
((short[])(object)array2)[7] = 48;
((short[])(object)array2)[7] = 48;
((short[])(object)array2)[7] = 50;
((short[])(object)array2)[6] = 99;
((short[])(object)array2)[6] = 56;
((short[])(object)array2)[6] = 100;
((short[])(object)array2)[5] = 100;
((short[])(object)array2)[5] = 52;
((short[])(object)array2)[5] = 54;
((short[])(object)array2)[4] = 51;
((short[])(object)array2)[4] = 52;
((short[])(object)array2)[4] = 55;
((short[])(object)array2)[3] = 50;
((short[])(object)array2)[3] = 48;
((short[])(object)array2)[3] = 102;
((short[])(object)array2)[2] = 55;
((short[])(object)array2)[2] = 56;
((short[])(object)array2)[2] = 50;
((short[])(object)array2)[1] = 56;
((short[])(object)array2)[1] = 50;
((short[])(object)array2)[1] = 57;
((short[])(object)array2)[0] = 101;
((short[])(object)array2)[0] = 98;
((short[])(object)array2)[0] = 51;'''
    # Keep every 3rd value; no reversal for this fragment.
    return decode_array(string_to_decode, 3)
def decode_array(string_to_decode, index_of_string):
    """Recover the hidden string from obfuscated decompiler output.

    The obfuscated source assigns ``index_of_string`` successive dummy
    values to each array slot; only the LAST assignment of each group is
    the real character code, so we keep every ``index_of_string``-th
    number found in the text.

    :param string_to_decode: text containing ``... = <number>;`` lines
    :param index_of_string: size of each assignment group (e.g. 3 or 5)
    :return: the decoded characters joined into a single string
    """
    # Raw string: "\d" in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern Python).  Only digits followed by ';' are
    # matched, so array indices like "[35]" are ignored.
    codes = [int(s) for s in re.findall(r"(\d+);", string_to_decode)]
    # Every group's final value is the genuine character code.
    results = [code for index, code in enumerate(codes)
               if not ((index + 1) % index_of_string)]
    return ''.join(chr(i) for i in results)
def decode_payload():
    """Read ``payload.html`` and undo its character-substitution obfuscation.

    Each of the first ten characters of the key string is replaced by the
    character whose code point equals its key index.

    Fixes over the original: the file handle is now closed (``with``),
    the dead locals ``str3``/``str4`` are gone, and the two duplicated
    ``replace`` calls per iteration were removed (``str.replace`` already
    substitutes every occurrence, so they were no-ops).

    :return: the de-obfuscated payload text
    """
    key = "2ebheafcea4Cd"
    with open('payload.html', 'r') as payload:
        text = payload.read()
    # Strip the HTML paragraph markup wrapping the payload.
    text = text.replace("<p>", "").replace("</p>", "")
    # NOTE(review): only the first 10 of the key's 13 characters are used,
    # exactly as in the original decompiled logic -- confirm intended.
    for index in range(10):
        text = text.replace(key[index], chr(index))
    return text
def final_decode(str1):
    """Split *str1* on 'Q' separators and hex-encode every remaining char.

    :param str1: decoded payload text using 'Q' as a field separator
    :return: list of lowercase hex strings (no '0x' prefix), one per char
    """
    final_array = []
    for chunk in str1.split('Q'):
        # Empty chunks arise from leading/trailing/adjacent separators.
        if chunk:
            for ch in chunk:
                # format(..., 'x') yields the bare lowercase hex digits,
                # replacing the redundant str(hex(...)).replace('0x','')
                # chain (hex() already returns a str).
                final_array.append(format(ord(ch), 'x'))
    return final_array
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Dump every recovered string.  The decode_HojmKcMqFYaYKqxLdDAfSKDHMYMhyihZnH_*
    # and decode_GEIK...numARray1/3 helpers are defined elsewhere in this file.
    print(f"decode_FnuTeIWrgiBQUDSJWU_numArray1() = {decode_FnuTeIWrgiBQUDSJWU_numArray1()}")
    print(f"decode_FnuTeIWrgiBQUDSJWU_numArray2() = {decode_FnuTeIWrgiBQUDSJWU_numArray2()}")
    print(f"decode_HojmKcMqFYaYKqxLdDAfSKDHMYMhyihZnH_numArray1() = {decode_HojmKcMqFYaYKqxLdDAfSKDHMYMhyihZnH_numArray1()}")
    print(f"decode_HojmKcMqFYaYKqxLdDAfSKDHMYMhyihZnH_numArray2() = {decode_HojmKcMqFYaYKqxLdDAfSKDHMYMhyihZnH_numArray2()}")
    print(f"decode_HojmKcMqFYaYKqxLdDAfSKDHMYMhyihZnH_numArray3() = {decode_HojmKcMqFYaYKqxLdDAfSKDHMYMhyihZnH_numArray3()}")
    print(f"decode_HojmKcMqFYaYKqxLdDAfSKDHMYMhyihZnH_numArray4() = {decode_HojmKcMqFYaYKqxLdDAfSKDHMYMhyihZnH_numArray4()}")
    print(f"decode_HojmKcMqFYaYKqxLdDAfSKDHMYMhyihZnH_numArray5() = {decode_HojmKcMqFYaYKqxLdDAfSKDHMYMhyihZnH_numArray5()}")
    print(f"GEIKkGIvfNpHikmYeosNPoyAiuMORmesMcSBEcXYzasCDCDx_numARray1 = {decode_GEIKkGIvfNpHikmYeosNPoyAiuMORmesMcSBEcXYzasCDCDx_numARray1()}")
    print(f"decode_GEIKkGIvfNpHikmYeosNPoyAiuMORmesMcSBEcXYzasCDCDx_numARray2 = {decode_GEIKkGIvfNpHikmYeosNPoyAiuMORmesMcSBEcXYzasCDCDx_numARray2()}")
    print(f"decode_GEIKkGIvfNpHikmYeosNPoyAiuMORmesMcSBEcXYzasCDCDx_numARray3() = {decode_GEIKkGIvfNpHikmYeosNPoyAiuMORmesMcSBEcXYzasCDCDx_numARray3()}")
    print(f"decode_GEIKkGIvfNpHikmYeosNPoyAiuMORmesMcSBEcXYzasCDCDx_numARray4() = {decode_GEIKkGIvfNpHikmYeosNPoyAiuMORmesMcSBEcXYzasCDCDx_numARray4()}")
    print(f"decode_GEIKkGIvfNpHikmYeosNPoyAiuMORmesMcSBEcXYzasCDCDx_numARray5() = {decode_GEIKkGIvfNpHikmYeosNPoyAiuMORmesMcSBEcXYzasCDCDx_numARray5()}")
    print(f"decode_GEIKkGIvfNpHikmYeosNPoyAiuMORmesMcSBEcXYzasCDCDx_numARray6() = {decode_GEIKkGIvfNpHikmYeosNPoyAiuMORmesMcSBEcXYzasCDCDx_numARray6()}")
    print(f"decode_GEIKkGIvfNpHikmYeosNPoyAiuMORmesMcSBEcXYzasCDCDx_numARray7() = {decode_GEIKkGIvfNpHikmYeosNPoyAiuMORmesMcSBEcXYzasCDCDx_numARray7()}")
    print(f"decode_GEIKkGIvfNpHikmYeosNPoyAiuMORmesMcSBEcXYzasCDCDx_numARray8() = {decode_GEIKkGIvfNpHikmYeosNPoyAiuMORmesMcSBEcXYzasCDCDx_numARray8()}")
    # Final payload stage kept for reference; enable to write the 'output' file.
    #str1 = decode_payload()
    #print(str1)
    #final = final_decode(str1)
    #output = open('output','w')
    #output.write(str(final))
    #print(final)
|
from pyglet.window import mouse, key
from pyglet import gl
import app
import graphicutils as gu
from core import draw
from ui import widgets, elements, handlers
from core.context_wrapper import ContextWrapper
from constants import *
class ContextFrame(widgets.Frame):
    """Main viewport widget: draws the simulation context and routes input.

    Wraps a :class:`ContextWrapper` (simulation state + camera) and hosts
    the editor sub-windows (add/edit rigidbody, add/edit forces, timeline).
    """

    def __init__(self, x, y, w, h):
        super().__init__(x, y, w, h)
        # Simulation/camera facade sized to this frame.
        self.ctx_wrapper = ContextWrapper(self.width, self.height)
        # Adds double-click detection on top of pyglet's raw mouse events.
        self.mouse_handler = handlers.CustomMouseHandler()
        self.mouse_handler.on_double_click = self.on_double_click
        # (modifier-or-None, key) -> editor command name; see on_key_press.
        self.KEYMAP = {
            (key.MOD_SHIFT, key.A): 'add object',
            (key.MOD_SHIFT, key.F): 'add force',
            (key.MOD_SHIFT, key.M): 'move object',
            (None, key.HOME): 'home',
            (None, key.DELETE): 'delete',
            (None, key.SPACE): 'pause'}
        self.init_ui()

    def init_ui(self):
        """Create and attach the child widgets (timeline + tool windows)."""
        self.timeline = app.widgets.Timeline(x=10, y=10)
        self.add_object_window = app.widgets.AddRigidbodyWindow(self)
        self.add_force_window = app.widgets.AddForceWindow(self)
        self.edit_object_window = app.widgets.EditRigidbodyWindow(self)
        self.edit_forces_window = app.widgets.EditForcesWindow(self)
        # Both "add" windows share the same top-left anchor.
        self.add_object_window.x = 4
        self.add_object_window.y = 4
        self.add_force_window.x = 4
        self.add_force_window.y = 4
        self.add(self.timeline)
        self.add(self.add_object_window)
        self.add(self.add_force_window)
        self.add(self.edit_object_window)
        self.add(self.edit_forces_window)

    def on_mouse_scroll(self, x, y, scroll_x, scroll_y):
        """Zoom the camera while this frame is active (`self.pressed`,
        presumably set by the Frame base class -- confirm)."""
        if self.pressed:
            if scroll_y > 0:
                self.ctx_wrapper.zoom_in()
            elif scroll_y < 0:
                self.ctx_wrapper.zoom_out()

    def on_mouse_press(self, x, y, button, modifiers):
        """Start a selection or ruler gesture, depending on the mode."""
        super().on_mouse_press(x, y, button, modifiers)
        # Convert from window to frame-local coordinates.
        x -= self.x
        y -= self.y
        if self.pressed:
            mode = self.ctx_wrapper.get_mode()
            cam = self.ctx_wrapper.get_camera()
            # World-space coordinates (used by the ruler branch only).
            x_, y_ = cam.get_relative_position(x, y)
            if button == mouse.LEFT:
                if mode == SELECT_MODE:
                    # Degenerate rectangle; grown by on_mouse_drag.
                    self.ctx_wrapper.set_selection_area(x, y, x, y)
                elif mode == RULER_MODE:
                    self.ctx_wrapper.set_ruler(x_, y_, x_, y_)
                elif mode == MOVE_MODE:
                    pass

    def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
        """Grow the selection/ruler, move objects, or pan the camera."""
        super().on_mouse_drag(x, y, dx, dy, buttons, modifiers)
        # Convert from window to frame-local coordinates.
        x -= self.x
        y -= self.y
        if self.pressed:
            if buttons == mouse.LEFT:
                mode = self.ctx_wrapper.get_mode()
                cam = self.ctx_wrapper.get_camera()
                x_, y_ = cam.get_relative_position(x, y)
                if mode == SELECT_MODE:
                    # Keep the anchor corner, replace the moving corner.
                    x1, y1, x2, y2 = self.ctx_wrapper.get_selection_area()
                    self.ctx_wrapper.set_selection_area(x1, y1, x, y)
                elif mode == RULER_MODE:
                    x1, y1, x2, y2 = self.ctx_wrapper.get_ruler()
                    self.ctx_wrapper.set_ruler(x1, y1, x_, y_)
                elif mode == MOVE_MODE:
                    self.ctx_wrapper.move_selected(dx, dy)
            elif buttons == mouse.RIGHT:
                # Right-drag pans: camera moves opposite the drag delta.
                self.ctx_wrapper.move_camera(-dx,-dy)

    def on_mouse_release(self, x, y, button, modifiers):
        """Commit the selection rectangle (if any) and reset it."""
        super().on_mouse_release(x, y, button, modifiers)
        self.mouse_handler.on_mouse_release(x, y, button, modifiers)
        if self.pressed:
            if button == mouse.LEFT:
                mode = self.ctx_wrapper.get_mode()
                if mode == SELECT_MODE:
                    self.ctx_wrapper.select_area()
                    # Collapse the rectangle so it is no longer drawn.
                    self.ctx_wrapper.set_selection_area(0, 0, 0, 0)

    def on_mouse_motion(self, x, y, dx, dy):
        """Forward motion so the handler can detect double clicks."""
        self.mouse_handler.on_mouse_motion(x, y, dx, dy)

    def on_double_click(self, x, y, button, modifiers):
        """Select the object nearest the click and open its edit window."""
        self.ctx_wrapper.select_closer(
            x=x - self.global_position[0],
            y=y - self.global_position[1])
        selected = self.ctx_wrapper.get_selected()
        if selected:
            self.edit_object_window.show()
            self.edit_object_window.x = x
            self.edit_object_window.top = y
            self.edit_object_window.set_target(selected[0])

    def on_key_press(self, symbol, modifiers):
        """Translate the keypress through KEYMAP and run the command."""
        super().on_key_press(symbol, modifiers)
        command = None
        for mod, sym in self.KEYMAP.keys():
            if symbol == sym:
                # None means "no modifier required"; otherwise the modifier
                # bit must be present in `modifiers`.
                if mod is None or modifiers & mod:
                    command = self.KEYMAP[(mod, sym)]
                    break
        if command == 'add object':
            self.add_object_window.show()
        elif command == 'add force':
            self.add_force_window.show()
        elif command == 'move object':
            self.ctx_wrapper.set_move_mode()
        elif command == 'home':
            self.ctx_wrapper.camera_to_home()
        elif command == 'delete':
            self.ctx_wrapper.delete_selected()
        elif command == 'pause':
            self.ctx_wrapper.toggle_pause()

    def resize(self, width, height):
        """Propagate a resize to the context and the timeline widget."""
        super().resize(width, height)
        self.ctx_wrapper.resize(int(width), int(height))
        self.timeline.resize(width - 20, 20)

    def draw_overlayer(self):
        """Draw selection highlights, the selection box and the ruler."""
        camera = self.ctx_wrapper.get_camera()
        ctx_mode = self.ctx_wrapper.get_mode()
        zoom = camera.zoom
        # Ring around each selected object that is on screen.
        for obj in self.ctx_wrapper.get_selected():
            if camera.collide(obj):
                pos = obj.position * zoom
                objx = int(pos[0] + camera.centerx)
                objy = int(pos[1] + camera.centery)
                draw.draw_circle(objx, objy, 25 * zoom, (1, 0.2, 0.2, 1))
        # Draw selection area
        if ctx_mode == SELECT_MODE:
            x1, y1, x2, y2 = self.ctx_wrapper.get_selection_area()
            if x1 != x2 and y1 != y2:
                draw.draw_select_area(x1, y1, x2, y2)
        # Draw ruler
        if ctx_mode == RULER_MODE:
            x1, y1, x2, y2 = self.ctx_wrapper.get_ruler()
            if x1 != x2 and y1 != y2:
                draw.draw_ruler(x1, y1, x2, y2)

    def draw_ctx(self):
        """Draw the world: grid, axes, objects, paths and overlays."""
        draw.draw_grid()
        draw.draw_axes()
        for obj in self.ctx_wrapper.get_objects():
            draw.draw_object(obj)
            draw.draw_path(obj)
        self.draw_overlayer()

    def draw(self):
        """Render background, context and child widgets."""
        self.update_viewport()
        gl.glColor3f(*app.colors.CONTEXT_BACKGROUND_COLOR)
        gu.draw_rect(0, 0, self.width, self.height, gl.GL_QUADS)
        self.draw_ctx()
        self.draw_widgets()

    def update(self, dt):
        """Advance the simulation by `dt` seconds."""
        super().update(dt)
        self.ctx_wrapper.update(dt)
|
from account.models.users import User
from account.serializer.user import UserSerializer
from account.forms import LoginForm, SignUpForm
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from django.contrib.auth import login, authenticate, logout
from django.shortcuts import redirect
from django.contrib.auth.mixins import LoginRequiredMixin
from learn_word import models
import logging
logger = logging.getLogger("app")
class Signup(APIView):
    """Account creation endpoint rendered through Django templates."""

    renderer_classes = [TemplateHTMLRenderer]

    def get(self, request):
        """Render an empty sign-up form."""
        form = SignUpForm
        return Response({'form': form}, template_name='account/create.html')

    def post(self, request):
        """Validate the submitted form, create the user and log them in.

        Fix: validate BEFORE reading ``request.POST`` directly -- the
        original read ``POST['email']`` first and raised ``KeyError``
        (HTTP 500) on malformed requests instead of re-rendering the form.
        """
        form = SignUpForm(request.POST or None)
        if not form.is_valid():
            logger.debug(f'validate error: {form}')
            return Response({'form': form}, template_name='account/create.html')
        # if form.is_duplicate_email():
        #     return Response({'form': form}, template_name='account/create.html')
        # Fields are guaranteed present once the form validated.
        email = request.POST['email']
        password = request.POST['password']
        user = User.objects.create_user(email=email, password=password)
        login(request, user)
        return redirect('/account/dashboard')
class Login(APIView):
    """Session login endpoint rendered through Django templates."""

    queryset = User.objects.all()
    serializer_class = UserSerializer
    renderer_classes = [TemplateHTMLRenderer]
    template_name = "account/login.html"

    def get(self, request):
        """Show the login form, or go straight to the dashboard if the
        user already has a session."""
        logger.debug(request.user)
        logger.debug(request.user.is_authenticated)
        if request.user.is_authenticated:
            logger.info(f'Logged in pk: {request.user.pk}')
            return redirect('/account/dashboard')
        form = LoginForm
        return Response({'form': form}, template_name='account/login.html')

    def post(self, request):
        """Authenticate the submitted credentials and open a session."""
        email = request.POST['email']
        password = request.POST['password']
        form = LoginForm(request.POST or None)
        user = authenticate(email=email, password=password)
        if user:
            logger.info(f'Login: {user}')
            login(request, user)
            return redirect('/account/dashboard')
        # Security fix: never write plaintext passwords to the logs
        # (the original logged the failed password verbatim).
        logger.info(f'Login failed email: {email}')
        return Response({'form': form, 'login_failed': True}, template_name='account/login.html')
class Logout(APIView):
    """End the current session and send the user to the login page."""

    def get(self, request):
        # Record who is leaving before the session is flushed.
        logger.debug(f'logout user:{request.user}')
        logout(request)
        login_url = '/account/login/'
        return redirect(login_url)
class Dashboard(LoginRequiredMixin, APIView):
    """Post-login landing page with today's study statistics."""

    renderer_classes = [TemplateHTMLRenderer]

    def get(self, request):
        """Render the dashboard with today's word-study count."""
        count = models.WordLog.number_of_today_study(request.user.id)
        context = {'number_of_today_study': count}
        return Response(context, template_name='account/dashboard.html')
|
# Generated by Django 3.1.4 on 2020-12-15 23:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds a boolean 'estado' (status/active)
    field, defaulting to True, to AlmacenModel."""

    dependencies = [
        ('administracion', '0003_auto_20201212_1253'),
    ]

    operations = [
        migrations.AddField(
            model_name='almacenmodel',
            name='estado',
            field=models.BooleanField(default=True),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
File name: simulation_interface.py
Description: a set of functions for recording and training/testing neural networks
Author: Roman Pogodin
Python version: 3.6
"""
# When True, pin all RNG seeds and force single-threaded CPU execution so
# Keras/TensorFlow runs are bit-reproducible (at the cost of speed).
IS_REPRODUCIBLE = True

from warnings import warn
import numpy as np

# For reproducibility in Keras
# https://keras.io/getting-started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development
if IS_REPRODUCIBLE:
    warn('Tensorflow will be running on a single CPU to get a reproducible result. '
         'Set IS_REPRODUCIBLE to False to use more cores or GPUs')
    import tensorflow as tf
    import random as rn
    import os

    # Seeds must be set before the Keras backend creates its session;
    # the import order in this block is therefore deliberate.
    os.environ['PYTHONHASHSEED'] = '0'
    np.random.seed(42)
    rn.seed(12345)
    # NOTE(review): ConfigProto/Session/set_random_seed are TensorFlow 1.x
    # APIs; this block would need rewriting for TF 2.x.
    session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1,
                                  device_count={'CPU': 1, 'GPU': 0})
    from keras import backend as K

    tf.set_random_seed(1234)
    sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
    K.set_session(sess)
import reservoir_net
import attractor_net
import h5py
import keras
import keras.datasets.mnist as mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, BatchNormalization
from keras.layers import Conv2D, Conv1D, MaxPooling2D, Activation, Reshape
from keras.optimizers import RMSprop
from keras.models import Model
from keras.utils import plot_model
from utils import compute_shifted_cross_correlation
from utils import reshape_rows_to_blocks
class AttractorRecorder:
    """
    Records and saves a simulation of the attractor network: the bump
    trajectory, the firing rates, and the rates projected through a fixed
    sparse random readout matrix.
    """

    def __init__(self, n_attractor_neurons=2500, n_reservoir_neurons=1000, randseed=42, noisy_weights_std=2.0):
        self._n_attractor_neurons = n_attractor_neurons
        self._n_reservoir_neurons = n_reservoir_neurons
        self._attractor = attractor_net.AttractorNetwork(noisy_weights_std=noisy_weights_std, randseed=randseed,
                                                         num_rec=self._n_attractor_neurons)
        self._warm_up_time = 100

        # Fixed sparse (10%) Gaussian projection from attractor rates to a
        # reservoir-sized output; its own seed keeps it identical across runs.
        shape = (self._n_reservoir_neurons, self._n_attractor_neurons)
        p_connect = 0.1
        rng = np.random.RandomState(1)
        mask = np.array(rng.uniform(size=shape) <= p_connect, dtype=int)
        weight_std = np.sqrt(1.0 / (p_connect * self._n_reservoir_neurons))
        self._output_weights = rng.normal(loc=0.0, scale=weight_std, size=shape) * mask

    def record_output(self, max_time, filename=None, warm_up_input=None, hdf_file=None, hdf_prefix=''):
        """
        Simulate the attractor for `max_time` steps (after a warm-up phase
        driven by `warm_up_input`) and save the bump trajectory, the raw
        activity, and the activity converted through the random readout.
        """
        trajectory = np.zeros((max_time, 2))
        projected = np.zeros((max_time, self._n_reservoir_neurons))
        rates = np.zeros((max_time, self._n_attractor_neurons))

        self._attractor.reset_network()
        for _ in range(self._warm_up_time):
            self._attractor.update(warm_up_input)

        for t in range(max_time):
            self._attractor.update()
            trajectory[t] = self._attractor.position
            projected[t] = self._output_weights.dot(self._attractor.firing_rates)
            rates[t] = self._attractor.firing_rates

        if filename is not None:
            np.save(filename + '_rates', rates)  # requires a lot of space
            np.save(filename + '_output', projected)
            np.save(filename + '_trajectory', trajectory)
        if hdf_file is not None:
            # Raw rates are intentionally not written to HDF (too large).
            hdf_file.create_dataset('%soutput' % hdf_prefix, data=projected,
                                    compression='lzf', shuffle=True)
            hdf_file.create_dataset('%strajectory' % hdf_prefix, data=trajectory,
                                    compression='lzf', shuffle=True)
class ReservoirInterface:
    """
    An interface for training and testing of a reservoir network.
    """

    def __init__(self, reservoir, period):
        # reservoir: object exposing update(), reset_network_activity(),
        # are_weights_updated(), are_updates_delayed(), apply_weight_updates().
        self.reservoir = reservoir
        self._period = int(period)  # length of one period, in time steps
        self._recorded_variables = []  # attribute expressions sampled each step
        self._n_target_functions = 1  # set by train() from the targets' shape
        self._warm_up_time = 1000  # for reset_network

    def reset_network(self):
        """Resets network and makes it run with no input for self._warm_up_time"""
        self.reservoir.reset_network_activity()
        for net_time in range(self._warm_up_time):
            self.reservoir.update()

    def _simulate_period(self, attractor_input=None, target_function=None, count_updates=False):
        """
        Runs the network for one period.
        :param attractor_input: an already converted (of shape (n_rec,) attractor input or None
        :param target_function: a target function for train or None for test
        :param count_updates: whether to count the number of updates
        :return: values of each variable from self._recorded_variables of the shape [n_vars, time]
        """
        result = np.zeros((len(self._recorded_variables), self._period))
        n_updates = 0
        current_attractor_input = None
        current_target_function = None
        for net_time in range(self._period):
            if attractor_input is not None:
                current_attractor_input = attractor_input[net_time]
            if target_function is not None:
                current_target_function = target_function[net_time]
            self.reservoir.update(external_input=current_attractor_input, target_function=current_target_function)
            for i in range(len(self._recorded_variables)):
                # NOTE(review): eval of 'self.reservoir.<expr>' supports entries
                # like 'zhat[0]'. Safe only because the strings originate in this
                # code base; eval in the inner loop is also slow. Never pass
                # untrusted strings as recorded variables.
                result[i, net_time] = eval('self.reservoir.' + self._recorded_variables[i])
            if count_updates and self.reservoir.are_weights_updated():
                n_updates += 1
        return result, n_updates

    def _simulate(self, n_epochs, recorded_variables=None, period_reset=False,
                  attractor_input=None, target_functions=None, count_updates=False):
        """
        Simulates a network for several epochs.
        :param n_epochs: number of periods for each target function
        :param recorded_variables: a list of 1d variables to record
        :param period_reset: whether to reset the net after each period
        :param attractor_input: an already converted (of shape (period, n_rec) attractor input or None
        :param target_functions: a list of target functions for train or None for test
        :param count_updates: whether to count the number of updates
        :return: values of each variable from recorded_variables of the shape [n_vars, time]
        """
        if attractor_input is not None and len(attractor_input.shape) == 2:  # for one task
            attractor_input = attractor_input[None, :, :]
        if recorded_variables is None:
            self._recorded_variables = []
        else:
            self._recorded_variables = recorded_variables.copy()
        n_updates = 0
        result = np.zeros((len(self._recorded_variables), self._period * n_epochs * self._n_target_functions))
        current_attractor_input = None
        current_target_function = None
        # Tasks are interleaved: period i uses task (i % n_target_functions).
        for i in range(n_epochs * self._n_target_functions):
            if period_reset:
                self.reset_network()
            if attractor_input is not None:
                current_attractor_input = attractor_input[i % self._n_target_functions]
            if target_functions is not None:
                current_target_function = target_functions[i % self._n_target_functions]
            result[:, i * self._period:(i + 1) * self._period], curr_n_updates = \
                self._simulate_period(attractor_input=current_attractor_input,
                                      target_function=current_target_function,
                                      count_updates=count_updates)
            # With delayed updates, weight changes accumulated during the period
            # are applied only at the period boundary (training only).
            if target_functions is not None and self.reservoir.are_updates_delayed():
                self.reservoir.apply_weight_updates()
            n_updates += curr_n_updates
        return result, n_updates

    def train(self, n_epochs, target_functions, recorded_variables=None,
              period_reset=False, attractor_input=None, count_updates=False):
        """
        Trains the network for several epochs.
        :param n_epochs: number of periods for each target function
        :param target_functions: a list of target functions for train or None for test
        :param recorded_variables: a list of 1d variables to record
        :param period_reset: whether to reset the net after each period
        :param attractor_input: an already converted (of shape (period, n_rec) attractor input or None
        :param count_updates: whether to count the number of updates
        :return: values of each variable from recorded_variables of the shape [n_vars, time]
        """
        if len(target_functions.shape) == 1:  # for one task
            target_functions = target_functions[None, :]
        self._n_target_functions = target_functions.shape[0]
        return self._simulate(n_epochs, recorded_variables, period_reset, attractor_input,
                              target_functions, count_updates)

    def test(self, n_epochs, recorded_variables=None, period_reset=False, attractor_input=None):
        """
        Test the network for several epochs (no plasticity).
        :param n_epochs: number of periods for each target function
        :param recorded_variables: a list of 1d variables to record
        :param period_reset: whether to reset the net after each period
        :param attractor_input: an already converted (of shape (n_rec,) attractor input or None
        :return: values of each variable from recorded_variables of the shape [n_vars, time]
        """
        # No target functions passed -> _simulate runs without weight updates.
        return self._simulate(n_epochs, recorded_variables, period_reset, attractor_input)[0]
def setup_reservoir(n_reservoir_neurons=1000, learning_rule='hebb', randseed=42, hebb_rate=0.0005,
                    hebb_decay=20.0 * 1e3, num_out=1, hebb_const=False, update_prob=1.0, delayed_updates=False):
    """Setups a reservoir network instance."""
    return reservoir_net.ReservoirNet(num_rec=n_reservoir_neurons,
                                      randseed=randseed,
                                      learning_rule=learning_rule,
                                      hebb_rate=hebb_rate,
                                      hebb_decay=hebb_decay,
                                      num_out=num_out,
                                      hebb_const=hebb_const,
                                      update_prob=update_prob,
                                      delayed_updates=delayed_updates)
def run_reservoir(period=1000, num_train_epochs=10, num_test_epochs=50, target_values=None, n_reservoir_neurons=1000,
                  attractor_input=None, learning_rule='hebb', train_recordings=None, test_recordings=None,
                  simulator=None, count_updates=False, freeze_train_activity=False):
    """
    Trains and then tests a reservoir network.
    :param period: length of one period
    :param num_train_epochs: number of train periods for each target function
    :param num_test_epochs: number of test periods for each target function
    :param target_values: array of target functions, shape=(n_function, period) (or (period,) for one target)
    :param n_reservoir_neurons: number of reservoir neurons
    :param attractor_input: an already converted (of shape (period, n_rec) attractor input or None
    :param learning_rule: 'hebb' for reward-modulated Hebbian rule or 'force' for FORCE
    :param train_recordings: a list of 1d variables to record during training
    :param test_recordings: a list of 1d variables to record during testing
    :param simulator: an instance of ReservoirInterface; built here if None
    :param count_updates: whether to count the number of updates
    :param freeze_train_activity: whether to memorise train activity before test and recover it after
    :return: values of each variable from recorded_variables of the shape [n_vars, time] for train and test
    """
    if simulator is None:
        # `period` is only consulted when we have to build the simulator here.
        fresh_reservoir = setup_reservoir(n_reservoir_neurons=n_reservoir_neurons,
                                          learning_rule=learning_rule)
        simulator = ReservoirInterface(fresh_reservoir, period)

    train_results, n_updates = simulator.train(num_train_epochs, target_values, train_recordings,
                                               attractor_input=attractor_input,
                                               count_updates=count_updates)

    # Optionally snapshot the post-training state so the test run leaves the
    # network exactly where training left it.
    if freeze_train_activity:
        simulator.reservoir.memorise_state()
    test_results = simulator.test(num_test_epochs, test_recordings,
                                  attractor_input=attractor_input)
    if freeze_train_activity:
        simulator.reservoir.recover_memorised_state()

    if count_updates:
        return train_results, test_results, n_updates
    return train_results, test_results
def compute_num_recordings(recording_pattern):
    """Calculates the total number of recordings for a recording pattern.

    `recording_pattern` rows are [train_periods_between_tests, last_train_period];
    each segment contributes (segment length) // (periods between tests).
    """
    first_segment = recording_pattern[0, 1] // recording_pattern[0, 0]
    later_segments = (recording_pattern[1:, 1] - recording_pattern[:-1, 1]) // recording_pattern[1:, 0]
    return first_segment + later_segments.sum()
def record_learning_dynamics(period=1000, num_test_epochs=50, target_function=None, n_reservoir_neurons=1000,
                             attractor_input=None, learning_rule='hebb', test_recordings=None,
                             test_recording_pattern=np.array([[10, 50], [25, 200], [100, 400]]),  # train_periods, until
                             filename='test_performance.hdf', randseed=42, hebb_rate=0.0005, hebb_decay=20 * 1e3,
                             target_values=None):
    """
    Records dynamics of a reservoir network to an .hdf file.
    The variable test_recording_pattern has a format [[n_1, m_1], ...] where n_1 is the number of train periods between
    each 50 periods test and m_1 is the maximum number of train periods for which this pattern is done.
    Consequently, the very last element m_k sets the overall number of train periods.
    The network's state is memorised after each train period and recovered after testing.
    Note that there is no test for an untrained network as the weighs are initialised to 0 and produce the same error
    every time
    :param period: length of one period
    :param num_test_epochs: number of test periods for each target function
    :param target_function: a function object to evaluate a 1d function
    :param n_reservoir_neurons: number of reservoir neurons
    :param attractor_input: an already converted (of shape (period, n_rec) attractor input or None
    :param learning_rule: 'hebb' for reward-modulated Hebbian rule or 'force' for FORCE
    :param test_recordings: a list of 1d variables to record during testing
    :param test_recording_pattern: look above
    :param filename: name of the file with recordings
    :param randseed: random seed for the reservoir
    :param hebb_rate: initial learning rate for Hebb learning
    :param hebb_decay: hyperbolic decay for Hebb learning
    :param target_values: if target function is not specified, these values are used
    :return: None
    """
    # NOTE(review): test_recording_pattern is a mutable (numpy) default
    # argument -- harmless here because it is never mutated, but fragile.
    reservoir = setup_reservoir(n_reservoir_neurons=n_reservoir_neurons, learning_rule=learning_rule, randseed=randseed,
                                hebb_rate=hebb_rate, hebb_decay=hebb_decay)
    simulator = ReservoirInterface(reservoir, period)
    if target_function is not None:
        # Evaluate the target over one period; 1.0/period is passed as the frequency.
        target_values = target_function(np.linspace(0, period - 1, period), 1.0 / period)
    elif target_values is None:
        warn('Target function is not specified, test only')
    n_finished_periods = 0
    n_pattern = 0
    total_num_recordings = compute_num_recordings(test_recording_pattern)
    results_shape = (len(test_recordings), total_num_recordings, num_test_epochs * period)
    weights_shape = (total_num_recordings, 1, n_reservoir_neurons)
    output_file = h5py.File(filename, 'a')
    # One group per seed so several runs can share the same file.
    output_file.create_dataset('/seed%d/recorded_variables' % randseed, data=np.array(test_recordings, dtype='S'))
    output_file.create_dataset('/seed%d/target_values' % randseed, data=target_values)
    output_file.create_dataset('/seed%d/hebb_rate' % randseed, data=hebb_rate)
    output_file.create_dataset('/seed%d/hebb_decay' % randseed, data=hebb_decay)
    test_results = output_file.create_dataset('/seed%d/test_results' % randseed, results_shape, 'float')
    output_weights = output_file.create_dataset('/seed%d/output_weights' % randseed, weights_shape, 'float')
    time_stamp = output_file.create_dataset('/seed%d/time_stamp' % randseed, (total_num_recordings,), 'int')
    output_file.flush()
    # # not needed as w(0)=0, so the error is always the same in the beginning
    # record_simulation_step(time_stamp, test_results, output_weights, 0, n_finished_periods, simulator,
    #                        period, 0, num_test_epochs, target_values, attractor_input, test_recordings)
    current_time = 0
    # Train in chunks dictated by the current pattern row, testing after each
    # chunk; advance to the next row once its 'until' bound is reached.
    while n_finished_periods < test_recording_pattern[-1, -1]:
        if n_finished_periods >= test_recording_pattern[n_pattern, -1]:
            n_pattern += 1
        n_finished_periods += test_recording_pattern[n_pattern, 0]
        time_stamp[current_time] = n_finished_periods
        # freeze_train_activity=True: testing does not perturb the training state.
        test_results[:, current_time, :] = run_reservoir(period, test_recording_pattern[n_pattern, 0],
                                                         num_test_epochs, target_values,
                                                         attractor_input=attractor_input,
                                                         test_recordings=test_recordings,
                                                         simulator=simulator, freeze_train_activity=True)[1]
        output_weights[current_time, :, :] = simulator.reservoir.weights_out.copy()
        current_time += 1
        output_file.flush()
    output_file.close()
def record_mnist_inputs(n_digits=10, n_train_epochs=50, n_test_epochs=50, filename='recordings/mnist_inputs.hdf'):
    """Takes first examples from the MNIST dataset and converts them to bump's inputs.

    Trains a small CNN classifier on MNIST, then uses the activations of its
    'attractor_input' layer (25 units) as per-digit inputs, which are
    reshaped, upscaled and normalised before being written to an HDF5 file.
    """
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # Scale pixel values to [0, 1] and add the channel axis.
    x_train = x_train.reshape(60000, 28, 28, 1) / 255.0
    x_test = x_test.reshape(10000, 28, 28, 1) / 255.0
    y_train_categorical = keras.utils.to_categorical(y_train, 10)
    y_test_categorical = keras.utils.to_categorical(y_test, 10)
    file = h5py.File(filename, 'w')
    file.create_dataset('n_train', data=n_train_epochs)
    file.create_dataset('n_test', data=n_test_epochs)
    # Convolutional feature extractor.
    model = Sequential()
    model.add(Conv2D(32, (3, 3), use_bias=False, input_shape=(28, 28, 1)))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(Conv2D(32, (3, 3), use_bias=False))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(16, (3, 3), use_bias=False, padding='same'))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Dense stack narrowing down to a 25-unit bottleneck.
    model.add(Flatten())
    model.add(Dense(256))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(Dense(256))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(Dropout(0.2))
    model.add(Dense(128))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(Dense(64))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(Dropout(0.2))
    model.add(Dense(32))
    model.add(BatchNormalization())
    model.add(Activation("sigmoid"))
    model.add(Dense(25))
    model.add(BatchNormalization())
    model.add(Activation("softmax"))
    # 1-D convolution over the 25 bottleneck units; its ReLU output is the
    # layer extracted below as the attractor input.
    model.add(Reshape((25, 1)))
    model.add(Conv1D(25, 2, use_bias=False, padding='same'))
    model.add(BatchNormalization())
    model.add(Flatten())
    model.add(Activation("relu", name='attractor_input'))
    model.add(Dropout(0.4))
    model.add(Dense(10, activation='softmax'))
    model.summary()
    model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy'])
    plot_model(model, to_file='mnist_model.pdf', show_shapes=True, rankdir='LR')
    history = model.fit(x_train, y_train_categorical,
                        batch_size=256,
                        epochs=20,
                        verbose=1,
                        validation_data=(x_test, y_test_categorical),
                        shuffle=False)
    score = model.evaluate(x_test, y_test_categorical, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    # Sub-model that outputs the 'attractor_input' layer activations.
    attractor_input = Model(inputs=model.input, outputs=model.get_layer('attractor_input').output)

    def scale_input(x, scaling=2):
        # Upscale by repeating each pixel in a scaling x scaling block.
        return np.kron(x, np.ones((scaling, scaling)))

    for digit in range(n_digits):
        # Take the first n examples of this digit and convert to square maps.
        train = attractor_input.predict(x_train[y_train == digit][:n_train_epochs])
        test = attractor_input.predict(x_test[y_test == digit][:n_test_epochs])
        train = train.reshape(n_train_epochs, int(np.sqrt(train.shape[1])), int(np.sqrt(train.shape[1])))
        test = test.reshape(n_test_epochs, int(np.sqrt(test.shape[1])), int(np.sqrt(test.shape[1])))
        for i in range(n_train_epochs):
            train[i] = reshape_rows_to_blocks(train[i])
        for i in range(n_test_epochs):
            test[i] = reshape_rows_to_blocks(test[i])
        # Upscale x2, flatten, and normalise each sample to a max of 1.
        train = scale_input(train, scaling=2).reshape((n_train_epochs, -1))
        test = scale_input(test, scaling=2).reshape((n_test_epochs, -1))
        train = 1 * (train / train.max(axis=1)[:, None])
        test = 1 * (test / test.max(axis=1)[:, None])
        file.create_dataset('/%d/train' % digit, data=train)
        file.create_dataset('/%d/test' % digit, data=test)
        file.flush()
    file.close()
def record_ideal_digit_inputs(n_digits=10, n_train_epochs=50, n_test_epochs=50,
                              filename='recordings/ideal_digit_inputs.hdf'):
    """Records inputs to the attractor layer that are noiseless and distinct for different digits."""
    out = h5py.File(filename, 'w')
    out.create_dataset('n_train', data=n_train_epochs)
    out.create_dataset('n_test', data=n_test_epochs)
    # Reused 50x50 canvases; each digit lights up its own 5x5 square on the diagonal.
    train_block = np.zeros((n_train_epochs, 50, 50))
    test_block = np.zeros((n_test_epochs, 50, 50))
    for digit in range(n_digits):
        lo, hi = 5 * digit, 5 * (digit + 1)
        train_block.fill(0.0)
        test_block.fill(0.0)
        train_block[:, lo:hi, lo:hi] = 1
        test_block[:, lo:hi, lo:hi] = 1
        out.create_dataset('/%d/train' % digit, data=train_block.reshape((n_train_epochs, 2500)))
        out.create_dataset('/%d/test' % digit, data=test_block.reshape((n_test_epochs, 2500)))
        out.flush()
    out.close()
def record_mnist_attractors(n_digits=10, signal_length=1000, filename='recordings/mnist_attractors.hdf',
                            input_file='recordings/mnist_inputs.hdf', randseed=0):
    """Records attractor dynamics for converted inputs from MNIST images.

    For every digit and every stored train/test example, the input is used to
    warm up the attractor, whose subsequent activity is recorded for
    `signal_length` steps and written to `filename`.
    """
    input_file = h5py.File(input_file, 'r')
    output_file = h5py.File(filename, 'w')
    recorder = AttractorRecorder(n_attractor_neurons=2500, randseed=randseed, noisy_weights_std=6.0)
    # Scalar datasets written by record_mnist_inputs / record_ideal_digit_inputs.
    n_train = input_file['n_train'][()]
    n_test = input_file['n_test'][()]
    output_file.create_dataset('n_train', data=n_train)
    output_file.create_dataset('n_test', data=n_test)
    for digit in range(n_digits):
        train_input = input_file['/%d/train' % digit]
        for epoch in range(n_train):
            recorder.record_output(signal_length, warm_up_input=train_input[epoch], hdf_file=output_file,
                                   hdf_prefix='/%d/train/%d' % (digit, epoch))
            output_file.flush()
        test_input = input_file['/%d/test' % digit]
        for epoch in range(n_test):
            recorder.record_output(signal_length, warm_up_input=test_input[epoch], hdf_file=output_file,
                                   hdf_prefix='/%d/test/%d' % (digit, epoch))
            output_file.flush()
    input_file.close()
    output_file.close()
def record_mnist_learning_dynamics(target_values, n_reservoir_neurons=1000, learning_rule='hebb',
                                   external_input_type='mnist', filename='./recordings/mnist_performance.hdf',
                                   randseed=42, hebb_rate=0.0005, hebb_decay=20 * 1e3, n_epochs=1):
    """
    Records test results for digits' drawings learning induced by pre-processed MNIST inputs/ideal noiseless inputs.
    :param target_values: targets of the shape [n_targets, n_dimensions, period]
    :param n_reservoir_neurons: number of reservoir neurons
    :param learning_rule: hebb or force
    :param external_input_type: mnist or ideal
    :param filename: where to save the result
    :param randseed: random seed for the reservoir
    :param hebb_rate: learning rate
    :param hebb_decay: time decay constant (rate decays as 1 / (1 + t/T))
    :param n_epochs: number of repeats for the training input
    :return: None
    """
    # num_out=2: the network draws 2-d trajectories (x, y), recorded below.
    reservoir = setup_reservoir(n_reservoir_neurons=n_reservoir_neurons, learning_rule=learning_rule, randseed=randseed,
                                hebb_rate=hebb_rate, hebb_decay=hebb_decay, num_out=2)
    test_recordings = ['zhat[0]', 'zhat[1]']
    input_file = h5py.File('./recordings/mnist_attractors.hdf', 'r')
    n_train = input_file['n_train'][()]
    n_test = input_file['n_test'][()]
    # The period is inferred from the length of one recorded attractor signal.
    period = input_file['/0/train/0output'][:].shape[0]
    if external_input_type == 'ideal':
        input_file.close()
        input_file = h5py.File('./recordings/ideal_digit_attractors.hdf', 'r')
    if external_input_type == 'mnist' or external_input_type == 'ideal':
        # Inputs are interleaved by digit: slice (digit + i * 10) holds epoch i
        # of that digit.
        # NOTE(review): ReservoirInterface._simulate indexes attractor_input with
        # (i % n_target_functions), so with 10 targets only the first 10 slices
        # (epoch 0 of each digit) appear to be consumed -- confirm the remaining
        # epochs are intentionally unused.
        train_input = np.zeros((10 * n_train, period, 1000))
        test_input = np.zeros((10 * n_test, period, 1000))
        for digit in range(10):
            for i in range(n_train):
                train_input[digit + i * 10] = input_file['/%d/train/%doutput' % (digit, i)][:]
            for i in range(n_test):
                test_input[digit + i * 10] = input_file['/%d/test/%doutput' % (digit, i)][:]
    else:
        train_input = None
        test_input = None
    simulator = ReservoirInterface(reservoir, period)
    output_file = h5py.File(filename, 'a')
    output_file.create_dataset('/seed%d/recorded_variables' % randseed, data=np.array(test_recordings, dtype='S'))
    output_file.create_dataset('/seed%d/target_values' % randseed, data=target_values)
    output_file.create_dataset('/seed%d/hebb_rate' % randseed, data=hebb_rate)
    output_file.create_dataset('/seed%d/hebb_decay' % randseed, data=hebb_decay)
    # just training
    for i in range(n_epochs):
        run_reservoir(period, n_train, 0, target_values, attractor_input=train_input, simulator=simulator)
    # Single test pass (0 train periods) recorded to the output file.
    output_file.create_dataset('/seed%d/test_results' % randseed,
                               data=run_reservoir(period, 0, n_test, target_values, attractor_input=test_input,
                                                  test_recordings=test_recordings, simulator=simulator)[1])
    input_file.close()
    output_file.close()
def record_constant_rate_updates(target_values, update_prob=1.0, attractor_input=None,
                                 learning_rule='hebb', max_periods=400, filename='stochastic_updates.hdf',
                                 randseed=42, hebb_rate=0.0005, delayed_updates=False, hdf_folder=None):
    """
    Records an experiment with probabilistic updates for Hebbian learning (w/ and w/o the attractor).
    The network's state is memorised after each train period to be recovered after test.
    :param target_values: a 1d signal to learn
    :param update_prob: probability of an update
    :param attractor_input: input from the attractor layer
    :param learning_rule: hebb or force
    :param max_periods: the maximum number of periods to compute for
    :param filename: where to save the results
    :param randseed: random seed of the reservoir
    :param hebb_rate: learning rate (no decay, so it will be constant)
    :param delayed_updates: if true, weights are updated in the end of each period
    :param hdf_folder: float, value for a folder in the hdf file. None -> update_prob
    :return: None
    """
    period = len(target_values)
    reservoir = setup_reservoir(learning_rule=learning_rule, randseed=randseed,
                                hebb_rate=hebb_rate, hebb_const=True, update_prob=update_prob,
                                delayed_updates=delayed_updates)
    simulator = ReservoirInterface(reservoir, period)
    correlation = np.zeros(max_periods)
    updates = np.zeros(max_periods)
    # Bug fix: the loop variable used to shadow `period` and was passed as the
    # period argument of run_reservoir. That argument is ignored when a
    # simulator is supplied, so behaviour is unchanged, but the shadowing was
    # a latent bug waiting to bite; use a distinct epoch index instead.
    for epoch in range(max_periods):
        # 1 train period + 1 test period; state is frozen around the test.
        test_results, curr_n_updates = run_reservoir(period, 1, 1, target_values, attractor_input=attractor_input,
                                                     test_recordings=['zhat'], simulator=simulator,
                                                     count_updates=True, freeze_train_activity=True)[1:]
        updates[epoch] = curr_n_updates
        correlation[epoch] = compute_shifted_cross_correlation(test_results, target_values)
    if hdf_folder is None:
        hdf_folder = update_prob
    output_file = h5py.File(filename, 'a')
    output_file.create_dataset('%.1f/seed%d/targets' % (hdf_folder, randseed), data=target_values)
    output_file.create_dataset('%.1f/seed%d/max_periods' % (hdf_folder, randseed), data=max_periods)
    output_file.create_dataset('%.1f/seed%d/hebb_rate' % (hdf_folder, randseed), data=hebb_rate)
    output_file.create_dataset('%.1f/seed%d/updates' % (hdf_folder, randseed), data=updates)
    output_file.create_dataset('%.1f/seed%d/correlation' % (hdf_folder, randseed), data=correlation)
    output_file.close()
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
from bluepy.btle import UUID, Peripheral, ADDR_TYPE_RANDOM, Scanner, DefaultDelegate, BTLEException
from bluepy import btle
import time
from time import sleep
import struct
import binascii
import sys
import os
import datetime
from BoogioLogger import *
#PERIPHERAL_UUID = "dc:80:07:ef:8b:cf"
#PERIPHERAL_UUID = "f5:47:18:cf:9c:dc"

# ANSI colour codes for terminal output; setting the environment variable
# C=0 disables colouring entirely.
if os.getenv('C', '1') == '0':
    ANSI_RED = ''
    ANSI_GREEN = ''
    ANSI_YELLOW = ''
    ANSI_CYAN = ''
    ANSI_WHITE = ''
    ANSI_OFF = ''
else:
    ANSI_CSI = "\033["
    ANSI_RED = ANSI_CSI + '31m'
    ANSI_GREEN = ANSI_CSI + '32m'
    ANSI_YELLOW = ANSI_CSI + '33m'
    ANSI_CYAN = ANSI_CSI + '36m'
    ANSI_WHITE = ANSI_CSI + '37m'
    ANSI_OFF = ANSI_CSI + '0m'
def dump_services(dev):
    """Print every service, characteristic and trailing descriptor of a
    connected peripheral, reading values where READ is advertised."""
    services = sorted(dev.services, key=lambda s: s.hndStart)
    for s in services:
        print ("\t%04x: %s" % (s.hndStart, s))
        if s.hndStart == s.hndEnd:
            continue  # service declares no characteristics
        chars = s.getCharacteristics()
        for i, c in enumerate(chars):
            props = c.propertiesToString()
            h = c.getHandle()
            if 'READ' in props:
                val = c.read()
                if c.uuid == btle.AssignedNumbers.device_name:
                    string = ANSI_CYAN + '\'' + \
                        val.decode('utf-8') + '\'' + ANSI_OFF
                elif c.uuid == btle.AssignedNumbers.device_information:
                    string = repr(val)
                else:
                    # NOTE(review): '<s' is unbalanced with the closing '>' --
                    # possibly a typo for '<'; confirm the intended format.
                    string = '<s' + binascii.b2a_hex(val).decode('utf-8') + '>'
            else:
                string = ''
            print ("\t%04x: %-59s %-12s %s" % (h, c, props, string))
            # Walk handles after the characteristic value to dump descriptors,
            # stopping at the service end or just before the next characteristic.
            while True:
                h += 1
                if h > s.hndEnd or (i < len(chars) - 1 and h >= chars[i + 1].getHandle() - 1):
                    break
                try:
                    val = dev.readCharacteristic(h)
                    print ("\t%04x: <%s>" %
                           (h, binascii.b2a_hex(val).decode('utf-8')))
                except btle.BTLEException:
                    break  # handle not readable; move on
class ScanPrint(btle.DefaultDelegate):
    """Scanner delegate that prints each advertising report as it arrives,
    filtered by the command-line options (new/all/sensitivity)."""

    def __init__(self, opts):
        btle.DefaultDelegate.__init__(self)
        self.opts = opts

    def handleDiscovery(self, dev, isNewDev, isNewData):
        """Print one scan result unless the options filter it out."""
        if isNewDev:
            status = "new"
        elif isNewData:
            if self.opts.new:
                return
            status = "update"
        else:
            if not self.opts.all:
                return
            status = "old"
        if dev.rssi < self.opts.sensitivity:
            return
        print ('    Device (%s): %s (%s), %d dBm %s' %
               (status,
                ANSI_WHITE + dev.addr + ANSI_OFF,
                dev.addrType,
                dev.rssi,
                ('' if dev.connectable else '(not connectable)'))
               )
        for (sdid, desc, val) in dev.getScanData():
            # AD types 8/9 are shortened/complete local name -> colourised.
            if sdid in [8, 9]:
                print ('\t' + desc + ': \'' + ANSI_CYAN + val + ANSI_OFF + '\'')
            else:
                print ('\t' + desc + ': <' + val + '>')
        if not dev.scanData:
            print ('\t(no data)')
        # Bug fix: a bare `print` under `from __future__ import print_function`
        # is just a reference to the function and emits nothing; call it to
        # print the intended blank separator line.
        print()
def main():
    """Parse CLI options, scan for BLE devices and optionally enumerate the
    services of every connectable device found."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--hci', action='store', type=int, default=0, help='Interface number for scan')
    parser.add_argument('-t', '--timeout', action='store', type=int, default=4, help='Scan delay, 0 for continuous')
    parser.add_argument('-s', '--sensitivity', action='store', type=int, default=-128, help='dBm value for filtering far devices')
    parser.add_argument('-d', '--discover', action='store_true', help='Connect and discover service to scanned devices')
    parser.add_argument('-a', '--all', action='store_true', help='Display duplicate adv responses, by default show new + updated')
    parser.add_argument('-n', '--new', action='store_true', help='Display only new adv responses, by default show new + updated')
    parser.add_argument('-v', '--verbose', action='store_true', help='Increase output verbosity')
    parser.add_argument('-b', '--bpx', action='store', help='connect to device with this address')
    arg = parser.parse_args(sys.argv[1:])

    # Bug fix: PERIPHERAL_UUID is read later at module level (MyDelegate and
    # the Peripheral() connection after the __main__ guard), so it must be
    # bound as a module global here, not left as a function local.
    global PERIPHERAL_UUID
    if arg.bpx is not None:
        print("arg = " + str(arg))
        print("arg.bpx = " + str(arg.bpx))
        PERIPHERAL_UUID = str(arg.bpx)
    else:
        PERIPHERAL_UUID = "dc:80:07:ef:8b:cf"
        #PERIPHERAL_UUID = "f5:47:18:cf:9c:dc"

    btle.Debugging = arg.verbose
    scanner = btle.Scanner(arg.hci).withDelegate(ScanPrint(arg))
    print (ANSI_RED + "Scanning for devices..." + ANSI_OFF)
    devices = scanner.scan(arg.timeout)

    if arg.discover:
        print (ANSI_RED + "Discovering services..." + ANSI_OFF)
        for d in devices:
            if not d.connectable:
                continue
            print ("    Connecting to", ANSI_WHITE + d.addr + ANSI_OFF + ":")
            dev = btle.Peripheral(d)
            dump_services(dev)
            dev.disconnect()
            # Bug fix: bare `print` printed nothing; call it for a blank line.
            print()
# NOTE(review): module-level execution continues below this guard -- the
# script scans first, then defines the delegate and connects. Confirm this
# two-phase flow is intentional.
if __name__ == "__main__":
    main()

# Handles of the three notification characteristics; assigned later, after
# the peripheral's services have been discovered.
buffer0CharacteristicHandle = None
buffer1CharacteristicHandle = None
buffer2CharacteristicHandle = None
class MyDelegate(DefaultDelegate):
    """bluepy notification delegate that decodes the three Boogio sensor
    buffers (force, acceleration, rotation) and writes them to the logger."""

    def __init__(self):
        # Bug fix: initialise the bluepy base class as its API expects.
        DefaultDelegate.__init__(self)
        # Full-scale constants for the sensor channels.
        self.MAX_FORCE_VALUE = 1023.0
        self.MAX_ACCELERATION_VALUE = 8000.0
        self.MAX_ROTATION_VALUE = 1000.0
        self.MAX_HEADING_VALUE = 1000.0
        self.MAX_SHORT_VALUE = 65535.0
        self.HALF_OF_MAX_SHORT_VALUE = 32767.0
        self.ACCELERATION_CONVERSION_COEFFICIENT = 1.0 / 1000.0
        self.ROTATION_CONVERSION_COEFFICIENT = 1.0 / 1000.0
        # Set True once the peripheral reports the matching buffer as drained.
        self.buffer0IsEmpty = False
        self.buffer1IsEmpty = False
        self.buffer2IsEmpty = False
        self.logger = BoogioLogger(PERIPHERAL_UUID)

    def handleNotification(self, hnd, data):
        """Decode one notification payload and print/log its sensor values."""
        # Bug fix: struct.unpack returns ints, so the original comparison with
        # the bytes literals b'\xff' / b'\x01' was always False and the
        # buffer-empty marker was never detected. Compare against ints.
        if struct.unpack('<B', data[0:1])[0] == 0xFF and struct.unpack('<B', data[1:2])[0] == 0x01:
            if hnd == buffer0CharacteristicHandle:
                self.buffer0IsEmpty = True
                return
            elif hnd == buffer1CharacteristicHandle:
                self.buffer1IsEmpty = True
                return
            elif hnd == buffer2CharacteristicHandle:
                self.buffer2IsEmpty = True
                return
        # First 8 bytes: big-endian millisecond timestamp.
        milliseconds = struct.unpack('>Q', data[0:8])[0]
        timestamp = datetime.datetime.fromtimestamp(milliseconds/1000.0).strftime("%Y-%m-%d %H:%M:%S.%f")
        self.logger.setTime(timestamp)
        #header = "[" + str(milliseconds) + "]"
        header = "[" + timestamp + "]"
        if (hnd == buffer0CharacteristicHandle):
            # Six unsigned-short force channels.
            force0 = struct.unpack('<H', data[8:10])[0]
            force1 = struct.unpack('<H', data[10:12])[0]
            force2 = struct.unpack('<H', data[12:14])[0]
            force3 = struct.unpack('<H', data[14:16])[0]
            force4 = struct.unpack('<H', data[16:18])[0]
            force5 = struct.unpack('<H', data[18:20])[0]
            print(header + "[Buffer_0]----[" + str(force0) + " " + str(force1) + " " + str(force2)+ " " + str(force3) + " " + str(force4) + " " + str(force5) + "]")
            self.logger.insertBuffer0Values(timestamp, force0, force1, force2, force3, force4, force5)
        elif (hnd == buffer1CharacteristicHandle):
            # Two more force channels plus signed-short acceleration x/y/z.
            force6 = struct.unpack('<H', data[8:10])[0]
            force7 = struct.unpack('<H', data[10:12])[0]
            accelerationX = struct.unpack('<h', data[12:14])[0]
            accelerationY = struct.unpack('<h', data[14:16])[0]
            accelerationZ = struct.unpack('<h', data[16:18])[0]
            print(header + "[Buffer_1]----[" + str(force6) + " " + str(force7) + " " + str(accelerationX) + " " + str(accelerationY) + " " + str(accelerationZ) + "]")
            self.logger.insertBuffer1Values(timestamp, force6, force7, accelerationX, accelerationY, accelerationZ)
        elif (hnd == buffer2CharacteristicHandle):
            # Signed-short rotation quaternion components.
            rotationX = struct.unpack('<h', data[8:10])[0]
            rotationY = struct.unpack('<h', data[10:12])[0]
            rotationZ = struct.unpack('<h', data[12:14])[0]
            rotationW = struct.unpack('<h', data[14:16])[0]
            print(header + "[Buffer_2]----[" + str(rotationX) + " " + str(rotationY) + " " + str(rotationZ) + " " + str(rotationW) + "]")
            self.logger.insertBuffer2Values(timestamp, rotationX, rotationY, rotationZ, rotationW)
        else:
            teptep = binascii.b2a_hex(data)
            print('Notification: UNKOWN: hnd {}, data {}'.format(hnd, teptep))

    def _str_to_int(self, s):
        """ Transform hex str into int (interpreted as a signed 8-bit value). """
        i = int(s, 16)
        if i >= 2**7:
            i -= 2**8
        return i
# Connect to the Boogio peripheral (random BLE address type) and install the
# notification delegate defined above.
boogioPeripheral = Peripheral(PERIPHERAL_UUID, "random")
boogioDelegate = MyDelegate()
boogioPeripheral.setDelegate(boogioDelegate)
boogioShoeSensorService = None
buffer0Characteristic = None
buffer1Characteristic = None
buffer2Characteristic = None
# Standard Client Characteristic Configuration Descriptor UUID (enables
# notifications when written with 0x0001).
CCCD_UUID = 0x2902
print("Boogio Peripheral:")
# Discover the shoe-sensor service and grab the three buffer characteristics,
# their value handles and their CCCDs.
for svc in boogioPeripheral.services:
    print(" ")
    print(str(svc))
    if svc.uuid == "f3641400-00B0-4240-ba50-05ca45bf8abc":
        boogioShoeSensorService = boogioPeripheral.getServiceByUUID(svc.uuid)
        for characteristic in boogioShoeSensorService.getCharacteristics():
            print(characteristic)
            if characteristic.uuid == "f3641401-00B0-4240-ba50-05ca45bf8abc":
                buffer0Characteristic = characteristic
                buffer0CharacteristicHandle = characteristic.getHandle()
                buffer0CCCD = characteristic.getDescriptors(forUUID=CCCD_UUID)[0]
            elif characteristic.uuid == "f3641402-00B0-4240-ba50-05ca45bf8abc":
                buffer1Characteristic = characteristic
                buffer1CharacteristicHandle = characteristic.getHandle()
                buffer1CCCD = characteristic.getDescriptors(forUUID=CCCD_UUID)[0]
            elif characteristic.uuid == "f3641403-00B0-4240-ba50-05ca45bf8abc":
                buffer2Characteristic = characteristic
                buffer2CharacteristicHandle = characteristic.getHandle()
                buffer2CCCD = characteristic.getDescriptors(forUUID=CCCD_UUID)[0]
# RGB color constants (appear unused in this part of the script).
BLACK = (0,0,0)
RED = (255,60,120)
GREEN = (58,255,118)
BLUE = (64,128,255)
ORANGE = (252, 97, 38)
YELLOW = (255, 255, 15)
# Build the "set time" command: opcode 0x00 followed by the current epoch
# time in milliseconds, big-endian, 8 bytes.
current_time = int(round(time.time() * 1000))
byteString = bytearray()
byteString.append(0x00) #set time command
byteString.append((current_time >> 56) & 0xff)
byteString.append((current_time >> 48) & 0xff)
byteString.append((current_time >> 40) & 0xff)
byteString.append((current_time >> 32) & 0xff)
byteString.append((current_time >> 24) & 0xff)
byteString.append((current_time >> 16) & 0xff)
byteString.append((current_time >> 8) & 0xff)
byteString.append((current_time >> 0) & 0xff)
time.sleep(1)
# NOTE(review): reload(sys)/setdefaultencoding exist only in Python 2; this
# script will fail here under Python 3 (and str(bytearray) below also relies
# on Python 2 semantics).  Confirm the intended interpreter.
reload(sys)
sys.setdefaultencoding('utf8')
# update timestamp
print("Timestamp = " + str(current_time))
#boogioPeripheral.writeCharacteristic(forceCharacteristicHandle, byteString, True)
buffer0Characteristic.write(str(byteString), withResponse = True)
time.sleep(1)
# Select the "synchronization" protocol on the device.
setProtocolByteString = bytearray()
setProtocolByteString.append(0x01) # set protocol command
setProtocolByteString.append(0x02) # synchronization enum
buffer0Characteristic.write(str(setProtocolByteString), withResponse = True)
setSampleRateByteString = bytearray()
setSampleRateByteString.append(0x04) # set sample rate command
setSampleRateByteString.append(0x01) # frequency argument (Hz)
buffer0Characteristic.write(str(setSampleRateByteString), withResponse = True)
time.sleep(1)
# Enable notifications on all three buffers by writing their CCCDs.
buffer0CCCD.write(b"\x01\x00", True)
buffer1CCCD.write(b"\x01\x00", True)
buffer2CCCD.write(b"\x01\x00", True)
syncCount = 10
sleepTime = 1
# "Sync" command requesting one reading at a time.
syncStep = bytearray()
syncStep.append(0x02) # Sync Command
syncStep.append(0x01) # 1 Readings
boogioDelegate.logger.connect()
# Drain each buffer one reading at a time until the delegate has seen the
# "empty" marker for all three.
while(True):
    if(not boogioDelegate.buffer0IsEmpty):
        buffer0Characteristic.write(str(syncStep), withResponse = True)
        boogioPeripheral.waitForNotifications(sleepTime)
    if(not boogioDelegate.buffer1IsEmpty):
        buffer1Characteristic.write(str(syncStep), withResponse = True)
        boogioPeripheral.waitForNotifications(sleepTime)
    if(not boogioDelegate.buffer2IsEmpty):
        buffer2Characteristic.write(str(syncStep), withResponse = True)
        boogioPeripheral.waitForNotifications(sleepTime)
    print("\r\n")
    if(boogioDelegate.buffer0IsEmpty and boogioDelegate.buffer1IsEmpty and boogioDelegate.buffer2IsEmpty):
        break
# Flush the logger and release the BLE connection.
boogioDelegate.logger.commit()
boogioDelegate.logger.disconnect()
print("Done Syncing.")
boogioPeripheral.disconnect()
|
###############################################################################
##
## Copyright 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import pickle
class Case:
    """Base class for a single test case run against one protocol connection.

    Subclasses configure ``self.expected`` (a mapping from behavior verdict
    to the event sequence that earns it) and ``self.expectedClose`` (the
    expected close handshake) in ``init()``.  Events are accumulated in
    ``self.received`` and the final verdicts are computed in
    ``onConnectionLost()``.
    """

    # Behavior / close-behavior verdict codes.
    FAILED = "FAILED"
    OK = "OK"
    NON_STRICT = "NON-STRICT"
    WRONG_CODE = "WRONG CODE"
    UNCLEAN = "UNCLEAN"
    FAILED_BY_CLIENT = "FAILED BY CLIENT"
    INFORMATIONAL = "INFORMATIONAL"
    UNIMPLEMENTED = "UNIMPLEMENTED"

    # to remove
    NO_CLOSE = "NO_CLOSE"

    # Class-level list, shared by subclasses that do not override it.
    SUBCASES = []

    def __init__(self, protocol):
        self.p = protocol
        self.received = []       # events observed so far
        self.expected = {}       # verdict -> expected event sequence
        self.expectedClose = {}  # expected close handshake description
        self.behavior = Case.FAILED
        self.behaviorClose = Case.FAILED
        # Robustness fix: ``passed`` was previously only created inside
        # onConnectionLost(), so reading it before the connection ended
        # raised AttributeError.  Define it up front.
        self.passed = False
        self.result = "Actual events differ from any expected."
        self.resultClose = "TCP connection was dropped without close handshake"
        self.reportTime = False
        self.reportCompressionRatio = False
        self.trafficStats = None
        self.subcase = None
        # suppresses automatic close behavior (used in cases that deliberately send bad close behavior)
        self.suppressClose = False
        ## defaults for permessage-deflate - will be overridden in
        ## permessage-deflate test cases (but only for those)
        ##
        self.perMessageDeflate = False
        self.perMessageDeflateOffers = []
        self.perMessageDeflateAccept = lambda connectionRequest, acceptNoContextTakeover, acceptMaxWindowBits, requestNoContextTakeover, requestMaxWindowBits: None
        self.init()

    def getSubcaseCount(self):
        """Return the number of subcases this case defines."""
        return len(Case.SUBCASES)

    def setSubcase(self, subcase):
        """Select which subcase will be run."""
        self.subcase = subcase

    def init(self):
        """Per-case setup hook; overridden by subclasses."""
        pass

    def onOpen(self):
        """Called when the connection is established; default does nothing."""
        pass

    def onMessage(self, msg, binary):
        """Record a received message event."""
        self.received.append(("message", msg, binary))
        self.finishWhenDone()

    def onPing(self, payload):
        """Record a received ping event."""
        self.received.append(("ping", payload))
        self.finishWhenDone()

    def onPong(self, payload):
        """Record a received pong event."""
        self.received.append(("pong", payload))
        self.finishWhenDone()

    def onClose(self, wasClean, code, reason):
        """Close hook; default does nothing."""
        pass

    def compare(self, obj1, obj2):
        """Structural equality check via pickle serialization."""
        return pickle.dumps(obj1) == pickle.dumps(obj2)

    def onConnectionLost(self, failedByMe):
        """Compute the behavior and close-behavior verdicts for this case."""
        # check if we passed the test: does any expected sequence match?
        for e in self.expected:
            if self.compare(self.received, self.expected[e]):
                self.behavior = e
                self.passed = True
                self.result = "Actual events match at least one expected."
                break
        # check the close status
        if self.expectedClose["closedByMe"] != self.p.closedByMe:
            self.behaviorClose = Case.FAILED
            self.resultClose = "The connection was failed by the wrong endpoint"
        elif self.expectedClose["requireClean"] and not self.p.wasClean:
            self.behaviorClose = Case.UNCLEAN
            self.resultClose = "The spec requires the connection to be failed cleanly here"
        # idiom fix: `is not None` instead of `!= None`
        elif self.p.remoteCloseCode is not None and self.p.remoteCloseCode not in self.expectedClose["closeCode"]:
            self.behaviorClose = Case.WRONG_CODE
            self.resultClose = "The close code should have been %s or empty" % ','.join(map(str, self.expectedClose["closeCode"]))
        elif not self.p.factory.isServer and self.p.droppedByMe:
            self.behaviorClose = Case.FAILED_BY_CLIENT
            self.resultClose = "It is preferred that the server close the TCP connection"
        else:
            self.behaviorClose = Case.OK
            self.resultClose = "Connection was properly closed"
        ## for UTF8 tests, closing by wrong endpoint means case failure, since
        ## the peer then did not detect the invalid UTF8 at all
        ##
        closedByWrongEndpointIsFatal = self.expectedClose.get("closedByWrongEndpointIsFatal", False)
        if closedByWrongEndpointIsFatal and self.expectedClose["closedByMe"] != self.p.closedByMe:
            self.behavior = Case.FAILED

    def finishWhenDone(self):
        # if we match at least one expected outcome check if we are supposed to
        # start the closing handshake and if so, do it.
        for e in self.expected:
            if not self.compare(self.received, self.expected[e]):
                return
        if self.expectedClose["closedByMe"] and not self.suppressClose:
            self.p.sendClose(self.expectedClose["closeCode"][0])
|
"""A setup module for psidPy
Based on the pypa sample project.
A tool to download data and build psid panels based on psidR by Florian Oswald.
See:
https://github.com/floswald/psidR
https://github.com/tyler-abbot/psidPy
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
# Resolve the package root so DESCRIPTION.rst is found regardless of the
# current working directory.
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
    long_description = f.read()
# Package metadata for psid_py (see module docstring above).
setup(
    name='psid_py',
    version='1.0.2',
    description='A tool to build PSID panels.',
    # The project's main homepage
    url='https://github.com/tyler-abbot/psidPy',
    # Author details
    author='Tyler Abbot',
    author_email='tyler.abbot@sciencespo.fr',
    # Licensing information
    license='MIT',
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 3 - Alpha',
        # Indicate who your project is intended for
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Information Analysis',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4'],
    # What does your project relate to?
    keywords='statistics econometrics data',
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=['requests',
                      'pandas',
                      'beautifulsoup4'],
)
|
"""
Tutorials / horn antenna
Description at:
http://openems.de/index.php/Tutorial:_Horn_Antenna
(C) 2011,2012,2013 Thorsten Liebig <thorsten.liebig@uni-due.de>
Python Adaptation : ESIR Project 2015
"""
from pylayers.em.openems.openems import *
import scipy.constants as cst
import numpy as np
# setup the simulation
unit = 1e-3  # all lengths in mm: scales every geometric quantity below
class HornAntenna(object):
    """Geometric description of a pyramidal horn antenna.

    Keyword parameters (with defaults): ``unit`` (1e-3, i.e. mm),
    ``width`` (20), ``height`` (30), ``length`` (50), ``feed_length`` (50),
    ``thickness`` (2) and ``angle`` (flare angles in radians for the two
    aperture planes, default 20 degrees each).
    """

    def __init__(self, **kwargs):
        defaults = {'unit': 1e-3,
                    'width': 20,
                    'height': 30,
                    'length': 50,
                    'feed_length': 50,
                    'thickness': 2,
                    'angle': np.array([20, 20]) * np.pi / 180.
                    }
        # Fill in every parameter the caller did not supply.
        for key, value in defaults.items():
            kwargs.setdefault(key, value)
        self.unit = kwargs['unit']
        self.width = kwargs['width']
        self.height = kwargs['height']
        self.length = kwargs['length']
        self.feed_length = kwargs['feed_length']
        self.thickness = kwargs['thickness']
        self.angle = kwargs['angle']
# Instantiate the horn geometry with its default dimensions.
HA = HornAntenna()
# size of the simulation box (in mm, see `unit`)
SimBox = np.r_[200,200,200]
# frequency range of interest
f_start = 10e9
f_stop = 20e9
# frequency of interest
f0 = 15e9
#waveguide TE-mode definition
TE_mode = 'TE10'
# Waveguide cross-section dimensions.
a = HA.width
b = HA.height
# setup FDTD parameter & excitation function: Gaussian pulse centred on the
# band, with 8-cell PML absorbing boundaries on all six faces.
F = FDTD(EndCriteria="1e-4")
F.add(Exc(typ='Gaussian',f0=0.5*(f_start+f_stop),fc=0.5*(f_stop-f_start)))
F.add(BoundaryCond(['PML 8','PML 8','PML 8','PML 8','PML 8','PML 8']))
# setup CSXCAD geometry & mesh
# currently, openEMS cannot automatically generate a mesh
max_res = ((cst.c/f_stop)/unit)/15. # cell size: lambda/15 at f_stop
C = CSX()
#
# Warning : It is not the same thing to add a new properties (add) and to add
# a new primitive to an existing property (primitive)
#
# Create the 'horn' metal property with an initial (degenerate) box; the
# actual walls are added as further primitives below.
C.add(Matter('horn',
             p=Box(
                 P1=[-a/2.-HA.thickness,-b/2.,0],
                 P2=[-a/2.,-b/2.,0],Pr=10)
             ))
#
# Define Mesh: fixed lines at the box edges and waveguide walls, smoothed
# down to max_res with a 1.4 grading ratio.
#
linex = [-SimBox[0]/2.,-a/2., a/2., SimBox[0]/2.]
meshx = SmoothMeshLine( linex, max_res, 1.4)
liney = [-SimBox[1]/2., -b/2., b/2., SimBox[1]/2.]
meshy = SmoothMeshLine( liney, max_res, 1.4 )
linez = [-HA.feed_length, 0 ,SimBox[2]-HA.feed_length ]
meshz = SmoothMeshLine( linez, max_res, 1.4 )
C.add(RectilinearGrid(meshx,meshy,meshz))
#
# Waveguide
#
# Four metal boxes forming the rectangular feed waveguide walls, from the
# bottom of the mesh (meshz[0]) up to the horn throat at z=0.
# Left wall (-x).
C.primitive('horn',Box(
    P1=[-a/2.-HA.thickness,-b/2.,meshz[0]],
    P2=[-a/2.,b/2.,0],Pr=10)
    )
# Right wall (+x).
C.primitive('horn',Box(
    P1=[a/2.+HA.thickness,-b/2.,meshz[0]],
    P2=[a/2.,b/2.,0],Pr=10)
    )
# Top wall (+y).
C.primitive('horn', Box(
    P1=[-a/2.-HA.thickness,b/2.+HA.thickness,meshz[0]],
    P2=[a/2.+HA.thickness,b/2.,0],Pr=10)
    )
# Bottom wall (-y).
C.primitive('horn', Box(
    P1=[-a/2.-HA.thickness,-b/2.-HA.thickness,meshz[0]],
    P2=[a/2.+HA.thickness,-b/2.,0],Pr=10)
    )
#
# horn opening 4 metallic plates
#
# Outline of the flaring plates: columns are polygon vertices; the extent
# along the horn axis is HA.length and the transverse edge flares out by
# sin(angle)*length.
horn_opening1 = np.array([[0, HA.length, HA.length, 0],
                          [a/2.,
                           a/2 + np.sin(HA.angle[0])*HA.length,
                           -a/2 - np.sin(HA.angle[0])*HA.length,
                           -a/2.]])
horn_opening2 = np.array([[b/2+HA.thickness,
                           b/2+HA.thickness + np.sin(HA.angle[1])*HA.length,
                           -b/2-HA.thickness - np.sin(HA.angle[1])*HA.length,
                           -b/2-HA.thickness],
                          [ 0, HA.length, HA.length, 0]])
# Two polygons per outline: top/bottom plates from horn_opening1, left/right
# plates (normal along x, normdir=0) from horn_opening2.
L1 = LinPoly(lp=horn_opening1.T,Pr=10)
L2 = LinPoly(lp=horn_opening1.T,Pr=10)
L3 = LinPoly(lp=horn_opening2.T,Pr=10,normdir=0)
L4 = LinPoly(lp=horn_opening2.T,Pr=10,normdir=0)
T1 = Transformation()
T2 = Transformation()
T3 = Transformation()
T4 = Transformation()
# y translate: move the plates onto the top/bottom waveguide walls
Tr1 = Translate([0,-b/2-HA.thickness/2,0])
Tr2 = Translate([0,b/2+HA.thickness/2,0])
# x translate: move the plates onto the left/right waveguide walls
Tr3 = Translate([-a/2-HA.thickness/2,0,0])
Tr4 = Translate([a/2+HA.thickness/2,0,0])
# Rotations that flare each plate outward by the horn angle.
# NOTE(review): Rx3/Rx4 rotate about Y but reuse angle[1]; angle[0] would be
# expected for the x-plane flare — confirm against the openEMS tutorial.
Rx1 = Rotate_X(HA.angle[1])
Rx2 = Rotate_X(-HA.angle[1])
Rx3 = Rotate_Y(-HA.angle[1])
Rx4 = Rotate_Y(HA.angle[1])
# Each transformation applies the rotation first, then the translation.
T1.append(Rx1)
T1.append(Tr1)
T2.append(Rx2)
T2.append(Tr2)
T3.append(Rx3)
T3.append(Tr3)
T4.append(Rx4)
T4.append(Tr4)
L1.append(T1)
L2.append(T2)
L3.append(T3)
L4.append(T4)
# Attach the four transformed plates to the 'horn' metal property.
C.primitive('horn',L1)
C.primitive('horn',L2)
C.primitive('horn',L3)
C.primitive('horn',L4)
## first ProbeBox
#C.add(ProbeBox(name='port_ut1', Type='wv', Weight='1'),
# a=Attributes([(0*cos(0.15708*(x--10))*sin(0*(y--15))),
# (-0.05*sin(0.15708*(x--10))*cos(0*(y--15))),0]),
# p=Box(P1=[-10,-15,-25],P2=[10,15,-25],Pr=0)
#
## second ProbeBox
#
#C.add(ProbeBox(name='port_it1', Type='wc', Weight='1'), a=Attributes([(0.05*sin(0.15708*(x--10))*cos(0*(y--15))),0*cos(0.15708*(x--10))*sin(0*(y--15))),0]), p=Box(P1=[-10,-15,-25],P2=[10,15,-25],Pr=0)
#
#
##
# Physical aperture area of the horn opening (in m^2, hence the *unit factors).
A = (a + 2*np.sin(HA.angle[0])*HA.length)*unit * (b + 2*np.sin(HA.angle[1])*HA.length)*unit;
##
## apply the excitation
# Excitation volume spanning the waveguide cross-section inside the feed.
start=[-a/2, -b/2 ,meshz[7] ];
stop =[ a/2, b/2 ,meshz[0]+HA.feed_length/2. ];
C.add(Excitation('port_excite_1',typ="Es",excite="1,1,0"))
# AddRectWaveGuidePort( CSX, 0, 1, start, stop, 2, a*unit, b*unit, TE_mode, 1);
##
##%% nf2ff calc
##start = [mesh.x(9) mesh.y(9) mesh.z(9)];
##stop = [mesh.x(end-8) mesh.y(end-8) mesh.z(end-8)];
##[CSX nf2ff] = CreateNF2FFBox(CSX, 'nf2ff', start, stop, 'Directions', [1 1 1 1 0 1]);
##
##%% prepare simulation folder
##Sim_Path = 'tmp_Horn_Antenna';
##Sim_CSX = 'horn_ant.xml';
##
##[status, message, messageid] = rmdir( Sim_Path, 's' ); % clear previous directory
##[status, message, messageid] = mkdir( Sim_Path ); % create empty simulation folder
##
##%% write openEMS compatible xml-file
##WriteOpenEMS([Sim_Path '/' Sim_CSX], FDTD, CSX);
##
##%% show the structure
##CSXGeomPlot([Sim_Path '/' Sim_CSX]);
##
##%% run openEMS
##RunOpenEMS(Sim_Path, Sim_CSX);
##
##%% postprocessing & do the plots
##freq = linspace(f_start,f_stop,201);
##
##port = calcPort(port, Sim_Path, freq);
##
##Zin = port.uf.tot ./ port.if.tot;
##s11 = port.uf.ref ./ port.uf.inc;
##
##plot( freq/1e9, 20*log10(abs(s11)), 'k-', 'Linewidth', 2 );
##ylim([-60 0]);
##grid on
##title( 'reflection coefficient S_{11}' );
##xlabel( 'frequency f / GHz' );
##ylabel( 'reflection coefficient |S_{11}|' );
##
##drawnow
##
##%% NFFF contour plots %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
##
##% calculate the far field at phi=0 degrees and at phi=90 degrees
##thetaRange = (0:2:359) - 180;
##disp( 'calculating far field at phi=[0 90] deg...' );
##nf2ff = CalcNF2FF(nf2ff, Sim_Path, f0, thetaRange*pi/180, [0 90]*pi/180);
##
##Dlog=10*log10(nf2ff.Dmax);
##G_a = 4*pi*A/(c0/f0)^2;
##e_a = nf2ff.Dmax/G_a;
##
##% display some antenna parameter
##disp( ['radiated power: Prad = ' num2str(nf2ff.Prad) ' Watt']);
##disp( ['directivity: Dmax = ' num2str(Dlog) ' dBi'] );
##disp( ['aperture efficiency: e_a = ' num2str(e_a*100) '%'] );
##
##%%
##% normalized directivity
##figure
##plotFFdB(nf2ff,'xaxis','theta','param',[1 2]);
##drawnow
##% D_log = 20*log10(nf2ff.E_norm{1}/max(max(nf2ff.E_norm{1})));
##% D_log = D_log + 10*log10(nf2ff.Dmax);
##% plot( nf2ff.theta, D_log(:,1) ,'k-', nf2ff.theta, D_log(:,2) ,'r-' );
##
##% polar plot
##figure
##polarFF(nf2ff,'xaxis','theta','param',[1 2],'logscale',[-40 20], 'xtics', 12);
##drawnow
##% polar( nf2ff.theta, nf2ff.E_norm{1}(:,1) )
##
##%% calculate 3D pattern
##phiRange = sort( unique( [-180:5:-100 -100:2.5:-50 -50:1:50 50:2.5:100 100:5:180] ) );
##thetaRange = sort( unique([ 0:1:50 50:2.:100 100:5:180 ]));
##
##disp( 'calculating 3D far field...' );
##nf2ff = CalcNF2FF(nf2ff, Sim_Path, f0, thetaRange*pi/180, phiRange*pi/180, 'Verbose',2,'Outfile','nf2ff_3D.h5');
##
##figure
##plotFF3D(nf2ff);
##
##%%
##E_far_normalized = nf2ff.E_norm{1}/max(nf2ff.E_norm{1}(:));
##DumpFF2VTK([Sim_Path '/Horn_Pattern.vtk'],E_far_normalized,thetaRange,phiRange,'scale',1e-3);
# Assemble the openEMS simulation from the FDTD settings and geometry, then
# write it out as an openEMS-compatible XML file.
S = OpenEMS(F,C)
#
S.save(filename='HornAntenna.xml')
|
"""
system/pre_kaster.py - Tasks to be done when the program is initialized
Copyright (C) 2017-2018 Nguyen Hoang Duong <novakglow@gmail.com>
Licensed under MIT License (see LICENSE).
"""
import sys
import os
sys.path.insert(0, "utils")
from global_vars import *
def main():
    """Run Kaster's startup checks.

    :return: 100 on success,
             102 when no log file existed and a fresh one was created,
             exits with 104 when the .kasterrc configuration file is missing
             from the user's home directory (fatal).
    """
    proc_tag = "pre_kaster.py -> main()"

    # Without the configuration file nothing else can work: log and bail out.
    if not os.path.isfile(config_path):
        kaster_logger.critical(":%s: Could not find Kaster's configuration file (.kasterrc) in home directory" % proc_tag)
        print("Make sure that you've ran install.sh")
        sys.exit(104)

    # Create program's files path if there isn't one.
    if not os.path.isdir(kaster_dir):
        os.mkdir(kaster_dir)

    # No log file yet: create an empty one and report it.
    if not os.path.isfile(log_path):
        open(log_path, "a").close()
        return 102

    # Rotate the log once it grows past 50 MB.
    if os.path.getsize(log_path) > 50000000:
        os.remove(log_path)
        open(log_path, "a").close()  # start over with an empty log file
        kaster_logger.info("%s: Renewed log file" % proc_tag)

    return 100
|
#!/usr/bin/python
fname = "H37Rv_genome.fasta"

# Per-base counters.  Only uppercase letters are counted, which assumes the
# genome file uses uppercase bases throughout.
numA = 0
numT = 0
numC = 0
numG = 0

# Use a context manager so the file handle is closed even on error
# (the original opened the file and never closed it).
with open(fname, 'r') as fh:
    fh.readline()  # remove the fasta header
    for line in fh:
        numA += line.count("A")
        numT += line.count("T")
        numC += line.count("C")
        numG += line.count("G")

# Report absolute counts and the relative base composition.
allbases = float(numA+numT+numC+numG)
print ("sum:", numA+numT+numC+numG)
print ("A:", numA, "\tT:", numT, "\tC:", numC, "\tG:", numG)
print ("A:", numA/allbases, "\tT:", numT/allbases, "\tC:", numC/allbases, "\tG:", numG/allbases)
|
import numpy as np
def bbox_iou(bbox_a, bbox_b):
    """Pairwise IoU between two sets of boxes.

    Both inputs are (N, 4) arrays of corner-format boxes
    [x_min, y_min, x_max, y_max].  Returns an (len(bbox_a), len(bbox_b))
    matrix of intersection-over-union values.

    Raises IndexError when either array does not have 4 columns.
    """
    if bbox_a.shape[1] != 4 or bbox_b.shape[1] != 4:
        print(bbox_a, bbox_b)
        raise IndexError
    # Broadcast to (Na, Nb, 2): per-pair intersection rectangle corners.
    top_left = np.maximum(bbox_a[:, None, :2], bbox_b[:, :2])
    bottom_right = np.minimum(bbox_a[:, None, 2:], bbox_b[:, 2:])
    # Intersection area, zeroed for pairs that do not overlap.
    overlaps = (top_left < bottom_right).all(axis=2)
    inter = np.prod(bottom_right - top_left, axis=2) * overlaps
    area_a = np.prod(bbox_a[:, 2:] - bbox_a[:, :2], axis=1)
    area_b = np.prod(bbox_b[:, 2:] - bbox_b[:, :2], axis=1)
    # IoU = intersection / union.
    return inter / (area_a[:, None] + area_b - inter)
def bbox2loc(pbox, tbox):
    """Encode ground-truth boxes as offsets relative to proposal boxes.

    Both inputs are (N, 4) corner-format arrays [x_min, y_min, x_max, y_max].
    Each output row is (dx, dy, dw, dh): the center shift normalized by the
    proposal size, and the log of the size ratio — the same encoding the RPN
    uses, here serving as regression targets for the classifier head.
    """
    # Proposal size and center (centers are taken before the eps clamp,
    # matching the decoding convention).
    src_w = pbox[:, 2] - pbox[:, 0]
    src_h = pbox[:, 3] - pbox[:, 1]
    src_cx = pbox[:, 0] + 0.5 * src_w
    src_cy = pbox[:, 1] + 0.5 * src_h
    # Ground-truth size and center.
    dst_w = tbox[:, 2] - tbox[:, 0]
    dst_h = tbox[:, 3] - tbox[:, 1]
    dst_cx = tbox[:, 0] + 0.5 * dst_w
    dst_cy = tbox[:, 1] + 0.5 * dst_h
    # Guard against zero-sized proposals before dividing / taking logs.
    eps = np.finfo(src_h.dtype).eps
    src_w = np.maximum(src_w, eps)
    src_h = np.maximum(src_h, eps)
    dx = (dst_cx - src_cx) / src_w
    dy = (dst_cy - src_cy) / src_h
    dw = np.log(dst_w / src_w)
    dh = np.log(dst_h / src_h)
    # (N, 4) array of normalized offsets.
    return np.stack([dx, dy, dw, dh], axis=1)
def calc_iou(proposal_boxes, config, true_boxes, num_classes):
    """Sample positive/negative RoIs from the proposals and build the
    classifier training targets (sampled box coordinates, one-hot labels
    and per-class regression offsets).

    proposal_boxes: (N, 4) proposals in [x_min, y_min, x_max, y_max].
    config: provides classifier_max_iou / classifier_min_iou thresholds,
        num_rois (RoIs sampled per image) and classifier_regr_std scaling.
    true_boxes: (M, 5) ground-truth boxes, columns [x1, y1, x2, y2, class].
    num_classes: total classes including background (background is the
        last index, num_classes - 1).
    """
    true_boxes_loc = true_boxes[:, :4]
    true_boxes_cls = true_boxes[:, 4]
    if len(true_boxes_loc) == 0:
        # Image has no ground-truth boxes: zero out the GT assignment,
        # the IoU values and the labels for every proposal.
        true_box_idx = np.zeros(len(proposal_boxes), np.int32)
        max_iou = np.zeros(len(proposal_boxes))
        gt_roi_label = np.zeros(len(proposal_boxes))
    else:
        # iou.shape = (num_proposals, num_gt): IoU of every proposal
        # against every ground-truth box.
        iou = bbox_iou(proposal_boxes, true_boxes_loc)
        # Best IoU per proposal.
        max_iou = iou.max(axis=1)
        # Index of the ground-truth box each proposal overlaps most; that
        # proposal is responsible for predicting this box and its class.
        true_box_idx = iou.argmax(axis=1)
        # Class label each proposal should predict.
        gt_roi_label = true_boxes_cls[true_box_idx]
    # Proposals with IoU >= classifier_max_iou become positive samples.
    pos_index = np.where(max_iou >= config.classifier_max_iou)[0]
    # Cap positives at half of the RoI batch for this image.
    pos_nums = int(min(config.num_rois // 2, pos_index.size))
    if pos_index.size > 0:
        # Randomly pick pos_nums positives without replacement (shuffles order).
        pos_index = np.random.choice(pos_index, size=pos_nums, replace=False)
    # Proposals with IoU in [classifier_min_iou, classifier_max_iou) become
    # negative samples; positives + negatives total config.num_rois.
    neg_index = np.where((max_iou < config.classifier_max_iou) &
                         (max_iou >= config.classifier_min_iou))[0]
    # Negatives fill whatever the positives did not use.
    neg_nums = config.num_rois - pos_nums
    if neg_nums > neg_index.size:
        # Not enough negatives available: sample with replacement.
        neg_index = np.random.choice(neg_index, size=neg_nums, replace=True)
    else:
        neg_index = np.random.choice(neg_index, size=neg_nums, replace=False)
    keep_index = np.append(pos_index, neg_index)
    # Sampled boxes for training: positives first, then negatives.
    pos_neg_boxes = proposal_boxes[keep_index]
    if len(true_boxes_loc) != 0:
        # (n, 4) regression targets: offsets from each sampled proposal to
        # its assigned ground-truth box.
        loc_offset = bbox2loc(pos_neg_boxes, true_boxes_loc[true_box_idx[keep_index]])
        # Scale the targets by the classifier regression std factors
        # (e.g. x,y by 8 and w,h by 4) to balance the loss.
        loc_offset = loc_offset * np.array(config.classifier_regr_std)
    else:
        loc_offset = np.zeros_like(pos_neg_boxes)
    # Class targets for all sampled RoIs.
    gt_roi_label = gt_roi_label[keep_index]
    # With 20 VOC classes plus background there are 21 labels (0..20, 20 is
    # background); every negative sample is labelled background here.
    gt_roi_label[pos_nums:] = num_classes - 1
    # Coordinates of the sampled proposals.
    pos_neg_loc = np.zeros_like(pos_neg_boxes)
    # Reorder (x1, y1, x2, y2) -> (y1, x1, y2, x2).
    pos_neg_loc[:, [0, 1, 2, 3]] = pos_neg_boxes[:, [1, 0, 3, 2]]
    # (n, num_classes) one-hot encoding of the target class per RoI.
    label_one_hot = np.eye(num_classes)[np.array(gt_roi_label, np.int32)]
    # Total number of sampled RoIs.
    pos_neg_nums = np.shape(loc_offset)[0]
    true_class_label = np.zeros([pos_neg_nums, num_classes - 1, 4])
    true_boxes_offset = np.zeros([pos_neg_nums, num_classes - 1, 4])
    # For each positive, mark its true class and write the regression
    # offsets into that class's slot.
    true_class_label[np.arange(pos_nums), np.array(gt_roi_label[:pos_nums], np.int32)] = 1
    true_boxes_offset[np.arange(pos_nums), np.array(gt_roi_label[:pos_nums], np.int32)] = loc_offset[:pos_nums]
    true_class_label = np.reshape(true_class_label, [pos_neg_nums, -1])
    true_boxes_offset = np.reshape(true_boxes_offset, [pos_neg_nums, -1])
    # (n, 2*4*(num_classes-1)): first half is the class mask, second half
    # holds the matching coordinate targets.
    true_boxes_offset = np.concatenate([np.array(true_class_label),
                                        np.array(true_boxes_offset)], axis=1)
    return pos_neg_loc, label_one_hot, true_boxes_offset
|
fruit = input()
set_type = input()
set_count = int(input())

# Price per kilogram for each fruit, keyed by set size.
PRICE_PER_KG = {
    "small": {"Watermelon": 56, "Mango": 36.66, "Pineapple": 42.10, "Raspberry": 20},
    "big": {"Watermelon": 28.70, "Mango": 19.60, "Pineapple": 24.80, "Raspberry": 15.20},
}
# Kilograms contained in one set of each size.
KG_PER_SET = {"small": 2, "big": 5}

# Unknown fruit or set type leaves the price at 0.
price = 0
if set_type in PRICE_PER_KG and fruit in PRICE_PER_KG[set_type]:
    price = set_count * KG_PER_SET[set_type] * PRICE_PER_KG[set_type][fruit]

# Volume discounts: 15% off between 400 and 1000 lv. inclusive, 50% off above.
if 400 <= price <= 1000:
    price = (85 * price) / 100
if price > 1000:
    price = price / 2
print(f"{price:.2f} lv.")
|
# -*- coding: utf-8 -*-
"""Extends urllib with additional handlers."""
from __future__ import (absolute_import, division, print_function,
unicode_literals, with_statement)
import argparse
import json
import sys
import boto3
def keys_from_bucket_objects(objects):
    """Return the "Key" of each S3 object, skipping "folder" placeholders.

    Entries whose key ends with "/" (zero-byte directory markers) are
    dropped; the order of the remaining keys is preserved.
    """
    keys = []
    for obj in objects:
        key = obj["Key"]
        if not key.endswith("/"):
            keys.append(key)
    return keys
def list_s3_keys(bucket_name, prefix='/', delimiter='/', start_after=''):
    """Yield every object key in *bucket_name* under *prefix*.

    Uses the list_objects_v2 paginator so buckets with more than 1000 keys
    are handled transparently.  A leading delimiter is stripped from the
    prefix, and when the prefix itself names a "directory" (ends with the
    delimiter) listing starts right after it.
    """
    paginator = boto3.client('s3').get_paginator('list_objects_v2')
    if prefix.startswith(delimiter):
        prefix = prefix[1:]
    if prefix.endswith(delimiter):
        start_after = start_after or prefix
    pages = paginator.paginate(
        Bucket=bucket_name, Prefix=prefix, StartAfter=start_after
    )
    for page in pages:
        for entry in page.get('Contents', ()):
            yield entry['Key']
def main(bucket, prefix="", delimiter=","):
    """Return {"keys": ...}: all keys under *prefix* joined by *delimiter*."""
    joined = delimiter.join(list_s3_keys(bucket, prefix=prefix))
    return {"keys": joined}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"json", type=argparse.FileType("r"), default=sys.stdin,
help="Parses BUCKET, PREFIX, and DELIMITER from a json file or stdin"
)
args = parser.parse_args()
json_args = {}
with args.json as fp_:
json_args = json.load(fp_)
sys.exit(print(json.dumps(main(**json_args))))
|
import platform
import pypinyin
from pathlib import Path
from PIL.ImageFont import FreeTypeFont
from PIL import Image, ImageDraw, ImageFont
from PIL.Image import Image as IMG
# Absolute directory of this module; used to locate the bundled font files.
dir_path = Path(__file__).parent.absolute()
def cn2py(word) -> str:
    """Transliterate *word* to pinyin (no tone marks), syllables concatenated."""
    syllables = pypinyin.pinyin(word, style=pypinyin.NORMAL)
    return "".join("".join(parts) for parts in syllables)
# 移除windows和linux下特殊字符
def remove_prohibited_str(name: str) -> str:
    """Sanitize *name* for use as a file name on the current platform.

    On Windows every character that is illegal in file names is removed;
    elsewhere forward slashes are replaced with backslashes.
    """
    if platform.system().lower() == "windows":
        prohibited = set('\\/:*?"<>|')
        name = "".join(ch for ch in name if ch not in prohibited)
    else:
        name = name.replace("/", "\\")
    return name
def load_font(fontname: str = "msyh.ttf", fontsize: int = 16) -> FreeTypeFont:
    """Load a bundled TrueType font from resources/fonts at *fontsize*."""
    font_file = dir_path / "resources" / "fonts" / fontname
    return ImageFont.truetype(str(font_file), fontsize, encoding="utf-8")
def circled_number(num: int) -> IMG:
    """Render *num* in white on a red circular/pill-shaped badge.

    The badge is 500 px tall; its width grows with the text so multi-digit
    numbers get a rounded rectangle instead of a circle.
    """
    font = load_font(fontsize=450)
    label = str(num)
    label_width = font.getsize(label)[0]
    # Widen the badge for the text, but never below the 500 px circle.
    width = max(240 + label_width, 500)
    canvas = Image.new("RGBA", (width, 500))
    painter = ImageDraw.Draw(canvas)
    # Two circles plus a connecting rectangle form the red pill background.
    painter.ellipse(((0, 0), (500, 500)), fill="red")
    painter.ellipse(((width - 500, 0), (width, 500)), fill="red")
    painter.rectangle(((250, 0), (width - 250, 500)), fill="red")
    painter.text(
        (120, -60),
        label,
        font=font,
        fill="white",
        stroke_width=10,
        stroke_fill="white",
    )
    return canvas
|
import json
import os
import sys
import urllib.parse
from datetime import datetime
from string import Template
import numpy
import uvicorn
from fastapi import FastAPI, BackgroundTasks
from starlette.requests import Request
from starlette.responses import HTMLResponse, RedirectResponse, Response
from starlette.staticfiles import StaticFiles
from arguments import get_arguments
from web.backend import Backend
from web.templating import (
generate_index_html,
generate_index_html_validation,
)
from web.utils import (
CommentBody,
SourcingState,
RequestBody,
ValidationRequestBody,
WorkingRequestBody,
)
# FastAPI application; static assets in web/static are served under /static.
app = FastAPI()
app.mount("/static", StaticFiles(directory="web/static"), name="static")
def get_prediction(args, example, models, processor, tokenizer):
    """Average each model's answer probabilities for *example* and report
    whether the ensemble prediction misses the gold answer.

    NOTE(review): relies on ``predict_example`` being available in module
    scope; it is not imported in the visible part of this file — confirm.
    """
    per_model_probs = []
    for model in models:
        _, probs = predict_example(args, model, example, processor, tokenizer)
        per_model_probs.append(probs)
    # Ensemble: mean probability per option, argmax picks the answer.
    averaged = list(numpy.average(per_model_probs, axis=0))
    predicted = int(numpy.argmax(averaged))
    # Gold answers are named like "...1".."...4"; last char maps to index.
    gold = int(example["gold_answer"][-1]) - 1
    # An adversarial/plain example "succeeds" when it fools the ensemble.
    success = example["mode"] in ("adv", "plain") and gold != predicted
    return {
        "model_prediction_probability": averaged,
        "model_prediction_label": predicted,
        "success": success,
        "gold_label": gold,
        "probabilities": per_model_probs,
    }
def make_prediction(input_body: RequestBody):
    """Convert the request body to an example and run the model ensemble."""
    kit = app.state.model_kit
    example = input_body.convert_to_example()
    return get_prediction(
        app.state.args,
        example,
        kit["models"],
        kit["processor"],
        kit["tokenizer"],
    )
def call_template(template_dir, backend, uid, worker_id, run_mode, is_external):
    """Render the task page for a user's current assignment.

    Resolves the set of HTML template files for the user's collection mode
    (explicit config entries win; otherwise file names are derived from the
    template name) and dispatches to the matching index generator.

    NOTE(review): the run_mode and is_external parameters are not used in
    this body — the values are read from app.state.args instead. Confirm
    whether the parameters can be dropped or should be honored.

    Raises:
        ValueError: if the user's collection mode is not plain/adv/validation.
    """
    collection_mode = backend.user_assignments[uid].collection_mode
    template_name = backend.hit_config[collection_mode].get(
        "template_name", collection_mode
    )
    # Each template kind falls back to a "<kind>_<template_name>.html" convention.
    instruction_path = backend.hit_config[collection_mode].get(
        "instruction_template", f"instruction_{template_name}.html"
    )
    mini_instruction_path = backend.hit_config[collection_mode].get(
        "mini_instruction_template", f"mini_instruction_{template_name}.html"
    )
    faq_path = backend.hit_config[collection_mode].get(
        "faq_template", f"faq_{template_name}.html"
    )
    task_path = backend.hit_config[collection_mode].get(
        "task_template", f"task_{template_name}.html"
    )
    subconfig = backend.hit_config[collection_mode].get("subconfig", "")
    # The "justification" sub-mode overrides two of the resolved templates.
    if collection_mode == "plain" and subconfig == "justification":
        instruction_path = "instruction_justification.html"
        faq_path = "faq_justification.html"
    index_title = backend.hit_config[collection_mode].get("index_title", "")
    # todo: migrate tutorial codes
    if collection_mode in ["plain", "adv"]:
        passages = backend.get_passages(uid)
        task_ids = backend.get_task_ids(uid)
        if not index_title:
            index_title = "Writing hard reading comprehension questions."
        return generate_index_html(
            collection_mode,
            index_title,
            os.path.join(template_dir, "index.html"),
            os.path.join(template_dir, instruction_path),
            os.path.join(template_dir, faq_path),
            os.path.join(template_dir, mini_instruction_path),
            os.path.join(template_dir, task_path),
            uid,
            task_ids,
            passages,
            backend.get_current_task_index(uid),
            backend.get_success_task_ids(uid),
            app.state.args.run_mode,
            "yes" if app.state.args.is_external else "no",
            subconfig,
        )
    elif collection_mode in ["validation"]:
        if not index_title:
            if template_name == "evaluation":
                index_title = "Evaluate reading comprehension questions!"
            else:
                index_title = "Solve reading comprehension questions!"
        validation_tasks = backend.get_user_validation_task(uid, worker_id)
        # No tasks can be issued (e.g. worker exhausted the pool): error page.
        if validation_tasks is None:
            backend.logger.info(
                f"Cannot issue a new validation task for worker_id={worker_id}"
            )
            return open(os.path.join(template_dir, "error.html"), "r").read()
        return generate_index_html_validation(
            collection_mode,
            index_title,
            os.path.join(template_dir, "index.html"),
            os.path.join(template_dir, instruction_path),
            os.path.join(template_dir, faq_path),
            os.path.join(template_dir, mini_instruction_path),
            os.path.join(template_dir, task_path),
            uid,
            validation_tasks,
            app.state.args.run_mode,
            "yes" if app.state.args.is_external else "no",
            subconfig=subconfig,
        )
    else:
        raise ValueError(f"Invalid collection mode: {collection_mode}")
@app.get("/", response_class=HTMLResponse)
def read_root(
request: Request,
uid: str = None,
status: str = None,
worker_id: str = None,
assignment_id: str = None,
debug: str = None,
assignmentId: str = None,
workerId: str = None,
):
# # emergency mode
# return "We are sorry that no HITs are available. Please click 'Return' on the HIT page to avoid any impact on your approval rating."
# return "We are sorry that an error has occured. Please click 'Return' on the HIT page to avoid any impact on your approval rating."
if workerId is not None and worker_id is None:
worker_id = workerId
if assignmentId is not None and assignment_id is None:
assignment_id = assignmentId
template_dir = app.state.args.template_dir
run_mode = app.state.args.run_mode
backend = Backend.get_backend(app)
# uid must be included in url when sandbox and release modes
if not uid and run_mode in ["sandbox", "release"]:
# backend.logger.info("Called error.html")
return open(os.path.join(template_dir, "error.html"), "r").read()
# if the link is clicked in preview
if assignment_id == "ASSIGNMENT_ID_NOT_AVAILABLE" and run_mode != "debug":
return open(os.path.join(template_dir, "not_accepted.html"), "r").read()
datestr = datetime.now().isoformat()
if not uid and run_mode == "debug": # issue a uid if debug
uid = backend.get_debug_uid()
if debug:
url = urllib.parse.urlencode({"uid": uid, "debug": debug})
return RedirectResponse(url="?" + url)
else:
url = urllib.parse.urlencode({"uid": uid})
return RedirectResponse(url="?" + url)
# check how many HITs the worker did
if app.state.args.worker_log_path and worker_id:
if not backend.check_if_worker_can_accept_hit(worker_id, uid):
backend.logger.info(f"GET: worker {worker_id} cannot accept hit")
return open(os.path.join(template_dir, "known_worker.html"), "r").read()
else:
backend.logger.info(f"GET: worker {worker_id} can accept hit")
# create user data
backend.logger.info(f"GET / {datestr} / {uid} {worker_id} {assignment_id} {status}")
uid = backend.check_user_data(uid, status, worker_id, assignment_id)
if uid is not None and status == "new":
redirect_url = urllib.parse.urlencode(
{
"uid": uid,
"assignmentId": assignment_id,
"workerId": worker_id,
"status": "redirect",
}
)
return RedirectResponse(url="?" + redirect_url)
if uid == "" and run_mode != "debug":
backend.logger.info("Called error.html")
return open(os.path.join(template_dir, "error.html"), "r").read()
backend.logging_current_status()
state = backend.get_current_state(uid)
if state == SourcingState.START or state == SourcingState.WORKING:
return call_template(
template_dir, backend, uid, worker_id, run_mode, app.state.args.is_external
)
elif state == SourcingState.FINISHED or state == SourcingState.QUIT:
fpath = os.path.join(template_dir, "finished.html")
template = Template(open(fpath, "r").read())
return template.substitute(
{"completion_code": backend.get_completion_code(uid)}
)
else:
return open(os.path.join(template_dir, "error.html"), "r").read()
@app.post("/predict")
def predict(body: RequestBody):
backend = Backend.get_backend(app)
response = make_prediction(body)
backend.save_submission(
body.uid.strip(), body.passage_id, body.convert_to_example(), response
)
return response or {"success": True}
@app.post("/submit")
def submit(body: RequestBody):
backend = Backend.get_backend(app)
backend.save_submission(
body.uid.strip(), body.passage_id, body.convert_to_example(), None
)
return {"success": True}
@app.post("/complete") # complete writing HIT
async def complete(body: CommentBody, background_tasks: BackgroundTasks):
backend = Backend.get_backend(app)
backend.finalize_user_assignment(body.uid)
backend.save_comment(body.uid, body.comment)
backend.logger.info(f"Mturk submitted: {backend.get_current_state(body.uid)} {body.uid} {body.worker_id} {body.assignment_id}")
backend.logging_current_status()
background_tasks.add_task(
backend.sleep_and_process_terminal_actions,
body.uid,
body.worker_id,
body.assignment_id,
)
return {"completion_code": backend.get_completion_code(body.uid)}
@app.post("/validation_submit") # complete validation HIT
async def validation_submit(
body: ValidationRequestBody, background_tasks: BackgroundTasks
):
backend = Backend.get_backend(app)
uid = body.uid.strip()
responses = body.responses
mode = body.mode
backend.save_validation_submission(body.uid.strip(), mode, responses)
background_tasks.add_task(
backend.sleep_and_process_terminal_actions,
uid,
body.worker_id,
body.assignment_id,
)
backend.logging_current_status()
backend.logger.info(f"Mturk submitted: {backend.get_current_state(body.uid)} {body.uid} {body.worker_id} {body.assignment_id}")
return {"completion_code": backend.get_completion_code(uid)}
@app.get("/serialize")
def serialize_backend_data():
"""
To stop the API server and resume it with the same data
"""
backend = Backend.get_backend(app)
backend.serialize_data()
@app.get("/show_log", include_in_schema=False)
def show_log_file(mode: str = "log", passcode: str = None):
if app.state.args.passcode:
backend = Backend.get_backend(app)
res = backend.show_log(mode, passcode, app.state.args.passcode)
return Response(content=res)
else:
return ""
@app.get("/set_value", include_in_schema=False)
def set_value(variable: str, value: int, passcode: str = None):
if not app.state.args.passcode:
return ""
backend = Backend.get_backend(app)
if not backend.check_passcode(passcode, app.state.args.passcode):
return "incorrect passcode"
if variable == "max_acceptable_hit_num":
backend.max_acceptable_hit_num = value
return f"set {variable}={value}"
@app.get("/get_value", include_in_schema=False)
def get_value(variable: str, passcode: str = None):
if not app.state.args.passcode:
return ""
backend = Backend.get_backend(app)
if not backend.check_passcode(passcode, app.state.args.passcode):
return "incorrect passcode"
if variable == "max_acceptable_hit_num":
return f"{variable}={backend.max_acceptable_hit_num}"
@app.get("/enter_debug", include_in_schema=False)
def enter_debug(passcode: str = None):
if not app.state.args.passcode:
return ""
backend = Backend.get_backend(app)
if not backend.check_passcode(passcode, app.state.args.passcode):
return "incorrect passcode"
# backend.dataset["validation"]['gutenberg-Deephaven_and_Selected_Stories_&_Sketches-paragraph-0152_adv_3VW6495TLOJSA6R7QJX94RMER2TYYR_0'] = backend.dataset["validation"]['gutenberg-Deephaven_and_Selected_Stories_&_Sketches-paragraph-0152_adv_3VW6495TLOJSA6R7QJX94RMER2TYYR_0']
breakpoint()
@app.post("/working")
def update_working_status(body: WorkingRequestBody):
uid = body.uid
backend = Backend.get_backend(app)
backend.update_current_time(uid)
return {"updated": 1}
def check_config_template_paths(config, template_dir):
    """Verify that every HTML template referenced by a HIT config exists.

    For each template kind, an explicit "<kind>_template" config entry wins;
    otherwise the file name is derived from "template_name" or, failing that,
    from "collection_mode".

    Args:
        config: one HIT configuration dict
        template_dir: directory containing the HTML templates

    Raises:
        FileNotFoundError: if a required template file is missing. (The
            original used `assert`, which is silently stripped under
            `python -O`, so missing templates could slip through.)
    """
    def require_exists(file_name):
        # Raise with the full path so the operator can fix the config quickly.
        filepath = os.path.join(template_dir, file_name)
        if not os.path.exists(filepath):
            raise FileNotFoundError(f"Not found: {filepath}")
    for temp_type in ["instruction", "mini_instruction", "faq", "task"]:
        if f"{temp_type}_template" in config:
            require_exists(config[f"{temp_type}_template"])
        elif "template_name" in config:
            # TODO
            require_exists(f"{temp_type}_{config['template_name']}.html")
        else:
            require_exists(f"{temp_type}_{config['collection_mode']}.html")
if __name__ == "__main__":
if sys.version_info.minor <= 6:
print("Run with Python >= 3.7")
exit(1)
# setup arguments
args = get_arguments()
# check arguments
if args.max_acceptable_hit_num == 1 and args.run_mode == "release":
msg = f"Proceed with args.max_acceptable_hit_num={args.max_acceptable_hit_num}? [Y/N] "
if input(msg) != "Y":
raise ValueError("Aborted.")
hit_configs = json.load(open(args.hit_config, "r"))
setattr(args, "hit_config", {})
for config in hit_configs:
check_config_template_paths(config, args.template_dir)
if "hit_template" in config:
config["hit_template"] = os.path.join(
args.template_dir, config["hit_template"]
)
args.hit_config[config["collection_mode"]] = config
if len(args.hit_config) == 1:
collection_mode = list(args.hit_config.keys())[0]
else:
collection_mode = "multiple"
# if not args.collected_passage_path and args.run_mode in ["release", "sandbox"]:
# msg = f"Proceed without args.collected_passage_path? [Y/N] "
# if input(msg) != "Y":
# raise ValueError("Aborted.")
# set up path strings
datestr = datetime.now().strftime("%Y%m%d%H%M%S")
if not args.worker_log_path and args.run_mode != "debug":
args.worker_log_path = os.path.join(
args.worker_log_dir, f"{args.run_mode}{datestr}.json"
)
args.log_dir = os.path.join(args.log_dir, f"{args.run_mode}", f"{collection_mode}")
os.makedirs(args.log_dir, exist_ok=True)
if args.resume:
annot_datestr = os.path.basename(os.path.normpath(args.save_dir))
log_file = f"{args.log_dir}/{annot_datestr}.log"
if os.path.exists(log_file):
args.log_file = log_file
else:
args.log_file = f"{args.log_dir}/{args.annotation}{datestr}resume.log"
else:
args.save_dir = os.path.join(
args.save_dir,
f"{args.run_mode}",
f"{collection_mode}",
f"{args.annotation}{datestr}",
)
os.makedirs(args.save_dir, exist_ok=True)
args.log_file = f"{args.log_dir}/{args.annotation}{datestr}.log"
if args.run_mode == "debug":
args.is_external = True
print(json.dumps(args.__dict__))
for k, v in json.load(open(args.mturk_config, "r")).items():
setattr(args, k, v)
# setup models
models = []
if (
not args.no_model
and len(set(args.hit_config.keys()) & set(["adv"])) > 0
):
for k, v in json.load(open(args.model_config, "r")).items():
setattr(args, k, v)
from model.run_multiple_choice import load_model
if args.model_type == "unifiedqa":
from model.run_multiple_choice import predict_generation_example as predict_example
else:
from model.run_multiple_choice import predict_example
if args.model_ensemble:
for model_path in args.ensemble_models:
args.model_name_or_path = model_path
model, processor, tokenizer = load_model(args)
models.append(model)
else:
model, processor, tokenizer = load_model(args)
models.append(model)
else:
processor, tokenizer = None, None
app.state.model_kit = {
"models": models,
"processor": processor,
"tokenizer": tokenizer,
}
app.state.args = args
if args.is_external:
question_template_path = None
else:
question_template_path = os.path.join(
args.template_dir, "hit_template_COLLECTION_MODE.html"
)
if not args.base_url.endswith(":8000"):
if args.base_url.endswith("/"):
args.base_url = args.base_url[:-1]
args.base_url = args.base_url + f":{args.port}"
mturk_config = {
"access_id": args.aws_access_key_id,
"secret_key": args.aws_secret_access_key,
"base_url": args.base_url,
"template_path": question_template_path,
"issued_hits": "issued_hits", # save directory of issued HITs
}
app.state.mturk_config = mturk_config
Backend.get_backend(app)
# start app
if args.run_mode in ["sandbox", "release"] and "localhost" not in args.base_url:
uvicorn.run(app, host="0.0.0.0", port=args.port, log_level="info")
else:
uvicorn.run(app, log_level="info")
|
# Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Functions Lab
# MAGIC
# MAGIC Building on the previous lab, the FizzBuzz Test, we are going to refactor that code into a function.
# MAGIC 0. Declare a function.
# MAGIC 0. The name of the function should be **`fizz_buzz`**
# MAGIC 0. The function has one parameter, presumably an integer (**`int`**).
# MAGIC 0. The function should return a string (**`str`**)
# MAGIC 0. Add a guard, or pre-condition, that asserts that the one specified parameter is of type **`int`**.
# MAGIC
# MAGIC Bonus: Update your function to use type hints.
# COMMAND ----------
# MAGIC %md To help you get started, we have included one possible solution to the Fizz Buzz Test here.
# MAGIC
# MAGIC NOTE: You will not want to include the for loop when you make your function.
# COMMAND ----------
# Reference FizzBuzz solution: build the label from the divisibility rules,
# falling back to the number itself when neither rule applies.
for num in range(1, 101):
    label = ""
    if num % 3 == 0:
        label += "Fizz"
    if num % 5 == 0:
        label += "Buzz"
    print(label if label else num)
# COMMAND ----------
# TODO
def FILL_IN
assert FILL_IN
if FILL_IN
# COMMAND ----------
# MAGIC %md Use the code below to test your function.
# COMMAND ----------
expected = "Fizz"
result = fizz_buzz(3)
assert result == expected, f"Expected {expected}, but found {result}."
expected = "Buzz"
result = fizz_buzz(5)
assert result == expected, f"Expected {expected}, but found {result}."
expected = "FizzBuzz"
result = fizz_buzz(15)
assert result == expected, f"Expected {expected}, but found {result}."
expected = "7"
result = fizz_buzz(7)
assert result == expected, f"Expected {expected}, but found {result}."
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2021 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
|
import os
import unittest
from XvfbRobot import XvfbRobot
def reset_display():
    """Point the DISPLAY environment variable back at the default X display."""
    os.environ.update(DISPLAY=":0")
class TestXvfbRobot(unittest.TestCase):
    """Exercises XvfbRobot's virtual-display startup paths."""

    def setUp(self):
        # Every test starts from a known DISPLAY value.
        reset_display()

    def test_start_virtual_display(self):
        robot = XvfbRobot()
        robot.start_virtual_display()
        expected = f":{robot._display.new_display}"
        self.assertIsNotNone(robot._display)
        self.assertEqual(expected, os.environ["DISPLAY"])

    def test_start_without_existing_display(self):
        # Startup must succeed even when no DISPLAY is set at all.
        del os.environ["DISPLAY"]
        robot = XvfbRobot()
        robot.start_virtual_display()
        expected = f":{robot._display.new_display}"
        self.assertIsNotNone(robot._display)
        self.assertEqual(expected, os.environ["DISPLAY"])

    def test_start_with_kwargs(self):
        width, height, colordepth = 800, 600, 16
        robot = XvfbRobot()
        robot.start_virtual_display(width=width, height=height, colordepth=colordepth)
        self.assertIsNotNone(robot._display)
        self.assertEqual(width, robot._display.width)
        self.assertEqual(height, robot._display.height)
        self.assertEqual(colordepth, robot._display.colordepth)
        self.assertEqual(f":{robot._display.new_display}", os.environ["DISPLAY"])

    def test_start_with_arbitrary_kwargs(self):
        # Unknown-but-valid Xvfb options are forwarded verbatim.
        robot = XvfbRobot()
        robot.start_virtual_display(nolisten="tcp")
        self.assertIsNotNone(robot._display)
        self.assertEqual(f":{robot._display.new_display}", os.environ["DISPLAY"])

    def test_start_fails_with_unknown_kwargs(self):
        robot = XvfbRobot()
        with self.assertRaises(RuntimeError):
            robot.start_virtual_display(foo="bar")
|
"""
/llrws/tools/mave/validation/__init__.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tools for interfacing with MAVE file validation tasks.
"""
import csv
import warnings
import numpy as np
import pandas as pd
warnings.simplefilter(action="ignore", category=UserWarning)
def get_tidy_pd_dataframe_from_csv(csv_filepath):
    """Similar to pd.read_csv, except there are tidying steps to remove
    metadata headers while preserving appropriate dtypes.

    Args:
        csv_filepath (str): File path to benchmark CSV file

    Returns:
        (pandas.core.frame.DataFrame): Tidied pandas dataframe
    """
    with open(csv_filepath, newline="") as csvfile:
        csv_reader = csv.reader(csvfile, skipinitialspace=True)
        csv_df = pd.DataFrame(remove_metadata_header_from_csv(csv_reader))
    # Convert 'NA' to NaN dtype
    csv_df = csv_df.replace("NA", np.nan)
    # Grab first row and set as the new header
    new_header = csv_df.iloc[0]
    csv_df = csv_df[1:]
    csv_df.columns = new_header

    # Convert each column to a numeric dtype where possible, leaving
    # non-numeric columns untouched. This replaces
    # `apply(pd.to_numeric, errors="ignore")`: the "ignore" option is
    # deprecated in pandas >= 2.2 and removed in pandas 3.0.
    def _numeric_or_original(column):
        try:
            return pd.to_numeric(column)
        except (ValueError, TypeError):
            return column

    csv_df = csv_df.apply(_numeric_or_original)
    return csv_df.reset_index(drop=True)
def remove_metadata_header_from_csv(csv_reader):
    """Trims metadata header (starts with '#') from stream of CSV content.

    Args:
        csv_reader (_csv.reader): Reader object of a CSV file

    Yields:
        (list): CSV rows without metadata header
    """
    for row in csv_reader:
        # Don't pop!
        # Guard `row` first: indexing row[0] on a blank line (empty list)
        # raised IndexError in the original. Blank rows pass through.
        if row and row[0].startswith("#"):
            continue
        yield row
|
"""
This is a module used to get hardware info
"""
import psutil
import cpuinfo
import math
import platform
import sys
import sysconfig
from datetime import datetime
import speedtest
class CPU:
    """Static helpers exposing processor details via cpuinfo/psutil."""

    # Queried once at import time; cpuinfo lookups can be slow.
    info = cpuinfo.get_cpu_info()

    @staticmethod
    def cpu_cores(hyperthreading=False):
        """Physical core count; logical cores when `hyperthreading` is True."""
        return psutil.cpu_count(logical=hyperthreading)

    @staticmethod
    def architecture():
        """Architecture string reported by cpuinfo (e.g. 'X86_64')."""
        return CPU.info['arch']

    @staticmethod
    def name():
        """Marketing/brand name of the processor."""
        return CPU.info['brand_raw']

    @staticmethod
    def percent(time=0.2):
        """CPU utilization percentage sampled over `time` seconds."""
        return psutil.cpu_percent(time)

    @staticmethod
    def temp(fahrenheit=False):
        """CPU temperature via a per-OS backend (Linux/Windows/macOS)."""
        system = platform.system()
        if system == 'Linux':
            return psutil.sensors_temperatures(fahrenheit=fahrenheit)['coretemp']
        if system == 'Windows':
            import WinTmp
            return WinTmp.CPU_Temp()
        if system == 'Darwin':
            import MacTmp
            return MacTmp.CPU_Temp()
class GPU:
    """Static helpers exposing graphics adapter details."""

    @staticmethod
    def Get_Gpus(multiple=False):
        """Windows-only: list all GPU names when `multiple`, otherwise return
        the first Win32_VideoController record. Returns None on other OSes.
        """
        if platform.system() == 'Windows':
            import wmi
            computer = wmi.WMI()
            if multiple:
                return [gpu.name for gpu in computer.Win32_VideoController()]
            else:
                return computer.Win32_VideoController()[0]

    @staticmethod
    def temp(fahrenheit=False):
        """GPU/board temperatures via a per-OS backend.

        Bug fix: `fahrenheit` was referenced without ever being defined
        (NameError on Linux); it is now a keyword parameter defaulting to
        False, mirroring CPU.temp.
        """
        if platform.system() == 'Linux':
            return psutil.sensors_temperatures(fahrenheit=fahrenheit)
        elif platform.system() == 'Windows':
            import WinTmp
            return WinTmp.GPU_Temp()
        elif platform.system() == 'Darwin':
            import MacTmp
            return MacTmp.GPU_Temp()
class Ram:
    """Static helpers reporting system memory in gigabytes."""

    # Snapshot taken at import time; call refresh() for current figures.
    mem = psutil.virtual_memory()

    @staticmethod
    def total_mem(acc):
        """Total installed memory in GB, rounded to `acc` decimal places."""
        return round(Ram.mem.total / 1000000000, acc)

    @staticmethod
    def used_mem(acc):
        """Memory currently in use, in GB."""
        return round(Ram.mem.used / 1000000000, acc)

    @staticmethod
    def free_mem(acc):
        """Memory available for new allocations, in GB."""
        return round(Ram.mem.available / 1000000000, acc)

    @staticmethod
    def refresh():
        """Re-sample the virtual memory statistics."""
        Ram.mem = psutil.virtual_memory()
class Disk:
    """Static helpers for disk partitions, usage and throughput."""

    @staticmethod
    def list_disks(every=False):
        """Return one dict per partition with device/mountpoint/fstype/opts.

        Bug fix: the original iterated over each partition namedtuple's
        *fields* and then indexed characters out of the field strings,
        producing one garbage dict per field; partitions are now read via
        their named attributes.
        """
        return [
            {
                'device': part.device,
                'mountpoint': part.mountpoint,
                'fstype': part.fstype,
                'opts': part.opts,
            }
            for part in psutil.disk_partitions(all=every)
        ]

    @staticmethod
    def get_size(bts, suffix="B"):
        """
        Scale bytes to its proper format
        e.g:
            1253656 => '1.20MB'
            1253656678 => '1.17GB'
        """
        factor = 1024
        for unit in ["", "K", "M", "G", "T", "P"]:
            if bts < factor:
                return f"{bts:.2f}{unit}{suffix}"
            bts /= factor
        # Values beyond petabytes previously fell off the loop and returned
        # None; report them in exabytes instead.
        return f"{bts:.2f}E{suffix}"

    @staticmethod
    def total_r_and_w():
        """Human-readable bytes read/written across all disks since boot."""
        disk_io = psutil.disk_io_counters()
        return {'read': Disk.get_size(disk_io.read_bytes), 'write': Disk.get_size(disk_io.write_bytes)}

    @staticmethod
    def space(every=False):
        """Per-partition total/used/free sizes plus usage percentage."""
        partitions = psutil.disk_partitions(all=every)
        to_return = {}
        for partition in partitions:
            try:
                partition_usage = psutil.disk_usage(partition.mountpoint)
            except PermissionError:
                # raised for disks that aren't ready (e.g. empty card readers)
                continue
            to_return[(partition.device, partition.mountpoint, partition.fstype)] = {
                'total': Disk.get_size(partition_usage.total),
                'used': Disk.get_size(partition_usage.used),
                'free': Disk.get_size(partition_usage.free),
                'usage_percentage': partition_usage.percent
            }
        return to_return
class Network:
    """Static helpers for local IP discovery and bandwidth measurement."""

    @staticmethod
    def get_ip():
        """Best-effort local IP via a UDP socket (no packets are actually
        sent); returns None when no address can be determined."""
        import socket
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            # doesn't have to be reachable
            s.connect(('10.255.255.255', 1))
            IP = s.getsockname()[0]
        except Exception:
            IP = None
        finally:
            s.close()
        return IP

    @staticmethod
    def singlespeedtest():
        """One speedtest.net run; returns (download, upload, ping)."""
        s = speedtest.Speedtest()
        s.get_servers()
        s.get_best_server()
        s.download()
        s.upload()
        res = s.results.dict()
        return res["download"], res["upload"], res["ping"]

    @staticmethod
    def speedtest():
        """Run three consecutive speed tests and return the last result.

        Bug fix: the original compared the loop index against 3, a value
        range(3) never yields, so the method always returned None.
        """
        for i in range(3):
            d, u, p = Network.singlespeedtest()
            print('Test #{}\n'.format(i + 1))
        return {'Download': d, 'Upload': u, 'Ping': p}
class PythonInfo:
    """Static facts about the running Python interpreter."""

    # Interpreter version as 'major.minor.micro'.
    version = ".".join(str(part) for part in sys.version_info[:3])
    interpreterlocation = sys.executable
    location = sys.exec_prefix
    copyrightinfo = sys.copyright
    pythoninfo = sys.version
class System:
    """Static helpers for OS identity, hardware inventory, sessions, battery."""

    @staticmethod
    def name():
        """[OS name, OS release], e.g. ['Linux', '5.15.0-78-generic']."""
        return [platform.system(), platform.release()]

    @staticmethod
    def hardware(more = False):
        """Windows-only CPU/GPU inventory via WMI; None on other platforms.

        With more=True the raw WMI records are returned instead of names.
        """
        if platform.system() == 'Windows':
            import wmi
            computer = wmi.WMI()
            if more:
                cpus = [proc for proc in computer.Win32_Processor()]
                gpus = [gpu for gpu in computer.Win32_VideoController()]
            else:
                cpus = [proc.name for proc in computer.Win32_Processor()]
                gpus = [gpu.name for gpu in computer.Win32_VideoController()]
            return {'cpus': cpus, 'gpus': gpus}
        else:
            return None

    @staticmethod
    def users():
        """Currently logged-in user sessions."""
        return psutil.users()

    @staticmethod
    def fans_rpm():
        """Fan speeds (Linux only); None elsewhere."""
        if platform.system() == 'Linux':
            return psutil.sensors_fans()
        else:
            return None

    @staticmethod
    def battery_info():
        """Battery status dict, or None when no battery is present.

        Bug fixes: hours were previously derived with /1440 (minutes per
        day) instead of /3600 (seconds per hour); and sensors_battery()
        returns None on desktop machines, which crashed `list(None)`.
        """
        battery = psutil.sensors_battery()
        if battery is None:
            return None
        info = list(battery)
        return {
            'Percentage': info[0],
            'SecondLeft': info[1],
            'MinutesLeft': round(info[1] / 60),
            'HoursLeft': round(info[1] / 3600, 2),
            'PluggedIn': info[2],
        }
|
from django.shortcuts import render,redirect,HttpResponse,reverse
from django.views.generic import View
from apps.imgshow.models import Imgmain
class Imghomepage(View):
    """Image home page view: renders imghomepage.html with all Imgmain rows."""

    def get(self,request):
        # Exposed to the template as 'img' via locals(); renaming this local
        # would silently change the template context.
        img=Imgmain.objects.all()
        # NOTE(review): locals() also leaks 'self' and 'request' into the
        # context; presumably the template only uses 'img' — confirm.
        return render(request,'imghomepage.html',locals())
|
from vou.utils import logistic
from random import Random
from enum import IntEnum, unique
from itertools import repeat
from collections import deque
import numpy as np
@unique
class BehaviorWhenResumingUse(IntEnum):
SAME_DOSE = 0
LOWER_DOSE = 1
@unique
class OverdoseType(IntEnum):
NON_FATAL = 0
FATAL = 1
class Person:
def __init__(
self,
rng: Random,
starting_dose: int = 50,
dose_increase: int = 25,
base_threshold: float = 0.001,
tolerance_window: int = 3_000,
external_risk: float = 0.5,
internal_risk: float = 0.5,
behavioral_variability: float = 0.1,
behavior_when_resuming_use: BehaviorWhenResumingUse = None,
):
# Parameters
self.rng = rng
self.dose = starting_dose
self.dose_increase = dose_increase
self.threshold = base_threshold
self.tolerance_window = tolerance_window
self.external_risk = external_risk
self.internal_risk = internal_risk
self.behavioral_variability = behavioral_variability
self.update_downward_pressure()
self.set_risk_logit()
self.behavior_when_resuming_use = behavior_when_resuming_use
self.post_OD_use_pause = None
self.last_dose_increase = 0
# A bunch of empty lists to store data during simulation
self.concentration = []
self.tolerance_input = deque(repeat(0, self.tolerance_window))
self.tolerance_input_sum = 0
self.desperation = []
self.habit = []
self.effect = []
self.overdoses = []
self.effect_record = {}
self.took_dose = []
def update_downward_pressure(
self, midpoint_min: int = 100, midpoint_max: int = 1_000
):
"""
Sets the person's downward pressure. Downward pressure is intended to represent
the person's overall motivation NOT to use and increase dose. It counterbalances
their motivation TO use from their threshold and desperation and their motivation
TO increase dose from their effect and increase threshold.
Downward pressure is computed with a logistic function taking the following
arguments:
- Person's external risk: used as the intercept, or "baseline" downward pressure
- Person's internal risk: transformed and used as the midpoint of the logistic
curve
- Person's current dose: used as the X value
"""
# Person's internal risk is a value from 0 to 1. We use this in its raw
# form, but also need to convert it to the sigmoid midpoint parameter for the
# downward pressure logistic function. This function takes an internal risk from
# 0 to 1 and scales it to a midpoint in the specified range.
midpoint_range = midpoint_max - midpoint_min
midpoint = (self.internal_risk * midpoint_range) + midpoint_min
# External risk is quantified as 0=good, 1=bad for intuitiveness. However,
# in the logistic function for downward pressure, 0 is bad and 1 is good,
# since higher values lead to more downward pressure. Therefore, we invert
# external risk to get the user's downward pressure baseline.
baseline_dp = 1 - self.external_risk
# Main logistic function
self.downward_pressure = baseline_dp + (
(1 - baseline_dp) / (1 + np.exp(-0.005 * (self.dose - midpoint)))
)
def set_risk_logit(self):
"""
The risk logit is used to adjust the person's threshold for opioid use. Persons
with extreme risk levels (low or high) will have extreme threshold multipliers,
while persons with normal risk levels will have threshold multipliers close to zero.
"""
avg_risk = (self.external_risk + self.internal_risk) / 2
self.risk_logit = np.log(avg_risk / (1 - avg_risk)) / 0.25
def lower_dose_after_pause(self):
"""
Sets the person's dose to their maximum past habit, rounded to the nearest
increment of their dose increase amount.
Also updates their downward pressure since dose has changed.
"""
self.dose = self.dose_increase * round(max(self.habit) / self.dose_increase)
self.update_downward_pressure()
def will_take_dose(self, t: int):
"""
Evaluates several conditions to decide whether the person will take another
dose at a given time point.
Returns a boolean value indicating whether the person will take a dose.
"""
# Is a recent overdose preventing the person from using?
if self.overdoses and t < self.overdoses[-1] + self.post_OD_use_pause:
return False
# Does the person want another dose?
elif self.concentration[-1] > self.threshold:
return False
# Does downward pressure prevent person from taking dose when they want one?
elif self.rng.random() < self.downward_pressure:
return False
else:
return True
def did_overdose(self, x0: float = 1243.6936832876, k: float = 0.0143710866):
"""
Checks whether the person's most recent opioid dose causes an overdose.
First, a baseline OD risk value is generated using a function derived from
Dasgupta et al 2016. A logistic model was fitted to their data, along with
the assumption that a dose of 2 grams has an OD probability of 1. (See
notebooks/od_risk_curve.ipynb). We use that model to generate the baseline
risk value for the person's dose.
Since the Dasgupta study used prescription data, we assume that these overdose
risks are for people who are tolerant to their prescribed dose. Therefore,
we add an additional risk multiplier based on the ratio of the dose to the
person's tolerance.
A very general heuristic is that at steady state, (preferred_dose / tolerance)
roughly equals 2. We define "excess" as (dose / tolerance) - 1, or roughly 1 at
steady state. We multiply the person's baseline OD risk by this excess squared.
"""
dose = self.concentration[-1]
tolerance = self.habit[-1]
# bound extremely low tolerance values to avoid huge excess values
tolerance = max(1, tolerance)
# parameters based on Dasgupta et al 2016 - see notebooks/od_risk_curve.ipynb
baseline_OD_risk = logistic(x=dose, L=1, k=k, x0=x0)
excess = ((dose / tolerance) - 1) ** 2
tolerance_adjusted_OD_risk = baseline_OD_risk * excess
if self.rng.random() < tolerance_adjusted_OD_risk:
# Overdose occurred
return True
def overdose(self, t: int):
"""
Takes the necessary actions when the person has overdosed. Records the OD,
sets the amount of time the person will stop using after OD, and adjusts
their dose if they will reduce their dose after OD.
Returns the type of overdose, fatal or non-fatal. Simulation.simulate() uses
this value to break the simulation loop in the case of a fatal OD.
"""
self.overdoses.append(t)
# Set amount of time person will stop using after OD.
self.post_OD_use_pause = self.compute_OD_use_pause()
# Adjust person's dose.
self.dose = self.dose * self.compute_OD_dose_reduction()
# Check if overdose caused death. Per Dunn et al 2010, about 1 in every 8.5
# ODs is fatal.
if self.rng.random() < (1 / 8.5):
return OverdoseType.FATAL
else:
return OverdoseType.NON_FATAL
def compute_OD_use_pause(self):
"""
Computes the amount of time the person will pause use after an overdose based
on the person's risk factors and a random draw. The lowest risk persons will
pause 60 days. This value decays exponentially quite quickly, since research
shows that most persons resume use within 24 hours of an OD.
"""
maximum = 60 * 100
rate = -0.999
rand = self.rng.uniform(0.5, 1.5)
combined_risk = self.internal_risk + self.external_risk
pause = (maximum * (1 + rate) ** combined_risk) * rand
return pause
def compute_OD_dose_reduction(self):
"""
Computes a multiplier by which the person will reduce their dose after an OD
based on the person's risk factors and a random draw. The lowest risk persons
will reduce their dose by half, while the highest risk persons will maintain
the same dose.
"""
intercept = 0.5
slope = 0.25
rand = self.rng.uniform(0.5, 1.5)
combined_risk = self.internal_risk + self.external_risk
dose_reduction = (combined_risk * slope + intercept) * rand
if dose_reduction > 1:
return 1
else:
return dose_reduction
def will_increase_dose(
self, effect_window: int = 20, increase_threshold: float = 0.4,
):
"""
Checks whether the person will increase their dose. Based on a comparison of
the average of past dose effects to the person's desired dose. Effect window
and increase threshold are calibrated parameters and not intended to be varied
during simulation.
"""
if self.dose < 2_000:
last_n_dose_effects = [
self.effect_record[d]
for d in self.took_dose[-effect_window:]
if d > self.last_dose_increase
]
if np.mean(last_n_dose_effects) < (self.dose * increase_threshold):
if self.rng.random() > self.downward_pressure:
return True
def increase_dose(self, t: int):
    """
    Apply a dose increase at time ``t``.

    Bumps the dose by the person's increment, records ``t`` as the most
    recent increase time, and refreshes the downward pressure associated
    with the new dose level.
    """
    self.dose = self.dose + self.dose_increase
    self.last_dose_increase = t
    self.update_downward_pressure()
|
# Copyright 2017-2020 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import logging
import pprint
import six
from guild import flag_util
from guild import util
log = logging.getLogger("guild")
###################################################################
# State
###################################################################
class OpCmd(object):
    """Value object describing how to build an operation command.

    Holds the raw command arg template, the base environment, the
    per-flag command configuration, and the flag destination spec.
    """

    def __init__(self, cmd_args, cmd_env, cmd_flags, flags_dest):
        self.flags_dest = flags_dest
        self.cmd_flags = cmd_flags
        self.cmd_env = cmd_env
        self.cmd_args = cmd_args
class CmdFlag(object):
    """Per-flag command configuration.

    arg_name   -- alternative command line option name for the flag
    arg_skip   -- when true, the flag is never rendered as an argument
    arg_switch -- value that, when matched, renders a bare switch option
    env_name   -- alternative environment variable name for the flag
    """

    def __init__(self, arg_name=None, arg_skip=False, arg_switch=None, env_name=None):
        self.env_name = env_name
        self.arg_switch = arg_switch
        self.arg_skip = arg_skip
        self.arg_name = arg_name
###################################################################
# Generate command
###################################################################
def generate(op_cmd, flag_vals, resolve_params):
    """Return an (args, env) pair generated for op_cmd and flag values."""
    args = _gen_args(op_cmd, flag_vals, resolve_params)
    env = _gen_env(op_cmd, flag_vals)
    return args, env
def _gen_args(op_cmd, flag_vals, resolve_params):
    """Expand op_cmd.cmd_args into a concrete argument list.

    The "__flag_args__" placeholder is replaced by generated flag
    arguments; every other template arg has its references resolved
    against the encoded resolve params.
    """
    params = _encode_arg_params(resolve_params)
    args = []
    for template_arg in op_cmd.cmd_args:
        if template_arg != "__flag_args__":
            args.append(util.resolve_refs(template_arg, params))
        else:
            args.extend(
                _flag_args(flag_vals, op_cmd.flags_dest, op_cmd.cmd_flags, args)
            )
    return args
def _encode_arg_params(params):
    """Return params with every value encoded for use in command args."""
    encoded = {}
    for name, val in params.items():
        encoded[name] = _encode_general_arg(val)
    return encoded
def _encode_general_arg(val):
    """Encode a general arg value.

    Deliberately delegates to the env-value encoding so command args and
    environment values are encoded consistently.
    """
    return _encode_env_val(val)
def _flag_args(flag_vals, flag_dest, cmd_flags, cmd_args):
    """Return command line args for flag_vals, ordered by flag name."""
    args = []
    for name in sorted(flag_vals):
        flag_args = _args_for_flag(
            name, flag_vals[name], cmd_flags.get(name), flag_dest, cmd_args
        )
        args.extend(flag_args)
    return args
def _args_for_flag(name, val, cmd_flag, flag_dest, cmd_args):
    """Return the command line args for a single flag value.

    Returns an empty list when the flag is skipped, shadowed by an
    explicit option already in cmd_args, a non-matching switch, or None.
    """
    if cmd_flag is None:
        cmd_flag = CmdFlag()
    if cmd_flag.arg_skip:
        return []
    arg_name = cmd_flag.arg_name if cmd_flag.arg_name else name
    if "--%s" % arg_name in cmd_args:
        # An explicit option in the operation cmd wins over the flag.
        log.warning(
            "ignoring flag '%s=%s' because it's shadowed "
            "in the operation cmd as --%s",
            name,
            flag_util.encode_flag_val(val),
            arg_name,
        )
        return []
    if cmd_flag.arg_switch is not None:
        # Switch flags render a bare option only on an exact value match.
        if val == cmd_flag.arg_switch:
            return ["--%s" % arg_name]
        return []
    if val is None:
        return []
    return ["--%s" % arg_name, _encode_flag_arg(val, flag_dest)]
def _encode_flag_arg(val, dest):
    """Encode a flag value according to its destination interface."""
    targets_globals = dest == "globals" or dest.startswith("global:")
    if targets_globals:
        return _encode_flag_arg_for_globals(val)
    return _encode_flag_arg_for_argparse(val)
def _encode_flag_arg_for_globals(val):
    """Encode a flag value for the Python globals interface.

    Values destined for globals within a Python module use standard YAML
    encoding, so decoding must likewise use standard YAML decoding.
    """
    return flag_util.format_flag(val)
def _encode_flag_arg_for_argparse(val):
"""Returns an encoded flag val for use by Python argparse.
"""
if val is True:
return "1"
elif val is False or val is None:
return ""
elif isinstance(val, six.string_types):
return val
else:
return pprint.pformat(val)
def _gen_env(op_cmd, flag_vals):
    # Build the operation environment in three ordered steps: start from
    # the encoded static cmd env, resolve flag references inside those
    # values in place, then overlay one env var per flag value.
    env = _encoded_cmd_env(op_cmd)
    _resolve_env_flag_refs(flag_vals, env)
    _apply_flag_env(flag_vals, op_cmd, env)
    return env
def _encoded_cmd_env(op_cmd):
    """Return op_cmd.cmd_env with every value encoded as a string."""
    encoded = {}
    for name in op_cmd.cmd_env:
        encoded[name] = _encode_env_val(op_cmd.cmd_env[name])
    return encoded
def _encode_env_val(val):
if val is True:
return "1"
elif val is False:
return "0"
elif val is None:
return ""
elif isinstance(val, six.string_types):
return val
else:
return pprint.pformat(val)
def _resolve_env_flag_refs(flag_vals, env):
    """Resolve flag references in env values, mutating env in place."""
    for env_name in list(env):
        env[env_name] = util.resolve_refs(env[env_name], flag_vals)
def _apply_flag_env(flag_vals, op_cmd, env):
    """Add one encoded env var per flag value, mutating env in place."""
    for name, val in flag_vals.items():
        env[_flag_env_name(name, op_cmd)] = _encode_env_val(val)
def _flag_env_name(flag_name, op_cmd):
    """Return the env var name for a flag, honoring per-flag overrides."""
    cmd_flag = op_cmd.cmd_flags.get(flag_name)
    if not (cmd_flag and cmd_flag.env_name):
        return _default_flag_env_name(flag_name)
    return cmd_flag.env_name
def _default_flag_env_name(flag_name):
    """Return the conventional FLAG_* env var name for a flag."""
    suffix = util.env_var_name(flag_name)
    return "FLAG_%s" % suffix
###################################################################
# Data IO
###################################################################
def for_data(data):
    """Construct an OpCmd from its serialized dict representation."""
    return OpCmd(
        data.get("cmd-args") or [],
        data.get("cmd-env") or {},
        _cmd_flags_for_data(data.get("cmd-flags")),
        data.get("flags-dest"),
    )
def _cmd_flags_for_data(data):
    """Deserialize the cmd-flags mapping.

    Missing/empty data yields an empty dict; non-dict data raises
    ValueError.
    """
    if not data:
        return {}
    if isinstance(data, dict):
        return {
            name: _cmd_flag_for_data(flag_data)
            for name, flag_data in data.items()
        }
    raise ValueError(data)
def _cmd_flag_for_data(data):
    """Deserialize one CmdFlag; raises ValueError for non-dict data."""
    if not isinstance(data, dict):
        raise ValueError(data)
    kw = {
        "arg_name": data.get("arg-name"),
        "arg_skip": data.get("arg-skip"),
        "arg_switch": data.get("arg-switch"),
        "env_name": data.get("env-name"),
    }
    return CmdFlag(**kw)
def as_data(op_cmd):
    """Serialize op_cmd to a dict, omitting empty optional sections."""
    data = {"cmd-args": op_cmd.cmd_args}
    if op_cmd.cmd_env:
        data["cmd-env"] = op_cmd.cmd_env
    flags_data = _cmd_flags_as_data(op_cmd.cmd_flags)
    if flags_data:
        data["cmd-flags"] = flags_data
    if op_cmd.flags_dest:
        data["flags-dest"] = op_cmd.flags_dest
    return data
def _cmd_flags_as_data(cmd_flags):
    """Serialize cmd_flags, dropping flags whose config is all defaults."""
    serialized = {}
    for name, flag in cmd_flags.items():
        flag_data = _cmd_flag_as_data(flag)
        if flag_data:
            serialized[name] = flag_data
    return serialized
def _cmd_flag_as_data(cmd_flag):
data = {}
if cmd_flag.arg_name:
data["arg-name"] = cmd_flag.arg_name
if cmd_flag.arg_skip:
data["arg-skip"] = cmd_flag.arg_skip
if cmd_flag.arg_switch:
data["arg-switch"] = cmd_flag.arg_switch
if cmd_flag.env_name:
data["env-name"] = cmd_flag.env_name
return data
|
import pandas as pd
from tqdm import tqdm
def initFile():
    """Reset the mean-score log to an empty file."""
    # Opening in 'w' mode and writing nothing truncates any prior content.
    with open('../logs/meanScoreLogs.csv', 'w'):
        pass
def getDF(df, angle):
    """Return rows of df matching `angle`, sorted by reward descending."""
    matching = df[df['angle'] == angle]
    return matching.sort_values(by=["reward"], ascending=False)
def writeFile(file):
    """Compute per-(vector, where, angle, power) mean rewards from a log CSV.

    NOTE(review): this function is clearly in a debugging state -- it
    prints the turn list for the first cell and calls exit(), and the
    code that would actually compute and append mean scores is commented
    out below (that commented code is also missing a '+' before str(t)).
    """
    df = pd.read_csv(file, sep=',', header=None, names=(
        'vector', 'where', 'angle', 'power', 'reward', 'turn'))
    # Enumerations of the condition grid recorded in the log.
    angles = [0, 1]
    where = [0, 1, 2]
    power = [3, 5, 7, 12, 16]
    vecs = []  # vectors already processed
    dff = df[~df['vector'].isin(vecs)]
    hoge = dff['vector']
    # Count of distinct vectors (rows whose vector is a first occurrence).
    vecSize = hoge.duplicated().value_counts()[False]
    for _ in tqdm(range(vecSize)):
        # Restrict to rows whose vector has not been processed yet; the
        # first remaining row supplies the next vector to handle.
        dff = df[~df['vector'].isin(vecs)]
        v = str(dff.iloc[0, 0])
        vecs.append(v)
        ans = ""
        for w in where:
            for a in angles:
                for p in power:
                    # Filter to one (vector, where, angle, power) cell.
                    d = dff[dff['vector'] == v]
                    d = d[d['where'] == w]
                    d = d[d['angle'] == a]
                    d = d[d['power'] == p]
                    # Collect turn values (mod 16) seen in this cell.
                    # NOTE(review): this dedup appends while iterating and
                    # re-appends an element once per mismatch, so `turn`
                    # can contain duplicates.
                    turn = []
                    turn.append(int(d.iloc[0, 5]) % 16)
                    for i in range(len(d)):
                        for j in range(len(turn)):
                            if turn[j] == int(d.iloc[i, 5]) % 16:
                                pass
                            else:
                                turn.append(int(d.iloc[i, 5]) % 16)
                    # Debug output: dumps the turn list and aborts the run.
                    print(turn)
                    exit()
                    """
                    for t in turn:
                        d = d[d['turn'] == t]
                        score = 0
                        size = len(d)
                        for i in range(size):
                            score += float(d.iloc[i, 4])
                        score /= size
                        ans += v+","+str(w)+","+str(a)+"," + \
                            str(p)+","+str(score)+','str(t)+"\n"
                    """
        """
        with open('../logs/meanScoreLogs.csv', 'a')as f:
            f.write(ans)
        """
# Guard script execution so importing this module for its helpers does not
# truncate the log and start a (currently debug-aborting) processing run.
if __name__ == '__main__':
    initFile()
    writeFile("../logs/logsVer2Ver2.csv")
|
# Copyright @2018 The CNN_MonoFusion Authors (NetEaseAI-CVlab).
# All Rights Reserved.
#
# Please cited our paper if you find CNN_MonoFusion useful in your research!
#
# See the License for the specific language governing permissions
# and limitations under the License.
#
from __future__ import print_function
import numpy as np
import tensorflow as tf
import socket
import cv2
import time
import sys
sys.path.append("..")
import os
import argparse
import adenet_def
# socket parameters -- bind on all interfaces, port 6666
address = ('', 6666)
# gpu config
# os.environ["CUDA_VISIBLE_DEVICES"]='1'
# depth parameters
depth_factor = 13107  # scale used when quantizing depth into uint16
depth_factor_inv = 1.0/depth_factor
max_depth = 4.5  # predictions beyond this are zeroed (presumably meters -- TODO confirm)
depth_gradient_thr = 0.2  # default depth-gradient cutoff; client may override per connection
img_gradient_thr = 10
INPUT_SIZE = '160,240'
height = 160  # network input height
width = 240  # network input width
channels = 3
batch_size = 1
black_hole_width = 25
# camera parameters
cx_gt = 492.247
cy_gt = 263.355
focal_scale = 1.0  # default; client may override per connection
nyu_focal = 1.49333
# img mean -- per-channel mean subtracted before inference
# (presumably BGR order to match cv2.imdecode -- verify)
IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)
model_path = '../models/adenet_merge_nyu_kinect_tum/neair-adenet-final'
def gradient(img_gray):
    """Return the per-pixel gradient magnitude of a 2-D image array."""
    rows_grad = np.gradient(img_gray, axis=0)
    cols_grad = np.gradient(img_gray, axis=1)
    return np.sqrt(rows_grad * rows_grad + cols_grad * cols_grad)
def load(saver, sess, ckpt_path):
    """Restore trained weights into a session.

    Args:
        saver: TensorFlow saver object.
        sess: TensorFlow session.
        ckpt_path: path to checkpoint file with parameters.
    """
    saver.restore(sess, ckpt_path)
    print("Restored model parameters from {}".format(ckpt_path))
def send_depth(conn, depth):
    """PNG-encode a depth map and send it over conn.

    Wire format: a 16-byte left-justified ASCII length header followed by
    the encoded PNG bytes (mirrors receive_image on the other side).
    """
    result, encoded_depth = cv2.imencode('.png', depth)
    # tobytes() replaces the long-deprecated ndarray.tostring().
    payload = encoded_depth.tobytes()
    header = str(len(payload)).ljust(16).encode()
    conn.sendall(header)
    conn.sendall(payload)
def receive_image(conn):
    """Receive one image from conn; returns None if the peer disconnected.

    Wire format: a 16-byte ASCII length header, then that many bytes of
    an encoded image, decoded here as a BGR color image.
    """
    header = receive_all(conn, 16)
    if header is None:
        return None
    length = int(header.decode('utf-8'))
    buf = receive_all(conn, length)
    if buf is None:
        return None
    # np.frombuffer replaces the deprecated np.fromstring for binary data.
    encoded_img = np.frombuffer(buf, dtype=np.uint8)
    img = cv2.imdecode(encoded_img, cv2.IMREAD_COLOR)
    return img
def receive_all(conn, count):
    """Read exactly count bytes from conn.

    Returns the accumulated bytes, or None if the connection closed
    before count bytes arrived (socket.recv may return short reads).
    """
    chunks = []
    remaining = count
    while remaining:
        chunk = conn.recv(remaining)
        if not chunk:
            return None
        chunks.append(chunk)
        remaining -= len(chunk)
    return b''.join(chunks)
def main():
    """Run the depth-estimation server.

    Builds the ResNet50 atrous depth network once, restores weights from
    ``model_path``, then loops forever accepting TCP connections. Each
    client first sends two 16-byte ASCII config fields (depth-gradient
    threshold and focal scale), then streams images; the server replies
    with a quantized uint16 depth map per image.
    """
    print("Adaptive-depth-esti-net server socket !")
    # Create a placeholder for the input image (batch, height, width, channels).
    input_node = tf.placeholder(tf.float32, shape=(None, height, width, channels))
    # Construct the network
    net = adenet_def.ResNet50_astrous_concat({'data': input_node}, batch_size, 1, False)
    # SESSION -- allow_growth avoids reserving all GPU memory up front.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    # config = tf.ConfigProto(allow_soft_placement=True)
    sess = tf.Session(config=config)
    # INIT
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    # SAVER
    print('Loading astrous-os8-concat model ......')
    loader = tf.train.Saver()
    load(loader, sess, model_path)
    print('Load done!')
    # SOCKET -- listens on the module-level `address` (all interfaces, port 6666).
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(address)
    while True:
        print('Wait for connection...')
        s.listen(True)
        conn, addr = s.accept()
        print('Accept a connection.')
        # receive image size of client
        # Per-connection configuration: two fixed 16-byte ASCII fields.
        depth_thr = 0.1
        str_depth_thr = receive_all(conn, 16)
        str_focal_scale = receive_all(conn, 16)
        if str_depth_thr is None:
            print('Can not receive client\'s image parameters. Use ground truth instead.')
            depth_thr = 0.1
        else:
            str_depth_thr = str_depth_thr.decode('utf-8')
            depth_thr = float(str_depth_thr)
            print('User Config Depth_Gradient_Thr: ', depth_thr)
        # NOTE: this assignment makes focal_scale a local that shadows the
        # module-level default of the same name.
        focal_scale = 1.0
        if str_focal_scale is None:
            print('Can not receive client\'s image parameters of focal_scale. Use default instead.')
            focal_scale = 1.0
        else:
            str_focal_scale = str_focal_scale.decode('utf-8')
            focal_scale = float(str_focal_scale)
            print('User Config focal_scale: ', focal_scale)
        # img_id = 0
        try:
            while True:
                img = receive_image(conn)
                if img is None:
                    print('Connection is closed.')
                    break
                # img pre-process: subtract the per-channel mean. No resize is
                # done here -- presumably the client already sends images at
                # the network input size (TODO confirm).
                preprocess_start_time = time.time()
                img_height = img.shape[0]
                img_width = img.shape[1]
                img_resize = img - IMG_MEAN
                img_resize_expend = np.expand_dims(np.asarray(img_resize), axis = 0)
                # adenet predict
                forward_start_time = time.time()
                pred = sess.run(net.get_output(), feed_dict={input_node: img_resize_expend})
                forward_end_time = time.time()
                # pred-depth process: zero out far depths, apply the client's
                # focal scale, then zero pixels whose depth gradient exceeds
                # the client threshold.
                depth_pred_origin = pred[0,:,:,0]
                depth_pred = np.copy(depth_pred_origin)
                depth_pred[depth_pred>max_depth] = 0
                depth_pred = depth_pred * focal_scale
                depth_pred_gradient = gradient(depth_pred_origin)
                depth_pred[depth_pred_gradient>depth_thr] = 0
                # depth around: quantize to uint16 using depth_factor.
                depth_pred_scale = depth_pred*depth_factor
                depth_pred_around = np.around(depth_pred_scale)
                depth_pred_around = depth_pred_around.astype(np.uint16)
                # depth_pred_resize = cv2.resize(depth_pred_around, (480,270), interpolation=cv2.INTER_NEAREST)
                # depth_pred_resize[:, :70] = 0
                # depth_pred_resize[:, (480-70):] = 0
                depth_process_end_time = time.time()
                # cost time
                forward_duration = forward_end_time - forward_start_time
                im_preprocess_duration = forward_start_time - preprocess_start_time
                depth_process_duration = depth_process_end_time - forward_end_time
                # print time
                print('recv img size: ', img.shape, ', depth-pred size: ', depth_pred_around.shape)
                cost_time_str = 'img preprocess {:.6f}sec ' + ', forward {:.6f}sec' + ', depth process {:.6f}sec '
                sys_out_msg = cost_time_str.format(im_preprocess_duration, forward_duration, depth_process_duration)
                # sys.stdout.write(sys_out_msg)
                # sys.stdout.flush()
                print(sys_out_msg)
                # send depth
                # print 'depth size: ', depth_pred_resize.shape
                send_depth(conn,depth_pred_around)
        # NOTE(review): bare except silently swallows every error (including
        # KeyboardInterrupt) and never closes `conn`; consider narrowing to
        # `except Exception`, logging, and closing the connection.
        except:
            pass
# Script entry point: start the depth-estimation server loop.
if __name__ == '__main__':
    main()
|
import FWCore.ParameterSet.Config as cms

# CMSSW configuration for the "LogError" skim: re-runs over RECO input
# (with RAW secondary files), keeps only stable-beam events that pass the
# log-error filter, and writes them out in RAW-RECO format.
process = cms.Process("SKIM")

process.configurationMetadata = cms.untracked.PSet(
    version = cms.untracked.string('$Revision: 1.1 $'),
    name = cms.untracked.string('$Source: /cvs/CMSSW/CMSSW/DPGAnalysis/Skims/python/logErrorSkim_cfg.py,v $'),
    annotation = cms.untracked.string('LogError skim')
)
#
#
# This is for testing purposes.
#
#
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(
        # run 136066 lumi~500
        '/store/data/Run2010A/MinimumBias/RECO/v1/000/136/066/18F6DB82-5566-DF11-B289-0030487CAF0E.root'),
    secondaryFileNames = cms.untracked.vstring(
        '/store/data/Run2010A/MinimumBias/RAW/v1/000/136/066/38D48BED-3C66-DF11-88A5-001D09F27003.root')
)
# Drop converted DQM products from the input stream.
process.source.inputCommands = cms.untracked.vstring("keep *", "drop *_MEtoEDMConverter_*_*")

process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1000)
)
#------------------------------------------
# Load standard sequences.
#------------------------------------------
process.load('Configuration/StandardSequences/MagneticField_AutoFromDBCurrent_cff')
process.load('Configuration/StandardSequences/GeometryIdeal_cff')
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = 'GR10_P_V8::All'

process.load("Configuration/StandardSequences/RawToDigi_Data_cff")
process.load("Configuration/StandardSequences/Reconstruction_cff")
process.load('Configuration/EventContent/EventContent_cff')

#drop collections created on the fly
process.FEVTEventContent.outputCommands.append("drop *_MEtoEDMConverter_*_*")
process.FEVTEventContent.outputCommands.append("drop *_*_*_SKIM")
#
# Load common sequences
#
process.load('L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskAlgoTrigConfig_cff')
process.load('L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskTechTrigConfig_cff')
process.load('HLTrigger/HLTfilters/hltLevel1GTSeed_cfi')

#################################logerrorharvester############################################
process.load("FWCore.Modules.logErrorFilter_cfi")
from Configuration.StandardSequences.RawToDigi_Data_cff import gtEvmDigis
process.gtEvmDigis = gtEvmDigis.clone()
# Keep only events recorded while the LHC was in stable-beam mode (11).
process.stableBeam = cms.EDFilter("HLTBeamModeFilter",
    L1GtEvmReadoutRecordTag = cms.InputTag("gtEvmDigis"),
    AllowedBeamMode = cms.vuint32(11),
    saveTags = cms.bool(False)
)
# Selection path: unpack the EVM record, require stable beam, then select
# events whose processing emitted log errors.
process.logerrorpath=cms.Path(process.gtEvmDigis+process.stableBeam+process.logErrorFilter)

process.outlogerr = cms.OutputModule("PoolOutputModule",
    outputCommands = process.FEVTEventContent.outputCommands,
    fileName = cms.untracked.string('/tmp/azzi/logerror_filter.root'),
    dataset = cms.untracked.PSet(
        dataTier = cms.untracked.string('RAW-RECO'),
        filterName = cms.untracked.string('Skim_logerror')),
    SelectEvents = cms.untracked.PSet(
        SelectEvents = cms.vstring("logerrorpath")
    ))

process.options = cms.untracked.PSet(
    wantSummary = cms.untracked.bool(True)
)

process.outpath = cms.EndPath(process.outlogerr)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.