#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import array
from collections import namedtuple
from pyspark import SparkContext, since
from pyspark.rdd import RDD
from pyspark.mllib.common import JavaModelWrapper, callMLlibFunc, inherit_doc
from pyspark.mllib.util import JavaLoader, JavaSaveable
from pyspark.sql import DataFrame
__all__ = ['MatrixFactorizationModel', 'ALS', 'Rating']
class Rating(namedtuple("Rating", ["user", "product", "rating"])):
"""
Represents a (user, product, rating) tuple.
>>> r = Rating(1, 2, 5.0)
>>> (r.user, r.product, r.rating)
(1, 2, 5.0)
>>> (r[0], r[1], r[2])
(1, 2, 5.0)
.. versionadded:: 1.2.0
"""
def __reduce__(self):
return Rating, (int(self.user), int(self.product), float(self.rating))
@inherit_doc
class MatrixFactorizationModel(JavaModelWrapper, JavaSaveable, JavaLoader):
"""A matrix factorisation model trained by regularized alternating
least-squares.
>>> r1 = (1, 1, 1.0)
>>> r2 = (1, 2, 2.0)
>>> r3 = (2, 1, 2.0)
>>> ratings = sc.parallelize([r1, r2, r3])
>>> model = ALS.trainImplicit(ratings, 1, seed=10)
>>> model.predict(2, 2)
0.4...
>>> testset = sc.parallelize([(1, 2), (1, 1)])
>>> model = ALS.train(ratings, 2, seed=0)
>>> model.predictAll(testset).collect()
[Rating(user=1, product=1, rating=1.0...), Rating(user=1, product=2, rating=1.9...)]
>>> model = ALS.train(ratings, 4, seed=10)
>>> model.userFeatures().collect()
[(1, array('d', [...])), (2, array('d', [...]))]
>>> model.recommendUsers(1, 2)
[Rating(user=2, product=1, rating=1.9...), Rating(user=1, product=1, rating=1.0...)]
>>> model.recommendProducts(1, 2)
[Rating(user=1, product=2, rating=1.9...), Rating(user=1, product=1, rating=1.0...)]
>>> model.rank
4
>>> first_user = model.userFeatures().take(1)[0]
>>> latents = first_user[1]
>>> len(latents)
4
>>> model.productFeatures().collect()
[(1, array('d', [...])), (2, array('d', [...]))]
>>> first_product = model.productFeatures().take(1)[0]
>>> latents = first_product[1]
>>> len(latents)
4
>>> products_for_users = model.recommendProductsForUsers(1).collect()
>>> len(products_for_users)
2
>>> products_for_users[0]
(1, (Rating(user=1, product=2, rating=...),))
>>> users_for_products = model.recommendUsersForProducts(1).collect()
>>> len(users_for_products)
2
>>> users_for_products[0]
(1, (Rating(user=2, product=1, rating=...),))
>>> model = ALS.train(ratings, 1, nonnegative=True, seed=10)
>>> model.predict(2, 2)
3.8...
>>> df = sqlContext.createDataFrame([Rating(1, 1, 1.0), Rating(1, 2, 2.0), Rating(2, 1, 2.0)])
>>> model = ALS.train(df, 1, nonnegative=True, seed=10)
>>> model.predict(2, 2)
3.8...
>>> model = ALS.trainImplicit(ratings, 1, nonnegative=True, seed=10)
>>> model.predict(2, 2)
0.4...
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> model.save(sc, path)
>>> sameModel = MatrixFactorizationModel.load(sc, path)
>>> sameModel.predict(2, 2)
0.4...
>>> sameModel.predictAll(testset).collect()
[Rating(...
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except OSError:
... pass
.. versionadded:: 0.9.0
"""
@since("0.9.0")
def predict(self, user, product):
"""
Predicts rating for the given user and product.
"""
return self._java_model.predict(int(user), int(product))
@since("0.9.0")
def predictAll(self, user_product):
"""
Returns an RDD of predicted ratings for the input user and product pairs.
"""
assert isinstance(user_product, RDD), "user_product should be RDD of (user, product)"
first = user_product.first()
assert len(first) == 2, "user_product should be RDD of (user, product)"
user_product = user_product.map(lambda u_p: (int(u_p[0]), int(u_p[1])))
return self.call("predict", user_product)
@since("1.2.0")
def userFeatures(self):
"""
Returns a paired RDD, where the first element is the user and the
second is an array of features corresponding to that user.
"""
return self.call("getUserFeatures").mapValues(lambda v: array.array('d', v))
@since("1.2.0")
def productFeatures(self):
"""
Returns a paired RDD, where the first element is the product and the
second is an array of features corresponding to that product.
"""
return self.call("getProductFeatures").mapValues(lambda v: array.array('d', v))
@since("1.4.0")
def recommendUsers(self, product, num):
"""
Recommends the top "num" number of users for a given product and returns a list
of Rating objects sorted by the predicted rating in descending order.
"""
return list(self.call("recommendUsers", product, num))
@since("1.4.0")
def recommendProducts(self, user, num):
"""
Recommends the top "num" number of products for a given user and returns a list
of Rating objects sorted by the predicted rating in descending order.
"""
return list(self.call("recommendProducts", user, num))
def recommendProductsForUsers(self, num):
"""
Recommends top "num" products for all users. The number returned may be less than this.
"""
return self.call("wrappedRecommendProductsForUsers", num)
def recommendUsersForProducts(self, num):
"""
Recommends top "num" users for all products. The number returned may be less than this.
"""
return self.call("wrappedRecommendUsersForProducts", num)
@property
@since("1.4.0")
def rank(self):
"""Rank for the features in this model"""
return self.call("rank")
@classmethod
@since("1.3.1")
def load(cls, sc, path):
"""Load a model from the given path"""
model = cls._load_java(sc, path)
wrapper = sc._jvm.MatrixFactorizationModelWrapper(model)
return MatrixFactorizationModel(wrapper)
class ALS(object):
"""Alternating Least Squares matrix factorization
.. versionadded:: 0.9.0
"""
@classmethod
def _prepare(cls, ratings):
if isinstance(ratings, RDD):
pass
elif isinstance(ratings, DataFrame):
ratings = ratings.rdd
else:
raise TypeError("Ratings should be represented by either an RDD or a DataFrame, "
"but got %s." % type(ratings))
first = ratings.first()
if isinstance(first, Rating):
pass
elif isinstance(first, (tuple, list)):
ratings = ratings.map(lambda x: Rating(*x))
else:
raise TypeError("Expect a Rating or a tuple/list, but got %s." % type(first))
return ratings
@classmethod
@since("0.9.0")
def train(cls, ratings, rank, iterations=5, lambda_=0.01, blocks=-1, nonnegative=False,
seed=None):
"""
Train a matrix factorization model given an RDD of ratings given by users to some products,
in the form of (userID, productID, rating) pairs. We approximate the ratings matrix as the
product of two lower-rank matrices of a given rank (number of features). To solve for these
features, we run a given number of iterations of ALS. This is done using a level of
parallelism given by `blocks`.
"""
model = callMLlibFunc("trainALSModel", cls._prepare(ratings), rank, iterations,
lambda_, blocks, nonnegative, seed)
return MatrixFactorizationModel(model)
@classmethod
@since("0.9.0")
def trainImplicit(cls, ratings, rank, iterations=5, lambda_=0.01, blocks=-1, alpha=0.01,
nonnegative=False, seed=None):
"""
Train a matrix factorization model given an RDD of 'implicit preferences' given by users
to some products, in the form of (userID, productID, preference) pairs. We approximate the
ratings matrix as the product of two lower-rank matrices of a given rank (number of
features). To solve for these features, we run a given number of iterations of ALS.
This is done using a level of parallelism given by `blocks`.
"""
model = callMLlibFunc("trainImplicitALSModel", cls._prepare(ratings), rank,
iterations, lambda_, blocks, alpha, nonnegative, seed)
return MatrixFactorizationModel(model)
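# --- Illustrative usage sketch (not part of the upstream module): a minimal,
# hedged example of training an explicit-feedback model and querying it,
# mirroring the doctests above. It assumes the caller supplies a live
# SparkContext `sc`.
def _example_als_usage(sc):
    """Train a tiny explicit-feedback model and query it (sketch only)."""
    ratings = sc.parallelize([Rating(1, 1, 1.0), Rating(1, 2, 2.0), Rating(2, 1, 2.0)])
    model = ALS.train(ratings, rank=2, iterations=5, seed=0)
    # Single (user, product) prediction plus batch predictions over an RDD of pairs.
    single = model.predict(1, 2)
    batch = model.predictAll(sc.parallelize([(1, 1), (2, 2)])).collect()
    return single, batch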
def _test():
import doctest
import pyspark.mllib.recommendation
from pyspark.sql import SQLContext
globs = pyspark.mllib.recommendation.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
|
"""
@package mi.dataset.parser
@file marine-integrations/mi/dataset/parser/dosta_abcdjm_cspp.py
@author Mark Worden
@brief Parser for the dosta_abcdjm_cspp dataset driver
Release notes:
initial release
"""
import numpy
from mi.core.log import get_logger
from mi.core.common import BaseEnum
from mi.core.instrument.dataset_data_particle import DataParticle
from mi.dataset.parser.common_regexes import INT_REGEX, FLOAT_REGEX, MULTIPLE_TAB_REGEX, END_OF_LINE_REGEX
from mi.dataset.parser.cspp_base import CsppParser, Y_OR_N_REGEX, CsppMetadataDataParticle, MetadataRawDataKey, \
encode_y_or_n
log = get_logger()
__author__ = 'Mark Worden'
__license__ = 'Apache 2.0'
# A regular expression for special characters that could exist in a data record preceding the model number
SPECIAL_CHARS_REGEX = r'(?:[\?][%])?'
# A regular expression that should match a dosta_abcdjm data record
# NOTE the group names must match the string literals in the ParticleKey below
# strings used here instead of enumerated constants for readability
DATA_REGEX = r'(?P<profiler_timestamp>' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX # Profiler Timestamp
DATA_REGEX += '(?P<pressure_depth>' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX # Depth
DATA_REGEX += '(?P<suspect_timestamp>' + Y_OR_N_REGEX + ')' + MULTIPLE_TAB_REGEX # Suspect Timestamp
DATA_REGEX += SPECIAL_CHARS_REGEX + '(?P<product_number>' + INT_REGEX + ')' + MULTIPLE_TAB_REGEX # Model Number
DATA_REGEX += '(?P<serial_number>' + INT_REGEX + ')' + MULTIPLE_TAB_REGEX # Serial Number
DATA_REGEX += '(?P<estimated_oxygen_concentration>' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX # oxygen content
# relative air saturation
# Note: relative air saturation is missing in some early deployments. If not present match group will be None.
DATA_REGEX += '(?:(?P<estimated_oxygen_saturation>' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX + ')?'
DATA_REGEX += '(?P<optode_temperature>' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX # ambient temperature
DATA_REGEX += '(?P<calibrated_phase>' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX # calibrated phase
DATA_REGEX += '(?P<temp_compensated_phase>' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX # temperature compensated phase
DATA_REGEX += '(?P<blue_phase>' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX # phase with blue excitation
DATA_REGEX += '(?P<red_phase>' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX # phase with red excitation
DATA_REGEX += '(?P<blue_amplitude>' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX # amplitude with blue excitation
DATA_REGEX += '(?P<red_amplitude>' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX # amplitude with red excitation
DATA_REGEX += '(?P<raw_temperature>' + FLOAT_REGEX + ')' + END_OF_LINE_REGEX # raw temperature, voltage
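# --- Illustrative sketch (not part of the original parser): a hedged helper
# showing how the named groups of DATA_REGEX can be inspected for a single
# data record line supplied by the caller.
def _example_match_data_record(line):
    """Return the named-group dict for one tab-separated record line, or None."""
    import re
    match = re.match(DATA_REGEX, line)
    return match.groupdict() if match else None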
class DataParticleType(BaseEnum):
"""
The data particle types that a dosta_abcdjm_cspp parser could generate
"""
METADATA_RECOVERED = 'dosta_abcdjm_cspp_metadata_recovered'
INSTRUMENT_RECOVERED = 'dosta_abcdjm_cspp_instrument_recovered'
METADATA_TELEMETERED = 'dosta_abcdjm_cspp_metadata'
INSTRUMENT_TELEMETERED = 'dosta_abcdjm_cspp_instrument'
class DostaAbcdjmCsppParserDataParticleKey(BaseEnum):
"""
The data particle keys associated with dosta_abcdjm_cspp data particle parameters
"""
PRODUCT_NUMBER = 'product_number'
SERIAL_NUMBER = 'serial_number'
PROFILER_TIMESTAMP = 'profiler_timestamp'
PRESSURE = 'pressure_depth'
SUSPECT_TIMESTAMP = 'suspect_timestamp'
ESTIMATED_OXYGEN_CONCENTRATION = 'estimated_oxygen_concentration'
ESTIMATED_OXYGEN_SATURATION = 'estimated_oxygen_saturation'
OPTODE_TEMPERATURE = 'optode_temperature'
CALIBRATED_PHASE = 'calibrated_phase'
TEMP_COMPENSATED_PHASE = 'temp_compensated_phase'
BLUE_PHASE = 'blue_phase'
RED_PHASE = 'red_phase'
BLUE_AMPLITUDE = 'blue_amplitude'
RED_AMPLITUDE = 'red_amplitude'
RAW_TEMPERATURE = 'raw_temperature'
def float_or_none(float_val):
if float_val is None:
return None
return float(float_val)
# A group of non common metadata particle encoding rules used to simplify encoding using a loop
NON_COMMON_METADATA_PARTICLE_ENCODING_RULES = [
(DostaAbcdjmCsppParserDataParticleKey.PRODUCT_NUMBER, int),
(DostaAbcdjmCsppParserDataParticleKey.SERIAL_NUMBER, str)
]
# A group of instrument data particle encoding rules used to simplify encoding using a loop
INSTRUMENT_PARTICLE_ENCODING_RULES = [
(DostaAbcdjmCsppParserDataParticleKey.PROFILER_TIMESTAMP, numpy.float),
(DostaAbcdjmCsppParserDataParticleKey.PRESSURE, float),
(DostaAbcdjmCsppParserDataParticleKey.SUSPECT_TIMESTAMP, encode_y_or_n),
(DostaAbcdjmCsppParserDataParticleKey.ESTIMATED_OXYGEN_CONCENTRATION, float),
(DostaAbcdjmCsppParserDataParticleKey.ESTIMATED_OXYGEN_SATURATION, float_or_none),
(DostaAbcdjmCsppParserDataParticleKey.OPTODE_TEMPERATURE, float),
(DostaAbcdjmCsppParserDataParticleKey.CALIBRATED_PHASE, float),
(DostaAbcdjmCsppParserDataParticleKey.TEMP_COMPENSATED_PHASE, float),
(DostaAbcdjmCsppParserDataParticleKey.BLUE_PHASE, float),
(DostaAbcdjmCsppParserDataParticleKey.RED_PHASE, float),
(DostaAbcdjmCsppParserDataParticleKey.BLUE_AMPLITUDE, float),
(DostaAbcdjmCsppParserDataParticleKey.RED_AMPLITUDE, float),
(DostaAbcdjmCsppParserDataParticleKey.RAW_TEMPERATURE, float),
]
class DostaAbcdjmCsppMetadataDataParticle(CsppMetadataDataParticle):
"""
Class for building a dosta_abcdjm_cspp metadata particle
"""
def _build_parsed_values(self):
"""
Take something in the data format and turn it into
an array of dictionaries defining the data in the particle
with the appropriate tag.
"""
# Set the base metadata parsed values to the results to return
results = self._build_metadata_parsed_values()
data_match = self.raw_data[MetadataRawDataKey.DATA_MATCH]
# Process each of the non common metadata particle parameters
for (name, encoding) in NON_COMMON_METADATA_PARTICLE_ENCODING_RULES:
results.append(self._encode_value(name, data_match.group(name), encoding))
# Set the internal timestamp
internal_timestamp_unix = numpy.float(data_match.group(
DostaAbcdjmCsppParserDataParticleKey.PROFILER_TIMESTAMP))
self.set_internal_timestamp(unix_time=internal_timestamp_unix)
return results
class DostaAbcdjmCsppMetadataRecoveredDataParticle(DostaAbcdjmCsppMetadataDataParticle):
"""
Class for building a dosta_abcdjm_cspp recovered metadata particle
"""
_data_particle_type = DataParticleType.METADATA_RECOVERED
class DostaAbcdjmCsppMetadataTelemeteredDataParticle(DostaAbcdjmCsppMetadataDataParticle):
"""
Class for building a dosta_abcdjm_cspp telemetered metadata particle
"""
_data_particle_type = DataParticleType.METADATA_TELEMETERED
class DostaAbcdjmCsppInstrumentDataParticle(DataParticle):
"""
Class for building a dosta_abcdjm_cspp instrument data particle
"""
def _build_parsed_values(self):
"""
Take something in the data format and turn it into
an array of dictionaries defining the data in the particle
with the appropriate tag.
"""
results = []
# Process each of the instrument particle parameters
for (name, encoding) in INSTRUMENT_PARTICLE_ENCODING_RULES:
results.append(self._encode_value(name, self.raw_data.group(name), encoding))
# # Set the internal timestamp
internal_timestamp_unix = numpy.float(self.raw_data.group(
DostaAbcdjmCsppParserDataParticleKey.PROFILER_TIMESTAMP))
self.set_internal_timestamp(unix_time=internal_timestamp_unix)
return results
class DostaAbcdjmCsppInstrumentRecoveredDataParticle(DostaAbcdjmCsppInstrumentDataParticle):
"""
Class for building a dosta_abcdjm_cspp recovered instrument data particle
"""
_data_particle_type = DataParticleType.INSTRUMENT_RECOVERED
class DostaAbcdjmCsppInstrumentTelemeteredDataParticle(DostaAbcdjmCsppInstrumentDataParticle):
"""
Class for building a dosta_abcdjm_cspp telemetered instrument data particle
"""
_data_particle_type = DataParticleType.INSTRUMENT_TELEMETERED
class DostaAbcdjmCsppParser(CsppParser):
def __init__(self,
config,
stream_handle,
exception_callback):
"""
This method is a constructor that will instantiate a DostaAbcdjmCsppParser object.
@param config The configuration for this DostaAbcdjmCsppParser parser
@param stream_handle The handle to the data stream containing the dosta_abcdjm_cspp data
@param exception_callback The function to call to report exceptions
"""
# Call the superclass constructor
super(DostaAbcdjmCsppParser, self).__init__(config,
stream_handle,
exception_callback,
DATA_REGEX)
|
|
#!/usr/bin/python
"""This script BLASTs the preprocessed input agains the Taxonomy databases"""
try:
import optparse, sys, re, csv, traceback
from os import path
import logging.handlers
from libs.python_modules.utils.sysutil import pathDelim
from libs.python_modules.utils.metapathways_utils import fprintf, printf, eprintf, exit_process
from libs.python_modules.utils.sysutil import getstatusoutput
from libs.python_modules.utils.errorcodes import error_message, get_error_list, insert_error
except:
print(""" Could not load some user defined module functions""")
print(""" Make sure your typed 'source MetaPathwaysrc'""")
print(""" """)
print(traceback.print_exc(10))
sys.exit(3)
PATHDELIM=pathDelim()
def fprintf(file, fmt, *args):
file.write(fmt % args)
def printf(fmt, *args):
sys.stdout.write(fmt % args)
def files_exist( files ):
for file in files:
if not path.exists(file):
print('Could not read File ' + file )
print('Please make sure these sequences are in the \"blastDB\" folder')
sys.exit(3)
return False
return True
def getFormat(dbstring):
dbstring = dbstring.rstrip()
dbstring = dbstring.lstrip()
dbstring = dbstring.lower()
# silva, greengenes spellings should be lowercase always
format = 0
if re.search(r'silva',dbstring, re.I):
format = 1
if re.search(r'greengenes',dbstring, re.I):
format = 2
return format
# sequences with no separate taxonomic name get the sequence's name
def parse_Format_0(line):
fields = re.split(' ', line)
if len( fields ) ==1:
name = fields[0].replace('>','')
taxonomy = name
else:
return( None, None )
return( name.strip(), taxonomy.strip() )
#silva
def parse_Format_1(line):
fields = re.split(' ', line)
if len( fields ) >= 2:
name = fields[0].replace('>','')
fields = fields[1:]
taxonomy = " "
taxonomy = taxonomy.join(fields)
else:
return( None, None )
return( name.strip(), taxonomy.strip() )
def parse_Format_2(line):
fields = re.split(' ', line)
names = []
if len( fields ) >= 2:
name = fields[0].replace('>','')
for field in fields[1:]:
field =field.strip()
if re.match("[a-z]_[a-zA-Z0-9-_]*;",field):
names.append(field)
taxonomy = ""
taxonomy = taxonomy.join(names)
else:
return( None, None )
return( name.strip(), taxonomy.strip() )
def getName_and_Taxonomy(line, format=0):
if format==0:
name, taxonomy = parse_Format_0(line)
elif format==1:
name, taxonomy = parse_Format_1(line)
# print name + " " + taxonomy
elif format==2:
(name, taxonomy) = parse_Format_2(line)
else:
return( None, None )
return(name, taxonomy )
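# --- Illustrative sketch (not part of the original script): a hedged example
# of how a SILVA-style (format 1) FASTA header is split into a sequence name
# and a taxonomy string by the helpers above.
def example_header_parse():
    header = ">SEQ001 Bacteria;Proteobacteria;Gammaproteobacteria"
    name, taxonomy = getName_and_Taxonomy(header, format=1)
    # name -> "SEQ001", taxonomy -> "Bacteria;Proteobacteria;Gammaproteobacteria"
    return name, taxonomy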
START_PATTERN = re.compile(r'^>')
def read_select_fasta_sequences(candidates, records, input_file_name):
input_file = open(input_file_name, 'r')
line = input_file.readline()
while line:
if START_PATTERN.search(line):
name=line.strip()
name = re.sub(r'>', '',name)
if name and name in candidates:
records[name]=""
else:
sequence = line.strip()
if sequence and name in candidates:
records[name] += sequence
line = input_file.readline()
input_file.close()
def write_selected_sequences(selected_sequences, output_file_name):
output_file = open(output_file_name, 'w')
for read in selected_sequences:
fprintf(output_file, ">%s\n", read)
fprintf(output_file,"%s\n", selected_sequences[read])
output_file.close()
def append_taxonomic_information(databaseSequences, table, params):
try:
tax_seqs = open(databaseSequences, 'r')
except IOError:
print("Cannot read file " + tax_maps + " !")
sys.exit(3)
format = getFormat(databaseSequences)
taxmapLines = tax_seqs.readlines()
tax_seqs.close()
taxMapping={}
for line in taxmapLines:
if not re.match('>', line):
continue
line = line.strip()
name, taxonomy = getName_and_Taxonomy(line, format)
if name:
taxMapping[name] = taxonomy
for key in table:
key = str(key)
if int(table[key][5] - table[key][4] ) > params['length'] and table[key][0] > params['similarity'] and table[key][1] < params['evalue'] and table[key][2] > params['bitscore']:
if table[key][3] in taxMapping:
table[key].append(taxMapping[table[key][3]])
else:
table[key].append('-')
else:
table[key].append('-')
def process_blastout_file(blast_file, database, table, errorlogger = None):
try:
blastfile = open(blast_file, 'r')
except IOError:
eprintf("ERROR : Cannot write read file " + blast_file + " !" )
if errorlogger!=None:
errorlogger.write("STATS_rRNA\tERROR\tCannot write read blast output file " + blast_file + " for database " + database )
exit_process()
blastLines = blastfile.readlines()
blastfile.close()
for line in blastLines:
line = line.strip()
fields = re.split('\t', line)
if len(fields) < 12:
continue
fields[0] = str(fields[0].strip())
fields[1] = str(fields[1].strip())
fields[2] = float(fields[2].strip())
fields[6] = int(fields[6].strip())
fields[7] = int(fields[7].strip())
fields[10] = float(fields[10].strip())
fields[11] = float(fields[11].strip())
table[str(fields[0].strip())] = [fields[2], fields [10], fields[11], fields[1], fields[6], fields[7]]
# print table
# sys.exit(0)
#print blast_file + ' ' + tax_maps + ' ' + database
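# Note: assuming standard 12-column tabular BLAST output, process_blastout_file()
# stores, for each query id, the list
# [percent identity, e-value, bit score, subject id, query start, query end]
# (columns 2, 10, 11, 1, 6 and 7); append_taxonomic_information() later appends
# the looked-up taxonomy (or '-') as element 6 after applying the cutoffs.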
usage = sys.argv[0] + """ -i x.blastout [y.blastout] -d xdatabase [ydatabase] -m xtax_maps [ ytax_maps ] -o outputfile -b n -e 0.ddd -s N """
parser = None
def createParser():
global parser
epilog = """The input nucleotide sequences are BLASTed against the selected rRNA databases such as SSU or LSU Silva sequence databases and SSU Greengene database. The hits with high bit scores are flagged as rRNA and the resulting taxonomy from the databases are assigned. The results from this step are put in the results/rRNA folder, with one tsv file for each rRNA database."""
epilog = re.sub(r'\s+',' ', epilog)
parser = optparse.OptionParser(usage=usage, epilog = epilog)
# Input options
input_group = optparse.OptionGroup(parser, 'input options')
input_group.add_option('-i', '--blastout', dest='blast_files',
metavar='BLAST_FILE', action='append', default=[],
help='BLAST output file from blasting of nucleotide sequences against taxonomic databases')
input_group.add_option('-d', '--databases', dest='tax_databases',
metavar='TAX_DATABASE', action='append', default=[],
help='Taxonomic databases')
input_group.add_option('-o', '--output', dest='output',
metavar='OUTPUT', help='output file for the taxonomic hit results')
input_group.add_option('-f', '--fasta', dest='fasta',
metavar='NUC_SEQUENCES', help='The nucleotide sequences')
input_group.add_option('-q', '--query', dest='fasta',
metavar='NUC_SEQUENCES', help='The nucleotide sequences')
parser.add_option_group(input_group)
# filtering options
filtering_options = optparse.OptionGroup(parser, 'filteroptions')
filtering_options.add_option('-b', '--bit_score', dest='bitscore', metavar='BITSCORE',
default=0, help="bit score cutoff")
filtering_options.add_option('-e', '--e-value', metavar= 'evalue',
dest='evalue', default=1e-6,
help='e-value cut off for the blast hits')
filtering_options.add_option('-s', '--similarity', metavar='similarity',
dest='similarity', default = 40,
help='% similarity cut off for the blast hits')
filtering_options.add_option('-l', '--length', metavar='length',
dest='length', default = 180,
help='length cut off for the blast hits')
parser.add_option_group(filtering_options)
# run process parameters
process_options = optparse.OptionGroup(parser, 'processoptions')
process_options.add_option('-n', '--num_threads', dest='num_threads', metavar='num_threads',
default=1, help="number of threads")
parser.add_option_group(process_options)
def main(argv, errorlogger = None, runcommand = None, runstatslogger = None):
global parser
options, args = parser.parse_args(argv)
if not len(options.blast_files):
parser.error('At least one taxonomic BLAST output is required')
if runBlastCommandrRNA(runcommand = runcommand) !=0:
if errorlogger:
errorlogger.write("ERROR: Failed to BLAST the sequences against database %s : " %(options.tax_databases[0]) )
errorlogger.write(" : " + runcommand)
exit_process("ERROR: Failed to BLAST the sequences against database %s : " %(options.tax_databases[0]) +\
" : " + runcommand)
if not ( len(options.tax_databases) == len( options.blast_files) ):
parser.error('Number of taxonomic databases and BLAST outputs should be the same')
if not options.output:
parser.error('Output file must be specified')
# Incredible sanity check
if not files_exist(options.blast_files):
sys.exit(0)
if not files_exist( options.tax_databases):
sys.exit(0)
params = {'length': int(options.length), 'similarity': float(options.similarity), 'evalue':float(options.evalue), 'bitscore':float(options.bitscore) }
#print params['bitscore']
table={}
for x in range(0, len(options.blast_files)):
table[options.tax_databases[x]]={}
process_blastout_file(options.blast_files[x], options.tax_databases[x],table[options.tax_databases[x]], errorlogger = errorlogger)
priority = 7000
reads = {}
for x in range(0, len(options.blast_files)):
append_taxonomic_information(options.tax_databases[x], table[options.tax_databases[x]], params)
for key in table[options.tax_databases[x]]:
if len(table[options.tax_databases[x]][key][6]) > 1:
reads[key] = True
dbname = re.sub(r'^.*' + PATHDELIM, '', options.tax_databases[x])
runstatslogger.write("%s\tTaxonomic hits in %s\t%s\n" %(str(priority), dbname, str(len(reads))))
priority += 1
outputfile = open(options.output, 'w')
fprintf(outputfile, "#Similarity cutoff :\t" + str(params['similarity']) +'\n')
fprintf(outputfile, "#Length cutoff :\t" + str(params['length']) +'\n')
fprintf(outputfile, "#Evalue cutoff :\t" + str(params['evalue']) +'\n')
fprintf(outputfile, "#Bit score cutoff :\t" + str(params['bitscore']) +'\n')
fprintf(outputfile, "#Number of rRNA sequences detected:\t" + str(len(reads)) +'\n\n')
for x in range(0, len(options.tax_databases)):
# printf('\t%s\t\t\t', re.sub(r'^.*/','', options.tax_databases[x]))
fprintf(outputfile, '\t%s\t\t\t', re.sub(r'^.*' + PATHDELIM, '', options.tax_databases[x]))
#printf('\n')
fprintf(outputfile,'\n')
#printf('%s', 'read')
for x in range(0, len(options.blast_files)):
fprintf(outputfile, '%s\t%s\t%s\t%s\t%s\t%s\t%s', 'sequence', 'start', 'end', 'similarity', 'evalue', 'bitscore', 'taxonomy')
fprintf(outputfile,'\n')
for read in reads:
#printf('%s', read)
fprintf(outputfile,'%s', read)
for x in range(0, len(options.blast_files)):
if read in table[options.tax_databases[x]]:
fprintf(outputfile, '\t%s\t%s\t%s\t%s\t%s\t%s', str(table[options.tax_databases[x]][read][4]), str(table[options.tax_databases[x]][read][5]), str(table[options.tax_databases[x]][read][0]),str(table[options.tax_databases[x]][read][1]),str(table[options.tax_databases[x]][read][2]), str(table[options.tax_databases[x]][read][6]))
else:
fprintf(outputfile, '\t-\t-\t-\t-\t-\t-')
fprintf(outputfile,'\n')
outputfile.close()
# collect the exact reads
database_hits = {}
for read in reads:
for x in range(0, len(options.blast_files)):
if read in table[options.tax_databases[x]]:
database_hits[read] = [ table[options.tax_databases[x]][read][4], table[options.tax_databases[x]][read][5]]
# pick the hits, trim them according to the match and write them
if options.fasta:
selected_sequences={}
read_select_fasta_sequences(database_hits, selected_sequences, options.fasta)
for read in database_hits:
selected_sequences[read] = selected_sequences[read][database_hits[read][0]:database_hits[read][1]]
write_selected_sequences(selected_sequences, options.output +'.fasta')
def runBlastCommandrRNA(runcommand = None):
if runcommand == None:
return False
result = getstatusoutput(runcommand)
return result[0]
def MetaPathways_rRNA_stats_calculator(argv, extra_command = None, errorlogger = None, runstatslogger =None):
if errorlogger != None:
errorlogger.write("#STEP\tSTATS_rRNA\n")
createParser()
try:
main(argv, errorlogger = errorlogger, runcommand= extra_command, runstatslogger = runstatslogger)
except:
insert_error(6)
return (0,'')
return (0,'')
if __name__ == '__main__':
createParser()
main(sys.argv[1:])
|
|
"""GATK variant calling -- HaplotypeCaller and UnifiedGenotyper.
"""
import os
from distutils.version import LooseVersion
import shutil
import subprocess
import toolz as tz
from bcbio import bam, broad, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.pipeline.shared import subset_variant_regions
from bcbio.pipeline import datadict as dd
from bcbio.variation import annotation, bamprep, bedutils, ploidy, vcfutils
from bcbio.provenance import do
def standard_cl_params(items):
"""Shared command line parameters for GATK programs.
Handles disabling removal of duplicate reads for amplicon or
non mark-duplicate experiments. If we have pre-aligned inputs we
ignore the value of mark_duplicates (since duplicates may already be
marked in the input BAM).
"""
out = []
def _skip_duplicates(data):
return (dd.get_coverage_interval(data) == "amplicon" or
(dd.get_aligner(data) and not dd.get_mark_duplicates(data)))
if any(_skip_duplicates(d) for d in items):
broad_runner = broad.runner_from_config(items[0]["config"])
gatk_type = broad_runner.gatk_type()
if gatk_type == "gatk4":
out += ["--disable-read-filter", "NotDuplicateReadFilter"]
elif LooseVersion(broad_runner.gatk_major_version()) >= LooseVersion("3.5"):
out += ["-drf", "DuplicateRead"]
return out
def _shared_gatk_call_prep(align_bams, items, ref_file, region, out_file, num_cores=1):
"""Shared preparation work for GATK variant calling.
"""
data = items[0]
config = data["config"]
broad_runner = broad.runner_from_config(config)
gatk_type = broad_runner.gatk_type()
for x in align_bams:
bam.index(x, config)
picard_runner = broad.runner_from_path("picard", config)
picard_runner.run_fn("picard_index_ref", ref_file)
params = ["-R", ref_file]
coverage_depth_min = tz.get_in(["algorithm", "coverage_depth_min"], config)
if coverage_depth_min and coverage_depth_min < 4:
confidence = "4.0"
params += ["--standard_min_confidence_threshold_for_calling", confidence]
for a in annotation.get_gatk_annotations(config):
params += ["--annotation", a]
for x in align_bams:
params += ["-I", x]
variant_regions = bedutils.population_variant_regions(items)
region = subset_variant_regions(variant_regions, region, out_file, items)
if region:
if gatk_type == "gatk4":
params += ["-L", bamprep.region_to_gatk(region), "--interval-set-rule", "INTERSECTION"]
else:
params += ["-L", bamprep.region_to_gatk(region), "--interval_set_rule", "INTERSECTION"]
params += standard_cl_params(items)
return broad_runner, params
def unified_genotyper(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Perform SNP genotyping on the given alignment file.
"""
if out_file is None:
out_file = "%s-variants.vcf.gz" % utils.splitext_plus(align_bams[0])[0]
if not utils.file_exists(out_file):
broad_runner, params = \
_shared_gatk_call_prep(align_bams, items, ref_file, region, out_file)
with file_transaction(items[0], out_file) as tx_out_file:
params += ["-T", "UnifiedGenotyper",
"-o", tx_out_file,
"-ploidy", (str(ploidy.get_ploidy(items, region))
if broad_runner.gatk_type() == "restricted" else "2"),
"--genotype_likelihoods_model", "BOTH"]
resources = config_utils.get_resources("gatk", items[0]["config"])
if "options" in resources:
params += [str(x) for x in resources.get("options", [])]
broad_runner.run_gatk(params)
return vcfutils.bgzip_and_index(out_file, items[0]["config"])
def _joint_calling(items):
"""Determine if this call feeds downstream into joint calls.
"""
jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), items[0])
if jointcaller:
assert len(items) == 1, "Can only do joint calling preparation with GATK with single samples"
assert tz.get_in(("metadata", "batch"), items[0]) is not None, \
"Joint calling requires batched samples, %s has no metadata batch." % dd.get_sample_name(items[0])
return jointcaller
def _use_spark(num_cores, gatk_type, items, opts):
data = items[0]
use_spark = False
if dd.get_analysis(data).lower() != "rna-seq":
use_spark = (len(items) == 1 and num_cores > 1 and gatk_type == "gatk4") or "--spark-master" in opts
return use_spark
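# Note: per _use_spark() above, the Spark-based HaplotypeCaller path is only
# taken for non RNA-seq runs that are either single-sample, multi-core GATK4
# jobs or that explicitly pass --spark-master through the gatk-spark resource
# options.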
def haplotype_caller(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Call variation with GATK's HaplotypeCaller.
This requires the full non open-source version of GATK 2.4+, or GATK4.
"""
if out_file is None:
out_file = "%s-variants.vcf.gz" % utils.splitext_plus(align_bams[0])[0]
if not utils.file_exists(out_file):
num_cores = dd.get_num_cores(items[0])
broad_runner, params = \
_shared_gatk_call_prep(align_bams, items, ref_file, region, out_file, num_cores)
gatk_type = broad_runner.gatk_type()
assert gatk_type in ["restricted", "gatk4"], \
"Require full version of GATK 2.4+, or GATK4 for haplotype calling"
with file_transaction(items[0], out_file) as tx_out_file:
resources = config_utils.get_resources("gatk-spark", items[0]["config"])
spark_opts = [str(x) for x in resources.get("options", [])]
if _use_spark(num_cores, gatk_type, items, spark_opts):
params += ["-T", "HaplotypeCallerSpark"]
if spark_opts:
params += spark_opts
else:
params += ["--spark-master", "local[%s]" % num_cores,
"--conf", "spark.local.dir=%s" % os.path.dirname(tx_out_file),
"--conf", "spark.driver.host=localhost", "--conf", "spark.network.timeout=800",
"--conf", "spark.executor.heartbeatInterval=100"]
else:
params += ["-T", "HaplotypeCaller"]
params += ["--annotation", "ClippingRankSumTest",
"--annotation", "DepthPerSampleHC"]
# Enable hardware based optimizations in GATK 3.1+
if LooseVersion(broad_runner.gatk_major_version()) >= LooseVersion("3.1"):
if _supports_avx():
# Scale down HMM thread default to avoid overuse of cores
# https://github.com/bcbio/bcbio-nextgen/issues/2442
if gatk_type == "gatk4":
params += ["--native-pair-hmm-threads", "1"]
# GATK4 selects the right HMM optimization automatically with FASTEST_AVAILABLE
# GATK3 needs to be explicitly set
else:
params += ["--pair_hmm_implementation", "VECTOR_LOGLESS_CACHING"]
resources = config_utils.get_resources("gatk-haplotype", items[0]["config"])
if "options" in resources:
params += [str(x) for x in resources.get("options", [])]
# Prepare gVCFs if doing joint calling
is_joint = False
if _joint_calling(items) or any("gvcf" in dd.get_tools_on(d) for d in items):
is_joint = True
# If joint calling parameters not set in user options
if not any([x in ["--emit-ref-confidence", "-ERC", "--emitRefConfidence"] for x in params]):
if gatk_type == "gatk4":
params += ["--emit-ref-confidence", "GVCF"]
else:
params += ["--emitRefConfidence", "GVCF"]
params += ["--variant_index_type", "LINEAR", "--variant_index_parameter", "128000"]
# Set GQ banding to not be single GQ resolution
# No recommended default but try to balance resolution and size
# http://gatkforums.broadinstitute.org/gatk/discussion/7051/recommendation-best-practices-gvcf-gq-bands
if not any([x in ["-GQB"] for x in params]):
for boundary in [10, 20, 30, 40, 60, 80]:
params += ["-GQB", str(boundary)]
# Enable non-diploid calling in GATK 3.3+
if LooseVersion(broad_runner.gatk_major_version()) >= LooseVersion("3.3"):
params += ["-ploidy", str(ploidy.get_ploidy(items, region))]
if gatk_type == "gatk4":
# GATK4 Spark calling does not support bgzipped output, use plain VCFs
if is_joint and _use_spark(num_cores, gatk_type, items, spark_opts):
tx_out_file = tx_out_file.replace(".vcf.gz", ".vcf")
params += ["--output", tx_out_file]
else:
params += ["-o", tx_out_file]
broad_runner.new_resources("gatk-haplotype")
memscale = {"magnitude": 0.9 * num_cores, "direction": "increase"} if num_cores > 1 else None
try:
broad_runner.run_gatk(params, os.path.dirname(tx_out_file), memscale=memscale,
parallel_gc=_use_spark(num_cores, gatk_type, items, spark_opts))
except subprocess.CalledProcessError as msg:
# Spark failing on regions without any reads, write an empty VCF instead
# https://github.com/broadinstitute/gatk/issues/4234
if (_use_spark(num_cores, gatk_type, items, spark_opts) and
str(msg).find("java.lang.UnsupportedOperationException: empty collection") >= 0 and
str(msg).find("at org.apache.spark.rdd.RDD") >= 0):
vcfutils.write_empty_vcf(tx_out_file, samples=[dd.get_sample_name(d) for d in items])
else:
raise
if tx_out_file.endswith(".vcf"):
vcfutils.bgzip_and_index(tx_out_file, items[0]["config"])
# avoid bug in GATK where files can get output as non-compressed
if out_file.endswith(".gz") and not os.path.exists(out_file + ".tbi"):
with open(out_file, "r") as in_handle:
is_plain_text = in_handle.readline().startswith("##fileformat")
if is_plain_text:
text_out_file = out_file
out_file = out_file.replace(".vcf.gz", ".vcf")
shutil.move(text_out_file, out_file)
return vcfutils.bgzip_and_index(out_file, items[0]["config"])
def _supports_avx():
"""Check for support for Intel AVX acceleration."""
if os.path.exists("/proc/cpuinfo"):
with open("/proc/cpuinfo") as in_handle:
for line in in_handle:
if line.startswith("flags") and line.find("avx") > 0:
return True
def collect_artifact_metrics(data):
"""Run CollectSequencingArtifacts to collect pre-adapter ligation artifact metrics
https://gatk.broadinstitute.org/hc/en-us/articles/360037429491-CollectSequencingArtifactMetrics-Picard-
"""
OUT_SUFFIXES = [".bait_bias_detail_metrics", ".error_summary_metrics",
".pre_adapter_detail_metrics", ".pre_adapter_summary_metrics"]
broad_runner = broad.runner_from_config(dd.get_config(data))
gatk_type = broad_runner.gatk_type()
ref_file = dd.get_ref_file(data)
bam_file = dd.get_work_bam(data)
if not bam_file:
return None
out_dir = os.path.join(dd.get_work_dir(data), "metrics", "artifact", dd.get_sample_name(data))
utils.safe_makedir(out_dir)
out_base = os.path.join(out_dir, dd.get_sample_name(data))
out_files = [out_base + x for x in OUT_SUFFIXES]
if all([utils.file_exists(x) for x in out_files]):
return out_files
with file_transaction(data, out_dir) as tx_out_dir:
utils.safe_makedir(tx_out_dir)
out_base = os.path.join(tx_out_dir, dd.get_sample_name(data))
params = ["-T", "CollectSequencingArtifactMetrics",
"--VALIDATION_STRINGENCY", "SILENT",
"-R", ref_file,
"-I", bam_file,
"-O", out_base]
broad_runner.run_gatk(params, log_error=False, parallel_gc=True)
return out_files
def collect_oxog_metrics(data):
"""
extracts 8-oxoguanine (OxoG) artifact metrics from CollectSequencingArtifactMetrics
output so we don't have to run CollectOxoGMetrics.
"""
input_base = os.path.join(dd.get_work_dir(data), "metrics", "artifact", dd.get_sample_name(data),
dd.get_sample_name(data))
if not utils.file_exists(input_base + ".pre_adapter_detail_metrics"):
return None
OUT_SUFFIXES = [".oxog_metrics"]
broad_runner = broad.runner_from_config(dd.get_config(data))
gatk_type = broad_runner.gatk_type()
out_dir = os.path.join(dd.get_work_dir(data), "metrics", "oxog", dd.get_sample_name(data))
utils.safe_makedir(out_dir)
ref_file = dd.get_ref_file(data)
out_base = os.path.join(out_dir, dd.get_sample_name(data))
out_files = [out_base + x for x in OUT_SUFFIXES]
if all([utils.file_exists(x) for x in out_files]):
return out_files
with file_transaction(data, out_dir) as tx_out_dir:
utils.safe_makedir(tx_out_dir)
out_base = os.path.join(tx_out_dir, dd.get_sample_name(data))
params = ["-T", "ConvertSequencingArtifactToOxoG",
"--INPUT_BASE", input_base,
"-O", out_base,
"-R", ref_file]
broad_runner.run_gatk(params, log_error=False, parallel_gc=True)
# multiqc <= 1.9 looks for INPUT not INPUT_BASE for these files
# see (https://github.com/ewels/MultiQC/pull/1310)
cmd = f"sed 's/INPUT_BASE/INPUT/g' {out_base}.oxog_metrics -i"
do.run(cmd, f"Fixing {out_base}.oxog_metrics to work with MultiQC.")
return out_files
|
|
import json
from audio import audio
import logging
import os
from threading import Thread
from time import sleep
from Queue import Queue
import sys
import inspect
import re
INTERVAL = 0.1
EFFECT_LENGTH = 2
logging.basicConfig()
logger = logging.getLogger(__name__)
path = os.path.realpath(__file__)
dir_path = os.path.dirname(os.path.realpath(__file__))
def convert(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
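# Example (mirrored by the tests at the bottom of this file):
# convert("ChannelSwitch") -> "channel_switch". This CamelCase-to-snake_case
# name is what Effect.classes is keyed on, and it is the value the "name"
# field of an effect must use in the JSON configuration.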
class Combinator(object):
"""Interprets a JSON dancemat effects configuration and applies effects corresponding to button combinations"""
def __init__(self, filename=None, track=None):
"""
:param filename: The json configuration file (see configurations/effects_1.json)
:param track: The midi track
If no arguments are passed this combinator can be used to generate a JSON template by adding combos
"""
if filename is not None and track is not None:
self.__current_combos = []
with open(filename) as f:
self.combos = map(lambda d: Combo(track, d), json.loads(f.read()))
for combo in self.combos:
combo.start()
self.button_map = {sum(map(hash, combo.buttons)): combo for combo in self.combos}
else:
self.combos = []
def apply_for_buttons(self, buttons):
"""
Sets the effects that best correspond to the set of buttons. If a specific combo is defined for the set of
buttons, that combo will be applied. Otherwise, effects for each individual button will be stacked.
:param buttons: A list of buttons
"""
buttons_hash = sum(map(hash, buttons))
if buttons_hash in self.button_map:
self.button_map[buttons_hash].play()
def dict(self):
return map(Combo.dict, self.combos)
def stop(self):
for combo in self.combos:
combo.stop()
class Combo(object):
"""Maps a set of buttons to a set of effects"""
def __init__(self, track=None, combo_dict=None):
"""
:param track: The midi track
:param combo_dict: A dictionary describing the combo (its buttons and effects)
"""
if combo_dict is not None:
self.buttons = set(combo_dict["buttons"])
self.effects = map(lambda d: Effect.from_dict(track, d), combo_dict["effects"])
else:
self.buttons = []
self.effects = []
def apply(self):
"""
Apply all of the effects listed in this combo
"""
for effect in self.effects:
effect.apply()
def remove(self):
"""
Remove all of the effects listed in this combo
"""
for effect in self.effects:
effect.remove()
def start(self):
for effect in self.effects:
effect.start()
def stop(self):
for effect in self.effects:
effect.stop()
def play(self):
for effect in self.effects:
effect.play()
def dict(self):
return {"buttons": self.buttons, "effects": map(Effect.dict, self.effects)}
def __repr__(self):
return str(map(Effect.__repr__, self.effects))
def __str__(self):
return self.__repr__()
class Effect(Thread):
"""An individual effect that can be applied to change the music"""
def __init__(self, effect_dict):
"""
:param effect_dict: A dictionary describing this effect.
"""
super(Effect, self).__init__()
self.name = effect_dict["name"]
self.value = effect_dict["value"] if "value" in effect_dict else 0
self.length = (effect_dict["length"] if "length" in effect_dict else EFFECT_LENGTH)
self.is_started = False
self.queue = Queue()
@classmethod
def from_dict(cls, track, effect_dict):
"""
Factory method to create an effect class for a dictionary
:param track: A midi track
:param effect_dict: A dictionary describing an effect.
"""
name = effect_dict["name"]
print Effect.classes
try:
effect_class = Effect.classes[name]
except KeyError:
raise AssertionError("No effect named {}".format(name))
return effect_class(track, effect_dict)
def apply(self):
raise AssertionError("{}.apply not overridden".format(self.name))
def remove(self):
raise AssertionError("{}.default not overridden".format(self.name))
def play(self):
self.queue.put("play")
def stop(self):
self.queue.put("stop")
def run(self):
is_applied = False
time = 0.
while True:
if not self.queue.empty():
message = self.queue.get()
if message == "play":
time = 0.
if not is_applied:
is_applied = True
self.apply()
elif message == "stop":
break
if time >= self.length and is_applied:
is_applied = False
self.remove()
if time <= self.length:
time += INTERVAL
sleep(INTERVAL)
def dict(self):
return {"name": self.name, "value": self.value}
def __str__(self):
return self.__repr__()
def __repr__(self):
return "<Effect name={} value={}>".format(self.name, self.value)
class ChannelEffect(Effect):
"""An effect that modifies one or more channels"""
def __init__(self, track, effect_dict):
"""
:param track: A midi track
:param effect_dict: An effect dictionary that includes one or more channels or instrument types
(see player.InstrumentType)
"""
super(ChannelEffect, self).__init__(effect_dict)
self.instrument_types = None
self.instrument_group = None
self.__channels = None
self.track = track
if "channels" in effect_dict:
self.__channels = effect_dict["channels"]
if "instrument_types" in effect_dict:
self.instrument_types = effect_dict["instrument_types"]
if "instrument_group" in effect_dict:
self.instrument_group = effect_dict["instrument_group"]
if all(map(lambda p: p is None, [self.__channels, self.instrument_types, self.instrument_group])):
self.channels = self.track.channels_with_instrument_group("all")
else:
self.channels = []
if self.__channels is not None:
self.channels.extend([channel for channel in self.track.channels if channel.number in self.__channels])
if self.instrument_types is not None:
for instrument_type in self.instrument_types:
self.channels.extend(self.track.channels_with_instrument_type(instrument_type))
if self.instrument_group is not None:
self.channels.extend(self.track.channels_with_instrument_group(self.instrument_group))
@property
def dict(self):
d = super(ChannelEffect, self).dict()
if self.__channels is not None:
d["channels"] = self.__channels
if self.instrument_types is not None:
d["instrument_types"] = self.instrument_types
if self.instrument_group is not None:
d["instrument_group"] = self.instrument_group
return d
class PitchBend(ChannelEffect):
"""Bends the pitch of one or more channels"""
def apply(self):
for channel in self.channels:
channel.pitch_bend(self.value)
def remove(self):
for channel in self.channels:
channel.pitch_bend(audio.PITCHWHEEL_DEFAULT)
class VolumeChange(ChannelEffect):
"""Changes the volume of one or more channels"""
def apply(self):
for channel in self.channels:
channel.volume = self.value
def remove(self):
for channel in self.channels:
channel.volume = audio.VOLUME_DEFAULT
class Intervals(ChannelEffect):
"""Converts notes played through a channel into one or more relative intervals in the key"""
def apply(self):
for channel in self.channels:
channel.intervals = self.value
def remove(self):
for channel in self.channels:
channel.intervals = None
class InstrumentType(ChannelEffect):
"""Changes the type of instrument of one or more channels. See player.InstrumentType"""
def __init__(self, track, effect_dict):
super(InstrumentType, self).__init__(track, effect_dict)
self.defaults = [channel.instrument_type for channel in self.channels]
def apply(self):
for channel in self.channels:
channel.instrument_type = self.value
def remove(self):
for n, channel in enumerate(self.channels):
channel.instrument_type = self.defaults[n]
class InstrumentVersion(ChannelEffect):
"""Changes the version of the instrument for one or more channels. e.g. from one piano to a different piano"""
def __init__(self, track, effect_dict):
super(InstrumentVersion, self).__init__(track, effect_dict)
self.defaults = [channel.instrument_version for channel in self.channels]
def apply(self):
for channel in self.channels:
channel.instrument_version = self.value
def remove(self):
for n, channel in enumerate(self.channels):
channel.instrument_version = self.defaults[n]
class TrackEffect(Effect):
"""An effect that is applied to the whole track"""
def __init__(self, track, effect_dict):
super(TrackEffect, self).__init__(effect_dict)
self.track = track
class TempoShift(TrackEffect):
"""Shifts the tempo of the whole track by some factor. 0.5 is half tempo and 2 double tempo"""
def apply(self):
self.track.tempo_shift = self.value
def remove(self):
self.track.tempo_shift = audio.TEMPO_SHIFT_DEFAULT
class Modulation(ChannelEffect):
def apply(self):
for channel in self.channels:
channel.modulation = self.value
def remove(self):
for channel in self.channels:
channel.modulation = 0
class Pan(ChannelEffect):
def apply(self):
for channel in self.channels:
channel.pan = self.value
def remove(self):
for channel in self.channels:
channel.pan = 63
class ChannelSwitch(ChannelEffect):
def __init__(self, track, effect_dict):
super(ChannelSwitch, self).__init__(track, effect_dict)
self.position = 0
def apply(self):
if len(self.channels) == 0:
logger.warning("No channels in ChannelSwitch")
return
self.position = (self.position + 1) % len(self.channels)
print("switching to channel {}".format(self.position))
for channel in self.channels:
channel.volume = 0
self.channels[self.position].volume = 1
Effect.classes = {convert(key): cls for key, cls in inspect.getmembers(sys.modules[__name__], inspect.isclass)}
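# Illustrative (hedged) example of a single combo entry in the JSON
# configuration consumed by Combinator, based on the keys read by Combo,
# Effect and ChannelEffect above; the channel numbers and values are
# hypothetical:
#   {
#     "buttons": ["up", "x"],
#     "effects": [
#       {"name": "pitch_bend", "value": 1000, "channels": [0, 1]},
#       {"name": "tempo_shift", "value": 0.5, "length": 2}
#     ]
#   }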
if __name__ == "__main__":
"""This code generates a template with every single buttons and double button combination"""
combinator = Combinator()
all_buttons = ['up', 'down', 'left', 'right', 'triangle', 'circle', 'square', 'x']
for button1 in all_buttons:
c = Combo()
c.buttons = [button1]
combinator.combos.append(c)
for button2 in all_buttons:
if button1 < button2:
c = Combo()
c.buttons = [button1, button2]
combinator.combos.append(c)
with open(sys.argv[1], 'w+') as f:
f.write(json.dumps(combinator.dict()))
class TestEffect(object):
def test_class_names(self):
assert "channel_switch" in Effect.classes
def test_convert(self):
assert convert("ChannelSwitch") == "channel_switch"
|
|
# The clearinghouse testlib must be imported first.
from clearinghouse.tests import testlib
from clearinghouse.tests import mocklib
from clearinghouse.common.api import maindb
from clearinghouse.common.exceptions import *
from clearinghouse.common.util import validations
from clearinghouse.website.control import interface
import unittest
mocklib.mock_lockserver_calls()
good_username = "myusername"
good_password = "mypassword"
good_email = "example@example.com"
good_affiliation = "my affiliation"
good_pubkey = "400 401"
class SeattleGeniTestCase(unittest.TestCase):
def setUp(self):
# Setup a fresh database for each test.
testlib.setup_test_db()
def tearDown(self):
# Cleanup the test database.
testlib.teardown_test_db()
def _create_user_expect_success(self, username, password=good_password, email=good_email,
affiliation=good_affiliation, pubkey=good_pubkey):
# We expect a single key to be generated through the backend (the donor key)
mocklib.mock_backend_generate_key(["1 2"])
created_user = interface.register_user(username, password, email, affiliation, pubkey)
user_from_db = maindb.get_user(username)
assert(user_from_db.username == created_user.username)
assert(user_from_db.email == created_user.email)
assert(user_from_db.affiliation == created_user.affiliation)
assert(user_from_db.user_pubkey == created_user.user_pubkey)
assert(user_from_db.user_privkey == created_user.user_privkey)
assert(user_from_db.donor_pubkey == created_user.donor_pubkey)
def _create_user_expect_validation_error(self, username, password=good_password, email=good_email,
affiliation=good_affiliation, pubkey=good_pubkey):
# An empty list because the ValidationError should occur before any backend
# generate_key() calls are performed.
mocklib.mock_backend_generate_key([])
func = interface.register_user
args = (username, password, email, affiliation, pubkey)
self.assertRaises(ValidationError, func, *args)
def test_username_minimum_length(self):
username = "a" * validations.USERNAME_MIN_LENGTH
userobj = self._create_user_expect_success(username)
def test_username_too_short(self):
username = "a" * (validations.USERNAME_MIN_LENGTH - 1)
self._create_user_expect_validation_error(username)
def test_username_maximum_length(self):
username = "a" * validations.USERNAME_MAX_LENGTH
userobj = self._create_user_expect_success(username)
def test_username_too_long(self):
username = "a" * (validations.USERNAME_MAX_LENGTH + 1)
self._create_user_expect_validation_error(username)
def test_username_invalid_chars_spaces(self):
self._create_user_expect_validation_error("test user") # space in middle
self._create_user_expect_validation_error(" testuser") # leading space
self._create_user_expect_validation_error("testuser ") # trailing space
def test_valid_chars_nonleading_underscores(self):
self._create_user_expect_success("9test_user")
self._create_user_expect_success("test9user_") # one trailing underscore
self._create_user_expect_success("testuser__") # two trailing underscores
def test_username_invalid_chars_leading_underscores(self):
username = "_testuser"
self._create_user_expect_validation_error(username)
def test_password_too_short(self):
bad_password = "a" * (validations.PASSWORD_MIN_LENGTH - 1)
self._create_user_expect_validation_error(good_username, password=bad_password)
def test_password_same_as_username(self):
self._create_user_expect_validation_error(good_username, password=good_username)
def test_valid_country_email(self):
email = "test@example.co.uk"
self._create_user_expect_success(good_username, email=email)
def test_valid_gmail_label_email(self):
email = "test#label@gmail.com"
self._create_user_expect_success(good_username, email=email)
def test_invalid_email(self):
bad_email = "test@test" # missing expected tld
self._create_user_expect_validation_error(good_username, email=bad_email)
def test_affiliation_too_short(self):
bad_affiliation = "a" * (validations.AFFILIATION_MIN_LENGTH - 1)
self._create_user_expect_validation_error(good_username, affiliation=bad_affiliation)
def test_affiliation_too_long(self):
bad_affiliation = "a" * (validations.AFFILIATION_MAX_LENGTH + 1)
self._create_user_expect_validation_error(good_username, affiliation=bad_affiliation)
def test_invalid_user_pubkey_empty_string(self):
"""
Tests an empty string for the pubkey. It should be None rather than an
empty string to indicate that we should generate the key for the user.
"""
bad_pubkey = ""
self._create_user_expect_validation_error(good_username, pubkey=bad_pubkey)
def test_invalid_user_pubkey_invalid_key(self):
bad_pubkey = "0" # should be two numbers
self._create_user_expect_validation_error(good_username, pubkey=bad_pubkey)
bad_pubkey = "1 0" # first number must be smaller than second
self._create_user_expect_validation_error(good_username, pubkey=bad_pubkey)
bad_pubkey = "a b" # letters, not numbers
self._create_user_expect_validation_error(good_username, pubkey=bad_pubkey)
bad_pubkey = "2 3 3" # they might have tried to upload their private key
self._create_user_expect_validation_error(good_username, pubkey=bad_pubkey)
def test_seattlegeni_generates_user_keypair(self):
# We expect a single keypair to be generated directly through the keygen
# api (specifically, the user keys).
user_pubkey = "3 4"
user_privkey = "2 3 3"
mocklib.mock_keygen_generate_keypair([(user_pubkey, user_privkey)])
# We expect a single key to be generated directly through the backend api
# (specifically, the donor key).
donor_pubkey = "1 2"
mocklib.mock_backend_generate_key([donor_pubkey])
provided_pubkey=None
interface.register_user(good_username, good_password, good_email,
good_affiliation, provided_pubkey)
user_from_db = maindb.get_user(good_username)
assert(user_from_db.user_pubkey == user_pubkey)
assert(user_from_db.user_privkey == user_privkey)
assert(user_from_db.donor_pubkey == donor_pubkey)
# TODO: test the number of free vessel credits the user has after creation.
# TODO: test the number of used vessel credits the user has after creation.
# TODO: test the number of donations the user has after creation.
# TODO: test username exists
def run_test():
unittest.main()
if __name__ == "__main__":
run_test()
|
|
# --------------------------------------------------------
# Fully Convolutional Instance-aware Semantic Segmentation
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by Haozhi Qi, Guodong Zhang
# --------------------------------------------------------
"""
Pascal VOC database
This class loads ground truth annotations from the standard Pascal VOC XML data format
and transforms them into IMDB format. Selective search is used for proposals; see the
roidb function. Results are written in the Pascal VOC format. Evaluation is based on
the mAP criterion.
import cPickle
import cv2
import os
import scipy.io as sio
import numpy as np
import hickle as hkl
from imdb import IMDB
from pascal_voc_eval import voc_eval, voc_eval_sds
from ds_utils import unique_boxes, filter_small_boxes
class PascalVOC(IMDB):
def __init__(self, image_set, root_path, devkit_path, result_path=None, mask_size=-1, binary_thresh=None):
"""
fill basic information to initialize imdb
:param image_set: 2007_trainval, 2007_test, etc
:param root_path: 'selective_search_data' and 'cache'
:param devkit_path: data and results
:return: imdb object
"""
year, image_set = image_set.split('_')
super(PascalVOC, self).__init__('voc_' + year, image_set, root_path, devkit_path, result_path) # set self.name
self.year = year
self.root_path = root_path
self.devkit_path = devkit_path
self.data_path = os.path.join(devkit_path, 'VOC' + year)
self.classes = ['__background__', # always index 0
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor']
self.num_classes = len(self.classes)
self.image_set_index = self.load_image_set_index()
self.num_images = len(self.image_set_index)
print 'num_images', self.num_images
self.mask_size = mask_size
self.binary_thresh = binary_thresh
self.config = {'comp_id': 'comp4',
'use_diff': False,
'min_size': 2}
def load_image_set_index(self):
"""
find out which indexes correspond to the given image set (train or val)
:return: list of image indices
"""
image_set_index_file = os.path.join(self.data_path, 'ImageSets', 'Main', self.image_set + '.txt')
assert os.path.exists(image_set_index_file), 'Path does not exist: {}'.format(image_set_index_file)
with open(image_set_index_file) as f:
image_set_index = [x.strip() for x in f.readlines()]
return image_set_index
def image_path_from_index(self, index):
"""
given image index, find out full path
:param index: index of a specific image
:return: full path of this image
"""
image_file = os.path.join(self.data_path, 'JPEGImages', index + '.jpg')
if not os.path.exists(image_file):
image_file = os.path.join(self.data_path, 'img', index + '.jpg')
assert os.path.exists(image_file), 'Path does not exist: {}'.format(image_file)
return image_file
def mask_path_from_index(self, index, gt_mask):
"""
given image index, cache high resolution mask and return full path of masks
:param index: index of a specific image
:return: full path of this mask
"""
if self.image_set == 'val':
return []
cache_file = os.path.join(self.cache_path, 'VOCMask')
if not os.path.exists(cache_file):
os.makedirs(cache_file)
# instance level segmentation
gt_mask_file = os.path.join(cache_file, index + '.hkl')
if not os.path.exists(gt_mask_file):
hkl.dump(gt_mask.astype('bool'), gt_mask_file, mode='w', compression='gzip')
# cache flip gt_masks
gt_mask_flip_file = os.path.join(cache_file, index + '_flip.hkl')
if not os.path.exists(gt_mask_flip_file):
hkl.dump(gt_mask[:, :, ::-1].astype('bool'), gt_mask_flip_file, mode='w', compression='gzip')
return gt_mask_file
def gt_roidb(self):
"""
return ground truth image regions database
:return: imdb[image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} gt roidb loaded from {}'.format(self.name, cache_file)
return roidb
gt_roidb = [self.load_pascal_annotation(index) for index in self.image_set_index]
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote gt roidb to {}'.format(cache_file)
return gt_roidb
def gt_sdsdb(self):
"""
return the ground truth SDS (detection + segmentation) database, building and caching it on first use
:return: sdsdb [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'cache_seg_inst', 'flipped', ...]
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_sdsdb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
sdsdb = cPickle.load(fid)
print '{} gt sdsdb loaded from {}'.format(self.name, cache_file)
return sdsdb
print 'loading sbd mask annotations'
gt_sdsdb = [self.load_sbd_mask_annotations(index) for index in self.image_set_index]
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_sdsdb, fid, cPickle.HIGHEST_PROTOCOL)
# for future release usage
# need to implement load sbd data
return gt_sdsdb
def load_sbd_mask_annotations(self, index):
"""
Load gt_masks information from SBD's additional data
"""
sds_rec = dict()
sds_rec['image'] = self.image_path_from_index(index)
size = cv2.imread(sds_rec['image']).shape
sds_rec['height'] = size[0]
sds_rec['width'] = size[1]
# class level segmentation
seg_cls_name = os.path.join(self.data_path, 'cls', index + '.mat')
seg_cls_mat = sio.loadmat(seg_cls_name)
seg_cls_data = seg_cls_mat['GTcls']['Segmentation'][0][0]
# instance level segmentation
seg_obj_name = os.path.join(self.data_path, 'inst', index + '.mat')
seg_obj_mat = sio.loadmat(seg_obj_name)
seg_obj_data = seg_obj_mat['GTinst']['Segmentation'][0][0]
unique_inst = np.unique(seg_obj_data)
background_ind = np.where(unique_inst == 0)[0]
unique_inst = np.delete(unique_inst, background_ind)
border_inds = np.where(unique_inst == 255)[0]
unique_inst = np.delete(unique_inst, border_inds)
num_objs = len(unique_inst)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros(num_objs, dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
gt_masks = np.zeros((num_objs, size[0], size[1]))
for idx, inst_id in enumerate(unique_inst):
[r, c] = np.where(seg_obj_data == inst_id)
x1 = np.min(c)
x2 = np.max(c)
y1 = np.min(r)
y2 = np.max(r)
cur_gt_mask = (seg_obj_data == inst_id)
cur_gt_mask_cls = seg_cls_data[cur_gt_mask]
assert np.unique(cur_gt_mask_cls).shape[0] == 1
cur_inst_cls = np.unique(cur_gt_mask_cls)[0]
boxes[idx, :] = [x1, y1, x2, y2]
gt_classes[idx] = cur_inst_cls
gt_masks[idx, :, :] = cur_gt_mask
overlaps[idx, cur_inst_cls] = 1.0
sds_rec.update({
'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'max_classes': overlaps.argmax(axis=1),
'max_overlaps': overlaps.max(axis=1),
'cache_seg_inst': self.mask_path_from_index(index, gt_masks),
'flipped': False
})
return sds_rec
def load_pascal_annotation(self, index):
"""
for a given index, load image and bounding boxes info from XML file
:param index: index of a specific image
:return: record['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
"""
import xml.etree.ElementTree as ET
roi_rec = dict()
roi_rec['image'] = self.image_path_from_index(index)
filename = os.path.join(self.data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
size = tree.find('size')
roi_rec['height'] = float(size.find('height').text)
roi_rec['width'] = float(size.find('width').text)
objs = tree.findall('object')
if not self.config['use_diff']:
non_diff_objs = [obj for obj in objs if int(obj.find('difficult').text) == 0]
objs = non_diff_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
class_to_index = dict(zip(self.classes, range(self.num_classes)))
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
cls = class_to_index[obj.find('name').text.lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
roi_rec.update({'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'max_classes': overlaps.argmax(axis=1),
'max_overlaps': overlaps.max(axis=1),
'flipped': False})
return roi_rec
def load_selective_search_roidb(self, gt_roidb):
"""
turn selective search proposals into selective search roidb
:param gt_roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
:return: roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
"""
import scipy.io
matfile = os.path.join(self.root_path, 'selective_search_data', self.name + '.mat')
assert os.path.exists(matfile), 'selective search data does not exist: {}'.format(matfile)
raw_data = scipy.io.loadmat(matfile)['boxes'].ravel() # original was dict ['images', 'boxes']
box_list = []
for i in range(raw_data.shape[0]):
boxes = raw_data[i][:, (1, 0, 3, 2)] - 1 # pascal voc dataset starts from 1.
keep = unique_boxes(boxes)
boxes = boxes[keep, :]
keep = filter_small_boxes(boxes, self.config['min_size'])
boxes = boxes[keep, :]
box_list.append(boxes)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def selective_search_roidb(self, gt_roidb, append_gt=False):
"""
get selective search roidb and ground truth roidb
:param gt_roidb: ground truth roidb
:param append_gt: append ground truth
:return: roidb of selective search
"""
cache_file = os.path.join(self.cache_path, self.name + '_ss_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} ss roidb loaded from {}'.format(self.name, cache_file)
return roidb
if append_gt:
print 'appending ground truth annotations'
ss_roidb = self.load_selective_search_roidb(gt_roidb)
roidb = IMDB.merge_roidbs(gt_roidb, ss_roidb)
else:
roidb = self.load_selective_search_roidb(gt_roidb)
with open(cache_file, 'wb') as fid:
cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote ss roidb to {}'.format(cache_file)
return roidb
def evaluate_detections(self, detections):
"""
top level evaluations
:param detections: result matrix, [bbox, confidence]
:return: None
"""
# make all these folders for results
result_dir = os.path.join(self.result_path, 'results')
if not os.path.exists(result_dir):
os.mkdir(result_dir)
year_folder = os.path.join(self.result_path, 'results', 'VOC' + self.year)
if not os.path.exists(year_folder):
os.mkdir(year_folder)
res_file_folder = os.path.join(self.result_path, 'results', 'VOC' + self.year, 'Main')
if not os.path.exists(res_file_folder):
os.mkdir(res_file_folder)
self.write_pascal_results(detections)
info = self.do_python_eval()
return info
def evaluate_sds(self, all_boxes, all_masks):
self._write_voc_seg_results_file(all_boxes, all_masks)
info = self._py_evaluate_segmentation()
return info
def _write_voc_seg_results_file(self, all_boxes, all_masks):
"""
Write results as pkl files; this differs from the detection task because
it is impractical to write masks to a plain txt file
"""
# make all these folders for results
result_dir = os.path.join(self.result_path, 'results')
if not os.path.exists(result_dir):
os.mkdir(result_dir)
# Always reformat results in case the masks are not binary
# or are shaped (n, sz*sz) instead of (n, sz, sz)
all_boxes, all_masks = self._reformat_result(all_boxes, all_masks)
for cls_inds, cls in enumerate(self.classes):
if cls == '__background__':
continue
print 'Writing {} VOC results file'.format(cls)
filename = os.path.join(result_dir, cls + '_det.pkl')
print filename
with open(filename, 'wb') as f:
cPickle.dump(all_boxes[cls_inds], f, cPickle.HIGHEST_PROTOCOL)
filename = os.path.join(result_dir, cls + '_seg.pkl')
with open(filename, 'wb') as f:
cPickle.dump(all_masks[cls_inds], f, cPickle.HIGHEST_PROTOCOL)
def _reformat_result(self, boxes, masks):
num_images = self.num_images
num_class = len(self.classes)
reformat_masks = [[[] for _ in xrange(num_images)]
for _ in xrange(num_class)]
for cls_inds in xrange(1, num_class):
for img_inds in xrange(num_images):
if len(masks[cls_inds][img_inds]) == 0:
continue
num_inst = masks[cls_inds][img_inds].shape[0]
reformat_masks[cls_inds][img_inds] = masks[cls_inds][img_inds]\
.reshape(num_inst, self.mask_size, self.mask_size)
# reformat_masks[cls_inds][img_inds] = reformat_masks[cls_inds][img_inds] >= 0.4
all_masks = reformat_masks
return boxes, all_masks
def _py_evaluate_segmentation(self):
info_str = ''
gt_dir = self.data_path
imageset_file = os.path.join(self.data_path, 'ImageSets', 'Main', self.image_set + '.txt')
cache_dir = os.path.join(self.devkit_path, 'annotations_cache')
output_dir = os.path.join(self.result_path, 'results')
aps = []
# define this as true according to SDS's evaluation protocol
use_07_metric = True
print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
info_str += 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
info_str += '\n'
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
print '~~~~~~ Evaluation use min overlap = 0.5 ~~~~~~'
info_str += '~~~~~~ Evaluation use min overlap = 0.5 ~~~~~~'
info_str += '\n'
for i, cls in enumerate(self.classes):
if cls == '__background__':
continue
det_filename = os.path.join(output_dir, cls + '_det.pkl')
seg_filename = os.path.join(output_dir, cls + '_seg.pkl')
ap = voc_eval_sds(det_filename, seg_filename, gt_dir,
imageset_file, cls, cache_dir, self.classes, self.mask_size, self.binary_thresh, ov_thresh=0.5)
aps += [ap]
print('AP for {} = {:.2f}'.format(cls, ap*100))
info_str += 'AP for {} = {:.2f}\n'.format(cls, ap*100)
print('Mean AP@0.5 = {:.2f}'.format(np.mean(aps)*100))
info_str += 'Mean AP@0.5 = {:.2f}\n'.format(np.mean(aps)*100)
print '~~~~~~ Evaluation use min overlap = 0.7 ~~~~~~'
info_str += '~~~~~~ Evaluation use min overlap = 0.7 ~~~~~~\n'
aps = []
for i, cls in enumerate(self.classes):
if cls == '__background__':
continue
det_filename = os.path.join(output_dir, cls + '_det.pkl')
seg_filename = os.path.join(output_dir, cls + '_seg.pkl')
ap = voc_eval_sds(det_filename, seg_filename, gt_dir,
imageset_file, cls, cache_dir, self.classes, self.mask_size, self.binary_thresh, ov_thresh=0.7)
aps += [ap]
print('AP for {} = {:.2f}'.format(cls, ap*100))
info_str += 'AP for {} = {:.2f}\n'.format(cls, ap*100)
print('Mean AP@0.7 = {:.2f}'.format(np.mean(aps)*100))
info_str += 'Mean AP@0.7 = {:.2f}\n'.format(np.mean(aps)*100)
return info_str
def get_result_file_template(self):
"""
this is a template
VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
:return: a string template
"""
res_file_folder = os.path.join(self.result_path, 'results', 'VOC' + self.year, 'Main')
comp_id = self.config['comp_id']
filename = comp_id + '_det_' + self.image_set + '_{:s}.txt'
path = os.path.join(res_file_folder, filename)
return path
def write_pascal_results(self, all_boxes):
"""
write results files in pascal devkit path
:param all_boxes: boxes to be processed [bbox, confidence]
:return: None
"""
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
print 'Writing {} VOC results file'.format(cls)
filename = self.get_result_file_template().format(cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.image_set_index):
dets = all_boxes[cls_ind][im_ind]
if len(dets) == 0:
continue
# the VOCdevkit expects 1-based indices
for k in range(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1, dets[k, 2] + 1, dets[k, 3] + 1))
def do_python_eval(self):
"""
python evaluation wrapper
:return: info_str
"""
info_str = ''
annopath = os.path.join(self.data_path, 'Annotations', '{0!s}.xml')
imageset_file = os.path.join(self.data_path, 'ImageSets', 'Main', self.image_set + '.txt')
annocache = os.path.join(self.cache_path, self.name + '_annotations.pkl')
aps = []
# The PASCAL VOC metric changed in 2010
use_07_metric = True if self.year == 'SDS' or int(self.year) < 2010 else False
print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
info_str += 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
info_str += '\n'
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
filename = self.get_result_file_template().format(cls)
rec, prec, ap = voc_eval(filename, annopath, imageset_file, cls, annocache,
ovthresh=0.5, use_07_metric=use_07_metric)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
info_str += 'AP for {} = {:.4f}\n'.format(cls, ap)
print('Mean AP@0.5 = {:.4f}'.format(np.mean(aps)))
info_str += 'Mean AP@0.5 = {:.4f}\n\n'.format(np.mean(aps))
# @0.7
aps = []
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
filename = self.get_result_file_template().format(cls)
rec, prec, ap = voc_eval(filename, annopath, imageset_file, cls, annocache,
ovthresh=0.7, use_07_metric=use_07_metric)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
info_str += 'AP for {} = {:.4f}\n'.format(cls, ap)
print('Mean AP@0.7 = {:.4f}'.format(np.mean(aps)))
info_str += 'Mean AP@0.7 = {:.4f}'.format(np.mean(aps))
return info_str
|
|
import matplotlib
if matplotlib.get_backend() != 'Qt4Agg':
matplotlib.use('Qt4Agg', warn=True, force=True)
matplotlib.rcParams['toolbar'] = 'None'
import hotspotter.draw_func2 as df2
import hotspotter.helpers as helpers
import matplotlib.pyplot as plt
import numpy as np
import scipy
def hesaff_output():
import sympy as sy
import collections
from sympy.matrices.expressions.factorizations import lu, LofCholesky, qr, svd
import sympy
import sympy.matrices
import sympy.matrices.expressions
import sympy.matrices.expressions.factorizations
#from sympy.mpmath import sqrtm
sqrtm = sympy.mpmath.sqrtm
a, b, c, a11, a12, a21, a22 = sy.symbols('a b c a11 a12 a21 a22', real=True, commutative=True, nonzero=True, imaginary=False, comparable=True)
E = sy.Matrix(((a, b), (b, c)))
A = sy.Matrix(((a11, 0), (a21, a22)))
#x = E.solve(A.T * A)
# A.T * A == E
eq1 = sy.Eq(E, A.T * A)
a_ = sy.solve(eq1, a, check=False)
b_ = sy.solve(eq1, b, check=False)
c_ = sy.solve(eq1, c, check=False)
a11_ = sy.solve(eq1, a11, check=False)
a21_ = sy.solve(eq1, a21, check=False)
a22_ = sy.solve(eq1, a22, check=False)
eq2 = eq1.subs(a11, a11_[0][0])
a21_ = sy.solve(eq2, a21, check=False)
eq3 = eq1.subs(a21, a21_[a21])
a22_ = sy.solve(eq3, a22, check=False)
L1, D1 = E.LDLdecomposition()
U1 = L1.T
E_LDL1 = L1 * D1 * U1
L3 = E.cholesky()
E3 = L3 * L3.T * L3.inv() * L3
E3 = L3 * L3.T * (L3.inv() * L3) * (L3.T.inv() * L3.T )
E3 = L3 * (L3.inv() * L3).T * L3.T * (L3.T.inv() * L3.T)
# identity: L3.inv() * E3 == L3.T, since E3 == L3 * L3.T
A2 = L3.T
A2.T * A2
print(E_LDL1)
L2, U2, p = E.LUdecomposition()
E_LU2 = L2 * U2
print(E_LU2)
#---------------
def asMatrix(list_):
N = int(len(list_) / 2)
Z = sympy.Matrix([list_[0:N], list_[N:]])
return Z
def simplify_mat(X):
list_ = [_.simplify() for _ in X]
Z = asMatrix(list_)
return Z
def symdot(X, Y):
Z = asMatrix(X.dot(Y))
return Z
Eq = sy.Eq
solve = sy.solve
_b = sy.solve(eq1, b, check=False)
# mirror sympy_manual_sqrtm_inv below so the solves underneath are well defined
w, x, y, z, d = sy.symbols('w x y z d', real=True)
A = sy.Matrix([(a, 0), (c, d)])
Ainv = A.inv()
R = sy.Matrix([(w, x), (y, z)])
M = symdot(R, R)
# Solve in terms of A
eq1 = sy.Eq(a, M[0])
eq2 = sy.Eq(c, M[2])
eq3 = sy.Eq(d, M[3])
w1 = sy.solve(eq1, w)[1].subs(y,0)
#y1 = sympy.Eq(0, M[1]) # y is 0
x1 = sy.solve(eq2, x)[0]
z1 = sy.solve(eq3, z)[1].subs(y,0)
x2 = x1.subs(w, w1).subs(z, z1)
R_itoA = simplify_mat(sympy.Matrix([(w1, x2), (0, z1)]))
Rinv_itoA = simplify_mat(R_itoA.inv())
print('Given the lower triangular matrix: A=[(a, 0), (c, d)]')
print('Its inverse is: inv(A)')
print(Ainv) # sub to lower triangular
print('--')
print('Its square root is: R = sqrtm(A)')
print(R_itoA)
print('--')
Epinv = E.pinv()
# Left singular vectors are eigenvectors of E * E.T
left_singular_vects = (E * E.T).eigenvects()
# Right singular vectors are eigenvectors of E.T * E
right_singular_vects = (E.T * E).eigenvects()
# Singular values
singular_vals = E.singular_values()
U = sy.Matrix([list(_[2][0]) for _ in left_singular_vects]).T
S = sy.Matrix([(singular_vals[0], 0), (0, singular_vals[1])])
V = sy.Matrix([list(_[2][0]) for _ in right_singular_vects]).T
assert U.shape == S.shape == V.shape == E.shape
assert sy.ask(sy.Q.orthogonal(U))
assert sy.ask(sy.Q.orthogonal(V))
assert sy.ask(sy.Q.diagonal(S))
u,s,v = svd(E)
#n = sy.Symbol('n')
#X = sy.MatrixSymbol('X', n, n)
U, S, V = svd(E)
assert U.shape == S.shape == V.shape == E.shape
assert sy.ask(sy.Q.orthogonal(U))
assert sy.ask(sy.Q.orthogonal(V))
assert sy.ask(sy.Q.diagonal(S))
# L.H is conjugate transpose
# E = L.dot(L.H)
L, U, p = E.LUdecomposition_Simple()
Q, R = E.QRdecomposition()
E.LUdecompositionFF
def SVD(A):
UEM, UEV = (A.T * A).diagonalize(normalize=True, sort=True)
VEM, VEV = (A * A.T).diagonalize(normalize=True, sort=True)
sigma = UEV ** (sy.S(1) / 2)  # square root of the eigenvalue matrix
return UEM, sigma, VEM
U, S, V = SVD(E)
help(E.cholesky)
help(E.LDLdecomposition)
help(E.QRdecomposition)
help(E.LUdecomposition_Simple)
help(E.LUdecompositionFF)
help(E.LUsolve)
L = E.cholesky()
E_2 = L * L.T
M = sy.Matrix(((1, 0, 0, 0, 2), (0, 0, 3, 0, 0), (0, 0, 0, 0, 0), (0, 4, 0, 0, 0)))
M = sy.Matrix(2,3, [1,2,3,4,5,6])
N = M.H * (M * M.H) ** -1
U, Sig, V = SVD(M)
assert U * Sig * V.T == M
assert U*U.T == U.T*U == sy.eye(U.cols)
assert V*V.T == V.T*V == sy.eye(V.cols)
#assert S.is_diagonal()
M = sy.Matrix(((0, 1), (1, 0), (1, 1)))
U, Sig, V = SVD(M)
assert U * Sig * V.T == M
assert U*U.T == U.T*U == sy.eye(U.cols)
assert V*V.T == V.T*V == sy.eye(V.cols)
import sympy
import sympy.galgebra.GA as GA
import sympy.galgebra.latex_ex as tex
import sympy.printing as symprint
import sympy.abc
import sympy.mpmath
a, b, c, d = sympy.symbols('a b c d')
theta = sympy.abc.theta
sin = sympy.functions.elementary.trigonometric.sin
cos = sympy.functions.elementary.trigonometric.cos
sqrtm = sympy.mpmath.sqrtm
xc = sympy.Matrix([sin(theta), cos(theta)])
E = sympy.Matrix([(a, b), (b, c)])
def numpy_test():
# Constants
inv = scipy.linalg.inv
sqrtm = scipy.linalg.sqrtm
tau = np.pi*2
sin = np.sin
cos = np.cos
# Variables
N = 2**5
theta = np.linspace(0, tau, N)
a, b, c, d = (1, 0, .5, .8)
xc = np.array((sin(theta), cos(theta))) # points on unit circle 2xN
A = np.array([(a, b), # maps points on ellipse
(c, d)]) # to points on unit circle
# Test data
Ainv = inv(A)
# Map circle points onto the ellipse
xe = Ainv.dot(xc)
# Test Ellipse
E = A.T.dot(A) # equation of ellipse
test = lambda ell: ell.T.dot(E).dot(ell).diagonal()
print all(np.abs(1 - test(xe)) < 1e-9)
# Start Plot
df2.reset()
def plotell(ell):
df2.plot(ell[0], ell[1])
# Plot Circle
fig = df2.figure(1, plotnum=121, title='xc')
plotell(xc)
# Plot Ellipse
fig = df2.figure(1, plotnum=122, title='xe = inv(A).dot(xc)')
plotell(xe)
# End Plot
df2.set_geometry(fig.number, 1000, 75, 500, 500)
df2.update()
# E = ellipse
#
# points x on ellipse satisfy x.T * E * x = 1
#
# A.T * A = E
# A = transforms points on an ellipse to a unit circle
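# Derivation: if x_c = A.dot(x_e) lies on the unit circle then
#   1 = x_c.T.dot(x_c) = x_e.T.dot(A.T.dot(A)).dot(x_e) = x_e.T.dot(E).dot(x_e),
# which is exactly the quadratic form checked by test() in numpy_test above.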
def sqrtm_eq():
M = np.array([(33, 24), (48, 57)])
R1 = np.array([(1, 4), (8, 5)])
R2 = np.array([(5, 2), (4, 7)])
print M
print R1.dot(R1)
print R2.dot(R2)
'''
A matrix M is positive semidefinite
if x.conj().T.dot(M).dot(x) >= 0 for all x,
or if rank(A) == rank(A.conj().T.dot(A))
'''
def find_ellipse_major_minor():
import sympy
import sympy.galgebra.GA as GA
import sympy.galgebra.latex_ex as tex
import sympy.printing as symprint
import sympy.abc
import sympy.mpmath
Eq = sympy.Eq
solve = sympy.solve
sin = sympy.functions.elementary.trigonometric.sin
cos = sympy.functions.elementary.trigonometric.cos
a, b, c, d = sympy.symbols('a b c d')
theta = sympy.abc.theta
# R * E
a2 = cos(theta)*a - sin(theta)*c
b2 = cos(theta)*b - sin(theta)*d
c2 = sin(theta)*a + cos(theta)*c
d2 = sin(theta)*b + cos(theta)*d
# Set b2 and c2 to 0
b2_eq_0 = Eq(b2,0)
#
c2_eq_0 = Eq(c2,0)
theta_b = solve(b2_eq_0, theta)
theta_c = solve(c2_eq_0, theta)
def sympy_manual_sqrtm_inv():
'''
Manual calculation of inv(sqrtm) for a lower triangular matrix
'''
# https://sympy.googlecode.com/files/sympy-0.7.2.win32.exe
import sympy
import sympy.galgebra.GA as GA
import sympy.galgebra.latex_ex as tex
import sympy.printing as symprint
import sympy.abc
import sympy.mpmath
a, b, c, d = sympy.symbols('a b c d')
theta = sympy.abc.theta
sin = sympy.functions.elementary.trigonometric.sin
cos = sympy.functions.elementary.trigonometric.cos
sqrtm = sympy.mpmath.sqrtm
xc = sympy.Matrix([sin(theta), cos(theta)])
A = sympy.Matrix([(a, b), (c, d)])
A = sympy.Matrix([(a, 0), (c, d)])
Ainv = A.inv()
#Asqrtm = sqrtm(A)
def asMatrix(list_):
N = int(len(list_) / 2)
Z = sympy.Matrix([list_[0:N], list_[N:]])
return Z
def simplify_mat(X):
list_ = [_.simplify() for _ in X]
Z = asMatrix(list_)
return Z
def symdot(X, Y):
Z = asMatrix(X.dot(Y))
return Z
Eq = sympy.Eq
solve = sympy.solve
a, b, c, d = sympy.symbols('a b c d')
w, x, y, z = sympy.symbols('w x y z')
R = sympy.Matrix([(w, x), (y, z)])
M = symdot(R,R)
# Solve in terms of A
eq1 = Eq(a, M[0])
eq2 = Eq(c, M[2])
eq3 = Eq(d, M[3])
w1 = solve(eq1, w)[1].subs(y,0)
#y1 = sympy.Eq(0, M[1]) # y is 0
x1 = solve(eq2, x)[0]
z1 = solve(eq3, z)[1].subs(y,0)
x2 = x1.subs(w, w1).subs(z, z1)
R_itoA = simplify_mat(sympy.Matrix([(w1, x2), (0, z1)]))
Rinv_itoA = simplify_mat(R_itoA.inv())
print('Given the lower triangular matrix: A=[(a, 0), (c, d)]')
print('Its inverse is: inv(A)')
print(Ainv) # sub to lower triangular
print('--')
print('Its square root is: R = sqrtm(A)')
print(R_itoA)
print('--')
print('Method 1: Its inverse square root is: M1 = inv(sqrtm(A))')
print(Rinv_itoA)
print('--')
# Solve in terms of A (but from inv)
a_, b_, c_, d_ = Ainv
eq1_ = Eq(a_, M[0])
eq2_ = Eq(c_, M[2])
eq3_ = Eq(d_, M[3])
w1_ = solve(eq1_, w)[1].subs(y,0)
x1_ = solve(eq2_, x)[0]
z1_ = solve(eq3_, z)[1].subs(y,0)
#sympy.Eq(0, M[1]) # y is 0
x2_ = x1_.subs(w, w1_).subs(z, z1_)
Rinv_itoA_2 = simplify_mat(sympy.Matrix([(w1_, x2_), (0, z1_)]))
print('Method 2: Its square root inverse is: M2 = sqrtm(inv(A))')
print(Rinv_itoA_2)
print('----')
print('Perform checks to make sure the calculation was correct')
print('Checking that A == inv(M1 M1)')
print simplify_mat(symdot(Rinv_itoA,Rinv_itoA).inv())
print('----')
print('Checking that A == inv(M2 M2)')
print simplify_mat(symdot(Rinv_itoA_2,Rinv_itoA_2).inv())
print('....')
print('....')
# hmmm, why not equal? ah, they are equiv
sqrt = sympy.sqrt
ans1 = c/(-sqrt(a)*d - a*sqrt(d))
ans2 = -c/(a*d*(sqrt(1/a) + sqrt(1/d)))
print('There are two equivalent ways of specifying the b component of sqrt(inv(A))')
print ans1
print ans2
#------------------------------
A2 = simplify_mat(R_itoA.dot(R_itoA))
E_ = A.T.dot(A)
E = sympy.Matrix([E_[0:2], E_[2:4]])
print('Original ellipse matrix: E = A.T.dot(A)')
print(E.subs(b,0))
E_evects = E.eigenvects()
E_evals = E.eigenvals()
e1, e2 = E_evects
print('\n---Eigenvect1---')
print('\ne1[0]=')
print e1[0]
print('\ne1[1]=')
print e1[1]
print('\ne1[2]=')
print e1[2]
print('\n---Eigenvect2---')
print('\ne2[0]=')
print e2[0]
print('\ne2[1]=')
print e2[1]
print('\ne2[2]=')
print e2[2]
print('\n---(inv(sqrtm(A))) dot circle points---')
print('Given a transformation A and an angle theta, the point on that ellipse is: ')
xe = Rinv_itoA.dot(xc)
print xe
#A2 = sqrtm(inv(A)).real
#A3 = inv(sqrtm(A)).real
#fig = df2.figure(1, plotnum=223, title='')
#plotell(e2)
#fig = df2.figure(1, plotnum=224, title='')
#plotell(e3)
#df2.reset()
#real_data()
sympy_manual_sqrtm_inv()
r'''
str1 = '\n'.join(helpers.execstr_func(sympy_data).split('\n')[0:-2])
str2 = '\n'.join(helpers.execstr_func(execute_test).split('\n')[3:])
toexec = str1 + '\n' + str2
exec(toexec)
'''
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from collections import OrderedDict
from acq4.util import Qt
from .CanvasItem import CanvasItem
import numpy as np
import scipy.ndimage as ndimage
import pyqtgraph as pg
import pyqtgraph.flowchart
import acq4.util.DataManager as DataManager
import acq4.util.debug as debug
from .itemtypes import registerItemType
class ImageCanvasItem(CanvasItem):
"""
CanvasItem displaying an image.
The image may be 2 or 3-dimensional.
Options:
image: May be a fileHandle, ndarray, or GraphicsItem.
handle: May optionally be specified in place of image
"""
_typeName = "Image"
def __init__(self, image=None, **opts):
## If no image was specified, check for a file handle.
if image is None:
image = opts.get('handle', None)
item = None
self.data = None
if isinstance(image, Qt.QGraphicsItem):
item = image
elif isinstance(image, np.ndarray):
self.data = image
elif isinstance(image, DataManager.FileHandle):
opts['handle'] = image
self.handle = image
self.data = self.handle.read()
if 'name' not in opts:
opts['name'] = self.handle.shortName()
try:
if 'transform' in self.handle.info():
tr = pg.SRTTransform3D(self.handle.info()['transform'])
tr = pg.SRTTransform(tr) ## convert to 2D
opts['pos'] = tr.getTranslation()
opts['scale'] = tr.getScale()
opts['angle'] = tr.getRotation()
else: ## check for older info formats
if 'imagePosition' in self.handle.info():
opts['scale'] = self.handle.info()['pixelSize']
opts['pos'] = self.handle.info()['imagePosition']
elif 'Downsample' in self.handle.info():
### Needed to support an older format stored by 2p imager
if 'pixelSize' in self.handle.info():
opts['scale'] = self.handle.info()['pixelSize']
if 'microscope' in self.handle.info():
m = self.handle.info()['microscope']
opts['pos'] = m['position'][0:2]
else:
info = self.data._info[-1]
opts['pos'] = info.get('imagePosition', None)
elif hasattr(self.data, '_info'):
info = self.data._info[-1]
opts['scale'] = info.get('pixelSize', None)
opts['pos'] = info.get('imagePosition', None)
else:
opts['defaultUserTransform'] = {'scale': (1e-5, 1e-5)}
opts['scalable'] = True
except:
debug.printExc('Error reading transformation for image file %s:' % image.name())
if item is None:
item = pg.ImageItem()
CanvasItem.__init__(self, item, **opts)
self.splitter = Qt.QSplitter()
self.splitter.setOrientation(Qt.Qt.Vertical)
self.layout.addWidget(self.splitter, self.layout.rowCount(), 0, 1, 2)
self.filterGroup = pg.GroupBox("Image Filter")
fgl = Qt.QGridLayout()
fgl.setContentsMargins(3, 3, 3, 3)
fgl.setSpacing(1)
self.filterGroup.setLayout(fgl)
self.filter = ImageFilterWidget()
self.filter.sigStateChanged.connect(self.filterStateChanged)
fgl.addWidget(self.filter)
self.splitter.addWidget(self.filterGroup)
self.histogram = pg.HistogramLUTWidget()
self.histogram.setImageItem(self.graphicsItem())
# addWidget arguments: row, column, rowspan, colspan
self.splitter.addWidget(self.histogram)
self.imgModeCombo = Qt.QComboBox()
self.imgModeCombo.addItems(['SourceOver', 'Overlay', 'Plus', 'Multiply'])
self.layout.addWidget(self.imgModeCombo, self.layout.rowCount(), 0, 1, 1)
self.imgModeCombo.currentIndexChanged.connect(self.imgModeChanged)
self.autoBtn = Qt.QPushButton("Auto")
self.autoBtn.setCheckable(True)
self.autoBtn.setChecked(True)
self.layout.addWidget(self.autoBtn, self.layout.rowCount()-1, 1, 1, 1)
self.timeSlider = Qt.QSlider(Qt.Qt.Horizontal)
self.layout.addWidget(self.timeSlider, self.layout.rowCount(), 0, 1, 2)
self.timeSlider.valueChanged.connect(self.timeChanged)
## controls that only appear if there is a time axis
self.timeControls = [self.timeSlider]
if self.data is not None:
if isinstance(self.data, pg.metaarray.MetaArray):
self.filter.setInput(self.data.asarray())
else:
self.filter.setInput(self.data)
self.updateImage()
# Needed to ensure selection box wraps the image properly
tr = self.saveTransform()
self.resetUserTransform()
self.restoreTransform(tr)
# Why doesn't this work?
#self.selectBoxFromUser() ## move select box to match new bounds
@classmethod
def checkFile(cls, fh):
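# Return a match score for the file: 0 if this item type cannot display it,
# otherwise a priority (here 10 for .ma files, 100 for common image formats).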
if not fh.isFile():
return 0
ext = fh.ext().lower()
if ext == '.ma':
return 10
elif ext in ['.png', '.jpg', '.tif']:  # '.ma' is already handled above
return 100
return 0
def timeChanged(self, t):
self.updateImage()
def imgModeChanged(self):
mode = str(self.imgModeCombo.currentText())
self.graphicsItem().setCompositionMode(getattr(Qt.QPainter, 'CompositionMode_' + mode))
def filterStateChanged(self):
self.updateImage()
def updateImage(self):
img = self.graphicsItem()
# Try running data through flowchart filter
data = self.filter.output()
if data is None:
data = self.data
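# Heuristic for the time axis: 4-D data is assumed to include a time axis;
# 3-D data is treated as (t, x, y) unless the last axis is small enough (<= 4)
# to be a color axis.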
if data.ndim == 4:
showTime = True
elif data.ndim == 3:
if data.shape[2] <= 4: ## assume last axis is color
showTime = False
else:
showTime = True
else:
showTime = False
if showTime:
self.timeSlider.setMinimum(0)
self.timeSlider.setMaximum(data.shape[0]-1)
self.graphicsItem().setImage(data[self.timeSlider.value()], autoLevels=self.autoBtn.isChecked())
else:
self.graphicsItem().setImage(data, autoLevels=self.autoBtn.isChecked())
for widget in self.timeControls:
widget.setVisible(showTime)
def saveState(self, **kwds):
state = CanvasItem.saveState(self, **kwds)
state['imagestate'] = self.histogram.saveState()
state['filter'] = self.filter.saveState()
state['composition'] = self.imgModeCombo.currentText()
return state
def restoreState(self, state):
CanvasItem.restoreState(self, state)
self.filter.restoreState(state['filter'])
self.imgModeCombo.setCurrentIndex(self.imgModeCombo.findText(state['composition']))
self.histogram.restoreState(state['imagestate'])
registerItemType(ImageCanvasItem)
class ImageFilterWidget(Qt.QWidget):
sigStateChanged = Qt.Signal()
def __init__(self):
Qt.QWidget.__init__(self)
self.layout = Qt.QGridLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.layout)
# Set up filter buttons
self.btns = OrderedDict()
row, col = 0, 0
for name in ['Mean', 'Max', 'Max w/Gaussian', 'Max w/Median', 'Edge']:
btn = Qt.QPushButton(name)
self.btns[name] = btn
btn.setCheckable(True)
self.layout.addWidget(btn, row, col)
btn.clicked.connect(self.filterBtnClicked)
col += 1
if col > 1:
col = 0
row += 1
# show flowchart control panel inside a collapsible group box
self.fcGroup = pg.GroupBox('Filter Settings')
fgl = Qt.QVBoxLayout()
self.fcGroup.setLayout(fgl)
fgl.setContentsMargins(0, 0, 0, 0)
self.layout.addWidget(self.fcGroup, row+1, 0, 1, 2)
self.fc = pg.flowchart.Flowchart(terminals={'dataIn': {'io':'in'}, 'dataOut': {'io':'out'}})
fgl.addWidget(self.fc.widget())
self.fcGroup.setCollapsed(True)
self.fc.sigStateChanged.connect(self.sigStateChanged)
def filterBtnClicked(self, checked):
# remember slice before clearing fc
snode = self.fc.nodes().get('Slice', None)
if snode is not None:
snstate = snode.saveState()
else:
snstate = None
print(snstate)
self.fc.clear()
if not checked:
return
btn = self.sender()
# uncheck all other filter btns
for b in self.btns.values():
if b is not btn:
b.setChecked(False)
name = btn.text()
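# Each preset below rebuilds the flowchart as a linear chain:
#   dataIn -> Slice -> [optional Gaussian/Median/difference-of-Gaussians filter] -> Mean or Max -> dataOut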
if name == 'Mean':
s = self.fc.createNode('Slice', name="Slice")
m = self.fc.createNode('Mean', name="Mean", pos=[150, 0])
self.fc.connectTerminals(self.fc['dataIn'], s['In'])
self.fc.connectTerminals(s['Out'], m['In'])
self.fc.connectTerminals(m['Out'], self.fc['dataOut'])
elif name == 'Max':
s = self.fc.createNode('Slice', name="Slice")
m = self.fc.createNode('Max', name="Max", pos=[150, 0])
self.fc.connectTerminals(self.fc['dataIn'], s['In'])
self.fc.connectTerminals(s['Out'], m['In'])
self.fc.connectTerminals(m['Out'], self.fc['dataOut'])
elif name == 'Max w/Gaussian':
s = self.fc.createNode('Slice', name="Slice", pos=[-40, 0])
f = self.fc.createNode('GaussianFilter', name="GaussianFilter", pos=[70, 0])
m = self.fc.createNode('Max', name="Max", pos=[180, 0])
self.fc.connectTerminals(self.fc['dataIn'], s['In'])
self.fc.connectTerminals(s['Out'], f['In'])
self.fc.connectTerminals(f['Out'], m['In'])
self.fc.connectTerminals(m['Out'], self.fc['dataOut'])
elif name == 'Max w/Median':
s = self.fc.createNode('Slice', name="Slice", pos=[-40, 0])
f = self.fc.createNode('MedianFilter', name="MedianFilter", pos=[70, 0])
m = self.fc.createNode('Max', name="Max", pos=[180, 0])
self.fc.connectTerminals(self.fc['dataIn'], s['In'])
self.fc.connectTerminals(s['Out'], f['In'])
self.fc.connectTerminals(f['Out'], m['In'])
self.fc.connectTerminals(m['Out'], self.fc['dataOut'])
elif name == 'Edge':
s = self.fc.createNode('Slice', name="Slice", pos=[-40, 0])
f1 = self.fc.createNode('PythonEval', name='GaussDiff', pos=[70, 0])
f1.setCode("""
from scipy.ndimage import gaussian_filter
img = args['input'].astype(float)
edge = gaussian_filter(img, (0, 2, 2)) - gaussian_filter(img, (0, 1, 1))
return {'output': edge}
""")
m = self.fc.createNode('Max', name="Max", pos=[180, 0])
self.fc.connectTerminals(self.fc['dataIn'], s['In'])
self.fc.connectTerminals(s['Out'], f1['input'])
self.fc.connectTerminals(f1['output'], m['In'])
self.fc.connectTerminals(m['Out'], self.fc['dataOut'])
# restore slice if possible
if snstate is not None:
snode = self.fc.nodes().get('Slice', None)
if snode is not None:
print("restore!")
snode.restoreState(snstate)
def setInput(self, img):
self.fc.setInput(dataIn=img)
def output(self):
return self.fc.output()['dataOut']
def process(self, img):
return self.fc.process(dataIn=img)['dataOut']
def saveState(self):
return {'flowchart': self.fc.saveState()}
def restoreState(self, state):
self.fc.restoreState(state['flowchart'])
|
|
"""
Example of how to use byte-code execution technique to trace accesses to numpy
arrays.
This file demonstrates two applications of this technique:
* optimize numpy computations for repeated calling
* provide automatic differentiation of procedural code
"""
import __builtin__
import ctypes
import inspect
import logging
import opcode
#import os
import sys
#import trace
import traceback
import types
import numpy as np
import theano
import autodiff
from autodiff.utils import itercode, orderedcallargs, flat_from_doc
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# from theano.tensor.shared_randomstreams import RandomStreams
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
# XXX FIXME This will not do - seed must be exposed.
global_randomstreams = RandomStreams(seed=123)
# Opcode help: http://docs.python.org/library/dis.html
# -- cellget returns the contents of a cell
cellget = ctypes.pythonapi.PyCell_Get
cellget.restype = ctypes.py_object
cellget.argtypes = (ctypes.py_object,)
# -- cellmake creates a cell pointer
cellmake = ctypes.pythonapi.PyCell_New
cellmake.restype = ctypes.py_object
cellmake.argtypes = (ctypes.py_object,)
def istensor(x):
tensortypes = (theano.tensor.TensorConstant,
theano.tensor.TensorVariable)
return isinstance(x, tensortypes)
class Unassigned(object):
"""Unassigned value"""
class LoadUnassigned(Exception):
"""Access to Unassigned value"""
class FrameVM(object):
"""
A Class for evaluating a code block of CPython bytecode,
and tracking accesses to numpy arrays.
"""
def __init__(self, watcher, func):
logger.debug('FrameVM: {0}'.format(func))
self.watcher = watcher
if isinstance(func, autodiff.symbolic.Function):
func = func.pyfn
self.func = func
self.stack = []
self._locals = None
self._myglobals = None
self.code_iter = None
self.print_ops = False
self.print_stack = False
def push(self, item):
if item is Unassigned:
raise LoadUnassigned()
self.stack.append(item)
def pop(self):
return self.stack.pop(-1)
def pushN(self, items):
for item in items:
if item is Unassigned:
raise LoadUnassigned()
self.stack.extend(items)
def popN(self, N):
rval = self.stack[-N:]
self.stack[-N:] = []
return rval
def add_shadow(self, x):
if id(x) in self.watcher.constants:
return
# -- We cannot safely set up shadow variables that are aliased to
# memory that is visible to the running program, unless that
# program can guarantee that all views of that memory are
# immutable. CPython caches small ints (-5 <= i <= 256), so
# we wrap them in non-cached np.int_ instances.
if isinstance(x, int):
if type(x) is int and -5 <= x <= 256:
x = np.int_(x)
s_x = self.watcher.shared(np.asarray(x))
elif isinstance(x, float):
s_x = self.watcher.shared(np.asarray(x))
elif getattr(x, 'dtype', None) == bool:
print >> sys.stderr, ('Warning: Theano has no bool, '
'upgrading to int8')
s_x = self.watcher.shared(x.astype('int8'))
elif isinstance(x, (np.ndarray, np.number)):
s_x = self.watcher.shared(x)
else:
return
self.watcher.shadow(x, s_x)
def ensure_shadow(self, x):
# small ints cannot be shadowed due to CPython's small-int caching, so we
# wrap them in non-cached np.int_ instances.
if type(x) is int and -5 <= x <= 256:
x = np.int_(x)
if id(x) not in self.watcher:
self.add_shadow(x)
return self.watcher.getvar(x)
def call(self, args, kwargs):
if not isinstance(args, tuple):
raise TypeError('vm.call: args must be tuple', args)
if not isinstance(kwargs, dict):
raise TypeError('vm.call: kwargs must be dict', kwargs)
func = self.func
if isinstance(func, type) and issubclass(func, BaseException):
# XXX not shadowing exception creation, because exceptions
# do not have func_code. Is this OK? can we do better?
return func(*args, **kwargs)
func_code = self.func.func_code
self._myglobals = {}
self._locals = []
for name in func_code.co_names:
#print 'name', name
try:
self._myglobals[name] = func.func_globals[name]
except KeyError:
try:
self._myglobals[name] = __builtin__.__getattribute__(name)
except AttributeError:
#print 'WARNING: name lookup failed', name
pass
# get function arguments
argspec = inspect.getargspec(func)
# match function arguments to passed parameters
callargs = orderedcallargs(func, *args, **kwargs)
# named args => locals
self._locals.extend(callargs[arg] for arg in argspec.args)
# *args => locals
if argspec.varargs:
self._locals.append(callargs[argspec.varargs])
# **kwargs => locals
if argspec.keywords:
self._locals.append(callargs[argspec.keywords])
# other vars => locals
no_unbound_args = len(func_code.co_varnames) - len(self._locals)
self._locals.extend([Unassigned] * no_unbound_args)
# shadow arguments
for val in flat_from_doc(callargs):
if id(val) not in self.watcher:
self.add_shadow(val)
self.code_iter = itercode(func_code.co_code)
jmp = None
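# Dispatch loop: each opcode is routed to a method named 'op_<OPNAME>'; jump
# opcodes return ('rel', offset) or ('abs', target), which is sent back into
# the bytecode iterator to move the instruction pointer.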
while not hasattr(self, 'rval'):
try:
i, op, arg = self.code_iter.send(jmp)
except StopIteration:
break
name = opcode.opname[op]
# method names can't have '+' in them
name = {'SLICE+0': 'SLICE_PLUS_0',
'SLICE+1': 'SLICE_PLUS_1',
'SLICE+2': 'SLICE_PLUS_2',
'SLICE+3': 'SLICE_PLUS_3',
'STORE_SLICE+0': 'STORE_SLICE_PLUS_0',
'STORE_SLICE+1': 'STORE_SLICE_PLUS_1',
'STORE_SLICE+2': 'STORE_SLICE_PLUS_2',
'STORE_SLICE+3': 'STORE_SLICE_PLUS_3',
}.get(name, name)
if self.print_ops:
print 'OP: ', i, name
if self.print_stack:
print self.stack
try:
op_method = getattr(self, 'op_' + name)
except AttributeError:
raise AttributeError('FrameVM does not have a method defined '
'for \'op_{0}\''.format(name))
except:
raise
jmp = op_method(i, op, arg)
return self.rval
def op_BINARY_ADD(self, i, op, arg):
arg2 = self.pop()
arg1 = self.pop()
# No Theano vars allowed on the stack
assert not hasattr(arg1, 'type')
assert not hasattr(arg2, 'type')
r = arg1 + arg2
self.push(r)
if (id(arg1) in self.watcher or id(arg2) in self.watcher):
s1 = self.ensure_shadow(arg1)
s2 = self.ensure_shadow(arg2)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s1 + s2).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s1 + s2)
#print 'added sym'
def op_BINARY_DIVIDE(self, i, op, arg):
arg2 = self.pop()
arg1 = self.pop()
assert not hasattr(arg1, 'type')
assert not hasattr(arg2, 'type')
r = arg1 / arg2
self.push(r)
if (id(arg1) in self.watcher or id(arg2) in self.watcher):
s1 = self.ensure_shadow(arg1)
s2 = self.ensure_shadow(arg2)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s1 / s2).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s1 / s2)
def op_BINARY_FLOOR_DIVIDE(self, i, op, arg):
arg2 = self.pop()
arg1 = self.pop()
assert not hasattr(arg1, 'type')
assert not hasattr(arg2, 'type')
r = arg1 // arg2
self.push(r)
if (id(arg1) in self.watcher or id(arg2) in self.watcher):
s1 = self.ensure_shadow(arg1)
s2 = self.ensure_shadow(arg2)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s1 // s2).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s1 // s2)
def op_BINARY_SUBTRACT(self, i, op, arg):
arg2 = self.pop()
arg1 = self.pop()
assert not hasattr(arg1, 'type')
assert not hasattr(arg2, 'type')
r = arg1 - arg2
self.push(r)
if (id(arg1) in self.watcher or id(arg2) in self.watcher):
s1 = self.ensure_shadow(arg1)
s2 = self.ensure_shadow(arg2)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s1 - s2).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s1 - s2)
def op_BINARY_MULTIPLY(self, i, op, arg):
arg2 = self.pop()
arg1 = self.pop()
r = arg1 * arg2
self.push(r)
assert not hasattr(arg1, 'type')
assert not hasattr(arg2, 'type')
if (id(arg1) in self.watcher or id(arg2) in self.watcher):
s1 = self.ensure_shadow(arg1)
s2 = self.ensure_shadow(arg2)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s1 * s2).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s1 * s2)
#print 'mul sym', id(r)
def op_BINARY_POWER(self, i, op, arg):
arg2 = self.pop()
arg1 = self.pop()
r = arg1 ** arg2
self.push(r)
if (id(arg1) in self.watcher or id(arg2) in self.watcher):
s1 = self.ensure_shadow(arg1)
s2 = self.ensure_shadow(arg2).astype(s1.dtype)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s1 ** s2).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s1 ** s2)
#print 'mul sym', id(r)
def op_BINARY_MODULO(self, i, op, arg):
arg2 = self.pop()
arg1 = self.pop()
r = arg1 % arg2
self.push(r)
if (id(arg1) in self.watcher or id(arg2) in self.watcher):
s1 = self.ensure_shadow(arg1)
s2 = self.ensure_shadow(arg2)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s1 % s2).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s1 % s2)
def op_BINARY_SUBSCR(self, i, op, arg):
# Implements TOS = TOS1[TOS].
tos1, tos = self.popN(2)
#print 'tos', tos
#print 'tos1', tos1
rval = tos1[tos]
self.push(rval)
if id(tos) in self.watcher:
s_tos = self.ensure_shadow(tos)
else:
s_tos = tos
if id(tos1) in self.watcher:
s_tos1 = self.ensure_shadow(tos1)
else:
s_tos1 = tos1
if isinstance(tos, np.ndarray) and tos.dtype == bool:
s_rval = s_tos1[s_tos.nonzero()]
else:
s_rval = s_tos1[s_tos]
if id(tos) in self.watcher or id(tos1) in self.watcher:
self.watcher.shadow(rval, s_rval)
def op_BUILD_MAP(self, i, op, arg):
self.push({})
def op_BUILD_SLICE(self, i, op, arg):
if arg == 2:
tos1, tos = self.popN(2)
self.push(slice(tos1, tos))
elif arg == 3:
tos2, tos1, tos = self.popN(3)
self.push(slice(tos2, tos1, tos))
else:
raise NotImplementedError()
def op_BUILD_TUPLE(self, i, op, arg):
if arg:
self.push(tuple(self.popN(arg)))
else:
self.push(())
def op_BUILD_LIST(self, i, op, arg):
if arg:
self.push(list(self.popN(arg)))
else:
self.push([])
def op_CALL_FUNCTION(self, i, op, arg, call_vargs=None, call_kwargs=None):
if call_vargs is None:
# -- these are the things passed with *foo syntax
call_vargs = ()
if call_kwargs is None:
# -- these are the things passed with **foo syntax
call_kwargs = {}
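# CPython 2.x CALL_FUNCTION packs the positional-arg count into the low byte
# of the oparg and the keyword-arg count into the next byte.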
n_args = arg & 0xFF
n_kwargs = (arg & 0xFF00) >> 8
#print 'N_ARGS', n_args, n_kwargs, call_vargs
assert not (arg >> 16) # what would this stuff up here mean?
kwargs = dict([(self.stack[-2 * ii], self.stack[-2 * ii + 1])
for ii in range(n_kwargs, 0, -1)])
args = [self.stack[-ii - 2 * n_kwargs] for ii in range(n_args, 0, -1)]
assert all(Unassigned is not ai for ai in args)
# -- pop all args off the stack
if arg:
self.stack = self.stack[:- n_args - 2 * n_kwargs]
# -- pop the function itself off the stack
func = self.pop()
args = args + list(call_vargs)
orig_kwargs_size = len(kwargs)
kwargs.update(call_kwargs)
assert len(kwargs) == orig_kwargs_size + len(call_kwargs)
#print dir(func)
#print func.__self__
all_args = args + kwargs.values()
# -- get symbolic args
if len(call_vargs) > 0:
s_args = [self.watcher.getvar(a) for a in args[:-len(call_vargs)]]
s_args.extend(self.watcher.getvar(a) for a in call_vargs)
s_args = tuple(s_args)
else:
s_args = tuple(self.watcher.getvar(a) for a in args)
s_kwargs = dict([(kw, self.watcher.getvar(val))
for kw, val in kwargs.items()])
if hasattr(func, '__theano_op__'):
# XXX: document that we are assuming func is pure -
# if rval depends on globals or closure this Context is not
# going to know that.
# -- hand control back to Python for duration of func
rval = func(*args, **kwargs)
if any(id(a) in self.watcher for a in all_args):
s_rval = func.__theano_op__(*s_args, **s_kwargs)
self.watcher.shadow(rval, s_rval)
# ================ NumPy and builtin functions
elif ((getattr(func, '__module__', None)
and func.__module__.startswith('numpy'))
or isinstance(func, np.ufunc)
or str(func) == '<built-in function abs>'
or str(func) == '<built-in function max>'
or str(func) == '<built-in function min>'
or str(func) == '<built-in function sum>'):
rval = func(*args, **kwargs)
if any(id(a) in self.watcher for a in all_args):
if func.__name__ == 'sum':
if type(rval) == int:
rval = np.int_(rval)
s_rval = theano.tensor.sum(*s_args, **s_kwargs)
self.watcher.shadow(rval, s_rval)
elif func.__name__ in ('abs', 'absolute'):
self.watcher.shadow(rval, abs(*s_args))
elif func.__name__ == 'max':
assert str(func) == '<built-in function max>'
s_rval = theano.tensor.maximum(*s_args, **s_kwargs)
assert s_rval.ndim == 0 # builtin max can't make vector
self.watcher.shadow(rval, s_rval)
elif func.__name__ == 'min':
assert str(func) == '<built-in function min>'
s_rval = theano.tensor.minimum(*s_args, **s_kwargs)
assert s_rval.ndim == 0 # builtin min can't make vector
self.watcher.shadow(rval, s_rval)
elif func.__name__ == 'reshape':
self.watcher.shadow(
rval, theano.tensor.reshape(*s_args, **s_kwargs))
elif func.__name__ == 'arange':
# tensor.arange takes the dtype of its input but
# numpy.arange does not. Since we are compiling the Theano
# graph, recast the numpy value to match the symbolic dtype
sval = theano.tensor.arange(*s_args, **s_kwargs)
rval = rval.astype(sval.dtype)
elif func.__name__ in theano.tensor.basic._cast_mapping.keys():
# handle cast functions
rval = func(*args, **kwargs)
sval = theano.tensor.cast(*s_args, dtype=func.__name__)
self.watcher.shadow(rval, sval)
elif func.__name__ in ['bool', 'bool_', 'bool8']:
# Theano has no bool type, cast to int8 instead
sval = theano.tensor.cast(*s_args, dtype='int8')
self.watcher.shadow(rval, sval)
elif func.__name__ in ['ones', 'zeros']:
s_fn = getattr(theano.tensor, func.__name__)
sval = s_fn(*s_args, **s_kwargs).astype(str(rval.dtype))
self.watcher.shadow(rval, sval)
elif func.__name__ == 'identity':
# theano has no identity function, only 'eye'
dtype = s_kwargs.get('dtype', None)
if not dtype and len(s_args) > 1:
dtype = s_args[1]
sval = theano.tensor.eye(s_args[0], dtype=dtype)
self.watcher.shadow(rval, sval)
else:
try:
theano_fn = getattr(theano.tensor, func.__name__)
except:
raise NotImplementedError(func)
# XXX should we do this? since it is not obvious that
# reductions don't take symbolic args, this could lead to
# users compiling functions that are supposed to have axis
# arguments but silently ignore them. Leaving this
# functionality out for now -- Users must call Constant()
# explicitly.
# many Theano reductions do not support symbolic axes
# by checking for it here we don't have to wrap the
# argument in a Constant()
# argspec = orderedargspec(theano_fn, *s_args, **s_kwargs)
# if (istensor(argspec.get('axis', None)) and
# func.__name__ not in ['concatenate']):
# if 'axis' in s_kwargs:
# s_kwargs['axis'] = kwargs['axis']
# else:
# r_axis = args[argspec.args.index('axis')]
# s_args[argspec.args.index('axis')] = r_axis
self.watcher.shadow(rval, theano_fn(*s_args, **s_kwargs))
else:
# no argument was shadowed (e.g. zeros())
self.add_shadow(rval)
# ================ Array methods
elif isinstance(getattr(func, '__self__', None),
(np.ndarray, np.number)):
assert id(func.__self__) in self.watcher
s_self = self.watcher.svars[id(func.__self__)]
if 0:
pass
elif func.__name__ == 'copy':
assert not args
assert not kwargs
rval = func()
self.watcher.shadow(rval, s_self.copy())
elif func.__name__ == 'reshape':
rval = func(*args, **kwargs)
# Theano requires shape to be a tuple
if not isinstance(s_args[0], (list, tuple)):
s_args = (s_args,)
self.watcher.shadow(rval, s_self.reshape(*s_args, **s_kwargs))
elif func.__name__ == 'swapaxes':
rval = func(*args, **kwargs)
axis1, axis2 = args
s_dims = range(s_self.ndim)
s_dims[axis1], s_dims[axis2] = s_dims[axis2], s_dims[axis1]
self.watcher.shadow(rval, s_self.dimshuffle(*s_dims))
elif func.__name__ == 'astype':
rval = func(*args, **kwargs)
if 'dtype' in kwargs:
dtype = kwargs['dtype']
else:
dtype = args[0]
if not isinstance(dtype, str):
# catch numpy dtype objects like np.float32
try:
dtype = dtype.__name__
except:
raise NotImplementedError
if dtype == 'bool':
dtype = 'int8'  # Theano has no bool dtype (the original '==' here was a no-op)
self.watcher.shadow(rval, s_self.astype(dtype))
elif func.__name__ == 'sort':
# sort is an inplace method
rval = func() # returns None
# shadow the original array; it has been updated inplace
self.watcher.shadow(func.__self__, s_self.sort())
else:
try:
theano_fn = getattr(s_self, func.__name__)
except:
raise NotImplementedError(func)
rval = func(*args, **kwargs)
self.watcher.shadow(rval, theano_fn(*s_args, **s_kwargs))
# ================ built-ins
elif 'built-in' in str(func):
if len(args) == len(kwargs) == 0:
rval = func()
# -- built-in ndarray methods should be caught above, not here.
elif func.__name__ in ('setdefault',):
rval = func(*args, **kwargs)
elif func.__name__ in ('enumerate', 'range', 'xrange', 'zip'):
rval = func(*args, **kwargs)
elif 'method rand of mtrand.RandomState' in str(func):
# build Theano random uniform numbers
rval = func(*args, **kwargs)
self.watcher.shadow(
rval,
global_randomstreams.uniform(
low=0,
high=1,
size=tuple(args),
dtype=str(np.asarray(rval).dtype)))
elif ('method random of mtrand.RandomState' in str(func)
or 'method random_sample of mtrand.RandomState'
in str(func)):
# build Theano random uniform numbers
rval = func(*args, **kwargs)
self.watcher.shadow(
rval,
global_randomstreams.uniform(
low=0,
high=1,
size=autodiff.utils.as_seq(args[0], tuple),
dtype=str(np.asarray(rval).dtype)))
elif 'method uniform of mtrand.RandomState' in str(func):
# build Theano random uniform numbers
rval = func(*args, **kwargs)
self.watcher.shadow(
rval,
global_randomstreams.uniform(
*args,
dtype=str(np.asarray(rval).dtype),
**kwargs))
else:
raise NotImplementedError(func)
# ================ Types
elif type(func) == type:
rval = func(*args, **kwargs)
# ================ AutoDiff Functions
elif func is autodiff.functions.constant:
# make sure the rval will have a valid id, then add it to the
# Context's constants set (so it can be ignored)
rval = func(*args, **kwargs)
if isinstance(rval, int):
rval = np.int_(rval)
elif isinstance(rval, float):
rval = np.float_(rval)
elif isinstance(rval, bool):
rval = np.bool_(rval)
else:
rval = np.asarray(rval)
self.watcher.constants.add(id(rval))
elif func is autodiff.functions.tag:
# make sure the rval is shadowed, then add a new svar with the
# appropriate tag
rval = func(*args, **kwargs)
tag = kwargs.pop('tag', args[1])
sval = self.ensure_shadow(rval)
self.watcher.svars[tag] = sval
# ================ Everything Else
else:
logger.debug('stepping into %s' % str(func))
vm = FrameVM(self.watcher, func)
rval = vm.call(tuple(args), kwargs)
self.push(rval)
def op_CALL_FUNCTION_VAR(self, i, op, arg):
call_vargs = self.pop()
return self.op_CALL_FUNCTION(i, op, arg, call_vargs=call_vargs)
def op_CALL_FUNCTION_VAR_KW(self, i, op, arg):
call_vargs, call_kwargs = self.popN(2)
rval = self.op_CALL_FUNCTION(i,
op,
arg,
call_vargs=call_vargs,
call_kwargs=call_kwargs)
return rval
def op_COMPARE_OP(self, i, op, arg):
opname = opcode.cmp_op[arg]
right = self.pop()
left = self.pop()
if 0:
pass
elif opname == '==':
self.push(left == right)
elif opname == '!=':
self.push(left != right)
elif opname == '>':
self.push(left > right)
elif opname == '<':
self.push(left < right)
elif opname == '>=':
self.push(left >= right)
elif opname == '<=':
self.push(left <= right)
elif opname == 'is':
self.push(left is right)
elif opname == 'in':
self.push(left in right)
else:
raise NotImplementedError('comparison: %s' % opname)
if any(id(a) in self.watcher for a in [left, right]):
sargs = [self.watcher.getvar(ai) for ai in [left, right]]
tos = self.stack[-1]
if 0:
pass
elif opname == '==':
self.watcher.shadow(tos, theano.tensor.eq(*sargs))
elif opname == '!=':
self.watcher.shadow(tos, theano.tensor.neq(*sargs))
elif opname == '<':
self.watcher.shadow(tos, theano.tensor.lt(*sargs))
elif opname == '>':
self.watcher.shadow(tos, theano.tensor.gt(*sargs))
elif opname == '<=':
self.watcher.shadow(tos, theano.tensor.le(*sargs))
elif opname == '>=':
self.watcher.shadow(tos, theano.tensor.ge(*sargs))
elif opname == 'is':
pass
else:
raise NotImplementedError('Comparison on watched args',
opname)
def op_DUP_TOP(self, i, op, arg):
self.stack.append(self.stack[-1])
def op_DUP_TOPX(self, i, op, arg):
assert arg > 0
self.stack.extend(self.stack[-arg:])
def op_FOR_ITER(self, i, op, arg):
# either push tos.next()
# or pop tos and send (arg)
tos = self.stack[-1]
try:
next = tos.next()
# print 'next', next
self.push(next)
except StopIteration:
self.pop()
return ('rel', arg)
def op_INPLACE_ADD(self, i, op, arg):
tos = self.pop()
tos1 = self.pop()
r = tos1
r += tos
self.push(r)
if (id(tos) in self.watcher or id(tos1) in self.watcher):
s_tos = self.ensure_shadow(tos)
s_tos1 = self.ensure_shadow(tos1)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s_tos + s_tos1).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s_tos + s_tos1)
def op_INPLACE_DIVIDE(self, i, op, arg):
tos = self.pop()
tos1 = self.pop()
r = tos1
r /= tos
self.push(r)
if (id(tos) in self.watcher or id(tos1) in self.watcher):
s_tos = self.ensure_shadow(tos)
s_tos1 = self.ensure_shadow(tos1)
if isinstance(r, np.ndarray):
# note operand order: r = tos1 / tos
self.watcher.shadow(r, (s_tos1 / s_tos).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s_tos1 / s_tos)
def op_INPLACE_MULTIPLY(self, i, op, arg):
tos = self.pop()
tos1 = self.pop()
r = tos1
r *= tos
self.push(r)
if (id(tos) in self.watcher or id(tos1) in self.watcher):
s_tos = self.ensure_shadow(tos)
s_tos1 = self.ensure_shadow(tos1)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s_tos * s_tos1).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s_tos * s_tos1)
def op_INPLACE_SUBTRACT(self, i, op, arg):
tos1, tos = self.popN(2)
r = tos1
r -= tos
self.push(r)
if (id(tos) in self.watcher or id(tos1) in self.watcher):
s_tos = self.ensure_shadow(tos)
s_tos1 = self.ensure_shadow(tos1)
            if isinstance(r, np.ndarray):
                self.watcher.shadow(r, (s_tos1 - s_tos).astype(str(r.dtype)))
            else:
                self.watcher.shadow(r, s_tos1 - s_tos)
def op_JUMP_ABSOLUTE(self, i, op, arg):
# print 'sending', arg
return ('abs', arg)
def op_JUMP_FORWARD(self, i, op, arg):
return ('rel', arg)
def op_JUMP_IF_TRUE(self, i, op, arg):
tos = self.stack[-1]
if tos:
return ('rel', arg)
def op_GET_ITER(self, i, op, arg):
# replace tos -> iter(tos)
tos = self.stack[-1]
if id(tos) in self.watcher:
raise NotImplementedError('iterator of watched value')
self.stack[-1] = iter(tos)
def op_LOAD_GLOBAL(self, i, op, arg):
# print 'LOAD_GLOBAL', self.names[arg]
tos = self._myglobals[self.func.func_code.co_names[arg]]
if type(tos) is int and -5 <= tos <= 256:
tos = np.int_(tos)
self.push(tos)
if id(tos) not in self.watcher:
self.add_shadow(self.stack[-1])
def op_LOAD_ATTR(self, i, op, arg):
# print 'LOAD_ATTR', self.names[arg]
attr = self.func.func_code.co_names[arg]
#
# we would like to do
# self.stack[-1] = getattr(TOS, attr)
#
# *EXCEPT* if attr is a property, then it actually represents a
# function call
tos = self.pop()
if isinstance(tos, np.ndarray):
if id(tos) not in self.watcher:
raise NotImplementedError(
'how did this var get here?', (id(tos), tos))
if id(tos) in self.watcher:
s_tos = self.watcher.svars[id(tos)]
if attr == 'shape':
rval = tos.shape
# note this old comment... what does it mean?
# XXX: NOT TRACKING SHAPE CHANGES BECAUSE
# BAD INTERACTION WITH fbncc.__theano_op__
self.watcher.shadow(rval, s_tos.shape)
elif attr == 'T':
rval = tos.T
self.watcher.shadow(rval, s_tos.T)
elif attr == 'imag':
rval = tos.imag
self.watcher.shadow(rval, s_tos.imag)
else:
try:
rval = getattr(tos, attr)
except:
raise NotImplementedError('ndarray attribute %s' % attr)
self.push(rval)
else:
logger.debug('attribute access %s' % attr)
rval = getattr(tos, attr)
self.push(rval)
# if (isinstance(rval, np.ndarray)
# and id(rval) not in self.watcher):
# self.add_shadow(rval)
if id(rval) not in self.watcher:
self.add_shadow(rval)
def op_LOAD_CONST(self, i, op, arg):
tos = self.func.func_code.co_consts[arg]
if type(tos) is int and -5 <= tos <= 256:
tos = np.int_(tos)
self.push(tos)
# if isinstance(tos, float):
# if id(tos) not in self.watcher:
# var = theano.tensor.as_tensor_variable(tos)
# self.watcher.svars[id(tos)] = var
if (isinstance(tos, np.ndarray) and id(tos) not in self.watcher):
raise NotImplementedError()
def op_LOAD_CLOSURE(self, i, op, arg):
co_cellvars = self.func.func_code.co_cellvars
co_freevars = self.func.func_code.co_freevars
co_varnames = self.func.func_code.co_varnames
if arg < len(co_cellvars):
name = co_cellvars[arg]
else:
name = co_freevars[arg - len(co_cellvars)]
thing = self._locals[co_varnames.index(name)]
cell = cellmake(thing)
self.push(cell)
def op_LOAD_DEREF(self, i, op, arg):
# -- this is called to access a variable that appears in multiple
# scopes.
# -- vars *referenced* by nested scopes
co_cellvars = self.func.func_code.co_cellvars
# -- vars read from enclosing scopes
co_freevars = self.func.func_code.co_freevars
# -- all varnames
co_varnames = self.func.func_code.co_varnames
if arg < len(co_cellvars):
# -- normal case
name = co_cellvars[arg]
# -- XXX: Is this really the right thing to do??
thing = self._locals[co_varnames.index(name)]
else:
name = co_freevars[arg - len(co_cellvars)]
closure = self.func.func_closure
assert len(co_freevars) == len(closure)
# print 'LOAD_DEREF (%s:%s)' % (self.func, name)
cell = closure[arg - len(co_cellvars)]
thing = cellget(cell)
self.push(thing)
# if (isinstance(thing, np.ndarray) and id(thing) not in self.watcher):
# self.add_shadow(thing)
if id(thing) not in self.watcher:
self.add_shadow(thing)
def op_LOAD_FAST(self, i, op, arg):
tos = self._locals[arg]
try:
self.push(tos)
except LoadUnassigned:
raise LoadUnassigned(self.func.func_code.co_varnames[arg])
if not isinstance(tos, (int, float)):
if id(tos) not in self.watcher:
self.add_shadow(tos)
def op_MAKE_CLOSURE(self, i, op, arg):
return self.op_MAKE_FUNCTION(i, op, arg, w_closure=True)
def op_MAKE_FUNCTION(self, i, op, arg, w_closure=False):
func_code = self.pop()
if w_closure:
cells = self.pop()
if arg:
argdefs = tuple(self.stack[-arg:])
self.stack[-arg:] = []
else:
argdefs = ()
if w_closure:
fn = types.FunctionType(func_code,
self.func.func_globals,
argdefs=argdefs,
closure=cells,)
else:
fn = types.FunctionType(func_code,
self.func.func_globals,
argdefs=argdefs)
self.push(fn)
def op_POP_BLOCK(self, i, op, arg):
logger.debug('POP_BLOCK, what to do?')
pass
def op_POP_JUMP_IF_FALSE(self, i, op, arg):
#tos = self.stack[-1]
tos = self.pop()
if not tos:
return ('abs', arg)
def op_POP_JUMP_IF_TRUE(self, i, op, arg):
#tos = self.stack[-1]
tos = self.pop()
if tos:
return ('abs', arg)
def op_POP_TOP(self, i, op, arg):
self.pop()
def op_PRINT_ITEM(self, i, op, arg):
thing = self.pop()
if str(thing) == 'PRINT_OPS:True':
self.print_ops = True
if str(thing) == 'PRINT_STACK:True':
self.print_stack = True
print thing,
def op_PRINT_NEWLINE(self, i, op, arg):
print ''
def op_SETUP_LOOP(self, i, op, arg):
logger.debug('SETUP_LOOP, what to do?')
pass
def op_SLICE_PLUS_0(self, i, op, arg):
#Implements TOS = TOS[:].
TOS = self.pop()
new_tos = TOS[:]
self.push(new_tos)
if id(TOS) in self.watcher:
s = self.watcher.getvar(TOS)
s_rval = s[:]
self.watcher.shadow(new_tos, s_rval)
def op_SLICE_PLUS_1(self, i, op, arg):
# TOS = TOS1[TOS:]
TOS1, TOS = self.popN(2)
new_tos = TOS1[TOS:]
self.push(new_tos)
if any(id(t) in self.watcher for t in [TOS, TOS1]):
s = self.watcher.getvar(TOS)
s1 = self.watcher.getvar(TOS1)
s_rval = s1[s:]
self.watcher.shadow(new_tos, s_rval)
def op_SLICE_PLUS_2(self, i, op, arg):
# TOS = TOS1[:TOS]
TOS1, TOS = self.popN(2)
new_tos = TOS1[:TOS]
self.push(new_tos)
if any(id(t) in self.watcher for t in [TOS, TOS1]):
s = self.watcher.getvar(TOS)
s1 = self.watcher.getvar(TOS1)
s_rval = s1[:s]
self.watcher.shadow(new_tos, s_rval)
def op_SLICE_PLUS_3(self, i, op, arg):
# Implements TOS = TOS2[TOS1:TOS]
TOS2, TOS1, TOS = self.popN(3)
new_tos = TOS2[TOS1:TOS]
self.push(new_tos)
if any(id(t) in self.watcher for t in [TOS, TOS1, TOS2]):
s = self.watcher.getvar(TOS)
s1 = self.watcher.getvar(TOS1)
s2 = self.watcher.getvar(TOS2)
s_rval = s2[s1:s]
self.watcher.shadow(new_tos, s_rval)
def op_STORE_ATTR(self, i, op, arg):
# implements TOS.name = TOS1
TOS1, TOS = self.popN(2)
        if id(TOS) in self.watcher:
raise NotImplementedError()
name = self.func.func_code.co_names[arg]
setattr(TOS, name, TOS1)
def op_STORE_SLICE_PLUS_0(self, i, op, arg):
#Implements TOS[:] = TOS1
TOS1, TOS = self.popN(2)
new_tos = TOS
new_tos[:] = TOS1
self.push(new_tos)
if any(id(t) in self.watcher for t in [TOS, TOS1]):
s_tos = self.watcher.getvar(TOS)
s_tos1 = self.watcher.getvar(TOS1)
s_rval = theano.tensor.set_subtensor(s_tos[:], s_tos1)
self.watcher.shadow(new_tos, s_rval)
def op_STORE_SLICE_PLUS_1(self, i, op, arg):
TOS2, TOS1, TOS = self.popN(3)
new_tos = TOS1
new_tos[TOS:] = TOS2
self.push(new_tos)
if any(id(t) in self.watcher for t in [TOS, TOS1, TOS2]):
s_tos = self.watcher.getvar(TOS)
s_tos1 = self.watcher.getvar(TOS1)
s_tos2 = self.watcher.getvar(TOS2)
s_rval = theano.tensor.set_subtensor(s_tos1[s_tos:], s_tos2)
self.watcher.shadow(new_tos, s_rval)
def op_STORE_SLICE_PLUS_2(self, i, op, arg):
# TOS1[:TOS] = TOS2
TOS2, TOS1, TOS = self.popN(3)
new_tos = TOS1
new_tos[:TOS] = TOS2
self.push(new_tos)
if any(id(t) in self.watcher for t in [TOS, TOS1, TOS2]):
s_tos = self.watcher.getvar(TOS)
s_tos1 = self.watcher.getvar(TOS1)
s_tos2 = self.watcher.getvar(TOS2)
s_rval = theano.tensor.set_subtensor(s_tos1[:s_tos], s_tos2)
self.watcher.shadow(new_tos, s_rval)
def op_STORE_SLICE_PLUS_3(self, i, op, arg):
# Implements TOS2[TOS1:TOS] = TOS3
TOS3, TOS2, TOS1, TOS = self.popN(4)
new_tos = TOS2
new_tos[TOS1:TOS] = TOS3
self.push(new_tos)
if any(id(t) in self.watcher for t in [TOS, TOS1, TOS2, TOS3]):
s_tos = self.watcher.getvar(TOS)
s_tos1 = self.watcher.getvar(TOS1)
s_tos2 = self.watcher.getvar(TOS2)
s_tos3 = self.watcher.getvar(TOS3)
s_rval = theano.tensor.set_subtensor(s_tos2[s_tos1:s_tos], s_tos3)
self.watcher.shadow(new_tos, s_rval)
def op_STORE_FAST(self, i, op, arg):
self._locals[arg] = self.pop()
def op_STORE_MAP(self, i, op, arg):
key = self.pop()
val = self.pop()
dct = self.stack[-1]
dct[key] = val
def op_STORE_SUBSCR(self, i, op, arg):
# Implements TOS1[TOS] = TOS2.
tos = self.pop()
tos1 = self.pop()
tos2 = self.pop()
tos1[tos] = tos2
# tos can't be real-valued so there's no gradient through it
if id(tos1) in self.watcher or id(tos2) in self.watcher:
s_tos1 = self.ensure_shadow(tos1)
s_tos2 = self.ensure_shadow(tos2)
new_s_tos1 = theano.tensor.set_subtensor(s_tos1[tos], s_tos2)
self.watcher.svars[id(tos1)] = new_s_tos1
def op_RAISE_VARARGS(self, i, op, arg):
print >> sys.stderr, "Exception in autodiff.Context:"
if 1 <= arg:
exc = self.pop()
else:
exc = None
if 2 <= arg:
param = self.pop()
else:
param = None
if 3 <= arg:
tb = self.pop()
traceback.print_tb(tb, file=sys.stderr)
else:
print >> sys.stderr, "No traceback info available"
if param is not None:
raise param
elif exc is not None:
raise exc()
else:
raise Exception('Completely mysterious exception')
def op_RETURN_VALUE(self, i, op, arg):
self.rval = self.pop()
if id(self.rval) not in self.watcher:
self.add_shadow(self.rval)
def op_ROT_TWO(self, i, op, arg):
a = self.stack[-1]
b = self.stack[-2]
self.stack[-1] = b
self.stack[-2] = a
def op_ROT_THREE(self, i, op, arg):
a = self.stack[-1]
b = self.stack[-2]
c = self.stack[-3]
self.stack[-1] = b
self.stack[-2] = c
self.stack[-3] = a
def op_ROT_FOUR(self, i, op, arg):
a = self.stack[-1]
b = self.stack[-2]
c = self.stack[-3]
d = self.stack[-4]
self.stack[-1] = b
self.stack[-2] = c
self.stack[-3] = d
self.stack[-4] = a
def op_UNARY_NEGATIVE(self, i, op, arg):
arg1 = self.pop()
assert not hasattr(arg1, 'type')
r = -arg1
self.push(r)
if id(arg1) in self.watcher:
s1 = self.ensure_shadow(arg1)
self.watcher.shadow(r, -s1)
def op_UNPACK_SEQUENCE(self, i, op, arg):
tos = self.pop()
self.stack.extend(tos[::-1])
class Context(object):
def __init__(self, device=None, borrowable=(), force_floatX=False):
"""
borrowable : tuple of objects
If an object in this tuple is encountered while tracing the
function, then its symbolic representation will alias that object's
memory location. This means that *inplace* operations on the Python
(likely NumPy) object will affect the symbolic function.
force_floatX : bool
If True, floats and float NumPy ndarrays will be cast to the dtype
specified at theano.config.floatX when forming symbolic shared
variables, if they do not have it already. Objects in `borrowable`
are never cast.
"""
self.svars = {}
self.nogc = [] # ids that must not be reused
# XXX: rethink to avoid actually holding on to all these intermediates.
self.device = device
self.borrowable_ids = [id(b) for b in borrowable]
self.force_floatX = force_floatX
self.constants = set()
def __iter__(self):
return self.svars.__iter__()
def shadow(self, rval, sval, force=True):
assert hasattr(sval, 'type') # assert sval is Theano variable
if force:
self.svars[id(rval)] = sval
else:
self.svars.setdefault(id(rval), sval)
# -- shadow vars have to match dtype and ndim
if isinstance(rval, np.ndarray):
if str(rval.dtype) == 'bool':
assert sval.dtype == 'int8', (rval.dtype, sval.dtype)
elif not self.force_floatX:
assert str(rval.dtype) == sval.dtype, (rval, sval)
assert rval.ndim == sval.ndim, (rval, sval)
# -- assert postcondition
assert sval is self.getvar(rval)
self.nogc.append(rval)
def call(self, fn, args=(), kwargs={}):
vm = FrameVM(self, fn)
return vm.call(args, kwargs)
def shared(self, obj, name=None, borrow=None):
if borrow is None:
borrow = (id(obj) in self.borrowable_ids)
if self.force_floatX and not borrow:
if (isinstance(obj, np.ndarray)
and 'float' in str(obj.dtype)
and str(obj.dtype) != theano.config.floatX):
obj = obj.astype(theano.config.floatX)
# not all objects have shared constructors with a borrow keyword
# for example theano.shared(np.float32(1)) works but
# theano.shared(np.float32(1), borrow=[False|True]) fails
if self.device == 'cpu':
try:
return theano.tensor._shared(obj, borrow=borrow)
except:
return theano.tensor._shared(obj)
else:
try:
return theano.shared(obj, borrow=borrow)
except:
return theano.shared(obj)
def getvar(self, var):
return self.svars.get(id(var), var)
def reset(self):
self.constants.clear()
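# Illustrative usage sketch (hypothetical helper, not used elsewhere): with the
# Context/FrameVM machinery above, tracing a small NumPy function produces
# symbolic Theano shadows for its inputs and outputs.
def _example_trace_context():
    ctx = Context()
    x = np.ones(3)
    def f(a):
        return a * 2.0 + 1.0
    y = ctx.call(f, (x,))       # run f under the bytecode VM, shadowing as it goes
    s_x = ctx.getvar(x)         # symbolic shadow of the input array
    s_y = ctx.getvar(y)         # symbolic shadow of the result
    # e.g. theano.function([s_x], s_y) would compile the traced expression
    return y, s_x, s_y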
|
|
'''
If this is running then python is obviously installed, but we need to make sure that python3 is installed.
What we need to do:
0. (optional) Check disk space
0.1: The same env checks in run.py?
1: Make sure this is python 3.5+
1.1: If we installed python 3.5, restart using that
2. Check for and install required programs:
- brew (osx)
- git
- ffmpeg
- libopus (non windows)
- libffi (non windows)
- libsodium (sometimes needed, might need to check after pynacl)
- a compiler
3: Ensure pip and update pip
4: Git clone and clean out non arch related stuff (run scripts, bins/ on non-windows)
5: Install requirements.txt packages (if everything is ok then no errors should occur)
6. Copy configs and prompt the user for something (maybe)
The OSX-specific steps might be a bit different, so we just need to pay special attention to those steps.
Remember to make sure the user knows the script might prompt for a password.
Print the command beforehand just so they know what's happening.
When the script runs, the user should be greeted with some text and a "press [enter/whatever] to continue" prompt.
'''
from __future__ import print_function
import os
import re
import shutil
import sys
import logging
import platform
import tempfile
import traceback
import subprocess
import argparse
from glob import glob
from shutil import rmtree
from textwrap import dedent
try:
from urllib.request import urlopen, Request, urlretrieve
except ImportError:
# Use urllib2 for Python 2.
# noinspection PyUnresolvedReferences
from urllib2 import urlopen, Request
from urllib import urlretrieve
# Arguments
ap = argparse.ArgumentParser()
ap.add_argument('--dir', help='the name of the directory to install to (default: MusicBot)')
args = ap.parse_args()
# Logging setup goes here
PY_VERSION = sys.version_info # (3, 5, 1, ...)
SYS_PLATFORM = sys.platform # 'win32', 'linux', 'darwin'
SYS_UNAME = platform.uname()
SYS_ARCH = ('32', '64')[SYS_UNAME[4].endswith('64')]
SYS_PKGMANAGER = None # TODO: Figure this out
PLATFORMS = ['win32', 'linux', 'darwin', 'linux2']
MINIMUM_PY_VERSION = (3, 5)
TARGET_PY_VERSION = "3.5.2"
if SYS_PLATFORM not in PLATFORMS:
raise RuntimeError('Unsupported system "%s"' % SYS_PLATFORM)
if SYS_PLATFORM == 'linux2':
SYS_PLATFORM = 'linux'
TEMP_DIR = tempfile.TemporaryDirectory(prefix='musicbot-')
PY_BUILD_DIR = os.path.join(TEMP_DIR.name, "Python-%s" % TARGET_PY_VERSION)
INSTALL_DIR = args.dir if args.dir is not None else 'MusicBot'
GET_PIP = "https://bootstrap.pypa.io/get-pip.py"
# python2 compat bollocks
if PY_VERSION >= (3,):
raw_input = input
def read_from_urllib(r):
    # Reads data from urllib in a version-independent way.
if PY_VERSION[0] == 2:
return r.read()
else:
return r.read().decode("utf-8")
def sudo_check_output(args, **kwargs):
if not isinstance(args, (list, tuple)):
args = args.split()
return subprocess.check_output(('sudo',) + tuple(args), **kwargs)
def sudo_check_call(args, **kwargs):
if not isinstance(args, (list, tuple)):
args = args.split()
return subprocess.check_call(('sudo',) + tuple(args), **kwargs)
def tmpdownload(url, name=None, subdir=''):
if name is None:
name = os.path.basename(url)
_name = os.path.join(TEMP_DIR.name, subdir, name)
return urlretrieve(url, _name)
def find_library(libname):
if SYS_PLATFORM == 'win32': return
# TODO: This
def yes_no(question):
while True: # spooky
ri = raw_input('{} (y/n): '.format(question))
if ri.lower() in ['yes', 'y']: return True
elif ri.lower() in ['no', 'n']: return False
"""
Finding lib dev headers:
1. Get include dirs and search for headers
"echo | gcc -xc++ -E -v -" and parse for include dirs
linux subprocess.check_output("find /usr[/local]/include -iname 'ffi.h'", shell=True) (find /usr/include /usr/local/include ...?)
2. Have gcc deal with it and check the error output
gcc -lffi (Fail: cannot find -lffi) vs (Success: ... undefined reference to `main')
"""
###############################################################################
class SetupTask(object):
def __getattribute__(self, item):
try:
# Check for platform variant of function first
return object.__getattribute__(self, item + '_' + SYS_PLATFORM)
except:
pass
if item.endswith('_dist'):
try:
# check for dist aliases, ex: setup_dist -> setup_win32
return object.__getattribute__(self, item.rsplit('_', 1)[0] + '_' + SYS_PLATFORM)
except:
try:
# If there's no dist variant, try to fallback to the generic, ex: setup_dist -> setup
return object.__getattribute__(self, item.rsplit('_', 1)[0])
except:
pass
return object.__getattribute__(self, item)
@classmethod
def run(cls):
self = cls()
if not self.check():
self.setup(self.download())
def check(self):
"""
Check to see if the component exists and works
"""
pass
def download(self):
"""
Download the component
"""
pass
def setup(self, data):
"""
        Install the component and perform any other required tasks
"""
pass
###############################################################################
class EnsurePython(SetupTask):
PYTHON_BASE = "https://www.python.org/ftp/python/{ver}/"
# For some reason only the tgz's have a capital P
PYTHON_TGZ = PYTHON_BASE + "Python-{ver}.tgz"
PYTHON_EXE = PYTHON_BASE + "python-{ver}.exe"
PYTHON_PKG = PYTHON_BASE + "python-{ver}-macosx10.6.pkg"
def check(self):
if PY_VERSION >= MINIMUM_PY_VERSION:
return True
# TODO: Check for python 3.5 and restart if found
def download_win32(self):
exe, _ = tmpdownload(self.PYTHON_EXE.format(ver=TARGET_PY_VERSION))
return exe
def setup_win32(self, data):
# https://docs.python.org/3/using/windows.html#installing-without-ui
args = {
'PrependPath': '1',
'InstallLauncherAllUsers': '0',
'Include_test': '0'
}
command = [data, '/quiet'] + ['='.join(x) for x in args.items()]
subprocess.check_call(command)
self._restart(None)
def download_linux(self):
tgz, _ = tmpdownload(self.PYTHON_TGZ.format(ver=TARGET_PY_VERSION))
return tgz
def setup_linux(self, data):
# tar -xf data
# do build process
if os.path.exists(PY_BUILD_DIR):
try:
shutil.rmtree(PY_BUILD_DIR)
except OSError:
sudo_check_call("rm -rf %s" % PY_BUILD_DIR)
subprocess.check_output("tar -xf {} -C {}".format(data, TEMP_DIR.name).split())
olddir = os.getcwd()
# chdir into it
os.chdir(PY_BUILD_DIR)
# Configure and make.
subprocess.check_call('./configure --enable-ipv6 --enable-shared --with-system-ffi --without-ensurepip'.split())
subprocess.check_call('make')
sudo_check_call("make install")
# Change back.
os.chdir(olddir)
executable = "python{}".format(TARGET_PY_VERSION[0:3])
self._restart(None)
# TODO: Move to _restart
# Restart into the new executable.
print("Rebooting into Python {}...".format(TARGET_PY_VERSION))
# Use os.execl to switch program
os.execl("/usr/local/bin/{}".format(executable), "{}".format(executable), __file__)
def download_darwin(self):
pkg, _ = tmpdownload(self.PYTHON_PKG.format(ver=TARGET_PY_VERSION))
return pkg
def setup_darwin(self, data):
        sudo_check_call(['installer', '-pkg', data, '-target', '/'])  # install the downloaded .pkg
self._restart(None)
def _restart(self, *cmds):
# TODO: os.execl
pass # Restart with 3.5 if needed
class EnsureEnv(SetupTask):
pass # basically the important checks from run.py, not sure exactly what I need to check though
class EnsureBrew(SetupTask):
def check(self):
if SYS_PLATFORM == 'darwin':
try:
subprocess.check_output(['brew'])
except FileNotFoundError:
return False
except subprocess.CalledProcessError:
pass
return True
def download(self):
cmd = '/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"'
subprocess.check_call(cmd, shell=True)
def setup(self, data):
subprocess.check_call('brew update'.split())
class EnsureGit(SetupTask):
WIN_OPTS = dedent("""
[Setup]
Lang=default
Group=Git
NoIcons=0
SetupType=default
Components=ext,ext\shellhere,assoc
Tasks=
PathOption=Cmd
SSHOption=OpenSSH
CRLFOption=CRLFAlways
BashTerminalOption=MinTTY
PerformanceTweaksFSCache=Enabled
""")
def check(self):
try:
subprocess.check_output(['git', '--version'])
except FileNotFoundError:
return False
return True
@staticmethod
def _get_latest_win_git_version():
version = ('2.10.1', 'v2.10.1.windows.1')
try:
url = "https://github.com/git-for-windows/git/releases/latest"
req = Request(url, method='HEAD')
with urlopen(req) as resp:
full_ver = os.path.basename(resp.url)
match = re.match(r'v(\d+\.\d+\.\d+)', full_ver)
return match.groups()[0], full_ver
except:
return version
@classmethod
def _get_latest_win_get_download(cls):
dist_ver, full_ver = cls._get_latest_win_git_version()
url = "https://github.com/git-for-windows/git/releases/download/{fullver}/Git-{ver}-{arch}-bit.exe"
return url.format(full_ver=full_ver, ver=dist_ver, arch=SYS_ARCH)
def download_win32(self):
        result, _ = tmpdownload(self._get_latest_win_get_download(), 'git-setup.exe')
return result
def setup_win32(self, data):
with tempfile.NamedTemporaryFile('w+', encoding='utf8') as f:
f.file.write(self.WIN_OPTS)
f.file.flush()
args = [
data,
'/SILENT',
'/NORESTART',
'/NOCANCEL',
'/SP-',
'/LOG',
'/SUPPRESSMSGBOXES',
'/LOADINF="%s"' % f.name
]
subprocess.check_call(args)
def download_linux(self):
pass # need package manager abstraction
# def setup_linux(self, data):
# pass # nothing really needed, I don't think setting any git options is necessary
def download_darwin(self):
subprocess.check_call('brew install git'.split())
# def setup_darwin(self, data):
# pass # same as linux, probably can just delete these stubs
class EnsureFFmpeg(SetupTask):
AVCONV_CHECK = b"Please use avconv instead"
def check_win32(self):
return True # ffmpeg comes with the bot
def check(self):
try:
data = subprocess.check_output(['ffmpeg', '-version'], stderr=subprocess.STDOUT)
except FileNotFoundError:
return False
else:
return self.AVCONV_CHECK not in data
def download_linux(self):
# check if ubuntu, add ppa's, install
# otherwise check for other repo variants
# if all else fails: https://trac.ffmpeg.org/wiki/CompilationGuide
pass
def setup_linux(self, data):
pass
def download_darwin(self):
subprocess.check_call('brew install ffmpeg'.split())
class EnsureOpus(SetupTask):
"""
Locate libopus.so.0 or whatever it'd be called (maybe ctypes.find_library)
"""
def check_win32(self):
return True # opus comes with the lib
def check(self):
pass
def download_linux(self):
pass
def setup_linux(self, data):
pass
def download_darwin(self):
pass
def setup_darwin(self, data):
pass
class EnsureFFI(SetupTask):
"""
see: find_library up above
"""
def check_win32(self):
return True # cffi has wheels
def check(self):
pass
def download_linux(self):
pass
def setup_linux(self, data):
pass
def download_darwin(self):
pass
def setup_darwin(self, data):
pass
class EnsureSodium(SetupTask):
# This one is going to be weird since sometimes its not needed (check python import)
def check_win32(self):
return True
class EnsureCompiler(SetupTask):
# oh god
def check_win32(self):
return True # yay wheels
class EnsurePip(SetupTask):
def check(self):
# Check if pip is installed by importing it.
try:
import pip
except ImportError:
return False
else:
return True
def download(self):
# Try and use ensurepip.
try:
import ensurepip
return False
except ImportError:
# Download `get-pip.py`.
# We hope we have urllib.request, otherwise we're sort of fucked.
f = tempfile.NamedTemporaryFile(delete=False)
f.close() # we only want the name
print("Downloading pip...")
urlretrieve(GET_PIP, f.name)
return f.name
def setup(self, data):
if not data:
# It's safe to use ensurepip.
print("Installing pip...")
try:
import ensurepip
ensurepip.bootstrap()
except PermissionError:
# panic and try and sudo it
sudo_check_call("python3.5 -m ensurepip")
return
# Instead, we have to run get-pip.py.
print("Installing pip...")
try:
sudo_check_call(["python3.5", "{}".format(data)])
except FileNotFoundError:
subprocess.check_call(["python3.5", "{}".format(data)])
class GitCloneMusicbot(SetupTask):
GIT_URL = "https://github.com/Just-Some-Bots/MusicBot.git"
GIT_CMD = "git clone --depth 10 --no-single-branch %s %s" % (GIT_URL, INSTALL_DIR)
def download(self):
print("Cloning files using Git...")
if os.path.isdir(INSTALL_DIR):
r = yes_no('A folder called %s already exists here. Overwrite?' % INSTALL_DIR)
if r is False:
print('Exiting. Use the --dir parameter when running this script to specify a different folder.')
sys.exit(1)
else:
                shutil.rmtree(INSTALL_DIR)
subprocess.check_call(self.GIT_CMD.split())
def setup(self, data):
os.chdir(INSTALL_DIR)
import pip
pip.main("install --upgrade -r requirements.txt".split())
class SetupMusicbot(SetupTask):
def _rm(self, f):
try:
return os.unlink(f)
except:
pass
def _rm_glob(self, p):
fs = glob(p)
for f in fs:
self._rm(f)
def _rm_dir(self, d):
return rmtree(d, ignore_errors=True)
def download(self): # lazy way to call a function on all platforms
self._rm('.dockerignore')
self._rm('Dockerfile')
def setup_win32(self, data):
self._rm_glob('*.sh')
self._rm_glob('*.command')
def setup_linux(self, data):
self._rm_glob('*.bat')
self._rm_glob('*.command')
self._rm_dir('bin')
def setup_darwin(self, data):
self._rm_glob('*.bat')
self._rm_glob('*.sh')
self._rm_dir('bin')
###############################################################################
def preface():
print(" MusicBot Bootstrapper (v0.1) ".center(50, '#'))
print("This script will install the MusicBot into a folder called '%s' in your current directory." % INSTALL_DIR,
"\nDepending on your system and environment, several packages and dependencies will be installed.",
"\nTo ensure there are no issues, you should probably run this script as an administrator.")
print()
raw_input("Press enter to begin. ")
print()
def main():
preface()
print("Bootstrapping MusicBot on Python %s." % '.'.join(list(map(str, PY_VERSION))))
EnsurePython.run()
EnsureBrew.run()
EnsureGit.run()
EnsureFFmpeg.run()
EnsureOpus.run()
EnsureFFI.run()
EnsureSodium.run()
EnsureCompiler.run()
EnsurePip.run()
GitCloneMusicbot.run()
SetupMusicbot.run()
if __name__ == '__main__':
try:
main()
except SystemExit:
pass
except Exception:
traceback.print_exc()
# noinspection PyUnboundLocalVariable
raw_input("An error has occured, press enter to exit. ")
finally:
TEMP_DIR.cleanup()
|
|
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""An example functional test
The module-level docstring should include a high-level description of
what the test is doing. It's the first thing people see when they open
the file and should give the reader information about *what* the test
is testing and *how* it's being tested
"""
# Imports should be in PEP8 ordering (std library first, then third party
# libraries then local imports).
from collections import defaultdict
# Avoid wildcard * imports
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.messages import CInv
from test_framework.mininode import (
P2PInterface,
mininode_lock,
msg_block,
msg_getdata,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
wait_until,
)
# P2PInterface is a class containing callbacks to be executed when a P2P
# message is received from the node-under-test. Subclass P2PInterface and
# override the on_*() methods if you need custom behaviour.
class BaseNode(P2PInterface):
def __init__(self):
"""Initialize the P2PInterface
Used to initialize custom properties for the Node that aren't
included by default in the base class. Be aware that the P2PInterface
base class already stores a counter for each P2P message type and the
last received message of each type, which should be sufficient for the
needs of most tests.
Call super().__init__() first for standard initialization and then
initialize custom properties."""
super().__init__()
# Stores a dictionary of all blocks received
self.block_receive_map = defaultdict(int)
def on_block(self, message):
"""Override the standard on_block callback
Store the hash of a received block in the dictionary."""
message.block.calc_sha256()
self.block_receive_map[message.block.sha256] += 1
def on_inv(self, message):
"""Override the standard on_inv callback"""
pass
def custom_function():
"""Do some custom behaviour
If this function is more generally useful for other tests, consider
moving it to a module in test_framework."""
# self.log.info("running custom_function") # Oops! Can't run self.log outside the BitcoinTestFramework
pass
class ExampleTest(BitcoinTestFramework):
# Each functional test is a subclass of the BitcoinTestFramework class.
# Override the set_test_params(), skip_test_if_missing_module(), add_options(), setup_chain(), setup_network()
# and setup_nodes() methods to customize the test setup as required.
def set_test_params(self):
"""Override test parameters for your individual test.
This method must be overridden and num_nodes must be explicitly set."""
self.setup_clean_chain = True
self.num_nodes = 3
# Use self.extra_args to change command-line arguments for the nodes
self.extra_args = [[], ["-logips"], []]
# self.log.info("I've finished set_test_params") # Oops! Can't run self.log before run_test()
# Use skip_test_if_missing_module() to skip the test if your test requires certain modules to be present.
# This test uses generate which requires wallet to be compiled
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
# Use add_options() to add specific command-line options for your test.
# In practice this is not used very much, since the tests are mostly written
# to be run in automated environments without command-line options.
# def add_options()
# pass
# Use setup_chain() to customize the node data directories. In practice
# this is not used very much since the default behaviour is almost always
# fine
# def setup_chain():
# pass
def setup_network(self):
"""Setup the test network topology
Often you won't need to override this, since the standard network topology
(linear: node0 <-> node1 <-> node2 <-> ...) is fine for most tests.
If you do override this method, remember to start the nodes, assign
them to self.nodes, connect them and then sync."""
self.setup_nodes()
# In this test, we're not connecting node2 to node0 or node1. Calls to
# sync_all() should not include node2, since we're not expecting it to
# sync.
connect_nodes(self.nodes[0], 1)
self.sync_all(self.nodes[0:2])
# Use setup_nodes() to customize the node start behaviour (for example if
# you don't want to start all nodes at the start of the test).
# def setup_nodes():
# pass
def custom_method(self):
"""Do some custom behaviour for this test
Define it in a method here because you're going to use it repeatedly.
If you think it's useful in general, consider moving it to the base
BitcoinTestFramework class so other tests can use it."""
self.log.info("Running custom_method")
def run_test(self):
"""Main test logic"""
        # Creating a P2P connection will wait for a verack to make sure the connection is fully up
self.nodes[0].add_p2p_connection(BaseNode())
# Generating a block on one of the nodes will get us out of IBD
blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
self.sync_all(self.nodes[0:2])
# Notice above how we called an RPC by calling a method with the same
# name on the node object. Notice also how we used a keyword argument
# to specify a named RPC argument. Neither of those are defined on the
# node object. Instead there's some __getattr__() magic going on under
# the covers to dispatch unrecognised attribute calls to the RPC
# interface.
# Logs are nice. Do plenty of them. They can be used in place of comments for
# breaking the test into sub-sections.
self.log.info("Starting test!")
self.log.info("Calling a custom function")
custom_function()
self.log.info("Calling a custom method")
self.custom_method()
self.log.info("Create some blocks")
self.tip = int(self.nodes[0].getbestblockhash(), 16)
self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1
height = self.nodes[0].getblockcount()
for i in range(10):
# Use the mininode and blocktools functionality to manually build a block
# Calling the generate() rpc is easier, but this allows us to exactly
# control the blocks and transactions.
block = create_block(self.tip, create_coinbase(height+1), self.block_time)
block.solve()
block_message = msg_block(block)
            # send_message is used to send a P2P message to the node over our P2PInterface
self.nodes[0].p2p.send_message(block_message)
self.tip = block.sha256
blocks.append(self.tip)
self.block_time += 1
height += 1
self.log.info("Wait for node1 to reach current tip (height 11) using RPC")
self.nodes[1].waitforblockheight(11)
self.log.info("Connect node2 and node1")
connect_nodes(self.nodes[1], 2)
self.log.info("Add P2P connection to node2")
self.nodes[0].disconnect_p2ps()
self.nodes[2].add_p2p_connection(BaseNode())
self.log.info("Wait for node2 reach current tip. Test that it has propagated all the blocks to us")
getdata_request = msg_getdata()
for block in blocks:
getdata_request.inv.append(CInv(2, block))
self.nodes[2].p2p.send_message(getdata_request)
# wait_until() will loop until a predicate condition is met. Use it to test properties of the
# P2PInterface objects.
wait_until(lambda: sorted(blocks) == sorted(list(self.nodes[2].p2p.block_receive_map.keys())), timeout=5, lock=mininode_lock)
self.log.info("Check that each block was received only once")
# The network thread uses a global lock on data access to the P2PConnection objects when sending and receiving
# messages. The test thread should acquire the global lock before accessing any P2PConnection data to avoid locking
# and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
with mininode_lock:
for block in self.nodes[2].p2p.block_receive_map.values():
assert_equal(block, 1)
if __name__ == '__main__':
ExampleTest().main()
|
|
#!/usr/bin/env python
'''
Main AVClass class
'''
import sys
import re
import string
import logging
from collections import OrderedDict as OrdDict
from collections import namedtuple
from operator import itemgetter, attrgetter
# Set logging
log = logging.getLogger(__name__)
# Prefix to identify platform tags
platform_prefix = "FILE:os:"
# Default category for tags in taxonomy with no category
uncategorized_cat = "UNC"
SampleInfo = namedtuple('SampleInfo',
['md5', 'sha1', 'sha256', 'labels', 'vt_tags'])
# AVs to use in suffix removal
suffix_removal_av_set = {'Norman', 'Avast', 'Avira', 'Kaspersky',
'ESET-NOD32', 'Fortinet', 'Jiangmin', 'Comodo',
'GData', 'Avast', 'Sophos',
'TrendMicro-HouseCall', 'TrendMicro',
'NANO-Antivirus', 'Microsoft'}
class Tag:
''' A Tag in the taxonomy '''
def __init__(self, s):
word_list = s.strip().split(":")
if len(word_list) > 1:
self._name = word_list[-1].lower()
self._cat = word_list[0].upper()
self._prefix_l = [x.lower() for x in word_list[1:-1]]
path = self._cat
for x in self._prefix_l:
path = path + ':' + x
self._path = path + ':' + self._name
else:
self._name = word_list[0].lower()
self._cat = uncategorized_cat
self._prefix_l = []
self._path = self._name
def __hash__(self):
''' Return hash '''
return hash((self._path))
@property
def name(self):
''' Return tag name '''
return self._name
@property
def cat(self):
''' Return tag category '''
return self._cat
@property
def path(self):
''' Return tag path '''
return self._path
@property
def prefix_l(self):
''' Return tag prefix list '''
return self._prefix_l
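# Illustrative example (hypothetical helper, not used by the classes below):
# how Tag parses a taxonomy string into category, prefix list, name, and path.
def _example_tag_parsing():
    t = Tag("FAM:bankers:zeus")
    # t.cat == "FAM", t.prefix_l == ["bankers"],
    # t.name == "zeus", t.path == "FAM:bankers:zeus"
    u = Tag("zeus")
    # no category given -> u.cat == uncategorized_cat ("UNC"), u.path == "zeus"
    return t, u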
class Taxonomy:
'''
A taxonomy of tags and generic tokens read from file
'''
def __init__(self, filepath):
''' Map tag.name | tag.path -> Tag '''
self._tags = set()
self._tag_map = {}
if filepath:
self.read_taxonomy(filepath)
def __len__(self):
''' Taxonomy length is the number of tags it contains '''
return len(self._tags)
def __iter__(self):
''' Iterator over the alphabetically sorted tags in the taxonomy '''
return (t for t in sorted(self._tags))
def is_generic(self, t):
''' Return true if input is generic, false otherwise '''
tag = self._tag_map.get(t, None)
if tag:
return tag.cat == "GEN"
else:
return False
def is_tag(self, t):
''' Return true if input is tag, false otherwise '''
return t in self._tag_map
def add_tag(self, s, override=False):
''' Add tag to taxonomy
If tag already exists with different path,
only replaces if override True '''
tag = Tag(s)
t = self._tag_map.get(tag.name, None)
if t and (t.path != tag.path):
if (not override):
return
else:
log.warning("[Taxonomy] Replacing %s with %s\n" % (
t.path, tag.path))
del self._tag_map[t.path]
log.debug("[Taxonomy] Adding tag %s" % s)
self._tags.add(tag)
self._tag_map[tag.name] = tag
self._tag_map[tag.path] = tag
return
def remove_tag(self, t):
''' Remove tag from taxonomy. Returns 1 if removed, zero if unknown '''
tag = self._tag_map.get(t, None)
if tag:
log.debug("[Taxonomy] Removing tag: %s" % tag.path)
del self._tag_map[tag.name]
del self._tag_map[tag.path]
self._tags.remove(tag)
return 1
else:
return 0
def get_category(self, t):
''' Return category of input tag, UNK if not a tag '''
tag = self._tag_map.get(t, None)
if tag:
return tag.cat
else:
return "UNK"
def get_path(self, t):
        ''' Return full path for given tag, or UNK:t if not a tag '''
tag = self._tag_map.get(t, None)
if tag:
return tag.path
else:
return ("UNK:" + t)
def get_prefix_l(self, t):
        ''' Return prefix list for given tag, or empty list if not a tag '''
tag = self._tag_map.get(t, None)
if tag:
return tag.prefix_l
else:
return []
    def get_prefix(self, t):
        ''' Return prefix string for given tag,
            or empty string if not a tag '''
        tag = self._tag_map.get(t, None)
        if tag:
            return tag.path[0:tag.path.rfind(':')]
        else:
            return ''
def get_depth(self, t):
''' Return depth of tag in taxonomy.
Returns zero if tag not in taxonomy.
A normal tag CAT:name has depth two '''
tag = self._tag_map.get(t, None)
if tag:
return len(tag.prefix_l) + 2
else:
return 0
def get_info(self, t):
''' Return (path,category) for given tag, or UNK:t if not a tag '''
tag = self._tag_map.get(t, None)
if tag:
return tag.path, tag.cat
else:
return "UNK:" + t, "UNK"
def expand(self, t):
''' Return list of tags in prefix list that are leaves '''
tag = self._tag_map.get(t, None)
if tag:
return [t for t in tag.prefix_l if t in self._tag_map]
else:
return []
def platform_tags(self):
''' Returns list with platform tags in taxonomy '''
acc = set()
for idx,tag in self._tag_map.items():
if tag.path.startswith(platform_prefix):
acc.add(tag.name)
return acc
def overlaps(self, t1, t2):
''' Returns true if the path of the given tags overlaps '''
m1 = self.get_prefix_l(t1)
m2 = self.get_prefix_l(t2)
return (t1 in m2) or (t2 in m1)
def remove_overlaps(self, l):
''' Returns list with overlapping tags removed '''
if not l:
return l
pair_l = sorted([(self.get_depth(t),t) for t in l])
out_l = [pair_l.pop()[1]]
while pair_l:
t = pair_l.pop()[1]
if (not any(self.overlaps(t, e) for e in out_l)):
out_l.append(t)
return out_l
def read_taxonomy(self, filepath):
'''Read taxonomy from given file '''
with open(filepath, 'r') as fd:
for line in fd:
if line.startswith('#') or line == '\n':
continue
self.add_tag(line.strip())
return
def to_file(self, filepath):
''' Output sorted taxonomy to given file '''
# Open output file
fd = open(filepath, 'w')
# Write sorted tags
tag_l = sorted(self._tag_map.items(),
key=lambda item : item[1].path,
reverse=False)
        # Each tag is stored twice in _tag_map (under its name and its path),
        # so after sorting by path the two entries for a tag are adjacent;
        # writing every other entry outputs each tag once
        idx = 0
        for name,tag in tag_l:
            if (idx % 2) == 0:
                fd.write(tag.path+"\n")
            idx+=1
# Close output file
fd.close()
class Rules:
'''
Rules are src -> dst1, dst2, ... relations
'''
def __init__(self, filepath):
''' Map src -> set(dst) '''
self._src_map = {}
if filepath:
self.read_rules(filepath)
def __len__(self):
''' Length is number of rules, i.e., number of src '''
return len(self._src_map)
def add_rule(self, src, dst_l, overwrite=False):
''' Add rule. If rule exists:
if overwrite==True, replace destination list
else append dst_l to current target set '''
# Remove src from dst_l if it exists
        dst_l = [x for x in dst_l if x != src]
# If no destinations, nothing to do
if (not dst_l):
return
log.debug("[Rules] Adding %s -> %s" % (src, dst_l))
src_tag = Tag(src)
if overwrite:
target_l = [Tag(dst).name for dst in dst_l]
self._src_map[src_tag.name] = set(target_l)
else:
curr_dst = self._src_map.get(src_tag.name, set())
for dst in dst_l:
dst_tag = Tag(dst)
curr_dst.add(dst_tag.name)
self._src_map[src_tag.name] = curr_dst
return
def remove_rule(self, src):
l = self._src_map.get(src, [])
if l:
log.debug("[Rules] Removing rule: %s -> %s" % (src, l))
del self._src_map[src]
return 1
else:
return 0
def get_dst(self, src):
''' Returns dst list for given src, or empty list if no expansion '''
return list(self._src_map.get(src, []))
def read_rules(self, filepath):
'''Read rules from given file'''
with open(filepath, 'r') as fd:
for line in fd:
if line.startswith('#') or line == '\n':
continue
word_list = line.strip().split()
if len(word_list) > 1:
self.add_rule(word_list[0],word_list[1:])
return
def to_file(self, filepath, taxonomy=None):
''' Output sorted rules to given file
If taxonomy is provided, it outputs full tag path '''
fd = open(filepath, 'w')
for src,dst_set in sorted(self._src_map.items()):
dst_l = sorted(dst_set, reverse=False)
if taxonomy:
src_path = taxonomy.get_path(src)
path_l = [taxonomy.get_path(t) for t in dst_l]
dst_str = '\t'.join(path_l)
fd.write("%s\t%s\n" % (src_path,dst_str))
else:
dst_str = '\t'.join(dst_l)
fd.write("%s\t%s\n" % (src,dst_str))
fd.close()
def expand_src_destinations(self, src):
''' Return destination list for given src after recursively
following any rules for destinations '''
dst_set = self._src_map.get(src, set())
out = set()
while dst_set:
dst = dst_set.pop()
l = self._src_map.get(dst, [])
if l:
for e in l:
if (e not in out) and (e != dst):
dst_set.add(e)
else:
out.add(dst)
return out
def expand_all_destinations(self):
        ''' Expand all destination sets in place by recursively
            following any rules for destinations '''
src_l = self._src_map.keys()
for src in src_l:
dst_l = self.expand_src_destinations(src)
self._src_map[src] = dst_l
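# Illustrative example of rule expansion (hypothetical helper and rule names,
# not used elsewhere): chained rules are followed recursively until only
# terminal destinations remain.
def _example_rules_expansion():
    r = Rules(None)
    r.add_rule('zbot', ['zeus'])
    r.add_rule('zeus', ['trojan'])               # hypothetical chained rule
    one_step = r.get_dst('zbot')                 # ['zeus']
    full = r.expand_src_destinations('zbot')     # {'trojan'}
    return one_step, full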
class Tagging(Rules):
'''
Tagging rules have src UNK and dst in taxonomy
'''
def __init__(self, filepath):
Rules.__init__(self, filepath)
def validate(self, taxonomy):
''' Check that tags in tagging rules are in given taxonomy '''
for tok,tag_l in self._src_map.items():
if taxonomy.is_tag(tok):
sys.stdout.write("[Tagging] SRC %s in taxonomy\n" % tok)
for t in tag_l:
if (not taxonomy.is_tag(t)):
sys.stdout.write("[Tagging] %s not in taxonomy\n" % t)
class Expansion(Rules):
'''
Expansion rules have src and dst in taxonomy and
src.category != dst.category
'''
def __init__(self, filepath):
Rules.__init__(self, filepath)
def validate(self, taxonomy):
''' Check that tags in expansion rules are in given taxonomy '''
for src,dst_set in self._src_map.items():
if (not taxonomy.is_tag(src)):
sys.stdout.write("[Expansion] %s not in taxonomy\n" % src)
for dst in dst_set:
if (not taxonomy.is_tag(dst)):
sys.stdout.write("[Expansion] %s not in taxonomy\n" % dst)
class AvLabels:
'''
Class to operate on AV labels,
such as extracting the most likely family name.
'''
def __init__(self, tag_file, exp_file = None, tax_file = None,
av_file = None, aliasdetect=False):
# Read taxonomy
self.taxonomy = Taxonomy(tax_file)
# Read tag rules
self.tagging = Tagging(tag_file)
# Read expansion rules
self.expansions = Expansion(exp_file)
# Read AV engines
self.avs = self.read_avs(av_file) if av_file else None
# Alias statistics initialization
self.aliasdetect = aliasdetect
@staticmethod
def read_avs(avs_file):
'''Read AV engine set from given file'''
with open(avs_file) as fd:
avs = set(map(str.strip, fd.readlines()))
return avs
@staticmethod
def get_sample_info_lb(vt_rep):
'''Parse and extract sample information from JSON line
Returns a SampleInfo named tuple
'''
return SampleInfo(vt_rep['md5'], vt_rep['sha1'], vt_rep['sha256'],
vt_rep['av_labels'], [])
@staticmethod
def get_sample_info_vt_v2(vt_rep):
'''Parse and extract sample information from JSON line
Returns a SampleInfo named tuple
'''
label_pairs = []
# Obtain scan results, if available
try:
scans = vt_rep['scans']
md5 = vt_rep['md5']
sha1 = vt_rep['sha1']
sha256 = vt_rep['sha256']
except KeyError:
return None
# Obtain labels from scan results
for av, res in scans.items():
if res['detected']:
label = res['result']
clean_label = ''.join(filter(
lambda x: x in string.printable,
label)).strip()
label_pairs.append((av, clean_label))
# Obtain VT tags, if available
vt_tags = vt_rep.get('tags', [])
return SampleInfo(md5, sha1, sha256, label_pairs, vt_tags)
@staticmethod
def get_sample_info_vt_v3(vt_rep):
'''Parse and extract sample information from JSON line
Returns a SampleInfo named tuple
'''
# VT file reports in APIv3 contain all info under 'data'
# but reports from VT file feed (also APIv3) don't have it
# Handle both cases silently here
if 'data' in vt_rep:
vt_rep = vt_rep['data']
label_pairs = []
# Obtain scan results, if available
try:
scans = vt_rep['attributes']['last_analysis_results']
md5 = vt_rep['attributes']['md5']
sha1 = vt_rep['attributes']['sha1']
sha256 = vt_rep['attributes']['sha256']
except KeyError:
return None
# Obtain labels from scan results
for av, res in scans.items():
label = res['result']
if label is not None:
clean_label = ''.join(filter(
lambda x: x in string.printable,
label)).strip()
label_pairs.append((av, clean_label))
# Obtain VT tags, if available
vt_tags = vt_rep['attributes'].get('tags', [])
return SampleInfo(md5, sha1, sha256, label_pairs, vt_tags)
@staticmethod
def is_pup(tag_pairs, taxonomy):
'''This function classifies the sample as PUP or not
by checking if highest ranked CLASS tag contains "grayware"
and is above a predefined threshold
Return:
True/False/None
'''
threshold = 0.5
# If no tags, return false
if len(tag_pairs) < 1:
return None
max_ctr = tag_pairs[0][1]
for (tag,ctr) in tag_pairs:
(path, cat) = taxonomy.get_info(tag)
if (cat == "CLASS"):
if ("grayware" in path):
return (float(ctr) >= float(max_ctr)*threshold)
else:
return False
return False
@staticmethod
def _remove_suffixes(av_name, label):
'''Remove AV specific suffixes from given label
Returns updated label'''
# Truncate after last '.'
if av_name in suffix_removal_av_set:
label = label.rsplit('.', 1)[0]
# Truncate after last '.'
# if suffix only contains digits or uppercase (no lowercase) chars
if av_name == 'AVG':
tokens = label.rsplit('.', 1)
if len(tokens) > 1 and re.match("^[A-Z0-9]+$", tokens[1]):
label = tokens[0]
# Truncate after last '!'
if av_name == 'Agnitum':
label = label.rsplit('!', 1)[0]
return label
def get_label_tags(self, label, hashes):
''' Return list of tags in given label
Tokenizes label, filters unneeded tokens, and
applies tagging rules '''
# Initialize set of tags to return
# We use a set to avoid duplicate tokens in the same AV label
# This avoids "potentially unwanted" contributing twice BEH:pup
tags = set()
# If empty label, nothing to do
if not label:
return tags
# Split label into tokens and process each token
for token in re.split("[^0-9a-zA-Z]", label):
# Convert token to lowercase
token = token.lower()
# Remove digits at the end
end_len = len(re.findall("\d*$", token)[0])
if end_len:
token = token[:-end_len]
# Ignore token if prefix of a hash of the sample
# Most AVs use MD5 prefixes in labels,
# but we check SHA1 and SHA256 as well
hash_token = False
for hash_str in hashes:
if hash_str[0:len(token)] == token:
hash_token = True
break
if hash_token:
continue
# Ignore generic tokens
if self.taxonomy.is_generic(token):
continue
# Apply tagging rule
dst_l = self.tagging.get_dst(token)
if dst_l:
# Ignore generic tokens
for t in dst_l:
if not self.taxonomy.is_generic(t):
tags.add(t)
# Add token if longer than 3 characters and no tagging rule
elif len(token) > 3:
tags.add(token)
# Return tags
return tags
def _expand(self, tag_set):
''' Return expanded set of tags '''
ret = set()
for t in tag_set:
# Include tag
ret.add(t)
# Include target of expansion rule in output
ret.update(self.expansions.get_dst(t))
# Include implicit expansions in taxonomy
ret.update(self.taxonomy.expand(t))
# Return a list for backwards compatibility
return ret
def get_sample_tags(self, sample_info):
''' Returns dictionary tag -> AV list of tags for the given sample '''
# Whitelist the AVs to filter the ones with meaningful labels
av_whitelist = self.avs
# Initialize auxiliary data structures
duplicates = set()
av_dict = {}
# Process each AV label
for (av_name, label) in sample_info.labels:
# If empty label, nothing to do
if not label:
continue
################
# AV selection #
################
if av_whitelist and av_name not in av_whitelist:
continue
#####################
# Duplicate removal #
#####################
# Emsisoft uses same label as
# GData/ESET-NOD32/BitDefender/Ad-Aware/MicroWorld-eScan,
# but suffixes ' (B)' to their label. Remove the suffix.
if label.endswith(' (B)'):
label = label[:-4]
# F-Secure uses Avira's engine since Nov. 2018
# but prefixes 'Malware.' to Avira's label. Remove the prefix.
if label.startswith('Malware.'):
label = label[8:]
# Other engines often use exactly the same label, e.g.,
# AVG/Avast
# K7Antivirus/K7GW
# Kaspersky/ZoneAlarm
# If we have seen the exact same label before, skip
if label in duplicates:
continue
# If not, we add it to duplicates
else:
duplicates.add(label)
##################
# Suffix removal #
##################
label = self._remove_suffixes(av_name, label)
########################################################
# Tokenization and tagging #
########################################################
hashes = [ sample_info.md5, sample_info.sha1, sample_info.sha256 ]
tags = self.get_label_tags(label, hashes)
########################################################
# Expansions #
########################################################
            # NOTE: avoid doing expansion when detecting aliases
if self.aliasdetect:
expanded_tags = tags
else:
expanded_tags = self._expand(tags)
########################################################
# Stores information that relates AV vendors with tags #
########################################################
for t in expanded_tags:
av_dict.setdefault(t, []).append(av_name)
return av_dict
def rank_tags(self, av_dict, threshold=1):
''' Return list of (tag, confidence) ranked by decreasing confidence
and filter tags with less or equal threshold confidence '''
pairs = ((t, len(avs)) for (t,avs) in av_dict.items()
if len(avs) > threshold)
return sorted(pairs, key=itemgetter(1,0), reverse=True)
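# Illustrative end-to-end sketch (hypothetical helper; the data file paths are
# placeholders for the tagging/expansion/taxonomy files shipped with AVClass):
def _example_label_sample(vt_rep):
    av_labels = AvLabels('data/tagging', 'data/expansion', 'data/taxonomy')
    sample = AvLabels.get_sample_info_vt_v3(vt_rep)   # or _vt_v2 / _lb
    if sample is None:
        return []
    av_dict = av_labels.get_sample_tags(sample)
    # (tag, confidence) pairs, most agreed-upon tags first
    return av_labels.rank_tags(av_dict, threshold=1)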
|
|
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import netaddr
from neutron_lib.api import validators
from neutron_lib import constants
from neutron_lib.db import utils as db_utils
from neutron_lib import exceptions as exc
from oslo_config import cfg
from oslo_db import exception as os_db_exc
from oslo_db.sqlalchemy import utils as sa_utils
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import uuidutils
from sqlalchemy import and_
from sqlalchemy import event
from sqlalchemy import not_
from neutron._i18n import _, _LE, _LI
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.callbacks import events
from neutron.callbacks import exceptions
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.common import ipv6_utils
from neutron.common import utils
from neutron import context as ctx
from neutron.db import api as db_api
from neutron.db import db_base_plugin_common
from neutron.db import ipam_pluggable_backend
from neutron.db import models_v2
from neutron.db import rbac_db_mixin as rbac_mixin
from neutron.db import rbac_db_models as rbac_db
from neutron.db import standardattrdescription_db as stattr_db
from neutron.extensions import ip_allocation as ipa
from neutron.extensions import l3
from neutron import ipam
from neutron.ipam import exceptions as ipam_exc
from neutron.ipam import subnet_alloc
from neutron import manager
from neutron import neutron_plugin_base_v2
from neutron.objects import base as base_obj
from neutron.objects import subnetpool as subnetpool_obj
from neutron.plugins.common import constants as service_constants
LOG = logging.getLogger(__name__)
# Ports with the following 'device_owner' values will not prevent
# network deletion. If delete_network() finds that all ports on a
# network have these owners, it will explicitly delete each port
# and allow network deletion to continue. Similarly, if delete_subnet()
# finds out that all existing IP Allocations are associated with ports
# with these owners, it will allow subnet deletion to proceed with the
# IP allocations being cleaned up by cascade.
AUTO_DELETE_PORT_OWNERS = [constants.DEVICE_OWNER_DHCP]
DNS_DOMAIN_DEFAULT = 'openstacklocal.'
FQDN_MAX_LEN = 255
def _check_subnet_not_used(context, subnet_id):
try:
kwargs = {'context': context, 'subnet_id': subnet_id}
registry.notify(
resources.SUBNET, events.BEFORE_DELETE, None, **kwargs)
except exceptions.CallbackFailure as e:
raise exc.SubnetInUse(subnet_id=subnet_id, reason=e)
def _update_subnetpool_dict(orig_pool, new_pool):
keys_to_update = (
set(orig_pool.fields.keys()) - set(orig_pool.synthetic_fields))
updated = {k: new_pool.get(k, orig_pool[k]) for k in keys_to_update}
new_prefixes = new_pool.get('prefixes', constants.ATTR_NOT_SPECIFIED)
if new_prefixes is not constants.ATTR_NOT_SPECIFIED:
orig_ip_set = netaddr.IPSet(orig_pool.prefixes)
new_ip_set = netaddr.IPSet(new_prefixes)
if not orig_ip_set.issubset(new_ip_set):
msg = _("Existing prefixes must be "
"a subset of the new prefixes")
raise n_exc.IllegalSubnetPoolPrefixUpdate(msg=msg)
new_ip_set.compact()
updated['prefixes'] = [str(prefix.cidr)
for prefix in new_ip_set.iter_cidrs()]
else:
updated['prefixes'] = [str(prefix)
for prefix in orig_pool.prefixes]
return updated
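# Illustrative example of the prefix-update rule above (hypothetical helper,
# not used at runtime): a subnetpool update may only grow its prefixes.
def _example_prefix_update_check():
    orig = netaddr.IPSet(['10.0.0.0/24'])
    grown = netaddr.IPSet(['10.0.0.0/16'])
    shrunk = netaddr.IPSet(['10.0.0.0/25'])
    # Growing the pool is allowed; shrinking it makes _update_subnetpool_dict
    # raise IllegalSubnetPoolPrefixUpdate.
    return orig.issubset(grown), orig.issubset(shrunk)   # (True, False)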
class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
neutron_plugin_base_v2.NeutronPluginBaseV2,
rbac_mixin.RbacPluginMixin,
stattr_db.StandardAttrDescriptionMixin):
"""V2 Neutron plugin interface implementation using SQLAlchemy models.
Whenever a non-read call happens the plugin will call an event handler
class method (e.g., network_created()). The result is that this class
can be sub-classed by other classes that add custom behaviors on certain
events.
"""
# This attribute specifies whether the plugin supports or not
# bulk/pagination/sorting operations. Name mangling is used in
# order to ensure it is qualified by class
__native_bulk_support = True
__native_pagination_support = True
__native_sorting_support = True
def __init__(self):
self.set_ipam_backend()
if cfg.CONF.notify_nova_on_port_status_changes:
# Import nova conditionally to support the use case of Neutron
# being used outside of an OpenStack context.
from neutron.notifiers import nova
# NOTE(arosen) These event listeners are here to hook into when
# port status changes and notify nova about their change.
self.nova_notifier = nova.Notifier.get_instance()
event.listen(models_v2.Port, 'after_insert',
self.nova_notifier.send_port_status)
event.listen(models_v2.Port, 'after_update',
self.nova_notifier.send_port_status)
event.listen(models_v2.Port.status, 'set',
self.nova_notifier.record_port_status_changed)
for e in (events.BEFORE_CREATE, events.BEFORE_UPDATE,
events.BEFORE_DELETE):
registry.subscribe(self.validate_network_rbac_policy_change,
rbac_mixin.RBAC_POLICY, e)
@db_api.retry_if_session_inactive()
def validate_network_rbac_policy_change(self, resource, event, trigger,
context, object_type, policy,
**kwargs):
"""Validates network RBAC policy changes.
On creation, verify that the creator is an admin or that it owns the
network it is sharing.
On update and delete, make sure the tenant losing access does not have
resources that depend on that access.
"""
if object_type != 'network' or policy['action'] != 'access_as_shared':
# we only care about shared network policies
return
# The object a policy targets cannot be changed so we can look
# at the original network for the update event as well.
net = self._get_network(context, policy['object_id'])
if event in (events.BEFORE_CREATE, events.BEFORE_UPDATE):
# we still have to verify that the caller owns the network because
# _get_network will succeed on a shared network
if not context.is_admin and net['tenant_id'] != context.tenant_id:
msg = _("Only admins can manipulate policies on networks "
"they do not own.")
raise exc.InvalidInput(error_message=msg)
tenant_to_check = None
if event == events.BEFORE_UPDATE:
new_tenant = kwargs['policy_update']['target_tenant']
if policy['target_tenant'] != new_tenant:
tenant_to_check = policy['target_tenant']
if event == events.BEFORE_DELETE:
tenant_to_check = policy['target_tenant']
if tenant_to_check:
self.ensure_no_tenant_ports_on_network(net['id'], net['tenant_id'],
tenant_to_check)
def ensure_no_tenant_ports_on_network(self, network_id, net_tenant_id,
tenant_id):
ctx_admin = ctx.get_admin_context()
rb_model = rbac_db.NetworkRBAC
other_rbac_entries = self._model_query(ctx_admin, rb_model).filter(
and_(rb_model.object_id == network_id,
rb_model.action == 'access_as_shared'))
ports = self._model_query(ctx_admin, models_v2.Port).filter(
models_v2.Port.network_id == network_id)
if tenant_id == '*':
# for the wildcard we need to get all of the rbac entries to
# see if any allow the remaining ports on the network.
other_rbac_entries = other_rbac_entries.filter(
rb_model.target_tenant != tenant_id)
# any port with another RBAC entry covering it or one belonging to
# the same tenant as the network owner is ok
allowed_tenants = [entry['target_tenant']
for entry in other_rbac_entries]
allowed_tenants.append(net_tenant_id)
ports = ports.filter(
~models_v2.Port.tenant_id.in_(allowed_tenants))
else:
# if there is a wildcard rule, we can return early because it
# allows any ports
query = other_rbac_entries.filter(rb_model.target_tenant == '*')
if query.count():
return
ports = ports.filter(models_v2.Port.tenant_id == tenant_id)
if ports.count():
raise n_exc.InvalidSharedSetting(network=network_id)
def set_ipam_backend(self):
self.ipam = ipam_pluggable_backend.IpamPluggableBackend()
def _validate_host_route(self, route, ip_version):
try:
netaddr.IPNetwork(route['destination'])
netaddr.IPAddress(route['nexthop'])
except netaddr.core.AddrFormatError:
err_msg = _("Invalid route: %s") % route
raise exc.InvalidInput(error_message=err_msg)
except ValueError:
# netaddr.IPAddress would raise this
err_msg = _("Invalid route: %s") % route
raise exc.InvalidInput(error_message=err_msg)
self._validate_ip_version(ip_version, route['nexthop'], 'nexthop')
self._validate_ip_version(ip_version, route['destination'],
'destination')
def _validate_shared_update(self, context, id, original, updated):
# The only case that needs to be validated is when 'shared'
# goes from True to False
if updated['shared'] == original.shared or updated['shared']:
return
ports = self._model_query(
context, models_v2.Port).filter(models_v2.Port.network_id == id)
ports = ports.filter(not_(models_v2.Port.device_owner.startswith(
constants.DEVICE_OWNER_NETWORK_PREFIX)))
subnets = self._model_query(
context, models_v2.Subnet).filter(
models_v2.Subnet.network_id == id)
tenant_ids = set([port['tenant_id'] for port in ports] +
[subnet['tenant_id'] for subnet in subnets])
# raise if multiple tenants found or if the only tenant found
# is not the owner of the network
if (len(tenant_ids) > 1 or len(tenant_ids) == 1 and
tenant_ids.pop() != original.tenant_id):
raise n_exc.InvalidSharedSetting(network=original.name)
def _validate_ipv6_attributes(self, subnet, cur_subnet):
if cur_subnet:
self._validate_ipv6_update_dhcp(subnet, cur_subnet)
return
ra_mode_set = validators.is_attr_set(subnet.get('ipv6_ra_mode'))
address_mode_set = validators.is_attr_set(
subnet.get('ipv6_address_mode'))
self._validate_ipv6_dhcp(ra_mode_set, address_mode_set,
subnet['enable_dhcp'])
if ra_mode_set and address_mode_set:
self._validate_ipv6_combination(subnet['ipv6_ra_mode'],
subnet['ipv6_address_mode'])
if address_mode_set or ra_mode_set:
self._validate_eui64_applicable(subnet)
def _validate_eui64_applicable(self, subnet):
# Per RFC 4862, section 5.5.3, prefix length and interface
# id together should be equal to 128. Currently neutron supports
# EUI64 interface id only, thus limiting the prefix
# length to be 64 only.
if ipv6_utils.is_auto_address_subnet(subnet):
if netaddr.IPNetwork(subnet['cidr']).prefixlen != 64:
msg = _('Invalid CIDR %s for IPv6 address mode. '
'OpenStack uses the EUI-64 address format, '
'which requires the prefix to be /64.')
raise exc.InvalidInput(
error_message=(msg % subnet['cidr']))
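    # Illustrative values for the EUI-64 check above (the CIDRs are
    # hypothetical):
    #   netaddr.IPNetwork('2001:db8::/64').prefixlen -> 64, accepted
    #   netaddr.IPNetwork('2001:db8::/80').prefixlen -> 80, rejected for
    #   SLAAC / DHCPv6-stateless subnets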
def _validate_ipv6_combination(self, ra_mode, address_mode):
if ra_mode != address_mode:
msg = _("ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode "
"set to '%(addr_mode)s' is not valid. "
"If both attributes are set, they must be the same value"
) % {'ra_mode': ra_mode, 'addr_mode': address_mode}
raise exc.InvalidInput(error_message=msg)
def _validate_ipv6_dhcp(self, ra_mode_set, address_mode_set, enable_dhcp):
if (ra_mode_set or address_mode_set) and not enable_dhcp:
msg = _("ipv6_ra_mode or ipv6_address_mode cannot be set when "
"enable_dhcp is set to False.")
raise exc.InvalidInput(error_message=msg)
def _validate_ipv6_update_dhcp(self, subnet, cur_subnet):
if ('enable_dhcp' in subnet and not subnet['enable_dhcp']):
msg = _("Cannot disable enable_dhcp with "
"ipv6 attributes set")
ra_mode_set = validators.is_attr_set(subnet.get('ipv6_ra_mode'))
address_mode_set = validators.is_attr_set(
subnet.get('ipv6_address_mode'))
if ra_mode_set or address_mode_set:
raise exc.InvalidInput(error_message=msg)
old_ra_mode_set = validators.is_attr_set(
cur_subnet.get('ipv6_ra_mode'))
old_address_mode_set = validators.is_attr_set(
cur_subnet.get('ipv6_address_mode'))
if old_ra_mode_set or old_address_mode_set:
raise exc.InvalidInput(error_message=msg)
def _create_bulk(self, resource, context, request_items):
objects = []
collection = "%ss" % resource
items = request_items[collection]
context.session.begin(subtransactions=True)
try:
for item in items:
obj_creator = getattr(self, 'create_%s' % resource)
objects.append(obj_creator(context, item))
context.session.commit()
except Exception:
context.session.rollback()
with excutils.save_and_reraise_exception():
LOG.error(_LE("An exception occurred while creating "
"the %(resource)s:%(item)s"),
{'resource': resource, 'item': item})
return objects
@db_api.retry_if_session_inactive()
def create_network_bulk(self, context, networks):
return self._create_bulk('network', context, networks)
@db_api.retry_if_session_inactive()
def create_network(self, context, network):
"""Handle creation of a single network."""
net_db = self.create_network_db(context, network)
return self._make_network_dict(net_db, process_extensions=False,
context=context)
def create_network_db(self, context, network):
# single request processing
n = network['network']
# NOTE(jkoelker) Get the tenant_id outside of the session to avoid
# unneeded db action if the operation raises
tenant_id = n['tenant_id']
with context.session.begin(subtransactions=True):
args = {'tenant_id': tenant_id,
'id': n.get('id') or uuidutils.generate_uuid(),
'name': n['name'],
'admin_state_up': n['admin_state_up'],
'status': n.get('status', constants.NET_STATUS_ACTIVE),
'description': n.get('description')}
network = models_v2.Network(**args)
if n['shared']:
entry = rbac_db.NetworkRBAC(
network=network, action='access_as_shared',
target_tenant='*', tenant_id=network['tenant_id'])
context.session.add(entry)
context.session.add(network)
return network
@db_api.retry_if_session_inactive()
def update_network(self, context, id, network):
n = network['network']
with context.session.begin(subtransactions=True):
network = self._get_network(context, id)
# validate 'shared' parameter
if 'shared' in n:
entry = None
for item in network.rbac_entries:
if (item.action == 'access_as_shared' and
item.target_tenant == '*'):
entry = item
break
setattr(network, 'shared', True if entry else False)
self._validate_shared_update(context, id, network, n)
update_shared = n.pop('shared')
if update_shared and not entry:
entry = rbac_db.NetworkRBAC(
network=network, action='access_as_shared',
target_tenant='*', tenant_id=network['tenant_id'])
context.session.add(entry)
elif not update_shared and entry:
context.session.delete(entry)
context.session.expire(network, ['rbac_entries'])
# The filter call removes attributes from the body received from
# the API that are logically tied to network resources but are
# stored in other database tables handled by extensions
network.update(self._filter_non_model_columns(n,
models_v2.Network))
return self._make_network_dict(network, context=context)
@db_api.retry_if_session_inactive()
def delete_network(self, context, id):
with context.session.begin(subtransactions=True):
network = self._get_network(context, id)
context.session.query(models_v2.Port).filter_by(
network_id=id).filter(
models_v2.Port.device_owner.
in_(AUTO_DELETE_PORT_OWNERS)).delete(synchronize_session=False)
port_in_use = context.session.query(models_v2.Port).filter_by(
network_id=id).first()
if port_in_use:
raise exc.NetworkInUse(net_id=id)
# clean up subnets
subnets = self._get_subnets_by_network(context, id)
for subnet in subnets:
self.delete_subnet(context, subnet['id'])
context.session.delete(network)
@db_api.retry_if_session_inactive()
def get_network(self, context, id, fields=None):
network = self._get_network(context, id)
return self._make_network_dict(network, fields, context=context)
@db_api.retry_if_session_inactive()
def get_networks(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'network', limit, marker)
make_network_dict = functools.partial(self._make_network_dict,
context=context)
return self._get_collection(context, models_v2.Network,
make_network_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
@db_api.retry_if_session_inactive()
def get_networks_count(self, context, filters=None):
return self._get_collection_count(context, models_v2.Network,
filters=filters)
@db_api.retry_if_session_inactive()
def create_subnet_bulk(self, context, subnets):
return self._create_bulk('subnet', context, subnets)
def _validate_ip_version(self, ip_version, addr, name):
"""Check IP field of a subnet match specified ip version."""
ip = netaddr.IPNetwork(addr)
if ip.version != ip_version:
data = {'name': name,
'addr': addr,
'ip_version': ip_version}
msg = _("%(name)s '%(addr)s' does not match "
"the ip_version '%(ip_version)s'") % data
raise exc.InvalidInput(error_message=msg)
def _validate_subnet(self, context, s, cur_subnet=None):
"""Validate a subnet spec."""
# This method will validate attributes which may change during
# create_subnet() and update_subnet().
        # The method requires that the subnet spec 's' has an 'ip_version'
        # field. If the 's' dict does not have an 'ip_version' field in an
        # API call (e.g., update_subnet()), you need to set the 'ip_version'
        # field before calling this method.
ip_ver = s['ip_version']
if validators.is_attr_set(s.get('cidr')):
self._validate_ip_version(ip_ver, s['cidr'], 'cidr')
# TODO(watanabe.isao): After we found a way to avoid the re-sync
# from the agent side, this restriction could be removed.
if cur_subnet:
dhcp_was_enabled = cur_subnet.enable_dhcp
else:
dhcp_was_enabled = False
if s.get('enable_dhcp') and not dhcp_was_enabled:
subnet_prefixlen = netaddr.IPNetwork(s['cidr']).prefixlen
error_message = _("Subnet has a prefix length that is "
"incompatible with DHCP service enabled.")
if ((ip_ver == 4 and subnet_prefixlen > 30) or
(ip_ver == 6 and subnet_prefixlen > 126)):
raise exc.InvalidInput(error_message=error_message)
net = netaddr.IPNetwork(s['cidr'])
if net.is_multicast():
error_message = _("Multicast IP subnet is not supported "
"if enable_dhcp is True.")
raise exc.InvalidInput(error_message=error_message)
elif net.is_loopback():
error_message = _("Loopback IP subnet is not supported "
"if enable_dhcp is True.")
raise exc.InvalidInput(error_message=error_message)
if validators.is_attr_set(s.get('gateway_ip')):
self._validate_ip_version(ip_ver, s['gateway_ip'], 'gateway_ip')
is_gateway_not_valid = (
ipam.utils.check_gateway_invalid_in_subnet(
s['cidr'], s['gateway_ip']))
if is_gateway_not_valid:
error_message = _("Gateway is not valid on subnet")
raise exc.InvalidInput(error_message=error_message)
# Ensure the gateway IP is not assigned to any port
# skip this check in case of create (s parameter won't have id)
# NOTE(salv-orlando): There is slight chance of a race, when
# a subnet-update and a router-interface-add operation are
# executed concurrently
if cur_subnet and not ipv6_utils.is_ipv6_pd_enabled(s):
ipal = models_v2.IPAllocation
alloc_qry = context.session.query(ipal)
alloc_qry = alloc_qry.join("port", "routerport")
allocated = alloc_qry.filter(
ipal.ip_address == cur_subnet['gateway_ip'],
ipal.subnet_id == cur_subnet['id']).first()
if allocated and allocated['port_id']:
raise n_exc.GatewayIpInUse(
ip_address=cur_subnet['gateway_ip'],
port_id=allocated['port_id'])
if validators.is_attr_set(s.get('dns_nameservers')):
if len(s['dns_nameservers']) > cfg.CONF.max_dns_nameservers:
raise n_exc.DNSNameServersExhausted(
subnet_id=s.get('id', _('new subnet')),
quota=cfg.CONF.max_dns_nameservers)
for dns in s['dns_nameservers']:
try:
netaddr.IPAddress(dns)
except Exception:
raise exc.InvalidInput(
error_message=(_("Error parsing dns address %s") %
dns))
self._validate_ip_version(ip_ver, dns, 'dns_nameserver')
if validators.is_attr_set(s.get('host_routes')):
if len(s['host_routes']) > cfg.CONF.max_subnet_host_routes:
raise n_exc.HostRoutesExhausted(
subnet_id=s.get('id', _('new subnet')),
quota=cfg.CONF.max_subnet_host_routes)
# check if the routes are all valid
for rt in s['host_routes']:
self._validate_host_route(rt, ip_ver)
if ip_ver == 4:
if validators.is_attr_set(s.get('ipv6_ra_mode')):
raise exc.InvalidInput(
error_message=(_("ipv6_ra_mode is not valid when "
"ip_version is 4")))
if validators.is_attr_set(s.get('ipv6_address_mode')):
raise exc.InvalidInput(
error_message=(_("ipv6_address_mode is not valid when "
"ip_version is 4")))
if ip_ver == 6:
self._validate_ipv6_attributes(s, cur_subnet)
def _validate_subnet_for_pd(self, subnet):
"""Validates that subnet parameters are correct for IPv6 PD"""
if (subnet.get('ip_version') != constants.IP_VERSION_6):
reason = _("Prefix Delegation can only be used with IPv6 "
"subnets.")
raise exc.BadRequest(resource='subnets', msg=reason)
mode_list = [constants.IPV6_SLAAC,
constants.DHCPV6_STATELESS]
ra_mode = subnet.get('ipv6_ra_mode')
if ra_mode not in mode_list:
reason = _("IPv6 RA Mode must be SLAAC or Stateless for "
"Prefix Delegation.")
raise exc.BadRequest(resource='subnets', msg=reason)
address_mode = subnet.get('ipv6_address_mode')
if address_mode not in mode_list:
reason = _("IPv6 Address Mode must be SLAAC or Stateless for "
"Prefix Delegation.")
raise exc.BadRequest(resource='subnets', msg=reason)
def _update_router_gw_ports(self, context, network, subnet):
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
if l3plugin:
gw_ports = self._get_router_gw_ports_by_network(context,
network['id'])
router_ids = [p['device_id'] for p in gw_ports]
ctx_admin = context.elevated()
ext_subnets_dict = {s['id']: s for s in network['subnets']}
for id in router_ids:
router = l3plugin.get_router(ctx_admin, id)
external_gateway_info = router['external_gateway_info']
# Get all stateful (i.e. non-SLAAC/DHCPv6-stateless) fixed ips
fips = [f for f in external_gateway_info['external_fixed_ips']
if not ipv6_utils.is_auto_address_subnet(
ext_subnets_dict[f['subnet_id']])]
num_fips = len(fips)
# Don't add the fixed IP to the port if it already
# has a stateful fixed IP of the same IP version
if num_fips > 1:
continue
if num_fips == 1 and netaddr.IPAddress(
fips[0]['ip_address']).version == subnet['ip_version']:
continue
external_gateway_info['external_fixed_ips'].append(
{'subnet_id': subnet['id']})
info = {'router': {'external_gateway_info':
external_gateway_info}}
l3plugin.update_router(context, id, info)
def _create_subnet(self, context, subnet, subnetpool_id):
s = subnet['subnet']
with context.session.begin(subtransactions=True):
network = self._get_network(context, s["network_id"])
subnet, ipam_subnet = self.ipam.allocate_subnet(context,
network,
s,
subnetpool_id)
if hasattr(network, 'external') and network.external:
self._update_router_gw_ports(context,
network,
subnet)
# If this subnet supports auto-addressing, then update any
# internal ports on the network with addresses for this subnet.
if ipv6_utils.is_auto_address_subnet(subnet):
updated_ports = self.ipam.add_auto_addrs_on_network_ports(context,
subnet, ipam_subnet)
for port_id in updated_ports:
port_info = {'port': {'id': port_id}}
self.update_port(context, port_id, port_info)
return self._make_subnet_dict(subnet, context=context)
def _get_subnetpool_id(self, context, subnet):
"""Return the subnetpool id for this request
:param subnet: The subnet dict from the request
"""
use_default_subnetpool = subnet.get('use_default_subnetpool')
if use_default_subnetpool == constants.ATTR_NOT_SPECIFIED:
use_default_subnetpool = False
subnetpool_id = subnet.get('subnetpool_id')
if subnetpool_id == constants.ATTR_NOT_SPECIFIED:
subnetpool_id = None
if use_default_subnetpool and subnetpool_id:
msg = _('subnetpool_id and use_default_subnetpool cannot both be '
'specified')
raise exc.BadRequest(resource='subnets', msg=msg)
if subnetpool_id:
return subnetpool_id
if not use_default_subnetpool:
return
cidr = subnet.get('cidr')
if validators.is_attr_set(cidr):
ip_version = netaddr.IPNetwork(cidr).version
else:
ip_version = subnet.get('ip_version')
if not validators.is_attr_set(ip_version):
msg = _('ip_version must be specified in the absence of '
'cidr and subnetpool_id')
raise exc.BadRequest(resource='subnets', msg=msg)
if ip_version == 6 and cfg.CONF.ipv6_pd_enabled:
return constants.IPV6_PD_POOL_ID
subnetpool = self.get_default_subnetpool(context, ip_version)
if subnetpool:
return subnetpool['id']
msg = _('No default subnetpool found for IPv%s') % ip_version
raise exc.BadRequest(resource='subnets', msg=msg)
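    # Illustrative request fragments and how the resolution above treats them
    # (the pool ids are hypothetical placeholders):
    #   {'subnetpool_id': '<pool-uuid>'}                   -> '<pool-uuid>'
    #   {'subnetpool_id': '<pool-uuid>',
    #    'use_default_subnetpool': True}                   -> BadRequest
    #   {'use_default_subnetpool': True,
    #    'cidr': '10.0.0.0/24'}                            -> default IPv4
    #                                                         pool, if any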
@db_api.retry_if_session_inactive()
def create_subnet(self, context, subnet):
s = subnet['subnet']
cidr = s.get('cidr', constants.ATTR_NOT_SPECIFIED)
prefixlen = s.get('prefixlen', constants.ATTR_NOT_SPECIFIED)
has_cidr = validators.is_attr_set(cidr)
has_prefixlen = validators.is_attr_set(prefixlen)
if has_cidr and has_prefixlen:
msg = _('cidr and prefixlen must not be supplied together')
raise exc.BadRequest(resource='subnets', msg=msg)
if has_cidr:
# turn the CIDR into a proper subnet
net = netaddr.IPNetwork(s['cidr'])
subnet['subnet']['cidr'] = '%s/%s' % (net.network, net.prefixlen)
subnetpool_id = self._get_subnetpool_id(context, s)
if not subnetpool_id and not has_cidr:
msg = _('a subnetpool must be specified in the absence of a cidr')
raise exc.BadRequest(resource='subnets', msg=msg)
if subnetpool_id:
self.ipam.validate_pools_with_subnetpool(s)
if subnetpool_id == constants.IPV6_PD_POOL_ID:
if has_cidr:
# We do not currently support requesting a specific
# cidr with IPv6 prefix delegation. Set the subnetpool_id
# to None and allow the request to continue as normal.
subnetpool_id = None
self._validate_subnet(context, s)
else:
prefix = n_const.PROVISIONAL_IPV6_PD_PREFIX
subnet['subnet']['cidr'] = prefix
self._validate_subnet_for_pd(s)
else:
if not has_cidr:
msg = _('A cidr must be specified in the absence of a '
'subnet pool')
raise exc.BadRequest(resource='subnets', msg=msg)
self._validate_subnet(context, s)
return self._create_subnet(context, subnet, subnetpool_id)
def _update_allocation_pools(self, subnet):
"""Gets new allocation pools and formats them correctly"""
allocation_pools = self.ipam.generate_pools(subnet['cidr'],
subnet['gateway_ip'])
return [{'start': str(netaddr.IPAddress(p.first,
subnet['ip_version'])),
'end': str(netaddr.IPAddress(p.last, subnet['ip_version']))}
for p in allocation_pools]
@db_api.retry_if_session_inactive()
def update_subnet(self, context, id, subnet):
"""Update the subnet with new info.
        The change, however, will not be realized until the client renews
        the DNS lease or we support gratuitous DHCP offers.
"""
s = subnet['subnet']
new_cidr = s.get('cidr')
db_subnet = self._get_subnet(context, id)
# Fill 'ip_version' and 'allocation_pools' fields with the current
# value since _validate_subnet() expects subnet spec has 'ip_version'
# and 'allocation_pools' fields.
s['ip_version'] = db_subnet.ip_version
s['cidr'] = db_subnet.cidr
s['id'] = db_subnet.id
s['tenant_id'] = db_subnet.tenant_id
s['subnetpool_id'] = db_subnet.subnetpool_id
self._validate_subnet(context, s, cur_subnet=db_subnet)
db_pools = [netaddr.IPRange(p['first_ip'], p['last_ip'])
for p in db_subnet.allocation_pools]
update_ports_needed = False
if new_cidr and ipv6_utils.is_ipv6_pd_enabled(s):
# This is an ipv6 prefix delegation-enabled subnet being given an
# updated cidr by the process_prefix_update RPC
s['cidr'] = new_cidr
update_ports_needed = True
net = netaddr.IPNetwork(s['cidr'], s['ip_version'])
# Update gateway_ip and allocation pools based on new cidr
s['gateway_ip'] = utils.get_first_host_ip(net, s['ip_version'])
s['allocation_pools'] = self._update_allocation_pools(s)
range_pools = None
if s.get('allocation_pools') is not None:
# Convert allocation pools to IPRange to simplify future checks
range_pools = self.ipam.pools_to_ip_range(s['allocation_pools'])
self.ipam.validate_allocation_pools(range_pools, s['cidr'])
s['allocation_pools'] = range_pools
# If either gateway_ip or allocation_pools were specified
gateway_ip = s.get('gateway_ip', db_subnet.gateway_ip)
gateway_ip_changed = gateway_ip != db_subnet.gateway_ip
if gateway_ip_changed or s.get('allocation_pools') is not None:
pools = range_pools if range_pools is not None else db_pools
if gateway_ip:
self.ipam.validate_gw_out_of_pools(gateway_ip, pools)
if gateway_ip_changed:
            # Provide a pre-update notification so as not to break plugins
            # that don't support gateway IP changes.
kwargs = {'context': context, 'subnet_id': id,
'network_id': db_subnet.network_id}
registry.notify(resources.SUBNET_GATEWAY, events.BEFORE_UPDATE,
self, **kwargs)
with context.session.begin(subtransactions=True):
subnet, changes = self.ipam.update_db_subnet(context, id, s,
db_pools)
# we expire here since ipam may have made changes to relationships
# that will be stale on any subsequent lookups while the subnet object
# is in the session otherwise.
context.session.expire(subnet)
result = self._make_subnet_dict(subnet, context=context)
if update_ports_needed:
# Find ports that have not yet been updated
# with an IP address by Prefix Delegation, and update them
filters = {'fixed_ips': {'subnet_id': [s['id']]}}
ports = self.get_ports(context, filters=filters)
routers = []
for port in ports:
for ip in port['fixed_ips']:
if ip['subnet_id'] == s['id']:
if (port['device_owner'] in
constants.ROUTER_INTERFACE_OWNERS):
routers.append(port['device_id'])
ip['ip_address'] = s['gateway_ip']
else:
# We remove ip_address and pass only PD subnet_id
# in port's fixed_ip for port_update. Later, IPAM
# drivers will allocate eui64 address with new
# prefix when they find PD subnet_id in port's
# fixed_ip.
ip.pop('ip_address', None)
# FIXME(kevinbenton): this should not be calling update_port
# inside of a transaction.
setattr(context, 'GUARD_TRANSACTION', False)
self.update_port(context, port['id'], {'port': port})
# Send router_update to l3_agent
if routers:
l3_rpc_notifier = l3_rpc_agent_api.L3AgentNotifyAPI()
l3_rpc_notifier.routers_updated(context, routers)
if gateway_ip_changed:
kwargs = {'context': context, 'subnet_id': id,
'network_id': db_subnet.network_id}
registry.notify(resources.SUBNET_GATEWAY, events.AFTER_UPDATE,
self, **kwargs)
return result
def _subnet_check_ip_allocations(self, context, subnet_id):
return (context.session.query(models_v2.IPAllocation).
filter_by(subnet_id=subnet_id).join(models_v2.Port).first())
def _subnet_get_user_allocation(self, context, subnet_id):
"""Check if there are any user ports on subnet and return first."""
        # Need to join with the ports table because IPAllocation's port
        # is not joined eagerly, which would otherwise produce a query
        # yielding incorrect results.
return (context.session.query(models_v2.IPAllocation).
filter_by(subnet_id=subnet_id).join(models_v2.Port).
filter(~models_v2.Port.device_owner.
in_(AUTO_DELETE_PORT_OWNERS)).first())
def _subnet_check_ip_allocations_internal_router_ports(self, context,
subnet_id):
# Do not delete the subnet if IP allocations for internal
# router ports still exist
allocs = context.session.query(models_v2.IPAllocation).filter_by(
subnet_id=subnet_id).join(models_v2.Port).filter(
models_v2.Port.device_owner.in_(
constants.ROUTER_INTERFACE_OWNERS)
).first()
if allocs:
LOG.debug("Subnet %s still has internal router ports, "
"cannot delete", subnet_id)
raise exc.SubnetInUse(subnet_id=subnet_id)
@db_api.retry_if_session_inactive()
def delete_subnet(self, context, id):
with context.session.begin(subtransactions=True):
subnet = self._get_subnet(context, id)
# Make sure the subnet isn't used by other resources
_check_subnet_not_used(context, id)
# Delete all network owned ports
qry_network_ports = (
context.session.query(models_v2.IPAllocation).
filter_by(subnet_id=subnet['id']).
join(models_v2.Port))
# Remove network owned ports, and delete IP allocations
# for IPv6 addresses which were automatically generated
# via SLAAC
is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet)
if is_auto_addr_subnet:
self._subnet_check_ip_allocations_internal_router_ports(
context, id)
else:
qry_network_ports = (
qry_network_ports.filter(models_v2.Port.device_owner.
in_(AUTO_DELETE_PORT_OWNERS)))
network_ports = qry_network_ports.all()
if network_ports:
for port in network_ports:
context.session.delete(port)
            # Check if there are more IP allocations, unless
            # is_auto_address_subnet is True. In that case the check is
            # unnecessary. This additional check would not only be wasteful
            # for this class of subnet, but is also error-prone: when the
            # isolation level is set to READ COMMITTED, allocations made
            # concurrently will be returned by this query.
if not is_auto_addr_subnet:
alloc = self._subnet_check_ip_allocations(context, id)
if alloc:
LOG.info(_LI("Found port (%(port_id)s, %(ip)s) having IP "
"allocation on subnet "
"%(subnet)s, cannot delete"),
{'ip': alloc.ip_address,
'port_id': alloc.port_id,
'subnet': id})
raise exc.SubnetInUse(subnet_id=id)
context.session.delete(subnet)
# Delete related ipam subnet manually,
# since there is no FK relationship
self.ipam.delete_subnet(context, id)
@db_api.retry_if_session_inactive()
def get_subnet(self, context, id, fields=None):
subnet = self._get_subnet(context, id)
return self._make_subnet_dict(subnet, fields, context=context)
@db_api.retry_if_session_inactive()
def get_subnets(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
return self._get_subnets(context, filters, fields, sorts, limit,
marker, page_reverse)
@db_api.retry_if_session_inactive()
def get_subnets_count(self, context, filters=None):
return self._get_collection_count(context, models_v2.Subnet,
filters=filters)
@db_api.retry_if_session_inactive()
def get_subnets_by_network(self, context, network_id):
return [self._make_subnet_dict(subnet_db) for subnet_db in
self._get_subnets_by_network(context, network_id)]
def _validate_address_scope_id(self, context, address_scope_id,
subnetpool_id, sp_prefixes, ip_version):
"""Validate the address scope before associating.
Subnetpool can associate with an address scope if
- the tenant user is the owner of both the subnetpool and
address scope
- the admin is associating the subnetpool with the shared
address scope
- there is no prefix conflict with the existing subnetpools
associated with the address scope.
- the address family of the subnetpool and address scope
are the same
"""
if not validators.is_attr_set(address_scope_id):
return
if not self.is_address_scope_owned_by_tenant(context,
address_scope_id):
raise n_exc.IllegalSubnetPoolAssociationToAddressScope(
subnetpool_id=subnetpool_id, address_scope_id=address_scope_id)
as_ip_version = self.get_ip_version_for_address_scope(context,
address_scope_id)
if ip_version != as_ip_version:
raise n_exc.IllegalSubnetPoolIpVersionAssociationToAddressScope(
subnetpool_id=subnetpool_id, address_scope_id=address_scope_id,
ip_version=as_ip_version)
subnetpools = subnetpool_obj.SubnetPool.get_objects(
context, address_scope_id=address_scope_id)
new_set = netaddr.IPSet(sp_prefixes)
for sp in subnetpools:
if sp.id == subnetpool_id:
continue
sp_set = netaddr.IPSet(sp.prefixes)
if sp_set.intersection(new_set):
raise n_exc.AddressScopePrefixConflict()
def _check_subnetpool_update_allowed(self, context, subnetpool_id,
address_scope_id):
"""Check if the subnetpool can be updated or not.
If the subnetpool is associated to a shared address scope not owned
by the tenant, then the subnetpool cannot be updated.
"""
if not self.is_address_scope_owned_by_tenant(context,
address_scope_id):
msg = _("subnetpool %(subnetpool_id)s cannot be updated when"
" associated with shared address scope "
"%(address_scope_id)s") % {
'subnetpool_id': subnetpool_id,
'address_scope_id': address_scope_id}
raise n_exc.IllegalSubnetPoolUpdate(reason=msg)
def _check_default_subnetpool_exists(self, context, ip_version):
"""Check if a default already exists for the given IP version.
There can only be one default subnetpool for each IP family. Raise an
InvalidInput error if a default has already been set.
"""
if self.get_default_subnetpool(context, ip_version):
msg = _("A default subnetpool for this IP family has already "
"been set. Only one default may exist per IP family")
raise exc.InvalidInput(error_message=msg)
@db_api.retry_if_session_inactive()
def create_subnetpool(self, context, subnetpool):
sp = subnetpool['subnetpool']
sp_reader = subnet_alloc.SubnetPoolReader(sp)
if sp_reader.address_scope_id is constants.ATTR_NOT_SPECIFIED:
sp_reader.address_scope_id = None
if sp_reader.is_default:
self._check_default_subnetpool_exists(context,
sp_reader.ip_version)
self._validate_address_scope_id(context, sp_reader.address_scope_id,
id, sp_reader.prefixes,
sp_reader.ip_version)
pool_args = {'tenant_id': sp['tenant_id'],
'id': sp_reader.id,
'name': sp_reader.name,
'ip_version': sp_reader.ip_version,
'default_prefixlen':
sp_reader.default_prefixlen,
'min_prefixlen': sp_reader.min_prefixlen,
'max_prefixlen': sp_reader.max_prefixlen,
'is_default': sp_reader.is_default,
'shared': sp_reader.shared,
'default_quota': sp_reader.default_quota,
'address_scope_id': sp_reader.address_scope_id,
'description': sp_reader.description,
'prefixes': sp_reader.prefixes}
subnetpool = subnetpool_obj.SubnetPool(context, **pool_args)
subnetpool.create()
return self._make_subnetpool_dict(subnetpool.db_obj)
@db_api.retry_if_session_inactive()
def update_subnetpool(self, context, id, subnetpool):
new_sp = subnetpool['subnetpool']
with context.session.begin(subtransactions=True):
orig_sp = self._get_subnetpool(context, id=id)
updated = _update_subnetpool_dict(orig_sp, new_sp)
reader = subnet_alloc.SubnetPoolReader(updated)
if reader.is_default and not orig_sp.is_default:
self._check_default_subnetpool_exists(context,
reader.ip_version)
if orig_sp.address_scope_id:
self._check_subnetpool_update_allowed(context, id,
orig_sp.address_scope_id)
self._validate_address_scope_id(context, reader.address_scope_id,
id, reader.prefixes,
reader.ip_version)
address_scope_changed = (
orig_sp.address_scope_id != reader.address_scope_id)
orig_sp.update_fields(reader.subnetpool)
orig_sp.update()
if address_scope_changed:
# Notify about the update of subnetpool's address scope
kwargs = {'context': context, 'subnetpool_id': id}
registry.notify(resources.SUBNETPOOL_ADDRESS_SCOPE,
events.AFTER_UPDATE,
self.update_subnetpool,
**kwargs)
for key in ['min_prefixlen', 'max_prefixlen', 'default_prefixlen']:
            updated[key] = str(updated[key])
self._apply_dict_extend_functions(attributes.SUBNETPOOLS,
updated, orig_sp.db_obj)
return updated
@db_api.retry_if_session_inactive()
def get_subnetpool(self, context, id, fields=None):
subnetpool = self._get_subnetpool(context, id)
return self._make_subnetpool_dict(subnetpool.db_obj, fields)
@db_api.retry_if_session_inactive()
def get_subnetpools(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
pager = base_obj.Pager(sorts, limit, page_reverse, marker)
subnetpools = subnetpool_obj.SubnetPool.get_objects(
context, _pager=pager, validate_filters=False, **filters)
return [
self._make_subnetpool_dict(pool.db_obj, fields)
for pool in subnetpools
]
@db_api.retry_if_session_inactive()
def get_default_subnetpool(self, context, ip_version):
"""Retrieve the default subnetpool for the given IP version."""
filters = {'is_default': True,
'ip_version': ip_version}
subnetpool = self.get_subnetpools(context, filters=filters)
if subnetpool:
return subnetpool[0]
@db_api.retry_if_session_inactive()
def delete_subnetpool(self, context, id):
with context.session.begin(subtransactions=True):
subnetpool = self._get_subnetpool(context, id=id)
subnets = self._get_subnets_by_subnetpool(context, id)
if subnets:
reason = _("Subnet pool has existing allocations")
raise n_exc.SubnetPoolDeleteError(reason=reason)
subnetpool.delete()
def _check_mac_addr_update(self, context, port, new_mac, device_owner):
if (device_owner and
device_owner.startswith(constants.DEVICE_OWNER_NETWORK_PREFIX)):
raise n_exc.UnsupportedPortDeviceOwner(
op=_("mac address update"), port_id=id,
device_owner=device_owner)
@db_api.retry_if_session_inactive()
def create_port_bulk(self, context, ports):
return self._create_bulk('port', context, ports)
def _create_db_port_obj(self, context, port_data):
mac_address = port_data.pop('mac_address', None)
if mac_address:
if self._is_mac_in_use(context, port_data['network_id'],
mac_address):
raise exc.MacAddressInUse(net_id=port_data['network_id'],
mac=mac_address)
else:
mac_address = self._generate_mac()
db_port = models_v2.Port(mac_address=mac_address, **port_data)
context.session.add(db_port)
return db_port
@db_api.retry_if_session_inactive()
def create_port(self, context, port):
db_port = self.create_port_db(context, port)
return self._make_port_dict(db_port, process_extensions=False)
def create_port_db(self, context, port):
p = port['port']
port_id = p.get('id') or uuidutils.generate_uuid()
network_id = p['network_id']
# NOTE(jkoelker) Get the tenant_id outside of the session to avoid
# unneeded db action if the operation raises
tenant_id = p['tenant_id']
if p.get('device_owner'):
self._enforce_device_owner_not_router_intf_or_device_id(
context, p.get('device_owner'), p.get('device_id'), tenant_id)
port_data = dict(tenant_id=tenant_id,
name=p['name'],
id=port_id,
network_id=network_id,
admin_state_up=p['admin_state_up'],
status=p.get('status', constants.PORT_STATUS_ACTIVE),
device_id=p['device_id'],
device_owner=p['device_owner'],
description=p.get('description'))
if p.get('mac_address') is not constants.ATTR_NOT_SPECIFIED:
port_data['mac_address'] = p.get('mac_address')
with context.session.begin(subtransactions=True):
# Ensure that the network exists.
self._get_network(context, network_id)
# Create the port
db_port = self._create_db_port_obj(context, port_data)
p['mac_address'] = db_port['mac_address']
try:
self.ipam.allocate_ips_for_port_and_store(
context, port, port_id)
db_port['ip_allocation'] = ipa.IP_ALLOCATION_IMMEDIATE
except ipam_exc.DeferIpam:
db_port['ip_allocation'] = ipa.IP_ALLOCATION_DEFERRED
fixed_ips = p['fixed_ips']
if validators.is_attr_set(fixed_ips) and not fixed_ips:
# [] was passed explicitly as fixed_ips. An unaddressed port.
db_port['ip_allocation'] = ipa.IP_ALLOCATION_NONE
return db_port
def _validate_port_for_update(self, context, db_port, new_port, new_mac):
changed_owner = 'device_owner' in new_port
current_owner = (new_port.get('device_owner') or
db_port['device_owner'])
changed_device_id = new_port.get('device_id') != db_port['device_id']
current_device_id = new_port.get('device_id') or db_port['device_id']
if current_owner and changed_device_id or changed_owner:
self._enforce_device_owner_not_router_intf_or_device_id(
context, current_owner, current_device_id,
db_port['tenant_id'])
if new_mac and new_mac != db_port['mac_address']:
self._check_mac_addr_update(context, db_port,
new_mac, current_owner)
@db_api.retry_if_session_inactive()
def update_port(self, context, id, port):
new_port = port['port']
with context.session.begin(subtransactions=True):
db_port = self._get_port(context, id)
new_mac = new_port.get('mac_address')
self._validate_port_for_update(context, db_port, new_port, new_mac)
# Note: _make_port_dict is called here to load extension data
# (specifically host binding). The IPAM plugin is separate from
# the core plugin, so extensions are not loaded.
#
# The IPAM code could cheat and get it directly from db_port but it
# would have to know about the implementation (remember ml2 has its
# own port binding schema that differs from the generic one)
#
# This code could extract just the port binding host here and pass
# that in. The problem is that db_base_plugin_common shouldn't
# know anything about port binding. This compromise sends IPAM a
# port_dict with all of the extension data loaded.
try:
self.ipam.update_port(
context,
old_port_db=db_port,
old_port=self._make_port_dict(db_port),
new_port=new_port)
except ipam_exc.IpAddressAllocationNotFound as e:
# If a port update and a subnet delete interleave, there is a
# chance that the IPAM update operation raises this exception.
# Rather than throwing that up to the user under some sort of
# conflict, bubble up a retry instead that should bring things
# back to sanity.
raise os_db_exc.RetryRequest(e)
result = self._make_port_dict(db_port)
return result
@db_api.retry_if_session_inactive()
def delete_port(self, context, id):
with context.session.begin(subtransactions=True):
self.ipam.delete_port(context, id)
def delete_ports_by_device_id(self, context, device_id, network_id=None):
query = (context.session.query(models_v2.Port.id)
.enable_eagerloads(False)
.filter(models_v2.Port.device_id == device_id))
if network_id:
query = query.filter(models_v2.Port.network_id == network_id)
port_ids = [p[0] for p in query]
for port_id in port_ids:
try:
self.delete_port(context, port_id)
except exc.PortNotFound:
# Don't raise if something else concurrently deleted the port
LOG.debug("Ignoring PortNotFound when deleting port '%s'. "
"The port has already been deleted.",
port_id)
@db_api.retry_if_session_inactive()
def get_port(self, context, id, fields=None):
port = self._get_port(context, id)
return self._make_port_dict(port, fields)
def _get_ports_query(self, context, filters=None, sorts=None, limit=None,
marker_obj=None, page_reverse=False):
Port = models_v2.Port
IPAllocation = models_v2.IPAllocation
if not filters:
filters = {}
query = self._model_query(context, Port)
fixed_ips = filters.pop('fixed_ips', {})
ip_addresses = fixed_ips.get('ip_address')
subnet_ids = fixed_ips.get('subnet_id')
if ip_addresses or subnet_ids:
query = query.join(Port.fixed_ips)
if ip_addresses:
query = query.filter(IPAllocation.ip_address.in_(ip_addresses))
if subnet_ids:
query = query.filter(IPAllocation.subnet_id.in_(subnet_ids))
query = self._apply_filters_to_query(query, Port, filters, context)
if sorts:
sort_keys = db_utils.get_and_validate_sort_keys(sorts, Port)
sort_dirs = db_utils.get_sort_dirs(sorts, page_reverse)
query = sa_utils.paginate_query(query, Port, limit,
marker=marker_obj,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
return query
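    # Illustrative shape of the 'fixed_ips' filter consumed above (all values
    # are hypothetical):
    #   filters = {'network_id': ['<net-uuid>'],
    #              'fixed_ips': {'ip_address': ['10.0.0.3'],
    #                            'subnet_id': ['<subnet-uuid>']}}
    #   ports = plugin.get_ports(context, filters=filters)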
@db_api.retry_if_session_inactive()
def get_ports(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'port', limit, marker)
query = self._get_ports_query(context, filters=filters,
sorts=sorts, limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
items = [self._make_port_dict(c, fields) for c in query]
if limit and page_reverse:
items.reverse()
return items
@db_api.retry_if_session_inactive()
def get_ports_count(self, context, filters=None):
return self._get_ports_query(context, filters).count()
def _enforce_device_owner_not_router_intf_or_device_id(self, context,
device_owner,
device_id,
tenant_id):
"""Prevent tenants from replacing the device id of router ports with
a router uuid belonging to another tenant.
"""
if device_owner not in constants.ROUTER_INTERFACE_OWNERS:
return
if not context.is_admin:
            # Check to make sure device_id does not match another tenant's
            # router.
if device_id:
if hasattr(self, 'get_router'):
try:
ctx_admin = context.elevated()
router = self.get_router(ctx_admin, device_id)
except l3.RouterNotFound:
return
else:
l3plugin = (
manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT))
if l3plugin:
try:
ctx_admin = context.elevated()
router = l3plugin.get_router(ctx_admin,
device_id)
except l3.RouterNotFound:
return
else:
                        # raise as the extension doesn't support L3 anyway.
raise n_exc.DeviceIDNotOwnedByTenant(
device_id=device_id)
if tenant_id != router['tenant_id']:
raise n_exc.DeviceIDNotOwnedByTenant(device_id=device_id)
db_base_plugin_common.DbBasePluginCommon.register_model_query_hook(
models_v2.Port,
"port",
'_port_query_hook',
'_port_filter_hook',
None)
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from optparse import OptionParser
import curses
import threading, Queue
import socket
import signal
import re, StringIO
import logging as LOG
LOG_FILENAME = 'zktop_log.out'
LOG.basicConfig(filename=LOG_FILENAME,level=LOG.DEBUG)
usage = "usage: %prog [options]"
parser = OptionParser(usage=usage)
parser.add_option("", "--servers", dest="servers",
default="localhost:2181", help="comma separated list of host:port (default localhost:2181)")
parser.add_option("-n", "--names",
action="store_true", dest="names", default=False,
help="resolve session name from ip (default False)")
parser.add_option("", "--fix_330",
action="store_true", dest="fix_330", default=False,
help="workaround for a bug in ZK 3.3.0")
(options, args) = parser.parse_args()
resized_sig = False
class ZKServer(object):
def __init__(self, server, server_id):
self.server_id = server_id
self.host, self.port = server.split(':')
try:
stat = send_cmd(self.host, self.port, 'stat\n')
sio = StringIO.StringIO(stat)
line = sio.readline()
m = re.search('.*: (\d+\.\d+\.\d+)-.*', line)
self.version = m.group(1)
sio.readline()
self.sessions = []
for line in sio:
attr, value = line.split(':', 1)
attr = attr.strip().replace(" ", "_").replace("/", "_").lower()
self.__dict__[attr] = value.strip()
self.min_latency, self.avg_latency, self.max_latency = self.latency_min_avg_max.split("/")
self.unavailable = False
except:
self.unavailable = True
self.mode = "Unavailable"
self.sessions = []
self.version = "Unknown"
return
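# For reference, the 'stat' response parsed above is plain text roughly of the
# following shape (values are illustrative; per-client connection lines are
# omitted here):
#
#   Zookeeper version: 3.4.6-1569965, built on 02/20/2014 09:09 GMT
#   Clients:
#   Latency min/avg/max: 0/1/12
#   Received: 100
#   Sent: 99
#   Connections: 1
#   Outstanding: 0
#   Zxid: 0x1a
#   Mode: standalone
#   Node count: 4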
def send_cmd(host, port, cmd):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, int(port)))
result = []
try:
s.sendall(cmd)
# shutting down the socket write side helps ensure
# that we don't end up with TIME_WAIT sockets
if not options.fix_330:
s.shutdown(socket.SHUT_WR)
while True:
data = s.recv(4096)
if not data:
break
result.append(data)
finally:
s.close()
return "".join(result)
q_stats = Queue.Queue()
p_wakeup = threading.Condition()
def wakeup_poller():
p_wakeup.acquire()
p_wakeup.notifyAll()
p_wakeup.release()
def reset_server_stats(server):
host, port = server.split(':')
send_cmd(host, port, "srst\n")
server_id = 0
class StatPoller(threading.Thread):
def __init__(self, server):
self.server = server
global server_id
self.server_id = server_id
server_id += 1
threading.Thread.__init__(self)
def run(self):
p_wakeup.acquire()
while True:
s = ZKServer(self.server, self.server_id)
q_stats.put(s)
p_wakeup.wait(3.0)
        # "p_wakeup.release()" is intentionally omitted: this loop only ends
        # when the process exits, and releasing here caused an error on the
        # console.
class BaseUI(object):
def __init__(self, win):
self.win = win
global mainwin
self.maxy, self.maxx = mainwin.getmaxyx()
self.resize(self.maxy, self.maxx)
def resize(self, maxy, maxx):
LOG.debug("resize called y %d x %d" % (maxy, maxx))
self.maxy = maxy
self.maxx = maxx
def addstr(self, y, x, line, flags = 0):
LOG.debug("addstr with maxx %d" % (self.maxx))
self.win.addstr(y, x, line[:self.maxx-1], flags)
self.win.clrtoeol()
self.win.noutrefresh()
class SummaryUI(BaseUI):
def __init__(self, height, width, server_count):
BaseUI.__init__(self, curses.newwin(1, width, 0, 0))
self.session_counts = [0 for i in range(server_count)]
self.node_counts = [0 for i in range(server_count)]
self.zxids = [0 for i in range(server_count)]
def update(self, s):
self.win.erase()
if s.unavailable:
self.session_counts[s.server_id] = 0
self.node_counts[s.server_id] = 0
self.zxids[s.server_id] = 0
else:
self.session_counts[s.server_id] = len(s.sessions)
self.node_counts[s.server_id] = int(s.node_count)
self.zxids[s.server_id] = long(s.zxid, 16)
nc = max(self.node_counts)
zxid = max(self.zxids)
sc = sum(self.session_counts)
self.addstr(0, 0, "Ensemble -- nodecount:%d zxid:0x%x sessions:%d" %
(nc, zxid, sc))
class ServerUI(BaseUI):
def __init__(self, height, width, server_count):
BaseUI.__init__(self, curses.newwin(server_count + 2, width, 1, 0))
def resize(self, maxy, maxx):
BaseUI.resize(self, maxy, maxx)
self.addstr(1, 0, "ID SERVER PORT M OUTST RECVD SENT CONNS MINLAT AVGLAT MAXLAT", curses.A_REVERSE)
def update(self, s):
if s.unavailable:
self.addstr(s.server_id + 2, 0, "%-2s %-15s %5s %s" %
(s.server_id, s.host[:15], s.port, s.mode[:1].upper()))
else:
self.addstr(s.server_id + 2, 0, "%-2s %-15s %5s %s %8s %8s %8s %5d %6s %6s %6s" %
(s.server_id, s.host[:15], s.port, s.mode[:1].upper(),
s.outstanding, s.received, s.sent, len(s.sessions),
s.min_latency, s.avg_latency, s.max_latency))
mainwin = None
class Main(object):
def __init__(self, servers):
self.servers = servers.split(",")
def show_ui(self, stdscr):
global mainwin
mainwin = stdscr
curses.use_default_colors()
        # without this, for some reason it takes one cycle to draw the windows
stdscr.refresh()
signal.signal(signal.SIGWINCH, sigwinch_handler)
TIMEOUT = 250
stdscr.timeout(TIMEOUT)
server_count = len(self.servers)
maxy, maxx = stdscr.getmaxyx()
uis = (SummaryUI(maxy, maxx, server_count),
ServerUI(maxy, maxx, server_count))
# start the polling threads
pollers = [StatPoller(server) for server in self.servers]
for poller in pollers:
            poller.setName("PollerThread:" + poller.server)
poller.setDaemon(True)
poller.start()
LOG.debug("starting main loop")
global resized_sig
flash = None
while True:
try:
if resized_sig:
resized_sig = False
self.resize(uis)
wakeup_poller()
while not q_stats.empty():
zkserver = q_stats.get_nowait()
for ui in uis:
ui.update(zkserver)
ch = stdscr.getch()
if 0 < ch <=255:
if ch == ord('q'):
return
elif ch == ord('h'):
flash = "Help: q:quit r:reset stats spc:refresh"
flash_count = 1000/TIMEOUT * 5
elif ch == ord('r'):
[reset_server_stats(server) for server in self.servers]
flash = "Server stats reset"
flash_count = 1000/TIMEOUT * 5
wakeup_poller()
elif ch == ord(' '):
wakeup_poller()
stdscr.move(1, 0)
if flash:
stdscr.addstr(1, 0, flash)
flash_count -= 1
if flash_count == 0:
flash = None
stdscr.clrtoeol()
curses.doupdate()
except KeyboardInterrupt:
break
def resize(self, uis):
curses.endwin()
curses.doupdate()
global mainwin
mainwin.refresh()
maxy, maxx = mainwin.getmaxyx()
for ui in uis:
ui.resize(maxy, maxx)
def sigwinch_handler(*nada):
LOG.debug("sigwinch called")
global resized_sig
resized_sig = True
if __name__ == '__main__':
LOG.debug("startup")
ui = Main(options.servers)
curses.wrapper(ui.show_ui)
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, json, sys
from frappe import _
from frappe.utils import cint, flt, now, cstr, strip_html
from frappe.model import default_fields
from frappe.model.naming import set_new_name
class BaseDocument(object):
ignore_in_getter = ("doctype", "_meta", "meta", "_table_fields", "_valid_columns")
def __init__(self, d):
self.update(d)
self.dont_update_if_missing = []
@property
def meta(self):
if not hasattr(self, "_meta"):
self._meta = frappe.get_meta(self.doctype)
return self._meta
def update(self, d):
if "doctype" in d:
self.set("doctype", d.get("doctype"))
# first set default field values of base document
for key in default_fields:
if key in d:
self.set(key, d.get(key))
for key, value in d.iteritems():
self.set(key, value)
return self
def update_if_missing(self, d):
if isinstance(d, BaseDocument):
d = d.get_valid_dict()
if "doctype" in d:
self.set("doctype", d.get("doctype"))
for key, value in d.iteritems():
			# dont_update_if_missing is a list of fieldnames for which you don't want to set a default value
if (self.get(key) is None) and (value is not None) and (key not in self.dont_update_if_missing):
self.set(key, value)
def get_db_value(self, key):
return frappe.db.get_value(self.doctype, self.name, key)
def get(self, key=None, filters=None, limit=None, default=None):
if key:
if isinstance(key, dict):
return _filter(self.get_all_children(), key, limit=limit)
if filters:
if isinstance(filters, dict):
value = _filter(self.__dict__.get(key, []), filters, limit=limit)
else:
default = filters
filters = None
value = self.__dict__.get(key, default)
else:
value = self.__dict__.get(key, default)
if value is None and key not in self.ignore_in_getter \
and key in (d.fieldname for d in self.meta.get_table_fields()):
self.set(key, [])
value = self.__dict__.get(key)
return value
else:
return self.__dict__
def getone(self, key, filters=None):
return self.get(key, filters=filters, limit=1)[0]
def set(self, key, value, as_value=False):
if isinstance(value, list) and not as_value:
self.__dict__[key] = []
self.extend(key, value)
else:
self.__dict__[key] = value
def delete_key(self, key):
if key in self.__dict__:
del self.__dict__[key]
def append(self, key, value=None):
if value==None:
value={}
if isinstance(value, (dict, BaseDocument)):
if not self.__dict__.get(key):
self.__dict__[key] = []
value = self._init_child(value, key)
self.__dict__[key].append(value)
return value
else:
raise ValueError, "Document attached to child table must be a dict or BaseDocument, not " + str(type(value))[1:-1]
def extend(self, key, value):
if isinstance(value, list):
for v in value:
self.append(key, v)
else:
raise ValueError
def remove(self, doc):
self.get(doc.parentfield).remove(doc)
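	# Illustrative usage of the child-table helpers above (the fieldname and
	# the values are hypothetical):
	#   row = doc.append("items", {"item_code": "X", "qty": 1})
	#   doc.extend("items", [{"item_code": "Y"}, {"item_code": "Z"}])
	#   first_matching_row = doc.getone("items", {"qty": 1})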
def _init_child(self, value, key):
if not self.doctype:
return value
if not isinstance(value, BaseDocument):
if "doctype" not in value:
value["doctype"] = self.get_table_field_doctype(key)
if not value["doctype"]:
raise AttributeError, key
value = BaseDocument(value)
value.init_valid_columns()
value.parent = self.name
value.parenttype = self.doctype
value.parentfield = key
if not getattr(value, "idx", None):
value.idx = len(self.get(key) or []) + 1
if not getattr(value, "name", None):
value.__dict__['__islocal'] = 1
return value
def get_valid_dict(self):
d = {}
for fieldname in self.meta.get_valid_columns():
d[fieldname] = self.get(fieldname)
return d
def init_valid_columns(self):
for key in default_fields:
if key not in self.__dict__:
self.__dict__[key] = None
if self.doctype in ("DocField", "DocPerm") and self.parent in ("DocType", "DocField", "DocPerm"):
from frappe.model.meta import get_table_columns
valid = get_table_columns(self.doctype)
else:
valid = self.meta.get_valid_columns()
for key in valid:
if key not in self.__dict__:
self.__dict__[key] = None
def is_new(self):
return self.get("__islocal")
def as_dict(self, no_nulls=False):
doc = self.get_valid_dict()
doc["doctype"] = self.doctype
for df in self.meta.get_table_fields():
children = self.get(df.fieldname) or []
doc[df.fieldname] = [d.as_dict(no_nulls=no_nulls) for d in children]
if no_nulls:
for k in doc.keys():
if doc[k] is None:
del doc[k]
for key in ("_user_tags", "__islocal", "__onload", "_starred_by"):
if self.get(key):
doc[key] = self.get(key)
return doc
def as_json(self):
return json.dumps(self.as_dict(), indent=1, sort_keys=True)
def get_table_field_doctype(self, fieldname):
return self.meta.get_field(fieldname).options
def get_parentfield_of_doctype(self, doctype):
fieldname = [df.fieldname for df in self.meta.get_table_fields() if df.options==doctype]
return fieldname[0] if fieldname else None
def db_insert(self):
set_new_name(self)
d = self.get_valid_dict()
columns = d.keys()
try:
frappe.db.sql("""insert into `tab{doctype}`
({columns}) values ({values})""".format(
doctype = self.doctype,
columns = ", ".join(["`"+c+"`" for c in columns]),
values = ", ".join(["%s"] * len(columns))
), d.values())
except Exception, e:
if e.args[0]==1062:
type, value, traceback = sys.exc_info()
frappe.msgprint(_("Duplicate name {0} {1}").format(self.doctype, self.name))
raise frappe.NameError, (self.doctype, self.name, e), traceback
else:
raise
self.set("__islocal", False)
def db_update(self):
if self.get("__islocal") or not self.name:
self.db_insert()
return
d = self.get_valid_dict()
columns = d.keys()
try:
frappe.db.sql("""update `tab{doctype}`
set {values} where name=%s""".format(
doctype = self.doctype,
values = ", ".join(["`"+c+"`=%s" for c in columns])
), d.values() + [d.get("name")])
except Exception, e:
if e.args[0]==1062:
type, value, traceback = sys.exc_info()
fieldname = str(e).split("'")[-2]
frappe.msgprint(_("{0} must be unique".format(self.meta.get_label(fieldname))))
raise frappe.ValidationError, (self.doctype, self.name, e), traceback
else:
raise
def db_set(self, fieldname, value):
self.set(fieldname, value)
self.set("modified", now())
self.set("modified_by", frappe.session.user)
frappe.db.set_value(self.doctype, self.name, fieldname, value, self.modified, self.modified_by)
def _fix_numeric_types(self):
for df in self.meta.get("fields"):
if df.fieldtype == "Check":
self.set(df.fieldname, cint(self.get(df.fieldname)))
elif self.get(df.fieldname) is not None:
if df.fieldtype == "Int":
self.set(df.fieldname, cint(self.get(df.fieldname)))
elif df.fieldtype in ("Float", "Currency", "Percent"):
self.set(df.fieldname, flt(self.get(df.fieldname)))
if self.docstatus is not None:
self.docstatus = cint(self.docstatus)
def _get_missing_mandatory_fields(self):
"""Get mandatory fields that do not have any values"""
def get_msg(df):
if df.fieldtype == "Table":
return "{}: {}: {}".format(_("Error"), _("Data missing in table"), _(df.label))
elif self.parentfield:
return "{}: {} #{}: {}: {}".format(_("Error"), _("Row"), self.idx,
_("Value missing for"), _(df.label))
else:
return "{}: {}: {}".format(_("Error"), _("Value missing for"), _(df.label))
missing = []
for df in self.meta.get("fields", {"reqd": 1}):
if self.get(df.fieldname) in (None, []) or not strip_html(cstr(self.get(df.fieldname))).strip():
missing.append((df.fieldname, get_msg(df)))
return missing
def get_invalid_links(self, is_submittable=False):
def get_msg(df, docname):
if self.parentfield:
return "{} #{}: {}: {}".format(_("Row"), self.idx, _(df.label), docname)
else:
return "{}: {}".format(_(df.label), docname)
invalid_links = []
cancelled_links = []
for df in self.meta.get_link_fields() + self.meta.get("fields",
{"fieldtype":"Dynamic Link"}):
docname = self.get(df.fieldname)
if docname:
if df.fieldtype=="Link":
doctype = df.options
if not doctype:
frappe.throw(_("Options not set for link field {0}").format(df.fieldname))
else:
doctype = self.get(df.options)
if not doctype:
frappe.throw(_("{0} must be set first").format(self.meta.get_label(df.options)))
# MySQL is case insensitive. Preserve case of the original docname in the Link Field.
value = frappe.db.get_value(doctype, docname)
setattr(self, df.fieldname, value)
if not value:
invalid_links.append((df.fieldname, docname, get_msg(df, docname)))
elif (df.fieldname != "amended_from"
and (is_submittable or self.meta.is_submittable) and frappe.get_meta(doctype).is_submittable
and cint(frappe.db.get_value(doctype, docname, "docstatus"))==2):
cancelled_links.append((df.fieldname, docname, get_msg(df, docname)))
return invalid_links, cancelled_links
def _validate_selects(self):
if frappe.flags.in_import:
return
for df in self.meta.get_select_fields():
if df.fieldname=="naming_series" or not (self.get(df.fieldname) and df.options):
continue
options = (df.options or "").split("\n")
# if only empty options
if not filter(None, options):
continue
# strip and set
self.set(df.fieldname, cstr(self.get(df.fieldname)).strip())
value = self.get(df.fieldname)
if value not in options and not (frappe.flags.in_test and value.startswith("_T-")):
# show an elaborate message
prefix = _("Row #{0}:").format(self.idx) if self.get("parentfield") else ""
label = _(self.meta.get_label(df.fieldname))
comma_options = '", "'.join(_(each) for each in options)
frappe.throw(_('{0} {1} cannot be "{2}". It should be one of "{3}"').format(prefix, label,
value, comma_options))
def _validate_constants(self):
if frappe.flags.in_import:
return
constants = [d.fieldname for d in self.meta.get("fields", {"set_only_once": 1})]
if constants:
values = frappe.db.get_value(self.doctype, self.name, constants, as_dict=True)
for fieldname in constants:
if self.get(fieldname) != values.get(fieldname):
frappe.throw(_("Value cannot be changed for {0}").format(self.meta.get_label(fieldname)),
frappe.CannotChangeConstantError)
def _validate_update_after_submit(self):
current = frappe.db.get_value(self.doctype, self.name, "*", as_dict=True)
for key, value in current.iteritems():
df = self.meta.get_field(key)
if df and not df.allow_on_submit and (self.get(key) or value) and self.get(key) != value:
frappe.throw(_("Not allowed to change {0} after submission").format(df.label),
frappe.UpdateAfterSubmitError)
def get_formatted(self, fieldname, doc=None, currency=None):
from frappe.utils.formatters import format_value
return format_value(self.get(fieldname), self.meta.get_field(fieldname),
doc=doc or self, currency=currency)
def _filter(data, filters, limit=None):
"""pass filters as:
{"key": "val", "key": ["!=", "val"],
"key": ["in", "val"], "key": ["not in", "val"], "key": "^val",
"key" : True (exists), "key": False (does not exist) }"""
out = []
for d in data:
add = True
for f in filters:
fval = filters[f]
if fval is True:
fval = ("not None", fval)
elif fval is False:
fval = ("None", fval)
elif not isinstance(fval, (tuple, list)):
if isinstance(fval, basestring) and fval.startswith("^"):
fval = ("^", fval[1:])
else:
fval = ("=", fval)
if not frappe.compare(getattr(d, f, None), fval[0], fval[1]):
add = False
break
if add:
out.append(d)
if limit and (len(out)-1)==limit:
break
return out
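# Hedged usage sketch (field values below are illustrative): given a list of child
# documents, the filter syntax documented above could be applied as
#   rows = _filter(doc.get("items"), {"docstatus": 0, "item_code": "^STO-"}, limit=10)
# keeping rows whose docstatus equals 0 and whose item_code starts with "STO-",
# and stopping once the limit is reached.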
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This module is for communicating with im.kayac.com.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import exceptions
from . import constants
import future.standard_library
future.standard_library.install_aliases()
import builtins
import urllib.parse
import urllib.request
import hashlib
import contextlib
import json
import urllib.error
import logging
#: The logger for this module.
api_logger = logging.getLogger(__name__)
def create_post_request(url, params, encoding=constants.KAYACIM_ENCODING):
"""URL-encode the parameters and create a POST request.
:param url: The URL where a POST request should be sent.
:type url: str (on Python 3) or unicode (on Python 2)
:param dict params: The dictionary representing the parameters,
whose values should be str (on Python 3) or
unicode (on Python 2).
:param encoding: The encoding to be used.
:type encoding: str (on Python 3) or unicode (on Python 2)
:return: an HTTP POST request
:rtype: :class:`urllib.request.Request` (on Python 3) or
:class:`future.backports.urllib.request.Request`
(on Python 2)
"""
# Encode strings
enc_params = dict((k, v.encode(encoding)) for k, v in params.items())
# URL-encode parameters
api_logger.debug("URL-encoding the parameters.")
urlenc_params = urllib.parse.urlencode(enc_params).encode(encoding)
# Create a request
api_logger.debug("Creating an HTTP POST request.")
return urllib.request.Request(url=url, data=urlenc_params)
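# Hedged sketch: the helper above is typically fed the account URL plus a parameter
# dict, e.g.
#   req = create_post_request(constants.KAYACIM_URL + "someuser", {"message": "hello"})
# which returns a urllib.request.Request whose POST body is the URL-encoded,
# byte-encoded form of the parameters.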
def generate_signature(data, encoding=constants.KAYACIM_ENCODING):
"""Generate the SHA-1 digest of the given string.
:param data: The string to be processed.
:type data: str (on Python 3) or unicode (on Python 2)
:return: the SHA-1 digest
:rtype: str (on Python 3) or
:class:`future.types.newstr.newstr` (on Python 2)
"""
api_logger.debug("Generating a SHA-1 digest.")
return builtins.str(hashlib.sha1(data.encode(encoding)
).hexdigest())
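# Hedged sketch: for the "secret" authorization method, im.kayac.com expects the
# SHA-1 hex digest of message + secret key, so (with a hypothetical key)
#   sig = generate_signature("hello" + "my-secret-key")
# yields the same value as hashlib.sha1("hellomy-secret-key".encode("utf-8")).hexdigest()
# when constants.KAYACIM_ENCODING is UTF-8.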
class KayacIMAPI(object):
"""Class for accessing im.kayac.com API.
:param username: The username for your account.
:type username: str (on Python 3) or unicode (on Python 2)
:param method: (optional) The authorization method. Choose
from
:data:`pykayacim.constants.KAYACIM_METHODS`.
:type method: str (on Python 3) or unicode (on Python 2)
:param key: (optional) The password or secret key.
:type key: str (on Python 3) or unicode (on Python 2)
:raises pykayacim.exceptions.PyKayacIMMethodError: if an
unavailable
method is
specified
or the
provided
information is
insufficient
"""
def __init__(self, username, method="none", key=None):
#: The username for your im.kayac.com account.
self.username = username
#: The URL where POST requests should be sent.
self.post_url = constants.KAYACIM_URL + self.username
if method in constants.KAYACIM_METHODS:
#: The authorization method im.kayac.com accepts.
self.method = method
else:
api_logger.critical(
"Unavailable method: '{method}'".format(method=method))
raise exceptions.PyKayacIMMethodError(
details="The method '{method}' is unavailable.".format(
method=method))
if self.method != "none":
if key is not None:
#: The password or secret key.
self.key = key
else:
api_logger.critical("Missing parameter: 'key'")
raise exceptions.PyKayacIMMethodError(
details="Provide 'key' for '{method}'.".format(
method=method))
#: The parameters for a POST request.
self.post_params = dict()
#: The object representing the request sent to im.kayac.com.
self.post_request = None
#: The dictionary representing the response from im.kayac.com.
self.post_response = None
api_logger.debug("Successfully initialized a KayacIMAPI instance.")
def prepare_parameters(self, message, handler=None):
"""Creates a dictionary representing the provided parameters.
This method is called by
:meth:`pykayacim.api.KayacIMAPI.send`, and does not need to
be called directly.
:param message: The message which should be sent.
:type message: str (on Python 3) or unicode (on Python 2)
:param handler: (optional) The URI scheme for iPhone
applications, which starts with "mailto:"
for example.
"""
self.post_params["message"] = message
if handler is not None:
self.post_params["handler"] = handler
if self.method == "password":
self.post_params["password"] = self.key
elif self.method == "secret":
self.post_params["sig"] = generate_signature(
message + self.key)
api_logger.debug("Prepared the parameters for the POST request.")
def resend(self):
"""Resend the previous message.
:raises pykayacim.exceptions.PyKayacIMAPIError: if im.kayac.com
reports an error
:raises pykayacim.exceptions.PyKayacIMCommunicationError: if
connection
with
im.kayac.com
fails
:raises pykayacim.exceptions.PyKayacIMMessageError: if no message was
sent previously
"""
if self.post_request is None:
api_logger.error(
"No message was sent to {username} previously.".format(
username=self.username))
raise exceptions.PyKayacIMMessageError(
details="No message was sent to {username} previously.".format(
username=self.username))
api_logger.debug("Connecting: {url}".format(url=self.post_url))
try:
with contextlib.closing(
urllib.request.urlopen(self.post_request)) as res:
api_logger.debug("Analyzing the response.")
self.post_response = json.loads(
res.read().decode(constants.KAYACIM_ENCODING))
except urllib.error.URLError as e:
api_logger.exception("Communication failed: %s", e)
raise exceptions.PyKayacIMCommunicationError(
reason=builtins.str(e.reason))
except ValueError as e:
api_logger.exception("Invalid response: %s", e)
raise exceptions.PyKayacIMAPIError(
errmsg="Invalid response from im.kayac.com.")
else:
if self.post_response["result"] != u"posted":
errmsg = self.post_response["error"]
api_logger.error("API Error: {errmsg}".format(errmsg=errmsg))
raise exceptions.PyKayacIMAPIError(errmsg=errmsg)
else:
api_logger.info("Sent the notification to {username}".format(
username=self.username))
def send(self, message, handler=None):
"""Send a push notification via im.kayac.com.
:param message: The message which should be sent.
:type message: str (on Python 3) or unicode (on Python 2)
:param handler: (optional) The URI scheme for iPhone applications,
which starts with "mailto:" for example.
:type handler: str (on Python 3) or unicode (on Python 2)
:raises pykayacim.exceptions.PyKayacIMAPIError: if im.kayac.com
reports an error
:raises pykayacim.exceptions.PyKayacIMCommunicationError: if
connection
with
im.kayac.com
fails
"""
self.prepare_parameters(message=message, handler=handler)
self.post_request = create_post_request(
url=self.post_url, params=self.post_params)
self.resend()
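# Hedged usage sketch (username and key below are placeholders):
#   api = KayacIMAPI("example_user", method="secret", key="my-secret-key")
#   api.send("Build finished", handler="mailto:me@example.com")
#   api.resend()  # re-posts the same prepared request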
|
|
#!/usr/bin/env python
# This script uses the following Unicode tables:
# - UnicodeData.txt
from collections import namedtuple
import csv
import os
import subprocess
NUM_CODEPOINTS=0x110000
def to_ranges(iter):
current = None
for i in iter:
if current is None or i != current[1] or i in (0x10000, 0x20000):
if current is not None:
yield tuple(current)
current = [i, i + 1]
else:
current[1] += 1
if current is not None:
yield tuple(current)
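# Hedged sketch: to_ranges collapses a sorted iterable of codepoints into half-open
# (start, end) ranges, always breaking at the 0x10000 and 0x20000 plane boundaries:
#   list(to_ranges([1, 2, 3, 7]))  ->  [(1, 4), (7, 8)]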
def get_escaped(codepoints):
for c in codepoints:
if (c.class_ or "Cn") in "Cc Cf Cs Co Cn Zl Zp Zs".split() and c.value != ord(' '):
yield c.value
def get_file(f):
try:
return open(os.path.basename(f))
except FileNotFoundError:
subprocess.run(["curl", "-O", f], check=True)
return open(os.path.basename(f))
Codepoint = namedtuple('Codepoint', 'value class_')
def get_codepoints(f):
r = csv.reader(f, delimiter=";")
prev_codepoint = 0
class_first = None
for row in r:
codepoint = int(row[0], 16)
name = row[1]
class_ = row[2]
if class_first is not None:
if not name.endswith("Last>"):
raise ValueError("Missing Last after First")
for c in range(prev_codepoint + 1, codepoint):
yield Codepoint(c, class_first)
class_first = None
if name.endswith("First>"):
class_first = class_
yield Codepoint(codepoint, class_)
prev_codepoint = codepoint
if class_first is not None:
raise ValueError("Missing Last after First")
for c in range(prev_codepoint + 1, NUM_CODEPOINTS):
yield Codepoint(c, None)
def compress_singletons(singletons):
uppers = [] # (upper, # items in lowers)
lowers = []
for i in singletons:
upper = i >> 8
lower = i & 0xff
if len(uppers) == 0 or uppers[-1][0] != upper:
uppers.append((upper, 1))
else:
upper, count = uppers[-1]
uppers[-1] = upper, count + 1
lowers.append(lower)
return uppers, lowers
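# Hedged sketch: each singleton codepoint is split into (upper byte, lower byte), e.g.
#   compress_singletons([0x0100, 0x0101, 0x0205])
#   ->  uppers = [(0x01, 2), (0x02, 1)], lowers = [0x00, 0x01, 0x05]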
def compress_normal(normal):
# lengths 0x00..0x7f are encoded as 00, 01, ..., 7e, 7f
# lengths 0x80..0x7fff are encoded as 80 80, 80 81, ..., ff fe, ff ff
compressed = [] # [truelen, (truelenaux), falselen, (falselenaux)]
prev_start = 0
for start, count in normal:
truelen = start - prev_start
falselen = count
prev_start = start + count
assert truelen < 0x8000 and falselen < 0x8000
entry = []
if truelen > 0x7f:
entry.append(0x80 | (truelen >> 8))
entry.append(truelen & 0xff)
else:
entry.append(truelen & 0x7f)
if falselen > 0x7f:
entry.append(0x80 | (falselen >> 8))
entry.append(falselen & 0xff)
else:
entry.append(falselen & 0x7f)
compressed.append(entry)
return compressed
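# Hedged sketch of the variable-length encoding above: a run length of 0x45 is
# emitted as the single byte 0x45, while 0x1234 becomes the two bytes
# 0x92, 0x34 (i.e. 0x80 | (0x1234 >> 8), then 0x1234 & 0xff).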
def print_singletons(uppers, lowers, uppersname, lowersname):
print("#[rustfmt::skip]")
print("const {}: &[(u8, u8)] = &[".format(uppersname))
for u, c in uppers:
print(" ({:#04x}, {}),".format(u, c))
print("];")
print("#[rustfmt::skip]")
print("const {}: &[u8] = &[".format(lowersname))
for i in range(0, len(lowers), 8):
print(" {}".format(" ".join("{:#04x},".format(l) for l in lowers[i:i+8])))
print("];")
def print_normal(normal, normalname):
print("#[rustfmt::skip]")
print("const {}: &[u8] = &[".format(normalname))
for v in normal:
print(" {}".format(" ".join("{:#04x},".format(i) for i in v)))
print("];")
def main():
file = get_file("https://www.unicode.org/Public/UNIDATA/UnicodeData.txt")
codepoints = get_codepoints(file)
CUTOFF=0x10000
singletons0 = []
singletons1 = []
normal0 = []
normal1 = []
extra = []
for a, b in to_ranges(get_escaped(codepoints)):
if a > 2 * CUTOFF:
extra.append((a, b - a))
elif a == b - 1:
if a & CUTOFF:
singletons1.append(a & ~CUTOFF)
else:
singletons0.append(a)
elif a == b - 2:
if a & CUTOFF:
singletons1.append(a & ~CUTOFF)
singletons1.append((a + 1) & ~CUTOFF)
else:
singletons0.append(a)
singletons0.append(a + 1)
else:
if a >= 2 * CUTOFF:
extra.append((a, b - a))
elif a & CUTOFF:
normal1.append((a & ~CUTOFF, b - a))
else:
normal0.append((a, b - a))
singletons0u, singletons0l = compress_singletons(singletons0)
singletons1u, singletons1l = compress_singletons(singletons1)
normal0 = compress_normal(normal0)
normal1 = compress_normal(normal1)
print("""\
// NOTE: The following code was generated by "src/libcore/unicode/printable.py",
// do not edit directly!
fn check(x: u16, singletonuppers: &[(u8, u8)], singletonlowers: &[u8], normal: &[u8]) -> bool {
let xupper = (x >> 8) as u8;
let mut lowerstart = 0;
for &(upper, lowercount) in singletonuppers {
let lowerend = lowerstart + lowercount as usize;
if xupper == upper {
for &lower in &singletonlowers[lowerstart..lowerend] {
if lower == x as u8 {
return false;
}
}
} else if xupper < upper {
break;
}
lowerstart = lowerend;
}
let mut x = x as i32;
let mut normal = normal.iter().cloned();
let mut current = true;
while let Some(v) = normal.next() {
let len = if v & 0x80 != 0 {
((v & 0x7f) as i32) << 8 | normal.next().unwrap() as i32
} else {
v as i32
};
x -= len;
if x < 0 {
break;
}
current = !current;
}
current
}
pub(crate) fn is_printable(x: char) -> bool {
let x = x as u32;
let lower = x as u16;
if x < 0x10000 {
check(lower, SINGLETONS0U, SINGLETONS0L, NORMAL0)
} else if x < 0x20000 {
check(lower, SINGLETONS1U, SINGLETONS1L, NORMAL1)
} else {\
""")
for a, b in extra:
print(" if 0x{:x} <= x && x < 0x{:x} {{".format(a, a + b))
print(" return false;")
print(" }")
print("""\
true
}
}\
""")
print()
print_singletons(singletons0u, singletons0l, 'SINGLETONS0U', 'SINGLETONS0L')
print_singletons(singletons1u, singletons1l, 'SINGLETONS1U', 'SINGLETONS1L')
print_normal(normal0, 'NORMAL0')
print_normal(normal1, 'NORMAL1')
if __name__ == '__main__':
main()
|
|
from abc import ABC, abstractmethod
import cvxpy as cvx
import numpy as np
from cvxpy import SolverError
class Relevance_CVXProblem(ABC):
def __repr__(self) -> str:
if self.isLowerBound:
lower = "Lower"
else:
lower = "Upper"
name = f"{lower}_{self.current_feature}_{self.__class__.__name__}"
state = ""
for s in self.init_hyperparameters.items():
state += f"{s[0]}:{s[1]}, "
for s in self.init_model_constraints.items():
state += f"{s[0]}:{s[1]}, "
state = "(" + state[:-2] + ")"
if self.isProbe:
prefix = f"Probe_{self.probeID}"
else:
prefix = ""
return prefix + name + state
def __init__(
self,
current_feature: int,
data: tuple,
hyperparameters,
best_model_constraints,
preset_model=None,
best_model_state=None,
probeID=-1,
**kwargs,
) -> None:
self._probeID = probeID
self._feature_relevance = None
self.isLowerBound = None
# General data
self.current_feature = current_feature
self.preset_model = preset_model
self.best_model_state = best_model_state
self.preprocessing_data(data, best_model_state)
# Initialize constraints
self._constraints = []
self._objective = None
self.w = None
self._init_constraints(hyperparameters, best_model_constraints)
if self.preset_model is not None:
self._add_preset_constraints(self.preset_model, best_model_constraints)
self.init_hyperparameters = hyperparameters
self.init_model_constraints = best_model_constraints
def preprocessing_data(self, data, best_model_state):
X, y = data
self.n = X.shape[0]
self.d = X.shape[1]
self.X = X
self.y = np.array(y)
@property
def constraints(self):
return self._constraints
def add_constraint(self, new):
self._constraints.append(new)
@property
def objective(self):
return self._objective
@property
def solved_relevance(self):
if self.is_solved:
return self.objective.value
else:
raise Exception("Problem not solved. No feature relevance computed.")
@property
def probeID(self):
return self._probeID
@property
def isProbe(self):
return self.probeID >= 0
@abstractmethod
def _init_constraints(self, parameters, init_model_constraints):
pass
@abstractmethod
def init_objective_UB(self, **kwargs):
pass
@abstractmethod
def init_objective_LB(self, **kwargs):
pass
@property
def cvx_problem(self):
return self._cvx_problem
@property
def is_solved(self):
if self._solver_status in self.accepted_status:
try:
val = self.objective.value
except ValueError:
return False
return True
else:
return False
@property
def accepted_status(self):
return ["optimal", "optimal_inaccurate"]
def solve(self) -> object:
# We init cvx problem here because pickling LP solver objects is problematic
# by deferring it to here, worker threads do the problem building themselves and we spare the serialization
self._cvx_problem = cvx.Problem(
objective=self.objective, constraints=self.constraints
)
try:
# print("Solve", self)
self._cvx_problem.solve(**self.solver_kwargs)
except SolverError:
# We ignore Solver Errors, which are common with our framework:
# We solve multiple problems per bound and choose a feasible solution later (see '_create_interval')
pass
self._solver_status = self._cvx_problem.status
# self._cvx_problem = None
return self
def _retrieve_result(self):
return self.current_feature, self.objective
@property
def solver_kwargs(self):
return {"verbose": False, "solver": "ECOS", "max_iters": 300}
def _add_preset_constraints(self, preset_model: dict, best_model_constraints):
for feature, current_preset in preset_model.items():
# Skip current feature
if feature == self.current_feature:
continue
# Skip unset values
if all(np.isnan(current_preset)):
continue
# a weight bigger than the optimal model L1 makes no sense
assert abs(current_preset[0]) <= best_model_constraints["w_l1"]
assert abs(current_preset[1]) <= best_model_constraints["w_l1"]
# We add a pair of constraints depending on sign of known coefficient
# this makes it possible to solve this as a convex problem
if current_preset[0] >= 0:
self.add_constraint(self.w[feature] >= current_preset[0])
self.add_constraint(self.w[feature] <= current_preset[1])
else:
self.add_constraint(self.w[feature] <= current_preset[0])
self.add_constraint(self.w[feature] >= current_preset[1])
@classmethod
def generate_lower_bound_problem(
cls,
best_hyperparameters,
init_constraints,
best_model_state,
data,
di,
preset_model,
probeID=-1,
):
problem = cls(
di,
data,
best_hyperparameters,
init_constraints,
preset_model=preset_model,
best_model_state=best_model_state,
probeID=probeID,
)
problem.init_objective_LB()
problem.isLowerBound = True
yield problem
@classmethod
def generate_upper_bound_problem(
cls,
best_hyperparameters,
init_constraints,
best_model_state,
data,
di,
preset_model,
probeID=-1,
):
for sign in [-1, 1]:
problem = cls(
di,
data,
best_hyperparameters,
init_constraints,
preset_model=preset_model,
best_model_state=best_model_state,
probeID=probeID,
)
problem.init_objective_UB(sign=sign)
problem.isLowerBound = False
yield problem
@classmethod
def aggregate_min_candidates(cls, min_problems_candidates):
vals = [candidate.solved_relevance for candidate in min_problems_candidates]
min_value = min(vals)
return min_value
@classmethod
def aggregate_max_candidates(cls, max_problems_candidates):
vals = [candidate.solved_relevance for candidate in max_problems_candidates]
max_value = max(vals)
return max_value
|
|
#!/usr/bin/env python
'''
ooiservices/app/main/data.py
Support for generating sample data
'''
__author__ = 'Andy Bird'
from flask import jsonify, request, current_app, url_for, Flask, make_response
from ooiservices.app.uframe import uframe as api
import numpy as np
import calendar
import time
from dateutil.parser import parse
from datetime import datetime
from ooiservices.app.main.errors import internal_server_error
from ooiservices.app import cache
import requests
# plotting helpers used by plot_time_series / get_time_label below (assumed imports)
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.ticker import FuncFormatter
import prettyplotlib as ppl
#ignore list for data fields
FIELDS_IGNORE = ["stream_name","quality_flag"]
COSMO_CONSTANT = 2208988800
def get_data(stream, instrument, field):
#get data from uframe
#-------------------
# m@c: 02/01/2015
#uframe url to get stream data from instrument:
# /sensor/user/inv/<stream_name>/<instrument_name>
#
#-------------------
#TODO: create better error handler if uframe is not online/responding
data = []
try:
url = current_app.config['UFRAME_URL'] + '/sensor/m2m/inv/' + stream + '/' + instrument
data = requests.get(url)
data = data.json()
except Exception,e:
return {'error':'uframe connection cannot be made:'+str(e)}
if len(data)==0:
return {'error':'no data available'}
hasStartDate = False
hasEndDate = False
if 'startdate' in request.args:
st_date = datetime.strptime(request.args['startdate'], "%Y-%m-%d %H:%M:%S")
hasStartDate = True
if 'enddate' in request.args:
ed_date = datetime.strptime(request.args['enddate'], "%Y-%m-%d %H:%M:%S")
hasEndDate = True
#got normal data plot
#create the data fields, assumes the same data fields throughout
d_row = data[0]
#data store
some_data = []
pref_timestamp = d_row["preferred_timestamp"]
#figure out the header rows
inital_fields = d_row.keys()
#move timestamp to the front
inital_fields.insert(0, inital_fields.pop(inital_fields.index(pref_timestamp)))
data_cols,data_field_list = _get_col_outline(data,pref_timestamp,inital_fields,field)
x = [ d[pref_timestamp] for d in data ]
y = [ d[field] for d in data ]
#generate dict for the response data
resp_data = {'x':x,
'y':y,
'data_length':len(x),
'x_field':pref_timestamp,
'y_field':field,
'dt_units':'seconds since 1900-01-01 00:00:00',
#'start_time' : datetime.datetime.fromtimestamp(data[0][pref_timestamp]).isoformat(),
#'end_time' : datetime.datetime.fromtimestamp(data[-1][pref_timestamp]).isoformat()
}
#return jsonify(**resp_data)
return resp_data
def gen_data(start_date, end_date, sampling_rate, mean, std_dev):
'''
Returns a dictionary that contains the x coordinate time and the y
coordinate which is random data normally distributed about the mean with
the specified standard deviation.
'''
time0 = calendar.timegm(parse(start_date).timetuple())
time1 = calendar.timegm(parse(end_date).timetuple())
dt = sampling_rate # obs per second
x = np.arange(time0, time1, dt)
y = np.random.normal(mean, std_dev, x.shape[0])
xy = np.array([x,y])
row_order_xy = xy.T
iso0 = datetime.utcfromtimestamp(time0).isoformat()
iso1 = datetime.utcfromtimestamp(time1).isoformat()
return {'size' : x.shape[0], 'start_time':iso0, 'end_time':iso1, 'cols':['x','y'], 'rows':row_order_xy.tolist()}
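# Hedged usage sketch:
#   sample = gen_data('2015-01-01', '2015-01-02', 60, 10.0, 2.0)
# sample['rows'] is then a list of [unix_time, value] pairs spaced 60 seconds apart,
# normally distributed about 10.0 with standard deviation 2.0.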
@cache.memoize(timeout=3600)
def plot_time_series(fig, ax, x, y, fill=False, title='', ylabel='',
title_font={}, axis_font={}, **kwargs):
if not title_font:
title_font = title_font_default
if not axis_font:
axis_font = axis_font_default
h = ppl.plot(ax, x, y, **kwargs)
ppl.scatter(ax, x, y, **kwargs)
get_time_label(ax, x)
fig.autofmt_xdate()
if ylabel:
ax.set_ylabel(ylabel, **axis_font)
if title:
ax.set_title(title, **title_font)
if 'degree' in ylabel:
ax.set_ylim([0, 360])
ax.grid(True)
if fill:
miny = min(ax.get_ylim())
ax.fill_between(x, y, miny+1e-7, facecolor = h[0].get_color(), alpha=0.15)
# plt.subplots_adjust(top=0.85)
plt.tight_layout()
def get_time_label(ax, dates):
'''
Custom date axis formatting
'''
def format_func(x, pos=None):
x = mdates.num2date(x)
if pos == 0:
fmt = '%Y-%m-%d %H:%M'
else:
fmt = '%H:%M'
label = x.strftime(fmt)
# label = label.rstrip("0")
# label = label.rstrip(".")
return label
day_delta = (max(dates)-min(dates)).days
if day_delta < 1:
ax.xaxis.set_major_formatter(FuncFormatter(format_func))
else:
# pass
major = mdates.AutoDateLocator()
formt = mdates.AutoDateFormatter(major, defaultfmt=u'%Y-%m-%d')
formt.scaled[1.0] = '%Y-%m-%d'
formt.scaled[30] = '%Y-%m'
formt.scaled[1./24.] = '%Y-%m-%d %H:%M:%S'
# formt.scaled[1./(24.*60.)] = FuncFormatter(format_func)
ax.xaxis.set_major_locator(major)
ax.xaxis.set_major_formatter(formt)
def _get_data_type(data_input):
'''
gets the data type in a format google charts understands
'''
if data_input is float or data_input is int:
return "number"
elif data_input is str or data_input is unicode:
return "string"
else:
return "unknown"
def _get_annotation(instrument_name, stream_name):
annotations = Annotation.query.filter_by(instrument_name=instrument_name, stream_name=stream_name).all()
return [annotation.to_json() for annotation in annotations]
def _get_col_outline(data,pref_timestamp,inital_fields,requested_field):
'''
gets the column outline for the google chart response, figures out what annotations are required where...
'''
data_fields = []
data_field_list= []
#used to count the fields, used for annotations
field_count = 1
#loop and generate initial col dict
for field in inital_fields:
if field == pref_timestamp:
d_type = "datetime"
elif field in FIELDS_IGNORE or str(field).endswith('_timestamp'):
continue
else:
if requested_field is not None:
if field == requested_field:
d_type = _get_data_type(type(data[0][field]))
else:
continue
else:
#map the data types to the correct data type for google charts
d_type = _get_data_type(type(data[0][field]))
data_field_list.append(field)
data_fields.append({"id": "",
"label": field,
"type": d_type})
return data_fields,data_field_list
def _get_annotation_content(annotation_field, pref_timestamp, annotations_list, d, data_field):
'''
creates the annotation content for a given field
'''
#right now x and y are timeseries data
for an in annotations_list:
if an['field_x'] == pref_timestamp or an['field_y'] == data_field:
# match against the x and y values
an_date_time = datetime.strptime(an['pos_x'], "%Y-%m-%dT%H:%M:%S")
an_int_date_time = int(an_date_time.strftime("%s"))
if int(d['fixed_dt']) == an_int_date_time:
if annotation_field == "annotation":
return {"v":an["title"]}
elif annotation_field == "annotationText":
return {"v":an['comment']}
#return nothing
return {"v":None,"f":None}
|
|
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from neon import NervanaObject
from neon.util.persist import load_class
import numpy as np
def get_param_list(layer_list):
'''
returns a flattened list of params
'''
plist = []
for l in layer_list:
ptuple = l.get_params()
plist.extend(ptuple) if isinstance(ptuple, list) else plist.append(ptuple)
return plist
class Optimizer(NervanaObject):
'''
Optimizers will take a param, update, and state
will be responsible for keeping track of a schedule
'''
def __init__(self, name=None):
super(Optimizer, self).__init__(name=name)
@classmethod
def gen_class(cls, pdict):
if 'schedule' in pdict:
typ = pdict['schedule']['type']
scls = load_class(typ)
sched = scls.gen_class(pdict['schedule']['config'])
pdict['schedule'] = sched
return cls(**pdict)
def optimize(self, layer_list, epoch):
raise NotImplementedError()
def clip_gradient_norm(self, param_list, clip_norm):
"""
Scale the magnitude of the network's gradients
Arguments:
param_list (list): a list of layer parameters
clip_norm (float, optional): Value to scale gradients'
magnitude by.
"""
scale_factor = 1
if clip_norm:
grad_list = [grad for (param, grad), states in param_list]
grad_square_sums = sum(self.be.sum(self.be.square(grad)) for grad in grad_list)
grad_norm = self.be.zeros((1, 1))
grad_norm[:] = self.be.sqrt(grad_square_sums)/self.be.bsz
scale_factor = clip_norm / max(float(grad_norm.get()), float(clip_norm))
return scale_factor
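# Hedged sketch: with clip_norm = 5.0 and a global gradient norm of 20.0 the returned
# scale_factor is 5.0 / 20.0 = 0.25; when the norm is below clip_norm the factor
# stays at 1.0, since scale_factor = clip_norm / max(norm, clip_norm).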
def clip_gradient_value(self, grad, clip_value):
"""
Element-wise clip a list of gradients.
Arguments:
grad (list): a list of gradients of a single layer
gradient_clip_value (float, optional): Value to element-wise clip
gradients.
Defaults to None.
"""
if clip_value:
return self.be.clip(grad, -abs(clip_value), abs(clip_value))
else:
return grad
class Schedule(NervanaObject):
"""
Learning rate schedule for constant or step learning rates.
By default implements a constant learning rate.
"""
def __init__(self, step_config=None, change=1.):
"""
Arguments:
step_config (int or list, optional): Configure the epoch step rate (int)
or step times (list of epoch indices). Defaults to None (constant).
change (float or list, optional): In step mode, learning rate is
multiplied by ``change ** steps``, where ``steps`` is the number of
steps in the step schedule that have passed. If ``change`` is a list,
``step_config`` must also be a list. Then at ``step[i]``, the
learning rate is set to ``change[i]``.
"""
if isinstance(step_config, list) and isinstance(change, list):
assert len(step_config) == len(change), "change and step_config must have the same " \
"length after step_config is deduplicated to do epoch-level LR assignment."
self.step_config = step_config
self.change = change
self.steps = 0
def get_learning_rate(self, learning_rate, epoch):
"""
Get the current learning rate given the epoch and initial rate
Arguments:
learning_rate (float): the initial learning rate
epoch (int): the current epoch, used to calculate the new effective learning rate.
"""
if isinstance(self.step_config, list) and isinstance(self.change, list):
if epoch in self.step_config:
# steps will store the current lr
self.steps = self.change[self.step_config.index(epoch)]
if self.steps == 0:
return learning_rate
else:
return self.steps
elif isinstance(self.step_config, int):
self.steps = np.floor((epoch + 1) / self.step_config)
elif isinstance(self.step_config, list):
self.steps = np.sum(epoch >= np.array(self.step_config))
return float(learning_rate * self.change ** self.steps)
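# Hedged sketch of the step behaviour above:
#   sched = Schedule(step_config=[2, 6], change=0.5)
#   sched.get_learning_rate(0.1, epoch)
# returns 0.1 for epochs 0-1, 0.05 for epochs 2-5, and 0.025 from epoch 6 onwards
# (learning_rate * change ** steps, where steps counts the boundaries passed).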
class ExpSchedule(Schedule):
"""
Exponential learning rate schedule.
Arguments:
decay (float): how much exponential decay to apply to the learning rate
"""
def __init__(self, decay):
self.decay = decay
def get_learning_rate(self, learning_rate, epoch):
return float(learning_rate / (1. + self.decay * epoch))
class PolySchedule(Schedule):
"""
Polynomial learning rate schedule.
Arguments:
total_epochs (int): total number of epochs over which to calculate interpolated decay
power (float): total decay parameter
"""
def __init__(self, total_epochs, power):
self.total_epochs = np.float32(total_epochs)
self.power = power
def get_learning_rate(self, learning_rate, epoch):
return float(learning_rate * (1. - (epoch / self.total_epochs)) ** self.power)
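# Hedged sketch: PolySchedule(total_epochs=10, power=2).get_learning_rate(0.1, 5)
# returns 0.1 * (1 - 5 / 10.) ** 2 == 0.025, decaying towards zero at the last epoch.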
class GradientDescentMomentum(Optimizer):
"""
Stochastic gradient descent with momentum
"""
def __init__(self, learning_rate, momentum_coef, stochastic_round=False,
wdecay=0.0, gradient_clip_norm=None, gradient_clip_value=None,
name=None, schedule=Schedule()):
"""
Arguments:
learning_rate (float): the multiplicative coefficient of updates
momentum_coef (float): the coefficient of momentum
stochastic_round (bool, optional): Set this to True for stochastic
rounding. If False (default)
rounding will be to nearest. If
True use default width
stochastic rounding. Note that
this only affects the GPU
backend.
wdecay (float, optional): Amount of weight decay. Defaults to 0
gradient_clip_norm (float, optional): Value to scale gradients'
magnitude by.
Defaults to None.
gradient_clip_value (float, optional): Value to element-wise clip
gradients.
Defaults to None.
name (str, optional): the optimizer's layer's pretty-print name.
Defaults to "gdm".
schedule (neon.optimizers.optimizer.Schedule, optional): Learning
rate schedule. Defaults to a constant learning rate.
"""
super(GradientDescentMomentum, self).__init__(name=name)
self.learning_rate, self.momentum_coef = (learning_rate, momentum_coef)
self.gradient_clip_norm = gradient_clip_norm
self.gradient_clip_value = gradient_clip_value
self.wdecay = wdecay
self.schedule = schedule
self.stochastic_round = stochastic_round
def optimize(self, layer_list, epoch):
"""
Apply the learning rule to all the layers and update the states.
Arguments:
layer_list (list): a list of Layer objects to optimize.
epoch (int): the current epoch, needed for the Schedule object.
"""
lrate = self.schedule.get_learning_rate(self.learning_rate, epoch)
param_list = get_param_list(layer_list)
scale_factor = self.clip_gradient_norm(param_list, self.gradient_clip_norm)
for (param, grad), states in param_list:
param.rounding = self.stochastic_round
if len(states) == 0 and self.momentum_coef != 0:
states.append(self.be.zeros_like(grad))
grad = grad / self.be.bsz
grad = self.clip_gradient_value(grad, self.gradient_clip_value)
if self.momentum_coef == 0:
velocity = - lrate * (scale_factor * grad + self.wdecay * param)
else:
velocity = states[0]
velocity[:] = velocity * self.momentum_coef \
- lrate * (scale_factor * grad + self.wdecay * param)
param[:] = param + velocity
class RMSProp(Optimizer):
"""
Root Mean Square propagation.
"""
def __init__(self, stochastic_round=False, decay_rate=0.95, learning_rate=2e-3, epsilon=1e-6,
gradient_clip_norm=None, gradient_clip_value=None, name=None,
schedule=Schedule()):
"""
Arguments:
stochastic_round (bool): Set this to True for stochastic rounding.
If False rounding will be to nearest.
If True will perform stochastic rounding using default width.
Only affects the gpu backend.
decay_rate (float): decay rate of states
learning_rate (float): the multiplicative coefficient of updates
epsilon (float): smoothing epsilon to avoid divide by zeros
gradient_clip_norm (float, optional): Value to scale gradients'
magnitude by.
Defaults to None.
gradient_clip_value (float, optional): Value to element-wise clip
gradients.
Defaults to None.
schedule (neon.optimizers.optimizer.Schedule, optional): Learning rate schedule.
Defaults to a constant.
Notes:
Only constant learning rate is supported currently.
"""
super(RMSProp, self).__init__(name=name)
self.state_list = None
self.epsilon = epsilon
self.decay_rate = decay_rate
self.learning_rate = learning_rate
self.schedule = schedule
self.gradient_clip_norm = gradient_clip_norm
self.gradient_clip_value = gradient_clip_value
self.stochastic_round = stochastic_round
def optimize(self, layer_list, epoch):
"""
Apply the learning rule to all the layers and update the states.
Arguments:
layer_list (list): a list of Layer objects to optimize.
epoch (int): the current epoch, needed for the Schedule object.
"""
lrate = self.schedule.get_learning_rate(self.learning_rate, epoch)
epsilon, decay = (self.epsilon, self.decay_rate)
param_list = get_param_list(layer_list)
scale_factor = self.clip_gradient_norm(param_list, self.gradient_clip_norm)
for (param, grad), states in param_list:
param.rounding = self.stochastic_round
if len(states) == 0:
states.append(self.be.zeros_like(grad))
grad = grad / self.be.bsz
grad = self.clip_gradient_value(grad, self.gradient_clip_value)
# update state
state = states[0]
state[:] = decay * state + self.be.square(grad) * (1.0 - decay)
param[:] = param \
- (scale_factor * grad * lrate) / (self.be.sqrt(state + epsilon) + epsilon)
class Adagrad(Optimizer):
"""
AdaGrad learning rule updates. See Duchi2011 for instance
"""
def __init__(self, stochastic_round=False, learning_rate=0.01, epsilon=1e-6,
gradient_clip_norm=None, gradient_clip_value=None, name=None):
"""
Arguments:
stochastic_round (bool): Set this to True for stochastic rounding.
If False rounding will be to nearest.
If True will perform stochastic rounding using default width.
Only affects the gpu backend.
learning_rate (float): the multiplicative coefficient of updates
epsilon (float): smoothing epsilon to avoid divide by zeros
gradient_clip_norm (float, optional): Value to scale gradients'
magnitude by.
Defaults to None.
gradient_clip_value (float, optional): Value to element-wise clip
gradients.
Defaults to None.
Notes:
Only constant learning rate is supported currently.
"""
super(Adagrad, self).__init__(name=name)
self.state_list = None
self.epsilon = epsilon
self.learning_rate = learning_rate
self.gradient_clip_norm = gradient_clip_norm
self.gradient_clip_value = gradient_clip_value
self.stochastic_round = stochastic_round
def optimize(self, layer_list, epoch):
"""
Apply the learning rule to all the layers and update the states.
Arguments:
layer_list (list): a list of Layer objects to optimize.
epoch (int): the current epoch, needed for the Schedule object.
"""
lrate, epsilon = (self.learning_rate, self.epsilon)
param_list = get_param_list(layer_list)
scale_factor = self.clip_gradient_norm(param_list, self.gradient_clip_norm)
for (param, grad), states in param_list:
param.rounding = self.stochastic_round
if len(states) == 0:
states.append(self.be.zeros_like(grad))
grad = grad / self.be.bsz
grad = self.clip_gradient_value(grad, self.gradient_clip_value)
# update state
state = states[0]
state[:] = state + self.be.square(grad)
param[:] = param - (scale_factor * grad * lrate) / (self.be.sqrt(state + epsilon))
class Adadelta(Optimizer):
"""
Adadelta based learning rule updates.
See Zeiler2012 for instance.
"""
def __init__(self, stochastic_round=False, decay=0.95, epsilon=1e-6, name=None):
"""
Args:
stochastic_round (bool): Set this to True for stochastic rounding.
If False rounding will be to nearest.
If True will perform stochastic rounding using default width.
Only affects the gpu backend.
decay: decay parameter in Adadelta
epsilon: epsilon parameter in Adadelta
"""
super(Adadelta, self).__init__(name=name)
self.decay = decay
self.epsilon = epsilon
self.stochastic_round = stochastic_round
def optimize(self, layer_list, epoch):
"""
Apply the learning rule to all the layers and update the states.
Arguments:
param_list (list): a list of tuples of the form ((param, grad), state),
corresponding to parameters, grads,
and states of layers to be updated
epoch (int): the current epoch, needed for the Schedule object.
"""
epsilon, decay = (self.epsilon, self.decay)
param_list = get_param_list(layer_list)
for (param, grad), states in param_list:
param.rounding = self.stochastic_round
if len(states) == 0:
# E[Grad^2], E[Delt^2], updates
states.extend([self.be.zeros_like(grad) for i in range(3)])
grad = grad / self.be.bsz
states[0][:] = states[0] * decay + (1. - decay) * grad * grad
states[2][:] = self.be.sqrt((states[1] + epsilon) / (states[0] + epsilon)) * grad
states[1][:] = states[1] * decay + (1. - decay) * states[2] * states[2]
param[:] = param - states[2]
class Adam(Optimizer):
"""
Adam based learning rule updates. http://arxiv.org/pdf/1412.6980v8.pdf
"""
def __init__(self, stochastic_round=False, learning_rate=0.001, beta_1=0.9, beta_2=0.999,
epsilon=1e-8, name="adam"):
"""
Args:
stochastic_round (bool): Set this to True for stochastic rounding.
If False rounding will be to nearest.
If True will perform stochastic rounding using default width.
Only affects the gpu backend.
learning_rate (float): the multiplicative coefficient of updates
beta_1 (float): Adam parameter beta1
beta_2 (float): Adam parameter beta2
epsilon (float): numerical stability parameter
"""
super(Adam, self).__init__(name=name)
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.learning_rate = learning_rate
self.stochastic_round = stochastic_round
def optimize(self, layer_list, epoch):
"""
Apply the learning rule to all the layers and update the states.
Arguments:
param_list (list): a list of tuples of the form ((param, grad), state),
corresponding to parameters, grads, and states of layers to be updated
epoch (int): the current epoch, needed for the Schedule object.
"""
t = epoch + 1
l = self.learning_rate * self.be.sqrt(1 - self.beta_2 ** t) / (1 - self.beta_1 ** t)
param_list = get_param_list(layer_list)
for (param, grad), states in param_list:
param.rounding = self.stochastic_round
if len(states) == 0:
# running_1st_mom, running_2nd_mom
states.extend([self.be.zeros_like(grad) for i in range(2)])
grad = grad / self.be.bsz
m, v = states
m[:] = m * self.beta_1 + (1. - self.beta_1) * grad
v[:] = v * self.beta_2 + (1. - self.beta_2) * grad * grad
param[:] = param - l * m / (self.be.sqrt(v) + self.epsilon)
class MultiOptimizer(Optimizer):
"""
A wrapper class for using multiple Optimizers within the same model.
"""
def __init__(self, optimizer_mapping, name=None):
"""
Args:
optimizer_mapping (dict): dictionary specifying the mapping of layers to optimizers.
Key: Layer class name or Layer `name` attribute. The latter takes
precedence over the former for finer layer-to-layer control.
Don't name your layers ``'default'``. Value: the optimizer object to use for those
layers. For instance, ``{'default': optimizer1, 'Bias': optimizer2,
'special_bias': optimizer3}`` will use ``optimizer3`` for the layer named
``special_bias``, ``optimizer2`` for all other Bias layers, and ``optimizer1``
for all other layers.
"""
super(MultiOptimizer, self).__init__(name=name)
self.optimizer_mapping = optimizer_mapping
assert 'default' in self.optimizer_mapping, "Must specify a default " \
"optimizer in layer type to optimizer mapping"
self.map_list = None
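# Hedged usage sketch (the concrete optimizers chosen below are illustrative):
#   opt = MultiOptimizer({'default': GradientDescentMomentum(0.01, 0.9),
#                         'Bias': GradientDescentMomentum(0.02, 0.9),
#                         'special_bias': Adagrad(learning_rate=0.01)})
# A layer named 'special_bias' gets Adagrad, every other Bias layer gets the second
# optimizer, and all remaining layers fall back to 'default'.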
@classmethod
def gen_class(cls, pdict):
for key in pdict['optimizer_mapping']:
# these should be optimizers
typ = pdict['optimizer_mapping'][key]['type']
ocls = load_class(typ)
if 'config' not in pdict['optimizer_mapping'][key]:
pdict['optimizer_mapping'][key]['config'] = {}
conf = pdict['optimizer_mapping'][key]['config']
pdict['optimizer_mapping'][key] = ocls.gen_class(conf)
return cls(**pdict)
def get_description(self):
desc = {'type': self.modulenm}
desc['config'] = {'optimizer_mapping': {}}
for key in self.optimizer_mapping:
opt_desc = self.optimizer_mapping[key].get_description()
desc['config']['optimizer_mapping'][key] = opt_desc
return desc
def map_optimizers(self, layer_list):
"""
maps the optimizers to their corresponding layers
"""
map_list = dict()
for layer in layer_list:
classname = layer.__class__.__name__
name = layer.name
opt = None
if name in self.optimizer_mapping:
opt = self.optimizer_mapping[name]
elif classname in self.optimizer_mapping:
opt = self.optimizer_mapping[classname]
else:
opt = self.optimizer_mapping['default']
if opt not in map_list:
map_list[opt] = [layer]
else:
map_list[opt].append(layer)
return map_list
def reset_mapping(self, new_mapping):
"""
Pass this optimizer a new mapping, and on subsequent optimize call, the
mapping will be refreshed (since map_list will be recreated)
"""
self.optimizer_mapping = new_mapping
self.map_list = None
def optimize(self, layer_list, epoch):
"""
Determine which optimizer in the container should go with which layers,
then apply their optimize functions to those layers.
Notes:
We can recalculate ``map_list`` in case ``optimizer_mapping`` changes
during training.
"""
if self.map_list is None:
self.map_list = self.map_optimizers(layer_list)
for opt in self.map_list:
opt.optimize(self.map_list[opt], epoch)
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import glob
import json
import weakref
from collections import OrderedDict
import numpy as np
import scipy
import scipy.stats
import scipy.ndimage
from six import iteritems
import pyqtgraph as pg
from acq4.modules.Module import Module
from acq4.analysis.AnalysisModule import AnalysisModule
import acq4.util.DataManager as DataManager
import acq4.analysis.atlas as atlas
from acq4.util.Canvas.Canvas import Canvas
from acq4.util.Canvas import items
from acq4.util import Qt
import six
from six.moves import range
Ui_Form = Qt.importTemplate('.MosaicEditorTemplate')
class MosaicEditorModule(Module):
"""
The Mosaic Editor allows the user to bring in multiple images onto
a canvas, and manipulate the images, including adjusting contrast,
position, and alpha.
Images created in Acq4 that have position information will be
represented according to their x,y positions (but not the z).
Groups of images can be scaled together.
An image stack can be "flattened" with different denoising methods
- useful for a quick reconstruction of filled cells.
Images can be compared against an atlas for reference, if the atlas
data is loaded.
This tool is useful for combining images taken at different positions
with a camera or 2P imaging system.
The resulting images may be saved as SVG or PNG files.
Mosaic Editor makes extensive use of pyqtgraph Canvas methods.
"""
moduleDisplayName = "Mosaic Editor"
moduleCategory = "Analysis"
def __init__(self, manager, name, config):
Module.__init__(self, manager, name, config)
self.dataModel = None # left over from old analysis system..
self.editor = MosaicEditor(self)
self.win = MosaicEditorWindow(self.editor, name)
self.win.show()
def dataManager(self):
"""Return DataManager module used to set base directory
"""
return self.manager.getModule("Data Manager")
class MosaicEditorWindow(Qt.QWidget):
def __init__(self, mod, name):
Qt.QWidget.__init__(self)
self.layout = Qt.QGridLayout()
self.setLayout(self.layout)
self.layout.setContentsMargins(0, 0, 0, 0)
self.dockarea = pg.dockarea.DockArea()
self.layout.addWidget(self.dockarea)
self.mod = mod
elems = self.mod.listElements()
for name, el in iteritems(elems):
w = self.mod.getElement(name, create=True)
d = pg.dockarea.Dock(name=name, size=el.size())
if w is not None:
d.addWidget(w)
pos = el.pos()
if pos is None:
pos = ()
#print d, pos
if isinstance(pos, six.string_types):
pos = (pos,)
self.dockarea.addDock(d, *pos)
self.elements = elems
self.setWindowTitle(name)
class MosaicEditor(AnalysisModule):
# Version number for save format.
# increment minor version number for backward-compatible changes
# increment major version number for backward-incompatible changes
_saveVersion = (2, 0)
# Types that appear in the dropdown menu as addable items
# Use MosaicEditor.registerItemType to add to this list.
_addTypes = OrderedDict()
# used to allow external modules to add their own ui elements to the mosaic editor
_extensions = OrderedDict()
@classmethod
def addExtension(cls, name, spec):
"""Add a specification for a UI element to add to newly created MosaicEditor instances.
Format is:
{
'type': 'ctrl',
'builder': callable,
'pos': ('right', 'Canvas'),
'size': (200, 400),
}
Where *callable* will be called with the MosaicEditor as its argument and must return a QWidget
to be inserted into the UI.
"""
cls._extensions[name] = spec
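# Hedged sketch of registering an extension (the builder below is hypothetical):
#   def _makePanel(editor):
#       return Qt.QLabel("My panel")  # any QWidget works here
#   MosaicEditor.addExtension('My Panel', {
#       'type': 'ctrl',
#       'builder': _makePanel,
#       'pos': ('right', 'Canvas'),
#       'size': (200, 400),
#   })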
def __init__(self, host):
AnalysisModule.__init__(self, host=host)
self.items = weakref.WeakKeyDictionary()
self.files = weakref.WeakValueDictionary()
self.ctrl = Qt.QWidget()
self.ui = Ui_Form()
self.ui.setupUi(self.ctrl)
self.atlas = None
self.canvas = Canvas(name='MosaicEditor')
self._elements_ = OrderedDict([
('File Loader', {'type': 'fileInput', 'size': (200, 300), 'host': self}),
('Mosaic', {'type': 'ctrl', 'object': self.ctrl, 'pos': ('right',), 'size': (600, 100)}),
('Canvas', {'type': 'ctrl', 'object': self.canvas.ui.view, 'pos': ('bottom', 'Mosaic'), 'size': (600, 800)}),
('ItemList', {'type': 'ctrl', 'object': self.canvas.ui.canvasCtrlWidget, 'pos': ('right', 'Canvas'), 'size': (200, 400)}),
('ItemCtrl', {'type': 'ctrl', 'object': self.canvas.ui.canvasItemCtrl, 'pos': ('bottom', 'ItemList'), 'size': (200, 400)}),
])
for name, spec in self._extensions.items():
builder = spec.pop('builder', None)
if builder is not None:
spec['object'] = builder(self)
self._elements_[name] = spec
self.initializeElements()
self.clear(ask=False)
self.ui.fileLoader = self.getElement('File Loader', create=True)
self.ui.fileLoader.ui.fileTree.hide()
try:
self.ui.fileLoader.setBaseClicked() # get the currently selected directory in the DataManager
except:
pass
for a in atlas.listAtlases():
self.ui.atlasCombo.addItem(a)
# Add buttons to the canvas control panel
self.btnBox = Qt.QWidget()
self.btnLayout = Qt.QGridLayout()
self.btnLayout.setContentsMargins(0, 0, 0, 0)
self.btnBox.setLayout(self.btnLayout)
l = self.canvas.ui.gridLayout
l.addWidget(self.btnBox, l.rowCount(), 0, 1, l.columnCount())
self.addCombo = Qt.QComboBox()
self.addCombo.currentIndexChanged.connect(self._addItemChanged)
self.btnLayout.addWidget(self.addCombo, 0, 0, 1, 2)
self.addCombo.addItem('Add item..')
self.saveBtn = Qt.QPushButton("Save ...")
self.saveBtn.clicked.connect(self.saveClicked)
self.btnLayout.addWidget(self.saveBtn, 1, 0)
self.clearBtn = Qt.QPushButton("Clear All")
self.clearBtn.clicked.connect(self._handleClearBtnClick)
self.btnLayout.addWidget(self.clearBtn, 1, 1)
self.canvas.sigItemTransformChangeFinished.connect(self.itemMoved)
self.ui.atlasCombo.currentIndexChanged.connect(self.atlasComboChanged)
self.ui.normalizeBtn.clicked.connect(self.normalizeImages)
self.ui.tileShadingBtn.clicked.connect(self.rescaleImages)
self.ui.mosaicApplyScaleBtn.clicked.connect(self.updateScaling)
self.ui.mosaicFlipLRBtn.clicked.connect(self.flipLR)
self.ui.mosaicFlipUDBtn.clicked.connect(self.flipUD)
self.imageMax = 0.0
for menuString in self._addTypes:
self.addCombo.addItem(menuString)
@classmethod
def registerItemType(cls, itemclass, menuString=None):
"""Add an item type to the list of addable items.
"""
if menuString is None:
menuString = itemclass.typeName()
if itemclass.__name__ not in items.itemTypes():
items.registerItemType(itemclass)
cls._addTypes[menuString] = itemclass.__name__
def _addItemChanged(self, index):
# User requested to create and add a new item
if index <= 0:
return
itemtype = self._addTypes[self.addCombo.currentText()]
self.addCombo.setCurrentIndex(0)
self.addItem(type=itemtype)
def _handleClearBtnClick(self, checked):
self.clear(ask=True)
def atlasComboChanged(self, ind):
if ind == 0:
self.closeAtlas()
return
name = self.ui.atlasCombo.currentText()
self.loadAtlas(name)
def closeAtlas(self):
if self.atlas is not None:
self.atlas.close()
self.atlas = None
while True:
ch = self.ui.atlasLayout.takeAt(0)
if ch is None:
break
ch = ch.widget()
ch.hide()
ch.setParent(None)
def loadAtlas(self, name):
name = str(name)
self.closeAtlas()
cls = atlas.getAtlasClass(name)
obj = cls()
ctrl = obj.ctrlWidget(host=self)
self.ui.atlasLayout.addWidget(ctrl, 0, 0)
self.atlas = ctrl
def loadFileRequested(self, files):
if files is None:
return
for f in files:
if f.shortName().endswith('.mosaic'):
self.loadStateFile(f.name())
continue
if f in self.files: ## Do not allow loading the same file more than once
item = self.files[f]
item.show() # just show the file; but do not load it
continue
if f.isFile(): # add specified files
item = self.addFile(f)
elif f.isDir(): # Directories are more complicated
if self.dataModel.dirType(f) == 'Cell': # If it is a cell, just add the cell "Marker" to the plot
item = self.canvas.addFile(f)
else: # in all other directory types, look for MetaArray files
filesindir = glob.glob(f.name() + '/*.ma')
for fd in filesindir: # add files in the directory (ma files: e.g., images, videos)
try:
fdh = DataManager.getFileHandle(fd) # open file to get handle.
except IOError:
continue # just skip file
item = self.addFile(fdh)
if len(filesindir) == 0: # add protocol sequences
item = self.addFile(f)
self.canvas.autoRange()
def addFile(self, f, name=None, inheritTransform=True):
"""Load a file and add it to the canvas.
The new item will inherit the user transform from the previous item
(chronologically) if it does not already have a user transform specified.
"""
item = self.canvas.addFile(f, name=name)
self.canvas.selectItem(item)
if isinstance(item, list):
item = item[0]
self.items[item] = f
self.files[f] = item
try:
item.timestamp = f.info()['__timestamp__']
except:
item.timestamp = None
## load or guess user transform for this item
if inheritTransform and not item.hasUserTransform() and item.timestamp is not None:
## Record the timestamp for this file, see what is the most recent transformation to copy
best = None
for i2 in self.items:
if i2 is item:
continue
if i2.timestamp is None :
continue
if i2.timestamp < item.timestamp:
if best is None or i2.timestamp > best.timestamp:
best = i2
if best is not None:
trans = best.saveTransform()
item.restoreTransform(trans)
return item
def addItem(self, item=None, type=None, **kwds):
"""Add an item to the MosaicEditor canvas.
May provide either *item* which is a CanvasItem or QGraphicsItem instance, or
*type* which is a string specifying the type of item to create and add.
"""
if isinstance(item, Qt.QGraphicsItem):
return self.canvas.addGraphicsItem(item, **kwds)
else:
return self.canvas.addItem(item, type, **kwds)
def rescaleImages(self):
"""
Apply corrections to the images and rescale the data.
This does the following:
1. compute mean image over entire selected group
2. smooth the mean image heavily.
3. rescale the images and correct for field flatness from the average image
4. apply the scale.
Use the min/max mosaic button to readjust the display scale after this
automatic operation if the scaling is not to your liking.
"""
nsel = len(self.canvas.selectedItems())
if nsel == 0:
return
# print dir(self.selectedItems()[0].data)
nxm = self.canvas.selectedItems()[0].data.shape
meanImage = np.zeros((nxm[0], nxm[1]))
nhistbins = 100
# generate a histogram of the global levels in the image (all images selected)
hm = np.histogram(np.dstack([x.data for x in self.canvas.selectedItems()]), nhistbins)
#$meanImage = np.mean(self.selectedItems().asarray(), axis=0)
n = 0
self.imageMax = 0.0
for i in range(nsel):
try:
meanImage = meanImage + np.array(self.canvas.selectedItems()[i].data)
imagemax = np.amax(np.amax(meanImage, axis=1), axis=0)
if imagemax > self.imageMax:
self.imageMax = imagemax
n = n + 1
except:
print('image i = %d failed' % i)
print('file name: ', self.canvas.selectedItems()[i].name)
print('expected shape of nxm: ', nxm)
print(' but got data shape: ', self.canvas.selectedItems()[i].data.shape)
meanImage = meanImage/n # np.mean(meanImage[0:n], axis=0)
filtwidth = np.floor(nxm[0]/10+1)
blimg = scipy.ndimage.filters.gaussian_filter(meanImage, filtwidth, order = 0, mode='reflect')
#pg.image(blimg)
m = np.argmax(hm[0]) # returns the index of the max count
# now rescale each individually
# rescaling is done against the global histogram, to keep the gain constant.
for i in range(nsel):
d = np.array(self.canvas.selectedItems()[i].data)
# hmd = np.histogram(d, 512) # return (count, bins)
xh = d.shape # capture the shape in case it does not match the expected nxm
# flatten the illumination using the blimg average illumination pattern
newImage = d # / blimg[0:xh[0], 0:xh[1]] # (d - imin)/(blimg - imin) # rescale image.
hn = np.histogram(newImage, bins = hm[1]) # use bins from global image
n = np.argmax(hn[0])
newImage = (hm[1][m]/hn[1][n])*newImage # rescale to the global max.
self.canvas.selectedItems()[i].updateImage(newImage)
# self.canvas.selectedItems()[i].levelRgn.setRegion([0, 2.0])
self.canvas.selectedItems()[i].levelRgn.setRegion([0., self.imageMax])
def normalizeImages(self):
self.canvas.view.autoRange()
def updateScaling(self):
"""
Set all the selected images to have the scaling in the editor bar (absolute values)
"""
nsel = len(self.canvas.selectedItems())
if nsel == 0:
return
for i in range(nsel):
self.canvas.selectedItems()[i].levelRgn.setRegion([self.ui.mosaicDisplayMin.value(),
self.ui.mosaicDisplayMax.value()])
def flipUD(self):
"""
flip each image array up/down, in place. Do not change position.
Note: arrays are rotated, so use lr to do ud, etc.
"""
nsel = len(self.canvas.selectedItems())
if nsel == 0:
return
for i in range(nsel):
self.canvas.selectedItems()[i].data = np.fliplr(self.canvas.selectedItems()[i].data)
self.canvas.selectedItems()[i].graphicsItem().updateImage(self.canvas.selectedItems()[i].data)
# print dir(self.canvas.selectedItems()[i])
def flipLR(self):
"""
Flip each image array left/right, in place. Do not change position.
"""
nsel = len(self.canvas.selectedItems())
if nsel == 0:
return
for i in range(nsel):
self.canvas.selectedItems()[i].data = np.flipud(self.canvas.selectedItems()[i].data)
self.canvas.selectedItems()[i].graphicsItem().updateImage(self.canvas.selectedItems()[i].data)
def itemMoved(self, canvas, item):
"""Save an item's transformation if the user has moved it.
This is saved in the 'userTransform' attribute; the original position data is not affected."""
fh = self.items.get(item, None)
if not hasattr(fh, 'setInfo'):
fh = None
try:
item.storeUserTransform(fh)
except Exception as ex:
if len(ex.args) > 1 and ex.args[1] == 1: ## this means the item has no file handle to store position
return
raise
def getLoadedFiles(self):
"""Return a list of all file handles that have been loaded"""
return self.items.values()
def clear(self, ask=True):
"""Remove all loaded data and reset to the default state.
If ask is True (and there are items loaded), then the user is prompted
before clearing. If the user declines, then this method returns False.
"""
if ask and len(self.items) > 0:
response = Qt.QMessageBox.question(self.clearBtn, "Warning", "Really clear all items?",
Qt.QMessageBox.Ok|Qt.QMessageBox.Cancel)
if response != Qt.QMessageBox.Ok:
return False
self.canvas.clear()
self.items.clear()
self.files.clear()
self.lastSaveFile = None
return True
def saveState(self, relativeTo=None):
"""Return a serializable representation of the current state of the MosaicEditor.
This includes the list of all items, their current visibility and
parameters, and the view configuration.
"""
items = list(self.canvas.items)
items.sort(key=lambda i: i.zValue())
return OrderedDict([
('contents', 'MosaicEditor_save'),
('version', self._saveVersion),
('rootPath', relativeTo.name() if relativeTo is not None else ''),
('items', [item.saveState(relativeTo=relativeTo) for item in items]),
('view', self.canvas.view.getState()),
])
def saveStateFile(self, filename):
dh = DataManager.getDirHandle(os.path.dirname(filename))
state = self.saveState(relativeTo=dh)
        with open(filename, 'w') as fh:
            json.dump(state, fh, indent=4, cls=Encoder)
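    # For reference, the resulting .mosaic file is JSON shaped roughly like the
    # following (field values here are illustrative, not taken from a real save):
    #
    #   {
    #     "contents": "MosaicEditor_save",
    #     "version": [...],
    #     "rootPath": "",
    #     "items": [{"name": "...", "type": "...", "filename": "...", ...}],
    #     "view": {...}
    #   }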
def restoreState(self, state, rootPath=None):
if state.get('contents', None) != 'MosaicEditor_save':
raise TypeError("This does not appear to be MosaicEditor save data.")
if state['version'][0] > self._saveVersion[0]:
raise TypeError("Save data has version %d.%d, but this MosaicEditor only supports up to version %d.x." % (state['version'][0], state['version'][1], self._saveVersion[0]))
if not self.clear():
return
root = state['rootPath']
if root == '':
# data was stored with no root path; filenames should be absolute
root = None
else:
            # data was stored with a root path; filenames are relative to the loaded file
root = DataManager.getHandle(rootPath)
loadfail = []
for itemState in state['items']:
fname = itemState.get('filename')
if fname is None:
# create item from scratch and restore state
itemtype = itemState.get('type')
if itemtype not in items.itemTypes():
# warn the user later on that we could not load this item
loadfail.append((itemState.get('name'), 'Unknown item type "%s"' % itemtype))
continue
item = self.addItem(type=itemtype, name=itemState['name'])
else:
# normalize file name for this OS
revsep = {'/':'\\', '\\':'/'}[os.path.sep]
fname = fname.replace(revsep, os.path.sep)
# create item by loading file and restore state
if root is None:
                    fh = DataManager.getHandle(fname)
else:
fh = root[fname]
item = self.addFile(fh, name=itemState['name'], inheritTransform=False)
item.restoreState(itemState)
self.canvas.view.setState(state['view'])
if len(loadfail) > 0:
msg = "\n".join(["%s: %s" % m for m in loadfail])
raise Exception("Failed to load some items:\n%s" % msg)
def loadStateFile(self, filename):
        with open(filename, 'r') as fh:
            state = json.load(fh)
self.restoreState(state, rootPath=os.path.dirname(filename))
def saveClicked(self):
base = self.ui.fileLoader.baseDir()
if self.lastSaveFile is None:
path = base.name()
else:
path = self.lastSaveFile
filename = Qt.QFileDialog.getSaveFileName(None, "Save mosaic file", path, "Mosaic files (*.mosaic)")
if filename == '':
return
if not filename.endswith('.mosaic'):
filename += '.mosaic'
self.lastSaveFile = filename
self.saveStateFile(filename)
def quit(self):
self.files = None
self.items = None
self.canvas.clear()
# Set up the default list of addable items
MosaicEditor.registerItemType(items.getItemType('GridCanvasItem'))
MosaicEditor.registerItemType(items.getItemType('RulerCanvasItem'))
MosaicEditor.registerItemType(items.getItemType('MarkersCanvasItem'))
MosaicEditor.registerItemType(items.getItemType('CellCanvasItem'))
MosaicEditor.registerItemType(items.getItemType('AtlasCanvasItem'))
class Encoder(json.JSONEncoder):
"""Used to clean up state for JSON export.
"""
def default(self, o):
if isinstance(o, np.integer):
return int(o)
        return json.JSONEncoder.default(self, o)
|
|
import uuid
from cassandra import ConsistencyLevel, WriteFailure, WriteTimeout
from distutils.version import LooseVersion
from dtest import Tester
from thrift_bindings.v22 import ttypes as thrift_types
from thrift_tests import get_thrift_client
from tools.decorators import since
KEYSPACE = "foo"
# These tests use the cassandra.test.fail_writes_ks option, which was only
# implemented in 2.2, so we skip it before then.
@since('2.2')
class TestWriteFailures(Tester):
"""
Tests for write failures in the replicas,
@jira_ticket CASSANDRA-8592.
    They require CURRENT_VERSION = VERSION_4 in CassandraDaemon.Server;
    otherwise these tests will fail.
"""
def setUp(self):
super(TestWriteFailures, self).setUp()
self.ignore_log_patterns = [
"Testing write failures", # The error to simulate a write failure
"ERROR WRITE_FAILURE", # Logged in DEBUG mode for write failures
"MigrationStage" # This occurs sometimes due to node down (because of restart)
]
self.supports_v5_protocol = self.cluster.version() >= LooseVersion('3.10')
self.expected_expt = WriteFailure
self.protocol_version = 5 if self.supports_v5_protocol else 4
self.replication_factor = 3
self.consistency_level = ConsistencyLevel.ALL
self.failing_nodes = [1, 2]
def tearDown(self):
super(TestWriteFailures, self).tearDown()
def _prepare_cluster(self, start_rpc=False):
self.cluster.populate(3)
if start_rpc:
self.cluster.set_configuration_options(values={'start_rpc': True})
self.cluster.start(wait_for_binary_proto=True)
self.nodes = self.cluster.nodes.values()
session = self.patient_exclusive_cql_connection(self.nodes[0], protocol_version=self.protocol_version)
session.execute("""
CREATE KEYSPACE IF NOT EXISTS %s
WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '%s' }
""" % (KEYSPACE, self.replication_factor))
session.set_keyspace(KEYSPACE)
session.execute("CREATE TABLE IF NOT EXISTS mytable (key text PRIMARY KEY, value text) WITH COMPACT STORAGE")
session.execute("CREATE TABLE IF NOT EXISTS countertable (key uuid PRIMARY KEY, value counter)")
for idx in self.failing_nodes:
node = self.nodes[idx]
node.stop()
node.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.test.fail_writes_ks=" + KEYSPACE])
if idx == 0:
session = self.patient_exclusive_cql_connection(node, protocol_version=self.protocol_version)
session.set_keyspace(KEYSPACE)
return session
def _perform_cql_statement(self, text):
session = self._prepare_cluster()
statement = session.prepare(text)
statement.consistency_level = self.consistency_level
if self.expected_expt is None:
session.execute(statement)
else:
with self.assertRaises(self.expected_expt) as cm:
session.execute(statement)
return cm.exception
def _assert_error_code_map_exists_with_code(self, exception, expected_code):
"""
Asserts that the given exception contains an error code map
where at least one node responded with some expected code.
This is meant for testing failure exceptions on protocol v5.
"""
self.assertIsNotNone(exception)
self.assertIsNotNone(exception.error_code_map)
expected_code_found = False
for error_code in exception.error_code_map.values():
if error_code == expected_code:
expected_code_found = True
break
self.assertTrue(expected_code_found, "The error code map did not contain " + str(expected_code))
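    # For reference (an assumption about the driver's failure map, not something
    # these tests assert beyond the code value): on protocol v5 a WriteFailure's
    # error_code_map maps replica addresses to failure reason codes, roughly:
    #
    #   {'127.0.0.2': 0x0000, '127.0.0.3': 0x0000}   # 0x0000 == unknown reason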
@since('2.2', max_version='2.2.x')
def test_mutation_v2(self):
"""
A failed mutation at v2 receives a WriteTimeout
"""
self.expected_expt = WriteTimeout
self.protocol_version = 2
self._perform_cql_statement("INSERT INTO mytable (key, value) VALUES ('key1', 'Value 1')")
def test_mutation_v3(self):
"""
A failed mutation at v3 receives a WriteTimeout
"""
self.expected_expt = WriteTimeout
self.protocol_version = 3
self._perform_cql_statement("INSERT INTO mytable (key, value) VALUES ('key1', 'Value 1')")
def test_mutation_v4(self):
"""
A failed mutation at v4 receives a WriteFailure
"""
self.expected_expt = WriteFailure
self.protocol_version = 4
self._perform_cql_statement("INSERT INTO mytable (key, value) VALUES ('key1', 'Value 1')")
@since('3.10')
def test_mutation_v5(self):
"""
A failed mutation at v5 receives a WriteFailure with an error code map containing error code 0x0000
"""
self.expected_expt = WriteFailure
self.protocol_version = 5
exc = self._perform_cql_statement("INSERT INTO mytable (key, value) VALUES ('key1', 'Value 1')")
self._assert_error_code_map_exists_with_code(exc, 0x0000)
def test_mutation_any(self):
"""
A WriteFailure is not received at consistency level ANY
even if all nodes fail because of hinting
"""
self.consistency_level = ConsistencyLevel.ANY
self.expected_expt = None
self.failing_nodes = [0, 1, 2]
self._perform_cql_statement("INSERT INTO mytable (key, value) VALUES ('key1', 'Value 1')")
def test_mutation_one(self):
"""
A WriteFailure is received at consistency level ONE
if all nodes fail
"""
self.consistency_level = ConsistencyLevel.ONE
self.failing_nodes = [0, 1, 2]
exc = self._perform_cql_statement("INSERT INTO mytable (key, value) VALUES ('key1', 'Value 1')")
if self.supports_v5_protocol:
self._assert_error_code_map_exists_with_code(exc, 0x0000)
def test_mutation_quorum(self):
"""
A WriteFailure is not received at consistency level
QUORUM if quorum succeeds
"""
self.consistency_level = ConsistencyLevel.QUORUM
self.expected_expt = None
self.failing_nodes = [2]
self._perform_cql_statement("INSERT INTO mytable (key, value) VALUES ('key1', 'Value 1')")
def test_batch(self):
"""
A failed batch receives a WriteFailure
"""
exc = self._perform_cql_statement("""
BEGIN BATCH
INSERT INTO mytable (key, value) VALUES ('key2', 'Value 2') USING TIMESTAMP 1111111111111111
INSERT INTO mytable (key, value) VALUES ('key3', 'Value 3') USING TIMESTAMP 1111111111111112
APPLY BATCH
""")
if self.supports_v5_protocol:
self._assert_error_code_map_exists_with_code(exc, 0x0000)
def test_counter(self):
"""
A failed counter mutation receives a WriteFailure
"""
_id = str(uuid.uuid4())
exc = self._perform_cql_statement("""
UPDATE countertable
SET value = value + 1
where key = {uuid}
""".format(uuid=_id))
if self.supports_v5_protocol:
self._assert_error_code_map_exists_with_code(exc, 0x0000)
def test_paxos(self):
"""
        A lightweight transaction (LWT) receives a WriteFailure
"""
exc = self._perform_cql_statement("INSERT INTO mytable (key, value) VALUES ('key1', 'Value 1') IF NOT EXISTS")
if self.supports_v5_protocol:
self._assert_error_code_map_exists_with_code(exc, 0x0000)
def test_paxos_any(self):
"""
        A lightweight transaction at consistency level ANY does not receive a WriteFailure
"""
self.consistency_level = ConsistencyLevel.ANY
self.expected_expt = None
self._perform_cql_statement("INSERT INTO mytable (key, value) VALUES ('key1', 'Value 1') IF NOT EXISTS")
@since('2.0', max_version='4')
def test_thrift(self):
"""
A thrift client receives a TimedOutException
"""
self._prepare_cluster(start_rpc=True)
self.expected_expt = thrift_types.TimedOutException
client = get_thrift_client()
client.transport.open()
client.set_keyspace(KEYSPACE)
with self.assertRaises(self.expected_expt):
client.insert('key1',
thrift_types.ColumnParent('mytable'),
thrift_types.Column('value', 'Value 1', 0),
thrift_types.ConsistencyLevel.ALL)
client.transport.close()
|
|
import random
import re
import sys
import time
from reusables.string_manipulation import int_to_words
from app.common_functions import formatted_items, comma_separated, parse_inventory_action, odds, remove_little_words, \
are_is, find_specifics, the_name
from app.main_classes import MapSquare, Player
from app.battle import Battle
from app.eat import Eat
class Adventure:
def __init__(self, name):
self.player = Player(name=name, location=(0, 0))
self.map = {(0, 0): MapSquare(name="spawn")}
self.player.square = self.map[(0, 0)]
self.map[(0, 0)].generate_items()
self.map[(0, 0)].generate_buildings(self.player)
self.map[(0, 0)].generate_mobs(self.player)
def help_me(self):
""" List of commands based on situation """
command_list = {"look around": True,
"go <southwest>": True,
"inventory": True,
"status": True,
"statistics": True,
"hit list": True,
"pick up <something>": bool(self.player.square.items and self.player.building_local is None),
"eat <something>": bool([x for x in self.player.inventory if x.category == "food"]),
"visit <a building>": bool(self.player.square.buildings),
"leave <the building>": bool(self.player.building_local),
"buy <something>": bool(self.player.building_local and self.player.building_local.wares),
"apply <for a job>": bool(self.player.building_local and self.player.building_local.jobs),
"battle <a mob>": bool(self.player.square.mobs or self.player.building_local.mobs),
"equip <something>": bool(self.player.inventory),
"ask <a mob> for a quest": bool(self.player.square.mobs or self.player.building_local.mobs),
"say hi to <a mob>": bool(self.player.square.mobs or self.player.building_local.mobs),
"turn in quest": bool(self.player.quest),
"go to work": bool(self.player.job),
"map": True,
"exit": True}
for command, status in command_list.items():
if status:
print(command)
def commands_manager(self, words):
""" Command handler. """
words = words.lower().split(" ")
commands = {
"^visit.*": (self.interact_with_building, [" ".join(words[1:])]),
"^take.*": (self.pick_up, [" ".join(words[1:])]),
"^eat.*": (self.eat_food, [" ".join(words[1:])]),
"^drink.*": (self.eat_food, [" ".join(words[1:])]),
"^buy.*": (self.buy, [" ".join(words[1:])]),
"^equip.*": (self.equip, [" ".join(words[1:])]),
"help": (self.help_me, []),
"^go (?!to work).*": (self.change_direction, [' '.join(words[1:]).strip()]),
"map": (self.player.square.map_picture, [self.map, self.player]),
".*turn in.*": (self.turn_in_quest, []),
"complete quest": (self.turn_in_quest, []),
"look around": (self.look_around, []),
".*look.*": (self.look_around, []),
"^work.*": (self.go_to_work, []),
"go to work": (self.go_to_work, []),
"leave": (self.leave_building, []),
"inventory": (print, [self.player.pretty_inventory()]),
"status": (print, [self.player.status()]),
"statistics": (self.player.statistics, []),
"hit list": (self.player.view_hit_list, []),
"ls": (print, ["What, you think this is Linux?"]),
"^attack": (self.battle_kickoff, [" ".join(words[1:])]),
"^fight": (self.battle_kickoff, [" ".join(words[1:])]),
"^battle": (self.battle_kickoff, [" ".join(words[1:])]),
".*say.*": (self.talk, [words]),
".*talk.*": (self.talk, [words]),
".*ask.*": (self.talk, [words]),
"^pick up.*": (self.pick_up, [" ".join(words[2:])]),
"^apply.*": (self.apply_for_job, [" ".join(words[1:])])
}
for k, v in commands.items():
if re.match(k, " ".join(words)):
v[0](*v[1])
return
if words[0] == "exit" and self.player.building_local:
self.leave_building()
elif words[0] == "exit" and self.player.building_local is None:
self.player.health = 0
print("Goodbye!")
# TODO save game before exiting
# TODO add inventory limit
else:
print("I don't know that command.")
def change_direction(self, direction):
""" Change direction """
if not direction or direction not in ["n", "ne", "nw", "s", "se", "sw", "e", "w", "north",
"northeast", "northwest", "south", "southeast", "southwest",
"east", "west", "up", "down", "left", "right"]:
print("Looks like you're headed nowhere fast!")
return
# TODO travel to distant squares
sys.stdout.write("Traveling . . .")
sys.stdout.flush()
count = 5
vehicles = [x.rarity for x in self.player.inventory if x.category == 'vehicle']
travel_time = 1 if self.player.speed_bonus is False else .8
if 'super rare' in vehicles:
travel_time = 0
elif 'rare' in vehicles:
travel_time = .1 if self.player.speed_bonus is False else 0
elif 'common' in vehicles:
travel_time = .2 if self.player.speed_bonus is False else .1
elif vehicles:
travel_time = .5 if self.player.speed_bonus is False else .4
while count > 0:
time.sleep(travel_time)
sys.stdout.write(" .")
sys.stdout.flush()
count -= 1
print()
self.leave_building()
x, y = self.player.location
if direction.lower() in ("n", "ne", "nw", "north", "northeast", "northwest", "up"):
y += 1
if direction.lower() in ("e", "ne", "se", "east", "northeast", "southeast", "right"):
x += 1
if direction.lower() in ("s", "se", "sw", "south", "southeast", "southwest", "down"):
y -= 1
if direction.lower() in ("w", "nw", "sw", "west", "northwest", "southwest", "left"):
x -= 1
new_coordinates = (x, y)
self.player.location = new_coordinates
if new_coordinates not in self.map.keys():
self.map[new_coordinates] = MapSquare()
self.map[new_coordinates].generate_buildings(self.player)
self.map[new_coordinates].generate_mobs(self.player)
self.map[new_coordinates].generate_items()
self.player.square = self.map[new_coordinates]
print(f"You are now located on map coordinates {new_coordinates}, which is {self.player.square.square_type}.")
self.look_around()
def look_around(self):
""" Describe the player's surroundings """
if self.player.building_local is None:
if self.player.square.items:
print(f"You can see {comma_separated(formatted_items(self.player.square.items))} near you.")
if self.player.square.buildings:
q = len(self.player.square.buildings)
if q == 1:
q = self.player.square.buildings[0].quantity
print(f"The building{'s' if q > 1 else ''} here {are_is(self.player.square.buildings)}.")
if self.player.square.mobs:
print(f"There {are_is(self.player.square.mobs)} here.")
if self.player.square.items == [] and self.player.square.buildings == [] and self.player.square.mobs == []:
print("Nothing seems to be nearby.")
else:
if self.player.building_local.wares:
wares = [f"{int_to_words(x.quantity)} {x.plural}" if x.quantity > 1 else x.name for x in self.player.building_local.wares]
print(f"This {self.player.building_local.name} has these items for sale: {comma_separated(wares)}")
if self.player.building_local.mobs:
print(f"There {are_is(self.player.building_local.mobs)} here.")
if self.player.building_local.jobs:
print(f"You can apply for the following open positions here: "
f"{comma_separated([x.name for x in self.player.building_local.jobs])}")
            if not (self.player.building_local.mobs or self.player.building_local.wares or
                    self.player.building_local.jobs):
                print("There isn't anything here.")
def irritate_the_locals(self, item):
""" Decide whether or not to agro mobs if the player tries to pick up a rare item.
Returns list of mobs or False
"""
if item.rarity in ('rare', 'super rare') and odds(2) and self.player.square.mobs:
angry_mob = [x for x in self.player.square.mobs if odds(2) and x.health > 50]
if len(angry_mob) <= 1:
if self.player.square.mobs[0].health > 50:
angry_mob = [self.player.square.mobs[0]]
if angry_mob:
return angry_mob
return False
def pick_up(self, words):
""" Add items from surroundings to player inventory """
if not self.player.square.items:
print("Nothing to pick up.")
return
words = words.replace(',', '')
quantity, item_text = parse_inventory_action(words)
item_text = 'all' if item_text is None else item_text
specific_items = find_specifics(item_text, self.player.square.items)
if not specific_items:
print("Sorry, I can't find that.")
return
items_added = []
for item in specific_items:
angry_mob = self.irritate_the_locals(item)
if angry_mob is False:
q = item.quantity if quantity == "all" or quantity is None else quantity
if q > item.quantity:
print("Can't pick up that many.")
break
self.add_item_to_inventory(item, q)
items_added.append((item, q))
item.quantity -= q
else:
self.player.square.clean_up_map()
if items_added:
print(f"Added {comma_separated([x[0].name if x[1] == 1 else x[0].plural for x in items_added])} "
f"to your inventory.")
print(f"""Uh oh, {"the locals don't" if len(angry_mob) > 1 else "someone doesn't"} like you """
f"""trying to take """
f"""their {remove_little_words(item.name) if item.quantity == 1 else item.plural}!""")
self.battle_kickoff(None, angry_mob, [self.player], contested_item=item)
break
else:
self.player.square.clean_up_map()
if items_added:
print(f"Added {comma_separated([x[0].name if x[1] == 1 else x[0].plural for x in items_added])} "
f"to your inventory.")
def add_item_to_inventory(self, item_to_add, quantity, mob=None):
mob = mob or self.player
mob_inventory = mob.inventory if mob is not self.map else self.player.square.items
if mob != self.map and mob.equipped_weapon is not None and item_to_add.name == mob.equipped_weapon.name:
mob.equipped_weapon.quantity += quantity
elif item_to_add.name in [x.name for x in mob_inventory]:
for item in mob_inventory:
if item_to_add.name == item.name:
item.quantity += int(quantity)
else:
new_item = item_to_add.copy()
new_item.quantity = quantity
mob_inventory.append(new_item)
def eat_food(self, words):
""" Eat food in your inventory to gain health """
eat = Eat(self, words)
eat.eat_foods()
def go_to_work(self):
""" Spend time at work to earn money and experience """
if not self.player.job:
print("Sorry, you don't have a job. Try applying for one.")
return
night_jobs = ['bartender', 'night stocker', 'security guard']
if self.player.phase != "day" and self.player.job.name not in night_jobs:
print("You can only work in the daytime.")
return
elif self.player.phase != "night" and self.player.job.name in night_jobs:
print("You can only work in the nighttime.")
return
if self.player.job.location != self.player.location:
print(f"Your job is not here. You need to go here: {self.player.job.location}. Check your map for the '$' symbol.")
return
sys.stdout.write("Working . . .")
sys.stdout.flush()
count = 8
while count > 0:
time.sleep(1.5)
sys.stdout.write(" .")
sys.stdout.flush()
count -= 1
print()
print(f"You earned ${self.player.job.salary}.")
self.player.money += self.player.job.salary
if self.player.job.skills_learned:
for skill in self.player.job.skills_learned:
percentage = random.randint(0, 5)
if odds(3) and percentage:
print("You gained some skill mastery at work!")
self.player.increase_skill(skill, percentage)
if self.player.job.name == 'lawn mower':
print(f"Thanks {self.player.name}, the lawn looks really great! Be sure to stop by again some time.")
self.player.job = None
@staticmethod
def find_specific_job(words, list_of_jobs):
for job in list_of_jobs:
for word in remove_little_words(words).split(' '):
if word.lower() in job.name.lower() or word.lower() == job.name.lower():
return job
def apply_for_job(self, words):
""" Player skills determine chances of getting a job """
if not self.player.building_local or not self.player.building_local.jobs:
print("No jobs to apply for here.")
return
if not words:
if len(self.player.building_local.jobs) == 1:
job = self.player.building_local.jobs[0]
else:
print("What job are you applying for?")
return
else:
job = self.find_specific_job(words, self.player.building_local.jobs)
if job:
if job == self.player.job:
print("You already have this job.")
return
if job.name in ('king of the realm', 'evil overlord'):
if self.player.building_local.mobs:
print("There's a horrible monster in the way here that you have to kill first!")
return
if job.inventory_needed and job.inventory_needed not in self.player.inventory:
print(f"You need {job.inventory_needed} for this job.")
return
job.application_attempts += 1
if 10 > job.application_attempts > 3 and odds(10 - job.application_attempts):
print("Haven't I seen you here before? I'm thinking you aren't as qualified as you think you are.")
if odds(2):
depreciable_skills = ['intelligence', 'patience', 'science', 'communication']
skill_to_lower = depreciable_skills[random.randint(0, len(depreciable_skills)-1)]
try:
self.player.skills[skill_to_lower] -= 5
except KeyError:
self.player.skills[skill_to_lower] = -5
print(f"Your {skill_to_lower} skill went down 5%.")
return
if 10 <= job.application_attempts:
print("Please go away. We are interested in candidates that don't annoy us so much.")
self.player.increase_skill('self loathing', 10)
return
match_score = 1
if job.skills_needed:
level = 0
for skill in job.skills_needed:
if skill in self.player.skills.keys():
if self.player.skills[skill] > 90:
print(f"Wow, it says here you are really good at {skill}.")
level += self.player.skills[skill]
average_level = level / len(job.skills_needed)
score_card = {90: 1, 80: 3, 70: 5, 60: 8, 0: 15}
for average, score in score_card.items():
if average_level >= average:
match_score = score
break
if odds(match_score):
if match_score == 15:
print("Ok, we'll take a chance on you.")
print(f"Congratulations {self.player.name}, you got the job!")
self.player.job = job
if job.name in ('evil overlord', 'king of the realm'):
self.player.game_over()
self.player.building_local.jobs.remove(job)
else:
bad_news = [f"I'm sorry, we're looking for candidates with more "
f"{comma_separated(job.skills_needed)} skills right now.",
"We'll let you know."]
print(bad_news[random.randint(0, len(bad_news) - 1)])
else:
print("That's not a job we are hiring for currently.")
def interact_with_building(self, words):
""" Try entering a building """
building = find_specifics(words, self.player.square.buildings)
building = building[0] if building else None
if building is not None:
if building.category == 'building':
if odds(8) is True:
print(f"Too bad, {building.name} is closed right now. Try again later.")
else:
self.player.building_local = building
print(f"You are now inside {building.name}.")
self.look_around()
else:
if odds(10) is False or self.player.phase == 'night':
print("The occupants of this residence have kicked you out.")
else:
self.player.building_local = building
print("You are now inside a house.")
self.look_around()
else:
print("That's not a place you can visit.")
def leave_building(self):
""" Exit the building the player is inside """
if self.player.building_local is not None:
print(f"Leaving {self.player.building_local.name}")
self.player.building_local = None
def buy(self, words):
""" Establish a transaction to purchase wares """
if not self.player.building_local or not self.player.building_local.wares:
print("Nothing to buy here.")
return
wares = []
haggle_for = True
quantity, item_text = parse_inventory_action(words)
quantity = 'all' if not quantity else quantity
if quantity == 'all' and item_text is None:
wares = [x for x in self.player.building_local.wares]
ware_list = [f"{ware.plural} x {ware.quantity}" for ware in self.player.building_local.wares]
price_total = sum([ware.price * ware.quantity for ware in self.player.building_local.wares])
print(f"For {comma_separated(ware_list)}, that comes to ${price_total}.")
else:
for ware in self.player.building_local.wares:
if remove_little_words(item_text) in ware.name or remove_little_words(item_text) in ware.plural:
wares = [ware]
if quantity == "all":
quantity = ware.quantity
if quantity > ware.quantity:
print(f"Sorry, we only have {ware.quantity} for sale.")
haggle_for = False
else:
print(f"For {ware.plural} x {quantity}, that comes to ${ware.price * quantity}.")
break
else:
print("I can't figure out what you want.")
haggle_for = False
if haggle_for is True:
price_offered = input("Make me an offer:")
try:
price_offered = int(price_offered.strip(" "))
self.haggle(wares, quantity, price_offered)
except ValueError:
print("I was hoping for a number, sorry.")
except TypeError:
print("I was hoping for a number, sorry.")
def haggle(self, items, quantity, price_offered):
""" Negotiate the price on items for sale """
if price_offered > self.player.money:
print("Sorry you don't have enough cash to make that offer. Try getting a job.")
return
if quantity == 'all':
price = sum([item.price * item.quantity for item in items])
else:
price = sum([item.price for item in items]) * quantity
if price <= price_offered <= self.player.money:
self.buy_items(items, quantity, price_offered)
print("Purchase complete")
elif price > price_offered > 0:
discount = {2: price - (price * .1),
3: price - (price * .2),
4: price - (price * .3),
5: price - (price * .5)}
for k, v in discount.items():
if round(v) <= price_offered:
if odds(k):
print("Ok, sounds like a deal.")
self.buy_items(items, quantity, price_offered)
else:
print("Sorry, I can't sell for that price.")
break
else:
print("Sorry, I can't sell for that price.")
else:
print("Sorry, I can't sell for that price")
def buy_items(self, items, quantity, cost):
""" Add bought items to player inventory and subtract cost from player's cash """
for item in items:
q = item.quantity if quantity == 'all' else quantity
self.add_item_to_inventory(item, q)
if q == item.quantity:
self.player.building_local.wares.remove(item)
else:
item.quantity -= q
self.player.money -= cost
# TODO sell items
def talk(self, words):
""" Say hello to mobs and ask for quests """
# TODO trade items
# TODO talk to everyone in turn for talk to everyone
mobs = self.player.square.mobs if self.player.building_local is None else self.player.building_local.mobs
if mobs and len(mobs) == 1:
specific_mob = mobs
else:
specific_mob = find_specifics(words, mobs)
if not specific_mob:
print("Don't know who to talk to.")
else:
specific_mob = specific_mob[0]
single_mob = remove_little_words(specific_mob.name)
non_responses = [f"The {single_mob} just looks at you.",
f"The {single_mob} doesn't respond.",
f"The {single_mob} is pretending not to speak english.",
f"The {single_mob} lets out a high pitched and unintelligible shriek.",
f"The {single_mob} ignores you completely."]
no_quest_responses = [f"The {single_mob} shakes his head gravely.",
# TODO add mob gender
f"The {single_mob} says 'No quests today.'",
f"The {single_mob} says 'I don't feel like it right now'.",
f"The {single_mob} laughs maniacally. 'A quest? For you? Yeah right.'"]
yes_quest_responses = [f"The ground shakes as the {single_mob} roars 'YES, I HAVE A QUEST FOR YOU!'",
f"The {single_mob} says 'Yup, I've got a quest for you.'",
f"The {single_mob} says 'Fiiiineeee, I'll give you a quest'.",
f"The {single_mob} scratches his head thoughtfully."]
greeting_responses = [f"The {single_mob} nods.",
f"The {single_mob} smiles and waves at you.",
f"The {single_mob} sneers at your impertinence.",
f"The {single_mob} gives you a cheerful 'Hello!'"]
fighting_responses = [f"The {single_mob} is very annoyed by your nagging.",
"Those are fighting words.",
f"The {single_mob} takes great offence to you.",
f"The {single_mob}'s patience has snapped."]
if specific_mob.irritation_level < 10:
specific_mob.irritation_level += 1
if odds(11 - specific_mob.irritation_level):
                print(fighting_responses[random.randint(0, len(fighting_responses) - 1)])
self.battle_kickoff(None, [specific_mob], [self.player])
return
if "quest" in words:
quest = specific_mob.generate_quest()
if specific_mob.quest:
print(yes_quest_responses[random.randint(0, len(yes_quest_responses) - 1)])
print(specific_mob.quest[2])
if input("Do you accept the quest?{} yes/no:".format(
' This will replace your current quest.' if self.player.quest else '')).lower() == "yes":
self.player.quest = (specific_mob, self.player.location)
                elif quest is None:
                    print(no_quest_responses[random.randint(0, len(no_quest_responses) - 1)])
elif any(word in ("hi", "hello", "greet", "greetings", "howdy") for word in words):
print(greeting_responses[random.randint(0, len(greeting_responses) - 1)])
else:
print(non_responses[random.randint(0, len(non_responses) - 1)])
self.player.greeting_count += 1
if self.player.greeting_count % 15 == 0:
print(f"By the way, you have been really outgoing lately!")
self.player.increase_skill('communication', random.randint(1, 5))
if self.player.greeting_count == 500:
print("Congratulations, you have been voted Time's Person of the Year, nominated for a Nobel Peace Prize, and have earned the Chattiest Adventurer achievement.")
def turn_in_quest(self):
""" Complete the quest if criteria is met, otherwise help player remember quest details """
if self.player.quest is None:
print("You don't have a quest.")
return
mob_name = remove_little_words(self.player.quest[0].name)
mob = self.player.quest[0]
if self.player.quest[1] != self.player.location:
print(f"The {mob_name} who gave you your quest is not here. You need to go to {self.player.quest[1]}. Check your map for the '*' symbol.")
return
if mob not in self.player.square.mobs:
print(f"Looks like the {mob_name} who gave you the quest is dead. That's too bad.")
self.player.quest = None
return
quest_item = mob.quest[0]
quantity = mob.quest[1]
for item in self.player.inventory:
if item.name == quest_item.name:
if item.quantity >= quantity:
print(f"You have enough {quest_item.plural} the {mob_name} requested.")
item.quantity -= quantity
self.add_item_to_inventory(item, quantity, mob)
skill = mob.skills[random.randint(0, len(mob.skills) - 1)]
percentage = random.randint(10, 70)
print(f"In exchange, the {mob_name} teaches you some {skill}.")
self.player.increase_skill(skill, percentage)
self.player.quest = None
mob.quest = None
else:
print(f"You don't have enough {quest_item.plural}. The {mob_name} requested {quantity}, "
f"and you have {item.quantity}.")
break
else:
print(f"You don't have any {quest_item.plural}. You need {quantity}.")
self.player.inventory = [i for i in self.player.inventory if i.quantity > 0]
def battle_kickoff(self, words, attacking_mobs=None, defending_mobs=None, contested_item=None):
attacking_mobs = attacking_mobs or None
defending_mobs = defending_mobs or None
contested_item = contested_item or None
list_of_locals = self.player.building_local.mobs if self.player.building_local else self.player.square.mobs
if defending_mobs:
m = comma_separated(formatted_items(attacking_mobs))
print(f"Look out, {m[0].upper()}{m[1:]} {'is' if len(attacking_mobs) == 1 else 'are'} gearing up to fight!")
for mob in attacking_mobs:
w = mob.equipped_weapon
if mob.equipped_weapon:
mob_id = the_name(mob.name)
print(f"{mob_id} is wielding {w.name if w.quantity == 1 else w.plural}.")
battle = Battle(adventure=self, list_of_attackers=attacking_mobs, list_of_defenders=defending_mobs,
contested_item=contested_item)
battle.battle_loop()
return
else:
defending_mobs = find_specifics(words, list_of_locals)
if not defending_mobs:
print("Who are you attacking?")
return
else:
m = comma_separated(formatted_items(defending_mobs))
print(f"Look out, {m[0].upper()}{m[1:]} {'is' if len(defending_mobs) == 1 else 'are'} gearing up to fight!")
for mob in defending_mobs:
w = mob.equipped_weapon
major = mob.major_armor.defense if mob.major_armor else 0
minor = mob.minor_armor.defense if mob.minor_armor else 0
armor_defense = (major + minor) * 5
mob_id = the_name(mob.name)
if mob.equipped_weapon:
print(f"{mob_id} is wielding {w.name if w.quantity == 1 else w.plural}")
if armor_defense:
armors = [mob.major_armor.name if mob.major_armor else None,
mob.minor_armor.name if mob.minor_armor else None]
print(f"{mob_id} is wearing {' and '.join(x for x in armors if x)} which reduces incoming damage by {armor_defense}")
battle = Battle(adventure=self, list_of_attackers=[self.player], list_of_defenders=defending_mobs)
battle.battle_loop()
def equip(self, words):
""" Select item from player inventory to use as battle weapon """
# TODO unequip
w = find_specifics(words, self.player.inventory)
if w:
try:
if w[0].category == 'minor armor':
if self.player.minor_armor:
self.add_item_to_inventory(self.player.minor_armor, self.player.minor_armor.quantity)
self.player.minor_armor = w[0]
elif w[0].category == 'major armor':
if self.player.major_armor:
self.add_item_to_inventory(self.player.major_armor, self.player.major_armor.quantity)
self.player.major_armor = w[0]
else:
if self.player.equipped_weapon:
weapon = self.player.equipped_weapon
self.player.equipped_weapon = None
self.add_item_to_inventory(weapon, weapon.quantity)
self.player.equipped_weapon = w[0]
except AttributeError:
if self.player.equipped_weapon:
weapon = self.player.equipped_weapon
self.player.equipped_weapon = None
self.add_item_to_inventory(weapon, weapon.quantity)
self.player.equipped_weapon = w[0]
print(f"Equipped {w[0].name if w[0].quantity == 1 else w[0].plural}")
self.player.inventory.remove(w[0])
else:
print(f"Can't find {words} in your inventory.")
|
|
# Copyright 2021 Google LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
from .models import Author
from django.db import NotSupportedError
from django.db.models import Index
from django.db.models.fields import IntegerField
from django_spanner.schema import DatabaseSchemaEditor
from tests._helpers import HAS_OPENTELEMETRY_INSTALLED
from tests.unit.django_spanner.simple_test import SpannerSimpleTestClass
from unittest import mock
from tests.unit.django_spanner.test__opentelemetry_tracing import (
PROJECT,
INSTANCE_ID,
DATABASE_ID,
)
BASE_ATTRIBUTES = {
"db.type": "spanner",
"db.engine": "django_spanner",
"db.project": PROJECT,
"db.instance": INSTANCE_ID,
"db.name": DATABASE_ID,
}
class TestUtils(SpannerSimpleTestClass):
def test_quote_value(self):
"""
        Tests quoting an input value.
"""
schema_editor = DatabaseSchemaEditor(self.connection)
self.assertEqual(schema_editor.quote_value(value=1.1), "1.1")
def test_skip_default(self):
"""
        Tests that column defaults are skipped, as Cloud Spanner doesn't support them.
"""
schema_editor = DatabaseSchemaEditor(self.connection)
self.assertTrue(schema_editor.skip_default(field=None))
def test_create_model(self):
"""
Tries creating a model's table.
"""
with DatabaseSchemaEditor(self.connection) as schema_editor:
schema_editor.execute = mock.MagicMock()
schema_editor.create_model(Author)
schema_editor.execute.assert_called_once_with(
"CREATE TABLE tests_author (id INT64 NOT NULL, name STRING(40) "
+ "NOT NULL, last_name STRING(40) NOT NULL, num INT64 NOT "
+ "NULL, created TIMESTAMP NOT NULL, modified TIMESTAMP) "
+ "PRIMARY KEY(id)",
None,
)
if HAS_OPENTELEMETRY_INSTALLED:
span_list = self.ot_exporter.get_finished_spans()
self.assertEqual(len(span_list), 1)
self.assertSpanAttributes(
"CloudSpannerDjango.create_model",
attributes=dict(BASE_ATTRIBUTES, model_name="tests_author"),
span=span_list[0],
)
def test_delete_model(self):
"""
Tests deleting a model
"""
with DatabaseSchemaEditor(self.connection) as schema_editor:
schema_editor.execute = mock.MagicMock()
schema_editor._constraint_names = mock.MagicMock()
schema_editor.delete_model(Author)
schema_editor.execute.assert_called_once_with(
"DROP TABLE tests_author",
)
if HAS_OPENTELEMETRY_INSTALLED:
span_list = self.ot_exporter.get_finished_spans()
self.assertEqual(len(span_list), 1)
self.assertSpanAttributes(
"CloudSpannerDjango.delete_model",
attributes=dict(BASE_ATTRIBUTES, model_name="tests_author"),
span=span_list[0],
)
def test_delete_model_with_index(self):
"""
        Tests deleting a model with an index
"""
with DatabaseSchemaEditor(self.connection) as schema_editor:
schema_editor.execute = mock.MagicMock()
def delete_index_sql(*args, **kwargs):
# Overriding Statement creation with sql string.
return "DROP INDEX num_unique"
def constraint_names(*args, **kwargs):
return ["num_unique"]
schema_editor._delete_index_sql = delete_index_sql
schema_editor._constraint_names = constraint_names
schema_editor.delete_model(Author)
calls = [
mock.call("DROP INDEX num_unique"),
mock.call("DROP TABLE tests_author"),
]
schema_editor.execute.assert_has_calls(calls)
if HAS_OPENTELEMETRY_INSTALLED:
span_list = self.ot_exporter.get_finished_spans()
self.assertEqual(len(span_list), 2)
self.assertSpanAttributes(
"CloudSpannerDjango.delete_model.delete_index",
attributes=dict(
BASE_ATTRIBUTES,
model_name="tests_author",
index_name="num_unique",
),
span=span_list[0],
)
self.assertSpanAttributes(
"CloudSpannerDjango.delete_model",
attributes=dict(BASE_ATTRIBUTES, model_name="tests_author",),
span=span_list[1],
)
def test_add_field(self):
"""
Tests adding fields to models
"""
with DatabaseSchemaEditor(self.connection) as schema_editor:
schema_editor.execute = mock.MagicMock()
new_field = IntegerField(null=True)
new_field.set_attributes_from_name("age")
schema_editor.add_field(Author, new_field)
schema_editor.execute.assert_called_once_with(
"ALTER TABLE tests_author ADD COLUMN age INT64", []
)
def test_remove_field(self):
"""
        Tests removing fields from models
"""
with DatabaseSchemaEditor(self.connection) as schema_editor:
schema_editor.execute = mock.MagicMock()
schema_editor._constraint_names = mock.MagicMock()
remove_field = IntegerField(unique=True)
remove_field.set_attributes_from_name("num")
schema_editor.remove_field(Author, remove_field)
schema_editor.execute.assert_called_once_with(
"ALTER TABLE tests_author DROP COLUMN num"
)
if HAS_OPENTELEMETRY_INSTALLED:
span_list = self.ot_exporter.get_finished_spans()
self.assertEqual(len(span_list), 1)
self.assertSpanAttributes(
"CloudSpannerDjango.remove_field",
attributes=dict(
BASE_ATTRIBUTES, model_name="tests_author", field="num",
),
span=span_list[0],
)
def test_remove_field_with_index(self):
"""
        Tests removing an indexed field from a model
"""
with DatabaseSchemaEditor(self.connection) as schema_editor:
schema_editor.execute = mock.MagicMock()
def delete_index_sql(*args, **kwargs):
# Overriding Statement creation with sql string.
return "DROP INDEX num_unique"
def constraint_names(*args, **kwargs):
return ["num_unique"]
schema_editor._delete_index_sql = delete_index_sql
schema_editor._constraint_names = constraint_names
remove_field = IntegerField(unique=True)
remove_field.set_attributes_from_name("num")
schema_editor.remove_field(Author, remove_field)
calls = [
mock.call("DROP INDEX num_unique"),
mock.call("ALTER TABLE tests_author DROP COLUMN num"),
]
schema_editor.execute.assert_has_calls(calls)
if HAS_OPENTELEMETRY_INSTALLED:
span_list = self.ot_exporter.get_finished_spans()
self.assertEqual(len(span_list), 2)
self.assertSpanAttributes(
"CloudSpannerDjango.remove_field.delete_index",
attributes=dict(
BASE_ATTRIBUTES,
model_name="tests_author",
field="num",
index_name="num_unique",
),
span=span_list[0],
)
self.assertSpanAttributes(
"CloudSpannerDjango.remove_field",
attributes=dict(
BASE_ATTRIBUTES, model_name="tests_author", field="num",
),
span=span_list[1],
)
def test_column_sql_not_null_field(self):
"""
Tests column sql for not null field
"""
with DatabaseSchemaEditor(self.connection) as schema_editor:
schema_editor.execute = mock.MagicMock()
new_field = IntegerField()
new_field.set_attributes_from_name("num")
sql, params = schema_editor.column_sql(Author, new_field)
self.assertEqual(sql, "INT64 NOT NULL")
self.assertEqual(params, [])
def test_column_sql_nullable_field(self):
"""
Tests column sql for nullable field
"""
with DatabaseSchemaEditor(self.connection) as schema_editor:
schema_editor.execute = mock.MagicMock()
new_field = IntegerField(null=True)
new_field.set_attributes_from_name("num")
sql, params = schema_editor.column_sql(Author, new_field)
self.assertEqual(sql, "INT64")
self.assertEqual(params, [])
def test_column_add_index(self):
"""
Tests column add index
"""
with DatabaseSchemaEditor(self.connection) as schema_editor:
schema_editor.execute = mock.MagicMock()
index = Index(name="test_author_index_num", fields=["num"])
schema_editor.add_index(Author, index)
name, args, kwargs = schema_editor.execute.mock_calls[0]
self.assertEqual(
str(args[0]),
"CREATE INDEX test_author_index_num ON tests_author (num)",
)
self.assertEqual(kwargs["params"], None)
self.assertEqual(name, "")
if HAS_OPENTELEMETRY_INSTALLED:
span_list = self.ot_exporter.get_finished_spans()
self.assertEqual(len(span_list), 1)
self.assertSpanAttributes(
"CloudSpannerDjango.add_index",
attributes=dict(
BASE_ATTRIBUTES, model_name="tests_author", index="num",
),
span=span_list[0],
)
def test_alter_field(self):
"""
Tests altering existing field in table
"""
with DatabaseSchemaEditor(self.connection) as schema_editor:
schema_editor.execute = mock.MagicMock()
old_field = IntegerField()
old_field.set_attributes_from_name("num")
new_field = IntegerField()
new_field.set_attributes_from_name("author_num")
schema_editor.alter_field(Author, old_field, new_field)
schema_editor.execute.assert_called_once_with(
"ALTER TABLE tests_author RENAME COLUMN num TO author_num"
)
def test_alter_field_change_null_with_single_index(self):
"""
        Tests altering the nullability of a field with a single index
"""
with DatabaseSchemaEditor(self.connection) as schema_editor:
schema_editor.execute = mock.MagicMock()
def delete_index_sql(*args, **kwargs):
# Overriding Statement creation with sql string.
return "DROP INDEX num_unique"
def create_index_sql(*args, **kwargs):
# Overriding Statement creation with sql string.
return "CREATE INDEX tests_author ON tests_author (author_num)"
def constraint_names(*args, **kwargs):
return ["num_unique"]
schema_editor._delete_index_sql = delete_index_sql
schema_editor._create_index_sql = create_index_sql
schema_editor._constraint_names = constraint_names
old_field = IntegerField(null=True, db_index=True)
old_field.set_attributes_from_name("num")
new_field = IntegerField(db_index=True)
new_field.set_attributes_from_name("author_num")
schema_editor.alter_field(Author, old_field, new_field)
calls = [
mock.call("DROP INDEX num_unique"),
mock.call(
"ALTER TABLE tests_author RENAME COLUMN num TO author_num"
),
mock.call(
"ALTER TABLE tests_author ALTER COLUMN author_num INT64 NOT NULL",
[],
),
mock.call(
"CREATE INDEX tests_author ON tests_author (author_num)"
),
]
schema_editor.execute.assert_has_calls(calls)
if HAS_OPENTELEMETRY_INSTALLED:
span_list = self.ot_exporter.get_finished_spans()
self.assertEqual(len(span_list), 3)
self.assertSpanAttributes(
"CloudSpannerDjango.alter_field.delete_index",
attributes=dict(
BASE_ATTRIBUTES,
model_name="tests_author",
index_name="num_unique",
alter_field="num",
),
span=span_list[0],
)
self.assertSpanAttributes(
"CloudSpannerDjango.alter_field",
attributes=dict(
BASE_ATTRIBUTES,
model_name="tests_author",
alter_field="num",
),
span=span_list[1],
)
self.assertSpanAttributes(
"CloudSpannerDjango.alter_field.recreate_index",
attributes=dict(
BASE_ATTRIBUTES,
model_name="tests_author",
alter_field="author_num",
),
span=span_list[2],
)
def test_alter_field_nullability_change_raise_not_support_error(self):
"""
        Tests that altering the nullability of an existing field in a table raises NotSupportedError
"""
with DatabaseSchemaEditor(self.connection) as schema_editor:
schema_editor.execute = mock.MagicMock()
def constraint_names(*args, **kwargs):
return ["num_unique"]
schema_editor._constraint_names = constraint_names
old_field = IntegerField(null=True)
old_field.set_attributes_from_name("num")
new_field = IntegerField()
new_field.set_attributes_from_name("author_num")
with self.assertRaises(NotSupportedError):
schema_editor.alter_field(Author, old_field, new_field)
def test_alter_field_change_null_with_multiple_index_error(self):
"""
        Tests that altering the nullability of a field with multiple indexes is not supported
"""
with DatabaseSchemaEditor(self.connection) as schema_editor:
schema_editor.execute = mock.MagicMock()
def constraint_names(*args, **kwargs):
return ["num_unique", "dummy_index"]
schema_editor._constraint_names = constraint_names
old_field = IntegerField(null=True, db_index=True)
old_field.set_attributes_from_name("num")
new_field = IntegerField()
new_field.set_attributes_from_name("author_num")
with self.assertRaises(NotSupportedError):
schema_editor.alter_field(Author, old_field, new_field)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The InverseGamma distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
__all__ = [
"InverseGamma",
"InverseGammaWithSoftplusConcentrationRate",
]
class InverseGamma(distribution.Distribution):
"""InverseGamma distribution.
The `InverseGamma` distribution is defined over positive real numbers using
parameters `concentration` (aka "alpha") and `rate` (aka "beta").
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; alpha, beta, x > 0) = x**(-alpha - 1) exp(-beta / x) / Z
Z = Gamma(alpha) beta**-alpha
```
where:
* `concentration = alpha`,
* `rate = beta`,
* `Z` is the normalizing constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
  The cumulative distribution function (cdf) is,
```none
cdf(x; alpha, beta, x > 0) = GammaInc(alpha, beta / x) / Gamma(alpha)
```
where `GammaInc` is the [upper incomplete Gamma function](
https://en.wikipedia.org/wiki/Incomplete_gamma_function).
The parameters can be intuited via their relationship to mean and stddev,
```none
  concentration = alpha = (mean / stddev)**2 + 2
  rate = beta = mean * (concentration - 1)
```
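  For example, to target `mean = 2.0` and `stddev = 1.0` (an illustrative choice,
  not part of the original docstring):
  ```python
  mean, stddev = 2.0, 1.0
  concentration = (mean / stddev)**2 + 2.   # 6.0
  rate = mean * (concentration - 1.)        # 10.0
  # Check: rate / (concentration - 1) == 2.0 and
  # rate**2 / ((concentration - 1)**2 * (concentration - 2)) == 1.0
  ```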
Distribution parameters are automatically broadcast in all functions; see
examples for details.
WARNING: This distribution may draw 0-valued samples for small concentration
values. See note in `tf.random_gamma` docstring.
#### Examples
```python
dist = InverseGamma(concentration=3.0, rate=2.0)
dist2 = InverseGamma(concentration=[3.0, 4.0], rate=[2.0, 3.0])
```
"""
def __init__(self,
concentration,
rate,
validate_args=False,
allow_nan_stats=True,
name="InverseGamma"):
"""Construct InverseGamma with `concentration` and `rate` parameters.
The parameters `concentration` and `rate` must be shaped in a way that
supports broadcasting (e.g. `concentration + rate` is a valid operation).
Args:
concentration: Floating point tensor, the concentration params of the
distribution(s). Must contain only positive values.
rate: Floating point tensor, the inverse scale params of the
distribution(s). Must contain only positive values.
validate_args: Python `Boolean`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `Boolean`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: `String` name prefixed to Ops created by this class.
Raises:
TypeError: if `concentration` and `rate` are different dtypes.
"""
parameters = locals()
with ops.name_scope(name, values=[concentration, rate]) as ns:
with ops.control_dependencies([
check_ops.assert_positive(concentration),
check_ops.assert_positive(rate),
] if validate_args else []):
self._concentration = array_ops.identity(
concentration, name="concentration")
self._rate = array_ops.identity(rate, name="rate")
contrib_tensor_util.assert_same_float_dtype(
[self._concentration, self._rate])
super(InverseGamma, self).__init__(
dtype=self._concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
is_continuous=True,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
parameters=parameters,
graph_parents=[self._concentration,
self._rate],
name=ns)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("concentration", "rate"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def concentration(self):
"""Concentration parameter."""
return self._concentration
@property
def rate(self):
"""Rate parameter."""
return self._rate
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.concentration),
array_ops.shape(self.rate))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.concentration.get_shape(),
self.rate.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
@distribution_util.AppendDocstring(
"""Note: See `tf.random_gamma` docstring for sampling details and
caveats.""")
def _sample_n(self, n, seed=None):
return 1. / random_ops.random_gamma(
shape=[n],
alpha=self.concentration,
beta=self.rate,
dtype=self.dtype,
seed=seed)
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _log_cdf(self, x):
return math_ops.log(self._cdf(x))
def _cdf(self, x):
x = self._maybe_assert_valid_sample(x)
# Note that igammac returns the upper regularized incomplete gamma
# function Q(a, x), which is what we want for the CDF.
return math_ops.igammac(self.concentration, self.rate / x)
def _log_unnormalized_prob(self, x):
x = self._maybe_assert_valid_sample(x)
return -(1. + self.concentration) * math_ops.log(x) - self.rate / x
def _log_normalization(self):
return (math_ops.lgamma(self.concentration)
- self.concentration * math_ops.log(self.rate))
def _entropy(self):
return (self.concentration
+ math_ops.log(self.rate)
+ math_ops.lgamma(self.concentration)
- ((1. + self.concentration) *
math_ops.digamma(self.concentration)))
@distribution_util.AppendDocstring(
"""The mean of an inverse gamma distribution is
`rate / (concentration - 1)`, when `concentration > 1`, and `NaN`
otherwise. If `self.allow_nan_stats` is `False`, an exception will be
raised rather than returning `NaN`""")
def _mean(self):
mean = self.rate / (self.concentration - 1.)
if self.allow_nan_stats:
nan = array_ops.fill(
self.batch_shape_tensor(),
np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
name="nan")
return array_ops.where(self.concentration > 1., mean, nan)
else:
return control_flow_ops.with_dependencies([
check_ops.assert_less(
array_ops.ones([], self.dtype), self.concentration,
message="mean undefined when any concentration <= 1"),
], mean)
@distribution_util.AppendDocstring(
"""Variance for inverse gamma is defined only for `concentration > 2`. If
`self.allow_nan_stats` is `False`, an exception will be raised rather
than returning `NaN`.""")
def _variance(self):
var = (math_ops.square(self.rate)
/ math_ops.square(self.concentration - 1.)
/ (self.concentration - 2.))
if self.allow_nan_stats:
nan = array_ops.fill(
self.batch_shape_tensor(),
np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
name="nan")
return array_ops.where(self.concentration > 2., var, nan)
else:
return control_flow_ops.with_dependencies([
check_ops.assert_less(
constant_op.constant(2., dtype=self.dtype),
self.concentration,
message="variance undefined when any concentration <= 2"),
], var)
@distribution_util.AppendDocstring(
"""The mode of an inverse gamma distribution is `rate / (concentration +
1)`.""")
def _mode(self):
return self.rate / (1. + self.concentration)
def _maybe_assert_valid_sample(self, x):
contrib_tensor_util.assert_same_float_dtype(
tensors=[x], dtype=self.dtype)
if not self.validate_args:
return x
return control_flow_ops.with_dependencies([
check_ops.assert_positive(x),
], x)
class InverseGammaWithSoftplusConcentrationRate(InverseGamma):
"""`InverseGamma` with softplus of `concentration` and `rate`."""
def __init__(self,
concentration,
rate,
validate_args=False,
allow_nan_stats=True,
name="InverseGammaWithSoftplusConcentrationRate"):
parameters = locals()
with ops.name_scope(name, values=[concentration, rate]) as ns:
super(InverseGammaWithSoftplusConcentrationRate, self).__init__(
concentration=nn.softplus(concentration,
name="softplus_concentration"),
rate=nn.softplus(rate, name="softplus_rate"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=ns)
self._parameters = parameters
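# Illustrative sketch (values are placeholders): the softplus variant accepts
# unconstrained parameters and maps them through softplus(x) = log(1 + exp(x))
# before building the underlying InverseGamma, e.g.
#
#   dist = InverseGammaWithSoftplusConcentrationRate(
#       concentration=[0., 2.], rate=[1., 1.])
#
# so a raw concentration of 0. becomes softplus(0.) = log(2.) ~= 0.693.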
|
|
# urllib3/connection.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import sys
import socket
from socket import timeout as SocketTimeout
try: # Python 3
from http.client import HTTPConnection as _HTTPConnection, HTTPException
except ImportError:
from httplib import HTTPConnection as _HTTPConnection, HTTPException
class DummyConnection(object):
"Used to detect a failed ConnectionCls import."
pass
try: # Compiled with SSL?
ssl = None
HTTPSConnection = DummyConnection
class BaseSSLError(BaseException):
pass
try: # Python 3
from http.client import HTTPSConnection as _HTTPSConnection
except ImportError:
from httplib import HTTPSConnection as _HTTPSConnection
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
pass
from .exceptions import (
ConnectTimeoutError,
)
from .packages.ssl_match_hostname import match_hostname
from .packages import six
from .util import (
assert_fingerprint,
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
)
port_by_scheme = {
'http': 80,
'https': 443,
}
class HTTPConnection(_HTTPConnection, object):
"""
Based on httplib.HTTPConnection but provides an extra constructor
backwards-compatibility layer between older and newer Pythons.
"""
default_port = port_by_scheme['http']
# By default, disable Nagle's Algorithm.
tcp_nodelay = 1
def __init__(self, *args, **kw):
if six.PY3: # Python 3
kw.pop('strict', None)
if sys.version_info < (2, 7): # Python 2.6 and older
kw.pop('source_address', None)
# Pre-set source_address in case we have an older Python like 2.6.
self.source_address = kw.get('source_address')
# Superclass also sets self.source_address in Python 2.7+.
_HTTPConnection.__init__(self, *args, **kw)
def _new_conn(self):
""" Establish a socket connection and set nodelay settings on it.
:return: a new socket connection
"""
extra_args = []
if self.source_address: # Python 2.7+
extra_args.append(self.source_address)
conn = socket.create_connection(
(self.host, self.port), self.timeout, *extra_args)
conn.setsockopt(
socket.IPPROTO_TCP, socket.TCP_NODELAY, self.tcp_nodelay)
return conn
def _prepare_conn(self, conn):
self.sock = conn
# the _tunnel_host attribute was added in python 2.6.3 (via
        # http://hg.python.org/cpython/rev/0f57b30a152f) so Python 2.6.0-2.6.2
        # does not have it.
if getattr(self, '_tunnel_host', None):
# Fix tunnel so it doesn't depend on self.sock state.
self._tunnel()
def connect(self):
conn = self._new_conn()
self._prepare_conn(conn)
class HTTPSConnection(HTTPConnection):
default_port = port_by_scheme['https']
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, **kw):
HTTPConnection.__init__(self, host, port, strict=strict,
timeout=timeout, **kw)
self.key_file = key_file
self.cert_file = cert_file
# Required property for Google AppEngine 1.9.0 which otherwise causes
# HTTPS requests to go out as HTTP. (See Issue #356)
self._protocol = 'https'
def connect(self):
conn = self._new_conn()
self._prepare_conn(conn)
self.sock = ssl.wrap_socket(conn, self.key_file, self.cert_file)
class VerifiedHTTPSConnection(HTTPSConnection):
"""
Based on httplib.HTTPSConnection but wraps the socket with
SSL certification.
"""
cert_reqs = None
ca_certs = None
ssl_version = None
conn_kw = {}
def set_cert(self, key_file=None, cert_file=None,
cert_reqs=None, ca_certs=None,
assert_hostname=None, assert_fingerprint=None):
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def connect(self):
# Add certificate verification
try:
sock = socket.create_connection(
address=(self.host, self.port), timeout=self.timeout,
**self.conn_kw)
except SocketTimeout:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout))
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY,
self.tcp_nodelay)
resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
resolved_ssl_version = resolve_ssl_version(self.ssl_version)
hostname = self.host
if getattr(self, '_tunnel_host', None):
# _tunnel_host was added in Python 2.6.3
# (See: http://hg.python.org/cpython/rev/0f57b30a152f)
self.sock = sock
# Calls self._set_hostport(), so self.host is
# self._tunnel_host below.
self._tunnel()
# Override the host with the one we're requesting data from.
hostname = self._tunnel_host
# Wrap socket using verification with the root certs in
# trusted_root_certs
self.sock = ssl_wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=resolved_cert_reqs,
ca_certs=self.ca_certs,
server_hostname=hostname,
ssl_version=resolved_ssl_version)
if resolved_cert_reqs != ssl.CERT_NONE:
if self.assert_fingerprint:
assert_fingerprint(self.sock.getpeercert(binary_form=True),
self.assert_fingerprint)
elif self.assert_hostname is not False:
match_hostname(self.sock.getpeercert(),
self.assert_hostname or hostname)
if ssl:
# Make a copy for testing.
UnverifiedHTTPSConnection = HTTPSConnection
HTTPSConnection = VerifiedHTTPSConnection
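# Illustrative usage sketch (host and certificate path are placeholders):
#
#   conn = VerifiedHTTPSConnection('example.com', 443)
#   conn.set_cert(cert_reqs='CERT_REQUIRED', ca_certs='/etc/ssl/certs/ca.pem')
#   conn.connect()  # verifies the certificate chain and hostname before use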
|
|
"""Provides device automations for Cover."""
from typing import Any, Dict, List
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
CONF_ABOVE,
CONF_BELOW,
CONF_CONDITION,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_TYPE,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import (
condition,
config_validation as cv,
entity_registry,
template,
)
from homeassistant.helpers.config_validation import DEVICE_CONDITION_BASE_SCHEMA
from homeassistant.helpers.typing import ConfigType, TemplateVarsType
from . import (
DOMAIN,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
SUPPORT_SET_TILT_POSITION,
)
POSITION_CONDITION_TYPES = {"is_position", "is_tilt_position"}
STATE_CONDITION_TYPES = {"is_open", "is_closed", "is_opening", "is_closing"}
POSITION_CONDITION_SCHEMA = vol.All(
DEVICE_CONDITION_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(POSITION_CONDITION_TYPES),
vol.Optional(CONF_ABOVE): vol.All(
vol.Coerce(int), vol.Range(min=0, max=100)
),
vol.Optional(CONF_BELOW): vol.All(
vol.Coerce(int), vol.Range(min=0, max=100)
),
}
),
cv.has_at_least_one_key(CONF_BELOW, CONF_ABOVE),
)
STATE_CONDITION_SCHEMA = DEVICE_CONDITION_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(STATE_CONDITION_TYPES),
}
)
CONDITION_SCHEMA = vol.Any(POSITION_CONDITION_SCHEMA, STATE_CONDITION_SCHEMA)
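# Illustrative configs accepted by CONDITION_SCHEMA (device/entity ids are
# placeholders):
#
#   {"condition": "device", "device_id": "abc123", "domain": "cover",
#    "entity_id": "cover.living_room", "type": "is_open"}
#
#   {"condition": "device", "device_id": "abc123", "domain": "cover",
#    "entity_id": "cover.living_room", "type": "is_position",
#    "above": 30, "below": 70}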
async def async_get_conditions(hass: HomeAssistant, device_id: str) -> List[dict]:
"""List device conditions for Cover devices."""
registry = await entity_registry.async_get_registry(hass)
conditions: List[Dict[str, Any]] = []
    # Get all of this integration's entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
state = hass.states.get(entry.entity_id)
if not state or ATTR_SUPPORTED_FEATURES not in state.attributes:
continue
supported_features = state.attributes[ATTR_SUPPORTED_FEATURES]
supports_open_close = supported_features & (SUPPORT_OPEN | SUPPORT_CLOSE)
# Add conditions for each entity that belongs to this integration
if supports_open_close:
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "is_open",
}
)
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "is_closed",
}
)
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "is_opening",
}
)
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "is_closing",
}
)
if supported_features & SUPPORT_SET_POSITION:
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "is_position",
}
)
if supported_features & SUPPORT_SET_TILT_POSITION:
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "is_tilt_position",
}
)
return conditions
async def async_get_condition_capabilities(hass: HomeAssistant, config: dict) -> dict:
"""List condition capabilities."""
if config[CONF_TYPE] not in ["is_position", "is_tilt_position"]:
return {}
return {
"extra_fields": vol.Schema(
{
vol.Optional(CONF_ABOVE, default=0): vol.All(
vol.Coerce(int), vol.Range(min=0, max=100)
),
vol.Optional(CONF_BELOW, default=100): vol.All(
vol.Coerce(int), vol.Range(min=0, max=100)
),
}
)
}
def async_condition_from_config(
config: ConfigType, config_validation: bool
) -> condition.ConditionCheckerType:
"""Create a function to test a device condition."""
if config_validation:
config = CONDITION_SCHEMA(config)
if config[CONF_TYPE] in STATE_CONDITION_TYPES:
if config[CONF_TYPE] == "is_open":
state = STATE_OPEN
elif config[CONF_TYPE] == "is_closed":
state = STATE_CLOSED
elif config[CONF_TYPE] == "is_opening":
state = STATE_OPENING
elif config[CONF_TYPE] == "is_closing":
state = STATE_CLOSING
def test_is_state(hass: HomeAssistant, variables: TemplateVarsType) -> bool:
"""Test if an entity is a certain state."""
return condition.state(hass, config[ATTR_ENTITY_ID], state)
return test_is_state
if config[CONF_TYPE] == "is_position":
position = "current_position"
if config[CONF_TYPE] == "is_tilt_position":
position = "current_tilt_position"
min_pos = config.get(CONF_ABOVE, None)
max_pos = config.get(CONF_BELOW, None)
value_template = template.Template( # type: ignore
f"{{{{ state.attributes.{position} }}}}"
)
def template_if(hass: HomeAssistant, variables: TemplateVarsType = None) -> bool:
"""Validate template based if-condition."""
value_template.hass = hass
return condition.async_numeric_state(
hass, config[ATTR_ENTITY_ID], max_pos, min_pos, value_template
)
return template_if
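# Illustrative sketch (device/entity ids are placeholders): building and
# evaluating a checker for "cover position is between 30 and 70":
#
#   config = {"condition": "device", "device_id": "abc123", "domain": "cover",
#             "entity_id": "cover.garage_door", "type": "is_position",
#             "above": 30, "below": 70}
#   check = async_condition_from_config(config, config_validation=True)
#   is_met = check(hass, {})  # True while current_position is inside the range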
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or
# https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefb_facts
version_added: '2.7'
deprecated:
removed_in: '2.13'
why: Deprecated in favor of C(_info) module.
alternative: Use M(purefb_info) instead.
short_description: Collect facts from Pure Storage FlashBlade
description:
- Collect facts information from a Pure Storage FlashBlade running the
  Purity//FB operating system. By default, the module will collect basic
  fact information including filesystem, snapshot, bucket, blade and
  object store account counts. Additional fact information can be collected
based on the configured set of arguments.
author:
- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
options:
gather_subset:
description:
- When supplied, this argument will define the facts to be collected.
Possible values for this include all, minimum, config, performance,
capacity, network, subnets, lags, filesystems and snapshots.
required: false
type: list
default: minimum
extends_documentation_fragment:
- purestorage.fb
'''
EXAMPLES = r'''
- name: collect default set of facts
purefb_facts:
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
- name: collect configuration and capacity facts
purefb_facts:
gather_subset:
- config
- capacity
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
- name: collect all facts
purefb_facts:
gather_subset:
- all
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
'''
RETURN = r'''
ansible_facts:
description: Returns the facts collected from the FlashBlade
returned: always
type: complex
contains:
"capacity": {
"aggregate": {
"data_reduction": 1.1179228,
"snapshots": 0,
"total_physical": 17519748439,
"unique": 17519748439,
"virtual": 19585726464
},
"file-system": {
"data_reduction": 1.3642412,
"snapshots": 0,
"total_physical": 4748219708,
"unique": 4748219708,
"virtual": 6477716992
},
"object-store": {
"data_reduction": 1.0263462,
"snapshots": 0,
"total_physical": 12771528731,
"unique": 12771528731,
"virtual": 6477716992
},
"total": 83359896948925
}
"config": {
"alert_watchers": {
"enabled": true,
"name": "notify@acmestorage.com"
},
"array_management": {
"base_dn": null,
"bind_password": null,
"bind_user": null,
"enabled": false,
"name": "management",
"services": [
"management"
],
"uris": []
},
"directory_service_roles": {
"array_admin": {
"group": null,
"group_base": null
},
"ops_admin": {
"group": null,
"group_base": null
},
"readonly": {
"group": null,
"group_base": null
},
"storage_admin": {
"group": null,
"group_base": null
}
},
"dns": {
"domain": "demo.acmestorage.com",
"name": "demo-fb-1",
"nameservers": [
"8.8.8.8"
],
"search": [
"demo.acmestorage.com"
]
},
"nfs_directory_service": {
"base_dn": null,
"bind_password": null,
"bind_user": null,
"enabled": false,
"name": "nfs",
"services": [
"nfs"
],
"uris": []
},
"ntp": [
"0.ntp.pool.org"
],
"smb_directory_service": {
"base_dn": null,
"bind_password": null,
"bind_user": null,
"enabled": false,
"name": "smb",
"services": [
"smb"
],
"uris": []
},
"smtp": {
"name": "demo-fb-1",
"relay_host": null,
"sender_domain": "acmestorage.com"
},
"ssl_certs": {
"certificate": "-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----",
"common_name": "Acme Storage",
"country": "US",
"email": null,
"intermediate_certificate": null,
"issued_by": "Acme Storage",
"issued_to": "Acme Storage",
"key_size": 4096,
"locality": null,
"name": "global",
"organization": "Acme Storage",
"organizational_unit": "Acme Storage",
"passphrase": null,
"private_key": null,
"state": null,
"status": "self-signed",
"valid_from": "1508433967000",
"valid_to": "2458833967000"
}
}
"default": {
"blades": 15,
"buckets": 7,
"filesystems": 2,
"flashblade_name": "demo-fb-1",
"object_store_accounts": 1,
"object_store_users": 1,
"purity_version": "2.2.0",
"snapshots": 1,
"total_capacity": 83359896948925
}
"filesystems": {
"k8s-pvc-d24b1357-579e-11e8-811f-ecf4bbc88f54": {
"destroyed": false,
"fast_remove": false,
"hard_limit": true,
"nfs_rules": "*(rw,no_root_squash)",
"provisioned": 21474836480,
"snapshot_enabled": false
},
"z": {
"destroyed": false,
"fast_remove": false,
"hard_limit": false,
"provisioned": 1073741824,
"snapshot_enabled": false
}
}
"lag": {
"uplink": {
"lag_speed": 0,
"port_speed": 40000000000,
"ports": [
{
"name": "CH1.FM1.ETH1.1"
},
{
"name": "CH1.FM1.ETH1.2"
},
],
"status": "healthy"
}
}
"network": {
"fm1.admin0": {
"address": "10.10.100.6",
"gateway": "10.10.100.1",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"support"
],
"type": "vip",
"vlan": 2200
},
"fm2.admin0": {
"address": "10.10.100.7",
"gateway": "10.10.100.1",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"support"
],
"type": "vip",
"vlan": 2200
},
"nfs1": {
"address": "10.10.100.4",
"gateway": "10.10.100.1",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"data"
],
"type": "vip",
"vlan": 2200
},
"vir0": {
"address": "10.10.100.5",
"gateway": "10.10.100.1",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"management"
],
"type": "vip",
"vlan": 2200
}
}
"performance": {
"aggregate": {
"bytes_per_op": 0,
"bytes_per_read": 0,
"bytes_per_write": 0,
"read_bytes_per_sec": 0,
"reads_per_sec": 0,
"usec_per_other_op": 0,
"usec_per_read_op": 0,
"usec_per_write_op": 0,
"write_bytes_per_sec": 0,
"writes_per_sec": 0
},
"http": {
"bytes_per_op": 0,
"bytes_per_read": 0,
"bytes_per_write": 0,
"read_bytes_per_sec": 0,
"reads_per_sec": 0,
"usec_per_other_op": 0,
"usec_per_read_op": 0,
"usec_per_write_op": 0,
"write_bytes_per_sec": 0,
"writes_per_sec": 0
},
"nfs": {
"bytes_per_op": 0,
"bytes_per_read": 0,
"bytes_per_write": 0,
"read_bytes_per_sec": 0,
"reads_per_sec": 0,
"usec_per_other_op": 0,
"usec_per_read_op": 0,
"usec_per_write_op": 0,
"write_bytes_per_sec": 0,
"writes_per_sec": 0
},
"s3": {
"bytes_per_op": 0,
"bytes_per_read": 0,
"bytes_per_write": 0,
"read_bytes_per_sec": 0,
"reads_per_sec": 0,
"usec_per_other_op": 0,
"usec_per_read_op": 0,
"usec_per_write_op": 0,
"write_bytes_per_sec": 0,
"writes_per_sec": 0
}
}
"snapshots": {
"z.188": {
"destroyed": false,
"source": "z",
"source_destroyed": false,
"suffix": "188"
}
}
"subnet": {
"new-mgmt": {
"gateway": "10.10.100.1",
"interfaces": [
{
"name": "fm1.admin0"
},
{
"name": "fm2.admin0"
},
{
"name": "nfs1"
},
{
"name": "vir0"
}
],
"lag": "uplink",
"mtu": 1500,
"prefix": "10.10.100.0/24",
"services": [
"data",
"management",
"support"
],
"vlan": 2200
}
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_blade, purefb_argument_spec
MIN_REQUIRED_API_VERSION = '1.3'
HARD_LIMIT_API_VERSION = '1.4'
def generate_default_dict(blade):
default_facts = {}
defaults = blade.arrays.list_arrays().items[0]
default_facts['flashblade_name'] = defaults.name
default_facts['purity_version'] = defaults.version
default_facts['filesystems'] = \
len(blade.file_systems.list_file_systems().items)
default_facts['snapshots'] = \
len(blade.file_system_snapshots.list_file_system_snapshots().items)
default_facts['buckets'] = len(blade.buckets.list_buckets().items)
default_facts['object_store_users'] = \
len(blade.object_store_users.list_object_store_users().items)
default_facts['object_store_accounts'] = \
len(blade.object_store_accounts.list_object_store_accounts().items)
default_facts['blades'] = len(blade.blade.list_blades().items)
default_facts['total_capacity'] = \
blade.arrays.list_arrays_space().items[0].capacity
return default_facts
def generate_perf_dict(blade):
perf_facts = {}
total_perf = blade.arrays.list_arrays_performance()
http_perf = blade.arrays.list_arrays_performance(protocol='http')
s3_perf = blade.arrays.list_arrays_performance(protocol='s3')
nfs_perf = blade.arrays.list_arrays_performance(protocol='nfs')
perf_facts['aggregate'] = {
'bytes_per_op': total_perf.items[0].bytes_per_op,
'bytes_per_read': total_perf.items[0].bytes_per_read,
'bytes_per_write': total_perf.items[0].bytes_per_write,
'read_bytes_per_sec': total_perf.items[0].read_bytes_per_sec,
'reads_per_sec': total_perf.items[0].reads_per_sec,
'usec_per_other_op': total_perf.items[0].usec_per_other_op,
'usec_per_read_op': total_perf.items[0].usec_per_read_op,
'usec_per_write_op': total_perf.items[0].usec_per_write_op,
'write_bytes_per_sec': total_perf.items[0].write_bytes_per_sec,
'writes_per_sec': total_perf.items[0].writes_per_sec,
}
perf_facts['http'] = {
'bytes_per_op': http_perf.items[0].bytes_per_op,
'bytes_per_read': http_perf.items[0].bytes_per_read,
'bytes_per_write': http_perf.items[0].bytes_per_write,
'read_bytes_per_sec': http_perf.items[0].read_bytes_per_sec,
'reads_per_sec': http_perf.items[0].reads_per_sec,
'usec_per_other_op': http_perf.items[0].usec_per_other_op,
'usec_per_read_op': http_perf.items[0].usec_per_read_op,
'usec_per_write_op': http_perf.items[0].usec_per_write_op,
'write_bytes_per_sec': http_perf.items[0].write_bytes_per_sec,
'writes_per_sec': http_perf.items[0].writes_per_sec,
}
perf_facts['s3'] = {
'bytes_per_op': s3_perf.items[0].bytes_per_op,
'bytes_per_read': s3_perf.items[0].bytes_per_read,
'bytes_per_write': s3_perf.items[0].bytes_per_write,
'read_bytes_per_sec': s3_perf.items[0].read_bytes_per_sec,
'reads_per_sec': s3_perf.items[0].reads_per_sec,
'usec_per_other_op': s3_perf.items[0].usec_per_other_op,
'usec_per_read_op': s3_perf.items[0].usec_per_read_op,
'usec_per_write_op': s3_perf.items[0].usec_per_write_op,
'write_bytes_per_sec': s3_perf.items[0].write_bytes_per_sec,
'writes_per_sec': s3_perf.items[0].writes_per_sec,
}
perf_facts['nfs'] = {
'bytes_per_op': nfs_perf.items[0].bytes_per_op,
'bytes_per_read': nfs_perf.items[0].bytes_per_read,
'bytes_per_write': nfs_perf.items[0].bytes_per_write,
'read_bytes_per_sec': nfs_perf.items[0].read_bytes_per_sec,
'reads_per_sec': nfs_perf.items[0].reads_per_sec,
'usec_per_other_op': nfs_perf.items[0].usec_per_other_op,
'usec_per_read_op': nfs_perf.items[0].usec_per_read_op,
'usec_per_write_op': nfs_perf.items[0].usec_per_write_op,
'write_bytes_per_sec': nfs_perf.items[0].write_bytes_per_sec,
'writes_per_sec': nfs_perf.items[0].writes_per_sec,
}
return perf_facts
def generate_config_dict(blade):
config_facts = {}
config_facts['dns'] = blade.dns.list_dns().items[0].to_dict()
config_facts['smtp'] = blade.smtp.list_smtp().items[0].to_dict()
config_facts['alert_watchers'] = \
blade.alert_watchers.list_alert_watchers().items[0].to_dict()
api_version = blade.api_version.list_versions().versions
if HARD_LIMIT_API_VERSION in api_version:
config_facts['array_management'] = \
blade.directory_services.list_directory_services(names=['management']).items[0].to_dict()
config_facts['directory_service_roles'] = {}
roles = blade.directory_services.list_directory_services_roles()
for role in range(0, len(roles.items)):
role_name = roles.items[role].name
config_facts['directory_service_roles'][role_name] = {
'group': roles.items[role].group,
'group_base': roles.items[role].group_base
}
config_facts['nfs_directory_service'] = \
blade.directory_services.list_directory_services(names=['nfs']).items[0].to_dict()
config_facts['smb_directory_service'] = \
blade.directory_services.list_directory_services(names=['smb']).items[0].to_dict()
config_facts['ntp'] = blade.arrays.list_arrays().items[0].ntp_servers
config_facts['ssl_certs'] = \
blade.certificates.list_certificates().items[0].to_dict()
return config_facts
def generate_subnet_dict(blade):
sub_facts = {}
subnets = blade.subnets.list_subnets()
for sub in range(0, len(subnets.items)):
sub_name = subnets.items[sub].name
if subnets.items[sub].enabled:
sub_facts[sub_name] = {
'gateway': subnets.items[sub].gateway,
'mtu': subnets.items[sub].mtu,
'vlan': subnets.items[sub].vlan,
'prefix': subnets.items[sub].prefix,
'services': subnets.items[sub].services,
}
sub_facts[sub_name]['lag'] = subnets.items[sub].link_aggregation_group.name
sub_facts[sub_name]['interfaces'] = []
for iface in range(0, len(subnets.items[sub].interfaces)):
sub_facts[sub_name]['interfaces'].append({'name': subnets.items[sub].interfaces[iface].name})
return sub_facts
def generate_lag_dict(blade):
lag_facts = {}
groups = blade.link_aggregation_groups.list_link_aggregation_groups()
for groupcnt in range(0, len(groups.items)):
lag_name = groups.items[groupcnt].name
lag_facts[lag_name] = {
'lag_speed': groups.items[groupcnt].lag_speed,
'port_speed': groups.items[groupcnt].port_speed,
'status': groups.items[groupcnt].status,
}
lag_facts[lag_name]['ports'] = []
for port in range(0, len(groups.items[groupcnt].ports)):
lag_facts[lag_name]['ports'].append({'name': groups.items[groupcnt].ports[port].name})
return lag_facts
def generate_network_dict(blade):
net_facts = {}
ports = blade.network_interfaces.list_network_interfaces()
for portcnt in range(0, len(ports.items)):
int_name = ports.items[portcnt].name
if ports.items[portcnt].enabled:
net_facts[int_name] = {
'type': ports.items[portcnt].type,
'mtu': ports.items[portcnt].mtu,
'vlan': ports.items[portcnt].vlan,
'address': ports.items[portcnt].address,
'services': ports.items[portcnt].services,
'gateway': ports.items[portcnt].gateway,
'netmask': ports.items[portcnt].netmask,
}
return net_facts
def generate_capacity_dict(blade):
capacity_facts = {}
total_cap = blade.arrays.list_arrays_space()
file_cap = blade.arrays.list_arrays_space(type='file-system')
object_cap = blade.arrays.list_arrays_space(type='object-store')
capacity_facts['total'] = total_cap.items[0].capacity
capacity_facts['aggregate'] = {
'data_reduction': total_cap.items[0].space.data_reduction,
'snapshots': total_cap.items[0].space.snapshots,
'total_physical': total_cap.items[0].space.total_physical,
'unique': total_cap.items[0].space.unique,
'virtual': total_cap.items[0].space.virtual,
}
capacity_facts['file-system'] = {
'data_reduction': file_cap.items[0].space.data_reduction,
'snapshots': file_cap.items[0].space.snapshots,
'total_physical': file_cap.items[0].space.total_physical,
'unique': file_cap.items[0].space.unique,
'virtual': file_cap.items[0].space.virtual,
}
capacity_facts['object-store'] = {
'data_reduction': object_cap.items[0].space.data_reduction,
'snapshots': object_cap.items[0].space.snapshots,
'total_physical': object_cap.items[0].space.total_physical,
'unique': object_cap.items[0].space.unique,
        'virtual': object_cap.items[0].space.virtual,
}
return capacity_facts
def generate_snap_dict(blade):
snap_facts = {}
snaps = blade.file_system_snapshots.list_file_system_snapshots()
for snap in range(0, len(snaps.items)):
snapshot = snaps.items[snap].name
snap_facts[snapshot] = {
'destroyed': snaps.items[snap].destroyed,
'source': snaps.items[snap].source,
'suffix': snaps.items[snap].suffix,
'source_destroyed': snaps.items[snap].source_destroyed,
}
return snap_facts
def generate_fs_dict(blade):
fs_facts = {}
fsys = blade.file_systems.list_file_systems()
for fsystem in range(0, len(fsys.items)):
share = fsys.items[fsystem].name
fs_facts[share] = {
'fast_remove': fsys.items[fsystem].fast_remove_directory_enabled,
'snapshot_enabled': fsys.items[fsystem].snapshot_directory_enabled,
'provisioned': fsys.items[fsystem].provisioned,
'destroyed': fsys.items[fsystem].destroyed,
}
if fsys.items[fsystem].http.enabled:
fs_facts[share]['http'] = fsys.items[fsystem].http.enabled
if fsys.items[fsystem].smb.enabled:
fs_facts[share]['smb_mode'] = fsys.items[fsystem].smb.acl_mode
if fsys.items[fsystem].nfs.enabled:
fs_facts[share]['nfs_rules'] = fsys.items[fsystem].nfs.rules
api_version = blade.api_version.list_versions().versions
if HARD_LIMIT_API_VERSION in api_version:
fs_facts[share]['hard_limit'] = fsys.items[fsystem].hard_limit_enabled
return fs_facts
def main():
argument_spec = purefb_argument_spec()
argument_spec.update(dict(
gather_subset=dict(default='minimum', type='list',)
))
module = AnsibleModule(argument_spec, supports_check_mode=True)
blade = get_blade(module)
versions = blade.api_version.list_versions().versions
if MIN_REQUIRED_API_VERSION not in versions:
module.fail_json(msg='FlashBlade REST version not supported. Minimum version required: {0}'.format(MIN_REQUIRED_API_VERSION))
subset = [test.lower() for test in module.params['gather_subset']]
valid_subsets = ('all', 'minimum', 'config', 'performance', 'capacity',
'network', 'subnets', 'lags',
'filesystems', 'snapshots')
subset_test = (test in valid_subsets for test in subset)
if not all(subset_test):
module.fail_json(msg="value must gather_subset must be one or more of: %s, got: %s"
% (",".join(valid_subsets), ",".join(subset)))
facts = {}
if 'minimum' in subset or 'all' in subset:
facts['default'] = generate_default_dict(blade)
if 'performance' in subset or 'all' in subset:
facts['performance'] = generate_perf_dict(blade)
if 'config' in subset or 'all' in subset:
facts['config'] = generate_config_dict(blade)
if 'capacity' in subset or 'all' in subset:
facts['capacity'] = generate_capacity_dict(blade)
if 'lags' in subset or 'all' in subset:
facts['lag'] = generate_lag_dict(blade)
if 'network' in subset or 'all' in subset:
facts['network'] = generate_network_dict(blade)
if 'subnets' in subset or 'all' in subset:
facts['subnet'] = generate_subnet_dict(blade)
if 'filesystems' in subset or 'all' in subset:
facts['filesystems'] = generate_fs_dict(blade)
if 'snapshots' in subset or 'all' in subset:
facts['snapshots'] = generate_snap_dict(blade)
module.exit_json(ansible_facts={'ansible_purefb_facts': facts})
if __name__ == '__main__':
main()
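# Illustrative follow-on task (values are placeholders): the collected data is
# returned under the `ansible_purefb_facts` fact, so a later task can read e.g.
#
#   - name: show total capacity
#     debug:
#       var: ansible_purefb_facts['capacity']['total']
#
# (the 'capacity' key is only present when that subset was gathered).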
|
|
import django
import logging
import yaml
import os
import datetime
import re
from itertools import chain
from django.core.exceptions import ValidationError
from django.db import models
from django.apps import apps
from django.conf import settings
from djangae import environment
from djangae.db.utils import get_top_concrete_parent
from djangae.core.validators import MaxBytesValidator
from djangae.sandbox import allow_mode_write
from google.appengine.api.datastore import (
Entity,
Delete,
Query
)
logger = logging.getLogger(__name__)
_project_special_indexes = {}
_app_special_indexes = {}
_last_loaded_times = {}
_indexes_loaded = False
MAX_COLUMNS_PER_SPECIAL_INDEX = getattr(settings, "DJANGAE_MAX_COLUMNS_PER_SPECIAL_INDEX", 3)
CHARACTERS_PER_COLUMN = [31, 44, 54, 63, 71, 79, 85, 91, 97, 103]
STRIP_PERCENTS = django.VERSION < (1, 10)
def _get_project_index_file():
project_index_file = os.path.join(environment.get_application_root(), "djangaeidx.yaml")
return project_index_file
def _get_app_index_files():
index_files = []
for app_config in apps.get_app_configs():
app_path = app_config.path
project_index_file = os.path.join(app_path, "djangaeidx.yaml")
index_files.append(project_index_file)
return index_files
def _get_table_from_model(model_class):
return model_class._meta.db_table.encode("utf-8")
def _is_iterable(value):
return hasattr(value, '__iter__') # is a list, tuple or set?
def _deduplicate_list(value_list):
""" Deduplicate list of elements; value_list is expected to be a list
of containing hashable elements. """
return list(set(value_list))
def _make_lower(value):
""" Make string and list of strings lowercase """
if _is_iterable(value):
return [v.lower() for v in value]
else:
return value.lower()
def _merged_indexes():
"""
Returns the combination of the app and project special indexes
"""
global _project_special_indexes
global _app_special_indexes
result = _app_special_indexes.copy()
for model, indexes in _project_special_indexes.items():
for field_name, values in indexes.items():
result.setdefault(
model, {}
).setdefault(field_name, []).extend(values)
return result
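# Illustrative shape of the merged mapping returned above, keyed by Datastore
# kind (the model's db_table); model and field names are placeholders:
#
#   {
#       "myapp_person": {
#           "first_name": ["iexact", "contains"],
#           "birthday": ["month", "year"],
#       }
#   }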
def load_special_indexes():
global _project_special_indexes
global _app_special_indexes
global _last_loaded_times
global _indexes_loaded
if _indexes_loaded and environment.is_production_environment():
# Index files can't change if we're on production, so once they're loaded we don't need
# to check their modified times and reload them
return
def _read_file(filepath):
# Load any existing indexes
with open(filepath, "r") as stream:
            data = yaml.safe_load(stream)
return data
project_index_file = _get_project_index_file()
app_files = _get_app_index_files()
files_to_reload = {}
# Go through and reload any files that we find
for file_path in [project_index_file] + app_files:
if not os.path.exists(file_path):
continue
mtime = os.path.getmtime(file_path)
if _last_loaded_times.get(file_path) and _last_loaded_times[file_path] == mtime:
# The file hasn't changed since last time, so do nothing
continue
else:
# Mark this file for reloading, store the current modified time
files_to_reload[file_path] = mtime
# First, reload the project index file,
if project_index_file in files_to_reload:
mtime = files_to_reload[project_index_file]
_project_special_indexes = _read_file(project_index_file)
_last_loaded_times[project_index_file] = mtime
# Remove it from the files to reload
del files_to_reload[project_index_file]
# Now, load the rest of the files and update any entries
for file_path in files_to_reload:
        mtime = files_to_reload[file_path]
new_data = _read_file(file_path)
_last_loaded_times[file_path] = mtime
# Update the app special indexes list
for model, indexes in new_data.items():
for field_name, values in indexes.items():
_app_special_indexes.setdefault(
model, {}
).setdefault(field_name, []).extend(values)
_indexes_loaded = True
logger.debug("Loaded special indexes for %d models", len(_merged_indexes()))
def special_index_exists(model_class, field_name, index_type):
table = _get_table_from_model(model_class)
return index_type in _merged_indexes().get(table, {}).get(field_name, [])
def special_indexes_for_model(model_class):
classes = [model_class] + model_class._meta.parents.keys()
result = {}
for klass in classes:
result.update(_merged_indexes().get(_get_table_from_model(klass), {}))
return result
def special_indexes_for_column(model_class, column):
return special_indexes_for_model(model_class).get(column, [])
def write_special_indexes():
"""
Writes the project-specific indexes to the project djangaeidx.yaml
"""
project_index_file = _get_project_index_file()
with allow_mode_write():
with open(project_index_file, "w") as stream:
stream.write(yaml.dump(_project_special_indexes))
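# Illustrative djangaeidx.yaml content produced by the dump above (model and
# field names are placeholders):
#
#   myapp_person:
#     first_name:
#     - contains
#     - iexact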
def add_special_index(model_class, field_name, indexer, operator, value=None):
from djangae.utils import in_testing
from django.conf import settings
index_type = indexer.prepare_index_type(operator, value)
field_name = field_name.encode("utf-8") # Make sure we are working with strings
load_special_indexes()
if special_index_exists(model_class, field_name, index_type):
return
if environment.is_production_environment() or (
in_testing() and not getattr(settings, "GENERATE_SPECIAL_INDEXES_DURING_TESTING", False)
):
raise RuntimeError(
"There is a missing index in your djangaeidx.yaml - \n\n{0}:\n\t{1}: [{2}]".format(
_get_table_from_model(model_class), field_name, index_type
)
)
_project_special_indexes.setdefault(
_get_table_from_model(model_class), {}
).setdefault(field_name, []).append(str(index_type))
write_special_indexes()
class IgnoreForIndexing(Exception):
"""
An exception thrown from prep_value_for_database if the index column
should be removed from the entity.
Frustratingly, the legacy icontains and contains Indexers use 'value' when
determining the column name, and so you must pass the prepared value for the
column anyway, even though it won't be used.
When the legacy icontains/contains indexers are removed we should:
- Remove __init__ from this class
- Remove 'value' as an argument to indexed_column_name
- Stop setting 'values' from processed_value in django_instance_to_entity
"""
def __init__(self, processed_value):
self.processed_value = processed_value
class Indexer(object):
# Set this to True if prep_value_for_database returns additional Entity instances
# to save as descendents, rather than values to index as columns
PREP_VALUE_RETURNS_ENTITIES = False
# **IMPORTANT! If you return Entities from an indexer, the kind *must* start with
    # _djangae_idx_XXXX where XXXX is the top concrete model kind of the instance you
# are indexing. If you do not do this, then the tables will not be correctly flushed
# when the database is flushed**
@classmethod
def cleanup(cls, datastore_key):
"""
        Called when an instance is deleted, if the instance has an index
which uses this indexer. This is mainly for cleaning up descendent kinds
(e.g. like those used in contains + icontains)
"""
pass
def handles(self, field, operator):
"""
When given a field instance and an operator (e.g. gt, month__gt etc.)
        returns True if this indexer should handle that situation, and False
        otherwise.
"""
raise NotImplementedError()
def validate_can_be_indexed(self, value, negated):
"""Return True if the value is indexable, False otherwise"""
raise NotImplementedError()
def prep_value_for_database(self, value, index, **kwargs): raise NotImplementedError()
def prep_value_for_query(self, value, **kwargs): raise NotImplementedError()
def indexed_column_name(self, field_column, value, index): raise NotImplementedError()
def prep_query_operator(self, op):
if "__" in op:
return op.split("__")[-1]
else:
return "exact" # By default do an exact operation
def prepare_index_type(self, index_type, value): return index_type
def unescape(self, value):
value = value.replace("\\_", "_")
value = value.replace("\\%", "%")
value = value.replace("\\\\", "\\")
return value
class StringIndexerMixin(object):
STRING_FIELDS = (
models.TextField,
models.CharField,
models.URLField,
models.DateTimeField, # Django allows these for some reason
models.DateField,
models.TimeField,
models.IntegerField, # SQL coerces ints to strings, and so we need these too
models.PositiveIntegerField,
models.AutoField
)
def handles(self, field, operator):
from djangae.fields import iterable
try:
# Make sure the operator is in there
operator.split("__").index(self.OPERATOR)
except ValueError:
return False
if isinstance(field, self.STRING_FIELDS):
return True
elif (
isinstance(field, (iterable.ListField, iterable.SetField)) and
field.item_field_type.__class__ in self.STRING_FIELDS and operator.startswith("item__")
):
return True
return False
class DateIndexerMixin(object):
def handles(self, field, operator):
from djangae.fields import iterable
DATE_FIELDS = (
models.DateField,
models.DateTimeField
)
if operator.split("__")[0] != self.OPERATOR:
return False
if isinstance(field, DATE_FIELDS):
return True
elif (
isinstance(field, (iterable.ListField, iterable.SetField)) and
field.item_field_type.__class__ in DATE_FIELDS and operator.startswith("item__")
):
return True
return False
class TimeIndexerMixin(object):
def handles(self, field, operator):
from djangae.fields import iterable
TIME_FIELDS = (
models.TimeField,
models.DateTimeField
)
if operator.split("__")[0] != self.OPERATOR:
return False
if isinstance(field, TIME_FIELDS):
return True
elif (
isinstance(field, (iterable.ListField, iterable.SetField)) and
field.item_field_type.__class__ in TIME_FIELDS and operator.startswith("item__")
):
return True
return False
class IExactIndexer(StringIndexerMixin, Indexer):
OPERATOR = 'iexact'
def validate_can_be_indexed(self, value, negated):
return len(value) < 500
def prep_value_for_database(self, value, index, **kwargs):
if value is None:
return None
if isinstance(value, (int, long)):
value = str(value)
return value.lower()
def prep_value_for_query(self, value, **kwargs):
value = self.unescape(value)
return value.lower()
def indexed_column_name(self, field_column, value, index):
return "_idx_iexact_{0}".format(field_column)
class HourIndexer(TimeIndexerMixin, Indexer):
OPERATOR = 'hour'
def validate_can_be_indexed(self, value, negated):
return isinstance(value, datetime.datetime)
def prep_value_for_database(self, value, index, **kwargs):
if value:
return value.hour
return None
def prep_value_for_query(self, value, **kwargs):
if isinstance(value, (int, long)):
return value
if isinstance(value, basestring):
value = datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
return value.hour
def indexed_column_name(self, field_column, value, index):
return "_idx_hour_{0}".format(field_column)
class MinuteIndexer(TimeIndexerMixin, Indexer):
OPERATOR = 'minute'
def validate_can_be_indexed(self, value, negated):
return isinstance(value, datetime.datetime)
def prep_value_for_database(self, value, index, **kwargs):
if value:
return value.minute
return None
def prep_value_for_query(self, value, **kwargs):
if isinstance(value, (int, long)):
return value
if isinstance(value, basestring):
value = datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
return value.minute
def indexed_column_name(self, field_column, value, index):
return "_idx_minute_{0}".format(field_column)
class SecondIndexer(TimeIndexerMixin, Indexer):
OPERATOR = 'second'
def validate_can_be_indexed(self, value, negated):
return isinstance(value, datetime.datetime)
def prep_value_for_database(self, value, index, **kwargs):
if value:
return value.second
return None
def prep_value_for_query(self, value, **kwargs):
if isinstance(value, (int, long)):
return value
if isinstance(value, basestring):
value = datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
return value.second
def indexed_column_name(self, field_column, value, index):
return "_idx_second_{0}".format(field_column)
class DayIndexer(DateIndexerMixin, Indexer):
OPERATOR = 'day'
def validate_can_be_indexed(self, value, negated):
return isinstance(value, (datetime.datetime, datetime.date))
def prep_value_for_database(self, value, index, **kwargs):
if value:
return value.day
return None
def prep_value_for_query(self, value, **kwargs):
if isinstance(value, (int, long)):
return value
if isinstance(value, basestring):
value = datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
return value.day
def indexed_column_name(self, field_column, value, index):
return "_idx_day_{0}".format(field_column)
class YearIndexer(DateIndexerMixin, Indexer):
OPERATOR = 'year'
def validate_can_be_indexed(self, value, negated):
return isinstance(value, (datetime.datetime, datetime.date))
def prep_value_for_database(self, value, index, **kwargs):
if value:
return value.year
return None
def prep_value_for_query(self, value, **kwargs):
if isinstance(value, (int, long)):
return value
if isinstance(value, basestring):
value = datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
return value.year
def indexed_column_name(self, field_column, value, index):
return "_idx_year_{0}".format(field_column)
class MonthIndexer(DateIndexerMixin, Indexer):
OPERATOR = 'month'
def validate_can_be_indexed(self, value, negated):
return isinstance(value, (datetime.datetime, datetime.date))
def prep_value_for_database(self, value, index, **kwargs):
if value:
return value.month
return None
def prep_value_for_query(self, value, **kwargs):
if isinstance(value, (int, long)):
return value
if isinstance(value, basestring):
value = datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
return value.month
def indexed_column_name(self, field_column, value, index):
return "_idx_month_{0}".format(field_column)
class WeekDayIndexer(DateIndexerMixin, Indexer):
OPERATOR = 'week_day'
def validate_can_be_indexed(self, value, negated):
return isinstance(value, (datetime.datetime, datetime.date))
def prep_value_for_database(self, value, index, **kwargs):
if value:
zero_based_weekday = value.weekday()
if zero_based_weekday == 6: # Sunday
return 1 # Django treats the week as starting at Sunday, but 1 based
else:
return zero_based_weekday + 2
return None
def prep_value_for_query(self, value, **kwargs):
return value
def indexed_column_name(self, field_column, value, index):
return "_idx_week_day_{0}".format(field_column)
class ContainsIndexer(StringIndexerMixin, Indexer):
PREP_VALUE_RETURNS_ENTITIES = True
OPERATOR = u'contains'
INDEXED_COLUMN_NAME = OPERATOR
def validate_can_be_indexed(self, value, negated):
if negated:
return False
try:
MaxBytesValidator(limit_value=1500)(value)
return True
except ValidationError:
return False
@classmethod
def cleanup(cls, datastore_key):
# Kindless query, we don't know the kinds because we don't know all the fields
# that use contains. But, we do know that all the things we need to delete are:
# a.) A descendent
# b.) Have a key name of whatever OPERATOR is
qry = Query(keys_only=True, namespace=datastore_key.namespace())
qry = qry.Ancestor(datastore_key)
# Delete all the entities matching the ancestor query
Delete([x for x in qry.Run() if x.name() == cls.OPERATOR])
def _generate_kind_name(self, model, column):
return "_djangae_idx_{}_{}".format(
get_top_concrete_parent(model)._meta.db_table,
column
)
def _generate_permutations(self, value):
return [value[i:] for i in range(len(value))]
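    # e.g. _generate_permutations("cat") == ["cat", "at", "t"]; storing every
    # suffix lets the >=/<= range query in prep_value_for_query match any
    # substring of the indexed value.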
def prep_value_for_database(self, value, index, model, column):
if value is None:
raise IgnoreForIndexing([])
# If this a date or a datetime, or something that supports isoformat, then use that
if hasattr(value, "isoformat"):
value = value.isoformat()
if _is_iterable(value):
value = list(chain(*[self._generate_permutations(v) for v in value]))
else:
value = self._generate_permutations(value)
if not value:
raise IgnoreForIndexing([])
value = list(set(value)) # De-duplicate
entity = Entity(self._generate_kind_name(model, column), name=self.OPERATOR)
entity[self.INDEXED_COLUMN_NAME] = value
return [entity]
def prep_query_operator(self, operator):
return "IN"
def indexed_column_name(self, field_column, value, index):
        # prep_value_for_query returns a list of keys, so we return __key__ as the column
return "__key__"
def prep_value_for_query(self, value, model, column, connection):
"""
Return a list of IDs of the associated contains models, these should
match up with the IDs from the parent entities
"""
if hasattr(value, "isoformat"):
value = value.isoformat()
else:
value = unicode(value)
value = self.unescape(value)
if STRIP_PERCENTS:
# SQL does __contains by doing LIKE %value%
if value.startswith("%") and value.endswith("%"):
value = value[1:-1]
namespace = connection.settings_dict.get("NAMESPACE", "")
qry = Query(self._generate_kind_name(model, column), keys_only=True, namespace=namespace)
qry['{} >='.format(self.INDEXED_COLUMN_NAME)] = value
qry['{} <='.format(self.INDEXED_COLUMN_NAME)] = value + u'\ufffd'
# We can't filter on the 'name' as part of the query, because the name is the key and these
# are child entities of the ancestor entities which they are indexing, and as we don't know
# the keys of the ancestor entities we can't create the complete keys, hence the comparison
# of `x.name() == self.OPERATOR` happens here in python
resulting_keys = set([x.parent() for x in qry.Run() if x.name() == self.OPERATOR])
return resulting_keys
class IContainsIndexer(ContainsIndexer):
OPERATOR = 'icontains'
def _generate_permutations(self, value):
return super(IContainsIndexer, self)._generate_permutations(value.lower())
def prep_value_for_query(self, value, model, column, connection):
return super(IContainsIndexer, self).prep_value_for_query(value.lower(), model, column, connection)
class LegacyContainsIndexer(StringIndexerMixin, Indexer):
OPERATOR = 'contains'
def number_of_permutations(self, value):
return sum(range(len(value)+1))
def validate_can_be_indexed(self, value, negated):
if negated:
return False
return isinstance(value, basestring) and len(value) <= 500
def prep_value_for_database(self, value, index, **kwargs):
results = []
if value:
# If this a date or a datetime, or something that supports isoformat, then use that
if hasattr(value, "isoformat"):
value = value.isoformat()
if self.number_of_permutations(value) > MAX_COLUMNS_PER_SPECIAL_INDEX*500:
raise ValueError(
"Can't index for contains query, this value is too long and has too many "
"permutations. You can increase the DJANGAE_MAX_COLUMNS_PER_SPECIAL_INDEX "
"setting to fix that. Use with caution."
)
if len(value) > CHARACTERS_PER_COLUMN[-1]:
raise ValueError(
"Can't index for contains query, this value can be maximum {0} characters long."
.format(CHARACTERS_PER_COLUMN[-1])
)
if _is_iterable(value):
# `value` is a list of strings. Generate a single combined list containing the
# substrings of each string in `value`
for element in value:
length = len(element)
lists = [element[i:j + 1] for i in range(length) for j in range(i, length)]
results.extend(lists)
else:
# `value` is a string. Generate a list of all its substrings.
length = len(value)
lists = [value[i:j + 1] for i in range(length) for j in range(i, length)]
results.extend(lists)
if not results:
raise IgnoreForIndexing([])
return _deduplicate_list(results)
def prep_value_for_query(self, value, **kwargs):
if hasattr(value, "isoformat"):
value = value.isoformat()
else:
value = unicode(value)
value = self.unescape(value)
if STRIP_PERCENTS:
# SQL does __contains by doing LIKE %value%
if value.startswith("%") and value.endswith("%"):
value = value[1:-1]
return value
def indexed_column_name(self, field_column, value, index):
        # Used at query time to pick the right column for a given value length.
length = len(value)
column_number = 0
for x in CHARACTERS_PER_COLUMN:
if length > x:
column_number += 1
return "_idx_contains_{0}_{1}".format(field_column, column_number)
def prep_query_operator(self, op):
return "exact"
class LegacyIContainsIndexer(LegacyContainsIndexer):
OPERATOR = 'icontains'
def prep_value_for_database(self, value, index, **kwargs):
if value is None:
raise IgnoreForIndexing("")
value = _make_lower(value)
result = super(LegacyIContainsIndexer, self).prep_value_for_database(value, index)
return result if result else None
def indexed_column_name(self, field_column, value, index):
column_name = super(LegacyIContainsIndexer, self).indexed_column_name(field_column, value, index)
return column_name.replace('_idx_contains_', '_idx_icontains_')
def prep_value_for_query(self, value, **kwargs):
return super(LegacyIContainsIndexer, self).prep_value_for_query(value).lower()
class EndsWithIndexer(StringIndexerMixin, Indexer):
"""
dbindexer originally reversed the string and did a startswith on it.
However, this is problematic as it uses an inequality and therefore
    limits the queries you can perform. Instead, we store all suffixes of the
    value in a list field. Then we can just do an exact lookup on the value,
    which isn't as nice but is more flexible.
"""
OPERATOR = 'endswith'
def validate_can_be_indexed(self, value, negated):
if negated:
return False
return isinstance(value, basestring) and len(value) < 500
def prep_value_for_database(self, value, index, **kwargs):
if value is None:
return None
results = []
if _is_iterable(value):
# `value` is a list of strings. Create a single combined list of "endswith" values
# of all the strings in the list
for element in value:
for i in range(0, len(element)):
results.append(element[i:])
else:
# `value` is a string. Create a list of "endswith" strings.
for i in range(0, len(value)):
results.append(value[i:])
if not results:
return None
return _deduplicate_list(results)
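    # e.g. "cat" is stored as ["cat", "at", "t"], so field__endswith="at"
    # becomes an exact match against that list.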
def prep_value_for_query(self, value, **kwargs):
value = self.unescape(value)
if STRIP_PERCENTS:
if value.startswith("%"):
value = value[1:]
return value
def indexed_column_name(self, field_column, value, index):
return "_idx_endswith_{0}".format(field_column)
def prep_query_operator(self, op):
return "exact"
class IEndsWithIndexer(EndsWithIndexer):
"""
Same as above, just all lower cased
"""
OPERATOR = 'iendswith'
def prep_value_for_database(self, value, index, **kwargs):
if value:
value = _make_lower(value)
return super(IEndsWithIndexer, self).prep_value_for_database(value, index)
def prep_value_for_query(self, value, **kwargs):
return super(IEndsWithIndexer, self).prep_value_for_query(value.lower())
def indexed_column_name(self, field_column, value, index):
return "_idx_iendswith_{0}".format(field_column)
class StartsWithIndexer(StringIndexerMixin, Indexer):
"""
Although we can do a startswith natively, doing it this way allows us to
use more queries (E.g. we save an exclude)
"""
OPERATOR = 'startswith'
def validate_can_be_indexed(self, value, negated):
if negated:
return False
return isinstance(value, basestring) and len(value) < 500
def prep_value_for_database(self, value, index, **kwargs):
if value is None:
return None
if isinstance(value, datetime.datetime):
value = value.strftime("%Y-%m-%d %H:%M:%S")
results = []
if _is_iterable(value):
# `value` is a list of strings. Create a single combined list of "startswith" values
# of all the strings in the list
for element in value:
for i in range(1, len(element) + 1):
results.append(element[:i])
else:
# `value` is a string. Create a list of "startswith" strings.
for i in range(1, len(value) + 1):
results.append(value[:i])
if not results:
return None
return _deduplicate_list(results)
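    # e.g. "cat" is stored as ["c", "ca", "cat"], so field__startswith="ca"
    # becomes an exact match against that list.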
def prep_value_for_query(self, value, **kwargs):
value = self.unescape(value)
if STRIP_PERCENTS:
if value.endswith("%"):
value = value[:-1]
return value
def indexed_column_name(self, field_column, value, index):
return "_idx_startswith_{0}".format(field_column)
def prep_query_operator(self, op):
return "exact"
class IStartsWithIndexer(StartsWithIndexer):
"""
Same as above, just all lower cased
"""
OPERATOR = 'istartswith'
def prep_value_for_database(self, value, index, **kwargs):
if value:
value = _make_lower(value)
return super(IStartsWithIndexer, self).prep_value_for_database(value, index)
def prep_value_for_query(self, value, **kwargs):
return super(IStartsWithIndexer, self).prep_value_for_query(value.lower())
def indexed_column_name(self, field_column, value, index):
return "_idx_istartswith_{0}".format(field_column)
class RegexIndexer(StringIndexerMixin, Indexer):
OPERATOR = 'regex'
def prepare_index_type(self, index_type, value):
"""
If we're dealing with RegexIndexer, we create a new index for each
regex pattern. Indexes are called regex__pattern.
"""
return '{}__{}'.format(index_type, value.encode("utf-8").encode('hex'))
def validate_can_be_indexed(self, value, negated):
if negated:
return False
return isinstance(value, bool)
def get_pattern(self, index):
try:
return index.split('__')[-1].decode('hex').decode("utf-8")
except IndexError:
return ''
def check_if_match(self, value, index, flags=0):
pattern = self.get_pattern(index)
if value:
if _is_iterable(value):
if any([bool(re.search(pattern, x, flags)) for x in value]):
return True
else:
if isinstance(value, (int, long)):
value = str(value)
return bool(re.search(pattern, value, flags))
return False
def prep_value_for_database(self, value, index, **kwargs):
return self.check_if_match(value, index)
def prep_value_for_query(self, value, **kwargs):
return True
def indexed_column_name(self, field_column, value, index):
return "_idx_regex_{0}_{1}".format(
field_column, self.get_pattern(index).encode("utf-8").encode('hex')
)
def prep_query_operator(self, op):
return "exact"
class IRegexIndexer(RegexIndexer):
OPERATOR = 'iregex'
def prepare_index_type(self, index_type, value):
return '{}__{}'.format(index_type, value.encode('hex'))
def prep_value_for_database(self, value, index, **kwargs):
return self.check_if_match(value, index, flags=re.IGNORECASE)
def indexed_column_name(self, field_column, value, index):
return "_idx_iregex_{0}_{1}".format(field_column, self.get_pattern(index).encode('hex'))
_REGISTERED_INDEXERS = []
def register_indexer(indexer_class):
global _REGISTERED_INDEXERS
_REGISTERED_INDEXERS.append(indexer_class())
def get_indexer(field, operator):
global _REGISTERED_INDEXERS
for indexer in _REGISTERED_INDEXERS:
if indexer.handles(field, operator):
return indexer
def indexers_for_model(model_class):
indexes = special_indexes_for_model(model_class)
indexers = []
for field in model_class._meta.fields:
if field.name in indexes:
for operator in indexes[field.name]:
indexers.append(get_indexer(field, operator))
return set(indexers)
register_indexer(IExactIndexer)
if getattr(settings, "DJANGAE_USE_LEGACY_CONTAINS_LOGIC", False):
register_indexer(LegacyContainsIndexer)
register_indexer(LegacyIContainsIndexer)
else:
register_indexer(ContainsIndexer)
register_indexer(IContainsIndexer)
register_indexer(HourIndexer)
register_indexer(MinuteIndexer)
register_indexer(SecondIndexer)
register_indexer(DayIndexer)
register_indexer(MonthIndexer)
register_indexer(YearIndexer)
register_indexer(WeekDayIndexer)
register_indexer(EndsWithIndexer)
register_indexer(IEndsWithIndexer)
register_indexer(StartsWithIndexer)
register_indexer(IStartsWithIndexer)
register_indexer(RegexIndexer)
register_indexer(IRegexIndexer)
|
|
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 12 16:50:15 2017
@author: Damien
"""
import random
import math
import numpy as np
from deap import base
from deap import creator
from deap import tools
import recognizer as rc
import template_patterns as tp
import matplotlib.pyplot as plt
POPULATION_SIZE = 50
MAX_GENERATIONS = 400
# Desired segment length
DLEN = 70
CROSSOVER_RATE = 0.5
SELECTION_RATE = 1
SEL_TOURNAMENT_SIZE = 10
MIN_SEGMENT_LENGTH = 10
# Probability to add a datapoint during mutate
# If mutate does not add a point, it will drop a point.
MUTATE_ADD_PROB = 0.5
MUTATE_PROB = 1 - CROSSOVER_RATE
# Stores all template patterns to be matched.
TEMPLATE_PATTERNS = tp.template_patterns()
def runGA(data_x,data_y):
"""
Input:
data_x: Data values, x-axis(time)
data_y: Data values, y-axis
Description: Runs the GA algorithm on the data
"""
    # data_x and data_y are expected to be flat (1D) sequences of equal length.
numDataPts = len(data_x)
segmentCount = int(math.ceil(numDataPts / DLEN)) - 1
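    # Illustrative: with 350 data points and DLEN == 70 this gives
    # ceil(350 / 70) - 1 == 4 cut points, i.e. five segments of roughly
    # DLEN points each.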
# Convert to numpy format
data_x = np.array(data_x)
data_y = np.array(data_y)
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", set, fitness=creator.FitnessMin)
toolbox = base.Toolbox()
    # Each chromosome holds segmentCount cut points, with values between 1 and numDataPts-2
# Generate permutations with no repetition
toolbox.register("indices", random.sample, range(1,numDataPts-1), segmentCount)
toolbox.register("individual",tools.initIterate, creator.Individual,
toolbox.indices)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("mate",tools.cxMessyOnePoint)
toolbox.register("mutate",my_mutate)
toolbox.register("evaluate",evaluate,data_x,data_y)
toolbox.register("select", tools.selTournament)
# For gathering statistics
stats_fit = tools.Statistics(key=lambda ind: ind.fitness.values)
stats_fit.register("avg", np.nanmean)
stats_fit.register("std", np.nanstd)
stats_fit.register("min", np.min)
stats_fit.register("max", np.nanmax)
stats_size = tools.Statistics(key=len)
stats_size.register("avg", np.mean)
stats_size.register("std", np.std)
stats_size.register("min", np.min)
stats_size.register("max", np.max)
mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
logbook = tools.Logbook()
pop = toolbox.population(n=POPULATION_SIZE)
# Evaluate the entire population
fitnesses = list(map(toolbox.evaluate, pop))
for ind, fit in zip(pop, fitnesses):
ind.fitness.values = fit
for g in range(MAX_GENERATIONS):
# Statistics collection
record = mstats.compile(pop)
print("Generation: " + str(g))
#print(record['fitness'])
logbook.record(gen=g,**record)
# Select the next generation individuals
offspring = toolbox.select(pop, int(round(len(pop) * SELECTION_RATE)),
SEL_TOURNAMENT_SIZE)
# Clone the selected individuals
offspring = list(map(toolbox.clone, offspring))
# Apply crossover on the offspring
for child1, child2 in zip(offspring[::2], offspring[1::2]):
if random.random() < CROSSOVER_RATE:
                # Crossover requires a list, so convert out of the set data structure
childlist1 = list(child1)
childlist2 = list(child2)
toolbox.mate(childlist1, childlist2)
child1.clear()
child2.clear()
child1.update(childlist1)
child2.update(childlist2)
del child1.fitness.values
del child2.fitness.values
# Apply mutation on the offspring
for mutant in offspring:
if random.random() < MUTATE_PROB:
toolbox.mutate(mutant,numDataPts)
del mutant.fitness.values
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# The population is entirely replaced by the offspring
pop[:] = offspring
print(logbook)
with open('GA_log.txt','w') as f:
f.write(logbook.__str__())
plot_statistics(logbook)
# Return the best individual
return (tools.selBest(pop,1))[0]
def my_mutate(chromo,maxVal):
"""
Input:
chromo: Chromosome
maxVal: Largest value +1 the chromosome can take
Output:
returns a sequence of Perceptually important point (PIP)
identification and the corresponding time values of size Q_length
"""
if (random.random() < MUTATE_ADD_PROB and len(chromo) < maxVal):
chromo_list = list(chromo)
# Mutate by adding element
disallowed_values = set()
for val in chromo_list:
if val-MIN_SEGMENT_LENGTH+1 < 0:
lower_bound = 0
else:
lower_bound = val-MIN_SEGMENT_LENGTH+1
if val+MIN_SEGMENT_LENGTH > maxVal:
upper_bound = maxVal
else:
upper_bound = val+MIN_SEGMENT_LENGTH
            # set.union() returns a new set; update() is needed to accumulate in place.
            disallowed_values.update(range(lower_bound, upper_bound))
allowed_values = set(range(1,maxVal)) - disallowed_values
randNum = random.sample(allowed_values,1)[0]
chromo.add(randNum)
elif len(chromo) > 0:
# Mutate by removing element
valueToDel = random.sample(chromo,1)[0]
chromo.remove(valueToDel)
def evaluate(data_x,data_y,ind_set,plot_data=False):
"""
Description:
Calculates the fitness value by using the template method.
"""
# Get a list from the set data structure
ind = sorted(ind_set)
# Find number of different template lengths that exist
pat_len = set()
for pat in TEMPLATE_PATTERNS:
pat_len.add(len(pat))
pat_len = sorted(pat_len)
distortion_sum = 0
ind_len = len(ind)
for i in range(ind_len+1):
startIdx = ind[i-1] if i>0 else 0
endIdx = ind[i] if i<ind_len else len(data_x)
tmp_x = data_x[startIdx:endIdx]
tmp_y = data_y[startIdx:endIdx]
if(len(tmp_y) == 0):
print("This should not happen" + str(startIdx) + ' ' + str(endIdx))
pip_y,pip_x = rc.PIP_identification(tmp_y,tmp_x,7,isNumpyFormat=True)
distortion_val, pattern_name = rc.multiple_template_matching(
pip_y, pip_x, TEMPLATE_PATTERNS)
# Treat values above threshold as pattern not detected
#if distortion_val > 0.2:
# distortion_val = 5
#distortion_val += rc.temporal_control_penalty(endIdx-startIdx,70,2)
distortion_sum += distortion_val
# Early exit for invalid chromosomes.
        if np.isinf(distortion_sum):
break
# Plot for debugging
        if plot_data:
plt.plot(data_x,data_y)
plt.axvline(x=data_x[startIdx],linestyle='--')
plt.axvline(x=data_x[endIdx-1],linestyle='--')
plt.plot(pip_x,pip_y,'-x')
plt.title('Data Plot: ' + str(startIdx) + ' - ' + str(endIdx))
plt.show()
plt.plot(pip_x,pip_y,'-x',color='c')
plt.title('PIP Plot: ' + str(startIdx) + ' - ' + str(endIdx))
plt.show()
if distortion_val > 0.3:
print('No Detection.')
else:
print('Detected as ' + pattern_name)
print('Distortion value: ' + str(distortion_val))
#input('Press any key to continue...')
# Normalize the distortion value by num of segments
    if not np.isinf(distortion_sum):
distortion_sum /= (ind_len+1)
return (distortion_sum,)
def amplitude_control_penalty(PIP,dfr,dac):
"""
Description:
"""
min_pip = min(PIP)
fr = (max(PIP) - min_pip) / min_pip
d2 = fr - dfr
theta2 = dfr / dac
AC = 1 - (1/(1+math.exp(-d2/theta2)))
return AC
def plot_statistics(logbook):
gen = logbook.select("gen")
fit_mins = logbook.chapters["fitness"].select("min")
size_avgs = logbook.chapters["size"].select("avg")
fig, ax1 = plt.subplots()
line1 = ax1.plot(gen, fit_mins, "b-", label="Minimum Fitness")
ax1.set_xlabel("Generation")
ax1.set_ylabel("Fitness", color="b")
for tl in ax1.get_yticklabels():
tl.set_color("b")
ax2 = ax1.twinx()
line2 = ax2.plot(gen, size_avgs, "r-", label="Average Size")
ax2.set_ylabel("Size", color="r")
for tl in ax2.get_yticklabels():
tl.set_color("r")
lns = line1 + line2
labs = [l.get_label() for l in lns]
ax1.legend(lns, labs, loc="center right")
plt.savefig('stats_plot.pdf')
plt.show()
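# Hedged usage sketch (not part of the original script); see the docstring below.
def demo_runGA(num_points=300):
    """
    Illustrative driver for runGA(): it expects two equal-length sequences,
    data_x (time) and data_y (values). The synthetic sine wave used here is a
    placeholder for real data, and this function is not called on import.
    """
    demo_x = list(range(num_points))
    demo_y = [math.sin(i / 20.0) for i in demo_x]
    best = runGA(demo_x, demo_y)
    print('Best segmentation points: ' + str(sorted(best)))
    print('Fitness: ' + str(best.fitness.values))
    return best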
if __name__ == '__main__':
pass
|
|
# Copyright (c) 2006-2013 Regents of the University of Minnesota.
# For licensing terms, see the file LICENSE.
import copy
import os
import sys
import conf
import g
from grax.access_level import Access_Level
from grax.user import User
from gwis.query_filters import Query_Filters
from gwis.query_viewport import Query_Viewport
from gwis.exception.gwis_error import GWIS_Error
from item import item_base
from item import item_user_access
from item import item_user_watching
from item import item_versioned
# 2013.03.30: [lb] not sure we should/can import geofeature.
# If not, move to qb.item_mgr.
from item import geofeature
from item import link_value
from item.feat import branch
from item.util.item_type import Item_Type
log = g.log.getLogger('attachment')
class One(item_user_watching.One):
item_type_id = Item_Type.ATTACHMENT
item_type_table = 'attachment'
item_gwis_abbrev = 'attc'
# This is a little coupled: all this class's derived classes' item_types.
child_item_types = (
Item_Type.ATTACHMENT,
Item_Type.ANNOTATION,
Item_Type.ATTRIBUTE,
Item_Type.POST,
Item_Type.TAG,
Item_Type.THREAD,
)
item_save_order = 3
local_defns = [
# py/psql name, deft, send?, pkey?, pytyp, reqv
]
attr_defns = item_user_watching.One.attr_defns + local_defns
psql_defns = item_user_watching.One.psql_defns + local_defns
gwis_defns = item_base.One.attr_defns_reduce_for_gwis([])
# *** Constructor
__slots__ = ()
def __init__(self, qb=None, row=None, req=None, copy_from=None):
item_user_watching.One.__init__(self, qb, row, req, copy_from)
# *** Saving to the Database
#
def load_all_link_values(self, qb):
# The base class shouldn't import link_value, so send it one.
links = link_value.Many()
self.load_all_link_values_(qb, links, lhs=True, rhs=False, heavywt=True)
#
def save_core(self, qb):
item_user_watching.One.save_core(self, qb)
# Save to the 'attachment' table.
self.save_insert(qb, One.item_type_table, One.psql_defns)
#
def save_update(self, qb):
g.assurt(False) # Not impl. for attachment.
item_user_watching.One.save_update(self, qb)
self.save_insert(qb, One.item_type_table, One.psql_defns,
do_update=True)
#
def save_verify_creator_arbiter(self, qb):
# BUG nnnn: This is a hack until posts' and threads' GIA records
# are fixed. Currently, just one publicly-editable record is created.
# Ideally, there'd be two records, one public-readable, and one
# creator-editable/or-arbitable. But we're still left with how
# to give branch arbiters edit-access to all users' threads and posts,
# so arbiters can censor threads and posts... would that be controlled
# by a GIA record for branch arbiters, or would it be controlled by
# a new_item_policy record? For not, we do it in code: branch arbiters
# can edit posts and threads.
if self.version > 1:
if qb.username == conf.anonymous_username:
raise GWIS_Error('Must be item creator to edit said item.')
try:
branch.Many.branch_enforce_permissions(qb, Access_Level.arbiter)
except GWIS_Error, e:
            raise GWIS_Error('Must be the item creator or a branch arbiter to edit this item.')
# ***
#
@staticmethod
def as_insert_expression(qb, item):
insert_expr = (
"(%d, %d, %d, %d)"
% (item.system_id,
#? qb.branch_hier[0][0],
# or:
item.branch_id,
item.stack_id,
item.version,
))
return insert_expr
# ***
# ***
class Many(item_user_watching.Many):
one_class = One
# NOTE: Not Joining attachment table. It's got nothing new for us.
# *** Constructor
__slots__ = ()
def __init__(self):
item_user_watching.Many.__init__(self)
@staticmethod
def sql_format_timestamp(col_name_raw):
return (
"""
CASE
WHEN (%s > (now() - interval '1 day')) THEN
TO_CHAR(%s::TIMESTAMP, 'HH:MI am')
WHEN (%s > DATE_TRUNC('YEAR', now())) THEN
TO_CHAR(%s::TIMESTAMP, 'Mon DD')
ELSE
TO_CHAR(%s::TIMESTAMP, 'MM/DD/YYYY')
END
""" % (col_name_raw,
col_name_raw,
col_name_raw,
col_name_raw,
col_name_raw,))
# *** Public Interface
# Bug nnnn: 20110907: This and the next fcn. should be scrapped and replaced
   # with calls to link_value instead. The link_value class checks permissions on
# things we join -- and this fcn. does not. I think we should do a link_value
# search on the attachment type desired, distinct on the lhs stack IDs, and
# then return those items. And besides not respecting permissions or branching,
# this fcn. embeds SQL into inner.join that makes fetching leafier items not
   # find anything (that is, it does not respect use_filters_and_viewport).
#
# 2013.03.30: This is the old way of fetching attachments in a bbox.
# See sql_apply_query_filters for current implementation.
'''
def search_for_items(self, *args, **kwargs):
"""
Overrides the base class implementation (item_user_access). Fetching
attachments by viewport is a bit tricky, since attachments themselves
do not contain geometry. But if we fetch the link_values in a particular
viewport, we can then select the distinct set of attachments linked and
return those.
"""
qb = self.query_builderer(*args, **kwargs)
# The viewport only applies to the links, since attachments don't have
# geometry, so don't use a bbox for the attachments themselves. But we
# can use filters, but not all of them, so we'll clone the filters.
attc_qb = qb.clone(skip_clauses=True, skip_filtport=True, db_clone=True)
attc_qb.filters = copy.copy(qb.filters)
self.sql_clauses_cols_setup(attc_qb)
# If a viewport or geometry is specified, be magical.
if ((qb.viewport and qb.viewport.include)
or (qb.filters and (qb.filters.only_in_multi_geometry
# The filter used to be used for attachments
# but now it's just used for link_values
#or qb.filters.only_rhs_stack_ids
))):
# EXPLAIN: this code path is deprecated.
#
# We shouldn't search for attachments by bbox. Flashclient searches
# for all tags and attrs, and it searches link_values by rhs_stack_id.
# But [lb] doesn't think anyone should search for attcs by bbox.
log.warning('search_for_items: doing (slow?) attc-link_value search.')
log.verbose('search_for_items: yes vp: %s' % (self,))
# Make a query to get link_values and their geofeatures to determine
# which attachments to get (since attachments do not have geometry in
# group_item_access). Use the query as an embedded select and join
# against it to get just those attachments linked to items in the
# viewport or the filter.
inner_sql = self.sql_by_viewport_inner_sql(qb)
attc_qb.sql_clauses.inner.join += (
"""
JOIN (%s) AS link_items
ON (gia.stack_id = link_items.lhs_stack_id)
""" % (inner_sql,))
g.assurt(not attc_qb.viewport)
# NOTE: It's probably a better solution to selectively copy from
# qb.filters than to clone it and clear selectively (since we might add
# new filter options but forget to include them here).
#attc_qb.filters.filter_by_regions = ''
#attc_qb.filters.filter_by_watch_geom = False
#attc_qb.filters.filter_by_watch_item = 0
attc_qb.filters.only_in_multi_geometry = None
#attc_qb.filters.only_rhs_stack_ids = ''
# NOTE: Normally, the item classes shouldn't call Query_Overlord, but it
# doesn't reference the attachment classes, just geofeatures.
# Call without importing Query_Overlord.finalize_query(attc_qb):
attc_qb.item_mgr.finalize_query(attc_qb)
# NOTE: Not calling base class search_for_items.
log.verbose('search_for_items: attc_qb: %s' % (attc_qb,))
self.search_get_items(attc_qb)
attc_qb.db.close()
'''
#
def sql_by_viewport_inner_sql(self, qb):
# See comments above. Fetching attc-link-feat in one go is a no-go.
log.warning('sql_by_viewport_inner_sql: deprecated')
# Copy the qb, but exclude filters.
links_qb = qb.clone(skip_clauses=True, skip_filtport=True, db_clone=True)
links_qb.filters = Query_Filters(req=None)
# Would these help?:
# filter_by_username, filter_by_unread, min_access_level
#links_qb.filters.filter_by_regions = qb.filters.filter_by_regions
#links_qb.filters.filter_by_watch_geom = qb.filters.filter_by_watch_geom
#links_qb.filters.filter_by_watch_item = qb.filters.filter_by_watch_item
links_qb.filters.only_in_multi_geometry = (
qb.filters.only_in_multi_geometry)
#links_qb.filters.only_rhs_stack_ids = qb.filters.only_rhs_stack_ids
links_qb.sql_clauses = link_value.Many.sql_clauses_cols_name.clone()
# NOTE Not doing +=, but replacing the selects
# EXPLAIN Using distinct so we...
links_qb.sql_clauses.outer.select = "DISTINCT(group_item.lhs_stack_id)"
links_qb.sql_clauses.outer.shared = ""
links_qb.sql_clauses.inner.where_item_type_id_fcn = (
self.search_item_type_id_sql_from_link)
# Do we need to copy the viewport, too?
g.assurt(not links_qb.viewport)
if qb.viewport:
links_qb.viewport = Query_Viewport(req=None)
links_qb.viewport.include = qb.viewport.include
links_qb.viewport.exclude = qb.viewport.exclude
g.assurt(qb.use_filters_and_viewport)
# EXPLAIN: Not calling links_qb.finalize_query? Not needed or !item_mgr?
# Make a link_value Many object to make the SQL.
links = link_value.Many()
inner_sql = links.search_get_sql(links_qb)
links_qb.db.close()
return inner_sql
#
def search_item_type_id_sql_from_link(self, qb):
# NOTE: Not calling parent, which tests gia.item_type_id against
# self.one_class.item_type_id. We do it a little different.
where_clause = (""" (gia.item_type_id = %d
AND gia.link_lhs_type_id = %d) """
% (link_value.One.item_type_id,
self.one_class.item_type_id,))
return where_clause
#
def search_for_orphan_query(self, qb):
'''Returns all attachments that aren't marked as deleted but don't have
any non-deleted link_values'''
# See also the note in link_value.search_for_orphan_query.
g.assurt(False) # not tested
# FIXME: remove this:
#cols_item_versioned = ','.join([("iv.%s" % (attr_defn[0],))
# for attr_defn in item_versioned.One.local_defns])
sql = (
"""
SELECT
iv.stack_id
FROM
item_versioned AS iv
JOIN
%s AS at_child
USING (system_id)
WHERE
NOT iv.deleted
AND iv.valid_until_rid = %d
AND NOT EXISTS (
SELECT lv.stack_id
FROM link_value AS lv
JOIN item_versioned AS iv_2
USING (system_id)
WHERE lv.lhs_stack_id = iv.stack_id
AND iv_2.valid_until_rid = %d
AND NOT iv_2.deleted)
""" % (self.one_class.item_type_table,
conf.rid_inf,
conf.rid_inf))
self.sql_search(qb, sql)
#
def sql_apply_query_filters(self, qb, where_clause="", conjunction=""):
g.assurt((not conjunction) or (conjunction == "AND"))
# We can only call sql_where_filter_linked once per query. So we can't
# support about_stack_ids or filter_by_watch_feat and a viewport query.
sql_where_filter_linked_cnt = 0
if qb.filters.about_stack_ids:
sql_where_filter_linked_cnt += 1
if qb.filters.filter_by_watch_feat:
sql_where_filter_linked_cnt += 1
if ((qb.viewport is not None) and (qb.viewport.include)
or qb.filters.only_in_multi_geometry):
sql_where_filter_linked_cnt += 1
if qb.filters.filter_by_nearby_edits:
sql_where_filter_linked_cnt += 1
if sql_where_filter_linked_cnt > 1:
raise GWIS_Error('Please choose just one: '
'about_stack_ids, filter_by_watch_feat or viewport.')
if qb.filters.about_stack_ids:
linked_items_where = self.sql_where_filter_about(qb)
g.assurt(linked_items_where)
where_clause += " %s %s " % (conjunction, linked_items_where,)
conjunction = "AND"
if qb.filters.filter_by_watch_feat:
# FIXME: Debug, then combine handlers for filter_by_watch_feat
# and only_in_multi_geometry.
feat_qb = qb.clone(skip_clauses=True, skip_filtport=True)
feat_qb.filters = Query_Filters(req=None)
qfs = feat_qb.filters
# Set filter_by_watch_item=True and search for geofeatures
# that the user is watching.
qfs.filter_by_watch_item = qb.filters.filter_by_watch_feat
g.assurt(not qb.filters.only_in_multi_geometry)
g.assurt((qb.viewport is None) or (qb.viewport.include is None))
feat_qb.finalize_query()
feat_qb.sql_clauses = geofeature.Many.sql_clauses_cols_all.clone()
feats = geofeature.Many()
feats_sql = feats.search_get_sql(feat_qb)
feat_stack_id_table_ref = 'temp_stack_id__watch_feat'
thurrito_sql = (
"""
SELECT
stack_id
INTO TEMPORARY TABLE
%s
FROM
(%s) AS foo_feat_sid_1
""" % (feat_stack_id_table_ref,
feats_sql,))
rows = qb.db.sql(thurrito_sql)
#
join_on_to_self = self.sql_where_filter_linked_join_on_to_self(qb)
where_on_other = ""
join_on_temp = (
"""
JOIN %s
ON (flv.rhs_stack_id = %s.stack_id)
""" % (feat_stack_id_table_ref,
feat_stack_id_table_ref,))
linked_items_where = self.sql_where_filter_linked(qb, join_on_to_self,
where_on_other,
join_on_temp)
#
where_clause += " %s %s " % (conjunction, linked_items_where,)
conjunction = "AND"
# 2013.04.02: Freshly implemented in CcpV2. Not the quickest fcn., but it
# works.
# MAYBE: Disable this until we can find a better solution?
# MEH: [lb] got it under 15 seconds, I think. Good enough.
if qb.filters.filter_by_nearby_edits:
'''
g.assurt(False) # FIXME: This code is broke!
join = ' JOIN post_geo pg ON (p.id = pg.id)'
where_clause += (
"""
%s
-- FIXME: Instead of ST_Intersects/ST_Buffer, try: ST_DWithin
(ST_Intersects(
pg.geometry,
(SELECT ST_Buffer(collect(rr.geometry), 0)
FROM revision rr
WHERE
rr.username = %s
AND NOT is_social_rev(rr.id)))
""" % (conjunction,
qb.db.quoted(qb.username),))
conjunction = "AND"
'''
# FIXME: This was the older SQL snippet used for this filter. It
# was waaaaaayyyyy too slow. The one I used instead is also
# slow, but it doesn't time out, at least.
# [lb] notes that his database is missing geometry indices,
# but this didn't quite halve my experience, from 52 secs.
# to 29 secs. We need to run db_load_add_constraints.sql on
# the db.
#
# sql_or_sids = (
# """
# SELECT
# stack_id
# FROM
# geofeature AS gf_near
# WHERE
# ST_Intersects(
# gf_near.geometry,
# (
# SELECT
# ST_Buffer(collect(rr.geometry), 0)
# FROM
# revision rr
# WHERE
# rr.username = %s
# -- AND NOT is_social_rev(rr.id)
# )
# )
# """
# ) % (qb.db.quoted(qb.username),)
# FIXME: Very slow query: ~ 42 sec.
'''
sql_or_sids = (
"""
SELECT
stack_id
FROM
geofeature AS gf_near
JOIN revision AS rv_near
ON ST_Intersects(gf_near.geometry, rv_near.geometry)
WHERE
rv_near.username = %s
-- AND NOT is_social_rev(rv_near.id)
"""
) % (qb.db.quoted(qb.username),)
'''
# MAYBE: Why isn't setting user_group_id part of finalize_query?
#g.assurt(not qb.user_group_id)
if not qb.user_group_id:
qb.user_group_id = User.private_group_id(qb.db, qb.username)
g.assurt(qb.user_group_id)
geometry_table_ref = 'temp_geometry__edited_items'
geometry_sql = (
"""
SELECT
ST_Buffer(ST_Collect(grev.geometry), 0) AS geometry
INTO TEMPORARY TABLE
%s
FROM
group_revision AS grev
WHERE
grev.group_id = %d
""" % (geometry_table_ref,
qb.user_group_id,))
# 2013.04.02: On [lb]: Time: 405.073 ms
rows = qb.db.sql(geometry_sql)
# NOTE: This is a broad query: if a revision contains edits far apart,
# we'll find all the geofeatures in between. E.g., for [lb], it
# finds hundreds of thousands of byways; not very useful.
item_stack_id_table_ref = 'temp_stack_id__edited_items'
about_stack_ids_sql = (
"""
SELECT
DISTINCT(stack_id)
INTO TEMPORARY TABLE
%s
FROM
geofeature AS feat
JOIN
%s AS grev
ON ST_Intersects(feat.geometry, grev.geometry)
""" % (item_stack_id_table_ref,
geometry_table_ref,
))
# 2013.04.02: On [lb]: Time: 13106.527 ms
rows = qb.db.sql(about_stack_ids_sql)
#
join_on_to_self = self.sql_where_filter_linked_join_on_to_self(qb)
where_on_other = ""
join_on_temp = (
"""
JOIN %s
ON (flv.rhs_stack_id = %s.stack_id)
""" % (item_stack_id_table_ref,
item_stack_id_table_ref,))
linked_items_where = self.sql_where_filter_linked(qb, join_on_to_self,
where_on_other,
join_on_temp)
#
where_clause += " %s %s " % (conjunction, linked_items_where,)
conjunction = "AND"
      # Apply the remaining base class filters (e.g., only items whose name matches the user's search query).
where_clause = item_user_watching.Many.sql_apply_query_filters(
self, qb, where_clause, conjunction)
return where_clause
#
def sql_apply_query_viewport(self, qb, geo_table_name=None):
where_clause = ""
conjunction = "AND"
if (((qb.viewport is not None) and (qb.viewport.include))
or qb.filters.only_in_multi_geometry):
# FIXME: Debug, then combine handlers for filter_by_watch_feat
# and only_in_multi_geometry.
# This is Discussion's "About objects in visible area"
# and "Filter by Region (Names)"
# and "Filter by Watch Regions"
# MAYBE: We should probably restrict this query to a maximum size, to
# avoid really strenuous database queries.
feat_qb = qb.clone(skip_clauses=True, skip_filtport=True)
qfs = Query_Filters(req=None)
qfs.only_in_multi_geometry = qb.filters.only_in_multi_geometry
feat_qb.filters = qfs
feat_qb.viewport = qb.viewport
feat_qb.finalize_query()
feat_qb.sql_clauses = geofeature.Many.sql_clauses_cols_all.clone()
feats = geofeature.Many()
feats_sql = feats.search_get_sql(feat_qb)
# No: feats_sql.db.close()
feat_stack_id_table_ref = 'temp_stack_id__viewport'
thurrito_sql = (
"""
SELECT
stack_id
INTO TEMPORARY TABLE
%s
FROM
(%s) AS foo_vport_sid
""" % (feat_stack_id_table_ref,
feats_sql,))
rows = qb.db.sql(thurrito_sql)
#
count_sql = ("SELECT COUNT(*) FROM %s" % (feat_stack_id_table_ref,))
rows = qb.db.sql(count_sql)
# 2014.05.04: [lb] sees it faster to WHERE IN (SELECT ... FROM tmp)
# rather than to join on the tmp table.
n_sids = rows[0]['count']
log.debug('sql_where_filter_linked: tmp tble rows: %d' % (n_sids,))
# DETERMINE: What's the best cutoff point?
if n_sids > 2500:
# Does this path happen?
log.warning('sql_where_filter_linked: test me: join vs. where')
join_on_to_self = self.sql_where_filter_linked_join_on_to_self(qb)
where_on_other = ""
join_on_temp = (
"""
JOIN %s
ON (flv.rhs_stack_id = %s.stack_id)
""" % (feat_stack_id_table_ref,
feat_stack_id_table_ref,))
linked_items_where = self.sql_where_filter_linked(
qb, join_on_to_self, where_on_other, join_on_temp)
else:
# Use WHERE rather than JOINing.
(linked_items_where, sql_tmp_table,
) = link_value.Many.prepare_sids_temporary_table(self.qb,
'rhs_stack_id',
feat_stack_id_table_ref,
'lhs_stack_id',
'temp_stack_id__feats_attcs')
#
log.debug('sql_where_filter_linked: conjunction: %s' % (conjunction,))
where_clause += " %s %s " % (conjunction, linked_items_where,)
conjunction = "AND"
# Not calling item_versioned.Many.sql_apply_query_viewport, since
# we've processed the viewport.
return where_clause
#
def sql_where_filter_linked_join_on_to_self(self, qb):
join_on_to_self = "attc.stack_id = flv.lhs_stack_id"
return join_on_to_self
#
def sql_where_filter_linked_where_unto_self(self):
#where_unto_self = "attc.stack_id IN (SELECT stack_id FROM %)"
where_unto_self = "flv.lhs_stack_id IN (SELECT stack_id FROM %)"
return where_unto_self
#
def sql_where_filter_about(self, qb):
# "Filter by what is visible in the map"
#
# This is used if the user is at vector zoom level (because then the
# client has geofeature stack IDs on the items in the viewport). At
# raster zoom level, the client sends a bbox, so this filter isn't used.
#
# PERMS: Unless it's a custom client trying to hack us, we sent the
# rhs stack IDs to the client earlier, so we can assume the client
# has at least access_client to the rhs items. But even if the client
# is trying to hack us, the point is moot; all a client would learn is
# what random stack IDs it sent represent private geofeatures attached to
# public threads, which is a case that can't even happen right now.
join_on_to_self = self.sql_where_filter_linked_join_on_to_self(qb)
where_on_other = ("(flv.rhs_stack_id IN (%s))"
% (qb.filters.about_stack_ids,))
linked_items_where = self.sql_where_filter_linked(qb, join_on_to_self,
where_on_other)
return linked_items_where
# ***
#
@staticmethod
def bulk_insert_rows(qb, at_rows_to_insert):
g.assurt(qb.request_is_local)
g.assurt(qb.request_is_script)
g.assurt(qb.cp_maint_lock_owner or ('revision' in qb.db.locked_tables))
if at_rows_to_insert:
insert_sql = (
"""
INSERT INTO %s.%s (
system_id
, branch_id
, stack_id
, version
) VALUES
%s
""" % (conf.instance_name,
One.item_type_table,
','.join(at_rows_to_insert),))
qb.db.sql(insert_sql)
# ***
# ***
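# Hedged usage sketch (not from the Cyclopath sources; the item list is hypothetical):
# One.as_insert_expression() and Many.bulk_insert_rows() are designed to be paired by
# maintenance scripts that copy attachment rows in bulk:
#
#    at_rows = [One.as_insert_expression(qb, item) for item in items_to_copy]
#    Many.bulk_insert_rows(qb, at_rows)
#
# Each expression supplies (system_id, branch_id, stack_id, version) in the same
# order as the INSERT column list that bulk_insert_rows() builds.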
|
|
from contextlib import ContextDecorator, contextmanager
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, Error, ProgrammingError, connections,
)
class TransactionManagementError(ProgrammingError):
"""Transaction management is used improperly."""
pass
def get_connection(using=None):
"""
Get a database connection by name, or the default database connection
if no name is provided. This is a private API.
"""
if using is None:
using = DEFAULT_DB_ALIAS
return connections[using]
def get_autocommit(using=None):
"""Get the autocommit status of the connection."""
return get_connection(using).get_autocommit()
def set_autocommit(autocommit, using=None):
"""Set the autocommit status of the connection."""
return get_connection(using).set_autocommit(autocommit)
def commit(using=None):
"""Commit a transaction."""
get_connection(using).commit()
def rollback(using=None):
"""Roll back a transaction."""
get_connection(using).rollback()
def savepoint(using=None):
"""
Create a savepoint (if supported and required by the backend) inside the
current transaction. Return an identifier for the savepoint that will be
used for the subsequent rollback or commit.
"""
return get_connection(using).savepoint()
def savepoint_rollback(sid, using=None):
"""
Roll back the most recent savepoint (if one exists). Do nothing if
savepoints are not supported.
"""
get_connection(using).savepoint_rollback(sid)
def savepoint_commit(sid, using=None):
"""
Commit the most recent savepoint (if one exists). Do nothing if
savepoints are not supported.
"""
get_connection(using).savepoint_commit(sid)
def clean_savepoints(using=None):
"""
Reset the counter used to generate unique savepoint ids in this thread.
"""
get_connection(using).clean_savepoints()
def get_rollback(using=None):
"""Get the "needs rollback" flag -- for *advanced use* only."""
return get_connection(using).get_rollback()
def set_rollback(rollback, using=None):
"""
Set or unset the "needs rollback" flag -- for *advanced use* only.
When `rollback` is `True`, trigger a rollback when exiting the innermost
enclosing atomic block that has `savepoint=True` (that's the default). Use
this to force a rollback without raising an exception.
When `rollback` is `False`, prevent such a rollback. Use this only after
rolling back to a known-good state! Otherwise, you break the atomic block
and data corruption may occur.
"""
return get_connection(using).set_rollback(rollback)
@contextmanager
def mark_for_rollback_on_error(using=None):
"""
Internal low-level utility to mark a transaction as "needs rollback" when
an exception is raised while not enforcing the enclosed block to be in a
transaction. This is needed by Model.save() and friends to avoid starting a
transaction when in autocommit mode and a single query is executed.
It's equivalent to:
connection = get_connection(using)
if connection.get_autocommit():
yield
else:
with transaction.atomic(using=using, savepoint=False):
yield
but it uses low-level utilities to avoid performance overhead.
"""
try:
yield
except Exception:
connection = get_connection(using)
if connection.in_atomic_block:
connection.needs_rollback = True
raise
def on_commit(func, using=None):
"""
Register `func` to be called when the current transaction is committed.
If the current transaction is rolled back, `func` will not be called.
"""
get_connection(using).on_commit(func)
#################################
# Decorators / context managers #
#################################
class Atomic(ContextDecorator):
"""
Guarantee the atomic execution of a given block.
An instance can be used either as a decorator or as a context manager.
When it's used as a decorator, __call__ wraps the execution of the
decorated function in the instance itself, used as a context manager.
When it's used as a context manager, __enter__ creates a transaction or a
savepoint, depending on whether a transaction is already in progress, and
__exit__ commits the transaction or releases the savepoint on normal exit,
and rolls back the transaction or to the savepoint on exceptions.
It's possible to disable the creation of savepoints if the goal is to
ensure that some code runs within a transaction without creating overhead.
A stack of savepoints identifiers is maintained as an attribute of the
connection. None denotes the absence of a savepoint.
This allows reentrancy even if the same AtomicWrapper is reused. For
example, it's possible to define `oa = atomic('other')` and use `@oa` or
`with oa:` multiple times.
Since database connections are thread-local, this is thread-safe.
This is a private API.
"""
def __init__(self, using, savepoint):
self.using = using
self.savepoint = savepoint
def __enter__(self):
connection = get_connection(self.using)
if not connection.in_atomic_block:
# Reset state when entering an outermost atomic block.
connection.commit_on_exit = True
connection.needs_rollback = False
if not connection.get_autocommit():
# sqlite3 in Python < 3.6 doesn't handle transactions and
# savepoints properly when autocommit is off.
# Turning autocommit back on isn't an option; it would trigger
# a premature commit. Give up if that happens.
if connection.features.autocommits_when_autocommit_is_off:
raise TransactionManagementError(
"Your database backend doesn't behave properly when "
"autocommit is off. Turn it on before using 'atomic'.")
# Pretend we're already in an atomic block to bypass the code
# that disables autocommit to enter a transaction, and make a
# note to deal with this case in __exit__.
connection.in_atomic_block = True
connection.commit_on_exit = False
if connection.in_atomic_block:
# We're already in a transaction; create a savepoint, unless we
# were told not to or we're already waiting for a rollback. The
# second condition avoids creating useless savepoints and prevents
# overwriting needs_rollback until the rollback is performed.
if self.savepoint and not connection.needs_rollback:
sid = connection.savepoint()
connection.savepoint_ids.append(sid)
else:
connection.savepoint_ids.append(None)
else:
connection.set_autocommit(False, force_begin_transaction_with_broken_autocommit=True)
connection.in_atomic_block = True
def __exit__(self, exc_type, exc_value, traceback):
connection = get_connection(self.using)
if connection.savepoint_ids:
sid = connection.savepoint_ids.pop()
else:
# Prematurely unset this flag to allow using commit or rollback.
connection.in_atomic_block = False
try:
if connection.closed_in_transaction:
# The database will perform a rollback by itself.
# Wait until we exit the outermost block.
pass
elif exc_type is None and not connection.needs_rollback:
if connection.in_atomic_block:
# Release savepoint if there is one
if sid is not None:
try:
connection.savepoint_commit(sid)
except DatabaseError:
try:
connection.savepoint_rollback(sid)
# The savepoint won't be reused. Release it to
# minimize overhead for the database server.
connection.savepoint_commit(sid)
except Error:
# If rolling back to a savepoint fails, mark for
# rollback at a higher level and avoid shadowing
# the original exception.
connection.needs_rollback = True
raise
else:
# Commit transaction
try:
connection.commit()
except DatabaseError:
try:
connection.rollback()
except Error:
# An error during rollback means that something
# went wrong with the connection. Drop it.
connection.close()
raise
else:
# This flag will be set to True again if there isn't a savepoint
# allowing to perform the rollback at this level.
connection.needs_rollback = False
if connection.in_atomic_block:
# Roll back to savepoint if there is one, mark for rollback
# otherwise.
if sid is None:
connection.needs_rollback = True
else:
try:
connection.savepoint_rollback(sid)
# The savepoint won't be reused. Release it to
# minimize overhead for the database server.
connection.savepoint_commit(sid)
except Error:
# If rolling back to a savepoint fails, mark for
# rollback at a higher level and avoid shadowing
# the original exception.
connection.needs_rollback = True
else:
# Roll back transaction
try:
connection.rollback()
except Error:
# An error during rollback means that something
# went wrong with the connection. Drop it.
connection.close()
finally:
# Outermost block exit when autocommit was enabled.
if not connection.in_atomic_block:
if connection.closed_in_transaction:
connection.connection = None
else:
connection.set_autocommit(True)
# Outermost block exit when autocommit was disabled.
elif not connection.savepoint_ids and not connection.commit_on_exit:
if connection.closed_in_transaction:
connection.connection = None
else:
connection.in_atomic_block = False
def atomic(using=None, savepoint=True):
# Bare decorator: @atomic -- although the first argument is called
# `using`, it's actually the function being decorated.
if callable(using):
return Atomic(DEFAULT_DB_ALIAS, savepoint)(using)
# Decorator: @atomic(...) or context manager: with atomic(...): ...
else:
return Atomic(using, savepoint)
def _non_atomic_requests(view, using):
try:
view._non_atomic_requests.add(using)
except AttributeError:
view._non_atomic_requests = {using}
return view
def non_atomic_requests(using=None):
if callable(using):
return _non_atomic_requests(using, DEFAULT_DB_ALIAS)
else:
if using is None:
using = DEFAULT_DB_ALIAS
return lambda view: _non_atomic_requests(view, using)
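def _example_transfer(source_account, target_account, amount, using=None):
    """
    Hedged usage sketch, not part of Django's transaction module proper: it only
    illustrates how atomic() and on_commit() defined above are meant to be
    combined. The account objects and their withdraw()/deposit() methods are
    hypothetical stand-ins for real model instances.
    """
    with atomic(using=using):
        source_account.withdraw(amount)
        target_account.deposit(amount)
        # The callback runs only if the enclosing transaction commits;
        # it is skipped entirely if the block rolls back.
        on_commit(lambda: print('transfer committed'), using=using)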
|
|
#!/usr/bin/env python
# encoding: utf-8
# opc.py
#
# Command-line interface for operations on one or more Open Packaging
# Convention (OPC) files, such as .docx, .pptx, and .xlsx files.
import argparse
import os
import sys
from opcdiag.controller import OpcController
class CommandController(object):
"""
Orchestrates processing of commands in the form of a list of arguments
(*argv*). A new instance is created using the :meth:`new` staticmethod.
Once instantiated, it can process any number of commands by calling its
:meth:`execute` method, once for each command.
"""
def __init__(self, parser, app_controller):
self._parser = parser
self._app_controller = app_controller
@staticmethod
def new():
"""
Return a newly created instance of |CommandController| fitted with a
fully configured parser and an instance of the application controller
to dispatch parsed commands to.
"""
parser = Command.parser()
app_controller = OpcController()
return CommandController(parser, app_controller)
def execute(self, argv=None):
"""
Interpret the command indicated by the arguments in *argv* and
execute it. If *argv* is |None|, ``sys.argv`` is used.
"""
# print help and exit if no args
arg_count = len(argv if argv else sys.argv)
if arg_count < 2:
self._parser.print_help()
sys.exit(1)
args = self._parser.parse_args(argv)
command = args.command
command.validate(args)
command.execute(args, self._app_controller)
class Command(object):
"""
Base class for sub-commands
"""
def __init__(self, parser):
super(Command, self).__init__()
self._parser = parser
@staticmethod
def parser():
"""
Return an instance of :class:`argparse.ArgumentParser` configured
with a subcommand parser for each of the commands that are a subclass
of |Command|.
"""
desc = (
'Browse and diff Microsoft Office .docx, .xlsx, and .pptx files.'
)
epilog = "'opc <command> --help' lists command-specific help"
parser = argparse.ArgumentParser(
prog='opc', description=desc, epilog=epilog
)
subparsers = parser.add_subparsers(title='available commands')
for command_cls in Command.__subclasses__():
command_parser = command_cls.add_command_parser_to(subparsers)
command = command_cls(command_parser)
command_parser.set_defaults(command=command)
return parser
def execute(self, args, app_controller):
"""
Abstract method, each command must implement
"""
msg = 'execute() must be implemented by all subclasses of Command'
raise NotImplementedError(msg)
def validate(self, args):
"""
Abstract method, each command must implement; just pass if there's
nothing to validate.
"""
msg = 'validate() must be implemented by all subclasses of Command'
raise NotImplementedError(msg)
class BrowseCommand(Command):
def __init__(self, parser):
super(BrowseCommand, self).__init__(parser)
@staticmethod
def add_command_parser_to(subparsers):
parser = subparsers.add_parser(
'browse',
help='List pretty-printed XML for a specified package part')
parser.add_argument(
'pkg_path', metavar='PKG_PATH',
help='Path to OPC package file')
parser.add_argument(
'filename', metavar='FILENAME',
help='Filename portion of the pack URI for the part to browse')
return parser
def execute(self, args, app_controller):
app_controller.browse(args.pkg_path, args.filename)
def validate(self, args):
try:
msg = "PKG_PATH '%s' does not exist" % args.pkg_path
assert os.path.exists(args.pkg_path), msg
except AssertionError as e:
self._parser.error(str(e))
class DiffCommand(Command):
def __init__(self, parser):
super(DiffCommand, self).__init__(parser)
@staticmethod
def add_command_parser_to(subparsers):
parser = subparsers.add_parser(
'diff', help='Show differences between two OPC package files')
parser.add_argument(
'pkg_1_path', metavar='PKG_1_PATH',
help='first package to compare')
parser.add_argument(
'pkg_2_path', metavar='PKG_2_PATH',
help='second package to compare')
return parser
def execute(self, args, app_controller):
app_controller.diff_pkg(args.pkg_1_path, args.pkg_2_path)
def validate(self, args):
paths_that_should_exist = (
(args.pkg_1_path, 'PKG_1_PATH'),
(args.pkg_2_path, 'PKG_2_PATH'),
)
try:
for path, metavar in paths_that_should_exist:
msg = "%s '%s' does not exist" % (metavar, path)
assert os.path.exists(path), msg
except AssertionError as e:
self._parser.error(str(e))
class DiffItemCommand(Command):
def __init__(self, parser):
super(DiffItemCommand, self).__init__(parser)
@staticmethod
def add_command_parser_to(subparsers):
parser = subparsers.add_parser(
'diff-item',
help='Show differences between a specified item in two OPC '
'package files')
parser.add_argument(
'pkg_1_path', metavar='PKG_1_PATH',
help='first package')
parser.add_argument(
'pkg_2_path', metavar='PKG_2_PATH',
help='second package')
parser.add_argument(
'filename', metavar='FILENAME',
help='Filename portion of pack URI for item to browse')
return parser
def execute(self, args, app_controller):
app_controller.diff_item(
args.pkg_1_path, args.pkg_2_path, args.filename)
def validate(self, args):
paths_that_should_exist = (
(args.pkg_1_path, 'PKG_1_PATH'),
(args.pkg_2_path, 'PKG_2_PATH'),
)
try:
for path, metavar in paths_that_should_exist:
msg = "%s '%s' does not exist" % (metavar, path)
assert os.path.exists(path), msg
except AssertionError as e:
self._parser.error(str(e))
class ExtractCommand(Command):
def __init__(self, parser):
super(ExtractCommand, self).__init__(parser)
@staticmethod
def add_command_parser_to(subparsers):
parser = subparsers.add_parser(
'extract',
help='Extract all items in a package to a directory')
parser.add_argument(
'pkg_path', metavar='PKG_PATH',
help='Path to package')
parser.add_argument(
'dirpath', metavar='DIRPATH',
help='Path to directory into which to extract package items')
return parser
def validate(self, args):
try:
msg = "PKG_PATH '%s' does not exist" % args.pkg_path
assert os.path.exists(args.pkg_path), msg
except AssertionError as e:
self._parser.error(str(e))
def execute(self, args, app_controller):
app_controller.extract_package(args.pkg_path, args.dirpath)
class RepackageCommand(Command):
def __init__(self, parser):
super(RepackageCommand, self).__init__(parser)
@staticmethod
def add_command_parser_to(subparsers):
parser = subparsers.add_parser(
'repackage',
help='Build an OPC package from the contents of a directory')
parser.add_argument(
'dirpath', metavar='DIRPATH',
help='Directory containing expanded package files')
parser.add_argument(
'new_package', metavar='NEW_PACKAGE',
help='Path at which to save new package file')
return parser
def validate(self, args):
try:
msg = "DIRPATH '%s' not found or not a directory" % args.dirpath
assert os.path.isdir(args.dirpath), msg
except AssertionError as e:
self._parser.error(str(e))
def execute(self, args, app_controller):
app_controller.repackage(args.dirpath, args.new_package)
class SubstituteCommand(Command):
def __init__(self, parser):
super(SubstituteCommand, self).__init__(parser)
@staticmethod
def add_command_parser_to(subparsers):
parser = subparsers.add_parser(
'substitute',
help='Substitute a part from one package into another')
parser.add_argument(
'filename', metavar='FILENAME',
help='Filename portion of partname for part to substitute')
parser.add_argument(
'src_pkg_path', metavar='SRC_PKG_PATH',
help='package from which to source part identified by FILENAME')
parser.add_argument(
'tgt_pkg_path', metavar='TGT_PKG_PATH',
help='package from which to get all remaining parts')
parser.add_argument(
'result_pkg_path', metavar='RESULT_PKG_PATH',
help='path at which to store resulting package file')
return parser
def validate(self, args):
paths_that_should_exist = (
(args.src_pkg_path, 'SRC_PKG_PATH'),
(args.tgt_pkg_path, 'TGT_PKG_PATH'),
)
try:
for path, metavar in paths_that_should_exist:
msg = "%s '%s' does not exist" % (metavar, path)
assert os.path.exists(path), msg
except AssertionError as e:
self._parser.error(str(e))
def execute(self, args, app_controller):
app_controller.substitute(
args.filename, args.src_pkg_path, args.tgt_pkg_path,
args.result_pkg_path)
def main(argv=None):
command_controller = CommandController.new()
command_controller.execute(argv)
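# Hedged usage sketch (the file names below are illustrative only). When this module
# is installed as the ``opc`` console entry point, the sub-commands wired up above are
# invoked as, for example:
#
#   opc browse report.docx document.xml
#   opc diff old.pptx new.pptx
#   opc diff-item old.pptx new.pptx slide1.xml
#   opc extract report.docx ./expanded
#   opc repackage ./expanded rebuilt.docx
#   opc substitute document.xml patched.docx original.docx result.docx
#
# Equivalently, main(['diff', 'old.pptx', 'new.pptx']) can be called from Python.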
|
|
import Live
from _Generic.Devices import *
from ableton.v2.control_surface.component import Component as ControlSurfaceComponent
from ableton.v2.control_surface.elements import EncoderElement, ButtonElement, DisplayDataSource
class Live8DeviceComponent(ControlSurfaceComponent):
__doc__ = ' Class representing a device in Live '
def __init__(self, *a, **k):
super(Live8DeviceComponent, self).__init__(*a, **k)
self._device_banks = DEVICE_DICT
self._device_best_banks = DEVICE_BOB_DICT
self._device_bank_names = BANK_NAME_DICT
self._device = None
self._parameter_controls = None
self._bank_up_button = None
self._bank_down_button = None
self._bank_buttons = None
self._on_off_button = None
self._lock_button = None
self._lock_callback = None
self._device_name_data_source = None
self._device_bank_registry = {}
self._bank_index = 0
self._bank_name = '<No Bank>'
self._locked_to_device = False
return None
def disconnect(self):
self._lock_callback = None
self._device_bank_registry = None
if self._parameter_controls != None:
for control in self._parameter_controls:
control.release_parameter()
self._parameter_controls = None
if self._bank_up_button != None:
self._bank_up_button.remove_value_listener(self._bank_up_value)
self._bank_up_button = None
if self._bank_down_button != None:
self._bank_down_button.remove_value_listener(self._bank_down_value)
self._bank_down_button = None
if self._bank_buttons != None:
for button in self._bank_buttons:
button.remove_value_listener(self._bank_value)
self._bank_buttons = None
if self._on_off_button != None:
if self._on_off_button.value_has_listener(self._on_off_value):
self._on_off_button.remove_value_listener(self._on_off_value)
self._on_off_button = None
if self._lock_button != None:
if self._lock_button.value_has_listener(self._lock_value):
self._lock_button.remove_value_listener(self._lock_value)
self._lock_button = None
if self._device != None:
parameter = self._on_off_parameter()
if parameter != None:
if parameter.value_has_listener(self._on_on_off_changed):
parameter.remove_value_listener(self._on_on_off_changed)
if self._device.name_has_listener(self._on_device_name_changed):
self._device.remove_name_listener(self._on_device_name_changed)
if self._device.parameters_has_listener(self._on_parameters_changed):
self._device.remove_parameters_listener(self._on_parameters_changed)
self._device = None
return None
def on_enabled_changed(self):
self.update()
def set_device(self, device):
assert ((device == None) or isinstance(device, Live.Device.Device))
if ((not self._locked_to_device) and (device != self._device)):
if (self._device != None):
self._device.remove_name_listener(self._on_device_name_changed)
self._device.remove_parameters_listener(self._on_parameters_changed)
parameter = self._on_off_parameter()
if (parameter != None):
parameter.remove_value_listener(self._on_on_off_changed)
if (self._parameter_controls != None):
for control in self._parameter_controls:
control.release_parameter()
self._device = device
if (self._device != None):
self._bank_index = 0
self._device.add_name_listener(self._on_device_name_changed)
self._device.add_parameters_listener(self._on_parameters_changed)
parameter = self._on_off_parameter()
if (parameter != None):
parameter.add_value_listener(self._on_on_off_changed)
for key in self._device_bank_registry.keys():
if (key == self._device):
self._bank_index = self._device_bank_registry.get(key, 0)
del self._device_bank_registry[key]
break
self._bank_name = '<No Bank>' #added
self._on_device_name_changed()
self.update()
def set_bank_nav_buttons(self, down_button, up_button):
assert ((down_button != None) or (up_button == None))
assert ((up_button == None) or isinstance(up_button, ButtonElement))
assert ((down_button == None) or isinstance(down_button, ButtonElement))
do_update = False
if up_button != self._bank_up_button:
do_update = True
if self._bank_up_button != None:
self._bank_up_button.remove_value_listener(self._bank_up_value)
self._bank_up_button = up_button
if self._bank_up_button != None:
self._bank_up_button.add_value_listener(self._bank_up_value)
if down_button != self._bank_down_button:
do_update = True
if self._bank_down_button != None:
self._bank_down_button.remove_value_listener(self._bank_down_value)
self._bank_down_button = down_button
if self._bank_down_button != None:
self._bank_down_button.add_value_listener(self._bank_down_value)
if do_update:
self.update()
return None
def set_bank_buttons(self, buttons):
assert ((buttons == None) or isinstance(buttons, tuple))
if self._bank_buttons != None:
for button in self._bank_buttons:
button.remove_value_listener(self._bank_value)
self._bank_buttons = buttons
if self._bank_buttons != None:
identify_sender = True
for button in self._bank_buttons:
button.add_value_listener(self._bank_value, identify_sender)
self.update()
return None
def set_parameter_controls(self, controls):
assert (controls != None)
assert isinstance(controls, tuple)
if self._device != None and self._parameter_controls != None:
for control in self._parameter_controls:
control.release_parameter()
for control in controls:
assert (control != None)
assert isinstance(control, EncoderElement)
self._parameter_controls = controls
self.update()
return None
def set_lock_to_device(self, lock, device):
assert isinstance(lock, type(False))
assert (lock is not self._locked_to_device)
if lock:
self.set_device(device)
else:
assert (device == self._device)
self._locked_to_device = lock
if self.is_enabled():
if (self._lock_button != None):
if self._locked_to_device:
self._lock_button.turn_on()
else:
self._lock_button.turn_off()
def set_lock_button(self, button):
assert ((button == None) or isinstance(button, ButtonElement))
if self._lock_button != None:
self._lock_button.remove_value_listener(self._lock_value)
self._lock_button = None
self._lock_button = button
if self._lock_button != None:
self._lock_button.add_value_listener(self._lock_value)
self.update()
return None
def set_on_off_button(self, button):
assert ((button == None) or isinstance(button, ButtonElement))
if self._on_off_button != None:
self._on_off_button.remove_value_listener(self._on_off_value)
self._on_off_button = None
self._on_off_button = button
if self._on_off_button != None:
self._on_off_button.add_value_listener(self._on_off_value)
self.update()
return None
def set_lock_callback(self, callback):
assert (self._lock_callback == None)
assert (callback != None)
assert (dir(callback).count('im_func') is 1)
self._lock_callback = callback
return None
def restore_bank(self, bank_index):
if self._device != None and self._is_banking_enabled() and self._locked_to_device and self._number_of_parameter_banks() > bank_index and self._bank_index != bank_index:
self._bank_index = bank_index
self.update()
return None
def device_name_data_source(self):
if self._device_name_data_source == None:
self._device_name_data_source = DisplayDataSource()
self._on_device_name_changed()
return self._device_name_data_source
def update(self):
if (self.is_enabled() and (self._device != None)):
self._device_bank_registry[self._device] = self._bank_index
if (self._parameter_controls != None):
old_bank_name = self._bank_name #added
self._assign_parameters()
if self._bank_name != old_bank_name: #added
try:
self._show_msg_callback(self._device.name + ' Bank: ' + self._bank_name) #added
except:
pass
if ((self._bank_up_button != None) and (self._bank_down_button != None)):
if (self._number_of_parameter_banks()) > (self._bank_index + 1):
self._bank_up_button.turn_on()
else:
self._bank_up_button.turn_off()
if (self._bank_index > 0):
self._bank_down_button.turn_on()
else:
self._bank_down_button.turn_off()
if (self._bank_buttons != None):
for index in range(len(self._bank_buttons)):
if (index == self._bank_index):
self._bank_buttons[index].turn_on()
else:
self._bank_buttons[index].turn_off()
else:
if (self._lock_button != None):
self._lock_button.turn_off()
if (self._bank_up_button != None):
self._bank_up_button.turn_off()
if (self._bank_down_button != None):
self._bank_down_button.turn_off()
if (self._bank_buttons != None):
for button in self._bank_buttons:
button.turn_off()
if (self._parameter_controls != None):
for control in self._parameter_controls:
control.release_parameter()
#self._rebuild_callback()
def _bank_up_value(self, value):
assert (self._bank_up_button != None)
assert (value != None)
assert isinstance(value, int)
if self.is_enabled():
if ((not self._bank_up_button.is_momentary()) or (value is not 0)):
if (self._device != None):
num_banks = self._number_of_parameter_banks()
if (self._bank_down_button == None):
self._bank_name = ''
self._bank_index = ((self._bank_index + 1) % num_banks)
self.update()
elif (num_banks > (self._bank_index + 1)):
self._bank_name = ''
self._bank_index += 1
self.update()
def _bank_down_value(self, value):
assert (self._bank_down_button != None)
assert (value != None)
assert isinstance(value, int)
if self.is_enabled():
if ((not self._bank_down_button.is_momentary()) or (value is not 0)):
if ((self._device != None) and (self._bank_index > 0)):
self._bank_name = ''
self._bank_index -= 1
self.update()
def _lock_value(self, value):
assert (self._lock_button != None)
assert (self._lock_callback != None)
assert (value != None)
assert isinstance(value, int)
if not self._lock_button.is_momentary() or value is not 0:
self._lock_callback()
return None
def _on_off_value(self, value):
assert (self._on_off_button != None)
assert (value in range(128))
if not self._on_off_button.is_momentary() or value is not 0:
parameter = self._on_off_parameter()
if parameter != None and parameter.is_enabled:
parameter.value = float(int(parameter.value == 0.0))
return None
def _bank_value(self, value, button):
assert (self._bank_buttons != None)
assert (value != None)
assert (button != None)
assert isinstance(value, int)
assert isinstance(button, ButtonElement)
assert (list(self._bank_buttons).count(button) == 1)
if self.is_enabled() and self._device != None: #added
if ((not button.is_momentary()) or (value is not 0)):
bank = list(self._bank_buttons).index(button)
if (bank != self._bank_index):
if (self._number_of_parameter_banks() > bank):
self._bank_name = '' #added
self._bank_index = bank
self.update()
else:
try:
self._show_msg_callback(self._device.name + ' Bank: ' + self._bank_name)
except:
pass
def _is_banking_enabled(self):
direct_banking = (self._bank_buttons != None)
roundtrip_banking = (self._bank_up_button != None)
increment_banking = ((self._bank_up_button != None) and (self._bank_down_button != None))
return (direct_banking or (roundtrip_banking or increment_banking))
def _assign_parameters(self):
assert self.is_enabled()
assert (self._device != None)
assert (self._parameter_controls != None)
self._bank_name = ('Bank ' + str(self._bank_index + 1)) #added
if (self._device.class_name in self._device_banks.keys()): #modified
assert (self._device.class_name in self._device_best_banks.keys())
banks = self._device_banks[self._device.class_name]
bank = None
#if (not self._is_banking_enabled()):
# banks = self._device_best_banks[self._device.class_name]
# self._bank_name = 'Best of Parameters' #added
if (len(banks) > self._bank_index):
bank = banks[self._bank_index]
if self._is_banking_enabled(): #added
if self._device.class_name in self._device_bank_names.keys(): #added
self._bank_name = self._device_bank_names[self._device.class_name] #added *recheck
assert ((bank == None) or (len(bank) >= len(self._parameter_controls)))
for index in range(len(self._parameter_controls)):
parameter = None
if (bank != None):
parameter = get_parameter_by_name(self._device, bank[index])
if (parameter != None):
self._parameter_controls[index].connect_to(parameter)
else:
self._parameter_controls[index].release_parameter()
else:
parameters = self._device_parameters_to_map()
num_controls = len(self._parameter_controls)
index = (self._bank_index * num_controls)
for control in self._parameter_controls:
if (index < len(parameters)):
control.connect_to(parameters[index])
else:
control.release_parameter()
index += 1
def _on_device_name_changed(self):
if (self._device_name_data_source != None):
if (self.is_enabled() and (self._device != None)):
self._device_name_data_source.set_display_string(self._device.name)
else:
self._device_name_data_source.set_display_string('No Device')
def _on_parameters_changed(self):
self.update()
def _on_off_parameter(self):
result = None
if (self._device != None):
for parameter in self._device.parameters:
if str(parameter.name).startswith('Device On'):
result = parameter
break
return result
def _on_on_off_changed(self):
if (self.is_enabled() and (self._on_off_button != None)):
turn_on = False
if (self._device != None):
parameter = self._on_off_parameter()
turn_on = ((parameter != None) and (parameter.value > 0.0))
if turn_on:
self._on_off_button.turn_on()
else:
self._on_off_button.turn_off()
def _device_parameters_to_map(self):
assert self.is_enabled()
assert (self._device != None)
assert (self._parameter_controls != None)
return self._device.parameters[1:] #check this...
def _number_of_parameter_banks(self):
return number_of_parameter_banks(self._device) #added
|
|
# -*- coding: utf-8 -*-
from copy import copy
from classytags.utils import flatten_context
from django.template import Template, Context
from django.template.loader import render_to_string
from django.utils import six
from django.utils.safestring import mark_safe
from cms.cache.placeholder import get_placeholder_cache, set_placeholder_cache
from cms.models.placeholdermodel import Placeholder
from cms.plugin_processors import (plugin_meta_context_processor, mark_safe_plugin_processor)
from cms.utils import get_language_from_request
from cms.utils.conf import get_cms_setting
from cms.utils.django_load import iterload_objects
DEFAULT_PLUGIN_CONTEXT_PROCESSORS = (
plugin_meta_context_processor,
)
# these are always called after all other plugin processors
DEFAULT_PLUGIN_PROCESSORS = (
mark_safe_plugin_processor,
)
class PluginContext(Context):
"""
This subclass of template.Context automatically populates itself using
the processors defined in CMS_PLUGIN_CONTEXT_PROCESSORS.
Additional processors can be specified as a list of callables
using the "processors" keyword argument.
"""
def __init__(self, dict_, instance, placeholder, processors=None, current_app=None):
dict_ = flatten_context(dict_)
super(PluginContext, self).__init__(dict_)
if not processors:
processors = []
for processor in DEFAULT_PLUGIN_CONTEXT_PROCESSORS:
self.update(processor(instance, placeholder, self))
for processor in iterload_objects(get_cms_setting('PLUGIN_CONTEXT_PROCESSORS')):
self.update(processor(instance, placeholder, self))
for processor in processors:
self.update(processor(instance, placeholder, self))
def render_plugin(context, instance, placeholder, template, processors=None, current_app=None):
"""
Renders a single plugin and applies the post processors to its rendered
content.
"""
if current_app:
context['request'].current_app = current_app
if not processors:
processors = []
if isinstance(template, six.string_types):
content = render_to_string(template, flatten_context(context))
elif (isinstance(template, Template) or (hasattr(template, 'template') and
hasattr(template, 'render') and isinstance(template.template, Template))):
content = template.render(context)
else:
content = ''
for processor in iterload_objects(get_cms_setting('PLUGIN_PROCESSORS')):
content = processor(instance, placeholder, content, context)
for processor in processors:
content = processor(instance, placeholder, content, context)
for processor in DEFAULT_PLUGIN_PROCESSORS:
content = processor(instance, placeholder, content, context)
return content
def render_plugins(plugins, context, placeholder, processors=None):
"""
Renders a collection of plugins with the given context, using the appropriate processors
for a given placeholder name, and returns a list containing a "rendered content" string
for each plugin.
This is the main plugin rendering utility function; use it rather than
Plugin.render_plugin().
"""
out = []
total = len(plugins)
for index, plugin in enumerate(plugins):
plugin._render_meta.total = total
plugin._render_meta.index = index
context.push()
out.append(plugin.render_plugin(context, placeholder, processors=processors))
context.pop()
return out
def render_placeholder(placeholder, context_to_copy, name_fallback="Placeholder",
lang=None, default=None, editable=True, use_cache=True):
"""
Renders plugins for a placeholder on the given page using shallow copies of the
given context, and returns a string containing the rendered output.
Set editable = False to disable front-end editing for this placeholder
during rendering. This is primarily used for the "as" variant of the
render_placeholder tag.
"""
from cms.utils.placeholder import get_placeholder_conf, restore_sekizai_context
from cms.utils.plugins import get_plugins
# these are always called before all other plugin context processors
from sekizai.helpers import Watcher
if not placeholder:
return
context = copy(context_to_copy)
context.push()
request = context['request']
if not hasattr(request, 'placeholders'):
request.placeholders = {}
perms = (placeholder.has_change_permission(request) or not placeholder.cache_placeholder)
if not perms or placeholder.slot not in request.placeholders:
request.placeholders[placeholder.slot] = (placeholder, perms)
else:
request.placeholders[placeholder.slot] = (
placeholder, perms and request.placeholders[placeholder.slot][1]
)
if hasattr(placeholder, 'content_cache'):
return mark_safe(placeholder.content_cache)
page = placeholder.page if placeholder else None
# It's kind of a duplicate of the similar call in `get_plugins`, but it's required
# in order to have a valid language in this function for `get_fallback_languages` to work
if lang:
save_language = lang
else:
lang = get_language_from_request(request)
save_language = lang
# Prepend frontedit toolbar output if applicable
toolbar = getattr(request, 'toolbar', None)
if (getattr(toolbar, 'edit_mode', False) and
getattr(toolbar, "show_toolbar", False) and
getattr(placeholder, 'is_editable', True) and editable):
from cms.middleware.toolbar import toolbar_plugin_processor
processors = (toolbar_plugin_processor, )
edit = True
else:
processors = None
edit = False
if get_cms_setting('PLACEHOLDER_CACHE') and use_cache:
if not edit and placeholder and not hasattr(placeholder, 'cache_checked'):
cached_value = get_placeholder_cache(placeholder, lang)
if cached_value is not None:
restore_sekizai_context(context, cached_value['sekizai'])
return mark_safe(cached_value['content'])
if page:
template = page.template
else:
template = None
plugins = list(get_plugins(request, placeholder, template, lang=lang))
# Add extra context as defined in settings, but do not overwrite existing context variables,
# since settings are general and database/template are specific
# TODO this should actually happen as a plugin context processor, but these currently overwrite
# existing context -- maybe change this order?
slot = getattr(placeholder, 'slot', None)
if slot:
for key, value in get_placeholder_conf("extra_context", slot, template, {}).items():
if key not in context:
context[key] = value
content = []
watcher = Watcher(context)
content.extend(render_plugins(plugins, context, placeholder, processors))
toolbar_content = ''
if edit and editable:
if not hasattr(request.toolbar, 'placeholder_list'):
request.toolbar.placeholder_list = []
if placeholder not in request.toolbar.placeholder_list:
request.toolbar.placeholder_list.append(placeholder)
toolbar_content = mark_safe(render_placeholder_toolbar(placeholder, context, name_fallback, save_language))
if content:
content = mark_safe("".join(content))
elif default:
# should be nodelist from a template
content = mark_safe(default.render(context_to_copy))
else:
content = ''
context['content'] = content
context['placeholder'] = toolbar_content
context['edit'] = edit
result = render_to_string("cms/toolbar/content.html", flatten_context(context))
changes = watcher.get_changes()
if placeholder and not edit and placeholder.cache_placeholder and get_cms_setting('PLACEHOLDER_CACHE') and use_cache:
set_placeholder_cache(placeholder, lang, content={'content': result, 'sekizai': changes}, request=request)
context.pop()
return result
def render_placeholder_toolbar(placeholder, context, name_fallback, save_language):
from cms.plugin_pool import plugin_pool
request = context['request']
page = placeholder.page if placeholder else None
if not page:
page = getattr(request, 'current_page', None)
if page:
if name_fallback and not placeholder:
placeholder = Placeholder.objects.create(slot=name_fallback)
page.placeholders.add(placeholder)
placeholder.page = page
if placeholder:
slot = placeholder.slot
else:
slot = None
context.push()
# to restrict child-only plugins from draggables..
context['allowed_plugins'] = [cls.__name__ for cls in plugin_pool.get_all_plugins(slot, page)] + plugin_pool.get_system_plugins()
context['placeholder'] = placeholder
context['language'] = save_language
context['page'] = page
toolbar = render_to_string("cms/toolbar/placeholder.html", flatten_context(context))
context.pop()
return toolbar
|
|
"""
pynipap - a Python NIPAP client library
=======================================
pynipap is a Python client library for the NIPAP IP address planning
system. It is structured as a simple ORM.
To keep it easy to maintain it is quite "thin", passing many arguments
straight through to the backend. Consequently, the pynipap-specific
documentation is also quite thin. For in-depth information please look at the
main :py:mod:`NIPAP API documentation <nipap.backend>`.
There are four ORM-classes:
* :class:`VRF`
* :class:`Pool`
* :class:`Prefix`
* :class:`Tag`
Each of these maps to the NIPAP objects with the same name. See the main
:py:mod:`NIPAP API documentation <nipap.backend>` for an overview of the
different object types and what they are used for.
There are also a few supporting classes:
* :class:`AuthOptions` - Authentication options.
And a bunch of exceptions:
* :class:`NipapError`
* :class:`NipapNonExistentError`
* :class:`NipapInputError`
* :class:`NipapMissingInputError`
* :class:`NipapExtraneousInputError`
* :class:`NipapNoSuchOperatorError`
* :class:`NipapValueError`
* :class:`NipapDuplicateError`
* :class:`NipapAuthError`
* :class:`NipapAuthenticationError`
* :class:`NipapAuthorizationError`
General usage
-------------
pynipap has been designed to be simple to use.
Preparations
^^^^^^^^^^^^
Make sure that pynipap is accessible in your `sys.path`. You can test it by
starting a Python shell and running::
import pynipap
If that works, you are good to go!
To simplify your code slightly, you can import the individual classes into
your main namespace::
import pynipap
from pynipap import VRF, Pool, Prefix
Before you can access NIPAP you need to specify the URL to the NIPAP
XML-RPC service and the authentication options to use for your connection.
NIPAP has an authentication system which is somewhat involved; see the main
NIPAP documentation for details.
The URL, including the user credentials, is set in the pynipap module
variable `xmlrpc_uri`, like so::
pynipap.xmlrpc_uri = "http://user:pass@127.0.0.1:1337/XMLRPC"
If you want to access the API externally, from another host, update the
corresponding lines in the nipap.conf file. Here you can also change the port. ::
listen = 0.0.0.0 ; IP address to listen on.
port = 1337 ; XML-RPC listen port (change requires restart)
The minimum authentication option which we need to set is the
`authoritative_source` option, which specifies what system is accessing
NIPAP. This is logged for each query which alters the NIPAP database and
attached to each prefix which is created or edited. Well-behaved clients
are required to honor this and verify that the user really wants to alter
the prefix when trying to edit a prefix which was last edited by another
system. The :class:`AuthOptions` class is a class with shared state,
similar to a singleton; that is, once the first instance has been created,
each subsequent instance will be a copy of the first one. In this way the
authentication options can be accessed from all of the pynipap classes. ::
a = AuthOptions({
'authoritative_source': 'my_fancy_nipap_client'
})
After this, we are good to go!
Accessing data
^^^^^^^^^^^^^^
To fetch data from NIPAP, a set of class methods (@classmethod) has been
defined in each of the ORM classes. They are:
* :func:`get` - Get a single object from its ID.
* :func:`list` - List objects matching a simple criteria.
* :func:`search` - Perform a full-blown search.
* :func:`smart_search` - Perform a magic search from a string.
Each of these functions returns either an instance of the requested class
(:py:class:`VRF`, :class:`Pool`, :class:`Prefix`) or a list of
instances. The :func:`search` and :func:`smart_search` functions also
embed the list in a dict which contains search metadata.
The easiest way to get data out of NIPAP is to use the :func:`get`-method,
given that you know the ID of the object you want to fetch::
# Fetch VRF with ID 1 and print its name
vrf = VRF.get(1)
print(vrf.name)
To list all objects, each class has a :func:`list`-function. ::
# list all pools
pools = Pool.list()
# print the name of the pools
for p in pools:
print(p.name)
Each of the list functions can also take a `spec`-dict as a second
argument. With the spec you can perform a simple search operation by
specifying object attribute values. ::
# List pools with a default type of 'assignment'
pools = Pool.list({ 'default_type': 'assignment' })
Performing searches
^^^^^^^^^^^^^^^^^^^
Searches are easiest when using the object's :func:`smart_search`-method::
#Returns a dict which includes search metadata and
#a 'result' : [array, of, prefix, objects]
search_result = Prefix.smart_search('127.0.0.0/8')
prefix_objects = search_result['result']
prefix_objects[0].description
prefix_objects[0].prefix
You can also send query filters. ::
#Find the prefix for Vlan 901
vlan = 901
vlan_query = { 'val1': 'vlan', 'operator': 'equals', 'val2': vlan }
vlan_901 = Prefix.smart_search('', { }, vlan_query)['result'][0]
vlan_901.vlan
The following operators can be used. ::
* 'and'
* 'or'
* 'equals_any'
* '='
* 'equals'
* '<'
* 'less'
* '<='
* 'less_or_equal'
* '>'
* 'greater'
* '>='
* 'greater_or_equal'
* 'is'
* 'is_not'
* '!='
* 'not_equals'
* 'like'
* 'regex_match'
* 'regex_not_match'
* '>>'
* 'contains'
* '>>='
* 'contains_equals'
* '<<'
* 'contained_within'
* '<<='
* 'contained_within_equals'
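Query filters can also be combined into nested queries using the boolean
operators above. A minimal sketch (the exact query semantics are defined by
the backend's search functions)::
#Assignments inside 10.0.0.0/8
q = {
'operator': 'and',
'val1': { 'val1': 'type', 'operator': 'equals', 'val2': 'assignment' },
'val2': { 'val1': 'prefix', 'operator': 'contained_within', 'val2': '10.0.0.0/8' }
}
assignments = Prefix.search(q)['result']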
Saving changes
^^^^^^^^^^^^^^
Changes made to objects are not automatically saved. To save the changes,
simply run the object's :func:`save`-method::
vrf.name = "Spam spam spam"
vrf.save()
Error handling
--------------
As is customary in Python applications, an error results in an exception
being thrown. All pynipap exceptions extend the main exception
:class:`NipapError`. A goal with the pynipap library has been to make the
XML-RPC channel to the backend as transparent as possible, so the XML-RPC
Faults which the NIPAP server returns in case of errors are converted and
re-raised as new exceptions which also extend :class:`NipapError`;
for example, :class:`NipapDuplicateError` is raised when a duplicate key
error occurs in NIPAP.
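A typical pattern is therefore to catch these exceptions around calls which
modify data. A minimal sketch, reusing the `vrf` object from above::
try:
vrf.save()
except pynipap.NipapDuplicateError:
print("A VRF with that RT already exists")
except pynipap.NipapError as exc:
print("NIPAP error: %s" % str(exc))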
Classes
-------
"""
import sys
import logging
if sys.version_info[0] < 3:
import xmlrpclib
int = long
else:
import xmlrpc.client as xmlrpclib
__version__ = "0.29.6"
__author__ = "Kristian Larsson, Lukas Garberg"
__author_email__= "kll@tele2.net, lukas@spritelink.net"
__copyright__ = "Copyright 2011, Kristian Larsson, Lukas Garberg"
__license__ = "MIT"
__status__ = "Development"
__url__ = "http://SpriteLink.github.com/NIPAP"
# This variable holds the URI to the nipap XML-RPC service which will be used.
# It must be set before pynipap can be used!
xmlrpc_uri = None
# Caching of objects is enabled by default but can be disabled for certain
# scenarios. Since there is no cache expiration, it can be useful to
# disable it for long-running applications.
CACHE = True
class AuthOptions:
""" A global-ish authentication option container.
Note that this essentially is a global variable. If you handle multiple
queries from different users, you need to make sure that the
AuthOptions instance is set to the current user's options.
"""
__shared_state = {}
options = None
def __init__(self, options = None):
""" Create a shared option container.
The argument 'options' must be a dict containing authentication
options.
"""
self.__dict__ = self.__shared_state
if len(self.__shared_state) == 0 and options is None:
raise NipapMissingInputError("authentication options not set")
if options is not None:
self.options = options
class XMLRPCConnection:
""" Handles a shared XML-RPC connection.
"""
__shared_state = {}
connection = None
_logger = None
def __init__(self):
""" Create XML-RPC connection.
The connection will be created to the URL set in the module
variable `xmlrpc_uri`. The instantiation will fail unless this
variable is set.
"""
if xmlrpc_uri is None:
raise NipapError('XML-RPC URI not specified')
# creating new instance
self.connection = xmlrpclib.ServerProxy(xmlrpc_uri, allow_none=True,
use_datetime=True)
self._logger = logging.getLogger(self.__class__.__name__)
class Pynipap:
""" A base class for the pynipap model classes.
All Pynipap classes which map to data in NIPAP (:py:class:`VRF`,
:py:class:`Pool`, :py:class:`Prefix`) extends this class.
"""
_logger = None
""" Logging instance for this object.
"""
id = None
""" Internal database ID of object.
"""
def __eq__(self, other):
""" Perform test for equality.
"""
# Only possible if we have ID numbers set
if self.id is None or other.id is None:
return False
return self.id == other.id
def __init__(self, id=None):
""" Creates logger and XML-RPC-connection.
"""
self._logger = logging.getLogger(self.__class__.__name__)
self._auth_opts = AuthOptions()
self.id = id
class Tag(Pynipap):
""" A Tag.
"""
name = None
""" The Tag name
"""
@classmethod
def from_dict(cls, tag=None):
""" Create new Tag-object from dict.
Suitable for creating objects from XML-RPC data.
All available keys must exist.
"""
if tag is None:
tag = {}
t = Tag()
t.name = tag['name']
return t
@classmethod
def search(cls, query, search_opts=None):
""" Search tags.
For more information, see the backend function
:py:func:`nipap.backend.Nipap.search_tag`.
"""
if search_opts is None:
search_opts = {}
xmlrpc = XMLRPCConnection()
try:
search_result = xmlrpc.connection.search_tag(
{
'query': query,
'search_options': search_opts,
'auth': AuthOptions().options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
result = dict()
result['result'] = []
result['search_options'] = search_result['search_options']
for xml_tag in search_result['result']:
result['result'].append(Tag.from_dict(xml_tag))
return result
class VRF(Pynipap):
""" A VRF.
"""
rt = None
""" The VRF RT, as a string (x:y or x.x.x.x:y).
"""
name = None
""" The name of the VRF, as a string.
"""
description = None
""" VRF description, as a string.
"""
num_prefixes_v4 = None
""" Number of IPv4 prefixes in this VRF
"""
num_prefixes_v6 = None
""" Number of IPv6 prefixes in this VRF
"""
total_addresses_v4 = None
""" Total number of IPv4 addresses in this VRF
"""
total_addresses_v6 = None
""" Total number of IPv6 addresses in this VRF
"""
used_addresses_v4 = None
""" Number of used IPv4 addresses in this VRF
"""
used_addresses_v6 = None
""" Number of used IPv6 addresses in this VRF
"""
free_addresses_v4 = None
""" Number of free IPv4 addresses in this VRF
"""
free_addresses_v6 = None
""" Number of free IPv6 addresses in this VRF
"""
def __init__(self):
Pynipap.__init__(self)
self.tags = {}
self.avps = {}
@classmethod
def list(cls, vrf=None):
""" List VRFs.
Maps to the function :py:func:`nipap.backend.Nipap.list_vrf` in the
backend. Please see the documentation for the backend function for
information regarding input arguments and return values.
"""
if vrf is None:
vrf = {}
xmlrpc = XMLRPCConnection()
try:
vrf_list = xmlrpc.connection.list_vrf(
{
'vrf': vrf,
'auth': AuthOptions().options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
res = list()
for v in vrf_list:
res.append(VRF.from_dict(v))
return res
@classmethod
def from_dict(cls, parm, vrf = None):
""" Create new VRF-object from dict.
Suitable for creating objects from XML-RPC data.
All available keys must exist.
"""
if vrf is None:
vrf = VRF()
vrf.id = parm['id']
vrf.rt = parm['rt']
vrf.name = parm['name']
vrf.description = parm['description']
vrf.tags = {}
for tag_name in parm['tags']:
tag = Tag.from_dict({'name': tag_name })
vrf.tags[tag_name] = tag
vrf.avps = parm['avps']
vrf.num_prefixes_v4 = int(parm['num_prefixes_v4'])
vrf.num_prefixes_v6 = int(parm['num_prefixes_v6'])
vrf.total_addresses_v4 = int(parm['total_addresses_v4'])
vrf.total_addresses_v6 = int(parm['total_addresses_v6'])
vrf.used_addresses_v4 = int(parm['used_addresses_v4'])
vrf.used_addresses_v6 = int(parm['used_addresses_v6'])
vrf.free_addresses_v4 = int(parm['free_addresses_v4'])
vrf.free_addresses_v6 = int(parm['free_addresses_v6'])
return vrf
@classmethod
def get(cls, id):
""" Get the VRF with id 'id'.
"""
# cached?
if CACHE:
if id in _cache['VRF']:
log.debug('cache hit for VRF %d' % id)
return _cache['VRF'][id]
log.debug('cache miss for VRF %d' % id)
try:
vrf = VRF.list({ 'id': id })[0]
except IndexError:
raise NipapNonExistentError('no VRF with ID ' + str(id) + ' found')
_cache['VRF'][id] = vrf
return vrf
@classmethod
def search(cls, query, search_opts=None):
""" Search VRFs.
Maps to the function :py:func:`nipap.backend.Nipap.search_vrf` in
the backend. Please see the documentation for the backend function
for information regarding input arguments and return values.
"""
if search_opts is None:
search_opts = {}
xmlrpc = XMLRPCConnection()
try:
search_result = xmlrpc.connection.search_vrf(
{
'query': query,
'search_options': search_opts,
'auth': AuthOptions().options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
result = dict()
result['result'] = []
result['search_options'] = search_result['search_options']
for v in search_result['result']:
result['result'].append(VRF.from_dict(v))
return result
@classmethod
def smart_search(cls, query_string, search_options=None, extra_query = None):
""" Perform a smart VRF search.
Maps to the function
:py:func:`nipap.backend.Nipap.smart_search_vrf` in the backend.
Please see the documentation for the backend function for
information regarding input arguments and return values.
"""
if search_options is None:
search_options = {}
xmlrpc = XMLRPCConnection()
try:
smart_result = xmlrpc.connection.smart_search_vrf(
{
'query_string': query_string,
'search_options': search_options,
'auth': AuthOptions().options,
'extra_query': extra_query
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
result = dict()
result['interpretation'] = smart_result['interpretation']
result['search_options'] = smart_result['search_options']
result['error'] = smart_result['error']
if 'error_message' in smart_result:
result['error_message'] = smart_result['error_message']
result['result'] = list()
for v in smart_result['result']:
result['result'].append(VRF.from_dict(v))
return result
def save(self):
""" Save changes made to object to NIPAP.
If the object represents a new VRF unknown to NIPAP (attribute `id`
is `None`) this function maps to the function
:py:func:`nipap.backend.Nipap.add_vrf` in the backend, used to
create a new VRF. Otherwise it maps to the function
:py:func:`nipap.backend.Nipap.edit_vrf` in the backend, used to
modify the VRF. Please see the documentation for the backend
functions for information regarding input arguments and return
values.
"""
xmlrpc = XMLRPCConnection()
data = {
'rt': self.rt,
'name': self.name,
'description': self.description,
'tags': [],
'avps': self.avps
}
for tag_name in self.tags:
data['tags'].append(tag_name)
if self.id is None:
# New object, create
try:
vrf = xmlrpc.connection.add_vrf(
{
'attr': data,
'auth': self._auth_opts.options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
else:
# Old object, edit
try:
vrfs = xmlrpc.connection.edit_vrf(
{
'vrf': { 'id': self.id },
'attr': data,
'auth': self._auth_opts.options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
if len(vrfs) != 1:
raise NipapError('VRF edit returned %d entries, should be 1.' % len(vrfs))
vrf = vrfs[0]
# Refresh object data with attributes from add/edit operation
VRF.from_dict(vrf, self)
_cache['VRF'][self.id] = self
def remove(self):
""" Remove VRF.
Maps to the function :py:func:`nipap.backend.Nipap.remove_vrf` in
the backend. Please see the documentation for the backend function
for information regarding input arguments and return values.
"""
xmlrpc = XMLRPCConnection()
try:
xmlrpc.connection.remove_vrf(
{
'vrf': { 'id': self.id },
'auth': self._auth_opts.options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
if self.id in _cache['VRF']:
del(_cache['VRF'][self.id])
class Pool(Pynipap):
""" An address pool.
"""
name = None
description = None
default_type = None
ipv4_default_prefix_length = None
ipv6_default_prefix_length = None
vrf = None
member_prefixes_v4 = None
member_prefixes_v6 = None
used_prefixes_v4 = None
used_prefixes_v6 = None
free_prefixes_v4 = None
free_prefixes_v6 = None
total_prefixes_v4 = None
total_prefixes_v6 = None
total_addresses_v4 = None
total_addresses_v6 = None
used_addresses_v4 = None
used_addresses_v6 = None
free_addresses_v4 = None
free_addresses_v6 = None
def __init__(self):
Pynipap.__init__(self)
self.tags = {}
self.avps = {}
def save(self):
""" Save changes made to pool to NIPAP.
If the object represents a new pool unknown to NIPAP (attribute
`id` is `None`) this function maps to the function
:py:func:`nipap.backend.Nipap.add_pool` in the backend, used to
create a new pool. Otherwise it maps to the function
:py:func:`nipap.backend.Nipap.edit_pool` in the backend, used to
modify the pool. Please see the documentation for the backend
functions for information regarding input arguments and return
values.
"""
xmlrpc = XMLRPCConnection()
data = {
'name': self.name,
'description': self.description,
'default_type': self.default_type,
'ipv4_default_prefix_length': self.ipv4_default_prefix_length,
'ipv6_default_prefix_length': self.ipv6_default_prefix_length,
'tags': [],
'avps': self.avps
}
for tag_name in self.tags:
data['tags'].append(tag_name)
if self.id is None:
# New object, create
try:
pool = xmlrpc.connection.add_pool(
{
'attr': data,
'auth': self._auth_opts.options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
else:
# Old object, edit
try:
pools = xmlrpc.connection.edit_pool(
{
'pool': { 'id': self.id },
'attr': data,
'auth': self._auth_opts.options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
if len(pools) != 1:
raise NipapError('Pool edit returned %d entries, should be 1.' % len(pools))
pool = pools[0]
# Refresh object data with attributes from add/edit operation
Pool.from_dict(pool, self)
_cache['Pool'][self.id] = self
def remove(self):
""" Remove pool.
Maps to the function :py:func:`nipap.backend.Nipap.remove_pool` in
the backend. Please see the documentation for the backend function
for information regarding input arguments and return values.
"""
xmlrpc = XMLRPCConnection()
try:
xmlrpc.connection.remove_pool(
{
'pool': { 'id': self.id },
'auth': self._auth_opts.options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
if self.id in _cache['Pool']:
del(_cache['Pool'][self.id])
@classmethod
def get(cls, id):
""" Get the pool with id 'id'.
"""
# cached?
if CACHE:
if id in _cache['Pool']:
log.debug('cache hit for pool %d' % id)
return _cache['Pool'][id]
log.debug('cache miss for pool %d' % id)
try:
pool = Pool.list({'id': id})[0]
except (IndexError, KeyError):
raise NipapNonExistentError('no pool with ID ' + str(id) + ' found')
_cache['Pool'][id] = pool
return pool
@classmethod
def search(cls, query, search_opts=None):
""" Search pools.
Maps to the function :py:func:`nipap.backend.Nipap.search_pool` in
the backend. Please see the documentation for the backend function
for information regarding input arguments and return values.
"""
if search_opts is None:
search_opts = {}
xmlrpc = XMLRPCConnection()
try:
search_result = xmlrpc.connection.search_pool(
{
'query': query,
'search_options': search_opts,
'auth': AuthOptions().options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
result = dict()
result['result'] = []
result['search_options'] = search_result['search_options']
for pool in search_result['result']:
p = Pool.from_dict(pool)
result['result'].append(p)
return result
@classmethod
def smart_search(cls, query_string, search_options=None, extra_query = None):
""" Perform a smart pool search.
Maps to the function
:py:func:`nipap.backend.Nipap.smart_search_pool` in the backend.
Please see the documentation for the backend function for
information regarding input arguments and return values.
"""
if search_options is None:
search_options = {}
xmlrpc = XMLRPCConnection()
try:
smart_result = xmlrpc.connection.smart_search_pool(
{
'query_string': query_string,
'search_options': search_options,
'auth': AuthOptions().options,
'extra_query': extra_query
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
result = dict()
result['interpretation'] = smart_result['interpretation']
result['search_options'] = smart_result['search_options']
result['error'] = smart_result['error']
if 'error_message' in smart_result:
result['error_message'] = smart_result['error_message']
result['result'] = list()
for pool in smart_result['result']:
p = Pool.from_dict(pool)
result['result'].append(p)
return result
@classmethod
def from_dict(cls, parm, pool = None):
""" Create new Pool-object from dict.
Suitable for creating objects from XML-RPC data.
All available keys must exist.
"""
if pool is None:
pool = Pool()
pool.id = parm['id']
pool.name = parm['name']
pool.description = parm['description']
pool.default_type = parm['default_type']
pool.ipv4_default_prefix_length = parm['ipv4_default_prefix_length']
pool.ipv6_default_prefix_length = parm['ipv6_default_prefix_length']
for val in ('member_prefixes_v4', 'member_prefixes_v6',
'used_prefixes_v4', 'used_prefixes_v6', 'free_prefixes_v4',
'free_prefixes_v6', 'total_prefixes_v4', 'total_prefixes_v6',
'total_addresses_v4', 'total_addresses_v6', 'used_addresses_v4',
'used_addresses_v6', 'free_addresses_v4', 'free_addresses_v6'):
if parm[val] is not None:
setattr(pool, val, int(parm[val]))
pool.tags = {}
for tag_name in parm['tags']:
tag = Tag.from_dict({'name': tag_name })
pool.tags[tag_name] = tag
pool.avps = parm['avps']
# store VRF object in pool.vrf
if parm['vrf_id'] is not None:
pool.vrf = VRF.get(parm['vrf_id'])
return pool
@classmethod
def list(cls, spec=None):
""" List pools.
Maps to the function :py:func:`nipap.backend.Nipap.list_pool` in
the backend. Please see the documentation for the backend function
for information regarding input arguments and return values.
"""
if spec is None:
spec = {}
xmlrpc = XMLRPCConnection()
try:
pool_list = xmlrpc.connection.list_pool(
{
'pool': spec,
'auth': AuthOptions().options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
res = list()
for pool in pool_list:
p = Pool.from_dict(pool)
res.append(p)
return res
class Prefix(Pynipap):
""" A prefix.
"""
family = None
vrf = None
prefix = None
display_prefix = None
description = None
comment = None
node = None
pool = None
type = None
indent = None
country = None
external_key = None
order_id = None
customer_id = None
authoritative_source = None
alarm_priority = None
monitor = None
display = True
match = False
children = -2
vlan = None
added = None
last_modified = None
total_addresses = None
used_addresses = None
free_addresses = None
status = None
expires = None
def __init__(self):
Pynipap.__init__(self)
self.inherited_tags = {}
self.tags = {}
self.avps = {}
@classmethod
def get(cls, id):
""" Get the prefix with id 'id'.
"""
# cached?
if CACHE:
if id in _cache['Prefix']:
log.debug('cache hit for prefix %d' % id)
return _cache['Prefix'][id]
log.debug('cache miss for prefix %d' % id)
try:
prefix = Prefix.list({'id': id})[0]
except IndexError:
raise NipapNonExistentError('no prefix with ID ' + str(id) + ' found')
_cache['Prefix'][id] = prefix
return prefix
@classmethod
def find_free(cls, vrf, args):
""" Finds a free prefix.
Maps to the function
:py:func:`nipap.backend.Nipap.find_free_prefix` in the backend.
Please see the documentation for the backend function for
information regarding input arguments and return values.
"""
xmlrpc = XMLRPCConnection()
q = {
'args': args,
'auth': AuthOptions().options
}
# sanity checks
if isinstance(vrf, VRF):
q['vrf'] = { 'id': vrf.id }
elif vrf is None:
q['vrf'] = None
else:
raise NipapValueError('vrf parameter must be instance of VRF class')
# run XML-RPC query
try:
find_res = xmlrpc.connection.find_free_prefix(q)
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
return find_res
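# A minimal usage sketch (argument keys follow what Prefix.save() below passes
# to the backend; 'my_pool' and the prefix values are hypothetical):
#
#   Prefix.find_free(None, {'from-prefix': ['10.0.0.0/8'], 'prefix_length': 24})
#   Prefix.find_free(None, {'from-pool': {'id': my_pool.id}, 'family': 4})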
@classmethod
def search(cls, query, search_opts=None):
""" Search for prefixes.
Maps to the function :py:func:`nipap.backend.Nipap.search_prefix`
in the backend. Please see the documentation for the backend
function for information regarding input arguments and return
values.
"""
if search_opts is None:
search_opts = {}
xmlrpc = XMLRPCConnection()
try:
search_result = xmlrpc.connection.search_prefix(
{
'query': query,
'search_options': search_opts,
'auth': AuthOptions().options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
result = dict()
result['result'] = []
result['search_options'] = search_result['search_options']
for prefix in search_result['result']:
p = Prefix.from_dict(prefix)
result['result'].append(p)
return result
@classmethod
def smart_search(cls, query_string, search_options=None, extra_query = None):
""" Perform a smart prefix search.
Maps to the function
:py:func:`nipap.backend.Nipap.smart_search_prefix` in the backend.
Please see the documentation for the backend function for
information regarding input arguments and return values.
"""
if search_options is None:
search_options = {}
xmlrpc = XMLRPCConnection()
try:
smart_result = xmlrpc.connection.smart_search_prefix(
{
'query_string': query_string,
'search_options': search_options,
'auth': AuthOptions().options,
'extra_query': extra_query
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
result = dict()
result['interpretation'] = smart_result['interpretation']
result['search_options'] = smart_result['search_options']
result['error'] = smart_result['error']
if 'error_message' in smart_result:
result['error_message'] = smart_result['error_message']
result['result'] = list()
for prefix in smart_result['result']:
p = Prefix.from_dict(prefix)
result['result'].append(p)
return result
@classmethod
def list(cls, spec=None):
""" List prefixes.
Maps to the function :py:func:`nipap.backend.Nipap.list_prefix` in
the backend. Please see the documentation for the backend function
for information regarding input arguments and return values.
"""
if spec is None:
spec = {}
xmlrpc = XMLRPCConnection()
try:
pref_list = xmlrpc.connection.list_prefix(
{
'prefix': spec,
'auth': AuthOptions().options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
res = list()
for pref in pref_list:
p = Prefix.from_dict(pref)
res.append(p)
return res
def save(self, args=None):
""" Save prefix to NIPAP.
If the object represents a new prefix unknown to NIPAP (attribute
`id` is `None`) this function maps to the function
:py:func:`nipap.backend.Nipap.add_prefix` in the backend, used to
create a new prefix. Otherwise it maps to the function
:py:func:`nipap.backend.Nipap.edit_prefix` in the backend, used to
modify the prefix. Please see the documentation for the backend
functions for information regarding input arguments and return
values.
"""
if args is None:
args = {}
xmlrpc = XMLRPCConnection()
data = {
'description': self.description,
'comment': self.comment,
'tags': [],
'node': self.node,
'type': self.type,
'country': self.country,
'order_id': self.order_id,
'customer_id': self.customer_id,
'external_key': self.external_key,
'alarm_priority': self.alarm_priority,
'monitor': self.monitor,
'vlan': self.vlan,
'avps': self.avps,
'expires': self.expires
}
if self.status is not None:
data['status'] = self.status
for tag_name in self.tags:
data['tags'].append(tag_name)
if self.vrf is not None:
if not isinstance(self.vrf, VRF):
raise NipapValueError("'vrf' attribute not instance of VRF class.")
data['vrf_id'] = self.vrf.id
# Prefix can be none if we are creating a new prefix
# from a pool or other prefix!
if self.prefix is not None:
data['prefix'] = self.prefix
if self.pool is None:
data['pool_id'] = None
else:
if not isinstance(self.pool, Pool):
raise NipapValueError("'pool' attribute not instance of Pool class.")
data['pool_id'] = self.pool.id
# New object, create from scratch
if self.id is None:
# format args
x_args = {}
if 'from-pool' in args:
x_args['from-pool'] = { 'id': args['from-pool'].id }
if 'family' in args:
x_args['family'] = args['family']
if 'from-prefix' in args:
x_args['from-prefix'] = args['from-prefix']
if 'prefix_length' in args:
x_args['prefix_length'] = args['prefix_length']
try:
prefix = xmlrpc.connection.add_prefix(
{
'attr': data,
'args': x_args,
'auth': self._auth_opts.options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
# Old object, edit
else:
# Add authoritative source to data
data['authoritative_source'] = self.authoritative_source
try:
# save
prefixes = xmlrpc.connection.edit_prefix(
{
'prefix': { 'id': self.id },
'attr': data,
'auth': self._auth_opts.options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
if len(prefixes) != 1:
raise NipapError('Prefix edit returned %d entries, should be 1.' % len(prefixes))
prefix = prefixes[0]
# Refresh object data with attributes from add/edit operation
Prefix.from_dict(prefix, self)
# update cache
_cache['Prefix'][self.id] = self
if self.pool is not None:
if self.pool.id in _cache['Pool']:
del _cache['Pool'][self.pool.id]
def remove(self, recursive = False):
""" Remove the prefix.
Maps to the function :py:func:`nipap.backend.Nipap.remove_prefix`
in the backend. Please see the documentation for the backend
function for information regarding input arguments and return
values.
"""
xmlrpc = XMLRPCConnection()
try:
xmlrpc.connection.remove_prefix(
{
'prefix': { 'id': self.id },
'recursive': recursive,
'auth': self._auth_opts.options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
# update cache
if self.id in _cache['Prefix']:
del(_cache['Prefix'][self.id])
if self.pool is not None:
if self.pool.id in _cache['Pool']:
del _cache['Pool'][self.pool.id]
@classmethod
def from_dict(cls, pref, prefix = None):
""" Create a Prefix object from a dict.
Suitable for creating Prefix objects from XML-RPC input.
"""
if prefix is None:
prefix = Prefix()
prefix.id = pref['id']
if pref['vrf_id'] is not None: # VRF is not mandatory
prefix.vrf = VRF.get(pref['vrf_id'])
prefix.family = pref['family']
prefix.prefix = pref['prefix']
prefix.display_prefix = pref['display_prefix']
prefix.description = pref['description']
prefix.comment = pref['comment']
prefix.node = pref['node']
if pref['pool_id'] is not None: # Pool is not mandatory
prefix.pool = Pool.get(pref['pool_id'])
prefix.type = pref['type']
prefix.indent = pref['indent']
prefix.country = pref['country']
prefix.order_id = pref['order_id']
prefix.customer_id = pref['customer_id']
prefix.external_key = pref['external_key']
prefix.authoritative_source = pref['authoritative_source']
prefix.alarm_priority = pref['alarm_priority']
prefix.monitor = pref['monitor']
prefix.vlan = pref['vlan']
prefix.added = pref['added']
prefix.last_modified = pref['last_modified']
prefix.total_addresses = int(pref['total_addresses'])
prefix.used_addresses = int(pref['used_addresses'])
prefix.free_addresses = int(pref['free_addresses'])
prefix.status = pref['status']
prefix.avps = pref['avps']
prefix.expires = pref['expires']
prefix.inherited_tags = {}
for tag_name in pref['inherited_tags']:
tag = Tag.from_dict({'name': tag_name })
prefix.inherited_tags[tag_name] = tag
prefix.tags = {}
for tag_name in pref['tags']:
tag = Tag.from_dict({'name': tag_name })
prefix.tags[tag_name] = tag
if 'match' in pref:
prefix.match = pref['match']
if 'display' in pref:
prefix.display = pref['display']
if 'children' in pref:
prefix.children = pref['children']
return prefix
def nipapd_version():
""" Get version of nipapd we're connected to.
Maps to the function :py:func:`nipap.xmlrpc.NipapXMLRPC.version` in the
XML-RPC API. Please see the documentation for the XML-RPC function for
information regarding the return value.
"""
xmlrpc = XMLRPCConnection()
try:
return xmlrpc.connection.version(
{
'auth': AuthOptions().options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
def nipap_db_version():
""" Get schema version of database we're connected to.
Maps to the function :py:func:`nipap.backend.Nipap._get_db_version` in
the backend. Please see the documentation for the backend function for
information regarding the return value.
"""
xmlrpc = XMLRPCConnection()
try:
return xmlrpc.connection.db_version(
{
'auth': AuthOptions().options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
#
# Define exceptions
#
class NipapError(Exception):
""" A generic NIPAP model exception.
All errors thrown from the NIPAP model extends this exception.
"""
pass
class NipapNonExistentError(NipapError):
""" Thrown when something can not be found.
For example when a given ID can not be found in the NIPAP database.
"""
class NipapInputError(NipapError):
""" Something wrong with the input we received
A general case.
"""
pass
class NipapMissingInputError(NipapInputError):
""" Missing input
Most input is passed in dicts, this could mean a missing key in a dict.
"""
pass
class NipapExtraneousInputError(NipapInputError):
""" Extraneous input
Most input is passed in dicts, this could mean an unknown key in a dict.
"""
pass
class NipapNoSuchOperatorError(NipapInputError):
""" A non existent operator was specified.
"""
pass
class NipapValueError(NipapError):
""" Something wrong with a value we have
For example, trying to send an integer when an IP address is expected.
"""
pass
class NipapDuplicateError(NipapError):
""" A duplicate entry was encountered
"""
pass
class NipapAuthError(NipapError):
""" General NIPAP AAA error
"""
pass
class NipapAuthenticationError(NipapAuthError):
""" Authentication failed.
"""
pass
class NipapAuthorizationError(NipapAuthError):
""" Authorization failed.
"""
pass
#
# GLOBAL STUFF
#
# Simple object cache
# TODO: fix some kind of timeout
_cache = {
'Pool': {},
'Prefix': {},
'VRF': {}
}
# Map from XML-RPC Fault codes to Exception classes
_fault_to_exception_map = {
1000: NipapError,
1100: NipapInputError,
1110: NipapMissingInputError,
1120: NipapExtraneousInputError,
1200: NipapValueError,
1300: NipapNonExistentError,
1400: NipapDuplicateError,
1500: NipapAuthError,
1510: NipapAuthenticationError,
1520: NipapAuthorizationError
}
log = logging.getLogger("Pynipap")
def _fault_to_exception(f):
""" Converts XML-RPC Fault objects to Pynipap-exceptions.
TODO: Is this one necessary? Can be done inline...
"""
e = _fault_to_exception_map.get(f.faultCode)
if e is None:
e = NipapError
return e(f.faultString)
|
|
import uuid
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
from django.utils import timezone
from django.utils.encoding import force_text
class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django.db.backends.mysql.compiler"
# MySQL stores positive fields as UNSIGNED ints.
integer_field_ranges = dict(
BaseDatabaseOperations.integer_field_ranges,
PositiveSmallIntegerField=(0, 65535),
PositiveIntegerField=(0, 4294967295),
)
cast_data_types = {
'CharField': 'char(%(max_length)s)',
'IntegerField': 'signed integer',
'BigIntegerField': 'signed integer',
'SmallIntegerField': 'signed integer',
'FloatField': 'signed',
'PositiveIntegerField': 'unsigned integer',
'PositiveSmallIntegerField': 'unsigned integer',
}
cast_char_field_without_max_length = 'char'
def date_extract_sql(self, lookup_type, field_name):
# http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
if lookup_type == 'week_day':
# DAYOFWEEK() returns an integer, 1-7, Sunday=1.
# Note: WEEKDAY() returns 0-6, Monday=0.
return "DAYOFWEEK(%s)" % field_name
elif lookup_type == 'week':
# Override the value of default_week_format for consistency with
# other database backends.
# Mode 3: Monday, 1-53, with 4 or more days this year.
return "WEEK(%s, 3)" % field_name
else:
# EXTRACT returns 1-53 based on ISO-8601 for the week number.
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_trunc_sql(self, lookup_type, field_name):
fields = {
'year': '%%Y-01-01',
'month': '%%Y-%%m-01',
} # Use double percents to escape.
if lookup_type in fields:
format_str = fields[lookup_type]
return "CAST(DATE_FORMAT(%s, '%s') AS DATE)" % (field_name, format_str)
elif lookup_type == 'quarter':
return "MAKEDATE(YEAR(%s), 1) + INTERVAL QUARTER(%s) QUARTER - INTERVAL 1 QUARTER" % (
field_name, field_name
)
else:
return "DATE(%s)" % (field_name)
def _convert_field_to_tz(self, field_name, tzname):
if settings.USE_TZ:
field_name = "CONVERT_TZ(%s, 'UTC', '%s')" % (field_name, tzname)
return field_name
def datetime_cast_date_sql(self, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
return "DATE(%s)" % field_name
def datetime_cast_time_sql(self, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
return "TIME(%s)" % field_name
def datetime_extract_sql(self, lookup_type, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
return self.date_extract_sql(lookup_type, field_name)
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
if lookup_type == 'quarter':
return (
"CAST(DATE_FORMAT(MAKEDATE(YEAR({field_name}), 1) + "
"INTERVAL QUARTER({field_name}) QUARTER - " +
"INTERVAL 1 QUARTER, '%%Y-%%m-01 00:00:00') AS DATETIME)"
).format(field_name=field_name)
try:
i = fields.index(lookup_type) + 1
except ValueError:
sql = field_name
else:
format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
return sql
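# For example, with USE_TZ = False (so no CONVERT_TZ wrapping), truncating to
# the minute yields:
#   datetime_trunc_sql('minute', 'created', None)
#   -> "CAST(DATE_FORMAT(created, '%Y-%m-%d %H:%i:00') AS DATETIME)"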
def time_trunc_sql(self, lookup_type, field_name):
fields = {
'hour': '%%H:00:00',
'minute': '%%H:%%i:00',
'second': '%%H:%%i:%%s',
} # Use double percents to escape.
if lookup_type in fields:
format_str = fields[lookup_type]
return "CAST(DATE_FORMAT(%s, '%s') AS TIME)" % (field_name, format_str)
else:
return "TIME(%s)" % (field_name)
def date_interval_sql(self, timedelta):
return "INTERVAL '%06f' SECOND_MICROSECOND" % timedelta.total_seconds()
def format_for_duration_arithmetic(self, sql):
if self.connection.features.supports_microsecond_precision:
return 'INTERVAL %s MICROSECOND' % sql
else:
return 'INTERVAL FLOOR(%s / 1000000) SECOND' % sql
def force_no_ordering(self):
"""
"ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
columns. If no ordering would otherwise be applied, we don't want any
implicit sorting going on.
"""
return [(None, ("NULL", [], False))]
def last_executed_query(self, cursor, sql, params):
# With MySQLdb, cursor objects have an (undocumented) "_last_executed"
# attribute where the exact query sent to the database is saved.
# See MySQLdb/cursors.py in the source distribution.
return force_text(getattr(cursor, '_last_executed', None), errors='replace')
def no_limit_value(self):
# 2**64 - 1, as recommended by the MySQL documentation
return 18446744073709551615
def quote_name(self, name):
if name.startswith("`") and name.endswith("`"):
return name # Quoting once is enough.
return "`%s`" % name
def random_function_sql(self):
return 'RAND()'
def sql_flush(self, style, tables, sequences, allow_cascade=False):
# NB: The generated SQL below is specific to MySQL
# 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
# to clear all tables of all data
if tables:
sql = ['SET FOREIGN_KEY_CHECKS = 0;']
for table in tables:
sql.append('%s %s;' % (
style.SQL_KEYWORD('TRUNCATE'),
style.SQL_FIELD(self.quote_name(table)),
))
sql.append('SET FOREIGN_KEY_CHECKS = 1;')
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
def validate_autopk_value(self, value):
# MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653.
if value == 0:
raise ValueError('The database backend does not accept 0 as a '
'value for AutoField.')
return value
def adapt_datetimefield_value(self, value):
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, 'resolve_expression'):
return value
# MySQL doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = timezone.make_naive(value, self.connection.timezone)
else:
raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.")
if not self.connection.features.supports_microsecond_precision:
value = value.replace(microsecond=0)
return str(value)
def adapt_timefield_value(self, value):
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, 'resolve_expression'):
return value
# MySQL doesn't support tz-aware times
if timezone.is_aware(value):
raise ValueError("MySQL backend does not support timezone-aware times.")
return str(value)
def max_name_length(self):
return 64
def bulk_insert_sql(self, fields, placeholder_rows):
placeholder_rows_sql = (", ".join(row) for row in placeholder_rows)
values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql)
return "VALUES " + values_sql
def combine_expression(self, connector, sub_expressions):
if connector == '^':
return 'POW(%s)' % ','.join(sub_expressions)
# Convert the result to a signed integer since MySQL's binary operators
# return an unsigned integer.
elif connector in ('&', '|', '<<'):
return 'CONVERT(%s, SIGNED)' % connector.join(sub_expressions)
elif connector == '>>':
lhs, rhs = sub_expressions
return 'FLOOR(%(lhs)s / POW(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs}
return super().combine_expression(connector, sub_expressions)
def get_db_converters(self, expression):
converters = super().get_db_converters(expression)
internal_type = expression.output_field.get_internal_type()
if internal_type == 'TextField':
converters.append(self.convert_textfield_value)
elif internal_type in ['BooleanField', 'NullBooleanField']:
converters.append(self.convert_booleanfield_value)
elif internal_type == 'DateTimeField':
converters.append(self.convert_datetimefield_value)
elif internal_type == 'UUIDField':
converters.append(self.convert_uuidfield_value)
return converters
def convert_textfield_value(self, value, expression, connection):
if value is not None:
value = force_text(value)
return value
def convert_booleanfield_value(self, value, expression, connection):
if value in (0, 1):
value = bool(value)
return value
def convert_datetimefield_value(self, value, expression, connection):
if value is not None:
if settings.USE_TZ:
value = timezone.make_aware(value, self.connection.timezone)
return value
def convert_uuidfield_value(self, value, expression, connection):
if value is not None:
value = uuid.UUID(value)
return value
def binary_placeholder_sql(self, value):
return '_binary %s' if value is not None and not hasattr(value, 'as_sql') else '%s'
def subtract_temporals(self, internal_type, lhs, rhs):
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
if self.connection.features.supports_microsecond_precision:
if internal_type == 'TimeField':
return (
"((TIME_TO_SEC(%(lhs)s) * POW(10, 6) + MICROSECOND(%(lhs)s)) -"
" (TIME_TO_SEC(%(rhs)s) * POW(10, 6) + MICROSECOND(%(rhs)s)))"
) % {'lhs': lhs_sql, 'rhs': rhs_sql}, lhs_params * 2 + rhs_params * 2
else:
return "TIMESTAMPDIFF(MICROSECOND, %s, %s)" % (rhs_sql, lhs_sql), rhs_params + lhs_params
elif internal_type == 'TimeField':
return (
"(TIME_TO_SEC(%s) * POW(10, 6) - TIME_TO_SEC(%s) * POW(10, 6))"
) % (lhs_sql, rhs_sql), lhs_params + rhs_params
else:
return "(TIMESTAMPDIFF(SECOND, %s, %s) * POW(10, 6))" % (rhs_sql, lhs_sql), rhs_params + lhs_params
|
|
#!/usr/bin/env python
# coding=utf-8
# Author: YAO Matrix (yaoweifeng0301@126.com)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import sys
import time
import datetime
import logging
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from data_io import load_imageset, split_cv
from metrics import precision, error_rate
module_dir = os.path.dirname(os.path.abspath(__file__))
module_name = os.path.basename(__file__).split('.')[0]
log_path = os.path.join(module_dir, os.path.pardir, os.path.pardir, 'logs', module_name + '_' + datetime.date.today().strftime('%Y%m%d') + '.log')
logger = logging.getLogger(module_name)
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(log_path)
ch = logging.StreamHandler()
fh.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s][%(name)s][%(levelname)s]: %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
SEED = 66478 # Set to None for random seed.
BATCH_SIZE = 64
NUM_EPOCHS = 50
EVAL_BATCH_SIZE = 64
EVAL_FREQUENCY = 100 # Number of steps between evaluations.
FLAGS = tf.app.flags.FLAGS
def main(argv = None): # pylint: disable=unused-argument
# load imageset
train_set_folder = os.path.join(module_dir, os.path.pardir, os.path.pardir, 'data/ocr/train')
test_set_folder = os.path.join(module_dir, os.path.pardir, os.path.pardir, 'data/ocr/test')
# Extract it into numpy arrays.
train_data, train_labels = load_imageset(train_set_folder, to_img_size = (28, 28, 1), ext = 'png')
test_data, test_labels = load_imageset(test_set_folder, to_img_size = (28, 28, 1), ext = 'png')
height = train_data.shape[1]
width = train_data.shape[2]
channel = (train_data.shape[3] if train_data.ndim > 3 else 1)
label_max = np.amax(train_labels)
label_min = np.amin(train_labels)
num_labels = label_max - label_min + 1
# Generate a validation set.
train_data, train_labels, validation_data, validation_labels = split_cv(train_data, train_labels, 0.1)
num_epochs = NUM_EPOCHS
train_size = train_labels.shape[0]
# This is where training samples and labels are fed to the graph.
# These placeholder nodes will be fed a batch of training data at each
# training step using the {feed_dict} argument to the Run() call below.
train_data_node = tf.placeholder(
tf.float32,
shape = (BATCH_SIZE, height, width, channel))
train_labels_node = tf.placeholder(tf.int64, shape = (BATCH_SIZE,))
eval_data = tf.placeholder(
tf.float32,
shape=(EVAL_BATCH_SIZE, height, width, channel))
# The variables below hold all the trainable weights. They are passed an
# initial value which will be assigned when we call:
# {tf.initialize_all_variables().run()}
conv1_weights = tf.Variable(
tf.truncated_normal([5, 5, channel, 32], # 5x5 filter, depth 32.
stddev = 0.1,
seed = SEED),
name="conv1_weights")
conv1_biases = tf.Variable(tf.zeros([32]), name = "conv1_biases")
conv2_weights = tf.Variable(
tf.truncated_normal([5, 5, 32, 64],
stddev = 0.1,
seed = SEED),
name="conv2_weights")
conv2_biases = tf.Variable(tf.constant(0.1, shape = [64]), name = "conv2_biases")
fc1_weights = tf.Variable( # fully connected, depth 512.
tf.truncated_normal(
[(height // 4) * (width // 4) * 64, 512],
stddev = 0.1,
seed = SEED),
name = "fc1_weights")
fc1_biases = tf.Variable(tf.constant(0.1, shape = [512]), name = "fc1_biases")
fc2_weights = tf.Variable(
tf.truncated_normal([512, num_labels],
stddev = 0.1,
seed = SEED),
name = "fc2_weights")
fc2_biases = tf.Variable(tf.constant(0.1, shape = [num_labels]), name = "fc2_biases")
# We will replicate the model structure for the training subgraph, as well
# as the evaluation subgraphs, while sharing the trainable parameters.
def lenet2(data, train = False):
"""LeNet2 definition."""
# 2D convolution, with 'SAME' padding (i.e. the output feature map has
# the same size as the input). Note that {strides} is a 4D array whose
# shape matches the data layout: [n, h, w, c].
conv1 = tf.nn.conv2d(data,
conv1_weights,
strides = [1, 1, 1, 1],
padding = 'SAME')
# Bias and rectified linear non-linearity.
relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
# Max pooling. The kernel size spec {ksize} also follows the layout of
# the data. Here we have a pooling window of 2, and a stride of 2.
pool1 = tf.nn.max_pool(relu1,
ksize = [1, 2, 2, 1],
strides = [1, 2, 2, 1],
padding = 'SAME')
conv2 = tf.nn.conv2d(pool1,
conv2_weights,
strides = [1, 1, 1, 1],
padding = 'SAME')
relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
pool2 = tf.nn.max_pool(relu2,
ksize = [1, 2, 2, 1],
strides = [1, 2, 2, 1],
padding = 'SAME')
# Reshape the feature map cuboid into a 2D matrix to feed it to the
# fully connected layers.
pool_shape = pool2.get_shape().as_list()
reshape = tf.reshape(pool2,
[pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
# Fully connected layer. Note that the '+' operation automatically
# broadcasts the biases.
fc1 = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
# Add a 50% dropout during training only. Dropout also scales
# activations such that no rescaling is needed at evaluation time.
if train:
fc1 = tf.nn.dropout(fc1, 0.5, seed = SEED)
return tf.matmul(fc1, fc2_weights) + fc2_biases
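# With the 28x28x1 inputs loaded above, shapes flow as:
# conv1/pool1 -> (batch, 14, 14, 32), conv2/pool2 -> (batch, 7, 7, 64),
# reshape -> (batch, 3136), fc1 -> (batch, 512), logits -> (batch, num_labels).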
# Training computation: logits + cross-entropy loss.
logits = lenet2(train_data_node, True)
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=train_labels_node))
# L2 regularization for the fully connected parameters.
regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +
tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases))
# Add the regularization term to the loss.
loss += 5e-4 * regularizers
# Optimizer: set up a variable that's incremented once per batch and
# controls the learning rate decay.
batch = tf.Variable(0)
# Decay once per epoch, using an exponential schedule starting at 0.01.
learning_rate = tf.train.exponential_decay(
0.01, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
train_size, # Decay step.
0.95, # Decay rate.
staircase=True)
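# With staircase=True this is equivalent to:
#   lr = 0.01 * 0.95 ** floor((batch * BATCH_SIZE) / train_size)
# i.e. the learning rate is multiplied by 0.95 once per epoch.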
# Use simple momentum for the optimization.
optimizer = tf.train.MomentumOptimizer(learning_rate,
0.9).minimize(loss,
global_step = batch)
# Predictions for the current training minibatch.
train_prediction = tf.nn.softmax(logits)
# Predictions for the test and validation, which we'll compute less often.
eval_prediction = tf.nn.softmax(lenet2(eval_data))
# Small utility function to evaluate a dataset by feeding batches of data to
# {eval_data} and pulling the results from {eval_predictions}.
# Saves memory and enables this to run on smaller GPUs.
def eval_in_batches(data, sess):
"""Get all predictions for a dataset by running it in small batches."""
size = data.shape[0]
if size < EVAL_BATCH_SIZE:
raise ValueError("batch size for evals larger than dataset: %d" % size)
predictions = np.ndarray(shape = (size, num_labels), dtype = np.float32)
for begin in xrange(0, size, EVAL_BATCH_SIZE):
end = begin + EVAL_BATCH_SIZE
if end <= size:
predictions[begin:end, :] = sess.run(
eval_prediction,
feed_dict={eval_data: data[begin:end, ...]})
else:
batch_predictions = sess.run(
eval_prediction,
feed_dict={eval_data: data[-EVAL_BATCH_SIZE:, ...]})
predictions[begin:, :] = batch_predictions[begin - size:, :]
return predictions
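  # Example of the tail handling above: with size=10 and EVAL_BATCH_SIZE=4 the
  # last iteration has begin=8 and end=12 > size, so the final full batch is run
  # on data[-4:] and only its last rows are kept via
  # batch_predictions[begin - size:, :] == batch_predictions[-2:, :],
  # filling predictions[8:, :] without double counting.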
# Create a local session to run the training.
start_time = time.time()
model_dir = os.path.join(module_dir, os.path.pardir, os.path.pardir, 'models')
with tf.Session() as sess:
# Run all the initializers to prepare the trainable parameters.
tf.initialize_all_variables().run()
# Import base model weights
saver = tf.train.Saver([conv1_weights, conv1_biases, conv2_weights, conv2_biases, fc1_weights, fc1_biases])
ckpt = tf.train.get_checkpoint_state(os.path.join(model_dir, 'base'))
if ckpt and ckpt.model_checkpoint_path:
logger.info("Continue training from the model {}".format(ckpt.model_checkpoint_path))
saver.restore(sess, ckpt.model_checkpoint_path)
# for var in tf.trainable_variables():
# logger.info(var.eval())
logger.info('Initialized!')
# Loop through training steps.
for step in xrange(int(num_epochs * train_size) // BATCH_SIZE):
# Compute the offset of the current minibatch in the data.
# Note that we could use better randomization across epochs.
offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
batch_data = train_data[offset:(offset + BATCH_SIZE), ...]
batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
# This dictionary maps the batch data (as a numpy array) to the
# node in the graph it should be fed to.
feed_dict = {train_data_node: batch_data,
train_labels_node: batch_labels}
# Run the graph and fetch some of the nodes.
_, l, lr, predictions = sess.run(
[optimizer, loss, learning_rate, train_prediction],
feed_dict=feed_dict)
if step % EVAL_FREQUENCY == 0:
elapsed_time = time.time() - start_time
start_time = time.time()
logger.info('Step %d (epoch %.2f), %.1f ms' %
(step, float(step) * BATCH_SIZE / train_size,
1000 * elapsed_time / EVAL_FREQUENCY))
logger.info('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))
logger.info('Minibatch error: %.1f%%' % error_rate(predictions, batch_labels))
logger.info('Validation error: %.1f%%' % error_rate(
eval_in_batches(validation_data, sess), validation_labels))
sys.stdout.flush()
# Finally print the result!
test_precision = precision(eval_in_batches(test_data, sess), test_labels)
logger.info('Test precision: %.1f%%' % test_precision)
# Model persistence
saver = tf.train.Saver([conv1_weights, conv1_biases, conv2_weights, conv2_biases, fc1_weights, fc1_biases, fc2_weights, fc2_biases])
model_path = os.path.join(model_dir, "finetuned", "lenet_finetuned.ckpt")
save_path = saver.save(sess, model_path)
logger.info("Model saved in file: %s" % save_path)
if __name__ == '__main__':
tf.app.run()
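# Illustrative sketch, separate from the script above: one common way to turn
# softmax predictions into a top-1 error rate, in the spirit of the error_rate()
# helper called in the training loop (the helper's own definition may differ).
import numpy as np
def top1_error_rate(predictions, labels):
  """Percentage of rows whose argmax does not match the integer label."""
  correct = np.sum(np.argmax(predictions, axis=1) == labels)
  return 100.0 - (100.0 * correct / predictions.shape[0])
# Two samples, three classes; the second prediction is wrong -> 50.0%.
_preds = np.array([[0.1, 0.8, 0.1], [0.7, 0.2, 0.1]])
_labels = np.array([1, 2])
assert abs(top1_error_rate(_preds, _labels) - 50.0) < 1e-9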
|
|
import pydotplus
import os
import math
import numpy as np
from collections import OrderedDict
from itertools import islice
from . import util, diagnostics
from .distributions import Empirical
class Node():
def __init__(self, address_id, variable, weight):
self.address_id = address_id
self.variable = variable
self.weight = weight
self.outgoing_edges = []
if variable is None:
self.color = '#ffffff'
else:
if variable.control:
if variable.replace:
self.color = '#adff2f'
else:
self.color = '#fa8072'
elif variable.observable:
self.color = '#1effff'
if variable.observed:
self.color = '#1e90ff'
elif variable.tagged:
self.color = '#cccccc'
else:
self.color = '#ffd700'
def add_outgoing_edge(self, node, weight):
edge = Edge(self, node, weight)
self.outgoing_edges.append(edge)
return edge
def __repr__(self):
return 'Node(address_id:{}, weight:{}, outgoing_edges:{})'.format(self.address_id, self.weight, [str(edge) for edge in self.outgoing_edges])
class Edge():
def __init__(self, node_0, node_1, weight):
self.node_0 = node_0
self.node_1 = node_1
self.weight = weight
def __repr__(self):
return 'Edge(node_0: {}, node_1:{}, weight:{})'.format(self.node_0.address_id, self.node_1.address_id, self.weight)
class Graph():
def __init__(self, trace_dist, base_graph=None, use_address_base=True, n_most_frequent=None, normalize_weights=True):
self.nodes = []
self.edges = []
if base_graph is None:
self.address_stats = None
self.trace_stats = None
self.use_address_base = use_address_base
else:
self.address_stats = base_graph.address_stats
self.trace_stats = base_graph.trace_stats
self.use_address_base = base_graph.use_address_base
# self.address_stats = diagnostics._address_stats(trace_dist, use_address_base=self.use_address_base)
self.trace_stats = diagnostics._trace_stats(trace_dist, use_address_base=self.use_address_base, reuse_ids_from_address_stats=self.address_stats, reuse_ids_from_trace_stats=self.trace_stats)
self.address_stats = self.trace_stats['address_stats']
address_id_to_variable = self.address_stats['address_id_to_variable']
traces = self.trace_stats['traces']
traces = OrderedDict(sorted(dict(traces).items(), key=lambda x: x[1]['count'], reverse=True))
if n_most_frequent is not None:
# n_most_frequent = len(self.trace_stats)
traces = dict(islice(traces.items(), n_most_frequent))
nodes = {}
edges = {}
for key, value in traces.items():
weight = value['weight']
address_id_sequence = value['address_id_sequence']
for address_id in address_id_sequence:
if address_id in nodes:
nodes[address_id] += weight
else:
nodes[address_id] = weight
for left, right in zip(address_id_sequence, address_id_sequence[1:]):
if (left, right) in edges:
edges[(left, right)] += weight
else:
edges[(left, right)] = weight
for edge, weight in edges.items():
address_id_0 = edge[0]
node_0 = self.get_node(address_id_0)
if node_0 is None:
if address_id_0 in address_id_to_variable:
variable_0 = address_id_to_variable[address_id_0]
else:
variable_0 = None
node_0 = Node(address_id_0, variable_0, nodes[address_id_0])
self.add_node(node_0)
address_id_1 = edge[1]
node_1 = self.get_node(address_id_1)
if node_1 is None:
if address_id_1 in address_id_to_variable:
variable_1 = address_id_to_variable[address_id_1]
else:
variable_1 = None
node_1 = Node(address_id_1, variable_1, nodes[address_id_1])
self.add_node(node_1)
self.add_edge(node_0.add_outgoing_edge(node_1, weight))
if normalize_weights:
self.normalize_weights()
def add_node(self, node):
self.nodes.append(node)
def get_node(self, address_id):
return next((node for node in self.nodes if node.address_id == address_id), None)
def add_edge(self, edge):
self.edges.append(edge)
def normalize_weights(self):
node_weight_total = 0
for node in self.nodes:
node_weight_total += node.weight
edge_weight_total = 0
for edge in node.outgoing_edges:
edge_weight_total += edge.weight
if edge_weight_total > 0:
for edge in node.outgoing_edges:
edge.weight /= edge_weight_total
for node in self.nodes:
node.weight /= node_weight_total
def trace_graphs(self):
traces = self.trace_stats['traces']
for key, val in traces.items():
trace = val['trace']
trace_id = val['trace_id']
yield trace_id, Graph(Empirical([trace]), base_graph=self, use_address_base=self.use_address_base)
def render_to_graphviz(self, background_graph=None):
if background_graph is None:
graph = pydotplus.graphviz.Dot(graph_type='digraph', rankdir='LR')
else:
graph = pydotplus.graphviz.graph_from_dot_data(background_graph.render_to_graphviz())
for node in graph.get_nodes():
node.set_color('#cccccc')
node.set_fontcolor('#cccccc')
for edge in graph.get_edges():
edge.set_color('#cccccc')
edge.set_fontcolor('#cccccc')
edge.set_label(' ')
for edge in self.edges:
node_0 = edge.node_0
nodes = graph.get_node(node_0.address_id)
if len(nodes) > 0:
graph_node_0 = nodes[0]
else:
graph_node_0 = pydotplus.Node(node_0.address_id)
graph.add_node(graph_node_0)
graph_node_0.set_style('filled')
graph_node_0.set_fillcolor(node_0.color)
graph_node_0.set_color('black')
graph_node_0.set_fontcolor('black')
color_factor = 0.75 * (math.exp(1. - node_0.weight) - 1.) / (math.e - 1.)
graph_node_0.set_penwidth(max(0.1, 4 * (1 - color_factor)))
node_1 = edge.node_1
nodes = graph.get_node(node_1.address_id)
if len(nodes) > 0:
graph_node_1 = nodes[0]
else:
graph_node_1 = pydotplus.Node(node_1.address_id)
graph.add_node(graph_node_1)
graph_node_1.set_style('filled')
graph_node_1.set_fillcolor(node_1.color)
graph_node_1.set_color('black')
graph_node_1.set_fontcolor('black')
color_factor = 0.75 * (math.exp(1. - node_1.weight) - 1.) / (math.e - 1.)
graph_node_1.set_penwidth(max(0.25, 5 * (1 - color_factor)))
edges = graph.get_edge(node_0.address_id, node_1.address_id)
if len(edges) > 0:
graph_edge = edges[0]
else:
graph_edge = pydotplus.Edge(graph_node_0, graph_node_1, weight=max(edge.weight, 1e-3)) # pydotplus fails with extremely small weights
graph.add_edge(graph_edge)
# if background_graph is None:
graph_edge.set_label('\"{:,.3f}\"'.format(edge.weight))
color_factor = 0.75 * (math.exp(1. - edge.weight) - 1.) / (math.e - 1.)
graph_edge.set_color(util.rgb_to_hex((color_factor, color_factor, color_factor)))
graph_edge.set_fontcolor('black')
# else:
# graph_edge.set_color('black')
# graph_edge.set_fontcolor('black')
return graph.to_string()
def render_to_file(self, file_name, background_graph=None):
graph = self.render_to_graphviz(background_graph)
file_name_dot = file_name + '.dot'
with open(file_name_dot, 'w') as file:
file.write(graph)
file_name_pdf = file_name + '.pdf'
status = os.system('dot -Tpdf {} -o {}'.format(file_name_dot, file_name_pdf))
if status != 0:
            print('Cannot render to file {}. Check that GraphViz is installed.'.format(file_name_pdf))
def sample_execution(self):
node = self.get_node('START')
seq = [node]
while node.address_id != 'END':
weights = [edge.weight for edge in node.outgoing_edges]
edge = np.random.choice(node.outgoing_edges, 1, p=weights)[0]
node = edge.node_1
seq.append(node)
return seq
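# Illustrative, self-contained sketch of the weighted random walk performed by
# Graph.sample_execution above, written with plain dictionaries instead of the
# Node/Edge objects (the address ids and weights below are made up).
import numpy as np
_toy_edges = {
    'START': [('A', 0.75), ('B', 0.25)],
    'A': [('END', 1.0)],
    'B': [('A', 0.5), ('END', 0.5)],
}
def _sample_toy_execution(edges, rng=np.random):
    node = 'START'
    seq = [node]
    while node != 'END':
        successors, weights = zip(*edges[node])
        node = rng.choice(successors, p=weights)
        seq.append(node)
    return seq
# e.g. ['START', 'A', 'END'] or ['START', 'B', 'A', 'END'], chosen in proportion
# to the (already normalized) edge weights, exactly as sample_execution does.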
|
|
import time as timer
import cvxopt as co
import numpy as np
import pylab as pl
import sklearn.metrics as metric
import matplotlib.pyplot as plt
import scipy.io as io
from kernel import Kernel
from ocsvm import OCSVM
from latent_ocsvm import LatentOCSVM
from structured_ocsvm import StructuredOCSVM  # used by test_hmad() below; module path is an assumption
from toydata import ToyData
from so_hmm import SOHMM
def get_model(num_exm, num_train, lens, block_len, blocks=1, anomaly_prob=0.15):
print('Generating {0} sequences, {1} for training, each with {2} anomaly probability.'.format(num_exm, num_train, anomaly_prob))
cnt = 0
X = []
Y = []
label = []
lblcnt = co.matrix(0.0,(1,lens))
for i in range(num_exm):
(exm, lbl, marker) = ToyData.get_2state_anom_seq(lens, block_len, anom_prob=anomaly_prob, num_blocks=blocks)
cnt += lens
X.append(exm)
Y.append(lbl)
label.append(marker)
# some lbl statistics
if i<num_train:
lblcnt += lbl
X = normalize_sequence_data(X)
return (SOHMM(X[0:num_train],Y[0:num_train]), SOHMM(X[num_train:],Y[num_train:]), SOHMM(X,Y), label)
def normalize_sequence_data(X, dims=1):
cnt = 0
tst_mean = co.matrix(0.0, (1, dims))
for i in range(len(X)):
lens = len(X[i][0,:])
cnt += lens
tst_mean += co.matrix(1.0, (1, lens))*X[i].trans()
tst_mean /= float(cnt)
    print(tst_mean)
max_val = co.matrix(-1e10, (1, dims))
for i in range(len(X)):
for d in range(dims):
X[i][d,:] = X[i][d,:]-tst_mean[d]
foo = np.max(np.abs(X[i][d,:]))
max_val[d] = np.max([max_val[d], foo])
    print(max_val)
for i in range(len(X)):
for d in range(dims):
X[i][d,:] /= max_val[d]
cnt = 0
max_val = co.matrix(-1e10, (1, dims))
tst_mean = co.matrix(0.0, (1, dims))
for i in range(len(X)):
lens = len(X[i][0,:])
cnt += lens
tst_mean += co.matrix(1.0, (1, lens))*X[i].trans()
for d in range(dims):
foo = np.max(np.abs(X[i][d,:]))
max_val[d] = np.max([max_val[d], foo])
    print(tst_mean/float(cnt))
    print(max_val)
return X
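# Standalone sketch (not called by this script): the zero-mean / max-abs scaling
# that normalize_sequence_data() applies per feature dimension, written for all
# sequences concatenated into a single numpy array of shape (dims, frames); it
# reuses the `import numpy as np` at the top of this file.
def _zero_mean_maxabs(frames):
    centered = frames - frames.mean(axis=1, keepdims=True)
    return centered / np.abs(centered).max(axis=1, keepdims=True)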
def build_histograms(data, phi, num_train, bins=2, ord=2):
# first num_train phis are used for estimating
# histogram boundaries.
N = len(data)
(F, LEN) = data[0].size
max_phi = np.max(phi[:,:num_train])
min_phi = np.min(phi[:,:num_train])
print("Build histograms with {0} bins.".format(bins))
print (max_phi, min_phi)
thres = np.linspace(min_phi, max_phi+1e-8, bins+1)
print (max_phi, min_phi)
hist = co.matrix(0.0, (F*bins, 1))
phi_hist = co.matrix(0.0, (F*bins, N))
for i in xrange(N):
for f in xrange(F):
phi_hist[0 + f*bins,i] = np.where(np.array(data[i][f,:])<thres[0])[0].size
for b in range(1,bins-1):
cnt = np.where((np.array(data[i][f,:])>=thres[b]) & (np.array(data[i][f,:])<thres[b+1]))[0].size
phi_hist[b + f*bins,i] = float(cnt)
phi_hist[bins-1 + f*bins,i] = np.where(np.array(data[i][f,:])>=thres[bins-1])[0].size
phi_hist[:,i] /= np.linalg.norm(phi_hist[:,i], ord=ord)
hist += phi_hist[:,i]/float(N)
print('Histogram:')
    print(hist.trans())
kern = Kernel.get_kernel(phi_hist, phi_hist)
return kern, phi_hist
def build_seq_kernel(data, ord=2, type='linear', param=1.0):
# all sequences have the same length
N = len(data)
(F, LEN) = data[0].size
phi = co.matrix(0.0, (F*LEN, N))
for i in xrange(N):
for f in xrange(F):
phi[(f*LEN):(f*LEN)+LEN,i] = data[i][f,:].trans()
if ord>=1:
phi[:,i] /= np.linalg.norm(phi[:,i], ord=ord)
kern = Kernel.get_kernel(phi, phi, type=type, param=param)
return kern, phi
def build_kernel(data, num_train, bins=2, ord=2, typ='linear', param=1.0):
if typ=='hist':
foo, phi = build_seq_kernel(data, ord=-1)
return build_histograms(data, phi, num_train, bins=param, ord=ord)
    elif typ=='':
        # no kernel needed (e.g. HMAD works directly on the structured sequences)
        return -1,-1
else:
return build_seq_kernel(data, ord=ord, type=typ.lower(), param=param)
def test_bayes(phi, kern, train, test, num_train, anom_prob, labels):
startTime = timer.time()
# bayes classifier
(DIMS, N) = phi.size
w_bayes = co.matrix(-1.0, (DIMS, 1))
#pred = w_bayes.trans()*phi[:,num_train:]
#(fpr, tpr, thres) = metric.roc_curve(labels[num_train:], pred.trans())
return timer.time() - startTime
def test_ocsvm(phi, kern, train, test, num_train, anom_prob, labels):
startTime = timer.time()
ocsvm = OCSVM(kern[:num_train,:num_train], C=1.0/(num_train*anom_prob))
msg = ocsvm.train_dual()
return timer.time() - startTime
def test_hmad(phi, kern, train, test, num_train, anom_prob, labels, zero_shot=False):
startTime = timer.time()
# train structured anomaly detection
sad = StructuredOCSVM(train, C=1.0/(num_train*anom_prob))
(lsol, lats, thres) = sad.train_dc(max_iter=60, zero_shot=zero_shot)
return timer.time() - startTime
if __name__ == '__main__':
LENS = 600
EXMS = 1100
EXMS_TRAIN = 400
ANOM_PROB = 0.1
REPS = 50
BLOCK_LEN = 120
BLOCKS = [100,200,400,600,800,1000]
#BLOCKS = [400]
methods = ['Bayes' ,'HMAD','OcSvm','OcSvm','OcSvm','OcSvm','OcSvm','OcSvm','OcSvm','OcSvm']
kernels = ['Linear','' ,'RBF' ,'RBF' ,'RBF' ,'Hist' ,'Hist' ,'Hist' ,'Linear','Linear']
kparams = ['' ,'' , 0.1 , 1.0 , 10.0 , 4 , 8 , 10 , '' , '']
ords = [+1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 2]
#methods = ['OcSvm','OcSvm','OcSvm']
#kernels = ['RBF' ,'RBF' ,'RBF' ]
#kparams = [ 10.1 , 1000.0 , 0.1 ]
#ords = [ 1 , 1 , 1 ]
#methods = ['Bayes','Bayes']
#kernels = ['Linear' ,'Linear' ]
#kparams = [ 1 , 1]
#ords = [ 1 , 2]
# collected means
res = []
for r in xrange(REPS):
for b in xrange(len(BLOCKS)):
(train, test, comb, labels) = get_model(BLOCKS[b]+1, BLOCKS[b], LENS, BLOCK_LEN, blocks=1, anomaly_prob=ANOM_PROB)
for m in range(len(methods)):
name = 'test_{0}'.format(methods[m].lower())
(kern, phi) = build_kernel(comb.X, BLOCKS[b], ord=ords[m], typ=kernels[m].lower(), param=kparams[m])
print('Calling {0}'.format(name))
time = eval(name)(phi, kern, train, test, BLOCKS[b], ANOM_PROB, labels)
print('-------------------------------------------------------------------------------')
print
print('Iter {0}/{1} in block {2}/{3} for method {4} ({5}/{6}) got TIME = {7}.'.format(r+1,REPS,b+1,len(BLOCKS),name,m+1,len(methods),time))
print
print('-------------------------------------------------------------------------------')
if len(res)<=b:
res.append([])
mlist = res[b]
if len(mlist)<=m:
mlist.append([])
cur = mlist[m]
cur.append(time)
print('RESULTS >-----------------------------------------')
print
times = np.ones((len(methods),len(BLOCKS)))
stds = np.ones((len(methods),len(BLOCKS)))
varis = np.ones((len(methods),len(BLOCKS)))
names = []
for b in range(len(BLOCKS)):
print("BLOCKS={0}:".format(BLOCKS[b]))
for m in range(len(methods)):
time = np.mean(res[b][m])
std = np.std(res[b][m])
var = np.var(res[b][m])
times[m,b] = time
stds[m,b] = std
varis[m,b] = var
kname = ''
if kernels[m]=='RBF' or kernels[m]=='Hist':
kname = ' ({0} {1})'.format(kernels[m],kparams[m])
elif kernels[m]=='Linear':
kname = ' ({0})'.format(kernels[m])
name = '{0}{1} [{2}]'.format(methods[m],kname,ords[m])
if len(names)<=m:
names.append(name)
print(" m={0}: Time={1} STD={2} VAR={3}".format(name,time,std,var))
print
    print(times)
# store result as a file
data = {}
data['LENS'] = LENS
data['EXMS'] = EXMS
data['EXMS_TRAIN'] = EXMS_TRAIN
data['ANOM_PROB'] = ANOM_PROB
data['REPS'] = REPS
data['BLOCKS'] = BLOCKS
data['methods'] = methods
data['kernels'] = kernels
data['kparams'] = kparams
data['ords'] = ords
data['res'] = res
data['times'] = times
data['stds'] = stds
data['varis'] = varis
data['names'] = names
io.savemat('15_icml_toy_runtime_b0.mat',data)
print('finished')
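    # Aside (illustrative only): the eval(name)(...) dispatch above can be written
    # without eval by mapping method names to the functions directly, e.g.
    #   dispatch = {'bayes': test_bayes, 'ocsvm': test_ocsvm, 'hmad': test_hmad}
    #   time = dispatch[methods[m].lower()](phi, kern, train, test,
    #                                       BLOCKS[b], ANOM_PROB, labels)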
|
|
# encoding: utf-8
"""
Tests basic things that generator3 consists of.
NOTE: does not work in Jython 2.2 or IronPython 1.x, because pyparsing does not.
"""
import unittest
from generator3 import *
M = ModuleRedeclarator
import sys
IS_CLI = sys.platform == 'cli'
VERSION = sys.version_info[:2] # only (major, minor)
class TestRestoreFuncByDocComment(unittest.TestCase):
"""
Tries to restore function signatures by doc strings.
"""
def setUp(self):
self.m = ModuleRedeclarator(None, None, '/dev/null')
def testTrivial(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(a, b, c) ololo", "f", "f", None)
self.assertEquals(result, "f(a, b, c)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testTrivialNested(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(a, (b, c), d) ololo", "f", "f", None)
self.assertEquals(result, "f(a, (b, c), d)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testWithDefault(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(a, b, c=1) ololo", "f", "f", None)
self.assertEquals(result, "f(a, b, c=1)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testNestedWithDefault(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(a, (b1, b2), c=1) ololo", "f", "f", None)
self.assertEquals(result, "f(a, (b1, b2), c=1)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testAbstractDefault(self):
# like new(S, ...)
result, ret_sig, note = self.m.parseFuncDoc('blah f(a, b=obscuredefault) ololo', "f", "f", None)
self.assertEquals(result, "f(a, b=None)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testWithReserved(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(class, object, def) ololo", "f", "f", None)
self.assertEquals(result, "f(p_class, p_object, p_def)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testWithReservedOpt(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(foo, bar[, def]) ololo", "f", "f", None)
self.assertEquals(result, "f(foo, bar, p_def=None)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testPseudoNested(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(a, (b1, b2, ...)) ololo", "f", "f", None)
self.assertEquals(result, "f(a, b_tuple)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testImportLike(self):
# __import__
result, ret_sig, note = self.m.parseFuncDoc("blah f(name, globals={}, locals={}, fromlist=[], level=-1) ololo",
"f", "f", None)
self.assertEquals(result, "f(name, globals={}, locals={}, fromlist=[], level=-1)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testOptionalBracket(self):
# reduce
result, ret_sig, note = self.m.parseFuncDoc("blah f(function, sequence[, initial]) ololo", "f", "f", None)
self.assertEquals(result, "f(function, sequence, initial=None)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testWithMore(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(foo [, bar1, bar2, ...]) ololo", "f", "f", None)
self.assertEquals(result, "f(foo, *bar)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testNestedOptionals(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(foo [, bar1 [, bar2]]) ololo", "f", "f", None)
self.assertEquals(result, "f(foo, bar1=None, bar2=None)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testInnerTuple(self):
result, ret_sig, note = self.m.parseFuncDoc("blah load_module(name, file, filename, (suffix, mode, type)) ololo"
, "load_module", "load_module", None)
self.assertEquals(result, "load_module(name, file, filename, (suffix, mode, type))")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testIncorrectInnerTuple(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(a, (b=1, c=2)) ololo", "f", "f", None)
self.assertEquals(result, "f(a, p_b)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testNestedOnly(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f((foo, bar, baz)) ololo", "f", "f", None)
self.assertEquals(result, "f((foo, bar, baz))")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testTwoPseudoNested(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f((a1, a2, ...), (b1, b2,..)) ololo", "f", "f", None)
self.assertEquals(result, "f(a_tuple, b_tuple)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testTwoPseudoNestedWithLead(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(x, (a1, a2, ...), (b1, b2,..)) ololo", "f", "f", None)
self.assertEquals(result, "f(x, a_tuple, b_tuple)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testPseudoNestedRange(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f((a1, ..., an), b) ololo", "f", "f", None)
self.assertEquals(result, "f(a_tuple, b)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testIncorrectList(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(x, y, 3, $) ololo", "f", "f", None)
self.assertEquals(result, "f(x, y, *args, **kwargs)")
self.assertEquals(note, M.SIG_DOC_UNRELIABLY)
def testIncorrectStarredList(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(x, *y, 3, $) ololo", "f", "f", None)
self.assertEquals(result, "f(x, *y, **kwargs)")
self.assertEquals(note, M.SIG_DOC_UNRELIABLY)
def testClashingNames(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(x, y, (x, y), z) ololo", "f", "f", None)
self.assertEquals(result, "f(x, y, (x_1, y_1), z)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testQuotedParam(self):
# like __delattr__
result, ret_sig, note = self.m.parseFuncDoc("blah getattr('name') ololo", "getattr", "getattr", None)
self.assertEquals(result, "getattr(name)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testQuotedParam2(self):
# like __delattr__, too
result, ret_sig, note = self.m.parseFuncDoc('blah getattr("name") ololo', "getattr", "getattr", None)
self.assertEquals(result, "getattr(name)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testOptionalTripleDot(self):
# like new(S, ...)
result, ret_sig, note = self.m.parseFuncDoc('blah f(foo, ...) ololo', "f", "f", None)
self.assertEquals(result, "f(foo, *more)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testUnderscoredName(self):
# like new(S, ...)
result, ret_sig, note = self.m.parseFuncDoc('blah f(foo_one, _bar_two) ololo', "f", "f", None)
self.assertEquals(result, "f(foo_one, _bar_two)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testDashedName(self):
# like new(S, ...)
result, ret_sig, note = self.m.parseFuncDoc('blah f(something-else, for-a-change) ololo', "f", "f", None)
self.assertEquals(result, "f(something_else, for_a_change)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testSpacedDefault(self):
# like new(S, ...)
result, ret_sig, note = self.m.parseFuncDoc('blah f(a, b = 1) ololo', "f", "f", None)
self.assertEquals(result, "f(a, b=1)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testSpacedName(self):
# like new(S, ...)
result, ret_sig, note = self.m.parseFuncDoc('blah femme(skirt or pants) ololo', "femme", "femme", None)
self.assertEquals(result, "femme(skirt_or_pants)")
self.assertEquals(note, M.SIG_DOC_NOTE)
class TestRestoreMethodByDocComment(unittest.TestCase):
"""
Restoring with a class name set
"""
def setUp(self):
self.m = ModuleRedeclarator(None, None, '/dev/null')
def testPlainMethod(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(self, foo, bar) ololo", "f", "f", "SomeClass")
self.assertEquals(result, "f(self, foo, bar)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testInsertSelf(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(foo, bar) ololo", "f", "f", "SomeClass")
self.assertEquals(result, "f(self, foo, bar)")
self.assertEquals(note, M.SIG_DOC_NOTE)
class TestAnnotatedParameters(unittest.TestCase):
"""
    f(foo: int) and friends; these show up in doc comments in the 2.x world, too.
"""
def setUp(self):
self.m = ModuleRedeclarator(None, None, '/dev/null')
def testMixed(self):
result, ret_sig, note = self.m.parseFuncDoc('blah f(i: int, foo) ololo', "f", "f", None)
self.assertEquals(result, "f(i, foo)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testNested(self):
result, ret_sig, note = self.m.parseFuncDoc('blah f(i: int, (foo: bar, boo: Decimal)) ololo', "f", "f", None)
self.assertEquals(result, "f(i, (foo, boo))")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testSpaced(self):
result, ret_sig, note = self.m.parseFuncDoc('blah f(i: int, j :int, k : int) ololo', "f", "f", None)
self.assertEquals(result, "f(i, j, k)")
self.assertEquals(note, M.SIG_DOC_NOTE)
if not IS_CLI and VERSION < (3, 0):
class TestInspect(unittest.TestCase):
"""
See that inspect actually works if needed
"""
def setUp(self):
self.m = ModuleRedeclarator(None, None, '/dev/null')
def testSimple(self):
def target(a, b, c=1, *d, **e):
return a, b, c, d, e
result = self.m.restoreByInspect(target)
self.assertEquals(result, "(a, b, c=1, *d, **e)")
def testNested(self):
# NOTE: Py3k can't handle nested tuple args, thus we compile it conditionally
code = (
"def target(a, (b, c), d, e=1):\n"
" return a, b, c, d, e"
)
namespace = {}
eval(compile(code, "__main__", "single"), namespace)
target = namespace['target']
result = self.m.restoreByInspect(target)
self.assertEquals(result, "(a, (b, c), d, e=1)")
class _DiffPrintingTestCase(unittest.TestCase):
def assertEquals(self, etalon, specimen, msg=None):
if type(etalon) == str and type(specimen) == str and etalon != specimen:
print("%s" % "\n")
# print side by side
ei = iter(etalon.split("\n"))
si = iter(specimen.split("\n"))
if VERSION < (3, 0):
si_next = si.next
else:
si_next = si.__next__
for el in ei:
try: sl = si_next()
except StopIteration: break # I wish the exception would just work as break
if el != sl:
print("!%s" % el)
print("?%s" % sl)
else:
print(">%s" % sl)
# one of the iters might not end yet
for el in ei:
print("!%s" % el)
for sl in si:
print("?%s" % sl)
raise self.failureException(msg)
else:
self.failUnlessEqual(etalon, specimen, msg)
class TestSpecialCases(unittest.TestCase):
"""
Tests cases where predefined overrides kick in
"""
def setUp(self):
import sys
if VERSION >= (3, 0):
import builtins as the_builtins
self.builtins_name = the_builtins.__name__
else:
import __builtin__ as the_builtins
self.builtins_name = the_builtins.__name__
self.m = ModuleRedeclarator(the_builtins, None, '/dev/null', doing_builtins=True)
def _testBuiltinFuncName(self, func_name, expected):
class_name = None
self.assertTrue(self.m.isPredefinedBuiltin(self.builtins_name, class_name, func_name))
result, note = self.m.restorePredefinedBuiltin(class_name, func_name)
self.assertEquals(result, func_name + expected)
self.assertEquals(note, "known special case of " + func_name)
def testZip(self):
self._testBuiltinFuncName("zip", "(seq1, seq2, *more_seqs)")
def testRange(self):
self._testBuiltinFuncName("range", "(start=None, stop=None, step=None)")
def testFilter(self):
self._testBuiltinFuncName("filter", "(function_or_none, sequence)")
# we might want to test a class without __dict__, but it takes a C extension to really create one.
class TestDataOutput(_DiffPrintingTestCase):
"""
Tests for sanity of output of data members
"""
def setUp(self):
self.m = ModuleRedeclarator(self, None, 4) # Pass anything with __dict__ as module
def checkFmtValue(self, data, expected):
buf = Buf(self.m)
self.m.fmtValue(buf.out, data, 0)
result = "".join(buf.data).strip()
self.assertEquals(expected, result)
def testRecursiveDict(self):
data = {'a': 1}
data['b'] = data
expected = "\n".join((
"{",
" 'a': 1,",
" 'b': '<value is a self-reference, replaced by this string>',",
"}"
))
self.checkFmtValue(data, expected)
def testRecursiveList(self):
data = [1]
data.append(data)
data.append(2)
data.append([10, data, 20])
expected = "\n".join((
"[",
" 1,",
" '<value is a self-reference, replaced by this string>',",
" 2,",
" [",
" 10,",
" '<value is a self-reference, replaced by this string>',",
" 20,",
" ],",
"]"
))
self.checkFmtValue(data, expected)
if not IS_CLI:
class TestReturnTypes(unittest.TestCase):
"""
    Tests that return values/literals are restored sanely from doc strings
"""
def setUp(self):
self.m = ModuleRedeclarator(None, None, 4)
def checkRestoreFunction(self, doc, expected):
spec, ret_literal, note = self.m.parseFuncDoc(doc, "foo", "foo", None)
self.assertEqual(expected, ret_literal, "%r != %r; spec=%r, note=%r" % (expected, ret_literal, spec, note))
pass
def testSimpleArrowInt(self):
doc = "This is foo(bar) -> int"
self.checkRestoreFunction(doc, "0")
def testSimpleArrowList(self):
doc = "This is foo(bar) -> list"
self.checkRestoreFunction(doc, "[]")
def testArrowListOf(self):
doc = "This is foo(bar) -> list of int"
self.checkRestoreFunction(doc, "[]")
# def testArrowTupleOf(self):
# doc = "This is foo(bar) -> (a, b,..)"
# self.checkRestoreFunction(doc, "()")
def testSimplePrefixInt(self):
doc = "This is int foo(bar)"
self.checkRestoreFunction(doc, "0")
def testSimplePrefixObject(self):
doc = "Makes an instance: object foo(bar)"
self.checkRestoreFunction(doc, "object()")
if VERSION < (3, 0):
# TODO: we only support it in 2.x; must update when we do it in 3.x, too
def testSimpleArrowFile(self):
doc = "Opens a file: foo(bar) -> file"
self.checkRestoreFunction(doc, "file('/dev/null')")
def testUnrelatedPrefix(self):
doc = """
Consumes a list of int
foo(bar)
"""
self.checkRestoreFunction(doc, None)
###
if __name__ == '__main__':
unittest.main()
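# Standalone illustration only: a deliberately simplified sketch of the kind of
# doc-string signature recovery exercised by the tests above. It is not the
# generator3/ModuleRedeclarator implementation; it merely pulls "name(params)"
# out of a doc string and sanitizes the parameter names.
import keyword
import re
def _sketch_parse_func_doc(doc, func_name):
    match = re.search(r'\b%s\s*\(([^)]*)\)' % re.escape(func_name), doc)
    if match is None:
        return None
    params = []
    for raw in match.group(1).split(','):
        raw = raw.strip().replace('-', '_')
        if keyword.iskeyword(raw):
            raw = 'p_' + raw
        if raw:
            params.append(raw)
    return '%s(%s)' % (func_name, ', '.join(params))
# e.g. _sketch_parse_func_doc("blah f(a, class, for-a-change) ololo", "f")
# returns 'f(a, p_class, for_a_change)'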
|
|
import datetime
import json
from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange, Range
from django.contrib.postgres import forms, lookups
from django.db import models
from django.db.models.lookups import PostgresOperatorLookup
from .utils import AttributeSetter
__all__ = [
'RangeField', 'IntegerRangeField', 'BigIntegerRangeField',
'DecimalRangeField', 'DateTimeRangeField', 'DateRangeField',
'RangeBoundary', 'RangeOperators',
]
class RangeBoundary(models.Expression):
"""A class that represents range boundaries."""
def __init__(self, inclusive_lower=True, inclusive_upper=False):
self.lower = '[' if inclusive_lower else '('
self.upper = ']' if inclusive_upper else ')'
def as_sql(self, compiler, connection):
return "'%s%s'" % (self.lower, self.upper), []
class RangeOperators:
# https://www.postgresql.org/docs/current/functions-range.html#RANGE-OPERATORS-TABLE
EQUAL = '='
NOT_EQUAL = '<>'
CONTAINS = '@>'
CONTAINED_BY = '<@'
OVERLAPS = '&&'
FULLY_LT = '<<'
FULLY_GT = '>>'
NOT_LT = '&>'
NOT_GT = '&<'
ADJACENT_TO = '-|-'
class RangeField(models.Field):
empty_strings_allowed = False
def __init__(self, *args, **kwargs):
# Initializing base_field here ensures that its model matches the model for self.
if hasattr(self, 'base_field'):
self.base_field = self.base_field()
super().__init__(*args, **kwargs)
@property
def model(self):
try:
return self.__dict__['model']
except KeyError:
raise AttributeError("'%s' object has no attribute 'model'" % self.__class__.__name__)
@model.setter
def model(self, model):
self.__dict__['model'] = model
self.base_field.model = model
@classmethod
def _choices_is_value(cls, value):
return isinstance(value, (list, tuple)) or super()._choices_is_value(value)
def get_prep_value(self, value):
if value is None:
return None
elif isinstance(value, Range):
return value
elif isinstance(value, (list, tuple)):
return self.range_type(value[0], value[1])
return value
def to_python(self, value):
if isinstance(value, str):
# Assume we're deserializing
vals = json.loads(value)
for end in ('lower', 'upper'):
if end in vals:
vals[end] = self.base_field.to_python(vals[end])
value = self.range_type(**vals)
elif isinstance(value, (list, tuple)):
value = self.range_type(value[0], value[1])
return value
def set_attributes_from_name(self, name):
super().set_attributes_from_name(name)
self.base_field.set_attributes_from_name(name)
def value_to_string(self, obj):
value = self.value_from_object(obj)
if value is None:
return None
if value.isempty:
return json.dumps({"empty": True})
base_field = self.base_field
result = {"bounds": value._bounds}
for end in ('lower', 'upper'):
val = getattr(value, end)
if val is None:
result[end] = None
else:
obj = AttributeSetter(base_field.attname, val)
result[end] = base_field.value_to_string(obj)
return json.dumps(result)
def formfield(self, **kwargs):
kwargs.setdefault('form_class', self.form_field)
return super().formfield(**kwargs)
class IntegerRangeField(RangeField):
base_field = models.IntegerField
range_type = NumericRange
form_field = forms.IntegerRangeField
def db_type(self, connection):
return 'int4range'
class BigIntegerRangeField(RangeField):
base_field = models.BigIntegerField
range_type = NumericRange
form_field = forms.IntegerRangeField
def db_type(self, connection):
return 'int8range'
class DecimalRangeField(RangeField):
base_field = models.DecimalField
range_type = NumericRange
form_field = forms.DecimalRangeField
def db_type(self, connection):
return 'numrange'
class DateTimeRangeField(RangeField):
base_field = models.DateTimeField
range_type = DateTimeTZRange
form_field = forms.DateTimeRangeField
def db_type(self, connection):
return 'tstzrange'
class DateRangeField(RangeField):
base_field = models.DateField
range_type = DateRange
form_field = forms.DateRangeField
def db_type(self, connection):
return 'daterange'
RangeField.register_lookup(lookups.DataContains)
RangeField.register_lookup(lookups.ContainedBy)
RangeField.register_lookup(lookups.Overlap)
class DateTimeRangeContains(PostgresOperatorLookup):
"""
Lookup for Date/DateTimeRange containment to cast the rhs to the correct
type.
"""
lookup_name = 'contains'
postgres_operator = RangeOperators.CONTAINS
def process_rhs(self, compiler, connection):
# Transform rhs value for db lookup.
if isinstance(self.rhs, datetime.date):
output_field = models.DateTimeField() if isinstance(self.rhs, datetime.datetime) else models.DateField()
value = models.Value(self.rhs, output_field=output_field)
self.rhs = value.resolve_expression(compiler.query)
return super().process_rhs(compiler, connection)
def as_postgresql(self, compiler, connection):
sql, params = super().as_postgresql(compiler, connection)
# Cast the rhs if needed.
cast_sql = ''
if (
isinstance(self.rhs, models.Expression) and
self.rhs._output_field_or_none and
# Skip cast if rhs has a matching range type.
not isinstance(self.rhs._output_field_or_none, self.lhs.output_field.__class__)
):
cast_internal_type = self.lhs.output_field.base_field.get_internal_type()
cast_sql = '::{}'.format(connection.data_types.get(cast_internal_type))
return '%s%s' % (sql, cast_sql), params
DateRangeField.register_lookup(DateTimeRangeContains)
DateTimeRangeField.register_lookup(DateTimeRangeContains)
class RangeContainedBy(PostgresOperatorLookup):
lookup_name = 'contained_by'
type_mapping = {
'smallint': 'int4range',
'integer': 'int4range',
'bigint': 'int8range',
'double precision': 'numrange',
'numeric': 'numrange',
'date': 'daterange',
'timestamp with time zone': 'tstzrange',
}
postgres_operator = RangeOperators.CONTAINED_BY
def process_rhs(self, compiler, connection):
rhs, rhs_params = super().process_rhs(compiler, connection)
# Ignore precision for DecimalFields.
db_type = self.lhs.output_field.cast_db_type(connection).split('(')[0]
cast_type = self.type_mapping[db_type]
return '%s::%s' % (rhs, cast_type), rhs_params
def process_lhs(self, compiler, connection):
lhs, lhs_params = super().process_lhs(compiler, connection)
if isinstance(self.lhs.output_field, models.FloatField):
lhs = '%s::numeric' % lhs
elif isinstance(self.lhs.output_field, models.SmallIntegerField):
lhs = '%s::integer' % lhs
return lhs, lhs_params
def get_prep_lookup(self):
return RangeField().get_prep_value(self.rhs)
models.DateField.register_lookup(RangeContainedBy)
models.DateTimeField.register_lookup(RangeContainedBy)
models.IntegerField.register_lookup(RangeContainedBy)
models.FloatField.register_lookup(RangeContainedBy)
models.DecimalField.register_lookup(RangeContainedBy)
@RangeField.register_lookup
class FullyLessThan(PostgresOperatorLookup):
lookup_name = 'fully_lt'
postgres_operator = RangeOperators.FULLY_LT
@RangeField.register_lookup
class FullGreaterThan(PostgresOperatorLookup):
lookup_name = 'fully_gt'
postgres_operator = RangeOperators.FULLY_GT
@RangeField.register_lookup
class NotLessThan(PostgresOperatorLookup):
lookup_name = 'not_lt'
postgres_operator = RangeOperators.NOT_LT
@RangeField.register_lookup
class NotGreaterThan(PostgresOperatorLookup):
lookup_name = 'not_gt'
postgres_operator = RangeOperators.NOT_GT
@RangeField.register_lookup
class AdjacentToLookup(PostgresOperatorLookup):
lookup_name = 'adjacent_to'
postgres_operator = RangeOperators.ADJACENT_TO
@RangeField.register_lookup
class RangeStartsWith(models.Transform):
lookup_name = 'startswith'
function = 'lower'
@property
def output_field(self):
return self.lhs.output_field.base_field
@RangeField.register_lookup
class RangeEndsWith(models.Transform):
lookup_name = 'endswith'
function = 'upper'
@property
def output_field(self):
return self.lhs.output_field.base_field
@RangeField.register_lookup
class IsEmpty(models.Transform):
lookup_name = 'isempty'
function = 'isempty'
output_field = models.BooleanField()
@RangeField.register_lookup
class LowerInclusive(models.Transform):
lookup_name = 'lower_inc'
function = 'LOWER_INC'
output_field = models.BooleanField()
@RangeField.register_lookup
class LowerInfinite(models.Transform):
lookup_name = 'lower_inf'
function = 'LOWER_INF'
output_field = models.BooleanField()
@RangeField.register_lookup
class UpperInclusive(models.Transform):
lookup_name = 'upper_inc'
function = 'UPPER_INC'
output_field = models.BooleanField()
@RangeField.register_lookup
class UpperInfinite(models.Transform):
lookup_name = 'upper_inf'
function = 'UPPER_INF'
output_field = models.BooleanField()
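# Minimal usage sketch (not part of this module), assuming a hypothetical
# ``Event`` model in an installed app; it shows how the lookups and transforms
# registered above are spelled from the ORM (some_moment, start, end and
# other_range are placeholders):
#
#     from django.contrib.postgres.fields import DateTimeRangeField
#     from django.db import models
#
#     class Event(models.Model):
#         period = DateTimeRangeField()
#
#     Event.objects.filter(period__contains=some_moment)   # RangeOperators.CONTAINS
#     Event.objects.filter(period__overlap=(start, end))   # RangeOperators.OVERLAPS
#     Event.objects.filter(period__fully_lt=other_range)   # RangeOperators.FULLY_LT
#     Event.objects.filter(period__startswith__gte=start)  # lower(period) >= start
#     Event.objects.filter(period__isempty=True)           # isempty(period)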
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU Distribution Strategy.
This is experimental. It's not ready for general use.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import functools
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_system_metadata as tpu_system_metadata_lib
from tensorflow.contrib.tpu.python.tpu import training_loop
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
def get_tpu_system_metadata(tpu_cluster_resolver):
"""Retrieves TPU system metadata given a TPUClusterResolver."""
master = tpu_cluster_resolver.master()
# pylint: disable=protected-access
cluster_spec = tpu_cluster_resolver.cluster_spec()
cluster_def = cluster_spec.as_cluster_def() if cluster_spec else None
tpu_system_metadata = (
tpu_system_metadata_lib._query_tpu_system_metadata(
master,
cluster_def=cluster_def,
query_topology=False))
return tpu_system_metadata
# TODO(jhseu): Deduplicate with MirroredStrategy?
def _create_tpu_mirrored_variable(devices, real_mirrored_creator, *args,
**kwargs): # pylint: disable=g-missing-docstring
# Figure out what collections this variable should be added to.
# We'll add the TPUMirroredVariable to those collections instead.
collections = kwargs.pop("collections", None)
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
kwargs["collections"] = []
# TODO(jhseu): Should we have different behavior for different
# synchronization settings?
# Get aggregation value
# TODO(jhseu): Support aggregation in a replica context.
aggregation = kwargs.pop("aggregation", vs.VariableAggregation.NONE)
if aggregation not in [
vs.VariableAggregation.NONE,
vs.VariableAggregation.SUM,
vs.VariableAggregation.MEAN,
vs.VariableAggregation.ONLY_FIRST_REPLICA,
]:
raise ValueError("Invalid variable aggregation mode: {} for variable: {}"
.format(aggregation, kwargs["name"]))
# Ignore user-specified caching device, not needed for mirrored variables.
kwargs.pop("caching_device", None)
# TODO(josh11b,apassos): It would be better if variable initialization
# was never recorded on the tape instead of having to do this manually
# here.
with tape.stop_recording():
index = real_mirrored_creator(devices, *args, **kwargs)
result = values.TPUMirroredVariable(index, index[devices[0]], aggregation)
if not context.executing_eagerly():
g = ops.get_default_graph()
# If "trainable" is True, next_creator() will add the member variables
# to the TRAINABLE_VARIABLES collection, so we manually remove
# them and replace with the MirroredVariable. We can't set
# "trainable" to False for next_creator() since that causes functions
# like implicit_gradients to skip those variables.
if kwargs.get("trainable", True):
collections.append(ops.GraphKeys.TRAINABLE_VARIABLES)
l = g.get_collection_ref(ops.GraphKeys.TRAINABLE_VARIABLES)
for v in index.values():
l.remove(v)
g.add_to_collections(collections, result)
return result
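# For example, with two replica devices the `index` returned by the
# real_mirrored_creator callback (see _create_variable() further down) maps
# device names to per-device copies, roughly
#   {"/device:TPU:0": <variable "v">, "/device:TPU:1": <variable "v/replica_1">}
# and TPUMirroredVariable then presents those copies as a single variable.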
class TPUStrategy(distribute_lib.DistributionStrategy):
"""TPU distribution strategy implementation."""
def __init__(self, tpu_cluster_resolver, steps_per_run, num_cores=None):
"""Initializes the TPUStrategy object.
Args:
tpu_cluster_resolver: A tf.contrib.cluster_resolver.TPUClusterResolver,
which provides information about the TPU cluster.
steps_per_run: Number of steps to run on device before returning to the
host. Note that this can have side-effects on performance, hooks,
metrics, summaries etc.
This parameter is only used when Distribution Strategy is used with
estimator or keras.
      num_cores: Number of cores to use on the TPU. If None is specified, then
        the cores and topology of the TPU system are auto-detected.
"""
super(TPUStrategy, self).__init__(TPUExtended(
self, tpu_cluster_resolver, steps_per_run, num_cores))
@property
def steps_per_run(self):
"""DEPRECATED: use .extended.steps_per_run instead."""
return self._extended.steps_per_run
class TPUExtended(distribute_lib.DistributionStrategyExtended):
"""Implementation of TPUStrategy."""
# Track what TPU devices have been initialized.
_initialized_devices = []
def __init__(self, container_strategy, tpu_cluster_resolver, steps_per_run,
num_cores=None):
super(TPUExtended, self).__init__(container_strategy)
self._tpu_cluster_resolver = tpu_cluster_resolver
self._tpu_metadata = get_tpu_system_metadata(self._tpu_cluster_resolver)
# TODO(sourabhbajaj): Change this from num_cores to metadata_override
self._num_cores_override = num_cores
# TODO(jhseu): Switch to DeviceAssignment to support pods and model
# parallelism.
device_map = {d.name: i for i, d in enumerate(self._tpu_metadata.devices)
if "device:TPU:" in d.name}
self._device_index = values.PerReplica(device_map)
self._host_device = self.get_host_cpu_device(0)
self._tpu_devices = tuple(sorted(device_map.keys()))
# Only create variables for the number of replicas we're running.
self._tpu_devices = self._tpu_devices[:self._num_replicas_in_sync]
# TODO(sourabhbajaj): Remove this once performance of running one step
# at a time is comparable to multiple steps.
self.steps_per_run = steps_per_run
self._require_static_shapes = True
# Initialize the TPU devices.
self._initialize_tpu()
def _initialize_tpu(self):
"""Initialize the TPU devices in a separate session and graph.
    We keep track of all the TPU devices that we have initialized, as we should
    only run TPU initialization once for the entire process.
"""
master = self._tpu_cluster_resolver.master()
# Verify TPU has not already been initialized in this process.
if master in TPUExtended._initialized_devices:
logging.info("TPU master %s has already been initialized." % master)
return
logging.info("Initializing the TPU system.")
session_config = config_pb2.ConfigProto(allow_soft_placement=True)
self._configure(session_config)
with ops.Graph().as_default():
with session_lib.Session(config=session_config, target=master) as sess:
sess.run([tpu.initialize_system()])
    logging.info("Finished initializing the TPU system.")
# Update Strategy state to make sure we can track device initialization.
TPUExtended._initialized_devices.append(master)
def _get_enqueue_op_per_host(self, host_id, multi_worker_iterator,
input_shapes, iterations):
"""Create an enqueue op for a single host identified using host_id.
The while_loop op returned will run `iterations` times and in each run
enqueue batches for each shard.
Args:
host_id: integer, id of the host to run the enqueue ops on.
multi_worker_iterator: MultiWorkerDataIterator to read the input data.
      input_shapes: shapes of the inputs to be enqueued on the queue. This is
        the same as the value of `nest.flatten(iterator.output_shapes)`.
iterations: integer, number of iterations to be run; determines the
number of batches to be enqueued.
Returns:
while_loop_op running `iterations` times; in each run we enqueue a batch
on the infeed queue from the host with id `host_id` for each device shard.
"""
host = self.get_host_cpu_device(host_id)
# TODO(sourabhbajaj): Possibly make changes to MultiWorkerDataset
# to work with TPU Prefetch so clean up this code.
iterator = (
multi_worker_iterator.get_iterator(self.get_host(host_id))._iterator) # pylint: disable=protected-access
def _infeed_enqueue_ops_fn():
"""Enqueue ops for one iteration."""
control_deps = []
sharded_inputs = []
enqueue_ops = []
with ops.device(host):
for _ in range(self.num_replicas_per_host):
# Use control dependencies to ensure a deterministic ordering.
with ops.control_dependencies(control_deps):
inputs = nest.flatten(iterator.get_next())
control_deps.extend(inputs)
sharded_inputs.append(inputs)
for core_id, shard_input in enumerate(sharded_inputs):
enqueue_ops.append(
tpu_ops.infeed_enqueue_tuple(
inputs=shard_input,
shapes=input_shapes,
device_ordinal=core_id))
return enqueue_ops
def enqueue_ops_loop_body(i):
"""Callable for the loop body of the while_loop instantiated below."""
with ops.control_dependencies(_infeed_enqueue_ops_fn()):
return i + 1
with ops.device(host):
enqueue_op_per_host = control_flow_ops.while_loop(
lambda i: i < iterations,
enqueue_ops_loop_body,
[constant_op.constant(0)],
parallel_iterations=1)
return enqueue_op_per_host
def _make_dataset_iterator(self, dataset):
"""Make iterators for each of the TPU hosts."""
worker_devices = [
(self.get_host(hid), [self.get_host_cpu_device(hid)])
for hid in range(self.num_hosts)
]
return values.DatasetIterator(dataset, worker_devices,
self._num_replicas_in_sync)
def _distribute_dataset(self, dataset_fn):
worker_devices = [
(self.get_host(hid), [self.get_host_cpu_device(hid)])
for hid in range(self.num_hosts)
]
return values.MultiWorkerDataset(
functools.partial(self._call_dataset_fn, dataset_fn), worker_devices)
# TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed.
# TODO(sourabhbajaj): Remove the initial_loop_values parameter when we have
# a mechanism to infer the outputs of `fn`. Pending b/110550782.
def _experimental_run_steps_on_iterator(
self, fn, multi_worker_iterator, iterations, initial_loop_values=None):
output_shapes = multi_worker_iterator.output_shapes
shapes = nest.flatten(output_shapes)
if any(not s.is_fully_defined() for s in shapes):
raise ValueError(
"TPU currently requires fully defined shapes. Either use "
"set_shape() on the input tensors or use "
"dataset.batch(..., drop_remainder=True).")
types = nest.flatten(multi_worker_iterator.output_types)
enqueue_ops = [
self._get_enqueue_op_per_host(host_id, multi_worker_iterator, shapes,
iterations)
for host_id in range(self.num_hosts)]
def dequeue_fn():
dequeued = tpu_ops.infeed_dequeue_tuple(dtypes=types, shapes=shapes)
return nest.pack_sequence_as(output_shapes, dequeued)
# Wrap `fn` for repeat.
if initial_loop_values is None:
initial_loop_values = {}
initial_loop_values = nest.flatten(initial_loop_values)
ctx = values.MultiStepContext()
def run_fn():
"""Single step on the TPU device."""
fn_inputs = dequeue_fn()
if not isinstance(fn_inputs, tuple):
fn_inputs = (fn_inputs,)
fn_result = fn(ctx, fn_inputs)
flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)
if flat_last_step_outputs:
with ops.control_dependencies([fn_result]):
return [array_ops.identity(f) for f in flat_last_step_outputs]
else:
return fn_result
# We capture the control_flow_context at this point, before we run `fn`
# inside a while_loop and TPU replicate context. This is useful in cases
# where we might need to exit these contexts and get back to the outer
# context to do some things, for e.g. create an op which should be
# evaluated only once at the end of the loop on the host. One such usage
# is in creating metrics' value op.
self._outer_control_flow_context = (
ops.get_default_graph()._get_control_flow_context()) # pylint: disable=protected-access
def rewrite_fn(*args):
"""The rewritten step fn running on TPU."""
del args
replicate_inputs = [[]] * self._num_replicas_in_sync
replicate_outputs = tpu.replicate(run_fn, replicate_inputs)
# If run_fn has tensor outputs, tpu.replicate returns a list of list. We
# will flatten it in this case. If run_fn has no tensor outputs,
# tpu.replicate returns a list of no_ops, we will keep the output as it
# is.
if isinstance(replicate_outputs[0], list):
replicate_outputs = nest.flatten(replicate_outputs)
return replicate_outputs
# TODO(sourabhbajaj): The input to while loop should be based on the output
# type of the step_fn
assert isinstance(initial_loop_values, list)
initial_loop_values = initial_loop_values * self._num_replicas_in_sync
# Put the while loop op on host 0.
with ops.device(self.get_host_cpu_device(0)):
replicate_outputs = training_loop.repeat(iterations, rewrite_fn,
initial_loop_values)
del self._outer_control_flow_context
ctx.run_op = control_flow_ops.group(replicate_outputs, enqueue_ops)
if isinstance(replicate_outputs, list):
# Filter out any ops from the outputs, typically this would be the case
# when there were no tensor outputs.
last_step_tensor_outputs = [
x for x in replicate_outputs if not isinstance(x, ops.Operation)
]
# Outputs are currently of the structure (flattened)
# [output0_device0, output1_device0, output2_device0,
# output0_device1, output1_device1, output2_device1,
# ...]
# Convert this to the following structure instead: (grouped by output)
# [[output0_device0, output0_device1],
# [output1_device0, output1_device1],
# [output2_device0, output2_device1]]
output_num = len(last_step_tensor_outputs) // self._num_replicas_in_sync
last_step_tensor_outputs = [
last_step_tensor_outputs[i::output_num] for i in range(output_num)
]
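      # Concretely, with 2 replicas and 3 outputs per replica the flattened list
      # is [o0_r0, o1_r0, o2_r0, o0_r1, o1_r1, o2_r1]; output_num == 3, and the
      # stride slices [0::3], [1::3], [2::3] recover
      # [[o0_r0, o0_r1], [o1_r0, o1_r1], [o2_r0, o2_r1]].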
else:
# no tensors returned.
last_step_tensor_outputs = []
# Convert replicate_outputs to the original dict structure of
# last_step_outputs.
last_step_tensor_outputs_dict = nest.pack_sequence_as(
ctx.last_step_outputs, last_step_tensor_outputs)
for name, reduce_op in ctx._last_step_outputs_reduce_ops.items(): # pylint: disable=protected-access
output = last_step_tensor_outputs_dict[name]
# For outputs that have already been reduced, take the first value
# from the list as each value should be the same. Else return the full
# list of values.
# TODO(josh11b): If reduce_op is NONE, we should return a PerReplica
# value.
if reduce_op is not None:
# TODO(priyag): Should this return the element or a list with 1 element
last_step_tensor_outputs_dict[name] = output[0]
ctx._set_last_step_outputs(last_step_tensor_outputs_dict) # pylint: disable=protected-access
return ctx
def _call_for_each_replica(self, fn, args, kwargs):
# TODO(jhseu): Consider making it so call_for_each_replica implies that
# we're in a tpu.rewrite(), and update TPUMirroredVariable accordingly.
with _TPUReplicaContext(self._container_strategy()):
return fn(*args, **kwargs)
def _initialize(self):
if context.executing_eagerly():
      # TODO(priyag): Add appropriate call here when eager is supported for TPUs.
raise NotImplementedError("Eager mode not supported in TPUStrategy.")
else:
return []
def _finalize(self):
if context.executing_eagerly():
      # TODO(priyag): Add appropriate call here when eager is supported for TPUs.
raise NotImplementedError("Eager mode not supported in TPUStrategy.")
else:
return []
def _get_devices_from(self, colocate_with=None):
# TODO(jhseu): Change this when we support model parallelism.
return self._tpu_devices
def _create_variable(self, next_creator, *args, **kwargs):
"""Create a TPUMirroredVariable. See `DistributionStrategy.scope`."""
colocate_with = kwargs.pop("colocate_with", None)
devices = self._get_devices_from(colocate_with)
def _real_mirrored_creator(devices, *args, **kwargs): # pylint: disable=g-missing-docstring
index = {}
for i, d in enumerate(devices):
with ops.device(d):
if i > 0:
# Give replicas meaningful distinct names:
var0name = index[devices[0]].name.split(":")[0]
# We append a / to variable names created on replicas with id > 0 to
# ensure that we ignore the name scope and instead use the given
# name as the absolute name of the variable.
kwargs["name"] = "%s/replica_%d/" % (var0name, i)
# Initialize replicas with the same value:
if context.executing_eagerly():
kwargs["initial_value"] = array_ops.identity(
index[devices[0]].value())
else:
def initial_value_fn(device=d):
with ops.device(device):
return array_ops.identity(index[devices[0]].initial_value)
kwargs["initial_value"] = initial_value_fn
with context.context().device_policy(context.DEVICE_PLACEMENT_SILENT):
v = next_creator(*args, **kwargs)
assert not isinstance(v, values.TPUMirroredVariable)
index[d] = v
return index
return _create_tpu_mirrored_variable(devices, _real_mirrored_creator, *args,
**kwargs)
def _reduce_to(self, reduce_op, value, destinations):
if values._enclosing_tpu_context() is not None: # pylint: disable=protected-access
if reduce_op == reduce_util.ReduceOp.MEAN:
# TODO(jhseu): Revisit once we support model-parallelism.
value *= (1. / self._num_replicas_in_sync)
elif reduce_op != reduce_util.ReduceOp.SUM:
raise NotImplementedError(
"Currently only support sum & mean in TPUStrategy.")
return tpu_ops.cross_replica_sum(value)
if not isinstance(value, values.DistributedValues):
# This function handles reducing values that are not PerReplica or
# Mirrored values. For example, the same value could be present on all
# replicas in which case `value` would be a single value or value could
# be 0.
return cross_device_ops_lib.reduce_non_distributed_value(
self, reduce_op, value, destinations)
    # Validate that the destination is the same as the host device.
    # Note we don't do this when in a replica context, as the reduction is
    # performed on the TPU device itself.
devices = cross_device_ops_lib.get_devices_from(destinations)
if len(devices) == 1:
assert device_util.canonicalize(devices[0]) == device_util.canonicalize(
self._host_device)
else:
raise ValueError("Multiple devices are not supported for TPUStrategy")
output = math_ops.add_n(value)
if reduce_op == reduce_util.ReduceOp.MEAN:
return output * (1. / len(value))
return output
def _update(self, var, fn, args, kwargs, group):
assert isinstance(var, values.TPUMirroredVariable)
if values._enclosing_tpu_context() is not None: # pylint: disable=protected-access
if group:
return fn(var, *args, **kwargs)
else:
return [fn(var, *args, **kwargs)]
# Otherwise, we revert to MirroredStrategy behavior and update each variable
# directly.
updates = {}
for d, v in var._index.items(): # pylint: disable=protected-access
name = "update_%d" % self._device_index.get(d)
with ops.device(d), distribute_lib.UpdateContext(d), ops.name_scope(name):
# If args and kwargs are not mirrored, the value is returned as is.
updates[d] = fn(v,
*values.select_device_mirrored(d, args),
**values.select_device_mirrored(d, kwargs))
return values.update_regroup(self, updates, group)
def read_var(self, var):
assert isinstance(var, values.TPUMirroredVariable)
return var.read_value()
def _unwrap(self, val):
if isinstance(val, values.DistributedValues):
# Return in a deterministic order.
return tuple(val.get(device=d) for d in sorted(val.devices))
elif isinstance(val, list):
# TODO(josh11b): We need to remove this case; per device values should
# be represented using a PerReplica wrapper instead of a list with
# one entry per device.
return tuple(val)
return (val,)
def value_container(self, value):
return value
def _broadcast_to(self, tensor, destinations):
del destinations
return tensor
@property
def num_hosts(self):
return self._tpu_metadata.num_hosts
@property
def num_replicas_per_host(self):
return self._tpu_metadata.num_of_cores_per_host
@property
def _num_replicas_in_sync(self):
return self._num_cores_override or self._tpu_metadata.num_cores
@property
def experimental_between_graph(self):
return False
@property
def experimental_should_init(self):
return True
@property
def should_checkpoint(self):
return True
@property
def should_save_summary(self):
return True
@property
def worker_devices(self):
return self._tpu_devices
@property
def parameter_devices(self):
return self._tpu_devices
def non_slot_devices(self, var_list):
return self._host_device
def _update_non_slot(self, colocate_with, fn, args, kwargs, group):
del colocate_with
with ops.device(self._host_device), distribute_lib.UpdateContext(
self._host_device):
result = fn(*args, **kwargs)
if group:
return result
else:
return nest.map_structure(self._unwrap, result)
def get_host(self, host_id):
if self._tpu_cluster_resolver.get_master() in ("", "local"):
return "/replica:0/task:0"
job_name = self._tpu_cluster_resolver.get_job_name() or "tpu_worker"
return "/job:%s/task:%d" % (job_name, host_id)
def get_host_cpu_device(self, host_id):
return self.get_host(host_id) + "/device:CPU:0"
def _configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
del cluster_spec, task_type, task_id
if session_config:
session_config.CopyFrom(self._update_config_proto(session_config))
def _update_config_proto(self, config_proto):
updated_config = copy.deepcopy(config_proto)
updated_config.isolate_session_state = True
cluster_spec = self._tpu_cluster_resolver.cluster_spec()
if cluster_spec:
updated_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
return updated_config
# TODO(priyag): Delete this once all strategies use global batch size.
@property
def _global_batch_size(self):
return True
class _TPUReplicaContext(distribute_lib.ReplicaContext):
"""Replication Context class for TPU Strategy."""
# TODO(sourabhbajaj): Call for each tower should be updating this.
def __init__(self, distribution_strategy):
distribute_lib.ReplicaContext.__init__(
self,
distribution_strategy,
# TODO(b/118385803): properly initialize replica_id, instead of always 0
replica_id_in_sync_group=constant_op.constant(0, dtypes.int32))
@property
def devices(self):
distribute_lib.require_replica_context(self)
ds = self._distribution_strategy
replica_id = tensor_util.constant_value(self._replica_id_in_sync_group)
return (ds.extended.worker_devices[replica_id],)
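# --- Illustrative sketch (not part of the strategy implementation above) ---
# _reduce_to() implements MEAN by pre-scaling each replica's contribution by
# 1/num_replicas_in_sync and then summing across replicas (the role played by
# tpu_ops.cross_replica_sum on TPU). Below is a minimal, library-free sketch of
# that arithmetic, using plain Python floats as stand-ins for replica values.
def _sketch_cross_replica_reduce(reduce_op, per_replica_values):
  num_replicas = len(per_replica_values)
  if reduce_op == "mean":
    # MEAN as a scaled SUM, mirroring the branch in _reduce_to above.
    per_replica_values = [v * (1. / num_replicas) for v in per_replica_values]
  elif reduce_op != "sum":
    raise NotImplementedError("only sum & mean are sketched here")
  return sum(per_replica_values)
assert _sketch_cross_replica_reduce("sum", [1.0, 2.0, 3.0]) == 6.0
assert abs(_sketch_cross_replica_reduce("mean", [1.0, 2.0, 3.0]) - 2.0) < 1e-12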
|
|
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy import array
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
run_module_suite, assert_raises, assert_allclose,
assert_equal, assert_, assert_array_less)
from scipy import signal, fftpack
window_funcs = [
('boxcar', ()),
('triang', ()),
('parzen', ()),
('bohman', ()),
('blackman', ()),
('nuttall', ()),
('blackmanharris', ()),
('flattop', ()),
('bartlett', ()),
('hanning', ()),
('barthann', ()),
('hamming', ()),
('kaiser', (1,)),
('gaussian', (0.5,)),
('general_gaussian', (1.5, 2)),
('chebwin', (1,)),
('slepian', (2,)),
('cosine', ()),
('hann', ()),
('exponential', ()),
('tukey', (0.5,)),
]
class TestBartHann(object):
def test_basic(self):
assert_allclose(signal.barthann(6, sym=True),
[0, 0.35857354213752, 0.8794264578624801,
0.8794264578624801, 0.3585735421375199, 0])
assert_allclose(signal.barthann(7),
[0, 0.27, 0.73, 1.0, 0.73, 0.27, 0])
assert_allclose(signal.barthann(6, False),
[0, 0.27, 0.73, 1.0, 0.73, 0.27])
class TestBartlett(object):
def test_basic(self):
assert_allclose(signal.bartlett(6), [0, 0.4, 0.8, 0.8, 0.4, 0])
assert_allclose(signal.bartlett(7), [0, 1/3, 2/3, 1.0, 2/3, 1/3, 0])
assert_allclose(signal.bartlett(6, False),
[0, 1/3, 2/3, 1.0, 2/3, 1/3])
class TestBlackman(object):
def test_basic(self):
assert_allclose(signal.blackman(6, sym=False),
[0, 0.13, 0.63, 1.0, 0.63, 0.13], atol=1e-14)
assert_allclose(signal.blackman(6),
[0, 0.2007701432625305, 0.8492298567374694,
0.8492298567374694, 0.2007701432625305, 0],
atol=1e-14)
assert_allclose(signal.blackman(7, True),
[0, 0.13, 0.63, 1.0, 0.63, 0.13, 0], atol=1e-14)
class TestBlackmanHarris(object):
def test_basic(self):
assert_allclose(signal.blackmanharris(6, False),
[6.0e-05, 0.055645, 0.520575, 1.0, 0.520575, 0.055645])
assert_allclose(signal.blackmanharris(6),
[6.0e-05, 0.1030114893456638, 0.7938335106543362,
0.7938335106543364, 0.1030114893456638, 6.0e-05])
assert_allclose(signal.blackmanharris(7, sym=True),
[6.0e-05, 0.055645, 0.520575, 1.0, 0.520575, 0.055645,
6.0e-05])
class TestBohman(object):
def test_basic(self):
assert_allclose(signal.bohman(6),
[0, 0.1791238937062839, 0.8343114522576858,
0.8343114522576858, 0.1791238937062838, 0])
assert_allclose(signal.bohman(7, sym=True),
[0, 0.1089977810442293, 0.6089977810442293, 1.0,
0.6089977810442295, 0.1089977810442293, 0])
assert_allclose(signal.bohman(6, False),
[0, 0.1089977810442293, 0.6089977810442293, 1.0,
0.6089977810442295, 0.1089977810442293])
class TestBoxcar(object):
def test_basic(self):
assert_allclose(signal.boxcar(6), [1, 1, 1, 1, 1, 1])
assert_allclose(signal.boxcar(7), [1, 1, 1, 1, 1, 1, 1])
assert_allclose(signal.boxcar(6, False), [1, 1, 1, 1, 1, 1])
cheb_odd_true = array([0.200938, 0.107729, 0.134941, 0.165348,
0.198891, 0.235450, 0.274846, 0.316836,
0.361119, 0.407338, 0.455079, 0.503883,
0.553248, 0.602637, 0.651489, 0.699227,
0.745266, 0.789028, 0.829947, 0.867485,
0.901138, 0.930448, 0.955010, 0.974482,
0.988591, 0.997138, 1.000000, 0.997138,
0.988591, 0.974482, 0.955010, 0.930448,
0.901138, 0.867485, 0.829947, 0.789028,
0.745266, 0.699227, 0.651489, 0.602637,
0.553248, 0.503883, 0.455079, 0.407338,
0.361119, 0.316836, 0.274846, 0.235450,
0.198891, 0.165348, 0.134941, 0.107729,
0.200938])
cheb_even_true = array([0.203894, 0.107279, 0.133904,
0.163608, 0.196338, 0.231986,
0.270385, 0.311313, 0.354493,
0.399594, 0.446233, 0.493983,
0.542378, 0.590916, 0.639071,
0.686302, 0.732055, 0.775783,
0.816944, 0.855021, 0.889525,
0.920006, 0.946060, 0.967339,
0.983557, 0.994494, 1.000000,
1.000000, 0.994494, 0.983557,
0.967339, 0.946060, 0.920006,
0.889525, 0.855021, 0.816944,
0.775783, 0.732055, 0.686302,
0.639071, 0.590916, 0.542378,
0.493983, 0.446233, 0.399594,
0.354493, 0.311313, 0.270385,
0.231986, 0.196338, 0.163608,
0.133904, 0.107279, 0.203894])
class TestChebWin(object):
def test_basic(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
assert_allclose(signal.chebwin(6, 100),
[0.1046401879356917, 0.5075781475823447, 1.0, 1.0,
0.5075781475823447, 0.1046401879356917])
assert_allclose(signal.chebwin(7, 100),
[0.05650405062850233, 0.316608530648474,
0.7601208123539079, 1.0, 0.7601208123539079,
0.316608530648474, 0.05650405062850233])
assert_allclose(signal.chebwin(6, 10),
[1.0, 0.6071201674458373, 0.6808391469897297,
0.6808391469897297, 0.6071201674458373, 1.0])
assert_allclose(signal.chebwin(7, 10),
[1.0, 0.5190521247588651, 0.5864059018130382,
0.6101519801307441, 0.5864059018130382,
0.5190521247588651, 1.0])
assert_allclose(signal.chebwin(6, 10, False),
[1.0, 0.5190521247588651, 0.5864059018130382,
0.6101519801307441, 0.5864059018130382,
0.5190521247588651])
def test_cheb_odd_high_attenuation(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
cheb_odd = signal.chebwin(53, at=-40)
assert_array_almost_equal(cheb_odd, cheb_odd_true, decimal=4)
def test_cheb_even_high_attenuation(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
cheb_even = signal.chebwin(54, at=40)
assert_array_almost_equal(cheb_even, cheb_even_true, decimal=4)
def test_cheb_odd_low_attenuation(self):
cheb_odd_low_at_true = array([1.000000, 0.519052, 0.586405,
0.610151, 0.586405, 0.519052,
1.000000])
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
cheb_odd = signal.chebwin(7, at=10)
assert_array_almost_equal(cheb_odd, cheb_odd_low_at_true, decimal=4)
def test_cheb_even_low_attenuation(self):
cheb_even_low_at_true = array([1.000000, 0.451924, 0.51027,
0.541338, 0.541338, 0.51027,
0.451924, 1.000000])
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
cheb_even = signal.chebwin(8, at=-10)
assert_array_almost_equal(cheb_even, cheb_even_low_at_true, decimal=4)
exponential_data = {
(4, None, 0.2, False):
array([4.53999297624848542e-05,
6.73794699908546700e-03, 1.00000000000000000e+00,
6.73794699908546700e-03]),
(4, None, 0.2, True): array([0.00055308437014783, 0.0820849986238988,
0.0820849986238988, 0.00055308437014783]),
(4, None, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1.,
0.36787944117144233]),
(4, None, 1.0, True): array([0.22313016014842982, 0.60653065971263342,
0.60653065971263342, 0.22313016014842982]),
(4, 2, 0.2, False):
array([4.53999297624848542e-05, 6.73794699908546700e-03,
1.00000000000000000e+00, 6.73794699908546700e-03]),
(4, 2, 0.2, True): None,
(4, 2, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1.,
0.36787944117144233]),
(4, 2, 1.0, True): None,
(5, None, 0.2, False):
array([4.53999297624848542e-05,
6.73794699908546700e-03, 1.00000000000000000e+00,
6.73794699908546700e-03, 4.53999297624848542e-05]),
(5, None, 0.2, True):
array([4.53999297624848542e-05,
6.73794699908546700e-03, 1.00000000000000000e+00,
6.73794699908546700e-03, 4.53999297624848542e-05]),
(5, None, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1.,
0.36787944117144233, 0.1353352832366127]),
(5, None, 1.0, True): array([0.1353352832366127, 0.36787944117144233, 1.,
0.36787944117144233, 0.1353352832366127]),
(5, 2, 0.2, False):
array([4.53999297624848542e-05, 6.73794699908546700e-03,
1.00000000000000000e+00, 6.73794699908546700e-03,
4.53999297624848542e-05]),
(5, 2, 0.2, True): None,
(5, 2, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1.,
0.36787944117144233, 0.1353352832366127]),
(5, 2, 1.0, True): None
}
def test_exponential():
for k, v in exponential_data.items():
if v is None:
assert_raises(ValueError, signal.exponential, *k)
else:
win = signal.exponential(*k)
assert_allclose(win, v, rtol=1e-14)
class TestFlatTop(object):
def test_basic(self):
assert_allclose(signal.flattop(6, sym=False),
[-0.000421051, -0.051263156, 0.19821053, 1.0,
0.19821053, -0.051263156])
assert_allclose(signal.flattop(6),
[-0.000421051, -0.0677142520762119, 0.6068721525762117,
0.6068721525762117, -0.0677142520762119,
-0.000421051])
assert_allclose(signal.flattop(7, True),
[-0.000421051, -0.051263156, 0.19821053, 1.0,
0.19821053, -0.051263156, -0.000421051])
class TestGaussian(object):
def test_basic(self):
assert_allclose(signal.gaussian(6, 1.0),
[0.04393693362340742, 0.3246524673583497,
0.8824969025845955, 0.8824969025845955,
0.3246524673583497, 0.04393693362340742])
assert_allclose(signal.gaussian(7, 1.2),
[0.04393693362340742, 0.2493522087772962,
0.7066482778577162, 1.0, 0.7066482778577162,
0.2493522087772962, 0.04393693362340742])
assert_allclose(signal.gaussian(7, 3),
[0.6065306597126334, 0.8007374029168081,
0.9459594689067654, 1.0, 0.9459594689067654,
0.8007374029168081, 0.6065306597126334])
assert_allclose(signal.gaussian(6, 3, False),
[0.6065306597126334, 0.8007374029168081,
0.9459594689067654, 1.0, 0.9459594689067654,
0.8007374029168081])
class TestHamming(object):
def test_basic(self):
assert_allclose(signal.hamming(6, False),
[0.08, 0.31, 0.77, 1.0, 0.77, 0.31])
assert_allclose(signal.hamming(6),
[0.08, 0.3978521825875242, 0.9121478174124757,
0.9121478174124757, 0.3978521825875242, 0.08])
assert_allclose(signal.hamming(7, sym=True),
[0.08, 0.31, 0.77, 1.0, 0.77, 0.31, 0.08])
class TestHann(object):
def test_basic(self):
assert_allclose(signal.hann(6, sym=False),
[0, 0.25, 0.75, 1.0, 0.75, 0.25])
assert_allclose(signal.hann(6, True),
[0, 0.3454915028125263, 0.9045084971874737,
0.9045084971874737, 0.3454915028125263, 0])
assert_allclose(signal.hann(7),
[0, 0.25, 0.75, 1.0, 0.75, 0.25, 0])
class TestKaiser(object):
def test_basic(self):
assert_allclose(signal.kaiser(6, 0.5),
[0.9403061933191572, 0.9782962393705389,
0.9975765035372042, 0.9975765035372042,
0.9782962393705389, 0.9403061933191572])
assert_allclose(signal.kaiser(7, 0.5),
[0.9403061933191572, 0.9732402256999829,
0.9932754654413773, 1.0, 0.9932754654413773,
0.9732402256999829, 0.9403061933191572])
assert_allclose(signal.kaiser(6, 2.7),
[0.2603047507678832, 0.6648106293528054,
0.9582099802511439, 0.9582099802511439,
0.6648106293528054, 0.2603047507678832])
assert_allclose(signal.kaiser(7, 2.7),
[0.2603047507678832, 0.5985765418119844,
0.8868495172060835, 1.0, 0.8868495172060835,
0.5985765418119844, 0.2603047507678832])
assert_allclose(signal.kaiser(6, 2.7, False),
[0.2603047507678832, 0.5985765418119844,
0.8868495172060835, 1.0, 0.8868495172060835,
0.5985765418119844])
class TestNuttall(object):
def test_basic(self):
assert_allclose(signal.nuttall(6, sym=False),
[0.0003628, 0.0613345, 0.5292298, 1.0, 0.5292298,
0.0613345])
assert_allclose(signal.nuttall(6),
[0.0003628, 0.1105152530498718, 0.7982580969501282,
0.7982580969501283, 0.1105152530498719, 0.0003628])
assert_allclose(signal.nuttall(7, True),
[0.0003628, 0.0613345, 0.5292298, 1.0, 0.5292298,
0.0613345, 0.0003628])
class TestParzen(object):
def test_basic(self):
assert_allclose(signal.parzen(6),
[0.009259259259259254, 0.25, 0.8611111111111112,
0.8611111111111112, 0.25, 0.009259259259259254])
assert_allclose(signal.parzen(7, sym=True),
[0.00583090379008747, 0.1574344023323616,
0.6501457725947521, 1.0, 0.6501457725947521,
0.1574344023323616, 0.00583090379008747])
assert_allclose(signal.parzen(6, False),
[0.00583090379008747, 0.1574344023323616,
0.6501457725947521, 1.0, 0.6501457725947521,
0.1574344023323616])
class TestTriang(object):
def test_basic(self):
assert_allclose(signal.triang(6, True),
[1/6, 1/2, 5/6, 5/6, 1/2, 1/6])
assert_allclose(signal.triang(7),
[1/4, 1/2, 3/4, 1, 3/4, 1/2, 1/4])
assert_allclose(signal.triang(6, sym=False),
[1/4, 1/2, 3/4, 1, 3/4, 1/2])
tukey_data = {
(4, 0.5, True): array([0.0, 1.0, 1.0, 0.0]),
(4, 0.9, True): array([0.0, 0.84312081893436686,
0.84312081893436686, 0.0]),
(4, 1.0, True): array([0.0, 0.75, 0.75, 0.0]),
(4, 0.5, False): array([0.0, 1.0, 1.0, 1.0]),
(4, 0.9, False): array([0.0, 0.58682408883346526,
1.0, 0.58682408883346526]),
(4, 1.0, False): array([0.0, 0.5, 1.0, 0.5]),
(5, 0.0, True): array([1.0, 1.0, 1.0, 1.0, 1.0]),
(5, 0.8, True): array([0.0, 0.69134171618254492,
1.0, 0.69134171618254492, 0.0]),
(5, 1.0, True): array([0.0, 0.5, 1.0, 0.5, 0.0]),
(6, 0): [1, 1, 1, 1, 1, 1],
(7, 0): [1, 1, 1, 1, 1, 1, 1],
(6, .25): [0, 1, 1, 1, 1, 0],
(7, .25): [0, 1, 1, 1, 1, 1, 0],
(6,): [0, 0.9045084971874737, 1.0, 1.0, 0.9045084971874735, 0],
(7,): [0, 0.75, 1.0, 1.0, 1.0, 0.75, 0],
(6, .75): [0, 0.5522642316338269, 1.0, 1.0, 0.5522642316338267, 0],
(7, .75): [0, 0.4131759111665348, 0.9698463103929542, 1.0,
0.9698463103929542, 0.4131759111665347, 0],
(6, 1): [0, 0.3454915028125263, 0.9045084971874737, 0.9045084971874737,
0.3454915028125263, 0],
(7, 1): [0, 0.25, 0.75, 1.0, 0.75, 0.25, 0],
}
class TestTukey(object):
def test_basic(self):
# Test against hardcoded data
for k, v in tukey_data.items():
if v is None:
assert_raises(ValueError, signal.tukey, *k)
else:
win = signal.tukey(*k)
assert_allclose(win, v, rtol=1e-14)
def test_extremes(self):
# Test extremes of alpha correspond to boxcar and hann
tuk0 = signal.tukey(100, 0)
box0 = signal.boxcar(100)
assert_array_almost_equal(tuk0, box0)
tuk1 = signal.tukey(100, 1)
han1 = signal.hann(100)
assert_array_almost_equal(tuk1, han1)
class TestGetWindow(object):
def test_boxcar(self):
w = signal.get_window('boxcar', 12)
assert_array_equal(w, np.ones_like(w))
# window is a tuple of len 1
w = signal.get_window(('boxcar',), 16)
assert_array_equal(w, np.ones_like(w))
def test_cheb_odd(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
w = signal.get_window(('chebwin', -40), 53, fftbins=False)
assert_array_almost_equal(w, cheb_odd_true, decimal=4)
def test_cheb_even(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
w = signal.get_window(('chebwin', 40), 54, fftbins=False)
assert_array_almost_equal(w, cheb_even_true, decimal=4)
def test_kaiser_float(self):
win1 = signal.get_window(7.2, 64)
win2 = signal.kaiser(64, 7.2, False)
assert_allclose(win1, win2)
def test_invalid_inputs(self):
# Window is not a float, tuple, or string
assert_raises(ValueError, signal.get_window, set('hann'), 8)
# Unknown window type error
assert_raises(ValueError, signal.get_window, 'broken', 4)
def test_array_as_window(self):
# github issue 3603
osfactor = 128
sig = np.arange(128)
win = signal.get_window(('kaiser', 8.0), osfactor // 2)
assert_raises(ValueError, signal.resample,
(sig, len(sig) * osfactor), {'window': win})
def test_windowfunc_basics():
for window_name, params in window_funcs:
window = getattr(signal, window_name)
with warnings.catch_warnings(record=True): # window is not suitable...
w1 = window(7, *params, sym=True)
w2 = window(7, *params, sym=False)
assert_array_almost_equal(w1, w2)
# Check that functions run and output lengths are correct
assert_equal(len(window(6, *params, sym=True)), 6)
assert_equal(len(window(6, *params, sym=False)), 6)
assert_equal(len(window(7, *params, sym=True)), 7)
assert_equal(len(window(7, *params, sym=False)), 7)
# Check invalid lengths
assert_raises(ValueError, window, 5.5, *params)
assert_raises(ValueError, window, -7, *params)
# Check degenerate cases
assert_array_equal(window(0, *params, sym=True), [])
assert_array_equal(window(0, *params, sym=False), [])
assert_array_equal(window(1, *params, sym=True), [1])
assert_array_equal(window(1, *params, sym=False), [1])
# Check dtype
assert_(window(0, *params, sym=True).dtype == 'float')
assert_(window(0, *params, sym=False).dtype == 'float')
assert_(window(1, *params, sym=True).dtype == 'float')
assert_(window(1, *params, sym=False).dtype == 'float')
assert_(window(6, *params, sym=True).dtype == 'float')
assert_(window(6, *params, sym=False).dtype == 'float')
# Check normalization
assert_array_less(window(10, *params, sym=True), 1.01)
assert_array_less(window(10, *params, sym=False), 1.01)
assert_array_less(window(9, *params, sym=True), 1.01)
assert_array_less(window(9, *params, sym=False), 1.01)
# Check periodic spectrum
assert_allclose(fftpack.fft(window(10, *params, sym=False)).imag,
0, atol=1e-14)
def test_needs_params():
for winstr in ['kaiser', 'ksr', 'gaussian', 'gauss', 'gss',
'general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs',
'slepian', 'optimal', 'slep', 'dss', 'dpss',
'chebwin', 'cheb', 'exponential', 'poisson', 'tukey',
'tuk']:
assert_raises(ValueError, signal.get_window, winstr, 7)
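# Illustrative sketch (not part of the test suite above): get_window() builds a
# periodic window by default (fftbins=True), while calling a window function
# directly defaults to a symmetric one; 'hamming' is just an example choice.
# Uses the module-level numpy/scipy imports from the top of this file.
def _example_get_window_defaults():
    w_periodic = signal.get_window('hamming', 8)   # fftbins=True by default
    w_symmetric = signal.hamming(8)                # sym=True by default
    assert np.allclose(w_periodic, signal.hamming(8, sym=False))
    assert not np.allclose(w_periodic, w_symmetric)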
if __name__ == "__main__":
run_module_suite()
|
|
from handlers.base import BaseHandler
from tornado import gen
import momoko
import tornado.web
import tornado.escape
import logging
logger = logging.getLogger('kisspy.' + __name__)
MAX_ADMIN_UID=2
from models import A,B,C,User, Link, Visit, Config
from settings import MEDIA_ROOT
import os
import time
import forms
class BaseAdminMixin(object):
#def prepare(self):
# pass
@gen.coroutine
def get_visits(self, uid=None, page_size=100, offset=0):
params={'offset':offset,'limit':page_size}
visits = Visit.select().paginate((offset % page_size)+1, page_size)
if uid:
params.update({'uid':uid})
visits = (Visit.select(Visit, User.nickname)
.where(Visit.uid==uid)
.join(User)
.group_by(Visit)
.paginate((offset % page_size)+1, page_size))
else:
visits = (Visit.select(Visit, User.nickname)
.join(User)
.group_by(Visit)
.paginate((offset % page_size)+1, page_size))
raise gen.Return(visits)
@gen.coroutine
def get_visits_total(self, uid=None):
if uid:
results_count=Visit.select().where(Visit.uid==uid).count()
else:
results_count=Visit.select().count()
raise gen.Return(results_count)
@gen.coroutine
def check_superuser(self):
user=self.get_current_user()
if user:
logger.error(str(user))
if user['uid']>MAX_ADMIN_UID:
raise tornado.web.HTTPError(404,'Page Not Found!')
class AdminHandler(BaseAdminMixin, BaseHandler):
@tornado.web.authenticated
@gen.coroutine
def get(self):
user=self.get_current_user()
if user:
logger.error(str(user))
if user['uid']>MAX_ADMIN_UID:
raise tornado.web.HTTPError(404,'Page Not Found!')
users_total=User.select().count()
msg=None
kwargs={
'msg':msg,
'users_total':users_total,
}
self.render("admin/index.html",**kwargs)
class AdminSystemHandler(BaseHandler):
@tornado.web.authenticated
@gen.coroutine
def get(self):
user=self.get_current_user()
if user:
logger.error(str(user))
if user['uid']>MAX_ADMIN_UID:
raise tornado.web.HTTPError(404,'Page Not Found!')
users_total=yield self.get_users_total()
users=User.select()
try:
config=Config.select().where(Config.id==1).get()
except Config.DoesNotExist:
config=Config()
msg=self.get_argument('msg','')
page = int(self.get_argument('page','1'))
page_size = 100
offset = page_size*(page-1)
kwargs={
'msg':msg,
'users':users,
'users_total':users_total,
'config':config,
}
self.render("admin/system.html",**kwargs)
@tornado.web.authenticated
@gen.coroutine
def post(self):
#print self.request
#print self.request.body
#print self.request.arguments
#print self.request.files.keys()
#print self.request.files['logo']
#print '-'*80
config_id = int(self.get_argument('config_id','1'))
ip = self.get_argument('ip','')
domain = self.get_argument('domain','')
sitename = self.get_argument('sitename','')
siteurl = self.get_argument('siteurl','')
title = self.get_argument('title','')
keywords = self.get_argument('keywords','')
description = self.get_argument('description','')
copyright = self.get_argument('copyright','')
shutdown = int(self.get_argument('shutdown','0'))
reason = self.get_argument('reason','')
logo = self.get_argument('logo','')
        logger.debug('logo argument: %r', logo)
try:
file_dict_list = self.request.files['logo']
except KeyError:
filename = None # no image uploaded
else:
for fd in file_dict_list:
filename = fd["filename"]
ext=filename.split('.')[-1]
filename = 'logo%s.%s' % (str(int(1000*(time.time()))), ext)
filepath = os.path.join(MEDIA_ROOT, 'images', filename)
f = open(filepath, "wb")
f.write(fd["body"])
f.close()
try:
config=Config.select().where(Config.id==config_id).get()
except:
config_count= Config.select().count()
if config_count>0:
raise tornado.web.HTTPError(500, 'Server Config is broken!')
else:
defaults={}
config=Config(sitename='ABCcms', siteurl='http://localhost')
config.save()
        logger.debug('config: %r', config)
        logger.debug('shutdown: %r', bool(shutdown))
config.sitename=sitename
config.siteurl=siteurl
config.title=title
config.keywords=keywords
config.description=description
config.copyright=copyright
config.shutdown=bool(shutdown)
config.reason=reason
config.ip=ip
config.domain=domain
if filename:
config.logo=filename
config.save()
self.application.reload_config()
self.redirect('/admin/system')
class AdminThreadHandler(BaseHandler):
@tornado.web.authenticated
@gen.coroutine
def get(self):
user=self.get_current_user()
if user:
logger.error(str(user))
if user['uid']>MAX_ADMIN_UID:
raise tornado.web.HTTPError(404,'Page Not Found!')
users_total=yield self.get_users_total()
users=User.select()
msg=self.get_argument('msg','')
page = int(self.get_argument('page','1'))
page_size = 100
offset = page_size*(page-1)
kwargs={
'msg':msg,
'users':users,
'users_total':users_total,
}
self.render("admin/thread.html",**kwargs)
class AdminUserHandler(BaseHandler):
@tornado.web.authenticated
@gen.coroutine
def get(self):
user=self.get_current_user()
if user:
logger.error(str(user))
if user['uid']>MAX_ADMIN_UID:
raise tornado.web.HTTPError(404,'Page Not Found!')
users_total=yield self.get_users_total()
users=User.select()
msg=self.get_argument('msg','')
page = int(self.get_argument('page','1'))
page_size = 100
offset = page_size*(page-1)
kwargs={
'msg':msg,
'users':users,
'users_total':users_total,
}
self.render("admin/user.html",**kwargs)
class AdminUserAddHandler(BaseAdminMixin, BaseHandler):
def get(self):
users_total=User.select().count()
users=User.select()
form = forms.UserForm()
        logger.debug('form: %r', form)
        logger.debug('form attributes: %r', dir(form))
kwargs={
'form':form,
'users':users,
'users_total':users_total,
}
self.render('admin/user_add.html', **kwargs)
@gen.coroutine
def post(self):
form = forms.UserForm(self)
if form.validate():
self.write('Hello %s' % form.planet.data)
else:
self.render('index.html', form=form)
email = self.get_argument('email', '').strip()
username = self.get_argument('username', '').strip()
password1 = self.get_argument('password1', '').strip()
password2 = self.get_argument('password2', '').strip()
if password1 != password2:
            error_msg = tornado.escape.url_escape("Passwords do not match!")
            self.redirect(u'/user/register?error=' + error_msg)
return
if email == '':
error_msg = tornado.escape.url_escape("Email is required!")
self.redirect(u"/user/register?error=" + error_msg)
return
else:
            if email.find('@') == -1:
                error_msg = tornado.escape.url_escape("Email is invalid!")
                self.redirect(u"/user/register?error=" + error_msg)
                return
if not username:
username=email.split('@')[0]
exist,msg = yield self.exist(email=email, username=username)
if exist:
# exist user email or username
            error_msg = tornado.escape.url_escape('Login name already taken')
            self.redirect(u'/user/register?error=' + error_msg)
return
if password1:
password = password1
else:
            error_msg = tornado.escape.url_escape('Password not set')
            self.redirect(u'/user/register?error=' + error_msg)
return
user = {}
user['email'] = email
user['username'] = username
user['password'] = password
user = yield self.add_user(**user)
if user:
self.set_current_user(user)
self.redirect('/admin/user')
return
class AdminVisitHandler(BaseAdminMixin, BaseHandler):
@tornado.web.authenticated
@gen.coroutine
def get(self):
user=self.get_current_user()
if user:
logger.error(str(user))
if user['uid']>MAX_ADMIN_UID:
raise tornado.web.HTTPError(404,'Page Not Found!')
users_total=yield self.get_users_total()
users=User.select()
msg=self.get_argument('msg','')
page = int(self.get_argument('page','1'))
page_size = 100
offset = page_size*(page-1)
results_count=yield self.get_visits_total()
visits=yield self.get_visits(offset=offset)
kwargs={
'msg':msg,
'users':users,
'users_total':users_total,
'visits':visits,
'results_count':results_count,
'page_size':page_size,
'page':page
}
self.render("admin/visit.html",**kwargs)
class AdminLoginHandler(BaseAdminMixin, BaseHandler):
@tornado.web.authenticated
@gen.coroutine
def get(self):
user=self.get_current_user()
if user:
logger.error(str(user))
if user['uid']>MAX_ADMIN_UID:
raise tornado.web.HTTPError(404,'Page Not Found!')
kwargs={
}
self.render("admin/login.html",**kwargs)
class AdminLogoutHandler(BaseAdminMixin, BaseHandler):
@tornado.web.authenticated
@gen.coroutine
def get(self):
self.clear_cookie(self.djbhash('user'))
self.redirect('/')
class AdminChannelHandler(BaseHandler):
@tornado.web.authenticated
@gen.coroutine
def get(self):
#print repr(self.application.sitename)
user=self.get_current_user()
if user:
logger.error(str(user))
if user['uid']>MAX_ADMIN_UID:
raise tornado.web.HTTPError(404,'Page Not Found!')
users_total=yield self.get_users_total()
users=User.select()
channels=A.select()
try:
config=Config.select().where(Config.id==1).get()
except Config.DoesNotExist:
config=Config()
msg=self.get_argument('msg','')
page = int(self.get_argument('page','1'))
page_size = 100
offset = page_size*(page-1)
kwargs={
'msg':msg,
'users':users,
'channels': channels,
'users_total':users_total,
'config':config,
}
self.render("admin/channel.html",**kwargs)
@tornado.web.authenticated
@gen.coroutine
def post(self):
#print self.request
#print self.request.body
#print self.request.arguments
#print self.request.files.keys()
#print self.request.files['logo']
#print '-'*80
config_id = int(self.get_argument('config_id','1'))
ip = self.get_argument('ip','')
domain = self.get_argument('domain','')
sitename = self.get_argument('sitename','')
siteurl = self.get_argument('siteurl','')
title = self.get_argument('title','')
keywords = self.get_argument('keywords','')
description = self.get_argument('description','')
copyright = self.get_argument('copyright','')
shutdown = int(self.get_argument('shutdown','0'))
reason = self.get_argument('reason','')
logo = self.get_argument('logo','')
        logger.debug('logo argument: %r', logo)
try:
file_dict_list = self.request.files['logo']
except KeyError:
filename = None # no image uploaded
else:
for fd in file_dict_list:
filename = fd["filename"]
ext=filename.split('.')[-1]
filename = 'logo%s.%s' % (str(int(1000*(time.time()))), ext)
filepath = os.path.join(MEDIA_ROOT, 'images', filename)
f = open(filepath, "wb")
f.write(fd["body"])
f.close()
try:
config=Config.select().where(Config.id==config_id).get()
except:
config_count= Config.select().count()
if config_count>0:
raise tornado.web.HTTPError(500, 'Server Config is broken!')
else:
defaults={}
config=Config(sitename='ABCcms', siteurl='http://localhost')
config.save()
        logger.debug('config: %r', config)
        logger.debug('shutdown: %r', bool(shutdown))
config.sitename=sitename
config.siteurl=siteurl
config.title=title
config.keywords=keywords
config.description=description
config.copyright=copyright
config.shutdown=bool(shutdown)
config.reason=reason
config.ip=ip
config.domain=domain
if filename:
config.logo=filename
config.save()
self.redirect('/admin/channel')
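# Refactoring sketch (not wired into the handlers above): AdminSystemHandler.post
# and AdminChannelHandler.post copy the same request arguments onto a Config row,
# so that block could live in one shared helper. The helper name is an assumption
# made here for illustration; `handler` is any handler exposing get_argument().
def apply_config_from_request(handler, config, filename=None):
    """Copy the submitted site settings onto `config` and save it."""
    for field in ('ip', 'domain', 'sitename', 'siteurl', 'title',
                  'keywords', 'description', 'copyright', 'reason'):
        setattr(config, field, handler.get_argument(field, ''))
    config.shutdown = bool(int(handler.get_argument('shutdown', '0')))
    if filename:
        config.logo = filename
    config.save()
    return config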
|
|
from django.db import connection
from django.db.migrations.executor import MigrationExecutor
from django.test import modify_settings, override_settings
from .test_base import MigrationTestBase
@modify_settings(INSTALLED_APPS={'append': 'migrations2'})
class ExecutorTests(MigrationTestBase):
"""
Tests the migration executor (full end-to-end running).
Bear in mind that if these are failing you should fix the other
test failures first, as they may be propagating into here.
"""
available_apps = ["migrations", "migrations2"]
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_run(self):
"""
Tests running a simple set of migrations.
"""
executor = MigrationExecutor(connection)
# Let's look at the plan first and make sure it's up to scratch
plan = executor.migration_plan([("migrations", "0002_second")])
self.assertEqual(
plan,
[
(executor.loader.graph.nodes["migrations", "0001_initial"], False),
(executor.loader.graph.nodes["migrations", "0002_second"], False),
],
)
# Were the tables there before?
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_book")
# Alright, let's try running it
executor.migrate([("migrations", "0002_second")])
# Are the tables there now?
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_book")
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
# Alright, let's undo what we did
plan = executor.migration_plan([("migrations", None)])
self.assertEqual(
plan,
[
(executor.loader.graph.nodes["migrations", "0002_second"], True),
(executor.loader.graph.nodes["migrations", "0001_initial"], True),
],
)
executor.migrate([("migrations", None)])
# Are the tables gone?
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_book")
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
def test_run_with_squashed(self):
"""
Tests running a squashed migration from zero (should ignore what it replaces)
"""
executor = MigrationExecutor(connection)
# Check our leaf node is the squashed one
leaves = [key for key in executor.loader.graph.leaf_nodes() if key[0] == "migrations"]
self.assertEqual(leaves, [("migrations", "0001_squashed_0002")])
# Check the plan
plan = executor.migration_plan([("migrations", "0001_squashed_0002")])
self.assertEqual(
plan,
[
(executor.loader.graph.nodes["migrations", "0001_squashed_0002"], False),
],
)
# Were the tables there before?
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_book")
# Alright, let's try running it
executor.migrate([("migrations", "0001_squashed_0002")])
# Are the tables there now?
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_book")
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
# Alright, let's undo what we did. Should also just use squashed.
plan = executor.migration_plan([("migrations", None)])
self.assertEqual(
plan,
[
(executor.loader.graph.nodes["migrations", "0001_squashed_0002"], True),
],
)
executor.migrate([("migrations", None)])
# Are the tables gone?
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_book")
@override_settings(MIGRATION_MODULES={
"migrations": "migrations.test_migrations",
"migrations2": "migrations2.test_migrations_2",
})
def test_empty_plan(self):
"""
Tests that re-planning a full migration of a fully-migrated set doesn't
perform spurious unmigrations and remigrations.
There was previously a bug where the executor just always performed the
backwards plan for applied migrations - which even for the most recent
migration in an app, might include other, dependent apps, and these
were being unmigrated.
"""
# Make the initial plan, check it
executor = MigrationExecutor(connection)
plan = executor.migration_plan([
("migrations", "0002_second"),
("migrations2", "0001_initial"),
])
self.assertEqual(
plan,
[
(executor.loader.graph.nodes["migrations", "0001_initial"], False),
(executor.loader.graph.nodes["migrations", "0002_second"], False),
(executor.loader.graph.nodes["migrations2", "0001_initial"], False),
],
)
# Fake-apply all migrations
executor.migrate([
("migrations", "0002_second"),
("migrations2", "0001_initial")
], fake=True)
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
# Now plan a second time and make sure it's empty
plan = executor.migration_plan([
("migrations", "0002_second"),
("migrations2", "0001_initial"),
])
self.assertEqual(plan, [])
# Erase all the fake records
executor.recorder.record_unapplied("migrations2", "0001_initial")
executor.recorder.record_unapplied("migrations", "0002_second")
executor.recorder.record_unapplied("migrations", "0001_initial")
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_soft_apply(self):
"""
Tests detection of initial migrations already having been applied.
"""
state = {"faked": None}
def fake_storer(phase, migration, fake):
state["faked"] = fake
executor = MigrationExecutor(connection, progress_callback=fake_storer)
# Were the tables there before?
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
# Run it normally
self.assertEqual(
executor.migration_plan([("migrations", "0001_initial")]),
[
(executor.loader.graph.nodes["migrations", "0001_initial"], False),
],
)
executor.migrate([("migrations", "0001_initial")])
# Are the tables there now?
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_tribble")
# We shouldn't have faked that one
self.assertEqual(state["faked"], False)
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
# Fake-reverse that
executor.migrate([("migrations", None)], fake=True)
# Are the tables still there?
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_tribble")
# Make sure that was faked
self.assertEqual(state["faked"], True)
# Finally, migrate forwards; this should fake-apply our initial migration
executor.loader.build_graph()
self.assertEqual(
executor.migration_plan([("migrations", "0001_initial")]),
[
(executor.loader.graph.nodes["migrations", "0001_initial"], False),
],
)
executor.migrate([("migrations", "0001_initial")])
self.assertEqual(state["faked"], True)
# And migrate back to clean up the database
executor.loader.build_graph()
executor.migrate([("migrations", None)])
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_custom_user"},
AUTH_USER_MODEL="migrations.Author",
)
def test_custom_user(self):
"""
Regression test for #22325 - references to a custom user model defined in the
same app are not resolved correctly.
"""
executor = MigrationExecutor(connection)
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
# Migrate forwards
executor.migrate([("migrations", "0001_initial")])
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_tribble")
# And migrate back to clean up the database
executor.loader.build_graph()
executor.migrate([("migrations", None)])
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
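# Illustrative sketch (not an actual test case): the executor API exercised above
# can also report whether anything is left to apply, by planning towards the
# current leaf nodes of the loaded migration graph.
def has_pending_migrations(conn):
    """Return True if planning to the graph's leaf nodes yields any steps."""
    executor = MigrationExecutor(conn)
    return bool(executor.migration_plan(executor.loader.graph.leaf_nodes()))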
|
|
# ===============================================================================
# Copyright 2016 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= standard library imports ========================
from datetime import datetime, timedelta
from numpy import array
from traits.api import File
from pychron.data_mapper.sources.file_source import (
FileSource,
get_float,
get_int,
get_next,
get_ufloat,
)
from pychron.processing.isotope import Isotope, Baseline
from pychron.processing.isotope_group import IsotopeGroup
from pychron.pychron_constants import INTERFERENCE_KEYS
# ============= local library imports ==========================
def make_ed(s):
k = s[3]
ed = ""
if k == "L":
ed = "CO2"
elif k == "F":
ed = "Furnace"
return ed
class USGSVSCSource(FileSource):
_delimiter = "\t"
irradiation_path = File
class USGSVSCNuSource(USGSVSCSource):
pass
class USGSVSCMAPSource(USGSVSCSource):
# def _path_default(self):
# return '/Users/ross/Programming/github/pychron_dev/pychron/data_mapper/tests/data/16Z0071/16K0071A.TXT'
#
# def _irradiation_path_default(self):
# return '/Users/ross/Programming/github/pychron_dev/pychron/data_mapper/tests/data/IRR330.txt'
#
# def _directory_default(self):
# return '/Users/ross/Downloads/MAPdataToJake/Unknown/16Z0071'
def get_irradiation_import_spec(self, *args, **kw):
from pychron.data_mapper.import_spec import (
ImportSpec,
Irradiation,
Level,
Position,
Production,
)
spec = ImportSpec()
delimiter = "\t"
with open(self.irradiation_path, "r") as rfile:
i = Irradiation()
i.name = next(rfile).strip()
spec.irradiation = i
note = next(rfile)
_, nsteps = next(rfile).split(delimiter)
doses = []
for _ in range(int(nsteps)):
duration, start, power = next(rfile).split(delimiter)
sd = datetime.fromtimestamp(float(start))
sd = sd.replace(year=sd.year - 66)
ed = sd + timedelta(hours=float(duration))
dose = (float(power), sd, ed)
doses.append(dose)
i.doses = doses
level = Level()
level.name = "A"
nlevels = [level]
prod = Production()
prod.name = i.name
for line in rfile:
name, v, e = line.split(delimiter)
name = name.replace("/", "")
for attr in INTERFERENCE_KEYS:
if name in (attr[1:], attr[2:]):
setattr(prod, attr, (float(v), float(e)))
level.production = prod
pp = Position()
pp.position = 0
pp.identifier = i.name
poss = [pp]
level.positions = poss
i.levels = nlevels
return spec
def get_analysis_import_spec(self, delimiter=None):
pspec = self.new_persistence_spec()
rspec = pspec.run_spec
f = self.file_gen(delimiter)
row = next(f)
rspec.identifier = row[0][:-1]
rspec.aliquot = 1
rspec.step = row[0][-1]
rspec.extract_device = make_ed(row[0])
rspec.irradiation = row[1]
rspec.irradiation_position = get_int(f, 1)
rspec.irradiation_level = "A"
for attr in ("sample", "material", "project"):
setattr(rspec, attr, get_next(f, 1))
for attr in ("j", "j_err"):
setattr(pspec, attr, get_float(f, 1))
d = get_next(f, 1)
t = get_next(f, 1)
pspec.timestamp = datetime.strptime("{} {}".format(d, t), "%m/%d/%Y %H:%M:%S")
abundance_sens = get_float(f)
abundance_sens_err = get_float(f)
air = get_float(f)
disc = 295.5 / air
pspec.discrimination = disc
        row = next(f)  # MD error
row = next(f) # peakhop cycles
n40 = get_int(f)
n39 = get_int(f)
n38 = get_int(f)
n37 = get_int(f)
n36 = get_int(f)
n41 = get_int(f)
n355 = get_int(f)
_spare = next(f)
int40 = next(f)
int39 = next(f)
int38 = next(f)
int37 = next(f)
int36 = next(f)
int41 = next(f)
int355 = next(f)
bk40 = get_ufloat(f)
bk39 = get_ufloat(f)
bk38 = get_ufloat(f)
bk37 = get_ufloat(f)
bk36 = get_ufloat(f)
bk41 = get_ufloat(f)
bk40 += get_ufloat(f)
bk39 += get_ufloat(f)
bk38 += get_ufloat(f)
bk37 += get_ufloat(f)
bk36 += get_ufloat(f)
bk41 += get_ufloat(f)
bk40 += get_ufloat(f)
bk39 += get_ufloat(f)
bk38 += get_ufloat(f)
bk37 += get_ufloat(f)
bk36 += get_ufloat(f)
bk41 += get_ufloat(f)
isotopes = {
"Ar40": self._get_isotope(f, "Ar40", n40, bk40),
"Ar39": self._get_isotope(f, "Ar39", n39, bk39),
"Ar38": self._get_isotope(f, "Ar38", n38, bk38),
"Ar37": self._get_isotope(f, "Ar37", n37, bk37),
"Ar36": self._get_isotope(f, "Ar36", n36, bk36),
"Ar41": self._get_isotope(f, "Ar41", n41, bk41),
}
xs, ys = self._get_baseline(f, n355)
for iso in isotopes.values():
bs = Baseline(iso.name, iso.detector)
bs.set_fit("average")
bs.set_fit_error_type("SEM")
bs.xs = xs
bs.ys = ys
iso.baseline = bs
try:
next(f)
self.warning("Extra data in file")
except StopIteration:
pass
pspec.isotope_group = IsotopeGroup(isotopes=isotopes)
return pspec
def _get_baseline(self, f, ncnts):
rs = (next(f) for i in range(ncnts))
ys, xs = list(zip(*((float(r[0]), float(r[1])) for r in rs)))
return array(xs), array(ys)
def _get_isotope(self, f, name, ncnts, bk):
iso = Isotope(name, "Detector1")
iso.set_ublank(bk)
iso.name = name
iso.set_fit("linear")
iso.set_fit_error_type("SEM")
rs = (next(f) for i in range(ncnts))
ys, xs = list(zip(*((float(r[0]), float(r[1])) for r in rs)))
iso.xs = array(xs)
iso.ys = array(ys)
return iso
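# Illustrative sketch of the row-parsing idiom used by _get_baseline and
# _get_isotope above: each data row is (intensity, time), and zip(*...) is used
# to transpose the rows into separate y and x arrays. The sample rows shown in
# the comment below are made up.
def _sketch_transpose_rows(rows):
    ys, xs = list(zip(*((float(r[0]), float(r[1])) for r in rows)))
    return array(xs), array(ys)
# _sketch_transpose_rows([('1.5', '0'), ('1.4', '10'), ('1.3', '20')])
# -> (array([ 0., 10., 20.]), array([1.5, 1.4, 1.3]))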
# ============= EOF =============================================
|
|
# This file is part of MANTIS OS, Operating System
# See http://mantis.cs.colorado.edu/
#
# Copyright (C) 2003-2005 University of Colorado, Boulder
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the mos license (see file LICENSE)
import wx
import thread
import net_model
class node_view:
def __init__(self, model, color = 'BLUE'):
self.node_radius = 10 # Radius of a node
self.node_color = 'GREEN' # TODO not currently used
self.node_outline = 'BLACK' # TODO not currently used
# Setting this flag prevents drawing this node and links while dragging
self.dragging = False
self.model = model
# Now setup the node's bitmap so we can just blit to the screen
# rather than having to re-draw every time.
#self.bmp = wx.EmptyBitmap(2 * self.node_radius + 4, 2 * self.node_radius + 4)
self.bmp = wx.EmptyBitmap(2 * self.node_radius, 3 * self.node_radius)
self.Update()
def HitTest(self, point):
rect = self.GetRect()
return rect.InsideXY(point.x, point.y)
def GetRect(self):
x, y = self.model.GetPosition()
return wx.Rect(x-self.node_radius, y-self.node_radius,
self.bmp.GetWidth(), self.bmp.GetHeight())
def Erase(self, dc):
if self.dragging:
return
dc.SetBrush(wx.Brush("WHITE"))
dc.SetPen(wx.Pen("WHITE"))
x, y = self.model.GetPosition()
#dc.DrawRectangle(x-self.node_radius, y-self.node_radius,
# self.node_radius * 2 + 4, self.node_radius * 2 + 4)
dc.DrawRectangle(x-self.node_radius, y-self.node_radius,
2 * self.node_radius, 3 * self.node_radius)
def Draw(self, dc, op = wx.COPY):
if self.dragging:
return True
if self.bmp.Ok():
memDC = wx.MemoryDC()
memDC.SelectObject(self.bmp)
x, y = self.model.GetPosition()
dc.Blit(x-self.node_radius, y-self.node_radius,
self.bmp.GetWidth(), self.bmp.GetHeight(),
memDC, 0, 0, op, True)
return True
else:
return False
def Update(self):
#self.led = state
# create a DC for drawing in to the bitmap memory
        bdc = wx.MemoryDC()
        bdc.SelectObject(self.bmp)
# First clear the background
#bdc.SetBrush(wx.Brush("WHITE"))
#bdc.SetPen(wx.Pen("WHITE"))
#bdc.DrawRectangle(0, 0, self.node_radius * 2 + 4, self.node_radius * 2 + 4)
# Now draw our default node
#bdc.SetBrush(wx.Brush(self.node_color))
#if self.model.GetLedState() == 1:
# bdc.SetPen(wx.Pen(self.node_outline, 4))
#else:
# bdc.SetPen(wx.Pen("RED", 4))
#bdc.DrawEllipse(0, 0, self.node_radius * 2, self.node_radius * 2)
bdc.SetBrush(wx.Brush("DARKGREEN"))
bdc.SetPen(wx.Pen("DARKGREEN"))
bdc.DrawRectangle(0, 0, 2 * self.node_radius, 3 * self.node_radius)
# Now draw the led line
if self.model.led & 1:
bdc.SetBrush(wx.Brush("YELLOW"))
bdc.SetPen(wx.Pen("YELLOW"))
bdc.DrawRectangle(0, 16, self.node_radius*3/2, 8)
if self.model.led & 2: # green
bdc.SetBrush(wx.Brush("GREEN"))
bdc.SetPen(wx.Pen("GREEN"))
bdc.DrawRectangle(0, 8, self.node_radius*3/2, 8)
if self.model.led & 4: # red
bdc.SetBrush(wx.Brush("RED"))
bdc.SetPen(wx.Pen("RED"))
bdc.DrawRectangle(0, 0, self.node_radius*3/2, 8)
# must disconnect the bitmap from the dc so we can use it later
        bdc.SelectObject(wx.NullBitmap)
# Create a mask so that we only blit the colored part
#if "__WXGTK__" not in wx.PlatformInfo:
#mask = wx.Mask(self.bmp, wx.WHITE)
mask = wx.Mask(self.bmp)
mask.colour = wx.WHITE
self.bmp.SetMask(mask)
def __str__(self):
return 'node_view:'+str(self.model.id)
class link_view:
def __init__(self, src, dst):
self.src = src
self.dst = dst
self.flashcount = 0
def Erase(self, dc):
if self.src.dragging or self.dst.dragging:
return
pen = wx.Pen("WHITE")
pen.SetWidth(4)
dc.SetPen(pen)
dc.DrawLine(self.src.model.pos[0], self.src.model.pos[1], self.dst.model.pos[0], self.dst.model.pos[1])
def Draw(self, dc, op = wx.COPY):
if self.src.dragging or self.dst.dragging:
return
if self.flashcount:
pen = wx.Pen("GOLD")
else:
pen = wx.Pen("BLUE")
pen.SetWidth(4)
dc.SetPen(pen)
dc.DrawLine(self.src.model.pos[0], self.src.model.pos[1], self.dst.model.pos[0], self.dst.model.pos[1])
class event_queue:
"Queue for storing net events and their callbacks. See net_view.DispatchEvent()."
def __init__(self):
self.lock = thread.allocate_lock()
self.list = []
def put(self, obj):
"Add an object to the queue atomically."
self.lock.acquire()
self.list.append(obj)
self.lock.release()
def get(self):
"Return the entire queue as a list and clear the queue atomically."
self.lock.acquire()
list = self.list
self.list = []
self.lock.release()
return list
class net_view(wx.ScrolledWindow):
"This component does the drawing of the network model."
def __init__(self, parent, id, model):
wx.ScrolledWindow.__init__(self, parent, id, style=wx.NO_FULL_REPAINT_ON_RESIZE)
self.model = model
self.node_dict = {}
self.link_dict = {}
self.node_size = 25
self.dragNode = None
self.dragImage = None
self.queue = event_queue()
self.SetBackgroundColour("WHITE")
self.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))
# Mouse buttons and motion
wx.EVT_LEFT_DOWN(self, self.OnLeftDown)
wx.EVT_LEFT_UP(self, self.OnLeftUp)
wx.EVT_MOTION(self, self.OnMotion)
wx.EVT_PAINT(self, self.OnPaint)
wx.EVT_IDLE(self, self.OnIdle)
self.SetMode("Select")
# Register network events callback DispatchEvent.
# See net_view.DispatchEvent() for details.
model.Bind(net_model.ADD_NODE, self.DispatchEvent, self.add_node)
model.Bind(net_model.REMOVE_NODE, self.DispatchEvent, self.del_node)
model.Bind(net_model.ADD_LINK, self.DispatchEvent, self.add_radio_link)
model.Bind(net_model.REMOVE_LINK, self.DispatchEvent, self.del_radio_link)
model.Bind(net_model.NET_CHANGED, self.DispatchEvent, self.new_network)
model.Bind(net_model.FORWARD_PACKET, self.DispatchEvent, self.forward_radio_packet)
def DispatchEvent(self, callback, *args):
""""Queue a net event to be handled on the GUI thread.
Many wxPython functions do not work when invoked from a thread other
than the main GUI thread. This is a problem for network events, because
they occur during the listen thread that was spawned by simClient.py.
The solution is to register a meta-callback, this method, with the
network model. When DispatchEvent is invoked by the network model,
it puts the original GUI callback, along with the arguments,
on self.queue and then calls wx.WakeUpIdle(). This causes OnIdle to be
invoked on the main GUI thread, which in turn invokes every callback
that is on the queue, and these callbacks can invoke wxPython functions
without fear of being on the wrong thread. This greatly simplifies the
implementation of the callbacks (trust me)."""
self.queue.put((callback, args))
# Cause an idle event to occur, which will invoke our idle handler.
wx.WakeUpIdle()
def FindNode(self, point):
"Return the node that contains the point."
for n in self.node_dict.itervalues():
if n.HitTest(point):
return n
return None
def OnLeftDown(self, evt):
node = self.FindNode(evt.GetPosition())
if node:
self.dragNode = node
self.dragStartPos = evt.GetPosition()
def OnLeftUp(self, evt):
if not self.dragImage or not self.dragNode:
self.dragImage = None
self.dragNode = None
return
# Hide the image, end dragging, and nuke out the drag image.
self.dragImage.Hide()
self.dragImage.EndDrag()
self.dragImage = None
dc = wx.ClientDC(self)
# reposition and draw the shape
self.dragNode.model.pos = (
self.dragNode.model.pos[0] + evt.GetPosition()[0] - self.dragStartPos[0],
self.dragNode.model.pos[1] + evt.GetPosition()[1] - self.dragStartPos[1]
)
self.dragNode.dragging = False
self.dragNode.Draw(dc)
# Update the network model.
self.model.MoveNode(self.dragNode.model.id, self.dragNode.model.pos[0], self.dragNode.model.pos[1])
self.dragNode = None
def OnRightDown(self, event):
pass
def OnRightUp(self, event):
pass
def OnMotion(self, evt):
# Ignore mouse movement if we're not dragging.
if not self.dragNode or not evt.Dragging() or not evt.LeftIsDown():
return
# if we have a node, but haven't started dragging yet
if self.dragNode and not self.dragImage:
# only start the drag after having moved a couple pixels
tolerance = 2
pt = evt.GetPosition()
dx = abs(pt.x - self.dragStartPos.x)
dy = abs(pt.y - self.dragStartPos.y)
if dx <= tolerance and dy <= tolerance:
return
# Create a DragImage to draw this node while it is moving
# (The drag image will update even as the bitmap is updating. Magical!)
self.dragImage = wx.DragImage(self.dragNode.bmp,
wx.StockCursor(wx.CURSOR_HAND))
hotspot = self.dragStartPos - self.dragNode.model.pos + [self.dragNode.node_radius, self.dragNode.node_radius]
self.dragImage.BeginDrag(hotspot, self, False)
self.dragImage.Move(pt)
# erase the node since it will be drawn by the DragImage now
dc = wx.ClientDC(self)
for link in self.dragNode.model.incoming.itervalues():
if link not in self.link_dict: continue
l = self.link_dict[link]
l.Erase(dc)
l.src.Draw(dc)
for link in self.dragNode.model.outgoing.itervalues():
if link not in self.link_dict: continue
l = self.link_dict[link]
l.Erase(dc)
l.dst.Draw(dc)
self.dragNode.Erase(dc)
self.dragNode.dragging = True
self.dragImage.Show()
# if we have node and image then move it
elif self.dragNode and self.dragImage:
self.dragImage.Move(evt.GetPosition())
def OnSize(self, event):
pass
def OnIdle(self, event):
"""Handle queued network events. See net_view.DispatchEvent()."""
for callback, args in self.queue.get():
callback(*args)
def OnPaint(self, event):
""" Window expose events come here to refresh. """
dc = wx.PaintDC(self)
self.Draw(dc)
def Draw(self, dc):
dc.BeginDrawing() # for Windows compatibility
# Since we are a scrolling window we need to prepare the DC
self.PrepareDC(dc)
dc.SetBackground(wx.Brush(self.GetBackgroundColour()))
dc.Clear()
for link in self.link_dict.itervalues():
link.Draw(dc)
for node in self.node_dict.itervalues():
node.Draw(dc)
dc.EndDrawing()
def SetMode(self, mode):
self.mode = mode
if self.mode == "Select":
self.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))
else:
self.SetCursor(wx.StockCursor(wx.STANDARD_CURSOR))
# TODO do something about this color parm
def add_node(self, nodemodel, color = 'BLUE'):
n = node_view(nodemodel, color)
self.node_dict[nodemodel] = n
nodemodel.Bind(net_model.LED_CHANGED, self.DispatchEvent, self.node_state_changed)
n.Update()
dc = wx.ClientDC(self)
n.Draw(dc)
def del_node(self, node):
if self.node_dict.has_key(node):
dc = wx.ClientDC(self)
self.node_dict[node].Erase(dc)
del self.node_dict[node]
def node_state_changed(self, node):
if self.node_dict.has_key(node):
n = self.node_dict[node]
n.Update()
dc = wx.ClientDC(self)
n.Draw(dc)
def add_radio_link(self, link):
if self.node_dict.has_key(link.src) and self.node_dict.has_key(link.dst):
src = self.node_dict[link.src]
dst = self.node_dict[link.dst]
l = link_view(src, dst)
self.link_dict[link] = l
dc = wx.ClientDC(self)
l.Draw(dc)
l.src.Draw(dc)
l.dst.Draw(dc)
def del_radio_link(self, link):
if self.link_dict.has_key(link):
l = self.link_dict[link]
dc = wx.ClientDC(self)
l.Erase(dc)
l.src.Draw(dc)
l.dst.Draw(dc)
del self.link_dict[link]
def new_network(self, model):
self.node_dict.clear()
self.link_dict.clear()
self.dragNode = None
self.dragImage = None
dummy = self.queue.get() # empties the list
for nodemodel in model.IterNodes():
n = node_view(nodemodel, 'BLUE')
self.node_dict[nodemodel] = n
nodemodel.Bind(net_model.LED_CHANGED, self.DispatchEvent, self.node_state_changed)
n.Update()
for link in model.IterLinks():
l = link_view(self.node_dict[link.src], self.node_dict[link.dst])
self.link_dict[link] = l
dc = wx.ClientDC(self)
self.Draw(dc)
def forward_radio_packet(self, link):
if link in self.link_dict:
l = self.link_dict[link]
l.flashcount += 1
# Return the link to its original color after a delay.
wx.FutureCall(500, self.flash_link_off, l, link)
dc = wx.ClientDC(self)
l.Draw(dc)
l.src.Draw(dc)
l.dst.Draw(dc)
def flash_link_off(self, link, linkmodel):
# make sure this link hasn't been deleted
if linkmodel in self.link_dict:
link.flashcount -= 1
dc = wx.ClientDC(self)
link.Draw(dc)
link.src.Draw(dc)
link.dst.Draw(dc)
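# Library-free sketch of the cross-thread hand-off implemented by DispatchEvent()
# and OnIdle() above: worker threads only enqueue (callback, args) pairs, and the
# GUI thread later drains the queue and invokes the callbacks. The demo callback
# and the use of the threading module here are illustrative only.
def _sketch_dispatch_demo():
    import threading
    queue = event_queue()
    seen = []
    def on_event(name):
        seen.append(name)                        # would touch wx objects in real code
    def worker():
        queue.put((on_event, ("node-added",)))   # like DispatchEvent()
    t = threading.Thread(target=worker)
    t.start()
    t.join()
    for callback, args in queue.get():           # like OnIdle()
        callback(*args)
    return seen                                  # -> ['node-added']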
|
|
#!/usr/bin/env python
# This script checks the folder (and selected subfolders) assigned by the user to see whether the mp-*** entries listed in a text file are available.
# If available, the selected property at the selected concentration and temperature is reported.
# If not, "N.A." is written next to that entry in the list.
# By: Alireza Faghaninia
import argparse
import os
scripts_path = '~/dekode/'
os.system('cp ' + scripts_path + 'run_aMoBT.py .')
from run_aMoBT import find_reference, get_me_dielectrics
os.system("cp " + scripts_path + "calc_def_potential.py .")
from calc_def_potential import calc_def_potential
os.system('cp ' + scripts_path + 'find_DOS_peaks.py .')
from find_DOS_peaks import find_DOS_peaks
def find_properties(filename, n, T): #find the transport properties at a given n and T from aMoBT output
count = 0
mobility = 0
conductivity = 0
thermopower = 0
at_correct_n = False
if os.path.exists(filename):
with open(filename) as aMoBT_output:
for row in aMoBT_output:
line = row.split()
if at_correct_n:
if float(line[0]) == T:
mobility = float(line[1])
conductivity = float(line[2])
thermopower = float(line[3])
at_correct_n = False
if len(line) > 3:
if 'Carrier' in line[0] and float(line[3]) == n:
at_correct_n = True
return mobility, conductivity, thermopower
def find_effective_mass(filename):
m_e = 0.0000
m_h = 0.0000
if os.path.exists(filename):
with open(filename) as aMoBT_output:
for row in aMoBT_output:
line = row.split()
if len(line) >= 2:
if line[0] == '***Proceed':
if 'm*_e/m' in line[7]:
m_e = float(line[9])
if 'm*_h/m' in line[7]:
m_h = float(line[9])
return m_e, m_h
def total_time(job_list): # job_list is a list of job subfolder names (strings), e.g. geom, self, phonon, etc.
t_total = 0.0
for job in job_list:
if os.path.exists(job):
os.chdir(job)
if os.path.exists('OUTCAR'):
with open('OUTCAR', 'r') as outcar:
for line in outcar:
if 'Elapsed time (sec):' in line:
l = line.split()
t_total += float(l[3])
if 'deform' in job:
for j in ['-10', '-9', '-8', '-7', '-6', '-5', '-4', '-3', '-2', '-1', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10']:
if os.path.exists('part' + j):
os.chdir('part' + j)
if os.path.exists('OUTCAR'):
with open('OUTCAR', 'r') as outcar:
for line in outcar:
if 'Elapsed time (sec):' in line:
l = line.split()
t_total += float(l[3])
os.chdir('../')
os.chdir('../')
return(t_total)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--filename", help="The filename that contains the list, default=checklist", required=False, default="checklist")
parser.add_argument("-n", "--n", help="Carrier concentration", required=False, default=1e20)
parser.add_argument("-d", "--detail", help="How much detail in data, more or less", required=False, default="less")
parser.add_argument("-dir", "--dir", help="The folder name under which the AMSET calculations are, default = None", required=False, default=None)
parser.add_argument("-T", "--T", help="Temperature(K)", required=False, default=300)
parser.add_argument("-fr", "--free_e", help="To use free-electron density of states or not (true or false)", required=False, default="true")
parser.add_argument("-fo", "--formula", help="Whether to print formulas or not (options: T or F)", required=False, default=False)
args = parser.parse_args()
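# Example invocation (hypothetical script name):
#   python check_status.py -f checklist -n 1e20 -T 300 -d more -fo t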
if args.formula in ['T', 't', 'true', 'True', 'TRUE']:
args.formula = True
else:
print('\nYou can include the formula of each material in status.txt by using --fo t\n')
print("You can get more data (e.g. dielectric constants) by using -d more option\n")
folders = ['./', 'new_TEs/', 'new_TCOs/']
if args.dir:
n_type_folder = os.path.join(args.dir, '1_n-type_aMoBT_free-e=' + args.free_e)
p_type_folder = os.path.join(args.dir, '2_p-type_aMoBT_free-e=' + args.free_e)
else:
n_type_folder = '1_n-type_aMoBT_free-e=' + args.free_e
p_type_folder = '2_p-type_aMoBT_free-e=' + args.free_e
clist = []
swd = os.getcwd()
with open(args.filename,'r') as raw_list:
for line in raw_list:
if len(line)>1:
supposed_id = line.split()[0]
# if ('mp-' in supposed_id) or ('mvc-' in supposed_id):
clist.append(line.split()[0])
stat = open('status.txt', 'w')
rem = open('remaining.txt', 'w')
stat.write('n={} T={}\n'.format(str(args.n), str(args.T)))
rem.write('n={} T={}\n'.format(str(args.n), str(args.T)))
if args.detail == "less":
stat.write('%30s%12s%12s%12s %10s%7s%10s%10s%10s%7s%9s%9s\n' % ('location_of_mp-id_if-any', 'formula', 'mu-cm2/V.s', 'sigma-S/cm', 'S-uV/K', 'PF', 'p_mu', 'p_sigma', 'p_S', 'p_PF', 'm_e', 'm_h'))
elif args.detail == "more":
stat.write('%30s%12s%12s%12s %10s%7s%10s%10s%10s%7s%9s%9s%8s%8s%7s%7s%6s%6s\n' % ('location_of_mp-id_if-any', 'formula', 'mu-cm2/V.s', 'sigma-S/cm', \
'S-uV/K', 'PF', 'p_mu', 'p_sigma', 'p_S', 'p_PF', 'm_e', 'm_h',\
'omegaLO', 'omegaTO', 'eps0', 'epsinf', 'nEdef', 'pEdef'))
for c in clist:
if ":" in c:
stat.write(c + "\n")
continue
formula = c
if args.formula:
try:
apikey = 'fDJKEZpxSyvsXdCt'
from pymatgen.matproj.rest import MPRester
matproj = MPRester(apikey)
formula = matproj.get_data(c, prop="pretty_formula")[0]["pretty_formula"]
spacegroup = matproj.get_data(c, prop="spacegroup")[0]["spacegroup"]
except:
formula = 'API-failed'
proceed = False
for subf in folders:
if os.path.exists(subf + c):
proceed = True
c_path = subf + c
if proceed:
os.chdir(c_path)
if args.detail == "more":
os.chdir('nself')
val_kpoint, con_kpoint, eval, econ, core = find_reference(scripts_path)
os.chdir('../')
try:
LO_phonon, TO_phonon = find_DOS_peaks('phonon/total_dos.dat')
try:
static_dielectric, highf_dielectric = get_me_dielectrics('./dielectric/OUTCAR', LO_phonon, TO_phonon)
except:
static_dielectric = highf_dielectric = 0
except:
LO_phonon= TO_phonon = static_dielectric = highf_dielectric = 0
try:
E_deformation_p, E_deformation_n = calc_def_potential('deform/ENERGY_INFO.txt')
except:
E_deformation_p = E_deformation_n = 0
#t_tot = total_time(['geom', 'self', 'nself', 'nself_aMoBT', 'p_nself_aMoBT', 'dielectric', 'phonon', 'deform'])
mobility_n, conductivity_n, thermopower_n = find_properties(n_type_folder + '/aMoBT_output.txt', float(args.n), float(args.T))
mobility_p, conductivity_p, thermopower_p = find_properties(p_type_folder + '/aMoBT_output.txt', float(args.n), float(args.T))
m_e, m_h_dummy = find_effective_mass(n_type_folder + '/log.out')
m_e_dummy, m_h = find_effective_mass(p_type_folder + '/log.out')
os.chdir(swd)
if abs(mobility_n) > 10000:
mobility_n = 10000
if abs(mobility_p) > 10000:
mobility_p = 10000
if abs(thermopower_n) > 10000:
thermopower_n = 10000
if abs(thermopower_p) > 10000:
thermopower_p = 10000
if abs(mobility_n) >= 10000 or abs(mobility_p) >= 10000 or abs(thermopower_n) >= 10000 or abs(thermopower_p) >= 10000:
mobility_n = "N/A"
mobility_p = "N/A"
thermopower_n = "N/A"
thermopower_p = "N/A"
if args.detail == "less":
try:
stat.write('%30s,%12s,%12.2f,%12.2f ,%10.2f,%7.2f,%10.2f,%10.2f,%10.2f,%7.2f,%9.4f,%9.4f\n' % (c_path.split("/")[-1], formula, mobility_n, conductivity_n, thermopower_n, conductivity_n*thermopower_n**2/1e6, mobility_p, conductivity_p, thermopower_p, conductivity_p*thermopower_p**2/1e6, m_e, m_h))
except:
stat.write('%30s,%12s,%12s,%12s ,%10s,%7s,%10s,%10s,%10s,%7s,%9.4f,%9.4f\n' % (c_path.split("/")[-1], formula, mobility_n, "N/A", thermopower_n, "N/A", mobility_p,"N/A", thermopower_p, "N/A", m_e, m_h))
elif args.detail == "more":
stat.write('%30s,%12s,%12.2f,%12.2f ,%10.2f,%7.2f,%10.2f,%10.2f,%10.2f,%7.2f,%9.4f,%9.4f,%8.2f,%8.2f,%7.2f,%7.2f,%6.2f,%6.2f\n' %\
(c_path, formula, mobility_n, conductivity_n, thermopower_n, conductivity_n*thermopower_n**2/1e6,
mobility_p, conductivity_p, thermopower_p, conductivity_p*thermopower_p**2/1e6, m_e, m_h,
LO_phonon, TO_phonon, static_dielectric, highf_dielectric, E_deformation_n, E_deformation_p))
else:
stat.write('%30s,%12s\n' % (c, 'N/A'))
# mpstart = c.find('mp-')
# rem.write(c[mpstart:] + '\n')
rem.write(c + '\n')
stat.close()
rem.close()
print('\nDONE! see status.txt and remaining.txt')
print('Number of entries {0}'.format(len(clist)))
print('Number of unique IDs {0}'.format(len(set(clist))))
|
|
#!/usr/bin/env python
"""
A language-agnostic configuration parser.
Currently supports YAML, JSON, INI and TOML serialization formats.
"""
import collections
import contextlib
import functools
import inspect
import json
import logging
import multiprocessing
import os
import re
import sys
import threading
import warnings
try:
import configparser # py3
except ImportError:
import ConfigParser as configparser
__all__ = [
# constants
"version_info", "__version__",
# functions
'register', 'parse', 'parse_with_envvars', 'discard', 'schema',
'get_parsed_conf',
# validators
'isemail', 'isin', 'isnotin', 'istrue', 'isurl', 'isip46', 'isip4',
'isip6',
# exceptions
'Error', 'ValidationError', 'AlreadyParsedError', 'NotParsedError',
'RequiredSettingKeyError', 'TypesMismatchError', 'AlreadyRegisteredError',
'UnrecognizedSettingKeyError',
]
__version__ = '0.2.2'
__author__ = 'Giampaolo Rodola'
__license__ = 'MIT'
version_info = tuple([int(num) for num in __version__.split('.')])
_PY3 = sys.version_info >= (3, )
# TODO: these are currently treated as case-insensitive; instead we should
# do "True", "TRUE" etc and ignore "TrUe".
_STR_BOOL_TRUE = set(("1", "yes", "true", "on"))
_STR_BOOL_FALSE = set(("0", "no", "false", "off"))
_EMAIL_RE = re.compile(r"^.+@.+\..+$")
# http://stackoverflow.com/a/7995979/376587
_URL_RE = re.compile(
r'^https?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?|' # domain
r'localhost|' # localhost
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # or IPv4
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
_DEFAULT = object()
_threading_lock = threading.Lock()
_multiprocessing_lock = multiprocessing.Lock()
_conf_map = {}
_parsed = False
logger = logging.getLogger(__name__)
if _PY3:
basestring = str
unicode = str
# =============================================================================
# exceptions
# =============================================================================
class Error(Exception):
"""Base exception class from which derive all others."""
def __repr__(self):
return self.__str__()
class ValidationError(Error):
"""Raised when validation through schema(validator=callable)
doesn't pass (callable return False).
This can be used within your validator in order to throw custom
error messages.
"""
def __init__(self, msg=None):
self.msg = msg
# these are set later in parse()
self.section = None
self.key = None
self.value = None
def __str__(self):
key = "'%s.%s'" % (self.section, self.key) if self.section else \
repr(self.key)
msg = "%s setting key with value %r didn't pass validation" % (
key, self.value)
if self.msg:
msg += "; %s" % self.msg
return msg
class AlreadyParsedError(Error):
"""Raised when parse() or parse_with_envvars() is called twice."""
def __str__(self):
return 'configuration was already parsed once; you may want to use ' \
'discard() and parse() again'
class AlreadyRegisteredError(Error):
"""Raised by @register when registering the same section twice."""
def __init__(self, section):
self.section = section
def __str__(self):
return "a configuration class was already registered for " \
"section %r" % self.section
class NotParsedError(Error):
"""Raised when get_parsed_conf() is called but parse() has not been
called yet.
"""
def __str__(self):
return 'configuration is not parsed yet; use parse() first'
# --- exceptions raised on parse()
class UnrecognizedSettingKeyError(Error):
"""Raised on parse if the configuration file defines a setting key
which is not defined by the default configuration class.
"""
def __init__(self, section, key, new_value):
self.section = section
self.key = key
self.new_value = new_value
def __str__(self):
if not _has_multi_conf_classes() and _conf_map:
klass = _conf_map[None]
txt = "config class %s.%s" % (klass.__module__, klass.__name__)
else:
txt = "any of the config classes"
key = "%s.%s" % (self.section, self.key) if self.section else self.key
return ("config file provides setting key %r with value %r but "
"setting key %r is not defined in %s" % (
key, self.new_value, key, txt))
class RequiredSettingKeyError(Error):
"""Raised when the config file doesn't specify a setting key which
was required via schema(required=True).
"""
def __init__(self, section, key):
self.section = section
self.key = key
def __str__(self):
key = "%s.%s" % (self.section, self.key) if self.section else self.key
return "configuration class requires %r setting key to be specified " \
"via config file or environment variable" % (key)
class TypesMismatchError(Error):
"""Raised when config file overrides a setting key having a type
which is different than the original one defined in the
configuration class.
"""
def __init__(self, section, key, default_value, new_value):
self.section = section
self.key = key
self.default_value = default_value
self.new_value = new_value
def __str__(self):
key = "%s.%s" % (self.section, self.key) if self.section else self.key
return "type mismatch for setting key %r (default_value=%r, %s) got " \
"%r (%s)" % (key, self.default_value, type(self.default_value),
self.new_value, type(self.new_value))
# =============================================================================
# internal utils
# =============================================================================
def _log(s):
logger.debug(s)
def _has_multi_conf_classes():
"""Return True if more than one config class has been register()ed."""
return len(_conf_map) > 1
def _has_sectionless_conf(cmap=None):
if cmap is None:
cmap = _conf_map
return None in cmap
@contextlib.contextmanager
def _lock_ctx():
with _threading_lock:
with _multiprocessing_lock:
yield
def _isiter(obj):
if _PY3:
return isinstance(obj, collections.abc.Iterable)
else:
try:
iter(obj)
except TypeError:
return False
return True
# =============================================================================
# validators
# =============================================================================
def istrue(value):
"""Assert value evaluates to True."""
if not bool(value):
raise ValidationError("bool(%r) evaluates to False" % value)
return True
def isin(seq):
"""Assert value is in a sequence."""
def wrapper(seq, value):
if value not in seq:
raise ValidationError(
"expected a value amongst %r, got %r" % (seq, value))
return True
if not _isiter(seq):
raise TypeError("%r is not iterable" % (seq))
if not seq:
raise ValueError("%r sequence can't be empty" % (seq))
return functools.partial(wrapper, seq)
def isnotin(seq):
"""Assert value is not in a sequence."""
def wrapper(seq, value):
if value in seq:
raise ValidationError(
"expected a value not in %r sequence, got %r" % (seq, value))
return True
if not _isiter(seq):
raise TypeError("%r is not iterable".format(seq))
if not seq:
raise ValueError("%r sequence can't be empty".format(seq))
return functools.partial(wrapper, seq)
def isemail(value):
"""Assert value is a valid email."""
if not isinstance(value, basestring):
raise ValidationError("expected a string, got %r" % value)
if re.match(_EMAIL_RE, value) is None:
raise ValidationError("not a valid email")
return True
def isurl(value):
"""Assert value is a valid url. This includes urls starting with
"http" and "https", IPv4 urls (e.g. "http://127.0.0.1") and
optional port (e.g. "http://localhost:8080").
"""
if not isinstance(value, basestring):
raise ValidationError("expected a string, got %r" % value)
if re.match(_URL_RE, value) is None:
raise ValidationError("not a valid URL")
return True
def isip46(value):
"""Assert value is a valid IPv4 or IPv6 address.
On Python < 3.3 requires ipaddress module to be installed.
"""
import ipaddress # requires "pip install ipaddress" on python < 3.3
if not isinstance(value, basestring):
raise ValidationError("expected a string, got %r" % value)
if not _PY3 and not isinstance(value, unicode):
value = unicode(value)
try:
if "/" in value:
raise ValueError
ipaddress.ip_address(value)
except ValueError:
raise ValidationError("not a valid IP address")
return True
def isip4(value):
"""Assert value is a valid IPv4 address."""
if not isinstance(value, basestring):
raise ValidationError("expected a string, got %r" % value)
octs = value.split('.')
try:
assert len(octs) == 4
for x in octs:
x = int(x)
assert x >= 0 and x <= 255
except (AssertionError, ValueError):
raise ValidationError("not a valid IPv4 address")
return True
def isip6(value):
"""Assert value is a valid IPv6 address.
On Python < 3.3 requires ipaddress module to be installed.
"""
import ipaddress # requires "pip install ipaddress" on python < 3.3
if not isinstance(value, basestring):
raise ValidationError("expected a string, got %r" % value)
if not _PY3 and not isinstance(value, unicode):
value = unicode(value)
try:
ipaddress.IPv6Address(value)
except ValueError:
raise ValidationError("not a valid IPv6 address")
return True
# =============================================================================
# parsers
# =============================================================================
def parse_yaml(file):
import yaml # requires pip install pyyaml
return yaml.load(file, Loader=yaml.FullLoader)
def parse_toml(file):
import toml # requires pip install toml
return toml.loads(file.read())
def parse_json(file):
content = file.read()
if not content.strip():
# empty JSON file; do not explode in order to be consistent with
# other formats (for now at least...)
return {}
return json.loads(content)
def parse_ini(file):
config = configparser.ConfigParser()
config.read(file.name)
ret = {}
for section, values in config._sections.items():
ret[section] = {}
for key, value in values.items():
ret[section][key] = value
ret[section].pop('__name__', None)
return ret
# =============================================================================
# rest of public API
# =============================================================================
class schema(collections.namedtuple('field',
['default', 'required', 'validator', 'type_check'])):
def __new__(cls, default=_DEFAULT, required=False, validator=None,
type_check=True):
if not required and default is _DEFAULT:
raise ValueError("specify a default value or set required=True")
if validator is not None:
if not _isiter(validator):
if not callable(validator):
raise TypeError("%r is not callable" % validator)
else:
for v in validator:
if not callable(v):
raise TypeError("%r is not callable" % v)
return super(schema, cls).__new__(
cls, default, required, validator, type_check)
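# A minimal sketch (illustrative names, not part of this module) of how
# ``schema`` is intended to be used inside a register()ed config class:
#
#     password = schema(required=True)                 # must be provided
#     email = schema(default='foo@bar.com', validator=isemail)
#     port = schema(default=21, type_check=False)      # skip type checking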
def register(section=None):
"""A decorator which registers a configuration class which will
be parsed later.
If `section` is `None` it is assumed that the configuration file
will not be split into sub-sections; otherwise *section* is the name
of the specific section which will be referenced by the config file.
All class attributes starting with an underscore are ignored, as are
methods, classmethods and other routines.
A class decorated with this method becomes dict()-able.
"""
class meta_wrapper(type):
def __iter__(self):
# this will make the class dict()able
for k, v in inspect.getmembers(self):
if not k.startswith('_') and not inspect.isroutine(v):
yield (k, v)
def __getitem__(self, key):
return getattr(self, key)
def __delitem__(self, key):
delattr(self, key)
def __contains__(self, key):
return hasattr(self, key)
def __len__(self):
return len(dict(self))
def add_metaclass(klass):
name = klass.__name__
bases = klass.__bases__
# is this really necessary?
skip = set(('__dict__', '__weakref__'))
dct = dict((k, v) for k, v in vars(klass).items() if k not in skip)
new_class = meta_wrapper(name, bases, dct)
return new_class
def wrapper(klass):
if not inspect.isclass(klass):
raise TypeError("register decorator is supposed to be used "
"against a class (got %r)" % klass)
_log("registering %s.%s" % (klass.__module__, klass.__name__))
with _lock_ctx():
new_class = add_metaclass(klass)
_conf_map[section] = new_class
return new_class
with _lock_ctx():
if section in _conf_map:
raise AlreadyRegisteredError(section)
if _parsed:
msg = "configuration class defined after parse(); global " \
"configuration will not reflect it and it will remain " \
"unparsed"
warnings.warn(msg, UserWarning)
return lambda klass: add_metaclass(klass)
if _has_sectionless_conf():
# There's a root (sectionless) config class. Verify the new
# section name does not collide with any of its setting keys.
root_conf_class = _conf_map.get(None)
if section in root_conf_class:
raise Error(
"attempting to register section %r when previously "
"registered root class %r already defines a section with "
"the same name" % (section, root_conf_class))
if section is not None and not isinstance(section, basestring):
raise TypeError("invalid section; expected either string or None, "
"got %r" % section)
if isinstance(section, basestring):
if " " in section or not section.strip():
raise ValueError("invalid section name %r" % section)
return wrapper
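# Sketch of typical usage (class and key names below are illustrative):
#
#     @register()                  # root, sectionless settings
#     class root_config:
#         debug = False
#
#     @register(section='ftp')     # settings under the "ftp" section
#     class ftp_config:
#         port = 21
#         username = 'ftp'
#
# After parse() the decorated classes are dict()-able, e.g.
# dict(ftp_config) == {'port': 21, 'username': 'ftp'}.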
def get_parsed_conf():
"""Return the whole parsed configuration as a dict.
Raise NotParsedError if parse() hasn't been called yet.
"""
with _lock_ctx():
if not _parsed:
raise NotParsedError
conf_map = _conf_map.copy()
ret = {}
# root section
if _has_sectionless_conf(conf_map):
conf_class = conf_map.pop(None)
ret = dict(conf_class)
# other sections
for section, conf_class in conf_map.items():
ret[section] = dict(conf_class)
return ret
class _Parser:
def __init__(self, conf_file=None, file_parser=None, type_check=True,
parse_envvars=False, envvar_case_sensitive=False):
"""Do all the work."""
global _parsed
if _parsed:
raise AlreadyParsedError
self.conf_file = conf_file
self.file_parser = file_parser
self.type_check = type_check
self.envvar_case_sensitive = envvar_case_sensitive
self.file_ext = None
self.new_conf = self.get_conf_from_file()
if parse_envvars:
self.update_conf_from_envvars()
self.process_conf(self.new_conf)
_parsed = True
def get_conf_from_file(self):
"""Parse config file (if any) and returns a dict representation
of it (can also be an empty dict).
"""
# no conf file
if self.conf_file is None:
_log("conf file not specified")
if self.file_parser is not None:
raise ValueError(
"can't specify 'file_parser' option and no 'conf_file'")
else:
return {}
# parse conf file
if isinstance(self.conf_file, basestring):
file = open(self.conf_file, 'r')
_log("using conf file %s" % (self.conf_file))
else:
file = self.conf_file
_log("using conf file-like object %s" % (self.conf_file))
with file:
pmap = {'.yaml': parse_yaml,
'.yml': parse_yaml,
'.toml': parse_toml,
'.json': parse_json,
'.ini': parse_ini}
if self.file_parser is None:
if not hasattr(file, 'name'):
raise Error("can't determine file format from a file "
"object with no 'name' attribute")
try:
self.file_ext = os.path.splitext(file.name)[1]
parser = pmap[self.file_ext]
except KeyError:
raise ValueError("don't know how to parse %r (extension "
"not supported)" % file.name)
if self.file_ext == '.ini' and _has_sectionless_conf():
raise Error("can't parse ini files if a sectionless "
"configuration class has been registered")
else:
parser = self.file_parser
return parser(file) or {}
def update_conf_from_envvars(self):
"""Iterate over all process env vars and return a dict() of
env vars whose name match they setting keys defined by conf
class.
"""
conf_map = _conf_map.copy()
env = os.environ.copy()
env_names = set([x for x in env.keys() if x.isupper()])
for section, conf_class in conf_map.items():
for key_name in dict(conf_class).keys():
check_name = (
key_name.upper() if not self.envvar_case_sensitive
else key_name)
if check_name in env_names:
default_value = getattr(conf_class, key_name)
raw_value = env[key_name.upper()]
new_value = self.cast_value(
section, key_name, default_value, raw_value)
if section is None:
self.new_conf[key_name] = new_value
else:
if section not in self.new_conf:
self.new_conf[section] = {}
self.new_conf[section][key_name] = new_value
def cast_value(self, section, key, default_value, new_value):
"""Cast a value depending on default value type."""
type_check = self.type_check # global opt
if isinstance(default_value, schema):
type_check = default_value.type_check # per-schema opt
default_value = default_value.default
if isinstance(default_value, bool):
if new_value.lower() in _STR_BOOL_TRUE:
new_value = True
elif new_value.lower() in _STR_BOOL_FALSE:
new_value = False
else:
if type_check:
raise TypesMismatchError(
section, key, default_value, new_value)
elif isinstance(default_value, int):
try:
new_value = int(new_value)
except ValueError:
if type_check:
raise TypesMismatchError(
section, key, default_value, new_value)
elif isinstance(default_value, float):
try:
new_value = float(new_value)
except ValueError:
if type_check:
raise TypesMismatchError(
section, key, default_value, new_value)
else:
# leave the new value unmodified (str)
pass
return new_value
def process_conf(self, new_conf):
conf_map = _conf_map.copy()
if not conf_map:
raise Error("no registered conf classes were found")
# iterate over file / envvar conf
for key, new_value in new_conf.items():
# this should never happen
assert key is not None, key
if key in conf_map:
# We're dealing with a section.
# We may have multiple register()ed conf classes.
# "new_value" in this case is actually a dict of sub-section
# items.
section = key
conf_class = conf_map[section]
# TODO: turn this into a proper error
assert isinstance(new_value, dict), new_value
# assert new_value, new_value
for k, nv in new_value.items():
self.process_pair(section, k, nv, conf_class)
else:
# We're not dealing with a section.
section = None
try:
conf_class = conf_map[None]
except KeyError:
raise UnrecognizedSettingKeyError(None, key, new_value)
self.process_pair(section, key, new_value, conf_class)
self.run_last_schemas()
def process_pair(self, section, key, new_value, conf_class):
"""Given a setting key / value pair extracted either from the
config file or env vars, process (validate) it and override
the config class's original key value.
"""
try:
# The default value defined in the conf class.
default_value = getattr(conf_class, key)
except AttributeError:
# Conf file defines a key which does not exist in the
# conf class.
raise UnrecognizedSettingKeyError(section, key, new_value)
# Cast values for ini files (which only support string type).
if self.file_ext == '.ini':
new_value = self.cast_value(section, key, default_value, new_value)
# Look for type mismatch.
is_schema = isinstance(default_value, schema)
if not is_schema:
self.check_type(section, key, default_value, new_value)
else:
schema_ = default_value
if schema_.type_check:
self.check_type(section, key, schema_.default, new_value)
# Run validators.
if is_schema:
schema_ = default_value
if schema_.validator is not None:
self.run_validators(schema_, section, key, new_value)
# Finally replace key value.
sec_key = key if section is None else "%s.%s" % (section, key)
_log("overriding setting key %r (value=%r) to new value %r".format(
sec_key, default_value, new_value))
setattr(conf_class, key, new_value)
def check_type(self, section, key, default_value, new_value):
"""Raise TypesMismatchError if config file or env var wants to
override a setting key with a type which is different than the
original one defined in the config class.
"""
doit = (self.type_check and
default_value is not None and
new_value is not None)
if doit and type(new_value) != type(default_value):
if (not _PY3 and
isinstance(new_value, basestring) and
isinstance(default_value, basestring)):
# On Python 2 we don't want to make a distinction
# between str and unicode.
pass
else:
raise TypesMismatchError(
section, key, default_value, new_value)
@staticmethod
def run_validators(schema_, section, key, new_value):
"""Run schema validators and raise ValidationError on failure."""
validators = schema_.validator
if not _isiter(validators):
validators = [validators]
for validator in validators:
exc = None
sec_key = key if section is None else "%s.%s" % (section, key)
_log("running validator %r for key %r with value "
"%r".format(validator, sec_key, new_value))
try:
ok = validator(new_value)
except ValidationError as err:
exc = ValidationError(err.msg)
else:
if not ok:
exc = ValidationError()
if exc is not None:
exc.section = section
exc.key = key
exc.value = new_value
raise exc
@staticmethod
def run_last_schemas():
"""Iterate over configuration classes in order to collect all
schemas which were not overwritten by the config file.
"""
conf_map = _conf_map.copy()
for section, conf_class in conf_map.items():
for key, value in conf_class.__dict__.items():
if isinstance(value, schema):
schema_ = value
if schema_.required:
raise RequiredSettingKeyError(section, key)
if schema_.validator is not None:
_Parser.run_validators(schema_, section, key, value)
setattr(conf_class, key, value.default)
def parse(conf_file=None, file_parser=None, type_check=True):
"""Parse configuration class(es) replacing values if a
configuration file is provided.
Params:
- (str|file) conf_file: a path to a configuration file or an
existing file-like object or None.
If `None` configuration class will be parsed anyway in order
to validate `schema`s.
- (callable) file_parser: the function parsing the configuration
file and converting it to a dict. If `None` a default parser
will be picked up depending on the file extension.
You may want to override this either to support new file
extensions or types.
- (bool) type_check: when `True` raise `TypesMismatchError` in
case an option specified in the configuration file has a different
type than the one defined in the configuration class.
"""
with _lock_ctx():
_Parser(conf_file=conf_file, file_parser=file_parser,
type_check=type_check)
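# Sketch: parsing a hypothetical "config.yaml"; the parser is picked from the
# file extension (see the pmap table in _Parser.get_conf_from_file()):
#
#     parse('config.yaml')        # overrides the registered defaults
#     conf = get_parsed_conf()    # whole parsed configuration as a dict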
def parse_with_envvars(conf_file=None, file_parser=None, type_check=True,
case_sensitive=False):
"""Same as parse() but also takes environment variables into account.
It must be noted that env vars take precedence over the config file
(if specified).
Only upper cased environment variables are taken into account.
By default (case_sensitive=False) env var "FOO" will override a
setting key with the same name in a non case sensitive fashion
('foo', 'Foo', 'FOO', etc.).
Also "sections" are not supported so if multiple config classes
define a setting key "foo" all of them will be overwritten.
If `case_sensitive` is True then it is supposed that the config
class(es) define all upper cased setting keys.
"""
with _lock_ctx():
_Parser(conf_file=conf_file,
file_parser=file_parser,
type_check=type_check,
parse_envvars=True,
envvar_case_sensitive=case_sensitive)
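# Sketch: an upper cased env var overrides the setting key of the same name
# and takes precedence over the config file; string values are cast against
# the default's type (see _Parser.cast_value), e.g.:
#
#     # $ PORT=2121 python app.py        (hypothetical entry point)
#     parse_with_envvars('config.yaml')  # ftp_config.port becomes 2121 (int)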
def discard():
"""Discard previous configuration (if any)."""
global _parsed
with _lock_ctx():
_conf_map.clear()
_parsed = False
if not _PY3:
del num
|
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
from datetime import datetime
import click
import semantic_version
from platformio import util
from platformio.commands import PlatformioCLI
from platformio.compat import ci_strings_are_equal
from platformio.package.exception import ManifestException, MissingPackageManifestError
from platformio.package.lockfile import LockFile
from platformio.package.manager._download import PackageManagerDownloadMixin
from platformio.package.manager._install import PackageManagerInstallMixin
from platformio.package.manager._legacy import PackageManagerLegacyMixin
from platformio.package.manager._registry import PackageManageRegistryMixin
from platformio.package.manager._uninstall import PackageManagerUninstallMixin
from platformio.package.manager._update import PackageManagerUpdateMixin
from platformio.package.manifest.parser import ManifestParserFactory
from platformio.package.meta import (
PackageItem,
PackageMetaData,
PackageSpec,
PackageType,
)
from platformio.project.helpers import get_project_cache_dir
class BasePackageManager( # pylint: disable=too-many-public-methods,too-many-instance-attributes
PackageManagerDownloadMixin,
PackageManageRegistryMixin,
PackageManagerInstallMixin,
PackageManagerUninstallMixin,
PackageManagerUpdateMixin,
PackageManagerLegacyMixin,
):
_MEMORY_CACHE = {}
def __init__(self, pkg_type, package_dir):
self.pkg_type = pkg_type
self.package_dir = package_dir
self.log = self._setup_logger()
self._MEMORY_CACHE = {}
self._lockfile = None
self._download_dir = None
self._tmp_dir = None
self._registry_client = None
def __repr__(self):
return (
f"{self.__class__.__name__} <type={self.pkg_type} "
f"package_dir={self.package_dir}>"
)
def _setup_logger(self):
logger = logging.getLogger(str(self.__class__.__name__).replace("Package", " "))
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(name)s: %(message)s")
sh = logging.StreamHandler(sys.stdout)
sh.setFormatter(formatter)
logger.handlers.clear()
logger.addHandler(sh)
return logger
def set_log_level(self, level):
self.log.setLevel(level)
def lock(self):
if self._lockfile:
return
self.ensure_dir_exists(os.path.dirname(self.package_dir))
self._lockfile = LockFile(self.package_dir)
self.ensure_dir_exists(self.package_dir)
self._lockfile.acquire()
def unlock(self):
if hasattr(self, "_lockfile") and self._lockfile:
self._lockfile.release()
self._lockfile = None
def __del__(self):
self.unlock()
def memcache_get(self, key, default=None):
return self._MEMORY_CACHE.get(key, default)
def memcache_set(self, key, value):
self._MEMORY_CACHE[key] = value
def memcache_reset(self):
self._MEMORY_CACHE.clear()
@staticmethod
def is_system_compatible(value):
if not value or "*" in value:
return True
return util.items_in_list(value, util.get_systype())
@staticmethod
def ensure_dir_exists(path):
if not os.path.isdir(path):
os.makedirs(path)
assert os.path.isdir(path)
return path
@staticmethod
def ensure_spec(spec):
return spec if isinstance(spec, PackageSpec) else PackageSpec(spec)
@property
def manifest_names(self):
raise NotImplementedError
def get_download_dir(self):
if not self._download_dir:
self._download_dir = self.ensure_dir_exists(
os.path.join(get_project_cache_dir(), "downloads")
)
return self._download_dir
def get_tmp_dir(self):
if not self._tmp_dir:
self._tmp_dir = self.ensure_dir_exists(
os.path.join(get_project_cache_dir(), "tmp")
)
return self._tmp_dir
def find_pkg_root(self, path, spec): # pylint: disable=unused-argument
if self.manifest_exists(path):
return path
for root, _, _ in os.walk(path):
if self.manifest_exists(root):
return root
raise MissingPackageManifestError(", ".join(self.manifest_names))
def get_manifest_path(self, pkg_dir):
if not os.path.isdir(pkg_dir):
return None
for name in self.manifest_names:
manifest_path = os.path.join(pkg_dir, name)
if os.path.isfile(manifest_path):
return manifest_path
return None
def manifest_exists(self, pkg_dir):
return self.get_manifest_path(pkg_dir)
def load_manifest(self, src):
path = src.path if isinstance(src, PackageItem) else src
cache_key = "load_manifest-%s" % path
result = self.memcache_get(cache_key)
if result:
return result
candidates = (
[os.path.join(path, name) for name in self.manifest_names]
if os.path.isdir(path)
else [path]
)
for item in candidates:
if not os.path.isfile(item):
continue
try:
result = ManifestParserFactory.new_from_file(item).as_dict()
self.memcache_set(cache_key, result)
return result
except ManifestException as e:
if not PlatformioCLI.in_silence():
self.log.warning(click.style(str(e), fg="yellow"))
raise MissingPackageManifestError(", ".join(self.manifest_names))
@staticmethod
def generate_rand_version():
return datetime.now().strftime("0.0.0+%Y%m%d%H%M%S")
def build_metadata(self, pkg_dir, spec, vcs_revision=None):
manifest = self.load_manifest(pkg_dir)
metadata = PackageMetaData(
type=self.pkg_type,
name=manifest.get("name"),
version=manifest.get("version"),
spec=spec,
)
if not metadata.name or spec.has_custom_name():
metadata.name = spec.name
if vcs_revision:
metadata.version = "%s+sha.%s" % (
metadata.version if metadata.version else "0.0.0",
vcs_revision,
)
if not metadata.version:
metadata.version = self.generate_rand_version()
return metadata
def get_installed(self):
if not os.path.isdir(self.package_dir):
return []
cache_key = "get_installed"
if self.memcache_get(cache_key):
return self.memcache_get(cache_key)
result = []
for name in sorted(os.listdir(self.package_dir)):
if name.startswith("_tmp_installing"): # legacy tmp folder
continue
pkg_dir = os.path.join(self.package_dir, name)
if not os.path.isdir(pkg_dir):
continue
pkg = PackageItem(pkg_dir)
if not pkg.metadata:
try:
spec = self.build_legacy_spec(pkg_dir)
pkg.metadata = self.build_metadata(pkg_dir, spec)
except MissingPackageManifestError:
pass
if not pkg.metadata:
continue
if self.pkg_type == PackageType.TOOL:
try:
if not self.is_system_compatible(
self.load_manifest(pkg).get("system")
):
continue
except MissingPackageManifestError:
pass
result.append(pkg)
self.memcache_set(cache_key, result)
return result
def get_package(self, spec):
if isinstance(spec, PackageItem):
return spec
spec = self.ensure_spec(spec)
best = None
for pkg in self.get_installed():
if not self.test_pkg_spec(pkg, spec):
continue
assert isinstance(pkg.metadata.version, semantic_version.Version)
if spec.requirements and pkg.metadata.version not in spec.requirements:
continue
if not best or (pkg.metadata.version > best.metadata.version):
best = pkg
return best
@staticmethod
def test_pkg_spec(pkg, spec):
# "id" mismatch
if spec.id and spec.id != pkg.metadata.spec.id:
return False
# external "URL" mismatch
if spec.external:
# local folder mismatch
if os.path.abspath(spec.url) == os.path.abspath(pkg.path) or (
spec.url.startswith("file://")
and os.path.abspath(pkg.path) == os.path.abspath(spec.url[7:])
):
return True
if spec.url != pkg.metadata.spec.url:
return False
# "owner" mismatch
elif spec.owner and not ci_strings_are_equal(
spec.owner, pkg.metadata.spec.owner
):
return False
# "name" mismatch
elif not spec.id and not ci_strings_are_equal(spec.name, pkg.metadata.name):
return False
return True
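# A rough usage sketch. "MyToolManager" is a hypothetical subclass defined here
# only for illustration; the real concrete managers live elsewhere in
# platformio.package.manager.*:
#
#     class MyToolManager(BasePackageManager):
#         @property
#         def manifest_names(self):
#             return ["package.json"]
#
#     pm = MyToolManager(PackageType.TOOL, "/path/to/packages")
#     pm.get_installed()                        # list of PackageItem objects
#     pm.get_package(PackageSpec("some-tool"))  # best installed match, or None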
|
|
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [db] dan@danbrooks.net
#
# Diarc topology objects
#
# A Diarc topology consists of two types of objects - logical objects and graphical
# objects which visually represent logical objects.
#
# Logical objects: Graphical Objects:
# Vertex Block
# Edge Band(s)
# Connection, Source, Sink Snap(s)
#
#
# TODO:
# - Document how to create a topology, and how to remove objects from it.
#
# t = Topology()
#
# v1 = Vertex(t)
# v2 = Vertex(t)
# v3 = Vertex(t)
#
# Create an edge from v1 to v2
# e1 = Edge(t)
# src1 = Source(t,v1,e1)
# snk1 = Sink(t,v2,e1)
#
# Connect the edge to v3 as well
# snk2 = Sink(t,v3,e1)
#
# Add an edge from v3 to v1
# e2 = Edge(t)
# src2 = Source(t,v3,e2)
# snk3 = Sink(t,v1,e2)
#
# arrange the vertices in order v1 v2 v3
# v1.block.index = 0
# v2.block.index = 1
# v3.block.index = 2
from util import *
from snapkey import *
import types
import logging
class Topology(object):
def __init__(self):
self._vertices = TypedList(Vertex)
self._edges = TypedList(Edge)
self._sources = TypedList(Source)
self._sinks = TypedList(Sink)
# Visual Settings
self._hide_disconnected_snaps = False
@property
def vertices(self):
""" returns an unordered list of vertex objects in the topology """
return self._vertices
@property
def edges(self):
""" returns an unordered list of edge objects in the topology """
return self._edges
@property
def blocks(self):
""" Returns dictionary of all blocks who have a proper index value assigned """
return dict(filter(lambda x: isinstance(x[0],int),[(v.block.index,v.block) for v in self._vertices]))
@property
def bands(self):
""" Returns dictionary of all bands, by altitude. Bands which have not
been assigned altitudes are not reported. All bands that have an altitude
(regardless of if they are being used (indicated by isUsed) are reported.
"""
allBands = [band for edge in self._edges for band in [edge.posBand,edge.negBand]]
if None in [band.altitude for band in allBands]:
logging.warning("WARNING: There are bands lacking altitude information! Not all bands are represented")
return dict([(band.altitude,band) for band in filter(lambda x: isinstance(x.altitude,int),allBands)])
@property
def snaps(self):
""" Returns dictionary of all snaps, by snapkey. Snaps which have not been
assigned an order are not reported. All snaps that have an order regardless
of if they are being used (indicated by isUsed) are reported.
"""
containers = [container for block in [[v.block.emitter, v.block.collector] for v in self._vertices] for container in block]
snaps = [(snap.snapkey(),snap) for snaps in [container.values() for container in containers] for snap in snaps]
return dict(snaps)
def __get_hide_disconnected_snaps(self):
return self._hide_disconnected_snaps
def __set_hide_disconnected_snaps(self, state):
typecheck(state, bool, "state")
self._hide_disconnected_snaps = state
hide_disconnected_snaps = property(__get_hide_disconnected_snaps, __set_hide_disconnected_snaps)
class Vertex(object):
""" A Vertex in a directional graph.
A vertex can connect to multiple edges as either an input (source) or output
(sink) to the edge. It is graphically represented by a Block object.
Sources - outgoing connections to Edges
Sinks - incoming connections from Edges
"""
def __init__(self,topology):
self._topology = typecheck(topology,Topology,"topology")
self._topology._vertices.append(self)
# Visual Component
self._block = Block(self)
def release(self):
logging.debug("releasing vertex %r"%self)
logging.debug("... removing from topology")
# Release yourself from the topology and remove the reference. This
# needs to be done before destroying blocks, since we precalculate
# block neighbors and that depends on iterating over the vertex list.
# If we don't cache block neighbors, then the order no longer matters.
self._topology._vertices.remove(self)
# Release connections to and from the vertex
logging.debug("... destroying connections")
for connection in self._topology._sources + self._topology._sinks:
if connection.vertex == self:
connection.release()
logging.debug("... releasing associated block")
# Release the block object associated with this vertex
self._block._release()
self._block = None
logging.debug("... destroying reference to topology")
self._topology = None
@property
def sources(self):
""" Returns an unordered list of outgoing connections (Source objects)
from this vertex.
"""
return filter(lambda x: x.vertex == self, self._topology._sources)
@property
def sinks(self):
""" Returns an unordered list of outgoing connections (Sink objects)
from this vertex.
"""
return filter(lambda x: x.vertex == self, self._topology._sinks)
@property
def block(self):
""" Returns the relative graphical object (Block) for this Vertex.
The block cannot be changed
"""
return self._block
class Edge(object):
""" A directional multiple-input multiGple-output edge in the graph. Inputs
(sources) and outputs (sinks) are linked to vertices. An edge is represented
graphically by either 1 or 2 Band objects.
Sources - inputs from vertices
Sinks - outputs to vertices
"""
def __init__(self,topology):
self._topology = typecheck(topology,Topology,"topology")
self._topology._edges.append(self)
# Visual Component
self._pBand = Band(self,True)
self._nBand = Band(self,False)
def release(self):
""" Removes this edge from the topology """
logging.debug("releasing edge %r"%self)
# Release connections to and from this edge
logging.debug("... destroying connections")
for connection in self._topology._sources + self._topology._sinks:
if connection.edge == self:
connection.release()
# Release each of your bands
logging.debug("... releasing associated bands")
self._pBand._release()
self._nBand._release()
# Remove references to your bands
self._pBand = None
self._nBand = None
logging.debug("... removing from topology")
# Release yourself from the topology
self._topology._edges.remove(self)
# Remove reference to the topology
self._topology = None
@property
def sources(self):
""" returns list of all source connections to this edge """
return filter(lambda x: x.edge == self, self._topology._sources)
@property
def sinks(self):
""" returns list of all sink connections from this edge """
return filter(lambda x: x.edge == self, self._topology._sinks)
@property
def posBand(self):
return self._pBand
@property
def negBand(self):
return self._nBand
class Connection(object):
""" A base class for connecting a vertex to an edge, but without specifing
the nature of the connection (input or output). Rather then using this
class directly, Source or Sink objects should be used.
"""
def __init__(self,topology,vertex,edge):
self._topology = typecheck(topology,Topology,"topology")
self._vertex = typecheck(vertex,Vertex,"vertex")
self._edge = typecheck(edge,Edge,"edge")
if (not isinstance(self,Source)) and (not isinstance(self,Sink)):
raise Exception("Do not create connections directly! Use Source or Sink")
self._snap = Snap(self)
def release(self):
""" Removes this connection between a vertex and an edge from the topology.
This does NOT release either the vertex or the edge objects, it simply
removes this particular reference to them.
"""
logging.debug("... releasing associated snap")
# Release and remove the reference to your snap
self._snap._release()
self._snap = None
logging.debug("... deleting pointer to vertex and edge")
# Remove references to vertex and edge
self._vertex = None
self._edge = None
@property
def snap(self):
return self._snap
@property
def edge(self):
return self._edge
@property
def vertex(self):
return self._vertex
@property
def block(self):
return self.vertex.block
class Source(Connection):
""" A logical connection from a Vertex to an Edge. Graphically represented
by a Snap object.
"""
def __init__(self,topology,vertex,edge):
super(Source,self).__init__(topology,vertex,edge)
# Check to make sure there is not already a source going from this vertex to this edge
for source in vertex.sources + edge.sources:
if vertex == source.vertex and edge == source.edge:
raise Exception("Duplicate Source!")
self._topology._sources.append(self)
def release(self):
logging.debug("Releasing Source %r"%self)
super(Source,self).release()
# Remove yourself from the topology
logging.debug("... removing from topology")
self._topology._sources.remove(self)
self._topology = None
class Sink(Connection):
""" A logical connection from an Edge to a Vertex. Graphically represented
by a Snap object.
"""
def __init__(self,topology,vertex,edge):
super(Sink,self).__init__(topology,vertex,edge)
# Check to make sure there is not already a sink going from this edge to this vertex
for sink in vertex.sinks + edge.sinks:
if vertex == sink.vertex and edge == sink.edge:
raise Exception("Duplicate Sink!")
self._topology._sinks.append(self)
def release(self):
logging.debug("Releasing Sink %r"%self)
super(Sink,self).release()
# Remove yourself from the topology
logging.debug("... removing from topology")
self._topology._sinks.remove(self)
self._topology = None
class Block(object):
""" Visual Representation of a Vertex
Visual Parameters
Index - Unique int value to determine order in which to draw blocks.
Lower values to the left, higher to the right. Indices do not
necessarily need to be consecutive.
"""
def __init__(self,vertex):
self._vertex = typecheck(vertex,Vertex,"vertex")
self._topology = vertex._topology
# Visual Properties
self._index = None
# blocks to left and right
# self._leftBlock = None
# self._rightBlock = None
def _release(self):
""" releases this block from the topology.
This should only be called by Vertex.release()
"""
logging.debug("removing block %r"%self)
logging.debug("... removing references to left and right blocks")
#This needs to recalculate the left and right blocks on either side
#NOTE: This does not collapse index values, so there becomes a "hole"
# in the index values
# if self._leftBlock:
# self._leftBlock._updateNeighbors()
# if self._rightBlock:
# self._rightBlock._updateNeighbors()
# Remove cached references to left and right blocks
# self._leftBlock = None
# self._rightBlock = None
logging.debug("... remove reference to vertex")
# We don't need to call release() on the vertex, it should already be
# called, we just need to remove the reference
self._vertex = None
logging.debug("... removing reference to topology")
self._topology = None
@property
def vertex(self):
""" Returns the logical component (Vertex) for this relative object.
The vertex is bound to this block, and cannot be changed.
"""
return self._vertex
@property
def emitter(self):
""" Dictionary of Snaps that represent source connections for this block.
Only snaps which have been assigned an order value are represented, since
the order is used as the dictionary key. If hide_disconnected_snaps is
set in the topology, only return snaps where isLinked() is true.
"""
snaps = [(s.snap.order, s.snap) for s in self._vertex.sources if isinstance(s.snap.order, int)]
if self._topology.hide_disconnected_snaps:
snaps = [tup for tup in snaps if tup[1].isLinked()]
return dict(snaps)
# return dict(filter(lambda x: isinstance(x[0],int), [(s.snap.order, s.snap) for s in self._vertex.sources]))
@property
def collector(self):
""" Dictionary of Snaps that represent sink connections for this block.
Only snaps which have been assigned an order value are represented, since
the order is used as the dictionary key. If hide_disconnected_snaps is
set in the topology, only return snaps where isLinked() is true.
"""
snaps = [(s.snap.order, s.snap) for s in self._vertex.sinks if isinstance(s.snap.order, int)]
if self._topology.hide_disconnected_snaps:
snaps = [tup for tup in snaps if tup[1].isLinked()]
return dict(snaps)
# return dict(filter(lambda x: isinstance(x[0],int),[(s.snap.order,s.snap) for s in self._vertex.sinks]))
@property
def leftBlock(self):
# """ Returns the block to the left, determined by block wich has the next
# lowest index value. This value is cached when the index is set.
# """
# return self._leftBlock
if not isinstance(self._index,int):
return None
blocks = self._topology.blocks
if len(blocks) == 0:
return None
if self._index > min(blocks.keys()):
return blocks[max([b for b in blocks.keys() if b < self._index])]
# Else
return None
@property
def rightBlock(self):
# """ returns the block to the right, determined by block which has the next
# highest index value. This value is cached when the index is set.
# """
# return self._rightBlock
if not isinstance(self._index,int):
return None
blocks = self._topology.blocks
if len(blocks) == 0:
return None
if self._index < max(blocks.keys()):
return blocks[min([b for b in blocks.keys() if b > self._index])]
# Else:
return None
# def _updateNeighbors(self):
# """ Update leftIndex and rightIndex, as well as previous neighbors """
# blocks = self._topology.blocks
# # First update your former neighbor's left and right values
# # If there was an item to the left, it needs a new right hand value
# if len(blocks) > 0:
# # update old neighbors
# if not isinstance(self._leftBlock,types.NoneType):
# if self._leftBlock.index < max(blocks.keys()):
# self._leftBlock._rightBlock = blocks[min([b for b in blocks.keys() if b > self._leftBlock.index])]
# else:
# self._leftBlock._rightBlock = None
#
# if not isinstance(self._rightBlock,types.NoneType):
# if self._rightBlock.index > min(blocks.keys()):
# self._rightBlock._leftBlock = blocks[max([b for b in blocks.keys() if b < self._rightBlock.index])]
#
# else:
# self._rightBlock._leftBlock = None
#
# # Set my current neighbors
# if isinstance(self._index,types.NoneType):
# self._leftBlock = None
# self._rightBlock = None
# else:
# # Calculate new values of left and right blocks
# # update the right value of the left block and left value of the right block
# # If you are on an edge, leave the value at None
# if self._index > min(blocks.keys()):
# self._leftBlock = blocks[max([b for b in blocks.keys() if b < self._index])]
# self._leftBlock._rightBlock = self
# else:
# self._leftBlock = None
#
# if self._index < max(blocks.keys()):
# self._rightBlock = blocks[min([b for b in blocks.keys() if b > self._index])]
# self._rightBlock._leftBlock = self
# else:
# self._rightBlock = None
def __get_index(self):
return self._index
def __set_index(self,value):
""" Check to see if a block with the same index already exists """
if self._index == value:
return
if isinstance(value,types.NoneType):
self._index = value
# self._updateNeighbors()
return
allVertices = self._topology._vertices
allBlocks = [v.block for v in allVertices]
if value in [b.index for b in allBlocks]:
raise Exception("Block with index %r already exists!"%value)
self._index = value
# self._updateNeighbors()
index = property(__get_index,__set_index)
class Band(object):
""" Visual Representation of an Edge.
An Edge can have up to two Bands - one with positive altitude and one negative.
Visual Parameters
Rank - the Z drawing order (higher values closer to user)
Altitude - the distance above or below the Block ribbon
"""
def __init__(self,edge,isPositive):
self._edge = typecheck(edge,Edge,"edge")
self._topology = edge._topology
# Visual Properties
self._isPositive = isPositive
self._altitude = None
self._rank = None
def _release(self):
""" Release all dependent references this object holds """
logging.debug("removing band %r"%self)
logging.debug("... removing edge reference")
self._edge = None
logging.debug("... removing reference to topology")
self._topology = None
@property
def emitters(self):
""" returns a list of source snaps that reach this band """
# We compare the position of each source against the position of the furthest
# away sink (depending on pos/neg altitude).
sinkBlockIndices = [s.block.index for s in self.edge.sinks]
sinkBlockIndices = filter(lambda x: isinstance(x,int), sinkBlockIndices)
if len(sinkBlockIndices) < 1:
return list()
sources = list()
# Find Sources if this is a Positive Band
if self._altitude and self._altitude > 0:
maxSinkIndex = max(sinkBlockIndices)
sources = filter(lambda src: src.block.index < maxSinkIndex, self.edge.sources)
# Find Sources if this is a Negative Band
elif self._altitude and self._altitude < 0:
minSinkIndex = min(sinkBlockIndices)
sources = filter(lambda src: src.block.index >= minSinkIndex, self.edge.sources)
return [s.snap for s in sources]
@property
def collectors(self):
""" returns list of sink snaps that reach this band """
sourceBlockIndices = [s.block.index for s in self.edge.sources]
sourceBlockIndices = filter(lambda x: isinstance(x,int), sourceBlockIndices)
if len(sourceBlockIndices) < 1:
return list()
sinks = list()
# Find Sinks if this is a Positive Band
if self._altitude and self._altitude > 0:
minSourceIndex = min(sourceBlockIndices)
sinks = filter(lambda sink: sink.block.index > minSourceIndex, self.edge.sinks)
# Find Sinks if this is a Negative Band
elif self._altitude and self._altitude < 0:
maxSourceIndex = max(sourceBlockIndices)
sinks = filter(lambda sink: sink.block.index <= maxSourceIndex, self.edge.sinks)
return [s.snap for s in sinks]
def isUsed(self):
""" returns true if this band is needed to represent connections on
its edge, else false. This is determined by checking if any sources
reach this band.
"""
# This should be equivalent to checking if any sinks reach this band,
# but this has not been tested or proven.
# sinkBlockIndices = [s.block.index for s in self.edge.sinks if isinstance(s.block.index,int)]
# sourceBlockIndices = [s.block.index for s in self.edge.sources if isinstance(s.block.index,int)]
sinkBlockIndices = [s.block.index for s in self.collectors]
sourceBlockIndices = [s.block.index for s in self.emitters]
if len(sinkBlockIndices) == 0 or len(sourceBlockIndices) == 0:
return False
# If positive and there is a sink to the right of some source
if self._isPositive and max(sinkBlockIndices) > min(sourceBlockIndices):
return True
elif (not self._isPositive) and min(sinkBlockIndices) <= max(sourceBlockIndices):
return True
else:
return False
@property
def isPositive(self):
return self._isPositive
@property
def topBand(self):
""" Returns the band with the next highest altitude, or None if either
there is no band above this one or the block ribbon is above it.
Bands for which isUsed() is false are skipped over.
"""
if not isinstance(self._altitude,int):
return None
bands = self._topology.bands
available = [altitude for altitude in bands.keys() if altitude > self._altitude]
if self._isPositive:
# TODO: we probably dont need band._isPositive if altitude > self._altitude
available = [altitude for altitude in available if bands[altitude]._isPositive and bands[altitude].isUsed()]
else:
available = [altitude for altitude in available if (not bands[altitude]._isPositive) and bands[altitude].isUsed()]
return bands[min(available)] if len(available) > 0 else None
# posMax = max([band.altitude for band in bands.values() if band.isUsed()])
# negVals = [altitude for altitude in bands.keys() if altitude < 0]
# negMax = max(negVals) if len(negVals) > 0 else 0
# if (self._isPositive and self._altitude < posMax) or ((not self._isPositive) and self._altitude < negMax) :
# return bands[min([a for a in bands.keys() if a > self._altitude])]
# return None
@property
def bottomBand(self):
""" Returns the band with the next lowest altitude, or None if either
there is no band below this one or the block ribbon is below it.
Bands for which isUsed() is false are skipped over.
"""
if not isinstance(self._altitude,int):
return None
bands = self._topology.bands
available = [altitude for altitude in bands.keys() if altitude < self._altitude]
if self._isPositive:
available = [altitude for altitude in available if bands[altitude]._isPositive and bands[altitude].isUsed()]
else:
available = [altitude for altitude in available if (not bands[altitude]._isPositive) and bands[altitude].isUsed()]
return bands[max(available)] if len(available) > 0 else None
# posVals = [altitude for altitude in bands.keys() if altitude > 0]
# posMin = min(posVals) if len(posVals) > 0 else 0
# negMin = min(bands.keys())
# if (self._isPositive and self._altitude > posMin) or ((not self._isPositive) and self._altitude > negMin):
# return bands[max([a for a in bands.keys() if a < self._altitude])]
# return None
def __get_edge(self):
return self._edge
def __get_rank(self):
return self._rank
def __set_rank(self,val):
if self._rank == val: return
# Allow "unsetting" rank
if val is None:
self._rank = val
return
typecheck(val,int,"val")
if val < 0:
raise Exception("Rank must be >= 0, received %d"%val)
# Make sure the rank is unique among all bands of the same altitude
allBands = [edge.posBand if self.isPositive else edge.negBand for edge in self._topology._edges]
if val in [b._rank for b in allBands]:
raise Exception("%s Band with rank %d already exists!"%("Positive" if self._isPositive else "Negative",val))
self._rank = val
def __get_altitude(self):
return self._altitude
def __set_altitude(self,value):
if self._altitude == value:
return
# Always allow "unsetting" value
if value is None:
self._altitude = value
return
if self._isPositive and value <= 0:
raise Exception("Altitude must be positive")
if (not self._isPositive) and value >= 0:
raise Exception("Altitude must be negative")
# Make sure the altitude is unique among all bands
allEdges = self._topology._edges
allBands = filter(lambda x: isinstance(x,Band),[band for edge in allEdges for band in [edge.posBand,edge.negBand]])
if value in [b.altitude for b in allBands]:
raise Exception("Band with altitude %d already exists!"%value)
self._altitude = value
edge = property(__get_edge)
rank = property(__get_rank,__set_rank)
altitude = property(__get_altitude,__set_altitude)
class Snap(object):
""" Visual Representation of a Source or Sink.
Snaps are layedout horizontally inside of an Emitter or Collector of a Block.
A Snap provides a mapping between a Source/Sink and one or two Bands associated with a single Edge.
Visual Layout Paramters
Order - 0-indexed order in which to draw snaps within an Emitter or Collector
"""
def __init__(self,connection):
self._connection = typecheck(connection,Connection,"connection")
self._order = None
def snapkey(self):
""" generates the snapkey for this snap """
return gen_snapkey(self.block.index, "collector" if self.isSink() else "emitter", self._order)
def _release(self):
""" This should only be called by a Connection.release() """
logging.debug("releasing snap %r"%self)
# the connection should
logging.debug("... removing reference to connection")
self._connection = None
# print "... removing reference to topology"
# self._topology = None
@property
def posBandLink(self):
""" returns the positive band connection - if it exists.
Just because a positive band link exists does not mean that it should
be drawn. The check for if we should draw the connection happens at drawing
time when we decide if we should be using positive or negative"""
return self._connection.edge._pBand
@property
def negBandLink(self):
""" returns the negative band connection ...which for fabrik does not exist."""
return None
@property
def block(self):
return self._connection.vertex.block
@property
def connection(self):
return self._connection
@property
def bandLinks(self):
return filter(lambda x: isinstance(x,Band), [self.posBandLink,self.negBandLink])
def isSource(self):
return isinstance(self._connection,Source)
def isSink(self):
return isinstance(self._connection,Sink)
def isLinked(self):
""" returns true if this snap is connected to at least one sink, else false. """
return True if self.posBandLink or self.negBandLink else False
def isUsed(self):
""" returns true if topology.hide_disconnected_snaps is True and isLinked is True,
or if topology.hide_disconnected_snaps is false. Otherwise, return true.
"""
if self._connection._topology.hide_disconnected_snaps:
return True if self.isLinked() else False
else:
return True
@property
def leftSnap(self):
""" Returns the snap directly to the left of this snap within either an
emitter or collector. Returns None if this is leftmost snap.
"""
snaps = self.block.emitter if self.isSource() else self.block.collector
if isinstance(self._order,int) and self._order > min(snaps.keys()):
return snaps[max([s for s in snaps.keys() if s < self._order])]
else:
return None
@property
def rightSnap(self):
""" Returns the snap directly to the right of this snap within either
an emitter or collector. Returns None if this is rightmost snap.
"""
snaps = self.block.emitter if self.isSource() else self.block.collector
if isinstance(self._order,int) and self._order < max(snaps.keys()):
return snaps[min([s for s in snaps.keys() if s > self._order])]
else:
return None
def __get_order(self):
return self._order
def __set_order(self,value):
""" Check to see if a snap with the same order already exists """
if self._order == value:
return
# Always allow "unsetting values"
if value is None:
self._order = value
return
snaps = list()
# Check to see if the order value exists in this emitter or collector
if isinstance(self._connection,Source):
snaps = [e.snap for e in self._connection.vertex.sources]
if isinstance(self._connection,Sink):
snaps = [e.snap for e in self._connection.vertex.sinks]
orders = [s.order for s in snaps if s.order is not None]
if value in orders:
raise Exception("Order value %d already exists!"%value)
# Update value
self._order = value
order = property(__get_order,__set_order)
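# A minimal usage sketch (hypothetical names; the surrounding Topology,
# Block and Connection classes are defined elsewhere in this module):
# given a snap `s` assigned order 2 inside its emitter, `s.leftSnap` is the
# snap with the largest order below 2 and `s.rightSnap` the snap with the
# smallest order above 2, or None at either end.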
|
|
#!/usr/bin/env python
import roslib
import rospy
import smach
import smach_ros
import actionlib
import time
import threading
from smach_ros import SimpleActionState
from std_msgs.msg import String
from std_msgs.msg import Float32MultiArray
RECOGNIZER_CALLBACK = None
RECOGNIZER_CALLBACK2 = None
def handleRecognizerMessage(msg):
if RECOGNIZER_CALLBACK is not None:
RECOGNIZER_CALLBACK(msg)
def handleRecognizerMessage2(msg):
if RECOGNIZER_CALLBACK2 is not None:
RECOGNIZER_CALLBACK2(msg)
# define state Idle
class Idle(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes=['Stop', 'Sarah'],
input_keys=['Idle_lastWord_in',
'Idle_lastState_in'],
output_keys=['Idle_lastWord_out',
'Idle_lastState_out',
'Idle_lastCommand_out'])
self.word = ""
self.state = "Idle"
self.pub = rospy.Publisher('SaraVoice', String, queue_size=1)
self.sub = rospy.Subscriber("/recognizer_1/output", String, self.callback, queue_size=1)
def execute(self, userdata):
'''global RECOGNIZER_CALLBACK
RECOGNIZER_CALLBACK = self.callback'''
rospy.loginfo('Executing state Idle')
rospy.loginfo('Idle - Waiting for keyword: SARAH')
self.word = ""
while True:
if self.word == 'stop':
userdata.Idle_lastWord_out = self.word
userdata.Idle_lastState_out = self.state
userdata.Idle_lastCommand_out = 'stop'
self.sub.unregister()
return 'Stop'
if self.word == 'sarah':
userdata.Idle_lastWord_out = self.word
userdata.Idle_lastState_out = self.state
self.sub.unregister()
return 'Sarah'
def callback(self,data):
if data.data == "stop":
rospy.loginfo('Idle - Keyword STOP detected !!')
self.word = data.data
if data.data == "sarah":
rospy.loginfo('Idle - Keyword SARAH detected !!')
self.word = data.data
def SayX(self, ToSay_str):
rospy.loginfo(ToSay_str)
self.pub.publish(ToSay_str)
# define state WaitingCommand
class WaitingCommand(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes=['Command','DoIt','Sarah','Stop','Timeout'],
input_keys=['WComm_lastWord_in',
'WComm_lastState_in'],
output_keys=['WComm_lastWord_out',
'WComm_lastState_out',
'WComm_lastCommand_out'])
self.word = ""
self.state = "WaitingCommand"
self.pub = rospy.Publisher('SaraVoice', String, queue_size=1)
def execute(self, userdata):
global RECOGNIZER_CALLBACK
RECOGNIZER_CALLBACK = self.callback
rospy.loginfo('Executing state WaitingCommand')
userdata.WComm_lastState_out = self.state
self.SayX('Yes master')
self.word = ""
timeout = time.time() + 15 # 15 sec
while True:
if self.word == 'stop':
userdata.WComm_lastWord_out = self.word
userdata.WComm_lastCommand_out = self.word
return 'Stop'
if self.word == 'sarah':
userdata.WComm_lastWord_out = self.word
userdata.WComm_lastCommand_out = self.word
return 'Sarah'
if self.word == 'say hello':
userdata.WComm_lastWord_out = self.word
userdata.WComm_lastCommand_out = self.word
self.SayX('Hi. I am an assistance robot here to serve you. I am not totally functional for now, but soon I will be able to do the chores for you.')
return 'Timeout'
if self.word == 'what do you see':
userdata.WComm_lastWord_out = self.word
userdata.WComm_lastCommand_out = self.word
return 'DoIt'
if self.word == 'get me the beer':
userdata.WComm_lastWord_out = self.word
userdata.WComm_lastCommand_out = self.word
return 'Command'
if self.word == 'be happy':
userdata.WComm_lastWord_out = self.word
userdata.WComm_lastCommand_out = self.word
return 'DoIt'
if self.word == 'be sad':
userdata.WComm_lastWord_out = self.word
userdata.WComm_lastCommand_out = self.word
return 'DoIt'
if self.word == 'follow me':
userdata.WComm_lastWord_out = self.word
userdata.WComm_lastCommand_out = self.word
return 'Command'
if self.word == 'go foward':
userdata.WComm_lastWord_out = self.word
userdata.WComm_lastCommand_out = self.word
return 'Command'
if self.word == 'go backward':
userdata.WComm_lastWord_out = self.word
userdata.WComm_lastCommand_out = self.word
return 'Command'
if self.word == 'rotate left':
userdata.WComm_lastWord_out = self.word
userdata.WComm_lastCommand_out = self.word
return 'Command'
if self.word == 'rotate right':
userdata.WComm_lastWord_out = self.word
userdata.WComm_lastCommand_out = self.word
return 'Command'
'''
if self.word == 'sarah':
userdata.WComm_lastWord_out = self.word
userdata.WComm_lastCommand_out = self.word
return 'Sarah'
'''
if time.time() > timeout:
userdata.WComm_lastState_out = self.state
return 'Timeout'
def callback(self,data):
if data.data == "stop":
rospy.loginfo('Wcomm - Keyword STOP detected !!')
self.word = data.data
if data.data == "get me the beer":
rospy.loginfo('Wcomm - Phrase GET ME THE BEER detected !!')
self.word = data.data
if data.data == "what do you see":
rospy.loginfo('Wcomm - Phrase WHAT DO YOU SEE detected !!')
self.word = data.data
if data.data == "follow me":
rospy.loginfo('Wcomm - Phrase FOLLOW ME detected !!')
self.word = data.data
if data.data == "be happy":
rospy.loginfo('Wcomm - Phrase BE HAPPY detected !!')
self.word = data.data
if data.data == "be sad":
rospy.loginfo('Wcomm - Phrase BE SAD detected !!')
self.word = data.data
if data.data == "say hello":
rospy.loginfo('Wcomm - Phrase SAY HI detected !!')
self.word = data.data
if data.data == 'go foward':
rospy.loginfo('Wcomm - Phrase GO FORWARD detected !!')
self.word = data.data
if data.data == 'go backward':
rospy.loginfo('Wcomm - Phrase GO BACKWARD detected !!')
self.word = data.data
if data.data == 'rotate left':
rospy.loginfo('Wcomm - Phrase ROTATE LEFT detected !!')
self.word = data.data
if data.data == 'rotate right':
rospy.loginfo('Wcomm - Phrase ROTATE RIGHT detected !!')
self.word = data.data
if data.data == "sarah":
rospy.loginfo('Wcomm - Keyword SARAH detected !!')
self.word = data.data
def SayX(self, ToSay_str):
rospy.loginfo(ToSay_str)
self.pub.publish(ToSay_str)
# define state WaitingConfirmation
class WaitingConfirmation(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes=['Timeout','Yes','No','Stop','Sarah'],
input_keys=['WConf_lastWord_in',
'WConf_lastState_in'],
output_keys=['WConf_lastWord_out',
'WConf_lastState_out'])
self.word = ""
self.state = "WaitingConfirmation"
self.lastWord = ''
self.pub = rospy.Publisher('SaraVoice', String, queue_size=10)
def execute(self, userdata):
global RECOGNIZER_CALLBACK
RECOGNIZER_CALLBACK = self.callback
rospy.loginfo('Executing state WaitingConfirmation')
userdata.WConf_lastState_out = self.state
self.lastWord = userdata.WConf_lastWord_in
self.SayX('Did you say')
self.SayX(self.lastWord)
self.word = ""
timeout = time.time() + 15 # 15 sec
while True:
if self.word == 'stop':
userdata.WConf_lastWord_out = self.word
return 'Stop'
if self.word == 'no':
userdata.WConf_lastWord_out = self.word
self.SayX('Sorry, can you repeat your command please')
return 'No'
if self.word == 'yes':
userdata.WConf_lastWord_out = self.word
self.SayX('I will now execute your order')
return 'Yes'
if time.time() > timeout:
return 'Timeout'
def callback(self,data):
if data.data == "stop":
rospy.loginfo('Keyword STOP detected !!')
self.word = data.data
if data.data == 'yes':
rospy.loginfo('Keyword YES detected !!')
self.word = data.data
if data.data == 'no':
rospy.loginfo('Keyword NO detected !!')
self.word = data.data
'''
if data.data == "sarah":
rospy.loginfo('Keyword SARAH detected !!')
self.word = data.data
'''
def SayX(self, ToSay_str):
rospy.loginfo(ToSay_str)
self.pub.publish(ToSay_str)
# define state DoSomething
class DoSomething(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes=['Done'],
input_keys=['DSome_lastWord_in',
'DSome_lastState_in',
'DSome_lastCommand_in'],
output_keys=['DSome_lastWord_out',
'DSome_lastState_out'])
self.pub = rospy.Publisher('SaraVoice', String, queue_size=10)
self.pubFollow = rospy.Publisher('voice_follow_flag', String, queue_size=10)
#self.pubEmo = rospy.Publisher('control_emo', int, queue_size=10)
self.nbObj = ''
self.lastWord = ""
self.lastState = ""
self.lastCommand = ""
self.state = "DoSomething"
self.str_follow = "stop"
def execute(self, userdata):
global RECOGNIZER_CALLBACK2
RECOGNIZER_CALLBACK2 = self.callback
rospy.loginfo('-- Executing state DoSomething --')
self.lastWord = userdata.DSome_lastWord_in
self.lastState = userdata.DSome_lastState_in
self.lastCommand = userdata.DSome_lastCommand_in
userdata.DSome_lastState_out = self.state
self.nbObj = ''
'''if self.lastCommand == "be happy":
userdata.DSome_lastState_out = self.state
self.pubEmo.publish(1)
return 'Done'
if self.lastCommand == "be sad":
userdata.DSome_lastState_out = self.state
self.pubEmo.publish(2)
return 'Done' '''
if self.lastCommand == "what do you see":
userdata.DSome_lastState_out = self.state
rospy.loginfo('DSomm - Waiting for object table !!')
while self.nbObj == '':
continue
rospy.loginfo('DSomm - object table received !!')
if self.nbObj == 0:
self.SayX('i see nothing')
else:
phrase = "i see"
for i in range(0, self.nbObj):
if i == self.nbObj - 1 and self.nbObj >= 2 :
phrase += " and "
if self.objectTable[i*12] == 3:
phrase += " a can, "
if self.objectTable[i*12] == 4:
phrase += " some mexican food, "
if self.objectTable[i*12] == 5:
phrase += " a videogame controller, "
self.SayX(phrase)
return 'Done'
rospy.loginfo(self.lastCommand)
if self.lastCommand == "follow me":
rospy.loginfo('publishing follow')
self.str_follow = 'follow'
userdata.DSome_lastState_out = self.state
self.pubFollow.publish(self.str_follow)
return 'Done'
if self.lastCommand == "stop":
rospy.loginfo('publishing stop')
self.SayX('stopping')
self.str_follow = 'stop'
userdata.DSome_lastState_out = self.state
self.pubFollow.publish(self.str_follow)
return 'Done'
if self.lastCommand == "go foward":
userdata.DSome_lastState_out = self.state
return 'Done'
if self.lastCommand == "go backward":
userdata.DSome_lastState_out = self.state
return 'Done'
if self.lastCommand == "rotate left":
userdata.DSome_lastState_out = self.state
return 'Done'
if self.lastCommand == "rotate right":
userdata.DSome_lastState_out = self.state
return 'Done'
else:
# 'Done' is the only outcome registered for this state, so fall back to it
return 'Done'
def SayX(self, ToSay_str):
rospy.loginfo(ToSay_str)
self.pub.publish(ToSay_str)
def callback(self,data):
self.nbObj = len(data.data)/12
self.objectTable = data.data
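# Note on the assumed message layout (inferred from the loop in execute(),
# not documented in the source): the /objects Float32MultiArray appears to
# pack 12 floats per detection, with the object class id at offset i*12,
# hence nbObj = len(data.data) / 12.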
# main
def main():
rospy.init_node('interpreter')
rospy.Subscriber("/objects", Float32MultiArray, handleRecognizerMessage2, queue_size=1)
# Create a SMACH state machine
sm = smach.StateMachine(outcomes=[])
with sm:
# Add states to the container
smach.StateMachine.add('Idle', Idle(),
transitions={'Sarah':'WaitingCommand',
'Stop':'DoSomething'},
remapping={'Idle_lastWord_in':'lastWord',
'Idle_lastState_in':'lastState',
'Idle_lastWord_out':'lastWord',
'Idle_lastState_out':'lastState',
'Idle_lastCommand_out':'lastCommand'})
smach.StateMachine.add('WaitingCommand', WaitingCommand(),
transitions={'Stop':'DoSomething',
'DoIt':'DoSomething',
'Sarah':'WaitingCommand',
'Command':'WaitingConfirmation',
'Timeout':'Idle'},
remapping={'WComm_lastWord_in':'lastWord',
'WComm_lastState_in':'lastState',
'WComm_lastWord_out':'lastWord',
'WComm_lastState_out':'lastState',
'WComm_lastCommand_out':'lastCommand'})
smach.StateMachine.add('WaitingConfirmation', WaitingConfirmation(),
transitions={'Timeout':'Idle',
'Yes':'DoSomething',
'No':'Idle',
'Stop':'DoSomething',
'Sarah':'WaitingCommand'},
remapping={'WConf_lastWord_in':'lastWord',
'WConf_lastState_in':'lastState',
'WConf_lastWord_out':'lastWord',
'WConf_lastState_out':'lastState'})
smach.StateMachine.add('DoSomething', DoSomething(),
transitions={'Done': 'Idle'},
remapping={'DSome_lastWord_in': 'lastWord',
'DSome_lastState_in': 'lastState',
'DSome_lastCommand_in': 'lastCommand',
'DSome_lastWord_out': 'lastWord',
'DSome_lastState_out': 'lastState',
'DSome_result_out': 'result'})
# Create a thread to execute the smach container
smach_thread = threading.Thread(target=sm.execute)
smach_thread.start()
rospy.spin()
# Request the container to preempt once ROS shuts down, then wait for the thread
sm.request_preempt()
smach_thread.join()
if __name__ == '__main__':
main()
|
|
from __future__ import print_function
import codecs
import os
import fileinput
import sys
# Import XML parser
import xml.etree.ElementTree as ET
# Import Postgres connector
import psycopg2 as pgsql
# Portability
try:
register_namespace = ET.register_namespace
except AttributeError:
def register_namespace(prefix, uri):
ET._namespace_map[uri] = prefix
# Error printing
def warning(*objs):
print("WARN: ", *objs, file=sys.stderr)
# Get the name of a tag
def valueOf(tag):
return tag.split("}")[1][0:]
# Get the value of a term
def termOnly(term):
return term.split("#")[1][0:]
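# Example (illustrative values): valueOf("{urn:isbn:1-931666-33-4}control")
# returns "control", and termOnly("http://example.org/term#Person") returns "Person".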
def lookup_db(db, table, var) :
# Try to select on the exact string we're inserting. If exists, then return that ID.
keys = []
values = []
for k in var.keys():
keys.append(k)
values.append(var[k])
selstr = ''.join(["SELECT id FROM ", table, " WHERE ", "=%s AND ".join(keys), "=%s LIMIT 1"])
db.execute(selstr, values)
tmp = db.fetchone()
if tmp is not None:
return tmp[0]
return insert_db(db, table, var);
# Insert into database
def insert_db(db, table, var) :
# Select didn't return any rows, so do the normal insert.
insstr = ''.join(["INSERT INTO ", table, " (", ",".join(var.keys()), ") values ( %(", ")s,%(".join(var.keys()), ")s ) RETURNING id;"])
db.execute(insstr, var)
return db.fetchone()[0]
# Update a table in the database
def update_db(db, table, var, where) :
insstr = ''.join(["UPDATE ", table, " SET (", ",".join(var.keys()), ") = ( %(", ")s,%(".join(var.keys()), ")s ) WHERE ", where, " RETURNING id;"])
db.execute(insstr, var)
return db.fetchone()[0]
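# Illustration of the SQL these helpers build (hypothetical table and keys;
# the column order follows dict iteration order):
# lookup_db(db, "vocabulary", {'type': 'gender', 'value': 'male'}) executes
#   SELECT id FROM vocabulary WHERE type=%s AND value=%s LIMIT 1
# and, if no row matches, insert_db builds
#   INSERT INTO vocabulary (type,value) values ( %(type)s,%(value)s ) RETURNING id;
# with psycopg2 performing the parameter substitution.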
# Define the namespaces to use
namespaces = { "snac" : "urn:isbn:1-931666-33-4" ,
"snac2" : "http://socialarchive.iath.virginia.edu/control/term#",
"schema" : "http://schema.org/",
"xlink" : "http://www.w3.org/1999/xlink",
"snac3" : "http://socialarchive.iath.virginia.edu/"}
# Register the namespaces
ET.register_namespace("eac-cpf", "urn:isbn:1-931666-33-4")
ET.register_namespace("snac2", "http://socialarchive.iath.virginia.edu/control/term#")
ET.register_namespace("snac", "http://socialarchive.iath.virginia.edu/")
ET.register_namespace("xlink", "http://www.w3.org/1999/xlink")
languages = {}
scripts = {}
# Connect to the postgres DB
db = pgsql.connect("host=localhost dbname=eaccpf user=snac password=snacsnac")
db_cur = db.cursor()
# Counter
i = 0
# For each file given on standard input, parse and look at
for filename in fileinput.input():
print("Parsing: ", filename.strip(), file=sys.stderr)
tree = ET.parse(filename.strip())
root = tree.getroot()
# Tables in SQL
cpf = {}
names = []
dates = []
sources = []
documents = []
occupations = []
places = []
subjects = []
nationalities = []
biogHists = []
cpf_otherids = []
cpf_history = []
cpf_relations = []
# Parse each known tag, in order. Any missing, report to the warning function.
# That way, we can keep track of all problematic or missing tags from the schema
for node in root:
tag = valueOf(node.tag)
if (tag == "control"):
# Handle control elements
for control in node:
ctag = valueOf(control.tag)
if (ctag == "recordId"):
cpf["ark_id"] = control.text
elif (ctag == "otherRecordId"):
cpf_otherids.append({'link_type': termOnly(control.get('localType')), 'other_id': control.text})
elif (ctag == "maintenanceStatus"):
cpf["maintenance_status"] = control.text
elif (ctag == "maintenanceAgency"):
cpf["maintenance_agency"] = control[0].text
elif (ctag == "languageDeclaration"):
cpf["language_code"] = control[0].get('languageCode')
languages[control[0].get('languageCode')] = control[0].text
cpf["script_code"] = control[1].get('scriptCode')
scripts[control[1].get('scriptCode')] = control[1].text
elif (ctag == "conventionDeclaration"):
cpf["conven_dec_citation"] = control[0].text
elif (ctag == "maintenanceHistory"):
for maint_event in control:
# handle each event individually
maint_history = {}
if (valueOf(maint_event.tag) == 'maintenanceEvent'):
#handle
for maint_part in maint_event:
if (valueOf(maint_part.tag) == 'eventType'):
maint_history["event_type"] = maint_part.text
elif (valueOf(maint_part.tag) == 'eventDateTime'):
maint_history["modified_time"] = maint_part.text
elif (valueOf(maint_part.tag) == 'agentType'):
maint_history["agent_type"] = maint_part.text
elif (valueOf(maint_part.tag) == 'agent'):
maint_history["agent"] = maint_part.text
elif (valueOf(maint_part.tag) == 'eventDescription'):
maint_history["description"] = maint_part.text
else:
warning("Unknown Tag: ", tag, ctag, valueOf(maint_event.tag), valueOf(maint_part.tag))
else:
warning("Unknown Tag: ", tag, ctag, valueOf(maint_event.tag))
cpf_history.append(maint_history)
elif (ctag == "sources"):
for source in control:
sources.append({'source_type': source.get('{http://www.w3.org/1999/xlink}type'), 'href': source.get('{http://www.w3.org/1999/xlink}href')});
# TODO: what about the full text of the source?
else:
warning("Unknown Tag: ", tag, ctag)
elif (tag == "cpfDescription"):
# Handle cpfDescription
for desc in node:
dtag = valueOf(desc.tag)
if (dtag == "identity"):
for ident in desc:
itag = valueOf(ident.tag)
if (itag == "entityType"):
cpf["entity_type"] = ident.text
elif(itag == "nameEntry"):
# convention: first name in the name table is the preferred name
# language, preference_score, authorized_form,original, corporate_name,
# contributor[{contributor, name_type}]
name = {}
name_contrib = []
name["preference_score"] = ident.get("{http://socialarchive.iath.virginia.edu/}preferenceScore")
for name_part in ident:
if (valueOf(name_part.tag) == "part"):
name["original"] = name_part.text
elif (valueOf(name_part.tag) == "alternativeForm" or
valueOf(name_part.tag) == "authorizedForm"):
name_contrib.append({"contributor":name_part.text, "name_type":valueOf(name_part.tag)})
else:
warning("Unknown Tag: ", tag, dtag, itag, valueOf(name_part.tag))
name["contributor"] = name_contrib
names.append(name)
else:
warning("Unknown Tag: ", tag, dtag, itag, valueOf(name_part.tag))
elif (dtag == "description"):
for description in desc:
d2tag = valueOf(description.tag)
if (d2tag == "existDates"):
for edates in description:
if (valueOf(edates.tag) == "dateRange"):
date = {}
date["is_range"] = True
if (valueOf(edates[0].tag) == "fromDate"):
if (edates[0].text is not None):
stddate = edates[0].get("standardDate")
if stddate[:1] == "-":
date["from_bc"] = True
stddate = stddate[1:]
date["from_date"] = stddate
date["from_original"] = edates[0].text
date["from_type"] = termOnly(edates[0].get("localType"))
if (len(edates) > 1 and valueOf(edates[1].tag) == "toDate" and edates[1].text is not None):
stddate = edates[1].get("standardDate")
if stddate[:1] == "-":
date["to_bc"] = True
stddate = stddate[1:]
date["to_date"] = stddate
date["to_original"] = edates[1].text
date["to_type"] = termOnly(edates[1].get("localType"))
elif (valueOf(edates[0].tag) == "toDate"):
if (edates[0].text is not None):
stddate = edates[0].get("standardDate")
if stddate[:1] == "-":
date["to_bc"] = True
stddate = stddate[1:]
date["to_date"] = stddate
date["to_original"] = edates[0].text
date["to_type"] = termOnly(edates[0].get("localType"))
else:
warning("Unknown Tag: ", tag, dtag, d2tag, valueOf(edates.tag), valueOf(edates[0].tag))
dates.append(date)
elif (valueOf(edates.tag) == "date"):
date = {}
date["is_range"] = False
date["from_date"] = edates.get("standardDate")
date["from_original"] = edates.text
date["from_type"] = termOnly(edates.get("localType"))
dates.append(date)
else:
warning("Unknown Tag: ", tag, dtag, d2tag, valueOf(edates.tag))
elif (d2tag == "place"):
#TODO Handle place tags and snac:placeEntry items
pass
elif (d2tag == "localDescription"):
if (termOnly(description.get("localType")) == "AssociatedSubject"):
subjects.append(description[0].text)
if (len(description) > 1):
warning("Unknown Tag: ", tag, dtag, d2tag, description[1].tag)
elif (termOnly(description.get("localType")) == "nationalityOfEntity"):
nationalities.append(description[0].text)
if (len(description) > 1):
warning("Unknown Tag: ", tag, dtag, d2tag, description[1].tag)
elif (termOnly(description.get("localType")) == "gender"):
cpf["gender"] = description[0].text
if (len(description) > 1):
warning("Unknown Tag: ", tag, dtag, d2tag, description[1].tag)
else:
warning("Unknown Attribute: ", tag, dtag, d2tag, "localType = ", description.get("localType"))
elif (d2tag == "languageUsed"):
for lang in description:
if (valueOf(lang.tag) == "language"):
cpf["language_used"] = lang.get("languageCode")
elif (valueOf(lang.tag) == "script"):
cpf["script_used"] = lang.get("scriptCode")
else:
warning("Unknown Tag: ", tag, dtag, d2tag, lang.tag)
elif (d2tag == "occupation"):
occupations.append(description[0].text)
if (len(description) > 1):
warning("Unknown Tag: ", tag, dtag, d2tag, description[1].tag)
elif (d2tag == "biogHist"):
biogHists.append(ET.tostring(description, encoding="UTF-8"))
elif (dtag == "relations"):
for rel in desc:
rtag = valueOf(rel.tag)
if (rtag == "cpfRelation"):
relation = {}
if (len(rel) > 1):
warning("Unknown Tag: ", tag, dtag, d2tag, description[1].tag)
relation["relation_type"] = termOnly(rel.get("{http://www.w3.org/1999/xlink}arcrole"))
relation["relation_ark_id"] = rel.get("{http://www.w3.org/1999/xlink}href")
relation["relation_other_type"] = termOnly(rel.get("{http://www.w3.org/1999/xlink}role"))
if (len(rel) > 0):
relation["relation_entry"] = rel[0].text
else:
relation["relation_entry"] = ""
cpf_relations.append(relation)
elif (rtag == "resourceRelation"):
relation = {}
relation["document_role"] = termOnly(rel.get("{http://www.w3.org/1999/xlink}arcrole"))
relation["href"] = rel.get("{http://www.w3.org/1999/xlink}href")
relation["document_type"] = termOnly(rel.get("{http://www.w3.org/1999/xlink}role"))
relation["link_type"] = rel.get("{http://www.w3.org/1999/xlink}type")
for relitem in rel:
if (valueOf(relitem.tag) == "relationEntry"):
relation["name"] = relitem.text
elif (valueOf(relitem.tag) == "objectXMLWrap"):
relation["xml_source"] = ET.tostring(relitem, encoding="UTF-8")
elif (valueOf(relitem.tag) == "descriptiveNote"):
relation["notes"] = ET.tostring(relitem, encoding="UTF-8")
else:
warning("Unknown Tag: ", tag, dtag, rtag, relitem.tag)
documents.append(relation)
else:
warning("Unknown Tag: ", tag, dtag, rtag)
else:
warning("Unknown Tag: ", tag, dtag)
else:
warning("Unknown Tag: ", tag)
# DB interactions:
# db_cur.execute("SQL STATEMENT %(name)s", {name:"blah",...})
# db_cur.execute("SQL STATEMENT %s, %s", ("first", "second"))
# INSERT INTO table (var, var) VALUES (%s, %s);
# TODO Handle the following data
#print("PLACES", places)
#print("RELS", cpf_relations)
# Create CPF record in database and get ID, returns id
# Lookup the types that need to be changed
if "entity_type" in cpf:
cpf["entity_type"] = lookup_db(db_cur, "vocabulary", {'type':'entity_type','value':cpf["entity_type"]})
if "gender" in cpf:
cpf["gender"] = lookup_db(db_cur, "vocabulary", {'type':'gender','value':cpf["gender"]})
if "language_code" in cpf:
cpf["language_code"] = lookup_db(db_cur, "vocabulary", {'type':'language_code','value':cpf["language_code"]})
if "script_code" in cpf:
cpf["script_code"] = lookup_db(db_cur, "vocabulary", {'type':'script_code','value':cpf["script_code"]})
if "language_used" in cpf:
cpf["language_used"] = lookup_db(db_cur, "vocabulary", {'type':'language_code','value':cpf["language_used"]})
if "script_used" in cpf:
cpf["script_used"] = lookup_db(db_cur, "vocabulary", {'type':'script_code','value':cpf["script_used"]})
if "maintenance_status" in cpf:
cpf["maintenance_status"] = lookup_db(db_cur, "vocabulary", {'type':'script_code','value':cpf["maintenance_status"]})
cpfid = insert_db(db_cur, "cpf", cpf)
print(" This record given PostgreSQL CPF_ID: ", cpfid)
#cpfid = 0 # temporary
for date_entry in dates:
date_entry["cpf_id"] = cpfid
if "to_type" in date_entry:
date_entry["to_type"] = lookup_db(db_cur, "vocabulary", {'type':'date_type','value':date_entry["to_type"]})
if "from_type" in date_entry:
date_entry["from_type"] = lookup_db(db_cur, "vocabulary", {'type':'date_type','value':date_entry["from_type"]})
insert_db(db_cur, "dates", date_entry)
for source in sources:
if "source_type" in source:
source["source_type"] = lookup_db(db_cur, "vocabulary", {'type':'source_type','value':source["source_type"]})
s_id = lookup_db(db_cur, "source", {'href':source["href"]})
if s_id is None:
s_id = insert_db(db_cur, "source", source)
insert_db(db_cur, "cpf_sources", {'cpf_id':cpfid, 'source_id':s_id})
for occupation in occupations:
if occupation is not None:
o_id = lookup_db(db_cur, "vocabulary", {'type': 'occupation', 'value':occupation})
insert_db(db_cur, "cpf_occupation", {'cpf_id':cpfid, 'occupation_id':o_id})
for subject in subjects:
if subject is not None:
s_id = lookup_db(db_cur, "vocabulary", {'type':'subject', 'value':subject})
insert_db(db_cur, "cpf_subject", {'cpf_id':cpfid, 'subject_id':s_id})
for nationality in nationalities:
if nationality is not None:
n_id = lookup_db(db_cur, "vocabulary", {'type':'nationality', 'value':nationality})
insert_db(db_cur, "cpf_nationality", {'cpf_id':cpfid, 'nationality_id':n_id})
for history in cpf_history:
history["cpf_id"] = cpfid
if "event_type" in history:
history["event_type"] = lookup_db(db_cur, "vocabulary", {'type':'event_type','value':history["event_type"]})
if "agent_type" in history:
history["agent_type"] = lookup_db(db_cur, "vocabulary", {'type':'agent_type','value':history["agent_type"]})
insert_db(db_cur, "cpf_history", history)
for otherid in cpf_otherids:
otherid["cpf_id"] = cpfid
if "link_type" in otherid:
otherid["link_type"] = lookup_db(db_cur, "vocabulary", {'type':'record_type','value':otherid["link_type"]})
insert_db(db_cur, "cpf_otherids", otherid)
for document in documents:
if "document_type" in document:
document["document_type"] = lookup_db(db_cur, "vocabulary", {'type':'document_type','value':document["document_type"]})
if "document_role" in document:
document["document_role"] = lookup_db(db_cur, "vocabulary", {'type':'document_role','value':document["document_role"]})
doc_insert = {'name':document["name"],'href':document["href"],'document_type':document["document_type"]}
if 'xml_source' in document:
doc_insert['xml_source'] = document["xml_source"]
d_id = lookup_db(db_cur, "document", {'href':document["href"]})
insert_db(db_cur, "cpf_document", {'cpf_id':cpfid,'document_id':d_id,'document_role':document["document_role"],'link_type':document["link_type"]})
first_name = True
for name in names:
n_id = insert_db(db_cur, "name", {'cpf_id':cpfid, 'original': name["original"], 'preference_score':name["preference_score"]})
for contributor in name["contributor"]:
c_id = lookup_db(db_cur, "contributor", {'short_name': contributor["contributor"]})
if "name_type" in contributor:
contributor["name_type"] = lookup_db(db_cur, "vocabulary", {'type':'name_type','value':contributor["name_type"]})
insert_db(db_cur, "name_contributor", {'name_id':n_id, 'contributor_id':c_id, 'name_type': contributor["name_type"]})
if first_name:
# update the cpf table to have this name id
update_db(db_cur, "cpf", {'name_id':n_id}, "".join(['id=',str(cpfid)]))
first_name = False
# Handle merging biog hists to one cell
first_bh = True
bh = None
for biogHist in biogHists:
if first_bh:
bh = ET.fromstring(biogHist)
first_bh = False
else:
bh.extend(ET.fromstring(biogHist))
if bh is not None:
update_db(db_cur, "cpf", {'biog_hist': ET.tostring(bh)}, "".join(['id=',str(cpfid)]))
# Commit the changes every 100000 records
i = i + 1
if i % 100000 == 0:
db.commit()
print("** Completed 100000 inserts **")
db.commit()
print("====================\n", "Inserted ", i, " total records")
# Close the database connection
db_cur.close()
db.close()
|
|
#
#
# Copyright (C) 2011, 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""IP address pool management functions.
"""
import ipaddr
from bitarray import bitarray
from base64 import b64encode
from base64 import b64decode
from ganeti import errors
def _ComputeIpv4NumHosts(network_size):
"""Derives the number of hosts in an IPv4 network from the size.
"""
return 2 ** (32 - network_size)
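# For example, _ComputeIpv4NumHosts(30) == 4 and _ComputeIpv4NumHosts(16) == 65536,
# which is where the host-count bounds below come from.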
IPV4_NETWORK_MIN_SIZE = 30
# FIXME: This limit exists for performance reasons. Remove it once the
# refactoring for performance tuning has been completed.
IPV4_NETWORK_MAX_SIZE = 16
IPV4_NETWORK_MIN_NUM_HOSTS = _ComputeIpv4NumHosts(IPV4_NETWORK_MIN_SIZE)
IPV4_NETWORK_MAX_NUM_HOSTS = _ComputeIpv4NumHosts(IPV4_NETWORK_MAX_SIZE)
class AddressPool(object):
"""Address pool class, wrapping an C{objects.Network} object.
This class provides methods to manipulate address pools, backed by
L{objects.Network} objects.
"""
FREE = bitarray("0")
RESERVED = bitarray("1")
def __init__(self, network):
"""Initialize a new IPv4 address pool from an L{objects.Network} object.
@type network: L{objects.Network}
@param network: the network object from which the pool will be generated
"""
self.network = None
self.gateway = None
self.network6 = None
self.gateway6 = None
self.net = network
self.network = ipaddr.IPNetwork(self.net.network)
if self.network.numhosts > IPV4_NETWORK_MAX_NUM_HOSTS:
raise errors.AddressPoolError("A big network with %s host(s) is currently"
" not supported. please specify at most a"
" /%s network" %
(str(self.network.numhosts),
IPV4_NETWORK_MAX_SIZE))
if self.network.numhosts < IPV4_NETWORK_MIN_NUM_HOSTS:
raise errors.AddressPoolError("A network with only %s host(s) is too"
" small, please specify at least a /%s"
" network" %
(str(self.network.numhosts),
IPV4_NETWORK_MIN_SIZE))
if self.net.gateway:
self.gateway = ipaddr.IPAddress(self.net.gateway)
if self.net.network6:
self.network6 = ipaddr.IPv6Network(self.net.network6)
if self.net.gateway6:
self.gateway6 = ipaddr.IPv6Address(self.net.gateway6)
if self.net.reservations:
self.reservations = bitarray()
# pylint: disable=E1103
self.reservations.frombytes(b64decode(self.net.reservations))
else:
self.reservations = bitarray(self.network.numhosts)
# pylint: disable=E1103
self.reservations.setall(False)
if self.net.ext_reservations:
self.ext_reservations = bitarray()
# pylint: disable=E1103
self.ext_reservations.frombytes(b64decode(self.net.ext_reservations))
else:
self.ext_reservations = bitarray(self.network.numhosts)
# pylint: disable=E1103
self.ext_reservations.setall(False)
assert len(self.reservations) == self.network.numhosts
assert len(self.ext_reservations) == self.network.numhosts
def Contains(self, address):
if address is None:
return False
addr = ipaddr.IPAddress(address)
return addr in self.network
def _GetAddrIndex(self, address):
addr = ipaddr.IPAddress(address)
if not addr in self.network:
raise errors.AddressPoolError("%s does not contain %s" %
(self.network, addr))
return int(addr) - int(self.network.network)
def Update(self):
"""Write address pools back to the network object.
"""
# pylint: disable=E1103
self.net.ext_reservations = b64encode(self.ext_reservations.tobytes())
self.net.reservations = b64encode(self.reservations.tobytes())
def _Mark(self, address, value=True, external=False):
idx = self._GetAddrIndex(address)
if external:
self.ext_reservations[idx] = value
else:
self.reservations[idx] = value
self.Update()
def _GetSize(self):
return 2 ** (32 - self.network.prefixlen)
@property
def all_reservations(self):
"""Return a combined map of internal and external reservations.
"""
return (self.reservations | self.ext_reservations)
def Validate(self):
assert len(self.reservations) == self._GetSize()
assert len(self.ext_reservations) == self._GetSize()
if self.gateway is not None:
assert self.gateway in self.network
if self.network6 and self.gateway6:
assert self.gateway6 in self.network6 or self.gateway6.is_link_local
return True
def IsFull(self):
"""Check whether the network is full.
"""
return self.all_reservations.all()
def GetReservedCount(self):
"""Get the count of reserved addresses.
"""
return self.all_reservations.count(True)
def GetFreeCount(self):
"""Get the count of unused addresses.
"""
return self.all_reservations.count(False)
def GetMap(self):
"""Return a textual representation of the network's occupation status.
"""
return self.all_reservations.to01().replace("1", "X").replace("0", ".")
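# For instance, a freshly initialized /30 network (see InitializeNetwork below),
# which reserves only its network and broadcast addresses, would map to "X..X".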
def IsReserved(self, address, external=False):
"""Checks if the given IP is reserved.
"""
idx = self._GetAddrIndex(address)
if external:
return self.ext_reservations[idx]
else:
return self.reservations[idx]
def Reserve(self, address, external=False):
"""Mark an address as used.
"""
if self.IsReserved(address, external):
if external:
msg = "IP %s is already externally reserved" % address
else:
msg = "IP %s is already used by an instance" % address
raise errors.AddressPoolError(msg)
self._Mark(address, external=external)
def Release(self, address, external=False):
"""Release a given address reservation.
"""
if not self.IsReserved(address, external):
if external:
msg = "IP %s is not externally reserved" % address
else:
msg = "IP %s is not used by an instance" % address
raise errors.AddressPoolError(msg)
self._Mark(address, value=False, external=external)
def GetFreeAddress(self):
"""Returns the first available address.
"""
if self.IsFull():
raise errors.AddressPoolError("%s is full" % self.network)
idx = self.all_reservations.index(False)
address = str(self.network[idx])
self.Reserve(address)
return address
def GenerateFree(self):
"""Returns the first free address of the network.
@raise errors.AddressPoolError: Pool is full
"""
idx = self.all_reservations.search(self.FREE, 1)
if idx:
return str(self.network[idx[0]])
else:
raise errors.AddressPoolError("%s is full" % self.network)
def GetExternalReservations(self):
"""Returns a list of all externally reserved addresses.
"""
# pylint: disable=E1103
idxs = self.ext_reservations.search(self.RESERVED)
return [str(self.network[idx]) for idx in idxs]
@classmethod
def InitializeNetwork(cls, net):
"""Initialize an L{objects.Network} object.
Reserve the network, broadcast and gateway IP addresses.
"""
obj = cls(net)
obj.Update()
for ip in [obj.network[0], obj.network[-1]]:
obj.Reserve(ip, external=True)
if obj.net.gateway is not None:
obj.Reserve(obj.net.gateway, external=True)
obj.Validate()
return obj
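# A minimal usage sketch (assuming a suitably populated objects.Network
# instance named `net`; not taken from the source):
#   pool = AddressPool.InitializeNetwork(net)  # reserves network/broadcast/gateway
#   addr = pool.GetFreeAddress()               # first free IP, now marked reserved
#   pool.Release(addr)                         # hand it back
#   print(pool.GetMap())                       # e.g. "X..X" for an otherwise empty /30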
|
|
"""Tests for the Input slider component."""
# pylint: disable=protected-access
import datetime
import pytest
import voluptuous as vol
from homeassistant.components.input_datetime import (
ATTR_DATE,
ATTR_DATETIME,
ATTR_EDITABLE,
ATTR_TIME,
ATTR_TIMESTAMP,
CONF_HAS_DATE,
CONF_HAS_TIME,
CONF_ID,
CONF_INITIAL,
CONF_NAME,
DEFAULT_TIME,
DOMAIN,
SERVICE_RELOAD,
SERVICE_SET_DATETIME,
)
from homeassistant.const import ATTR_ENTITY_ID, ATTR_FRIENDLY_NAME, ATTR_NAME
from homeassistant.core import Context, CoreState, State
from homeassistant.exceptions import Unauthorized
from homeassistant.helpers import entity_registry
from homeassistant.setup import async_setup_component
from homeassistant.util import dt as dt_util
from tests.async_mock import patch
from tests.common import mock_restore_cache
INITIAL_DATE = "2020-01-10"
INITIAL_TIME = "23:45:56"
INITIAL_DATETIME = f"{INITIAL_DATE} {INITIAL_TIME}"
@pytest.fixture
def storage_setup(hass, hass_storage):
"""Storage setup."""
async def _storage(items=None, config=None):
if items is None:
hass_storage[DOMAIN] = {
"key": DOMAIN,
"version": 1,
"data": {
"items": [
{
CONF_ID: "from_storage",
CONF_NAME: "datetime from storage",
CONF_INITIAL: INITIAL_DATETIME,
CONF_HAS_DATE: True,
CONF_HAS_TIME: True,
}
]
},
}
else:
hass_storage[DOMAIN] = {
"key": DOMAIN,
"version": 1,
"data": {"items": items},
}
if config is None:
config = {DOMAIN: {}}
return await async_setup_component(hass, DOMAIN, config)
return _storage
async def async_set_date_and_time(hass, entity_id, dt_value):
"""Set date and / or time of input_datetime."""
await hass.services.async_call(
DOMAIN,
SERVICE_SET_DATETIME,
{
ATTR_ENTITY_ID: entity_id,
ATTR_DATE: dt_value.date(),
ATTR_TIME: dt_value.time(),
},
blocking=True,
)
async def async_set_datetime(hass, entity_id, dt_value):
"""Set date and / or time of input_datetime."""
await hass.services.async_call(
DOMAIN,
SERVICE_SET_DATETIME,
{ATTR_ENTITY_ID: entity_id, ATTR_DATETIME: dt_value},
blocking=True,
)
async def async_set_timestamp(hass, entity_id, timestamp):
"""Set date and / or time of input_datetime."""
await hass.services.async_call(
DOMAIN,
SERVICE_SET_DATETIME,
{ATTR_ENTITY_ID: entity_id, ATTR_TIMESTAMP: timestamp},
blocking=True,
)
async def test_invalid_configs(hass):
"""Test config."""
invalid_configs = [
None,
{},
{"name with space": None},
{"test_no_value": {"has_time": False, "has_date": False}},
]
for cfg in invalid_configs:
assert not await async_setup_component(hass, DOMAIN, {DOMAIN: cfg})
async def test_set_datetime(hass):
"""Test set_datetime method using date & time."""
await async_setup_component(
hass, DOMAIN, {DOMAIN: {"test_datetime": {"has_time": True, "has_date": True}}}
)
entity_id = "input_datetime.test_datetime"
dt_obj = datetime.datetime(2017, 9, 7, 19, 46, 30)
await async_set_date_and_time(hass, entity_id, dt_obj)
state = hass.states.get(entity_id)
assert state.state == str(dt_obj)
assert state.attributes["has_time"]
assert state.attributes["has_date"]
assert state.attributes["year"] == 2017
assert state.attributes["month"] == 9
assert state.attributes["day"] == 7
assert state.attributes["hour"] == 19
assert state.attributes["minute"] == 46
assert state.attributes["second"] == 30
assert state.attributes["timestamp"] == dt_obj.timestamp()
async def test_set_datetime_2(hass):
"""Test set_datetime method using datetime."""
await async_setup_component(
hass, DOMAIN, {DOMAIN: {"test_datetime": {"has_time": True, "has_date": True}}}
)
entity_id = "input_datetime.test_datetime"
dt_obj = datetime.datetime(2017, 9, 7, 19, 46, 30)
await async_set_datetime(hass, entity_id, dt_obj)
state = hass.states.get(entity_id)
assert state.state == str(dt_obj)
assert state.attributes["has_time"]
assert state.attributes["has_date"]
assert state.attributes["year"] == 2017
assert state.attributes["month"] == 9
assert state.attributes["day"] == 7
assert state.attributes["hour"] == 19
assert state.attributes["minute"] == 46
assert state.attributes["second"] == 30
assert state.attributes["timestamp"] == dt_obj.timestamp()
async def test_set_datetime_3(hass):
"""Test set_datetime method using timestamp."""
await async_setup_component(
hass, DOMAIN, {DOMAIN: {"test_datetime": {"has_time": True, "has_date": True}}}
)
entity_id = "input_datetime.test_datetime"
dt_obj = datetime.datetime(2017, 9, 7, 19, 46, 30)
await async_set_timestamp(hass, entity_id, dt_util.as_utc(dt_obj).timestamp())
state = hass.states.get(entity_id)
assert state.state == str(dt_obj)
assert state.attributes["has_time"]
assert state.attributes["has_date"]
assert state.attributes["year"] == 2017
assert state.attributes["month"] == 9
assert state.attributes["day"] == 7
assert state.attributes["hour"] == 19
assert state.attributes["minute"] == 46
assert state.attributes["second"] == 30
assert state.attributes["timestamp"] == dt_obj.timestamp()
async def test_set_datetime_time(hass):
"""Test set_datetime method with only time."""
await async_setup_component(
hass, DOMAIN, {DOMAIN: {"test_time": {"has_time": True, "has_date": False}}}
)
entity_id = "input_datetime.test_time"
dt_obj = datetime.datetime(2017, 9, 7, 19, 46, 30)
time_portion = dt_obj.time()
await async_set_date_and_time(hass, entity_id, dt_obj)
state = hass.states.get(entity_id)
assert state.state == str(time_portion)
assert state.attributes["has_time"]
assert not state.attributes["has_date"]
assert state.attributes["timestamp"] == (19 * 3600) + (46 * 60) + 30
async def test_set_invalid(hass):
"""Test set_datetime method with only time."""
initial = "2017-01-01"
await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
"test_date": {"has_time": False, "has_date": True, "initial": initial}
}
},
)
entity_id = "input_datetime.test_date"
dt_obj = datetime.datetime(2017, 9, 7, 19, 46)
time_portion = dt_obj.time()
with pytest.raises(vol.Invalid):
await hass.services.async_call(
"input_datetime",
"set_datetime",
{"entity_id": entity_id, "time": time_portion},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == initial
async def test_set_invalid_2(hass):
"""Test set_datetime method with date and datetime."""
initial = "2017-01-01"
await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
"test_date": {"has_time": False, "has_date": True, "initial": initial}
}
},
)
entity_id = "input_datetime.test_date"
dt_obj = datetime.datetime(2017, 9, 7, 19, 46)
time_portion = dt_obj.time()
with pytest.raises(vol.Invalid):
await hass.services.async_call(
"input_datetime",
"set_datetime",
{"entity_id": entity_id, "time": time_portion, "datetime": dt_obj},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == initial
async def test_set_datetime_date(hass):
"""Test set_datetime method with only date."""
await async_setup_component(
hass, DOMAIN, {DOMAIN: {"test_date": {"has_time": False, "has_date": True}}}
)
entity_id = "input_datetime.test_date"
dt_obj = datetime.datetime(2017, 9, 7, 19, 46)
date_portion = dt_obj.date()
await async_set_date_and_time(hass, entity_id, dt_obj)
state = hass.states.get(entity_id)
assert state.state == str(date_portion)
assert not state.attributes["has_time"]
assert state.attributes["has_date"]
date_dt_obj = datetime.datetime(2017, 9, 7)
assert state.attributes["timestamp"] == date_dt_obj.timestamp()
async def test_restore_state(hass):
"""Ensure states are restored on startup."""
mock_restore_cache(
hass,
(
State("input_datetime.test_time", "19:46:00"),
State("input_datetime.test_date", "2017-09-07"),
State("input_datetime.test_datetime", "2017-09-07 19:46:00"),
State("input_datetime.test_bogus_data", "this is not a date"),
State("input_datetime.test_was_time", "19:46:00"),
State("input_datetime.test_was_date", "2017-09-07"),
),
)
hass.state = CoreState.starting
initial = datetime.datetime(2017, 1, 1, 23, 42)
default = datetime.datetime(1970, 1, 1, 0, 0)
await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
"test_time": {"has_time": True, "has_date": False},
"test_date": {"has_time": False, "has_date": True},
"test_datetime": {"has_time": True, "has_date": True},
"test_bogus_data": {
"has_time": True,
"has_date": True,
"initial": str(initial),
},
"test_was_time": {"has_time": False, "has_date": True},
"test_was_date": {"has_time": True, "has_date": False},
}
},
)
dt_obj = datetime.datetime(2017, 9, 7, 19, 46)
state_time = hass.states.get("input_datetime.test_time")
assert state_time.state == str(dt_obj.time())
state_date = hass.states.get("input_datetime.test_date")
assert state_date.state == str(dt_obj.date())
state_datetime = hass.states.get("input_datetime.test_datetime")
assert state_datetime.state == str(dt_obj)
state_bogus = hass.states.get("input_datetime.test_bogus_data")
assert state_bogus.state == str(initial)
state_was_time = hass.states.get("input_datetime.test_was_time")
assert state_was_time.state == str(default.date())
state_was_date = hass.states.get("input_datetime.test_was_date")
assert state_was_date.state == str(default.time())
async def test_default_value(hass):
"""Test default value if none has been set via initial or restore state."""
await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
"test_time": {"has_time": True, "has_date": False},
"test_date": {"has_time": False, "has_date": True},
"test_datetime": {"has_time": True, "has_date": True},
}
},
)
dt_obj = datetime.datetime(1970, 1, 1, 0, 0)
state_time = hass.states.get("input_datetime.test_time")
assert state_time.state == str(dt_obj.time())
assert state_time.attributes.get("timestamp") is not None
state_date = hass.states.get("input_datetime.test_date")
assert state_date.state == str(dt_obj.date())
assert state_date.attributes.get("timestamp") is not None
state_datetime = hass.states.get("input_datetime.test_datetime")
assert state_datetime.state == str(dt_obj)
assert state_datetime.attributes.get("timestamp") is not None
async def test_input_datetime_context(hass, hass_admin_user):
"""Test that input_datetime context works."""
assert await async_setup_component(
hass, "input_datetime", {"input_datetime": {"only_date": {"has_date": True}}}
)
state = hass.states.get("input_datetime.only_date")
assert state is not None
await hass.services.async_call(
"input_datetime",
"set_datetime",
{"entity_id": state.entity_id, "date": "2018-01-02"},
blocking=True,
context=Context(user_id=hass_admin_user.id),
)
state2 = hass.states.get("input_datetime.only_date")
assert state2 is not None
assert state.state != state2.state
assert state2.context.user_id == hass_admin_user.id
async def test_reload(hass, hass_admin_user, hass_read_only_user):
"""Test reload service."""
count_start = len(hass.states.async_entity_ids())
ent_reg = await entity_registry.async_get_registry(hass)
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
"dt1": {"has_time": False, "has_date": True, "initial": "2019-1-1"},
"dt3": {CONF_HAS_TIME: True, CONF_HAS_DATE: True},
}
},
)
assert count_start + 2 == len(hass.states.async_entity_ids())
state_1 = hass.states.get("input_datetime.dt1")
state_2 = hass.states.get("input_datetime.dt2")
state_3 = hass.states.get("input_datetime.dt3")
dt_obj = datetime.datetime(2019, 1, 1, 0, 0)
assert state_1 is not None
assert state_2 is None
assert state_3 is not None
assert str(dt_obj.date()) == state_1.state
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, "dt1") == f"{DOMAIN}.dt1"
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, "dt2") is None
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, "dt3") == f"{DOMAIN}.dt3"
with patch(
"homeassistant.config.load_yaml_config_file",
autospec=True,
return_value={
DOMAIN: {
"dt1": {"has_time": True, "has_date": False, "initial": "23:32"},
"dt2": {"has_time": True, "has_date": True},
}
},
):
with pytest.raises(Unauthorized):
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
blocking=True,
context=Context(user_id=hass_read_only_user.id),
)
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
blocking=True,
context=Context(user_id=hass_admin_user.id),
)
await hass.async_block_till_done()
assert count_start + 2 == len(hass.states.async_entity_ids())
state_1 = hass.states.get("input_datetime.dt1")
state_2 = hass.states.get("input_datetime.dt2")
state_3 = hass.states.get("input_datetime.dt3")
assert state_1 is not None
assert state_2 is not None
assert state_3 is None
assert str(DEFAULT_TIME) == state_1.state
assert str(datetime.datetime(1970, 1, 1, 0, 0)) == state_2.state
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, "dt1") == f"{DOMAIN}.dt1"
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, "dt2") == f"{DOMAIN}.dt2"
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, "dt3") is None
async def test_load_from_storage(hass, storage_setup):
"""Test set up from storage."""
assert await storage_setup()
state = hass.states.get(f"{DOMAIN}.datetime_from_storage")
assert state.state == INITIAL_DATETIME
assert state.attributes.get(ATTR_EDITABLE)
async def test_editable_state_attribute(hass, storage_setup):
"""Test editable attribute."""
assert await storage_setup(
config={
DOMAIN: {
"from_yaml": {
CONF_HAS_DATE: True,
CONF_HAS_TIME: True,
CONF_NAME: "yaml datetime",
CONF_INITIAL: "2001-01-02 12:34:56",
}
}
}
)
state = hass.states.get(f"{DOMAIN}.datetime_from_storage")
assert state.state == INITIAL_DATETIME
assert state.attributes.get(ATTR_EDITABLE)
state = hass.states.get(f"{DOMAIN}.from_yaml")
assert state.state == "2001-01-02 12:34:56"
assert not state.attributes[ATTR_EDITABLE]
async def test_ws_list(hass, hass_ws_client, storage_setup):
"""Test listing via WS."""
assert await storage_setup(config={DOMAIN: {"from_yaml": {CONF_HAS_DATE: True}}})
client = await hass_ws_client(hass)
await client.send_json({"id": 6, "type": f"{DOMAIN}/list"})
resp = await client.receive_json()
assert resp["success"]
storage_ent = "from_storage"
yaml_ent = "from_yaml"
result = {item["id"]: item for item in resp["result"]}
assert len(result) == 1
assert storage_ent in result
assert yaml_ent not in result
assert result[storage_ent][ATTR_NAME] == "datetime from storage"
async def test_ws_delete(hass, hass_ws_client, storage_setup):
"""Test WS delete cleans up entity registry."""
assert await storage_setup()
input_id = "from_storage"
input_entity_id = f"{DOMAIN}.datetime_from_storage"
ent_reg = await entity_registry.async_get_registry(hass)
state = hass.states.get(input_entity_id)
assert state is not None
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, input_id) == input_entity_id
client = await hass_ws_client(hass)
await client.send_json(
{"id": 6, "type": f"{DOMAIN}/delete", f"{DOMAIN}_id": f"{input_id}"}
)
resp = await client.receive_json()
assert resp["success"]
state = hass.states.get(input_entity_id)
assert state is None
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, input_id) is None
async def test_update(hass, hass_ws_client, storage_setup):
"""Test updating min/max updates the state."""
assert await storage_setup()
input_id = "from_storage"
input_entity_id = f"{DOMAIN}.datetime_from_storage"
ent_reg = await entity_registry.async_get_registry(hass)
state = hass.states.get(input_entity_id)
assert state.attributes[ATTR_FRIENDLY_NAME] == "datetime from storage"
assert state.state == INITIAL_DATETIME
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, input_id) == input_entity_id
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 6,
"type": f"{DOMAIN}/update",
f"{DOMAIN}_id": f"{input_id}",
ATTR_NAME: "even newer name",
CONF_HAS_DATE: False,
}
)
resp = await client.receive_json()
assert resp["success"]
state = hass.states.get(input_entity_id)
assert state.state == INITIAL_TIME
assert state.attributes[ATTR_FRIENDLY_NAME] == "even newer name"
async def test_ws_create(hass, hass_ws_client, storage_setup):
"""Test create WS."""
assert await storage_setup(items=[])
input_id = "new_datetime"
input_entity_id = f"{DOMAIN}.{input_id}"
ent_reg = await entity_registry.async_get_registry(hass)
state = hass.states.get(input_entity_id)
assert state is None
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, input_id) is None
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 6,
"type": f"{DOMAIN}/create",
CONF_NAME: "New DateTime",
CONF_INITIAL: "1991-01-02 01:02:03",
CONF_HAS_DATE: True,
CONF_HAS_TIME: True,
}
)
resp = await client.receive_json()
assert resp["success"]
state = hass.states.get(input_entity_id)
assert state.state == "1991-01-02 01:02:03"
assert state.attributes[ATTR_FRIENDLY_NAME] == "New DateTime"
assert state.attributes[ATTR_EDITABLE]
async def test_setup_no_config(hass, hass_admin_user):
"""Test component setup with no config."""
count_start = len(hass.states.async_entity_ids())
assert await async_setup_component(hass, DOMAIN, {})
with patch(
"homeassistant.config.load_yaml_config_file", autospec=True, return_value={}
):
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
blocking=True,
context=Context(user_id=hass_admin_user.id),
)
await hass.async_block_till_done()
assert count_start == len(hass.states.async_entity_ids())
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2017, Luis Pedro Coelho <luis@luispedro.org>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import itertools
import logging
import os
import sys
from . import task
from .barrier import BarrierError
from .utils import prepare_task_matcher
from .hooks import jug_hook
_is_jug_running = False
def is_jug_running():
'''
Returns True if this script is being executed by jug instead of regular
Python
'''
return _is_jug_running
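# Illustrative usage only (the guarded code below is hypothetical): a jugfile can use
# is_jug_running() to skip work that should only happen in an interactive session, e.g.
#
#     from jug import is_jug_running
#     if not is_jug_running():
#         print('Not running under jug; executing a quick inline sanity check.')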
def init(jugfile=None, jugdir=None, on_error='exit', store=None):
'''
store,jugspace = init(jugfile={'jugfile'}, jugdir={'jugdata'}, on_error='exit', store=None)
Initializes jug (create backend connection, ...).
Imports jugfile
Parameters
----------
jugfile : str, optional
jugfile to import (default: 'jugfile')
jugdir : str, optional
jugdir to use (could be a path)
on_error : str, optional
What to do if import fails (default: exit)
store : storage object, optional
If used, this is returned as ``store`` again.
Returns
-------
store : storage object
jugspace : dictionary
'''
import imp
from .options import set_jugdir
assert on_error in ('exit', 'propagate'), 'jug.init: on_error option is not valid.'
if jugfile is None:
jugfile = 'jugfile'
if store is None:
store = set_jugdir(jugdir)
sys.path.insert(0, os.path.abspath('.'))
    # The reason for this implementation is that it is the only one that seems to
# work with both barrier and pickle()ing of functions inside the jugfile
#
# Just doing __import__() will not work because if there is a BarrierError
# thrown, then functions defined inside the jugfile end up in a confusing
# state.
#
# Alternatively, just execfile()ing will make any functions defined in the
# jugfile unpickle()able which makes mapreduce not work
#
# Therefore, we simulate (partially) __import__ and set sys.modules *even*
# if BarrierError is raised.
#
    jugmodname = os.path.basename(jugfile)
    if jugmodname.endswith('.py'):
        jugmodname = jugmodname[:-len('.py')]
jugmodule = imp.new_module(jugmodname)
jugmodule.__file__ = os.path.abspath(jugfile)
jugspace = jugmodule.__dict__
sys.modules[jugmodname] = jugmodule
jugfile_contents = open(jugfile).read()
try:
exec(compile(jugfile_contents, jugfile, 'exec'), jugspace, jugspace)
except BarrierError:
jugspace['__jug__hasbarrier__'] = True
except Exception as e:
logging.critical("Could not import file '%s' (error: %s)", jugfile, e)
if on_error == 'exit':
import traceback
print(traceback.format_exc())
sys.exit(1)
else:
raise
# The store may have been changed by the jugfile.
store = task.Task.store
return store, jugspace
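# Minimal sketch of calling init() directly (the jugfile and jugdir names are
# hypothetical); this mirrors what the jug subcommands do before running:
#
#     store, jugspace = init('primes.py', 'primes.jugdata')
#     if '__jug__hasbarrier__' not in jugspace:
#         print('Loaded %d tasks' % len(task.alltasks))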
def execution_loop(tasks, options):
from time import sleep
logging.info('Execute start (%s tasks)' % len(tasks))
# For the special (but common) case where most (if not all) of the tasks
# can be loaded directly, just skip them as fast as possible:
first_unloadable = 0
while (first_unloadable < len(tasks)) and tasks[first_unloadable].can_load():
t = tasks[first_unloadable]
        jug_hook('execute.task-loadable', (t,))
first_unloadable += 1
del tasks[:first_unloadable]
if options.debug:
start_task_set = set([id(t) for t in task.alltasks])
# If we are running with a target, exclude non-matching tasks
if options.execute_target:
task_matcher = prepare_task_matcher(options.execute_target)
tasks = [t for t in tasks if task_matcher(t.name)]
logging.info('Non-matching tasks discarded. Remaining (%s tasks)' % len(tasks))
failures = False
prevtask = None
while tasks:
upnext = [] # tasks that can be run
nr_wait_cycles = int(options.execute_nr_wait_cycles)
for i in range(nr_wait_cycles):
max_cannot_run = min(len(tasks), 128)
if i == nr_wait_cycles - 1:
# in the last loop iteration, check all tasks to ensure we don't miss any
max_cannot_run = len(tasks)
            for _ in range(max_cannot_run):
# The argument for this is the following:
# if T' is dependent on the result of T, it is better if the
# processor that ran T, also runs T'. By having everyone else
# push T' to the end of tasks, this is more likely to happen.
#
# Furthermore, this avoids always querying the same tasks.
if tasks[0].can_run():
break
tasks.append(tasks.pop(0))
while tasks and tasks[0].can_run():
upnext.append(tasks.pop(0))
if upnext:
break
for ti,t in enumerate(tasks):
if t.can_run():
upnext.append(tasks.pop(ti))
break
if upnext:
break
logging.info('waiting %s secs for an open task...' % options.execute_wait_cycle_time)
sleep(int(options.execute_wait_cycle_time))
if not upnext:
logging.info('No tasks can be run!')
break
for t in upnext:
if t.can_load():
jug_hook('execute.task-loadable', (t,))
continue
locked = False
try:
locked = t.lock()
if t.can_load(): # This can be true if the task ran between the check above and this one
jug_hook('execute.task-loadable', (t,))
elif locked:
logging.info('Executing %s...' % t.name)
jug_hook('execute.task-pre-execute', (t,))
if options.aggressive_unload:
if prevtask is not None:
active = set([id(d) for d in t.dependencies()])
for d in itertools.chain(prevtask.dependencies(), [prevtask]):
if id(d) not in active:
d.unload()
prevtask = t
t.run(debug_mode=options.debug)
jug_hook('execute.task-executed1', (t,))
if options.debug:
for nt in task.alltasks:
if id(nt) not in start_task_set:
raise RuntimeError('Creating tasks while executing another task is not supported.\n'
'Error detected while running task `{0}`'.format(t.name))
else:
logging.info('Already in execution %s...' % t.name)
except SystemExit:
raise
except Exception as e:
if options.pdb:
                    _, _, tb = sys.exc_info()
# The code below is a complex attempt to load IPython
# debugger which works with multiple versions of IPython.
#
# Unfortunately, their API kept changing prior to the 1.0.
try:
import IPython
try:
import IPython.core.debugger
try:
from IPython.terminal.ipapp import load_default_config
config = load_default_config()
colors = config.TerminalInteractiveShell.colors
except:
import IPython.core.ipapi
ip = IPython.core.ipapi.get()
colors = ip.colors
try:
debugger = IPython.core.debugger.Pdb(colors.get_value(initial='Linux'))
except AttributeError:
debugger = IPython.core.debugger.Pdb(colors)
except ImportError:
#Fallback to older version of IPython API
import IPython.ipapi
import IPython.Debugger
shell = IPython.Shell.IPShell(argv=[''])
ip = IPython.ipapi.get()
debugger = IPython.Debugger.Pdb(ip.options.colors)
except ImportError:
#Fallback to standard debugger
import pdb
debugger = pdb.Pdb()
debugger.reset()
debugger.interaction(None, tb)
else:
logging.critical('Exception while running %s: %s' % (t.name,e))
for other in itertools.chain(upnext, tasks):
for dep in other.dependencies():
if dep is t:
logging.critical('Other tasks are dependent on this one! Parallel processors will be held waiting!')
if not options.execute_keep_going:
raise
else:
failures = True
finally:
if locked:
t.unlock()
if options.aggressive_unload and prevtask is not None:
prevtask.unload()
return failures
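# Hedged sketch of how execution_loop() is typically driven; the exact wiring lives in
# the subcommands module, so the call below is an assumption for illustration only:
#
#     failures = execution_loop(list(task.alltasks), options)
#     if failures:
#         sys.exit(1)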
def main(argv=None):
global _is_jug_running
_is_jug_running = True
from .options import parse
if argv is None:
from sys import argv
options = parse()
jugspace = None
store = None
if options.subcommand not in ('demo', 'status', 'execute', 'webstatus', 'test-jug'):
store, jugspace = init(options.jugfile, options.jugdir)
from .subcommands import cmdapi
cmdapi.run(options.subcommand, options=options, store=store, jugspace=jugspace)
if store is not None:
store.close()
if __name__ == '__main__':
try:
main()
except Exception as exc:
logging.critical('Unhandled Jug Error!')
raise
|
|
import functools
import numpy as np
import pytest
import tensorflow as tf
from mock import mock
from tests.helper import assert_variables
from tests.layers.convolutional.helper import *
from tests.layers.core.test_gated import safe_sigmoid
from tests.layers.helper import l2_normalize
from tfsnippet.layers import *
from tfsnippet.layers.convolutional.utils import get_deconv_output_length
from tfsnippet.ops import flatten_to_ndims, unflatten_from_ndims
from tfsnippet.utils import is_integer
tf_conv2d = tf.nn.conv2d
tf_atrous_conv2d = tf.nn.atrous_conv2d
tf_conv2d_transpose = tf.nn.conv2d_transpose
tf_atrous_conv2d_transpose = tf.nn.atrous_conv2d_transpose
def patched_conv2d(input, filter, strides, padding, data_format,
dilations):
"""A patched version of `tf.nn.conv2d`, emulates NCHW by NHWC."""
input = input_maybe_to_channels_last(input, data_format=data_format)
[strides, dilations] = strides_tuple_to_channels_last(
[strides, dilations], data_format=data_format)
output = tf_conv2d(
input=input, filter=filter, strides=strides, padding=padding,
data_format='NHWC', dilations=dilations
)
output = output_maybe_to_channels_first(output, data_format=data_format)
return output
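# The patched ops above and patched_conv2d_transpose below emulate NCHW with NHWC:
# the input and the strides/dilations (or output_shape) tuples are reordered to
# channels-last, the real TensorFlow op runs with data_format='NHWC', and the result
# is transposed back.  They are installed per test case with mock.patch, e.g.
#
#     with mock.patch('tensorflow.nn.conv2d', patched_conv2d):
#         ...  # NCHW assertions can then run even where native NCHW support may be missing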
class Conv2dTestCase(tf.test.TestCase):
@staticmethod
def conv2d_ans(input, padding, kernel, bias, strides, dilations,
activation_fn=None, normalizer_fn=None, gated=False,
gate_sigmoid_bias=2.):
"""Produce the expected answer of conv2d."""
strides = (strides,) * 2 if is_integer(strides) else tuple(strides)
strides = (1,) + strides + (1,)
session = tf.get_default_session()
input, s1, s2 = flatten_to_ndims(input, 4)
padding = padding.upper()
if dilations > 1:
assert(not any(i > 1 for i in strides))
output = tf.nn.atrous_conv2d(
value=input,
filters=kernel,
rate=dilations,
padding=padding
)
else:
output = tf.nn.conv2d(
input=input,
filter=kernel,
strides=strides,
padding=padding,
data_format='NHWC',
dilations=[1] * 4
)
if bias is not None:
output += bias
if normalizer_fn:
output = normalizer_fn(output)
if gated:
output, gate = tf.split(output, 2, axis=-1)
if activation_fn:
output = activation_fn(output)
if gated:
output = output * tf.sigmoid(gate + gate_sigmoid_bias)
output = unflatten_from_ndims(output, s1, s2)
output = session.run(output)
return output
@staticmethod
def run_conv2d(input, out_channels, kernel_size, padding, kernel, bias,
strides, dilations, channels_last, ph=None, **kwargs):
"""Run `tfsnippet.layers.conv2d` and get the output."""
i_shape = input.shape
if not channels_last:
input = np.transpose(
input,
tuple(i for i in range(len(i_shape) - 3)) + (-1, -3, -2)
)
session = tf.get_default_session()
output = session.run(
conv2d(
input=ph if ph is not None else input,
out_channels=out_channels,
kernel_size=kernel_size,
channels_last=channels_last,
padding=padding,
strides=strides,
dilations=dilations,
kernel=kernel,
bias=bias,
**kwargs
),
feed_dict={ph: input} if ph is not None else None
)
if not channels_last:
output = np.transpose(
output,
tuple(i for i in range(len(i_shape) - 3)) + (-2, -1, -3)
)
return output
def test_conv2d_1x1(self):
with self.test_session() as sess:
np.random.seed(1234)
x = np.random.normal(size=[17, 11, 32, 31, 5]).astype(np.float32)
kernel = np.random.random(size=[1, 1, 5, 7]).astype(np.float32)
bias = np.random.random(size=[7]).astype(np.float32)
# test strides 1, kernel size 1, valid padding, NHWC
np.testing.assert_allclose(
self.run_conv2d(x, 7, 1, 'valid', kernel, bias, 1, 1,
channels_last=True),
self.conv2d_ans(x, 'valid', kernel, bias, 1, 1)
)
            # test strides (2, 3), kernel size 1, same padding, NHWC
np.testing.assert_allclose(
self.run_conv2d(x, 7, 1, 'same', kernel, bias, (2, 3), 1,
channels_last=True),
self.conv2d_ans(x, 'same', kernel, bias, (2, 3), 1)
)
def test_conv2d(self):
with mock.patch('tensorflow.nn.conv2d', patched_conv2d), \
self.test_session() as sess:
np.random.seed(1234)
x = np.random.normal(size=[17, 11, 32, 31, 5]).astype(np.float32)
kernel = np.random.random(size=[3, 4, 5, 7]).astype(np.float32)
bias = np.random.random(size=[7]).astype(np.float32)
# test strides 1, skip 1, same padding, NHWC
np.testing.assert_allclose(
self.run_conv2d(x, 7, (3, 4), 'same', kernel, bias, 1, 1,
channels_last=True),
self.conv2d_ans(x, 'same', kernel, bias, 1, 1)
)
# test strides 1, skip 1, valid padding, NCHW
np.testing.assert_allclose(
self.run_conv2d(x, 7, (3, 4), 'valid', kernel, bias, (1, 1), 1,
channels_last=False),
self.conv2d_ans(x, 'valid', kernel, bias, 1, 1)
)
# test strides (3, 2), skip 1, same padding, NHWC
np.testing.assert_allclose(
self.run_conv2d(x, 7, (3, 4), 'same', kernel, bias, (3, 2), 1,
channels_last=True),
self.conv2d_ans(x, 'same', kernel, bias, (3, 2), 1)
)
# test strides 1, skip 2, valid padding, NHWC
np.testing.assert_allclose(
self.run_conv2d(x, 7, (3, 4), 'valid', kernel, bias, 1, 2,
channels_last=True),
self.conv2d_ans(x, 'valid', kernel, bias, 1, 2)
)
# test dynamic shape, same padding, NHWC
ph = tf.placeholder(dtype=tf.float32,
shape=(None, None, None, None, 5))
np.testing.assert_allclose(
self.run_conv2d(x, 7, (3, 4), 'same', kernel, bias, 1, 1,
channels_last=True, ph=ph),
self.conv2d_ans(x, 'same', kernel, bias, 1, 1)
)
# test dynamic shape, valid padding NCHW
ph = tf.placeholder(dtype=tf.float32,
shape=(None, None, 5, None, None))
np.testing.assert_allclose(
self.run_conv2d(x, 7, (3, 4), 'valid', kernel, bias, 1, 1,
channels_last=False, ph=ph),
self.conv2d_ans(x, 'valid', kernel, bias, 1, 1)
)
# test errors
with pytest.raises(ValueError,
match='Invalid value for argument `strides`: '
'expected to be one or two positive '
'integers'):
_ = self.run_conv2d(x, 7, (3, 4), 'valid', kernel, bias, 0, 2,
channels_last=False)
with pytest.raises(ValueError,
match='`channels_last` == False is incompatible '
'with `dilations` > 1'):
_ = self.run_conv2d(x, 7, (3, 4), 'valid', kernel, bias, 1, 2,
channels_last=False)
with pytest.raises(ValueError,
match='`strides` > 1 is incompatible with '
'`dilations` > 1'):
_ = self.run_conv2d(x, 7, (3, 4), 'valid', kernel, bias, 2, 2,
channels_last=True)
# test create variables
with tf.Graph().as_default():
# test NHWC
_ = conv2d(x, 7, (3, 4), padding='same', channels_last=True)
assert_variables(['kernel', 'bias'], trainable=True, scope='conv2d',
collections=[tf.GraphKeys.MODEL_VARIABLES])
kernel_var = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)[-2]
bias_var = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)[-1]
self.assertEqual(kernel_var.shape, kernel.shape)
self.assertEqual(bias_var.shape, bias.shape)
# test NCHW
_ = conv2d(np.transpose(x, [0, 1, -1, -3, -2]), 7, (3, 4),
padding='valid', channels_last=False)
assert_variables(['kernel', 'bias'], trainable=True,
scope='conv2d_1',
collections=[tf.GraphKeys.MODEL_VARIABLES])
kernel_var = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)[-2]
bias_var = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)[-1]
self.assertEqual(kernel_var.shape, kernel.shape)
self.assertEqual(bias_var.shape, bias.shape)
# test create variables, non-trainable
with tf.Graph().as_default():
# test NHWC
_ = conv2d(x, 7, (3, 4), padding='same', channels_last=True,
trainable=False)
assert_variables(['kernel', 'bias'], trainable=False,
scope='conv2d',
collections=[tf.GraphKeys.MODEL_VARIABLES])
# test create variables with use_bias = False
with tf.Graph().as_default():
_ = conv2d(x, 7, (3, 4), padding='same', channels_last=True,
use_bias=False)
assert_variables(['kernel'], trainable=True, scope='conv2d',
collections=[tf.GraphKeys.MODEL_VARIABLES])
assert_variables(['bias'], exist=False, scope='conv2d')
def test_normalization_and_activation(self):
assert_allclose = functools.partial(
np.testing.assert_allclose, rtol=1e-5, atol=1e-5)
with mock.patch('tensorflow.nn.conv2d', patched_conv2d), \
self.test_session() as sess:
np.random.seed(1234)
x = np.random.normal(size=[17, 11, 32, 31, 5]).astype(np.float32)
kernel = np.random.random(size=[3, 4, 5, 7]).astype(np.float32)
normalized_kernel = l2_normalize(kernel, axis=(0, 1, 2))
kernel = kernel.astype(np.float32)
bias = np.random.random(size=[7]).astype(np.float32)
normalizer_fn = lambda x: x * 1.5 - 3.
activation_fn = lambda x: x * 2. + 1.
# test weight_norm + normalizer + activation, NHWC
assert_allclose(
self.run_conv2d(x, 7, (3, 4), 'same', kernel, bias, 1, 1,
channels_last=True, weight_norm=True,
normalizer_fn=normalizer_fn,
activation_fn=activation_fn),
self.conv2d_ans(x, 'same', normalized_kernel, None, 1, 1,
normalizer_fn=normalizer_fn,
activation_fn=activation_fn)
)
# test weight_norm + normalizer + activation, NCHW, use_bias = True
assert_allclose(
self.run_conv2d(x, 7, (3, 4), 'valid', kernel, bias, 1, 1,
channels_last=False, weight_norm=True,
use_bias=True,
normalizer_fn=normalizer_fn,
activation_fn=activation_fn),
self.conv2d_ans(x, 'valid', normalized_kernel, bias, 1, 1,
normalizer_fn=normalizer_fn,
activation_fn=activation_fn)
)
def test_kernel_mask(self):
with mock.patch('tensorflow.nn.conv2d', patched_conv2d), \
self.test_session() as sess:
np.random.seed(1234)
x = np.random.normal(size=[17, 11, 32, 31, 5]).astype(np.float32)
kernel = np.random.random(size=[3, 4, 5, 7]).astype(np.float32)
mask = np.random.binomial(n=1, p=.5, size=kernel.shape). \
astype(np.float32)
bias = np.random.random(size=[7]).astype(np.float32)
# test strides 1, skip 1, same padding, NHWC
np.testing.assert_allclose(
self.run_conv2d(x, 7, (3, 4), 'same', kernel, bias, 1, 1,
channels_last=True, kernel_mask=mask),
self.conv2d_ans(x, 'same', kernel * mask, bias, 1, 1)
)
def test_gated(self):
assert_allclose = functools.partial(
np.testing.assert_allclose, rtol=1e-5, atol=1e-5)
with mock.patch('tensorflow.nn.conv2d', patched_conv2d), \
self.test_session() as sess:
np.random.seed(1234)
x = np.random.normal(size=[17, 11, 32, 31, 5]).astype(np.float32)
kernel = np.random.random(size=[3, 4, 5, 14]).astype(np.float32)
normalized_kernel = l2_normalize(kernel, axis=(0, 1, 2))
kernel = kernel.astype(np.float32)
bias = np.random.random(size=[14]).astype(np.float32)
normalizer_fn = lambda x: x * 1.5 - 3.
activation_fn = lambda x: x * 2. + 1.
assert_allclose(
self.run_conv2d(x, 7, (3, 4), 'same', kernel, bias, 1, 1,
channels_last=True, weight_norm=True,
normalizer_fn=normalizer_fn,
activation_fn=activation_fn,
gated=True,
gate_sigmoid_bias=1.1),
self.conv2d_ans(x, 'same', normalized_kernel, None, 1, 1,
normalizer_fn=normalizer_fn,
activation_fn=activation_fn,
gated=True, gate_sigmoid_bias=1.1)
)
def patched_conv2d_transpose(value, filter, output_shape, strides, padding,
data_format):
"""A patched version of `tf.nn.conv2d_transpose`, emulates NCHW by NHWC."""
value = input_maybe_to_channels_last(value, data_format=data_format)
[strides, output_shape] = strides_tuple_to_channels_last(
[strides, output_shape], data_format=data_format)
output = tf_conv2d_transpose(
value=value, filter=filter, output_shape=output_shape, strides=strides,
padding=padding, data_format='NHWC'
)
output = output_maybe_to_channels_first(output, data_format=data_format)
return output
class Deconv2dTestCase(tf.test.TestCase):
def check(self, x, padding, kernel, bias, strides):
"""Integrated tests for specific argument combinations."""
assert_allclose = functools.partial(
np.testing.assert_allclose, rtol=1e-5, atol=1e-5)
strides = (strides,) * 2 if is_integer(strides) else tuple(strides)
x_shape = (x.shape[-3], x.shape[-2])
x_channels = x.shape[-1]
kernel_size = kernel.shape[0], kernel.shape[1]
# compute the input for the deconv
y = Conv2dTestCase.conv2d_ans(x, padding, kernel, None, strides, 1)
y_shape = (y.shape[-3], y.shape[-2])
y_channels = y.shape[-1]
# test explicit output_shape, NHWC
deconv_out = Deconv2dTestCase.run_deconv2d(
y, x_channels, kernel_size, x_shape, padding,
kernel, None, strides, channels_last=True, use_bias=False
)
self.assertEqual(deconv_out.shape, x.shape)
# memorize the linear output for later tests
linear_out = np.copy(deconv_out)
# test explicit output_shape, NCHW
deconv_out = Deconv2dTestCase.run_deconv2d(
y, x_channels, kernel_size, x_shape, padding,
kernel, None, strides, channels_last=False, use_bias=False
)
assert_allclose(deconv_out, linear_out)
# test explicit dynamic output_shape, NHWC
deconv_out = Deconv2dTestCase.run_deconv2d(
y, x_channels, kernel_size, tf.constant(x_shape), padding,
kernel, None, strides, channels_last=True, use_bias=False
)
assert_allclose(deconv_out, linear_out)
# test explicit dynamic output_shape, NCHW
deconv_out = Deconv2dTestCase.run_deconv2d(
y, x_channels, kernel_size, tf.constant(x_shape), padding,
kernel, None, strides, channels_last=False, use_bias=False
)
assert_allclose(deconv_out, linear_out)
# test dynamic input, explicit dynamic output_shape, NHWC
ph = tf.placeholder(
dtype=tf.float32,
shape=(None,) * (len(y.shape) - 3) + (None, None, y_channels)
)
deconv_out = Deconv2dTestCase.run_deconv2d(
y, x_channels, kernel_size, tf.constant(x_shape), padding,
kernel, None, strides, channels_last=True, ph=ph, use_bias=False
)
assert_allclose(deconv_out, linear_out)
# test dynamic input, explicit dynamic output_shape, NCHW
ph = tf.placeholder(
dtype=tf.float32,
shape=(None,) * (len(y.shape) - 3) + (y_channels, None, None)
)
deconv_out = Deconv2dTestCase.run_deconv2d(
y, x_channels, kernel_size, tf.constant(x_shape), padding,
kernel, None, strides, channels_last=False, ph=ph, use_bias=False
)
assert_allclose(deconv_out, linear_out)
        # if the requested output shape matches the auto-inferred deconv output shape,
        # further test without giving an explicit output_shape
def axis_matches(i):
return x_shape[i] == get_deconv_output_length(
y_shape[i], kernel_size[i], strides[i], padding)
if all(axis_matches(i) for i in (0, 1)):
# test static input, implicit output_shape, NHWC
deconv_out = Deconv2dTestCase.run_deconv2d(
y, x_channels, kernel_size, None, padding, kernel, None,
strides, channels_last=True, use_bias=False
)
assert_allclose(deconv_out, linear_out)
# test static input, implicit output_shape, NCHW
deconv_out = Deconv2dTestCase.run_deconv2d(
y, x_channels, kernel_size, None, padding, kernel, None,
strides, channels_last=False, use_bias=False
)
assert_allclose(deconv_out, linear_out)
# test dynamic input, implicit output_shape, NHWC
ph = tf.placeholder(
dtype=tf.float32,
shape=(None,) * (len(y.shape) - 3) + (None, None, y_channels)
)
deconv_out = Deconv2dTestCase.run_deconv2d(
y, x_channels, kernel_size, None, padding, kernel, None,
strides, channels_last=True, ph=ph,
use_bias=False
)
assert_allclose(deconv_out, linear_out)
# test dynamic input, implicit output_shape, NCHW
ph = tf.placeholder(
dtype=tf.float32,
shape=(None,) * (len(y.shape) - 3) + (y_channels, None, None)
)
deconv_out = Deconv2dTestCase.run_deconv2d(
y, x_channels, kernel_size, None, padding, kernel, None,
strides, channels_last=False, ph=ph, use_bias=False
)
assert_allclose(deconv_out, linear_out)
# test normalization and activation
activation_fn = lambda x: x * 2. + 1.
normalizer_fn = lambda x: x * 1.5 - 3.
ans = activation_fn(normalizer_fn(linear_out))
deconv_out = Deconv2dTestCase.run_deconv2d(
y, x_channels, kernel_size, x_shape, padding,
kernel, bias, strides, channels_last=True,
normalizer_fn=normalizer_fn, activation_fn=activation_fn
)
assert_allclose(deconv_out, ans)
# test normalization and activation and force using bias
ans = activation_fn(normalizer_fn(linear_out + bias))
deconv_out = Deconv2dTestCase.run_deconv2d(
y, x_channels, kernel_size, x_shape, padding,
kernel, bias, strides, channels_last=False, use_bias=True,
normalizer_fn=normalizer_fn, activation_fn=activation_fn
)
assert_allclose(deconv_out, ans)
# test weight norm
normalized_kernel = l2_normalize(kernel, axis=(0, 1, 2))
ans = Deconv2dTestCase.run_deconv2d(
y, x_channels, kernel_size, x_shape, padding,
normalized_kernel, None, strides, channels_last=True,
use_bias=False
)
deconv_out = Deconv2dTestCase.run_deconv2d(
y, x_channels, kernel_size, x_shape, padding,
kernel, None, strides, channels_last=False,
use_bias=False, weight_norm=True,
                # supplying a normalizer_fn keeps weight_norm from applying its own scale
normalizer_fn=(lambda x: x)
)
assert_allclose(deconv_out, ans)
# test gated
activation_fn = lambda x: x * 2. + 1.
normalizer_fn = lambda x: x * 1.5 - 3.
output, gate = np.split(normalizer_fn(linear_out), 2, axis=-1)
ans = activation_fn(output) * safe_sigmoid(gate + 1.1)
deconv_out = Deconv2dTestCase.run_deconv2d(
y, x_channels // 2, kernel_size, x_shape, padding,
kernel, bias, strides, channels_last=True,
normalizer_fn=normalizer_fn, activation_fn=activation_fn,
gated=True, gate_sigmoid_bias=1.1
)
assert_allclose(deconv_out, ans)
@staticmethod
def run_deconv2d(input, out_channels, kernel_size, output_shape, padding,
kernel, bias, strides, channels_last, ph=None, **kwargs):
"""Run `tfsnippet.layers.conv2d` and get the output."""
i_shape = input.shape
if not channels_last:
input = np.transpose(
input,
tuple(i for i in range(len(i_shape) - 3)) + (-1, -3, -2)
)
session = tf.get_default_session()
output = session.run(
deconv2d(
input=ph if ph is not None else input,
out_channels=out_channels,
kernel_size=kernel_size,
output_shape=output_shape,
channels_last=channels_last,
padding=padding,
strides=strides,
kernel=kernel,
bias=bias,
**kwargs
),
feed_dict={ph: input} if ph is not None else None
)
if not channels_last:
output = np.transpose(
output,
tuple(i for i in range(len(i_shape) - 3)) + (-2, -1, -3)
)
return output
def test_deconv2d(self):
with mock.patch('tensorflow.nn.conv2d_transpose',
patched_conv2d_transpose), \
self.test_session() as sess:
np.random.seed(1234)
x = np.random.normal(size=[17, 11, 32, 31, 6]).astype(np.float32)
kernel = np.random.random(size=[3, 4, 6, 7]).astype(np.float32)
bias = np.random.random(size=[6]).astype(np.float32)
self.check(x, 'valid', kernel, bias, strides=1)
self.check(x, 'same', kernel, bias, strides=1)
self.check(x, 'valid', kernel, bias, strides=(3, 2))
self.check(x, 'same', kernel, bias, strides=(3, 2))
# special check: strides == x.shape
self.check(x, 'valid', kernel, bias, strides=(32, 31))
self.check(x, 'same', kernel, bias, strides=(32, 31))
def test_deconv2d_vars(self):
np.random.seed(1234)
x = np.random.normal(size=[17, 11, 32, 31, 7]).astype(np.float32)
kernel = np.random.random(size=[3, 4, 5, 7]).astype(np.float32)
bias = np.random.random(size=[5]).astype(np.float32)
# test create variables
with tf.Graph().as_default():
# test NHWC
_ = deconv2d(x, 5, (3, 4), padding='same', channels_last=True)
assert_variables(['kernel', 'bias'], trainable=True,
scope='deconv2d',
collections=[tf.GraphKeys.MODEL_VARIABLES])
kernel_var = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)[-2]
bias_var = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)[-1]
self.assertEqual(kernel_var.shape, kernel.shape)
self.assertEqual(bias_var.shape, bias.shape)
# test NCHW
_ = deconv2d(np.transpose(x, [0, 1, -1, -3, -2]), 5, (3, 4),
padding='valid', channels_last=False)
assert_variables(['kernel', 'bias'], trainable=True,
scope='deconv2d_1',
collections=[tf.GraphKeys.MODEL_VARIABLES])
kernel_var = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)[-2]
bias_var = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)[-1]
self.assertEqual(kernel_var.shape, kernel.shape)
self.assertEqual(bias_var.shape, bias.shape)
# test create variables, non-trainable
with tf.Graph().as_default():
# test NHWC
_ = deconv2d(x, 5, (3, 4), padding='same', channels_last=True,
trainable=False)
assert_variables(['kernel', 'bias'], trainable=False,
scope='deconv2d',
collections=[tf.GraphKeys.MODEL_VARIABLES])
# test create variables with use_bias = False
with tf.Graph().as_default():
_ = deconv2d(x, 5, (3, 4), padding='same', channels_last=True,
use_bias=False)
assert_variables(['kernel'], trainable=True, scope='deconv2d',
collections=[tf.GraphKeys.MODEL_VARIABLES])
assert_variables(['bias'], exist=False, scope='deconv2d')
|
|
# coding: utf-8
from __future__ import absolute_import
import six
from unittest import expectedFailure
from django.test import TransactionTestCase
from celery_rpc.exceptions import remote_exception_registry
from celery_rpc.tests import factories
from ..client import Pipe, Client
from .utils import SimpleModelTestMixin, unpack_exception
from .models import SimpleModel, FkSimpleModel
class BasePipelineTests(SimpleModelTestMixin, TransactionTestCase):
""" Abstract base class for pipe tests.
"""
def setUp(self):
super(BasePipelineTests, self).setUp()
self.client = Client()
@property
def pipe(self):
return self.client.pipe()
class PipelineTests(BasePipelineTests):
""" Pipeline related tests.
"""
def testClientCanCreatePipe(self):
""" Client able to start pipeline
"""
p = self.client.pipe()
self.assertIsInstance(p, Pipe)
def testPipeCanSendRequest(self):
""" Pipe can send complex request to RPC server.
"""
r = self.client.pipe().run()
self.assertEqual([], r)
def testSeveralFilters(self):
""" Several filters in the chain work well.
"""
p = self.pipe.filter(self.MODEL_SYMBOL,
kwargs=dict(filters={'pk': self.models[0].pk}))
p = p.filter(self.MODEL_SYMBOL,
kwargs=dict(filters={'pk': self.models[1].pk}))
r = p.run()
expected = [[self.get_model_dict(self.models[0])],
[self.get_model_dict(self.models[1])]]
self.assertEqual(expected, r)
def testUpdate(self):
""" Update works well in pipeline.
"""
p = self.pipe.update(self.MODEL_SYMBOL,
{'pk': self.models[0].pk, 'char': 'hello'})
r = p.run()
m = SimpleModel.objects.get(pk=self.models[0].pk)
self.assertEqual('hello', m.char)
expected = [self.get_model_dict(m)]
self.assertEqual(expected, r)
def testAtomicPipeline(self):
""" Pipeline is atomic by default.
"""
p = self.pipe
p = p.delete(self.MODEL_SYMBOL, self.get_model_dict(self.models[0]))
p = p.delete('invalid model symbol raise exception', {})
with self.assertRaisesRegexp(Exception, "No module named"):
with unpack_exception():
p.run()
self.assertTrue(SimpleModel.objects.filter(
pk=self.models[0].pk).exists())
def testAtomicPipelineRemoteError(self):
""" Perform testAtomicPipeline with remote errors handling
in another mode."""
old = self.client._app.conf['WRAP_REMOTE_ERRORS']
self.client._app.conf['WRAP_REMOTE_ERRORS'] = not old
return self.testAtomicPipeline()
def testWrapRemoteErrors(self):
""" Errors wrap correctly
"""
self.client._app.conf['WRAP_REMOTE_ERRORS'] = True
p = self.pipe
p = p.delete(self.MODEL_SYMBOL, self.get_model_dict(self.models[0]))
p = p.delete('invalid model symbol raise exception', {})
with self.assertRaisesRegexp(remote_exception_registry.RemoteError,
"No module named") as ctx:
p.run(propagate=False)
self.assertIsInstance(ctx.exception, ImportError)
@expectedFailure
def testPatchTransformer(self):
""" TODO `patch` updates result of previous task.
"""
p = self.pipe.filter(self.MODEL_SYMBOL,
kwargs=dict(filters={'pk': self.models[0].pk}))
r = p.patch({'char': 'abc'})
expected = [[self.get_model_dict(self.models[0])],
[self.get_model_dict(self.models[0])]]
expected[1].update(char='abc')
self.assertEqual(expected, r)
class TransformTests(BasePipelineTests):
""" Tests on different transformation.
"""
FK_MODEL_SYMBOL = 'celery_rpc.tests.models:FkSimpleModel'
TRANSFORM_MAP = {'fk': 'id'}
def setUp(self):
super(TransformTests, self).setUp()
self.model = factories.SimpleModelFactory()
self.fk_model = factories.FkSimpleModelFactory(fk=self.model)
def testDeleteTransformer(self):
""" Delete transformation works well.
"""
p = self.pipe.filter(self.MODEL_SYMBOL,
kwargs=dict(filters={'pk': self.models[0].pk}))
p = p.delete(self.MODEL_SYMBOL)
r = p.run()
expected = [[self.get_model_dict(self.models[0])], []]
self.assertEqual(expected, r)
self.assertRaises(SimpleModel.DoesNotExist,
SimpleModel.objects.get, pk=self.models[0].pk)
def testDeleteTransformerRemoteError(self):
""" Perform testDeleteTransformer with remote errors handling
in another mode."""
old = self.client._app.conf['WRAP_REMOTE_ERRORS']
self.client._app.conf['WRAP_REMOTE_ERRORS'] = not old
return self.testDeleteTransformer()
def testCreateTransformer(self):
p = self.pipe.filter(self.MODEL_SYMBOL,
kwargs=dict(filters={'pk': self.models[0].pk}))
p = p.translate(self.TRANSFORM_MAP)
p = p.create(self.FK_MODEL_SYMBOL)
r = p.run()
self.assertTrue(FkSimpleModel.objects.get(**r[2][0]))
def testCreateTransformerDefaults(self):
p = self.pipe.create(self.MODEL_SYMBOL, data={"char": "parent"})
p = p.translate(self.TRANSFORM_MAP,
kwargs=dict(defaults={"char": "child"}))
p = p.create(self.FK_MODEL_SYMBOL)
r = p.run()
model = FkSimpleModel.objects.get(**r[2])
self.assertEqual(model.fk_id, r[0]["id"])
self.assertEqual(model.char, "child")
def testUpdateOrCreateCreateTransformer(self):
""" Test creating with update_or_create
"""
p = self.pipe.filter(self.MODEL_SYMBOL,
kwargs=dict(filters={'pk': self.models[0].pk}))
p = p.translate(self.TRANSFORM_MAP)
p = p.update_or_create(self.FK_MODEL_SYMBOL)
r = p.run()
self.assertTrue(FkSimpleModel.objects.get(**r[2][0]))
def testUpdateOrCreateUpdateTransformer(self):
self.assertNotEqual(self.fk_model.id, self.models[1].pk)
p = self.pipe.filter(self.MODEL_SYMBOL,
kwargs=dict(filters={'pk': self.models[1].pk}))
p = p.translate(self.TRANSFORM_MAP,
kwargs=dict(defaults={'id': self.fk_model.id}))
p = p.update_or_create(self.FK_MODEL_SYMBOL)
r = p.run()
expect_obj = FkSimpleModel.objects.get(**r[2][0])
self.assertEquals(expect_obj.fk.id, self.models[1].pk)
def testUpdateTransformer(self):
p = self.pipe.filter(self.MODEL_SYMBOL,
kwargs=dict(filters={'pk': self.models[0].pk}))
p = p.translate(self.TRANSFORM_MAP,
kwargs=dict(defaults={'id': self.fk_model.id}))
p = p.update(self.FK_MODEL_SYMBOL)
r = p.run()
self.assertEqual(r[2][0]['fk'], self.models[0].pk)
def testGetSetTransformer(self):
p = self.pipe.filter(self.MODEL_SYMBOL,
kwargs=dict(filters={'pk': self.models[3].pk}))
p = p.translate(self.TRANSFORM_MAP,
kwargs=dict(defaults={'id': self.fk_model.id}))
p = p.getset(self.FK_MODEL_SYMBOL)
r = p.run()
expect_obj = FkSimpleModel.objects.get(fk=self.models[3].pk)
self.assertEquals(expect_obj.fk.id, self.models[3].pk)
        # getset returns the previous state
self.assertNotEqual(r[2][0]['fk'], self.models[3].pk)
class ResultTests(TransformTests):
def testResult(self):
DEFAULTS_COUNT = 10
defaults = [dict(char=i) for i in six.moves.range(DEFAULTS_COUNT)]
p = self.pipe.create(self.MODEL_SYMBOL, data={'char': 123})
for el in defaults:
p = p.result(0)
p = p.translate(self.TRANSFORM_MAP,
kwargs=dict(defaults=el))
p = p.create(self.FK_MODEL_SYMBOL)
r = p.run()
expect_fk_id = r[0]['id']
expect = FkSimpleModel.objects.filter(
char__in=six.moves.range(DEFAULTS_COUNT),
fk=expect_fk_id)
self.assertEquals(expect.count(), DEFAULTS_COUNT)
|
|
import re
import Qualysv2
import pytest
import requests
from Qualysv2 import is_empty_result, format_and_validate_response, \
parse_two_keys_dict, create_ip_list_dicts, build_args_dict, handle_general_result, \
change_dict_keys, COMMANDS_ARGS_DATA, limit_ip_results, Client, build_host_list_detection_outputs, \
COMMANDS_PARSE_AND_OUTPUT_DATA, validate_depended_args, Dict, validate_at_most_one_group, parse_raw_response, \
get_simple_response_from_raw, validate_required_group
from CommonServerPython import DemistoException
class TestIsEmptyResult:
def test_is_empty_xml_empty_input(self):
"""
Given
- A json parsed result from qualys
When
- result has no keys
Then
- return true since result is empty
"""
        response = {}
        res = is_empty_result(response)
assert res
def test_is_empty_xml_only_datetime(self):
"""
Given
- A json parsed result from qualys
When
- result has only datetime key
Then
- return true since result has no content
"""
response = {'DATETIME': 'sometime'}
res = is_empty_result(response)
assert res
def test_is_empty_xml_non_empty_result(self):
"""
Given
- A json parsed result from qualys
When
- result has some keys
Then
- return false since result has content
"""
response = {'IP_SET': {'IP': ['1.1.1.1']},
'DATETIME': 'sometime'}
res = is_empty_result(response)
assert not res
def test_is_empty_xml_none_result(self):
"""
Given
- A result from qualys
When
- result is None
Then
- return true
"""
response = None
assert is_empty_result(response)
class TestFormatAndValidateResponse:
raw_xml_response_success = '''<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE SIMPLE_RETURN SYSTEM
"https://qualysapi.qg2.apps.qualys.com/api/2.0/simple_return.dtd">
<SIMPLE_RETURN><RESPONSE>
<DATETIME>2021-03-24T15:40:23Z</DATETIME>
<TEXT>IPs successfully added to Vulnerability Management</TEXT>
</RESPONSE></SIMPLE_RETURN>'''
    raw_xml_response_failure = '''<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE SIMPLE_RETURN SYSTEM
"https://qualysapi.qg2.apps.qualys.com/api/2.0/simple_return.dtd">
<SIMPLE_RETURN>
<RESPONSE><DATETIME>2021-03-24T16:35:44Z</DATETIME>
<CODE>1905</CODE><TEXT>IP(s) do not exist.</TEXT></RESPONSE></SIMPLE_RETURN>'''
bad_format_raw_xml_response = '''<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE SIMPLE_RETURN SYSTEM
"https://qualysapi.qg2.apps.qualys.com/api/2.0/simple_return.dtd">
<SIMPLE_RETURN>
<RESPONSE><DATETIME>2021-03-24T16:35:44Z</DATETIME>
<CODE>1905</CODE><TEXT>IP(s) do not exist.</TEXT></RESPONSE>'''
def test_format_and_validate_response_proper_response(self):
"""
Given
- raw xml response
When
- the response is valid
Then
- return the parsed response
"""
raw_json_response = format_and_validate_response(self.raw_xml_response_success)
assert raw_json_response.get('SIMPLE_RETURN').get('RESPONSE')
assert not raw_json_response.get('CODE')
def test_format_and_validate_response_error_response(self):
"""
Given
- raw xml response
When
- the response has an error code provided by qualys
Then
- raise a DemistoException
"""
with pytest.raises(DemistoException):
            format_and_validate_response(self.raw_xml_response_failure)
def test_format_and_validate_response_bad_format(self):
"""
Given
- raw xml response
When
- the xml format is incorrect
Then
- return empty dictionary
"""
result = format_and_validate_response(self.bad_format_raw_xml_response)
assert not result
def test_format_and_validate_response_none(self):
"""
Given
- raw xml response
When
- the xml format is incorrect
Then
- return empty dictionary
"""
raw_xml_response = None
result = format_and_validate_response(raw_xml_response)
assert not result
def test_format_and_validate_response_json(self):
"""
Given
- raw json response
When
- the json response is formatted correctly
Then
- return the raw response
"""
raw_json_response = '[{"ip": "1.1.1.1"},{"ip": "1.1.1.1"}]'
result = format_and_validate_response(raw_json_response)
assert len(result) == 2
def test_format_and_validate_response_bad_json(self):
"""
Given
- raw json response
When
- the json response is formatted incorrectly
Then
- return empty result
"""
raw_json_response = '[{"ip": "1.1.1.1",{"ip": "1.1.1.1"}]'
result = format_and_validate_response(raw_json_response)
assert not result
PARSE_RAW_RESPONSE_INPUTS = [('[{"ip": "1.1.1.1"},{"ip": "1.1.1.1"}]', [{'ip': '1.1.1.1'}, {'ip': '1.1.1.1'}]),
(raw_xml_response_success, {'SIMPLE_RETURN': {
'RESPONSE': {'DATETIME': '2021-03-24T15:40:23Z',
'TEXT': 'IPs successfully added to Vulnerability Management'}}}),
# Invalid case - should return empty dict
('[{"ip": "1.1.1.1"ip": "1.1.1.1"}]', {})]
@pytest.mark.parametrize('response, expected', PARSE_RAW_RESPONSE_INPUTS)
def test_parse_raw_response(self, response, expected):
"""
Given
- Response.
When
- Parsing the raw response.
Then
- Ensure expected object is returned from parsing.
"""
assert parse_raw_response(response) == expected
SIMPLE_FROM_RAW_INPUTS = [({'SIMPLE_RETURN': {'RESPONSE': {'DATETIME': '2021-03-24T15:40:23Z',
'TEXT': 'IPs successfully added to Vulnerability '
'Management'}}},
{'DATETIME': '2021-03-24T15:40:23Z',
'TEXT': 'IPs successfully added to Vulnerability Management'})]
@pytest.mark.parametrize('raw_response, expected', SIMPLE_FROM_RAW_INPUTS)
def test_get_simple_response_from_raw(self, raw_response, expected):
"""
Given
- Parsed raw response.
When
- Getting simple response from parsed raw response.
Then
- Ensure expected object is returned from parsing.
"""
assert get_simple_response_from_raw(raw_response) == expected
class TestHandleGeneralResult:
def test_handle_general_result_path_exists(self, mocker):
"""
Given
- response in json format
- path to a specific field
When
- the json object is well formed
- the path is correct
Then
- return the path requested
"""
json_obj = {'IP_LIST_OUTPUT': {'RESPONSE': {'DATETIME': 'sometime', 'IP_SET': {'IP': ['1.1.1.1']}}}}
mocker.patch.object(Qualysv2, 'format_and_validate_response', return_value=json_obj)
dummy_response = requests.Response()
assert handle_general_result(dummy_response, 'qualys-ip-list') == {'DATETIME': 'sometime',
'IP_SET': {'IP': ['1.1.1.1']}}
def test_handle_general_result_doesnt_exist(self, mocker):
"""
Given
- response in json format and
- a path to be returned from the object
When
- the json object is well formed
- the path doesn't exist
Then
- raise DemistoException Exception
"""
with pytest.raises(ValueError):
json_obj = {'IP_LIST_OUTPUT': {'RESPONSE': {'DATETIME': 'sometime', 'IP_SET': {'IP': ['1.1.1.1']}}}}
path = {'qualys-ip-list': {'json_path': ['IP_SET', 'WHAT']}}
mocker.patch.object(Qualysv2, 'format_and_validate_response', return_value=json_obj)
mocker.patch.object(Qualysv2, 'COMMANDS_PARSE_AND_OUTPUT_DATA', path)
dummy_response = requests.Response()
handle_general_result(dummy_response, 'qualys-ip-list')
def test_handle_general_result_empty_json(self, mocker):
"""
Given
- response in json format
- a path to be returned from the object
When
- the json object is empty formed
- the path doesn't exist
Then
- raise DemistoException Exception
"""
with pytest.raises(ValueError):
json_obj = {}
path = {'qualys-ip-list': {'json_path': ['IP_SET', 'WHAT']}}
mocker.patch.object(Qualysv2, 'format_and_validate_response', return_value=json_obj)
mocker.patch.object(Qualysv2, 'COMMANDS_PARSE_AND_OUTPUT_DATA', path)
dummy_response = requests.Response()
handle_general_result(dummy_response, 'qualys-ip-list')
def test_handle_general_result_none_value(self, mocker):
"""
Given
- response in json format
- a path to be returned from the object
When
- the json object is none formed
- the path doesn't exist
Then
- raise DemistoException Exception
"""
with pytest.raises(ValueError):
json_obj = None
path = {'qualys-ip-list': {'json_path': ['IP_SET', 'WHAT']}}
mocker.patch.object(Qualysv2, 'format_and_validate_response', return_value=json_obj)
mocker.patch.object(Qualysv2, 'COMMANDS_PARSE_AND_OUTPUT_DATA', path)
dummy_response = requests.Response()
handle_general_result(dummy_response, 'qualys-ip-list')
def test_handle_general_result_empty_path(self, mocker):
"""
Given
- response in json format
- a path to be returned from the object
When
- the json object is formed correctly
- the path is empty
Then
- return the json object without any changes
"""
json_obj = {'IP_LIST_OUTPUT': {'RESPONSE': {'DATETIME': 'sometime', 'IP_SET': {'IP': ['1.1.1.1']}}}}
path = {'qualys-ip-list': {'json_path': []}}
mocker.patch.object(Qualysv2, 'format_and_validate_response', return_value=json_obj)
mocker.patch.object(Qualysv2, 'COMMANDS_PARSE_AND_OUTPUT_DATA', path)
dummy_response = requests.Response()
result = handle_general_result(dummy_response, 'qualys-ip-list')
assert result == json_obj
class TestParseTwoKeysDict:
def test_parse_two_keys_dict_unexpected_format(self):
"""
Given
- json object
When
- the json object has unexpected format
Then
- raise a KeyError Exception
"""
with pytest.raises(KeyError):
json_obj = {'not_key': ' ', 'not_val': ' '}
parse_two_keys_dict(json_obj)
def test_parse_two_keys_dict_expected_format(self):
"""
Given
- json object
When
- the json object has the expected format
Then
- return a new dictionary with correct key and value
"""
json_obj = {'KEY': 'a key', 'VALUE': 'a value'}
res = parse_two_keys_dict(json_obj)
assert res['a key'] == 'a value'
def test_parse_two_keys_dict_none_value(self):
"""
Given
- json object
When
- the json object is None
Then
- raise a TypeError Exception
"""
with pytest.raises(TypeError):
json_obj = None
parse_two_keys_dict(json_obj)
class TestChangeDictKeys:
def test_change_dict_keys_expected_format(self):
"""
Given
- dictionary to be changed
- dictionary with new keys' names
When
- the dictionaries are well formatted
Then
- return the dictionary with the new keys
"""
new_names_dict = {'old_name_1': 'new_name_1',
'old_name_2': 'new_name_2'}
dict_to_change = {'old_name_1': 'some_value_1',
'old_name_2': 'some_value_2'}
changed_dict = change_dict_keys(new_names_dict, dict_to_change)
assert changed_dict['new_name_1']
assert changed_dict['new_name_2']
assert 'old_name_1' not in changed_dict
assert 'old_name_2' not in changed_dict
def test_change_dict_keys_missing_key(self):
"""
Given
- dictionary to be changed
- dictionary with new keys' names
When
- the output dictionary is missing a key to be changed
Then
- change only the keys that exist
"""
new_names_dict = {'old_name_1': 'new_name_1',
'old_name_2': 'new_name_2'}
dict_to_change = {'old_name_2': 'some_value_2'}
changed_dict = change_dict_keys(new_names_dict, dict_to_change)
assert changed_dict['new_name_2']
assert 'new_name_1' not in changed_dict
assert 'old_name_1' not in changed_dict
assert 'old_name_2' not in changed_dict
def test_change_dict_keys_output_is_none(self):
"""
Given
- dictionary to be changed
- dictionary with new keys' names
When
- the output dictionary is None
Then
- raise a TypeError Exception
"""
with pytest.raises(TypeError):
new_names_dict = {'old_name_1': 'new_name_1',
'old_name_2': 'new_name_2'}
dict_to_change = None
changed_dict = change_dict_keys(new_names_dict, dict_to_change)
assert changed_dict['new_name_1']
assert changed_dict['new_name_2']
assert 'old_name_1' not in changed_dict
assert 'old_name_2' not in changed_dict
class TestCreateIPListDicts:
def test_create_ip_list_dicts_expected_format(self):
"""
Given
- dictionary of ip list command result
When
- the dictionary has the expected format
Then
- create a list of dictionaries
"""
ip_dict = {'Address': ['1.1.1.1', '1.2.3.4'],
'Range': ['1.1.1.3-1.1.2.1']}
dicts = create_ip_list_dicts(ip_dict)
assert len(dicts[0]) == 2
assert len(dicts[1]) == 1
def test_create_ip_list_dicts_expected_format_single_value(self):
"""
Given
- dictionary of ip list command result
When
- the dictionary has the expected format but only single value
Then
- create a list of dictionaries
"""
ip_dict = {'Address': '1.1.1.1'}
dicts = create_ip_list_dicts(ip_dict)
assert len(dicts) == 1
assert len(dicts[0]) == 1
def test_create_ip_list_dicts_expected_format_single_value_is_dict(self):
"""
Given
- dictionary of ip list command result
When
- the dictionary has the expected format but only single value and
is a dictionary of values
Then
- create a list of dictionaries
"""
ip_dict = {'Address': {'key1': 'value1', 'key2': 'value2'}}
dicts = create_ip_list_dicts(ip_dict)
assert len(dicts) == 1
assert len(dicts[0]) == 1
def test_create_ip_list_dicts_bad_keys(self):
"""
Given
- dictionary of ip list command result
When
- the dictionary has wrong keys
Then
- raise DemistoException exception
"""
with pytest.raises(DemistoException):
ip_dict = {'bad_key_1': ['1.1.1.1', '1.2.3.4'],
'bad_key_2': ['1.1.1.3-1.1.2.1']}
create_ip_list_dicts(ip_dict)
def test_create_ip_list_dicts_one_good_key(self):
"""
Given
- dictionary of ip list command result
When
- the dictionary has one wrong key
Then
- change only one key
"""
ip_dict = {'Address': ['1.1.1.1', '1.2.3.4'],
'bad_key_2': ['1.1.1.3-1.1.2.1']}
dicts = create_ip_list_dicts(ip_dict)
assert len(dicts) == 1
assert len(dicts[0]) == 2
def test_create_ip_list_dicts_none_json(self):
"""
Given
- dictionary of ip list command result
When
- the dictionary is None
Then
- raise TypeError Exception
"""
with pytest.raises(TypeError):
ip_dict = None
create_ip_list_dicts(ip_dict)
class TestLimitIPResults:
def test_limit_ip_results_high_limit(self):
"""
Given
- IPs data that contains both single IP's and ranges
- Limit value
When
- the limit value is high enough so data will be taken from both lists
Then
- Change the lists so all addresses will show and part of the Ranges
"""
data = {
'Address': ['1.1.1.1', '1.2.3.4'],
'Range': ['1.4.3.1-1.4.3.5', '1.4.3.6-1.4.3.9']
}
limit = 3
data = limit_ip_results(data, limit)
assert len(data['Address']) == 2
assert len(data['Range']) == 1
def test_limit_ip_results_low_limit(self):
"""
Given
- IPs data that contains both single IP's and ranges
- Limit value
When
- Limit values is low
Then
- Data will be changed so only Address's list can be shown
"""
data = {
'Address': ['1.1.1.1', '1.2.3.4'],
'Range': ['1.4.3.1-1.4.3.5', '1.4.3.6-1.4.3.9']
}
limit = 1
limit_ip_results(data, limit)
assert len(data['Address']) == 1
assert len(data['Range']) == 0
def test_limit_ip_results_only_range_entry(self):
"""
Given
- IPs data that contains only ranges
- Limit value
When
- limit value will be applied only to ranges entry
Then
- data will have a Range list with up to 'limit' entries
"""
data = {
'Range': ['1.4.3.1-1.4.3.5', '1.4.3.6-1.4.3.9']
}
limit = 1
limit_ip_results(data, limit)
assert len(data['Range']) == 1
def test_limit_ip_results_single_ip_and_range(self):
"""
        Given
        - IPs data that contains a single address string and a single range string
        - Limit value
        When
        - the limit value is lower than the total number of entries
        Then
        - keep the single address and empty the range list
"""
data = {
'Address': '1.1.1.1',
'Range': '1.4.3.1-1.4.3.5'
}
limit = 1
limit_ip_results(data, limit)
assert data['Address'] == '1.1.1.1'
assert len(data['Range']) == 0
class TestBuildArgsDict:
def test_build_api_args_dict_all_args(self):
"""
Given
- arguments received by the user
- command name to be run
When
        - all arguments were provided and there are only API args
Then
- create a dictionary with all the arguments
"""
args = {'ips': 'ip',
'network_id': 'id',
'tracking_method': 'method',
'compliance_enabled': True}
command_args_data = COMMANDS_ARGS_DATA['qualys-ip-list']
build_args_dict(args, command_args_data, False)
assert Qualysv2.args_values == args
def test_build_api_args_dict_missing_args(self):
"""
Given
- arguments received by the user
- command name to be run
When
- Some arguments were not provided and there are only API args
Then
- create a dictionary with the provided arguments values and
None value for arguments that were not provided
"""
args = {'ips': 'ip'}
command_args_data = COMMANDS_ARGS_DATA['qualys-ip-list']
build_args_dict(args, command_args_data, False)
assert Qualysv2.args_values == args
def test_build_api_args_dict_empty_date(self):
"""
Given
- arguments received by the user
- command name to be run
When
        - a date argument is provided as an empty string
        Then
        - the empty date argument is dropped, leaving the arguments dictionary empty
"""
args = {'published_before': ''}
command_args_data = COMMANDS_ARGS_DATA['qualys-vulnerability-list']
build_args_dict(args, command_args_data, False)
assert Qualysv2.args_values == {}
def test_build_inner_args_dict_all_args(self):
"""
Given
- arguments received by the user
- command name to be run
When
        - all arguments were provided and there are both API args and inner-use args
Then
- create a dictionary with all the arguments
"""
args = {'id': 'id', 'file_format': 'xml'}
command_args_data = COMMANDS_ARGS_DATA['qualys-report-fetch']
build_args_dict(args, command_args_data, True)
assert Qualysv2.inner_args_values == {'file_format': 'xml'}
def test_build_args_dict_none_args(self):
"""
Given
- arguments received by the user
- command name to be run
When
- No arguments were provided
Then
- create a dictionary with no arguments' values
"""
args = None
command_args_data = COMMANDS_ARGS_DATA['test-module']
build_args_dict(args, command_args_data, False)
assert Qualysv2.args_values == {}
def test_build_args_dict_date_args(self):
"""
Given:
- Cortex XSOAR arguments.
- Command arg names.
When:
- Parsing date parameters.
Then:
- Ensure date parameters values are updated accordingly.
"""
args = {'published_before': '1640508554',
'launched_after_datetime': '2021-12-26T08:49:29Z',
'start_date': '2021-12-26T08:49:29Z'}
expected_result = {'launched_after_datetime': '2021-12-26',
'published_before': '2021-12-26',
'start_date': '12/26/2021'}
build_args_dict(args, {'args': ['published_before', 'launched_after_datetime', 'start_date']}, False)
assert Qualysv2.args_values == expected_result
def test_build_args_dict_default_added_depended_args(self):
"""
Given:
- Cortex XSOAR arguments.
- Command arg names.
When:
        - There are arguments that should be added depending on another argument.
Then:
- Ensure arguments are added as expected.
"""
args = {'arg_to_depend_on': '1'}
expected_result = {'arg_to_depend_on': '1', 'dep1': 2, 'dep2': 3}
build_args_dict(args, {'args': ['arg_to_depend_on'],
'default_added_depended_args': {'arg_to_depend_on': {'dep1': 2, 'dep2': 3}}}, False)
assert Qualysv2.args_values == expected_result
def test_handle_general_result_missing_output_builder():
"""
Given
- raw xml result
- command name
- output builder function
When
- output builder is None
Then
- raise a TypeError exception, None is not callable, must be provided
"""
with pytest.raises(TypeError):
raw_xml_response = '<?xml version="1.0" encoding="UTF-8" ?>' \
'<!DOCTYPE SIMPLE_RETURN SYSTEM' \
' "https://qualysapi.qg2.apps.qualys.com/api/2.0/simple_return.dtd">' \
'<SIMPLE_RETURN><RESPONSE>' \
'<DATETIME>2021-03-24T15:40:23Z</DATETIME>' \
'<TEXT>IPs successfully added to Vulnerability Management</TEXT>' \
'</RESPONSE></SIMPLE_RETURN>'
command_name = 'qualys-ip-add'
handle_general_result(result=raw_xml_response, command_name=command_name, output_builder=None)
class TestHostDetectionOutputBuilder:
DETECTION_INPUTS = [({'HOST_LIST': {'HOST_ITEM': []}}, '### Host Detection List\n\n**No entries.**\n', []),
({'HOST_LIST': {'HOST_ITEM': [{'ID': 'ID123', 'IP': '1.1.1.1', 'DNS_DATA': {'data': 'dns data'},
'DETECTION_LIST': {
'DETECTION': [
{'QID': '123', 'RESULTS': 'FOUND DETECTION'}]}}]}},
"### Host Detection List\n\n|DETECTIONS|DNS_DATA|ID|IP|\n|---|---|---|---|\n| {"
"'QID': '123', 'RESULTS': 'FOUND DETECTION'} | data: dns data | ID123 | "
"1.1.1.1 |\n", [{'DETECTION_LIST': {'DETECTION': [{'QID': '123',
'RESULTS': 'FOUND DETECTION'}]},
'DNS_DATA': {'data': 'dns data'},
'ID': 'ID123',
'IP': '1.1.1.1'}])
]
@pytest.mark.parametrize('result, readable, expected_outputs', DETECTION_INPUTS)
def test_build_host_list_detection_outputs(self, result, readable, expected_outputs):
"""
Given:
- Result of Qualys service for host list detection.
When:
- Parsing result into outputs and readable output.
Then:
        - Ensure results are parsed as expected.
"""
Qualysv2.inner_args_values['limit'] = 1
assert build_host_list_detection_outputs({'command_parse_and_output_data': COMMANDS_PARSE_AND_OUTPUT_DATA[
'qualys-host-list-detection'], 'handled_result': result}) == (expected_outputs, readable)
class MockResponse:
    def __init__(self, text, status_code, json=None, reason=None):
        self.text = text
        # store the payload under a private name so it does not shadow the json() method
        self._json = json
        self.status_code = status_code
        self.reason = reason
    def json(self):
        if self._json is not None:
            return self._json
        raise Exception('No JSON')
class TestClientClass:
ERROR_HANDLER_INPUTS = [
(MockResponse('''<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE SIMPLE_RETURN SYSTEM "https://qualysapi.qg2.apps.qualys.com/api/2.0/simple_return.dtd">
<SIMPLE_RETURN>
<RESPONSE>
<DATETIME>2021-12-21T08:59:39Z</DATETIME>
<CODE>999</CODE>
<TEXT>Internal error. Please contact customer support.</TEXT>
<ITEM_LIST>
<ITEM>
<KEY>Incident Signature</KEY>
<VALUE>8ecaf66401cf247f5a6d75afd56bf847</VALUE>
</ITEM>
</ITEM_LIST>
</RESPONSE>
</SIMPLE_RETURN>''', 500),
'Error in API call [500] - None\nError Code: 999\nError Message: Internal error. Please '
'contact customer support.'),
(MockResponse('Invalid XML', 500), 'Error in API call [500] - None\nInvalid XML')
]
@pytest.mark.parametrize('response, error_message', ERROR_HANDLER_INPUTS)
def test_error_handler(self, response, error_message):
"""
Given:
- Qualys error response
When:
- Parsing error to readable message
Then:
- Ensure readable message is as expected
"""
client: Client = Client('test.com', 'testuser', 'testpassword', False, False, None)
with pytest.raises(DemistoException, match=re.escape(error_message)):
client.error_handler(response)
class TestInputValidations:
DEPENDANT_ARGS = {'day_of_month': 'frequency_months', 'day_of_week': 'frequency_months',
'week_of_month': 'frequency_months', 'weekdays': 'frequency_weeks', }
VALIDATE_DEPENDED_ARGS_INPUT = [({}, {}),
({'required_depended_args': DEPENDANT_ARGS}, {}),
({'required_depended_args': DEPENDANT_ARGS},
{k: 3 for k, v in DEPENDANT_ARGS.items() if v == 'frequency_months'})]
@pytest.mark.parametrize('command_data, args', VALIDATE_DEPENDED_ARGS_INPUT)
def test_validate_depended_args_valid(self, command_data: Dict, args: Dict):
"""
Given:
- Command data.
- Cortex XSOAR arguments.
When:
- Validating depended args are supplied as expected.
Then:
- Ensure no exception is thrown.
"""
Qualysv2.args_values = args
validate_depended_args(command_data)
def test_validate_depended_args_invalid(self):
"""
Given:
- Command data.
- Cortex XSOAR arguments.
When:
- Validating depended args are not supplied as expected.
Then:
- Ensure exception is thrown.
"""
Qualysv2.args_values = {'frequency_months': 1}
with pytest.raises(DemistoException,
match='Argument day_of_month is required when argument frequency_months is given.'):
validate_depended_args({'required_depended_args': self.DEPENDANT_ARGS})
EXACTLY_ONE_GROUP_ARGS = [['asset_group_ids', 'asset_groups', 'ip', ],
['frequency_days', 'frequency_weeks', 'frequency_months', ],
['scanners_in_ag', 'default_scanner', ], ]
EXACTLY_ONE_ARGS_INPUT = [({}, {}),
({'required_groups': EXACTLY_ONE_GROUP_ARGS},
{'asset_group_ids': 1, 'scanners_in_ag': 1, 'frequency_days': 1}),
({'required_groups': EXACTLY_ONE_GROUP_ARGS},
{'asset_groups': 1, 'scanners_in_ag': 1, 'frequency_weeks': 1}),
({'required_groups': EXACTLY_ONE_GROUP_ARGS},
{'ip': '1.1.1.1', 'default_scanner': 1, 'frequency_months': 1})
]
@pytest.mark.parametrize('command_data, args', EXACTLY_ONE_ARGS_INPUT)
def test_validate_required_group_valid(self, command_data: Dict, args: Dict):
"""
Given:
- Command data.
- Cortex XSOAR arguments.
When:
- Validating required groups are supplied as expected.
Then:
- Ensure no exception is thrown.
"""
Qualysv2.args_values = args
validate_required_group(command_data)
EXACTLY_ONE_INVALID_INPUT = [({}), ({'ip': '1.1.1.1', 'asset_group_ids': 1, 'frequency_months': 1})]
@pytest.mark.parametrize('args', EXACTLY_ONE_INVALID_INPUT)
def test_validate_required_group_invalid(self, args):
"""
Given:
- Command data.
- Cortex XSOAR arguments.
When:
- Validating required groups are not supplied as expected.
Then:
- Ensure exception is thrown.
"""
Qualysv2.args_values = args
err_msg = "Exactly one of the arguments ['asset_group_ids', 'asset_groups', 'ip'] must be provided."
with pytest.raises(DemistoException, match=re.escape(err_msg)):
validate_required_group({'required_groups': self.EXACTLY_ONE_GROUP_ARGS})
AT_MOST_ONE_GROUP_ARGS = [['asset_group_ids', 'asset_groups', 'ip', ],
['frequency_days', 'frequency_weeks', 'frequency_months', ],
['scanners_in_ag', 'default_scanner', ], ]
AT_MOST_ONE_ARGS_INPUT = [({}, {}),
({'at_most_one_groups': AT_MOST_ONE_GROUP_ARGS}, {}),
({'at_most_one_groups': AT_MOST_ONE_GROUP_ARGS},
{'asset_group_ids': 1, 'scanners_in_ag': 1, 'frequency_days': 1}),
({'at_most_one_groups': AT_MOST_ONE_GROUP_ARGS},
{'asset_groups': 1, 'scanners_in_ag': 1, 'frequency_weeks': 1}),
({'at_most_one_groups': AT_MOST_ONE_GROUP_ARGS},
{'ip': '1.1.1.1', 'default_scanner': 1, 'frequency_months': 1})
]
@pytest.mark.parametrize('command_data, args', AT_MOST_ONE_ARGS_INPUT)
def test_validate_at_most_one_group_valid(self, command_data: Dict, args: Dict):
"""
Given:
- Command data.
- Cortex XSOAR arguments.
When:
        - Validating at most one argument from each group is supplied.
Then:
- Ensure no exception is thrown.
"""
Qualysv2.args_values = args
validate_at_most_one_group(command_data)
def test_validate_at_most_one_group_invalid(self):
"""
Given:
- Command data.
- Cortex XSOAR arguments.
When:
        - Validating more than one argument from the same group is supplied.
Then:
- Ensure exception is thrown.
"""
Qualysv2.args_values = {'scanners_in_ag': 1, 'default_scanner': 1}
err_msg = "At most one of the following args can be given: ['scanners_in_ag', 'default_scanner']"
with pytest.raises(DemistoException, match=re.escape(err_msg)):
validate_at_most_one_group({'at_most_one_groups': self.AT_MOST_ONE_GROUP_ARGS})
|
|
"""
Install a Custom Terraform Version for CloudBolt
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Install a user-specified version of Terraform on CloudBolt.
This can be set as the Global Default in Admin Miscellaneous Settings.
Version Requirements
~~~~~~~~~~~~~~~~~~~~
CloudBolt 9.0
"""
import requests
import os
import hashlib
import zipfile
import tempfile
import shutil
import sys
from common.methods import set_progress
from django.conf import settings
TERRAFORM_DIR = settings.TERRAFORM_DIR
TERRAFORM_BIN_DIR = os.path.join(TERRAFORM_DIR, 'bin')
def run(job, *args, **kwargs):
set_progress("Terraforming your CloudBolt...")
# Initialize variables
version_name = "{{ version_name }}"
zip_url = "{{ zip_url }}"
status, message, error = install_custom_terraform_binary(zip_url, version_name)
return status, message, error
def install_custom_terraform_binary(zip_url: str, version_name: str):
"""
Installs a version of Terraform from a custom ZIP URL and verifies it was installed correctly.
Returns a Tuple:
Status string
Success message
Error message
"""
terraform_binary_base_directory = TERRAFORM_BIN_DIR
response = None
try:
# Creates a temporary directory
temp_dir = _create_temp_dir()
if temp_dir is None:
raise Exception('Failed to create a temp directory for the Terraform installation')
# Download the ZIP to a temporary directory
zip_file_path = _download_zip_file(zip_url, temp_dir.name)
if zip_file_path is None:
raise Exception(f'Failed to download the zip {zip_url} to {temp_dir.name}')
if _is_zip_file(zip_file_path) is not True:
raise Exception(f'The file provided at {zip_url} was not a Zip file!')
# Unpacks the ZIP file
if _unzip_file(zip_file_path, temp_dir.name) is not True:
raise Exception(f'Failed to unzip {zip_file_path}')
# Verifies a file named 'terraform' was unpacked
terraform_binary_path = _get_terraform_binary_path(temp_dir.name)
if terraform_binary_path is None:
raise Exception(f'Failed to find a binary called `terraform` in the unpacked {zip_url}')
# Verifies 'terraform' is a Linux binary
if _is_linux_binary(terraform_binary_path) is not True:
            raise Exception('The provided binary was not a Linux binary. Terraform Linux zips usually include `linux` in the name.')
# Moves the `terraform` binary to /var/opt/cloudbolt/terraform/bin/terraform_version
new_terraform_binary_path = os.path.join(terraform_binary_base_directory, f'terraform_{version_name}')
if _move_terraform_version(terraform_binary_path, new_terraform_binary_path) is not True:
raise Exception(f'Failed to copy terraform_{version_name} to {terraform_binary_base_directory}')
if _set_terraform_binary_permissions(new_terraform_binary_path) is not True:
raise Exception(f'Failed to set permissions on {new_terraform_binary_path}')
except Exception as err:
response = err.args[0]
    finally:
        # Cleans up temporary files (skipped if the temp directory was never created)
        if temp_dir is not None:
            cleanup_status = _cleanup_temp_dir(temp_dir)
            if cleanup_status is not True:
                response = ('WARNING', '', f'Failed to clean up temporary files on disk in {temp_dir.name}')
if response is None:
        return 'SUCCESS', f'Successfully installed Terraform! Go to Miscellaneous Settings to set terraform_{version_name} as the CloudBolt global default.', ''
else:
return 'FAILURE', '', response
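# Illustrative direct invocation (not part of the original plug-in): the URL and
# version below are hypothetical examples; in CloudBolt the values normally come
# from the `{{ zip_url }}` / `{{ version_name }}` job parameters above.
#
#   status, message, error = install_custom_terraform_binary(
#       "https://releases.hashicorp.com/terraform/0.12.31/terraform_0.12.31_linux_amd64.zip",
#       "0.12.31")
#   set_progress(f"{status}: {message or error}")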
def _create_temp_dir():
"""
Returns a Temporary Directory.
If that fails, it returns None
"""
try:
return tempfile.TemporaryDirectory()
except:
return None
def _download_zip_file(zip_url, temp_dir):
"""
Downloads a given zip URL into the desired temp directory.
"""
with requests.get(zip_url, stream=True) as request:
zip_fname = zip_url.split('/')[-1]
zip_file_path = os.path.join(temp_dir, zip_fname)
with open(zip_file_path, 'wb') as zip_file:
zip_file.write(request.content)
if os.path.isfile(zip_file_path):
return zip_file_path
else:
return None
def _is_zip_file(zip_file_path):
"""
Return True or False if a given path is a Zip file
"""
return zipfile.is_zipfile(zip_file_path)
def _unzip_file(zip_file_path, temp_dir):
"""
Unzips a zip_file to the given temp_dir.
Returns True if successful, False if unsuccessful.
"""
with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:
zip_ref.extractall(temp_dir)
return True
def _get_terraform_binary_path(temp_dir):
"""
Returns the path to the `terraform` binary in the given temp_dir.
Returns None otherwise.
"""
terraform_location = os.path.join(temp_dir, 'terraform')
if os.path.isfile(terraform_location):
return terraform_location
else:
return None
def _is_linux_binary(fpath):
"""
Reads a magic byte and determines if the file given is a Linux (ELF) binary.
"""
with open(fpath, 'rb') as f:
return f.read(4) == b'\x7fELF'
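# Note (illustrative): the first four bytes of an ELF executable are 0x7f 'E' 'L' 'F'
# (hex 7f 45 4c 46), which is what the comparison above checks; a Windows or macOS
# Terraform build would fail this test.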
def _move_terraform_version(temp_terraform_binary_location, new_terraform_binary_location):
"""
Moves the `terraform` file in the temp directory to
/var/opt/cloudbolt/terraform/bin/terraform_{version}
return True if successful
return False if not successful
"""
try:
shutil.move(temp_terraform_binary_location, new_terraform_binary_location)
return True
except FileNotFoundError as e:
set_progress(e)
return False
def _set_terraform_binary_permissions(binary_path):
"""
Sets the new terraform binary to be executable.
"""
try:
os.chmod(binary_path, 0o755)
try:
shutil.chown(binary_path, user='apache', group='apache')
except:
set_progress(f'Unable to set permissions to apache:apache on {binary_path}. This may cause problems!')
pass
return True
except OSError:
return False
def _cleanup_temp_dir(temp_dir):
"""
Runs cleanup on a TemporaryDirectory
If successful it returns True
If that fails it returns None
"""
try:
temp_dir.cleanup()
return True
except:
return None
|
|
import sys
sys.path.append('.')
import lxmls.sequences.crf_online as crfo
import lxmls.sequences.structured_perceptron as spc
import lxmls.readers.pos_corpus as pcc
import lxmls.sequences.id_feature as idfc
import lxmls.sequences.extended_feature as exfc
print "CRF Exercise"
corpus = pcc.PostagCorpus()
train_seq = corpus.read_sequence_list_conll("data/train-02-21.conll",max_sent_len=10, max_nr_sent=1000)
test_seq = corpus.read_sequence_list_conll("data/test-23.conll",max_sent_len=10, max_nr_sent=1000)
dev_seq = corpus.read_sequence_list_conll("data/dev-22.conll",max_sent_len=10, max_nr_sent=1000)
feature_mapper = idfc.IDFeatures(train_seq)
feature_mapper.build_features()
crf_online = crfo.CRFOnline(corpus.word_dict, corpus.tag_dict, feature_mapper)
crf_online.num_epochs = 20
crf_online.train_supervised(train_seq)
pred_train = crf_online.viterbi_decode_corpus(train_seq)
pred_dev = crf_online.viterbi_decode_corpus(dev_seq)
pred_test = crf_online.viterbi_decode_corpus(test_seq)
eval_train = crf_online.evaluate_corpus(train_seq, pred_train)
eval_dev = crf_online.evaluate_corpus(dev_seq, pred_dev)
eval_test = crf_online.evaluate_corpus(test_seq, pred_test)
print "CRF - ID Features Accuracy Train: %.3f Dev: %.3f Test: %.3f"%(eval_train, eval_dev, eval_test)
feature_mapper = exfc.ExtendedFeatures(train_seq)
feature_mapper.build_features()
crf_online = crfo.CRFOnline(corpus.word_dict, corpus.tag_dict, feature_mapper)
crf_online.num_epochs = 20
crf_online.train_supervised(train_seq)
pred_train = crf_online.viterbi_decode_corpus(train_seq)
pred_dev = crf_online.viterbi_decode_corpus(dev_seq)
pred_test = crf_online.viterbi_decode_corpus(test_seq)
eval_train = crf_online.evaluate_corpus(train_seq, pred_train)
eval_dev = crf_online.evaluate_corpus(dev_seq, pred_dev)
eval_test = crf_online.evaluate_corpus(test_seq, pred_test)
print "CRF - Extended Features Accuracy Train: %.3f Dev: %.3f Test: %.3f"%(eval_train, eval_dev,eval_test)
print "Perceptron Exercise"
feature_mapper = idfc.IDFeatures(train_seq)
feature_mapper.build_features()
sp = spc.StructuredPerceptron(corpus.word_dict, corpus.tag_dict, feature_mapper)
sp.num_epochs = 20
sp.train_supervised(train_seq)
pred_train = sp.viterbi_decode_corpus(train_seq)
pred_dev = sp.viterbi_decode_corpus(dev_seq)
pred_test = sp.viterbi_decode_corpus(test_seq)
eval_train = sp.evaluate_corpus(train_seq, pred_train)
eval_dev = sp.evaluate_corpus(dev_seq, pred_dev)
eval_test = sp.evaluate_corpus(test_seq, pred_test)
print "Structured Perceptron - ID Features Accuracy Train: %.3f Dev: %.3f Test: %.3f"%(eval_train,eval_dev,eval_test)
feature_mapper = exfc.ExtendedFeatures(train_seq)
feature_mapper.build_features()
sp = spc.StructuredPerceptron(corpus.word_dict, corpus.tag_dict, feature_mapper)
sp.num_epochs = 20
sp.train_supervised(train_seq)
pred_train = sp.viterbi_decode_corpus(train_seq)
pred_dev = sp.viterbi_decode_corpus(dev_seq)
pred_test = sp.viterbi_decode_corpus(test_seq)
eval_train = sp.evaluate_corpus(train_seq, pred_train)
eval_dev = sp.evaluate_corpus(dev_seq, pred_dev)
eval_test = sp.evaluate_corpus(test_seq, pred_test)
print "Structured Perceptron - Extended Features Accuracy Train: %.3f Dev: %.3f Test: %.3f"%(eval_train,eval_dev,eval_test)
#
#
#import sequences.structured_perceptron as spc
#import sequences.crf_batch as crfc
#import sequences.crf_online as crfo
#import readers.pos_corpus as pcc
#import sequences.id_feature as idfc
#import sequences.extended_feature as exfc
#import pdb
#
#corpus = pcc.PostagCorpus()
#train_seq = corpus.read_sequence_list_conll("../data/train-02-21.conll",max_sent_len=10,max_nr_sent=1000)
#test_seq = corpus.read_sequence_list_conll("../data/test-23.conll",max_sent_len=10,max_nr_sent=1000)
#dev_seq = corpus.read_sequence_list_conll("../data/dev-22.conll",max_sent_len=10,max_nr_sent=1000)
##corpus.add_sequence_list(train_seq)
##id_f = idfc.IDFeatures(corpus)
#feature_mapper = idfc.IDFeatures(train_seq)
#feature_mapper.build_features()
#
#
#print "Perceptron Exercise"
#
#sp = spc.StructuredPerceptron(corpus.word_dict, corpus.tag_dict, feature_mapper)
#sp.num_epochs = 20
#sp.train_supervised(train_seq)
#
#pred_train = sp.viterbi_decode_corpus(train_seq)
#pred_dev = sp.viterbi_decode_corpus(dev_seq)
#pred_test = sp.viterbi_decode_corpus(test_seq)
#
#eval_train = sp.evaluate_corpus(train_seq, pred_train)
#eval_dev = sp.evaluate_corpus(dev_seq, pred_dev)
#eval_test = sp.evaluate_corpus(test_seq, pred_test)
#
#print "Structured Perceptron - ID Features Accuracy Train: %.3f Dev: %.3f Test: %.3f"%(eval_train,eval_dev,eval_test)
#
#
#feature_mapper = exfc.ExtendedFeatures(train_seq)
#feature_mapper.build_features()
#sp = spc.StructuredPerceptron(corpus.word_dict, corpus.tag_dict, feature_mapper)
#sp.num_epochs = 20
#sp.train_supervised(train_seq)
#
#pred_train = sp.viterbi_decode_corpus(train_seq)
#pred_dev = sp.viterbi_decode_corpus(dev_seq)
#pred_test = sp.viterbi_decode_corpus(test_seq)
#
#eval_train = sp.evaluate_corpus(train_seq, pred_train)
#eval_dev = sp.evaluate_corpus(dev_seq, pred_dev)
#eval_test = sp.evaluate_corpus(test_seq, pred_test)
#
#print "Structured Perceptron - Extended Features Accuracy Train: %.3f Dev: %.3f Test: %.3f"%(eval_train,eval_dev,eval_test)
#
#
##pdb.set_trace()
#
## print "CRF Exercise"
#
#feature_mapper = idfc.IDFeatures(train_seq)
#feature_mapper.build_features()
#
#print "Online CRF Exercise"
#
#crf_online = crfo.CRFOnline(corpus.word_dict, corpus.tag_dict, feature_mapper)
#crf_online.num_epochs = 20
## crf_online.initial_learning_rate = 10 #100 #1.0/crf_online.regularizer
#crf_online.train_supervised(train_seq)
#
#pred_train = crf_online.viterbi_decode_corpus(train_seq)
#pred_dev = crf_online.viterbi_decode_corpus(dev_seq)
#pred_test = crf_online.viterbi_decode_corpus(test_seq)
#
#eval_train = crf_online.evaluate_corpus(train_seq, pred_train)
#eval_dev = crf_online.evaluate_corpus(dev_seq, pred_dev)
#eval_test = crf_online.evaluate_corpus(test_seq, pred_test)
#
#print "Online CRF - ID Features Accuracy Train: %.3f Dev: %.3f Test: %.3f"%(eval_train,eval_dev,eval_test)
#
#
#crf = crfc.CRF_batch(corpus.word_dict, corpus.tag_dict, feature_mapper)
#crf.train_supervised(train_seq)
#
#pred_train = crf.viterbi_decode_corpus(train_seq)
#pred_dev = crf.viterbi_decode_corpus(dev_seq)
#pred_test = crf.viterbi_decode_corpus(test_seq)
#
#eval_train = crf.evaluate_corpus(train_seq, pred_train)
#eval_dev = crf.evaluate_corpus(dev_seq, pred_dev)
#eval_test = crf.evaluate_corpus(test_seq, pred_test)
#
#print "CRF - ID Features Accuracy Train: %.3f Dev: %.3f Test: %.3f"%(eval_train,eval_dev,eval_test)
#
#
##pdb.set_trace()
#
#
#feature_mapper = exfc.ExtendedFeatures(train_seq)
#feature_mapper.build_features()
#
#
#
#print "Online CRF Exercise"
#
#crf_online = crfo.CRFOnline(corpus.word_dict, corpus.tag_dict, feature_mapper)
#crf_online.num_epochs = 20
## for eta in [1, 10, 100, 1000]:
## crf_online.initial_learning_rate = 10 #1.0/crf_online.regularizer
#crf_online.train_supervised(train_seq)
#
#pred_train = crf_online.viterbi_decode_corpus(train_seq)
#pred_dev = crf_online.viterbi_decode_corpus(dev_seq)
#pred_test = crf_online.viterbi_decode_corpus(test_seq)
#
#eval_train = crf_online.evaluate_corpus(train_seq, pred_train)
#eval_dev = crf_online.evaluate_corpus(dev_seq, pred_dev)
#eval_test = crf_online.evaluate_corpus(test_seq, pred_test)
#
#print "Online CRF - Extended Features Accuracy Train: %.3f Dev: %.3f Test: %.3f"%(eval_train,eval_dev,eval_test)
#
##pdb.set_trace()
#
#
#crf = crfc.CRF_batch(corpus.word_dict, corpus.tag_dict, feature_mapper)
#crf.train_supervised(train_seq)
#
#pred_train = crf.viterbi_decode_corpus(train_seq)
#pred_dev = crf.viterbi_decode_corpus(dev_seq)
#pred_test = crf.viterbi_decode_corpus(test_seq)
#
#eval_train = crf.evaluate_corpus(train_seq, pred_train)
#eval_dev = crf.evaluate_corpus(dev_seq, pred_dev)
#eval_test = crf.evaluate_corpus(test_seq, pred_test)
#
#print "CRF - Extended Features Accuracy Train: %.3f Dev: %.3f Test: %.3f"%(eval_train,eval_dev,eval_test)
#
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
"""
This script finds missing segbits in the database.
For each tile the script loads its 'tile_type_*.json' file and looks for all
non-pseudo pips there. Next it loads corresponding 'segbits_*.db' file (if found)
and checks if those pips are listed there.
Missing segbits for pips are reported as well as missing segbit files.
"""
import sys
import logging
import json
import argparse
import os
import re
# =============================================================================
def read_pips_from_tile(tile_file):
"""
    Loads pip definitions from a tile type JSON file and returns non-pseudo
PIP name strings. Names are formatted as <dst_wire>.<src_wire>
"""
with open(tile_file, "r") as fp:
root = json.load(fp)
pips = root["pips"]
pip_names = []
for pip in pips.values():
if int(pip["is_pseudo"]) == 0:
pip_names.append(
"{}.{}".format(pip["dst_wire"], pip["src_wire"]))
return pip_names
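# Illustrative input/output sketch (tile and wire names below are made up):
# given a tile_type_EXAMPLE.json containing
#
#   {"pips": {"0": {"is_pseudo": "0", "dst_wire": "CLK0", "src_wire": "GCLK_B0"},
#             "1": {"is_pseudo": "1", "dst_wire": "A",    "src_wire": "B"}}}
#
# read_pips_from_tile("tile_type_EXAMPLE.json") returns ["CLK0.GCLK_B0"];
# the pseudo pip is skipped.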
def read_ppips(ppips_file):
"""
Loads and parses ppips_*.db file. Returns a dict indexed by PIP name which
contains their types ("always", "default" or "hint")
"""
ppips = {}
with open(ppips_file, "r") as fp:
for line in fp.readlines():
line = line.split()
if len(line) == 2:
full_pip_name = line[0].split(".")
pip_name = ".".join(full_pip_name[1:])
ppips[pip_name] = line[1]
return ppips
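# Illustrative sketch (names are made up): a ppips_example.db line such as
#
#   EXAMPLE.CLK0.GCLK_B0 default
#
# is keyed without the leading tile prefix, so read_ppips("ppips_example.db")
# returns {"CLK0.GCLK_B0": "default"}.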
def read_segbits(segbits_file):
"""
Loads and parses segbits_*.db file. Returns only segbit names.
"""
segbits = []
with open(segbits_file, "r") as fp:
for line in fp.readlines():
line = line.split()
if len(line) > 1:
fields = line[0].split(".")
segbit = ".".join(fields[1:])
segbits.append(segbit)
return segbits
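# Illustrative sketch (names and bit coordinates are made up): a segbits line
#
#   EXAMPLE.CLK0.GCLK_B0 28_519 29_518
#
# contributes only its pip name, so read_segbits() would return ["CLK0.GCLK_B0"].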
# =============================================================================
def main(argv):
"""
    Main entry point: parses arguments and reports missing segbits per tile type.
"""
exitcode = 0
# Parse arguments
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"--db-root", type=str, required=True, help="Database root")
parser.add_argument(
"--verbose", type=int, default=0, help="Verbosity level 0-5")
parser.add_argument(
"--skip-tiles",
type=str,
nargs="*",
default=[],
help="Tile type name regex list for tile types to skip")
parser.add_argument(
"--incl-tiles",
type=str,
nargs="*",
default=[],
help="Tile type name regex list for tile types to include")
args = parser.parse_args(argv[1:])
logging.basicConfig(level=50 - args.verbose * 10, format="%(message)s")
# List files in DB root
files = os.listdir(args.db_root)
# List tile types
tile_types = []
for file in files:
        match = re.match(r"^tile_type_(\w+)\.json$", file)
if match:
tile_types.append(match.group(1))
tile_types.sort()
# Look for missing bits
for tile_type in tile_types:
# Check if we should include this tile
do_skip = len(args.incl_tiles) > 0
for pattern in args.incl_tiles:
if re.match(pattern, tile_type):
do_skip = False
break
# Check if we should skip this tile
for pattern in args.skip_tiles:
if re.match(pattern, tile_type):
do_skip = True
break
if do_skip:
continue
logging.critical(tile_type)
# DB file names
tile_file = os.path.join(
args.db_root, "tile_type_{}.json".format(tile_type.upper()))
ppips_file = os.path.join(
args.db_root, "ppips_{}.db".format(tile_type.lower()))
segbits_file = os.path.join(
args.db_root, "segbits_{}.db".format(tile_type.lower()))
# Load pips
pips = read_pips_from_tile(tile_file)
# Load ppips (if any)
if os.path.isfile(ppips_file):
ppips = read_ppips(ppips_file)
else:
ppips = {}
# Load segbits (if any)
if os.path.isfile(segbits_file):
segbits = read_segbits(segbits_file)
else:
segbits = []
# There are non-pseudo pips in this tile
if len(pips):
missing_bits = 0
known_bits = 0
# Build a list of pips to check. If a pip is listed in the ppips
# file and it is not "default" then make it a pseudo one
pips_to_check = []
for pip in pips:
if pip in ppips.keys() and ppips[pip] != "default":
continue
pips_to_check.append(pip)
# Missing segbits file
if len(segbits) == 0:
missing_bits = len(pips_to_check)
logging.critical(" MISSING: no segbits file!")
exitcode = -1
# Segbits file present
else:
# Check pips
for pip in pips_to_check:
if pip not in segbits:
# A "default" pip
if pip in ppips.keys() and ppips[pip] == "default":
missing_bits += 1
logging.error(
" WARNING: no bits for pip '{}' which defaults to VCC_WIRE"
.format(pip))
exitcode = -1
# A regular pip
else:
missing_bits += 1
logging.error(
" MISSING: no bits for pip '{}'".format(pip))
exitcode = -1
# The pip has segbits
else:
known_bits += 1
# Report missing bit count
if missing_bits > 0:
logging.critical(
" MISSING: no bits for {}/{} pips!".format(
missing_bits, missing_bits + known_bits))
exitcode = -1
else:
logging.critical(" OK: no missing bits")
# No pips
else:
logging.warning(" OK: no pips")
return exitcode
# =============================================================================
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
|
import django
import warnings
from django.conf import settings
from django.core.exceptions import FieldError
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import Expression
from django.db.models.fields import FieldDoesNotExist
from django.db.models.fields.related import RelatedField, ForeignObjectRel
from django.utils import six, timezone
from django.utils.encoding import force_text
from django.utils.translation import ugettext as _
try:
from django.forms.utils import pretty_name
except ImportError: # Django 1.8
from django.forms.forms import pretty_name
from .compat import make_aware, remote_field, remote_model
from .exceptions import FieldLookupError
def deprecate(msg, level_modifier=0):
warnings.warn(
"%s See: https://django-filter.readthedocs.io/en/develop/migration.html" % msg,
DeprecationWarning, stacklevel=3 + level_modifier)
def try_dbfield(fn, field_class):
"""
Try ``fn`` with the DB ``field_class`` by walking its
MRO until a result is found.
ex::
        try_dbfield(field_dict.get, models.CharField)
"""
# walk the mro, as field_class could be a derived model field.
for cls in field_class.mro():
# skip if cls is models.Field
if cls is models.Field:
continue
data = fn(cls)
if data:
return data
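# Illustrative sketch (the lookup table below is hypothetical): because the MRO
# is walked, a subclass such as models.SlugField falls back to the entry for
# models.CharField.
#
#   FIELD_DEFAULTS = {models.CharField: {'filter_class': CharFilter}}
#   try_dbfield(FIELD_DEFAULTS.get, models.SlugField)
#   # -> {'filter_class': CharFilter}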
def get_all_model_fields(model):
opts = model._meta
return [
f.name for f in sorted(opts.fields + opts.many_to_many)
if not isinstance(f, models.AutoField) and
not (getattr(remote_field(f), 'parent_link', False))
]
def get_model_field(model, field_name):
"""
Get a ``model`` field, traversing relationships
in the ``field_name``.
ex::
f = get_model_field(Book, 'author__first_name')
"""
fields = get_field_parts(model, field_name)
return fields[-1] if fields else None
def get_field_parts(model, field_name):
"""
Get the field parts that represent the traversable relationships from the
base ``model`` to the final field, described by ``field_name``.
ex::
>>> parts = get_field_parts(Book, 'author__first_name')
>>> [p.verbose_name for p in parts]
['author', 'first name']
"""
parts = field_name.split(LOOKUP_SEP)
opts = model._meta
fields = []
# walk relationships
for name in parts:
try:
field = opts.get_field(name)
except FieldDoesNotExist:
return None
fields.append(field)
if isinstance(field, RelatedField):
opts = remote_model(field)._meta
elif isinstance(field, ForeignObjectRel):
opts = field.related_model._meta
return fields
def resolve_field(model_field, lookup_expr):
"""
Resolves a ``lookup_expr`` into its final output field, given
the initial ``model_field``. The lookup expression should only contain
transforms and lookups, not intermediary model field parts.
Note:
This method is based on django.db.models.sql.query.Query.build_lookup
For more info on the lookup API:
https://docs.djangoproject.com/en/1.9/ref/models/lookups/
"""
query = model_field.model._default_manager.all().query
lhs = Expression(model_field)
lookups = lookup_expr.split(LOOKUP_SEP)
assert len(lookups) > 0
try:
while lookups:
name = lookups[0]
args = (lhs, name)
if django.VERSION < (2, 0):
# rest_of_lookups was removed in Django 2.0
args += (lookups,)
# If there is just one part left, try first get_lookup() so
# that if the lhs supports both transform and lookup for the
# name, then lookup will be picked.
if len(lookups) == 1:
final_lookup = lhs.get_lookup(name)
if not final_lookup:
# We didn't find a lookup. We are going to interpret
# the name as transform, and do an Exact lookup against
# it.
lhs = query.try_transform(*args)
final_lookup = lhs.get_lookup('exact')
return lhs.output_field, final_lookup.lookup_name
lhs = query.try_transform(*args)
lookups = lookups[1:]
except FieldError as e:
six.raise_from(FieldLookupError(model_field, lookup_expr), e)
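# Illustrative sketch (model and field names are hypothetical): for a DateField
# named 'published' on an Article model, a transform + lookup expression resolves
# to the transform's output field and the final lookup name.
#
#   field = Article._meta.get_field('published')
#   resolve_field(field, 'year__gte')
#   # -> (<IntegerField produced by the year transform>, 'gte')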
def handle_timezone(value, is_dst=None):
if settings.USE_TZ and timezone.is_naive(value):
return make_aware(value, timezone.get_default_timezone(), is_dst)
elif not settings.USE_TZ and timezone.is_aware(value):
return timezone.make_naive(value, timezone.utc)
return value
def verbose_field_name(model, field_name):
"""
Get the verbose name for a given ``field_name``. The ``field_name``
will be traversed across relationships. Returns '[invalid name]' for
any field name that cannot be traversed.
ex::
>>> verbose_field_name(Article, 'author__name')
'author name'
"""
if field_name is None:
return '[invalid name]'
parts = get_field_parts(model, field_name)
if not parts:
return '[invalid name]'
names = []
for part in parts:
if isinstance(part, ForeignObjectRel):
names.append(force_text(part.related_name))
else:
names.append(force_text(part.verbose_name))
return ' '.join(names)
def verbose_lookup_expr(lookup_expr):
"""
Get a verbose, more humanized expression for a given ``lookup_expr``.
Each part in the expression is looked up in the ``FILTERS_VERBOSE_LOOKUPS``
    dictionary. A missing key simply defaults to itself.
ex::
>>> verbose_lookup_expr('year__lt')
'year is less than'
# with `FILTERS_VERBOSE_LOOKUPS = {}`
>>> verbose_lookup_expr('year__lt')
'year lt'
"""
from .conf import settings as app_settings
VERBOSE_LOOKUPS = app_settings.VERBOSE_LOOKUPS or {}
lookups = [
force_text(VERBOSE_LOOKUPS.get(lookup, _(lookup)))
for lookup in lookup_expr.split(LOOKUP_SEP)
]
return ' '.join(lookups)
def label_for_filter(model, field_name, lookup_expr, exclude=False):
"""
Create a generic label suitable for a filter.
ex::
>>> label_for_filter(Article, 'author__name', 'in')
        'author name is in'
"""
name = verbose_field_name(model, field_name)
verbose_expression = [_('exclude'), name] if exclude else [name]
# iterable lookups indicate a LookupTypeField, which should not be verbose
if isinstance(lookup_expr, six.string_types):
verbose_expression += [verbose_lookup_expr(lookup_expr)]
verbose_expression = [force_text(part) for part in verbose_expression if part]
verbose_expression = pretty_name(' '.join(verbose_expression))
return verbose_expression
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Zhongyue Luo, Intel Corporation.
#
import mock
from webob import exc
import webtest
from neutron.api.v2 import resource as wsgi_resource
from neutron.common import exceptions as q_exc
from neutron import context
from neutron.openstack.common import gettextutils
from neutron.tests import base
from neutron import wsgi
class RequestTestCase(base.BaseTestCase):
def setUp(self):
super(RequestTestCase, self).setUp()
self.req = wsgi_resource.Request({'foo': 'bar'})
def test_content_type_missing(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.body = "<body />"
self.assertEqual(None, request.get_content_type())
def test_content_type_with_charset(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/json; charset=UTF-8"
result = request.get_content_type()
self.assertEqual(result, "application/json")
def test_content_type_from_accept(self):
for content_type in ('application/xml',
'application/json'):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = content_type
result = request.best_match_content_type()
self.assertEqual(result, content_type)
def test_content_type_from_accept_best(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/xml, application/json"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = ("application/json; q=0.3, "
"application/xml; q=0.9")
result = request.best_match_content_type()
self.assertEqual(result, "application/xml")
def test_content_type_from_query_extension(self):
request = wsgi.Request.blank('/tests/123.xml')
result = request.best_match_content_type()
self.assertEqual(result, "application/xml")
request = wsgi.Request.blank('/tests/123.json')
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
request = wsgi.Request.blank('/tests/123.invalid')
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_content_type_accept_and_query_extension(self):
request = wsgi.Request.blank('/tests/123.xml')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual(result, "application/xml")
def test_content_type_accept_default(self):
request = wsgi.Request.blank('/tests/123.unsupported')
request.headers["Accept"] = "application/unsupported1"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_context_with_neutron_context(self):
ctxt = context.Context('fake_user', 'fake_tenant')
self.req.environ['neutron.context'] = ctxt
self.assertEqual(self.req.context, ctxt)
def test_context_without_neutron_context(self):
self.assertTrue(self.req.context.is_admin)
def test_best_match_language(self):
        # Test that we are actually invoking language negotiation via webob
request = wsgi.Request.blank('/')
gettextutils.get_available_languages = mock.MagicMock()
gettextutils.get_available_languages.return_value = ['known-language',
'es', 'zh']
request.headers['Accept-Language'] = 'known-language'
language = request.best_match_language()
self.assertEqual(language, 'known-language')
        # If the Accept-Language header is an unknown language, missing or empty,
        # the best match locale should be None
request.headers['Accept-Language'] = 'unknown-language'
language = request.best_match_language()
self.assertEqual(language, None)
request.headers['Accept-Language'] = ''
language = request.best_match_language()
self.assertEqual(language, None)
request.headers.pop('Accept-Language')
language = request.best_match_language()
self.assertEqual(language, None)
class ResourceTestCase(base.BaseTestCase):
def test_unmapped_neutron_error_with_json(self):
msg = u'\u7f51\u7edc'
class TestException(q_exc.NeutronException):
message = msg
expected_res = {'body': {
'NeutronError': {
'type': 'TestException',
'message': msg,
'detail': ''}}}
controller = mock.MagicMock()
controller.test.side_effect = TestException()
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'test',
'format': 'json'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
self.assertEqual(wsgi.JSONDeserializer().deserialize(res.body),
expected_res)
def test_unmapped_neutron_error_with_xml(self):
msg = u'\u7f51\u7edc'
class TestException(q_exc.NeutronException):
message = msg
expected_res = {'body': {
'NeutronError': {
'type': 'TestException',
'message': msg,
'detail': ''}}}
controller = mock.MagicMock()
controller.test.side_effect = TestException()
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'test',
'format': 'xml'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
self.assertEqual(wsgi.XMLDeserializer().deserialize(res.body),
expected_res)
@mock.patch('neutron.openstack.common.gettextutils.get_localized_message')
def test_unmapped_neutron_error_localized(self, mock_translation):
gettextutils.install('blaa', lazy=True)
msg_translation = 'Translated error'
mock_translation.return_value = msg_translation
msg = _('Unmapped error')
class TestException(q_exc.NeutronException):
message = msg
controller = mock.MagicMock()
controller.test.side_effect = TestException()
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'test',
'format': 'json'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
self.assertIn(msg_translation,
str(wsgi.JSONDeserializer().deserialize(res.body)))
def test_mapped_neutron_error_with_json(self):
msg = u'\u7f51\u7edc'
class TestException(q_exc.NeutronException):
message = msg
expected_res = {'body': {
'NeutronError': {
'type': 'TestException',
'message': msg,
'detail': ''}}}
controller = mock.MagicMock()
controller.test.side_effect = TestException()
faults = {TestException: exc.HTTPGatewayTimeout}
resource = webtest.TestApp(wsgi_resource.Resource(controller,
faults=faults))
environ = {'wsgiorg.routing_args': (None, {'action': 'test',
'format': 'json'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPGatewayTimeout.code)
self.assertEqual(wsgi.JSONDeserializer().deserialize(res.body),
expected_res)
def test_mapped_neutron_error_with_xml(self):
msg = u'\u7f51\u7edc'
class TestException(q_exc.NeutronException):
message = msg
expected_res = {'body': {
'NeutronError': {
'type': 'TestException',
'message': msg,
'detail': ''}}}
controller = mock.MagicMock()
controller.test.side_effect = TestException()
faults = {TestException: exc.HTTPGatewayTimeout}
resource = webtest.TestApp(wsgi_resource.Resource(controller,
faults=faults))
environ = {'wsgiorg.routing_args': (None, {'action': 'test',
'format': 'xml'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPGatewayTimeout.code)
self.assertEqual(wsgi.XMLDeserializer().deserialize(res.body),
expected_res)
@mock.patch('neutron.openstack.common.gettextutils.get_localized_message')
def test_mapped_neutron_error_localized(self, mock_translation):
gettextutils.install('blaa', lazy=True)
msg_translation = 'Translated error'
mock_translation.return_value = msg_translation
msg = _('Unmapped error')
class TestException(q_exc.NeutronException):
message = msg
controller = mock.MagicMock()
controller.test.side_effect = TestException()
faults = {TestException: exc.HTTPGatewayTimeout}
resource = webtest.TestApp(wsgi_resource.Resource(controller,
faults=faults))
environ = {'wsgiorg.routing_args': (None, {'action': 'test',
'format': 'json'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPGatewayTimeout.code)
self.assertIn(msg_translation,
str(wsgi.JSONDeserializer().deserialize(res.body)))
def test_http_error(self):
controller = mock.MagicMock()
controller.test.side_effect = exc.HTTPGatewayTimeout()
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'test'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPGatewayTimeout.code)
def test_unhandled_error_with_json(self):
expected_res = {'body': {'NeutronError':
_('Request Failed: internal server error '
'while processing your request.')}}
controller = mock.MagicMock()
controller.test.side_effect = Exception()
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'test',
'format': 'json'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
self.assertEqual(wsgi.JSONDeserializer().deserialize(res.body),
expected_res)
def test_unhandled_error_with_xml(self):
expected_res = {'body': {'NeutronError':
_('Request Failed: internal server error '
'while processing your request.')}}
controller = mock.MagicMock()
controller.test.side_effect = Exception()
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'test',
'format': 'xml'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
self.assertEqual(wsgi.XMLDeserializer().deserialize(res.body),
expected_res)
def test_status_200(self):
controller = mock.MagicMock()
controller.test = lambda request: {'foo': 'bar'}
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'test'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, 200)
def test_status_204(self):
controller = mock.MagicMock()
controller.test = lambda request: {'foo': 'bar'}
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'delete'})}
res = resource.delete('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, 204)
def test_no_route_args(self):
controller = mock.MagicMock()
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
def test_post_with_body(self):
controller = mock.MagicMock()
controller.test = lambda request, body: {'foo': 'bar'}
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'test'})}
res = resource.post('', params='{"key": "val"}',
extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, 200)
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import routes
from trove.backup.service import BackupController
from trove.cluster.service import ClusterController
from trove.common import wsgi
from trove.configuration.service import ConfigurationsController
from trove.configuration.service import ParametersController
from trove.datastore.service import DatastoreController
from trove.flavor.service import FlavorController
from trove.instance.service import InstanceController
from trove.limits.service import LimitsController
from trove.module.service import ModuleController
from trove.versions import VersionsController
class API(wsgi.Router):
"""Defines the API routes."""
def __init__(self):
mapper = routes.Mapper()
super(API, self).__init__(mapper)
self._instance_router(mapper)
self._cluster_router(mapper)
self._datastore_router(mapper)
self._flavor_router(mapper)
self._versions_router(mapper)
self._limits_router(mapper)
self._backups_router(mapper)
self._configurations_router(mapper)
self._modules_router(mapper)
def _versions_router(self, mapper):
versions_resource = VersionsController().create_resource()
mapper.connect("/",
controller=versions_resource,
action="show",
conditions={'method': ['GET']})
def _datastore_router(self, mapper):
datastore_resource = DatastoreController().create_resource()
mapper.resource("datastore", "/{tenant_id}/datastores",
controller=datastore_resource)
mapper.connect("/{tenant_id}/datastores/{datastore}/versions",
controller=datastore_resource,
action="version_index")
mapper.connect("/{tenant_id}/datastores/{datastore}/versions/{id}",
controller=datastore_resource,
action="version_show")
mapper.connect(
"/{tenant_id}/datastores/{datastore}/versions/"
"{version_id}/flavors",
controller=datastore_resource,
action="list_associated_flavors",
conditions={'method': ['GET']}
)
mapper.connect("/{tenant_id}/datastores/versions/{uuid}",
controller=datastore_resource,
action="version_show_by_uuid")
def _instance_router(self, mapper):
instance_resource = InstanceController().create_resource()
mapper.connect("/{tenant_id}/instances",
controller=instance_resource,
action="index",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/instances",
controller=instance_resource,
action="create",
conditions={'method': ['POST']})
mapper.connect("/{tenant_id}/instances/{id}",
controller=instance_resource,
action="show",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/instances/{id}/action",
controller=instance_resource,
action="action",
conditions={'method': ['POST']})
mapper.connect("/{tenant_id}/instances/{id}",
controller=instance_resource,
action="update",
conditions={'method': ['PUT']})
mapper.connect("/{tenant_id}/instances/{id}",
controller=instance_resource,
action="edit",
conditions={'method': ['PATCH']})
mapper.connect("/{tenant_id}/instances/{id}",
controller=instance_resource,
action="delete",
conditions={'method': ['DELETE']})
mapper.connect("/{tenant_id}/instances/{id}/backups",
controller=instance_resource,
action="backups",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/instances/{id}/configuration",
controller=instance_resource,
action="configuration",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/instances/{id}/log",
controller=instance_resource,
action="guest_log_list",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/instances/{id}/log",
controller=instance_resource,
action="guest_log_action",
conditions={'method': ['POST']})
mapper.connect("/{tenant_id}/instances/{id}/modules",
controller=instance_resource,
action="module_list",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/instances/{id}/modules",
controller=instance_resource,
action="module_apply",
conditions={'method': ['POST']})
mapper.connect("/{tenant_id}/instances/{id}/modules/{module_id}",
controller=instance_resource,
action="module_remove",
conditions={'method': ['DELETE']})
def _cluster_router(self, mapper):
cluster_resource = ClusterController().create_resource()
mapper.connect("/{tenant_id}/clusters",
controller=cluster_resource,
action="index",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/clusters/{id}",
controller=cluster_resource,
action="show",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/clusters",
controller=cluster_resource,
action="create",
conditions={'method': ['POST']})
mapper.connect("/{tenant_id}/clusters/{id}",
controller=cluster_resource,
action="action",
conditions={'method': ['POST']})
mapper.connect("/{tenant_id}/clusters/{cluster_id}/instances/"
"{instance_id}",
controller=cluster_resource,
action="show_instance",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/clusters/{id}",
controller=cluster_resource,
action="delete",
conditions={'method': ['DELETE']})
def _flavor_router(self, mapper):
flavor_resource = FlavorController().create_resource()
mapper.connect("/{tenant_id}/flavors",
controller=flavor_resource,
action="index",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/flavors/{id}",
controller=flavor_resource,
action="show",
conditions={'method': ['GET']})
def _limits_router(self, mapper):
limits_resource = LimitsController().create_resource()
mapper.connect("/{tenant_id}/limits",
controller=limits_resource,
action="index",
conditions={'method': ['GET']})
def _backups_router(self, mapper):
backups_resource = BackupController().create_resource()
mapper.connect("/{tenant_id}/backups",
controller=backups_resource,
action="index",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/backups",
controller=backups_resource,
action="create",
conditions={'method': ['POST']})
mapper.connect("/{tenant_id}/backups/{id}",
controller=backups_resource,
action="show",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/backups/{id}",
controller=backups_resource,
action="action",
conditions={'method': ['POST']})
mapper.connect("/{tenant_id}/backups/{id}",
controller=backups_resource,
action="delete",
conditions={'method': ['DELETE']})
def _modules_router(self, mapper):
modules_resource = ModuleController().create_resource()
mapper.resource("modules", "/{tenant_id}/modules",
controller=modules_resource)
mapper.connect("/{tenant_id}/modules",
controller=modules_resource,
action="index",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/modules",
controller=modules_resource,
action="create",
conditions={'method': ['POST']})
mapper.connect("/{tenant_id}/modules/{id}",
controller=modules_resource,
action="show",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/modules/{id}",
controller=modules_resource,
action="update",
conditions={'method': ['PUT']})
mapper.connect("/{tenant_id}/modules/{id}",
controller=modules_resource,
action="delete",
conditions={'method': ['DELETE']})
mapper.connect("/{tenant_id}/modules/{id}/instances",
controller=modules_resource,
action="instances",
conditions={'method': ['GET']})
def _configurations_router(self, mapper):
parameters_resource = ParametersController().create_resource()
path = '/{tenant_id}/datastores/versions/{version}/parameters'
mapper.connect(path,
controller=parameters_resource,
action='index_by_version',
conditions={'method': ['GET']})
path = '/{tenant_id}/datastores/versions/{version}/parameters/{name}'
mapper.connect(path,
controller=parameters_resource,
action='show_by_version',
conditions={'method': ['GET']})
path = '/{tenant_id}/datastores/{datastore}/versions/{id}'
mapper.connect(path + '/parameters',
controller=parameters_resource,
action='index',
conditions={'method': ['GET']})
mapper.connect(path + '/parameters/{name}',
controller=parameters_resource,
action='show',
conditions={'method': ['GET']})
configuration_resource = ConfigurationsController().create_resource()
mapper.connect('/{tenant_id}/configurations',
controller=configuration_resource,
action='index',
conditions={'method': ['GET']})
mapper.connect('/{tenant_id}/configurations',
controller=configuration_resource,
action='create',
conditions={'method': ['POST']})
mapper.connect('/{tenant_id}/configurations/{id}',
controller=configuration_resource,
action='show',
conditions={'method': ['GET']})
mapper.connect('/{tenant_id}/configurations/{id}/instances',
controller=configuration_resource,
action='instances',
conditions={'method': ['GET']})
mapper.connect('/{tenant_id}/configurations/{id}',
controller=configuration_resource,
action='edit',
conditions={'method': ['PATCH']})
mapper.connect('/{tenant_id}/configurations/{id}',
controller=configuration_resource,
action='update',
conditions={'method': ['PUT']})
mapper.connect('/{tenant_id}/configurations/{id}',
controller=configuration_resource,
action='delete',
conditions={'method': ['DELETE']})
def app_factory(global_conf, **local_conf):
return API()
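# Illustrative paste.deploy wiring (assuming this module lives at trove.common.api;
# the section name below is an example): the router above is typically exposed
# through an api-paste.ini entry along the lines of
#
#   [app:troveapp]
#   paste.app_factory = trove.common.api:app_factory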
|
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Translation methods for generating localized strings.
To load a locale and generate a translated string::
user_locale = locale.get("es_LA")
print user_locale.translate("Sign out")
locale.get() returns the closest matching locale, not necessarily the
specific locale you requested. You can support pluralization with
additional arguments to translate(), e.g.::
people = [...]
message = user_locale.translate(
"%(list)s is online", "%(list)s are online", len(people))
print message % {"list": user_locale.list(people)}
The first string is chosen if len(people) == 1, otherwise the second
string is chosen.
Applications should call one of load_translations (which uses a simple
CSV format) or load_gettext_translations (which uses the .mo format
supported by gettext and related tools). If neither method is called,
the locale.translate method will simply return the original string.
"""
import csv
import datetime
import logging
import os
import re
_default_locale = "en_US"
_translations = {}
_supported_locales = frozenset([_default_locale])
_use_gettext = False
def get(*locale_codes):
"""Returns the closest match for the given locale codes.
We iterate over all given locale codes in order. If we have a tight
or a loose match for the code (e.g., "en" for "en_US"), we return
the locale. Otherwise we move to the next code in the list.
By default we return en_US if no translations are found for any of
the specified locales. You can change the default locale with
set_default_locale() below.
"""
return Locale.get_closest(*locale_codes)
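# Illustrative usage (locale codes are examples; assumes translations were
# loaded with one of the loaders below):
#
#   user_locale = get("pt_BR", "en_US")   # falls back through "pt" to the default
#   signout_label = user_locale.translate("Sign out")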
def set_default_locale(code):
"""Sets the default locale, used in get_closest_locale().
The default locale is assumed to be the language used for all strings
in the system. The translations loaded from disk are mappings from
the default locale to the destination locale. Consequently, you don't
need to create a translation file for the default locale.
"""
global _default_locale
global _supported_locales
_default_locale = code
_supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
def load_translations(directory):
"""Loads translations from CSV files in a directory.
Translations are strings with optional Python-style named placeholders
(e.g., "My name is %(name)s") and their associated translations.
The directory should have translation files of the form LOCALE.csv,
e.g. es_GT.csv. The CSV files should have two or three columns: string,
translation, and an optional plural indicator. Plural indicators should
be one of "plural" or "singular". A given string can have both singular
and plural forms. For example "%(name)s liked this" may have a
different verb conjugation depending on whether %(name)s is one
name or a list of names. There should be two rows in the CSV file for
that string, one with plural indicator "singular", and one "plural".
For strings with no verbs that would change on translation, simply
use "unknown" or the empty string (or don't include the column at all).
The file is read using the csv module in the default "excel" dialect.
In this format there should not be spaces after the commas.
Example translation es_LA.csv:
"I love you","Te amo"
"%(name)s liked this","A %(name)s les gust\u00f3 esto","plural"
"%(name)s liked this","A %(name)s le gust\u00f3 esto","singular"
"""
global _translations
global _supported_locales
_translations = {}
for path in os.listdir(directory):
if not path.endswith(".csv"): continue
locale, extension = path.split(".")
if not re.match("[a-z]+(_[A-Z]+)?$", locale):
logging.error("Unrecognized locale %r (path: %s)", locale,
os.path.join(directory, path))
continue
f = open(os.path.join(directory, path), "r")
_translations[locale] = {}
for i, row in enumerate(csv.reader(f)):
if not row or len(row) < 2: continue
row = [c.decode("utf-8").strip() for c in row]
english, translation = row[:2]
if len(row) > 2:
plural = row[2] or "unknown"
else:
plural = "unknown"
if plural not in ("plural", "singular", "unknown"):
logging.error("Unrecognized plural indicator %r in %s line %d",
plural, path, i + 1)
continue
_translations[locale].setdefault(plural, {})[english] = translation
f.close()
_supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
logging.info("Supported locales: %s", sorted(_supported_locales))
def load_gettext_translations(directory, domain):
"""Loads translations from gettext's locale tree
Locale tree is similar to system's /usr/share/locale, like:
{directory}/{lang}/LC_MESSAGES/{domain}.mo
    Three steps are required to have your app translated:
1. Generate POT translation file
xgettext --language=Python --keyword=_:1,2 -d cyclone file1.py file2.html etc
2. Merge against existing POT file:
msgmerge old.po cyclone.po > new.po
3. Compile:
msgfmt cyclone.po -o {directory}/pt_BR/LC_MESSAGES/cyclone.mo
"""
import gettext
global _translations
global _supported_locales
global _use_gettext
_translations = {}
for lang in os.listdir(directory):
if lang.startswith('.'): continue # skip .svn, etc
if os.path.isfile(os.path.join(directory, lang)): continue
try:
os.stat(os.path.join(directory, lang, "LC_MESSAGES", domain+".mo"))
_translations[lang] = gettext.translation(domain, directory,
languages=[lang])
except Exception as e:
logging.error("Cannot load translation for '%s': %s", lang, str(e))
continue
_supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
_use_gettext = True
logging.info("Supported locales: %s", sorted(_supported_locales))
def get_supported_locales(cls):
"""Returns a list of all the supported locale codes."""
return _supported_locales
class Locale(object):
"""Object representing a locale.
After calling one of `load_translations` or `load_gettext_translations`,
call `get` or `get_closest` to get a Locale object.
"""
@classmethod
def get_closest(cls, *locale_codes):
"""Returns the closest match for the given locale code."""
for code in locale_codes:
if not code: continue
code = code.replace("-", "_")
parts = code.split("_")
if len(parts) > 2:
continue
elif len(parts) == 2:
code = parts[0].lower() + "_" + parts[1].upper()
if code in _supported_locales:
return cls.get(code)
if parts[0].lower() in _supported_locales:
return cls.get(parts[0].lower())
return cls.get(_default_locale)
@classmethod
def get(cls, code):
"""Returns the Locale for the given locale code.
If it is not supported, we raise an exception.
"""
if not hasattr(cls, "_cache"):
cls._cache = {}
if code not in cls._cache:
assert code in _supported_locales
translations = _translations.get(code, None)
if translations is None:
locale = CSVLocale(code, {})
elif _use_gettext:
locale = GettextLocale(code, translations)
else:
locale = CSVLocale(code, translations)
cls._cache[code] = locale
return cls._cache[code]
def __init__(self, code, translations):
self.code = code
self.name = LOCALE_NAMES.get(code, {}).get("name", "Unknown")
self.rtl = False
for prefix in ["fa", "ar", "he"]:
if self.code.startswith(prefix):
self.rtl = True
break
self.translations = translations
# Initialize strings for date formatting
_ = self.translate
self._months = [
_("January"), _("February"), _("March"), _("April"),
_("May"), _("June"), _("July"), _("August"),
_("September"), _("October"), _("November"), _("December")]
self._weekdays = [
_("Monday"), _("Tuesday"), _("Wednesday"), _("Thursday"),
_("Friday"), _("Saturday"), _("Sunday")]
def translate(self, message, plural_message=None, count=None):
"""Returns the translation for the given message for this locale.
If plural_message is given, you must also provide count. We return
plural_message when count != 1, and we return the singular form
for the given message when count == 1.
"""
raise NotImplementedError()
def format_date(self, date, gmt_offset=0, relative=True, shorter=False,
full_format=False):
"""Formats the given date (which should be GMT).
By default, we return a relative time (e.g., "2 minutes ago"). You
can return an absolute date string with relative=False.
You can force a full format date ("July 10, 1980") with
full_format=True.
This method is primarily intended for dates in the past.
For dates in the future, we fall back to full format.
"""
if self.code.startswith("ru"):
relative = False
if type(date) in (int, float):
date = datetime.datetime.utcfromtimestamp(date)
now = datetime.datetime.utcnow()
if date > now:
if relative and (date - now).seconds < 60:
# Due to clock skew, things are sometimes slightly
# in the future. Round timestamps in the immediate
# future down to now in relative mode.
date = now
else:
# Otherwise, future dates always use the full format.
full_format = True
local_date = date - datetime.timedelta(minutes=gmt_offset)
local_now = now - datetime.timedelta(minutes=gmt_offset)
local_yesterday = local_now - datetime.timedelta(hours=24)
difference = now - date
seconds = difference.seconds
days = difference.days
_ = self.translate
format = None
if not full_format:
if relative and days == 0:
if seconds < 50:
return _("1 second ago", "%(seconds)d seconds ago",
seconds) % { "seconds": seconds }
if seconds < 50 * 60:
minutes = round(seconds / 60.0)
return _("1 minute ago", "%(minutes)d minutes ago",
minutes) % { "minutes": minutes }
hours = round(seconds / (60.0 * 60))
return _("1 hour ago", "%(hours)d hours ago",
hours) % { "hours": hours }
if days == 0:
format = _("%(time)s")
elif days == 1 and local_date.day == local_yesterday.day and \
relative:
format = _("yesterday") if shorter else \
_("yesterday at %(time)s")
elif days < 5:
format = _("%(weekday)s") if shorter else \
_("%(weekday)s at %(time)s")
elif days < 334: # 11mo, since confusing for same month last year
format = _("%(month_name)s %(day)s") if shorter else \
_("%(month_name)s %(day)s at %(time)s")
if format is None:
format = _("%(month_name)s %(day)s, %(year)s") if shorter else \
_("%(month_name)s %(day)s, %(year)s at %(time)s")
tfhour_clock = self.code not in ("en", "en_US", "zh_CN")
if tfhour_clock:
str_time = "%d:%02d" % (local_date.hour, local_date.minute)
elif self.code == "zh_CN":
str_time = "%s%d:%02d" % (
('\u4e0a\u5348', '\u4e0b\u5348')[local_date.hour >= 12],
local_date.hour % 12 or 12, local_date.minute)
else:
str_time = "%d:%02d %s" % (
local_date.hour % 12 or 12, local_date.minute,
("am", "pm")[local_date.hour >= 12])
return format % {
"month_name": self._months[local_date.month - 1],
"weekday": self._weekdays[local_date.weekday()],
"day": str(local_date.day),
"year": str(local_date.year),
"time": str_time
}
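# Illustrative behaviour of format_date (assuming the default "en_US" locale):
#   locale = Locale.get(_default_locale)
#   two_min_ago = datetime.datetime.utcnow() - datetime.timedelta(minutes=2)
#   locale.format_date(two_min_ago)                  # -> "2 minutes ago"
#   locale.format_date(two_min_ago, relative=False)  # -> e.g. "2:07 pm" (same day)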
def format_day(self, date, gmt_offset=0, dow=True):
"""Formats the given date as a day of week.
Example: "Monday, January 22". You can remove the day of week with
dow=False.
"""
local_date = date - datetime.timedelta(minutes=gmt_offset)
_ = self.translate
if dow:
return _("%(weekday)s, %(month_name)s %(day)s") % {
"month_name": self._months[local_date.month - 1],
"weekday": self._weekdays[local_date.weekday()],
"day": str(local_date.day),
}
else:
return _("%(month_name)s %(day)s") % {
"month_name": self._months[local_date.month - 1],
"day": str(local_date.day),
}
def list(self, parts):
"""Returns a comma-separated list for the given list of parts.
The format is, e.g., "A, B and C", "A and B" or just "A" for lists
of size 1.
"""
_ = self.translate
if len(parts) == 0: return ""
if len(parts) == 1: return parts[0]
comma = ' \u0648 ' if self.code.startswith("fa") else ", "
return _("%(commas)s and %(last)s") % {
"commas": comma.join(parts[:-1]),
"last": parts[len(parts) - 1],
}
def friendly_number(self, value):
"""Returns a comma-separated number for the given integer."""
if self.code not in ("en", "en_US"):
return str(value)
value = str(value)
parts = []
while value:
parts.append(value[-3:])
value = value[:-3]
return ",".join(reversed(parts))
class CSVLocale(Locale):
"""Locale implementation using tornado's CSV translation format."""
def translate(self, message, plural_message=None, count=None):
if plural_message is not None:
assert count is not None
if count != 1:
message = plural_message
message_dict = self.translations.get("plural", {})
else:
message_dict = self.translations.get("singular", {})
else:
message_dict = self.translations.get("unknown", {})
return message_dict.get(message, message)
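# Illustrative lookup against the es_LA.csv rows shown in load_translations():
#   Locale.get("es_LA").translate("%(name)s liked this",
#                                 "%(name)s liked this", count=3)
# count != 1, so the "plural" row is returned: "A %(name)s les gust\u00f3 esto".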
class GettextLocale(Locale):
"""Locale implementation using the gettext module."""
def translate(self, message, plural_message=None, count=None):
if plural_message is not None:
assert count is not None
return self.translations.ungettext(message, plural_message, count)
else:
return self.translations.ugettext(message)
LOCALE_NAMES = {
"af_ZA": {"name_en": "Afrikaans", "name": "Afrikaans"},
"am_ET": {"name_en": "Amharic", "name": '\u12a0\u121b\u122d\u129b'},
"ar_AR": {"name_en": "Arabic", "name": "\u0627\u0644\u0639\u0631\u0628\u064a\u0629"},
"bg_BG": {"name_en": "Bulgarian", "name": "\u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438"},
"bn_IN": {"name_en": "Bengali", "name": "\u09ac\u09be\u0982\u09b2\u09be"},
"bs_BA": {"name_en": "Bosnian", "name": "Bosanski"},
"ca_ES": {"name_en": "Catalan", "name": "Catal\xe0"},
"cs_CZ": {"name_en": "Czech", "name": "\u010ce\u0161tina"},
"cy_GB": {"name_en": "Welsh", "name": "Cymraeg"},
"da_DK": {"name_en": "Danish", "name": "Dansk"},
"de_DE": {"name_en": "German", "name": "Deutsch"},
"el_GR": {"name_en": "Greek", "name": "\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac"},
"en_GB": {"name_en": "English (UK)", "name": "English (UK)"},
"en_US": {"name_en": "English (US)", "name": "English (US)"},
"es_ES": {"name_en": "Spanish (Spain)", "name": "Espa\xf1ol (Espa\xf1a)"},
"es_LA": {"name_en": "Spanish", "name": "Espa\xf1ol"},
"et_EE": {"name_en": "Estonian", "name": "Eesti"},
"eu_ES": {"name_en": "Basque", "name": "Euskara"},
"fa_IR": {"name_en": "Persian", "name": "\u0641\u0627\u0631\u0633\u06cc"},
"fi_FI": {"name_en": "Finnish", "name": "Suomi"},
"fr_CA": {"name_en": "French (Canada)", "name": "Fran\xe7ais (Canada)"},
"fr_FR": {"name_en": "French", "name": "Fran\xe7ais"},
"ga_IE": {"name_en": "Irish", "name": "Gaeilge"},
"gl_ES": {"name_en": "Galician", "name": "Galego"},
"he_IL": {"name_en": "Hebrew", "name": "\u05e2\u05d1\u05e8\u05d9\u05ea"},
"hi_IN": {"name_en": "Hindi", "name": "\u0939\u093f\u0928\u094d\u0926\u0940"},
"hr_HR": {"name_en": "Croatian", "name": "Hrvatski"},
"hu_HU": {"name_en": "Hungarian", "name": "Magyar"},
"id_ID": {"name_en": "Indonesian", "name": "Bahasa Indonesia"},
"is_IS": {"name_en": "Icelandic", "name": "\xcdslenska"},
"it_IT": {"name_en": "Italian", "name": "Italiano"},
"ja_JP": {"name_en": "Japanese", "name": "\u65e5\u672c\u8a9e"},
"ko_KR": {"name_en": "Korean", "name": "\ud55c\uad6d\uc5b4"},
"lt_LT": {"name_en": "Lithuanian", "name": "Lietuvi\u0173"},
"lv_LV": {"name_en": "Latvian", "name": "Latvie\u0161u"},
"mk_MK": {"name_en": "Macedonian", "name": "\u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438"},
"ml_IN": {"name_en": "Malayalam", "name": "\u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02"},
"ms_MY": {"name_en": "Malay", "name": "Bahasa Melayu"},
"nb_NO": {"name_en": "Norwegian (bokmal)", "name": "Norsk (bokm\xe5l)"},
"nl_NL": {"name_en": "Dutch", "name": "Nederlands"},
"nn_NO": {"name_en": "Norwegian (nynorsk)", "name": "Norsk (nynorsk)"},
"pa_IN": {"name_en": "Punjabi", "name": "\u0a2a\u0a70\u0a1c\u0a3e\u0a2c\u0a40"},
"pl_PL": {"name_en": "Polish", "name": "Polski"},
"pt_BR": {"name_en": "Portuguese (Brazil)", "name": "Portugu\xeas (Brasil)"},
"pt_PT": {"name_en": "Portuguese (Portugal)", "name": "Portugu\xeas (Portugal)"},
"ro_RO": {"name_en": "Romanian", "name": "Rom\xe2n\u0103"},
"ru_RU": {"name_en": "Russian", "name": "\u0420\u0443\u0441\u0441\u043a\u0438\u0439"},
"sk_SK": {"name_en": "Slovak", "name": "Sloven\u010dina"},
"sl_SI": {"name_en": "Slovenian", "name": "Sloven\u0161\u010dina"},
"sq_AL": {"name_en": "Albanian", "name": "Shqip"},
"sr_RS": {"name_en": "Serbian", "name": "\u0421\u0440\u043f\u0441\u043a\u0438"},
"sv_SE": {"name_en": "Swedish", "name": "Svenska"},
"sw_KE": {"name_en": "Swahili", "name": "Kiswahili"},
"ta_IN": {"name_en": "Tamil", "name": "\u0ba4\u0bae\u0bbf\u0bb4\u0bcd"},
"te_IN": {"name_en": "Telugu", "name": "\u0c24\u0c46\u0c32\u0c41\u0c17\u0c41"},
"th_TH": {"name_en": "Thai", "name": "\u0e20\u0e32\u0e29\u0e32\u0e44\u0e17\u0e22"},
"tl_PH": {"name_en": "Filipino", "name": "Filipino"},
"tr_TR": {"name_en": "Turkish", "name": "T\xfcrk\xe7e"},
"uk_UA": {"name_en": "Ukraini ", "name": "\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430"},
"vi_VN": {"name_en": "Vietnamese", "name": "Ti\u1ebfng Vi\u1ec7t"},
"zh_CN": {"name_en": "Chinese (Simplified)", "name": "\u4e2d\u6587(\u7b80\u4f53)"},
"zh_TW": {"name_en": "Chinese (Traditional)", "name": "\u4e2d\u6587(\u7e41\u9ad4)"},
}
|
|
"""Handles the instrospection of REST Framework Views and ViewSets."""
from abc import ABCMeta, abstractmethod
import re
from django.contrib.admindocs.utils import trim_docstring
from rest_framework.views import get_view_name, get_view_description
def get_resolved_value(obj, attr, default=None):
value = getattr(obj, attr, default)
if callable(value):
value = value()
return value
class IntrospectorHelper(object):
__metaclass__ = ABCMeta
@staticmethod
def strip_params_from_docstring(docstring):
"""
Strips the param lines (e.g. "myparam -- Some param") from the docstring
so that they are not included in the returned text body
"""
split_lines = trim_docstring(docstring).split('\n')
cut_off = None
for index, line in enumerate(split_lines):
line = line.strip()
if line.find('--') != -1:
cut_off = index
break
if cut_off is not None:
split_lines = split_lines[0:cut_off]
return "<br/>".join(split_lines)
@staticmethod
def get_serializer_name(serializer):
if serializer is None:
return None
return serializer.__name__
@staticmethod
def get_view_description(callback):
"""
Returns the first sentence of the first line of the class docstring
"""
return get_view_description(callback).split("\n")[0].split(".")[0]
class BaseViewIntrospector(object):
__metaclass__ = ABCMeta
def __init__(self, callback, path, pattern):
self.callback = callback
self.path = path
self.pattern = pattern
@abstractmethod
def __iter__(self):
pass
def get_iterator(self):
return self.__iter__()
def get_serializer_class(self):
if hasattr(self.callback, 'get_serializer_class'):
return self.callback().get_serializer_class()
def get_description(self):
"""
Returns the first sentence of the first line of the class docstring
"""
return IntrospectorHelper.get_view_description(self.callback)
class BaseMethodIntrospector(object):
__metaclass__ = ABCMeta
def __init__(self, view_introspector, method):
self.method = method
self.parent = view_introspector
self.callback = view_introspector.callback
self.path = view_introspector.path
def get_serializer_class(self):
return self.parent.get_serializer_class()
def get_summary(self):
docs = self.get_docs()
# If there is no docstring on the method, get class docs
if docs is None:
docs = self.parent.get_description()
docs = trim_docstring(docs).split('\n')[0]
return docs
def get_nickname(self):
""" Returns the APIView's nickname """
return get_view_name(self.callback).replace(' ', '_')
def get_notes(self):
"""
Returns the body of the docstring trimmed before any parameters are
listed. First, get the class docstring and then get the method's. The
methods will always inherit the class comments.
"""
docstring = ""
class_docs = trim_docstring(get_view_description(self.callback))
method_docs = self.get_docs()
if class_docs is not None:
docstring += class_docs
if method_docs is not None:
docstring += '\n' + method_docs
docstring = IntrospectorHelper.strip_params_from_docstring(docstring)
docstring = docstring.replace("\n\n", "<br/>")
return docstring
def get_parameters(self):
"""
Returns parameters for an API. Parameters are a combination of HTTP
query parameters as well as HTTP body parameters that are defined by
the DRF serializer fields
"""
params = []
path_params = self.build_path_parameters()
body_params = self.build_body_parameters()
form_params = self.build_form_parameters()
query_params = self.build_query_params_from_docstring()
if path_params:
params += path_params
if self.get_http_method() not in ["GET", "DELETE"]:
params += form_params
if not form_params and body_params is not None:
params.append(body_params)
if query_params:
params += query_params
return params
def get_http_method(self):
return self.method
@abstractmethod
def get_docs(self):
return ''
def retrieve_docstring(self):
"""
Attempts to fetch the docs for a class method. Returns None
if the method does not exist
"""
method = str(self.method).lower()
if not hasattr(self.callback, method):
return None
return getattr(self.callback, method).__doc__
def build_body_parameters(self):
serializer = self.get_serializer_class()
serializer_name = IntrospectorHelper.get_serializer_name(serializer)
if serializer_name is None:
return
return {
'name': serializer_name,
'dataType': serializer_name,
'paramType': 'body',
}
def build_path_parameters(self):
"""
Gets the parameters from the URL
"""
url_params = re.findall('/{([^}]*)}', self.path)
params = []
for param in url_params:
params.append({
'name': param,
'dataType': 'string',
'paramType': 'path',
'required': True
})
return params
def build_form_parameters(self):
"""
Builds form parameters from the serializer class
"""
data = []
serializer = self.get_serializer_class()
if serializer is None:
return data
fields = serializer().get_fields()
for name, field in fields.items():
if getattr(field, 'read_only', False):
continue
data_type = field.type_label
max_length = getattr(field, 'max_length', None)
min_length = getattr(field, 'min_length', None)
allowable_values = None
if max_length is not None or min_length is not None:
allowable_values = {
'max': max_length,
'min': min_length,
'valueType': 'RANGE'
}
data.append({
'paramType': 'form',
'name': name,
'dataType': data_type,
'allowableValues': allowable_values,
'description': getattr(field, 'help_text', ''),
'defaultValue': get_resolved_value(field, 'default'),
'required': getattr(field, 'required', None)
})
return data
def build_query_params_from_docstring(self):
params = []
docstring = self.retrieve_docstring() or ''
docstring += "\n" + get_view_description(self.callback)
if docstring is None:
return params
split_lines = docstring.split('\n')
for line in split_lines:
param = line.split(' -- ')
if len(param) == 2:
params.append({'paramType': 'query',
'name': param[0].strip(),
'description': param[1].strip(),
'dataType': ''})
return params
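# Illustrative: a view or method docstring containing a line such as
#   "state -- filter results by state"
# yields {'paramType': 'query', 'name': 'state',
#         'description': 'filter results by state', 'dataType': ''}.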
class APIViewIntrospector(BaseViewIntrospector):
def __iter__(self):
methods = self.callback().allowed_methods
for method in methods:
yield APIViewMethodIntrospector(self, method)
class APIViewMethodIntrospector(BaseMethodIntrospector):
def get_docs(self):
"""
Attempts to retrieve method specific docs for an
endpoint. If none are available, the class docstring
will be used
"""
return self.retrieve_docstring()
class ViewSetIntrospector(BaseViewIntrospector):
"""Handle ViewSet introspection."""
def __iter__(self):
methods = self._resolve_methods()
for method in methods:
yield ViewSetMethodIntrospector(self, methods[method], method)
def _resolve_methods(self):
if not hasattr(self.pattern.callback, 'func_code') or \
not hasattr(self.pattern.callback, 'func_closure') or \
not hasattr(self.pattern.callback.func_code, 'co_freevars') or \
'actions' not in self.pattern.callback.func_code.co_freevars:
raise RuntimeError('Unable to use callback: invalid closure/function specified.')
idx = self.pattern.callback.func_code.co_freevars.index('actions')
return self.pattern.callback.func_closure[idx].cell_contents
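# Illustrative: for a router-generated ViewSet callback, the recovered
# "actions" closure cell is a mapping such as {'get': 'list', 'post': 'create'},
# so __iter__ above yields one ViewSetMethodIntrospector per HTTP method.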
class ViewSetMethodIntrospector(BaseMethodIntrospector):
def __init__(self, view_introspector, method, http_method):
super(ViewSetMethodIntrospector, self).__init__(view_introspector, method)
self.http_method = http_method.upper()
def get_http_method(self):
return self.http_method
def get_docs(self):
"""
Attempts to retrieve method specific docs for an
endpoint. If none are available, the class docstring
will be used
"""
return self.retrieve_docstring()
|
|
import logging
from six import add_metaclass
from abc import ABCMeta, abstractmethod
from agentml.parser import Element
from agentml.common import attribute
from agentml.parser.trigger.response import Response
@add_metaclass(ABCMeta)
class BaseCondition(object):
"""
AgentML Base Condition class
"""
def __init__(self, agentml, element, **kwargs):
"""
Initialize a new Base Condition instance
:param agentml: The parent AgentML instance
:type agentml: AgentML
:param element: The XML Element object
:type element: etree._Element
:param kwargs: Default attributes
"""
self.agentml = agentml
self._element = element
# Containers and default attributes
self.statements = []
self.else_statement = None
self.type = kwargs['type'] if 'type' in kwargs else attribute(self._element, 'type', 'user_var')
self._log = logging.getLogger('agentml.parser.trigger.condition')
def evaluate(self, user):
"""
Evaluate the conditional statement and return its contents if a successful evaluation takes place
:param user: The active user object
:type user: agentml.User or None
:return: True if the condition evaluates successfully, otherwise False
:rtype : bool
"""
for statement in self.statements:
evaluated = statement.evaluate(self.agentml, user)
if evaluated:
return evaluated
return self.else_statement or False
@abstractmethod
def get_contents(self, element):
"""
Retrieve the contents of an element
:param element: The XML Element object
:type element: etree._Element
:return: A list of text and/or XML elements
:rtype : list of etree._Element or str
"""
pass
def _parse(self):
"""
Loop through all child elements and execute any available parse methods for them
"""
self.type = attribute(self._element, 'type') or self.type
for child in self._element:
method_name = '_parse_{0}'.format(str(child.tag)) # TODO: This is a hack, skip comment objects here
if hasattr(self, method_name):
parse = getattr(self, method_name)
parse(child)
def _parse_if(self, element):
"""
Parse the if statement
:param element: The XML Element object
:type element: etree._Element
"""
# Get the key
name = attribute(element, 'name')
cond_type = attribute(element, 'type', self.type)
# Get the comparison operator and its value (if implemented)
operator = None
value = None
for o in ConditionStatement.operators:
if o in element.attrib:
operator = o
value = element.attrib[operator]
break
# Get the contents of the element in tuple form and append our if statement
contents = tuple(self.get_contents(element))
self.statements.append(ConditionStatement(cond_type, operator, contents, value, name))
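# Illustrative (assumed) markup handled by _parse_if: an <if> element whose
# comparison operator is given as an attribute, e.g.
#   <if type="user_var" name="points" gte="10"> ... </if>
# becomes ConditionStatement('user_var', 'gte', contents, value='10', name='points').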
def _parse_elif(self, element):
"""
Parse an elif statement
:param element: The XML Element object
:type element: etree._Element
"""
self._parse_if(element)
def _parse_else(self, element):
"""
Parse the else statement
:param element: The XML Element object
:type element: etree._Element
"""
self.else_statement = self.get_contents(element)
class Condition(Element, BaseCondition):
"""
AgentML Condition object
"""
def __init__(self, trigger, element, file_path):
"""
Initialize a new Condition instance
:param trigger: The Trigger instance
:type trigger: Trigger
:param element: The XML Element object
:type element: etree._Element
:param file_path: The absolute path to the AgentML file
:type file_path: str
"""
self.trigger = trigger
BaseCondition.__init__(self, trigger.agentml, element)
Element.__init__(self, trigger.agentml, element, file_path)
self._log = logging.getLogger('agentml.parser.trigger.condition')
def get_contents(self, element):
"""
Retrieve the contents of an element
:param element: The XML Element object
:type element: etree._Element
:return: A list of responses
:rtype : list of Response
"""
return [Response(self.trigger, child, self.file_path)
for child in element if child.tag in ['response', 'template']]
class ConditionStatement:
"""
Condition Statement object
"""
# Condition operators
IS = 'is'
IS_NOT = 'is_not'
GREATER_THAN = 'gt'
GREATER_THAN_OR_EQUAL = 'gte'
LESS_THAN = 'lt'
LESS_THAN_OR_EQUAL = 'lte'
operators = [IS, IS_NOT, GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL]
def __init__(self, cond_type, operator, contents, value=None, name=None):
"""
Initialize a new Condition Statement object
:param cond_type: The type of the condition statement
:type cond_type: str
:param operator: The operator of the condition statement
:type operator: str
:param contents: The contents of the condition statement
:type contents: tuple
:param value: The value of the condition statement
:type value: str, int, float or None
:param name: The name of the variable if the condition type is USER_VAR or GLOBAL_VAR
:type name: str
"""
self.type = cond_type
self.operator = operator
self.contents = contents
self.value = value
self.name = name
self._log = logging.getLogger('agentml.parser.trigger.condition.statement')
def evaluate(self, agentml, user=None):
"""
Evaluate the conditional statement and return its contents if a successful evaluation takes place
:param user: The active user object
:type user: agentml.User or None
:param agentml: The active AgentML instance
:type agentml: AgentML
:return: Condition contents if the condition evaluates successfully, otherwise False
:rtype : tuple or bool
"""
self._log.debug('Evaluating conditional statement: {statement}'
.format(statement=' '.join(filter(None, [self.type, self.name, self.operator, self.value]))))
# Get the value of our key type
if self.type not in agentml.conditions:
self._log.error('Unknown condition type, "{type}", unable to evaluate condition statement'
.format(type=self.type))
return
key_value = agentml.conditions[self.type].get(agentml, user, self.name)
# Atomic comparisons
if self.operator is None and key_value:
return self.contents
if (self.operator == self.IS) and (key_value == self.value):
return self.contents
if (self.operator == self.IS_NOT) and (key_value != self.value):
return self.contents
# All remaining self.operators are numeric based, so key_value must contain a valid integer or float
try:
key_value = float(key_value)
value = float(self.value)
except (ValueError, TypeError):
return False
# Numeric comparisons
if (self.operator == self.GREATER_THAN) and (key_value > value):
return self.contents
if (self.operator == self.GREATER_THAN_OR_EQUAL) and (key_value >= value):
return self.contents
if (self.operator == self.LESS_THAN) and (key_value < value):
return self.contents
if (self.operator == self.LESS_THAN_OR_EQUAL) and (key_value <= value):
return self.contents
return False
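# Illustrative evaluation (assumes a condition type registered under
# agentml.conditions['user_var'] that resolves the named user variable):
#   stmt = ConditionStatement('user_var', ConditionStatement.GREATER_THAN_OR_EQUAL,
#                             ('high score',), value='10', name='points')
#   stmt.evaluate(agentml, user)  # -> ('high score',) when points >= 10, else False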
|
|
# -*- coding: utf-8 -*-
# batteries included
import os
import sys
from copy import deepcopy
# thirdparty
import requests
import xmltodict
# this package
from utils import normalize_url
from exceptions import ConfigFetchError, InvalidAPICallError, InvalidCredentialsError, UnknownArtifactoryRestError
ART_REPO_TYPES = ["ALL", "LOCAL", "REMOTE", "VIRTUAL"]
ART_DEFAULT_REPOS = [
'ext-release-local',
'ext-snapshot-local',
'libs-release-local',
'libs-snapshot-local',
'plugins-release-local',
'plugins-snapshot-local',
'jcenter-cache',
'libs-release',
'libs-snapshot',
'plugins-release',
'plugins-snapshot',
'remote-repos',
'jcenter'
]
def update_password(host_url, username, orig_pass, target_pass):
""" set the password for the user to the target_pass
Parameters
----------
host_url : string
A url of the form http(s)://domainname:port/context or
http(s)://ip:port/context
username : string
username of the password to change
orig_pass : string
original password to use for the update
target_pass : string
the desired new password
Returns
-------
changed : boolean
True if changes were made
Raises
------
InvalidCredentialsError :
If neither the original or target credentials work to update the password
UnknownArtifactoryRestError :
If we get a response we haven't encountered and don't know what to do with
"""
orig_auth = (username, orig_pass)
target_auth = (username, target_pass)
get_pass_url = '{}/artifactory/api/security/encryptedPassword'.format(
normalize_url(host_url)
)
orig_resp = requests.get(get_pass_url, auth=orig_auth)
if orig_resp.status_code == 401:
resp = requests.get(get_pass_url, auth=target_auth)
auth = target_auth
elif orig_resp.status_code == 200:
resp = orig_resp
auth = orig_auth
else:
raise UnknownArtifactoryRestError(
"Unexpected response when verifying credentials",
orig_resp
)
if resp.status_code != 200:
raise InvalidCredentialsError
if auth == target_auth:
return False
user_json_url = '{}/artifactory/api/security/users/{}'.format(
normalize_url(host_url),
username
)
headers = {'Content-type': 'application/json'}
user_dict_resp = requests.get(user_json_url, auth=auth)
if not user_dict_resp.ok:
if user_dict_resp.status_code == 401:
msg = "Received an unauthorized message after authorization "
msg += "has been checked. Wtf?"
raise UnknownArtifactoryRestError(msg, user_dict_resp)
else:
raise UnknownArtifactoryRestError(
"Couldn't get user information",
user_dict_resp
)
admin_dict = user_dict_resp.json()
admin_dict.pop('lastLoggedIn')
admin_dict.pop('realm')
admin_dict['password'] = target_pass
update_resp = requests.post(
user_json_url,
auth=auth,
json=admin_dict,
headers=headers
)
if not update_resp.ok:
if update_resp.status_code == 401:
msg = "Received an unauthorized message after authorization "
msg += "has been checked. Wtf?"
raise UnknownArtifactoryRestError(msg, update_resp)
else:
raise UnknownArtifactoryRestError(
"Couldn't post user password update",
update_resp
)
final_check_resp = requests.get(get_pass_url, auth=target_auth)
if not final_check_resp.ok:
raise UnknownArtifactoryRestError(
"Final password check failed. Could not use new credentials",
final_check_resp
)
else:
return True
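# Hypothetical usage (host, user and passwords are placeholders):
#   changed = update_password("http://artifactory.example.com:8081",
#                             "admin", "password", "n3w-s3cret")
# Returns True if the password was updated, False if target_pass already worked.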
def get_artifactory_config_from_url(host_url, auth):
"""retrieve the artifactory configuration xml doc
Parameters
----------
host_url: string
A url of the form http(s)://domainname:port/context or
http(s)://ip:port/context
auth: tuple
a tuple a la requests auth of the form (user, password)
"""
headers = {'Accept': 'application/xml'}
config_url = "{}/artifactory/api/system/configuration".format(
normalize_url(host_url)
)
r = requests.get(config_url, auth=auth, headers=headers)
if r.ok:
return(xmltodict.parse(r.text))
else:
raise ConfigFetchError("Something went wrong getting the config", r)
def update_ldapSettings_from_dict(config_dict, desired_dict):
"""match the ldap settings in the config_dict to the desired endstate
Parameters
----------
config_dict : dictionary
the source configuration dictionary
desired_dict : dictionary
the ldap subdictionary that we want to use
Returns
-------
return_dict : dictionary
A copy of the original config dict, plus any modifications made
changed : boolean
Whether or not changes were made
"""
return_dict = deepcopy(config_dict)
orig_ldap_settings = return_dict['config']['security']['ldapSettings']
if orig_ldap_settings == desired_dict:
return return_dict, False
# RED at the very least, this should validate the resulting xml
# or, it should only update the changed keys so we know what they are
# consider using easyXSD, but might want to avoid lxml
else:
return_dict['config']['security']['ldapSettings'] = desired_dict
return return_dict, True
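# Example flow (dictionary keys follow the structure accessed above):
#   config_dict = get_artifactory_config_from_url(host_url, auth)
#   new_config, changed = update_ldapSettings_from_dict(config_dict, desired_ldap)
#   if changed:
#       update_artifactory_config(host_url, auth, new_config)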
def update_artifactory_config(host_url, auth, config_dict):
""" take a configuraiton dict and upload it to artifactory
Parameters
----------
host_url : string
A url of the form http(s)://domainname:port[/context] or http(s)://ip:port[/context]
auth : tuple
A tuple of (user, password), as used by requests
config_dict : OrderedDict
a dict representation that will be converted back to XML
Returns:
--------
success : boolean
true if we succeeded
"""
headers = {'Content-type': 'application/xml'}
config_url = "{}/artifactory/api/system/configuration".format(
normalize_url(host_url)
)
xml_config = xmltodict.unparse(config_dict)
r = requests.post(config_url, auth=auth, headers=headers, data=xml_config)
if r.ok:
return True
else:
return False
def cr_repository(host_url, repo_dict, auth=None, session=None):
""" take a configuration dict and post it host_url
Should use
https://www.jfrog.com/confluence/display/RTF/Repository+Configuration+JSON
for the inputs.
Does no error checking; will fail if the JSON is malformed.
Parameters
----------
host_url : string
A url of the form
http(s)://domainname:port[/context] or http(s)://ip:port[/context]
repo_dict : OrderedDict
a dictionary of the inputs required by artifactory; see above.
auth : tuple, optional
A tuple of (user, password), as used by requests
session : requests Session object, optional
A session object (that has any necessary cookies / headers defined)
Either auth or session must be defined. Session overrides auth.
Returns
-------
success : boolean
true if succeeded
"""
ses = _get_artifactory_session(auth=auth, session=session)
if 'key' not in repo_dict:
raise InvalidAPICallError("The repo_dict must include a repo key (repo_dict['key'])")
repo_url = '{}/artifactory/api/repositories/{}'.format(
normalize_url(host_url),
repo_dict['key']
)
headers = {'Content-type': 'application/json'}
exists_resp = ses.get(repo_url)
if exists_resp.ok:
resp = ses.post(repo_url, json=repo_dict, headers=headers)
else:
resp = ses.put(repo_url, json=repo_dict, headers=headers)
# YELLOW need to add more logic to make this aware of if the configuration
# is changing
if resp.ok:
return True
else:
return False
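# Hypothetical call (fields follow the Repository Configuration JSON linked in
# the docstring; values are placeholders):
#   cr_repository("http://artifactory.example.com:8081",
#                 {"key": "my-local", "rclass": "local", "packageType": "generic"},
#                 auth=("admin", "password"))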
def _get_artifactory_session(username=None, passwd=None, auth=None,
session=None):
""" return a session with auth set. prioritizes existing sessions,
but validates that auth is set
Parameters
----------
username : string, optional
username to create auth tuple from
passwd : string, optional
password for auth tuple
auth : tuple, optional
A tuple of (user, password), as used by requests
session : requests.Session
A requests.Session object, with auth
Returns
-------
ses : requests.Session
A session object with authentication configured
Raises
------
InvalidAPICallError
When no combination of required inputs is given
"""
if session is None and auth is None and username is None and passwd is None:
raise InvalidAPICallError(
"You must pass either username/password, auth, or session"
)
ses = None
if session:
if session.auth:
ses = session
if auth and not ses:
ses = requests.Session()
ses.auth = auth
if (username and passwd) and not ses:
auth = (username, passwd)
ses = requests.Session()
ses.auth = auth
if not ses:
raise InvalidAPICallError(
"You must pass either username/password, auth, or session"
)
return ses
def get_repo_configs(host_url, repo_list, username=None, passwd=None,
auth=None, session=None):
""" return repository configuration dictionaries for specified set of repos
Parameters
----------
host_url : string
An artifactory url of the form
http(s)://domainname:port[/context] or http(s)://ip:port[/context]
repo_list : list of strings
A list of repo keys that you want to get configs for. repo
keys should match the url in the artifactory rest call
username : string, optional
username to create auth tuple from
passwd : string, optional
password for auth tuple
auth : tuple, optional
A tuple of (user, password), as used by requests
session : requests.Session
A requests.Session object, with auth
Either session, auth, or user/pass must be defined.
Session overrides auth, which overrides username/password
See _get_artifactory_session for details
"""
ses = _get_artifactory_session(
username=username,
passwd=passwd,
auth=auth,
session=session
)
repo_configs_list = []
for repo in repo_list:
repo_url = '{}/artifactory/api/repositories/{}'.format(
normalize_url(host_url),
repo
)
resp = ses.get(repo_url)
if not resp.ok:
msg = "Failed to fetch config for {}".format(repo)
raise UnknownArtifactoryRestError(msg, resp)
repo_dict = resp.json()
repo_configs_list.append(repo_dict)
return repo_configs_list
def get_repo_list(host_url, repo_type="ALL", include_defaults=False,
include_filter=None):
""" return repository configuration dictionaries for specified set of repos
Parameters
----------
host_url : string
An artifactory url of the form
http(s)://domainname:port[/context] or http(s)://ip:port[/context]
repo_type : {'all', 'LOCAL', 'REMOTE', 'VIRTUAL'}
What types of repo (as defined by artifactory) to fetch.
include_defaults : boolean
Whether to include repos that ship with artifactory
include_filter : string
String which is used to do a simple filter of repo names. (in)
Using this + naming convention can filter by package type.
Either session, auth, or user/pass must be defined.
Session overrides auth overides username/password
See _get_artifactory_session for details
"""
repos_url = '{}/artifactory/api/repositories'.format(host_url)
resp = requests.get(repos_url)
if not resp.ok:
raise UnknownArtifactoryRestError("Error fetching repos", resp)
final_repo_list = resp.json()
if repo_type.upper() != "ALL":
if repo_type.upper() not in ART_REPO_TYPES:
raise InvalidAPICallError("repo_type must be one of {}".format(
ART_REPO_TYPES
)
)
final_repo_list = [r for r in final_repo_list
if r['type'] == repo_type.upper()]
if include_filter:
final_repo_list = [r for r in final_repo_list
if include_filter in r['url'].split('/')[-1]]
if not include_defaults:
final_repo_list = [r for r in final_repo_list
if r['key'] not in ART_DEFAULT_REPOS]
return final_repo_list
if __name__ == '__main__':
print("This file is not the entrypoint for artifactory_tool")
sys.exit(1)
|
|
# coding=utf-8
import math
import hashlib
import base64
from tornado import web
from tornado.web import authenticated
from vanellope.handlers import BaseHandler
from vanellope import config
from vanellope.handlers import Days
class WelcomePage(BaseHandler):
def get(self):
admin = self.user.get_admin_user()
if admin:
return self.redirect('/')
self.render("welcome.html",
title=self.concat_page_title('Welcome'),
page=u'welcome')
def post(self):
"""Signup"""
admin = self.user.get_admin_user()
if admin:
self.set_status(400)
return self.finish({
'status': u'error'
})
email = self.get_argument('email', None)
pwd = self.get_argument('pwd', None)
role = self.get_argument('role', None)
err = 0
admin_user = self.user.get_admin_user()
if role == 'admin' and admin_user:
err += 1
else:
password_hash = self.user.create_user({
"username": "Admin",
"email": email,
"password": pwd,
"role": role
})
self.clear_all_cookies()
cookie = base64.b64encode('Admin:' + password_hash)
self.set_cookie(name="vanellope",
value=cookie,
expires_days=90)
admin_user = self.user.get_admin_user()
self.settings['admin'] = admin_user
if not err:
self.redirect('/controlpanel')
else:
self.redirect('/welcome')
class IndexPage(BaseHandler):
def get(self):
ENTRIES_PER_PAGE = config.posts_per_page
current_page = int(self.get_argument(u'p', 1))
current_user = self.get_current_user()
if current_user and current_user['role'] == 'admin':
drafts = self.posts.get_drafts()
else:
drafts = []
articles = self.posts.find(
states=['published'],
limit=ENTRIES_PER_PAGE,
skip=(current_page - 1) * ENTRIES_PER_PAGE
)
total_entries = self.posts.count(states=['published'])
pages = int(math.floor(total_entries / ENTRIES_PER_PAGE))
if total_entries > pages * ENTRIES_PER_PAGE:
pages += 1
next_page = current_page + 1 if current_page < pages else pages
previous_page = current_page - 1 if current_page > 1 else 1
self.render("index.html",
title=self.concat_page_title('Home'),
page=u'index',
current_page=current_page,
next_page=next_page,
previous_page=previous_page,
pages=pages,
drafts=drafts,
articles=articles)
class SnippetsPage(BaseHandler):
def get(self):
self.render("snippets.html",
title=self.concat_page_title('Snippets'),
page=u'snippets',
current_page=0,
next_page=0,
previous_page=0,
pages=1,
drafts=[],
articles=[])
class TagsPage(BaseHandler):
def get(self):
current_page = int(self.get_argument(u'p', 1))
self.render("tags.html",
title=self.concat_page_title('Tags'),
page=u'tags',
previous_page=current_page - 1 if current_page > 1 else 1,
next_page=current_page + 1)
class TagPage(BaseHandler):
def get(self, tag):
current_page = int(self.get_argument(u'p', 1))
articles = self.posts.find_posts_with_tag(tag)
self.render("tag.html",
title=self.concat_page_title("Tag:{0}".format(tag)),
page=u'tag',
previous_page=current_page - 1 if current_page > 1 else 1,
next_page=current_page + 1,
current_tag=tag,
articles=articles)
class ArchivesPage(BaseHandler):
def get(self):
self.render('archives.html',
title=self.concat_page_title('Archive'),
page=u'archives')
class ArchivePage(BaseHandler):
def get(self, year=u'', month=u'', day=u'12345'):
if day is None:
day = u''
if month is None:
month = u''
if year is None:
year = u''
current_page = int(self.get_argument(u'p', 1))
from_date = end_date = None
if year and month and day:
from_date = (
Days()
.day(u"{0}-{1}-{2}".format(year, month, day))
.timezone('UTC')
.beginning()
)
end_date = (
Days()
.day(u"{0}-{1}-{2}".format(year, month, day))
.timezone('UTC')
.next_day()
.beginning()
)
elif year and month and not day:
from_date = (
Days()
.day(u"{0}-{1}-{2}".format(year, month, u'01'))
.timezone('UTC')
.beginning()
)
end_date = (
Days()
.day(u"{0}-{1}-{2}".format(year, month, u'01'))
.timezone('UTC')
.next_month()
.beginning()
)
elif year and not month and day:
from_date = (
Days()
.day(u"{0}-{1}-{2}".format(year, u'01', day))
.timezone('UTC')
.beginning()
)
end_date = (
Days()
.day(u"{0}-{1}-{2}".format(year, u'12', day))
.timezone('UTC')
.beginning()
)
elif year and not month and not day:
from_date = (
Days().day(u"{0}-01-01".format(year))
.timezone('UTC')
.beginning()
)
end_date = (
Days().day(u"{0}-12-31".format(year))
.timezone('UTC')
.beginning()
)
elif not year:
self.redirect('/')
posts = self.posts.find_posts_between_date(from_date, end_date)
articles = []
for article in posts:
if 'tags' not in article:
article['tags'] = []
articles.append(article)
self.render("archive.html",
title=self.concat_page_title('Archive'),
page=u'archive',
from_date=from_date,
end_date=end_date,
current_uri=self.request.uri,
previous_page=current_page - 1 if current_page > 1 else 1,
next_page=current_page + 1,
articles=articles)
class CategoryPage(BaseHandler):
def get(self, cate):
current_page = int(self.get_argument(u'p', 1))
articles = self.posts.find_by_category(cate, ["published"])
self.render("category.html",
title=self.concat_page_title('Category:{0}'.format(cate)),
page=u'category',
previous_page=current_page - 1 if current_page > 1 else 1,
next_page=current_page + 1,
current_category=cate,
articles=articles)
class LoginPage(BaseHandler):
def get(self):
self.render("login.html",
title=self.concat_page_title('Login'),
page=u'login')
def post(self):
"""Login"""
pwd = self.get_argument('pwd', None)
err = 0
admin_user = self.user.get_admin_user()
if not admin_user:
return self.redirect('/welcome')
new_hash = hashlib.sha256(pwd + admin_user['salt']).hexdigest()
if admin_user['passwd'] == new_hash:
self.clear_all_cookies()
cookie = base64.b64encode('Admin:' + new_hash)
self.set_cookie(name="vanellope",
value=cookie,
expires_days=90)
else:
err += 1
if err == 0:
self.redirect('/')
else:
self.redirect('/login')
class Logout(BaseHandler):
def get(self):
self.clear_all_cookies()
self.redirect('/')
class DraftPage(BaseHandler):
@authenticated
def get(self, article_id):
article = self.posts.find_by_id(article_id)
if not article:
self.send_error(404)
return
if 'tags' not in article:
article['tags'] = []
siblings = []
self.render("article.html",
title=article['title'],
page=u'draft',
related_articles=[],
siblings=siblings,
article=article)
class ArticlePage(BaseHandler):
def get(self, article_id):
try:
article = self.posts.find(id_list=[article_id],
states=['published'])[0]
except IndexError:
self.send_error(404)
return
# add a view count
self.posts.views_count(article_id)
# TODO: state should be "pass" (approved) rather than "checking"
comments = self.comments.find(post_id=article_id, state="checking")
if 'tags' not in article:
article['tags'] = []
siblings = []
self.render("article.html",
title=article['title'],
page=u'article',
related_articles=[],
siblings=siblings,
article=article,
comments=comments)
def post(self):
"""Create new article"""
pass
@authenticated
def put(self, article_id=None):
""" Update an existing article """
pass
@authenticated
def delete(self, article_id):
""" Delete article """
pass
class UploadedFileHandler(web.StaticFileHandler):
def initialize(self):
uploaded_path = self.settings['uploaded_path']
return web.StaticFileHandler.initialize(self, uploaded_path)
|
|
# -*- coding: utf-8 -*-
from datetime import date
from pathlib import Path
from allauth.account.models import EmailAddress
from django import forms
from django.contrib import messages
from djspace.core.models import DISABILITY_CHOICES
from djspace.core.models import EMPLOYMENT_CHOICES
from djspace.core.models import REG_TYPE
from djspace.core.models import GenericChoice
from djspace.core.models import Photo
from djspace.core.models import UserFiles
from djspace.core.models import UserProfile
from djtools.fields import BINARY_CHOICES
from djtools.fields import GENDER_CHOICES
from djtools.fields import SALUTATION_TITLES
from djtools.fields import STATE_CHOICES
from djtools.fields.localflavor import USPhoneNumberField
DOB_YEAR = date.today().year - 10
RACES = GenericChoice.objects.filter(tags__name__in=['Race']).order_by('ranking')
class EmailApplicantsForm(forms.Form):
"""Email form for sending email to applicants."""
content = forms.CharField(
required=True, widget=forms.Textarea, label="Email content",
)
title = forms.CharField(max_length=50, widget=forms.HiddenInput())
content_type = forms.CharField(max_length=8, widget=forms.HiddenInput())
def clean_content(self):
"""Form validation."""
content = self.cleaned_data.get('content')
if not content:
self._errors['content'] = self.error_class(
["Please provide the content of the email"],
)
return content
class SignupForm(forms.Form):
"""Gathers auth and user profile data."""
registration_type = forms.CharField(
max_length=32,
widget=forms.Select(choices=REG_TYPE),
)
salutation = forms.CharField(
widget=forms.Select(choices=SALUTATION_TITLES),
max_length=16,
required=False,
)
first_name = forms.CharField(max_length=30)
second_name = forms.CharField(
label="Second name, middle name or initial",
max_length=30,
)
last_name = forms.CharField(max_length=30)
date_of_birth = forms.DateField(
label="Date of birth",
required=False,
widget=forms.SelectDateWidget(years=range(DOB_YEAR, 1929, -1)),
)
gender = forms.TypedChoiceField(
choices=GENDER_CHOICES,
widget=forms.RadioSelect(),
)
race = forms.ModelMultipleChoiceField(
label="Race and Ethnicity",
queryset=RACES,
help_text='Check all that apply',
widget=forms.CheckboxSelectMultiple(),
)
tribe = forms.CharField(
max_length=128,
required=False,
)
disability = forms.CharField(
label="Disability status",
widget=forms.Select(choices=DISABILITY_CHOICES),
)
disability_specify = forms.CharField(
label="Specify if not listed",
max_length=255,
required=False,
)
employment = forms.CharField(
label="Employment status",
widget=forms.Select(choices=EMPLOYMENT_CHOICES),
)
military = forms.TypedChoiceField(
label="Have you served in the United States military?",
choices=BINARY_CHOICES,
widget=forms.RadioSelect(),
)
us_citizen = forms.TypedChoiceField(
label="United States Citizen",
choices=BINARY_CHOICES,
widget=forms.RadioSelect(),
)
address1 = forms.CharField(label="Address", max_length=128)
address2 = forms.CharField(label="", max_length=128, required=False)
city = forms.CharField(max_length=128)
state = forms.CharField(widget=forms.Select(choices=STATE_CHOICES))
postal_code = forms.CharField(label="Postal Code", max_length=10)
address1_current = forms.CharField(
label="Address",
max_length=128,
required=False,
)
address2_current = forms.CharField(label="", max_length=128, required=False)
city_current = forms.CharField(label="City", max_length=128, required=False)
state_current = forms.CharField(
label="State",
widget=forms.Select(choices=STATE_CHOICES),
required=False,
)
postal_code_current = forms.CharField(
label="Postal Code",
max_length=10,
required=False,
)
phone_primary = USPhoneNumberField(
label="Primary phone",
widget=forms.TextInput(attrs={'placeholder': 'eg. 123-456-7890'}),
)
phone_mobile = USPhoneNumberField(
label="Cell phone",
widget=forms.TextInput(attrs={'placeholder': 'eg. 123-456-7890'}),
)
email_secondary = forms.EmailField(
label='Secondary e-mail',
widget=forms.TextInput(attrs={'placeholder': 'Secondary e-mail address'}),
)
def clean(self):
"""Form validation."""
cd = super(SignupForm, self).clean()
# dob is required for this form
if not cd.get('date_of_birth'):
self._errors['date_of_birth'] = self.error_class(
["Required field"],
)
# current address is required for students
if cd.get('registration_type') in {'Undergraduate', 'Graduate'}:
if not cd.get('address1_current'):
self._errors['address1_current'] = self.error_class(
["Required field"],
)
if not cd.get('city_current'):
self._errors['city_current'] = self.error_class(
["Required field"],
)
if not cd.get('state_current'):
self._errors['state_current'] = self.error_class(
["Required field"],
)
if not cd.get('postal_code_current'):
self._errors['postal_code_current'] = self.error_class(
["Required field"],
)
# check disability and description
dis_err = (
cd.get('disability') == 'I have a disability, but it is not listed' and
cd.get('disability_specify') == ''
)
if dis_err:
self._errors['disability_specify'] = self.error_class(
["Please describe your disability"],
)
# check if secondary email already exists in the system
if cd.get('email_secondary'):
try:
EmailAddress.objects.get(email=cd.get('email_secondary'))
self._errors['email_secondary'] = self.error_class(
["That email already exists in the system"],
)
raise forms.ValidationError(
"""
You have submitted an email that already exists
in the system. Please provide a different email.
""",
)
except Exception:
pass
return cd
def signup(self, request, user):
"""Required method for the allauth package."""
cd = self.cleaned_data
user.first_name = cd['first_name']
user.last_name = cd['last_name']
user.save()
profile = UserProfile(
user=user,
updated_by=user,
salutation=cd['salutation'],
second_name=cd['second_name'],
registration_type=cd['registration_type'],
gender=cd['gender'],
tribe=cd.get('tribe'),
disability=cd['disability'],
disability_specify=cd['disability_specify'],
employment=cd['employment'],
military=cd['military'],
us_citizen=cd['us_citizen'],
date_of_birth=cd['date_of_birth'],
address1=cd['address1'],
address2=cd.get('address2'),
city=cd['city'],
state=cd['state'],
postal_code=cd['postal_code'],
address1_current=cd['address1_current'],
address2_current=cd.get('address2_current'),
city_current=cd['city_current'],
state_current=cd['state_current'],
postal_code_current=cd['postal_code_current'],
phone_primary=cd['phone_primary'],
phone_mobile=cd['phone_mobile'],
)
profile.save()
for raza in request.POST.getlist('race'):
profile.race.add(raza)
profile.save()
if profile.us_citizen == 'No':
messages.warning(
request,
"""
You must be a United States citizen in order to
apply for grants from NASA.
""",
)
class Meta:
"""Information about the form class."""
fields = [
'registration_type',
'salutation',
'first_name',
'second_name',
'last_name',
'date_of_birth',
'gender',
'race',
'tribe',
'disability',
'disability_specify',
'employment',
'military',
'us_citizen',
'address1_current',
'address2_current',
'city_current',
'state_current',
'postal_code_current',
'address1',
'address2',
'city',
'state',
'postal_code',
'phone_primary',
'phone_mobile',
'email_secondary',
]
class PhotoForm(forms.ModelForm):
"""Optional photos."""
class Meta:
"""Information about the form class."""
model = Photo
exclude = ('caption', 'content_type', 'object_id', 'content_object')
fields = ['phile']
class UserFilesForm(forms.ModelForm):
"""Files required after funding has been approved."""
def __init__(self, *args, **kwargs):
"""Override of the initialization method to obtain the required list."""
self.required = kwargs.pop('required', None)
super(UserFilesForm, self).__init__(*args, **kwargs)
def clean(self):
"""Form validation."""
cd = self.cleaned_data
if self.required:
for require in self.required:
if not cd.get(require):
self.add_error(require, "Required field")
return cd
def clean_biography(self):
"""Touch file so we can update the timestamp."""
biography = self.cleaned_data.get('biography')
if biography and self.instance.biography:
Path(self.instance.biography.path).touch()
return biography
def clean_irs_w9(self):
"""Touch file so we can update the timestamp."""
irs_w9 = self.cleaned_data.get('irs_w9')
if irs_w9 and self.instance.irs_w9:
Path(self.instance.irs_w9.path).touch()
return irs_w9
def clean_mugshot(self):
"""Touch file so we can update the timestamp."""
mugshot = self.cleaned_data.get('mugshot')
if mugshot and self.instance.mugshot:
Path(self.instance.mugshot.path).touch()
return mugshot
def clean_media_release(self):
"""Touch file so we can update the timestamp."""
media_release = self.cleaned_data.get('media_release')
if media_release and self.instance.media_release:
Path(self.instance.media_release.path).touch()
return media_release
class Meta:
"""Information about the form class."""
model = UserFiles
exclude = ('user', 'status')
fields = ['mugshot', 'biography', 'media_release', 'irs_w9']
|
|
import numpy as np
import sys
import sklearn.tree
import Context, Argmax, Metrics
from Policy import *
import string
import ContextIterators
class BanditSim(object):
def __init__(self, X, N, K, eps, one_pass=False, reward_noise=0.0):
"""
Simulate a K-armed contextual bandit game with X contexts, N
policies, and K actions with gap eps. Use the uniform
distribution over contexts.
Policies are effectively random. One policy is picked to be the best;
for each context, the reward distribution is Ber(1/2+eps) on the
action of that policy and Ber(1/2-eps) on the other actions.
Exposes methods:
get_new_context() -- construct and return new context
get_num_actions() -- enables context-specific K
get_reward(a) -- get reward for an action
get_all_rewards() -- return rewards for all actions.
get_best_reward() -- returns reward for pi^\star for book-keeping purposes.
@Deprecated
"""
self.L = 1
self.N = N
self.K = K
self.eps = eps
self.X = X
self.one_pass = one_pass
self.reward_noise = reward_noise
## Initialize the contexts.
self.contexts = []
for i in range(self.X):
self.contexts.append(Context.Context(i, np.zeros(1), self.K, self.L))
## Initialize the policies.
piMat = np.matrix(np.random.randint(0, K, size=[N,X,self.L]))
self.Pi = []
for i in range(N):
pi = EnumerationPolicy(dict(zip(range(self.X), [piMat[i,j] for j in range(self.X)])))
self.Pi.append(pi)
## Initialize the optimal policy
self.Pistar = np.random.randint(0, N)
self.curr_idx = -1
def get_new_context(self):
if self.one_pass:
self.curr_idx += 1
if self.curr_idx >= self.X:
return None
self.curr_x = self.contexts[self.curr_idx]
else:
self.curr_x = self.contexts[np.random.randint(0, self.X)]
## r = np.random.binomial(1, 0.5-self.eps, self.K)
r = (np.random.binomial(1, 0.5-self.eps, self.K)*2)-1
r[self.Pi[self.Pistar].get_action(self.curr_x)] = (np.random.binomial(1, 0.5+self.eps, self.L)*2)-1
self.curr_r = r
self.r_noise = np.random.normal(0, self.reward_noise)
return self.curr_x
def get_num_actions(self):
return self.curr_x.get_K()
def get_curr_context(self):
return self.curr_x
def get_reward(self, a):
return self.curr_r[a]
def get_all_rewards(self):
return self.curr_r
def get_best_reward(self):
return np.sum(self.curr_r[self.Pi[self.Pistar].get_action(self.curr_x)])
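## Minimal interaction loop (illustrative parameter values):
##   sim = BanditSim(X=50, N=20, K=5, eps=0.1)
##   x = sim.get_new_context()
##   a = np.random.randint(sim.get_num_actions())
##   r = sim.get_reward(a)          # rewards are in {-1, +1}
##   best = sim.get_best_reward()   # reward of the optimal policy's action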
class SemibanditSim(BanditSim):
def __init__(self, X, N, K, L, eps, one_pass=False):
"""
Simulate a contextual semi-bandit problem with X contexts, N
policies, K base actions and action lists of length L. Use the
uniform distribution over contexts.
Policies are effectively random, on each context they play L
random actions. One policy is optimal and the reward
distribution is Ber(1/2+eps) on each base action played by
that policy and Ber(1/2-eps) on each other base action.
Additionally exposes:
get_slate_reward(A) -- total reward for a slate
get_base_rewards(A) -- rewards for each base action in slate.
"""
self.L = L
self.N = N
self.K = K
self.eps = eps
self.X = X
self.one_pass = one_pass
self.reward_noise = 0.0
## Initialize the contexts.
self.contexts = []
for i in range(self.X):
self.contexts.append(Context.Context(i, np.zeros((1,1)), self.K, self.L))
## Initialize the policies.
piMat = np.zeros((N,X,L), dtype=np.int)
for n in range(N):
for x in range(X):
piMat[n,x,:] = np.random.choice(range(K), L, replace=False)
self.Pi = []
for i in range(N):
pi = EnumerationPolicy(dict(zip(range(self.X), [piMat[i,j,:] for j in range(self.X)])))
self.Pi.append(pi)
## Initialize the optimal policy
self.Pistar = np.random.randint(0, N)
self.curr_idx = -1
def get_slate_reward(self, A):
return np.sum(self.curr_r[A]) + self.r_noise
def get_base_rewards(self, A):
return self.curr_r[A]
class OrderedSBSim(SemibanditSim):
def __init__(self, X, N, K, L, eps, w_vec=None, link="linear", one_pass=False,reward_noise=0.1):
self.X = X
self.N = N
self.K = K
self.L = L
self.eps = eps
self.one_pass = one_pass
self.link = link
if w_vec is None:
self.weight = np.ones(L)
else:
self.weight = w_vec
self.reward_noise = reward_noise
assert len(self.weight) == self.L
## Initialize contexts.
self.contexts = []
for i in range(self.X):
self.contexts.append(Context.Context(i,np.zeros((1,1)), self.K, self.L))
piMat = np.zeros((N,X,L), dtype=np.int)
for n in range(N):
for x in range(X):
piMat[n,x,:] = np.random.choice(range(K), L, replace=False)
self.Pi = []
for i in range(N):
pi = EnumerationPolicy(dict(zip(range(self.X), [piMat[i,j,:] for j in range(self.X)])))
self.Pi.append(pi)
## Initialize the optimal policy
self.Pistar = np.random.randint(0, N)
self.curr_idx = -1
def get_slate_reward(self, A):
if self.link == "linear":
return np.dot(self.weight,self.curr_r[A]) + self.r_noise
if self.link == "logistic":
## return np.random.binomial(1, 1.0/(1+np.exp(-np.dot(self.weight, self.curr_r[A]))))
return np.random.binomial(1, 1.0/(1.0+np.exp(-np.dot(self.weight, self.curr_r[A]))))
## return self.link(np.dot(self.weight,self.curr_r[A])) + self.r_noise
def get_best_reward(self):
return self.get_slate_reward(self.Pi[self.Pistar].get_action(self.curr_x))
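## Illustrative: with w_vec = [1.0, 0.5, 0.25] and link="linear", a slate's
## reward is the position-weighted sum of its base rewards plus Gaussian noise:
##   sim = OrderedSBSim(X=50, N=20, K=10, L=3, eps=0.1,
##                      w_vec=np.array([1.0, 0.5, 0.25]))
##   x = sim.get_new_context()
##   sim.get_slate_reward(np.array([0, 3, 7]))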
class MQBandit(SemibanditSim):
def __init__(self, dataset, L, policies=None):
"""
Use the MQ style dataset as a contextual semi-bandit problem.
This dataset consists of (query, doc, relevance) triples where
each query,doc pair also comes with a feature vector.
L is slate length
On each round for this problem, the learner plays L documents
and the total reward is the sum of the relevances for those
documents (TODO: maybe incorporate some weights). The
        relevances are exposed via the get_base_rewards method. Thus it
        is a semibandit problem.
        We use linear policies for now. TODO: If "policies" is
set to None or unspecified, then we allow all linear
functions. If "policies" is assigned some number N, then we
draw N random unit vectors and use those as policies. Here we
explicitly enumerate the policy class and build the (context,
policy, slate) table.
"""
self.L = L
        self.policies = policies is not None
if dataset == "MQ2008":
self.loadMQ2008()
elif dataset == "MQ2007":
self.loadMQ2007()
else:
print("Error misspecified dataset")
return
numQueries, numDocs, numFeatures = np.shape(self.features)
print("Datasets:loadNpz [INFO] Loaded",
" NumQueries, [Min,Max]NumDocs, totalDocs, MaxNumFeatures: ", numQueries, np.min(self.docsPerQuery), np.max(self.docsPerQuery), numDocs, numFeatures)
sys.stdout.flush()
self.K = np.max(self.docsPerQuery)
self.X = len(self.docsPerQuery)
self.d = self.features.shape[2]
pistar = self.get_best_policy(learning_alg=lambda: sklearn.tree.DecisionTreeClassifier(max_depth=5), classification=True)
self.Pi = [pistar]
self.Pistar = 0
self.curr_x = None
self.curr_r = None
def loadMQ2008(self):
npFile = np.load("MQ2008.npz")
self.relevances = npFile['relevances']/2
self.features = npFile['features']
self.docsPerQuery = npFile['docsPerQuery']
self.relevances = self.relevances[:,:np.min(self.docsPerQuery)]
self.features = self.features[:,:np.min(self.docsPerQuery),:]
self.docsPerQuery = np.min(self.docsPerQuery)*np.ones(len(self.docsPerQuery))
self.docsPerQuery = np.array(self.docsPerQuery, dtype=np.int)
def loadMQ2007(self):
npFile = np.load("MQ2007.npz")
self.relevances = npFile['relevances']/2
self.features = npFile['features']
self.docsPerQuery = npFile['docsPerQuery']
## Filtering to make K much smaller.
toretain = np.where(self.docsPerQuery >= 40)[0]
self.relevances = self.relevances[toretain,:]
self.features = self.features[toretain,:]
self.docsPerQuery = self.docsPerQuery[toretain]
self.relevances = self.relevances[:,:np.min(self.docsPerQuery)]
self.features = self.features[:,:np.min(self.docsPerQuery),:]
self.docsPerQuery = np.min(self.docsPerQuery)*np.ones(len(self.docsPerQuery))
self.docsPerQuery = np.array(self.docsPerQuery, dtype=np.int)
def get_new_context(self):
context_idx = np.random.randint(0, self.X)
self.curr_x = Context.Context(context_idx, self.features[context_idx,:,:], self.docsPerQuery[context_idx], self.L)
self.curr_r = self.relevances[context_idx,:]
return self.curr_x
def get_best_reward(self):
if "Pi" in dir(self):
            return np.sum(self.curr_r[self.Pi[self.Pistar].get_action(self.curr_x)])
else:
idx = np.argsort(self.curr_r)
return np.sum(self.curr_r[idx[len(self.curr_r)-self.L:len(self.curr_r)]])
def get_best_policy(self, learning_alg=None, classification=True):
if learning_alg == None:
return self.Pi[self.Pistar]
else:
            ## Prepare the (context, relevances) dataset and fit the supplied
            ## learning_alg (e.g. a shallow decision tree) via Argmax.
dataset = []
for x in range(self.X):
context = Context.Context(x, self.features[x,:,:], self.docsPerQuery[x], self.L)
dataset.append((context, self.relevances[x,:]))
if classification:
return Argmax.argmax(self, dataset, policy_type=ClassificationPolicy, learning_alg=learning_alg)
else:
return Argmax.argmax(self, dataset, policy_type=RegressionPolicy, learning_alg=learning_alg)
def offline_evaluate(self, policy):
score = 0.0
for x in range(self.X):
score += np.sum(self.relevances[x,policy.get_action(Context.Context(x, self.features[x,:,:], self.docsPerQuery[x], self.L))])
return score
class LinearBandit(SemibanditSim):
class LinearContext():
def __init__(self,name,features):
self.features = features
self.name = name
def get_ld_features(self):
return self.features
def get_K(self):
return self.features.shape[0]
def get_L(self):
return 1
def get_ld_dim(self):
return self.features.shape[1]
def get_name(self):
return self.name
def __init__(self, d, L, K, noise=False, seed=None, pos=False, quad=False, low=None):
"""
A Linear semi-bandit simulator. Generates a random unit weight
vector upon initialization. At each round, random unit-normed
        feature vectors are drawn for each action, and the reward for
        action a is x_a^T w plus Gaussian noise (if noise is set) or just x_a^T w.
The learner plays a slate of L actions and K actions are
available per round.
d is the dimension of the feature space
L actions per slate
K actions per context
"""
self.d = d
self.L = L
self.K = K
self.N = None
self.X = None
self.noise = noise
self.seed = seed
self.pos = pos
self.quad = quad
self.low = low
if seed is not None:
np.random.seed(574)
if self.pos:
self.weights = np.matrix(np.random.dirichlet(d*np.ones(self.d))).T
self.weights = self.weights/np.linalg.norm(self.weights)
else:
self.weights = np.matrix(np.random.normal(0, 1, [self.d,1]))
self.weights = self.weights/np.sqrt(self.weights.T*self.weights)
if self.low is not None:
self.weights = np.matrix(np.random.normal(0, 1, [self.d,1]))
self.weights[self.low:self.d] = 0
self.weights = self.weights/np.sqrt(self.weights.T*self.weights)
if seed is not None:
np.random.seed(seed)
self.t = 0
self.curr_x = None
self.features = None
self.all_features = []
self.curr_r = None
self.curr_x = None
def get_new_context(self):
## Generate random feature matrix and normalize.
if self.seed is not None:
np.random.seed((self.t+17)*(self.seed+1) + 37)
if self.pos:
self.features = np.matrix(np.random.dirichlet(1.0/self.d*np.ones(self.d), self.K))
else:
self.features = np.matrix(np.random.normal(0, 1, [self.K, self.d]))
self.features[0,:] = 0.05*self.features[0,:] + np.matrix(self.weights.T)
self.features = np.diag(1./np.sqrt(np.diag(self.features*self.features.T)))*self.features
# self.all_features.append(self.features)
self.curr_means = np.array((self.features*self.weights).T)[0]
if self.quad:
self.curr_means = self.curr_means**2
if self.noise and type(self.noise) == float:
self.noise_term = np.random.normal(0,self.noise)
self.curr_r = np.array(self.curr_means+self.noise_term)
elif self.noise:
self.noise_term = np.random.normal(0, 0.1)
self.curr_r = np.array(self.curr_means+self.noise_term)
else:
self.curr_r = np.array(self.curr_means)
old_t = self.t
self.t += 1
self.curr_x = LinearBandit.LinearContext(self.t, self.features)
return self.curr_x
def get_best_reward(self):
idx = np.argsort(self.curr_means)
return np.sum(self.curr_r[idx[len(idx)-self.L:len(idx)]])
def get_slate_reward(self, A):
return self.curr_r[A]
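# Illustrative sketch (not part of the class above): one round with
# LinearBandit, scoring the K candidate actions with a stand-in estimate of the
# hidden weight vector and playing the greedy top-L slate. The random unit
# vector below is only a placeholder for a learned estimate; numpy is assumed
# to be imported as np at module level.
def _demo_linear_bandit_round():
    sim = LinearBandit(d=10, L=3, K=20, noise=True, seed=0)
    x = sim.get_new_context()
    Phi = x.get_ld_features()                    # K x d feature matrix for this round
    w_hat = np.matrix(np.random.normal(0, 1, [sim.d, 1]))
    w_hat = w_hat/np.sqrt(w_hat.T*w_hat)         # stand-in for a learned weight estimate
    scores = np.array((Phi*w_hat).T)[0]          # predicted reward for each of the K actions
    A = np.argsort(scores)[-sim.L:]              # greedy top-L slate under the estimate
    return sim.get_slate_reward(A), sim.get_best_reward()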
class SemiparametricBandit(SemibanditSim):
class LinearContext():
def __init__(self,name,features):
self.features = features
self.name = name
def get_ld_features(self):
return self.features
def get_K(self):
return self.features.shape[0]
def get_L(self):
return 1
def get_ld_dim(self):
return self.features.shape[1]
def get_name(self):
return self.name
def __init__(self, d, L, K, noise=False, seed=None, pos=False):
"""
A Linear semi-bandit simulator. Generates a random unit weight
vector upon initialization. At each round, random unit-normed
        feature vectors are drawn for each action, and the reward for
        action a is x_a^T w plus a context-dependent offset (the
        semiparametric component), with optional Gaussian noise.
The learner plays a slate of L actions and K actions are
available per round.
d is the dimension of the feature space
L actions per slate
K actions per context
noise = Add gaussian noise?
seed = random seed
pos = all vectors in positive orthant?
"""
self.d = d
self.L = L
self.K = K
self.N = None
self.X = None
self.noise = noise
self.seed = seed
self.pos = pos
if seed is not None:
np.random.seed(574)
if self.pos:
self.weights = np.matrix(np.random.dirichlet(np.ones(self.d))).T
self.weights = self.weights/np.linalg.norm(self.weights)
else:
self.weights = np.matrix(np.random.normal(0, 1, [self.d,1]))
self.weights = self.weights/np.sqrt(self.weights.T*self.weights)
if seed is not None:
np.random.seed(seed)
self.t = 0
self.curr_x = None
self.features = None
self.all_features = []
self.curr_r = None
self.curr_x = None
def get_new_context(self):
## Generate random feature matrix and normalize.
if self.seed is not None:
np.random.seed((self.t+17)*(self.seed+1) + 37)
if self.pos:
self.features = np.matrix(np.random.dirichlet(1.0/self.d*np.ones(self.d), self.K))
else:
self.features = np.matrix(np.random.normal(0, 1, [self.K, self.d]))
self.features = np.diag(1./np.sqrt(np.diag(self.features*self.features.T)))*self.features
self.all_features.append(self.features)
self.curr_means = np.array((self.features*self.weights).T)[0]
self.curr_offset = -1*np.max(self.curr_means)
## self.curr_offset = 0
## self.curr_offset = (self.features[0,:]*self.weights)**2
if self.noise and type(self.noise) == float:
self.noise_term = np.random.normal(0, self.noise)
self.curr_r = np.array(self.curr_means+self.curr_offset+self.noise_term)
elif self.noise:
self.noise_term = np.random.normal(0, 0.1)
self.curr_r = np.array(self.curr_means+self.curr_offset+self.noise_term)
else:
self.curr_r = np.array(self.curr_means+self.curr_offset)
## self.curr_r = np.array(self.curr_r.T)[0]
old_t = self.t
self.t += 1
self.curr_x = SemiparametricBandit.LinearContext(self.t, self.features)
return self.curr_x
def get_best_reward(self):
idx = np.argsort(self.curr_means)
return np.sum(self.curr_r[idx[len(idx)-self.L:len(idx)]])
def get_slate_reward(self, A):
return self.curr_r[A]
class MultiArmBandit(SemibanditSim):
def __init__(self, d, L, noise=False, seed=None):
self.d = d
self.L = L
self.K = d
self.N = None
self.X = None
self.noise = noise
self.seed = seed
if seed is not None:
np.random.seed(574)
self.weights = np.matrix(0.25*np.ones(self.d)).T
idx = np.random.choice(d)
self.weights[idx] += 0.5
# self.weights = np.matrix(np.random.dirichlet(self.d*np.ones(self.d))).T
# self.weights = self.weights/np.linalg.norm(self.weights)
if seed is not None:
np.random.seed(seed)
self.t = 0
self.curr_x = None
self.features = None
self.curr_r = None
self.curr_x = None
def get_new_context(self):
if self.seed is not None:
np.random.seed((self.t+17)*(self.seed+1) + 37)
self.features = np.matrix(np.eye(self.d))
self.curr_means = np.array((self.features*self.weights).T)[0]
if self.noise and type(self.noise) == float:
self.noise_term = np.random.normal(0,self.noise)
self.curr_r = np.array(self.curr_means+self.noise_term)
elif self.noise:
self.noise_term = np.random.normal(0, 0.1)
self.curr_r = np.array(self.curr_means+self.noise_term)
else:
self.curr_r = np.array(self.curr_means)
old_t = self.t
self.t += 1
self.curr_x = LinearBandit.LinearContext(self.t, self.features)
return self.curr_x
def get_best_reward(self):
idx = np.argsort(self.curr_means)
return np.sum(self.curr_r[idx[len(idx)-self.L:len(idx)]])
def get_slate_reward(self, A):
return self.curr_r[A]
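# Illustrative sketch: MultiArmBandit is the degenerate "no features" case --
# the feature matrix is the identity, a single randomly chosen arm has mean
# reward 0.75 and all others 0.25. The arguments below are arbitrary; numpy is
# assumed to be imported as np at module level.
def _demo_multiarm_round():
    sim = MultiArmBandit(d=5, L=1, noise=True, seed=0)
    sim.get_new_context()
    a = np.random.randint(sim.K)                 # pull one arm at random
    return sim.get_slate_reward(np.array([a])), sim.get_best_reward()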
class MSLRBandit(SemibanditSim):
"""
Currently deprecated
"""
def __init__(self, L, path_to_mslr="/home/akshay/Downloads/"):
self.L = L
self.curr_fold = 1
self.path = path_to_mslr
self.f = open(self.path+"Fold%d/train.txt" % (self.curr_fold), "r")
self.contexts = MSLRBandit.ContextIterator(self.f)
self.features = {}
self.Ks = {}
        self.curr_x = None
        self.curr_r = None
def get_new_context(self):
context = self.contexts.next()
if context == None:
return context
(query, features, relevances) = context
self.features[query] = features
self.Ks[query] = features.shape[0]
self.curr_x = query
self.curr_r = relevances
self.curr_k = self.Ks[query]
return query
class DatasetBandit(SemibanditSim):
DATASETS = {
'mq2008': ContextIterators.MQ2008ContextIterator,
'mq2008val': ContextIterators.MQ2008ValContextIterator,
'mq2007': ContextIterators.MQ2007ContextIterator,
'mq2007val': ContextIterators.MQ2007ValContextIterator,
'mslr': ContextIterators.MSLRContextIterator2,
'mslrsmall': ContextIterators.MSLRSmall,
'mslr30k': ContextIterators.MSLR30k,
'yahoo': ContextIterators.YahooContextIterator,
'xor': ContextIterators.XORContextIterator
}
def __init__(self, L=5, loop=False, dataset="xor", metric=Metrics.NDCG, structure="none", noise=0.1):
self.L = L
self.dataset = dataset
self.contexts = DatasetBandit.DATASETS[dataset](L=self.L,loop=loop)
self.K = self.contexts.K
self.d = self.contexts.d
self.has_ldf = self.contexts.has_ldf
self.structure = structure
self.noise_rate = noise
self.gaussian = True
self.seed = None
if self.structure == "cluster":
self.contexts.cluster_docs()
self.curr_x = None
self.curr_r = None
if metric == Metrics.NDCG:
assert 'get_all_relevances' in dir(self.contexts), "Cannot initialize metric"
self.metric = metric(self.L, self.contexts.get_all_relevances(), False)
elif metric == Metrics.NavigationalTTS:
assert 'get_all_relevances' in dir(self.contexts), "Cannot initialize metric"
self.metric = metric(self.L, 60, np.max(self.contexts.get_all_relevances())+1)
else:
self.metric = None
def set_policies(self, policies):
## First make sure all policies respect the L requirement
self.Pi = []
for pi in policies:
assert type(pi) == EnumerationPolicy, "cannot set to non EnumerationPolicy"
actions = pi.actions
new_actions = {}
for (k,v) in actions.items():
new_actions[k] = v[0:self.L]
self.Pi.append(EnumerationPolicy(new_actions))
## Compute Pistar
print("---- Evaluating All Policies ----")
Pistar = None
best_score = 0.0
for p in range(len(self.Pi)):
pi = self.Pi[p]
score = 0.0
for i in range(len(self.contexts.docsPerQuery)):
curr_x = Context.Context(i, self.contexts.features[i,:,:], self.contexts.docsPerQuery[i], self.L)
curr_r = self.contexts.relevances[i,:]
A = pi.get_action(curr_x)
if self.metric != None:
(val, clickeddocs, blah) = self.metric.computeMetric(curr_r[A], self.L, curr_x.get_name())
else:
val = np.sum(curr_r[A])
score += val
print("Policy %d Score %0.3f" % (p, score))
if Pistar == None or score >= best_score:
Pistar = p
best_score = score
print("---- Best Policy is %d ----" % (Pistar))
self.Pistar = Pistar
def set_best_policy(self, policy):
self.Pi = [policy]
self.Pistar = 0
def set_seed(self, i):
self.seed = i
def get_new_context(self):
tmp = self.contexts.next()
if tmp == None:
return tmp
self.curr_x = tmp[0]
if self.noise_rate == None:
self.curr_r = self.transform_reward(tmp[1])
else:
if self.seed is not None:
np.random.seed(self.curr_x.name+self.seed)
if self.gaussian:
self.curr_r = np.random.normal(self.transform_reward(tmp[1]), self.noise_rate)
else:
self.curr_r = np.random.binomial(1, self.transform_reward(tmp[1]))
self.clickeddocs = None
self.played = None
return self.curr_x
def get_slate_reward(self, A):
if self.metric != None:
self.played = A
(val, clicked_docs, dwell_times) = self.metric.computeMetric(self.curr_r[A], self.L, self.curr_x.get_name())
self.clicked_docs = clicked_docs
self.dwell_times = dwell_times
return val
else:
return np.sum(self.curr_r[A])
def get_base_rewards(self, A):
if self.metric != None:
            assert self.played is not None and (A == self.played).all(), "Cannot call get_base_rewards before get_slate_reward"
return self.clicked_docs ## *self.dwell_times
else:
return self.curr_r[A]
def offline_evaluate(self, policy, T=None, train=True):
score = 0.0
context_iter = DatasetBandit.DATASETS[self.dataset](train=train,L=self.L)
t = 0
while True:
if T is not None and t >= T:
return score/T
tmp = context_iter.next()
if tmp == None:
return score/t
(x,reward) = tmp
score += np.sum(self.transform_reward(reward[policy.get_action(x)]))
t += 1
def get_best_reward(self):
if "Pi" in dir(self):
if self.metric != None:
(val, clickeddocs, t) = self.metric.computeMetric(self.curr_r[self.Pi[self.Pistar].get_action(self.curr_x)], self.L, self.curr_x.get_name())
return val
else:
return np.sum(self.transform_reward(self.curr_r[self.Pi[self.Pistar].get_action(self.curr_x)]))
else:
idx = np.argsort(self.curr_r)[::-1][:self.L]
if self.metric != None:
val = self.metric.computeMetric(self.curr_r[idx], self.L, self.curr_x.get_name())[0]
return val
else:
return np.sum(self.transform_reward(self.curr_r[idx]))
def get_max_achievable(self, T=None):
ctx_iter = DatasetBandit.DATASETS[self.dataset](L=self.L, train=False, loop=False)
t = 0
score = 0.0
while True:
if T is not None and t >= T:
break
t += 1
tmp = ctx_iter.next()
if tmp == None:
break
reward = tmp[1]
idx = np.argsort(reward)
score += np.sum(self.transform_reward(reward[idx[len(reward)-self.L:len(reward)]]))
return score
def get_best_policy(self, T=None, learning_alg=None, classification=True):
if learning_alg == None:
return self.Pi[self.Pistar]
else:
## Prepare dataset
dataset = []
if T == None:
ctx_iter = DatasetBandit.DATASETS[self.dataset](L=self.L, train=True, loop=False)
else:
ctx_iter = DatasetBandit.DATASETS[self.dataset](L=self.L, train=True, loop=True)
t = 0
while True:
if T is not None and t >= T:
break
t += 1
tmp = ctx_iter.next()
if tmp == None:
break
dataset.append((tmp[0], self.transform_reward(tmp[1])))
if classification:
return Argmax.argmax(self, dataset, policy_type=ClassificationPolicy, learning_alg=learning_alg)
else:
return Argmax.argmax(self, dataset, policy_type=RegressionPolicy, learning_alg=learning_alg)
def transform_reward(self, r):
if self.noise_rate == None or self.gaussian:
return r
else:
return np.minimum((1.0+r)*self.noise_rate, 1)
class SemiSynthBandit(OrderedSBSim):
def __init__(self,L=5,loop=True,dataset="letter",N=100,metric="ndcg"):
self.one_pass = not loop
self.L = L
self.dataset = dataset
self.data = DatasetBandit.DATASETS[dataset](L=self.L)
self.K = self.data.K
self.d = self.data.d
self.has_ldf = self.data.has_ldf
self.X = self.data.X
self.contexts = []
for i in range(self.X):
self.contexts.append(Context.Context(i, np.zeros((1,1)), self.K, self.L))
self.r_feats = []
for i in range(self.X):
self.r_feats.append(self.data.get_r_feat(i))
if metric == "ndcg":
self.metric = Metrics.NDCG(self.L, self.data.relevances,False)
else:
self.metric = Metrics.SumRelevance(self.L)
self.N = N
tmp_policies = self.build_policies(N)
piMat = np.zeros((N,self.X,self.L),dtype=np.int)
for n in range(N):
for x in range(self.X):
act = tmp_policies[n].get_action(self.data.get_context(x))
piMat[n,x,:] = act
self.Pi = []
for i in range(N):
pi = EnumerationPolicy(dict(zip(range(self.X), [piMat[i,j,:] for j in range(self.X)])))
self.Pi.append(pi)
self.Pistar = self.get_best_policy()
self.curr_idx = 0
self.curr_x = None
self.curr_r = None
def build_policies(self,n):
c2 = DatasetBandit.DATASETS[self.dataset](L=1,loop=True)
Policies = []
for p in range(n):
X = np.zeros((100, c2.d))
r = np.zeros((100,))
            for j in range(100):
                (curr_x, curr_r) = c2.next()
                a = np.random.choice(curr_x.get_K())
                X[j,:] = curr_x.get_ld_features()[a,:]
                r[j] = curr_r[a]
tree = sklearn.tree.DecisionTreeRegressor(max_depth=3)
tree.fit(X,r)
Policies.append(RegressionPolicy(tree))
return(Policies)
def get_best_policy(self):
scores = np.zeros(len(self.Pi))
for i in range(self.X):
self.curr_x = self.contexts[i]
self.curr_r = self.r_feats[i]
scores += np.array([self.get_slate_reward(pi.get_action(self.curr_x)) for pi in self.Pi])
return np.argmax(scores)
def get_new_context(self):
if self.one_pass:
self.curr_idx += 1
if self.curr_idx >= self.X:
return None
self.curr_x = self.contexts[self.curr_idx]
else:
self.curr_x = self.contexts[np.random.randint(0, self.X)]
self.curr_r = self.r_feats[self.curr_x.name]
return self.curr_x
def get_slate_reward(self, A):
        ## Slate reward is computed via the configured metric (NDCG or SumRelevance).
return self.metric.computeMetric(self.curr_r[A],self.curr_x.name)[0]
|
|
import datetime
import re
from flask import abort
from flask import current_app as app
from flask import redirect, request, session, url_for
from CTFd.cache import cache
from CTFd.constants.teams import TeamAttrs
from CTFd.constants.users import UserAttrs
from CTFd.models import Fails, Teams, Tracking, Users, db
from CTFd.utils import get_config
from CTFd.utils.security.auth import logout_user
from CTFd.utils.security.signing import hmac
def get_current_user():
if authed():
user = Users.query.filter_by(id=session["id"]).first()
# Check if the session is still valid
session_hash = session.get("hash")
if session_hash:
if session_hash != hmac(user.password):
logout_user()
if request.content_type == "application/json":
error = 401
else:
error = redirect(url_for("auth.login", next=request.full_path))
abort(error)
return user
else:
return None
def get_current_user_attrs():
if authed():
return get_user_attrs(user_id=session["id"])
else:
return None
@cache.memoize(timeout=300)
def get_user_attrs(user_id):
user = Users.query.filter_by(id=user_id).first()
if user:
d = {}
for field in UserAttrs._fields:
d[field] = getattr(user, field)
return UserAttrs(**d)
return None
@cache.memoize(timeout=300)
def get_user_place(user_id):
user = Users.query.filter_by(id=user_id).first()
if user:
return user.account.place
return None
@cache.memoize(timeout=300)
def get_user_score(user_id):
user = Users.query.filter_by(id=user_id).first()
if user:
return user.account.score
return None
@cache.memoize(timeout=300)
def get_team_place(team_id):
team = Teams.query.filter_by(id=team_id).first()
if team:
return team.place
return None
@cache.memoize(timeout=300)
def get_team_score(team_id):
team = Teams.query.filter_by(id=team_id).first()
if team:
return team.score
return None
def get_current_team():
if authed():
user = get_current_user()
return user.team
else:
return None
def get_current_team_attrs():
if authed():
user = get_user_attrs(user_id=session["id"])
if user.team_id:
return get_team_attrs(team_id=user.team_id)
return None
@cache.memoize(timeout=300)
def get_team_attrs(team_id):
team = Teams.query.filter_by(id=team_id).first()
if team:
d = {}
for field in TeamAttrs._fields:
d[field] = getattr(team, field)
return TeamAttrs(**d)
return None
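# Note: the *_attrs, *_place and *_score helpers above are memoized for 300
# seconds, so attribute or score changes can take up to five minutes to be
# reflected unless the corresponding memoized entries are invalidated elsewhere
# (e.g. by CTFd's cache-clearing helpers).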
def get_current_user_type(fallback=None):
if authed():
user = get_current_user_attrs()
return user.type
else:
return fallback
def authed():
return bool(session.get("id", False))
def is_admin():
if authed():
user = get_current_user_attrs()
return user.type == "admin"
else:
return False
def is_verified():
if get_config("verify_emails"):
user = get_current_user_attrs()
if user:
return user.verified
else:
return False
else:
return True
def get_ip(req=None):
""" Returns the IP address of the currently in scope request. The approach is to define a list of trusted proxies
(in this case the local network), and only trust the most recently defined untrusted IP address.
Taken from http://stackoverflow.com/a/22936947/4285524 but the generator there makes no sense.
The trusted_proxies regexes is taken from Ruby on Rails.
This has issues if the clients are also on the local network so you can remove proxies from config.py.
CTFd does not use IP address for anything besides cursory tracking of teams and it is ill-advised to do much
more than that if you do not know what you're doing.
"""
if req is None:
req = request
trusted_proxies = app.config["TRUSTED_PROXIES"]
combined = "(" + ")|(".join(trusted_proxies) + ")"
route = req.access_route + [req.remote_addr]
for addr in reversed(route):
if not re.match(combined, addr): # IP is not trusted but we trust the proxies
remote_addr = addr
break
else:
remote_addr = req.remote_addr
return remote_addr
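# Illustrative sketch of the proxy-resolution logic above (not used by CTFd):
# given an X-Forwarded-For chain of "203.0.113.7, 10.0.0.2" behind a local
# proxy at 10.0.0.1, the access route is ["203.0.113.7", "10.0.0.2",
# "10.0.0.1"]; walking it in reverse and skipping addresses that match the
# trusted-proxy regexes yields the client address 203.0.113.7. The addresses
# and patterns here are examples only.
def _example_resolve_client_ip(route=("203.0.113.7", "10.0.0.2", "10.0.0.1"),
                               trusted_proxies=(r"^10\.", r"^127\.0\.0\.1$")):
    combined = "(" + ")|(".join(trusted_proxies) + ")"
    for addr in reversed(route):
        if not re.match(combined, addr):
            return addr
    return route[-1]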
def get_current_user_recent_ips():
if authed():
return get_user_recent_ips(user_id=session["id"])
else:
return None
@cache.memoize(timeout=300)
def get_user_recent_ips(user_id):
hour_ago = datetime.datetime.now() - datetime.timedelta(hours=1)
addrs = (
Tracking.query.with_entities(Tracking.ip.distinct())
.filter(Tracking.user_id == user_id, Tracking.date >= hour_ago)
.all()
)
return set([ip for (ip,) in addrs])
def get_wrong_submissions_per_minute(account_id):
"""
Get incorrect submissions per minute.
:param account_id:
:return:
"""
one_min_ago = datetime.datetime.utcnow() + datetime.timedelta(minutes=-1)
fails = (
db.session.query(Fails)
.filter(Fails.account_id == account_id, Fails.date >= one_min_ago)
.all()
)
return len(fails)
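# Illustrative usage sketch (not part of CTFd): a view could throttle brute
# forcing by aborting once an account exceeds some threshold of incorrect
# submissions within the last minute. The threshold of 10 is an arbitrary
# example value.
def _example_throttle_submissions(account_id, threshold=10):
    if get_wrong_submissions_per_minute(account_id) >= threshold:
        abort(429)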
|
|
import numpy as np
import openmc
import pytest
def assert_infinite_bb(s):
ll, ur = (-s).bounding_box
assert np.all(np.isinf(ll))
assert np.all(np.isinf(ur))
ll, ur = (+s).bounding_box
assert np.all(np.isinf(ll))
assert np.all(np.isinf(ur))
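# Note (informal): for an unbounded surface such as a generic plane, both the
# negative (-s) and positive (+s) half-spaces extend to infinity in every
# direction, so every bounding-box corner coordinate is infinite. The tests
# below check the partially finite boxes for axis-aligned planes, cylinders
# and spheres.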
def test_plane():
s = openmc.Plane(A=1, B=2, C=-1, D=3, name='my plane')
assert s.a == 1
assert s.b == 2
assert s.c == -1
assert s.d == 3
assert s.boundary_type == 'transmission'
assert s.name == 'my plane'
assert s.type == 'plane'
# Generic planes don't have well-defined bounding boxes
assert_infinite_bb(s)
# evaluate method
x, y, z = (4, 3, 6)
assert s.evaluate((x, y, z)) == pytest.approx(s.a*x + s.b*y + s.c*z - s.d)
# Make sure repr works
repr(s)
def test_xplane():
s = openmc.XPlane(x0=3., boundary_type='reflective')
assert s.x0 == 3.
assert s.boundary_type == 'reflective'
# Check bounding box
ll, ur = (+s).bounding_box
assert ll == pytest.approx((3., -np.inf, -np.inf))
assert np.all(np.isinf(ur))
ll, ur = (-s).bounding_box
assert ur == pytest.approx((3., np.inf, np.inf))
assert np.all(np.isinf(ll))
# __contains__ on associated half-spaces
assert (5, 0, 0) in +s
assert (5, 0, 0) not in -s
assert (-2, 1, 10) in -s
assert (-2, 1, 10) not in +s
# evaluate method
assert s.evaluate((5., 0., 0.)) == pytest.approx(2.)
# Make sure repr works
repr(s)
def test_yplane():
s = openmc.YPlane(y0=3.)
assert s.y0 == 3.
# Check bounding box
ll, ur = (+s).bounding_box
assert ll == pytest.approx((-np.inf, 3., -np.inf))
assert np.all(np.isinf(ur))
    ll, ur = (-s).bounding_box
assert ur == pytest.approx((np.inf, 3., np.inf))
assert np.all(np.isinf(ll))
# __contains__ on associated half-spaces
assert (0, 5, 0) in +s
assert (0, 5, 0) not in -s
assert (-2, 1, 10) in -s
assert (-2, 1, 10) not in +s
# evaluate method
assert s.evaluate((0., 0., 0.)) == pytest.approx(-3.)
def test_zplane():
s = openmc.ZPlane(z0=3.)
assert s.z0 == 3.
# Check bounding box
ll, ur = (+s).bounding_box
assert ll == pytest.approx((-np.inf, -np.inf, 3.))
assert np.all(np.isinf(ur))
ll, ur = (-s).bounding_box
assert ur == pytest.approx((np.inf, np.inf, 3.))
assert np.all(np.isinf(ll))
# __contains__ on associated half-spaces
assert (0, 0, 5) in +s
assert (0, 0, 5) not in -s
assert (-2, 1, -10) in -s
assert (-2, 1, -10) not in +s
# evaluate method
assert s.evaluate((0., 0., 10.)) == pytest.approx(7.)
# Make sure repr works
repr(s)
def test_xcylinder():
y, z, r = 3, 5, 2
s = openmc.XCylinder(y0=y, z0=z, R=r)
assert s.y0 == y
assert s.z0 == z
assert s.r == r
# Check bounding box
ll, ur = (+s).bounding_box
assert np.all(np.isinf(ll))
assert np.all(np.isinf(ur))
ll, ur = (-s).bounding_box
assert ll == pytest.approx((-np.inf, y-r, z-r))
assert ur == pytest.approx((np.inf, y+r, z+r))
# evaluate method
assert s.evaluate((0, y, z)) == pytest.approx(-r**2)
# Make sure repr works
repr(s)
def test_periodic():
x = openmc.XPlane(boundary_type='periodic')
y = openmc.YPlane(boundary_type='periodic')
x.periodic_surface = y
assert y.periodic_surface == x
with pytest.raises(TypeError):
x.periodic_surface = openmc.Sphere()
def test_ycylinder():
x, z, r = 3, 5, 2
s = openmc.YCylinder(x0=x, z0=z, R=r)
assert s.x0 == x
assert s.z0 == z
assert s.r == r
# Check bounding box
ll, ur = (+s).bounding_box
assert np.all(np.isinf(ll))
assert np.all(np.isinf(ur))
ll, ur = (-s).bounding_box
assert ll == pytest.approx((x-r, -np.inf, z-r))
assert ur == pytest.approx((x+r, np.inf, z+r))
# evaluate method
assert s.evaluate((x, 0, z)) == pytest.approx(-r**2)
def test_zcylinder():
x, y, r = 3, 5, 2
s = openmc.ZCylinder(x0=x, y0=y, R=r)
assert s.x0 == x
assert s.y0 == y
assert s.r == r
# Check bounding box
ll, ur = (+s).bounding_box
assert np.all(np.isinf(ll))
assert np.all(np.isinf(ur))
ll, ur = (-s).bounding_box
assert ll == pytest.approx((x-r, y-r, -np.inf))
assert ur == pytest.approx((x+r, y+r, np.inf))
# evaluate method
assert s.evaluate((x, y, 0)) == pytest.approx(-r**2)
# Make sure repr works
repr(s)
def test_sphere():
x, y, z, r = -3, 5, 6, 2
s = openmc.Sphere(x0=x, y0=y, z0=z, R=r)
assert s.x0 == x
assert s.y0 == y
assert s.z0 == z
assert s.r == r
# Check bounding box
ll, ur = (+s).bounding_box
assert np.all(np.isinf(ll))
assert np.all(np.isinf(ur))
ll, ur = (-s).bounding_box
assert ll == pytest.approx((x-r, y-r, z-r))
assert ur == pytest.approx((x+r, y+r, z+r))
# evaluate method
assert s.evaluate((x, y, z)) == pytest.approx(-r**2)
# Make sure repr works
repr(s)
def cone_common(apex, r2, cls):
x, y, z = apex
s = cls(x0=x, y0=y, z0=z, R2=r2)
assert s.x0 == x
assert s.y0 == y
assert s.z0 == z
assert s.r2 == r2
# Check bounding box
assert_infinite_bb(s)
# evaluate method -- should be zero at apex
assert s.evaluate((x, y, z)) == pytest.approx(0.0)
# Make sure repr works
repr(s)
def test_xcone():
apex = (10, 0, 0)
r2 = 4
cone_common(apex, r2, openmc.XCone)
def test_ycone():
apex = (10, 0, 0)
r2 = 4
cone_common(apex, r2, openmc.YCone)
def test_zcone():
apex = (10, 0, 0)
r2 = 4
cone_common(apex, r2, openmc.ZCone)
def test_quadric():
# Make a sphere from a quadric
r = 10.0
coeffs = {'a': 1, 'b': 1, 'c': 1, 'k': -r**2}
s = openmc.Quadric(**coeffs)
assert s.a == coeffs['a']
assert s.b == coeffs['b']
assert s.c == coeffs['c']
assert s.k == coeffs['k']
# All other coeffs should be zero
for coeff in ('d', 'e', 'f', 'g', 'h', 'j'):
assert getattr(s, coeff) == 0.0
# Check bounding box
assert_infinite_bb(s)
# evaluate method
assert s.evaluate((0., 0., 0.)) == pytest.approx(coeffs['k'])
assert s.evaluate((1., 1., 1.)) == pytest.approx(3 + coeffs['k'])
|
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import re
from dataclasses import dataclass
from typing import Any, Dict, Iterable, Optional, Tuple, Union, cast
from pants.base.exceptions import DuplicateNameError, MappingError, UnaddressableObjectError
from pants.build_graph.address import Address, BuildFileAddress
from pants.engine.internals.objects import Serializable
from pants.engine.internals.parser import BuildFilePreludeSymbols, Parser
from pants.option.global_options import BuildFileImportsBehavior
from pants.util.memo import memoized_property
from pants.util.meta import frozen_after_init
ThinAddressableObject = Union[Serializable, Any]
@dataclass(frozen=True)
class AddressMap:
"""Maps addressable Serializable objects from a byte source.
To construct an AddressMap, use `parse`.
:param path: The path to the byte source this address map's objects were passed from.
:param objects_by_name: A dict mapping from object name to the parsed 'thin' addressable object.
"""
path: str
objects_by_name: Dict[str, ThinAddressableObject]
@classmethod
def parse(
cls,
filepath: str,
filecontent: bytes,
parser: Parser,
extra_symbols: BuildFilePreludeSymbols,
) -> "AddressMap":
"""Parses a source for addressable Serializable objects.
No matter the parser used, the parsed and mapped addressable objects are all 'thin'; ie: any
objects they point to in other namespaces or even in the same namespace but from a separate
source are left as unresolved pointers.
:param filepath: The path to the byte source containing serialized objects.
        :param filecontent: The content of the byte source containing the serialized objects to be parsed.
:param parser: The parser cls to use.
"""
try:
objects = parser.parse(filepath, filecontent, extra_symbols)
except Exception as e:
raise MappingError(f"Failed to parse {filepath}:\n{e!r}")
objects_by_name: Dict[str, ThinAddressableObject] = {}
for obj in objects:
if not Serializable.is_serializable(obj):
raise UnaddressableObjectError("Parsed a non-serializable object: {!r}".format(obj))
attributes = obj._asdict()
name = attributes.get("name")
if not name:
raise UnaddressableObjectError("Parsed a non-addressable object: {!r}".format(obj))
if name in objects_by_name:
raise DuplicateNameError(
"An object already exists at {!r} with name {!r}: {!r}. Cannot "
"map {!r}".format(filepath, name, objects_by_name[name], obj)
)
objects_by_name[name] = obj
return cls(filepath, dict(sorted(objects_by_name.items())))
class DifferingFamiliesError(MappingError):
"""Indicates an attempt was made to merge address maps from different families together."""
@dataclass(frozen=True)
class AddressFamily:
"""Represents the family of addressed objects in a namespace.
To create an AddressFamily, use `create`.
An address family can be composed of the addressed objects from zero or more underlying address
sources. An "empty" AddressFamily is legal, and is the result when there are not build files in a
particular namespace.
:param namespace: The namespace path of this address family.
:param objects_by_name: A dict mapping from object name to the parsed 'thin' addressable object.
"""
namespace: str
objects_by_name: Dict[str, Tuple[str, ThinAddressableObject]]
@classmethod
def create(cls, spec_path: str, address_maps: Iterable[AddressMap]) -> "AddressFamily":
"""Creates an address family from the given set of address maps.
:param spec_path: The directory prefix shared by all address_maps.
:param address_maps: The family of maps that form this namespace.
:raises: :class:`MappingError` if the given address maps do not form a family.
"""
if spec_path == ".":
spec_path = ""
for address_map in address_maps:
if not address_map.path.startswith(spec_path):
raise DifferingFamiliesError(
"Expected AddressMaps to share the same parent directory {}, "
"but received: {}".format(spec_path, address_map.path)
)
objects_by_name: Dict[str, Tuple[str, ThinAddressableObject]] = {}
for address_map in address_maps:
current_path = address_map.path
for name, obj in address_map.objects_by_name.items():
previous = objects_by_name.get(name)
if previous:
previous_path, _ = previous
raise DuplicateNameError(
"An object with name {name!r} is already defined in "
"{previous_path!r}, will not overwrite with {obj!r} from "
"{current_path!r}.".format(
name=name,
previous_path=previous_path,
obj=obj,
current_path=current_path,
)
)
objects_by_name[name] = (current_path, obj)
return AddressFamily(
namespace=spec_path,
objects_by_name={
name: (path, obj) for name, (path, obj) in sorted(objects_by_name.items())
},
)
@memoized_property
def addressables(self) -> Dict[BuildFileAddress, ThinAddressableObject]:
"""Return a mapping from BuildFileAddress to thin addressable objects in this namespace.
:rtype: dict from `BuildFileAddress` to thin addressable objects.
"""
return {
BuildFileAddress(rel_path=path, target_name=name): obj
for name, (path, obj) in self.objects_by_name.items()
}
@property
def addressables_as_address_keyed(self) -> Dict[Address, ThinAddressableObject]:
"""Identical to `addresses`, but with a `cast` to allow for type safe lookup of `Address`es.
:rtype: dict from `Address` to thin addressable objects.
"""
return cast(Dict[Address, ThinAddressableObject], self.addressables)
def __hash__(self):
return hash(self.namespace)
def __repr__(self):
return "AddressFamily(namespace={!r}, objects_by_name={!r})".format(
self.namespace, list(self.objects_by_name.keys())
)
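# Illustrative sketch (not part of the Pants engine API): AddressFamily.create
# merges the AddressMaps parsed from the BUILD files of one directory and
# rejects duplicate target names across those files. The object() placeholders
# below stand in for parsed addressable targets; real maps come from
# AddressMap.parse.
def _example_address_family() -> Dict[BuildFileAddress, ThinAddressableObject]:
    am1 = AddressMap("src/example/BUILD", {"lib": object()})
    am2 = AddressMap("src/example/BUILD.tools", {"bin": object()})
    family = AddressFamily.create("src/example", [am1, am2])
    return family.addressables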
@frozen_after_init
@dataclass(unsafe_hash=True)
class AddressMapper:
"""Configuration to parse build files matching a filename pattern."""
parser: Parser
prelude_glob_patterns: Tuple[str, ...]
build_file_imports_behavior: BuildFileImportsBehavior
build_patterns: Tuple[str, ...]
build_ignore_patterns: Tuple[str, ...]
exclude_target_regexps: Tuple[str, ...]
subproject_roots: Tuple[str, ...]
def __init__(
self,
parser: Parser,
prelude_glob_patterns,
build_file_imports_behavior,
build_patterns: Optional[Iterable[str]] = None,
build_ignore_patterns: Optional[Iterable[str]] = None,
exclude_target_regexps: Optional[Iterable[str]] = None,
subproject_roots: Optional[Iterable[str]] = None,
) -> None:
"""Create an AddressMapper.
        Both the set of files that define mappable BUILD files and the parser used to parse those
files can be customized. See the `pants.engine.parsers` module for example parsers.
:param parser: The BUILD file parser to use.
:param build_patterns: A tuple of fnmatch-compatible patterns for identifying BUILD files
used to resolve addresses.
:param build_ignore_patterns: A list of path ignore patterns used when searching for BUILD files.
:param exclude_target_regexps: A list of regular expressions for excluding targets.
"""
self.parser = parser
self.prelude_glob_patterns = prelude_glob_patterns
self.build_file_imports_behavior = build_file_imports_behavior
self.build_patterns = tuple(build_patterns or ["BUILD", "BUILD.*"])
self.build_ignore_patterns = tuple(build_ignore_patterns or [])
self.exclude_target_regexps = tuple(exclude_target_regexps or [])
self.subproject_roots = tuple(subproject_roots or [])
def __repr__(self):
return "AddressMapper(parser={}, build_patterns={})".format(
self.parser, self.build_patterns
)
@memoized_property
def exclude_patterns(self):
return tuple(re.compile(pattern) for pattern in self.exclude_target_regexps)
|
|
#!/usr/bin/env python
"""
@package mi.dataset.parser.test
@file marine-integrations/mi/dataset/parser/test/test_dosta_ln_wfp_sio.py
@author Christopher Fortin
@brief Test code for a dosta_ln_wfp_sio data parser
"""
import os
import struct
import ntplib
from nose.plugins.attrib import attr
from mi.core.exceptions import UnexpectedDataException
from mi.core.log import get_logger
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.driver.dosta_ln.wfp_sio.resource import RESOURCE_PATH
from mi.dataset.parser.dosta_ln_wfp_sio import DostaLnWfpSioParser, DostaLnWfpSioDataParticle
from mi.dataset.test.test_parser import ParserUnitTestCase
log = get_logger()
@attr('UNIT', group='mi')
class DostaLnWfpSioParserUnitTestCase(ParserUnitTestCase):
def setUp(self):
ParserUnitTestCase.setUp(self)
self.config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.dosta_ln_wfp_sio',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'DostaLnWfpSioDataParticle'
}
# the hex characters used to create the expected particles below were extracted
# from the first 4 E records in the file node58p1_0.we_wfp.dat by hand
# and used here to verify the correct raw data was used to create the particles
self.timestamp_1a = self.timestamp_to_ntp(b'\x52\x04\xCC\x2D')
self.particle_1a = DostaLnWfpSioDataParticle(
b'\x52\x04\xCC\x2D\x00\x00\x00\x00\x41\x3B\x6F\xD2\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x37\x00\x60\x02\x3E',
internal_timestamp=self.timestamp_1a)
self.timestamp_1b = self.timestamp_to_ntp(b'\x52\x04\xCD\x70')
self.particle_1b = DostaLnWfpSioDataParticle(
b'\x52\x04\xCD\x70\x43\x66\x2F\x90\x41\x32\xDE\x01\x45\x7D\xA7'
b'\x85\x43\x13\x9F\x7D\x3F\xBF\xBE\x77\x00\x37\x00\x61\x02\x3C',
internal_timestamp=self.timestamp_1b)
self.timestamp_1c = self.timestamp_to_ntp(b'\x52\x04\xCE\xB0')
self.particle_1c = DostaLnWfpSioDataParticle(
b'\x52\x04\xCE\xB0\x43\x6D\xEA\x30\x41\x2F\xE5\xC9\x45\x78\x56'
b'\x66\x43\x12\x94\x39\x3F\xBF\x9D\xB2\x00\x37\x00\x73\x02\x3B',
internal_timestamp=self.timestamp_1c)
self.timestamp_1d = self.timestamp_to_ntp(b'\x52\x04\xCF\xF0')
self.particle_1d = DostaLnWfpSioDataParticle(
b'\x52\x04\xCF\xF0\x43\x6E\x7C\x78\x41\x2E\xF4\xF1\x45\x73\x1B'
b'\x0A\x43\x11\x9F\x7D\x3F\xBF\x7C\xEE\x00\x37\x00\x5E\x02\x3B',
internal_timestamp=self.timestamp_1d)
def timestamp_to_ntp(self, hex_timestamp):
fields = struct.unpack('>I', hex_timestamp)
timestamp = float(fields[0])
return ntplib.system_to_ntp_time(timestamp)
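    # For reference, a worked example of the conversion above: the first
    # record's raw timestamp b'\x52\x04\xCC\x2D' unpacks (big-endian unsigned
    # int) to 0x5204CC2D = 1376046125 seconds since the Unix epoch (August
    # 2013); ntplib.system_to_ntp_time then adds the 2208988800-second offset
    # between the Unix and NTP epochs.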
def test_simple(self):
"""
Read test data from the file and pull out data particles one at a time.
Assert that the results are those we expected.
This test only verifies the raw data in the particle is correct
"""
log.debug('-Starting test_simple')
self.stream_handle = open(os.path.join(RESOURCE_PATH,
'node58p1_0.we_wfp.dat'))
self.parser = DostaLnWfpSioParser(self.config, self.stream_handle, self.exception_callback)
result = self.parser.get_records(1)
self.assertEqual(result, [self.particle_1a])
result = self.parser.get_records(1)
self.assertEqual(result, [self.particle_1b])
result = self.parser.get_records(1)
self.assertEqual(result, [self.particle_1c])
result = self.parser.get_records(1)
self.assertEqual(result, [self.particle_1d])
self.assertEquals(self.exception_callback_value, [])
self.stream_handle.close()
def test_get_many(self):
"""
Read test data from the file and pull out multiple data particles at one time.
Assert that the results are those we expected.
"""
log.debug('--Starting test_get_many')
self.stream_handle = open(os.path.join(RESOURCE_PATH,
'node58p1_0.we_wfp.dat'))
self.parser = DostaLnWfpSioParser(self.config, self.stream_handle, self.exception_callback)
result = self.parser.get_records(4)
self.assertEqual(result,
[self.particle_1a, self.particle_1b, self.particle_1c, self.particle_1d])
self.assertEquals(self.exception_callback_value, [])
self.stream_handle.close()
def test_long_stream(self):
"""
Test a long stream
"""
log.debug('--Starting test_long_stream')
self.stream_handle = open(os.path.join(RESOURCE_PATH,
'node58p1_0.we_wfp.dat'))
self.stream_handle.seek(0)
self.parser = DostaLnWfpSioParser(self.config, self.stream_handle, self.exception_callback)
result = self.parser.get_records(100)
self.assert_particles(result, 'node58p1_0.we_wfp.yml', RESOURCE_PATH)
self.assertEquals(self.exception_callback_value, [])
self.stream_handle.close()
def test_bad_data(self):
"""
        Ensure that a bad record (in this case a corrupt status message) causes a sample exception
"""
self.stream_handle = open(os.path.join(RESOURCE_PATH, 'node58p1_BADFLAGS.dat'))
log.debug('--Starting test_bad_data')
self.parser = DostaLnWfpSioParser(self.config, self.stream_handle, self.exception_callback)
self.parser.get_records(1)
self.assert_(isinstance(self.exception_callback_value[0], UnexpectedDataException))
def test_bad_e_record(self):
"""
Ensure that the bad record causes a sample exception. The file 'bad_e_record.dat'
includes a record containing one byte less than the expected 30 for the
dosta_ln_wfp_sio. The 'Number of Data Bytes' and the 'CRC Checksum' values in the
SIO Mule header have been modified accordingly.
"""
self.stream_handle = open(os.path.join(RESOURCE_PATH, 'bad_e_record.dat'))
self.parser = DostaLnWfpSioParser(self.config, self.stream_handle, self.exception_callback)
self.parser.get_records(1)
self.assert_(isinstance(self.exception_callback_value[0], UnexpectedDataException))
def particle_to_yml(self, particles, filename, mode='w'):
"""
This is added as a testing helper, not actually as part of the parser tests. Since the same particles
will be used for the driver test it is helpful to write them to .yml in the same form they need in the
results.yml fids here.
"""
# open write append, if you want to start from scratch manually delete this fid
fid = open(os.path.join(RESOURCE_PATH, filename), mode)
fid.write('header:\n')
fid.write(" particle_object: 'MULTIPLE'\n")
fid.write(" particle_type: 'MULTIPLE'\n")
fid.write('data:\n')
for i in range(0, len(particles)):
particle_dict = particles[i].generate_dict()
fid.write(' - _index: %d\n' % (i+1))
fid.write(' particle_object: %s\n' % particles[i].__class__.__name__)
fid.write(' particle_type: %s\n' % particle_dict.get('stream_name'))
fid.write(' internal_timestamp: %f\n' % particle_dict.get('internal_timestamp'))
for val in particle_dict.get('values'):
if isinstance(val.get('value'), float):
fid.write(' %s: %16.5f\n' % (val.get('value_id'), val.get('value')))
else:
fid.write(' %s: %s\n' % (val.get('value_id'), val.get('value')))
fid.close()
def create_yml(self):
"""
This utility creates a yml file
"""
        # Parse the full node58p1_0.we_wfp.dat file and write its particles to
        # a .yml file for use as expected results in the driver tests.
fid = open(os.path.join(RESOURCE_PATH, 'node58p1_0.we_wfp.dat'), 'rb')
stream_handle = fid
parser = DostaLnWfpSioParser(self.config, stream_handle,
self.exception_callback)
particles = parser.get_records(100)
self.particle_to_yml(particles, 'node58p1_0.we_wfp.yml')
fid.close()
|
|
# coding: utf-8
from __future__ import division, unicode_literals, print_function
"""
This module implements plotter for DOS and band structure.
"""
__author__ = "Shyue Ping Ong, Geoffroy Hautier"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "May 1, 2012"
import logging
import math
import itertools
from collections import OrderedDict
import numpy as np
from monty.json import jsanitize
from pymatgen.electronic_structure.core import Spin
from pymatgen.electronic_structure.bandstructure import BandStructureSymmLine
logger = logging.getLogger('BSPlotter')
class DosPlotter(object):
"""
Class for plotting DOSs. Note that the interface is extremely flexible
given that there are many different ways in which people want to view
DOS. The typical usage is::
# Initializes plotter with some optional args. Defaults are usually
# fine,
plotter = DosPlotter()
# Adds a DOS with a label.
plotter.add_dos("Total DOS", dos)
# Alternatively, you can add a dict of DOSs. This is the typical
# form returned by CompleteDos.get_spd/element/others_dos().
plotter.add_dos_dict({"dos1": dos1, "dos2": dos2})
plotter.add_dos_dict(complete_dos.get_spd_dos())
Args:
zero_at_efermi: Whether to shift all Dos to have zero energy at the
fermi energy. Defaults to True.
stack: Whether to plot the DOS as a stacked area graph
key_sort_func: function used to sort the dos_dict keys.
sigma: A float specifying a standard deviation for Gaussian smearing
the DOS for nicer looking plots. Defaults to None for no
smearing.
"""
def __init__(self, zero_at_efermi=True, stack=False, sigma=None):
self.zero_at_efermi = zero_at_efermi
self.stack = stack
self.sigma = sigma
self._doses = OrderedDict()
def add_dos(self, label, dos):
"""
Adds a dos for plotting.
Args:
label:
label for the DOS. Must be unique.
dos:
Dos object
"""
energies = dos.energies - dos.efermi if self.zero_at_efermi \
else dos.energies
densities = dos.get_smeared_densities(self.sigma) if self.sigma \
else dos.densities
efermi = dos.efermi
self._doses[label] = {'energies': energies, 'densities': densities,
'efermi': efermi}
def add_dos_dict(self, dos_dict, key_sort_func=None):
"""
Add a dictionary of doses, with an optional sorting function for the
keys.
Args:
dos_dict: dict of {label: Dos}
key_sort_func: function used to sort the dos_dict keys.
"""
if key_sort_func:
keys = sorted(dos_dict.keys(), key=key_sort_func)
else:
keys = dos_dict.keys()
for label in keys:
self.add_dos(label, dos_dict[label])
def get_dos_dict(self):
"""
Returns the added doses as a json-serializable dict. Note that if you
have specified smearing for the DOS plot, the densities returned will
be the smeared densities, not the original densities.
Returns:
Dict of dos data. Generally of the form, {label: {'energies':..,
'densities': {'up':...}, 'efermi':efermi}}
"""
return jsanitize(self._doses)
def get_plot(self, xlim=None, ylim=None):
"""
Get a matplotlib plot showing the DOS.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
"""
import prettyplotlib as ppl
from prettyplotlib import brewer2mpl
from pymatgen.util.plotting_utils import get_publication_quality_plot
ncolors = max(3, len(self._doses))
ncolors = min(9, ncolors)
colors = brewer2mpl.get_map('Set1', 'qualitative', ncolors).mpl_colors
y = None
alldensities = []
allenergies = []
plt = get_publication_quality_plot(12, 8)
# Note that this complicated processing of energies is to allow for
# stacked plots in matplotlib.
for key, dos in self._doses.items():
energies = dos['energies']
densities = dos['densities']
if not y:
y = {Spin.up: np.zeros(energies.shape),
Spin.down: np.zeros(energies.shape)}
newdens = {}
for spin in [Spin.up, Spin.down]:
if spin in densities:
if self.stack:
y[spin] += densities[spin]
newdens[spin] = y[spin].copy()
else:
newdens[spin] = densities[spin]
allenergies.append(energies)
alldensities.append(newdens)
keys = list(self._doses.keys())
keys.reverse()
alldensities.reverse()
allenergies.reverse()
allpts = []
for i, key in enumerate(keys):
x = []
y = []
for spin in [Spin.up, Spin.down]:
if spin in alldensities[i]:
densities = list(int(spin) * alldensities[i][spin])
energies = list(allenergies[i])
if spin == Spin.down:
energies.reverse()
densities.reverse()
x.extend(energies)
y.extend(densities)
allpts.extend(list(zip(x, y)))
if self.stack:
plt.fill(x, y, color=colors[i % ncolors],
label=str(key))
else:
ppl.plot(x, y, color=colors[i % ncolors],
label=str(key),linewidth=3)
if not self.zero_at_efermi:
ylim = plt.ylim()
ppl.plot([self._doses[key]['efermi'],
self._doses[key]['efermi']], ylim,
color=colors[i % ncolors],
linestyle='--', linewidth=2)
if xlim:
plt.xlim(xlim)
if ylim:
plt.ylim(ylim)
else:
xlim = plt.xlim()
relevanty = [p[1] for p in allpts
if xlim[0] < p[0] < xlim[1]]
plt.ylim((min(relevanty), max(relevanty)))
if self.zero_at_efermi:
ylim = plt.ylim()
plt.plot([0, 0], ylim, 'k--', linewidth=2)
plt.xlabel('Energies (eV)')
plt.ylabel('Density of states')
plt.legend()
leg = plt.gca().get_legend()
ltext = leg.get_texts() # all the text.Text instance in the legend
plt.setp(ltext, fontsize=30)
plt.tight_layout()
return plt
def save_plot(self, filename, img_format="eps", xlim=None, ylim=None):
"""
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
"""
plt = self.get_plot(xlim, ylim)
plt.savefig(filename, format=img_format)
def show(self, xlim=None, ylim=None):
"""
Show the plot using matplotlib.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
"""
plt = self.get_plot(xlim, ylim)
plt.show()
class BSPlotter(object):
"""
Class to plot or get data to facilitate the plot of band structure objects.
Args:
bs: A BandStructureSymmLine object.
"""
def __init__(self, bs):
if not isinstance(bs, BandStructureSymmLine):
raise ValueError(
"BSPlotter only works with BandStructureSymmLine objects. "
"A BandStructure object (on a uniform grid for instance and "
"not along symmetry lines won't work)")
self._bs = bs
        #TODO: come up with an intelligent way to cut the highest unconverged bands
self._nb_bands = self._bs._nb_bands
def _maketicks(self, plt):
"""
utility private method to add ticks to a band structure
"""
ticks = self.get_ticks()
        # Sanitize: only plot the unique values
uniq_d = []
uniq_l = []
temp_ticks = list(zip(ticks['distance'], ticks['label']))
for i in range(len(temp_ticks)):
if i == 0:
uniq_d.append(temp_ticks[i][0])
uniq_l.append(temp_ticks[i][1])
logger.debug("Adding label {l} at {d}".format(
l=temp_ticks[i][0], d=temp_ticks[i][1]))
else:
if temp_ticks[i][1] == temp_ticks[i - 1][1]:
logger.debug("Skipping label {i}".format(
i=temp_ticks[i][1]))
else:
logger.debug("Adding label {l} at {d}".format(
l=temp_ticks[i][0], d=temp_ticks[i][1]))
uniq_d.append(temp_ticks[i][0])
uniq_l.append(temp_ticks[i][1])
logger.debug("Unique labels are %s" % list(zip(uniq_d, uniq_l)))
plt.gca().set_xticks(uniq_d)
plt.gca().set_xticklabels(uniq_l)
for i in range(len(ticks['label'])):
if ticks['label'][i] is not None:
# don't print the same label twice
if i != 0:
if ticks['label'][i] == ticks['label'][i - 1]:
logger.debug("already print label... "
"skipping label {i}".format(
i=ticks['label'][i]))
else:
logger.debug("Adding a line at {d}"
" for label {l}".format(
d=ticks['distance'][i], l=ticks['label'][i]))
plt.axvline(ticks['distance'][i], color='k')
else:
logger.debug("Adding a line at {d} for label {l}".format(
d=ticks['distance'][i], l=ticks['label'][i]))
plt.axvline(ticks['distance'][i], color='k')
return plt
def bs_plot_data(self, zero_to_efermi=True):
"""
Get the data nicely formatted for a plot
Args:
zero_to_efermi: Automatically subtract off the Fermi energy from the
eigenvalues and plot.
Returns:
A dict of the following format:
ticks: A dict with the 'distances' at which there is a kpoint (the
x axis) and the labels (None if no label)
energy: A dict storing bands for spin up and spin down data
[{Spin:[band_index][k_point_index]}] as a list (one element
for each branch) of energy for each kpoint. The data is
stored by branch to facilitate the plotting
            vbm: A list of tuples (distance, energy) marking the vbms. The
                energies are shifted with respect to the fermi level if the
                option has been selected.
            cbm: A list of tuples (distance, energy) marking the cbms. The
                energies are shifted with respect to the fermi level if the
                option has been selected.
lattice: The reciprocal lattice.
zero_energy: This is the energy used as zero for the plot.
            band_gap: A string indicating the band gap and its nature (empty if
it's a metal).
is_metal: True if the band structure is metallic (i.e., there is at
least one band crossing the fermi level).
"""
distance = []
energy = []
if self._bs.is_metal():
zero_energy = self._bs.efermi
else:
zero_energy = self._bs.get_vbm()['energy']
if not zero_to_efermi:
zero_energy = 0.0
for b in self._bs._branches:
if self._bs.is_spin_polarized:
energy.append({str(Spin.up): [], str(Spin.down): []})
else:
energy.append({str(Spin.up): []})
distance.append([self._bs._distance[j]
for j in range(b['start_index'],
b['end_index']+1)])
ticks = self.get_ticks()
for i in range(self._nb_bands):
energy[-1][str(Spin.up)].append(
[self._bs._bands[Spin.up][i][j] - zero_energy
for j in range(b['start_index'], b['end_index']+1)])
if self._bs.is_spin_polarized:
for i in range(self._nb_bands):
energy[-1][str(Spin.down)].append(
[self._bs._bands[Spin.down][i][j] - zero_energy
for j in range(b['start_index'], b['end_index']+1)])
vbm = self._bs.get_vbm()
cbm = self._bs.get_cbm()
vbm_plot = []
cbm_plot = []
for index in cbm['kpoint_index']:
cbm_plot.append((self._bs._distance[index],
cbm['energy'] - zero_energy if zero_to_efermi
else cbm['energy']))
for index in vbm['kpoint_index']:
vbm_plot.append((self._bs._distance[index],
vbm['energy'] - zero_energy if zero_to_efermi
else vbm['energy']))
bg = self._bs.get_band_gap()
direct = "Indirect"
if bg['direct']:
direct = "Direct"
return {'ticks': ticks, 'distances': distance, 'energy': energy,
'vbm': vbm_plot, 'cbm': cbm_plot,
'lattice': self._bs._lattice_rec.as_dict(),
'zero_energy': zero_energy, 'is_metal': self._bs.is_metal(),
'band_gap': "{} {} bandgap = {}".format(direct,
bg['transition'],
bg['energy'])
if not self._bs.is_metal() else ""}
def get_plot(self, zero_to_efermi=True, ylim=None, smooth=False):
"""
get a matplotlib object for the bandstructure plot.
Blue lines are up spin, red lines are down
spin.
Args:
zero_to_efermi: Automatically subtract off the Fermi energy from
the eigenvalues and plot (E-Ef).
            ylim: Specify the y-axis (energy) limits; by default None lets
                the code choose. It is vbm-4 and cbm+4 for an insulator,
                efermi-10 and efermi+10 for a metal.
            smooth: interpolate the bands with a cubic spline
"""
from pymatgen.util.plotting_utils import get_publication_quality_plot
plt = get_publication_quality_plot(12, 8)
from matplotlib import rc
import scipy.interpolate as scint
rc('text', usetex=True)
#main internal config options
e_min = -4
e_max = 4
if self._bs.is_metal():
e_min = -10
e_max = 10
band_linewidth = 3
data = self.bs_plot_data(zero_to_efermi)
if not smooth:
for d in range(len(data['distances'])):
for i in range(self._nb_bands):
plt.plot(data['distances'][d],
[data['energy'][d][str(Spin.up)][i][j]
for j in range(len(data['distances'][d]))], 'b-',
linewidth=band_linewidth)
if self._bs.is_spin_polarized:
plt.plot(data['distances'][d],
[data['energy'][d][str(Spin.down)][i][j]
for j in range(len(data['distances'][d]))],
'r--', linewidth=band_linewidth)
else:
for d in range(len(data['distances'])):
for i in range(self._nb_bands):
tck = scint.splrep(
data['distances'][d],
[data['energy'][d][str(Spin.up)][i][j]
for j in range(len(data['distances'][d]))])
step = (data['distances'][d][-1]
- data['distances'][d][0]) / 1000
plt.plot([x * step+data['distances'][d][0]
for x in range(1000)],
[scint.splev(x * step+data['distances'][d][0],
tck, der=0)
for x in range(1000)], 'b-',
linewidth=band_linewidth)
if self._bs.is_spin_polarized:
tck = scint.splrep(
data['distances'][d],
[data['energy'][d][str(Spin.down)][i][j]
for j in range(len(data['distances'][d]))])
step = (data['distances'][d][-1]
- data['distances'][d][0]) / 1000
plt.plot([x * step+data['distances'][d][0]
for x in range(1000)],
[scint.splev(x * step+data['distances'][d][0],
tck, der=0)
for x in range(1000)], 'r--',
linewidth=band_linewidth)
self._maketicks(plt)
#Main X and Y Labels
plt.xlabel(r'$\mathrm{Wave\ Vector}$', fontsize=30)
ylabel = r'$\mathrm{E\ -\ E_f\ (eV)}$' if zero_to_efermi \
else r'$\mathrm{Energy\ (eV)}$'
plt.ylabel(ylabel, fontsize=30)
# Draw Fermi energy, only if not the zero
if not zero_to_efermi:
ef = self._bs.efermi
plt.axhline(ef, linewidth=2, color='k')
# X range (K)
#last distance point
x_max = data['distances'][-1][-1]
plt.xlim(0, x_max)
if ylim is None:
if self._bs.is_metal():
# Plot A Metal
if zero_to_efermi:
plt.ylim(e_min, e_max)
else:
                    plt.ylim(self._bs.efermi + e_min, self._bs.efermi + e_max)
else:
for cbm in data['cbm']:
plt.scatter(cbm[0], cbm[1], color='r', marker='o', s=100)
for vbm in data['vbm']:
plt.scatter(vbm[0], vbm[1], color='g', marker='o', s=100)
plt.ylim(data['vbm'][0][1] + e_min, data['cbm'][0][1] + e_max)
else:
plt.ylim(ylim)
plt.tight_layout()
return plt
def show(self, zero_to_efermi=True, ylim=None, smooth=False):
"""
Show the plot using matplotlib.
Args:
zero_to_efermi: Automatically subtract off the Fermi energy from
the eigenvalues and plot (E-Ef).
            ylim: Specify the y-axis (energy) limits; by default None lets
                the code choose: vbm-4 to cbm+4 for insulators and
                efermi-10 to efermi+10 for metals.
            smooth: Interpolate the bands with a cubic spline.
"""
plt = self.get_plot(zero_to_efermi, ylim, smooth)
plt.show()
def save_plot(self, filename, img_format="eps", ylim=None,
zero_to_efermi=True, smooth=False):
"""
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
            ylim: Specifies the y-axis limits.
            zero_to_efermi: Automatically subtract off the Fermi energy.
            smooth: Interpolate the bands with a cubic spline.
"""
plt = self.get_plot(ylim=ylim, zero_to_efermi=zero_to_efermi,
smooth=smooth)
plt.savefig(filename, format=img_format)
plt.close()
def get_ticks(self):
"""
Get all ticks and labels for a band structure plot.
Returns:
            A dict with 'distance': a list of distances at which ticks should
            be set and 'label': a list of labels for those ticks.
"""
tick_distance = []
tick_labels = []
previous_label = self._bs._kpoints[0].label
previous_branch = self._bs._branches[0]['name']
for i, c in enumerate(self._bs._kpoints):
if c.label is not None:
tick_distance.append(self._bs._distance[i])
this_branch = None
for b in self._bs._branches:
if b['start_index'] <= i <= b['end_index']:
this_branch = b['name']
break
if c.label != previous_label \
and previous_branch != this_branch:
label1 = c.label
if label1.startswith("\\") or label1.find("_") != -1:
label1 = "$" + label1 + "$"
label0 = previous_label
if label0.startswith("\\") or label0.find("_") != -1:
label0 = "$" + label0 + "$"
tick_labels.pop()
tick_distance.pop()
                    tick_labels.append(label0 + r"$\mid$" + label1)
else:
if c.label.startswith("\\") or c.label.find("_") != -1:
tick_labels.append("$" + c.label + "$")
else:
tick_labels.append(c.label)
previous_label = c.label
previous_branch = this_branch
return {'distance': tick_distance, 'label': tick_labels}
def plot_compare(self, other_plotter):
"""
        Plot two band structures for comparison. One is in red, the other in
        blue (spins are not distinguished). The two band structures need to be
        defined along the same symmetry lines, and the distances between
        symmetry points are those of the band structure used to build this
        BSPlotter.
        Args:
            other_plotter: Another BSPlotter object defined along the same
                symmetry lines.
Returns:
a matplotlib object with both band structures
"""
#TODO: add exception if the band structures are not compatible
plt = self.get_plot()
data_orig = self.bs_plot_data()
data = other_plotter.bs_plot_data()
band_linewidth = 3
        # bs_plot_data() returns distances and energies per branch, so loop
        # over branches first, then over bands.
        for d in range(len(data_orig['distances'])):
            for i in range(other_plotter._nb_bands):
                plt.plot(data_orig['distances'][d],
                         [data['energy'][d][str(Spin.up)][i][j]
                          for j in range(len(data_orig['distances'][d]))],
                         'r-', linewidth=band_linewidth)
                if other_plotter._bs.is_spin_polarized:
                    plt.plot(data_orig['distances'][d],
                             [data['energy'][d][str(Spin.down)][i][j]
                              for j in range(len(data_orig['distances'][d]))],
                             'r-', linewidth=band_linewidth)
return plt
def plot_brillouin(self):
"""
plot the Brillouin zone
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = Axes3D(fig)
vec1 = self._bs.lattice.matrix[0]
vec2 = self._bs.lattice.matrix[1]
vec3 = self._bs.lattice.matrix[2]
#make the grid
max_x = -1000
max_y = -1000
max_z = -1000
min_x = 1000
min_y = 1000
min_z = 1000
list_k_points = []
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
for k in [-1, 0, 1]:
list_k_points.append(i * vec1 + j * vec2 + k * vec3)
if list_k_points[-1][0] > max_x:
max_x = list_k_points[-1][0]
if list_k_points[-1][1] > max_y:
max_y = list_k_points[-1][1]
if list_k_points[-1][2] > max_z:
                        max_z = list_k_points[-1][2]
if list_k_points[-1][0] < min_x:
min_x = list_k_points[-1][0]
if list_k_points[-1][1] < min_y:
min_y = list_k_points[-1][1]
if list_k_points[-1][2] < min_z:
                        min_z = list_k_points[-1][2]
vertex = _qvertex_target(list_k_points, 13)
lines = get_lines_voronoi(vertex)
for i in range(len(lines)):
vertex1 = lines[i]['start']
vertex2 = lines[i]['end']
ax.plot([vertex1[0], vertex2[0]], [vertex1[1], vertex2[1]],
[vertex1[2], vertex2[2]], color='k')
for b in self._bs._branches:
vertex1 = self._bs.kpoints[b['start_index']].cart_coords
vertex2 = self._bs.kpoints[b['end_index']].cart_coords
ax.plot([vertex1[0], vertex2[0]], [vertex1[1], vertex2[1]],
[vertex1[2], vertex2[2]], color='r', linewidth=3)
for k in self._bs.kpoints:
if k.label:
label = k.label
if k.label.startswith("\\") or k.label.find("_") != -1:
label = "$" + k.label + "$"
off = 0.01
ax.text(k.cart_coords[0] + off, k.cart_coords[1] + off,
k.cart_coords[2] + off, label, color='b', size='25')
ax.scatter([k.cart_coords[0]], [k.cart_coords[1]],
[k.cart_coords[2]], color='b')
# make ticklabels and ticklines invisible
for a in ax.w_xaxis.get_ticklines() + ax.w_xaxis.get_ticklabels():
a.set_visible(False)
for a in ax.w_yaxis.get_ticklines() + ax.w_yaxis.get_ticklabels():
a.set_visible(False)
for a in ax.w_zaxis.get_ticklines() + ax.w_zaxis.get_ticklabels():
a.set_visible(False)
ax.grid(False)
plt.show()
ax.axis("off")
class BSPlotterProjected(BSPlotter):
"""
Class to plot or get data to facilitate the plot of band structure objects
projected along orbitals, elements or sites.
Args:
bs: A BandStructureSymmLine object with projections.
"""
def __init__(self, bs):
if len(bs._projections) == 0:
raise ValueError("try to plot projections"
" on a band structure without any")
BSPlotter.__init__(self, bs)
def _get_projections_by_branches(self, dictio):
proj = self._bs.get_projections_on_elts_and_orbitals(dictio)
proj_br = []
print(len(proj[Spin.up]))
print(len(proj[Spin.up][0]))
for c in proj[Spin.up][0]:
print(c)
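        # proj_br layout: one entry per branch; each entry maps str(Spin.up)
        # (and str(Spin.down) for spin-polarized runs) to a list over bands,
        # where each band holds, for every k-point of the branch, a dict of
        # {element: {orbital: projection}}.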
for b in self._bs._branches:
print(b)
if self._bs.is_spin_polarized:
proj_br.append({str(Spin.up): [[] for l in range(self._nb_bands)],
str(Spin.down): [[] for l in range(self._nb_bands)]})
else:
proj_br.append({str(Spin.up): [[] for l in range(self._nb_bands)]})
print((len(proj_br[-1][str(Spin.up)]), self._nb_bands))
for i in range(self._nb_bands):
for j in range(b['start_index'], b['end_index']+1):
proj_br[-1][str(Spin.up)][i].append({e: {o: proj[Spin.up][i][j][e][o]
for o in proj[Spin.up][i][j][e]}
for e in proj[Spin.up][i][j]})
        if self._bs.is_spin_polarized:
            # Collect spin-down projections per branch (indexed by ib) to
            # mirror the spin-up data gathered above.
            for ib, b in enumerate(self._bs._branches):
                for i in range(self._nb_bands):
                    for j in range(b['start_index'], b['end_index']+1):
                        proj_br[ib][str(Spin.down)][i].append(
                            {e: {o: proj[Spin.down][i][j][e][o]
                                 for o in proj[Spin.down][i][j][e]}
                             for e in proj[Spin.down][i][j]})
return proj_br
def get_projected_plots_dots(self, dictio, zero_to_efermi=True, ylim=None):
"""
Method returning a plot composed of subplots along different elements
and orbitals.
Args:
dictio: The element and orbitals you want a projection on. The
format is {Element:[Orbitals]} for instance
{'Cu':['d','s'],'O':['p']} will give projections for Cu on
d and s orbitals and on oxygen p.
Returns:
            a pylab object with a different subfigure for each projection.
            Blue and red dots indicate spin up and spin down, respectively.
            The bigger the dot in the band structure, the stronger the
            character of the corresponding element and orbital.
"""
from pymatgen.util.plotting_utils import get_publication_quality_plot
band_linewidth = 1.0
fig_number = sum([len(v) for v in dictio.values()])
proj = self._get_projections_by_branches(dictio)
data = self.bs_plot_data(zero_to_efermi)
plt = get_publication_quality_plot(12, 8)
e_min = -4
e_max = 4
if self._bs.is_metal():
e_min = -10
e_max = 10
count = 1
for el in dictio:
for o in dictio[el]:
plt.subplot(100 * math.ceil(fig_number / 2) + 20 + count)
self._maketicks(plt)
for b in range(len(data['distances'])):
for i in range(self._nb_bands):
plt.plot(data['distances'][b],
[data['energy'][b][str(Spin.up)][i][j]
for j in range(len(data['distances'][b]))], 'b-',
linewidth=band_linewidth)
if self._bs.is_spin_polarized:
plt.plot(data['distances'][b],
[data['energy'][b][str(Spin.down)][i][j]
for j in range(len(data['distances'][b]))],
'r--', linewidth=band_linewidth)
for j in range(len(data['energy'][b][str(Spin.up)][i])):
plt.plot(data['distances'][b][j],
data['energy'][b][str(Spin.down)][i][j], 'ro',
markersize=proj[b][str(Spin.down)][i][j][str(el)][o] * 15.0)
for j in range(len(data['energy'][b][str(Spin.up)][i])):
plt.plot(data['distances'][b][j],
data['energy'][b][str(Spin.up)][i][j], 'bo',
markersize=proj[b][str(Spin.up)][i][j][str(el)][o] * 15.0)
if ylim is None:
if self._bs.is_metal():
if zero_to_efermi:
plt.ylim(e_min, e_max)
else:
plt.ylim(self._bs.efermi + e_min, self._bs._efermi
+ e_max)
else:
for cbm in data['cbm']:
plt.scatter(cbm[0], cbm[1], color='r', marker='o',
s=100)
for vbm in data['vbm']:
plt.scatter(vbm[0], vbm[1], color='g', marker='o',
s=100)
plt.ylim(data['vbm'][0][1] + e_min, data['cbm'][0][1]
+ e_max)
else:
plt.ylim(ylim)
plt.title(str(el) + " " + str(o))
count += 1
return plt
def get_elt_projected_plots(self, zero_to_efermi=True, ylim=None):
"""
        Method returning a plot composed of subplots for each element.
        Returns:
            a pylab object with a different subfigure for each element.
            Blue and red dots indicate spin up and spin down, respectively.
            The bigger the dot in the band structure, the stronger the
            character of the corresponding element and orbital.
"""
band_linewidth = 1.0
proj = self._get_projections_by_branches({e.symbol: ['s', 'p', 'd']
for e in self._bs._structure.composition.elements})
data = self.bs_plot_data(zero_to_efermi)
from pymatgen.util.plotting_utils import get_publication_quality_plot
plt = get_publication_quality_plot(12, 8)
e_min = -4
e_max = 4
if self._bs.is_metal():
e_min = -10
e_max = 10
count = 1
for el in self._bs._structure.composition.elements:
plt.subplot(220 + count)
self._maketicks(plt)
for b in range(len(data['distances'])):
for i in range(self._nb_bands):
plt.plot(data['distances'][b], [data['energy'][b][str(Spin.up)][i][j]
for j in range(len(data['distances'][b]))], 'b-',
linewidth=band_linewidth)
if self._bs.is_spin_polarized:
plt.plot(data['distances'][b],
[data['energy'][b][str(Spin.down)][i][j]
for j in range(len(data['distances'][b]))],
'r--', linewidth=band_linewidth)
for j in range(len(data['energy'][b][str(Spin.up)][i])):
plt.plot(data['distances'][b][j], data['energy'][b][str(Spin.down)][i][j], 'ro',
markersize=sum([proj[b][str(Spin.down)][i][j][str(el)][o] for o in proj[b]
[str(Spin.down)][i][j][str(el)]]) * 15.0)
for j in range(len(data['energy'][b][str(Spin.up)][i])):
plt.plot(data['distances'][b][j],
data['energy'][b][str(Spin.up)][i][j], 'bo',
markersize=sum([proj[b][str(Spin.up)][i][j][str(el)][o] for o in proj[b]
[str(Spin.up)][i][j][str(el)]]) * 15.0)
if ylim is None:
if self._bs.is_metal():
if zero_to_efermi:
plt.ylim(e_min, e_max)
else:
plt.ylim(self._bs.efermi + e_min, self._bs._efermi
+ e_max)
else:
for cbm in data['cbm']:
plt.scatter(cbm[0], cbm[1], color='r', marker='o',
s=100)
for vbm in data['vbm']:
plt.scatter(vbm[0], vbm[1], color='g', marker='o',
s=100)
plt.ylim(data['vbm'][0][1] + e_min, data['cbm'][0][1]
+ e_max)
else:
plt.ylim(ylim)
plt.title(str(el))
count += 1
return plt
def get_elt_projected_plots_color(self, zero_to_efermi=True,
elt_ordered=None):
"""
        Returns a pylab plot object with a single plot where the band structure
        line color depends on the character of the band (projected on the
        different elements). Each element is associated with red, green or
        blue, and the rgb color of a band segment reflects its character.
        The method can only deal with binary and ternary compounds.
        Spin up and spin down are differentiated by a '-' and a '--' line.
        Args:
            elt_ordered: An ordered list of Elements. The first one is red,
                the second green, the last blue.
Returns:
a pylab object
"""
band_linewidth = 3.0
if len(self._bs._structure.composition.elements) > 3:
            raise ValueError("the method can only deal with binary and"
                             " ternary compounds")
if elt_ordered is None:
elt_ordered = self._bs._structure.composition.elements
proj = self._get_projections_by_branches(
{e.symbol: ['s', 'p', 'd']
for e in self._bs._structure.composition.elements})
data = self.bs_plot_data(zero_to_efermi)
from pymatgen.util.plotting_utils import get_publication_quality_plot
plt = get_publication_quality_plot(12, 8)
spins = [Spin.up]
if self._bs.is_spin_polarized:
spins = [Spin.up, Spin.down]
self._maketicks(plt)
for s in spins:
for b in range(len(data['distances'])):
for i in range(self._nb_bands):
for j in range(len(data['energy'][b][str(s)][i]) - 1):
sum_e = 0.0
for el in elt_ordered:
sum_e = sum_e + \
sum([proj[b][str(s)][i][j][str(el)][o]
for o
in proj[b][str(s)][i][j][str(el)]])
if sum_e == 0.0:
color = [0.0] * len(elt_ordered)
else:
color = [sum([proj[b][str(s)][i][j][str(el)][o]
for o
in proj[b][str(s)][i][j][str(el)]])
/ sum_e
for el in elt_ordered]
if len(color) == 2:
color.append(0.0)
color[2] = color[1]
color[1] = 0.0
sign = '-'
if s == Spin.down:
sign = '--'
plt.plot([data['distances'][b][j],
data['distances'][b][j + 1]],
[data['energy'][b][str(s)][i][j],
data['energy'][b][str(s)][i][j + 1]], sign,
color=color, linewidth=band_linewidth)
plt.ylim(data['vbm'][0][1] - 4.0, data['cbm'][0][1] + 2.0)
return plt
def _qvertex_target(data, index):
"""
Input data should be in the form of a list of a list of floats.
index is the index of the targeted point
Returns the vertices of the voronoi construction around this target point.
"""
from pyhull import qvoronoi
output = qvoronoi("p QV"+str(index), data)
output.pop(0)
output.pop(0)
return [[float(i) for i in row.split()] for row in output]
def get_lines_voronoi(data):
from pyhull import qconvex
output = qconvex("o", data)
nb_points = int(output[1].split(" ")[0])
list_lines = []
list_points = []
for i in range(2, 2 + nb_points):
list_points.append([float(c) for c in output[i].strip().split()])
facets = []
for i in range(2 + nb_points, len(output)):
if output[i] != '':
tmp = output[i].strip().split(" ")
facets.append([int(tmp[j]) for j in range(1, len(tmp))])
for i in range(len(facets)):
for line in itertools.combinations(facets[i], 2):
for j in range(len(facets)):
if i != j and line[0] in facets[j] and line[1] in facets[j]:
#check if the two facets i and j are not coplanar
vector1 = np.array(list_points[facets[j][0]])\
- np.array(list_points[facets[j][1]])
vector2 = np.array(list_points[facets[j][0]])\
- np.array(list_points[facets[j][2]])
n1 = np.cross(vector1, vector2)
vector1 = np.array(list_points[facets[i][0]])\
- np.array(list_points[facets[i][1]])
vector2 = np.array(list_points[facets[i][0]])\
- np.array(list_points[facets[i][2]])
n2 = np.cross(vector1, vector2)
dot = math.fabs(np.dot(n1, n2) / (np.linalg.norm(n1)
* np.linalg.norm(n2)))
if 1.05 > dot > 0.95:
continue
list_lines.append({'start': list_points[line[0]],
'end': list_points[line[1]]})
break
return list_lines
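# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module). It assumes a BandStructureSymmLine object ``bs`` obtained
# elsewhere, with projections present if the projected plotter is used;
# the file name and y-limits below are placeholders.
# ---------------------------------------------------------------------------
def _example_bsplotter_usage(bs):
    """Sketch of how the plotters above might typically be driven."""
    # Plain band structure, zeroed at the Fermi level, written to disk.
    plotter = BSPlotter(bs)
    plotter.save_plot("bands.eps", img_format="eps", ylim=(-5, 5))
    # Element-resolved version; requires projections in ``bs``.
    proj_plotter = BSPlotterProjected(bs)
    proj_plotter.get_elt_projected_plots(zero_to_efermi=True).show()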
|
|
#!/usr/bin/env python
"""
Calculate the angular distribution function (ADF) from files.
Take an average over atoms in a file or files.
Usage:
adf.py [options] INFILE [INFILE...]
Options:
-h, --help Show this help message and exit.
-w DEG Width of the angular degree. [default: 1.0]
-r RCUT Cutoff radius of the bonding pair. [default: 3.0]
--gsmear=SIGMA
Width of Gaussian smearing, zero means no smearing. [default: 0]
--specorder=SPECORDER
Order of species separated by comma, like, --specorder=Si,O. [default: None]
--triplets=TRIPLETS
Triplets whose angles are to be computed. Three species should be specified connected by hyphen,
and separated by comma, e.g.) P-O-O,Li-O-O. [default: None]
-o OUT Output file name [default: None]
  --out4fp    Flag to write out in general fp.py format. [default: False]
--skip=NSKIP
Skip first NSKIP steps from the statistics. [default: 0]
--no-average
Not to take average over files.
--plot Plot figures. [default: False]
"""
from __future__ import print_function
import os,sys
import numpy as np
from docopt import docopt
from nappy.gaussian_smear import gsmear
from nappy.common import get_key
from nappy.io import read
__author__ = "Ryo KOBAYASHI"
__version__ = "200505"
def norm(vector):
norm= 0.0
for e in vector:
norm += e*e
return np.sqrt(norm)
def adf_atom(ia,dang,rcut,nsys,poss,lspr,symbols,sj,sk):
"""
    Compute the number of j-i-k angles around atom ia that fall in each
    angular bin of width dang over the range [0:180] degrees.
"""
na= int(180.0/dang) +1
hmat= nsys.get_hmat()
nda= np.zeros(na,dtype=int)
natm= nsys.num_atoms()
rcut2= rcut*rcut
# pi= nsys.get_atom_attr(ia,'pos')
# lspri = nsys.get_atom_attr(ia,'lspr')
pi = poss[ia]
lspri = lspr[ia]
for ji in range(len(lspri)):
ja= lspri[ji]
if ja == ia:
continue
sji = symbols[ja]
if sji not in (sj,sk):
continue
# pj= nsys.get_atom_attr(ja,'pos')
pj = poss[ja]
pij= pj-pi
pij= pij -np.round(pij)
vij= np.dot(hmat,pij)
rij2= np.dot(vij,vij)
if rij2 >= rcut2:
continue
rij= np.sqrt(rij2)
for ki in range(len(lspri)):
ka= lspri[ki]
if ka == ia or ka <= ja:
continue
ski = symbols[ka]
if set((sji,ski)) != set((sj,sk)):
continue
# pk= nsys.get_atom_attr(ka,'pos')
pk = poss[ka]
pik= pk-pi
pik= pik -np.round(pik)
vik= np.dot(hmat,pik)
rik2= np.dot(vik,vik)
if rik2 >= rcut2:
continue
rik= np.sqrt(rik2)
cs= np.dot(vij,vik)/rij/rik
if cs <= -1.0:
rad= np.pi
else:
rad= np.arccos(cs)
deg= rad/np.pi *180.0
nda[int(deg/dang)] += 1
return nda
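# Minimal sketch (added for illustration; not part of the original script):
# how a single bond angle maps onto a histogram bin of width `dang` degrees,
# mirroring the cos -> arccos -> degree -> bin logic in adf_atom() above.
# The two bond vectors below are placeholders.
def _angle_bin_example(dang=1.0):
    vij = np.array([1.0, 0.0, 0.0])   # i->j bond vector
    vik = np.array([0.0, 1.0, 0.0])   # i->k bond vector
    cs = np.dot(vij, vik) / (np.linalg.norm(vij) * np.linalg.norm(vik))
    cs = max(cs, -1.0)                # guard against rounding below -1.0
    deg = np.arccos(cs) / np.pi * 180.0
    return int(deg / dang)            # -> bin 90 for these vectors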
def adf(nsys,dang,rcut,triplets):
natm0= nsys.num_atoms()
n1,n2,n3= nsys.get_expansion_num(2.0*rcut)
if not (n1==1 and n2==1 and n3==1):
print(' system to be repeated, n1,n2,n3=',n1,n2,n3)
nsys.repeat(n1,n2,n3)
nsys.assign_pbc()
nsys.make_pair_list(rcut=rcut)
na= int(180.0/dang)+1
anda= np.zeros((len(triplets),na),dtype=float)
angd= np.array([ dang*ia for ia in range(na) ])
symbols = nsys.get_symbols()
poss = np.array(nsys.atoms.pos)
lspr = nsys.atoms.lspr
for it,t in enumerate(triplets):
si,sj,sk = t
for ia in range(natm0):
if symbols[ia] != si:
continue
adfa= adf_atom(ia,dang,rcut,nsys,poss,lspr,symbols,sj,sk)
for iang in range(na):
anda[it,iang]= anda[it,iang] +adfa[iang]
return angd,anda,natm0
def adf_average(infiles,dang=1.0,rcut=3.0,triplets=[],no_average=False,
specorder=None):
na= int(180.0/dang) +1
aadf= np.zeros((len(triplets),na),dtype=float)
nsum= 0
for infname in infiles:
if not os.path.exists(infname):
print("[Error] File, {0}, does not exist !!!".format(infname))
sys.exit()
#nsys= NAPSystem(fname=infname,specorder=specorder)
print(' File = ',infname)
nsys = read(fname=infname,specorder=specorder)
angd,df,n= adf(nsys,dang,rcut,triplets)
aadf += df
nsum += n
if not no_average:
aadf /= nsum
return angd,aadf
def write_normal(fname,triplets,na,angd,agr):
"""
Write out ADF data in normal ADF format.
"""
outfile= open(fname,'w')
outfile.write('# 1:theta[i], ')
for it,t in enumerate(triplets):
outfile.write(' {0:d}:{1:s}-{2:s}-{3:s},'.format(it+2,*t))
outfile.write('\n')
for i in range(na):
outfile.write(' {0:10.4f}'.format(angd[i]))
for it,t in enumerate(triplets):
outfile.write(' {0:11.3e}'.format(agr[it,i]))
outfile.write('\n')
outfile.close()
return None
def write_out4fp(fname,triplets,na,angd,rcut,agr,nperline=6):
"""
Write out ADF data in general fp.py format.
Parameters
----------
nperline : int
Number of data in a line. [default: 6]
"""
ndat = na*len(triplets)
data = np.zeros(ndat)
n = 0
for it,tri in enumerate(triplets):
for i in range(na):
data[n] = agr[it,i]
n += 1
with open(fname,'w') as f:
f.write('# ADF for triplets:')
for it,t in enumerate(triplets):
f.write(' {0:s}-{1:s}-{2:s},'.format(*t))
f.write('\n')
f.write('# rcut, na = {0:.3f}, {1:d}\n'.format(rcut,na))
f.write('#\n')
#...Num of data, weight for the data
f.write(' {0:6d} {1:7.3f}\n'.format(ndat,1.0))
j0 = 0
while True:
f.write(' '.join('{0:12.4e}'.format(data[j]) for j in range(j0,j0+nperline) if j < ndat))
f.write('\n')
j0 += nperline
if j0 >= ndat:
break
return None
def plot_figures(angd,agr,triplets):
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(context='talk',style='ticks')
for i,t in enumerate(triplets):
        plt.plot(angd,agr[i],label='{0:s}-{1:s}-{2:s}'.format(*t))
    plt.legend()
plt.xlabel('Angle (degree)')
plt.ylabel('ADF')
plt.savefig("graph_adf.png", format='png', dpi=300, bbox_inches='tight')
if __name__ == "__main__":
args= docopt(__doc__)
infiles= args['INFILE']
triplets = args['--triplets']
specorder = [ x for x in args['--specorder'].split(',') ]
if specorder == ['None']:
specorder = []
if triplets == 'None':
raise ValueError('Triplets must be specified.')
triplets = [ t.split('-') for t in triplets.split(',') ]
if len(triplets) == 0:
raise ValueError('There must be at least one triplet.')
out4fp = args['--out4fp']
dang= float(args['-w'])
drad= np.pi *dang/180.0
rcut= float(args['-r'])
sigma= int(args['--gsmear'])
no_average = args['--no-average']
ofname= args['-o']
if ofname == 'None':
ofname = None
flag_plot= args['--plot']
nskip = int(args['--skip'])
if out4fp and ofname is None:
raise ValueError("Output file name must be specified with option -o.")
if nskip > len(infiles):
        raise ValueError('NSKIP must be less than the number of files given: {0:d}'.format(len(infiles)))
infiles.sort(key=get_key,reverse=True)
del infiles[:nskip]
na= int(180.0/dang) +1
angd,agr= adf_average(infiles,dang=dang,
rcut=rcut,triplets=triplets,
no_average=no_average,
specorder=specorder)
if not sigma == 0:
print(' Gaussian smearing...')
for it,t in enumerate(triplets):
agr[it,:] = gsmear(angd,agr[it,:],sigma)
if flag_plot:
plot_figures(angd,agr,triplets)
print('')
        print(' ADF graphs are plotted.')
print(' Check graph_adf.png')
#...Regardless ofname, write out.adf in normal format
write_normal('out.adf',triplets,na,angd,agr)
#...Format of output (named by ofname) depends on out4fp
if ofname is not None:
if out4fp:
            write_out4fp(ofname,triplets,na,angd,rcut,agr)
else:
write_normal(ofname,triplets,na,angd,agr)
print(' Wrote out.adf')
if ofname is not None:
print(' Wrote {0:s}'.format(ofname))
|
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "streamlink-"
cfg.versionfile_source = "src/streamlink/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long", "--abbrev=7",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
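# Illustrative sketch (not part of the original versioneer file): what
# render_pep440() yields for a hypothetical set of "pieces". The values
# below are made up.
def _render_pep440_example():
    pieces = {"closest-tag": "1.2.3", "distance": 4, "short": "abc1234",
              "dirty": True, "long": "abc1234abc1234", "error": None,
              "date": None}
    return render_pep440(pieces)  # -> "1.2.3+4.gabc1234.dirty"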
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""File logging handler for tasks."""
import logging
import os
from typing import Optional
import requests
from airflow.configuration import AirflowConfigException, conf
from airflow.models import TaskInstance
from airflow.utils.file import mkdirs
from airflow.utils.helpers import parse_template_string
class FileTaskHandler(logging.Handler):
"""
FileTaskHandler is a python log handler that handles and reads
task instance logs. It creates and delegates log handling
to `logging.FileHandler` after receiving task instance context.
It reads logs from task instance's host machine.
:param base_log_folder: Base log folder to place logs.
:param filename_template: template filename string
"""
def __init__(self, base_log_folder: str, filename_template: str):
super().__init__()
self.handler = None # type: Optional[logging.FileHandler]
self.local_base = base_log_folder
self.filename_template, self.filename_jinja_template = \
parse_template_string(filename_template)
def set_context(self, ti: TaskInstance):
"""
Provide task_instance context to airflow task handler.
:param ti: task instance object
"""
local_loc = self._init_file(ti)
self.handler = logging.FileHandler(local_loc, encoding='utf-8')
if self.formatter:
self.handler.setFormatter(self.formatter)
self.handler.setLevel(self.level)
def emit(self, record):
if self.handler:
self.handler.emit(record)
def flush(self):
if self.handler:
self.handler.flush()
def close(self):
if self.handler:
self.handler.close()
def _render_filename(self, ti, try_number):
if self.filename_jinja_template:
jinja_context = ti.get_template_context()
jinja_context['try_number'] = try_number
return self.filename_jinja_template.render(**jinja_context)
return self.filename_template.format(dag_id=ti.dag_id,
task_id=ti.task_id,
execution_date=ti.execution_date.isoformat(),
try_number=try_number)
def _read(self, ti, try_number, metadata=None): # pylint: disable=unused-argument
"""
Template method that contains custom logic of reading
logs given the try_number.
:param ti: task instance record
:param try_number: current try_number to read log from
:param metadata: log metadata,
            can be used for streaming log reading and auto-tailing.
:return: log message as a string and metadata.
"""
# Task instance here might be different from task instance when
# initializing the handler. Thus explicitly getting log location
# is needed to get correct log path.
log_relative_path = self._render_filename(ti, try_number)
location = os.path.join(self.local_base, log_relative_path)
log = ""
if os.path.exists(location):
try:
with open(location) as file:
log += "*** Reading local file: {}\n".format(location)
log += "".join(file.readlines())
except Exception as e: # pylint: disable=broad-except
log = "*** Failed to load local log file: {}\n".format(location)
log += "*** {}\n".format(str(e))
else:
url = os.path.join(
"http://{ti.hostname}:{worker_log_server_port}/log", log_relative_path
).format(
ti=ti,
worker_log_server_port=conf.get('celery', 'WORKER_LOG_SERVER_PORT')
)
log += "*** Log file does not exist: {}\n".format(location)
log += "*** Fetching from: {}\n".format(url)
try:
timeout = None # No timeout
try:
timeout = conf.getint('webserver', 'log_fetch_timeout_sec')
except (AirflowConfigException, ValueError):
pass
response = requests.get(url, timeout=timeout)
response.encoding = "utf-8"
# Check if the resource was properly fetched
response.raise_for_status()
log += '\n' + response.text
except Exception as e: # pylint: disable=broad-except
log += "*** Failed to fetch log file from worker. {}\n".format(str(e))
return log, {'end_of_log': True}
def read(self, task_instance, try_number=None, metadata=None):
"""
Read logs of given task instance from local machine.
:param task_instance: task instance object
:param try_number: task instance try_number to read logs from. If None
it returns all logs separated by try_number
:param metadata: log metadata,
            can be used for streaming log reading and auto-tailing.
:return: a list of logs
"""
# Task instance increments its try number when it starts to run.
# So the log for a particular task try will only show up when
# try number gets incremented in DB, i.e logs produced the time
# after cli run and before try_number + 1 in DB will not be displayed.
if try_number is None:
next_try = task_instance.next_try_number
try_numbers = list(range(1, next_try))
elif try_number < 1:
logs = [
'Error fetching the logs. Try number {} is invalid.'.format(try_number),
]
return logs
else:
try_numbers = [try_number]
logs = [''] * len(try_numbers)
metadata_array = [{}] * len(try_numbers)
for i, try_number_element in enumerate(try_numbers):
log, metadata = self._read(task_instance, try_number_element, metadata)
logs[i] += log
metadata_array[i] = metadata
return logs, metadata_array
def _init_file(self, ti):
"""
Create log directory and give it correct permissions.
:param ti: task instance object
:return: relative log path of the given task instance
"""
# To handle log writing when tasks are impersonated, the log files need to
# be writable by the user that runs the Airflow command and the user
# that is impersonated. This is mainly to handle corner cases with the
# SubDagOperator. When the SubDagOperator is run, all of the operators
# run under the impersonated user and create appropriate log files
# as the impersonated user. However, if the user manually runs tasks
# of the SubDagOperator through the UI, then the log files are created
# by the user that runs the Airflow command. For example, the Airflow
# run command may be run by the `airflow_sudoable` user, but the Airflow
# tasks may be run by the `airflow` user. If the log files are not
# writable by both users, then it's possible that re-running a task
# via the UI (or vice versa) results in a permission error as the task
# tries to write to a log file created by the other user.
relative_path = self._render_filename(ti, ti.try_number)
full_path = os.path.join(self.local_base, relative_path)
directory = os.path.dirname(full_path)
# Create the log file and give it group writable permissions
# TODO(aoen): Make log dirs and logs globally readable for now since the SubDag
# operator is not compatible with impersonation (e.g. if a Celery executor is used
# for a SubDag operator and the SubDag operator has a different owner than the
# parent DAG)
if not os.path.exists(directory):
# Create the directory as globally writable using custom mkdirs
# as os.makedirs doesn't set mode properly.
mkdirs(directory, 0o777)
if not os.path.exists(full_path):
open(full_path, "a").close()
# TODO: Investigate using 444 instead of 666.
os.chmod(full_path, 0o666)
return full_path
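# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original handler).
# In practice the handler is wired up through Airflow's logging configuration,
# but conceptually it attaches to a logger like any logging.Handler. The base
# folder and filename template below are placeholders, and set_context(ti)
# must still be called with a TaskInstance before records are written to disk.
# ---------------------------------------------------------------------------
def _example_file_task_handler():
    handler = FileTaskHandler(
        base_log_folder="/tmp/airflow/logs",
        filename_template="{dag_id}/{task_id}/{execution_date}/{try_number}.log",
    )
    logger = logging.getLogger("airflow.task.example")
    logger.addHandler(handler)
    return handler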
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from bs4 import BeautifulSoup
from urlparse import urlparse, urljoin
from re import search
from chime import repo_functions
class ChimeTestClient:
''' Stateful client for Chime Flask test client.
'''
def __init__(self, client, test):
''' Create a new client, with Flask test client and TestCase instances.
'''
self.client = client
self.test = test
response = self.client.get('/')
self.test.assertFalse('Start' in response.data)
self.path, self.soup, self.headers = '/', BeautifulSoup(response.data), response.headers
def sign_in(self, email):
''' Sign in with a given email address.
Should be used inside an HTTMock that overrides Chime's internal
call to Persona verifier: https://verifier.login.persona.org/verify
'''
response = self.client.post('/sign-in', data={'assertion': email})
self.test.assertEqual(response.status_code, 200)
response = self.client.get('/')
self.test.assertTrue('Start' in response.data)
def reload(self):
''' Reload the current path.
'''
self.open_link(self.path)
def open_link(self, url, expected_status_code=200):
''' Open a link
'''
response = self.client.get(url)
self.test.assertEqual(response.status_code, expected_status_code)
self.path, self.soup, self.headers = url, BeautifulSoup(response.data), response.headers
def open_link_blindly(self, url):
''' Open a link without testing
'''
response = self.client.get(url)
self.path, self.soup, self.headers = url, BeautifulSoup(response.data), response.headers
def follow_link(self, href):
''' Follow a link after making sure it's present in the page.
'''
# Look for the link
link = self.soup.find(lambda tag: bool(tag.name == 'a' and tag['href'] == href))
response = self.client.get(link['href'])
redirect = href
redirect_count = 0
while response.status_code in (301, 302) and redirect_count < 3:
redirect = urlparse(response.headers['Location']).path
response = self.client.get(redirect)
redirect_count = redirect_count + 1
self.test.assertEqual(response.status_code, 200)
self.path, self.soup, self.headers = redirect, BeautifulSoup(response.data), response.headers
def follow_redirect(self, response, code):
''' Expect and follow a response HTTP redirect.
'''
self.test.assertEqual(response.status_code, code, 'Status {} should have been {}'.format(response.status_code, code))
if code in range(500, 599):
self.soup, self.headers = BeautifulSoup(response.data), response.headers
else:
redirect = urlparse(response.headers['Location']).path
response = self.client.get(redirect)
self.test.assertEqual(response.status_code, 200)
self.path, self.soup, self.headers = redirect, BeautifulSoup(response.data), response.headers
def get_branch_name(self):
''' Extract and return the branch name from the current soup.
'''
# Assumes there is an HTML comment in the format '<!-- branch: 1234567 -->'
branch_search = search(r'<!-- branch: (.{{{}}}) -->'.format(repo_functions.BRANCH_NAME_LENGTH), unicode(self.soup))
self.test.assertIsNotNone(branch_search)
try:
branch_name = branch_search.group(1)
except AttributeError:
raise Exception('No match for generated branch name.')
return branch_name
def start_task(self, description):
''' Start a new task.
'''
data = {'task_description': description}
response = self.client.post('/start', data=data)
if response.status_code == 200:
self.soup, self.headers = BeautifulSoup(response.data), response.headers
else:
self.follow_redirect(response, 303)
def delete_task(self, branch_name):
''' Look for button to delete a task, click it.
'''
hidden = self.soup.find(lambda tag: bool(tag.name == 'input' and tag.get('value') == branch_name))
form = hidden.find_parent('form')
self.test.assertEqual(form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'')
for i in form.find_all(['input', 'button'])}
delete_task_path = urlparse(urljoin(self.path, form['action'])).path
response = self.client.post(delete_task_path, data=data)
self.follow_redirect(response, 303)
def add_category(self, category_name):
''' Look for form to add a category, submit it.
'''
input = self.soup.find(lambda tag: bool(tag.name == 'input' and tag.get('placeholder') == 'Add topic'))
form = input.find_parent('form')
self.test.assertEqual(form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'') for i in form.find_all(['input', 'button'])}
data[input['name']] = category_name
add_category_path = urlparse(urljoin(self.path, form['action'])).path
response = self.client.post(add_category_path, data=data)
# Drop down to where the subcategories are.
self.follow_redirect(response, 303)
def add_categories(self, category_list):
''' Add many categories.
'''
for category_name in category_list:
self.add_category(category_name)
def add_subcategory(self, subcategory_name):
''' Look for form to add a subcategory, submit it..
'''
input = self.soup.find(lambda tag: bool(tag.name == 'input' and tag.get('placeholder') == 'Add subtopic'))
form = input.find_parent('form')
self.test.assertEqual(form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'') for i in form.find_all(['input', 'button'])}
data[input['name']] = subcategory_name
add_subcategory_path = urlparse(urljoin(self.path, form['action'])).path
response = self.client.post(add_subcategory_path, data=data)
# Drop down into the subcategory where the articles are.
self.follow_redirect(response, 303)
def add_article(self, article_name):
''' Look for form to add an article, submit it.
'''
# Create a new article.
input = self.soup.find(lambda tag: bool(tag.name == 'input' and tag.get('placeholder') == 'Add article'))
form = input.find_parent('form')
self.test.assertEqual(form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'') for i in form.find_all(['input', 'button'])}
data[input['name']] = article_name
add_article_path = urlparse(urljoin(self.path, form['action'])).path
response = self.client.post(add_article_path, data=data)
# View the new article.
self.follow_redirect(response, 303)
def quick_activity_setup(self, description, category_name=u'', subcategory_name=u'', article_name=u''):
''' Set up an activity quickly, with topic, sub-topic, and article if requested.
'''
# Start a new task
self.start_task(description=description)
branch_name = self.get_branch_name()
# Look for an "other" link that we know about - is it a category?
self.follow_link(href='/tree/{}/edit/other/'.format(branch_name))
# Create a new category, subcategory, and article.
if category_name:
self.add_category(category_name=category_name)
if subcategory_name:
self.add_subcategory(subcategory_name=subcategory_name)
if article_name:
self.add_article(article_name=article_name)
return branch_name
def submit_edit_article_form(self, title_str, body_str):
''' Submit the edit article form and return the response for testing or passing on.
Note: This will submit the form even if it doesn't have a save/submit button.
'''
body = self.soup.find(lambda tag: bool(tag.name == 'textarea' and tag.get('name') == 'en-body'))
form = body.find_parent('form')
title = form.find(lambda tag: bool(tag.name == 'input' and tag.get('name') == 'en-title'))
self.test.assertEqual(form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'')
for i in form.find_all(['input', 'button', 'textarea'])
if i.get('type') != 'submit'}
data[title['name']] = title_str
data[body['name']] = body_str
edit_article_path = urlparse(urljoin(self.path, form['action'])).path
return self.client.post(edit_article_path, data=data)
def edit_article(self, title_str, body_str):
''' Look for form to edit an article, submit it. This will submit even if there's no
save button available for the form.
'''
response = self.submit_edit_article_form(title_str, body_str)
# View the updated article.
self.follow_redirect(response, 303)
def edit_article_and_fail(self, title_str, body_str, expected_status_code=400):
        ''' Look for form to edit an article we know to be published, submit it and assert that the submission fails.
'''
response = self.submit_edit_article_form(title_str, body_str)
# Assert that the submission failed
self.test.assertTrue(response.status_code in range(expected_status_code, expected_status_code + 99))
def preview_article(self, title_str, body_str):
''' Look for form to edit an article, preview it.
'''
body = self.soup.find(lambda tag: bool(tag.name == 'textarea' and tag.get('name') == 'en-body'))
form = body.find_parent('form')
title = form.find(lambda tag: bool(tag.name == 'input' and tag.get('name') == 'en-title'))
self.test.assertEqual(form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'')
for i in form.find_all(['input', 'button', 'textarea'])
if i.get('type') != 'submit' or i.get('value') == 'Preview'}
data[title['name']] = title_str
data[body['name']] = body_str
edit_article_path = urlparse(urljoin(self.path, form['action'])).path
response = self.client.post(edit_article_path, data=data)
# View the updated article.
self.follow_redirect(response, 303)
def follow_modify_category_link(self, title_str):
''' Find the (sub-)category edit button in the last soup and follow it.
'''
mod_link = self.soup.find(lambda tag: bool(tag.name == 'a' and tag.text == title_str))
mod_li = mod_link.find_parent('li')
mod_span = mod_li.find(lambda tag: bool(tag.name == 'span' and 'fa-pencil' in tag.get('class')))
mod_link = mod_span.find_parent('a')
self.follow_link(mod_link['href'])
def delete_category(self):
''' Look for the delete button, submit it.
'''
body = self.soup.find(lambda tag: bool(tag.name == 'textarea' and tag.get('name') == 'en-description'))
form = body.find_parent('form')
self.test.assertEqual(form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'')
for i in form.find_all(['input', 'button', 'textarea'])
if i.get('name') != 'save'}
delete_category_path = urlparse(urljoin(self.path, form['action'])).path
response = self.client.post(delete_category_path, data=data)
self.follow_redirect(response, 303)
def delete_article(self, title_str):
''' Look for the article delete button, submit it
'''
del_link = self.soup.find(lambda tag: bool(tag.name == 'a' and tag.text == title_str))
del_li = del_link.find_parent('li')
del_span = del_li.find(lambda tag: bool(tag.name == 'span' and 'fa-trash' in tag.get('class')))
del_form = del_span.find_parent('form')
self.test.assertEqual(del_form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'')
for i in del_form.find_all(['input', 'button', 'textarea'])}
delete_article_path = urlparse(urljoin(self.path, del_form['action'])).path
response = self.client.post(delete_article_path, data=data)
self.follow_redirect(response, 303)
def request_feedback(self, feedback_str=u''):
''' Look for form to request feedback, submit it.
'''
body = self.soup.find(lambda tag: bool(tag.name == 'textarea' and tag.get('name') == 'comment_text'))
form = body.find_parent('form')
self.test.assertEqual(form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'')
for i in form.find_all(['input', 'button', 'textarea'])
if i.get('value') != 'Leave a Comment'}
data[body['name']] = feedback_str
save_feedback_path = urlparse(urljoin(self.path, form['action'])).path
response = self.client.post(save_feedback_path, data=data)
# View the saved feedback.
self.follow_redirect(response, 303)
def leave_feedback(self, feedback_str=u''):
''' Look for form to leave feedback, submit it.
'''
body = self.soup.find(lambda tag: bool(tag.name == 'textarea' and tag.get('name') == 'comment_text'))
form = body.find_parent('form')
self.test.assertEqual(form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'')
for i in form.find_all(['input', 'button', 'textarea'])
if i.get('value') != 'Endorse Edits'}
data[body['name']] = feedback_str
save_feedback_path = urlparse(urljoin(self.path, form['action'])).path
response = self.client.post(save_feedback_path, data=data)
# View the saved feedback.
self.follow_redirect(response, 303)
def approve_activity(self):
''' Look for form to approve activity, submit it.
'''
button = self.soup.find(lambda tag: bool(tag.name == 'button' and tag.get('value') == 'Endorse Edits'))
form = button.find_parent('form')
self.test.assertEqual(form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'')
for i in form.find_all(['input', 'button', 'textarea'])
if i.get('value') != 'Leave a Comment'}
approve_activity_path = urlparse(urljoin(self.path, form['action'])).path
response = self.client.post(approve_activity_path, data=data)
# View the saved feedback.
self.follow_redirect(response, 303)
def publish_activity(self, expected_status_code=303):
''' Look for form to publish activity, submit it.
'''
button = self.soup.find(lambda tag: bool(tag.name == 'button' and tag.get('value') == 'Publish'))
form = button.find_parent('form')
self.test.assertEqual(form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'')
for i in form.find_all(['input', 'button', 'textarea'])
if i.get('value') != 'Leave a Comment'}
publish_activity_path = urlparse(urljoin(self.path, form['action'])).path
response = self.client.post(publish_activity_path, data=data)
# View the published activity.
self.follow_redirect(response, expected_status_code)
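
# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal example of how the form helpers above chain together in a test.
# It assumes `client` is an instance of the browsing-helper class these
# methods belong to (its name and setup are not shown in this excerpt) and
# that the page currently being browsed lists the named article.
def _example_review_flow(client):
    # Ask for review, endorse the edits, then publish the activity.
    client.request_feedback(u'Please check the new category description.')
    client.leave_feedback(u'Looks good to me.')
    client.approve_activity()
    client.publish_activity()
    # Clean up the test content afterwards.
    client.delete_article('Example article')
    client.delete_category()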
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
import time
from kubernetes_py import K8sConfig
from kubernetes_py.K8sContainer import K8sContainer
from kubernetes_py.K8sExceptions import NotFoundException, TimedOutException
from kubernetes_py.K8sObject import K8sObject
from kubernetes_py.models.v1.Pod import Pod
from kubernetes_py.models.v1.PodStatus import PodStatus
from kubernetes_py.models.v1.Probe import Probe
from kubernetes_py.utils import is_valid_dict, is_valid_string, is_valid_list
from kubernetes_py.models.v1.Toleration import Toleration
from kubernetes_py.models.unversioned.BaseUrls import BaseUrls
class K8sPod(K8sObject):
POD_READY_TIMEOUT_SECONDS = 120
def __init__(self, config=None, name=None):
super(K8sPod, self).__init__(config=config, obj_type="Pod", name=name)
if self.config.pull_secret is not None:
self.add_image_pull_secrets(self.config.pull_secret)
# ------------------------------------------------------------------------------------- override
def create(self):
super(K8sPod, self).create()
self.get()
self._wait_for_readiness()
return self
def update(self):
super(K8sPod, self).update()
self.get()
self._wait_for_readiness()
return self
def list(self, pattern=None, labels=None):
ls = super(K8sPod, self).list(labels=labels)
pods = list(map(lambda pod: Pod(pod), ls))
if pattern is not None:
pods = list(filter(lambda pod: pattern in pod.name, pods))
k8s = list()
for x in pods:
p = K8sPod(config=self.config, name=x.name).from_model(m=x)
k8s.append(p)
return k8s
# ------------------------------------------------------------------------------------- wait
def _wait_for_readiness(self):
start_time = time.time()
while not self.is_ready():
time.sleep(0.2)
elapsed_time = time.time() - start_time
if elapsed_time >= self.POD_READY_TIMEOUT_SECONDS:
raise TimedOutException("Timed out on Pod readiness: [ {0} ]".format(self.name))
# ------------------------------------------------------------------------------------- add
def add_container(self, container=None):
if not isinstance(container, K8sContainer):
raise SyntaxError("K8sPod.add_container() container: [ {0} ] is invalid.".format(container))
containers = self.model.spec.containers
if container not in containers:
containers.append(container.model)
self.model.spec.containers = containers
return self
def add_image_pull_secrets(self, secrets=None):
self.model.spec.add_image_pull_secrets(secrets)
return self
def add_volume(self, volume=None):
volumes = self.model.spec.volumes
if volume not in volumes:
volumes.append(volume.model)
self.model.spec.volumes = volumes
return self
def add_toleration(self, key=None, value=None, effect=None):
exists = False
for tol in self.tolerations:
if tol.key == key and tol.value == value and tol.effect == effect:
exists = True
if not exists:
tol = Toleration()
tol.key = key
tol.value = value
tol.effect = effect
self.tolerations.append(tol)
return self
# ------------------------------------------------------------------------------------- delete
def del_node_name(self):
self.model.spec.node_name = None
return self
def del_toleration(self, key=None, value=None, effect=None):
remaining_tolerations = []
for tol in self.tolerations:
            # Keep only tolerations that do not match the given (key, value, effect) triple exactly.
            if not (tol.key == key and tol.value == value and tol.effect == effect):
remaining_tolerations.append(tol)
if self.tolerations != remaining_tolerations:
self.tolerations = remaining_tolerations
return self
# ------------------------------------------------------------------------------------- get
def get(self):
self.model = Pod(self.get_model())
return self
# ------------------------------------------------------------------------------------- polling readiness
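    # Readiness criteria used below: the pod phase must be "Running" and every
    # entry in status.conditions must report status == "True". Note that
    # is_ready() refreshes the model via get() on each call, so the polling
    # loop in _wait_for_readiness() issues one API request per iteration.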
def is_ready(self):
self.get()
if self.status is not None and isinstance(self.status, PodStatus):
pod_phase = self.status.phase
conditions = self.status.conditions
conditions_ok = 0
for cond in conditions:
if cond.status == "True":
conditions_ok += 1
if pod_phase == "Running" and len(conditions) == conditions_ok:
return True
return False
# ------------------------------------------------------------------------------------- logs
def get_log(self, container=None):
url = "{base}/{name}/log".format(base=self.base_url, name=self.name)
if container:
url = "{url}?container={container}".format(url=url, container=container)
state = self.request(method="GET", url=url)
if not state.get("success"):
status = state.get("status", "")
reason = state.get("data", dict()).get("message", None)
message = "K8sPod: GET [ {0}:{1} ] failed: HTTP {2} : {3} ".format(self.obj_type, self.name, status, reason)
raise NotFoundException(message)
if "data" in state and state.get("data") is not None:
logs = state.get("data").splitlines()
return logs
return ""
# ------------------------------------------------------------------------------------- metrics
def get_metrics(self):
bu = BaseUrls(api=self.config.version, namespace=self.config.namespace)
base_url = bu.get_base_url(object_type="PodMetrics")
url = "{base}/{name}".format(base=base_url, name=self.name)
state = self.request(method="GET", url=url)
if not state.get("success"):
status = state.get("status", "")
reason = state.get("data", dict()).get("message", None)
message = "K8sPod: GET [ {0}:{1} ] failed: HTTP {2} : {3} ".format(self.obj_type, self.name, status, reason)
raise NotFoundException(message)
if "data" in state and state.get("data") is not None:
return state.get("data")
return ""
# ------------------------------------------------------------------------------------- set
def set_container_image(self, name=None, image=None):
containers = []
for c in self.model.spec.containers:
if c.name == name:
c.image = image
containers.append(c)
self.model.spec.containers = containers
return self
# ------------------------------------------------------------------------------------- activeDeadline
@property
def active_deadline(self):
return self.model.spec.active_deadline_seconds
@active_deadline.setter
def active_deadline(self, secs=None):
self.model.spec.active_deadline_seconds = secs
# ------------------------------------------------------------------------------------- automountServiceAccountToken
@property
def automount_service_account_token(self):
return self.model.spec.automount_service_account_token
@automount_service_account_token.setter
def automount_service_account_token(self, automount=None):
self.model.spec.automount_service_account_token = automount
# ------------------------------------------------------------------------------------- containers
@property
def containers(self):
_list = []
for c in self.model.spec.containers:
k8scontainer = K8sContainer(name=c.name, image=c.image)
k8scontainer.model = c
_list.append(k8scontainer)
return _list
@containers.setter
def containers(self, containers=None):
        if not is_valid_list(containers, K8sContainer):
            raise SyntaxError("K8sPod: containers: [ {0} ] is invalid.".format(containers))
        self.model.spec.containers = [x.model for x in containers]
# ------------------------------------------------------------------------------------- dnsPolicy
@property
def dns_policy(self):
return self.model.spec.dns_policy
@dns_policy.setter
def dns_policy(self, policy=None):
self.model.spec.dns_policy = policy
# ------------------------------------------------------------------------------------- generateName
@property
def generate_name(self):
return self.model.metadata.generate_name
@generate_name.setter
def generate_name(self, name=None):
self.model.metadata.generate_name = name
# ------------------------------------------------------------------------------------- namespace
@property
def namespace(self):
return self.model.metadata.namespace
@namespace.setter
def namespace(self, nspace=None):
self.model.metadata.namespace = nspace
# ------------------------------------------------------------------------------------- nodeName
@property
def node_name(self):
return self.model.spec.node_name
@node_name.setter
def node_name(self, name=None):
self.model.spec.node_name = name
# ------------------------------------------------------------------------------------- nodeSelector
@property
def node_selector(self):
return self.model.spec.node_selector
@node_selector.setter
def node_selector(self, selector=None):
self.model.spec.node_selector = selector
# ------------------------------------------------------------------------------------- livenessProbes
@property
def liveness_probes(self):
data = {}
containers = self.model.spec.containers
for c in containers:
if c.liveness_probe is not None:
data[c.name] = c.liveness_probe
return data
@liveness_probes.setter
def liveness_probes(self, tup=None):
if not isinstance(tup, tuple):
raise SyntaxError("K8sPod: liveness_probes: [ {} ] is invalid.".format(tup))
c_name, probe = tup
container_names = [c.name for c in self.model.spec.containers]
if c_name not in container_names:
raise SyntaxError("K8sPod: liveness_probes: container [ {} ] not found.".format(c_name))
if not isinstance(probe, Probe):
raise SyntaxError("K8sPod: liveness_probe: probe: [ {} ] is invalid.".format(probe))
containers = []
for c in self.model.spec.containers:
if c.name == c_name:
c.liveness_probe = probe
containers.append(c)
        self.model.spec.containers = containers
# ------------------------------------------------------------------------------------- readinessProbes
@property
def readiness_probes(self):
data = {}
containers = self.model.spec.containers
for c in containers:
if c.readiness_probe is not None:
data[c.name] = c.readiness_probe
return data
@readiness_probes.setter
def readiness_probes(self, tup=None):
if not isinstance(tup, tuple):
raise SyntaxError("K8sPod: readiness_probes: [ {} ] is invalid.".format(tup))
c_name, probe = tup
        container_names = [c.name for c in self.model.spec.containers]
if c_name not in container_names:
raise SyntaxError("K8sPod: readiness_probes: container [ {} ] not found.".format(c_name))
if not isinstance(probe, Probe):
raise SyntaxError("K8sPod: readiness_probes: probe: [ {} ] is invalid.".format(probe))
containers = []
        for c in self.model.spec.containers:
if c.name == c_name:
c.readiness_probe = probe
containers.append(c)
        self.model.spec.containers = containers
# ------------------------------------------------------------------------------------- restartPolicy
@property
def restart_policy(self):
return self.model.spec.restart_policy
@restart_policy.setter
def restart_policy(self, policy=None):
self.model.spec.restart_policy = policy
# ------------------------------------------------------------------------------------- serviceAccountName
@property
def service_account_name(self):
return self.model.spec.service_account_name
@service_account_name.setter
def service_account_name(self, name=None):
self.model.spec.service_account_name = name
# ------------------------------------------------------------------------------------- status
@property
def status(self):
self.get()
return self.model.status
@status.setter
def status(self, status=None):
self.model.status = status
# ------------------------------------------------------------------------------------- terminationGracePeriod
@property
def termination_grace_period(self):
return self.model.spec.termination_grace_period_seconds
@termination_grace_period.setter
def termination_grace_period(self, secs=None):
self.model.spec.termination_grace_period_seconds = secs
# ------------------------------------------------------------------------------------- volumes
@property
def volumes(self):
return self.model.spec.volumes
@volumes.setter
def volumes(self, v=None):
self.model.spec.volumes = v
# ------------------------------------------------------------------------------------- start time
@property
def start_time(self):
return self.model.status.start_time
@start_time.setter
def start_time(self, t=None):
raise NotImplementedError()
# ------------------------------------------------------------------------------------- phase
@property
def phase(self):
return self.model.status.phase
@phase.setter
def phase(self, p=None):
raise NotImplementedError()
# ------------------------------------------------------------------------------------- affinity
@property
def affinity(self):
return self.model.spec.affinity
@affinity.setter
def affinity(self, a):
self.model.spec.affinity = a
# ------------------------------------------------------------------------------------- tolerations
@property
def tolerations(self):
return self.model.spec.tolerations
@tolerations.setter
def tolerations(self, t=None):
self.model.spec.tolerations = t
# ------------------------------------------------------------------------------------- host_ip
@property
def host_ip(self):
return self.model.status.host_ip
@host_ip.setter
def host_ip(self, ip=None):
raise NotImplementedError()
# ------------------------------------------------------------------------------------- pod_ip
@property
def pod_ip(self):
return self.model.status.pod_ip
@pod_ip.setter
def pod_ip(self, ip=None):
raise NotImplementedError()
# ------------------------------------------------------------------------------------- filtering
@classmethod
def get_by_name(cls, config=None, name=None, name_label="name"):
if not is_valid_string(name):
raise SyntaxError("K8sPod.get_by_name(): name: [ {0} ] is invalid.".format(name))
return cls.get_by_labels(config=config, labels={name_label: name,})
@staticmethod
def get_by_labels(config=None, labels=None):
if config is None:
config = K8sConfig()
if not is_valid_dict(labels):
raise SyntaxError("K8sPod.get_by_labels(): labels: [ {} ] is invalid.".format(labels))
pods = K8sPod(config=config, name="whatever").list(labels=labels)
return pods
@staticmethod
def get_by_pod_ip(config=None, ip=None, labels=None):
if config is None:
config = K8sConfig()
if not is_valid_string(ip):
raise SyntaxError("K8sPod.get_by_pod_ip(): ip: [ {0} ] is invalid.".format(ip))
found = None
pods = K8sPod(config=config, name="throwaway").list(labels=labels)
for pod in pods:
try:
assert isinstance(pod, K8sPod)
if pod.pod_ip == ip:
found = pod
break
except NotFoundException:
pass
return found
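
# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal example of creating a pod and reading its logs with the class
# above. The kubeconfig path, image and names are placeholders, and the
# K8sConfig/delete() details are assumptions about the surrounding library.
def _example_create_nginx_pod():
    cfg = K8sConfig(kubeconfig="~/.kube/config")     # path is an assumption
    container = K8sContainer(name="web", image="nginx:1.25")
    pod = K8sPod(config=cfg, name="example-web")
    pod.add_container(container)
    try:
        pod.create()                 # POSTs the pod, then polls is_ready()
        return pod.get_log(container="web")
    except TimedOutException:
        pod.delete()                 # assumes the base K8sObject exposes delete()
        raise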
|
|
# Copyright (c) 2009-2012, Geoffrey Biggs
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Geoffrey Biggs nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# File: pkgsearcher.py
# Author: Geoffrey Biggs
# Part of pykg-config.
"""Searches for a pkg-config file matching a given specification.
"""
__version__ = "$Revision: $"
# $Source$
from os import getenv, listdir
from os.path import isdir, isfile, join, split, splitext
import sys
if sys.platform == 'win32':
if sys.version_info[0] < 3:
import _winreg
else:
import winreg as _winreg
from pykg_config.exceptions import PykgConfigError
from pykg_config.options import Options
from pykg_config.errorprinter import ErrorPrinter
from pykg_config.package import Package
from pykg_config.substitute import UndefinedVarError
try:
from pykg_config.install_config import pc_path
except ImportError:
# If the install_config module is not available (which is the case when
# running from the source instead of an installed version), use defaults
pc_path = None
##############################################################################
# Exceptions
class PackageNotFoundError(PykgConfigError):
"""A .pc file matching the given package name could not be found.
Attributes:
pkgname -- The name of the package that could not be found.
"""
def __init__(self, pkgname):
self.pkgname = pkgname
def __str__(self):
return "No package '{0}' found".format(self.pkgname)
class NoOpenableFilesError(PackageNotFoundError):
pass
class BadPathError(PykgConfigError):
"""A specified path is bad in some way.
Attributes:
path - The bad path.
"""
def __init__(self, path):
self.path = path
def __str__(self):
return 'Bad path: {0}'.format(self.path)
class NotAFileError(BadPathError):
pass
class NotAPCFileError(BadPathError):
pass
class TargetTriple:
__slots__ = ("arch", "bitness", "os", "abi")
def __init__(self, arch = None, os = None, abi = None, bitness = None) -> None:
        # sys.implementation._multiarch only exists on some interpreter builds
        # (e.g. Debian-style Linux); fall back to a generic triple elsewhere so
        # that importing this module does not fail on those platforms.
        multiarch = getattr(getattr(sys, "implementation", None), "_multiarch", None)
        pythonTripl = (multiarch or "unknown-unknown-unknown").split("-")
self.arch = arch if arch is not None else pythonTripl[0]
self.os = os if os is not None else pythonTripl[1]
self.abi = abi if abi is not None else pythonTripl[2]
self.bitness = bitness
def __str__(self) -> str:
return "-".join((self.arch, self.os, self.abi))
thisArchTriple = TargetTriple()
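# On a Debian-style 64-bit Linux CPython build, str(thisArchTriple) typically
# yields something like "x86_64-linux-gnu" (illustrative; the exact value
# depends on the interpreter build, see TargetTriple.__init__ above).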
##############################################################################
# PkgSearcher object
class PkgSearcher:
def __init__(self, globals):
# This is a dictionary of packages found in the search path. Each
# package name is linked to a list of full paths to .pc files, in
# order of priority. Earlier in the list is preferred over later.
self._known_pkgs = {}
self.globals = globals
self._init_search_dirs()
def search_for_package(self, dep, globals):
"""Search for a package matching the given dependency specification
(name and version restriction). Raise PackageNotFoundError if no
matching package is found.
Returns a parsed package object.
"""
# Get a list of pc files matching the package name
if isfile(dep.name) and splitext(dep.name)[1] == '.pc':
# No need to search for a pc file
ErrorPrinter().debug_print('Using provided pc file %s', (dep.name))
pcfiles = [dep.name]
else:
ErrorPrinter().debug_print('Searching for package matching %s', (dep))
pcfiles = self.search_for_pcfile(dep.name)
ErrorPrinter().debug_print('Found .pc files: %s', (str(pcfiles)))
if not pcfiles:
raise PackageNotFoundError(str(dep))
# Filter the list by those files that meet the version specification
pkgs = []
for pcfile in pcfiles:
try:
pkgs.append(Package(pcfile, globals))
except IOError as e:
ErrorPrinter().verbose_error("Failed to open '{0}': \
{1}".format(pcfile, e.strerror))
continue
except UndefinedVarError as e:
raise UndefinedVarError(e.variable, pcfile)
if not pkgs and pcfiles:
# Raise an error indicating that all pc files we could try were
# unopenable. This is necessary to match pkg-config's odd lack of
# the standard "Package not found" error when a bad file is
            # encountered.
raise NoOpenableFilesError(str(dep))
pkgs = [pkg for pkg in pkgs \
if dep.meets_requirement(pkg.properties['version'])]
ErrorPrinter().debug_print('Filtered to %s',
([pkg.properties['name'] for pkg in pkgs]))
if not pkgs:
raise PackageNotFoundError(str(dep))
return pkgs[0]
def search_for_pcfile(self, pkgname):
"""Search for one or more pkg-config files matching the given
package name. If a matching pkg-config file cannot be found,
an empty list will be returned.
The dictionary of known packages is stored in _known_pkgs and is
initialised by calling init_search_dirs().
"""
ErrorPrinter().debug_print('Looking for files matching %s', (pkgname))
if Options().get_option('prefer_uninstalled'):
if pkgname + '-uninstalled' in self._known_pkgs:
# Prefer uninstalled version of a package
ErrorPrinter().debug_print('Using uninstalled package %s',
(self._known_pkgs[pkgname + '-uninstalled']))
return self._known_pkgs[pkgname + '-uninstalled']
elif Options().get_option('uninstalled_only'):
ErrorPrinter().debug_print('Uninstalled only, no suitable package.')
return []
if pkgname in self._known_pkgs:
ErrorPrinter().debug_print('Using any package: %s',
(self._known_pkgs[pkgname]))
return self._known_pkgs[pkgname]
else:
ErrorPrinter().debug_print('No suitable package found')
return []
def known_packages_list(self):
"""Return a list of all packages found on the system, giving a name and
a description (from the .pc file) for each, and also a list of any
errors encountered.
"""
result = []
errors = []
for pkgname in self._known_pkgs:
# Use the highest-priority version of the package
try:
pkg = Package(self._known_pkgs[pkgname][0])
except IOError as e:
ErrorPrinter().verbose_error("Failed to open '{0}': \
{1}".format(self._known_pkgs[pkgname][0], e.strerror))
continue
except UndefinedVarError as e:
errors.append("Variable '{0}' not defined in '{1}'".format(e,
self._known_pkgs[pkgname][0]))
continue
result.append((pkgname, pkg.properties['name'], pkg.properties['description']))
return result, errors
def _init_search_dirs(self):
# Append dirs in PKG_CONFIG_PATH
if "config_path" in self.globals and self.globals["config_path"]:
for d in self.globals["config_path"]:
if not d or not isdir(d):
continue
self._append_packages(d)
# Append dirs in PKG_CONFIG_LIBDIR
if "config_libdir" in self.globals and self.globals["config_libdir"]:
for d in self.globals["config_libdir"]:
if not d or not isdir(d):
continue
self._append_packages(d)
if sys.platform == 'win32':
key_path = 'Software\\pkg-config\\PKG_CONFIG_PATH'
for root in ((_winreg.HKEY_CURRENT_USER, 'HKEY_CURRENT_USER'),
(_winreg.HKEY_LOCAL_MACHINE, 'HKEY_LOCAL_MACHINE')):
try:
key = _winreg.OpenKey(root[0], key_path)
except WindowsError as e:
ErrorPrinter().debug_print('Failed to add paths from \
{0}\\{1}: {2}'.format(root[1], key_path, e))
continue
try:
num_subkeys, num_vals, modified = _winreg.QueryInfoKey(key)
for ii in range(num_vals):
name, val, type = _winreg.EnumValue(key, ii)
if type == _winreg.REG_SZ and isdir(val):
self._append_packages(val)
except WindowsError as e:
ErrorPrinter().debug_print('Failed to add paths from \
{0}\\{1}: {2}'.format(root[1], key_path, e))
finally:
_winreg.CloseKey(key)
# Default path: If a hard-coded path has been set, use that (excluding
# paths that don't exist)
if "prefix" in self.globals:
prefix = self.globals["prefix"]
else:
prefix = sys.prefix
if pc_path:
for d in pc_path.split(self._split_char()):
if d and isdir(d):
self._append_packages(d)
# Default path: Else append prefix/lib/pkgconfig, prefix/share/pkgconfig
else:
if Options().get_option('is_64bit'):
suffix = '64'
else:
suffix = ''
dirs2check = (
join(prefix, 'lib' + suffix),
join(prefix, 'lib', str(thisArchTriple)),
join(prefix, 'share'),
join(prefix, "lib")
)
for d in dirs2check:
d = join(d, "pkgconfig")
if isdir(d):
self._append_packages(d)
def _append_packages(self, d):
ErrorPrinter().debug_print('Adding .pc files from %s to known packages',
(d))
files = listdir(d)
for filename in files:
if filename.endswith('.pc'):
# Test if the file can be opened (pkg-config glosses over,
# e.g. links that are now dead, as if they were never there).
full_path = join(d, filename)
name = filename[:-3]
if name in self._known_pkgs:
if full_path not in self._known_pkgs[name]:
self._known_pkgs[name].append(full_path)
ErrorPrinter().debug_print('Package %s has a duplicate file: %s',
(name, self._known_pkgs[name]))
else:
self._known_pkgs[name] = [full_path]
def _split_char(self):
# Get the character used to split a list of directories.
if sys.platform == 'win32':
return ';'
return ':'
def _can_open_file(self, filename):
try:
result = open(filename, 'r')
except IOError as e:
ErrorPrinter().debug_print('Could not open {0}'.format(filename))
search_string = Options().get_option('search_string').split()
if (not search_string and \
Options().get_option('command') == 'list-all') or \
True in [p.startswith(split(filename)[-1].split('.')[0]) \
for p in search_string]:
ErrorPrinter().verbose_error("Failed to open '{0}': {1}".format(filename,
e.strerror))
return False
return True
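
# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal example of driving PkgSearcher directly. The layout of the
# `globals` dict (config_path / config_libdir / prefix keys) is inferred from
# _init_search_dirs() above; real runs normally go through pykg-config's
# option parsing, which also initialises the Options() singleton consulted by
# search_for_pcfile().
def _example_lookup(pkgname='zlib'):
    searcher = PkgSearcher({
        'config_path': ['/usr/local/lib/pkgconfig'],    # placeholder path
        'config_libdir': [],
        'prefix': sys.prefix,
    })
    pcfiles = searcher.search_for_pcfile(pkgname)
    return pcfiles[0] if pcfiles else None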
# vim: tw=79
|
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import unittest, json
import importlib
from frappe.modules import load_doctype_module, get_module_name
from frappe.utils import cstr
def main(app=None, module=None, doctype=None, verbose=False, tests=()):
frappe.flags.print_messages = verbose
frappe.flags.in_test = True
if not frappe.db:
frappe.connect()
if not frappe.conf.get("db_name").startswith("test_"):
        raise Exception('db_name must start with "test_"')
# workaround! since there is no separate test db
frappe.clear_cache()
if verbose:
print 'Running "before_tests" hooks'
for fn in frappe.get_hooks("before_tests", app_name=app):
frappe.get_attr(fn)()
if doctype:
ret = run_tests_for_doctype(doctype, verbose=verbose, tests=tests)
elif module:
ret = run_tests_for_module(module, verbose=verbose, tests=tests)
else:
ret = run_all_tests(app, verbose)
frappe.db.commit()
# workaround! since there is no separate test db
frappe.clear_cache()
return ret
def run_all_tests(app=None, verbose=False):
import os
apps = [app] if app else frappe.get_installed_apps()
test_suite = unittest.TestSuite()
for app in apps:
for path, folders, files in os.walk(frappe.get_pymodule_path(app)):
for dontwalk in ('locals', '.git', 'public'):
if dontwalk in folders:
folders.remove(dontwalk)
# print path
for filename in files:
filename = cstr(filename)
if filename.startswith("test_") and filename.endswith(".py"):
# print filename[:-3]
_add_test(path, filename, verbose, test_suite=test_suite)
return unittest.TextTestRunner(verbosity=1+(verbose and 1 or 0)).run(test_suite)
def run_tests_for_doctype(doctype, verbose=False, tests=()):
module = frappe.db.get_value("DocType", doctype, "module")
test_module = get_module_name(doctype, module, "test_")
make_test_records(doctype, verbose=verbose)
module = frappe.get_module(test_module)
return _run_unittest(module, verbose=verbose, tests=tests)
def run_tests_for_module(module, verbose=False, tests=()):
module = importlib.import_module(module)
if hasattr(module, "test_dependencies"):
for doctype in module.test_dependencies:
make_test_records(doctype, verbose=verbose)
return _run_unittest(module=module, verbose=verbose, tests=tests)
def _run_unittest(module, verbose=False, tests=()):
test_suite = unittest.TestSuite()
module_test_cases = unittest.TestLoader().loadTestsFromModule(module)
if tests:
for each in module_test_cases:
for test_case in each.__dict__["_tests"]:
if test_case.__dict__["_testMethodName"] in tests:
test_suite.addTest(test_case)
else:
test_suite.addTest(module_test_cases)
return unittest.TextTestRunner(verbosity=1+(verbose and 1 or 0)).run(test_suite)
def _add_test(path, filename, verbose, test_suite=None):
import os, imp
if not test_suite:
test_suite = unittest.TestSuite()
if os.path.basename(os.path.dirname(path))=="doctype":
txt_file = os.path.join(path, filename[5:].replace(".py", ".json"))
with open(txt_file, 'r') as f:
doc = json.loads(f.read())
doctype = doc["name"]
make_test_records(doctype, verbose)
module = imp.load_source(filename[:-3], os.path.join(path, filename))
test_suite.addTest(unittest.TestLoader().loadTestsFromModule(module))
def make_test_records(doctype, verbose=0):
frappe.flags.mute_emails = True
if not frappe.db:
frappe.connect()
for options in get_dependencies(doctype):
if options == "[Select]":
continue
if options not in frappe.local.test_objects:
frappe.local.test_objects[options] = []
make_test_records(options, verbose)
make_test_records_for_doctype(options, verbose)
def get_modules(doctype):
module = frappe.db.get_value("DocType", doctype, "module")
try:
test_module = load_doctype_module(doctype, module, "test_")
if test_module:
reload(test_module)
except ImportError:
test_module = None
return module, test_module
def get_dependencies(doctype):
module, test_module = get_modules(doctype)
meta = frappe.get_meta(doctype)
link_fields = meta.get_link_fields()
for df in meta.get_table_fields():
link_fields.extend(frappe.get_meta(df.options).get_link_fields())
options_list = [df.options for df in link_fields] + [doctype]
if hasattr(test_module, "test_dependencies"):
options_list += test_module.test_dependencies
options_list = list(set(options_list))
if hasattr(test_module, "test_ignore"):
for doctype_name in test_module.test_ignore:
if doctype_name in options_list:
options_list.remove(doctype_name)
return options_list
def make_test_records_for_doctype(doctype, verbose=0):
module, test_module = get_modules(doctype)
if verbose:
print "Making for " + doctype
if hasattr(test_module, "_make_test_records"):
frappe.local.test_objects[doctype] += test_module._make_test_records(verbose)
elif hasattr(test_module, "test_records"):
frappe.local.test_objects[doctype] += make_test_objects(doctype, test_module.test_records, verbose)
else:
test_records = frappe.get_test_records(doctype)
if test_records:
frappe.local.test_objects[doctype] += make_test_objects(doctype, test_records, verbose)
elif verbose:
print_mandatory_fields(doctype)
def make_test_objects(doctype, test_records, verbose=None):
records = []
if not frappe.get_meta(doctype).issingle:
existing = frappe.get_list(doctype, filters={"name":("like", "_T-" + doctype + "-%")})
if existing:
return [d.name for d in existing]
existing = frappe.get_list(doctype, filters={"name":("like", "_Test " + doctype + "%")})
if existing:
return [d.name for d in existing]
for doc in test_records:
if not doc.get("doctype"):
doc["doctype"] = doctype
d = frappe.copy_doc(doc)
if doc.get('name'):
d.name = doc.get('name')
if frappe.local.test_objects.get(d.doctype):
# do not create test records, if already exists
return []
if d.meta.get_field("naming_series"):
if not d.naming_series:
d.naming_series = "_T-" + d.doctype + "-"
# submit if docstatus is set to 1 for test record
docstatus = d.docstatus
d.docstatus = 0
try:
d.insert()
if docstatus == 1:
d.submit()
except frappe.NameError:
pass
records.append(d.name)
frappe.db.commit()
return records
def print_mandatory_fields(doctype):
print "Please setup make_test_records for: " + doctype
print "-" * 60
meta = frappe.get_meta(doctype)
print "Autoname: " + (meta.autoname or "")
print "Mandatory Fields: "
for d in meta.get("fields", {"reqd":1}):
print d.parent + ":" + d.fieldname + " | " + d.fieldtype + " | " + (d.options or "")
print
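
# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal example of invoking the runner above, roughly what a bench/CLI
# wrapper does. The site name and doctype are placeholders, and frappe.init()
# is an assumption about how the site context is normally set up; main()
# refuses to run unless the connected database is named test_*.
def _example_run_doctype_tests():
    frappe.init(site="test_site")    # site name is an assumption
    result = main(app="frappe", doctype="ToDo", verbose=True)
    return result.wasSuccessful()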
|
|
import base64
import importlib
import json
import random
import re
import string
import unicodedata
from collections import OrderedDict
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.core.serializers.json import DjangoJSONEncoder
from django.core.validators import ValidationError, validate_email
from django.db.models import FieldDoesNotExist, FileField
from django.db.models.fields import (
BinaryField,
DateField,
DateTimeField,
EmailField,
TimeField,
)
from django.utils import dateparse, six
from django.utils.encoding import force_bytes, force_text
from django.utils.six.moves.urllib.parse import urlsplit
# Magic number 7: if you run into collisions with this number, then you are
# of big enough scale to start investing in a decent user model...
MAX_USERNAME_SUFFIX_LENGTH = 7
USERNAME_SUFFIX_CHARS = (
[string.digits] * 4 +
[string.ascii_letters] * (MAX_USERNAME_SUFFIX_LENGTH - 4))
def _generate_unique_username_base(txts, regex=None):
from .account.adapter import get_adapter
adapter = get_adapter()
username = None
regex = regex or r'[^\w\s@+.-]'
for txt in txts:
if not txt:
continue
username = unicodedata.normalize('NFKD', force_text(txt))
username = username.encode('ascii', 'ignore').decode('ascii')
username = force_text(re.sub(regex, '', username).lower())
        # Django allows '@' in usernames in order to accommodate projects
        # wanting to use e-mail as the username. In allauth we don't
# use this, we already have a proper place for putting e-mail
# addresses (EmailAddress), so let's not use the full e-mail
# address and only take the part leading up to the '@'.
username = username.split('@')[0]
username = username.strip()
username = re.sub(r'\s+', '_', username)
# Finally, validating base username without database lookups etc.
try:
username = adapter.clean_username(username, shallow=True)
break
except ValidationError:
pass
return username or 'user'
def get_username_max_length():
from .account.app_settings import USER_MODEL_USERNAME_FIELD
if USER_MODEL_USERNAME_FIELD is not None:
User = get_user_model()
max_length = User._meta.get_field(USER_MODEL_USERNAME_FIELD).max_length
else:
max_length = 0
return max_length
def generate_username_candidate(basename, suffix_length):
max_length = get_username_max_length()
suffix = ''.join(
random.choice(USERNAME_SUFFIX_CHARS[i])
for i in range(suffix_length))
return basename[0:max_length - len(suffix)] + suffix
def generate_username_candidates(basename):
ret = [basename]
max_suffix_length = min(
get_username_max_length(),
MAX_USERNAME_SUFFIX_LENGTH)
for suffix_length in range(2, max_suffix_length):
ret.append(generate_username_candidate(basename, suffix_length))
return ret
def generate_unique_username(txts, regex=None):
from .account.app_settings import USER_MODEL_USERNAME_FIELD
from .account.adapter import get_adapter
from allauth.account.utils import filter_users_by_username
adapter = get_adapter()
basename = _generate_unique_username_base(txts, regex)
candidates = generate_username_candidates(basename)
existing_usernames = filter_users_by_username(*candidates).values_list(
USER_MODEL_USERNAME_FIELD, flat=True)
existing_usernames = set([n.lower() for n in existing_usernames])
for candidate in candidates:
if candidate.lower() not in existing_usernames:
try:
return adapter.clean_username(candidate, shallow=True)
except ValidationError:
pass
# This really should not happen
raise NotImplementedError('Unable to find a unique username')
def valid_email_or_none(email):
ret = None
try:
if email:
validate_email(email)
if len(email) <= EmailField().max_length:
ret = email
except ValidationError:
pass
return ret
def email_address_exists(email, exclude_user=None):
from .account import app_settings as account_settings
from .account.models import EmailAddress
emailaddresses = EmailAddress.objects
if exclude_user:
emailaddresses = emailaddresses.exclude(user=exclude_user)
ret = emailaddresses.filter(email__iexact=email).exists()
if not ret:
email_field = account_settings.USER_MODEL_EMAIL_FIELD
if email_field:
users = get_user_model().objects
if exclude_user:
users = users.exclude(pk=exclude_user.pk)
ret = users.filter(**{email_field + '__iexact': email}).exists()
return ret
def import_attribute(path):
assert isinstance(path, six.string_types)
pkg, attr = path.rsplit('.', 1)
ret = getattr(importlib.import_module(pkg), attr)
return ret
def import_callable(path_or_callable):
if not hasattr(path_or_callable, '__call__'):
ret = import_attribute(path_or_callable)
else:
ret = path_or_callable
return ret
SERIALIZED_DB_FIELD_PREFIX = '_db_'
def serialize_instance(instance):
"""
Since Django 1.6 items added to the session are no longer pickled,
but JSON encoded by default. We are storing partially complete models
in the session (user, account, token, ...). We cannot use standard
    Django serialization, as these models are not "complete" yet.
Serialization will start complaining about missing relations et al.
"""
data = {}
for k, v in instance.__dict__.items():
if k.startswith('_') or callable(v):
continue
try:
field = instance._meta.get_field(k)
if isinstance(field, BinaryField):
v = force_text(base64.b64encode(v))
elif isinstance(field, FileField):
if v and not isinstance(v, six.string_types):
v = v.name
# Check if the field is serializable. If not, we'll fall back
# to serializing the DB values which should cover most use cases.
try:
json.dumps(v, cls=DjangoJSONEncoder)
except TypeError:
v = field.get_prep_value(v)
k = SERIALIZED_DB_FIELD_PREFIX + k
except FieldDoesNotExist:
pass
data[k] = v
return json.loads(json.dumps(data, cls=DjangoJSONEncoder))
def deserialize_instance(model, data):
ret = model()
for k, v in data.items():
is_db_value = False
if k.startswith(SERIALIZED_DB_FIELD_PREFIX):
k = k[len(SERIALIZED_DB_FIELD_PREFIX):]
is_db_value = True
if v is not None:
try:
f = model._meta.get_field(k)
if isinstance(f, DateTimeField):
v = dateparse.parse_datetime(v)
elif isinstance(f, TimeField):
v = dateparse.parse_time(v)
elif isinstance(f, DateField):
v = dateparse.parse_date(v)
elif isinstance(f, BinaryField):
v = force_bytes(
base64.b64decode(
force_bytes(v)))
elif is_db_value:
try:
# This is quite an ugly hack, but will cover most
# use cases...
v = f.from_db_value(v, None, None, None)
except Exception:
raise ImproperlyConfigured(
"Unable to auto serialize field '{}', custom"
" serialization override required".format(k)
)
except FieldDoesNotExist:
pass
setattr(ret, k, v)
return ret
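
# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal example of the serialize/deserialize round trip above, as used
# when stashing a not-yet-saved user in the session. The field values and the
# session key are placeholders.
def _example_session_roundtrip(request):
    User = get_user_model()
    user = User(username='alice', email='alice@example.org')   # unsaved instance
    request.session['pending_user'] = serialize_instance(user)
    restored = deserialize_instance(User, request.session['pending_user'])
    return restored.email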
def set_form_field_order(form, field_order):
"""
This function is a verbatim copy of django.forms.Form.order_fields() to
support field ordering below Django 1.9.
field_order is a list of field names specifying the order. Append fields
not included in the list in the default order for backward compatibility
with subclasses not overriding field_order. If field_order is None, keep
all fields in the order defined in the class. Ignore unknown fields in
field_order to allow disabling fields in form subclasses without
redefining ordering.
"""
if field_order is None:
return
fields = OrderedDict()
for key in field_order:
try:
fields[key] = form.fields.pop(key)
except KeyError: # ignore unknown fields
pass
fields.update(form.fields) # add remaining fields in original order
form.fields = fields
def build_absolute_uri(request, location, protocol=None):
"""request.build_absolute_uri() helper
Like request.build_absolute_uri, but gracefully handling
the case where request is None.
"""
from .account import app_settings as account_settings
if request is None:
site = Site.objects.get_current()
bits = urlsplit(location)
if not (bits.scheme and bits.netloc):
uri = '{proto}://{domain}{url}'.format(
proto=account_settings.DEFAULT_HTTP_PROTOCOL,
domain=site.domain,
url=location)
else:
uri = location
else:
uri = request.build_absolute_uri(location)
# NOTE: We only force a protocol if we are instructed to do so
    # (via the `protocol` parameter, or if the default is set to
    # HTTPS). The latter keeps compatibility with the debatable use
# case of running your site under both HTTP and HTTPS, where one
# would want to make sure HTTPS links end up in password reset
# mails even while they were initiated on an HTTP password reset
# form.
if not protocol and account_settings.DEFAULT_HTTP_PROTOCOL == 'https':
protocol = account_settings.DEFAULT_HTTP_PROTOCOL
# (end NOTE)
if protocol:
uri = protocol + ':' + uri.partition(':')[2]
return uri
def get_form_class(forms, form_id, default_form):
form_class = forms.get(form_id, default_form)
if isinstance(form_class, six.string_types):
form_class = import_attribute(form_class)
return form_class
def get_request_param(request, param, default=None):
return request.POST.get(param) or request.GET.get(param, default)
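
# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal example of build_absolute_uri() with request=None: the current
# Site domain and DEFAULT_HTTP_PROTOCOL are used to absolutise the location.
# The path and resulting URL shown are illustrative only.
def _example_confirmation_link(key):
    # e.g. 'https://example.com/accounts/confirm-email/<key>/' when the
    # current Site is example.com and DEFAULT_HTTP_PROTOCOL is 'https'.
    return build_absolute_uri(None, '/accounts/confirm-email/{0}/'.format(key))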
|
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import inspect
import re
import textwrap
from builtins import object, range
from collections import OrderedDict, namedtuple
from pants.base.exceptions import TaskError
from pants.build_graph.target import Target
from pants.util.memo import memoized_method
class FunctionArg(namedtuple('_FunctionArg', ['name', 'description', 'has_default', 'default'])):
"""An argument to a function."""
pass
class BuildSymbolInfo(namedtuple('_BuildSymbolInfo',
['symbol', 'description', 'details_lines', 'args'])):
"""A container for help information about a symbol that can be used in a BUILD file.
symbol: The name of the symbol.
description: A single line of text providing a summary description.
details_lines: A list of lines of text providing further details (possibly empty).
args: A list of FunctionArg instances.
"""
def details(self):
return '\n'.join(self.details_lines)
class BuildDictionaryInfoExtracter(object):
"""Extracts help information about the symbols that may be used in BUILD files."""
ADD_DESCR = '<Add description>'
basic_target_args = [
FunctionArg('dependencies', '', True, []),
FunctionArg('description', '', True, None),
FunctionArg('name', '', False, None),
FunctionArg('no_cache', '', True, False),
FunctionArg('tags', '', True, None),
]
@classmethod
def get_description_from_docstring(cls, obj):
"""Returns a pair (description, details) from the obj's docstring.
description is a single line.
details is a list of subsequent lines, possibly empty.
"""
doc = obj.__doc__ or ''
p = doc.find('\n')
if p == -1:
return doc, []
else:
description = doc[:p]
details = textwrap.dedent(doc[p+1:]).splitlines()
# Remove leading and trailing empty lines.
while details and not details[0].strip():
details = details[1:]
while details and not details[-1].strip():
details.pop()
recording = True
details_without_params = []
for detail_line in details:
if ":param" in detail_line:
recording = False
if not detail_line.strip():
recording = True
if recording:
details_without_params.append(detail_line)
return description, details_without_params
@classmethod
@memoized_method
def _get_stanza_first_line_re(cls):
"""Returns a regex that can be used to find the first line of a stanza in a docstring.
The returned regex can be used to find the first line where there is not a data type
in the arg name (e.g., :param a:), where there is a data type in the arg name
(e.g., :param str a:), where there is a single word between the colons (e.g., :returns:),
and where a newline immediately follows the second colon in the stanza.
"""
    return re.compile(r':(\w+)\s*(\w+\s+)?(\w*):\s*(.*)')
@classmethod
@memoized_method
def _get_default_value_re(cls):
    return re.compile(r' \([Dd]efault: (.*)\)')
@classmethod
def get_arg_descriptions_from_docstring(cls, obj):
"""Returns an ordered map of arg name -> arg description found in :param: stanzas."""
ret = OrderedDict()
name = ''
doc = obj.__doc__ or ''
lines = [s.strip() for s in doc.split('\n')]
stanza_first_line_re = cls._get_stanza_first_line_re()
for line in lines:
m = stanza_first_line_re.match(line)
if m and m.group(1) == 'param':
# If first line of a parameter description, set name and description.
name, description = m.group(3, 4)
ret[name] = description
elif m and m.group(1) != 'param':
# If first line of a description of an item other than a parameter, clear name.
name = ''
elif name and line:
# If subsequent line of a parameter description, add to existing description (if any) for
# that parameter.
ret[name] += (' ' + line) if ret[name] else line
# Ignore subsequent lines of descriptions of items other than parameters.
return ret
@classmethod
def get_args_for_target_type(cls, target_type):
return list(cls._get_args_for_target_type(target_type))
@classmethod
def _get_args_for_target_type(cls, target_type):
args = {} # name: info.
# Target.__init__ has several args that are passed to it by TargetAddressable and not by
# the BUILD file author, so we can't naively inspect it. Instead we special-case its
# true BUILD-file-facing arguments here.
for arg in cls.basic_target_args:
args[arg.name] = arg # Don't yield yet; subclass might supply a better description.
# Non-BUILD-file-facing Target.__init__ args that some Target subclasses capture in their
# own __init__ for various reasons.
ignore_args = {'address', 'payload'}
# Now look at the MRO, in reverse (so we see the more 'common' args first).
# If we see info for an arg, it's more specific than whatever description we have so far,
# so clobber its entry in the args dict.
methods_seen = set() # Ensure we only look at each __init__ method once.
for _type in reversed([t for t in target_type.mro() if issubclass(t, Target)]):
if (inspect.ismethod(_type.__init__) and
_type.__init__ not in methods_seen and
_type.__init__ != Target.__init__):
for arg in cls._get_function_args(_type.__init__):
args[arg.name] = arg
methods_seen.add(_type.__init__)
for arg_name in sorted(args.keys()):
if not arg_name in ignore_args:
yield args[arg_name]
@classmethod
def get_function_args(cls, func):
"""Returns pairs (arg, default) for each argument of func, in declaration order.
Ignores *args, **kwargs. Ignores self for methods.
"""
return list(cls._get_function_args(func))
@classmethod
def _get_function_args(cls, func):
arg_descriptions = cls.get_arg_descriptions_from_docstring(func)
argspec = inspect.getargspec(func)
arg_names = argspec.args
if inspect.ismethod(func) or func.__name__ == '__new__':
arg_names = arg_names[1:]
num_defaulted_args = len(argspec.defaults) if argspec.defaults is not None else 0
first_defaulted_arg = len(arg_names) - num_defaulted_args
for i in range(0, first_defaulted_arg):
yield FunctionArg(arg_names[i], arg_descriptions.pop(arg_names[i], ''), False, None)
for i in range(first_defaulted_arg, len(arg_names)):
yield FunctionArg(arg_names[i], arg_descriptions.pop(arg_names[i], ''), True,
argspec.defaults[i - first_defaulted_arg])
if argspec.varargs:
yield FunctionArg('*{}'.format(argspec.varargs), arg_descriptions.pop(argspec.varargs, None),
False, None)
if argspec.keywords:
# Any remaining arg_descriptions are for kwargs.
for arg_name, descr in arg_descriptions.items():
# Get the default value out of the description, if present.
mo = cls._get_default_value_re().search(descr)
default_value = mo.group(1) if mo else None
descr_sans_default = '{}{}'.format(descr[:mo.start()], descr[mo.end():]) if mo else descr
yield FunctionArg(arg_name, descr_sans_default, True, default_value)
def __init__(self, buildfile_aliases):
self._buildfile_aliases = buildfile_aliases
def get_target_args(self, alias):
"""Returns a list of FunctionArgs for the specified target_type."""
target_types = list(self._buildfile_aliases.target_types_by_alias.get(alias))
if not target_types:
raise TaskError('No such target type: {}'.format(alias))
return self.get_args_for_target_type(target_types[0])
def get_object_args(self, alias):
obj_type = self._buildfile_aliases.objects.get(alias)
if not obj_type:
raise TaskError('No such object type: {}'.format(alias))
if inspect.isfunction(obj_type) or inspect.ismethod(obj_type):
return self.get_function_args(obj_type)
elif inspect.isclass(obj_type) and inspect.ismethod(obj_type.__init__):
return self.get_function_args(obj_type.__init__)
elif inspect.isclass(obj_type):
return self.get_function_args(obj_type.__new__)
elif hasattr(obj_type, '__call__'):
return self.get_function_args(obj_type.__call__)
else:
return []
def get_object_factory_args(self, alias):
obj_factory = self._buildfile_aliases.context_aware_object_factories.get(alias)
if not obj_factory:
raise TaskError('No such context aware object factory: {}'.format(alias))
return self.get_function_args(obj_factory.__call__)
def get_target_type_info(self):
"""Returns a sorted list of BuildSymbolInfo for all known target types."""
return sorted(self._get_target_type_info())
def _get_target_type_info(self):
for alias, target_type in self._buildfile_aliases.target_types.items():
description, details = self.get_description_from_docstring(target_type)
description = description or self.ADD_DESCR
yield BuildSymbolInfo(alias, description, details, self.get_target_args(alias))
for alias, target_macro_factory in self._buildfile_aliases.target_macro_factories.items():
# Take the description from the first target type we encounter that has one.
target_args = self.get_target_args(alias)
for target_type in target_macro_factory.target_types:
description, details = self.get_description_from_docstring(target_type)
if description:
yield BuildSymbolInfo(alias, description, details, target_args)
break
else:
yield BuildSymbolInfo(alias, self.ADD_DESCR, [], target_args)
def get_object_info(self):
return sorted(self._get_object_info())
def _get_object_info(self):
for alias, obj in self._buildfile_aliases.objects.items():
description, details = self.get_description_from_docstring(obj)
description = description or self.ADD_DESCR
yield BuildSymbolInfo(alias, description, details, self.get_object_args(alias))
def get_object_factory_info(self):
return sorted(self._get_object_factory_info())
def _get_object_factory_info(self):
for alias, factory_type in self._buildfile_aliases.context_aware_object_factories.items():
description, details = self.get_description_from_docstring(factory_type)
description = description or self.ADD_DESCR
yield BuildSymbolInfo(alias, description, details, self.get_object_factory_args(alias))
|
|
#!/usr/bin/python2.4
# encoding: utf-8
"""
scop.py
Functions for interacting with the SCOPe database.
Created by Shane O'Connor 2015.
"""
import sys, os
import pprint
import traceback
if __name__ == "__main__":
sys.path.insert(0, "../../")
from klab.db.mysql import DatabaseInterface
from klab import colortext
from klab.fs.fsio import read_file, write_file
from klab.bio.pfam import Pfam
from klab.bio.sifts import SIFTS
from klab.bio.pdb import PDB
installed_database = 'SCOPe205' # rename this to the new database name on updates
installed_database_version = '2.05' # rename this to the new database version on updates
class SCOPeTableCollection(object):
def __init__(self, SCOPe_database):
self.SCOPe_database = SCOPe_database
self.pdb_table = []
self.pfam_table = []
def __add__(self, other):
new_t = SCOPeTableCollection(self.SCOPe_database)
new_t.pdb_table = self.pdb_table + other.pdb_table
new_t.pfam_table = self.pfam_table + other.pfam_table
return new_t
def add_pdb_line(self, details):
if details:
self.pdb_table.append([str(details[f] or '') for f in self.SCOPe_database.pdb_csv_fields])
def add_pfam_line(self, details):
if details:
self.pfam_table.append([str(details[f] or '') for f in self.SCOPe_database.pfam_csv_fields])
def get_csv_tables(self, field_separator = '\t', line_separator = '\n'):
d = dict.fromkeys(['PDB', 'Pfam'], None)
if self.pfam_table:
d['Pfam'] = line_separator.join([field_separator.join(l) for l in [self.SCOPe_database.pfam_csv_headers] + self.pfam_table])
if self.pdb_table:
d['PDB'] = line_separator.join([field_separator.join(l) for l in [self.SCOPe_database.pdb_csv_headers] + self.pdb_table])
return d
def get_tables(self):
d = dict.fromkeys(['PDB', 'Pfam'], None)
if self.pfam_table:
d['Pfam'] = [self.SCOPe_database.pfam_csv_headers] + self.pfam_table
if self.pdb_table:
d['PDB'] = [self.SCOPe_database.pdb_csv_headers] + self.pdb_table
return d
class SCOPeDatabase(DatabaseInterface):
def __init__(self, passwd = None, username = 'anonymous', use_utf=False, fallback_on_failures = True, cache_dir = '/kortemmelab/data/oconchus/SIFTS'):
super(SCOPeDatabase, self).__init__({},
isInnoDB = True,
numTries = 32,
host = "guybrush.ucsf.edu",
db = installed_database,
user = username,
            passwd = passwd,  # pass the caller-supplied password through
port = 3306,
unix_socket = "/var/lib/mysql/mysql.sock",
use_utf = use_utf)
self.cache_dir = cache_dir
self.fallback_on_failures = fallback_on_failures
self.levels = self.get_SCOPe_levels()
del self.levels[1] # remove the root level
level_names = [v for k, v in sorted(self.levels.items()) if k != 1] # skip the root level
search_fields = ['SCOPe_sources', 'SCOPe_search_fields', 'SCOPe_trust_level']
search_headers = ['SCOPe sources', 'Search fields', 'Trustiness']
self.pfam_api = None
self.SIFTS = {}
# Set up CSV fields
self.pdb_csv_fields = [
'pdb_id', 'chain', 'is_polypeptide', 'chain_description', 'resolution', 'pdbe_residue_range',
'sunid', 'sccs', 'sid']
self.pdb_csv_headers = [
'PDB id', 'Chain', 'Is polypeptide', 'Description', 'Resolution', 'PDBe residues',
'sunid', 'sccs', 'sid']
self.pdb_csv_fields += level_names + search_fields
self.pdb_csv_headers += level_names + search_headers
self.pfam_csv_fields = [
'pfam_accession', 'pfam_name', 'pfam_description', 'pfam_type_description', 'pfam_length',
'sunid', 'sccs', 'sid', 'SCOPe_sources', 'SCOPe_search_fields']
self.pfam_csv_headers = [
'Pfam accession', 'Name', 'Description', 'Type', 'Length',
'sunid', 'sccs', 'sid', 'SCOPe sources', 'Search fields']
self.pfam_csv_fields += level_names[:4] + search_fields
self.pfam_csv_headers += level_names[:4] + search_headers
assert(len(self.pdb_csv_fields) == len(self.pdb_csv_headers))
assert(len(self.pfam_csv_fields) == len(self.pfam_csv_headers))
def get_SCOPe_levels(self):
d = {}
results = self.execute_select('SELECT * FROM scop_level ORDER BY id')
for r in results:
d[r['id']] = r['description']
return d
def get_sifts(self, pdb_id, fail_on_error = False, require_uniprot_residue_mapping = False):
try:
pdb_id = pdb_id.lower()
if self.SIFTS.get(pdb_id):
return self.SIFTS[pdb_id]
self.SIFTS[pdb_id] = SIFTS.retrieve(pdb_id, cache_dir = self.cache_dir, acceptable_sequence_percentage_match = 70.0, require_uniprot_residue_mapping = require_uniprot_residue_mapping)
return self.SIFTS[pdb_id]
except Exception as e:
colortext.error('An exception happened retrieving the SIFTS file for %s: "%s". Ignoring this exception and continuing on...' % (pdb_id, str(e)))
colortext.error(traceback.format_exc())
if fail_on_error:
raise
return None
def get_pfam_api(self):
if not(self.pfam_api):
self.pfam_api = Pfam()
return self.pfam_api
def get_basic_pdb_chain_information(self, pdb_id, chain_id):
is_polypeptide, chain_description, resolution = None, None, None
results = self.execute_select('''
SELECT DISTINCT pdb_entry.code, pdb_chain.chain, pdb_chain.is_polypeptide, pdb_entry.description AS ChainDescription, pdb_release.resolution
FROM pdb_chain
INNER JOIN pdb_release ON pdb_release_id = pdb_release.id
INNER JOIN pdb_entry ON pdb_entry_id = pdb_entry.id
WHERE pdb_entry.code=%s AND pdb_chain.chain=%s
ORDER BY pdb_release.revision_date DESC''', parameters = (pdb_id, chain_id))
if results:
is_polypeptide = results[0]['is_polypeptide']
chain_description = results[0]['ChainDescription']
resolution = results[0]['resolution']
return dict(
pdb_id = pdb_id,
chain = chain_id,
is_polypeptide = is_polypeptide,
chain_description = chain_description,
resolution = resolution)
def get_common_fields(self, family_details):
# Get the common SCOPe fields. For the sccs class, we take the longest common prefix
sunid = set([f['sunid'] for f in family_details if f['sunid']]) or None
sccs = set([f['sccs'] for f in family_details if f['sccs']]) or None
sid = set([f['sid'] for f in family_details if f['sid']]) or None
scop_release_id = set([f['scop_release_id'] for f in family_details if f['scop_release_id']]) or None
if sunid:
if len(sunid) > 1:
sunid = None
else:
sunid = sunid.pop()
if sccs:
# take the longest common prefix
sccs = os.path.commonprefix(sccs) or None
if sccs and sccs.endswith('.'):
sccs = sccs[:-1]
if sid:
if len(sid) > 1:
sid = None
else:
sid = sid.pop()
if scop_release_id:
if len(scop_release_id) > 1:
scop_release_id = None
else:
scop_release_id = scop_release_id.pop()
return dict(
sunid = sunid,
sccs = sccs,
sid = sid,
scop_release_id = scop_release_id,
)
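    # Example of the consensus rules above (illustrative values): for sccs
    # strings {'b.1.1.1', 'b.1.1.2'} the longest common prefix 'b.1.1' is kept
    # (the trailing '.' is stripped), while sunid/sid/scop_release_id are kept
    # only when every family reports a single, identical value.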
def get_common_hierarchy(self, family_details):
d = {}
level = 2
while level < 9:
classification_level = self.levels[level]
family_values = set([f[classification_level] for f in family_details]) # allow null fields - if we get a filled in field for one Pfam accession number and a null field for another then we should discount this field entirely and break out
if len(family_values) == 1:
family_value = family_values.pop()
if family_value == None:
break
else:
d[classification_level] = family_value
else:
break
level += 1
return d
def get_chain_details_by_related_pdb_chains(self, pdb_id, chain_id, pfam_accs):
        ''' Returns a dict of SCOPe details inferred from PDB chains related via the given Pfam accession numbers.
This returns Pfam-level information for a PDB chain i.e. no details on the protein, species, or domain will be returned.
If there are SCOPe entries for the associated Pfam accession numbers which agree then this function returns
pretty complete information.
'''
if not pfam_accs:
return None
associated_pdb_chains = set()
pfam_api = self.get_pfam_api()
for pfam_acc in pfam_accs:
associated_pdb_chains = associated_pdb_chains.union(pfam_api.get_pdb_chains_from_pfam_accession_number(pfam_acc))
hits = []
#class_count = {}
pfam_scop_mapping = {}
for pdb_chain_pair in associated_pdb_chains:
ass_pdb_id, ass_chain_id = pdb_chain_pair[0], pdb_chain_pair[1]
hit = self.get_chain_details(ass_pdb_id, chain = ass_chain_id, internal_function_call = True, pfam_scop_mapping = pfam_scop_mapping)
if hit and hit.get('chains'):
assert(len(hit['chains']) == 1)
hits.append(hit['chains'][ass_chain_id])
#for k, v in hit.iteritems():
#class_count[v['sccs']] = class_count.get(v['sccs'], 0)
#class_count[v['sccs']] += 1
#print(' %s, %s: %s' % (v['pdb_id'], k, v['sccs']))
#pprint.pprint(class_count)
# Take the SCOP sunids common to all Pfam accessions seen so far (intersection across the mapping).
allowed_scop_domains = list(map(int, set.intersection(*pfam_scop_mapping.values()))) if pfam_scop_mapping else []
allowed_scop_domains = list(set((allowed_scop_domains or []) + (self.get_sunid_for_pfam_accs(pfam_accs) or [])))
filtered_hits = []
print(pfam_accs)
print(allowed_scop_domains)
print(('%d hits' % len(hits)))
for hit in hits:
for v in hit['domains'].values():
if v['sunid'] in allowed_scop_domains:
filtered_hits.append(v)
print(('%d filtered_hits' % len(filtered_hits)))
if not filtered_hits:
return None
d = self.get_basic_pdb_chain_information(pdb_id, chain_id)
d.update(self.get_common_fields(filtered_hits))
d.update(dict(
SCOPe_sources = 'Pfam + SCOPe',
SCOPe_search_fields = 'Pfam + link_pdb.pdb_chain_id',
SCOPe_trust_level = 3
))
# Add the lowest common classification over all related Pfam families
for k, v in sorted(self.levels.items()):
d[v] = None
d.update(dict(self.get_common_hierarchy(filtered_hits)))
return d
def get_chain_details_by_pfam(self, pdb_id, chain = None):
''' Returns a dict pdb_id -> chain(s) -> chain and SCOPe details.
This returns Pfam-level information for a PDB chain i.e. no details on the protein, species, or domain will be returned.
If there are SCOPe entries for the associated Pfam accession numbers which agree then this function returns
pretty complete information.
'''
pfam_api = self.get_pfam_api()
if chain:
pfam_accs = pfam_api.get_pfam_accession_numbers_from_pdb_chain(pdb_id, chain)
if pfam_accs:
pfam_accs = {chain : pfam_accs}
else:
pfam_accs = pfam_api.get_pfam_accession_numbers_from_pdb_id(pdb_id)
if not pfam_accs:
# There were no associated Pfam accession numbers so we return
return None
d = {}
for chain_id, pfam_acc_set in pfam_accs.items():
family_details = []
for pfam_accession in pfam_acc_set:
family_details.append(self.get_pfam_details(pfam_accession))
family_details = [f for f in family_details if f]
if not family_details:
if self.fallback_on_failures:
# Fallback - There were no associated SCOPe entries with the associated Pfam accession numbers so we will
# search all PDB chains associated with those Pfam accession numbers instead
d[chain_id] = self.get_chain_details_by_related_pdb_chains(pdb_id, chain_id, pfam_accs.get(chain_id))
else:
d[chain_id] = None
continue
# Get the common SCOPe fields. For the sccs class, we take the longest common prefix
d[chain_id] = self.get_basic_pdb_chain_information(pdb_id, chain_id)
d[chain_id].update(self.get_common_fields(family_details))
d[chain_id].update(dict(
SCOPe_sources = 'Pfam + SCOPe',
SCOPe_search_fields = 'Pfam + link_pfam.pfam_accession',
SCOPe_trust_level = 2
))
# Add the lowest common classification over all related Pfam families
for k, v in sorted(self.levels.items()):
d[chain_id][v] = None
d[chain_id].update(dict(self.get_common_hierarchy(family_details)))
return d
def get_list_of_pdb_chains(self, pdb_id):
results = self.execute_select('''
SELECT DISTINCT pdb_chain.chain, pdb_release.id as release_id
FROM pdb_chain
INNER JOIN pdb_release ON pdb_release_id = pdb_release.id
INNER JOIN pdb_entry ON pdb_entry_id = pdb_entry.id
WHERE pdb_entry.code=%s''', parameters = (pdb_id,))
if results:
max_release_id = max([r['release_id'] for r in results])
return set([r['chain'] for r in results if r['release_id'] == max_release_id])
return None
def get_chain_details(self, pdb_id, chain = None, internal_function_call = False, pfam_scop_mapping = {}):
''' Returns a dict pdb_id -> chain(s) -> chain and SCOPe details.
This is the main function for getting details for a PDB chain. If there is an associated SCOPe entry for this
chain then this function returns the most information.
internal_function_call is used to prevent potential infinite loops
'''
query = '''
SELECT DISTINCT scop_node.id AS scop_node_id, scop_node.*, pdb_entry.code, pdb_chain_id, pdb_chain.chain, pdb_chain.is_polypeptide, pdb_entry.description AS ChainDescription, pdb_release.resolution
FROM `link_pdb`
INNER JOIN scop_node on node_id=scop_node.id
INNER JOIN pdb_chain ON pdb_chain_id = pdb_chain.id
INNER JOIN pdb_release ON pdb_release_id = pdb_release.id
INNER JOIN pdb_entry ON pdb_entry_id = pdb_entry.id
WHERE pdb_entry.code=%s'''
if chain:
query += ' AND pdb_chain.chain=%s'
parameters=(pdb_id, chain)
else:
parameters = (pdb_id, )
query += ' ORDER BY release_id DESC'
results = self.execute_select(query, parameters = parameters)
if not results:
if self.fallback_on_failures and not internal_function_call:
# Fallback - use any Pfam accession numbers associated with the chain to get partial information
# Note: this fallback has another fallback in case none of the Pfam entries exist in SCOPe
return self.get_chain_details_by_pfam(pdb_id, chain)
else:
return None
# I am making the assumption here that sids are consistent through releases i.e. that if d1aqt_1 is used in release
# 3 then it will be used for any other releases where the domain is named
sid_map = {}
for r in results:
sid = r['sid']
c_id = r['chain']
if not(sid_map.get(sid)) or sid_map[sid] == ' ':
sid_map[sid] = c_id
chain_to_sid_map = {}
for k, v in sid_map.items():
chain_to_sid_map[v] = chain_to_sid_map.get(v, set())
chain_to_sid_map[v].add(k)
leaf_node_chains = set()
searched_deeper = False
if pdb_id and chain:
leaf_node_chains.add(chain)
else:
pdb_chain_ids = self.get_list_of_pdb_chains(pdb_id)
if pdb_chain_ids:
leaf_node_chains = pdb_chain_ids
else:
return None
leaf_nodes = {}
for c in leaf_node_chains:
if c in chain_to_sid_map:
for sid in chain_to_sid_map[c]:
leaf_nodes[(c, sid)] = None
# Only consider the most recent records
for r in results:
chain_id = r['chain']
sid = r['sid']
k = (chain_id, sid)
if (not leaf_nodes.get(k)) or (r['release_id'] > leaf_nodes[k]['release_id']):
leaf_nodes[k] = r
# Older revisions of SCOPe have blank chain IDs for some records while newer revisions have the chain ID
# The best solution to avoid redundant results seems to be to remove all blank chain records if at least one
# more recent named chain exists. There could be some nasty cases - we only keep the most recent unnamed chain
# but this may correspond to many chains if the PDB has multiple chains since we only look at the chain ID.
# I think that it should be *unlikely* that we will have much if any bad behavior though.
for k1 in list(leaf_nodes.keys()):
if k1[0] == ' ':
release_id_of_blank_record = leaf_nodes[k1]['release_id']
for k2, v2 in leaf_nodes.items():
if k2[0] != ' ':
assert(k2[0].isalpha() and len(k2[0]) == 1)
if v2['release_id'] > release_id_of_blank_record:
del leaf_nodes[k1] # safe: the outer loop iterates over a copied list of keys and we break immediately afterwards
break
d = {}
for chain_sid_pair, details in leaf_nodes.items():
chain_id = chain_sid_pair[0]
sid = chain_sid_pair[1]
if sid.strip() == '':
colortext.warning('FOUND AN EMPTY SID FIELD')
assert(sid == details['sid'])
# Get the details for all chains
if details:
if d.get('resolution'):
assert(d['resolution'] == details['resolution'])
else:
d['resolution'] = details['resolution']
d['chains'] = d.get('chains', {})
if d['chains'].get(chain_id):
assert(d['chains'][chain_id]['is_polypeptide'] == details['is_polypeptide'])
assert(d['chains'][chain_id]['chain_description'] == details['ChainDescription'])
else:
d['chains'][chain_id] = {}
d['chains'][chain_id]['is_polypeptide'] = details['is_polypeptide']
d['chains'][chain_id]['chain_description'] = details['ChainDescription']
d['chains'][chain_id]['domains'] = d['chains'][chain_id].get('domains', {})
domain_information = dict(
#pdb_id = details['code'],
#chain = details['chain'],
#is_polypeptide = details['is_polypeptide'],
#chain_description = details['ChainDescription'],
sunid = details['sunid'],
sccs = details['sccs'],
sid = details['sid'],
scop_release_id = details['release_id'],
SCOPe_sources = 'SCOPe',
SCOPe_search_fields = 'link_pdb.pdb_chain_id',
SCOPe_trust_level = 1
)
for k, v in sorted(self.levels.items()):
domain_information[v] = None
pfam = None
level, parent_node_id = details['level_id'], details['parent_node_id']
pfam = pfam or self.get_pfam_for_node(details['scop_node_id'])
# Store the top-level description
domain_information[self.levels[level]] = details['description']
# Wind up the level hierarchy and retrieve the descriptions
c = 0
while level > 2:
parent_details = self.execute_select('SELECT * FROM scop_node WHERE id=%s', parameters = (parent_node_id,))
assert(len(parent_details) <= 1)
if parent_details:
parent_details = parent_details[0]
level, parent_node_id = parent_details['level_id'], parent_details['parent_node_id']
pfam = pfam or self.get_pfam_for_node(parent_details['id'])
domain_information[self.levels[level]] = parent_details['description']
else:
break
# This should never trigger but just in case...
c += 1
if c > 20:
raise Exception('There is a logical error in the script or database which may result in an infinite lookup loop.')
domain_information['Pfam'] = pfam
# Fill in the residue range data
domain_information['pdbe_residue_range'] = None
sifts_object = self.get_sifts(pdb_id)
if sifts_object:
colortext.message(pdb_id)
region_mapping = sifts_object.region_mapping
ps_map = sifts_object.pfam_scop_mapping or {}
for k, v in ps_map.items():
pfam_scop_mapping[k] = pfam_scop_mapping.get(k, set())
pfam_scop_mapping[k] = pfam_scop_mapping[k].union(v.get_matches('SCOP'))
residue_ranges = region_mapping.get(chain_id, {}).get('SCOP', {}).get(str(details['sunid']))
if residue_ranges:
residue_ranges = ', '.join(['%d-%d' % (t[0], t[1]) for t in residue_ranges])
domain_information['pdbe_residue_range'] = residue_ranges
d['chains'][chain_id]['domains'][sid] = domain_information
else:
if self.fallback_on_failures and not(internal_function_call) and not(searched_deeper):
fallback_results = self.get_chain_details_by_pfam(pdb_id, chain_id)
if fallback_results and fallback_results.get(chain_id):
domain_information = fallback_results[chain_id]
return d
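# Usage sketch (hypothetical call, not executed):
# >>> scopdb = SCOPeDatabase()
# >>> details = scopdb.get_chain_details('1a2p', 'A')
# SCOPe_trust_level in the result indicates the lookup path: 1 = direct SCOPe hit via
# link_pdb.pdb_chain_id, 2 = derived from Pfam accessions mapped to SCOPe
# (get_chain_details_by_pfam), 3 = inferred from other PDB chains sharing those Pfam
# accessions (get_chain_details_by_related_pdb_chains).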
def get_pfam_for_node(self, scop_node_id):
results = self.execute_select('SELECT pfam_accession FROM link_pfam WHERE node_id=%s', parameters = (scop_node_id,))
if results:
return results[0]['pfam_accession']
return None
def get_sunid_for_pfam_accs(self, pfam_accs):
sunids = set()
for pfam_acc in pfam_accs:
results = self.execute_select('SELECT scop_node.sunid FROM link_pfam INNER JOIN scop_node ON node_id=scop_node.id WHERE pfam_accession=%s', parameters = (pfam_acc,))
if results:
sunids.add(results[0]['sunid'])
return list(sunids) or None
def get_pdb_list_details(self, pdb_ids):
d = {}
for pdb_id in pdb_ids:
results = self.get_chain_details(pdb_id)
d[pdb_id] = results
return d
def get_pdb_list_details_as_table(self, pdb_ids):
t = SCOPeTableCollection(self)
d = self.get_pdb_list_details(list(set(pdb_ids)))
failed_pdb_ids = []
if d:
for pdb_id, pdb_details in sorted(d.items()):
if pdb_details:
for chain_id, chain_details in sorted(pdb_details.items()):
t.add_pdb_line(chain_details)
else:
failed_pdb_ids.append(pdb_id)
return t
def get_pdb_list_details_as_csv(self, pdb_ids, field_separator = '\t', line_separator = '\n'):
return self.get_details_as_csv(self.get_pdb_list_details_as_table(pdb_ids), field_separator = field_separator, line_separator = line_separator)
def get_details_as_csv(self, tbl, field_separator = '\t', line_separator = '\n'):
return tbl.get_csv_tables(field_separator, line_separator)
def get_pfam_details(self, pfam_accession):
'''Returns a dict pdb_id -> chain(s) -> chain and SCOPe details.'''
results = self.execute_select('''
SELECT DISTINCT scop_node.*, scop_node.release_id AS scop_node_release_id,
pfam.release_id AS pfam_release_id, pfam.name AS pfam_name, pfam.accession, pfam.description AS pfam_description, pfam.length AS pfam_length,
pfam_type.description AS pfam_type_description
FROM `link_pfam`
INNER JOIN scop_node on node_id=scop_node.id
INNER JOIN pfam ON link_pfam.pfam_accession = pfam.accession
INNER JOIN pfam_type ON pfam.pfam_type_id = pfam_type.id
WHERE pfam.accession=%s ORDER BY scop_node.release_id DESC''', parameters = (pfam_accession,))
if not results:
return None
# Only consider the most recent Pfam releases and most recent SCOPe records, giving priority to SCOPe revisions over Pfam revisions
most_recent_record = None
for r in results:
accession = r['accession']
if (not most_recent_record) or (r['scop_node_release_id'] > most_recent_record['scop_node_release_id']):
most_recent_record = r
elif r['pfam_release_id'] > most_recent_record['pfam_release_id']:
most_recent_record = r
d = dict(
pfam_accession = most_recent_record['accession'],
pfam_name = most_recent_record['pfam_name'],
pfam_description = most_recent_record['pfam_description'],
pfam_type_description = most_recent_record['pfam_type_description'],
pfam_length = most_recent_record['pfam_length'],
pfam_release_id = most_recent_record['pfam_release_id'],
sunid = most_recent_record['sunid'],
sccs = most_recent_record['sccs'],
sid = most_recent_record['sid'],
scop_release_id = most_recent_record['scop_node_release_id'],
SCOPe_sources = 'SCOPe',
SCOPe_search_fields = 'link_pfam.pfam_accession',
SCOPe_trust_level = 1
)
for k, v in sorted(self.levels.items()):
d[v] = None
level, parent_node_id = most_recent_record['level_id'], most_recent_record['parent_node_id']
# Store the top-level description
d[self.levels[level]] = most_recent_record['description']
# Wind up the level hierarchy and retrieve the descriptions
c = 0
while level > 2 :
parent_details = self.execute_select('SELECT * FROM scop_node WHERE id=%s', parameters = (parent_node_id,))
assert(len(parent_details) <= 1)
if parent_details:
parent_details = parent_details[0]
level, parent_node_id = parent_details['level_id'], parent_details['parent_node_id']
d[self.levels[level]] = parent_details['description']
else:
break
# This should never trigger but just in case...
c += 1
if c > 20:
raise Exception('There is a logical error in the script or database which may result in an infinite lookup loop.')
assert(d['Protein'] == d['Species'] == d['PDB Entry Domain'] == None)
return d
def get_pfam_list_details(self, pfam_accs):
d = {}
for pfam_accession in pfam_accs:
results = self.get_pfam_details(pfam_accession)
d[pfam_accession] = results
return d
def get_pfam_list_details_as_table(self, pfam_accs):
t = SCOPeTableCollection(self)
d = self.get_pfam_list_details(pfam_accs)
if d:
for pfam_accession, pfam_details in sorted(d.items()):
if pfam_details:
t.add_pfam_line(pfam_details)
return t
def get_pfam_list_details_as_csv(self, pfam_accs, field_separator = '\t', line_separator = '\n'):
return self.get_details_as_csv(self.get_pfam_list_details_as_table(pfam_accs), field_separator = field_separator, line_separator = line_separator)
def determine_SCOPe_class_of_pdb_residue(self, pdb_id, pdb_chain_id, pdb_residue_id):
sifts_object = self.get_sifts(pdb_id, fail_on_error = True, require_uniprot_residue_mapping = False)
scop_class = None
if sifts_object:
PDBeResidueID = sifts_object.atom_to_seqres_sequence_maps.get(pdb_chain_id, {}).get(PDB.ChainResidueID2String(pdb_chain_id, pdb_residue_id))
if PDBeResidueID:
scop_hits = set()
scop_regions = sifts_object.region_mapping.get(pdb_chain_id, {}).get('SCOP')
if scop_regions:
for sunid, ranges in scop_regions.items():
for r in ranges:
assert(r[0] <= r[1])
if r[0] <= PDBeResidueID <= r[1]:
scop_hits.add(sunid)
if scop_hits:
assert(len(scop_hits) == 1)
scop_class = self.execute_select('SELECT sccs FROM scop_node WHERE sunid=%s', parameters = (scop_hits.pop(),))
if scop_class:
scop_class = scop_class[0]['sccs']
return scop_class
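# Illustrative sketch (hypothetical inputs, not executed):
# >>> scopdb.determine_SCOPe_class_of_pdb_residue('1a2p', 'A', '50')
# The residue is first mapped to its PDBe/SEQRES index via SIFTS, the SCOP region containing
# that index is found, and the sccs string of the matching sunid is returned (None if no SCOP
# region covers the residue).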
def determine_Pfam_class_of_pdb_residue(self, pdb_id, pdb_chain_id, pdb_residue_id):
sifts_object = self.get_sifts(pdb_id, fail_on_error = True, require_uniprot_residue_mapping = False)
pfam_accs = []
if sifts_object:
PDBeResidueID = sifts_object.atom_to_seqres_sequence_maps.get(pdb_chain_id, {}).get(PDB.ChainResidueID2String(pdb_chain_id, pdb_residue_id))
if PDBeResidueID:
pfam_hits = set()
pfam_regions = sifts_object.region_mapping.get(pdb_chain_id, {}).get('Pfam')
if pfam_regions:
for pfam_acc, ranges in pfam_regions.items():
for r in ranges:
assert(r[0] <= r[1])
if r[0] <= PDBeResidueID <= r[1]:
pfam_hits.add(pfam_acc)
if pfam_hits:
pfam_accs = sorted(pfam_hits)
return pfam_accs
def __pick_cases_for_manual_inspection():
import json
import random
scopdb = SCOPeDatabase()
datasets = [
'/kortemmelab/shared/benchmarks/ddg/input/json/potapov.json',
'/kortemmelab/shared/benchmarks/ddg/input/json/kellogg.json',
'/kortemmelab/shared/benchmarks/ddg/input/json/alascan-gpk.json',
'/kortemmelab/shared/benchmarks/ddg/input/json/curatedprotherm.json',
'/kortemmelab/shared/benchmarks/ddg/input/json/guerois.json'
]
for dataset in datasets:
data = json.loads(read_file(dataset))['data']
c = 1
random.seed(1357986420)
random_checks = [random.randint(0, len(data) - 1) for i in range(5)]
for n in random_checks:
d = data[n]
for m in d['Mutations']:
if dataset == '/kortemmelab/shared/benchmarks/ddg/input/json/curatedprotherm.json':
if c in [2338, 2339, 2385, 2435, 2436, 2795, 2796, 2812, 2813, 2814]:
continue
if d['PDBFileID'] == '2IMM':
continue
s = 'Mutation #%d: %s, chain %s, residue %s - ' % (c, d['PDBFileID'], m['Chain'], m['ResidueID'])
try:
sys.stdout.flush()
sccs = scopdb.determine_SCOPe_class_of_pdb_residue(d['PDBFileID'], m['Chain'], m['ResidueID'])
pfam_accs = scopdb.determine_Pfam_class_of_pdb_residue(d['PDBFileID'], m['Chain'], m['ResidueID'])
if sccs:
s += colortext.make(' - %s' % sccs, 'cyan')
else:
s += colortext.make(' - %s' % sccs, 'yellow')
if pfam_accs:
s += colortext.make(', %s' % ', '.join(pfam_accs), 'green')
print(s)
except Exception as e:
print(s)
colortext.error(str(e))
colortext.error(traceback.format_exc())
c += 1
def __generate_benchmark_data():
import json
headers = 'Pfam ID,Pfam Name,Total # of sequences,PDB ID,Amino acid length,SCOP class'.split(',')
lines = [l for l in read_file('/kortemmelab/shared/benchmarks/covariation/input/domains.csv').split('\n') if l.strip() and not(l.startswith('#'))]
dataset_json = {
"information": "\nThis dataset was taken from Table 1 in the Ollikainen and Kortemme paper [1], doi: 10.1371/journal.pcbi.1003313.",
"references": {
"1": "PMID:24244128"
},
"version": "This dataset was last updated on 2015-04-07.",
"domains": []
}
for l in lines:
tokens = l.split(',')
assert(len(tokens) == len(headers))
d = {}
for x in range(len(headers)):
d[headers[x]] = tokens[x]
dataset_json["domains"].append(d)
print((json.dumps(dataset_json, indent=4, sort_keys=True)))
data = write_file('/kortemmelab/shared/benchmarks/covariation/input/domains.json', json.dumps(dataset_json, indent=4, sort_keys=True))
sys.exit(0)
scopdb = SCOPeDatabase()
datasets = [
'/kortemmelab/shared/benchmarks/ddg/input/json/potapov.json',
'/kortemmelab/shared/benchmarks/ddg/input/json/kellogg.json',
'/kortemmelab/shared/benchmarks/ddg/input/json/alascan-gpk.json',
'/kortemmelab/shared/benchmarks/ddg/input/json/curatedprotherm.json',
'/kortemmelab/shared/benchmarks/ddg/input/json/guerois.json'
]
for dataset in datasets:
dataset_json = json.loads(read_file(dataset))
data = dataset_json['data']
c = 1
skipped = 0
scss_breakdown = dict(
a = ['All alpha proteins', 0],
b = ['All beta proteins', 0],
c = ['Alpha and beta proteins (a/b)', 0],
d = ['Alpha and beta proteins (a+b)', 0],
e = ['Multi-domain proteins (alpha and beta)', 0],
f = ['Membrane and cell surface proteins and peptides', 0],
g = ['Small proteins', 0],
h = ['Coiled-coil proteins', 0],
i = ['Low resolution protein structures', 0],
j = ['Peptides', 0],
k = ['Designed proteins', 0]
)
for d in data:
for m in d['Mutations']:
s = 'Mutation #%d: %s, chain %s, residue %s - ' % (c, d['PDBFileID'], m['Chain'], m['ResidueID'])
try:
sys.stdout.flush()
sccs = scopdb.determine_SCOPe_class_of_pdb_residue(d['PDBFileID'], m['Chain'], m['ResidueID'])
pfam_accs = scopdb.determine_Pfam_class_of_pdb_residue(d['PDBFileID'], m['Chain'], m['ResidueID'])
if sccs:
s += colortext.make(' - %s' % sccs, 'cyan')
top_class = sccs.split('.')[0]
scss_breakdown[top_class][1] += 1
else:
s += colortext.make(' - %s' % sccs, 'yellow')
if pfam_accs:
s += colortext.make(', %s' % ', '.join(pfam_accs), 'green')
m['SCOP class'] = sccs
if pfam_accs:
m['Pfam domains'] = ', '.join(pfam_accs)
else:
m['Pfam domains'] = None
print(s)
except Exception as e:
print(s)
colortext.error(str(e))
colortext.error(traceback.format_exc())
raise
c += 1
print((len(data)))
keys_to_delete = [k for k in list(scss_breakdown.keys()) if scss_breakdown[k][1] == 0]
total_count = sum([scss_breakdown[k][1] for k in list(scss_breakdown.keys())])
print(total_count)
for k in keys_to_delete:
del scss_breakdown[k]
colortext.warning(pprint.pformat(scss_breakdown))
print(('skipped', skipped))
data = write_file(dataset + '.new', json.dumps(dataset_json, indent=4, sort_keys=True))
def __test():
scopdb = SCOPeDatabase()
if False:
# Outstanding issue 1
# PDB chains with multiple domains: only one domain returned.
# 1AQT chain A has a b.93.1.1 domain (residues 2-86) and a a.2.10.1 domain (residues 87-136). I only return the a.2.10.1 at present.
#colortext.message('\nGetting PDB details for 1AQT, chain A')
#colortext.warning(pprint.pformat(scopdb.get_chain_details('1AQT', 'A')))
# Outstanding issue 2
# For 3FYM, the mapping between PDB chains and Pfam accession numbers I am using (SIFTS) only has the mapping:
# 3fym A -> PF13413 whereas the RCSB PDB website reports two other domains, PF12844 (a.35.1.3) and
# PF01381 (a.35.1 or a.4.14.1). I need to use more information than just relying on SIFTS to be complete.
# This should be fixed after issue 1 since we should return results for both PF12844 and PF01381.
#colortext.message('\nGetting PDB details for PF13413')
#colortext.warning(pprint.pformat(scopdb.get_chain_details_by_related_pdb_chains('3FYM', 'A', set(['PF13413']))))
#colortext.message('\nGetting PDB details for PF12844')
#colortext.warning(pprint.pformat(scopdb.get_chain_details_by_related_pdb_chains('3FYM', 'A', set(['PF12844']))))
colortext.message('\nGetting PDB details for PF01381')
colortext.warning(pprint.pformat(scopdb.get_chain_details_by_related_pdb_chains('3FYM', 'A', set(['PF01381']))))
# todo: have a look at this case (scopdb.get_chain_details_by_related_pdb_chains('3FYM', 'A', set(['PF01381']))) to make sure that the results make sense
# The accession numbers set(['PF01381'])
# maps to sunids
# [127500, 127502, 127504, 127506, 127508, 127510, 127512, 127514, 127516, 127518, 127520, 127408, 127522, 127524, 127526, 127528, 127530, 125874, 127536, 127538, 127540, 127542, 17035, 17023, 17024, 17025, 17026, 17027, 17028, 17029, 17030, 17031, 17032, 17033, 17034, 149783, 17036, 17037, 17038, 17039, 17040, 17041, 17042, 147604, 147605, 17064, 97513, 97514, 140526L, 118551, 118552, 128835, 128837, 128839, 128841, 128843, 128845, 121704, 122729, 122730, 116592, 108929, 108931, 108933, 108935, 135560, 108937, 135562, 108939, 108941, 135566, 108943, 105888, 125872, 127410, 125876, 125878, 125880, 125882, 125884, 125886, 125888, 125890, 125892, 125894, 125896, 125898, 127435, 127437, 127439, 127441, 127443, 127445, 127447, 127449, 127451, 127453, 127455, 127457, 151525, 151526]
# These are of types a.35.1 or a.4.14 so we return a. Does this make sense?
colortext.message('\nGetting chain details for 2zxj, chain A')
colortext.warning(pprint.pformat(scopdb.get_chain_details('2zxj', 'A')))
colortext.message('\nGetting PDB details for 2zxj')
colortext.warning(pprint.pformat(scopdb.get_chain_details('2zXJ'))) # the lookup is not case-sensitive w.r.t. PDB ID
colortext.message('\nGetting dicts for 1ki1 and 1a2p')
colortext.warning(pprint.pformat(scopdb.get_pdb_list_details(['1ki1', '1a2p'])))
if False:
colortext.message('\nGetting details as CSV for 1ki1 and 1a2p')
colortext.warning(scopdb.get_pdb_list_details_as_csv(['1ki1', '1a2p']))
colortext.message('\nGetting PFAM details for PF01035, PF01833')
colortext.warning(pprint.pformat(scopdb.get_pfam_details('PF01035')))
if False:
colortext.message('\nGetting details as CSV for 1ki1 and 1a2p')
colortext.warning(scopdb.get_pdb_list_details_as_csv(['1ki1', '1a2p']))
colortext.message('\nGetting details as CSV for 1ki1 and 1a2p')
colortext.warning(scopdb.get_pfam_list_details_as_csv(['PF01035', 'PF01833'])['Pfam'])
# get_chain_details_by_pfam cases
# This case tests what happens when there is no PDB chain entry in SCOPe - we should find the Pfam entry instead and look that up
colortext.message('\nGetting chain details for 3GVA')
colortext.warning(pprint.pformat(scopdb.get_chain_details('3GVA')))
colortext.message('\nGetting chain details for 3GVA, chain A')
colortext.warning(pprint.pformat(scopdb.get_chain_details('3GVA', 'A')))
assert(scopdb.get_chain_details('3GVA', 'A')['A']['SCOPe_trust_level'] == 2)
# get_chain_details_by_related_pdb_chains cases
# This case tests what happens when there is no PDB chain entry in SCOPe and the associated Pfam entries also have no
# SCOPe entries but their associated PDB chains do. In these cases, there is not enough common information e.g. 2EVB
# resolves to b.84.1.1, b.84.1.0, and a.9.1.0 which have no common root whereas 2PND resolves to b.1.1.2, b.1.1.1,
# b.1.1.0, and i.6.1.1.
colortext.message('\nGetting chain details for 2EVB, 2PND, 2QLC, 3FYM')
colortext.warning(pprint.pformat(scopdb.get_pdb_list_details(['2EVB', '2PND', '2QLC', '3FYM'])))
assert(scopdb.get_chain_details('2EVB', 'A')['A']['SCOPe_trust_level'] == 3)
assert(scopdb.get_chain_details('2PND', 'A')['A']['SCOPe_trust_level'] == 3)
# However, 1a2c tests get_chain_details_by_related_pdb_chains since chain I needs to drop down to this level in order
# to get results
colortext.message('\nGetting chain details for 1a2c')
colortext.warning(pprint.pformat(scopdb.get_chain_details('1a2c')))
assert(scopdb.get_chain_details('1a2c', 'H')['H']['SCOPe_trust_level'] == 1)
assert(scopdb.get_chain_details('1a2c', 'I')['I']['SCOPe_trust_level'] == 3)
print('\n')
if __name__ == '__main__':
#__test()
__generate_benchmark_data()
|
|
#!/usr/bin/env python
# create a color map based on an image.
# vaguely octree-inspired
import sys
sys.path.append("..")
from fract4d import gradient
from PIL import ImageFile
import gtk
class Node:
def __init__(self,r,g,b,count):
self.branches = [None] * 8
self.r = r
self.g = g
self.b = b
self.count = count
self._isleaf = True
def isleaf(self):
return self._isleaf
def matches(self,r,g,b):
return self.r == r and self.g == g and self.b == b
def difference_from(self,node):
dr = self.r - node.r
dg = self.g - node.g
db = self.b - node.b
return (dr*dr + dg*dg + db*db) * self.count
class T:
R = 0
G = 1
B = 2
def __init__(self):
'Build an empty octree-like color tree'
self.root = None
def addLeafNode(self,r,g,b,count):
return Node(r,g,b,count)
def addInternalNode(self,r,g,b):
n = Node(r,g,b,0)
n._isleaf = False
return n
def load(self,file):
'Load an image from the open stream "file"'
p = ImageFile.Parser()
while 1:
s = file.read(1024)
if not s:
break
p.feed(s)
self.im = p.close()
def getdata(self):
return self.im.getdata()
def build(self,divby=1):
i = 0
for (r,g,b) in self.getdata():
if i % divby == 0:
self.insertPixel(r,g,b)
i += 1
def dump(self,node,indent=""):
if not node:
return ""
if node.isleaf():
leafness = "L"
else:
leafness = "I"
val = [ indent + "[(%s,%d,%d,%d,%d)" % \
(leafness, node.r, node.g, node.b, node.count)]
val += [self.dump(b,indent+" ") for b in node.branches]
val += [ indent + "]"]
return "\n".join(val)
def get_collapse_info(self,node):
maxchild = None
maxcount = 0
totalchildren = 0
for child in node.branches:
if child:
totalchildren += child.count
if child.count > maxcount:
maxchild = child
maxcount = child.count
return (maxchild, totalchildren)
def get_collapse_error(self,node):
"How much the image's error will increase if this node is collapsed"
# only works on internal nodes which only have leaves as children
if not node or node.isleaf():
return 0
(maxchild, totalchildren) = self.get_collapse_info(node)
error = 0
for child in node.branches:
if child:
error += child.difference_from(maxchild)
return error
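# Worked example (hypothetical numbers): an internal node with two leaf children, (255, 0, 0)
# seen 10 times and (250, 5, 5) seen twice. The most popular child is (255, 0, 0), so it
# contributes 0; the other child contributes ((255-250)^2 + (0-5)^2 + (0-5)^2) * 2 = 75 * 2 = 150,
# giving a collapse error of 150.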
def find_collapse_candidates(self,node,candidates):
if not node or node.isleaf():
return candidates
has_children = False
for b in node.branches:
if b and not b.isleaf():
self.find_collapse_candidates(b,candidates)
has_children = True
if not has_children:
cost = self.get_collapse_error(node)
candidates.append((cost,node))
return candidates
def collapse(self,node):
'''Collapse all children into this node, getting the most
popular child color'''
if not node or node.isleaf():
raise ValueError("Can't collapse") # ArgumentError is not a builtin; ValueError signals the misuse
(maxchild,totalchildren) = self.get_collapse_info(node)
node._isleaf = True
node.branches = [None] * 8
node.count = totalchildren
node.r = maxchild.r
node.g = maxchild.g
node.b = maxchild.b
def getBranch(self,r,g,b,nr,ng,nb):
branch = 0
if r > nr:
branch += 4
if g > ng:
branch += 2
if b > nb:
branch += 1
return branch
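# Worked example (hypothetical pixel): for node centre (127, 127, 127) and pixel (200, 50, 130),
# r > nr adds 4, g <= ng adds nothing, b > nb adds 1, so getBranch returns octant 5 of the 8.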
def getInteriorRGB(self,r,g,b,nr,ng,nb,size):
if r > nr:
nr += size
else:
nr -= size
if g > ng:
ng += size
else:
ng -= size
if b > nb:
nb += size
else:
nb -= size
return (nr,ng,nb)
def insertNode(self,parent,r,g,b,count,nr,ng,nb,size):
if parent == None:
parent = self.addLeafNode(r,g,b,count)
elif parent.matches(r,g,b) and parent.isleaf():
parent.count += 1
elif parent.isleaf():
# replace it with a new interior node, reinsert this leaf and the new one
currentleaf = parent
parent = self.addInternalNode(nr,ng,nb)
currentbranch = self.getBranch(
currentleaf.r,currentleaf.g,currentleaf.b,nr,ng,nb)
newbranch = self.getBranch(
r,g,b,nr,ng,nb)
if currentbranch == newbranch:
# need to subdivide further
(nr,ng,nb) = self.getInteriorRGB(r,g,b,nr,ng,nb,size/2)
parent.branches[newbranch] = self.addInternalNode(nr,ng,nb)
parent.branches[newbranch] = self.insertNode(
parent.branches[newbranch],r,g,b,1,nr,ng,nb,size/2)
parent.branches[newbranch] = self.insertNode(
parent.branches[newbranch],
currentleaf.r, currentleaf.g, currentleaf.b,
currentleaf.count,
nr,ng,nb,size/2)
else:
parent.branches[currentbranch] = currentleaf
parent.branches[newbranch] = self.addLeafNode(r,g,b,1)
else:
# parent is an interior node, recurse to appropriate branch
newbranch = self.getBranch(r,g,b,nr,ng,nb)
(nr,ng,nb) = self.getInteriorRGB(r,g,b,nr,ng,nb,size/2)
parent.branches[newbranch] = self.insertNode(
parent.branches[newbranch],r,g,b,1,nr,ng,nb,size/2)
return parent
def insertPixel(self,r,g,b):
self.root = self.insertNode(self.root,r,g,b,1,127,127,127,128)
def numColors(self):
return self._numColors(self.root)
def _numColors(self,node):
if not node:
return 0
if node.isleaf():
return 1
return sum(map(self._numColors,node.branches))
def _colors(self,node,list):
if not node:
return
if node.isleaf():
list.append((node.r,node.g,node.b))
for b in node.branches:
self._colors(b,list)
return list
def colors(self):
return self._colors(self.root,[])
def reduceColors(self,n):
while self.numColors() > n:
candidates = self.find_collapse_candidates(self.root,[])
self.collapse(candidates[0][1])
class MapMaker(gtk.Window):
def __init__(self,type=gtk.WINDOW_TOPLEVEL):
gtk.Window.__init__(self,type)
self.image = gtk.Image()
self.add(self.image)
self.resize(640,480)
self.connect('delete-event', self.quit)
def load(self,name):
self.image.set_from_file(name)
def quit(self,*args):
gtk.main_quit()
def main(args):
w = MapMaker()
w.load(args[0])
w.show_all()
gtk.main()
def old_main(args):
mm = T()
mm.load(open(args[0]))
mm.build(1)
mm.reduceColors(int(args[1]))
grad = gradient.Gradient()
colors = []
i = 0
for (r,g,b) in mm.colors():
colors.append((i/10.0,r,g,b,255))
i += 1
grad.load_list(colors)
grad.save(sys.stdout)
if __name__ == '__main__':
main(sys.argv[1:])
|
|
"""
This module contains the 'base' GEOSGeometry object -- all GEOS Geometries
inherit from this object.
"""
# Python, ctypes and types dependencies.
import re
from ctypes import addressof, byref, c_double, c_size_t
# super-class for mutable list behavior
from django.contrib.gis.geos.mutable_list import ListMixin
# GEOS-related dependencies.
from django.contrib.gis.geos.base import GEOSBase, gdal
from django.contrib.gis.geos.coordseq import GEOSCoordSeq
from django.contrib.gis.geos.error import GEOSException, GEOSIndexError
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOS_PREPARE
# All other functions in this module come from the ctypes
# prototypes module -- which handles all interaction with
# the underlying GEOS library.
from django.contrib.gis.geos import prototypes as capi
# These functions provide access to a thread-local instance
# of their corresponding GEOS I/O class.
from django.contrib.gis.geos.prototypes.io import wkt_r, wkt_w, wkb_r, wkb_w, ewkb_w, ewkb_w3d
# For recognizing geometry input.
from django.contrib.gis.geometry.regex import hex_regex, wkt_regex, json_regex
class GEOSGeometry(GEOSBase, ListMixin):
"A class that, generally, encapsulates a GEOS geometry."
# Raise GEOSIndexError instead of plain IndexError
# (see ticket #4740 and GEOSIndexError docstring)
_IndexError = GEOSIndexError
ptr_type = GEOM_PTR
#### Python 'magic' routines ####
def __init__(self, geo_input, srid=None):
"""
The base constructor for GEOS geometry objects; it may take the
following inputs:
* strings:
- WKT
- HEXEWKB (a PostGIS-specific canonical form)
- GeoJSON (requires GDAL)
* buffer:
- WKB
The `srid` keyword is used to specify the Spatial Reference Identifier
(SRID) number for this Geometry. If not set, the SRID will be None.
"""
if isinstance(geo_input, basestring):
if isinstance(geo_input, unicode):
# Encoding to ASCII -- WKT or HEXEWKB input doesn't need any more than that.
geo_input = geo_input.encode('ascii')
wkt_m = wkt_regex.match(geo_input)
if wkt_m:
# Handling WKT input.
if wkt_m.group('srid'): srid = int(wkt_m.group('srid'))
g = wkt_r().read(wkt_m.group('wkt'))
elif hex_regex.match(geo_input):
# Handling HEXEWKB input.
g = wkb_r().read(geo_input)
elif gdal.GEOJSON and json_regex.match(geo_input):
# Handling GeoJSON input.
g = wkb_r().read(gdal.OGRGeometry(geo_input).wkb)
else:
raise ValueError('String or unicode input unrecognized as WKT, EWKT, or HEXEWKB.')
elif isinstance(geo_input, GEOM_PTR):
# When the input is a pointer to a geometry (GEOM_PTR).
g = geo_input
elif isinstance(geo_input, buffer):
# When the input is a buffer (WKB).
g = wkb_r().read(geo_input)
elif isinstance(geo_input, GEOSGeometry):
g = capi.geom_clone(geo_input.ptr)
else:
# Invalid geometry type.
raise TypeError('Improper geometry input type: %s' % str(type(geo_input)))
if bool(g):
# Setting the pointer object with a valid pointer.
self.ptr = g
else:
raise GEOSException('Could not initialize GEOS Geometry with given input.')
# Post-initialization setup.
self._post_init(srid)
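# Usage sketch (illustrative, not executed) of the accepted input forms described above:
# >>> GEOSGeometry('POINT(5 23)') # WKT
# >>> GEOSGeometry('SRID=4326;POINT(5 23)') # EWKT; the SRID prefix sets `srid`
# >>> GEOSGeometry(buffer(wkb_string)) # WKB via a Python 2 buffer (wkb_string is hypothetical)
# GeoJSON strings are also accepted when GDAL support is available.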
def _post_init(self, srid):
"Helper routine for performing post-initialization setup."
# Setting the SRID, if given.
if srid and isinstance(srid, int): self.srid = srid
# Setting the class type (e.g., Point, Polygon, etc.)
self.__class__ = GEOS_CLASSES[self.geom_typeid]
# Setting the coordinate sequence for the geometry (will be None on
# geometries that do not have coordinate sequences)
self._set_cs()
def __del__(self):
"""
Destroys this Geometry; in other words, frees the memory used by the
GEOS C++ object.
"""
if self._ptr: capi.destroy_geom(self._ptr)
def __copy__(self):
"""
Returns a clone because the copy of a GEOSGeometry may contain an
invalid pointer location if the original is garbage collected.
"""
return self.clone()
def __deepcopy__(self, memodict):
"""
The `deepcopy` routine is used by the `Node` class of django.utils.tree;
thus, the protocol routine needs to be implemented to return correct
copies (clones) of these GEOS objects, which use C pointers.
"""
return self.clone()
def __str__(self):
"WKT is used for the string representation."
return self.wkt
def __repr__(self):
"Short-hand representation because WKT may be very large."
return '<%s object at %s>' % (self.geom_type, hex(addressof(self.ptr)))
# Pickling support
def __getstate__(self):
# The pickled state is simply a tuple of the WKB (in string form)
# and the SRID.
return str(self.wkb), self.srid
def __setstate__(self, state):
# Instantiating from the tuple state that was pickled.
wkb, srid = state
ptr = wkb_r().read(buffer(wkb))
if not ptr: raise GEOSException('Invalid Geometry loaded from pickled state.')
self.ptr = ptr
self._post_init(srid)
# Comparison operators
def __eq__(self, other):
"""
Equivalence testing, a Geometry may be compared with another Geometry
or a WKT representation.
"""
if isinstance(other, basestring):
return self.wkt == other
elif isinstance(other, GEOSGeometry):
return self.equals_exact(other)
else:
return False
def __ne__(self, other):
"The not equals operator."
return not (self == other)
### Geometry set-like operations ###
# Thanks to Sean Gillies for inspiration:
# http://lists.gispython.org/pipermail/community/2007-July/001034.html
# g = g1 | g2
def __or__(self, other):
"Returns the union of this Geometry and the other."
return self.union(other)
# g = g1 & g2
def __and__(self, other):
"Returns the intersection of this Geometry and the other."
return self.intersection(other)
# g = g1 - g2
def __sub__(self, other):
"Return the difference this Geometry and the other."
return self.difference(other)
# g = g1 ^ g2
def __xor__(self, other):
"Return the symmetric difference of this Geometry and the other."
return self.sym_difference(other)
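# Illustrative note (not executed), assuming g1 and g2 are GEOSGeometry instances:
# g1 | g2 -> g1.union(g2), g1 & g2 -> g1.intersection(g2),
# g1 - g2 -> g1.difference(g2), g1 ^ g2 -> g1.sym_difference(g2)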
#### Coordinate Sequence Routines ####
@property
def has_cs(self):
"Returns True if this Geometry has a coordinate sequence, False if not."
# Only these geometries are allowed to have coordinate sequences.
if isinstance(self, (Point, LineString, LinearRing)):
return True
else:
return False
def _set_cs(self):
"Sets the coordinate sequence for this Geometry."
if self.has_cs:
self._cs = GEOSCoordSeq(capi.get_cs(self.ptr), self.hasz)
else:
self._cs = None
@property
def coord_seq(self):
"Returns a clone of the coordinate sequence for this Geometry."
if self.has_cs:
return self._cs.clone()
#### Geometry Info ####
@property
def geom_type(self):
"Returns a string representing the Geometry type, e.g. 'Polygon'"
return capi.geos_type(self.ptr)
@property
def geom_typeid(self):
"Returns an integer representing the Geometry type."
return capi.geos_typeid(self.ptr)
@property
def num_geom(self):
"Returns the number of geometries in the Geometry."
return capi.get_num_geoms(self.ptr)
@property
def num_coords(self):
"Returns the number of coordinates in the Geometry."
return capi.get_num_coords(self.ptr)
@property
def num_points(self):
"Returns the number points, or coordinates, in the Geometry."
return self.num_coords
@property
def dims(self):
"Returns the dimension of this Geometry (0=point, 1=line, 2=surface)."
return capi.get_dims(self.ptr)
def normalize(self):
"Converts this Geometry to normal form (or canonical form)."
return capi.geos_normalize(self.ptr)
#### Unary predicates ####
@property
def empty(self):
"""
Returns a boolean indicating whether the set of points in this Geometry
are empty.
"""
return capi.geos_isempty(self.ptr)
@property
def hasz(self):
"Returns whether the geometry has a 3D dimension."
return capi.geos_hasz(self.ptr)
@property
def ring(self):
"Returns whether or not the geometry is a ring."
return capi.geos_isring(self.ptr)
@property
def simple(self):
"Returns false if the Geometry not simple."
return capi.geos_issimple(self.ptr)
@property
def valid(self):
"This property tests the validity of this Geometry."
return capi.geos_isvalid(self.ptr)
#### Binary predicates. ####
def contains(self, other):
"Returns true if other.within(this) returns true."
return capi.geos_contains(self.ptr, other.ptr)
def crosses(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*T****** (for a point and a curve, a point and an area or a line and
an area) or 0******** (for two curves).
"""
return capi.geos_crosses(self.ptr, other.ptr)
def disjoint(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is FF*FF****.
"""
return capi.geos_disjoint(self.ptr, other.ptr)
def equals(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*F**FFF*.
"""
return capi.geos_equals(self.ptr, other.ptr)
def equals_exact(self, other, tolerance=0):
"""
Returns true if the two Geometries are exactly equal, up to a
specified tolerance.
"""
return capi.geos_equalsexact(self.ptr, other.ptr, float(tolerance))
def intersects(self, other):
"Returns true if disjoint returns false."
return capi.geos_intersects(self.ptr, other.ptr)
def overlaps(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*T***T** (for two points or two surfaces) or 1*T***T** (for two curves).
"""
return capi.geos_overlaps(self.ptr, other.ptr)
def relate_pattern(self, other, pattern):
"""
Returns true if the elements in the DE-9IM intersection matrix for the
two Geometries match the elements in pattern.
"""
if not isinstance(pattern, basestring) or len(pattern) > 9:
raise GEOSException('invalid intersection matrix pattern')
return capi.geos_relatepattern(self.ptr, other.ptr, pattern)
def touches(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is FT*******, F**T***** or F***T****.
"""
return capi.geos_touches(self.ptr, other.ptr)
def within(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*F**F***.
"""
return capi.geos_within(self.ptr, other.ptr)
#### SRID Routines ####
def get_srid(self):
"Gets the SRID for the geometry, returns None if no SRID is set."
s = capi.geos_get_srid(self.ptr)
if s == 0: return None
else: return s
def set_srid(self, srid):
"Sets the SRID for the geometry."
capi.geos_set_srid(self.ptr, srid)
srid = property(get_srid, set_srid)
#### Output Routines ####
@property
def ewkt(self):
"""
Returns the EWKT (WKT + SRID) of the Geometry. Note that Z values
are *not* included in this representation because GEOS does not yet
support serializing them.
"""
if self.get_srid(): return 'SRID=%s;%s' % (self.srid, self.wkt)
else: return self.wkt
@property
def wkt(self):
"Returns the WKT (Well-Known Text) representation of this Geometry."
return wkt_w().write(self)
@property
def hex(self):
"""
Returns the WKB of this Geometry in hexadecimal form. Please note
that the SRID and Z values are not included in this representation
because it is not a part of the OGC specification (use the `hexewkb`
property instead).
"""
# A possible faster, all-python, implementation:
# str(self.wkb).encode('hex')
return wkb_w().write_hex(self)
@property
def hexewkb(self):
"""
Returns the EWKB of this Geometry in hexadecimal form. This is an
extension of the WKB specification that includes SRID and Z values
that are a part of this geometry.
"""
if self.hasz:
if not GEOS_PREPARE:
# See: http://trac.osgeo.org/geos/ticket/216
raise GEOSException('Upgrade GEOS to 3.1 to get valid 3D HEXEWKB.')
return ewkb_w3d().write_hex(self)
else:
return ewkb_w().write_hex(self)
@property
def json(self):
"""
Returns GeoJSON representation of this Geometry if GDAL 1.5+
is installed.
"""
if gdal.GEOJSON:
return self.ogr.json
else:
raise GEOSException('GeoJSON output only supported on GDAL 1.5+.')
geojson = json
@property
def wkb(self):
"""
Returns the WKB (Well-Known Binary) representation of this Geometry
as a Python buffer. SRID and Z values are not included, use the
`ewkb` property instead.
"""
return wkb_w().write(self)
@property
def ewkb(self):
"""
Return the EWKB representation of this Geometry as a Python buffer.
This is an extension of the WKB specification that includes any SRID
and Z values that are a part of this geometry.
"""
if self.hasz:
if not GEOS_PREPARE:
# See: http://trac.osgeo.org/geos/ticket/216
raise GEOSException('Upgrade GEOS to 3.1 to get valid 3D EWKB.')
return ewkb_w3d().write(self)
else:
return ewkb_w().write(self)
@property
def kml(self):
"Returns the KML representation of this Geometry."
gtype = self.geom_type
return '<%s>%s</%s>' % (gtype, self.coord_seq.kml, gtype)
@property
def prepared(self):
"""
Returns a PreparedGeometry corresponding to this geometry -- it is
optimized for the contains, intersects, and covers operations.
"""
if GEOS_PREPARE:
return PreparedGeometry(self)
else:
raise GEOSException('GEOS 3.1+ required for prepared geometry support.')
#### GDAL-specific output routines ####
@property
def ogr(self):
"Returns the OGR Geometry for this Geometry."
if gdal.HAS_GDAL:
if self.srid:
return gdal.OGRGeometry(self.wkb, self.srid)
else:
return gdal.OGRGeometry(self.wkb)
else:
raise GEOSException('GDAL required to convert to an OGRGeometry.')
@property
def srs(self):
"Returns the OSR SpatialReference for SRID of this Geometry."
if gdal.HAS_GDAL:
if self.srid:
return gdal.SpatialReference(self.srid)
else:
return None
else:
raise GEOSException('GDAL required to return a SpatialReference object.')
@property
def crs(self):
"Alias for `srs` property."
return self.srs
def transform(self, ct, clone=False):
"""
Requires GDAL. Transforms the geometry according to the given
transformation object, which may be an integer SRID, and WKT or
PROJ.4 string. By default, the geometry is transformed in-place and
nothing is returned. However if the `clone` keyword is set, then this
geometry will not be modified and a transformed clone will be returned
instead.
"""
srid = self.srid
if gdal.HAS_GDAL and srid:
# Creating an OGR Geometry, which is then transformed.
g = gdal.OGRGeometry(self.wkb, srid)
g.transform(ct)
# Getting a new GEOS pointer
ptr = wkb_r().read(g.wkb)
if clone:
# User wants a cloned transformed geometry returned.
return GEOSGeometry(ptr, srid=g.srid)
if ptr:
# Reassigning pointer, and performing post-initialization setup
# again due to the reassignment.
capi.destroy_geom(self.ptr)
self.ptr = ptr
self._post_init(g.srid)
else:
raise GEOSException('Transformed WKB was invalid.')
#### Topology Routines ####
def _topology(self, gptr):
"Helper routine to return Geometry from the given pointer."
return GEOSGeometry(gptr, srid=self.srid)
@property
def boundary(self):
"Returns the boundary as a newly allocated Geometry object."
return self._topology(capi.geos_boundary(self.ptr))
def buffer(self, width, quadsegs=8):
"""
Returns a geometry that represents all points whose distance from this
Geometry is less than or equal to distance. Calculations are in the
Spatial Reference System of this Geometry. The optional third parameter sets
the number of segments used to approximate a quarter circle (defaults to 8).
(Text from PostGIS documentation at ch. 6.1.3)
"""
return self._topology(capi.geos_buffer(self.ptr, width, quadsegs))
@property
def centroid(self):
"""
The centroid is equal to the centroid of the set of component Geometries
of highest dimension (since the lower-dimension geometries contribute zero
"weight" to the centroid).
"""
return self._topology(capi.geos_centroid(self.ptr))
@property
def convex_hull(self):
"""
Returns the smallest convex Polygon that contains all the points
in the Geometry.
"""
return self._topology(capi.geos_convexhull(self.ptr))
def difference(self, other):
"""
Returns a Geometry representing the points making up this Geometry
that do not make up other.
"""
return self._topology(capi.geos_difference(self.ptr, other.ptr))
@property
def envelope(self):
"Return the envelope for this geometry (a polygon)."
return self._topology(capi.geos_envelope(self.ptr))
def intersection(self, other):
"Returns a Geometry representing the points shared by this Geometry and other."
return self._topology(capi.geos_intersection(self.ptr, other.ptr))
@property
def point_on_surface(self):
"Computes an interior point of this Geometry."
return self._topology(capi.geos_pointonsurface(self.ptr))
def relate(self, other):
"Returns the DE-9IM intersection matrix for this Geometry and the other."
return capi.geos_relate(self.ptr, other.ptr)
def simplify(self, tolerance=0.0, preserve_topology=False):
"""
Returns the Geometry, simplified using the Douglas-Peucker algorithm
to the specified tolerance (higher tolerance => less points). If no
tolerance provided, defaults to 0.
By default, this function does not preserve topology - e.g. polygons can
be split, collapse into lines or disappear, holes can be created or
disappear, and lines can cross. By specifying preserve_topology=True,
the result will have the same dimension and number of components as the
input. This is significantly slower.
"""
if preserve_topology:
return self._topology(capi.geos_preservesimplify(self.ptr, tolerance))
else:
return self._topology(capi.geos_simplify(self.ptr, tolerance))
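# Illustrative note (not executed), assuming `poly` is a Polygon with a narrow neck:
# poly.simplify(0.5) may split or collapse parts of the geometry, while
# poly.simplify(0.5, preserve_topology=True) keeps the same number of components at the
# cost of the slower topology-preserving algorithm.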
def sym_difference(self, other):
"""
Returns a set combining the points in this Geometry not in other,
and the points in other not in this Geometry.
"""
return self._topology(capi.geos_symdifference(self.ptr, other.ptr))
def union(self, other):
"Returns a Geometry representing all the points in this Geometry and other."
return self._topology(capi.geos_union(self.ptr, other.ptr))
#### Other Routines ####
@property
def area(self):
"Returns the area of the Geometry."
return capi.geos_area(self.ptr, byref(c_double()))
def distance(self, other):
"""
Returns the distance between the closest points on this Geometry
and the other. Units will be in those of the coordinate system of
the Geometry.
"""
if not isinstance(other, GEOSGeometry):
raise TypeError('distance() works only on other GEOS Geometries.')
return capi.geos_distance(self.ptr, other.ptr, byref(c_double()))
@property
def extent(self):
"""
Returns the extent of this geometry as a 4-tuple, consisting of
(xmin, ymin, xmax, ymax).
"""
env = self.envelope
if isinstance(env, Point):
xmin, ymin = env.tuple
xmax, ymax = xmin, ymin
else:
xmin, ymin = env[0][0]
xmax, ymax = env[0][2]
return (xmin, ymin, xmax, ymax)
@property
def length(self):
"""
Returns the length of this Geometry (e.g., 0 for point, or the
circumference of a Polygon).
"""
return capi.geos_length(self.ptr, byref(c_double()))
def clone(self):
"Clones this Geometry."
return GEOSGeometry(capi.geom_clone(self.ptr), srid=self.srid)
# Class mapping dictionary. Has to be at the end to avoid import
# conflicts with GEOSGeometry.
from django.contrib.gis.geos.linestring import LineString, LinearRing
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos.polygon import Polygon
from django.contrib.gis.geos.collections import GeometryCollection, MultiPoint, MultiLineString, MultiPolygon
GEOS_CLASSES = {0 : Point,
1 : LineString,
2 : LinearRing,
3 : Polygon,
4 : MultiPoint,
5 : MultiLineString,
6 : MultiPolygon,
7 : GeometryCollection,
}
# If supported, import the PreparedGeometry class.
if GEOS_PREPARE:
from django.contrib.gis.geos.prepared import PreparedGeometry
|
|
import csv
import pickle
import requests
import os
import re
from elasticsearch import Elasticsearch
class MOD():
INDEX_NAME = 'searchable_items_blue'
DOC_TYPE = 'searchable_item'
go_blacklist = ("GO:0008150", "GO:0003674", "GO:0005575")
gene_bkp_filename = "genes_bkp.pickle"
go_bkp_filename = "go_bkp.pickle"
diseases_bkp_filename = "diseases_bkp.pickle"
go_dataset = {}
omim_dataset = {}
genes = {}
go = {}
diseases = {}
def __init__(self):
self._load_omim_dataset()
self._load_go_dataset()
self.es = Elasticsearch(os.environ['ES_URI'], retry_on_timeout=True)
@staticmethod
def factory(organism):
from worm import WormBase
if organism in ("Caenorhabditis elegans", "C. elegans", "CAEEL"):
return WormBase()
else:
return None
@staticmethod
def _process_gene_id_from_panther(gene_ids_panther, genes):
gene_ids = gene_ids_panther.split("|")
mod = MOD.factory(gene_ids[0])
if mod is None:
return None
gene_id = mod.gene_id_from_panther(gene_ids[1])
gene_symbol = ""
if mod.__class__.__module__ == "human":
gene_symbol = gene_id
else:
if gene_id not in genes:
return None
else:
gene_symbol = genes[gene_id]["gene_symbol"]
return {
"id": gene_id,
"symbol": gene_symbol,
"href": mod.gene_href(gene_id),
"species": mod.species
}
def _load_omim_dataset(self):
if MOD.omim_dataset != {}:
return
print "loading OMIM dataset from file..."
with open("data/OMIM_diseases.txt", "r") as f:
reader = csv.reader(f, delimiter='\t')
next(reader, None)
next(reader, None)
next(reader, None)
for row in reader:
if len(row) < 3:
continue
name_column = row[2].split(";")
name = name_column[0].strip()
if len(name_column) > 1:
symbol = name_column[1].strip()
else:
symbol = None
synonyms = []
for r in (row[3], row[4]):
if r == '':
continue
alternative_names = r.split(";;")
for alt_name_symbol in alternative_names:
alt_name_symbol = alt_name_symbol.split(";")
alt_name = alt_name_symbol[0].strip().lower()
if len(alt_name_symbol) > 1:
alt_symbol = ", " + alt_name_symbol[1].strip()
else:
alt_symbol = ""
synonyms.append(alt_name + alt_symbol)
MOD.omim_dataset["OMIM:" + row[1]] = {
"prefix": row[0],
"name": name.lower(),
"symbol": symbol,
"disease_synonyms": synonyms
}
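# Illustrative note (hypothetical values, not executed): after the three skipped header rows the
# parser assumes tab-separated columns of the form
#   prefix, MIM number, 'Preferred Title; SYMBOL', alternative titles, included titles
# so a row like ('...', '104300', 'ALZHEIMER DISEASE; AD', '', '') would be stored under the key
# 'OMIM:104300' with name 'alzheimer disease' and symbol 'AD'.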
def _load_go_dataset(self):
if MOD.go_dataset != {}:
return
print "Loading GO dataset from file..."
with open("data/go.obo", "r") as f:
creating_term = None
for line in f:
line = line.strip()
if line == "[Term]":
creating_term = True
elif creating_term:
key = (line.split(":")[0]).strip()
value = ("".join(":".join(line.split(":")[1:]))).strip()
if key == "id":
creating_term = value
MOD.go_dataset[creating_term] = {}
else:
if key == "synonym":
if value.split(" ")[-2] == "EXACT":
value = (" ".join(value.split(" ")[:-2]))[1:-1]
else:
continue
if key == "def":
m = re.search('\"(.+)\"', value)
value = m.group(1)
if key in MOD.go_dataset[creating_term]:
MOD.go_dataset[creating_term][key].append(value)
else:
MOD.go_dataset[creating_term][key] = [value]
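# Illustrative note (not executed): the parser above expects go.obo [Term] stanzas shaped like
#   [Term]
#   id: GO:0006915
#   name: apoptotic process
#   namespace: biological_process
#   def: "A programmed cell death process ..." [GOC:...]
#   synonym: "apoptosis" NARROW []
# Only synonyms tagged EXACT are kept, and only the quoted part of `def` is stored.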
def add_go_annotation_to_gene(self, gene_id, go_id):
if go_id not in self.go_dataset or go_id in MOD.go_blacklist or gene_id not in self.genes:
return
gene_symbol = self.genes[gene_id]["gene_symbol"].upper()
if go_id in self.go:
if gene_symbol not in self.go[go_id]["go_genes"]:
self.go[go_id]["go_genes"].append(gene_symbol)
if self.species not in self.go[go_id]["go_species"]:
self.go[go_id]["go_species"].append(self.species)
else:
self.go[go_id] = {
"go_genes": [gene_symbol],
"go_species": [self.species],
"name": self.go_dataset[go_id]["name"][0],
"description": self.go_dataset[go_id]["def"][0],
"go_type": self.go_dataset[go_id]["namespace"][0],
"go_synonyms": self.go_dataset[go_id].get("synonym"),
"name_key": self.go_dataset[go_id]["name"][0],
"id": go_id,
"href": "http://amigo.geneontology.org/amigo/term/" + go_id,
"category": "go"
}
if self.go[go_id]["name"] not in self.genes[gene_id]["gene_" + self.go[go_id]["go_type"]]:
self.genes[gene_id]["gene_" + self.go[go_id]["go_type"]].append(self.go[go_id]["name"])
def add_disease_annotation_to_gene(self, gene_id, omim_id):
if omim_id not in self.omim_dataset or gene_id not in self.genes:
return
gene_symbol = self.genes[gene_id]["gene_symbol"].upper()
if omim_id in self.diseases:
if gene_symbol not in self.diseases[omim_id]["disease_genes"]:
self.diseases[omim_id]["disease_genes"].append(gene_symbol)
if self.species not in self.diseases[omim_id]["disease_species"]:
self.diseases[omim_id]["disease_species"].append(self.species)
else:
self.diseases[omim_id] = {
"disease_genes": [gene_symbol],
"disease_species": [self.species],
"name": self.omim_dataset[omim_id]["name"],
"symbol": self.omim_dataset[omim_id]["symbol"],
"disease_synonyms": self.omim_dataset[omim_id]["disease_synonyms"],
"name_key": self.omim_dataset[omim_id]["name"],
"id": omim_id,
"key": omim_id,
"href": "http://omim.org/entry/" + omim_id.split(":")[1],
"category": "disease"
}
def load_from_file(self, filename):
if os.path.isfile(filename):
with open(filename, "rb") as f:
return pickle.load(f)
return None
def load_data_from_file(self):
print "Loading genes from file..."
self.genes = self.load_from_file(self.gene_bkp_filename)
print "Loading go from file..."
self.go = self.load_from_file(self.go_bkp_filename)
print "Loading diseases from file..."
self.diseases = self.load_from_file(self.diseases_bkp_filename)
if self.genes is None or self.go is None or self.diseases is None:
print ("Fail loading data from backup")
def save_dict_into_file(self, data, filename):
with open(filename, "wb") as f:
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
def save_into_file(self):
print "Saving genes into file..."
self.save_dict_into_file(self.genes, self.gene_bkp_filename)
print "Saving go into file..."
self.save_dict_into_file(self.go, self.go_bkp_filename)
print "Saving diseases into file..."
self.save_dict_into_file(self.diseases, self.diseases_bkp_filename)
def delete_mapping(self):
print "Deleting mapping..."
response = requests.delete(os.environ['ES_URI'] + self.INDEX_NAME + "/")
if response.status_code != 200:
print "ERROR: " + str(response.json())
else:
print "SUCCESS"
def put_mapping(self):
from mapping import mapping
print "Putting mapping... "
response = requests.put(os.environ['ES_URI'] + self.INDEX_NAME + "/", json=mapping)
if response.status_code != 200:
print "ERROR: " + str(response.json())
else:
print "SUCCESS"
def index_into_es(self, data):
bulk_data = []
for id in data:
bulk_data.append({
'index': {
'_index': self.INDEX_NAME,
'_type': self.DOC_TYPE,
'_id': id
}
})
bulk_data.append(data[id])
if len(bulk_data) == 300:
self.es.bulk(index=self.INDEX_NAME, body=bulk_data, refresh=True)
bulk_data = []
if len(bulk_data) > 0:
self.es.bulk(index=self.INDEX_NAME, body=bulk_data, refresh=True)
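# The bulk body alternates action and document lines, roughly (illustrative):
#   {"index": {"_index": INDEX_NAME, "_type": DOC_TYPE, "_id": "GO:0000001"}}
#   {"go_genes": [...], "go_species": [...], "name": ..., "category": "go"}
# and is flushed every 150 documents (300 list entries), plus one final
# partial batch for whatever remains.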
def index_genes_into_es(self):
print "Indexing genes into ES..."
self.index_into_es(self.genes)
def index_go_into_es(self):
print "Indexing go into ES..."
self.index_into_es(self.go)
def index_diseases_into_es(self):
print "Indexing diseases into ES..."
self.index_into_es(self.diseases)
def index_all_into_es(self):
self.index_genes_into_es()
self.index_go_into_es()
self.index_diseases_into_es()
|
|
import json
import pytest
from werkzeug.exceptions import NotFound
from arrested import (
Endpoint, GetListMixin, GetObjectMixin,
ResponseHandler, RequestHandler, ObjectMixin
)
from mock import patch
from tests.endpoints import CharactersEndpoint, CharacterEndpoint
GET = pytest.mark.GET
POST = pytest.mark.POST
GET_OBJ = pytest.mark.GET_OBJ
PUT = pytest.mark.PUT
DELETE = pytest.mark.DELETE
class GetListEndpoint(CharactersEndpoint):
pass
def test_get_list_mixin_handle_get_request_no_objects(app):
"""assert the GetListMixin handles null objects correctly in the response payload.
"""
with patch.object(GetListEndpoint, 'get_objects', return_value=None) as mock_method:
endpoint = GetListEndpoint()
resp = endpoint.get()
assert mock_method.called
assert resp.data.decode("utf-8") == '{"payload": null}'
def test_get_list_mixin_handle_get_request_get_objects_not_defined(app):
"""assert the GetListMixin interface
"""
class GetListEndpoint(Endpoint, GetListMixin):
pass
with pytest.raises(NotImplementedError):
endpoint = GetListEndpoint()
endpoint.get()
def test_get_list_mixin_handle_request_sets_response_attr(app):
"""Ensure that when the handle_get_request method is called the ResponseHandler is
instantiated and set against the Endpoint.response attribute.
"""
endpoint = GetListEndpoint()
endpoint.get()
assert isinstance(endpoint.response, ResponseHandler)
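# For reference, a concrete list endpoint typically looks like the sketch
# below (names are illustrative; CharactersEndpoint in tests.endpoints is
# assumed to follow the same pattern):
#
#   class MyListEndpoint(Endpoint, GetListMixin):
#       def get_objects(self):
#           return [{'name': 'obi-wan'}]
#
# GET requests are then serialised by the configured ResponseHandler into a
# {"payload": ...} envelope, as the assertions in these tests show.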
def test_get_list_mixin_sets_objects(app):
"""Ensure objetcs param is set whenever a get request is handled by the mixin
"""
endpoint = GetListEndpoint()
mock_objects = [{'foo': 'bar'}]
with patch.object(GetListEndpoint, 'get_objects', return_value=mock_objects):
endpoint.get()
assert endpoint.objects == mock_objects
def test_get_list_mixin_list_response(app):
"""assert the list_response method returns a valid response object.
"""
endpoint = GetListEndpoint()
mock_objects = [{'foo': 'bar'}]
with patch.object(GetListEndpoint, 'get_objects', return_value=mock_objects):
resp = endpoint.get()
assert resp.mimetype == 'application/json'
assert 'Content-Type' in resp.headers
assert resp.status_code == 200
assert resp.data == b'{"payload": [{"foo": "bar"}]}'
def test_get_object_mixin_handle_get_request_none_not_allowed(app):
"""assert the GetObjectMixin handles raises a 404 when get_object returns none and
allow none is false.
"""
with patch.object(CharacterEndpoint, 'get_object', return_value=None):
endpoint = CharacterEndpoint()
endpoint.allow_none = False
with pytest.raises(NotFound):
endpoint.get()
def test_get_object_mixin_handle_get_request_allow_none(app):
"""assert the GetObjectMixin handles no object correctly when allow_none=True
"""
with patch.object(CharacterEndpoint, 'get_object', return_value=None) as mock_method:
endpoint = CharacterEndpoint()
endpoint.allow_none = True
resp = endpoint.get()
assert mock_method.called
assert resp.data.decode("utf-8") == '{"payload": null}'
def test_get_object_mixin_handle_get_request_get_object_not_defined(app):
"""assert the GetObjectMixin interface
"""
class MyObjectEndpoint(Endpoint, GetObjectMixin):
pass
with pytest.raises(NotImplementedError):
endpoint = MyObjectEndpoint()
endpoint.get()
def test_get_object_mixin_handle_request_sets_response_attr(app):
"""Ensure that when the handle_get_request method is called the ResponseHandler is
instantiated and set against the Endpoint.response attribute.
"""
endpoint = CharacterEndpoint()
endpoint.kwargs = {}
endpoint.kwargs['obj_id'] = 1 # simulate dispatch_request being called.
endpoint.get()
assert isinstance(endpoint.response, ResponseHandler)
def test_get_object_mixin_sets_object(app):
"""Ensure obj param is set whenever a get request is handled by the mixin
"""
endpoint = CharacterEndpoint()
mock_object = {'foo': 'bar'}
with patch.object(CharacterEndpoint, 'get_object', return_value=mock_object):
endpoint.get()
assert endpoint.obj == mock_object
def test_get_object_mixin_obj_response(app):
"""assert the list_response method returns a valid response object.
"""
endpoint = CharacterEndpoint()
mock_object = {'foo': 'bar'}
with patch.object(CharacterEndpoint, 'get_object', return_value=mock_object):
resp = endpoint.get()
assert resp.mimetype == 'application/json'
assert 'Content-Type' in resp.headers
assert resp.status_code == 200
assert resp.data == b'{"payload": {"foo": "bar"}}'
def test_create_mixin_response(app, client):
endpoint = CharactersEndpoint()
app.add_url_rule(
'/characters',
view_func=endpoint.as_view('characters'), methods=['POST']
)
resp = client.post(
'/characters',
data=json.dumps({'bar': 'baz'}),
headers={'content-type': 'application/json'}
)
assert resp.status_code == 201
assert resp.data == b'{"payload": {"bar": "baz"}}'
def test_create_mixin_sets_obj_from_request_handler(app, client):
class MockRequstHandler(RequestHandler):
def handle(self, data, **kwargs):
return {'mock': True}
class MyEndpoint(CharactersEndpoint):
request_handler = MockRequstHandler
endpoint = MyEndpoint()
endpoint.post()
assert endpoint.obj == {'mock': True}
def test_create_mixin_sets_request_handler(app):
class MockRequstHandler(RequestHandler):
def handle(self, data, **kwargs):
return {'mock': True}
class MyEndpoint(CharactersEndpoint):
request_handler = MockRequstHandler
endpoint = MyEndpoint()
endpoint.post()
assert isinstance(endpoint.request, MockRequstHandler)
def test_create_mixin_invalid_json(app, client):
endpoint = CharactersEndpoint()
app.add_url_rule(
'/characters',
view_func=endpoint.as_view('characters'), methods=['POST']
)
resp = client.post(
'/characters',
data='foo',
headers={'content-type': 'application/json'}
)
assert resp.status_code == 400
assert resp.data == b'{"message": "Invalid JSON data provided"}'
def test_create_mixin_calls_save_object(app):
class MockRequstHandler(RequestHandler):
def handle(self, data, **kwargs):
return {'mock': True}
class MyEndpoint(CharactersEndpoint):
request_handler = MockRequstHandler
def save_object(self, obj):
obj['is_saved'] = True
return obj
endpoint = MyEndpoint()
endpoint.post()
assert 'is_saved' in endpoint.obj
def test_object_mixin_obj_property_calls_get_object():
class MyEndpoint(Endpoint, ObjectMixin):
pass
with patch.object(MyEndpoint, 'get_object', return_value={'foo': 'bar'}) as mocked:
endpoint = MyEndpoint()
endpoint.obj
mocked.assert_called_once()
def test_object_mixin_obj_property_sets_obj():
class MyEndpoint(Endpoint, ObjectMixin):
pass
with patch.object(MyEndpoint, 'get_object', return_value={'foo': 'bar'}) as mocked:
endpoint = MyEndpoint()
endpoint.obj
assert endpoint._obj == {'foo': 'bar'}
@PUT
def test_PUT_calls_handle_put_request(app):
class MyEndpoint(CharacterEndpoint):
def get_object(self):
obj = {'foo': 'bar'}
return obj
with patch.object(MyEndpoint, 'handle_put_request') as mock_handle_meth:
endpoint = MyEndpoint()
endpoint.put()
mock_handle_meth.assert_called_once()
@PUT
def test_handle_PUT_request_calls_get_object(app):
class MyEndpoint(CharacterEndpoint):
def get_object(self):
obj = {'foo': 'bar'}
return obj
with patch.object(MyEndpoint, 'get_object', return_value={'foo': 'bar'}) as mock_get_object:
endpoint = MyEndpoint()
endpoint.put()
mock_get_object.assert_called_once()
@PUT
def test_PUT_mixin_sets_request_handler(app):
class MockRequstHandler(RequestHandler):
def handle(self, data, **kwargs):
return {'mock': True}
class MyEndpoint(CharacterEndpoint):
request_handler = MockRequstHandler
def get_object(self):
obj = {'foo': 'bar'}
return obj
endpoint = MyEndpoint()
endpoint.put()
assert isinstance(endpoint.request, MockRequstHandler)
@PUT
def test_handle_PUT_request_calls_update_object(app):
class MyEndpoint(CharacterEndpoint):
def get_object(self):
obj = {'foo': 'bar'}
return obj
with patch.object(MyEndpoint, 'update_object') as mock_update_object:
endpoint = MyEndpoint()
endpoint.put()
mock_update_object.assert_called_once()
@PUT
def test_PUT_request_response(app):
class MyEndpoint(CharacterEndpoint):
def get_object(self):
obj = {'foo': 'bar'}
return obj
endpoint = MyEndpoint()
resp = endpoint.put()
assert resp.status_code == 200
assert resp.data == b'{"payload": {"foo": "bar"}}'
@DELETE
def test_DELETE_calls_handle_delete_request(app):
class MyEndpoint(CharacterEndpoint):
def get_object(self):
obj = {'foo': 'bar'}
return obj
with patch.object(MyEndpoint, 'handle_delete_request') as mock_handle_meth:
endpoint = MyEndpoint()
endpoint.delete()
mock_handle_meth.assert_called_once()
@DELETE
def test_handle_DELETE_request_calls_get_object(app):
class MyEndpoint(CharacterEndpoint):
def get_object(self):
obj = {'foo': 'bar'}
return obj
with patch.object(MyEndpoint, 'get_object') as mock_get_object:
endpoint = MyEndpoint()
endpoint.delete()
mock_get_object.assert_called_once()
@DELETE
def test_handle_DELETE_request_calls_delete_object(app):
class MyEndpoint(CharacterEndpoint):
def get_object(self):
obj = {'foo': 'bar'}
return obj
with patch.object(MyEndpoint, 'delete_object') as mock_delete_object:
endpoint = MyEndpoint()
endpoint.delete()
mock_delete_object.assert_called_once_with({'foo': 'bar'})
@DELETE
def test_handle_DELETE_request_response(app):
class MyEndpoint(CharacterEndpoint):
def get_object(self):
obj = {'foo': 'bar'}
return obj
endpoint = MyEndpoint()
resp = endpoint.delete()
assert resp.status_code == 204
assert resp.data == b''
|
|
# -*-*- encoding: utf-8 -*-*-
#
# gateway4labs is free software: you can redistribute it and/or modify
# it under the terms of the BSD 2-Clause License
# gateway4labs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
"""
This will handle the main application routes. It will be in charge of
registering all the blueprints that it needs and exposing some of the
basic routes (like login and logout).
"""
import os
from cgi import escape
from labmanager.utils import FullyQuotedUrlConverter, EverythingConverter
from flask import Flask, render_template, redirect, url_for
app = Flask(__name__)
app.url_map.converters['quoted_url'] = FullyQuotedUrlConverter
app.url_map.converters['everything'] = EverythingConverter
app.config.from_object('config')
# Try to support SQLALCHEMY_ENGINE_STR
if 'SQLALCHEMY_DATABASE_URI' not in app.config and 'SQLALCHEMY_ENGINE_STR' in app.config:
print "WARNING: SQLALCHEMY_ENGINE_STR is deprecated. Change it for SQLALCHEMY_DATABASE_URI"
app.config['SQLALCHEMY_DATABASE_URI'] = app.config['SQLALCHEMY_ENGINE_STR']
if 'SQLALCHEMY_POOL_RECYCLE' not in app.config and app.config['SQLALCHEMY_DATABASE_URI'].startswith('mysql'):
print "WARNING: SQLALCHEMY_POOL_RECYCLE not set. Defaults to 3600. Put it in the configuration file"
app.config['SQLALCHEMY_POOL_RECYCLE'] = 3600
if 'SESSION_COOKIE_PATH' not in app.config or not app.config.get('SESSION_COOKIE_PATH'):
print "WARNING: You should always set SESSION_COOKIE_PATH to / or /whatever, wherever the application is, to avoid conflicts between different deployments"
if app.config['DEBUG']:
app.secret_key = 'secret'
import labmanager.views.fake_lms as fake_lms
assert fake_lms is not None # Avoid flakes warning
else:
app.secret_key = os.urandom(32)
app.config['SESSION_COOKIE_NAME'] = 'g4lsession'
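# A minimal config module for this application might look like the sketch
# below (illustrative values; only keys referenced in this file are shown):
#
#   DEBUG = False
#   SQLALCHEMY_DATABASE_URI = 'mysql://user:password@localhost/gateway4labs'
#   SQLALCHEMY_POOL_RECYCLE = 3600
#   SESSION_COOKIE_PATH = '/'
#   ADMINS = ['admin@example.com']
#   SMTP_SERVER = 'localhost'
#   SENDER_ADDR = 'noreply@example.com'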
# Initialize the logging mechanism to send error 500 mails to the administrators
if not app.debug and app.config.get("ADMINS") is not None and app.config.get("SMTP_SERVER") is not None:
import logging
import pprint
from logging.handlers import SMTPHandler
class MailLoggingFilter(logging.Filter):
def filter(self, record):
record.environ = pprint.pformat(request.environ)
return True
app.logger.addFilter(MailLoggingFilter())
smtp_server = app.config.get("SMTP_SERVER")
from_addr = app.config.get("SENDER_ADDR")
to_addrs = app.config.get("ADMINS")
mail_handler = SMTPHandler(smtp_server,
from_addr,
to_addrs,
"gateway4labs Application Error Report")
formatter = logging.Formatter(
'''
Message type: %(levelname)s
Location: %(pathname)s:%(lineno)d
Module: %(module)s
Function: %(funcName)s
Time: %(asctime)s
Message:
%(message)s
Environment:
%(environ)s
Stack Trace:
''')
mail_handler.setFormatter(formatter)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
@app.route("/error")
def error():
return 2/0
@app.errorhandler(404)
def not_found(e):
return "404 not found", 404
@app.errorhandler(403)
def forbidden(e):
return "403 forbidden", 403
@app.errorhandler(412)
def precondition_failed(e):
return "412 precondition failed", 412
@app.route('/favicon.ico')
def favicon():
return redirect(url_for('static', filename='favicon.ico'))
@app.route("/site-map")
def site_map():
lines = []
for rule in app.url_map.iter_rules():
line = str(escape(repr(rule)))
lines.append(line)
ret = "<br>".join(lines)
return ret
@app.route("/")
def index():
"""Global index for the whole application."""
golab = app.config.get('GOLAB', False)
return render_template("index.html", golab = golab)
@app.route("/developers")
def developers():
"""Developer information about gateway4labs."""
return render_template("developers.html")
@app.route("/about")
def about():
"""Global information about gateway4labs."""
return render_template("about.html")
@app.teardown_request
def shutdown_session(exception = None):
db.session.remove()
from labmanager.babel import Babel
from flask import request
if Babel is None:
print "Not using Babel. Everything will be in English"
else:
babel = Babel(app)
supported_languages = ['en']
supported_languages.extend([ translation.language for translation in babel.list_translations() ])
@babel.localeselector
def get_locale():
if app.config.get('TRANSLATE_LABMANAGER', True):
locale = request.args.get('locale', None)
if locale is None:
locale = request.accept_languages.best_match(supported_languages)
if locale is None:
locale = 'en'
# print "Locale requested. Got: ", locale
return locale
else:
return 'en'
@babel.timezoneselector
def get_timezone():
#timezone = request.args.get('timezone', 'en')
#print "Timezone requested. Got: ", timezone
#return timezone
# TODO
return None
#
# Initialize administration panels
#
from labmanager.db import db
assert db is not None
from .views.admin import init_admin
init_admin(app)
from .views.public import init_public_admin
init_public_admin(app)
from .views.lms.admin import init_lms_admin
init_lms_admin(app)
from .views.lms.instructor import init_instructor_admin
init_instructor_admin(app)
from .views.ple.admin import init_ple_admin
init_ple_admin(app)
from .views.ple.instructor import init_ple_instructor_admin
init_ple_instructor_admin(app)
#
# Initialize login subsystem
#
from .views import authn
assert authn is not None # Avoid warnings
|
|
import collections
try: # pragma: no cover
import unittest2 as unittest
except ImportError: # pragma: no cover
import unittest
try: # pragma: no cover
from unittest import mock
except ImportError: # pragma: no cover
import mock
import irc.client
import nirc.core
import nirc.errors
def _privmsg_dispatch(m):
def f(connection, user, target, message):
m(connection=connection, user=user, target=target, message=message)
return f
class DispatchTestCase(unittest.TestCase):
def setUp(self):
self.dispatch = nirc.core.Dispatch()
self.dispatch.clear()
def test_init(self):
# Did I screw up __init__?
self.assertIsInstance(self.dispatch.event_specs, dict)
self.assertIsInstance(self.dispatch.events, collections.defaultdict)
self.assertIs(self.dispatch.events.default_factory, list)
def test_clear_events(self):
m = mock.MagicMock()
event = 'privmsg'
args = ['connection', 'user', 'target', 'message']
self.dispatch.add_event(event, args)
f = _privmsg_dispatch(m)
closure = self.dispatch.subscribe(event)
closure(f)
self.dispatch.clear()
self.assertEqual({}, self.dispatch.event_specs)
self.assertEqual({}, self.dispatch.events)
def test_add_event(self):
event = 'privmsg'
args = ['connection', 'user', 'target', 'message']
expected = {event: args}
self.dispatch.add_event(event, args)
assert expected.items() <= self.dispatch.event_specs.items()
def test_add_event_dup(self):
event = 'privmsg'
self.dispatch.add_event(event)
with self.assertRaisesRegexp(ValueError, 'already defined'):
self.dispatch.add_event(event)
def test_add_event_keyword(self):
with self.assertRaisesRegexp(ValueError, 'keyword'):
self.dispatch.add_event('privmsg', ['if'])
def test_add_event_invalid(self):
with self.assertRaisesRegexp(ValueError, 'identifier'):
self.dispatch.add_event('privmsg', ['not valid'])
def test_subscribe_returns_callable(self):
event = 'privmsg'
args = ['connection', 'user', 'target', 'message']
self.dispatch.add_event(event, args)
closure = self.dispatch.subscribe('privmsg')
assert callable(closure)
def test_subscribe_rejects_bad_event(self):
with self.assertRaisesRegexp(ValueError, 'not defined'):
self.dispatch.subscribe('privmsg')
def test_subscribe_rejects_bad_args(self):
event = 'privmsg'
args = ['connection', 'user', 'target', 'message']
self.dispatch.add_event(event, args)
closure = self.dispatch.subscribe(event)
with self.assertRaisesRegexp(ValueError, 'correct arguments'):
closure(mock.MagicMock().__call__)
def test_subscribe_adds_good_args(self):
m = mock.MagicMock()
event = 'privmsg'
args = ['connection', 'user', 'target', 'message']
self.dispatch.add_event(event, args)
f = _privmsg_dispatch(m)
expected = {event: [f]}
closure = self.dispatch.subscribe(event)
closure(f)
assert expected.items() <= self.dispatch.events.items()
def test_fire(self):
m = mock.MagicMock()
event = 'privmsg'
args = ['connection', 'user', 'target', 'message']
passargs = {
'connection': None,
'user': '|Nyx|',
'target': 'Tritium',
'message': 'Hello There'
}
self.dispatch.add_event(event, args)
f = _privmsg_dispatch(m)
closure = self.dispatch.subscribe(event)
closure(f)
exit = self.dispatch.fire(event, **passargs)
m.assert_called_once_with(**passargs)
self.assertIs(exit, True)
def test_fire_nocallback(self):
event = 'privmsg'
args = ['connection', 'user', 'target', 'message']
passargs = {
'connection': None,
'user': '|Nyx|',
'target': 'Tritium',
'message': 'Hello There'
}
self.dispatch.add_event(event, args)
exit = self.dispatch.fire(event, **passargs)
self.assertIs(exit, False)
def test_fire_noevent(self):
with self.assertRaisesRegexp(
nirc.errors.EventUndefinedError, 'not defined'):
self.dispatch.fire('privmsg')
def test_fire_bad_args(self):
m = mock.MagicMock()
event = 'privmsg'
args = ['connection', 'user', 'target', 'message']
self.dispatch.add_event(event, args)
f = _privmsg_dispatch(m)
closure = self.dispatch.subscribe(event)
closure(f)
with self.assertRaisesRegexp(ValueError, 'unexpected argument'):
self.dispatch.fire(event, nick='|Nyx|')
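# Taken together, the tests above exercise a usage pattern along these lines
# (a sketch, assuming nirc.core.Dispatch behaves exactly as tested here):
#
#   dispatch = nirc.core.Dispatch()
#   dispatch.add_event('privmsg', ['connection', 'user', 'target', 'message'])
#
#   def on_privmsg(connection, user, target, message):
#       print(message)
#
#   dispatch.subscribe('privmsg')(on_privmsg)
#   dispatch.fire('privmsg', connection=None, user='|Nyx|',
#                 target='Tritium', message='Hello There')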
class ManagerTestCase(unittest.TestCase):
def setUp(self):
self.real = {
'IRC': irc.client.IRC,
'Dispatch': nirc.core.Dispatch,
}
self.IRC = irc.client.IRC = mock.MagicMock()
self.IRC.return_value = self.IRC
self.IRC.server = mock.MagicMock()
self.IRC.server.return_value = self.IRC.server
self.IRC.process_forever = mock.MagicMock()
self.IRC.add_global_handler = mock.MagicMock()
self.Dispatch = nirc.core.Dispatch = mock.MagicMock()
self.Dispatch.return_value = self.Dispatch
self.Dispatch.fire = mock.MagicMock()
self.manager = nirc.core.Manager()
def tearDown(self):
irc.client.IRC = self.real['IRC']
nirc.core.Dispatch = self.real['Dispatch']
def test_initialized(self):
self.IRC.assert_called_once_with()
self.assertEqual(self.manager.connections, [])
self.assertIs(self.manager.client, self.IRC)
self.assertIs(self.manager.dispatch, self.Dispatch)
self.IRC.add_global_handler.assert_called_once_with(
'all_events',
self.manager._default_handler,
-100
)
def test_run_calls_process_forever(self):
self.manager.run()
self.IRC.process_forever.assert_called_once_with()
def test_connection_calls_server(self):
self.manager.connection()
self.IRC.server.assert_called_once_with()
def test_connection_adds_to_connections(self):
self.manager.connection()
self.assertEqual(
self.manager.connections,
[self.IRC.server],
)
def test_handle_event_fires(self):
con = self.manager.connection()
scon = self.IRC.server
ev = irc.client.Event(
'any',
irc.client.NickMask('|Nyx|!alexis@venom.sdamon.com'),
None,
None
)
self.manager._default_handler(scon, ev)
self.Dispatch.fire.assert_called_once_with(
'any',
connection=con,
event=ev
)
|
|
# coding=utf-8
import logging
from django.conf import settings
from django.conf.urls import url, include
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse, Http404
from django.utils.functional import cached_property
from django.views.decorators.csrf import csrf_exempt
import sys
from odin.codecs import json_codec
from odin.exceptions import ValidationError, CodecDecodeError
from baldr import content_type_resolvers
from baldr.exceptions import ImmediateErrorHttpResponse, ImmediateHttpResponse
from baldr.resources import Error, Listing
CODECS = {json_codec.CONTENT_TYPE: json_codec}
# Attempt to load other codecs that have dependencies
try:
from odin.codecs import msgpack_codec
except ImportError:
pass
else:
CODECS[msgpack_codec.CONTENT_TYPE] = msgpack_codec
logger = logging.getLogger('baldr.request')
class ResourceApiCommon(object):
# The resource this API is modelled on.
resource = None
resource_id_regex = r'\d+'
# Handlers used to resolve the content-type of the request body.
# These are checked in the order defined until one returns a content-type.
request_type_resolvers = [
content_type_resolvers.content_type_header(),
content_type_resolvers.accepts_header(),
content_type_resolvers.settings_default(),
]
# Handlers used to resolve the response content-type.
# These are checked in the order defined until one returns a content-type.
response_type_resolvers = [
content_type_resolvers.accepts_header(),
content_type_resolvers.content_type_header(),
content_type_resolvers.settings_default(),
]
# Codecs that are supported for Encoding/Decoding resources.
registered_codecs = CODECS
url_prefix = r''
def __init__(self, api_name=None):
if api_name:
self.api_name = api_name
elif not hasattr(self, 'api_name'):
self.api_name = "%ss" % self.resource._meta.name
def url(self, regex, view, kwargs=None, name=None, prefix=''):
"""
Behaves like the django built in ``url`` method but constrains the URL to the API name.
:param regex: This should be a partial regex that applies only to the targeted method, e.g.::
self.url("(\d+)", ...)
"""
if regex:
return url(r'^%s/%s/?$' % (self.url_prefix + self.api_name.lower(), regex), view, kwargs, name, prefix)
else:
return url(r'^%s/?$' % (self.url_prefix + self.api_name.lower()), view, kwargs, name, prefix)
@property
def urls(self):
"""
Return url conf for resource object.
"""
return self.base_urls()
def resolve_request_type(self, request):
"""
Resolve the request content type from the request.
:returns: Identified content type; or ``None`` if content type is not identified.
"""
for resolver in self.request_type_resolvers:
content_type = resolver(request)
if content_type:
return content_type
def resolve_response_type(self, request):
"""
Resolve the response content type from the request.
:returns: Identified content type; or ``None`` if content type is not identified.
"""
for resolver in self.response_type_resolvers:
content_type = resolver(request)
if content_type:
return content_type
@staticmethod
def handle_500(request, exception):
"""
Handle *un-handled* exceptions
:param request: The request object.
:param exception: The exception that was un-handled.
:return: An ``HttpError`` response resource.
"""
exc_info = sys.exc_info()
# This is an unknown exception, return an unknown error message.
if settings.DEBUG:
# If we are in debug mode return more details and the stack-trace.
import traceback
the_trace = '\n'.join(traceback.format_exception(*exc_info))
return Error(500, 50000, "An unknown error has occurred, the developers have been notified.",
str(exception), the_trace)
else:
logger.error('Internal Server Error: %s', request.path, exc_info=exc_info, extra={
'status_code': 500,
'request': request
})
return Error(500, 50000, "An unknown error has occurred, the developers have been notified.")
def base_urls(self):
"""
Base URL mappings for this API.
"""
return []
@staticmethod
def decode_body(request):
"""
Helper method that decodes any body content into a string object (this is needed by the json
module for example).
"""
body = request.body
if isinstance(body, bytes):
return body.decode('UTF8')
return body
def resource_from_body(self, request, allow_multiple=False, resource=None):
"""
Get a resource instance from ``request.body``.
"""
resource = resource or self.resource
try:
body = self.decode_body(request)
except UnicodeDecodeError as ude:
raise ImmediateErrorHttpResponse(400, 40099, "Unable to decode request body.", str(ude))
try:
resource = request.request_codec.loads(body, resource=resource, full_clean=False)
except ValueError as ve:
raise ImmediateErrorHttpResponse(400, 40098, "Unable to load resource.", str(ve))
except CodecDecodeError as cde:
raise ImmediateErrorHttpResponse(400, 40096, "Unable to decode body.", str(cde))
# Check an array of data hasn't been supplied
if not allow_multiple and isinstance(resource, list):
raise ImmediateErrorHttpResponse(400, 40097, "Expected a single resource not a list.")
return resource
def dispatch_to_view(self, view, request, *args, **kwargs):
raise NotImplementedError()
def wrap_view(self, view):
"""
This method provides the main entry point for URL mappings in the ``base_urls`` method.
"""
@csrf_exempt
def wrapper(request, *args, **kwargs):
# Resolve content type used to encode/decode request/response content.
response_type = self.resolve_response_type(request)
request_type = self.resolve_request_type(request)
try:
request.request_codec = self.registered_codecs[request_type]
request.response_codec = response_codec = self.registered_codecs[response_type]
except KeyError:
# This is just a plain HTTP response, we can't provide a rich response when the content type is unknown
return HttpResponse(content="Content cannot be returned in the format requested.", status=406)
try:
result = self.dispatch_to_view(view, request, *args, **kwargs)
except Http404 as e:
# Item is not found.
status = 404
resource = Error(status, 40400, str(e))
except ImmediateHttpResponse as e:
# An exception used to return a response immediately, skipping any further processing.
response = HttpResponse(
response_codec.dumps(e.resource),
content_type=response_codec.CONTENT_TYPE,
status=e.status
)
for key, value in (e.headers or {}).items():
response[key] = value
return response
except ValidationError as e:
# Validation of a resource has failed.
status = 400
if hasattr(e, 'message_dict'):
resource = Error(status, 40000, "Fields failed validation.", meta=e.message_dict)
else:
resource = Error(status, 40000, str(e))
except PermissionDenied as e:
status = 403
resource = Error(status, 40300, "Permission denied", str(e))
except NotImplementedError:
# A mixin method has not been implemented; as defining a mixin is explicit, this is considered a server
# error that should be addressed.
status = 501
resource = Error(status, 50100, "This method has not been implemented.")
except Exception as e:
# Special case when a request raises a 500 error. If we are in debug mode and a default is used (ie
# request does not explicitly specify a content type) fall back to the Django default exception page.
if settings.DEBUG and getattr(response_type, 'is_default', False):
raise
# Catch any other exceptions and pass them to the 500 handler for evaluation.
resource = self.handle_500(request, e)
status = resource.status
else:
if isinstance(result, tuple) and len(result) == 2:
resource, status = result
else:
resource = result
status = 204 if result is None else 200 # Return 204 (No Content) if result is None.
if resource is None:
return HttpResponse(status=status)
elif isinstance(resource, HttpResponse):
return resource
else:
return HttpResponse(
response_codec.dumps(resource),
content_type=response_codec.CONTENT_TYPE,
status=status
)
return wrapper
class ResourceApi(ResourceApiCommon):
"""
Provides an API that returns a specified resource object.
"""
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
def dispatch_to_view(self, view, request, *args, **kwargs):
callback = getattr(self, view)
return callback(request, *args, **kwargs)
def dispatch(self, request, request_type, **kwargs):
"""
Primary method used to dispatch incoming requests to the appropriate method.
"""
allowed_methods = getattr(self, "%s_allowed_methods" % request_type, [])
request_method = self.method_check(request, allowed_methods)
request.type = request_type
method = getattr(self, "%s_%s" % (request_method, request_type), None)
if method is None:
raise Http404()
# Authorisation hook
if hasattr(self, 'handle_authorisation'):
self.handle_authorisation(request)
# Allow for a pre_dispatch hook; a non-None response from pre_dispatch overrides kwargs
if hasattr(self, 'pre_dispatch'):
response = self.pre_dispatch(request, **kwargs)
if response is not None:
kwargs = response
result = method(request, **kwargs)
# Allow for a post_dispatch hook, the response of which is returned
if hasattr(self, 'post_dispatch'):
return self.post_dispatch(request, result)
else:
return result
def method_check(self, request, allowed):
request_method = request.method.lower()
if allowed is None:
raise Http404('`%s` not found.' % self.api_name)
if request_method not in allowed:
raise ImmediateErrorHttpResponse(405, 40500, "Method not allowed", headers={
'Allow': ','.join(map(str.upper, allowed))
})
return request_method
def base_urls(self):
return super(ResourceApi, self).base_urls() + [
# List URL
self.url(
r'',
self.wrap_view('dispatch_list')
),
# Detail URL
self.url(
r'(?P<resource_id>%s)' % self.resource_id_regex,
self.wrap_view('dispatch_detail')
)
]
def dispatch_list(self, request, **kwargs):
return self.dispatch(request, 'list', **kwargs)
def dispatch_detail(self, request, **kwargs):
return self.dispatch(request, 'detail', **kwargs)
class ActionMixin(ResourceApi):
"""
Mixin to the resource API to provide support for sub resources, actions, aggregations.
To hook up an action mixin, specify a method that matches the type of request you want to handle, e.g.::
def get_summary_action(self, request, resource_id):
pass
"""
actions = []
def base_urls(self):
urls = []
for action in self.actions:
urls += action.base_urls(self)
return urls + [
# List Action URL
self.url(
r'(?P<action>[-\w\d]+)',
self.wrap_view('dispatch_list_action')
),
# Detail Action URL
self.url(
r'(?P<resource_id>%s)/(?P<action>[-\w\d]+)' % self.resource_id_regex,
self.wrap_view('dispatch_detail_action')
),
] + super(ActionMixin, self).base_urls()
def dispatch_list_action(self, request, action, **kwargs):
return self.dispatch(request, "%s_list" % action, **kwargs)
def dispatch_detail_action(self, request, action, **kwargs):
return self.dispatch(request, "%s_detail" % action, **kwargs)
class ListMixin(ResourceApi):
"""
Mixin to the resource API that provides a nice listing API.
"""
def get_list(self, request):
offset = int(request.GET.get('offset', 0))
limit = int(request.GET.get('limit', 50))
result = self.list_resources(request, offset, limit)
return Listing(list(result), limit, offset)
def list_resources(self, request, offset, limit):
"""
Load resources
:param limit: Resource count limit.
:param offset: Offset within the list to return.
:return: List of resource objects.
"""
raise NotImplementedError
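# A concrete listing endpoint only has to supply list_resources, e.g. (a
# sketch; BookResource and Book are illustrative names, not part of baldr):
#
#   class BookApi(ListMixin):
#       resource = BookResource
#
#       def list_resources(self, request, offset, limit):
#           return Book.objects.all()[offset:offset + limit]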
class CreateMixin(ResourceApi):
"""
Mixin to the resource API to provide a Create API.
"""
def __init__(self, *args, **kwargs):
super(CreateMixin, self).__init__(*args, **kwargs)
self.list_allowed_methods.append('post')
def post_list(self, request):
resource = self.resource_from_body(request)
return self.create_resource(request, resource, False)
def put_list(self, request):
resource = self.resource_from_body(request)
return self.create_resource(request, resource, True)
def create_resource(self, request, resource, is_complete):
"""
Create method.
:param request: Django HttpRequest object.
:param resource: The resource included with the request.
:param is_complete: This is a complete resource (ie a PUT method).
"""
raise NotImplementedError
class RetrieveMixin(ResourceApi):
"""
Mixin to the resource API to provide a Retrieve API.
"""
def get_detail(self, request, resource_id):
return self.retrieve_resource(request, resource_id)
def retrieve_resource(self, request, resource_id):
"""
Retrieve method
:param request: Django HttpRequest object.
:param resource_id: The ID of the resource to retrieve.
"""
raise NotImplementedError
class UpdateMixin(ResourceApi):
"""
Mixin to the resource API to provide an Update API.
"""
def __init__(self, *args, **kwargs):
super(UpdateMixin, self).__init__(*args, **kwargs)
self.detail_allowed_methods.append('post')
def post_detail(self, request, resource_id):
resource = self.resource_from_body(request)
return self.update_resource(request, resource_id, resource, False)
def put_detail(self, request, resource_id):
resource = self.resource_from_body(request)
return self.update_resource(request, resource_id, resource, True)
def update_resource(self, request, resource_id, resource, is_complete):
"""
Update method.
:param request: Django HttpRequest object.
:param resource_id: The ID of the resource to update.
:param resource: The resource included with the request.
:param is_complete: This is a complete resource (ie a PUT method).
"""
raise NotImplementedError
class DeleteMixin(ResourceApi):
"""
Mixin to the resource API to provide a Delete API.
"""
def __init__(self, *args, **kwargs):
super(DeleteMixin, self).__init__(*args, **kwargs)
self.detail_allowed_methods.append('delete')
def delete_detail(self, request, resource_id):
return self.delete_resource(request, resource_id)
def delete_resource(self, request, resource_id):
"""
Delete method
:param request: Django HttpRequest object.
:param resource_id: The ID of the resource to delete.
"""
raise NotImplementedError
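# The CRUD mixins above can be combined on a single endpoint; a sketch of how
# that composition might look (illustrative names; the ellipses stand in for
# real implementations):
#
#   class BookApi(ListMixin, CreateMixin, RetrieveMixin, UpdateMixin, DeleteMixin):
#       resource = BookResource
#
#       def list_resources(self, request, offset, limit): ...
#       def create_resource(self, request, resource, is_complete): ...
#       def retrieve_resource(self, request, resource_id): ...
#       def update_resource(self, request, resource_id, resource, is_complete): ...
#       def delete_resource(self, request, resource_id): ...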
class ApiCollection(object):
"""
Collection of several resource APIs, along with helper methods for building URL patterns.
::
urlpatterns += Api(
ApiCollection(
UserApi(),
MyApi(),
)
).patterns()
"""
def __init__(self, *resource_apis, **kwargs):
self.api_name = kwargs.pop('api_name', 'api')
self.resource_apis = resource_apis
@cached_property
def urls(self):
urls = []
for resource_api in self.resource_apis:
urls.extend(resource_api.urls)
return urls
def include(self, namespace=None):
return include(self.urls, namespace)
def patterns(self, api_name=None):
api_name = api_name or self.api_name
return [url(r'^%s/' % api_name, self.include())]
class ApiVersion(ApiCollection):
"""
A versioned collection of several resource APIs, along with helper methods for building URL patterns.
"""
def __init__(self, *resource_apis, **kwargs):
kwargs.setdefault('api_name', kwargs.pop('version', 'v1'))
super(ApiVersion, self).__init__(*resource_apis, **kwargs)
class Api(object):
"""
An API (made up of versions).
::
urlpatterns += Api(
ApiVersion(
UserApi(),
MyApi(),
version='v1',
)
).patterns()
"""
def __init__(self, *versions, **kwargs):
self.versions = versions
self.api_name = kwargs.get('api_name', 'api')
def patterns(self):
urls = [url(r'^%s/%s/' % (self.api_name, v.api_name), v.include()) for v in self.versions]
urls.append(url(r'^%s/$' % self.api_name, self._unknown_version))
return urls
def _unknown_version(self, _):
supported_versions = [v.api_name for v in self.versions]
return HttpResponse(
"Unsupported API version. Available versions: %s" % ', '.join(supported_versions),
status=418  # I'm a teapot... Technically a bad request, but a distinct status code makes this case easy to spot.
)
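# Typical wiring in a Django urls.py then follows the docstrings above (a
# sketch; UserApi is an illustrative ResourceApi subclass):
#
#   urlpatterns += Api(
#       ApiVersion(
#           UserApi(),
#           version='v1',
#       )
#   ).patterns()
#
# which exposes routes such as /api/v1/users/ and /api/v1/users/<id>/
# (assuming UserApi resolves its api_name to 'users').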
|
|
"""
Functions for interacting with single game pgn files.
"""
import os
import time
from os.path import isfile, join
import random as rnd
import numpy as np
import chess.pgn
from pkg_resources import resource_filename
import guerilla.data_handler as dh
def read_pgn(filename, max_skip=80):
"""
Given a pgn filename, reads the file and returns a fen for each position in the game.
Input:
filename [String]:
the file name
max_skip [Int]:
The maximum number of half-moves which are skipped.
Output:
fens:
list of fen strings
"""
fens = []
with open(filename, 'r') as pgn:
game = chess.pgn.read_game(pgn)
while True:
fens.append(game.board().fen())
if game.is_end():
break
game = game.variation(0)
# Down sample based on half-move count
max_skip = min(max_skip, len(fens) - 1)
skip_start = rnd.randint(0, max_skip)
return fens[skip_start:]
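# Example (a sketch; 'some_game.pgn' is a placeholder path):
#   fens = read_pgn('some_game.pgn')
#   # -> list of FEN strings for the game, with a random number of opening
#   #    positions (up to max_skip) dropped from the front.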
def get_fens(generate_time, num_random=0, store_prob=0.0):
"""
Extracts fens from games and saves them.
Will read from all games in folder /pgn_files/single_game_pgns.
Inputs:
generate_time [Int]:
Number of seconds to spend extracting fens.
num_random [Int]:
Random half-moves applied to each sampled position before storing.
store_prob [Float]:
Probability of also storing the original (pre-random-move) position.
"""
# Set seed so that results are reproducible
rnd.seed(123456)
checkpoint_path = resource_filename('guerilla', 'data/extracted_data/game_num.txt')
games_path = resource_filename('guerilla', 'data/pgn_files/single_game_pgns')
game_num = 0
if os.path.isfile(checkpoint_path):
with open(checkpoint_path) as f:
l = f.readline()
game_num = int(l)
files = [f for f in os.listdir(games_path) if isfile(join(games_path, f))]
start_time = time.clock()
fen_count = 0
fname = 'fens_sampled'
print "starting at game num: {}".format(game_num)
with open(resource_filename('guerilla', 'data/extracted_data/{}.csv'.format(fname)), 'w') as fen_file:
print "Opened fens output file..."
while (time.clock() - start_time) < generate_time and game_num < len(files):
fens = read_pgn(games_path + '/' + files[game_num], max_skip=0)
# Randomly choose 3
fens = np.random.choice(fens, 3, replace=False)
for fen in fens:
board = chess.Board(fen)
# When using random moves, store original board with some probability
out_fens = []
if num_random > 0 and rnd.random() < store_prob:
out_fens.append(fen)
# Default: Make EACH PLAYER do a random move and then store
for i in range(num_random):
if not list(board.legal_moves):
break
board.push(rnd.choice(list(board.legal_moves)))
else:
# only store if all random moves were applied
out_fens.append(board.fen())
for out_fen in out_fens:
# flip board if necessary
out_fen = dh.flip_to_white(out_fen)
fen_file.write(out_fen + '\n')
fen_count += 1
if game_num % 100 == 0:
print "Processed game %d [%d] fens..." % (game_num, fen_count)
game_num += 1
# Write out next game to be processed
with open(resource_filename('guerilla', 'data/extracted_data/game_num.txt'), 'w') as num_file:
num_file.write(str(game_num))
def iter_pgncollection(pgn_col):
"""
Iterator for games in the provided PGN collection. A PGN collection is a single file with multiple PGNs.
:param pgn_col: [File Object] File object for PGN collection. (i.e. already opened file)
:return: [chess.pgn.Game] yields chess games.
"""
new_pgn_key = '[Event'
temp_file = 'temp.pgn'
# Iterate through lines of pgn collection
pgn_lines = []
for line in pgn_col:
# We've reached a new pgn!
if line.split(' ')[0] == new_pgn_key:
# If we just completed reading a pgn, write PGN to file, read PGN and yield game
if pgn_lines:
with open(temp_file, 'w') as f:
f.writelines(pgn_lines)
with open(temp_file, 'r') as f:
game = chess.pgn.read_game(f)
yield game
# Reset PGN buffer
pgn_lines = []
# Add to PGN buffer
pgn_lines.append(line)
# Yield the final PGN in the collection: the loop above only yields when it
# sees the next '[Event' header, so the last buffered game must be read here.
if pgn_lines:
    with open(temp_file, 'w') as f:
        f.writelines(pgn_lines)
    with open(temp_file, 'r') as f:
        yield chess.pgn.read_game(f)
# Remove tempfile
if os.path.isfile(temp_file):
    os.remove(temp_file)
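# Example usage (a sketch; the filename is a placeholder for a KingBase PGN
# collection, mirroring how get_checkmate_fens below consumes this iterator):
#   with open('KingBase-A00-A39.pgn', 'r') as pgn_col:
#       for game in iter_pgncollection(pgn_col):
#           print game.headers['Result']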
def get_checkmate_fens():
"""
Extracts checkmate and pre-mate FEN files from PGN collections.
Reads all PGN collections in pgn_files/kingbase/.
Can directly read KingBase files. Download from http://www.kingbase-chess.net/ then extract into kingbase directory.
"""
db_path = resource_filename('guerilla', 'data/pgn_files/kingbase')
game_count = mate_count = 0
pgn_collections = [join(db_path, f) for f in os.listdir(db_path) if isfile(join(db_path, f))]
with open(resource_filename('guerilla', 'data/extracted_data/checkmate_fens_temp.csv'), 'w') as mate_file, \
open(resource_filename('guerilla', 'data/extracted_data/premate_fens_temp.csv'), 'w') as pre_file:
print "Opened checkmate and premate fens output file..."
for f in pgn_collections:
print "Reading through collection {}...".format(f)
with open(f, 'r') as pgn_col:
for game in iter_pgncollection(pgn_col):
result = game.headers['Result']
if result != '1/2-1/2':
# Game was not a draw
last_board = game.end().board()
pre_board = game.end().parent.board()
if last_board.is_checkmate():
if result == '1-0':
# White checkmated black
mate_file.write(dh.flip_board(last_board.fen()) + '\n')
pre_file.write(pre_board.fen() + '\n')
else:
# Black checkmated white
mate_file.write(last_board.fen() + '\n')
pre_file.write(dh.flip_board(pre_board.fen()) + '\n')
mate_count += 1
game_count += 1
if game_count % 1000 == 0:
print "%d %d" % (game_count, mate_count)
def load_fens(filename='fens.csv', num_values=None):
"""
Loads the fens file.
Input:
filename:
Name of the fens file (one fen per line) under data/extracted_data/.
num_values [int]:
Max number of fens to return.
(will return min of num_values and number of fens stored in file)
Output:
fens [List]
Loaded fens
"""
full_path = resource_filename('guerilla', 'data/extracted_data/' + filename)
fens = []
count = 0
with open(full_path, 'r') as fen_file:
for line in fen_file:
fens.append(line.strip())
count += 1
if num_values is not None and count >= num_values:
break
if num_values > count:
raise ValueError(
"Could not load desired number of fens! File %s only has %d FENs and requested load was %d FENs" % (
filename, count, num_values))
return fens
def main():
generate_time = raw_input("How many seconds do you want to generate fens for?: ")
get_fens(int(generate_time), num_random=4)
if __name__ == "__main__":
main()
|
|
#from __future__ import print_function
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import random
import math
import itertools
import pylab
from scipy import spatial
from matplotlib.path import Path
# user defined options
disk = False # this parameter defines if we look for Poisson-like distribution on a disk (center at 0, radius 1) or in a square (0-1 on x and y)
squareRepeatPattern = False # this parameter defines if we look for "repeating" pattern so if we should maximize distances also with pattern repetitions
#num_points = 32 # number of points we are looking for
num_iterations = 10 # number of iterations in which we take average minimum squared distances between points and try to maximize them
first_point_zero = disk # should the first point be zero (useful if we already have such a sample) or random
iterations_per_point = 30 # iterations per point trying to look for a new point with larger distance
sorting_buckets = 0 # if this option is > 0, then sequence will be optimized for tiled cache locality in n x n tiles (x followed by y)
def random_point_disk():
alpha = random.random() * math.pi * 2.0
radius = math.sqrt(random.random())
x = math.cos(alpha) * radius
y = math.sin(alpha) * radius
return np.array([x,y])
def random_point_square():
x = random.uniform(0,1)
y = random.uniform(0,1)
return np.array([x,y])
def first_point():
if first_point_zero == True:
return np.array([0,0])
elif disk == True:
return random_point_disk()
else:
return random_point_square()
# since we only compare distances, working with squared distances is sufficient
def min_dist_squared_pure(points, point):
diff = points - np.array([point])
return np.min(np.einsum('ij,ij->i',diff,diff))
def min_dist_squared_repeat(points, point):
dist = math.sqrt(2)
for y in range(-1,2):
for x in range(-1,2):
testing_point = np.array([point-[x,y]])
diff = points-testing_point
dist = min(np.min(np.einsum('ij,ij->i',diff,diff)),dist)
return dist
def find_next_point(current_points):
best_dist = 0
best_point = []
for i in range(iterations_per_point):
new_point = random_point()
dist = min_dist_squared(current_points, new_point)
if (dist > best_dist) and (dist > 0.00):
best_dist = dist
best_point = new_point
return best_point
def find_next_point_2(current_points):
best_dist = 0
best_point = []
for i in range(iterations_per_point):
new_point = getRandomPoint()
dist = min_dist_squared(current_points, new_point)
if (dist > best_dist) and (dist > 0.00):
best_dist = dist
best_point = new_point
return best_point
def getRandomPoint():
x = random.uniform(0,.9)
y = random.uniform(0,.9)
return np.array([x,y])
def find_point_set(num_points, num_iter):
best_point_set = []
best_dist_avg = 0  # keep the candidate set whose summed min squared distance is largest
for i in range(num_iter):
points = np.array([first_point()])
for i in range(num_points-1):
points = np.append(points, np.array(find_next_point(points),ndmin = 2), axis = 0)
current_set_dist = 0
for i in range(num_points):
dist = min_dist_squared(np.delete(points,i,0), points[i])
current_set_dist += dist
if current_set_dist > best_dist_avg:
best_dist_avg = current_set_dist
best_point_set = points
return best_point_set
if disk == True:
random_point = random_point_disk
else:
random_point = random_point_square
if disk == False and squareRepeatPattern == True:
min_dist_squared = min_dist_squared_repeat
else:
min_dist_squared = min_dist_squared_pure
def generatePoissonDisk(num_points):
points = find_point_set(num_points,num_iterations)
if sorting_buckets > 0:
points_discretized = np.floor(points * [sorting_buckets,-sorting_buckets])
# we multiply by 2 in the following line because of the potential -1,1 range
indices_cache_space = np.array(points_discretized[:,1] * sorting_buckets * 2 + points_discretized[:,0])
points = points[np.argsort(indices_cache_space)]
return points
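# Example (a sketch): draw 32 best-candidate samples with the search above;
# when sorting_buckets > 0 the sequence is additionally reordered for tiled
# cache locality.
#   samples = generatePoissonDisk(32)   # -> ndarray of shape (32, 2)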
def generatePDD(num_points, iterations, min_dist,limUp=1,limBot=0,inputPoints=np.array([]), virtualPoints=np.array([])):
nPoints = inputPoints.size // 2
vSize = virtualPoints.size // 2
points = np.zeros((num_points+vSize,2))
b = .5
s = int(limUp / (min_dist*b))
candidatePoints = np.random.uniform(limBot,limUp,(s,s,iterations,2))
if vSize > 0:
points[0:vSize,:] = virtualPoints
if nPoints == 0:
points[vSize,:] = np.random.uniform(limBot,limUp,(1,2))
nPoints += 1
else:
points[vSize:vSize+nPoints,:] = inputPoints
scale = (limUp - limBot) / float(limUp)
#andidatePoints = np.random.uniform(limBot,limUp,(s*s*iterations,2))
xs = np.arange(0,s,dtype=int)
ys = np.arange(0,s,dtype=int)
np.random.shuffle(xs)
np.random.shuffle(ys)
candidatePoints *= min_dist * b
for i in range(s):
i = xs[i]
for j in range(s):
j = ys[j]
candidatePoints[i,j,:,0] += scale * i * min_dist*b
candidatePoints[i,j,:,1] += scale * j * min_dist*b
dist = spatial.distance.cdist(candidatePoints[i,j,:,:], points[0:nPoints+vSize])
d = np.min(dist,axis=1)
k = np.argmax(d)
if d[k] > min_dist:
points[vSize+nPoints,:] = candidatePoints[i,j,k,:]
nPoints += 1
return points[vSize:,:]
def generatePDDLevels(num_points, iterations, min_dist,limUp,limBot,inputPoints, virtualPoints,inputLevels,virtualLevels):
nPoints = inputPoints.size // 2
vSize = virtualPoints.size // 2
levels = virtualLevels.shape[0]
points = np.zeros((num_points+vSize,2))
b = .5
if vSize > 0:
points[0:vSize,:] = virtualPoints
if nPoints == 0:
points[vSize,:] = np.random.uniform(limBot,limUp,(1,2))
nPoints += 1
else:
points[vSize:vSize+nPoints,:] = inputPoints
limUpNew = limUp - min_dist[0]
rest= np.zeros((levels,num_points-nPoints),dtype=bool)
maskLevels = np.concatenate((virtualLevels,inputLevels,rest),axis=1)
for l in range(0,levels):
s = int(limUp / (min_dist[l]*b))
mask = np.sum(maskLevels[0:l+1,:],axis=0) == 1
candidatePoints = np.random.uniform(limBot,limUpNew,(s,s,iterations,2))
xs = np.arange(0,s,dtype=int)
ys = np.arange(0,s,dtype=int)
np.random.shuffle(xs)
np.random.shuffle(ys)
scale = (limUpNew - limBot) / float(limUp)
candidatePoints *= min_dist[l] * b
for i in range(s):
i = xs[i]
for j in range(s):
j = ys[j]
candidatePoints[i,j,:,0] += scale * i * min_dist[l]*b
candidatePoints[i,j,:,1] += scale * j * min_dist[l]*b
dist = spatial.distance.cdist(candidatePoints[i,j,:,:], points[0:nPoints+vSize])
score = 0
m = np.logical_or(mask,maskLevels[l,:])
for l2 in range(l,levels):
if l2 == l:
distLevel = dist[:,m[0:nPoints+vSize]]
else:
distLevel = dist[:,maskLevels[l2,0:nPoints+vSize]]
d = np.min(distLevel,axis=1)
score += d > min_dist[l2]
if np.any(score == levels - l):
k = np.where(score==levels-l)[0][0]
# print points[vSize+nPoints,:]
points[vSize+nPoints,:] = candidatePoints[i,j,k,:]
# print maskLevels[:, vSize+nPoints], l
maskLevels[l, vSize+nPoints] = True
nPoints += 1
pointLevels = np.zeros((points[vSize:vSize+nPoints,:].shape[0]))
# print maskLevels
# print maskLevels[0,:]
# print maskLevels[1,:]
# print np.sum(maskLevels,axis=0)
for l in range(0,levels):
# print maskLevels[l,vSize:vSize+nPoints]
pointLevels[maskLevels[l,vSize:vSize+nPoints]] = l
return points[vSize:vSize+nPoints,:],pointLevels
def generatePDDwithPoints(num_points, maskPoints, existingPoints):
s1 = maskPoints.shape[0]
s2 = existingPoints.shape[0]
s = s1+s2
totPoints = np.empty((s,2))
totPoints[0:s1,:] = maskPoints
totPoints[s1:s,:] = existingPoints
#for i in range(num_iter):
points = totPoints
for i in range(num_points-s2):
points = np.append(points, np.array(find_next_point_2(points),ndmin = 2), axis = 0)
#current_set_dist = 0
points = points[s1::,:]
return points
def generatePDD_new(gridResolution, min_dist_array,level,mask,finalMask,minBoundX = 0.0, minBoundY = 0.0, maxBoundX = 1.0, maxBoundY = 1.0, existingPoints=np.array([]),itr=20):
min_dist = min_dist_array[level]
totLevels = min_dist_array.shape[0]
b = min_dist * .5
nGroups = int(np.ceil(1.0 / (b)))
nPoints = (nGroups+1)*(nGroups+1) + existingPoints.shape[0]
points = np.zeros((nPoints,3))
currentNPoints = 0
if existingPoints.shape[0] == 0:
rPoint = np.random.uniform(0,1,(1,2))
points[currentNPoints,0] = rPoint[0,0] * (maxBoundX - minBoundX) + minBoundX
points[currentNPoints,1] = rPoint[0,1] * (maxBoundY - minBoundY) + minBoundY
points[currentNPoints,2] = level
currentNPoints += 1
else:
points[0:existingPoints.shape[0],:] = existingPoints
currentNPoints += existingPoints.shape[0]
totalItr = nGroups*nGroups
x,y = np.indices((nGroups,nGroups)) * (1/nGroups)
x = x.ravel()
y = y.ravel()
randomPointsGrid = np.random.uniform(0,1,(totalItr,totalItr,2))
# randomPointsGrid /= float(nGroups)
# randomPointsGrid[:,:,0] += x[np.newaxis,:]
# randomPointsGrid[:,:,1] += y[np.newaxis,:]
randomPointsGrid[:,:,0] *= (maxBoundX - minBoundX)
randomPointsGrid[:,:,1] *= (maxBoundY - minBoundY)
randomPointsGrid[:,:,0] += minBoundX
randomPointsGrid[:,:,1] += minBoundY
#randomPoints = randomPointsGrid.reshape(nGroups*nGroups*itr,2)
# print randomPoints.shape
#allDistances = spatial.distance.cdist(randomPoints,np.concatenate((randomPoints,points[0:currentNPoints,0:2]),axis=0))
# plt.figure()
# plt.scatter(randomPointsGrid[:,:,:,0].ravel(),randomPointsGrid[:,:,:,1].ravel())
# plt.show()
t1 = mask.contains_points(randomPointsGrid.reshape(totalItr*totalItr,2))
withInBounds = t1.reshape(totalItr,totalItr).astype(int)
for i in xrange(0,totalItr):
dist = spatial.distance.cdist(randomPointsGrid[i,:,:], points[0:currentNPoints,0:2])
distances = 0
for l in range(level,totLevels):
mask = points[0:currentNPoints,2] <= l
min_dist = min_dist_array[l]
distL = dist[:,mask]
d = np.min(distL,axis=1)
distances += d
score = withInBounds[i,:]
score += (d > min_dist).astype(int)
inds = np.where(score == (1 + totLevels - level))[0]
if score[inds].size > 0:
minDistID = np.argmin(distances[inds])
k = inds[minDistID]
points[currentNPoints,0:2] = randomPointsGrid[i,k]
points[currentNPoints,2] = level
currentNPoints += 1
m = finalMask.contains_points(points[:currentNPoints,0:2])
return points[m,:]
|
|
from graphql.core.language.parser import parse
from graphql.core.type import GraphQLObjectType, GraphQLField, GraphQLString, GraphQLNonNull, GraphQLSchema
from graphql.core.execution import execute
sync_error = Exception('sync')
non_null_sync_error = Exception('nonNullSync')
class ThrowingData(object):
def sync(self):
raise sync_error
def nonNullSync(self):
raise non_null_sync_error
def nest(self):
return ThrowingData()
def nonNullNest(self):
return ThrowingData()
class NullingData(object):
def sync(self):
return None
def nonNullSync(self):
return None
def nest(self):
return NullingData()
def nonNullNest(self):
return NullingData()
DataType = GraphQLObjectType('DataType', lambda: {
'sync': GraphQLField(GraphQLString),
'nonNullSync': GraphQLField(GraphQLNonNull(GraphQLString)),
'nest': GraphQLField(DataType),
'nonNullNest': GraphQLField(GraphQLNonNull(DataType)),
})
schema = GraphQLSchema(DataType)
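# The schema above is deliberately tiny: every resolver either raises
# (ThrowingData) or returns None (NullingData), so a query such as
#   query Q { nest { nonNullSync } }
# lets the tests below check how null propagation interacts with
# GraphQLNonNull. The commented-out 'promise' fields in the larger queries
# appear to come from an async variant of these fixtures and are not
# exercised here.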
def test_nulls_a_nullable_field_that_throws_sync():
doc = '''
query Q {
sync
}
'''
ast = parse(doc)
result = execute(schema, ThrowingData(), ast, 'Q', {})
assert len(result.errors) == 1
# TODO: check error location
assert result.errors[0].message == str(sync_error)
assert result.data == {
'sync': None
}
def test_nulls_a_sync_returned_object_that_contains_a_non_nullable_field_that_throws():
doc = '''
query Q {
nest {
nonNullSync,
}
}
'''
ast = parse(doc)
result = execute(schema, ThrowingData(), ast, 'Q', {})
assert len(result.errors) == 1
# TODO: check error location
assert result.errors[0].message == str(non_null_sync_error)
assert result.data == {
'nest': None
}
def test_nulls_a_complex_tree_of_nullable_fields_that_throw():
doc = '''
query Q {
nest {
sync
#promise
nest {
sync
#promise
}
#promiseNest {
# sync
# promise
#}
}
#promiseNest {
# sync
# promise
# nest {
# sync
# promise
# }
# promiseNest {
# sync
# promise
# }
#}
}
'''
ast = parse(doc)
result = execute(schema, ThrowingData(), ast, 'Q', {})
assert len(result.errors) == 2
# TODO: check error location
assert result.errors[0].message == str(sync_error)
assert result.errors[1].message == str(sync_error)
assert result.data == {
'nest': {
'sync': None,
'nest': {
'sync': None
}
}
}
def test_nulls_a_nullable_field_that_returns_null():
doc = '''
query Q {
sync
}
'''
ast = parse(doc)
result = execute(schema, NullingData(), ast, 'Q', {})
assert not result.errors
assert result.data == {
'sync': None
}
def test_nulls_a_sync_returned_object_that_contains_a_non_nullable_field_that_returns_null():
doc = '''
query Q {
nest {
nonNullSync,
}
}
'''
ast = parse(doc)
result = execute(schema, NullingData(), ast, 'Q', {})
assert len(result.errors) == 1
# TODO: check error location
assert result.errors[0].message == 'Cannot return null for non-nullable field DataType.nonNullSync.'
assert result.data == {
'nest': None
}
def test_nulls_a_complex_tree_of_nullable_fields_that_returns_null():
doc = '''
query Q {
nest {
sync
#promise
nest {
sync
#promise
}
#promiseNest {
# sync
# promise
#}
}
#promiseNest {
# sync
# promise
# nest {
# sync
# promise
# }
# promiseNest {
# sync
# promise
# }
#}
}
'''
ast = parse(doc)
result = execute(schema, NullingData(), ast, 'Q', {})
assert not result.errors
assert result.data == {
'nest': {
'sync': None,
'nest': {
'sync': None
}
}
}
def test_nulls_the_top_level_if_sync_non_nullable_field_throws():
doc = '''
query Q { nonNullSync }
'''
ast = parse(doc)
result = execute(schema, ThrowingData(), ast)
assert result.data is None
assert len(result.errors) == 1
# TODO: check error location
assert result.errors[0].message == str(non_null_sync_error)
def test_nulls_the_top_level_if_sync_non_nullable_field_returns_null():
doc = '''
query Q { nonNullSync }
'''
ast = parse(doc)
result = execute(schema, NullingData(), ast)
assert result.data is None
assert len(result.errors) == 1
# TODO: check error location
assert result.errors[0].message == 'Cannot return null for non-nullable field DataType.nonNullSync.'
|
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Student progress trackers."""
__author__ = 'Sean Lip (sll@google.com)'
import datetime
import logging
import os
from collections import defaultdict
import courses
import transforms
from common import utils
from models import QuestionDAO
from models import QuestionGroupDAO
from models import StudentPropertyEntity
from tools import verify
# Names of component tags that are tracked for progress calculations.
TRACKABLE_COMPONENTS = [
'question',
'question-group',
]
class UnitLessonCompletionTracker(object):
"""Tracks student completion for a unit/lesson-based linear course."""
PROPERTY_KEY = 'linear-course-completion'
# Here are representative examples of the keys for the various entities
# used in this class:
# Unit 1: u.1
# Unit 1, Lesson 1: u.1.l.1
# Unit 1, Lesson 1, Activity 0: u.1.l.1.a.0
# Unit 1, Lesson 1, Activity 0, Block 4: u.1.l.1.a.0.b.4
# Assessment 'Pre': s.Pre
# At the moment, we do not divide assessments into blocks.
#
# The following keys were added in v1.5:
# Unit 1, Lesson 1, HTML: u.1.l.1.h.0
# Unit 1, Lesson 1, HTML, Component with instanceid id: u.1.l.1.h.0.c.id
#
# The number after the 'h' and 'a' codes is always zero, since a lesson may
# have at most one HTML body and one activity.
#
# IMPORTANT NOTE: The values of the keys mean different things depending on
# whether the entity is a composite entity or not.
# If it is a composite entity (unit, lesson, activity), then the value is
# - 0 if none of its sub-entities has been completed
# - 1 if some, but not all, of its sub-entities have been completed
# - 2 if all its sub-entities have been completed.
# If it is not a composite entity (i.e. block, assessment, component), then
# the value is just the number of times the event has been triggered.
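# Illustrative (hypothetical) serialized progress value mixing both kinds
# of entries -- composite states and raw event counts:
#     {'u.1': 2, 'u.1.l.1': 2, 'u.1.l.1.h.0': 2,
#      'u.1.l.1.h.0.c.xyz': 3, 's.Pre': 1}
# Here unit 1 and its first lesson are completed, the component with
# instanceid 'xyz' has been triggered three times, and assessment 'Pre'
# has been submitted once.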
# Constants for recording the state of composite entities.
# TODO(sll): Change these to enums.
NOT_STARTED_STATE = 0
IN_PROGRESS_STATE = 1
COMPLETED_STATE = 2
MULTIPLE_CHOICE = 'multiple choice'
MULTIPLE_CHOICE_GROUP = 'multiple choice group'
QUESTION_GROUP = 'question-group'
QUESTION = 'question'
EVENT_CODE_MAPPING = {
'course': 'r',
'course_forced': 'r',
'unit': 'u',
'unit_forced': 'u',
'lesson': 'l',
'activity': 'a',
'html': 'h',
'block': 'b',
'assessment': 's',
'component': 'c',
'custom_unit': 'x'
}
COMPOSITE_ENTITIES = [
EVENT_CODE_MAPPING['course'],
EVENT_CODE_MAPPING['unit'],
EVENT_CODE_MAPPING['lesson'],
EVENT_CODE_MAPPING['activity'],
EVENT_CODE_MAPPING['html'],
EVENT_CODE_MAPPING['custom_unit']
]
POST_UPDATE_PROGRESS_HOOK = []
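# Hooks appended to POST_UPDATE_PROGRESS_HOOK are run via utils.run_hooks()
# after each progress update with the arguments
# (course, student, progress, event_entity, event_key); see _update_event().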
def __init__(self, course):
self._course = course
def _get_course(self):
return self._course
def get_activity_as_python(self, unit_id, lesson_id):
"""Gets the corresponding activity as a Python object."""
root_name = 'activity'
course = self._get_course()
activity_text = course.app_context.fs.get(
os.path.join(course.app_context.get_home(),
course.get_activity_filename(unit_id, lesson_id)))
content, noverify_text = verify.convert_javascript_to_python(
activity_text, root_name)
activity = verify.evaluate_python_expression_from_text(
content, root_name, verify.Activity().scope, noverify_text)
return activity
def _get_course_key(self):
return '%s.0' % (
self.EVENT_CODE_MAPPING['course'],
)
def _get_unit_key(self, unit_id):
return '%s.%s' % (self.EVENT_CODE_MAPPING['unit'], unit_id)
def _get_custom_unit_key(self, unit_id):
return '%s.%s' % (self.EVENT_CODE_MAPPING['custom_unit'], unit_id)
def _get_lesson_key(self, unit_id, lesson_id):
return '%s.%s.%s.%s' % (
self.EVENT_CODE_MAPPING['unit'], unit_id,
self.EVENT_CODE_MAPPING['lesson'], lesson_id
)
def _get_activity_key(self, unit_id, lesson_id):
return '%s.%s.%s.%s.%s.%s' % (
self.EVENT_CODE_MAPPING['unit'], unit_id,
self.EVENT_CODE_MAPPING['lesson'], lesson_id,
self.EVENT_CODE_MAPPING['activity'], 0
)
def _get_html_key(self, unit_id, lesson_id):
return '%s.%s.%s.%s.%s.%s' % (
self.EVENT_CODE_MAPPING['unit'], unit_id,
self.EVENT_CODE_MAPPING['lesson'], lesson_id,
self.EVENT_CODE_MAPPING['html'], 0
)
def _get_component_key(self, unit_id, lesson_id, component_id):
return '%s.%s.%s.%s.%s.%s.%s.%s' % (
self.EVENT_CODE_MAPPING['unit'], unit_id,
self.EVENT_CODE_MAPPING['lesson'], lesson_id,
self.EVENT_CODE_MAPPING['html'], 0,
self.EVENT_CODE_MAPPING['component'], component_id
)
def _get_block_key(self, unit_id, lesson_id, block_id):
return '%s.%s.%s.%s.%s.%s.%s.%s' % (
self.EVENT_CODE_MAPPING['unit'], unit_id,
self.EVENT_CODE_MAPPING['lesson'], lesson_id,
self.EVENT_CODE_MAPPING['activity'], 0,
self.EVENT_CODE_MAPPING['block'], block_id
)
def _get_assessment_key(self, assessment_id):
assessment_key = '%s.%s' % (
self.EVENT_CODE_MAPPING['assessment'], assessment_id)
# If this assessment is used as a "lesson" within a unit, prepend
# the unit identifier.
parent_unit = self._get_course().get_parent_unit(assessment_id)
if parent_unit:
assessment_key = '.'.join([self._get_unit_key(parent_unit.unit_id),
assessment_key])
return assessment_key
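# Illustrative key values produced by the helpers above (hypothetical ids):
#     _get_block_key(1, 2, 4) -> 'u.1.l.2.a.0.b.4'
#     _get_assessment_key('Pre') -> 's.Pre', or 'u.3.s.Pre' when the
#         assessment is a pre/post assessment owned by unit 3.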
def get_entity_type_from_key(self, progress_entity_key):
return progress_entity_key.split('.')[-2]
def determine_if_composite_entity(self, progress_entity_key):
return self.get_entity_type_from_key(
progress_entity_key) in self.COMPOSITE_ENTITIES
def get_valid_component_ids(self, unit_id, lesson_id):
"""Returns a list of cpt ids representing trackable components."""
components = []
for cpt_name in TRACKABLE_COMPONENTS:
all_cpts = self._get_course().get_components_with_name(
unit_id, lesson_id, cpt_name)
components += [
cpt['instanceid'] for cpt in all_cpts if cpt['instanceid']]
return components
def get_valid_block_ids(self, unit_id, lesson_id):
"""Returns a list of block ids representing interactive activities."""
valid_blocks_data = self._get_valid_blocks_data(unit_id, lesson_id)
return [block[0] for block in valid_blocks_data]
def get_valid_blocks(self, unit_id, lesson_id):
"""Returns a list of blocks representing interactive activities."""
valid_blocks_data = self._get_valid_blocks_data(unit_id, lesson_id)
return [block[1] for block in valid_blocks_data]
def _get_valid_blocks_data(self, unit_id, lesson_id):
"""Returns a list of (b_id, block) representing trackable activities."""
valid_blocks = []
# Check if activity exists before calling get_activity_as_python.
unit = self._get_course().find_unit_by_id(unit_id)
lesson = self._get_course().find_lesson_by_id(unit, lesson_id)
if unit and lesson and lesson.activity:
# Get the activity corresponding to this unit/lesson combination.
activity = self.get_activity_as_python(unit_id, lesson_id)
for block_id in range(len(activity['activity'])):
block = activity['activity'][block_id]
if isinstance(block, dict):
valid_blocks.append((block_id, block))
return valid_blocks
def get_id_to_questions_dict(self):
"""Returns a dict that maps each question to a list of its answers.
Returns:
A dict that represents the questions in lessons. The keys of this
dict are question ids, and the corresponding values are dicts, each
containing the following five key-value pairs:
- answer_counts: a list of 0's with length corresponding to number of
choices a question has.
- location: str. href value of the location of the question in the
course.
- num_attempts: int. Number of attempts for this question. This is
used as the denominator when calculating the average score for a
question. This value may differ from the sum of the elements in
'answer_counts' because of event entities that record an answer but
not a score.
- score: int. Aggregated value of the scores.
- label: str. Human readable identifier for this question.
"""
id_to_questions = {}
for unit in self._get_course().get_units_of_type(verify.UNIT_TYPE_UNIT):
unit_id = unit.unit_id
for lesson in self._get_course().get_lessons(unit_id):
lesson_id = lesson.lesson_id
# Add mapping dicts for questions in old-style activities.
if lesson.activity:
blocks = self._get_valid_blocks_data(unit_id, lesson_id)
for block_index, (block_id, block) in enumerate(blocks):
if block['questionType'] == self.MULTIPLE_CHOICE:
# Old style question.
id_to_questions.update(
self._create_old_style_question_dict(
block, block_id, block_index, unit, lesson))
elif (block['questionType'] ==
self.MULTIPLE_CHOICE_GROUP):
# Old style multiple choice group.
for ind, q in enumerate(block['questionsList']):
id_to_questions.update(
self._create_old_style_question_dict(
q, block_id, block_index, unit,
lesson, index=ind))
# Add mapping dicts for CBv1.5 style questions.
if lesson.objectives:
for cpt in self._get_course().get_question_components(
unit_id, lesson_id):
# CB v1.5 style questions.
id_to_questions.update(
self._create_v15_lesson_question_dict(
cpt, unit, lesson))
for cpt in self._get_course().get_question_group_components(
unit_id, lesson_id):
# CB v1.5 style question groups.
id_to_questions.update(
self._create_v15_lesson_question_group_dict(
cpt, unit, lesson))
return id_to_questions
def get_id_to_assessments_dict(self):
"""Returns a dict that maps each question to a list of its answers.
Returns:
A dict that represents the questions in assessments. The keys of
this dict are question ids, and the corresponding values are dicts,
each containing the following five key-value pairs:
- answer_counts: a list of 0's with length corresponding to number of
choices a question has.
- location: str. href value of the location of the question in the
course.
- num_attempts: int. Number of attempts for this question. This is
used as the denominator when calculating the average score for a
question. This value may differ from the sum of the elements in
'answer_counts' because of event entities that record an answer but
not a score.
- score: int. Aggregated value of the scores.
- label: str. Human readable identifier for this question.
"""
id_to_assessments = {}
for assessment in self._get_course().get_assessment_list():
if not self._get_course().needs_human_grader(assessment):
assessment_components = self._get_course(
).get_assessment_components(assessment.unit_id)
# CB v1.5 style assessments.
for cpt in assessment_components:
if cpt['cpt_name'] == self.QUESTION_GROUP:
id_to_assessments.update(
self._create_v15_assessment_question_group_dict(
cpt, assessment))
elif cpt['cpt_name'] == self.QUESTION:
id_to_assessments.update(
self._create_v15_assessment_question_dict(
cpt, assessment))
# Old style javascript assessments.
try:
content = self._get_course().get_assessment_content(
assessment)
id_to_assessments.update(
self._create_old_style_assessment_dict(
content['assessment'], assessment))
except AttributeError:
# Assessment file does not exist.
continue
return id_to_assessments
def _get_link_for_assessment(self, assessment_id):
return 'assessment?name=%s' % (assessment_id)
def _get_link_for_activity(self, unit_id, lesson_id):
return 'activity?unit=%s&lesson=%s' % (unit_id, lesson_id)
def _get_link_for_lesson(self, unit_id, lesson_id):
return 'unit?unit=%s&lesson=%s' % (unit_id, lesson_id)
def _create_v15_question_dict(self, q_id, label, link, num_choices):
"""Returns a dict that represents CB v1.5 style question."""
return {
q_id: {
'answer_counts': [0] * num_choices,
'label': label,
'location': link,
'score': 0,
'num_attempts': 0
}
}
def _create_v15_lesson_question_dict(self, cpt, unit, lesson):
try:
question = QuestionDAO.load(cpt['quid'])
if question.type == question.MULTIPLE_CHOICE:
q_id = 'u.%s.l.%s.c.%s' % (
unit.unit_id, lesson.lesson_id, cpt['instanceid'])
label = 'Unit %s Lesson %s, Question %s' % (
unit.index, lesson.index, question.description)
link = self._get_link_for_lesson(unit.unit_id, lesson.lesson_id)
num_choices = len(question.dict['choices'])
return self._create_v15_question_dict(
q_id, label, link, num_choices)
else:
return {}
except Exception as e: # pylint: disable=broad-except
logging.error(
'Failed to process the question data. '
'Error: %s, data: %s', e, cpt)
return {}
def _create_v15_lesson_question_group_dict(self, cpt, unit, lesson):
try:
question_group = QuestionGroupDAO.load(cpt['qgid'])
questions = {}
for ind, quid in enumerate(question_group.question_ids):
question = QuestionDAO.load(quid)
if question.type == question.MULTIPLE_CHOICE:
q_id = 'u.%s.l.%s.c.%s.i.%s' % (
unit.unit_id, lesson.lesson_id, cpt['instanceid'], ind)
label = ('Unit %s Lesson %s, Question Group %s Question %s'
% (unit.index, lesson.index,
question_group.description,
question.description))
link = self._get_link_for_lesson(
unit.unit_id, lesson.lesson_id)
num_choices = len(question.dict['choices'])
questions.update(self._create_v15_question_dict(
q_id, label, link, num_choices))
return questions
except Exception as e: # pylint: disable=broad-except
logging.error(
'Failed to process the question data. '
'Error: %s, data: %s', e, cpt)
return {}
def _create_v15_assessment_question_group_dict(self, cpt, assessment):
try:
question_group = QuestionGroupDAO.load(cpt['qgid'])
questions = {}
for ind, quid in enumerate(question_group.question_ids):
question = QuestionDAO.load(quid)
if question.type == question.MULTIPLE_CHOICE:
q_id = 's.%s.c.%s.i.%s' % (
assessment.unit_id, cpt['instanceid'], ind)
label = '%s, Question Group %s Question %s' % (
assessment.title, question_group.description,
question.description)
link = self._get_link_for_assessment(assessment.unit_id)
num_choices = len(question.dict['choices'])
questions.update(
self._create_v15_question_dict(
q_id, label, link, num_choices))
return questions
except Exception as e: # pylint: disable=broad-except
logging.error(
'Failed to process the question data. '
'Error: %s, data: %s', e, cpt)
return {}
def _create_v15_assessment_question_dict(self, cpt, assessment):
try:
question = QuestionDAO.load(cpt['quid'])
if question.type == question.MULTIPLE_CHOICE:
q_id = 's.%s.c.%s' % (assessment.unit_id, cpt['instanceid'])
label = '%s, Question %s' % (
assessment.title, question.description)
link = self._get_link_for_assessment(assessment.unit_id)
num_choices = len(question.dict['choices'])
return self._create_v15_question_dict(
q_id, label, link, num_choices)
else:
return {}
except Exception as e: # pylint: disable=broad-except
logging.error(
'Failed to process the question data. '
'Error: %s, data: %s', e, cpt)
return {}
def _create_old_style_question_dict(self, block, block_id, block_index,
unit, lesson, index=None):
try:
if index is not None:
# Question is in a multiple choice group.
b_id = 'u.%s.l.%s.b.%s.i.%s' % (
unit.unit_id, lesson.lesson_id, block_id, index)
label = 'Unit %s Lesson %s Activity, Item %s Part %s' % (
unit.index, lesson.index, block_index + 1, index + 1)
else:
b_id = 'u.%s.l.%s.b.%s' % (
unit.unit_id, lesson.lesson_id, block_id)
label = 'Unit %s Lesson %s Activity, Item %s' % (
unit.index, lesson.index, block_index + 1)
return {
b_id: {
'answer_counts': [0] * len(block['choices']),
'label': label,
'location': self._get_link_for_activity(
unit.unit_id, lesson.lesson_id),
'score': 0,
'num_attempts': 0
}
}
except Exception as e: # pylint: disable=broad-except
logging.error(
'Failed to process the question data. '
'Error: %s, data: %s', e, block)
return {}
def _create_old_style_assessment_dict(self, content, assessment):
try:
questions = {}
for ind, question in enumerate(content['questionsList']):
if 'choices' in question:
questions.update(
{
's.%s.i.%s' % (assessment.unit_id, ind): {
'answer_counts': [0] * len(question['choices']),
'label': '%s, Question %s' % (
assessment.title, ind + 1),
'location': self._get_link_for_assessment(
assessment.unit_id),
'score': 0,
'num_attempts': 0
}
}
)
return questions
except Exception as e: # pylint: disable=broad-except
logging.error(
'Failed to process the question data. '
'Error: %s, data: %s', e, content)
return {}
def _update_course(self, progress, student):
event_key = self._get_course_key()
if self._get_entity_value(progress, event_key) == self.COMPLETED_STATE:
return
self._set_entity_value(progress, event_key, self.IN_PROGRESS_STATE)
course = self._get_course()
for unit in course.get_track_matching_student(student):
if course.get_parent_unit(unit.unit_id):
# Completion of an assessment-as-lesson rolls up to its
# containing unit; it is not considered for overall course
# completion (except insofar as assessment completion
# contributes to the completion of its owning unit)
pass
else:
if unit.type == verify.UNIT_TYPE_ASSESSMENT:
if not self.is_assessment_completed(progress, unit.unit_id):
return
elif unit.type == verify.UNIT_TYPE_UNIT:
unit_state = self.get_unit_status(progress, unit.unit_id)
if unit_state != self.COMPLETED_STATE:
return
self._set_entity_value(progress, event_key, self.COMPLETED_STATE)
def _update_course_forced(self, progress):
"""Force state of course to completed."""
event_key = self._get_course_key()
self._set_entity_value(progress, event_key, self.COMPLETED_STATE)
def _update_unit(self, progress, event_key):
"""Updates a unit's progress if all its lessons have been completed."""
split_event_key = event_key.split('.')
assert len(split_event_key) == 2
unit_id = split_event_key[1]
if self._get_entity_value(progress, event_key) == self.COMPLETED_STATE:
return
# Record that at least one lesson in this unit has been completed.
self._set_entity_value(progress, event_key, self.IN_PROGRESS_STATE)
# Check if all lessons in this unit have been completed.
lessons = self._get_course().get_lessons(unit_id)
for lesson in lessons:
if (self.get_lesson_status(
progress,
unit_id, lesson.lesson_id) != self.COMPLETED_STATE):
return
# Check whether pre/post assessments in this unit have been completed.
unit = self._get_course().find_unit_by_id(unit_id)
pre_assessment_id = unit.pre_assessment
if (pre_assessment_id and
not self.get_assessment_status(progress, pre_assessment_id)):
return
post_assessment_id = unit.post_assessment
if (post_assessment_id and
not self.get_assessment_status(progress, post_assessment_id)):
return
# Record that all lessons in this unit have been completed.
self._set_entity_value(progress, event_key, self.COMPLETED_STATE)
def _update_unit_forced(self, progress, event_key):
"""Force-mark a unit as completed, ignoring normal criteria."""
# Record that all lessons in this unit have been completed.
self._set_entity_value(progress, event_key, self.COMPLETED_STATE)
def _update_lesson(self, progress, event_key):
"""Updates a lesson's progress based on the progress of its children."""
split_event_key = event_key.split('.')
assert len(split_event_key) == 4
unit_id = split_event_key[1]
lesson_id = split_event_key[3]
if self._get_entity_value(progress, event_key) == self.COMPLETED_STATE:
return
# Record that at least one part of this lesson has been completed.
self._set_entity_value(progress, event_key, self.IN_PROGRESS_STATE)
lessons = self._get_course().get_lessons(unit_id)
for lesson in lessons:
if str(lesson.lesson_id) == lesson_id and lesson:
# Is the activity completed?
if (lesson.activity and self.get_activity_status(
progress, unit_id, lesson_id) != self.COMPLETED_STATE):
return
# Are all components of the lesson completed?
if (self.get_html_status(
progress, unit_id, lesson_id) != self.COMPLETED_STATE):
return
# Record that all activities in this lesson have been completed.
self._set_entity_value(progress, event_key, self.COMPLETED_STATE)
def _update_activity(self, progress, event_key):
"""Updates activity's progress when all interactive blocks are done."""
split_event_key = event_key.split('.')
assert len(split_event_key) == 6
unit_id = split_event_key[1]
lesson_id = split_event_key[3]
if self._get_entity_value(progress, event_key) == self.COMPLETED_STATE:
return
# Record that at least one block in this activity has been completed.
self._set_entity_value(progress, event_key, self.IN_PROGRESS_STATE)
valid_block_ids = self.get_valid_block_ids(unit_id, lesson_id)
for block_id in valid_block_ids:
if not self.is_block_completed(
progress, unit_id, lesson_id, block_id):
return
# Record that all blocks in this activity have been completed.
self._set_entity_value(progress, event_key, self.COMPLETED_STATE)
def _update_html(self, progress, event_key):
"""Updates html's progress when all interactive blocks are done."""
split_event_key = event_key.split('.')
assert len(split_event_key) == 6
unit_id = split_event_key[1]
lesson_id = split_event_key[3]
if self._get_entity_value(progress, event_key) == self.COMPLETED_STATE:
return
# Record that at least one component in this lesson body has been completed.
self._set_entity_value(progress, event_key, self.IN_PROGRESS_STATE)
cpt_ids = self.get_valid_component_ids(unit_id, lesson_id)
for cpt_id in cpt_ids:
if not self.is_component_completed(
progress, unit_id, lesson_id, cpt_id):
return
# Record that all components in this lesson body have been completed.
self._set_entity_value(progress, event_key, self.COMPLETED_STATE)
def _update_custom_unit(self, student, event_key, state):
"""Update custom unit."""
if student.is_transient:
return
progress = self.get_or_create_progress(student)
current_state = self._get_entity_value(progress, event_key)
if current_state == state or current_state == self.COMPLETED_STATE:
return
self._set_entity_value(progress, event_key, state)
progress.updated_on = datetime.datetime.now()
progress.put()
UPDATER_MAPPING = {
'activity': _update_activity,
'course': _update_course,
'course_forced': _update_course_forced,
'html': _update_html,
'lesson': _update_lesson,
'unit': _update_unit,
'unit_forced': _update_unit_forced,
}
# Dependencies for recording derived events. The key is the current
# event, and the value is a tuple, each element of which contains:
# - the dependent entity to be updated
# - the transformation to apply to the id of the current event to get the
# id for the derived parent event
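# For example, a block key's parent id is obtained by dropping its last
# two elements: 'u.1.l.2.a.0.b.4' -> 'u.1.l.2.a.0' (the containing
# activity), which in turn rolls up to 'u.1.l.2' (lesson) and 'u.1' (unit).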
DERIVED_EVENTS = {
'block': (
{
'entity': 'activity',
'generate_parent_id': (lambda s: '.'.join(s.split('.')[:-2]))
},
),
'activity': (
{
'entity': 'lesson',
'generate_parent_id': (lambda s: '.'.join(s.split('.')[:-2]))
},
),
'lesson': (
{
'entity': 'unit',
'generate_parent_id': (lambda s: '.'.join(s.split('.')[:-2]))
},
),
'component': (
{
'entity': 'html',
'generate_parent_id': (lambda s: '.'.join(s.split('.')[:-2]))
},
),
'html': (
{
'entity': 'lesson',
'generate_parent_id': (lambda s: '.'.join(s.split('.')[:-2]))
},
),
'assessment': (
{
'entity': 'unit',
'generate_parent_id': (lambda s: '.'.join(s.split('.')[:-2]))
},
),
}
def force_course_completed(self, student):
self._put_event(
student, 'course_forced', self._get_course_key())
def force_unit_completed(self, student, unit_id):
"""Records that the given student has completed a unit.
NOTE: This should not generally be used directly. Completing a unit
is normally taken to mean completing all parts of all of its
components (assessments, lessons, activities in lessons, etc.).
Directly marking a unit as complete is provided only for manual
marking where the student feels "done" but has not worked through
every part of the material.
Args:
student: A logged-in, registered student object.
unit_id: The ID of the unit to be marked as complete.
"""
self._put_event(
student, 'unit_forced', self._get_unit_key(unit_id))
def put_activity_completed(self, student, unit_id, lesson_id):
"""Records that the given student has completed an activity."""
if not self._get_course().is_valid_unit_lesson_id(unit_id, lesson_id):
return
self._put_event(
student, 'activity', self._get_activity_key(unit_id, lesson_id))
def put_html_completed(self, student, unit_id, lesson_id):
"""Records that the given student has completed a lesson page."""
if not self._get_course().is_valid_unit_lesson_id(unit_id, lesson_id):
return
self._put_event(
student, 'html', self._get_html_key(unit_id, lesson_id))
def put_block_completed(self, student, unit_id, lesson_id, block_id):
"""Records that the given student has completed an activity block."""
if not self._get_course().is_valid_unit_lesson_id(unit_id, lesson_id):
return
if block_id not in self.get_valid_block_ids(unit_id, lesson_id):
return
self._put_event(
student,
'block',
self._get_block_key(unit_id, lesson_id, block_id)
)
def put_component_completed(self, student, unit_id, lesson_id, cpt_id):
"""Records completion of a component in a lesson body."""
if not self._get_course().is_valid_unit_lesson_id(unit_id, lesson_id):
return
if cpt_id not in self.get_valid_component_ids(unit_id, lesson_id):
return
self._put_event(
student,
'component',
self._get_component_key(unit_id, lesson_id, cpt_id)
)
def put_assessment_completed(self, student, assessment_id):
"""Records that the given student has completed the given assessment."""
if not self._get_course().is_valid_assessment_id(assessment_id):
return
self._put_event(
student, 'assessment', self._get_assessment_key(assessment_id))
def put_custom_unit_completed(self, student, unit_id):
"""Records that the student has completed the given custom_unit."""
if not self._get_course().is_valid_custom_unit(unit_id):
return
self._update_custom_unit(
student, self._get_custom_unit_key(unit_id),
self.COMPLETED_STATE)
def put_custom_unit_in_progress(self, student, unit_id):
"""Records that the given student has started the given custom_unit."""
if not self._get_course().is_valid_custom_unit(unit_id):
return
self._update_custom_unit(
student, self._get_custom_unit_key(unit_id),
self.IN_PROGRESS_STATE)
def put_activity_accessed(self, student, unit_id, lesson_id):
"""Records that the given student has accessed this activity."""
# This method currently exists because we need to mark activities
# without interactive blocks as 'completed' when they are accessed.
if not self.get_valid_block_ids(unit_id, lesson_id):
self.put_activity_completed(student, unit_id, lesson_id)
def put_html_accessed(self, student, unit_id, lesson_id):
"""Records that the given student has accessed this lesson page."""
# This method currently exists because we need to mark lesson bodies
# without interactive blocks as 'completed' when they are accessed.
if not self.get_valid_component_ids(unit_id, lesson_id):
self.put_html_completed(student, unit_id, lesson_id)
def _put_event(self, student, event_entity, event_key):
"""Starts a cascade of updates in response to an event taking place."""
if student.is_transient or event_entity not in self.EVENT_CODE_MAPPING:
return
progress = self.get_or_create_progress(student)
self._update_event(
student, progress, event_entity, event_key, direct_update=True)
progress.updated_on = datetime.datetime.now()
progress.put()
def _update_event(self, student, progress, event_entity, event_key,
direct_update=False):
"""Updates statistics for the given event, and for derived events.
Args:
student: the student
progress: the StudentProgressEntity for the student
event_entity: the name of the affected entity (unit, lesson, etc.)
event_key: the key for the recorded event
direct_update: True if this event is being updated explicitly; False
if it is being auto-updated.
"""
if direct_update or event_entity not in self.UPDATER_MAPPING:
if event_entity in self.UPDATER_MAPPING:
# This is a derived event, so directly mark it as completed.
self._set_entity_value(
progress, event_key, self.COMPLETED_STATE)
else:
# This is not a derived event, so increment its counter by one.
self._inc(progress, event_key)
else:
self.UPDATER_MAPPING[event_entity](self, progress, event_key)
if event_entity in self.DERIVED_EVENTS:
for derived_event in self.DERIVED_EVENTS[event_entity]:
parent_event_key = derived_event['generate_parent_id'](
event_key)
if parent_event_key:
# Event entities may contribute upwards to more than one
# kind of container. Only pass the notification up to the
# handler that our event_key indicates we actually have.
leaf_type = self.get_entity_type_from_key(parent_event_key)
event_entity = derived_event['entity']
if leaf_type == self.EVENT_CODE_MAPPING[event_entity]:
self._update_event(
student=student,
progress=progress,
event_entity=event_entity,
event_key=parent_event_key)
else:
# Only update course status when we are at the top of
# a containment list
self._update_course(progress, student)
else:
# Otherwise this event type has no derived events of its own
# (typically a unit), so update course status directly.
self._update_course(progress, student)
utils.run_hooks(self.POST_UPDATE_PROGRESS_HOOK, self._get_course(),
student, progress, event_entity, event_key)
def get_course_status(self, progress):
return self._get_entity_value(progress, self._get_course_key())
def get_unit_status(self, progress, unit_id):
return self._get_entity_value(progress, self._get_unit_key(unit_id))
def get_custom_unit_status(self, progress, unit_id):
return self._get_entity_value(
progress, self._get_custom_unit_key(unit_id))
def get_lesson_status(self, progress, unit_id, lesson_id):
return self._get_entity_value(
progress, self._get_lesson_key(unit_id, lesson_id))
def get_activity_status(self, progress, unit_id, lesson_id):
return self._get_entity_value(
progress, self._get_activity_key(unit_id, lesson_id))
def get_html_status(self, progress, unit_id, lesson_id):
return self._get_entity_value(
progress, self._get_html_key(unit_id, lesson_id))
def get_block_status(self, progress, unit_id, lesson_id, block_id):
return self._get_entity_value(
progress, self._get_block_key(unit_id, lesson_id, block_id))
def get_assessment_status(self, progress, assessment_id):
return self._get_entity_value(
progress, self._get_assessment_key(assessment_id))
def is_block_completed(self, progress, unit_id, lesson_id, block_id):
value = self._get_entity_value(
progress, self._get_block_key(unit_id, lesson_id, block_id))
return value is not None and value > 0
def is_component_completed(self, progress, unit_id, lesson_id, cpt_id):
value = self._get_entity_value(
progress, self._get_component_key(unit_id, lesson_id, cpt_id))
return value is not None and value > 0
def is_assessment_completed(self, progress, assessment_id):
value = self._get_entity_value(
progress, self._get_assessment_key(assessment_id))
return value is not None and value > 0
def is_custom_unit_completed(self, progress, unit_id):
value = self.get_custom_unit_status(progress, unit_id)
return self.COMPLETED_STATE == value
@classmethod
def get_or_create_progress(cls, student):
progress = StudentPropertyEntity.get(student, cls.PROPERTY_KEY)
if not progress:
progress = StudentPropertyEntity.create(
student=student, property_name=cls.PROPERTY_KEY)
progress.put()
return progress
def get_course_progress(self, student):
"""Return [NOT_STARTED|IN_PROGRESS|COMPLETED]_STATE for course."""
progress = self.get_or_create_progress(student)
return self.get_course_status(progress) or self.NOT_STARTED_STATE
def get_unit_progress(self, student, progress=None):
"""Returns a dict with the states of each unit."""
if student.is_transient:
return {}
units = self._get_course().get_units()
if progress is None:
progress = self.get_or_create_progress(student)
result = {}
for unit in units:
if unit.type == verify.UNIT_TYPE_ASSESSMENT:
result[unit.unit_id] = self.is_assessment_completed(
progress, unit.unit_id)
elif unit.type == verify.UNIT_TYPE_UNIT:
value = self.get_unit_status(progress, unit.unit_id)
result[unit.unit_id] = value or 0
elif unit.type == verify.UNIT_TYPE_CUSTOM:
value = self.get_custom_unit_status(progress, unit.unit_id)
result[unit.unit_id] = value or 0
return result
def get_unit_percent_complete(self, student):
"""Returns a dict with each unit's completion in [0.0, 1.0]."""
if student.is_transient:
return {}
course = self._get_course()
units = course.get_units()
assessment_scores = {int(s['id']): s['score'] / 100.0
for s in course.get_all_scores(student)}
result = {}
progress = self.get_or_create_progress(student)
for unit in units:
# Assessments are scored as themselves.
if unit.type == verify.UNIT_TYPE_ASSESSMENT:
result[unit.unit_id] = assessment_scores[unit.unit_id]
elif unit.type == verify.UNIT_TYPE_UNIT:
if (unit.pre_assessment and
assessment_scores[unit.pre_assessment] >= 1.0):
# Use pre-assessment iff it exists and student scored 100%
result[unit.unit_id] = 1.0
else:
# Otherwise, count % completion on lessons within unit.
num_completed = 0
lesson_progress = self.get_lesson_progress(
student, unit.unit_id, progress=progress)
if not lesson_progress:
result[unit.unit_id] = 0.0
else:
for lesson in lesson_progress.values():
if lesson['has_activity']:
# Lessons that have activities must be
# activity-complete as well as HTML complete.
if (lesson['html'] == self.COMPLETED_STATE and
lesson['activity'] == self.COMPLETED_STATE):
num_completed += 1
else:
# Lessons without activities just need HTML
if lesson['html'] == self.COMPLETED_STATE:
num_completed += 1
result[unit.unit_id] = round(
num_completed / float(len(lesson_progress)), 3)
return result
def get_lesson_progress(self, student, unit_id, progress=None):
"""Returns a dict saying which lessons in this unit are completed."""
if student.is_transient:
return {}
lessons = self._get_course().get_lessons(unit_id)
if progress is None:
progress = self.get_or_create_progress(student)
result = {}
for lesson in lessons:
result[lesson.lesson_id] = {
'html': self.get_html_status(
progress, unit_id, lesson.lesson_id) or 0,
'activity': self.get_activity_status(
progress, unit_id, lesson.lesson_id) or 0,
'has_activity': lesson.has_activity,
}
return result
def get_component_progress(self, student, unit_id, lesson_id, cpt_id):
"""Returns the progress status of the given component."""
if student.is_transient:
return 0
progress = self.get_or_create_progress(student)
return self.is_component_completed(
progress, unit_id, lesson_id, cpt_id) or 0
def _get_entity_value(self, progress, event_key):
if not progress.value:
return None
return transforms.loads(progress.value).get(event_key)
def _set_entity_value(self, student_property, key, value):
"""Sets the integer value of a student property.
Note: this method does not commit the change. The calling method should
call put() on the StudentPropertyEntity.
Args:
student_property: the StudentPropertyEntity
key: the student property whose value should be set
value: the value to set this property to
"""
try:
progress_dict = transforms.loads(student_property.value)
except (AttributeError, TypeError):
progress_dict = {}
progress_dict[key] = value
student_property.value = transforms.dumps(progress_dict)
def _inc(self, student_property, key, value=1):
"""Increments the integer value of a student property.
Note: this method does not commit the change. The calling method should
call put() on the StudentPropertyEntity.
Args:
student_property: the StudentPropertyEntity
key: the student property whose value should be incremented
value: the value to increment this property by
"""
try:
progress_dict = transforms.loads(student_property.value)
except (AttributeError, TypeError):
progress_dict = {}
if key not in progress_dict:
progress_dict[key] = 0
progress_dict[key] += value
student_property.value = transforms.dumps(progress_dict)
@classmethod
def get_elements_from_key(cls, key):
"""Decomposes the key into a dictionary with its values.
Args:
key: a string, the key of an element of the progress. For
example, u.1.l.5.h.0
Returns:
A dictionary mapping each element type in the key to its value
(as a string). For the key u.1.l.5.h.0 the result is:
{
'unit': '1',
'lesson': '5',
'html': '0',
'unit_forced': '1'
}
"""
reversed_event_mapping = defaultdict(list)
for full_type, value in cls.EVENT_CODE_MAPPING.iteritems():
reversed_event_mapping[value].append(full_type)
key_elements = key.split('.')
assert len(key_elements) % 2 == 0
result = {}
for index in range(0, len(key_elements), 2):
element_type = key_elements[index]
element_value = key_elements[index + 1]
full_element_types = reversed_event_mapping.get(element_type)
for full_element_type in full_element_types:
result[full_element_type] = element_value
return result
class ProgressStats(object):
"""Defines the course structure definition for course progress tracking."""
def __init__(self, course):
self._course = course
self._tracker = UnitLessonCompletionTracker(course)
def compute_entity_dict(self, entity, parent_ids):
"""Computes the course structure dictionary.
Args:
entity: str. Represents for which level of entity the dict is being
computed. Valid entity levels are defined as keys to the dict
defined below, COURSE_STRUCTURE_DICT.
parent_ids: list of ids necessary to get children of the current
entity.
Returns:
A nested dictionary representing the structure of the course.
Every other level of the dictionary consists of a key, the label of
the entity level defined by EVENT_CODE_MAPPING in
UnitLessonCompletionTracker, whose value is a dictionary
INSTANCES_DICT. The keys of INSTANCES_DICT are instance_ids of the
corresponding entities, and the values are the entity_dicts of the
instance's children, in addition to a field called 'label'. Label
represents the user-facing name of the entity rather than
its intrinsic id. If one of these values is empty, this means
that the corresponding entity has no children.
Ex:
A Course with the following outlined structure:
Pre Assessment
Unit 1
Lesson 1
Unit 2
will have the following dictionary representation:
{
's': {
1: {
'label': 'Pre Assessment'
}
},
'u': {
2: {
'l': {
3: {
'label': 1
}
},
'label': 'Unit 1'
},
4: {
'label': 'Unit 2'
}
},
'label': 'UNTITLED COURSE'
}
"""
entity_dict = {'label': self._get_label(entity, parent_ids)}
for child_entity, get_children_ids in self.COURSE_STRUCTURE_DICT[
entity]['children']:
child_entity_dict = {}
for child_id in get_children_ids(self, *parent_ids):
new_parent_ids = parent_ids + [child_id]
child_entity_dict[child_id] = self.compute_entity_dict(
child_entity, new_parent_ids)
entity_dict[UnitLessonCompletionTracker.EVENT_CODE_MAPPING[
child_entity]] = child_entity_dict
return entity_dict
def _get_course(self):
return self._course
def _get_unit_ids_of_type_unit(self):
units = self._get_course().get_units_of_type(verify.UNIT_TYPE_UNIT)
return [unit.unit_id for unit in units]
def _get_assessment_ids(self):
assessments = self._get_course().get_assessment_list()
return [a.unit_id for a in assessments]
def _get_lesson_ids(self, unit_id):
lessons = self._get_course().get_lessons(unit_id)
return [lesson.lesson_id for lesson in lessons]
def _get_activity_ids(self, unit_id, lesson_id):
unit = self._get_course().find_unit_by_id(unit_id)
if self._get_course().find_lesson_by_id(unit, lesson_id).activity:
return [0]
return []
def _get_html_ids(self, unused_unit_id, unused_lesson_id):
return [0]
def _get_block_ids(self, unit_id, lesson_id, unused_activity_id):
return self._tracker.get_valid_block_ids(unit_id, lesson_id)
def _get_component_ids(self, unit_id, lesson_id, unused_html_id):
return self._tracker.get_valid_component_ids(unit_id, lesson_id)
def _get_label(self, entity, parent_ids):
return self.ENTITY_TO_HUMAN_READABLE_NAME_DICT[entity](
self, *parent_ids)
def _get_course_label(self):
# pylint: disable=protected-access
return courses.Course.get_environ(self._get_course().app_context)[
'course']['title']
def _get_unit_label(self, unit_id):
unit = self._get_course().find_unit_by_id(unit_id)
return 'Unit %s' % unit.index
def _get_assessment_label(self, unit_id):
assessment = self._get_course().find_unit_by_id(unit_id)
return assessment.title
def _get_lesson_label(self, unit_id, lesson_id):
unit = self._get_course().find_unit_by_id(unit_id)
lesson = self._get_course().find_lesson_by_id(unit, lesson_id)
return lesson.index
def _get_activity_label(self, unit_id, lesson_id, unused_activity_id):
return str('L%s.%s' % (
self._get_course().find_unit_by_id(unit_id).index,
self._get_lesson_label(unit_id, lesson_id)))
def _get_html_label(self, unit_id, lesson_id, unused_html_id):
return self._get_activity_label(unit_id, lesson_id, unused_html_id)
def _get_block_label(self, unit_id, lesson_id, unused_activity_id,
block_id):
return str('L%s.%s.%s' % (
self._get_course().find_unit_by_id(unit_id).index,
self._get_lesson_label(unit_id, lesson_id),
block_id))
def _get_component_label(self, unit_id, lesson_id, unused_html_id,
component_id):
return self._get_block_label(
unit_id, lesson_id, unused_html_id, component_id)
# Outlines the structure of the course. The key is the entity level, and
# its value is a dictionary with following keys and its values:
# 'children': list of tuples. Each tuple consists of string representation
# of the child entity(ex: 'lesson') and a function to get the
# children elements. If the entity does not have children, the
# value will be an empty list.
# 'id': instance_id of the entity. If the entity is represented by a class
# with an id attribute(ex: units), string representation of the
# attribute is stored here. If the entity is defined by a dictionary
# (ex: components), then the value is the string 'None'.
#
COURSE_STRUCTURE_DICT = {
'course': {
'children': [('unit', _get_unit_ids_of_type_unit),
('assessment', _get_assessment_ids)],
},
'unit': {
'children': [('lesson', _get_lesson_ids)],
},
'assessment': {
'children': [],
},
'lesson': {
'children': [('activity', _get_activity_ids),
('html', _get_html_ids)],
},
'activity': {
'children': [('block', _get_block_ids)],
},
'html': {
'children': [('component', _get_component_ids)],
},
'block': {
'children': [],
},
'component': {
'children': [],
}
}
ENTITY_TO_HUMAN_READABLE_NAME_DICT = {
'course': _get_course_label,
'unit': _get_unit_label,
'assessment': _get_assessment_label,
'lesson': _get_lesson_label,
'activity': _get_activity_label,
'html': _get_html_label,
'block': _get_block_label,
'component': _get_component_label
}
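# Minimal usage sketch (assuming an existing courses.Course instance named
# course; names here are illustrative): the full course outline is obtained
# by starting at the 'course' entity with no parent ids.
#
#     stats = ProgressStats(course)
#     outline = stats.compute_entity_dict('course', [])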