| code (string, 2 to 1.05M chars) | repo_name (string, 5 to 104 chars) | path (string, 4 to 251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2 to 1.05M) |
|---|---|---|---|---|---|
import sys, os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from config.shared import *
# Unique config for the Dutch language here:
pdf_documents = [
    ('index', u'SendCloud OpenCart 2 Extension Documentatie - NL', u'SendCloud OpenCart 2 Extension Documentation - Nederlands', u'SendCloud BV'),
]
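# Hedged note (assumption, not confirmed by this file): 'pdf_documents'
# follows the rst2pdf builder's (startdocname, targetname, title, author)
# convention, so the Dutch PDF would typically be built with something like
#   sphinx-build -b pdf docs/nl build/pdf/nl
# provided the shared configuration enables the rst2pdf extension.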
|
Whazor/SendCloud-OpenCart
|
docs/nl/conf.py
|
Python
|
mit
| 331
|
"""distutils.fancy_getopt
Wrapper around the standard getopt module that provides the following
additional features:
* short and long options are tied together
* options have help strings, so fancy_getopt could potentially
create a complete usage summary
* options set attributes of a passed-in object
"""
import sys, string, re
import getopt
from distutils.errors import *
# Much like command_re in distutils.core, this is close to but not quite
# the same as a Python NAME -- except, in the spirit of most GNU
# utilities, we use '-' in place of '_'. (The spirit of LISP lives on!)
# The similarities to NAME are again not a coincidence...
longopt_pat = r'[a-zA-Z](?:[a-zA-Z0-9-]*)'
longopt_re = re.compile(r'^%s$' % longopt_pat)
# For recognizing "negative alias" options, eg. "quiet=!verbose"
neg_alias_re = re.compile("^(%s)=!(%s)$" % (longopt_pat, longopt_pat))
# This is used to translate long options to legitimate Python identifiers
# (for use as attributes of some object).
longopt_xlate = str.maketrans('-', '_')
class FancyGetopt:
    """Wrapper around the standard 'getopt()' module that provides some
    handy extra functionality:
      * short and long options are tied together
      * options have help strings, and help text can be assembled
        from them
      * options set attributes of a passed-in object
      * boolean options can have "negative aliases" -- eg. if
        --quiet is the "negative alias" of --verbose, then "--quiet"
        on the command line sets 'verbose' to false
    """

    def __init__(self, option_table=None):
        # The option table is (currently) a list of tuples.  The
        # tuples may have three or four values:
        #   (long_option, short_option, help_string [, repeatable])
        # if an option takes an argument, its long_option should have '='
        # appended; short_option should just be a single character, no ':'
        # in any case.  If a long_option doesn't have a corresponding
        # short_option, short_option should be None.  All option tuples
        # must have long options.
        self.option_table = option_table

        # 'option_index' maps long option names to entries in the option
        # table (ie. those 3-tuples).
        self.option_index = {}
        if self.option_table:
            self._build_index()

        # 'alias' records (duh) alias options; {'foo': 'bar'} means
        # --foo is an alias for --bar
        self.alias = {}

        # 'negative_alias' keeps track of options that are the boolean
        # opposite of some other option
        self.negative_alias = {}

        # These keep track of the information in the option table.  We
        # don't actually populate these structures until we're ready to
        # parse the command-line, since the 'option_table' passed in here
        # isn't necessarily the final word.
        self.short_opts = []
        self.long_opts = []
        self.short2long = {}
        self.attr_name = {}
        self.takes_arg = {}

        # And 'option_order' is filled up in 'getopt()'; it records the
        # original order of options (and their values) on the command-line,
        # but expands short options, converts aliases, etc.
        self.option_order = []

    def _build_index(self):
        self.option_index.clear()
        for option in self.option_table:
            self.option_index[option[0]] = option

    def set_option_table(self, option_table):
        self.option_table = option_table
        self._build_index()

    def add_option(self, long_option, short_option=None, help_string=None):
        if long_option in self.option_index:
            raise DistutilsGetoptError(
                "option conflict: already an option '%s'" % long_option)
        else:
            option = (long_option, short_option, help_string)
            self.option_table.append(option)
            self.option_index[long_option] = option

    def has_option(self, long_option):
        """Return true if the option table for this parser has an
        option with long name 'long_option'."""
        return long_option in self.option_index

    def get_attr_name(self, long_option):
        """Translate long option name 'long_option' to the form it
        has as an attribute of some object: ie., translate hyphens
        to underscores."""
        return long_option.translate(longopt_xlate)
    def _check_alias_dict(self, aliases, what):
        assert isinstance(aliases, dict)
        for (alias, opt) in aliases.items():
            if alias not in self.option_index:
                raise DistutilsGetoptError(("invalid %s '%s': "
                                            "option '%s' not defined") % (what, alias, alias))
            if opt not in self.option_index:
                raise DistutilsGetoptError(("invalid %s '%s': "
                                            "aliased option '%s' not defined") % (what, alias, opt))

    def set_aliases(self, alias):
        """Set the aliases for this option parser."""
        self._check_alias_dict(alias, "alias")
        self.alias = alias

    def set_negative_aliases(self, negative_alias):
        """Set the negative aliases for this option parser.
        'negative_alias' should be a dictionary mapping option names to
        option names; both the key and value must already be defined
        in the option table."""
        self._check_alias_dict(negative_alias, "negative alias")
        self.negative_alias = negative_alias

    def _grok_option_table(self):
        """Populate the various data structures that keep tabs on the
        option table.  Called by 'getopt()' before it can do anything
        worthwhile.
        """
        self.long_opts = []
        self.short_opts = []
        self.short2long.clear()
        self.repeat = {}

        for option in self.option_table:
            if len(option) == 3:
                long, short, help = option
                repeat = 0
            elif len(option) == 4:
                long, short, help, repeat = option
            else:
                # the option table is part of the code, so simply
                # assert that it is correct
                raise ValueError("invalid option tuple: %r" % (option,))

            # Type- and value-check the option names
            if not isinstance(long, str) or len(long) < 2:
                raise DistutilsGetoptError(("invalid long option '%s': "
                                            "must be a string of length >= 2") % long)

            if (not ((short is None) or
                     (isinstance(short, str) and len(short) == 1))):
                raise DistutilsGetoptError("invalid short option '%s': "
                                           "must be a single character or None" % short)

            self.repeat[long] = repeat
            self.long_opts.append(long)

            if long[-1] == '=':             # option takes an argument?
                if short:
                    short = short + ':'
                long = long[0:-1]
                self.takes_arg[long] = 1
            else:
                # Is option a "negative alias" for some other option (eg.
                # "quiet" == "!verbose")?
                alias_to = self.negative_alias.get(long)
                if alias_to is not None:
                    if self.takes_arg[alias_to]:
                        raise DistutilsGetoptError(
                            "invalid negative alias '%s': "
                            "aliased option '%s' takes a value"
                            % (long, alias_to))

                    self.long_opts[-1] = long  # XXX redundant?!
                self.takes_arg[long] = 0

            # If this is an alias option, make sure its "takes arg" flag is
            # the same as the option it's aliased to.
            alias_to = self.alias.get(long)
            if alias_to is not None:
                if self.takes_arg[long] != self.takes_arg[alias_to]:
                    raise DistutilsGetoptError(
                        "invalid alias '%s': inconsistent with "
                        "aliased option '%s' (one of them takes a value, "
                        "the other doesn't)"
                        % (long, alias_to))

            # Now enforce some bondage on the long option name, so we can
            # later translate it to an attribute name on some object.  Have
            # to do this a bit late to make sure we've removed any trailing
            # '='.
            if not longopt_re.match(long):
                raise DistutilsGetoptError(
                    "invalid long option name '%s' "
                    "(must be letters, numbers, hyphens only)" % long)

            self.attr_name[long] = self.get_attr_name(long)
            if short:
                self.short_opts.append(short)
                self.short2long[short[0]] = long
    def getopt(self, args=None, object=None):
        """Parse command-line options in args.  Store as attributes on object.
        If 'args' is None or not supplied, uses 'sys.argv[1:]'.  If
        'object' is None or not supplied, creates a new OptionDummy
        object, stores option values there, and returns a tuple (args,
        object).  If 'object' is supplied, it is modified in place and
        'getopt()' just returns 'args'; in both cases, the returned
        'args' is a modified copy of the passed-in 'args' list, which
        is left untouched.
        """
        if args is None:
            args = sys.argv[1:]
        if object is None:
            object = OptionDummy()
            created_object = True
        else:
            created_object = False

        self._grok_option_table()

        short_opts = ' '.join(self.short_opts)
        try:
            opts, args = getopt.getopt(args, short_opts, self.long_opts)
        except getopt.error as msg:
            raise DistutilsArgError(msg)

        for opt, val in opts:
            if len(opt) == 2 and opt[0] == '-':  # it's a short option
                opt = self.short2long[opt[1]]
            else:
                assert len(opt) > 2 and opt[:2] == '--'
                opt = opt[2:]

            alias = self.alias.get(opt)
            if alias:
                opt = alias

            if not self.takes_arg[opt]:  # boolean option?
                assert val == '', "boolean option can't have value"
                alias = self.negative_alias.get(opt)
                if alias:
                    opt = alias
                    val = 0
                else:
                    val = 1

            attr = self.attr_name[opt]
            # The only repeating option at the moment is 'verbose'.
            # It has a negative option -q quiet, which should set verbose = 0.
            if val and self.repeat.get(attr) is not None:
                val = getattr(object, attr, 0) + 1
            setattr(object, attr, val)
            self.option_order.append((opt, val))

        # for opts
        if created_object:
            return args, object
        else:
            return args

    def get_option_order(self):
        """Returns the list of (option, value) tuples processed by the
        previous run of 'getopt()'.  Raises RuntimeError if
        'getopt()' hasn't been called yet.
        """
        if self.option_order is None:
            raise RuntimeError("'getopt()' hasn't been called yet")
        else:
            return self.option_order
    def generate_help(self, header=None):
        """Generate help text (a list of strings, one per suggested line of
        output) from the option table for this FancyGetopt object.
        """
        # Blithely assume the option table is good: probably wouldn't call
        # 'generate_help()' unless you've already called 'getopt()'.

        # First pass: determine maximum length of long option names
        max_opt = 0
        for option in self.option_table:
            long = option[0]
            short = option[1]
            l = len(long)
            if long[-1] == '=':
                l = l - 1
            if short is not None:
                l = l + 5  # " (-x)" where short == 'x'
            if l > max_opt:
                max_opt = l

        opt_width = max_opt + 2 + 2 + 2  # room for indent + dashes + gutter

        # Typical help block looks like this:
        #   --foo       controls foonabulation
        # Help block for longest option looks like this:
        #   --flimflam  set the flim-flam level
        # and with wrapped text:
        #   --flimflam  set the flim-flam level (must be between
        #               0 and 100, except on Tuesdays)
        # Options with short names will have the short name shown (but
        # it doesn't contribute to max_opt):
        #   --foo (-f)  controls foonabulation
        # If adding the short option would make the left column too wide,
        # we push the explanation off to the next line
        #   --flimflam (-l)
        #               set the flim-flam level
        # Important parameters:
        #   - 2 spaces before option block start lines
        #   - 2 dashes for each long option name
        #   - min. 2 spaces between option and explanation (gutter)
        #   - 5 characters (incl. space) for short option name

        # Now generate lines of help text.  (If 80 columns were good enough
        # for Jesus, then 78 columns are good enough for me!)
        line_width = 78
        text_width = line_width - opt_width
        big_indent = ' ' * opt_width
        if header:
            lines = [header]
        else:
            lines = ['Option summary:']

        for option in self.option_table:
            long, short, help = option[:3]
            text = wrap_text(help, text_width)
            if long[-1] == '=':
                long = long[0:-1]

            # Case 1: no short option at all (makes life easy)
            if short is None:
                if text:
                    lines.append("  --%-*s  %s" % (max_opt, long, text[0]))
                else:
                    lines.append("  --%-*s  " % (max_opt, long))

            # Case 2: we have a short option, so we have to include it
            # just after the long option
            else:
                opt_names = "%s (-%s)" % (long, short)
                if text:
                    lines.append("  --%-*s  %s" %
                                 (max_opt, opt_names, text[0]))
                else:
                    # note: '%-*s' needs both the width and the value
                    lines.append("  --%-*s" % (max_opt, opt_names))

            for l in text[1:]:
                lines.append(big_indent + l)
        return lines

    def print_help(self, header=None, file=None):
        if file is None:
            file = sys.stdout
        for line in self.generate_help(header):
            file.write(line + "\n")
def fancy_getopt(options, negative_opt, object, args):
    parser = FancyGetopt(options)
    parser.set_negative_aliases(negative_opt)
    return parser.getopt(args, object)
WS_TRANS = {ord(_wschar) : ' ' for _wschar in string.whitespace}
def wrap_text(text, width):
    """wrap_text(text : string, width : int) -> [string]
    Split 'text' into multiple lines of no more than 'width' characters
    each, and return the list of strings that results.
    """
    if text is None:
        return []
    if len(text) <= width:
        return [text]

    text = text.expandtabs()
    text = text.translate(WS_TRANS)
    chunks = re.split(r'( +|-+)', text)
    chunks = [ch for ch in chunks if ch]  # ' - ' results in empty strings
    lines = []

    while chunks:
        cur_line = []  # list of chunks (to-be-joined)
        cur_len = 0    # length of current line

        while chunks:
            l = len(chunks[0])
            if cur_len + l <= width:  # can squeeze (at least) this chunk in
                cur_line.append(chunks[0])
                del chunks[0]
                cur_len = cur_len + l
            else:  # this line is full
                # drop last chunk if all space
                if cur_line and cur_line[-1][0] == ' ':
                    del cur_line[-1]
                break

        if chunks:  # any chunks left to process?
            # if the current line is still empty, then we had a single
            # chunk that's too big to fit on a line -- so we break
            # down and break it up at the line width
            if cur_len == 0:
                cur_line.append(chunks[0][0:width])
                chunks[0] = chunks[0][width:]

            # all-whitespace chunks at the end of a line can be discarded
            # (and we know from the re.split above that if a chunk has
            # *any* whitespace, it is *all* whitespace)
            if chunks[0][0] == ' ':
                del chunks[0]

        # and store this line in the list-of-all-lines -- as a single
        # string, of course!
        lines.append(''.join(cur_line))

    return lines
def translate_longopt(opt):
    """Convert a long option name to a valid Python identifier by
    changing "-" to "_".
    """
    return opt.translate(longopt_xlate)


class OptionDummy:
    """Dummy class just used as a place to hold command-line option
    values as instance attributes."""

    def __init__(self, options=[]):
        """Create a new OptionDummy instance.  The attributes listed in
        'options' will be initialized to None."""
        for opt in options:
            setattr(self, opt, None)
if __name__ == "__main__":
    text = """\
Tra-la-la, supercalifragilisticexpialidocious.
How *do* you spell that odd word, anyways?
(Someone ask Mary -- she'll know [or she'll
say, "How should I know?"].)"""

    for w in (10, 20, 30, 40):
        print("width: %d" % w)
        print("\n".join(wrap_text(text, w)))
        print()
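
    # A minimal extra demo (not part of the original module): FancyGetopt
    # with a negative alias, as described in the class docstring above.
    # Parsing '-q' flips 'verbose' off on the returned OptionDummy.
    parser = FancyGetopt([('verbose', 'v', "run verbosely"),
                          ('quiet', 'q', "run quietly")])
    parser.set_negative_aliases({'quiet': 'verbose'})
    args, opts = parser.getopt(['-q', 'leftover'])
    print("args: %r, verbose: %r" % (args, opts.verbose))  # ['leftover'], 0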
|
ArcherSys/ArcherSys
|
Lib/distutils/fancy_getopt.py
|
Python
|
mit
| 53,492
|
from .base.dev import Device


class Sev_seg_dp(Device):
    def __init__(self, dev, size=8, dev_id=0):
        self.buf = [""] * size
        self.is_shutdown = None
        self.dev = dev
        self.dev_id = dev_id
        # 'write()' and friends consult this flag, so make sure it exists
        # even if the Device base class does not set it.
        self.disable_flag = False
        super().__init__()

    def write(self, txt, pos=0, dot=0):
        """print text at pos"""
        if self.disable_flag:
            return
        for i, c in enumerate(reversed(txt)):
            # safe pos
            sp = (i + pos) % len(self.buf)
            if self.buf[sp] == txt[i]:
                continue
            else:
                data = "{}{}{}{}{};".format(chr(77),
                                            chr(self.dev_id),
                                            chr(sp),
                                            txt[i],
                                            chr(dot))
                self.dev.write(data.encode())
                # update buf
                self.buf[sp] = txt[i]

    def clear(self):
        """clear display"""
        data = "{}{};".format(chr(84), chr(self.dev_id))
        self.dev.write(data.encode())
        # clear buffer
        self.buf = [""] * len(self.buf)

    def printrow(self, row, hexval):
        """print hexval to row"""
        if self.disable_flag:
            return
        data = b"".join(i.to_bytes(1, 'big') for i in (79, self.dev_id, row, hexval)) + b';'
        self.dev.write(data)

    def printcol(self, col, hexval):
        """print hexval to col"""
        if self.disable_flag:
            return
        data = b"".join(i.to_bytes(1, 'big') for i in (80, self.dev_id, col, hexval)) + b';'
        self.dev.write(data)

    def setled(self, r, c, s):
        """set single led"""
        if self.disable_flag:
            return
        data = "{}{}{}{}{};".format(chr(81), chr(self.dev_id), chr(r), chr(c), chr(s))
        self.dev.write(data.encode())

    def shutdown(self, s):
        """turn the device on/off"""
        if self.is_shutdown != s:
            data = "{}{}{};".format(chr(82), chr(self.dev_id), chr(s))
            self.dev.write(data.encode())
            self.is_shutdown = s

    def setintensity(self, i):
        """set display intensity"""
        data = "{}{}{};".format(chr(83), chr(self.dev_id), chr(i))
        self.dev.write(data.encode())

    def disable(self):
        """disable device, turn off device and disable writing to usb"""
        self.shutdown(1)
        self.disable_flag = True

    def enable(self):
        """re-enable device after disable"""
        self.shutdown(0)
        self.disable_flag = False
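
# A minimal usage sketch (not part of the original module). 'dev' is assumed
# to be any object with a write(bytes) method -- typically a serial/USB
# handle; here a stub that records the framed protocol bytes:
#
#     class _StubDev:
#         def __init__(self):
#             self.sent = b""
#         def write(self, data):
#             self.sent += data
#
#     display = Sev_seg_dp(_StubDev())
#     display.write("12345678")   # one 'M'-framed command per changed digit
#     display.setintensity(8)     # 'S'-framed intensity command
#     display.clear()             # 'T'-framed clear command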
|
icve/liv-Ard
|
hostscripts/lib/sev_seg_dp.py
|
Python
|
mit
| 2,571
|
"""
Created on 19 Nov 2016
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
https://opensensorsio.helpscoutdocs.com/article/84-overriding-timestamp-information-in-message-payload
"""
import optparse
from scs_core.osio.config.project import Project
# --------------------------------------------------------------------------------------------------------------------
class CmdOSIOTopicPublisher(object):
    """unix command line handler"""

    def __init__(self):
        """
        Constructor
        """
        self.__parser = optparse.OptionParser(usage="%prog { -t TOPIC_PATH | -c { C | G | P | S | X } } "
                                                    "[-o] [-v]", version="%prog 1.0")

        # compulsory...
        self.__parser.add_option("--topic", "-t", type="string", nargs=1, action="store", dest="topic",
                                 help="topic path")

        self.__parser.add_option("--channel", "-c", type="string", nargs=1, action="store", dest="channel",
                                 help="publication channel")

        # optional...
        self.__parser.add_option("--override", "-o", action="store_true", dest="override", default=False,
                                 help="override OSIO reception datetime")

        self.__parser.add_option("--verbose", "-v", action="store_true", dest="verbose", default=False,
                                 help="report narrative to stderr")

        self.__opts, self.__args = self.__parser.parse_args()

    # ----------------------------------------------------------------------------------------------------------------

    def is_valid(self):
        if bool(self.topic) == bool(self.channel):
            return False

        if not bool(self.topic) and not bool(self.channel):
            return False

        if self.channel and not Project.is_valid_channel(self.channel):
            return False

        return True

    # ----------------------------------------------------------------------------------------------------------------

    @property
    def topic(self):
        return self.__opts.topic

    @property
    def channel(self):
        return self.__opts.channel

    @property
    def override(self):
        return self.__opts.override

    @property
    def verbose(self):
        return self.__opts.verbose

    # ----------------------------------------------------------------------------------------------------------------

    def print_help(self, file):
        self.__parser.print_help(file)

    def __str__(self, *args, **kwargs):
        return "CmdOSIOTopicPublisher:{topic:%s, channel:%s, override:%s, verbose:%s}" % \
               (self.topic, self.channel, self.override, self.verbose)
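
# A minimal usage sketch (not part of the original module): optparse reads
# sys.argv inside the constructor, so a typical entry point looks like this.
#
#     import sys
#
#     cmd = CmdOSIOTopicPublisher()
#     if not cmd.is_valid():
#         cmd.print_help(sys.stderr)
#         sys.exit(2)
#     if cmd.verbose:
#         print(cmd, file=sys.stderr)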
|
south-coast-science/scs_dev
|
src/scs_dev/cmd/cmd_osio_topic_publisher.py
|
Python
|
mit
| 2,751
|
from turbogears import controllers, expose, identity, flash, config
import cherrypy
from subcontrollers.site import SiteController
from cherrypy import request, response
from calabro.model import Sites
import siterouter


class Root(controllers.RootController):
    @expose()
    def default(self, site_name, *path, **params):
        return siterouter.route(site_name, *path, **params)

    @expose()
    def index(self, *args, **kw):
        raise controllers.redirect(config.get('calabro.default_site'))
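
# How this dispatch behaves (illustrative, with a hypothetical site name):
# a request to /demo/about/team is forwarded as
# siterouter.route('demo', 'about', 'team'), while a bare / redirects to the
# site named in the 'calabro.default_site' config entry.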
|
CarlosGabaldon/calabro
|
calabro/controllers.py
|
Python
|
mit
| 549
|
from django.apps import AppConfig


class ResourcecenterConfig(AppConfig):
    name = 'apps.resourcecenter'
    verbose_name = 'Resource center'
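
# A minimal wiring sketch (standard Django mechanisms, not shown in this
# file): the config is activated either by listing its dotted path in
# settings, e.g.
#
#     INSTALLED_APPS = [
#         # ...,
#         'apps.resourcecenter.appconfig.ResourcecenterConfig',
#     ]
#
# or, on older Django versions, via default_app_config in
# apps/resourcecenter/__init__.py (an assumption about this project's layout).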
|
dotKom/onlineweb4
|
apps/resourcecenter/appconfig.py
|
Python
|
mit
| 145
|
# Copyright (c) 2008 Brian Zimmer <bzimmer@ziclix.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import logging
import collections
from pysmug.smugmug import SmugMug
from pysmug.keywords import smugmug_keywords
_c = re.compile('"(.*?)"')
def kwsplit(word):
    """
    Some keyword samples::

      a; b;
      "abc" "def"
      a
    """
    x = _c.findall(word)
    if x:
        # space-separated quoted strings
        return x
    # semi-colon separated strings (will always create a list)
    return [z.strip() for z in word.split(";") if z]
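
# kwsplit in action, matching the docstring samples above:
#
#     >>> kwsplit('"abc" "def"')
#     ['abc', 'def']
#     >>> kwsplit('a; b;')
#     ['a', 'b']
#     >>> kwsplit('a')
#     ['a']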
class SmugTool(SmugMug):

    def categories_getTree(self):
        """Return the tree of categories and sub-categories.

        The format of the response tree::

          {u'Categories': [{u'Name': u'Other', 'SubCategories': {}, u'id': 0},
                           {u'Name': u'Airplanes', 'SubCategories': {}, u'id': 41},
                           {u'Name': u'Animals', 'SubCategories': {}, u'id': 1},
                           {u'Name': u'Aquariums', 'SubCategories': {}, u'id': 25},
                           {u'Name': u'Architecture', 'SubCategories': {}, u'id': 2},
                           {u'Name': u'Art', 'SubCategories': {}, u'id': 3},
                           {u'Name': u'Arts and Crafts', 'SubCategories': {}, u'id': 43},
                           ...,
                          ],
           u'method': u'pysmug.categories.getTree',
           u'stat': u'ok'}
        """
        b = self.batch()
        b.categories_get()
        b.subcategories_getAll()

        methods = dict()
        for params, results in b():
            methods[params["method"]] = results

        subcategories = collections.defaultdict(list)
        for subcategory in methods["smugmug.subcategories.getAll"]["SubCategories"]:
            category = subcategory.pop("Category")
            subcategories[category["id"]].append(subcategory)

        categories = methods["smugmug.categories.get"]["Categories"]
        for category in categories:
            category["SubCategories"] = subcategories.get(category["id"], {})

        return {u"method": u"pysmug.categories.getTree", u"Categories": categories, u"stat": u"ok"}

    @smugmug_keywords
    def albums_details(self, **kwargs):
        """Returns the full details of an album including EXIF data for all images.  It
        is the composition of calls to C{albums_getInfo}, C{images_getInfo} and
        C{images_getEXIF} where the C{images_*} calls are done in batch.  The primary purpose
        for this method is to provide easy access to a full album worth of metadata quickly.

        The format of the response tree::

          {'Album': {'Attribute1': 'Value1',
                     'AttributeN': 'ValueN',
                     'Images': [{'EXIF': {'EXIFAttribute1': 'EXIFValue1',
                                          'EXIFAttributeN': 'EXIFValueN'},
                                 'ImageAttribute1': 'ImageValue1',
                                 'ImageAttributeN': 'ImageAttributeN'},
                                {'EXIF': {'EXIFAttribute1': 'EXIFValue1',
                                          'EXIFAttributeN': 'EXIFValueN'},
                                 'ImageAttribute1': 'ImageValue1',
                                 'ImageAttributeN': 'ImageAttributeN'}]},
           'Statistics': {},
           'method': 'pysmug.albums.details',
           'stat': 'ok'}

        @keyword albumId: the id of the album to query
        @keyword albumKey: the key of the album to query
        @keyword exif: returns EXIF metadata about each image
        @return: a dictionary of the album and image details
        """
        albumId = kwargs.get("AlbumID")
        albumKey = kwargs.get("AlbumKey")
        exif = kwargs.get("Exif")

        album = self.albums_getInfo(albumId=albumId, albumKey=albumKey)
        images = self.images_get(albumId=albumId, albumKey=albumKey)

        # map
        b = self.batch()
        for imageId, imageKey in ((image["id"], image["Key"]) for image in images["Album"]["Images"]):
            # add each image to the batch
            b.images_getInfo(imageID=imageId, imageKey=imageKey)
            if exif:
                b.images_getEXIF(imageID=imageId, imageKey=imageKey)

        # combine
        responses = collections.defaultdict(dict)
        for (params, value) in b():
            imageIdKey = (params["ImageID"], params["ImageKey"])
            responses[imageIdKey][params["method"]] = value

        # reduce
        album[u"Album"][u"Images"] = images = []
        for value in responses.values():
            img = value["smugmug.images.getInfo"]["Image"]
            if exif:
                img[u"EXIF"] = value["smugmug.images.getEXIF"]["Image"]
            images.append(img)

        # return
        album.update({u"method": u"pysmug.albums.details", u"stat": u"ok", u"Statistics": {}})
        return album

    def unused_albums(self):
        """Returns a generator of albums with ImageCount == 0.

        @return: a generator of albums with an image count == 0
        """
        b = self.batch()
        for album in self.albums_get()["Albums"]:
            b.albums_getInfo(albumId=album["id"], albumKey=album["Key"])
        return (info["Album"] for params, info in b() if info["Album"]["ImageCount"] == 0)

    def unused_categories(self):
        """Returns a generator of categories or subcategories with no
        albums.

        @return: a generator of [sub]categories with no associated albums
        """
        used = dict()
        albums = self.albums_get()["Albums"]
        for album in albums:
            category = album["Category"]
            used[("category", category["id"])] = category
            subcategory = album.get("SubCategory", None)
            if subcategory:
                used[("subcategory", album["SubCategory"]["id"])] = subcategory

        tree = self.categories_getTree()
        for c in tree["Categories"]:
            cid = ("category", c["id"])
            if cid not in used:
                c["Type"] = "Category"
                yield c
            for s in c["SubCategories"]:
                sid = ("subcategory", s["id"])
                if sid not in used:
                    s["Type"] = "SubCategory"
                    yield s

    def tagcloud(self, kwfunc=None):
        """
        Compute the occurrence count for all keywords for all images in all albums.

        @keyword kwfunc: function taking a single string and returning a list of keywords
        @return: a tuple of (number of albums, number of images, {keyword: occurrences})
        """
        b = self.batch()
        albums = self.albums_get()["Albums"]
        for album in albums:
            b.images_get(AlbumID=album["id"], AlbumKey=album["Key"], Heavy=True)

        images = 0
        kwfunc = kwfunc or kwsplit
        cloud = collections.defaultdict(lambda: 0)
        for params, response in b():
            album = response["Album"]
            images += album["ImageCount"]
            for m in (x for x in (y["Keywords"].strip() for y in album["Images"]) if x):
                for k in kwfunc(m):
                    cloud[k] = cloud[k] + 1
        return (len(albums), images, cloud)
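
# A minimal usage sketch (not part of the original module). Per the tagcloud
# docstring, a kwfunc takes one keyword string and returns a list of keywords;
# a comma-separated variant (hypothetical) could be passed as
# tool.tagcloud(kwfunc=comma_split) on an authenticated SmugTool instance:
#
#     def comma_split(word):
#         return [k.strip() for k in word.split(",") if k.strip()]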
|
bzimmer/pysmug
|
pysmug/smugtool.py
|
Python
|
mit
| 8,264
|
# -*- encoding: utf-8 -*-
from supriya.tools.ugentools.BEQSuite import BEQSuite
class BPeakEQ(BEQSuite):
    r'''A parametric equalizer.

    ::

        >>> source = ugentools.In.ar(0)
        >>> bpeak_eq = ugentools.BPeakEQ.ar(
        ...     frequency=1200,
        ...     gain=0,
        ...     reciprocal_of_q=1,
        ...     source=source,
        ...     )
        >>> bpeak_eq
        BPeakEQ.ar()

    '''

    ### CLASS VARIABLES ###

    __documentation_section__ = 'Filter UGens'

    __slots__ = ()

    _ordered_input_names = (
        'source',
        'frequency',
        'reciprocal_of_q',
        'gain',
        )

    _valid_calculation_rates = None

    ### INITIALIZER ###

    def __init__(
        self,
        calculation_rate=None,
        frequency=1200,
        gain=0,
        reciprocal_of_q=1,
        source=None,
        ):
        BEQSuite.__init__(
            self,
            calculation_rate=calculation_rate,
            frequency=frequency,
            gain=gain,
            reciprocal_of_q=reciprocal_of_q,
            source=source,
            )

    ### PUBLIC METHODS ###

    @classmethod
    def ar(
        cls,
        frequency=1200,
        gain=0,
        reciprocal_of_q=1,
        source=None,
        ):
        r'''Constructs an audio-rate BPeakEQ.

        ::

            >>> source = ugentools.In.ar(0)
            >>> bpeak_eq = ugentools.BPeakEQ.ar(
            ...     frequency=1200,
            ...     gain=0,
            ...     reciprocal_of_q=1,
            ...     source=source,
            ...     )
            >>> bpeak_eq
            BPeakEQ.ar()

        Returns ugen graph.
        '''
        from supriya.tools import synthdeftools
        calculation_rate = synthdeftools.CalculationRate.AUDIO
        ugen = cls._new_expanded(
            calculation_rate=calculation_rate,
            frequency=frequency,
            gain=gain,
            reciprocal_of_q=reciprocal_of_q,
            source=source,
            )
        return ugen

    # def coeffs(): ...

    # def magResponse(): ...

    # def magResponse2(): ...

    # def magResponse5(): ...

    # def magResponseN(): ...

    # def sc(): ...

    # def scopeResponse(): ...

    ### PUBLIC PROPERTIES ###

    @property
    def gain(self):
        r'''Gets `gain` input of BPeakEQ.

        ::

            >>> source = ugentools.In.ar(0)
            >>> bpeak_eq = ugentools.BPeakEQ.ar(
            ...     frequency=1200,
            ...     gain=0,
            ...     reciprocal_of_q=1,
            ...     source=source,
            ...     )
            >>> bpeak_eq.gain
            0.0

        Returns ugen input.
        '''
        index = self._ordered_input_names.index('gain')
        return self._inputs[index]

    @property
    def frequency(self):
        r'''Gets `frequency` input of BPeakEQ.

        ::

            >>> source = ugentools.In.ar(0)
            >>> bpeak_eq = ugentools.BPeakEQ.ar(
            ...     frequency=1200,
            ...     gain=0,
            ...     reciprocal_of_q=1,
            ...     source=source,
            ...     )
            >>> bpeak_eq.frequency
            1200.0

        Returns ugen input.
        '''
        index = self._ordered_input_names.index('frequency')
        return self._inputs[index]

    @property
    def reciprocal_of_q(self):
        r'''Gets `reciprocal_of_q` input of BPeakEQ.

        ::

            >>> source = ugentools.In.ar(0)
            >>> bpeak_eq = ugentools.BPeakEQ.ar(
            ...     frequency=1200,
            ...     gain=0,
            ...     reciprocal_of_q=1,
            ...     source=source,
            ...     )
            >>> bpeak_eq.reciprocal_of_q
            1.0

        Returns ugen input.
        '''
        index = self._ordered_input_names.index('reciprocal_of_q')
        return self._inputs[index]

    @property
    def source(self):
        r'''Gets `source` input of BPeakEQ.

        ::

            >>> source = ugentools.In.ar(0)
            >>> bpeak_eq = ugentools.BPeakEQ.ar(
            ...     frequency=1200,
            ...     gain=0,
            ...     reciprocal_of_q=1,
            ...     source=source,
            ...     )
            >>> bpeak_eq.source
            OutputProxy(
                source=In(
                    bus=0.0,
                    calculation_rate=CalculationRate.AUDIO,
                    channel_count=1
                    ),
                output_index=0
                )

        Returns ugen input.
        '''
        index = self._ordered_input_names.index('source')
        return self._inputs[index]
|
andrewyoung1991/supriya
|
supriya/tools/ugentools/BPeakEQ.py
|
Python
|
mit
| 4,652
|
# Copyright (c) 2015-2020 Rocky Bernstein
# Copyright (c) 2000-2002 by hartmut Goebel <h.goebel@crazy-compilers.com>
#
# Copyright (c) 1999 John Aycock
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Base grammar for Python 2.x.
However, instead of terminal symbols being the usual ASCII text,
e.g. 5, myvariable, "for", etc., they are CPython Bytecode tokens,
e.g. "LOAD_CONST 5", "STORE_NAME myvariable", "SETUP_LOOP", etc.
If we succeed in creating a parse tree, then we have a Python program
that a later phase can turn into a sequence of ASCII text.
"""
from __future__ import print_function
from uncompyle6.parsers.reducecheck import (except_handler_else, ifelsestmt, tryelsestmt)
from uncompyle6.parser import PythonParser, PythonParserSingle, nop_func
from uncompyle6.parsers.treenode import SyntaxTree
from spark_parser import DEFAULT_DEBUG as PARSER_DEFAULT_DEBUG
class Python2Parser(PythonParser):
def __init__(self, debug_parser=PARSER_DEFAULT_DEBUG):
super(Python2Parser, self).__init__(SyntaxTree, "stmts", debug=debug_parser)
self.new_rules = set()
def p_print2(self, args):
"""
stmt ::= print_items_stmt
stmt ::= print_nl
stmt ::= print_items_nl_stmt
print_items_stmt ::= expr PRINT_ITEM print_items_opt
print_items_nl_stmt ::= expr PRINT_ITEM print_items_opt PRINT_NEWLINE_CONT
print_items_opt ::= print_items?
print_items ::= print_item+
print_item ::= expr PRINT_ITEM_CONT
print_nl ::= PRINT_NEWLINE
"""
def p_print_to(self, args):
"""
stmt ::= print_to
stmt ::= print_to_nl
stmt ::= print_nl_to
print_to ::= expr print_to_items POP_TOP
print_to_nl ::= expr print_to_items PRINT_NEWLINE_TO
print_nl_to ::= expr PRINT_NEWLINE_TO
print_to_items ::= print_to_items print_to_item
print_to_items ::= print_to_item
print_to_item ::= DUP_TOP expr ROT_TWO PRINT_ITEM_TO
"""
def p_grammar(self, args):
"""
sstmt ::= stmt
sstmt ::= return RETURN_LAST
return_if_stmts ::= return_if_stmt
return_if_stmts ::= _stmts return_if_stmt
return_if_stmt ::= ret_expr RETURN_END_IF
return_stmt_lambda ::= ret_expr RETURN_VALUE_LAMBDA
stmt ::= break
break ::= BREAK_LOOP
stmt ::= continue
continue ::= CONTINUE
continues ::= _stmts lastl_stmt continue
continues ::= lastl_stmt continue
continues ::= continue
stmt ::= assert2
stmt ::= raise_stmt0
stmt ::= raise_stmt1
stmt ::= raise_stmt2
stmt ::= raise_stmt3
raise_stmt0 ::= RAISE_VARARGS_0
raise_stmt1 ::= expr RAISE_VARARGS_1
raise_stmt2 ::= expr expr RAISE_VARARGS_2
raise_stmt3 ::= expr expr expr RAISE_VARARGS_3
for ::= SETUP_LOOP expr for_iter store
for_block POP_BLOCK _come_froms
delete ::= delete_subscript
delete_subscript ::= expr expr DELETE_SUBSCR
delete ::= expr DELETE_ATTR
_mklambda ::= load_closure mklambda
kwarg ::= LOAD_CONST expr
kv3 ::= expr expr STORE_MAP
classdef ::= buildclass store
buildclass ::= LOAD_CONST expr mkfunc
CALL_FUNCTION_0 BUILD_CLASS
# Class decorators starting in 2.6
stmt ::= classdefdeco
classdefdeco ::= classdefdeco1 store
classdefdeco1 ::= expr classdefdeco1 CALL_FUNCTION_1
classdefdeco1 ::= expr classdefdeco2 CALL_FUNCTION_1
classdefdeco2 ::= LOAD_CONST expr mkfunc CALL_FUNCTION_0 BUILD_CLASS
assert_expr ::= expr
assert_expr ::= assert_expr_or
assert_expr ::= assert_expr_and
assert_expr_or ::= assert_expr jmp_true expr
assert_expr_and ::= assert_expr jmp_false expr
ifstmt ::= testexpr _ifstmts_jump
testexpr ::= testfalse
testexpr ::= testtrue
testfalse ::= expr jmp_false
testtrue ::= expr jmp_true
_ifstmts_jump ::= return_if_stmts
iflaststmt ::= testexpr c_stmts_opt JUMP_ABSOLUTE
iflaststmtl ::= testexpr c_stmts_opt JUMP_BACK
# this is nested inside a try_except
tryfinallystmt ::= SETUP_FINALLY suite_stmts_opt
POP_BLOCK LOAD_CONST
COME_FROM suite_stmts_opt END_FINALLY
lastc_stmt ::= tryelsestmtc
# Move to 2.7? 2.6 may use come_froms
tryelsestmtc ::= SETUP_EXCEPT suite_stmts_opt POP_BLOCK
except_handler_else else_suitec COME_FROM
tryelsestmtl ::= SETUP_EXCEPT suite_stmts_opt POP_BLOCK
except_handler_else else_suitel COME_FROM
try_except ::= SETUP_EXCEPT suite_stmts_opt POP_BLOCK
except_handler COME_FROM
# Note: except_stmts may have many jumps after END_FINALLY
except_handler ::= JUMP_FORWARD COME_FROM except_stmts
END_FINALLY come_froms
except_handler ::= jmp_abs COME_FROM except_stmts
END_FINALLY
except_handler_else ::= except_handler
except_stmts ::= except_stmt+
except_stmt ::= except_cond1 except_suite
except_stmt ::= except
except_suite ::= c_stmts_opt JUMP_FORWARD
except_suite ::= c_stmts_opt jmp_abs
except_suite ::= returns
except ::= POP_TOP POP_TOP POP_TOP c_stmts_opt _jump
except ::= POP_TOP POP_TOP POP_TOP returns
jmp_abs ::= JUMP_ABSOLUTE
jmp_abs ::= JUMP_BACK
jmp_abs ::= CONTINUE
"""
def p_generator_exp2(self, args):
"""
generator_exp ::= LOAD_GENEXPR MAKE_FUNCTION_0 expr GET_ITER CALL_FUNCTION_1
"""
def p_expr2(self, args):
"""
expr ::= LOAD_LOCALS
expr ::= LOAD_ASSERT
expr ::= slice0
expr ::= slice1
expr ::= slice2
expr ::= slice3
expr ::= unary_convert
expr_jt ::= expr jmp_true
or ::= expr_jt expr come_from_opt
and ::= expr jmp_false expr come_from_opt
unary_convert ::= expr UNARY_CONVERT
# In Python 3, DUP_TOPX_2 is DUP_TOP_TWO
subscript2 ::= expr expr DUP_TOPX_2 BINARY_SUBSCR
"""
def p_slice2(self, args):
"""
store ::= expr STORE_SLICE+0
store ::= expr expr STORE_SLICE+1
store ::= expr expr STORE_SLICE+2
store ::= expr expr expr STORE_SLICE+3
aug_assign1 ::= expr expr inplace_op ROT_FOUR STORE_SLICE+3
aug_assign1 ::= expr expr inplace_op ROT_THREE STORE_SLICE+1
aug_assign1 ::= expr expr inplace_op ROT_THREE STORE_SLICE+2
aug_assign1 ::= expr expr inplace_op ROT_TWO STORE_SLICE+0
slice0 ::= expr SLICE+0
slice0 ::= expr DUP_TOP SLICE+0
slice1 ::= expr expr SLICE+1
slice1 ::= expr expr DUP_TOPX_2 SLICE+1
slice2 ::= expr expr SLICE+2
slice2 ::= expr expr DUP_TOPX_2 SLICE+2
slice3 ::= expr expr expr SLICE+3
slice3 ::= expr expr expr DUP_TOPX_3 SLICE+3
"""
def p_op2(self, args):
"""
inplace_op ::= INPLACE_DIVIDE
binary_operator ::= BINARY_DIVIDE
"""
def customize_grammar_rules(self, tokens, customize):
"""The base grammar we start out for a Python version even with the
subclassing is, well, is pretty base. And we want it that way: lean and
mean so that parsing will go faster.
Here, we add additional grammar rules based on specific instructions
that are in the instruction/token stream. In classes that
inherit from from here and other versions, grammar rules may
also be removed.
For example if we see a pretty rare JUMP_IF_NOT_DEBUG
instruction we'll add the grammar for that.
More importantly, here we add grammar rules for instructions
that may access a variable number of stack items. CALL_FUNCTION,
BUILD_LIST and so on are like this.
Without custom rules, there can be an super-exponential number of
derivations. See the deparsing paper for an elaboration of
this.
"""
if "PyPy" in customize:
# PyPy-specific customizations
self.addRule(
"""
stmt ::= assign3_pypy
stmt ::= assign2_pypy
assign3_pypy ::= expr expr expr store store store
assign2_pypy ::= expr expr store store
list_comp ::= expr BUILD_LIST_FROM_ARG for_iter store list_iter
JUMP_BACK
""",
nop_func,
)
# For a rough break out on the first word. This may
# include instructions that don't need customization,
# but we'll do a finer check after the rough breakout.
customize_instruction_basenames = frozenset(
(
"BUILD",
"CALL",
"CONTINUE",
"DELETE",
"DUP",
"EXEC",
"GET",
"JUMP",
"LOAD",
"LOOKUP",
"MAKE",
"SETUP",
"RAISE",
"UNPACK",
)
)
# Opcode names in the custom_seen_ops set have rules that get added
# unconditionally and the rules are constant. So they need to be done
# only once and if we see the opcode a second we don't have to consider
# adding more rules.
#
custom_seen_ops = set()
for i, token in enumerate(tokens):
opname = token.kind
            # Do a quick breakout before potentially testing each of the
            # dozen or so instructions in the if/elif chain below.
if (
opname[: opname.find("_")] not in customize_instruction_basenames
or opname in custom_seen_ops
):
continue
opname_base = opname[: opname.rfind("_")]
# The order of opname listed is roughly sorted below
if opname_base in ("BUILD_LIST", "BUILD_SET", "BUILD_TUPLE"):
                # We do this complicated test to speed up parsing of
                # pathologically long literals, especially those over 1024.
build_count = token.attr
thousands = build_count // 1024
thirty32s = (build_count // 32) % 32
if thirty32s > 0:
rule = "expr32 ::=%s" % (" expr" * 32)
self.add_unique_rule(rule, opname_base, build_count, customize)
if thousands > 0:
self.add_unique_rule(
"expr1024 ::=%s" % (" expr32" * 32),
opname_base,
build_count,
customize,
)
collection = opname_base[opname_base.find("_") + 1 :].lower()
rule = (
("%s ::= " % collection)
+ "expr1024 " * thousands
+ "expr32 " * thirty32s
+ "expr " * (build_count % 32)
+ opname
)
self.add_unique_rules(["expr ::= %s" % collection, rule], customize)
continue
elif opname_base == "BUILD_MAP":
if opname == "BUILD_MAP_n":
# PyPy sometimes has no count. Sigh.
self.add_unique_rules(
[
"kvlist_n ::= kvlist_n kv3",
"kvlist_n ::=",
"dict ::= BUILD_MAP_n kvlist_n",
],
customize,
)
if self.version >= 2.7:
self.add_unique_rule(
"dict_comp_func ::= BUILD_MAP_n LOAD_FAST FOR_ITER store "
"comp_iter JUMP_BACK RETURN_VALUE RETURN_LAST",
"dict_comp_func",
0,
customize,
)
else:
kvlist_n = " kv3" * token.attr
rule = "dict ::= %s%s" % (opname, kvlist_n)
self.addRule(rule, nop_func)
continue
elif opname_base == "BUILD_SLICE":
slice_num = token.attr
if slice_num == 2:
self.add_unique_rules(
[
"expr ::= build_slice2",
"build_slice2 ::= expr expr BUILD_SLICE_2",
],
customize,
)
else:
assert slice_num == 3, (
"BUILD_SLICE value must be 2 or 3; is %s" % slice_num
)
self.add_unique_rules(
[
"expr ::= build_slice3",
"build_slice3 ::= expr expr expr BUILD_SLICE_3",
],
customize,
)
continue
elif opname_base in (
"CALL_FUNCTION",
"CALL_FUNCTION_VAR",
"CALL_FUNCTION_VAR_KW",
"CALL_FUNCTION_KW",
):
args_pos, args_kw = self.get_pos_kw(token)
# number of apply equiv arguments:
nak = (len(opname_base) - len("CALL_FUNCTION")) // 3
rule = (
"call ::= expr "
+ "expr " * args_pos
+ "kwarg " * args_kw
+ "expr " * nak
+ opname
)
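                # Illustration (hypothetical token): CALL_FUNCTION_VAR_258
                # encodes 2 positional and 1 keyword argument, and nak == 1
                # for the trailing *args, giving:
                #   call ::= expr expr expr kwarg expr CALL_FUNCTION_VAR_258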
elif opname_base == "CALL_METHOD":
# PyPy only - DRY with parse3
args_pos, args_kw = self.get_pos_kw(token)
# number of apply equiv arguments:
nak = (len(opname_base) - len("CALL_METHOD")) // 3
rule = (
"call ::= expr "
+ "expr " * args_pos
+ "kwarg " * args_kw
+ "expr " * nak
+ opname
)
elif opname == "CONTINUE_LOOP":
self.addRule("continue ::= CONTINUE_LOOP", nop_func)
custom_seen_ops.add(opname)
continue
elif opname == "DELETE_ATTR":
self.addRule("delete ::= expr DELETE_ATTR", nop_func)
custom_seen_ops.add(opname)
continue
elif opname.startswith("DELETE_SLICE"):
self.addRule(
"""
del_expr ::= expr
delete ::= del_expr DELETE_SLICE+0
delete ::= del_expr del_expr DELETE_SLICE+1
delete ::= del_expr del_expr DELETE_SLICE+2
delete ::= del_expr del_expr del_expr DELETE_SLICE+3
""",
nop_func,
)
custom_seen_ops.add(opname)
self.check_reduce["del_expr"] = "AST"
continue
elif opname == "DELETE_DEREF":
self.addRule(
"""
stmt ::= del_deref_stmt
del_deref_stmt ::= DELETE_DEREF
""",
nop_func,
)
custom_seen_ops.add(opname)
continue
elif opname == "DELETE_SUBSCR":
self.addRule(
"""
delete ::= delete_subscript
delete_subscript ::= expr expr DELETE_SUBSCR
""",
nop_func,
)
self.check_reduce["delete_subscript"] = "AST"
custom_seen_ops.add(opname)
continue
elif opname == "GET_ITER":
self.addRule(
"""
expr ::= get_iter
attribute ::= expr GET_ITER
""",
nop_func,
)
custom_seen_ops.add(opname)
continue
elif opname_base in ("DUP_TOPX", "RAISE_VARARGS"):
# FIXME: remove these conditions if they are not needed.
# no longer need to add a rule
continue
elif opname == "EXEC_STMT":
self.addRule(
"""
stmt ::= exec_stmt
exec_stmt ::= expr exprlist DUP_TOP EXEC_STMT
exec_stmt ::= expr exprlist EXEC_STMT
exprlist ::= expr+
""",
nop_func,
)
continue
elif opname == "JUMP_IF_NOT_DEBUG":
self.addRule(
"""
jmp_true_false ::= POP_JUMP_IF_TRUE
jmp_true_false ::= POP_JUMP_IF_FALSE
stmt ::= assert_pypy
stmt ::= assert2_pypy
assert_pypy ::= JUMP_IF_NOT_DEBUG assert_expr jmp_true_false
LOAD_ASSERT RAISE_VARARGS_1 COME_FROM
assert2_pypy ::= JUMP_IF_NOT_DEBUG assert_expr jmp_true_false
LOAD_ASSERT expr CALL_FUNCTION_1
RAISE_VARARGS_1 COME_FROM
""",
nop_func,
)
continue
elif opname == "LOAD_ATTR":
self.addRule(
"""
expr ::= attribute
attribute ::= expr LOAD_ATTR
""",
nop_func,
)
custom_seen_ops.add(opname)
continue
elif opname == "LOAD_LISTCOMP":
self.addRule("expr ::= listcomp", nop_func)
custom_seen_ops.add(opname)
continue
elif opname == "LOAD_SETCOMP":
self.add_unique_rules(
[
"expr ::= set_comp",
"set_comp ::= LOAD_SETCOMP MAKE_FUNCTION_0 expr GET_ITER CALL_FUNCTION_1",
],
customize,
)
custom_seen_ops.add(opname)
continue
elif opname == "LOOKUP_METHOD":
# A PyPy speciality - DRY with parse3
self.addRule(
"""
expr ::= attribute
attribute ::= expr LOOKUP_METHOD
""",
nop_func,
)
custom_seen_ops.add(opname)
continue
elif opname_base == "MAKE_FUNCTION":
if i > 0 and tokens[i - 1] == "LOAD_LAMBDA":
self.addRule(
"mklambda ::= %s LOAD_LAMBDA %s"
% ("pos_arg " * token.attr, opname),
nop_func,
)
rule = "mkfunc ::= %s LOAD_CODE %s" % ("expr " * token.attr, opname)
elif opname_base == "MAKE_CLOSURE":
# FIXME: use add_unique_rules to tidy this up.
if i > 0 and tokens[i - 1] == "LOAD_LAMBDA":
self.addRule(
"mklambda ::= %s load_closure LOAD_LAMBDA %s"
% ("expr " * token.attr, opname),
nop_func,
)
if i > 0:
prev_tok = tokens[i - 1]
if prev_tok == "LOAD_GENEXPR":
self.add_unique_rules(
[
(
"generator_exp ::= %s load_closure LOAD_GENEXPR %s expr"
" GET_ITER CALL_FUNCTION_1"
% ("expr " * token.attr, opname)
)
],
customize,
)
pass
self.add_unique_rules(
[
(
"mkfunc ::= %s load_closure LOAD_CODE %s"
% ("expr " * token.attr, opname)
)
],
customize,
)
if self.version >= 2.7:
if i > 0:
prev_tok = tokens[i - 1]
if prev_tok == "LOAD_DICTCOMP":
self.add_unique_rules(
[
(
"dict_comp ::= %s load_closure LOAD_DICTCOMP %s expr"
" GET_ITER CALL_FUNCTION_1"
% ("expr " * token.attr, opname)
)
],
customize,
)
elif prev_tok == "LOAD_SETCOMP":
self.add_unique_rules(
[
"expr ::= set_comp",
(
"set_comp ::= %s load_closure LOAD_SETCOMP %s expr"
" GET_ITER CALL_FUNCTION_1"
% ("expr " * token.attr, opname)
),
],
customize,
)
pass
pass
continue
elif opname == "SETUP_EXCEPT":
if "PyPy" in customize:
self.add_unique_rules(
[
"stmt ::= try_except_pypy",
"try_except_pypy ::= SETUP_EXCEPT suite_stmts_opt except_handler_pypy",
"except_handler_pypy ::= COME_FROM except_stmts END_FINALLY COME_FROM",
],
customize,
)
custom_seen_ops.add(opname)
continue
elif opname == "SETUP_FINALLY":
if "PyPy" in customize:
self.addRule(
"""
stmt ::= tryfinallystmt_pypy
tryfinallystmt_pypy ::= SETUP_FINALLY suite_stmts_opt COME_FROM_FINALLY
suite_stmts_opt END_FINALLY""",
nop_func,
)
custom_seen_ops.add(opname)
continue
elif opname_base in ("UNPACK_TUPLE", "UNPACK_SEQUENCE"):
custom_seen_ops.add(opname)
rule = "unpack ::= " + opname + " store" * token.attr
elif opname_base == "UNPACK_LIST":
custom_seen_ops.add(opname)
rule = "unpack_list ::= " + opname + " store" * token.attr
else:
continue
self.addRule(rule, nop_func)
pass
self.reduce_check_table = {
# "and": and_check,
"except_handler_else": except_handler_else,
"ifelsestmt": ifelsestmt,
# "or": or_check,
"tryelsestmt": tryelsestmt,
"tryelsestmtl": tryelsestmt,
}
self.check_reduce["and"] = "AST"
self.check_reduce["except_handler_else"] = "tokens"
self.check_reduce["raise_stmt1"] = "tokens"
self.check_reduce["assert_expr_and"] = "AST"
self.check_reduce["tryelsestmt"] = "AST"
self.check_reduce["tryelsestmtl"] = "AST"
self.check_reduce["aug_assign2"] = "AST"
self.check_reduce["or"] = "AST"
self.check_reduce["ifstmt"] = "tokens"
# self.check_reduce['_stmts'] = 'AST'
# Dead code testing...
# self.check_reduce['while1elsestmt'] = 'tokens'
return
def reduce_is_invalid(self, rule, ast, tokens, first, last):
if tokens is None:
return False
lhs = rule[0]
n = len(tokens)
fn = self.reduce_check_table.get(lhs, None)
if fn:
if fn(self, lhs, n, rule, ast, tokens, first, last):
return True
pass
if rule == ("and", ("expr", "jmp_false", "expr", "\\e_come_from_opt")):
            # If the instruction after the instructions forming the "and" is a
            # "YIELD_VALUE", then this is probably an "if" inside a comprehension.
if tokens[last] == "YIELD_VALUE":
# Note: We might also consider testing last+1 being "POP_TOP"
return True
            # Test that jmp_false jumps somewhere beyond the end of the "and";
            # it might not be exactly the end of the "and" because this "and"
            # can be part of a larger condition. Oddly, in 2.7 there doesn't
            # seem to be an optimization where the "and" jmp_false jumps back
            # to a loop.
jmp_false = ast[1]
if jmp_false[0] == "POP_JUMP_IF_FALSE":
while (first < last and isinstance(tokens[last].offset, str)):
last -= 1
if jmp_false[0].attr < tokens[last].offset:
return True
# Test that jmp_false jumps to the end of "and"
# or that it jumps to the same place as the end of "and"
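            # (offset + attr + 3: this pre-2.7 jump is relative to the next
            # instruction, and a jump instruction occupies three bytes --
            # one opcode byte plus a two-byte operand.)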
jmp_false = ast[1][0]
jmp_target = jmp_false.offset + jmp_false.attr + 3
return not (jmp_target == tokens[last].offset or
tokens[last].pattr == jmp_false.pattr)
# Dead code testing...
# if lhs == 'while1elsestmt':
# from trepan.api import debug; debug()
elif (
lhs in ("aug_assign1", "aug_assign2")
and ast[0]
and ast[0][0] in ("and", "or")
):
return True
elif lhs == "assert_expr_and":
jmp_false = ast[1]
jump_target = jmp_false[0].attr
return jump_target > tokens[last].off2int()
elif lhs in ("raise_stmt1",):
# We will assume 'LOAD_ASSERT' will be handled by an assert grammar rule
return tokens[first] == "LOAD_ASSERT" and (last >= len(tokens))
elif rule == ("or", ("expr", "jmp_true", "expr", "\\e_come_from_opt")):
expr2 = ast[2]
return expr2 == "expr" and expr2[0] == "LOAD_ASSERT"
elif lhs in ("delete_subscript", "del_expr"):
op = ast[0][0]
return op.kind in ("and", "or")
return False
class Python2ParserSingle(Python2Parser, PythonParserSingle):
pass
if __name__ == "__main__":
# Check grammar
p = Python2Parser()
p.check_grammar()
|
TeamSPoon/logicmoo_workspace
|
packs_web/butterfly/lib/python3.7/site-packages/uncompyle6/parsers/parse2.py
|
Python
|
mit
| 28,339
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Resource(Model):
"""The Resource model definition.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The resource id.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: The resource tags.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, location=None, tags=None):
super(Resource, self).__init__()
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
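# Usage sketch (illustrative values, not from the source):
#   resource = Resource(location='westus', tags={'env': 'test'})
#   # `id`, `name` and `type` stay None until populated by the server.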
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-containerinstance/azure/mgmt/containerinstance/models/resource.py
|
Python
|
mit
| 1,617
|
digits = [
" 000000000 \n 00:::::::::00 \n 00:::::::::::::00 \n0:::::::000:::::::0\n0::::::0 0::::::0\n0:::::0 0:::::0\n0:::::0 0:::::0\n0:::::0 0:::::0\n0:::::0 0:::::0\n0:::::0 0:::::0\n0:::::0 0:::::0\n0::::::0 0::::::0\n0:::::::000:::::::0\n 00:::::::::::::00 \n 00:::::::::00 \n 000000000 ",
" 1111111 \n 1::::::1 \n1:::::::1 \n111:::::1 \n 1::::1 \n 1::::1 \n 1::::1 \n 1::::l \n 1::::l \n 1::::l \n 1::::l \n 1::::l \n111::::::111\n1::::::::::1\n1::::::::::1\n111111111111",
" 222222222222222 \n2:::::::::::::::22 \n2::::::222222:::::2 \n2222222 2:::::2 \n 2:::::2 \n 2:::::2 \n 2222::::2 \n 22222::::::22 \n 22::::::::222 \n 2:::::22222 \n2:::::2 \n2:::::2 \n2:::::2 222222\n2::::::2222222:::::2\n2::::::::::::::::::2\n22222222222222222222",
" 333333333333333 \n3:::::::::::::::33 \n3::::::33333::::::3\n3333333 3:::::3\n 3:::::3\n 3:::::3\n 33333333:::::3 \n 3:::::::::::3 \n 33333333:::::3 \n 3:::::3\n 3:::::3\n 3:::::3\n3333333 3:::::3\n3::::::33333::::::3\n3:::::::::::::::33 \n 333333333333333 ",
" 444444444 \n 4::::::::4 \n 4:::::::::4 \n 4::::44::::4 \n 4::::4 4::::4 \n 4::::4 4::::4 \n 4::::4 4::::4 \n4::::444444::::444\n4::::::::::::::::4\n4444444444:::::444\n 4::::4 \n 4::::4 \n 4::::4 \n 44::::::44\n 4::::::::4\n 4444444444",
"555555555555555555 \n5::::::::::::::::5 \n5::::::::::::::::5 \n5:::::555555555555 \n5:::::5 \n5:::::5 \n5:::::5555555555 \n5:::::::::::::::5 \n555555555555:::::5 \n 5:::::5\n 5:::::5\n5555555 5:::::5\n5::::::55555::::::5\n 55:::::::::::::55 \n 55:::::::::55 \n 555555555 ",
" 66666666 \n 6::::::6 \n 6::::::6 \n 6::::::6 \n 6::::::6 \n 6::::::6 \n 6::::::6 \n 6::::::::66666 \n6::::::::::::::66 \n6::::::66666:::::6 \n6:::::6 6:::::6\n6:::::6 6:::::6\n6::::::66666::::::6\n 66:::::::::::::66 \n 66:::::::::66 \n 666666666 ",
"77777777777777777777\n7::::::::::::::::::7\n7::::::::::::::::::7\n777777777777:::::::7\n 7::::::7 \n 7::::::7 \n 7::::::7 \n 7::::::7 \n 7::::::7 \n 7::::::7 \n 7::::::7 \n 7::::::7 \n 7::::::7 \n 7::::::7 \n 7::::::7 \n77777777 ",
" 888888888 \n 88:::::::::88 \n 88:::::::::::::88 \n8::::::88888::::::8\n8:::::8 8:::::8\n8:::::8 8:::::8\n 8:::::88888:::::8 \n 8:::::::::::::8 \n 8:::::88888:::::8 \n8:::::8 8:::::8\n8:::::8 8:::::8\n8:::::8 8:::::8\n8::::::88888::::::8\n 88:::::::::::::88 \n 88:::::::::88 \n 888888888 ",
" 999999999 \n 99:::::::::99 \n 99:::::::::::::99 \n9::::::99999::::::9\n9:::::9 9:::::9\n9:::::9 9:::::9\n 9:::::99999::::::9\n 99::::::::::::::9\n 99999::::::::9 \n 9::::::9 \n 9::::::9 \n 9::::::9 \n 9::::::9 \n 9::::::9 \n 9::::::9 \n 99999999 "
]
|
jaysbays/digit-recognition
|
src/digitASCII.py
|
Python
|
mit
| 3,305
|
import sys
from ccv import PY_CCV_IO_GRAY, DenseMatrix, ClassifierCascade, detect_objects
matrix = DenseMatrix()
#from PIL import Image
#img = Image.open(sys.argv[1])
#matrix.set_buf(img.tostring(), img.mode, img.size[0], img.size[1], PY_CCV_IO_GRAY)
matrix.set_file(sys.argv[1], PY_CCV_IO_GRAY)
cascade = ClassifierCascade()
cascade.read(sys.argv[2])
print detect_objects(matrix, cascade, 1)
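# Usage sketch (Python 2; argv[1] is an image path, argv[2] a trained
# classifier cascade file):
#   python test.py image.png cascade_file
# prints whatever object rectangles detect_objects finds.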
|
yihuang/pyccv
|
test.py
|
Python
|
mit
| 397
|
# filewriter.py
"""Write a dicom media file."""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from struct import pack
import logging
logger = logging.getLogger('pydicom')
from dicom.UID import ExplicitVRLittleEndian, ImplicitVRLittleEndian, ExplicitVRBigEndian
from dicom.filebase import DicomFile
from dicom.datadict import dictionaryVR
from dicom.dataset import Dataset
from dicom.dataelem import DataElement
from dicom.tag import Tag, ItemTag, ItemDelimiterTag, SequenceDelimiterTag
from dicom.sequence import Sequence
from dicom.valuerep import extra_length_VRs
def write_numbers(fp, data_element, struct_format):
"""Write a "value" of type struct_format from the dicom file.
"Value" can be more than one number.
struct_format -- the character format as used by the struct module.
"""
endianChar = '><'[fp.is_little_endian]
value = data_element.value
if value == "":
return # don't need to write anything for empty string
format_string = endianChar + struct_format
try:
try:
value.append # works only if list, not if string or number
        except AttributeError: # is a single value - the usual case
fp.write(pack(format_string, value))
else:
for val in value:
fp.write(pack(format_string, val))
except Exception as e:
        raise IOError("{0}\nfor data_element:\n{1}".format(str(e), str(data_element)))
def write_OBvalue(fp, data_element):
"""Write a data_element with VR of 'other byte' (OB)."""
fp.write(data_element.value)
def write_OWvalue(fp, data_element):
"""Write a data_element with VR of 'other word' (OW).
Note: This **does not currently do the byte swapping** for Endian state.
"""
# XXX for now just write the raw bytes without endian swapping
fp.write(data_element.value)
def write_UI(fp, data_element):
"""Write a data_element with VR of 'unique identifier' (UI)."""
write_string(fp, data_element, '\0') # pad with 0-byte to even length
def multi_string(val):
"""Put a string together with delimiter if has more than one value"""
if isinstance(val, (list, tuple)):
return b"\\".join(val) # \ is escape chr, so "\\" gives single backslash
else:
return val
def write_string(fp, data_element, padding=' '):
"""Write a single or multivalued string."""
val = multi_string(data_element.value)
if len(val) % 2 != 0:
val = val + padding # pad to even length
fp.write(val)
def write_number_string(fp, data_element, padding=' '):
"""Handle IS or DS VR - write a number stored as a string of digits."""
# If the DS or IS has an original_string attribute, use that, so that
# unchanged data elements are written with exact string as when read from file
val = data_element.value
if isinstance(val, (list, tuple)):
val = b"\\".join((x.original_string if hasattr(x, 'original_string')
else str(x) for x in val))
else:
val = val.original_string if hasattr(val, 'original_string') else str(val)
if len(val) % 2 != 0:
val = val + padding # pad to even length
fp.write(val)
def write_data_element(fp, data_element):
"""Write the data_element to file fp according to dicom media storage rules."""
fp.write_tag(data_element.tag)
VR = data_element.VR
if not fp.is_implicit_VR:
if len(VR) != 2:
msg = "Cannot write ambiguous VR of '%s' for data element with tag %r." % (VR, data_element.tag)
msg += "\nSet the correct VR before writing, or use an implicit VR transfer syntax"
raise ValueError(msg)
fp.write(VR)
if VR in extra_length_VRs:
fp.write_US(0) # reserved 2 bytes
if VR not in writers:
raise NotImplementedError("write_data_element: unknown Value Representation '{0}'".format(VR))
length_location = fp.tell() # save location for later.
if not fp.is_implicit_VR and VR not in ['OB', 'OW', 'OF', 'SQ', 'UT', 'UN']:
fp.write_US(0) # Explicit VR length field is only 2 bytes
else:
fp.write_UL(0xFFFFFFFFL) # will fill in real length value later if not undefined length item
try:
writers[VR][0] # if writer is a tuple, then need to pass a number format
except TypeError:
writers[VR](fp, data_element) # call the function to write that kind of item
else:
writers[VR][0](fp, data_element, writers[VR][1])
# print DataElement(tag, VR, value)
is_undefined_length = False
if hasattr(data_element, "is_undefined_length") and data_element.is_undefined_length:
is_undefined_length = True
location = fp.tell()
fp.seek(length_location)
if not fp.is_implicit_VR and VR not in ['OB', 'OW', 'OF', 'SQ', 'UT', 'UN']:
fp.write_US(location - length_location - 2) # 2 is length of US
else:
# write the proper length of the data_element back in the length slot, unless is SQ with undefined length.
if not is_undefined_length:
fp.write_UL(location - length_location - 4) # 4 is length of UL
fp.seek(location) # ready for next data_element
if is_undefined_length:
fp.write_tag(SequenceDelimiterTag)
fp.write_UL(0) # 4-byte 'length' of delimiter data item
def write_dataset(fp, dataset):
"""Write a Dataset dictionary to the file. Return the total length written."""
fpStart = fp.tell()
# data_elements must be written in tag order
tags = sorted(dataset.keys())
for tag in tags:
write_data_element(fp, dataset[tag])
return fp.tell() - fpStart
def write_sequence(fp, data_element):
"""Write a dicom Sequence contained in data_element to the file fp."""
# write_data_element has already written the VR='SQ' (if needed) and
# a placeholder for length"""
sequence = data_element.value
for dataset in sequence:
write_sequence_item(fp, dataset)
def write_sequence_item(fp, dataset):
"""Write an item (dataset) in a dicom Sequence to the dicom file fp."""
# see Dicom standard Part 5, p. 39 ('03 version)
# This is similar to writing a data_element, but with a specific tag for Sequence Item
fp.write_tag(ItemTag) # marker for start of Sequence Item
length_location = fp.tell() # save location for later.
fp.write_UL(0xffffffffL) # will fill in real value later if not undefined length
write_dataset(fp, dataset)
if getattr(dataset, "is_undefined_length_sequence_item", False):
fp.write_tag(ItemDelimiterTag)
fp.write_UL(0) # 4-bytes 'length' field for delimiter item
else: # we will be nice and set the lengths for the reader of this file
location = fp.tell()
fp.seek(length_location)
fp.write_UL(location - length_location - 4) # 4 is length of UL
fp.seek(location) # ready for next data_element
def write_UN(fp, data_element):
"""Write a byte string for an DataElement of value 'UN' (unknown)."""
fp.write(data_element.value)
def write_ATvalue(fp, data_element):
"""Write a data_element tag to a file."""
try:
iter(data_element.value) # see if is multi-valued AT; # Note will fail if Tag ever derived from true tuple rather than being a long
except TypeError:
tag = Tag(data_element.value) # make sure is expressed as a Tag instance
fp.write_tag(tag)
else:
tags = [Tag(tag) for tag in data_element.value]
for tag in tags:
fp.write_tag(tag)
def _write_file_meta_info(fp, meta_dataset):
"""Write the dicom group 2 dicom storage File Meta Information to the file.
The file should already be positioned past the 128 byte preamble.
Raises ValueError if the required data_elements (elements 2,3,0x10,0x12)
are not in the dataset. If the dataset came from a file read with
read_file(), then the required data_elements should already be there.
"""
fp.write(b'DICM')
# File meta info is always LittleEndian, Explicit VR. After will change these
# to the transfer syntax values set in the meta info
fp.is_little_endian = True
fp.is_implicit_VR = False
if Tag((2,1)) not in meta_dataset:
meta_dataset.add_new((2,1), b'OB', b"\0\1") # file meta information version
# Now check that required meta info tags are present:
missing = []
for element in [2, 3, 0x10, 0x12]:
if Tag((2, element)) not in meta_dataset:
missing.append(Tag((2, element)))
if missing:
raise ValueError("Missing required tags {0} for file meta information".format(str(missing)))
# Put in temp number for required group length, save current location to come back
meta_dataset[(2,0)] = DataElement((2,0), 'UL', 0) # put 0 to start
group_length_data_element_size = 12 # !based on DICOM std ExplVR
group_length_tell = fp.tell()
    # Write the file meta dataset, including temp group length
length = write_dataset(fp, meta_dataset)
group_length = length - group_length_data_element_size # counts from end of that
# Save end of file meta to go back to
end_of_file_meta = fp.tell()
# Go back and write the actual group length
fp.seek(group_length_tell)
group_length_data_element = DataElement((2,0), 'UL', group_length)
write_data_element(fp, group_length_data_element)
# Return to end of file meta, ready to write remainder of the file
fp.seek(end_of_file_meta)
def write_file(filename, dataset, WriteLikeOriginal=True):
"""Store a Dataset to the filename specified.
Set dataset.preamble if you want something other than 128 0-bytes.
If the dataset was read from an existing dicom file, then its preamble
was stored at read time. It is up to you to ensure the preamble is still
correct for its purposes.
If there is no Transfer Syntax tag in the dataset,
Set dataset.is_implicit_VR, and .is_little_endian
to determine the transfer syntax used to write the file.
WriteLikeOriginal -- True if want to preserve the following for each sequence
within this dataset:
        - preamble -- if no preamble in read file, then not used here
- dataset.hasFileMeta -- if writer did not do file meta information,
then don't write here either
- seq.is_undefined_length -- if original had delimiters, write them now too,
instead of the more sensible length characters
- <dataset>.is_undefined_length_sequence_item -- for datasets that belong to a
sequence, write the undefined length delimiters if that is
what the original had
Set WriteLikeOriginal = False to produce a "nicer" DICOM file for other readers,
where all lengths are explicit.
"""
# Decide whether to write DICOM preamble. Should always do so unless trying to mimic the original file read in
preamble = getattr(dataset, "preamble", None)
if not preamble and not WriteLikeOriginal:
preamble = b"\0"*128
file_meta = dataset.file_meta
if file_meta is None:
file_meta = Dataset()
if 'TransferSyntaxUID' not in file_meta:
if dataset.is_little_endian and dataset.is_implicit_VR:
file_meta.add_new((2, 0x10), 'UI', ImplicitVRLittleEndian)
elif dataset.is_little_endian and not dataset.is_implicit_VR:
file_meta.add_new((2, 0x10), 'UI', ExplicitVRLittleEndian)
elif not dataset.is_little_endian and not dataset.is_implicit_VR:
file_meta.add_new((2, 0x10), 'UI', ExplicitVRBigEndian)
else:
raise NotImplementedError("pydicom has not been verified for Big Endian with Implicit VR")
fp = DicomFile(filename,'wb')
try:
if preamble:
fp.write(preamble) # blank 128 byte preamble
_write_file_meta_info(fp, file_meta)
# Set file VR, endian. MUST BE AFTER writing META INFO (which changes to Explict LittleEndian)
fp.is_implicit_VR = dataset.is_implicit_VR
fp.is_little_endian = dataset.is_little_endian
write_dataset(fp, dataset)
finally:
fp.close()
# Map each VR to a function which can write it
# for write_numbers, the Writer maps to a tuple (function, struct_format)
# (struct_format is python's struct module format)
writers = {'UL':(write_numbers,'L'), 'SL':(write_numbers,'l'),
'US':(write_numbers,'H'), 'SS':(write_numbers, 'h'),
'FL':(write_numbers,'f'), 'FD':(write_numbers, 'd'),
'OF':(write_numbers,'f'),
'OB':write_OBvalue, 'UI':write_UI,
'SH':write_string, 'DA':write_string, 'TM': write_string,
'CS':write_string, 'PN':write_string, 'LO': write_string,
'IS':write_number_string, 'DS':write_number_string, 'AE': write_string,
'AS':write_string,
'LT':write_string,
'SQ':write_sequence,
'UN':write_UN,
'AT':write_ATvalue,
'ST':write_string,
'OW':write_OWvalue,
'US or SS':write_OWvalue,
'OW/OB':write_OBvalue,
'OB/OW':write_OBvalue,
'OB or OW':write_OBvalue,
'OW or OB':write_OBvalue,
'DT':write_string,
'UT':write_string,
} # note OW/OB depends on other items, which we don't know at write time
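# Usage sketch (hypothetical; note that _write_file_meta_info raises
# ValueError unless file meta tags (2,2), (2,3), (2,0x10) and (2,0x12)
# are present):
#   ds = Dataset() # plus the required data elements and file meta info
#   ds.is_little_endian = True
#   ds.is_implicit_VR = True
#   write_file("out.dcm", ds, WriteLikeOriginal=False)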
|
njvack/yadda
|
dicom/filewriter.py
|
Python
|
mit
| 13,586
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import DeserializationError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class SubnetsOperations(object):
"""SubnetsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-10-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-10-01"
self.config = config
def _delete_initial(
self, resource_group_name, virtual_network_name, subnet_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, virtual_network_name, subnet_name, custom_headers=None, raw=False, **operation_config):
"""Deletes the specified subnet.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns None or
ClientRawResponse if raw=true
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get(
self, resource_group_name, virtual_network_name, subnet_name, expand=None, custom_headers=None, raw=False, **operation_config):
"""Gets the specified subnet by virtual network and resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param expand: Expands referenced resources.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Subnet or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2017_10_01.models.Subnet or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Subnet', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def _create_or_update_initial(
self, resource_group_name, virtual_network_name, subnet_name, subnet_parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(subnet_parameters, 'Subnet')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Subnet', response)
if response.status_code == 201:
deserialized = self._deserialize('Subnet', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, virtual_network_name, subnet_name, subnet_parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a subnet in the specified virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param subnet_parameters: Parameters supplied to the create or update
subnet operation.
:type subnet_parameters: ~azure.mgmt.network.v2017_10_01.models.Subnet
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns Subnet or
ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_10_01.models.Subnet]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
subnet_parameters=subnet_parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = self._deserialize('Subnet', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def list(
self, resource_group_name, virtual_network_name, custom_headers=None, raw=False, **operation_config):
"""Gets all subnets in a virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Subnet
:rtype:
~azure.mgmt.network.v2017_10_01.models.SubnetPaged[~azure.mgmt.network.v2017_10_01.models.Subnet]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.SubnetPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.SubnetPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
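# Usage sketch (hypothetical client wiring; assumes these operations are
# exposed as `network_client.subnets` on a NetworkManagementClient):
#   poller = network_client.subnets.create_or_update(
#       'my_rg', 'my_vnet', 'my_subnet', subnet_parameters)
#   subnet = poller.result() # AzureOperationPoller blocks until completion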
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_10_01/operations/subnets_operations.py
|
Python
|
mit
| 18,375
|
"""CMS app-hook for the ``good_practice_examples`` app."""
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
class GoodPracticeExamplesApphook(CMSApp):
name = _("Good practice examples")
urls = ["good_practice_examples.urls"]
apphook_pool.register(GoodPracticeExamplesApphook)
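# Note (standard django-cms behaviour, not shown here): once registered, the
# app-hook can be attached to a CMS page via its "Advanced settings", which
# mounts ``good_practice_examples.urls`` at that page's URL.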
|
bitmazk/django-good-practice-examples
|
good_practice_examples/cms_app.py
|
Python
|
mit
| 368
|
# Extend image datasets with Keras' ImageDataGenerator.
# Generates augmented images one type/class at a time; keep only one
# class in the train folder per run.
# The folder structure is shown below.
# This code needs refactoring in the future.
"""directory structure:
```
dataset/
train/
Type_1/
001.jpg
002.jpg
...
"""
from keras.preprocessing.image import ImageDataGenerator
img_dir = '/Users/liuqh/Desktop/dataset/train'
sav_dir = '/Users/liuqh/Desktop/new'
datagen = ImageDataGenerator(
rotation_range = 90,
#width_shift_range = 0.2,
#height_shift_range = 0.2,
#zoom_range=0.2,
horizontal_flip=True,
vertical_flip=True,
fill_mode='nearest'
)
i = 1
for batch in datagen.flow_from_directory(img_dir,
target_size=(224,224),
shuffle=False,
batch_size= 100,
save_prefix='_gen',
save_to_dir=sav_dir):
i += 1
if i > 66:
break
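# Each iteration saves one batch of up to 100 augmented 224x224 images into
# sav_dir, so breaking after 66 batches caps the output at roughly 6,600
# files (the exact count depends on how many source images exist).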
|
samleoqh/machine-ln
|
src/py/img_generator.py
|
Python
|
mit
| 1,107
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'SnapToGridWidget.ui'
#
# Created: Tue Sep 1 12:58:50 2015
# by: PyQt4 UI code generator 4.11.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_SnapToGridWidget(object):
def setupUi(self, SnapToGridWidget):
SnapToGridWidget.setObjectName(_fromUtf8("SnapToGridWidget"))
SnapToGridWidget.resize(293, 797)
SnapToGridWidget.setAllowedAreas(QtCore.Qt.LeftDockWidgetArea|QtCore.Qt.RightDockWidgetArea)
self.dockWidgetContents = QtGui.QWidget()
self.dockWidgetContents.setObjectName(_fromUtf8("dockWidgetContents"))
self.scrollArea = QtGui.QScrollArea(self.dockWidgetContents)
self.scrollArea.setGeometry(QtCore.QRect(4, 4, 281, 761))
self.scrollArea.setWidgetResizable(False)
self.scrollArea.setObjectName(_fromUtf8("scrollArea"))
self.scrollAreaWidgetContents = QtGui.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 279, 759))
self.scrollAreaWidgetContents.setObjectName(_fromUtf8("scrollAreaWidgetContents"))
self.line = QtGui.QFrame(self.scrollAreaWidgetContents)
self.line.setGeometry(QtCore.QRect(10, 625, 251, 16))
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.add_layers_button = QtGui.QPushButton(self.scrollAreaWidgetContents)
self.add_layers_button.setGeometry(QtCore.QRect(5, 5, 261, 32))
self.add_layers_button.setObjectName(_fromUtf8("add_layers_button"))
self.cancel_snap_button = QtGui.QPushButton(self.scrollAreaWidgetContents)
self.cancel_snap_button.setGeometry(QtCore.QRect(140, 585, 115, 32))
self.cancel_snap_button.setObjectName(_fromUtf8("cancel_snap_button"))
self.create_backup_gbox = QtGui.QGroupBox(self.scrollAreaWidgetContents)
self.create_backup_gbox.setGeometry(QtCore.QRect(10, 485, 251, 96))
self.create_backup_gbox.setCheckable(True)
self.create_backup_gbox.setObjectName(_fromUtf8("create_backup_gbox"))
self.label_9 = QtGui.QLabel(self.create_backup_gbox)
self.label_9.setGeometry(QtCore.QRect(10, 30, 126, 16))
self.label_9.setObjectName(_fromUtf8("label_9"))
self.backup_folder_edit = QtGui.QLineEdit(self.create_backup_gbox)
self.backup_folder_edit.setGeometry(QtCore.QRect(10, 50, 191, 21))
self.backup_folder_edit.setReadOnly(True)
self.backup_folder_edit.setObjectName(_fromUtf8("backup_folder_edit"))
self.backup_folder_button = QtGui.QPushButton(self.create_backup_gbox)
self.backup_folder_button.setGeometry(QtCore.QRect(210, 47, 28, 28))
self.backup_folder_button.setText(_fromUtf8(""))
self.backup_folder_button.setObjectName(_fromUtf8("backup_folder_button"))
self.snap_layers_lwidget = QtGui.QListWidget(self.scrollAreaWidgetContents)
self.snap_layers_lwidget.setGeometry(QtCore.QRect(10, 65, 251, 201))
self.snap_layers_lwidget.setSelectionMode(QtGui.QAbstractItemView.MultiSelection)
self.snap_layers_lwidget.setObjectName(_fromUtf8("snap_layers_lwidget"))
self.remove_layer_button = QtGui.QPushButton(self.scrollAreaWidgetContents)
self.remove_layer_button.setGeometry(QtCore.QRect(5, 270, 131, 32))
self.remove_layer_button.setObjectName(_fromUtf8("remove_layer_button"))
self.remove_all_layers_button = QtGui.QPushButton(self.scrollAreaWidgetContents)
self.remove_all_layers_button.setGeometry(QtCore.QRect(135, 270, 131, 32))
self.remove_all_layers_button.setObjectName(_fromUtf8("remove_all_layers_button"))
self.label_5 = QtGui.QLabel(self.scrollAreaWidgetContents)
self.label_5.setGeometry(QtCore.QRect(10, 45, 206, 16))
self.label_5.setObjectName(_fromUtf8("label_5"))
self.snap_settings_gbox = QtGui.QGroupBox(self.scrollAreaWidgetContents)
self.snap_settings_gbox.setGeometry(QtCore.QRect(10, 315, 251, 166))
self.snap_settings_gbox.setObjectName(_fromUtf8("snap_settings_gbox"))
self.layer_rbutton = QtGui.QRadioButton(self.snap_settings_gbox)
self.layer_rbutton.setGeometry(QtCore.QRect(10, 50, 126, 20))
self.layer_rbutton.setChecked(True)
self.layer_rbutton.setObjectName(_fromUtf8("layer_rbutton"))
self.map_extent_rbutton = QtGui.QRadioButton(self.snap_settings_gbox)
self.map_extent_rbutton.setGeometry(QtCore.QRect(10, 75, 231, 20))
self.map_extent_rbutton.setChecked(False)
self.map_extent_rbutton.setObjectName(_fromUtf8("map_extent_rbutton"))
self.label_6 = QtGui.QLabel(self.snap_settings_gbox)
self.label_6.setGeometry(QtCore.QRect(15, 105, 191, 16))
self.label_6.setObjectName(_fromUtf8("label_6"))
self.grid_size_sbox = QtGui.QDoubleSpinBox(self.snap_settings_gbox)
self.grid_size_sbox.setGeometry(QtCore.QRect(15, 125, 121, 24))
self.grid_size_sbox.setDecimals(3)
self.grid_size_sbox.setMinimum(0.001)
self.grid_size_sbox.setMaximum(10.0)
self.grid_size_sbox.setSingleStep(0.01)
self.grid_size_sbox.setObjectName(_fromUtf8("grid_size_sbox"))
self.label_7 = QtGui.QLabel(self.snap_settings_gbox)
self.label_7.setGeometry(QtCore.QRect(10, 30, 126, 16))
self.label_7.setObjectName(_fromUtf8("label_7"))
self.restore_geom_gbox = QtGui.QGroupBox(self.scrollAreaWidgetContents)
self.restore_geom_gbox.setGeometry(QtCore.QRect(10, 645, 251, 91))
self.restore_geom_gbox.setObjectName(_fromUtf8("restore_geom_gbox"))
self.restore_button = QtGui.QPushButton(self.restore_geom_gbox)
self.restore_button.setGeometry(QtCore.QRect(10, 30, 106, 32))
self.restore_button.setObjectName(_fromUtf8("restore_button"))
self.cancel_restore_button = QtGui.QPushButton(self.restore_geom_gbox)
self.cancel_restore_button.setGeometry(QtCore.QRect(125, 30, 115, 32))
self.cancel_restore_button.setObjectName(_fromUtf8("cancel_restore_button"))
self.label = QtGui.QLabel(self.restore_geom_gbox)
self.label.setGeometry(QtCore.QRect(20, 60, 166, 16))
self.label.setObjectName(_fromUtf8("label"))
self.snap_button = QtGui.QPushButton(self.scrollAreaWidgetContents)
self.snap_button.setGeometry(QtCore.QRect(20, 585, 115, 32))
self.snap_button.setObjectName(_fromUtf8("snap_button"))
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
SnapToGridWidget.setWidget(self.dockWidgetContents)
self.retranslateUi(SnapToGridWidget)
QtCore.QMetaObject.connectSlotsByName(SnapToGridWidget)
SnapToGridWidget.setTabOrder(self.add_layers_button, self.snap_layers_lwidget)
SnapToGridWidget.setTabOrder(self.snap_layers_lwidget, self.remove_layer_button)
SnapToGridWidget.setTabOrder(self.remove_layer_button, self.remove_all_layers_button)
SnapToGridWidget.setTabOrder(self.remove_all_layers_button, self.map_extent_rbutton)
SnapToGridWidget.setTabOrder(self.map_extent_rbutton, self.layer_rbutton)
SnapToGridWidget.setTabOrder(self.layer_rbutton, self.grid_size_sbox)
SnapToGridWidget.setTabOrder(self.grid_size_sbox, self.create_backup_gbox)
SnapToGridWidget.setTabOrder(self.create_backup_gbox, self.backup_folder_edit)
SnapToGridWidget.setTabOrder(self.backup_folder_edit, self.backup_folder_button)
SnapToGridWidget.setTabOrder(self.backup_folder_button, self.snap_button)
SnapToGridWidget.setTabOrder(self.snap_button, self.cancel_snap_button)
SnapToGridWidget.setTabOrder(self.cancel_snap_button, self.restore_button)
SnapToGridWidget.setTabOrder(self.restore_button, self.cancel_restore_button)
def retranslateUi(self, SnapToGridWidget):
SnapToGridWidget.setWindowTitle(_translate("SnapToGridWidget", "Snap To Grid", None))
self.add_layers_button.setText(_translate("SnapToGridWidget", "Add Selected TOC Layers", None))
self.cancel_snap_button.setText(_translate("SnapToGridWidget", "Cancel", None))
self.create_backup_gbox.setTitle(_translate("SnapToGridWidget", "Create Backup (Geometries only)", None))
self.label_9.setText(_translate("SnapToGridWidget", "Backup Folder", None))
self.remove_layer_button.setText(_translate("SnapToGridWidget", "Remove Selected", None))
self.remove_all_layers_button.setText(_translate("SnapToGridWidget", "Remove All", None))
self.label_5.setText(_translate("SnapToGridWidget", "Layers To Be Snapped", None))
self.snap_settings_gbox.setTitle(_translate("SnapToGridWidget", "Snap Settings", None))
self.layer_rbutton.setText(_translate("SnapToGridWidget", "Layer Extent", None))
self.map_extent_rbutton.setText(_translate("SnapToGridWidget", "Current Map View (Intersecting)", None))
self.label_6.setText(_translate("SnapToGridWidget", "Grid Size [0.001m - 10m]", None))
self.label_7.setText(_translate("SnapToGridWidget", "Snap Extent:", None))
self.restore_geom_gbox.setTitle(_translate("SnapToGridWidget", "Restore Geometries (from Backup)", None))
self.restore_button.setText(_translate("SnapToGridWidget", "Restore", None))
self.cancel_restore_button.setText(_translate("SnapToGridWidget", "Cancel", None))
self.label.setText(_translate("SnapToGridWidget", "<html><head/><body><p><span style=\" color:#666666;\">For layers selected above.</span></p></body></html>", None))
self.snap_button.setText(_translate("SnapToGridWidget", "Snap", None))
|
allspatial/vertex-tools
|
view/Ui_SnapToGridWidget.py
|
Python
|
mit
| 10,208
|
from scipy import misc
from skimage import color
import numpy
import sompy
import pickle
for mapsize in [[3,3],[5,5],[10,10],[20,20]]:
    with open('pkls/trainedSOM' + str(mapsize[0]) + 'x' + str(mapsize[1]) + '.pkl', 'rb') as somFile:
        som = pickle.load(somFile)
codebook = som._normalizer.denormalize_by(som.data_raw, som.codebook.matrix)
L = numpy.zeros(mapsize[0]*mapsize[1])
L = L.reshape(-1,1)
img = numpy.concatenate((L, codebook),1)
img = img.reshape(mapsize[0],mapsize[1],3)
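    # the 3-channel image is interpreted as CIE Luv: pin the L (lightness)
    # channel at 50 so only the learned chroma (u, v) varies in the saved map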
img[:,:,0]=50
misc.imsave('SOM' + str(mapsize[0]) +'x'+str(mapsize[1])+'_L50.png',color.luv2rgb(img))
|
mrichart/NNcoloring
|
src/printSom.py
|
Python
|
mit
| 633
|
# Re-aligner small RNA sequence from SAM/BAM file (miRBase annotation)
from __future__ import print_function
import os.path as op
import re
import shutil
import pandas as pd
import pysam
import argparse
from seqcluster.libs import do
from seqcluster.libs.utils import file_exists
import seqcluster.libs.logger as mylog
from seqcluster.install import _get_miraligner
from seqcluster.seqbuster.snps import create_vcf
from seqcluster.collapse import collapse_fastq
from seqcluster.seqbuster.realign import *
from mirtop.gff import reader
logger = mylog.getLogger(__name__)
def _download_mirbase(args, version="CURRENT"):
    """
    Download hairpin and mature miRNA files from miRBase
    """
    if not args.hairpin or not args.mirna:
        logger.info("Working with version %s" % version)
        hairpin_fn = op.join(op.abspath(args.out), "hairpin.fa.gz")
        mirna_fn = op.join(op.abspath(args.out), "miRNA.str.gz")
        if not file_exists(hairpin_fn):
            cmd_h = "wget ftp://mirbase.org/pub/mirbase/%s/hairpin.fa.gz -O %s && gunzip -f %s" % (version, hairpin_fn, hairpin_fn)
            do.run(cmd_h, "download hairpin")
        if not file_exists(mirna_fn):
            cmd_m = "wget ftp://mirbase.org/pub/mirbase/%s/miRNA.str.gz -O %s && gunzip -f %s" % (version, mirna_fn, mirna_fn)
            do.run(cmd_m, "download mirna")
        # gunzip drops the .gz suffix, so hand back the decompressed paths;
        # the caller unpacks two values, so we must always return a pair
        return op.splitext(hairpin_fn)[0], op.splitext(mirna_fn)[0]
    return args.hairpin, args.mirna
def _make_unique(name, idx):
"""Make name unique in case only counts there"""
    p = re.compile(".[a-zA-Z]+_x[0-9]+")
if p.match(name):
tags = name[1:].split("_x")
return ">%s_%s_x%s" % (tags[0], idx, tags[1])
return name.replace("@", ">")
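# A minimal usage sketch, assuming collapsed-read headers of the form
# "<name>_x<count>":
#   _make_unique(">seq_x25", 3)  ->  ">seq_3_x25"
#   _make_unique("@read1", 0)    ->  ">read1"  (fastq header becomes fasta)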
def _filter_seqs(fn):
"""Convert names of sequences to unique ids"""
out_file = op.splitext(fn)[0] + "_unique.fa"
idx = 0
if not file_exists(out_file):
with open(out_file, 'w') as out_handle:
with open(fn) as in_handle:
line = in_handle.readline()
while line:
if line.startswith("@") or line.startswith(">"):
fixed_name = _make_unique(line.strip(), idx)
seq = in_handle.readline().strip()
counts = _get_freq(fixed_name)
if len(seq) < 26 and (counts > 1 or counts == 0):
idx += 1
print(fixed_name, file=out_handle, end="\n")
print(seq, file=out_handle, end="\n")
if line.startswith("@"):
in_handle.readline()
in_handle.readline()
line = in_handle.readline()
return out_file
def _convert_to_fasta(fn):
out_file = op.splitext(fn)[0] + ".fa"
with open(out_file, 'w') as out_handle:
with open(fn) as in_handle:
line = in_handle.readline()
while line:
if line.startswith("@"):
seq = in_handle.readline()
_ = in_handle.readline()
qual = in_handle.readline()
elif line.startswith(">"):
seq = in_handle.readline()
count = 2
                    if line.find("_x") > -1:
                        count = int(line.strip().split("_x")[1])
                    if count > 1:
                        print(">%s" % line.strip()[1:], file=out_handle)
                        print(seq.strip(), file=out_handle)
line = in_handle.readline()
return out_file
def _get_pos(string):
name = string.split(":")[0][1:]
pos = string.split(":")[1][:-1].split("-")
    return name, list(map(int, pos))
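# Usage sketch, assuming miRBase .str-style tokens such as
# "[hsa-let-7a-5p:6-27]":
#   _get_pos("[hsa-let-7a-5p:6-27]")  ->  ("hsa-let-7a-5p", [6, 27])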
def _read_mature(matures, sps):
mature = defaultdict(dict)
with open(matures) as in_handle:
for line in in_handle:
if line.startswith(">") and line.find(sps) > -1:
name = line.strip().replace(">", " ").split()
mir5p = _get_pos(name[2])
mature[name[0]] = {mir5p[0]: mir5p[1]}
if len(name) > 3:
mir3p = _get_pos(name[3])
mature[name[0]].update({mir3p[0]: mir3p[1]})
return mature
def _read_precursor(precursor, sps):
"""
Load precursor file for that species
"""
hairpin = defaultdict(str)
name = None
with open(precursor) as in_handle:
for line in in_handle:
if line.startswith(">"):
                if name and hairpin[name]:
hairpin[name] = hairpin[name] + "NNNNNNNNNNNN"
name = line.strip().replace(">", " ").split()[0]
else:
hairpin[name] += line.strip()
hairpin[name] = hairpin[name] + "NNNNNNNNNNNN"
return hairpin
def _read_gtf(gtf):
"""
Load GTF file with precursor positions on genome
"""
if not gtf:
return gtf
db = defaultdict(list)
with open(gtf) as in_handle:
for line in in_handle:
if line.startswith("#"):
continue
cols = line.strip().split("\t")
name = [n.split("=")[1] for n in cols[-1].split(";") if n.startswith("Name")]
chrom, start, end, strand = cols[0], cols[3], cols[4], cols[6]
if cols[2] == "miRNA_primary_transcript":
db[name[0]].append([chrom, int(start), int(end), strand])
return db
def _coord(sequence, start, mirna, precursor, iso):
"""
Define t5 and t3 isomirs
"""
dif = abs(mirna[0] - start)
if start < mirna[0]:
iso.t5 = sequence[:dif].upper()
elif start > mirna[0]:
iso.t5 = precursor[mirna[0] - 1:mirna[0] - 1 + dif].lower()
elif start == mirna[0]:
iso.t5 = "NA"
if dif > 4:
logger.debug("start > 3 %s %s %s %s %s" % (start, len(sequence), dif, mirna, iso.format()))
return None
end = start + (len(sequence) - len(iso.add)) - 1
dif = abs(mirna[1] - end)
if iso.add:
sequence = sequence[:-len(iso.add)]
# if dif > 3:
# return None
if end > mirna[1]:
iso.t3 = sequence[-dif:].upper()
elif end < mirna[1]:
iso.t3 = precursor[mirna[1] - dif:mirna[1]].lower()
elif end == mirna[1]:
iso.t3 = "NA"
if dif > 4:
logger.debug("end > 3 %s %s %s %s %s" % (len(sequence), end, dif, mirna, iso.format()))
return None
logger.debug("%s %s %s %s %s %s" % (start, len(sequence), end, dif, mirna, iso.format()))
return True
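# Worked example (a sketch, not exhaustive): with mature coordinates (6, 27)
# on the precursor, a read starting at 4 gets its 2 extra nt as an upper-case
# t5, a read starting at 8 gets the 2 skipped precursor bases as a lower-case
# t5, and an exact start yields t5 == "NA"; t3 is derived the same way at the
# 3' end, after stripping any non-templated addition.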
def _annotate(reads, mirbase_ref, precursors):
"""
Using SAM/BAM coordinates, mismatches and realign to annotate isomiRs
"""
for r in reads:
for p in reads[r].precursors:
start = reads[r].precursors[p].start + 1 # convert to 1base
end = start + len(reads[r].sequence)
for mature in mirbase_ref[p]:
mi = mirbase_ref[p][mature]
is_iso = _coord(reads[r].sequence, start, mi, precursors[p], reads[r].precursors[p])
logger.debug(("{r} {p} {start} {is_iso} {mature} {mi} {mature_s}").format(s=reads[r].sequence, mature_s=precursors[p][mi[0]-1:mi[1]], **locals()))
if is_iso:
reads[r].precursors[p].mirna = mature
break
return reads
def _realign(seq, precursor, start):
"""
The actual fn that will realign the sequence
"""
error = set()
pattern_addition = [[1, 1, 0], [1, 0, 1], [0, 1, 0], [0, 1, 1], [0, 0, 1], [1, 1, 1]]
for pos in range(0, len(seq)):
if seq[pos] != precursor[(start + pos)]:
error.add(pos)
subs, add = [], []
for e in error:
if e < len(seq) - 3:
subs.append([e, seq[e], precursor[start + e]])
pattern, error_add = [], []
for e in range(len(seq) - 3, len(seq)):
if e in error:
pattern.append(1)
error_add.append(e)
else:
pattern.append(0)
for p in pattern_addition:
if pattern == p:
add = seq[error_add[0]:]
break
if not add and error_add:
for e in error_add:
subs.append([e, seq[e], precursor[start + e]])
return subs, add
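# Hedged examples, assuming the read aligns at offset 0 of the padded
# precursor "TTTGGGCCCAAANNN":
#   _realign("TTTAGGCCCAAA", "TTTGGGCCCAAANNN", 0)  ->  ([[3, 'A', 'G']], [])
#   _realign("TTTGGGCCCAAT", "TTTGGGCCCAAANNN", 0)  ->  ([], "T")
# mismatches in the read body become substitutions, while 3'-terminal
# mismatches matching one of the tail patterns become an addition.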
def _clean_hits(reads):
"""
Select only best matches
"""
new_reads = defaultdict(realign)
for r in reads:
world = {}
sc = 0
for p in reads[r].precursors:
world[p] = reads[r].precursors[p].get_score(len(reads[r].sequence))
if sc < world[p]:
sc = world[p]
new_reads[r] = reads[r]
for p in world:
logger.debug("score %s %s %s" % (r, p, world[p]))
if sc != world[p]:
logger.debug("remove %s %s %s" % (r, p, world[p]))
new_reads[r].remove_precursor(p)
return new_reads
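# Note: if one read hits two precursors scoring, say, 18 and 12, only the
# score-18 hit survives; equal best scores are all kept.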
def _sort_by_name(bam_fn):
"""
sort bam file by name sequence
"""
def _sam_to_bam(bam_fn):
    # only SAM files need converting; BAM input is passed through untouched
    if bam_fn.endswith("sam"):
        bam_out = "%s.bam" % op.splitext(bam_fn)[0]
        cmd = "samtools view -Sbh {bam_fn} -o {bam_out}".format(**locals())
        do.run(cmd, "SAM to BAM")
        return bam_out
    return bam_fn
def _read_bam(bam_fn, precursors):
"""
read bam file and perform realignment of hits
"""
mode = "r" if bam_fn.endswith("sam") else "rb"
handle = pysam.Samfile(bam_fn, mode)
reads = defaultdict(realign)
for line in handle:
chrom = handle.getrname(line.reference_id)
# print("%s %s %s %s" % (line.query_name, line.reference_start, line.query_sequence, chrom))
query_name = line.query_name
if query_name not in reads:
reads[query_name].sequence = line.query_sequence
iso = isomir()
iso.align = line
iso.start = line.reference_start
iso.subs, iso.add = _realign(reads[query_name].sequence, precursors[chrom], line.reference_start)
reads[query_name].set_precursor(chrom, iso)
reads = _clean_hits(reads)
return reads
def _collapse_fastq(in_fn):
"""
collapse reads into unique sequences
"""
args = argparse.Namespace()
args.fastq = in_fn
args.minimum = 1
args.out = op.dirname(in_fn)
return collapse_fastq(args)
def _read_pyMatch(fn, precursors):
"""
read pyMatch file and perform realignment of hits
"""
with open(fn) as handle:
reads = defaultdict(realign)
for line in handle:
query_name, seq, chrom, reference_start, end, mism, add = line.split()
reference_start = int(reference_start)
# chrom = handle.getrname(cols[1])
# print("%s %s %s %s" % (line.query_name, line.reference_start, line.query_sequence, chrom))
if query_name not in reads:
reads[query_name].sequence = seq
iso = isomir()
iso.align = line
iso.start = reference_start
iso.subs, iso.add = _realign(reads[query_name].sequence, precursors[chrom], reference_start)
logger.debug("%s %s %s %s %s" % (query_name, reference_start, chrom, iso.subs, iso.add))
if len(iso.subs) > 1:
continue
reads[query_name].set_precursor(chrom, iso)
reads = _clean_hits(reads)
return reads
def _parse_mut(subs):
"""
Parse mutation tag from miraligner output
"""
if subs!="0":
subs = [[subs.replace(subs[-2:], ""),subs[-2], subs[-1]]]
return subs
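# Usage sketch, assuming a miraligner-style tag of position plus two bases:
#   _parse_mut("15TC")  ->  [["15", "T", "C"]]
#   _parse_mut("0")     ->  "0"   (no mutation; returned unchanged)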
def _read_miraligner(fn):
"""Read ouput of miraligner and create compatible output."""
reads = defaultdict(realign)
with open(fn) as in_handle:
in_handle.readline()
for line in in_handle:
cols = line.strip().split("\t")
iso = isomir()
query_name, seq = cols[1], cols[0]
chrom, reference_start = cols[-2], cols[3]
iso.mirna = cols[3]
subs, add, iso.t5, iso.t3 = cols[6:10]
if query_name not in reads:
reads[query_name].sequence = seq
iso.align = line
iso.start = reference_start
iso.subs, iso.add = _parse_mut(subs), add
logger.debug("%s %s %s %s %s" % (query_name, reference_start, chrom, iso.subs, iso.add))
reads[query_name].set_precursor(chrom, iso)
return reads
def _cmd_miraligner(fn, out_file, species, hairpin, out):
"""
Run miraligner for miRNA annotation
"""
tool = _get_miraligner()
path_db = op.dirname(op.abspath(hairpin))
cmd = "{tool} -freq -i {fn} -o {out_file} -s {species} -db {path_db} -sub 1 -trim 3 -add 3"
if not file_exists(out_file):
logger.info("Running miraligner with %s" % fn)
do.run(cmd.format(**locals()), "miraligner with %s" % fn)
shutil.move(out_file + ".mirna", out_file)
return out_file
def _mirtop(out_files, hairpin, gff3, species, out):
"""
Convert miraligner to mirtop format
"""
args = argparse.Namespace()
args.hairpin = hairpin
args.sps = species
args.gtf = gff3
args.add_extra = True
args.files = out_files
args.format = "seqbuster"
args.out_format = "gff"
args.out = out
reader(args)
def _get_freq(name):
"""
Check if name read contains counts (_xNumber)
"""
try:
counts = int(name.split("_x")[1])
    except (IndexError, ValueError):
return 0
return counts
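# For example:
#   _get_freq("seq_1_x25")  ->  25
#   _get_freq("read1")      ->  0   (no "_x" count tag present)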
def _tab_output(reads, out_file, sample):
seen = set()
lines = []
lines_pre = []
seen_ann = {}
dt = None
with open(out_file, 'w') as out_handle:
print("name\tseq\tfreq\tchrom\tstart\tend\tsubs\tadd\tt5\tt3\ts5\ts3\tDB\tprecursor\thits", file=out_handle, end="")
for (r, read) in reads.items():
hits = set()
[hits.add(mature.mirna) for mature in read.precursors.values() if mature.mirna]
hits = len(hits)
for (p, iso) in read.precursors.items():
if len(iso.subs) > 3 or not iso.mirna:
continue
if (r, iso.mirna) not in seen:
seen.add((r, iso.mirna))
chrom = iso.mirna
if not chrom:
chrom = p
count = _get_freq(r)
seq = reads[r].sequence
if iso.get_score(len(seq)) < 1:
continue
if iso.subs:
iso.subs = [] if "N" in iso.subs[0] else iso.subs
annotation = "%s:%s" % (chrom, iso.format(":"))
res = ("{seq}\t{r}\t{count}\t{chrom}\tNA\tNA\t{format}\tNA\tNA\tmiRNA\t{p}\t{hits}").format(format=iso.format().replace("NA", "0"), **locals())
if annotation in seen_ann and seq.find("N") < 0 and seen_ann[annotation].split("\t")[0].find("N") < 0:
raise ValueError("Same isomir %s from different sequence: \n%s and \n%s" % (annotation, res, seen_ann[annotation]))
seen_ann[annotation] = res
lines.append([annotation, chrom, count, sample, hits])
lines_pre.append([annotation, chrom, p, count, sample, hits])
                    print(res, file=out_handle)
if lines:
dt = pd.DataFrame(lines)
dt.columns = ["isomir", "chrom", "counts", "sample", "hits"]
dt = dt[dt['hits']>0]
dt = dt.loc[:, "isomir":"sample"]
dt = dt.groupby(['isomir', 'chrom', 'sample'], as_index=False).sum()
dt.to_csv(out_file + "_summary")
dt_pre = pd.DataFrame(lines_pre)
dt_pre.columns = ["isomir", "mature", "chrom", "counts", "sample", "hits"]
dt_pre = dt_pre[dt_pre['hits']==1]
dt_pre = dt_pre.loc[:, "isomir":"sample"]
dt_pre = dt_pre.groupby(['isomir', 'chrom', 'mature', 'sample'], as_index=False).sum()
return out_file, dt, dt_pre
return None
def _merge(dts):
"""
merge multiple samples in one matrix
"""
df = pd.concat(dts)
ma = df.pivot(index='isomir', columns='sample', values='counts')
ma_mirna = ma
ma = ma.fillna(0)
ma_mirna['mirna'] = [m.split(":")[0] for m in ma.index.values]
ma_mirna = ma_mirna.groupby(['mirna']).sum()
ma_mirna = ma_mirna.fillna(0)
return ma, ma_mirna
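# Sketch of the shapes involved: each input frame has isomir/chrom/counts/
# sample columns; the pivot gives an isomir x sample count matrix, and the
# second matrix collapses isomirs onto the parent miRNA (the token before
# the first ":").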
def _create_counts(out_dts, out_dir):
"""Summarize results into single files."""
ma, ma_mirna = _merge(out_dts)
out_ma = op.join(out_dir, "counts.tsv")
out_ma_mirna = op.join(out_dir, "counts_mirna.tsv")
ma.to_csv(out_ma, sep="\t")
ma_mirna.to_csv(out_ma_mirna, sep="\t")
return out_ma_mirna, out_ma
def miraligner(args):
"""
Realign BAM hits to miRBAse to get better accuracy and annotation
"""
hairpin, mirna = _download_mirbase(args)
precursors = _read_precursor(args.hairpin, args.sps)
matures = _read_mature(args.mirna, args.sps)
gtf = _read_gtf(args.gtf)
out_dts = []
out_files = []
for bam_fn in args.files:
sample = op.splitext(op.basename(bam_fn))[0]
logger.info("Reading %s" % bam_fn)
if bam_fn.endswith("bam") or bam_fn.endswith("sam"):
bam_fn = _sam_to_bam(bam_fn)
bam_sort_by_n = op.splitext(bam_fn)[0] + "_sort"
pysam.sort("-n", bam_fn, bam_sort_by_n)
reads = _read_bam(bam_sort_by_n + ".bam", precursors)
elif bam_fn.endswith("fasta") or bam_fn.endswith("fa") or \
bam_fn.endswith("fastq"):
if args.collapse:
bam_fn = _collapse_fastq(bam_fn)
out_file = op.join(args.out, sample + ".premirna")
bam_fn = _filter_seqs(bam_fn)
if args.miraligner:
_cmd_miraligner(bam_fn, out_file, args.sps, args.hairpin, args.out)
reads = _read_miraligner(out_file)
out_files.append(out_file)
else:
raise ValueError("Format not recognized.")
if args.miraligner:
_mirtop(out_files, args.hairpin, args.gtf, args.sps, args.out)
if not args.miraligner:
reads = _annotate(reads, matures, precursors)
out_file = op.join(args.out, sample + ".mirna")
out_file, dt, dt_pre = _tab_output(reads, out_file, sample)
try:
vcf_file = op.join(args.out, sample + ".vcf")
if not file_exists(vcf_file):
# if True:
create_vcf(dt_pre, matures, gtf, vcf_file)
try:
import vcf
vcf.Reader(filename=vcf_file)
except Exception as e:
logger.warning(e.__doc__)
logger.warning(e)
except Exception as e:
# traceback.print_exc()
logger.warning(e.__doc__)
logger.warning(e)
if isinstance(dt, pd.DataFrame):
out_dts.append(dt)
if out_dts:
_create_counts(out_dts, args.out)
else:
print("No files analyzed!")
|
lpantano/seqcluster
|
seqcluster/seqbuster/__init__.py
|
Python
|
mit
| 18,893
|
from flask import Blueprint, render_template, request, abort, flash, redirect, url_for
from flask_babel import lazy_gettext
from c3bottles import db
from c3bottles.model.drop_point import DropPoint
from c3bottles.model.report import Report
from c3bottles.model.visit import Visit
from c3bottles.views import needs_reporting, needs_visiting
bp = Blueprint("action", __name__)
@bp.route("/report", methods=("GET", "POST"))
@bp.route("/<int:number>")
@needs_reporting
def report(number=None):
dp = DropPoint.query.get_or_404(request.values.get("number", number))
if dp.removed:
abort(404)
state = request.values.get("state")
if state:
try:
Report(dp=dp, state=state)
except ValueError as e:
return render_template(
"error.html",
text=lazy_gettext("Errors occurred while processing your report:"),
errors=[v for d in e.args for v in d.values()]
)
else:
db.session.commit()
return render_template(
"success.html",
heading=lazy_gettext("Thank you!"),
text=lazy_gettext("Your report has been received successfully."),
)
else:
return render_template(
"action/report.html",
dp=dp
)
@bp.route("/visit", methods=("GET", "POST"))
@bp.route("/visit/<int:number>")
@needs_visiting
def visit(number=None):
dp = DropPoint.query.get_or_404(request.values.get("number", number))
if dp.removed:
abort(404)
action = request.values.get("maintenance")
if action:
try:
Visit(dp=dp, action=action)
except ValueError as e:
return render_template(
"error.html",
text=lazy_gettext("Errors occurred while processing your visit:"),
errors=[v for d in e.args for v in d.values()]
)
else:
db.session.commit()
flash({
"class": "success disappear",
"text": lazy_gettext("Your visit has been processed successfully."),
})
return redirect("{}#{}/{}/{}/3".format(url_for("view.map_"), dp.level, dp.lat, dp.lng))
else:
return render_template(
"action/visit.html",
dp=dp,
)
|
der-michik/c3bottles
|
c3bottles/views/action.py
|
Python
|
mit
| 2,365
|
import os
from os.path import abspath, dirname
from sys import path
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
SITE_ROOT = dirname(dirname(abspath(__file__)))
path.append(SITE_ROOT)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "api.settings")
application = DjangoWhiteNoise(get_wsgi_application())
|
krunal10/aves
|
aves/wsgi.py
|
Python
|
mit
| 359
|
from datetime import datetime
from decimal import Decimal
from time import mktime
from flask import request, current_app
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, class_mapper
from sqlalchemy.orm.collections import InstrumentedList
from sqlalchemy.ext.declarative import declarative_base
class _BaseModel(object):
def __init__(self, **kwargs):
self.update(**kwargs)
super(_BaseModel, self).__init__()
def update(self, **kwargs):
whitelist = self.column_whitelist()
for key in kwargs.keys():
if key not in whitelist:
raise TypeError(
'{0} is an invalid keyword argument for {1}. '
'Valid choices are {2}'
.format(key, self.__class__.__name__, whitelist))
for key, val in kwargs.items():
setattr(self, key, val)
@classmethod
def column_whitelist(cls):
return [c.key for c in class_mapper(cls).columns]
def serialize(self, detail=False):
return {c.key: getattr(self, c.key)
for c in class_mapper(self.__class__).columns}
def for_json(self, detail=False):
data = self.serialize(detail=detail)
try:
for key, val in data.items():
if isinstance(val, datetime):
data[key] = mktime(val.timetuple()) * 1000
elif isinstance(val, InstrumentedList):
data[key] = list(val)
elif isinstance(val, Decimal):
data[key] = float(val)
except AttributeError:
# This means that data isn't a dict, which is ok.
pass
return data
Model = declarative_base(name='Model', cls=_BaseModel)
def get_session():
"""Get a database session for the current request."""
if not hasattr(current_app, 'Session'):
engine = create_engine(current_app.config.get('DATABASE_URL'),
convert_unicode=True)
current_app.Session = sessionmaker(bind=engine)
if not hasattr(request, 'db_session'):
request.db_session = current_app.Session()
return request.db_session
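# A minimal usage sketch, assuming a Flask request context:
#   session = get_session()
#   session.add(instance)
#   session.commit()
# The engine and sessionmaker are created once per app, the session once per
# request, so repeated calls within one request share the same session.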
|
mythmon/piper
|
piper/database.py
|
Python
|
mit
| 2,210
|
# -*- coding: utf-8 -*-
# Scrapy settings for cookpad project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'cookpad'
SPIDER_MODULES = ['cookpad.spiders']
NEWSPIDER_MODULE = 'cookpad.spiders'
LOG_LEVEL = 'ERROR'
LOG_FILE = 'cookpad.log'
IS_MSSQLDB = False
ITEM_PIPELINES = {'cookpad.pipelines.MongoDBPipeline':300}
MONGODB_SERVER = "ds127321.mlab.com"
MONGODB_PORT = 27321
MONGODB_DB = "heroku_v65c6f57"
MONGODB_COLLECTION_RECIPES = "recipes"
MONGODB_COLLECTION_RECIPES_SPIDER = 'recipes_spider'
MONGODB_USER = "admin"
MONGODB_PASSWORD = "dodido_2008"
MSSQL_SERVER = "."
MSSQL_DB = "meals"
MSSQL_USER = "meals"
MSSQL_PASSWORD = "dodido_2008"
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'cookpad (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
DOWNLOADER_MIDDLEWARES = {
'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': 300}
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 100
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 0
# The download delay setting will honor only one of:
CONCURRENT_REQUESTS_PER_DOMAIN = 100
CONCURRENT_REQUESTS_PER_IP = 100
# Cookies (enabled by default); kept on here for session handling
COOKIES_ENABLED = True
COOKIES_DEBUG = False
# Disable Telnet Console (enabled by default)
TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
'Referer': 'https://cookpad.com/eg/%D9%88%D8%B5%D9%81%D8%A7%D8%AA',
'Turbolinks-Referrer': 'https://cookpad.com/eg/%D9%88%D8%B5%D9%81%D8%A7%D8%AA',
'X-Turbolinks-Request': 'true',
'Accept': 'text/html, application/xhtml+xml',
'Accept-Encoding': 'gzip, deflate, sdch, br',
'Accept-Language': 'en-US,en;q=0.8,ar;q=0.6'
}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'cookpad.middlewares.CookpadSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'cookpad.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'cookpad.pipelines.CookpadPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = False
# The initial download delay
AUTOTHROTTLE_START_DELAY = 0
# The maximum download delay to be set in case of high latencies
AUTOTHROTTLE_MAX_DELAY = 3
# The average number of requests Scrapy should be sending in parallel to
# each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 16.0
# Enable showing throttling stats for every response received:
AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
HTTPCACHE_ENABLED = False
HTTPCACHE_EXPIRATION_SECS = 0
HTTPCACHE_DIR = './httpcache'
HTTPCACHE_IGNORE_HTTP_CODES = [502]
HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
HTTPCACHE_GZIP = True
|
tahazayed/cookpadscraper
|
cookpad/settings.py
|
Python
|
mit
| 4,097
|
from scipy import misc
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import randint
import math
import cv2
from keras.layers import Input, Dense, Dropout, Flatten, Activation, BatchNormalization, Reshape
from keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Lambda
from keras.layers.noise import GaussianNoise
from keras.models import Model, Sequential, Input
from keras.optimizers import Adam , SGD
from keras.activations import elu ,selu
import keras
subtract_layer = Lambda(lambda inputs: inputs[0] - inputs[1],
output_shape=lambda shapes: shapes[0])
'''
This model uses an encoder-decoder architecture with a "cheat" connection
between the encoder and decoder, so it can erase and later re-add features.
'''
s=96 #s must be a multiple of 8
savenum=0
def createpair(size): # generate toy data
data = np.zeros((size,size))
circcenter=(randint(0,size),randint(0,size))
r=randint(size//6,size//3)
squarecenter=(randint(0,size),randint(0,size))
squaresize=randint(size//8,size//4)
squareoffset=(squaresize,squaresize)
triverts=[(randint(0,size),randint(0,size)),(randint(0,size),randint(0,size)),(randint(0,size),randint(0,size))]
cv2.circle(data,circcenter,r,(255),thickness=-1)
cv2.rectangle(data,tuple(np.subtract(squarecenter,squareoffset)),tuple(np.add(squarecenter,squareoffset)),(255),thickness=-1)
notri=np.copy(data)
cv2.fillConvexPoly(data,np.array(triverts),(255))
data=np.expand_dims(data, axis=2)
notri=np.expand_dims(notri, axis=2)
return data,notri
def createpairs(size,num):
withtri=np.zeros((num,size,size,1),dtype=np.float32)
notri=np.zeros((num,size,size,1),dtype=np.float32)
for i in range(num):
c=createpair(size)
withtri[i]=c[0]/255
notri[i]=c[1]/255
return withtri,notri
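# Shapes for reference: createpairs(96, 16) returns two float32 arrays of
# shape (16, 96, 96, 1) scaled to [0, 1]; the first holds circle, square and
# triangle, the second the same scene without the triangle.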
encoderin=Input(shape=(s,s,1))
cheatencoderin=Input(shape=(s,s,1))
encoder=keras.layers.concatenate([encoderin,cheatencoderin])
encoder=BatchNormalization()(encoder)
encoder=Conv2D(32,(3,3),padding='same')(encoder)
for i in range(1,4): # for loops make it easier to change the network structure, and are easier to read
encoder=MaxPooling2D()(encoder)
encoder=Conv2D(32*(2**i),(3,3),padding='same')(encoder)
encoder=BatchNormalization()(encoder)
encoder=Activation('elu')(encoder)
encoder=Conv2D(32*(2**i),(3,3),padding='same')(encoder)
encoder=BatchNormalization()(encoder)
encoder=Activation('elu')(encoder)
encodermodel=Model(inputs=[encoderin,cheatencoderin],outputs=encoder)
cheatin=Input(shape=(s//8,s//8,256))
cheat=Flatten()(cheatin)
cheat=Dense(100)(cheat)
cheat=BatchNormalization()(cheat)
cheat=Activation('elu')(cheat)
cheat=Dense(s*s)(cheat)
cheat=BatchNormalization()(cheat)
cheat=Activation('elu')(cheat)
cheat=Reshape((s,s,1))(cheat)
cheatskip=Model(inputs=cheatin,outputs=cheat)
zeros=Input(shape=(s,s,1))
maindecoderin=Input(shape=(s//8,s//8,256))
decoder=maindecoderin
for i in range(3,0,-1):
decoder=Conv2D(32*(2**i),(3,3),padding='same')(decoder)
decoder=BatchNormalization()(decoder)
decoder=Activation('elu')(decoder)
decoder=Conv2D(32*(2**i),(3,3),padding='same')(decoder)
decoder=BatchNormalization()(decoder)
decoder=Activation('elu')(decoder)
decoder=UpSampling2D()(decoder)
decoder=Conv2D(32,(3,3),padding='same')(decoder)
decoder=BatchNormalization()(decoder)
decoder=Activation('elu')(decoder)
decoder=Conv2D(1,(3,3),padding='same',activation='sigmoid')(decoder)
decodermodel=Model(inputs=[maindecoderin],outputs=[decoder])
eraserin=Input(shape=(s,s,1))
enc=encodermodel([eraserin,zeros])
skipped=cheatskip(enc)
notriangle=decodermodel(enc)
reenc=encodermodel([notriangle,skipped])
reconstructed=decodermodel(reenc)
eraser=Model(inputs=[eraserin,zeros],outputs=[notriangle,reconstructed]) #binary_crossentropy
eraser.compile(loss='binary_crossentropy', optimizer='adam')
discriminatorinput=Input(shape=[s,s,1]) # build the discriminator
d=discriminatorinput
d=Conv2D(32,(3,3))(d)
#d=BatchNormalization()(d)
d=Activation('elu')(d)
for i in range(1,4):
d=MaxPooling2D()(d)
d=Conv2D(32*(2**i),(3,3))(d)
#d=BatchNormalization()(d)
d=Activation('elu')(d)
d=Conv2D(32*(2**i),(3,3))(d)
#d=BatchNormalization()(d)
d=Activation('elu')(d)
d=Flatten()(d)
for i in range(2):
d=Dense(512)(d)
#d=BatchNormalization()(d)
d=Activation('elu')(d)
d=Dense(1,activation='sigmoid')(d)
discriminator=Model(inputs=discriminatorinput,outputs=d)
discriminator.compile(loss='binary_crossentropy', optimizer='adam')
discriminator.trainable = False # freeze the discriminator inside the combined model so generator updates do not move its weights (assumed intent; the standard GAN setup)
ganinput=Input(shape=[s,s,1]) # build the gan
img=eraser([ganinput,zeros])
ganout=discriminator(img[0])
gan=Model(inputs=[ganinput,zeros],outputs=[ganout,img[1]])
gan.compile(loss='binary_crossentropy', optimizer='adam')
def show(n): # show images
b=createpairs(s,n)
targets=np.squeeze(b[1])
inputs=np.squeeze(b[0])
    out=eraser.predict([b[0],z[:n]]) # slice the zeros input to match the batch of n samples
outa=np.squeeze(out[0])
outb=np.squeeze(out[1])
for i in range(n):
plt.subplot(4,4,i+1)
plt.imshow(inputs[i])
plt.subplot(4,4,i+1+n)
plt.imshow(outa[i])
plt.subplot(4,4,i+1+n*2)
plt.imshow(outb[i])
#plt.show()
global savenum
plt.savefig(str(savenum)+".png")
savenum+=1
z=np.zeros(shape=(16,s,s,1))
for count in range(10000):
c=createpairs(s,16)
loss=eraser.train_on_batch([c[0],z],[c[1],c[0]])
print(loss)
    if(count%100==0): # show every 100 iterations
print("")
show(4)
for count in range(10000):
real=np.array(createpairs(s,8)[1])
fake=np.array(createpairs(s,8)[0])
images=np.concatenate([real,fake])
labels=np.concatenate([np.ones(8),np.zeros(8)])
dloss=discriminator.train_on_batch(images,labels) # train the discriminator
    ganlabels=np.ones(16)
    fake=createpairs(s,16)[0]
    ganloss=gan.train_on_batch([fake,z],[ganlabels,fake]) # train the generator; the gan takes both inputs and supervises both outputs (discriminator label + reconstruction)
print("discriminator loss",dloss,"generator loss",ganloss)
    if(count%100==0): # show every 100 iterations
print("")
show(4)
|
Elch123/geometricerasure
|
multimodel.py
|
Python
|
mit
| 5,753
|
# -*- coding: utf-8 -*-
"""
.. module:: models
"""
import logging
import os
# pylint: disable=unused-import
import uuid
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.db.models import F
from django.utils import timezone
logger = logging.getLogger('volontulo.models')
class Organization(models.Model):
"""Model that handles ogranizations/institutions."""
name = models.CharField(max_length=150)
address = models.CharField(max_length=150)
description = models.TextField()
def __str__(self):
"""Organization model string reprezentation."""
return self.name
class OffersManager(models.Manager):
"""Offers Manager."""
def get_active(self):
"""Return active offers."""
return self.filter(
offer_status='published',
action_status__in=('ongoing', 'future'),
recruitment_status__in=('open', 'supplemental'),
).all()
def get_for_administrator(self):
"""Return all offers for administrator to allow management."""
return self.filter(offer_status='unpublished').all()
def get_weightened(self):
"""Return all published offers ordered by weight."""
return self.filter(offer_status='published').order_by('weight')
def get_archived(self):
"""Return archived offers."""
return self.filter(
offer_status='published',
action_status__in=('ongoing', 'finished'),
recruitment_status='closed',
).all()
class Offer(models.Model):
"""Offer model."""
OFFER_STATUSES = (
('unpublished', 'Unpublished'),
('published', 'Published'),
('rejected', 'Rejected'),
)
RECRUITMENT_STATUSES = (
('open', 'Open'),
('supplemental', 'Supplemental'),
('closed', 'Closed'),
)
ACTION_STATUSES = (
('future', 'Future'),
('ongoing', 'Ongoing'),
('finished', 'Finished'),
)
objects = OffersManager()
organization = models.ForeignKey(Organization)
volunteers = models.ManyToManyField(User)
description = models.TextField()
requirements = models.TextField(blank=True, default='')
time_commitment = models.TextField()
benefits = models.TextField()
location = models.CharField(max_length=150)
title = models.CharField(max_length=150)
started_at = models.DateTimeField(blank=True, null=True)
finished_at = models.DateTimeField(blank=True, null=True)
time_period = models.CharField(max_length=150, default='', blank=True)
status_old = models.CharField(
max_length=30,
default='NEW',
null=True,
unique=False
)
offer_status = models.CharField(
max_length=16,
choices=OFFER_STATUSES,
default='unpublished',
)
recruitment_status = models.CharField(
max_length=16,
choices=RECRUITMENT_STATUSES,
default='open',
)
action_status = models.CharField(
max_length=16,
choices=ACTION_STATUSES,
default='ongoing',
)
    votes = models.BooleanField(default=False)
recruitment_start_date = models.DateTimeField(blank=True, null=True)
recruitment_end_date = models.DateTimeField(blank=True, null=True)
reserve_recruitment = models.BooleanField(blank=True, default=True)
reserve_recruitment_start_date = models.DateTimeField(
blank=True,
null=True
)
reserve_recruitment_end_date = models.DateTimeField(
blank=True,
null=True
)
action_ongoing = models.BooleanField(default=False, blank=True)
constant_coop = models.BooleanField(default=False, blank=True)
action_start_date = models.DateTimeField(blank=True, null=True)
action_end_date = models.DateTimeField(blank=True, null=True)
volunteers_limit = models.IntegerField(default=0, null=True, blank=True)
weight = models.IntegerField(default=0, null=True, blank=True)
def __str__(self):
"""Offer string representation."""
return self.title
def set_main_image(self, is_main):
"""Set main image flag unsetting other offers images.
:param is_main: Boolean flag resetting offer main image
"""
if is_main:
OfferImage.objects.filter(offer=self).update(is_main=False)
return True
return False
    def save_offer_image(self, gallery, is_main=False):
        """Attach an uploaded image to this offer.
        :param gallery: OfferImage model instance
        :param is_main: Boolean main image flag
        """
gallery.offer = self
gallery.is_main = self.set_main_image(is_main)
gallery.save()
return self
def create_new(self):
"""Set status while creating new offer."""
self.offer_status = 'unpublished'
self.recruitment_status = 'open'
if self.started_at or self.finished_at:
self.action_status = self.determine_action_status()
def determine_action_status(self):
"""Determine action status by offer dates."""
if (
(
self.finished_at and
self.started_at < timezone.now() < self.finished_at
) or
(
self.started_at < timezone.now() and
not self.finished_at
)
):
return 'ongoing'
elif self.started_at > timezone.now():
return 'future'
else:
return 'finished'
def change_status(self, status):
"""Change offer status.
:param status: string Offer status
"""
if status in ('published', 'rejected', 'unpublished'):
self.offer_status = status
self.save()
return self
def unpublish(self):
"""Unpublish offer."""
self.offer_status = 'unpublished'
self.save()
return self
def publish(self):
"""Publish offer."""
self.offer_status = 'published'
Offer.objects.all().update(weight=F('weight') + 1)
self.weight = 0
self.save()
return self
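    # Note: publishing bumps every offer's weight by one and resets this
    # offer to weight 0, so get_weightened() returns the newest publication
    # first.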
def reject(self):
"""Reject offer."""
self.offer_status = 'rejected'
self.save()
return self
def close_offer(self):
"""Change offer status to close."""
self.offer_status = 'unpublished'
self.action_status = 'finished'
self.recruitment_status = 'closed'
self.save()
return self
class UserProfile(models.Model):
"""Model that handles users profiles."""
user = models.OneToOneField(User)
organizations = models.ManyToManyField(
Organization,
related_name='userprofiles',
)
is_administrator = models.BooleanField(default=False, blank=True)
phone_no = models.CharField(
max_length=32,
blank=True,
default='',
null=True
)
uuid = models.UUIDField(default=uuid.uuid4, unique=True)
def is_admin(self):
"""Return True if current user is administrator, else return False"""
return self.is_administrator
def is_volunteer(self):
"""Return True if current user is volunteer, else return False"""
        return not (self.is_administrator and self.organizations.exists())
def can_edit_offer(self, offer=None, offer_id=None):
"""Checks if the user can edit an offer based on its ID"""
if offer is None:
offer = Offer.objects.get(id=offer_id)
return self.is_administrator or self.organizations.filter(
id=offer.organization_id).exists()
def get_avatar(self):
"""Return avatar for current user."""
return UserGallery.objects.filter(
userprofile=self,
is_avatar=True
)
def clean_images(self):
"""Clean user images."""
images = UserGallery.objects.filter(userprofile=self)
for image in images:
try:
os.remove(os.path.join(settings.MEDIA_ROOT, str(image.image)))
except OSError as ex:
logger.error(ex)
image.delete()
def __str__(self):
return self.user.email
class UserGallery(models.Model):
"""Handling user images."""
userprofile = models.ForeignKey(UserProfile, related_name='images')
image = models.ImageField(upload_to='profile/')
is_avatar = models.BooleanField(default=False)
def __str__(self):
"""String representation of an image."""
return str(self.image)
class OfferImage(models.Model):
"""Handling offer image."""
offer = models.ForeignKey(Offer, related_name='images')
path = models.ImageField(upload_to='offers/')
is_main = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
"""String representation of an image."""
return str(self.path)
class OrganizationGallery(models.Model):
"""Handling organizations gallery."""
organization = models.ForeignKey(Organization, related_name='images')
published_by = models.ForeignKey(UserProfile, related_name='gallery')
path = models.ImageField(upload_to='gallery/')
is_main = models.BooleanField(default=False, blank=True)
def __str__(self):
"""String representation of an image."""
return str(self.path)
    def remove(self):
        """Remove image."""
        self.delete()
def set_as_main(self, organization):
"""Save image as main.
:param organization: Organization model instance
"""
OrganizationGallery.objects.filter(
organization_id=organization.id
).update(is_main=False)
self.is_main = True
self.save()
@staticmethod
def get_organizations_galleries(userprofile):
"""Get images grouped by organizations
:param userprofile: UserProfile model instance
"""
organizations = Organization.objects.filter(
userprofiles=userprofile
).all()
return {o.name: o.images.all() for o in organizations}
class Page(models.Model):
"""Static page model."""
title = models.CharField(max_length=255)
content = models.TextField()
author = models.ForeignKey(UserProfile)
published = models.BooleanField(default=False)
modified_at = models.DateTimeField(auto_now=True)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.title
|
mlipa/volontulo
|
backend/apps/volontulo/models.py
|
Python
|
mit
| 10,634
|
#Coded by SpzY
#Version 1.1.1
import random
RollList = []
autoadd = [0]
def d20():
return random.randint(1,20)
def d12():
return random.randint(1,12)
def d10():
return random.randint(1,10)
def d8():
return random.randint(1,8)
def d6():
return random.randint(1,6)
def d4():
return random.randint(1,4)
def randomcheck():
x = random.randint(1,100)
if x > 50:
return "Male"
else:
return "Female"
def RollListF():
    '''Shows the list'''
    if len(RollList) == 0:
        return "No Saved Rolls. Is toglist enabled?"
    # join() avoids the duplicate-value bug of comparing each item to the
    # last element to decide when to stop
    return "Saved Rolls: " + ", ".join(str(i) for i in RollList)
def RollSum():
    '''Sums all the rolls in the list'''
    if len(RollList) == 0:
        return "No Saved Rolls to add. Is toglist enabled?"
    return " + ".join(str(i) for i in RollList) + " = " + str(sum(RollList))
def Clear():
'''Clears the list'''
print("The list has been cleared of rolls.")
del RollList[:]
def mAdd():
'''Manually adds numbers to the saved roll list (If toglist isn't enabled and you need a calculator)'''
loop = 0
tempstr = ""
while loop == 0:
z = input("Which number would you like to add to the saved roll list manually?: ")
RollList.append(z)
print(z,"has been added to the Roll List for later addition.")
x = input("Are there any other numbers to be added to the saved roll list? y/n: ")
if x == "y" or x == "yes":
mAdd()
if x == "n" or x == "no":
loop += 1
print("Exiting manual addition.")
else:
print("Didn't understand entry, exiting manual addition.")
loop += 1
Roll()
def Roll():
y = 1
x = str(input())
while y == 1:
#Dice Rolling
if x == "20" or x == "D20" or x == "d20":
z = d20()
print("D20 Roll: ",z)
if autoadd[0] == 0:
RollList.append(z)
return Roll()
if x == "12" or x == "D12" or x == "d12":
z = d12()
print("D12 Roll: ",z)
if autoadd[0] == 0:
RollList.append(z)
return Roll()
if x == "10" or x == "D10" or x == "d10":
z = d10()
print("D10 Roll: ",z)
if autoadd[0] == 0:
RollList.append(z)
return Roll()
if x == "8" or x == "D8" or x == "d8":
z = d8()
print("D8 Roll: ",z)
if autoadd[0] == 0:
RollList.append(z)
return Roll()
if x == "6" or x == "D6" or x == "d6":
z = d6()
print("D6 Roll: ",z)
if autoadd[0] == 0:
RollList.append(z)
return Roll()
if x == "4" or x == "D4" or x == "d4":
z = d4()
print("D4 Roll: ",z)
if autoadd[0] == 0:
RollList.append(z)
return Roll()
#Exiting and other functions for the numbers
if x == "help":
print("----------------------------")
print("Supported Dice Rolls: D4, D6, D8, D10, D12, D20")
print("To roll a die, simply type the number you want or the number with a 'D' or 'd'")
print("CMDS: stop, list, sum, clear")
print("----------------------------")
print("*stop: stops the Roll() program allowing for manual input (Mainly for testing purposes).")
print("*list [shortcut(s): 'r', 'l']: displays all the rolls currently stored in the list, these numbers are used for 'sum' command.")
print("*sum [shortcut(s): 's']: adds all the rolls stored in list.")
print("*clear [shortcut(s): 'c']: clears all the rolls stored in the list, should be used in between long rolls.")
print("*example: shows an example for the usage of all these commands.")
print("*toglist [shortcut(s): 'tl', 't']: toggles rolls from being added to the list")
print("*checktog [shortcut(s): 'ct']: checks if list appending is toggled.")
print("*madd [shortcut(s): 'ma','a']: manually adds numbers to the saved roll list(Usually if you forgot to toglist and need to add rolls).")
print("----------------------------")
return Roll()
if x == "example":
print("Here is an example of CMDS:")
print("----------------------------")
print("Say I'm rolling 3D4 for damage so I type '4' and hit ENTER 3 times in a row.")
print("The rolls give me 2,3, and 1. Which appends the list of numbers to [2,3,1]")
print("Typing 'list' (or 'l' or 'r') shows me the list of numbers.")
print("Typing 'sum' (or 's') shows me '6' which is what 2+3+1 equals.")
print("Typing 'clear' (or 'c') will clear the list of all the rolls.")
print("After typing 'clear' if I type 's' I'll get 0, if I type 'list' I'll get '[]' which means the list is empty.")
print("After typing 'clear' you can re-roll more dice to be added. Make sure to 'clear' before rolling a series of dice that you want to add.")
print("To disable automatically adding rolls to the list, use 'disable'")
print("----------------------------")
return Roll()
if x == "roll":
print("Did you mean to roll a die? Try typing the number of the die you want to roll. Ex: type '20' to roll a D20 die.")
Roll()
if x == "madd" or x == "ma" or x == "a":
return mAdd()
return Roll()
if x == "ct" or x == "checktog":
if autoadd[0] == 0:
print("List creation is currently turned on. Use 'toglist' to disable.")
Roll()
if autoadd[0] == 1:
print("List creation is currently turned off. Use 'toglist' to re-enable.")
Roll()
if x == "toglist" or x == "tl" or x == "t":
if autoadd[0] == 0:
autoadd[0] += 1
print("List creation stopped")
return Roll()
if autoadd[0] == 1:
autoadd[0] -= 1
print("List creation restarted")
return Roll()
if x == "stop" or x == "Exit" or x == "exit":
return "Exiting Loop, use Roll() to reenable."
y += 1
if x == "r" or x == "l" or x == "list":
print("This may be behind if you've rolled quickly, if a number is missing try entering 'list' again")
if autoadd[0] == 1:
print("List Creation is stopped, use toglist to re-enable.")
if len(RollList) > 0:
print("The numbers still in the list are: ", RollListF())
else:
return Roll()
else:
print(RollListF())
return Roll()
if x == "s" or x == "sum":
if autoadd[0] == 1:
print("List Creation is stopped, use toglist to re-enable")
if len(RollList) > 0:
print("The numbers remaining in the list sum to: ", RollSum())
else:
return Roll()
else:
print(RollSum())
return Roll()
if x == "c" or x == "clear":
Clear()
return Roll()
else:
return Roll()
print("You are now rolling, typing the number of the die you wish to roll. Ex: '20' rolls a D20.")
print("Type 'help' to see all the commands.")
Roll()
|
Sp4zzy/DDRoller
|
DDRollerV1.1.1.py
|
Python
|
mit
| 8,041
|
import pyglet
import sys
import math
import random
import settings
from itertools import chain
import entity
import sounds
import music
import cart
import ruby
import scene
import track
import level
import obstacle
from collections import namedtuple
from utils import Point, Vec2d, Rect
glevels = []
class GameLevel(object):
name = "default"
music = 'strike-force'
VIEWPORT_MAX_RATE = settings.VIEWPORT_MAX_RATE
def __init__(self, game):
super(GameLevel, self).__init__()
glevels.append(self)
if self.music:
music.play(self.music)
# in game coords. viewport is your window into game world
self.game = game
pyglet.gl.glClearColor(*settings.BACKGROUND_COLOUR)
self.width = self.game.window.width
self.height = self.game.window.height
self.viewport = scene.ViewportManager(Rect(0.0, 0.0, self.width, self.height))
self.main_batch = pyglet.graphics.Batch()
self.score_ruby = RubyScore(batch=self.main_batch)
self.score_ruby.x = settings.SCORE_LABEL_X + self.score_ruby.width / 2
self.score_ruby.y = settings.SCORE_LABEL_Y + self.score_ruby.height / 2
self.score_label = pyglet.text.Label(text = "",
x = settings.SCORE_LABEL_X + self.score_ruby.width + 10,
y = settings.SCORE_LABEL_Y + 5,
color=settings.MENU_COLOR_OPTION,
batch = self.main_batch)
self.score_cart = pyglet.sprite.Sprite(
img=pyglet.resource.image(settings.CART_IMAGE), batch=self.main_batch)
self.score_cart.scale = 0.25
self.score_cart.x = settings.LIVES_LABEL_X + 0
self.score_cart.y = settings.LIVES_LABEL_Y + 0
self.lives_label = pyglet.text.Label(text = "",
x = settings.LIVES_LABEL_X + self.score_cart.width + 10,
y = settings.LIVES_LABEL_Y + 5,
color=settings.MENU_COLOR_OPTION,
batch = self.main_batch)
self.quit_label = pyglet.text.Label(text = "By NSTeamStrong: q [quit] space [jump]",
x = settings.QUIT_LABEL_X,
color=settings.MENU_COLOR_OPTION,
y = settings.QUIT_LABEL_Y,
batch = self.main_batch)
self.cart = None
self.entities = []
self.catchup = True
self.setup(self.name)
# next 3 are needed to play nicely with scene manager
def start(self):
pyglet.clock.schedule(self.update) # main loop
def stop(self):
pyglet.clock.unschedule(self.update)
pass
def finish(self, skip=None):
label = getattr(self, 'finish_label', 'level_finished')
self.game.scene_finished(label, skip=skip)
def setup(self, levelname):
sounds.load()
self.lives = settings.STARTING_LIVES
self.update_labels()
self.level = level.load(levelname)
self.track = track.Track()
self.track.add_tracks(self.level.tracks)
self.ruby_list = entity.ObjectList({'default': ruby.Ruby})
self.ruby_list.add(self.level.rubies)
self.obstacle_list = entity.ObjectList({
'default': obstacle.Obstacle,
'exit': obstacle.EndLevel})
self.obstacle_list.add(self.level.obstacles)
self.spawn_points = entity.ObjectList({'default': obstacle.Spawn})
self.spawn_points.add(self.level.spawn)
self.entities.extend(self.spawn_points.objects)
self.objects = [self.ruby_list, self.obstacle_list]
self.create_cart()
self.viewport.reset(
Rect(self.cart.gp.x,
self.cart.gp.y - self.height / 2,
self.width, self.height))
# now check the level contents.
self.bg = scene.Background(self.level.layers, self.viewport)
def on_draw(self): #runs every frame
self.game.window.clear()
self.bg.draw()
self.draw_track()
self.update_labels()
self.draw_entities()
self.draw_objects()
self.main_batch.draw()
def draw_entities(self):
# TODO: check if entities are visible before drawing
(vpx, vpy, vpwidth, vpheight) = self.viewport
for entity in self.entities:
entity.position = (entity.gp.x - vpx, entity.gp.y - vpy)
entity.draw()
def draw_track(self):
(vpx, vpy, vpwidth, vpheight) = self.viewport
vertices = []
colors = []
#sleeper magick
for sleeper in self.track.visible_sleepers:
(x, y, r, x1, y1, x2, y2, x3, y3, x4, y4, ctr, ctg, ctb, cbr, cbg, cbb) = sleeper
vertices.extend([x1 - vpx, y1 - vpy, x2 - vpx, y2 - vpy, x3 - vpx, y3 - vpy, x4 - vpx, y4 - vpy])
colors.extend([ctr, ctg, ctb, ctr, ctg, ctb])
colors.extend([cbr, cbg, cbb, cbr, cbg, cbb])
if len(colors) > 0 and len(vertices) > 0:
vlist = pyglet.graphics.vertex_list(len(self.track.visible_sleepers) * 4,
('v2f/stream', vertices),
('c3f/stream', colors))
vlist.draw(pyglet.gl.GL_QUADS)
vlist.delete()
vertices = []
colors = []
for line in self.track.visible_track_segments:
vertices.extend([line.x1 - vpx, line.y1 - vpy + settings.TRACK_WIDTH/2.0, line.x2 - vpx, line.y2 - vpy + settings.TRACK_WIDTH/2.0])
colors.extend(settings.TRACK_COLOR_TOP)
vertices.extend([line.x2 - vpx, line.y2 - vpy - settings.TRACK_WIDTH/2.0, line.x1 - vpx, line.y1 - vpy - settings.TRACK_WIDTH/2.0])
colors.extend(settings.TRACK_COLOR_BOTTOM)
if len(colors) > 0 and len(vertices) > 0:
vlist = pyglet.graphics.vertex_list(len(self.track.visible_track_segments) * 4,
('v2f/stream', vertices),
('c3f/stream', colors))
vlist.draw(pyglet.gl.GL_LINES)
vlist.delete()
def draw_objects(self):
(vpx, vpy, vpwidth, vpheight) = self.viewport
for objects in self.objects:
for ruby in objects.visible:
ruby.position = (ruby.gp.x - vpx, ruby.gp.y - vpy)
ruby.draw()
def on_key_press(self, symbol, modifiers):
# called every time a key is pressed
# quit if q is pressed
if symbol == pyglet.window.key.Q:
sys.exit(0)
elif symbol == pyglet.window.key.SPACE:
self.cart.jump()
elif symbol == pyglet.window.key.R:
self.cart.gp = self.spawn_points[0].gp
elif symbol == pyglet.window.key.I:
raw_input()
elif symbol == pyglet.window.key.O:
music.next()
elif symbol == pyglet.window.key.P:
music.stop()
else:
# ok check for level skipping.
level = skip.get(symbol)
if level:
self.finish(skip=level)
def on_key_release(self, symbol, modifiers):
# called every time a key is released
pass
def update(self, dt):
# main game loop
# dt is time in seconds in between calls
dt = min(dt, 0.1) # prevent level lag bringing us down.
# update cart with track info for current x coord
(track_height, track_angle) = self.track.track_info_at_x(self.cart.gp)
self.cart.update(dt, track_height, track_angle)
# update viewport and visible track/entities
viewpos = Point(self.cart.gp.x + settings.VIEWPORT_LOOKAHEAD, self.cart.gp.y)
(track_height, track_angle) = self.track.track_info_at_x(viewpos)
        #TODO: ugly hack, keep viewport level for breaks in the track (track_height < 0 there)
# rate limit movements by settings.VIEWPORT_MAX_RATE px per frame.
self.viewport.update(self.cart.gp.x, track_height)
self.track.update_visible(self.viewport.rect)
self.ruby_list.update_visible(self.viewport.rect)
self.obstacle_list.update_visible(self.viewport.rect)
self.check_collisions()
if self.cart.gp.y < self.viewport.y - settings.DEAD_OFFSET_Y:
self.die()
self.score_ruby.update(dt)
def check_collisions(self):
# rubies.
rubies_to_delete = self.cart.collided_objects(self.ruby_list.visible)
for ruby in rubies_to_delete:
self.game.scores['rubies'] += 1
self.ruby_list.objects.remove(ruby)
self.score_ruby.animate()
if rubies_to_delete:
s = sounds.cart_ruby.play()
s.volume = 0.3
# obstacles.
for obstacle in chain(self.obstacle_list.visible, self.spawn_points):
if obstacle.collides_with(self.cart):
obstacle.collided(self)
def die(self):
if self.lives > 1:
self.lives -= 1
sounds.cart_die.play()
self.update_labels()
self.reset_level()
else:
self.game_over()
def reset_level(self):
self.cart.gp = gp = self.spawn_points[0].gp
self.cart.reset()
self.viewport.reset(Rect(gp.x, gp.y - self.height / 2, self.width, self.height))
def game_over(self):
self.game.scene_finished("defeat")
def update_labels(self):
self.score_label.text = str(self.game.scores['rubies'])
self.lives_label.text = str(self.lives)
def create_cart(self):
self.cart = cart.Cart()
self.cart.gp = self.spawn_points[0].gp
self.cart.batch = self.main_batch
self.entities.append(self.cart)
class Level2(GameLevel): name = "level2"
class Level3(GameLevel): name = "level3"
class Level4(GameLevel): name = "level4"
class Level5(GameLevel): name = "level5"
class Level6(GameLevel):
name = "level6"
VIEWPORT_MAX_RATE = 20
class Level7(GameLevel): name = "level7"
class Level8(GameLevel): name = "level8"
class Level9(GameLevel):
name = "level9"
finish_label = "victory"
glevels.extend([GameLevel, Level2, Level3, Level4, Level5, Level6, Level7,
Level8, Level9])
# for now allow skipping.
skip = {
pyglet.window.key._1: 1
, pyglet.window.key._2: 2
, pyglet.window.key._3: 3
, pyglet.window.key._4: 4
, pyglet.window.key._5: 5
, pyglet.window.key._6: 6
, pyglet.window.key._7: 7
, pyglet.window.key._8: 8
, pyglet.window.key._9: 9
}
class RubyScore(entity.Entity):
IMAGE = settings.RUBY_IMAGE
velocity = 0
acceleration = -100
rate = 1
def init(self):
self.scale = 0.5
def update(self, dt):
self.velocity += dt * self.acceleration
self.scale += self.velocity * dt
if (self.scale < 0.5001) and self.velocity < 0:
self.velocity = 0
# clip scale between 1 and a half.
self.scale = max(min(self.scale, 1), 0.5)
def animate(self):
self.velocity += 7
|
rozifus/TeamStrong13_4
|
por/gamelevel.py
|
Python
|
mit
| 11,368
|
# Generated by Django 2.0 on 2018-02-08 11:49
from django.db import migrations
def forwards(apps, schema_editor):
"""
Change all Movie objects into Work objects, and their associated
data into WorkRole and WorkSelection models, then delete the Movie.
"""
Movie = apps.get_model("spectator_events", "Movie")
Work = apps.get_model("spectator_events", "Work")
WorkRole = apps.get_model("spectator_events", "WorkRole")
WorkSelection = apps.get_model("spectator_events", "WorkSelection")
for m in Movie.objects.all():
work = Work.objects.create(
kind="movie",
title=m.title,
title_sort=m.title_sort,
year=m.year,
imdb_id=m.imdb_id,
)
for role in m.roles.all():
WorkRole.objects.create(
creator=role.creator,
work=work,
role_name=role.role_name,
role_order=role.role_order,
)
for selection in m.events.all():
WorkSelection.objects.create(
event=selection.event, work=work, order=selection.order
)
m.delete()
class Migration(migrations.Migration):
dependencies = [
("spectator_events", "0029_plays_to_works"),
]
operations = [
migrations.RunPython(forwards),
]
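# Editor's note: no reverse function is supplied, so this migration is
# irreversible; a no-op reverse could be declared with
# migrations.RunPython(forwards, migrations.RunPython.noop).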
|
philgyford/django-spectator
|
spectator/events/migrations/0030_movies_to_works.py
|
Python
|
mit
| 1,362
|
#!/usr/bin/env python
"""
Copyright (C) 2010-2013, Ryan Fan
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Library General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
from getlogs.jobs import BaseJob
class InitJob(BaseJob):
def __init__(self, server):
super(InitJob, self).__init__(server)
    def perform(self):
        access_ok, op_output = self.operation.access_case_dir()
        if access_ok:
            return True
        # access of SR case specific file failed; try to repair it
        is_it_fixed = False
        if "permission denied" in op_output.lower():
            is_it_fixed = self.operation.grant_acl()
        elif "no such file or directory" in op_output.lower():
            is_it_fixed = self.operation.create_case()
        # failed initializing ISDE env
        return bool(is_it_fixed)
|
rfancn/myprojects
|
getlogs/jobs/jobinit.py
|
Python
|
mit
| 1,550
|
from django.urls import path
from . import views
urlpatterns = [
path(
'data_release',
views.DataReleaseView.as_view(),
name='data_release'
),
path(
'data_release/<slug:site>/<slug:job>',
views.DataReleaseJobView.as_view(),
name='data_release_job'
),
]
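# Editor's hedged sketch (commented out, since reversing URLs needs a
# configured Django project); the slug values below are invented placeholders:
#
# from django.urls import reverse
# reverse('data_release')
# reverse('data_release_job', kwargs={'site': 'kiln', 'job': 'nightly'})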
|
kingsdigitallab/tvof-django
|
tvof/data_release/urls.py
|
Python
|
mit
| 319
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\contrast_ctrl_template.ui'
#
# Created: Mon Jul 06 15:31:42 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(176, 391)
self.gridLayout = QtGui.QGridLayout(Form)
self.gridLayout.setSpacing(0)
self.gridLayout.setMargin(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.histogram = HistogramLUTWidget(Form)
self.histogram.setObjectName(_fromUtf8("histogram"))
self.gridLayout.addWidget(self.histogram, 0, 0, 1, 2)
self.btnAutoGain = QtGui.QPushButton(Form)
self.btnAutoGain.setCheckable(True)
self.btnAutoGain.setChecked(False)
self.btnAutoGain.setObjectName(_fromUtf8("btnAutoGain"))
self.gridLayout.addWidget(self.btnAutoGain, 1, 0, 1, 2)
self.label_6 = QtGui.QLabel(Form)
self.label_6.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.gridLayout.addWidget(self.label_6, 2, 0, 1, 1)
self.spinAutoGainSpeed = QtGui.QDoubleSpinBox(Form)
self.spinAutoGainSpeed.setProperty("value", 2.0)
self.spinAutoGainSpeed.setObjectName(_fromUtf8("spinAutoGainSpeed"))
self.gridLayout.addWidget(self.spinAutoGainSpeed, 2, 1, 1, 1)
self.label_8 = QtGui.QLabel(Form)
self.label_8.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.gridLayout.addWidget(self.label_8, 3, 0, 1, 1)
self.spinAutoGainCenterWeight = QtGui.QDoubleSpinBox(Form)
self.spinAutoGainCenterWeight.setMaximum(1.0)
self.spinAutoGainCenterWeight.setSingleStep(0.1)
self.spinAutoGainCenterWeight.setObjectName(_fromUtf8("spinAutoGainCenterWeight"))
self.gridLayout.addWidget(self.spinAutoGainCenterWeight, 3, 1, 1, 1)
self.zoomLiveBtn = QtGui.QPushButton(Form)
self.zoomLiveBtn.setObjectName(_fromUtf8("zoomLiveBtn"))
self.gridLayout.addWidget(self.zoomLiveBtn, 6, 0, 1, 2)
self.mirrorChkBox = QtGui.QCheckBox(Form)
self.mirrorChkBox.setAutoRepeat(False)
self.mirrorChkBox.setTristate(False)
self.mirrorChkBox.setObjectName(_fromUtf8("mirrorChkBox"))
self.gridLayout.addWidget(self.mirrorChkBox, 7, 1, 1, 1)
self.alphaSlider = QtGui.QSlider(Form)
self.alphaSlider.setMaximum(100)
self.alphaSlider.setSingleStep(1)
self.alphaSlider.setProperty("value", 100)
self.alphaSlider.setOrientation(QtCore.Qt.Horizontal)
self.alphaSlider.setObjectName(_fromUtf8("alphaSlider"))
self.gridLayout.addWidget(self.alphaSlider, 4, 1, 1, 1)
self.label_4 = QtGui.QLabel(Form)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_4.sizePolicy().hasHeightForWidth())
self.label_4.setSizePolicy(sizePolicy)
self.label_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout.addWidget(self.label_4, 4, 0, 1, 1)
self.flipChkBox = QtGui.QCheckBox(Form)
self.flipChkBox.setCheckable(True)
self.flipChkBox.setChecked(False)
self.flipChkBox.setAutoRepeat(False)
self.flipChkBox.setAutoExclusive(False)
self.flipChkBox.setTristate(False)
self.flipChkBox.setObjectName(_fromUtf8("flipChkBox"))
self.gridLayout.addWidget(self.flipChkBox, 7, 0, 1, 1)
self.rotateLeftChkBox = QtGui.QCheckBox(Form)
self.rotateLeftChkBox.setCheckable(True)
self.rotateLeftChkBox.setAutoExclusive(False)
self.rotateLeftChkBox.setTristate(False)
self.rotateLeftChkBox.setObjectName(_fromUtf8("rotateLeftChkBox"))
self.gridLayout.addWidget(self.rotateLeftChkBox, 9, 0, 1, 1)
self.rotateRightChkBox = QtGui.QCheckBox(Form)
self.rotateRightChkBox.setCheckable(True)
self.rotateRightChkBox.setAutoExclusive(False)
self.rotateRightChkBox.setTristate(False)
self.rotateRightChkBox.setObjectName(_fromUtf8("rotateRightChkBox"))
self.gridLayout.addWidget(self.rotateRightChkBox, 9, 1, 1, 1)
self.hideShowBtn = QtGui.QPushButton(Form)
self.hideShowBtn.setObjectName(_fromUtf8("hideShowBtn"))
self.gridLayout.addWidget(self.hideShowBtn, 5, 0, 1, 2)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.btnAutoGain.setToolTip(_translate("Form", "Determines the behavior of the white/black level sliders.\n"
"When enabled, the sliders maximum and minimum values are set\n"
"to the maximum and minimum intensity values in the image.\n"
"When disabled, the minimum is 0 and the maximum is the largest \n"
"possible intensity given the bit depth of the camera.", None))
self.btnAutoGain.setText(_translate("Form", "Auto Gain", None))
self.label_6.setText(_translate("Form", "Auto Gain Delay", None))
self.spinAutoGainSpeed.setToolTip(_translate("Form", "Smooths out the auto gain control, prevents very\n"
"brief flashes from affecting the gain. Larger values\n"
"indicate more smoothing.\n"
"", None))
self.label_8.setText(_translate("Form", "Frame Center Weight", None))
self.spinAutoGainCenterWeight.setToolTip(_translate("Form", "Weights the auto gain measurement to the center 1/3 of\n"
"the frame when set to 1.0. A value of 0.0 meters from \n"
"the entire frame.", None))
self.zoomLiveBtn.setText(_translate("Form", "Zoom to Image", None))
self.mirrorChkBox.setText(_translate("Form", "Mirror", None))
self.label_4.setText(_translate("Form", "Transparency", None))
self.flipChkBox.setText(_translate("Form", "Flip", None))
self.rotateLeftChkBox.setText(_translate("Form", "Rotate Left", None))
self.rotateRightChkBox.setText(_translate("Form", "Rotate Right", None))
self.hideShowBtn.setText(_translate("Form", "Hide/Show Image", None))
from acq4.pyqtgraph import HistogramLUTWidget
|
mgraupe/acq4
|
acq4/util/imaging/contrast_ctrl_template.py
|
Python
|
mit
| 7,059
|
from setuptools import setup, find_packages
VERSION = '0.3.16'
LONG_DESCRIPTION = open('README.rst').read()
setup(name='virtstrap',
version=VERSION,
description="virtstrap - Making repeatable environments easy!",
long_description=LONG_DESCRIPTION,
keywords='',
author='Reuven V. Gonzales',
author_email='reuven@tobetter.us',
url="https://github.com/ravenac95/virtstrap",
license='MIT',
platforms='*nix',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
'virtualenv',
'virtstrap-core',
],
entry_points={
'console_scripts': [
'vstrap = virtstrap_system.runner:main',
],
},
classifiers=[
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Operating System :: POSIX',
'Topic :: Software Development :: Build Tools',
],
)
|
ravenac95/virtstrap
|
virtstrap/setup.py
|
Python
|
mit
| 1,017
|
# -*- coding: utf-8 -*-
"""Pibooth base states.
"""
import traceback
from pibooth.utils import LOGGER, BlockConsoleHandler
class StateMachine(object):
def __init__(self, plugins_manager, configuration, application, window):
self.states = set()
self.failsafe_state = None
self.active_state = None
# Share the application to manage between states
self.app = application
self.win = window
self.cfg = configuration
self.pm = plugins_manager
def add_state(self, name):
"""Add a state to the internal dictionary.
"""
self.states.add(name)
def add_failsafe_state(self, name):
"""Add a state that will be call in case of exception.
"""
self.failsafe_state = name
self.states.add(name)
def remove_state(self, name):
"""Remove a state to the internal dictionary.
"""
self.states.discard(name)
if name == self.failsafe_state:
self.failsafe_state = None
def process(self, events):
"""Let the current state do it's thing.
"""
# Only continue if there is an active state
if self.active_state is None:
return
try:
# Perform the actions of the active state
hook = getattr(self.pm.hook, 'state_{}_do'.format(self.active_state))
hook(cfg=self.cfg, app=self.app, win=self.win, events=events)
# Check conditions to activate the next state
hook = getattr(self.pm.hook, 'state_{}_validate'.format(self.active_state))
new_state_name = hook(cfg=self.cfg, app=self.app, win=self.win, events=events)
except Exception as ex:
if self.failsafe_state and self.active_state != self.failsafe_state:
LOGGER.error(str(ex))
if BlockConsoleHandler.is_debug():
traceback.print_exc()
new_state_name = self.failsafe_state
else:
raise
if new_state_name is not None:
self.set_state(new_state_name)
def set_state(self, state_name):
"""Change state machine's active state
"""
try:
# Perform any exit actions of the current state
if self.active_state is not None:
hook = getattr(self.pm.hook, 'state_{}_exit'.format(self.active_state))
hook(cfg=self.cfg, app=self.app, win=self.win)
except Exception as ex:
if self.failsafe_state and self.active_state != self.failsafe_state:
LOGGER.error(str(ex))
if BlockConsoleHandler.is_debug():
traceback.print_exc()
state_name = self.failsafe_state
else:
raise
if state_name not in self.states:
raise ValueError('"{}" not in registered states...'.format(state_name))
# Switch to the new state and perform its entry actions
LOGGER.debug("Activate state '%s'", state_name)
self.active_state = state_name
try:
hook = getattr(self.pm.hook, 'state_{}_enter'.format(self.active_state))
hook(cfg=self.cfg, app=self.app, win=self.win)
except Exception as ex:
if self.failsafe_state and self.active_state != self.failsafe_state:
LOGGER.error(str(ex))
if BlockConsoleHandler.is_debug():
traceback.print_exc()
self.set_state(self.failsafe_state)
else:
raise
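# --- Editor's hedged usage sketch (not part of pibooth) ----------------------
# Minimal stand-ins for the pluggy hook relay, showing the enter/do/validate
# cycle; _Hooks and _FakePluginManager are invented for this sketch only.
if __name__ == '__main__':
    class _Hooks(object):
        def state_idle_enter(self, cfg, app, win):
            print("enter 'idle'")
        def state_idle_do(self, cfg, app, win, events):
            print("do 'idle'")
        def state_idle_validate(self, cfg, app, win, events):
            return None  # stay in 'idle'
    class _FakePluginManager(object):
        hook = _Hooks()
    machine = StateMachine(_FakePluginManager(), None, None, None)
    machine.add_state('idle')
    machine.set_state('idle')
    machine.process(events=[])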
|
werdeil/pibooth
|
pibooth/states.py
|
Python
|
mit
| 3,595
|
from lxml import etree
from mathmlcontent_to_string import MathML2String
s1 = """<semantics xml:id="m52.1a" xref="m52.1.pmml">
<apply xml:id="m52.1.7" xref="m52.1.7.pmml">
<eq xml:id="m52.1.2" xref="m52.1.2.pmml"/>
<qvar xmlns:mws="http://search.mathweb.org/ns" name="A"/>
<apply xml:id="m52.1.7.1" xref="m52.1.7.1.pmml">
<times xml:id="m52.1.7.1.1" xref="m52.1.7.1.1.pmml"/>
<qvar xmlns:mws="http://search.mathweb.org/ns" name="U"/>
<qvar xmlns:mws="http://search.mathweb.org/ns" name="S"/>
<apply xml:id="m52.1.7.1.2" xref="m52.1.7.1.2.pmml">
<csymbol cd="ambiguous" xml:id="m52.1.7.1.2.1">superscript</csymbol>
<qvar xmlns:mws="http://search.mathweb.org/ns" name="V"/>
<ci xml:id="m52.1.6.1" xref="m52.1.6.1.pmml">T</ci>
</apply>
</apply>
</apply>
</semantics>
"""
s2 = """
<semantics xml:id="m54.1a" xref="m54.1.pmml">
<cerror xml:id="m54.1b" xref="m54.1.pmml">
<csymbol cd="ambiguous" xml:id="m54.1c" xref="m54.1.pmml">fragments</csymbol>
<csymbol cd="unknown" xml:id="m54.1d" xref="m54.1.pmml">P</csymbol>
<cerror xml:id="m54.1e" xref="m54.1.pmml">
<csymbol cd="ambiguous" xml:id="m54.1f" xref="m54.1.pmml">fragments</csymbol>
<ci xml:id="m54.1.2" xref="m54.1.2.pmml">normal-[</ci>
<csymbol cd="unknown" xml:id="m54.1g" xref="m54.1.pmml">X</csymbol>
<geq xml:id="m54.1.4" xref="m54.1.4.pmml"/>
<csymbol cd="unknown" xml:id="m54.1h" xref="m54.1.pmml">t</csymbol>
<ci xml:id="m54.1.6" xref="m54.1.6.pmml">normal-]</ci>
</cerror>
<leq xml:id="m54.1.7" xref="m54.1.7.pmml"/>
<apply xml:id="m54.1.8" xref="m54.1.8.pmml">
<divide xml:id="m54.1.8.1" xref="m5.1.8.1.pmml"/>
<apply xml:id="m54.1.8.2" xref="m54.1.8.2.pmml">
<times xml:id="m54.1.8.2.5" xref="m54.1.8.2.5.pmml"/>
<ci xml:id="m54.1.8.2.1" xref="m54.1.8.2.1.pmml">E</ci>
<qvar xmlns:mws="http://search.mathweb.org/ns" name="X"/>
</apply>
<qvar xmlns:mws="http://search.mathweb.org/ns" name="t"/>
</apply>
</cerror>
</semantics>
"""
s3 = """
<semantics>
<cerror>
<qvar name='a'></qvar>
<leq/>
<qvar name='b'></qvar>
</cerror>
</semantics>
"""
address = "/home/narya/Dropbox/NTCIR11-Math2-queries-participants.xml"
doc = etree.parse(address)
formulae = doc.xpath(".//*[local-name() = 'formula']")
for f in formulae:
idx = f.getparent().getparent()[0].text
print idx
#if "10" not in idx: continue
for ann in f.xpath(".//*[local-name() = 'annotation']") + f.xpath(".//*[local-name() = 'annotation-xml']"):
ann_p = ann.getparent()
ann_p.remove(ann)
for sem in f.xpath(".//*[local-name() = 'semantics']"):
m = MathML2String()
print m.convert(etree.ElementTree(sem))
print
#d1 = etree.fromstring(s3.encode("utf-8"))
#print m.convert(etree.ElementTree(d1))
|
frozstone/mcatunification
|
mathmlcontent_to_string_test.py
|
Python
|
mit
| 3,296
|
'''
Write a program that prompts for a file name, then opens that file and reads through the file,
and print the contents of the file in upper case.
'''
fileName = raw_input('Enter file name : ')
openFile = open(fileName)
readFile = openFile.read().upper().strip()
print readFile
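# Editor's hedged sketch (commented out to keep the original Python 2
# behaviour intact): the same exercise in Python 3, with the file closed
# deterministically by a context manager.
#
# file_name = input('Enter file name : ')
# with open(file_name) as handle:
#     print(handle.read().upper().strip())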
|
rahulbohra/Python-Basic
|
55_file_problem_statement.py
|
Python
|
mit
| 280
|
from file_preprocess_js_subset import FilePreProcessJsSubset
from file_preprocess_regex import FilePreProcessRegex
class FilePreProcessorFactory(object):
def get_preprocessor(self, name):
classes = (FilePreProcessJsSubset, FilePreProcessRegex)
for clazz in classes:
if clazz.get_name() == name:
return clazz()
raise FilePreProcessorDoesNotExistException()
class FilePreProcessorDoesNotExistException(Exception):
pass
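# --- Editor's hedged usage sketch (not part of the original module) ----------
# get_preprocessor() matches on each class's get_name(); an unknown name
# raises. "no-such-preprocessor" is assumed not to be a registered name.
if __name__ == '__main__':
    factory = FilePreProcessorFactory()
    try:
        factory.get_preprocessor("no-such-preprocessor")
    except FilePreProcessorDoesNotExistException:
        print("unregistered preprocessor names raise, as expected")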
|
markdrago/caboose
|
src/files/file_preprocessor_factory.py
|
Python
|
mit
| 482
|
from definitions import names
from objects import tiles
import random
class Town (object):
def __init__ (self, seed=None):
self.seed = seed
self.name = self.generate_name(self.seed)
self.shop = False
self.shop_items = []
def generate_name (self, seed=None):
random.seed(self.seed)
name = []
length = random.randint(3,5)
for i in range(length):
name.append(random.choice(names.characters))
name.append(' Town')
return ''.join(name).title()
def add_shop (self):
self.shop = True
def text (self, position=None):
out = ['You are in {}.\n'.format(self.name)]
if self.shop == True:
out.append('Among the tiny houses, you see a shop.\n')
out.append('What would you like to do?')
out = ''.join(out)
return out
def __str__ (self):
return self.name
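# --- Editor's hedged usage sketch (not part of the original module) ----------
# Equal seeds give equal names, since generate_name() re-seeds the
# module-level RNG; note this also resets random's global state as a side
# effect.
if __name__ == '__main__':
    first = Town(seed=42)
    second = Town(seed=42)
    print(first, second)   # the same name, twice
    first.add_shop()
    print(first.text())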
|
rekyuu/rpyg
|
src/objects/towns.py
|
Python
|
mit
| 801
|
__author__ = 'cody'
from dbapi.test.testing_utils import *
import json
class UserTest(DbapiTestCase):
def test_get_user_success(self):
return_value = self.app.get("/user/{}".format(self.test_user.username))
self.assertEqual(self.test_user.todict(), json.loads(return_value.data.decode())["data"])
def test_get_user_not_found(self):
return_value = self.app.get("/user/{}".format(666))
return_text = json.loads(return_value.data.decode())["err"]
self.assertIn(USER_NOT_FOUND, return_text)
self.assertEqual(return_value.status_code, HTTPStatusCodes.NOT_FOUND)
def test_get_all_users(self):
second_user = self.create_user({"username": "second_user", "password": "password"})
return_value = self.app.get("/user")
return_value_data = json.loads(return_value.data.decode())["data"]
self.assertIn(self.test_user.todict(), return_value_data)
self.assertIn(second_user.todict(), return_value_data)
def test_get_all_users_none_found(self):
self.session.delete(self.test_user)
self.session.commit()
return_value = self.app.get("/user")
self.assertEqual(json.loads(return_value.data.decode())["data"], [])
def test_create_new_user(self):
new_user_dict = {"username": "new_user", "password": "password"}
return_value = self.app.post("/user", data=json.dumps(new_user_dict))
query_result = self.session.query(User).filter(User.username == "new_user").first()
self.assertIsNotNone(query_result)
self.assertEqual(query_result.username, "new_user")
def test_create_new_user_already_exists(self):
query_result = self.session.query(User).filter(User.username == self.test_user.username).first()
self.assertIsNotNone(query_result)
self.assertEqual(query_result.username, self.test_user.username)
new_user_dict = {"username": self.test_user.username, "password": "foopassword"}
return_value = self.app.post("/user", data=json.dumps(new_user_dict))
return_text = json.loads(return_value.data.decode())["err"]
self.assertEqual(return_text, USER_ALREADY_EXISTS)
self.assertEqual(return_value.status_code, HTTPStatusCodes.FORBIDDEN)
def test_update_user(self):
update_user_dict = {"username": self.test_user.username, "email": "updated_email"}
return_value = self.app.post("/user/{}".format(self.test_user.username), data=json.dumps(update_user_dict))
self.session.expire_all()
query_result = self.session.query(User).filter(User.username == self.test_user.username).scalar()
self.assertEqual("updated_email", query_result.email)
def test_update_user_not_found(self):
update_user_dict = {"name": self.test_user.username, "email": "updated_email"}
return_value = self.app.post("/user/{}".format(666), data=json.dumps(update_user_dict))
return_text = json.loads(return_value.data.decode())["err"]
self.assertIn(USER_NOT_FOUND, return_text)
self.assertEqual(return_value.status_code, HTTPStatusCodes.NOT_FOUND)
def test_delete_user(self):
return_value = self.app.delete("/user/{}".format(self.test_user.username))
        query_result = self.session.query(User).filter(User.username == self.test_user.username).first()
self.assertIsNone(query_result)
def test_delete_user_not_found(self):
return_value = self.app.delete("/user/{}".format(666))
return_text = json.loads(return_value.data.decode())["err"]
self.assertIn(USER_NOT_FOUND, return_text)
self.assertEqual(return_value.status_code, HTTPStatusCodes.NOT_FOUND)
def test_authenticate_user_success(self):
auth_dict = {"username": self.test_user.username, "password": "password"}
return_value = self.app.post("/user/authenticate", data=json.dumps(auth_dict))
self.assertEqual(self.test_user.todict(), json.loads(return_value.data.decode())["data"])
def test_authenticate_user_failure(self):
auth_dict = {"username": self.test_user.username, "password": "incorrectpassword"}
return_value = self.app.post("/user/authenticate", data=json.dumps(auth_dict))
return_text = json.loads(return_value.data.decode())["err"]
self.assertIn(AUTHENTICATION_FAILURE, return_text)
self.assertEqual(return_value.status_code, HTTPStatusCodes.UNAUTHORISED)
    def test_authenticate_user_not_found(self):
auth_dict = {"username": "incorrectusername", "password": "password"}
return_value = self.app.post("/user/authenticate", data=json.dumps(auth_dict))
return_text = json.loads(return_value.data.decode())["err"]
self.assertIn(USER_NOT_FOUND, return_text)
self.assertEqual(return_value.status_code, HTTPStatusCodes.NOT_FOUND)
if __name__ == '__main__':
unittest.main()
|
codyharrington/todolist
|
dbapi/test/user_test.py
|
Python
|
mit
| 4,887
|
from geodata.addresses.numbering import NumberedComponent
from geodata.encoding import safe_decode
class PostCode(NumberedComponent):
@classmethod
def phrase(cls, postcode, language, country=None):
if postcode is None:
return None
return cls.numeric_phrase('postcodes.alphanumeric', postcode, language,
dictionaries=['postcodes'], country=country)
|
openvenues/libpostal
|
scripts/geodata/addresses/postcodes.py
|
Python
|
mit
| 420
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from hwt.hdl.statements.assignmentContainer import HdlAssignmentContainer
from hwt.hdl.statements.statement import HdlStatement
from hwtLib.examples.axi.oooOp.counterHashTable import OooOpExampleCounterHashTable
from hwtLib.examples.mem.ram import SimpleAsyncRam
from hwtLib.examples.statements.ifStm import SimpleIfStatement3
from hwtLib.mem.atomic.flipReg import FlipRegister
from hwtLib.mem.cuckooHashTable import CuckooHashTable
from hwtLib.peripheral.displays.segment7 import Segment7
from hwtLib.peripheral.i2c.masterBitCntrl import I2cMasterBitCtrl
from hwtLib.tests.synthesizer.interfaceLevel.subunitsSynthesisTC import synthesised
class StatementsConsystencyTC(unittest.TestCase):
def check_consystency(self, u):
synthesised(u)
c = u._ctx
for s in c.signals:
for e in s.endpoints:
if isinstance(e, HdlStatement):
self.assertIs(e.parentStm, None, (s, e))
self.assertIn(e, c.statements)
for d in s.drivers:
if isinstance(d, HdlStatement):
self.assertIs(d.parentStm, None, (s, d))
self.assertIn(d, c.statements)
for stm in c.statements:
self.assertIs(stm.parentStm, None)
def test_if_stm_merging(self):
u = FlipRegister()
self.check_consystency(u)
    def test_complex_stm_ops(self):
u = CuckooHashTable()
self.check_consystency(u)
def test_rm_statement(self):
u = SimpleIfStatement3()
self.check_consystency(u)
stms = u._ctx.statements
self.assertEqual(len(stms), 1)
self.assertIsInstance(list(stms)[0], HdlAssignmentContainer)
def test_index_inputs_with_assignment_has_endpoint(self):
u = SimpleAsyncRam()
self.check_consystency(u)
self.assertEqual(len(u.addr_in._sigInside.endpoints), 1)
self.assertEqual(len(u.addr_out._sigInside.endpoints), 1)
    def test_if_inputs_correct(self):
u = Segment7()
self.check_consystency(u)
def test_unconnected_slices_removed_from_inputs_of_statements(self):
u = OooOpExampleCounterHashTable()
self.check_consystency(u)
def test_stm_enclosure_consystency(self):
u = I2cMasterBitCtrl()
self.check_consystency(u)
# test if there is not a latch
for stm in u._ctx.statements:
if stm._event_dependent_from_branch != 0:
diff = stm._enclosed_for.symmetric_difference(stm._outputs)
self.assertEqual(diff, set(), f"\n{stm}")
if __name__ == '__main__':
unittest.main()
|
Nic30/hwtLib
|
hwtLib/tests/synthesizer/rtlLevel/statements_consystency_test.py
|
Python
|
mit
| 2,701
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
from config import config
from glob import glob
import importlib
import os
import re
from slackclient import SlackClient
import sys
import time
import traceback
import random
from plugins.botlib import BotLib
curdir = os.path.dirname(os.path.abspath(__file__))
os.chdir(curdir)
from config import config
lastchannel = ""
hooks = {}
def init_plugins():
for plugin in glob('plugins/[!_]*.py'):
print("plugin: {0}".format(plugin))
try:
mod = importlib.import_module(plugin.replace(os.path.sep, ".")[:-3])
modname = mod.__name__.split('.')[1]
            for hook in re.findall(r"on_(\w+)", " ".join(dir(mod))):
hookfun = getattr(mod, "on_" + hook)
print("attaching {0}.{1} to {2}".format(modname, hookfun, hook))
hooks.setdefault(hook, []).append(hookfun)
if mod.__doc__:
firstline = mod.__doc__.strip()
hooks.setdefault('help', {})[modname] = firstline
hooks.setdefault('extendedhelp', {})[modname] = mod.__doc__
#bare except, because the modules could raise any number of errors
#on import, and we want them not to kill our server
except:
print("import failed on module {0}, module not loaded".format(plugin))
print("{0}".format(sys.exc_info()[0]))
print("{0}".format(traceback.format_exc()))
init_plugins()
def run_hook(hook, data, server):
    responses = []
    for hook_fun in hooks.get(hook, []):
        h = hook_fun(data, server)
        if h: responses.append(h)
    return responses
def handle_message(client, event):
# ignore bot messages and edits
subtype = event.get("subtype", "")
if subtype == "bot_message" or subtype == "message_changed": return
    botname = client.server.login_data["self"]["name"]
try:
msguser = client.server.users.get(event["user"])
except KeyError:
print("event {0} has no user".format(event))
return
if msguser:
if msguser["name"] == botname or msguser["name"].lower() == "slackbot":
return
text = "\n".join(run_hook("message", event, {"client": client, "config": config, "hooks": hooks}))
if text:
print(event["channel"])
global lastchannel
lastchannel = event["channel"]
client.rtm_send_message(event["channel"], text)
event_handlers = {
"message": handle_message
}
# def msg():
# return random.choice(["yee", "우수개발자....", "ㅋㅋ", "오타쿠다...", "집에가고싶어...", "배고파..."])
if __name__=="__main__":
sc = SlackClient(config["token"])
bot = BotLib.set_slack_client(sc)
if sc.rtm_connect():
users = sc.server.users
while True:
# if lastchannel:
# if random.randint(1, 5000) == 5:
# sc.rtm_send_message(lastchannel, unicode(msg(), "utf8"))
events = sc.rtm_read()
for event in events:
#print "got {0}".format(event.get("type", event))
handler = event_handlers.get(event.get("type"))
if handler:
handler(sc, event)
time.sleep(1)
else:
print("Connection Failed, invalid token <{0}>?".format(config["token"]))
|
storyhe/playWithBot
|
slask.py
|
Python
|
mit
| 3,367
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class ExpressRouteServiceProvidersOperations(object):
"""ExpressRouteServiceProvidersOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2016-12-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-12-01"
self.config = config
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all the available express route service providers.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of ExpressRouteServiceProvider
:rtype:
~azure.mgmt.network.v2016_12_01.models.ExpressRouteServiceProviderPaged[~azure.mgmt.network.v2016_12_01.models.ExpressRouteServiceProvider]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteServiceProviders'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ExpressRouteServiceProviderPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ExpressRouteServiceProviderPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2016_12_01/operations/express_route_service_providers_operations.py
|
Python
|
mit
| 4,238
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('barsystem', '0036_auto_20150721_1314'),
]
operations = [
migrations.AlterModelOptions(
name='productcategory',
options={'verbose_name': 'Product category', 'verbose_name_plural': 'Product categories'},
),
migrations.RenameField(
model_name='product',
old_name='bar_code',
new_name='barcode',
),
]
|
TkkrLab/barsystem
|
barsystem/src/barsystem/migrations/0037_auto_20150721_1500.py
|
Python
|
mit
| 581
|
#!/usr/bin/python3
import unittest
from botocore.exceptions import ClientError
import sys
import datetime
import threading
from queue import Queue
from . import util
class LambdaTaskTestCase(unittest.TestCase):
# ========================================================================
# ========================================================================
# ========================================================================
def test_PollForActivityTask(self):
swf = util.BasicSwfSetup(self)
function_name = swf.upload_lambda('SimpleLambda', 'simple_lambda.js')
domain_name = swf.create_domain()
workflow_type = swf.create_workflow_type()
workflow_id, run_id = swf.start_workflow_execution(workflow_type)
decision_task = swf.poll_for_decision()
swf.assert_workflow_state(decision_task)
swf.assert_has_event(decision_task, 'WorkflowExecutionStarted')
lambda_activity_id = 'Simple Lambda Invocation'
lambda_input = 'Test Input'
swf.client.respond_decision_task_completed(
taskToken=decision_task['taskToken'],
decisions=[
{
'decisionType': 'ScheduleLambdaFunction',
'scheduleLambdaFunctionDecisionAttributes': {
'id': lambda_activity_id,
'input': lambda_input,
'name': 'SimpleLambda'
}
}
]
)
lambda_ordered_events = [
'LambdaFunctionScheduled',
'LambdaFunctionStarted',
'LambdaFunctionCompleted'
]
decision_task = swf.poll_for_decision()
# FIXME DEBUG
print('Decision Events:')
for event in decision_task['events']:
print(' - ' + event['eventType'])
for lambda_event_name in lambda_ordered_events:
swf.assert_workflow_state(decision_task)
print('... checking for ' + lambda_event_name)
event, attributes = util.get_event(lambda_event_name, decision_task['events'])
if event is None:
# Need to complete the previous decision first
swf.client.respond_decision_task_completed(
taskToken=decision_task['taskToken'],
decisions=[]
)
print('... needing to poll again')
decision_task = swf.poll_for_decision()
swf.assert_workflow_state(decision_task)
# FIXME DEBUG
print('Decision Events:')
for event in decision_task['events']:
print(' - ' + event['eventType'])
event, attributes = util.get_event(lambda_event_name, decision_task['events'])
self.assertIsNotNone(event, 'Did not find ' + lambda_event_name)
self.assertIn('result', attributes, '[result] not in completed lambda function')
self.assertEqual(lambda_input, attributes['result'], 'Did not return correct lambda result')
swf.client.respond_decision_task_completed(
taskToken=decision_task['taskToken'],
decisions=[
{
'decisionType': 'CompleteWorkflowExecution',
'completeWorkflowExecutionDecisionAttributes': {}
}
]
)
# ========================================================================
# ========================================================================
# ========================================================================
def test_LambdaInvokingWorkflow(self):
swf = util.BasicSwfSetup(self)
function_name = swf.upload_lambda('SignalLambda', 'signal_lambda.js')
domain_name = swf.create_domain()
workflow_type = swf.create_workflow_type()
workflow_id, run_id = swf.start_workflow_execution(workflow_type)
decision_task = swf.poll_for_decision()
swf.assert_workflow_state(decision_task)
swf.assert_has_event(decision_task, 'WorkflowExecutionStarted')
lambda_activity_id = 'Signal Workflow Lambda Invocation'
signal_input = util.create_new_name()
signal_name = 'Lambda signal'
lambda_input = ('{"domain": "' + domain_name +
'","workflowId": "' + workflow_id +
'","signalName": "' + signal_name +
'","input": "' + signal_input +
'"}')
swf.client.respond_decision_task_completed(
taskToken=decision_task['taskToken'],
decisions=[
{
'decisionType': 'ScheduleLambdaFunction',
'scheduleLambdaFunctionDecisionAttributes': {
'id': lambda_activity_id,
'name': 'SignalLambda',
'input': lambda_input
}
}
]
)
lambda_ordered_events = [
'LambdaFunctionScheduled',
'LambdaFunctionStarted',
'WorkflowExecutionSignaled',
'LambdaFunctionCompleted'
]
decision_task = swf.poll_for_decision()
# FIXME DEBUG
print('Decision Events:')
for event in decision_task['events']:
print(' - ' + event['eventType'])
for lambda_event_name in lambda_ordered_events:
swf.assert_workflow_state(decision_task)
print('... checking for ' + lambda_event_name)
event, attributes = util.get_event(lambda_event_name, decision_task['events'])
if event is None:
# Need to complete the previous decision first
swf.client.respond_decision_task_completed(
taskToken=decision_task['taskToken'],
decisions=[]
)
print('... needing to poll again')
decision_task = swf.poll_for_decision()
swf.assert_workflow_state(decision_task)
# FIXME DEBUG
print('Decision Events:')
for event in decision_task['events']:
print(' - ' + event['eventType'])
event, attributes = util.get_event(lambda_event_name, decision_task['events'])
self.assertIsNotNone(event, 'Did not find ' + lambda_event_name)
swf.client.respond_decision_task_completed(
taskToken=decision_task['taskToken'],
decisions=[
{
'decisionType': 'CompleteWorkflowExecution',
'completeWorkflowExecutionDecisionAttributes': {}
}
]
)
event, attributes = util.get_event('WorkflowExecutionSignaled', decision_task['events'])
self.assertIsNotNone(event, 'Could not find WorkflowExecutionSignaled in history')
self.assertIn('signalName', attributes, 'signalName not in signal event')
self.assertEqual(attributes['signalName'], signal_name, 'signalName not set right')
self.assertIn('input', attributes, 'input not in signal event')
self.assertEqual(attributes['input'], signal_input, 'input not set right')
if __name__ == '__main__':
unittest.main()
|
groboclown/pipit
|
test/integration/boto3/swf/lambdas.py
|
Python
|
mit
| 7,493
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 4 16:04:25 2017
@author: Antonio
The external library used is re.
"""
import re
"""
* Class that encapsulates all the attributes of a JSON object
* used to store the data of a book
"""
class JElement:
"""
    * Class constructor.
    * Receives:
    * title: title of the book
    * dic: dictionary used to translate Dewey codes
"""
def __init__(self,title,dic):
self.dewey = {}
self.dic = dic
self.title=title
self.topics = []
self.added = set()
"""
    * Method that adds topics to the list of topics.
    * Receives:
    * lab: label of the topic
    * cont: topic to add
"""
def addTopic(self,lab,cont):
key = lab+"_"+cont
if not key in self.added:
self.added.add(key)
tmp = {"label":lab,"cont":cont}
self.topics.append(tmp)
"""
    * Method that returns the full Dewey code of the object.
    * Returns:
    * the object's full Dewey code
"""
def getFullDewey(self):
return self.dewey["full"]
"""
    * Method that returns the dictionary where the Dewey code is stored,
    * both in full and split into units, tens and hundreds
"""
def getDewey(self):
return self.dewey
"""
    * Method that returns the title assigned to the object
"""
def getTitle(self):
return self.title
"""
    * Method that returns the content of an entry in the dictionary.
    * Receives:
    * key: key of the dictionary entry
    * Returns:
    * "" if the key is not in the dictionary
    * the dictionary content if the key exists
"""
def getDictElement(self,key):
if key in self.dic:
text = str(self.dic[key])
text = text.replace("\'","")
text = text.replace("{","")
text = text.replace("}","")
return text
else:
return ""
"""
    * Method that returns the list where the topics
    * are stored
"""
def getTopics(self):
return self.topics
"""
    * Method that stores the book's Dewey code in the
    * data dictionary.
    * Receives:
    * dew: the corresponding Dewey code
"""
def setDewey(self,dew):
        if re.fullmatch(r"\d+\.?\d*", dew):
tmp = dew.split(".")
dew = tmp[0]
while len(dew)<3:
dew = "0"+dew
self.dewey["cent"] = dew[0]+"00"
self.dewey["dec"] = "0"+dew[1]+"0"
self.dewey["uni"] = "00"+dew[2]
self.dewey["full"] = dew[0:3]
#self.dewey["original"] = dew
self.dewey["name"] = self.getDictElement(dew[0:3])
else:
self.dewey = {"cent":"","dec":"","uni":"","full":""}
if re.match(".*en proceso de.*",dew.lower()):
self.dewey["name"]="EN PROCESO DE CATALOGACIÓN"
else:
self.dewey["name"]=dew
|
silviaegt/bdcv_metadata
|
Comparador/JElement.py
|
Python
|
mit
| 3,201
|
import Utilities
from tkinter import *
from random import randint
from random import uniform
from collections import deque
from collections import OrderedDict
import numpy as np
from hillClimbing import hillClimbing_basic
from hillClimbing import hillClimbing_randomStart
from hillClimbing import getValueFunction
from hillClimbing import hillClimbing_randomWalk
from randomMatrix import newRandomMatrix
import population_based as p
import time
def intersection(arr,p,size,num):
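    """Crossover step for the population: each individual copies the trailing
    fraction p of its cells (in row-major order) from the next individual,
    wrapping around at the end of the list."""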
print("In intersection\n")
newArr = np.copy(arr)
n= int(p * size * size)
x= int(n / size)
y= int(n % size)
g=0
i=0
j=0
while g < num:
now=g
after=g+1
if now == num-1:
after=0
i=size-x-1
j=size-y
while j < size:
newArr[now][i,j]=arr[after][i,j]
j+=1
i+=1
while i < size:
j=0
while j < size:
newArr[now][i,j]=arr[after][i,j]
j+=1
i+=1
g+=1
print("out of intersection\n")
return newArr
def population_based(size,p,iteration,num,r, bot, submit, m2):
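    """Genetic-algorithm style search over random maze matrices: score each
    individual by the value of its BFS path from (0, 0), select parents
    roulette-wheel style on the normalised scores, recombine them with
    intersection(), then mutate one random cell per individual. The best
    matrix and path found are stored on `r` and rendered into the Tk widgets."""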
start = time.time()
print("in population\n")
i = 0
array=[]
finalValue=0
while i < num:
someMatrix = newRandomMatrix(size)
array.append(someMatrix)
i+=1
i=0
while i < iteration:
val=[]
total=0
g=0
while g < num:
Matrix = np.copy(Utilities.BFS(Utilities.createGraph(array[g]), (0,0), size))
value1 = getValueFunction(Matrix)
if r.pb_valueFunction==0:
r.pb_valueFunction=value1
if value1>=r.pb_valueFunction:
r.pb_valueFunction=value1
r.pb_best_path=Matrix
r.pb_best=array[g]
r.pb_best_iteration=i
if value1 < 0:
value1=0
total+=value1
val.append(value1)
g+=1
g=0
while g < num:
if g == 0:
val[g]=float(val[g]/total)
else:
val[g]=float(val[g]/total)+val[g-1]
g+=1
g=0
h=0
prob=0
newArray=[]
while g < num:
prob=uniform(0.0, 1.0)
while h < num:
if prob <= val[h]:
newArray.append(array[h])
break
h+=1
g+=1
newArray=intersection(newArray,p,size,num)
g=0
while g < num:
Matrix = np.copy(Utilities.BFS(Utilities.createGraph(newArray[g]), (0,0), size))
value1 = getValueFunction(Matrix)
if value1>=r.pb_valueFunction:
r.pb_valueFunction=value1
r.pb_best_path=Matrix
r.pb_best=array[g]
r.pb_best_iteration=i+1
g+=1
g=0
while g < num:
r_row = randint(0,size-1)
r_col = randint(0,size-1)
while r_row == size - 1 and r_col == size - 1:
r_row = randint(0,size-1)
r_col = randint(0,size-1)
valid = max(abs(r_row-(size-1)/2), abs(r_col-(size-1)/2)) + (size -1)/2
new_val = randint(1,valid)
newArray[g][r_row, r_col] = new_val
g+=1
array=newArray
i+=1
g=0
while g < num:
Matrix = np.copy(Utilities.BFS(Utilities.createGraph(array[g]), (0,0), size))
value1 = getValueFunction(Matrix)
if value1>=r.pb_valueFunction:
r.pb_valueFunction=value1
r.pb_best_path=Matrix
r.pb_best=array[g]
r.pb_best_iteration=iteration
g+=1
Utilities.displayMatrix(r.pb_best, bot,0)
Utilities.displayMatrix(r.pb_best_path, m2,1)
a = Label(bot, text = "and the value function is: ")
a.pack()
b = Label(bot, text = str(r.pb_valueFunction))
b.pack()
diff = time.time() - start
t = Label(bot, text = "Total time consumed: ")
t.pack()
t1 = Label(bot, text = diff)
t1.pack()
submit.pack_forget()
print("out of population\n")
|
lj9707/CourseWork
|
cs440/Local Search/population_based.py
|
Python
|
mit
| 4,218
|
# coding: UTF-8
'''Generic functions used by several modules. Primarily data validation.'''
import os
import sys
# The directory that project-specific files are located in.
# Assumes we read the non project-specific files, set directory, then open
# only project-specific files.
directory = ""
test_mode = False
class Invalid(Exception):
'''Exception that triggers upon invalid data.'''
max_err = 1
err_count = 0
def __init__(self, err_code, *args):
super(Invalid, self).__init__(err_code)
err_msg = err_dict[err_code].format(*args)
# Escape all braces in the error code, so format doesn't break.
err_msg = err_msg.replace("{", "{{").replace("}", "}}")
self.message = "Error on {} of {}: " + err_msg
class RestartSignal(Exception):
'''Exception that signals a restart.'''
pass
def terminate():
'''Close the program, waiting for user input if run from executable.'''
if test_mode:
sys.exit()
else:
print("Press enter to rerun Catalysis. Change files as desired.")
print("Otherwise, type something and then hit enter.")
# Input can't have an argument, due to the codec.
do_not_repeat = input()
if do_not_repeat:
sys.exit()
raise RestartSignal
def get_file_name(file_name):
'''Return the name of the file, making corrections for the
Py2Exe handling of file locations.'''
frozen = os.path.dirname(sys.executable) if getattr(
sys, 'frozen', False) else ""
return os.path.join(frozen, directory, file_name)
def quote_replace(match):
'''Replace smart quotes with ASCII quotes.'''
return {"‘": "'", "’": "'", "“": '"', "”": '"'}[match.group()]
def extract_data(file_name):
'''Return the lines of the target file.'''
input_file = get_file_name(file_name)
try:
with open(input_file, "r", encoding="utf-8-sig") as f:
return f.read().splitlines()
except UnicodeDecodeError:
print(("Encoding for {} unknown. Please convert your files to UTF-8 " +
"encoding before continuing.").format(file_name))
except IOError:
print("Ensure {} exists in this folder.".format(file_name))
terminate()
def list_to_n_gen(target_list, mod, msg=""):
'''Convert a flat list to a generator of n-length tuples. Raise an error if
there aren't enough elements to convert fully.'''
remainder = len(target_list) % mod
if remainder:
raise Invalid("implicit tuple", mod, msg, remainder, mod - remainder)
else:
target_list = [iter(target_list)] * mod
return zip(*target_list)
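# For instance, list_to_n_gen([1, 2, 3, 4], 2) yields (1, 2) then (3, 4), while
# a 3-element list with mod=2 raises Invalid("implicit tuple", ...).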
def key_or_value(value, dictionary, word, or_value=False):
'''Return the value from dictionary. If or_value, return value if it's
a dictionary key. If neither work, send an error.'''
try:
return dictionary[value]
except KeyError:
if or_value and value in dictionary.values():
return value
else:
raise Invalid("bad key", value, word)
def is_object(possible_object, target_set, item_name, object_dict):
'''Return the object with the given handle, and validate that the value
for attribute is in target_set. Sample use: converting a handle to
a piece of evidence or profile.'''
try:
target_object = object_dict[possible_object]
except KeyError:
raise Invalid("unk obj", possible_object)
if target_object.attribute not in target_set:
raise Invalid("type in set", item_name, ", ".join(target_set))
return target_object
def find_subobject(seek_attributes, place, target):
'''Search the given place's seek_attributes attributes for an item with
a name attribute of target. Seeks a place's foreground or background
object.'''
for attr in seek_attributes:
for item in place[attr]:
if item["name"] == target:
return (attr, item["id"])
raise Invalid("missing subobj", target)
def validate_int(value, name):
'''Validate that the value is an integer, using name in the error
message. Return the integer.'''
try:
return int(value)
except ValueError:
raise Invalid("int", name)
def int_at_least(value, target, name):
'''Validate that value is an integer at least target, using name in the
error message. Return the integer.'''
value = validate_int(value, name)
if value < target:
raise Invalid("min", name, target)
else:
return value
err_dict = {
"anc dupl": "Anchor {} is already in use.",
"anc unset": "{} anchor {} is not set.",
"arg missing": "{} not provided.",
"attr name": "{} is not a valid attribute name.",
"attr val": "{} is not a valid attribute value.",
"ban duplicate": "{} name {} is used twice.",
"bad arg num": "Command {} does not have the proper number of arguments.",
"bad context": "A command {} must be {}one of: {}.",
"bad exam type": "Received pointing instructions for a {}, instead of one of: {}.",
"bad exp number": (
"Argument {} received {} pieces of an expression instead of {}."
),
"bad global arg num": "{} arguments not taken.",
"bad key": "{} is not a valid {} keyword.",
"bad shape": "You can only examine with poly, rect, or circle, not {}.",
"ban manual": "{} must not be called manually.",
"ban on merge": "You cannot use {} on a merged frame.",
"ban obj name": (
"Objects cannot be given the name of a kind of object. Remove or " +
"rename {}."
),
"block open": (
"The trial has ended with a cross-examination or investigation " +
"still open."
),
"circle 3": "circle needs 3 arguments after the shape.",
"config attr": "Configuration attribute {} is invalid.",
"config colon": "A configuration line must have ': ' exactly once.",
"defaults unsupported": "This (sub)object has no default values.",
"default no object": (
"Attempted to point to an object for a default place without objects."
),
"enforce scene": "Command {} can only be run in an investigation.",
"excess press": (
"Tried to start more press conversations than pressable statements."
),
"excess suffix": "{} has exceeded the number of available sprites.",
"exp dependency": (
"Tried to specify a regular object without specifying a regular " +
"place."
),
"expected_new_obj": "Expected initiation of a new object.",
"global action only": (
"Received multiple arguments for a command that only has globals."
),
"implicit tuple": (
"Expected a multiple of {} arguments{}. Remove {} or add {}."
),
"inadequate press": (
"Tried to {} when {} press conversation(s) need to be written."
),
"in obj": "Line not recognized. Currently in an object.",
"int": "{} must be an integer.",
"keyword claim": "{} name {} is a keyword name.",
"min": "{} must be at least {}.",
"missing locks": (
"Scene number {} does not have psyche-locks, so the psyche-lock " +
"button cannot be manipulated."
),
"mult char no place": (
"You can only define one character on a frame with no defined place."
),
"missing subobj": "Subobject with name {} not found.",
"mult char pos": (
"Detected characters with the same position on this frame."
),
"mult contra": "Tried to make {} contradictory twice.",
"mult pos": "Tried to use position {} twice.",
"mult pres": "Tried to make {} presentable twice.",
"not word": "{} may only have letters, numbers, and underscores.",
"no close brace": "Failed to spot closing brace for argument {}.",
"no continuation": "Line continuation syntax not supported here.",
"no default attr": "{} does not support double colon syntax.",
"no exp": (
"Attempted to use an expression for an action that does not permit" +
" them. See the argument for {}."
),
"no parent obj": "Subobjects need a parent object.",
"num": "{} must be a number.",
"obj subobj forbid": "Subobject {} cannot be used in object {}.",
"parent obj dne": "Parent object {} not found.",
"place post char": (
"Place cannot be defined after characters are defined."
),
"poly 6": "poly needs at least 6 arguments after the shape.",
"poly pair": "poly needs pairs of coordinates.",
"pre char": "There must be a character before {} can be set.",
"pre place": "{} can only be run after a place is defined.",
"prefix dupl": "The prefix {} is not unique.",
"rect 4": "rect needs 4 arguments after the shape.",
"rect to quad 4": (
"Arguments 4 and 5 must be greater than arguments 2 and 3, " +
"respectively."
),
"schema fail": "Ran out of schema arguments at term {}.{}",
"selector": "selector.txt expected this line to be {}.",
"selector length": "selector.txt must have eight lines.",
"subobj dne": "Subobject {} not recognized.",
"suffix dupl": "The suffix {} is not unique.",
"suffix no prefix": "Attempted to set a suffix before setting a prefix.",
"terminal merge": "Attempted to merge the last frame into the next frame.",
"type in set": "{}'s type must be one of: {}.",
"unescaped brace": (
"An unexpected, unescaped brace was found for " +
"argument {}."
),
"unk anc type": "Anchor type {} is not recognized.",
"unk line": "Line not recognized. Check for typos.",
"unk obj": "{} is not a recognized object.",
"unk pre": "{} is not a recognized prefix.",
"unk sprite": "{} is not a recognized longform sprite.",
"unk suff": "{} is not a recognized suffix for prefix {}.",
"unclosed": "{} not closed.",
"valid fail": (
"Critical error! Context validation has failed. {} permitted while " +
"in {}. Please notify Enthalpy."
),
"$ syntax": (
"Expression argument {} contained an odd number of unescaped $ " +
"symbols."
),
": syntax": "Expression argument {} had an unexpected syntactic colon."
}
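# Editor's hedged sketch: how a coded error renders once the location info is
# substituted in by the caller.
#
# Invalid("min", "speed", 3).message.format("line 4", "frames.txt")
# == "Error on line 4 of frames.txt: speed must be at least 3."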
|
Enthalpy-AC/catalysis
|
catalysis_globals.py
|
Python
|
mit
| 10,145
|
from .pydialog import Dialog
__all__ = ['Dialog']
|
nicr9/pydialog
|
pydialog/__init__.py
|
Python
|
mit
| 50
|
SOURCES = {
"vogue_india": {
"NAME": "vogue_india",
"ALLOWED_DOMAINS": "http://www.vogue.in/",
"START_URLS": [
"http://www.vogue.in/fashion/fashion-trends/"
],
"BASE_URL": "http://www.vogue.in/",
"LIST_PAGE_XPATH": "//*[@id='eight_grid_block0']/section/div[1]/h3/a/@href",
"BLOG_CONTENT_XPATH": "//div[contains(@class,'description')]",
"HEADING_XPATH": "//div[contains(@class,'midContent')]/article/h1/text()",
"TEXT_XPATH": "//div[contains(@class,'midContent')]/article/div[1]/p/text()",
"IMG_XPATH": "//div[contains(@class,'cycle-slideshow')]//img/@src"
},
"stylegirlfriend": {
"NAME": "stylegirlfriend",
"ALLOWED_DOMAINS": "http://www.stylegirlfriend.com/",
"START_URLS": [
"http://www.stylegirlfriend.com/blog/"
],
"BASE_URL": "",
"LIST_PAGE_XPATH": "//div[contains(@class,'post-header-area')]//a[2]/@href",
"HEADING_XPATH": "//div[contains(@class,'NO-HEADING-XXX')]",
"BLOG_CONTENT_XPATH": "//div[contains(@class,'blog-post')]",
"TEXT_XPATH": "p//text()",
"IMG_XPATH": "//div[contains(@class,'blog-img')]//img/@src"
},
"gq-magazine": {
"NAME": "gq-magazine",
"ALLOWED_DOMAINS": "http://www.gq-magazine.co.uk/",
"START_URLS": [
"http://www.gq-magazine.co.uk/topic/fashion"
],
"BASE_URL": "http://www.gq-magazine.co.uk",
"LIST_PAGE_XPATH": "//div[contains(@class,'c-card-list__item')]/article/a/@href",
"HEADING_XPATH": "//article[1]/div[1]/div[1]/h1/text()",
"BLOG_CONTENT_XPATH": "//article[1]/div[2]/div/div",
"TEXT_XPATH": "string(.)",
"IMG_XPATH": "//figure/div/img/@data-src"
},
"whowhatwear": {
"NAME": "whowhatwear",
"ALLOWED_DOMAINS": "http://www.whowhatwear.com/",
"START_URLS": [
"http://www.whowhatwear.com/section/fashion-trends"
],
"BASE_URL": "http://www.whowhatwear.com/",
"LIST_PAGE_XPATH": "//div[@class='promo-feed-img']/a/@href",
"HEADING_XPATH": "//div[contains(@class,'widgets-list-headline')]/h1/text()",
"BLOG_CONTENT_XPATH": "//article[1]/div[2]/div/div",
"TEXT_XPATH": "//div[contains(@class,'body')]/p//text()",
"IMG_XPATH": "//div[contains(@class,'image-container')]/img/@src"
}
}
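# Editor's hedged sketch: a spider would typically be parameterised with one of
# these entries, e.g.
#
# cfg = SOURCES["whowhatwear"]
# start_urls, list_xpath = cfg["START_URLS"], cfg["LIST_PAGE_XPATH"]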
|
rksaxena/crawler_templates
|
crawler_type2/crawler_type2/config.py
|
Python
|
mit
| 2,412
|
'''
Created on Mar 16, 2012
@author: mchrzanowski
'''
from math import log10
from time import time
class FibGenerator(object):
    ''' quick iterative Fibonacci calculator exploiting the fact that we need to calculate each
    Fibonacci number in order, yet we only ever need to keep the last two of them.'''
def __init__(self):
''' start the variables out with the first two Fib numbers. Label the second to be the first Fib number'''
self.previous = 0
self.current = 1
self.iterator = 1
def generateNext(self):
''' return the next fib number as well as an identifier of which fib number this is '''
self.previous, self.current = self.current, self.previous + self.current
self.iterator += 1
return self.iterator, self.current
def main():
solution = 0
pandigitals = frozenset(i for i in xrange(1, 10))
fibGenerator = FibGenerator()
def createSetOfDigitsInNumber(number):
setToFill = set()
while number != 0:
setToFill.add(number % 10)
number /= 10
return setToFill
while solution == 0:
fibNumber, result = fibGenerator.generateNext()
# formula to get the first number of digits in a number came from:
# http://www.maths.surrey.ac.uk/hosted-sites/R.Knott/Fibonacci/fibFormula.html#fibinits
# do the head check first as it's FAR faster than if the tail is checked first.
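        # 10 ** frac(log10(result)) lies in [1, 10) and carries the leading
        # digits of result; multiplying by 10 ** 8 exposes the first 9 digits
        # without building the full decimal string.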
headSet = createSetOfDigitsInNumber(int(10 ** (log10(result) - int(log10(result))) * 10 ** 8))
if headSet == pandigitals:
tailSet = createSetOfDigitsInNumber(result % 10 ** 9)
if pandigitals == tailSet:
solution = fibNumber
print "Solution: ", solution
if __name__ == '__main__':
start = time()
main()
end = time()
print "Runtime:", end - start, "seconds."
|
mchrzanowski/ProjectEuler
|
src/python/Problem104.py
|
Python
|
mit
| 1,982
|
from IRC import IRC
from CMDParser import CMDParser
import sys, time, os, datetime
from pykeyboard import PyKeyboard
class TwitchPlay:
def start(self, mode):
os.popen("vba 大聯盟棒球賽.gba")
irc = IRC("irc.twitch.tv", "ji01", "oauth:kx5f1v4wi8ni4fkmayta7ov9mzzy")
irc.connect()
try:
parser = CMDParser(mode)
except CMDParser.NoModeError:
print("available modes: " + CMDParser.print_mode())
exit(1)
print("Gamemode: " + mode)
print("ready")
all_commands = ["left", "right", "up", "down", "select", "start", "A", "B", "L", "R"]
commands = all_commands
while True:
try:
if mode == "democracy":
print("start voting for 5 second(s)")
time.sleep(5)
messages = irc.get_msg(irc.recv())
text = ""
for message in messages :
print(message)
text += message.text
result = parser.parse(text, commands)
if mode == "democracy" and len(result) > 1:
print("voting again for " + str(result))
commands = result
print(result)
else :
commands = all_commands
if (len(result) > 0):
print("commands :" + str(result))
for cmd in result :
self.sendkey(cmd)
except KeyboardInterrupt:
print ("Ctrl-C caught, disconnecting")
irc.disconnect()
sys.exit()
def sendkey(self, cmd):
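        # The numeric entries are Windows virtual-key codes: 0x25-0x28 are
        # VK_LEFT/VK_UP/VK_RIGHT/VK_DOWN, 0x0d is VK_RETURN and 0x08 is
        # VK_BACK; letter keys are passed to PyKeyboard as characters.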
keys = {"left":0x25, "right":0x27, "up":0x26, "down":0x28, "A":"z", "B":"x", "L":"a", "R":"s", "start":0x0d, "select":0x08}
kb = PyKeyboard()
kb.tap_key(keys[cmd])
|
joshua5201/NATwitchPlay
|
TwitchPlay.py
|
Python
|
mit
| 1,906
|
from utils import fio
import matplotlib.pyplot as plt
from mlp.neural_network import MLPClassifier # Using copies of dev branch from sklearn, since these classes are not yet released.
from sklearn.preprocessing import MinMaxScaler
# different learning rate schedules and momentum parameters
#params = [{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': 0, 'learning_rate_init': 0.2},
# {'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': .9, 'nesterovs_momentum': False, 'learning_rate_init': 0.2},
# {'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': .9, 'nesterovs_momentum': True, 'learning_rate_init': 0.2},
# {'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0, 'learning_rate_init': 0.2},
# {'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9, 'nesterovs_momentum': True, 'learning_rate_init': 0.2},
# {'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9, 'nesterovs_momentum': False, 'learning_rate_init': 0.2},
# {'algorithm': 'adam'}]
#
#labels = ["constant learning-rate",
# "constant with momentum",
# "constant with Nesterov's momentum",
# "inv-scaling learning-rate",
# "inv-scaling with momentum",
# "inv-scaling with Nesterov's momentum",
# "adam"]
params = [{'algorithm': 'sgd', 'learning_rate_init': 0.1, 'hidden_layer_sizes': (30,), 'max_iter': 20, 'tol': -1},
{'algorithm': 'sgd', 'learning_rate_init': 0.1, 'hidden_layer_sizes': (60,), 'max_iter': 20},
{'algorithm': 'sgd', 'learning_rate_init': 0.1, 'hidden_layer_sizes': (100,), 'max_iter': 20},
{'algorithm': 'sgd', 'learning_rate_init': 0.2, 'hidden_layer_sizes': (30,), 'max_iter': 20},
{'algorithm': 'sgd', 'learning_rate_init': 0.2, 'hidden_layer_sizes': (60,), 'max_iter': 20},
{'algorithm': 'sgd', 'learning_rate_init': 0.2, 'hidden_layer_sizes': (100,), 'max_iter': 20},
{'algorithm': 'sgd', 'learning_rate_init': 0.3, 'hidden_layer_sizes': (100,), 'max_iter': 20}]
labels = ["lr:0.1, neurons:30",
"lr:0.1, neurons:60",
"lr:0.1, neurons:100",
"lr:0.2, neurons:30",
"lr:0.2, neurons:60",
"lr:0.2, neurons:100",
"lr:0.3, neurons:100"]
plot_args = [{'c': 'red', 'linestyle': '-'},
{'c': 'green', 'linestyle': '-'},
{'c': 'blue', 'linestyle': '-'},
{'c': 'red', 'linestyle': '--'},
{'c': 'green', 'linestyle': '--'},
{'c': 'blue', 'linestyle': '--'},
{'c': 'black', 'linestyle': '-'}]
def plot_on_dataset(X, y, ax, name):
# for each dataset, plot learning for each learning strategy
print("\nlearning on dataset %s" % name)
ax.set_title(name)
X = MinMaxScaler().fit_transform(X)
mlps = []
for label, param in zip(labels, params):
print("training: %s" % label)
mlp = MLPClassifier(verbose=0, random_state=0, **param)
mlp.fit(X, y)
mlps.append(mlp)
print("Training set score: %f" % mlp.score(X, y))
print("Training set loss: %f" % mlp.loss_)
for mlp, label, args in zip(mlps, labels, plot_args):
ax.plot(mlp.loss_curve_, label=label, **args)
def export_predictions(predictions):
config = fio.get_config()
file_path = "./evaluation/mnist_mlp_result.csv"
fio.export_csv_data(file_path, predictions)
def main():
config = fio.get_config()
# print("Config sections: %s" % config.sections())
# Load train set.
csv_train_set_data = fio.import_csv_data(fio.get_absolute_path(config.get('MNIST', 'trainingset')))
#print("CSV train data length: %i" % len(csv_train_set_data))
#train_set_sample_data = fio.get_random_data_sample(csv_train_set_data, 2699) # Just load 10% random data while developing.
train_set_lables, train_set_data = fio.split_labels_data(csv_train_set_data, 0)
# Rescale.
train_set_data = train_set_data / 255.
print("Train data length: %i" % len(train_set_data))
# Load test set.
csv_test_set_data = fio.import_csv_data(fio.get_absolute_path(config.get('MNIST', 'testset')))
print("Test data length: %i" % len(csv_test_set_data))
#test_set_sample_data = fio.get_random_data_sample(csv_test_set_data, 1501) # Just load 10% random data while developing.
test_set_lables, test_set_data = fio.split_labels_data(csv_test_set_data, 0)
# Rescale.
test_set_data = test_set_data / 255.
## mlp = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
## algorithm='sgd', verbose=10, tol=1e-4, random_state=1)
mlp = MLPClassifier(hidden_layer_sizes=(len(train_set_data) * 0.1,), max_iter=30, alpha=1e-4,
algorithm='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1)
X = MinMaxScaler().fit_transform(train_set_data)
mlp.fit(X, train_set_lables)
print("Training set score: %f" % mlp.score(X, train_set_lables))
print("Training set loss: %f" % mlp.loss_)
print("Test set score: %f" % mlp.score(test_set_data, test_set_lables))
# Load evaluation set.
evaluation_set_data = fio.import_csv_data(fio.get_absolute_path(config.get('Evaluation.SVM', 'mnist')))
print("Evaluation data length: %i" % len(evaluation_set_data))
# Rescale.
evaluation_set_data = evaluation_set_data / 255.
predictions = mlp.predict(evaluation_set_data)
export_predictions(predictions)
#fig, axes = plt.subplots(3, 3)
## use global min / max to ensure all weights are shown on the same scale
#vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()
#for coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):
# ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=.5 * vmin,
# vmax=.5 * vmax)
# ax.set_xticks(())
# ax.set_yticks(())
#plt.show()
#fig = plt.figure()
#ax = fig.add_subplot(1, 1, 1)
#plot_on_dataset(train_set_data, train_set_lables, ax=ax, name="mnist")
#fig.legend(ax.get_lines(), labels=labels, ncol=3, loc="upper center")
#plt.show()
# Program entry point. Don't execute if imported.
if __name__ == '__main__':
main()
|
dwettstein/pattern-recognition-2016
|
mlp_main.py
|
Python
|
mit
| 6,307
|
import json
from django.test import TestCase
from package.repos import get_repo_for_repo_url
from package.repos.bitbucket import repo_handler as bitbucket_handler
from package.repos.github import repo_handler as github_handler
from package.repos.base_handler import BaseHandler
from package.repos.unsupported import UnsupportedHandler
from package.models import Commit, Package, Category
class BaseBase(TestCase):
def setUp(self):
self.category = Category.objects.create(
title='dummy',
slug='dummy'
)
self.category.save()
class TestBaseHandler(BaseBase):
def setUp(self):
super(TestBaseHandler, self).setUp()
self.package = Package.objects.create(
title="Django Piston",
slug="django-piston",
repo_url="https://bitbucket.org/jespern/django-piston",
category=self.category
)
def test_not_implemented(self):
# TODO switch the NotImplemented to the other side
handler = BaseHandler()
self.assertEqual(NotImplemented, handler.title)
self.assertEqual(NotImplemented, handler.url)
self.assertEqual(NotImplemented, handler.repo_regex)
self.assertEqual(NotImplemented, handler.slug_regex)
self.assertEqual(NotImplemented, handler.__str__())
self.assertEqual(NotImplemented, handler.fetch_metadata(self.package))
self.assertEqual(NotImplemented, handler.fetch_commits(self.package))
def test_is_other(self):
handler = BaseHandler()
self.assertEqual(handler.is_other, False)
def test_get_repo_for_repo_url(self):
samples = """u'http://repos.entrouvert.org/authentic.git/tree
http://code.basieproject.org/
http://znc-sistemas.github.com/django-municipios
http://django-brutebuster.googlecode.com/svn/trunk/BruteBuster/
http://hg.piranha.org.ua/byteflow/
http://code.google.com/p/classcomm
http://savannah.nongnu.org/projects/dina-project/
tyrion/django-acl/
izi/django-admin-tools/
bkonkle/django-ajaxcomments/
http://django-ajax-selects.googlecode.com/svn/trunk/
http://django-antivirus.googlecode.com/svn/trunk/
codekoala/django-articles/
https://launchpad.net/django-audit
https://django-audit.googlecode.com/hg/
tyrion/django-autocomplete/
http://code.google.com/p/django-autocomplete/
http://pypi.python.org/pypi/django-autoreports
http://code.google.com/p/django-basic-tumblelog/
schinckel/django-biometrics/
discovery/django-bitly/
bkroeze/django-bursar/src
http://hg.mornie.org/django/c5filemanager/
https://code.launchpad.net/django-cachepurge
http://code.google.com/p/django-campaign/
http://code.google.com/p/django-cas/
http://code.google.com/p/django-chat
http://code.google.com/p/django-compress/
https://launchpad.net/django-configglue
dantario/djelfinder/
ubernostrum/django-contact-form/
http://bitbucket.org/smileychris/django-countries/
http://code.google.com/p/django-courier
http://django-cube.googlecode.com/hg
http://launchpad.net/django-debian
http://pypi.python.org/pypi/django-debug-toolbar-extra
http://code.playfire.com/django-debug-toolbar-user-panel
http://svn.os4d.org/svn/djangodevtools/trunk
http://code.google.com/p/django-dynamic-formset
http://code.google.com/p/django-evolution/
http://pypi.python.org/pypi/django-form-admin
muhuk/django-formfieldset/
http://code.google.com/p/django-forum/
http://code.google.com/p/django-generic-confirmation
http://pypi.python.org/pypi/django-genericforeignkey
https://launchpad.net/django-genshi
http://code.google.com/p/django-gmapi/
http://code.google.com/p/django-ids
http://pypi.python.org/pypi/django-inlinetrans
http://www.github.com/rosarior/django-inventory
codekoala/django-ittybitty/overview
http://bitbucket.org/mrpau/django-jobsboard
http://code.google.com/p/django-jqchat
http://code.google.com/p/djangokit/
http://code.google.com/p/django-ldap-groups/
carljm/django-localeurl/
http://code.google.com/p/django-messages/
robcharlwood/django-mothertongue/
fivethreeo/django-mptt-comments/
http://code.google.com/p/django-multilingual
http://code.google.com/p/django-navbar/
http://code.larlet.fr/django-oauth-plus/wiki/Home
http://django-observer.googlecode.com/svn/trunk/
aaronmader/django-parse_rss/tree/master/parse_rss
http://bitbucket.org/fhahn/django-permission-backend-nonrel
https://code.google.com/p/django-pgsql-interval-field
http://code.google.com/p/django-profile/
lukaszb/django-projector/
http://pypi.python.org/pypi/django-proxy-users
https://bitbucket.org/dias.kev/django-quotidian
nabucosound/django-rbac/
http://djangorestmodel.sourceforge.net/index.html
kmike/django-robokassa/
http://code.google.com/p/django-selectreverse/
http://code.google.com/p/django-simple-newsletter/
http://code.google.com/p/django-simplepages/
http://code.google.com/p/django-simple-wiki
http://pypi.python.org/pypi/django-smart-extends
vgavro/django-smsgate/
schinckel/django-sms-gateway/
http://pypi.python.org/pypi/django-staticmedia
http://opensource.washingtontimes.com/projects/django-supertagging/
http://code.google.com/p/django-tagging-autocomplete
https://source.codetrax.org/hgroot/django-taggit-autocomplete-modified
feuervogel/django-taggit-templatetags/
http://code.google.com/p/django-tasks/
http://code.google.com/p/djangotechblog/
https://launchpad.net/django-testscenarios/
http://django-thumbs.googlecode.com/svn/trunk/
http://code.google.com/p/django-trackback/
http://code.google.com/p/django-transmeta
http://sourceforge.net/projects/django-ui
daks/django-userthemes/
https://django-valuate.googlecode.com/hg
kmike/django-vkontakte-iframe/
http://code.google.com/p/django-voice
http://code.google.com/p/django-wikiapp
cleemesser/django-wsgiserver/
http://code.google.com/p/djapian/
http://code.google.com/p/djfacet
http://code.google.com/p/dojango-datable
http://evennia.googlecode.com/svn/trunk
http://feedjack.googlecode.com/hg
http://code.google.com/p/fullhistory
http://code.google.com/p/goflow
https://launchpad.net/django-jsonfield
https://launchpad.net/linaro-django-xmlrpc/
http://linkexchange.org.ua/browser
http://code.google.com/p/mango-py
http://dev.merengueproject.org/
http://code.google.com/p/django-inoutboard/
http://svn.osqa.net/svnroot/osqa/trunk
http://peach3.nl/trac/
jespern/django-piston/
http://code.google.com/p/django-provinceitaliane/
http://bitbucket.org/kmike/pymorphy
schinckel/django-rest-api/
chris1610/satchmo/
spookylukey/semanticeditor/
http://code.google.com/p/sorethumb/
andrewgodwin/south/
http://source.sphene.net/svn/root/django/communitytools/trunk
http://source.sphene.net/svn/root/django/communitytools
sebpiq/spiteat/
schinckel/django-timedelta-field/
http://projects.unbit.it/hg/uwsgi
http://www.dataportal.it"""
for sample in samples.split("\n"):
self.assertTrue(isinstance(get_repo_for_repo_url(sample), UnsupportedHandler))
"""
class TestBitbucketRepo(TestBaseHandler):
def setUp(self):
super(TestBitbucketRepo, self).setUp()
self.package = Package.objects.create(
title="django",
slug="django",
repo_url="https://bitbucket.org/django/django",
category=self.category
)
def test_fetch_commits(self):
self.assertEqual(Commit.objects.count(), 0)
bitbucket_handler.fetch_commits(self.package)
self.assertNotEqual(Commit.objects.count(), 0)
def test_fetch_metadata(self):
package = bitbucket_handler.fetch_metadata(self.package)
self.assertTrue(
package.repo_description.startswith("Official clone of the Subversion repo")
)
self.assertTrue(package.repo_watchers > 0)
self.assertTrue(package.repo_forks > 0)
self.assertEquals(package.participants, "django")
"""
class TestGithubRepo(TestBaseHandler):
def setUp(self):
super(TestGithubRepo, self).setUp()
self.package = Package.objects.create(
title="Django",
slug="django",
repo_url="https://github.com/django/django",
category=self.category
)
# def test_fetch_commits(self):
# import time
# time.sleep(10)
# self.assertEqual(Commit.objects.count(), 0)
# github_handler.fetch_commits(self.package)
# self.assertTrue(Commit.objects.count() > 0)
# def test_fetch_metadata(self):
# # Currently a live tests that access github
# package = github_handler.fetch_metadata(self.package)
# self.assertEqual(package.repo_description, "The Web framework for perfectionists with deadlines.")
# self.assertTrue(package.repo_watchers > 100)
# # test what happens when setting up an unsupported repo
# self.package.repo_url = "https://example.com"
# self.package.fetch_metadata()
# self.assertEqual(self.package.repo_description, "")
# self.assertEqual(self.package.repo_watchers, 0)
# self.package.fetch_commits()
class TestRepos(BaseBase):
def test_repo_registry(self):
from package.repos import get_repo, supported_repos
g = get_repo("github")
self.assertEqual(g.title, "Github")
self.assertEqual(g.url, "https://github.com")
self.assertTrue("github" in supported_repos())
self.assertRaises(ImportError, lambda: get_repo("xyzzy"))
|
pydanny/djangopackages
|
package/tests/test_repos.py
|
Python
|
mit
| 9,307
|
import os
import collections
import lamnfyc.context_managers
import lamnfyc.settings
import lamnfyc.decorators
import lamnfyc.packages.base
import subprocess
@lamnfyc.decorators.check_installed('bin/python')
def two_seven_installer(package, temp, env):
command = '''LDFLAGS="-L{path}/lib"
CPPFLAGS="-I{path}/include -I{path}/ssl"
CFLAGS="-I{path}/include"
LD_LIBRARY_PATH={path}/lib ./configure --prefix={path} --with-ensurepip=yes --libdir={path}/lib'''
temp = os.path.join(temp, 'Python-{}'.format(package.version))
with lamnfyc.context_managers.chdir(temp):
subprocess.call(command.format(path=lamnfyc.settings.environment_path), env=env, shell=True)
subprocess.call('make', env=env, shell=True)
subprocess.call('make install', env=env, shell=True)
    # pip doesn't exist, need to go get it
if not os.path.exists(os.path.join(lamnfyc.settings.environment_path, 'bin', 'pip')):
ez_setup_path = os.path.join(temp, 'ez_setup.py')
lamnfyc.utils.download('https://bootstrap.pypa.io/ez_setup.py', ez_setup_path)
subprocess.call('python {}'.format(ez_setup_path), env=env, shell=True)
subprocess.call('easy_install pip', env=env, shell=True)
# upgrade pip to latests
subprocess.call('pip install -U pip', env=env, shell=True)
def three_five_installer(package, temp, env):
    raise NotImplementedError()
class PythonPackage(lamnfyc.packages.base.TarPacket):
BASE_PATH = os.path.dirname(os.path.realpath(__file__))
class Python27Package(PythonPackage):
"""
Used for python < 2.7.9 where pip was not integrated
"""
# attributed to the environment if not there
ENVIRONMENT_VARIABLES = (
('PYTHONNOUSERSITE', '$VIRTUAL_ENV/lib/python2.7/site-packages',),
('LDSHARED', 'clang -bundle -undefined dynamic_lookup',),
('LDCXXSHARED', 'clang++ -bundle -undefined dynamic_lookup',),
('BLDSHARED', '$LDSHARED',),
)
VERSIONS = collections.OrderedDict()
VERSIONS['3.5.0'] = PythonPackage('https://www.python.org/ftp/python/3.5.0/Python-3.5.0.tar.xz',
installer=three_five_installer,
md5_signature='d149d2812f10cbe04c042232e7964171',
depends_on=[
lamnfyc.packages.base.RequiredPacket(name='readline', version='6.3'),
lamnfyc.packages.base.RequiredPacket(name='openssl', version='1.0.2g'),
])
VERSIONS['2.7.12'] = PythonPackage('https://www.python.org/ftp/python/2.7.12/Python-2.7.12.tar.xz',
installer=two_seven_installer,
md5_signature='57dffcee9cee8bb2ab5f82af1d8e9a69',
depends_on=[
lamnfyc.packages.base.RequiredPacket(name='readline', version='6.3'),
lamnfyc.packages.base.RequiredPacket(name='openssl', version='1.0.2g'),
])
VERSIONS['2.7.9'] = PythonPackage('https://www.python.org/ftp/python/2.7.9/Python-2.7.9.tar.xz',
installer=two_seven_installer,
md5_signature='38d530f7efc373d64a8fb1637e3baaa7',
depends_on=[
lamnfyc.packages.base.RequiredPacket(name='readline', version='6.3'),
lamnfyc.packages.base.RequiredPacket(name='openssl', version='1.0.2g'),
])
VERSIONS['2.7.6'] = Python27Package('https://www.python.org/ftp/python/2.7.6/Python-2.7.6.tar.xz',
installer=two_seven_installer,
md5_signature='bcf93efa8eaf383c98ed3ce40b763497',
depends_on=[
lamnfyc.packages.base.RequiredPacket(name='readline', version='6.3'),
lamnfyc.packages.base.RequiredPacket(name='openssl', version='1.0.2g'),
])
for version, item in VERSIONS.iteritems():
item.name = 'python'
item.version = version
|
kingbuzzman/lamnfyc
|
lamnfyc/packages/python/__init__.py
|
Python
|
mit
| 4,356
|
##################################################
######scikit_learn to do the classifications######
##################################################
##################################################
from sklearn import svm
from sklearn.naive_bayes import GaussianNB
##################################################
#####Hard coded (currently) where the datasets####
#################are located######################
##################################################
attack_file = 'C:/Users/will_doyle/Documents/GitHub/datamining/format_py/single_ngram_attack.txt'
normal_file = 'C:/Users/will_doyle/Documents/GitHub/datamining/format_py/single_ngram_normal.txt'
test_file = 'C:/Users/will_doyle/Documents/GitHub/datamining/format_py/single_ngram_vali.txt'
##################################################
####Create the instances for validation testing###
##################################################
##################################################
def makeValiInstance(fileName):
if isinstance(fileName,str):
my_file = open(str(fileName),"r+")
words = my_file.read().split("\n")
my_file.close()
words.remove('')
num_instances = words.count("new")
print("Number of Instances to Validate: " + str(num_instances))
instance = []
data = []
for line in words:
if line == "new":
my_data = [data]
instance += (my_data)
data = []
data.extend([line.split()])
for i in instance:
for entry in i:
if '1' in entry:
entry.remove('1')
if '0' in entry:
entry.remove('0')
return instance
else:
return -1
##################################################
#####Create the instances for training############
##################################################
##################################################
def makeFitInstance(fileName):
if isinstance(fileName, str):
my_file = open(str(fileName), "r+")
words = my_file.read().split("\n")
my_file.close()
words.remove('')
data = []
for line in words:
data.extend([line.split()])
classi = []
for entry in data:
if entry[-1] == '1':
classi.extend('a')
entry.remove('1')
else:
classi.extend('n')
entry.remove('0')
instance = {}
instance[0] = data
instance[1] = classi
return instance
else:
return -1
##################################################
#######Calculates the class of the subsequences###
########as a ratio################################
##################################################
def calClass(svm,data):
normal = ['n']
attack = ['a']
num = 0
total_n = 0
total_a = 0
if ['new'] in data:
data.remove(['new'])
for x in data:
num += 1
if svm.predict(x) == attack:
total_a += 1
elif svm.predict(x) == normal:
total_n += 1
else:
print("OOPS")
return
nratio = (float(total_n)/float(num))
aratio = (float(total_a)/float(num))
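    # Decision rule: a trace counts as 'normal' only when more than 90% of its
    # subsequences are predicted normal; aratio is computed for symmetry but
    # the threshold is applied to nratio alone.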
if nratio > 0.9:
return 'normal'
else:
return 'attack'
##################################################
#########Percentage validation####################
###########of the validation data#################
##################################################
def validateClass(svm,validation_array):
validate = 0.0
num = 0.0
print("length: " + str(len(validation_array)))
for data in validation_array:
num += 1
if calClass(svm,data) == 'normal':
validate += 1
print("NUM: " + str(int(num)) + " CLASSIFIED AS: " + calClass(svm,data))
return float((validate)/(num))
##################################################
################Main##############################
##################################################
##################################################
print("Creating the training data...")
##################################################
#############Create the attack and################
#################normal data and combine them#####
##################################################
instance_a = makeFitInstance(attack_file)
instance_n = makeFitInstance(normal_file)
fit_data = instance_a[0] + instance_n[0]
fit_classes = instance_a[1] + instance_n[1]
print("Training the model....")
##################################################
##########Train the Gaussian Naive Bayes##########
###################classifier#####################
##################################################
clf = GaussianNB()
clf.fit(fit_data,fit_classes)
print("Model has been trained, building test dataset...")
##################################################
#############Create the validation data###########
##################################################
##################################################
instance_v = makeValiInstance(test_file)
print("Validating the test dataset...")
##################################################
############Validate the data with the trained####
############Gaussian Naive Bayes classifier#######
##################################################
print("Percentage validated correctly: " + str(validateClass(clf,instance_v)))
|
doylew/detectionsc
|
format_py/n_gram_nb.py
|
Python
|
mit
| 5,595
|
from securitylib.crypto import *
from nose.tools import ok_, eq_, with_setup
from test_utils import setup_seeded_random, teardown_seeded_random, assert_raises_with_message
def test_generate_authenticator():
eq_(generate_authenticator('KJxyKJaV06', '5f07ec7a02bb0d7dc92d8aae1e0817e2a64a1265797b45f4780b49af11df61e1'.decode('hex')), 'aee1a8fc5443bbaf982b074c755b4e4faee028cc54ecb83868ec3e1a64f45e6f'.decode('hex'))
assert_raises_with_message(ValueError, 'Parameter authenticator_key must have length 32 bytes.', generate_authenticator, 'KJxyKJaV06', 'cf9021efdfec6a4e3fd8'.encode('hex'))
def test_validate_authenticator():
ok_(validate_authenticator('KJxyKJaV06', '5f07ec7a02bb0d7dc92d8aae1e0817e2a64a1265797b45f4780b49af11df61e1'.decode('hex'), 'aee1a8fc5443bbaf982b074c755b4e4faee028cc54ecb83868ec3e1a64f45e6f'.decode('hex')))
assert_raises_with_message(ValueError, 'Parameter authenticator_key must have length 32 bytes.', validate_authenticator, 'KJxyKJaV06', 'cf9021efdfec6a4e3fd8', '')
@with_setup(setup_seeded_random, teardown_seeded_random)
def test_generate_encryption_key():
eq_(generate_encryption_key(), '9a45076e45211648b857327311a73c1b'.decode('hex'))
@with_setup(setup_seeded_random, teardown_seeded_random)
def test_generate_authenticator_key():
eq_(generate_authenticator_key(), 'bdb4b6e8d792e4c973c0039c8d4f59a79a45076e45211648b857327311a73c1b'.decode('hex'))
def test_generate_encryption_key_from_password():
eq_(generate_encryption_key_from_password('password', 'salt'), '5c3d0b075ebf4e11b346cf18512e8dda'.decode('hex'))
def test_generate_authenticator_key_from_password():
eq_(generate_authenticator_key_from_password('password', 'salt'), '5c3d0b075ebf4e11b346cf18512e8ddaf29f70d67e67a94e6defe076d461e042'.decode('hex'))
@with_setup(setup_seeded_random, teardown_seeded_random)
def test_encrypt():
eq_(encrypt('The quick brown fox was not quick enough and is now an UNFOX!', 'aa79a8ab43636644d77f2b6b34842b98'.decode('hex'), '61d1a03428fd560ddf93734869ad951cb11d643e69ac19301427f16407d8faf8'.decode('hex')),
'0128153d5614aebc47fc2b69331aa1895d70e45fdffa94f04bae7ecef12f9dd4729a45076e45211648b857327311a73c1b00000000eff464a6b51411e7997787049fb0424faecff0786f213652116b4a50022e04cf24ff607d6366b9e3771486f396f8a3dd3d77f5c07bac8d2e0758454e511157e1'.decode('hex'))
assert_raises_with_message(ValueError, 'Parameter key must have length 16 bytes.', encrypt, '', 'ababab'.decode('hex'), 'abcdef'.decode('hex'))
assert_raises_with_message(ValueError, 'Parameter authenticator_key must have length 32 bytes.', encrypt, '', 'aa79a8ab43636644d77f2b6b34842b98'.decode('hex'), 'abcdef'.decode('hex'))
def test_decrypt():
eq_(decrypt('0128153d5614aebc47fc2b69331aa1895d70e45fdffa94f04bae7ecef12f9dd4729a45076e45211648b857327311a73c1b00000000eff464a6b51411e7997787049fb0424faecff0786f213652116b4a50022e04cf24ff607d6366b9e3771486f396f8a3dd3d77f5c07bac8d2e0758454e511157e1'.decode('hex'),
'aa79a8ab43636644d77f2b6b34842b98'.decode('hex'), '61d1a03428fd560ddf93734869ad951cb11d643e69ac19301427f16407d8faf8'.decode('hex')), 'The quick brown fox was not quick enough and is now an UNFOX!')
assert_raises_with_message(ValueError, 'Parameter key must have length 16 bytes.', decrypt, '', 'ababab'.decode('hex'), 'abcdef'.decode('hex'))
assert_raises_with_message(ValueError, 'Parameter authenticator_key must have length 32 bytes.', decrypt, '', 'aa79a8ab43636644d77f2b6b34842b98'.decode('hex'), 'abcdef'.decode('hex'))
|
sapo/securitylib-python
|
tests/test_crypto.py
|
Python
|
mit
| 3,513
|
#!/usr/bin/env python
from euler import *
result = sum(primes_less_than(2000000))
print result
|
kerkeslager/sandbox
|
euler/py/euler-0010.py
|
Python
|
mit
| 98
|
import random
from behave import given, when, then
from howabout import get_levenshtein
@given('two long strings')
def step_two_long_strings(context):
alphabet = 'abcdefghijklmnopqrstuvwxyz'
    random_str = lambda size: ''.join(random.choice(alphabet) for _ in range(size))
context.first = random_str(1024)
context.second = random_str(1024)
@given('two empty strings')
def step_two_empty_strings(context):
context.first = ''
context.second = ''
@when('we compare them')
def step_compare_two_strings(context):
context.distance = get_levenshtein(context.first, context.second)
@then('the interpreter should not overflow')
def step_assert_no_overflow(context):
assert not context.failed
@given('"{string}" and the empty string')
def step_a_string_and_the_empty_string(context, string):
context.first = string
context.second = ''
@given('a string "{string}"')
def step_a_string(context, string):
context.first = string
@when('we compare it to itself')
def step_compare_string_to_itself(context):
    string = context.first
    context.distance = get_levenshtein(string, string)
@then('the distance is {distance:d}')
def step_assert_distance(context, distance):
assert context.distance == distance
@given('the first string "{first}" and the second string "{second}" starting with "{prefix}"')
def step_impl2(context, first, second, prefix):
"""
:type context behave.runner.Context
:type first str
:type second str
:type prefix str
"""
context.first = first
context.second = second
|
clibc/howabout
|
features/steps/levenshtein_steps.py
|
Python
|
mit
| 1,581
|
#! /usr/bin/env python
"""
Given a list of files, find all video files and classify them by their base directory:
foo_file1 /foo/bar/baz (index 1)
foo_file2 /foo/bar/bay (index 2)
foo_file3 /foo/bar/baz (index 1, same dir as foo_file1)
"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--dir", help='Dir to search from')
#1. Parse a list of strings (filenames)
# Split on non-alphanumeric characters, clean, track the largest
# sort in decreasing order
# normalize
args = vars(parser.parse_args())
|
capusta/handyAutomation
|
tensorflow/processdata.py
|
Python
|
mit
| 555
|
# -*- coding: utf-8 -*-
import sys
import six
from django.core.exceptions import ValidationError
from ..models import Driver, Competition, Team, Season, Circuit
from ..models import TeamSeason, Seat, GrandPrix, Race, Result, ContenderSeason, CompetitionTeam
def retro_encode(text):
if sys.version_info < (3, 0):
try:
return text.encode('utf-8')
except UnicodeDecodeError:
return text
else:
return text
class CommonCreateTestCase(object):
def set_test_create(self, model, **kwargs):
return model.objects.create(**kwargs)
class CommonDriverTestCase(CommonCreateTestCase):
def get_test_driver(self, **kwargs):
defaults = {
'last_name': u'García',
'first_name': u'Juan'
}
defaults.update(**kwargs)
return self.set_test_create(model=Driver, **defaults)
def get_test_driver_2(self):
kwargs = {
'last_name': u'López',
'first_name': u'Jaime'
}
return self.get_test_driver(**kwargs)
class CommonCompetitionTestCase(CommonCreateTestCase):
def get_test_competition(self, **kwargs):
defaults = {
'name': u'Competición A',
'full_name': u'Competición ABC'
}
defaults.update(**kwargs)
return self.set_test_create(model=Competition, **defaults)
def get_test_competition_2(self):
kwargs = {
'name': u'Competición B',
'full_name': u'Competición BDF'
}
return self.get_test_competition(**kwargs)
class CommonTeamTestCase(CommonCompetitionTestCase):
def get_test_team(self, **kwargs):
defaults = {
'name': u'Escudería Tec Auto',
'full_name': u'Escudería Tec Auto'
}
defaults.update(**kwargs)
return self.set_test_create(model=Team, **defaults)
def get_test_team_2(self):
kwargs = {
'name': u'D27R',
'full_name': u'Driver 27 Racing Team'
}
return self.get_test_team(**kwargs)
def get_test_competition_team(self, **kwargs):
defaults = {}
defaults.update(**kwargs)
if 'competition' not in defaults:
defaults['competition'] = self.get_test_competition()
if 'team' not in defaults:
defaults['team'] = self.get_test_team()
return self.set_test_create(model=CompetitionTeam, **defaults)
class CommonSeasonTestCase(CommonCompetitionTestCase):
def get_test_season(self, **kwargs):
defaults = {
'year': 2016,
'punctuation': u'F1-25'
}
defaults.update(**kwargs)
if 'competition' not in defaults:
defaults['competition'] = self.get_test_competition()
return self.set_test_create(model=Season, **defaults)
class CommonTeamSeasonTestCase(CommonTeamTestCase, CommonSeasonTestCase):
pass
class CommonSeatTestCase(CommonDriverTestCase, CommonTeamTestCase):
def get_test_seat(self, **kwargs):
defaults = {}
defaults.update(**kwargs)
if 'driver' not in defaults:
defaults['driver'] = self.get_test_driver()
if 'team' not in defaults:
defaults['team'] = self.get_test_team()
return self.set_test_create(model=Seat, **defaults)
def get_test_seat_teammate(self, seat_a):
driver = self.get_test_driver_2()
team = seat_a.team
seat_args = {'driver': driver, 'team': team}
return self.get_test_seat(**seat_args)
def get_test_seat_same_driver_other_team(self, seat_a):
driver = seat_a.driver
team = self.get_test_team_2()
seat_args = {'driver': driver, 'team': team}
return self.get_test_seat(**seat_args)
class CommonRaceTestCase(CommonSeasonTestCase):
def get_test_race(self, **kwargs):
defaults = {'round': 1}
defaults.update(**kwargs)
if 'season' not in defaults:
defaults['season'] = self.get_test_season()
return self.set_test_create(model=Race, **defaults)
def get_test_circuit(self, **kwargs):
defaults = {
'name': u'Autódromo de Jacarepaguá',
'city': u'Jacarepaguá',
'opened_in': 1978
}
defaults.update(**kwargs)
return self.set_test_create(model=Circuit, **defaults)
def get_test_grandprix(self, **kwargs):
defaults = {'name': u'Grande Prêmio do Brasil'}
defaults.update(**kwargs)
return self.set_test_create(model=GrandPrix, **defaults)
class CommonResultTestCase(CommonSeatTestCase, CommonRaceTestCase):
def get_test_result(self, **kwargs):
defaults = {}
defaults.update(**kwargs)
if 'seat' not in defaults:
defaults['seat'] = self.get_test_seat()
if 'race' not in defaults:
defaults['race'] = self.get_test_race()
return self.set_test_create(model=Result, **defaults)
|
SRJ9/django-driver27
|
driver27/tests/common.py
|
Python
|
mit
| 4,985
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Models for user, blog, comment.
'''
__author__ = 'Shawling'
import time
import uuid
from orm import BooleanField, FloatField, Model, StringField, TextField, IntegerField
# Guarantee id uniqueness by concatenating a millisecond timestamp with Python's built-in uuid4 algorithm
def next_id():
return '%015d%s000' % (int(time.time() * 1000), uuid.uuid4().hex)
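# Note (added for clarity): '%015d' zero-pads the millisecond timestamp to 15
# digits and uuid4().hex contributes 32 hex characters, so with the trailing
# '000' every id is exactly 50 characters, matching the varchar(50) columns.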
class Blog(Model):
__table__ = 'blogs'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
title = StringField(ddl='varchar(50)')
summary = StringField(ddl='varchar(50)')
content = TextField()
picture = StringField(ddl='varchar(50)')
created_at = FloatField(default=time.time)
class TagOfBlog(Model):
__table__ = 'tag_of_blog'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
blog_id = StringField(ddl='varchar(50)')
tag_id = StringField(ddl='varchar(50)')
class Tag(Model):
__table__ = 'tags'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
name = StringField(ddl='varchar(50)')
class Pic(Model):
__table__ = 'pictures'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
picture_path = StringField(ddl='varchar(50)')
blog_id = StringField(ddl='varchar(50)')
feel = StringField(default='', ddl='varchar(50)')
created_at = FloatField(default=time.time)
class Comment(Model):
__table__ = 'comments'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
blog_id = StringField(ddl='varchar(50)')
user_name = StringField(ddl='varchar(50)')
user_email = StringField(ddl='varchar(50)')
user_website = StringField(ddl='varchar(50)')
user_ip = StringField(ddl='varchar(50)')
content = TextField()
created_at = FloatField(default=time.time)
class IP(Model):
__table__ = 'ips'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
ip = StringField(ddl='varchar(50)')
country = StringField(ddl='varchar(50)')
province = StringField(ddl='varchar(50)')
city = StringField(ddl='varchar(50)')
isp = StringField(ddl='varchar(50)')
last_time = FloatField(default=time.time)
access_count = IntegerField(default=1)
|
Shawling/STAMS
|
www/models.py
|
Python
|
mit
| 2,265
|
#!/usr/bin/env python
"""GUI for Mine Sweeper.
Author: Yuhuang Hu
Email : duguyue100@gmail.com
"""
from __future__ import print_function
import argparse
try:
from PyQt4 import QtGui, QtCore
    from PyQt4.QtGui import QWidget, QApplication, QGridLayout
except ImportError:
from PyQt5 import QtCore
from PyQt5.QtWidgets import QWidget, QApplication, QGridLayout
from minesweeper import MSGame, gui
def ms_game_main(board_width, board_height, num_mines, port, ip_add):
"""Main function for Mine Sweeper Game.
Parameters
----------
board_width : int
the width of the board (> 0)
board_height : int
the height of the board (> 0)
num_mines : int
the number of mines, cannot be larger than
(board_width x board_height)
port : int
UDP port number, default is 5678
ip_add : string
the ip address for receiving the command,
default is localhost.
"""
ms_game = MSGame(board_width, board_height, num_mines,
port=port, ip_add=ip_add)
ms_app = QApplication([])
ms_window = QWidget()
ms_window.setAutoFillBackground(True)
ms_window.setWindowTitle("Mine Sweeper")
ms_layout = QGridLayout()
ms_window.setLayout(ms_layout)
fun_wg = gui.ControlWidget()
grid_wg = gui.GameWidget(ms_game, fun_wg)
remote_thread = gui.RemoteControlThread()
def update_grid_remote(move_msg):
"""Update grid from remote control."""
if grid_wg.ms_game.game_status == 2:
grid_wg.ms_game.play_move_msg(str(move_msg))
grid_wg.update_grid()
remote_thread.transfer.connect(update_grid_remote)
def reset_button_state():
"""Reset button state."""
grid_wg.reset_game()
fun_wg.reset_button.clicked.connect(reset_button_state)
ms_layout.addWidget(fun_wg, 0, 0)
ms_layout.addWidget(grid_wg, 1, 0)
remote_thread.control_start(grid_wg.ms_game)
ms_window.show()
ms_app.exec_()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Mine Sweeper Minesweeper \
with interfaces for \
Reinforcement Learning \
by Yuhuang Hu")
parser.add_argument("--board-width", type=int,
default=20,
help="width of the board.")
parser.add_argument("--board-height", type=int,
default=20,
help="height of the board.")
parser.add_argument("--num-mines", type=int,
default=40,
help="number of mines.")
parser.add_argument("--port", type=int,
default=5678,
help="The port for TCP connection.")
parser.add_argument("--ip-add", type=str,
default="127.0.0.1",
help="The IP address for TCP connection.")
args = parser.parse_args()
ms_game_main(**vars(args))
|
duguyue100/minesweeper
|
scripts/ms-gui.py
|
Python
|
mit
| 3,092
|
import os
def main():
folders = os.listdir('Input_TS/')
    for foldername in folders:
        if not os.path.exists('out/' + foldername):
            os.makedirs('out/' + foldername)
        for i in range(200):
            os.system('python detectanoms.py Input_TS/' + foldername + '/' + str(i) + '.csv out/' + foldername + '/' + str(i) + '.txt')
if __name__ == '__main__':
main()
|
cs60050/TeamGabru
|
3-Time-Series-Analysis/3-AnomalyDetection/runner.py
|
Python
|
mit
| 353
|
# Definition of the version number
__all__ = ["__version__"]
# major, minor, patch, -extra
version_info = 0, 8, 1
# Nice string for the version
__version__ = '.'.join(map(str, version_info))
|
lrq3000/fdict
|
fdict/_version.py
|
Python
|
mit
| 194
|
"""
"""
import time
import numpy as np
import scipy as sp
import pandas as pd
import xgboost as xgb
import re
from sklearn.datasets import make_classification
from sklearn.cross_validation import StratifiedShuffleSplit
def get_leaf_values(tree_str):
# To find 'leaf=0.123\n'
prog=re.compile(r"(?<=leaf\=)(.+)\n")
result = [float(rval) for rval in prog.findall(tree_str)]
return np.array(result)
def get_all_leaves(bst):
dmp = bst.get_dump()
return [get_leaf_values(tree) for tree in dmp]
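# Note (added for clarity): with objective 'multi:softmax', xgboost grows one
# tree per class per boosting round, so get_all_leaves returns
# n_rounds * n_classes arrays -- hence the reshape((n_rounds, n_classes))
# applied to the per-tree leaf counts below.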
# init begin
n = 2 ** 15
n_classes = 3
X, y = make_classification(n_samples=n, n_classes=n_classes,
n_features=100, n_informative=75, n_redundant=20,
class_sep=0.5, shuffle=True, random_state=123)
sss = StratifiedShuffleSplit(y, n_iter=1, test_size=0.2, random_state=321)
train_idx, valid_idx = list(sss)[0]
df_data = pd.DataFrame(np.column_stack((y, X)))
df_data.iloc[train_idx].to_csv('cl3_train.csv', index=False, header=False)
df_data.iloc[valid_idx].to_csv('cl3_valid.csv', index=False, header=False)
# init end
# r013
# No preprocessing
# 2016/11/2 40m
dtrain = xgb.DMatrix(X[train_idx], label = y[train_idx])
dvalid = xgb.DMatrix(X[valid_idx], label = y[valid_idx])
n_rounds = 500
evals_result = {}
param = {'max_depth':100, 'eta':0.1, 'silent':1, 'objective':'multi:softmax',
'num_class':n_classes, 'min_child_weight':100, 'lambda':0,
'eval_metric':'mlogloss', 'nthread':-1, 'seed':123}
t0 = time.time()
bst = xgb.train(param, dtrain, n_rounds, [(dvalid, 'valid')],
evals_result=evals_result, verbose_eval=True)
print(time.time() - t0)
tmp = get_all_leaves(bst)
n_nodes = np.array([len(s) for s in tmp]).reshape((n_rounds, n_classes))
df = pd.DataFrame({'valid_loss':evals_result['valid']['mlogloss'],
'leaf_cnt_0':n_nodes[:,0],
'leaf_cnt_1':n_nodes[:,1],
'leaf_cnt_2':n_nodes[:,2]})
df.to_csv('log/r013.csv')
print(df.iloc[99::100,:])
#     leaf_cnt_0  leaf_cnt_1  leaf_cnt_2  valid_loss
# 99          48          48          49    0.447766
# 199         29          28          29    0.309277
# 299         22          19          21    0.250185
# 399         15          17          15    0.217990
# 499         14          13          14    0.197343
# r014
# equal_frequency_binning
# 2016/11/2 21.6m
def equal_frequency_binning(X, nbins=255):
rval_X = []
rval_bins = []
for i in range(X.shape[1]):
x = X[:, i]
bins = np.percentile(x, np.linspace(0, 100, nbins))
rval_bins.append(bins)
x_cut = pd.cut(x, bins)
rval_X.append(x_cut.codes)
return np.column_stack(rval_X), rval_bins
X2, ignore = equal_frequency_binning(X, nbins=255)
dtrain = xgb.DMatrix(X2[train_idx], label = y[train_idx])
dvalid = xgb.DMatrix(X2[valid_idx], label = y[valid_idx])
n_rounds = 500
evals_result = {}
param = {'max_depth':100, 'eta':0.1, 'silent':1, 'objective':'multi:softmax',
'num_class':n_classes, 'min_child_weight':100, 'lambda':0,
'eval_metric':'mlogloss', 'nthread':-1, 'seed':123}
t0 = time.time()
bst = xgb.train(param, dtrain, n_rounds, [(dvalid, 'valid')],
evals_result=evals_result, verbose_eval=True)
print(time.time() - t0)
tmp = get_all_leaves(bst)
n_nodes = np.array([len(s) for s in tmp]).reshape((n_rounds, n_classes))
df = pd.DataFrame({'valid_loss':evals_result['valid']['mlogloss'],
'leaf_cnt_0':n_nodes[:,0],
'leaf_cnt_1':n_nodes[:,1],
'leaf_cnt_2':n_nodes[:,2]})
df.to_csv('log/r014.csv')
print(df.iloc[99::100,:])
#     leaf_cnt_0  leaf_cnt_1  leaf_cnt_2  valid_loss
# 99          50          50          46    0.447109
# 199         30          29          29    0.307661
# 299         22          21          21    0.247279
# 399         18          16          17    0.214577
# 499         13          13          14    0.193776
imp = pd.Series(bst.get_fscore())
print(imp.sort_values(ascending=False).head(10))
# f35    682
# f15    677
# f39    676
# f71    672
# f90    657
# f23    647
# f94    641
# f60    641
# f10    632
# f75    626
print(imp.sort_values(ascending=False).tail(10))
# f73    307
# f26    301
# f72    296
# f59    295
# f13    278
# f17     62
# f47     39
# f29     23
# f34     10
# f93      6
# r015
# equal_frequency_binning
# 2016/11/9 21.8m
def equal_frequency_binning(X, nbins=255):
rval_X = []
rval_bins = []
for i in range(X.shape[1]):
x = X[:, i]
x_cut, bins = pd.qcut(x, nbins, retbins=True)
rval_X.append(x_cut.codes)
rval_bins.append(bins)
return np.column_stack(rval_X), rval_bins
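# Note (added for clarity): this r015 variant relies on pd.qcut, which bins by
# sample quantiles directly and produces nbins intervals, whereas the r014
# version built nbins percentile edge points (nbins - 1 intervals) with
# pd.cut; the intent is the same, but edge handling can differ slightly.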
X2, ignore = equal_frequency_binning(X, nbins=255)
dtrain = xgb.DMatrix(X2[train_idx], label = y[train_idx])
dvalid = xgb.DMatrix(X2[valid_idx], label = y[valid_idx])
n_rounds = 500
evals_result = {}
param = {'max_depth':100, 'eta':0.1, 'silent':1, 'objective':'multi:softmax',
'num_class':n_classes, 'min_child_weight':100, 'lambda':0,
'eval_metric':'mlogloss', 'nthread':-1, 'seed':123}
t0 = time.time()
bst = xgb.train(param, dtrain, n_rounds, [(dvalid, 'valid')],
evals_result=evals_result, verbose_eval=True)
print(time.time() - t0)
tmp = get_all_leaves(bst)
n_nodes = np.array([len(s) for s in tmp]).reshape((n_rounds, n_classes))
df = pd.DataFrame({'valid_loss':evals_result['valid']['mlogloss'],
'leaf_cnt_0':n_nodes[:,0],
'leaf_cnt_1':n_nodes[:,1],
'leaf_cnt_2':n_nodes[:,2]})
df.to_csv('log/r015.csv')
print(df.iloc[99::100,:])
#     leaf_cnt_0  leaf_cnt_1  leaf_cnt_2  valid_loss
# 99          51          49          49    0.446756
# 199         29          30          30    0.307652
# 299         21          21          24    0.248894
# 399         16          16          16    0.216791
# 499         13          14          12    0.195948
imp = pd.Series(bst.get_fscore())
print(imp.sort_values(ascending=False).head(10))
# f39    699
# f35    686
# f15    682
# f60    680
# f90    672
# f23    672
# f10    648
# f71    639
# f75    631
# f62    630
print(imp.sort_values(ascending=False).tail(10))
# f59    320
# f56    302
# f26    280
# f13    272
# f72    265
# f17     62
# f47     41
# f29     26
# f93     17
# f34     10
|
tks0123456789/XGB_experiments
|
multiclass_test.py
|
Python
|
mit
| 6,257
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""The core data store and collection logic for beets.
"""
from __future__ import division, absolute_import, print_function
import os
import sys
import unicodedata
import time
import re
import six
from beets import logging
from beets.mediafile import MediaFile, UnreadableFileError
from beets import plugins
from beets import util
from beets.util import bytestring_path, syspath, normpath, samefile
from beets.util.functemplate import Template
from beets import dbcore
from beets.dbcore import types
import beets
# To use the SQLite "blob" type, it doesn't suffice to provide a byte
# string; SQLite treats that as encoded text. Wrapping it in a `buffer` or a
# `memoryview`, depending on the Python version, tells it that we
# actually mean non-text data.
if six.PY2:
BLOB_TYPE = buffer # noqa: F821
else:
BLOB_TYPE = memoryview
log = logging.getLogger('beets')
# Library-specific query types.
class PathQuery(dbcore.FieldQuery):
"""A query that matches all items under a given path.
Matching can either be case-insensitive or case-sensitive. By
default, the behavior depends on the OS: case-insensitive on Windows
and case-sensitive otherwise.
"""
def __init__(self, field, pattern, fast=True, case_sensitive=None):
"""Create a path query. `pattern` must be a path, either to a
file or a directory.
`case_sensitive` can be a bool or `None`, indicating that the
behavior should depend on the filesystem.
"""
super(PathQuery, self).__init__(field, pattern, fast)
# By default, the case sensitivity depends on the filesystem
# that the query path is located on.
if case_sensitive is None:
path = util.bytestring_path(util.normpath(pattern))
case_sensitive = beets.util.case_sensitive(path)
self.case_sensitive = case_sensitive
# Use a normalized-case pattern for case-insensitive matches.
if not case_sensitive:
pattern = pattern.lower()
# Match the path as a single file.
self.file_path = util.bytestring_path(util.normpath(pattern))
# As a directory (prefix).
self.dir_path = util.bytestring_path(os.path.join(self.file_path, b''))
@classmethod
def is_path_query(cls, query_part):
"""Try to guess whether a unicode query part is a path query.
Condition: separator precedes colon and the file exists.
"""
colon = query_part.find(':')
if colon != -1:
query_part = query_part[:colon]
# Test both `sep` and `altsep` (i.e., both slash and backslash on
# Windows).
return (
(os.sep in query_part or
(os.altsep and os.altsep in query_part)) and
os.path.exists(syspath(normpath(query_part)))
)
def match(self, item):
path = item.path if self.case_sensitive else item.path.lower()
return (path == self.file_path) or path.startswith(self.dir_path)
def col_clause(self):
file_blob = BLOB_TYPE(self.file_path)
dir_blob = BLOB_TYPE(self.dir_path)
if self.case_sensitive:
query_part = '({0} = ?) || (substr({0}, 1, ?) = ?)'
else:
query_part = '(BYTELOWER({0}) = BYTELOWER(?)) || \
(substr(BYTELOWER({0}), 1, ?) = BYTELOWER(?))'
return query_part.format(self.field), \
(file_blob, len(dir_blob), dir_blob)
# Library-specific field types.
class DateType(types.Float):
# TODO representation should be `datetime` object
# TODO distinguish between date and time types
query = dbcore.query.DateQuery
def format(self, value):
return time.strftime(beets.config['time_format'].as_str(),
time.localtime(value or 0))
def parse(self, string):
try:
# Try a formatted date string.
return time.mktime(
time.strptime(string,
beets.config['time_format'].as_str())
)
except ValueError:
# Fall back to a plain timestamp number.
try:
return float(string)
except ValueError:
return self.null
class PathType(types.Type):
"""A dbcore type for filesystem paths. These are represented as
`bytes` objects, in keeping with the Unix filesystem abstraction.
"""
sql = u'BLOB'
query = PathQuery
model_type = bytes
def __init__(self, nullable=False):
"""Create a path type object. `nullable` controls whether the
type may be missing, i.e., None.
"""
self.nullable = nullable
@property
def null(self):
if self.nullable:
return None
else:
return b''
def format(self, value):
return util.displayable_path(value)
def parse(self, string):
return normpath(bytestring_path(string))
def normalize(self, value):
if isinstance(value, six.text_type):
# Paths stored internally as encoded bytes.
return bytestring_path(value)
elif isinstance(value, BLOB_TYPE):
# We unwrap buffers to bytes.
return bytes(value)
else:
return value
def from_sql(self, sql_value):
return self.normalize(sql_value)
def to_sql(self, value):
if isinstance(value, bytes):
value = BLOB_TYPE(value)
return value
class MusicalKey(types.String):
"""String representing the musical key of a song.
The standard format is C, Cm, C#, C#m, etc.
"""
ENHARMONIC = {
r'db': 'c#',
r'eb': 'd#',
r'gb': 'f#',
r'ab': 'g#',
r'bb': 'a#',
}
null = None
def parse(self, key):
key = key.lower()
for flat, sharp in self.ENHARMONIC.items():
key = re.sub(flat, sharp, key)
key = re.sub(r'[\W\s]+minor', 'm', key)
key = re.sub(r'[\W\s]+major', '', key)
return key.capitalize()
def normalize(self, key):
if key is None:
return None
else:
return self.parse(key)
class DurationType(types.Float):
"""Human-friendly (M:SS) representation of a time interval."""
query = dbcore.query.DurationQuery
def format(self, value):
if not beets.config['format_raw_length'].get(bool):
return beets.ui.human_seconds_short(value or 0.0)
else:
return value
def parse(self, string):
try:
# Try to format back hh:ss to seconds.
return util.raw_seconds_short(string)
except ValueError:
# Fall back to a plain float.
try:
return float(string)
except ValueError:
return self.null
# Library-specific sort types.
class SmartArtistSort(dbcore.query.Sort):
"""Sort by artist (either album artist or track artist),
prioritizing the sort field over the raw field.
"""
def __init__(self, model_cls, ascending=True, case_insensitive=True):
self.album = model_cls is Album
self.ascending = ascending
self.case_insensitive = case_insensitive
def order_clause(self):
order = "ASC" if self.ascending else "DESC"
field = 'albumartist' if self.album else 'artist'
collate = 'COLLATE NOCASE' if self.case_insensitive else ''
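        # For instance, sorting items by artist case-insensitively yields:
        #   (CASE artist_sort WHEN NULL THEN artist WHEN "" THEN artist
        #    ELSE artist_sort END) COLLATE NOCASE ASC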
return ('(CASE {0}_sort WHEN NULL THEN {0} '
'WHEN "" THEN {0} '
'ELSE {0}_sort END) {1} {2}').format(field, collate, order)
def sort(self, objs):
if self.album:
field = lambda a: a.albumartist_sort or a.albumartist
else:
field = lambda i: i.artist_sort or i.artist
if self.case_insensitive:
key = lambda x: field(x).lower()
else:
key = field
return sorted(objs, key=key, reverse=not self.ascending)
# Special path format key.
PF_KEY_DEFAULT = 'default'
# Exceptions.
@six.python_2_unicode_compatible
class FileOperationError(Exception):
"""Indicates an error when interacting with a file on disk.
Possibilities include an unsupported media type, a permissions
error, and an unhandled Mutagen exception.
"""
def __init__(self, path, reason):
"""Create an exception describing an operation on the file at
`path` with the underlying (chained) exception `reason`.
"""
super(FileOperationError, self).__init__(path, reason)
self.path = path
self.reason = reason
def text(self):
"""Get a string representing the error. Describes both the
underlying reason and the file path in question.
"""
return u'{0}: {1}'.format(
util.displayable_path(self.path),
six.text_type(self.reason)
)
# define __str__ as text to avoid infinite loop on super() calls
# with @six.python_2_unicode_compatible
__str__ = text
@six.python_2_unicode_compatible
class ReadError(FileOperationError):
"""An error while reading a file (i.e. in `Item.read`).
"""
def __str__(self):
return u'error reading ' + super(ReadError, self).text()
@six.python_2_unicode_compatible
class WriteError(FileOperationError):
"""An error while writing a file (i.e. in `Item.write`).
"""
def __str__(self):
return u'error writing ' + super(WriteError, self).text()
# Item and Album model classes.
@six.python_2_unicode_compatible
class LibModel(dbcore.Model):
"""Shared concrete functionality for Items and Albums.
"""
_format_config_key = None
"""Config key that specifies how an instance should be formatted.
"""
def _template_funcs(self):
funcs = DefaultTemplateFunctions(self, self._db).functions()
funcs.update(plugins.template_funcs())
return funcs
def store(self, fields=None):
super(LibModel, self).store(fields)
plugins.send('database_change', lib=self._db, model=self)
def remove(self):
super(LibModel, self).remove()
plugins.send('database_change', lib=self._db, model=self)
def add(self, lib=None):
super(LibModel, self).add(lib)
plugins.send('database_change', lib=self._db, model=self)
def __format__(self, spec):
if not spec:
spec = beets.config[self._format_config_key].as_str()
assert isinstance(spec, six.text_type)
return self.evaluate_template(spec)
def __str__(self):
return format(self)
def __bytes__(self):
return self.__str__().encode('utf-8')
class FormattedItemMapping(dbcore.db.FormattedMapping):
"""Add lookup for album-level fields.
Album-level fields take precedence if `for_path` is true.
"""
def __init__(self, item, for_path=False):
super(FormattedItemMapping, self).__init__(item, for_path)
self.album = item.get_album()
self.album_keys = []
if self.album:
for key in self.album.keys(True):
if key in Album.item_keys or key not in item._fields.keys():
self.album_keys.append(key)
self.all_keys = set(self.model_keys).union(self.album_keys)
def _get(self, key):
"""Get the value for a key, either from the album or the item.
Raise a KeyError for invalid keys.
"""
if self.for_path and key in self.album_keys:
return self._get_formatted(self.album, key)
elif key in self.model_keys:
return self._get_formatted(self.model, key)
elif key in self.album_keys:
return self._get_formatted(self.album, key)
else:
raise KeyError(key)
def __getitem__(self, key):
"""Get the value for a key. Certain unset values are remapped.
"""
value = self._get(key)
# `artist` and `albumartist` fields fall back to one another.
# This is helpful in path formats when the album artist is unset
# on as-is imports.
if key == 'artist' and not value:
return self._get('albumartist')
elif key == 'albumartist' and not value:
return self._get('artist')
else:
return value
def __iter__(self):
return iter(self.all_keys)
def __len__(self):
return len(self.all_keys)
class Item(LibModel):
_table = 'items'
_flex_table = 'item_attributes'
_fields = {
'id': types.PRIMARY_ID,
'path': PathType(),
'album_id': types.FOREIGN_ID,
'title': types.STRING,
'artist': types.STRING,
'artist_sort': types.STRING,
'artist_credit': types.STRING,
'album': types.STRING,
'albumartist': types.STRING,
'albumartist_sort': types.STRING,
'albumartist_credit': types.STRING,
'genre': types.STRING,
'lyricist': types.STRING,
'composer': types.STRING,
'composer_sort': types.STRING,
'arranger': types.STRING,
'grouping': types.STRING,
'year': types.PaddedInt(4),
'month': types.PaddedInt(2),
'day': types.PaddedInt(2),
'track': types.PaddedInt(2),
'tracktotal': types.PaddedInt(2),
'disc': types.PaddedInt(2),
'disctotal': types.PaddedInt(2),
'lyrics': types.STRING,
'comments': types.STRING,
'bpm': types.INTEGER,
'comp': types.BOOLEAN,
'mb_trackid': types.STRING,
'mb_albumid': types.STRING,
'mb_artistid': types.STRING,
'mb_albumartistid': types.STRING,
'albumtype': types.STRING,
'label': types.STRING,
'acoustid_fingerprint': types.STRING,
'acoustid_id': types.STRING,
'mb_releasegroupid': types.STRING,
'asin': types.STRING,
'catalognum': types.STRING,
'script': types.STRING,
'language': types.STRING,
'country': types.STRING,
'albumstatus': types.STRING,
'media': types.STRING,
'albumdisambig': types.STRING,
'disctitle': types.STRING,
'encoder': types.STRING,
'rg_track_gain': types.NULL_FLOAT,
'rg_track_peak': types.NULL_FLOAT,
'rg_album_gain': types.NULL_FLOAT,
'rg_album_peak': types.NULL_FLOAT,
'r128_track_gain': types.PaddedInt(6),
'r128_album_gain': types.PaddedInt(6),
'original_year': types.PaddedInt(4),
'original_month': types.PaddedInt(2),
'original_day': types.PaddedInt(2),
'initial_key': MusicalKey(),
'length': DurationType(),
'bitrate': types.ScaledInt(1000, u'kbps'),
'format': types.STRING,
'samplerate': types.ScaledInt(1000, u'kHz'),
'bitdepth': types.INTEGER,
'channels': types.INTEGER,
'mtime': DateType(),
'added': DateType(),
}
_search_fields = ('artist', 'title', 'comments',
'album', 'albumartist', 'genre')
_types = {
'data_source': types.STRING,
}
_media_fields = set(MediaFile.readable_fields()) \
.intersection(_fields.keys())
"""Set of item fields that are backed by `MediaFile` fields.
Any kind of field (fixed, flexible, and computed) may be a media
field. Only these fields are read from disk in `read` and written in
`write`.
"""
_media_tag_fields = set(MediaFile.fields()).intersection(_fields.keys())
"""Set of item fields that are backed by *writable* `MediaFile` tag
fields.
This excludes fields that represent audio data, such as `bitrate` or
`length`.
"""
_formatter = FormattedItemMapping
_sorts = {'artist': SmartArtistSort}
_format_config_key = 'format_item'
@classmethod
def _getters(cls):
getters = plugins.item_field_getters()
getters['singleton'] = lambda i: i.album_id is None
getters['filesize'] = Item.try_filesize # In bytes.
return getters
@classmethod
def from_path(cls, path):
"""Creates a new item from the media file at the specified path.
"""
# Initiate with values that aren't read from files.
i = cls(album_id=None)
i.read(path)
i.mtime = i.current_mtime() # Initial mtime.
return i
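    # Hedged usage sketch (not in the original; `lib` and the path are
    # hypothetical):
    #     item = Item.from_path(b'/music/song.mp3')
    #     lib.add(item)  # attach to an existing Library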
def __setitem__(self, key, value):
"""Set the item's value for a standard field or a flexattr.
"""
# Encode unicode paths and read buffers.
if key == 'path':
if isinstance(value, six.text_type):
value = bytestring_path(value)
elif isinstance(value, BLOB_TYPE):
value = bytes(value)
if key in MediaFile.fields():
self.mtime = 0 # Reset mtime on dirty.
super(Item, self).__setitem__(key, value)
def update(self, values):
"""Set all key/value pairs in the mapping. If mtime is
specified, it is not reset (as it might otherwise be).
"""
super(Item, self).update(values)
if self.mtime == 0 and 'mtime' in values:
self.mtime = values['mtime']
def get_album(self):
"""Get the Album object that this item belongs to, if any, or
None if the item is a singleton or is not associated with a
library.
"""
if not self._db:
return None
return self._db.get_album(self)
# Interaction with file metadata.
def read(self, read_path=None):
"""Read the metadata from the associated file.
If `read_path` is specified, read metadata from that file
instead. Updates all the properties in `_media_fields`
from the media file.
Raises a `ReadError` if the file could not be read.
"""
if read_path is None:
read_path = self.path
else:
read_path = normpath(read_path)
try:
mediafile = MediaFile(syspath(read_path))
except UnreadableFileError as exc:
raise ReadError(read_path, exc)
for key in self._media_fields:
value = getattr(mediafile, key)
if isinstance(value, six.integer_types):
if value.bit_length() > 63:
value = 0
self[key] = value
# Database's mtime should now reflect the on-disk value.
if read_path == self.path:
self.mtime = self.current_mtime()
self.path = read_path
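    # Behavior note (sketch, not in the original): read() clamps integer tag
    # values wider than SQLite's signed 64-bit range, so e.g. a corrupt tag
    # equal to 2**63 is stored as 0 instead of overflowing the database.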
def write(self, path=None, tags=None):
"""Write the item's metadata to a media file.
All fields in `_media_fields` are written to disk according to
the values on this object.
`path` is the path of the mediafile to write the data to. It
defaults to the item's path.
        `tags` is a dictionary of additional metadata that should be
written to the file. (These tags need not be in `_media_fields`.)
Can raise either a `ReadError` or a `WriteError`.
"""
if path is None:
path = self.path
else:
path = normpath(path)
# Get the data to write to the file.
item_tags = dict(self)
item_tags = {k: v for k, v in item_tags.items()
if k in self._media_fields} # Only write media fields.
if tags is not None:
item_tags.update(tags)
plugins.send('write', item=self, path=path, tags=item_tags)
# Open the file.
try:
mediafile = MediaFile(syspath(path),
id3v23=beets.config['id3v23'].get(bool))
except UnreadableFileError as exc:
raise ReadError(path, exc)
# Write the tags to the file.
mediafile.update(item_tags)
try:
mediafile.save()
except UnreadableFileError as exc:
raise WriteError(self.path, exc)
# The file has a new mtime.
if path == self.path:
self.mtime = self.current_mtime()
plugins.send('after_write', item=self, path=path)
def try_write(self, path=None, tags=None):
"""Calls `write()` but catches and logs `FileOperationError`
exceptions.
        Returns `False` if an exception was caught and `True` otherwise.
"""
try:
self.write(path, tags)
return True
except FileOperationError as exc:
log.error(u"{0}", exc)
return False
def try_sync(self, write, move, with_album=True):
"""Synchronize the item with the database and, possibly, updates its
tags on disk and its path (by moving the file).
`write` indicates whether to write new tags into the file. Similarly,
`move` controls whether the path should be updated. In the
latter case, files are *only* moved when they are inside their
library's directory (if any).
Similar to calling :meth:`write`, :meth:`move`, and :meth:`store`
(conditionally).
"""
if write:
self.try_write()
if move:
# Check whether this file is inside the library directory.
if self._db and self._db.directory in util.ancestry(self.path):
log.debug(u'moving {0} to synchronize path',
util.displayable_path(self.path))
self.move(with_album=with_album)
self.store()
# Files themselves.
def move_file(self, dest, copy=False, link=False, hardlink=False):
"""Moves or copies the item's file, updating the path value if
the move succeeds. If a file exists at ``dest``, then it is
slightly modified to be unique.
"""
if not util.samefile(self.path, dest):
dest = util.unique_path(dest)
if copy:
util.copy(self.path, dest)
plugins.send("item_copied", item=self, source=self.path,
destination=dest)
elif link:
util.link(self.path, dest)
plugins.send("item_linked", item=self, source=self.path,
destination=dest)
elif hardlink:
util.hardlink(self.path, dest)
plugins.send("item_hardlinked", item=self, source=self.path,
destination=dest)
else:
plugins.send("before_item_moved", item=self, source=self.path,
destination=dest)
util.move(self.path, dest)
plugins.send("item_moved", item=self, source=self.path,
destination=dest)
# Either copying or moving succeeded, so update the stored path.
self.path = dest
def current_mtime(self):
"""Returns the current mtime of the file, rounded to the nearest
integer.
"""
return int(os.path.getmtime(syspath(self.path)))
def try_filesize(self):
"""Get the size of the underlying file in bytes.
If the file is missing, return 0 (and log a warning).
"""
try:
return os.path.getsize(syspath(self.path))
        except Exception as exc:  # OSError and anything else: log and return 0.
log.warning(u'could not get filesize: {0}', exc)
return 0
# Model methods.
def remove(self, delete=False, with_album=True):
"""Removes the item. If `delete`, then the associated file is
removed from disk. If `with_album`, then the item's album (if
        any) is removed if the item was the last in the album.
"""
super(Item, self).remove()
# Remove the album if it is empty.
if with_album:
album = self.get_album()
if album and not album.items():
album.remove(delete, False)
# Send a 'item_removed' signal to plugins
plugins.send('item_removed', item=self)
# Delete the associated file.
if delete:
util.remove(self.path)
util.prune_dirs(os.path.dirname(self.path), self._db.directory)
self._db._memotable = {}
def move(self, copy=False, link=False, hardlink=False, basedir=None,
with_album=True, store=True):
"""Move the item to its designated location within the library
directory (provided by destination()). Subdirectories are
created as needed. If the operation succeeds, the item's path
field is updated to reflect the new location.
        If `copy` is true, the file is copied rather than moved.
Similarly, `link` creates a symlink instead, and `hardlink`
creates a hardlink.
basedir overrides the library base directory for the
destination.
If the item is in an album, the album is given an opportunity to
move its art. (This can be disabled by passing
with_album=False.)
By default, the item is stored to the database if it is in the
database, so any dirty fields prior to the move() call will be written
as a side effect. You probably want to call save() to commit the DB
        transaction. If `store` is false, however, the item won't be stored, and
        you'll have to store it manually after invoking this method.
"""
self._check_db()
dest = self.destination(basedir=basedir)
# Create necessary ancestry for the move.
util.mkdirall(dest)
# Perform the move and store the change.
old_path = self.path
self.move_file(dest, copy, link, hardlink)
if store:
self.store()
# If this item is in an album, move its art.
if with_album:
album = self.get_album()
if album:
album.move_art(copy)
if store:
album.store()
# Prune vacated directory.
if not copy:
util.prune_dirs(os.path.dirname(old_path), self._db.directory)
# Templating.
def destination(self, fragment=False, basedir=None, platform=None,
path_formats=None):
"""Returns the path in the library directory designated for the
item (i.e., where the file ought to be). fragment makes this
method return just the path fragment underneath the root library
directory; the path is also returned as Unicode instead of
encoded as a bytestring. basedir can override the library's base
directory for the destination.
"""
self._check_db()
platform = platform or sys.platform
basedir = basedir or self._db.directory
path_formats = path_formats or self._db.path_formats
# Use a path format based on a query, falling back on the
# default.
for query, path_format in path_formats:
if query == PF_KEY_DEFAULT:
continue
query, _ = parse_query_string(query, type(self))
if query.match(self):
# The query matches the item! Use the corresponding path
# format.
break
else:
# No query matched; fall back to default.
for query, path_format in path_formats:
if query == PF_KEY_DEFAULT:
break
else:
assert False, u"no default path format"
if isinstance(path_format, Template):
subpath_tmpl = path_format
else:
subpath_tmpl = Template(path_format)
# Evaluate the selected template.
subpath = self.evaluate_template(subpath_tmpl, True)
# Prepare path for output: normalize Unicode characters.
if platform == 'darwin':
subpath = unicodedata.normalize('NFD', subpath)
else:
subpath = unicodedata.normalize('NFC', subpath)
if beets.config['asciify_paths']:
subpath = util.asciify_path(
subpath,
beets.config['path_sep_replace'].as_str()
)
maxlen = beets.config['max_filename_length'].get(int)
if not maxlen:
# When zero, try to determine from filesystem.
maxlen = util.max_filename_length(self._db.directory)
subpath, fellback = util.legalize_path(
subpath, self._db.replacements, maxlen,
os.path.splitext(self.path)[1], fragment
)
if fellback:
# Print an error message if legalization fell back to
# default replacements because of the maximum length.
log.warning(
u'Fell back to default replacements when naming '
u'file {}. Configure replacements to avoid lengthening '
u'the filename.',
subpath
)
if fragment:
return util.as_string(subpath)
else:
return normpath(os.path.join(basedir, subpath))
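# Configuration sketch (not part of beets; the values are hypothetical): the
# library's `path_formats` is a sequence of (query, template) pairs consulted
# by Item.destination() above. The first matching query wins, with the
# PF_KEY_DEFAULT entry as the fallback:
#
#     path_formats = [
#         (u'comp:true', u'Compilations/$album/$track $title'),
#         (PF_KEY_DEFAULT, u'$albumartist/$album/$track $title'),
#     ]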
class Album(LibModel):
"""Provides access to information about albums stored in a
library. Reflects the library's "albums" table, including album
art.
"""
_table = 'albums'
_flex_table = 'album_attributes'
_always_dirty = True
_fields = {
'id': types.PRIMARY_ID,
'artpath': PathType(True),
'added': DateType(),
'albumartist': types.STRING,
'albumartist_sort': types.STRING,
'albumartist_credit': types.STRING,
'album': types.STRING,
'genre': types.STRING,
'year': types.PaddedInt(4),
'month': types.PaddedInt(2),
'day': types.PaddedInt(2),
'disctotal': types.PaddedInt(2),
'comp': types.BOOLEAN,
'mb_albumid': types.STRING,
'mb_albumartistid': types.STRING,
'albumtype': types.STRING,
'label': types.STRING,
'mb_releasegroupid': types.STRING,
'asin': types.STRING,
'catalognum': types.STRING,
'script': types.STRING,
'language': types.STRING,
'country': types.STRING,
'albumstatus': types.STRING,
'albumdisambig': types.STRING,
'rg_album_gain': types.NULL_FLOAT,
'rg_album_peak': types.NULL_FLOAT,
'r128_album_gain': types.PaddedInt(6),
'original_year': types.PaddedInt(4),
'original_month': types.PaddedInt(2),
'original_day': types.PaddedInt(2),
}
_search_fields = ('album', 'albumartist', 'genre')
_types = {
'path': PathType(),
'data_source': types.STRING,
}
_sorts = {
'albumartist': SmartArtistSort,
'artist': SmartArtistSort,
}
item_keys = [
'added',
'albumartist',
'albumartist_sort',
'albumartist_credit',
'album',
'genre',
'year',
'month',
'day',
'disctotal',
'comp',
'mb_albumid',
'mb_albumartistid',
'albumtype',
'label',
'mb_releasegroupid',
'asin',
'catalognum',
'script',
'language',
'country',
'albumstatus',
'albumdisambig',
'rg_album_gain',
'rg_album_peak',
'r128_album_gain',
'original_year',
'original_month',
'original_day',
]
"""List of keys that are set on an album's items.
"""
_format_config_key = 'format_album'
@classmethod
def _getters(cls):
# In addition to plugin-provided computed fields, also expose
# the album's directory as `path`.
getters = plugins.album_field_getters()
getters['path'] = Album.item_dir
getters['albumtotal'] = Album._albumtotal
return getters
def items(self):
"""Returns an iterable over the items associated with this
album.
"""
return self._db.items(dbcore.MatchQuery('album_id', self.id))
def remove(self, delete=False, with_items=True):
"""Removes this album and all its associated items from the
library. If delete, then the items' files are also deleted
from disk, along with any album art. The directories
containing the album are also removed (recursively) if empty.
Set with_items to False to avoid removing the album's items.
"""
super(Album, self).remove()
# Delete art file.
if delete:
artpath = self.artpath
if artpath:
util.remove(artpath)
# Remove (and possibly delete) the constituent items.
if with_items:
for item in self.items():
item.remove(delete, False)
def move_art(self, copy=False, link=False, hardlink=False):
"""Move or copy any existing album art so that it remains in the
same directory as the items.
"""
old_art = self.artpath
if not old_art:
return
new_art = self.art_destination(old_art)
if new_art == old_art:
return
new_art = util.unique_path(new_art)
log.debug(u'moving album art {0} to {1}',
util.displayable_path(old_art),
util.displayable_path(new_art))
if copy:
util.copy(old_art, new_art)
elif link:
util.link(old_art, new_art)
elif hardlink:
util.hardlink(old_art, new_art)
else:
util.move(old_art, new_art)
self.artpath = new_art
# Prune old path when moving.
if not copy:
util.prune_dirs(os.path.dirname(old_art),
self._db.directory)
def move(self, copy=False, link=False, hardlink=False, basedir=None,
store=True):
"""Moves (or copies) all items to their destination. Any album
art moves along with them. basedir overrides the library base
directory for the destination. By default, the album is stored to the
        database, persisting any modifications to its metadata. If `store` is
        false, however, the album is not stored automatically, and you'll have
to manually store it after invoking this method.
"""
basedir = basedir or self._db.directory
# Ensure new metadata is available to items for destination
# computation.
if store:
self.store()
# Move items.
items = list(self.items())
for item in items:
item.move(copy, link, hardlink, basedir=basedir, with_album=False,
store=store)
# Move art.
self.move_art(copy, link, hardlink)
if store:
self.store()
def item_dir(self):
"""Returns the directory containing the album's first item,
provided that such an item exists.
"""
item = self.items().get()
if not item:
raise ValueError(u'empty album')
return os.path.dirname(item.path)
def _albumtotal(self):
"""Return the total number of tracks on all discs on the album
"""
if self.disctotal == 1 or not beets.config['per_disc_numbering']:
return self.items()[0].tracktotal
counted = []
total = 0
for item in self.items():
if item.disc in counted:
continue
total += item.tracktotal
counted.append(item.disc)
if len(counted) == self.disctotal:
break
return total
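    # Worked example (not in the original): with per_disc_numbering enabled,
    # items on discs [(disc=1, tracktotal=12), (disc=2, tracktotal=9)] and
    # disctotal == 2 give _albumtotal() == 12 + 9 == 21.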
def art_destination(self, image, item_dir=None):
"""Returns a path to the destination for the album art image
for the album. `image` is the path of the image that will be
moved there (used for its extension).
The path construction uses the existing path of the album's
items, so the album must contain at least one item or
item_dir must be provided.
"""
image = bytestring_path(image)
item_dir = item_dir or self.item_dir()
filename_tmpl = Template(
beets.config['art_filename'].as_str())
subpath = self.evaluate_template(filename_tmpl, True)
if beets.config['asciify_paths']:
subpath = util.asciify_path(
subpath,
beets.config['path_sep_replace'].as_str()
)
subpath = util.sanitize_path(subpath,
replacements=self._db.replacements)
subpath = bytestring_path(subpath)
_, ext = os.path.splitext(image)
dest = os.path.join(item_dir, subpath + ext)
return bytestring_path(dest)
def set_art(self, path, copy=True):
"""Sets the album's cover art to the image at the given path.
The image is copied (or moved) into place, replacing any
existing art.
Sends an 'art_set' event with `self` as the sole argument.
"""
path = bytestring_path(path)
oldart = self.artpath
artdest = self.art_destination(path)
if oldart and samefile(path, oldart):
# Art already set.
return
elif samefile(path, artdest):
# Art already in place.
self.artpath = path
return
# Normal operation.
if oldart == artdest:
util.remove(oldart)
artdest = util.unique_path(artdest)
if copy:
util.copy(path, artdest)
else:
util.move(path, artdest)
self.artpath = artdest
plugins.send('art_set', album=self)
def store(self, fields=None):
"""Update the database with the album information. The album's
tracks are also updated.
:param fields: The fields to be stored. If not specified, all fields
will be.
"""
# Get modified track fields.
track_updates = {}
for key in self.item_keys:
if key in self._dirty:
track_updates[key] = self[key]
with self._db.transaction():
super(Album, self).store(fields)
if track_updates:
for item in self.items():
for key, value in track_updates.items():
item[key] = value
item.store()
def try_sync(self, write, move):
"""Synchronize the album and its items with the database.
Optionally, also write any new tags into the files and update
their paths.
`write` indicates whether to write tags to the item files, and
`move` controls whether files (both audio and album art) are
moved.
"""
self.store()
for item in self.items():
item.try_sync(write, move)
# Query construction helpers.
def parse_query_parts(parts, model_cls):
"""Given a beets query string as a list of components, return the
`Query` and `Sort` they represent.
Like `dbcore.parse_sorted_query`, with beets query prefixes and
special path query detection.
"""
# Get query types and their prefix characters.
prefixes = {':': dbcore.query.RegexpQuery}
prefixes.update(plugins.queries())
# Special-case path-like queries, which are non-field queries
# containing path separators (/).
path_parts = []
non_path_parts = []
for s in parts:
if PathQuery.is_path_query(s):
path_parts.append(s)
else:
non_path_parts.append(s)
query, sort = dbcore.parse_sorted_query(
model_cls, non_path_parts, prefixes
)
# Add path queries to aggregate query.
# Match field / flexattr depending on whether the model has the path field
fast_path_query = 'path' in model_cls._fields
query.subqueries += [PathQuery('path', s, fast_path_query)
for s in path_parts]
return query, sort
def parse_query_string(s, model_cls):
"""Given a beets query string, return the `Query` and `Sort` they
represent.
The string is split into components using shell-like syntax.
"""
message = u"Query is not unicode: {0!r}".format(s)
assert isinstance(s, six.text_type), message
try:
parts = util.shlex_split(s)
except ValueError as exc:
raise dbcore.InvalidQueryError(s, exc)
return parse_query_parts(parts, model_cls)
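# Hedged usage sketch (the query strings are hypothetical):
#     query, sort = parse_query_string(u'artist:Beatles year:1969', Item)
# The ':' prefix registered above maps to RegexpQuery, so a query such as
# u'albumartist::^The' matches album artists beginning with "The".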
def _sqlite_bytelower(bytestring):
""" A custom ``bytelower`` sqlite function so we can compare
bytestrings in a semi case insensitive fashion. This is to work
around sqlite builds are that compiled with
``-DSQLITE_LIKE_DOESNT_MATCH_BLOBS``. See
``https://github.com/beetbox/beets/issues/2172`` for details.
"""
if not six.PY2:
return bytestring.lower()
return buffer(bytes(bytestring).lower()) # noqa: F821
# The Library: interface to the database.
class Library(dbcore.Database):
"""A database of music containing songs and albums.
"""
_models = (Item, Album)
def __init__(self, path='library.blb',
directory='~/Music',
path_formats=((PF_KEY_DEFAULT,
'$artist/$album/$track $title'),),
replacements=None):
timeout = beets.config['timeout'].as_number()
super(Library, self).__init__(path, timeout=timeout)
self.directory = bytestring_path(normpath(directory))
self.path_formats = path_formats
self.replacements = replacements
self._memotable = {} # Used for template substitution performance.
def _create_connection(self):
conn = super(Library, self)._create_connection()
conn.create_function('bytelower', 1, _sqlite_bytelower)
return conn
# Adding objects to the database.
def add(self, obj):
"""Add the :class:`Item` or :class:`Album` object to the library
database. Return the object's new id.
"""
obj.add(self)
self._memotable = {}
return obj.id
def add_album(self, items):
"""Create a new album consisting of a list of items.
The items are added to the database if they don't yet have an
        ID. Return a new :class:`Album` object. The list of items must not
        be empty.
"""
if not items:
raise ValueError(u'need at least one item')
# Create the album structure using metadata from the first item.
values = dict((key, items[0][key]) for key in Album.item_keys)
album = Album(self, **values)
# Add the album structure and set the items' album_id fields.
# Store or add the items.
with self.transaction():
album.add(self)
for item in items:
item.album_id = album.id
if item.id is None:
item.add(self)
else:
item.store()
return album
# Querying.
def _fetch(self, model_cls, query, sort=None):
"""Parse a query and fetch. If a order specification is present
in the query string the `sort` argument is ignored.
"""
# Parse the query, if necessary.
try:
parsed_sort = None
if isinstance(query, six.string_types):
query, parsed_sort = parse_query_string(query, model_cls)
elif isinstance(query, (list, tuple)):
query, parsed_sort = parse_query_parts(query, model_cls)
except dbcore.query.InvalidQueryArgumentValueError as exc:
raise dbcore.InvalidQueryError(query, exc)
# Any non-null sort specified by the parsed query overrides the
# provided sort.
if parsed_sort and not isinstance(parsed_sort, dbcore.query.NullSort):
sort = parsed_sort
return super(Library, self)._fetch(
model_cls, query, sort
)
@staticmethod
def get_default_album_sort():
"""Get a :class:`Sort` object for albums from the config option.
"""
return dbcore.sort_from_strings(
Album, beets.config['sort_album'].as_str_seq())
@staticmethod
def get_default_item_sort():
"""Get a :class:`Sort` object for items from the config option.
"""
return dbcore.sort_from_strings(
Item, beets.config['sort_item'].as_str_seq())
def albums(self, query=None, sort=None):
"""Get :class:`Album` objects matching the query.
"""
return self._fetch(Album, query, sort or self.get_default_album_sort())
def items(self, query=None, sort=None):
"""Get :class:`Item` objects matching the query.
"""
return self._fetch(Item, query, sort or self.get_default_item_sort())
# Convenience accessors.
def get_item(self, id):
"""Fetch an :class:`Item` by its ID. Returns `None` if no match is
found.
"""
return self._get(Item, id)
def get_album(self, item_or_id):
"""Given an album ID or an item associated with an album, return
an :class:`Album` object for the album. If no such album exists,
returns `None`.
"""
if isinstance(item_or_id, int):
album_id = item_or_id
else:
album_id = item_or_id.album_id
if album_id is None:
return None
return self._get(Album, album_id)
# Default path template resources.
def _int_arg(s):
"""Convert a string argument to an integer for use in a template
function. May raise a ValueError.
"""
return int(s.strip())
class DefaultTemplateFunctions(object):
"""A container class for the default functions provided to path
templates. These functions are contained in an object to provide
additional context to the functions -- specifically, the Item being
evaluated.
"""
_prefix = 'tmpl_'
def __init__(self, item=None, lib=None):
"""Parametrize the functions. If `item` or `lib` is None, then
some functions (namely, ``aunique``) will always evaluate to the
empty string.
"""
self.item = item
self.lib = lib
def functions(self):
"""Returns a dictionary containing the functions defined in this
object. The keys are function names (as exposed in templates)
and the values are Python functions.
"""
out = {}
for key in self._func_names:
out[key[len(self._prefix):]] = getattr(self, key)
return out
@staticmethod
def tmpl_lower(s):
"""Convert a string to lower case."""
return s.lower()
@staticmethod
def tmpl_upper(s):
"""Covert a string to upper case."""
return s.upper()
@staticmethod
def tmpl_title(s):
"""Convert a string to title case."""
return s.title()
@staticmethod
def tmpl_left(s, chars):
"""Get the leftmost characters of a string."""
return s[0:_int_arg(chars)]
@staticmethod
def tmpl_right(s, chars):
"""Get the rightmost characters of a string."""
return s[-_int_arg(chars):]
@staticmethod
def tmpl_if(condition, trueval, falseval=u''):
"""If ``condition`` is nonempty and nonzero, emit ``trueval``;
otherwise, emit ``falseval`` (if provided).
"""
try:
int_condition = _int_arg(condition)
except ValueError:
if condition.lower() == "false":
return falseval
else:
condition = int_condition
if condition:
return trueval
else:
return falseval
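    # Template usage sketch (path format syntax):
    #     %if{$comp,Compilations,$albumartist}
    # emits 'Compilations' when the `comp` flag is set and the album artist
    # otherwise.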
@staticmethod
def tmpl_asciify(s):
"""Translate non-ASCII characters to their ASCII equivalents.
"""
return util.asciify_path(s, beets.config['path_sep_replace'].as_str())
@staticmethod
def tmpl_time(s, fmt):
"""Format a time value using `strftime`.
"""
cur_fmt = beets.config['time_format'].as_str()
return time.strftime(fmt, time.strptime(s, cur_fmt))
def tmpl_aunique(self, keys=None, disam=None, bracket=None):
"""Generate a string that is guaranteed to be unique among all
        albums in the library that share the same set of keys. A field
        from `disam` is used in the string if one is sufficient to
disambiguate the albums. Otherwise, a fallback opaque value is
used. Both "keys" and "disam" should be given as
whitespace-separated lists of field names, while "bracket" is a
pair of characters to be used as brackets surrounding the
disambiguator or empty to have no brackets.
"""
# Fast paths: no album, no item or library, or memoized value.
if not self.item or not self.lib:
return u''
if self.item.album_id is None:
return u''
memokey = ('aunique', keys, disam, self.item.album_id)
memoval = self.lib._memotable.get(memokey)
if memoval is not None:
return memoval
keys = keys or 'albumartist album'
disam = disam or 'albumtype year label catalognum albumdisambig'
if bracket is None:
bracket = '[]'
keys = keys.split()
disam = disam.split()
# Assign a left and right bracket or leave blank if argument is empty.
if len(bracket) == 2:
bracket_l = bracket[0]
bracket_r = bracket[1]
else:
bracket_l = u''
bracket_r = u''
album = self.lib.get_album(self.item)
if not album:
# Do nothing for singletons.
self.lib._memotable[memokey] = u''
return u''
# Find matching albums to disambiguate with.
subqueries = []
for key in keys:
value = album.get(key, '')
subqueries.append(dbcore.MatchQuery(key, value))
albums = self.lib.albums(dbcore.AndQuery(subqueries))
        # If there's only one album matching these details, then do
# nothing.
if len(albums) == 1:
self.lib._memotable[memokey] = u''
return u''
# Find the first disambiguator that distinguishes the albums.
for disambiguator in disam:
# Get the value for each album for the current field.
disam_values = set([a.get(disambiguator, '') for a in albums])
# If the set of unique values is equal to the number of
# albums in the disambiguation set, we're done -- this is
# sufficient disambiguation.
if len(disam_values) == len(albums):
break
else:
# No disambiguator distinguished all fields.
res = u' {1}{0}{2}'.format(album.id, bracket_l, bracket_r)
self.lib._memotable[memokey] = res
return res
# Flatten disambiguation value into a string.
disam_value = album.formatted(True).get(disambiguator)
# Return empty string if disambiguator is empty.
if disam_value:
res = u' {1}{0}{2}'.format(disam_value, bracket_l, bracket_r)
else:
res = u''
self.lib._memotable[memokey] = res
return res
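    # Template usage sketch: '$albumartist/$album%aunique{}' appends a
    # bracketed disambiguator such as ' [2003]' only when two albums in the
    # library share the same album artist and title.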
@staticmethod
def tmpl_first(s, count=1, skip=0, sep=u'; ', join_str=u'; '):
""" Gets the item(s) from x to y in a string separated by something
and join then with something
:param s: the string
:param count: The number of items included
:param skip: The number of items skipped
:param sep: the separator. Usually is '; ' (default) or '/ '
:param join_str: the string which will join the items, default '; '.
"""
skip = int(skip)
count = skip + int(count)
return join_str.join(s.split(sep)[skip:count])
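    # Worked examples (not in the original):
    #     tmpl_first(u'Alternative; Rock; Jazz')     -> u'Alternative'
    #     tmpl_first(u'Alternative; Rock; Jazz', 2)  -> u'Alternative; Rock'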
def tmpl_ifdef(self, field, trueval=u'', falseval=u''):
""" If field exists return trueval or the field (default)
otherwise, emit return falseval (if provided).
:param field: The name of the field
:param trueval: The string if the condition is true
:param falseval: The string if the condition is false
:return: The string, based on condition
"""
if self.item.formatted().get(field):
return trueval if trueval else self.item.formatted().get(field)
else:
return falseval
# Get the name of tmpl_* functions in the above class.
DefaultTemplateFunctions._func_names = \
[s for s in dir(DefaultTemplateFunctions)
if s.startswith(DefaultTemplateFunctions._prefix)]
|
pkess/beets
|
beets/library.py
|
Python
|
mit
| 54,095
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os, sys
import numpy as np
def determine_path(root=__file__):
"""Borrowed from wxglade.py"""
try:
# root = __file__
if os.path.islink(root):
root = os.path.realpath(root)
return os.path.dirname(os.path.abspath(root))
    except Exception:
print("I'm sorry, but something is wrong.")
print("There is no __file__ variable. Please contact the author.")
sys.exit()
testdir = determine_path()+'/' # Executable path
def verify_point(test, env, time=None, temp=None, value=None, comment=''):
"""Verify that Script output is equal to passed parameters"""
if time in ['None', None]:
test.assertEqual(env.time, None)
else:
test.assertAlmostEqual(env.time, time, delta=1.5)
if temp in ['None', None]:
test.assertEqual(env.temp, None)
else:
test.assertAlmostEqual(env.temp, temp, delta=0.01)
if value in ['None', None]:
test.assertEqual(env.value, None)
else:
test.assertAlmostEqual(env.value, value)
test.assertEqual(env.comment, comment)
from misura.canon import logger
class DummyInstrument(dict):
measure = {}
kiln = {}
running = True
log = logger.Log
_parent = None
manager = False
main_confdir = testdir
def __init__(self, fullpath=False):
super(DummyInstrument, self).__init__()
if not fullpath:
return
fp = fullpath.split('/')
dp = fp[-1]
if dp == '':
self['devpath'] = 'MAINSERVER'
self['fullpath'] = '/'
else:
self['devpath'] = dp
self['fullpath'] = '/'.join(fp)
def parent(self):
return self._parent
def set(self, key, val):
self[key] = val
def stop_acquisition(self):
self.running = False
def putSubHandler(self, *a):
pass
@property
def devices(self):
return []
def iter_samples(self):
return []
class FakeStorageFile(object):
"""Faking an hdf file"""
r = list(range(100))
r += list(range(100, 0, -1))
nrows = len(r)
r = np.array(r) * 1.
t = np.arange(nrows) * 15.
T = r * 10
h = np.concatenate((np.linspace(90, 95, 50),
np.linspace(95, 6, 150)))
cohe = np.concatenate((np.linspace(70, 98, 100),
np.linspace(97, 31, 100)))
w = np.concatenate((np.linspace(60, 50, 120),
np.linspace(50, 180, 80)))
Left_pos = np.linspace(0, 5, 200)
Right_pos = Left_pos
dil = Left_pos + Right_pos
def __init__(self):
self.nodes = {'/hsm/sample0/h': self.t_arr(self.h),
'/hsm/sample0/cohe': self.t_arr(self.cohe),
'/hsm/sample0/w': self.t_arr(self.w),
'/hsm/sample0/dil': self.t_arr(self.dil),
'/kiln/T': self.t_arr(self.T)}
def t_arr(self, arr):
return np.array([self.t, arr]).transpose()
def get_node(self, path):
return self.nodes[path]
def close(self):
return True
def set_time_limit(self, *a, **k):
return True
def set_limit(self, *a, **k):
return True
def min(self, curve):
c = self.get_node(curve)[:, 1]
return 0, 0, min(c)
def max(self, curve):
c = self.get_node(curve)[:, 1]
return 0, 0, max(c)
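# Usage sketch (not in the original): each node is an (N, 2) array of
# (time, value) rows, so a curve can be unpacked as:
#     fs = FakeStorageFile()
#     t, T = fs.get_node('/kiln/T').transpose()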
def checkCompile(test, si, out):
"""Check if Script is compiled correctly"""
for k, opt in si.describe().items():
if opt['type'] != 'Script':
continue
test.assertTrue(k in si.all_scripts, 'Missing Script ' + k)
si.all_scripts[k].eval(out, si)
outopt = False
# Find output option (child)
for handle, desc in out.describe().items():
if desc['parent'] == k:
outopt = handle
break
if not outopt:
return
o = out[outopt]
t = None if o['time'] == 'None' else o['time']
T = None if o['temp'] == 'None' else o['temp']
v = None if o['value'] == 'None' else o['value']
    verify_point(test, si.env, t, T, v)
|
tainstr/misura.canon
|
misura/canon/tests/__init__.py
|
Python
|
mit
| 4,285
|
PARAMETER_TYPES = [(1, 'query'), (2, 'user')]
TRANSFORMATION_TYPES = [(1, 'Transpose'), (2, 'Split'), (3, 'Merge')]
COLUMN_TYPES = [(1, 'dimension'), (2, 'metric')]
|
devashishsharma2302/testing-heroku-deployment
|
squealy/constants.py
|
Python
|
mit
| 165
|
import unittest
from betago import scoring
from betago.dataloader import goboard
class ScoringTestCase(unittest.TestCase):
def test_identify_territory(self):
board = goboard.from_string('''
...b..w..
...b..w..
bbbb..w..
wwwww.www
wwbbw..w.
wb.bbwww.
wbbbb....
..b.b....
..bbb....
''')
territory = scoring.evaluate_territory(board)
self.assertEqual(8, territory.num_black_territory)
self.assertEqual(6, territory.num_white_territory)
self.assertEqual(20, territory.num_black_stones)
self.assertEqual(20, territory.num_white_stones)
self.assertEqual(27, territory.num_dame)
self.assertIn((0, 0), territory.dame_points)
self.assertNotIn((8, 0), territory.dame_points)
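        # Sanity check (not in the original test): the five counts partition
        # the 9x9 board, since 8 + 6 + 20 + 20 + 27 == 81 points.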
|
maxpumperla/betago
|
tests/scoring_test.py
|
Python
|
mit
| 863
|
import OOMP
newPart = OOMP.oompItem(9136)
newPart.addTag("oompType", "LEDS")
newPart.addTag("oompSize", "0603")
newPart.addTag("oompColor", "W")
newPart.addTag("oompDesc", "STAN")
newPart.addTag("oompIndex", "01")
OOMP.parts.append(newPart)
|
oomlout/oomlout-OOMP
|
old/OOMPpart_LEDS_0603_W_STAN_01.py
|
Python
|
cc0-1.0
| 243
|
import json
import re
import glob
class ChapterDependency(object):
def __init__(self, other_chapter, other_quest):
self.other_chapter = other_chapter
self.other_quest = other_quest
class DependencyGraph(object):
def __init__(self):
self.chapters = []
self.dependencies = {}
def add_dependency(self, dependent, dependency):
if dependent not in self.chapters:
self.add_chapter(dependent)
if dependency not in self.dependencies[dependent]:
self.dependencies[dependent].append(dependency)
def add_chapter(self, chapter):
self.chapters.append(chapter)
self.dependencies[chapter] = []
def print_dependencies(self):
for chapter in self.chapters:
print(chapter)
for dependency in self.dependencies[chapter]:
print("\t" + dependency)
if __name__ == "__main__":
graph = DependencyGraph()
    preq_chapter_quest = re.compile(r"\{(.+)\}\[(.+)\]")
jsons_file_names = glob.glob(r"C:\space_race\bfsr_github\bfsr\config\hqm\QuestFiles\*.json")
for json_file_name in jsons_file_names:
if "reputations.json" in json_file_name:
continue
with open(json_file_name) as json_file:
try:
chapter = json.load(json_file)
graph.add_chapter(chapter["name"])
for quest in chapter["quests"]:
if "prerequisites" in quest:
for preq in quest["prerequisites"]:
matching = preq_chapter_quest.search(str(preq))
if matching:
dependency = matching.group(1)
graph.add_dependency(chapter["name"], dependency)
except Exception as e:
print("Couldn't parse %s" % json_file_name)
print(e)
graph.print_dependencies()
|
rockobonaparte/bfsr
|
scripts/hqm_chapter_deps.py
|
Python
|
cc0-1.0
| 1,953
|
import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'MODEL1006230002.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString)
|
biomodels/MODEL1006230002
|
MODEL1006230002/model.py
|
Python
|
cc0-1.0
| 427
|
import os, glob, cv2
import numpy as np
import pylab
im_simple = '/home/faedrus/Documents/au_trap/20141031/1414763725.88.png'
im_complex = '/home/faedrus/Documents/au_trap/20141031/1414769658.04.png'
img = cv2.imread(im_simple)
(height, width, depth) = img.shape
cv2.namedWindow('original', cv2.WINDOW_NORMAL)
cv2.imshow('original', img)
#flatten to gray, convert to floating
harris = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
harris = np.float32(harris)
#cornerHarris (image, blocksize, ksize, k)
dst = cv2.cornerHarris (harris, 2, 3, 0.04)
dst = cv2.dilate (dst, None)
#thresholding
#img[dst > 0.99*dst.max()] = [0, 0, 255]  # would mark strong corners red on the color image
cv2.namedWindow('harris', cv2.WINDOW_NORMAL)
cv2.imshow('harris', harris)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
jfindleyderegt/au_trap
|
test_harris.py
|
Python
|
cc0-1.0
| 743
|
import argparse
import codecs
import hashlib
import logging
try:
import requests_cache
requests_cache.install_cache('fr_cache')
except ImportError:
# If the cache library isn't present, do nothing -- we'll just make full
# HTTP requests rather than looking it up from the cache
pass
from regparser.builder import (
Builder, Checkpointer, LayerCacheAggregator, NullCheckpointer)
from regparser.diff.tree import changes_between
from regparser.tree.struct import FrozenNode
logger = logging.getLogger('build_from')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Regulation parser')
parser.add_argument('filename',
help='XML file containing the regulation')
parser.add_argument('title', type=int, help='Title number')
parser.add_argument('act_title', type=int, help='Act title',
action='store')
parser.add_argument('act_section', type=int, help='Act section')
    # argparse's type=bool would treat any non-empty string as True; parse it.
    parser.add_argument('--generate-diffs', help='Generate diffs?',
                        type=lambda v: v.lower() not in ('false', '0', 'no'),
                        required=False, default=True)
parser.add_argument('--checkpoint', required=False,
help='Directory to save checkpoint data')
args = parser.parse_args()
with codecs.open(args.filename, 'r', 'utf-8') as f:
reg = f.read()
file_digest = hashlib.sha256(reg.encode('utf-8')).hexdigest()
act_title_and_section = [args.act_title, args.act_section]
if args.checkpoint:
checkpointer = Checkpointer(args.checkpoint)
else:
checkpointer = NullCheckpointer()
# First, the regulation tree
reg_tree = checkpointer.checkpoint(
"init-tree-" + file_digest,
lambda: Builder.reg_tree(reg))
title_part = reg_tree.label_id()
doc_number = checkpointer.checkpoint(
"doc-number-" + file_digest,
lambda: Builder.determine_doc_number(reg, args.title, title_part))
if not doc_number:
raise ValueError("Could not determine document number")
checkpointer.suffix = ":".join(
["", title_part, str(args.title), doc_number])
# Run Builder
builder = Builder(cfr_title=args.title,
cfr_part=title_part,
doc_number=doc_number,
checkpointer=checkpointer)
builder.write_notices()
# Always do at least the first reg
logger.info("Version %s", doc_number)
builder.write_regulation(reg_tree)
layer_cache = LayerCacheAggregator()
builder.gen_and_write_layers(reg_tree, act_title_and_section, layer_cache)
layer_cache.replace_using(reg_tree)
# this used to assume implicitly that if gen-diffs was not specified it was
# True; changed it to explicit check
if args.generate_diffs:
all_versions = {doc_number: reg_tree}
for last_notice, old, new_tree, notices in builder.revision_generator(
reg_tree):
version = last_notice['document_number']
logger.info("Version %s", version)
all_versions[version] = new_tree
builder.doc_number = version
builder.write_regulation(new_tree)
layer_cache.invalidate_by_notice(last_notice)
builder.gen_and_write_layers(new_tree, act_title_and_section,
layer_cache, notices)
layer_cache.replace_using(new_tree)
# convert to frozen trees
for doc in all_versions:
all_versions[doc] = FrozenNode.from_node(all_versions[doc])
# now build diffs - include "empty" diffs comparing a version to itself
for lhs_version, lhs_tree in all_versions.iteritems():
for rhs_version, rhs_tree in all_versions.iteritems():
changes = checkpointer.checkpoint(
"-".join(["diff", lhs_version, rhs_version]),
lambda: dict(changes_between(lhs_tree, rhs_tree)))
builder.writer.diff(
reg_tree.label_id(), lhs_version, rhs_version
).write(changes)
|
jmcarp/regulations-parser
|
build_from.py
|
Python
|
cc0-1.0
| 4,170
|
# -*- coding: utf-8 -*-
# bontempo.py
# author : Antoine Passemiers
import numpy as np
class BeatDetector:
def __init__(self):
self.frame_size = 128
    def detect(self, signal):
        # Integer division: the number of whole frames in the signal.
        n_slides = len(signal) // self.frame_size
        ste = np.empty(n_slides, dtype=np.double)  # short-time energy
        for i in range(n_slides):
            frame = signal[self.frame_size*i:self.frame_size*(i+1)]
            ste[i] = np.sum(frame ** 2)
ticks = np.where(ste > ste.mean() * 2)[0]
# bpm = 60.0 * 4410.0 / (np.diff(ticks).mean() * self.frame_size)
return ticks * self.frame_size + self.frame_size / 2
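# Hedged usage sketch (the input signal is hypothetical):
#     sig = np.random.randn(44100)
#     ticks = BeatDetector().detect(sig)  # approximate sample positions of high-energy frames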
|
AntoinePassemiers/Neuhon
|
python/bontempo.py
|
Python
|
epl-1.0
| 672
|
# Use of this source code is governed by the MIT license.
__license__ = "MIT"
try:
from collections.abc import Callable # Python 3.6
except ImportError as e:
from collections import Callable
import re
import sys
import warnings
try:
import thlib.side.soupsieve as soupsieve
except ImportError as e:
soupsieve = None
warnings.warn(
'The soupsieve package is not installed. CSS selectors cannot be used.'
)
from bs4.formatter import (
Formatter,
HTMLFormatter,
XMLFormatter,
)
DEFAULT_OUTPUT_ENCODING = "utf-8"
PY3K = (sys.version_info[0] > 2)
nonwhitespace_re = re.compile(r"\S+")
# NOTE: This isn't used as of 4.7.0. I'm leaving it for a little bit on
# the off chance someone imported it for their own use.
whitespace_re = re.compile(r"\s+")
def _alias(attr):
"""Alias one attribute name to another for backward compatibility"""
@property
def alias(self):
return getattr(self, attr)
    @alias.setter
    def alias(self, value):
        return setattr(self, attr, value)
return alias
# These encodings are recognized by Python (so PageElement.encode
# could theoretically support them) but XML and HTML don't recognize
# them (so they should not show up in an XML or HTML document as that
# document's encoding).
#
# If an XML document is encoded in one of these encodings, no encoding
# will be mentioned in the XML declaration. If an HTML document is
# encoded in one of these encodings, and the HTML document has a
# <meta> tag that mentions an encoding, the encoding will be given as
# the empty string.
#
# Source:
# https://docs.python.org/3/library/codecs.html#python-specific-encodings
PYTHON_SPECIFIC_ENCODINGS = set([
"idna",
"mbcs",
"oem",
"palmos",
"punycode",
"raw_unicode_escape",
"undefined",
"unicode_escape",
"raw-unicode-escape",
"unicode-escape",
"string-escape",
"string_escape",
])
class NamespacedAttribute(str):
"""A namespaced string (e.g. 'xml:lang') that remembers the namespace
('xml') and the name ('lang') that were used to create it.
"""
def __new__(cls, prefix, name=None, namespace=None):
if not name:
# This is the default namespace. Its name "has no value"
# per https://www.w3.org/TR/xml-names/#defaulting
name = None
if name is None:
obj = str.__new__(cls, prefix)
elif prefix is None:
# Not really namespaced.
obj = str.__new__(cls, name)
else:
obj = str.__new__(cls, prefix + ":" + name)
obj.prefix = prefix
obj.name = name
obj.namespace = namespace
return obj
class AttributeValueWithCharsetSubstitution(str):
"""A stand-in object for a character encoding specified in HTML."""
class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'charset' attribute.
When Beautiful Soup parses the markup '<meta charset="utf8">', the
value of the 'charset' attribute will be one of these objects.
"""
def __new__(cls, original_value):
obj = str.__new__(cls, original_value)
obj.original_value = original_value
return obj
def encode(self, encoding):
"""When an HTML document is being encoded to a given encoding, the
value of a meta tag's 'charset' is the name of the encoding.
"""
if encoding in PYTHON_SPECIFIC_ENCODINGS:
return ''
return encoding
class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'content' attribute.
When Beautiful Soup parses the markup:
<meta http-equiv="content-type" content="text/html; charset=utf8">
The value of the 'content' attribute will be one of these objects.
"""
CHARSET_RE = re.compile(r"((^|;)\s*charset=)([^;]*)", re.M)
def __new__(cls, original_value):
match = cls.CHARSET_RE.search(original_value)
if match is None:
# No substitution necessary.
return str.__new__(str, original_value)
obj = str.__new__(cls, original_value)
obj.original_value = original_value
return obj
def encode(self, encoding):
if encoding in PYTHON_SPECIFIC_ENCODINGS:
return ''
def rewrite(match):
return match.group(1) + encoding
return self.CHARSET_RE.sub(rewrite, self.original_value)
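# Worked example (not in the original source):
#     v = ContentMetaAttributeValue('text/html; charset=utf8')
#     v.encode('latin-1')  # -> 'text/html; charset=latin-1'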
class PageElement(object):
"""Contains the navigational information for some part of the page:
that is, its current location in the parse tree.
NavigableString, Tag, etc. are all subclasses of PageElement.
"""
def setup(self, parent=None, previous_element=None, next_element=None,
previous_sibling=None, next_sibling=None):
"""Sets up the initial relations between this element and
other elements.
:param parent: The parent of this element.
:param previous_element: The element parsed immediately before
this one.
        :param next_element: The element parsed immediately after
         this one.
:param previous_sibling: The most recently encountered element
on the same level of the parse tree as this one.
        :param next_sibling: The next element to be encountered
on the same level of the parse tree as this one.
"""
self.parent = parent
self.previous_element = previous_element
if previous_element is not None:
self.previous_element.next_element = self
self.next_element = next_element
if self.next_element is not None:
self.next_element.previous_element = self
self.next_sibling = next_sibling
if self.next_sibling is not None:
self.next_sibling.previous_sibling = self
if (previous_sibling is None
and self.parent is not None and self.parent.contents):
previous_sibling = self.parent.contents[-1]
self.previous_sibling = previous_sibling
if previous_sibling is not None:
self.previous_sibling.next_sibling = self
def format_string(self, s, formatter):
"""Format the given string using the given formatter.
:param s: A string.
:param formatter: A Formatter object, or a string naming one of the standard formatters.
"""
if formatter is None:
return s
if not isinstance(formatter, Formatter):
formatter = self.formatter_for_name(formatter)
output = formatter.substitute(s)
return output
def formatter_for_name(self, formatter):
"""Look up or create a Formatter for the given identifier,
if necessary.
:param formatter: Can be a Formatter object (used as-is), a
function (used as the entity substitution hook for an
XMLFormatter or HTMLFormatter), or a string (used to look
up an XMLFormatter or HTMLFormatter in the appropriate
        registry).
"""
if isinstance(formatter, Formatter):
return formatter
if self._is_xml:
c = XMLFormatter
else:
c = HTMLFormatter
if isinstance(formatter, Callable):
return c(entity_substitution=formatter)
return c.REGISTRY[formatter]
@property
def _is_xml(self):
"""Is this element part of an XML tree or an HTML tree?
This is used in formatter_for_name, when deciding whether an
XMLFormatter or HTMLFormatter is more appropriate. It can be
inefficient, but it should be called very rarely.
"""
if self.known_xml is not None:
# Most of the time we will have determined this when the
# document is parsed.
return self.known_xml
# Otherwise, it's likely that this element was created by
# direct invocation of the constructor from within the user's
# Python code.
if self.parent is None:
# This is the top-level object. It should have .known_xml set
# from tree creation. If not, take a guess--BS is usually
# used on HTML markup.
return getattr(self, 'is_xml', False)
return self.parent._is_xml
nextSibling = _alias("next_sibling") # BS3
previousSibling = _alias("previous_sibling") # BS3
def replace_with(self, replace_with):
"""Replace this PageElement with another one, keeping the rest of the
tree the same.
:param replace_with: A PageElement.
:return: `self`, no longer part of the tree.
"""
if self.parent is None:
raise ValueError(
"Cannot replace one element with another when the "
"element to be replaced is not part of a tree.")
if replace_with is self:
return
if replace_with is self.parent:
raise ValueError("Cannot replace a Tag with its parent.")
old_parent = self.parent
my_index = self.parent.index(self)
self.extract(_self_index=my_index)
old_parent.insert(my_index, replace_with)
return self
replaceWith = replace_with # BS3
def unwrap(self):
"""Replace this PageElement with its contents.
:return: `self`, no longer part of the tree.
"""
my_parent = self.parent
if self.parent is None:
raise ValueError(
"Cannot replace an element with its contents when that"
"element is not part of a tree.")
my_index = self.parent.index(self)
self.extract(_self_index=my_index)
for child in reversed(self.contents[:]):
my_parent.insert(my_index, child)
return self
replace_with_children = unwrap
replaceWithChildren = unwrap # BS3
def wrap(self, wrap_inside):
"""Wrap this PageElement inside another one.
:param wrap_inside: A PageElement.
:return: `wrap_inside`, occupying the position in the tree that used
to be occupied by `self`, and with `self` inside it.
"""
me = self.replace_with(wrap_inside)
wrap_inside.append(me)
return wrap_inside
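    # Usage sketch (the soup object is hypothetical):
    #     a = soup.find('a')
    #     a.wrap(soup.new_tag('b'))  # <a>...</a> becomes <b><a>...</a></b>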
def extract(self, _self_index=None):
"""Destructively rips this element out of the tree.
:param _self_index: The location of this element in its parent's
.contents, if known. Passing this in allows for a performance
optimization.
:return: `self`, no longer part of the tree.
"""
if self.parent is not None:
if _self_index is None:
_self_index = self.parent.index(self)
del self.parent.contents[_self_index]
#Find the two elements that would be next to each other if
#this element (and any children) hadn't been parsed. Connect
#the two.
last_child = self._last_descendant()
next_element = last_child.next_element
if (self.previous_element is not None and
self.previous_element is not next_element):
self.previous_element.next_element = next_element
if next_element is not None and next_element is not self.previous_element:
next_element.previous_element = self.previous_element
self.previous_element = None
last_child.next_element = None
self.parent = None
if (self.previous_sibling is not None
and self.previous_sibling is not self.next_sibling):
self.previous_sibling.next_sibling = self.next_sibling
if (self.next_sibling is not None
and self.next_sibling is not self.previous_sibling):
self.next_sibling.previous_sibling = self.previous_sibling
self.previous_sibling = self.next_sibling = None
return self
def _last_descendant(self, is_initialized=True, accept_self=True):
"""Finds the last element beneath this object to be parsed.
:param is_initialized: Has `setup` been called on this PageElement
yet?
:param accept_self: Is `self` an acceptable answer to the question?
"""
if is_initialized and self.next_sibling is not None:
last_child = self.next_sibling.previous_element
else:
last_child = self
while isinstance(last_child, Tag) and last_child.contents:
last_child = last_child.contents[-1]
if not accept_self and last_child is self:
last_child = None
return last_child
# BS3: Not part of the API!
_lastRecursiveChild = _last_descendant
def insert(self, position, new_child):
"""Insert a new PageElement in the list of this PageElement's children.
This works the same way as `list.insert`.
:param position: The numeric position that should be occupied
in `self.children` by the new PageElement.
:param new_child: A PageElement.
"""
if new_child is None:
raise ValueError("Cannot insert None into a tag.")
if new_child is self:
raise ValueError("Cannot insert a tag into itself.")
if (isinstance(new_child, str)
and not isinstance(new_child, NavigableString)):
new_child = NavigableString(new_child)
from bs4 import BeautifulSoup
if isinstance(new_child, BeautifulSoup):
# We don't want to end up with a situation where one BeautifulSoup
# object contains another. Insert the children one at a time.
for subchild in list(new_child.contents):
self.insert(position, subchild)
position += 1
return
position = min(position, len(self.contents))
if hasattr(new_child, 'parent') and new_child.parent is not None:
# We're 'inserting' an element that's already one
# of this object's children.
if new_child.parent is self:
current_index = self.index(new_child)
if current_index < position:
# We're moving this element further down the list
# of this object's children. That means that when
# we extract this element, our target index will
# jump down one.
position -= 1
new_child.extract()
new_child.parent = self
previous_child = None
if position == 0:
new_child.previous_sibling = None
new_child.previous_element = self
else:
previous_child = self.contents[position - 1]
new_child.previous_sibling = previous_child
new_child.previous_sibling.next_sibling = new_child
new_child.previous_element = previous_child._last_descendant(False)
if new_child.previous_element is not None:
new_child.previous_element.next_element = new_child
new_childs_last_element = new_child._last_descendant(False)
if position >= len(self.contents):
new_child.next_sibling = None
parent = self
parents_next_sibling = None
while parents_next_sibling is None and parent is not None:
parents_next_sibling = parent.next_sibling
parent = parent.parent
if parents_next_sibling is not None:
# We found the element that comes next in the document.
break
if parents_next_sibling is not None:
new_childs_last_element.next_element = parents_next_sibling
else:
# The last element of this tag is the last element in
# the document.
new_childs_last_element.next_element = None
else:
next_child = self.contents[position]
new_child.next_sibling = next_child
if new_child.next_sibling is not None:
new_child.next_sibling.previous_sibling = new_child
new_childs_last_element.next_element = next_child
if new_childs_last_element.next_element is not None:
new_childs_last_element.next_element.previous_element = new_childs_last_element
self.contents.insert(position, new_child)
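    # Behavior notes (not in the original): like list.insert, an out-of-range
    # position is clamped to len(self.contents); and when re-inserting an
    # element that is already a child at an earlier index, the target position
    # is shifted down by one because the element is extracted first.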
def append(self, tag):
"""Appends the given PageElement to the contents of this one.
:param tag: A PageElement.
"""
self.insert(len(self.contents), tag)
def extend(self, tags):
"""Appends the given PageElements to this one's contents.
:param tags: A list of PageElements.
"""
for tag in tags:
self.append(tag)
def insert_before(self, *args):
"""Makes the given element(s) the immediate predecessor of this one.
All the elements will have the same parent, and the given elements
will be immediately before this one.
:param args: One or more PageElements.
"""
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'before' has no meaning.")
if any(x is self for x in args):
raise ValueError("Can't insert an element before itself.")
for predecessor in args:
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(predecessor, PageElement):
predecessor.extract()
index = parent.index(self)
parent.insert(index, predecessor)
def insert_after(self, *args):
"""Makes the given element(s) the immediate successor of this one.
The elements will have the same parent, and the given elements
will be immediately after this one.
:param args: One or more PageElements.
"""
# Do all error checking before modifying the tree.
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'after' has no meaning.")
if any(x is self for x in args):
raise ValueError("Can't insert an element after itself.")
offset = 0
for successor in args:
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(successor, PageElement):
successor.extract()
index = parent.index(self)
parent.insert(index+1+offset, successor)
offset += 1
def find_next(self, name=None, attrs={}, text=None, **kwargs):
"""Find the first PageElement that matches the given criteria and
appears later in the document than this PageElement.
All find_* methods take a common set of arguments. See the online
documentation for detailed explanations.
:param name: A filter on tag name.
:param attrs: A dictionary of filters on attribute values.
:param text: A filter for a NavigableString with specific text.
:kwargs: A dictionary of filters on attribute values.
:return: A PageElement.
:rtype: bs4.element.Tag | bs4.element.NavigableString
"""
return self._find_one(self.find_all_next, name, attrs, text, **kwargs)
findNext = find_next # BS3
def find_all_next(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Find all PageElements that match the given criteria and appear
later in the document than this PageElement.
All find_* methods take a common set of arguments. See the online
documentation for detailed explanations.
:param name: A filter on tag name.
:param attrs: A dictionary of filters on attribute values.
:param text: A filter for a NavigableString with specific text.
:param limit: Stop looking after finding this many results.
:kwargs: A dictionary of filters on attribute values.
:return: A ResultSet containing PageElements.
"""
return self._find_all(name, attrs, text, limit, self.next_elements,
**kwargs)
findAllNext = find_all_next # BS3
def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Find the closest sibling to this PageElement that matches the
given criteria and appears later in the document.
All find_* methods take a common set of arguments. See the
online documentation for detailed explanations.
:param name: A filter on tag name.
:param attrs: A dictionary of filters on attribute values.
:param text: A filter for a NavigableString with specific text.
:kwargs: A dictionary of filters on attribute values.
:return: A PageElement.
:rtype: bs4.element.Tag | bs4.element.NavigableString
"""
return self._find_one(self.find_next_siblings, name, attrs, text,
**kwargs)
findNextSibling = find_next_sibling # BS3
def find_next_siblings(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Find all siblings of this PageElement that match the given criteria
and appear later in the document.
All find_* methods take a common set of arguments. See the online
documentation for detailed explanations.
:param name: A filter on tag name.
:param attrs: A dictionary of filters on attribute values.
:param text: A filter for a NavigableString with specific text.
:param limit: Stop looking after finding this many results.
:kwargs: A dictionary of filters on attribute values.
:return: A ResultSet of PageElements.
:rtype: bs4.element.ResultSet
"""
return self._find_all(name, attrs, text, limit,
self.next_siblings, **kwargs)
findNextSiblings = find_next_siblings # BS3
fetchNextSiblings = find_next_siblings # BS2
def find_previous(self, name=None, attrs={}, text=None, **kwargs):
"""Look backwards in the document from this PageElement and find the
first PageElement that matches the given criteria.
All find_* methods take a common set of arguments. See the online
documentation for detailed explanations.
:param name: A filter on tag name.
:param attrs: A dictionary of filters on attribute values.
:param text: A filter for a NavigableString with specific text.
:kwargs: A dictionary of filters on attribute values.
:return: A PageElement.
:rtype: bs4.element.Tag | bs4.element.NavigableString
"""
return self._find_one(
self.find_all_previous, name, attrs, text, **kwargs)
findPrevious = find_previous # BS3
def find_all_previous(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Look backwards in the document from this PageElement and find all
PageElements that match the given criteria.
All find_* methods take a common set of arguments. See the online
documentation for detailed explanations.
:param name: A filter on tag name.
:param attrs: A dictionary of filters on attribute values.
:param text: A filter for a NavigableString with specific text.
:param limit: Stop looking after finding this many results.
:kwargs: A dictionary of filters on attribute values.
:return: A ResultSet of PageElements.
:rtype: bs4.element.ResultSet
"""
return self._find_all(name, attrs, text, limit, self.previous_elements,
**kwargs)
findAllPrevious = find_all_previous # BS3
fetchPrevious = find_all_previous # BS2
def find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this PageElement that matches the
given criteria and appears earlier in the document.
All find_* methods take a common set of arguments. See the online
documentation for detailed explanations.
:param name: A filter on tag name.
:param attrs: A dictionary of filters on attribute values.
:param text: A filter for a NavigableString with specific text.
:kwargs: A dictionary of filters on attribute values.
:return: A PageElement.
:rtype: bs4.element.Tag | bs4.element.NavigableString
"""
return self._find_one(self.find_previous_siblings, name, attrs, text,
**kwargs)
findPreviousSibling = find_previous_sibling # BS3
def find_previous_siblings(self, name=None, attrs={}, text=None,
limit=None, **kwargs):
"""Returns all siblings to this PageElement that match the
given criteria and appear earlier in the document.
All find_* methods take a common set of arguments. See the online
documentation for detailed explanations.
:param name: A filter on tag name.
:param attrs: A dictionary of filters on attribute values.
:param text: A filter for a NavigableString with specific text.
:param limit: Stop looking after finding this many results.
:kwargs: A dictionary of filters on attribute values.
:return: A ResultSet of PageElements.
:rtype: bs4.element.ResultSet
"""
return self._find_all(name, attrs, text, limit,
self.previous_siblings, **kwargs)
findPreviousSiblings = find_previous_siblings # BS3
fetchPreviousSiblings = find_previous_siblings # BS2
def find_parent(self, name=None, attrs={}, **kwargs):
"""Find the closest parent of this PageElement that matches the given
criteria.
All find_* methods take a common set of arguments. See the online
documentation for detailed explanations.
:param name: A filter on tag name.
:param attrs: A dictionary of filters on attribute values.
:kwargs: A dictionary of filters on attribute values.
:return: A PageElement.
:rtype: bs4.element.Tag | bs4.element.NavigableString
"""
# NOTE: We can't use _find_one because findParents takes a different
# set of arguments.
r = None
l = self.find_parents(name, attrs, 1, **kwargs)
if l:
r = l[0]
return r
findParent = find_parent # BS3
def find_parents(self, name=None, attrs={}, limit=None, **kwargs):
"""Find all parents of this PageElement that match the given criteria.
All find_* methods take a common set of arguments. See the online
documentation for detailed explanations.
:param name: A filter on tag name.
:param attrs: A dictionary of filters on attribute values.
:param limit: Stop looking after finding this many results.
:kwargs: A dictionary of filters on attribute values.
        :return: A ResultSet of PageElements.
        :rtype: bs4.element.ResultSet
"""
return self._find_all(name, attrs, None, limit, self.parents,
**kwargs)
findParents = find_parents # BS3
fetchParents = find_parents # BS2
@property
def next(self):
"""The PageElement, if any, that was parsed just after this one.
:return: A PageElement.
:rtype: bs4.element.Tag | bs4.element.NavigableString
"""
return self.next_element
@property
def previous(self):
"""The PageElement, if any, that was parsed just before this one.
:return: A PageElement.
:rtype: bs4.element.Tag | bs4.element.NavigableString
"""
return self.previous_element
#These methods do the real heavy lifting.
def _find_one(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
def _find_all(self, name, attrs, text, limit, generator, **kwargs):
"Iterates over a generator looking for things that match."
if text is None and 'string' in kwargs:
text = kwargs['string']
del kwargs['string']
if isinstance(name, SoupStrainer):
strainer = name
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
if text is None and not limit and not attrs and not kwargs:
if name is True or name is None:
# Optimization to find all tags.
result = (element for element in generator
if isinstance(element, Tag))
return ResultSet(strainer, result)
elif isinstance(name, str):
# Optimization to find all tags with a given name.
if name.count(':') == 1:
# This is a name with a prefix. If this is a namespace-aware document,
# we need to match the local name against tag.name. If not,
# we need to match the fully-qualified name against tag.name.
prefix, local_name = name.split(':', 1)
else:
prefix = None
local_name = name
                # Note: the name test must be nested inside the
                # isinstance() check; otherwise non-Tag elements could
                # slip through the `or` branch.
                result = (element for element in generator
                          if isinstance(element, Tag)
                          and (
                              element.name == name
                              or (
                                  element.name == local_name
                                  and (prefix is None or element.prefix == prefix)
                              )
                          )
                )
return ResultSet(strainer, result)
results = ResultSet(strainer)
while True:
try:
i = next(generator)
except StopIteration:
break
if i:
found = strainer.search(i)
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
#These generators can be used to navigate starting from both
#NavigableStrings and Tags.
@property
def next_elements(self):
"""All PageElements that were parsed after this one.
:yield: A sequence of PageElements.
"""
i = self.next_element
while i is not None:
yield i
i = i.next_element
@property
def next_siblings(self):
"""All PageElements that are siblings of this one but were parsed
later.
:yield: A sequence of PageElements.
"""
i = self.next_sibling
while i is not None:
yield i
i = i.next_sibling
@property
def previous_elements(self):
"""All PageElements that were parsed before this one.
:yield: A sequence of PageElements.
"""
i = self.previous_element
while i is not None:
yield i
i = i.previous_element
@property
def previous_siblings(self):
"""All PageElements that are siblings of this one but were parsed
earlier.
:yield: A sequence of PageElements.
"""
i = self.previous_sibling
while i is not None:
yield i
i = i.previous_sibling
@property
def parents(self):
"""All PageElements that are parents of this PageElement.
:yield: A sequence of PageElements.
"""
i = self.parent
while i is not None:
yield i
i = i.parent
@property
def decomposed(self):
"""Check whether a PageElement has been decomposed.
:rtype: bool
"""
return getattr(self, '_decomposed', False) or False
# Old non-property versions of the generators, for backwards
# compatibility with BS3.
def nextGenerator(self):
return self.next_elements
def nextSiblingGenerator(self):
return self.next_siblings
def previousGenerator(self):
return self.previous_elements
def previousSiblingGenerator(self):
return self.previous_siblings
def parentGenerator(self):
return self.parents
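    # Illustrative sketch of the navigation generators above (assuming a
    # parsed document; markup and names are examples):
    #   soup = BeautifulSoup("<a><b>x</b><c>y</c></a>", "html.parser")
    #   [e.name for e in soup.c.previous_siblings]   # ['b']
    #   [e.name for e in soup.b.parents]             # ['a', '[document]']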
class NavigableString(str, PageElement):
"""A Python Unicode string that is part of a parse tree.
When Beautiful Soup parses the markup <b>penguin</b>, it will
create a NavigableString for the string "penguin".
"""
PREFIX = ''
SUFFIX = ''
# We can't tell just by looking at a string whether it's contained
# in an XML document or an HTML document.
known_xml = None
def __new__(cls, value):
"""Create a new NavigableString.
When unpickling a NavigableString, this method is called with
the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
passed in to the superclass's __new__ or the superclass won't know
how to handle non-ASCII characters.
"""
if isinstance(value, str):
u = str.__new__(cls, value)
else:
u = str.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
u.setup()
return u
def __copy__(self):
"""A copy of a NavigableString has the same contents and class
as the original, but it is not connected to the parse tree.
"""
return type(self)(self)
def __getnewargs__(self):
return (str(self),)
def __getattr__(self, attr):
"""text.string gives you text. This is for backwards
compatibility for Navigable*String, but for CData* it lets you
get the string without the CData wrapper."""
if attr == 'string':
return self
else:
raise AttributeError(
"'%s' object has no attribute '%s'" % (
self.__class__.__name__, attr))
def output_ready(self, formatter="minimal"):
"""Run the string through the provided formatter.
:param formatter: A Formatter object, or a string naming one of the standard formatters.
"""
output = self.format_string(self, formatter)
return self.PREFIX + output + self.SUFFIX
@property
def name(self):
"""Since a NavigableString is not a Tag, it has no .name.
This property is implemented so that code like this doesn't crash
when run on a mixture of Tag and NavigableString objects:
[x.name for x in tag.children]
"""
return None
@name.setter
def name(self, name):
"""Prevent NavigableString.name from ever being set."""
raise AttributeError("A NavigableString cannot be given a name.")
class PreformattedString(NavigableString):
"""A NavigableString not subject to the normal formatting rules.
This is an abstract class used for special kinds of strings such
as comments (the Comment class) and CDATA blocks (the CData
class).
"""
PREFIX = ''
SUFFIX = ''
def output_ready(self, formatter=None):
"""Make this string ready for output by adding any subclass-specific
prefix or suffix.
:param formatter: A Formatter object, or a string naming one
of the standard formatters. The string will be passed into the
Formatter, but only to trigger any side effects: the return
value is ignored.
:return: The string, with any subclass-specific prefix and
suffix added on.
"""
if formatter is not None:
ignore = self.format_string(self, formatter)
return self.PREFIX + self + self.SUFFIX
class CData(PreformattedString):
"""A CDATA block."""
PREFIX = '<![CDATA['
SUFFIX = ']]>'
class ProcessingInstruction(PreformattedString):
"""A SGML processing instruction."""
PREFIX = '<?'
SUFFIX = '>'
class XMLProcessingInstruction(ProcessingInstruction):
"""An XML processing instruction."""
PREFIX = '<?'
SUFFIX = '?>'
class Comment(PreformattedString):
"""An HTML or XML comment."""
PREFIX = '<!--'
SUFFIX = '-->'
class Declaration(PreformattedString):
"""An XML declaration."""
PREFIX = '<?'
SUFFIX = '?>'
class Doctype(PreformattedString):
"""A document type declaration."""
@classmethod
def for_name_and_ids(cls, name, pub_id, system_id):
"""Generate an appropriate document type declaration for a given
public ID and system ID.
:param name: The name of the document's root element, e.g. 'html'.
:param pub_id: The Formal Public Identifier for this document type,
e.g. '-//W3C//DTD XHTML 1.1//EN'
:param system_id: The system identifier for this document type,
e.g. 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'
:return: A Doctype.
"""
value = name or ''
if pub_id is not None:
value += ' PUBLIC "%s"' % pub_id
if system_id is not None:
value += ' "%s"' % system_id
elif system_id is not None:
value += ' SYSTEM "%s"' % system_id
return Doctype(value)
PREFIX = '<!DOCTYPE '
SUFFIX = '>\n'
class Stylesheet(NavigableString):
"""A NavigableString representing an stylesheet (probably
CSS).
Used to distinguish embedded stylesheets from textual content.
"""
pass
class Script(NavigableString):
"""A NavigableString representing an executable script (probably
Javascript).
Used to distinguish executable code from textual content.
"""
pass
class TemplateString(NavigableString):
"""A NavigableString representing a string found inside an HTML
template embedded in a larger document.
Used to distinguish such strings from the main body of the document.
"""
pass
class Tag(PageElement):
"""Represents an HTML or XML tag that is part of a parse tree, along
with its attributes and contents.
When Beautiful Soup parses the markup <b>penguin</b>, it will
create a Tag object representing the <b> tag.
"""
def __init__(self, parser=None, builder=None, name=None, namespace=None,
prefix=None, attrs=None, parent=None, previous=None,
is_xml=None, sourceline=None, sourcepos=None,
can_be_empty_element=None, cdata_list_attributes=None,
preserve_whitespace_tags=None
):
"""Basic constructor.
:param parser: A BeautifulSoup object.
:param builder: A TreeBuilder.
:param name: The name of the tag.
:param namespace: The URI of this Tag's XML namespace, if any.
:param prefix: The prefix for this Tag's XML namespace, if any.
:param attrs: A dictionary of this Tag's attribute values.
:param parent: The PageElement to use as this Tag's parent.
:param previous: The PageElement that was parsed immediately before
this tag.
:param is_xml: If True, this is an XML tag. Otherwise, this is an
HTML tag.
:param sourceline: The line number where this tag was found in its
source document.
:param sourcepos: The character position within `sourceline` where this
tag was found.
:param can_be_empty_element: If True, this tag should be
represented as <tag/>. If False, this tag should be represented
as <tag></tag>.
:param cdata_list_attributes: A list of attributes whose values should
be treated as CDATA if they ever show up on this tag.
:param preserve_whitespace_tags: A list of tag names whose contents
should have their whitespace preserved.
"""
if parser is None:
self.parser_class = None
else:
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected.
self.parser_class = parser.__class__
if name is None:
raise ValueError("No value provided for new tag's name.")
self.name = name
self.namespace = namespace
self.prefix = prefix
if ((not builder or builder.store_line_numbers)
and (sourceline is not None or sourcepos is not None)):
self.sourceline = sourceline
self.sourcepos = sourcepos
if attrs is None:
attrs = {}
elif attrs:
if builder is not None and builder.cdata_list_attributes:
attrs = builder._replace_cdata_list_attribute_values(
self.name, attrs)
else:
attrs = dict(attrs)
else:
attrs = dict(attrs)
# If possible, determine ahead of time whether this tag is an
# XML tag.
if builder:
self.known_xml = builder.is_xml
else:
self.known_xml = is_xml
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
if builder is None:
# In the absence of a TreeBuilder, use whatever values were
# passed in here. They're probably None, unless this is a copy of some
# other tag.
self.can_be_empty_element = can_be_empty_element
self.cdata_list_attributes = cdata_list_attributes
self.preserve_whitespace_tags = preserve_whitespace_tags
else:
# Set up any substitutions for this tag, such as the charset in a META tag.
builder.set_up_substitutions(self)
# Ask the TreeBuilder whether this tag might be an empty-element tag.
self.can_be_empty_element = builder.can_be_empty_element(name)
# Keep track of the list of attributes of this tag that
# might need to be treated as a list.
#
# For performance reasons, we store the whole data structure
# rather than asking the question of every tag. Asking would
# require building a new data structure every time, and
# (unlike can_be_empty_element), we almost never need
# to check this.
self.cdata_list_attributes = builder.cdata_list_attributes
# Keep track of the names that might cause this tag to be treated as a
# whitespace-preserved tag.
self.preserve_whitespace_tags = builder.preserve_whitespace_tags
parserClass = _alias("parser_class") # BS3
def __copy__(self):
"""A copy of a Tag is a new Tag, unconnected to the parse tree.
Its contents are a copy of the old Tag's contents.
"""
clone = type(self)(
None, self.builder, self.name, self.namespace,
self.prefix, self.attrs, is_xml=self._is_xml,
sourceline=self.sourceline, sourcepos=self.sourcepos,
can_be_empty_element=self.can_be_empty_element,
cdata_list_attributes=self.cdata_list_attributes,
preserve_whitespace_tags=self.preserve_whitespace_tags
)
for attr in ('can_be_empty_element', 'hidden'):
setattr(clone, attr, getattr(self, attr))
for child in self.contents:
clone.append(child.__copy__())
return clone
@property
def is_empty_element(self):
"""Is this tag an empty-element tag? (aka a self-closing tag)
A tag that has contents is never an empty-element tag.
A tag that has no contents may or may not be an empty-element
tag. It depends on the builder used to create the tag. If the
builder has a designated list of empty-element tags, then only
a tag whose name shows up in that list is considered an
empty-element tag.
If the builder has no designated list of empty-element tags,
then any tag with no contents is an empty-element tag.
"""
return len(self.contents) == 0 and self.can_be_empty_element
isSelfClosing = is_empty_element # BS3
@property
def string(self):
"""Convenience property to get the single string within this
PageElement.
TODO It might make sense to have NavigableString.string return
itself.
:return: If this element has a single string child, return
value is that string. If this element has one child tag,
return value is the 'string' attribute of the child tag,
recursively. If this element is itself a string, has no
children, or has more than one child, return value is None.
"""
if len(self.contents) != 1:
return None
child = self.contents[0]
if isinstance(child, NavigableString):
return child
return child.string
@string.setter
def string(self, string):
"""Replace this PageElement's contents with `string`."""
self.clear()
self.append(string.__class__(string))
def _all_strings(self, strip=False, types=(NavigableString, CData)):
"""Yield all strings of certain classes, possibly stripping them.
:param strip: If True, all strings will be stripped before being
yielded.
:types: A tuple of NavigableString subclasses. Any strings of
a subclass not found in this list will be ignored. By
default, this means only NavigableString and CData objects
will be considered. So no comments, processing instructions,
etc.
:yield: A sequence of strings.
"""
for descendant in self.descendants:
if (
(types is None and not isinstance(descendant, NavigableString))
or
(types is not None and type(descendant) not in types)):
continue
if strip:
descendant = descendant.strip()
if len(descendant) == 0:
continue
yield descendant
strings = property(_all_strings)
@property
def stripped_strings(self):
"""Yield all strings in the document, stripping them first.
:yield: A sequence of stripped strings.
"""
for string in self._all_strings(True):
yield string
def get_text(self, separator="", strip=False,
types=(NavigableString, CData)):
"""Get all child strings, concatenated using the given separator.
:param separator: Strings will be concatenated using this separator.
:param strip: If True, strings will be stripped before being
concatenated.
:types: A tuple of NavigableString subclasses. Any strings of
a subclass not found in this list will be ignored. By
default, this means only NavigableString and CData objects
will be considered. So no comments, processing instructions,
stylesheets, etc.
:return: A string.
"""
return separator.join([s for s in self._all_strings(
strip, types=types)])
getText = get_text
text = property(get_text)
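    # Illustrative sketch of get_text() (markup is an example):
    #   soup = BeautifulSoup("<p>Hi, <b>there</b>!</p>", "html.parser")
    #   soup.p.get_text()                  # 'Hi, there!'
    #   soup.p.get_text("|", strip=True)   # 'Hi,|there|!'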
def decompose(self):
"""Recursively destroys this PageElement and its children.
This element will be removed from the tree and wiped out; so
will everything beneath it.
The behavior of a decomposed PageElement is undefined and you
should never use one for anything, but if you need to _check_
whether an element has been decomposed, you can use the
`decomposed` property.
"""
self.extract()
i = self
while i is not None:
n = i.next_element
i.__dict__.clear()
i.contents = []
i._decomposed = True
i = n
def clear(self, decompose=False):
"""Wipe out all children of this PageElement by calling extract()
on them.
:param decompose: If this is True, decompose() (a more
destructive method) will be called instead of extract().
"""
if decompose:
for element in self.contents[:]:
if isinstance(element, Tag):
element.decompose()
else:
element.extract()
else:
for element in self.contents[:]:
element.extract()
def smooth(self):
"""Smooth out this element's children by consolidating consecutive
strings.
This makes pretty-printed output look more natural following a
lot of operations that modified the tree.
"""
# Mark the first position of every pair of children that need
# to be consolidated. Do this rather than making a copy of
# self.contents, since in most cases very few strings will be
# affected.
marked = []
for i, a in enumerate(self.contents):
if isinstance(a, Tag):
# Recursively smooth children.
a.smooth()
            if i == len(self.contents)-1:
                # This is the last item in .contents; there is no
                # following sibling to consolidate it with.
                continue
b = self.contents[i+1]
if (isinstance(a, NavigableString)
and isinstance(b, NavigableString)
and not isinstance(a, PreformattedString)
and not isinstance(b, PreformattedString)
):
marked.append(i)
# Go over the marked positions in reverse order, so that
# removing items from .contents won't affect the remaining
# positions.
for i in reversed(marked):
a = self.contents[i]
b = self.contents[i+1]
b.extract()
n = NavigableString(a+b)
a.replace_with(n)
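    # Illustrative sketch: after tree edits leave adjacent strings behind,
    # smooth() merges them (example values, not part of the API):
    #   soup = BeautifulSoup("<p>a</p>", "html.parser")
    #   soup.p.append("b")
    #   soup.p.contents    # ['a', 'b']
    #   soup.p.smooth()
    #   soup.p.contents    # ['ab']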
def index(self, element):
"""Find the index of a child by identity, not value.
Avoids issues with tag.contents.index(element) getting the
index of equal elements.
:param element: Look for this PageElement in `self.contents`.
"""
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self.attrs.get(key, default)
def get_attribute_list(self, key, default=None):
"""The same as get(), but always returns a list.
:param key: The attribute to look for.
:param default: Use this value if the attribute is not present
on this PageElement.
:return: A list of values, probably containing only a single
value.
"""
value = self.get(key, default)
if not isinstance(value, list):
value = [value]
return value
def has_attr(self, key):
"""Does this PageElement have an attribute with the given name?"""
return key in self.attrs
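    # Illustrative sketch of attribute access (markup is an example):
    #   tag = BeautifulSoup('<a href="/x" class="b c"></a>', "html.parser").a
    #   tag['href']                      # '/x'
    #   tag.get('id', 'none')            # 'none'
    #   tag.get_attribute_list('href')   # ['/x']
    #   tag.has_attr('class')            # True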
def __hash__(self):
return str(self).__hash__()
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the Tag,
and throws an exception if it's not there."""
return self.attrs[key]
def __iter__(self):
"Iterating over a Tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a Tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __bool__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self.attrs[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
self.attrs.pop(key, None)
def __call__(self, *args, **kwargs):
"""Calling a Tag like a function is the same as calling its
find_all() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return self.find_all(*args, **kwargs)
def __getattr__(self, tag):
"""Calling tag.subtag is the same as calling tag.find(name="subtag")"""
#print("Getattr %s.%s" % (self.__class__, tag))
if len(tag) > 3 and tag.endswith('Tag'):
# BS3: soup.aTag -> "soup.find("a")
tag_name = tag[:-3]
warnings.warn(
'.%(name)sTag is deprecated, use .find("%(name)s") instead. If you really were looking for a tag called %(name)sTag, use .find("%(name)sTag")' % dict(
name=tag_name
)
)
return self.find(tag_name)
# We special case contents to avoid recursion.
elif not tag.startswith("__") and not tag == "contents":
return self.find(tag)
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__, tag))
def __eq__(self, other):
"""Returns true iff this Tag has the same name, the same attributes,
and the same contents (recursively) as `other`."""
if self is other:
return True
if (not hasattr(other, 'name') or
not hasattr(other, 'attrs') or
not hasattr(other, 'contents') or
self.name != other.name or
self.attrs != other.attrs or
len(self) != len(other)):
return False
for i, my_child in enumerate(self.contents):
if my_child != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this Tag is not identical to `other`,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding="unicode-escape"):
"""Renders this PageElement as a string.
:param encoding: The encoding to use (Python 2 only).
:return: Under Python 2, a bytestring; under Python 3,
a Unicode string.
"""
if PY3K:
# "The return value must be a string object", i.e. Unicode
return self.decode()
else:
# "The return value must be a string object", i.e. a bytestring.
# By convention, the return value of __repr__ should also be
# an ASCII string.
return self.encode(encoding)
def __unicode__(self):
"""Renders this PageElement as a Unicode string."""
return self.decode()
def __str__(self):
"""Renders this PageElement as a generic string.
:return: Under Python 2, a UTF-8 bytestring; under Python 3,
a Unicode string.
"""
if PY3K:
return self.decode()
else:
return self.encode()
if PY3K:
__str__ = __repr__ = __unicode__
def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
indent_level=None, formatter="minimal",
errors="xmlcharrefreplace"):
"""Render a bytestring representation of this PageElement and its
contents.
:param encoding: The destination encoding.
:param indent_level: Each line of the rendering will be
indented this many spaces. Used internally in
recursive calls while pretty-printing.
:param formatter: A Formatter object, or a string naming one of
the standard formatters.
:param errors: An error handling strategy such as
'xmlcharrefreplace'. This value is passed along into
encode() and its value should be one of the constants
defined by Python.
:return: A bytestring.
"""
# Turn the data structure into Unicode, then encode the
# Unicode.
u = self.decode(indent_level, encoding, formatter)
return u.encode(encoding, errors)
def decode(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Render a Unicode representation of this PageElement and its
contents.
:param indent_level: Each line of the rendering will be
indented this many spaces. Used internally in
recursive calls while pretty-printing.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. This method is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
:param formatter: A Formatter object, or a string naming one of
the standard formatters.
"""
# First off, turn a non-Formatter `formatter` into a Formatter
# object. This will stop the lookup from happening over and
# over again.
if not isinstance(formatter, Formatter):
formatter = self.formatter_for_name(formatter)
attributes = formatter.attributes(self)
attrs = []
for key, val in attributes:
if val is None:
decoded = key
else:
if isinstance(val, list) or isinstance(val, tuple):
val = ' '.join(val)
elif not isinstance(val, str):
val = str(val)
elif (
isinstance(val, AttributeValueWithCharsetSubstitution)
and eventual_encoding is not None
):
val = val.encode(eventual_encoding)
text = formatter.attribute_value(val)
decoded = (
str(key) + '='
+ formatter.quoted_attribute_value(text))
attrs.append(decoded)
close = ''
closeTag = ''
prefix = ''
if self.prefix:
prefix = self.prefix + ":"
if self.is_empty_element:
close = formatter.void_element_close_prefix or ''
else:
closeTag = '</%s%s>' % (prefix, self.name)
pretty_print = self._should_pretty_print(indent_level)
space = ''
indent_space = ''
if indent_level is not None:
indent_space = (' ' * (indent_level - 1))
if pretty_print:
space = indent_space
indent_contents = indent_level + 1
else:
indent_contents = None
contents = self.decode_contents(
indent_contents, eventual_encoding, formatter
)
if self.hidden:
# This is the 'document root' object.
s = contents
else:
s = []
attribute_string = ''
if attrs:
attribute_string = ' ' + ' '.join(attrs)
if indent_level is not None:
# Even if this particular tag is not pretty-printed,
# we should indent up to the start of the tag.
s.append(indent_space)
s.append('<%s%s%s%s>' % (
prefix, self.name, attribute_string, close))
if pretty_print:
s.append("\n")
s.append(contents)
if pretty_print and contents and contents[-1] != "\n":
s.append("\n")
if pretty_print and closeTag:
s.append(space)
s.append(closeTag)
if indent_level is not None and closeTag and self.next_sibling:
# Even if this particular tag is not pretty-printed,
# we're now done with the tag, and we should add a
# newline if appropriate.
s.append("\n")
s = ''.join(s)
return s
def _should_pretty_print(self, indent_level):
"""Should this tag be pretty-printed?
Most of them should, but some (such as <pre> in HTML
documents) should not.
"""
return (
indent_level is not None
and (
not self.preserve_whitespace_tags
or self.name not in self.preserve_whitespace_tags
)
)
def prettify(self, encoding=None, formatter="minimal"):
"""Pretty-print this PageElement as a string.
:param encoding: The eventual encoding of the string. If this is None,
a Unicode string will be returned.
:param formatter: A Formatter object, or a string naming one of
the standard formatters.
:return: A Unicode string (if encoding==None) or a bytestring
(otherwise).
"""
if encoding is None:
return self.decode(True, formatter=formatter)
else:
return self.encode(encoding, True, formatter=formatter)
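    # Illustrative sketch of prettify() (markup is an example):
    #   soup = BeautifulSoup("<a><b>text</b></a>", "html.parser")
    #   print(soup.a.prettify())
    # renders each tag and string on its own line, indented one space
    # per nesting level:
    #   <a>
    #    <b>
    #     text
    #    </b>
    #   </a>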
def decode_contents(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Renders the contents of this tag as a Unicode string.
:param indent_level: Each line of the rendering will be
indented this many spaces. Used internally in
recursive calls while pretty-printing.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. decode_contents() is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
:param formatter: A Formatter object, or a string naming one of
the standard Formatters.
"""
# First off, turn a string formatter into a Formatter object. This
# will stop the lookup from happening over and over again.
if not isinstance(formatter, Formatter):
formatter = self.formatter_for_name(formatter)
pretty_print = (indent_level is not None)
s = []
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.output_ready(formatter)
elif isinstance(c, Tag):
s.append(c.decode(indent_level, eventual_encoding,
formatter))
preserve_whitespace = (
self.preserve_whitespace_tags and self.name in self.preserve_whitespace_tags
)
if text and indent_level and not preserve_whitespace:
text = text.strip()
if text:
if pretty_print and not preserve_whitespace:
s.append(" " * (indent_level - 1))
s.append(text)
if pretty_print and not preserve_whitespace:
s.append("\n")
return ''.join(s)
def encode_contents(
self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Renders the contents of this PageElement as a bytestring.
:param indent_level: Each line of the rendering will be
indented this many spaces. Used internally in
recursive calls while pretty-printing.
        :param encoding: The bytestring will be in this encoding.
:param formatter: A Formatter object, or a string naming one of
the standard Formatters.
:return: A bytestring.
"""
contents = self.decode_contents(indent_level, encoding, formatter)
return contents.encode(encoding)
# Old method for BS3 compatibility
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
"""Deprecated method for BS3 compatibility."""
if not prettyPrint:
indentLevel = None
return self.encode_contents(
indent_level=indentLevel, encoding=encoding)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Look in the children of this PageElement and find the first
PageElement that matches the given criteria.
All find_* methods take a common set of arguments. See the online
documentation for detailed explanations.
:param name: A filter on tag name.
:param attrs: A dictionary of filters on attribute values.
:param recursive: If this is True, find() will perform a
recursive search of this PageElement's children. Otherwise,
only the direct children will be considered.
:param limit: Stop looking after finding this many results.
:kwargs: A dictionary of filters on attribute values.
:return: A PageElement.
:rtype: bs4.element.Tag | bs4.element.NavigableString
"""
r = None
l = self.find_all(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find #BS2
def find_all(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Look in the children of this PageElement and find all
PageElements that match the given criteria.
All find_* methods take a common set of arguments. See the online
documentation for detailed explanations.
:param name: A filter on tag name.
:param attrs: A dictionary of filters on attribute values.
:param recursive: If this is True, find_all() will perform a
recursive search of this PageElement's children. Otherwise,
only the direct children will be considered.
:param limit: Stop looking after finding this many results.
:kwargs: A dictionary of filters on attribute values.
:return: A ResultSet of PageElements.
:rtype: bs4.element.ResultSet
"""
generator = self.descendants
if not recursive:
generator = self.children
return self._find_all(name, attrs, text, limit, generator, **kwargs)
findAll = find_all # BS3
findChildren = find_all # BS2
#Generator methods
@property
def children(self):
"""Iterate over all direct children of this PageElement.
:yield: A sequence of PageElements.
"""
# return iter() to make the purpose of the method clear
return iter(self.contents) # XXX This seems to be untested.
@property
def descendants(self):
"""Iterate over all children of this PageElement in a
breadth-first sequence.
:yield: A sequence of PageElements.
"""
if not len(self.contents):
return
stopNode = self._last_descendant().next_element
current = self.contents[0]
while current is not stopNode:
yield current
current = current.next_element
# CSS selector code
def select_one(self, selector, namespaces=None, **kwargs):
"""Perform a CSS selection operation on the current element.
:param selector: A CSS selector.
:param namespaces: A dictionary mapping namespace prefixes
used in the CSS selector to namespace URIs. By default,
Beautiful Soup will use the prefixes it encountered while
parsing the document.
:param kwargs: Keyword arguments to be passed into SoupSieve's
soupsieve.select() method.
:return: A Tag.
:rtype: bs4.element.Tag
"""
value = self.select(selector, namespaces, 1, **kwargs)
if value:
return value[0]
return None
def select(self, selector, namespaces=None, limit=None, **kwargs):
"""Perform a CSS selection operation on the current element.
This uses the SoupSieve library.
:param selector: A string containing a CSS selector.
:param namespaces: A dictionary mapping namespace prefixes
used in the CSS selector to namespace URIs. By default,
Beautiful Soup will use the prefixes it encountered while
parsing the document.
:param limit: After finding this number of results, stop looking.
:param kwargs: Keyword arguments to be passed into SoupSieve's
soupsieve.select() method.
:return: A ResultSet of Tags.
:rtype: bs4.element.ResultSet
"""
if namespaces is None:
namespaces = self._namespaces
if limit is None:
limit = 0
if soupsieve is None:
raise NotImplementedError(
"Cannot execute CSS selectors because the soupsieve package is not installed."
)
results = soupsieve.select(selector, self, namespaces, limit, **kwargs)
# We do this because it's more consistent and because
# ResultSet.__getattr__ has a helpful error message.
return ResultSet(None, results)
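    # Illustrative sketch of CSS selection (requires the soupsieve
    # package; markup is an example):
    #   soup = BeautifulSoup('<div><p class="x">a</p><p>b</p></div>',
    #                        "html.parser")
    #   soup.select("p.x")         # [<p class="x">a</p>]
    #   soup.select_one("div p")   # <p class="x">a</p>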
# Old names for backwards compatibility
def childGenerator(self):
"""Deprecated generator."""
return self.children
def recursiveChildGenerator(self):
"""Deprecated generator."""
return self.descendants
def has_key(self, key):
"""Deprecated method. This was kind of misleading because has_key()
        (attributes) was different from __contains__ (contents).
has_key() is gone in Python 3, anyway.
"""
warnings.warn('has_key is deprecated. Use has_attr("%s") instead.' % (
key))
return self.has_attr(key)
# Next, a couple classes to represent queries and their results.
class SoupStrainer(object):
"""Encapsulates a number of ways of matching a markup element (tag or
string).
This is primarily used to underpin the find_* methods, but you can
create one yourself and pass it in as `parse_only` to the
`BeautifulSoup` constructor, to parse a subset of a large
document.
"""
def __init__(self, name=None, attrs={}, text=None, **kwargs):
"""Constructor.
The SoupStrainer constructor takes the same arguments passed
into the find_* methods. See the online documentation for
detailed explanations.
:param name: A filter on tag name.
:param attrs: A dictionary of filters on attribute values.
:param text: A filter for a NavigableString with specific text.
:kwargs: A dictionary of filters on attribute values.
"""
self.name = self._normalize_search_value(name)
if not isinstance(attrs, dict):
# Treat a non-dict value for attrs as a search for the 'class'
# attribute.
kwargs['class'] = attrs
attrs = None
if 'class_' in kwargs:
# Treat class_="foo" as a search for the 'class'
# attribute, overriding any non-dict value for attrs.
kwargs['class'] = kwargs['class_']
del kwargs['class_']
if kwargs:
if attrs:
attrs = attrs.copy()
attrs.update(kwargs)
else:
attrs = kwargs
normalized_attrs = {}
for key, value in list(attrs.items()):
normalized_attrs[key] = self._normalize_search_value(value)
self.attrs = normalized_attrs
self.text = self._normalize_search_value(text)
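    # Illustrative sketch: a strainer can restrict parsing to part of a
    # document (markup and names are examples):
    #   only_a = SoupStrainer("a")
    #   soup = BeautifulSoup('<p><a href="/x">x</a></p>', "html.parser",
    #                        parse_only=only_a)
    #   soup.contents   # just the <a> tag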
def _normalize_search_value(self, value):
# Leave it alone if it's a Unicode string, a callable, a
# regular expression, a boolean, or None.
if (isinstance(value, str) or isinstance(value, Callable) or hasattr(value, 'match')
or isinstance(value, bool) or value is None):
return value
# If it's a bytestring, convert it to Unicode, treating it as UTF-8.
if isinstance(value, bytes):
return value.decode("utf8")
# If it's listlike, convert it into a list of strings.
if hasattr(value, '__iter__'):
new_value = []
for v in value:
if (hasattr(v, '__iter__') and not isinstance(v, bytes)
and not isinstance(v, str)):
# This is almost certainly the user's mistake. In the
# interests of avoiding infinite loops, we'll let
# it through as-is rather than doing a recursive call.
new_value.append(v)
else:
new_value.append(self._normalize_search_value(v))
return new_value
        # Otherwise, convert it into a Unicode string.
        return str(value)
def __str__(self):
"""A human-readable representation of this SoupStrainer."""
if self.text:
return self.text
else:
return "%s|%s" % (self.name, self.attrs)
def search_tag(self, markup_name=None, markup_attrs={}):
"""Check whether a Tag with the given name and attributes would
match this SoupStrainer.
Used prospectively to decide whether to even bother creating a Tag
object.
:param markup_name: A tag name as found in some markup.
:param markup_attrs: A dictionary of attributes as found in some markup.
:return: True if the prospective tag would match this SoupStrainer;
False otherwise.
"""
found = None
markup = None
if isinstance(markup_name, Tag):
markup = markup_name
markup_attrs = markup
call_function_with_tag_data = (
isinstance(self.name, Callable)
and not isinstance(markup_name, Tag))
if ((not self.name)
or call_function_with_tag_data
or (markup and self._matches(markup, self.name))
or (not markup and self._matches(markup_name, self.name))):
if call_function_with_tag_data:
match = self.name(markup_name, markup_attrs)
else:
match = True
markup_attr_map = None
for attr, match_against in list(self.attrs.items()):
if not markup_attr_map:
if hasattr(markup_attrs, 'get'):
markup_attr_map = markup_attrs
else:
markup_attr_map = {}
for k, v in markup_attrs:
markup_attr_map[k] = v
attr_value = markup_attr_map.get(attr)
if not self._matches(attr_value, match_against):
match = False
break
if match:
if markup:
found = markup
else:
found = markup_name
if found and self.text and not self._matches(found.string, self.text):
found = None
return found
# For BS3 compatibility.
searchTag = search_tag
def search(self, markup):
"""Find all items in `markup` that match this SoupStrainer.
Used by the core _find_all() method, which is ultimately
called by all find_* methods.
:param markup: A PageElement or a list of them.
"""
# print('looking for %s in %s' % (self, markup))
found = None
# If given a list of items, scan it for a text element that
# matches.
if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, str)):
for element in markup:
if isinstance(element, NavigableString) \
and self.search(element):
found = element
break
# If it's a Tag, make sure its name or attributes match.
# Don't bother with Tags if we're searching for text.
elif isinstance(markup, Tag):
if not self.text or self.name or self.attrs:
found = self.search_tag(markup)
# If it's text, make sure the text matches.
elif isinstance(markup, NavigableString) or \
isinstance(markup, str):
if not self.name and not self.attrs and self._matches(markup, self.text):
found = markup
else:
raise Exception(
"I don't know how to match against a %s" % markup.__class__)
return found
def _matches(self, markup, match_against, already_tried=None):
# print(u"Matching %s against %s" % (markup, match_against))
result = False
if isinstance(markup, list) or isinstance(markup, tuple):
# This should only happen when searching a multi-valued attribute
# like 'class'.
for item in markup:
if self._matches(item, match_against):
return True
# We didn't match any particular value of the multivalue
# attribute, but maybe we match the attribute value when
# considered as a string.
if self._matches(' '.join(markup), match_against):
return True
return False
if match_against is True:
# True matches any non-None value.
return markup is not None
if isinstance(match_against, Callable):
return match_against(markup)
# Custom callables take the tag as an argument, but all
# other ways of matching match the tag name as a string.
original_markup = markup
if isinstance(markup, Tag):
markup = markup.name
# Ensure that `markup` is either a Unicode string, or None.
markup = self._normalize_search_value(markup)
if markup is None:
# None matches None, False, an empty string, an empty list, and so on.
return not match_against
if (hasattr(match_against, '__iter__')
and not isinstance(match_against, str)):
# We're asked to match against an iterable of items.
            # The markup must match at least one item in the
# iterable. We'll try each one in turn.
#
# To avoid infinite recursion we need to keep track of
# items we've already seen.
if not already_tried:
already_tried = set()
for item in match_against:
if item.__hash__:
key = item
else:
key = id(item)
if key in already_tried:
continue
else:
already_tried.add(key)
if self._matches(original_markup, item, already_tried):
return True
else:
return False
# Beyond this point we might need to run the test twice: once against
# the tag's name and once against its prefixed name.
match = False
if not match and isinstance(match_against, str):
# Exact string match
match = markup == match_against
if not match and hasattr(match_against, 'search'):
# Regexp match
return match_against.search(markup)
if (not match
and isinstance(original_markup, Tag)
and original_markup.prefix):
# Try the whole thing again with the prefixed tag name.
return self._matches(
original_markup.prefix + ':' + original_markup.name, match_against
)
return match
class ResultSet(list):
"""A ResultSet is just a list that keeps track of the SoupStrainer
that created it."""
def __init__(self, source, result=()):
"""Constructor.
:param source: A SoupStrainer.
:param result: A list of PageElements.
"""
super(ResultSet, self).__init__(result)
self.source = source
def __getattr__(self, key):
"""Raise a helpful exception to explain a common code fix."""
raise AttributeError(
"ResultSet object has no attribute '%s'. You're probably treating a list of elements like a single element. Did you call find_all() when you meant to call find()?" % key
)
|
listyque/TACTIC-Handler
|
thlib/side/bs4/element.py
|
Python
|
epl-1.0
| 81090
|
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
### These unit tests were moved here from agent_location_choice_model to remove
### a circular dependency with household_location_choice_model_creator.
from opus_core.tests import opus_unittest
from numpy import array, ma, arange, where, zeros, concatenate
from opus_core.resources import Resources
from urbansim.datasets.gridcell_dataset import GridcellDataset
from urbansim.datasets.household_dataset import HouseholdDataset
from urbansim.datasets.job_dataset import JobDataset
from urbansim.models.household_location_choice_model_creator import HouseholdLocationChoiceModelCreator
from urbansim.models.employment_location_choice_model import EmploymentLocationChoiceModel
from urbansim.datasets.job_building_type_dataset import JobBuildingTypeDataset
from opus_core.coefficients import Coefficients
from opus_core.equation_specification import EquationSpecification
from opus_core.datasets.dataset import DatasetSubset
from opus_core.tests.stochastic_test_case import StochasticTestCase
from opus_core.model_group import ModelGroup, ModelGroupMember
from opus_core.storage_factory import StorageFactory
class Test(StochasticTestCase):
def test_do_nothing_if_no_agents(self):
storage = StorageFactory().get_storage('dict_storage')
households_table_name = 'households'
storage.write_table(
table_name = households_table_name,
table_data = {
"household_id": arange(10000)+1,
"grid_id": array(10000*[-1])
}
)
households = HouseholdDataset(in_storage=storage, in_table_name=households_table_name)
gridcells_table_name = 'gridcells'
storage.write_table(
table_name = gridcells_table_name,
table_data = {
"grid_id": arange(100)+1,
"cost":array(50*[100]+50*[1000])
}
)
gridcells = GridcellDataset(in_storage=storage, in_table_name=gridcells_table_name)
# create coefficients and specification
coefficients = Coefficients(names=("costcoef", ), values=(-0.001,))
specification = EquationSpecification(variables=("gridcell.cost", ), coefficients=("costcoef", ))
# run the model
hlcm = HouseholdLocationChoiceModelCreator().get_model(location_set=gridcells, compute_capacity_flag=False,
choices = "opus_core.random_choices_from_index", sample_size_locations = 30)
hlcm.run(specification, coefficients, agent_set = households, agents_index=array([], dtype='int32'), debuglevel=1)
# get results
gridcells.compute_variables(["urbansim.gridcell.number_of_households"],
resources=Resources({"household":households}))
result = gridcells.get_attribute("number_of_households")
# check the individual gridcells
self.assertEqual(ma.allclose(result, zeros((100,)) , rtol=0), True)
def test_agents_go_to_attractive_locations(self):
"""10 gridcells - 5 with cost 100, 5 with cost 1000, no capacity restrictions
100 households
        We set the coefficient value for cost to -0.001. This leads to probability
proportion 0.71 (less costly gridcells) to 0.29 (expensive gridcells)
(derived from the logit formula)
"""
storage = StorageFactory().get_storage('dict_storage')
nhhs = 100
ngcs = 10
        ngcs_attr = ngcs // 2  # integer division; this count sizes lists below
ngcs_noattr = ngcs - ngcs_attr
hh_grid_ids = array(nhhs*[-1])
household_data = {
'household_id': arange(nhhs)+1,
'grid_id': hh_grid_ids
}
gridcell_data = {
'grid_id': arange(ngcs)+1,
'cost':array(ngcs_attr*[100]+ngcs_noattr*[1000])
}
storage.write_table(table_name = 'households', table_data = household_data)
storage.write_table(table_name = 'gridcells', table_data = gridcell_data)
households = HouseholdDataset(in_storage=storage, in_table_name='households')
gridcells = GridcellDataset(in_storage=storage, in_table_name='gridcells')
# create coefficients and specification
coefficients = Coefficients(names=("costcoef", ), values=(-0.001,))
specification = EquationSpecification(variables=("gridcell.cost", ), coefficients=("costcoef", ))
# check the individual gridcells
def run_model():
hlcm = HouseholdLocationChoiceModelCreator().get_model(location_set=gridcells, compute_capacity_flag=False,
choices = "opus_core.random_choices_from_index", sample_size_locations = 8)
hlcm.run(specification, coefficients, agent_set=households, debuglevel=1)
# get results
gridcells.compute_variables(["urbansim.gridcell.number_of_households"],
resources=Resources({"household":households}))
result_more_attractive = gridcells.get_attribute_by_id("number_of_households", arange(ngcs_attr)+1)
result_less_attractive = gridcells.get_attribute_by_id("number_of_households", arange(ngcs_attr+1, ngcs+1))
households.set_values_of_one_attribute(attribute="grid_id", values=hh_grid_ids)
gridcells.delete_one_attribute("number_of_households")
result = concatenate((result_more_attractive, result_less_attractive))
return result
expected_results = array(ngcs_attr*[nhhs*0.71/float(ngcs_attr)] + ngcs_noattr*[nhhs*0.29/float(ngcs_noattr)])
self.run_stochastic_test(__file__, run_model, expected_results, 10)
def run_model_2():
storage = StorageFactory().get_storage('dict_storage')
storage.write_table(table_name = 'households', table_data = household_data)
households = HouseholdDataset(in_storage=storage, in_table_name='households')
storage.write_table(table_name = 'gridcells', table_data = gridcell_data)
gridcells = GridcellDataset(in_storage=storage, in_table_name='gridcells')
hlcm = HouseholdLocationChoiceModelCreator().get_model(location_set=gridcells, compute_capacity_flag=False,
choices = "opus_core.random_choices_from_index", sample_size_locations = 8)
hlcm.run(specification, coefficients, agent_set=households, debuglevel=1)
# get results
gridcells.compute_variables(["urbansim.gridcell.number_of_households"],
resources=Resources({"household":households}))
result_more_attractive = gridcells.get_attribute_by_id("number_of_households", arange(ngcs_attr)+1)
result_less_attractive = gridcells.get_attribute_by_id("number_of_households", arange(ngcs_attr+1, ngcs+1))
return array([result_more_attractive.sum(), result_less_attractive.sum()])
expected_results = array([nhhs*0.71, nhhs*0.29])
self.run_stochastic_test(__file__, run_model_2, expected_results, 10)
def test_agents_do_not_go_to_inferior_locations(self):
"""100 gridcells - 99 with attractiveness 2000, 1 with attractiveness 1, no capacity restrictions
10,000 households
        We set the coefficient value for attractiveness to 1.
"""
storage = StorageFactory().get_storage('dict_storage')
#create households
storage.write_table(table_name='households',
table_data = {
'household_id': arange(10000)+1,
'grid_id': array(10000*[-1])
}
)
households = HouseholdDataset(in_storage=storage, in_table_name='households')
# create gridcells
storage.write_table(table_name='gridcells',
table_data = {
'grid_id': arange(100)+1,
'attractiveness':array(99*[2000]+[1])
}
)
gridcells = GridcellDataset(in_storage=storage, in_table_name='gridcells')
# create coefficients and specification
coefficients = Coefficients(names=("attractcoef", ), values=(1,))
specification = EquationSpecification(variables=("gridcell.attractiveness", ), coefficients=("attractcoef", ))
# run the model
hlcm = HouseholdLocationChoiceModelCreator().get_model(location_set=gridcells, compute_capacity_flag=False,
choices = "opus_core.random_choices_from_index", sample_size_locations = 30)
hlcm.run(specification, coefficients, agent_set = households, debuglevel=1)
# get results
gridcells.compute_variables(["urbansim.gridcell.number_of_households"],
resources=Resources({"household":households}))
result = gridcells.get_attribute_by_id("number_of_households", 100)
# nobody should choose gridcell 100
self.assertEqual(result, 0, "Error: %s is not equal to 0" % (result,))
def xtest_gracefully_handle_empty_choice_sets(self):
storage = StorageFactory().get_storage('dict_storage')
#create households
storage.write_table(table_name='households',
table_data = {
'household_id': arange(10000)+1,
                'grid_id': array(100*list(range(100)))+1
}
)
households = HouseholdDataset(in_storage=storage, in_table_name='households')
# create gridcells
storage.write_table(table_name='gridcells',
table_data = {
'grid_id': arange(100)+1,
'residential_units':array(100*[100])
}
)
gridcells = GridcellDataset(in_storage=storage, in_table_name='gridcells')
# create coefficients and specification
coefficients = Coefficients(names=("dummy",), values=(0,))
specification = EquationSpecification(variables=("gridcell.residential_units",), coefficients=("dummy",))
# run the model
hlcm = HouseholdLocationChoiceModelCreator().get_model( location_set=gridcells,
choices = "opus_core.random_choices_from_index", sample_size_locations = 30)
hlcm.run(specification, coefficients, agent_set=households, debuglevel=1)
# get results
gridcells.compute_variables(["urbansim.gridcell.number_of_households"],
resources=Resources({"household":households}))
result = gridcells.get_attribute_by_id("number_of_households", 100)
# nobody should choose gridcell 100
self.assertEqual(ma.allclose(result.sum(), 0 , rtol=0),
True, "Error: %s is not equal to 0" % (result.sum(),))
def test_unplaced_agents_decrease_available_space(self):
"""Using the household location choice model, create a set of available spaces and
2000 unplaced agents (along with 5000 placed agents). Run the model, and check that
the unplaced agents were placed, and the number of available spaces has decreased"""
storage = StorageFactory().get_storage('dict_storage')
storage.write_table(table_name='households',
table_data = {
'grid_id': array(2000*[0] + 5000*[1]),
'household_id': arange(7000)+1
}
)
storage.write_table(table_name='gridcells',
table_data= {
'residential_units':array(50*[10000]),
'grid_id': arange(50)+1
}
)
households = HouseholdDataset(in_storage=storage, in_table_name='households')
gridcells = GridcellDataset(in_storage=storage, in_table_name='gridcells')
coefficients = Coefficients(names=("dummy",), values=(0.1,))
specification = EquationSpecification(variables=("gridcell.residential_units",), coefficients=("dummy",))
"""need to specify to the household location choice model exactly which households are moving,
because by default it assumes all current households want to move, but in this test,
the 5000 households already in gridcell #1 shouldn't move.
here, we specify that only the unplaced households should be moved."""
agents_index = where(households.get_attribute("grid_id") == 0)[0]
hlcm = HouseholdLocationChoiceModelCreator().get_model(location_set=gridcells,
choices = "opus_core.random_choices_from_index", sample_size_locations = 30)
hlcm.run(specification, coefficients, agent_set=households, agents_index=agents_index, debuglevel=1)
gridcells.compute_variables(["urbansim.gridcell.vacant_residential_units"],
resources=Resources({"household":households}))
vacancies = gridcells.get_attribute("vacant_residential_units")
"""since there were 5000 households already in gridcell #1, and gridcell #1 has
10000 residential units, there should be no more than 5000 vacant residential units
in gridcell #1 after running this model"""
self.assertEqual(vacancies[0] <= 5000,
True, "Error: %d" % (vacancies[0],))
"""there should be exactly 430000 vacant residential units after the model run,
because there were originally 50 gridcells with 10000 residential units each,
and a total of 7000 units are occupied after the run"""
self.assertEqual(sum(vacancies) == 50 * 10000 - 7000,
True, "Error: %d" % (sum(vacancies)))
def test_agents_placed_in_appropriate_types(self):
"""Create 1000 unplaced industrial jobs and 1 commercial job. Allocate 50 commercial
gridcells with enough space for 10 commercial jobs per gridcell. After running the
EmploymentLocationChoiceModel, the 1 commercial job should be placed,
        but the 1000 industrial jobs should remain unplaced
"""
storage = StorageFactory().get_storage('dict_storage')
storage.write_table(table_name='job_building_types',
table_data = {
'id':array([2,1]),
'name': array(['commercial', 'industrial'])
}
)
job_building_types = JobBuildingTypeDataset(in_storage=storage, in_table_name='job_building_types')
storage.write_table(table_name='jobs',
table_data = {
'job_id': arange(1001)+1,
'grid_id': array([0]*1001),
'building_type': array([1]*1000 + [2])
}
)
jobs = JobDataset(in_storage=storage, in_table_name='jobs')
storage.write_table(table_name='gridcells',
table_data = {
'grid_id': arange(50)+1,
'commercial_sqft': array([1000]*50),
'commercial_sqft_per_job': array([100]*50)
}
)
gridcells = GridcellDataset(in_storage=storage, in_table_name='gridcells')
coefficients = Coefficients(names=("dummy",), values=(0.1,))
specification = EquationSpecification(variables=("gridcell.commercial_sqft",), coefficients=("dummy",))
compute_resources = Resources({"job":jobs, "job_building_type": job_building_types})
        agents_index = where(jobs.get_attribute("grid_id") == 0)[0]
unplace_jobs = DatasetSubset(jobs, agents_index)
agents_index = where(unplace_jobs.get_attribute("building_type") == 2)[0]
gridcells.compute_variables(["urbansim.gridcell.number_of_commercial_jobs"],
resources=compute_resources)
commercial_jobs = gridcells.get_attribute("number_of_commercial_jobs")
gridcells.compute_variables(["urbansim.gridcell.number_of_industrial_jobs"],
resources=compute_resources)
industrial_jobs = gridcells.get_attribute("number_of_industrial_jobs")
model_group = ModelGroup(job_building_types, "name")
elcm = EmploymentLocationChoiceModel(ModelGroupMember(model_group,"commercial"), location_set=gridcells,
agents_grouping_attribute = "job.building_type",
choices = "opus_core.random_choices_from_index", sample_size_locations = 30)
elcm.run(specification, coefficients, agent_set = jobs, agents_index=agents_index, debuglevel=1)
gridcells.compute_variables(["urbansim.gridcell.number_of_commercial_jobs"],
resources=compute_resources)
commercial_jobs = gridcells.get_attribute("number_of_commercial_jobs")
gridcells.compute_variables(["urbansim.gridcell.number_of_industrial_jobs"],
resources=compute_resources)
industrial_jobs = gridcells.get_attribute("number_of_industrial_jobs")
self.assertEqual(commercial_jobs.sum() == 1,
True, "Error, there should only be a total of 1 commercial job")
self.assertEqual(industrial_jobs.sum() == 0,
True, "Error, there should be no industrial jobs because there's no space for them")
def test_agents_equally_distributed_across_attractive_locations(self):
"""Create 5000 unplaced households and 50 gridcells with equal attractiveness.
Theoretically, after running the location_choice_model, there should be
        100 households in each gridcell since they're equally attractive, but due to random
sampling there will be a little deviance. The test also checks, if the aggregated probabilities,
i.e. housing demand, are equally distributed.
"""
nhhs = 5000
household_data = {
"household_id": arange(nhhs)+1,
"grid_id": array(nhhs*[-1])
}
gridcell_data = {
"grid_id": arange(50)+1,
"cost":array(50*[1000])
}
coefficients = Coefficients(names=("costcoef", ), values=(-0.001,))
specification = EquationSpecification(variables=("gridcell.cost", ), coefficients=("costcoef", ))
def run_model1():
storage = StorageFactory().get_storage('dict_storage')
storage.write_table(table_name = 'households', table_data = household_data)
households = HouseholdDataset(in_storage=storage, in_table_name='households')
storage.write_table(table_name = 'gridcells', table_data = gridcell_data)
gridcells = GridcellDataset(in_storage=storage, in_table_name='gridcells')
hlcm = HouseholdLocationChoiceModelCreator().get_model(location_set=gridcells, compute_capacity_flag=False,
choices = "opus_core.random_choices_from_index", sample_size_locations = 30)
hlcm.run(specification, coefficients, agent_set = households)
gridcells.compute_variables(["urbansim.gridcell.number_of_households"],
resources=Resources({"household":households}))
return gridcells.get_attribute("number_of_households")
expected_results = array(50*[nhhs/50])
self.run_stochastic_test(__file__, run_model1, expected_results, 10)
def run_model2():
storage = StorageFactory().get_storage('dict_storage')
storage.write_table(table_name = 'households', table_data = household_data)
households = HouseholdDataset(in_storage=storage, in_table_name='households')
storage.write_table(table_name = 'gridcells', table_data = gridcell_data)
gridcells = GridcellDataset(in_storage=storage, in_table_name='gridcells')
hlcm = HouseholdLocationChoiceModelCreator().get_model(location_set=gridcells, compute_capacity_flag=False,
choices = "opus_core.random_choices_from_index", sample_size_locations = 30)
hlcm.run(specification, coefficients, agent_set = households,
run_config=Resources({"demand_string":"gridcell.housing_demand"}))
return gridcells.get_attribute("housing_demand")
#check aggregated demand
expected_results = array(50*[nhhs/50])
self.run_stochastic_test(__file__, run_model2, expected_results, 5)
if __name__=="__main__":
opus_unittest.main()
|
christianurich/VIBe2UrbanSim
|
3rdparty/opus/src/urbansim/tests/test_agent_location_choice_model.py
|
Python
|
gpl-2.0
| 20,625
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def initial_data(apps, schema_editor):
SA = apps.get_model('ship', 'ShipArmour')
SA(name='None', structure=0, armour=0).save()
class Migration(migrations.Migration):
dependencies = [
('ship', '0008_populate_shield'),
]
operations = [
migrations.RunPython(initial_data),
]
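# Note: RunPython without a reverse function makes this data migration
# irreversible; if reversibility is wanted, a no-op reverse can be supplied
# (available since Django 1.8):
#     migrations.RunPython(initial_data, migrations.RunPython.noop)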
|
dwagon/pymoo
|
moo/ship/migrations/0009_populate_armour.py
|
Python
|
gpl-2.0
| 416
|
"""Current version string"""
__version__ = "0.3.2"
|
sneakypete81/pylint-patcher
|
pylint_patcher/_version.py
|
Python
|
gpl-2.0
| 51
|
import numpy as np
import pysftp
from cblparallel.config import * # Various constants such as USERNAME
# Load list of machines
with open('../data/machines.csv', 'r') as f:
machines = [line.strip() for line in f]
# Ask each machine how much CPU is being used
cpus = -np.ones(len(machines))
for (i, machine) in enumerate(machines):
    try:
        machine_connection = pysftp.Connection(host=machine, private_key=LOCAL_TO_REMOTE_KEY_FILE)
        # Sample top twice (-bn2): the first sample reports averages since boot,
        # so only the last Cpu(s) line (from the second sample) is used.
        top_response = machine_connection.execute('top -bn2 -p1')
        relevant_line = None
        for line in reversed(top_response):
            if line[:3] == 'Cpu':
                relevant_line = line
                break
        if relevant_line is None:
            raise ValueError('no Cpu(s) line in top output')
        # Sum the user-space ("us") percentages from the Cpu(s) summary line.
        cpu = sum(float(text[-8:-3]) for text in relevant_line.strip().split(',') if text[-2:] == 'us')
        cpus[i] = cpu
        machine_connection.close()
        print 'CPU usage on %s is %f%%' % (machine, cpu)
    except Exception:
        print 'Could not connect to %s' % machine
# Sort the cpus - determine which are dead
idx = np.argsort(-cpus)
sorted_cpus = cpus[idx]
sorted_machines = []
for i in idx:
sorted_machines.append(machines[i])
dead = sorted_cpus == -1
# Create some csvs of the data
dead_list = 'Machine,\n'
cpus_list = 'Machine,CPU usage\n'
for (machine, is_dead, cpu) in zip(sorted_machines, dead, sorted_cpus):
if is_dead:
dead_list += machine + ',\n'
else:
cpus_list += '%s,%f\n' % (machine, cpu)
# Remove trailing new lines and save
dead_list = dead_list[:-1]
cpus_list = cpus_list[:-1]
with open('../data/cpus.csv', 'w') as f:
f.write(cpus_list)
with open('../data/dead-ssh.csv', 'w') as f:
f.write(dead_list)
|
jamesrobertlloyd/div-f-fear
|
web/python/cpu.py
|
Python
|
gpl-2.0
| 1,646
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# guider.py
import time
import threading
import win32com.client
class CameraThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.cam = None
def run(self):
win32com.client.pythoncom.CoInitialize()
self.cam = win32com.client.Dispatch("ASCOM.SXGuide0.Camera")
print(self.cam.Connected)
self.cam.Connected = False
print(self.cam.Connected)
self.cam.Connected = True
print(self.cam.Connected)
time.sleep(0.1)
self.cam.Connected = False
print(self.cam.Connected)
self.cam = None
win32com.client.pythoncom.CoUninitialize()
if __name__ == '__main__':
    for i in range(10):
        camthread = CameraThread()
        try:
            # run() is called directly (not start()), so the COM calls execute
            # synchronously and any exception can be caught right here.
            camthread.run()
        except Exception as e:
            print('%d failed' % i)
            print(e)
        else:
            print('%d ok' % i)
        time.sleep(i * 5)
|
bamford/control
|
control/minimal_com_test.py
|
Python
|
gpl-2.0
| 994
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
import pytest
from inspire_json_merger.inspire_json_merger import inspire_json_merge
@pytest.mark.parametrize('scenario', [
'arxiv2arxiv', 'pub2arxiv', 'pub2pub'
])
def test_complete_merge(update_fixture_loader, scenario):
root, head, update, expected_conflict, expected_merged = update_fixture_loader.load_test(scenario)
merged, conflict = inspire_json_merge(root, head, update)
assert merged == expected_merged
assert conflict == expected_conflict
|
rikirenz/inspire-json-merger
|
tests/integration/test_json_merger.py
|
Python
|
gpl-2.0
| 1,443
|
#!/usr/bin/env python3
# This is in its own file rather than inside meson.build
# because a) mixing the two is ugly and b) trying to
# make special characters such as \n go through all
# backends is a fool's errand.
import sys, os, subprocess
h_array = ['--fhead',
"#ifndef __PB_UTILS_ENUM_TYPES_H__\n#define __PB_UTILS_ENUM_TYPES_H__\n\n#include <glib-object.h>\n\nG_BEGIN_DECLS\n",
'--fprod',
"\n/* enumerations from \"@filename@\" */\n",
'--vhead',
"GType @enum_name@_get_type (void);\n#define GST_TYPE_@ENUMSHORT@ (@enum_name@_get_type())\n",
'--ftail',
"G_END_DECLS\n\n#endif /* __PB_UTILS_ENUM_TYPES_H__ */"
]
# C-file template (a sketch following the standard glib-mkenums C pattern;
# the #include name is an assumption derived from the guard used in h_array).
c_array = ['--fhead',
           "#include \"pbutils-enumtypes.h\"\n",
           '--fprod',
           "\n/* enumerations from \"@filename@\" */",
           '--vhead',
           "GType\n@enum_name@_get_type (void)\n{\n  static GType etype = 0;\n  if (etype == 0) {\n    static const G@Type@Value values[] = {",
           '--vprod',
           "      { @VALUENAME@, \"@VALUENAME@\", \"@valuenick@\" },",
           '--vtail',
           "      { 0, NULL, NULL }\n    };\n    etype = g_@type@_register_static (\"@EnumName@\", values);\n  }\n  return etype;\n}\n"
           ]
ofilename = sys.argv[1]
headers = sys.argv[2:]
if ofilename.endswith('.h'):
arg_array = h_array
else:
arg_array = c_array
cmd_array = ['glib-mkenums'] + arg_array + headers
#print(cmd_array)
pc = subprocess.Popen(cmd_array, stdout=subprocess.PIPE)
(stdo, _) = pc.communicate()
if pc.returncode != 0:
sys.exit(pc.returncode)
open(ofilename, 'wb').write(stdo)
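# Usage: pbutils_mkenum.py <output .h or .c file> <input header files...>
# The template set is picked from the output file's extension.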
|
jpakkane/gstreamer-plugins-base
|
gst-libs/gst/pbutils/pbutils_mkenum.py
|
Python
|
gpl-2.0
| 1,522
|
import subprocess as sub
from sys import argv
import re
if len(argv) != 2:
print "Please enter a time as argv[1]"
exit()
output = open('/var/log/kern.log', 'r').read()
# timestamp, SRC/DST IPs and SPT/DPT ports of "Connection Limit Reached" lines;
# dots inside the IP octets are escaped so '.' cannot match arbitrary characters
regex = "(Mar [1-9]+\s[0-9]+:[0-9]+:[0-9]+).*Connection Limit Reached.*SRC=([0-9]*\.[0-9]*\.[0-9]*\.[0-9]*) DST=([0-9]*\.[0-9]*\.[0-9]*\.[0-9]*).*SPT=([0-9]*) DPT=([0-9]*)"
delta = int(argv[1])
time_regex = "Mar [0-9]+ [0-9]+:([0-9]+):"
sus_conns = re.findall(regex, output)
init_time = int(re.match(time_regex, sus_conns[-1][0]).group(1))
target = init_time - delta if init_time - delta > 0 else init_time - delta + 60
for grp in reversed(sus_conns):
minutes = int(re.match(time_regex, grp[0]).group(1))
if minutes == target:
break
print grp
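# Example: `python iptables_log_parse.py 5` prints the "Connection Limit Reached"
# entries logged within the 5 minutes before the most recent such entry.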
|
ucsb-seclab/ictf-framework
|
router/iptables_log_parse.py
|
Python
|
gpl-2.0
| 734
|
__author__ = 'wacax'
#libraries
from os import getcwd, chdir, system
from numpy import exp
from pandas import read_csv
from numpy import vstack
#Add user defined functions
from csv2vw import csv_to_vw
from rankEnsemble import kaggle_rank_avg
def sigmoid(x):
return 1 / (1 + exp(-x))
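# vw's logistic-loss predictions are raw margins on (-inf, inf); the sigmoid
# maps them onto (0, 1) so they can be submitted as click probabilities.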
#directories; change directories accordingly
wd = '/home/wacax/Wacax/Kaggle/criteoLabs/CRITEO Display Advertising Challenge/'
dataDir = '/home/wacax/Wacax/Kaggle/criteoLabs/Data/'
ensembleDir = '/home/wacax/Wacax/Kaggle/criteoLabs/Data/EnsembleData/'
hipersearchScriptLoc = '/home/wacax/vowpal_wabbit-7.7/utl/'
vw77Dir = '/home/wacax/vowpal_wabbit-7.7/vowpalwabbit/'
print getcwd()
if getcwd() + '/' != wd:
chdir(wd)
#DNS and Keys
KeypairFile = '/home/wacax/Wacax/AWSCredentials/wacax-key-pair-uswest2.pem '
DNS = 'ec2-54-69-15-255.us-west-2.compute.amazonaws.com'
#Transform the .csv files to vw format
csv_to_vw(dataDir + 'train.csv', dataDir + 'train2.vw', train=True)
csv_to_vw(dataDir + 'test.csv', dataDir + 'test2.vw', train=False)
#csv_to_vw(dataDir + 'train.csv', dataDir + 'train2.vw', invalidFeatures=['Label', 'Id'],
# Label='Label', ID='Id', weights=NULL, train=True)
#csv_to_vw(dataDir + 'test.csv', dataDir + 'test2.vw', invalidFeatures=['Id'],
# Label='Label', ID='Id', weights=NULL, train=False)
# Find the l1-l2 error resulting in the lowest average loss
#vw hypersearch
# for a logistic loss train-set:
l1ValueLog = system(hipersearchScriptLoc + 'vw-hypersearch -L 1e-20 1 vw -q ii --ngram 2 --loss_function logistic'
' --l1 % ' + dataDir + 'train.vw -b 28')
l2ValueLog = system(hipersearchScriptLoc + 'vw-hypersearch -L 1e-20 1 vw -q ii --ngram 2 --loss_function logistic'
' --l2 % ' + dataDir + 'train.vw -b 28')
l1ValueHin = system(hipersearchScriptLoc + 'vw-hypersearch -L 1e-20 1 vw -q ii --ngram 2 --loss_function hinge'
' --l1 % ' + dataDir + 'train.vw -b 28')
l2ValueHin = system(hipersearchScriptLoc + 'vw-hypersearch -L 1e-20 1 vw -q ii --ngram 2 --loss_function hinge'
' --l2 % ' + dataDir + 'train.vw -b 28')
l1ValueSq = system(hipersearchScriptLoc + 'vw-hypersearch 1e-20 1 vw -q ii --ngram 2 --loss_function squared'
' --l1 % ' + dataDir + 'train.vw -b 28')
l2ValueSq = system(hipersearchScriptLoc + 'vw-hypersearch 1e-20 1 vw -q ii --ngram 2 --loss_function squared'
' --l2 % ' + dataDir + 'train.vw -b 28')
l1ValueNN = system(hipersearchScriptLoc + 'vw-hypersearch -L 1e-20 1 vw -q ii --ngram 2 --loss_function logistic'
' --l1 % ' + dataDir + 'train.vw -b 28 --adaptive --invariant')
l2ValueNN = system(hipersearchScriptLoc + 'vw-hypersearch -L 1e-20 1 vw -q ii --ngram 2 --loss_function logistic'
' --l2 % ' + dataDir + 'train.vw -b 28 --adaptive --invariant')
# learning-rate (-l) search; stored under its own (hypothetical) name so the
# --l2 search result above is not clobbered
lrValueNN = system(hipersearchScriptLoc + 'vw-hypersearch -L 1e-20 1 vw -q ii --ngram 2 --loss_function logistic'
                   ' -l % ' + dataDir + 'train.vw -b 28 --adaptive --invariant')
#NN hyperparameter search:
#raw input
lValueNN = system(hipersearchScriptLoc + 'vw-hypersearch 1e-20 1 vw --loss_function logistic -l % '
+ dataDir + 'train.vw --nn 10 --inpass -b 28')
#Send Train.vw file
#zip it first to reduce sending time
system('gzip -c -9 ' + dataDir + 'train.vw > ' + dataDir + 'train.gz')
system('gzip -c -9 ' + dataDir + 'test.vw > ' + dataDir + 'test.gz')
#send train.gz to the remote instance
system('scp -i ' + KeypairFile + dataDir + 'train.gz' + ' ubuntu@' + DNS + ':')
#send vowpal wabbit source to the remote instance
system('scp -i ' + KeypairFile + dataDir + 'vowpal_wabbit-7.7.tar.gz' + ' ubuntu@' + DNS + ':')
#MODELING
#Training VW:
#Logistic Regression with quadratic numerical features
system(vw77Dir + 'vw ' + dataDir + 'train.vw -f ' + dataDir + 'modelLogQallNgram2.model --loss_function logistic '
'-q ii --ngram 2 -b 28')
system(vw77Dir + 'vw ' + dataDir + 'train.vw -f ' + dataDir + 'modelLogQallNgram2L1.model --loss_function logistic '
'-q ii --ngram 2 -b 28 --l1 7.97443e-20')
system(vw77Dir + 'vw ' + dataDir + 'train.vw -f ' + dataDir + 'modelLogQallNgram2L2.model --loss_function logistic '
'-q ii --ngram 2 -b 28 --l2 1.51475e-14')
system(vw77Dir + 'vw ' + dataDir + 'train.vw -f ' + dataDir + 'modelLogQallNgram2L1L2.model --loss_function logistic '
'-q ii --ngram 2 -b 28 --l1 7.97443e-20 --l2 1.51475e-14')
#Hinge Regression with quadratic numerical features
system(vw77Dir + 'vw ' + dataDir + 'train.vw -f ' + dataDir + 'modelHinQallNgram2.model --loss_function hinge '
'-q ii --ngram 2 -b 28')
system(vw77Dir + 'vw ' + dataDir + 'train.vw -f ' + dataDir + 'modelHinQallNgram2L1.model --loss_function hinge '
'-q ii --ngram 2 -b 28 --l1 4.69724e-19')
system(vw77Dir + 'vw ' + dataDir + 'train.vw -f ' + dataDir + 'modelHinQallNgram2L2.model --loss_function hinge '
'-q ii --ngram 2 -b 28 --l2 1.51475e-14')
system(vw77Dir + 'vw ' + dataDir + 'train.vw -f ' + dataDir + 'modelHinQallNgram2L1L2.model --loss_function hinge '
'-q ii --ngram 2 -b 28 --l1 2.14195e-15 --l2 1.51475e-14')
#Squared Regression with quadratic numerical features
system(vw77Dir + 'vw ' + dataDir + 'train.vw -f ' + dataDir + 'modelSqQallNgram2.model --loss_function squared '
'-q ii --ngram 2 -b 28')
system(vw77Dir + 'vw ' + dataDir + 'train.vw -f ' + dataDir + 'modelSqQallNgram2L1.model --loss_function squared '
'-q ii --ngram 2 -b 28 --l1 2.14195e-15')
system(vw77Dir + 'vw ' + dataDir + 'train.vw -f ' + dataDir + 'modelSqQallNgram2L2.model --loss_function squared '
'-q ii --ngram 2 -b 28 --l2 6.75593e-09')
system(vw77Dir + 'vw ' + dataDir + 'train.vw -f ' + dataDir + 'modelSqQallNgram2L1L2.model --loss_function squared '
'-q ii --ngram 2 -b 28 --l1 2.14195e-15 --l2 6.75593e-09')
#Neural Networks
#Training VW:
system(vw77Dir + 'vw ' + dataDir + 'train.vw -f ' + dataDir + 'NN.model --loss_function logistic'
' --nn 8 --inpass -b 28 -q ii --ngram 2 --adaptive --invariant')
#Testing VW:
#LogLoss
system(vw77Dir + 'vw ' + dataDir + 'test.vw -t -i ' + dataDir + 'modelLogQallNgram2.model -p ' + dataDir + 'LogQallNgram2.txt')
system(vw77Dir + 'vw ' + dataDir + 'test.vw -t -i ' + dataDir + 'modelLogQallNgram2L1.model -p ' + dataDir + 'LogQallNgram2L1.txt')
system(vw77Dir + 'vw ' + dataDir + 'test.vw -t -i ' + dataDir + 'modelLogQallNgram2L2.model -p ' + dataDir + 'LogQallNgram2L2.txt')
system(vw77Dir + 'vw ' + dataDir + 'test.vw -t -i ' + dataDir + 'modelLogQallNgram2L1L2.model -p ' + dataDir + 'LogQallNgram2L1L2.txt')
#Hinge
system(vw77Dir + 'vw ' + dataDir + 'test.vw -t -i ' + dataDir + 'modelHinQallNgram2.model -p ' + dataDir + 'HinQallNgram2.txt')
system(vw77Dir + 'vw ' + dataDir + 'test.vw -t -i ' + dataDir + 'modelHinQallNgram2L1.model -p ' + dataDir + 'HinQallNgram2L1.txt')
system(vw77Dir + 'vw ' + dataDir + 'test.vw -t -i ' + dataDir + 'modelHinQallNgram2L2.model -p ' + dataDir + 'HinQallNgram2L2.txt')
system(vw77Dir + 'vw ' + dataDir + 'test.vw -t -i ' + dataDir + 'modelHinQallNgram2L1L2.model -p ' + dataDir + 'HinQallNgram2L1L2.txt')
#Squared
system(vw77Dir + 'vw ' + dataDir + 'test.vw -t -i ' + dataDir + 'modelSqQallNgram2.model -p ' + dataDir + 'SqQallNgram2.txt')
system(vw77Dir + 'vw ' + dataDir + 'test.vw -t -i ' + dataDir + 'modelSqQallNgram2L1.model -p ' + dataDir + 'SqQQallNgram2L1.txt')
system(vw77Dir + 'vw ' + dataDir + 'test.vw -t -i ' + dataDir + 'modelSqQallNgram2L2.model -p ' + dataDir + 'SqQallNgram2L2.txt')
system(vw77Dir + 'vw ' + dataDir + 'test.vw -t -i ' + dataDir + 'modelSqQallNgram2L1L2.model -p ' + dataDir + 'SqQallNgram2L1L2.txt')
#Neural Networks
system(vw77Dir + 'vw ' + dataDir + 'test.vw -t -i ' + dataDir + 'NN.model -p ' + dataDir + 'NN100.txt')
#Make Kaggle .csv
submissionTemplate = read_csv(dataDir + 'random_submission.csv', index_col=False)
#Logistic
vwTextOutputLog = read_csv(dataDir + 'LogQallNgram2.txt', sep=' ', header=None)
submissionTemplate['Predicted'] = sigmoid(vwTextOutputLog.ix[:, 0])
submissionTemplate.to_csv(ensembleDir + 'PredictionX.csv', index=False)
#Hinge
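# hinge predictions are unbounded margins, so they are min-max scaled onto
# [0, 1] below to be comparable with the sigmoid-transformed logistic scores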
vwTextOutputHin = read_csv(dataDir + 'HinQallNgram2.txt', sep=' ', header=None)
HingeOutput = (vwTextOutputHin.ix[:, 0]).as_matrix()
HingeOutputSTD = (HingeOutput - HingeOutput.min(axis=0)) / (HingeOutput.max(axis=0) - HingeOutput.min(axis=0))
Hingescaled = HingeOutputSTD / (1. - 0.) + 0.
submissionTemplate['Predicted'] = Hingescaled
submissionTemplate.to_csv(ensembleDir + 'PredictionXV.csv', index=False)
#Squared
vwTextOutputSq = read_csv(dataDir + 'SqQallNgram2.txt', sep=' ', header=None)
SqOutput = (vwTextOutputSq.ix[:, 0]).as_matrix()
SqOutputSTD = (SqOutput - SqOutput.min(axis=0)) / (SqOutput.max(axis=0) - SqOutput.min(axis=0))
Sqscaled = SqOutputSTD / (1. - 0.) + 0.
submissionTemplate['Predicted'] = Sqscaled
submissionTemplate.to_csv(ensembleDir + 'PredictionXX.csv', index=False)
#Simple Ensemble (Predictions average)
ensembleAvg = vstack((sigmoid(vwTextOutputLog.ix[:, 0]).as_matrix(), Hingescaled, Sqscaled)).T
submissionTemplate['Predicted'] = ensembleAvg.mean(axis=1)
submissionTemplate.to_csv(ensembleDir + 'PredictionXXV.csv', index=False)
#Simple Ensemble
vwTextOutputLog1 = read_csv(dataDir + 'LogQallNgram2.txt', sep=' ', header=None)
vwTextOutputLog2 = read_csv(dataDir + 'LogQallNgram2L1.txt', sep=' ', header=None)
vwTextOutputLog3 = read_csv(dataDir + 'LogQallNgram2L2.txt', sep=' ', header=None)
vwTextOutputLog4 = read_csv(dataDir + 'LogQallNgram2L1L2.txt', sep=' ', header=None)
ensembleAvgLog = vstack((sigmoid(vwTextOutputLog1.ix[:, 0]).as_matrix(),
sigmoid(vwTextOutputLog2.ix[:, 0]).as_matrix(),
sigmoid(vwTextOutputLog3.ix[:, 0]).as_matrix(),
sigmoid(vwTextOutputLog4.ix[:, 0]).as_matrix(),
Hingescaled, Sqscaled)).T
submissionTemplate['Predicted'] = ensembleAvgLog.mean(axis=1)
submissionTemplate.to_csv(dataDir + 'SimpleEnsembleLogHinSq.csv', index=False)
#Ranked Ensemble (Ranked Average)
kaggle_rank_avg(ensembleDir + '*.csv', dataDir + 'RankEnsembleFull.csv')
submissionRankTemplate = read_csv(dataDir + 'RankEnsembleFull.csv', index_col=False)
submissionTemplate['Predicted'] = ensembleAvg.mean(axis=1)
|
wacax/CRITEO
|
CriteoMain.py
|
Python
|
gpl-2.0
| 11,207
|
import datetime
import pandas_datareader.data as web
#Class for basic stock operations
class Stock():
"""Tipo de datos basico para calculo estadistico de acciones"""
def __init__(self):
fecha = datetime.datetime.now()
anio_anterior = int(fecha.strftime("%Y"))-1
fecha_anterior = fecha.replace(year=anio_anterior)
self.hasta = fecha.strftime("%m/%d/%Y")
self.desde = fecha_anterior.strftime("%m/%d/%Y")
self.papel = 'iar'
self.periodo = 42
self.precio_compra = None
self.verbose = None
self.ultimo_cierre = 0
    #fetches stock data from Google Finance
def trae_datos(self):
"""Obtiene datos desde google finance"""
papel = 'bcba:' + self.papel
datos = web.DataReader(papel, data_source='google', start=self.desde, end=self.hasta)
self.ultimo_cierre = datos.ix[-1]['Close']
return datos
class Btc():
"""Tipo de datos btc"""
def __init__(self):
        #https://api.coindesk.com/v1/bpi/historical/close.json?start=2016-08-28&end=2017-08-28&currency=btc
fecha = datetime.datetime.now()
anio_anterior = int(fecha.strftime("%Y"))-1
fecha_anterior = fecha.replace(year=anio_anterior)
# self.hasta = fecha.strftime("%m/%d/%Y")
self.hasta = fecha.strftime("%Y-%m-%d")
# self.desde = fecha_anterior.strftime("%m/%d/%Y")
self.desde = fecha_anterior.strftime("%Y-%m-%d")
# self.papel = 'iar'
self.periodo = 42
self.precio_compra = None
self.verbose = None
self.ultimo_cierre = 0
def trae_datos(self):
"""Obtiene datos desde coindesk"""
import json
import requests
        #https://api.coindesk.com/v1/bpi/historical/close.json?start=2016-08-28&end=2017-08-28&currency=btc
host = "https://api.coindesk.com/v1/bpi/historical/close.json"
query = "?start=" + self.desde + "&end=" + self.hasta + "¤cy=btc"
get_host = host + query
r = requests.get(get_host)
        j = r.json()
        datos = []
        # iterate the date keys in chronological order: JSON object keys carry no
        # guaranteed ordering, and ISO date strings sort chronologically
        for i in sorted(j['bpi']):
            datos.append(j['bpi'][i])
        self.ultimo_cierre = datos[-1]
        return datos
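# Minimal usage sketch (requires network access):
#   btc = Btc()
#   closes = btc.trae_datos()
#   print(btc.ultimo_cierre)  # most recent daily close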
|
supermpm/c2monac
|
stockdefs.py
|
Python
|
gpl-2.0
| 2,369
|
#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2017 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys, traceback
import Ice
slice_dir = Ice.getSliceDir()
if not slice_dir:
print(sys.argv[0] + ': Slice directory not found.')
sys.exit(1)
Ice.loadSlice('"-I' + slice_dir + '" Test.ice')
import Test, TestI
def run(args, communicator):
communicator.getProperties().setProperty("TestAdapter.Endpoints", "default -p 12010:udp")
adapter = communicator.createObjectAdapter("TestAdapter")
adapter.add(TestI.MyDerivedClassI(), Ice.stringToIdentity("test"))
adapter.activate()
communicator.waitForShutdown()
return True
try:
initData = Ice.InitializationData()
initData.properties = Ice.createProperties(sys.argv)
#
    # It's possible to have batch oneway requests dispatched after the
    # adapter is deactivated due to thread scheduling, so we suppress
    # this warning.
#
    initData.properties.setProperty("Ice.Warn.Dispatch", "0")
with Ice.initialize(sys.argv, initData) as communicator:
status = run(sys.argv, communicator)
except:
traceback.print_exc()
status = False
sys.exit(not status)
|
ljx0305/ice
|
python/test/Ice/operations/Server.py
|
Python
|
gpl-2.0
| 1,428
|
import glob
import os
from pathlib import Path
import platform
import re
import shutil
import subprocess
from rpmlint.config import Config
from rpmlint.pkg import FakePkg, Pkg
import rpmlint.spellcheck
def testpath():
return Path(os.environ.get('TESTPATH', Path(__file__).parent))
TEST_CONFIG = [testpath() / 'configs/test.config']
CONFIG = Config(TEST_CONFIG)
# predicates used for pytest.mark.skipif decorators
IS_X86_64 = platform.machine() == 'x86_64'
IS_I686 = re.match('i[3456]86', platform.machine())
HAS_32BIT_GLIBC = glob.glob('/lib/ld-linux.so.*')
HAS_CHECKBASHISMS = shutil.which('checkbashisms')
HAS_DASH = shutil.which('dash')
HAS_DESKTOP_FILE_UTILS = shutil.which('desktop-file-validate')
HAS_APPSTREAM_GLIB = shutil.which('appstream-util')
RPMDB_PATH = subprocess.run(['rpm', '--eval', '%_dbpath'], encoding='utf8', stdout=subprocess.PIPE).stdout
HAS_RPMDB = RPMDB_PATH and Path(RPMDB_PATH.strip()).exists()
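# rpm prints the macro name itself when %_dbpath is undefined, hence the extra
# existence check on the expanded path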
def _has_dictionary(language):
if not rpmlint.spellcheck.ENCHANT:
return False
spell = rpmlint.spellcheck.Spellcheck()
spell._init_checker(language)
return spell._enchant_checkers.get(language)
HAS_ENGLISH_DICTIONARY = _has_dictionary('en_US')
HAS_CZECH_DICTIONARY = _has_dictionary('cs_CZ')
def get_tested_path(path):
return testpath() / path
def get_tested_package(name, testdir):
filename = Path(name).name + '-*.rpm'
candidates = list(get_tested_path(name).parent.glob(filename))
assert len(candidates) == 1
return Pkg(candidates[0], testdir)
def get_tested_spec_package(name):
filename = Path(name).name + '.spec'
candidates = list(get_tested_path(name).parent.glob(filename))
assert len(candidates) == 1
return FakePkg(candidates[0])
|
rpm-software-management/rpmlint
|
test/Testing.py
|
Python
|
gpl-2.0
| 1,744
|
"""
resolveurl Kodi Addon
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import json
from lib import helpers
from resolveurl import common
from resolveurl.common import i18n
from resolveurl.resolver import ResolveUrl, ResolverError
logger = common.log_utils.Logger.get_logger(__name__)
logger.disable()
API_BASE_URL = 'https://api.fruithosted.net'
INFO_URL = API_BASE_URL + '/streaming/info'
GET_URL = API_BASE_URL + '/streaming/get?file={media_id}'
FILE_URL = API_BASE_URL + '/file/info?file={media_id}'
class StreamangoResolver(ResolveUrl):
name = "streamango"
domains = ['streamango.com', 'streamcherry.com', 'fruitstreams.com', 'fruitadblock.net', 'fruithosted.net', 'fruithosts.net']
pattern = '(?://|\.)((?:stream(?:ango|cherry)|(?:fruit(?:streams|adblock|hosts)))\.(?:com|net))/(?:v/d|f|embed)/([0-9a-zA-Z]+)'
def __init__(self):
self.net = common.Net()
self.headers = {'User-Agent': common.RAND_UA}
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
html = self.net.http_GET(web_url, headers=self.headers).content
if html:
encoded = re.search('''srces\.push\(\s*{type:"video/mp4",src:\w+\('([^']+)',(\d+)''', html)
if encoded:
source = self.decode(encoded.group(1), int(encoded.group(2)))
if source:
source = "http:%s" % source if source.startswith("//") else source
source = source.split("/")
if not source[-1].isdigit():
source[-1] = re.sub('[^\d]', '', source[-1])
source = "/".join(source)
self.headers.update({'Referer': web_url})
return source + helpers.append_headers(self.headers)
try:
if not self.__file_exists(media_id):
raise ResolverError('File Not Available')
video_url = self.__check_auth(media_id)
if not video_url:
video_url = self.__auth_ip(media_id)
except ResolverError:
raise
if video_url:
return video_url + helpers.append_headers(self.headers)
else:
raise ResolverError(i18n('no_ip_authorization'))
def __file_exists(self, media_id):
js_data = self.__get_json(FILE_URL.format(media_id=media_id))
return js_data.get('result', {}).get(media_id, {}).get('status') == 200
def __auth_ip(self, media_id):
js_data = self.__get_json(INFO_URL)
pair_url = js_data.get('result', {}).get('auth_url', '')
if pair_url:
pair_url = pair_url.replace('\/', '/')
header = i18n('stream_auth_header')
line1 = i18n('auth_required')
line2 = i18n('visit_link')
line3 = i18n('click_pair').decode('utf-8') % pair_url
with common.kodi.CountdownDialog(header, line1, line2, line3) as cd:
return cd.start(self.__check_auth, [media_id])
def __check_auth(self, media_id):
try:
js_data = self.__get_json(GET_URL.format(media_id=media_id))
except ResolverError as e:
            status, msg = e.args
if status == 403:
return
else:
raise ResolverError(msg)
return js_data.get('result', {}).get('url')
def __get_json(self, url):
result = self.net.http_GET(url, headers=self.headers).content
common.logger.log(result)
js_result = json.loads(result)
if js_result['status'] != 200:
raise ResolverError(js_result['status'], js_result['msg'])
return js_result
def decode(self, encoded, code):
_0x59b81a = ""
k = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/='
k = k[::-1]
count = 0
for index in range(0, len(encoded) - 1):
while count <= len(encoded) - 1:
_0x4a2f3a = k.index(encoded[count])
count += 1
_0x29d5bf = k.index(encoded[count])
count += 1
_0x3b6833 = k.index(encoded[count])
count += 1
_0x426d70 = k.index(encoded[count])
count += 1
_0x2e4782 = ((_0x4a2f3a << 2) | (_0x29d5bf >> 4))
_0x2c0540 = (((_0x29d5bf & 15) << 4) | (_0x3b6833 >> 2))
_0x5a46ef = ((_0x3b6833 & 3) << 6) | _0x426d70
_0x2e4782 = _0x2e4782 ^ code
_0x59b81a = str(_0x59b81a) + chr(_0x2e4782)
if _0x3b6833 != 64:
_0x59b81a = str(_0x59b81a) + chr(_0x2c0540)
if _0x3b6833 != 64:
_0x59b81a = str(_0x59b81a) + chr(_0x5a46ef)
return _0x59b81a
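    # decode() above is a port of the site's obfuscated JavaScript: a base64
    # variant read through a reversed alphabet, with the first byte of each
    # decoded triple XOR-ed against `code`. The outer for-loop is effectively
    # vestigial; the inner while consumes the whole string on its first pass.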
def get_url(self, host, media_id):
return self._default_get_url(host, media_id, 'https://{host}/embed/{media_id}')
@classmethod
def isPopup(self):
return True
|
repotvsupertuga/tvsupertuga.repository
|
script.module.resolveurl/lib/resolveurl/plugins/streamango.py
|
Python
|
gpl-2.0
| 5,637
|
#!/usr/bin/python
import os
import getpass
from selenium import webdriver
import login
import training
import squad
########################################################################################
driver = webdriver.PhantomJS("lib/phantomjs")
########################################################################################
if not os.path.exists("lib"):
os.makedirs("lib")
if not os.path.exists("config"):
os.makedirs("config")
if not os.path.exists("squad"):
os.makedirs("squad")
if not os.path.exists("training_reports"):
os.makedirs("training_reports")
if not os.path.exists("errors"):
os.makedirs("errors")
if not os.path.exists("upload"):
os.makedirs("upload")
if not os.path.exists("upload/training_reports"):
os.makedirs("upload/training_reports")
if not os.path.exists("upload/squad"):
os.makedirs("upload/squad")
if login.login(driver):
training.training(driver)
squad.squad(driver)
driver.close()
driver.quit()
|
Mariusz-v7/MZCreeper
|
main.py
|
Python
|
gpl-2.0
| 985
|
from qgis.gui import QgsMapMouseEvent
from qgis.core import QgsWkbTypes, QgsPointXY
def point_from_event(event: QgsMapMouseEvent, snapping: bool) -> QgsPointXY:
"""
Returns the point from the mouse canvas event. If snapping is enabled it will be
    snapped using the snapping settings.
    :param event: The map mouse event.
    :param snapping: If True, snap the point according to the canvas snapping settings.
:return: Point for the map canvas event.
"""
if snapping:
point = event.snapPoint()
else:
point = event.originalMapPoint()
return point
def setRubberBand(canvas, selectRect, rubberBand):
transform = canvas.getCoordinateTransform()
lowerleft = transform.toMapCoordinates(selectRect.left(), selectRect.bottom())
upperright = transform.toMapCoordinates(selectRect.right(), selectRect.top())
    if rubberBand:
        rubberBand.reset(QgsWkbTypes.PolygonGeometry)
        rubberBand.addPoint(lowerleft, False)
        rubberBand.addPoint(QgsPointXY(upperright.x(), lowerleft.y()), False)
        rubberBand.addPoint(upperright, False)
        # the final point passes doUpdate=True to trigger a single redraw
        rubberBand.addPoint(QgsPointXY(lowerleft.x(), upperright.y()), True)
|
DMS-Aus/Roam
|
src/roam/maptools/maptoolutils.py
|
Python
|
gpl-2.0
| 1,092
|
from ConfigParser import SafeConfigParser
from webradio.xdg import get_config_filename
import os
class Configuration(object):
def __init__(self):
self.__parser = SafeConfigParser()
self.__parser.add_section('WebRadio')
self.read()
def read(self, target=None):
if target is None:
target = self.filename
self.__parser.read(target)
def write(self, target=None):
if target is None:
target = self.filename
        if isinstance(target, str):
            confdir = os.path.dirname(target)
            if not os.path.isdir(confdir):
                os.makedirs(confdir)
            target = file(target, 'w')
self.__parser.write(target)
def _get(self, section, key, default=None):
if not section:
section = 'WebRadio'
if self.__parser.has_option(section, key):
return self.__parser.get(section, key)
return default
def _set(self, section, key, value):
if not section:
section = 'WebRadio'
return self.__parser.set(section, key, value)
filename = property(
fget=lambda self: get_config_filename('settings'))
tags = property(
fget=lambda self: self._get(None, 'tags'),
fset=lambda self, value: self._set(None, 'tags', value))
channel_uri = property(
fget=lambda self: self._get(None, 'channel-uri'),
fset=lambda self, value: self._set(None, 'channel-uri', value))
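# Minimal usage sketch (attribute values are illustrative):
#   config = Configuration()
#   config.tags = 'jazz,ambient'
#   config.write()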
|
hasselmm/webradio
|
webradio/config.py
|
Python
|
gpl-2.0
| 1,517
|
"""SCons.SConf
Autoconf-like configuration support.
In other words, SConf allows one to run tests on the build machine to detect
capabilities of the system and act on the results: generate config
files, header files for C/C++, update variables in the environment.
Tests on the build system can detect if the compiler sees header files, if
libraries are installed, if some command line options are supported, etc.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/SConf.py rel_2.3.5:3329:275e75118ad4 2015/06/20 11:18:26 bdbaddog"
import SCons.compat
import io
import os
import re
import sys
import traceback
import SCons.Action
import SCons.Builder
import SCons.Errors
import SCons.Job
import SCons.Node.FS
import SCons.Taskmaster
import SCons.Util
import SCons.Warnings
import SCons.Conftest
from SCons.Debug import Trace
# Turn off the Conftest error logging
SCons.Conftest.LogInputFiles = 0
SCons.Conftest.LogErrorMessages = 0
# The current build type ('clean', 'help', or None); set via SetBuildType()
build_type = None
build_types = ['clean', 'help']
def SetBuildType(type):
global build_type
build_type = type
# to be set, if we are in dry-run mode
dryrun = 0
AUTO=0 # use SCons dependency scanning for up-to-date checks
FORCE=1 # force all tests to be rebuilt
CACHE=2 # force all tests to be taken from cache (raise an error, if necessary)
cache_mode = AUTO
def SetCacheMode(mode):
"""Set the Configure cache mode. mode must be one of "auto", "force",
or "cache"."""
global cache_mode
if mode == "auto":
cache_mode = AUTO
elif mode == "force":
cache_mode = FORCE
elif mode == "cache":
cache_mode = CACHE
else:
raise ValueError("SCons.SConf.SetCacheMode: Unknown mode " + mode)
progress_display = SCons.Util.display # will be overwritten by SCons.Script
def SetProgressDisplay(display):
"""Set the progress display to use (called from SCons.Script)"""
global progress_display
progress_display = display
SConfFS = None
_ac_build_counter = 0 # incremented, whenever TryBuild is called
_ac_config_logs = {} # all config.log files created in this build
_ac_config_hs = {} # all config.h files created in this build
sconf_global = None # current sconf object
def _createConfigH(target, source, env):
t = open(str(target[0]), "w")
defname = re.sub('[^A-Za-z0-9_]', '_', str(target[0]).upper())
t.write("""#ifndef %(DEFNAME)s_SEEN
#define %(DEFNAME)s_SEEN
""" % {'DEFNAME' : defname})
t.write(source[0].get_contents())
t.write("""
#endif /* %(DEFNAME)s_SEEN */
""" % {'DEFNAME' : defname})
t.close()
def _stringConfigH(target, source, env):
return "scons: Configure: creating " + str(target[0])
def NeedConfigHBuilder():
    return len(_ac_config_hs) > 0
def CreateConfigHBuilder(env):
"""Called if necessary just before the building targets phase begins."""
action = SCons.Action.Action(_createConfigH,
_stringConfigH)
sconfigHBld = SCons.Builder.Builder(action=action)
env.Append( BUILDERS={'SConfigHBuilder':sconfigHBld} )
for k in _ac_config_hs.keys():
env.SConfigHBuilder(k, env.Value(_ac_config_hs[k]))
class SConfWarning(SCons.Warnings.Warning):
pass
SCons.Warnings.enableWarningClass(SConfWarning)
# some error definitions
class SConfError(SCons.Errors.UserError):
def __init__(self,msg):
SCons.Errors.UserError.__init__(self,msg)
class ConfigureDryRunError(SConfError):
"""Raised when a file or directory needs to be updated during a Configure
process, but the user requested a dry-run"""
def __init__(self,target):
if not isinstance(target, SCons.Node.FS.File):
msg = 'Cannot create configure directory "%s" within a dry-run.' % str(target)
else:
msg = 'Cannot update configure test "%s" within a dry-run.' % str(target)
SConfError.__init__(self,msg)
class ConfigureCacheError(SConfError):
"""Raised when a use explicitely requested the cache feature, but the test
is run the first time."""
def __init__(self,target):
SConfError.__init__(self, '"%s" is not yet built and cache is forced.' % str(target))
# define actions for building text files
def _createSource( target, source, env ):
fd = open(str(target[0]), "w")
fd.write(source[0].get_contents())
fd.close()
def _stringSource( target, source, env ):
return (str(target[0]) + ' <-\n |' +
source[0].get_contents().replace( '\n', "\n |" ) )
class SConfBuildInfo(SCons.Node.FS.FileBuildInfo):
"""
Special build info for targets of configure tests. Additional members
are result (did the builder succeed last time?) and string, which
contains messages of the original build phase.
"""
result = None # -> 0/None -> no error, != 0 error
string = None # the stdout / stderr output when building the target
def set_build_result(self, result, string):
self.result = result
self.string = string
class Streamer(object):
"""
'Sniffer' for a file-like writable object. Similar to the unix tool tee.
"""
def __init__(self, orig):
self.orig = orig
self.s = io.StringIO()
def write(self, str):
if self.orig:
self.orig.write(str)
try:
self.s.write(str)
except TypeError as e:
# "unicode argument expected" bug in IOStream (python 2.x)
self.s.write(str.decode())
def writelines(self, lines):
for l in lines:
self.write(l + '\n')
def getvalue(self):
"""
Return everything written to orig since the Streamer was created.
"""
return self.s.getvalue()
def flush(self):
if self.orig:
self.orig.flush()
self.s.flush()
class SConfBuildTask(SCons.Taskmaster.AlwaysTask):
"""
This is almost the same as SCons.Script.BuildTask. Handles SConfErrors
correctly and knows about the current cache_mode.
"""
def display(self, message):
if sconf_global.logstream:
sconf_global.logstream.write("scons: Configure: " + message + "\n")
def display_cached_string(self, bi):
"""
Logs the original builder messages, given the SConfBuildInfo instance
bi.
"""
if not isinstance(bi, SConfBuildInfo):
SCons.Warnings.warn(SConfWarning,
"The stored build information has an unexpected class: %s" % bi.__class__)
else:
self.display("The original builder output was:\n" +
(" |" + str(bi.string)).replace("\n", "\n |"))
def failed(self):
# check, if the reason was a ConfigureDryRunError or a
# ConfigureCacheError and if yes, reraise the exception
exc_type = self.exc_info()[0]
if issubclass(exc_type, SConfError):
raise
elif issubclass(exc_type, SCons.Errors.BuildError):
# we ignore Build Errors (occurs, when a test doesn't pass)
# Clear the exception to prevent the contained traceback
# to build a reference cycle.
self.exc_clear()
else:
self.display('Caught exception while building "%s":\n' %
self.targets[0])
try:
excepthook = sys.excepthook
except AttributeError:
# Earlier versions of Python don't have sys.excepthook...
def excepthook(type, value, tb):
traceback.print_tb(tb)
print type, value
excepthook(*self.exc_info())
return SCons.Taskmaster.Task.failed(self)
def collect_node_states(self):
# returns (is_up_to_date, cached_error, cachable)
# where is_up_to_date is 1, if the node(s) are up_to_date
# cached_error is 1, if the node(s) are up_to_date, but the
# build will fail
# cachable is 0, if some nodes are not in our cache
T = 0
changed = False
cached_error = False
cachable = True
for t in self.targets:
if T: Trace('%s' % (t))
bi = t.get_stored_info().binfo
if isinstance(bi, SConfBuildInfo):
if T: Trace(': SConfBuildInfo')
if cache_mode == CACHE:
t.set_state(SCons.Node.up_to_date)
if T: Trace(': set_state(up_to-date)')
else:
if T: Trace(': get_state() %s' % t.get_state())
if T: Trace(': changed() %s' % t.changed())
if (t.get_state() != SCons.Node.up_to_date and t.changed()):
changed = True
if T: Trace(': changed %s' % changed)
cached_error = cached_error or bi.result
else:
if T: Trace(': else')
# the node hasn't been built in a SConf context or doesn't
# exist
cachable = False
changed = ( t.get_state() != SCons.Node.up_to_date )
if T: Trace(': changed %s' % changed)
if T: Trace('\n')
return (not changed, cached_error, cachable)
def execute(self):
if not self.targets[0].has_builder():
return
sconf = sconf_global
is_up_to_date, cached_error, cachable = self.collect_node_states()
if cache_mode == CACHE and not cachable:
raise ConfigureCacheError(self.targets[0])
elif cache_mode == FORCE:
is_up_to_date = 0
if cached_error and is_up_to_date:
self.display("Building \"%s\" failed in a previous run and all "
"its sources are up to date." % str(self.targets[0]))
binfo = self.targets[0].get_stored_info().binfo
self.display_cached_string(binfo)
raise SCons.Errors.BuildError # will be 'caught' in self.failed
elif is_up_to_date:
self.display("\"%s\" is up to date." % str(self.targets[0]))
binfo = self.targets[0].get_stored_info().binfo
self.display_cached_string(binfo)
elif dryrun:
raise ConfigureDryRunError(self.targets[0])
else:
# note stdout and stderr are the same here
s = sys.stdout = sys.stderr = Streamer(sys.stdout)
try:
env = self.targets[0].get_build_env()
if cache_mode == FORCE:
# Set up the Decider() to force rebuilds by saying
# that every source has changed. Note that we still
# call the environment's underlying source decider so
# that the correct .sconsign info will get calculated
# and keep the build state consistent.
def force_build(dependency, target, prev_ni,
env_decider=env.decide_source):
env_decider(dependency, target, prev_ni)
return True
if env.decide_source.func_code is not force_build.func_code:
env.Decider(force_build)
env['PSTDOUT'] = env['PSTDERR'] = s
try:
sconf.cached = 0
self.targets[0].build()
finally:
sys.stdout = sys.stderr = env['PSTDOUT'] = \
env['PSTDERR'] = sconf.logstream
except KeyboardInterrupt:
raise
except SystemExit:
exc_value = sys.exc_info()[1]
raise SCons.Errors.ExplicitExit(self.targets[0],exc_value.code)
except Exception, e:
for t in self.targets:
binfo = t.get_binfo()
binfo.__class__ = SConfBuildInfo
binfo.set_build_result(1, s.getvalue())
sconsign_entry = SCons.SConsign.SConsignEntry()
sconsign_entry.binfo = binfo
#sconsign_entry.ninfo = self.get_ninfo()
# We'd like to do this as follows:
# t.store_info(binfo)
# However, we need to store it as an SConfBuildInfo
# object, and store_info() will turn it into a
# regular FileNodeInfo if the target is itself a
# regular File.
sconsign = t.dir.sconsign()
sconsign.set_entry(t.name, sconsign_entry)
sconsign.merge()
raise e
else:
for t in self.targets:
binfo = t.get_binfo()
binfo.__class__ = SConfBuildInfo
binfo.set_build_result(0, s.getvalue())
sconsign_entry = SCons.SConsign.SConsignEntry()
sconsign_entry.binfo = binfo
#sconsign_entry.ninfo = self.get_ninfo()
# We'd like to do this as follows:
# t.store_info(binfo)
# However, we need to store it as an SConfBuildInfo
# object, and store_info() will turn it into a
# regular FileNodeInfo if the target is itself a
# regular File.
sconsign = t.dir.sconsign()
sconsign.set_entry(t.name, sconsign_entry)
sconsign.merge()
class SConfBase(object):
"""This is simply a class to represent a configure context. After
creating a SConf object, you can call any tests. After finished with your
tests, be sure to call the Finish() method, which returns the modified
environment.
    Some words about caching: In most cases, it is not necessary to cache
    test results explicitly. Instead, we use the scons dependency checking
    mechanism. For example, if one wants to compile a test program
    (SConf.TryLink), the compiler is only called if the program dependencies
    have changed. However, if the program could not be compiled in a former
    SConf run, we need to cache this error explicitly.
    """
def __init__(self, env, custom_tests = {}, conf_dir='$CONFIGUREDIR',
log_file='$CONFIGURELOG', config_h = None, _depth = 0):
"""Constructor. Pass additional tests in the custom_tests-dictinary,
e.g. custom_tests={'CheckPrivate':MyPrivateTest}, where MyPrivateTest
defines a custom test.
Note also the conf_dir and log_file arguments (you may want to
build tests in the VariantDir, not in the SourceDir)
"""
global SConfFS
if not SConfFS:
SConfFS = SCons.Node.FS.default_fs or \
SCons.Node.FS.FS(env.fs.pathTop)
if sconf_global is not None:
raise SCons.Errors.UserError
self.env = env
if log_file is not None:
log_file = SConfFS.File(env.subst(log_file))
self.logfile = log_file
self.logstream = None
self.lastTarget = None
self.depth = _depth
self.cached = 0 # will be set, if all test results are cached
# add default tests
default_tests = {
'CheckCC' : CheckCC,
'CheckCXX' : CheckCXX,
'CheckSHCC' : CheckSHCC,
'CheckSHCXX' : CheckSHCXX,
'CheckFunc' : CheckFunc,
'CheckType' : CheckType,
'CheckTypeSize' : CheckTypeSize,
'CheckDeclaration' : CheckDeclaration,
'CheckHeader' : CheckHeader,
'CheckCHeader' : CheckCHeader,
'CheckCXXHeader' : CheckCXXHeader,
'CheckLib' : CheckLib,
'CheckLibWithHeader' : CheckLibWithHeader,
}
self.AddTests(default_tests)
self.AddTests(custom_tests)
self.confdir = SConfFS.Dir(env.subst(conf_dir))
if config_h is not None:
config_h = SConfFS.File(config_h)
self.config_h = config_h
self._startup()
def Finish(self):
"""Call this method after finished with your tests:
env = sconf.Finish()
"""
self._shutdown()
return self.env
def Define(self, name, value = None, comment = None):
"""
Define a pre processor symbol name, with the optional given value in the
current config header.
        If value is None (default), then "#define name" is written. If value is
        not None, then "#define name value" is written.
        comment is a string which will be put as a C comment in the header, to
        explain the meaning of the value (appropriate C comment markers /* and
        */ will be added automatically)."""
lines = []
if comment:
comment_str = "/* %s */" % comment
lines.append(comment_str)
if value is not None:
define_str = "#define %s %s" % (name, value)
else:
define_str = "#define %s" % name
lines.append(define_str)
lines.append('')
self.config_h_text = self.config_h_text + '\n'.join(lines)
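        # e.g. (hypothetical symbol) Define('HAVE_FOO', 1, 'Set to 1 if foo was found')
        # appends to the config header:
        #   /* Set to 1 if foo was found */
        #   #define HAVE_FOO 1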
def BuildNodes(self, nodes):
"""
Tries to build the given nodes immediately. Returns 1 on success,
0 on error.
"""
if self.logstream is not None:
# override stdout / stderr to write in log file
oldStdout = sys.stdout
sys.stdout = self.logstream
oldStderr = sys.stderr
sys.stderr = self.logstream
# the engine assumes the current path is the SConstruct directory ...
old_fs_dir = SConfFS.getcwd()
old_os_dir = os.getcwd()
SConfFS.chdir(SConfFS.Top, change_os_dir=1)
# Because we take responsibility here for writing out our
# own .sconsign info (see SConfBuildTask.execute(), above),
# we override the store_info() method with a null place-holder
# so we really control how it gets written.
for n in nodes:
n.store_info = n.do_not_store_info
if not hasattr(n, 'attributes'):
n.attributes = SCons.Node.Node.Attrs()
n.attributes.keep_targetinfo = 1
ret = 1
try:
# ToDo: use user options for calc
save_max_drift = SConfFS.get_max_drift()
SConfFS.set_max_drift(0)
tm = SCons.Taskmaster.Taskmaster(nodes, SConfBuildTask)
# we don't want to build tests in parallel
jobs = SCons.Job.Jobs(1, tm )
jobs.run()
for n in nodes:
state = n.get_state()
if (state != SCons.Node.executed and
state != SCons.Node.up_to_date):
# the node could not be built. we return 0 in this case
ret = 0
finally:
SConfFS.set_max_drift(save_max_drift)
os.chdir(old_os_dir)
SConfFS.chdir(old_fs_dir, change_os_dir=0)
if self.logstream is not None:
# restore stdout / stderr
sys.stdout = oldStdout
sys.stderr = oldStderr
return ret
def pspawn_wrapper(self, sh, escape, cmd, args, env):
"""Wrapper function for handling piped spawns.
This looks to the calling interface (in Action.py) like a "normal"
spawn, but associates the call with the PSPAWN variable from
the construction environment and with the streams to which we
want the output logged. This gets slid into the construction
environment as the SPAWN variable so Action.py doesn't have to
know or care whether it's spawning a piped command or not.
"""
return self.pspawn(sh, escape, cmd, args, env, self.logstream, self.logstream)
def TryBuild(self, builder, text = None, extension = ""):
"""Low level TryBuild implementation. Normally you don't need to
call that - you can use TryCompile / TryLink / TryRun instead
"""
global _ac_build_counter
# Make sure we have a PSPAWN value, and save the current
# SPAWN value.
try:
self.pspawn = self.env['PSPAWN']
except KeyError:
raise SCons.Errors.UserError('Missing PSPAWN construction variable.')
try:
save_spawn = self.env['SPAWN']
except KeyError:
raise SCons.Errors.UserError('Missing SPAWN construction variable.')
nodesToBeBuilt = []
f = "conftest_" + str(_ac_build_counter)
pref = self.env.subst( builder.builder.prefix )
suff = self.env.subst( builder.builder.suffix )
target = self.confdir.File(pref + f + suff)
try:
# Slide our wrapper into the construction environment as
# the SPAWN function.
self.env['SPAWN'] = self.pspawn_wrapper
sourcetext = self.env.Value(text)
if text is not None:
textFile = self.confdir.File(f + extension)
textFileNode = self.env.SConfSourceBuilder(target=textFile,
source=sourcetext)
nodesToBeBuilt.extend(textFileNode)
source = textFileNode
else:
source = None
nodes = builder(target = target, source = source)
if not SCons.Util.is_List(nodes):
nodes = [nodes]
nodesToBeBuilt.extend(nodes)
result = self.BuildNodes(nodesToBeBuilt)
finally:
self.env['SPAWN'] = save_spawn
_ac_build_counter = _ac_build_counter + 1
if result:
self.lastTarget = nodes[0]
else:
self.lastTarget = None
return result
def TryAction(self, action, text = None, extension = ""):
"""Tries to execute the given action with optional source file
contents <text> and optional source file extension <extension>,
Returns the status (0 : failed, 1 : ok) and the contents of the
output file.
"""
builder = SCons.Builder.Builder(action=action)
self.env.Append( BUILDERS = {'SConfActionBuilder' : builder} )
ok = self.TryBuild(self.env.SConfActionBuilder, text, extension)
del self.env['BUILDERS']['SConfActionBuilder']
if ok:
outputStr = self.lastTarget.get_contents()
return (1, outputStr)
return (0, "")
def TryCompile( self, text, extension):
"""Compiles the program given in text to an env.Object, using extension
as file extension (e.g. '.c'). Returns 1, if compilation was
successful, 0 otherwise. The target is saved in self.lastTarget (for
further processing).
"""
return self.TryBuild(self.env.Object, text, extension)
def TryLink( self, text, extension ):
"""Compiles the program given in text to an executable env.Program,
using extension as file extension (e.g. '.c'). Returns 1, if
compilation was successful, 0 otherwise. The target is saved in
self.lastTarget (for further processing).
"""
return self.TryBuild(self.env.Program, text, extension )
def TryRun(self, text, extension ):
"""Compiles and runs the program given in text, using extension
as file extension (e.g. '.c'). Returns (1, outputStr) on success,
(0, '') otherwise. The target (a file containing the program's stdout)
is saved in self.lastTarget (for further processing).
"""
        ok = self.TryLink(text, extension)
        if ok:
            prog = self.lastTarget
            pname = prog.path
            output = self.confdir.File(os.path.basename(pname) + '.out')
            node = self.env.Command(output, prog, [[pname, ">", "${TARGET}"]])
            ok = self.BuildNodes(node)
            if ok:
                outputStr = output.get_contents()
                return (1, outputStr)
        return (0, "")
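    # Illustrative usage sketch (not part of the original module): from an
    # SConscript these helpers are normally reached through a configure
    # context, e.g.:
    #
    #   conf = Configure(env)
    #   if conf.TryLink('int main(void) { return 0; }\n', '.c'):
    #       pass  # the toolchain can link a trivial program
    #   env = conf.Finish()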
class TestWrapper(object):
"""A wrapper around Tests (to ensure sanity)"""
def __init__(self, test, sconf):
self.test = test
self.sconf = sconf
def __call__(self, *args, **kw):
if not self.sconf.active:
raise SCons.Errors.UserError
context = CheckContext(self.sconf)
ret = self.test(context, *args, **kw)
if self.sconf.config_h is not None:
self.sconf.config_h_text = self.sconf.config_h_text + context.config_h
context.Result("error: no result")
return ret
def AddTest(self, test_name, test_instance):
"""Adds test_class to this SConf instance. It can be called with
self.test_name(...)"""
setattr(self, test_name, SConfBase.TestWrapper(test_instance, self))
def AddTests(self, tests):
"""Adds all the tests given in the tests dictionary to this SConf
instance
"""
for name in tests.keys():
self.AddTest(name, tests[name])
def _createDir( self, node ):
dirName = str(node)
if dryrun:
if not os.path.isdir( dirName ):
raise ConfigureDryRunError(dirName)
else:
if not os.path.isdir( dirName ):
os.makedirs( dirName )
node._exists = 1
def _startup(self):
"""Private method. Set up logstream, and set the environment
variables necessary for a piped build
"""
global _ac_config_logs
global sconf_global
global SConfFS
self.lastEnvFs = self.env.fs
self.env.fs = SConfFS
self._createDir(self.confdir)
self.confdir.up().add_ignore( [self.confdir] )
if self.logfile is not None and not dryrun:
# truncate logfile, if SConf.Configure is called for the first time
# in a build
if self.logfile in _ac_config_logs:
log_mode = "a"
else:
_ac_config_logs[self.logfile] = None
log_mode = "w"
fp = open(str(self.logfile), log_mode)
self.logstream = SCons.Util.Unbuffered(fp)
            # The logfile may stay in a build directory, so we tell the
            # build system not to override it with a file of the same name
            # that may exist in the source directory.
self.logfile.dir.add_ignore( [self.logfile] )
tb = traceback.extract_stack()[-3-self.depth]
old_fs_dir = SConfFS.getcwd()
SConfFS.chdir(SConfFS.Top, change_os_dir=0)
self.logstream.write('file %s,line %d:\n\tConfigure(confdir = %s)\n' %
(tb[0], tb[1], str(self.confdir)) )
SConfFS.chdir(old_fs_dir)
else:
self.logstream = None
# we use a special builder to create source files from TEXT
action = SCons.Action.Action(_createSource,
_stringSource)
sconfSrcBld = SCons.Builder.Builder(action=action)
self.env.Append( BUILDERS={'SConfSourceBuilder':sconfSrcBld} )
self.config_h_text = _ac_config_hs.get(self.config_h, "")
self.active = 1
# only one SConf instance should be active at a time ...
sconf_global = self
def _shutdown(self):
"""Private method. Reset to non-piped spawn"""
global sconf_global, _ac_config_hs
if not self.active:
raise SCons.Errors.UserError("Finish may be called only once!")
if self.logstream is not None and not dryrun:
self.logstream.write("\n")
self.logstream.close()
self.logstream = None
# remove the SConfSourceBuilder from the environment
blds = self.env['BUILDERS']
del blds['SConfSourceBuilder']
self.env.Replace( BUILDERS=blds )
self.active = 0
sconf_global = None
        if self.config_h is not None:
_ac_config_hs[self.config_h] = self.config_h_text
self.env.fs = self.lastEnvFs
class CheckContext(object):
"""Provides a context for configure tests. Defines how a test writes to the
screen and log file.
A typical test is just a callable with an instance of CheckContext as
first argument:
def CheckCustom(context, ...)
context.Message('Checking my weird test ... ')
ret = myWeirdTestFunction(...)
context.Result(ret)
    Often, myWeirdTestFunction will be one of
    context.TryCompile/context.TryLink/context.TryRun. The results of
    those are cached; they are only rebuilt if the dependencies have
    changed.
"""
def __init__(self, sconf):
"""Constructor. Pass the corresponding SConf instance."""
self.sconf = sconf
self.did_show_result = 0
# for Conftest.py:
self.vardict = {}
self.havedict = {}
self.headerfilename = None
self.config_h = "" # config_h text will be stored here
        # We don't regenerate the config.h file after each test. That means
        # tests won't be able to include the config.h file, and so they
        # can't do an #ifdef HAVE_XXX_H. This shouldn't be a major issue,
        # though. If it turns out that we need to include config.h in tests,
        # we must ensure that the dependencies are worked out correctly.
        # Note that we can't use Conftest.py's support for config.h, because
        # we would need to specify a builder for the config.h file ...
def Message(self, text):
"""Inform about what we are doing right now, e.g.
'Checking for SOMETHING ... '
"""
self.Display(text)
self.sconf.cached = 1
self.did_show_result = 0
def Result(self, res):
"""Inform about the result of the test. If res is not a string, displays
'yes' or 'no' depending on whether res is evaluated as true or false.
The result is only displayed when self.did_show_result is not set.
"""
if isinstance(res, str):
text = res
elif res:
text = "yes"
else:
text = "no"
if self.did_show_result == 0:
# Didn't show result yet, do it now.
self.Display(text + "\n")
self.did_show_result = 1
def TryBuild(self, *args, **kw):
return self.sconf.TryBuild(*args, **kw)
def TryAction(self, *args, **kw):
return self.sconf.TryAction(*args, **kw)
def TryCompile(self, *args, **kw):
return self.sconf.TryCompile(*args, **kw)
def TryLink(self, *args, **kw):
return self.sconf.TryLink(*args, **kw)
def TryRun(self, *args, **kw):
return self.sconf.TryRun(*args, **kw)
    def __getattr__(self, attr):
        if attr == 'env':
            return self.sconf.env
        elif attr == 'lastTarget':
            return self.sconf.lastTarget
        else:
            raise AttributeError("CheckContext instance has no attribute '%s'" % attr)
#### Stuff used by Conftest.py (look there for explanations).
def BuildProg(self, text, ext):
self.sconf.cached = 1
# TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
return not self.TryBuild(self.env.Program, text, ext)
def CompileProg(self, text, ext):
self.sconf.cached = 1
# TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
return not self.TryBuild(self.env.Object, text, ext)
def CompileSharedObject(self, text, ext):
self.sconf.cached = 1
# TODO: should use self.vardict for $SHCC, $CPPFLAGS, etc.
return not self.TryBuild(self.env.SharedObject, text, ext)
def RunProg(self, text, ext):
self.sconf.cached = 1
# TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
st, out = self.TryRun(text, ext)
return not st, out
def AppendLIBS(self, lib_name_list):
oldLIBS = self.env.get( 'LIBS', [] )
self.env.Append(LIBS = lib_name_list)
return oldLIBS
def PrependLIBS(self, lib_name_list):
oldLIBS = self.env.get( 'LIBS', [] )
self.env.Prepend(LIBS = lib_name_list)
return oldLIBS
def SetLIBS(self, val):
oldLIBS = self.env.get( 'LIBS', [] )
self.env.Replace(LIBS = val)
return oldLIBS
def Display(self, msg):
if self.sconf.cached:
            # We assume that Display is called twice for each test here:
            # once for the "Checking for ..." message and once for the
            # result. The self.sconf.cached flag can only be set between
            # those calls.
msg = "(cached) " + msg
self.sconf.cached = 0
progress_display(msg, append_newline=0)
self.Log("scons: Configure: " + msg + "\n")
def Log(self, msg):
if self.sconf.logstream is not None:
self.sconf.logstream.write(msg)
#### End of stuff used by Conftest.py.
def SConf(*args, **kw):
if kw.get(build_type, True):
kw['_depth'] = kw.get('_depth', 0) + 1
for bt in build_types:
try:
del kw[bt]
except KeyError:
pass
return SConfBase(*args, **kw)
else:
return SCons.Util.Null()
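# Illustrative sketch (Configure/env.Configure are the usual SCons entry
# points, defined elsewhere): build scripts normally reach SConf() through
# them rather than calling it directly, e.g.:
#
#   conf = env.Configure(custom_tests={'CheckMyLib': my_check_function})
#   have_mylib = conf.CheckMyLib()
#   env = conf.Finish()
#
# 'CheckMyLib' and 'my_check_function' are hypothetical names.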
def CheckFunc(context, function_name, header = None, language = None):
res = SCons.Conftest.CheckFunc(context, function_name, header = header, language = language)
context.did_show_result = 1
return not res
def CheckType(context, type_name, includes = "", language = None):
res = SCons.Conftest.CheckType(context, type_name,
header = includes, language = language)
context.did_show_result = 1
return not res
def CheckTypeSize(context, type_name, includes = "", language = None, expect = None):
res = SCons.Conftest.CheckTypeSize(context, type_name,
header = includes, language = language,
expect = expect)
context.did_show_result = 1
return res
def CheckDeclaration(context, declaration, includes = "", language = None):
res = SCons.Conftest.CheckDeclaration(context, declaration,
includes = includes,
language = language)
context.did_show_result = 1
return not res
def createIncludesFromHeaders(headers, leaveLast, include_quotes = '""'):
    # Used by CheckHeader and CheckLibWithHeader to produce C '#include'
    # statements from the specified header (or list of headers).
if not SCons.Util.is_List(headers):
headers = [headers]
l = []
if leaveLast:
lastHeader = headers[-1]
headers = headers[:-1]
else:
lastHeader = None
for s in headers:
l.append("#include %s%s%s\n"
% (include_quotes[0], s, include_quotes[1]))
return ''.join(l), lastHeader
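# For example (illustrative): createIncludesFromHeaders(['a.h', 'b.h'], 1)
# returns ('#include "a.h"\n', 'b.h') -- every header but the last becomes
# an #include line, and the last one is handed back for the actual check.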
def CheckHeader(context, header, include_quotes = '<>', language = None):
"""
A test for a C or C++ header file.
"""
prog_prefix, hdr_to_check = \
createIncludesFromHeaders(header, 1, include_quotes)
res = SCons.Conftest.CheckHeader(context, hdr_to_check, prog_prefix,
language = language,
include_quotes = include_quotes)
context.did_show_result = 1
return not res
def CheckCC(context):
res = SCons.Conftest.CheckCC(context)
context.did_show_result = 1
return not res
def CheckCXX(context):
res = SCons.Conftest.CheckCXX(context)
context.did_show_result = 1
return not res
def CheckSHCC(context):
res = SCons.Conftest.CheckSHCC(context)
context.did_show_result = 1
return not res
def CheckSHCXX(context):
res = SCons.Conftest.CheckSHCXX(context)
context.did_show_result = 1
return not res
# Bram: Make this function obsolete? CheckHeader() is more generic.
def CheckCHeader(context, header, include_quotes = '""'):
"""
A test for a C header file.
"""
return CheckHeader(context, header, include_quotes, language = "C")
# Bram: Make this function obsolete? CheckHeader() is more generic.
def CheckCXXHeader(context, header, include_quotes = '""'):
"""
A test for a C++ header file.
"""
return CheckHeader(context, header, include_quotes, language = "C++")
def CheckLib(context, library = None, symbol = "main",
header = None, language = None, autoadd = 1):
"""
A test for a library. See also CheckLibWithHeader.
Note that library may also be None to test whether the given symbol
compiles without flags.
"""
if library == []:
library = [None]
if not SCons.Util.is_List(library):
library = [library]
# ToDo: accept path for the library
res = SCons.Conftest.CheckLib(context, library, symbol, header = header,
language = language, autoadd = autoadd)
context.did_show_result = 1
return not res
# XXX
# Bram: Can only include one header and can't use #ifdef HAVE_HEADER_H.
def CheckLibWithHeader(context, libs, header, language,
call = None, autoadd = 1):
# ToDo: accept path for library. Support system header files.
"""
Another (more sophisticated) test for a library.
Checks, if library and header is available for language (may be 'C'
or 'CXX'). Call maybe be a valid expression _with_ a trailing ';'.
As in CheckLib, we support library=None, to test if the call compiles
without extra link flags.
"""
prog_prefix, dummy = \
createIncludesFromHeaders(header, 0)
if libs == []:
libs = [None]
if not SCons.Util.is_List(libs):
libs = [libs]
res = SCons.Conftest.CheckLib(context, libs, None, prog_prefix,
call = call, language = language, autoadd = autoadd)
context.did_show_result = 1
return not res
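# Illustrative call (a sketch of typical usage, not taken from this file):
#
#   conf = Configure(env)
#   if conf.CheckLibWithHeader('m', 'math.h', 'C', call='sin(0.0);'):
#       pass  # libm and math.h are usable together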
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
IljaGrebel/OpenWrt-SDK-imx6_HummingBoard
|
staging_dir/host/lib/scons-2.3.5/SCons/SConf.py
|
Python
|
gpl-2.0
| 39,620
|
# Copyright (C) 2008-2010 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
# script to turn a plugin dir into a .exz file suitable for distribution
# takes one commandline parameter: the name of the plugin to build, which must
# be a subdirectory of the current directory
# outputs the built plugin to the current directory, overwriting any current
# build of that plugin
import os
import tarfile
from optparse import OptionParser
p = OptionParser()
p.add_option(
"-c",
"--compression",
dest="compression",
action="store",
choices=("", "gz", "bz2"),
default="bz2",
)
p.add_option(
    "-e",
    "--ignore-extension",
    dest="extensions",
    action="append",
    default=[".pyc", ".pyo"],  # a list, so the "append" action can extend it
)
p.add_option("-f", "--ignore-file", dest="files", action="append", default=("test.py"))
p.add_option("-O", "--output", dest="output", action="store", default="")
options, args = p.parse_args()
# allowed values: "", "gz", "bz2"
COMPRESSION = options.compression
# don't add files with these extensions to the archive
IGNORED_EXTENSIONS = options.extensions
# don't add files with this exact name to the archive
IGNORED_FILES = options.files
_ = lambda x: x
for dir in args:
if not os.path.exists(dir):
print("No such folder %s" % dir)
break
print("Making plugin %s..." % dir)
if not os.path.exists(os.path.join(dir, "PLUGININFO")):
print("ERROR: no valid info for %s, skipping..." % dir)
continue
f = open(os.path.join(dir, "PLUGININFO"))
info = {}
for line in f:
try:
key, val = line.split("=", 1)
except ValueError:
continue
key = key.strip()
val = eval(val)
info[key] = val
f.close()
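    # Illustrative PLUGININFO sketch (format inferred from the parser above;
    # one key=value pair per line, values being Python literals):
    #   Version='0.0.1'
    #   Name='My Plugin'
    # 'Name' is a hypothetical key; only 'Version' is required below.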
if "Version" not in info:
print("ERROR: couldn't get version for %s, skipping..." % dir)
continue
tfile = tarfile.open(
options.output + dir + "-%s.exz" % info["Version"],
"w:%s" % COMPRESSION,
format=tarfile.USTAR_FORMAT,
)
for fold, subdirs, files in os.walk(dir):
for file in files:
stop = False
for ext in IGNORED_EXTENSIONS:
if file.endswith(ext):
stop = True
break
if stop:
continue
for name in IGNORED_FILES:
if file == name:
stop = True
break
if stop:
continue
path = os.path.join(fold, file)
tfile.add(path)
tfile.close()
print("Done.")
|
exaile/exaile
|
plugins/dist_plugin.py
|
Python
|
gpl-2.0
| 3,754
|
import os
import re
import logging
import base64
from virttest.utils_test import libvirt
from autotest.client.shared import error
from autotest.client import utils
from virttest import aexpect
from virttest import remote
from virttest import virt_vm
from virttest import virsh
from virttest.libvirt_xml import vm_xml
from virttest.libvirt_xml import secret_xml
from virttest.libvirt_xml.devices.disk import Disk
def run(test, params, env):
"""
Test disk encryption option.
1.Prepare test environment, destroy or suspend a VM.
2.Prepare tgtd and secret config.
3.Edit disks xml and start the domain.
4.Perform test operation.
5.Recover test environment.
6.Confirm the test result.
"""
vm_name = params.get("main_vm")
vm = env.get_vm(vm_name)
virsh_dargs = {'debug': True, 'ignore_status': True}
def check_save_restore(save_file):
"""
Test domain save and restore.
"""
# Save the domain.
ret = virsh.save(vm_name, save_file, **virsh_dargs)
libvirt.check_exit_status(ret)
# Restore the domain.
ret = virsh.restore(save_file, **virsh_dargs)
libvirt.check_exit_status(ret)
def check_snapshot():
"""
Test domain snapshot operation.
"""
snapshot1 = "s1"
snapshot2 = "s2"
ret = virsh.snapshot_create_as(vm_name, snapshot1)
libvirt.check_exit_status(ret)
ret = virsh.snapshot_create_as(vm_name,
"%s --disk-only --diskspec vda,"
"file=/tmp/testvm-snap1"
% snapshot2)
libvirt.check_exit_status(ret, True)
ret = virsh.snapshot_create_as(vm_name,
"%s --memspec file=%s,snapshot=external"
" --diskspec vda,file=/tmp/testvm-snap2"
% (snapshot2, snapshot2))
libvirt.check_exit_status(ret, True)
def check_in_vm(target, old_parts):
"""
Check mount/read/write disk in VM.
:param vm. VM guest.
:param target. Disk dev in VM.
:return: True if check successfully.
"""
try:
session = vm.wait_for_login()
new_parts = libvirt.get_parts_list(session)
added_parts = list(set(new_parts).difference(set(old_parts)))
logging.info("Added parts:%s", added_parts)
if len(added_parts) != 1:
logging.error("The number of new partitions is invalid in VM")
return False
added_part = None
if target.startswith("vd"):
if added_parts[0].startswith("vd"):
added_part = added_parts[0]
elif target.startswith("hd"):
if added_parts[0].startswith("sd"):
added_part = added_parts[0]
if not added_part:
logging.error("Cann't see added partition in VM")
return False
cmd = ("fdisk -l /dev/{0} && mkfs.ext3 -F /dev/{0} && "
"mkdir test && mount /dev/{0} test && echo"
" teststring > test/testfile && umount test"
.format(added_part))
s, o = session.cmd_status_output(cmd)
logging.info("Check disk operation in VM:\n%s", o)
if s != 0:
return False
return True
except (remote.LoginError, virt_vm.VMError, aexpect.ShellError), e:
logging.error(str(e))
return False
# Disk specific attributes.
device = params.get("virt_disk_device", "disk")
device_target = params.get("virt_disk_device_target", "vdd")
device_format = params.get("virt_disk_device_format", "raw")
device_type = params.get("virt_disk_device_type", "file")
device_bus = params.get("virt_disk_device_bus", "virtio")
# iscsi options.
iscsi_target = params.get("iscsi_target")
iscsi_host = params.get("iscsi_host")
iscsi_port = params.get("iscsi_port")
emulated_size = params.get("iscsi_image_size", "1")
uuid = params.get("uuid", "")
auth_uuid = "yes" == params.get("auth_uuid", "")
auth_usage = "yes" == params.get("auth_usage", "")
status_error = "yes" == params.get("status_error")
test_save_snapshot = "yes" == params.get("test_save_snapshot", "no")
check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes")
secret_uuid = ""
# Start vm and get all partions in vm.
if vm.is_dead():
vm.start()
session = vm.wait_for_login()
old_parts = libvirt.get_parts_list(session)
session.close()
vm.destroy(gracefully=False)
# Back up xml file.
vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
try:
chap_user = ""
chap_passwd = ""
if auth_uuid or auth_usage:
auth_type = params.get("auth_type")
secret_usage_target = params.get("secret_usage_target")
secret_usage_type = params.get("secret_usage_type")
chap_user = params.get("iscsi_user")
chap_passwd = params.get("iscsi_password")
sec_xml = secret_xml.SecretXML("no", "yes")
sec_xml.description = "iSCSI secret"
sec_xml.auth_type = auth_type
sec_xml.auth_username = chap_user
sec_xml.usage = secret_usage_type
sec_xml.target = secret_usage_target
sec_xml.xmltreefile.write()
ret = virsh.secret_define(sec_xml.xml)
libvirt.check_exit_status(ret)
secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
ret.stdout)[0].lstrip()
logging.debug("Secret uuid %s", secret_uuid)
if secret_uuid == "":
raise error.TestNAError("Failed to get secret uuid")
# Set secret value
secret_string = base64.b64encode(chap_passwd)
ret = virsh.secret_set_value(secret_uuid, secret_string,
**virsh_dargs)
libvirt.check_exit_status(ret)
# Setup iscsi target
iscsi_target = libvirt.setup_or_cleanup_iscsi(is_setup=True,
is_login=False,
image_size=emulated_size,
chap_user=chap_user,
chap_passwd=chap_passwd)
# If we use qcow2 disk format, should format iscsi disk first.
if device_format == "qcow2":
cmd = ("qemu-img create -f qcow2 iscsi://%s:%s/%s/1 %s"
% (iscsi_host, iscsi_port, iscsi_target, emulated_size))
utils.run(cmd)
# Add disk xml.
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
disk_xml = Disk(type_name=device_type)
disk_xml.device = device
disk_xml.source = disk_xml.new_disk_source(
**{"attrs": {"protocol": "iscsi", "name": "%s/1" % iscsi_target},
"hosts": [{"name": iscsi_host, "port": iscsi_port}]})
disk_xml.target = {"dev": device_target, "bus": device_bus}
disk_xml.driver = {"name": "qemu", "type": device_format}
# Check if we want to use a faked uuid.
if not uuid:
uuid = secret_uuid
auth_dict = {}
if auth_uuid:
auth_dict = {"auth_user": chap_user,
"secret_type": secret_usage_type,
"secret_uuid": uuid}
elif auth_usage:
auth_dict = {"auth_user": chap_user,
"secret_type": secret_usage_type,
"secret_usage": secret_usage_target}
if auth_dict:
disk_xml.auth = disk_xml.new_auth(**auth_dict)
# Sync VM xml.
vmxml.add_device(disk_xml)
vmxml.sync()
try:
# Start the VM and check status.
vm.start()
if status_error:
raise error.TestFail("VM started unexpectedly.")
except virt_vm.VMStartError, e:
if status_error:
if re.search(uuid, str(e)):
pass
else:
raise error.TestFail("VM failed to start")
else:
# Check partitions in VM.
if check_partitions:
if not check_in_vm(device_target, old_parts):
raise error.TestFail("Check disk partitions in VM failed")
# Test domain save/restore/snapshot.
if test_save_snapshot:
                save_file = os.path.join(test.tmpdir, "%s.save" % vm_name)
check_save_restore(save_file)
check_snapshot()
if os.path.exists(save_file):
os.remove(save_file)
finally:
# Delete snapshots.
snapshot_lists = virsh.snapshot_list(vm_name)
if len(snapshot_lists) > 0:
libvirt.clean_up_snapshots(vm_name, snapshot_lists)
for snapshot in snapshot_lists:
virsh.snapshot_delete(vm_name, snapshot, "--metadata")
# Recover VM.
vmxml_backup.sync("--snapshots-metadata")
# Delete the tmp files.
libvirt.setup_or_cleanup_iscsi(is_setup=False)
# Clean up secret
if secret_uuid:
virsh.secret_undefine(secret_uuid)
|
PandaWei/tp-libvirt
|
libvirt/tests/src/virtual_disks/virtual_disks_iscsi.py
|
Python
|
gpl-2.0
| 9,567
|
import itertools
class Drawable_Object(object):
newid = itertools.count().next
    def __init__(self, surface, visible=True):
        # Get a unique object ID
        self._id = Drawable_Object.newid()
        # Surface the object will be drawn on
        self._surface = surface
        # True indicates that the object will be drawn
        self._visible = visible
def id(self):
return self._id
@property
def visible(self):
return self._visible
@visible.setter
def visible(self, visible):
self._visible = visible
def draw(self):
# TODO: Replace with the ABC module??
raise NotImplementedError("Please Implement this method")
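# Minimal subclass sketch (illustrative; 'Circle' is a hypothetical name):
# concrete drawables override draw(), e.g.:
#
#   class Circle(Drawable_Object):
#       def draw(self):
#           if self.visible:
#               pass  # render the shape onto self._surface here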
|
fbarrios/navemer
|
navigation/Drawable_Objects/Drawable_Object.py
|
Python
|
gpl-2.0
| 612
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------
# Copyright (c) 2009 Jendrik Seipp
#
# RedNotebook is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# RedNotebook is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with RedNotebook; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# -----------------------------------------------------------------------
import logging
import gtk
import pango
from rednotebook.util import markup
from rednotebook import undo
class CategoriesTreeView(object):
def __init__(self, tree_view, main_window):
self.tree_view = tree_view
self.main_window = main_window
self.undo_redo_manager = main_window.undo_redo_manager
# Maintain a list of all entered categories. Initialized by rn.__init__()
self.categories = None
self.statusbar = self.main_window.statusbar
# create a TreeStore with one string column to use as the model
self.tree_store = gtk.TreeStore(str)
# create the TreeView using tree_store
self.tree_view.set_model(self.tree_store)
# create the TreeViewColumn to display the data
self.tvcolumn = gtk.TreeViewColumn()
label = gtk.Label()
label.set_markup('<b>' + _('Categories') + '</b>')
label.show()
self.tvcolumn.set_widget(label)
# add tvcolumn to tree_view
self.tree_view.append_column(self.tvcolumn)
# create a CellRendererText to render the data
self.cell = gtk.CellRendererText()
self.cell.set_property('editable', True)
self.cell.connect('edited', self.edited_cb, self.tree_store)
self.cell.connect('editing-started', self.on_editing_started)
# add the cell to the tvcolumn and allow it to expand
self.tvcolumn.pack_start(self.cell, True)
''' set the cell "text" attribute to column 0 - retrieve text
from that column in tree_store'''
#self.tvcolumn.add_attribute(self.cell, 'text', 0)
self.tvcolumn.add_attribute(self.cell, 'markup', 0)
# make it searchable
self.tree_view.set_search_column(0)
# Allow sorting on the column
self.tvcolumn.set_sort_column_id(0)
# Enable a context menu
self.context_menu = self._get_context_menu()
self.context_menu.attach_to_widget(self.tree_view, lambda x,y:None)
self.tree_view.connect('button-press-event', self.on_button_press_event)
self.tree_view.connect('key-press-event', self.on_key_press_event)
# Wrap lines
self.cell.props.wrap_mode = pango.WRAP_WORD
self.cell.props.wrap_width = 200
self.tree_view.connect_after("size-allocate", self.on_size_allocate, self.tvcolumn, self.cell)
def node_on_top_level(self, iter):
if not type(iter) == gtk.TreeIter:
# iter is a path -> convert to iter
iter = self.tree_store.get_iter(iter)
assert self.tree_store.iter_is_valid(iter)
return self.tree_store.iter_depth(iter) == 0
def on_editing_started(self, cell, editable, path):
# Let the renderer use text not markup temporarily
self.tvcolumn.clear_attributes(self.cell)
self.tvcolumn.add_attribute(self.cell, 'text', 0)
# Fetch the markup
pango_markup = self.tree_store[path][0]
# Reset the renderer to use markup
self.tvcolumn.clear_attributes(self.cell)
self.tvcolumn.add_attribute(self.cell, 'markup', 0)
# We want to show txt2tags markup and not pango markup
editable.set_text(markup.convert_from_pango(pango_markup))
def edited_cb(self, cell, path, new_text, user_data):
'''
Called when text in a cell is changed
new_text is txt2tags markup
'''
if new_text == 'text' and self.node_on_top_level(path):
self.statusbar.show_text('"text" is a reserved keyword', error=True)
return
if len(new_text) < 1:
self.statusbar.show_text(_('Empty nodes are not allowed'), error=True)
return
liststore = user_data
pango_markup = markup.convert_to_pango(new_text)
liststore[path][0] = pango_markup
# Category name changed
if self.node_on_top_level(path):
if new_text not in self.categories:
self.categories.insert(0, new_text)
# Tag name changed
else:
iter = self.tree_store.get_iter(path)
iter_parent = self.tree_store.iter_parent(iter)
tags_iter = self._get_category_iter('Tags')
tags_node_is_parent = self.get_iter_value(iter_parent).capitalize() == 'Tags'
if tags_node_is_parent and self.node_on_top_level(iter_parent):
self.main_window.journal.save_old_day()
# Update cloud
self.main_window.cloud.update()
def check_category(self, category):
if category == 'text':
self.statusbar.show_text('"text" is a reserved keyword', error=True)
return False
if len(category) < 1:
self.statusbar.show_text(_('Empty category names are not allowed'), error=True)
return False
return True
def check_entry(self, text):
if len(text) < 1:
self.statusbar.show_text(_('Empty entries are not allowed'), error=True)
return False
return True
def add_element(self, parent, element_content):
'''
Recursive Method for adding the content
'''
# We want to order the entries ascendingly
ascending = lambda (key, value): key.lower()
for key, value in sorted(element_content.iteritems(), key=ascending):
if key is not None:
key_pango = markup.convert_to_pango(key)
new_child = self.tree_store.append(parent, [key_pango])
                if value is not None:
self.add_element(new_child, value)
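    # Illustrative call: add_element(None, {'Tags': {'work': None}}) creates
    # a top-level 'Tags' node with a single 'work' child.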
def set_day_content(self, day):
# We want to order the categories ascendingly, having Tags first
ascending = lambda x: '000' if x.lower() == 'tags' else x.lower()
sorted_keys = sorted(day.content.keys(), key=ascending)
for key in sorted_keys:
value = day.content[key]
            if key != 'text':
self.add_element(None, {key: value})
self.tree_view.expand_all()
def get_day_content(self):
if self.empty():
return {}
content = self._get_element_content(None)
return content
def _get_element_content(self, element):
model = self.tree_store
if self.tree_store.iter_n_children(element) == 0:
return None
else:
content = {}
for i in range(model.iter_n_children(element)):
child = model.iter_nth_child(element, i)
txt2tags_markup = self.get_iter_value(child)
content[txt2tags_markup] = self._get_element_content(child)
return content
def empty(self, category_iter=None):
'''
Tests whether a category has children
If no category is given, test whether there are any categories
'''
return self.tree_store.iter_n_children(category_iter) == 0
def clear(self):
self.tree_store.clear()
assert self.empty(), self.tree_store.iter_n_children(None)
def get_iter_value(self, iter):
# Let the renderer use text not markup temporarily
self.tvcolumn.clear_attributes(self.cell)
self.tvcolumn.add_attribute(self.cell, 'text', 0)
pango_markup = self.tree_store.get_value(iter, 0).decode('utf-8')
# Reset the renderer to use markup
self.tvcolumn.clear_attributes(self.cell)
self.tvcolumn.add_attribute(self.cell, 'markup', 0)
# We want to have txt2tags markup and not pango markup
text = markup.convert_from_pango(pango_markup)
return text
def set_iter_value(self, iter, txt2tags_markup):
'''
text is txt2tags markup
'''
pango_markup = markup.convert_to_pango(txt2tags_markup)
self.tree_store.set_value(iter, 0, pango_markup)
def find_iter(self, category, entry):
logging.debug('Looking for iter: "%s", "%s"' % (category, entry))
category_iter = self._get_category_iter(category)
if not category_iter:
# If the category was not found, return None
return None
for iter_index in range(self.tree_store.iter_n_children(category_iter)):
current_entry_iter = self.tree_store.iter_nth_child(category_iter, iter_index)
current_entry = self.get_iter_value(current_entry_iter)
if str(current_entry) == str(entry):
return current_entry_iter
# If the entry was not found, return None
logging.debug('Iter not found: "%s", "%s"' % (category, entry))
return None
def _get_category_iter(self, category_name):
for iter_index in range(self.tree_store.iter_n_children(None)):
current_category_iter = self.tree_store.iter_nth_child(None, iter_index)
current_category_name = self.get_iter_value(current_category_iter)
if str(current_category_name).lower() == str(category_name).lower():
return current_category_iter
# If the category was not found, return None
logging.debug('Category not found: "%s"' % category_name)
return None
def add_entry(self, category, entry, undoing=False):
if category not in self.categories and category is not None:
self.categories.insert(0, category)
category_iter = self._get_category_iter(category)
entry_pango = markup.convert_to_pango(entry)
category_pango = markup.convert_to_pango(category)
if category_iter is None:
# If category does not exist add new category
category_iter = self.tree_store.append(None, [category_pango])
entry_node = self.tree_store.append(category_iter, [entry_pango])
else:
# If category exists add entry to existing category
entry_node = self.tree_store.append(category_iter, [entry_pango])
if not undoing:
undo_func = lambda: self.delete_node(self.find_iter(category, entry), undoing=True)
redo_func = lambda: self.add_entry(category, entry, undoing=True)
action = undo.Action(undo_func, redo_func, 'categories_tree_view')
self.undo_redo_manager.add_action(action)
self.tree_view.expand_all()
def get_selected_node(self):
'''
Returns selected node or None if none is selected
'''
tree_selection = self.tree_view.get_selection()
model, selected_iter = tree_selection.get_selected()
return selected_iter
def delete_node(self, iter, undoing=False):
if not iter:
# The user has changed the text of the node or deleted it
return
# Save for undoing ------------------------------------
# An entry is deleted
# We want to delete empty categories too
if not self.node_on_top_level(iter):
deleting_entry = True
category_iter = self.tree_store.iter_parent(iter)
category = self.get_iter_value(category_iter)
entries = [self.get_iter_value(iter)]
# A category is deleted
else:
deleting_entry = False
category_iter = iter
category = self.get_iter_value(category_iter)
entries = self._get_element_content(category_iter).keys()
# Delete ---------------------------------------------
self.tree_store.remove(iter)
# Delete empty category
if deleting_entry and self.empty(category_iter):
self.tree_store.remove(category_iter)
# ----------------------------------------------------
if not undoing:
def undo_func():
for entry in entries:
self.add_entry(category, entry, undoing=True)
def redo_func():
for entry in entries:
delete_iter = self.find_iter(category, entry)
self.delete_node(delete_iter, undoing=True)
action = undo.Action(undo_func, redo_func, 'categories_tree_view')
self.undo_redo_manager.add_action(action)
# Update cloud
self.main_window.cloud.update()
def delete_selected_node(self):
'''
This method used to show a warning dialog. This has become obsolete
with the addition of undo functionality for the categories
'''
selected_iter = self.get_selected_node()
if selected_iter:
self.delete_node(selected_iter)
return
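        # The dialog code below is kept but intentionally unreachable; see
        # the docstring above.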
message = _('Do you really want to delete this node?')
sort_optimal_dialog = gtk.MessageDialog(parent=self.main_window.main_frame, \
flags=gtk.DIALOG_MODAL, type=gtk.MESSAGE_QUESTION, \
buttons=gtk.BUTTONS_YES_NO, message_format=message)
response = sort_optimal_dialog.run()
sort_optimal_dialog.hide()
if response == gtk.RESPONSE_YES:
self.delete_node(selected_iter)
def on_key_press_event(self, widget, event):
"""
@param widget - gtk.TreeView - The Tree View
@param event - gtk.gdk.event - Event information
Delete an annotation node when user hits "Delete"
"""
keyname = gtk.gdk.keyval_name(event.keyval)
logging.info('Pressed key: %s' % keyname)
if keyname == 'Delete':
self._on_delete_entry_clicked(None)
elif keyname == 'Menu':
# Does not work
logging.info('Context Menu does not work')
self.context_menu.popup(None, None, None, 0, event.time)
def on_button_press_event(self, widget, event):
"""
@param widget - gtk.TreeView - The Tree View
@param event - gtk.gdk.event - Event information
"""
#Get the path at the specific mouse position
path = widget.get_path_at_pos(int(event.x), int(event.y))
if (path == None):
"""If we didn't get a path then we don't want anything
to be selected."""
selection = widget.get_selection()
selection.unselect_all()
# Do not show change and delete options, if nothing is selected
something_selected = (path is not None)
uimanager = self.main_window.uimanager
change_entry_item = uimanager.get_widget('/ContextMenu/ChangeEntry')
change_entry_item.set_sensitive(something_selected)
delete_entry_item = uimanager.get_widget('/ContextMenu/Delete')
delete_entry_item.set_sensitive(something_selected)
if (event.button == 3):
#This is a right-click
self.context_menu.popup(None, None, None, event.button, event.time)
def _get_context_menu(self):
context_menu_xml = '''
<ui>
<popup action="ContextMenu">
<menuitem action="ChangeEntry"/>
<menuitem action="AddEntry"/>
<menuitem action="Delete"/>
</popup>
</ui>'''
uimanager = self.main_window.uimanager
# Create an ActionGroup
actiongroup = gtk.ActionGroup('ContextMenuActionGroup')
new_entry_dialog = self.main_window.new_entry_dialog
# Create actions
actiongroup.add_actions([
('ChangeEntry', gtk.STOCK_EDIT, \
_('Change this text'), \
None, None, self._on_change_entry_clicked
),
('AddEntry', gtk.STOCK_NEW, \
_('Add a new entry'), \
None, None, self._on_add_entry_clicked
),
('Delete', gtk.STOCK_DELETE, \
_('Delete this node'), \
None, None, self._on_delete_entry_clicked
),
])
# Add the actiongroup to the uimanager
uimanager.insert_action_group(actiongroup, 0)
# Add a UI description
uimanager.add_ui_from_string(context_menu_xml)
# Create a Menu
menu = uimanager.get_widget('/ContextMenu')
return menu
def _on_change_entry_clicked(self, action):
iter = self.get_selected_node()
self.tree_view.set_cursor(self.tree_store.get_path(iter), \
focus_column=self.tvcolumn, start_editing=True)
def _on_add_entry_clicked(self, action):
iter = self.get_selected_node()
dialog = self.main_window.new_entry_dialog
# Either nothing was selected -> show normal new_entry_dialog
if iter is None:
dialog.show_dialog()
# or a category was selected
elif self.node_on_top_level(iter):
category = self.get_iter_value(iter)
dialog.show_dialog(category=category)
# or an entry was selected
else:
parent_iter = self.tree_store.iter_parent(iter)
category = self.get_iter_value(parent_iter)
dialog.show_dialog(category=category)
def _on_delete_entry_clicked(self, action):
self.delete_selected_node()
def on_size_allocate(self, treeview, allocation, column, cell):
'''
Code from pychess project
(http://code.google.com/p/pychess/source/browse/trunk/lib/pychess/
System/uistuff.py?r=1025#62)
Allows dynamic line wrapping in a treeview
'''
other_columns = (c for c in treeview.get_columns() if c != column)
new_width = allocation.width - sum(c.get_width() for c in other_columns)
new_width -= treeview.style_get_property("horizontal-separator") * 2
## Customize for treeview with expanders
## The behaviour can only be fitted to one depth -> take the second one
new_width -= treeview.style_get_property('expander-size') * 3
if cell.props.wrap_width == new_width or new_width <= 0:
return
cell.props.wrap_width = new_width
store = treeview.get_model()
iter = store.get_iter_first()
while iter and store.iter_is_valid(iter):
store.row_changed(store.get_path(iter), iter)
iter = store.iter_next(iter)
treeview.set_size_request(0,-1)
## The heights may have changed
column.queue_resize()
|
tomka/rednotebook
|
rednotebook/gui/categories.py
|
Python
|
gpl-2.0
| 20,312
|
# -*- coding: utf-8 -*-
import unittest
from httplib import BAD_REQUEST
from httplib import FORBIDDEN
from httplib import NOT_FOUND
from httplib import NOT_IMPLEMENTED
from datetime import datetime
from tcms.core.contrib.linkreference.models import LinkReference
from tcms.xmlrpc.api import testcaserun
from tcms.xmlrpc.tests.utils import make_http_request
from tcms.testruns.models import TestCaseRunStatus
from tcms.testcases.models import TestCaseBugSystem
from tcms.tests.factories import ProductFactory
from tcms.tests.factories import TestCaseFactory
from tcms.tests.factories import TestCaseRunFactory
from tcms.tests.factories import TestPlanFactory
from tcms.tests.factories import TestRunFactory
from tcms.tests.factories import UserFactory
from tcms.tests.factories import VersionFactory
from tcms.tests.factories import TestBuildFactory
from tcms.xmlrpc.tests.utils import XmlrpcAPIBaseTest
class TestCaseRunCreate(XmlrpcAPIBaseTest):
"""Test testcaserun.create"""
@classmethod
def setUpTestData(cls):
cls.admin = UserFactory(username='tcr_admin', email='tcr_admin@example.com')
cls.staff = UserFactory(username='tcr_staff', email='tcr_staff@example.com')
cls.admin_request = make_http_request(user=cls.admin, user_perm='testruns.add_testcaserun')
cls.staff_request = make_http_request(user=cls.staff)
cls.product = ProductFactory(name='Nitrate')
cls.version = VersionFactory(value='0.1', product=cls.product)
cls.build = cls.product.build.all()[0]
cls.plan = TestPlanFactory(author=cls.admin, owner=cls.admin, product=cls.product)
cls.test_run = TestRunFactory(product_version=cls.version, build=cls.build,
default_tester=None, plan=cls.plan)
cls.case_run_status = TestCaseRunStatus.objects.get(name='IDLE')
cls.case = TestCaseFactory(author=cls.admin, default_tester=None, plan=[cls.plan])
cls.case_run_pks = []
def test_create_with_no_args(self):
bad_args = (None, [], {}, (), 1, 0, -1, True, False, '', 'aaaa', object)
for arg in bad_args:
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.create, self.admin_request, arg)
def test_create_with_no_required_fields(self):
values = [
{
"assignee": self.staff.pk,
"case_run_status": self.case_run_status.pk,
"notes": "unit test 2"
},
{
"build": self.build.pk,
"assignee": self.staff.pk,
"case_run_status": 1,
"notes": "unit test 2"
},
{
"run": self.test_run.pk,
"build": self.build.pk,
"assignee": self.staff.pk,
"case_run_status": self.case_run_status.pk,
"notes": "unit test 2"
},
]
for value in values:
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.create, self.admin_request, value)
def test_create_with_required_fields(self):
tcr = testcaserun.create(self.admin_request, {
"run": self.test_run.pk,
"build": self.build.pk,
"case": self.case.pk,
"case_text_version": 15,
})
self.assertIsNotNone(tcr)
self.case_run_pks.append(tcr['case_run_id'])
self.assertEqual(tcr['build_id'], self.build.pk)
self.assertEqual(tcr['case_id'], self.case.pk)
self.assertEqual(tcr['run_id'], self.test_run.pk)
def test_create_with_all_fields(self):
tcr = testcaserun.create(self.admin_request, {
"run": self.test_run.pk,
"build": self.build.pk,
"case": self.case.pk,
"assignee": self.admin.pk,
"notes": "test_create_with_all_fields",
"sortkey": 90,
"case_run_status": self.case_run_status.pk,
"case_text_version": 3,
})
self.assertIsNotNone(tcr)
self.case_run_pks.append(tcr['case_run_id'])
self.assertEquals(tcr['build_id'], self.build.pk)
self.assertEquals(tcr['case_id'], self.case.pk)
self.assertEquals(tcr['assignee_id'], self.admin.pk)
self.assertEquals(tcr['notes'], "test_create_with_all_fields")
self.assertEquals(tcr['sortkey'], 90)
self.assertEquals(tcr['case_run_status'], 'IDLE')
self.assertEquals(tcr['case_text_version'], 3)
def test_create_with_non_exist_fields(self):
values = [
{
"run": self.test_run.pk,
"build": self.build.pk,
"case": 111111,
},
{
"run": 11111,
"build": self.build.pk,
"case": self.case.pk,
},
{
"run": self.test_run.pk,
"build": 11222222,
"case": self.case.pk,
},
]
for value in values:
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.create, self.admin_request, value)
def test_create_with_chinese(self):
tcr = testcaserun.create(self.admin_request, {
"run": self.test_run.pk,
"build": self.build.pk,
"case": self.case.pk,
"notes": "开源中国",
"case_text_version": 2,
})
self.assertIsNotNone(tcr)
self.case_run_pks.append(tcr['case_run_id'])
self.assertEquals(tcr['build_id'], self.build.pk)
self.assertEquals(tcr['case_id'], self.case.pk)
self.assertEquals(tcr['assignee_id'], None)
self.assertEquals(tcr['case_text_version'], 2)
self.assertEquals(tcr['notes'], u"\u5f00\u6e90\u4e2d\u56fd")
def test_create_with_long_field(self):
large_str = """aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
"""
tcr = testcaserun.create(self.admin_request, {
"run": self.test_run.pk,
"build": self.build.pk,
"case": self.case.pk,
"notes": large_str,
"case_text_version": 2,
})
self.assertIsNotNone(tcr)
self.case_run_pks.append(tcr['case_run_id'])
self.assertEquals(tcr['build_id'], self.build.pk)
self.assertEquals(tcr['case_id'], self.case.pk)
self.assertEquals(tcr['assignee_id'], None)
self.assertEquals(tcr['case_text_version'], 2)
self.assertEquals(tcr['notes'], large_str)
def test_create_with_no_perm(self):
values = {
"run": self.test_run.pk,
"build": self.build.pk,
"case": self.case.pk,
"assignee": self.admin.pk,
"notes": "test_create_with_all_fields",
"sortkey": 2,
"case_run_status": self.case_run_status.pk,
}
self.assertRaisesXmlrpcFault(FORBIDDEN, testcaserun.create, self.staff_request, values)
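# Illustrative sketch of the serialized case run dict returned by
# testcaserun.create(); keys inferred from the assertions above, values
# hypothetical:
#
#   {'case_run_id': 1, 'run_id': 1, 'case_id': 1, 'build_id': 1,
#    'assignee_id': None, 'case_run_status': 'IDLE',
#    'case_text_version': 1, 'sortkey': 90, 'notes': '...'}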
class TestCaseRunAddComment(XmlrpcAPIBaseTest):
"""Test testcaserun.add_comment"""
@classmethod
def setUpTestData(cls):
cls.admin = UserFactory(username='update_admin', email='update_admin@example.com')
cls.admin_request = make_http_request(user=cls.admin,
user_perm='testruns.change_testcaserun')
cls.case_run_1 = TestCaseRunFactory()
cls.case_run_2 = TestCaseRunFactory()
@unittest.skip('TODO: not implemented yet.')
def test_add_comment_with_no_args(self):
pass
@unittest.skip('TODO: not implemented yet.')
def test_add_comment_with_illegal_args(self):
pass
def test_add_comment_with_string(self):
comment = testcaserun.add_comment(self.admin_request,
"{0},{1}".format(self.case_run_1.pk, self.case_run_2.pk),
"Hello World!")
self.assertIsNone(comment)
comment = testcaserun.add_comment(self.admin_request,
str(self.case_run_1.pk),
"Hello World!")
self.assertIsNone(comment)
def test_add_comment_with_list(self):
comment = testcaserun.add_comment(self.admin_request,
[self.case_run_1.pk, self.case_run_2.pk],
"Hello World!")
self.assertIsNone(comment)
def test_add_comment_with_int(self):
comment = testcaserun.add_comment(self.admin_request, self.case_run_2.pk, "Hello World!")
self.assertIsNone(comment)
class TestCaseRunAttachBug(XmlrpcAPIBaseTest):
"""Test testcaserun.attach_bug"""
@classmethod
def setUpTestData(cls):
cls.admin = UserFactory(username='update_admin', email='update_admin@example.com')
cls.staff = UserFactory(username='update_staff', email='update_staff@example.com')
cls.admin_request = make_http_request(user=cls.admin,
user_perm='testcases.add_testcasebug')
cls.staff_request = make_http_request(user=cls.staff)
cls.case_run = TestCaseRunFactory()
cls.bug_system_jira = TestCaseBugSystem.objects.get(name='JIRA')
cls.bug_system_bz = TestCaseBugSystem.objects.get(name='Bugzilla')
def test_attach_bug_with_no_perm(self):
self.assertRaisesXmlrpcFault(FORBIDDEN, testcaserun.attach_bug, self.staff_request, {})
@unittest.skip('TODO: not implemented yet.')
def test_attach_bug_with_incorrect_type_value(self):
pass
@unittest.skip('TODO: fix code to make this test pass.')
def test_attach_bug_with_no_required_args(self):
values = [
{
"summary": "This is summary.",
"description": "This is description."
},
{
"description": "This is description."
},
{
"summary": "This is summary.",
},
]
for value in values:
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.attach_bug,
self.admin_request, value)
def test_attach_bug_with_required_args(self):
bug = testcaserun.attach_bug(self.admin_request, {
"case_run_id": self.case_run.pk,
"bug_id": '1',
"bug_system_id": self.bug_system_bz.pk,
})
self.assertIsNone(bug)
bug = testcaserun.attach_bug(self.admin_request, {
"case_run_id": self.case_run.pk,
"bug_id": "TCMS-123",
"bug_system_id": self.bug_system_jira.pk,
})
self.assertIsNone(bug)
def test_attach_bug_with_all_fields(self):
bug = testcaserun.attach_bug(self.admin_request, {
"case_run_id": self.case_run.pk,
"bug_id": '2',
"bug_system_id": self.bug_system_bz.pk,
"summary": "This is summary.",
"description": "This is description."
})
self.assertIsNone(bug)
def test_succeed_to_attach_bug_by_passing_extra_data(self):
testcaserun.attach_bug(self.admin_request, {
"case_run_id": self.case_run.pk,
"bug_id": '1200',
"bug_system_id": self.bug_system_bz.pk,
"summary": "This is summary.",
"description": "This is description.",
"FFFF": "aaa"
})
bugs_added = self.case_run.case.case_bug.filter(
bug_id='1200', bug_system=self.bug_system_bz.pk).count()
self.assertEqual(1, bugs_added)
def test_attach_bug_with_non_existing_case_run(self):
value = {
"case_run_id": 111111111,
"bug_id": '2',
"bug_system_id": self.bug_system_bz.pk,
}
self.assertRaisesXmlrpcFault(NOT_FOUND, testcaserun.attach_bug, self.admin_request, value)
def test_attach_bug_with_non_existing_bug_system(self):
value = {
"case_run_id": self.case_run.pk,
"bug_id": '2',
"bug_system_id": 111111111,
}
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.attach_bug, self.admin_request, value)
def test_attach_bug_with_chinese(self):
bug = testcaserun.attach_bug(self.admin_request, {
"case_run_id": self.case_run.pk,
"bug_id": '12',
"bug_system_id": self.bug_system_bz.pk,
"summary": "你好,中国",
"description": "中国是一个具有悠久历史的文明古国"
})
self.assertIsNone(bug)
class TestCaseRunAttachLog(XmlrpcAPIBaseTest):
"""Test testcaserun.attach_log"""
@classmethod
def setUpTestData(cls):
cls.case_run = TestCaseRunFactory()
@unittest.skip('TODO: not implemented yet.')
def test_attach_log_with_bad_args(self):
pass
def test_attach_log_with_not_enough_args(self):
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.attach_log, None, '', '')
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.attach_log, None, '')
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.attach_log, None)
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.attach_log, None, '', '', '')
def test_attach_log_with_non_exist_id(self):
self.assertRaisesXmlrpcFault(NOT_FOUND, testcaserun.attach_log, None, 5523533, '', '')
@unittest.skip('TODO: code should be fixed to make this test pass')
def test_attach_log_with_invalid_url(self):
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.attach_log,
None, self.case_run.pk, "UT test logs", 'aaaaaaaaa')
def test_attach_log(self):
url = "http://127.0.0.1/test/test-log.log"
log = testcaserun.attach_log(None, self.case_run.pk, "UT test logs", url)
self.assertIsNone(log)
class TestCaseRunCheckStatus(XmlrpcAPIBaseTest):
"""Test testcaserun.check_case_run_status"""
@unittest.skip('TODO: fix code to make this test pass.')
def test_check_status_with_no_args(self):
bad_args = (None, [], {}, ())
for arg in bad_args:
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.check_case_run_status, None, arg)
@unittest.skip('TODO: fix code to make this test pass.')
def test_check_status_with_empty_name(self):
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.check_case_run_status, None, '')
@unittest.skip('TODO: fix code to make this test pass.')
def test_check_status_with_non_basestring(self):
bad_args = (True, False, 1, 0, -1, [1], (1,), dict(a=1), 0.7)
for arg in bad_args:
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.check_case_run_status, None, arg)
def test_check_status_with_name(self):
status = testcaserun.check_case_run_status(None, "IDLE")
self.assertIsNotNone(status)
self.assertEqual(status['id'], 1)
self.assertEqual(status['name'], "IDLE")
def test_check_status_with_non_exist_name(self):
self.assertRaisesXmlrpcFault(NOT_FOUND, testcaserun.check_case_run_status, None, "ABCDEFG")
class TestCaseRunDetachBug(XmlrpcAPIBaseTest):
@classmethod
def setUpTestData(cls):
cls.admin = UserFactory()
cls.staff = UserFactory()
cls.admin_request = make_http_request(user=cls.admin,
user_perm='testcases.delete_testcasebug')
cls.staff_request = make_http_request(user=cls.staff,
user_perm='testcases.add_testcasebug')
cls.bug_system_bz = TestCaseBugSystem.objects.get(name='Bugzilla')
cls.bug_system_jira = TestCaseBugSystem.objects.get(name='JIRA')
cls.case_run = TestCaseRunFactory()
def setUp(self):
self.bug_id = '67890'
testcaserun.attach_bug(self.staff_request, {
'case_run_id': self.case_run.pk,
'bug_id': self.bug_id,
'bug_system_id': self.bug_system_bz.pk,
'summary': 'Testing TCMS',
'description': 'Just foo and bar',
})
self.jira_key = 'AWSDF-112'
testcaserun.attach_bug(self.staff_request, {
'case_run_id': self.case_run.pk,
'bug_id': self.jira_key,
'bug_system_id': self.bug_system_jira.pk,
'summary': 'Testing TCMS',
'description': 'Just foo and bar',
})
def tearDown(self):
self.case_run.case.case_bug.all().delete()
@unittest.skip('TODO: fix get_bugs_s to make this test pass.')
def test_detach_bug_with_no_args(self):
bad_args = (None, [], {}, ())
for arg in bad_args:
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.detach_bug,
self.admin_request, arg, '12345')
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.detach_bug,
self.admin_request, self.case_run.pk, arg)
def test_detach_bug_with_non_exist_id(self):
original_links_count = self.case_run.case.case_bug.count()
testcaserun.detach_bug(self.admin_request, 9999999, '123456')
self.assertEqual(original_links_count, self.case_run.case.case_bug.count())
@unittest.skip('Refer to #148.')
def test_detach_bug_with_non_exist_bug(self):
original_links_count = self.case_run.case.case_bug.count()
nonexisting_bug = '{0}111'.format(self.bug_id)
testcaserun.detach_bug(self.admin_request, self.case_run.pk, nonexisting_bug)
self.assertEqual(original_links_count, self.case_run.case.case_bug.count())
@unittest.skip('Refer to #148.')
def test_detach_bug(self):
testcaserun.detach_bug(self.admin_request, self.case_run.pk, self.bug_id)
self.assertFalse(self.case_run.case.case_bug.filter(bug_id=self.bug_id).exists())
@unittest.skip('TODO: fix get_bugs_s to make this test pass.')
def test_detach_bug_with_illegal_args(self):
bad_args = ("AAAA", ['A', 'B', 'C'], dict(A=1, B=2), True, False, (1, 2, 3, 4), -100)
for arg in bad_args:
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.detach_bug,
self.admin_request, arg, self.bug_id)
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.detach_bug,
self.admin_request, self.case_run.pk, arg)
def test_detach_bug_with_no_perm(self):
self.assertRaisesXmlrpcFault(FORBIDDEN, testcaserun.detach_bug,
self.staff_request, self.case_run.pk, self.bug_id)
class TestCaseRunDetachLog(XmlrpcAPIBaseTest):
@classmethod
def setUpTestData(cls):
cls.status_idle = TestCaseRunStatus.objects.get(name='IDLE')
cls.tester = UserFactory()
cls.case_run = TestCaseRunFactory(assignee=cls.tester, tested_by=None,
notes='testing ...',
sortkey=10,
case_run_status=cls.status_idle)
def setUp(self):
testcaserun.attach_log(None, self.case_run.pk, 'Related issue', 'https://localhost/issue/1')
self.link = self.case_run.links.all()[0]
@unittest.skip('TODO: fix get_bugs_s to make this test pass.')
def test_detach_log_with_no_args(self):
bad_args = (None, [], {}, ())
for arg in bad_args:
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.detach_log,
None, arg, self.link.pk)
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.detach_log,
None, self.case_run.pk, arg)
def test_detach_log_with_not_enough_args(self):
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.detach_log, None, '')
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.detach_log, None)
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.detach_log, None, '', '', '')
def test_detach_log_with_non_exist_id(self):
self.assertRaisesXmlrpcFault(NOT_FOUND, testcaserun.detach_log, None, 9999999, self.link.pk)
def test_detach_log_with_non_exist_log(self):
testcaserun.detach_log(None, self.case_run.pk, 999999999)
self.assertEqual(1, self.case_run.links.count())
self.assertEqual(self.link.pk, self.case_run.links.all()[0].pk)
@unittest.skip('TODO: fix get_bugs_s to make this test pass.')
def test_detach_log_with_invalid_type_args(self):
bad_args = ("", "AAA", (1,), [1], dict(a=1), True, False)
for arg in bad_args:
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.detach_log,
None, arg, self.link.pk)
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.detach_log,
None, self.case_run.pk, arg)
def test_detach_log(self):
testcaserun.detach_log(None, self.case_run.pk, self.link.pk)
self.assertEqual([], list(self.case_run.links.all()))
@unittest.skip('not implemented yet.')
class TestCaseRunFilter(XmlrpcAPIBaseTest):
pass
@unittest.skip('not implemented yet.')
class TestCaseRunFilterCount(XmlrpcAPIBaseTest):
pass
class TestCaseRunGet(XmlrpcAPIBaseTest):
@classmethod
def setUpTestData(cls):
cls.status_idle = TestCaseRunStatus.objects.get(name='IDLE')
cls.tester = UserFactory()
cls.case_run = TestCaseRunFactory(assignee=cls.tester, tested_by=None,
notes='testing ...',
sortkey=10,
case_run_status=cls.status_idle)
@unittest.skip('TODO: fix get_bugs_s to make this test pass.')
def test_get_with_no_args(self):
bad_args = (None, [], {}, ())
for arg in bad_args:
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.get, None, arg)
@unittest.skip('TODO: fix get_bugs_s to make this test pass.')
def test_get_with_non_integer(self):
non_integer = (True, False, '', 'aaaa', self, [1], (1,), dict(a=1), 0.7)
for arg in non_integer:
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.get, None, arg)
def test_get_with_non_exist_id(self):
self.assertRaisesXmlrpcFault(NOT_FOUND, testcaserun.get, None, 11111111)
def test_get_with_id(self):
tcr = testcaserun.get(None, self.case_run.pk)
self.assertIsNotNone(tcr)
self.assertEqual(tcr['build_id'], self.case_run.build.pk)
self.assertEqual(tcr['case_id'], self.case_run.case.pk)
self.assertEqual(tcr['assignee_id'], self.tester.pk)
self.assertEqual(tcr['tested_by_id'], None)
self.assertEqual(tcr['notes'], 'testing ...')
self.assertEqual(tcr['sortkey'], 10)
self.assertEqual(tcr['case_run_status'], 'IDLE')
self.assertEqual(tcr['case_run_status_id'], self.status_idle.pk)
class TestCaseRunGetSet(XmlrpcAPIBaseTest):
@classmethod
def setUpTestData(cls):
cls.status_idle = TestCaseRunStatus.objects.get(name='IDLE')
cls.tester = UserFactory()
cls.case_run = TestCaseRunFactory(assignee=cls.tester, tested_by=None,
notes='testing ...',
case_run_status=cls.status_idle)
    @unittest.skip('TODO: fix get_s to make this test pass.')
def test_get_with_no_args(self):
bad_args = (None, [], (), {})
for arg in bad_args:
self.assertRaisesXmlrpcFault(
BAD_REQUEST, testcaserun.get_s,
None, arg, self.case_run.run.pk, self.case_run.build.pk, 0)
self.assertRaisesXmlrpcFault(
BAD_REQUEST, testcaserun.get_s,
None, self.case_run.case.pk, arg, self.case_run.build.pk, 0)
self.assertRaisesXmlrpcFault(
BAD_REQUEST, testcaserun.get_s,
None, self.case_run.case.pk, self.case_run.run.pk, arg, 0)
self.assertRaisesXmlrpcFault(
BAD_REQUEST, testcaserun.get_s,
None, self.case_run.case.pk, self.case_run.run.pk, self.case_run.build.pk, arg)
def test_get_with_non_exist_run(self):
self.assertRaisesXmlrpcFault(NOT_FOUND, testcaserun.get_s,
None, self.case_run.case.pk, 1111111, self.case_run.build.pk,
0)
def test_get_with_non_exist_case(self):
self.assertRaisesXmlrpcFault(NOT_FOUND, testcaserun.get_s,
None, 11111111, self.case_run.run.pk, self.case_run.build.pk,
0)
def test_get_with_non_exist_build(self):
self.assertRaisesXmlrpcFault(NOT_FOUND, testcaserun.get_s,
None, self.case_run.case.pk, self.case_run.run.pk, 1111111,
0)
def test_get_with_non_exist_env(self):
self.assertRaisesXmlrpcFault(NOT_FOUND, testcaserun.get_s,
None,
self.case_run.case.pk,
self.case_run.run.pk,
self.case_run.build.pk,
999999)
def test_get_with_no_env(self):
tcr = testcaserun.get_s(None,
self.case_run.case.pk,
self.case_run.run.pk,
self.case_run.build.pk)
self.assertIsNotNone(tcr)
self.assertEqual(tcr['case_run_id'], self.case_run.pk)
self.assertEqual(tcr['run_id'], self.case_run.run.pk)
self.assertEqual(tcr['case_id'], self.case_run.case.pk)
self.assertEqual(tcr['assignee_id'], self.tester.pk)
self.assertEqual(tcr['tested_by_id'], None)
self.assertEqual(tcr['build_id'], self.case_run.build.pk)
self.assertEqual(tcr['notes'], 'testing ...')
self.assertEqual(tcr['case_run_status_id'], self.status_idle.pk)
self.assertEqual(tcr['environment_id'], 0)
class TestCaseRunGetBugs(XmlrpcAPIBaseTest):
@classmethod
def setUpTestData(cls):
cls.admin = UserFactory()
cls.admin_request = make_http_request(user=cls.admin,
user_perm='testcases.add_testcasebug')
cls.case_run = TestCaseRunFactory()
cls.bug_system_bz = TestCaseBugSystem.objects.get(name='Bugzilla')
testcaserun.attach_bug(cls.admin_request, {
'case_run_id': cls.case_run.pk,
'bug_id': '67890',
'bug_system_id': cls.bug_system_bz.pk,
'summary': 'Testing TCMS',
'description': 'Just foo and bar',
})
def test_get_bugs_with_no_args(self):
bad_args = (None, [], {}, ())
for arg in bad_args:
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.get_bugs, None, arg)
@unittest.skip('TODO: fix get_bugs to make this test pass.')
def test_get_bugs_with_non_integer(self):
non_integer = (True, False, '', 'aaaa', self, [1], (1,), dict(a=1), 0.7)
for arg in non_integer:
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.get_bugs, None, arg)
def test_get_bugs_with_non_exist_id(self):
bugs = testcaserun.get_bugs(None, 11111111)
self.assertEqual(len(bugs), 0)
self.assertIsInstance(bugs, list)
def test_get_bugs_with_id(self):
bugs = testcaserun.get_bugs(None, self.case_run.pk)
self.assertIsNotNone(bugs)
self.assertEqual(1, len(bugs))
self.assertEqual(bugs[0]['summary'], 'Testing TCMS')
self.assertEqual(bugs[0]['bug_id'], '67890')
class TestCaseRunGetBugsSet(XmlrpcAPIBaseTest):
@classmethod
def setUpTestData(cls):
cls.admin = UserFactory(username='update_admin', email='update_admin@example.com')
cls.admin_request = make_http_request(user=cls.admin,
user_perm='testcases.add_testcasebug')
cls.case_run = TestCaseRunFactory()
cls.bug_system_bz = TestCaseBugSystem.objects.get(name='Bugzilla')
testcaserun.attach_bug(cls.admin_request, {
'case_run_id': cls.case_run.pk,
'bug_id': '67890',
'bug_system_id': cls.bug_system_bz.pk,
'summary': 'Testing TCMS',
'description': 'Just foo and bar',
})
def test_get_bug_set_with_no_args(self):
bad_args = (None, [], (), {})
for arg in bad_args:
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.get_bugs_s,
None, arg, self.case_run.case.pk, self.case_run.build.pk,
0)
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.get_bugs_s,
None, self.case_run.run.pk, arg, self.case_run.build.pk,
0)
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.get_bugs_s,
None, self.case_run.run.pk, self.case_run.case.pk, arg,
0)
@unittest.skip('TODO: fix get_bugs_s to make this test pass.')
def test_get_bug_set_with_invalid_environment_value(self):
bad_args = (None, [], (), {})
for arg in bad_args:
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.get_bugs_s,
None,
self.case_run.run.pk,
self.case_run.case.pk,
self.case_run.build.pk,
arg)
def test_get_bug_set_with_non_exist_run(self):
tcr = testcaserun.get_bugs_s(None,
1111111,
self.case_run.case.pk,
self.case_run.build.pk,
0)
self.assertIsNotNone(tcr)
self.assertIsInstance(tcr, list)
self.assertEqual(len(tcr), 0)
def test_get_bug_set_with_non_exist_case(self):
tcr = testcaserun.get_bugs_s(None,
self.case_run.run.pk,
11111111,
self.case_run.build.pk,
0)
self.assertIsNotNone(tcr)
self.assertIsInstance(tcr, list)
self.assertEqual(len(tcr), 0)
def test_get_bug_set_with_non_exist_build(self):
tcr = testcaserun.get_bugs_s(None,
self.case_run.run.pk,
self.case_run.case.pk,
1111111,
0)
self.assertIsNotNone(tcr)
self.assertIsInstance(tcr, list)
self.assertEqual(len(tcr), 0)
def test_get_bug_set_with_non_exist_env(self):
tcr = testcaserun.get_bugs_s(None,
self.case_run.run.pk,
self.case_run.case.pk,
self.case_run.build.pk,
999999)
self.assertIsNotNone(tcr)
self.assertIsInstance(tcr, list)
self.assertEqual(len(tcr), 0)
def test_get_bug_set_by_omitting_argument_environment(self):
tcr = testcaserun.get_bugs_s(None,
self.case_run.run.pk,
self.case_run.case.pk,
self.case_run.build.pk)
self.assertIsNotNone(tcr)
self.assertIsInstance(tcr, list)
self.assertEqual(len(tcr), 1)
self.assertEqual(tcr[0]['bug_id'], '67890')
self.assertEqual(tcr[0]['summary'], 'Testing TCMS')
class TestCaseRunGetStatus(XmlrpcAPIBaseTest):
@classmethod
def setUpTestData(cls):
cls.status_running = TestCaseRunStatus.objects.get(name='RUNNING')
    def test_get_all_status(self):
        expected_names = ("IDLE", "PASSED", "FAILED", "RUNNING",
                          "PAUSED", "BLOCKED", "ERROR", "WAIVED")
        # Both call signatures must return the complete status list.
        for args in ((), (None,)):
            rows = testcaserun.get_case_run_status(None, *args)
            self.assertEqual(8, len(rows))
            names = [row['name'] for row in rows]
            for name in expected_names:
                self.assertIn(name, names)
    @unittest.skip('TODO: fix get_case_run_status to make this test pass.')
def test_get_status_with_no_args(self):
bad_args = ([], {}, (), "", "AAAA", self)
for arg in bad_args:
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.get_case_run_status, None, arg)
def test_get_status_with_non_exist_id(self):
self.assertRaisesXmlrpcFault(NOT_FOUND, testcaserun.get_case_run_status, None, 999999)
def test_get_status_with_id(self):
status = testcaserun.get_case_run_status(None, self.status_running.pk)
self.assertIsNotNone(status)
self.assertEqual(status['id'], self.status_running.pk)
self.assertEqual(status['name'], "RUNNING")
def test_get_status_with_name(self):
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.get_case_run_status, None, 'PROPOSED')
@unittest.skip('not implemented yet.')
class TestCaseRunGetCompletionTime(XmlrpcAPIBaseTest):
pass
@unittest.skip('not implemented yet.')
class TestCaseRunGetCompletionTimeSet(XmlrpcAPIBaseTest):
pass
@unittest.skip('not implemented yet.')
class TestCaseRunGetHistory(XmlrpcAPIBaseTest):
def test_get_history(self):
self.assertRaisesXmlrpcFault(NOT_IMPLEMENTED, testcaserun.get_history, None, None)
@unittest.skip('not implemented yet.')
class TestCaseRunGetHistorySet(XmlrpcAPIBaseTest):
def test_get_history(self):
self.assertRaisesXmlrpcFault(NOT_IMPLEMENTED, testcaserun.get_history_s,
None, None, None, None)
class TestCaseRunGetLogs(XmlrpcAPIBaseTest):
@classmethod
def setUpTestData(cls):
cls.case_run_1 = TestCaseRunFactory()
cls.case_run_2 = TestCaseRunFactory()
testcaserun.attach_log(None, cls.case_run_1.pk, "Test logs", "http://www.google.com")
    @unittest.skip('TODO: fix get_logs to make this test pass.')
def test_get_logs_with_no_args(self):
bad_args = (None, [], (), {}, "")
for arg in bad_args:
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.get_logs, None, arg)
    @unittest.skip('TODO: fix get_logs to make this test pass.')
def test_get_logs_with_non_integer(self):
bad_args = (True, False, "AAA", 0.7, -1)
for arg in bad_args:
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.get_logs, None, arg)
def test_get_logs_with_non_exist_id(self):
self.assertRaisesXmlrpcFault(NOT_FOUND, testcaserun.get_logs, None, 99999999)
def test_get_empty_logs(self):
logs = testcaserun.get_logs(None, self.case_run_2.pk)
self.assertIsInstance(logs, list)
self.assertEqual(len(logs), 0)
def test_get_logs(self):
tcr_log = LinkReference.get_from(self.case_run_1)[0]
logs = testcaserun.get_logs(None, self.case_run_1.pk)
self.assertIsInstance(logs, list)
self.assertEqual(len(logs), 1)
self.assertEqual(logs[0]['id'], tcr_log.pk)
self.assertEqual(logs[0]['name'], "Test logs")
self.assertEqual(logs[0]['url'], "http://www.google.com")
class TestCaseRunUpdate(XmlrpcAPIBaseTest):
@classmethod
def setUpTestData(cls):
cls.admin = UserFactory()
cls.staff = UserFactory()
cls.user = UserFactory()
cls.admin_request = make_http_request(user=cls.admin,
user_perm='testruns.change_testcaserun')
cls.staff_request = make_http_request(user=cls.staff)
cls.build = TestBuildFactory()
cls.case_run_1 = TestCaseRunFactory()
cls.case_run_2 = TestCaseRunFactory()
cls.status_running = TestCaseRunStatus.objects.get(name='RUNNING')
    @unittest.skip('TODO: fix update to make this test pass.')
def test_update_with_no_args(self):
bad_args = (None, [], (), {}, "")
for arg in bad_args:
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.update,
self.admin_request, arg, {})
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.update,
self.admin_request, self.case_run_1.pk, arg)
def test_update_with_single_caserun(self):
tcr = testcaserun.update(self.admin_request, self.case_run_1.pk, {
"build": self.build.pk,
"assignee": self.user.pk,
"case_run_status": self.status_running.pk,
"notes": "AAAAAAAA",
"sortkey": 90
})
self.assertIsNotNone(tcr)
self.assertIsInstance(tcr, list)
self.assertEqual(1, len(tcr))
self.assertEqual(tcr[0]['build'], self.build.name)
self.assertEqual(tcr[0]['assignee'], self.user.username)
self.assertEqual(tcr[0]['case_run_status'], 'RUNNING')
self.assertEqual(tcr[0]['notes'], "AAAAAAAA")
self.assertEqual(tcr[0]['sortkey'], 90)
def test_update_with_multi_caserun(self):
tcr = testcaserun.update(self.admin_request,
[self.case_run_1.pk, self.case_run_2.pk],
{
"build": self.build.pk,
"assignee": self.user.pk,
"case_run_status": self.status_running.pk,
"notes": "Hello World!",
"sortkey": 180
})
self.assertIsNotNone(tcr)
self.assertIsInstance(tcr, list)
self.assertEqual(len(tcr), 2)
self.assertEqual(tcr[0]['build'], tcr[1]['build'])
self.assertEqual(tcr[0]['assignee'], tcr[1]['assignee'])
self.assertEqual(tcr[0]['case_run_status'], tcr[1]['case_run_status'])
self.assertEqual(tcr[0]['notes'], tcr[1]['notes'])
self.assertEqual(tcr[0]['sortkey'], tcr[1]['sortkey'])
def test_update_with_non_exist_build(self):
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.update,
self.admin_request, self.case_run_1.pk, {"build": 1111111})
def test_update_with_non_exist_assignee(self):
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.update,
self.admin_request, self.case_run_1.pk, {"assignee": 1111111})
def test_update_with_non_exist_status(self):
self.assertRaisesXmlrpcFault(BAD_REQUEST, testcaserun.update,
self.admin_request, self.case_run_1.pk,
{"case_run_status": 1111111})
    def test_update_by_ignoring_undocumented_fields(self):
case_run = testcaserun.update(self.admin_request, self.case_run_1.pk, {
"notes": "AAAA",
"close_date": datetime.now(),
'anotherone': 'abc',
})
self.assertEqual('AAAA', case_run[0]['notes'])
def test_update_with_no_perm(self):
self.assertRaisesXmlrpcFault(FORBIDDEN, testcaserun.update,
self.staff_request, self.case_run_1.pk, {"notes": "AAAA"})
|
MrSenko/Nitrate
|
tcms/xmlrpc/tests/test_testcaserun.py
|
Python
|
gpl-2.0
| 41,136
|
# parse the attrs
import re
import pprint
f = open('attrs.txt')
types = {}
for x in f:
    x = re.sub(r' attrs attrs ', " ATTRLIST ", x)
    x = re.sub(r' ATTRLIST attrs ', " ATTRLIST ", x)
    x = re.sub(r' ATTRLIST ATTRLIST ', " ATTRLIST ", x)
parts = x.split(" . ")
stack = parts[0].split(" ")
last = parts[1]
#m = re.match("LexToken\((\w+),",last)
#if m:
# stack.append(m.group(1))
btype = stack[2]
if btype not in types:
types[btype] = {}
t = types[btype]
#print stack
for s in stack[3:]:
if s not in t:
t[s]={}
t = t[s]
#print btype, stack[3:], parts[1]
#pprint.pprint( types)
def r(t):
    # Recursively flatten the nested attr dict into an ordered list of
    # unique *_ATTR keys, collapsing immediate repeats along the way.
    rt = []
#pprint.pprint( t)
for k in t:
m = re.match(r'\w+_ATTR',k)
if m :
#print "\tKey:%s" % k,"->", ",".join(rt)
if len(rt) > 0:
last = rt[-1]
if k != last:
rt.append(k)
else:
rt.append(k)
kd = t[k]
ret = r(kd)
if len(rt) >0 :
for x in ret:
last = rt[-1]
if x != last:
rt.append(x)
else:
rt = ret
return rt
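# e.g. (hypothetical input): r({'name_ATTR': {'type_ATTR': {}}})
# returns ['name_ATTR', 'type_ATTR']; the caller reverses this list below.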
ttypes = {}
for t in types:
#print "T:%s" % t
td = types[t]
d = r(td)
    d2 = list(reversed(d))
k = ",".join(d2)
if k not in ttypes:
ttypes[k] = [t]
else:
last = ttypes[k][-1]
if t != last :
#print t,last
ttypes[k].append(t)
#pprint.pprint(ttypes)
for s in ttypes:
#print s
for x in ttypes[s]:
#print x, s
t = s.replace(","," ")
print """def p_%s_node(psr_val):\n 'node : NODE %s %s attr_list'""" % (x,x,t)
|
h4ck3rm1k3/gcc_py_introspector
|
utils/parse_attrs.py
|
Python
|
gpl-2.0
| 1,800
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
'''
bibauthorid_frontinterface
This file filters and adapts the interface given by
bibauthorid_dbinterface in order to make it usable by the
frontend, so as to keep the frontend as clean as possible.
'''
from itertools import groupby
from operator import itemgetter
from bibauthorid_name_utils import split_name_parts
from bibauthorid_name_utils import soft_compare_names
from bibauthorid_name_utils import create_normalized_name
import bibauthorid_dbinterface as dbinter
from bibauthorid_dbinterface import get_personid_from_uid #emitting
from bibauthorid_dbinterface import create_new_person #emitting
from bibauthorid_dbinterface import update_request_ticket #emitting
from bibauthorid_dbinterface import delete_request_ticket #emitting
from bibauthorid_dbinterface import get_bibref_modification_status #emitting
from bibauthorid_dbinterface import get_canonical_id_from_personid #emitting
from bibauthorid_dbinterface import get_papers_status #emitting
from bibauthorid_dbinterface import get_person_db_names_count #emitting
from bibauthorid_dbinterface import get_person_id_from_canonical_id #emitting
from bibauthorid_dbinterface import get_person_names_count #emitting
from bibauthorid_dbinterface import get_person_db_names_set #emitting
from bibauthorid_dbinterface import get_person_papers #emitting
from bibauthorid_dbinterface import get_persons_with_open_tickets_list #emitting
from bibauthorid_dbinterface import get_request_ticket #emitting
from bibauthorid_dbinterface import insert_user_log #emitting
from bibauthorid_dbinterface import person_bibref_is_touched_old #emitting
from bibauthorid_dbinterface import reject_papers_from_person #emitting
from bibauthorid_dbinterface import reset_papers_flag #emitting
from bibauthorid_dbinterface import user_can_modify_data #emitting
from bibauthorid_dbinterface import user_can_modify_paper #emitting
from bibauthorid_dbinterface import update_personID_canonical_names #emitting
from bibauthorid_dbinterface import get_possible_bibrecref #emitting
from bibauthorid_dbinterface import resolve_paper_access_right #emitting
from bibauthorid_dbinterface import delete_cached_author_page #emitting
from bibauthorid_dbinterface import confirm_papers_to_person #emitting
from bibauthorid_dbinterface import get_name_by_bibrecref #emitting
from bibauthorid_dbinterface import get_personids_and_papers_from_bibrecs
from bibauthorid_dbinterface import get_uid_from_personid
def set_person_data(person_id, tag, value, user_level=0):
    old = dbinter.get_personid_row(person_id, tag)
    # Guard against a missing row before comparing the stored value.
    if not old or old[0] != value:
        dbinter.set_personid_row(person_id, tag, value, opt2=user_level)
def get_person_data(person_id, tag):
res = dbinter.get_personid_row(person_id, tag)
if res:
return (res[1], res[0])
else:
return []
def del_person_data(tag, person_id=None, value=None):
dbinter.del_personid_row(tag, person_id, value)
def get_bibrefrec_name_string(bibref):
    '''
    Returns the name string associated to a bibrefrec
    @param bibref: bibrefrec, e.g. '100:123,123'
    @return: string
    '''
name = ""
ref = ""
if not ((bibref and isinstance(bibref, str) and bibref.count(":"))):
return name
if bibref.count(","):
try:
ref = bibref.split(",")[0]
except (ValueError, TypeError, IndexError):
return name
else:
ref = bibref
table, ref = ref.split(":")
dbname = get_name_by_bibrecref((int(table), int(ref)))
if dbname:
name = dbname
return name
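# Illustrative usage (the bibrefrec below is invented for the example):
#   get_bibrefrec_name_string('100:123,456')
# strips the ',456' record part, resolves table 100 / row 123 through
# get_name_by_bibrecref, and returns '' for malformed input.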
def add_person_paper_needs_manual_review(pid, bibrec):
'''
Adds to a person a paper which needs manual review before bibref assignment
@param pid: personid, int
@param bibrec: the bibrec, int
'''
set_person_data(pid, 'paper_needs_bibref_manual_confirm', bibrec)
def get_person_papers_to_be_manually_reviewed(pid):
    '''
    Returns the set of papers awaiting manual review for bibref
    assignment for a person
    @param pid: the personid, int
    '''
return get_person_data(pid, 'paper_needs_bibref_manual_confirm')
def del_person_papers_needs_manual_review(pid, bibrec):
    '''
    Deletes a paper from the set of papers awaiting manual review for a person
    @param pid: personid, int
    @param bibrec: the bibrec, int
    '''
del_person_data(person_id=pid, tag='paper_needs_bibref_manual_confirm', value=str(bibrec))
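# Illustrative round trip (pid/bibrec values hypothetical): these three
# helpers stage a paper for manual review and clear it again.
#   add_person_paper_needs_manual_review(7, 1234)
#   get_person_papers_to_be_manually_reviewed(7)
#   del_person_papers_needs_manual_review(7, 1234)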
def set_processed_external_recids(pid, recid_list_str):
    '''
    Overwrites the stored list of processed external recids for a person.
    @param pid: person id, int
    @param recid_list_str: recid list as a string
    '''
del_person_data(person_id=pid, tag='processed_external_recids')
set_person_data(pid, "processed_external_recids", recid_list_str)
def assign_person_to_uid(uid, pid):
    '''
    Assigns a person to a userid. If the person is already assigned to
    another user, a new person is created. Returns the person id assigned.
    @param uid: user id, int
    @param pid: person id, int; if -1, a new person is created.
    @return: pid int
    '''
if pid == -1:
pid = dbinter.create_new_person_from_uid(uid)
return pid
else:
current_uid = get_person_data(pid, 'uid')
if len(current_uid) == 0:
set_person_data(pid, 'uid', str(uid))
return pid
else:
pid = dbinter.create_new_person_from_uid(uid)
return pid
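# Illustrative behaviour (uid/pid values hypothetical):
#   assign_person_to_uid(42, -1)  # always creates a fresh person for uid 42
#   assign_person_to_uid(42, 7)   # claims person 7 only if no uid owns it yet;
#                                 # otherwise a new person is created and its
#                                 # pid is returned instead of 7.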
def get_processed_external_recids(pid):
    '''
    Returns the stored processed external recids string for a person.
    @param pid: person id, int
    @return: str
    '''
db_data = get_person_data(pid, "processed_external_recids")
recid_list_str = ''
if db_data and db_data[0] and db_data[0][1]:
recid_list_str = db_data[0][1]
return recid_list_str
def get_all_personids_recs(pid, claimed_only=False):
return dbinter.get_all_paper_records(pid, claimed_only)
def find_personIDs_by_name_string(target):
    '''
    Search engine to find persons matching the given string.
    The matching is done on the surname first, and on first names if present.
    A list of pids and found names, ordered by compatibility, is returned.
    @param target: name string, 'surname, names I.'
    @type target: string
    @return: list of lists
    [pid, [[name string, occurrence count, compatibility]]]
    '''
splitted_name = split_name_parts(target)
family = splitted_name[0]
target_cleaned = create_normalized_name(splitted_name)
levels = (#target + '%', #this introduces a weird problem: different results for mele, salvatore and salvatore mele
family + ',%',
family[:-2] + '%',
'%' + family + ',%',
'%' + family[1:-1] + '%')
if len(family) <= 4:
levels = [levels[0], levels[2]]
for lev in levels:
names = dbinter.get_all_personids_by_name(lev)
if names:
break
is_canonical = False
if not names:
names = dbinter.get_personids_by_canonical_name(target)
is_canonical = True
names = groupby(sorted(names))
names = [(key[0], key[1], len(list(data)), soft_compare_names(target, key[1])) for key, data in names]
names = groupby(names, itemgetter(0))
names = [(key, sorted([(d[1], d[2], d[3]) for d in data if (d[3] > 0.5 or is_canonical)],
key=itemgetter(2), reverse=True)) for key, data in names]
names = [name for name in names if name[1]]
names = sorted(names, key=lambda x: (x[1][0][2], x[1][0][0], x[1][0][1]), reverse=True)
return names
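# Illustrative query (the name is hypothetical): for
#   find_personIDs_by_name_string('Ellis, John')
# the LIKE patterns tried in order are 'Ellis,%', 'Ell%', '%Ellis,%' and
# '%lli%'; surnames of four characters or fewer keep only the first and
# third patterns, and canonical-name lookup is the final fallback before
# candidates are scored with soft_compare_names.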
def reclaim_personid_for_new_arXiv_user(bibrecs, name, uid=-1):
pidlist = get_personids_and_papers_from_bibrecs(bibrecs, limit_by_name=name)
pid = None
for p in pidlist:
if not get_uid_from_personid(p[0]):
dbinter.set_personid_row(p[0], 'uid', uid)
return p[0]
return create_new_person(uid, uid_is_owner=True)
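# Illustrative behaviour (ids hypothetical): if one of the candidate pids
# returned for the given bibrecs has no uid yet, it is claimed for `uid`
# and returned; if every candidate is already owned, a brand-new person
# owned by `uid` is created instead.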
|
jrbl/invenio
|
modules/bibauthorid/lib/bibauthorid_frontinterface.py
|
Python
|
gpl-2.0
| 8,992
|