| code (stringlengths 2 to 1.05M) | repo_name (stringlengths 5 to 104) | path (stringlengths 4 to 251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 2 to 1.05M) |
|---|---|---|---|---|---|
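Each row pairs a full source file (the `code` column) with its repository metadata. As a rough, hypothetical sketch of how records with this shape might be consumed, the snippet below iterates a local JSON Lines file; the file name `samples.jsonl` and the JSON Lines layout are assumptions made for illustration, not part of the dataset description.

```python
import json

# Hypothetical sketch: read records shaped like the columns above
# (code, repo_name, path, language, license, size).
# The file name "samples.jsonl" and the JSON Lines layout are assumptions.
with open("samples.jsonl", "r", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        # Print a brief summary of each sampled file.
        print(record["repo_name"], record["path"], record["size"])
```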
import glob

json_files = glob.glob("tests/**/output/**/*.json", recursive=True)
html_files = glob.glob("tests/**/output/**/*.html", recursive=True)

html_list = ""
for f_ in html_files:
    html_list += '\t<li><a href="{}">{}</li>\n'.format(
        f_[6:],
        f_.split(".")[-2],
    )

json_list = ""
for f_ in json_files:
    json_list += '\t<li><a href="{}">{}</li>\n'.format(
        f_[6:],
        f_.split(".")[-2],
    )

html_file = """
<html>
<body>
<h3>HTML</h3>
<ul>
{}
</ul>
<br/><br/>
<h3>JSON</h3>
<ul>
{}
</ul>
</body>
</html>
""".format(
    html_list, json_list
)

print(html_file)
| great-expectations/great_expectations | tests/build_index_page.py | Python | apache-2.0 | 628 |
#!/usr/bin/env python
import telnetlib
import time
import socket
import sys
import getpass

TELNET_PORT = 23
TELNET_TIMEOUT = 6


def send_command(remote_conn, cmd):
    '''
    Initiate the Telnet Session
    '''
    cmd = cmd.rstrip()
    remote_conn.write(cmd + '\n')
    time.sleep(1)
    return remote_conn.read_very_eager()


def login(remote_conn, username, password):
    '''
    Login to pynet-rtr1
    '''
    output = remote_conn.read_until("sername:", TELNET_TIMEOUT)
    remote_conn.write(username + '\n')
    output += remote_conn.read_until("ssword:", TELNET_TIMEOUT)
    remote_conn.write(password + '\n')
    return output


def no_more(remote_conn, paging_cmd='terminal length 0'):
    '''
    No paging of Output
    '''
    return send_command(remote_conn, paging_cmd)


def telnet_connect(ip_addr):
    '''
    Establish the Telnet Connection
    '''
    try:
        return telnetlib.Telnet(ip_addr, TELNET_PORT, TELNET_TIMEOUT)
    except socket.timeout:
        sys.exit("Connection timed-out")


def main():
    '''
    Connect to pynet-rtr1, login, and issue 'show ip int brief'
    '''
    ip_addr = raw_input("IP address: ")
    ip_addr = ip_addr.strip()
    username = 'pyclass'
    password = getpass.getpass()

    remote_conn = telnet_connect(ip_addr)
    output = login(remote_conn, username, password)

    time.sleep(1)
    remote_conn.read_very_eager()
    no_more(remote_conn)

    output = send_command(remote_conn, 'show ip int brief')

    print "\n\n"
    print output
    print "\n\n"

    remote_conn.close()


if __name__ == "__main__":
    main()
| gahlberg/pynet_class_work | class2/ex2a_telnet.py | Python | apache-2.0 | 1,588 |
#!/usr/bin/env python
from __future__ import unicode_literals
'''Generate header file for nanopb from a ProtoBuf FileDescriptorSet.'''
nanopb_version = "nanopb-0.3.9.2"
import sys
import re
import codecs
from functools import reduce
try:
# Add some dummy imports to keep packaging tools happy.
import google, distutils.util # bbfreeze seems to need these
import pkg_resources # pyinstaller / protobuf 2.5 seem to need these
except:
# Don't care, we will error out later if it is actually important.
pass
try:
import google.protobuf.text_format as text_format
import google.protobuf.descriptor_pb2 as descriptor
except:
sys.stderr.write('''
*************************************************************
*** Could not import the Google protobuf Python libraries ***
*** Try installing package 'python-protobuf' or similar. ***
*************************************************************
''' + '\n')
raise
try:
import proto.nanopb_pb2 as nanopb_pb2
import proto.plugin_pb2 as plugin_pb2
except TypeError:
sys.stderr.write('''
****************************************************************************
*** Got TypeError when importing the protocol definitions for generator. ***
*** This usually means that the protoc in your path doesn't match the ***
*** Python protobuf library version. ***
*** ***
*** Please check the output of the following commands: ***
*** which protoc ***
*** protoc --version ***
*** python -c 'import google.protobuf; print(google.protobuf.__file__)' ***
*** If you are not able to find the python protobuf version using the ***
*** above command, use this command. ***
*** pip freeze | grep -i protobuf ***
****************************************************************************
''' + '\n')
raise
except:
sys.stderr.write('''
********************************************************************
*** Failed to import the protocol definitions for generator. ***
*** You have to run 'make' in the nanopb/generator/proto folder. ***
********************************************************************
''' + '\n')
raise
# ---------------------------------------------------------------------------
# Generation of single fields
# ---------------------------------------------------------------------------
import time
import os.path
# Values are tuple (c type, pb type, encoded size, int_size_allowed)
FieldD = descriptor.FieldDescriptorProto
datatypes = {
FieldD.TYPE_BOOL: ('bool', 'BOOL', 1, False),
FieldD.TYPE_DOUBLE: ('double', 'DOUBLE', 8, False),
FieldD.TYPE_FIXED32: ('uint32_t', 'FIXED32', 4, False),
FieldD.TYPE_FIXED64: ('uint64_t', 'FIXED64', 8, False),
FieldD.TYPE_FLOAT: ('float', 'FLOAT', 4, False),
FieldD.TYPE_INT32: ('int32_t', 'INT32', 10, True),
FieldD.TYPE_INT64: ('int64_t', 'INT64', 10, True),
FieldD.TYPE_SFIXED32: ('int32_t', 'SFIXED32', 4, False),
FieldD.TYPE_SFIXED64: ('int64_t', 'SFIXED64', 8, False),
FieldD.TYPE_SINT32: ('int32_t', 'SINT32', 5, True),
FieldD.TYPE_SINT64: ('int64_t', 'SINT64', 10, True),
FieldD.TYPE_UINT32: ('uint32_t', 'UINT32', 5, True),
FieldD.TYPE_UINT64: ('uint64_t', 'UINT64', 10, True)
}
# Integer size overrides (from .proto settings)
intsizes = {
nanopb_pb2.IS_8: 'int8_t',
nanopb_pb2.IS_16: 'int16_t',
nanopb_pb2.IS_32: 'int32_t',
nanopb_pb2.IS_64: 'int64_t',
}
# String types (for python 2 / python 3 compatibility)
try:
strtypes = (unicode, str)
except NameError:
strtypes = (str, )
class Names:
'''Keeps a set of nested names and formats them to C identifier.'''
def __init__(self, parts = ()):
if isinstance(parts, Names):
parts = parts.parts
elif isinstance(parts, strtypes):
parts = (parts,)
self.parts = tuple(parts)
def __str__(self):
return '_'.join(self.parts)
def __add__(self, other):
if isinstance(other, strtypes):
return Names(self.parts + (other,))
elif isinstance(other, Names):
return Names(self.parts + other.parts)
elif isinstance(other, tuple):
return Names(self.parts + other)
else:
raise ValueError("Name parts should be of type str")
def __eq__(self, other):
return isinstance(other, Names) and self.parts == other.parts
def names_from_type_name(type_name):
'''Parse Names() from FieldDescriptorProto type_name'''
if type_name[0] != '.':
raise NotImplementedError("Lookup of non-absolute type names is not supported")
return Names(type_name[1:].split('.'))
def varint_max_size(max_value):
'''Returns the maximum number of bytes a varint can take when encoded.'''
if max_value < 0:
max_value = 2**64 - max_value
for i in range(1, 11):
if (max_value >> (i * 7)) == 0:
return i
raise ValueError("Value too large for varint: " + str(max_value))
assert varint_max_size(-1) == 10
assert varint_max_size(0) == 1
assert varint_max_size(127) == 1
assert varint_max_size(128) == 2
class EncodedSize:
'''Class used to represent the encoded size of a field or a message.
Consists of a combination of symbolic sizes and integer sizes.'''
def __init__(self, value = 0, symbols = []):
if isinstance(value, EncodedSize):
self.value = value.value
self.symbols = value.symbols
elif isinstance(value, strtypes + (Names,)):
self.symbols = [str(value)]
self.value = 0
else:
self.value = value
self.symbols = symbols
def __add__(self, other):
if isinstance(other, int):
return EncodedSize(self.value + other, self.symbols)
elif isinstance(other, strtypes + (Names,)):
return EncodedSize(self.value, self.symbols + [str(other)])
elif isinstance(other, EncodedSize):
return EncodedSize(self.value + other.value, self.symbols + other.symbols)
else:
raise ValueError("Cannot add size: " + repr(other))
def __mul__(self, other):
if isinstance(other, int):
return EncodedSize(self.value * other, [str(other) + '*' + s for s in self.symbols])
else:
raise ValueError("Cannot multiply size: " + repr(other))
def __str__(self):
if not self.symbols:
return str(self.value)
else:
return '(' + str(self.value) + ' + ' + ' + '.join(self.symbols) + ')'
def upperlimit(self):
if not self.symbols:
return self.value
else:
return 2**32 - 1
class Enum:
def __init__(self, names, desc, enum_options):
'''desc is EnumDescriptorProto'''
self.options = enum_options
self.names = names
# by definition, `names` include this enum's name
base_name = Names(names.parts[:-1])
if enum_options.long_names:
self.values = [(names + x.name, x.number) for x in desc.value]
else:
self.values = [(base_name + x.name, x.number) for x in desc.value]
self.value_longnames = [self.names + x.name for x in desc.value]
self.packed = enum_options.packed_enum
def has_negative(self):
for n, v in self.values:
if v < 0:
return True
return False
def encoded_size(self):
return max([varint_max_size(v) for n,v in self.values])
def __str__(self):
result = 'typedef enum _%s {\n' % self.names
result += ',\n'.join([" %s = %d" % x for x in self.values])
result += '\n}'
if self.packed:
result += ' pb_packed'
result += ' %s;' % self.names
result += '\n#define _%s_MIN %s' % (self.names, self.values[0][0])
result += '\n#define _%s_MAX %s' % (self.names, self.values[-1][0])
result += '\n#define _%s_ARRAYSIZE ((%s)(%s+1))' % (self.names, self.names, self.values[-1][0])
if not self.options.long_names:
# Define the long names always so that enum value references
# from other files work properly.
for i, x in enumerate(self.values):
result += '\n#define %s %s' % (self.value_longnames[i], x[0])
if self.options.enum_to_string:
result += '\nconst char *%s_name(%s v);\n' % (self.names, self.names)
return result
def enum_to_string_definition(self):
if not self.options.enum_to_string:
return ""
result = 'const char *%s_name(%s v) {\n' % (self.names, self.names)
result += ' switch (v) {\n'
for ((enumname, _), strname) in zip(self.values, self.value_longnames):
# Strip off the leading type name from the string value.
strval = str(strname)[len(str(self.names)) + 1:]
result += ' case %s: return "%s";\n' % (enumname, strval)
result += ' }\n'
result += ' return "unknown";\n'
result += '}\n'
return result
class FieldMaxSize:
def __init__(self, worst = 0, checks = [], field_name = 'undefined'):
if isinstance(worst, list):
self.worst = max(i for i in worst if i is not None)
else:
self.worst = worst
self.worst_field = field_name
self.checks = list(checks)
def extend(self, extend, field_name = None):
self.worst = max(self.worst, extend.worst)
if self.worst == extend.worst:
self.worst_field = extend.worst_field
self.checks.extend(extend.checks)
class Field:
def __init__(self, struct_name, desc, field_options):
'''desc is FieldDescriptorProto'''
self.tag = desc.number
self.struct_name = struct_name
self.union_name = None
self.name = desc.name
self.default = None
self.max_size = None
self.max_count = None
self.array_decl = ""
self.enc_size = None
self.ctype = None
self.fixed_count = False
if field_options.type == nanopb_pb2.FT_INLINE:
# Before nanopb-0.3.8, fixed length bytes arrays were specified
# by setting type to FT_INLINE. But to handle pointer typed fields,
# it makes sense to have it as a separate option.
field_options.type = nanopb_pb2.FT_STATIC
field_options.fixed_length = True
# Parse field options
if field_options.HasField("max_size"):
self.max_size = field_options.max_size
if desc.type == FieldD.TYPE_STRING and field_options.HasField("max_length"):
# max_length overrides max_size for strings
self.max_size = field_options.max_length + 1
if field_options.HasField("max_count"):
self.max_count = field_options.max_count
if desc.HasField('default_value'):
self.default = desc.default_value
# Check field rules, i.e. required/optional/repeated.
can_be_static = True
if desc.label == FieldD.LABEL_REPEATED:
self.rules = 'REPEATED'
if self.max_count is None:
can_be_static = False
else:
self.array_decl = '[%d]' % self.max_count
self.fixed_count = field_options.fixed_count
elif field_options.proto3:
self.rules = 'SINGULAR'
elif desc.label == FieldD.LABEL_REQUIRED:
self.rules = 'REQUIRED'
elif desc.label == FieldD.LABEL_OPTIONAL:
self.rules = 'OPTIONAL'
else:
raise NotImplementedError(desc.label)
# Check if the field can be implemented with static allocation
# i.e. whether the data size is known.
if desc.type == FieldD.TYPE_STRING and self.max_size is None:
can_be_static = False
if desc.type == FieldD.TYPE_BYTES and self.max_size is None:
can_be_static = False
# Decide how the field data will be allocated
if field_options.type == nanopb_pb2.FT_DEFAULT:
if can_be_static:
field_options.type = nanopb_pb2.FT_STATIC
else:
field_options.type = nanopb_pb2.FT_CALLBACK
if field_options.type == nanopb_pb2.FT_STATIC and not can_be_static:
raise Exception("Field '%s' is defined as static, but max_size or "
"max_count is not given." % self.name)
if field_options.fixed_count and self.max_count is None:
raise Exception("Field '%s' is defined as fixed count, "
"but max_count is not given." % self.name)
if field_options.type == nanopb_pb2.FT_STATIC:
self.allocation = 'STATIC'
elif field_options.type == nanopb_pb2.FT_POINTER:
self.allocation = 'POINTER'
elif field_options.type == nanopb_pb2.FT_CALLBACK:
self.allocation = 'CALLBACK'
else:
raise NotImplementedError(field_options.type)
# Decide the C data type to use in the struct.
if desc.type in datatypes:
self.ctype, self.pbtype, self.enc_size, isa = datatypes[desc.type]
# Override the field size if user wants to use smaller integers
if isa and field_options.int_size != nanopb_pb2.IS_DEFAULT:
self.ctype = intsizes[field_options.int_size]
if desc.type == FieldD.TYPE_UINT32 or desc.type == FieldD.TYPE_UINT64:
self.ctype = 'u' + self.ctype;
elif desc.type == FieldD.TYPE_ENUM:
self.pbtype = 'ENUM'
self.ctype = names_from_type_name(desc.type_name)
if self.default is not None:
self.default = self.ctype + self.default
self.enc_size = None # Needs to be filled in when enum values are known
elif desc.type == FieldD.TYPE_STRING:
self.pbtype = 'STRING'
self.ctype = 'char'
if self.allocation == 'STATIC':
self.ctype = 'char'
self.array_decl += '[%d]' % self.max_size
self.enc_size = varint_max_size(self.max_size) + self.max_size
elif desc.type == FieldD.TYPE_BYTES:
if field_options.fixed_length:
self.pbtype = 'FIXED_LENGTH_BYTES'
if self.max_size is None:
raise Exception("Field '%s' is defined as fixed length, "
"but max_size is not given." % self.name)
self.enc_size = varint_max_size(self.max_size) + self.max_size
self.ctype = 'pb_byte_t'
self.array_decl += '[%d]' % self.max_size
else:
self.pbtype = 'BYTES'
self.ctype = 'pb_bytes_array_t'
if self.allocation == 'STATIC':
self.ctype = self.struct_name + self.name + 't'
self.enc_size = varint_max_size(self.max_size) + self.max_size
elif desc.type == FieldD.TYPE_MESSAGE:
self.pbtype = 'MESSAGE'
self.ctype = self.submsgname = names_from_type_name(desc.type_name)
self.enc_size = None # Needs to be filled in after the message type is available
else:
raise NotImplementedError(desc.type)
def __lt__(self, other):
return self.tag < other.tag
def __str__(self):
result = ''
if self.allocation == 'POINTER':
if self.rules == 'REPEATED':
result += ' pb_size_t ' + self.name + '_count;\n'
if self.pbtype == 'MESSAGE':
# Use struct definition, so recursive submessages are possible
result += ' struct _%s *%s;' % (self.ctype, self.name)
elif self.pbtype == 'FIXED_LENGTH_BYTES':
# Pointer to fixed size array
result += ' %s (*%s)%s;' % (self.ctype, self.name, self.array_decl)
elif self.rules == 'REPEATED' and self.pbtype in ['STRING', 'BYTES']:
# String/bytes arrays need to be defined as pointers to pointers
result += ' %s **%s;' % (self.ctype, self.name)
else:
result += ' %s *%s;' % (self.ctype, self.name)
elif self.allocation == 'CALLBACK':
result += ' pb_callback_t %s;' % self.name
else:
if self.rules == 'OPTIONAL' and self.allocation == 'STATIC':
result += ' bool has_' + self.name + ';\n'
elif (self.rules == 'REPEATED' and
self.allocation == 'STATIC' and
not self.fixed_count):
result += ' pb_size_t ' + self.name + '_count;\n'
result += ' %s %s%s;' % (self.ctype, self.name, self.array_decl)
return result
def types(self):
'''Return definitions for any special types this field might need.'''
if self.pbtype == 'BYTES' and self.allocation == 'STATIC':
result = 'typedef PB_BYTES_ARRAY_T(%d) %s;\n' % (self.max_size, self.ctype)
else:
result = ''
return result
def get_dependencies(self):
'''Get list of type names used by this field.'''
if self.allocation == 'STATIC':
return [str(self.ctype)]
else:
return []
def get_initializer(self, null_init, inner_init_only = False):
'''Return literal expression for this field's default value.
null_init: If True, initialize to a 0 value instead of default from .proto
inner_init_only: If True, exclude initialization for any count/has fields
'''
inner_init = None
if self.pbtype == 'MESSAGE':
if null_init:
inner_init = '%s_init_zero' % self.ctype
else:
inner_init = '%s_init_default' % self.ctype
elif self.default is None or null_init:
if self.pbtype == 'STRING':
inner_init = '""'
elif self.pbtype == 'BYTES':
inner_init = '{0, {0}}'
elif self.pbtype == 'FIXED_LENGTH_BYTES':
inner_init = '{0}'
elif self.pbtype in ('ENUM', 'UENUM'):
inner_init = '_%s_MIN' % self.ctype
else:
inner_init = '0'
else:
if self.pbtype == 'STRING':
data = codecs.escape_encode(self.default.encode('utf-8'))[0]
inner_init = '"' + data.decode('ascii') + '"'
elif self.pbtype == 'BYTES':
data = codecs.escape_decode(self.default)[0]
data = ["0x%02x" % c for c in bytearray(data)]
if len(data) == 0:
inner_init = '{0, {0}}'
else:
inner_init = '{%d, {%s}}' % (len(data), ','.join(data))
elif self.pbtype == 'FIXED_LENGTH_BYTES':
data = codecs.escape_decode(self.default)[0]
data = ["0x%02x" % c for c in bytearray(data)]
if len(data) == 0:
inner_init = '{0}'
else:
inner_init = '{%s}' % ','.join(data)
elif self.pbtype in ['FIXED32', 'UINT32']:
inner_init = str(self.default) + 'u'
elif self.pbtype in ['FIXED64', 'UINT64']:
inner_init = str(self.default) + 'ull'
elif self.pbtype in ['SFIXED64', 'INT64']:
inner_init = str(self.default) + 'll'
else:
inner_init = str(self.default)
if inner_init_only:
return inner_init
outer_init = None
if self.allocation == 'STATIC':
if self.rules == 'REPEATED':
outer_init = ''
if not self.fixed_count:
outer_init += '0, '
outer_init += '{'
outer_init += ', '.join([inner_init] * self.max_count)
outer_init += '}'
elif self.rules == 'OPTIONAL':
outer_init = 'false, ' + inner_init
else:
outer_init = inner_init
elif self.allocation == 'POINTER':
if self.rules == 'REPEATED':
outer_init = '0, NULL'
else:
outer_init = 'NULL'
elif self.allocation == 'CALLBACK':
if self.pbtype == 'EXTENSION':
outer_init = 'NULL'
else:
outer_init = '{{NULL}, NULL}'
return outer_init
def default_decl(self, declaration_only = False):
'''Return definition for this field's default value.'''
if self.default is None:
return None
ctype = self.ctype
default = self.get_initializer(False, True)
array_decl = ''
if self.pbtype == 'STRING':
if self.allocation != 'STATIC':
return None # Not implemented
array_decl = '[%d]' % self.max_size
elif self.pbtype == 'BYTES':
if self.allocation != 'STATIC':
return None # Not implemented
elif self.pbtype == 'FIXED_LENGTH_BYTES':
if self.allocation != 'STATIC':
return None # Not implemented
array_decl = '[%d]' % self.max_size
if declaration_only:
return 'extern const %s %s_default%s;' % (ctype, self.struct_name + self.name, array_decl)
else:
return 'const %s %s_default%s = %s;' % (ctype, self.struct_name + self.name, array_decl, default)
def tags(self):
'''Return the #define for the tag number of this field.'''
identifier = '%s_%s_tag' % (self.struct_name, self.name)
return '#define %-40s %d\n' % (identifier, self.tag)
def pb_field_t(self, prev_field_name, union_index = None):
'''Return the pb_field_t initializer to use in the constant array.
prev_field_name is the name of the previous field or None. For OneOf
unions, union_index is the index of this field inside the OneOf.
'''
if self.rules == 'ONEOF':
if self.anonymous:
result = ' PB_ANONYMOUS_ONEOF_FIELD(%s, ' % self.union_name
else:
result = ' PB_ONEOF_FIELD(%s, ' % self.union_name
elif self.fixed_count:
result = ' PB_REPEATED_FIXED_COUNT('
else:
result = ' PB_FIELD('
result += '%3d, ' % self.tag
result += '%-8s, ' % self.pbtype
if not self.fixed_count:
result += '%s, ' % self.rules
result += '%-8s, ' % self.allocation
if union_index is not None and union_index > 0:
result += 'UNION, '
elif prev_field_name is None:
result += 'FIRST, '
else:
result += 'OTHER, '
result += '%s, ' % self.struct_name
result += '%s, ' % self.name
result += '%s, ' % (prev_field_name or self.name)
if self.pbtype == 'MESSAGE':
result += '&%s_fields)' % self.submsgname
elif self.default is None:
result += '0)'
elif self.pbtype in ['BYTES', 'STRING', 'FIXED_LENGTH_BYTES'] and self.allocation != 'STATIC':
result += '0)' # Arbitrary size default values not implemented
elif self.rules == 'OPTEXT':
result += '0)' # Default value for extensions is not implemented
else:
result += '&%s_default)' % (self.struct_name + self.name)
return result
def get_last_field_name(self):
return self.name
def largest_field_value(self):
'''Determine if this field needs 16bit or 32bit pb_field_t structure to compile properly.
Returns numeric value or a C-expression for assert.'''
check = []
if self.pbtype == 'MESSAGE' and self.allocation == 'STATIC':
if self.rules == 'REPEATED':
check.append('pb_membersize(%s, %s[0])' % (self.struct_name, self.name))
elif self.rules == 'ONEOF':
if self.anonymous:
check.append('pb_membersize(%s, %s)' % (self.struct_name, self.name))
else:
check.append('pb_membersize(%s, %s.%s)' % (self.struct_name, self.union_name, self.name))
else:
check.append('pb_membersize(%s, %s)' % (self.struct_name, self.name))
elif self.pbtype == 'BYTES' and self.allocation == 'STATIC':
if self.max_size > 251:
check.append('pb_membersize(%s, %s)' % (self.struct_name, self.name))
return FieldMaxSize([self.tag, self.max_size, self.max_count],
check,
('%s.%s' % (self.struct_name, self.name)))
def encoded_size(self, dependencies):
'''Return the maximum size that this field can take when encoded,
including the field tag. If the size cannot be determined, returns
None.'''
if self.allocation != 'STATIC':
return None
if self.pbtype == 'MESSAGE':
encsize = None
if str(self.submsgname) in dependencies:
submsg = dependencies[str(self.submsgname)]
encsize = submsg.encoded_size(dependencies)
if encsize is not None:
# Include submessage length prefix
encsize += varint_max_size(encsize.upperlimit())
else:
my_msg = dependencies.get(str(self.struct_name))
if my_msg and submsg.protofile == my_msg.protofile:
# The dependency is from the same file and size cannot be
# determined for it, thus we know it will not be possible
# in runtime either.
return None
if encsize is None:
# Submessage or its size cannot be found.
# This can occur if submessage is defined in different
# file, and it or its .options could not be found.
# Instead of direct numeric value, reference the size that
# has been #defined in the other file.
encsize = EncodedSize(self.submsgname + 'size')
# We will have to make a conservative assumption on the length
# prefix size, though.
encsize += 5
elif self.pbtype in ['ENUM', 'UENUM']:
if str(self.ctype) in dependencies:
enumtype = dependencies[str(self.ctype)]
encsize = enumtype.encoded_size()
else:
# Conservative assumption
encsize = 10
elif self.enc_size is None:
raise RuntimeError("Could not determine encoded size for %s.%s"
% (self.struct_name, self.name))
else:
encsize = EncodedSize(self.enc_size)
encsize += varint_max_size(self.tag << 3) # Tag + wire type
if self.rules == 'REPEATED':
# Decoders must be always able to handle unpacked arrays.
# Therefore we have to reserve space for it, even though
# we emit packed arrays ourselves. For length of 1, packed
# arrays are larger however so we need to add allowance
# for the length byte.
encsize *= self.max_count
if self.max_count == 1:
encsize += 1
return encsize
class ExtensionRange(Field):
def __init__(self, struct_name, range_start, field_options):
'''Implements a special pb_extension_t* field in an extensible message
structure. The range_start signifies the index at which the extensions
start. Not necessarily all tags above this are extensions, it is merely
a speed optimization.
'''
self.tag = range_start
self.struct_name = struct_name
self.name = 'extensions'
self.pbtype = 'EXTENSION'
self.rules = 'OPTIONAL'
self.allocation = 'CALLBACK'
self.ctype = 'pb_extension_t'
self.array_decl = ''
self.default = None
self.max_size = 0
self.max_count = 0
self.fixed_count = False
def __str__(self):
return ' pb_extension_t *extensions;'
def types(self):
return ''
def tags(self):
return ''
def encoded_size(self, dependencies):
# We exclude extensions from the count, because they cannot be known
# until runtime. Other option would be to return None here, but this
# way the value remains useful if extensions are not used.
return EncodedSize(0)
class ExtensionField(Field):
def __init__(self, fullname, desc, field_options):
self.fullname = fullname
self.extendee_name = names_from_type_name(desc.extendee)
Field.__init__(self, self.fullname + 'struct', desc, field_options)
if self.rules != 'OPTIONAL':
self.skip = True
else:
self.skip = False
self.rules = 'OPTEXT'
def tags(self):
'''Return the #define for the tag number of this field.'''
identifier = '%s_tag' % self.fullname
return '#define %-40s %d\n' % (identifier, self.tag)
def extension_decl(self):
'''Declaration of the extension type in the .pb.h file'''
if self.skip:
msg = '/* Extension field %s was skipped because only "optional"\n' % self.fullname
msg +=' type of extension fields is currently supported. */\n'
return msg
return ('extern const pb_extension_type_t %s; /* field type: %s */\n' %
(self.fullname, str(self).strip()))
def extension_def(self):
'''Definition of the extension type in the .pb.c file'''
if self.skip:
return ''
result = 'typedef struct {\n'
result += str(self)
result += '\n} %s;\n\n' % self.struct_name
result += ('static const pb_field_t %s_field = \n %s;\n\n' %
(self.fullname, self.pb_field_t(None)))
result += 'const pb_extension_type_t %s = {\n' % self.fullname
result += ' NULL,\n'
result += ' NULL,\n'
result += ' &%s_field\n' % self.fullname
result += '};\n'
return result
# ---------------------------------------------------------------------------
# Generation of oneofs (unions)
# ---------------------------------------------------------------------------
class OneOf(Field):
def __init__(self, struct_name, oneof_desc):
self.struct_name = struct_name
self.name = oneof_desc.name
self.ctype = 'union'
self.pbtype = 'oneof'
self.fields = []
self.allocation = 'ONEOF'
self.default = None
self.rules = 'ONEOF'
self.anonymous = False
def add_field(self, field):
if field.allocation == 'CALLBACK':
raise Exception("Callback fields inside of oneof are not supported"
+ " (field %s)" % field.name)
field.union_name = self.name
field.rules = 'ONEOF'
field.anonymous = self.anonymous
self.fields.append(field)
self.fields.sort(key = lambda f: f.tag)
# Sort by the lowest tag number inside union
self.tag = min([f.tag for f in self.fields])
def __str__(self):
result = ''
if self.fields:
result += ' pb_size_t which_' + self.name + ";\n"
result += ' union {\n'
for f in self.fields:
result += ' ' + str(f).replace('\n', '\n ') + '\n'
if self.anonymous:
result += ' };'
else:
result += ' } ' + self.name + ';'
return result
def types(self):
return ''.join([f.types() for f in self.fields])
def get_dependencies(self):
deps = []
for f in self.fields:
deps += f.get_dependencies()
return deps
def get_initializer(self, null_init):
return '0, {' + self.fields[0].get_initializer(null_init) + '}'
def default_decl(self, declaration_only = False):
return None
def tags(self):
return ''.join([f.tags() for f in self.fields])
def pb_field_t(self, prev_field_name):
parts = []
for union_index, field in enumerate(self.fields):
parts.append(field.pb_field_t(prev_field_name, union_index))
return ',\n'.join(parts)
def get_last_field_name(self):
if self.anonymous:
return self.fields[-1].name
else:
return self.name + '.' + self.fields[-1].name
def largest_field_value(self):
largest = FieldMaxSize()
for f in self.fields:
largest.extend(f.largest_field_value())
return largest
def encoded_size(self, dependencies):
'''Returns the size of the largest oneof field.'''
largest = 0
symbols = []
for f in self.fields:
size = EncodedSize(f.encoded_size(dependencies))
if size is None or size.value is None:
return None
elif size.symbols:
symbols.append((f.tag, size.symbols[0]))
elif size.value > largest:
largest = size.value
if not symbols:
# Simple case, all sizes were known at generator time
return largest
if largest > 0:
# Some sizes were known, some were not
symbols.insert(0, (0, largest))
if len(symbols) == 1:
# Only one symbol was needed
return EncodedSize(5, [symbols[0][1]])
else:
# Use sizeof(union{}) construct to find the maximum size of
# submessages.
union_def = ' '.join('char f%d[%s];' % s for s in symbols)
return EncodedSize(5, ['sizeof(union{%s})' % union_def])
# ---------------------------------------------------------------------------
# Generation of messages (structures)
# ---------------------------------------------------------------------------
class Message:
def __init__(self, names, desc, message_options):
self.name = names
self.fields = []
self.oneofs = {}
no_unions = []
if message_options.msgid:
self.msgid = message_options.msgid
if hasattr(desc, 'oneof_decl'):
for i, f in enumerate(desc.oneof_decl):
oneof_options = get_nanopb_suboptions(desc, message_options, self.name + f.name)
if oneof_options.no_unions:
no_unions.append(i) # No union, but add fields normally
elif oneof_options.type == nanopb_pb2.FT_IGNORE:
pass # No union and skip fields also
else:
oneof = OneOf(self.name, f)
if oneof_options.anonymous_oneof:
oneof.anonymous = True
self.oneofs[i] = oneof
self.fields.append(oneof)
else:
sys.stderr.write('Note: This Python protobuf library has no OneOf support\n')
for f in desc.field:
field_options = get_nanopb_suboptions(f, message_options, self.name + f.name)
if field_options.type == nanopb_pb2.FT_IGNORE:
continue
field = Field(self.name, f, field_options)
if (hasattr(f, 'oneof_index') and
f.HasField('oneof_index') and
f.oneof_index not in no_unions):
if f.oneof_index in self.oneofs:
self.oneofs[f.oneof_index].add_field(field)
else:
self.fields.append(field)
if len(desc.extension_range) > 0:
field_options = get_nanopb_suboptions(desc, message_options, self.name + 'extensions')
range_start = min([r.start for r in desc.extension_range])
if field_options.type != nanopb_pb2.FT_IGNORE:
self.fields.append(ExtensionRange(self.name, range_start, field_options))
self.packed = message_options.packed_struct
self.ordered_fields = self.fields[:]
self.ordered_fields.sort()
def get_dependencies(self):
'''Get list of type names that this structure refers to.'''
deps = []
for f in self.fields:
deps += f.get_dependencies()
return deps
def __str__(self):
result = 'typedef struct _%s {\n' % self.name
if not self.ordered_fields:
# Empty structs are not allowed in C standard.
# Therefore add a dummy field if an empty message occurs.
result += ' char dummy_field;'
result += '\n'.join([str(f) for f in self.ordered_fields])
result += '\n/* @@protoc_insertion_point(struct:%s) */' % self.name
result += '\n}'
if self.packed:
result += ' pb_packed'
result += ' %s;' % self.name
if self.packed:
result = 'PB_PACKED_STRUCT_START\n' + result
result += '\nPB_PACKED_STRUCT_END'
return result
def types(self):
return ''.join([f.types() for f in self.fields])
def get_initializer(self, null_init):
if not self.ordered_fields:
return '{0}'
parts = []
for field in self.ordered_fields:
parts.append(field.get_initializer(null_init))
return '{' + ', '.join(parts) + '}'
def default_decl(self, declaration_only = False):
result = ""
for field in self.fields:
default = field.default_decl(declaration_only)
if default is not None:
result += default + '\n'
return result
def count_required_fields(self):
'''Returns number of required fields inside this message'''
count = 0
for f in self.fields:
if not isinstance(f, OneOf):
if f.rules == 'REQUIRED':
count += 1
return count
def count_all_fields(self):
count = 0
for f in self.fields:
if isinstance(f, OneOf):
count += len(f.fields)
else:
count += 1
return count
def fields_declaration(self):
result = 'extern const pb_field_t %s_fields[%d];' % (self.name, self.count_all_fields() + 1)
return result
def fields_definition(self):
result = 'const pb_field_t %s_fields[%d] = {\n' % (self.name, self.count_all_fields() + 1)
prev = None
for field in self.ordered_fields:
result += field.pb_field_t(prev)
result += ',\n'
prev = field.get_last_field_name()
result += ' PB_LAST_FIELD\n};'
return result
def encoded_size(self, dependencies):
'''Return the maximum size that this message can take when encoded.
If the size cannot be determined, returns None.
'''
size = EncodedSize(0)
for field in self.fields:
fsize = field.encoded_size(dependencies)
if fsize is None:
return None
size += fsize
return size
# ---------------------------------------------------------------------------
# Processing of entire .proto files
# ---------------------------------------------------------------------------
def iterate_messages(desc, flatten = False, names = Names()):
'''Recursively find all messages. For each, yield name, DescriptorProto.'''
if hasattr(desc, 'message_type'):
submsgs = desc.message_type
else:
submsgs = desc.nested_type
for submsg in submsgs:
sub_names = names + submsg.name
if flatten:
yield Names(submsg.name), submsg
else:
yield sub_names, submsg
for x in iterate_messages(submsg, flatten, sub_names):
yield x
def iterate_extensions(desc, flatten = False, names = Names()):
'''Recursively find all extensions.
For each, yield name, FieldDescriptorProto.
'''
for extension in desc.extension:
yield names, extension
for subname, subdesc in iterate_messages(desc, flatten, names):
for extension in subdesc.extension:
yield subname, extension
def toposort2(data):
'''Topological sort.
From http://code.activestate.com/recipes/577413-topological-sort/
This function is under the MIT license.
'''
for k, v in list(data.items()):
v.discard(k) # Ignore self dependencies
extra_items_in_deps = reduce(set.union, list(data.values()), set()) - set(data.keys())
data.update(dict([(item, set()) for item in extra_items_in_deps]))
while True:
ordered = set(item for item,dep in list(data.items()) if not dep)
if not ordered:
break
for item in sorted(ordered):
yield item
data = dict([(item, (dep - ordered)) for item,dep in list(data.items())
if item not in ordered])
assert not data, "A cyclic dependency exists amongst %r" % data
def sort_dependencies(messages):
'''Sort a list of Messages based on dependencies.'''
dependencies = {}
message_by_name = {}
for message in messages:
dependencies[str(message.name)] = set(message.get_dependencies())
message_by_name[str(message.name)] = message
for msgname in toposort2(dependencies):
if msgname in message_by_name:
yield message_by_name[msgname]
def make_identifier(headername):
'''Make #ifndef identifier that contains uppercase A-Z and digits 0-9'''
result = ""
for c in headername.upper():
if c.isalnum():
result += c
else:
result += '_'
return result
class ProtoFile:
def __init__(self, fdesc, file_options):
'''Takes a FileDescriptorProto and parses it.'''
self.fdesc = fdesc
self.file_options = file_options
self.dependencies = {}
self.parse()
# Some of types used in this file probably come from the file itself.
# Thus it has implicit dependency on itself.
self.add_dependency(self)
def parse(self):
self.enums = []
self.messages = []
self.extensions = []
mangle_names = self.file_options.mangle_names
flatten = mangle_names == nanopb_pb2.M_FLATTEN
strip_prefix = None
if mangle_names == nanopb_pb2.M_STRIP_PACKAGE:
strip_prefix = "." + self.fdesc.package
def create_name(names):
if mangle_names == nanopb_pb2.M_NONE:
return base_name + names
elif mangle_names == nanopb_pb2.M_STRIP_PACKAGE:
return Names(names)
else:
single_name = names
if isinstance(names, Names):
single_name = names.parts[-1]
return Names(single_name)
def mangle_field_typename(typename):
if mangle_names == nanopb_pb2.M_FLATTEN:
return "." + typename.split(".")[-1]
elif strip_prefix is not None and typename.startswith(strip_prefix):
return typename[len(strip_prefix):]
else:
return typename
if self.fdesc.package:
base_name = Names(self.fdesc.package.split('.'))
else:
base_name = Names()
for enum in self.fdesc.enum_type:
name = create_name(enum.name)
enum_options = get_nanopb_suboptions(enum, self.file_options, name)
self.enums.append(Enum(name, enum, enum_options))
for names, message in iterate_messages(self.fdesc, flatten):
name = create_name(names)
message_options = get_nanopb_suboptions(message, self.file_options, name)
if message_options.skip_message:
continue
for field in message.field:
if field.type in (FieldD.TYPE_MESSAGE, FieldD.TYPE_ENUM):
field.type_name = mangle_field_typename(field.type_name)
self.messages.append(Message(name, message, message_options))
for enum in message.enum_type:
name = create_name(names + enum.name)
enum_options = get_nanopb_suboptions(enum, message_options, name)
self.enums.append(Enum(name, enum, enum_options))
for names, extension in iterate_extensions(self.fdesc, flatten):
name = create_name(names + extension.name)
field_options = get_nanopb_suboptions(extension, self.file_options, name)
if field_options.type != nanopb_pb2.FT_IGNORE:
self.extensions.append(ExtensionField(name, extension, field_options))
def add_dependency(self, other):
for enum in other.enums:
self.dependencies[str(enum.names)] = enum
enum.protofile = other
for msg in other.messages:
self.dependencies[str(msg.name)] = msg
msg.protofile = other
# Fix field default values where enum short names are used.
for enum in other.enums:
if not enum.options.long_names:
for message in self.messages:
for field in message.fields:
if field.default in enum.value_longnames:
idx = enum.value_longnames.index(field.default)
field.default = enum.values[idx][0]
# Fix field data types where enums have negative values.
for enum in other.enums:
if not enum.has_negative():
for message in self.messages:
for field in message.fields:
if field.pbtype == 'ENUM' and field.ctype == enum.names:
field.pbtype = 'UENUM'
def generate_header(self, includes, headername, options):
'''Generate content for a header file.
Generates strings, which should be concatenated and stored to file.
'''
yield '/* Automatically generated nanopb header */\n'
if options.notimestamp:
yield '/* Generated by %s */\n\n' % (nanopb_version)
else:
yield '/* Generated by %s at %s. */\n\n' % (nanopb_version, time.asctime())
if self.fdesc.package:
symbol = make_identifier(self.fdesc.package + '_' + headername)
else:
symbol = make_identifier(headername)
yield '#ifndef PB_%s_INCLUDED\n' % symbol
yield '#define PB_%s_INCLUDED\n' % symbol
try:
yield options.libformat % ('pb.h')
except TypeError:
# no %s specified - use whatever was passed in as options.libformat
yield options.libformat
yield '\n'
for incfile in includes:
noext = os.path.splitext(incfile)[0]
yield options.genformat % (noext + options.extension + options.header_extension)
yield '\n'
yield '/* @@protoc_insertion_point(includes) */\n'
yield '#if PB_PROTO_HEADER_VERSION != 30\n'
yield '#error Regenerate this file with the current version of nanopb generator.\n'
yield '#endif\n'
yield '\n'
yield '#ifdef __cplusplus\n'
yield 'extern "C" {\n'
yield '#endif\n\n'
if self.enums:
yield '/* Enum definitions */\n'
for enum in self.enums:
yield str(enum) + '\n\n'
if self.messages:
yield '/* Struct definitions */\n'
for msg in sort_dependencies(self.messages):
yield msg.types()
yield str(msg) + '\n\n'
if self.extensions:
yield '/* Extensions */\n'
for extension in self.extensions:
yield extension.extension_decl()
yield '\n'
if self.messages:
yield '/* Default values for struct fields */\n'
for msg in self.messages:
yield msg.default_decl(True)
yield '\n'
yield '/* Initializer values for message structs */\n'
for msg in self.messages:
identifier = '%s_init_default' % msg.name
yield '#define %-40s %s\n' % (identifier, msg.get_initializer(False))
for msg in self.messages:
identifier = '%s_init_zero' % msg.name
yield '#define %-40s %s\n' % (identifier, msg.get_initializer(True))
yield '\n'
yield '/* Field tags (for use in manual encoding/decoding) */\n'
for msg in sort_dependencies(self.messages):
for field in msg.fields:
yield field.tags()
for extension in self.extensions:
yield extension.tags()
yield '\n'
yield '/* Struct field encoding specification for nanopb */\n'
for msg in self.messages:
yield msg.fields_declaration() + '\n'
yield '\n'
yield '/* Maximum encoded size of messages (where known) */\n'
for msg in self.messages:
msize = msg.encoded_size(self.dependencies)
identifier = '%s_size' % msg.name
if msize is not None:
yield '#define %-40s %s\n' % (identifier, msize)
else:
yield '/* %s depends on runtime parameters */\n' % identifier
yield '\n'
yield '/* Message IDs (where set with "msgid" option) */\n'
yield '#ifdef PB_MSGID\n'
for msg in self.messages:
if hasattr(msg,'msgid'):
yield '#define PB_MSG_%d %s\n' % (msg.msgid, msg.name)
yield '\n'
symbol = make_identifier(headername.split('.')[0])
yield '#define %s_MESSAGES \\\n' % symbol
for msg in self.messages:
m = "-1"
msize = msg.encoded_size(self.dependencies)
if msize is not None:
m = msize
if hasattr(msg,'msgid'):
yield '\tPB_MSG(%d,%s,%s) \\\n' % (msg.msgid, m, msg.name)
yield '\n'
for msg in self.messages:
if hasattr(msg,'msgid'):
yield '#define %s_msgid %d\n' % (msg.name, msg.msgid)
yield '\n'
yield '#endif\n\n'
yield '#ifdef __cplusplus\n'
yield '} /* extern "C" */\n'
yield '#endif\n'
# End of header
yield '/* @@protoc_insertion_point(eof) */\n'
yield '\n#endif\n'
def generate_source(self, headername, options):
'''Generate content for a source file.'''
yield '/* Automatically generated nanopb constant definitions */\n'
if options.notimestamp:
yield '/* Generated by %s */\n\n' % (nanopb_version)
else:
yield '/* Generated by %s at %s. */\n\n' % (nanopb_version, time.asctime())
yield options.genformat % (headername)
yield '\n'
yield '/* @@protoc_insertion_point(includes) */\n'
yield '#if PB_PROTO_HEADER_VERSION != 30\n'
yield '#error Regenerate this file with the current version of nanopb generator.\n'
yield '#endif\n'
yield '\n'
for msg in self.messages:
yield msg.default_decl(False)
yield '\n\n'
for msg in self.messages:
yield msg.fields_definition() + '\n\n'
for ext in self.extensions:
yield ext.extension_def() + '\n'
for enum in self.enums:
yield enum.enum_to_string_definition() + '\n'
# Add checks for numeric limits
if self.messages:
largest_msg = max(self.messages, key = lambda m: m.count_required_fields())
largest_count = largest_msg.count_required_fields()
if largest_count > 64:
yield '\n/* Check that missing required fields will be properly detected */\n'
yield '#if PB_MAX_REQUIRED_FIELDS < %d\n' % largest_count
yield '#error Properly detecting missing required fields in %s requires \\\n' % largest_msg.name
yield ' setting PB_MAX_REQUIRED_FIELDS to %d or more.\n' % largest_count
yield '#endif\n'
max_field = FieldMaxSize()
checks_msgnames = []
for msg in self.messages:
checks_msgnames.append(msg.name)
for field in msg.fields:
max_field.extend(field.largest_field_value())
for field in self.extensions:
max_field.extend(field.largest_field_value())
worst = max_field.worst
worst_field = max_field.worst_field
checks = max_field.checks
if worst > 255 or checks:
yield '\n/* Check that field information fits in pb_field_t */\n'
if worst > 65535 or checks:
yield '#if !defined(PB_FIELD_32BIT)\n'
if worst > 65535:
yield '#error Field descriptor for %s is too large. Define PB_FIELD_32BIT to fix this.\n' % worst_field
else:
assertion = ' && '.join(str(c) + ' < 65536' for c in checks)
msgs = '_'.join(str(n) for n in checks_msgnames)
yield '/* If you get an error here, it means that you need to define PB_FIELD_32BIT\n'
yield ' * compile-time option. You can do that in pb.h or on compiler command line.\n'
yield ' * \n'
yield ' * The reason you need to do this is that some of your messages contain tag\n'
yield ' * numbers or field sizes that are larger than what can fit in 8 or 16 bit\n'
yield ' * field descriptors.\n'
yield ' */\n'
yield 'PB_STATIC_ASSERT((%s), YOU_MUST_DEFINE_PB_FIELD_32BIT_FOR_MESSAGES_%s)\n'%(assertion,msgs)
yield '#endif\n\n'
if worst < 65536:
yield '#if !defined(PB_FIELD_16BIT) && !defined(PB_FIELD_32BIT)\n'
if worst > 255:
yield '#error Field descriptor for %s is too large. Define PB_FIELD_16BIT to fix this.\n' % worst_field
else:
assertion = ' && '.join(str(c) + ' < 256' for c in checks)
msgs = '_'.join(str(n) for n in checks_msgnames)
yield '/* If you get an error here, it means that you need to define PB_FIELD_16BIT\n'
yield ' * compile-time option. You can do that in pb.h or on compiler command line.\n'
yield ' * \n'
yield ' * The reason you need to do this is that some of your messages contain tag\n'
yield ' * numbers or field sizes that are larger than what can fit in the default\n'
yield ' * 8 bit descriptors.\n'
yield ' */\n'
yield 'PB_STATIC_ASSERT((%s), YOU_MUST_DEFINE_PB_FIELD_16BIT_FOR_MESSAGES_%s)\n'%(assertion,msgs)
yield '#endif\n\n'
# Add check for sizeof(double)
has_double = False
for msg in self.messages:
for field in msg.fields:
if field.ctype == 'double':
has_double = True
if has_double:
yield '\n'
yield '/* On some platforms (such as AVR), double is really float.\n'
yield ' * These are not directly supported by nanopb, but see example_avr_double.\n'
yield ' * To get rid of this error, remove any double fields from your .proto.\n'
yield ' */\n'
yield 'PB_STATIC_ASSERT(sizeof(double) == 8, DOUBLE_MUST_BE_8_BYTES)\n'
yield '\n'
yield '/* @@protoc_insertion_point(eof) */\n'
# ---------------------------------------------------------------------------
# Options parsing for the .proto files
# ---------------------------------------------------------------------------
from fnmatch import fnmatch
def read_options_file(infile):
'''Parse a separate options file to list:
[(namemask, options), ...]
'''
results = []
data = infile.read()
data = re.sub('/\*.*?\*/', '', data, flags = re.MULTILINE)
data = re.sub('//.*?$', '', data, flags = re.MULTILINE)
data = re.sub('#.*?$', '', data, flags = re.MULTILINE)
for i, line in enumerate(data.split('\n')):
line = line.strip()
if not line:
continue
parts = line.split(None, 1)
if len(parts) < 2:
sys.stderr.write("%s:%d: " % (infile.name, i + 1) +
"Option lines should have space between field name and options. " +
"Skipping line: '%s'\n" % line)
continue
opts = nanopb_pb2.NanoPBOptions()
try:
text_format.Merge(parts[1], opts)
except Exception as e:
sys.stderr.write("%s:%d: " % (infile.name, i + 1) +
"Unparseable option line: '%s'. " % line +
"Error: %s\n" % str(e))
continue
results.append((parts[0], opts))
return results
class Globals:
'''Ugly global variables, should find a good way to pass these.'''
verbose_options = False
separate_options = []
matched_namemasks = set()
def get_nanopb_suboptions(subdesc, options, name):
'''Get copy of options, and merge information from subdesc.'''
new_options = nanopb_pb2.NanoPBOptions()
new_options.CopyFrom(options)
if hasattr(subdesc, 'syntax') and subdesc.syntax == "proto3":
new_options.proto3 = True
# Handle options defined in a separate file
dotname = '.'.join(name.parts)
for namemask, options in Globals.separate_options:
if fnmatch(dotname, namemask):
Globals.matched_namemasks.add(namemask)
new_options.MergeFrom(options)
# Handle options defined in .proto
if isinstance(subdesc.options, descriptor.FieldOptions):
ext_type = nanopb_pb2.nanopb
elif isinstance(subdesc.options, descriptor.FileOptions):
ext_type = nanopb_pb2.nanopb_fileopt
elif isinstance(subdesc.options, descriptor.MessageOptions):
ext_type = nanopb_pb2.nanopb_msgopt
elif isinstance(subdesc.options, descriptor.EnumOptions):
ext_type = nanopb_pb2.nanopb_enumopt
else:
raise Exception("Unknown options type")
if subdesc.options.HasExtension(ext_type):
ext = subdesc.options.Extensions[ext_type]
new_options.MergeFrom(ext)
if Globals.verbose_options:
sys.stderr.write("Options for " + dotname + ": ")
sys.stderr.write(text_format.MessageToString(new_options) + "\n")
return new_options
# ---------------------------------------------------------------------------
# Command line interface
# ---------------------------------------------------------------------------
import sys
import os.path
from optparse import OptionParser
optparser = OptionParser(
usage = "Usage: nanopb_generator.py [options] file.pb ...",
epilog = "Compile file.pb from file.proto by: 'protoc -ofile.pb file.proto'. " +
"Output will be written to file.pb.h and file.pb.c.")
optparser.add_option("-x", dest="exclude", metavar="FILE", action="append", default=[],
help="Exclude file from generated #include list.")
optparser.add_option("-e", "--extension", dest="extension", metavar="EXTENSION", default=".pb",
help="Set extension to use instead of '.pb' for generated files. [default: %default]")
optparser.add_option("-H", "--header-extension", dest="header_extension", metavar="EXTENSION", default=".h",
help="Set extension to use for generated header files. [default: %default]")
optparser.add_option("-S", "--source-extension", dest="source_extension", metavar="EXTENSION", default=".c",
help="Set extension to use for generated source files. [default: %default]")
optparser.add_option("-f", "--options-file", dest="options_file", metavar="FILE", default="%s.options",
help="Set name of a separate generator options file.")
optparser.add_option("-I", "--options-path", dest="options_path", metavar="DIR",
action="append", default = [],
help="Search for .options files additionally in this path")
optparser.add_option("-D", "--output-dir", dest="output_dir",
metavar="OUTPUTDIR", default=None,
help="Output directory of .pb.h and .pb.c files")
optparser.add_option("-Q", "--generated-include-format", dest="genformat",
metavar="FORMAT", default='#include "%s"\n',
help="Set format string to use for including other .pb.h files. [default: %default]")
optparser.add_option("-L", "--library-include-format", dest="libformat",
metavar="FORMAT", default='#include <%s>\n',
help="Set format string to use for including the nanopb pb.h header. [default: %default]")
optparser.add_option("--strip-path", dest="strip_path", action="store_true", default=True,
help="Strip directory path from #included .pb.h file name [default: %default]")
optparser.add_option("--no-strip-path", dest="strip_path", action="store_false",
help="Opposite of --strip-path")
optparser.add_option("-T", "--no-timestamp", dest="notimestamp", action="store_true", default=False,
help="Don't add timestamp to .pb.h and .pb.c preambles")
optparser.add_option("-q", "--quiet", dest="quiet", action="store_true", default=False,
help="Don't print anything except errors.")
optparser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False,
help="Print more information.")
optparser.add_option("-s", dest="settings", metavar="OPTION:VALUE", action="append", default=[],
help="Set generator option (max_size, max_count etc.).")
def parse_file(filename, fdesc, options):
'''Parse a single file. Returns a ProtoFile instance.'''
toplevel_options = nanopb_pb2.NanoPBOptions()
for s in options.settings:
text_format.Merge(s, toplevel_options)
if not fdesc:
data = open(filename, 'rb').read()
fdesc = descriptor.FileDescriptorSet.FromString(data).file[0]
# Check if there is a separate .options file
had_abspath = False
try:
optfilename = options.options_file % os.path.splitext(filename)[0]
except TypeError:
# No %s specified, use the filename as-is
optfilename = options.options_file
had_abspath = True
paths = ['.'] + options.options_path
for p in paths:
if os.path.isfile(os.path.join(p, optfilename)):
optfilename = os.path.join(p, optfilename)
if options.verbose:
sys.stderr.write('Reading options from ' + optfilename + '\n')
Globals.separate_options = read_options_file(open(optfilename, "rU"))
break
else:
# If we are given a full filename and it does not exist, give an error.
# However, don't give error when we automatically look for .options file
# with the same name as .proto.
if options.verbose or had_abspath:
sys.stderr.write('Options file not found: ' + optfilename + '\n')
Globals.separate_options = []
Globals.matched_namemasks = set()
# Parse the file
file_options = get_nanopb_suboptions(fdesc, toplevel_options, Names([filename]))
f = ProtoFile(fdesc, file_options)
f.optfilename = optfilename
return f
def process_file(filename, fdesc, options, other_files = {}):
'''Process a single file.
filename: The full path to the .proto or .pb source file, as string.
fdesc: The loaded FileDescriptorSet, or None to read from the input file.
options: Command line options as they come from OptionsParser.
Returns a dict:
{'headername': Name of header file,
'headerdata': Data for the .h header file,
'sourcename': Name of the source code file,
'sourcedata': Data for the .c source code file
}
'''
f = parse_file(filename, fdesc, options)
# Provide dependencies if available
for dep in f.fdesc.dependency:
if dep in other_files:
f.add_dependency(other_files[dep])
# Decide the file names
noext = os.path.splitext(filename)[0]
headername = noext + options.extension + options.header_extension
sourcename = noext + options.extension + options.source_extension
if options.strip_path:
headerbasename = os.path.basename(headername)
else:
headerbasename = headername
# List of .proto files that should not be included in the C header file
# even if they are mentioned in the source .proto.
excludes = ['nanopb.proto', 'google/protobuf/descriptor.proto'] + options.exclude
includes = [d for d in f.fdesc.dependency if d not in excludes]
headerdata = ''.join(f.generate_header(includes, headerbasename, options))
sourcedata = ''.join(f.generate_source(headerbasename, options))
# Check if there were any lines in .options that did not match a member
unmatched = [n for n,o in Globals.separate_options if n not in Globals.matched_namemasks]
if unmatched and not options.quiet:
sys.stderr.write("Following patterns in " + f.optfilename + " did not match any fields: "
+ ', '.join(unmatched) + "\n")
if not Globals.verbose_options:
sys.stderr.write("Use protoc --nanopb-out=-v:. to see a list of the field names.\n")
return {'headername': headername, 'headerdata': headerdata,
'sourcename': sourcename, 'sourcedata': sourcedata}
def main_cli():
'''Main function when invoked directly from the command line.'''
options, filenames = optparser.parse_args()
if not filenames:
optparser.print_help()
sys.exit(1)
if options.quiet:
options.verbose = False
if options.output_dir and not os.path.exists(options.output_dir):
optparser.print_help()
sys.stderr.write("\noutput_dir does not exist: %s\n" % options.output_dir)
sys.exit(1)
if options.verbose:
sys.stderr.write('Google Python protobuf library imported from %s, version %s\n'
% (google.protobuf.__file__, google.protobuf.__version__))
Globals.verbose_options = options.verbose
for filename in filenames:
results = process_file(filename, None, options)
base_dir = options.output_dir or ''
to_write = [
(os.path.join(base_dir, results['headername']), results['headerdata']),
(os.path.join(base_dir, results['sourcename']), results['sourcedata']),
]
if not options.quiet:
paths = " and ".join([x[0] for x in to_write])
sys.stderr.write("Writing to %s\n" % paths)
for path, data in to_write:
with open(path, 'w') as f:
f.write(data)
def main_plugin():
'''Main function when invoked as a protoc plugin.'''
import io, sys
if sys.platform == "win32":
import os, msvcrt
# Set stdin and stdout to binary mode
msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
data = io.open(sys.stdin.fileno(), "rb").read()
request = plugin_pb2.CodeGeneratorRequest.FromString(data)
try:
# Versions of Python prior to 2.7.3 do not support unicode
# input to shlex.split(). Try to convert to str if possible.
params = str(request.parameter)
except UnicodeEncodeError:
params = request.parameter
import shlex
args = shlex.split(params)
options, dummy = optparser.parse_args(args)
Globals.verbose_options = options.verbose
if options.verbose:
sys.stderr.write('Google Python protobuf library imported from %s, version %s\n'
% (google.protobuf.__file__, google.protobuf.__version__))
response = plugin_pb2.CodeGeneratorResponse()
# Google's protoc does not currently indicate the full path of proto files.
# Instead always add the main file path to the search dirs, that works for
# the common case.
import os.path
options.options_path.append(os.path.dirname(request.file_to_generate[0]))
# Process any include files first, in order to have them
# available as dependencies
other_files = {}
for fdesc in request.proto_file:
other_files[fdesc.name] = parse_file(fdesc.name, fdesc, options)
for filename in request.file_to_generate:
for fdesc in request.proto_file:
if fdesc.name == filename:
results = process_file(filename, fdesc, options, other_files)
f = response.file.add()
f.name = results['headername']
f.content = results['headerdata']
f = response.file.add()
f.name = results['sourcename']
f.content = results['sourcedata']
io.open(sys.stdout.fileno(), "wb").write(response.SerializeToString())
if __name__ == '__main__':
# Check if we are running as a plugin under protoc
if 'protoc-gen-' in sys.argv[0] or '--protoc-plugin' in sys.argv:
main_plugin()
else:
main_cli()
|
google/myelin-acorn-electron-hardware
|
third_party/nanopb/generator/nanopb_generator.py
|
Python
|
apache-2.0
| 70,423
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('base', '0003_auto_20151109_2002'),
]
operations = [
migrations.AlterField(
model_name='bodega',
name='sucursal',
field=models.ForeignKey(to='base.Sucursal', null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='cliente',
name='direccion',
field=models.CharField(max_length=200, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='cliente',
name='telefono',
field=models.CharField(max_length=30, null=True, blank=True),
preserve_default=True,
),
]
|
harvyv/johnmay
|
base/migrations/0004_auto_20151223_1440.py
|
Python
|
apache-2.0
| 883
|
import os
from copy import deepcopy
bids_schema = {
# BIDS identification bits
'modality': {
'type': 'string',
'required': True
},
'subject_id': {
'type': 'string',
'required': True
},
'session_id': {'type': 'string'},
'run_id': {'type': 'string'},
'acq_id': {'type': 'string'},
'task_id': {'type': 'string'},
# BIDS metadata
'AccelNumReferenceLines': {'type': 'integer'},
'AccelerationFactorPE': {'type': 'integer'},
'AcquisitionMatrix': {'type': 'string'},
'CogAtlasID': {'type': 'string'},
'CogPOID': {'type': 'string'},
'CoilCombinationMethod': {'type': 'string'},
'ContrastBolusIngredient': {'type': 'string'},
'ConversionSoftware': {'type': 'string'},
'ConversionSoftwareVersion': {'type': 'string'},
'DelayTime': {'type': 'float'},
'DeviceSerialNumber': {'type': 'string'},
'EchoTime': {'type': 'float'},
'EchoTrainLength': {'type': 'integer'},
'EffectiveEchoSpacing': {'type': 'float'},
'FlipAngle': {'type': 'integer'},
'GradientSetType': {'type': 'string'},
'HardcopyDeviceSoftwareVersion': {'type': 'string'},
'ImagingFrequency': {'type': 'integer'},
'InPlanePhaseEncodingDirection': {'type': 'string'},
'InstitutionAddress': {'type': 'string'},
'InstitutionName': {'type': 'string'},
'Instructions': {'type': 'string'},
'InversionTime': {'type': 'float'},
'MRAcquisitionType': {'type': 'string'},
'MRTransmitCoilSequence': {'type': 'string'},
'MagneticFieldStrength': {'type': 'float'},
'Manufacturer': {'type': 'string'},
'ManufacturersModelName': {'type': 'string'},
'MatrixCoilMode': {'type': 'string'},
'MultibandAccelerationFactor': {'type': 'float'},
'NumberOfAverages': {'type': 'integer'},
'NumberOfPhaseEncodingSteps': {'type': 'integer'},
'NumberOfVolumesDiscardedByScanner': {'type': 'float'},
'NumberOfVolumesDiscardedByUser': {'type': 'float'},
'NumberShots': {'type': 'integer'},
'ParallelAcquisitionTechnique': {'type': 'string'},
'ParallelReductionFactorInPlane': {'type': 'float'},
'PartialFourier': {'type': 'boolean'},
'PartialFourierDirection': {'type': 'string'},
'PatientPosition': {'type': 'string'},
'PercentPhaseFieldOfView': {'type': 'integer'},
'PercentSampling': {'type': 'integer'},
'PhaseEncodingDirection': {'type': 'string'},
'PixelBandwidth': {'type': 'integer'},
'ProtocolName': {'type': 'string'},
'PulseSequenceDetails': {'type': 'string'},
'PulseSequenceType': {'type': 'string'},
'ReceiveCoilName': {'type': 'string'},
'RepetitionTime': {'type': 'float'},
'ScanOptions': {'type': 'string'},
'ScanningSequence': {'type': 'string'},
'SequenceName': {'type': 'string'},
'SequenceVariant': {'type': 'string'},
'SliceEncodingDirection': {'type': 'string'},
'SoftwareVersions': {'type': 'string'},
'TaskDescription': {'type': 'string'},
'TotalReadoutTime': {'type': 'float'},
'TotalScanTimeSec': {'type': 'integer'},
'TransmitCoilName': {'type': 'string'},
'VariableFlipAngleFlag': {'type': 'string'},
}
prov_schema = {
'version': {
'type': 'string',
'required': True
},
'md5sum': {
'type': 'string',
'required': True
},
'software': {
'type': 'string',
'required': True
},
'settings': {
'type': 'dict',
'schema': {
'fd_thres': {'type': 'float'},
'hmc_fsl': {'type': 'boolean'},
'testing': {'type': 'boolean'}
},
},
'mriqc_pred': {'type': 'integer'},
'email': {'type': 'string'},
}
bold_iqms_schema = {
'aor': {
'type': 'float',
'required': True
},
'aqi': {
'type': 'float',
'required': True
},
'dummy_trs': {'type': 'integer'},
'dvars_nstd': {
'type': 'float',
'required': True
},
'dvars_std': {
'type': 'float',
'required': True
},
'dvars_vstd': {
'type': 'float',
'required': True
},
'efc': {
'type': 'float',
'required': True
},
'fber': {
'type': 'float',
'required': True
},
'fd_mean': {
'type': 'float',
'required': True
},
'fd_num': {
'type': 'float',
'required': True
},
'fd_perc': {
'type': 'float',
'required': True
},
'fwhm_avg': {
'type': 'float',
'required': True
},
'fwhm_x': {
'type': 'float',
'required': True
},
'fwhm_y': {
'type': 'float',
'required': True
},
'fwhm_z': {
'type': 'float',
'required': True
},
'gcor': {
'type': 'float',
'required': True
},
'gsr_x': {
'type': 'float',
'required': True
},
'gsr_y': {
'type': 'float',
'required': True
},
'size_t': {
'type': 'float',
'required': True
},
'size_x': {
'type': 'float',
'required': True
},
'size_y': {
'type': 'float',
'required': True
},
'size_z': {
'type': 'float',
'required': True
},
'snr': {
'type': 'float',
'required': True
},
'spacing_tr': {
'type': 'float',
'required': True
},
'spacing_x': {
'type': 'float',
'required': True
},
'spacing_y': {
'type': 'float',
'required': True
},
'spacing_z': {
'type': 'float',
'required': True
},
'summary_bg_k': {
'type': 'float',
'required': True
},
'summary_bg_mean': {
'type': 'float',
'required': True
},
'summary_bg_median': {
'type': 'float',
'required': True
},
'summary_bg_mad': {
'type': 'float',
'required': True
},
'summary_bg_p05': {
'type': 'float',
'required': True
},
'summary_bg_p95': {
'type': 'float',
'required': True
},
'summary_bg_stdv': {
'type': 'float',
'required': True
},
'summary_bg_n': {
'type': 'float',
'required': True
},
'summary_fg_k': {
'type': 'float',
'required': True
},
'summary_fg_mean': {
'type': 'float',
'required': True
},
'summary_fg_median': {
'type': 'float',
'required': True
},
'summary_fg_mad': {
'type': 'float',
'required': True
},
'summary_fg_p05': {
'type': 'float',
'required': True
},
'summary_fg_p95': {
'type': 'float',
'required': True
},
'summary_fg_stdv': {
'type': 'float',
'required': True
},
'summary_fg_n': {
'type': 'float',
'required': True
},
'tsnr': {
'type': 'float',
'required': True
},
}
struct_iqms_schema = {
'cjv': {
'type': 'float',
'required': True
},
'cnr': {
'type': 'float',
'required': True
},
'efc': {
'type': 'float',
'required': True
},
'fber': {
'type': 'float',
'required': True
},
'fwhm_avg': {
'type': 'float',
'required': True
},
'fwhm_x': {
'type': 'float',
'required': True
},
'fwhm_y': {
'type': 'float',
'required': True
},
'fwhm_z': {
'type': 'float',
'required': True
},
'icvs_csf': {
'type': 'float',
'required': True
},
'icvs_gm': {
'type': 'float',
'required': True
},
'icvs_wm': {
'type': 'float',
'required': True
},
'inu_med': {
'type': 'float',
'required': True
},
'inu_range': {
'type': 'float',
'required': True
},
'qi_1': {
'type': 'float',
'required': True
},
'qi_2': {
'type': 'float',
'required': True
},
'rpve_csf': {
'type': 'float',
'required': True
},
'rpve_gm': {
'type': 'float',
'required': True
},
'rpve_wm': {
'type': 'float',
'required': True
},
'size_x': {
'type': 'integer',
'required': True
},
'size_y': {
'type': 'integer',
'required': True
},
'size_z': {
'type': 'integer',
'required': True
},
'snr_csf': {
'type': 'float',
'required': True
},
'snr_gm': {
'type': 'float',
'required': True
},
'snr_total': {
'type': 'float',
'required': True
},
'snr_wm': {
'type': 'float',
'required': True
},
'snrd_csf': {
'type': 'float',
'required': True
},
'snrd_gm': {
'type': 'float',
'required': True
},
'snrd_total': {
'type': 'float',
'required': True
},
'snrd_wm': {
'type': 'float',
'required': True
},
'spacing_x': {
'type': 'float',
'required': True
},
'spacing_y': {
'type': 'float',
'required': True
},
'spacing_z': {
'type': 'float',
'required': True
},
'summary_bg_k': {
'type': 'float',
'required': True
},
'summary_bg_mean': {
'type': 'float',
'required': True
},
'summary_bg_median': {
'type': 'float'
},
'summary_bg_mad': {
'type': 'float'
},
'summary_bg_p05': {
'type': 'float',
'required': True
},
'summary_bg_p95': {
'type': 'float',
'required': True
},
'summary_bg_stdv': {
'type': 'float',
'required': True
},
'summary_bg_n': {
'type': 'float'
},
'summary_csf_k': {
'type': 'float',
'required': True
},
'summary_csf_mean': {
'type': 'float',
'required': True
},
'summary_csf_median': {
'type': 'float'
},
'summary_csf_mad': {
'type': 'float'
},
'summary_csf_p05': {
'type': 'float',
'required': True
},
'summary_csf_p95': {
'type': 'float',
'required': True
},
'summary_csf_stdv': {
'type': 'float',
'required': True
},
'summary_csf_n': {
'type': 'float'
},
'summary_gm_k': {
'type': 'float',
'required': True
},
'summary_gm_mean': {
'type': 'float',
'required': True
},
'summary_gm_median': {
'type': 'float'
},
'summary_gm_mad': {
'type': 'float'
},
'summary_gm_p05': {
'type': 'float',
'required': True
},
'summary_gm_p95': {
'type': 'float',
'required': True
},
'summary_gm_stdv': {
'type': 'float',
'required': True
},
'summary_gm_n': {
'type': 'float'
},
'summary_wm_k': {
'type': 'float',
'required': True
},
'summary_wm_mean': {
'type': 'float',
'required': True
},
'summary_wm_median': {
'type': 'float'
},
'summary_wm_mad': {
'type': 'float'
},
'summary_wm_p05': {
'type': 'float',
'required': True
},
'summary_wm_p95': {
'type': 'float',
'required': True
},
'summary_wm_stdv': {
'type': 'float',
'required': True
},
'summary_wm_n': {
'type': 'float'
},
'tpm_overlap_csf': {
'type': 'float',
'required': True
},
'tpm_overlap_gm': {
'type': 'float',
'required': True
},
'tpm_overlap_wm': {
'type': 'float',
'required': True
},
'wm2max': {
'type': 'float',
'required': True
},
}
settings = {
'URL_PREFIX': 'api',
'API_VERSION': 'v1',
'ALLOWED_FILTERS': ['*'],
'MONGO_HOST': os.environ.get('MONGODB_HOST', ''),
'MONGO_PORT': int(os.environ.get('MONGODB_PORT', 27017)),
'MONGO_DBNAME': 'mriqc_api',
'PUBLIC_METHODS': ['GET'],
'PUBLIC_ITEM_METHODS': ['GET'],
'RESOURCE_METHODS': ['GET', 'POST'],
'ITEM_METHODS': ['GET'],
'X_DOMAINS': '*',
'X_HEADERS': ['Authorization', 'Content-Type'],
'DOMAIN': {
'bold': {
'item_title': 'bold',
},
'T1w': {
'item_title': 'T1w',
},
'T2w': {
'item_title': 'T2w',
}
}
}
rating_schema = {
'rating': {
'type': 'string',
'required': True
},
'name': {
'type': 'string',
'required': False
},
'comment': {
'type': 'string',
'required': False
},
'md5sum': {
'type': 'string',
'required': True
}
}
nipype_schema = {
'interface_class_name': {
'type': 'string',
'required': True
},
'version': {
'type': 'string',
'required': True
},
'mem_peak_gb': {
'type': 'float',
'required': True
},
'duration_sec': {
'type': 'float',
'required': True
},
'inputs': {
'type': 'dict',
'required': True
}
}
settings['DOMAIN']['nipype_telemetry'] = {
'type': 'dict',
'required': False,
'schema': deepcopy(nipype_schema)
}
settings['DOMAIN']['rating'] = {
'type': 'dict',
'required': False,
'schema': deepcopy(rating_schema)
}
settings['DOMAIN']['rating_counts'] = {
'datasource': {
'source': 'rating',
'aggregation': {
'pipeline': [
{"$match": {"md5sum": "$value"}},
{"$unwind": "$rating"},
{"$group": {"_id": "$rating", "count": {"$sum": 1}}},
],
}
}
}
settings['DOMAIN']['bold']['schema'] = deepcopy(bold_iqms_schema)
settings['DOMAIN']['bold']['schema'].update(
{
'bids_meta': {
'type': 'dict',
'required': True,
'allow_unknown': True,
'schema': deepcopy(bids_schema)
},
'provenance': {
'type': 'dict',
'required': True,
'schema': deepcopy(prov_schema)
},
'rating': {
'type': 'dict',
'required': False,
'schema': deepcopy(rating_schema)
},
}
)
settings['DOMAIN']['bold']['schema']['bids_meta']['schema'].update({
'TaskName': {
'type': 'string',
'required': True
},
})
settings['DOMAIN']['T1w']['schema'] = deepcopy(struct_iqms_schema)
settings['DOMAIN']['T1w']['schema'].update(
{
'bids_meta': {
'type': 'dict',
'required': True,
'allow_unknown': True,
'schema': deepcopy(bids_schema)
},
'provenance': {
'type': 'dict',
'required': True,
'schema': deepcopy(prov_schema)
},
}
)
settings['DOMAIN']['T2w']['schema'] = deepcopy(settings['DOMAIN']['T1w']['schema'])
|
poldracklab/mriqcwebapi
|
dockereve-master/eve-app/settings.py
|
Python
|
apache-2.0
| 15,268
|
#!/usr/bin/env python2
'''
Description:
Author: Ronald van Haren, NLeSC (r.vanharen@esciencecenter.nl)
Created: -
Last Modified: -
License: Apache 2.0
Notes: -
'''
from lxml.html import parse
import csv
import urllib2
from lxml import html
import numbers
import json
import os
import utils
from numpy import vstack
import argparse
class get_knmi_reference_data:
    '''
    Download hourly data for KNMI reference stations and write their
    locations to a CSV file.
    '''
def __init__(self, opts):
#self.outputdir = opts.outputdir
self.csvfile = opts.csvfile
self.outputdir = opts.outputdir
self.keep = opts.keep
self.check_output_dir()
if len(opts.stationid)==0:
self.get_station_ids()
        else:
            self.url = 'http://www.knmi.nl/klimatologie/uurgegevens/'
            self.stationids = [opts.stationid]
self.download_station_data()
self.get_station_locations()
def get_station_ids(self):
'''
get all stationids from the KNMI website
'''
self.url = 'http://www.knmi.nl/klimatologie/uurgegevens/'
page = parse(self.url)
# get list of ids
rows = page.xpath(".//tbody/@id")
#self.stationids = [int(stationid[3:]) for stationid in rows]
self.stationids = [str(stationid) for stationid in rows]
def download_station_data(self):
page = parse(self.url)
for stationid in self.stationids:
print stationid
relpaths = page.xpath(".//tbody[@id='" + stationid + "']/tr/td/span/a/@href")
for path in relpaths:
fullpath = os.path.join(self.url, path)
request = urllib2.urlopen(fullpath)
filename = os.path.basename(path)
outputfile = os.path.join(self.outputdir, filename)
if self.keep:
if os.path.exists(outputfile):
# check if filesize is not null
if os.path.getsize(outputfile) > 0:
# file exists and is not null, continue next iteration
continue
else:
# file exists but is null, so remove and redownload
os.remove(outputfile)
elif os.path.exists(outputfile):
os.remove(outputfile)
#save
output = open(outputfile, "w")
output.write(request.read())
output.close()
def get_station_locations(self):
# get station names for stationids
url = 'http://www.knmi.nl/klimatologie/metadata/stationslijst.html'
page = parse(url)
url_metadata = page.xpath(".//table/tr/td/a/@href")
station_name_id = [c.text for c in page.xpath(".//table/tr/td/a")]
station_id = [s.split()[0] for s in station_name_id]
station_names = [" ".join(s.split()[1:]) for s in station_name_id]
for idx, stationid in enumerate(station_id):
station_url = os.path.join(os.path.split(url)[0],
url_metadata[idx])
page = parse(station_url)
rows = [c.text for c in page.xpath(".//table/tr/td")]
idx_position = rows.index('Positie:') + 1
idx_startdate = rows.index('Startdatum:') + 1
lat, lon = rows[idx_position].encode('UTF-8').replace(
'\xc2\xb0','').replace(' N.B. ', ',').replace(
'O.L.','').strip().split(',')
lat,lon = self.latlon_conversion(lat,lon)
try:
dataout = vstack((dataout,
[station_id[idx], station_names[idx],
lat, lon, station_url]))
except NameError:
dataout = [station_id[idx], station_names[idx],
lat, lon, station_url]
header = ['station_id', 'station_name','latitude', 'longitude', 'url']
dataout = vstack((header, dataout))
# write to csv file
utils.write_csvfile(self.csvfile, dataout)
def latlon_conversion(self, lat, lon):
'''
conversion of GPS position to lat/lon decimals
example string for lat and lon input: "52 11'"
'''
# latitude conversion
latd = lat.replace("'","").split()
lat = float(latd[0]) + float(latd[1])/60
# longitude conversion
lond = lon.replace("'","").split()
lon = float(lond[0]) + float(lond[1])/60
return lat,lon
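    # A minimal usage sketch of the conversion above (editor's illustration,
    # assuming the metadata page reports positions as degrees and arc-minutes
    # in the format shown in the docstring):
    #   lat, lon = self.latlon_conversion("52 11'", "5 11'")
    #   # lat -> 52 + 11/60 = 52.183..., lon -> 5 + 11/60 = 5.183...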
def check_output_dir(self):
'''
check if outputdir exists and create if not
'''
if not os.path.exists(self.outputdir):
os.makedirs(self.outputdir)
if __name__ == "__main__":
# define argument menu
description = 'Get data KNMI reference stations'
parser = argparse.ArgumentParser(description=description)
# fill argument groups
parser.add_argument('-o', '--outputdir', help='Data output directory',
default=os.path.join(os.getcwd(),'KNMI'),
required=False)
parser.add_argument('-s', '--stationid', help='Station id',
default='', required=False, action='store')
parser.add_argument('-c', '--csvfile', help='CSV data file',
required=True, action='store')
parser.add_argument('-k', '--keep', help='Keep downloaded files',
required=False, action='store_true')
parser.add_argument('-l', '--log', help='Log level',
choices=utils.LOG_LEVELS_LIST,
default=utils.DEFAULT_LOG_LEVEL)
# extract user entered arguments
opts = parser.parse_args()
# define logger
logname = os.path.basename(__file__) + '.log'
logger = utils.start_logging(filename=logname, level=opts.log)
# process data
get_knmi_reference_data(opts)
|
rvanharen/SitC
|
knmi_getdata.py
|
Python
|
apache-2.0
| 5,962
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
DataFrame-based machine learning APIs to let users quickly assemble and configure practical
machine learning pipelines.
"""
from pyspark.ml.base import Estimator, Model, Transformer
from pyspark.ml.pipeline import Pipeline, PipelineModel
__all__ = ["Transformer", "Estimator", "Model", "Pipeline", "PipelineModel"]
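# A hedged sketch of how these classes are typically assembled; the feature and
# classifier stages below come from pyspark.ml.feature and
# pyspark.ml.classification and are illustrative choices, not part of this
# module. Kept as comments so importing the package stays side-effect free:
#
#   from pyspark.ml import Pipeline
#   from pyspark.ml.feature import Tokenizer, HashingTF
#   from pyspark.ml.classification import LogisticRegression
#
#   tokenizer = Tokenizer(inputCol="text", outputCol="words")
#   hashing_tf = HashingTF(inputCol="words", outputCol="features")
#   lr = LogisticRegression(maxIter=10)
#   pipeline = Pipeline(stages=[tokenizer, hashing_tf, lr])
#   model = pipeline.fit(training_df)       # training_df: DataFrame with 'text'/'label'
#   predictions = model.transform(test_df)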
|
wangyixiaohuihui/spark2-annotation
|
python/pyspark/ml/__init__.py
|
Python
|
apache-2.0
| 1,130
|
from django.db.models import Count
from dcim.models import Device, Interface
from extras.api.views import CustomFieldModelViewSet
from utilities.api import FieldChoicesViewSet, ModelViewSet
from utilities.utils import get_subquery
from virtualization import filters
from virtualization.models import Cluster, ClusterGroup, ClusterType, VirtualMachine
from . import serializers
#
# Field choices
#
class VirtualizationFieldChoicesViewSet(FieldChoicesViewSet):
fields = (
(VirtualMachine, ['status']),
)
#
# Clusters
#
class ClusterTypeViewSet(ModelViewSet):
queryset = ClusterType.objects.annotate(
cluster_count=Count('clusters')
)
serializer_class = serializers.ClusterTypeSerializer
filterset_class = filters.ClusterTypeFilter
class ClusterGroupViewSet(ModelViewSet):
queryset = ClusterGroup.objects.annotate(
cluster_count=Count('clusters')
)
serializer_class = serializers.ClusterGroupSerializer
filterset_class = filters.ClusterGroupFilter
class ClusterViewSet(CustomFieldModelViewSet):
queryset = Cluster.objects.select_related(
'type', 'group', 'site',
).prefetch_related(
'tags'
).annotate(
device_count=get_subquery(Device, 'cluster'),
virtualmachine_count=get_subquery(VirtualMachine, 'cluster')
)
serializer_class = serializers.ClusterSerializer
filterset_class = filters.ClusterFilter
#
# Virtual machines
#
class VirtualMachineViewSet(CustomFieldModelViewSet):
queryset = VirtualMachine.objects.select_related(
'cluster__site', 'role', 'tenant', 'platform', 'primary_ip4', 'primary_ip6'
).prefetch_related('tags')
filterset_class = filters.VirtualMachineFilter
def get_serializer_class(self):
"""
Select the specific serializer based on the request context.
        If the `brief` query param equates to True, return the NestedVirtualMachineSerializer.
        If the `exclude` query param includes `config_context` as a value, return the VirtualMachineSerializer.
        Otherwise, return the VirtualMachineWithConfigContextSerializer.
"""
request = self.get_serializer_context()['request']
if request.query_params.get('brief', False):
return serializers.NestedVirtualMachineSerializer
elif 'config_context' in request.query_params.get('exclude', []):
return serializers.VirtualMachineSerializer
return serializers.VirtualMachineWithConfigContextSerializer
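    # A hedged illustration of the mapping documented above (endpoint paths are
    # examples only):
    #   GET .../virtual-machines/?brief=true              -> NestedVirtualMachineSerializer
    #   GET .../virtual-machines/?exclude=config_context  -> VirtualMachineSerializer
    #   GET .../virtual-machines/                         -> VirtualMachineWithConfigContextSerializer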
class InterfaceViewSet(ModelViewSet):
queryset = Interface.objects.filter(
virtual_machine__isnull=False
).select_related('virtual_machine').prefetch_related('tags')
serializer_class = serializers.InterfaceSerializer
filterset_class = filters.InterfaceFilter
def get_serializer_class(self):
request = self.get_serializer_context()['request']
if request.query_params.get('brief', False):
# Override get_serializer_for_model(), which will return the DCIM NestedInterfaceSerializer
return serializers.NestedInterfaceSerializer
return serializers.InterfaceSerializer
|
lampwins/netbox
|
netbox/virtualization/api/views.py
|
Python
|
apache-2.0
| 3,151
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('data', one_hot=True)
mnist_train = mnist.train
mnist_val = mnist.validation
p = 28 * 28
n = 10
h1 = 300
func_act = tf.nn.sigmoid
x_pl = tf.placeholder(dtype=tf.float32, shape=[None, p])
y_pl = tf.placeholder(dtype=tf.float32, shape=[None, n])
w1 = tf.Variable(tf.truncated_normal(shape=[p, h1], stddev=0.1))
b1 = tf.Variable(tf.zeros(shape=[h1]))
w2 = tf.Variable(tf.truncated_normal(shape=[h1, n], stddev=0.1))
b2 = tf.Variable(tf.zeros(shape=[n]))
hidden1 = func_act(tf.matmul(x_pl, w1) + b1)
y_pre = tf.matmul(hidden1, w2) + b2
y_ = tf.nn.softmax(y_pre)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_pl, logits=y_pre))
correct_prediction = tf.equal(tf.argmax(y_pl, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
eta = 0.3
train_op = tf.train.AdagradOptimizer(learning_rate=eta).minimize(cross_entropy)
batch_size = 50
batch_per_epoch = mnist_train.num_examples // batch_size
epoch = 2
with tf.Session() as sess:
tf.global_variables_initializer().run()
x_val = mnist_val.images
y_val = mnist_val.labels
val_fd = {x_pl: x_val, y_pl: y_val}
for ep in range(epoch):
print(f'Epoch {ep+1}:')
for sp in range(batch_per_epoch):
xtr, ytr = mnist_train.next_batch(batch_size)
loss_value, _ = sess.run([cross_entropy, train_op], feed_dict={x_pl: xtr, y_pl: ytr})
if sp == 0 or (sp + 1) % 100 == 0:
print(f'Loss: {loss_value:.4f}')
acc = sess.run(accuracy, feed_dict=val_fd)
print(f'Validation Acc: {acc:.4f}')
|
bm2-lab/MLClass
|
cgh_deep_learning/mnist_mlp.py
|
Python
|
apache-2.0
| 1,723
|
from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class DeviceObjectObjectRemote(RemoteModel):
"""
Network Objects cross usage
| ``DeviceObjectObjectID:`` The internal NetMRI identifier of this usage relationship between network objects.
| ``attribute type:`` number
| ``DeviceID:`` The internal NetMRI identifier for the device to which belongs this network objects.
| ``attribute type:`` number
| ``ParentDeviceObjectID:`` The internal NetMRI identifier of the parent network object (the user).
| ``attribute type:`` number
| ``ChildDeviceObjectID:`` The internal NetMRI identifier of the child network object (the used service).
| ``attribute type:`` number
| ``OoFirstSeenTime:`` The timestamp of when NetMRI saw for the first time this relationship.
| ``attribute type:`` datetime
| ``OoProvisionData:`` Internal data - do not modify, may change without warning.
| ``attribute type:`` string
| ``DataSourceID:`` The internal NetMRI identifier for the collector NetMRI that collected this data record.
| ``attribute type:`` number
| ``OoStartTime:`` The starting effective time of this record.
| ``attribute type:`` datetime
| ``OoEndTime:`` The ending effective time of this record, or empty if still in effect.
| ``attribute type:`` datetime
| ``OoTimestamp:`` The date and time this record was collected or calculated.
| ``attribute type:`` datetime
| ``OoChangedCols:`` The fields that changed between this revision of the record and the previous revision.
| ``attribute type:`` string
"""
properties = ("DeviceObjectObjectID",
"DeviceID",
"ParentDeviceObjectID",
"ChildDeviceObjectID",
"OoFirstSeenTime",
"OoProvisionData",
"DataSourceID",
"OoStartTime",
"OoEndTime",
"OoTimestamp",
"OoChangedCols",
)
@property
@check_api_availability
def parent_device_object(self):
"""
The parent network object of this relationship.
``attribute type:`` model
"""
return self.broker.parent_device_object(**{"DeviceObjectObjectID": self.DeviceObjectObjectID})
@property
@check_api_availability
def child_device_object(self):
"""
The child network object of this relationship.
``attribute type:`` model
"""
return self.broker.child_device_object(**{"DeviceObjectObjectID": self.DeviceObjectObjectID})
@property
@check_api_availability
def data_source(self):
"""
The collector NetMRI that collected this data record.
``attribute type:`` model
"""
return self.broker.data_source(**{"DeviceObjectObjectID": self.DeviceObjectObjectID})
@property
@check_api_availability
def device(self):
"""
The device from which this data was collected.
``attribute type:`` model
"""
return self.broker.device(**{"DeviceObjectObjectID": self.DeviceObjectObjectID})
|
infobloxopen/infoblox-netmri
|
infoblox_netmri/api/remote/models/device_object_object_remote.py
|
Python
|
apache-2.0
| 3,242
|
# Copyright 2014 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from touchdown.core import argument, errors, serializers
from touchdown.core.plan import Plan, Present
from touchdown.core.resource import Resource
from ..common import SimpleApply, SimpleDescribe, SimpleDestroy, TagsMixin
from .network_acl import NetworkACL
from .route_table import RouteTable
from .vpc import VPC
class Subnet(Resource):
resource_name = "subnet"
field_order = ["vpc"]
name = argument.String(field="Name", group="tags")
cidr_block = argument.IPNetwork(field="CidrBlock")
availability_zone = argument.String(field="AvailabilityZone")
route_table = argument.Resource(RouteTable)
network_acl = argument.Resource(NetworkACL)
tags = argument.Dict()
vpc = argument.Resource(VPC, field="VpcId")
def clean_cidr_block(self, cidr_block):
if cidr_block not in self.vpc.cidr_block:
raise errors.InvalidParameter(
"{} not inside network {}".format(self.cidr_block, self.vpc.cidr_block)
)
return cidr_block
class Describe(SimpleDescribe, Plan):
resource = Subnet
service_name = "ec2"
api_version = "2015-10-01"
describe_action = "describe_subnets"
describe_envelope = "Subnets"
key = "SubnetId"
signature = (Present("name"), Present("vpc"), Present("cidr_block"))
def get_describe_filters(self):
vpc = self.runner.get_plan(self.resource.vpc)
if not vpc.resource_id:
return None
return {
"Filters": [
{"Name": "cidrBlock", "Values": [str(self.resource.cidr_block)]},
{"Name": "vpcId", "Values": [vpc.resource_id]},
]
}
def annotate_object(self, obj):
subnet_id = obj[self.key]
network_acl = self.client.describe_network_acls(
Filters=[{"Name": "association.subnet-id", "Values": [subnet_id]}]
)["NetworkAcls"]
if network_acl:
for assoc in network_acl[0].get("Associations", []):
if assoc["SubnetId"] == subnet_id:
obj["NetworkAclId"] = assoc["NetworkAclId"]
obj["NetworkAclAssociationId"] = assoc["NetworkAclAssociationId"]
break
route_tables = self.client.describe_route_tables(
Filters=[{"Name": "association.subnet-id", "Values": [subnet_id]}]
)["RouteTables"]
if route_tables:
for assoc in route_tables[0].get("Associations", []):
if assoc["SubnetId"] == subnet_id:
obj["RouteTableId"] = assoc["RouteTableId"]
obj["RouteTableAssociationId"] = assoc["RouteTableAssociationId"]
break
return obj
class Apply(TagsMixin, SimpleApply, Describe):
create_action = "create_subnet"
waiter = "subnet_available"
def update_object(self):
if self.resource.route_table:
if not self.object.get("RouteTableAssociationId", None):
yield self.generic_action(
"Associate route table",
self.client.associate_route_table,
SubnetId=serializers.Identifier(),
RouteTableId=self.resource.route_table.identifier(),
)
elif (
self.object["RouteTableId"]
!= self.runner.get_plan(self.resource.route_table).resource_id
):
yield self.generic_action(
"Replace route table association",
self.client.replace_route_table_association,
AssociationId=self.object["RouteTableAssociationId"],
RouteTableId=self.resource.route_table.identifier(),
)
elif self.object.get("RouteTableAssociationId", None):
yield self.generic_action(
"Disassociate route table",
self.client.disassociate_route_table,
AssociationId=self.object["RouteTableAssociationId"],
)
naa_changed = False
if not self.resource.network_acl:
return
if not self.object:
naa_changed = True
elif not self.object.get("NetworkAclAssociationId", None):
naa_changed = True
elif self.runner.get_plan(
self.resource.network_acl
).resource_id != self.object.get("NetworkAclId", None):
naa_changed = True
if naa_changed:
yield self.generic_action(
"Replace Network ACL association",
self.client.replace_network_acl_association,
AssociationId=serializers.Property("NetworkAclAssociationId"),
NetworkAclId=self.resource.network_acl.identifier(),
)
class Destroy(SimpleDestroy, Describe):
destroy_action = "delete_subnet"
|
yaybu/touchdown
|
touchdown/aws/vpc/subnet.py
|
Python
|
apache-2.0
| 5,439
|
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class X:
return 3
|
kayhayen/Nuitka
|
tests/syntax/ClassReturn.py
|
Python
|
apache-2.0
| 791
|
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A trainer object that can train models with a single output."""
from absl import logging
from third_party.tf_models import orbit
import tensorflow as tf
class IdentityMetric(tf.keras.metrics.Metric):
"""Keras metric to report value at any instant."""
def __init__(self, name, aggregation):
"""Constructor.
Args:
name: Name of the metric.
aggregation: A tf.VariableAggregation method that indicates how to
aggregate values across replicas.
"""
super(IdentityMetric, self).__init__(name=name)
self.value = self.add_weight(
name='/'.join([name, 'value']),
initializer='zeros',
aggregation=aggregation)
def update_state(self, current_value):
"""Update metrics.
Args:
current_value: A scalar value for the metric.
"""
self.value.assign(current_value)
def result(self):
return self.value
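# A minimal usage sketch for IdentityMetric (editor's illustration, not part of
# the original module):
#   lr_metric = IdentityMetric('learning_rate',
#                              tf.VariableAggregation.ONLY_FIRST_REPLICA)
#   lr_metric.update_state(1e-3)
#   lr_metric.result()  # -> 0.001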
class SingleTaskTrainer(orbit.StandardTrainer):
"""Trains a single-output model on a given dataset.
This trainer will handle running a model with one output on a single
dataset. It will apply the provided loss function to the model's output
to calculate gradients and will apply them via the provided optimizer. It will
also supply the output of that model to one or more `tf.keras.metrics.Metric`
objects.
"""
def __init__(self,
train_dataset,
label_key,
model,
loss_fn,
optimizer,
metrics=None,
trainer_options=None,
summary_fn=None,
grad_clip_norm=0.):
"""Initializes a `SingleTaskTrainer` instance.
If the `SingleTaskTrainer` should run its model under a distribution
strategy, it should be created within that strategy's scope.
This trainer will also calculate metrics during training. The loss metric
is calculated by default, but other metrics can be passed to the `metrics`
arg.
Arguments:
train_dataset: A `tf.data.Dataset` or `DistributedDataset` that contains a
string-keyed dict of `Tensor`s.
label_key: The key corresponding to the label value in feature
dictionaries dequeued from `train_dataset`. This key will be removed
from the dictionary before it is passed to the model.
model: A `tf.Module` or Keras `Model` object to evaluate. It must accept a
`training` kwarg.
loss_fn: A per-element loss function of the form (target, output). The
output of this loss function will be reduced via `tf.reduce_mean` to
create the final loss. We recommend using the functions in the
`tf.keras.losses` package or `tf.keras.losses.Loss` objects with
`reduction=tf.keras.losses.reduction.NONE`.
optimizer: A `tf.keras.optimizers.Optimizer` instance.
metrics: A single `tf.keras.metrics.Metric` object, or a list of
`tf.keras.metrics.Metric` objects.
trainer_options: An optional `orbit.utils.StandardTrainerOptions` object.
summary_fn: A function that adds tf.summary on model input and output
tensors.
grad_clip_norm: A float to clip the gradients by global norm.
"""
self.label_key = label_key
self.model = model
self.loss_fn = loss_fn
self.optimizer = optimizer
self.summary_fn = summary_fn
self.grad_clip_norm = grad_clip_norm
# Capture the strategy from the containing scope.
self.strategy = tf.distribute.get_strategy()
self.train_loss = IdentityMetric('training_loss',
tf.VariableAggregation.SUM)
self.task_loss = IdentityMetric('task_loss', tf.VariableAggregation.SUM)
self.regularization_loss = IdentityMetric('regularization_loss',
tf.VariableAggregation.SUM)
self.learning_rate = IdentityMetric(
'learning_rate', tf.VariableAggregation.ONLY_FIRST_REPLICA)
# We need self.metrics to be an iterable later, so we handle that here.
if metrics is None:
self.metrics = []
elif isinstance(metrics, list):
self.metrics = metrics
else:
self.metrics = [metrics]
super(SingleTaskTrainer, self).__init__(
train_dataset=train_dataset, options=trainer_options)
def train_loop_begin(self):
"""Actions to take once, at the beginning of each train loop."""
self.train_loss.reset_states()
self.task_loss.reset_states()
self.regularization_loss.reset_states()
self.learning_rate.reset_states()
for metric in self.metrics:
metric.reset_states()
def train_step(self, iterator):
"""A train step. Called multiple times per train loop by the superclass."""
def train_fn(inputs):
with tf.GradientTape() as tape:
# Extract the target value and delete it from the input dict, so that
# the model never sees it.
target = inputs.pop(self.label_key)
# Get the outputs of the model.
logging.info('*** Features ***')
for name in sorted(inputs.keys()):
logging.info(' name = %s', name)
output = self.model(inputs, training=True)
# Get the average per-batch loss and scale it down by the number of
# replicas. This ensures that we don't end up multiplying our loss by
# the number of workers - gradients are summed, not averaged, across
# replicas during the apply_gradients call.
loss = tf.reduce_mean(self.loss_fn(target, output))
loss = loss / self.strategy.num_replicas_in_sync
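        # Editor's illustration of the scaling above: with 8 replicas and a
        # per-replica mean loss of 0.8, each replica contributes 0.8 / 8 = 0.1,
        # so the summed gradients match a single-worker mean loss of 0.8
        # instead of an inflated 8 * 0.8 = 6.4.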
# Since we don't use compile/fit api for training, the only losses added
# to the model are regularization losses.
regularization_loss = 0
if self.model.losses:
regularization_loss = tf.add_n(self.model.losses)
regularization_loss = (
regularization_loss / self.strategy.num_replicas_in_sync)
total_loss = loss + regularization_loss
loss_dict = {
'total_loss': total_loss,
            'loss': loss,
'reg_loss': regularization_loss,
}
if self.summary_fn:
self.summary_fn(loss_dict, self.optimizer.iterations)
# Get the gradients by applying the loss to the model's trainable
# variables.
gradients = tape.gradient(total_loss, self.model.trainable_variables)
if self.grad_clip_norm > 0.:
logging.info('Clipping gradient by norm: {:.3f}'.format(
self.grad_clip_norm))
gradients, _ = tf.clip_by_global_norm(gradients, self.grad_clip_norm)
# Apply the gradients via the optimizer.
self.optimizer.apply_gradients(
list(zip(gradients, self.model.trainable_variables)))
# Update metrics.
self.train_loss.update_state(total_loss)
self.task_loss.update_state(loss)
self.regularization_loss.update_state(regularization_loss)
self.learning_rate.update_state(
self.optimizer.learning_rate(self.optimizer.iterations))
for metric in self.metrics:
metric.update_state(target, output)
# This is needed to handle distributed computation.
self.strategy.run(train_fn, args=(next(iterator),))
def train_loop_end(self):
"""Actions to take once after a training loop."""
with self.strategy.scope():
# Export the metrics.
metrics = {metric.name: metric.result() for metric in self.metrics}
metrics[self.train_loss.name] = self.train_loss.result()
metrics[self.task_loss.name] = self.task_loss.result()
metrics[self.regularization_loss.name] = self.regularization_loss.result()
metrics[self.learning_rate.name] = self.learning_rate.result()
return metrics
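# A hedged end-to-end sketch of constructing this trainer; the dataset, model
# and learning-rate schedule are placeholders, and this file does not prescribe
# any particular outer training loop (note that train_step expects
# optimizer.learning_rate to be a callable schedule):
#
#   strategy = tf.distribute.MirroredStrategy()
#   with strategy.scope():
#     model = tf.keras.Sequential([tf.keras.layers.Dense(10)])
#     schedule = tf.keras.optimizers.schedules.ExponentialDecay(0.1, 1000, 0.9)
#     trainer = SingleTaskTrainer(
#         train_dataset=strategy.experimental_distribute_dataset(dataset),
#         label_key='label',
#         model=model,
#         loss_fn=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
#         optimizer=tf.keras.optimizers.SGD(schedule))
#   trainer.train_loop_begin()
#   # ... train_step()/train_loop_end() are normally driven by an orbit controller loop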
|
google-research/mint
|
mint/ctl/single_task_trainer.py
|
Python
|
apache-2.0
| 8,299
|
# -*- coding: utf-8 -*-
from logging import getLogger
from cornice.resource import resource, view
from openprocurement.api.models import Complaint, STAND_STILL_TIME, get_now
from openprocurement.api.utils import (
apply_patch,
save_tender,
add_next_award,
error_handler,
update_journal_handler_params,
)
from openprocurement.api.validation import (
validate_complaint_data,
validate_patch_complaint_data,
)
LOGGER = getLogger(__name__)
@resource(name='Tender Award Complaints',
collection_path='/tenders/{tender_id}/awards/{award_id}/complaints',
path='/tenders/{tender_id}/awards/{award_id}/complaints/{complaint_id}',
description="Tender award complaints",
error_handler=error_handler)
class TenderAwardComplaintResource(object):
def __init__(self, request):
self.request = request
self.db = request.registry.db
@view(content_type="application/json", permission='create_award_complaint', validators=(validate_complaint_data,), renderer='json')
def collection_post(self):
"""Post a complaint for award
"""
tender = self.request.validated['tender']
if tender.status not in ['active.qualification', 'active.awarded']:
self.request.errors.add('body', 'data', 'Can\'t add complaint in current ({}) tender status'.format(tender.status))
self.request.errors.status = 403
return
if self.request.context.complaintPeriod and \
(self.request.context.complaintPeriod.startDate and self.request.context.complaintPeriod.startDate > get_now() or
self.request.context.complaintPeriod.endDate and self.request.context.complaintPeriod.endDate < get_now()):
self.request.errors.add('body', 'data', 'Can add complaint only in complaintPeriod')
self.request.errors.status = 403
return
complaint_data = self.request.validated['data']
complaint = Complaint(complaint_data)
self.request.context.complaints.append(complaint)
if save_tender(self.request):
update_journal_handler_params({'complaint_id': complaint.id})
LOGGER.info('Created tender award complaint {}'.format(complaint.id), extra={'MESSAGE_ID': 'tender_award_complaint_create'})
self.request.response.status = 201
self.request.response.headers['Location'] = self.request.route_url('Tender Award Complaints', tender_id=tender.id, award_id=self.request.validated['award_id'], complaint_id=complaint['id'])
return {'data': complaint.serialize("view")}
@view(renderer='json', permission='view_tender')
def collection_get(self):
"""List complaints for award
"""
return {'data': [i.serialize("view") for i in self.request.context.complaints]}
@view(renderer='json', permission='view_tender')
def get(self):
"""Retrieving the complaint for award
"""
return {'data': self.request.validated['complaint'].serialize("view")}
@view(content_type="application/json", permission='review_complaint', validators=(validate_patch_complaint_data,), renderer='json')
def patch(self):
"""Post a complaint resolution for award
"""
tender = self.request.validated['tender']
if tender.status not in ['active.qualification', 'active.awarded']:
self.request.errors.add('body', 'data', 'Can\'t update complaint in current ({}) tender status'.format(tender.status))
self.request.errors.status = 403
return
complaint = self.request.context
if complaint.status != 'pending':
self.request.errors.add('body', 'data', 'Can\'t update complaint in current ({}) status'.format(complaint.status))
self.request.errors.status = 403
return
apply_patch(self.request, save=False, src=complaint.serialize())
if complaint.status == 'cancelled':
self.request.errors.add('body', 'data', 'Can\'t cancel complaint')
self.request.errors.status = 403
return
if complaint.status == 'resolved':
award = self.request.validated['award']
if tender.status == 'active.awarded':
tender.status = 'active.qualification'
tender.awardPeriod.endDate = None
if award.status == 'unsuccessful':
for i in tender.awards[tender.awards.index(award):]:
i.complaintPeriod.endDate = get_now() + STAND_STILL_TIME
i.status = 'cancelled'
for j in i.complaints:
if j.status == 'pending':
j.status = 'cancelled'
for i in award.contracts:
i.status = 'cancelled'
award.complaintPeriod.endDate = get_now() + STAND_STILL_TIME
award.status = 'cancelled'
add_next_award(self.request)
elif complaint.status in ['declined', 'invalid'] and tender.status == 'active.awarded':
pending_complaints = [
i
for i in tender.complaints
if i.status == 'pending'
]
pending_awards_complaints = [
i
for a in tender.awards
for i in a.complaints
if i.status == 'pending'
]
stand_still_ends = [
a.complaintPeriod.endDate
for a in tender.awards
if a.complaintPeriod.endDate
]
stand_still_end = max(stand_still_ends) if stand_still_ends else get_now()
stand_still_time_expired = stand_still_end < get_now()
if not pending_complaints and not pending_awards_complaints and stand_still_time_expired:
active_awards = [
a
for a in tender.awards
if a.status == 'active'
]
if not active_awards:
tender.status = 'unsuccessful'
if save_tender(self.request):
LOGGER.info('Updated tender award complaint {}'.format(self.request.context.id), extra={'MESSAGE_ID': 'tender_award_complaint_patch'})
return {'data': complaint.serialize("view")}
|
selurvedu/openprocurement.api
|
src/openprocurement/api/views/award_complaint.py
|
Python
|
apache-2.0
| 6,347
|
#!/usr/bin/env python
# For python 2.6-2.7
from __future__ import print_function
from os.path import *
import re
# from parseBrackets import parseBrackets
from parseDirectiveArgs import parseDirectiveArguments
class MyError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
assertVariants = 'Fail|Equal|True|False|LessThan|LessThanOrEqual|GreaterThan|GreaterThanOrEqual'
assertVariants += '|IsMemberOf|Contains|Any|All|NotAll|None|IsPermutationOf'
assertVariants += '|ExceptionRaised|SameShape|IsNaN|IsFinite'
def cppSetLineAndFile(line, file):
return "#line " + str(line) + ' "' + file + '"\n'
def getSubroutineName(line):
try:
m = re.match('\s*subroutine\s+(\w*)\s*(\\([\w\s,]*\\))?\s*(!.*)*$', line, re.IGNORECASE)
return m.groups()[0]
except:
raise MyError('Improper format in declaration of test procedure.')
def parseArgsFirstRest(directiveName,line):
"""If the @-directive has more than one argument, parse into first and rest strings.
Added for assertAssociated.
"""
if directiveName != '':
m = re.match('\s*'+directiveName+'\s*\\((.*\w.*)\\)\s*$',line,re.IGNORECASE)
if m:
argStr = m.groups()[0]
else:
return None
else:
argStr = line
args = parseDirectiveArguments(argStr)
if args == []:
returnArgs = None
elif len(args) == 1:
returnArgs = [args[0]]
else:
returnArgs = [args[0],','.join(args[1:])]
return returnArgs
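# A hedged sketch of the expected behaviour, assuming parseDirectiveArguments
# splits its argument string on top-level commas (directive text is illustrative):
#   parseArgsFirstRest('@assertassociated', "@assertassociated(a)")
#       -> ['a']
#   parseArgsFirstRest('@assertassociated', "@assertassociated(a, b, 'msg')")
#       -> ['a', "b,'msg'"]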
def parseArgsFirstSecondRest(directiveName,line):
"""If the @-directive must have at least two arguments, parse into first, second,
and rest strings. Added for assertAssociated.
"""
args1 = parseArgsFirstRest(directiveName,line)
returnArgs = None
if args1 != None:
if len(args1) == 1:
returnArgs = args1
elif len(args1) == 2:
args2 = parseArgsFirstRest('',args1[1])
returnArgs = [args1[0]] + args2
elif len(args1) == 3:
print(-999,'parseArgsFirstSecondRest::error!')
returnArgs = None
return returnArgs
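# Continuing the sketch above (same assumption about parseDirectiveArguments),
# the three-argument form is recovered as separate first and second arguments:
#   parseArgsFirstSecondRest('@assertassociated',
#                            "@assertassociated(a, b, 'msg')")
#       -> ['a', 'b', "'msg'"]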
def getSelfObjectName(line):
m = re.match('\s*subroutine\s+\w*\s*\\(\s*(\w+)\s*(,\s*\w+\s*)*\\)\s*$', line, re.IGNORECASE)
if m:
return m.groups()[0]
else:
return m
def getTypeName(line):
m = re.match('\s*type(.*::\s*|\s+)(\w*)\s*$', line, re.IGNORECASE)
return m.groups()[1]
class Action():
def apply(self, line):
m = self.match(line)
if m: self.action(m, line)
return m
class AtTest(Action):
def __init__(self, parser):
self.parser = parser
self.keyword = '@test'
def match(self, line):
m = re.match('\s*'+self.keyword+'(\s*(\\(.*\\))?\s*$)', line, re.IGNORECASE)
return m
def action(self, m, line):
options = re.match('\s*'+self.keyword+'\s*\\((.*)\\)\s*$', line, re.IGNORECASE)
method = {}
if options:
npesOption = re.search('npes\s*=\s*\\[([0-9,\s]+)\\]', options.groups()[0], re.IGNORECASE)
if npesOption:
npesString = npesOption.groups()[0]
npes = map(int, npesString.split(','))
method['npRequests'] = npes
#ifdef is optional
matchIfdef = re.match('.*ifdef\s*=\s*(\w+)', options.groups()[0], re.IGNORECASE)
if matchIfdef:
ifdef = matchIfdef.groups()[0]
method['ifdef'] = ifdef
matchIfndef = re.match('.*ifndef\s*=\s*(\w+)', options.groups()[0], re.IGNORECASE)
if matchIfndef:
ifndef = matchIfndef.groups()[0]
method['ifndef'] = ifndef
matchType = re.match('.*type\s*=\s*(\w+)', options.groups()[0], re.IGNORECASE)
if matchType:
print ('Type', matchType.groups()[0])
method['type'] = matchType.groups()[0]
paramOption = re.search('testParameters\s*=\s*[{](.*)[}]', options.groups()[0], re.IGNORECASE)
if paramOption:
paramExpr = paramOption.groups()[0]
method['testParameters'] = paramExpr
casesOption = re.search('cases\s*=\s*(\\[[0-9,\s]+\\])', options.groups()[0], re.IGNORECASE)
if casesOption:
method['cases'] = casesOption.groups()[0]
nextLine = self.parser.nextLine()
method['name'] = getSubroutineName(nextLine)
        # save "self" name for use with @mpiAssert
        dummyArgument = getSelfObjectName(nextLine)
        self.parser.currentSelfObjectName = dummyArgument
        if dummyArgument:
            method['selfObjectName'] = dummyArgument
self.parser.userTestMethods.append(method)
self.parser.commentLine(line)
self.parser.outputFile.write(nextLine)
# deprecated - should now just use @test
class AtMpiTest(AtTest):
def __init__(self, parser):
self.parser = parser
self.keyword = '@mpitest'
class AtTestCase(Action):
def __init__(self, parser):
self.parser = parser
def match(self, line):
m = re.match('\s*@testcase\s*(|\\(.*\\))\s*$', line, re.IGNORECASE)
return m
def action(self, m, line):
options = re.match('\s*@testcase\s*\\((.*)\\)\s*$', line, re.IGNORECASE)
if options:
value = re.search('constructor\s*=\s*(\w*)', options.groups()[0], re.IGNORECASE)
if value:
self.parser.userTestCase['constructor'] = value.groups()[0]
value = re.search('npes\s*=\s*\\[([0-9,\s]+)\\]', options.groups()[0], re.IGNORECASE)
if value:
npesString = value.groups()[0]
npes = map(int,npesString.split(','))
self.parser.userTestCase['npRequests'] = npes
value = re.search('cases\s*=\s*(\\[[0-9,\s]+\\])', options.groups()[0], re.IGNORECASE)
if value:
cases = value.groups()[0]
self.parser.userTestCase['cases'] = cases
value = re.search('testParameters\s*=\s*[{](.*)[}]', options.groups()[0], re.IGNORECASE)
if value:
paramExpr = value.groups()[0]
self.parser.userTestCase['testParameters'] = paramExpr
nextLine = self.parser.nextLine()
self.parser.userTestCase['type']=getTypeName(nextLine)
self.parser.commentLine(line)
self.parser.outputFile.write(nextLine)
class AtSuite(Action):
def __init__(self, parser):
self.parser = parser
def match(self, line):
nameRe = "'\w+'|" + """\w+"""
m = re.match("\s*@suite\s*\\(\s*name\s*=\s*("+nameRe+")\s*\\)\s*$", line, re.IGNORECASE)
return m
def action(self, m, line):
self.parser.suiteName=m.groups()[0][1:-1]
self.parser.wrapModuleName = 'Wrap' + self.parser.suiteName
class AtBegin(Action):
def __init__(self, parser):
self.parser = parser
def match(self, line):
m = re.match('\s*module\s+(\w*)\s*$', line, re.IGNORECASE)
return m
def action(self, m, line):
self.parser.userModuleName = m.groups()[0]
self.parser.wrapModuleName = 'Wrap' + self.parser.userModuleName
if not self.parser.suiteName:
self.parser.suiteName = self.parser.userModuleName + "_suite"
self.parser.outputFile.write(line)
class AtAssert(Action):
def __init__(self, parser):
self.parser = parser
def match(self, line):
m = re.match('\s*@assert('+assertVariants+')\s*\\((.*\w.*)\\)\s*$', line, re.IGNORECASE)
return m
def appendSourceLocation(self, fileHandle, fileName, lineNumber):
fileHandle.write(" & location=SourceLocation( &\n")
fileHandle.write(" & '" + str(basename(fileName)) + "', &\n")
fileHandle.write(" & " + str(lineNumber) + ")")
def action(self, m, line):
p = self.parser
p.outputFile.write(cppSetLineAndFile(p.currentLineNumber, p.fileName))
p.outputFile.write(" call assert"+m.groups()[0]+"(" + m.groups()[1] + ", &\n")
self.appendSourceLocation(p.outputFile, p.fileName, p.currentLineNumber)
p.outputFile.write(" )\n")
p.outputFile.write(" if (anyExceptions()) return\n")
p.outputFile.write(cppSetLineAndFile(p.currentLineNumber+1, p.fileName))
class AtAssertAssociated(Action):
def __init__(self,parser):
self.parser = parser
def match(self, line):
m = re.match('\s*@assertassociated\s*\\((.*\w.*)\\)\s*$', line, re.IGNORECASE)
if not m:
m = re.match( \
'\s*@assertassociated\s*\\((\s*([^,]*\w.*),\s*([^,]*\w.*),(.*\w*.*))\\)\s*$', \
line, re.IGNORECASE)
# How to get both (a,b) and (a,b,c) to match?
if not m:
m = re.match( \
'\s*@assertassociated\s*\\((\s*([^,]*\w.*),\s*([^,]*\w.*))\\)\s*$', \
line, re.IGNORECASE)
return m
def appendSourceLocation(self, fileHandle, fileName, lineNumber):
fileHandle.write(" & location=SourceLocation( &\n")
fileHandle.write(" & '" + str(basename(fileName)) + "', &\n")
fileHandle.write(" & " + str(lineNumber) + ")")
def action(self, m, line):
p = self.parser
# args = parseArgsFirstRest('@assertassociated',line)
args = parseArgsFirstSecondRest('@assertassociated',line)
# print(9000,line)
# print(9001,args)
p.outputFile.write(cppSetLineAndFile(p.currentLineNumber, p.fileName))
if len(args) > 1:
if re.match('.*message=.*',args[1],re.IGNORECASE):
p.outputFile.write(" call assertTrue(associated(" + args[0] + "), " + args[1] + ", &\n")
elif len(args) > 2:
p.outputFile.write(" call assertTrue(associated(" + args[0] + "," + args[1] + "), " + args[2] + ", &\n")
else:
p.outputFile.write(" call assertTrue(associated(" + args[0] + "," + args[1] + "), &\n")
else:
p.outputFile.write(" call assertTrue(associated(" + args[0] + "), &\n")
self.appendSourceLocation(p.outputFile, p.fileName, p.currentLineNumber)
p.outputFile.write(" )\n")
p.outputFile.write(" if (anyExceptions()) return\n")
p.outputFile.write(cppSetLineAndFile(p.currentLineNumber+1, p.fileName))
class AtAssertNotAssociated(Action):
def __init__(self,parser):
self.parser = parser
self.name='@assertnotassociated'
def match(self, line):
m = re.match('\s*@assert(not|un)associated\s*\\((.*\w.*)\\)\s*$', line, re.IGNORECASE)
if m:
self.name='@assert'+m.groups()[0]+'associated'
else:
self.name='@assertnotassociated'
if not m:
m = re.match( \
'\s*@assert(not|un)associated\s*\\((\s*([^,]*\w.*),\s*([^,]*\w.*),(.*\w*.*))\\)\s*$', \
line, re.IGNORECASE)
# How to get both (a,b) and (a,b,c) to match?
if not m:
m = re.match( \
'\s*@assert(not|un)associated\s*\\((\s*([^,]*\w.*),\s*([^,]*\w.*))\\)\s*$', \
line, re.IGNORECASE)
if m:
self.name='@assert'+m.groups()[0]+'associated'
else:
self.name='@assertnotassociated'
return m
def appendSourceLocation(self, fileHandle, fileName, lineNumber):
fileHandle.write(" & location=SourceLocation( &\n")
fileHandle.write(" & '" + str(basename(fileName)) + "', &\n")
fileHandle.write(" & " + str(lineNumber) + ")")
def action(self, m, line):
p = self.parser
#-- args = parseArgsFirstRest('@assertassociated',line)
#ok args = parseArgsFirstSecondRest('@assertassociated',line)
args = parseArgsFirstSecondRest(self.name,line)
# print(9000,line)
# print(9001,args)
p.outputFile.write(cppSetLineAndFile(p.currentLineNumber, p.fileName))
if len(args) > 1:
if re.match('.*message=.*',args[1],re.IGNORECASE):
p.outputFile.write(" call assertFalse(associated(" + args[0] + "), " + args[1] + ", &\n")
elif len(args) > 2:
p.outputFile.write(" call assertFalse(associated(" + args[0] + "," + args[1] + "), " + args[2] + ", &\n")
else:
p.outputFile.write(" call assertFalse(associated(" + args[0] + "," + args[1] + "), &\n")
else:
p.outputFile.write(" call assertFalse(associated(" + args[0] + "), &\n")
self.appendSourceLocation(p.outputFile, p.fileName, p.currentLineNumber)
p.outputFile.write(" )\n")
p.outputFile.write(" if (anyExceptions()) return\n")
p.outputFile.write(cppSetLineAndFile(p.currentLineNumber+1, p.fileName))
class AtAssertEqualUserDefined(Action):
"""Convenience directive replacing (a,b) with a call to assertTrue(a==b)
and an error message, if none is provided when invoked.
"""
def __init__(self,parser):
self.parser = parser
def match(self, line):
m = re.match( \
'\s*@assertequaluserdefined\s*\\((\s*([^,]*\w.*),\s*([^,]*\w.*),(.*\w*.*))\\)\s*$', \
line, re.IGNORECASE)
# How to get both (a,b) and (a,b,c) to match?
if not m:
m = re.match( \
'\s*@assertequaluserdefined\s*\\((\s*([^,]*\w.*),\s*([^,]*\w.*))\\)\s*$', \
line, re.IGNORECASE)
return m
def appendSourceLocation(self, fileHandle, fileName, lineNumber):
fileHandle.write(" & location=SourceLocation( &\n")
fileHandle.write(" & '" + str(basename(fileName)) + "', &\n")
fileHandle.write(" & " + str(lineNumber) + ")")
def action(self, m, line):
p = self.parser
args = parseArgsFirstSecondRest('@assertequaluserdefined',line)
p.outputFile.write(cppSetLineAndFile(p.currentLineNumber, p.fileName))
if len(args) > 2:
p.outputFile.write(" call assertTrue(" \
+ args[0] + "==" + args[1] + ", " + args[2] + ", &\n")
else:
p.outputFile.write(" call assertTrue(" \
+ args[0] + "==" + args[1] + ", &\n")
if not re.match('.*message=.*',line,re.IGNORECASE):
p.outputFile.write(" & message='<" + args[0] + "> not equal to <" + args[1] + ">', &\n")
self.appendSourceLocation(p.outputFile, p.fileName, p.currentLineNumber)
p.outputFile.write(" )\n")
p.outputFile.write(" if (anyExceptions()) return\n")
p.outputFile.write(cppSetLineAndFile(p.currentLineNumber+1, p.fileName))
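# A hedged before/after sketch of the rewrite performed by AtAssertEqualUserDefined
# (generated Fortran is abbreviated; the SourceLocation and anyExceptions()
# boilerplate emitted by action() is omitted):
#   source:    @assertEqualUserDefined(a, b)
#   generated: call assertTrue(a==b, &
#                & message='<a> not equal to <b>', &
#                ... )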
class AtAssertEquivalent(Action):
"""Convenience directive replacing (a,b) with a call to assertTrue(a.eqv.b)
and an error message, if none is provided when invoked.
"""
def __init__(self,parser):
self.parser = parser
def match(self, line):
m = re.match( \
'\s*@assertequivalent\s*\\((\s*([^,]*\w.*),\s*([^,]*\w.*),(.*\w*.*))\\)\s*$', \
line, re.IGNORECASE)
# How to get both (a,b) and (a,b,c) to match?
if not m:
m = re.match( \
'\s*@assertequivalent\s*\\((\s*([^,]*\w.*),\s*([^,]*\w.*))\\)\s*$', \
line, re.IGNORECASE)
return m
def appendSourceLocation(self, fileHandle, fileName, lineNumber):
fileHandle.write(" & location=SourceLocation( &\n")
fileHandle.write(" & '" + str(basename(fileName)) + "', &\n")
fileHandle.write(" & " + str(lineNumber) + ")")
def action(self, m, line):
p = self.parser
args = parseArgsFirstSecondRest('@assertequivalent',line)
p.outputFile.write(cppSetLineAndFile(p.currentLineNumber, p.fileName))
if len(args) > 2:
p.outputFile.write(" call assertTrue(" \
+ args[0] + ".eqv." + args[1] + ", " + args[2] + ", &\n")
else:
p.outputFile.write(" call assertTrue(" \
+ args[0] + ".eqv." + args[1] + ", &\n")
if not re.match('.*message=.*',line,re.IGNORECASE):
p.outputFile.write(" & message='<" + args[0] + "> not equal to <" + args[1] + ">', &\n")
self.appendSourceLocation(p.outputFile, p.fileName, p.currentLineNumber)
p.outputFile.write(" )\n")
p.outputFile.write(" if (anyExceptions()) return\n")
p.outputFile.write(cppSetLineAndFile(p.currentLineNumber+1, p.fileName))
class AtMpiAssert(Action):
def __init__(self, parser):
self.parser = parser
def match(self, line):
m = re.match('\s*@mpiassert('+assertVariants+')\s*\\((.*\w.*)\\)\s*$', line, re.IGNORECASE)
return m
def appendSourceLocation(self, fileHandle, fileName, lineNumber):
fileHandle.write(" & location=SourceLocation( &\n")
fileHandle.write(" & '" + str(basename(fileName)) + "', &\n")
fileHandle.write(" & " + str(lineNumber) + ")")
def action(self, m, line):
p = self.parser
p.outputFile.write(cppSetLineAndFile(p.currentLineNumber, p.fileName))
p.outputFile.write(" call assert"+m.groups()[0]+"(" + m.groups()[1] + ", &\n")
self.appendSourceLocation(p.outputFile, p.fileName, p.currentLineNumber)
p.outputFile.write(" )\n")
# 'this' object may not exist if test is commented out.
if hasattr(p,'currentSelfObjectName'):
p.outputFile.write(" if (anyExceptions("+p.currentSelfObjectName+"%context)) return\n")
p.outputFile.write(cppSetLineAndFile(p.currentLineNumber+1, p.fileName))
class AtBefore(Action):
def __init__(self, parser):
self.parser = parser
def match(self, line):
m = re.match('\s*@before\s*$', line, re.IGNORECASE)
return m
def action(self, m, line):
nextLine = self.parser.nextLine()
self.parser.userTestCase['setUp'] = getSubroutineName(nextLine)
self.parser.commentLine(line)
self.parser.outputFile.write(nextLine)
class AtAfter(Action):
def __init__(self, parser):
self.parser = parser
def match(self, line):
m = re.match('\s*@after\s*$', line, re.IGNORECASE)
return m
def action(self, m, line):
nextLine = self.parser.nextLine()
self.parser.userTestCase['tearDown'] = getSubroutineName(nextLine)
self.parser.commentLine(line)
self.parser.outputFile.write(nextLine)
class AtTestParameter(Action):
def __init__(self, parser):
self.parser = parser
def match(self, line):
m = re.match('\s*@testParameter\s*(|.*)$', line, re.IGNORECASE)
return m
def action(self, m, line):
options = re.match('\s*@testParameter\s*\\((.*)\\)\s*$', line, re.IGNORECASE)
self.parser.commentLine(line)
nextLine = self.parser.nextLine()
if not 'testParameterType' in self.parser.userTestCase:
self.parser.userTestCase['testParameterType'] = getTypeName(nextLine)
self.parser.outputFile.write(nextLine)
if options:
value = re.search('constructor\s*=\s*(\w*)', options.groups()[0], re.IGNORECASE)
if value:
self.parser.userTestCase['testParameterConstructor'] = value.groups()[0]
else:
self.parser.userTestCase['testParameterConstructor'] = self.parser.userTestCase['testParameterType']
class Parser():
def __init__(self, inputFileName, outputFileName):
def getBaseName(fileName):
from os.path import basename, splitext
base = basename(fileName)
return splitext(base)[0]
self.fileName = inputFileName
self.inputFile = open(inputFileName, 'r')
self.outputFile = open(outputFileName, 'w')
self.defaultSuiteName = getBaseName(inputFileName) + "_suite"
self.suiteName = ''
self.currentLineNumber = 0
self.userModuleName = '' # if any
self.userTestCase = {}
self.userTestCase['setUpMethod'] = ''
self.userTestCase['tearDownMethod'] = ''
self.userTestCase['defaultTestParameterNpes'] = [] # is MPI if not empty
self.userTestCase['defaultTestParametersExpr'] = ''
self.userTestCase['defaultTestParameterCases'] = []
self.userTestMethods = [] # each entry is a dictionary
self.wrapModuleName = "Wrap" + getBaseName(inputFileName)
self.currentLineNumber = 0
self.actions=[]
self.actions.append(AtTest(self))
self.actions.append(AtMpiTest(self))
self.actions.append(AtTestCase(self))
self.actions.append(AtSuite(self))
self.actions.append(AtBegin(self))
self.actions.append(AtAssert(self))
self.actions.append(AtAssertAssociated(self))
# self.actions.append(AtAssertAssociatedWith(self))
self.actions.append(AtAssertNotAssociated(self))
# self.actions.append(AtAssertNotAssociatedWith(self))
self.actions.append(AtAssertEqualUserDefined(self))
self.actions.append(AtAssertEquivalent(self))
self.actions.append(AtMpiAssert(self))
self.actions.append(AtBefore(self))
self.actions.append(AtAfter(self))
self.actions.append(AtTestParameter(self))
def commentLine(self, line):
self.outputFile.write(re.sub('@','!@',line))
def run(self):
def parse(line):
for action in self.actions:
if (action.apply(line)): return
self.outputFile.write(line)
while True:
line = self.nextLine()
if not line: break
parse(line)
if (not self.suiteName): self.suiteName = self.defaultSuiteName
if ('testParameterType' in self.userTestCase and (not 'constructor' in self.userTestCase)):
self.userTestCase['constructor'] = self.userTestCase['testParameterType']
self.makeWrapperModule()
def isComment(self, line):
return re.match('\s*(!.*|)$', line)
def nextLine(self):
while True:
self.currentLineNumber += 1
line = self.inputFile.readline()
if not line: break
if (self.isComment(line)):
self.outputFile.write(line)
pass
else:
break
return line
def printHeader(self):
self.outputFile.write('\n')
self.outputFile.write('module ' + self.wrapModuleName + '\n')
self.outputFile.write(' use pFUnit_mod\n')
if (self.userModuleName): self.outputFile.write(' use ' + self.userModuleName + '\n')
self.outputFile.write(' implicit none\n')
self.outputFile.write(' private\n\n')
def printTail(self):
self.outputFile.write('\n')
self.outputFile.write('end module ' + self.wrapModuleName + '\n\n')
def printWrapUserTestCase(self):
self.outputFile.write(' public :: WrapUserTestCase\n')
self.outputFile.write(' public :: makeCustomTest\n')
self.outputFile.write(' type, extends(' + self.userTestCase['type'] + ') :: WrapUserTestCase\n')
self.outputFile.write(' procedure(userTestMethod), nopass, pointer :: testMethodPtr\n')
self.outputFile.write(' contains\n')
self.outputFile.write(' procedure :: runMethod\n')
self.outputFile.write(' end type WrapUserTestCase\n\n')
self.outputFile.write(' abstract interface\n')
self.outputFile.write(' subroutine userTestMethod(this)\n')
if self.userModuleName:
self.outputFile.write(' use ' + self.userModuleName + '\n')
if 'type' in self.userTestCase:
self.outputFile.write(' class (' + self.userTestCase['type'] + '), intent(inout) :: this\n')
self.outputFile.write(' end subroutine userTestMethod\n')
self.outputFile.write(' end interface\n\n')
def printRunMethod(self):
self.outputFile.write(' subroutine runMethod(this)\n')
self.outputFile.write(' class (WrapUserTestCase), intent(inout) :: this\n\n')
self.outputFile.write(' call this%testMethodPtr(this)\n')
self.outputFile.write(' end subroutine runMethod\n\n')
def printParameterHeader(self, type):
self.outputFile.write(' type (' + type + '), allocatable :: testParameters(:)\n')
self.outputFile.write(' type (' + type + ') :: testParameter\n')
self.outputFile.write(' integer :: iParam \n')
self.outputFile.write(' integer, allocatable :: cases(:) \n')
self.outputFile.write(' \n')
def printMakeSuite(self):
self.outputFile.write('function ' + self.suiteName + '() result(suite)\n')
self.outputFile.write(' use pFUnit_mod\n')
if (self.userModuleName): self.outputFile.write(' use ' + self.userModuleName + '\n')
self.outputFile.write(' use '+ self.wrapModuleName + '\n')
self.outputFile.write(' type (TestSuite) :: suite\n\n')
if not self.userModuleName:
for testMethod in self.userTestMethods:
if ('ifdef' in testMethod):
self.outputFile.write('#ifdef ' + testMethod['ifdef'] + '\n')
elif ('ifndef' in testMethod):
self.outputFile.write('#ifndef ' + testMethod['ifndef'] + '\n')
self.outputFile.write(' external ' + testMethod['name'] + '\n')
if ('ifdef' in testMethod or 'ifndef' in testMethod):
self.outputFile.write('#endif\n')
self.outputFile.write('\n')
if 'setUp' in self.userTestCase:
self.outputFile.write(' external ' + self.userTestCase['setUp'] + '\n')
if 'tearDown' in self.userTestCase:
self.outputFile.write(' external ' + self.userTestCase['tearDown'] + '\n')
self.outputFile.write('\n')
if 'testParameterType' in self.userTestCase:
type = self.userTestCase['testParameterType']
self.printParameterHeader(type)
self.outputFile.write(" suite = newTestSuite('" + self.suiteName + "')\n\n")
for testMethod in self.userTestMethods:
if ('ifdef' in testMethod):
self.outputFile.write('#ifdef ' + testMethod['ifdef'] + '\n')
elif ('ifndef' in testMethod):
self.outputFile.write('#ifndef ' + testMethod['ifndef'] + '\n')
if 'type' in self.userTestCase:
self.addUserTestMethod(testMethod)
else:
if 'npRequests' in testMethod:
self.addMpiTestMethod(testMethod)
else: # vanilla
self.addSimpleTestMethod(testMethod)
self.outputFile.write('\n')
if ('ifdef' in testMethod or 'ifndef' in testMethod):
self.outputFile.write('#endif\n')
self.outputFile.write('\nend function ' + self.suiteName + '\n\n')
def addSimpleTestMethod(self, testMethod):
args = "'" + testMethod['name'] + "', " + testMethod['name']
if 'setUp' in testMethod:
args += ', ' + testMethod['setUp']
elif 'setUp' in self.userTestCase:
args += ', ' + self.userTestCase['setUp']
if 'tearDown' in testMethod:
args += ', ' + testMethod['tearDown']
elif 'tearDown' in self.userTestCase:
args += ', ' + self.userTestCase['tearDown']
if 'type' in testMethod:
type = testMethod['type']
else:
type = 'newTestMethod'
self.outputFile.write(' call suite%addTest(' + type + '(' + args + '))\n')
def addMpiTestMethod(self, testMethod):
for npes in testMethod['npRequests']:
args = "'" + testMethod['name'] + "', " + testMethod['name'] + ", " + str(npes)
if 'setUp' in testMethod:
args += ', ' + testMethod['setUp']
elif 'setUp' in self.userTestCase:
args += ', ' + self.userTestCase['setUp']
if 'tearDown' in testMethod:
args += ', ' + testMethod['tearDown']
elif 'tearDown' in self.userTestCase:
args += ', ' + self.userTestCase['tearDown']
if 'type' in testMethod:
type = testMethod['type']
else:
type = 'newMpiTestMethod'
self.outputFile.write(' call suite%addTest(' + type + '(' + args + '))\n')
def addUserTestMethod(self, testMethod):
args = "'" + testMethod['name'] + "', " + testMethod['name']
if 'npRequests' in testMethod:
npRequests = testMethod['npRequests']
else:
if 'npRequests' in self.userTestCase:
npRequests = self.userTestCase['npRequests']
else:
npRequests = [1]
if 'cases' in testMethod:
cases = testMethod['cases']
elif 'cases' in self.userTestCase:
cases = self.userTestCase['cases']
testParameterArg = '' # unless
if 'cases' in locals():
testParameterArg = ', testParameter'
self.outputFile.write(' cases = ' + testMethod['cases'] + '\n')
self.outputFile.write(' testParameters = [(' +
self.userTestCase['testParameterConstructor'] +
'(cases(iCase)), iCase = 1, size(cases))]\n\n')
if 'testParameterType' in self.userTestCase:
if 'testParameters' in testMethod:
testParameters = testMethod['testParameters']
elif 'testParameters' in self.userTestCase:
testParameters = self.userTestCase['testParameters']
isMpiTestCase = 'npRequests' in self.userTestCase
isMpiTestCase = isMpiTestCase or any('npRequests' in testMethod for testMethod in self.userTestMethods)
if 'testParameters' in locals():
testParameterArg = ', testParameter'
self.outputFile.write(' testParameters = ' + testParameters + '\n\n')
elif isMpiTestCase:
testParameterArg = ', testParameter'
for npes in npRequests:
if 'testParameters' in locals() or 'cases' in locals():
self.outputFile.write(' do iParam = 1, size(testParameters)\n')
self.outputFile.write(' testParameter = testParameters(iParam)\n')
if isMpiTestCase:
self.outputFile.write(' call testParameter%setNumProcessesRequested(' + str(npes) + ')\n')
self.outputFile.write(' call suite%addTest(makeCustomTest(' +
args + testParameterArg + '))\n')
if 'cases' in locals() or 'testParameters' in locals():
self.outputFile.write(' end do\n')
def printMakeCustomTest(self, isMpiTestCase):
args = 'methodName, testMethod'
declareArgs = '#ifdef INTEL_13\n'
declareArgs += ' use pfunit_mod, only: testCase\n'
declareArgs += '#endif\n'
declareArgs += ' type (WrapUserTestCase) :: aTest\n'
declareArgs += '#ifdef INTEL_13\n'
declareArgs += ' target :: aTest\n'
declareArgs += ' class (WrapUserTestCase), pointer :: p\n'
declareArgs += '#endif\n'
declareArgs += ' character(len=*), intent(in) :: methodName\n'
declareArgs += ' procedure(userTestMethod) :: testMethod\n'
if 'testParameterType' in self.userTestCase:
args += ', testParameter'
declareArgs += ' type (' + self.userTestCase['testParameterType'] + '), intent(in) :: testParameter\n'
self.outputFile.write(' function makeCustomTest(' + args + ') result(aTest)\n')
self.outputFile.write(declareArgs)
if 'constructor' in self.userTestCase:
if 'testParameterType' in self.userTestCase:
constructor = self.userTestCase['constructor'] + '(testParameter)'
else:
constructor = self.userTestCase['constructor'] + '()'
self.outputFile.write(' aTest%' + self.userTestCase['type'] + ' = ' + constructor + '\n\n')
self.outputFile.write(' aTest%testMethodPtr => testMethod\n')
self.outputFile.write('#ifdef INTEL_13\n')
self.outputFile.write(' p => aTest\n')
self.outputFile.write(' call p%setName(methodName)\n')
self.outputFile.write('#else\n')
self.outputFile.write(' call aTest%setName(methodName)\n')
self.outputFile.write('#endif\n')
if 'testParameterType' in self.userTestCase:
self.outputFile.write(' call aTest%setTestParameter(testParameter)\n')
self.outputFile.write(' end function makeCustomTest\n')
def makeWrapperModule(self):
# ! Start here
self.printHeader()
if 'type' in self.userTestCase:
self.printWrapUserTestCase()
self.outputFile.write('contains\n\n')
if 'type' in self.userTestCase:
self.printRunMethod()
if 'type' in self.userTestCase:
isMpiTestCase = 'npRequests' in self.userTestCase
isMpiTestCase = isMpiTestCase or any('npRequests' in testMethod for testMethod in self.userTestMethods)
if isMpiTestCase and not 'testParameterType' in self.userTestCase:
self.userTestCase['testParameterType'] = 'MpiTestParameter'
self.printMakeCustomTest(isMpiTestCase)
self.printTail()
self.printMakeSuite()
def final(self):
self.inputFile.close()
self.outputFile.close()
if __name__ == "__main__":
import sys
print("Processing file", sys.argv[1])
p = Parser(sys.argv[1], sys.argv[2])
p.run()
p.final()
print(" ... Done. Results in", sys.argv[2])
|
LungNoodle/lungsim
|
tests/pFUnit-3.2.9/bin/pFUnitParser.py
|
Python
|
apache-2.0
| 34,426
|
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.db import connection
from collections import OrderedDict
from datetime import datetime
import time
import scipy.cluster.hierarchy as hcluster
import numpy as np
class ObsoletedTasksReport:
def __init__(self):
pass
def prepareReportTasksV4(self, request, type):
# 1. Select obsolete tasks
# 2. Select obsolete datasets
# 3. Select tasks related to obsolete datasets
# 4. Show datasets, their status, tasks, status
dataSetsSQLQuery = "SELECT t1.TASKID, t1.TIMESTAMP, t1.STATUS, t1.PR_ID, t2.STATUS, t2.NAME, t1.PARENT_TID FROM ATLAS_DEFT.T_PRODUCTION_TASK t1, ATLAS_DEFT.T_PRODUCTION_DATASET t2 WHERE t2.TASKID=t1.TASKID and t1.TIMESTAMP>add_months(sysdate,-1) and (t1.STATUS IN ('obsolete') or (t2.STATUS IN ('toBeDeleted', 'Deleted') and t1.PPTIMESTAMP > add_months(sysdate,-1)))and instr(t2.NAME,'.log.') = 0"
cur = connection.cursor()
cur.execute(dataSetsSQLQuery)
statsDataSets = cur.fetchall()
i = 0
timesecs = []
for taskEntry in statsDataSets:
timesecs.append(time.mktime(taskEntry[1].timetuple()))
i += 1
minT = min(timesecs)
timesecs[:] = [x - minT for x in timesecs]
thresh = 60
dataTmp = [
timesecs,
]
np.asarray(dataTmp)
clusters = hcluster.fclusterdata(np.transpose(np.asarray(dataTmp)), thresh, criterion="distance")
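        # Worked example (made-up values): with timesecs = [0, 10, 50, 400, 430]
        # and thresh = 60, fclusterdata groups the first three entries into one
        # cluster and the last two into another, so tasks obsoleted within about
        # a minute of each other end up summarised together.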
clustersSummary = {}
i = 0
for dsEntry in statsDataSets:
clusterID = clusters[i]
if clusterID in clustersSummary:
currCluster = clustersSummary[clusterID]
currCluster["req"].append(dsEntry[3])
currCluster["datasets"][dsEntry[5]]=dsEntry[4]
currCluster["tasks"][dsEntry[0]]=dsEntry[2]
currCluster["obsoleteStart"] = dsEntry[1]
currCluster["leastParent"] = dsEntry[6] if dsEntry[6] < currCluster["leastParent"] else currCluster["leastParent"]
else:
currCluster = {"req":[dsEntry[3]], "tasks":{dsEntry[0]:dsEntry[2]},
"datasets":{dsEntry[5]:dsEntry[4]}, "obsoleteStart":dsEntry[1], "leastParent":dsEntry[6]}
clustersSummary[clusterID] = currCluster
i+=1
clustersSummary = clustersSummary.values()
cluserssummaryList = sorted(clustersSummary, key=lambda k: k['obsoleteStart'], reverse=True)
data = {}
data['built'] = datetime.now().strftime("%d %b %Y %H:%M:%S")
data['type'] = type
data['clusters'] = cluserssummaryList
return render_to_response('reportObsoletedTasksv4.html', data, RequestContext(request))
def prepareReportTasksV1(self, request, type):
uniqueTasksCond = ""
if type == "tasksview":
uniqueTasksCond ="PART=1 and"
sqlRequest = '''
SELECT * FROM (
WITH RECONSTRUCTEDTASKCHAIN AS (
SELECT TASKID, PR_ID, TASKNAME, CHAIN_TID, PARENT_TID, STATUS as TASKSTATUS, LEVEL as LEV, PPFLAG, CASE WHEN PPGRACEPERIOD = -1 THEN 48 ELSE PPGRACEPERIOD END as PPGRACEPERIOD FROm ATLAS_DEFT.T_PRODUCTION_TASK
START WITH PPFLAG > 0
CONNECT BY NOCYCLE PRIOR TASKID=PARENT_TID ORDER SIBLINGS BY TASKID
) SELECT RECONSTRUCTEDTASKCHAIN.*, STATUS as DSSTATUS, TIMESTAMP, row_number() OVER(PARTITION BY RECONSTRUCTEDTASKCHAIN.TASKID order by t_production_dataset.TIMESTAMP) AS PART, t_production_dataset.NAME as dsname FROM ATLAS_DEFT.RECONSTRUCTEDTASKCHAIN, ATLAS_DEFT.t_production_dataset WHERE t_production_dataset.TASKID=RECONSTRUCTEDTASKCHAIN.TASKID
and instr(t_production_dataset.NAME,'.log.') = 0
) WHERE '''+uniqueTasksCond+''' PPFLAG>=0 ORDER BY LEV DESC
'''
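        # The START WITH PPFLAG > 0 / CONNECT BY NOCYCLE PRIOR TASKID=PARENT_TID
        # clause is an Oracle hierarchical query: it starts from tasks flagged for
        # cleanup and walks down their task chains, so each chain is reported as a
        # unit together with its non-log output datasets.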
cur = connection.cursor()
cur.execute(sqlRequest)
stats = cur.fetchall()
tasksInfoList = []
timesecs = []
i = 0
for taskEntry in stats:
timesecs.append(time.mktime(stats[i][10].timetuple()))
i += 1
minT = min(timesecs)
timesecs[:] = [x - minT for x in timesecs]
thresh = 21600
data_run = [
timesecs,
]
np.asarray(data_run)
clusters = hcluster.fclusterdata(np.transpose(np.asarray(data_run)), thresh, criterion="distance")
cluserssummary = {}
i = 0
for taskEntry in stats:
clusterID = clusters[i]
tmpDict = {"reqid": taskEntry[1], "taskid": taskEntry[0], "taskname": taskEntry[2], "dsname": taskEntry[12], "clusterid": clusterID}
tasksInfoList.append(tmpDict)
if clusterID not in cluserssummary:
cluserssummary[clusterID] = {"obsoleteStart":taskEntry[10], "obsoleteFinish":taskEntry[10], "requests":[taskEntry[1]], "tasks":[taskEntry[0]], "datasets":[taskEntry[12]]}
else:
if cluserssummary[clusterID]["obsoleteStart"] > taskEntry[10]:
cluserssummary[clusterID]["obsoleteStart"] = taskEntry[10]
if cluserssummary[clusterID]["obsoleteFinish"] < taskEntry[10]:
cluserssummary[clusterID]["obsoleteFinish"] = taskEntry[10]
if taskEntry[0] not in cluserssummary[clusterID]["tasks"]:
cluserssummary[clusterID]["tasks"].append(taskEntry[0])
if taskEntry[12] not in cluserssummary[clusterID]["datasets"]:
cluserssummary[clusterID]["datasets"].append(taskEntry[12])
if taskEntry[1] not in cluserssummary[clusterID]["requests"]:
cluserssummary[clusterID]["requests"].append(taskEntry[1])
i += 1
cluserssummaryList = []
for id, cluster in cluserssummary.items():
cluserssummaryList.append(cluster)
cluserssummaryList = sorted(cluserssummaryList, key=lambda k: k['obsoleteStart'], reverse=True)
data = {}
data['tasksInfo'] = tasksInfoList
data['built'] = datetime.now().strftime("%d %b %Y %H:%M:%S")
data['type'] = type
data['clusters'] = cluserssummaryList
return render_to_response('reportObsoletedTasksv3.html', data, RequestContext(request))
def prepareReportTasksV0(self, request):
sqlRequest = '''
SELECT * FROM (
WITH RECONSTRUCTEDTASKCHAIN AS (
SELECT TASKID, CHAIN_TID, PARENT_TID, STATUS as TASKSTATUS, LEVEL as LEV, PPFLAG, CASE WHEN PPGRACEPERIOD = -1 THEN 48 ELSE PPGRACEPERIOD END as PPGRACEPERIOD FROm ATLAS_DEFT.T_PRODUCTION_TASK
START WITH PPFLAG > 0
CONNECT BY NOCYCLE PRIOR TASKID=PARENT_TID ORDER SIBLINGS BY TASKID
) SELECT RECONSTRUCTEDTASKCHAIN.*, STATUS as DSSTATUS, TIMESTAMP, row_number() OVER(PARTITION BY RECONSTRUCTEDTASKCHAIN.TASKID order by t_production_dataset.TIMESTAMP) AS PART, t_production_dataset.NAME as dsname FROM ATLAS_DEFT.RECONSTRUCTEDTASKCHAIN, ATLAS_DEFT.t_production_dataset WHERE t_production_dataset.TASKID=RECONSTRUCTEDTASKCHAIN.TASKID
and instr(t_production_dataset.NAME,'.log.') = 0
) WHERE PART=1 and PPFLAG>=0 ORDER BY LEV ASC
'''
cur = connection.cursor()
cur.execute(sqlRequest)
stats = cur.fetchall()
tasksInfo = OrderedDict()
inversedMap = {}
for taskEntry in stats:
if taskEntry[4] == 1: #This is entry level of tasks chain
if taskEntry[5] == 1:
tmpDict = {"tofdel":"task force obsoleting"}
if taskEntry[5] == 2:
tmpDict = {"tofdel":"task chain obsoleting"}
tmpDict["date"] = taskEntry[8]
tmpDict["graceperiod"] = taskEntry[6]
tmpDict["dsname"] = taskEntry[10]
tmpDict["dsstatus"] = taskEntry[3]
tasksInfo[taskEntry[0]] = tmpDict
else:
if taskEntry[2] in inversedMap: #here we check if parent task already assigned
inversedMap[taskEntry[0]] = inversedMap[taskEntry[2]]
else:
inversedMap[taskEntry[0]] = taskEntry[2]
tempDic = tasksInfo[inversedMap[taskEntry[0]]]
if "childtasks" not in tempDic:
tempDic["childtasks"] = []
tempDic["childtasks"].append(taskEntry[0])
tempDic["date"] = taskEntry[8]
### If not deleted we should add graceperiod to date
tasksInfo[inversedMap[taskEntry[0]]] = tempDic
tasksInfo = sorted(tasksInfo.items(), key=lambda x: x[1]['date'], reverse=True)
tasksInfoList = []
for (key, value) in tasksInfo:
value['date'] = value['date'].strftime("%d %b %Y %H:%M:%S")
value['rootTask'] = key
tasksInfoList.append(value)
data = {}
data['tasksInfo'] = tasksInfoList
data['built'] = datetime.now().strftime("%d %b %Y %H:%M:%S")
return render_to_response('reportObsoletedTasks.html', data, RequestContext(request))
def prepareReport(self, request):
# if 'obstasks' in request.session['requestParams'] and request.session['requestParams']['obstasks'] == 'tasksview':
# return self.prepareReportTasksV1(request, "tasksview")
# elif 'obstasks' in request.session['requestParams'] and request.session['requestParams']['obstasks'] == 'dsview':
# return self.prepareReportTasksV1(request, "dsview")
# else:
return self.prepareReportTasksV4(request, "tasksview")
|
PanDAWMS/panda-bigmon-core
|
core/reports/ObsoletedTasksReport.py
|
Python
|
apache-2.0
| 9,797
|
from django.db import models, migrations
import uuid
from django.contrib.auth.hashers import make_password
PUBLIC_ID = 1
def apply_migration(apps, schema_editor):
Group = apps.get_model('auth', 'Group')
public_group = Group()
public_group.name = "public"
public_group.id = PUBLIC_ID
public_group.save()
def revert_migration(apps, schema_editor):
Group = apps.get_model('auth', 'Group')
Group.objects.filter(id=PUBLIC_ID).delete()
class Migration(migrations.Migration):
dependencies = [
('main', '0002_auto_20200821_0710'),
]
operations = [
migrations.RunPython(apply_migration, revert_migration)
]
|
kartta-labs/noter-backend
|
noter_backend/main/migrations/0003_create_public_group.py
|
Python
|
apache-2.0
| 664
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Django settings for bmcodelab project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
'''
import os
import pymysql # noqa: 402
pymysql.version_info = (1, 4, 6, 'final', 0) # change mysqlclient version
pymysql.install_as_MySQLdb()
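# The two lines above make PyMySQL report itself as mysqlclient 1.4.6 and
# register under the MySQLdb name, so Django's MySQL backend (which expects
# mysqlclient) accepts it; the shim is unnecessary if mysqlclient is installed.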
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Update the secret key to a value of your own before deploying the app.
SECRET_KEY = '!@#PUT-YOUR-SECRET-KEY-HERE!@#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# SECURITY WARNING: App Engine's security features ensure that it is safe to
# have ALLOWED_HOSTS = ['*'] when the app is deployed. If you deploy a Django
# app not on App Engine, make sure to set an appropriate host here.
# See https://docs.djangoproject.com/en/2.1/ref/settings/
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'bopis.apps.BopisConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bmcodelab.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bmcodelab.wsgi.application'
# [START db_setup]
if os.getenv('GAE_APPLICATION', None):
# Running on production App Engine, so connect to Google Cloud SQL using
# the unix socket at /cloudsql/<your-cloudsql-connection string>
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': 'Place/your/CloudSQL/hostname/here',
'NAME': 'bonjour_meal',
'USER': 'bmdbuser',
'PASSWORD': 'bmdbpassword',
}
}
else:
# Running locally so connect to either a local MySQL instance or connect to
# Cloud SQL via the proxy. To start the proxy via command line:
#
# $ cloud_sql_proxy -instances=[INSTANCE_CONNECTION_NAME]=tcp:3306
#
# See https://cloud.google.com/sql/docs/mysql-connect-proxy
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': '127.0.0.1',
'PORT': '3306',
'NAME': 'bonjour_meal',
'USER': 'bmdbuser',
'PASSWORD': 'bmdbpassword',
}
}
# [END db_setup]
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_ROOT = 'static'
STATIC_URL = '/static/'
|
google-business-communications/bm-bonjour-meal-django-starter-code
|
bonjourmeal-codelab/full-sample/bmcodelab/settings.py
|
Python
|
apache-2.0
| 5,084
|
# Copyright 2019 Google LLC All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START ndb_flask]
from flask import Flask
from google.cloud import ndb
client = ndb.Client()
def ndb_wsgi_middleware(wsgi_app):
def middleware(environ, start_response):
with client.context():
return wsgi_app(environ, start_response)
return middleware
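# Every request handled by the wrapped app now runs inside an ndb client
# context, so view code such as Book.query() below can reach Datastore without
# opening a context explicitly.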
app = Flask(__name__)
app.wsgi_app = ndb_wsgi_middleware(app.wsgi_app) # Wrap the app in middleware.
class Book(ndb.Model):
title = ndb.StringProperty()
@app.route('/')
def list_books():
books = Book.query()
return str([book.to_dict() for book in books])
# [END ndb_flask]
|
GoogleCloudPlatform/python-docs-samples
|
datastore/cloud-ndb/flask_app.py
|
Python
|
apache-2.0
| 1,175
|
class BulkInsert(object):
"""
Usage:
user_insert = BulkInsert(session)
address_insert = BulkInsert(session, dependencies=[user_insert])
for user in users:
user_insert.add(user)
            for address in user_addresses:
address_insert.add(address)
address_insert.flush()
"""
def __init__(self, session, count=250, dependencies=None):
self.session = session
self.count = count
self._objects = []
self.dependencies = dependencies or []
def add(self, obj):
self._objects.append(obj)
if len(self._objects) >= self.count:
self.flush()
def flush(self):
for dependency in self.dependencies:
dependency.flush()
self.session.bulk_save_objects(self._objects)
self.session.flush()
self._objects = []
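# Note on ordering: in the docstring example above, flushing address_insert
# first flushes user_insert, so rows that other objects reference reach the
# database before their dependents.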
|
deliveryhero/lymph-sqlalchemy
|
lymph/sqlalchemy/utils.py
|
Python
|
apache-2.0
| 878
|
#!/usr/bin/env python3
import http.cookiejar
import json
import re
import requests
import urllib.parse
import http.server
import webbrowser
from datetime import datetime, timezone, timedelta
import os.path
from pprint import pprint
PARSE_JSON = True
# Taken from Gwent Client logs. These are the Gwent client client id and secret.
GWENT_CLIENT_ID = "48242550540196492"
GWENT_CLIENT_SECRET = "d9571bba9c10784309a98aa59816696d018c445a0e7b8f979921fba639817392"
PASSWORD="hvXRLFcby0rTbvIqsR8rsDWpYV8="
# Initial log in needs to be with the GOG Client ID, then refreshed with the Gwent client id.
GOG_CLIENT_ID = "46899977096215655"
GOG_CLIENT_SECRET = "9d85c43b1482497dbbce61f6e4aa173a433796eeae2ca8c5f6129f2dc4de46d9"
CLIENT_VERSION = "1.1.24.16" # Just for their statistics
DATA_VERSION="33"
VERSION="11"
GAME="0.9.10"
SECRET="YRvdbcMfxsPSiv1dsk7h"
HASH="739BAE63391829F78129E18DFF6E6307711DACB3"
LOGIN_INSTRUCTIONS = """\
Your web browser has been opened to allow you to log in.
If that did not work, please manually open {auth_url}
After completing the login you will be redirected to a blank page. Copy the
random characters in the URL after &code= and paste them into this window
"""
GOGDATA_RE = re.compile(r"gogData\.?(.*?) = (.+);")
galaxy_config = {
"auth": "auth:/auth?client_id={client_id}&redirect_uri={redir_uri}&response_type=code&layout=client2",
"token": "auth:/token",
"gwent.decks": "decks:/users/{}/decks",
"gwent.cards": "decks:/users/{}/cards?_data_version=" + DATA_VERSION,
"gwent.card": "decks:/users/{}/cards/{}?_data_version=" + DATA_VERSION,
"gwent.templates": "decks:/card_templates?_data_version=" + DATA_VERSION,
"gwent.template": "decks:/card_templates/{}?_data_version=" + DATA_VERSION,
"gwent.deck_definitions": "decks:/deck_definitions?_data_version=" + DATA_VERSION,
"gwent.deck_definition": "decks:/deck_definitions/{}?_data_version=" + DATA_VERSION,
"gwent.definitions": "decks:/card_definitions?_version="+ VERSION + "&?_data_version=" + DATA_VERSION,
"gwent.definition": "decks:/card_definitions/{}?_data_version=" + DATA_VERSION,
"gwent.versions": "decks:/data_versions",
"gwent.version": "decks:/data_versions/{}",
"gwent.config": "config:/game/" + GAME + "_" + SECRET + "/config.json",
"gwent.game": "config:/game/" + GAME + "_" + SECRET + "/data/" + DATA_VERSION + "_" + HASH + "/data_definitions.zip",
"gwent.profile": "profile:/users/{}"
}
gog_servers = {
"gog": "https://www.gog.com",
"embed": "https://embed.gog.com",
"api": "https://api.gog.com",
"users": "https://users.gog.com",
"chat": "https://chat.gog.com",
"presence": "https://presence.gog.com",
"gameplay": "https://gameplay.gog.com",
"cfg": "https://cfg.gog.com",
"auth": "https://auth.gog.com",
"cont": "https://content-system.gog.com",
"cdn": "https://cdn.gog.com",
"decks": "https://seawolf-deck.gog.com",
"profile": "https://seawolf-profile.gog.com",
"config": "https://seawolf-config.gog.com"
}
def saveJson(filename, cardList):
filepath = os.path.join(xml_folder + "../outputs/" + filename)
print("Saving %s cards to: %s" % (len(cardList), filepath))
with open(filepath, "w", encoding="utf-8", newline="\n") as f:
json.dump(cardList, f, sort_keys=True, indent=2, separators=(',', ': '))
def galaxy_url(url_id, *args, **kwargs):
url_config = galaxy_config[url_id]
host_id, api_path = url_config.split(':', 1)
host_url = gog_servers[host_id]
url = urllib.parse.urljoin(host_url, api_path)
url_args = url.format(*args, **kwargs)
return url_args
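# Example (made-up user id): galaxy_url("gwent.decks", "12345") resolves the
# "decks" host and yields "https://seawolf-deck.gog.com/users/12345/decks".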
class ApiError(Exception):
def __init__(self, error, description):
self.error = error
self.description = description
class GogApi:
def __init__(self, token):
self.token = token
# Helpers
def request(self, *args, **kwargs):
if self.token.expired():
self.token.refresh()
headers = {"Authorization": "Bearer " + token.access_token}
headers.update(kwargs.pop("headers", {}))
return requests.request(*args, headers=headers, **kwargs)
def get(self, *args, **kwargs):
return self.request("GET", *args, **kwargs)
def post(self, *args, **kwargs):
return self.request("POST", *args, **kwargs)
def request_json(self, *args, **kwargs):
resp = self.request(*args, **kwargs)
if PARSE_JSON:
return json.loads(resp.text)
else:
return resp.text
def get_json(self, *args, **kwargs):
return self.request_json("GET", *args, **kwargs)
# Gwent APIs
def gwent_decks(self, user_id=None):
if user_id is None:
user_id = self.token.user_id
return self.get_json(galaxy_url("gwent.decks", user_id))
def gwent_cards(self, user_id=None):
if user_id is None:
user_id = self.token.user_id
return self.get_json(galaxy_url("gwent.cards", user_id))
def gwent_game(self):
return self.get(galaxy_url("gwent.game"))
def gwent_config(self):
return self.get_json(galaxy_url("gwent.config"))
def gwent_deck_definitions(self):
return self.get_json(galaxy_url("gwent.deck_definitions"))
def gwent_deck_definition(self, deck):
return self.get_json(galaxy_url("gwent.deck_definition", deck))
def gwent_templates(self):
return self.get_json(galaxy_url("gwent.templates"))
def gwent_template(self, card_id):
return self.get_json(galaxy_url("gwent.template", card_id))
def gwent_definitions(self):
return self.get_json(galaxy_url("gwent.definitions"))
def gwent_definition(self, card_id):
return self.get_json(galaxy_url("gwent.definition", card_id))
# Requires 'extended' scope that we don't have.
def gwent_data_versions(self):
return self.get_json(galaxy_url("gwent.versions"))
# Requires 'extended' scope that we don't have.
def gwent_data_version(self, version):
return self.get_json(galaxy_url("gwent.version", version))
def gwent_card(self, user_id=None):
if user_id is None:
user_id = self.token.user_id
return self.get_json(galaxy_url("gwent.card", user_id, "357886761177080483"))
def gwent_profile(self, user_id=None):
if user_id is None:
user_id = self.token.user_id
return self.get_json(galaxy_url("gwent.profile", user_id))
class Token:
def set_data(self, token_data):
if "error" in token_data:
raise ApiError(token_data["error"], token_data["error_description"])
self.access_token = token_data["access_token"]
self.refresh_token = token_data["refresh_token"]
self.expires_in = timedelta(seconds=token_data["expires_in"])
self.scope = token_data["scope"]
self.session_id = token_data["session_id"]
self.token_type = token_data["token_type"]
self.user_id = token_data["user_id"]
if "created" in token_data:
self.created = datetime.fromtimestamp(
token_data["created"], tz=timezone.utc)
else:
self.created = datetime.now(tz=timezone.utc)
def get_data(self):
token_data = {
"access_token": self.access_token,
"refresh_token": self.refresh_token,
"expires_in": int(self.expires_in.total_seconds()),
"scope": self.scope,
"session_id": self.session_id,
"token_type": self.token_type,
"user_id": self.user_id,
"created": int(self.created.timestamp())
}
return token_data
def __repr__(self):
return str(self.__dict__)
def load(self, filename):
with open(filename, "r") as f:
self.set_data(json.load(f))
def save(self, filename):
with open(filename, "w") as f:
json.dump(self.get_data(), f, indent=2, sort_keys=True)
def from_file(filename):
token = Token()
token.load(filename)
return token
def from_login(server_ip="127.0.0.1", browser_callback=None):
redirect_url = "https://embed.gog.com/on_login_success?origin=client"
redirect_url_quoted = urllib.parse.quote(redirect_url)
auth_url = galaxy_url(
"auth", client_id=GOG_CLIENT_ID, redir_uri=redirect_url_quoted)
if browser_callback is None:
webbrowser.open_new_tab(auth_url)
print(LOGIN_INSTRUCTIONS.format(auth_url=auth_url))
else:
browser_callback(auth_url)
login_code = input("Login code: ")
token_query = {
"client_id": GOG_CLIENT_ID,
"client_secret": GOG_CLIENT_SECRET,
"grant_type": "authorization_code",
"code": login_code,
"redirect_uri": redirect_url # Needed for origin verification
}
token_resp = requests.get(galaxy_url("token"), params=token_query)
token = Token()
token.set_data(token_resp.json())
return token
def refresh(self):
token_query = {
"client_id": GWENT_CLIENT_ID,
"client_secret": GWENT_CLIENT_SECRET,
"grant_type": "refresh_token",
"refresh_token": self.refresh_token
}
token_resp = requests.get(galaxy_url("token"), params=token_query)
self.set_data(token_resp.json())
    def expired(self, margin=timedelta(seconds=60)):
        # Treat the token as expired `margin` before its actual expiry time so
        # it gets refreshed before requests start failing.
        expires_at = self.created + self.expires_in
        return datetime.now(timezone.utc) > (expires_at - margin)
def saveJson(filename, cardList):
filepath = os.path.join("./" + filename)
print("Saving json to: %s" % (filepath))
with open(filepath, "w", encoding="utf-8", newline="\n") as f:
json.dump(cardList, f, sort_keys=True, indent=2, separators=(',', ': '))
def getZip(gog):
response = gog.gwent_game()
pprint(response)
with open('../outputs/data_definitions.zip', 'wb') as f:
f.write(response.content)
if __name__ == "__main__":
try:
token = Token.from_file("token.json")
token.refresh()
except (ApiError, FileNotFoundError) as e:
token = Token.from_login()
# Immediately refresh with Gwent client info.
token.refresh()
token.save("token.json")
api = GogApi(token)
getZip(api)
#pprint(api.gwent_config())
|
jamieadkins95/Roach-backend
|
scripts/gogapi.py
|
Python
|
apache-2.0
| 10,416
|
import numpy as np
try:
from aurora.ndarray import gpu_op, ndarray
except ImportError:
pass
class Node(object):
""" Node object represents a node in the computational graph"""
def __init__(self):
""" New node will be created by Op objects __call__ method"""
# list of inputs to this node
self.inputs = []
# operator
self.op = None
# constants
self.const = None
# name of the node mainly use for debugging
self.name = ""
def __add__(self, other):
""" Adding two nodes and returns a new node"""
if isinstance(other, Node):
return add(self, other)
else:
return add_const(self, other)
def __sub__(self, other):
if isinstance(other, Node):
return sub(self, other)
else:
return sub_const(self, other)
def __rsub__(self, other):
return ref_sub_const(self, other)
def __mul__(self, other):
if isinstance(other, Node):
return mul(self, other)
else:
return mul_const(self, other)
def __truediv__(self, other):
if isinstance(other, Node):
return div(self, other)
else:
return div_const(self, other)
# Allow left-hand-side add and multiply.
__radd__ = __add__
__rmul__ = __mul__
__rdiv__ = __truediv__
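# A minimal sketch of how these overloads build a graph (names illustrative):
#   x = Variable(name='x')
#   y = 2.0 * x + 1.0   # a MulByConstOp node feeding an AddByConstOp node
# Nothing is evaluated here; values are only produced when an executor runs
# the graph with concrete inputs.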
class Op(object):
""" Op class represents operations perform on nodes"""
def __call__(self):
"""
        Create a new node that represents this operation applied to graph nodes
Parameters
----------
None
Returns
-------
Node
The new node object
"""
new_node = Node()
new_node.op = self
return new_node
def compute(self, node, input_vals, output_val, use_numpy=True):
"""
Given the values of input nodes, compute the output value
Parameters
----------
:type use_numpy: object
:param use_numpy:
:param node: Node that performs the computation
:param input_vals: Values of input node
Returns
-------
:return: The output value of the node
"""
raise NotImplementedError
def gradient(self, node, output_grads):
"""
        Given the values of the output gradients, calculate the gradient
        contribution of each input node
Parameters
----------
:param node:
:param output_grads:
Returns
-------
:return: A list of gradient contribution to each input node respectively
"""
raise NotImplementedError
def infer_shape(self, node, input_shapes):
raise NotImplementedError
class AddOp(Op):
"""
"""
def __call__(self, nodeA, nodeB):
"""
        This operator adds two nodes element-wise
Parameters
----------
:param nodeA: LHS operand
:param nodeB: RHS operand
Returns
-------
:return: A new Node which represents the element-wise plus operation
"""
new_node = Op.__call__(self)
new_node.inputs = [nodeA, nodeB]
new_node.name = '({}+{})'.format(nodeA.name, nodeB.name)
return new_node
def compute(self, node, input_vals, output_val, use_numpy=True):
"""
Given values of two input nodes, return result of element-wise addition.
Parameters
----------
:param node:
:param input_vals: List of two input nodes
        Returns
--------
:return: The result of the element-wise addition operation
"""
assert len(input_vals) == 2
# return input_vals[0] + input_vals[1]
if use_numpy:
output_val[:] = input_vals[0] + input_vals[1]
else:
if input_vals[0].shape == input_vals[1].shape:
gpu_op.matrix_elementwise_add(input_vals[0], input_vals[1], output_val)
elif input_vals[0].shape == (1,):
const = input_vals[0].asnumpy()[0] # TODO: (upul) do we need this ? check it?
gpu_op.matrix_elementwise_add_by_const(input_vals[1], const, output_val)
elif input_vals[1].shape == (1,):
                const = input_vals[1].asnumpy()[0] # TODO: (upul) do we need this ? check it?
gpu_op.matrix_elementwise_add_by_const(input_vals[0], const, output_val)
else:
pass # TODO: (upul) handle input[0] and input[1] in different shapes
def gradient(self, node, output_grads):
"""
Given the values of output gradients, calculate the gradients of input nodes
Parameters
----------
:param node:
:param output_grads: Gradient contribution of output nodes
Returns
-------
:return: A list of gradient contribution of output nodes
"""
return [output_grads, output_grads]
def infer_shape(self, node, input_shapes):
assert len(input_shapes) == 2
assert input_shapes[0] == input_shapes[1]
return input_shapes[0]
class AddByConstOp(Op):
"""
Operator represents the element-wise addition of a node and a const
"""
def __call__(self, node_A, const_val):
"""
:param node:
:param const_val:
:return:
"""
new_node = Op.__call__(self)
new_node.const = const_val
new_node.inputs = [node_A]
new_node.name = '({0:s}+{1:f})'.format(node_A.name, const_val)
return new_node
def compute(self, node, input_vals, output_val, use_numpy=True):
"""
:param node:
:param input_vals:
:return:
"""
assert len(input_vals) == 1
if use_numpy:
output_val[:] = node.const + input_vals[0]
else:
gpu_op.matrix_elementwise_add_by_const(
input_vals[0], node.const, output_val)
def gradient(self, node, output_grads):
"""
:param node:
:param output_grads:
:return:
"""
return [output_grads]
def infer_shape(self, node, input_shapes):
assert len(input_shapes) == 1
# assert node.const.shape == input_shapes[0]
return input_shapes[0]
class SubOp(Op):
def __call__(self, node_A, node_B):
new_node = Op.__call__(self)
new_node.inputs = [node_A, node_B]
new_node.name = '({0:s}-{1:s})'.format(node_A.name, node_B.name)
return new_node
def compute(self, node, input_vals, output_val, use_numpy=True):
assert len(input_vals) == 2
if use_numpy:
output_val[:] = input_vals[0] - input_vals[1]
else:
gpu_op.matrix_elementwise_subtract(input_vals[0], input_vals[1], output_val)
def gradient(self, node, output_grads):
return [output_grads, -1 * output_grads]
def infer_shape(self, node, input_shapes):
assert len(input_shapes) == 2
assert input_shapes[0] == input_shapes[1]
return input_shapes[0]
class SubByConstOp(Op):
def __call__(self, node_A, const_val):
new_node = Op.__call__(self)
new_node.inputs = [node_A]
new_node.const = const_val
new_node.name = '({0:s}-{1:f})'.format(node_A.name, const_val)
return new_node
def compute(self, node, input_vals, output_val, use_numpy=True):
assert len(input_vals) == 1
if use_numpy:
output_val[:] = input_vals[0] - node.const
else:
gpu_op.matrix_elementwise_subtract_by_const(input_vals[0], node.const, output_val)
def gradient(self, node, output_grads):
return [output_grads]
def infer_shape(self, node, input_shapes):
assert len(input_shapes) == 1
return input_shapes[0]
class ReflectedSubByConstOp(Op):
def __call__(self, node_A, const_val):
new_node = Op.__call__(self)
new_node.inputs = [node_A]
new_node.const = const_val
new_node.name = '({0:f}-{1:s})'.format(const_val, node_A.name)
return new_node
def compute(self, node, input_vals, output_val, use_numpy=True):
assert len(input_vals) == 1
        if use_numpy:
            output_val[:] = node.const - input_vals[0]
        else:
            pass  # TODO: GPU kernel for reflected subtraction is not wired up here
def gradient(self, node, output_grads):
return [-1 * output_grads]
def infer_shape(self, node, input_shapes):
assert len(input_shapes) == 1
return input_shapes[0]
class OnesLikeOp(Op):
def __call__(self, node_A):
new_node = Op.__call__(self)
new_node.inputs = [node_A]
new_node.name = 'Oneslike({})'.format(node_A.name)
return new_node
def compute(self, node, input_vals, output_val, use_numpy=True):
assert len(input_vals) == 1
if use_numpy:
assert isinstance(input_vals[0], np.ndarray)
output_val[:] = np.ones(input_vals[0].shape)
else:
gpu_op.array_set(output_val, 1)
def gradient(self, node, output_grads):
return [zeros_like(node.inputs[0])]
def infer_shape(self, node, input_shapes):
assert len(input_shapes) == 1
if input_shapes[0] == 1: # TODO (upul) do we need this if ?
return (1,)
else:
return input_shapes[0]
class ZerosLikeOp(Op):
def __call__(self, node_A):
new_node = Op.__call__(self)
new_node.inputs = [node_A]
new_node.name = 'Zeroslike({})'.format(node_A.name)
return new_node
def compute(self, node, input_vals, output_val, use_numpy=True):
assert len(input_vals) == 1
if use_numpy:
assert isinstance(input_vals[0], np.ndarray)
output_val[:] = np.zeros(input_vals[0].shape)
else:
gpu_op.array_set(output_val, 0)
def gradient(self, node, output_grads):
return [zeros_like(node.inputs[0])]
def infer_shape(self, node, input_shapes):
assert len(input_shapes) == 1
if input_shapes[0] == 1: # TODO (upul) do we need this if ?
return (1,)
else:
return input_shapes[0]
class ReshapeOp(Op):
def __call__(self, node_A, newshape):
new_node = Op.__call__(self)
new_node.inputs = [node_A]
new_node.newshape = newshape
new_node.name = 'Reshape({})'.format(node_A.name)
return new_node
def compute(self, node, input_vals, output_val, use_numpy=True):
assert len(input_vals) == 1
if use_numpy:
assert isinstance(input_vals[0], np.ndarray)
output_val[:] = np.reshape(input_vals[0], newshape=node.newshape)
else:
            # TODO: (upul) changing shape is not an expensive operation, but this looks
            # : a bit ugly. Can't we find an alternative approach?
input_shape = input_vals[0].shape
ndarray.reshape(output_val, input_shape)
input_vals[0].copyto(output_val)
ndarray.reshape(output_val, node.newshape)
def gradient(self, node, output_grads):
return [reshape_grad(node.inputs[0], output_grads)]
def infer_shape(self, node, input_shapes):
assert len(input_shapes) == 1
return node.newshape
class ReshapeGradientOp(Op):
def __call__(self, node_A, node_B):
new_node = Op.__call__(self)
new_node.inputs = [node_A, node_B]
new_node.name = 'ReshapeGradientOp({0:s})'.format(node_A.name)
return new_node
def compute(self, node, input_vals, output_val, use_numpy=True):
assert len(input_vals) == 2
if use_numpy:
output_val[:] = input_vals[1].reshape(input_vals[0].shape)
else:
            # TODO: (upul) changing shape is not an expensive operation, but this looks
            # : a bit ugly. Can't we find an alternative approach?
ndarray.reshape(output_val, input_vals[0].shape)
input_vals[1].copyto(output_val)
def gradient(self, node, output_grads):
raise NotImplementedError('Gradient of ReshapeGradientOp not supported')
def infer_shape(self, node, input_shapes):
assert len(input_shapes) == 2
return input_shapes[0]
class MulOp(Op):
def __call__(self, node_A, node_B):
new_node = Op.__call__(self)
new_node.inputs = [node_A, node_B]
new_node.name = '({0:s}*{1:s})'.format(node_A.name, node_B.name)
return new_node
def compute(self, node, input_vals, output_val, use_numpy=True):
assert len(input_vals) == 2
if use_numpy:
output_val[:] = input_vals[0] * input_vals[1]
else:
ip_1_shape = input_vals[0].shape
ip_2_shape = input_vals[1].shape
if ip_1_shape == ip_2_shape:
gpu_op.matrix_elementwise_multiply(input_vals[0], input_vals[1], output_val)
elif ip_1_shape == (1,):
const_val = input_vals[0].asnumpy()[0]
gpu_op.matrix_elementwise_multiply_by_const(input_vals[1], const_val, output_val)
elif ip_2_shape == (1,):
const_val = input_vals[1].asnumpy()[0]
gpu_op.matrix_elementwise_multiply_by_const(input_vals[0], const_val, output_val)
else:
pass # TODO (upul) handle ip_1_shape != ip_2_shape
def gradient(self, node, output_grads):
return [node.inputs[1] * output_grads, node.inputs[0] * output_grads]
def infer_shape(self, node, input_shapes):
assert len(input_shapes) == 2
if input_shapes[0] == (1,):
return input_shapes[1]
elif input_shapes[1] == (1,):
return input_shapes[0]
elif input_shapes[0] == input_shapes[1]:
return input_shapes[0]
else:
            stmt = 'Invalid dimensions {0}, {1}'.format(input_shapes[0], input_shapes[1])
raise RuntimeError(stmt)
class MulByConstOp(Op):
def __call__(self, node_A, const_val):
new_node = Op.__call__(self)
new_node.inputs = [node_A]
new_node.const = const_val
new_node.name = '({0:s}*{1:f})'.format(node_A.name, const_val)
return new_node
def compute(self, node, input_vals, output_val, use_numpy=True):
assert len(input_vals) == 1
if use_numpy:
output_val[:] = node.const * input_vals[0]
else:
gpu_op.matrix_elementwise_multiply_by_const(
input_vals[0], node.const, output_val)
def gradient(self, node, output_grads):
return [node.const * output_grads]
def infer_shape(self, node, input_shapes):
assert len(input_shapes) == 1
return input_shapes[0]
class DivOp(Op):
def __call__(self, node_A, node_B):
new_node = Op.__call__(self)
new_node.inputs = [node_A, node_B]
new_node.name = '({0:s}/{1:s})'.format(node_A.name, node_B.name)
return new_node
def compute(self, node, input_vals, output_val, use_numpy=True):
assert len(input_vals) == 2
if use_numpy:
output_val[:] = input_vals[0] / input_vals[1]
else:
gpu_op.matrix_elementwise_division(input_vals[0], input_vals[1], output_val)
def gradient(self, node, output_grads):
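        # For Y = A / B: dY/dA = 1/B and dY/dB = -A/B**2, each scaled by the
        # incoming gradient.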
grad_A = output_grads / node.inputs[1]
grad_B = -1.0 * output_grads * node.inputs[0] / (node.inputs[1] * node.inputs[1])
return [grad_A, grad_B]
def infer_shape(self, node, input_shapes):
assert len(input_shapes) == 2
assert input_shapes[0] == input_shapes[1]
return input_shapes[0]
class DivByConstOp(Op):
def __call__(self, node_A, const_val):
new_node = Op.__call__(self)
new_node.inputs = [node_A]
new_node.const = const_val
new_node.name = '({0:s}/{1:f})'.format(node_A.name, const_val)
return new_node
def compute(self, node, input_vals, output_val, use_numpy=True):
assert len(input_vals) == 1
if use_numpy:
output_val[:] = input_vals[0] / node.const
else:
gpu_op.matrix_elementwise_div_by_const(input_vals[0], node.const, output_val)
def gradient(self, node, output_grads):
return [output_grads / node.const]
def infer_shape(self, node, input_shapes):
assert len(input_shapes) == 1
return input_shapes[0]
class PlaceholderOp(Op):
"""Op to feed value to a nodes."""
def __call__(self):
"""Creates a variable node."""
new_node = Op.__call__(self)
return new_node
def compute(self, node, input_vals, output_val, use_numpy=True):
"""No compute function since node value is fed directly in Executor."""
assert False, "placeholder values provided by feed_dict"
def gradient(self, node, output_grad):
"""No gradient function since node has no inputs."""
return None
class ReduceSumOp(Op):
"""
"""
def __call__(self, node_A):
new_node = Op.__call__(self)
new_node.inputs = [node_A]
new_node.name = 'ReduceSum({0:s})'.format(node_A.name)
return new_node
def compute(self, node, input_vals, output_val, use_numpy=True):
"""
:param node:
:param input_vals:
:param output_val:
:param use_numpy:
:return:
"""
assert len(input_vals) == 1
if use_numpy:
assert isinstance(output_val, np.ndarray)
output_val[:] = np.sum(input_vals[0], axis=0)
else:
gpu_op.reduce_sum_axis_zero(input_vals[0], output_val)
def gradient(self, node, output_grads):
return [output_grads]
def infer_shape(self, node, input_shapes):
assert len(input_shapes) == 1
if len(input_shapes[0]) == 1:
return (1,)
else:
return tuple(input_shapes[0][i]
for i in range(1, len(input_shapes[0])))
class BroadcastToOp(Op):
def __call__(self, node_A, node_B):
new_node = Op.__call__(self)
new_node.inputs = [node_A, node_B]
new_node.name = 'BroadcastTo({0:s}, {1:s}.shape)'.format(node_A.name, node_B.name)
return new_node
def compute(self, node, input_vals, output_val, use_numpy=True):
assert len(input_vals) == 2
if use_numpy:
output_val[:] = np.broadcast_to(input_vals[0], input_vals[1].shape)
else:
gpu_op.broadcast_to(input_vals[0], output_val)
def gradient(self, node, output_grads):
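        # The adjoint of a broadcast is a summation: gradients flowing into the
        # broadcast result are summed back over the broadcast axis.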
grad_A = reduce_sum(output_grads)
grad_B = zeros_like(node.inputs[1])
return [grad_A, grad_B]
def infer_shape(self, node, input_shapes):
assert len(input_shapes) == 2
return input_shapes[1]
class MatMulOp(Op): # TODO: (upul) double check what this class is doing
def __call__(self, node_A, node_B, trans_A=False, trans_B=False):
new_node = Op.__call__(self)
new_node.inputs = [node_A, node_B]
new_node.trans_A = trans_A
new_node.trans_B = trans_B
        new_node.name = 'MatMul({0:s}, {1:s})'.format(node_A.name, node_B.name)
return new_node
def compute(self, node, input_vals, output_val, use_numpy=True):
assert len(input_vals) == 2
if use_numpy:
if node.trans_A:
input_vals[0] = input_vals[0].T
if node.trans_B:
input_vals[1] = input_vals[1].T
output_val[:] = np.dot(input_vals[0], input_vals[1])
else:
gpu_op.matrix_multiply(
input_vals[0], node.trans_A,
input_vals[1], node.trans_B,
output_val)
def gradient(self, node, output_grads):
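        # For Y = A.B: dL/dA = dL/dY . B^T and dL/dB = A^T . dL/dY, which the two
        # matmul calls below express via the trans_A/trans_B flags.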
grad_A = matmul(output_grads, node.inputs[1], trans_A=False, trans_B=True)
grad_B = matmul(node.inputs[0], output_grads, trans_A=True, trans_B=False)
return [grad_A, grad_B]
def infer_shape(self, node, input_shapes):
"""Need to handle input_vals[0].shape != input_vals[1].shape"""
assert len(input_shapes) == 2
(row_A, col_A) = input_shapes[0]
if node.trans_A:
row_A, col_A = col_A, row_A
(row_B, col_B) = input_shapes[1]
if node.trans_B:
row_B, col_B = col_B, row_B
assert col_A == row_B
return (row_A, col_B)
def Variable(name):
"""User defined variables in an expression.
e.g. x = Variable(name = "x")
"""
placeholder_node = placeholder()
placeholder_node.name = name
return placeholder_node
def Parameter(name, init):
"""
    example: w = Parameter(name='w', init=...)
:param name:
:param init:
:return:
"""
parameter_node = placeholder()
parameter_node.name = name
parameter_node.const = init
return parameter_node
# Global singleton operations
add = AddOp()
add_const = AddByConstOp()
sub = SubOp()
sub_const = SubByConstOp()
ref_sub_const = ReflectedSubByConstOp()
mul = MulOp()
mul_const = MulByConstOp()
div = DivOp()
div_const = DivByConstOp()
zeros_like = ZerosLikeOp()
ones_like = OnesLikeOp()
reduce_sum = ReduceSumOp()
broadcast_to = BroadcastToOp()
reshape = ReshapeOp()
reshape_grad = ReshapeGradientOp()
matmul = MatMulOp()
placeholder = PlaceholderOp()
|
upul/Aurora
|
aurora/autodiff/autodiff.py
|
Python
|
apache-2.0
| 21,346
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from functools import lru_cache
import inspect
import pickle
import pytest
import random
import textwrap
import numpy as np
import pyarrow as pa
import pyarrow.compute as pc
all_array_types = [
('bool', [True, False, False, True, True]),
('uint8', np.arange(5)),
('int8', np.arange(5)),
('uint16', np.arange(5)),
('int16', np.arange(5)),
('uint32', np.arange(5)),
('int32', np.arange(5)),
('uint64', np.arange(5, 10)),
('int64', np.arange(5, 10)),
('float', np.arange(0, 0.5, 0.1)),
('double', np.arange(0, 0.5, 0.1)),
('string', ['a', 'b', None, 'ddd', 'ee']),
('binary', [b'a', b'b', b'c', b'ddd', b'ee']),
(pa.binary(3), [b'abc', b'bcd', b'cde', b'def', b'efg']),
(pa.list_(pa.int8()), [[1, 2], [3, 4], [5, 6], None, [9, 16]]),
(pa.large_list(pa.int16()), [[1], [2, 3, 4], [5, 6], None, [9, 16]]),
(pa.struct([('a', pa.int8()), ('b', pa.int8())]), [
{'a': 1, 'b': 2}, None, {'a': 3, 'b': 4}, None, {'a': 5, 'b': 6}]),
]
exported_functions = [
func for (name, func) in sorted(pc.__dict__.items())
if hasattr(func, '__arrow_compute_function__')]
exported_option_classes = [
cls for (name, cls) in sorted(pc.__dict__.items())
if (isinstance(cls, type) and
cls is not pc.FunctionOptions and
issubclass(cls, pc.FunctionOptions))]
numerical_arrow_types = [
pa.int8(),
pa.int16(),
pa.int64(),
pa.uint8(),
pa.uint16(),
pa.uint64(),
pa.float32(),
pa.float64()
]
def test_exported_functions():
# Check that all exported concrete functions can be called with
# the right number of arguments.
# Note that unregistered functions (e.g. with a mismatching name)
# will raise KeyError.
functions = exported_functions
assert len(functions) >= 10
for func in functions:
args = [object()] * func.__arrow_compute_function__['arity']
with pytest.raises(TypeError,
match="Got unexpected argument type "
"<class 'object'> for compute function"):
func(*args)
def test_exported_option_classes():
classes = exported_option_classes
assert len(classes) >= 10
for cls in classes:
# Option classes must have an introspectable constructor signature,
# and that signature should not have any *args or **kwargs.
sig = inspect.signature(cls)
for param in sig.parameters.values():
assert param.kind not in (param.VAR_POSITIONAL,
param.VAR_KEYWORD)
def test_list_functions():
assert len(pc.list_functions()) > 10
assert "add" in pc.list_functions()
def _check_get_function(name, expected_func_cls, expected_ker_cls,
min_num_kernels=1):
func = pc.get_function(name)
assert isinstance(func, expected_func_cls)
n = func.num_kernels
assert n >= min_num_kernels
assert n == len(func.kernels)
assert all(isinstance(ker, expected_ker_cls) for ker in func.kernels)
def test_get_function_scalar():
_check_get_function("add", pc.ScalarFunction, pc.ScalarKernel, 8)
def test_get_function_vector():
_check_get_function("unique", pc.VectorFunction, pc.VectorKernel, 8)
def test_get_function_aggregate():
_check_get_function("mean", pc.ScalarAggregateFunction,
pc.ScalarAggregateKernel, 8)
def test_call_function_with_memory_pool():
arr = pa.array(["foo", "bar", "baz"])
indices = np.array([2, 2, 1])
result1 = arr.take(indices)
result2 = pc.call_function('take', [arr, indices],
memory_pool=pa.default_memory_pool())
expected = pa.array(["baz", "baz", "bar"])
assert result1.equals(expected)
assert result2.equals(expected)
result3 = pc.take(arr, indices, memory_pool=pa.default_memory_pool())
assert result3.equals(expected)
def test_pickle_functions():
# Pickle registered functions
for name in pc.list_functions():
func = pc.get_function(name)
reconstructed = pickle.loads(pickle.dumps(func))
assert type(reconstructed) is type(func)
assert reconstructed.name == func.name
assert reconstructed.arity == func.arity
assert reconstructed.num_kernels == func.num_kernels
def test_pickle_global_functions():
# Pickle global wrappers (manual or automatic) of registered functions
for name in pc.list_functions():
func = getattr(pc, name)
reconstructed = pickle.loads(pickle.dumps(func))
assert reconstructed is func
def test_function_attributes():
# Sanity check attributes of registered functions
for name in pc.list_functions():
func = pc.get_function(name)
assert isinstance(func, pc.Function)
assert func.name == name
kernels = func.kernels
assert func.num_kernels == len(kernels)
assert all(isinstance(ker, pc.Kernel) for ker in kernels)
assert func.arity >= 1 # no varargs functions for now
repr(func)
for ker in kernels:
repr(ker)
def test_input_type_conversion():
# Automatic array conversion from Python
arr = pc.add([1, 2], [4, None])
assert arr.to_pylist() == [5, None]
# Automatic scalar conversion from Python
arr = pc.add([1, 2], 4)
assert arr.to_pylist() == [5, 6]
# Other scalar type
assert pc.equal(["foo", "bar", None],
"foo").to_pylist() == [True, False, None]
@pytest.mark.parametrize('arrow_type', numerical_arrow_types)
def test_sum_array(arrow_type):
arr = pa.array([1, 2, 3, 4], type=arrow_type)
assert arr.sum().as_py() == 10
assert pc.sum(arr).as_py() == 10
arr = pa.array([], type=arrow_type)
assert arr.sum().as_py() is None # noqa: E711
@pytest.mark.parametrize('arrow_type', numerical_arrow_types)
def test_sum_chunked_array(arrow_type):
arr = pa.chunked_array([pa.array([1, 2, 3, 4], type=arrow_type)])
assert pc.sum(arr).as_py() == 10
arr = pa.chunked_array([
pa.array([1, 2], type=arrow_type), pa.array([3, 4], type=arrow_type)
])
assert pc.sum(arr).as_py() == 10
arr = pa.chunked_array([
pa.array([1, 2], type=arrow_type),
pa.array([], type=arrow_type),
pa.array([3, 4], type=arrow_type)
])
assert pc.sum(arr).as_py() == 10
arr = pa.chunked_array((), type=arrow_type)
assert arr.num_chunks == 0
assert pc.sum(arr).as_py() is None # noqa: E711
def test_mode_array():
# ARROW-9917
arr = pa.array([1, 1, 3, 4, 3, 5], type='int64')
mode = pc.mode(arr)
assert len(mode) == 1
assert mode[0].as_py() == {"mode": 1, "count": 2}
mode = pc.mode(arr, 2)
assert len(mode) == 2
assert mode[0].as_py() == {"mode": 1, "count": 2}
assert mode[1].as_py() == {"mode": 3, "count": 2}
arr = pa.array([], type='int64')
assert len(pc.mode(arr)) == 0
def test_mode_chunked_array():
# ARROW-9917
arr = pa.chunked_array([pa.array([1, 1, 3, 4, 3, 5], type='int64')])
mode = pc.mode(arr)
assert len(mode) == 1
assert mode[0].as_py() == {"mode": 1, "count": 2}
mode = pc.mode(arr, 2)
assert len(mode) == 2
assert mode[0].as_py() == {"mode": 1, "count": 2}
assert mode[1].as_py() == {"mode": 3, "count": 2}
arr = pa.chunked_array((), type='int64')
assert arr.num_chunks == 0
assert len(pc.mode(arr)) == 0
def test_variance():
data = [1, 2, 3, 4, 5, 6, 7, 8]
assert pc.variance(data).as_py() == 5.25
assert pc.variance(data, ddof=0).as_py() == 5.25
assert pc.variance(data, ddof=1).as_py() == 6.0
def test_match_substring():
arr = pa.array(["ab", "abc", "ba", None])
result = pc.match_substring(arr, "ab")
expected = pa.array([True, True, False, None])
assert expected.equals(result)
def test_split_pattern():
arr = pa.array(["-foo---bar--", "---foo---b"])
result = pc.split_pattern(arr, pattern="---")
expected = pa.array([["-foo", "bar--"], ["", "foo", "b"]])
assert expected.equals(result)
result = pc.split_pattern(arr, pattern="---", max_splits=1)
expected = pa.array([["-foo", "bar--"], ["", "foo---b"]])
assert expected.equals(result)
result = pc.split_pattern(arr, pattern="---", max_splits=1, reverse=True)
expected = pa.array([["-foo", "bar--"], ["---foo", "b"]])
assert expected.equals(result)
def test_split_whitespace_utf8():
arr = pa.array(["foo bar", " foo \u3000\tb"])
result = pc.utf8_split_whitespace(arr)
expected = pa.array([["foo", "bar"], ["", "foo", "b"]])
assert expected.equals(result)
result = pc.utf8_split_whitespace(arr, max_splits=1)
expected = pa.array([["foo", "bar"], ["", "foo \u3000\tb"]])
assert expected.equals(result)
result = pc.utf8_split_whitespace(arr, max_splits=1, reverse=True)
expected = pa.array([["foo", "bar"], [" foo", "b"]])
assert expected.equals(result)
def test_split_whitespace_ascii():
arr = pa.array(["foo bar", " foo \u3000\tb"])
result = pc.ascii_split_whitespace(arr)
expected = pa.array([["foo", "bar"], ["", "foo", "\u3000", "b"]])
assert expected.equals(result)
result = pc.ascii_split_whitespace(arr, max_splits=1)
expected = pa.array([["foo", "bar"], ["", "foo \u3000\tb"]])
assert expected.equals(result)
result = pc.ascii_split_whitespace(arr, max_splits=1, reverse=True)
expected = pa.array([["foo", "bar"], [" foo \u3000", "b"]])
assert expected.equals(result)
def test_min_max():
# An example generated function wrapper with possible options
data = [4, 5, 6, None, 1]
s = pc.min_max(data)
assert s.as_py() == {'min': 1, 'max': 6}
s = pc.min_max(data, options=pc.MinMaxOptions())
assert s.as_py() == {'min': 1, 'max': 6}
s = pc.min_max(data, options=pc.MinMaxOptions(null_handling='skip'))
assert s.as_py() == {'min': 1, 'max': 6}
s = pc.min_max(data, options=pc.MinMaxOptions(null_handling='emit_null'))
assert s.as_py() == {'min': None, 'max': None}
# Options as dict of kwargs
s = pc.min_max(data, options={'null_handling': 'emit_null'})
assert s.as_py() == {'min': None, 'max': None}
# Options as named functions arguments
s = pc.min_max(data, null_handling='emit_null')
assert s.as_py() == {'min': None, 'max': None}
# Both options and named arguments
with pytest.raises(TypeError):
s = pc.min_max(data, options=pc.MinMaxOptions(),
null_handling='emit_null')
# Wrong options type
options = pc.TakeOptions()
with pytest.raises(TypeError):
s = pc.min_max(data, options=options)
# Missing argument
with pytest.raises(
TypeError,
match=r"min_max\(\) missing 1 required positional argument"):
s = pc.min_max()
def test_is_valid():
# An example generated function wrapper without options
data = [4, 5, None]
assert pc.is_valid(data).to_pylist() == [True, True, False]
with pytest.raises(TypeError):
pc.is_valid(data, options=None)
def test_generated_docstrings():
assert pc.min_max.__doc__ == textwrap.dedent("""\
Compute the minimum and maximum values of a numeric array.
Null values are ignored by default.
This can be changed through MinMaxOptions.
Parameters
----------
array : Array-like
Argument to compute function
memory_pool : pyarrow.MemoryPool, optional
If not passed, will allocate memory from the default memory pool.
options : pyarrow.compute.MinMaxOptions, optional
Parameters altering compute function semantics
**kwargs: optional
Parameters for MinMaxOptions constructor. Either `options`
or `**kwargs` can be passed, but not both at the same time.
""")
assert pc.add.__doc__ == textwrap.dedent("""\
Add the arguments element-wise.
Results will wrap around on integer overflow.
Use function "add_checked" if you want overflow
to return an error.
Parameters
----------
x : Array-like or scalar-like
Argument to compute function
y : Array-like or scalar-like
Argument to compute function
memory_pool : pyarrow.MemoryPool, optional
If not passed, will allocate memory from the default memory pool.
""")
# We use isprintable to find about codepoints that Python doesn't know, but
# utf8proc does (or in a future version of Python the other way around).
# These codepoints cannot be compared between Arrow and the Python
# implementation.
@lru_cache()
def find_new_unicode_codepoints():
new = set()
characters = [chr(c) for c in range(0x80, 0x11000)
if not (0xD800 <= c < 0xE000)]
is_printable = pc.utf8_is_printable(pa.array(characters)).to_pylist()
for i, c in enumerate(characters):
if is_printable[i] != c.isprintable():
new.add(ord(c))
return new
# Python claims these are not alpha, not sure why; they are in
# gc='Other Letter': https://graphemica.com/%E1%B3%B2
unknown_issue_is_alpha = {0x1cf2, 0x1cf3}
# utf8proc does not know if codepoints are lower case
utf8proc_issue_is_lower = {
0xaa, 0xba, 0x2b0, 0x2b1, 0x2b2, 0x2b3, 0x2b4,
0x2b5, 0x2b6, 0x2b7, 0x2b8, 0x2c0, 0x2c1, 0x2e0,
0x2e1, 0x2e2, 0x2e3, 0x2e4, 0x37a, 0x1d2c, 0x1d2d,
0x1d2e, 0x1d2f, 0x1d30, 0x1d31, 0x1d32, 0x1d33,
0x1d34, 0x1d35, 0x1d36, 0x1d37, 0x1d38, 0x1d39,
0x1d3a, 0x1d3b, 0x1d3c, 0x1d3d, 0x1d3e, 0x1d3f,
0x1d40, 0x1d41, 0x1d42, 0x1d43, 0x1d44, 0x1d45,
0x1d46, 0x1d47, 0x1d48, 0x1d49, 0x1d4a, 0x1d4b,
0x1d4c, 0x1d4d, 0x1d4e, 0x1d4f, 0x1d50, 0x1d51,
0x1d52, 0x1d53, 0x1d54, 0x1d55, 0x1d56, 0x1d57,
0x1d58, 0x1d59, 0x1d5a, 0x1d5b, 0x1d5c, 0x1d5d,
0x1d5e, 0x1d5f, 0x1d60, 0x1d61, 0x1d62, 0x1d63,
0x1d64, 0x1d65, 0x1d66, 0x1d67, 0x1d68, 0x1d69,
0x1d6a, 0x1d78, 0x1d9b, 0x1d9c, 0x1d9d, 0x1d9e,
0x1d9f, 0x1da0, 0x1da1, 0x1da2, 0x1da3, 0x1da4,
0x1da5, 0x1da6, 0x1da7, 0x1da8, 0x1da9, 0x1daa,
0x1dab, 0x1dac, 0x1dad, 0x1dae, 0x1daf, 0x1db0,
0x1db1, 0x1db2, 0x1db3, 0x1db4, 0x1db5, 0x1db6,
0x1db7, 0x1db8, 0x1db9, 0x1dba, 0x1dbb, 0x1dbc,
0x1dbd, 0x1dbe, 0x1dbf, 0x2071, 0x207f, 0x2090,
0x2091, 0x2092, 0x2093, 0x2094, 0x2095, 0x2096,
0x2097, 0x2098, 0x2099, 0x209a, 0x209b, 0x209c,
0x2c7c, 0x2c7d, 0xa69c, 0xa69d, 0xa770, 0xa7f8,
0xa7f9, 0xab5c, 0xab5d, 0xab5e, 0xab5f, }
# utf8proc does not store if a codepoint is numeric
numeric_info_missing = {
0x3405, 0x3483, 0x382a, 0x3b4d, 0x4e00, 0x4e03,
0x4e07, 0x4e09, 0x4e5d, 0x4e8c, 0x4e94, 0x4e96,
0x4ebf, 0x4ec0, 0x4edf, 0x4ee8, 0x4f0d, 0x4f70,
0x5104, 0x5146, 0x5169, 0x516b, 0x516d, 0x5341,
0x5343, 0x5344, 0x5345, 0x534c, 0x53c1, 0x53c2,
0x53c3, 0x53c4, 0x56db, 0x58f1, 0x58f9, 0x5e7a,
0x5efe, 0x5eff, 0x5f0c, 0x5f0d, 0x5f0e, 0x5f10,
0x62fe, 0x634c, 0x67d2, 0x6f06, 0x7396, 0x767e,
0x8086, 0x842c, 0x8cae, 0x8cb3, 0x8d30, 0x9621,
0x9646, 0x964c, 0x9678, 0x96f6, 0xf96b, 0xf973,
0xf978, 0xf9b2, 0xf9d1, 0xf9d3, 0xf9fd, 0x10fc5,
0x10fc6, 0x10fc7, 0x10fc8, 0x10fc9, 0x10fca,
0x10fcb, }
# utf8proc has no digit/numeric information
digit_info_missing = {
0xb2, 0xb3, 0xb9, 0x1369, 0x136a, 0x136b, 0x136c,
0x136d, 0x136e, 0x136f, 0x1370, 0x1371, 0x19da, 0x2070,
0x2074, 0x2075, 0x2076, 0x2077, 0x2078, 0x2079, 0x2080,
0x2081, 0x2082, 0x2083, 0x2084, 0x2085, 0x2086, 0x2087,
0x2088, 0x2089, 0x2460, 0x2461, 0x2462, 0x2463, 0x2464,
0x2465, 0x2466, 0x2467, 0x2468, 0x2474, 0x2475, 0x2476,
0x2477, 0x2478, 0x2479, 0x247a, 0x247b, 0x247c, 0x2488,
0x2489, 0x248a, 0x248b, 0x248c, 0x248d, 0x248e, 0x248f,
0x2490, 0x24ea, 0x24f5, 0x24f6, 0x24f7, 0x24f8, 0x24f9,
0x24fa, 0x24fb, 0x24fc, 0x24fd, 0x24ff, 0x2776, 0x2777,
0x2778, 0x2779, 0x277a, 0x277b, 0x277c, 0x277d, 0x277e,
0x2780, 0x2781, 0x2782, 0x2783, 0x2784, 0x2785, 0x2786,
0x2787, 0x2788, 0x278a, 0x278b, 0x278c, 0x278d, 0x278e,
0x278f, 0x2790, 0x2791, 0x2792, 0x10a40, 0x10a41,
0x10a42, 0x10a43, 0x10e60, 0x10e61, 0x10e62, 0x10e63,
0x10e64, 0x10e65, 0x10e66, 0x10e67, 0x10e68, }
codepoints_ignore = {
'is_alnum': numeric_info_missing | digit_info_missing |
unknown_issue_is_alpha,
'is_alpha': unknown_issue_is_alpha,
'is_digit': digit_info_missing,
'is_numeric': numeric_info_missing,
'is_lower': utf8proc_issue_is_lower
}
@pytest.mark.parametrize('function_name', ['is_alnum', 'is_alpha',
'is_ascii', 'is_decimal',
'is_digit', 'is_lower',
'is_numeric', 'is_printable',
'is_space', 'is_upper', ])
@pytest.mark.parametrize('variant', ['ascii', 'utf8'])
def test_string_py_compat_boolean(function_name, variant):
arrow_name = variant + "_" + function_name
py_name = function_name.replace('_', '')
ignore = codepoints_ignore.get(function_name, set()) |\
find_new_unicode_codepoints()
for i in range(128 if variant == 'ascii' else 0x11000):
if i in range(0xD800, 0xE000):
continue # bug? pyarrow doesn't allow utf16 surrogates
# the issues we know of, we skip
if i in ignore:
continue
# Compare results with the equivalent Python predicate
# (except "is_space" where functions are known to be incompatible)
c = chr(i)
if hasattr(pc, arrow_name) and function_name != 'is_space':
ar = pa.array([c])
arrow_func = getattr(pc, arrow_name)
assert arrow_func(ar)[0].as_py() == getattr(c, py_name)()
@pytest.mark.parametrize(('ty', 'values'), all_array_types)
def test_take(ty, values):
arr = pa.array(values, type=ty)
for indices_type in [pa.int8(), pa.int64()]:
indices = pa.array([0, 4, 2, None], type=indices_type)
result = arr.take(indices)
result.validate()
expected = pa.array([values[0], values[4], values[2], None], type=ty)
assert result.equals(expected)
# empty indices
indices = pa.array([], type=indices_type)
result = arr.take(indices)
result.validate()
expected = pa.array([], type=ty)
assert result.equals(expected)
indices = pa.array([2, 5])
with pytest.raises(IndexError):
arr.take(indices)
indices = pa.array([2, -1])
with pytest.raises(IndexError):
arr.take(indices)
def test_take_indices_types():
arr = pa.array(range(5))
for indices_type in ['uint8', 'int8', 'uint16', 'int16',
'uint32', 'int32', 'uint64', 'int64']:
indices = pa.array([0, 4, 2, None], type=indices_type)
result = arr.take(indices)
result.validate()
expected = pa.array([0, 4, 2, None])
assert result.equals(expected)
for indices_type in [pa.float32(), pa.float64()]:
indices = pa.array([0, 4, 2], type=indices_type)
with pytest.raises(NotImplementedError):
arr.take(indices)
def test_take_on_chunked_array():
# ARROW-9504
arr = pa.chunked_array([
[
"a",
"b",
"c",
"d",
"e"
],
[
"f",
"g",
"h",
"i",
"j"
]
])
indices = np.array([0, 5, 1, 6, 9, 2])
result = arr.take(indices)
expected = pa.chunked_array([["a", "f", "b", "g", "j", "c"]])
assert result.equals(expected)
indices = pa.chunked_array([[1], [9, 2]])
result = arr.take(indices)
expected = pa.chunked_array([
[
"b"
],
[
"j",
"c"
]
])
assert result.equals(expected)
@pytest.mark.parametrize('ordered', [False, True])
def test_take_dictionary(ordered):
arr = pa.DictionaryArray.from_arrays([0, 1, 2, 0, 1, 2], ['a', 'b', 'c'],
ordered=ordered)
result = arr.take(pa.array([0, 1, 3]))
result.validate()
assert result.to_pylist() == ['a', 'b', 'a']
assert result.dictionary.to_pylist() == ['a', 'b', 'c']
assert result.type.ordered is ordered
def test_take_null_type():
# ARROW-10027
arr = pa.array([None] * 10)
chunked_arr = pa.chunked_array([[None] * 5] * 2)
batch = pa.record_batch([arr], names=['a'])
table = pa.table({'a': arr})
indices = pa.array([1, 3, 7, None])
assert len(arr.take(indices)) == 4
assert len(chunked_arr.take(indices)) == 4
assert len(batch.take(indices).column(0)) == 4
assert len(table.take(indices).column(0)) == 4
@pytest.mark.parametrize(('ty', 'values'), all_array_types)
def test_filter(ty, values):
arr = pa.array(values, type=ty)
mask = pa.array([True, False, False, True, None])
result = arr.filter(mask, null_selection_behavior='drop')
result.validate()
assert result.equals(pa.array([values[0], values[3]], type=ty))
result = arr.filter(mask, null_selection_behavior='emit_null')
result.validate()
assert result.equals(pa.array([values[0], values[3], None], type=ty))
# non-boolean dtype
mask = pa.array([0, 1, 0, 1, 0])
with pytest.raises(NotImplementedError):
arr.filter(mask)
# wrong length
mask = pa.array([True, False, True])
with pytest.raises(ValueError, match="must all be the same length"):
arr.filter(mask)
def test_filter_chunked_array():
arr = pa.chunked_array([["a", None], ["c", "d", "e"]])
expected_drop = pa.chunked_array([["a"], ["e"]])
expected_null = pa.chunked_array([["a"], [None, "e"]])
for mask in [
# mask is array
pa.array([True, False, None, False, True]),
# mask is chunked array
pa.chunked_array([[True, False, None], [False, True]]),
# mask is python object
[True, False, None, False, True]
]:
result = arr.filter(mask)
assert result.equals(expected_drop)
result = arr.filter(mask, null_selection_behavior="emit_null")
assert result.equals(expected_null)
def test_filter_record_batch():
batch = pa.record_batch(
[pa.array(["a", None, "c", "d", "e"])], names=["a'"])
# mask is array
mask = pa.array([True, False, None, False, True])
result = batch.filter(mask)
expected = pa.record_batch([pa.array(["a", "e"])], names=["a'"])
assert result.equals(expected)
result = batch.filter(mask, null_selection_behavior="emit_null")
expected = pa.record_batch([pa.array(["a", None, "e"])], names=["a'"])
assert result.equals(expected)
def test_filter_table():
table = pa.table([pa.array(["a", None, "c", "d", "e"])], names=["a"])
expected_drop = pa.table([pa.array(["a", "e"])], names=["a"])
expected_null = pa.table([pa.array(["a", None, "e"])], names=["a"])
for mask in [
# mask is array
pa.array([True, False, None, False, True]),
# mask is chunked array
pa.chunked_array([[True, False], [None, False, True]]),
# mask is python object
[True, False, None, False, True]
]:
result = table.filter(mask)
assert result.equals(expected_drop)
result = table.filter(mask, null_selection_behavior="emit_null")
assert result.equals(expected_null)
def test_filter_errors():
arr = pa.chunked_array([["a", None], ["c", "d", "e"]])
batch = pa.record_batch(
[pa.array(["a", None, "c", "d", "e"])], names=["a'"])
table = pa.table([pa.array(["a", None, "c", "d", "e"])], names=["a"])
for obj in [arr, batch, table]:
# non-boolean dtype
mask = pa.array([0, 1, 0, 1, 0])
with pytest.raises(NotImplementedError):
obj.filter(mask)
# wrong length
mask = pa.array([True, False, True])
with pytest.raises(pa.ArrowInvalid,
match="must all be the same length"):
obj.filter(mask)
def test_filter_null_type():
# ARROW-10027
arr = pa.array([None] * 10)
chunked_arr = pa.chunked_array([[None] * 5] * 2)
batch = pa.record_batch([arr], names=['a'])
table = pa.table({'a': arr})
mask = pa.array([True, False] * 5)
assert len(arr.filter(mask)) == 5
assert len(chunked_arr.filter(mask)) == 5
assert len(batch.filter(mask).column(0)) == 5
assert len(table.filter(mask).column(0)) == 5
@pytest.mark.parametrize("typ", ["array", "chunked_array"])
def test_compare_array(typ):
if typ == "array":
def con(values): return pa.array(values)
else:
def con(values): return pa.chunked_array([values])
arr1 = con([1, 2, 3, 4, None])
arr2 = con([1, 1, 4, None, 4])
result = pc.equal(arr1, arr2)
assert result.equals(con([True, False, False, None, None]))
result = pc.not_equal(arr1, arr2)
assert result.equals(con([False, True, True, None, None]))
result = pc.less(arr1, arr2)
assert result.equals(con([False, False, True, None, None]))
result = pc.less_equal(arr1, arr2)
assert result.equals(con([True, False, True, None, None]))
result = pc.greater(arr1, arr2)
assert result.equals(con([False, True, False, None, None]))
result = pc.greater_equal(arr1, arr2)
assert result.equals(con([True, True, False, None, None]))
@pytest.mark.parametrize("typ", ["array", "chunked_array"])
def test_compare_scalar(typ):
if typ == "array":
def con(values): return pa.array(values)
else:
def con(values): return pa.chunked_array([values])
arr = con([1, 2, 3, None])
# TODO this is a hacky way to construct a scalar ..
scalar = pa.array([2]).sum()
result = pc.equal(arr, scalar)
assert result.equals(con([False, True, False, None]))
result = pc.not_equal(arr, scalar)
assert result.equals(con([True, False, True, None]))
result = pc.less(arr, scalar)
assert result.equals(con([True, False, False, None]))
result = pc.less_equal(arr, scalar)
assert result.equals(con([True, True, False, None]))
result = pc.greater(arr, scalar)
assert result.equals(con([False, False, True, None]))
result = pc.greater_equal(arr, scalar)
assert result.equals(con([False, True, True, None]))
def test_compare_chunked_array_mixed():
arr = pa.array([1, 2, 3, 4, None])
arr_chunked = pa.chunked_array([[1, 2, 3], [4, None]])
arr_chunked2 = pa.chunked_array([[1, 2], [3, 4, None]])
expected = pa.chunked_array([[True, True, True, True, None]])
for left, right in [
(arr, arr_chunked),
(arr_chunked, arr),
(arr_chunked, arr_chunked2),
]:
result = pc.equal(left, right)
assert result.equals(expected)
def test_arithmetic_add():
left = pa.array([1, 2, 3, 4, 5])
right = pa.array([0, -1, 1, 2, 3])
result = pc.add(left, right)
expected = pa.array([1, 1, 4, 6, 8])
assert result.equals(expected)
def test_arithmetic_subtract():
left = pa.array([1, 2, 3, 4, 5])
right = pa.array([0, -1, 1, 2, 3])
result = pc.subtract(left, right)
expected = pa.array([1, 3, 2, 2, 2])
assert result.equals(expected)
def test_arithmetic_multiply():
left = pa.array([1, 2, 3, 4, 5])
right = pa.array([0, -1, 1, 2, 3])
result = pc.multiply(left, right)
expected = pa.array([0, -2, 3, 8, 15])
assert result.equals(expected)
def test_is_null():
arr = pa.array([1, 2, 3, None])
result = arr.is_null()
expected = pa.array([False, False, False, True])
assert result.equals(expected)
assert result.equals(pc.is_null(arr))
result = arr.is_valid()
expected = pa.array([True, True, True, False])
assert result.equals(expected)
assert result.equals(pc.is_valid(arr))
arr = pa.chunked_array([[1, 2], [3, None]])
result = arr.is_null()
expected = pa.chunked_array([[False, False], [False, True]])
assert result.equals(expected)
result = arr.is_valid()
expected = pa.chunked_array([[True, True], [True, False]])
assert result.equals(expected)
def test_fill_null():
arr = pa.array([1, 2, None, 4], type=pa.int8())
fill_value = pa.array([5], type=pa.int8())
with pytest.raises(pa.ArrowInvalid, match="tried to convert to int"):
arr.fill_null(fill_value)
arr = pa.array([None, None, None, None], type=pa.null())
fill_value = pa.scalar(None, type=pa.null())
result = arr.fill_null(fill_value)
expected = pa.array([None, None, None, None])
assert result.equals(expected)
arr = pa.array(['a', 'bb', None])
result = arr.fill_null('ccc')
expected = pa.array(['a', 'bb', 'ccc'])
assert result.equals(expected)
arr = pa.array([b'a', b'bb', None], type=pa.large_binary())
result = arr.fill_null('ccc')
expected = pa.array([b'a', b'bb', b'ccc'], type=pa.large_binary())
assert result.equals(expected)
@pytest.mark.parametrize('arrow_type', numerical_arrow_types)
def test_fill_null_array(arrow_type):
arr = pa.array([1, 2, None, 4], type=arrow_type)
fill_value = pa.scalar(5, type=arrow_type)
result = arr.fill_null(fill_value)
expected = pa.array([1, 2, 5, 4], type=arrow_type)
assert result.equals(expected)
# Implicit conversions
result = arr.fill_null(5)
assert result.equals(expected)
# ARROW-9451: Unsigned integers allow this for some reason
if not pa.types.is_unsigned_integer(arr.type):
with pytest.raises((ValueError, TypeError)):
arr.fill_null('5')
result = arr.fill_null(pa.scalar(5, type='int8'))
assert result.equals(expected)
@pytest.mark.parametrize('arrow_type', numerical_arrow_types)
def test_fill_null_chunked_array(arrow_type):
fill_value = pa.scalar(5, type=arrow_type)
arr = pa.chunked_array([pa.array([None, 2, 3, 4], type=arrow_type)])
result = arr.fill_null(fill_value)
expected = pa.chunked_array([pa.array([5, 2, 3, 4], type=arrow_type)])
assert result.equals(expected)
arr = pa.chunked_array([
pa.array([1, 2], type=arrow_type),
pa.array([], type=arrow_type),
pa.array([None, 4], type=arrow_type)
])
expected = pa.chunked_array([
pa.array([1, 2], type=arrow_type),
pa.array([], type=arrow_type),
pa.array([5, 4], type=arrow_type)
])
result = arr.fill_null(fill_value)
assert result.equals(expected)
# Implicit conversions
result = arr.fill_null(5)
assert result.equals(expected)
result = arr.fill_null(pa.scalar(5, type='int8'))
assert result.equals(expected)
def test_logical():
a = pa.array([True, False, False, None])
b = pa.array([True, True, False, True])
assert pc.and_(a, b) == pa.array([True, False, False, None])
assert pc.and_kleene(a, b) == pa.array([True, False, False, None])
assert pc.or_(a, b) == pa.array([True, True, False, None])
assert pc.or_kleene(a, b) == pa.array([True, True, False, True])
assert pc.xor(a, b) == pa.array([False, True, False, None])
assert pc.invert(a) == pa.array([False, True, True, None])
def test_cast():
arr = pa.array([2**63 - 1], type='int64')
with pytest.raises(pa.ArrowInvalid):
pc.cast(arr, 'int32')
assert pc.cast(arr, 'int32', safe=False) == pa.array([-1], type='int32')
arr = pa.array([datetime(2010, 1, 1), datetime(2015, 1, 1)])
expected = pa.array([1262304000000, 1420070400000], type='timestamp[ms]')
assert pc.cast(arr, 'timestamp[ms]') == expected
def test_strptime():
arr = pa.array(["5/1/2020", None, "12/13/1900"])
got = pc.strptime(arr, format='%m/%d/%Y', unit='s')
expected = pa.array([datetime(2020, 5, 1), None, datetime(1900, 12, 13)],
type=pa.timestamp('s'))
assert got == expected
def test_count():
arr = pa.array([1, 2, 3, None, None])
assert pc.count(arr).as_py() == 3
assert pc.count(arr, count_mode='count_non_null').as_py() == 3
assert pc.count(arr, count_mode='count_null').as_py() == 2
with pytest.raises(ValueError, match="'zzz' is not a valid count_mode"):
pc.count(arr, count_mode='zzz')
def test_partition_nth():
data = list(range(100, 140))
random.shuffle(data)
pivot = 10
indices = pc.partition_nth_indices(data, pivot=pivot).to_pylist()
assert len(indices) == len(data)
assert sorted(indices) == list(range(len(data)))
assert all(data[indices[i]] <= data[indices[pivot]]
for i in range(pivot))
assert all(data[indices[i]] >= data[indices[pivot]]
for i in range(pivot, len(data)))
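# Illustrative note (added; not part of the original test file): for pivot=2
# and data [5, 1, 4, 2], one valid result of partition_nth_indices is
# [3, 1, 2, 0] -- the selected values are [2, 1, 4, 5], so every value before
# position 2 is <= data[indices[2]] and every value from position 2 onward is
# >= it, which is exactly the partial ordering asserted above.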
| xhochy/arrow | python/pyarrow/tests/test_compute.py | Python | apache-2.0 | 34,188 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from oslo_config import cfg
from st2common.services import rbac as rbac_services
from st2common.rbac.types import PermissionType
from st2common.rbac.types import ResourceType
from st2common.rbac.types import SystemRole
from st2common.persistence.auth import User
from st2common.persistence.rbac import Role
from st2common.persistence.rbac import UserRoleAssignment
from st2common.persistence.rbac import PermissionGrant
from st2common.persistence.pack import Pack
from st2common.models.db.auth import UserDB
from st2common.models.db.rbac import RoleDB
from st2common.models.db.rbac import UserRoleAssignmentDB
from st2common.models.db.rbac import PermissionGrantDB
from st2common.models.db.pack import PackDB
from st2common.rbac.resolvers import get_resolver_for_resource_type
from st2common.rbac.migrations import insert_system_roles
from st2tests.base import CleanDbTestCase
__all__ = [
'BasePermissionsResolverTestCase',
'PermissionsResolverUtilsTestCase'
]
class BasePermissionsResolverTestCase(CleanDbTestCase):
def setUp(self):
super(BasePermissionsResolverTestCase, self).setUp()
# Make sure RBAC is enabled
cfg.CONF.set_override(name='enable', override=True, group='rbac')
self.users = {}
self.roles = {}
self.resources = {}
# Run role "migrations"
insert_system_roles()
# Insert common mock objects
self._insert_common_mocks()
def _user_has_resource_db_permissions(self, resolver, user_db, resource_db, permission_types):
"""
Method which verifies that user has all the provided permissions.
"""
self.assertTrue(isinstance(permission_types, (list, tuple)))
self.assertTrue(len(permission_types) > 1)
for permission_type in permission_types:
result = resolver.user_has_resource_db_permission(
user_db=user_db,
resource_db=resource_db,
permission_type=permission_type)
if not result:
return False
return True
def _insert_common_mocks(self):
self._insert_common_mock_users()
self._insert_common_mock_resources()
self._insert_common_mock_roles()
self._insert_common_mock_role_assignments()
def _insert_common_mock_users(self):
# Insert common mock users
user_1_db = UserDB(name='admin')
user_1_db = User.add_or_update(user_1_db)
self.users['admin'] = user_1_db
user_2_db = UserDB(name='observer')
user_2_db = User.add_or_update(user_2_db)
self.users['observer'] = user_2_db
user_3_db = UserDB(name='no_roles')
user_3_db = User.add_or_update(user_3_db)
self.users['no_roles'] = user_3_db
user_4_db = UserDB(name='1_custom_role_no_permissions')
user_4_db = User.add_or_update(user_4_db)
self.users['1_custom_role_no_permissions'] = user_4_db
user_5_db = UserDB(name='1_role_pack_grant')
user_5_db = User.add_or_update(user_5_db)
self.users['custom_role_pack_grant'] = user_5_db
def _insert_common_mock_resources(self):
pack_1_db = PackDB(name='test_pack_1', ref='test_pack_1', description='',
version='0.1.0', author='foo', email='test@example.com')
pack_1_db = Pack.add_or_update(pack_1_db)
self.resources['pack_1'] = pack_1_db
pack_2_db = PackDB(name='test_pack_2', ref='test_pack_2', description='',
version='0.1.0', author='foo', email='test@example.com')
pack_2_db = Pack.add_or_update(pack_2_db)
self.resources['pack_2'] = pack_2_db
def _insert_common_mock_roles(self):
# Insert common mock roles
admin_role_db = rbac_services.get_role_by_name(name=SystemRole.ADMIN)
observer_role_db = rbac_services.get_role_by_name(name=SystemRole.OBSERVER)
self.roles['admin_role'] = admin_role_db
self.roles['observer_role'] = observer_role_db
# Custom role 1 - no grants
role_1_db = rbac_services.create_role(name='custom_role_1')
self.roles['custom_role_1'] = role_1_db
# Custom role 2 - one grant on pack_1
# "pack_create" on pack_1
grant_db = PermissionGrantDB(resource_uid=self.resources['pack_1'].get_uid(),
resource_type=ResourceType.PACK,
permission_types=[PermissionType.PACK_CREATE])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_3_db = RoleDB(name='custom_role_pack_grant', permission_grants=permission_grants)
role_3_db = Role.add_or_update(role_3_db)
self.roles['custom_role_pack_grant'] = role_3_db
def _insert_common_mock_role_assignments(self):
# Insert common mock role assignments
role_assignment_admin = UserRoleAssignmentDB(user=self.users['admin'].name,
role=self.roles['admin_role'].name)
role_assignment_admin = UserRoleAssignment.add_or_update(role_assignment_admin)
role_assignment_observer = UserRoleAssignmentDB(user=self.users['observer'].name,
role=self.roles['observer_role'].name)
role_assignment_observer = UserRoleAssignment.add_or_update(role_assignment_observer)
user_db = self.users['1_custom_role_no_permissions']
role_assignment_db = UserRoleAssignmentDB(user=user_db.name,
role=self.roles['custom_role_1'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
user_db = self.users['custom_role_pack_grant']
role_assignment_db = UserRoleAssignmentDB(user=user_db.name,
role=self.roles['custom_role_pack_grant'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
class PermissionsResolverUtilsTestCase(unittest2.TestCase):
def test_get_resolver_for_resource_type_valid_resource_type(self):
valid_resources_types = [ResourceType.PACK, ResourceType.SENSOR, ResourceType.ACTION,
ResourceType.RULE, ResourceType.EXECUTION,
ResourceType.KEY_VALUE_PAIR,
ResourceType.WEBHOOK]
for resource_type in valid_resources_types:
resolver_instance = get_resolver_for_resource_type(resource_type=resource_type)
resource_name = resource_type.split('_')[0].lower()
class_name = resolver_instance.__class__.__name__.lower()
self.assertTrue(resource_name in class_name)
def test_get_resolver_for_resource_type_unsupported_resource_type(self):
expected_msg = 'Unsupported resource: alias'
self.assertRaisesRegexp(ValueError, expected_msg, get_resolver_for_resource_type,
resource_type='alias')
| alfasin/st2 | st2common/tests/unit/test_rbac_resolvers.py | Python | apache-2.0 | 7,834 |
import re
from ztag.annotation import Annotation
from ztag.annotation import OperatingSystem
from ztag import protocols
import ztag.test
class FtpCesarFtpd(Annotation):
protocol = protocols.FTP
subprotocol = protocols.FTP.BANNER
port = None
impl_re = re.compile(r"^220[- ]CesarFTP 0\.\d+", re.IGNORECASE)
version_re = re.compile(r"CesarFTP (\d+\.\d+)([a-z])?", re.IGNORECASE)
tests = {
"FtpCesarFtpd_1": {
"global_metadata": {
"os": OperatingSystem.WINDOWS,
},
"local_metadata": {
"product": "Cesar FTP",
"version": "0.99",
"revision": "g"
}
}
}
def process(self, obj, meta):
banner = obj["banner"]
if self.impl_re.search(banner):
meta.global_metadata.os = OperatingSystem.WINDOWS
meta.local_metadata.product = "Cesar FTP"
version = self.version_re.search(banner).group(1)
meta.local_metadata.version = version
rev = self.version_re.search(banner).group(2)
meta.local_metadata.revision = rev
return meta
""" Tests
"220 CesarFTP 0.99g Server Welcome !\r\n"
"220 CesarFTP 0.99g Server Welcome !\r\n"
"220 CesarFTP 0.99g Server Welcome !\r\n"
"220 CesarFTP 0.99g Server Welcome !\r\n"
"220 CesarFTP 0.99g Server Welcome !\r\n"
"220 CesarFTP 0.99g Server Welcome !\r\n"
"220 CesarFTP 0.99g Server Welcome !\r\n"
"220 CesarFTP 0.99g Server Welcome !\r\n"
"220 CesarFTP 0.99g Server Welcome !\r\n"
"220 CesarFTP 0.99g Server Welcome !\r\n"
"""
| zmap/ztag | ztag/annotations/FtpCesarFtpd.py | Python | apache-2.0 | 1,652 |
#!/usr/bin/env python
"""Test the file transfer mechanism."""
import hashlib
import io
import itertools
import os
import platform
import struct
import unittest
from unittest import mock
from absl import app
from grr_response_core.lib import constants
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.util import compatibility
from grr_response_core.lib.util import temp
from grr_response_server import data_store
from grr_response_server import file_store
from grr_response_server import flow_base
from grr_response_server.databases import db
from grr_response_server.flows.general import transfer
from grr_response_server.rdfvalues import objects as rdf_objects
from grr.test_lib import action_mocks
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
# pylint:mode=test
class ClientMock(action_mocks.ActionMock):
BUFFER_SIZE = 1024 * 1024
def __init__(self, mbr_data=None, client_id=None):
self.mbr = mbr_data
self.client_id = client_id
def ReadBuffer(self, args):
return_data = self.mbr[args.offset:args.offset + args.length]
return [
rdf_client.BufferReference(
data=return_data, offset=args.offset, length=len(return_data))
]
class GetMBRFlowTest(flow_test_lib.FlowTestsBaseclass):
"""Test the transfer mechanism."""
mbr = (b"123456789" * 1000)[:4096]
def setUp(self):
super().setUp()
self.client_id = self.SetupClient(0)
def testGetMBR(self):
"""Test that the GetMBR flow works."""
flow_id = flow_test_lib.TestFlowHelper(
transfer.GetMBR.__name__,
ClientMock(self.mbr),
creator=self.test_username,
client_id=self.client_id)
results = flow_test_lib.GetFlowResults(self.client_id, flow_id)
self.assertLen(results, 1)
self.assertEqual(results[0], self.mbr)
def _RunAndCheck(self, chunk_size, download_length):
with utils.Stubber(constants, "CLIENT_MAX_BUFFER_SIZE", chunk_size):
flow_id = flow_test_lib.TestFlowHelper(
transfer.GetMBR.__name__,
ClientMock(self.mbr),
creator=self.test_username,
client_id=self.client_id,
length=download_length)
results = flow_test_lib.GetFlowResults(self.client_id, flow_id)
self.assertLen(results, 1)
self.assertEqual(results[0], self.mbr[:download_length])
def testGetMBRChunked(self):
chunk_size = 100
download_length = 15 * chunk_size
self._RunAndCheck(chunk_size, download_length)
# Not a multiple of the chunk size.
download_length = 15 * chunk_size + chunk_size // 2
self._RunAndCheck(chunk_size, download_length)
class CompareFDsMixin(object):
def CompareFDs(self, fd1, fd2):
# Seek the files to the end to make sure they are the same size.
fd2.seek(0, 2)
fd1.seek(0, 2)
self.assertEqual(fd2.tell(), fd1.tell())
ranges = [
# Start of file
(0, 100),
# Straddle the first chunk
(16 * 1024 - 100, 300),
# Read past end of file
(fd2.tell() - 100, 300),
# Zero length reads
(100, 0),
]
for offset, length in ranges:
fd1.seek(offset)
data1 = fd1.read(length)
fd2.seek(offset)
data2 = fd2.read(length)
self.assertEqual(data1, data2)
class GetFileFlowTest(CompareFDsMixin, flow_test_lib.FlowTestsBaseclass):
"""Test the transfer mechanism."""
def setUp(self):
super().setUp()
self.client_id = self.SetupClient(0)
def testGetFile(self):
"""Test that the GetFile flow works."""
client_mock = action_mocks.GetFileClientMock()
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS,
path=os.path.join(self.base_path, "test_img.dd"))
flow_test_lib.TestFlowHelper(
transfer.GetFile.__name__,
client_mock,
creator=self.test_username,
client_id=self.client_id,
pathspec=pathspec)
# Fix path for Windows testing.
pathspec.path = pathspec.path.replace("\\", "/")
with open(pathspec.path, "rb") as fd2:
cp = db.ClientPath.FromPathSpec(self.client_id, pathspec)
fd_rel_db = file_store.OpenFile(cp)
self.CompareFDs(fd2, fd_rel_db)
# Only the sha256 hash of the contents should have been calculated:
# in order to put file contents into the file store.
history = data_store.REL_DB.ReadPathInfoHistory(cp.client_id, cp.path_type,
cp.components)
self.assertEqual(history[-1].hash_entry.sha256, fd_rel_db.hash_id.AsBytes())
self.assertIsNone(history[-1].hash_entry.sha1)
self.assertIsNone(history[-1].hash_entry.md5)
def testGetFilePathCorrection(self):
"""Tests that the pathspec returned is used for the aff4path."""
client_mock = action_mocks.GetFileClientMock()
# Deliberately using the wrong casing.
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS,
path=os.path.join(self.base_path, "TEST_IMG.dd"))
expected_size = os.path.getsize(os.path.join(self.base_path, "test_img.dd"))
session_id = flow_test_lib.TestFlowHelper(
transfer.GetFile.__name__,
client_mock,
creator=self.test_username,
client_id=self.client_id,
pathspec=pathspec)
results = flow_test_lib.GetFlowResults(self.client_id, session_id)
self.assertLen(results, 1)
res_pathspec = results[0].pathspec
# Fix path for Windows testing.
pathspec.path = pathspec.path.replace("\\", "/")
with open(res_pathspec.path, "rb") as fd2:
fd2.seek(0, 2)
cp = db.ClientPath.FromPathSpec(self.client_id, res_pathspec)
fd_rel_db = file_store.OpenFile(cp)
self.CompareFDs(fd2, fd_rel_db)
# Only the sha256 hash of the contents should have been calculated:
# in order to put file contents into the file store.
history = data_store.REL_DB.ReadPathInfoHistory(cp.client_id, cp.path_type,
cp.components)
self.assertEqual(history[-1].hash_entry.sha256, fd_rel_db.hash_id.AsBytes())
self.assertEqual(history[-1].hash_entry.num_bytes, expected_size)
self.assertIsNone(history[-1].hash_entry.sha1)
self.assertIsNone(history[-1].hash_entry.md5)
def testGetFileIsDirectory(self):
"""Tests that the flow raises when called on directory."""
client_mock = action_mocks.GetFileClientMock()
with temp.AutoTempDirPath() as temp_dir:
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS, path=temp_dir)
with self.assertRaises(RuntimeError):
flow_test_lib.TestFlowHelper(
transfer.GetFile.__name__,
client_mock,
creator=self.test_username,
client_id=self.client_id,
pathspec=pathspec)
def testFailsIfStatFailsAndIgnoreStatFailureFlagNotSet(self):
with temp.AutoTempFilePath() as test_path:
with open(test_path, "wb") as fd:
fd.write(b"foo")
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS,
path=test_path,
)
args = transfer.GetFileArgs(
pathspec=pathspec,
read_length=1,
)
client_mock = action_mocks.GetFileWithFailingStatClientMock()
with self.assertRaises(RuntimeError):
flow_test_lib.TestFlowHelper(
transfer.GetFile.__name__,
client_mock,
creator=self.test_username,
client_id=self.client_id,
args=args)
def testWorksIfStatFailsAndIgnoreStatFailureFlagIsSet(self):
with temp.AutoTempFilePath() as test_path:
with open(test_path, "wb") as fd:
fd.write(b"foo")
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS,
path=test_path,
)
args = transfer.GetFileArgs(
pathspec=pathspec,
read_length=1,
ignore_stat_failure=True,
)
client_mock = action_mocks.GetFileWithFailingStatClientMock()
flow_test_lib.TestFlowHelper(
transfer.GetFile.__name__,
client_mock,
creator=self.test_username,
client_id=self.client_id,
args=args)
def _ReadBytesWithGetFile(self,
path,
stat_available=False,
offset=None,
file_size_override=None,
read_length=None):
if stat_available:
client_mock = action_mocks.GetFileClientMock()
else:
client_mock = action_mocks.GetFileWithFailingStatClientMock()
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS,
path=path,
)
if offset is not None:
pathspec.offset = offset
if file_size_override is not None:
pathspec.file_size_override = file_size_override
args = transfer.GetFileArgs(
pathspec=pathspec,
ignore_stat_failure=not stat_available,
)
if read_length is not None:
args.read_length = read_length
flow_id = flow_test_lib.TestFlowHelper(
transfer.GetFile.__name__,
client_mock,
creator=self.test_username,
client_id=self.client_id,
args=args)
results = flow_test_lib.GetFlowResults(self.client_id, flow_id)
self.assertLen(
results, 1, f"Expected 1 result for offset={offset}, "
f"file_size_override={file_size_override}, "
f"read_length={read_length}, ")
res_pathspec = results[0].pathspec
cp = db.ClientPath.FromPathSpec(self.client_id, res_pathspec)
return file_store.OpenFile(cp).Read()
TEST_DATA_LENGTH = transfer.GetFile.CHUNK_SIZE * 10 + 1
TEST_DATA = b"".join(
itertools.islice(
itertools.cycle(
[b"0", b"1", b"2", b"3", b"4", b"5", b"6", b"7", b"8", b"9"]),
TEST_DATA_LENGTH))
def testReadsTheWholeStatableFileWhenNoSizesPassed(self):
with temp.AutoTempFilePath() as test_path:
with open(test_path, "wb") as fd:
fd.write(self.TEST_DATA)
actual_bytes = self._ReadBytesWithGetFile(test_path, stat_available=True)
self.assertEqual(self.TEST_DATA, actual_bytes)
def testRaisesOnNonStatableFileWhenNoSizesPassed(self):
with temp.AutoTempFilePath() as test_path:
with self.assertRaises(RuntimeError):
self._ReadBytesWithGetFile(test_path, stat_available=False)
READ_LENGTH_INTERVALS = (
# Check for intervals within the file size.
(0, 10),
(10, 20),
(0, transfer.GetFile.CHUNK_SIZE),
(1, transfer.GetFile.CHUNK_SIZE),
(1, transfer.GetFile.CHUNK_SIZE - 1),
(0, transfer.GetFile.CHUNK_SIZE * 2),
(1, transfer.GetFile.CHUNK_SIZE * 2),
(1, transfer.GetFile.CHUNK_SIZE * 2 - 1),
(TEST_DATA_LENGTH - transfer.GetFile.CHUNK_SIZE,
transfer.GetFile.CHUNK_SIZE),
(TEST_DATA_LENGTH - transfer.GetFile.CHUNK_SIZE - 1,
transfer.GetFile.CHUNK_SIZE),
(TEST_DATA_LENGTH - transfer.GetFile.CHUNK_SIZE + 1,
transfer.GetFile.CHUNK_SIZE - 1),
# Check for intervals outside of the file size (an EOF might
# happen also on a device file, like when a disk file is read).
(TEST_DATA_LENGTH - 10, 20),
(TEST_DATA_LENGTH - transfer.GetFile.CHUNK_SIZE - 1,
transfer.GetFile.CHUNK_SIZE + 2),
)
def testWorksWithReadLengthOnSeekableFile(self):
with temp.AutoTempFilePath() as test_path:
with open(test_path, "wb") as fd:
fd.write(self.TEST_DATA)
for offset, read_length in self.READ_LENGTH_INTERVALS:
with self.subTest(
offset=offset, read_length=read_length, stat_available=True):
actual_bytes = self._ReadBytesWithGetFile(
test_path,
stat_available=True,
offset=offset,
read_length=read_length)
self.assertEqual(self.TEST_DATA[offset:offset + read_length],
actual_bytes)
with self.subTest(
offset=offset, read_length=read_length, stat_available=False):
actual_bytes = self._ReadBytesWithGetFile(
test_path,
stat_available=False,
offset=offset,
read_length=read_length)
self.assertEqual(self.TEST_DATA[offset:offset + read_length],
actual_bytes)
def testWorksWithReadLengthOnNonSeekableFile(self):
for offset, read_length in self.READ_LENGTH_INTERVALS:
# Check non-seekable file that still can be stat-ed.
with self.subTest(
offset=offset, read_length=read_length, stat_available=True):
actual_bytes = self._ReadBytesWithGetFile(
"/dev/random",
stat_available=True,
offset=offset,
read_length=read_length)
# Using assertEqual instead of assertLen for easier-to-process
# failure messages (as long byte sequences get dumped to stdout
# in case of a failure).
self.assertEqual(len(actual_bytes), read_length)
# Check non-seekable file that can't be stat-ed.
with self.subTest(
offset=offset, read_length=read_length, stat_available=False):
actual_bytes = self._ReadBytesWithGetFile(
"/dev/random",
stat_available=False,
offset=offset,
read_length=read_length)
# Using assertEqual instead of assertLen for easier-to-process
# failure messages (as long byte sequences get dumped to stdout
# in case of a failure).
self.assertEqual(len(actual_bytes), read_length)
FILE_SIZE_OVERRIDE_INTERVALS = (
# Check intervals within the file boundaries.
(0, 10),
(10, 30),
(0, transfer.GetFile.CHUNK_SIZE),
(1, 1 + transfer.GetFile.CHUNK_SIZE),
(1, transfer.GetFile.CHUNK_SIZE),
(0, transfer.GetFile.CHUNK_SIZE * 2),
(1, 1 + transfer.GetFile.CHUNK_SIZE * 2),
(1, transfer.GetFile.CHUNK_SIZE * 2),
(TEST_DATA_LENGTH - transfer.GetFile.CHUNK_SIZE, TEST_DATA_LENGTH),
(TEST_DATA_LENGTH - transfer.GetFile.CHUNK_SIZE - 1,
TEST_DATA_LENGTH - 1),
(TEST_DATA_LENGTH - transfer.GetFile.CHUNK_SIZE + 1, TEST_DATA_LENGTH),
# Checks intervals outside of the file size.
(TEST_DATA_LENGTH - 10, TEST_DATA_LENGTH + 10),
(TEST_DATA_LENGTH - transfer.GetFile.CHUNK_SIZE - 1,
TEST_DATA_LENGTH + 1),
)
def testWorksWithFileSizeOverrideOnSeekableFile(self):
with temp.AutoTempFilePath() as test_path:
with open(test_path, "wb") as fd:
fd.write(self.TEST_DATA)
for offset, file_size_override in self.FILE_SIZE_OVERRIDE_INTERVALS:
with self.subTest(
offset=offset,
file_size_override=file_size_override,
stat_available=True):
actual_bytes = self._ReadBytesWithGetFile(
test_path,
stat_available=True,
offset=offset,
file_size_override=file_size_override)
self.assertEqual(self.TEST_DATA[offset:file_size_override],
actual_bytes)
with self.subTest(
offset=offset,
file_size_override=file_size_override,
stat_available=False):
actual_bytes = self._ReadBytesWithGetFile(
test_path,
stat_available=False,
offset=offset,
file_size_override=file_size_override)
self.assertEqual(self.TEST_DATA[offset:file_size_override],
actual_bytes)
def testWorksWithFileSizeOverrideOnNonSeekableFile(self):
for offset, file_size_override in self.FILE_SIZE_OVERRIDE_INTERVALS:
with self.subTest(
offset=offset,
file_size_override=file_size_override,
stat_available=True):
actual_bytes = self._ReadBytesWithGetFile(
"/dev/random",
stat_available=True,
offset=offset,
file_size_override=file_size_override)
self.assertEqual(len(actual_bytes), file_size_override - offset)
with self.subTest(
offset=offset,
file_size_override=file_size_override,
stat_available=False):
actual_bytes = self._ReadBytesWithGetFile(
"/dev/random",
stat_available=False,
offset=offset,
file_size_override=file_size_override)
self.assertEqual(len(actual_bytes), file_size_override - offset)
READ_LENGTH_FILE_SIZE_OVERRIDE_INTERVALS = (
# offset, read_length, file_size_override
(0, 10, 5),
(0, 10, 15),
(0, 5, 10),
(0, 15, 10),
(0, transfer.GetFile.CHUNK_SIZE * 2, transfer.GetFile.CHUNK_SIZE * 2 - 1),
(0, transfer.GetFile.CHUNK_SIZE * 2, transfer.GetFile.CHUNK_SIZE * 2 + 1),
(1, transfer.GetFile.CHUNK_SIZE * 2, transfer.GetFile.CHUNK_SIZE * 2),
(1, transfer.GetFile.CHUNK_SIZE * 2, transfer.GetFile.CHUNK_SIZE * 2 + 2),
(TEST_DATA_LENGTH - transfer.GetFile.CHUNK_SIZE,
transfer.GetFile.CHUNK_SIZE, TEST_DATA_LENGTH - 1),
(TEST_DATA_LENGTH - transfer.GetFile.CHUNK_SIZE,
transfer.GetFile.CHUNK_SIZE, TEST_DATA_LENGTH + 1),
)
def testWorksWithReadLengthAndFileSizeOverrideOnSeekableFiles(self):
with temp.AutoTempFilePath() as test_path:
with open(test_path, "wb") as fd:
fd.write(self.TEST_DATA)
for (offset, read_length,
file_size_override) in self.READ_LENGTH_FILE_SIZE_OVERRIDE_INTERVALS:
upper_limit = min(offset + read_length, file_size_override)
with self.subTest(
offset=offset,
read_length=read_length,
file_size_override=file_size_override,
stat_available=True):
actual_bytes = self._ReadBytesWithGetFile(
test_path,
stat_available=True,
offset=offset,
read_length=read_length,
file_size_override=file_size_override)
self.assertEqual(self.TEST_DATA[offset:upper_limit], actual_bytes)
with self.subTest(
offset=offset,
read_length=read_length,
file_size_override=file_size_override,
stat_available=False):
actual_bytes = self._ReadBytesWithGetFile(
test_path,
stat_available=False,
offset=offset,
read_length=read_length,
file_size_override=file_size_override)
self.assertEqual(self.TEST_DATA[offset:upper_limit], actual_bytes)
def testWorksWithReadLengthAndFileSizeOverrideOnNonSeekableFiles(self):
for (offset, read_length,
file_size_override) in self.READ_LENGTH_FILE_SIZE_OVERRIDE_INTERVALS:
with self.subTest(
offset=offset,
read_length=read_length,
file_size_override=file_size_override,
stat_available=True):
actual_bytes = self._ReadBytesWithGetFile(
"/dev/random",
stat_available=True,
offset=offset,
read_length=read_length,
file_size_override=file_size_override)
# Using assertEqual instead of assertLen for easier-to-process
# failure messages (as long byte sequences get dumped to stdout
# in case of a failure).
self.assertEqual(
len(actual_bytes), min(read_length, file_size_override - offset))
with self.subTest(
offset=offset,
read_length=read_length,
file_size_override=file_size_override,
stat_available=False):
actual_bytes = self._ReadBytesWithGetFile(
"/dev/random",
stat_available=False,
offset=offset,
read_length=read_length,
file_size_override=file_size_override)
# Using assertEqual instead of assertLen for easier-to-process
# failure messages (as long byte sequences get dumped to stdout
# in case of a failure).
self.assertEqual(
len(actual_bytes), min(read_length, file_size_override - offset))
class MultiGetFileFlowTest(CompareFDsMixin, flow_test_lib.FlowTestsBaseclass):
"""Test the transfer mechanism."""
def setUp(self):
super().setUp()
self.client_id = self.SetupClient(0)
@unittest.skipUnless(platform.system() == "Linux",
"/proc only exists on Linux")
def testMultiGetFileOfSpecialFiles(self):
"""Test that special /proc/ files are handled correctly.
/proc/ files have the property that they are non seekable from their end
(i.e. seeking them relative to the end is not supported). They also return
an st_size of 0. For example:
$ stat /proc/self/maps
File: '/proc/self/maps'
Size: 0 Blocks: 0 IO Block: 1024 regular empty file
$ head /proc/self/maps
00400000-00409000 r-xp 00000000 fc:01 9180740 /usr/bin/head
00608000-00609000 r--p 00008000 fc:01 9180740 /usr/bin/head
...
When we try to use the MultiGetFile flow, it deduplicates the files and
since it thinks the file has a zero size, the flow will not download the
file, and instead copy the zero size file into it.
"""
client_mock = action_mocks.MultiGetFileClientMock()
# Create a zero sized file.
zero_sized_filename = os.path.join(self.temp_dir, "zero_size")
with open(zero_sized_filename, "wb"):
pass
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS, path=zero_sized_filename)
flow_test_lib.TestFlowHelper(
transfer.MultiGetFile.__name__,
client_mock,
creator=self.test_username,
file_size="1MiB",
client_id=self.client_id,
pathspecs=[pathspec])
# Now if we try to fetch a real /proc/ filename this will fail because the
# filestore already contains the zero length file
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS, path="/proc/self/environ")
flow_test_lib.TestFlowHelper(
transfer.MultiGetFile.__name__,
client_mock,
creator=self.test_username,
file_size=1024 * 1024,
client_id=self.client_id,
pathspecs=[pathspec])
with open(pathspec.last.path, "rb") as fd:
data = fd.read()
cp = db.ClientPath.FromPathSpec(self.client_id, pathspec)
fd_rel_db = file_store.OpenFile(cp)
self.assertEqual(fd_rel_db.size, len(data))
self.assertEqual(fd_rel_db.read(), data)
# Check that SHA256 hash of the file matches the contents
# hash and that MD5 and SHA1 are set.
history = data_store.REL_DB.ReadPathInfoHistory(cp.client_id, cp.path_type,
cp.components)
self.assertEqual(history[-1].hash_entry.sha256, fd_rel_db.hash_id.AsBytes())
self.assertEqual(history[-1].hash_entry.num_bytes, len(data))
self.assertIsNotNone(history[-1].hash_entry.sha1)
self.assertIsNotNone(history[-1].hash_entry.md5)
def testMultiGetFile(self):
"""Test MultiGetFile."""
client_mock = action_mocks.MultiGetFileClientMock()
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS,
path=os.path.join(self.base_path, "test_img.dd"))
expected_size = os.path.getsize(pathspec.path)
args = transfer.MultiGetFileArgs(pathspecs=[pathspec, pathspec])
with test_lib.Instrument(transfer.MultiGetFile,
"_ReceiveFileStat") as receivestat_instrument:
flow_test_lib.TestFlowHelper(
transfer.MultiGetFile.__name__,
client_mock,
creator=self.test_username,
client_id=self.client_id,
args=args)
    # We should only have called _ReceiveFileStat once because the two paths
    # requested were identical.
self.assertLen(receivestat_instrument.args, 1)
# Fix path for Windows testing.
pathspec.path = pathspec.path.replace("\\", "/")
with open(pathspec.path, "rb") as fd2:
# Test the file that was created.
cp = db.ClientPath.FromPathSpec(self.client_id, pathspec)
fd_rel_db = file_store.OpenFile(cp)
self.CompareFDs(fd2, fd_rel_db)
# Check that SHA256 hash of the file matches the contents
# hash and that MD5 and SHA1 are set.
history = data_store.REL_DB.ReadPathInfoHistory(cp.client_id, cp.path_type,
cp.components)
self.assertEqual(history[-1].hash_entry.sha256, fd_rel_db.hash_id.AsBytes())
self.assertEqual(history[-1].hash_entry.num_bytes, expected_size)
self.assertIsNotNone(history[-1].hash_entry.sha1)
self.assertIsNotNone(history[-1].hash_entry.md5)
# Setting MIN_CALL_TO_FILE_STORE to a smaller value emulates MultiGetFile's
# behavior when dealing with large files.
@mock.patch.object(transfer.MultiGetFile, "MIN_CALL_TO_FILE_STORE", 1)
def testMultiGetFileCorrectlyFetchesSameFileMultipleTimes(self):
"""Test MultiGetFile."""
client_mock = action_mocks.MultiGetFileClientMock()
total_num_chunks = 10
total_size = transfer.MultiGetFile.CHUNK_SIZE * total_num_chunks
path = os.path.join(self.temp_dir, "test_big.txt")
with io.open(path, "wb") as fd:
for i in range(total_num_chunks):
fd.write(struct.pack("b", i) * transfer.MultiGetFile.CHUNK_SIZE)
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS, path=path)
def _Check(expected_size):
args = transfer.MultiGetFileArgs(
pathspecs=[pathspec], file_size=expected_size)
flow_test_lib.TestFlowHelper(
transfer.MultiGetFile.__name__,
client_mock,
creator=self.test_username,
client_id=self.client_id,
args=args)
# Test the file that was created.
cp = db.ClientPath.FromPathSpec(self.client_id, pathspec)
fd = file_store.OpenFile(cp)
self.assertEqual(fd.size, expected_size)
# Fetch the file twice to test a real-world scenario when a file is first
# fetched with a smaller limit, and then - with a bigger one.
# This tests against a bug in MultiGetFileLogic when first N chunks of
# the file were already fetched during a previous MultiGetFileLogic run,
# and as a consequence the file was considered fully fetched, even if
# the max_file_size value of the current run was much bigger than
# the size of the previously fetched file.
_Check(transfer.MultiGetFileLogic.CHUNK_SIZE * 2)
_Check(total_size)
def testMultiGetFileMultiFiles(self):
"""Test MultiGetFile downloading many files at once."""
client_mock = action_mocks.MultiGetFileClientMock()
pathspecs = []
# Make 30 files to download.
for i in range(30):
path = os.path.join(self.temp_dir, "test_%s.txt" % i)
with io.open(path, "wb") as fd:
fd.write(b"Hello")
pathspecs.append(
rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS, path=path))
args = transfer.MultiGetFileArgs(
pathspecs=pathspecs, maximum_pending_files=10)
flow_test_lib.TestFlowHelper(
transfer.MultiGetFile.__name__,
client_mock,
creator=self.test_username,
client_id=self.client_id,
args=args)
# Now open each file and make sure the data is there.
for pathspec in pathspecs:
cp = db.ClientPath.FromPathSpec(self.client_id, pathspec)
fd_rel_db = file_store.OpenFile(cp)
self.assertEqual(b"Hello", fd_rel_db.read())
# Check that SHA256 hash of the file matches the contents
# hash and that MD5 and SHA1 are set.
history = data_store.REL_DB.ReadPathInfoHistory(cp.client_id,
cp.path_type,
cp.components)
self.assertEqual(history[-1].hash_entry.sha256,
fd_rel_db.hash_id.AsBytes())
self.assertEqual(history[-1].hash_entry.num_bytes, 5)
self.assertIsNotNone(history[-1].hash_entry.sha1)
self.assertIsNotNone(history[-1].hash_entry.md5)
def testMultiGetFileDeduplication(self):
client_mock = action_mocks.MultiGetFileClientMock()
pathspecs = []
# Make 10 files to download.
for i in range(10):
path = os.path.join(self.temp_dir, "test_%s.txt" % i)
with io.open(path, "wb") as fd:
fd.write(b"Hello")
pathspecs.append(
rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS, path=path))
# All those files are the same so the individual chunks should
# only be downloaded once. By forcing maximum_pending_files=1,
# there should only be a single TransferBuffer call.
args = transfer.MultiGetFileArgs(
pathspecs=pathspecs, maximum_pending_files=1)
flow_test_lib.TestFlowHelper(
transfer.MultiGetFile.__name__,
client_mock,
creator=self.test_username,
client_id=self.client_id,
args=args)
self.assertEqual(client_mock.action_counts["TransferBuffer"], 1)
for pathspec in pathspecs:
# Check that each referenced file can be read.
cp = db.ClientPath.FromPathSpec(self.client_id, pathspec)
fd_rel_db = file_store.OpenFile(cp)
self.assertEqual(b"Hello", fd_rel_db.read())
# Check that SHA256 hash of the file matches the contents
# hash and that MD5 and SHA1 are set.
history = data_store.REL_DB.ReadPathInfoHistory(cp.client_id,
cp.path_type,
cp.components)
self.assertEqual(history[-1].hash_entry.sha256,
fd_rel_db.hash_id.AsBytes())
self.assertEqual(history[-1].hash_entry.num_bytes, 5)
self.assertIsNotNone(history[-1].hash_entry.sha1)
self.assertIsNotNone(history[-1].hash_entry.md5)
def testExistingChunks(self):
client_mock = action_mocks.MultiGetFileClientMock()
# Make a file to download that is three chunks long.
# For the second run, we change the middle chunk. This will lead to a
# different hash for the whole file and three chunks to download of which we
# already have two.
chunk_size = transfer.MultiGetFile.CHUNK_SIZE
for data in [
b"A" * chunk_size + b"B" * chunk_size + b"C" * 100,
b"A" * chunk_size + b"X" * chunk_size + b"C" * 100
]:
path = os.path.join(self.temp_dir, "test.txt")
with io.open(path, "wb") as fd:
fd.write(data)
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS, path=path)
args = transfer.MultiGetFileArgs(pathspecs=[pathspec])
flow_test_lib.TestFlowHelper(
transfer.MultiGetFile.__name__,
client_mock,
creator=self.test_username,
client_id=self.client_id,
args=args)
cp = db.ClientPath.FromPathSpec(self.client_id, pathspec)
fd_rel_db = file_store.OpenFile(cp)
self.assertEqual(fd_rel_db.size, len(data))
self.assertEqual(fd_rel_db.read(), data)
# Check that SHA256 hash of the file matches the contents
# hash and that MD5 and SHA1 are set.
history = data_store.REL_DB.ReadPathInfoHistory(cp.client_id,
cp.path_type,
cp.components)
self.assertEqual(history[-1].hash_entry.sha256,
fd_rel_db.hash_id.AsBytes())
self.assertEqual(history[-1].hash_entry.num_bytes, len(data))
self.assertIsNotNone(history[-1].hash_entry.sha1)
self.assertIsNotNone(history[-1].hash_entry.md5)
# Three chunks to get for the first file, only one for the second.
self.assertEqual(client_mock.action_counts["TransferBuffer"], 4)
def testMultiGetFileSetsFileHashAttributeWhenMultipleChunksDownloaded(self):
client_mock = action_mocks.MultiGetFileClientMock()
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS,
path=os.path.join(self.base_path, "test_img.dd"))
expected_size = os.path.getsize(pathspec.path)
args = transfer.MultiGetFileArgs(pathspecs=[pathspec])
flow_test_lib.TestFlowHelper(
transfer.MultiGetFile.__name__,
client_mock,
creator=self.test_username,
client_id=self.client_id,
args=args)
h = hashlib.sha256()
with io.open(os.path.join(self.base_path, "test_img.dd"), "rb") as model_fd:
h.update(model_fd.read())
cp = db.ClientPath.FromPathSpec(self.client_id, pathspec)
fd_rel_db = file_store.OpenFile(cp)
self.assertEqual(fd_rel_db.hash_id.AsBytes(), h.digest())
# Check that SHA256 hash of the file matches the contents
# hash and that MD5 and SHA1 are set.
history = data_store.REL_DB.ReadPathInfoHistory(cp.client_id, cp.path_type,
cp.components)
self.assertEqual(history[-1].hash_entry.sha256, fd_rel_db.hash_id.AsBytes())
self.assertEqual(history[-1].hash_entry.num_bytes, expected_size)
self.assertIsNotNone(history[-1].hash_entry.sha1)
self.assertIsNotNone(history[-1].hash_entry.md5)
def testMultiGetFileSizeLimit(self):
client_mock = action_mocks.MultiGetFileClientMock()
image_path = os.path.join(self.base_path, "test_img.dd")
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS, path=image_path)
    # Read a bit more than one chunk (750 * 1024 bytes).
expected_size = 750 * 1024
args = transfer.MultiGetFileArgs(
pathspecs=[pathspec], file_size=expected_size)
flow_test_lib.TestFlowHelper(
transfer.MultiGetFile.__name__,
client_mock,
creator=self.test_username,
client_id=self.client_id,
args=args)
with open(image_path, "rb") as fd:
expected_data = fd.read(expected_size)
cp = db.ClientPath.FromPathSpec(self.client_id, pathspec)
fd_rel_db = file_store.OpenFile(cp)
self.assertEqual(fd_rel_db.size, expected_size)
data = fd_rel_db.read(2 * expected_size)
self.assertLen(data, expected_size)
d = hashlib.sha256()
d.update(expected_data)
self.assertEqual(fd_rel_db.hash_id.AsBytes(), d.digest())
# Check that SHA256 hash of the file matches the contents
# hash and that MD5 and SHA1 are set.
history = data_store.REL_DB.ReadPathInfoHistory(cp.client_id, cp.path_type,
cp.components)
self.assertEqual(history[-1].hash_entry.sha256, fd_rel_db.hash_id.AsBytes())
self.assertEqual(history[-1].hash_entry.num_bytes, expected_size)
self.assertIsNotNone(history[-1].hash_entry.sha1)
self.assertIsNotNone(history[-1].hash_entry.md5)
def testMultiGetFileProgressReportsFailuresAndSuccessesCorrectly(self):
client_mock = action_mocks.MultiGetFileClientMock()
image_path = os.path.join(self.base_path, "test_img.dd")
pathspec_1 = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS, path=image_path)
pathspec_2 = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS, path="/non/existing/path")
args = transfer.MultiGetFileArgs(pathspecs=[
pathspec_1,
pathspec_2,
])
flow_id = flow_test_lib.TestFlowHelper(
transfer.MultiGetFile.__name__,
client_mock,
creator=self.test_username,
client_id=self.client_id,
args=args)
f_obj = flow_test_lib.GetFlowObj(self.client_id, flow_id)
f_instance = transfer.MultiGetFile(f_obj)
p = f_instance.GetProgress()
self.assertEqual(p.num_pending_hashes, 0)
self.assertEqual(p.num_pending_files, 0)
self.assertEqual(p.num_skipped, 0)
self.assertEqual(p.num_collected, 1)
self.assertEqual(p.num_failed, 1)
# Check that pathspecs in the progress proto are returned in the same order
# as in the args proto.
self.assertEqual(p.pathspecs_progress[0].pathspec, pathspec_1)
self.assertEqual(p.pathspecs_progress[1].pathspec, pathspec_2)
# Check that per-pathspecs statuses are correct.
self.assertEqual(p.pathspecs_progress[0].status,
transfer.PathSpecProgress.Status.COLLECTED)
self.assertEqual(p.pathspecs_progress[1].status,
transfer.PathSpecProgress.Status.FAILED)
def testMultiGetFileProgressReportsSkippedDuplicatesCorrectly(self):
client_mock = action_mocks.MultiGetFileClientMock()
image_path = os.path.join(self.base_path, "test_img.dd")
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS, path=image_path)
args = transfer.MultiGetFileArgs(pathspecs=[pathspec])
# Let the flow run to make sure the file is collected.
flow_test_lib.TestFlowHelper(
transfer.MultiGetFile.__name__,
client_mock,
creator=self.test_username,
client_id=self.client_id,
args=args)
# Run the flow second time to make sure duplicates are collected.
flow_id = flow_test_lib.TestFlowHelper(
transfer.MultiGetFile.__name__,
client_mock,
creator=self.test_username,
client_id=self.client_id,
args=args)
f_obj = flow_test_lib.GetFlowObj(self.client_id, flow_id)
f_instance = transfer.MultiGetFile(f_obj)
p = f_instance.GetProgress()
self.assertEqual(p.num_collected, 0)
self.assertEqual(p.num_failed, 0)
self.assertEqual(p.num_skipped, 1)
self.assertLen(p.pathspecs_progress, 1)
self.assertEqual(p.pathspecs_progress[0].pathspec, pathspec)
self.assertEqual(p.pathspecs_progress[0].status,
transfer.PathSpecProgress.Status.SKIPPED)
@mock.patch.object(file_store.EXTERNAL_FILE_STORE, "AddFiles")
def testExternalFileStoreSubmissionIsTriggeredWhenFileIsSentToFileStore(
self, add_file_mock):
client_mock = action_mocks.GetFileClientMock()
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS,
path=os.path.join(self.base_path, "test_img.dd"))
flow_test_lib.TestFlowHelper(
compatibility.GetName(transfer.GetFile),
client_mock,
creator=self.test_username,
client_id=self.client_id,
pathspec=pathspec)
add_file_mock.assert_called_once()
args = add_file_mock.call_args_list[0][0]
hash_id = list(args[0].keys())[0]
self.assertIsInstance(hash_id, rdf_objects.SHA256HashID)
self.assertEqual(args[0][hash_id].client_path,
db.ClientPath.FromPathSpec(self.client_id, pathspec))
self.assertNotEmpty(args[0][hash_id].blob_refs)
for blob_ref in args[0][hash_id].blob_refs:
self.assertIsInstance(blob_ref, rdf_objects.BlobReference)
class DummyMultiGetFileLogic(transfer.MultiGetFileLogic, flow_base.FlowBase):
args_type = rdf_paths.PathSpec
def Start(self):
super().Start()
self.StartFileFetch(self.args)
def ReceiveFileStat(self, stat_entry, request_data=None):
pass
def ReceiveFileHash(self, stat_entry, file_hash, request_data=None):
pass
def ReceiveFetchedFile(self,
stat_entry,
file_hash,
request_data=None,
is_duplicate=False):
pass
def FileFetchFailed(self, pathspec, request_data=None, status=None):
pass
class DummyMultiGetFileLogicStat(transfer.MultiGetFileLogic,
flow_base.FlowBase):
args_type = rdf_paths.PathSpec
def Start(self):
super().Start()
self.state.stop_at_stat = True
self.StartFileFetch(self.args)
def ReceiveFileStat(self, stat_entry, request_data=None):
pass
def ReceiveFileHash(self, stat_entry, file_hash, request_data=None):
pass
def ReceiveFetchedFile(self,
stat_entry,
file_hash,
request_data=None,
is_duplicate=False):
pass
def FileFetchFailed(self, pathspec, request_data=None, status=None):
pass
class DummyMultiGetFileLogicHash(transfer.MultiGetFileLogic,
flow_base.FlowBase):
args_type = rdf_paths.PathSpec
def Start(self):
super().Start()
self.state.stop_at_hash = True
self.StartFileFetch(self.args)
def ReceiveFileStat(self, stat_entry, request_data=None):
del stat_entry, request_data # Unused.
def ReceiveFileHash(self, stat_entry, file_hash, request_data=None):
del stat_entry, file_hash, request_data # Unused.
def ReceiveFetchedFile(self,
stat_entry,
file_hash,
request_data=None,
is_duplicate=False):
del stat_entry, file_hash, request_data, is_duplicate # Unused.
def FileFetchFailed(self, pathspec, request_data=None, status=None):
del pathspec, request_data, status # Unused.
class MultiGetFileLogicTest(flow_test_lib.FlowTestsBaseclass):
"""Test the MultiGetFileLogicTest base class using DummyMultiGetFileLogic."""
def setUp(self):
super().setUp()
self.client_id = self.SetupClient(0)
self.client_mock = action_mocks.MultiGetFileClientMock()
def testStatCallsStatReceiveFileStatOnly(self):
pathtype = rdf_paths.PathSpec.PathType.OS
path = os.path.join(self.base_path, "test_img.dd")
with mock.patch.object(DummyMultiGetFileLogicStat,
"ReceiveFetchedFileStat") as dummy_fetched_stat:
with mock.patch.object(DummyMultiGetFileLogicStat,
"ReceiveFetchedFileHash") as dummy_fetched_hash:
with mock.patch.object(DummyMultiGetFileLogicStat,
"ReceiveFetchedFile") as dummy_fetched_file:
with mock.patch.object(DummyMultiGetFileLogicStat,
"FileFetchFailed") as mock_failure:
flow_test_lib.TestFlowHelper(
DummyMultiGetFileLogicStat.__name__,
self.client_mock,
creator=self.test_username,
client_id=self.client_id,
pathtype=pathtype,
path=path)
self.assertTrue(dummy_fetched_stat.called)
self.assertEqual(dummy_fetched_stat.call_args[0][0].pathspec.path,
path)
self.assertEqual(
dummy_fetched_stat.call_args[0][0].pathspec.pathtype, pathtype)
self.assertFalse(dummy_fetched_hash.called)
self.assertFalse(dummy_fetched_file.called)
self.assertFalse(mock_failure.called)
def testStatCallsFileFetchFailed(self):
pathtype = rdf_paths.PathSpec.PathType.OS
path = os.path.join(self.base_path, "invalid.dd")
with mock.patch.object(DummyMultiGetFileLogicStat,
"ReceiveFetchedFileStat") as dummy_fetched_stat:
with mock.patch.object(DummyMultiGetFileLogicStat,
"ReceiveFetchedFileHash") as dummy_fetched_hash:
with mock.patch.object(DummyMultiGetFileLogicStat,
"ReceiveFetchedFile") as dummy_fetched_file:
with mock.patch.object(DummyMultiGetFileLogicStat,
"FileFetchFailed") as mock_failure:
flow_test_lib.TestFlowHelper(
DummyMultiGetFileLogicStat.__name__,
self.client_mock,
creator=self.test_username,
client_id=self.client_id,
pathtype=pathtype,
path=path)
self.assertFalse(dummy_fetched_stat.called)
self.assertFalse(dummy_fetched_hash.called)
self.assertFalse(dummy_fetched_file.called)
self.assertTrue(mock_failure.called)
self.assertEqual(mock_failure.call_args[0][0].path, path)
self.assertEqual(mock_failure.call_args[0][0].pathtype, pathtype)
def testHashCallsReceiveFileHash(self):
pathtype = rdf_paths.PathSpec.PathType.OS
path = os.path.join(self.base_path, "test_img.dd")
with mock.patch.object(DummyMultiGetFileLogicHash,
"ReceiveFetchedFileStat") as dummy_fetched_stat:
with mock.patch.object(DummyMultiGetFileLogicHash,
"ReceiveFetchedFileHash") as dummy_fetched_hash:
with mock.patch.object(DummyMultiGetFileLogicHash,
"ReceiveFetchedFile") as dummy_fetched_file:
with mock.patch.object(DummyMultiGetFileLogicHash,
"FileFetchFailed") as mock_failure:
flow_test_lib.TestFlowHelper(
DummyMultiGetFileLogicHash.__name__,
self.client_mock,
creator=self.test_username,
client_id=self.client_id,
pathtype=pathtype,
path=path)
self.assertTrue(dummy_fetched_stat.called)
self.assertTrue(dummy_fetched_hash.called)
self.assertEqual(dummy_fetched_hash.call_args[0][0].pathspec.path,
path)
self.assertEqual(
dummy_fetched_hash.call_args[0][0].pathspec.pathtype, pathtype)
self.assertFalse(dummy_fetched_file.called)
self.assertFalse(mock_failure.called)
def testHashCallsFileFetchFailed(self):
pathtype = rdf_paths.PathSpec.PathType.OS
path = os.path.join(self.base_path, "invalid.dd")
with mock.patch.object(DummyMultiGetFileLogicHash,
"ReceiveFetchedFileStat") as dummy_fetched_stat:
with mock.patch.object(DummyMultiGetFileLogicHash,
"ReceiveFetchedFileHash") as dummy_fetched_hash:
with mock.patch.object(DummyMultiGetFileLogicHash,
"ReceiveFetchedFile") as dummy_fetched_file:
with mock.patch.object(DummyMultiGetFileLogicHash,
"FileFetchFailed") as mock_failure:
flow_test_lib.TestFlowHelper(
DummyMultiGetFileLogicHash.__name__,
self.client_mock,
creator=self.test_username,
client_id=self.client_id,
pathtype=pathtype,
path=path)
self.assertFalse(dummy_fetched_stat.called)
self.assertFalse(dummy_fetched_hash.called)
self.assertFalse(dummy_fetched_file.called)
self.assertTrue(mock_failure.called)
self.assertEqual(mock_failure.call_args[0][0].path, path)
self.assertEqual(mock_failure.call_args[0][0].pathtype, pathtype)
def testFileCallsReceiveFetchedFile(self):
pathtype = rdf_paths.PathSpec.PathType.OS
path = os.path.join(self.base_path, "test_img.dd")
with mock.patch.object(DummyMultiGetFileLogic,
"ReceiveFetchedFileStat") as dummy_fetched_stat:
with mock.patch.object(DummyMultiGetFileLogic,
"ReceiveFetchedFileHash") as dummy_fetched_hash:
with mock.patch.object(DummyMultiGetFileLogic,
"ReceiveFetchedFile") as dummy_fetched_file:
with mock.patch.object(DummyMultiGetFileLogic,
"FileFetchFailed") as mock_failure:
flow_test_lib.TestFlowHelper(
DummyMultiGetFileLogic.__name__,
self.client_mock,
creator=self.test_username,
client_id=self.client_id,
pathtype=pathtype,
path=path)
self.assertTrue(dummy_fetched_stat.called)
self.assertTrue(dummy_fetched_hash.called)
self.assertTrue(dummy_fetched_file.called)
self.assertEqual(dummy_fetched_file.call_args[0][0].pathspec.path,
path)
self.assertEqual(
dummy_fetched_file.call_args[0][0].pathspec.pathtype, pathtype)
self.assertFalse(mock_failure.called)
def testFileCallsFileFetchFailed(self):
pathtype = rdf_paths.PathSpec.PathType.OS
path = os.path.join(self.base_path, "invalid.dd")
with mock.patch.object(DummyMultiGetFileLogic,
"ReceiveFetchedFileStat") as dummy_fetched_stat:
with mock.patch.object(DummyMultiGetFileLogic,
"ReceiveFetchedFileHash") as dummy_fetched_hash:
with mock.patch.object(DummyMultiGetFileLogic,
"ReceiveFetchedFile") as dummy_fetched_file:
with mock.patch.object(DummyMultiGetFileLogic,
"FileFetchFailed") as mock_failure:
flow_test_lib.TestFlowHelper(
DummyMultiGetFileLogic.__name__,
self.client_mock,
creator=self.test_username,
client_id=self.client_id,
pathtype=pathtype,
path=path)
self.assertFalse(dummy_fetched_stat.called)
self.assertFalse(dummy_fetched_hash.called)
self.assertFalse(dummy_fetched_file.called)
self.assertTrue(mock_failure.called)
self.assertEqual(mock_failure.call_args[0][0].path, path)
self.assertEqual(mock_failure.call_args[0][0].pathtype, pathtype)
def main(argv):
# Run the full test suite
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
google/grr
|
grr/server/grr_response_server/flows/general/transfer_test.py
|
Python
|
apache-2.0
| 49,622
|
# # stdlib
# from typing import Any
# from typing import Dict
# from typing import Iterable
# from typing import List
# from typing import Tuple
# # third party
# import pytest
# # syft absolute
# from syft.core.smpc.store import CryptoStore
# from syft.core.smpc.store import register_primitive_store_add
# from syft.core.smpc.store import register_primitive_store_get
# # Rasswanth : Fix tests after solving .get() issues
# @pytest.mark.skip
# @pytest.mark.smpc
# @register_primitive_store_get("test_crypto_store")
# def provider_test_get(
# store: Dict[str, List[Any]], nr_instances: int
# ) -> List[Tuple[int]]:
# return [store["test_key_store"][i] for i in range(nr_instances)]
# @pytest.mark.skip
# @register_primitive_store_add("test_crypto_store")
# def provider_test_add(
# store: Dict[str, List[Any]], primitives: Iterable[Any]
# ) -> List[Tuple[int]]:
# store["test_key_store"] = primitives
# @pytest.mark.skip
# def test_add_store() -> None:
# crypto_store = CryptoStore()
# primitives = list(range(100))
# crypto_store.populate_store("test_crypto_store", primitives)
# crypto_store.store["test_key_store"] == primitives
# @pytest.mark.skip
# @pytest.mark.parametrize("nr_instances", [1, 5, 7, 100])
# def test_get_store(nr_instances: int) -> None:
# crypto_store = CryptoStore()
# primitives = list(range(100))
# crypto_store.store["test_key_store"] = primitives
# primitives_store = crypto_store.get_primitives_from_store(
# "test_crypto_store", nr_instances
# )
# assert primitives[:nr_instances] == primitives_store
|
OpenMined/PySyft
|
tests/integration/smpc/store/crypto_store_test.py
|
Python
|
apache-2.0
| 1,613
|
"""
Django settings for pybr11_tutorial project.
Generated by 'django-admin startproject' using Django 1.8.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '4b5prz%55i#ay!qf=7w=61p^am-4a_jknjf8&jzu1d6ib@-*d^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pybr11_tutorial',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'pybr11_tutorial.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pybr11_tutorial.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
|
Rondineli/pybr11_tutorial
|
pybr11_tutorial/pybr11_tutorial/settings.py
|
Python
|
apache-2.0
| 2,688
|
import random
def append_letter_or_number():
alphabet = ['a','b','c','d','e','f']
use_number = 0
use_letter = 1
letter_or_string = random.randrange(2)
if letter_or_string == use_number:
        result = str(random.randrange(0, 10))  # a decimal digit, 0-9 inclusive
elif letter_or_string == use_letter:
next_character = random.randrange(len(alphabet))
result = str(alphabet[next_character])
else:
print("Uh-oh! You've got a bug. This should have selected number or letter.")
return -1
return result
# Generates a random 7-byte NFC ID tag (seven ':'-separated hex pairs) when an NFC tag is unavailable.
def create_nfc_tag():
random_nfc_tag = ''
tag_size = 7 # number of hex pairs
end_of_tag = tag_size - 1
current_byte = 0
byte_half = 0
while current_byte < tag_size:
while byte_half != 2:
random_nfc_tag += append_letter_or_number()
byte_half += 1
if current_byte != end_of_tag:
random_nfc_tag += ':'
current_byte += 1
byte_half = 0
return random_nfc_tag
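# Illustrative usage sketch (not part of the original module): the helpers above
# are self-contained, so a quick manual check can simply print a few tags.
if __name__ == '__main__':
    # Each tag is 7 hex pairs joined by ':', e.g. '3f:0a:b1:07:c2:5d:9e'
    # (the exact value is random on every call).
    for _ in range(3):
        print(create_nfc_tag())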
|
ossem/member_database
|
nfc_lib.py
|
Python
|
apache-2.0
| 1,047
|
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Motor INA219 hardware monitor configuration."""
from makani.avionics.firmware.drivers import ina219_types
from makani.avionics.firmware.serial import motor_serial_params as rev
ina219_default = {
'name': '',
'address': 0x0,
'shunt_resistor': 0.01,
'bus_voltage': ina219_types.kIna219BusVoltage16V,
'range': ina219_types.kIna219Range40mv,
'bus_adc': ina219_types.kIna219Adc128Samples,
'shunt_adc': ina219_types.kIna219Adc128Samples,
'mode': ina219_types.kIna219ModeShuntAndBusContinuous,
'current_max': -1,
'voltage_limits_percent': [95, 105],
}
ina219_16v_40mv = dict(ina219_default, **{
'bus_voltage': ina219_types.kIna219BusVoltage16V,
'range': ina219_types.kIna219Range40mv,
})
ina219_16v_80mv = dict(ina219_default, **{
'bus_voltage': ina219_types.kIna219BusVoltage16V,
'range': ina219_types.kIna219Range80mv,
})
ina219_32v_40mv = dict(ina219_default, **{
'bus_voltage': ina219_types.kIna219BusVoltage32V,
'range': ina219_types.kIna219Range40mv,
})
ina219_32v_160mv = dict(ina219_default, **{
'bus_voltage': ina219_types.kIna219BusVoltage32V,
'range': ina219_types.kIna219Range160mv,
})
gin_a1 = [
dict(ina219_32v_40mv, name='12v', address=0x40, shunt_resistor=0.012),
dict(ina219_16v_40mv, name='1v2', address=0x42, shunt_resistor=0.02),
dict(ina219_16v_40mv, name='3v3', address=0x45, shunt_resistor=0.02),
]
gin_a2 = gin_a1
gin_a3 = [
dict(ina219_32v_160mv, name='12v', address=0x41, shunt_resistor=0.05),
dict(ina219_16v_80mv, name='1v2', address=0x42, shunt_resistor=0.05),
dict(ina219_16v_80mv, name='3v3', address=0x45, shunt_resistor=0.05),
]
ina219_config = (rev.MotorHardware, {
rev.MotorHardware.GIN_A1: gin_a1,
rev.MotorHardware.GIN_A2: gin_a2,
rev.MotorHardware.GIN_A3: gin_a3,
rev.MotorHardware.GIN_A4_CLK16: gin_a3,
rev.MotorHardware.GIN_A4_CLK8: gin_a3,
rev.MotorHardware.OZONE_A1: gin_a3,
})
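# Illustrative note (not part of the original Makani config): every entry above is
# built by layering dict() copies; dict(base, **overrides) duplicates the shared
# ina219_default settings and then applies only the fields given explicitly at that
# step. A quick way to inspect one fully merged entry:
if __name__ == '__main__':
  import pprint
  pprint.pprint(gin_a1[0])  # the merged '12v' monitor configuration for GIN_A1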
|
google/makani
|
avionics/motor/monitors/motor_ina219.py
|
Python
|
apache-2.0
| 2,535
|
#!/usr/bin/env python3
"""
Lazy 'tox' to quickly check if branch is up to PR standards.
This is NOT a tox replacement, only a quick check during development.
"""
import os
import asyncio
import sys
import re
import shlex
from collections import namedtuple
try:
from colorlog.escape_codes import escape_codes
except ImportError:
escape_codes = None
RE_ASCII = re.compile(r"\033\[[^m]*m")
Error = namedtuple("Error", ["file", "line", "col", "msg", "skip"])
PASS = "green"
FAIL = "bold_red"
def printc(the_color, *args):
"""Color print helper."""
msg = " ".join(args)
if not escape_codes:
print(msg)
return
try:
print(escape_codes[the_color] + msg + escape_codes["reset"])
except KeyError:
print(msg)
raise ValueError("Invalid color {}".format(the_color))
def validate_requirements_ok():
"""Validate requirements, returns True of ok."""
from gen_requirements_all import main as req_main
return req_main(True) == 0
async def read_stream(stream, display):
"""Read from stream line by line until EOF, display, and capture lines."""
output = []
while True:
line = await stream.readline()
if not line:
break
output.append(line)
display(line.decode()) # assume it doesn't block
return b"".join(output)
async def async_exec(*args, display=False):
"""Execute, return code & log."""
argsp = []
for arg in args:
if os.path.isfile(arg):
argsp.append("\\\n {}".format(shlex.quote(arg)))
else:
argsp.append(shlex.quote(arg))
printc("cyan", *argsp)
try:
kwargs = {
"loop": LOOP,
"stdout": asyncio.subprocess.PIPE,
"stderr": asyncio.subprocess.STDOUT,
}
if display:
kwargs["stderr"] = asyncio.subprocess.PIPE
proc = await asyncio.create_subprocess_exec(*args, **kwargs)
except FileNotFoundError as err:
printc(
FAIL,
"Could not execute {}. Did you install test requirements?".format(args[0]),
)
raise err
if not display:
        # Read stdout into log
stdout, _ = await proc.communicate()
else:
# read child's stdout/stderr concurrently (capture and display)
stdout, _ = await asyncio.gather(
read_stream(proc.stdout, sys.stdout.write),
read_stream(proc.stderr, sys.stderr.write),
)
exit_code = await proc.wait()
stdout = stdout.decode("utf-8")
return exit_code, stdout
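# Illustrative usage (not from the original script): callers await async_exec()
# and unpack the exit code plus the captured output, e.g.
#   code, log = await async_exec("git", "status", "--short")
#   code, _ = await async_exec("pytest", "-q", display=True)  # also streams output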
async def git():
"""Exec git."""
if len(sys.argv) > 2 and sys.argv[1] == "--":
return sys.argv[2:]
_, log = await async_exec("git", "merge-base", "upstream/dev", "HEAD")
merge_base = log.splitlines()[0]
_, log = await async_exec("git", "diff", merge_base, "--name-only")
return log.splitlines()
async def pylint(files):
"""Exec pylint."""
_, log = await async_exec("pylint", "-f", "parseable", "--persistent=n", *files)
res = []
for line in log.splitlines():
line = line.split(":")
if len(line) < 3:
continue
_fn = line[0].replace("\\", "/")
res.append(Error(_fn, line[1], "", line[2].strip(), _fn.startswith("tests/")))
return res
async def flake8(files):
"""Exec flake8."""
_, log = await async_exec("flake8", "--doctests", *files)
res = []
for line in log.splitlines():
line = line.split(":")
if len(line) < 4:
continue
_fn = line[0].replace("\\", "/")
res.append(Error(_fn, line[1], line[2], line[3].strip(), False))
return res
async def lint(files):
"""Perform lint."""
files = [file for file in files if os.path.isfile(file)]
fres, pres = await asyncio.gather(flake8(files), pylint(files))
res = fres + pres
res.sort(key=lambda item: item.file)
if res:
print("Pylint & Flake8 errors:")
else:
printc(PASS, "Pylint and Flake8 passed")
lint_ok = True
for err in res:
err_msg = "{} {}:{} {}".format(err.file, err.line, err.col, err.msg)
# tests/* does not have to pass lint
if err.skip:
print(err_msg)
else:
printc(FAIL, err_msg)
lint_ok = False
return lint_ok
async def main():
"""Run the main loop."""
# Ensure we are in the homeassistant root
os.chdir(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
files = await git()
if not files:
print(
"No changed files found. Please ensure you have added your "
"changes with git add & git commit"
)
return
pyfile = re.compile(r".+\.py$")
pyfiles = [file for file in files if pyfile.match(file)]
print("=============================")
printc("bold", "CHANGED FILES:\n", "\n ".join(pyfiles))
print("=============================")
skip_lint = len(sys.argv) > 1 and sys.argv[1] == "--skiplint"
if skip_lint:
printc(FAIL, "LINT DISABLED")
elif not await lint(pyfiles):
printc(FAIL, "Please fix your lint issues before continuing")
return
test_files = set()
gen_req = False
for fname in pyfiles:
if fname.startswith("homeassistant/components/"):
gen_req = True # requirements script for components
# Find test files...
if fname.startswith("tests/"):
if "/test_" in fname and os.path.isfile(fname):
# All test helpers should be excluded
test_files.add(fname)
else:
parts = fname.split("/")
parts[0] = "tests"
if parts[-1] == "__init__.py":
parts[-1] = "test_init.py"
elif parts[-1] == "__main__.py":
parts[-1] = "test_main.py"
else:
parts[-1] = "test_" + parts[-1]
fname = "/".join(parts)
if os.path.isfile(fname):
test_files.add(fname)
if gen_req:
print("=============================")
if validate_requirements_ok():
printc(PASS, "script/gen_requirements.py passed")
else:
printc(FAIL, "Please run script/gen_requirements.py")
return
print("=============================")
if not test_files:
print("No test files identified, ideally you should run tox")
return
code, _ = await async_exec(
"pytest", "-vv", "--force-sugar", "--", *test_files, display=True
)
print("=============================")
if code == 0:
printc(PASS, "Yay! This will most likely pass tox")
else:
printc(FAIL, "Tests not passing")
if skip_lint:
printc(FAIL, "LINT DISABLED")
if __name__ == "__main__":
LOOP = (
asyncio.ProactorEventLoop()
if sys.platform == "win32"
else asyncio.get_event_loop()
)
try:
LOOP.run_until_complete(main())
except (FileNotFoundError, KeyboardInterrupt):
pass
finally:
LOOP.close()
|
fbradyirl/home-assistant
|
script/lazytox.py
|
Python
|
apache-2.0
| 7,111
|
"""
Define the Python wrapper-functions which provide an interface to the C++
implementations.
"""
from .present import CPP_BINDINGS_PRESENT
from .imager import cpp_image_visibilities, CppKernelFuncs
|
SKA-ScienceDataProcessor/FastImaging-Python
|
src/fastimgproto/bindings/__init__.py
|
Python
|
apache-2.0
| 200
|
from django.db import models
from django.utils.translation import ugettext as _
# from django.core.urlresolvers import reverse_lazy, reverse
from django.conf import settings
from projects.models import Project, Customer
class Report (models.Model):
HIGHLIGHT = 'HL'
LOWLIGHT = 'LL'
ESCALATION = 'XS'
LIGHTS = (
(HIGHLIGHT, _('Highlight')),
(LOWLIGHT, _('Lowlight')),
(ESCALATION,_('Escalation')),
)
year = models.PositiveIntegerField (null=False, blank=False,
verbose_name=_("year"))
period = models.PositiveIntegerField (null=False, blank=False,
verbose_name=_("period"))
light = models.CharField (max_length=2, choices=LIGHTS, default=HIGHLIGHT)
description = models.TextField (null=False, blank=True, verbose_name=_("description"))
created = models.DateTimeField (auto_now_add=True)
created_by = models.ForeignKey (settings.AUTH_USER_MODEL, related_name='+', editable=False)
modified = models.DateTimeField (auto_now=True)
modified_by = models.ForeignKey (settings.AUTH_USER_MODEL, related_name='+', editable=False)
class Meta:
verbose_name = _('Report')
verbose_name_plural = _('Reports')
        ordering = ['light', 'year', 'period']
unique_together = ("target", "year", "period")
abstract = True
    def __str__(self):
        # The model has no 'title' field, so build a short label from fields
        # that do exist.
        text = self.description if len(self.description) < 30 else self.description[:27] + '...'
        return "%s %s/%s: %s" % (self.get_light_display(), self.year, self.period, text)
# def get_absolute_url (self):
# return reverse('reporting-detail', args=[str(self.id)])
class ProjectReport (Report):
target = models.ForeignKey (Project, related_name='reports')
class Meta:
verbose_name = _('Project report')
verbose_name_plural = _('Project reports')
class CustomerReport (Report):
target = models.ForeignKey (Customer, related_name='reports')
class Meta:
verbose_name = _('Customer report')
verbose_name_plural = _('Customer reports')
|
jjagielka/reporter
|
src/reports/models.py
|
Python
|
apache-2.0
| 2,148
|
# Copyright 2012-2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library of random helper functionality."""
import functools
import sys
import stat
import time
import platform, subprocess, operator, os, shutil, re
import collections
from enum import Enum
from functools import lru_cache
from mesonbuild import mlog
have_fcntl = False
have_msvcrt = False
# {subproject: project_meson_version}
project_meson_versions = {}
try:
import fcntl
have_fcntl = True
except Exception:
pass
try:
import msvcrt
have_msvcrt = True
except Exception:
pass
from glob import glob
if os.path.basename(sys.executable) == 'meson.exe':
# In Windows and using the MSI installed executable.
python_command = [sys.executable, 'runpython']
else:
python_command = [sys.executable]
meson_command = None
def set_meson_command(mainfile):
global python_command
global meson_command
# On UNIX-like systems `meson` is a Python script
# On Windows `meson` and `meson.exe` are wrapper exes
if not mainfile.endswith('.py'):
meson_command = [mainfile]
elif os.path.isabs(mainfile) and mainfile.endswith('mesonmain.py'):
# Can't actually run meson with an absolute path to mesonmain.py, it must be run as -m mesonbuild.mesonmain
meson_command = python_command + ['-m', 'mesonbuild.mesonmain']
else:
# Either run uninstalled, or full path to meson-script.py
meson_command = python_command + [mainfile]
# We print this value for unit tests.
if 'MESON_COMMAND_TESTS' in os.environ:
mlog.log('meson_command is {!r}'.format(meson_command))
def is_ascii_string(astring):
try:
if isinstance(astring, str):
astring.encode('ascii')
if isinstance(astring, bytes):
astring.decode('ascii')
except UnicodeDecodeError:
return False
return True
def check_direntry_issues(direntry_array):
import locale
# Warn if the locale is not UTF-8. This can cause various unfixable issues
# such as os.stat not being able to decode filenames with unicode in them.
# There is no way to reset both the preferred encoding and the filesystem
# encoding, so we can just warn about it.
e = locale.getpreferredencoding()
if e.upper() != 'UTF-8' and not is_windows():
if not isinstance(direntry_array, list):
direntry_array = [direntry_array]
for de in direntry_array:
if is_ascii_string(de):
continue
            mlog.warning('''You are using {!r} which is not a Unicode-compatible
locale but you are trying to access a file system entry called {!r} which is
not pure ASCII. This may cause problems.
'''.format(e, de), file=sys.stderr)
# Put this in objects that should not get dumped to pickle files
# by accident.
import threading
an_unpicklable_object = threading.Lock()
class MesonException(Exception):
'''Exceptions thrown by Meson'''
def get_msg_with_context(self):
s = ''
if hasattr(self, 'lineno') and hasattr(self, 'file'):
s = get_error_location_string(self.file, self.lineno) + ' '
s += str(self)
return s
class EnvironmentException(MesonException):
'''Exceptions thrown while processing and creating the build environment'''
class FileMode:
# The first triad is for owner permissions, the second for group permissions,
# and the third for others (everyone else).
# For the 1st character:
# 'r' means can read
# '-' means not allowed
# For the 2nd character:
# 'w' means can write
# '-' means not allowed
# For the 3rd character:
# 'x' means can execute
# 's' means can execute and setuid/setgid is set (owner/group triads only)
# 'S' means cannot execute and setuid/setgid is set (owner/group triads only)
# 't' means can execute and sticky bit is set ("others" triads only)
# 'T' means cannot execute and sticky bit is set ("others" triads only)
# '-' means none of these are allowed
#
# The meanings of 'rwx' perms is not obvious for directories; see:
# https://www.hackinglinuxexposed.com/articles/20030424.html
#
# For information on this notation such as setuid/setgid/sticky bits, see:
# https://en.wikipedia.org/wiki/File_system_permissions#Symbolic_notation
symbolic_perms_regex = re.compile('[r-][w-][xsS-]' # Owner perms
'[r-][w-][xsS-]' # Group perms
'[r-][w-][xtT-]') # Others perms
def __init__(self, perms=None, owner=None, group=None):
self.perms_s = perms
self.perms = self.perms_s_to_bits(perms)
self.owner = owner
self.group = group
def __repr__(self):
ret = '<FileMode: {!r} owner={} group={}'
return ret.format(self.perms_s, self.owner, self.group)
@classmethod
def perms_s_to_bits(cls, perms_s):
'''
Does the opposite of stat.filemode(), converts strings of the form
'rwxr-xr-x' to st_mode enums which can be passed to os.chmod()
'''
if perms_s is None:
# No perms specified, we will not touch the permissions
return -1
eg = 'rwxr-xr-x'
if not isinstance(perms_s, str):
msg = 'Install perms must be a string. For example, {!r}'
raise MesonException(msg.format(eg))
if len(perms_s) != 9 or not cls.symbolic_perms_regex.match(perms_s):
msg = 'File perms {!r} must be exactly 9 chars. For example, {!r}'
raise MesonException(msg.format(perms_s, eg))
perms = 0
# Owner perms
if perms_s[0] == 'r':
perms |= stat.S_IRUSR
if perms_s[1] == 'w':
perms |= stat.S_IWUSR
if perms_s[2] == 'x':
perms |= stat.S_IXUSR
elif perms_s[2] == 'S':
perms |= stat.S_ISUID
elif perms_s[2] == 's':
perms |= stat.S_IXUSR
perms |= stat.S_ISUID
# Group perms
if perms_s[3] == 'r':
perms |= stat.S_IRGRP
if perms_s[4] == 'w':
perms |= stat.S_IWGRP
if perms_s[5] == 'x':
perms |= stat.S_IXGRP
elif perms_s[5] == 'S':
perms |= stat.S_ISGID
elif perms_s[5] == 's':
perms |= stat.S_IXGRP
perms |= stat.S_ISGID
# Others perms
if perms_s[6] == 'r':
perms |= stat.S_IROTH
if perms_s[7] == 'w':
perms |= stat.S_IWOTH
if perms_s[8] == 'x':
perms |= stat.S_IXOTH
elif perms_s[8] == 'T':
perms |= stat.S_ISVTX
elif perms_s[8] == 't':
perms |= stat.S_IXOTH
perms |= stat.S_ISVTX
return perms
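# Illustrative sketch (not part of Meson): per the docstring above,
# perms_s_to_bits() is the inverse of stat.filemode() (minus the leading
# file-type character), so a familiar mode string maps back to the numeric
# permissions accepted by os.chmod().
def _filemode_example():
    mode = FileMode.perms_s_to_bits('rwxr-xr-x')
    assert mode == 0o755
    assert stat.filemode(mode)[1:] == 'rwxr-xr-x'
    return mode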
class File:
def __init__(self, is_built, subdir, fname):
self.is_built = is_built
self.subdir = subdir
self.fname = fname
assert(isinstance(self.subdir, str))
assert(isinstance(self.fname, str))
def __str__(self):
return self.relative_name()
def __repr__(self):
ret = '<File: {0}'
if not self.is_built:
ret += ' (not built)'
ret += '>'
return ret.format(self.relative_name())
@staticmethod
@lru_cache(maxsize=None)
def from_source_file(source_root, subdir, fname):
if not os.path.isfile(os.path.join(source_root, subdir, fname)):
raise MesonException('File %s does not exist.' % fname)
return File(False, subdir, fname)
@staticmethod
def from_built_file(subdir, fname):
return File(True, subdir, fname)
@staticmethod
def from_absolute_file(fname):
return File(False, '', fname)
@lru_cache(maxsize=None)
def rel_to_builddir(self, build_to_src):
if self.is_built:
return self.relative_name()
else:
return os.path.join(build_to_src, self.subdir, self.fname)
@lru_cache(maxsize=None)
def absolute_path(self, srcdir, builddir):
absdir = srcdir
if self.is_built:
absdir = builddir
return os.path.join(absdir, self.relative_name())
def endswith(self, ending):
return self.fname.endswith(ending)
def split(self, s):
return self.fname.split(s)
def __eq__(self, other):
return (self.fname, self.subdir, self.is_built) == (other.fname, other.subdir, other.is_built)
def __hash__(self):
return hash((self.fname, self.subdir, self.is_built))
@lru_cache(maxsize=None)
def relative_name(self):
return os.path.join(self.subdir, self.fname)
def get_compiler_for_source(compilers, src):
for comp in compilers:
if comp.can_compile(src):
return comp
raise MesonException('No specified compiler can handle file {!s}'.format(src))
def classify_unity_sources(compilers, sources):
compsrclist = {}
for src in sources:
comp = get_compiler_for_source(compilers, src)
if comp not in compsrclist:
compsrclist[comp] = [src]
else:
compsrclist[comp].append(src)
return compsrclist
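# Illustrative sketch (not part of Meson): any object exposing can_compile()
# works with the two helpers above; the stand-in compilers below are purely
# hypothetical and simply group sources by file extension.
def _classify_example():
    class _FakeCompiler:
        def __init__(self, exts):
            self.exts = exts
        def can_compile(self, src):
            return os.path.splitext(str(src))[1] in self.exts
    cc = _FakeCompiler({'.c'})
    cpp = _FakeCompiler({'.cc', '.cpp'})
    grouped = classify_unity_sources([cc, cpp], ['a.c', 'b.c', 'c.cpp'])
    assert grouped == {cc: ['a.c', 'b.c'], cpp: ['c.cpp']}
    return grouped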
class OrderedEnum(Enum):
"""
An Enum which additionally offers homogeneous ordered comparison.
"""
def __ge__(self, other):
if self.__class__ is other.__class__:
return self.value >= other.value
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self.value > other.value
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self.value <= other.value
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self.value < other.value
return NotImplemented
MachineChoice = OrderedEnum('MachineChoice', ['BUILD', 'HOST', 'TARGET'])
class PerMachine:
def __init__(self, build, host, target):
self.build = build
self.host = host
self.target = target
def __getitem__(self, machine: MachineChoice):
return {
MachineChoice.BUILD: self.build,
MachineChoice.HOST: self.host,
MachineChoice.TARGET: self.target
}[machine]
def __setitem__(self, machine: MachineChoice, val):
key = {
MachineChoice.BUILD: 'build',
MachineChoice.HOST: 'host',
MachineChoice.TARGET: 'target'
}[machine]
setattr(self, key, val)
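# Illustrative sketch (not part of Meson): MachineChoice is an OrderedEnum, so
# its members compare by declaration order, and PerMachine keeps one value per
# machine that can be read or written using MachineChoice keys.
def _machine_choice_example():
    assert MachineChoice.BUILD < MachineChoice.HOST < MachineChoice.TARGET
    libdirs = PerMachine('build-lib', 'host-lib', 'target-lib')
    assert libdirs[MachineChoice.HOST] == 'host-lib'
    libdirs[MachineChoice.TARGET] = 'cross-lib'
    return libdirs.target  # now 'cross-lib'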
def is_osx():
return platform.system().lower() == 'darwin'
def is_linux():
return platform.system().lower() == 'linux'
def is_android():
return platform.system().lower() == 'android'
def is_haiku():
return platform.system().lower() == 'haiku'
def is_openbsd():
return platform.system().lower() == 'openbsd'
def is_windows():
platname = platform.system().lower()
return platname == 'windows' or 'mingw' in platname
def is_cygwin():
platname = platform.system().lower()
return platname.startswith('cygwin')
def is_debianlike():
return os.path.isfile('/etc/debian_version')
def is_dragonflybsd():
return platform.system().lower() == 'dragonfly'
def is_freebsd():
return platform.system().lower() == 'freebsd'
def _get_machine_is_cross(env, is_cross):
"""
This is not morally correct, but works for now. For cross builds the build
and host machines differ. `is_cross == true` means the host machine, while
`is_cross == false` means the build machine. Both are used in practice,
even though the documentation refers to the host machine implying we should
hard-code it. For non-cross builds `is_cross == false` is passed but the
host and build machines are identical so it doesn't matter.
    Users of `for_*` should instead specify up front which machine they want
and query that like:
env.machines[MachineChoice.HOST].is_haiku()
"""
for_machine = MachineChoice.HOST if is_cross else MachineChoice.BUILD
return env.machines[for_machine]
def for_windows(is_cross, env):
"""
Host machine is windows?
Deprecated: Please use `env.machines[for_machine].is_windows()`.
Note: 'host' is the machine on which compiled binaries will run
"""
return _get_machine_is_cross(env, is_cross).is_windows()
def for_cygwin(is_cross, env):
"""
Host machine is cygwin?
Deprecated: Please use `env.machines[for_machine].is_cygwin()`.
Note: 'host' is the machine on which compiled binaries will run
"""
return _get_machine_is_cross(env, is_cross).is_cygwin()
def for_linux(is_cross, env):
"""
Host machine is linux?
Deprecated: Please use `env.machines[for_machine].is_linux()`.
Note: 'host' is the machine on which compiled binaries will run
"""
return _get_machine_is_cross(env, is_cross).is_linux()
def for_darwin(is_cross, env):
"""
Host machine is Darwin (iOS/OS X)?
Deprecated: Please use `env.machines[for_machine].is_darwin()`.
Note: 'host' is the machine on which compiled binaries will run
"""
return _get_machine_is_cross(env, is_cross).is_darwin()
def for_android(is_cross, env):
"""
Host machine is Android?
Deprecated: Please use `env.machines[for_machine].is_android()`.
Note: 'host' is the machine on which compiled binaries will run
"""
return _get_machine_is_cross(env, is_cross).is_android()
def for_haiku(is_cross, env):
"""
Host machine is Haiku?
Deprecated: Please use `env.machines[for_machine].is_haiku()`.
Note: 'host' is the machine on which compiled binaries will run
"""
return _get_machine_is_cross(env, is_cross).is_haiku()
def for_openbsd(is_cross, env):
"""
Host machine is OpenBSD?
Deprecated: Please use `env.machines[for_machine].is_openbsd()`.
Note: 'host' is the machine on which compiled binaries will run
"""
return _get_machine_is_cross(env, is_cross).is_openbsd()
def exe_exists(arglist):
try:
p = subprocess.Popen(arglist, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
if p.returncode == 0:
return True
except FileNotFoundError:
pass
return False
def detect_vcs(source_dir):
vcs_systems = [
dict(name = 'git', cmd = 'git', repo_dir = '.git', get_rev = 'git describe --dirty=+', rev_regex = '(.*)', dep = '.git/logs/HEAD'),
dict(name = 'mercurial', cmd = 'hg', repo_dir = '.hg', get_rev = 'hg id -i', rev_regex = '(.*)', dep = '.hg/dirstate'),
dict(name = 'subversion', cmd = 'svn', repo_dir = '.svn', get_rev = 'svn info', rev_regex = 'Revision: (.*)', dep = '.svn/wc.db'),
dict(name = 'bazaar', cmd = 'bzr', repo_dir = '.bzr', get_rev = 'bzr revno', rev_regex = '(.*)', dep = '.bzr'),
]
segs = source_dir.replace('\\', '/').split('/')
for i in range(len(segs), -1, -1):
curdir = '/'.join(segs[:i])
for vcs in vcs_systems:
if os.path.isdir(os.path.join(curdir, vcs['repo_dir'])) and shutil.which(vcs['cmd']):
vcs['wc_dir'] = curdir
return vcs
return None
# a helper class which implements the same version ordering as RPM
@functools.total_ordering
class Version:
def __init__(self, s):
self._s = s
# split into numeric, alphabetic and non-alphanumeric sequences
sequences = re.finditer(r'(\d+|[a-zA-Z]+|[^a-zA-Z\d]+)', s)
# non-alphanumeric separators are discarded
sequences = [m for m in sequences if not re.match(r'[^a-zA-Z\d]+', m.group(1))]
# numeric sequences have leading zeroes discarded
sequences = [re.sub(r'^0+(\d)', r'\1', m.group(1), 1) for m in sequences]
self._v = sequences
def __str__(self):
return '%s (V=%s)' % (self._s, str(self._v))
def __lt__(self, other):
return self.__cmp__(other) == -1
def __eq__(self, other):
return self.__cmp__(other) == 0
def __cmp__(self, other):
def cmp(a, b):
return (a > b) - (a < b)
# compare each sequence in order
for i in range(0, min(len(self._v), len(other._v))):
# sort a non-digit sequence before a digit sequence
if self._v[i].isdigit() != other._v[i].isdigit():
return 1 if self._v[i].isdigit() else -1
# compare as numbers
if self._v[i].isdigit():
# because leading zeros have already been removed, if one number
# has more digits, it is greater
c = cmp(len(self._v[i]), len(other._v[i]))
if c != 0:
return c
# fallthrough
# compare lexicographically
c = cmp(self._v[i], other._v[i])
if c != 0:
return c
# if equal length, all components have matched, so equal
# otherwise, the version with a suffix remaining is greater
return cmp(len(self._v), len(other._v))
def _version_extract_cmpop(vstr2):
if vstr2.startswith('>='):
cmpop = operator.ge
vstr2 = vstr2[2:]
elif vstr2.startswith('<='):
cmpop = operator.le
vstr2 = vstr2[2:]
elif vstr2.startswith('!='):
cmpop = operator.ne
vstr2 = vstr2[2:]
elif vstr2.startswith('=='):
cmpop = operator.eq
vstr2 = vstr2[2:]
elif vstr2.startswith('='):
cmpop = operator.eq
vstr2 = vstr2[1:]
elif vstr2.startswith('>'):
cmpop = operator.gt
vstr2 = vstr2[1:]
elif vstr2.startswith('<'):
cmpop = operator.lt
vstr2 = vstr2[1:]
else:
cmpop = operator.eq
return (cmpop, vstr2)
def version_compare(vstr1, vstr2):
(cmpop, vstr2) = _version_extract_cmpop(vstr2)
return cmpop(Version(vstr1), Version(vstr2))
def version_compare_many(vstr1, conditions):
if not isinstance(conditions, (list, tuple, frozenset)):
conditions = [conditions]
found = []
not_found = []
for req in conditions:
if not version_compare(vstr1, req):
not_found.append(req)
else:
found.append(req)
return not_found == [], not_found, found
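# Illustrative sketch (not part of Meson): with the RPM-style ordering above,
# numeric components compare as numbers rather than strings, and a condition
# without an operator prefix is treated as equality by _version_extract_cmpop().
def _version_compare_example():
    assert Version('1.10.0') > Version('1.9.8')   # 10 > 9 numerically
    assert version_compare('2.7.4', '>=2.7')
    ok, not_found, found = version_compare_many('1.2.3', ['>=1.0', '<2.0', '!=1.2.4'])
    assert ok and not not_found and len(found) == 3
    return ok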
# determine if the minimum version satisfying the condition |condition| exceeds
# the minimum version for a feature |minimum|
def version_compare_condition_with_min(condition, minimum):
if condition.startswith('>='):
cmpop = operator.le
condition = condition[2:]
elif condition.startswith('<='):
return False
elif condition.startswith('!='):
return False
elif condition.startswith('=='):
cmpop = operator.le
condition = condition[2:]
elif condition.startswith('='):
cmpop = operator.le
condition = condition[1:]
elif condition.startswith('>'):
cmpop = operator.lt
condition = condition[1:]
elif condition.startswith('<'):
return False
else:
cmpop = operator.le
# Declaring a project(meson_version: '>=0.46') and then using features in
# 0.46.0 is valid, because (knowing the meson versioning scheme) '0.46.0' is
# the lowest version which satisfies the constraint '>=0.46'.
#
# But this will fail here, because the minimum version required by the
# version constraint ('0.46') is strictly less (in our version comparison)
# than the minimum version needed for the feature ('0.46.0').
#
# Map versions in the constraint of the form '0.46' to '0.46.0', to embed
# this knowledge of the meson versioning scheme.
condition = condition.strip()
    if re.match(r'^\d+\.\d+$', condition):
condition += '.0'
return cmpop(Version(minimum), Version(condition))
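# Illustrative sketch (not part of Meson): the '0.46' -> '0.46.0' mapping
# explained above is what lets a project constraint such as '>=0.46' count as
# new enough for a feature introduced in 0.46.0.
def _condition_with_min_example():
    assert version_compare_condition_with_min('>=0.46', '0.46.0')
    assert not version_compare_condition_with_min('>=0.45', '0.46.0')
    return True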
def default_libdir():
if is_debianlike():
try:
pc = subprocess.Popen(['dpkg-architecture', '-qDEB_HOST_MULTIARCH'],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
(stdo, _) = pc.communicate()
if pc.returncode == 0:
archpath = stdo.decode().strip()
return 'lib/' + archpath
except Exception:
pass
if os.path.isdir('/usr/lib64') and not os.path.islink('/usr/lib64'):
return 'lib64'
return 'lib'
def default_libexecdir():
# There is no way to auto-detect this, so it must be set at build time
return 'libexec'
def default_prefix():
return 'c:/' if is_windows() else '/usr/local'
def get_library_dirs():
if is_windows():
return ['C:/mingw/lib'] # Fixme
if is_osx():
return ['/usr/lib'] # Fix me as well.
# The following is probably Debian/Ubuntu specific.
# /usr/local/lib is first because it contains stuff
# installed by the sysadmin and is probably more up-to-date
# than /usr/lib. If you feel that this search order is
# problematic, please raise the issue on the mailing list.
unixdirs = ['/usr/local/lib', '/usr/lib', '/lib']
plat = subprocess.check_output(['uname', '-m']).decode().strip()
# This is a terrible hack. I admit it and I'm really sorry.
# I just don't know what the correct solution is.
if plat == 'i686':
plat = 'i386'
if plat.startswith('arm'):
plat = 'arm'
unixdirs += glob('/usr/lib/' + plat + '*')
if os.path.exists('/usr/lib64'):
unixdirs.append('/usr/lib64')
unixdirs += glob('/lib/' + plat + '*')
if os.path.exists('/lib64'):
unixdirs.append('/lib64')
return unixdirs
def has_path_sep(name, sep='/\\'):
'Checks if any of the specified @sep path separators are in @name'
for each in sep:
if each in name:
return True
return False
def do_replacement(regex, line, format, confdata):
missing_variables = set()
start_tag = '@'
backslash_tag = '\\@'
if format == 'cmake':
start_tag = '${'
backslash_tag = '\\${'
def variable_replace(match):
# Pairs of escape characters before '@' or '\@'
if match.group(0).endswith('\\'):
num_escapes = match.end(0) - match.start(0)
return '\\' * (num_escapes // 2)
# Single escape character and '@'
elif match.group(0) == backslash_tag:
return start_tag
# Template variable to be replaced
else:
varname = match.group(1)
if varname in confdata:
(var, desc) = confdata.get(varname)
if isinstance(var, str):
pass
elif isinstance(var, int):
var = str(var)
else:
msg = 'Tried to replace variable {!r} value with ' \
'something other than a string or int: {!r}'
raise MesonException(msg.format(varname, var))
else:
missing_variables.add(varname)
var = ''
return var
return re.sub(regex, variable_replace, line), missing_variables
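# Illustrative sketch (not part of Meson): do_replacement() only needs a mapping
# whose get() returns (value, description) pairs, so a plain dict of 2-tuples is
# enough to demonstrate '@var@' substitution in the 'meson' format; the regex is
# the same one do_conf_file() below builds for that format.
def _do_replacement_example():
    regex = re.compile(r'(?:\\\\)+(?=\\?@)|\\@|@([-a-zA-Z0-9_]+)@')
    confdata = {'VERSION': ('1.2.3', 'project version')}
    line, missing = do_replacement(
        regex, 'version = @VERSION@, other = @UNSET@\n', 'meson', confdata)
    assert line == 'version = 1.2.3, other = \n'
    assert missing == {'UNSET'}
    return line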
def do_mesondefine(line, confdata):
arr = line.split()
if len(arr) != 2:
raise MesonException('#mesondefine does not contain exactly two tokens: %s' % line.strip())
varname = arr[1]
try:
(v, desc) = confdata.get(varname)
except KeyError:
return '/* #undef %s */\n' % varname
if isinstance(v, bool):
if v:
return '#define %s\n' % varname
else:
return '#undef %s\n' % varname
elif isinstance(v, int):
return '#define %s %d\n' % (varname, v)
elif isinstance(v, str):
return '#define %s %s\n' % (varname, v)
else:
raise MesonException('#mesondefine argument "%s" is of unknown type.' % varname)
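# --- Illustrative example (not part of upstream mesonlib) -------------------
# What do_mesondefine() produces for a few value types. FOO/BAR/BAZ are
# made-up variable names; the small _Conf class mimics the relevant part of
# Meson's ConfigurationData, whose get() raises KeyError for unknown names.
def _do_mesondefine_example():
    class _Conf(dict):
        get = dict.__getitem__
    conf = _Conf(FOO=(True, ''), BAR=(42, ''), BAZ=('"hi"', ''))
    # Expected: ['#define FOO\n', '#define BAR 42\n',
    #            '#define BAZ "hi"\n', '/* #undef MISSING */\n']
    return [do_mesondefine('#mesondefine %s\n' % name, conf)
            for name in ('FOO', 'BAR', 'BAZ', 'MISSING')]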
def do_conf_file(src, dst, confdata, format, encoding='utf-8'):
try:
with open(src, encoding=encoding) as f:
data = f.readlines()
except Exception as e:
raise MesonException('Could not read input file %s: %s' % (src, str(e)))
# Only allow (a-z, A-Z, 0-9, _, -) as valid characters for a define
# Also allow escaping '@' with '\@'
if format in ['meson', 'cmake@']:
regex = re.compile(r'(?:\\\\)+(?=\\?@)|\\@|@([-a-zA-Z0-9_]+)@')
elif format == 'cmake':
regex = re.compile(r'(?:\\\\)+(?=\\?\$)|\\\${|\${([-a-zA-Z0-9_]+)}')
else:
raise MesonException('Format "{}" not handled'.format(format))
search_token = '#mesondefine'
if format != 'meson':
search_token = '#cmakedefine'
result = []
missing_variables = set()
# Detect when the configuration data is empty and no tokens were found
# during substitution so we can warn the user to use the `copy:` kwarg.
confdata_useless = not confdata.keys()
for line in data:
if line.startswith(search_token):
confdata_useless = False
line = do_mesondefine(line, confdata)
else:
line, missing = do_replacement(regex, line, format, confdata)
missing_variables.update(missing)
if missing:
confdata_useless = False
result.append(line)
dst_tmp = dst + '~'
try:
with open(dst_tmp, 'w', encoding=encoding) as f:
f.writelines(result)
except Exception as e:
raise MesonException('Could not write output file %s: %s' % (dst, str(e)))
shutil.copymode(src, dst_tmp)
replace_if_different(dst, dst_tmp)
return missing_variables, confdata_useless
CONF_C_PRELUDE = '''/*
* Autogenerated by the Meson build system.
* Do not edit, your changes will be lost.
*/
#pragma once
'''
CONF_NASM_PRELUDE = '''; Autogenerated by the Meson build system.
; Do not edit, your changes will be lost.
'''
def dump_conf_header(ofilename, cdata, output_format):
if output_format == 'c':
prelude = CONF_C_PRELUDE
prefix = '#'
elif output_format == 'nasm':
prelude = CONF_NASM_PRELUDE
prefix = '%'
ofilename_tmp = ofilename + '~'
with open(ofilename_tmp, 'w', encoding='utf-8') as ofile:
ofile.write(prelude)
for k in sorted(cdata.keys()):
(v, desc) = cdata.get(k)
if desc:
if output_format == 'c':
ofile.write('/* %s */\n' % desc)
elif output_format == 'nasm':
for line in desc.split('\n'):
ofile.write('; %s\n' % line)
if isinstance(v, bool):
if v:
ofile.write('%sdefine %s\n\n' % (prefix, k))
else:
ofile.write('%sundef %s\n\n' % (prefix, k))
elif isinstance(v, (int, str)):
ofile.write('%sdefine %s %s\n\n' % (prefix, k, v))
else:
raise MesonException('Unknown data type in configuration file entry: ' + k)
replace_if_different(ofilename, ofilename_tmp)
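# --- Illustrative example (not part of upstream mesonlib) -------------------
# A small sketch of dump_conf_header() writing a C config header. The entry
# names are made up, and a plain dict stands in for Meson's ConfigurationData
# (only .keys() and .get() are needed here).
def _dump_conf_header_example():
    import tempfile
    cdata = {'HAVE_FOO': (True, 'has foo'), 'VERSION': ('"1.0"', None)}
    path = os.path.join(tempfile.mkdtemp(), 'config.h')
    dump_conf_header(path, cdata, 'c')
    # The file now contains '#define HAVE_FOO' (with its comment) and
    # '#define VERSION "1.0"' below the autogenerated prelude.
    with open(path) as f:
        return f.read()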
def replace_if_different(dst, dst_tmp):
# If contents are identical, don't touch the file to prevent
# unnecessary rebuilds.
different = True
try:
with open(dst, 'rb') as f1, open(dst_tmp, 'rb') as f2:
if f1.read() == f2.read():
different = False
except FileNotFoundError:
pass
if different:
os.replace(dst_tmp, dst)
else:
os.unlink(dst_tmp)
def listify(item, flatten=True, unholder=False):
'''
    Returns @item wrapped in a list if it is not already a list; otherwise
    returns a new list built from @item's elements. Order is preserved.
@flatten: Convert lists of lists to a flat list
@unholder: Replace each item with the object it holds, if required
Note: unholding only works recursively when flattening
'''
if not isinstance(item, list):
if unholder and hasattr(item, 'held_object'):
item = item.held_object
return [item]
result = []
for i in item:
if unholder and hasattr(i, 'held_object'):
i = i.held_object
if flatten and isinstance(i, list):
result += listify(i, flatten=True, unholder=unholder)
else:
result.append(i)
return result
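# --- Illustrative example (not part of upstream mesonlib) -------------------
# A quick sketch of listify()'s flattening behaviour. Unholding needs a Meson
# holder object with a held_object attribute, so it is not exercised here.
def _listify_example():
    assert listify('a') == ['a']
    assert listify([1, [2, [3]]]) == [1, 2, 3]
    assert listify([1, [2, [3]]], flatten=False) == [1, [2, [3]]]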
def extract_as_list(dict_object, *keys, pop=False, **kwargs):
'''
Extracts all values from given dict_object and listifies them.
'''
result = []
fetch = dict_object.get
if pop:
fetch = dict_object.pop
# If there's only one key, we don't return a list with one element
if len(keys) == 1:
return listify(fetch(keys[0], []), **kwargs)
# Return a list of values corresponding to *keys
for key in keys:
result.append(listify(fetch(key, []), **kwargs))
return result
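# --- Illustrative example (not part of upstream mesonlib) -------------------
# extract_as_list() with one key returns a single flat list, while several
# keys yield one list per key. The kwargs names below are made up.
def _extract_as_list_example():
    kwargs = {'sources': 'a.c', 'dependencies': ['x', 'y']}
    assert extract_as_list(kwargs, 'sources') == ['a.c']
    assert extract_as_list(kwargs, 'sources', 'dependencies') == [['a.c'], ['x', 'y']]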
def typeslistify(item, types):
'''
Ensure that type(@item) is one of @types or a
list of items all of which are of type @types
'''
if isinstance(item, types):
item = [item]
if not isinstance(item, list):
raise MesonException('Item must be a list or one of {!r}'.format(types))
for i in item:
if i is not None and not isinstance(i, types):
raise MesonException('List item must be one of {!r}'.format(types))
return item
def stringlistify(item):
return typeslistify(item, str)
def expand_arguments(args):
    expanded_args = []
    for arg in args:
        if not arg.startswith('@'):
            expanded_args.append(arg)
            continue
        args_file = arg[1:]
        try:
            with open(args_file) as f:
                extended_args = f.read().split()
            expanded_args += extended_args
        except Exception as e:
            print('Error expanding command line arguments, %s not found' % args_file)
            print(e)
            return None
    return expanded_args
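# --- Illustrative example (not part of upstream mesonlib) -------------------
# expand_arguments() replaces '@<path>' entries with the whitespace-separated
# contents of that file and passes everything else through untouched. The
# arguments and file contents below are made up.
def _expand_arguments_example():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.rsp', delete=False) as f:
        f.write('--buildtype=debug --werror')
        path = f.name
    # -> ['compile', '--buildtype=debug', '--werror', 'mytarget']
    return expand_arguments(['compile', '@' + path, 'mytarget'])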
def Popen_safe(args, write=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs):
import locale
encoding = locale.getpreferredencoding()
if sys.version_info < (3, 6) or not sys.stdout.encoding or encoding.upper() != 'UTF-8':
return Popen_safe_legacy(args, write=write, stdout=stdout, stderr=stderr, **kwargs)
p = subprocess.Popen(args, universal_newlines=True, close_fds=False,
stdout=stdout, stderr=stderr, **kwargs)
o, e = p.communicate(write)
return p, o, e
def Popen_safe_legacy(args, write=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs):
p = subprocess.Popen(args, universal_newlines=False,
stdout=stdout, stderr=stderr, **kwargs)
if write is not None:
write = write.encode('utf-8')
o, e = p.communicate(write)
if o is not None:
if sys.stdout.encoding:
o = o.decode(encoding=sys.stdout.encoding, errors='replace').replace('\r\n', '\n')
else:
o = o.decode(errors='replace').replace('\r\n', '\n')
if e is not None:
if sys.stderr.encoding:
e = e.decode(encoding=sys.stderr.encoding, errors='replace').replace('\r\n', '\n')
else:
e = e.decode(errors='replace').replace('\r\n', '\n')
return p, o, e
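# --- Illustrative example (not part of upstream mesonlib) -------------------
# Popen_safe() returns the Popen object together with decoded stdout/stderr,
# so callers can inspect the return code and the output in one go. Running
# the current interpreter with --version keeps the sketch portable.
def _popen_safe_example():
    p, out, err = Popen_safe([sys.executable, '--version'])
    return p.returncode, out.strip(), err.strip()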
def iter_regexin_iter(regexiter, initer):
'''
Takes each regular expression in @regexiter and tries to search for it in
every item in @initer. If there is a match, returns that match.
Else returns False.
'''
for regex in regexiter:
for ii in initer:
if not isinstance(ii, str):
continue
match = re.search(regex, ii)
if match:
return match.group()
return False
def _substitute_values_check_errors(command, values):
# Error checking
inregex = ('@INPUT([0-9]+)?@', '@PLAINNAME@', '@BASENAME@')
outregex = ('@OUTPUT([0-9]+)?@', '@OUTDIR@')
if '@INPUT@' not in values:
# Error out if any input-derived templates are present in the command
match = iter_regexin_iter(inregex, command)
if match:
m = 'Command cannot have {!r}, since no input files were specified'
raise MesonException(m.format(match))
else:
if len(values['@INPUT@']) > 1:
# Error out if @PLAINNAME@ or @BASENAME@ is present in the command
match = iter_regexin_iter(inregex[1:], command)
if match:
raise MesonException('Command cannot have {!r} when there is '
'more than one input file'.format(match))
# Error out if an invalid @INPUTnn@ template was specified
for each in command:
if not isinstance(each, str):
continue
match = re.search(inregex[0], each)
if match and match.group() not in values:
m = 'Command cannot have {!r} since there are only {!r} inputs'
raise MesonException(m.format(match.group(), len(values['@INPUT@'])))
if '@OUTPUT@' not in values:
# Error out if any output-derived templates are present in the command
match = iter_regexin_iter(outregex, command)
if match:
m = 'Command cannot have {!r} since there are no outputs'
raise MesonException(m.format(match))
else:
# Error out if an invalid @OUTPUTnn@ template was specified
for each in command:
if not isinstance(each, str):
continue
match = re.search(outregex[0], each)
if match and match.group() not in values:
m = 'Command cannot have {!r} since there are only {!r} outputs'
raise MesonException(m.format(match.group(), len(values['@OUTPUT@'])))
def substitute_values(command, values):
'''
Substitute the template strings in the @values dict into the list of
strings @command and return a new list. For a full list of the templates,
see get_filenames_templates_dict()
If multiple inputs/outputs are given in the @values dictionary, we
substitute @INPUT@ and @OUTPUT@ only if they are the entire string, not
just a part of it, and in that case we substitute *all* of them.
'''
# Error checking
_substitute_values_check_errors(command, values)
# Substitution
outcmd = []
rx_keys = [re.escape(key) for key in values if key not in ('@INPUT@', '@OUTPUT@')]
value_rx = re.compile('|'.join(rx_keys)) if rx_keys else None
for vv in command:
if not isinstance(vv, str):
outcmd.append(vv)
elif '@INPUT@' in vv:
inputs = values['@INPUT@']
if vv == '@INPUT@':
outcmd += inputs
elif len(inputs) == 1:
outcmd.append(vv.replace('@INPUT@', inputs[0]))
else:
raise MesonException("Command has '@INPUT@' as part of a "
"string and more than one input file")
elif '@OUTPUT@' in vv:
outputs = values['@OUTPUT@']
if vv == '@OUTPUT@':
outcmd += outputs
elif len(outputs) == 1:
outcmd.append(vv.replace('@OUTPUT@', outputs[0]))
else:
raise MesonException("Command has '@OUTPUT@' as part of a "
"string and more than one output file")
# Append values that are exactly a template string.
# This is faster than a string replace.
elif vv in values:
outcmd.append(values[vv])
# Substitute everything else with replacement
elif value_rx:
outcmd.append(value_rx.sub(lambda m: values[m.group(0)], vv))
else:
outcmd.append(vv)
return outcmd
def get_filenames_templates_dict(inputs, outputs):
'''
Create a dictionary with template strings as keys and values as values for
the following templates:
@INPUT@ - the full path to one or more input files, from @inputs
@OUTPUT@ - the full path to one or more output files, from @outputs
@OUTDIR@ - the full path to the directory containing the output files
If there is only one input file, the following keys are also created:
@PLAINNAME@ - the filename of the input file
@BASENAME@ - the filename of the input file with the extension removed
If there is more than one input file, the following keys are also created:
@INPUT0@, @INPUT1@, ... one for each input file
If there is more than one output file, the following keys are also created:
@OUTPUT0@, @OUTPUT1@, ... one for each output file
'''
values = {}
# Gather values derived from the input
if inputs:
# We want to substitute all the inputs.
values['@INPUT@'] = inputs
for (ii, vv) in enumerate(inputs):
# Write out @INPUT0@, @INPUT1@, ...
values['@INPUT{}@'.format(ii)] = vv
if len(inputs) == 1:
# Just one value, substitute @PLAINNAME@ and @BASENAME@
values['@PLAINNAME@'] = plain = os.path.basename(inputs[0])
values['@BASENAME@'] = os.path.splitext(plain)[0]
if outputs:
# Gather values derived from the outputs, similar to above.
values['@OUTPUT@'] = outputs
for (ii, vv) in enumerate(outputs):
values['@OUTPUT{}@'.format(ii)] = vv
# Outdir should be the same for all outputs
values['@OUTDIR@'] = os.path.dirname(outputs[0])
# Many external programs fail on empty arguments.
if values['@OUTDIR@'] == '':
values['@OUTDIR@'] = '.'
return values
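# --- Illustrative example (not part of upstream mesonlib) -------------------
# How get_filenames_templates_dict() and substitute_values() fit together for
# a single-input, single-output command. The file names and the command line
# are made up for the demo.
def _template_substitution_example():
    values = get_filenames_templates_dict(['src/foo.c.in'], ['gen/foo.c'])
    # -> ['sed', '-e', 's/x/y/', 'src/foo.c.in', '-o', 'gen/foo.c']
    return substitute_values(
        ['sed', '-e', 's/x/y/', '@INPUT@', '-o', '@OUTPUT@'], values)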
def _make_tree_writable(topdir):
# Ensure all files and directories under topdir are writable
# (and readable) by owner.
for d, _, files in os.walk(topdir):
os.chmod(d, os.stat(d).st_mode | stat.S_IWRITE | stat.S_IREAD)
for fname in files:
fpath = os.path.join(d, fname)
if os.path.isfile(fpath):
os.chmod(fpath, os.stat(fpath).st_mode | stat.S_IWRITE | stat.S_IREAD)
def windows_proof_rmtree(f):
    # On Windows, if anyone is holding a file open you can't
    # delete it. For example, an antivirus scanner might be
    # scanning files you are trying to delete. The only way
    # to fix this is to try again and again.
delays = [0.1, 0.1, 0.2, 0.2, 0.2, 0.5, 0.5, 1, 1, 1, 1, 2]
    # Start by making the tree writable.
_make_tree_writable(f)
for d in delays:
try:
shutil.rmtree(f)
return
except FileNotFoundError:
return
except (OSError, PermissionError):
time.sleep(d)
# Try one last time and throw if it fails.
shutil.rmtree(f)
def detect_subprojects(spdir_name, current_dir='', result=None):
if result is None:
result = {}
spdir = os.path.join(current_dir, spdir_name)
if not os.path.exists(spdir):
return result
for trial in glob(os.path.join(spdir, '*')):
basename = os.path.basename(trial)
        if basename == 'packagecache':
continue
append_this = True
if os.path.isdir(trial):
detect_subprojects(spdir_name, trial, result)
elif trial.endswith('.wrap') and os.path.isfile(trial):
basename = os.path.splitext(basename)[0]
else:
append_this = False
if append_this:
if basename in result:
result[basename].append(trial)
else:
result[basename] = [trial]
return result
def get_error_location_string(fname, lineno):
return '{}:{}:'.format(fname, lineno)
def substring_is_in_list(substr, strlist):
for s in strlist:
if substr in s:
return True
return False
class OrderedSet(collections.abc.MutableSet):
"""A set that preserves the order in which items are added, by first
insertion.
"""
def __init__(self, iterable=None):
self.__container = collections.OrderedDict()
if iterable:
self.update(iterable)
def __contains__(self, value):
return value in self.__container
def __iter__(self):
return iter(self.__container.keys())
def __len__(self):
return len(self.__container)
def __repr__(self):
# Don't print 'OrderedSet("")' for an empty set.
if self.__container:
return 'OrderedSet("{}")'.format(
'", "'.join(repr(e) for e in self.__container.keys()))
return 'OrderedSet()'
def __reversed__(self):
return reversed(self.__container)
def add(self, value):
self.__container[value] = None
def discard(self, value):
if value in self.__container:
del self.__container[value]
def update(self, iterable):
for item in iterable:
self.__container[item] = None
def difference(self, set_):
return type(self)(e for e in self if e not in set_)
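# --- Illustrative example (not part of upstream mesonlib) -------------------
# Unlike a plain set, OrderedSet keeps first-insertion order, which can
# matter for reproducible command lines.
def _ordered_set_example():
    s = OrderedSet(['b', 'a', 'b', 'c'])
    assert list(s) == ['b', 'a', 'c']
    assert list(s.difference(['a'])) == ['b', 'c']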
class BuildDirLock:
def __init__(self, builddir):
self.lockfilename = os.path.join(builddir, 'meson-private/meson.lock')
def __enter__(self):
self.lockfile = open(self.lockfilename, 'w')
try:
if have_fcntl:
fcntl.flock(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
elif have_msvcrt:
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
except (BlockingIOError, PermissionError):
self.lockfile.close()
raise MesonException('Some other Meson process is already using this build directory. Exiting.')
def __exit__(self, *args):
if have_fcntl:
fcntl.flock(self.lockfile, fcntl.LOCK_UN)
elif have_msvcrt:
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
self.lockfile.close()
def relpath(path, start):
# On Windows a relative path can't be evaluated for paths on two different
# drives (i.e. c:\foo and f:\bar). The only thing left to do is to use the
# original absolute path.
try:
return os.path.relpath(path, start)
except ValueError:
return path
|
MathieuDuponchelle/meson
|
mesonbuild/mesonlib.py
|
Python
|
apache-2.0
| 42,764
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Attention types.
ATT_LUONG = "luong"
ATT_LUONG_SCALED = "luong_scaled"
ATT_BAHDANAU = "bahdanau"
ATT_BAHDANAU_NORM = "bahdanau_norm"
ATT_TYPES = (ATT_LUONG, ATT_LUONG_SCALED, ATT_BAHDANAU, ATT_BAHDANAU_NORM)
# Encoder types.
ENC_UNI = "uni"
ENC_BI = "bi"
ENC_GNMT = "gnmt"
ENC_TYPES = (ENC_UNI, ENC_BI, ENC_GNMT)
# Decoder types.
DEC_BASIC = "basic"
DEC_ATTENTIVE = "attentive"
DEC_TYPES = (DEC_BASIC, DEC_ATTENTIVE)
# Language model types.
LM_L2R = "left2right"
LM_TYPES = (LM_L2R,)
|
google-research/language
|
language/labs/consistent_zero_shot_nmt/utils/common_utils.py
|
Python
|
apache-2.0
| 1,241
|
#!/usr/bin/env python
"""
Copyright 2016 ARC Centre of Excellence for Climate Systems Science
author: Scott Wales <scott.wales@unimelb.edu.au>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from subprocess import Popen, PIPE
from textwrap import dedent
from os import environ, path
from distutils.util import strtobool
import ldap
import getpass
from . import auth, gpg
def colour(text, colour):
if colour == 'red':
code = '\033[31;1m'
elif colour == 'green':
code = '\033[32m'
elif colour == 'blue':
code = '\033[93m'
else:
raise Exception
reset = '\033[m'
return code + text + reset
def info(text):
print("%s: %s"%(colour('INFO','blue'),text))
def warning(text):
print("%s: %s"%(colour('WARN','red'),text))
def todo(text):
print("%s: %s"%(colour('TODO','green'),text))
class SetupError(Exception):
"""
Indicates user needs to take action before setup can complete
"""
pass
def userinfo():
"""
Get current user's common name and email from LDAP
Returns: Tuple of (name, email)
"""
l = ldap.initialize(ldap.get_option(ldap.OPT_URI))
people = 'ou=People,dc=apac,dc=edu,dc=au'
info = l.search_s(people, ldap.SCOPE_SUBTREE, '(uid=%s)'%getpass.getuser())
return (info[0][1]['cn'][0],info[0][1]['mail'][0])
def prompt_bool(prompt):
"""
Ask a yes/no question
Returns: true/false answer
"""
raw_value = raw_input(prompt + ' [yes/no] ')
try:
return strtobool(raw_value)
except ValueError:
        return prompt_bool(prompt)
def prompt_or_default(prompt, default):
"""
Ask a question with a default answer
Returns: answer or default
"""
response = raw_input('%s [%s]: '%(prompt,default)).strip()
if response == '':
response = default
return response
def gpg_startup():
agent = dedent("""
[ -f ~/.gpg-agent-info ] && source ~/.gpg-agent-info
if [ -S "${GPG_AGENT_INFO%%:*}" ]; then
export GPG_AGENT_INFO
else
eval $( gpg-agent --daemon --allow-preset-passphrase --batch --max-cache-ttl 43200 --write-env-file ~/.gpg-agent-info )
fi
""")
home = environ['HOME']
for f in ['.profile','.bash_profile']:
p = path.join(home,f)
if path.exists(p):
# Check if gpg-agent is already referenced
grep = Popen(['grep','gpg-agent',p],stdout=PIPE)
grep.communicate()
if grep.returncode == 0:
warning('GPG Agent is referenced in ~/%s but is not currently running. '%f+
                        'Try relogging to start it again; if that doesn\'t work please contact the helpdesk')
continue
# Add script to file
with open(p,'a') as profile:
profile.write(agent)
todo('GPG Agent has been added to your startup scripts. '+
'Please log out of Accessdev then back in again to make sure it has been activated\n')
def check_gpg_agent():
"""
Make sure GPG-Agent is running
If the environment variable is not found add activation script to the
users's .profile
"""
try:
gpg.send('GETINFO version')
info('GPG Agent is running')
except Exception:
gpg_startup()
raise SetupError
def register_mosrs_account():
name, email = userinfo()
name = prompt_or_default('What is your name?',name)
email = prompt_or_default('What is your work email address?',email)
request = Popen(['mail', '-s','MOSRS account request for %s'%name, 'access_help@nf.nci.org.au'], stdin=PIPE)
request.communicate(dedent("""
ACCESS user %s (NCI id %s, email <%s>) would like to request an account on MOSRS.
Can the sponsor for their institution please submit a request on their behalf at
https://code.metoffice.gov.uk/trac/admin/newticket?type=account-request
You can check if they have an existing account at
https://code.metoffice.gov.uk/trac/home/wiki/UserList
"""%(name, environ['USER'], email)))
print('\n')
info('Submitting MOSRS account request for %s <%s> to access_help'%(name,email))
info('Once your account has been activated (will take at least one UK business day) '+
'you will receive an email detailing how to set up your password\n')
def setup_mosrs_account():
"""
Setup Mosrs
"""
check_gpg_agent()
mosrs_request = None
while mosrs_request not in ['yes', 'no', 'y', 'n']:
mosrs_request = prompt_or_default("Do you have a MOSRS account", "yes")
mosrs_request = mosrs_request.lower()
if mosrs_request.startswith('y'):
auth.check_or_update()
else:
print(dedent(
"""
If you need to access new versions of the UM please send a
request to 'cws_help@nci.org.au' saying that you'd like a MOSRS account
Once you have an account run this script again
"""
))
print('\n')
def check_raijin_ssh():
"""
Raijin has been decommissioned. There should no longer be any calls to this
procedure. In case there is, I'm leaving this stub in.
"""
raise ValueError("raijin should no longer be used. Please contact CMS")
def check_gadi_ssh():
"""
Test Rose/Cylc can be found on Gadi
"""
print('Testing Rose can be accessed on Gadi...')
# ssh -oBatchMode=yes /projects/access/bin/cylc --version
ssh = Popen(['ssh','-oBatchMode=yes','gadi','/projects/access/bin/cylc --version'])
result = ssh.wait()
if result == 0:
print('Successfully found Rose\n')
else:
warning('Unable to connect to Gadi')
warning('Follow the instructions at https://accessdev.nci.org.au/trac/wiki/Guides/SSH to set up a SSH agent\n')
raise SetupError
def accesssvn_setup():
"""
Setup GPG for access-svn access
"""
try:
check_gpg_agent()
print('\n')
print('To store your password for 12 hours run:')
print(' access-auth\n')
except SetupError:
todo('Once this has been done please run this setup script again\n')
def main():
print('\n')
print('Welcome to Accessdev, the user interface and control server for the ACCESS model at NCI')
print('This script will set up your account to use Rose and the UM\n')
try:
setup_mosrs_account()
check_gadi_ssh()
# Account successfully created
print('You are now able to use Rose and the UM. To see a list of available experiments run:')
print(' rosie go\n')
print('Your password will be cached for a maximum of 12 hours. To store your password again run:')
print(' mosrs-auth\n')
except SetupError:
todo('Once this has been done please run this setup script again\n')
finally:
print('You can ask for help with the ACCESS systems by emailing "access_help@nf.nci.org.au"\n')
if __name__ == '__main__':
main()
|
ScottWales/mosrs-setup
|
mosrs/setup.py
|
Python
|
apache-2.0
| 7,573
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import unittest
from pyflink.table import DataTypes
from pyflink.table.udf import TableFunction, udtf, ScalarFunction, udf
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkOldStreamTableTestCase, \
PyFlinkBlinkStreamTableTestCase, PyFlinkOldBatchTableTestCase, PyFlinkBlinkBatchTableTestCase
class UserDefinedTableFunctionTests(object):
def test_table_function(self):
self._register_table_sink(
['a', 'b', 'c'],
[DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.BIGINT()])
multi_emit = udtf(MultiEmit(), result_types=[DataTypes.BIGINT(), DataTypes.BIGINT()])
multi_num = udf(MultiNum(), result_type=DataTypes.BIGINT())
t = self.t_env.from_elements([(1, 1, 3), (2, 1, 6), (3, 2, 9)], ['a', 'b', 'c'])
t = t.join_lateral(multi_emit(t.a, multi_num(t.b)).alias('x', 'y'))
t = t.left_outer_join_lateral(condition_multi_emit(t.x, t.y).alias('m')) \
.select("x, y, m")
t = t.left_outer_join_lateral(identity(t.m).alias('n')) \
.select("x, y, n")
actual = self._get_output(t)
self.assert_equals(actual,
["+I[1, 0, null]", "+I[1, 1, null]", "+I[2, 0, null]", "+I[2, 1, null]",
"+I[3, 0, 0]", "+I[3, 0, 1]", "+I[3, 0, 2]", "+I[3, 1, 1]",
"+I[3, 1, 2]", "+I[3, 2, 2]", "+I[3, 3, null]"])
def test_table_function_with_sql_query(self):
self._register_table_sink(
['a', 'b', 'c'],
[DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.BIGINT()])
self.t_env.create_temporary_system_function(
"multi_emit", udtf(MultiEmit(), result_types=[DataTypes.BIGINT(), DataTypes.BIGINT()]))
t = self.t_env.from_elements([(1, 1, 3), (2, 1, 6), (3, 2, 9)], ['a', 'b', 'c'])
self.t_env.register_table("MyTable", t)
t = self.t_env.sql_query(
"SELECT a, x, y FROM MyTable LEFT JOIN LATERAL TABLE(multi_emit(a, b)) as T(x, y)"
" ON TRUE")
actual = self._get_output(t)
self.assert_equals(actual, ["+I[1, 1, 0]", "+I[2, 2, 0]", "+I[3, 3, 0]", "+I[3, 3, 1]"])
def _register_table_sink(self, field_names: list, field_types: list):
table_sink = source_sink_utils.TestAppendSink(field_names, field_types)
self.t_env.register_table_sink("Results", table_sink)
def _get_output(self, t):
t.execute_insert("Results").wait()
return source_sink_utils.results()
class PyFlinkStreamUserDefinedTableFunctionTests(UserDefinedTableFunctionTests,
PyFlinkOldStreamTableTestCase):
pass
class PyFlinkBlinkStreamUserDefinedFunctionTests(UserDefinedTableFunctionTests,
PyFlinkBlinkStreamTableTestCase):
pass
class PyFlinkBlinkBatchUserDefinedFunctionTests(UserDefinedTableFunctionTests,
PyFlinkBlinkBatchTableTestCase):
pass
class PyFlinkBatchUserDefinedTableFunctionTests(UserDefinedTableFunctionTests,
PyFlinkOldBatchTableTestCase):
def _register_table_sink(self, field_names: list, field_types: list):
pass
def _get_output(self, t):
return self.collect(t)
def test_row_type_as_input_types_and_result_types(self):
# test input_types and result_types are DataTypes.ROW
a = udtf(lambda i: i,
input_types=DataTypes.ROW([DataTypes.FIELD("a", DataTypes.BIGINT())]),
result_types=DataTypes.ROW([DataTypes.FIELD("a", DataTypes.BIGINT())]))
self.assertEqual(a._input_types,
[DataTypes.ROW([DataTypes.FIELD("a", DataTypes.BIGINT())])])
self.assertEqual(a._result_types,
[DataTypes.ROW([DataTypes.FIELD("a", DataTypes.BIGINT())])])
class MultiEmit(TableFunction, unittest.TestCase):
def open(self, function_context):
mg = function_context.get_metric_group()
self.counter = mg.add_group("key", "value").counter("my_counter")
self.counter_sum = 0
def eval(self, x, y):
self.counter.inc(y)
self.counter_sum += y
for i in range(y):
yield x, i
@udtf(result_types=[DataTypes.BIGINT()])
def identity(x):
if x is not None:
from pyflink.common import Row
return Row(x)
# test specify the input_types
@udtf(input_types=[DataTypes.BIGINT(), DataTypes.BIGINT()],
result_types=DataTypes.BIGINT())
def condition_multi_emit(x, y):
if x == 3:
return range(y, x)
class MultiNum(ScalarFunction):
def eval(self, x):
return x * 2
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
clarkyzl/flink
|
flink-python/pyflink/table/tests/test_udtf.py
|
Python
|
apache-2.0
| 5,986
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import time
import netaddr
from neutron_lib import constants
from oslo_log import log as logging
import six
from neutron.agent.common import ovs_lib
from neutron.agent.linux import ip_lib
from neutron.common import constants as n_const
from neutron.common import exceptions
LOG = logging.getLogger(__name__)
def _get_veth(name1, name2, namespace2):
return (ip_lib.IPDevice(name1),
ip_lib.IPDevice(name2, namespace=namespace2))
@six.add_metaclass(abc.ABCMeta)
class LinuxInterfaceDriver(object):
DEV_NAME_LEN = n_const.LINUX_DEV_LEN
DEV_NAME_PREFIX = constants.TAP_DEVICE_PREFIX
def __init__(self, conf):
self.conf = conf
self._mtu_update_warn_logged = False
@property
def use_gateway_ips(self):
"""Whether to use gateway IPs instead of unique IP allocations.
In each place where the DHCP agent runs, and for each subnet for
        which DHCP is handing out IP addresses, the DHCP port needs -
at the Linux level - to have an IP address within that subnet.
Generally this needs to be a unique Neutron-allocated IP
address, because the subnet's underlying L2 domain is bridged
across multiple compute hosts and network nodes, and for HA
there may be multiple DHCP agents running on that same bridged
L2 domain.
However, if the DHCP ports - on multiple compute/network nodes
but for the same network - are _not_ bridged to each other,
they do not need each to have a unique IP address. Instead
they can all share the same address from the relevant subnet.
This works, without creating any ambiguity, because those
ports are not all present on the same L2 domain, and because
no data within the network is ever sent to that address.
(DHCP requests are broadcast, and it is the network's job to
ensure that such a broadcast will reach at least one of the
available DHCP servers. DHCP responses will be sent _from_
the DHCP port address.)
Specifically, for networking backends where it makes sense,
the DHCP agent allows all DHCP ports to use the subnet's
gateway IP address, and thereby to completely avoid any unique
IP address allocation. This behaviour is selected by running
the DHCP agent with a configured interface driver whose
'use_gateway_ips' property is True.
When an operator deploys Neutron with an interface driver that
makes use_gateway_ips True, they should also ensure that a
gateway IP address is defined for each DHCP-enabled subnet,
and that the gateway IP address doesn't change during the
subnet's lifetime.
"""
return False
def init_l3(self, device_name, ip_cidrs, namespace=None,
preserve_ips=None, clean_connections=False):
"""Set the L3 settings for the interface using data from the port.
ip_cidrs: list of 'X.X.X.X/YY' strings
preserve_ips: list of ip cidrs that should not be removed from device
        clean_connections: Boolean to indicate if we should clean up connections
        associated with removed ips
"""
preserve_ips = preserve_ips or []
device = ip_lib.IPDevice(device_name, namespace=namespace)
# The LLA generated by the operating system is not known to
# Neutron, so it would be deleted if we added it to the 'previous'
# list here
default_ipv6_lla = ip_lib.get_ipv6_lladdr(device.link.address)
cidrs = set()
remove_ips = set()
# normalize all the IP addresses first
for ip_cidr in ip_cidrs:
net = netaddr.IPNetwork(ip_cidr)
# Convert to compact IPv6 address because the return values of
# "ip addr list" are compact.
if net.version == 6:
ip_cidr = str(net)
cidrs.add(ip_cidr)
# Determine the addresses that must be added and removed
for address in device.addr.list():
cidr = address['cidr']
dynamic = address['dynamic']
# skip the IPv6 link-local
if cidr == default_ipv6_lla:
# it's already configured, leave it alone
cidrs.discard(cidr)
continue
if cidr in preserve_ips:
continue
# Statically created addresses are OK, dynamically created
# addresses must be removed and replaced
if cidr in cidrs and not dynamic:
cidrs.remove(cidr)
continue
remove_ips.add(cidr)
# Clean up any old addresses. This must be done first since there
# could be a dynamic address being replaced with a static one.
for ip_cidr in remove_ips:
if clean_connections:
device.delete_addr_and_conntrack_state(ip_cidr)
else:
device.addr.delete(ip_cidr)
# add any new addresses
for ip_cidr in cidrs:
device.addr.add(ip_cidr)
def init_router_port(self,
device_name,
ip_cidrs,
namespace,
preserve_ips=None,
extra_subnets=None,
clean_connections=False):
"""Set the L3 settings for a router interface using data from the port.
ip_cidrs: list of 'X.X.X.X/YY' strings
preserve_ips: list of ip cidrs that should not be removed from device
        clean_connections: Boolean to indicate if we should clean up connections
        associated with removed ips
extra_subnets: An iterable of cidrs to add as routes without address
"""
LOG.debug("init_router_port: device_name(%s), namespace(%s)",
device_name, namespace)
self.init_l3(device_name=device_name,
ip_cidrs=ip_cidrs,
namespace=namespace,
preserve_ips=preserve_ips or [],
clean_connections=clean_connections)
device = ip_lib.IPDevice(device_name, namespace=namespace)
# Manage on-link routes (routes without an associated address)
new_onlink_cidrs = set(s['cidr'] for s in extra_subnets or [])
v4_onlink = device.route.list_onlink_routes(constants.IP_VERSION_4)
v6_onlink = device.route.list_onlink_routes(constants.IP_VERSION_6)
existing_onlink_cidrs = set(r['cidr'] for r in v4_onlink + v6_onlink)
for route in new_onlink_cidrs - existing_onlink_cidrs:
LOG.debug("adding onlink route(%s)", route)
device.route.add_onlink_route(route)
for route in (existing_onlink_cidrs - new_onlink_cidrs -
set(preserve_ips or [])):
LOG.debug("deleting onlink route(%s)", route)
device.route.delete_onlink_route(route)
def add_ipv6_addr(self, device_name, v6addr, namespace, scope='global'):
device = ip_lib.IPDevice(device_name,
namespace=namespace)
net = netaddr.IPNetwork(v6addr)
device.addr.add(str(net), scope)
def delete_ipv6_addr(self, device_name, v6addr, namespace):
device = ip_lib.IPDevice(device_name,
namespace=namespace)
device.delete_addr_and_conntrack_state(v6addr)
def delete_ipv6_addr_with_prefix(self, device_name, prefix, namespace):
"""Delete the first listed IPv6 address that falls within a given
prefix.
"""
device = ip_lib.IPDevice(device_name, namespace=namespace)
net = netaddr.IPNetwork(prefix)
for address in device.addr.list(scope='global', filters=['permanent']):
ip_address = netaddr.IPNetwork(address['cidr'])
if ip_address in net:
device.delete_addr_and_conntrack_state(address['cidr'])
break
def get_ipv6_llas(self, device_name, namespace):
device = ip_lib.IPDevice(device_name,
namespace=namespace)
return device.addr.list(scope='link', ip_version=6)
def check_bridge_exists(self, bridge):
if not ip_lib.device_exists(bridge):
raise exceptions.BridgeDoesNotExist(bridge=bridge)
def get_device_name(self, port):
return (self.DEV_NAME_PREFIX + port.id)[:self.DEV_NAME_LEN]
def remove_vlan_tag(self, bridge, interface_name):
"""Remove vlan tag from given interface.
This method is necessary only for the case when deprecated
option 'external_network_bridge' is used in L3 agent as
external gateway port is then created in this external bridge
directly and it will have DEAD_VLAN_TAG added by default.
"""
        # TODO(slaweq): remove it when the external_network_bridge option is
        # removed
@staticmethod
def configure_ipv6_ra(namespace, dev_name, value):
"""Configure handling of IPv6 Router Advertisements on an
interface. See common/constants.py for possible values.
"""
cmd = ['net.ipv6.conf.%(dev)s.accept_ra=%(value)s' % {'dev': dev_name,
'value': value}]
ip_lib.sysctl(cmd, namespace=namespace)
@staticmethod
def configure_ipv6_forwarding(namespace, dev_name, enabled):
"""Configure IPv6 forwarding on an interface."""
cmd = ['net.ipv6.conf.%(dev)s.forwarding=%(enabled)s' %
{'dev': dev_name, 'enabled': int(enabled)}]
ip_lib.sysctl(cmd, namespace=namespace)
@abc.abstractmethod
def plug_new(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None, mtu=None):
"""Plug in the interface only for new devices that don't exist yet."""
def plug(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None, mtu=None):
if not ip_lib.device_exists(device_name,
namespace=namespace):
self.plug_new(network_id, port_id, device_name, mac_address,
bridge, namespace, prefix, mtu)
else:
LOG.info("Device %s already exists", device_name)
if mtu:
self.set_mtu(
device_name, mtu, namespace=namespace, prefix=prefix)
else:
LOG.warning("No MTU configured for port %s", port_id)
@abc.abstractmethod
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
"""Unplug the interface."""
@property
def bridged(self):
"""Whether the DHCP port is bridged to the VM TAP interfaces.
When the DHCP port is bridged to the TAP interfaces for the
VMs for which it is providing DHCP service - as is the case
for most Neutron network implementations - the DHCP server
only needs to listen on the DHCP port, and will still receive
DHCP requests from all the relevant VMs.
If the DHCP port is not bridged to the relevant VM TAP
interfaces, the DHCP server needs to listen explicitly on
those TAP interfaces, and to treat those as aliases of the
DHCP port where the IP subnet is defined.
"""
return True
def set_mtu(self, device_name, mtu, namespace=None, prefix=None):
"""Set MTU on the interface."""
if not self._mtu_update_warn_logged:
LOG.warning("Interface driver cannot update MTU for ports")
self._mtu_update_warn_logged = True
class NullDriver(LinuxInterfaceDriver):
def plug_new(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None, mtu=None):
pass
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
pass
class OVSInterfaceDriver(LinuxInterfaceDriver):
"""Driver for creating an internal interface on an OVS bridge."""
DEV_NAME_PREFIX = constants.TAP_DEVICE_PREFIX
def __init__(self, conf):
super(OVSInterfaceDriver, self).__init__(conf)
if self.conf.ovs_use_veth:
self.DEV_NAME_PREFIX = 'ns-'
def _get_tap_name(self, dev_name, prefix=None):
if self.conf.ovs_use_veth:
dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX,
constants.TAP_DEVICE_PREFIX)
return dev_name
def _ovs_add_port(self, bridge, device_name, port_id, mac_address,
internal=True):
attrs = [('external_ids', {'iface-id': port_id,
'iface-status': 'active',
'attached-mac': mac_address})]
if internal:
attrs.insert(0, ('type', 'internal'))
ovs = ovs_lib.OVSBridge(bridge)
ovs.replace_port(device_name, *attrs)
def remove_vlan_tag(self, bridge, interface):
ovs = ovs_lib.OVSBridge(bridge)
ovs.clear_db_attribute("Port", interface, "tag")
def plug_new(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None, mtu=None):
"""Plug in the interface."""
if not bridge:
bridge = self.conf.ovs_integration_bridge
self.check_bridge_exists(bridge)
ip = ip_lib.IPWrapper()
tap_name = self._get_tap_name(device_name, prefix)
if self.conf.ovs_use_veth:
# Create ns_dev in a namespace if one is configured.
root_dev, ns_dev = ip.add_veth(tap_name,
device_name,
namespace2=namespace)
root_dev.disable_ipv6()
else:
ns_dev = ip.device(device_name)
internal = not self.conf.ovs_use_veth
self._ovs_add_port(bridge, tap_name, port_id, mac_address,
internal=internal)
for i in range(9):
# workaround for the OVS shy port syndrome. ports sometimes
# hide for a bit right after they are first created.
# see bug/1618987
try:
ns_dev.link.set_address(mac_address)
break
except RuntimeError as e:
LOG.warning("Got error trying to set mac, retrying: %s",
str(e))
time.sleep(1)
else:
# didn't break, we give it one last shot without catching
ns_dev.link.set_address(mac_address)
# Add an interface created by ovs to the namespace.
if not self.conf.ovs_use_veth and namespace:
namespace_obj = ip.ensure_namespace(namespace)
namespace_obj.add_device_to_namespace(ns_dev)
# NOTE(ihrachys): the order here is significant: we must set MTU after
# the device is moved into a namespace, otherwise OVS bridge does not
# allow to set MTU that is higher than the least of all device MTUs on
# the bridge
if mtu:
self.set_mtu(device_name, mtu, namespace=namespace, prefix=prefix)
else:
LOG.warning("No MTU configured for port %s", port_id)
ns_dev.link.set_up()
if self.conf.ovs_use_veth:
root_dev.link.set_up()
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
"""Unplug the interface."""
if not bridge:
bridge = self.conf.ovs_integration_bridge
tap_name = self._get_tap_name(device_name, prefix)
self.check_bridge_exists(bridge)
ovs = ovs_lib.OVSBridge(bridge)
try:
ovs.delete_port(tap_name)
if self.conf.ovs_use_veth:
device = ip_lib.IPDevice(device_name, namespace=namespace)
device.link.delete()
LOG.debug("Unplugged interface '%s'", device_name)
except RuntimeError:
LOG.error("Failed unplugging interface '%s'",
device_name)
def set_mtu(self, device_name, mtu, namespace=None, prefix=None):
if self.conf.ovs_use_veth:
tap_name = self._get_tap_name(device_name, prefix)
root_dev, ns_dev = _get_veth(
tap_name, device_name, namespace2=namespace)
root_dev.link.set_mtu(mtu)
else:
ns_dev = ip_lib.IPWrapper(namespace=namespace).device(device_name)
ns_dev.link.set_mtu(mtu)
class BridgeInterfaceDriver(LinuxInterfaceDriver):
"""Driver for creating bridge interfaces."""
DEV_NAME_PREFIX = 'ns-'
def plug_new(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None, mtu=None):
"""Plugin the interface."""
ip = ip_lib.IPWrapper()
# Enable agent to define the prefix
tap_name = device_name.replace(prefix or self.DEV_NAME_PREFIX,
constants.TAP_DEVICE_PREFIX)
# Create ns_veth in a namespace if one is configured.
root_veth, ns_veth = ip.add_veth(tap_name, device_name,
namespace2=namespace)
root_veth.disable_ipv6()
ns_veth.link.set_address(mac_address)
if mtu:
self.set_mtu(device_name, mtu, namespace=namespace, prefix=prefix)
else:
LOG.warning("No MTU configured for port %s", port_id)
root_veth.link.set_up()
ns_veth.link.set_up()
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
"""Unplug the interface."""
device = ip_lib.IPDevice(device_name, namespace=namespace)
try:
device.link.delete()
LOG.debug("Unplugged interface '%s'", device_name)
except RuntimeError:
LOG.error("Failed unplugging interface '%s'",
device_name)
def set_mtu(self, device_name, mtu, namespace=None, prefix=None):
tap_name = device_name.replace(prefix or self.DEV_NAME_PREFIX,
constants.TAP_DEVICE_PREFIX)
root_dev, ns_dev = _get_veth(
tap_name, device_name, namespace2=namespace)
root_dev.link.set_mtu(mtu)
ns_dev.link.set_mtu(mtu)
|
huntxu/neutron
|
neutron/agent/linux/interface.py
|
Python
|
apache-2.0
| 19,111
|
from distutils.core import setup
setup(
name = 'nicknester',
version = '1.3.0',
py_modules = ['nester'],
author = 'htýthon',
author_email = 'hfpython@headfirstlabs.com',
url = 'http://www.headfirstlabs.com',
    description = 'A simple printer of nested lists',
)
|
leobarros/use_cabeca_python
|
nester/setup.py
|
Python
|
apache-2.0
| 341
|
import errno
import os
import unittest
import mock
from taskcat._common_utils import (
exit_with_code,
fetch_ssm_parameter_value,
get_s3_domain,
make_dir,
merge_dicts,
name_from_stack_id,
param_list_to_dict,
pascal_to_snake,
region_from_stack_id,
s3_bucket_name_from_url,
s3_key_from_url,
s3_url_maker,
)
from taskcat.exceptions import TaskCatException
class TestCommonUtils(unittest.TestCase):
def test_get_param_includes(self):
bad_testcases = [{}, [[]], [{}]]
for bad in bad_testcases:
with self.assertRaises(TaskCatException):
param_list_to_dict(bad)
def test_region_from_stack_id(self):
actual = region_from_stack_id("arn:::us-east-1")
self.assertEqual("us-east-1", actual)
def test_name_from_stack_id(self):
actual = name_from_stack_id("arn:::us-east-1::Stack/test-name")
self.assertEqual("test-name", actual)
@mock.patch("taskcat._common_utils.get_s3_domain", return_value="amazonaws.com")
def test_s3_url_maker(self, m_get_s3_domain):
m_s3 = mock.Mock()
m_s3.get_bucket_location.return_value = {"LocationConstraint": None}
actual = s3_url_maker("test-bucket", "test-key/1", m_s3)
self.assertEqual(
"https://test-bucket.s3.us-east-1.amazonaws.com/test-key/1", actual
)
m_s3.get_bucket_location.return_value = {"LocationConstraint": "us-west-2"}
actual = s3_url_maker("test-bucket", "test-key/1", m_s3)
self.assertEqual(
"https://test-bucket.s3.us-west-2.amazonaws.com/test-key/1", actual
)
m_get_s3_domain.assert_called_once()
def test_get_s3_domain(self):
actual = get_s3_domain("cn-north-1")
self.assertEqual("amazonaws.com.cn", actual)
with self.assertRaises(TaskCatException):
get_s3_domain("totally-invalid-region")
def test_merge_dicts(self):
input = [{}, {}]
actual = merge_dicts(input)
self.assertEqual({}, actual)
input = [{"a": 1}, {"b": 2}]
actual = merge_dicts(input)
self.assertEqual({"a": 1, "b": 2}, actual)
def test_pascal_to_snake(self):
actual = pascal_to_snake("MyParam")
self.assertEqual("my_param", actual)
actual = pascal_to_snake("VPCParam")
self.assertEqual("vpcparam", actual)
def test_make_dir(self):
path = "/tmp/test_make_dir_path"
try:
os.rmdir(path)
except FileNotFoundError:
pass
os.makedirs(path)
make_dir(path)
os.rmdir(path)
make_dir(path)
self.assertEqual(os.path.isdir(path), True)
with self.assertRaises(FileExistsError) as cm:
make_dir(path, False)
self.assertEqual(cm.exception.errno, errno.EEXIST)
os.rmdir(path)
@mock.patch("taskcat._common_utils.sys.exit", autospec=True)
@mock.patch("taskcat._common_utils.LOG", autospec=True)
def test_exit_with_code(self, mock_log, mock_exit):
exit_with_code(1)
mock_log.error.assert_not_called()
mock_exit.assert_called_once_with(1)
mock_exit.reset_mock()
exit_with_code(0, "msg")
mock_exit.assert_called_once_with(0)
mock_exit.assert_called_once()
def test_s3_key_from_url(self):
k = s3_key_from_url("https://testbuk.s3.amazonaws.com/testprefix/testobj.yaml")
self.assertEqual("testprefix/testobj.yaml", k)
def test_s3_bucket_name_from_url(self):
bucket = s3_bucket_name_from_url("https://buk.s3.amazonaws.com/obj.yaml")
self.assertEqual("buk", bucket)
def test_fetch_ssm_parameter_value(self):
# String, no explicit version.
m_boto_client = mock.Mock()
m_ssm = mock.Mock()
m_boto_client.return_value = m_ssm
m_ssm.get_parameter.return_value = {
"Parameter": {"Name": "foo", "Type": "String", "Value": "bar", "Version": 1}
}
expected = "bar"
actual = fetch_ssm_parameter_value(m_boto_client, "foo")
self.assertEqual(expected, actual)
m_ssm.get_parameter.return_value = {
"Parameter": {
"Name": "foo",
"Type": "StringList",
"Value": "bar,baz,11",
"Version": 1,
}
}
expected = "bar,baz,11"
actual = fetch_ssm_parameter_value(m_boto_client, "foo")
self.assertEqual(expected, actual)
|
aws-quickstart/taskcat
|
tests/test_common_utils.py
|
Python
|
apache-2.0
| 4,514
|
import random
import numpy
from graph_diff.nirvana_object_model.workflow import Workflow
from .standard_workflow_generator import StandardWorkflowGenerator
class ChainWorkflowGenerator(StandardWorkflowGenerator):
"""Generator for chained workflows"""
def __init__(self):
super().__init__()
self.chain_number = None
def generate_workflow(self) -> Workflow:
"""
        Generates a workflow from the previously set block types.
        If block types have not been set, they can be generated with the
        generate_blocks method.
        chain_number is the number of chains; it is drawn from a geometric
        distribution with a settable expectation.
        chain_number chains are then created, each consisting of the given
        block types connected by execution order.
        After that, chain_number additional random connections are added.
:return: generated workflow
"""
if self.types_of_block is None:
raise Exception("Blocks not set yet! Set them explicitly or generate them by generate_blocks method.")
workflow = Workflow()
chain_number = numpy.random.geometric(p=1 / self.chain_number) + 1
for _ in range(0, chain_number):
prev_block = self.types_of_block[0]
prev_number = workflow.add_block(prev_block)
for new_block in self.types_of_block[1:]:
new_number = workflow.add_block(new_block)
workflow.add_connection_by_execution(prev_block, prev_number, new_block, new_number)
prev_block = new_block
prev_number = new_number
for _ in range(0, chain_number):
from_block_num = random.randint(0, len(self.types_of_block) - 1)
from_num = random.randint(0, chain_number - 1)
to_block_num = random.randint(0, len(self.types_of_block) - 1)
to_num = random.randint(0, chain_number - 1)
if (from_block_num > to_block_num):
from_block_num, to_block_num = to_block_num, from_block_num
from_num, to_num = to_num, from_num
from_block = self.types_of_block[from_block_num]
to_block = self.types_of_block[to_block_num]
workflow.add_connection_by_execution(from_block, from_num, to_block, to_num)
return workflow
def generate_blocks(self,
min_block_num=8,
max_block_num=12,
chain_number=20,
min_input_output_number=0,
max_input_output_number=3,
min_key_value_number=0,
max_key_value_number=10):
"""
Generate blocks with given parameters.
:param min_block_num:
:param max_block_num:
:param chain_number:
:param min_input_output_number:
:param max_input_output_number:
:param min_key_value_number:
:param max_key_value_number:
:return:
"""
super().generate_blocks(min_block_num,
max_block_num,
min_input_output_number,
max_input_output_number,
min_key_value_number,
max_key_value_number)
self.chain_number = chain_number
return self
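# --- Illustrative usage sketch (not part of the original module) ------------
# The intended call sequence, as implied by the methods above: generate the
# block types first, then build a chained workflow. The parameter value is
# arbitrary.
def _chain_workflow_example():
    generator = ChainWorkflowGenerator().generate_blocks(chain_number=5)
    return generator.generate_workflow()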
|
alexander-bzikadze/graph_diff
|
graph_diff/nirvana_object_model/worflow_generator/chain_workflow_generator.py
|
Python
|
apache-2.0
| 3,332
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import os
import subprocess
import sys
from test_util import TestFailedError, run_command, \
serializeIncrParseMarkupFile
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='Utility for testing incremental syntax tree transfer',
epilog='''
Based on a single template, the utility generates a pre-edit and a post-edit
file. It then verifies that the incrementally transferred syntax tree
matches the syntax tree passed as --expected-incremental-syntax-tree.
To generate the pre-edit and the post-edit file from the template, it
operates on markers of the form:
<<test_case<pre|||post>>>
These placeholders are replaced by:
- 'pre' if a different test case than 'test_case' is run
- 'pre' for the pre-edit version of 'test_case'
- 'post' for the post-edit version of 'test_case'
''')
parser.add_argument(
'file', type=argparse.FileType(),
help='The template file to test')
parser.add_argument(
'--test-case', default='',
help='The test case to execute. If no test case is specified all '
'unnamed substitutions are applied')
parser.add_argument(
'--temp-dir', required=True,
help='A temporary directory where pre-edit and post-edit files can be '
'saved')
parser.add_argument(
'--swift-syntax-test', required=True,
help='The path to swift-syntax-test')
parser.add_argument(
'--expected-incremental-syntax-tree', required=True,
help='The path to a file that contains the expected incrementally '
'transferred syntax tree')
args = parser.parse_args(sys.argv[1:])
test_file = args.file.name
test_file_name = os.path.basename(test_file)
test_case = args.test_case
temp_dir = args.temp_dir
swift_syntax_test = args.swift_syntax_test
expected_syntax_tree_file = args.expected_incremental_syntax_tree
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
incremental_serialized_file = temp_dir + '/' + test_file_name + '.' \
+ test_case + '.incr.json'
try:
serializeIncrParseMarkupFile(test_file=test_file,
test_case=test_case,
mode='incremental',
serialization_mode='incremental',
omit_node_ids=False,
output_file=incremental_serialized_file,
temp_dir=temp_dir + '/temp',
swift_syntax_test=swift_syntax_test,
print_visual_reuse_info=False)
except TestFailedError as e:
print('Test case "%s" of %s FAILed' % (test_case, test_file),
file=sys.stderr)
print(e.message, file=sys.stderr)
sys.exit(1)
# Check if the two syntax trees are the same
try:
run_command(
[
'diff', '-u',
incremental_serialized_file,
expected_syntax_tree_file
])
except subprocess.CalledProcessError as e:
print('Test case "%s" of %s FAILed' % (test_case, test_file),
file=sys.stderr)
print('Syntax tree of incremental parsing does not match expected '
              'incrementally transferred syntax tree:\n\n', file=sys.stderr)
print(e.output, file=sys.stderr)
sys.exit(1)
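# --- Illustrative sketch (not part of this utility) -------------------------
# The real marker expansion lives in test_util.serializeIncrParseMarkupFile;
# this hedged sketch only mirrors the behaviour described in the epilog above:
# '<<name<pre|||post>>>' becomes 'post' for the post-edit version of 'name'
# and 'pre' otherwise.
def _expand_markers_sketch(template, test_case, post_edit):
    import re
    def repl(match):
        name, pre, post = match.group(1), match.group(2), match.group(3)
        return post if (post_edit and name == test_case) else pre
    return re.sub(r'<<([^<]*)<(.*?)\|\|\|(.*?)>>>', repl, template)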
if __name__ == '__main__':
main()
|
jopamer/swift
|
utils/incrparse/incr_transfer_tree.py
|
Python
|
apache-2.0
| 3,699
|
import datetime
import time
from pandac.PandaModules import TextNode, Vec3, Vec4, PlaneNode, Plane, Point3
from toontown.pgui.DirectGui import DirectFrame, DirectLabel, DirectButton, DirectScrolledList, DGG
from direct.directnotify import DirectNotifyGlobal
from toontown.pgui import DirectGuiGlobals
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
from toontown.parties.PartyInfo import PartyInfo
from toontown.parties import PartyGlobals
from toontown.ai.NewsManager import NewsManager
def myStrftime(myTime):
    result = myTime.strftime('%I')
if result[0] == '0':
result = result[1:]
result += myTime.strftime(':%M %p')
return result
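# --- Illustrative example (not part of the original Toontown code) ----------
# myStrftime drops the leading zero from the hour, so a 9:05 AM timestamp
# renders as '9:05 AM' rather than '09:05 AM'. The sample date is arbitrary.
def _myStrftimeExample():
    sample = datetime.datetime(2019, 1, 1, 9, 5)
    return myStrftime(sample)  # -> '9:05 AM'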
class CalendarGuiDay(DirectFrame):
    notify = DirectNotifyGlobal.directNotify.newCategory('CalendarGuiDay')
ScrollListTextSize = 0.03
def __init__(self, parent, myDate, startDate, dayClickCallback = None, onlyFutureDaysClickable = False):
self.origParent = parent
self.startDate = startDate
self.myDate = myDate
self.dayClickCallback = dayClickCallback
self.onlyFutureDaysClickable = onlyFutureDaysClickable
DirectFrame.__init__(self, parent=parent)
self.timedEvents = []
self.partiesInvitedToToday = []
self.hostedPartiesToday = []
self.yearlyHolidaysToday = []
self.showMarkers = config.GetBool('show-calendar-markers', 0)
self.filter = ToontownGlobals.CalendarFilterShowAll
self.load()
self.createGuiObjects()
self.update()
def createDummyLocators(self):
self.dayButtonLocator = self.attachNewNode('dayButtonLocator')
self.dayButtonLocator.setX(0.1)
self.dayButtonLocator.setZ(-0.05)
self.numberLocator = self.attachNewNode('numberLocator')
self.numberLocator.setX(0.09)
self.scrollLocator = self.attachNewNode('scrollLocator')
self.selectedLocator = self.attachNewNode('selectedLocator')
self.selectedLocator.setX(0.11)
self.selectedLocator.setZ(-0.06)
def load(self):
dayAsset = loader.loadModel('phase_4/models/parties/tt_m_gui_sbk_calendar_box')
dayAsset.reparentTo(self)
self.dayButtonLocator = self.find('**/loc_origin')
self.numberLocator = self.find('**/loc_number')
self.scrollLocator = self.find('**/loc_topLeftList')
self.selectedLocator = self.find('**/loc_origin')
self.todayBox = self.find('**/boxToday')
self.todayBox.hide()
self.selectedFrame = self.find('**/boxHover')
self.selectedFrame.hide()
self.defaultBox = self.find('**/boxBlank')
self.scrollBottomRightLocator = self.find('**/loc_bottomRightList')
self.scrollDownLocator = self.find('**/loc_scrollDown')
self.attachMarker(self.scrollDownLocator)
self.scrollUpLocator = self.find('**/loc_scrollUp')
self.attachMarker(self.scrollUpLocator)
def attachMarker(self, parent, scale = 0.005, color = (1, 0, 0)):
if self.showMarkers:
marker = loader.loadModel('phase_3/models/misc/sphere')
marker.reparentTo(parent)
marker.setScale(scale)
marker.setColor(*color)
def createGuiObjects(self):
self.dayButton = DirectButton(parent=self.dayButtonLocator, image=self.selectedFrame, relief=None, command=self.__clickedOnDay, pressEffect=1, rolloverSound=None, clickSound=None)
self.numberWidget = DirectLabel(parent=self.numberLocator, relief=None, text=str(self.myDate.day), text_scale=0.04, text_align=TextNode.ACenter, text_font=ToontownGlobals.getInterfaceFont(), text_fg=Vec4(110 / 255.0, 126 / 255.0, 255 / 255.0, 1))
self.attachMarker(self.numberLocator)
self.listXorigin = 0
self.listFrameSizeX = self.scrollBottomRightLocator.getX() - self.scrollLocator.getX()
self.scrollHeight = self.scrollLocator.getZ() - self.scrollBottomRightLocator.getZ()
self.listZorigin = self.scrollBottomRightLocator.getZ()
self.listFrameSizeZ = self.scrollLocator.getZ() - self.scrollBottomRightLocator.getZ()
self.arrowButtonXScale = 1
self.arrowButtonZScale = 1
self.itemFrameXorigin = 0
self.itemFrameZorigin = 0
self.buttonXstart = self.itemFrameXorigin + 0.21
self.gui = loader.loadModel('phase_3.5/models/gui/friendslist_gui')
buttonOffSet = -0.01
incButtonPos = (0.0, 0, 0)
decButtonPos = (0.0, 0, 0)
itemFrameMinZ = self.listZorigin
itemFrameMaxZ = self.listZorigin + self.listFrameSizeZ
arrowUp = self.find('**/downScroll_up')
arrowDown = self.find('**/downScroll_down')
arrowHover = self.find('**/downScroll_hover')
self.scrollList = DirectScrolledList(parent=self.scrollLocator, relief=None, pos=(0, 0, 0), incButton_image=(arrowUp,
arrowDown,
arrowHover,
arrowUp), incButton_relief=None, incButton_scale=(self.arrowButtonXScale, 1, self.arrowButtonZScale), incButton_pos=incButtonPos, incButton_image3_color=Vec4(1, 1, 1, 0.2), decButton_image=(arrowUp,
arrowDown,
arrowHover,
arrowUp), decButton_relief=None, decButton_scale=(self.arrowButtonXScale, 1, -self.arrowButtonZScale), decButton_pos=decButtonPos, decButton_image3_color=Vec4(1, 1, 1, 0.2), itemFrame_pos=(self.itemFrameXorigin, 0, -0.03), numItemsVisible=4, incButtonCallback=self.scrollButtonPressed, decButtonCallback=self.scrollButtonPressed)
itemFrameParent = self.scrollList.itemFrame.getParent()
self.scrollList.incButton.reparentTo(self.scrollDownLocator)
self.scrollList.decButton.reparentTo(self.scrollUpLocator)
arrowUp.removeNode()
arrowDown.removeNode()
arrowHover.removeNode()
clipper = PlaneNode('clipper')
clipper.setPlane(Plane(Vec3(-1, 0, 0), Point3(0.23, 0, 0)))
clipNP = self.scrollList.component('itemFrame').attachNewNode(clipper)
self.scrollList.component('itemFrame').setClipPlane(clipNP)
return
def scrollButtonPressed(self):
self.__clickedOnDay()
def adjustForMonth(self):
curServerDate = base.cr.toontownTimeManager.getCurServerDateTime()
if self.onlyFutureDaysClickable:
if self.myDate.year < curServerDate.year or self.myDate.year == curServerDate.year and self.myDate.month < curServerDate.month or self.myDate.year == curServerDate.year and self.myDate.month == curServerDate.month and self.myDate.day < curServerDate.day:
self.numberWidget.setColorScale(0.5, 0.5, 0.5, 0.5)
self.numberWidget['state'] = DirectGuiGlobals.DISABLED
else:
self.numberWidget.setColorScale(1, 1, 1, 1)
if self.myDate.month != self.startDate.month:
self.setColorScale(0.75, 0.75, 0.75, 1.0)
if self.dayClickCallback is not None:
self.numberWidget['state'] = DirectGuiGlobals.DISABLED
else:
self.setColorScale(1, 1, 1, 1)
if self.myDate.date() == curServerDate.date():
self.defaultBox.hide()
self.todayBox.show()
else:
self.defaultBox.show()
self.todayBox.hide()
return
def destroy(self):
if self.dayClickCallback is not None:
self.numberWidget.destroy()
self.dayClickCallback = None
        self.notify.debug('destroying %s' % self.myDate)
        try:
            for item in self.scrollList['items']:
                if hasattr(item, 'description') and item.description and hasattr(item.description, 'destroy'):
                    self.notify.debug('destroying description of item %s' % item)
                    item.unbind(DGG.ENTER)
                    item.unbind(DGG.EXIT)
                    item.description.destroy()
        except Exception:
            self.notify.debug('pass %s' % self.myDate)
self.scrollList.removeAndDestroyAllItems()
self.scrollList.destroy()
self.dayButton.destroy()
DirectFrame.destroy(self)
return
def addWeeklyHolidays(self):
if not self.filter == ToontownGlobals.CalendarFilterShowAll and not self.filter == ToontownGlobals.CalendarFilterShowOnlyHolidays:
return
if base.cr.newsManager:
holidays = base.cr.newsManager.getHolidaysForWeekday(self.myDate.weekday())
holidayName = ''
holidayDesc = ''
for holidayId in holidays:
if holidayId in TTLocalizer.HolidayNamesInCalendar:
holidayName = TTLocalizer.HolidayNamesInCalendar[holidayId][0]
holidayDesc = TTLocalizer.HolidayNamesInCalendar[holidayId][1]
else:
holidayName = TTLocalizer.UnknownHoliday % holidayId
self.addTitleAndDescToScrollList(holidayName, holidayDesc)
self.scrollList.refresh()
if config.GetBool('calendar-test-items', 0):
if self.myDate.date() + datetime.timedelta(days=-1) == base.cr.toontownTimeManager.getCurServerDateTime().date():
testItems = ('1:00 AM Party', '2:00 AM CEO', '11:15 AM Party', '5:30 PM CJ', '11:00 PM Party', 'Really Really Long String')
for text in testItems:
newItem = DirectLabel(relief=None, text=text, text_scale=self.ScrollListTextSize, text_align=TextNode.ALeft)
self.scrollList.addItem(newItem)
if self.myDate.date() + datetime.timedelta(days=-2) == base.cr.toontownTimeManager.getCurServerDateTime().date():
testItems = ('1:00 AM Party', '3:00 AM CFO', '11:00 AM Party')
textSize = self.ScrollListTextSize
for text in testItems:
newItem = DirectLabel(relief=None, text=text, text_scale=textSize, text_align=TextNode.ALeft)
self.scrollList.addItem(newItem)
def updateArrowButtons(self):
numItems = 0
try:
numItems = len(self.scrollList['items'])
        except Exception:
numItems = 0
if numItems <= self.scrollList.numItemsVisible:
self.scrollList.incButton.hide()
self.scrollList.decButton.hide()
else:
self.scrollList.incButton.show()
self.scrollList.decButton.show()
def collectTimedEvents(self):
self.timedEvents = []
if self.filter == ToontownGlobals.CalendarFilterShowAll or self.filter == ToontownGlobals.CalendarFilterShowOnlyParties:
for party in localAvatar.partiesInvitedTo:
if party.startTime.date() == self.myDate.date():
self.partiesInvitedToToday.append(party)
self.timedEvents.append((party.startTime.time(), party))
for party in localAvatar.hostedParties:
if party.startTime.date() == self.myDate.date():
self.hostedPartiesToday.append(party)
self.timedEvents.append((party.startTime.time(), party))
if base.cr.newsManager and (self.filter == ToontownGlobals.CalendarFilterShowAll or self.filter == ToontownGlobals.CalendarFilterShowOnlyHolidays):
yearlyHolidays = base.cr.newsManager.getYearlyHolidaysForDate(self.myDate)
for holiday in yearlyHolidays:
holidayId = holiday[1]
holidayStart = holiday[2]
holidayEnd = holiday[3]
holidayType = holiday[0]
if holidayStart[0] == self.myDate.month and holidayStart[1] == self.myDate.day:
myTime = datetime.time(holidayStart[2], holidayStart[3])
elif holidayEnd[0] == self.myDate.month and holidayEnd[1] == self.myDate.day:
myTime = datetime.time(holidayEnd[2], holidayEnd[3])
else:
self.notify.error('holiday is not today %s' % holiday)
self.timedEvents.append((myTime, holiday))
oncelyHolidays = base.cr.newsManager.getOncelyHolidaysForDate(self.myDate)
for holiday in oncelyHolidays:
holidayId = holiday[1]
holidayStart = holiday[2]
holidayEnd = holiday[3]
holidayType = holiday[0]
if holidayStart[0] == self.myDate.year and holidayStart[1] == self.myDate.month and holidayStart[2] == self.myDate.day:
myTime = datetime.time(holidayStart[3], holidayStart[4])
elif holidayEnd[0] == self.myDate.year and holidayEnd[1] == self.myDate.month and holidayEnd[2] == self.myDate.day:
myTime = datetime.time(holidayEnd[3], holidayEnd[4])
else:
self.notify.error('holiday is not today %s' % holiday)
self.timedEvents.append((myTime, holiday))
multipleStartHolidays = base.cr.newsManager.getMultipleStartHolidaysForDate(self.myDate)
for holiday in multipleStartHolidays:
holidayId = holiday[1]
holidayStart = holiday[2]
holidayEnd = holiday[3]
holidayType = holiday[0]
if holidayStart[0] == self.myDate.year and holidayStart[1] == self.myDate.month and holidayStart[2] == self.myDate.day:
myTime = datetime.time(holidayStart[3], holidayStart[4])
elif holidayEnd[0] == self.myDate.year and holidayEnd[1] == self.myDate.month and holidayEnd[2] == self.myDate.day:
myTime = datetime.time(holidayEnd[3], holidayEnd[4])
else:
self.notify.error('holiday is not today %s' % holiday)
self.timedEvents.append((myTime, holiday))
relativelyHolidays = base.cr.newsManager.getRelativelyHolidaysForDate(self.myDate)
for holiday in relativelyHolidays:
holidayId = holiday[1]
holidayStart = holiday[2]
holidayEnd = holiday[3]
holidayType = holiday[0]
if holidayStart[0] == self.myDate.month and holidayStart[1] == self.myDate.day:
myTime = datetime.time(holidayStart[2], holidayStart[3])
elif holidayEnd[0] == self.myDate.month and holidayEnd[1] == self.myDate.day:
myTime = datetime.time(holidayEnd[2], holidayEnd[3])
else:
self.notify.error('holiday is not today %s' % holiday)
self.timedEvents.append((myTime, holiday))
def timedEventCompare(te1, te2):
if te1[0] < te2[0]:
return -1
elif te1[0] == te2[0]:
return 0
else:
return 1
self.timedEvents.sort(cmp=timedEventCompare)
for timedEvent in self.timedEvents:
if isinstance(timedEvent[1], PartyInfo):
self.addPartyToScrollList(timedEvent[1])
elif isinstance(timedEvent[1], tuple) and timedEvent[1][0] == NewsManager.YearlyHolidayType:
self.addYearlyHolidayToScrollList(timedEvent[1])
elif isinstance(timedEvent[1], tuple) and timedEvent[1][0] == NewsManager.OncelyHolidayType:
self.addOncelyHolidayToScrollList(timedEvent[1])
elif isinstance(timedEvent[1], tuple) and timedEvent[1][0] == NewsManager.OncelyMultipleStartHolidayType:
self.addOncelyMultipleStartHolidayToScrollList(timedEvent[1])
elif isinstance(timedEvent[1], tuple) and timedEvent[1][0] == NewsManager.RelativelyHolidayType:
self.addRelativelyHolidayToScrollList(timedEvent[1])
def addYearlyHolidayToScrollList(self, holiday):
holidayId = holiday[1]
holidayStart = holiday[2]
holidayEnd = holiday[3]
holidayType = holiday[0]
holidayText = ''
startTime = datetime.time(holidayStart[2], holidayStart[3])
endTime = datetime.time(holidayEnd[2], holidayEnd[3])
startDate = datetime.date(self.myDate.year, holidayStart[0], holidayStart[1])
endDate = datetime.date(self.myDate.year, holidayEnd[0], holidayEnd[1])
if endDate < startDate:
endDate = datetime.date(endDate.year + 1, endDate.month, endDate.day)
if holidayId in TTLocalizer.HolidayNamesInCalendar:
holidayName = TTLocalizer.HolidayNamesInCalendar[holidayId][0]
holidayDesc = TTLocalizer.HolidayNamesInCalendar[holidayId][1]
else:
holidayName = TTLocalizer.UnknownHoliday % holidayId
holidayDesc = TTLocalizer.UnknownHoliday % holidayId
if holidayStart[0] == holidayEnd[0] and holidayStart[1] == holidayEnd[1]:
holidayText = myStrftime(startTime)
holidayText += ' ' + holidayName
holidayDesc += ' ' + TTLocalizer.CalendarEndsAt + myStrftime(endTime)
elif self.myDate.month == holidayStart[0] and self.myDate.day == holidayStart[1]:
holidayText = myStrftime(startTime)
holidayText += ' ' + holidayName
holidayDesc = holidayName + '. ' + holidayDesc
holidayDesc += ' ' + TTLocalizer.CalendarEndsAt + endDate.strftime(TTLocalizer.HolidayFormat) + myStrftime(endTime)
elif self.myDate.month == holidayEnd[0] and self.myDate.day == holidayEnd[1]:
holidayText = myStrftime(endTime)
holidayText += ' ' + TTLocalizer.CalendarEndDash + holidayName
holidayDesc = TTLocalizer.CalendarEndOf + holidayName
holidayDesc += '. ' + TTLocalizer.CalendarStartedOn + startDate.strftime(TTLocalizer.HolidayFormat) + myStrftime(startTime)
else:
self.notify.error('unhandled case')
self.addTitleAndDescToScrollList(holidayText, holidayDesc)
def addOncelyHolidayToScrollList(self, holiday):
holidayId = holiday[1]
holidayStart = holiday[2]
holidayEnd = holiday[3]
holidayType = holiday[0]
holidayText = ''
startTime = datetime.time(holidayStart[3], holidayStart[4])
endTime = datetime.time(holidayEnd[3], holidayEnd[4])
startDate = datetime.date(holidayStart[0], holidayStart[1], holidayStart[2])
endDate = datetime.date(holidayStart[0], holidayEnd[1], holidayEnd[2])
if endDate < startDate:
endDate = datetime.date(endDate.year + 1, endDate.month, endDate.day)
if holidayId in TTLocalizer.HolidayNamesInCalendar:
holidayName = TTLocalizer.HolidayNamesInCalendar[holidayId][0]
holidayDesc = TTLocalizer.HolidayNamesInCalendar[holidayId][1]
else:
holidayName = TTLocalizer.UnknownHoliday % holidayId
holidayDesc = ''
if holidayStart[1] == holidayEnd[1] and holidayStart[2] == holidayEnd[2]:
holidayText = myStrftime(startTime)
holidayText += ' ' + holidayName
holidayDesc = holidayName + '. ' + holidayDesc
holidayDesc += ' ' + TTLocalizer.CalendarEndsAt + myStrftime(endTime)
elif self.myDate.year == holidayStart[0] and self.myDate.month == holidayStart[1] and self.myDate.day == holidayStart[2]:
holidayText = myStrftime(startTime)
holidayText += ' ' + holidayName
holidayDesc = holidayName + '. ' + holidayDesc
holidayDesc += ' ' + TTLocalizer.CalendarEndsAt + endDate.strftime(TTLocalizer.HolidayFormat) + myStrftime(endTime)
elif self.myDate.year == holidayEnd[0] and self.myDate.month == holidayEnd[1] and self.myDate.day == holidayEnd[2]:
holidayText = myStrftime(endTime)
holidayText += ' ' + TTLocalizer.CalendarEndDash + holidayName
holidayDesc = TTLocalizer.CalendarEndOf + holidayName
holidayDesc += '. ' + TTLocalizer.CalendarStartedOn + startDate.strftime(TTLocalizer.HolidayFormat) + myStrftime(startTime)
else:
self.notify.error('unhandled case')
self.addTitleAndDescToScrollList(holidayText, holidayDesc)
def addOncelyMultipleStartHolidayToScrollList(self, holiday):
self.addOncelyHolidayToScrollList(holiday)
def addRelativelyHolidayToScrollList(self, holiday):
holidayId = holiday[1]
holidayStart = holiday[2]
holidayEnd = holiday[3]
holidayType = holiday[0]
holidayText = ''
startTime = datetime.time(holidayStart[2], holidayStart[3])
endTime = datetime.time(holidayEnd[2], holidayEnd[3])
startDate = datetime.date(self.myDate.year, holidayStart[0], holidayStart[1])
endDate = datetime.date(self.myDate.year, holidayEnd[0], holidayEnd[1])
if endDate < startDate:
            endDate = datetime.date(endDate.year + 1, endDate.month, endDate.day)
if holidayId in TTLocalizer.HolidayNamesInCalendar:
holidayName = TTLocalizer.HolidayNamesInCalendar[holidayId][0]
holidayDesc = TTLocalizer.HolidayNamesInCalendar[holidayId][1]
else:
holidayName = TTLocalizer.UnknownHoliday % holidayId
holidayDesc = ''
if holidayStart[0] == holidayEnd[0] and holidayStart[1] == holidayEnd[1]:
holidayText = myStrftime(startTime)
holidayText += ' ' + holidayName
holidayDesc += ' ' + TTLocalizer.CalendarEndsAt + myStrftime(endTime)
elif self.myDate.month == holidayStart[0] and self.myDate.day == holidayStart[1]:
holidayText = myStrftime(startTime)
holidayText += ' ' + holidayName
holidayDesc = holidayName + '. ' + holidayDesc
holidayDesc += ' ' + TTLocalizer.CalendarEndsAt + endDate.strftime(TTLocalizer.HolidayFormat) + myStrftime(endTime)
elif self.myDate.month == holidayEnd[0] and self.myDate.day == holidayEnd[1]:
holidayText = myStrftime(endTime)
holidayText += ' ' + TTLocalizer.CalendarEndDash + holidayName
holidayDesc = TTLocalizer.CalendarEndOf + holidayName
holidayDesc += '. ' + TTLocalizer.CalendarStartedOn + startDate.strftime(TTLocalizer.HolidayFormat) + myStrftime(startTime)
else:
self.notify.error('unhandled case')
self.addTitleAndDescToScrollList(holidayText, holidayDesc)
def addTitleAndDescToScrollList(self, title, desc):
textSize = self.ScrollListTextSize
descTextSize = 0.05
newItem = DirectButton(relief=None, text=title, text_scale=textSize, text_align=TextNode.ALeft, rolloverSound=None, clickSound=None, pressEffect=0, command=self.__clickedOnScrollItem)
scrollItemHeight = newItem.getHeight()
descUnderItemZAdjust = scrollItemHeight * descTextSize / textSize
descUnderItemZAdjust = max(0.0534, descUnderItemZAdjust)
descUnderItemZAdjust = -descUnderItemZAdjust
descZAdjust = descUnderItemZAdjust
newItem.description = DirectLabel(parent=newItem, pos=(0.115, 0, descZAdjust), text='', text_wordwrap=15, pad=(0.02, 0.02), text_scale=descTextSize, text_align=TextNode.ACenter, textMayChange=0)
newItem.description.checkedHeight = False
newItem.description.setBin('gui-popup', 0)
newItem.description.hide()
newItem.bind(DGG.ENTER, self.enteredTextItem, extraArgs=[newItem, desc, descUnderItemZAdjust])
newItem.bind(DGG.EXIT, self.exitedTextItem, extraArgs=[newItem])
self.scrollList.addItem(newItem)
return
def exitedTextItem(self, newItem, mousepos):
newItem.description.hide()
def enteredTextItem(self, newItem, descText, descUnderItemZAdjust, mousePos):
if not newItem.description.checkedHeight:
newItem.description.checkedHeight = True
newItem.description['text'] = descText
bounds = newItem.description.getBounds()
descHeight = newItem.description.getHeight()
scrollItemHeight = newItem.getHeight()
descOverItemZAdjust = descHeight - scrollItemHeight / 2.0
descZPos = self.getPos(aspect2d)[2] + descUnderItemZAdjust - descHeight
if descZPos < -1.0:
newItem.description.setZ(descOverItemZAdjust)
descWidth = newItem.description.getWidth()
brightFrame = loader.loadModel('phase_4/models/parties/tt_m_gui_sbk_calendar_popUp_bg')
newItem.description['geom'] = brightFrame
newItem.description['geom_scale'] = (descWidth, 1, descHeight)
descGeomZ = (bounds[2] - bounds[3]) / 2.0
descGeomZ += bounds[3]
newItem.description['geom_pos'] = (0, 0, descGeomZ)
newItem.description.show()
def addPartyToScrollList(self, party):
textSize = self.ScrollListTextSize
descTextSize = 0.05
partyTitle = myStrftime(party.startTime)
partyTitle = partyTitle + ' ' + TTLocalizer.EventsPageCalendarTabParty
textSize = self.ScrollListTextSize
descTextSize = 0.05
newItem = DirectButton(relief=None, text=partyTitle, text_scale=textSize, text_align=TextNode.ALeft, rolloverSound=None, clickSound=None, pressEffect=0, command=self.__clickedOnScrollItem)
scrollItemHeight = newItem.getHeight()
descUnderItemZAdjust = scrollItemHeight * descTextSize / textSize
descUnderItemZAdjust = max(0.0534, descUnderItemZAdjust)
descUnderItemZAdjust = -descUnderItemZAdjust
descZAdjust = descUnderItemZAdjust
self.scrollList.addItem(newItem)
newItem.description = MiniInviteVisual(newItem, party)
newItem.description.setBin('gui-popup', 0)
newItem.description.hide()
newItem.bind(DGG.ENTER, self.enteredTextItem, extraArgs=[newItem, newItem.description, descUnderItemZAdjust])
newItem.bind(DGG.EXIT, self.exitedTextItem, extraArgs=[newItem])
return
def __clickedOnScrollItem(self):
self.__clickedOnDay()
def __clickedOnDay(self):
acceptClick = True
if self.onlyFutureDaysClickable:
curServerDate = base.cr.toontownTimeManager.getCurServerDateTime()
if self.myDate.date() < curServerDate.date():
acceptClick = False
if not acceptClick:
return
if self.dayClickCallback:
self.dayClickCallback(self)
self.notify.debug('we got clicked on %s' % self.myDate.date())
messenger.send('clickedOnDay', [self.myDate.date()])
def updateSelected(self, selected):
multiplier = 1.1
if selected:
self.selectedFrame.show()
self.setScale(multiplier)
self.setPos(-0.01, 0, 0.01)
grandParent = self.origParent.getParent()
self.origParent.reparentTo(grandParent)
else:
self.selectedFrame.hide()
self.setScale(1.0)
self.setPos(0, 0, 0)
def changeDate(self, startDate, myDate):
self.startDate = startDate
self.myDate = myDate
self.scrollList.removeAndDestroyAllItems()
self.update()
def update(self):
self.numberWidget['text'] = str(self.myDate.day)
self.adjustForMonth()
self.addWeeklyHolidays()
self.collectTimedEvents()
self.updateArrowButtons()
def changeFilter(self, filter):
oldFilter = self.filter
self.filter = filter
if self.filter != oldFilter:
self.scrollList.removeAndDestroyAllItems()
self.update()
class MiniInviteVisual(DirectFrame):
def __init__(self, parent, partyInfo):
DirectFrame.__init__(self, parent, pos=(0.1, 0, -0.018))
self.checkedHeight = True
self.partyInfo = partyInfo
self._parent = parent
self.inviteBackgrounds = loader.loadModel('phase_4/models/parties/partyStickerbook')
backgrounds = ['calendar_popup_birthday',
'calendar_popup_fun',
'calendar_popup_cupcake',
'tt_t_gui_sbk_calendar_popup_racing',
'tt_t_gui_sbk_calendar_popup_valentine1',
'tt_t_gui_sbk_calendar_popup_victoryParty',
'tt_t_gui_sbk_calendar_popup_winter1']
self.background = DirectFrame(parent=self, relief=None, geom=self.inviteBackgrounds.find('**/%s' % backgrounds[self.partyInfo.inviteTheme]), scale=(0.7, 1.0, 0.23), pos=(0.0, 0.0, -0.1))
self.whosePartyLabel = DirectLabel(parent=self, relief=None, pos=(0.07, 0.0, -0.04), text=' ', text_scale=0.04, text_wordwrap=8, textMayChange=True)
self.whenTextLabel = DirectLabel(parent=self, relief=None, text=' ', pos=(0.07, 0.0, -0.13), text_scale=0.04, textMayChange=True)
self.partyStatusLabel = DirectLabel(parent=self, relief=None, text=' ', pos=(0.07, 0.0, -0.175), text_scale=0.04, textMayChange=True)
return
def show(self):
self.reparentTo(self._parent)
self.setPos(0.1, 0, -0.018)
newParent = self._parent.getParent().getParent()
self.wrtReparentTo(newParent)
if self.whosePartyLabel['text'] == ' ':
host = base.cr.identifyAvatar(self.partyInfo.hostId)
if host:
name = host.getName()
self.whosePartyLabel['text'] = name
if self.whenTextLabel['text'] == ' ':
time = myStrftime(self.partyInfo.startTime)
self.whenTextLabel['text'] = time
if self.partyStatusLabel['text'] == ' ':
if self.partyInfo.status == PartyGlobals.PartyStatus.Cancelled:
self.partyStatusLabel['text'] = TTLocalizer.CalendarPartyCancelled
elif self.partyInfo.status == PartyGlobals.PartyStatus.Finished:
self.partyStatusLabel['text'] = TTLocalizer.CalendarPartyFinished
elif self.partyInfo.status == PartyGlobals.PartyStatus.Started:
self.partyStatusLabel['text'] = TTLocalizer.CalendarPartyGo
elif self.partyInfo.status == PartyGlobals.PartyStatus.NeverStarted:
self.partyStatusLabel['text'] = TTLocalizer.CalendarPartyNeverStarted
else:
self.partyStatusLabel['text'] = TTLocalizer.CalendarPartyGetReady
DirectFrame.show(self)
def destroy(self):
del self.checkedHeight
del self.partyInfo
del self._parent
del self.background
del self.whosePartyLabel
del self.whenTextLabel
del self.partyStatusLabel
DirectFrame.destroy(self)
|
silly-wacky-3-town-toon/SOURCE-COD
|
toontown/parties/CalendarGuiDay.py
|
Python
|
apache-2.0
| 30,399
|
"""
Sponge Knowledge Base
Using unordered rules
"""
from java.util.concurrent.atomic import AtomicInteger
from org.openksavi.sponge.examples import SameSourceJavaUnorderedRule
from org.openksavi.sponge.core.library import Deduplication
from java.time import Duration
from java.util.concurrent import TimeUnit
def onInit():
# Variables for assertions only
sponge.setVariable("hardwareFailureJavaCount", AtomicInteger(0))
sponge.setVariable("hardwareFailureScriptCount", AtomicInteger(0))
sponge.setVariable("sameSourceFirstFireCount", AtomicInteger(0))
class FirstRule(Rule):
def onConfigure(self):
self.withEvents(["filesystemFailure", "diskFailure"]).withOrdered(False)
self.withAllConditions([
lambda rule, event: rule.firstEvent.get("source") == event.get("source"),
            lambda rule, event: Duration.between(rule.firstEvent.time, event.time).seconds <= 2
])
self.withDuration(Duration.ofSeconds(5))
def onRun(self, event):
self.logger.debug("Running rule for events: {}", self.eventSequence)
sponge.getVariable("sameSourceFirstFireCount").incrementAndGet()
sponge.event("alarm").set("source", self.firstEvent.get("source")).send()
class SameSourceAllRule(Rule):
def onConfigure(self):
self.withEvents(["filesystemFailure e1", "diskFailure e2 :all"]).withOrdered(False)
self.withCondition("e1", self.severityCondition)
self.withConditions("e2", [self.severityCondition, self.diskFailureSourceCondition])
self.withDuration(Duration.ofSeconds(5))
def onRun(self, event):
self.logger.info("Monitoring log [{}]: Critical failure in {}! Events: {}", event.time, event.get("source"),
self.eventSequence)
sponge.getVariable("hardwareFailureScriptCount").incrementAndGet()
def severityCondition(self, event):
return int(event.get("severity")) > 5
def diskFailureSourceCondition(self, event):
# Both events have to have the same source
return event.get("source") == self.firstEvent.get("source") and \
Duration.between(self.firstEvent.time, event.time).seconds <= 4
class AlarmFilter(Filter):
def onConfigure(self):
self.withEvent("alarm")
def onInit(self):
self.deduplication = Deduplication("source")
self.deduplication.cacheBuilder.expireAfterWrite(2, TimeUnit.SECONDS)
def onAccept(self, event):
return self.deduplication.onAccept(event)
class Alarm(Trigger):
def onConfigure(self):
self.withEvent("alarm")
def onRun(self, event):
self.logger.debug("Received alarm from {}", event.get("source"))
def onLoad():
sponge.enableJava(SameSourceJavaUnorderedRule)
def onStartup():
sponge.event("diskFailure").set("severity", 10).set("source", "server1").send()
sponge.event("diskFailure").set("severity", 10).set("source", "server2").send()
sponge.event("diskFailure").set("severity", 8).set("source", "server1").send()
sponge.event("diskFailure").set("severity", 8).set("source", "server1").send()
sponge.event("filesystemFailure").set("severity", 8).set("source", "server1").send()
sponge.event("filesystemFailure").set("severity", 6).set("source", "server1").send()
sponge.event("diskFailure").set("severity", 6).set("source", "server1").send()
|
softelnet/sponge
|
sponge-jython/examples/script/py/unordered_rules.py
|
Python
|
apache-2.0
| 3,372
|
"""
Django settings for librarymanagementsystem project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=^%a6&@*aq8$sa$_f_r&b_gczd@sr77hv$xys7k!8f85g6-$e1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social.apps.django_app.default',
'django_extensions',
'djangosecure',
'mainapp',
)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'social.backends.google.GoogleOpenId',
)
SECURE_FRAME_DENY = True
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'social.apps.django_app.middleware.SocialAuthExceptionMiddleware',
"djangosecure.middleware.SecurityMiddleware"
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
'django.contrib.messages.context_processors.messages',
)
CRISPY_TEMPLATE_PACK = 'bootstrap3'
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
LOGIN_REDIRECT_URL = '/admin/'
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/admin/'
LOGIN_ERROR_URL = '/login-error/'
ROOT_URLCONF = 'librarymanagementsystem.urls'
WSGI_APPLICATION = 'librarymanagementsystem.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
TEMPLATE_LOADERS = (
'django.template.loaders.app_directories.Loader',
)
SOCIAL_AUTH_GOOGLE_WHITELISTED_DOMAINS = ['gmail.com']
try:
from local_settings import *
except ImportError:
pass
|
vinu76jsr/librarymanagementsystem
|
librarymanagementsystem/settings.py
|
Python
|
apache-2.0
| 3,254
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteConversationDataset
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_v2_generated_ConversationDatasets_DeleteConversationDataset_sync]
from google.cloud import dialogflow_v2
def sample_delete_conversation_dataset():
# Create a client
client = dialogflow_v2.ConversationDatasetsClient()
# Initialize request argument(s)
request = dialogflow_v2.DeleteConversationDatasetRequest(
name="name_value",
)
# Make the request
operation = client.delete_conversation_dataset(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END dialogflow_v2_generated_ConversationDatasets_DeleteConversationDataset_sync]
|
googleapis/python-dialogflow
|
samples/generated_samples/dialogflow_v2_generated_conversation_datasets_delete_conversation_dataset_sync.py
|
Python
|
apache-2.0
| 1,640
|
#MenuTitle: Guides through All Selected Nodes
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Creates guides through all selected nodes.
"""
from Foundation import NSPoint
import math
thisFont = Glyphs.font # frontmost font
selectedLayers = thisFont.selectedLayers # active layers of selected glyphs
def angle( firstPoint, secondPoint ):
"""
Returns the angle (in degrees) of the straight line between firstPoint and secondPoint,
0 degrees being the second point to the right of first point.
firstPoint, secondPoint: must be NSPoint or GSNode
"""
xDiff = secondPoint.x - firstPoint.x
yDiff = secondPoint.y - firstPoint.y
return math.degrees(math.atan2(yDiff,xDiff))
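# Illustrative examples (not part of the original script):
#     angle(NSPoint(0, 0), NSPoint(1, 1))  -> 45.0
#     angle(NSPoint(0, 0), NSPoint(-1, 0)) -> 180.0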
def newGuide( position, angle=0 ):
try:
# GLYPHS 3
newGuide = GSGuide()
except:
# GLYPHS 2
newGuide = GSGuideLine()
newGuide.position = position
newGuide.angle = angle
return newGuide
def isThereAlreadyAGuideWithTheseProperties(thisLayer,guideposition,guideangle):
if guideangle < 0:
guideangle += 180
if guideangle > 180:
guideangle -= 180
for thisGuide in thisLayer.guides:
thisAngle = thisGuide.angle
if thisAngle < 0:
thisAngle += 180
if thisAngle > 180:
thisAngle -= 180
if abs(thisAngle - guideangle) < 0.01 and abs(thisGuide.position.x - guideposition.x) < 0.01 and abs(thisGuide.position.y - guideposition.y) < 0.01:
return True
return False
if len(selectedLayers) == 1:
thisLayer = selectedLayers[0]
thisGlyph = thisLayer.parent
currentPointSelection = [point.position for point in thisLayer.selection if type(point) in (GSNode,GSAnchor)]
# thisGlyph.beginUndo() # undo grouping causes crashes
try:
if len(currentPointSelection) > 1:
# clear selection:
thisLayer.clearSelection()
currentPointSelection.append(currentPointSelection[0])
for i,j in enumerate(range(1,len(currentPointSelection))):
point1 = currentPointSelection[i]
point2 = currentPointSelection[j]
angleBetweenPoints = angle(point1,point2)
middlePoint = addPoints(point1,point2)
middlePoint.x *= 0.5
middlePoint.y *= 0.5
# create guide and add it to layer:
if not isThereAlreadyAGuideWithTheseProperties(thisLayer, middlePoint, angleBetweenPoints):
guideBetweenPoints = newGuide(middlePoint, angleBetweenPoints)
thisLayer.guides.append( guideBetweenPoints )
# select it:
thisLayer.selection.append(guideBetweenPoints)
elif len(currentPointSelection) == 1:
point = currentPointSelection[0]
guide = newGuide(point)
thisLayer.guides.append(guide)
# select only guide:
thisLayer.clearSelection()
thisLayer.selection.append(guide)
except Exception as e:
raise e
# finally:
# thisGlyph.endUndo() # undo grouping causes crashes
|
mekkablue/Glyphs-Scripts
|
Guides/Guides through All Selected Nodes.py
|
Python
|
apache-2.0
| 2,786
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v9.resources.types import campaign_simulation
from google.ads.googleads.v9.services.types import campaign_simulation_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class CampaignSimulationServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for CampaignSimulationService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_campaign_simulation: gapic_v1.method.wrap_method(
self.get_campaign_simulation,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def get_campaign_simulation(
self,
) -> typing.Callable[
[campaign_simulation_service.GetCampaignSimulationRequest],
campaign_simulation.CampaignSimulation,
]:
raise NotImplementedError
__all__ = ("CampaignSimulationServiceTransport",)
|
googleads/google-ads-python
|
google/ads/googleads/v9/services/services/campaign_simulation_service/transports/base.py
|
Python
|
apache-2.0
| 3,952
|
import codecs
import os
import re
import setuptools
from setuptools import find_packages, setup
from setuptools.command.develop import develop
from setuptools.command.install import install
PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
REQUIREMENTS_FILE = os.path.join(PROJECT_ROOT, "requirements.txt")
REQUIREMENTS_OPTIONAL_FILE = os.path.join(PROJECT_ROOT, "requirements-optional.txt")
REQUIREMENTS_DEV_FILE = os.path.join(PROJECT_ROOT, "requirements-dev.txt")
README_FILE = os.path.join(PROJECT_ROOT, "README.md")
VERSION_FILE = os.path.join(PROJECT_ROOT, "arviz", "__init__.py")
def get_requirements():
with codecs.open(REQUIREMENTS_FILE) as buff:
return buff.read().splitlines()
def get_requirements_dev():
with codecs.open(REQUIREMENTS_DEV_FILE) as buff:
return buff.read().splitlines()
def get_requirements_optional():
with codecs.open(REQUIREMENTS_OPTIONAL_FILE) as buff:
return buff.read().splitlines()
def get_long_description():
with codecs.open(README_FILE, "rt") as buff:
return buff.read()
def get_version():
lines = open(VERSION_FILE, "rt").readlines()
version_regex = r"^__version__ = ['\"]([^'\"]*)['\"]"
for line in lines:
mo = re.search(version_regex, line, re.M)
if mo:
return mo.group(1)
raise RuntimeError("Unable to find version in %s." % (VERSION_FILE,))
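# Illustrative sketch (not part of the original file): with a line such as
#     __version__ = "0.12.1"
# in arviz/__init__.py (version number hypothetical), get_version() returns "0.12.1".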
setup(
name="arviz",
license="Apache-2.0",
version=get_version(),
description="Exploratory analysis of Bayesian models",
author="ArviZ Developers",
url="http://github.com/arviz-devs/arviz",
packages=find_packages(),
install_requires=get_requirements(),
extras_require=dict(all=get_requirements_optional()), # test=get_requirements_dev(),
long_description=get_long_description(),
long_description_content_type="text/markdown",
include_package_data=True,
python_requires='>=3.7',
classifiers=[
"Development Status :: 4 - Beta",
"Framework :: Matplotlib",
"Intended Audience :: Science/Research",
"Intended Audience :: Education",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Visualization",
"Topic :: Scientific/Engineering :: Mathematics",
],
)
|
arviz-devs/arviz
|
setup.py
|
Python
|
apache-2.0
| 2,583
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
analyze_gsimg.py - analyze G Suite image processing workflow
Download image from Google Drive, archive to Google Cloud Storage, send
to Google Cloud Vision for processing, add results row to Google Sheet.
'''
from __future__ import print_function
import argparse
import base64
import io
import os
import webbrowser
from googleapiclient import discovery, http
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2 import credentials
k_ize = lambda b: '%6.2fK' % (b/1000.) # bytes to kBs
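# e.g. k_ize(3254) -> '  3.25K' (illustrative; fixed width, two decimal places)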
FILE = 'YOUR_IMG_ON_DRIVE'
BUCKET = 'YOUR_BUCKET_NAME'
PARENT = '' # YOUR IMG FILE PREFIX
SHEET = 'YOUR_SHEET_ID'
TOP = 5 # TOP # of VISION LABELS TO SAVE
DEBUG = False
# process credentials for OAuth2 tokens
creds = None
TOKENS = 'tokens.json' # OAuth2 token storage
SCOPES = (
'https://www.googleapis.com/auth/drive.readonly',
'https://www.googleapis.com/auth/devstorage.full_control',
'https://www.googleapis.com/auth/cloud-vision',
'https://www.googleapis.com/auth/spreadsheets',
)
if os.path.exists(TOKENS):
creds = credentials.Credentials.from_authorized_user_file(TOKENS)
if not (creds and creds.valid):
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'client_secret.json', SCOPES)
creds = flow.run_local_server()
with open(TOKENS, 'w') as token:
token.write(creds.to_json())
# create API service endpoints
DRIVE = discovery.build('drive', 'v3', credentials=creds)
GCS = discovery.build('storage', 'v1', credentials=creds)
VISION = discovery.build('vision', 'v1', credentials=creds)
SHEETS = discovery.build('sheets', 'v4', credentials=creds)
def drive_get_img(fname):
'download file from Drive and return file info & binary if found'
# search for file on Google Drive
rsp = DRIVE.files().list(q="name='%s'" % fname,
fields='files(id,name,mimeType,modifiedTime)'
).execute().get('files', [])
# download binary & return file info if found, else return None
if rsp:
target = rsp[0] # use first matching file
fileId = target['id']
fname = target['name']
mtype = target['mimeType']
binary = DRIVE.files().get_media(fileId=fileId).execute()
return fname, mtype, target['modifiedTime'], binary
def gcs_blob_upload(fname, bucket, media, mimetype):
'upload an object to a Google Cloud Storage bucket'
# build blob metadata and upload via GCS API
body = {'name': fname, 'uploadType': 'multipart', 'contentType': mimetype}
return GCS.objects().insert(bucket=bucket, body=body,
media_body=http.MediaIoBaseUpload(io.BytesIO(media), mimetype),
fields='bucket,name').execute()
def vision_label_img(img, top):
'send image to Vision API for label annotation'
# build image metadata and call Vision API to process
body = {'requests': [{
'image': {'content': img},
'features': [{'type': 'LABEL_DETECTION', 'maxResults': top}],
}]}
rsp = VISION.images().annotate(body=body).execute().get('responses', [{}])[0]
# return top labels for image as CSV for Sheet (row)
if 'labelAnnotations' in rsp:
return ', '.join('(%.2f%%) %s' % (
label['score']*100., label['description']) \
for label in rsp['labelAnnotations'])
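# Illustrative return value with hypothetical labels/scores:
#     '(98.31%) Cat, (92.04%) Mammal, (87.50%) Whiskers'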
def sheet_append_row(sheet, row):
'append row to a Google Sheet, return #cells added'
# call Sheets API to write row to Sheet (via its ID)
rsp = SHEETS.spreadsheets().values().append(
spreadsheetId=sheet, range='Sheet1',
valueInputOption='USER_ENTERED', body={'values': [row]}
).execute()
if rsp:
return rsp.get('updates').get('updatedCells')
def main(fname, bucket, sheet_id, folder, top, debug):
'"main()" drives process from image download through report generation'
# download img file & info from Drive
rsp = drive_get_img(fname)
if not rsp:
return
fname, mtype, ftime, data = rsp
if debug:
print('Downloaded %r (%s, %s, size: %d)' % (fname, mtype, ftime, len(data)))
# upload file to GCS
gcsname = '%s/%s'% (folder, fname)
rsp = gcs_blob_upload(gcsname, bucket, data, mtype)
if not rsp:
return
if debug:
print('Uploaded %r to GCS bucket %r' % (rsp['name'], rsp['bucket']))
# process w/Vision
rsp = vision_label_img(base64.b64encode(data).decode('utf-8'), top)
if not rsp:
return
if debug:
print('Top %d labels from Vision API: %s' % (top, rsp))
# push results to Sheet, get cells-saved count
fsize = k_ize(len(data))
row = [folder,
'=HYPERLINK("storage.cloud.google.com/%s/%s", "%s")' % (
bucket, gcsname, fname), mtype, ftime, fsize, rsp
]
rsp = sheet_append_row(sheet_id, row)
if not rsp:
return
if debug:
print('Added %d cells to Google Sheet' % rsp)
return True
if __name__ == '__main__':
# args: [-hv] [-i imgfile] [-b bucket] [-f folder] [-s Sheet ID] [-t top labels]
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--imgfile", action="store_true",
default=FILE, help="image file filename")
parser.add_argument("-b", "--bucket_id", action="store_true",
default=BUCKET, help="Google Cloud Storage bucket name")
parser.add_argument("-f", "--folder", action="store_true",
default=PARENT, help="Google Cloud Storage image folder")
parser.add_argument("-s", "--sheet_id", action="store_true",
default=SHEET, help="Google Sheet Drive file ID (44-char str)")
parser.add_argument("-t", "--viz_top", action="store_true",
default=TOP, help="return top N (default %d) Vision API labels" % TOP)
parser.add_argument("-v", "--verbose", action="store_true",
default=DEBUG, help="verbose display output")
args = parser.parse_args()
print('Processing file %r... please wait' % args.imgfile)
rsp = main(args.imgfile, args.bucket_id,
args.sheet_id, args.folder, args.viz_top, args.verbose)
if rsp:
sheet_url = 'https://docs.google.com/spreadsheets/d/%s/edit' % args.sheet_id
print('DONE: opening web browser to it, or see %s' % sheet_url)
webbrowser.open(sheet_url, new=1, autoraise=True)
else:
print('ERROR: could not process %r' % args.imgfile)
|
googlecodelabs/analyze_gsimg
|
alt/analyze_gsimg-newauth.py
|
Python
|
apache-2.0
| 7,125
|
#!/usr/bin/python
from optparse import OptionParser
import filecmp
import json
import of_daemon
import of_node
import of_util
import os
import subprocess
import sys
import tempfile
import time
of_util.check_python_version()
parser = OptionParser()
(opts, args, node_list) = of_util.parse_deploy_opts(parser)
if opts.bld_dir == None:
sys.stderr.write("you must give a Redfish build directory\n")
sys.exit(1)
# get a chunk ID that we think will be unique
cid = int(time.clock())
cid = cid + (os.getpid() << 32)
# create input file
input_file = opts.bld_dir + "/hello.in"
f = open(input_file, "w")
try:
print >>f, "hello, world!"
finally:
f.close()
output_file = opts.bld_dir + "/hello.out"
for d in of_node.OfNodeIter(node_list, ["osd"]):
print "writing chunk to " + d.get_short_name()
tool_cmd = [ opts.bld_dir + "/tool/fishtool", "chunk_write",
"-i", input_file, "-k", str(d.id), hex(cid) ]
of_util.subprocess_check_output(tool_cmd)
for d in of_node.OfNodeIter(node_list, ["osd"]):
print "reading chunk from " + d.get_short_name()
tool_cmd = [ opts.bld_dir + "/tool/fishtool", "chunk_read",
"-o", output_file, "-k", str(d.id), hex(cid) ]
of_util.subprocess_check_output(tool_cmd)
filecmp.cmp(input_file, output_file)
|
cmccabe/redfish
|
deploy/st_chunk_io.py
|
Python
|
apache-2.0
| 1,287
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import encodeutils
import six
from six.moves.urllib import parse
from eclcli.orchestration.heatclient.common import utils
from eclcli.orchestration.heatclient.openstack.common.apiclient import base
from eclcli.orchestration.heatclient.v1 import stacks
DEFAULT_PAGE_SIZE = 20
class Event(base.Resource):
def __repr__(self):
return "<Event %s>" % self._info
def update(self, **fields):
self.manager.update(self, **fields)
def delete(self):
return self.manager.delete(self)
def data(self, **kwargs):
return self.manager.data(self, **kwargs)
class EventManager(stacks.StackChildManager):
resource_class = Event
def list(self, stack_id, resource_name=None, **kwargs):
"""Get a list of events.
:param stack_id: ID of stack the events belong to
:param resource_name: Optional name of resources to filter events by
:rtype: list of :class:`Event`
"""
params = {}
if 'filters' in kwargs:
filters = kwargs.pop('filters')
params.update(filters)
for key, value in six.iteritems(kwargs):
if value:
params[key] = value
if resource_name is None:
url = '/stacks/%s/events' % stack_id
else:
stack_id = self._resolve_stack_id(stack_id)
url = '/stacks/%s/resources/%s/events' % (
parse.quote(stack_id, ''),
parse.quote(encodeutils.safe_encode(resource_name), ''))
if params:
url += '?%s' % parse.urlencode(params, True)
return self._list(url, 'events')
def get(self, stack_id, resource_name, event_id):
"""Get the details for a specific event.
:param stack_id: ID of stack containing the event
:param resource_name: ID of resource the event belongs to
:param event_id: ID of event to get the details for
"""
stack_id = self._resolve_stack_id(stack_id)
url_str = '/stacks/%s/resources/%s/events/%s' % (
parse.quote(stack_id, ''),
parse.quote(encodeutils.safe_encode(resource_name), ''),
parse.quote(event_id, ''))
resp = self.client.get(url_str)
body = utils.get_response_body(resp)
return Event(self, body.get('event'))
|
nttcom/eclcli
|
eclcli/orchestration/heatclient/v1/events.py
|
Python
|
apache-2.0
| 2,990
|
# Copyright 2017 The UAI-SDK Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
from uaitrain.operation.pack_docker_image.caffe_pack_op import CaffeUAITrainDockerImagePackOp
from uaitrain.operation.create_train_job.base_create_op import BaseUAITrainCreateTrainJobOp
from uaitrain.operation.stop_train_job.base_stop_op import BaseUAITrainStopTrainJobOp
from uaitrain.operation.delete_train_job.base_delete_op import BaseUAITrainDeleteTrainJobOp
from uaitrain.operation.list_train_job.base_list_job_op import BaseUAITrainListTrainJobOp
from uaitrain.operation.info_train_job.info_train_op import BaseUAITrainRunningInfoOp
from uaitrain.operation.get_realtime_log.base_log_op import BaseUAITrainGetRealtimeLogOp
from uaitrain.operation.list_bill_info.base_bill_op import BaseUAITrainListBillInfoOp
from uaitrain.operation.rename_train_job.base_rename_op import BaseUAITrainRenameTrainJobOp
from uaitrain.operation.get_train_job_conf.base_conf_op import BaseUAITrainTrainJobConfOp
from uaitrain.operation.get_log_topic.get_log_topic import BaseUAITrainGetLogTopicOp
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='AI Caffe Arch Deployer',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
subparsers = parser.add_subparsers(dest='commands', help='commands')
pack_op = CaffeUAITrainDockerImagePackOp(subparsers)
create_op = BaseUAITrainCreateTrainJobOp(subparsers)
stop_op = BaseUAITrainStopTrainJobOp(subparsers)
delete_op = BaseUAITrainDeleteTrainJobOp(subparsers)
list_op = BaseUAITrainListTrainJobOp(subparsers)
info_op = BaseUAITrainRunningInfoOp(subparsers)
log_op = BaseUAITrainGetRealtimeLogOp(subparsers)
bill_op = BaseUAITrainListBillInfoOp(subparsers)
rename_op = BaseUAITrainRenameTrainJobOp(subparsers)
conf_op = BaseUAITrainTrainJobConfOp(subparsers)
topic_op = BaseUAITrainGetLogTopicOp(subparsers)
cmd_args = vars(parser.parse_args())
if cmd_args['commands'] == 'pack':
pack_op.cmd_run(cmd_args)
elif cmd_args['commands'] == 'create':
create_op.cmd_run(cmd_args)
elif cmd_args['commands'] == 'stop':
stop_op.cmd_run(cmd_args)
elif cmd_args['commands'] == 'delete':
delete_op.cmd_run(cmd_args)
elif cmd_args['commands'] == 'list':
list_op.cmd_run(cmd_args)
elif cmd_args['commands'] == 'info':
info_op.cmd_run(cmd_args)
elif cmd_args['commands'] == 'log':
log_op.cmd_run(cmd_args)
elif cmd_args['commands'] == 'bill':
bill_op.cmd_run(cmd_args)
elif cmd_args['commands'] == 'rename':
rename_op.cmd_run(cmd_args)
elif cmd_args['commands'] == 'conf':
conf_op.cmd_run(cmd_args)
elif cmd_args['commands'] == 'topic':
topic_op.cmd_run(cmd_args)
else:
print("Unknown CMD, please use python caffe_tool.py -h to check")
|
ucloud/uai-sdk
|
uaitrain_tool/caffe/caffe_tool.py
|
Python
|
apache-2.0
| 3,505
|
from django.apps import AppConfig
class AttachmentsConfig(AppConfig):
verbose_name = 'Attachments'
|
iamsteadman/bambu-attachments
|
bambu_attachments/apps.py
|
Python
|
apache-2.0
| 104
|
import time
import logging
from typing import Callable, List, TypeVar, Text
from psycopg2.extensions import cursor
CursorObj = TypeVar('CursorObj', bound=cursor)
from django.db import connection
from zerver.models import UserProfile
'''
NOTE! Be careful modifying this library, as it is used
in a migration, and it needs to be valid for the state
of the database that is in place when the 0104_fix_unreads
migration runs.
'''
logger = logging.getLogger('zulip.fix_unreads')
logger.setLevel(logging.WARNING)
def build_topic_mute_checker(cursor, user_profile):
# type: (CursorObj, UserProfile) -> Callable[[int, Text], bool]
'''
This function is similar to the function of the same name
in zerver/lib/topic_mutes.py, but it works without the ORM,
so that we can use it in migrations.
'''
query = '''
SELECT
recipient_id,
topic_name
FROM
zerver_mutedtopic
WHERE
user_profile_id = %s
'''
cursor.execute(query, [user_profile.id])
rows = cursor.fetchall()
tups = {
(recipient_id, topic_name.lower())
for (recipient_id, topic_name) in rows
}
def is_muted(recipient_id, topic):
# type: (int, Text) -> bool
return (recipient_id, topic.lower()) in tups
return is_muted
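# Usage sketch (illustrative, not part of the original module):
#     is_muted = build_topic_mute_checker(cursor, user_profile)
#     is_muted(recipient_id, 'Some Topic')  # True if that stream/topic pair is muted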
def update_unread_flags(cursor, user_message_ids):
# type: (CursorObj, List[int]) -> None
um_id_list = ', '.join(str(id) for id in user_message_ids)
query = '''
UPDATE zerver_usermessage
SET flags = flags | 1
WHERE id IN (%s)
''' % (um_id_list,)
cursor.execute(query)
def get_timing(message, f):
# type: (str, Callable) -> None
start = time.time()
logger.info(message)
f()
elapsed = time.time() - start
logger.info('elapsed time: %.03f\n' % (elapsed,))
def fix_unsubscribed(cursor, user_profile):
# type: (CursorObj, UserProfile) -> None
recipient_ids = []
def find_recipients():
# type: () -> None
query = '''
SELECT
zerver_subscription.recipient_id
FROM
zerver_subscription
INNER JOIN zerver_recipient ON (
zerver_recipient.id = zerver_subscription.recipient_id
)
WHERE (
zerver_subscription.user_profile_id = '%s' AND
zerver_recipient.type = 2 AND
(NOT zerver_subscription.active)
)
'''
cursor.execute(query, [user_profile.id])
rows = cursor.fetchall()
for row in rows:
recipient_ids.append(row[0])
logger.info(str(recipient_ids))
get_timing(
'get recipients',
find_recipients
)
if not recipient_ids:
return
user_message_ids = []
def find():
# type: () -> None
recips = ', '.join(str(id) for id in recipient_ids)
query = '''
SELECT
zerver_usermessage.id
FROM
zerver_usermessage
INNER JOIN zerver_message ON (
zerver_message.id = zerver_usermessage.message_id
)
WHERE (
zerver_usermessage.user_profile_id = %s AND
(zerver_usermessage.flags & 1) = 0 AND
zerver_message.recipient_id in (%s)
)
''' % (user_profile.id, recips)
logger.info('''
EXPLAIN analyze''' + query.rstrip() + ';')
cursor.execute(query)
rows = cursor.fetchall()
for row in rows:
user_message_ids.append(row[0])
logger.info('rows found: %d' % (len(user_message_ids),))
get_timing(
'finding unread messages for non-active streams',
find
)
if not user_message_ids:
return
def fix():
# type: () -> None
update_unread_flags(cursor, user_message_ids)
get_timing(
'fixing unread messages for non-active streams',
fix
)
def fix_pre_pointer(cursor, user_profile):
# type: (CursorObj, UserProfile) -> None
pointer = user_profile.pointer
if not pointer:
return
recipient_ids = []
def find_non_muted_recipients():
# type: () -> None
query = '''
SELECT
zerver_subscription.recipient_id
FROM
zerver_subscription
INNER JOIN zerver_recipient ON (
zerver_recipient.id = zerver_subscription.recipient_id
)
WHERE (
zerver_subscription.user_profile_id = '%s' AND
zerver_recipient.type = 2 AND
zerver_subscription.in_home_view AND
zerver_subscription.active
)
'''
cursor.execute(query, [user_profile.id])
rows = cursor.fetchall()
for row in rows:
recipient_ids.append(row[0])
logger.info(str(recipient_ids))
get_timing(
'find_non_muted_recipients',
find_non_muted_recipients
)
if not recipient_ids:
return
user_message_ids = []
def find_old_ids():
# type: () -> None
recips = ', '.join(str(id) for id in recipient_ids)
is_topic_muted = build_topic_mute_checker(cursor, user_profile)
query = '''
SELECT
zerver_usermessage.id,
zerver_message.recipient_id,
zerver_message.subject
FROM
zerver_usermessage
INNER JOIN zerver_message ON (
zerver_message.id = zerver_usermessage.message_id
)
WHERE (
zerver_usermessage.user_profile_id = %s AND
zerver_usermessage.message_id <= %s AND
(zerver_usermessage.flags & 1) = 0 AND
zerver_message.recipient_id in (%s)
)
''' % (user_profile.id, pointer, recips)
logger.info('''
EXPLAIN analyze''' + query.rstrip() + ';')
cursor.execute(query)
rows = cursor.fetchall()
for (um_id, recipient_id, topic) in rows:
if not is_topic_muted(recipient_id, topic):
user_message_ids.append(um_id)
logger.info('rows found: %d' % (len(user_message_ids),))
get_timing(
'finding pre-pointer messages that are not muted',
find_old_ids
)
if not user_message_ids:
return
def fix():
# type: () -> None
update_unread_flags(cursor, user_message_ids)
get_timing(
'fixing unread messages for pre-pointer non-muted messages',
fix
)
def fix(user_profile):
# type: (UserProfile) -> None
logger.info('\n---\nFixing %s:' % (user_profile.email,))
with connection.cursor() as cursor:
fix_unsubscribed(cursor, user_profile)
fix_pre_pointer(cursor, user_profile)
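# --- Example usage (editor's addition, not part of the original module) ---
# A minimal sketch of driving fix() from a Django shell. The import paths
# below are assumptions based on this file's location (zerver/lib/fix_unreads.py)
# and on Zulip's UserProfile model; they are not verified here.
#
#     from zerver.lib.fix_unreads import fix
#     from zerver.models import UserProfile
#
#     for user_profile in UserProfile.objects.filter(is_active=True):
#         fix(user_profile)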
|
amanharitsh123/zulip
|
zerver/lib/fix_unreads.py
|
Python
|
apache-2.0
| 6,949
|
p = dict(
subject = 'EG009',
#Fixation size (in degrees):
fixation_size = 0.4,
monitor='testMonitor',
scanner=True,
screen_number = 1,
full_screen = True,
radial_cyc = 10,
angular_cyc = 15,
angular_width=30,
size = 60, #This just needs to be larger than the screen
temporal_freq = 2,
sf = 10,
n_blocks = 20, #20 blocks = 200 sec = 3:20 minutes
block_duration=10,
color_dur = 1/3. # 2 Hz
)
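# Editor's note (not part of the original file): the experiment script would
# presumably consume these settings as plain dict lookups, e.g.
#   p['block_duration'] * p['n_blocks']   # total run length in seconds (200)
#   p['fixation_size']                    # fixation size, in degrees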
|
arokem/bowties
|
params.py
|
Python
|
apache-2.0
| 464
|
"""
Component that will help set the level of logging for components.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/logger/
"""
import logging
from collections import OrderedDict
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
DOMAIN = 'logger'
DATA_LOGGER = 'logger'
SERVICE_SET_DEFAULT_LEVEL = 'set_default_level'
SERVICE_SET_LEVEL = 'set_level'
LOGSEVERITY = {
'CRITICAL': 50,
'FATAL': 50,
'ERROR': 40,
'WARNING': 30,
'WARN': 30,
'INFO': 20,
'DEBUG': 10,
'NOTSET': 0
}
LOGGER_DEFAULT = 'default'
LOGGER_LOGS = 'logs'
ATTR_LEVEL = 'level'
_VALID_LOG_LEVEL = vol.All(vol.Upper, vol.In(LOGSEVERITY))
SERVICE_SET_DEFAULT_LEVEL_SCHEMA = vol.Schema({ATTR_LEVEL: _VALID_LOG_LEVEL})
SERVICE_SET_LEVEL_SCHEMA = vol.Schema({cv.string: _VALID_LOG_LEVEL})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(LOGGER_DEFAULT): _VALID_LOG_LEVEL,
vol.Optional(LOGGER_LOGS): vol.Schema({cv.string: _VALID_LOG_LEVEL}),
}),
}, extra=vol.ALLOW_EXTRA)
def set_level(hass, logs):
"""Set log level for components."""
hass.services.call(DOMAIN, SERVICE_SET_LEVEL, logs)
class HomeAssistantLogFilter(logging.Filter):
"""A log filter."""
def __init__(self, logfilter):
"""Initialize the filter."""
super().__init__()
self.logfilter = logfilter
def filter(self, record):
"""Filter the log entries."""
# Log with filtered severity
if LOGGER_LOGS in self.logfilter:
for filtername in self.logfilter[LOGGER_LOGS]:
logseverity = self.logfilter[LOGGER_LOGS][filtername]
if record.name.startswith(filtername):
return record.levelno >= logseverity
# Log with default severity
default = self.logfilter[LOGGER_DEFAULT]
return record.levelno >= default
async def async_setup(hass, config):
"""Set up the logger component."""
logfilter = {}
def set_default_log_level(level):
"""Set the default log level for components."""
logfilter[LOGGER_DEFAULT] = LOGSEVERITY[level]
def set_log_levels(logpoints):
"""Set the specified log levels."""
logs = {}
# Preserve existing logs
if LOGGER_LOGS in logfilter:
logs.update(logfilter[LOGGER_LOGS])
# Add new logpoints mapped to correct severity
for key, value in logpoints.items():
logs[key] = LOGSEVERITY[value]
logfilter[LOGGER_LOGS] = OrderedDict(
sorted(
logs.items(),
key=lambda t: len(t[0]),
reverse=True
)
)
# Set default log severity
if LOGGER_DEFAULT in config.get(DOMAIN):
set_default_log_level(config.get(DOMAIN)[LOGGER_DEFAULT])
else:
set_default_log_level('DEBUG')
logger = logging.getLogger('')
logger.setLevel(logging.NOTSET)
# Set log filter for all log handler
for handler in logging.root.handlers:
handler.setLevel(logging.NOTSET)
handler.addFilter(HomeAssistantLogFilter(logfilter))
if LOGGER_LOGS in config.get(DOMAIN):
set_log_levels(config.get(DOMAIN)[LOGGER_LOGS])
async def async_service_handler(service):
"""Handle logger services."""
if service.service == SERVICE_SET_DEFAULT_LEVEL:
set_default_log_level(service.data.get(ATTR_LEVEL))
else:
set_log_levels(service.data)
hass.services.async_register(
DOMAIN, SERVICE_SET_DEFAULT_LEVEL, async_service_handler,
schema=SERVICE_SET_DEFAULT_LEVEL_SCHEMA)
hass.services.async_register(
DOMAIN, SERVICE_SET_LEVEL, async_service_handler,
schema=SERVICE_SET_LEVEL_SCHEMA)
return True
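# --- Example configuration (editor's addition, not part of the original) ---
# A minimal sketch matching CONFIG_SCHEMA above; the component and level
# names are illustrative only.
#
#   # configuration.yaml
#   logger:
#     default: warning
#     logs:
#       homeassistant.components.automation: debug
#
# Levels can also be changed at runtime through the helper defined above:
#
#   set_level(hass, {'homeassistant.components.automation': 'debug'})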
|
persandstrom/home-assistant
|
homeassistant/components/logger.py
|
Python
|
apache-2.0
| 3,869
|
# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironicclient.tests.functional import base
class IronicClientHelp(base.FunctionalTestBase):
"""Test for python-ironicclient help messages."""
def test_ironic_help(self):
"""Check Ironic client main help message contents."""
caption = ("Command-line interface to the "
"OpenStack Bare Metal Provisioning API.")
subcommands = {
'bash-completion',
'chassis-create',
'chassis-delete',
'chassis-list',
'chassis-node-list',
'chassis-show',
'chassis-update',
'driver-list',
'driver-properties',
'driver-show',
'driver-vendor-passthru',
'help',
'node-create',
'node-delete',
'node-get-boot-device',
'node-get-console',
'node-get-supported-boot-devices',
'node-list',
'node-port-list',
'node-set-boot-device',
'node-set-console-mode',
'node-set-maintenance',
'node-set-power-state',
'node-set-provision-state',
'node-show',
'node-show-states',
'node-update',
'node-validate',
'node-vendor-passthru',
'port-create',
'port-delete',
'port-list',
'port-show',
'port-update'
}
output = self._ironic('help', flags='', params='')
self.assertIn(caption, output)
for string in subcommands:
self.assertIn(string, output)
|
NaohiroTamura/python-ironicclient
|
ironicclient/tests/functional/test_help_msg.py
|
Python
|
apache-2.0
| 2,193
|
#!/usr/bin/env python
from distutils.core import setup
setup(
name='fetcher',
version='0.4',
install_requires=['pycurl==7.19.0.2'],
packages=['fetcher'])
|
vistarmedia/fetcher
|
setup.py
|
Python
|
apache-2.0
| 165
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.recommendationengine_v1beta1.types import prediction_service
from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO
class PredictionServiceGrpcTransport(PredictionServiceTransport):
"""gRPC backend transport for PredictionService.
Service for making recommendation prediction.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "recommendationengine.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "recommendationengine.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def predict(
self,
) -> Callable[
[prediction_service.PredictRequest], prediction_service.PredictResponse
]:
r"""Return a callable for the predict method over gRPC.
Makes a recommendation prediction. If using API Key based
authentication, the API Key must be registered using the
[PredictionApiKeyRegistry][google.cloud.recommendationengine.v1beta1.PredictionApiKeyRegistry]
service. `Learn
more </recommendations-ai/docs/setting-up#register-key>`__.
Returns:
Callable[[~.PredictRequest],
~.PredictResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "predict" not in self._stubs:
self._stubs["predict"] = self.grpc_channel.unary_unary(
"/google.cloud.recommendationengine.v1beta1.PredictionService/Predict",
request_serializer=prediction_service.PredictRequest.serialize,
response_deserializer=prediction_service.PredictResponse.deserialize,
)
return self._stubs["predict"]
def close(self):
self.grpc_channel.close()
__all__ = ("PredictionServiceGrpcTransport",)
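# --- Example usage (editor's addition, not part of the generated module) ---
# A minimal sketch of constructing this transport explicitly and handing it to
# the GAPIC client. The client class name below is assumed from the standard
# generated-client layout and is not defined in this file.
#
#     from google.cloud.recommendationengine_v1beta1.services.prediction_service import (
#         PredictionServiceClient,
#     )
#
#     transport = PredictionServiceGrpcTransport(
#         host="recommendationengine.googleapis.com",
#     )
#     client = PredictionServiceClient(transport=transport)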
|
googleapis/python-recommendations-ai
|
google/cloud/recommendationengine_v1beta1/services/prediction_service/transports/grpc.py
|
Python
|
apache-2.0
| 11,847
|
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from flask import Flask, jsonify, redirect, render_template, request, url_for
from itertools import repeat, izip, imap
from jinja2 import Markup
from kazoo.client import KazooClient
from kazoo.exceptions import NoNodeException
from kazoo.security import make_acl, make_digest_acl_credential
from raven.contrib.flask import Sentry
from werkzeug.contrib.fixers import ProxyFix
import json
from jones import Jones, Env
import zkutil
import jonesconfig
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
app.config.from_object(jonesconfig)
app.config.from_envvar('JONES_SETTINGS', silent=True)
if 'SENTRY_DSN' in app.config:
sentry = Sentry(app)
jones_credential = make_digest_acl_credential(
'Jones', app.config['ZK_DIGEST_PASSWORD']
)
_zk = None
def get_zk():
global _zk
if _zk is None:
_zk = KazooClient(
app.config['ZK_CONNECTION_STRING'],
default_acl=(
# grants read permissions to anyone.
make_acl('world', 'anyone', read=True),
# grants all permissions to the creator of the node.
make_acl('auth', '', all=True)
)
)
_zk.start()
_zk.add_auth('digest', jones_credential)
_zk.DataWatch('/services', func=ensure_root)
return _zk
def ensure_root(data, stat):
if not data:
get_zk().ensure_path('/services')
def request_wants(t):
types = ['text/plain', 'text/html', 'application/json']
assert t in types
best = request.accept_mimetypes \
.best_match(types)
return best == t
@app.template_filter()
def as_json(d, indent=None):
return Markup(json.dumps(d, indent=indent))
@app.context_processor
def inject_services():
return dict(services=[child for child in get_zk().get_children('/services') if
Jones(child, get_zk()).exists()])
@app.route('/')
def index():
return render_template('index.j2')
def service_create(env, jones):
jones.create_config(env, {})
if request_wants('application/json') or request_wants('text/plain'):
r = jsonify(service=jones.service)
r.status_code = 201
return r
else:
if env.is_root:
env = None
return redirect(url_for(
'services', service=jones.service, env=env))
def service_update(env, jones):
jones.set_config(
env,
json.loads(request.form['data']),
int(request.form['version'])
)
return env
def service_delete(env, jones):
if env.is_root:
# deleting whole service
jones.delete_all()
#return redirect(url_for('index'))
else:
jones.delete_config(env, -1)
return env, 200
def service_get(env, jones):
if not jones.exists():
return redirect(url_for('index'))
children = jones.get_child_envs(Env.Root)
is_leaf = lambda child: len(child) and not any(
c.find(child + '/') >= 0 for c in children)
try:
version, config = jones.get_config_by_env(env)
except NoNodeException:
return redirect(url_for('services', service=jones.service))
childs = imap(dict, izip(
izip(repeat('env'), imap(Env, children)),
izip(repeat('is_leaf'), imap(is_leaf, children))))
vals = {
"env": env,
"version": version,
"children": list(childs),
"config": config,
"view": jones.get_view_by_env(env),
"service": jones.service,
"associations": jones.get_associations(env)
}
if request_wants('application/json'):
return jsonify(vals)
else:
return render_template('service.j2', **vals)
SERVICE = {
'get': service_get,
'put': service_update,
'post': service_create,
'delete': service_delete
}
ALL_METHODS = ['GET', 'PUT', 'POST', 'DELETE']
@app.route('/service/<string:service>/', defaults={'env': None},
methods=ALL_METHODS)
@app.route('/service/<string:service>/<path:env>/', methods=ALL_METHODS)
def services(service, env):
jones = Jones(service, get_zk())
environment = Env(env)
return SERVICE[request.method.lower()](environment, jones)
@app.route('/service/<string:service>/association/<string:assoc>',
methods=['GET', 'PUT', 'DELETE'])
def association(service, assoc):
jones = Jones(service, get_zk())
if request.method == 'GET':
if request_wants('application/json'):
return jsonify(jones.get_config(assoc))
if request.method == 'PUT':
jones.assoc_host(assoc, Env(request.form['env']))
return service, 201
elif request.method == 'DELETE':
jones.delete_association(assoc)
return service, 200
@app.route('/export')
def export():
return zkutil.export_tree(get_zk(), '/')
if __name__ == '__main__':
app.run()
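# --- Example usage (editor's addition, not part of the original module) ---
# A minimal sketch of running the app locally and exercising the service
# routes defined above; "myservice" and the settings path are hypothetical,
# and a reachable ZooKeeper at ZK_CONNECTION_STRING is assumed.
#
#     $ JONES_SETTINGS=/path/to/settings.py python jones/web.py
#     $ curl -X POST http://localhost:5000/service/myservice/
#     $ curl -H 'Accept: application/json' http://localhost:5000/service/myservice/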
|
mwhooker/jones
|
jones/web.py
|
Python
|
apache-2.0
| 5,362
|
#!/usr/bin/env python
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test classes for the pubsub-to-datastore module."""
import os
import time
import unittest
import urllib
import json
import sys
sys.path.insert(1, 'lib')
import httplib2
GAE_HOST = "pubsub-to-datastore-dot-cloud-iot-dev.appspot.com"
def url_for(path):
"""Returns the URL of the endpoint for the given path."""
return 'https://%s%s' % (GAE_HOST, path)
class IntegrationTestCase(unittest.TestCase):
"""A test case for the pubsub-to-datastore module."""
def setUp(self):
self.http = httplib2.Http()
def test_push_success(self):
"""Test processing a new message."""
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
data = {"message": {"data": "eyJzZXNzaW9uSWQiOiI1OWE0N2VhNS1jMjAxLTA4MzItZjU2Zi1hM2ZlNGUxNzA0ODciLCJkYXRhIjp7ImV2IjoiZGV2aWNlb3JpZW50YXRpb24iLCJ4IjowLjE5MDEzNDE2NTg0MTU2ODk4LCJ5IjoyMy45MDQxMTQ5MzYzNzg0NTJ9LCJ0aW1lc3RhbXAiOjE0NjI1NTI3MzcyMDl9","message_id": "34536788863333"}}
(resp, content) = self.http.request(url_for('/'), 'POST', body=json.dumps(data), headers=headers)
# This ensures that our App Engine service account is working correctly.
self.assertEquals(204, resp.status)
# [START main]
if __name__ == '__main__':
unittest.main()
# [END main]
|
GoogleCloudPlatform/IoT-Icebreaker
|
appengine/pubsub-to-datastore/test_deploy.py
|
Python
|
apache-2.0
| 1,902
|
"""This example follows the simple text document Pipeline illustrated in the figures above.
"""
from pyspark.ml import Pipeline
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.feature import HashingTF, Tokenizer
from pyspark.sql import SparkSession
# The snippet below assumes an existing `spark` session; create one here so
# the example is self-contained ("PipelineExample" is an arbitrary app name).
spark = SparkSession.builder.appName("PipelineExample").getOrCreate()
# Prepare training documents from a list of (id, text, label) tuples.
training = spark.createDataFrame([
(0, "a b c d e spark", 1.0),
(1, "b d", 0.0),
(2, "spark f g h", 1.0),
(3, "hadoop mapreduce", 0.0)
], ["id", "text", "label"])
# Configure an ML pipeline, which consists of three stages: tokenizer, hashingTF, and lr.
tokenizer = Tokenizer(inputCol="text", outputCol="words")
hashingTF = HashingTF(inputCol=tokenizer.getOutputCol(), outputCol="features")
lr = LogisticRegression(maxIter=10, regParam=0.001)
pipeline = Pipeline(stages=[tokenizer, hashingTF, lr])
# Fit the pipeline to training documents.
model = pipeline.fit(training)
# Prepare test documents, which are unlabeled (id, text) tuples.
test = spark.createDataFrame([
(4, "spark i j k"),
(5, "l m n"),
(6, "spark hadoop spark"),
(7, "apache hadoop")
], ["id", "text"])
# Make predictions on test documents and print columns of interest.
prediction = model.transform(test)
selected = prediction.select("id", "text", "probability", "prediction")
for row in selected.collect():
rid, text, prob, prediction = row
print("(%d, %s) --> prob=%s, prediction=%f" % (rid, text, str(prob), prediction))
|
xiligey/xiligey.github.io
|
code/2.py
|
Python
|
apache-2.0
| 1,442
|
# -*- coding: utf-8 -*-
import os
import base64
from random import choice
def random_file_from_dir(relative_path):
random_file = choice(os.listdir(os.path.join(os.getcwd(), relative_path)))
return abs_path_to_file(os.path.join(relative_path, random_file))
def abs_path_to_file(relative_path):
# print os.getcwd()
return os.path.abspath(os.path.join(os.getcwd(), relative_path))
def encode_base64(abs_path):
print "abs_path", abs_path
with open(abs_path, 'rb') as f:
return base64.b64encode(f.read())
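# Editor's note (not part of the original file): typical use is to pick a
# random fixture and inline it as base64, e.g.
#   encode_base64(random_file_from_dir('testdata/images'))
# where 'testdata/images' is a hypothetical directory relative to os.getcwd().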
|
alazanman/py_epg_tests
|
utils/file_util.py
|
Python
|
apache-2.0
| 535
|
# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import ddt
from zaqar.tests.functional import base
from zaqar.tests.functional import helpers
@ddt.ddt
class TestClaims(base.V1_1FunctionalTestBase):
"""Tests for Claims."""
server_class = base.ZaqarServer
def setUp(self):
super(TestClaims, self).setUp()
self.headers = helpers.create_zaqar_headers(self.cfg)
self.client.headers = self.headers
self.queue = uuid.uuid1()
self.queue_url = ("{url}/{version}/queues/{queue}".format(
url=self.cfg.zaqar.url,
version="v1.1",
queue=self.queue))
self.client.put(self.queue_url)
self.claim_url = self.queue_url + '/claims'
self.client.set_base_url(self.claim_url)
# Post Messages
url = self.queue_url + '/messages'
doc = helpers.create_message_body_v1_1(
messagecount=self.limits.max_messages_per_page)
for i in range(10):
self.client.post(url, data=doc)
@ddt.data({}, {'limit': 2})
def test_claim_messages(self, params):
"""Claim messages."""
message_count = params.get('limit',
self.limits.max_messages_per_claim_or_pop)
doc = {"ttl": 300, "grace": 100}
result = self.client.post(params=params, data=doc)
self.assertEqual(201, result.status_code)
self.assertSchema(result.json(), 'claim_create')
actual_message_count = len(result.json()['messages'])
self.assertMessageCount(actual_message_count, message_count)
response_headers = set(result.headers.keys())
self.assertIsSubset(self.headers_response_with_body, response_headers)
test_claim_messages.tags = ['smoke', 'positive']
def test_query_claim(self):
"""Query Claim."""
params = {'limit': 1}
doc = {"ttl": 300, "grace": 100}
result = self.client.post(params=params, data=doc)
location = result.headers['Location']
url = self.cfg.zaqar.url + location
result = self.client.get(url)
self.assertEqual(200, result.status_code)
test_query_claim.tags = ['smoke', 'positive']
@ddt.data({}, {"grace": 100})
def test_claim_default_ttl(self, doc):
"""Create claim with default TTL and grace values."""
params = {'limit': 1}
result = self.client.post(params=params, data=doc)
self.assertEqual(201, result.status_code)
location = result.headers['Location']
url = self.cfg.zaqar.url + location
result = self.client.get(url)
self.assertEqual(200, result.status_code)
default_ttl = result.json()['ttl']
self.assertEqual(self.resource_defaults.claim_ttl, default_ttl)
test_claim_default_ttl.tags = ['smoke', 'positive']
def test_claim_more_than_allowed(self):
"""Claim more than max allowed per request.
Zaqar allows a maximum of 20 messages per claim by default.
"""
params = {"limit": self.limits.max_messages_per_claim_or_pop + 1}
doc = {"ttl": 300, "grace": 100}
result = self.client.post(params=params, data=doc)
self.assertEqual(400, result.status_code)
test_claim_more_than_allowed.tags = ['negative']
def test_claim_patch(self):
"""Update Claim."""
# Test Setup - Post Claim
doc = {"ttl": 300, "grace": 400}
result = self.client.post(data=doc)
self.assertEqual(201, result.status_code)
# Patch Claim
claim_location = result.headers['Location']
url = self.cfg.zaqar.url + claim_location
doc_updated = {"ttl": 300, 'grace': 60}
result = self.client.patch(url, data=doc_updated)
self.assertEqual(204, result.status_code)
# verify that the claim TTL is updated
result = self.client.get(url)
new_ttl = result.json()['ttl']
self.assertEqual(doc_updated['ttl'], new_ttl)
test_claim_patch.tags = ['smoke', 'positive']
def test_delete_claimed_message(self):
"""Delete message belonging to a Claim."""
# Test Setup - Post claim
doc = {"ttl": 60, "grace": 60}
result = self.client.post(data=doc)
self.assertEqual(201, result.status_code)
# Delete Claimed Messages
for rst in result.json()['messages']:
href = rst['href']
url = self.cfg.zaqar.url + href
result = self.client.delete(url)
self.assertEqual(204, result.status_code)
test_delete_claimed_message.tags = ['smoke', 'positive']
def test_claim_release(self):
"""Release Claim."""
doc = {"ttl": 300, "grace": 100}
result = self.client.post(data=doc)
self.assertEqual(201, result.status_code)
# Extract claim location and construct the claim URL.
location = result.headers['Location']
url = self.cfg.zaqar.url + location
# Release Claim.
result = self.client.delete(url)
self.assertEqual(204, result.status_code)
test_claim_release.tags = ['smoke', 'positive']
@ddt.data(10000000000000000000, -100, 1, 59, 43201, -10000000000000000000)
def test_claim_invalid_ttl(self, ttl):
"""Post Claim with invalid TTL.
        The request JSON body will have a TTL value
        outside the allowed range. The allowed ttl range is
        60 <= ttl <= 43200.
"""
doc = {"ttl": ttl, "grace": 100}
result = self.client.post(data=doc)
self.assertEqual(400, result.status_code)
test_claim_invalid_ttl.tags = ['negative']
@ddt.data(10000000000000000000, -100, 1, 59, 43201, -10000000000000000000)
def test_claim_invalid_grace(self, grace):
"""Post Claim with invalid grace.
        The request JSON body will have a grace value
        outside the allowed range. The allowed grace range is
        60 <= grace <= 43200.
"""
doc = {"ttl": 100, "grace": grace}
result = self.client.post(data=doc)
self.assertEqual(400, result.status_code)
test_claim_invalid_grace.tags = ['negative']
    @ddt.data(0, -100, 30, 10000000000000000000)
    def test_claim_invalid_limit(self, limit):
        """Post Claim with invalid limit.
        The request url will have a limit outside the allowed range.
        Allowed limit values are 0 < limit <= 20 (default max).
        """
        params = {"limit": limit}
        doc = {"ttl": 100, "grace": 100}
        result = self.client.post(params=params, data=doc)
        self.assertEqual(400, result.status_code)
test_claim_invalid_limit.tags = ['negative']
@ddt.data(10000000000000000000, -100, 1, 59, 43201, -10000000000000000000)
def test_patch_claim_invalid_ttl(self, ttl):
"""Patch Claim with invalid TTL.
        The request JSON body will have a TTL value
        outside the allowed range. The allowed ttl range is
        60 <= ttl <= 43200.
"""
doc = {"ttl": 100, "grace": 100}
result = self.client.post(data=doc)
self.assertEqual(201, result.status_code)
# Extract claim location and construct the claim URL.
location = result.headers['Location']
url = self.cfg.zaqar.url + location
# Patch Claim.
doc = {"ttl": ttl}
result = self.client.patch(url, data=doc)
self.assertEqual(400, result.status_code)
test_patch_claim_invalid_ttl.tags = ['negative']
def test_query_non_existing_claim(self):
"""Query Non Existing Claim."""
path = '/non-existing-claim'
result = self.client.get(path)
self.assertEqual(404, result.status_code)
test_query_non_existing_claim.tags = ['negative']
def test_patch_non_existing_claim(self):
"""Patch Non Existing Claim."""
path = '/non-existing-claim'
doc = {"ttl": 400}
result = self.client.patch(path, data=doc)
self.assertEqual(404, result.status_code)
test_patch_non_existing_claim.tags = ['negative']
def test_delete_non_existing_claim(self):
"""Patch Non Existing Claim."""
path = '/non-existing-claim'
result = self.client.delete(path)
self.assertEqual(204, result.status_code)
test_delete_non_existing_claim.tags = ['negative']
def tearDown(self):
"""Delete Queue after Claim Test."""
super(TestClaims, self).tearDown()
self.client.delete(self.queue_url)
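# --- Editor's note (not part of the original test module) ---
# For reference, the claim request exercised above is a plain JSON body posted
# to the claims resource built in setUp(), e.g.
#
#   POST /v1.1/queues/<queue>/claims?limit=2
#   {"ttl": 300, "grace": 100}
#
# which matches the documents built in the tests (doc = {"ttl": ..., "grace": ...}).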
|
openstack/zaqar
|
zaqar/tests/functional/wsgi/v1_1/test_claims.py
|
Python
|
apache-2.0
| 9,025
|
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-03-07 23:48
from hanlp.metrics.parsing import conllx_eval
from hanlp.datasets.parsing.ptb import PTB_SD330_DEV, PTB_SD330_TRAIN, PTB_SD330_TEST, PTB_TOKEN_MAPPING
from hanlp.components.parsers.biaffine_parser_tf import BiaffineTransformerDependencyParserTF, \
StructuralAttentionDependencyParserTF
from hanlp.pretrained.glove import GLOVE_840B_300D
from tests import cdroot
cdroot()
save_dir = 'data/model/dep/ptb_sa_topk'
parser = StructuralAttentionDependencyParserTF()
parser.fit(PTB_SD330_TRAIN, PTB_SD330_DEV, save_dir, 'bert-base-uncased',
batch_size=3000,
warmup_steps_ratio=.1,
token_mapping=PTB_TOKEN_MAPPING,
samples_per_batch=150,
transformer_dropout=.33,
masked_lm_dropout=.33,
learning_rate=2e-3,
learning_rate_transformer=1e-5,
# alpha=1,
# early_stopping_patience=10,
# num_decoder_layers=2,
)
parser.load(save_dir)
# output = f'{save_dir}/test.predict.conll'
parser.evaluate(PTB_SD330_TEST, save_dir, warm_up=False)
# uas, las = conllx_eval.evaluate(PTB_SD330_TEST, output)
# print(f'Official UAS: {uas:.4f} LAS: {las:.4f}')
print(f'Model saved in {save_dir}')
|
hankcs/HanLP
|
plugins/hanlp_demo/hanlp_demo/zh/tf/train/train_ptb_dep_sa_albert_topk.py
|
Python
|
apache-2.0
| 1,269
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logic to update a TensorFlow model graph with quantization operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib import graph_editor
from tensorflow.contrib.quantize.python import graph_matcher
from tensorflow.contrib.quantize.python import input_to_ops
from tensorflow.contrib.quantize.python import quant_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import training_util
# Quantizable operation types that are supported by the quantization rewrite.
_QUANTIZABLE_TYPES = {'Conv2D', 'MatMul', 'DepthwiseConv2dNative'}
# Activations that are supported by the quantization rewrite.
_ACTIVATION_TYPES = {'Relu', 'Relu6', 'Identity'}
# Weight types that are supported by the quantization rewrite.
# TODO(suharshs): Add support for ResourceVariable.
_WEIGHT_TYPES = {'Variable', 'VariableV2'}
def Quantize(graph,
weight_bits=8,
activation_bits=8,
ema_decay=0.999,
quant_delay=None,
vars_collection=ops.GraphKeys.MOVING_AVERAGE_VARIABLES,
is_training=True):
"""Updates graph with quantization operations.
Args:
graph: Graph to modify.
weight_bits: Number of bits to use for quantizing weights.
activation_bits: Number of bits to use for quantizing activations.
ema_decay: (Optional) Float, EMA decay parameter. EMA is used to update
quantization intervals for quantizing activations (see here about EMA:
https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average).
quant_delay: (Optional, default None) Int, count of global steps for which
to delay quantization. This helps weights stabilize at the start of
training.
vars_collection: (Optional) Collection where to store the variables for
quantization interval ends.
is_training: (Optional) Whether quantizing training graph or eval graph.
Raises:
ValueError: When quantization fails.
"""
input_to_ops_map = input_to_ops.InputToOps(graph)
for layer_match in _FindLayersToQuantize(graph):
# Quantize the weights.
context = _GetContextFromOp(layer_match.layer_op)
_InsertQuantOp(
context,
layer_match.weight_tensor.op, [layer_match.layer_op],
name='weights_quant',
moving_avg=False,
bits=weight_bits,
ema_decay=ema_decay,
quant_delay=quant_delay,
is_training=is_training,
narrow_range=True,
vars_collection=vars_collection)
# Quantize the activations.
consumer_ops = input_to_ops_map.ConsumerOperations(
layer_match.activation_op)
add_context = context
if layer_match.bypass_op:
add_context = re.search(r'^(.*)/([^/]+)', context).group(1)
_InsertQuantOp(
add_context,
layer_match.activation_op,
consumer_ops,
name='act_quant',
moving_avg=True,
init_min=0.0,
ema_decay=ema_decay,
quant_delay=quant_delay,
bits=activation_bits,
vars_collection=vars_collection)
# Quantize the inputs and output to the bypass (if it exists). The input to
# the bypass is the bias add, and the output is the activation.
if layer_match.bypass_op is not None:
_InsertQuantOp(
context,
layer_match.bias_add_op, [layer_match.bypass_op],
name='conv_quant',
moving_avg=True,
ema_decay=ema_decay,
quant_delay=quant_delay,
vars_collection=vars_collection,
bits=activation_bits)
_InsertQuantOp(
add_context,
layer_match.bypass_op,
input_to_ops_map.ConsumerOperations(layer_match.bypass_op),
name='add_quant',
moving_avg=True,
bits=activation_bits)
def _FindLayersToQuantize(graph):
"""Matches layers in graph to quantize.
Args:
graph: Graph to perform match on.
Yields:
_LayerMatches.
"""
input_pattern = graph_matcher.OpTypePattern('*')
weight_var_pattern = graph_matcher.OpTypePattern('|'.join(_WEIGHT_TYPES))
weight_pattern = graph_matcher.OpTypePattern(
'Identity', inputs=[weight_var_pattern])
folded_weight_pattern = graph_matcher.OpTypePattern('Mul')
  # The weight input to the layer operation can come either from the Variable
  # or from the folded weight (Mul).
layer_pattern = graph_matcher.OpTypePattern(
'|'.join(_QUANTIZABLE_TYPES),
inputs=[
input_pattern,
graph_matcher.OneofPattern([weight_pattern, folded_weight_pattern])
])
folded_bias_mul_pattern = graph_matcher.OpTypePattern(
'Mul', inputs=[graph_matcher.OpTypePattern('*'), layer_pattern])
post_layer_op_correction_pattern = graph_matcher.OpTypePattern(
'Add', inputs=[folded_bias_mul_pattern,
graph_matcher.OpTypePattern('*')])
folded_bias_add_pattern = graph_matcher.OpTypePattern(
'Add',
inputs=[
post_layer_op_correction_pattern,
graph_matcher.OpTypePattern('*')
])
bias_add_pattern = graph_matcher.OpTypePattern(
'Add|BiasAdd', inputs=[layer_pattern, '*'])
# The bias can come from the bias add or the folded bias add.
bypass_pattern_a = graph_matcher.OpTypePattern(
'Add',
inputs=[
graph_matcher.OneofPattern(
[bias_add_pattern, folded_bias_add_pattern]), '*'
])
bypass_pattern_b = graph_matcher.OpTypePattern(
'Add',
inputs=[
'*',
graph_matcher.OneofPattern(
[bias_add_pattern, folded_bias_add_pattern])
])
  # The input to the activation can come from the bias add, the folded bias
  # add or the bypasses.
activation_pattern = graph_matcher.OpTypePattern(
'|'.join(_ACTIVATION_TYPES),
inputs=[
graph_matcher.OneofPattern([
bias_add_pattern, folded_bias_add_pattern, bypass_pattern_a,
bypass_pattern_b
])
])
layer_matcher = graph_matcher.GraphMatcher(activation_pattern)
for match_result in layer_matcher.match_graph(graph):
layer_op = match_result.get_op(layer_pattern)
weight_tensor = match_result.get_tensor(weight_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(folded_weight_pattern)
activation_op = match_result.get_op(activation_pattern)
bias_add_op = match_result.get_op(bias_add_pattern)
if bias_add_op is None:
bias_add_op = match_result.get_op(folded_bias_add_pattern)
bypass_op = match_result.get_op(bypass_pattern_a)
if bypass_op is None:
bypass_op = match_result.get_op(bypass_pattern_b)
yield _LayerMatch(layer_op, weight_tensor, activation_op, bypass_op,
bias_add_op)
class _LayerMatch(object):
"""Contains all information related to a matched Layer."""
def __init__(self, layer_op, weight_tensor, activation_op, bypass_op,
bias_add_op):
self._layer_op = layer_op
self._weight_tensor = weight_tensor
self._activation_op = activation_op
self._bypass_op = bypass_op
self._bias_add_op = bias_add_op
@property
def layer_op(self):
return self._layer_op
@property
def weight_tensor(self):
return self._weight_tensor
@property
def activation_op(self):
return self._activation_op
@property
def bypass_op(self):
return self._bypass_op
@property
def bias_add_op(self):
return self._bias_add_op
def _InsertQuantOp(context,
producer,
consumers,
name,
moving_avg=True,
init_min=-6.0,
init_max=6.0,
bits=8,
ema_decay=0.999,
quant_delay=None,
vars_collection=ops.GraphKeys.MOVING_AVERAGE_VARIABLES,
is_training=True,
narrow_range=False):
"""Inserts a quant op between a producer op and (multiple) consumer ops.
Args:
    context: Context where producer and consumer operations are nested.
producer: Producer operation of the pairs where quantization will be
inserted.
consumers: Consumer operations of the pairs.
name: Name for the new quantization op within the context.
moving_avg: Specifies whether to use exponential moving average or just
the last value seen.
init_min: Starting minimum value for the new quantization op.
init_max: Starting maximum value for the new quantization op.
bits: Number of bits to use for quantization, must be between 2 and 8.
ema_decay: (Optional) Float, EMA decay parameter. EMA is used to update
quantization intervals for quantizing activations (see here about EMA:
https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average).
quant_delay: (Optional, default None) Int, count of global steps for which
to delay quantization. This helps weights stabilize at the start of
training.
vars_collection: (Optional) Collection where to store the variables for
quantization interval ends.
is_training: (Optional) Whether quantizing training graph or eval graph.
narrow_range: Whether to use the narrow quantization range
[1; 2^bits - 1] or wide range [0; 2^bits - 1].
Raises:
ValueError: When producer operation is not directly connected to the
consumer operation.
"""
name_prefix = _AddContextToName(context, name)
inputs = producer.outputs[0]
if moving_avg:
quant = (
quant_ops.MovingAvgQuantize(
inputs,
init_min=init_min,
init_max=init_max,
ema_decay=ema_decay,
is_training=is_training,
num_bits=bits,
narrow_range=narrow_range,
vars_collection=vars_collection,
name_prefix=name_prefix))
else:
quant = (
quant_ops.LastValueQuantize(
inputs,
init_min=init_min,
init_max=init_max,
is_training=is_training,
num_bits=bits,
narrow_range=narrow_range,
vars_collection=vars_collection,
name_prefix=name_prefix))
if quant_delay and quant_delay > 0:
activate_quant = math_ops.greater_equal(
training_util.get_or_create_global_step(),
quant_delay,
name=name_prefix + '/activate_quant')
quant = control_flow_ops.cond(
activate_quant,
lambda: quant,
lambda: inputs,
name=name_prefix + '/delayed_quant')
nodes_modified_count = graph_editor.reroute_ts(
[quant], [inputs], can_modify=consumers)
if nodes_modified_count != len(consumers):
raise ValueError('Some inputs not quantized for ops: [%s]' % ', '.join(
[consumer.name for consumer in consumers]))
def _GetContextFromOp(op):
"""Gets the root context name from the op name."""
context_re = re.search(r'^(.*)/([^/]+)', op.name)
if context_re:
return context_re.group(1)
return ''
def _AddContextToName(context, name):
"""Adds the context to the name if it exists."""
if not context:
return name
return context + '/' + name
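# --- Example usage (editor's addition, not part of the original module) ---
# A minimal sketch of rewriting a TF1-style training graph in place after the
# model has been built; quant_delay=2000 is an arbitrary illustrative value.
# `ops` is already imported at the top of this module.
#
#     g = ops.get_default_graph()
#     Quantize(g, weight_bits=8, activation_bits=8,
#              quant_delay=2000, is_training=True)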
|
nolanliou/tensorflow
|
tensorflow/contrib/quantize/python/quantize.py
|
Python
|
apache-2.0
| 11,989
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_messaging.rpc import dispatcher
import six
from heat.common import exception
from heat.common import identifier
from heat.engine.clients.os import keystone
from heat.engine import dependencies
from heat.engine import resource as res
from heat.engine import service
from heat.engine import stack
from heat.engine import stack_lock
from heat.engine import template as templatem
from heat.objects import stack as stack_object
from heat.tests import common
from heat.tests.engine import tools
from heat.tests import fakes as test_fakes
from heat.tests import generic_resource as generic_rsrc
from heat.tests import utils
policy_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "alarming",
"Resources" : {
"WebServerScaleDownPolicy" : {
"Type" : "AWS::AutoScaling::ScalingPolicy",
"Properties" : {
"AdjustmentType" : "ChangeInCapacity",
"AutoScalingGroupName" : "",
"Cooldown" : "60",
"ScalingAdjustment" : "-1"
}
},
"Random" : {
"Type" : "OS::Heat::RandomString"
}
}
}
'''
class StackResourcesServiceTest(common.HeatTestCase):
def setUp(self):
super(StackResourcesServiceTest, self).setUp()
self.ctx = utils.dummy_context(tenant_id='stack_resource_test_tenant')
self.eng = service.EngineService('a-host', 'a-topic')
self.eng.thread_group_mgr = tools.DummyThreadGroupManager()
self.eng.engine_id = 'engine-fake-uuid'
cfg.CONF.set_default('heat_stack_user_role', 'stack_user_role')
@mock.patch.object(stack.Stack, 'load')
def _test_describe_stack_resource(self, mock_load):
mock_load.return_value = self.stack
# Patch _resolve_all_attributes or it tries to call novaclient
self.patchobject(res.Resource, '_resolve_all_attributes',
return_value=None)
r = self.eng.describe_stack_resource(self.ctx, self.stack.identifier(),
'WebServer', with_attr=None)
self.assertIn('resource_identity', r)
self.assertIn('description', r)
self.assertIn('updated_time', r)
self.assertIn('stack_identity', r)
self.assertIsNotNone(r['stack_identity'])
self.assertIn('stack_name', r)
self.assertEqual(self.stack.name, r['stack_name'])
self.assertIn('metadata', r)
self.assertIn('resource_status', r)
self.assertIn('resource_status_reason', r)
self.assertIn('resource_type', r)
self.assertIn('physical_resource_id', r)
self.assertIn('resource_name', r)
self.assertIn('attributes', r)
self.assertEqual('WebServer', r['resource_name'])
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
@tools.stack_context('service_stack_resource_describe__test_stack')
def test_stack_resource_describe(self):
self._test_describe_stack_resource()
@mock.patch.object(service.EngineService, '_get_stack')
def test_stack_resource_describe_nonexist_stack(self, mock_get):
non_exist_identifier = identifier.HeatIdentifier(
self.ctx.tenant_id, 'wibble',
'18d06e2e-44d3-4bef-9fbf-52480d604b02')
mock_get.side_effect = exception.EntityNotFound(
entity='Stack', name='test')
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.describe_stack_resource,
self.ctx, non_exist_identifier, 'WebServer')
self.assertEqual(exception.EntityNotFound, ex.exc_info[0])
mock_get.assert_called_once_with(self.ctx, non_exist_identifier)
@mock.patch.object(stack.Stack, 'load')
@tools.stack_context('service_resource_describe_nonexist_test_stack')
def test_stack_resource_describe_nonexist_resource(self, mock_load):
mock_load.return_value = self.stack
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.describe_stack_resource,
self.ctx, self.stack.identifier(), 'foo')
self.assertEqual(exception.ResourceNotFound, ex.exc_info[0])
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
@tools.stack_context('service_resource_describe_noncreated_test_stack',
create_res=False)
def test_stack_resource_describe_noncreated_resource(self):
self._test_describe_stack_resource()
@mock.patch.object(service.EngineService, '_authorize_stack_user')
@tools.stack_context('service_resource_describe_user_deny_test_stack')
def test_stack_resource_describe_stack_user_deny(self, mock_auth):
self.ctx.roles = [cfg.CONF.heat_stack_user_role]
mock_auth.return_value = False
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.describe_stack_resource,
self.ctx, self.stack.identifier(), 'foo')
self.assertEqual(exception.Forbidden, ex.exc_info[0])
mock_auth.assert_called_once_with(self.ctx, mock.ANY, 'foo')
@mock.patch.object(stack.Stack, 'load')
@tools.stack_context('service_resources_describe_test_stack')
def test_stack_resources_describe(self, mock_load):
mock_load.return_value = self.stack
resources = self.eng.describe_stack_resources(self.ctx,
self.stack.identifier(),
'WebServer')
self.assertEqual(1, len(resources))
r = resources[0]
self.assertIn('resource_identity', r)
self.assertIn('description', r)
self.assertIn('updated_time', r)
self.assertIn('stack_identity', r)
self.assertIsNotNone(r['stack_identity'])
self.assertIn('stack_name', r)
self.assertEqual(self.stack.name, r['stack_name'])
self.assertIn('resource_status', r)
self.assertIn('resource_status_reason', r)
self.assertIn('resource_type', r)
self.assertIn('physical_resource_id', r)
self.assertIn('resource_name', r)
self.assertEqual('WebServer', r['resource_name'])
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
@mock.patch.object(stack.Stack, 'load')
@tools.stack_context('service_resources_describe_no_filter_test_stack')
def test_stack_resources_describe_no_filter(self, mock_load):
mock_load.return_value = self.stack
resources = self.eng.describe_stack_resources(
self.ctx, self.stack.identifier(), None)
self.assertEqual(1, len(resources))
r = resources[0]
self.assertIn('resource_name', r)
self.assertEqual('WebServer', r['resource_name'])
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
@mock.patch.object(service.EngineService, '_get_stack')
def test_stack_resources_describe_bad_lookup(self, mock_get):
mock_get.side_effect = TypeError
self.assertRaises(TypeError,
self.eng.describe_stack_resources,
self.ctx, None, 'WebServer')
mock_get.assert_called_once_with(self.ctx, None)
def test_stack_resources_describe_nonexist_stack(self):
non_exist_identifier = identifier.HeatIdentifier(
self.ctx.tenant_id, 'wibble',
'18d06e2e-44d3-4bef-9fbf-52480d604b02')
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.describe_stack_resources,
self.ctx, non_exist_identifier, 'WebServer')
self.assertEqual(exception.EntityNotFound, ex.exc_info[0])
@tools.stack_context('find_phys_res_stack')
def test_find_physical_resource(self):
resources = self.eng.describe_stack_resources(self.ctx,
self.stack.identifier(),
None)
phys_id = resources[0]['physical_resource_id']
result = self.eng.find_physical_resource(self.ctx, phys_id)
self.assertIsInstance(result, dict)
resource_identity = identifier.ResourceIdentifier(**result)
self.assertEqual(self.stack.identifier(), resource_identity.stack())
self.assertEqual('WebServer', resource_identity.resource_name)
def test_find_physical_resource_nonexist(self):
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.find_physical_resource,
self.ctx, 'foo')
self.assertEqual(exception.EntityNotFound, ex.exc_info[0])
@mock.patch.object(stack.Stack, 'load')
@tools.stack_context('service_resources_list_test_stack')
def test_stack_resources_list(self, mock_load):
mock_load.return_value = self.stack
resources = self.eng.list_stack_resources(self.ctx,
self.stack.identifier())
self.assertEqual(1, len(resources))
r = resources[0]
self.assertIn('resource_identity', r)
self.assertIn('updated_time', r)
self.assertIn('physical_resource_id', r)
self.assertIn('resource_name', r)
self.assertEqual('WebServer', r['resource_name'])
self.assertIn('resource_status', r)
self.assertIn('resource_status_reason', r)
self.assertIn('resource_type', r)
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
@mock.patch.object(stack.Stack, 'load')
@tools.stack_context('service_resources_list_test_stack_with_depth')
def test_stack_resources_list_with_depth(self, mock_load):
mock_load.return_value = self.stack
resources = six.itervalues(self.stack)
self.stack.iter_resources = mock.Mock(return_value=resources)
self.eng.list_stack_resources(self.ctx,
self.stack.identifier(),
2)
self.stack.iter_resources.assert_called_once_with(2,
filters=None)
@mock.patch.object(stack.Stack, 'load')
@tools.stack_context('service_resources_list_test_stack_with_max_depth')
def test_stack_resources_list_with_max_depth(self, mock_load):
mock_load.return_value = self.stack
resources = six.itervalues(self.stack)
self.stack.iter_resources = mock.Mock(return_value=resources)
self.eng.list_stack_resources(self.ctx,
self.stack.identifier(),
99)
max_depth = cfg.CONF.max_nested_stack_depth
self.stack.iter_resources.assert_called_once_with(max_depth,
filters=None)
@mock.patch.object(stack.Stack, 'load')
@tools.stack_context('service_resources_list_test_stack')
def test_stack_resources_filter_type(self, mock_load):
mock_load.return_value = self.stack
resources = six.itervalues(self.stack)
self.stack.iter_resources = mock.Mock(return_value=resources)
filters = {'type': 'AWS::EC2::Instance'}
resources = self.eng.list_stack_resources(self.ctx,
self.stack.identifier(),
filters=filters)
self.stack.iter_resources.assert_called_once_with(
0, filters={})
self.assertIn('AWS::EC2::Instance', resources[0]['resource_type'])
@mock.patch.object(stack.Stack, 'load')
@tools.stack_context('service_resources_list_test_stack')
def test_stack_resources_filter_type_not_found(self, mock_load):
mock_load.return_value = self.stack
resources = six.itervalues(self.stack)
self.stack.iter_resources = mock.Mock(return_value=resources)
filters = {'type': 'NonExisted'}
resources = self.eng.list_stack_resources(self.ctx,
self.stack.identifier(),
filters=filters)
self.stack.iter_resources.assert_called_once_with(
0, filters={})
self.assertEqual(0, len(resources))
@mock.patch.object(stack.Stack, 'load')
def test_stack_resources_list_deleted_stack(self, mock_load):
stk = tools.setup_stack('resource_list_deleted_stack', self.ctx)
stack_id = stk.identifier()
mock_load.return_value = stk
tools.clean_up_stack(stk)
resources = self.eng.list_stack_resources(self.ctx, stack_id)
self.assertEqual(1, len(resources))
res = resources[0]
self.assertEqual('DELETE', res['resource_action'])
self.assertEqual('COMPLETE', res['resource_status'])
@mock.patch.object(service.EngineService, '_get_stack')
def test_stack_resources_list_nonexist_stack(self, mock_get):
non_exist_identifier = identifier.HeatIdentifier(
self.ctx.tenant_id, 'wibble',
'18d06e2e-44d3-4bef-9fbf-52480d604b02')
mock_get.side_effect = exception.EntityNotFound(entity='Stack',
name='test')
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.list_stack_resources,
self.ctx, non_exist_identifier)
self.assertEqual(exception.EntityNotFound, ex.exc_info[0])
mock_get.assert_called_once_with(self.ctx, non_exist_identifier,
show_deleted=True)
def _stack_create(self, stack_name):
self.patchobject(keystone.KeystoneClientPlugin, '_create',
return_value=test_fakes.FakeKeystoneClient())
stk = tools.get_stack(stack_name, self.ctx, policy_template)
stk.store()
stk.create()
s = stack_object.Stack.get_by_id(self.ctx, stk.id)
self.patchobject(service.EngineService, '_get_stack', return_value=s)
return stk
def test_signal_reception_async(self):
self.eng.thread_group_mgr = tools.DummyThreadGroupMgrLogStart()
stack_name = 'signal_reception_async'
self.stack = self._stack_create(stack_name)
test_data = {'food': 'yum'}
self.eng.resource_signal(self.ctx,
dict(self.stack.identifier()),
'WebServerScaleDownPolicy',
test_data)
self.assertEqual([(self.stack.id, mock.ANY)],
self.eng.thread_group_mgr.started)
@mock.patch.object(res.Resource, 'signal')
def test_signal_reception_sync(self, mock_signal):
mock_signal.return_value = None
stack_name = 'signal_reception_sync'
self.stack = self._stack_create(stack_name)
test_data = {'food': 'yum'}
self.eng.resource_signal(self.ctx,
dict(self.stack.identifier()),
'WebServerScaleDownPolicy',
test_data,
sync_call=True)
mock_signal.assert_called_once_with(mock.ANY, False)
def test_signal_reception_no_resource(self):
stack_name = 'signal_reception_no_resource'
self.stack = self._stack_create(stack_name)
test_data = {'food': 'yum'}
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.resource_signal, self.ctx,
dict(self.stack.identifier()),
'resource_does_not_exist',
test_data)
self.assertEqual(exception.ResourceNotFound, ex.exc_info[0])
@mock.patch.object(stack.Stack, 'load')
@mock.patch.object(service.EngineService, '_get_stack')
def test_signal_reception_unavailable_resource(self, mock_get, mock_load):
stack_name = 'signal_reception_unavailable_resource'
stk = tools.get_stack(stack_name, self.ctx, policy_template)
stk.store()
self.stack = stk
s = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
mock_load.return_value = stk
mock_get.return_value = s
test_data = {'food': 'yum'}
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.resource_signal, self.ctx,
dict(self.stack.identifier()),
'WebServerScaleDownPolicy',
test_data)
self.assertEqual(exception.ResourceNotAvailable, ex.exc_info[0])
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY,
use_stored_context=mock.ANY)
mock_get.assert_called_once_with(self.ctx, self.stack.identifier())
@mock.patch.object(res.Resource, 'signal')
def test_signal_returns_metadata(self, mock_signal):
mock_signal.return_value = None
self.stack = self._stack_create('signal_reception')
rsrc = self.stack['WebServerScaleDownPolicy']
test_metadata = {'food': 'yum'}
rsrc.metadata_set(test_metadata)
md = self.eng.resource_signal(self.ctx,
dict(self.stack.identifier()),
'WebServerScaleDownPolicy', None,
sync_call=True)
self.assertEqual(test_metadata, md)
mock_signal.assert_called_once_with(mock.ANY, False)
def test_signal_unset_invalid_hook(self):
self.stack = self._stack_create('signal_unset_invalid_hook')
details = {'unset_hook': 'invalid_hook'}
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.resource_signal,
self.ctx,
dict(self.stack.identifier()),
'WebServerScaleDownPolicy',
details)
msg = 'Invalid hook type "invalid_hook"'
self.assertIn(msg, six.text_type(ex.exc_info[1]))
self.assertEqual(exception.InvalidBreakPointHook,
ex.exc_info[0])
def test_signal_unset_not_defined_hook(self):
self.stack = self._stack_create('signal_unset_not_defined_hook')
details = {'unset_hook': 'pre-update'}
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.resource_signal,
self.ctx,
dict(self.stack.identifier()),
'WebServerScaleDownPolicy',
details)
msg = ('The "pre-update" hook is not defined on '
'AWSScalingPolicy "WebServerScaleDownPolicy"')
self.assertIn(msg, six.text_type(ex.exc_info[1]))
self.assertEqual(exception.InvalidBreakPointHook,
ex.exc_info[0])
@mock.patch.object(res.Resource, 'metadata_update')
@mock.patch.object(res.Resource, 'signal')
@mock.patch.object(service.EngineService, '_get_stack')
def test_signal_calls_metadata_update(self, mock_get, mock_signal,
mock_update):
# fake keystone client
self.patchobject(keystone.KeystoneClientPlugin, '_create',
return_value=test_fakes.FakeKeystoneClient())
stk = tools.get_stack('signal_reception', self.ctx, policy_template)
self.stack = stk
stk.store()
stk.create()
s = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
mock_get.return_value = s
mock_signal.return_value = True
# this will be called once for the Random resource
mock_update.return_value = None
self.eng.resource_signal(self.ctx,
dict(self.stack.identifier()),
'WebServerScaleDownPolicy', None,
sync_call=True)
mock_get.assert_called_once_with(self.ctx, self.stack.identifier())
mock_signal.assert_called_once_with(mock.ANY, False)
mock_update.assert_called_once_with()
@mock.patch.object(res.Resource, 'metadata_update')
@mock.patch.object(res.Resource, 'signal')
@mock.patch.object(service.EngineService, '_get_stack')
def test_signal_no_calls_metadata_update(self, mock_get, mock_signal,
mock_update):
# fake keystone client
self.patchobject(keystone.KeystoneClientPlugin, '_create',
return_value=test_fakes.FakeKeystoneClient())
stk = tools.get_stack('signal_reception', self.ctx, policy_template)
self.stack = stk
stk.store()
stk.create()
s = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
mock_get.return_value = s
mock_signal.return_value = False
self.eng.resource_signal(self.ctx,
dict(self.stack.identifier()),
'WebServerScaleDownPolicy', None,
sync_call=True)
mock_get.assert_called_once_with(self.ctx, self.stack.identifier())
mock_signal.assert_called_once_with(mock.ANY, False)
# this will never be called
self.assertEqual(0, mock_update.call_count)
def test_lazy_load_resources(self):
stack_name = 'lazy_load_test'
lazy_load_template = {
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'},
'bar': {
'Type': 'ResourceWithPropsType',
'Properties': {
'Foo': {'Ref': 'foo'},
}
}
}
}
templ = templatem.Template(lazy_load_template)
stk = stack.Stack(self.ctx, stack_name, templ)
self.assertIsNone(stk._resources)
self.assertIsNone(stk._dependencies)
resources = stk.resources
self.assertIsInstance(resources, dict)
self.assertEqual(2, len(resources))
self.assertIsInstance(resources.get('foo'),
generic_rsrc.GenericResource)
self.assertIsInstance(resources.get('bar'),
generic_rsrc.ResourceWithProps)
stack_dependencies = stk.dependencies
self.assertIsInstance(stack_dependencies, dependencies.Dependencies)
self.assertEqual(2, len(stack_dependencies.graph()))
@tools.stack_context('service_mark_healthy_create_complete_test_stk')
def test_mark_healthy_in_create_complete(self):
self.eng.resource_mark_unhealthy(self.ctx, self.stack.identifier(),
'WebServer', False,
resource_status_reason='noop')
r = self.eng.describe_stack_resource(self.ctx, self.stack.identifier(),
'WebServer', with_attr=None)
self.assertIn('resource_action', r)
self.assertIn('resource_status', r)
self.assertIn('resource_status_reason', r)
self.assertEqual(r['resource_action'], 'CREATE')
self.assertEqual(r['resource_status'], 'COMPLETE')
self.assertEqual(r['resource_status_reason'], 'state changed')
@tools.stack_context('service_mark_unhealthy_create_complete_test_stk')
def test_mark_unhealthy_in_create_complete(self):
self.eng.resource_mark_unhealthy(self.ctx, self.stack.identifier(),
'WebServer', True,
resource_status_reason='Some Reason')
r = self.eng.describe_stack_resource(self.ctx, self.stack.identifier(),
'WebServer', with_attr=None)
self.assertEqual(r['resource_action'], 'CHECK')
self.assertEqual(r['resource_status'], 'FAILED')
self.assertEqual(r['resource_status_reason'], 'Some Reason')
@tools.stack_context('service_mark_healthy_check_failed_test_stk')
def test_mark_healthy_check_failed(self):
self.eng.resource_mark_unhealthy(self.ctx, self.stack.identifier(),
'WebServer', True,
resource_status_reason='Some Reason')
r = self.eng.describe_stack_resource(self.ctx, self.stack.identifier(),
'WebServer', with_attr=None)
self.assertEqual(r['resource_action'], 'CHECK')
self.assertEqual(r['resource_status'], 'FAILED')
self.assertEqual(r['resource_status_reason'], 'Some Reason')
self.eng.resource_mark_unhealthy(self.ctx, self.stack.identifier(),
'WebServer', False,
resource_status_reason='Good Reason')
r = self.eng.describe_stack_resource(self.ctx, self.stack.identifier(),
'WebServer', with_attr=None)
self.assertEqual(r['resource_action'], 'CHECK')
self.assertEqual(r['resource_status'], 'COMPLETE')
self.assertEqual(r['resource_status_reason'], 'Good Reason')
@tools.stack_context('service_mark_unhealthy_check_failed_test_stack')
def test_mark_unhealthy_check_failed(self):
self.eng.resource_mark_unhealthy(self.ctx, self.stack.identifier(),
'WebServer', True,
resource_status_reason='Some Reason')
r = self.eng.describe_stack_resource(self.ctx, self.stack.identifier(),
'WebServer', with_attr=None)
self.assertEqual(r['resource_action'], 'CHECK')
self.assertEqual(r['resource_status'], 'FAILED')
self.assertEqual(r['resource_status_reason'], 'Some Reason')
self.eng.resource_mark_unhealthy(self.ctx, self.stack.identifier(),
'WebServer', True,
resource_status_reason='New Reason')
r = self.eng.describe_stack_resource(self.ctx, self.stack.identifier(),
'WebServer', with_attr=None)
self.assertEqual(r['resource_action'], 'CHECK')
self.assertEqual(r['resource_status'], 'FAILED')
self.assertEqual(r['resource_status_reason'], 'New Reason')
@tools.stack_context('service_mark_unhealthy_invalid_value_test_stk')
def test_mark_unhealthy_invalid_value(self):
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.resource_mark_unhealthy,
self.ctx,
self.stack.identifier(),
'WebServer', "This is wrong",
resource_status_reason="Some Reason")
self.assertEqual(exception.Invalid, ex.exc_info[0])
@tools.stack_context('service_mark_unhealthy_none_reason_test_stk')
def test_mark_unhealthy_none_reason(self):
self.eng.resource_mark_unhealthy(self.ctx, self.stack.identifier(),
'WebServer', True)
r = self.eng.describe_stack_resource(self.ctx, self.stack.identifier(),
'WebServer', with_attr=None)
self.assertEqual(r['resource_action'], 'CHECK')
self.assertEqual(r['resource_status'], 'FAILED')
self.assertEqual(r['resource_status_reason'],
'state changed by resource_mark_unhealthy api')
@tools.stack_context('service_mark_unhealthy_empty_reason_test_stk')
def test_mark_unhealthy_empty_reason(self):
self.eng.resource_mark_unhealthy(self.ctx, self.stack.identifier(),
'WebServer', True,
resource_status_reason="")
r = self.eng.describe_stack_resource(self.ctx, self.stack.identifier(),
'WebServer', with_attr=None)
self.assertEqual(r['resource_action'], 'CHECK')
self.assertEqual(r['resource_status'], 'FAILED')
self.assertEqual(r['resource_status_reason'],
'state changed by resource_mark_unhealthy api')
@tools.stack_context('service_mark_unhealthy_lock_no_converge_test_stk')
def test_mark_unhealthy_lock_no_convergence(self):
mock_acquire = self.patchobject(stack_lock.StackLock,
'acquire',
return_value=None)
mock_release = self.patchobject(stack_lock.StackLock,
'release',
return_value=None)
self.eng.resource_mark_unhealthy(self.ctx, self.stack.identifier(),
'WebServer', True,
resource_status_reason="")
mock_acquire.assert_called_once_with()
mock_release.assert_called_once_with()
@tools.stack_context('service_mark_unhealthy_lock_converge_test_stk',
convergence=True)
def test_mark_unhealthy_stack_lock_convergence(self):
mock_acquire = self.patchobject(res.Resource,
'_acquire',
return_value=None)
self.eng.resource_mark_unhealthy(self.ctx, self.stack.identifier(),
'WebServer', True,
resource_status_reason="")
mock_acquire.assert_called_once_with(self.eng.engine_id)
@tools.stack_context('service_mark_unhealthy_lockexc_converge_test_stk',
convergence=True)
def test_mark_unhealthy_stack_lock_exc_convergence(self):
def _acquire(*args, **kwargs):
raise exception.UpdateInProgress(self.stack.name)
self.patchobject(
res.Resource,
'_acquire',
return_value=None,
side_effect=exception.UpdateInProgress(self.stack.name))
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.resource_mark_unhealthy,
self.ctx,
self.stack.identifier(),
'WebServer', True,
resource_status_reason="")
self.assertEqual(exception.ActionInProgress, ex.exc_info[0])
@tools.stack_context('service_mark_unhealthy_lockexc_no_converge_test_stk')
def test_mark_unhealthy_stack_lock_exc_no_convergence(self):
self.patchobject(
stack_lock.StackLock,
'acquire',
return_value=None,
side_effect=exception.ActionInProgress(
stack_name=self.stack.name,
action=self.stack.action))
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.resource_mark_unhealthy,
self.ctx,
self.stack.identifier(),
'WebServer', True,
resource_status_reason="")
self.assertEqual(exception.ActionInProgress, ex.exc_info[0])
|
steveb/heat
|
heat/tests/engine/service/test_stack_resources.py
|
Python
|
apache-2.0
| 32,282
|
from distutils.core import setup
setup(
name='megad-mqtt-gw',
version='0.3',
description='Gateway between MQTT queue and MegaD devices (http://ab-log.ru)',
author='rs',
author_email='repalov@gmail.com',
url='https://github.com/repalov/megad-mqtt-gw',
license='Apache License',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: System :: Networking',
'License :: OSI Approved :: Apache Software License',
],
packages=['megad'],
install_requires=['aiohttp>=3.4.4', 'paho-mqtt>=1.4', 'netifaces>=0.10.7'],
data_files=[('/etc', ['megad-mqtt-gw.homeassistant.conf', 'megad-mqtt-gw.wirenboard.conf']),
('/lib/systemd/system', ['megad-mqtt-gw.service'])
],
entry_points={'console_scripts': ['megad-mqtt-gw=megad.megad_mqtt_gw:main']}
)
|
repalov/megad-mqtt-gw
|
setup.py
|
Python
|
apache-2.0
| 1,025
|
# coding: utf-8
import os
import sys
import logging
import webbrowser
import socket
import time
import json
import traceback
import cv2
import tornado.ioloop
import tornado.web
import tornado.websocket
from tornado.concurrent import run_on_executor
from concurrent.futures import ThreadPoolExecutor # `pip install futures` for python2
import atx
from atx import logutils
from atx import imutils
from atx import base
__dir__ = os.path.dirname(os.path.abspath(__file__))
log = logutils.getLogger("webide", level=logging.DEBUG)
log.setLevel(logging.DEBUG)
IMAGE_PATH = ['.', 'imgs', 'images']
workdir = '.'
device = None
atx_settings = {}
def read_file(filename, default=''):
if not os.path.isfile(filename):
return default
with open(filename, 'rb') as f:
return f.read()
def write_file(filename, content):
with open(filename, 'w') as f:
f.write(content.encode('utf-8'))
def get_valid_port():
for port in range(10010, 10100):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex(('127.0.0.1', port))
sock.close()
if result != 0:
return port
raise SystemError("Can not find a unused port, amazing!")
class FakeStdout(object):
def __init__(self, fn=sys.stdout.write):
self._fn = fn
def write(self, s):
self._fn(s)
def flush(self):
pass
class ImageHandler(tornado.web.RequestHandler):
def get(self):
imgs = base.list_images(path=IMAGE_PATH)
images = []
for name in imgs:
realpath = name.replace('\\', '/') # fix for windows
name = os.path.basename(name).split('@')[0]
images.append([name, realpath])
self.write({
'images': images,
'baseURL': self.request.protocol + '://' + self.request.host+'/static_imgs/'
})
class MainHandler(tornado.web.RequestHandler):
def get(self):
imgs = base.list_images(path=IMAGE_PATH)
imgs = [(os.path.basename(name), name) for name in imgs]
self.render('index.html', images=imgs)
def post(self):
print self.get_argument('xml_text')
self.write("Good")
class DebugWebSocket(tornado.websocket.WebSocketHandler):
executor = ThreadPoolExecutor(max_workers=1)
def open(self):
log.info("WebSocket connected")
self._run = False
def _highlight_block(self, id):
self.write_message({'type': 'highlight', 'id': id})
if not self._run:
raise RuntimeError("stopped")
else:
time.sleep(.1)
def write_console(self, s):
self.write_message({'type': 'console', 'output': s})
def run_blockly(self, code):
filename = '__tmp.py'
fake_sysout = FakeStdout(self.write_console)
__sysout = sys.stdout
sys.stdout = fake_sysout # TODOs
self.write_message({'type': 'console', 'output': '# '+time.strftime('%H:%M:%S') + ' start running\n'})
try:
# python code always UTF-8
code = code.encode('utf-8')
# hot patch
code = code.replace('atx.click_image', 'd.click_image')
exec code in {
'highlight_block': self._highlight_block,
'__name__': '__main__',
'__file__': filename}
except RuntimeError as e:
if str(e) != 'stopped':
raise
print 'Program stopped'
except Exception as e:
self.write_message({'type': 'traceback', 'output': traceback.format_exc()})
finally:
self._run = False
self.write_message({'type': 'run', 'status': 'ready'})
sys.stdout = __sysout
@run_on_executor
def background_task(self, code):
self.write_message({'type': 'run', 'status': 'running'})
self.run_blockly(code)
return True
@tornado.gen.coroutine
def on_message(self, message_text):
message = None
try:
message = json.loads(message_text)
except:
print 'Invalid message from browser:', message_text
return
command = message.get('command')
if command == 'refresh':
imgs = base.list_images(path=IMAGE_PATH)
imgs = [dict(
path=name.replace('\\', '/'), name=os.path.basename(name)) for name in imgs]
self.write_message({'type': 'image_list', 'data': list(imgs)})
elif command == 'stop':
self._run = False
self.write_message({'type': 'run', 'notify': '停止中'})
elif command == 'run':
if self._run:
self.write_message({'type': 'run', 'notify': '运行中'})
return
self._run = True
res = yield self.background_task(message.get('code'))
self.write_message({'type': 'run', 'status': 'ready', 'notify': '运行结束', 'result': res})
else:
self.write_message(u"You said: " + message)
def on_close(self):
log.info("WebSocket closed")
def check_origin(self, origin):
return True
class WorkspaceHandler(tornado.web.RequestHandler):
def get(self):
ret = {}
ret['xml_text'] = read_file('blockly.xml', default='<xml xmlns="http://www.w3.org/1999/xhtml"></xml>')
ret['python_text'] = read_file('blockly.py')
self.write(ret)
def post(self):
log.info("Save workspace")
xml_text = self.get_argument('xml_text')
python_text = self.get_argument('python_text')
write_file('blockly.xml', xml_text)
write_file('blockly.py', python_text)
class ScreenshotHandler(tornado.web.RequestHandler):
def get(self):
d = atx.connect(**atx_settings)
d.screenshot('_screen.png')
self.set_header('Content-Type', 'image/png')
with open('_screen.png', 'rb') as f:
while 1:
data = f.read(16000)
if not data:
break
self.write(data)
self.finish()
def post(self):
raw_image = self.get_argument('raw_image')
filename = self.get_argument('filename')
image = imutils.open(raw_image)
cv2.imwrite(filename, image)
self.write({'status': 'ok'})
class StaticFileHandler(tornado.web.StaticFileHandler):
def get(self, path=None, include_body=True):
path = path.encode(base.SYSTEM_ENCODING) # fix for windows
return super(StaticFileHandler, self).get(path, include_body)
def make_app(settings={}):
static_path = os.getcwd()
application = tornado.web.Application([
(r"/", MainHandler),
(r'/ws', DebugWebSocket), # code debug
(r"/workspace", WorkspaceHandler), # save and write workspace
(r"/images/screenshot", ScreenshotHandler),
(r'/api/images', ImageHandler),
(r'/static_imgs/(.*)', StaticFileHandler, {'path': static_path}),
], **settings)
return application
def main(web_port=None, host=None, port=None, open_browser=True, workdir='.'):
application = make_app({
'static_path': os.path.join(__dir__, 'static'),
'template_path': os.path.join(__dir__, 'static'),
'debug': True,
})
if not web_port:
web_port = get_valid_port()
global device
# global workdir
atx_settings['host'] = host
atx_settings['port'] = port
# device = atx.connect(host=kws.get('host'), port=kws.get('port'))
# TODO
# filename = 'blockly.py'
IMAGE_PATH.append('images/blockly')
if open_browser:
url = 'http://127.0.0.1:{}'.format(web_port)
webbrowser.open(url, new=2) # 2: open new tab if possible
application.listen(web_port)
log.info("Server started.")
log.info("Listening port on 127.0.0.1:{}".format(web_port))
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
Andy-hpliu/AirtestX
|
atx/cmds/webide.py
|
Python
|
apache-2.0
| 7,985
|
from charms.reactive import Endpoint, when
class TestAltProvides(Endpoint):
invocations = []
@when('endpoint.{endpoint_name}.joined')
def handle_joined(self):
self.invocations.append('joined: {}'.format(self.endpoint_name))
@when('endpoint.{endpoint_name}.changed')
def handle_changed(self):
self.invocations.append('changed: {}'.format(self.endpoint_name))
@when('endpoint.{endpoint_name}.changed.foo')
def handle_changed_foo(self):
self.invocations.append('changed.foo: {}'.format(self.endpoint_name))
|
juju-solutions/charms.reactive
|
tests/data/reactive/relations/test-alt/provides.py
|
Python
|
apache-2.0
| 561
|
# Licensed to Hortonworks, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Hortonworks, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from djangomako.shortcuts import render_to_response
from django.shortcuts import redirect
from django.http import HttpResponse, Http404
from models import UserLocation
import settings
import os
import time
import string
from urlparse import urlparse
def tutorials_last_url(tutorial_view):
def save_user_location(request, *args):
if request.user.is_authenticated() \
and request.user.username != "AnonymousUser":
user_location = UserLocation.objects.get_or_create(user=request.user)[0]
user_location.step_location = request.build_absolute_uri()
user_location.save()
return tutorial_view(request, *args)
return save_user_location
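# A minimal usage sketch (the view below is hypothetical, not part of the
# original app): wrapping a step view with tutorials_last_url persists the
# absolute URL of the request on the user's UserLocation row before the
# wrapped view runs.
@tutorials_last_url
def example_lesson_step(request):
    return render_to_response("lessons.html",
                              {'content': settings.CONTENT_FRAME_URL,
                               'step_location': '/lesson/'})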
def index(request):
location = settings.CONTENT_FRAME_URL
step_location = "/lesson/"
if request.user.is_authenticated() \
and request.user.username != "AnonymousUser":
try:
ustep = UserLocation.objects.get(user=request.user)
hue_location = ustep.hue_location
step_location = ustep.step_location
if step_location == None:
step_location = "/lesson/"
if urlparse(hue_location).netloc==urlparse(location).netloc:
location = hue_location
except UserLocation.DoesNotExist:
pass
return render_to_response("lessons.html",
{'content' : location,
'step_location': step_location})
def content(request, page):
if page == '':
return redirect('/')
return render_to_response("content.html", {})
def sync_location(request):
if request.method == 'GET':
if not request.user.is_authenticated() \
or request.user.username == 'AnonymousUser':
return HttpResponse('')
hue_location = None
if 'loc' in request.GET:
hue_location = request.GET['loc']
ustep = UserLocation.objects.get_or_create(user=request.user)[0]
ustep.hue_location = hue_location
ustep.save()
return HttpResponse('')
else:
raise Http404
def get_file(request, path):
import mimetypes
from django.core.servers.basehttp import FileWrapper
git_files = os.path.join(settings.PROJECT_PATH, 'run/git_files')
rfile = os.path.join(git_files, path)
response = HttpResponse(FileWrapper(file(rfile, 'rb')),
mimetype=mimetypes.guess_type(rfile)[0])
return response
def network_info(request):
import subprocess
commands = [
"route -n",
"getent ahosts",
"ip addr",
"cat /etc/resolv.conf",
"cat /etc/hosts",
"ps aux | grep java",
"netstat -lnp",
]
netinfo = {cmd: subprocess.check_output(cmd, shell=True)
for cmd in commands}
return render_to_response("netinfo.html", {'info': netinfo})
|
hortonworks/hortonworks-sandbox
|
tutorials/tutorials_app/views.py
|
Python
|
apache-2.0
| 3,659
|
#!/usr/bin/env python
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import web
urls = (
'/(.*)', 'router'
)
app = web.application(urls, globals())
class router:
def GET(self, path):
if path == '':
path = 'index.html'
f = open('_build/html/'+path)
return f.read()
if __name__ == "__main__":
app = web.application(urls, globals())
app.run()
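# A minimal usage note: app.run() serves the Sphinx '_build/html' output over
# HTTP; web.py listens on port 8080 by default and typically also accepts an
# optional ip:port as the first command-line argument, e.g.
#   python doc-web.py 8000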
|
intel-analytics/analytics-zoo
|
pyzoo/docs/doc-web.py
|
Python
|
apache-2.0
| 942
|
# -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""@package src.ec2.helpers.query
@copyright Copyright (c) 2012 Institute of Nuclear Physics PAS <http://www.ifj.edu.pl/>
@author Oleksandr Gituliar <gituliar@gmail.com>
"""
from datetime import datetime
import urllib
from ec2.base.auth import _sign_parameters_ver2
def query(parameters, aws_key=None, aws_secret=None, endpoint=None,
method=None, secure=False):
parameters.setdefault('SignatureMethod', 'HmacSHA256')
parameters.setdefault('SignatureVersion', '2')
parameters['AWSAccessKeyId'] = aws_key
parameters['Timestamp'] = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
parameters['Version'] = "2012-03-01"
# set Signature
signature = _sign_parameters_ver2(
parameters,
aws_secret,
endpoint=endpoint,
method=method,
)
parameters['Signature'] = signature
# build request
protocol = 'http' if not secure else 'https'
query_parameters = urllib.urlencode(parameters)
if method == 'GET':
request = ("%s://%s/?%s" % (protocol, endpoint, query_parameters),)
elif method == 'POST':
request = ("%s://%s" % (protocol, endpoint), query_parameters)
else:
raise Exception('Unsupported %s method: %s' % (protocol.upper(), method))
response = urllib.urlopen(*request).read()
return request, response
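# A minimal usage sketch (all values below are placeholders, not real
# credentials or endpoints): only the signing and transport plumbing comes
# from query() above; the 'Action' parameter name is illustrative.
def example_describe_call():
    request, response = query(
        {'Action': 'DescribeInstances'},
        aws_key='EXAMPLE_KEY',
        aws_secret='EXAMPLE_SECRET',
        endpoint='ec2.example.com',
        method='GET',
    )
    return response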
def get_instance_tags(cluster_manager):
instances = cluster_manager.user.vm.get_list()
tags = []
for instance in instances:
tags.append({'resource-id': 'i-' + str(instance['vm_id']),
'key': 'Name',
'resource-type': 'instance',
'value': instance['name']})
return tags
def get_instance_name_tag(cluster_manager, id):
instance = cluster_manager.user.vm.get_by_id({'vm_id': id})
tags = {'resource-id': 'i-' + str(instance['vm_id']),
'key': 'Name',
'resource-type': 'instance',
'value': instance['name']}
return tags
def get_volume_name_tag(cluster_manager, id):
volume = cluster_manager.user.storage_image.get_by_id({'vm_id': id})
tags = {'resource-id': 'i-' + str(volume['storage_image_id']),
'key': 'Name',
'resource-type': 'volume',
'value': volume['name']}
return tags
def get_volume_tags(cluster_manager):
volumes = cluster_manager.user.storage_image.get_list()
tags = []
for volume in volumes:
tags.append({'resource-id': 'vol-' + str(volume['storage_image_id']),
'key': 'Name',
'resource-type': 'volume',
'value': volume['name']})
return tags
|
Dev-Cloud-Platform/Dev-Cloud
|
dev_cloud/cc1/src/ec2/helpers/query.py
|
Python
|
apache-2.0
| 3,363
|
__author__ = 'tom'
from django.contrib import admin
from core.models import Post, Project
admin.site.register(Post)
admin.site.register(Project)
|
probablytom/tomwallis.net
|
core/admin.py
|
Python
|
artistic-2.0
| 146
|
from .base import *
class OpStash(object):
cache = {}
@classmethod
def Add(cls, object):
t = object.type
cls.cache[t] = object
@classmethod
def Lookup(cls, type):
return cls.cache[type]
@classmethod
def Define(cls, pt):
cls.Add(pt)
return pt
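# A registration sketch (hypothetical opcode, not part of the original
# grammar): OpStash.Define is meant to be used as a class decorator; a
# registered record needs a `type` attribute matching its opcode value so
# that OpRecord.__data can resolve it via OpStash.Lookup.
#
# @OpStash.Define
# class exampleOp(pstruct.type):
#     type = 0x0001
#     _fields_ = [(Integer, 'value')]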
class OpRecord(pstruct.type):
def __data(self):
return OpStash.Lookup( int(self['code'].li) )
_fields_ = [
(Opcode_v1, 'code'),
(__data, 'data'),
]
class picSize(pstruct.type):
_fields_ = [
(Integer, 'size'),
(Integer, 'top'),
(Integer, 'left'),
(Integer, 'bottom'),
(Integer, 'right'),
]
class picFrame(pstruct.type):
_fields_ = [
(uint8, 'version'),
(uint8, 'picture'),
]
class bounds(pstruct.type):
_fields_ = [
(Integer, 'top'),
(Integer, 'left'),
(Integer, 'bottom'),
(Integer, 'right'),
]
class pixMap(pstruct.type):
_fields_ = [
(Long, 'baseAddr'),
(Integer, 'rowBytes'),
(bounds, 'bounds'),
(Integer, 'pmVersion'),
(Integer, 'packType'),
(Long, 'packSize'),
(Long, 'hRes'),
(Long, 'vRes'),
(Integer, 'pixelType'),
(Integer, 'pixelSize'),
(Integer, 'cmpCount'),
(Integer, 'cmpSize'),
(Long, 'planeBytes'),
(Long, 'pmTable'),
(Long, 'pmReserved'),
]
class directBitsRect(pstruct.type):
opcode = 0x009a
_fields_ = [
(pixMap, 'pixMap'),
(bounds, 'srcRect'),
(bounds, 'dstRect'),
(Integer, 'mode'),
]
class File(parray.terminated):
_object_ = OpRecord
def isTerminator(self, value):
return int(value['code']) == 0xff
|
arizvisa/syringe
|
template/image/pict/v1.py
|
Python
|
bsd-2-clause
| 1,778
|
from django import template
from django.utils.safestring import mark_safe
from django.template.base import Node
register = template.Library()
from tag_parser import template_tag
from tag_parser.basetags import BaseNode
@template_tag(register, 'asset')
class JsCssAssetNode(BaseNode):
"""
Implement asset filter
Usage:
{% asset js jquery 3.0.0-beta1 %}
{% asset css bootstrap 3.3.6 %}
"""
max_args = 10
compile_args = False
HOST_MAPPINGS = {
'bootcss': 'cdn.bootcss.com',
'baidu': 'libs.baidu.com',
'sinaapp': 'lib.sinaapp.com',
'aspnetcdn': 'jax.aspnetcdn.com',
'google': 'ajax.googleapis.com',
'upai': 'upcdn.b0.upaiyun.com',
'cdnjs': 'cdnjs.cloudflare.com',
'staticfile': 'cdn.staticfile.org',
'360': 'libs.useso.com'
}
def render_tag(self, context, *tag_args, **tag_kwargs):
asset_type = tag_args[-1]
host = self.HOST_MAPPINGS.get(self.host)
url = '//%s/%s.%s' % (host, '/'.join(tag_args[:-1]), asset_type)
if asset_type == 'css':
output = '<link href="%s" rel="stylesheet">' % url
elif asset_type == 'js':
output = '<script src="%s"></script>' % url
else:
output = ''
return mark_safe(output)
class CDNNode(Node):
"""Implements the actions of the asserts tag."""
def __init__(self, host, nodelist):
self.host = host
self.nodelist = nodelist
for node in self.nodelist:
node.host = host
def __repr__(self):
return "<CDNNode>"
def render(self, context):
output = self.nodelist.render(context)
return mark_safe(output)
@register.tag('cdn')
def do_cdn(parser, token):
"""
Implement cdn filter
Usage:
{% cdn bootcss %}
{% asset jquery 3.0.0-beta1 jquery.min js %}
{% asset bootstrap 3.3.6 css bootstrap.min css %}
{% asset bootstrap 3.3.6 js bootstrap.min js %}
{% endcdn %}
this will output:
<script src="//cdn.bootcss.com/jquery/3.0.0-beta1/jquery.min.js"></script>
<link href="//cdn.bootcss.com/bootstrap/3.3.6/css/bootstrap.min.css" rel="stylesheet">
"""
bits = token.split_contents()[1]
nodelist = parser.parse(('endcdn',))
token = parser.next_token()
return CDNNode(bits, nodelist)
class JsCssFontNode(Node):
"""Implements the actions of the js tag."""
def __init__(self, asset_type, bits):
self.type = asset_type #JS or CSS or font
self.bits = bits # name,version such as jquery 1.11
def __repr__(self):
return "<JsCssNode>"
def render(self, context):
output = ''
return mark_safe(output)
@register.tag
def js(parser, token):
bits = token.split_contents()
nodelist = parser.parse(('endjs',))
token = parser.next_token()
return JsCssFontNode('js', bits)
@register.tag
def css(parser, token):
bits = token.split_contents()
nodelist = parser.parse(('endcss',))
token = parser.next_token()
return JsCssFontNode('css', bits)
@register.tag
def font(parser, token):
bits = token.split_contents()
nodelist = parser.parse(('endfont',))
token = parser.next_token()
return JsCssFontNode('font', bits)
@register.simple_tag
def local(file):
return mark_safe('<link href="" rel="stylesheet">')
|
lsc20051426/cdn-asserts
|
cdn_assets/templatetags/cdn_assets.py
|
Python
|
bsd-2-clause
| 3,436
|
#!/usr/bin/env python
import tensorflow as tf
import edward as ed
import numpy as np
from numpy import array
from numpy.linalg import norm
from edward.models import Dirichlet, Multinomial, Gamma, Poisson
sess = tf.Session()
def build_toy_dataset(n, p, A, b):
"""
toy HMM with:
n=number of timesteps,
p=m length array where m is the number of hidden states and p_i is the
initial probability of being in state i
A=mxm transition matrix indexed by i, j where the (i,j) element is the
probability of transitioning from element j to element i
    b=m length array where b_i contains the Poisson rate for state i
"""
p = array(p)/float(sum(p))
z = [np.random.multinomial(1, p)]
obs = [np.random.poisson(z[-1].dot(b))]
for step in range(n-1):
z += [np.random.multinomial(1, z[-1].dot(A))]
obs += [float(np.random.poisson(z[-1].dot(b)))]
return obs, z
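# A minimal sanity-check sketch (toy numbers): five timesteps of a two-state
# chain; obs is a length-5 list of Poisson counts and z a length-5 list of
# one-hot state indicator vectors.
def example_toy_draw():
    obs_demo, z_demo = build_toy_dataset(5, [0.5, 0.5],
                                         array([[0.8, 0.4], [0.2, 0.6]]),
                                         [0.1, 3.])
    return len(obs_demo), len(z_demo)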
n = 162
p_true = [.7, .3]
A_true = array([[0.8,0.4],[0.2,0.6]])
b_true = [0.1, 3.]
obs_train, z_train = build_toy_dataset(n, p_true, A_true, b_true)
obs_test, z_test = build_toy_dataset(n, p_true, A_true, b_true)
#obs = tf.placeholder(tf.float32, [n])
def gen_hmm(vd):
z = tf.expand_dims(
tf.transpose(
tf.expand_dims(Multinomial(total_count=1., probs=vd['p']), 0)), 0)
obs = tf.expand_dims(
Poisson(rate=tf.matmul(tf.expand_dims(vd['b'],0), z[-1])), 0)
for t in range(n-1):
z_new = tf.transpose(Multinomial(total_count=1.,
probs=tf.transpose(tf.matmul(tf.transpose(vd['A']),z[-1]),
name='tx_prob')),name='z_new')
z = tf.concat([z,tf.expand_dims(z_new,0)],0)
obs = tf.concat([obs,
tf.expand_dims(
Poisson(rate=tf.matmul(
tf.expand_dims(vd['b'],0), z_new)),0)], 0)
return obs, z
p_p_alpha = [2.,2.]
p_A_alpha = [[2.,1.],[1.,2.]]
p_b_alpha = [0.5,2.0]
p_b_beta = [1.,1.]
q_p_alpha = tf.Variable(p_p_alpha)
q_A_alpha = tf.Variable(p_A_alpha)
q_b_alpha = tf.Variable(p_b_alpha)
q_b_beta = tf.Variable(p_b_beta)
p = Dirichlet(p_p_alpha, name='p')
A = Dirichlet(p_A_alpha, name='A')
b = Gamma(p_b_alpha, p_b_beta)
qp = Dirichlet(q_p_alpha, name='p')
qA = Dirichlet(q_A_alpha, name='A')
qb = Gamma(q_b_alpha, q_b_beta)
obs, z = gen_hmm({'p':p, 'A':A, 'b':b})
obs_train, z_train = build_toy_dataset(n, p_true, A_true, b_true)
obs_train = tf.expand_dims(tf.expand_dims(obs_train, 0), 0)
latent_vars = {p: qp, A: qA, b: qb}
data = {tf.squeeze(obs): tf.squeeze(obs_train)}
inference = ed.KLqp(latent_vars, data)
inference.run(n_samples=5, n_iter=2500)
print(qp.eval())
print(tf.transpose(qA).eval())
print(qb.eval())
obs_post = ed.copy(obs, {p: qp, A: qA, b: qb})
print("posterior observations")
print(tf.squeeze(obs_post).eval())
print("training observations")
print(tf.squeeze(obs_train).eval())
print("Mean absolute error on training data:")
print(ed.evaluate('mean_absolute_error', data={tf.squeeze(obs_post): tf.squeeze(obs_train)}))
print("test observations")
print(tf.squeeze(obs_test).eval())
print("Mean absolute error on test data:")
print(ed.evaluate('mean_absolute_error', data={tf.squeeze(obs_post): tf.squeeze(obs_test)}))
file_writer = tf.summary.FileWriter('/home/kyjohnso/projects/mlbslice/tb_logs',
tf.get_default_graph())
sess.close()
|
kyjohnso/mlbslice
|
hmm_sandbox.py
|
Python
|
bsd-2-clause
| 3,478
|
# -*- coding: utf-8 -*-
import json
import numpy as np
import sys
# Compare a set of computed solutions to best known solutions on the
# same problems.
# See src/vrptw_to_json.py, src/pdptw_to_json.py and
# src/hvrp_to_json.py.
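# Typical invocation (file names are illustrative): the first argument is the
# best-known-solutions JSON file, the remaining arguments are computed
# solution files, e.g.
#   python compare_to_BKS.py BKS.json solutions/*_sol.json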
CUSTOM_PRECISION = 1000
BENCH_DOUBLE_PRECISION = 100
CUSTOM_PRECISION_CLASSES = [
"solomon",
"homberger",
"li_lim",
"VFMP_V",
"HFVRP",
"cordeau",
]
def uses_custom_precision(bench):
custom = False
for current_class in CUSTOM_PRECISION_CLASSES:
if current_class in bench:
custom = True
break
return custom
def s_round(v, d):
if d == 0:
return str(int(v))
else:
return str(round(v, d))
JOB_TYPES = ["job", "pickup", "delivery"]
def nb_jobs(solution):
jobs = 0
for r in solution["routes"]:
for s in r["steps"]:
if s["type"] in JOB_TYPES:
jobs += 1
return jobs
def log_comparisons(BKS, files):
print(
",".join(
[
"Instance",
"Jobs",
"Vehicles",
"tightness",
"Best known cost",
"Assigned jobs",
"Used vehicles",
"Solution cost",
"Unassigned jobs",
"Gap (%)",
"Computing time (ms)",
]
)
)
jobs = []
vehicles = []
assigned = []
unassigned = []
tightnesses = []
gaps = []
computing_times = []
assigned_jobs = 0
total_files = len(files)
job_ok_files = 0
optimal_sols = 0
for f in files:
instance = f[0 : f.rfind("_sol.json")]
instance = instance[instance.rfind("/") + 1 :]
if instance not in BKS and instance + "_distance" not in BKS:
total_files -= 1
continue
if instance + "_distance" in BKS:
# Specific entry for approach targeting distance as optimization
# objective.
indicators = BKS[instance + "_distance"]
else:
indicators = BKS[instance]
BK_cost = indicators["best_known_cost"]
bench = indicators["class"]
if uses_custom_precision(bench):
BK_cost = int(BENCH_DOUBLE_PRECISION * BK_cost)
nb_job = indicators["jobs"]
jobs.append(nb_job)
nb_vehicle = indicators["vehicles"]
vehicles.append(nb_vehicle)
if "capacity" in indicators:
total_capacity = nb_vehicle * indicators["capacity"]
else:
total_capacity = indicators["total_capacity"]
tightness = round(float(indicators["total_amount"]) / total_capacity, 3)
tightnesses.append(tightness)
line = [instance, nb_job, nb_vehicle, tightness, BK_cost]
with open(f, "r") as sol_file:
solution = json.load(sol_file)
if solution["code"] != 0:
continue
sol_jobs = nb_jobs(solution)
assigned.append(sol_jobs)
line.append(sol_jobs)
line.append(len(solution["routes"]))
cost = solution["summary"]["cost"]
if uses_custom_precision(bench):
cost = int(round(BENCH_DOUBLE_PRECISION * float(cost) / CUSTOM_PRECISION))
line.append(cost)
line.append(nb_job - sol_jobs)
unassigned.append(nb_job - sol_jobs)
if sol_jobs == nb_job:
job_ok_files += 1
gap = 100 * (float(cost) / BK_cost - 1)
line.append(round(gap, 2))
gaps.append(gap)
if cost <= BK_cost:
optimal_sols += 1
else:
line.append("")
computing_time = (
solution["summary"]["computing_times"]["loading"]
+ solution["summary"]["computing_times"]["solving"]
)
line.append(computing_time)
computing_times.append(computing_time)
print(",".join(map(lambda x: str(x), line)))
print(
"Average,"
+ s_round(np.mean(jobs), 1)
+ ","
+ s_round(np.mean(vehicles), 1)
+ ","
+ s_round(np.mean(tightnesses), 2)
+ ","
+ s_round(np.mean(assigned), 1)
+ ",,,,"
+ s_round(np.mean(unassigned), 1)
+ ","
+ s_round(np.mean(gaps), 2)
+ ","
+ s_round(np.mean(computing_times), 0)
)
total_jobs = np.sum(jobs)
assigned_jobs = np.sum(assigned)
print(",")
print("Total jobs," + s_round(total_jobs, 0))
print(
"Total jobs assigned,"
+ s_round(assigned_jobs, 0)
+ ","
+ s_round(100 * float(assigned_jobs) / total_jobs, 2)
+ "%"
)
print(",")
print("Instances," + s_round(total_files, 0))
print(
"All jobs solutions,"
+ s_round(job_ok_files, 0)
+ ","
+ s_round(100 * float(job_ok_files) / total_files, 2)
+ "%"
)
print(
"Optimal solutions,"
+ s_round(optimal_sols, 0)
+ ","
+ s_round(100 * float(optimal_sols) / total_files, 2)
+ "%"
)
# Percentiles
print(",")
gaps_percentiles = np.percentile(gaps, [0, 10, 25, 50, 75, 90, 100])
ct_percentiles = np.percentile(computing_times, [0, 10, 25, 50, 75, 90, 100])
print(",Gaps,Computing times")
titles = [
"Min",
"First decile",
"Lower quartile",
"Median",
"Upper quartile",
"Ninth decile",
"Max",
]
for i in range(len(titles)):
print(
titles[i]
+ ","
+ s_round(gaps_percentiles[i], 2)
+ ","
+ s_round(ct_percentiles[i], 0)
)
if __name__ == "__main__":
# First argument if the best known solution file.
with open(sys.argv[1], "r") as sol_file:
bks = json.load(sol_file)
# Remaining arguments are computed solution files to use.
log_comparisons(bks, sys.argv[2:])
|
VROOM-Project/vroom-scripts
|
benchmarks/compare_to_BKS.py
|
Python
|
bsd-2-clause
| 5,946
|
# vim: set fileencoding=utf-8 ts=4 sw=4 expandtab fdm=marker:
"""
Small wrapper around the python ConfigParser module.
"""
import logging
import ConfigParser
LOG = logging.getLogger(__name__)
CONFIG = ConfigParser.ConfigParser()
DEFAULTS = {
'patterns': {
'path' : '(?P<artist>\w+) - (?P<year>\d+) - (?P<album>\w+)'
}
}
def get_param(section, name):
try:
param = CONFIG.get(section, name)
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
param = None
if not param:
# Do a default lookup
try:
param = DEFAULTS[section][name]
except KeyError:
# Parameter is not in defaults
LOG.error("Error: Parameter [%s][%s] does not exist", section, name)
param = ""
return param
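# A minimal usage sketch: with no configuration loaded into CONFIG, the
# lookup falls back to DEFAULTS, so this returns the default path pattern.
def example_default_lookup():
    return get_param('patterns', 'path')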
|
turbofish/mcverify
|
config.py
|
Python
|
bsd-2-clause
| 772
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from morphforge.simulation.neuron.core.neuronsimulationenvironment import NEURONEnvironment
from morphforgecontrib.simulation.channels.neuroml_via_neurounits.neuroml_via_neurounits_core import NeuroML_Via_NeuroUnits_Channel
from neurounits.importers.neuroml import ChannelMLReader
from morphforgecontrib.simulation.channels.neurounits.neuro_units_bridge import Neuron_NeuroUnitEqnsetMechanism
class NeuroML_Via_NeuroUnits_ChannelNEURON(Neuron_NeuroUnitEqnsetMechanism, NeuroML_Via_NeuroUnits_Channel):
def __init__(self, xml_filename, chlname=None,**kwargs):
(eqnset, chlinfo, default_params) = ChannelMLReader.BuildEqnset(xml_filename)
default_params = dict([(k, v.as_quantities_quantity()) for (k, v) in default_params.iteritems()])
super(NeuroML_Via_NeuroUnits_ChannelNEURON,self).__init__(eqnset=eqnset, default_parameters=default_params, recordables_map=None, recordables_data=None, xml_filename=xml_filename, chlname=chlname, **kwargs)
NEURONEnvironment.channels.register_plugin(NeuroML_Via_NeuroUnits_Channel, NeuroML_Via_NeuroUnits_ChannelNEURON)
|
mikehulluk/morphforge
|
src/morphforgecontrib/simulation/channels/neuroml_via_neurounits/neuroml_via_neurounits_neuron.py
|
Python
|
bsd-2-clause
| 2,634
|
'''
Created by auto_sdk on 2015.04.21
'''
from aliyun.api.base import RestApi
class Ecs20140526DeleteSnapshotRequest(RestApi):
def __init__(self,domain='ecs.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port)
self.SnapshotId = None
def getapiname(self):
return 'ecs.aliyuncs.com.DeleteSnapshot.2014-05-26'
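# A minimal usage sketch (the snapshot id is a placeholder; getResponse() is
# assumed to be provided by the RestApi base class):
def example_delete_snapshot():
    req = Ecs20140526DeleteSnapshotRequest()
    req.SnapshotId = 's-example'
    return req.getResponse()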
|
wanghe4096/website
|
aliyun/api/rest/Ecs20140526DeleteSnapshotRequest.py
|
Python
|
bsd-2-clause
| 334
|
import bee
from bee import *
import dragonfly
from dragonfly.commandhive import commandhive, commandapp
from dragonfly.sys import exitactuator
from dragonfly.io import display, commandsensor
from dragonfly.std import variable, transistor, test
from dragonfly.sys import on_next_tick
from components.workers.chessprocessor2 import chessprocessor2
from components.workers.chesskeeper import chesskeeper
from components.workers.chessboard2 import chessboard2
from components.workers.except_valueerror import except_valueerror
from components.workers.human import human
from components.workers.computer2b import computer2b
from direct.showbase.ShowBase import taskMgr
from panda3d.core import getModelPath
import os
getModelPath().prependPath(os.getcwd())
from bee import hivemodule
class myapp(commandapp):
def on_tick(self):
taskMgr.step()
taskMgr.step()
class myhive(commandhive):
_hivecontext = hivemodule.appcontext(myapp)
g = chessprocessor2()
exc_v = except_valueerror()
connect(g.evexc, exc_v)
com = commandsensor()
turn = variable("str")("White")
t_turn = transistor("str")()
connect(g.turn, t_turn)
connect(t_turn, turn)
on_next = on_next_tick()
connect(on_next, t_turn)
connect(g.made_move, on_next)
on_next2 = on_next_tick()
connect(g.made_move, on_next2)
p1 = computer2b(("White", "glaurung"))
connect(g.turn, p1.turn)
connect(on_next, p1.trigger_move)
p2 = computer2b(("Black", "glaurung"))
connect(g.turn, p2.turn)
connect(on_next2, p2.trigger_move)
k = chesskeeper()
connect(k, g)
connect(p1.move, g)
connect(p2.move, g)
connect(g, k)
connect(g, p1.make_move)
connect(g, p2.make_move)
b = chessboard2(None)
connect(turn, b.turn)
connect(b.get_move, g)
connect(g, b.make_move)
d = display("str")()
connect(g, d)
ex = exitactuator()
connect(g.finished, ex)
raiser = bee.raiser()
bee.connect("evexc", raiser)
m = myhive().getinstance()
m.build("m")
m.place()
m.close()
m.init()
m.run()
|
agoose77/hivesystem
|
manual/chess/tut-worker-3d.py
|
Python
|
bsd-2-clause
| 2,088
|
""" Normal-Gamma density."""
import numpy as np
from scipy.special import gammaln, psi
class NormalGamma(object):
"""Normal-Gamma density.
Attributes
----------
mu : numpy.ndarray
Mean of the Gaussian density.
kappa : float
Factor of the precision matrix.
alpha : float
Shape parameter of the Gamma density.
beta : numpy.ndarray
Rate parameters of the Gamma density.
Methods
-------
expLogPrecision()
Expected value of the logarithm of the precision.
expPrecision()
Expected value of the precision.
KL(self, pdf)
KL divergence between the current and the given densities.
newPosterior(self, stats)
Create a new Normal-Gamma density.
"""
def __init__(self, mu, kappa, alpha, beta):
self.mu = mu
self.kappa = kappa
self.alpha = alpha
self.beta = beta
def expLogPrecision(self):
'''Expected value of the logarithm of the precision.
Returns
-------
E_log_prec : numpy.ndarray
Log precision.
'''
return psi(self.alpha) - np.log(self.beta)
def expPrecision(self):
"""Expected value of the precision.
Returns
-------
E_prec : numpy.ndarray
Precision.
"""
return self.alpha/self.beta
def KL(self, q):
"""KL divergence between the current and the given densities.
Returns
-------
KL : float
KL divergence.
"""
p = self
exp_lambda = p.expPrecision()
exp_log_lambda = p.expLogPrecision()
return (.5 * (np.log(p.kappa) - np.log(q.kappa))
- .5 * (1 - q.kappa * (1./p.kappa + exp_lambda * (p.mu - q.mu)**2))
- (gammaln(p.alpha) - gammaln(q.alpha))
+ (p.alpha * np.log(p.beta) - q.alpha * np.log(q.beta))
+ exp_log_lambda * (p.alpha - q.alpha)
- exp_lambda * (p.beta - q.beta)).sum()
def newPosterior(self, stats):
"""Create a new Normal-Gamma density.
Create a new Normal-Gamma density given the parameters of the
current model and the statistics provided.
Parameters
----------
stats : :class:MultivariateGaussianDiagCovStats
Accumulated sufficient statistics for the update.
Returns
-------
post : :class:NormalGamma
New Dirichlet density.
"""
# stats[0]: counts
# stats[1]: sum(x)
# stats[2]: sum(x**2)
kappa_n = self.kappa + stats[0]
mu_n = (self.kappa * self.mu + stats[1]) / kappa_n
alpha_n = self.alpha + .5 * stats[0]
v = (self.kappa * self.mu + stats[1])**2
v /= (self.kappa + stats[0])
beta_n = self.beta + 0.5*(-v + stats[2] + self.kappa * self.mu**2)
return NormalGamma(mu_n, kappa_n, alpha_n, beta_n)
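# A minimal usage sketch (synthetic numbers, not from any dataset): build a
# prior, fold in the sufficient statistics [count, sum(x), sum(x**2)] of a
# few observations, then inspect the posterior and its KL divergence from
# the prior.
def example_normal_gamma_update():
    prior = NormalGamma(mu=np.zeros(1), kappa=1.0, alpha=1.0, beta=np.ones(1))
    x = np.array([0.5, 1.0, 1.5])
    stats = [len(x), x.sum(), (x ** 2).sum()]
    posterior = prior.newPosterior(stats)
    return posterior.expPrecision(), posterior.KL(prior)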
|
OliverWalter/amdtk
|
amdtk/models/normal_gamma.py
|
Python
|
bsd-2-clause
| 2,962
|
import sys
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.db import IntegrityError, transaction
from django.contrib.auth.models import User
from annotate.models import *
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('--run', action='store_true', default=False, help='')
pass
@transaction.atomic
def handle(self, *args, **options):
#hours = options['hours'] + 24 * options['days']
#check_and_import(hours, not options['run'], options['check'])
run = options['run']
for line in sys.stdin:
fs = line.strip().split('\t')
if len(fs) == 1:
path = fs[0]
meta = path
elif len(fs) == 2:
path, meta = fs
else:
raise Exception("bad line: %s" % line)
if run:
Image.objects.create(path = path, meta = meta)
else:
print(path, meta)
pass
pass
pass
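# A minimal usage note (file name is illustrative): the command reads
# tab-separated "path<TAB>meta" lines from stdin; without --run it only
# prints what it would import, with --run it creates the Image rows, e.g.
#   cat images.tsv | python manage.py import --run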
|
aaalgo/owl
|
annotate/management/commands/import.py
|
Python
|
bsd-2-clause
| 1,100
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('textbox38.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [48060288, 48300032]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
worksheet.insert_chart('E9', chart)
worksheet.insert_image('E25',
self.image_dir + 'red.png',
{'url': 'https://github.com/jmcnamara/foo'})
worksheet.insert_textbox('G25', 'This is some text',
{'url': 'https://github.com/jmcnamara/bar'})
workbook.close()
self.assertExcelEqual()
|
jmcnamara/XlsxWriter
|
xlsxwriter/test/comparison/test_textbox38.py
|
Python
|
bsd-2-clause
| 1,661
|
"""professor => instructor
Revision ID: 3fab9480c190
Revises: 31ded1f6ad6
Create Date: 2014-02-17 00:56:12.566690
"""
# revision identifiers, used by Alembic.
revision = '3fab9480c190'
down_revision = '31ded1f6ad6'
from alembic import op
import sqlalchemy as sa
metadata = sa.MetaData()
role = sa.Table('Role', metadata,
sa.Column('id', sa.Integer()),
sa.Column('name', sa.String(20)),
)
def upgrade():
update_stmt = role.update().where(role.c.name == 'professor').values(name = 'instructor')
op.execute(update_stmt)
def downgrade():
update_stmt = role.update().where(role.c.name == 'instructor').values(name = 'professor')
op.execute(update_stmt)
|
zstars/weblabdeusto
|
server/src/weblab/db/upgrade/regular/versions/3fab9480c190_professor_instructor.py
|
Python
|
bsd-2-clause
| 681
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
import copy
from ..util import import_
from ._base import _NativeCodeBase, _NativeSysBase, _compile_kwargs
pyodeint = import_('pyodeint')
class NativeOdeintCode(_NativeCodeBase):
wrapper_name = '_odeint_wrapper'
def __init__(self, *args, **kwargs):
self.compile_kwargs = copy.deepcopy(_compile_kwargs)
self.compile_kwargs['include_dirs'].append(pyodeint.get_include())
self.compile_kwargs['libraries'].extend(['m'])
super(NativeOdeintCode, self).__init__(*args, **kwargs)
class NativeOdeintSys(_NativeSysBase):
_NativeCode = NativeOdeintCode
_native_name = 'odeint'
|
bjodah/pyodesys
|
pyodesys/native/odeint.py
|
Python
|
bsd-2-clause
| 713
|
# -*- coding: utf-8 -*-
# Author: Florian Mayer <florian.mayer@bitsrc.org>
from __future__ import absolute_import
import shutil
from tempfile import mkdtemp
from datetime import datetime
import pytest
import os
import glob
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_allclose
import sunpy.data.test
from sunpy.spectra.sources.callisto import (
CallistoSpectrogram, query, download, minimal_pairs
)
@pytest.fixture
def CALLISTO_IMAGE():
testpath = sunpy.data.test.rootdir
return os.path.join(testpath, 'BIR_20110922_050000_01.fit')
@pytest.fixture
def CALLISTO_IMAGE_GLOB_KEY():
return 'BIR_*'
@pytest.fixture
def CALLISTO_IMAGE_GLOB_INDEX(CALLISTO_IMAGE, CALLISTO_IMAGE_GLOB_KEY):
testpath = sunpy.data.test.rootdir
res = glob.glob(os.path.join(testpath, CALLISTO_IMAGE_GLOB_KEY))
return res.index(CALLISTO_IMAGE)
def test_read(CALLISTO_IMAGE):
ca = CallistoSpectrogram.read(CALLISTO_IMAGE)
assert ca.start == datetime(2011, 9, 22, 5, 0, 0, 454000)
assert ca.t_init == 18000.0
assert ca.shape == (200, 3600)
assert ca.t_delt == 0.25
# Test linearity of time axis.
assert np.array_equal(
ca.time_axis, np.linspace(0, 0.25 * (ca.shape[1] - 1), ca.shape[1])
)
assert ca.dtype == np.uint8
@pytest.mark.online
def test_query():
URL = 'http://soleil.i4ds.ch/solarradio/data/2002-20yy_Callisto/2011/09/22/'
result = list(query(
datetime(2011, 9, 22, 5), datetime(2011, 9, 22, 6), set(["BIR"])
))
RESULTS = [
"BIR_20110922_050000_01.fit.gz",
"BIR_20110922_051500_01.fit.gz",
"BIR_20110922_053000_01.fit.gz",
"BIR_20110922_050000_03.fit.gz",
"BIR_20110922_051500_03.fit.gz",
"BIR_20110922_053000_03.fit.gz",
"BIR_20110922_054500_03.fit.gz",
]
RESULTS.sort()
# Should be sorted anyway, but better to assume as little as possible.
result.sort()
for item in RESULTS:
assert URL + item in result
@pytest.mark.online
@pytest.mark.xfail
def test_query_number():
URL = 'http://soleil.i4ds.ch/solarradio/data/2002-20yy_Callisto/2011/09/22/'
result = list(query(
datetime(2011, 9, 22, 5), datetime(2011, 9, 22, 6), set([("BIR", 1)])
))
RESULTS = [
"BIR_20110922_050000_01.fit.gz",
"BIR_20110922_051500_01.fit.gz",
"BIR_20110922_053000_01.fit.gz",
]
RESULTS.sort()
# Should be sorted anyway, but better to assume as little as possible.
result.sort()
assert len(result) == len(RESULTS)
@pytest.mark.online
@pytest.mark.xfail
def test_download():
directory = mkdtemp()
try:
result = query(
datetime(2011, 9, 22, 5), datetime(2011, 9, 22, 6), set([("BIR", 1)])
)
RESULTS = [
"BIR_20110922_050000_01.fit.gz",
"BIR_20110922_051500_01.fit.gz",
"BIR_20110922_053000_01.fit.gz",
]
download(result, directory)
for item in RESULTS:
assert item in sorted(os.listdir(directory))
finally:
shutil.rmtree(directory)
def test_create_file(CALLISTO_IMAGE):
ca = CallistoSpectrogram.create(CALLISTO_IMAGE)
assert np.array_equal(ca.data, CallistoSpectrogram.read(CALLISTO_IMAGE).data)
def test_create_file_kw(CALLISTO_IMAGE):
ca = CallistoSpectrogram.create(filename=CALLISTO_IMAGE)
assert np.array_equal(ca.data, CallistoSpectrogram.read(CALLISTO_IMAGE).data)
@pytest.mark.online
def test_create_url():
URL = (
"http://soleil.i4ds.ch/solarradio/data/2002-20yy_Callisto/2011/09/22/"
"BIR_20110922_050000_01.fit.gz"
)
ca = CallistoSpectrogram.create(URL)
assert np.array_equal(ca.data, CallistoSpectrogram.read(URL).data)
@pytest.mark.online
def test_create_url_kw():
URL = (
"http://soleil.i4ds.ch/solarradio/data/2002-20yy_Callisto/2011/09/22/"
"BIR_20110922_050000_01.fit.gz"
)
ca = CallistoSpectrogram.create(url=URL)
assert np.array_equal(ca.data, CallistoSpectrogram.read(URL).data)
def test_create_single_glob(CALLISTO_IMAGE, CALLISTO_IMAGE_GLOB_INDEX, CALLISTO_IMAGE_GLOB_KEY):
PATTERN = os.path.join(os.path.dirname(CALLISTO_IMAGE), CALLISTO_IMAGE_GLOB_KEY)
ca = CallistoSpectrogram.create(PATTERN)
assert_allclose(ca[CALLISTO_IMAGE_GLOB_INDEX].data,
CallistoSpectrogram.read(CALLISTO_IMAGE).data)
# seems like this does not work anymore and can't figure out what it is for
#def test_create_single_glob_kw(CALLISTO_IMAGE):
# PATTERN = os.path.join( os.path.dirname(CALLISTO_IMAGE), "BIR_*")
# ca = CallistoSpectrogram.create(singlepattern=PATTERN)
# assert np.array_equal(ca[0].data, CallistoSpectrogram.read(CALLISTO_IMAGE).data)
def test_create_glob_kw(CALLISTO_IMAGE, CALLISTO_IMAGE_GLOB_INDEX, CALLISTO_IMAGE_GLOB_KEY):
PATTERN = os.path.join(
os.path.dirname(CALLISTO_IMAGE),
CALLISTO_IMAGE_GLOB_KEY
)
ca = CallistoSpectrogram.create(pattern=PATTERN)[CALLISTO_IMAGE_GLOB_INDEX]
assert_allclose(ca.data, CallistoSpectrogram.read(CALLISTO_IMAGE).data)
def test_create_glob(CALLISTO_IMAGE_GLOB_KEY):
PATTERN = os.path.join(
os.path.dirname(sunpy.data.test.__file__),
CALLISTO_IMAGE_GLOB_KEY
)
ca = CallistoSpectrogram.create(PATTERN)
assert len(ca) == 2
def test_minimum_pairs_commotative():
A = [0, 1, 2]
B = [1, 2, 3]
first = list(minimal_pairs(A, B))
assert first == [(b, a, d) for a, b, d in minimal_pairs(B, A)]
def test_minimum_pairs_end():
assert (
list(minimal_pairs([0, 1, 2, 4], [1, 2, 3, 4])) ==
[(1, 0, 0), (2, 1, 0), (3, 3, 0)]
)
def test_minimum_pairs_end_more():
assert (
list(minimal_pairs([0, 1, 2, 4, 8], [1, 2, 3, 4])) ==
[(1, 0, 0), (2, 1, 0), (3, 3, 0)]
)
def test_minimum_pairs_end_diff():
assert (
list(minimal_pairs([0, 1, 2, 8], [1, 2, 3, 4])) ==
[(1, 0, 0), (2, 1, 0), (3, 3, 4)]
)
def test_closest():
assert (
list(minimal_pairs([50, 60], [0, 10, 20, 30, 40, 51, 52])) ==
[(0, 5, 1), (1, 6, 8)]
)
def test_homogenize_factor():
a = np.float64(np.random.randint(0, 255, 3600))[np.newaxis, :]
c1 = CallistoSpectrogram(
a,
np.arange(3600),
np.array([1]),
datetime(2011, 1, 1),
datetime(2011, 1, 1, 1),
0,
1,
'Time',
'Frequency',
'Test',
None,
None,
None,
False
)
b = 2 * a
c2 = CallistoSpectrogram(
b,
np.arange(3600),
np.array([1]),
datetime(2011, 1, 1),
datetime(2011, 1, 1, 1),
0,
1,
'Time',
'Frequency',
'Test',
None,
None,
None,
False
)
pairs_indices, factors, constants = c1._homogenize_params(
c2, 0
)
assert pairs_indices == [(0, 0)]
assert_array_almost_equal(factors, [0.5], 2)
assert_array_almost_equal(constants, [0], 2)
assert_array_almost_equal(factors[0] * b + constants[0], a)
def test_homogenize_constant():
a = np.float64(np.random.randint(0, 255, 3600))[np.newaxis, :]
c1 = CallistoSpectrogram(
a,
np.arange(3600),
np.array([1]),
datetime(2011, 1, 1),
datetime(2011, 1, 1, 1),
0,
1,
'Time',
'Frequency',
'Test',
None,
None,
None,
False
)
b = a + 10
c2 = CallistoSpectrogram(
b,
np.arange(3600),
np.array([1]),
datetime(2011, 1, 1),
datetime(2011, 1, 1, 1),
0,
1,
'Time',
'Frequency',
'Test',
None,
None,
None,
False
)
pairs_indices, factors, constants = c1._homogenize_params(
c2, 0
)
assert pairs_indices == [(0, 0)]
assert_array_almost_equal(factors, [1], 2)
assert_array_almost_equal(constants, [-10], 2)
assert_array_almost_equal(factors[0] * b + constants[0], a)
def test_homogenize_both():
a = np.float64(np.random.randint(0, 255, 3600))[np.newaxis, :]
c1 = CallistoSpectrogram(
a,
np.arange(3600),
np.array([1]),
datetime(2011, 1, 1),
datetime(2011, 1, 1, 1),
0,
1,
'Time',
'Frequency',
'Test',
None,
None,
None,
False
)
b = 2 * a + 1
c2 = CallistoSpectrogram(
b,
np.arange(3600),
np.array([1]),
datetime(2011, 1, 1),
datetime(2011, 1, 1, 1),
0,
1,
'Time',
'Frequency',
'Test',
None,
None,
None,
False
)
pairs_indices, factors, constants = c1._homogenize_params(
c2, 0
)
assert pairs_indices == [(0, 0)]
assert_array_almost_equal(factors, [0.5], 2)
assert_array_almost_equal(constants, [-0.5], 2)
assert_array_almost_equal(factors[0] * b + constants[0], a)
def test_homogenize_rightfq():
a = np.float64(np.random.randint(0, 255, 3600))[np.newaxis, :]
c1 = CallistoSpectrogram(
a,
np.arange(3600),
np.array([1]),
datetime(2011, 1, 1),
datetime(2011, 1, 1, 1),
0,
1,
'Time',
'Frequency',
'Test',
None,
None,
None,
False
)
b = 2 * a + 1
c2 = CallistoSpectrogram(
np.concatenate([
np.arange(3600)[np.newaxis, :], b,
np.arange(3600)[np.newaxis, :]
], 0),
np.arange(3600),
np.array([0, 1, 2]),
datetime(2011, 1, 1),
datetime(2011, 1, 1, 1),
0,
1,
'Time',
'Frequency',
'Test',
None,
None,
None,
False
)
pairs_indices, factors, constants = c1._homogenize_params(
c2, 0
)
assert pairs_indices == [(0, 1)]
assert_array_almost_equal(factors, [0.5], 2)
assert_array_almost_equal(constants, [-0.5], 2)
assert_array_almost_equal(factors[0] * b + constants[0], a)
@pytest.mark.online
def test_extend(CALLISTO_IMAGE):
im = CallistoSpectrogram.create(CALLISTO_IMAGE)
im2 = im.extend()
# Not too stable test, but works.
assert im2.data.shape == (200, 7200)
|
Alex-Ian-Hamilton/sunpy
|
sunpy/spectra/tests/test_callisto.py
|
Python
|
bsd-2-clause
| 10,406
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import os
from falafel import findout_terminal_width
from falafel import test_list
from falafel.runners import FalafelTestRunner
from falafel.loaders import FalafelTestLoader
parser = argparse.ArgumentParser(description='custom test runner for '
'project1')
parser.add_argument("-S", "--suite", help="Test suite",
choices=['moda', 'modb'], type=str, required=True)
parser.add_argument("--test", action='append', help="Testcase(s)/Test(s) "
"to run")
parser.add_argument("-L", "--list", action="store_true",
help="List tests which match the specified "
"suite/testcases(s)")
parser.add_argument("--pdb", action="store_true",
help="drop into pdb/ipdb in case of a failure/error")
parser.add_argument("--log", action="store_true",
help='write all log messages + header and footer of '
                         'each test to a logfile in a directory specified with '
                         '--logdirectory. If the logdirectory does not exist '
                         'it gets created automatically.')
parser.add_argument("-d", "--logdirectory",
help="log directory [default=%(default)s]",
default='log')
parser.add_argument("--debug", action="store_true",
help="print logging messages")
parser.add_argument("--interactive", '--enable-interactive-tests',
action="store_true", dest='interactive',
help="if not set then all interactive tests are skipped")
args = parser.parse_args()
pkg = args.suite
allowed_tests = args.test
width = findout_terminal_width()
print(" info ".center(width, '='))
print("suite: %s" % pkg)
print("tests: %s" % allowed_tests)
print("interactive tests: %s" % args.interactive)
print('=' * width)
if args.interactive:
os.environ['INTERACTIVE_TESTS'] = '1'
loader = FalafelTestLoader(allowed_tests=allowed_tests)
suite = loader.discover('mypackage.' + pkg)
tdata = []
if args.debug or args.list:
with_skipped = args.list
tdata = test_list(suite, with_skipped=with_skipped)
if not with_skipped:
print("The following tests will be run:", end='')
try:
from tabulate import tabulate
except ImportError:
for data in tdata:
print(" %-30s\tin %-30s\tskipped: %s" % data)
else:
headers = ['class.method', 'module']
if with_skipped:
headers.append('skipped')
print('\n%s' % tabulate(tdata, headers=headers))
print("%d tests available" % len(tdata))
if args.list:
exit()
logdir = args.logdirectory if args.log else None
runner = FalafelTestRunner(
verbosity=2, logger='st', debug=args.debug,
logdirectory=logdir, width=width, pdb=args.pdb)
runner.run(suite)
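# Hypothetical invocation sketches (not part of the original example). The
# suite and test names below are illustrative assumptions, but the flags
# match the argparse options defined above:
#
#   python testrunner.py --suite moda --list
#   python testrunner.py --suite moda --test TestFoo.test_bar --log --debug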
|
thisch/python-falafel
|
examples/project1/testrunner.py
|
Python
|
bsd-2-clause
| 2,913
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
"""
import logging
import numpy as np
from numpy import ma
from cotede.qctests import QCCheckVar
try:
import pandas as pd
PANDAS_AVAILABLE = True
except ImportError:
PANDAS_AVAILABLE = False
module_logger = logging.getLogger(__name__)
def gradient(x):
return curvature(x)
def _curvature_pandas(x):
"""Equivalent to curvature() but using pandas
It looks like the numpy implementation is faster even for larger datasets,
so the default is with numpy.
Note
----
- In the future this will be useful to handle specific window widths.
"""
if isinstance(x, ma.MaskedArray):
x[x.mask] = np.nan
x = x.data
if not PANDAS_AVAILABLE:
return curvature(x)
if hasattr(x, "to_series"):
x = x.to_series()
elif not isinstance(x, pd.Series):
x = pd.Series(x)
y = np.nan * x
y = x - (x.shift(1) + x.shift(-1)) / 2.0
return np.array(y)
def curvature(x):
"""Curvature of a timeseries
    This test is commonly known as gradient for historical reasons, but that
    is a misleading name since it does not compute the actual gradient
    (d/dx + d/dy + d/dz). Instead, as defined by GTSPP, EuroGOOS and others,
    it is the curvature of the timeseries.
    Note
    ----
    - A pandas.Series operates on indexes, so it must be handled differently.
      In that case, use _curvature_pandas instead.
"""
if isinstance(x, ma.MaskedArray):
x[x.mask] = np.nan
x = x.data
if PANDAS_AVAILABLE and isinstance(x, pd.Series):
return _curvature_pandas(x)
x = np.atleast_1d(x)
y = np.nan * x
y[1:-1] = x[1:-1] - (x[:-2] + x[2:]) / 2.0
return y
class Gradient(QCCheckVar):
def set_features(self):
self.features = {"gradient": curvature(self.data[self.varname])}
def test(self):
self.flags = {}
try:
threshold = self.cfg["threshold"]
except KeyError:
module_logger.debug(
"Deprecated cfg format. It should contain a threshold item."
)
threshold = self.cfg
assert (
(np.size(threshold) == 1)
and (threshold is not None)
and (np.isfinite(threshold))
)
flag = np.zeros(np.shape(self.data[self.varname]), dtype="i1")
feature = np.absolute(self.features["gradient"])
flag[feature > threshold] = self.flag_bad
flag[feature <= threshold] = self.flag_good
x = np.atleast_1d(self.data[self.varname])
flag[ma.getmaskarray(x) | ~np.isfinite(x)] = 9
self.flags["gradient"] = flag
|
castelao/CoTeDe
|
cotede/qctests/gradient.py
|
Python
|
bsd-3-clause
| 2,756
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 30, transform = "Quantization", sigma = 0.0, exog_count = 0, ar_order = 0);
|
antoinecarme/pyaf
|
tests/artificial/transf_Quantization/trend_PolyTrend/cycle_30/ar_/test_artificial_1024_Quantization_PolyTrend_30__0.py
|
Python
|
bsd-3-clause
| 268
|
from django.conf.urls import *
from django.conf import settings
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# django-flags for internationalization
(r'^lang/', include('sampleproject.flags.urls')),
# FOR DEBUG AND TEST ONLY
(r'^.*switch/(?P<username>.*)/(?P<password>.*)/$', 'goflow.workflow.views.debug_switch_user'),
# home page
(r'^$', 'sampleproject.sampleapp.views.home'),
# home redirection
(r'^.*home/$', 'django.views.generic.simple.redirect_to', {'url':'/'}),
# login/logout
(r'^logout/$', 'django.contrib.auth.views.logout'),
(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name':'goflow/login.html'}),
# Example:
(r'^sampleapp/', include('sampleproject.sampleapp.urls')),
# Uncomment the next line to enable admin documentation:
(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# FOR TEST - insert before admin/(.*)
(r'^admin/workflow/', include('goflow.apptools.urls_admin')),
# special
(r'^admin/apptools/', include('goflow.apptools.urls_admin')),
    # Uncomment the next line to enable the admin:
(r'^admin/(.*)', admin.site.root),
# workflow pages
(r'^workflow/', include('goflow.urls')),
# static files
(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
)
|
mikewolfli/django-goflow
|
sampleproject/urls.py
|
Python
|
bsd-3-clause
| 1,453
|
from django.test import TestCase
from freezegun import freeze_time
from unittest.mock import patch
from testil import eq
from corehq.util.soft_assert.core import SoftAssert
from casexml.apps.case.exceptions import ReconciliationError
from casexml.apps.case.xml.parser import CaseUpdateAction, KNOWN_PROPERTIES
from corehq.form_processor.backends.sql.processor import FormProcessorSQL
from corehq.form_processor.backends.sql.update_strategy import SqlCaseUpdateStrategy
from corehq.form_processor.interfaces.processor import ProcessedForms
from corehq.form_processor.models import (
CommCareCase,
CaseTransaction,
RebuildWithReason,
)
from corehq.form_processor.utils import TestFormMetadata
from corehq.form_processor.tests.utils import sharded, FormProcessorTestUtils
from corehq.util.test_utils import get_form_ready_to_save
import uuid
from datetime import datetime
@sharded
class SqlUpdateStrategyTest(TestCase):
DOMAIN = 'update-strategy-test-' + uuid.uuid4().hex
USER_ID = 'mr_wednesday_'
@classmethod
def setUpClass(cls):
super(SqlUpdateStrategyTest, cls).setUpClass()
FormProcessorTestUtils.delete_all_sql_forms()
FormProcessorTestUtils.delete_all_sql_cases()
@classmethod
def tearDownClass(cls):
FormProcessorTestUtils.delete_all_sql_forms()
FormProcessorTestUtils.delete_all_sql_cases()
super(SqlUpdateStrategyTest, cls).tearDownClass()
@patch.object(SoftAssert, '_call')
def test_reconcile_transactions(self, soft_assert_mock):
""" tests a transanction with an early client date and late server date """
with freeze_time("2018-10-10"):
case = self._create_case()
with freeze_time("2018-10-11"):
new_old_xform = self._create_form()
with freeze_time("2018-10-08"):
new_old_trans = self._create_case_transaction(case, new_old_xform)
with freeze_time("2018-10-11"):
self._save(new_old_xform, case, new_old_trans)
case = CommCareCase.objects.get_case(case.case_id)
update_strategy = SqlCaseUpdateStrategy(case)
self.assertTrue(update_strategy.reconcile_transactions_if_necessary())
self._check_for_reconciliation_error_soft_assert(soft_assert_mock)
case.save(with_tracked_models=True)
case = CommCareCase.objects.get_case(case.case_id)
update_strategy = SqlCaseUpdateStrategy(case)
self.assertFalse(update_strategy.reconcile_transactions_if_necessary())
self._check_for_reconciliation_error_soft_assert(soft_assert_mock)
def test_reconcile_not_necessary(self):
with freeze_time("2018-10-10"):
case = self._create_case()
with freeze_time("2018-10-11"):
new_old_xform = self._create_form()
new_old_trans = self._create_case_transaction(case, new_old_xform)
self._save(new_old_xform, case, new_old_trans)
case = CommCareCase.objects.get_case(case.case_id)
update_strategy = SqlCaseUpdateStrategy(case)
self.assertFalse(update_strategy.reconcile_transactions_if_necessary())
def test_ignores_before_rebuild_transaction(self):
with freeze_time("2018-10-10"):
case = self._create_case()
with freeze_time("2018-10-11"):
new_old_xform = self._create_form()
with freeze_time("2018-10-08"):
new_old_trans = self._create_case_transaction(case, new_old_xform)
with freeze_time("2018-10-11"):
self._save(new_old_xform, case, new_old_trans)
self.assertFalse(case.check_transaction_order())
with freeze_time("2018-10-13"):
new_rebuild_xform = self._create_form()
rebuild_detail = RebuildWithReason(reason="shadow's golden coin")
rebuild_transaction = CaseTransaction.rebuild_transaction(case, rebuild_detail)
self._save(new_rebuild_xform, case, rebuild_transaction)
case = CommCareCase.objects.get_case(case.case_id)
update_strategy = SqlCaseUpdateStrategy(case)
self.assertFalse(update_strategy.reconcile_transactions_if_necessary())
def test_first_transaction_not_create(self):
with freeze_time("2018-10-10"):
case = self._create_case()
with freeze_time("2018-10-08"):
new_old_xform = self._create_form()
new_old_trans = self._create_case_transaction(case, new_old_xform)
self._save(new_old_xform, case, new_old_trans)
self.assertTrue(case.check_transaction_order())
case = CommCareCase.objects.get_case(case.case_id)
update_strategy = SqlCaseUpdateStrategy(case)
self.assertRaises(ReconciliationError, update_strategy.reconcile_transactions)
@patch.object(SoftAssert, '_call')
def test_reconcile_transactions_within_fudge_factor(self, soft_assert_mock):
""" tests a transanction with an early client date and late server date """
with freeze_time("2018-10-10"):
case = self._create_case()
with freeze_time("2018-10-11 06:00"):
new_old_xform = self._create_form()
with freeze_time("2018-10-10 18:00"):
new_old_trans = self._create_case_transaction(case, new_old_xform)
with freeze_time("2018-10-11 06:00"):
self._save(new_old_xform, case, new_old_trans)
with freeze_time("2018-10-11"):
new_old_xform = self._create_form()
new_old_trans = self._create_case_transaction(case, new_old_xform)
self._save(new_old_xform, case, new_old_trans)
case = CommCareCase.objects.get_case(case.case_id)
update_strategy = SqlCaseUpdateStrategy(case)
self.assertTrue(update_strategy.reconcile_transactions_if_necessary())
self._check_for_reconciliation_error_soft_assert(soft_assert_mock)
case.save(with_tracked_models=True)
case = CommCareCase.objects.get_case(case.case_id)
update_strategy = SqlCaseUpdateStrategy(case)
self.assertFalse(update_strategy.reconcile_transactions_if_necessary())
self._check_for_reconciliation_error_soft_assert(soft_assert_mock)
def _create_form(self, user_id=None, received_on=None):
"""
Create the models directly so that these tests aren't dependent on any
other apps.
:return: XFormInstance
"""
user_id = user_id or 'mr_wednesday'
received_on = received_on or datetime.utcnow()
metadata = TestFormMetadata(
domain=self.DOMAIN,
received_on=received_on,
user_id=user_id
)
form = get_form_ready_to_save(metadata)
return form
def _create_case_transaction(self, case, form=None, submitted_on=None, action_types=None):
form = form or self._create_form()
submitted_on = submitted_on or datetime.utcnow()
return CaseTransaction.form_transaction(case, form, submitted_on, action_types)
def _create_case(self, case_type=None, user_id=None, case_id=None):
case_id = case_id or uuid.uuid4().hex
user_id = user_id or 'mr_wednesday'
utcnow = datetime.utcnow()
case = CommCareCase(
case_id=case_id,
domain=self.DOMAIN,
type=case_type or '',
owner_id=user_id,
opened_on=utcnow,
modified_on=utcnow,
modified_by=utcnow,
server_modified_on=utcnow
)
form = self._create_form(user_id, utcnow)
trans = self._create_case_transaction(case, form, utcnow, action_types=[128])
self._save(form, case, trans)
return CommCareCase.objects.get_case(case_id)
def _save(self, form, case, transaction):
# disable publish to Kafka to avoid intermittent errors caused by
# the nexus of kafka's consumer thread and freeze_time
with patch.object(FormProcessorSQL, "publish_changes_to_kafka"):
case.track_create(transaction)
FormProcessorSQL.save_processed_models(ProcessedForms(form, []), [case])
def _check_for_reconciliation_error_soft_assert(self, soft_assert_mock):
for call in soft_assert_mock.call_args_list:
self.assertNotIn('ReconciliationError', call[0][1])
soft_assert_mock.reset_mock()
def test_update_known_properties_with_empty_values():
def test(prop):
case = SqlCaseUpdateStrategy.case_implementation_class()
setattr(case, prop, "value")
action = CaseUpdateAction(block=None, **{prop: ""})
SqlCaseUpdateStrategy(case)._update_known_properties(action)
eq(getattr(case, prop), "")
# verify that at least one property will be tested
assert any(v is not None for v in KNOWN_PROPERTIES.values()), KNOWN_PROPERTIES
for prop, default in KNOWN_PROPERTIES.items():
if default is not None:
yield test, prop
|
dimagi/commcare-hq
|
corehq/form_processor/tests/test_sql_update_strategy.py
|
Python
|
bsd-3-clause
| 8,947
|
"""
subplot - Manage modern mode figure subplot configuration and selection.
"""
import contextlib
from pygmt.clib import Session
from pygmt.exceptions import GMTInvalidInput
from pygmt.helpers import (
build_arg_string,
fmt_docstring,
is_nonstr_iter,
kwargs_to_strings,
use_alias,
)
@fmt_docstring
@contextlib.contextmanager
@use_alias(
Ff="figsize",
Fs="subsize",
A="autolabel",
B="frame",
C="clearance",
J="projection",
M="margins",
R="region",
SC="sharex",
SR="sharey",
T="title",
V="verbose",
X="xshift",
Y="yshift",
)
@kwargs_to_strings(Ff="sequence", Fs="sequence", M="sequence", R="sequence")
def subplot(self, nrows=1, ncols=1, **kwargs):
r"""
Create multi-panel subplot figures.
This function is used to split the current figure into a rectangular layout
of subplots that each may contain a single self-contained figure. Begin by
defining the layout of the entire multi-panel illustration. Several
parameters are available to specify the systematic layout, labeling,
dimensions, and more for the subplots.
Full option list at :gmt-docs:`subplot.html#synopsis-begin-mode`
{aliases}
Parameters
----------
nrows : int
Number of vertical rows of the subplot grid.
ncols : int
Number of horizontal columns of the subplot grid.
figsize : tuple
Specify the final figure dimensions as (*width*, *height*).
subsize : tuple
Specify the dimensions of each subplot directly as (*width*, *height*).
Note that only one of ``figsize`` or ``subsize`` can be provided at
once.
autolabel : bool or str
[*autolabel*][**+c**\ *dx*\ [/*dy*]][**+g**\ *fill*][**+j**\|\ **J**\
*refpoint*][**+o**\ *dx*\ [/*dy*]][**+p**\ *pen*][**+r**\|\ **R**]
[**+v**].
Specify automatic tagging of each subplot. Append either a number or
letter [a]. This sets the tag of the first, top-left subplot and others
follow sequentially. Surround the number or letter by parentheses on
any side if these should be typeset as part of the tag. Use
**+j**\|\ **J**\ *refpoint* to specify where the tag should be placed
in the subplot [TL]. Note: **+j** sets the justification of the tag to
*refpoint* (suitable for interior tags) while **+J** instead selects
the mirror opposite (suitable for exterior tags). Append
**+c**\ *dx*\[/*dy*] to set the clearance between the tag and a
surrounding text box requested via **+g** or **+p** [3p/3p, i.e., 15%
of the :gmt-term:`FONT_TAG` size dimension]. Append **+g**\ *fill* to
paint the tag's text box with *fill* [no painting]. Append
**+o**\ *dx*\ [/*dy*] to offset the tag's reference point in the
direction implied by the justification [4p/4p, i.e., 20% of the
:gmt-term:`FONT_TAG` size]. Append **+p**\ *pen* to draw the outline of
the tag's text box using selected *pen* [no outline]. Append **+r** to
typeset your tag numbers using lowercase Roman numerals; use **+R** for
uppercase Roman numerals [Arabic numerals]. Append **+v** to increase
tag numbers vertically down columns [horizontally across rows].
{B}
clearance : str or list
[*side*]\ *clearance*.
Reserve a space of dimension *clearance* between the margin and the
subplot on the specified side, using *side* values from **w**, **e**,
**s**, or **n**; or **x** for both **w** and **e**; or **y** for both
**s** and **n**. No *side* means all sides (i.e. ``clearance='1c'``
would set a clearance of 1 cm on all sides). The option is repeatable
to set aside space on more than one side (e.g. ``clearance=['w1c',
's2c']`` would set a clearance of 1 cm on west side and 2 cm on south
side). Such space will be left untouched by the main map plotting but
can be accessed by modules that plot scales, bars, text, etc.
{J}
margins : str or list
This is margin space that is added between neighboring subplots (i.e.,
the interior margins) in addition to the automatic space added for tick
marks, annotations, and labels. The margins can be specified as either:
- a single value (for same margin on all sides). E.g. '5c'.
- a pair of values (for setting separate horizontal and vertical
margins). E.g. ['5c', '3c'].
- a set of four values (for setting separate left, right, bottom, and
top margins). E.g. ['1c', '2c', '3c', '4c'].
The actual gap created is always a sum of the margins for the two
opposing sides (e.g., east plus west or south plus north margins)
[Default is half the primary annotation font size, giving the full
annotation font size as the default gap].
{R}
sharex : bool or str
Set subplot layout for shared x-axes. Use when all subplots in a column
share a common *x*-range. If ``sharex=True``, the first (i.e.,
**t**\ op) and the last (i.e., **b**\ ottom) rows will have
*x*-annotations; use ``sharex='t'`` or ``sharex='b'`` to select only
one of those two rows [both]. Append **+l** if annotated *x*-axes
should have a label [none]; optionally append the label if it is the
same for the entire subplot. Append **+t** to make space for subplot
titles for each row; use **+tc** for top row titles only [no subplot
titles].
sharey : bool or str
Set subplot layout for shared y-axes. Use when all subplots in a row
share a common *y*-range. If ``sharey=True``, the first (i.e.,
**l**\ eft) and the last (i.e., **r**\ ight) columns will have
*y*-annotations; use ``sharey='l'`` or ``sharey='r'`` to select only
one of those two columns [both]. Append **+l** if annotated *y*-axes
will have a label [none]; optionally, append the label if it is the
same for the entire subplot. Append **+p** to make all annotations
axis-parallel [horizontal]; if not used you may have to set
``clearance`` to secure extra space for long horizontal annotations.
Notes for ``sharex``/``sharey``:
- Labels and titles that depends on which row or column are specified
as usual via a subplot's own ``frame`` setting.
- Append **+w** to the ``figsize`` or ``subsize`` parameter to draw
horizontal and vertical lines between interior panels using selected
pen [no lines].
title : str
While individual subplots can have titles (see ``sharex``/``sharey`` or
``frame``), the entire figure may also have an overarching *heading*
[no heading]. Font is determined by setting :gmt-term:`FONT_HEADING`.
{V}
{XY}
"""
kwargs = self._preprocess(**kwargs) # pylint: disable=protected-access
# allow for spaces in string without needing double quotes
if isinstance(kwargs.get("A"), str):
kwargs["A"] = f'"{kwargs.get("A")}"'
kwargs["T"] = f'"{kwargs.get("T")}"' if kwargs.get("T") else None
if nrows < 1 or ncols < 1:
raise GMTInvalidInput("Please ensure that both 'nrows'>=1 and 'ncols'>=1.")
if kwargs.get("Ff") and kwargs.get("Fs"):
raise GMTInvalidInput(
"Please provide either one of 'figsize' or 'subsize' only."
)
with Session() as lib:
try:
arg_str = " ".join(["begin", f"{nrows}x{ncols}", build_arg_string(kwargs)])
lib.call_module("subplot", arg_str)
yield
finally:
v_arg = build_arg_string({"V": kwargs.get("V")})
lib.call_module("subplot", f"end {v_arg}")
@fmt_docstring
@contextlib.contextmanager
@use_alias(A="fixedlabel", C="clearance", V="verbose")
def set_panel(self, panel=None, **kwargs):
r"""
Set the current subplot panel to plot on.
Before you start plotting you must first select the active subplot. Note:
If any *projection* option is passed with the question mark **?** as scale
or width when plotting subplots, then the dimensions of the map are
automatically determined by the subplot size and your region. For Cartesian
plots: If you want the scale to apply equally to both dimensions then you
must specify ``projection="x"`` [The default ``projection="X"`` will fill
the subplot by using unequal scales].
{aliases}
Parameters
----------
panel : str or list
*row,col*\|\ *index*.
Sets the current subplot until further notice. **Note**: First *row*
or *col* is 0, not 1. If not given we go to the next subplot by order
specified via ``autolabel`` in :meth:`pygmt.Figure.subplot`. As an
alternative, you may bypass using :meth:`pygmt.Figure.set_panel` and
instead supply the common option **panel**\ =[*row,col*] to the first
plot command you issue in that subplot. GMT maintains information about
the current figure and subplot. Also, you may give the one-dimensional
*index* instead which starts at 0 and follows the row or column order
set via ``autolabel`` in :meth:`pygmt.Figure.subplot`.
fixedlabel : str
Overrides the automatic labeling with the given string. No modifiers
are allowed. Placement, justification, etc. are all inherited from how
``autolabel`` was specified by the initial :meth:`pygmt.Figure.subplot`
command.
clearance : str or list
[*side*]\ *clearance*.
Reserve a space of dimension *clearance* between the margin and the
subplot on the specified side, using *side* values from **w**, **e**,
**s**, or **n**. The option is repeatable to set aside space on more
than one side (e.g. ``clearance=['w1c', 's2c']`` would set a clearance
of 1 cm on west side and 2 cm on south side). Such space will be left
untouched by the main map plotting but can be accessed by modules that
plot scales, bars, text, etc. This setting overrides the common
clearances set by ``clearance`` in the initial
:meth:`pygmt.Figure.subplot` call.
{V}
"""
kwargs = self._preprocess(**kwargs) # pylint: disable=protected-access
    # allow for spaces in string without needing double quotes
kwargs["A"] = f'"{kwargs.get("A")}"' if kwargs.get("A") is not None else None
# convert tuple or list to comma-separated str
panel = ",".join(map(str, panel)) if is_nonstr_iter(panel) else panel
with Session() as lib:
arg_str = " ".join(["set", f"{panel}", build_arg_string(kwargs)])
lib.call_module(module="subplot", args=arg_str)
yield
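# A minimal usage sketch (not part of the original pygmt module), assuming a
# working GMT/pygmt installation; the grid shape, figure size and region used
# below are illustrative values only.
if __name__ == "__main__":
    import pygmt
    fig = pygmt.Figure()
    with fig.subplot(nrows=2, ncols=2, figsize=("15c", "10c"), autolabel=True):
        for index in range(4):
            with fig.set_panel(panel=index):
                # "X?" lets the subplot machinery pick the projection width
                fig.basemap(region=[0, 10, 0, 10], projection="X?", frame="af")
    fig.show()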
|
GenericMappingTools/gmt-python
|
pygmt/src/subplot.py
|
Python
|
bsd-3-clause
| 10,779
|
#!/usr/bin/env python
"""
EVENNIA SERVER LAUNCHER SCRIPT
This is the start point for running Evennia.
Sets the appropriate environmental variables and launches the server
and portal through the evennia_runner. Run without arguments to get a
menu. Run the script with the -h flag to see usage information.
"""
from __future__ import print_function
from builtins import input, range
import os
import sys
import signal
import shutil
import importlib
from argparse import ArgumentParser
from subprocess import Popen, check_output, call, CalledProcessError, STDOUT
import django
# Signal processing
SIG = signal.SIGINT
# Set up the main python paths to Evennia
EVENNIA_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import evennia
EVENNIA_LIB = os.path.join(os.path.dirname(os.path.abspath(evennia.__file__)))
EVENNIA_SERVER = os.path.join(EVENNIA_LIB, "server")
EVENNIA_RUNNER = os.path.join(EVENNIA_SERVER, "evennia_runner.py")
EVENNIA_TEMPLATE = os.path.join(EVENNIA_LIB, "game_template")
EVENNIA_PROFILING = os.path.join(EVENNIA_SERVER, "profiling")
EVENNIA_DUMMYRUNNER = os.path.join(EVENNIA_PROFILING, "dummyrunner.py")
TWISTED_BINARY = "twistd"
# Game directory structure
SETTINGFILE = "settings.py"
SERVERDIR = "server"
CONFDIR = os.path.join(SERVERDIR, "conf")
SETTINGS_PATH = os.path.join(CONFDIR, SETTINGFILE)
SETTINGS_DOTPATH = "server.conf.settings"
CURRENT_DIR = os.getcwd()
GAMEDIR = CURRENT_DIR
# Operational setup
SERVER_LOGFILE = None
PORTAL_LOGFILE = None
HTTP_LOGFILE = None
SERVER_PIDFILE = None
PORTAL_PIDFILE = None
SERVER_RESTART = None
PORTAL_RESTART = None
SERVER_PY_FILE = None
PORTAL_PY_FILE = None
PYTHON_MIN = '2.7'
TWISTED_MIN = '16.0.0'
DJANGO_MIN = '1.8'
DJANGO_REC = '1.9'
sys.path[1] = EVENNIA_ROOT
#------------------------------------------------------------
#
# Messages
#
#------------------------------------------------------------
CREATED_NEW_GAMEDIR = \
"""
Welcome to Evennia!
Created a new Evennia game directory '{gamedir}'.
You can now optionally edit your new settings file
at {settings_path}. If you don't, the defaults
will work out of the box. When ready to continue, 'cd' to your
game directory and run:
evennia migrate
This initializes the database. To start the server for the first
time, run:
evennia start
Make sure to create a superuser when asked for it (the email can
be blank if you want). You should now be able to (by default)
connect to your server on 'localhost', port 4000 using a
telnet/mud client or http://localhost:8000 using your web browser.
    If things don't work, check that those ports are open.
"""
ERROR_INPUT = \
"""
Command
{args} {kwargs}
raised an error: '{traceback}'.
"""
ERROR_NO_GAMEDIR = \
"""
ERROR: No Evennia settings file was found. Evennia looks for the
file in your game directory as server/conf/settings.py.
You must run this command from somewhere inside a valid game
directory first created with
evennia --init mygamename
    If you are in a game directory but are missing a settings.py file,
it may be because you have git-cloned an existing game directory.
The settings.py file is not cloned by git (it's in .gitignore)
since it can contain sensitive and/or server-specific information.
You can create a new, empty settings file with
evennia --initsettings
If cloning the settings file is not a problem you could manually
copy over the old settings file or remove its entry in .gitignore
"""
WARNING_MOVING_SUPERUSER = \
"""
WARNING: Evennia expects a Player superuser with id=1. No such
Player was found. However, another superuser ('{other_key}',
id={other_id}) was found in the database. If you just created this
superuser and still see this text it is probably due to the
database being flushed recently - in this case the database's
internal auto-counter might just start from some value higher than
one.
We will fix this by assigning the id 1 to Player '{other_key}'.
Please confirm this is acceptable before continuing.
"""
WARNING_RUNSERVER = \
"""
WARNING: There is no need to run the Django development
webserver to test out Evennia web features (the web client
will in fact not work since the Django test server knows
nothing about MUDs). Instead, just start Evennia with the
webserver component active (this is the default).
"""
ERROR_SETTINGS = \
"""
ERROR: There was an error importing Evennia's config file
{settingspath}.
There is usually one of three reasons for this:
1) You are not running this command from your game directory.
Change directory to your game directory and try again (or
create a new game directory using evennia --init <dirname>)
2) The settings file contains a syntax error. If you see a
traceback above, review it, resolve the problem and try again.
3) Django is not correctly installed. This usually shows as
errors mentioning 'DJANGO_SETTINGS_MODULE'. If you run a
       virtual machine, it might be worth restarting it to see if
this resolves the issue.
""".format(settingsfile=SETTINGFILE, settingspath=SETTINGS_PATH)
ERROR_INITSETTINGS = \
"""
ERROR: 'evennia --initsettings' must be called from the root of
your game directory, since it tries to (re)create the new
settings.py file in a subfolder server/conf/.
"""
RECREATED_SETTINGS = \
"""
(Re)created an empty settings file in server/conf/settings.py.
Note that if you were using an existing database, the password
salt of this new settings file will be different from the old one.
This means that any existing players may not be able to log in to
their accounts with their old passwords.
"""
ERROR_DATABASE = \
"""
ERROR: Your database does not seem to be set up correctly.
(error was '{traceback}')
Standing in your game directory, run
evennia migrate
to initialize/update the database according to your settings.
"""
ERROR_WINDOWS_WIN32API = \
"""
ERROR: Unable to import win32api, which Twisted requires to run.
You may download it from:
http://sourceforge.net/projects/pywin32/files/pywin32/
If you are running in a virtual environment, browse to the
location of the latest win32api exe file for your computer and
Python version and copy the url to it; then paste it into a call
to easy_install:
easy_install http://<url to win32api exe>
"""
INFO_WINDOWS_BATFILE = \
"""
INFO: Since you are running Windows, a file 'twistd.bat' was
created for you. This is a simple batch file that tries to call
the twisted executable. Evennia determined this to be:
{twistd_path}
If you run into errors at startup you might need to edit
twistd.bat to point to the actual location of the Twisted
executable (usually called twistd.py) on your machine.
This procedure is only done once. Run evennia.py again when you
are ready to start the server.
"""
CMDLINE_HELP = \
"""
Starts or operates the Evennia MU* server. Also allows for
initializing a new game directory and manages the game's database.
You can also pass most standard django-admin arguments and
options.
"""
VERSION_INFO = \
"""
Evennia {version}
OS: {os}
Python: {python}
Twisted: {twisted}
Django: {django}{about}
"""
ABOUT_INFO = \
"""
Evennia MUD/MUX/MU* development system
Licence: BSD 3-Clause Licence
Web: http://www.evennia.com
Irc: #evennia on FreeNode
Forum: http://www.evennia.com/discussions
Maintainer (2010-): Griatch (griatch AT gmail DOT com)
Maintainer (2006-10): Greg Taylor
Use -h for command line options.
"""
HELP_ENTRY = \
"""
Enter 'evennia -h' for command-line options.
Use option (1) in a production environment. During development (2) is
    usually enough; portal debugging is only useful if you are
adding new protocols or are debugging Evennia itself.
Reload with (5) to update the server with your changes without
disconnecting any players.
Note: Reload and stop are sometimes poorly supported in Windows. If you
have issues, log into the game to stop or restart the server instead.
"""
MENU = \
"""
+----Evennia Launcher-------------------------------------------+
| |
+--- Starting --------------------------------------------------+
| |
| 1) (normal): All output to logfiles |
| 2) (server devel): Server logs to terminal (-i option) |
| 3) (portal devel): Portal logs to terminal |
| 4) (full devel): Both Server and Portal logs to terminal |
| |
+--- Restarting ------------------------------------------------+
| |
| 5) Reload the Server |
| 6) Reload the Portal (only works with portal/full debug) |
| |
+--- Stopping --------------------------------------------------+
| |
| 7) Stopping both Portal and Server |
| 8) Stopping only Server |
| 9) Stopping only Portal |
| |
+---------------------------------------------------------------+
| h) Help i) About info q) Abort |
+---------------------------------------------------------------+
"""
ERROR_LOGDIR_MISSING = \
"""
ERROR: One or more log-file directory locations could not be
found:
{logfiles}
This is simple to fix: Just manually create the missing log
directory (or directories) and re-launch the server (the log files
will be created automatically).
(Explanation: Evennia creates the log directory automatically when
    initializing a new game directory. This error usually happens if
you used git to clone a pre-created game directory - since log
files are in .gitignore they will not be cloned, which leads to
the log directory also not being created.)
"""
ERROR_PYTHON_VERSION = \
"""
ERROR: Python {pversion} used. Evennia requires version
{python_min} or higher (but not 3.x).
"""
ERROR_TWISTED_VERSION = \
"""
ERROR: Twisted {tversion} found. Evennia requires
version {twisted_min} or higher.
"""
ERROR_NOTWISTED = \
"""
ERROR: Twisted does not seem to be installed.
"""
ERROR_DJANGO_MIN = \
"""
ERROR: Django {dversion} found. Evennia requires version {django_min}
or higher.
Install it with for example `pip install --upgrade django`
or with `pip install django=={django_min}` to get a specific version.
It's also a good idea to run `evennia migrate` after this upgrade.
"""
NOTE_DJANGO_MIN = \
"""
NOTE: Django {dversion} found. This will work, but v{django_rec}
is recommended for production.
"""
NOTE_DJANGO_NEW = \
"""
NOTE: Django {dversion} found. This is newer than Evennia's
recommended version (v{django_rec}). It might work, but may be new
enough to not be fully tested yet. Report any issues.
"""
ERROR_NODJANGO = \
"""
ERROR: Django does not seem to be installed.
"""
NOTE_KEYBOARDINTERRUPT = \
"""
STOP: Caught keyboard interrupt while in interactive mode.
"""
#------------------------------------------------------------
#
# Functions
#
#------------------------------------------------------------
def evennia_version():
"""
Get the Evennia version info from the main package.
"""
version = "Unknown"
try:
import evennia
version = evennia.__version__
except ImportError:
pass
try:
rev = check_output(
"git rev-parse --short HEAD",
shell=True, cwd=EVENNIA_ROOT, stderr=STDOUT).strip()
version = "%s (rev %s)" % (version, rev)
except (IOError, CalledProcessError):
pass
return version
EVENNIA_VERSION = evennia_version()
def check_main_evennia_dependencies():
"""
Checks and imports the Evennia dependencies. This must be done
already before the paths are set up.
Returns:
not_error (bool): True if no dependency error was found.
"""
error = False
# Python
pversion = ".".join(str(num) for num in sys.version_info if type(num) == int)
if pversion < PYTHON_MIN:
print(ERROR_PYTHON_VERSION.format(pversion=pversion, python_min=PYTHON_MIN))
error = True
# Twisted
try:
import twisted
tversion = twisted.version.short()
if tversion < TWISTED_MIN:
print(ERROR_TWISTED_VERSION.format(
tversion=tversion, twisted_min=TWISTED_MIN))
error = True
except ImportError:
print(ERROR_NOTWISTED)
error = True
# Django
try:
dversion = ".".join(str(num) for num in django.VERSION if type(num) == int)
# only the main version (1.5, not 1.5.4.0)
dversion_main = ".".join(dversion.split(".")[:2])
if dversion < DJANGO_MIN:
print(ERROR_DJANGO_MIN.format(
dversion=dversion_main, django_min=DJANGO_MIN))
error = True
elif DJANGO_MIN <= dversion < DJANGO_REC:
print(NOTE_DJANGO_MIN.format(
dversion=dversion_main, django_rec=DJANGO_REC))
elif DJANGO_REC < dversion_main:
print(NOTE_DJANGO_NEW.format(
dversion=dversion_main, django_rec=DJANGO_REC))
except ImportError:
print(ERROR_NODJANGO)
error = True
if error:
sys.exit()
# return True/False if error was reported or not
return not error
def set_gamedir(path):
"""
Set GAMEDIR based on path, by figuring out where the setting file
is inside the directory tree.
"""
global GAMEDIR
Ndepth = 10
settings_path = os.path.join("server", "conf", "settings.py")
for i in range(Ndepth):
gpath = os.getcwd()
if "server" in os.listdir(gpath):
if os.path.isfile(settings_path):
GAMEDIR = gpath
return
os.chdir(os.pardir)
print(ERROR_NO_GAMEDIR)
sys.exit()
def create_secret_key():
"""
Randomly create the secret key for the settings file
"""
import random
import string
secret_key = list((string.letters +
string.digits + string.punctuation).replace("\\", "")\
.replace("'", '"').replace("{","_").replace("}","-"))
random.shuffle(secret_key)
secret_key = "".join(secret_key[:40])
return secret_key
def create_settings_file(init=True):
"""
Uses the template settings file to build a working settings file.
Args:
init (bool): This is part of the normal evennia --init
operation. If false, this function will copy a fresh
template file in (asking if it already exists).
"""
settings_path = os.path.join(GAMEDIR, "server", "conf", "settings.py")
if not init:
# if not --init mode, settings file may already exist from before
if os.path.exists(settings_path):
inp = raw_input("server/conf/settings.py already exists. "
"Do you want to reset it? y/[N]> ")
if not inp.lower() == 'y':
print ("Aborted.")
sys.exit()
else:
print ("Reset the settings file.")
default_settings_path = os.path.join(EVENNIA_TEMPLATE, "server", "conf", "settings.py")
shutil.copy(default_settings_path, settings_path)
with open(settings_path, 'r') as f:
settings_string = f.read()
# tweak the settings
setting_dict = {
"settings_default": os.path.join(EVENNIA_LIB, "settings_default.py"),
"servername": "\"%s\"" % GAMEDIR.rsplit(os.path.sep, 1)[1].capitalize(),
"secret_key": "\'%s\'" % create_secret_key()}
settings_string = settings_string.format(**setting_dict)
with open(settings_path, 'w') as f:
f.write(settings_string)
def create_game_directory(dirname):
"""
Initialize a new game directory named dirname
at the current path. This means copying the
template directory from evennia's root.
Args:
dirname (str): The directory name to create.
"""
global GAMEDIR
GAMEDIR = os.path.abspath(os.path.join(CURRENT_DIR, dirname))
if os.path.exists(GAMEDIR):
print("Cannot create new Evennia game dir: '%s' already exists." % dirname)
sys.exit()
# copy template directory
shutil.copytree(EVENNIA_TEMPLATE, GAMEDIR)
# pre-build settings file in the new GAMEDIR
create_settings_file()
def create_superuser():
"""
Create the superuser player
"""
print(
"\nCreate a superuser below. The superuser is Player #1, the 'owner' "
"account of the server.\n")
django.core.management.call_command("createsuperuser", interactive=True)
def check_database():
"""
    Check that the database exists.
Returns:
exists (bool): `True` if the database exists, otherwise `False`.
"""
    # Check that a database exists and is accessible
from django.db import connection
tables = connection.introspection.get_table_list(connection.cursor())
if not tables or not isinstance(tables[0], basestring): # django 1.8+
tables = [tableinfo.name for tableinfo in tables]
if tables and u'players_playerdb' in tables:
# database exists and seems set up. Initialize evennia.
import evennia
evennia._init()
# Try to get Player#1
from evennia.players.models import PlayerDB
try:
PlayerDB.objects.get(id=1)
except django.db.utils.OperationalError as e:
print(ERROR_DATABASE.format(traceback=e))
sys.exit()
except PlayerDB.DoesNotExist:
# no superuser yet. We need to create it.
other_superuser = PlayerDB.objects.filter(is_superuser=True)
if other_superuser:
# Another superuser was found, but not with id=1. This may
# happen if using flush (the auto-id starts at a higher
                # value). We copy this superuser into id=1. To do
# this we must deepcopy it, delete it then save the copy
# with the new id. This allows us to avoid the UNIQUE
# constraint on usernames.
other = other_superuser[0]
other_id = other.id
other_key = other.username
print(WARNING_MOVING_SUPERUSER.format(
other_key=other_key, other_id=other_id))
res = ""
while res.upper() != "Y":
# ask for permission
res = input("Continue [Y]/N: ")
if res.upper() == "N":
sys.exit()
elif not res:
break
# continue with the
from copy import deepcopy
new = deepcopy(other)
other.delete()
new.id = 1
new.save()
else:
create_superuser()
check_database()
return True
def getenv():
"""
Get current environment and add PYTHONPATH.
Returns:
env (dict): Environment global dict.
"""
sep = ";" if os.name == 'nt' else ":"
env = os.environ.copy()
env['PYTHONPATH'] = sep.join(sys.path)
return env
def get_pid(pidfile):
"""
Get the PID (Process ID) by trying to access an PID file.
Args:
pidfile (str): The path of the pid file.
Returns:
pid (str): The process id.
"""
pid = None
if os.path.exists(pidfile):
f = open(pidfile, 'r')
pid = f.read()
return pid
def del_pid(pidfile):
"""
The pidfile should normally be removed after a process has
finished, but when sending certain signals they remain, so we need
to clean them manually.
Args:
pidfile (str): The path of the pid file.
"""
if os.path.exists(pidfile):
os.remove(pidfile)
def kill(pidfile, signal=SIG, succmsg="", errmsg="",
restart_file=SERVER_RESTART, restart=False):
"""
    Send a kill signal to a process based on its PID file. A customized
    success/error message is printed. On Windows the pid file is removed
    manually, since sending the signal alone leaves it behind.
Args:
pidfile (str): The path of the pidfile to get the PID from.
signal (int, optional): Signal identifier.
succmsg (str, optional): Message to log on success.
errmsg (str, optional): Message to log on failure.
restart_file (str, optional): Restart file location.
restart (bool, optional): Are we in restart mode or not.
"""
pid = get_pid(pidfile)
if pid:
if os.name == 'nt':
os.remove(pidfile)
# set restart/norestart flag
if restart:
django.core.management.call_command(
'collectstatic', interactive=False, verbosity=0)
with open(restart_file, 'w') as f:
f.write("reload")
else:
with open(restart_file, 'w') as f:
f.write("shutdown")
try:
os.kill(int(pid), signal)
except OSError:
print("Process %(pid)s cannot be stopped. "\
"The PID file 'server/%(pidfile)s' seems stale. "\
"Try removing it." % {'pid': pid, 'pidfile': pidfile})
return
print("Evennia:", succmsg)
return
print("Evennia:", errmsg)
def show_version_info(about=False):
"""
Display version info.
Args:
about (bool): Include ABOUT info as well as version numbers.
Returns:
version_info (str): A complete version info string.
"""
import os
import sys
import twisted
import django
return VERSION_INFO.format(
version=EVENNIA_VERSION, about=ABOUT_INFO if about else "",
os=os.name, python=sys.version.split()[0],
twisted=twisted.version.short(),
django=django.get_version())
def error_check_python_modules():
"""
Import settings modules in settings. This will raise exceptions on
pure python-syntax issues which are hard to catch gracefully with
exceptions in the engine (since they are formatting errors in the
python source files themselves). Best they fail already here
before we get any further.
Raises:
DeprecationWarning: For trying to access various modules
(usually in `settings.py`) which are no longer supported.
"""
from django.conf import settings
def imp(path, split=True):
mod, fromlist = path, "None"
if split:
mod, fromlist = path.rsplit('.', 1)
__import__(mod, fromlist=[fromlist])
# core modules
imp(settings.COMMAND_PARSER)
imp(settings.SEARCH_AT_RESULT)
imp(settings.CONNECTION_SCREEN_MODULE)
#imp(settings.AT_INITIAL_SETUP_HOOK_MODULE, split=False)
for path in settings.LOCK_FUNC_MODULES:
imp(path, split=False)
# cmdsets
deprstring = ("settings.%s should be renamed to %s. If defaults are used, "
"their path/classname must be updated "
"(see evennia/settings_default.py).")
if hasattr(settings, "CMDSET_DEFAULT"):
raise DeprecationWarning(deprstring % (
"CMDSET_DEFAULT", "CMDSET_CHARACTER"))
if hasattr(settings, "CMDSET_OOC"):
raise DeprecationWarning(deprstring % ("CMDSET_OOC", "CMDSET_PLAYER"))
if settings.WEBSERVER_ENABLED and not isinstance(settings.WEBSERVER_PORTS[0], tuple):
raise DeprecationWarning(
"settings.WEBSERVER_PORTS must be on the form "
"[(proxyport, serverport), ...]")
if hasattr(settings, "BASE_COMM_TYPECLASS"):
raise DeprecationWarning(deprstring % (
"BASE_COMM_TYPECLASS", "BASE_CHANNEL_TYPECLASS"))
if hasattr(settings, "COMM_TYPECLASS_PATHS"):
raise DeprecationWarning(deprstring % (
"COMM_TYPECLASS_PATHS", "CHANNEL_TYPECLASS_PATHS"))
if hasattr(settings, "CHARACTER_DEFAULT_HOME"):
raise DeprecationWarning(
"settings.CHARACTER_DEFAULT_HOME should be renamed to "
"DEFAULT_HOME. See also settings.START_LOCATION "
"(see evennia/settings_default.py).")
deprstring = ("settings.%s is now merged into settings.TYPECLASS_PATHS. "
"Update your settings file.")
if hasattr(settings, "OBJECT_TYPECLASS_PATHS"):
raise DeprecationWarning(deprstring % "OBJECT_TYPECLASS_PATHS")
if hasattr(settings, "SCRIPT_TYPECLASS_PATHS"):
raise DeprecationWarning(deprstring % "SCRIPT_TYPECLASS_PATHS")
if hasattr(settings, "PLAYER_TYPECLASS_PATHS"):
raise DeprecationWarning(deprstring % "PLAYER_TYPECLASS_PATHS")
if hasattr(settings, "CHANNEL_TYPECLASS_PATHS"):
raise DeprecationWarning(deprstring % "CHANNEL_TYPECLASS_PATHS")
from evennia.commands import cmdsethandler
if not cmdsethandler.import_cmdset(settings.CMDSET_UNLOGGEDIN, None):
print("Warning: CMDSET_UNLOGGED failed to load!")
if not cmdsethandler.import_cmdset(settings.CMDSET_CHARACTER, None):
print("Warning: CMDSET_CHARACTER failed to load")
if not cmdsethandler.import_cmdset(settings.CMDSET_PLAYER, None):
print("Warning: CMDSET_PLAYER failed to load")
# typeclasses
imp(settings.BASE_PLAYER_TYPECLASS)
imp(settings.BASE_OBJECT_TYPECLASS)
imp(settings.BASE_CHARACTER_TYPECLASS)
imp(settings.BASE_ROOM_TYPECLASS)
imp(settings.BASE_EXIT_TYPECLASS)
imp(settings.BASE_SCRIPT_TYPECLASS)
def init_game_directory(path, check_db=True):
"""
Try to analyze the given path to find settings.py - this defines
the game directory and also sets PYTHONPATH as well as the django
path.
Args:
path (str): Path to new game directory, including its name.
        check_db (bool, optional): Check if the database exists.
"""
# set the GAMEDIR path
set_gamedir(path)
# Add gamedir to python path
sys.path.insert(0, GAMEDIR)
if sys.argv[1] == 'test':
os.environ['DJANGO_SETTINGS_MODULE'] = 'evennia.settings_default'
else:
os.environ['DJANGO_SETTINGS_MODULE'] = SETTINGS_DOTPATH
# required since django1.7
django.setup()
# test existence of the settings module
try:
from django.conf import settings
except Exception as ex:
if not str(ex).startswith("No module named"):
import traceback
print(traceback.format_exc().strip())
print(ERROR_SETTINGS)
sys.exit()
# this will both check the database and initialize the evennia dir.
if check_db:
check_database()
# set up the Evennia executables and log file locations
global SERVER_PY_FILE, PORTAL_PY_FILE
global SERVER_LOGFILE, PORTAL_LOGFILE, HTTP_LOGFILE
global SERVER_PIDFILE, PORTAL_PIDFILE
global SERVER_RESTART, PORTAL_RESTART
global EVENNIA_VERSION
SERVER_PY_FILE = os.path.join(EVENNIA_LIB, "server", "server.py")
PORTAL_PY_FILE = os.path.join(EVENNIA_LIB, "portal", "portal", "portal.py")
SERVER_PIDFILE = os.path.join(GAMEDIR, SERVERDIR, "server.pid")
PORTAL_PIDFILE = os.path.join(GAMEDIR, SERVERDIR, "portal.pid")
SERVER_RESTART = os.path.join(GAMEDIR, SERVERDIR, "server.restart")
PORTAL_RESTART = os.path.join(GAMEDIR, SERVERDIR, "portal.restart")
SERVER_LOGFILE = settings.SERVER_LOG_FILE
PORTAL_LOGFILE = settings.PORTAL_LOG_FILE
HTTP_LOGFILE = settings.HTTP_LOG_FILE
# verify existence of log file dir (this can be missing e.g.
# if the game dir itself was cloned since log files are in .gitignore)
logdirs = [logfile.rsplit(os.path.sep, 1)
for logfile in (SERVER_LOGFILE, PORTAL_LOGFILE, HTTP_LOGFILE)]
if not all(os.path.isdir(pathtup[0]) for pathtup in logdirs):
errstr = "\n ".join("%s (log file %s)" % (pathtup[0], pathtup[1]) for pathtup in logdirs
if not os.path.isdir(pathtup[0]))
print(ERROR_LOGDIR_MISSING.format(logfiles=errstr))
sys.exit()
if os.name == 'nt':
# We need to handle Windows twisted separately. We create a
# batchfile in game/server, linking to the actual binary
global TWISTED_BINARY
# Windows requires us to use the absolute path for the bat file.
server_path = os.path.dirname(os.path.abspath(__file__))
TWISTED_BINARY = os.path.join(server_path, "twistd.bat")
# add path so system can find the batfile
sys.path.insert(1, os.path.join(GAMEDIR, SERVERDIR))
try:
importlib.import_module("win32api")
except ImportError:
print(ERROR_WINDOWS_WIN32API)
sys.exit()
batpath = os.path.join(EVENNIA_SERVER, TWISTED_BINARY)
if not os.path.exists(batpath):
# Test for executable twisted batch file. This calls the
# twistd.py executable that is usually not found on the
# path in Windows. It's not enough to locate
# scripts.twistd, what we want is the executable script
# C:\PythonXX/Scripts/twistd.py. Alas we cannot hardcode
# this location since we don't know if user has Python in
# a non-standard location. So we try to figure it out.
twistd = importlib.import_module("twisted.scripts.twistd")
twistd_dir = os.path.dirname(twistd.__file__)
# note that we hope the twistd package won't change here, since we
# try to get to the executable by relative path.
twistd_path = os.path.abspath(
os.path.join(twistd_dir, os.pardir, os.pardir, os.pardir,
os.pardir, 'scripts', 'twistd.py'))
with open(batpath, 'w') as bat_file:
# build a custom bat file for windows
bat_file.write("@\"%s\" \"%s\" %%*" % (
sys.executable, twistd_path))
print(INFO_WINDOWS_BATFILE.format(twistd_path=twistd_path))
def run_dummyrunner(number_of_dummies):
"""
Start an instance of the dummyrunner
Args:
number_of_dummies (int): The number of dummy players to start.
Notes:
The dummy players' behavior can be customized by adding a
`dummyrunner_settings.py` config file in the game's conf/
directory.
"""
    number_of_dummies = str(int(number_of_dummies)) if number_of_dummies else "1"
cmdstr = [sys.executable, EVENNIA_DUMMYRUNNER, "-N", number_of_dummies]
    config_file = os.path.join(GAMEDIR, CONFDIR, "dummyrunner_settings.py")
if os.path.exists(config_file):
cmdstr.extend(["--config", config_file])
try:
call(cmdstr, env=getenv())
except KeyboardInterrupt:
pass
def list_settings(keys):
"""
Display the server settings. We only display the Evennia specific
settings here. The result will be printed to the terminal.
Args:
keys (str or list): Setting key or keys to inspect.
"""
from importlib import import_module
from evennia.utils import evtable
evsettings = import_module(SETTINGS_DOTPATH)
if len(keys) == 1 and keys[0].upper() == "ALL":
        # show a list of all available setting keys
table = evtable.EvTable()
confs = [key for key in sorted(evsettings.__dict__) if key.isupper()]
for i in range(0, len(confs), 4):
table.add_row(*confs[i:i+4])
else:
# a specific key
table = evtable.EvTable(width=131)
keys = [key.upper() for key in keys]
confs = dict((key, var) for key, var in evsettings.__dict__.items()
if key in keys)
for key, val in confs.items():
table.add_row(key, str(val))
print(table)
def run_menu():
"""
This launches an interactive menu.
"""
while True:
# menu loop
print(MENU)
inp = input(" option > ")
# quitting and help
if inp.lower() == 'q':
return
elif inp.lower() == 'h':
print(HELP_ENTRY)
input("press <return> to continue ...")
continue
elif inp.lower() in ('v', 'i', 'a'):
print(show_version_info(about=True))
input("press <return> to continue ...")
continue
# options
try:
inp = int(inp)
except ValueError:
print("Not a valid option.")
continue
if inp == 1:
# start everything, log to log files
server_operation("start", "all", False, False)
elif inp == 2:
# start everything, server interactive start
server_operation("start", "all", True, False)
elif inp == 3:
# start everything, portal interactive start
server_operation("start", "server", False, False)
server_operation("start", "portal", True, False)
elif inp == 4:
# start both server and portal interactively
server_operation("start", "server", True, False)
server_operation("start", "portal", True, False)
elif inp == 5:
# reload the server
server_operation("reload", "server", None, None)
elif inp == 6:
# reload the portal
server_operation("reload", "portal", None, None)
elif inp == 7:
# stop server and portal
server_operation("stop", "all", None, None)
elif inp == 8:
# stop server
server_operation("stop", "server", None, None)
elif inp == 9:
# stop portal
server_operation("stop", "portal", None, None)
else:
print("Not a valid option.")
continue
return
def server_operation(mode, service, interactive, profiler, logserver=False):
"""
Handle argument options given on the command line.
Args:
mode (str): Start/stop/restart and so on.
service (str): "server", "portal" or "all".
        interactive (bool): Use interactive mode or daemon.
profiler (bool): Run the service under the profiler.
logserver (bool, optional): Log Server data to logfile
specified by settings.SERVER_LOG_FILE.
"""
cmdstr = [sys.executable, EVENNIA_RUNNER]
errmsg = "The %s does not seem to be running."
if mode == 'start':
# launch the error checker. Best to catch the errors already here.
error_check_python_modules()
# starting one or many services
if service == 'server':
if profiler:
cmdstr.append('--pserver')
if interactive:
cmdstr.append('--iserver')
if logserver:
cmdstr.append('--logserver')
cmdstr.append('--noportal')
elif service == 'portal':
if profiler:
cmdstr.append('--pportal')
if interactive:
cmdstr.append('--iportal')
cmdstr.append('--noserver')
django.core.management.call_command(
'collectstatic', verbosity=1, interactive=False)
else:
# all
# for convenience we don't start logging of
# portal, only of server with this command.
if profiler:
# this is the common case
cmdstr.append('--pserver')
if interactive:
cmdstr.append('--iserver')
if logserver:
cmdstr.append('--logserver')
django.core.management.call_command(
'collectstatic', verbosity=1, interactive=False)
cmdstr.extend([
GAMEDIR, TWISTED_BINARY, SERVER_LOGFILE,
PORTAL_LOGFILE, HTTP_LOGFILE])
# start the server
process = Popen(cmdstr, env=getenv())
if interactive:
try:
process.wait()
except KeyboardInterrupt:
server_operation("stop", "portal", False, False)
return
finally:
print(NOTE_KEYBOARDINTERRUPT)
elif mode == 'reload':
# restarting services
if os.name == 'nt':
print(
"Restarting from command line is not supported under Windows. "
"Log into the game to restart.")
return
if service == 'server':
kill(SERVER_PIDFILE, SIG, "Server reloaded.",
errmsg % 'Server', SERVER_RESTART, restart=True)
elif service == 'portal':
            print(
                "Note: Portal usually doesn't need to be reloaded unless you "
"are debugging in interactive mode. If Portal was running in "
"default Daemon mode, it cannot be restarted. In that case "
"you have to restart it manually with 'evennia.py "
"start portal'")
kill(PORTAL_PIDFILE, SIG,
"Portal reloaded (or stopped, if it was in daemon mode).",
errmsg % 'Portal', PORTAL_RESTART, restart=True)
else:
# all
# default mode, only restart server
            kill(SERVER_PIDFILE, SIG,
                 "Server reloaded.",
errmsg % 'Server', SERVER_RESTART, restart=True)
elif mode == 'stop':
# stop processes, avoiding reload
if service == 'server':
kill(SERVER_PIDFILE, SIG,
"Server stopped.", errmsg % 'Server', SERVER_RESTART)
elif service == 'portal':
kill(PORTAL_PIDFILE, SIG,
"Portal stopped.", errmsg % 'Portal', PORTAL_RESTART)
else:
kill(PORTAL_PIDFILE, SIG,
"Portal stopped.", errmsg % 'Portal', PORTAL_RESTART)
kill(SERVER_PIDFILE, SIG,
"Server stopped.", errmsg % 'Server', SERVER_RESTART)
def main():
"""
Run the evennia launcher main program.
"""
# set up argument parser
parser = ArgumentParser(description=CMDLINE_HELP)
parser.add_argument(
'-v', '--version', action='store_true',
dest='show_version', default=False,
help="Show version info.")
parser.add_argument(
'-i', '--interactive', action='store_true',
dest='interactive', default=False,
help="Start given processes in interactive mode.")
parser.add_argument(
'-l', '--log', action='store_true',
dest="logserver", default=False,
help="Log Server data to log file.")
parser.add_argument(
'--init', action='store', dest="init", metavar="name",
help="Creates a new game directory 'name' at the current location.")
parser.add_argument(
'--list', nargs='+', action='store', dest='listsetting', metavar="key",
help=("List values for server settings. Use 'all' to list all "
"available keys."))
parser.add_argument(
'--profiler', action='store_true', dest='profiler', default=False,
help="Start given server component under the Python profiler.")
parser.add_argument(
'--dummyrunner', nargs=1, action='store', dest='dummyrunner',
metavar="N",
help="Tests a running server by connecting N dummy players to it.")
parser.add_argument(
'--settings', nargs=1, action='store', dest='altsettings',
default=None, metavar="filename.py",
help=("Start evennia with alternative settings file in "
"gamedir/server/conf/."))
parser.add_argument(
'--initsettings', action='store_true', dest="initsettings",
default=False,
help="Creates a new, empty settings file as gamedir/server/conf/settings.py.")
parser.add_argument(
"option", nargs='?', default="noop",
help="Operational mode: 'start', 'stop', 'restart' or 'menu'.")
parser.add_argument(
"service", metavar="component", nargs='?', default="all",
help=("Server component to operate on: "
"'server', 'portal' or 'all' (default)."))
parser.epilog = (
"Example django-admin commands: "
"'migrate', 'flush', 'shell' and 'dbshell'. "
"See the django documentation for more django-admin commands.")
args, unknown_args = parser.parse_known_args()
# handle arguments
option, service = args.option, args.service
# make sure we have everything
check_main_evennia_dependencies()
if not args:
# show help pane
print(CMDLINE_HELP)
sys.exit()
elif args.init:
# initialization of game directory
create_game_directory(args.init)
print(CREATED_NEW_GAMEDIR.format(
gamedir=args.init,
settings_path=os.path.join(args.init, SETTINGS_PATH)))
sys.exit()
if args.show_version:
# show the version info
print(show_version_info(option == "help"))
sys.exit()
if args.altsettings:
# use alternative settings file
sfile = args.altsettings[0]
global SETTINGSFILE, SETTINGS_DOTPATH
SETTINGSFILE = sfile
        SETTINGS_DOTPATH = "server.conf.%s" % (sfile[:-3] if sfile.endswith(".py") else sfile)
print("Using settings file '%s' (%s)." % (
SETTINGSFILE, SETTINGS_DOTPATH))
if args.initsettings:
# create new settings file
global GAMEDIR
GAMEDIR = os.getcwd()
try:
create_settings_file(init=False)
print(RECREATED_SETTINGS)
except IOError:
print(ERROR_INITSETTINGS)
sys.exit()
if args.dummyrunner:
# launch the dummy runner
init_game_directory(CURRENT_DIR, check_db=True)
run_dummyrunner(args.dummyrunner[0])
elif args.listsetting:
# display all current server settings
init_game_directory(CURRENT_DIR, check_db=False)
list_settings(args.listsetting)
elif option == 'menu':
# launch menu for operation
init_game_directory(CURRENT_DIR, check_db=True)
run_menu()
elif option in ('start', 'reload', 'stop'):
# operate the server directly
init_game_directory(CURRENT_DIR, check_db=True)
server_operation(option, service, args.interactive, args.profiler, args.logserver)
elif option != "noop":
# pass-through to django manager
check_db = False
if option in ('runserver', 'testserver'):
print(WARNING_RUNSERVER)
if option == "shell":
# to use the shell we need to initialize it first,
# and this only works if the database is set up
check_db = True
init_game_directory(CURRENT_DIR, check_db=check_db)
args = [option]
kwargs = {}
if service not in ("all", "server", "portal"):
args.append(service)
if unknown_args:
for arg in unknown_args:
if arg.startswith("--"):
if "=" in arg:
arg, value = [p.strip() for p in arg.split("=", 1)]
else:
value = True
kwargs[arg.lstrip("--")] = [value]
else:
args.append(arg)
try:
django.core.management.call_command(*args, **kwargs)
except django.core.management.base.CommandError as exc:
args = ", ".join(args)
kwargs = ", ".join(["--%s" % kw for kw in kwargs])
print(ERROR_INPUT.format(traceback=exc, args=args, kwargs=kwargs))
else:
# no input; print evennia info
print(ABOUT_INFO)
if __name__ == '__main__':
# start Evennia from the command line
main()
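# Pass-through sketch (hypothetical shell invocations, assuming this launcher is
# installed as the `evennia` command): any option other than start/stop/reload/
# menu is forwarded to django.core.management.call_command, with unknown
# --flag=value arguments converted into keyword arguments, e.g.
#
#     evennia migrate              ->  call_command("migrate")
#     evennia shell                ->  call_command("shell")
#     evennia dumpdata --indent=2  ->  call_command("dumpdata", indent=["2"])
#
# Values stay wrapped in single-element lists exactly as the loop above builds
# them; whether a particular django-admin command accepts that form is up to
# the command itself.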
| ergodicbreak/evennia | evennia/server/evennia_launcher.py | Python | bsd-3-clause | 44,636 |
# Enthought library imports
from traits.api import HasTraits, Int, Bool
from kiva.trait_defs.api import KivaFont
from enable.colors import ColorTrait
class TextFieldStyle(HasTraits):
""" This class holds style settings for rendering an EnableTextField.
fixme: See docstring on EnableBoxStyle
"""
# The color of the text
text_color = ColorTrait((0,0,0,1.0))
# The font for the text (must be monospaced!)
font = KivaFont("Courier 12")
# The color of highlighted text
highlight_color = ColorTrait((.65,0,0,1.0))
# The background color of highlighted items
highlight_bgcolor = ColorTrait("lightgray")
# The font for flagged text (must be monospaced!)
highlight_font = KivaFont("Courier 14 bold")
# The number of pixels between each line
line_spacing = Int(3)
# Space to offset text from the widget's border
text_offset = Int(5)
# Cursor properties
cursor_color = ColorTrait((0,0,0,1))
cursor_width = Int(2)
# Drawing properties
border_visible = Bool(False)
border_color = ColorTrait((0,0,0,1))
bgcolor = ColorTrait((1,1,1,1))
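# Usage sketch (hypothetical, not part of the original module): overriding a few
# of the traits defined above. HasTraits accepts trait values as keyword
# arguments; ColorTrait accepts both color names and RGBA tuples.
if __name__ == "__main__":
    style = TextFieldStyle(
        text_color=(0.1, 0.1, 0.1, 1.0),
        highlight_bgcolor="yellow",
        line_spacing=4,
        border_visible=True,
    )
    print(style.line_spacing, style.border_visible)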
| tommy-u/enable | enable/text_field_style.py | Python | bsd-3-clause | 1,133 |
import warnings
from django.core.urlresolvers import ResolverMatch
from django.core.urlresolvers import (
RegexURLPattern as DjangoRegexURLPattern,
RegexURLResolver
)
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django_view_timer.profiler import ViewTimeProfiler
from django_view_timer.settings import DJANGO_VIEW_TIMER_ENABLED
class RegexURLPattern(DjangoRegexURLPattern):
def resolve(self, path):
match = self.regex.search(path)
if match:
kwargs = match.groupdict()
if kwargs:
args = ()
else:
args = match.groups()
kwargs.update(self.default_args)
callback = ViewTimeProfiler(self.callback) if DJANGO_VIEW_TIMER_ENABLED else self.callback
return ResolverMatch(callback, args, kwargs, self.name)
def url(regex, view, kwargs=None, name=None, prefix=''):
if isinstance(view, (list, tuple)):
urlconf_module, app_name, namespace = ViewTimeProfiler(view) if DJANGO_VIEW_TIMER_ENABLED else view
return RegexURLResolver(regex, urlconf_module, kwargs, app_name=app_name, namespace=namespace)
else:
if isinstance(view, six.string_types):
warnings.warn(
'Support for string view arguments to url() is deprecated and '
'will be removed in Django 2.0 (got %s). Pass the callable '
'instead.' % view,
RemovedInDjango20Warning, stacklevel=2
)
if not view:
raise ImproperlyConfigured('Empty URL pattern view name not permitted (for pattern %r)' % regex)
if prefix:
view = prefix + '.' + view
return RegexURLPattern(regex, view, kwargs, name)
def patterns(prefix, *args):
pattern_list = []
for t in args:
if isinstance(t, (list, tuple)):
t = url(prefix=prefix, *t)
elif isinstance(t, RegexURLPattern):
t.add_prefix(prefix)
pattern_list.append(t)
return pattern_list
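# Usage sketch (hypothetical project urls.py, not part of this module): import
# this url() wrapper instead of django.conf.urls.url so that every resolved view
# is wrapped in ViewTimeProfiler whenever DJANGO_VIEW_TIMER_ENABLED is true:
#
#     from django_view_timer.urls import url
#     from myapp.views import article_detail   # hypothetical view
#
#     urlpatterns = [
#         url(r'^articles/(?P<pk>\d+)/$', article_detail, name='article-detail'),
#     ]
#
# Resolution behaviour is otherwise identical to Django's own RegexURLPattern.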
| aledista/django-view-timer | django_view_timer/urls.py | Python | bsd-3-clause | 2,142 |
from django.db import models
# Server variables that need to be stored in the database.
class ServerVariable(models.Model):
name = models.CharField(max_length=64, blank=True, null=True)
value = models.TextField(blank=True, null=True)
class Meta:
app_label = 'website'
@staticmethod
def get(name):
        try:
            server_variable = ServerVariable.objects.get(name=name)
        except ServerVariable.DoesNotExist:
            return None
return server_variable.value
@staticmethod
def set(name, value):
        try:
            server_variable = ServerVariable.objects.get(name=name)
        except ServerVariable.DoesNotExist:
            server_variable = ServerVariable(name=name)
server_variable.value = value
server_variable.save()
return server_variable
class MigrationHistory(models.Model):
jurisdiction_id = models.IntegerField(blank=True, null=True, db_index=True)
source_table = models.CharField(max_length=64, blank=True, null=True, db_index=True)
source_id = models.IntegerField(blank=True, null=True, db_index=True)
target_table = models.CharField(max_length=64, blank=True, null=True, db_index=True)
target_id = models.IntegerField(blank=True, null=True, db_index=True)
notes = models.TextField(blank=True, null=True)
notes2 = models.TextField(blank=True, null=True)
create_datetime = models.DateTimeField(auto_now_add=True)
modify_datetime = models.DateTimeField(auto_now=True)
class Meta:
app_label = 'website'
@staticmethod
def save_history(jurisdiction, source_table, source_id, target_table, target_id, notes='', notes2=''):
history, created = MigrationHistory.objects.get_or_create(source_table=source_table, source_id=source_id, target_table=target_table, target_id=target_id)
        if jurisdiction is not None:
history.jurisdiction_id = jurisdiction.id
history.notes = notes
history.notes2 = notes2
history.save()
return history
@staticmethod
def get_target_id(source_table, source_id, target_table):
        try:
            history = MigrationHistory.objects.get(source_table=source_table, source_id=source_id, target_table=target_table)
            return history.target_id
        except MigrationHistory.DoesNotExist:
            return None
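# Usage sketch (hypothetical calls with made-up table names and ids, assuming a
# configured database; jurisdiction is an object with an .id, or None):
# ServerVariable acts as a simple key/value store and MigrationHistory records
# source-to-target id mappings, e.g.
#
#     ServerVariable.set('last_import', '2014-01-01')
#     value = ServerVariable.get('last_import')   # -> '2014-01-01' (or None)
#
#     MigrationHistory.save_history(jurisdiction, 'old_permit', 12,
#                                   'website_permit', 345, notes='migrated')
#     new_id = MigrationHistory.get_target_id('old_permit', 12, 'website_permit')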
| solarpermit/solarpermit | website/models/server.py | Python | bsd-3-clause | 2,352 |
"""The WaveBlocks Project
Function for stem-plotting functions of the type f:IxI -> C
with abs(f) as z-value and phase(f) as color code.
This function makes a three dimensional stem plot.
@author: R. Bourquin
@copyright: Copyright (C) 2012, 2014, 2016 R. Bourquin
@license: Modified BSD License
"""
from numpy import real, squeeze
import mpl_toolkits.mplot3d.art3d as art3d
from matplotlib.pyplot import gcf
from WaveBlocksND.Plot.color_map import color_map
def stemcf3d(gridu, gridv, phase, modulus, darken=None, fig=None, markerp="o", **kwargs):
    r"""Stem-plot the modulus of a complex-valued function :math:`f:I\times I \rightarrow \mathbb{C}` together
    with its phase in a color-coded fashion. Additional keyword arguments are passed to the plot function.
:param gridu: The x components of the grid nodes of the real domain grid :math:`\Gamma`
:param gridv: The y components of the grid nodes of the real domain grid :math:`\Gamma`
:param phase: The phase of the complex domain result :math:`f(\Gamma)`
:param modulus: The modulus of the complex domain result :math:`f(\Gamma)`
:param darken: Whether to take into account the modulus of the data to darken colors.
:param fig: The figure instance used for plotting.
:param markerp: The shape of the stemmed markers.
"""
# Color mapping
rgb_colors = squeeze(color_map(gridv, phase=phase, modulus=modulus, darken=darken))
    # Plot to the given figure instance or retrieve the current one
if fig is None:
fig = gcf()
axes = fig.add_subplot(1, 1, 1, projection='3d')
for ui, vi, wi, col in zip(gridu, gridv, modulus, rgb_colors):
line = art3d.Line3D(*list(zip((ui, vi, 0), (ui, vi, wi))), marker=markerp, markevery=(1, 1), color=col)
axes.add_line(line)
axes.set_xlim3d(real(gridu).min(), real(gridu).max())
axes.set_ylim3d(real(gridv).min(), real(gridv).max())
axes.set_zlim3d(real(modulus).min(), real(modulus).max())
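# Usage sketch (hypothetical, not shipped with WaveBlocksND): stem-plot a simple
# complex-valued Gaussian packet on a small grid. The grid is flattened to one
# dimension because the function iterates over the nodes pointwise.
if __name__ == "__main__":
    from numpy import abs as nabs, angle, exp, linspace, meshgrid, pi
    from matplotlib.pyplot import show

    U, V = meshgrid(linspace(-1.0, 1.0, 12), linspace(-1.0, 1.0, 12))
    F = exp(2.0j * pi * U) * exp(-(U**2 + V**2))
    stemcf3d(U.ravel(), V.ravel(), angle(F).ravel(), nabs(F).ravel())
    show()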
| WaveBlocks/WaveBlocksND | WaveBlocksND/Plot/stemcf3d.py | Python | bsd-3-clause | 1,964 |