gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
"""Namespace for all type objects"""
import re
import logging
import os
from argparse import ArgumentTypeError
from terseparse.utils import classproperty, rep
log = logging.getLogger('terseparse.types')
# Lists of items are separated by commas, semi-colons and/or whitespace
list_regex = re.compile(r'[^,;\s]+')
class Type(object):
    """Abstract base for all argument type objects.

    A type instance is callable: calling it with a string either returns
    the converted value or raises ``ArgumentTypeError``.  ``convert`` is
    expected to be free of side effects.
    """

    def __call__(self, val):
        return self.convert(val)

    def __str__(self):
        # Subclasses assign ``name``: the short human-readable type label.
        return self.name

    def __or__(self, obj):
        # ``SomeType() | 'literal'`` promotes the bare string to a Keyword.
        other = Keyword(obj) if isinstance(obj, str) else obj
        return Or(self, other)

    def fail(self, val_str, message):
        """Reject ``val_str`` by raising ArgumentTypeError with ``message``."""
        raise ArgumentTypeError(
            "{!r} is an invalid <{}>. {}".format(val_str, self.name, message))
class GreedyType(Type):
    """Marker mixin indicating a type will greedily consume arguments.

    Carries no behavior of its own; presumably consumers isinstance-check
    against it when deciding how many tokens to feed ``convert`` — confirm
    against the parser code.
    """
class Keyword(Type):
    """A Keyword matches one exact string and maps it to a static value."""

    def __init__(self, name, *value):
        """Initialize Keyword

        Args:
            name -- keyword name to match
            value -- optional mapped value, otherwise name is used

        value is declared as *value so that an explicitly supplied value can
        be distinguished from no value at all, while still supporting None.
        If no value is supplied then the keyword maps to its own name.
        If a value is supplied (even None), then that value is used instead.

        Raises:
            TypeError -- if more than one value is supplied
        """
        # Previously extra values were silently ignored; reject them so the
        # mistake Keyword('k', 1, 2) surfaces immediately.
        if len(value) > 1:
            raise TypeError(
                'Keyword() takes at most one value, got {}'.format(len(value)))
        self.name = name
        self.key = name
        self.value = name if not value else value[0]
        self.description = "Matches {!r} and maps it to {!r}".format(name, self.value)

    def convert(self, val):
        """Return the mapped value when val equals the keyword, else fail."""
        if val == self.key:
            return self.value
        self.fail(val, "Must be {!r}".format(self.key))

    def __repr__(self):
        return rep(self, 'name', 'value')
class Str(Type):
    """Identity string type.

    Use this instead of the builtin ``str`` so help/error output shows a
    clean type name.
    """

    def __init__(self):
        self.name = 'str'

    def convert(self, val):
        # Values arrive as strings already; str() is a cheap normalization.
        return str(val)

    def __repr__(self):
        return rep(self, 'name')
class Bool(Type):
    """Convert a string to bool.

    'true', 't', '1' and 'yes' (case-insensitive) convert to True;
    anything else converts to False.
    """

    def __init__(self):
        self.name = 'bool'
        self.true_vals = ('true', 't', '1', 'yes')

    # BUG FIX: this method was mistakenly defined as a second __init__,
    # which both shadowed the real constructor (making Bool() raise a
    # TypeError for the missing ``val`` argument) and left the type with
    # no ``convert`` at all.
    def convert(self, val):
        """Return True iff val (case-insensitively) names a true value."""
        return val.lower() in self.true_vals

    def __repr__(self):
        return rep(self, 'name')
class Set(Type):
    """A comma/semicolon/whitespace separated list of unique typed values.

    >>> s = Set(Int())
    >>> sorted(s('1,2,3,4'))
    [1, 2, 3, 4]
    >>> s('1,1,1,1')
    {1}
    """

    def __init__(self, typ):
        self.typ = typ
        self.name = 'set(<{}>)'.format(typ)
        self.description = ('A set is a comma separated list of unique values '
                            'of type <{}>.'.format(typ.name))

    def convert(self, val):
        """Split val on the list separators and convert each unique token."""
        out = set()
        for token in list_regex.findall(val):
            try:
                out.add(self.typ(token))
            except ArgumentTypeError as e:
                # Surface the element-level error alongside the set syntax.
                self.fail(val, self.description + '\n' + str(e))
        return out

    def __repr__(self):
        return rep(self, 'typ')
class List(Type):
    """A comma/semicolon/whitespace separated list of typed values.

    Unlike Set, duplicates and ordering are preserved.

    >>> l = List(Int())
    >>> l('1,2,3,4')
    [1, 2, 3, 4]
    >>> l('1,1,1,1')
    [1, 1, 1, 1]
    """

    def __init__(self, typ):
        self.typ = typ
        self.name = 'list(<{}>)'.format(typ)
        self.description = 'A list is a comma separated list of values of type <{}>.' \
            .format(typ)

    def convert(self, val):
        """Split val on the list separators and convert each token in order."""
        out = []
        for token in list_regex.findall(val):
            try:
                out.append(self.typ(token))
            except ArgumentTypeError as e:
                # Surface the element-level error alongside the list syntax.
                self.fail(val, self.description + '\n' + str(e))
        return out

    def __repr__(self):
        return rep(self, 'typ')
class Dict(Type):
    """Converts a string to a dictionary

    Supports a comma, semi-colon or space separated list of key value pairs.
    Key-value pairs can be separated by either a colon or an equals sign.
    The following will all parse to {'a': 'b', 'c': 'd'}
    >>> d = Dict({'a': Str(), 'c': Str()})
    >>> d('a:b c:d')
    {'a': 'b', 'c': 'd'}
    >>> d('a=b,c=d')
    {'a': 'b', 'c': 'd'}
    >>> d('a:b, c=d')
    {'a': 'b', 'c': 'd'}
    >>> d('a=b,,,c=d')
    {'a': 'b', 'c': 'd'}
    If no value is given, then it is passed to the validator as the empty string (ie '')
    >>> Dict({'a': Int() | Keyword('', None)})('a')
    {'a': None}
    Keys can be specified multiple times; the latest (farthest to the right)
    key's value overwrites previous values.
    """

    def __init__(self, validator_map):
        """Create a dictionary type from a dictionary of other types

        Args:
            validator_map -- a mapping from names to types
        Examples:
            >>> Dict({'a': int, 'b': int})('a:1,b:2')
            {'a': 1, 'b': 2}
            >>> Dict({'a': str, 'b': int})('a:asdf b=1234')
            {'a': 'asdf', 'b': 1234}
            >>> Dict({'a': Int() | Keyword('', None), 'b': Int()})('a,b=1')
            {'a': None, 'b': 1}
        """
        self.validators = dict(validator_map)
        # Sort by key so name/description output is deterministic.
        v_sorted = sorted(self.validators.items(), key=lambda t: t[0])
        self.validator_descriptions = ['{}:<{}>'.format(k, v) for k, v in v_sorted]
        self.name = 'dict({})'.format(', '.join(self.validator_descriptions))
        self.description = '\nDict options: \n '
        self.description += '\n '.join(self.validator_descriptions)
        self.kv_regex = re.compile(r'[=:]+')

    def keys_to_set_type(self):
        """Return a Set type that accepts only this dict's keys."""
        kws = tuple(Keyword(k) for k in self.validators)
        return Set(Or(*kws))

    def convert(self, val):
        try:
            return self._convert(val)
        except (AssertionError, ValueError):
            # Unknown key or malformed pair.
            self.fail(val, self.description)
        except ArgumentTypeError as e:
            # A value failed its validator; include the nested error.
            self.fail(val, self.description + '\n' + str(e))

    def _convert(self, val):
        obj = {}
        for pair in list_regex.findall(val):
            parts = self.kv_regex.split(pair)
            if len(parts) == 1:
                # Bare key: validator receives the empty string.
                k, v = parts[0], ''
            else:
                # More than one separator run raises ValueError -> fail().
                k, v = parts
            assert k in self.validators
            # BUG FIX: previously this rebound ``val``, shadowing the input
            # string that error messages report.
            converted = self.validators[k](v)
            if k in obj:
                # logger.warn is deprecated; use warning().
                log.warning('key: {!r} overwritten '
                            'new: {!r} old: {!r}'.format(k, converted, obj[k]))
            obj[k] = converted
        return obj

    def __iter__(self):
        # BUG FIX: items() is iterable but NOT an iterator, so returning it
        # directly made ``iter(dict_type)`` raise TypeError.
        return iter(self.validators.items())

    def __repr__(self):
        return rep(self, validator_map=self.validators)
# Human-readable access descriptions keyed by open()-style mode string;
# used by File and Dir when building failure messages.
MODE_STRS = {
    'r': 'readable',
    'w': 'writable',
    'r+': 'readable and writeable'}
class File(Type):
    """Validate that a path can be opened with the requested mode.

    NOTE(review): validation works by actually opening the file, so with
    mode 'w' this truncates an existing file (and creates a missing one) —
    a side effect that contradicts the Type contract; confirm intended.
    """

    @classproperty
    def r(cls):
        """A file that must be readable."""
        return cls('r')

    @classproperty
    def rw(cls):
        """A file that must be readable and writeable."""
        return cls('r+')

    @classproperty
    def w(cls):
        """A file that must be writable."""
        return cls('w')

    def __init__(self, mode):
        self.name = 'file'
        self.mode = mode
        self.mode_str = MODE_STRS[mode]
        self.description = 'file({})'.format(mode)

    def convert(self, val):
        """Return the path unchanged if it opens cleanly in ``mode``."""
        try:
            with open(val, self.mode):
                return val
        except IOError:
            self.fail(val, 'Must be a {} file'.format(self.mode_str))

    def __repr__(self):
        return rep(self, 'mode')
# Map mode strings onto os.access() permission bitmasks for Dir validation.
DIR_MODES = {
    'r': os.R_OK,
    'w': os.W_OK,
    'rw': os.R_OK | os.W_OK
}
class Dir(File):
    """Validate that a path is a directory with the requested access."""

    def __init__(self, mode):
        self.name = 'dir'
        # Unlike File.mode (a string), this is an os.access() bitmask.
        self.mode = DIR_MODES[mode]
        self.mode_str = MODE_STRS[mode]
        self.description = 'dir({})'.format(mode)

    def convert(self, val):
        """Return val if it is an accessible directory, else fail."""
        # BUG FIX: os.access() alone also accepts regular files; require
        # the path to actually be a directory.
        if not os.path.isdir(val) or not os.access(val, self.mode):
            self.fail(val, 'Must be a {} directory'.format(self.mode_str))
        return val

    def __repr__(self):
        return rep(self, 'mode')
class Int(Type):
    """Integer parsing with an optional half-open range restriction.

    Accepts base-10 literals and, with an explicit '0x' prefix, base-16.

    >>> Int()('0xFF')
    255
    >>> Int()('1234')
    1234
    >>> Int()('01234')
    1234
    """

    @classproperty
    def u8(cls):
        obj = cls(0, 2**8)
        obj.name = 'u8'
        obj.description = 'unsigned 8-bit integer'
        return obj

    @classproperty
    def u16(cls):
        obj = cls(0, 2**16)
        obj.name = 'u16'
        obj.description = 'unsigned 16-bit integer'
        return obj

    @classproperty
    def u32(cls):
        obj = cls(0, 2**32)
        obj.name = 'u32'
        obj.description = 'unsigned 32-bit integer'
        return obj

    @classproperty
    def positive(cls):
        return cls(0)

    @classproperty
    def negative(cls):
        return cls(None, 0)

    def __init__(self, minval=None, maxval=None):
        """Create an Integer that satisfies minval <= val < maxval."""
        self.name = 'int'
        self.minval = minval
        self.maxval = maxval
        if minval is None and maxval is None:
            domain = ''
        elif maxval is None:
            domain = '{} <= val'.format(minval)
        elif minval is None:
            domain = 'val < {}'.format(maxval)
        else:
            domain = '{} <= val < {}'.format(minval, maxval)
        self.description = 'int({})'.format(domain) if domain else 'int'
        self.error_message = 'Value must satisfy: {}'.format(domain) if domain else ''

    def convert(self, val_str):
        """Parse val_str and enforce the configured range."""
        try:
            val = self._convert(val_str)
        except (ValueError, AssertionError):
            self.fail(val_str, self.error_message)
        below = self.minval is not None and val < self.minval
        above = self.maxval is not None and val >= self.maxval
        if below or above:
            self.fail(val_str, self.error_message)
        return val

    def _convert(self, val):
        # int(val, 0) is avoided on purpose: it parses '011' as octal (9),
        # which is misleading for anyone unfamiliar with that convention.
        try:
            return int(val, 10)
        except ValueError:
            # Require an explicit 0x prefix, otherwise 'abcd' would parse
            # to 4391 and not obviously look like a number.
            assert '0x' in val.lower()
            return int(val, 16)

    def __repr__(self):
        return rep(self, 'minval', 'maxval')
class Or(Type):
    """Short-circuit union of types: the first type to convert wins.

    Nested Or instances are flattened, and bare strings are promoted to
    Keyword types.
    """

    def __init__(self, *types):
        flattened = []
        for typ in types:
            if isinstance(typ, Or):
                # Absorb nested unions so the type list stays flat.
                flattened.extend(typ.types)
                continue
            if isinstance(typ, str):
                typ = Keyword(typ)
            flattened.append(typ)
        self.types = flattened
        self.name = '|'.join(map(str, flattened))
        self.description = ' or '.join(t.description for t in flattened)

    def convert(self, val):
        """Try each member type in order; fail only if none accept val."""
        for typ in self.types:
            try:
                return typ(val)
            except ArgumentTypeError:
                continue
        self.fail(val, 'Must be {}'.format(self.description))

    def __repr__(self):
        return rep(self, 'types')
| |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: signal/v2/model.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import kik_unofficial.protobuf.protobuf_validation_pb2 as protobuf__validation__pb2
from kik_unofficial.protobuf.common.v2 import model_pb2 as common_dot_v2_dot_model__pb2
# Compiled FileDescriptor for signal/v2/model.proto. serialized_pb is the
# binary FileDescriptorProto emitted by protoc — machine-generated, never
# edit by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='signal/v2/model.proto',
  package='common.signal.v2',
  syntax='proto3',
  serialized_pb=_b('\n\x15signal/v2/model.proto\x12\x10\x63ommon.signal.v2\x1a\x19protobuf_validation.proto\x1a\x15\x63ommon/v2/model.proto\"\xda\x01\n\x06Signal\x12/\n\x02id\x18\x01 \x01(\x0b\x32\x1b.common.signal.v2.Signal.IdB\x06\xca\x9d%\x02\x08\x01\x12\x37\n\rglobal_signal\x18\x02 \x01(\x0b\x32\x1e.common.signal.v2.GlobalSignalH\x00\x12\x39\n\x0epersona_signal\x18\x03 \x01(\x0b\x32\x1f.common.signal.v2.PersonaSignalH\x00\x1a#\n\x02Id\x12\x1d\n\traw_value\x18\x01 \x01(\x0c\x42\n\xca\x9d%\x06\x08\x01(\x10\x30 B\x06\n\x04kind\"\x0e\n\x0cGlobalSignal\"A\n\rPersonaSignal\x12\x30\n\npersona_id\x18\x01 \x01(\x0b\x32\x14.common.v2.PersonaIdB\x06\xca\x9d%\x02\x08\x00\x42y\n\x15\x63om.kik.gen.signal.v2ZLgithub.com/kikinteractive/xiphias-model-common/generated/go/signal/v2;signal\xa2\x02\x11KPBCommonSignalV2b\x06proto3')
  ,
  dependencies=[protobuf__validation__pb2.DESCRIPTOR,common_dot_v2_dot_model__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Descriptor for the nested message common.signal.v2.Signal.Id (generated).
_SIGNAL_ID = _descriptor.Descriptor(
  name='Id',
  full_name='common.signal.v2.Signal.Id',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='raw_value', full_name='common.signal.v2.Signal.Id.raw_value', index=0,
      number=1, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\006\010\001(\0200 '))),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=269,
  serialized_end=304,
)
# Descriptor for common.signal.v2.Signal: an id plus a 'kind' oneof holding
# either global_signal or persona_signal (generated).
_SIGNAL = _descriptor.Descriptor(
  name='Signal',
  full_name='common.signal.v2.Signal',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='common.signal.v2.Signal.id', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))),
    _descriptor.FieldDescriptor(
      name='global_signal', full_name='common.signal.v2.Signal.global_signal', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='persona_signal', full_name='common.signal.v2.Signal.persona_signal', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_SIGNAL_ID, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    _descriptor.OneofDescriptor(
      name='kind', full_name='common.signal.v2.Signal.kind',
      index=0, containing_type=None, fields=[]),
  ],
  serialized_start=94,
  serialized_end=312,
)
# Descriptor for common.signal.v2.GlobalSignal — an empty message (generated).
_GLOBALSIGNAL = _descriptor.Descriptor(
  name='GlobalSignal',
  full_name='common.signal.v2.GlobalSignal',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=314,
  serialized_end=328,
)
# Descriptor for common.signal.v2.PersonaSignal: wraps a common.v2.PersonaId
# (generated).
_PERSONASIGNAL = _descriptor.Descriptor(
  name='PersonaSignal',
  full_name='common.signal.v2.PersonaSignal',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='persona_id', full_name='common.signal.v2.PersonaSignal.persona_id', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\000'))),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=330,
  serialized_end=395,
)
# Wire up cross-references between descriptors: nesting, message-typed
# fields, oneof membership, and registration on the file descriptor
# (generated; order matters).
_SIGNAL_ID.containing_type = _SIGNAL
_SIGNAL.fields_by_name['id'].message_type = _SIGNAL_ID
_SIGNAL.fields_by_name['global_signal'].message_type = _GLOBALSIGNAL
_SIGNAL.fields_by_name['persona_signal'].message_type = _PERSONASIGNAL
_SIGNAL.oneofs_by_name['kind'].fields.append(
  _SIGNAL.fields_by_name['global_signal'])
_SIGNAL.fields_by_name['global_signal'].containing_oneof = _SIGNAL.oneofs_by_name['kind']
_SIGNAL.oneofs_by_name['kind'].fields.append(
  _SIGNAL.fields_by_name['persona_signal'])
_SIGNAL.fields_by_name['persona_signal'].containing_oneof = _SIGNAL.oneofs_by_name['kind']
_PERSONASIGNAL.fields_by_name['persona_id'].message_type = common_dot_v2_dot_model__pb2._PERSONAID
DESCRIPTOR.message_types_by_name['Signal'] = _SIGNAL
DESCRIPTOR.message_types_by_name['GlobalSignal'] = _GLOBALSIGNAL
DESCRIPTOR.message_types_by_name['PersonaSignal'] = _PERSONASIGNAL
# Build the concrete Python message classes from the descriptors and
# register them with the symbol database (generated).
Signal = _reflection.GeneratedProtocolMessageType('Signal', (_message.Message,), dict(
  Id = _reflection.GeneratedProtocolMessageType('Id', (_message.Message,), dict(
    DESCRIPTOR = _SIGNAL_ID,
    __module__ = 'signal.v2.model_pb2'
    # @@protoc_insertion_point(class_scope:common.signal.v2.Signal.Id)
    ))
  ,
  DESCRIPTOR = _SIGNAL,
  __module__ = 'signal.v2.model_pb2'
  # @@protoc_insertion_point(class_scope:common.signal.v2.Signal)
  ))
_sym_db.RegisterMessage(Signal)
_sym_db.RegisterMessage(Signal.Id)

GlobalSignal = _reflection.GeneratedProtocolMessageType('GlobalSignal', (_message.Message,), dict(
  DESCRIPTOR = _GLOBALSIGNAL,
  __module__ = 'signal.v2.model_pb2'
  # @@protoc_insertion_point(class_scope:common.signal.v2.GlobalSignal)
  ))
_sym_db.RegisterMessage(GlobalSignal)

PersonaSignal = _reflection.GeneratedProtocolMessageType('PersonaSignal', (_message.Message,), dict(
  DESCRIPTOR = _PERSONASIGNAL,
  __module__ = 'signal.v2.model_pb2'
  # @@protoc_insertion_point(class_scope:common.signal.v2.PersonaSignal)
  ))
_sym_db.RegisterMessage(PersonaSignal)
# Attach serialized file- and field-level options (custom validation
# extensions) to the descriptors (generated).
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\025com.kik.gen.signal.v2ZLgithub.com/kikinteractive/xiphias-model-common/generated/go/signal/v2;signal\242\002\021KPBCommonSignalV2'))
_SIGNAL_ID.fields_by_name['raw_value'].has_options = True
_SIGNAL_ID.fields_by_name['raw_value']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\006\010\001(\0200 '))
_SIGNAL.fields_by_name['id'].has_options = True
_SIGNAL.fields_by_name['id']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))
_PERSONASIGNAL.fields_by_name['persona_id'].has_options = True
_PERSONASIGNAL.fields_by_name['persona_id']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\000'))
# @@protoc_insertion_point(module_scope)
| |
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from crm.views import (
CampaignCreate,
CampaignDelete,
CampaignEdit,
CampaignList,
Dashboard,
IndividualList,
IndividualDetail,
IndividualCreate,
IndividualEdit,
IndividualDelete,
IndividualAddOtherInfo,
IndividualAddressCreate,
IndividualAddressEdit,
IndividualAddressDelete,
IndividualEmailCreate,
IndividualEmailEdit,
IndividualEmailDelete,
IndividualPhoneCreate,
IndividualPhoneEdit,
IndividualPhoneDelete,
SourceCreate,
SourceDelete,
SourceEdit,
SourceList,
SourceTypeCreate,
SourceTypeDelete,
SourceTypeEdit,
SourceTypeList,
InboundContactList,
InboundContactDetail,
OutboundContactList,
OutboundContactDetail
)
# CRM URL configuration. Every view is wrapped in login_required.
urlpatterns = [
    url(
        r'^dashboard/$',
        login_required(Dashboard.as_view()),
        name='dashboard'
    ),
    # Campaign
    url(
        r'^campaigns/$',
        login_required(CampaignList.as_view()),
        name='campaign-list'
    ),
    url(
        r'^campaigns/create/$',
        login_required(CampaignCreate.as_view()),
        name='campaign-create'
    ),
    url(
        r'^campaigns/edit/(?P<pk>\d+)/$',
        login_required(CampaignEdit.as_view()),
        name='campaign-edit'
    ),
    url(
        r'^campaigns/delete/(?P<pk>\d+)/$',
        login_required(CampaignDelete.as_view()),
        name='campaign-delete'
    ),
    # Individuals
    url(
        r'^individuals/$',
        login_required(IndividualList.as_view()),
        name='individual-list'
    ),
    url(
        r'^individuals/(?P<pk>\d+)/$',
        login_required(IndividualDetail.as_view()),
        name='individual-detail'
    ),
    url(
        r'^individuals/create/$',
        login_required(IndividualCreate.as_view()),
        name='individual-create'
    ),
    url(
        r'^individuals/edit/(?P<pk>\d+)/$',
        login_required(IndividualEdit.as_view()),
        name='individual-edit'
    ),
    url(
        r'^individuals/delete/(?P<pk>\d+)/$',
        login_required(IndividualDelete.as_view()),
        name='individual-delete'
    ),
    url(
        r'^individuals/add-other-info/(?P<id>\d+)/$',
        login_required(IndividualAddOtherInfo.as_view()),
        name='individual-add-other-info'
    ),
    # Individual address
    url(
        r'^individuals/(?P<pk>\d+)/create/address/$',
        login_required(IndividualAddressCreate.as_view()),
        name='individual-create-address'
    ),
    url(
        # BUG FIX: was r'...edit(?P<pk>...' (missing '/'), inconsistent
        # with every other edit route.
        r'^individuals/address/edit/(?P<pk>\d+)/$',
        login_required(IndividualAddressEdit.as_view()),
        name='individual-edit-address'
    ),
    url(
        r'^individuals/address/delete/(?P<pk>\d+)/$',
        login_required(IndividualAddressDelete.as_view()),
        name='individual-delete-address'
    ),
    # Individual email
    url(
        r'^individuals/(?P<pk>\d+)/create/email/$',
        login_required(IndividualEmailCreate.as_view()),
        name='individual-create-email'
    ),
    url(
        # BUG FIX: was missing '/' after 'edit'.
        r'^individuals/email/edit/(?P<pk>\d+)/$',
        login_required(IndividualEmailEdit.as_view()),
        name='individual-edit-email'
    ),
    url(
        r'^individuals/email/delete/(?P<pk>\d+)/$',
        login_required(IndividualEmailDelete.as_view()),
        name='individual-delete-email'
    ),
    # Individual phone
    url(
        r'^individuals/(?P<pk>\d+)/create/phone/$',
        login_required(IndividualPhoneCreate.as_view()),
        name='individual-create-phone'
    ),
    url(
        # BUG FIX: was missing '/' after 'edit'.
        r'^individuals/phone/edit/(?P<pk>\d+)/$',
        login_required(IndividualPhoneEdit.as_view()),
        name='individual-edit-phone'
    ),
    url(
        r'^individuals/phone/delete/(?P<pk>\d+)/$',
        login_required(IndividualPhoneDelete.as_view()),
        name='individual-delete-phone'
    ),
    # Source type
    url(
        r'^source-types/$',
        login_required(SourceTypeList.as_view()),
        name='source-type-list'
    ),
    url(
        r'^source-types/create/$',
        login_required(SourceTypeCreate.as_view()),
        name='source-type-create'
    ),
    url(
        r'^source-types/edit/(?P<pk>\d+)/$',
        login_required(SourceTypeEdit.as_view()),
        name='source-type-edit'
    ),
    url(
        r'^source-types/delete/(?P<pk>\d+)/$',
        login_required(SourceTypeDelete.as_view()),
        name='source-type-delete'
    ),
    # Source
    url(
        r'^sources/$',
        login_required(SourceList.as_view()),
        name='source-list'
    ),
    url(
        r'^sources/create/$',
        login_required(SourceCreate.as_view()),
        name='source-create'
    ),
    url(
        r'^sources/edit/(?P<pk>\d+)/$',
        login_required(SourceEdit.as_view()),
        # BUG FIX: was name='campaign-edit' (copy-paste), which duplicated
        # the campaigns route name and broke reverse() for that name.
        name='source-edit'
    ),
    url(
        r'^sources/delete/(?P<pk>\d+)/$',
        login_required(SourceDelete.as_view()),
        name='source-delete'
    ),
    # Inbound/outbound contacts
    url(
        r'^inbound-contacts/$',
        login_required(InboundContactList.as_view()),
        name='inbound-contact-list'
    ),
    url(
        r'^inbound-contacts/(?P<pk>\d+)/$',
        login_required(InboundContactDetail.as_view()),
        name='inbound-contact-detail'
    ),
    url(
        r'^outbound-contacts/$',
        login_required(OutboundContactList.as_view()),
        name='outbound-contact-list'
    ),
    url(
        r'^outbound-contacts/(?P<pk>\d+)/$',
        login_required(OutboundContactDetail.as_view()),
        name='outbound-contact-detail'
    ),
]
| |
from sqlalchemy import (
MetaData, Table, Column, CheckConstraint, ForeignKeyConstraint, UniqueConstraint, Index,
Boolean, DateTime, Integer, String, Text, func, text)
from sqlalchemy.dialects.postgresql import ARRAY, BYTEA, ENUM, JSONB, TIMESTAMP
from sqlalchemy.schema import ForeignKey
from libweasyl.models.helpers import (
ArrowColumn, CharSettingsColumn, JSONValuesColumn, RatingColumn, WeasylTimestampColumn)
from libweasyl import constants
metadata = MetaData()
def default_fkey(*args, **kwargs):
    """Build a ForeignKeyConstraint that cascades ON UPDATE and ON DELETE.

    Positional and remaining keyword arguments are forwarded to
    ForeignKeyConstraint unchanged.
    """
    return ForeignKeyConstraint(*args, onupdate='CASCADE', ondelete='CASCADE', **kwargs)
# Per-user API tokens; (userid, token) composite primary key allows several
# tokens per user.
api_tokens = Table(
    'api_tokens', metadata,
    Column('userid', Integer(), primary_key=True, nullable=False),
    Column('token', String(length=64), primary_key=True, nullable=False),
    Column('description', String()),
    default_fkey(['userid'], ['login.userid'], name='api_tokens_userid_fkey'),
)

# Password hash per user (bcrypt, per table name — confirm).
authbcrypt = Table(
    'authbcrypt', metadata,
    Column('userid', Integer(), primary_key=True, nullable=False),
    Column('hashsum', String(length=100), nullable=False),
    default_fkey(['userid'], ['login.userid'], name='authbcrypt_userid_fkey'),
)

# Tags a user has blocked, together with a rating column.
blocktag = Table(
    'blocktag', metadata,
    Column('userid', Integer(), primary_key=True, nullable=False),
    Column('tagid', Integer(), primary_key=True, nullable=False),
    Column('rating', RatingColumn, nullable=False),
    default_fkey(['tagid'], ['searchtag.tagid'], name='blocktag_tagid_fkey'),
    default_fkey(['userid'], ['login.userid'], name='blocktag_userid_fkey'),
)
Index('ind_blocktag_userid', blocktag.c.userid)
# Comments on character submissions; parentid 0 (the server default)
# marks a top-level comment.
charcomment = Table(
    'charcomment', metadata,
    Column('commentid', Integer(), primary_key=True, nullable=False),
    Column('userid', Integer(), nullable=False),
    Column('targetid', Integer(), nullable=False),
    Column('parentid', Integer(), nullable=False, server_default='0'),
    Column('content', String(length=10000), nullable=False),
    Column('unixtime', WeasylTimestampColumn(), nullable=False),
    Column('settings', String(length=20), nullable=False, server_default=''),
    Column('hidden_by', Integer(), nullable=True),
    default_fkey(['targetid'], ['character.charid'], name='charcomment_targetid_fkey'),
    default_fkey(['userid'], ['login.userid'], name='charcomment_userid_fkey'),
    default_fkey(['hidden_by'], ['login.userid'], name='charcomment_hidden_by_fkey'),
)
Index('ind_charcomment_targetid_commentid', charcomment.c.targetid, charcomment.c.commentid)

# Submissions collected by users; one row per (user, submission).
collection = Table(
    'collection', metadata,
    Column('userid', Integer(), primary_key=True, nullable=False),
    Column('submitid', Integer(), primary_key=True, nullable=False),
    Column('unixtime', WeasylTimestampColumn(), nullable=False),
    Column('settings', String(length=20), nullable=False, server_default='p'),
    default_fkey(['userid'], ['login.userid'], name='collection_userid_fkey'),
    default_fkey(['submitid'], ['submission.submitid'], name='collection_submitid_fkey'),
)
Index('ind_collection_userid', collection.c.userid)
# Unified comments table targeting either a user shout (target_user) or a
# submission (target_sub); the check constraint enforces exactly one target.
comments = Table(
    'comments', metadata,
    Column('commentid', Integer(), primary_key=True),
    Column('userid', Integer(), nullable=False),
    Column('target_user', Integer(), nullable=True),
    Column('target_sub', Integer(), nullable=True),
    Column('parentid', Integer(), nullable=True),
    Column('content', Text(), nullable=False),
    Column('unixtime', WeasylTimestampColumn(), nullable=False),
    Column('settings', CharSettingsColumn({
        'h': 'hidden',
        's': 'staff-note',
    }, length=20), nullable=False, server_default=''),
    Column('hidden_by', Integer(), nullable=True),
    default_fkey(['userid'], ['login.userid'], name='comments_userid_fkey'),
    default_fkey(['target_user'], ['login.userid'], name='comments_target_user_fkey'),
    default_fkey(['target_sub'], ['submission.submitid'], name='comments_target_sub_fkey'),
    default_fkey(['parentid'], ['comments.commentid'], name='comments_parentid_fkey'),
    default_fkey(['hidden_by'], ['login.userid'], name='comments_hidden_by_fkey'),
    CheckConstraint('(target_user IS NOT NULL) != (target_sub IS NOT NULL)', name='comments_target_check'),
)
# Partial indexes: only rows where the respective target column is set.
Index('ind_comments_target_user_commentid', comments.c.target_user, comments.c.commentid, postgresql_where=comments.c.target_user != None)
Index('ind_comments_target_sub_commentid', comments.c.target_sub, comments.c.commentid, postgresql_where=comments.c.target_sub != None)
# Commission classes defined per user; titles are unique per user.
commishclass = Table(
    'commishclass', metadata,
    Column('classid', Integer(), primary_key=True, nullable=False),
    Column('userid', Integer(), primary_key=True, nullable=False),
    Column('title', String(length=100), nullable=False),
    default_fkey(['userid'], ['login.userid'], name='commishclass_userid_fkey'),
)
Index('ind_userid_title', commishclass.c.userid, commishclass.c.title, unique=True)
Index('ind_commishclass_userid', commishclass.c.userid)

# Free-form commission description text, one row per user.
commishdesc = Table(
    'commishdesc', metadata,
    Column('userid', Integer(), primary_key=True, nullable=False),
    Column('content', String(length=20000), nullable=False),
    default_fkey(['userid'], ['login.userid'], name='commishdesc_userid_fkey'),
)

# Price entries attached to a commission class, with a min/max amount range.
commishprice = Table(
    'commishprice', metadata,
    Column('priceid', Integer(), primary_key=True, nullable=False),
    Column('classid', Integer(), primary_key=True, nullable=False),
    Column('userid', Integer(), primary_key=True, nullable=False),
    Column('title', String(length=500), nullable=False),
    Column('amount_min', Integer(), nullable=False),
    Column('amount_max', Integer(), nullable=False),
    Column('settings', String(length=20), nullable=False, server_default=''),
    default_fkey(['userid'], ['login.userid'], name='commishprice_userid_fkey'),
)
Index('ind_classid_userid_title', commishprice.c.classid, commishprice.c.userid, commishprice.c.title, unique=True)
# Blocked email domains, with who added them and why.
emailblacklist = Table(
    'emailblacklist', metadata,
    Column('id', Integer(), primary_key=True, nullable=False),
    Column('added_by', Integer(), nullable=False),
    Column('domain_name', String(length=252), nullable=False, unique=True),
    Column('reason', Text(), nullable=False),
    default_fkey(['added_by'], ['login.userid'], name='emailblacklist_userid_fkey'),
)

# Pending email verification tokens, one per user.
emailverify = Table(
    'emailverify', metadata,
    Column('userid', Integer(), primary_key=True, nullable=False),
    Column('email', String(length=100), nullable=False, unique=True),
    Column('createtimestamp', DateTime(timezone=True), nullable=False, server_default=func.now()),
    Column('token', String(length=100), nullable=False),
    default_fkey(['userid'], ['login.userid'], name='emailverify_userid_fkey'),
)
Index('ind_emailverify_token', emailverify.c.token)

# Favorites keyed by (user, target, type); 'type' discriminates the kind of
# favorited object.
favorite = Table(
    'favorite', metadata,
    Column('userid', Integer(), primary_key=True, nullable=False),
    Column('targetid', Integer(), primary_key=True, nullable=False, autoincrement=False),
    Column('type', String(length=5), primary_key=True, nullable=False, server_default=''),
    Column('unixtime', WeasylTimestampColumn(), nullable=False),
    default_fkey(['userid'], ['login.userid'], name='favorite_userid_fkey'),
)
Index('ind_favorite_userid', favorite.c.userid)
Index('ind_favorite_type_targetid', favorite.c.type, favorite.c.targetid)
Index('ind_favorite_userid_type_unixtime', favorite.c.userid, favorite.c.type, favorite.c.unixtime)
# Submission folders per user, with flag characters encoding visibility
# and filtering behavior.
folder = Table(
    'folder', metadata,
    Column('folderid', Integer(), primary_key=True, nullable=False),
    Column('parentid', Integer(), nullable=False),
    Column('userid', Integer(), nullable=False),
    Column('title', String(length=100), nullable=False),
    Column('settings', CharSettingsColumn({
        'h': 'hidden',
        'n': 'no-notifications',
        'u': 'profile-filter',
        'm': 'index-filter',
        'f': 'featured-filter',
    }, length=20), nullable=False, server_default=''),
    default_fkey(['userid'], ['login.userid'], name='folder_userid_fkey'),
)
Index('ind_folder_userid', folder.c.userid)

# Password-reset tokens stored as SHA-256 digests.
forgotpassword = Table(
    'forgotpassword', metadata,
    Column('token_sha256', BYTEA(), primary_key=True, nullable=False),
    Column('email', String(length=254), nullable=False),
    Column('created_at', TIMESTAMP(timezone=True), nullable=False, server_default=func.now()),
)
Index('ind_forgotpassword_created_at', forgotpassword.c.created_at)

# Friendship edges; the 'p' settings flag (also the default) marks a
# pending request.
frienduser = Table(
    'frienduser', metadata,
    Column('userid', Integer(), primary_key=True, nullable=False),
    Column('otherid', Integer(), primary_key=True, nullable=False),
    Column('settings', CharSettingsColumn({
        'p': 'pending',
    }, length=20), nullable=False, server_default='p'),
    Column('created_at', TIMESTAMP(timezone=True), nullable=False, server_default=func.now()),
    default_fkey(['otherid'], ['login.userid'], name='frienduser_otherid_fkey'),
    default_fkey(['userid'], ['login.userid'], name='frienduser_userid_fkey'),
)
Index('ind_frienduser_otherid', frienduser.c.otherid)
Index('ind_frienduser_userid', frienduser.c.userid)
# Character submissions; note both legacy flag characters in 'settings'
# and the newer boolean columns ('hidden', 'friends_only') coexist here.
character = Table(
    'character', metadata,
    Column('charid', Integer(), primary_key=True, nullable=False),
    Column('userid', Integer(), nullable=False),
    Column('unixtime', WeasylTimestampColumn(), nullable=False),
    Column('char_name', String(length=100), nullable=False, server_default=''),
    Column('age', String(length=100), nullable=False, server_default=''),
    Column('gender', String(length=100), nullable=False, server_default=''),
    Column('height', String(length=100), nullable=False, server_default=''),
    Column('weight', String(length=100), nullable=False, server_default=''),
    Column('species', String(length=100), nullable=False, server_default=''),
    Column('content', String(length=100000), nullable=False, server_default=u""),
    Column('rating', RatingColumn, nullable=False),
    Column('settings', CharSettingsColumn({
        'h': 'hidden',
        'f': 'friends-only',
    }, length=20), nullable=False, server_default=''),
    Column('hidden', Boolean(), nullable=False, server_default='f'),
    Column('friends_only', Boolean(), nullable=False, server_default='f'),
    Column('page_views', Integer(), nullable=False, server_default='0'),
    default_fkey(['userid'], ['login.userid'], name='character_userid_fkey'),
)
Index('ind_character_userid', character.c.userid)

# External Google Docs embed URL for a submission.
google_doc_embeds = Table(
    'google_doc_embeds', metadata,
    Column('submitid', Integer(), primary_key=True, nullable=False),
    Column('embed_url', String(length=255), nullable=False),
    default_fkey(['submitid'], ['submission.submitid'], name='google_doc_embeds_submitid_fkey'),
)

# Users ignored by a user (directed edges).
ignoreuser = Table(
    'ignoreuser', metadata,
    Column('userid', Integer(), primary_key=True, nullable=False),
    Column('otherid', Integer(), primary_key=True, nullable=False),
    default_fkey(['userid'], ['login.userid'], name='ignoreuser_userid_fkey'),
    default_fkey(['otherid'], ['login.userid'], name='ignoreuser_otherid_fkey'),
)
Index('ind_ignoreuser_userid', ignoreuser.c.userid)
# Journal entries; records submitter IP/user-agent alongside the usual
# visibility flags.
journal = Table(
    'journal', metadata,
    Column('journalid', Integer(), primary_key=True, nullable=False),
    Column('userid', Integer(), nullable=False),
    Column('title', String(length=200), nullable=False),
    Column('content', String(length=100000), nullable=False),
    Column('rating', RatingColumn, nullable=False),
    Column('unixtime', WeasylTimestampColumn(), nullable=False),
    Column('settings', CharSettingsColumn({
        'h': 'hidden',
        'f': 'friends-only',
    }, length=20), nullable=False, server_default=''),
    Column('hidden', Boolean(), nullable=False, server_default='f'),
    Column('friends_only', Boolean(), nullable=False, server_default='f'),
    Column('page_views', Integer(), nullable=False, server_default='0'),
    Column('submitter_ip_address', String(length=45), nullable=True),
    Column('submitter_user_agent_id', Integer(), nullable=True),
    default_fkey(['userid'], ['login.userid'], name='journal_userid_fkey'),
    # Plain ForeignKeyConstraint (no cascade), unlike default_fkey above.
    ForeignKeyConstraint(
        ['submitter_user_agent_id'],
        ['user_agents.user_agent_id'],
        name="journal_user_agent_id_fkey",
    ),
)
Index('ind_journal_userid', journal.c.userid)

# Comments on journal entries; parentid 0 marks a top-level comment.
journalcomment = Table(
    'journalcomment', metadata,
    Column('commentid', Integer(), primary_key=True, nullable=False),
    Column('userid', Integer(), nullable=False),
    Column('targetid', Integer(), nullable=False),
    Column('parentid', Integer(), nullable=False, server_default='0'),
    Column('content', String(length=10000), nullable=False),
    Column('unixtime', WeasylTimestampColumn(), nullable=False),
    Column('settings', String(length=20), nullable=False, server_default=''),
    Column('hidden_by', Integer(), nullable=True),
    default_fkey(['targetid'], ['journal.journalid'], name='journalcomment_targetid_fkey'),
    default_fkey(['userid'], ['login.userid'], name='journalcomment_userid_fkey'),
    default_fkey(['hidden_by'], ['login.userid'], name='journalcomment_hidden_by_fkey'),
)
Index('ind_journalcomment_targetid_commentid', journalcomment.c.targetid, journalcomment.c.commentid)
login = Table(
'login', metadata,
Column('userid', Integer(), primary_key=True, nullable=False),
Column('login_name', String(length=40), nullable=False, unique=True),
Column('last_login', TIMESTAMP(timezone=True), nullable=False),
Column('email', String(length=100), nullable=False, server_default=''),
Column('twofa_secret', String(length=420), nullable=True),
# Must be nullable, since existing accounts will not have this information
Column('ip_address_at_signup', String(length=39), nullable=True),
Column('voucher', Integer, ForeignKey('login.userid'), nullable=True),
)
Index('ind_login_login_name', login.c.login_name)
Index('ind_login_lower_email', func.lower(login.c.login_name.collate('C')))
twofa_recovery_codes = Table(
'twofa_recovery_codes', metadata,
Column('userid', Integer(), nullable=False),
Column('recovery_code_hash', String(length=100), nullable=False),
default_fkey(['userid'], ['login.userid'], name='twofa_recovery_codes_userid_fkey'),
)
Index('ind_twofa_recovery_codes_userid', twofa_recovery_codes.c.userid)
# Pending account registrations, keyed by confirmation token.
logincreate = Table(
    'logincreate', metadata,
    Column('token', String(length=100), primary_key=True, nullable=False),
    Column('username', String(length=40), nullable=False),
    Column('login_name', String(length=40), nullable=False, unique=True),
    Column('hashpass', String(length=100), nullable=False),
    Column('email', String(length=100), nullable=False, unique=True),
    Column('birthday', WeasylTimestampColumn(), nullable=False),
    Column('created_at', TIMESTAMP(timezone=True), nullable=False, server_default=func.now()),
    # Used to determine if a record is invalid for purposes of plausible deniability of email addresses
    # AKA, create a logincreate entry if an in-use email address is provided, thus preserving the effect of
    # a pending username triggering a username taken error.
    Column('invalid', Boolean(), server_default='f', nullable=False),
    Column('invalid_email_addr', String(length=100), nullable=True, server_default=None),
    Column('ip_address_signup_request', String(length=39), nullable=True),
)
# Uploaded media files; deduplicated via the sha256 digest below.
media = Table(
    'media', metadata,
    Column('mediaid', Integer(), primary_key=True, nullable=False),
    Column('file_type', String(length=8), nullable=False),
    # Defaults to an empty hstore value.
    Column('attributes', JSONValuesColumn(), nullable=False, server_default=text(u"''::hstore")),
    Column('sha256', String(length=64)),
)
Index('ind_media_sha256', media.c.sha256)
# Private notes between users; each row tracks folder placement for both parties.
message = Table(
    'message', metadata,
    Column('noteid', Integer(), primary_key=True, nullable=False),
    Column('userid', Integer(), nullable=False),
    Column('otherid', Integer(), nullable=False),
    Column('user_folder', Integer(), nullable=False, server_default='0'),
    Column('other_folder', Integer(), nullable=False, server_default='0'),
    Column('title', String(length=100), nullable=False),
    Column('content', String(length=100000), nullable=False),
    Column('unixtime', WeasylTimestampColumn(), nullable=False),
    Column('settings', String(length=20), nullable=False, server_default='u'),
    default_fkey(['otherid'], ['login.userid'], name='message_otherid_fkey'),
    default_fkey(['userid'], ['login.userid'], name='message_userid_fkey'),
)
Index('ind_message_otherid_noteid', message.c.otherid, message.c.noteid)
Index('ind_message_userid_noteid', message.c.userid, message.c.noteid)
# Issued OAuth2 bearer tokens; access/refresh tokens are unique.
oauth_bearer_tokens = Table(
    'oauth_bearer_tokens', metadata,
    Column('id', Integer(), primary_key=True, nullable=False),
    Column('clientid', String(length=32), nullable=False),
    Column('userid', Integer(), nullable=False),
    Column('scopes', ARRAY(Text()), nullable=False),
    Column('access_token', String(length=64), nullable=False, unique=True),
    Column('refresh_token', String(length=64), nullable=False, unique=True),
    Column('expires_at', ArrowColumn(), nullable=False),
    default_fkey(['clientid'], ['oauth_consumers.clientid'], name='oauth_bearer_tokens_clientid_fkey'),
    default_fkey(['userid'], ['login.userid'], name='oauth_bearer_tokens_userid_fkey'),
)
# Registered OAuth2 client applications.
oauth_consumers = Table(
    'oauth_consumers', metadata,
    Column('clientid', String(length=32), primary_key=True, nullable=False),
    Column('description', Text(), nullable=False),
    Column('ownerid', Integer(), nullable=False),
    Column('grant_type', String(length=32), nullable=False),
    Column('response_type', String(length=32), nullable=False),
    Column('scopes', ARRAY(Text()), nullable=False),
    Column('redirect_uris', ARRAY(Text()), nullable=False),
    Column('client_secret', String(length=64), nullable=False),
    default_fkey(['ownerid'], ['login.userid'], name='oauth_consumers_owner_fkey'),
)
# Permanent bans, one row per banned user.
permaban = Table(
    'permaban', metadata,
    Column('userid', Integer(), primary_key=True, nullable=False),
    Column('reason', Text(), nullable=False),
    default_fkey(['userid'], ['login.userid'], name='permaban_userid_fkey'),
)
# Per-user allowlist of note senders (composite PK of recipient + sender).
permitted_senders = Table(
    'permitted_senders', metadata,
    Column('userid', Integer(), primary_key=True),
    Column('sender', Integer(), primary_key=True),
    default_fkey(['userid'], ['login.userid'], name='permitted_senders_userid_fkey'),
    default_fkey(['sender'], ['login.userid'], name='permitted_senders_sender_fkey'),
)
# Public profile data, one-to-one with login.
profile = Table(
    'profile', metadata,
    Column('userid', Integer(), primary_key=True, nullable=False),
    Column('username', String(length=40), nullable=False, unique=True),
    Column('full_name', String(length=100), nullable=False),
    Column('catchphrase', String(length=200), nullable=False, server_default=''),
    Column('artist_type', String(length=100), nullable=False, server_default=''),
    Column('created_at', TIMESTAMP(timezone=True), nullable=False, server_default=func.now()),
    Column('latest_submission_time', ArrowColumn(), nullable=False, server_default='epoch'),
    Column('profile_text', String(length=100000), nullable=False, server_default=''),
    Column('settings', String(length=20), nullable=False, server_default='ccci'),
    Column('stream_url', String(length=500), nullable=False, server_default=''),
    Column('page_views', Integer(), nullable=False, server_default='0'),
    # Packed single-character preference flags plus two enumerated groups
    # ('tagging-level', 'thumbnail-bar'); decoding is handled by
    # CharSettingsColumn.
    Column('config', CharSettingsColumn({
        'b': 'show-birthday',
        '2': '12-hour-time',
        'g': 'tagging-disabled',
        'd': 'premium',
        'u': 'hide-favorites-bar',
        'v': 'hide-favorites',
        'w': 'staff-shouts-only',
        'x': 'friend-shouts-only',
        'y': 'staff-notes-only',
        'z': 'friend-notes-only',
        'h': 'hide-profile-from-guests',
        'i': 'hide-profile-stats',
        'k': 'disallow-others-tag-removal',
        's': 'watch-user-submissions',
        'c': 'watch-user-collections',
        'f': 'watch-user-characters',
        't': 'watch-user-stream-status',
        'j': 'watch-user-journals',
    }, {
        'tagging-level': {
            'a': 'max-rating-mature',
            'p': 'max-rating-explicit',
        },
        'thumbnail-bar': {
            'O': 'collections',
            'A': 'characters',
        },
    }, length=50), nullable=False, server_default=''),
    Column('jsonb_settings', JSONB()),
    Column('stream_text', String(length=2000)),
    default_fkey(['userid'], ['login.userid'], name='profile_userid_fkey'),
)
# Moderation reports. Exactly one of the target_* columns is set (enforced by
# report_target_check below), selecting what kind of content was reported.
report = Table(
    'report', metadata,
    Column('target_user', Integer(), nullable=True),
    Column('target_sub', Integer(), nullable=True),
    Column('target_char', Integer(), nullable=True),
    Column('target_journal', Integer(), nullable=True),
    Column('target_comment', Integer(), nullable=True),
    Column('opened_at', ArrowColumn(), nullable=False),
    Column('urgency', Integer(), nullable=False),
    Column('closerid', Integer(), nullable=True),
    Column('settings', CharSettingsColumn({
        'r': 'under-review',
    }, length=20), nullable=False, server_default=''),
    Column('reportid', Integer(), primary_key=True, nullable=False),
    Column('closed_at', ArrowColumn(), nullable=True),
    # Stored as the enum members' .value strings via values_callable.
    Column('closure_reason',
           ENUM(constants.ReportClosureReason,
                name='report_closure_reason',
                metadata=metadata,
                validate_strings=True,
                values_callable=lambda enum_cls: [e.value for e in enum_cls]),
           nullable=True),
    Column('closure_explanation', Text(), nullable=True),
    default_fkey(['target_user'], ['login.userid'], name='report_target_user_fkey'),
    default_fkey(['target_sub'], ['submission.submitid'], name='report_target_sub_fkey'),
    default_fkey(['target_char'], ['character.charid'], name='report_target_char_fkey'),
    default_fkey(['target_journal'], ['journal.journalid'], name='report_target_journal_fkey'),
    default_fkey(['target_comment'], ['comments.commentid'], name='report_target_comment_fkey'),
    default_fkey(['closerid'], ['login.userid'], name='report_closerid_fkey'),
    # Exactly one target column may be non-null.
    CheckConstraint(
        '((target_user IS NOT NULL)::int + (target_sub IS NOT NULL)::int '
        ' + (target_char IS NOT NULL)::int + (target_journal IS NOT NULL)::int '
        ' + (target_comment IS NOT NULL)::int) = 1',
        name='report_target_check'),
    # The three closure fields are all set or all null (0 or 3 non-null).
    CheckConstraint(
        '((closed_at IS NOT NULL)::int + (closure_reason IS NOT NULL)::int '
        ' + (closure_explanation IS NOT NULL)::int) IN (0, 3)',
        name='report_closure_check'),
)
# Individual comments attached to a report.
reportcomment = Table(
    'reportcomment', metadata,
    Column('violation', Integer(), nullable=False),
    Column('userid', Integer(), nullable=False),
    Column('unixtime', WeasylTimestampColumn(), nullable=False),
    Column('content', String(length=2000), nullable=False, server_default=''),
    Column('commentid', Integer(), primary_key=True, nullable=False),
    Column('reportid', Integer(), nullable=False),
    default_fkey(['userid'], ['login.userid'], name='reportcomment_userid_fkey'),
    default_fkey(['reportid'], ['report.reportid'], name='reportcomment_reportid_fkey'),
)
# Tag <-> content association tables. Each maps searchtag.tagid onto one
# content type via a composite (tagid, targetid) primary key.
searchmapchar = Table(
    'searchmapchar', metadata,
    Column('tagid', Integer(), primary_key=True, nullable=False),
    Column('targetid', Integer(), primary_key=True, nullable=False),
    Column('settings', String(), nullable=False, server_default=''),
    default_fkey(['targetid'], ['character.charid'], name='searchmapchar_targetid_fkey'),
    default_fkey(['tagid'], ['searchtag.tagid'], name='searchmapchar_tagid_fkey'),
)
Index('ind_searchmapchar_tagid', searchmapchar.c.tagid)
Index('ind_searchmapchar_targetid', searchmapchar.c.targetid)
searchmapjournal = Table(
    'searchmapjournal', metadata,
    Column('tagid', Integer(), primary_key=True, nullable=False),
    Column('targetid', Integer(), primary_key=True, nullable=False),
    Column('settings', String(), nullable=False, server_default=''),
    default_fkey(['targetid'], ['journal.journalid'], name='searchmapjournal_targetid_fkey'),
    default_fkey(['tagid'], ['searchtag.tagid'], name='searchmapjournal_tagid_fkey'),
)
Index('ind_searchmapjournal_targetid', searchmapjournal.c.targetid)
Index('ind_searchmapjournal_tagid', searchmapjournal.c.tagid)
searchmapsubmit = Table(
    'searchmapsubmit', metadata,
    Column('tagid', Integer(), primary_key=True, nullable=False),
    Column('targetid', Integer(), primary_key=True, nullable=False),
    # 'a' flags a tag applied by the submission's own artist.
    Column('settings', CharSettingsColumn({
        'a': 'artist-tag',
    }), nullable=False, server_default=''),
    default_fkey(['targetid'], ['submission.submitid'], name='searchmapsubmit_targetid_fkey'),
    default_fkey(['tagid'], ['searchtag.tagid'], name='searchmapsubmit_tagid_fkey'),
)
Index('ind_searchmapsubmit_tagid', searchmapsubmit.c.tagid)
Index('ind_searchmapsubmit_targetid', searchmapsubmit.c.targetid)
# Per-artist tag preference/opt-out lists; targetid here is a userid.
artist_preferred_tags = Table(
    'artist_preferred_tags', metadata,
    Column('tagid', Integer(), primary_key=True, nullable=False),
    Column('targetid', Integer(), primary_key=True, nullable=False),
    Column('settings', String(), nullable=False, server_default=''),
    default_fkey(['targetid'], ['login.userid'], name='artist_preferred_tags_targetid_fkey'),
    default_fkey(['tagid'], ['searchtag.tagid'], name='artist_preferred_tags_tagid_fkey'),
)
Index('ind_artist_preferred_tags_tagid', artist_preferred_tags.c.tagid)
Index('ind_artist_preferred_tags_targetid', artist_preferred_tags.c.targetid)
artist_optout_tags = Table(
    'artist_optout_tags', metadata,
    Column('tagid', Integer(), primary_key=True, nullable=False),
    Column('targetid', Integer(), primary_key=True, nullable=False),
    Column('settings', String(), nullable=False, server_default=''),
    default_fkey(['targetid'], ['login.userid'], name='artist_optout_tags_targetid_fkey'),
    default_fkey(['tagid'], ['searchtag.tagid'], name='artist_optout_tags_tagid_fkey'),
)
Index('ind_artist_optout_tags_tagid', artist_optout_tags.c.tagid)
Index('ind_artist_optout_tags_targetid', artist_optout_tags.c.targetid)
# Tag restrictions: site-wide (set by staff) and per-user variants.
globally_restricted_tags = Table(
    'globally_restricted_tags', metadata,
    Column('tagid', Integer(), primary_key=True, nullable=False),
    Column('userid', Integer(), primary_key=True, nullable=False),
    default_fkey(['userid'], ['login.userid'], name='globally_restricted_tags_userid_fkey'),
    default_fkey(['tagid'], ['searchtag.tagid'], name='globally_restricted_tags_tagid_fkey'),
)
Index('ind_globally_restricted_tags_tagid', globally_restricted_tags.c.tagid)
Index('ind_globally_restricted_tags_userid', globally_restricted_tags.c.userid)
user_restricted_tags = Table(
    'user_restricted_tags', metadata,
    Column('tagid', Integer(), primary_key=True, nullable=False),
    Column('userid', Integer(), primary_key=True, nullable=False),
    default_fkey(['userid'], ['login.userid'], name='user_restricted_tags_userid_fkey'),
    default_fkey(['tagid'], ['searchtag.tagid'], name='user_restricted_tags_tagid_fkey'),
)
Index('ind_user_restricted_tags_tagid', user_restricted_tags.c.tagid)
Index('ind_user_restricted_tags_userid', user_restricted_tags.c.userid)
# Canonical tag names referenced by the searchmap* tables.
searchtag = Table(
    'searchtag', metadata,
    Column('tagid', Integer(), primary_key=True, nullable=False),
    Column('title', String(length=162), nullable=False, unique=True),
)
# NOTE(review): this index duplicates the implicit primary-key index on
# tagid; presumably historical — candidate for removal via migration.
Index('ind_searchtag_tagid', searchtag.c.tagid)
# Browser sessions; guest sessions (userid NULL) must carry additional_data
# (enforced by sessions_no_guest_check).
sessions = Table(
    'sessions', metadata,
    Column('sessionid', String(length=64), primary_key=True, nullable=False),
    Column('created_at', ArrowColumn(), nullable=False, server_default=text('now()')),
    Column('last_active', TIMESTAMP(timezone=True), nullable=True, server_default=func.now()),
    Column('userid', Integer()),
    # Defaults to an empty hstore value.
    Column('additional_data', JSONValuesColumn(), nullable=False, server_default=text(u"''::hstore")),
    Column('ip_address', String(length=39), nullable=True),
    Column('user_agent_id', Integer(), nullable=True),
    default_fkey(['userid'], ['login.userid'], name='sessions_userid_fkey'),
    default_fkey(['user_agent_id'], ['user_agents.user_agent_id'], name='sessions_user_agent_id_fkey'),
    CheckConstraint("userid IS NOT NULL OR additional_data != ''", name='sessions_no_guest_check'),
)
Index('ind_sessions_created_at', sessions.c.created_at)
Index('ind_sessions_last_active', sessions.c.last_active)
Index('ind_sessions_userid', sessions.c.userid)
# Interned user-agent strings, deduplicated via the unique constraint.
user_agents = Table(
    'user_agents', metadata,
    Column('user_agent_id', Integer(), primary_key=True, nullable=False),
    Column('user_agent', String(length=1024), nullable=False, unique=True),
)
# Per-user audit/event log with a free-form JSONB payload.
user_events = Table(
    'user_events', metadata,
    Column('eventid', Integer(), primary_key=True, nullable=False),
    Column('userid', Integer(), ForeignKey('login.userid'), nullable=False),
    Column('event', String(length=100), nullable=False),
    Column('data', JSONB(), nullable=False),
    Column('occurred', TIMESTAMP(timezone=True), nullable=False, server_default=func.now()),
)
Index('ind_user_events_userid_eventid', user_events.c.userid, user_events.c.eventid)
siteupdate = Table(
'siteupdate', metadata,
Column('updateid', Integer(), primary_key=True, nullable=False),
Column('userid', Integer(), nullable=False),
Column('wesley', Boolean(), nullable=False, server_default='f'),
Column('title', String(length=100), nullable=False),
Column('content', Text(), nullable=False),
Column('unixtime', WeasylTimestampColumn(), nullable=False),
default_fkey(['userid'], ['login.userid'], name='siteupdate_userid_fkey'),
)
siteupdatecomment = Table(
'siteupdatecomment', metadata,
Column('commentid', Integer(), primary_key=True, nullable=False),
Column('userid', Integer(), nullable=False),
Column('targetid', Integer(), nullable=False),
Column('parentid', Integer(), nullable=True),
Column('content', String(length=10000), nullable=False),
Column('created_at', TIMESTAMP(timezone=True), nullable=False, server_default=func.now()),
Column('hidden_at', TIMESTAMP(timezone=True), nullable=True),
Column('hidden_by', Integer(), nullable=True),
ForeignKeyConstraint(['targetid'], ['siteupdate.updateid'], name='siteupdatecomment_targetid_fkey'),
ForeignKeyConstraint(
['targetid', 'parentid'],
['siteupdatecomment.targetid', 'siteupdatecomment.commentid'],
name='siteupdatecomment_parentid_fkey'),
ForeignKeyConstraint(['userid'], ['login.userid'], name='siteupdatecomment_userid_fkey'),
ForeignKeyConstraint(['hidden_by'], ['login.userid'], name='siteupdatecomment_hidden_by_fkey', ondelete='SET NULL'),
CheckConstraint("hidden_by IS NULL OR hidden_at IS NOT NULL", name='siteupdatecomment_hidden_check'),
UniqueConstraint('targetid', 'commentid'),
)
submission = Table(
'submission', metadata,
Column('submitid', Integer(), primary_key=True, nullable=False),
Column('folderid', Integer(), nullable=True),
Column('userid', Integer(), nullable=False),
Column('unixtime', WeasylTimestampColumn(), nullable=False),
Column('title', String(length=200), nullable=False),
Column('content', String(length=300000), nullable=False),
Column('subtype', Integer(), nullable=False),
Column('rating', RatingColumn, nullable=False),
Column('page_views', Integer(), nullable=False, server_default='0'),
Column('hidden', Boolean(), nullable=False, server_default='f'),
Column('friends_only', Boolean(), nullable=False, server_default='f'),
Column('critique', Boolean(), nullable=False, server_default='f'),
Column('embed_type', ENUM('google-drive', 'other', name="embed_types"), nullable=True),
Column('favorites', Integer(), nullable=False),
Column('submitter_ip_address', String(length=45), nullable=True),
Column('submitter_user_agent_id', Integer(), nullable=True),
default_fkey(['userid'], ['login.userid'], name='submission_userid_fkey'),
default_fkey(['folderid'], ['folder.folderid'], name='submission_folderid_fkey'),
ForeignKeyConstraint(
['submitter_user_agent_id'],
['user_agents.user_agent_id'],
name="submission_agent_id_fkey",
),
Index(
'ind_submission_score',
text("""(
log(favorites + 1)
+ log(page_views + 1) / 2
+ unixtime / 180000.0
)"""),
),
)
Index('ind_submission_folderid', submission.c.folderid)
Index('ind_submission_userid_submitid', submission.c.userid, submission.c.submitid)
Index('ind_submission_userid_folderid_submitid', submission.c.userid, submission.c.folderid, submission.c.submitid, postgresql_where=submission.c.folderid.isnot(None))
Index('ind_submission_submitid_critique', submission.c.submitid, postgresql_where=submission.c.critique & ~submission.c.hidden)
submission_media_links = Table(
'submission_media_links', metadata,
Column('linkid', Integer(), primary_key=True, nullable=False),
Column('mediaid', Integer(), nullable=False),
Column('submitid', Integer(), nullable=False),
Column('link_type', String(length=32), nullable=False),
default_fkey(['submitid'], ['submission.submitid'], name='submission_media_links_submitid_fkey'),
default_fkey(['mediaid'], ['media.mediaid'], name='submission_media_links_mediaid_fkey'),
)
Index('ind_submission_media_links_submitid', submission_media_links.c.submitid)
Index('ind_submission_media_links_mediaid', submission_media_links.c.mediaid, unique=False)
submission_tags = Table(
'submission_tags', metadata,
Column('submitid', Integer(), primary_key=True, nullable=False),
Column('tags', ARRAY(Integer()), nullable=False),
default_fkey(['submitid'], ['submission.submitid'], name='submission_tags_submitid_fkey'),
)
Index('ind_submission_tags_tags', submission_tags.c.tags, postgresql_using='gin')
suspension = Table(
'suspension', metadata,
Column('userid', Integer(), primary_key=True, nullable=False),
Column('reason', Text(), nullable=False),
Column('release', Integer(), nullable=False),
default_fkey(['userid'], ['login.userid'], name='suspension_userid_fkey'),
)
tag_updates = Table(
'tag_updates', metadata,
Column('updateid', Integer(), primary_key=True),
Column('submitid', Integer(), nullable=False),
Column('userid', Integer(), nullable=False),
Column('added', ARRAY(Text())),
Column('removed', ARRAY(Text())),
Column('updated_at', Integer(), nullable=False,
server_default=text(u"(date_part('epoch'::text, now()) - (18000)::double precision)")),
default_fkey(['submitid'], ['submission.submitid'], name='tag_updates_submitid_fkey'),
default_fkey(['userid'], ['login.userid'], name='tag_updates_userid_fkey'),
)
Index('ind_tag_updates_submitid', tag_updates.c.submitid)
user_media_links = Table(
'user_media_links', metadata,
Column('linkid', Integer(), primary_key=True, nullable=False),
Column('mediaid', Integer(), nullable=False),
Column('userid', Integer(), nullable=False),
Column('link_type', String(length=32), nullable=False),
default_fkey(['userid'], ['login.userid'], name='user_media_links_userid_fkey'),
default_fkey(['mediaid'], ['media.mediaid'], name='user_media_links_mediaid_fkey'),
)
Index('ind_user_media_links_submitid', user_media_links.c.userid)
Index('ind_user_media_links_userid', user_media_links.c.userid)
Index('ind_user_media_links_mediaid', user_media_links.c.mediaid, unique=False)
user_links = Table(
'user_links', metadata,
Column('linkid', Integer(), primary_key=True, nullable=False),
Column('userid', Integer(), nullable=False),
Column('link_type', String(length=64), nullable=False),
Column('link_value', String(length=2000), nullable=False),
default_fkey(['userid'], ['login.userid'], name='user_links_userid_fkey'),
)
Index('ind_user_links_userid', user_links.c.userid)
user_streams = Table(
'user_streams', metadata,
Column('userid', Integer(), primary_key=True, nullable=False),
Column('start_time', Integer(), nullable=False),
Column('end_time', Integer(), nullable=False),
default_fkey(['userid'], ['login.userid'], name='user_streams_userid_fkey'),
)
Index('ind_user_streams_end', user_streams.c.end_time)
Index('ind_user_streams_end_time', user_streams.c.end_time)
user_timezones = Table(
'user_timezones', metadata,
Column('userid', Integer(), primary_key=True, nullable=False),
Column('timezone', String(length=255), nullable=False),
default_fkey(['userid'], ['login.userid'], name='user_timezones_userid_fkey'),
)
useralias = Table(
'useralias', metadata,
Column('userid', Integer(), primary_key=True, nullable=False),
Column('alias_name', String(length=40), primary_key=True, nullable=False),
Column('settings', String(), nullable=False, server_default=''),
default_fkey(['userid'], ['login.userid'], name='useralias_userid_fkey'),
)
userinfo = Table(
'userinfo', metadata,
Column('userid', Integer(), primary_key=True, nullable=False),
Column('birthday', WeasylTimestampColumn(), nullable=False),
Column('gender', String(length=100), nullable=False, server_default=''),
Column('country', String(length=50), nullable=False, server_default=''),
default_fkey(['userid'], ['login.userid'], name='userinfo_userid_fkey'),
)
username_history = Table(
'username_history', metadata,
Column('historyid', Integer(), primary_key=True, nullable=False),
Column('userid', Integer(), ForeignKey('login.userid'), nullable=False),
Column('username', String(length=25), nullable=False),
Column('login_name', String(length=25), nullable=False),
Column('replaced_at', TIMESTAMP(timezone=True), nullable=False),
Column('replaced_by', Integer(), ForeignKey('login.userid'), nullable=False),
Column('active', Boolean(), nullable=False),
Column('deactivated_at', TIMESTAMP(timezone=True), nullable=True),
Column('deactivated_by', Integer(), ForeignKey('login.userid'), nullable=True),
# true if the username changed but the login_name didn't
Column('cosmetic', Boolean(), nullable=False),
CheckConstraint("username !~ '[^ -~]' AND username !~ ';'", name='username_history_username_check'),
# TODO: replace with generated column once on PostgreSQL 12
CheckConstraint("login_name = lower(regexp_replace(username, '[^0-9A-Za-z]', '', 'g'))", name='username_history_login_name_check'),
CheckConstraint("(active OR cosmetic) = (deactivated_at IS NULL) AND (active OR cosmetic) = (deactivated_by IS NULL)", name='username_history_active_check'),
CheckConstraint("NOT (cosmetic AND active)", name='username_history_cosmetic_inactive_check'),
)
# enforces one active redirect per user
Index('ind_username_history_userid', username_history.c.userid, postgresql_where=username_history.c.active, unique=True)
# enforces that active redirects have unique usernames within this table, although they also need to be unique in all of login, logincreate, useralias, and username_history together
Index('ind_username_history_login_name', username_history.c.login_name, postgresql_where=username_history.c.active, unique=True)
# lookup for a user's most recent change
Index('ind_username_history_userid_historyid', username_history.c.userid, username_history.c.historyid, postgresql_where=~username_history.c.cosmetic, unique=True)
# View tracking; the composite PK (viewer, targetid, type) deduplicates views.
# autoincrement=False stops SQLAlchemy treating the integer PK parts as serial.
views = Table(
    'views', metadata,
    Column('viewer', String(length=127), primary_key=True, nullable=False),
    Column('targetid', Integer(), primary_key=True, nullable=False, autoincrement=False),
    Column('type', Integer(), primary_key=True, nullable=False, autoincrement=False),
    Column('viewed_at', TIMESTAMP(timezone=True), nullable=False, server_default=func.now()),
)
Index('ind_views_viewed_at', views.c.viewed_at)
# Follow relationships: userid watches otherid.
watchuser = Table(
    'watchuser', metadata,
    Column('userid', Integer(), primary_key=True, nullable=False),
    Column('otherid', Integer(), primary_key=True, nullable=False),
    Column('settings', String(length=20), nullable=False),
    Column('created_at', TIMESTAMP(timezone=True), nullable=False, server_default=func.now()),
    default_fkey(['otherid'], ['login.userid'], name='watchuser_otherid_fkey'),
    default_fkey(['userid'], ['login.userid'], name='watchuser_userid_fkey'),
)
Index('ind_watchuser_userid', watchuser.c.userid)
Index('ind_watchuser_settings', watchuser.c.settings)
Index('ind_watchuser_userid_settings', watchuser.c.userid, watchuser.c.settings)
Index('ind_watchuser_otherid', watchuser.c.otherid)
Index('ind_watchuser_otherid_settings', watchuser.c.otherid, watchuser.c.settings)
# Notification queue; referid/targetid meanings depend on the 'type' value
# (resolved by application code not visible here).
welcome = Table(
    'welcome', metadata,
    Column('welcomeid', Integer(), primary_key=True, nullable=False),
    Column('userid', Integer(), nullable=False),
    Column('otherid', Integer(), nullable=False),
    Column('referid', Integer(), nullable=False, server_default='0'),
    Column('targetid', Integer(), nullable=False, server_default='0'),
    Column('unixtime', WeasylTimestampColumn(), nullable=False),
    Column('type', Integer(), nullable=False),
    Column('deleted', ArrowColumn()),
    default_fkey(['userid'], ['login.userid'], name='welcome_userid_fkey'),
)
Index('ind_welcome_otherid', welcome.c.otherid)
Index('ind_welcome_referid', welcome.c.referid)
Index('ind_welcome_targetid', welcome.c.targetid)
Index('ind_welcome_type', welcome.c.type)
Index('ind_welcome_unixtime', welcome.c.unixtime)
Index('ind_welcome_userid_type', welcome.c.userid, welcome.c.type)
# Cached per-user unread counts by notification category.
welcomecount = Table(
    'welcomecount', metadata,
    Column('userid', Integer(), primary_key=True, nullable=False),
    Column('journal', Integer(), nullable=False, server_default='0'),
    Column('submit', Integer(), nullable=False, server_default='0'),
    Column('notify', Integer(), nullable=False, server_default='0'),
    Column('comment', Integer(), nullable=False, server_default='0'),
    Column('note', Integer(), nullable=False, server_default='0'),
    default_fkey(['userid'], ['login.userid'], name='welcomecount_userid_fkey'),
)
Index('ind_welcomecount_userid', welcomecount.c.userid)
| |
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Miscellaneous utility functions."""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import os
import sys
import re
import shutil
import fnmatch
from collections import Counter
import traceback
import subprocess
import platform
import shlex
# Cap on filename component length. NOTE(review): the consumer is not
# visible in this chunk — presumably the path-sanitizing helpers; confirm.
MAX_FILENAME_LENGTH = 200
# Windows extended-length path prefix (\\?\), which lifts the MAX_PATH limit
# when prepended to an absolute path.
WINDOWS_MAGIC_PREFIX = u'\\\\?\\'
class HumanReadableException(Exception):
    """An Exception carrying a human-readable message suitable for logging
    without a traceback, while optionally preserving one for debugging.

    Fields: `reason`, the underlying exception or a string describing the
    problem; `verb`, the action being performed during the error; and
    optionally `tb`, a string containing a traceback for the associated
    exception. (The stored traceback is unnecessary in Python 3.x and
    should be removed when we make the transition.)
    """
    # Human-readable description of the error type; subclasses may override.
    error_kind = 'Error'

    def __init__(self, reason, verb, tb=None):
        self.reason = reason
        self.verb = verb
        self.tb = tb
        super(HumanReadableException, self).__init__(self.get_message())

    def _gerund(self):
        """Generate a (likely) gerund form of the English verb.

        Multi-word verbs are returned unchanged; otherwise a trailing 'e'
        is dropped before appending 'ing' ('move' -> 'moving').
        """
        if ' ' in self.verb:
            return self.verb
        stem = self.verb
        if stem.endswith('e'):
            stem = stem[:-1]
        return stem + 'ing'

    def _reasonstr(self):
        """Get the reason as a string."""
        if isinstance(self.reason, unicode):
            return self.reason
        elif isinstance(self.reason, basestring):  # Byte string.
            return self.reason.decode('utf8', 'ignore')
        elif hasattr(self.reason, 'strerror'):  # i.e., EnvironmentError
            return self.reason.strerror
        else:
            return u'"{0}"'.format(unicode(self.reason))

    def get_message(self):
        """Create the human-readable description of the error, sans
        introduction. Subclasses must implement this.
        """
        raise NotImplementedError

    def log(self, logger):
        """Log to the provided `logger` a human-readable message as an
        error and a verbose traceback as a debug message.
        """
        if self.tb:
            logger.debug(self.tb)
        # args[0] is the message built by __init__ via get_message().
        logger.error(u'{0}: {1}', self.error_kind, self.args[0])
class FilesystemError(HumanReadableException):
    """An error that occurred while performing a filesystem manipulation
    via a function in this module. The `paths` field is a sequence of
    pathnames involved in the operation.
    """
    def __init__(self, reason, verb, paths, tb=None):
        self.paths = paths
        super(FilesystemError, self).__init__(reason, verb, tb)

    def get_message(self):
        """Build the error description, picking an English phrasing that
        matches the verb's arity (two paths, one path, or arbitrary).
        """
        two_path_verbs = ('move', 'copy', 'rename')
        one_path_verbs = ('delete', 'write', 'create', 'read')
        if self.verb in two_path_verbs:
            clause = u'while {0} {1} to {2}'.format(
                self._gerund(),
                displayable_path(self.paths[0]),
                displayable_path(self.paths[1])
            )
        elif self.verb in one_path_verbs:
            clause = u'while {0} {1}'.format(
                self._gerund(),
                displayable_path(self.paths[0])
            )
        else:
            # Generic fallback: name the verb and list every involved path.
            clause = u'during {0} of paths {1}'.format(
                self.verb, u', '.join(displayable_path(p) for p in self.paths)
            )
        return u'{0} {1}'.format(self._reasonstr(), clause)
def normpath(path):
    """Provide the canonical form of the path suitable for storing in
    the database.
    """
    # Strip any OS-specific prefixing, expand '~', make the path
    # absolute, collapse redundant separators, and hand back bytes.
    unprefixed = syspath(path, prefix=False)
    canonical = os.path.normpath(os.path.abspath(os.path.expanduser(unprefixed)))
    return bytestring_path(canonical)
def ancestry(path):
    """Return a list consisting of path's parent directory, its
    grandparent, and so on. For instance:
       >>> ancestry('/a/b/c')
       ['/', '/a', '/a/b']
    The argument should *not* be the result of a call to `syspath`.
    """
    ancestors = []
    previous = None
    while path:
        path = os.path.dirname(path)
        # dirname() is a fixed point at the root; stop when it stops
        # making progress.
        if path == previous:
            break
        previous = path
        if path:
            # Never record the empty string.
            ancestors.append(path)
    # Built innermost-first; flip to root-first order.
    ancestors.reverse()
    return ancestors
def sorted_walk(path, ignore=(), logger=None):
    """Like `os.walk`, but yields things in case-insensitive sorted,
    breadth-first order. Directory and file names matching any glob
    pattern in `ignore` are skipped. If `logger` is provided, then
    warning messages are logged there when a directory cannot be listed.

    Generator yielding (path, dirs, files) tuples of bytestrings.
    """
    # Make sure the path isn't a Unicode string.
    path = bytestring_path(path)
    # Get all the directories and files at this level.
    try:
        contents = os.listdir(syspath(path))
    except OSError as exc:
        if logger:
            logger.warn(u'could not list directory {0}: {1}'.format(
                displayable_path(path), exc.strerror
            ))
        # Unlistable directory: yield nothing for this subtree.
        return
    dirs = []
    files = []
    for base in contents:
        base = bytestring_path(base)
        # Skip ignored filenames.
        skip = False
        for pat in ignore:
            if fnmatch.fnmatch(base, pat):
                skip = True
                break
        if skip:
            continue
        # Add to output as either a file or a directory.
        cur = os.path.join(path, base)
        if os.path.isdir(syspath(cur)):
            dirs.append(base)
        else:
            files.append(base)
    # Sort lists (case-insensitive) and yield the current level.
    dirs.sort(key=bytes.lower)
    files.sort(key=bytes.lower)
    yield (path, dirs, files)
    # Recurse into directories.
    for base in dirs:
        cur = os.path.join(path, base)
        # yield from sorted_walk(...)
        for res in sorted_walk(cur, ignore, logger):
            yield res
def mkdirall(path):
    """Make all the enclosing directories of path (like mkdir -p on the
    parent).
    """
    for ancestor in ancestry(path):
        # Components that already exist need no work.
        if os.path.isdir(syspath(ancestor)):
            continue
        try:
            os.mkdir(syspath(ancestor))
        except (OSError, IOError) as exc:
            raise FilesystemError(exc, 'create', (ancestor,),
                                  traceback.format_exc())
def fnmatch_all(names, patterns):
    """Determine whether all strings in `names` match at least one of
    the `patterns`, which should be shell glob expressions.

    Vacuously True when `names` is empty; False for any name when
    `patterns` is empty.
    """
    # all()/any() short-circuit exactly like the original nested loops,
    # but say what they mean directly.
    return all(
        any(fnmatch.fnmatch(name, pattern) for pattern in patterns)
        for name in names
    )
def prune_dirs(path, root=None, clutter=('.DS_Store', 'Thumbs.db')):
    """If path is an empty directory, then remove it. Recursively remove
    path's ancestry up to root (which is never removed) where there are
    empty directories. If path is not contained in root, then nothing is
    removed. Glob patterns in clutter are ignored when determining
    emptiness. If root is not provided, then only path may be removed
    (i.e., no recursive removal).
    """
    path = normpath(path)
    if root is not None:
        root = normpath(root)
    ancestors = ancestry(path)
    if root is None:
        # Only remove the top directory.
        ancestors = []
    elif root in ancestors:
        # Only remove directories below the root.
        ancestors = ancestors[ancestors.index(root) + 1:]
    else:
        # Remove nothing.
        return
    # Traverse upward from path.
    ancestors.append(path)
    ancestors.reverse()
    for directory in ancestors:
        directory = syspath(directory)
        if not os.path.exists(directory):
            # Directory gone already.
            continue
        if fnmatch_all(os.listdir(directory), clutter):
            # Directory contains only clutter (or nothing).
            try:
                shutil.rmtree(directory)
            except OSError:
                # Removal failed (permissions, race): stop pruning.
                break
        else:
            # A non-empty directory means everything above it is
            # non-empty too; stop.
            break
def components(path):
    """Return a list of the path components in path. For instance:
       >>> components('/a/b/c')
       ['a', 'b', 'c']
    The argument should *not* be the result of a call to `syspath`.
    """
    parts = []
    for ancestor in ancestry(path):
        name = os.path.basename(ancestor)
        # The filesystem root has an empty basename; keep the root
        # string itself in that case.
        parts.append(name if name else ancestor)
    tail = os.path.basename(path)
    if tail:
        parts.append(tail)
    return parts
def _fsencoding():
"""Get the system's filesystem encoding. On Windows, this is always
UTF-8 (not MBCS).
"""
encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
if encoding == b'mbcs':
# On Windows, a broken encoding known to Python as "MBCS" is
# used for the filesystem. However, we only use the Unicode API
# for Windows paths, so the encoding is actually immaterial so
# we can avoid dealing with this nastiness. We arbitrarily
# choose UTF-8.
encoding = b'utf8'
return encoding
def bytestring_path(path):
    """Given a path, which is either a bytes or a unicode, returns a str
    path (ensuring that we never deal with Unicode pathnames).
    """
    # Pass through bytestrings.
    if isinstance(path, bytes):
        return path
    # On Windows, remove the magic prefix added by `syspath`. This makes
    # ``bytestring_path(syspath(X)) == X``, i.e., we can safely
    # round-trip through `syspath`.
    # NOTE(review): WINDOWS_MAGIC_PREFIX is defined elsewhere in this
    # module; comparing os.path.__name__ against the bytes literal
    # b'ntpath' is a Python 2 idiom (bytes == str there).
    if os.path.__name__ == b'ntpath' and path.startswith(WINDOWS_MAGIC_PREFIX):
        path = path[len(WINDOWS_MAGIC_PREFIX):]
    # Try to encode with default encodings, but fall back to UTF8.
    try:
        return path.encode(_fsencoding())
    except (UnicodeError, LookupError):
        return path.encode('utf8')
def displayable_path(path, separator=u'; '):
    """Attempts to decode a bytestring path to a unicode object for the
    purpose of displaying it to the user. If the `path` argument is a
    list or a tuple, the elements are joined with `separator`.
    """
    if isinstance(path, (list, tuple)):
        # Join multiple paths into one display string, recursively.
        return separator.join(displayable_path(p) for p in path)
    elif isinstance(path, unicode):
        return path
    elif not isinstance(path, bytes):
        # A non-string object: just get its unicode representation.
        return unicode(path)
    # Bytestring: decode for display only. Errors are ignored rather
    # than raised because the result is never written back to disk.
    try:
        return path.decode(_fsencoding(), 'ignore')
    except (UnicodeError, LookupError):
        return path.decode('utf8', 'ignore')
def syspath(path, prefix=True):
    """Convert a path for use by the operating system. In particular,
    paths on Windows must receive a magic prefix and must be converted
    to Unicode before they are sent to the OS. To disable the magic
    prefix on Windows, set `prefix` to False---but only do this if you
    *really* know what you're doing.
    """
    # Don't do anything if we're not on windows
    # (checking the path module's name is a cheap platform test; the
    # bytes literal is a Python 2 idiom).
    if os.path.__name__ != b'ntpath':
        return path
    if not isinstance(path, unicode):
        # Beets currently represents Windows paths internally with UTF-8
        # arbitrarily. But earlier versions used MBCS because it is
        # reported as the FS encoding by Windows. Try both.
        try:
            path = path.decode('utf8')
        except UnicodeError:
            # The encoding should always be MBCS, Windows' broken
            # Unicode representation.
            encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
            path = path.decode(encoding, 'replace')
    # Add the magic prefix if it isn't already there.
    # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
    if prefix and not path.startswith(WINDOWS_MAGIC_PREFIX):
        if path.startswith(u'\\\\'):
            # UNC path. Final path should look like \\?\UNC\...
            path = u'UNC' + path[1:]
        path = WINDOWS_MAGIC_PREFIX + path
    return path
def samefile(p1, p2):
    """Safer equality for paths."""
    # Relies on shutil's private `_samefile`, which falls back to a
    # normalized-path comparison on platforms where `os.path.samefile`
    # is unavailable (e.g., Windows on Python 2).
    return shutil._samefile(syspath(p1), syspath(p2))
def remove(path, soft=True):
    """Remove the file. If `soft`, then no error will be raised if the
    file does not exist.

    Raises `FilesystemError` (wrapping the OS error, with a traceback
    attached) when the deletion fails.
    """
    path = syspath(path)
    if soft and not os.path.exists(path):
        return
    try:
        os.remove(path)
    except (OSError, IOError) as exc:
        raise FilesystemError(exc, 'delete', (path,), traceback.format_exc())
def copy(path, dest, replace=False):
    """Copy a plain file. Permissions are not copied. If `dest` already
    exists, raises a FilesystemError unless `replace` is True. Has no
    effect if `path` is the same as `dest`. Paths are translated to
    system paths before the syscall.
    """
    if samefile(path, dest):
        return
    path = syspath(path)
    dest = syspath(dest)
    # NOTE: the existence check and the copy are not atomic; a file
    # created in between will be overwritten.
    if not replace and os.path.exists(dest):
        raise FilesystemError('file exists', 'copy', (path, dest))
    try:
        shutil.copyfile(path, dest)
    except (OSError, IOError) as exc:
        raise FilesystemError(exc, 'copy', (path, dest),
                              traceback.format_exc())
def move(path, dest, replace=False):
    """Rename a file. `dest` may not be a directory. If `dest` already
    exists, raises an OSError unless `replace` is True. Has no effect if
    `path` is the same as `dest`. If the paths are on different
    filesystems (or the rename otherwise fails), a copy is attempted
    instead, in which case metadata will *not* be preserved. Paths are
    translated to system paths.
    """
    if samefile(path, dest):
        return
    path = syspath(path)
    dest = syspath(dest)
    if os.path.exists(dest) and not replace:
        raise FilesystemError('file exists', 'rename', (path, dest),
                              traceback.format_exc())
    # First, try renaming the file.
    try:
        os.rename(path, dest)
    except OSError:
        # Otherwise, copy and delete the original.
        # NOTE(review): copy-then-delete is not atomic; a failure after
        # the copy but before the remove leaves both files on disk.
        try:
            shutil.copyfile(path, dest)
            os.remove(path)
        except (OSError, IOError) as exc:
            raise FilesystemError(exc, 'move', (path, dest),
                                  traceback.format_exc())
def link(path, dest, replace=False):
    """Create a symbolic link from path to `dest`. Raises an OSError if
    `dest` already exists, unless `replace` is True. Does nothing if
    `path` == `dest`."""
    # Linking a path onto itself is a no-op.
    if samefile(path, dest):
        return
    source, target = syspath(path), syspath(dest)
    if not replace and os.path.exists(target):
        raise FilesystemError('file exists', 'rename', (source, target),
                              traceback.format_exc())
    try:
        os.symlink(source, target)
    except OSError:
        raise FilesystemError('Operating system does not support symbolic '
                              'links.', 'link', (source, target),
                              traceback.format_exc())
def unique_path(path):
    """Returns a version of ``path`` that does not exist on the
    filesystem. Specifically, if ``path` itself already exists, then
    something unique is appended to the path.
    """
    if not os.path.exists(syspath(path)):
        return path
    base, ext = os.path.splitext(path)
    # Capture an existing numeric suffix (e.g. "track.12") so that the
    # numbering continues from it. The previous pattern, br'\.(\d)+$',
    # repeated the *group* and thus captured only the last digit
    # ("track.12" -> 2); (\d+) captures the whole number.
    match = re.search(br'\.(\d+)$', base)
    if match:
        num = int(match.group(1))
        base = base[:match.start()]
    else:
        num = 0
    # Probe successive numeric suffixes until an unused path is found.
    while True:
        num += 1
        new_path = b'%s.%i%s' % (base, num, ext)
        if not os.path.exists(new_path):
            return new_path
# Note: The Windows "reserved characters" are, of course, allowed on
# Unix. They are forbidden here because they cause problems on Samba
# shares, which are sufficiently common as to cause frequent problems.
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
# Each entry is a (compiled regex, replacement) pair; `sanitize_path`
# applies them in order to every path component.
CHAR_REPLACE = [
    (re.compile(r'[\\/]'), u'_'), # / and \ -- forbidden everywhere.
    (re.compile(r'^\.'), u'_'), # Leading dot (hidden files on Unix).
    (re.compile(r'[\x00-\x1f]'), u''), # Control characters.
    (re.compile(r'[<>:"\?\*\|]'), u'_'), # Windows "reserved characters".
    (re.compile(r'\.$'), u'_'), # Trailing dots.
    (re.compile(r'\s+$'), u''), # Trailing whitespace.
]
def sanitize_path(path, replacements=None):
    """Takes a path (as a Unicode string) and makes sure that it is
    legal. Returns a new path. Only works with fragments; won't work
    reliably on Windows when a path begins with a drive letter. Path
    separators (including altsep!) should already be cleaned from the
    path components. If replacements is specified, it is used *instead*
    of the default set of replacements; it must be a list of (compiled
    regex, replacement string) pairs.
    """
    replacements = replacements or CHAR_REPLACE
    comps = components(path)
    if not comps:
        return ''
    # Run every replacement over each component, collecting the
    # sanitized pieces in a fresh list.
    sanitized = []
    for comp in comps:
        for regex, repl in replacements:
            comp = regex.sub(repl, comp)
        sanitized.append(comp)
    return os.path.join(*sanitized)
def truncate_path(path, length=MAX_FILENAME_LENGTH):
    """Given a bytestring path or a Unicode path fragment, truncate the
    components to a legal length. In the last component, the extension
    is preserved.

    `length` defaults to the module-level MAX_FILENAME_LENGTH constant.
    """
    comps = components(path)
    # Clip every component to the limit.
    out = [c[:length] for c in comps]
    base, ext = os.path.splitext(comps[-1])
    if ext:
        # Last component has an extension.
        # Reserve room for the extension so "base + ext" stays within
        # `length` overall.
        base = base[:length - len(ext)]
        out[-1] = base + ext
    return os.path.join(*out)
def _legalize_stage(path, replacements, length, extension, fragment):
    """Perform a single round of path legalization steps
    (sanitation/replacement, encoding from Unicode to bytes,
    extension-appending, and truncation). Return the path (Unicode if
    `fragment` is set, `bytes` otherwise) and whether truncation was
    required.
    """
    # Perform an initial sanitization including user replacements.
    path = sanitize_path(path, replacements)
    # Encode for the filesystem.
    if not fragment:
        path = bytestring_path(path)
    # Preserve extension.
    path += extension.lower()
    # Truncate too-long components.
    pre_truncate_path = path
    path = truncate_path(path, length)
    # Comparing before/after reveals whether anything was clipped.
    return path, path != pre_truncate_path
def legalize_path(path, replacements, length, extension, fragment):
    """Given a path-like Unicode string, produce a legal path. Return
    the path and a flag indicating whether some replacements had to be
    ignored (see below).
    The legalization process (see `_legalize_stage`) consists of
    applying the sanitation rules in `replacements`, encoding the string
    to bytes (unless `fragment` is set), truncating components to
    `length`, appending the `extension`.
    This function performs up to three calls to `_legalize_stage` in
    case truncation conflicts with replacements (as can happen when
    truncation creates whitespace at the end of the string, for
    example). The limited number of iterations avoids the
    possibility of an infinite loop of sanitation and truncation
    operations, which could be caused by replacement rules that make the
    string longer. The flag returned from this function indicates that
    the path has to be truncated twice (indicating that replacements
    made the string longer again after it was truncated); the
    application should probably log some sort of warning.
    """
    if fragment:
        # Outputting Unicode.
        # `extension` arrives as bytes; decode it to match.
        extension = extension.decode('utf8', 'ignore')
    first_stage_path, _ = _legalize_stage(
        path, replacements, length, extension, fragment
    )
    # Convert back to Unicode with extension removed.
    first_stage_path, _ = os.path.splitext(displayable_path(first_stage_path))
    # Re-sanitize following truncation (including user replacements).
    second_stage_path, retruncated = _legalize_stage(
        first_stage_path, replacements, length, extension, fragment
    )
    # If the path was once again truncated, discard user replacements
    # and run through one last legalization stage.
    if retruncated:
        second_stage_path, _ = _legalize_stage(
            first_stage_path, None, length, extension, fragment
        )
    return second_stage_path, retruncated
def str2bool(value):
    """Returns a boolean reflecting a human-entered string."""
    # Any other input (including 'no', 'false', '') is treated as False.
    truthy = ('yes', '1', 'true', 't', 'y')
    return value.lower() in truthy
def as_string(value):
    """Convert a value to a Unicode object for matching with a query.
    None becomes the empty string. Bytestrings are silently decoded.
    """
    # NOTE: `buffer` and `unicode` are Python 2 builtins.
    if value is None:
        return u''
    elif isinstance(value, buffer):
        # Buffer objects: copy the bytes out first.
        # NOTE(review): presumably these come from the database layer
        # (BLOB values) — confirm against callers.
        return bytes(value).decode('utf8', 'ignore')
    elif isinstance(value, bytes):
        return value.decode('utf8', 'ignore')
    else:
        return unicode(value)
def plurality(objs):
    """Given a sequence of hashable objects, return a (value, count)
    pair for the object that occurs most often in the sequence. The
    sequence must contain at least one object.

    Raises ValueError on an empty sequence.
    """
    counts = Counter(objs)
    if not counts:
        raise ValueError('sequence must be non-empty')
    # most_common(1) yields a single-element list; unpack it.
    (winner, count), = counts.most_common(1)
    return winner, count
def cpu_count():
    """Return the number of hardware thread contexts (cores or SMT
    threads) in the system.

    Falls back to 1 when the count cannot be determined.
    """
    # Adapted from the soundconverter project:
    # https://github.com/kassoulet/soundconverter
    # NOTE(review): the bytes literals (b'win32', b'darwin', ...) only
    # compare equal to sys.platform on Python 2, where bytes is str.
    if sys.platform == b'win32':
        try:
            num = int(os.environ['NUMBER_OF_PROCESSORS'])
        except (ValueError, KeyError):
            num = 0
    elif sys.platform == b'darwin':
        # macOS: ask sysctl for the logical CPU count.
        try:
            num = int(command_output([b'/usr/sbin/sysctl', b'-n', b'hw.ncpu']))
        except (ValueError, OSError, subprocess.CalledProcessError):
            num = 0
    else:
        # POSIX: number of processors currently online.
        try:
            num = os.sysconf(b'SC_NPROCESSORS_ONLN')
        except (ValueError, OSError, AttributeError):
            num = 0
    if num >= 1:
        return num
    else:
        return 1
def command_output(cmd, shell=False):
    """Runs the command and returns its output after it has exited.
    ``cmd`` is a list of byte string arguments starting with the command names.
    If ``shell`` is true, ``cmd`` is assumed to be a string and passed to a
    shell to execute.
    If the process exits with a non-zero return code
    ``subprocess.CalledProcessError`` is raised. May also raise
    ``OSError``.
    This replaces `subprocess.check_output` which can have problems if lots of
    output is sent to stderr.
    """
    # Capture both streams; stderr is folded into the error output on
    # failure but discarded on success.
    process = subprocess.Popen(
        cmd,
        shell=shell,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        close_fds=platform.system() != b'Windows',
    )
    stdout, stderr = process.communicate()
    if process.returncode:
        raise subprocess.CalledProcessError(
            returncode=process.returncode,
            cmd=b' '.join(cmd),
            output=stdout + stderr,
        )
    return stdout
def max_filename_length(path, limit=MAX_FILENAME_LENGTH):
    """Attempt to determine the maximum filename length for the
    filesystem containing `path`. If the value is greater than `limit`,
    then `limit` is used instead (to prevent errors when a filesystem
    misreports its capacity). If it cannot be determined (e.g., on
    Windows), return `limit`.
    """
    # NOTE(review): hasattr with a bytes attribute name is a Python 2
    # idiom; on Python 3 getattr requires str and this raises TypeError.
    if hasattr(os, b'statvfs'):
        try:
            res = os.statvfs(path)
        except OSError:
            return limit
        # Index 9 of the statvfs result is f_namemax, the maximum
        # filename length on this filesystem.
        return min(res[9], limit)
    else:
        return limit
def open_anything():
    """Return the system command that dispatches execution to the correct
    program.
    """
    system = platform.system()
    if system == 'Darwin':
        return 'open'
    if system == 'Windows':
        return 'start'
    # Assume a freedesktop-style Unix.
    return 'xdg-open'
def editor_command():
    """Get a command for opening a text file.
    Use the `EDITOR` environment variable by default. If it is not
    present, fall back to `open_anything()`, the platform-specific tool
    for opening files in general.
    """
    # An empty EDITOR value is treated the same as an unset one.
    return os.environ.get('EDITOR') or open_anything()
def shlex_split(s):
    """Split a Unicode or bytes string according to shell lexing rules.
    Raise `ValueError` if the string is not a well-formed shell string.
    This is a workaround for a bug in some versions of Python.
    """
    if isinstance(s, bytes):
        # Shlex works fine.
        return shlex.split(s)
    elif isinstance(s, unicode):
        # Work around a Python bug.
        # http://bugs.python.org/issue6988
        # (Python 2's shlex mishandles Unicode input; round-trip the
        # string through UTF-8 bytes instead.)
        bs = s.encode('utf8')
        return [c.decode('utf8') for c in shlex.split(bs)]
    else:
        # Non-string inputs are a programming error, not a parse error.
        raise TypeError('shlex_split called with non-string')
def interactive_open(targets, command):
    """Open the files in `targets` by `exec`ing a new `command`, given
    as a Unicode string. (The new program takes over, and Python
    execution ends: this does not fork a subprocess.)
    Can raise `OSError`.
    """
    assert command
    # Split the command string into its arguments.
    try:
        args = shlex_split(command)
    except ValueError: # Malformed shell tokens.
        args = [command]
    # execlp wants the program name twice: once to locate the executable
    # and once again as the new process's argv[0].
    args.insert(0, args[0]) # for argv[0]
    args += targets
    return os.execlp(*args)
def _windows_long_path_name(short_path):
    """Use Windows' `GetLongPathNameW` via ctypes to get the canonical,
    long path given a short filename.

    Returns `short_path` unchanged when the API call fails.
    """
    if not isinstance(short_path, unicode):
        short_path = unicode(short_path)
    import ctypes
    # 260 characters == MAX_PATH, the classic Windows path-length limit.
    buf = ctypes.create_unicode_buffer(260)
    get_long_path_name_w = ctypes.windll.kernel32.GetLongPathNameW
    return_value = get_long_path_name_w(short_path, buf, 260)
    if return_value == 0 or return_value > 260:
        # An error occurred
        # (0 means failure; a value > buffer size means the result
        # would not fit).
        return short_path
    else:
        long_path = buf.value
        # GetLongPathNameW does not change the case of the drive
        # letter.
        if len(long_path) > 1 and long_path[1] == ':':
            long_path = long_path[0].upper() + long_path[1:]
        return long_path
def case_sensitive(path):
    """Check whether the filesystem at the given path is case sensitive.
    To work best, the path should point to a file or a directory. If the path
    does not exist, assume a case sensitive file system on every platform
    except Windows.

    NOTE(review): a path whose name is already entirely lower- or
    upper-case compares against itself below; callers presumably pass
    mixed-case paths — confirm.
    """
    # A fallback in case the path does not exist.
    if not os.path.exists(syspath(path)):
        # By default, the case sensitivity depends on the platform.
        return platform.system() != 'Windows'
    # If an upper-case version of the path exists but a lower-case
    # version does not, then the filesystem must be case-sensitive.
    # (Otherwise, we have more work to do.)
    if not (os.path.exists(syspath(path.lower())) and
            os.path.exists(syspath(path.upper()))):
        return True
    # Both versions of the path exist on the file system. Check whether
    # they refer to different files by their inodes. Alas,
    # `os.path.samefile` is only available on Unix systems on Python 2.
    if platform.system() != 'Windows':
        return not os.path.samefile(syspath(path.lower()),
                                    syspath(path.upper()))
    # On Windows, we check whether the canonical, long filenames for the
    # files are the same.
    lower = _windows_long_path_name(path.lower())
    upper = _windows_long_path_name(path.upper())
    return lower != upper
def raw_seconds_short(string):
    """Formats a human-readable M:SS string as a float (number of seconds).
    Raises ValueError if the conversion cannot take place due to `string` not
    being in the right format.
    """
    # Raw string literal: the previous plain string relied on Python
    # passing unknown escapes like '\d' through, which is deprecated.
    match = re.match(r'^(\d+):([0-5]\d)$', string)
    if not match:
        raise ValueError('String not in M:SS format')
    minutes, seconds = map(int, match.groups())
    return float(minutes * 60 + seconds)
| |
import warnings
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.db.models.indexes import Index
from django.utils.deprecation import RemovedInDjango21Warning
class DatabaseIntrospection(BaseDatabaseIntrospection):
    """PostgreSQL database introspection, implemented against the
    `pg_catalog` system catalogs and `information_schema` views.
    """
    # Maps type codes to Django Field types.
    data_types_reverse = {
        16: 'BooleanField',
        17: 'BinaryField',
        20: 'BigIntegerField',
        21: 'SmallIntegerField',
        23: 'IntegerField',
        25: 'TextField',
        700: 'FloatField',
        701: 'FloatField',
        869: 'GenericIPAddressField',
        1042: 'CharField', # blank-padded
        1043: 'CharField',
        1082: 'DateField',
        1083: 'TimeField',
        1114: 'DateTimeField',
        1184: 'DateTimeField',
        1266: 'TimeField',
        1700: 'DecimalField',
        2950: 'UUIDField',
    }
    # Table names that get_table_list() should omit; empty by default.
    ignored_tables = []
    # Fetches each index together with its *first* column (indkey[0])
    # only; get_indexes() filters out multi-column indexes afterwards.
    _get_indexes_query = """
        SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
        FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
            pg_catalog.pg_index idx, pg_catalog.pg_attribute attr
        WHERE c.oid = idx.indrelid
            AND idx.indexrelid = c2.oid
            AND attr.attrelid = c.oid
            AND attr.attnum = idx.indkey[0]
            AND c.relname = %s"""
    def get_field_type(self, data_type, description):
        """Map a type OID to a Django field name, promoting serial
        columns (those whose default contains `nextval`) to auto fields.
        """
        field_type = super().get_field_type(data_type, description)
        if description.default and 'nextval' in description.default:
            if field_type == 'IntegerField':
                return 'AutoField'
            elif field_type == 'BigIntegerField':
                return 'BigAutoField'
        return field_type
    def get_table_list(self, cursor):
        """Return a list of table and view names in the current database."""
        # relkind 'r' = ordinary table, 'v' = view; system schemas and
        # relations not on the search path are excluded.
        cursor.execute("""
            SELECT c.relname, c.relkind
            FROM pg_catalog.pg_class c
            LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
            WHERE c.relkind IN ('r', 'v')
                AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
                AND pg_catalog.pg_table_is_visible(c.oid)""")
        return [TableInfo(row[0], {'r': 't', 'v': 'v'}.get(row[1]))
                for row in cursor.fetchall()
                if row[0] not in self.ignored_tables]
    def get_table_description(self, cursor, table_name):
        """
        Return a description of the table with the DB-API cursor.description
        interface.
        """
        # As cursor.description does not return reliably the nullable property,
        # we have to query the information_schema (#7783)
        cursor.execute("""
            SELECT column_name, is_nullable, column_default
            FROM information_schema.columns
            WHERE table_name = %s""", [table_name])
        field_map = {line[0]: line[1:] for line in cursor.fetchall()}
        # A LIMIT 1 SELECT * populates cursor.description for every column.
        cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
        # Extend each raw description tuple with null_ok and default
        # looked up from information_schema.
        return [
            FieldInfo(*(line[0:6] + (field_map[line.name][0] == 'YES', field_map[line.name][1])))
            for line in cursor.description
        ]
    def get_relations(self, cursor, table_name):
        """
        Return a dictionary of {field_name: (field_name_other_table, other_table)}
        representing all relationships to the given table.
        """
        # conkey/confkey are column-number arrays; only single-column
        # foreign keys (element [1]) are considered here.
        cursor.execute("""
            SELECT c2.relname, a1.attname, a2.attname
            FROM pg_constraint con
            LEFT JOIN pg_class c1 ON con.conrelid = c1.oid
            LEFT JOIN pg_class c2 ON con.confrelid = c2.oid
            LEFT JOIN pg_attribute a1 ON c1.oid = a1.attrelid AND a1.attnum = con.conkey[1]
            LEFT JOIN pg_attribute a2 ON c2.oid = a2.attrelid AND a2.attnum = con.confkey[1]
            WHERE c1.relname = %s
                AND con.contype = 'f'""", [table_name])
        relations = {}
        for row in cursor.fetchall():
            relations[row[1]] = (row[2], row[0])
        return relations
    def get_key_columns(self, cursor, table_name):
        """Return (column_name, referenced_table, referenced_column)
        triples for each foreign key column of the table.
        """
        key_columns = []
        cursor.execute("""
            SELECT kcu.column_name, ccu.table_name AS referenced_table, ccu.column_name AS referenced_column
            FROM information_schema.constraint_column_usage ccu
            LEFT JOIN information_schema.key_column_usage kcu
                ON ccu.constraint_catalog = kcu.constraint_catalog
                    AND ccu.constraint_schema = kcu.constraint_schema
                    AND ccu.constraint_name = kcu.constraint_name
            LEFT JOIN information_schema.table_constraints tc
                ON ccu.constraint_catalog = tc.constraint_catalog
                    AND ccu.constraint_schema = tc.constraint_schema
                    AND ccu.constraint_name = tc.constraint_name
            WHERE kcu.table_name = %s AND tc.constraint_type = 'FOREIGN KEY'""", [table_name])
        key_columns.extend(cursor.fetchall())
        return key_columns
    def get_indexes(self, cursor, table_name):
        """Deprecated: map each single-column index name to a
        {'primary_key': bool, 'unique': bool} dict.
        """
        warnings.warn(
            "get_indexes() is deprecated in favor of get_constraints().",
            RemovedInDjango21Warning, stacklevel=2
        )
        # This query retrieves each index on the given table, including the
        # first associated field name
        cursor.execute(self._get_indexes_query, [table_name])
        indexes = {}
        for row in cursor.fetchall():
            # row[1] (idx.indkey) is stored in the DB as an array. It comes out as
            # a string of space-separated integers. This designates the field
            # indexes (1-based) of the fields that have indexes on the table.
            # Here, we skip any indexes across multiple fields.
            if ' ' in row[1]:
                continue
            if row[0] not in indexes:
                indexes[row[0]] = {'primary_key': False, 'unique': False}
            # It's possible to have the unique and PK constraints in separate indexes.
            if row[3]:
                indexes[row[0]]['primary_key'] = True
            if row[2]:
                indexes[row[0]]['unique'] = True
        return indexes
    def get_constraints(self, cursor, table_name):
        """
        Retrieve any constraints or keys (unique, pk, fk, check, index) across
        one or more columns. Also retrieve the definition of expression-based
        indexes.
        """
        constraints = {}
        # Loop over the key table, collecting things as constraints. The column
        # array must return column names in the same order in which they were
        # created.
        # The subquery containing generate_series can be replaced with
        # "WITH ORDINALITY" when support for PostgreSQL 9.3 is dropped.
        cursor.execute("""
            SELECT
                c.conname,
                array(
                    SELECT attname
                    FROM (
                        SELECT unnest(c.conkey) AS colid,
                            generate_series(1, array_length(c.conkey, 1)) AS arridx
                    ) AS cols
                    JOIN pg_attribute AS ca ON cols.colid = ca.attnum
                    WHERE ca.attrelid = c.conrelid
                    ORDER BY cols.arridx
                ),
                c.contype,
                (SELECT fkc.relname || '.' || fka.attname
                FROM pg_attribute AS fka
                JOIN pg_class AS fkc ON fka.attrelid = fkc.oid
                WHERE fka.attrelid = c.confrelid AND fka.attnum = c.confkey[1]),
                cl.reloptions
            FROM pg_constraint AS c
            JOIN pg_class AS cl ON c.conrelid = cl.oid
            JOIN pg_namespace AS ns ON cl.relnamespace = ns.oid
            WHERE ns.nspname = %s AND cl.relname = %s
        """, ["public", table_name])
        for constraint, columns, kind, used_cols, options in cursor.fetchall():
            # contype: 'p' = primary key, 'u' = unique, 'f' = foreign
            # key, 'c' = check.
            constraints[constraint] = {
                "columns": columns,
                "primary_key": kind == "p",
                "unique": kind in ["p", "u"],
                "foreign_key": tuple(used_cols.split(".", 1)) if kind == "f" else None,
                "check": kind == "c",
                "index": False,
                "definition": None,
                "options": options,
            }
        # Now get indexes
        # The row_number() function for ordering the index fields can be
        # replaced by WITH ORDINALITY in the unnest() functions when support
        # for PostgreSQL 9.3 is dropped.
        cursor.execute("""
            SELECT
                indexname, array_agg(attname ORDER BY rnum), indisunique, indisprimary,
                array_agg(ordering ORDER BY rnum), amname, exprdef, s2.attoptions
            FROM (
                SELECT
                    row_number() OVER () as rnum, c2.relname as indexname,
                    idx.*, attr.attname, am.amname,
                    CASE
                        WHEN idx.indexprs IS NOT NULL THEN
                            pg_get_indexdef(idx.indexrelid)
                    END AS exprdef,
                    CASE am.amname
                        WHEN 'btree' THEN
                            CASE (option & 1)
                                WHEN 1 THEN 'DESC' ELSE 'ASC'
                            END
                    END as ordering,
                    c2.reloptions as attoptions
                FROM (
                    SELECT
                        *, unnest(i.indkey) as key, unnest(i.indoption) as option
                    FROM pg_index i
                ) idx
                LEFT JOIN pg_class c ON idx.indrelid = c.oid
                LEFT JOIN pg_class c2 ON idx.indexrelid = c2.oid
                LEFT JOIN pg_am am ON c2.relam = am.oid
                LEFT JOIN pg_attribute attr ON attr.attrelid = c.oid AND attr.attnum = idx.key
                WHERE c.relname = %s
            ) s2
            GROUP BY indexname, indisunique, indisprimary, amname, exprdef, attoptions;
        """, [table_name])
        for index, columns, unique, primary, orders, type_, definition, options in cursor.fetchall():
            # Constraints already recorded above take precedence over
            # their backing indexes.
            if index not in constraints:
                constraints[index] = {
                    "columns": columns if columns != [None] else [],
                    "orders": orders if orders != [None] else [],
                    "primary_key": primary,
                    "unique": unique,
                    "foreign_key": None,
                    "check": False,
                    "index": True,
                    "type": Index.suffix if type_ == 'btree' else type_,
                    "definition": definition,
                    "options": options,
                }
        return constraints
| |
import datetime
from copy import deepcopy
from django.core.exceptions import FieldError, MultipleObjectsReturned
from django.db import models, transaction
from django.test import TestCase
from django.utils import six
from django.utils.translation import ugettext_lazy
from .models import (
Article, Category, Child, First, Parent, Record, Relation, Reporter,
School, Student, Third, ToFieldChild,
)
class ManyToOneTests(TestCase):
def setUp(self):
# Create a few Reporters.
self.r = Reporter(first_name='John', last_name='Smith', email='john@example.com')
self.r.save()
self.r2 = Reporter(first_name='Paul', last_name='Jones', email='paul@example.com')
self.r2.save()
# Create an Article.
self.a = Article(id=None, headline="This is a test",
pub_date=datetime.date(2005, 7, 27), reporter=self.r)
self.a.save()
def test_get(self):
# Article objects have access to their related Reporter objects.
r = self.a.reporter
self.assertEqual(r.id, self.r.id)
# These are strings instead of unicode strings because that's what was used in
# the creation of this reporter (and we haven't refreshed the data from the
# database, which always returns unicode strings).
self.assertEqual((r.first_name, self.r.last_name), ('John', 'Smith'))
def test_create(self):
# You can also instantiate an Article by passing the Reporter's ID
# instead of a Reporter object.
a3 = Article(id=None, headline="Third article",
pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
a3.save()
self.assertEqual(a3.reporter.id, self.r.id)
# Similarly, the reporter ID can be a string.
a4 = Article(id=None, headline="Fourth article",
pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
a4.save()
self.assertEqual(repr(a4.reporter), "<Reporter: John Smith>")
def test_add(self):
# Create an Article via the Reporter object.
new_article = self.r.article_set.create(headline="John's second story",
pub_date=datetime.date(2005, 7, 29))
self.assertEqual(repr(new_article), "<Article: John's second story>")
self.assertEqual(new_article.reporter.id, self.r.id)
# Create a new article, and add it to the article set.
new_article2 = Article(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
msg = "<Article: Paul's story> instance isn't saved. Use bulk=False or save the object first."
with self.assertRaisesMessage(ValueError, msg):
self.r.article_set.add(new_article2)
self.r.article_set.add(new_article2, bulk=False)
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertQuerysetEqual(self.r.article_set.all(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
# Add the same article to a different article set - check that it moves.
self.r2.article_set.add(new_article2)
self.assertEqual(new_article2.reporter.id, self.r2.id)
self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
# Adding an object of the wrong type raises TypeError.
with transaction.atomic():
with six.assertRaisesRegex(self, TypeError,
"'Article' instance expected, got <Reporter.*"):
self.r.article_set.add(self.r2)
self.assertQuerysetEqual(self.r.article_set.all(),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
def test_set(self):
    """article_set.set() reassigns articles; a non-nullable FK keeps unlisted members in place."""
    new_article = self.r.article_set.create(headline="John's second story",
                                            pub_date=datetime.date(2005, 7, 29))
    new_article2 = self.r2.article_set.create(headline="Paul's story",
                                              pub_date=datetime.date(2006, 1, 17))
    # Assign the article to the reporter.
    new_article2.reporter = self.r
    new_article2.save()
    self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
    self.assertEqual(new_article2.reporter.id, self.r.id)
    self.assertQuerysetEqual(self.r.article_set.all(), [
        "<Article: John's second story>",
        "<Article: Paul's story>",
        "<Article: This is a test>",
    ])
    self.assertQuerysetEqual(self.r2.article_set.all(), [])
    # Set the article back again.
    self.r2.article_set.set([new_article, new_article2])
    self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"])
    self.assertQuerysetEqual(self.r2.article_set.all(),
        [
            "<Article: John's second story>",
            "<Article: Paul's story>",
        ])
    # Funny case - because the ForeignKey cannot be null,
    # existing members of the set must remain.
    self.r.article_set.set([new_article])
    self.assertQuerysetEqual(self.r.article_set.all(),
        [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
    self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
def test_assign(self):
    """Reverse-descriptor assignment (r.article_set = [...]) mirrors set(); non-null FK exposes no remove/clear."""
    new_article = self.r.article_set.create(headline="John's second story",
                                            pub_date=datetime.date(2005, 7, 29))
    new_article2 = self.r2.article_set.create(headline="Paul's story",
                                              pub_date=datetime.date(2006, 1, 17))
    # Assign the article to the reporter directly using the descriptor.
    new_article2.reporter = self.r
    new_article2.save()
    self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
    self.assertEqual(new_article2.reporter.id, self.r.id)
    self.assertQuerysetEqual(self.r.article_set.all(), [
        "<Article: John's second story>",
        "<Article: Paul's story>",
        "<Article: This is a test>",
    ])
    self.assertQuerysetEqual(self.r2.article_set.all(), [])
    # Set the article back again using set descriptor.
    self.r2.article_set = [new_article, new_article2]
    self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"])
    self.assertQuerysetEqual(self.r2.article_set.all(),
        [
            "<Article: John's second story>",
            "<Article: Paul's story>",
        ])
    # Funny case - assignment notation can only go so far; because the
    # ForeignKey cannot be null, existing members of the set must remain.
    self.r.article_set = [new_article]
    self.assertQuerysetEqual(self.r.article_set.all(),
        [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
    self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
    # Reporter cannot be null - there should not be a clear or remove method
    self.assertFalse(hasattr(self.r2.article_set, 'remove'))
    self.assertFalse(hasattr(self.r2.article_set, 'clear'))
def test_selects(self):
    """Filters can traverse the Article->Reporter ForeignKey, arbitrarily deep, with a single JOIN."""
    self.r.article_set.create(headline="John's second story",
                              pub_date=datetime.date(2005, 7, 29))
    self.r2.article_set.create(headline="Paul's story",
                               pub_date=datetime.date(2006, 1, 17))
    # Reporter objects have access to their related Article objects.
    self.assertQuerysetEqual(self.r.article_set.all(), [
        "<Article: John's second story>",
        "<Article: This is a test>",
    ])
    self.assertQuerysetEqual(self.r.article_set.filter(headline__startswith='This'),
                             ["<Article: This is a test>"])
    self.assertEqual(self.r.article_set.count(), 2)
    self.assertEqual(self.r2.article_set.count(), 1)
    # Get articles by id
    self.assertQuerysetEqual(Article.objects.filter(id__exact=self.a.id),
                             ["<Article: This is a test>"])
    self.assertQuerysetEqual(Article.objects.filter(pk=self.a.id),
                             ["<Article: This is a test>"])
    # Query on an article property
    self.assertQuerysetEqual(Article.objects.filter(headline__startswith='This'),
                             ["<Article: This is a test>"])
    # The API automatically follows relationships as far as you need.
    # Use double underscores to separate relationships.
    # This works as many levels deep as you want. There's no limit.
    # Find all Articles for any Reporter whose first name is "John".
    self.assertQuerysetEqual(Article.objects.filter(reporter__first_name__exact='John'),
        [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
    # Check that implied __exact also works
    self.assertQuerysetEqual(Article.objects.filter(reporter__first_name='John'),
        [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
    # Query twice over the related field.
    self.assertQuerysetEqual(
        Article.objects.filter(reporter__first_name__exact='John',
                               reporter__last_name__exact='Smith'),
        [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
    # The underlying query only makes one join when a related table is referenced twice.
    queryset = Article.objects.filter(reporter__first_name__exact='John',
                                      reporter__last_name__exact='Smith')
    self.assertNumQueries(1, list, queryset)
    self.assertEqual(queryset.query.get_compiler(queryset.db).as_sql()[0].count('INNER JOIN'), 1)
    # The automatically joined table has a predictable name.
    self.assertQuerysetEqual(
        Article.objects.filter(reporter__first_name__exact='John').extra(
            where=["many_to_one_reporter.last_name='Smith'"]),
        [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
    # ... and should work fine with the unicode that comes out of forms.Form.cleaned_data
    self.assertQuerysetEqual(
        (Article.objects
         .filter(reporter__first_name__exact='John')
         .extra(where=["many_to_one_reporter.last_name='%s'" % 'Smith'])),
        [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
    # Find all Articles for a Reporter.
    # Use direct ID check, pk check, and object comparison
    self.assertQuerysetEqual(
        Article.objects.filter(reporter__id__exact=self.r.id),
        [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
    self.assertQuerysetEqual(
        Article.objects.filter(reporter__pk=self.r.id),
        [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
    self.assertQuerysetEqual(
        Article.objects.filter(reporter=self.r.id),
        [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
    self.assertQuerysetEqual(
        Article.objects.filter(reporter=self.r),
        [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
    self.assertQuerysetEqual(
        Article.objects.filter(reporter__in=[self.r.id, self.r2.id]).distinct(),
        [
            "<Article: John's second story>",
            "<Article: Paul's story>",
            "<Article: This is a test>",
        ])
    self.assertQuerysetEqual(
        Article.objects.filter(reporter__in=[self.r, self.r2]).distinct(),
        [
            "<Article: John's second story>",
            "<Article: Paul's story>",
            "<Article: This is a test>",
        ])
    # You can also use a queryset instead of a literal list of instances.
    # The queryset must be reduced to a list of values using values(),
    # then converted into a query
    self.assertQuerysetEqual(
        Article.objects.filter(
            reporter__in=Reporter.objects.filter(first_name='John').values('pk').query
        ).distinct(),
        [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
def test_reverse_selects(self):
    """Reporters can be filtered through the reverse side of the ForeignKey (article__...)."""
    a3 = Article.objects.create(id=None, headline="Third article",
                                pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
    Article.objects.create(id=None, headline="Fourth article",
                           pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
    # Reporters can be queried
    self.assertQuerysetEqual(Reporter.objects.filter(id__exact=self.r.id),
                             ["<Reporter: John Smith>"])
    self.assertQuerysetEqual(Reporter.objects.filter(pk=self.r.id),
                             ["<Reporter: John Smith>"])
    self.assertQuerysetEqual(Reporter.objects.filter(first_name__startswith='John'),
                             ["<Reporter: John Smith>"])
    # Reporters can query in opposite direction of ForeignKey definition
    self.assertQuerysetEqual(Reporter.objects.filter(article__id__exact=self.a.id),
                             ["<Reporter: John Smith>"])
    self.assertQuerysetEqual(Reporter.objects.filter(article__pk=self.a.id),
                             ["<Reporter: John Smith>"])
    self.assertQuerysetEqual(Reporter.objects.filter(article=self.a.id),
                             ["<Reporter: John Smith>"])
    self.assertQuerysetEqual(Reporter.objects.filter(article=self.a),
                             ["<Reporter: John Smith>"])
    self.assertQuerysetEqual(
        Reporter.objects.filter(article__in=[self.a.id, a3.id]).distinct(),
        ["<Reporter: John Smith>"])
    self.assertQuerysetEqual(
        Reporter.objects.filter(article__in=[self.a.id, a3]).distinct(),
        ["<Reporter: John Smith>"])
    self.assertQuerysetEqual(
        Reporter.objects.filter(article__in=[self.a, a3]).distinct(),
        ["<Reporter: John Smith>"])
    # Without distinct() the reporter appears once per matching article.
    self.assertQuerysetEqual(
        Reporter.objects.filter(article__headline__startswith='T'),
        ["<Reporter: John Smith>", "<Reporter: John Smith>"],
        ordered=False
    )
    self.assertQuerysetEqual(
        Reporter.objects.filter(article__headline__startswith='T').distinct(),
        ["<Reporter: John Smith>"])
    # Counting in the opposite direction works in conjunction with distinct()
    self.assertEqual(
        Reporter.objects.filter(article__headline__startswith='T').count(), 2)
    self.assertEqual(
        Reporter.objects.filter(article__headline__startswith='T').distinct().count(), 1)
    # Queries can go round in circles.
    self.assertQuerysetEqual(
        Reporter.objects.filter(article__reporter__first_name__startswith='John'),
        [
            "<Reporter: John Smith>",
            "<Reporter: John Smith>",
            "<Reporter: John Smith>",
        ],
        ordered=False
    )
    self.assertQuerysetEqual(
        Reporter.objects.filter(article__reporter__first_name__startswith='John').distinct(),
        ["<Reporter: John Smith>"])
    self.assertQuerysetEqual(
        Reporter.objects.filter(article__reporter__exact=self.r).distinct(),
        ["<Reporter: John Smith>"])
    # Check that implied __exact also works.
    self.assertQuerysetEqual(
        Reporter.objects.filter(article__reporter=self.r).distinct(),
        ["<Reporter: John Smith>"])
    # It's possible to use values() calls across many-to-one relations.
    # (Note, too, that we clear the ordering here so as not to drag the
    # 'headline' field into the columns being used to determine uniqueness)
    d = {'reporter__first_name': 'John', 'reporter__last_name': 'Smith'}
    self.assertEqual([d],
        list(Article.objects.filter(reporter=self.r).distinct().order_by()
             .values('reporter__first_name', 'reporter__last_name')))
def test_select_related(self):
    """select_related().dates() deduplicates dates shared by Articles with different Reporters."""
    royko = Reporter.objects.create(first_name='Mike', last_name='Royko', email='royko@suntimes.com')
    kass = Reporter.objects.create(first_name='John', last_name='Kass', email='jkass@tribune.com')
    # Two articles on the same date, each by a different reporter.
    Article.objects.create(headline='First', pub_date=datetime.date(1980, 4, 23), reporter=royko)
    Article.objects.create(headline='Second', pub_date=datetime.date(1980, 4, 23), reporter=kass)
    expectations = [
        ('day', [datetime.date(1980, 4, 23), datetime.date(2005, 7, 27)]),
        ('month', [datetime.date(1980, 4, 1), datetime.date(2005, 7, 1)]),
        ('year', [datetime.date(1980, 1, 1), datetime.date(2005, 1, 1)]),
    ]
    for kind, expected_dates in expectations:
        self.assertEqual(
            list(Article.objects.select_related().dates('pub_date', kind)),
            expected_dates)
def test_delete(self):
    """Deleting a Reporter cascades to its Articles, including via a JOIN-based delete."""
    self.r.article_set.create(headline="John's second story",
                              pub_date=datetime.date(2005, 7, 29))
    self.r2.article_set.create(headline="Paul's story",
                               pub_date=datetime.date(2006, 1, 17))
    Article.objects.create(id=None, headline="Third article",
                          pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
    Article.objects.create(id=None, headline="Fourth article",
                          pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
    # If you delete a reporter, his articles will be deleted.
    self.assertQuerysetEqual(Article.objects.all(),
        [
            "<Article: Fourth article>",
            "<Article: John's second story>",
            "<Article: Paul's story>",
            "<Article: Third article>",
            "<Article: This is a test>",
        ])
    self.assertQuerysetEqual(Reporter.objects.order_by('first_name'),
        [
            "<Reporter: John Smith>",
            "<Reporter: Paul Jones>",
        ])
    self.r2.delete()
    # Paul's story went away with Paul.
    self.assertQuerysetEqual(Article.objects.all(),
        [
            "<Article: Fourth article>",
            "<Article: John's second story>",
            "<Article: Third article>",
            "<Article: This is a test>",
        ])
    self.assertQuerysetEqual(Reporter.objects.order_by('first_name'),
                             ["<Reporter: John Smith>"])
    # You can delete using a JOIN in the query.
    Reporter.objects.filter(article__headline__startswith='This').delete()
    self.assertQuerysetEqual(Reporter.objects.all(), [])
    self.assertQuerysetEqual(Article.objects.all(), [])
def test_explicit_fk(self):
    """get_or_create/create/get all accept and respect an explicit reporter_id value."""
    # Create a new Article with get_or_create using an explicit value
    # for a ForeignKey.
    a2, created = Article.objects.get_or_create(id=None,
                                                headline="John's second test",
                                                pub_date=datetime.date(2011, 5, 7),
                                                reporter_id=self.r.id)
    self.assertTrue(created)
    self.assertEqual(a2.reporter.id, self.r.id)
    # You can specify filters containing the explicit FK value.
    self.assertQuerysetEqual(
        Article.objects.filter(reporter_id__exact=self.r.id),
        [
            "<Article: John's second test>",
            "<Article: This is a test>",
        ])
    # Create an Article by Paul for the same date.
    a3 = Article.objects.create(id=None, headline="Paul's commentary",
                                pub_date=datetime.date(2011, 5, 7),
                                reporter_id=self.r2.id)
    self.assertEqual(a3.reporter.id, self.r2.id)
    # Get should respect explicit foreign keys as well.
    self.assertRaises(MultipleObjectsReturned,
                      Article.objects.get, reporter_id=self.r.id)
    self.assertEqual(repr(a3),
                     repr(Article.objects.get(reporter_id=self.r2.id,
                                              pub_date=datetime.date(2011, 5, 7))))
def test_deepcopy_and_circular_references(self):
    """Regression for #12876 -- deepcopy of a model holding a self-referencing
    queryset must not hit recursion-depth limits."""
    self.r.cached_query = Article.objects.filter(reporter=self.r)
    clone = deepcopy(self.r)
    self.assertEqual(repr(clone), "<Reporter: John Smith>")
def test_manager_class_caching(self):
    """The dynamically built related-manager class is created once and reused."""
    first = Reporter.objects.create(first_name='Mike')
    second = Reporter.objects.create(first_name='John')
    # Repeated access on one instance yields the same class ...
    self.assertIs(first.article_set.__class__, first.article_set.__class__)
    # ... and so does access across different instances.
    self.assertIs(first.article_set.__class__, second.article_set.__class__)
def test_create_relation_with_ugettext_lazy(self):
    """A lazy translation string used as a field value is stored as its evaluated text."""
    reporter = Reporter.objects.create(
        first_name='John',
        last_name='Smith',
        email='john.smith@example.com')
    lazy_headline = ugettext_lazy('test')
    reporter.article_set.create(headline=lazy_headline,
                                pub_date=datetime.date(2011, 6, 10))
    stored = reporter.article_set.get()
    # What comes back equals the forced (non-lazy) text.
    self.assertEqual(stored.headline, six.text_type(lazy_headline))
def test_values_list_exception(self):
    """values_list() with an unknown field raises FieldError naming the valid choices."""
    expected_message = "Cannot resolve keyword 'notafield' into field. Choices are: %s"
    # Traversing the FK: choices come from the related (Reporter) model.
    reporter_choices = ', '.join(sorted(f.name for f in Reporter._meta.get_fields()))
    with self.assertRaisesMessage(FieldError, expected_message % reporter_choices):
        Article.objects.values_list('reporter__notafield')
    # extra() columns are listed among the choices as well.
    article_choices = ', '.join(['EXTRA'] + sorted(f.name for f in Article._meta.get_fields()))
    with self.assertRaisesMessage(FieldError, expected_message % article_choices):
        Article.objects.extra(select={'EXTRA': 'EXTRA_SELECT'}).values_list('notafield')
def test_fk_assignment_and_related_object_cache(self):
    """ForeignKey assignment and the related-object cache (see #6886)."""
    p = Parent.objects.create(name="Parent")
    c = Child.objects.create(name="Child", parent=p)
    # Look up the object again so that we get a "fresh" object.
    c = Child.objects.get(name="Child")
    p = c.parent
    # Accessing the related object again returns the exactly same object.
    self.assertIs(c.parent, p)
    # But if we kill the cache, we get a new object.
    del c._parent_cache
    self.assertIsNot(c.parent, p)
    # Assigning a new object results in that object getting cached immediately.
    p2 = Parent.objects.create(name="Parent 2")
    c.parent = p2
    self.assertIs(c.parent, p2)
    # Assigning None succeeds if field is null=True.
    p.bestchild = None
    self.assertIsNone(p.bestchild)
    # bestchild should still be None after saving.
    p.save()
    self.assertIsNone(p.bestchild)
    # bestchild should still be None after fetching the object again.
    p = Parent.objects.get(name="Parent")
    self.assertIsNone(p.bestchild)
    # Assigning None fails: Child.parent is null=False.
    self.assertRaises(ValueError, setattr, c, "parent", None)
    # You also can't assign an object of the wrong type here
    self.assertRaises(ValueError, setattr, c, "parent", First(id=1, second=1))
    # Nor can you explicitly assign None to Child.parent during object
    # creation (regression for #9649).
    self.assertRaises(ValueError, Child, name='xyzzy', parent=None)
    self.assertRaises(ValueError, Child.objects.create, name='xyzzy', parent=None)
    # Creation using keyword argument should cache the related object.
    p = Parent.objects.get(name="Parent")
    c = Child(parent=p)
    self.assertIs(c.parent, p)
    # Creation using keyword argument and unsaved related instance (#8070).
    p = Parent()
    msg = "save() prohibited to prevent data loss due to unsaved related object 'parent'."
    with self.assertRaisesMessage(ValueError, msg):
        Child.objects.create(parent=p)
    msg = "save() prohibited to prevent data loss due to unsaved related object 'parent'."
    with self.assertRaisesMessage(ValueError, msg):
        ToFieldChild.objects.create(parent=p)
    # Creation using attname keyword argument and an id will cause the
    # related object to be fetched.
    p = Parent.objects.get(name="Parent")
    c = Child(parent_id=p.id)
    self.assertIsNot(c.parent, p)
    self.assertEqual(c.parent, p)
def test_multiple_foreignkeys(self):
    """Multiple ForeignKeys to the same model resolve independently in lookups (bug #7125)."""
    c1 = Category.objects.create(name='First')
    c2 = Category.objects.create(name='Second')
    c3 = Category.objects.create(name='Third')
    r1 = Record.objects.create(category=c1)
    r2 = Record.objects.create(category=c1)
    r3 = Record.objects.create(category=c2)
    r4 = Record.objects.create(category=c2)
    r5 = Record.objects.create(category=c3)
    Relation.objects.create(left=r1, right=r2)
    Relation.objects.create(left=r3, right=r4)
    Relation.objects.create(left=r1, right=r3)
    Relation.objects.create(left=r5, right=r2)
    Relation.objects.create(left=r3, right=r2)
    # Filter on both FK paths of the same Relation at once.
    q1 = Relation.objects.filter(left__category__name__in=['First'], right__category__name__in=['Second'])
    self.assertQuerysetEqual(q1, ["<Relation: First - Second>"])
    # Traverse record -> left_set -> right -> category.
    q2 = Category.objects.filter(record__left_set__right__category__name='Second').order_by('name')
    self.assertQuerysetEqual(q2, ["<Category: First>", "<Category: Second>"])
    # A Child instance cannot itself be used as a parent.
    p = Parent.objects.create(name="Parent")
    c = Child.objects.create(name="Child", parent=p)
    self.assertRaises(ValueError, Child.objects.create, name="Grandchild", parent=c)
def test_fk_instantiation_outside_model(self):
    """Regression for #12190 -- a ForeignKey built outside a model class can
    still be interrogated for its related field."""
    field = models.ForeignKey(Category, models.CASCADE)
    related_field_name = field.remote_field.get_related_field().name
    self.assertEqual('id', related_field_name)
def test_relation_unsaved(self):
    """The <field>_set manager does not join on null FK values (#17541)."""
    Third.objects.create(name='Third 1')
    Third.objects.create(name='Third 2')
    unsaved = Third(name="testing")
    # The object isn't saved and thus the relation field is null - we won't
    # even execute a query in this case.
    with self.assertNumQueries(0):
        self.assertEqual(unsaved.child_set.count(), 0)
    unsaved.save()
    # Once saved, counting the related objects takes exactly one query.
    with self.assertNumQueries(1):
        self.assertEqual(unsaved.child_set.count(), 0)
def test_related_object(self):
    """Related-object access uses the base manager, bypassing a restrictive default manager."""
    public_school = School.objects.create(is_public=True)
    public_student = Student.objects.create(school=public_school)
    private_school = School.objects.create(is_public=False)
    private_student = Student.objects.create(school=private_school)
    # Only one school is available via all() due to the custom default manager.
    self.assertQuerysetEqual(
        School.objects.all(),
        ["<School: School object>"]
    )
    self.assertEqual(public_student.school, public_school)
    # Make sure the base manager is used so that a student can still access
    # its related school even if the default manager doesn't normally
    # allow it.
    self.assertEqual(private_student.school, private_school)
    # If the manager is marked "use_for_related_fields", it'll get used instead
    # of the "bare" queryset. Usually you'd define this as a property on the class,
    # but this approximates that in a way that's easier in tests.
    School.objects.use_for_related_fields = True
    try:
        private_student = Student.objects.get(pk=private_student.pk)
        self.assertRaises(School.DoesNotExist, lambda: private_student.school)
    finally:
        # Always restore the flag so other tests see the default manager behavior.
        School.objects.use_for_related_fields = False
def test_hasattr_related_object(self):
    """Accessing a missing related object raises a subclass of AttributeError
    (refs #21563), so hasattr() reports False instead of propagating."""
    article_without_reporter = Article()
    self.assertFalse(hasattr(article_without_reporter, 'reporter'))
| |
"""
Megnet graph layer implementation
"""
import tensorflow as tf
import tensorflow.keras.backend as kb
from megnet.layers.graph import GraphNetworkLayer
from megnet.utils.layer import repeat_with_index
__author__ = "Chi Chen"
__copyright__ = "Copyright 2018, Materials Virtual Lab "
__version__ = "0.1"
__date__ = "Dec 1, 2018"
class MEGNetLayer(GraphNetworkLayer):
    """
    The MEGNet graph implementation as described in the paper

    Chen, Chi; Ye, Weike Ye; Zuo, Yunxing; Zheng, Chen; Ong, Shyue Ping.
    Graph Networks as a Universal Machine Learning Framework for Molecules and Crystals,
    2018, arXiv preprint. [arXiv:1812.05055](https://arxiv.org/abs/1812.05055)

    Graph inputs are expected in the order
    [nodes, edges, u (state), index1, index2, gnode, gbond], as unpacked by
    phi_e and the rho_* aggregation methods below.

    Methods:
        call(inputs, mask=None): the logic of the layer, returns the final graph
        compute_output_shape(input_shape): compute static output shapes, returns list of tuple shapes
        build(input_shape): initialize the weights and biases for each function
        phi_e(inputs): update function for bonds and returns updated bond attribute e_p
        rho_e_v(e_p, inputs): aggregate updated bonds e_p to per atom attributes, b_e_p
        phi_v(b_e_p, inputs): update the atom attributes by the results from previous step b_e_p and all the inputs
            returns v_p.
        rho_e_u(e_p, inputs): aggregate bonds to global attribute
        rho_v_u(v_p, inputs): aggregate atom to global attributes
        get_config(): part of keras interface for serialization
    """

    def __init__(
        self,
        units_v,
        units_e,
        units_u,
        pool_method="mean",
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        """
        Args:
            units_v (list of integers): the hidden layer sizes for node update neural network
            units_e (list of integers): the hidden layer sizes for edge update neural network
            units_u (list of integers): the hidden layer sizes for state update neural network
            pool_method (str): 'mean' or 'sum', determines how information is gathered to nodes from neighboring edges
            activation (str): Default: None. The activation function used for each sub-neural network. Examples include
                'relu', 'softmax', 'tanh', 'sigmoid' and etc.
            use_bias (bool): Default: True. Whether to use the bias term in the neural network.
            kernel_initializer (str): Default: 'glorot_uniform'. Initialization function for the layer kernel weights,
            bias_initializer (str): Default: 'zeros'
            kernel_regularizer (str): Default: None. The regularization function for the kernel weights
            bias_regularizer (str): Default: None. The regularization function for the bias terms
            activity_regularizer (str): Default: None. The regularization function for the output
            kernel_constraint (str): Default: None. Keras constraint for kernel values
            bias_constraint (str): Default: None .Keras constraint for bias values

        Raises:
            ValueError: if pool_method is neither 'mean' nor 'sum'.
        """
        super().__init__(
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
        self.units_v = units_v
        self.units_e = units_e
        self.units_u = units_u
        self.pool_method = pool_method
        # Select the TF reduction primitives that implement the chosen pooling.
        if pool_method == "mean":
            self.reduce_method = tf.reduce_mean
            self.unsorted_seg_method = tf.math.unsorted_segment_mean
            self.seg_method = tf.math.segment_mean
        elif pool_method == "sum":
            self.reduce_method = tf.reduce_sum
            self.seg_method = tf.math.segment_sum
            self.unsorted_seg_method = tf.math.unsorted_segment_sum
        else:
            raise ValueError("Pool method: " + pool_method + " not understood!")

    def build(self, input_shapes):
        """
        Build the weights for the layer

        Args:
            input_shapes (sequence of tuple): the shapes of all input tensors
        """
        # Feature dimensions of nodes, edges and the global state
        # (assumes rank-3 inputs of shape (batch, count, features)).
        vdim = input_shapes[0][2]
        edim = input_shapes[1][2]
        udim = input_shapes[2][2]
        with kb.name_scope(self.name):
            with kb.name_scope("phi_v"):
                # Node-update MLP input: [aggregated edges, node, state] concatenated.
                v_shapes = [self.units_e[-1] + vdim + udim] + self.units_v
                v_shapes = list(zip(v_shapes[:-1], v_shapes[1:]))
                self.phi_v_weights = [
                    self.add_weight(
                        shape=i,
                        initializer=self.kernel_initializer,
                        name=f"weight_v_{j}",
                        regularizer=self.kernel_regularizer,
                        constraint=self.kernel_constraint,
                    )
                    for j, i in enumerate(v_shapes)
                ]
                if self.use_bias:
                    self.phi_v_biases = [
                        self.add_weight(
                            shape=(i[-1],),
                            initializer=self.bias_initializer,
                            name=f"bias_v_{j}",
                            regularizer=self.bias_regularizer,
                            constraint=self.bias_constraint,
                        )
                        for j, i in enumerate(v_shapes)
                    ]
                else:
                    self.phi_v_biases = None
            with kb.name_scope("phi_e"):
                # Edge-update MLP input: [sender node, receiver node, edge, state].
                e_shapes = [2 * vdim + edim + udim] + self.units_e
                e_shapes = list(zip(e_shapes[:-1], e_shapes[1:]))
                self.phi_e_weights = [
                    self.add_weight(
                        shape=i,
                        initializer=self.kernel_initializer,
                        name=f"weight_e_{j}",
                        regularizer=self.kernel_regularizer,
                        constraint=self.kernel_constraint,
                    )
                    for j, i in enumerate(e_shapes)
                ]
                if self.use_bias:
                    self.phi_e_biases = [
                        self.add_weight(
                            shape=(i[-1],),
                            initializer=self.bias_initializer,
                            name=f"bias_e_{j}",
                            regularizer=self.bias_regularizer,
                            constraint=self.bias_constraint,
                        )
                        for j, i in enumerate(e_shapes)
                    ]
                else:
                    self.phi_e_biases = None
            with kb.name_scope("phi_u"):
                # State-update MLP input: [aggregated edges, aggregated nodes, state].
                u_shapes = [self.units_e[-1] + self.units_v[-1] + udim] + self.units_u
                u_shapes = list(zip(u_shapes[:-1], u_shapes[1:]))
                self.phi_u_weights = [
                    self.add_weight(
                        shape=i,
                        initializer=self.kernel_initializer,
                        name=f"weight_u_{j}",
                        regularizer=self.kernel_regularizer,
                        constraint=self.kernel_constraint,
                    )
                    for j, i in enumerate(u_shapes)
                ]
                if self.use_bias:
                    self.phi_u_biases = [
                        self.add_weight(
                            shape=(i[-1],),
                            initializer=self.bias_initializer,
                            name=f"bias_u_{j}",
                            regularizer=self.bias_regularizer,
                            constraint=self.bias_constraint,
                        )
                        for j, i in enumerate(u_shapes)
                    ]
                else:
                    self.phi_u_biases = None
        self.built = True

    def compute_output_shape(self, input_shape):
        """
        Compute output shapes from input shapes

        Args:
            input_shape (sequence of tuple): input shapes

        Returns: sequence of tuples output shapes
        """
        node_feature_shape = input_shape[0]
        edge_feature_shape = input_shape[1]
        state_feature_shape = input_shape[2]
        # Only the last (feature) dimension changes: to each MLP's final width.
        output_shape = [
            (node_feature_shape[0], node_feature_shape[1], self.units_v[-1]),
            (edge_feature_shape[0], edge_feature_shape[1], self.units_e[-1]),
            (state_feature_shape[0], state_feature_shape[1], self.units_u[-1]),
        ]
        return output_shape

    def phi_e(self, inputs):
        """
        Edge update function

        Args:
            inputs (tuple of tensor)
        Returns:
            output tensor
        """
        nodes, edges, u, index1, index2, gnode, gbond = inputs
        # Flatten index tensors so they can be used with tf.gather.
        index1 = tf.reshape(index1, (-1,))
        index2 = tf.reshape(index2, (-1,))
        # Sender and receiver node features for every edge.
        fs = tf.gather(nodes, index1, axis=1)
        fr = tf.gather(nodes, index2, axis=1)
        concate_node = tf.concat([fs, fr], -1)
        # Broadcast the per-graph state feature onto every bond.
        u_expand = repeat_with_index(u, gbond, axis=1)
        concated = tf.concat([concate_node, edges, u_expand], -1)
        return self._mlp(concated, self.phi_e_weights, self.phi_e_biases)

    def rho_e_v(self, e_p, inputs):
        """
        Reduce edge attributes to node attribute, eqn 5 in the paper
        Args:
            e_p: updated bond
            inputs: the whole input list

        Returns: summed tensor
        """
        node, edges, u, index1, index2, gnode, gbond = inputs
        index1 = tf.reshape(index1, (-1,))
        # Pool each edge's features onto its first-index node; num_segments is
        # the node count so nodes without edges still get a (zero) row.
        # NOTE(review): tf.squeeze with no axis drops every size-1 dimension —
        # this appears to assume a batch dimension of 1; confirm upstream.
        return tf.expand_dims(self.unsorted_seg_method(tf.squeeze(e_p), index1, num_segments=tf.shape(node)[1]), axis=0)

    def phi_v(self, b_ei_p, inputs):
        """
        Node update function
        Args:
            b_ei_p (tensor): edge aggregated tensor
            inputs (tuple of tensors): other graph inputs

        Returns: updated node tensor
        """
        nodes, edges, u, index1, index2, gnode, gbond = inputs
        # Broadcast the per-graph state feature onto every atom.
        u_expand = repeat_with_index(u, gnode, axis=1)
        concated = tf.concat([b_ei_p, nodes, u_expand], -1)
        return self._mlp(concated, self.phi_v_weights, self.phi_v_biases)

    def rho_e_u(self, e_p, inputs):
        """
        aggregate edge to state
        Args:
            e_p (tensor): edge tensor
            inputs (tuple of tensors): other graph input tensors

        Returns: edge aggregated tensor for states
        """
        nodes, edges, u, index1, index2, gnode, gbond = inputs
        # gbond maps each bond to its graph; segment-pool bonds per graph.
        gbond = tf.reshape(gbond, (-1,))
        return tf.expand_dims(self.seg_method(tf.squeeze(e_p), gbond), axis=0)

    def rho_v_u(self, v_p, inputs):
        """
        Args:
            v_p (tf.Tensor): updated atom/node attributes
            inputs (Sequence): list or tuple for the graph inputs
        Returns:
            atom/node to global/state aggregated tensor
        """
        nodes, edges, u, index1, index2, gnode, gbond = inputs
        # gnode maps each atom to its graph; segment-pool atoms per graph.
        gnode = tf.reshape(gnode, (-1,))
        return tf.expand_dims(self.seg_method(tf.squeeze(v_p, axis=0), gnode), axis=0)

    def phi_u(self, b_e_p, b_v_p, inputs):
        """
        Args:
            b_e_p (tf.Tensor): edge/bond to global aggregated tensor
            b_v_p (tf.Tensor): node/atom to global aggregated tensor
            inputs (Sequence): list or tuple for the graph inputs
        Returns:
            updated global/state attributes
        """
        # inputs[2] is the current state tensor u.
        concated = tf.concat([b_e_p, b_v_p, inputs[2]], -1)
        return self._mlp(concated, self.phi_u_weights, self.phi_u_biases)

    def _mlp(self, input_, weights, biases):
        # Shared dense stack for phi_e/phi_v/phi_u. The activation output feeds
        # each subsequent layer, but the value returned is the final layer's
        # linear (pre-activation) output, per the loop below.
        if biases is None:
            # No bias terms configured: adding 0 leaves the dot product unchanged.
            biases = [0] * len(weights)
        act = input_
        for w, b in zip(weights, biases):
            output = kb.dot(act, w) + b
            act = self.activation(output)
        # NOTE(review): if weights were ever empty, `output` would be unbound here;
        # the units_* lists are assumed non-empty.
        return output

    def get_config(self):
        """
        Part of keras layer interface, where the signature is converted into a dict
        Returns:
            configurational dictionary
        """
        config = {
            "units_e": self.units_e,
            "units_v": self.units_v,
            "units_u": self.units_u,
            "pool_method": self.pool_method,
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
| |
#!/usr/bin/env python
"""Distutils based setup script for SymPy.
This uses Distutils (http://python.org/sigs/distutils-sig/) the standard
python mechanism for installing packages. Optionally, you can use
Setuptools (http://pythonhosted.org/setuptools/setuptools.html) to automatically
handle dependencies. For the easiest installation
just type the command (you'll probably need root privileges for that):
python setup.py install
This will install the library in the default location. For instructions on
how to customize the install procedure read the output of:
python setup.py --help install
In addition, there are some other commands:
python setup.py clean -> will clean all trash (*.pyc and stuff)
python setup.py test -> will run the complete test suite
python setup.py bench -> will run the complete benchmark suite
python setup.py audit -> will run pyflakes checker on source code
To get a full list of available commands, read the output of:
python setup.py --help-commands
Or, if all else fails, feel free to write to the sympy list at
sympy@googlegroups.com and ask for help.
"""
import sys
import subprocess
import os
import shutil
import glob
# Minimum mpmath version required by SymPy.
mpmath_version = '0.19'
try:
    from setuptools import setup, Command
except ImportError:
    # Fall back to plain distutils when setuptools is unavailable.
    from distutils.core import setup, Command
# handle mpmath deps in the hard way:
from distutils.version import LooseVersion
try:
    import mpmath
    # LooseVersion comparisons accept a plain version string on either side,
    # so comparing mpmath.__version__ (a str) against LooseVersion is valid.
    if mpmath.__version__ < LooseVersion(mpmath_version):
        raise ImportError
except ImportError:
    # Missing or too-old mpmath: tell the user and abort installation.
    print("Please install the mpmath package with a version >= %s" % mpmath_version)
    sys.exit(-1)
# True when running under Python 3.
PY3 = sys.version_info[0] > 2
# Make sure I have the right Python version.
if sys.version_info[:2] < (2, 6):
    print("SymPy requires Python 2.6 or newer. Python %d.%d detected" % sys.version_info[:2])
    sys.exit(-1)
# Check that this list is up to date against the result of the command:
# for i in `find sympy -name __init__.py | rev | cut -f 2- -d '/' | rev | egrep -v "^sympy$" | egrep -v "tests$" `; do echo "'${i//\//.}',"; done | sort
# Every subpackage must be listed explicitly: distutils does not recurse.
modules = [
    'sympy.assumptions',
    'sympy.assumptions.handlers',
    'sympy.benchmarks',
    'sympy.calculus',
    'sympy.categories',
    'sympy.combinatorics',
    'sympy.concrete',
    'sympy.core',
    'sympy.core.benchmarks',
    'sympy.crypto',
    'sympy.deprecated',
    'sympy.diffgeom',
    'sympy.external',
    'sympy.functions',
    'sympy.functions.combinatorial',
    'sympy.functions.elementary',
    'sympy.functions.elementary.benchmarks',
    'sympy.functions.special',
    'sympy.functions.special.benchmarks',
    'sympy.geometry',
    'sympy.integrals',
    'sympy.integrals.benchmarks',
    'sympy.interactive',
    'sympy.liealgebras',
    'sympy.logic',
    'sympy.logic.algorithms',
    'sympy.logic.utilities',
    'sympy.matrices',
    'sympy.matrices.benchmarks',
    'sympy.matrices.expressions',
    'sympy.ntheory',
    'sympy.parsing',
    'sympy.physics',
    'sympy.physics.hep',
    'sympy.physics.mechanics',
    'sympy.physics.optics',
    'sympy.physics.quantum',
    'sympy.physics.unitsystems',
    'sympy.physics.unitsystems.systems',
    'sympy.physics.vector',
    'sympy.plotting',
    'sympy.plotting.intervalmath',
    'sympy.plotting.pygletplot',
    'sympy.polys',
    'sympy.polys.agca',
    'sympy.polys.benchmarks',
    'sympy.polys.domains',
    'sympy.printing',
    'sympy.printing.pretty',
    'sympy.sandbox',
    'sympy.series',
    'sympy.series.benchmarks',
    'sympy.sets',
    'sympy.simplify',
    'sympy.solvers',
    'sympy.solvers.benchmarks',
    'sympy.stats',
    'sympy.strategies',
    'sympy.strategies.branch',
    'sympy.tensor',
    'sympy.tensor.array',
    'sympy.unify',
    'sympy.utilities',
    'sympy.utilities.mathml',
    'sympy.vector',
]
class audit(Command):
    """Audits SymPy's source code for following issues:
        - Names which are used but not defined or used before they are defined.
        - Names which are redefined without having been used.
    """
    description = "Audit SymPy source with PyFlakes"
    user_options = []

    def initialize_options(self):
        # No options for this command; attribute kept for interface parity.
        self.all = None

    def finalize_options(self):
        pass

    def run(self):
        # Import lazily so a missing pyflakes only breaks 'setup.py audit',
        # not every other setup.py command.
        try:
            import pyflakes.scripts.pyflakes as flakes
        except ImportError:
            print("In order to run the audit, you need to have PyFlakes installed.")
            sys.exit(-1)
        # Turn dotted module names into filesystem paths, one per package.
        dirs = (os.path.join(*d) for d in (m.split('.') for m in modules))
        warns = 0
        # 'directory' instead of 'dir' to avoid shadowing the builtin;
        # the redundant local 'import os' was removed (os is imported at
        # module level).
        for directory in dirs:
            for filename in os.listdir(directory):
                if filename.endswith('.py') and filename != '__init__.py':
                    warns += flakes.checkPath(os.path.join(directory, filename))
        if warns > 0:
            print("Audit finished with total %d warnings" % warns)
class clean(Command):
    """Cleans *.pyc and debian trash, so you should get the same copy as
    is in the VCS.
    """
    description = "remove build files"
    user_options = [("all", "a", "the same")]

    def initialize_options(self):
        self.all = None

    def finalize_options(self):
        pass

    def run(self):
        dir_setup = os.path.dirname(os.path.realpath(__file__))
        curr_dir = os.getcwd()
        # Remove compiled bytecode anywhere under the source tree.
        for root, dirs, files in os.walk(dir_setup):
            for file in files:
                path = os.path.join(root, file)
                # BUG FIX: the original tested the *function object*
                # `os.path.isfile` (always truthy) instead of calling it.
                if file.endswith('.pyc') and os.path.isfile(path):
                    os.remove(path)
        os.chdir(dir_setup)
        # Known build artefacts at the repository root.
        names = ["python-build-stamp-2.4", "MANIFEST", "build", "dist", "doc/_build", "sample.tex"]
        for f in names:
            if os.path.isfile(f):
                os.remove(f)
            elif os.path.isdir(f):
                shutil.rmtree(f)
        # Generated documentation PDFs.
        for name in glob.glob(os.path.join(dir_setup, "doc", "src", "modules",
                                           "physics", "vector", "*.pdf")):
            if os.path.isfile(name):
                os.remove(name)
        # Restore the caller's working directory.
        os.chdir(curr_dir)
class test_sympy(Command):
    """Runs all tests under the sympy/ folder
    """
    description = "run all tests and doctests; also see bin/test and bin/doctest"
    user_options = []  # distutils complains if this is not here.

    def __init__(self, *args):
        # Keep the raw positional arguments (the Distribution) around so
        # they can be forwarded to other command classes if needed.
        self.args = args[0]  # so we can pass it to other classes
        Command.__init__(self, *args)

    def initialize_options(self):  # distutils wants this
        pass

    def finalize_options(self):  # this too
        pass

    def run(self):
        # Import lazily: sympy itself need not be importable to run other
        # setup.py commands.
        from sympy.utilities import runtests
        runtests.run_all_tests()
class run_benchmarks(Command):
    """Runs all SymPy benchmarks"""
    description = "run all benchmarks"
    user_options = []  # distutils complains if this is not here.

    def __init__(self, *args):
        # Keep the raw positional arguments (the Distribution) around so
        # they can be forwarded to other command classes if needed.
        self.args = args[0]  # so we can pass it to other classes
        Command.__init__(self, *args)

    def initialize_options(self):  # distutils wants this
        pass

    def finalize_options(self):  # this too
        pass

    # we use py.test like architecture:
    #
    # o collector   -- collects benchmarks
    # o runner      -- executes benchmarks
    # o presenter   -- displays benchmarks results
    #
    # this is done in sympy.utilities.benchmarking on top of py.test
    def run(self):
        # Import lazily so missing benchmark deps don't break other commands.
        from sympy.utilities import benchmarking
        benchmarking.main(['sympy'])
# Check that this list is up to date against the result of the command:
# $ python bin/generate_test_list.py
# Test packages are listed separately from 'modules' so both are included
# in the 'packages' argument of setup() below.
tests = [
    'sympy.assumptions.tests',
    'sympy.calculus.tests',
    'sympy.categories.tests',
    'sympy.combinatorics.tests',
    'sympy.concrete.tests',
    'sympy.core.tests',
    'sympy.crypto.tests',
    'sympy.deprecated.tests',
    'sympy.diffgeom.tests',
    'sympy.external.tests',
    'sympy.functions.combinatorial.tests',
    'sympy.functions.elementary.tests',
    'sympy.functions.special.tests',
    'sympy.geometry.tests',
    'sympy.integrals.tests',
    'sympy.interactive.tests',
    'sympy.liealgebras.tests',
    'sympy.logic.tests',
    'sympy.matrices.expressions.tests',
    'sympy.matrices.tests',
    'sympy.ntheory.tests',
    'sympy.parsing.tests',
    'sympy.physics.hep.tests',
    'sympy.physics.mechanics.tests',
    'sympy.physics.optics.tests',
    'sympy.physics.quantum.tests',
    'sympy.physics.tests',
    'sympy.physics.unitsystems.tests',
    'sympy.physics.vector.tests',
    'sympy.plotting.intervalmath.tests',
    'sympy.plotting.pygletplot.tests',
    'sympy.plotting.tests',
    'sympy.polys.agca.tests',
    'sympy.polys.domains.tests',
    'sympy.polys.tests',
    'sympy.printing.pretty.tests',
    'sympy.printing.tests',
    'sympy.sandbox.tests',
    'sympy.series.tests',
    'sympy.sets.tests',
    'sympy.simplify.tests',
    'sympy.solvers.tests',
    'sympy.stats.tests',
    'sympy.strategies.branch.tests',
    'sympy.strategies.tests',
    'sympy.tensor.array.tests',
    'sympy.tensor.tests',
    'sympy.unify.tests',
    'sympy.utilities.tests',
    'sympy.vector.tests',
]
# Fallback description; immediately superseded below by the first docstring
# block of sympy/__init__.py so PyPI always shows the canonical text.
long_description = '''SymPy is a Python library for symbolic mathematics. It aims
to become a full-featured computer algebra system (CAS) while keeping the code
as simple as possible in order to be comprehensible and easily extensible.
SymPy is written entirely in Python.'''

# Defines __version__ (among others) in this module's namespace.
# Use a context manager so the file handle is closed deterministically;
# the original exec(open(...).read()) leaked the handle.
with open('sympy/release.py') as f:
    exec(f.read())

# Use the package's own module docstring as the long description.
with open('sympy/__init__.py') as f:
    long_description = f.read().split('"""')[1]
# Register the package with distutils/setuptools.  'packages' is the
# top-level 'sympy' package plus every subpackage and test package listed
# above; 'cmdclass' wires the custom setup.py commands defined in this file.
setup(name='sympy',
      version=__version__,
      description='Computer algebra system (CAS) in Python',
      long_description=long_description,
      author='SymPy development team',
      author_email='sympy@googlegroups.com',
      license='BSD',
      keywords="Math CAS",
      url='http://sympy.org',
      packages=['sympy'] + modules + tests,
      scripts=['bin/isympy'],
      ext_modules=[],
      package_data={
          'sympy.utilities.mathml': ['data/*.xsl'],
          'sympy.logic.benchmarks': ['input/*.cnf'],
      },
      data_files=[('share/man/man1', ['doc/man/isympy.1'])],
      cmdclass={'test': test_sympy,
                'bench': run_benchmarks,
                'clean': clean,
                'audit': audit},
      classifiers=[
          'License :: OSI Approved :: BSD License',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Topic :: Scientific/Engineering',
          'Topic :: Scientific/Engineering :: Mathematics',
          'Topic :: Scientific/Engineering :: Physics',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.2',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
      ],
      # Only honored by setuptools; the distutils fallback path above
      # checks the mpmath version manually instead.
      install_requires=['mpmath>=%s' % mpmath_version]
      )
| |
# -*- coding: utf-8 -*-
#
# discretize documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 30 18:42:44 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from datetime import datetime
import discretize
import subprocess
from sphinx_gallery.sorting import FileNameSortKey
import shutil
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# sys.path.append(os.path.pardir)
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    "sphinx.ext.autodoc",
    "numpydoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.coverage",
    "sphinx.ext.doctest",
    "sphinx.ext.extlinks",
    "sphinx.ext.intersphinx",
    "sphinx.ext.mathjax",
    "sphinx.ext.inheritance_diagram",
    "sphinx.ext.graphviz",
    "matplotlib.sphinxext.plot_directive",
    "sphinx_toolbox.collapse",
    "nbsphinx",
    "sphinx_gallery.gen_gallery",
]
# Autosummary pages will be generated by sphinx-autogen instead of sphinx-build
autosummary_generate = True
numpydoc_attributes_as_param_list = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source file names.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"discretize"
# Copyright year tracks the build date automatically.
copyright = u"2013 - {}, SimPEG Developers, http://simpeg.xyz".format(
    datetime.now().year
)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.7.2"
# The full version, including alpha/beta/rc tags.
release = "0.7.2"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# URLs that 'make linkcheck' should not verify.
linkcheck_ignore = [
    r"https://github.com/simpeg/*",
]
linkcheck_retries = 3
linkcheck_timeout = 500
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# source code links
link_github = True
# You can build old with link_github = False
if link_github:
    # Resolve "[source]" links to exact line ranges on GitHub.
    import inspect
    from os.path import relpath, dirname

    extensions.append('sphinx.ext.linkcode')

    def linkcode_resolve(domain, info):
        """Map a documented Python object to its source location on GitHub.

        Returns the blob URL (with a ``#Lx-Ly`` fragment when the source
        lines can be determined) or None whenever the object cannot be
        resolved to a file inside the installed discretize package.
        """
        # Only Python objects can be linked.
        if domain != "py":
            return None

        module = sys.modules.get(info["module"])
        if module is None:
            return None

        # Follow the dotted attribute path, e.g. "TreeMesh.refine".
        obj = module
        for attribute in info["fullname"].split("."):
            try:
                obj = getattr(obj, attribute)
            except Exception:
                return None

        # Strip decorator wrappers where inspect.unwrap exists (py3.4+).
        unwrap = getattr(inspect, "unwrap", None)
        if unwrap is not None:
            obj = unwrap(obj)

        try:
            fn = inspect.getsourcefile(obj)
        except Exception:
            fn = None
        if not fn:
            return None

        try:
            source, lineno = inspect.getsourcelines(obj)
        except Exception:
            lineno = None
        linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1) if lineno else ""

        try:
            # Path of the source file relative to the installed package root.
            fn = relpath(fn, start=dirname(discretize.__file__))
        except ValueError:
            return None

        return f"https://github.com/simpeg/discretize/blob/main/discretize/{fn}{linespec}"
else:
    # Without GitHub links, fall back to Sphinx's own rendered source pages.
    extensions.append('sphinx.ext.viewcode')
# Make numpydoc generate plots for example sections.
numpydoc_use_plots = True
# Code prepended (invisibly) to every plot directive: seed NumPy so the
# generated example figures are reproducible between builds.
plot_pre_code = """
import numpy as np
np.random.seed(0)
"""
plot_include_source = True
plot_formats = [("png", 100), "pdf"]
import math

# Golden ratio, used below for pleasing figure proportions.
phi = (math.sqrt(5) + 1) / 2
# Matplotlib rcParams applied to every generated documentation plot.
plot_rcparams = {
    "font.size": 8,
    "axes.titlesize": 8,
    "axes.labelsize": 8,
    "xtick.labelsize": 8,
    "ytick.labelsize": 8,
    "legend.fontsize": 8,
    "figure.figsize": (3 * phi, 3),
    "figure.subplot.bottom": 0.2,
    "figure.subplot.left": 0.2,
    "figure.subplot.right": 0.9,
    "figure.subplot.top": 0.85,
    "figure.subplot.wspace": 0.4,
    "text.usetex": False,
}
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
try:
    import pydata_sphinx_theme

    html_theme = "pydata_sphinx_theme"
    # If false, no module index is generated.
    html_use_modindex = True
    html_theme_options = {
        "external_links": [
            {"name": "SimPEG", "url": "https://simpeg.xyz"},
            {"name": "Contact", "url": "http://slack.simpeg.xyz"}
        ],
        "icon_links": [
            {
                "name": "GitHub",
                "url": "https://github.com/simpeg/discretize",
                "icon": "fab fa-github",
            },
            {
                "name": "Slack",
                "url": "http://slack.simpeg.xyz/",
                "icon": "fab fa-slack",
            },
            {
                "name": "Discourse",
                "url": "https://simpeg.discourse.group/",
                "icon": "fab fa-discourse",
            },
            {
                "name": "Youtube",
                "url": "https://www.youtube.com/c/geoscixyz",
                "icon": "fab fa-youtube",
            },
            {
                "name": "Twitter",
                "url": "https://twitter.com/simpegpy",
                "icon": "fab fa-twitter",
            },
        ],
        "use_edit_page_button": False,
    }
    html_logo = "images/discretize-logo.png"
    html_static_path = ['_static']
    html_css_files = [
        'css/custom.css',
    ]
    html_context = {
        "github_user": "simpeg",
        "github_repo": "discretize",
        "github_version": "main",
        "doc_path": "docs",
    }
except Exception:
    # pydata_sphinx_theme is unavailable: fall back to the builtin theme.
    html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "./images/discretize-block.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "discretize"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    (
        "index",
        "discretize.tex",
        u"discretize documentation",
        u"SimPEG Developers",
        "manual",
    ),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ("index", "simpeg", u"discretize Documentation", [u"SimPEG Developers"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# Intersphinx: cross-reference into the docs of these upstream projects.
intersphinx_mapping = {
    "python": ("https://docs.python.org/3/", None),
    "numpy": ("https://numpy.org/doc/stable/", None),
    "scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
    "matplotlib": ("https://matplotlib.org/stable", None),
    "pyvista": ("https://docs.pyvista.org/", None),
    "omf": ("https://omf.readthedocs.io/en/stable/", None)
}
numpydoc_xref_param_type = True
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        "index",
        "discretize",
        u"discretize documentation",
        u"SimPEG Developers",
        "discretize",
        "Finite volume methods for python.",
        "Miscellaneous",
    ),
]
# Use pyvista's image scraper for example gallery
import pyvista

# Make sure off screen is set to true when building locally
pyvista.OFF_SCREEN = True
# necessary when building the sphinx gallery
pyvista.BUILDING_GALLERY = True

# Sphinx Gallery
sphinx_gallery_conf = {
    # path to your examples scripts
    "examples_dirs": [
        "../examples",
        "../tutorials/mesh_generation",
        "../tutorials/operators",
        "../tutorials/inner_products",
        "../tutorials/pde",
    ],
    # output directories, mirroring examples_dirs entry for entry
    "gallery_dirs": [
        "examples",
        "tutorials/mesh_generation",
        "tutorials/operators",
        "tutorials/inner_products",
        "tutorials/pde",
    ],
    "within_subsection_order": FileNameSortKey,
    # BUG FIX: use a raw string for the regex -- "\.py" is an invalid
    # escape sequence (DeprecationWarning today, SyntaxError in future
    # Python versions); the matched pattern itself is unchanged.
    "filename_pattern": r"\.py",
    "backreferences_dir": "api/generated/backreferences",
    "doc_module": "discretize",
    # 'reference_url': {'discretize': None},
}
sphinx_gallery_conf["image_scrapers"] = (pyvista.Scraper(), "matplotlib")
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# Locate the Graphviz 'dot' executable on PATH (None if not installed).
graphviz_dot = shutil.which('dot')
# this must be png, because links on SVG are broken
graphviz_output_format = "png"
# Document members in the order they appear in the source file.
autodoc_member_order = "bysource"
# Cross-reference targets that nitpicky mode should not warn about.
nitpick_ignore = [
    ("py:class", "discretize.CurvilinearMesh.Array"),
    ("py:class", "discretize.mixins.vtk_mod.InterfaceTensorread_vtk"),
    ("py:class", "callable"),
]
| |
#
# The Python Imaging Library.
# $Id$
#
# a Tk display interface
#
# History:
# 96-04-08 fl Created
# 96-09-06 fl Added getimage method
# 96-11-01 fl Rewritten, removed image attribute and crop method
# 97-05-09 fl Use PyImagingPaste method instead of image type
# 97-05-12 fl Minor tweaks to match the IFUNC95 interface
# 97-05-17 fl Support the "pilbitmap" booster patch
# 97-06-05 fl Added file= and data= argument to image constructors
# 98-03-09 fl Added width and height methods to Image classes
# 98-07-02 fl Use default mode for "P" images without palette attribute
# 98-07-02 fl Explicitly destroy Tkinter image objects
# 99-07-24 fl Support multiple Tk interpreters (from Greg Couch)
# 99-07-26 fl Automatically hook into Tkinter (if possible)
# 99-08-15 fl Hook uses _imagingtk instead of _imaging
#
# Copyright (c) 1997-1999 by Secret Labs AB
# Copyright (c) 1996-1997 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import tkinter
from io import BytesIO
from . import Image
# --------------------------------------------------------------------
# Check for Tkinter interface hooks
# Cached result of the "pilbitmap" booster-patch probe:
# None = not yet probed, 1 = available, 0 = unavailable.
_pilbitmap_ok = None


def _pilbitmap_check():
    """Probe (once) whether Tk carries the PIL bitmap booster patch.

    The result is memoized in the module-level ``_pilbitmap_ok`` flag.
    """
    global _pilbitmap_ok
    if _pilbitmap_ok is None:
        try:
            probe = Image.new("1", (1, 1))
            # A patched Tk accepts "PIL:<id>" data; an unpatched one raises.
            tkinter.BitmapImage(data="PIL:%d" % probe.im.id)
        except tkinter.TclError:
            _pilbitmap_ok = 0
        else:
            _pilbitmap_ok = 1
    return _pilbitmap_ok
def _get_image_from_kw(kw):
source = None
if "file" in kw:
source = kw.pop("file")
elif "data" in kw:
source = BytesIO(kw.pop("data"))
if source:
return Image.open(source)
# --------------------------------------------------------------------
# PhotoImage
class PhotoImage:
    """
    A Tkinter-compatible photo image. This can be used
    everywhere Tkinter expects an image object. If the image is an RGBA
    image, pixels having alpha 0 are treated as transparent.

    The constructor takes either a PIL image, or a mode and a size.
    Alternatively, you can use the **file** or **data** options to initialize
    the photo image object.

    :param image: Either a PIL image, or a mode string. If a mode string is
                  used, a size must also be given.
    :param size: If the first argument is a mode string, this defines the size
                 of the image.
    :keyword file: A filename to load the image from (using
                   ``Image.open(file)``).
    :keyword data: An 8-bit string containing image data (as loaded from an
                   image file).
    """

    def __init__(self, image=None, size=None, **kw):
        # Tk compatibility: file or data
        if image is None:
            image = _get_image_from_kw(kw)
        # Duck-typed check: a PIL image exposes both .mode and .size.
        if hasattr(image, "mode") and hasattr(image, "size"):
            # got an image instead of a mode
            mode = image.mode
            if mode == "P":
                # palette mapped data
                image.load()
                try:
                    mode = image.palette.mode
                except AttributeError:
                    mode = "RGB"  # default
            size = image.size
            kw["width"], kw["height"] = size
        else:
            # First argument was a mode string; a size must have been given.
            mode = image
            image = None
        # Normalize exotic modes down to a Tk-displayable base mode.
        if mode not in ["1", "L", "RGB", "RGBA"]:
            mode = Image.getmodebase(mode)
        self.__mode = mode
        self.__size = size
        self.__photo = tkinter.PhotoImage(**kw)
        self.tk = self.__photo.tk
        if image:
            self.paste(image)

    def __del__(self):
        # Detach the Tk name first, then ask the interpreter to delete the
        # image; failures are ignored since the interpreter may already be
        # shutting down when __del__ runs.
        name = self.__photo.name
        self.__photo.name = None
        try:
            self.__photo.tk.call("image", "delete", name)
        except Exception:
            pass  # ignore internal errors

    def __str__(self):
        """
        Get the Tkinter photo image identifier. This method is automatically
        called by Tkinter whenever a PhotoImage object is passed to a Tkinter
        method.

        :return: A Tkinter photo image identifier (a string).
        """
        return str(self.__photo)

    def width(self):
        """
        Get the width of the image.

        :return: The width, in pixels.
        """
        return self.__size[0]

    def height(self):
        """
        Get the height of the image.

        :return: The height, in pixels.
        """
        return self.__size[1]

    def paste(self, im, box=None):
        """
        Paste a PIL image into the photo image. Note that this can
        be very slow if the photo image is displayed.

        :param im: A PIL image. The size must match the target region. If the
                   mode does not match, the image is converted to the mode of
                   the bitmap image.
        :param box: A 4-tuple defining the left, upper, right, and lower pixel
                    coordinate. See :ref:`coordinate-system`. If None is given
                    instead of a tuple, all of the image is assumed.
        """
        # NOTE(review): ``box`` is accepted but never used below -- the whole
        # image is always pasted; confirm against callers before changing.
        # convert to blittable
        im.load()
        image = im.im
        if image.isblock() and im.mode == self.__mode:
            block = image
        else:
            # Mode mismatch (or non-block storage): convert into a fresh
            # block buffer matching this photo image's mode.
            block = image.new_block(self.__mode, im.size)
            image.convert2(block, image)  # convert directly between buffers
        tk = self.__photo.tk
        try:
            tk.call("PyImagingPhoto", self.__photo, block.id)
        except tkinter.TclError:
            # activate Tkinter hook
            try:
                from . import _imagingtk
                try:
                    if hasattr(tk, "interp"):
                        # Required for PyPy, which always has CFFI installed
                        from cffi import FFI
                        ffi = FFI()
                        # PyPy is using an FFI CDATA element
                        # (Pdb) self.tk.interp
                        # <cdata 'Tcl_Interp *' 0x3061b50>
                        _imagingtk.tkinit(int(ffi.cast("uintptr_t", tk.interp)), 1)
                    else:
                        _imagingtk.tkinit(tk.interpaddr(), 1)
                except AttributeError:
                    _imagingtk.tkinit(id(tk), 0)
                # Retry the paste now that the hook is installed.
                tk.call("PyImagingPhoto", self.__photo, block.id)
            except (ImportError, AttributeError, tkinter.TclError):
                raise  # configuration problem; cannot attach to Tkinter
# --------------------------------------------------------------------
# BitmapImage
class BitmapImage:
    """
    A Tkinter-compatible bitmap image. This can be used everywhere Tkinter
    expects an image object.

    The given image must have mode "1". Pixels having value 0 are treated as
    transparent. Options, if any, are passed on to Tkinter. The most commonly
    used option is **foreground**, which is used to specify the color for the
    non-transparent parts. See the Tkinter documentation for information on
    how to specify colours.

    :param image: A PIL image.
    """

    def __init__(self, image=None, **kw):
        # Tk compatibility: file or data
        if image is None:
            image = _get_image_from_kw(kw)
        # NOTE(review): if no image and no file/data option was supplied,
        # the next line raises AttributeError on None -- confirm intended.
        self.__mode = image.mode
        self.__size = image.size
        if _pilbitmap_check():
            # fast way (requires the pilbitmap booster patch)
            image.load()
            kw["data"] = "PIL:%d" % image.im.id
            self.__im = image  # must keep a reference
        else:
            # slow but safe way
            kw["data"] = image.tobitmap()
        self.__photo = tkinter.BitmapImage(**kw)

    def __del__(self):
        # Detach the Tk name first, then delete the image; errors are
        # swallowed because the interpreter may already be shutting down.
        name = self.__photo.name
        self.__photo.name = None
        try:
            self.__photo.tk.call("image", "delete", name)
        except Exception:
            pass  # ignore internal errors

    def width(self):
        """
        Get the width of the image.

        :return: The width, in pixels.
        """
        return self.__size[0]

    def height(self):
        """
        Get the height of the image.

        :return: The height, in pixels.
        """
        return self.__size[1]

    def __str__(self):
        """
        Get the Tkinter bitmap image identifier. This method is automatically
        called by Tkinter whenever a BitmapImage object is passed to a Tkinter
        method.

        :return: A Tkinter bitmap image identifier (a string).
        """
        return str(self.__photo)
def getimage(photo):
    """Copies the contents of a PhotoImage to a PIL image memory."""
    # Allocate an RGBA destination matching the photo's dimensions, then
    # let Tk blit straight into its underlying image block.
    copy = Image.new("RGBA", (photo.width(), photo.height()))
    photo.tk.call("PyImagingPhotoGet", photo, copy.im.id)
    return copy
def _show(image, title):
    """Helper for the Image.show method: display ``image`` in a Toplevel."""

    class UI(tkinter.Label):
        def __init__(self, master, im):
            # Mode "1" images need a BitmapImage (with a visible foreground);
            # every other mode goes through a PhotoImage.
            if im.mode == "1":
                photo = BitmapImage(im, foreground="white", master=master)
            else:
                photo = PhotoImage(im, master=master)
            self.image = photo  # keep a reference so Tk does not lose it
            super().__init__(master, image=photo, bg="black", bd=0)

    if not tkinter._default_root:
        raise OSError("tkinter not initialized")
    window = tkinter.Toplevel()
    if title:
        window.title(title)
    UI(window, image).pack()
| |
from __future__ import absolute_import
from django.conf import settings
import pika
from pika.adapters.blocking_connection import BlockingChannel
from pika.spec import Basic
import logging
import ujson
import random
import time
import threading
import atexit
from collections import defaultdict
from zerver.lib.utils import statsd
from typing import Any, Callable, Dict, List, Mapping, Optional, Set, Union
Consumer = Callable[[BlockingChannel, Basic.Deliver, pika.BasicProperties, str], None]
# This simple queuing library doesn't expose much of the power of
# rabbitmq/pika's queuing system; its purpose is to just provide an
# interface for external files to put things into queues and take them
# out from bots without having to import pika code all over our codebase.
class SimpleQueueClient(object):
def __init__(self):
# type: () -> None
self.log = logging.getLogger('zulip.queue')
self.queues = set() # type: Set[str]
self.channel = None # type: Optional[BlockingChannel]
self.consumers = defaultdict(set) # type: Dict[str, Set[Consumer]]
# Disable RabbitMQ heartbeats since BlockingConnection can't process them
self.rabbitmq_heartbeat = 0
self._connect()
def _connect(self):
# type: () -> None
start = time.time()
self.connection = pika.BlockingConnection(self._get_parameters())
self.channel = self.connection.channel()
self.log.info('SimpleQueueClient connected (connecting took %.3fs)' % (time.time() - start,))
def _reconnect(self):
# type: () -> None
self.connection = None
self.channel = None
self.queues = set()
self._connect()
def _get_parameters(self):
# type: () -> pika.ConnectionParameters
# We explicitly disable the RabbitMQ heartbeat feature, since
# it doesn't make sense with BlockingConnection
credentials = pika.PlainCredentials(settings.RABBITMQ_USERNAME,
settings.RABBITMQ_PASSWORD)
return pika.ConnectionParameters(settings.RABBITMQ_HOST,
heartbeat_interval=self.rabbitmq_heartbeat,
credentials=credentials)
def _generate_ctag(self, queue_name):
# type: (str) -> str
return "%s_%s" % (queue_name, str(random.getrandbits(16)))
def _reconnect_consumer_callback(self, queue, consumer):
# type: (str, Consumer) -> None
self.log.info("Queue reconnecting saved consumer %s to queue %s" % (consumer, queue))
self.ensure_queue(queue, lambda: self.channel.basic_consume(consumer,
queue=queue,
consumer_tag=self._generate_ctag(queue)))
def _reconnect_consumer_callbacks(self):
# type: () -> None
for queue, consumers in self.consumers.items():
for consumer in consumers:
self._reconnect_consumer_callback(queue, consumer)
def close(self):
# type: () -> None
if self.connection:
self.connection.close()
def ready(self):
# type: () -> bool
return self.channel is not None
def ensure_queue(self, queue_name, callback):
# type: (str, Callable[[], None]) -> None
'''Ensure that a given queue has been declared, and then call
the callback with no arguments.'''
if not self.connection.is_open:
self._connect()
if queue_name not in self.queues:
self.channel.queue_declare(queue=queue_name, durable=True)
self.queues.add(queue_name)
callback()
def publish(self, queue_name, body):
# type: (str, str) -> None
def do_publish():
# type: () -> None
self.channel.basic_publish(
exchange='',
routing_key=queue_name,
properties=pika.BasicProperties(delivery_mode=2),
body=body)
statsd.incr("rabbitmq.publish.%s" % (queue_name,))
self.ensure_queue(queue_name, do_publish)
def json_publish(self, queue_name, body):
# type: (str, Union[Mapping[str, Any], str]) -> None
# Union because of zerver.middleware.write_log_line uses a str
try:
self.publish(queue_name, ujson.dumps(body))
except (AttributeError, pika.exceptions.AMQPConnectionError):
self.log.warning("Failed to send to rabbitmq, trying to reconnect and send again")
self._reconnect()
self.publish(queue_name, ujson.dumps(body))
def register_consumer(self, queue_name, consumer):
# type: (str, Consumer) -> None
def wrapped_consumer(ch, method, properties, body):
# type: (BlockingChannel, Basic.Deliver, pika.BasicProperties, str) -> None
try:
consumer(ch, method, properties, body)
ch.basic_ack(delivery_tag=method.delivery_tag)
except Exception as e:
ch.basic_nack(delivery_tag=method.delivery_tag)
raise e
self.consumers[queue_name].add(wrapped_consumer)
self.ensure_queue(queue_name,
lambda: self.channel.basic_consume(wrapped_consumer, queue=queue_name,
consumer_tag=self._generate_ctag(queue_name)))
def register_json_consumer(self, queue_name, callback):
# type: (str, Callable[[Mapping[str, Any]], None]) -> None
def wrapped_callback(ch, method, properties, body):
# type: (BlockingChannel, Basic.Deliver, pika.BasicProperties, str) -> None
callback(ujson.loads(body))
self.register_consumer(queue_name, wrapped_callback)
    def drain_queue(self, queue_name, json=False):
        # type: (str, bool) -> List[Dict[str, Any]]
        "Returns all messages in the desired queue"
        messages = []
        def opened():
            # type: () -> None
            while True:
                # basic_get returns a (None, None, None)-style tuple once
                # the queue is empty, which ends the loop below.
                (meta, _, message) = self.channel.basic_get(queue_name)
                if not message:
                    break
                # Ack immediately so drained messages are not redelivered.
                self.channel.basic_ack(meta.delivery_tag)
                if json:
                    message = ujson.loads(message)
                messages.append(message)
        self.ensure_queue(queue_name, opened)
        return messages
    def start_consuming(self):
        # type: () -> None
        """Enter pika's blocking consume loop on the current channel."""
        self.channel.start_consuming()
    def stop_consuming(self):
        # type: () -> None
        """Ask pika to exit the consume loop started by start_consuming()."""
        self.channel.stop_consuming()
# Patch pika.adapters.TornadoConnection so that a socket error doesn't
# throw an exception and disconnect the tornado process from the rabbitmq
# queue. Instead, just re-connect as usual
class ExceptionFreeTornadoConnection(pika.adapters.TornadoConnection):
    """TornadoConnection variant that swallows expected disconnect errors.

    See the comment above: a socket/protocol error during disconnect must
    not take down the tornado process; reconnection is handled elsewhere.
    """
    def _adapter_disconnect(self):
        # type: () -> None
        try:
            super(ExceptionFreeTornadoConnection, self)._adapter_disconnect()
        except (pika.exceptions.ProbableAuthenticationError,
                pika.exceptions.ProbableAccessDeniedError,
                pika.exceptions.IncompatibleProtocolError) as e:
            logging.warning("Caught exception '%r' in ExceptionFreeTornadoConnection when \
calling _adapter_disconnect, ignoring" % (e,))
class TornadoQueueClient(SimpleQueueClient):
    """Non-blocking queue client driven by the tornado IOLoop."""
    # Based on:
    # https://pika.readthedocs.io/en/0.9.8/examples/asynchronous_consumer_example.html
    def __init__(self):
        # type: () -> None
        super(TornadoQueueClient, self).__init__()
        # Enable rabbitmq heartbeat since TornadoConnection can process them
        # (None leaves the interval to the server's suggestion).
        self.rabbitmq_heartbeat = None
        # Callbacks to invoke once the channel has been opened.
        self._on_open_cbs = [] # type: List[Callable[[], None]]
    def _connect(self, on_open_cb = None):
        # type: (Optional[Callable[[], None]]) -> None
        """Start an asynchronous connection; queue on_open_cb for channel-open."""
        self.log.info("Beginning TornadoQueueClient connection")
        if on_open_cb is not None:
            self._on_open_cbs.append(on_open_cb)
        self.connection = ExceptionFreeTornadoConnection(
            self._get_parameters(),
            on_open_callback = self._on_open,
            stop_ioloop_on_close = False)
        self.connection.add_on_close_callback(self._on_connection_closed)
    def _reconnect(self):
        # type: () -> None
        """Drop all connection state and begin a fresh async connect."""
        self.connection = None
        self.channel = None
        # Queues must be re-declared on the new connection.
        self.queues = set()
        self._connect()
    def _on_open(self, connection):
        # type: (pika.Connection) -> None
        # Connection established; now open a channel asynchronously.
        self.connection.channel(
            on_open_callback = self._on_channel_open)
    def _on_channel_open(self, channel):
        # type: (BlockingChannel) -> None
        # Channel ready: flush deferred callbacks, then restore consumers.
        self.channel = channel
        for callback in self._on_open_cbs:
            callback()
        self._reconnect_consumer_callbacks()
        self.log.info('TornadoQueueClient connected')
    def _on_connection_closed(self, connection, reply_code, reply_text):
        # type: (pika.Connection, int, str) -> None
        """Schedule reconnect attempts on the IOLoop after a lost connection."""
        self.log.warning("TornadoQueueClient lost connection to RabbitMQ, reconnecting...")
        from tornado import ioloop
        # Try to reconnect in two seconds
        retry_seconds = 2
        def on_timeout():
            # type: () -> None
            try:
                self._reconnect()
            except pika.exceptions.AMQPConnectionError:
                self.log.critical("Failed to reconnect to RabbitMQ, retrying...")
                # Keep retrying until the broker comes back.
                ioloop.IOLoop.instance().add_timeout(time.time() + retry_seconds, on_timeout)
        ioloop.IOLoop.instance().add_timeout(time.time() + retry_seconds, on_timeout)
    def ensure_queue(self, queue_name, callback):
        # type: (str, Callable[[], None]) -> None
        """Declare queue_name asynchronously if needed, then invoke callback."""
        def finish(frame):
            # type: (Any) -> None
            self.queues.add(queue_name)
            callback()
        if queue_name not in self.queues:
            # If we're not connected yet, send this message
            # once we have created the channel
            if not self.ready():
                self._on_open_cbs.append(lambda: self.ensure_queue(queue_name, callback))
                return
            self.channel.queue_declare(queue=queue_name, durable=True, callback=finish)
        else:
            callback()
    def register_consumer(self, queue_name, consumer):
        # type: (str, Consumer) -> None
        """Attach consumer to queue_name, deferring until the channel is open."""
        def wrapped_consumer(ch, method, properties, body):
            # type: (BlockingChannel, Basic.Deliver, pika.BasicProperties, str) -> None
            consumer(ch, method, properties, body)
            ch.basic_ack(delivery_tag=method.delivery_tag)
        if not self.ready():
            # Not connected yet: _reconnect_consumer_callbacks will attach
            # the saved wrapper once the channel opens.
            self.consumers[queue_name].add(wrapped_consumer)
            return
        self.consumers[queue_name].add(wrapped_consumer)
        self.ensure_queue(queue_name,
                          lambda: self.channel.basic_consume(wrapped_consumer, queue=queue_name,
                                                             consumer_tag=self._generate_ctag(queue_name)))
queue_client = None # type: Optional[SimpleQueueClient]
def get_queue_client():
    # type: () -> SimpleQueueClient
    """Return the process-wide queue client singleton, creating it lazily.

    NOTE(review): when USING_RABBITMQ is false this returns None despite the
    declared type — callers appear to only invoke this when rabbitmq is
    configured; confirm before relying on the return value.
    """
    global queue_client
    if queue_client is None:
        if settings.RUNNING_INSIDE_TORNADO and settings.USING_RABBITMQ:
            queue_client = TornadoQueueClient()
        elif settings.USING_RABBITMQ:
            queue_client = SimpleQueueClient()
    return queue_client
def setup_tornado_rabbitmq():
    # type: () -> None
    """Register an atexit hook that closes the rabbitmq connection."""
    # When tornado is shut down, disconnect cleanly from rabbitmq
    # NOTE(review): the lambda reads the module global at exit time and
    # assumes get_queue_client() has run by then; confirm with callers.
    if settings.USING_RABBITMQ:
        atexit.register(lambda: queue_client.close())
# We use a simple lock to prevent multiple RabbitMQ messages being
# sent to the SimpleQueueClient at the same time; this is a workaround
# for an issue with the pika BlockingConnection where using
# BlockingConnection for multiple queues causes the channel to
# randomly close.
queue_lock = threading.RLock()
def queue_json_publish(queue_name, event, processor):
    # type: (str, Union[Mapping[str, Any], str], Callable[[Any], None]) -> None
    """Publish event to queue_name via rabbitmq, or process it synchronously.

    When rabbitmq is not configured, the supplied processor is invoked
    inline with the event instead of publishing.
    """
    # most events are dicts, but zerver.middleware.write_log_line uses a str
    with queue_lock:
        if settings.USING_RABBITMQ:
            get_queue_client().json_publish(queue_name, event)
        else:
            processor(event)
| |
#!/usr/bin/python3
# Copyright (c) 2018-2021 Dell Inc. or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# IMPORTS
import csv
import json
import sys
import io
from collections import defaultdict
class parse_key_components:
    """Split an exported setting key into its role type and optional node id.

    Keys look like '<role>_<setting>' (no node id) or
    '<role>_<node id>_<setting>'.
    """
    role_type = None
    node_id = None
    def setKey(self, key):
        """Record the role type (first token) and node id (second token, if any)."""
        key_tokens = key.split('_')
        if len(key_tokens) > 2:
            self.node_id = key_tokens[1]
        self.role_type = key_tokens[0]
    def getNodeId(self):
        """Return the node id when it is numeric, otherwise the string 'false'."""
        # Bug fix: the original used unicode(), which does not exist on
        # Python 3 (this script's shebang is python3). str() also guards
        # against node_id being None.
        if str(self.node_id).isnumeric():
            return self.node_id
        return 'false'
    def getRoleType(self):
        """Return the role token parsed by setKey()."""
        return self.role_type
def read_input_file(input_file):
    """Parse the exported name/value CSV into {role: {node_id: settings}}.

    Rows whose role prefix is unknown, or whose per-node role lacks a
    numeric node id, are skipped. Roles without node ids (sah, director)
    are stored under the node id 'false'.
    """
    # Maps each role to the boolean flag recorded on its nodes.
    role_flags = {
        'sah': 'is_sah',
        'director': 'is_director',
        'compute': 'is_compute',
        'controller': 'is_controller',
        'storage': 'is_ceph_storage',
    }
    settings = {}
    # Bug fix: the file name used to be hard-coded to
    # "exported_name_value.csv", silently ignoring the input_file argument;
    # the handle was also never closed.
    with open(input_file, 'r') as csv_input:
        csv_reader = csv.DictReader(csv_input, fieldnames=("Key", "Value"),
                                    delimiter='|')
        for row in csv_reader:
            key = row["Key"]
            value = row["Value"]
            components = parse_key_components()
            components.setKey(key)
            role_type = components.getRoleType()
            node_id = components.getNodeId()
            if key.startswith("sah_"):
                node_setting_key = key.split("sah_")[1]
            elif key.startswith("director_"):
                node_setting_key = key.split("director_")[1]
            elif key.startswith(("controller_", "compute_", "storage_")):
                # Per-node roles require a numeric node id in the key.
                if not node_id.isdigit():
                    continue
                node_setting_key = key.split(
                    "%s_%s_" % (role_type, node_id))[1]
            else:
                continue
            node_settings = settings.setdefault(role_type, {})
            node_id_settings = node_settings.setdefault(node_id, {})
            node_id_settings[role_flags[role_type]] = 'true'
            node_id_settings[node_setting_key] = value
    return settings
def generate_output_file(in_data, output_file,
                         excluded_keys, use_service_tags):
    """Flatten {role: {node_id: settings}} into a JSON list written to output_file.

    Per-role IP settings that do not apply to that role are dropped, as is
    either the idrac_ip or the service_tag depending on use_service_tags,
    plus any key listed in excluded_keys (which may be None).
    """
    sections = []
    for role_settings in in_data.values():
        for odata in role_settings.values():
            if use_service_tags and "idrac_ip" in odata:
                del odata["idrac_ip"]
            elif "service_tag" in odata:
                del odata["service_tag"]
            # Bug fix: these deletions used to be unconditional `del`s and
            # raised KeyError for any node missing one of the keys;
            # pop(..., None) makes them best-effort.
            if "is_director" in odata:
                odata.pop("storage_ip", None)
            if "is_controller" in odata:
                odata.pop("storage_cluster_ip", None)
                odata.pop("provisioning_ip", None)
            if "is_compute" in odata:
                odata.pop("storage_cluster_ip", None)
                odata.pop("provisioning_ip", None)
                odata.pop("public_api_ip", None)
            if "is_ceph_storage" in odata:
                odata.pop("tenant_tunnel_ip", None)
                odata.pop("private_api_ip", None)
                odata.pop("provisioning_ip", None)
                odata.pop("public_api_ip", None)
            for exclude_key in (excluded_keys or []):
                odata.pop(exclude_key, None)
            sections.append(odata)
    with open(output_file, 'w') as outputFile:
        outputFile.write(json.dumps(sections, indent=4, sort_keys=True))
def main():
    """CLI entry point: <kvp_input_file> <output_file> [-use_service_tag]."""
    num_args = len(sys.argv) - 1
    use_service_tag = False
    if num_args < 2:
        print("error: missing required arguments")
        print("usage: python %s <kvp_input_file> <output_file> \
              [-use_service_tag]" % sys.argv[0])
        sys.exit(1)
    kvp_in_file = sys.argv[1]
    output_file = sys.argv[2]
    # Optional third argument keeps service tags instead of idrac IPs.
    if num_args == 3:
        mode = sys.argv[3]
        if mode == '-use_service_tag':
            use_service_tag = True
    if kvp_in_file == output_file:
        print("error: all file arguments must be unique")
        sys.exit(1)
    in_data = read_input_file(kvp_in_file)
    if len(in_data) > 0:
        # Settings that must never appear in the generated output.
        excluded_keys = ["bonding_opts",
                         "bond_opts",
                         "bond_0_interface_0",
                         "bond_0_interface_1",
                         "bond_1_interface_0",
                         "bond_1_interface_1",
                         "public_ip",
                         "os_oob_management_ip",
                         "provisioning_interface",
                         "install_user",
                         "install_user_password",
                         "ipmi_user",
                         "ipmi_password"]
        generate_output_file(in_data, output_file,
                             excluded_keys, use_service_tag)
    else:
        print("no input data. not populating output ini file")
        sys.exit(1)
if __name__ == '__main__':
    main()
| |
"""
Support for Z-Wave lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.zwave/
"""
import logging
# Because we do not compile openzwave on CI
# pylint: disable=import-error
from threading import Timer
from homeassistant.components.light import ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, \
ATTR_RGB_COLOR, SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP, \
SUPPORT_RGB_COLOR, DOMAIN, Light
from homeassistant.components import zwave
from homeassistant.components.zwave import async_setup_platform # noqa # pylint: disable=unused-import
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.util.color import HASS_COLOR_MAX, HASS_COLOR_MIN, \
color_temperature_mired_to_kelvin, color_temperature_to_rgb, \
color_rgb_to_rgbw, color_rgbw_to_rgb
_LOGGER = logging.getLogger(__name__)
# Aeotec manufacturer/product ids used to detect the ZW098 LED bulb.
AEOTEC = 0x86
AEOTEC_ZW098_LED_BULB = 0x62
AEOTEC_ZW098_LED_BULB_LIGHT = (AEOTEC, AEOTEC_ZW098_LED_BULB)
# Bitmask flags reported in the SWITCH_COLOR capability value.
COLOR_CHANNEL_WARM_WHITE = 0x01
COLOR_CHANNEL_COLD_WHITE = 0x02
COLOR_CHANNEL_RED = 0x04
COLOR_CHANNEL_GREEN = 0x08
COLOR_CHANNEL_BLUE = 0x10
WORKAROUND_ZW098 = 'zw098'
# Maps (manufacturer_id, product_id) -> workaround identifier.
DEVICE_MAPPINGS = {
    AEOTEC_ZW098_LED_BULB_LIGHT: WORKAROUND_ZW098
}
# Generate midpoint color temperatures for bulbs that have limited
# support for white light colors
TEMP_MID_HASS = (HASS_COLOR_MAX - HASS_COLOR_MIN) / 2 + HASS_COLOR_MIN
TEMP_WARM_HASS = (HASS_COLOR_MAX - HASS_COLOR_MIN) / 3 * 2 + HASS_COLOR_MIN
TEMP_COLD_HASS = (HASS_COLOR_MAX - HASS_COLOR_MIN) / 3 + HASS_COLOR_MIN
# Feature sets advertised to Home Assistant per entity class.
SUPPORT_ZWAVE_DIMMER = SUPPORT_BRIGHTNESS
SUPPORT_ZWAVE_COLOR = SUPPORT_BRIGHTNESS | SUPPORT_RGB_COLOR
SUPPORT_ZWAVE_COLORTEMP = (SUPPORT_BRIGHTNESS | SUPPORT_RGB_COLOR
                           | SUPPORT_COLOR_TEMP)
def get_device(node, value, node_config, **kwargs):
    """Create zwave entity device."""
    name = '{}.{}'.format(DOMAIN, zwave.object_id(value))
    refresh = node_config.get(zwave.CONF_REFRESH_VALUE)
    delay = node_config.get(zwave.CONF_REFRESH_DELAY)
    _LOGGER.debug('name=%s node_config=%s CONF_REFRESH_VALUE=%s'
                  ' CONF_REFRESH_DELAY=%s', name, node_config,
                  refresh, delay)
    # Nodes advertising the color command class get the richer entity.
    if node.has_command_class(zwave.const.COMMAND_CLASS_SWITCH_COLOR):
        return ZwaveColorLight(value, refresh, delay)
    return ZwaveDimmer(value, refresh, delay)
def brightness_state(value):
    """Return the brightness and state."""
    if value.data <= 0:
        return 0, STATE_OFF
    # Z-Wave multilevel range is 0..99; scale it to Home Assistant's 0..255.
    return round((value.data / 99) * 255, 0), STATE_ON
class ZwaveDimmer(zwave.ZWaveDeviceEntity, Light):
    """Representation of a Z-Wave dimmer."""
    def __init__(self, value, refresh, delay):
        """Initialize the light.

        value: zwave value object backing this entity.
        refresh: whether to re-query the device after a change event.
        delay: seconds to wait before that refresh query.
        """
        zwave.ZWaveDeviceEntity.__init__(self, value, DOMAIN)
        self._brightness = None
        self._state = None
        self._delay = delay
        self._refresh_value = refresh
        self._zw098 = None
        # Enable appropriate workaround flags for our device
        # Make sure that we have values for the key before converting to int
        if (value.node.manufacturer_id.strip() and
                value.node.product_id.strip()):
            specific_sensor_key = (int(value.node.manufacturer_id, 16),
                                   int(value.node.product_id, 16))
            if specific_sensor_key in DEVICE_MAPPINGS:
                if DEVICE_MAPPINGS[specific_sensor_key] == WORKAROUND_ZW098:
                    _LOGGER.debug("AEOTEC ZW098 workaround enabled")
                    self._zw098 = 1
        # Used for value change event handling
        self._refreshing = False
        self._timer = None
        _LOGGER.debug('self._refreshing=%s self.delay=%s',
                      self._refresh_value, self._delay)
        self.update_properties()
    def update_properties(self):
        """Update internal properties based on zwave values."""
        # Brightness
        self._brightness, self._state = brightness_state(self._value)
    def value_changed(self):
        """Called when a value for this entity's node has changed."""
        if self._refresh_value:
            if self._refreshing:
                # This event is the echo of our own refresh; consume the flag.
                self._refreshing = False
            else:
                def _refresh_value():
                    """Used timer callback for delayed value refresh."""
                    self._refreshing = True
                    self._value.refresh()
                # Restart the timer so only the last change in the delay
                # window triggers a refresh.
                if self._timer is not None and self._timer.isAlive():
                    self._timer.cancel()
                self._timer = Timer(self._delay, _refresh_value)
                self._timer.start()
                return
        super().value_changed()
    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        return self._brightness
    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state == STATE_ON
    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_ZWAVE_DIMMER
    def turn_on(self, **kwargs):
        """Turn the device on."""
        # Zwave multilevel switches use a range of [0, 99] to control
        # brightness. Level 255 means to set it to previous value.
        if ATTR_BRIGHTNESS in kwargs:
            self._brightness = kwargs[ATTR_BRIGHTNESS]
            brightness = int((self._brightness / 255) * 99)
        else:
            brightness = 255
        if self._value.node.set_dimmer(self._value.value_id, brightness):
            self._state = STATE_ON
    def turn_off(self, **kwargs):
        """Turn the device off."""
        if self._value.node.set_dimmer(self._value.value_id, 0):
            self._state = STATE_OFF
def ct_to_rgb(temp):
    """Convert color temperature (mireds) to RGB."""
    kelvin = color_temperature_mired_to_kelvin(temp)
    return [int(channel) for channel in color_temperature_to_rgb(kelvin)]
class ZwaveColorLight(ZwaveDimmer):
    """Representation of a Z-Wave color changing light."""
    def __init__(self, value, refresh, delay):
        """Initialize the light."""
        from openzwave.network import ZWaveNetwork
        from pydispatch import dispatcher
        self._value_color = None
        self._value_color_channels = None
        self._color_channels = None
        self._rgb = None
        self._ct = None
        super().__init__(value, refresh, delay)
        # Create a listener so the color values can be linked to this entity
        dispatcher.connect(
            self._value_added, ZWaveNetwork.SIGNAL_VALUE_ADDED)
        self._get_color_values()
    @property
    def dependent_value_ids(self):
        """List of value IDs a device depends on."""
        return [val.value_id for val in [
            self._value_color, self._value_color_channels] if val]
    def _get_color_values(self):
        """Search for color values available on this node."""
        from openzwave.network import ZWaveNetwork
        from pydispatch import dispatcher
        _LOGGER.debug("Searching for zwave color values")
        # Currently zwave nodes only exist with one color element per node.
        if self._value_color is None:
            for value_color in self._value.node.get_rgbbulbs().values():
                self._value_color = value_color
        if self._value_color_channels is None:
            self._value_color_channels = self.get_value(
                class_id=zwave.const.COMMAND_CLASS_SWITCH_COLOR,
                genre=zwave.const.GENRE_SYSTEM, type=zwave.const.TYPE_INT)
        if self._value_color and self._value_color_channels:
            _LOGGER.debug("Zwave node color values found.")
            # Both values located: stop listening for new network values.
            dispatcher.disconnect(
                self._value_added, ZWaveNetwork.SIGNAL_VALUE_ADDED)
            self.update_properties()
    def _value_added(self, value):
        """Called when a value has been added to the network."""
        if self._value.node != value.node:
            return
        # Check for the missing color values
        self._get_color_values()
    def update_properties(self):
        """Update internal properties based on zwave values."""
        super().update_properties()
        if self._value_color is None:
            return
        if self._value_color_channels is None:
            return
        # Color Channels
        self._color_channels = self._value_color_channels.data
        # Color Data String, e.g. '#RRGGBB' plus optional white channel bytes
        data = self._value_color.data
        # RGB is always present in the openzwave color data string.
        self._rgb = [
            int(data[1:3], 16),
            int(data[3:5], 16),
            int(data[5:7], 16)]
        # Parse remaining color channels. Openzwave appends white channels
        # that are present.
        index = 7
        # Warm white
        if self._color_channels & COLOR_CHANNEL_WARM_WHITE:
            warm_white = int(data[index:index+2], 16)
            index += 2
        else:
            warm_white = 0
        # Cold white
        if self._color_channels & COLOR_CHANNEL_COLD_WHITE:
            cold_white = int(data[index:index+2], 16)
            index += 2
        else:
            cold_white = 0
        # Color temperature. With the AEOTEC ZW098 bulb, only two color
        # temperatures are supported. The warm and cold channel values
        # indicate brightness for warm/cold color temperature.
        if self._zw098:
            if warm_white > 0:
                self._ct = TEMP_WARM_HASS
                self._rgb = ct_to_rgb(self._ct)
            elif cold_white > 0:
                self._ct = TEMP_COLD_HASS
                self._rgb = ct_to_rgb(self._ct)
            else:
                # RGB color is being used. Just report midpoint.
                self._ct = TEMP_MID_HASS
        elif self._color_channels & COLOR_CHANNEL_WARM_WHITE:
            self._rgb = list(color_rgbw_to_rgb(*self._rgb, w=warm_white))
        elif self._color_channels & COLOR_CHANNEL_COLD_WHITE:
            self._rgb = list(color_rgbw_to_rgb(*self._rgb, w=cold_white))
        # If no rgb channels supported, report None.
        if not (self._color_channels & COLOR_CHANNEL_RED or
                self._color_channels & COLOR_CHANNEL_GREEN or
                self._color_channels & COLOR_CHANNEL_BLUE):
            self._rgb = None
    @property
    def rgb_color(self):
        """Return the rgb color."""
        return self._rgb
    @property
    def color_temp(self):
        """Return the color temperature."""
        return self._ct
    def turn_on(self, **kwargs):
        """Turn the device on."""
        rgbw = None
        if ATTR_COLOR_TEMP in kwargs:
            # Color temperature. With the AEOTEC ZW098 bulb, only two color
            # temperatures are supported. The warm and cold channel values
            # indicate brightness for warm/cold color temperature.
            if self._zw098:
                if kwargs[ATTR_COLOR_TEMP] > TEMP_MID_HASS:
                    self._ct = TEMP_WARM_HASS
                    rgbw = b'#000000FF00'
                else:
                    self._ct = TEMP_COLD_HASS
                    rgbw = b'#00000000FF'
        elif ATTR_RGB_COLOR in kwargs:
            self._rgb = kwargs[ATTR_RGB_COLOR]
            # White-channel bulbs get an RGBW conversion; pure RGB bulbs
            # keep the white bytes zeroed.
            if (not self._zw098 and (
                    self._color_channels & COLOR_CHANNEL_WARM_WHITE or
                    self._color_channels & COLOR_CHANNEL_COLD_WHITE)):
                rgbw = b'#'
                for colorval in color_rgb_to_rgbw(*self._rgb):
                    rgbw += format(colorval, '02x').encode('utf-8')
                rgbw += b'00'
            else:
                rgbw = b'#'
                for colorval in self._rgb:
                    rgbw += format(colorval, '02x').encode('utf-8')
                rgbw += b'0000'
        if rgbw and self._value_color:
            self._value_color.node.set_rgbw(self._value_color.value_id, rgbw)
        super().turn_on(**kwargs)
    @property
    def supported_features(self):
        """Flag supported features."""
        if self._zw098:
            return SUPPORT_ZWAVE_COLORTEMP
        else:
            return SUPPORT_ZWAVE_COLOR
| |
# -*- coding: utf-8 -*-
"""Handles the instrospection of REST Framework Views and ViewSets."""
import inspect
import re
import yaml
import importlib
from .compat import OrderedDict, strip_tags
from abc import ABCMeta, abstractmethod
from django.http import HttpRequest
from django.contrib.auth.models import AnonymousUser
from django.contrib.admindocs.utils import trim_docstring
from django.utils.encoding import smart_text
import rest_framework
from rest_framework.views import get_view_name
from rest_framework import viewsets
from rest_framework.compat import apply_markdown
from rest_framework.utils import formatting
from django.utils import six
def get_view_description(view_cls, html=False, docstring=None):
    """Render a view class description via DRF's configured function.

    When a docstring override is supplied, a throwaway subclass carrying
    that docstring is described instead of the original class.
    """
    if docstring is not None:
        view_cls = type(
            view_cls.__name__ + '_fake',
            (view_cls,),
            {'__doc__': docstring})
    describe = rest_framework.settings.api_settings.VIEW_DESCRIPTION_FUNCTION
    return describe(view_cls, html)
def get_default_value(field):
    """Return the declared default of a serializer field, or None.

    DRF 3 uses the `empty` sentinel for "no default"; it is mapped to
    None here. Callable defaults are invoked so the concrete value is
    reported.
    """
    default_value = getattr(field, 'default', None)
    if rest_framework.VERSION >= '3.0.0':
        from rest_framework.fields import empty
        # `empty` is a sentinel, so compare by identity rather than
        # equality (the old `==` relied on default object equality).
        if default_value is empty:
            default_value = None
    if callable(default_value):
        default_value = default_value()
    return default_value
class IntrospectorHelper(object):
    """Static helpers shared by the view and method introspectors."""
    __metaclass__ = ABCMeta
    @staticmethod
    def strip_yaml_from_docstring(docstring):
        """
        Strips YAML from the docstring.
        """
        # The YAML section begins at the last '---' line; drop it and
        # everything after it.
        split_lines = trim_docstring(docstring).split('\n')
        cut_off = None
        for index in range(len(split_lines) - 1, -1, -1):
            line = split_lines[index]
            line = line.strip()
            if line == '---':
                cut_off = index
                break
        if cut_off is not None:
            split_lines = split_lines[0:cut_off]
        return "\n".join(split_lines)
    @staticmethod
    def strip_params_from_docstring(docstring):
        """
        Strips the params from the docstring (ie. myparam -- Some param) will
        not be removed from the text body
        """
        params_pattern = re.compile(r'(?:^|[^-])--(?:$|[^-])')
        split_lines = trim_docstring(docstring).split('\n')
        cut_off = None
        for index, line in enumerate(split_lines):
            line = line.strip()
            if params_pattern.search(line):
                # Everything from the first 'name -- description' line on is
                # parameter documentation, not body text.
                cut_off = index
                break
        if cut_off is not None:
            split_lines = split_lines[0:cut_off]
        return "\n".join(split_lines)
    @staticmethod
    def get_serializer_name(serializer):
        """Return the class name of a serializer class or instance, or None."""
        if serializer is None:
            return None
        if rest_framework.VERSION >= '3.0.0':
            from rest_framework.serializers import ListSerializer
            # NOTE(review): guards against being handed the ListSerializer
            # class itself; many=True instances are unwrapped to the child.
            assert serializer != ListSerializer, "uh oh, what now?"
            if isinstance(serializer, ListSerializer):
                serializer = serializer.child
        if inspect.isclass(serializer):
            return serializer.__name__
        return serializer.__class__.__name__
    @staticmethod
    def get_summary(callback, docstring=None):
        """
        Returns the first sentence of the first line of the class docstring
        """
        description = strip_tags(get_view_description(
            callback, html=True, docstring=docstring)) \
            .split("\n")[0].split(".")[0]
        return description
class BaseViewIntrospector(object):
    """Base for introspecting a DRF view class bound to a URL pattern."""
    __metaclass__ = ABCMeta
    def __init__(self, callback, path, pattern):
        # callback: the view class; path: the documented URL path;
        # pattern: the resolved URL pattern object.
        self.callback = callback
        self.path = path
        self.pattern = pattern
    def get_yaml_parser(self):
        """Return a YAML docstring parser bound to this introspector."""
        parser = YAMLDocstringParser(self)
        return parser
    @abstractmethod
    def __iter__(self):
        """Yield one method introspector per supported HTTP method."""
        pass
    def get_iterator(self):
        return self.__iter__()
    def get_description(self):
        """
        Returns the first sentence of the first line of the class docstring
        """
        return IntrospectorHelper.get_summary(self.callback)
    def get_docs(self):
        """Return the full (raw) view description."""
        return get_view_description(self.callback)
class BaseMethodIntrospector(object):
    """Introspects one HTTP method of a DRF view.

    Subclasses supply get_docs(); summaries, notes, serializer discovery
    and swagger parameter construction are implemented here.
    """
    __metaclass__ = ABCMeta
    # Swagger primitive type -> allowed formats (first entry is the default).
    PRIMITIVES = {
        'integer': ['int32', 'int64'],
        'number': ['float', 'double'],
        'string': ['string', 'byte', 'date', 'date-time'],
        'boolean': ['boolean'],
    }
    def __init__(self, view_introspector, method):
        self.method = method
        self.parent = view_introspector
        self.callback = view_introspector.callback
        self.path = view_introspector.path
    def get_module(self):
        return self.callback.__module__
    def check_yaml_methods(self, yaml_methods):
        """Raise if the class-level YAML documents methods the view lacks."""
        missing_set = set()
        for key in yaml_methods:
            if key not in self.parent.methods():
                missing_set.add(key)
        if missing_set:
            raise Exception(
                "methods %s in class docstring are not in view methods %s"
                % (list(missing_set), list(self.parent.methods())))
    def get_yaml_parser(self):
        """Merge class-level YAML for this method with the method's own YAML."""
        parser = YAMLDocstringParser(self)
        parent_parser = YAMLDocstringParser(self.parent)
        self.check_yaml_methods(parent_parser.object.keys())
        new_object = {}
        new_object.update(parent_parser.object.get(self.method, {}))
        # Method-level YAML wins over class-level YAML.
        new_object.update(parser.object)
        parser.object = new_object
        return parser
    def get_extra_serializer_classes(self):
        return self.get_yaml_parser().get_extra_serializer_classes(
            self.callback)
    def ask_for_serializer_class(self):
        """Ask the (possibly YAML-mocked) view instance for its serializer."""
        if hasattr(self.callback, 'get_serializer_class'):
            view = self.create_view()
            parser = self.get_yaml_parser()
            mock_view = parser.get_view_mocker(self.callback)
            view = mock_view(view)
            if view is not None:
                return view.get_serializer_class()
    def create_view(self):
        """Instantiate the view with a minimal anonymous request for introspection."""
        view = self.callback()
        if not hasattr(view, 'kwargs'):
            view.kwargs = dict()
        if hasattr(self.parent.pattern, 'default_args'):
            view.kwargs.update(self.parent.pattern.default_args)
        view.request = HttpRequest()
        view.request.user = AnonymousUser()
        view.request.method = self.method
        return view
    def get_serializer_class(self):
        """Prefer a YAML-declared serializer, else ask the view itself."""
        parser = self.get_yaml_parser()
        serializer = parser.get_serializer_class(self.callback)
        if serializer is None:
            serializer = self.ask_for_serializer_class()
        return serializer
    def get_response_serializer_class(self):
        """Response serializer, falling back to the general serializer."""
        parser = self.get_yaml_parser()
        serializer = parser.get_response_serializer_class(self.callback)
        if serializer is None:
            serializer = self.get_serializer_class()
        return serializer
    def get_request_serializer_class(self):
        """Request serializer, falling back to the general serializer."""
        parser = self.get_yaml_parser()
        serializer = parser.get_request_serializer_class(self.callback)
        if serializer is None:
            serializer = self.get_serializer_class()
        return serializer
    def get_summary(self):
        # If there is no docstring on the method, get class docs
        return IntrospectorHelper.get_summary(
            self.callback,
            self.get_docs() or self.parent.get_description())
    def get_nickname(self):
        """ Returns the APIView's nickname """
        return get_view_name(self.callback).replace(' ', '_')
    def get_notes(self):
        """
        Returns the body of the docstring trimmed before any parameters are
        listed. First, get the class docstring and then get the method's. The
        methods will always inherit the class comments.
        """
        docstring = ""
        class_docs = get_view_description(self.callback)
        class_docs = IntrospectorHelper.strip_yaml_from_docstring(class_docs)
        class_docs = IntrospectorHelper.strip_params_from_docstring(class_docs)
        method_docs = self.get_docs()
        if class_docs is not None:
            docstring += class_docs + " \n"
        if method_docs is not None:
            method_docs = formatting.dedent(smart_text(method_docs))
            method_docs = IntrospectorHelper.strip_yaml_from_docstring(
                method_docs
            )
            method_docs = IntrospectorHelper.strip_params_from_docstring(
                method_docs
            )
            docstring += '\n' + method_docs
        return do_markdown(docstring)
    def get_parameters(self):
        """
        Returns parameters for an API. Parameters are a combination of HTTP
        query parameters as well as HTTP body parameters that are defined by
        the DRF serializer fields
        """
        params = []
        path_params = self.build_path_parameters()
        body_params = self.build_body_parameters()
        form_params = self.build_form_parameters()
        query_params = self.build_query_parameters()
        if path_params:
            params += path_params
        # GET/DELETE requests carry no request body, so skip form/body params.
        if self.get_http_method() not in ["GET", "DELETE"]:
            params += form_params
            if not form_params and body_params is not None:
                params.append(body_params)
        if query_params:
            params += query_params
        return params
    def get_http_method(self):
        return self.method
    @abstractmethod
    def get_docs(self):
        return ''
    def retrieve_docstring(self):
        """
        Attempts to fetch the docs for a class method. Returns None
        if the method does not exist
        """
        method = str(self.method).lower()
        if not hasattr(self.callback, method):
            return None
        return get_view_description(getattr(self.callback, method))
    def build_body_parameters(self):
        """Describe the request body via the request serializer, if any."""
        serializer = self.get_request_serializer_class()
        serializer_name = IntrospectorHelper.get_serializer_name(serializer)
        if serializer_name is None:
            return
        return {
            'name': serializer_name,
            'type': serializer_name,
            'paramType': 'body',
        }
    def build_path_parameters(self):
        """
        Gets the parameters from the URL
        """
        url_params = re.findall('/{([^}]*)}', self.path)
        params = []
        for param in url_params:
            params.append({
                'name': param,
                'type': 'string',
                'paramType': 'path',
                'required': True
            })
        return params
    def build_query_parameters(self):
        """Collect 'name -- description' lines from the method and class docs."""
        params = []
        docstring = self.retrieve_docstring() or ''
        docstring += "\n" + get_view_description(self.callback)
        # (The old `if docstring is None` check here was unreachable: the
        # concatenation above always produces a string.)
        split_lines = docstring.split('\n')
        for line in split_lines:
            param = line.split(' -- ')
            if len(param) == 2:
                params.append({'paramType': 'query',
                               'name': param[0].strip(),
                               'description': param[1].strip(),
                               'dataType': ''})
        return params
    def build_form_parameters(self):
        """
        Builds form parameters from the serializer class
        """
        data = []
        serializer = self.get_request_serializer_class()
        if serializer is None:
            return data
        fields = serializer().get_fields()
        for name, field in fields.items():
            if getattr(field, 'read_only', False):
                continue
            data_type = get_data_type(field)
            # guess format
            data_format = 'string'
            if data_type in self.PRIMITIVES:
                data_format = self.PRIMITIVES.get(data_type)[0]
            f = {
                'paramType': 'form',
                'name': name,
                'description': getattr(field, 'help_text', ''),
                'type': data_type,
                'format': data_format,
                'required': getattr(field, 'required', False),
                'defaultValue': get_default_value(field),
            }
            # Min/Max values
            max_val = getattr(field, 'max_val', None)
            min_val = getattr(field, 'min_val', None)
            # Bug fix: 'minimum' used to be gated on max_val, so a field with
            # only min_val never reported its minimum, and one with only
            # max_val wrote minimum=None.
            if min_val is not None and data_type == 'integer':
                f['minimum'] = min_val
            if max_val is not None and data_type == 'integer':
                f['maximum'] = max_val
            # ENUM options
            if get_data_type(field) in ['multiple choice', 'choice']:
                if isinstance(field.choices, list):
                    f['enum'] = [k for k, v in field.choices]
                elif isinstance(field.choices, dict):
                    f['enum'] = [k for k, v in field.choices.items()]
            data.append(f)
        return data
def get_data_type(field):
    """Map a DRF serializer field to its swagger data-type label.

    Order matters: subclasses (e.g. EmailField) must be matched before
    their base classes (e.g. CharField).
    """
    from rest_framework import fields
    if hasattr(field, 'type_label'):
        return field.type_label
    field_type_labels = (
        (fields.BooleanField, 'boolean'),
        (fields.URLField, 'url'),
        (fields.SlugField, 'slug'),
        (fields.ChoiceField, 'choice'),
        (fields.EmailField, 'email'),
        (fields.RegexField, 'regex'),
        (fields.DateField, 'date'),
        (fields.DateTimeField, 'datetime'),
        (fields.TimeField, 'time'),
        (fields.IntegerField, 'integer'),
        (fields.FloatField, 'float'),
        (fields.DecimalField, 'decimal'),
        (fields.ImageField, 'image upload'),
        (fields.FileField, 'file upload'),
        (fields.CharField, 'string'),
    )
    for field_class, label in field_type_labels:
        if isinstance(field, field_class):
            return label
    return 'field'
class APIViewIntrospector(BaseViewIntrospector):
    """Introspector for class-based ``APIView`` endpoints."""

    def __iter__(self):
        """Yield one method introspector per allowed HTTP method."""
        for http_method in self.methods():
            yield APIViewMethodIntrospector(self, http_method)

    def methods(self):
        """Return the HTTP methods the view callback allows."""
        return self.callback().allowed_methods
class WrappedAPIViewIntrospector(BaseViewIntrospector):
    """Introspector for function-based views wrapped into an APIView."""

    def __iter__(self):
        """Yield one method introspector per allowed HTTP method."""
        for http_method in self.methods():
            yield WrappedAPIViewMethodIntrospector(self, http_method)

    def methods(self):
        """Return the HTTP methods the view callback allows."""
        return self.callback().allowed_methods

    def get_notes(self):
        """Return the class docs rendered as HTML, with the YAML block and
        parameter section stripped out first."""
        docs = get_view_description(self.callback)
        docs = IntrospectorHelper.strip_yaml_from_docstring(docs)
        docs = IntrospectorHelper.strip_params_from_docstring(docs)
        return get_view_description(
            self.callback, html=True, docstring=docs)
def do_markdown(docstring):
    """Render *docstring* with markdown when available.

    When the optional markdown dependency is missing (``apply_markdown``
    is falsy), fall back to crude paragraph breaks.
    """
    if not apply_markdown:
        return docstring.replace("\n\n", "<br/>")
    return apply_markdown(docstring)
class APIViewMethodIntrospector(BaseMethodIntrospector):
    def get_docs(self):
        """Return docs for this endpoint's method; the class docstring is
        used as a fallback when no method-specific docs exist."""
        return self.retrieve_docstring()
class WrappedAPIViewMethodIntrospector(BaseMethodIntrospector):
    def get_docs(self):
        """Return the wrapped function's description; for function-based
        views it serves every HTTP method."""
        return get_view_description(self.callback)

    def get_module(self):
        """Return the module name of the original (undecorated) function."""
        from rest_framework_swagger.decorators import wrapper_to_func
        original = wrapper_to_func(self.callback)
        return original.__module__

    def get_notes(self):
        """Delegate to the parent introspector's stripped class docs."""
        return self.parent.get_notes()

    def get_yaml_parser(self):
        """Build a YAML docstring parser bound to this introspector."""
        return YAMLDocstringParser(self)
class ViewSetIntrospector(BaseViewIntrospector):
    """Handle ViewSet introspection."""
    def __init__(self, callback, path, pattern, patterns=None):
        # patterns: optional list of URL patterns all routed to this
        # ViewSet; defaults to just the pattern that was matched.
        super(ViewSetIntrospector, self).__init__(callback, path, pattern)
        if not issubclass(callback, viewsets.ViewSetMixin):
            raise Exception("wrong callback passed to ViewSetIntrospector")
        self.patterns = patterns or [pattern]
    def __iter__(self):
        # _resolve_methods() maps HTTP method -> bound action name; yield
        # one method introspector per HTTP method of the primary pattern.
        methods = self._resolve_methods()
        for method in methods:
            yield ViewSetMethodIntrospector(self, methods[method], method)
    def methods(self):
        # Collect action names across every pattern bound to this ViewSet.
        stuff = []
        for pattern in self.patterns:
            if pattern.callback:
                stuff.extend(self._resolve_methods(pattern).values())
        return stuff
    def _resolve_methods(self, pattern=None):
        # Dig the 'actions' mapping out of the closure of the pattern's
        # callback -- presumably the dict captured by ViewSet.as_view();
        # TODO(review): confirm against the routers in use.
        from .decorators import closure_n_code, get_closure_var
        if pattern is None:
            pattern = self.pattern
        callback = pattern.callback
        try:
            x = closure_n_code(callback)
            # Decorators may wrap the view function; peel layers until we
            # reach the code object literally named 'view'.
            while getattr(x.code, 'co_name') != 'view':
                # lets unwrap!
                callback = get_closure_var(callback)
                x = closure_n_code(callback)
            freevars = x.code.co_freevars
        except (AttributeError, IndexError):
            raise RuntimeError(
                'Unable to use callback invalid closure/function ' +
                'specified.')
        else:
            # Read the cell contents of the free variable named 'actions'.
            return x.closure[freevars.index('actions')].cell_contents
class ViewSetMethodIntrospector(BaseMethodIntrospector):
    def __init__(self, view_introspector, method, http_method):
        super(ViewSetMethodIntrospector, self).__init__(
            view_introspector, method)
        self.http_method = http_method.upper()

    def get_http_method(self):
        """Return the upper-cased HTTP verb this introspector describes."""
        return self.http_method

    def get_docs(self):
        """Return method-specific docs, falling back to the class
        docstring when none are available."""
        return self.retrieve_docstring()

    def create_view(self):
        """Instantiate the view, pinning ``action`` and the request verb."""
        view = super(ViewSetMethodIntrospector, self).create_view()
        if not hasattr(view, 'action'):
            view.action = self.method
        view.request.method = self.http_method
        return view

    def build_query_parameters(self):
        """Extend the inherited query parameters with pagination controls
        when the view paginates its list action."""
        parameters = super(
            ViewSetMethodIntrospector, self).build_query_parameters()
        view = self.create_view()
        if not hasattr(view, 'paginate_by'):
            return parameters
        if self.method == 'list' and view.paginate_by:
            parameters.append({
                'paramType': 'query',
                'name': view.page_kwarg,
                'description': None,
                'dataType': 'integer',
            })
        if getattr(view, 'paginate_by_param', None):
            parameters.append({
                'paramType': 'query',
                'name': view.paginate_by_param,
                'description': None,
                'dataType': 'integer',
            })
        return parameters
def multi_getattr(obj, attr, default=None):
    """
    Get a named attribute from an object; multi_getattr(x, 'a.b.c.d') is
    equivalent to x.a.b.c.d. When a default argument is given, it is
    returned when any attribute in the chain doesn't exist; without
    it, an exception is raised when a missing attribute is encountered.

    Note: an explicit ``default=None`` is indistinguishable from omitting
    it, so ``None`` itself cannot be used as the fallback value.
    """
    for name in attr.split("."):
        try:
            obj = getattr(obj, name)
        except AttributeError:
            # Was ``if default:``, which wrongly re-raised for falsy
            # defaults such as 0, '' or False.
            if default is not None:
                return default
            raise
    return obj
class YAMLDocstringParser(object):
    """
    Docstring parser powered by YAML syntax

    This parser allows you override some parts of automatic method inspection
    behaviours which are not always correct.

    See the following documents for more information about YAML and Swagger:
    - https://github.com/wordnik/swagger-core/wiki
    - http://www.yaml.org/spec/1.2/spec.html
    - https://github.com/wordnik/swagger-codegen/wiki/Creating-Swagger-JSON-from-YAML-files

    1. Control over parameters
    ============================================================================
    Define parameters and its properties in docstrings:

        parameters:
            - name: some_param
              description: Foobar long description goes here
              required: true
              type: integer
              paramType: form
              minimum: 10
              maximum: 100
            - name: other_foo
              paramType: query
            - name: avatar
              type: file

    It is possible to override parameters discovered by method inspector by
    defining:
    `parameters_strategy` option to either `merge` or `replace`

    To define different strategies for different `paramType`'s use the
    following syntax:

        parameters_strategy:
            form: replace
            query: merge

    By default strategy is set to `merge`

    Sometimes method inspector produces wrong list of parameters that
    you might not want to see in SWAGGER form. To handle this situation
    define `paramTypes` that should be omitted

        omit_parameters:
            - form

    2. Control over serializers
    ============================================================================
    Once in a while you are using different serializers inside methods
    but automatic method inspector cannot detect this. For that purpose there
    are two explicit parameters that allow you to discard serializer detected
    by method inspector OR replace it with another one

        serializer: some.package.FooSerializer
        omit_serializer: true

    3. Custom Response Class
    ============================================================================
    If your view is not using serializer at all but instead outputs simple
    data type such as JSON you may define custom response object in method
    signature like follows:

        type:
          name:
              required: true
              type: string
          url:
              required: false
              type: url

    4. Response Messages (Error Codes)
    ============================================================================
    If you'd like to share common response errors that your APIView might throw
    you can define them in docstring using following format:

        responseMessages:
            - code: 401
              message: Not authenticated
            - code: 403
              message: Insufficient rights to call this procedure

    5. Different models for reading and writing operations
    ============================================================================
    Since REST Framework won't output write_only fields in responses as well as
    does not require read_only fields to be provided it is worth to
    automatically register 2 separate models for reading and writing operations.

    Discovered serializer will be registered with `Write` or `Read` prefix.
    Response Class will be automatically adjusted if serializer class was
    detected by method inspector.

    You can also refer to these models in your parameters:

        parameters:
            - name: CigarSerializer
              type: WriteCigarSerializer
              paramType: body

    SAMPLE DOCSTRING:
    ============================================================================

    ---
    # API Docs
    # Note: YAML always starts with `---`

    type:
      name:
          required: true
          type: string
      url:
          required: false
          type: url
      created_at:
          required: true
          type: string
          format: date-time

    serializer: .serializers.FooSerializer
    omit_serializer: false

    parameters_strategy: merge
    omit_parameters:
        - path

    parameters:
        - name: name
          description: Foobar long description goes here
          required: true
          type: string
          paramType: form
        - name: other_foo
          paramType: query
        - name: other_bar
          paramType: query
        - name: avatar
          type: file

    responseMessages:
        - code: 401
          message: Not authenticated
    """
    PARAM_TYPES = ['header', 'path', 'form', 'body', 'query']
    yaml_error = None

    def __init__(self, method_introspector):
        self.method_introspector = method_introspector
        self.object = self.load_obj_from_docstring(
            docstring=self.method_introspector.get_docs())
        if self.object is None:
            self.object = {}

    def load_obj_from_docstring(self, docstring):
        """Loads YAML from docstring"""
        split_lines = trim_docstring(docstring).split('\n')
        # Cut YAML from rest of docstring
        for index, line in enumerate(split_lines):
            line = line.strip()
            if line.startswith('---'):
                cut_from = index
                break
        else:
            # No '---' marker: the docstring carries no YAML section.
            return None
        yaml_string = "\n".join(split_lines[cut_from:])
        yaml_string = formatting.dedent(yaml_string)
        try:
            # NOTE(review): yaml.load without an explicit Loader is the
            # full, unsafe loader on old PyYAML (and a TypeError on
            # PyYAML >= 6). Docstrings are developer-authored, but
            # consider yaml.safe_load here.
            return yaml.load(yaml_string)
        except yaml.YAMLError as e:
            self.yaml_error = e
            return None

    def _load_class(self, cls_path, callback):
        """
        Dynamically load a class from a string.

        Tries, in order: the introspected view's own module (bare name),
        a relative/fully-qualified import, and finally an attribute chain
        relative to the view's module. Raises Exception when all fail.
        """
        if not cls_path or not callback or not hasattr(callback, '__module__'):
            return None
        package = None
        if '.' not in cls_path:
            # within current module/file
            class_name = cls_path
            module_path = self.method_introspector.get_module()
        else:
            # relative or fully qualified path import
            class_name = cls_path.split('.')[-1]
            module_path = ".".join(cls_path.split('.')[:-1])
            if cls_path.startswith('.'):
                # relative lookup against current package
                # ..serializers.FooSerializer
                package = self.method_introspector.get_module()
        class_obj = None
        # Try to perform local or relative/fq import
        try:
            module = importlib.import_module(module_path, package=package)
            class_obj = getattr(module, class_name, None)
        except ImportError:
            pass
        # Class was not found, maybe it was imported to callback module?
        # from app.serializers import submodule
        # serializer: submodule.FooSerializer
        if class_obj is None:
            try:
                module = importlib.import_module(
                    self.method_introspector.get_module())
                class_obj = multi_getattr(module, cls_path, None)
            except (ImportError, AttributeError):
                # Do not reference the local ``module`` here: it is unbound
                # when both imports failed, which used to raise a confusing
                # NameError instead of this message.
                raise Exception("Could not find %s, looked in %s" % (
                    cls_path, self.method_introspector.get_module()))
        return class_obj

    def get_serializer_class(self, callback):
        """
        Retrieves serializer class from YAML object
        """
        serializer = self.object.get('serializer', None)
        try:
            return self._load_class(serializer, callback)
        except (ImportError, ValueError):
            pass
        return None

    def get_extra_serializer_classes(self, callback):
        """
        Retrieves serializer classes from pytype YAML objects
        """
        parameters = self.object.get('parameters', [])
        serializers = []
        for parameter in parameters:
            serializer = parameter.get('pytype', None)
            if serializer is not None:
                try:
                    serializer = self._load_class(serializer, callback)
                    serializers.append(serializer)
                except (ImportError, ValueError):
                    pass
        return serializers

    def get_request_serializer_class(self, callback):
        """
        Retrieves request serializer class from YAML object
        """
        serializer = self.object.get('request_serializer', None)
        try:
            return self._load_class(serializer, callback)
        except (ImportError, ValueError):
            pass
        return None

    def get_response_serializer_class(self, callback):
        """
        Retrieves response serializer class from YAML object
        """
        serializer = self.object.get('response_serializer', None)
        # A list means "one of"; only the first entry is honoured.
        if isinstance(serializer, list):
            serializer = serializer[0]
        try:
            return self._load_class(serializer, callback)
        except (ImportError, ValueError):
            pass
        return None

    def get_response_type(self):
        """
        Docstring may define custom response class
        """
        return self.object.get('type', None)

    def get_response_messages(self):
        """
        Retrieves response error codes from YAML object
        """
        messages = []
        response_messages = self.object.get('responseMessages', [])
        for message in response_messages:
            messages.append({
                'code': message.get('code', None),
                'message': message.get('message', None),
                'responseModel': message.get('responseModel', None),
            })
        return messages

    def get_view_mocker(self, callback):
        """Return the view mocker callable (identity when none defined)."""
        view_mocker = self.object.get('view_mocker', lambda a: a)
        if isinstance(view_mocker, six.string_types):
            view_mocker = self._load_class(view_mocker, callback)
        return view_mocker

    def get_parameters(self, callback):
        """
        Retrieves parameters from YAML object
        """
        params = []
        fields = self.object.get('parameters', [])
        for field in fields:
            param_type = field.get('paramType', None)
            if param_type not in self.PARAM_TYPES:
                param_type = 'form'
            # Data Type & Format
            # See:
            # https://github.com/wordnik/swagger-core/wiki/1.2-transition#wiki-additions-2
            # https://github.com/wordnik/swagger-core/wiki/Parameters
            data_type = field.get('type', 'string')
            pytype = field.get('pytype', None)
            if pytype is not None:
                try:
                    serializer = self._load_class(pytype, callback)
                    data_type = IntrospectorHelper.get_serializer_name(
                        serializer)
                except (ImportError, ValueError):
                    pass
            if param_type in ['path', 'query', 'header']:
                # Non-body parameters must be primitives.
                if data_type not in BaseMethodIntrospector.PRIMITIVES:
                    data_type = 'string'
            # Data Format
            data_format = field.get('format', None)
            flatten_primitives = [
                val for sublist in BaseMethodIntrospector.PRIMITIVES.values()
                for val in sublist
            ]
            if data_format not in flatten_primitives:
                formats = BaseMethodIntrospector.PRIMITIVES.get(data_type, None)
                if formats:
                    data_format = formats[0]
                else:
                    data_format = 'string'
            f = {
                'paramType': param_type,
                'name': field.get('name', None),
                'description': field.get('description', None),
                'type': data_type,
                'format': data_format,
                'required': field.get('required', False),
                'defaultValue': field.get('defaultValue', None),
            }
            # Allow Multiple Values &f=1,2,3,4
            if field.get('allowMultiple'):
                f['allowMultiple'] = True
            # Min/Max are optional
            if 'minimum' in field and data_type == 'integer':
                f['minimum'] = str(field.get('minimum', 0))
            if 'maximum' in field and data_type == 'integer':
                f['maximum'] = str(field.get('maximum', 0))
            # enum options
            enum = field.get('enum', [])
            if enum:
                f['enum'] = enum
            # File support
            if f['type'] == 'file':
                f['paramType'] = 'body'
            params.append(f)
        return params

    def discover_parameters(self, inspector):
        """
        Applies parameters strategy for parameters discovered
        from method and docstring
        """
        parameters = []
        docstring_params = self.get_parameters(inspector.callback)
        method_params = inspector.get_parameters()
        # paramType may differ, overwrite first
        # so strategy can be applied
        for meth_param in method_params:
            for doc_param in docstring_params:
                if doc_param['name'] == meth_param['name']:
                    if 'paramType' in doc_param:
                        meth_param['paramType'] = doc_param['paramType']
        for param_type in self.PARAM_TYPES:
            if self.should_omit_parameters(param_type):
                continue
            parameters += self._apply_strategy(
                param_type, method_params, docstring_params
            )
        # PATCH requests expects all fields except path fields to be optional
        if inspector.get_http_method() == "PATCH":
            for param in parameters:
                if param['paramType'] != 'path':
                    param['required'] = False
        return parameters

    def should_omit_parameters(self, param_type):
        """
        Checks if particular parameter types should be omitted explicitly
        """
        return param_type in self.object.get('omit_parameters', [])

    def should_omit_serializer(self):
        """
        Checks if serializer should be intentionally omitted
        """
        return self.object.get('omit_serializer', False)

    def _apply_strategy(self, param_type, method_params, docstring_params):
        """
        Applies strategy for subset of parameters filtered by `paramType`
        """
        strategy = self.get_parameters_strategy(param_type=param_type)
        method_params = self._filter_params(
            params=method_params,
            key='paramType',
            val=param_type
        )
        docstring_params = self._filter_params(
            params=docstring_params,
            key='paramType',
            val=param_type
        )
        if strategy == 'replace':
            # Relies on _filter_params returning real (possibly empty)
            # lists so this falls back to method_params when the
            # docstring defines nothing for this paramType.
            return docstring_params or method_params
        elif strategy == 'merge':
            return self._merge_params(
                method_params,
                docstring_params,
                key='name',
            )
        return []

    @staticmethod
    def _filter_params(params, key, val):
        """
        Return the subset of ``params`` whose ``key`` equals ``val``.

        Returns a concrete list instead of a lazy ``filter`` object:
        on Python 3 an empty filter object is still truthy, which broke
        the ``docstring_params or method_params`` fallback in
        ``_apply_strategy`` and allowed only a single iteration.
        """
        return [param for param in params if param.get(key, None) == val]

    @staticmethod
    def _merge_params(params1, params2, key):
        """
        Helper method.
        Merges parameters lists by key; entries from params2 override
        entries from params1 sharing the same key.
        """
        import itertools
        merged = OrderedDict()
        for item in itertools.chain(params1, params2):
            merged[item[key]] = item
        return [val for (_, val) in merged.items()]

    def get_parameters_strategy(self, param_type=None):
        """
        Get behaviour strategy for parameter types.

        It can be either `merge` or `replace`:
            - `merge` overwrites duplicate parameters signatures
                discovered by inspector with the ones defined explicitly in
                docstring
            - `replace` strategy completely overwrites parameters discovered
                by inspector with the ones defined explicitly in docstring.

        Note: Strategy can be defined per `paramType` so `path` parameters can
        use `merge` strategy while `form` parameters will use `replace`
        strategy.

        Default strategy: `merge`
        """
        default = 'merge'
        strategy = self.object.get('parameters_strategy', default)
        if hasattr(strategy, 'get') and param_type is not None:
            strategy = strategy.get(param_type, default)
        if strategy not in ['merge', 'replace']:
            strategy = default
        return strategy
| |
# Copyright 2015 Ciara Kamahele-Sanfratello
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math, random
from Primitives import *
class KitchenState(State):
    """Belief (or exact world) state for the kitchen domain.

    Distributions are stored as plain probability lists:
      robotLoc -- [p_loc0, p_loc1, p_loc2, ...]
      heldObj  -- [p_obj0, ..., p_objN-1, p_nothing]
      objLocs  -- one location distribution per object
      freeLocs -- [p_loc0_free, p_loc1_free, ...]
    When ``world`` is True the state is an exact world state and the
    validation/normalization in assert_valid is skipped.
    """
    def __init__(self, numObjs, numLocs, stoveLoc,
                 move_info, pick_info, place_info, cook_info, look_info,
                 robotLoc, heldObj, objLocs, freeLocs,
                 round_precision=3, world=False, epsilon=0.1):
        self.numObjs = numObjs
        self.numLocs = numLocs
        self.stoveLoc = stoveLoc
        self.move_info = move_info
        self.pick_info = pick_info
        self.place_info = place_info
        self.cook_info = cook_info
        self.look_info = look_info
        # Each *_info tuple bundles (success prob, observation prob, cost).
        self.move_prob, self.bump_obs, self.move_cost = move_info
        self.pick_prob, self.drop_obs, self.pick_cost = pick_info
        self.place_prob, self.stick_obs, self.place_cost = place_info
        self.cook_prob, self.raw_obs, self.cook_cost = cook_info
        self.look_obs, self.look_cost = look_info
        # robot and object locations are represented as [p_loc0, p_loc1, p_loc2, ...]
        self.robotLoc = robotLoc[:]
        # heldObj is represented as [p_obj0, p_obj1, p_obj2, ..., p_nothing]
        self.heldObj = heldObj[:]
        # objLocs is represented as [obj0loc[], obj1loc[], obj2loc[] ...]
        self.objLocs = [objLoc[:] for objLoc in objLocs]
        # freeLocs is represented as [p_loc0_free, p_loc1_free, p_loc2_free, ...]
        self.freeLocs = freeLocs[:]
        self.round_precision = round_precision
        self.epsilon = epsilon
        self.world = world
        self.assert_valid()
    # return robot location if definite else None
    def robot_loc(self):
        for loc in range(self.numLocs):
            if self.robotLoc[loc] == 1:
                return loc
        return None
    # set robot location if definite
    def set_robot_loc(self, loc):
        self.robotLoc[self.robot_loc()] = 0
        self.robotLoc[loc] = 1
    # return held object if definite else None
    def held_obj(self):
        # index numObjs (the extra slot) means "holding nothing"
        for obj in range(self.numObjs + 1):
            if self.heldObj[obj] == 1:
                return obj
        return None
    # set held object if definite
    def set_held_obj(self, obj):
        self.heldObj[self.held_obj()] = 0
        self.heldObj[obj] = 1
    # return object location if definite else None
    def obj_loc(self, obj):
        for loc in range(self.numLocs):
            if self.objLocs[obj][loc] == 1:
                return loc
        return None
    # return True if location is free
    def free_loc(self, loc):
        return self.freeLocs[loc] == 1
    # set object location if definite
    def set_obj_loc(self, obj, loc):
        self.objLocs[obj][self.obj_loc(obj)] = 0
        self.objLocs[obj][loc] = 1
    # recompute free loc probabilities
    def recompute_free_locs(self):
        # P(loc free) = 1 - sum of object-location probabilities at loc
        for l in range(self.numLocs):
            s = 0.0
            for o in range(self.numObjs):
                s += self.objLocs[o][l]
            self.freeLocs[l] = 1.0 - s
    def normalize_robotLoc(self):
        self.robotLoc = [x / sum(self.robotLoc) for x in self.robotLoc]
    def normalize_objLoc(self, o):
        self.objLocs[o] = [x / sum(self.objLocs[o]) for x in self.objLocs[o]]
    def normalize_objLocs(self):
        for o in range(self.numObjs):
            self.normalize_objLoc(o)
    def normalize_heldObj(self):
        self.heldObj = [x / sum(self.heldObj) for x in self.heldObj]
    def assert_valid(self):
        # Distributions may sum to slightly more than 1 due to float
        # error; re-normalize within epsilon, assert beyond it.
        if self.world:
            return True
        # robot bloc <= 1
        if sum(self.robotLoc) > 1.0:
            # normalize
            if sum(self.robotLoc) <= (1.0 + self.epsilon):
                self.normalize_robotLoc()
            else:
                assert(False)
        # obj blocs <= 1
        for i, o in enumerate(self.objLocs):
            if sum(o) > 1.0:
                # normalize
                if sum(o) <= (1.0 + self.epsilon):
                    self.normalize_objLoc(i)
                else:
                    assert(False)
        # bholds + bholdnothings <= 1
        if sum(self.heldObj) > 1.0:
            # normalize
            if sum(self.heldObj) <= (1.0 + self.epsilon):
                self.normalize_heldObj()
            else:
                assert(False)
        self.recompute_free_locs()
    def assert_complete(self):
        # A complete belief has every distribution summing to exactly 1.
        if self.world:
            return True
        # robot bloc == 1
        assert(sum(self.robotLoc) == 1.0)
        # obj blocs == 1
        assert(all([round(sum(o), self.round_precision) == 1.0 for o in self.objLocs]))
        # bholds + bholdnothings == 1
        assert(round(sum(self.heldObj), self.round_precision) == 1.0)
        # loc blocs + bfrees == 1
        for l in range(self.numLocs):
            s = self.freeLocs[l]
            for o in range(self.numObjs):
                s += self.objLocs[o][l]
            assert(round(s, self.round_precision) == 1.0)
    def copy(self):
        # BUG FIX: round_precision, world and epsilon were silently
        # dropped, so copies of world states were re-validated as belief
        # states and compared at default precision.
        return KitchenState(self.numObjs, self.numLocs, self.stoveLoc,
                            self.move_info, self.pick_info, self.place_info,
                            self.cook_info, self.look_info,
                            self.robotLoc,
                            self.heldObj,
                            self.objLocs,
                            self.freeLocs,
                            round_precision=self.round_precision,
                            world=self.world,
                            epsilon=self.epsilon)
    def equals(self, goal_state):
        # Element-wise equality of all distributions, rounded.
        if self.numObjs != goal_state.numObjs or self.numLocs != goal_state.numLocs:
            return False
        for l in range(self.numLocs):
            if round(self.robotLoc[l], self.round_precision) != round(goal_state.robotLoc[l], self.round_precision):
                return False
        for o in range(self.numObjs):
            for l in range(self.numLocs):
                if round(self.objLocs[o][l], self.round_precision) != round(goal_state.objLocs[o][l], self.round_precision):
                    return False
        for o in range(self.numObjs + 1):
            if round(self.heldObj[o], self.round_precision) != round(goal_state.heldObj[o], self.round_precision):
                return False
        for l in range(self.numLocs):
            if round(self.freeLocs[l], self.round_precision) != round(goal_state.freeLocs[l], self.round_precision):
                return False
        return True
    def satisfies(self, goal_state):
        # True when every belief probability dominates the goal's.
        if self.numObjs != goal_state.numObjs or self.numLocs != goal_state.numLocs:
            return False
        for l in range(self.numLocs):
            if round(self.robotLoc[l], self.round_precision) < round(goal_state.robotLoc[l], self.round_precision):
                return False
        for o in range(self.numObjs):
            for l in range(self.numLocs):
                if round(self.objLocs[o][l], self.round_precision) < round(goal_state.objLocs[o][l], self.round_precision):
                    return False
        for o in range(self.numObjs + 1):
            if round(self.heldObj[o], self.round_precision) < round(goal_state.heldObj[o], self.round_precision):
                return False
        for l in range(self.numLocs):
            if round(self.freeLocs[l], self.round_precision) < round(goal_state.freeLocs[l], self.round_precision):
                return False
        return True
    def attributes(self):
        return (self.numObjs, self.numLocs, self.stoveLoc, self.move_info, self.pick_info, self.place_info, self.cook_info, self.look_info)
    def attributes_str(self):
        return 'numObjs: %d, numLocs: %d,\nmove: %s,\npick: %s,\nplace: %s\nlook: %s' %\
            (self.numObjs, self.numLocs,
             str(self.move_info),
             str(self.pick_info),
             str(self.place_info),
             str(self.look_info))
    def __str__(self):
        r = 'State: %d locs, %d objs\n' % (self.numLocs, self.numObjs)
        r += 'loc: ['
        for p in self.robotLoc:
            r += '%.3f ' % p
        r = r[:len(r) - 1] + ']\n'
        r += 'held: ['
        for p in self.heldObj[:len(self.heldObj) - 1]:
            r += '%.3f ' % p
        r = r[:len(r) - 1] + '] %.3f\n' % self.heldObj[-1]
        for i, o in enumerate(self.objLocs):
            r += 'object %d: [' % i
            for p in o:
                r += '%.3f ' % p
            r = r[:len(r) - 1]
            r += ']\n'
        r = r[:len(r) - 1] + '\n'
        r += 'free: ['
        for p in self.freeLocs:
            r += '%.3f ' % p
        r = r[:len(r) - 1] + ']\n'
        return r
class KitchenAction(Action):
    """A primitive kitchen action plus its arguments.

    Expected args per action:
      move: fromLoc, toLoc
      pick: obj, loc
      place: obj, loc
      lookobj: obj, loc
      lookrobot: loc
      lookhand: obj
      reachgoal: (none)
    """
    def __init__(self, action, args=None):
        self.action = action
        # BUG FIX: args defaulted to a shared mutable list ([]); use None
        # so each instance gets its own list.
        self.args = [] if args is None else args
    def __str__(self):
        return self.action + ' ' + str(self.args)
class KitchenWorld(World):
    """Simulated kitchen world: samples a concrete state from a belief
    state and executes primitive actions against it."""
    def __init__(self):
        self.state = None
    # generate initial world based off of belief_state, leaving belief_state unchanged
    def generate_new_world(self, belief_state):
        self.state = belief_state.copy()
        # robotLoc: sample one definite robot location from the belief
        p = random.uniform(0.0, 1.0)
        total = 0.0
        for loc in range(belief_state.numLocs):
            total += belief_state.robotLoc[loc]
            if p <= total:
                self.state.robotLoc = [0] * belief_state.numLocs
                self.state.robotLoc[loc] = 1
                break
        # heldObj: sample which object (or nothing) is held
        p = random.uniform(0.0, 1.0)
        total = 0.0
        for obj in range(belief_state.numObjs + 1):
            # BUG FIX: accumulated heldObj[loc] (stale index left over from
            # the robotLoc loop) instead of heldObj[obj], skewing sampling.
            total += belief_state.heldObj[obj]
            if p <= total:
                self.state.heldObj = [0] * (belief_state.numObjs + 1)
                self.state.heldObj[obj] = 1
                break
        # objLocs: sample one definite location per object
        for obj in range(belief_state.numObjs):
            p = random.uniform(0.0, 1.0)
            total = 0.0
            for loc in range(belief_state.numLocs):
                total += belief_state.objLocs[obj][loc]
                if p <= total:
                    self.state.objLocs[obj] = [0] * belief_state.numLocs
                    self.state.objLocs[obj][loc] = 1
                    break
        # freeLocs: derived from the sampled object locations
        self.state.freeLocs = [1] * belief_state.numLocs
        for obj in range(belief_state.numObjs):
            self.state.freeLocs[self.state.obj_loc(obj)] = 0
    def execute_action(self, action):
        # Returns (observation, cost, actuator/sensor success).
        if action.action == 'am':
            # actuate move
            p = random.uniform(0.0, 1.0)
            actuator_success = False
            # actuator succeeds
            if p <= self.state.move_prob:
                fromLoc, toLoc = action.args
                # in fromLoc
                if self.state.robot_loc() == fromLoc:
                    # holding nothing
                    if self.state.held_obj() == self.state.numObjs:
                        # move robot
                        self.state.set_robot_loc(toLoc)
                        actuator_success = True
                    # holding something and toLoc is free
                    elif self.state.freeLocs[toLoc] == 1:
                        # free old location
                        self.state.freeLocs[fromLoc] = 1
                        # move robot
                        self.state.set_robot_loc(toLoc)
                        # move held object
                        self.state.set_obj_loc(self.state.held_obj(), toLoc)
                        # occupy new location
                        self.state.freeLocs[toLoc] = 0
                        actuator_success = True
            return ('oaction', self.state.move_cost, actuator_success)
        elif action.action == 'api':
            # actuate pick
            p = random.uniform(0.0, 1.0)
            actuator_success = False
            # actuator succeeds
            if p <= self.state.pick_prob:
                obj, loc = action.args
                # not holding anything and robot and object are in the location
                if self.state.held_obj() == self.state.numObjs and\
                   self.state.obj_loc(obj) == loc and\
                   self.state.robot_loc() == loc:
                    # pick up object
                    self.state.set_held_obj(obj)
                    actuator_success = True
            return ('oaction', self.state.pick_cost, actuator_success)
        elif action.action == 'apl':
            # actuate place
            p = random.uniform(0.0, 1.0)
            actuator_success = False
            # actuator succeeds
            if p <= self.state.place_prob:
                obj, loc = action.args
                # holding the object
                if self.state.held_obj() == obj:
                    # place object (hand becomes empty; the object already
                    # occupies the robot's location)
                    self.state.set_held_obj(self.state.numObjs)
                    actuator_success = True
            return ('oaction', self.state.place_cost, actuator_success)
        elif action.action == 'alo':
            # look for object at loc; obj == -1 appears to query emptiness.
            # NOTE(review): on success obj == -1 reports 'oobj' when the
            # location IS free, and on failure obj_loc(-1) indexes the last
            # object -- verify the intended -1 semantics with callers.
            obj, loc = action.args
            p = random.uniform(0.0, 1.0)
            # sensor succeeds
            if p <= self.state.look_obs:
                if obj == -1:
                    return ('oobj' if self.state.free_loc(loc) else 'onone', self.state.look_cost, True)
                else:
                    return ('oobj' if not self.state.free_loc(loc) else 'onone', self.state.look_cost, True)
            # sensor fails
            else:
                if obj == -1:
                    return ('onone' if self.state.obj_loc(obj) != loc else 'oobj', self.state.look_cost, False)
                else:
                    return ('onone' if self.state.obj_loc(obj) == loc else 'oobj', self.state.look_cost, False)
        elif action.action == 'alr':
            # look for the robot at loc
            loc = action.args[0]
            p = random.uniform(0.0, 1.0)
            # sensor succeeds
            if p <= self.state.look_obs:
                return ('orobot' if self.state.robot_loc() == loc else 'onone', self.state.look_cost, True)
            # sensor fails
            else:
                return ('onone' if self.state.robot_loc() == loc else 'orobot', self.state.look_cost, False)
        elif action.action == 'alh':
            # look at the hand; obj == -1 queries "holding nothing"
            obj = action.args[0]
            p = random.uniform(0.0, 1.0)
            # sensor succeeds
            if p <= self.state.look_obs:
                if obj == -1:
                    return ('oobj' if self.state.held_obj() == self.state.numObjs else 'onone', self.state.look_cost, True)
                else:
                    return ('oobj' if self.state.held_obj() == obj else 'onone', self.state.look_cost, True)
            # sensor fails
            else:
                if obj == -1:
                    return ('onone' if self.state.held_obj() == self.state.numObjs else 'oobj', self.state.look_cost, False)
                else:
                    return ('onone' if self.state.held_obj() == obj else 'oobj', self.state.look_cost, False)
        else:
            # invalid action
            assert(False)
    def success(self, goal_state):
        return self.state.satisfies(goal_state)
| |
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
"""
#--------------------------
def setplot(plotdata):
#--------------------------
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of pyclaw.plotters.data.ClawPlotData.
Output: a modified version of plotdata.
"""
from pyclaw.plotters import colormaps, geoplot
plotdata.clearfigures() # clear any old figures,axes,items data
def set_drytol(current_data):
# The drytol parameter is used in masking land and water and
# affects what color map is used for cells with small water depth h.
# The cell will be plotted as dry if h < drytol.
# The best value to use often depends on the application and can
# be set here (measured in meters):
current_data.user.drytol = 1.e-2
plotdata.beforeframe = set_drytol
# To plot gauge locations on pcolor or contour plot, use this as
# an afteraxis function:
def addgauges(current_data):
from pyclaw.plotters import gaugetools
gaugetools.plot_gauge_locations(current_data.plotdata, \
gaugenos='all', format_string='ko', add_labels=True)
#-----------------------------------------
# Figure for pcolor plot
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='pcolor', figno=0)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes('pcolor')
plotaxes.title = 'Surface'
plotaxes.scaled = True
# Water
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
#plotitem.plot_var = geoplot.surface
plotitem.plot_var = geoplot.surface_or_depth
plotitem.pcolor_cmap = geoplot.tsunami_colormap
plotitem.pcolor_cmin = -0.9
plotitem.pcolor_cmax = 0.9
plotitem.add_colorbar = True
plotitem.amr_gridlines_show = [1,1,0]
plotitem.amr_gridedges_show = [1]
# Land
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = geoplot.land
plotitem.pcolor_cmap = geoplot.land_colors
plotitem.pcolor_cmin = 0.0
plotitem.pcolor_cmax = 100.0
plotitem.add_colorbar = False
plotitem.amr_gridlines_show = [1,1,0]
plotaxes.xlimits = [-100,100]
plotaxes.ylimits = [-100,100]
#-----------------------------------------
# Figure for zoom
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='Zoom', figno=10)
#plotfigure.show = False
plotfigure.kwargs = {'figsize':[12,7]}
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes('diag zoom')
plotaxes.axescmd = 'axes([0.0,0.1,0.6,0.6])'
plotaxes.title = 'On diagonal'
plotaxes.scaled = True
plotaxes.xlimits = [55,66]
plotaxes.ylimits = [55,66]
plotaxes.afteraxes = addgauges
# Water
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
#plotitem.plot_var = geoplot.surface
plotitem.plot_var = geoplot.surface_or_depth
plotitem.pcolor_cmap = geoplot.tsunami_colormap
plotitem.pcolor_cmin = -0.9
plotitem.pcolor_cmax = 0.9
plotitem.add_colorbar = True
plotitem.amr_gridlines_show = [1,1,0]
plotitem.amr_gridedges_show = [1]
# Land
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = geoplot.land
plotitem.pcolor_cmap = geoplot.land_colors
plotitem.pcolor_cmin = 0.0
plotitem.pcolor_cmax = 100.0
plotitem.add_colorbar = False
plotitem.amr_gridlines_show = [1,1,0]
# Add contour lines of bathymetry:
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
plotitem.plot_var = geoplot.topo
from numpy import arange, linspace
plotitem.contour_levels = arange(-10., 0., 1.)
plotitem.amr_contour_colors = ['k'] # color on each level
plotitem.kwargs = {'linestyles':'solid'}
plotitem.amr_contour_show = [0,0,1] # show contours only on finest level
plotitem.gridlines_show = 0
plotitem.gridedges_show = 0
plotitem.show = True
# Add contour lines of topography:
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
plotitem.plot_var = geoplot.topo
from numpy import arange, linspace
plotitem.contour_levels = arange(0., 11., 1.)
plotitem.amr_contour_colors = ['g'] # color on each level
plotitem.kwargs = {'linestyles':'solid'}
plotitem.amr_contour_show = [0,0,1] # show contours only on finest level
plotitem.gridlines_show = 0
plotitem.gridedges_show = 0
plotitem.show = True
# Add dashed contour line for shoreline
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
plotitem.plot_var = geoplot.topo
plotitem.contour_levels = [0.]
plotitem.amr_contour_colors = ['k'] # color on each level
plotitem.kwargs = {'linestyles':'dashed'}
plotitem.amr_contour_show = [0,0,1] # show contours only on finest level
plotitem.gridlines_show = 0
plotitem.gridedges_show = 0
plotitem.show = True
#-----------------------------------------
# Figure for zoom near axis
#-----------------------------------------
#plotfigure = plotdata.new_plotfigure(name='Zoom2', figno=11)
# now included in same figure as zoom on diagonal
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes('x zoom')
plotaxes.show = True
plotaxes.axescmd = 'axes([0.5,0.1,0.6,0.6])'
plotaxes.title = 'On x-axis'
plotaxes.scaled = True
plotaxes.xlimits = [82,93]
plotaxes.ylimits = [-5,6]
plotaxes.afteraxes = addgauges
# Water
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
#plotitem.plot_var = geoplot.surface
plotitem.plot_var = geoplot.surface_or_depth
plotitem.pcolor_cmap = geoplot.tsunami_colormap
plotitem.pcolor_cmin = -0.9
plotitem.pcolor_cmax = 0.9
plotitem.add_colorbar = True
plotitem.amr_gridlines_show = [1,1,0]
plotitem.amr_gridedges_show = [1]
# Land
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = geoplot.land
plotitem.pcolor_cmap = geoplot.land_colors
plotitem.pcolor_cmin = 0.0
plotitem.pcolor_cmax = 100.0
plotitem.add_colorbar = False
plotitem.amr_gridlines_show = [1,1,0]
# Add contour lines of bathymetry:
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
plotitem.plot_var = geoplot.topo
from numpy import arange, linspace
plotitem.contour_levels = arange(-10., 0., 1.)
plotitem.amr_contour_colors = ['k'] # color on each level
plotitem.kwargs = {'linestyles':'solid'}
plotitem.amr_contour_show = [0,0,1] # show contours only on finest level
plotitem.gridlines_show = 0
plotitem.gridedges_show = 0
plotitem.show = True
# Add contour lines of topography:
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
plotitem.plot_var = geoplot.topo
from numpy import arange, linspace
plotitem.contour_levels = arange(0., 11., 1.)
plotitem.amr_contour_colors = ['g'] # color on each level
plotitem.kwargs = {'linestyles':'solid'}
plotitem.amr_contour_show = [0,0,1] # show contours only on finest level
plotitem.gridlines_show = 0
plotitem.gridedges_show = 0
plotitem.show = True
# Add dashed contour line for shoreline
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
plotitem.plot_var = geoplot.topo
plotitem.contour_levels = [0.]
plotitem.amr_contour_colors = ['k'] # color on each level
plotitem.kwargs = {'linestyles':'dashed'}
plotitem.amr_contour_show = [0,0,1] # show contours only on finest level
plotitem.gridlines_show = 0
plotitem.gridedges_show = 0
plotitem.show = True
#-----------------------------------------
# Figures for gauges
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='Surface & topo', figno=300, \
type='each_gauge')
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = 'auto'
plotaxes.ylimits = [-2.0, 2.0]
plotaxes.title = 'Surface'
# Plot surface as blue curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = 3
plotitem.plotstyle = 'b-'
# Plot topo as green curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
def gaugetopo(current_data):
    """Return the topography at the gauge location.

    The gauge solution array ``q`` stores depth ``h`` in column 0 and
    surface elevation ``eta`` in column 3, so topography is ``eta - h``.
    """
    gauge_q = current_data.q
    depth = gauge_q[:, 0]
    surface = gauge_q[:, 3]
    return surface - depth
plotitem.plot_var = gaugetopo
plotitem.clf_each_gauge = False
plotitem.plotstyle = 'g-'
def add_zeroline(current_data):
    """Annotate a gauge plot: add the legend, then draw a black y=0 line.

    The legend is attached before the zero line is plotted so that only
    the surface and topography curves already on the axes are labeled.
    """
    from pylab import plot, legend
    times = current_data.t
    legend(('surface', 'topography'), loc='lower left')
    plot(times, 0 * times, 'k')
plotaxes.afteraxes = add_zeroline
#-----------------------------------------
# Figure for grids alone
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='grids', figno=2)
plotfigure.show = False
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = [0,1]
plotaxes.ylimits = [0,1]
plotaxes.title = 'grids'
plotaxes.scaled = True
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(plot_type='2d_grid')
plotitem.amr_grid_bgcolor = ['#ffeeee', '#eeeeff', '#eeffee']
plotitem.amr_gridlines_show = [1,1,0]
plotitem.amr_gridedges_show = [1]
#-----------------------------------------
# Scatter plot of surface for radially symmetric
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='Scatter', figno=200)
plotfigure.show = False
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = [0., 100.]
plotaxes.ylimits = [-.5, 1.]
plotaxes.title = 'Scatter plot of surface'
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(plot_type='1d_from_2d_data')
plotitem.plot_var = geoplot.surface
def q_vs_radius(current_data):
    """Map 2d gridded data to a 1d scatter of value versus radius.

    Returns a ``(r, q)`` pair where ``r`` is the distance of each point
    from the origin and ``q`` is the plotted variable at that point.
    """
    from numpy import sqrt
    xvals = current_data.x
    yvals = current_data.y
    radius = sqrt(xvals ** 2 + yvals ** 2)
    return radius, current_data.var
plotitem.map_2d_to_1d = q_vs_radius
plotitem.plotstyle = 'o'
plotitem.amr_color=['b','r','g']
plotaxes.afteraxes = "pylab.legend(['Level 1','Level 2'])"
#-----------------------------------------
# Parameters used only when creating html and/or latex hardcopy
# e.g., via pyclaw.plotters.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_gaugenos = [4,5,104,105] # list of gauges to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
plotdata.format = 'ascii' # Format of output
# plotdata.format = 'netcdf'
return plotdata
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
# Registers every message class in this module with proto-plus under the
# google.cloud.dataproc.v1 proto package. The manifest names must match the
# class names defined below exactly; proto-plus uses them to wire each class
# to its descriptor. (Generated code -- do not hand-edit the manifest.)
__protobuf__ = proto.module(
    package="google.cloud.dataproc.v1",
    manifest={
        "LoggingConfig",
        "HadoopJob",
        "SparkJob",
        "PySparkJob",
        "QueryList",
        "HiveJob",
        "SparkSqlJob",
        "PigJob",
        "SparkRJob",
        "PrestoJob",
        "JobPlacement",
        "JobStatus",
        "JobReference",
        "YarnApplication",
        "Job",
        "JobScheduling",
        "SubmitJobRequest",
        "JobMetadata",
        "GetJobRequest",
        "ListJobsRequest",
        "UpdateJobRequest",
        "ListJobsResponse",
        "CancelJobRequest",
        "DeleteJobRequest",
    },
)
class LoggingConfig(proto.Message):
    r"""The runtime logging config of the job.
    Attributes:
        driver_log_levels (Sequence[google.cloud.dataproc_v1.types.LoggingConfig.DriverLogLevelsEntry]):
            The per-package log levels for the driver.
            This may include "root" package name to
            configure rootLogger. Examples:
            'com.google = FATAL', 'root = INFO',
            'org.apache = DEBUG'
    """

    class Level(proto.Enum):
        r"""The Log4j level for job execution. When running an `Apache
        Hive <https://hive.apache.org/>`__ job, Cloud Dataproc configures
        the Hive client to an equivalent verbosity level.
        """
        # Enum values are part of the wire format; do not renumber.
        LEVEL_UNSPECIFIED = 0
        ALL = 1
        TRACE = 2
        DEBUG = 3
        INFO = 4
        WARN = 5
        ERROR = 6
        FATAL = 7
        OFF = 8

    # Map of logger/package name -> Level.
    # NOTE(review): field starts at number=2 -- presumably number 1 is
    # unused/reserved in the upstream .proto; confirm before renumbering.
    driver_log_levels = proto.MapField(proto.STRING, proto.ENUM, number=2, enum=Level,)
class HadoopJob(proto.Message):
    r"""A Dataproc job for running `Apache Hadoop
    MapReduce <https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html>`__
    jobs on `Apache Hadoop
    YARN <https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html>`__.
    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.
    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
    Attributes:
        main_jar_file_uri (str):
            The HCFS URI of the jar file containing the
            main class. Examples:
            'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar'
            'hdfs:/tmp/test-samples/custom-wordcount.jar'
            'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
            This field is a member of `oneof`_ ``driver``.
        main_class (str):
            The name of the driver's main class. The jar file containing
            the class must be in the default CLASSPATH or specified in
            ``jar_file_uris``.
            This field is a member of `oneof`_ ``driver``.
        args (Sequence[str]):
            Optional. The arguments to pass to the driver. Do not
            include arguments, such as ``-libjars`` or ``-Dfoo=bar``,
            that can be set as job properties, since a collision may
            occur that causes an incorrect job submission.
        jar_file_uris (Sequence[str]):
            Optional. Jar file URIs to add to the
            CLASSPATHs of the Hadoop driver and tasks.
        file_uris (Sequence[str]):
            Optional. HCFS (Hadoop Compatible Filesystem)
            URIs of files to be copied to the working
            directory of Hadoop drivers and distributed
            tasks. Useful for naively parallel tasks.
        archive_uris (Sequence[str]):
            Optional. HCFS URIs of archives to be
            extracted in the working directory of Hadoop
            drivers and tasks. Supported file types: .jar,
            .tar, .tar.gz, .tgz, or .zip.
        properties (Sequence[google.cloud.dataproc_v1.types.HadoopJob.PropertiesEntry]):
            Optional. A mapping of property names to values, used to
            configure Hadoop. Properties that conflict with values set
            by the Dataproc API may be overwritten. Can include
            properties set in /etc/hadoop/conf/*-site and classes in
            user code.
        logging_config (google.cloud.dataproc_v1.types.LoggingConfig):
            Optional. The runtime log config for job
            execution.
    """

    # Field numbers are fixed by the upstream jobs.proto; do not renumber.
    # main_jar_file_uri and main_class share the "driver" oneof: setting one
    # clears the other.
    main_jar_file_uri = proto.Field(proto.STRING, number=1, oneof="driver",)
    main_class = proto.Field(proto.STRING, number=2, oneof="driver",)
    args = proto.RepeatedField(proto.STRING, number=3,)
    jar_file_uris = proto.RepeatedField(proto.STRING, number=4,)
    file_uris = proto.RepeatedField(proto.STRING, number=5,)
    archive_uris = proto.RepeatedField(proto.STRING, number=6,)
    properties = proto.MapField(proto.STRING, proto.STRING, number=7,)
    logging_config = proto.Field(proto.MESSAGE, number=8, message="LoggingConfig",)
class SparkJob(proto.Message):
    r"""A Dataproc job for running `Apache
    Spark <http://spark.apache.org/>`__ applications on YARN.
    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.
    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
    Attributes:
        main_jar_file_uri (str):
            The HCFS URI of the jar file that contains
            the main class.
            This field is a member of `oneof`_ ``driver``.
        main_class (str):
            The name of the driver's main class. The jar file that
            contains the class must be in the default CLASSPATH or
            specified in ``jar_file_uris``.
            This field is a member of `oneof`_ ``driver``.
        args (Sequence[str]):
            Optional. The arguments to pass to the driver. Do not
            include arguments, such as ``--conf``, that can be set as
            job properties, since a collision may occur that causes an
            incorrect job submission.
        jar_file_uris (Sequence[str]):
            Optional. HCFS URIs of jar files to add to
            the CLASSPATHs of the Spark driver and tasks.
        file_uris (Sequence[str]):
            Optional. HCFS URIs of files to be placed in
            the working directory of each executor. Useful
            for naively parallel tasks.
        archive_uris (Sequence[str]):
            Optional. HCFS URIs of archives to be
            extracted into the working directory of each
            executor. Supported file types: .jar, .tar,
            .tar.gz, .tgz, and .zip.
        properties (Sequence[google.cloud.dataproc_v1.types.SparkJob.PropertiesEntry]):
            Optional. A mapping of property names to
            values, used to configure Spark. Properties that
            conflict with values set by the Dataproc API may
            be overwritten. Can include properties set in
            /etc/spark/conf/spark-defaults.conf and classes
            in user code.
        logging_config (google.cloud.dataproc_v1.types.LoggingConfig):
            Optional. The runtime log config for job
            execution.
    """

    # Field numbers are fixed by the upstream jobs.proto; same layout as
    # HadoopJob, with main_jar_file_uri/main_class in the "driver" oneof.
    main_jar_file_uri = proto.Field(proto.STRING, number=1, oneof="driver",)
    main_class = proto.Field(proto.STRING, number=2, oneof="driver",)
    args = proto.RepeatedField(proto.STRING, number=3,)
    jar_file_uris = proto.RepeatedField(proto.STRING, number=4,)
    file_uris = proto.RepeatedField(proto.STRING, number=5,)
    archive_uris = proto.RepeatedField(proto.STRING, number=6,)
    properties = proto.MapField(proto.STRING, proto.STRING, number=7,)
    logging_config = proto.Field(proto.MESSAGE, number=8, message="LoggingConfig",)
class PySparkJob(proto.Message):
    r"""A Dataproc job for running `Apache
    PySpark <https://spark.apache.org/docs/0.9.0/python-programming-guide.html>`__
    applications on YARN.
    Attributes:
        main_python_file_uri (str):
            Required. The HCFS URI of the main Python
            file to use as the driver. Must be a .py file.
        args (Sequence[str]):
            Optional. The arguments to pass to the driver. Do not
            include arguments, such as ``--conf``, that can be set as
            job properties, since a collision may occur that causes an
            incorrect job submission.
        python_file_uris (Sequence[str]):
            Optional. HCFS file URIs of Python files to
            pass to the PySpark framework. Supported file
            types: .py, .egg, and .zip.
        jar_file_uris (Sequence[str]):
            Optional. HCFS URIs of jar files to add to
            the CLASSPATHs of the Python driver and tasks.
        file_uris (Sequence[str]):
            Optional. HCFS URIs of files to be placed in
            the working directory of each executor. Useful
            for naively parallel tasks.
        archive_uris (Sequence[str]):
            Optional. HCFS URIs of archives to be
            extracted into the working directory of each
            executor. Supported file types: .jar, .tar,
            .tar.gz, .tgz, and .zip.
        properties (Sequence[google.cloud.dataproc_v1.types.PySparkJob.PropertiesEntry]):
            Optional. A mapping of property names to
            values, used to configure PySpark. Properties
            that conflict with values set by the Dataproc
            API may be overwritten. Can include properties
            set in
            /etc/spark/conf/spark-defaults.conf and classes
            in user code.
        logging_config (google.cloud.dataproc_v1.types.LoggingConfig):
            Optional. The runtime log config for job
            execution.
    """

    # Field numbers are fixed by the upstream jobs.proto; do not renumber.
    # main_python_file_uri (number=1) is the only required field.
    main_python_file_uri = proto.Field(proto.STRING, number=1,)
    args = proto.RepeatedField(proto.STRING, number=2,)
    python_file_uris = proto.RepeatedField(proto.STRING, number=3,)
    jar_file_uris = proto.RepeatedField(proto.STRING, number=4,)
    file_uris = proto.RepeatedField(proto.STRING, number=5,)
    archive_uris = proto.RepeatedField(proto.STRING, number=6,)
    properties = proto.MapField(proto.STRING, proto.STRING, number=7,)
    logging_config = proto.Field(proto.MESSAGE, number=8, message="LoggingConfig",)
class QueryList(proto.Message):
    r"""A list of queries to run on a cluster.
    Attributes:
        queries (Sequence[str]):
            Required. The queries to execute. You do not need to end a
            query expression with a semicolon. Multiple queries can be
            specified in one string by separating each with a semicolon.
            Here is an example of a Dataproc API snippet that uses a
            QueryList to specify a HiveJob:
            ::
                "hiveJob": {
                  "queryList": {
                    "queries": [
                      "query1",
                      "query2",
                      "query3;query4",
                    ]
                  }
                }
    """

    # Shared by HiveJob/SparkSqlJob/PigJob/PrestoJob via their "queries" oneof.
    queries = proto.RepeatedField(proto.STRING, number=1,)
class HiveJob(proto.Message):
    r"""A Dataproc job for running `Apache
    Hive <https://hive.apache.org/>`__ queries on YARN.
    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.
    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
    Attributes:
        query_file_uri (str):
            The HCFS URI of the script that contains Hive
            queries.
            This field is a member of `oneof`_ ``queries``.
        query_list (google.cloud.dataproc_v1.types.QueryList):
            A list of queries.
            This field is a member of `oneof`_ ``queries``.
        continue_on_failure (bool):
            Optional. Whether to continue executing queries if a query
            fails. The default value is ``false``. Setting to ``true``
            can be useful when executing independent parallel queries.
        script_variables (Sequence[google.cloud.dataproc_v1.types.HiveJob.ScriptVariablesEntry]):
            Optional. Mapping of query variable names to values
            (equivalent to the Hive command: ``SET name="value";``).
        properties (Sequence[google.cloud.dataproc_v1.types.HiveJob.PropertiesEntry]):
            Optional. A mapping of property names and values, used to
            configure Hive. Properties that conflict with values set by
            the Dataproc API may be overwritten. Can include properties
            set in /etc/hadoop/conf/*-site.xml,
            /etc/hive/conf/hive-site.xml, and classes in user code.
        jar_file_uris (Sequence[str]):
            Optional. HCFS URIs of jar files to add to
            the CLASSPATH of the Hive server and Hadoop
            MapReduce (MR) tasks. Can contain Hive SerDes
            and UDFs.
    """

    # Field numbers are fixed by the upstream jobs.proto; do not renumber.
    # query_file_uri and query_list share the "queries" oneof.
    query_file_uri = proto.Field(proto.STRING, number=1, oneof="queries",)
    query_list = proto.Field(
        proto.MESSAGE, number=2, oneof="queries", message="QueryList",
    )
    continue_on_failure = proto.Field(proto.BOOL, number=3,)
    script_variables = proto.MapField(proto.STRING, proto.STRING, number=4,)
    properties = proto.MapField(proto.STRING, proto.STRING, number=5,)
    jar_file_uris = proto.RepeatedField(proto.STRING, number=6,)
class SparkSqlJob(proto.Message):
    r"""A Dataproc job for running `Apache Spark
    SQL <http://spark.apache.org/sql/>`__ queries.
    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.
    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
    Attributes:
        query_file_uri (str):
            The HCFS URI of the script that contains SQL
            queries.
            This field is a member of `oneof`_ ``queries``.
        query_list (google.cloud.dataproc_v1.types.QueryList):
            A list of queries.
            This field is a member of `oneof`_ ``queries``.
        script_variables (Sequence[google.cloud.dataproc_v1.types.SparkSqlJob.ScriptVariablesEntry]):
            Optional. Mapping of query variable names to values
            (equivalent to the Spark SQL command: SET
            ``name="value";``).
        properties (Sequence[google.cloud.dataproc_v1.types.SparkSqlJob.PropertiesEntry]):
            Optional. A mapping of property names to
            values, used to configure Spark SQL's SparkConf.
            Properties that conflict with values set by the
            Dataproc API may be overwritten.
        jar_file_uris (Sequence[str]):
            Optional. HCFS URIs of jar files to be added
            to the Spark CLASSPATH.
        logging_config (google.cloud.dataproc_v1.types.LoggingConfig):
            Optional. The runtime log config for job
            execution.
    """

    # Field numbers are fixed by the upstream jobs.proto; do not renumber.
    query_file_uri = proto.Field(proto.STRING, number=1, oneof="queries",)
    query_list = proto.Field(
        proto.MESSAGE, number=2, oneof="queries", message="QueryList",
    )
    script_variables = proto.MapField(proto.STRING, proto.STRING, number=3,)
    properties = proto.MapField(proto.STRING, proto.STRING, number=4,)
    # NOTE(review): jar_file_uris uses field number 56 (not 6) -- this matches
    # the upstream jobs.proto for SparkSqlJob and is NOT a typo; confirm
    # against the .proto before ever changing it.
    jar_file_uris = proto.RepeatedField(proto.STRING, number=56,)
    logging_config = proto.Field(proto.MESSAGE, number=6, message="LoggingConfig",)
class PigJob(proto.Message):
    r"""A Dataproc job for running `Apache Pig <https://pig.apache.org/>`__
    queries on YARN.
    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.
    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
    Attributes:
        query_file_uri (str):
            The HCFS URI of the script that contains the
            Pig queries.
            This field is a member of `oneof`_ ``queries``.
        query_list (google.cloud.dataproc_v1.types.QueryList):
            A list of queries.
            This field is a member of `oneof`_ ``queries``.
        continue_on_failure (bool):
            Optional. Whether to continue executing queries if a query
            fails. The default value is ``false``. Setting to ``true``
            can be useful when executing independent parallel queries.
        script_variables (Sequence[google.cloud.dataproc_v1.types.PigJob.ScriptVariablesEntry]):
            Optional. Mapping of query variable names to values
            (equivalent to the Pig command: ``name=[value]``).
        properties (Sequence[google.cloud.dataproc_v1.types.PigJob.PropertiesEntry]):
            Optional. A mapping of property names to values, used to
            configure Pig. Properties that conflict with values set by
            the Dataproc API may be overwritten. Can include properties
            set in /etc/hadoop/conf/*-site.xml,
            /etc/pig/conf/pig.properties, and classes in user code.
        jar_file_uris (Sequence[str]):
            Optional. HCFS URIs of jar files to add to
            the CLASSPATH of the Pig Client and Hadoop
            MapReduce (MR) tasks. Can contain Pig UDFs.
        logging_config (google.cloud.dataproc_v1.types.LoggingConfig):
            Optional. The runtime log config for job
            execution.
    """

    # Field numbers are fixed by the upstream jobs.proto; do not renumber.
    # query_file_uri and query_list share the "queries" oneof.
    query_file_uri = proto.Field(proto.STRING, number=1, oneof="queries",)
    query_list = proto.Field(
        proto.MESSAGE, number=2, oneof="queries", message="QueryList",
    )
    continue_on_failure = proto.Field(proto.BOOL, number=3,)
    script_variables = proto.MapField(proto.STRING, proto.STRING, number=4,)
    properties = proto.MapField(proto.STRING, proto.STRING, number=5,)
    jar_file_uris = proto.RepeatedField(proto.STRING, number=6,)
    logging_config = proto.Field(proto.MESSAGE, number=7, message="LoggingConfig",)
class SparkRJob(proto.Message):
    r"""A Dataproc job for running `Apache
    SparkR <https://spark.apache.org/docs/latest/sparkr.html>`__
    applications on YARN.
    Attributes:
        main_r_file_uri (str):
            Required. The HCFS URI of the main R file to
            use as the driver. Must be a .R file.
        args (Sequence[str]):
            Optional. The arguments to pass to the driver. Do not
            include arguments, such as ``--conf``, that can be set as
            job properties, since a collision may occur that causes an
            incorrect job submission.
        file_uris (Sequence[str]):
            Optional. HCFS URIs of files to be placed in
            the working directory of each executor. Useful
            for naively parallel tasks.
        archive_uris (Sequence[str]):
            Optional. HCFS URIs of archives to be
            extracted into the working directory of each
            executor. Supported file types: .jar, .tar,
            .tar.gz, .tgz, and .zip.
        properties (Sequence[google.cloud.dataproc_v1.types.SparkRJob.PropertiesEntry]):
            Optional. A mapping of property names to
            values, used to configure SparkR. Properties
            that conflict with values set by the Dataproc
            API may be overwritten. Can include properties
            set in
            /etc/spark/conf/spark-defaults.conf and classes
            in user code.
        logging_config (google.cloud.dataproc_v1.types.LoggingConfig):
            Optional. The runtime log config for job
            execution.
    """

    # Field numbers are fixed by the upstream jobs.proto; do not renumber.
    # main_r_file_uri (number=1) is the only required field.
    main_r_file_uri = proto.Field(proto.STRING, number=1,)
    args = proto.RepeatedField(proto.STRING, number=2,)
    file_uris = proto.RepeatedField(proto.STRING, number=3,)
    archive_uris = proto.RepeatedField(proto.STRING, number=4,)
    properties = proto.MapField(proto.STRING, proto.STRING, number=5,)
    logging_config = proto.Field(proto.MESSAGE, number=6, message="LoggingConfig",)
class PrestoJob(proto.Message):
    r"""A Dataproc job for running `Presto <https://prestosql.io/>`__
    queries. **IMPORTANT**: The `Dataproc Presto Optional
    Component <https://cloud.google.com/dataproc/docs/concepts/components/presto>`__
    must be enabled when the cluster is created to submit a Presto job
    to the cluster.
    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.
    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
    Attributes:
        query_file_uri (str):
            The HCFS URI of the script that contains SQL
            queries.
            This field is a member of `oneof`_ ``queries``.
        query_list (google.cloud.dataproc_v1.types.QueryList):
            A list of queries.
            This field is a member of `oneof`_ ``queries``.
        continue_on_failure (bool):
            Optional. Whether to continue executing queries if a query
            fails. The default value is ``false``. Setting to ``true``
            can be useful when executing independent parallel queries.
        output_format (str):
            Optional. The format in which query output
            will be displayed. See the Presto documentation
            for supported output formats
        client_tags (Sequence[str]):
            Optional. Presto client tags to attach to
            this query
        properties (Sequence[google.cloud.dataproc_v1.types.PrestoJob.PropertiesEntry]):
            Optional. A mapping of property names to values. Used to set
            Presto `session
            properties <https://prestodb.io/docs/current/sql/set-session.html>`__
            Equivalent to using the --session flag in the Presto CLI
        logging_config (google.cloud.dataproc_v1.types.LoggingConfig):
            Optional. The runtime log config for job
            execution.
    """

    # Field numbers are fixed by the upstream jobs.proto; do not renumber.
    # query_file_uri and query_list share the "queries" oneof.
    query_file_uri = proto.Field(proto.STRING, number=1, oneof="queries",)
    query_list = proto.Field(
        proto.MESSAGE, number=2, oneof="queries", message="QueryList",
    )
    continue_on_failure = proto.Field(proto.BOOL, number=3,)
    output_format = proto.Field(proto.STRING, number=4,)
    client_tags = proto.RepeatedField(proto.STRING, number=5,)
    properties = proto.MapField(proto.STRING, proto.STRING, number=6,)
    logging_config = proto.Field(proto.MESSAGE, number=7, message="LoggingConfig",)
class JobPlacement(proto.Message):
    r"""Dataproc job config.
    Attributes:
        cluster_name (str):
            Required. The name of the cluster where the
            job will be submitted.
        cluster_uuid (str):
            Output only. A cluster UUID generated by the
            Dataproc service when the job is submitted.
        cluster_labels (Sequence[google.cloud.dataproc_v1.types.JobPlacement.ClusterLabelsEntry]):
            Optional. Cluster labels to identify a
            cluster where the job will be submitted.
    """

    # Field numbers are fixed by the upstream jobs.proto; do not renumber.
    cluster_name = proto.Field(proto.STRING, number=1,)
    cluster_uuid = proto.Field(proto.STRING, number=2,)
    cluster_labels = proto.MapField(proto.STRING, proto.STRING, number=3,)
class JobStatus(proto.Message):
    r"""Dataproc job status.
    Attributes:
        state (google.cloud.dataproc_v1.types.JobStatus.State):
            Output only. A state message specifying the
            overall job state.
        details (str):
            Optional. Output only. Job state details,
            such as an error description if the state is
            <code>ERROR</code>.
        state_start_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The time when this state was
            entered.
        substate (google.cloud.dataproc_v1.types.JobStatus.Substate):
            Output only. Additional state information,
            which includes status reported by the agent.
    """

    class State(proto.Enum):
        r"""The job state."""
        # Values are wire format and intentionally non-sequential
        # (SETUP_DONE=8, CANCEL_STARTED=7, etc. were added after the
        # original numbering); do not renumber or reorder semantically.
        STATE_UNSPECIFIED = 0
        PENDING = 1
        SETUP_DONE = 8
        RUNNING = 2
        CANCEL_PENDING = 3
        CANCEL_STARTED = 7
        CANCELLED = 4
        DONE = 5
        ERROR = 6
        ATTEMPT_FAILURE = 9

    class Substate(proto.Enum):
        r"""The job substate."""
        UNSPECIFIED = 0
        SUBMITTED = 1
        QUEUED = 2
        STALE_STATUS = 3

    # NOTE(review): state_start_time/substate are numbered 6 and 7 --
    # presumably numbers 3-5 are reserved/unused upstream; confirm against
    # jobs.proto before renumbering.
    state = proto.Field(proto.ENUM, number=1, enum=State,)
    details = proto.Field(proto.STRING, number=2,)
    state_start_time = proto.Field(
        proto.MESSAGE, number=6, message=timestamp_pb2.Timestamp,
    )
    substate = proto.Field(proto.ENUM, number=7, enum=Substate,)
class JobReference(proto.Message):
    r"""Encapsulates the full scoping used to reference a job.
    Attributes:
        project_id (str):
            Optional. The ID of the Google Cloud Platform
            project that the job belongs to. If specified,
            must match the request project ID.
        job_id (str):
            Optional. The job ID, which must be unique within the
            project.
            The ID must contain only letters (a-z, A-Z), numbers (0-9),
            underscores (_), or hyphens (-). The maximum length is 100
            characters.
            If not specified by the caller, the job ID will be provided
            by the server.
    """

    # Field numbers are fixed by the upstream jobs.proto; do not renumber.
    project_id = proto.Field(proto.STRING, number=1,)
    job_id = proto.Field(proto.STRING, number=2,)
class YarnApplication(proto.Message):
    r"""A YARN application created by a job. Application information is a
    subset of
    org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto.
    **Beta Feature**: This report is available for testing purposes
    only. It may be changed before final release.
    Attributes:
        name (str):
            Required. The application name.
        state (google.cloud.dataproc_v1.types.YarnApplication.State):
            Required. The application state.
        progress (float):
            Required. The numerical progress of the
            application, from 1 to 100.
        tracking_url (str):
            Optional. The HTTP URL of the
            ApplicationMaster, HistoryServer, or
            TimelineServer that provides
            application-specific information. The URL uses
            the internal hostname, and requires a proxy
            server for resolution and, possibly, access.
    """

    class State(proto.Enum):
        r"""The application state, corresponding to
        <code>YarnProtos.YarnApplicationStateProto</code>.
        """
        # Enum values mirror YARN's application states; wire format.
        STATE_UNSPECIFIED = 0
        NEW = 1
        NEW_SAVING = 2
        SUBMITTED = 3
        ACCEPTED = 4
        RUNNING = 5
        FINISHED = 6
        FAILED = 7
        KILLED = 8

    # Field numbers are fixed by the upstream jobs.proto; do not renumber.
    name = proto.Field(proto.STRING, number=1,)
    state = proto.Field(proto.ENUM, number=2, enum=State,)
    progress = proto.Field(proto.FLOAT, number=3,)
    tracking_url = proto.Field(proto.STRING, number=4,)
# Generated proto-plus message wrapper; field numbers must match the service proto.
class Job(proto.Message):
    r"""A Dataproc job resource.
    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.
    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
    Attributes:
        reference (google.cloud.dataproc_v1.types.JobReference):
            Optional. The fully qualified reference to the job, which
            can be used to obtain the equivalent REST path of the job
            resource. If this property is not specified when a job is
            created, the server generates a job_id.
        placement (google.cloud.dataproc_v1.types.JobPlacement):
            Required. Job information, including how,
            when, and where to run the job.
        hadoop_job (google.cloud.dataproc_v1.types.HadoopJob):
            Optional. Job is a Hadoop job.
            This field is a member of `oneof`_ ``type_job``.
        spark_job (google.cloud.dataproc_v1.types.SparkJob):
            Optional. Job is a Spark job.
            This field is a member of `oneof`_ ``type_job``.
        pyspark_job (google.cloud.dataproc_v1.types.PySparkJob):
            Optional. Job is a PySpark job.
            This field is a member of `oneof`_ ``type_job``.
        hive_job (google.cloud.dataproc_v1.types.HiveJob):
            Optional. Job is a Hive job.
            This field is a member of `oneof`_ ``type_job``.
        pig_job (google.cloud.dataproc_v1.types.PigJob):
            Optional. Job is a Pig job.
            This field is a member of `oneof`_ ``type_job``.
        spark_r_job (google.cloud.dataproc_v1.types.SparkRJob):
            Optional. Job is a SparkR job.
            This field is a member of `oneof`_ ``type_job``.
        spark_sql_job (google.cloud.dataproc_v1.types.SparkSqlJob):
            Optional. Job is a SparkSql job.
            This field is a member of `oneof`_ ``type_job``.
        presto_job (google.cloud.dataproc_v1.types.PrestoJob):
            Optional. Job is a Presto job.
            This field is a member of `oneof`_ ``type_job``.
        status (google.cloud.dataproc_v1.types.JobStatus):
            Output only. The job status. Additional application-specific
            status information may be contained in the type_job and
            yarn_applications fields.
        status_history (Sequence[google.cloud.dataproc_v1.types.JobStatus]):
            Output only. The previous job status.
        yarn_applications (Sequence[google.cloud.dataproc_v1.types.YarnApplication]):
            Output only. The collection of YARN applications spun up by
            this job.
            **Beta** Feature: This report is available for testing
            purposes only. It may be changed before final release.
        driver_output_resource_uri (str):
            Output only. A URI pointing to the location
            of the stdout of the job's driver program.
        driver_control_files_uri (str):
            Output only. If present, the location of miscellaneous
            control files which may be used as part of job setup and
            handling. If not present, control files may be placed in the
            same location as ``driver_output_uri``.
        labels (Sequence[google.cloud.dataproc_v1.types.Job.LabelsEntry]):
            Optional. The labels to associate with this job. Label
            **keys** must contain 1 to 63 characters, and must conform
            to `RFC 1035 <https://www.ietf.org/rfc/rfc1035.txt>`__.
            Label **values** may be empty, but, if present, must contain
            1 to 63 characters, and must conform to `RFC
            1035 <https://www.ietf.org/rfc/rfc1035.txt>`__. No more than
            32 labels can be associated with a job.
        scheduling (google.cloud.dataproc_v1.types.JobScheduling):
            Optional. Job scheduling configuration.
        job_uuid (str):
            Output only. A UUID that uniquely identifies a job within
            the project over time. This is in contrast to a
            user-settable reference.job_id that may be reused over time.
        done (bool):
            Output only. Indicates whether the job is completed. If the
            value is ``false``, the job is still in progress. If
            ``true``, the job is completed, and ``status.state`` field
            will indicate if it was successful, failed, or cancelled.
    """
    reference = proto.Field(proto.MESSAGE, number=1, message="JobReference",)
    placement = proto.Field(proto.MESSAGE, number=2, message="JobPlacement",)
    # Members of the ``type_job`` oneof: at most one job type can be set at once.
    hadoop_job = proto.Field(
        proto.MESSAGE, number=3, oneof="type_job", message="HadoopJob",
    )
    spark_job = proto.Field(
        proto.MESSAGE, number=4, oneof="type_job", message="SparkJob",
    )
    pyspark_job = proto.Field(
        proto.MESSAGE, number=5, oneof="type_job", message="PySparkJob",
    )
    hive_job = proto.Field(
        proto.MESSAGE, number=6, oneof="type_job", message="HiveJob",
    )
    pig_job = proto.Field(proto.MESSAGE, number=7, oneof="type_job", message="PigJob",)
    spark_r_job = proto.Field(
        proto.MESSAGE, number=21, oneof="type_job", message="SparkRJob",
    )
    spark_sql_job = proto.Field(
        proto.MESSAGE, number=12, oneof="type_job", message="SparkSqlJob",
    )
    presto_job = proto.Field(
        proto.MESSAGE, number=23, oneof="type_job", message="PrestoJob",
    )
    status = proto.Field(proto.MESSAGE, number=8, message="JobStatus",)
    status_history = proto.RepeatedField(proto.MESSAGE, number=13, message="JobStatus",)
    yarn_applications = proto.RepeatedField(
        proto.MESSAGE, number=9, message="YarnApplication",
    )
    driver_output_resource_uri = proto.Field(proto.STRING, number=17,)
    driver_control_files_uri = proto.Field(proto.STRING, number=15,)
    labels = proto.MapField(proto.STRING, proto.STRING, number=18,)
    scheduling = proto.Field(proto.MESSAGE, number=20, message="JobScheduling",)
    job_uuid = proto.Field(proto.STRING, number=22,)
    done = proto.Field(proto.BOOL, number=24,)
# Generated proto-plus message wrapper; field numbers must match the service proto.
class JobScheduling(proto.Message):
    r"""Job scheduling options.
    Attributes:
        max_failures_per_hour (int):
            Optional. Maximum number of times per hour a driver may be
            restarted as a result of driver exiting with non-zero code
            before job is reported failed.
            A job may be reported as thrashing if driver exits with
            non-zero code 4 times within 10 minute window.
            Maximum value is 10.
            **Note:** Currently, this restartable job option is not
            supported in Dataproc `workflow
            template <https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template>`__
            jobs.
        max_failures_total (int):
            Optional. Maximum number of times in total a driver may be
            restarted as a result of driver exiting with non-zero code
            before job is reported failed. Maximum value is 240.
            **Note:** Currently, this restartable job option is not
            supported in Dataproc `workflow
            template <https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template>`__
            jobs.
    """
    max_failures_per_hour = proto.Field(proto.INT32, number=1,)
    max_failures_total = proto.Field(proto.INT32, number=2,)
# Generated proto-plus message wrapper; field numbers must match the service proto.
class SubmitJobRequest(proto.Message):
    r"""A request to submit a job.
    Attributes:
        project_id (str):
            Required. The ID of the Google Cloud Platform
            project that the job belongs to.
        region (str):
            Required. The Dataproc region in which to
            handle the request.
        job (google.cloud.dataproc_v1.types.Job):
            Required. The job resource.
        request_id (str):
            Optional. A unique id used to identify the request. If the
            server receives two
            `SubmitJobRequest <https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest>`__\ s
            with the same id, then the second request will be ignored
            and the first [Job][google.cloud.dataproc.v1.Job] created
            and stored in the backend is returned.
            It is recommended to always set this value to a
            `UUID <https://en.wikipedia.org/wiki/Universally_unique_identifier>`__.
            The id must contain only letters (a-z, A-Z), numbers (0-9),
            underscores (_), and hyphens (-). The maximum length is 40
            characters.
    """
    project_id = proto.Field(proto.STRING, number=1,)
    region = proto.Field(proto.STRING, number=3,)
    job = proto.Field(proto.MESSAGE, number=2, message="Job",)
    request_id = proto.Field(proto.STRING, number=4,)
# Generated proto-plus message wrapper; field numbers must match the service proto.
class JobMetadata(proto.Message):
    r"""Job Operation metadata.
    Attributes:
        job_id (str):
            Output only. The job id.
        status (google.cloud.dataproc_v1.types.JobStatus):
            Output only. Most recent job status.
        operation_type (str):
            Output only. Operation type.
        start_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Job submission time.
    """
    job_id = proto.Field(proto.STRING, number=1,)
    status = proto.Field(proto.MESSAGE, number=2, message="JobStatus",)
    operation_type = proto.Field(proto.STRING, number=3,)
    start_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
# Generated proto-plus message wrapper; field numbers must match the service proto.
class GetJobRequest(proto.Message):
    r"""A request to get the resource representation for a job in a
    project.
    Attributes:
        project_id (str):
            Required. The ID of the Google Cloud Platform
            project that the job belongs to.
        region (str):
            Required. The Dataproc region in which to
            handle the request.
        job_id (str):
            Required. The job ID.
    """
    project_id = proto.Field(proto.STRING, number=1,)
    region = proto.Field(proto.STRING, number=3,)
    job_id = proto.Field(proto.STRING, number=2,)
# Generated proto-plus message wrapper; field numbers must match the service proto.
class ListJobsRequest(proto.Message):
    r"""A request to list jobs in a project.
    Attributes:
        project_id (str):
            Required. The ID of the Google Cloud Platform
            project that the job belongs to.
        region (str):
            Required. The Dataproc region in which to
            handle the request.
        page_size (int):
            Optional. The number of results to return in
            each response.
        page_token (str):
            Optional. The page token, returned by a
            previous call, to request the next page of
            results.
        cluster_name (str):
            Optional. If set, the returned jobs list
            includes only jobs that were submitted to the
            named cluster.
        job_state_matcher (google.cloud.dataproc_v1.types.ListJobsRequest.JobStateMatcher):
            Optional. Specifies enumerated categories of jobs to list.
            (default = match ALL jobs).
            If ``filter`` is provided, ``jobStateMatcher`` will be
            ignored.
        filter (str):
            Optional. A filter constraining the jobs to list. Filters
            are case-sensitive and have the following syntax:
            [field = value] AND [field [= value]] ...
            where **field** is ``status.state`` or ``labels.[KEY]``, and
            ``[KEY]`` is a label key. **value** can be ``*`` to match
            all values. ``status.state`` can be either ``ACTIVE`` or
            ``NON_ACTIVE``. Only the logical ``AND`` operator is
            supported; space-separated items are treated as having an
            implicit ``AND`` operator.
            Example filter:
            status.state = ACTIVE AND labels.env = staging AND
            labels.starred = \*
    """
    class JobStateMatcher(proto.Enum):
        r"""A matcher that specifies categories of job states."""
        ALL = 0
        ACTIVE = 1
        NON_ACTIVE = 2
    project_id = proto.Field(proto.STRING, number=1,)
    region = proto.Field(proto.STRING, number=6,)
    page_size = proto.Field(proto.INT32, number=2,)
    page_token = proto.Field(proto.STRING, number=3,)
    cluster_name = proto.Field(proto.STRING, number=4,)
    job_state_matcher = proto.Field(proto.ENUM, number=5, enum=JobStateMatcher,)
    filter = proto.Field(proto.STRING, number=7,)
# Generated proto-plus message wrapper; field numbers must match the service proto.
class UpdateJobRequest(proto.Message):
    r"""A request to update a job.
    Attributes:
        project_id (str):
            Required. The ID of the Google Cloud Platform
            project that the job belongs to.
        region (str):
            Required. The Dataproc region in which to
            handle the request.
        job_id (str):
            Required. The job ID.
        job (google.cloud.dataproc_v1.types.Job):
            Required. The changes to the job.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Required. Specifies the path, relative to Job, of the field
            to update. For example, to update the labels of a Job the
            update_mask parameter would be specified as labels, and the
            ``PATCH`` request body would specify the new value. Note:
            Currently, labels is the only field that can be updated.
    """
    project_id = proto.Field(proto.STRING, number=1,)
    region = proto.Field(proto.STRING, number=2,)
    job_id = proto.Field(proto.STRING, number=3,)
    job = proto.Field(proto.MESSAGE, number=4, message="Job",)
    update_mask = proto.Field(
        proto.MESSAGE, number=5, message=field_mask_pb2.FieldMask,
    )
# Generated proto-plus message wrapper; field numbers must match the service proto.
class ListJobsResponse(proto.Message):
    r"""A list of jobs in a project.
    Attributes:
        jobs (Sequence[google.cloud.dataproc_v1.types.Job]):
            Output only. Jobs list.
        next_page_token (str):
            Optional. This token is included in the response if there
            are more results to fetch. To fetch additional results,
            provide this value as the ``page_token`` in a subsequent
            ListJobsRequest.
    """
    @property
    def raw_page(self):
        # Pagination protocol hook: the message itself serves as the raw page.
        return self
    jobs = proto.RepeatedField(proto.MESSAGE, number=1, message="Job",)
    next_page_token = proto.Field(proto.STRING, number=2,)
# Generated proto-plus message wrapper; field numbers must match the service proto.
class CancelJobRequest(proto.Message):
    r"""A request to cancel a job.
    Attributes:
        project_id (str):
            Required. The ID of the Google Cloud Platform
            project that the job belongs to.
        region (str):
            Required. The Dataproc region in which to
            handle the request.
        job_id (str):
            Required. The job ID.
    """
    project_id = proto.Field(proto.STRING, number=1,)
    region = proto.Field(proto.STRING, number=3,)
    job_id = proto.Field(proto.STRING, number=2,)
# Generated proto-plus message wrapper; field numbers must match the service proto.
class DeleteJobRequest(proto.Message):
    r"""A request to delete a job.
    Attributes:
        project_id (str):
            Required. The ID of the Google Cloud Platform
            project that the job belongs to.
        region (str):
            Required. The Dataproc region in which to
            handle the request.
        job_id (str):
            Required. The job ID.
    """
    project_id = proto.Field(proto.STRING, number=1,)
    region = proto.Field(proto.STRING, number=3,)
    job_id = proto.Field(proto.STRING, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
| |
from direct.distributed.PyDatagram import *
from pandac.PandaModules import *
from otp.ai.AIZoneData import AIZoneDataStore
from otp.ai.MagicWordManagerAI import MagicWordManagerAI
from otp.ai.TimeManagerAI import TimeManagerAI
from otp.ai import BanManagerAI
from otp.distributed.OtpDoGlobals import *
from otp.friends.FriendManagerAI import FriendManagerAI
from toontown.ai import CogPageManagerAI
from toontown.ai import CogSuitManagerAI
from toontown.ai import PromotionManagerAI
from toontown.ai.AchievementsManagerAI import AchievementsManagerAI
from toontown.ai.FishManagerAI import FishManagerAI
from toontown.ai.HolidayManagerAI import HolidayManagerAI
from toontown.ai.NewsManagerAI import NewsManagerAI
from toontown.ai.QuestManagerAI import QuestManagerAI
from toontown.ai import BankManagerAI
from toontown.building.DistributedTrophyMgrAI import DistributedTrophyMgrAI
from toontown.catalog.CatalogManagerAI import CatalogManagerAI
from toontown.catalog.PopularItemManagerAI import PopularItemManagerAI
from toontown.coghq import CountryClubManagerAI
from toontown.coghq import FactoryManagerAI
from toontown.coghq import LawOfficeManagerAI
from toontown.coghq import MintManagerAI
from toontown.distributed.ToontownDistrictAI import ToontownDistrictAI
from toontown.distributed.ToontownDistrictStatsAI import ToontownDistrictStatsAI
from toontown.distributed.ToontownInternalRepository import ToontownInternalRepository
from toontown.dna.DNAParser import loadDNAFileAI
from toontown.estate.EstateManagerAI import EstateManagerAI
from toontown.hood import BRHoodAI
from toontown.hood import BossbotHQAI
from toontown.hood import CashbotHQAI
from toontown.hood import DDHoodAI
from toontown.hood import DGHoodAI
from toontown.hood import DLHoodAI
from toontown.hood import GSHoodAI
from toontown.hood import GZHoodAI
from toontown.hood import LawbotHQAI
from toontown.hood import MMHoodAI
from toontown.hood import OZHoodAI
from toontown.hood import SellbotHQAI
from toontown.hood import TTHoodAI
from toontown.hood import ZoneUtil
from toontown.pets.PetManagerAI import PetManagerAI
from toontown.safezone.SafeZoneManagerAI import SafeZoneManagerAI
from toontown.suit.SuitInvasionManagerAI import SuitInvasionManagerAI
from toontown.toon import NPCToons
from toontown.toonbase import ToontownGlobals
from toontown.tutorial.TutorialManagerAI import TutorialManagerAI
from toontown.uberdog.DistributedPartyManagerAI import DistributedPartyManagerAI
if config.GetBool('want-web-rpc', False):
from toontown.rpc.ToontownRPCClient import ToontownRPCClient
class ToontownAIRepository(ToontownInternalRepository):
    """AI-side repository for one Toontown district.

    Owns the district's global managers, playground hoods, and Cog
    headquarters, and tracks per-district state such as dynamic zone
    allocation and toon disconnect reasons.  Managers and zones are
    created in handleConnected(), not in __init__.
    """
    def __init__(self, baseChannel, stateServerChannel, districtName):
        """Record district configuration; no distributed objects are created yet."""
        ToontownInternalRepository.__init__(
            self, baseChannel, stateServerChannel, dcSuffix='AI')
        self.webRpc = None
        self.districtName = districtName
        self.notify.setInfo(True) # Our AI repository should always log info.
        self.hoods = []
        self.cogHeadquarters = []
        self.dnaStoreMap = {}
        self.dnaDataMap = {}
        self.suitPlanners = {}
        self.buildingManagers = {}
        # avId -> disconnect reason; entries are one-shot (popped on read).
        self.disconnectedToons = {}
        # Facility managers stay None unless the matching Cog HQ is enabled.
        self.factoryMgr = None
        self.mintMgr = None
        self.lawOfficeMgr = None
        self.countryClubMgr = None
        self.zoneAllocator = UniqueIdAllocator(ToontownGlobals.DynamicZonesBegin,
                                               ToontownGlobals.DynamicZonesEnd)
        self.zoneDataStore = AIZoneDataStore()
        # Feature flags and tuning knobs, read once from the server config.
        self.wantFishing = self.config.GetBool('want-fishing', True)
        self.wantHousing = self.config.GetBool('want-housing', True)
        self.wantPets = self.config.GetBool('want-pets', True)
        self.wantParties = self.config.GetBool('want-parties', True)
        self.wantCogbuildings = self.config.GetBool('want-cogbuildings', True)
        self.wantCogdominiums = self.config.GetBool('want-cogdominiums', True)
        self.doLiveUpdates = self.config.GetBool('want-live-updates', False)
        self.wantTrackClsends = self.config.GetBool('want-track-clsends', False)
        self.wantAchievements = self.config.GetBool('want-achievements', True)
        self.wantYinYang = self.config.GetBool('want-yin-yang', False)
        self.baseXpMultiplier = self.config.GetFloat('base-xp-multiplier', 1.0)
        self.wantHalloween = self.config.GetBool('want-halloween', False)
        self.wantChristmas = self.config.GetBool('want-christmas', False)
        self.wantFireworks = self.config.GetBool('want-fireworks', False)
        self.cogSuitMessageSent = False
    def createManagers(self):
        """Create the district-wide manager objects, generating the distributed ones in zone 2."""
        self.timeManager = TimeManagerAI(self)
        self.timeManager.generateWithRequired(2)
        self.magicWordManager = MagicWordManagerAI(self)
        self.magicWordManager.generateWithRequired(2)
        self.newsManager = NewsManagerAI(self)
        self.newsManager.generateWithRequired(2)
        self.holidayManager = HolidayManagerAI(self)
        self.safeZoneManager = SafeZoneManagerAI(self)
        self.safeZoneManager.generateWithRequired(2)
        self.tutorialManager = TutorialManagerAI(self)
        self.tutorialManager.generateWithRequired(2)
        self.friendManager = FriendManagerAI(self)
        self.friendManager.generateWithRequired(2)
        self.questManager = QuestManagerAI(self)
        self.banManager = BanManagerAI.BanManagerAI(self)
        self.achievementsManager = AchievementsManagerAI(self)
        self.suitInvasionManager = SuitInvasionManagerAI(self)
        self.trophyMgr = DistributedTrophyMgrAI(self)
        self.trophyMgr.generateWithRequired(2)
        self.cogSuitMgr = CogSuitManagerAI.CogSuitManagerAI(self)
        self.promotionMgr = PromotionManagerAI.PromotionManagerAI(self)
        self.cogPageManager = CogPageManagerAI.CogPageManagerAI()
        self.bankManager = BankManagerAI.BankManagerAI(self)
        if self.wantFishing:
            self.fishManager = FishManagerAI(self)
        if self.wantHousing:
            self.estateManager = EstateManagerAI(self)
            self.estateManager.generateWithRequired(2)
            self.catalogManager = CatalogManagerAI(self)
            self.catalogManager.generateWithRequired(2)
            self.popularItemManager = PopularItemManagerAI(self)
            # The delivery manager lives on the UberDOG; fetch a local view of it.
            self.deliveryManager = self.generateGlobalObject(
                OTP_DO_ID_TOONTOWN_DELIVERY_MANAGER, 'DistributedDeliveryManager')
        if self.wantPets:
            self.petMgr = PetManagerAI(self)
        if self.wantParties:
            self.partyManager = DistributedPartyManagerAI(self)
            self.partyManager.generateWithRequired(2)
            # The global party manager also lives on the UberDOG.
            self.globalPartyMgr = self.generateGlobalObject(
                OTP_DO_ID_GLOBAL_PARTY_MANAGER, 'GlobalPartyManager')
    def createSafeZones(self):
        """Instantiate the playground hood AIs that are enabled in config."""
        NPCToons.generateZone2NpcDict()
        if self.config.GetBool('want-toontown-central', True):
            self.hoods.append(TTHoodAI.TTHoodAI(self))
        if self.config.GetBool('want-donalds-dock', True):
            self.hoods.append(DDHoodAI.DDHoodAI(self))
        if self.config.GetBool('want-daisys-garden', True):
            self.hoods.append(DGHoodAI.DGHoodAI(self))
        if self.config.GetBool('want-minnies-melodyland', True):
            self.hoods.append(MMHoodAI.MMHoodAI(self))
        if self.config.GetBool('want-the-burrrgh', True):
            self.hoods.append(BRHoodAI.BRHoodAI(self))
        if self.config.GetBool('want-donalds-dreamland', True):
            self.hoods.append(DLHoodAI.DLHoodAI(self))
        if self.config.GetBool('want-goofy-speedway', True):
            self.hoods.append(GSHoodAI.GSHoodAI(self))
        if self.config.GetBool('want-outdoor-zone', True):
            self.hoods.append(OZHoodAI.OZHoodAI(self))
        if self.config.GetBool('want-golf-zone', True):
            self.hoods.append(GZHoodAI.GZHoodAI(self))
    def createCogHeadquarters(self):
        """Instantiate the Cog HQ AIs (and their facility managers) that are enabled in config."""
        NPCToons.generateZone2NpcDict()
        if self.config.GetBool('want-sellbot-headquarters', True):
            self.factoryMgr = FactoryManagerAI.FactoryManagerAI(self)
            self.cogHeadquarters.append(SellbotHQAI.SellbotHQAI(self))
        if self.config.GetBool('want-cashbot-headquarters', True):
            self.mintMgr = MintManagerAI.MintManagerAI(self)
            self.cogHeadquarters.append(CashbotHQAI.CashbotHQAI(self))
        if self.config.GetBool('want-lawbot-headquarters', True):
            self.lawOfficeMgr = LawOfficeManagerAI.LawOfficeManagerAI(self)
            self.cogHeadquarters.append(LawbotHQAI.LawbotHQAI(self))
        if self.config.GetBool('want-bossbot-headquarters', True):
            self.countryClubMgr = CountryClubManagerAI.CountryClubManagerAI(self)
            self.cogHeadquarters.append(BossbotHQAI.BossbotHQAI(self))
    def handleConnected(self):
        """Entry point after the server connection is up.

        Creates the district object and its stats, the managers, the safe
        zones and Cog HQs, then flips the district to available.
        """
        if config.GetBool('want-web-rpc', False):
            endpoint = config.GetString('web-rpc-endpoint', 'http://localhost:8000/rpc')
            self.webRpc = ToontownRPCClient(endpoint)
        self.districtId = self.allocateChannel()
        self.notify.info('Creating ToontownDistrictAI(%d)...' % self.districtId)
        self.distributedDistrict = ToontownDistrictAI(self)
        self.distributedDistrict.setName(self.districtName)
        self.distributedDistrict.generateWithRequiredAndId(
            self.districtId, self.getGameDoId(), 2)
        self.notify.info('Claiming ownership of channel ID: %d...' % self.districtId)
        self.claimOwnership(self.districtId)
        self.districtStats = ToontownDistrictStatsAI(self)
        self.districtStats.settoontownDistrictId(self.districtId)
        self.districtStats.generateWithRequiredAndId(
            self.allocateChannel(), self.getGameDoId(), 3)
        self.notify.info('Created ToontownDistrictStats(%d)' % self.districtStats.doId)
        self.notify.info('Creating managers...')
        self.createManagers()
        if self.config.GetBool('want-safe-zones', True):
            self.notify.info('Creating safe zones...')
            self.createSafeZones()
        if self.config.GetBool('want-cog-headquarters', True):
            self.notify.info('Creating Cog headquarters...')
            self.createCogHeadquarters()
        self.notify.info('Starting Holiday Manager...')
        self.holidayManager.start()
        self.notify.info('Making district available...')
        self.distributedDistrict.b_setAvailable(1)
        self.notify.info('Done.')
    def claimOwnership(self, channelId):
        """Tell the state server that this AI channel is the AI for channelId."""
        datagram = PyDatagram()
        datagram.addServerHeader(channelId, self.ourChannel, STATESERVER_OBJECT_SET_AI)
        datagram.addChannel(self.ourChannel)
        self.send(datagram)
    def lookupDNAFileName(self, zoneId):
        """Map a zone id to its .pdna file path (safe zones use the 'sz' suffix)."""
        zoneId = ZoneUtil.getCanonicalZoneId(zoneId)
        hoodId = ZoneUtil.getCanonicalHoodId(zoneId)
        hood = ToontownGlobals.dnaMap[hoodId]
        if hoodId == zoneId:
            zoneId = 'sz'
            phaseNum = ToontownGlobals.phaseMap[hoodId]
        else:
            phaseNum = ToontownGlobals.streetPhaseMap[hoodId]
        return 'phase_%s/dna/%s_%s.pdna' % (phaseNum, hood, zoneId)
    def loadDNAFileAI(self, dnastore, filename):
        """Thin wrapper over the module-level DNA parser."""
        return loadDNAFileAI(dnastore, filename)
    def incrementPopulation(self):
        """Bump the district's published avatar count by one."""
        self.districtStats.b_setAvatarCount(self.districtStats.getAvatarCount() + 1)
    def decrementPopulation(self):
        """Drop the district's published avatar count by one."""
        self.districtStats.b_setAvatarCount(self.districtStats.getAvatarCount() - 1)
    def allocateZone(self):
        """Allocate a dynamic zone id from this district's pool."""
        return self.zoneAllocator.allocate()
    def deallocateZone(self, zone):
        """Return a dynamic zone id to this district's pool."""
        self.zoneAllocator.free(zone)
    def getZoneDataStore(self):
        """Return the per-zone data store shared by this district's AIs."""
        return self.zoneDataStore
    def getTrackClsends(self):
        """Return whether client-send tracking is enabled for this district."""
        return self.wantTrackClsends
    def getAvatarExitEvent(self, avId):
        """Return the messenger event name fired when avId's object is deleted."""
        return 'distObjDelete-%d' % avId
    def trueUniqueName(self, name):
        """Alias for uniqueName(); kept for interface parity with other repositories."""
        return self.uniqueName(name)
    def setAvatarDisconnectReason(self, avId, reason):
        """Record why avId disconnected, for a single later lookup."""
        self.disconnectedToons[avId] = reason
    def getAvatarDisconnectReason(self, avId):
        """Pop and return the recorded disconnect reason for avId.

        One-shot: the entry is removed.  Raises KeyError if no reason
        was recorded for this avatar.
        """
        reason = self.disconnectedToons[avId]
        del self.disconnectedToons[avId]
        return reason
| |
from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import zip
from builtins import range
from dataclasses import dataclass, field
from typing import List
import warnings
import numpy as np
from astroquery.vizier import Vizier
from astropy import units as u
from astropy.coordinates import SkyCoord
import shapely.geometry as geom
from scipy import interpolate
from uncertainties import ufloat, unumpy
import mwdust
warnings.simplefilter("ignore")
@dataclass
class Quantity:
    """A named physical quantity with a value, an uncertainty and a provenance tag.

    ``source`` stays ``'unknown'`` until a valid (non-NaN) value/error pair
    is accepted by :meth:`setval`, which is what :attr:`isvalid` keys off.
    """
    name: str
    value: float = np.nan
    error: float = np.nan
    source: str = 'unknown'

    def setval(self, v, e, s, condition=True):
        """Store value ``v`` and error ``e`` coming from source ``s``.

        The pair is accepted only when both numbers are non-NaN and the
        optional extra ``condition`` holds.  ``v``/``e`` may be plain floats
        or unit-carrying objects exposing a ``.value`` attribute (e.g.
        astropy quantities), which are stored as-is.
        """
        # Use boolean ``not`` rather than bitwise ``~`` on the np.isnan
        # result: same behavior for np.bool_, but safe if a plain Python
        # bool ever appears (~True == -2, which is truthy).
        if hasattr(v, 'value'):
            # Unit-carrying input: validate the underlying numbers but keep
            # the original (unit-aware) objects.
            if not np.isnan(float(v.value)) and not np.isnan(float(e.value)) and condition:
                self.value = v
                self.error = e
                self.source = s
        else:
            if not np.isnan(float(v)) and not np.isnan(float(e)) and condition:
                self.value = v
                self.error = e
                self.source = s

    @property
    def isvalid(self):
        """True once a value has been accepted from some source."""
        return self.source != 'unknown'

    def totuple(self):
        """Return ``[value, error, source]`` (a list, despite the name)."""
        return [self.value, self.error, self.source]
@dataclass
class Magnitude(Quantity):
    """A photometric magnitude: a Quantity plus its filter name and extinction Av."""
    pfilter: str = 'unknown'
    Av: float = 0.0

    def setmag(self, v, e, s, f, condition=True):
        """Store magnitude ``v`` (error floored at 0.01 mag) from source ``s``
        measured in filter ``f``; the filter is recorded only when the
        value was actually accepted by setval."""
        self.setval(v, max(e, 0.01), s, condition=condition)
        # ``not`` instead of bitwise ``~`` on np.isnan: identical for
        # np.bool_, but safe against plain Python bools.
        if not np.isnan(self.value):
            self.pfilter = f

    def totuplemag(self):
        """Return ``[value, error, source, filter, Av]``."""
        return self.totuple() + [self.pfilter, self.Av]

    def setAv(self, v):
        """Set the extinction; accepted only when a magnitude exists and ``v``
        is a finite, non-negative number."""
        if not np.isnan(self.value) and not np.isnan(float(v)) and (v >= 0.0):
            self.Av = v
# Filter/bandpass keys recognized by Star.getmag; the order defines the layout
# of Star.magnitudes (see _make_mag_list).
_magnitudes = ['V', 'B', 'Bt', 'Vt', 'J', 'H', 'K', 'b', 'y', 'm1', 'c1',\
               'G', 'W1', 'W2', 'W3', 'R', 'I', 'BP', 'RP', 'E_BPRP']
def _make_mag_list():
    """Default factory: one empty Magnitude slot per entry in _magnitudes."""
    return list(map(Magnitude, _magnitudes))
def _make_coord_list():
    """Default factory: a fresh RA/DEC Quantity pair."""
    return [Quantity(axis) for axis in ('RA', 'DEC')]
def _make_proper_motion_list():
    """Default factory: a fresh pmRA/pmDEC Quantity pair."""
    return [Quantity(axis) for axis in ('pmRA', 'pmDEC')]
@dataclass
class Star:
    """All catalogue data gathered for one star: magnitudes, astrometry, parallax."""
    name: str
    magnitudes: List[Magnitude] = field(default_factory=_make_mag_list)
    coordinates: List[Quantity] = field(default_factory=_make_coord_list)
    propermotion: List[Quantity] = field(default_factory=_make_proper_motion_list)
    # BUG FIX: the original declared ``parallax: Quantity = Quantity('Parallax')``,
    # a single class-level instance shared (and mutated via setval) by every
    # Star constructed with the default; use a factory for a fresh instance.
    parallax: Quantity = field(default_factory=lambda: Quantity('Parallax'))
    galactic_coords: List[float] = field(default_factory=list)

    def getmag(self, mag):
        """Return the Magnitude object for filter name ``mag`` (e.g. 'V')."""
        return self.magnitudes[_magnitudes.index(mag)]

    def getmags(self, mags):
        """Return the Magnitude objects for each filter name in ``mags``."""
        return [self.getmag(m) for m in mags]

    @property
    def validmags(self):
        """Names of the filters that currently hold an accepted magnitude."""
        return [self.getmag(m).name for m in _magnitudes if self.getmag(m).isvalid]

    def change_coords(self, ra, dec, s):
        """Overwrite RA/DEC with provenance ``s``; positional errors are set to 0."""
        [RA, DEC] = self.coordinates
        RA.setval(ra, 0.0, s)
        DEC.setval(dec, 0.0, s)

    def change_propermotion(self, pmra, pmdec, err_ra, err_dec, s):
        """Overwrite both proper-motion components, only when all four numbers are non-NaN."""
        if not np.isnan(pmra) and not np.isnan(pmdec) and not np.isnan(err_ra) and not np.isnan(err_dec):
            pmRA, pmDEC = self.propermotion
            pmRA.setval(pmra, err_ra, s)
            pmDEC.setval(pmdec, err_dec, s)

    def todict(self):
        """Serialize the star's valid data into a plain dict (invalid entries omitted)."""
        d = {'name': self.name}
        if all([c.isvalid for c in self.coordinates]):
            d['RA'] = [self.coordinates[0].value, self.coordinates[0].source]
            d['DEC'] = [self.coordinates[1].value, self.coordinates[1].source]
        if all([c.isvalid for c in self.propermotion]):
            d['pmRA'] = self.propermotion[0].totuple()
            d['pmDEC'] = self.propermotion[1].totuple()
        if self.parallax.isvalid:
            d['parallax'] = self.parallax.totuple()
        valid = self.validmags
        for v in valid:
            d[v] = self.getmag(v).totuplemag()
        if self.galactic_coords:
            d['galactic_coords'] = self.galactic_coords
        return d
def search_catalog_name(starname, vizier_object, catlist, catname):
    """Query Vizier catalogue ``catname`` around ``starname`` by object name,
    growing the search radius until a usable match is found.

    For the Gaia DR2 catalogue a match additionally requires at least one
    entry with parallax >= 2 mas.  A successful result table is appended to
    ``catlist``, which is returned either way.
    """
    def _grow_radius(radii, need_parallax=False):
        # Try each radius in turn; stop at the first usable result.
        for radius in radii:
            table = vizier_object.query_object(
                starname, catalog=[catname], radius=radius*u.arcsec)
            if str(table) == 'Empty TableList':
                continue
            if need_parallax and not any(table[0]['Plx'] >= 2.0):
                # Match without a significant parallax: widen the search.
                continue
            catlist.append(table)
            break
        return catlist

    # Per-catalogue radius ladders (arcsec); Gaia DR2 also filters on parallax.
    radii_by_catalog = {
        "I/345/gaia2": ([15., 25., 50.], True),
        "I/284/out": ([10., 20.], False),
        "I/311/hip2": ([5., 10., 20., 30.], False),
    }
    radii, need_parallax = radii_by_catalog.get(
        catname, ([5., 10., 20., 30., 50.], False))
    return _grow_radius(radii, need_parallax)
def search_catalog_coords(RA, DEC, vizier_object, catlist, catname):
    """Query Vizier catalogue ``catname`` around the ICRS position (RA, DEC),
    in degrees, widening the search box until a usable match is found.

    For the Gaia DR2 catalogue a match additionally requires at least one
    entry with parallax >= 2 mas.  A successful result table is appended to
    ``catlist``, which is returned either way.
    """
    def _grow_width(widths, need_parallax=False):
        position = SkyCoord(ra=RA, dec=DEC, unit=(u.deg, u.deg), frame='icrs')
        # Try each box width in turn; stop at the first usable result.
        for width in widths:
            table = vizier_object.query_region(position, width=width, catalog=[catname])
            if str(table) == 'Empty TableList':
                continue
            if need_parallax and not any(table[0]['Plx'] >= 2.0):
                # Match without a significant parallax: widen the search.
                continue
            catlist.append(table)
            break
        return catlist

    # Per-catalogue width ladders; Gaia DR2 also filters on parallax.
    widths_by_catalog = {
        "I/345/gaia2": (['15s', '25s', '50s'], True),
        "I/284/out": (['10s', '20s'], False),
        "I/311/hip2": (['5s', '10s', '20s', '30s'], False),
    }
    widths, need_parallax = widths_by_catalog.get(
        catname, (['5s', '10s', '20s', '30s', '50s'], False))
    return _grow_width(widths, need_parallax)
def retrieve_catalogs(starname, vizier_object, use_coords=False, RA=None, DEC=None):
    """Query the full list of photometric/astrometric catalogues for one star.

    Searches by (RA, DEC) when ``use_coords`` is true, otherwise by a lightly
    normalized star name.  Returns the accumulated list of result tables.
    """
    catalogues = ("II/246/out", "I/311/hip2", "I/259/tyc2", "J/MNRAS/373/13/table1",
                  "J/ApJS/168/128/survey", "II/215/catalog", "V/130/gcs3",
                  "J/MNRAS/403/1949/ubvri", "I/337/tgas", "II/237/colors",
                  "I/345/gaia2", "II/336/apass9", "I/284/out", "II/328/allwise")
    result = []
    if use_coords:
        for catalog in catalogues:
            result = search_catalog_coords(RA, DEC, vizier_object, result, catalog)
    else:
        # Strip component suffixes and separators so Vizier name resolution works.
        cleaned = starname.replace('A', '').replace('-', ' ').replace('_', ' ')
        for catalog in catalogues:
            result = search_catalog_name(cleaned, vizier_object, result, catalog)
    return result
def vizier_params(starname, use_coords=False, RA=None, DEC=None):
    """Collect photometry, parallax, proper motion and coordinates for a star.

    Queries a fixed list of Vizier catalogues (2MASS, Gaia DR2, TGAS,
    Hipparcos, USNO-B1.0, WISE, Tycho-2, and several Johnson/Stromgren
    photometry sources) and fills a Star object, preferring the most
    reliable source for each quantity. Any exception raised during the
    queries is caught (best effort), and the star is returned with
    whatever data was filled.

    Args:
        starname: identifier used for the name-based Vizier query.
        use_coords: if True, search by (RA, DEC) instead of by name.
        RA, DEC: coordinates in degrees, used when use_coords is True.

    Returns:
        dict: the star's photometry dictionary, extinction-corrected.
    """
    star = Star(starname)
    V, B, Bt, Vt, J, H, K, b, y, m1, c1, G, W1, W2, W3, R, I, BP, RP, E_BPRP = star.getmags(_magnitudes)
    if use_coords:
        star.change_coords(RA, DEC, "from user or fits header")
    try:
        # Default Hipparcos Hp error, used for the Geneva 'y' magnitude below.
        e_hpmag = 0.01
        # Request all columns; if the result looks truncated (< 5 columns),
        # retry with the default column set.
        v = Vizier(columns=["**"])
        result = retrieve_catalogs(starname, v, use_coords, RA, DEC)
        if result:
            if len(result[0][0].columns) < 5:
                v = Vizier(columns=["*"])
                result = retrieve_catalogs(starname, v, use_coords, RA, DEC)
            name_cats = [list(r.keys())[0] for r in result]
            # 2MASS
            if "II/246/out" in name_cats:
                r = result[name_cats.index("II/246/out")][0]
                # With several matches, keep the row with the most 'A'
                # (best) photometric quality flags.
                if len(r['Jmag']) > 1:
                    nA = np.zeros(len(r['Qflg']))
                    for k, q in enumerate(r['Qflg']):
                        nA[k] = q.count('A')
                    maxA = np.argmax(nA)
                else:
                    maxA = 0
                # Skip rows with any 'U' (upper limit) flag.
                if r['Qflg'][maxA].count('U') == 0:
                    J.setmag(r['Jmag'][maxA], r['e_Jmag'][maxA], "II/246/out", '2MASS J',\
                             condition=r['Qflg'][maxA][0] != 'U')
                    H.setmag(r['Hmag'][maxA], r['e_Hmag'][maxA], "II/246/out", '2MASS H',\
                             condition=r['Qflg'][maxA][1] != 'U')
                    K.setmag(r['Kmag'][maxA], r['e_Kmag'][maxA], "II/246/out", '2MASS Ks',\
                             condition=r['Qflg'][maxA][2] != 'U')
                    star.change_coords(r['RAJ2000'][maxA], r['DEJ2000'][maxA], "II/246/out")
            # GAIA DR2
            if "I/345/gaia2" in name_cats:
                r = result[name_cats.index("I/345/gaia2")][0]
                # Prefer rows with a Teff estimate; among those, require a
                # significant parallax (> 2 mas) and break ties using the
                # radial velocity and the highest Teff.
                i2 = np.where(r['Teff'] > 0.0)[0]
                if len(r['Plx'][i2]) >= 1:
                    if len(r['Plx'][i2]) > 1:
                        i0 = np.where(r['Plx'][i2] > 2.0)[0]
                        if len(i0) > 1:
                            iav0 = np.where(np.abs(r['RV'][i2][i0]) > 0.0)[0]
                            if len(iav0) > 1:
                                itemp = np.argmax(r['Teff'][i2][i0[iav0]])
                                i0 = int(i0[iav0[itemp]])
                            elif len(iav0) == 1:
                                i0 = int(i0[iav0])
                            else:
                                i0 = int(i0[0])
                        elif len(i0) == 1:
                            i0 = int(i0[0])
                        else:
                            i0 = 0
                    else:
                        i0 = 0
                    if r['Plx'][i2][i0] > 2.0:
                        star.parallax.setval(r['Plx'][i2][i0]*u.mas, r['e_Plx'][i2][i0]*u.mas,
                                             "I/345/gaia2")
                        star.change_coords(r['RA_ICRS'][i2][i0], r['DE_ICRS'][i2][i0],
                                           "I/345/gaia2")
                        G.setmag(r['Gmag'][i2][i0], r['e_Gmag'][i2][i0], "I/345/gaia2", "GAIA G")
                        G.setAv(r['AG'][i2][i0])
                        star.change_propermotion(r['pmRA'][i2][i0], r['pmDE'][i2][i0],
                                                 r['e_pmRA'][i2][i0], r['e_pmDE'][i2][i0],
                                                 "I/345/gaia2")
                        BP.setmag(r['BPmag'][i2][i0], r['e_BPmag'][i2][i0], "I/345/gaia2", "GAIA BP")
                        RP.setmag(r['RPmag'][i2][i0], r['e_RPmag'][i2][i0], "I/345/gaia2", "GAIA RP")
                        E_BPRP.setmag(r['E_BP-RP_'][i2][i0], 0.1, "I/345/gaia2", "GAIA E(BP-RP)")
                # No row with Teff: fall back to the first row if its parallax
                # is significant.
                elif r['Plx'][0] > 2.0:
                    star.parallax.setval(r['Plx'][0]*u.mas, r['e_Plx'][0]*u.mas, "I/345/gaia2",
                                         condition=r['Plx'][0] > 0.0)
                    star.change_coords(r['RA_ICRS'][0], r['DE_ICRS'][0], "I/345/gaia2")
                    G.setmag(r['Gmag'][0], r['e_Gmag'][0], "I/345/gaia2", "GAIA G")
                    G.setAv(r['AG'][0])
                    star.change_propermotion(r['pmRA'][0], r['pmDE'][0],
                                             r['e_pmRA'][0], r['e_pmDE'][0], "I/345/gaia2")
                    BP.setmag(r['BPmag'][0], r['e_BPmag'][0], "I/345/gaia2", "GAIA BP")
                    RP.setmag(r['RPmag'][0], r['e_RPmag'][0], "I/345/gaia2", "GAIA RP")
                    E_BPRP.setmag(r['E_BP-RP_'][0], 0.1, "I/345/gaia2", "GAIA E(BP-RP)")
                # Correct for the systematic from Stassun & Torres 2018
                if star.parallax.isvalid:
                    plx_corr = ufloat(star.parallax.value.value,
                                      star.parallax.error.value)\
                               + ufloat(82, 33)*1e-3
                    star.parallax.setval(plx_corr.n*u.mas, plx_corr.s*u.mas, "I/345/gaia2")
            # GAIA
            elif "I/337/tgas" in name_cats:
                r = result[name_cats.index("I/337/tgas")][0]
                star.parallax.setval(r['Plx'][0]*u.mas, r['e_Plx'][0]*u.mas, "I/337/tgas",
                                     condition=r['Plx'][0] > 0.0)
                star.change_coords(r['RA_ICRS'][0], r['DE_ICRS'][0], "I/337/tgas")
            # HIPPARCOS (only if Gaia/TGAS did not provide a parallax)
            if ("I/311/hip2" in name_cats) and (not star.parallax.isvalid):
                r = result[name_cats.index("I/311/hip2")][0]
                star.parallax.setval(r['Plx'][0]*u.mas, r['e_Plx'][0]*u.mas, "I/311/hip2",
                                     condition=r['Plx'][0] > 0.0)
                star.change_coords(r['RArad'][0], r['DErad'][0], "I/311/hip2")
                if all([c.isvalid is False for c in star.propermotion]):
                    star.change_propermotion(r['pmRA'][0], r['pmDE'][0],
                                             r['e_pmRA'][0], r['e_pmDE'][0], "I/311/hip2")
                e_hpmag = r['e_Hpmag'][0] if r['e_Hpmag'][0] != 0.0 else 0.01
            # USNO-B1.0 (Monet+ 2003) for proper motions
            if "I/284/out" in name_cats:
                r = result[name_cats.index("I/284/out")][0]
                if all([c.isvalid is False for c in star.propermotion]):
                    star.change_propermotion(r['pmRA'][0], r['pmDE'][0],
                                             r['e_pmRA'][0], r['e_pmDE'][0], "I/284/out")
            # WISE
            if "II/328/allwise" in name_cats:
                r = result[name_cats.index("II/328/allwise")][0]
                # Score each row by its photometric quality flags and keep
                # the best one ('A' is best, 'Z' worst).
                if len(r['qph']) > 1:
                    counts = np.zeros(len(r['qph']))
                    flag_vals = {'A':6, 'B':5, 'C':4, 'U':3, 'X':2, 'Z':1}
                    for q, q_val in enumerate(r['qph']):
                        counts[q] = sum([flag_vals[l]*q_val.count(l) for l in flag_vals])
                    i0 = np.argmax(counts)
                    del counts
                else:
                    i0 = 0
                # Only accept the bands when all three have finite errors.
                if all([~np.isnan(float(r['e_%smag' % k][i0])) for k in ['W1', 'W2', 'W3']]):
                    W1.setmag(r['W1mag'][i0], r['e_W1mag'][i0], "II/328/allwise", 'WISE-1')
                    W2.setmag(r['W2mag'][i0], r['e_W2mag'][i0], "II/328/allwise", 'WISE-2')
                    W3.setmag(r['W3mag'][i0], r['e_W3mag'][i0], "II/328/allwise", 'WISE-3')
            # The Tycho-2 Catalogue (Hog+ 2000)
            if "I/259/tyc2" in name_cats:
                r = result[name_cats.index("I/259/tyc2")][0]
                Bt.setmag(r['BTmag'][0], r['e_BTmag'][0], "I/259/tyc2", 'HIPPARCOS BT')
                Vt.setmag(r['VTmag'][0], r['e_VTmag'][0], "I/259/tyc2", 'HIPPARCOS VT')
                star.change_coords(r['RA_ICRS_'][0], r['DE_ICRS_'][0], "I/259/tyc2")
            # Koen et al. 2010
            if "J/MNRAS/403/1949/ubvri" in name_cats:
                r = result[name_cats.index("J/MNRAS/403/1949/ubvri")][0]
                V.setmag(r['Vmag'][0], 0.01, "J/MNRAS/403/1949/ubvri", 'Landolt V')
                B.setmag(r['B-V'][0] + r['Vmag'][0], 0.01, "J/MNRAS/403/1949/ubvri", 'Landolt B')
                R.setmag(r['Vmag'][0] - r['V-Rc'][0], 0.01, "J/MNRAS/403/1949/ubvri", 'Landolt R')
                I.setmag(r['Vmag'][0] - r['V-Ic'][0], 0.01, "J/MNRAS/403/1949/ubvri", 'Landolt I')
            else:
                # Casagrande et al. 2006
                if "J/MNRAS/373/13/table1" in name_cats:
                    r = result[name_cats.index("J/MNRAS/373/13/table1")][0]
                    V.setmag(r['Vmag'][0], 0.01, "J/MNRAS/373/13/table1", 'Landolt V')
                    B.setmag(r['B-V'][0] + r['Vmag'][0], 0.01, "J/MNRAS/373/13/table1", 'Landolt B')
                    R.setmag(r['Vmag'][0] - r['V-Rc'][0], 0.01,
                             "J/MNRAS/373/13/table1", 'Landolt R')
                    # Ic from V via (V-Rc) + (R-Ic).
                    I.setmag(r['Vmag'][0] - r['V-Rc'][0] - r['R-Ic'][0], 0.01,
                             "J/MNRAS/373/13/table1", 'Landolt I')
                    J.setmag(r['Jmag'][0], r['e_Jmag'][0], "J/MNRAS/373/13/table1", '2MASS J')
                    H.setmag(r['Hmag'][0], r['e_Hmag'][0], "J/MNRAS/373/13/table1", '2MASS H')
                    K.setmag(r['Ksmag'][0], r['e_Ksmag'][0], "J/MNRAS/373/13/table1", '2MASS Ks')
                    if not star.parallax.isvalid:
                        star.parallax.setval(r['Plx'][0]*u.mas, r['e_Plx'][0]*u.mas,
                                             "J/MNRAS/373/13/table1", condition=r['Plx'][0] > 0.0)
                # Beers et al. 2007
                elif "J/ApJS/168/128/survey" in name_cats:
                    r = result[name_cats.index("J/ApJS/168/128/survey")][0]
                    V.setmag(r['Vmag'][0], r['e_Vmag'][0], "J/ApJS/168/128/survey", 'Landolt V')
                    B.setmag(r['B-V'][0] + r['Vmag'][0],
                             np.sqrt(r['e_Vmag'][0]**2 + r['e_B-V'][0]**2),
                             "J/ApJS/168/128/survey", 'Landolt B')
                    R.setmag(r['Vmag'][0] - r['V-Rc'][0],
                             np.sqrt(r['e_Vmag'][0]**2. + r['e_V-Rc'][0]**2.),
                             "J/ApJS/168/128/survey", 'Landolt R')
                    I.setmag(r['Vmag'][0] - r['V-Ic'][0],
                             np.sqrt(r['e_Vmag'][0]**2. + r['e_V-Ic'][0]**2.),
                             "J/ApJS/168/128/survey", 'Landolt I')
                    J.setmag(r['Jmag'][0], r['e_Jmag'][0], "J/ApJS/168/128/survey", '2MASS J')
                    H.setmag(r['Hmag'][0], r['e_Hmag'][0], "J/ApJS/168/128/survey", '2MASS H')
                    K.setmag(r['Ksmag'][0], r['e_Ksmag'][0], "J/ApJS/168/128/survey", '2MASS Ks')
            # HAUCK
            if "II/215/catalog" in name_cats:
                r = result[name_cats.index("II/215/catalog")][0]
                if not V.isvalid:
                    V.setmag(r['Vmag'][0], r['e_Vmag'][0], "II/215/catalog", 'Landolt V')
                b.setmag(r['b-y'][0], r['e_b-y'][0], "II/215/catalog", 'Stromgren b')
                # y is used only through (b-y); stored as zero with the b-y error.
                y.setmag(0., r['e_b-y'][0], "II/215/catalog", 'Stromgren y')
                m1.setmag(r['m1'][0], r['e_m1'][0], "II/215/catalog", 'f')
                c1.setmag(r['c1'][0], r['e_c1'][0], "II/215/catalog", 'f')
            else:
                # GENEVA
                if "V/130/gcs3" in name_cats:
                    r = result[name_cats.index("V/130/gcs3")][0]
                    if not V.isvalid:
                        V.setmag(r['Vmag'][0], 0.01, "V/130/gcs3", 'Landolt V')
                    b.setmag(r['b-y'][0], 0.01, "V/130/gcs3", 'Stromgren b')
                    y.setmag(0., e_hpmag, "V/130/gcs3", 'Stromgren y')
                    m1.setmag(r['m1'][0], 0.01, "V/130/gcs3", 'f')
                    c1.setmag(r['c1'][0], 0.01, "V/130/gcs3", 'f')
            # Fallback photometry when the optical bands are missing or the
            # 2MASS errors are large.
            if all([not M.isvalid for M in [R, I, V, B]]) or\
                any([M.error > 0.2 for M in [J, H, K]]) or\
                all([not M.isvalid for M in [R, I, B]]):
                # Ducati 2002
                if "II/237/colors" in name_cats:
                    r = result[name_cats.index("II/237/colors")][0]
                    V.setmag(r['Vmag'][0], e_hpmag, "II/237/colors", 'Landolt V')
                    B.setmag(r['B-V'][0] + r['Vmag'][0], 0.01, "II/237/colors", 'Landolt B')
                    R.setmag(r['R-V'][0] + r['Vmag'][0], 0.01, "II/237/colors", 'Landolt R')
                    I.setmag(r['I-V'][0] + r['Vmag'][0], 0.01, "II/237/colors", 'Landolt I')
                    if any([M.error > 0.2 for M in [J, H, K]]):
                        J.setmag(r['J-V'][0] + r['Vmag'][0], r['Jsig'][0]*1e-2,
                                 "II/237/colors", '2MASS J')
                        H.setmag(r['H-V'][0] + r['Vmag'][0], r['Hsig'][0]*1e-2,
                                 "II/237/colors", '2MASS H')
                        K.setmag(r['K-V'][0] + r['Vmag'][0], r['Ksig'][0]*1e-2,
                                 "II/237/colors", '2MASS Ks')
                elif "II/336/apass9" in name_cats:
                    r = result[name_cats.index("II/336/apass9")][0]
                    # Sanity-check APASS against Tycho-2 when available.
                    if Vt.isvalid:
                        V.setmag(r['Vmag'][0], r['e_Vmag'][0], "II/336/apass9", 'Landolt V',
                                 condition=np.abs(Vt.value - r['Vmag'][0]) < 0.25)
                    else:
                        V.setmag(r['Vmag'][0], r['e_Vmag'][0], "II/336/apass9", 'Landolt V')
                    if Bt.isvalid:
                        # BUG FIX: this condition used to compare Bt against the
                        # *error* e_Bmag instead of the magnitude Bmag itself
                        # (cf. the parallel Vt/Vmag check above).
                        B.setmag(r['Bmag'][0], r['e_Bmag'][0], "II/336/apass9", 'Landolt B',
                                 condition=np.abs(Bt.value - r['Bmag'][0]) < 0.25)
                    else:
                        B.setmag(r['Bmag'][0], r['e_Bmag'][0], "II/336/apass9", 'Landolt B')
            del result
    except Exception as e:
        # Best effort: report and continue with whatever was filled so far.
        print('error', e)
    # Get galactic coordinates if possible
    if all([c.isvalid for c in star.coordinates]):
        ra, dec = star.coordinates
        l, b = get_galactic_coords(ra, dec)
        star.galactic_coords = [l, b]
    photometry = star.todict()
    photometry = correct_extinction(photometry)
    del star
    return photometry
#******************************************************************************
#******************************************************************************
def get_galactic_coords(ra, dec):
    """Convert ICRS (ra, dec) Quantities in degrees to galactic (l, b) degrees."""
    icrs = SkyCoord(ra=ra.value*u.deg, dec=dec.value*u.deg, frame='icrs')
    galactic = icrs.galactic
    return galactic.l.degree, galactic.b.degree
def get_Av(photometry):
    """Estimate the V-band extinction A_V from a dust map.

    Uses the 3D Combined15 map with the parallax distance when one is
    available, otherwise the 2D SFD map evaluated at a nominal 1 kpc.
    Returns 0.0 when no galactic coordinates are present.
    """
    if 'galactic_coords' not in photometry:
        return 0.0
    l, b = photometry['galactic_coords']
    if 'parallax' in photometry:
        plx = photometry['parallax'][0]
        # Parallax -> distance in kpc.
        dist = plx.to(u.pc, equivalencies=u.parallax()).value/1000.
        dust_map = mwdust.Combined15()
    else:
        dist = 1.
        dust_map = mwdust.SFD()
    # NOTE(review): 2.742 looks like the A_V/E(B-V) conversion of
    # Schlafly & Finkbeiner (2011) — confirm.
    A_V = 2.742*dust_map(l, b, dist)
    del dust_map
    return A_V
def correct_extinction(photometry):
    """Fill the extinction slot (index 4) of each magnitude in `photometry`.

    Uses an mwdust 3D map (Combined15) when a parallax distance is
    available, otherwise the 2D SFD map at a nominal 1 kpc. Gaia G, BP
    and RP get a color-dependent correction when E(BP-RP) is present.

    NOTE(review): assumes each photometry[m] is a list whose slot 3 holds
    the filter name and slot 4 receives A_mag — confirm against the
    Star.todict() layout used in vizier_params.
    """
    def _correct_wave(l, b, d, wave, ext_map):
        # Extinction in a single band at galactic (l, b), distance d (kpc).
        # Tycho BT/VT are not in the map's filter list, so they are derived
        # from Landolt B and V.
        if wave in ('HIPPARCOS BT', 'HIPPARCOS VT'):
            ext_map._filter = 'Landolt B'
            A_B = ext_map(l, b, d)
            ext_map._filter = 'Landolt V'
            A_V = ext_map(l, b, d)
            # Transform A_wave to the Hipparcos system
            if wave == 'HIPPARCOS BT':
                A_wave = 1.090/0.850*(A_B-A_V) + A_V
            else:
                A_wave = A_V + 0.090/0.850*(A_B-A_V)
        else:
            ext_map._filter = wave
            A_wave = ext_map(l, b, d)
        # Clamp: extinction can never be negative.
        return max(A_wave[0], 0.0)
    # Bands corrected through the dust map's named filters.
    mag = ['B', 'V', 'R', 'I', 'J', 'H', 'K', 'b', 'y', 'Bt', 'Vt', 'W1', 'W2']
    if 'galactic_coords' in photometry:
        l, b = photometry['galactic_coords']
        d = 1.0
        if 'parallax' in photometry:
            p = photometry['parallax'][0]
            d = p.to(u.pc, equivalencies=u.parallax()).value/1000.  # in Kpc
            ext_map = mwdust.Combined15()
        else:
            ext_map = mwdust.SFD()
        for m in mag:
            if m in photometry:
                wave = photometry[m][3]
                A_wave = _correct_wave(l, b, d, wave, ext_map)
                # Only overwrite when no valid extinction is stored yet.
                if not np.isnan(A_wave) and\
                    (np.isnan(photometry[m][4]) or photometry[m][4] == 0.0):
                    photometry[m][4] = A_wave
        if 'G' in photometry:
            # _filter=None makes the map return E(B-V) directly.
            ext_map._filter=None
            E_BV = ext_map(l, b, d)[0]
            if not np.isnan(E_BV):
                photometry['G'][4] = max(2.35*E_BV, 0.0)
        if all([m in photometry for m in ['BP', 'RP', 'E_BPRP', 'G']]):
            ext_map._filter=None
            E_BV = ext_map(l, b, d)[0]
            A0 = 3.1*E_BV
            # De-reddened BP-RP color.
            G_BVRPm = photometry['BP'][0]-photometry['RP'][0]
            E_BPRP = photometry['E_BPRP'][0]
            G_BVRP0 = G_BVRPm-E_BPRP
            # Color/extinction-dependent coefficients for the Gaia bands.
            c_BP = [1.1517, -0.0871, -0.0333, 0.0173, -0.0230, 0.0006, 0.0043]
            c_RP = [0.6104, -0.0170, -0.0026, -0.0017, -0.0078, 0.00005, 0.0006]
            c_G = [0.9761, -0.1704, 0.0086, 0.0011, -0.0438, 0.0013, 0.0099]
            def Ax(c1, c2, c3, c4, c5, c6, c7):
                # A_x/A0 as a polynomial in the de-reddened color and A0.
                return c1 + c2*G_BVRP0 + c3*G_BVRP0**2 + c4*G_BVRP0**3 + c5*A0 + c6*A0**2 + c7*G_BVRP0*A0
            A_BP = Ax(*c_BP)*A0
            A_RP = Ax(*c_RP)*A0
            A_G = Ax(*c_G)*A0
            photometry['BP'][4] = A_BP
            photometry['RP'][4] = A_RP
            #print(photometry['G'][4], A_G, E_BV*2.35)
            photometry['G'][4] = A_G
        del ext_map
    return photometry
#******************************************************************************
#******************************************************************************
def stellar_class(photometry):
    """Classify a star as 'dwarf' or 'giant' from its 2MASS colors.

    Places the star in the (H-K, J-H) plane and compares its distance to
    the intrinsic dwarf and giant sequences of Bessell & Brett (1988),
    converted to the 2MASS system with the Carpenter (2001) relations.
    Defaults to 'dwarf' when JHK are missing or the star is too blue
    (H-K <= 0.14) for the sequences to separate.
    """
    # Intrinsic colors of dwarf and giant stars, for different spectral types.
    # From Bessell and Brett 1988
    JH_dwarfs = np.array([-0.05, 0.0, 0.02, 0.06, 0.09, 0.13, 0.165, 0.23, 0.285, 0.305, 0.32,\
                          0.33, 0.37, 0.45, 0.5, 0.58, 0.61, 0.66, 0.695, 0.68, 0.665, 0.62,\
                          0.60, 0.62, 0.66])
    HK_dwarfs = np.array([-0.035, 0.00, 0.005, 0.015, 0.025, 0.03, 0.035, 0.04, 0.045, 0.05,\
                          0.052, 0.055, 0.06, 0.075, 0.09, 0.105, 0.11, 0.13, 0.165, 0.20, 0.21,\
                          0.25, 0.275, 0.32, 0.37])
    JH_giants = np.array([0.37, 0.47, 0.50, 0.50, 0.54, 0.58, 0.63, 0.68, 0.73, 0.79, 0.83, 0.85,\
                          0.87, 0.90, 0.93, 0.95, 0.96, 0.96])
    HK_giants = np.array([0.065, 0.08, 0.085, 0.085, 0.095, 0.10, 0.115, 0.14, 0.15, 0.165, 0.19,\
                          0.205, 0.215, 0.235, 0.245, 0.285, 0.30, 0.31])
    if not (('J' in photometry) and ('H' in photometry) and ('K' in photometry)):
        return 'dwarf'
    # Extinction-corrected magnitudes (slot 4 holds A_mag).
    Jc = photometry['J'][0] - photometry['J'][4]
    Hc = photometry['H'][0] - photometry['H'][4]
    Kc = photometry['K'][0] - photometry['K'][4]
    JH = Jc - Hc
    HK = Hc - Kc
    if not (HK > 0.14):
        return 'dwarf'
    # Keep the red end (H-K > 0.14) of each sequence, converted to the
    # 2MASS system (Carpenter 2001), and build one polyline per class.
    sel_d = HK_dwarfs > 0.14
    sel_g = HK_giants > 0.14
    line_dwarfs = geom.LineString(list(zip(1.00*HK_dwarfs[sel_d] + 0.034,
                                           0.990*JH_dwarfs[sel_d] - 0.049)))
    line_giants = geom.LineString(list(zip(1.00*HK_giants[sel_g] + 0.034,
                                           0.990*JH_giants[sel_g] - 0.049)))
    point = geom.Point(HK, JH)
    # Whichever sequence is closer in the color-color plane wins.
    if point.distance(line_giants) < point.distance(line_dwarfs):
        return 'giant'
    return 'dwarf'
def stellar_class_pm(photometry):
    """Classify a star as 'dwarf' or 'giant' using its reduced proper motion.

    Follows Collier Cameron et al. 2007: computes the J-band reduced
    proper motion RPM = J + 5*log10(mu) and compares it with a quartic
    discriminating curve in J-H. Falls back to the JHK color-color
    classification (stellar_class) when the needed photometry/astrometry
    is missing, the star lies outside the calibrated ranges, or the
    computation fails.
    """
    sp_class = 'dwarf'
    if all([k in photometry for k in ['J', 'H', 'pmRA', 'pmDEC']]):
        try:
            # Proper motions stored in mas/yr; convert to arcsec/yr.
            pmRA = photometry['pmRA'][0]/1000.
            pmDEC = photometry['pmDEC'][0]/1000.
            mu = np.sqrt(pmRA**2. + pmDEC**2.)
            # Extinction-corrected magnitudes (slot 4 holds A_mag).
            J = photometry['J'][0] - photometry['J'][4]
            H = photometry['H'][0] - photometry['H'][4]
            RPM = J + 5.*np.log10(mu)
            if (-15. <= RPM <= 10.) and (0.0 <= (J-H) <= 1.0):
                # Dwarf/giant boundary in the (J-H, RPM) plane.
                ycurve = -141.25*(J-H)**4. + 473.18*(J-H)**3.\
                         -583.60*(J-H)**2. + 313.42*(J-H) - 58.
                if RPM < ycurve:
                    sp_class = 'giant'
            else:
                return stellar_class(photometry)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; narrowed to Exception.
            return stellar_class(photometry)
    else:
        return stellar_class(photometry)
    return sp_class
def ini_logg(T, sp_type):
    """Initial log(g) guess for a star of temperature T and class sp_type."""
    if sp_type == 'dwarf':
        # Quadratic calibration for main-sequence stars.
        logg = 4.68*1e-8*T**2. - 8.33*1e-4*T + 7.547
        return logg
    # Giants: same quadratic form, floored at log(g) = 1.0.
    logg = -5.82*1e-7*T**2. + 6.73*1e-3*T - 10.65
    return max(1.0, logg)
def ini_met(photometry):
    """Initial [Fe/H] guess from Stromgren photometry (Martell & Laughlin 2002).

    Returns 0.0 when the b, y, m1, c1 indices are missing, fall outside
    the calibration ranges, or yield a metallicity outside (-2.0, 0.5).
    """
    if any(k not in photometry for k in ('b', 'y', 'm1', 'c1')):
        return 0.0
    # Extinction-corrected Stromgren indices (slot 4 holds A_mag).
    b_corr = photometry['b'][0] - photometry['b'][4]
    y_corr = photometry['y'][0] - photometry['y'][4]
    by = b_corr - y_corr
    m1 = photometry['m1'][0]
    c1 = photometry['c1'][0]
    within = (0.288 < by < 0.571) and (0.058 < m1 < 0.497) and (0.116 < c1 < 0.745)
    if not within:
        return 0.0
    met = -10.424602 + 31.059003*by + 42.184476*m1 + 15.351995*c1 \
          -11.239435*by**2. - 29.218135*m1**2. - 11.457610*c1**2. \
          -138.92376*by*m1 - 52.033290*by*c1 + 11.259341*m1*c1 \
          -46.087731*by**3. + 26.065099*m1**3. - 1.1017830*c1**3. \
          +138.48588*by**2.*m1 + 39.012001*by**2.*c1 \
          +23.225562*m1**2.*by - 69.146876*m1**2.*c1 \
          +20.456093*c1**2.*by - 3.3302478*c1**2.*m1 \
          +70.168761*by*m1*c1
    # The calibration is only trusted inside -2.0 < [Fe/H] < 0.5.
    if (met > 0.5) or (met < -2.0):
        return 0.0
    return met
#******************************************************************************
#******************************************************************************
def correct_mamajek(Tc, color, inst):
    """Instrument-dependent temperature offset for the Mamajek relations.

    Returns Tc unchanged for instruments without a tabulated offset.
    """
    instruments = ('harps', 'feros', 'hires', 'uves')
    # Offsets per color index, ordered as `instruments`.
    corrections = {'B-V': (-3.67, 54.8, -15.79, -6.12),
                   'V-K': (15.23, 11.03, 43.25, 42.25),
                   'J-H': (15.47, 32.62, 78.61, 59.06),
                   'Bt-Vt': (64.61, 84.23, 108.15, 71.98)}
    if inst not in instruments:
        return Tc
    return Tc + corrections[color][instruments.index(inst)]
def mamajek(photometry, inst):
    """Estimate Teff by inverting the Mamajek color-Teff spline relations.

    For each available color (B-V, V-Ks, J-H, Bt-Vt) a cubic spline
    giving color as a function of Teff is shifted by the observed,
    extinction-corrected color; the shifted spline's roots are candidate
    temperatures. Colors with a single root are averaged first (weights
    1/error); colors with multiple roots then contribute the root closest
    to that preliminary average.

    Returns:
        (T_average, err_T_average); (0.0, 0.0) when no color is usable.
    """
    # Define the spline representations
    # Each tck is a (knots, coefficients, degree=3) tuple for
    # scipy.interpolate, sampled on the Mamajek Teff grid.
    tck_bv = (np.array([3000., 3000., 3000., 3000., 3100., 3200., 3250.,
                        3410., 3500., 3550., 3650., 3700., 3850., 3880.,
                        3970., 4050., 4230., 4410., 4620., 4840., 5040.,
                        5170., 5280., 5340., 5490., 5530., 5590., 5660.,
                        5680., 5720., 5770., 5880., 5920., 6040., 6170.,
                        6240., 6340., 6510., 6640., 6720., 6810., 7030.,
                        7200., 7440., 7500., 7800., 8000., 8080., 8270.,
                        8550., 8840., 9200., 9700., 10400., 10700., 12500.,
                        14500., 14500., 14500., 14500.]),\
              np.array([1.91, 1.89889695, 1.64625059, 1.69918967, 1.53801881,
                        1.55319352, 1.53106075, 1.48801053, 1.48838346, 1.47779774,
                        1.44527783, 1.4129107, 1.3861779, 1.30383319, 1.24749974,
                        1.12702398, 1.12023921, 0.98822216, 0.90145692, 0.84143165,
                        0.83585252, 0.74482766, 0.76955031, 0.70259105, 0.7107385,
                        0.68402323, 0.67252669, 0.657925, 0.65103668, 0.60442773,
                        0.5946758, 0.53880897, 0.53974386, 0.5054044, 0.48018914,
                        0.43561453, 0.42026151, 0.38487402, 0.36939307, 0.34017362,
                        0.28593417, 0.25906698, 0.24421401, 0.22166016, 0.17370659,
                        0.15529447, 0.14294398, 0.07675079, 0.07944651, 0.03617357,
                        -0.00639459, -0.04057817, -0.10613727, -0.10490602, -0.1217432,
                        -0.14, 0., 0., 0., 0.]), 3)
    tck_vks = (np.array([3000., 3000., 3000., 3000., 3100., 3200., 3250.,
                         3410., 3500., 3550., 3650., 3700., 3850., 3880.,
                         3970., 4050., 4230., 4410., 4620., 4840., 5040.,
                         5170., 5280., 5340., 5490., 5530., 5590., 5660.,
                         5680., 5720., 5770., 5880., 5920., 6040., 6170.,
                         6240., 6340., 6510., 6640., 6720., 6810., 7030.,
                         7200., 7440., 7500., 7800., 8000., 8080., 8270.,
                         8550., 8840., 9200., 9700., 10400., 10700., 12500.,
                         14500., 14500., 14500., 14500.]),\
               np.array([6.50000000e+00, 5.62931496e+00, 5.75367026e+00,
                         5.34076308e+00, 4.79071352e+00, 4.61391909e+00,
                         4.51720164e+00, 4.13159547e+00, 4.14709596e+00,
                         4.03097681e+00, 3.85578132e+00, 3.70546709e+00,
                         3.54424972e+00, 3.31949749e+00, 3.16721742e+00,
                         2.81656851e+00, 2.79358299e+00, 2.39537651e+00,
                         2.17786419e+00, 2.02134802e+00, 1.99884413e+00,
                         1.78559326e+00, 1.84401087e+00, 1.68769762e+00,
                         1.70675113e+00, 1.64431540e+00, 1.61765528e+00,
                         1.58260890e+00, 1.56619519e+00, 1.45865442e+00,
                         1.43726664e+00, 1.30978554e+00, 1.31255319e+00,
                         1.23323500e+00, 1.17676105e+00, 1.07373032e+00,
                         1.03727999e+00, 9.50782384e-01, 9.14314901e-01,
                         8.42414011e-01, 7.12893077e-01, 6.47709841e-01,
                         6.12165798e-01, 5.57520113e-01, 4.37245386e-01,
                         3.91233825e-01, 3.60535900e-01, 1.93847001e-01,
                         2.05967449e-01, 6.93649668e-02, 3.93919827e-02,
                         -5.51783950e-03, -2.63670696e-01, -2.23184229e-01,
                         -3.14197676e-01, -3.58000000e-01, 0.00000000e+00,
                         0.00000000e+00, 0.00000000e+00, 0.00000000e+00]), 3)
    tck_jh = (np.array([3000., 3000., 3000., 3000., 3100., 3200., 3250.,
                        3410., 3500., 3550., 3650., 3700., 3850., 3880.,
                        3970., 4050., 4230., 4410., 4620., 4840., 5040.,
                        5170., 5280., 5340., 5490., 5530., 5590., 5660.,
                        5680., 5720., 5770., 5880., 5920., 6040., 6170.,
                        6240., 6340., 6510., 6640., 6720., 6810., 7030.,
                        7200., 7440., 7500., 7800., 8000., 8080., 8270.,
                        8550., 8840., 9200., 9700., 10400., 10700., 12500.,
                        14500., 14500., 14500., 14500.]),\
              np.array([0.588, 0.57925679, 0.55974591, 0.55662775, 0.55739209,
                        0.58028578, 0.58376255, 0.60634672, 0.60546662, 0.6120368,
                        0.61989396, 0.62481195, 0.63594655, 0.61500676, 0.60674412,
                        0.55966082, 0.55230917, 0.49293409, 0.43609132, 0.40202698,
                        0.39806407, 0.34706382, 0.35960846, 0.32332711, 0.3280911,
                        0.31165243, 0.30654383, 0.29678003, 0.29401661, 0.26762904,
                        0.26081307, 0.22934639, 0.23111651, 0.20988225, 0.19754575,
                        0.17130757, 0.16340732, 0.1446157, 0.13839745, 0.12210648,
                        0.09356932, 0.08068036, 0.07127585, 0.06038954, 0.03801209,
                        0.02849096, 0.02386206, -0.00845365, -0.00723668, -0.02947868,
                        -0.0309659, -0.04121424, -0.06064657, -0.06648605, -0.07545834,
                        -0.081, 0., 0., 0., 0.]), 3)
    tck_btvt = (np.array([3850., 3850., 3850., 3850., 3970., 4050., 4230.,
                          4410., 4620., 4840., 5040., 5170., 5280., 5340.,
                          5490., 5530., 5590., 5660., 5680., 5720., 5770.,
                          5880., 5920., 6040., 6170., 6240., 6340., 6510.,
                          6640., 6720., 6810., 7030., 7200., 7440., 7500.,
                          7800., 8000., 8080., 8270., 8550., 8840., 9200.,
                          9700., 10400., 10700., 12500., 14500., 14500., 14500.,
                          14500.]),\
                np.array([1.65, 1.62529982, 1.61107882, 1.49837621, 1.4387566,
                          1.36306363, 1.32576584, 1.16207044, 1.06271882, 0.97207866,
                          0.97009969, 0.85213091, 0.88670237, 0.79478911, 0.8051525,
                          0.76802269, 0.75579062, 0.73434453, 0.72579215, 0.66145907,
                          0.64843825, 0.567861, 0.56983969, 0.52834313, 0.4990927,
                          0.4526401, 0.43614294, 0.40085542, 0.38573171, 0.3529186,
                          0.31238673, 0.28288031, 0.26840011, 0.24412267, 0.19710763,
                          0.18078778, 0.17080152, 0.09332344, 0.09761852, 0.04697616,
                          0.00787136, -0.02889995, -0.10493612, -0.10123972, -0.12268253,
                          -0.142, 0., 0., 0., 0.]), 3)
    T = np.array([])
    err_mag = np.array([])
    mult_zeros = []
    colors = ['B-V', 'V-K', 'J-H', 'Bt-Vt']
    tcks = [tck_bv, tck_vks, tck_jh, tck_btvt]
    for i in range(4):
        c1, c2 = colors[i].split('-')
        if (c1 in photometry) and (c2 in photometry):
            # Extinction-corrected color (slot 4 holds A_mag).
            C1 = photometry[c1][0] - photometry[c1][4]
            C2 = photometry[c2][0] - photometry[c2][4]
            C = C1-C2
            e_C = np.sqrt(photometry[c1][1]**2. + photometry[c2][1]**2.)
            # Shift the spline by the observed color so its roots are the
            # temperatures where spline(T) == C.
            tck_new = (tcks[i][0], tcks[i][1]-C, tcks[i][2])
            zeros = interpolate.sproot(tck_new)
            # Instrument-dependent temperature offset applied to the roots.
            correction = correct_mamajek(0.0, colors[i], inst)
            zeros = zeros + correction
            if len(zeros) == 1:
                T = np.append(T, zeros[0])
                err_mag = np.append(err_mag, e_C)
            if len(zeros) > 1:
                # Ambiguous color: resolve once a preliminary average exists.
                mult_zeros.append((i, colors[i], zeros, e_C))
    if T.size > 0:
        T_average = np.average(T, weights=1./err_mag)
        if mult_zeros:
            for m in mult_zeros:
                # Pick the root closest to the preliminary weighted average.
                d = np.abs(m[2]-T_average)
                T = np.append(T, m[2][np.argmin(d)])
                err_mag = np.append(err_mag, m[3])
            T_average = np.average(T, weights=1./err_mag)
        err_T_average = np.sqrt(np.average((T - T_average)**2., weights=1./err_mag))
        if err_T_average == 0.0:
            err_T_average = 100.
    else:
        T_average = 0.0
        err_T_average = 0.0
    return T_average, err_T_average
def alonso1999(photometry, met):
    """Estimate Teff using the Alonso et al. (1999) color calibrations.

    Uses the V-K (two branches), J-H and J-K relations, each valid only
    within tabulated metallicity and color ranges. Per-color
    temperatures are rescaled linearly (a_corr, b_corr) and combined
    into a weighted average.

    Returns:
        (T_final, err_T_final); (0.0, 0.0) when no calibration applies.
    """
    # V-K appears twice: two polynomial branches for different color ranges.
    colors = np.array(['V-K', 'V-K', 'J-H', 'J-K'])
    coefficients = [[0.5558, 0.2105, 0.001981, -0.009965, 0.01325, -0.002726],\
                    [0.3770, 0.3660, -0.03170, -0.003074, -0.002765, -0.002973],\
                    [0.5977, 1.015, -0.1020, -0.01029, 0.03006, 0.01013],\
                    [0.5816, 0.9134, -0.1443, 0.0000, 0.0000, 0.0000]]
    # Intrinsic uncertainty (K) of each relation.
    e_Teff = np.array([40., 25., 170., 125.])
    # Validity ranges; 'r' selects which V-K branch a range belongs to.
    color_ranges = {'V-K':[{'FeH': [-0.5, 0.2], 'color': [0.20, 2.50], 'r': 0},
                           {'FeH': [-1.5, -0.5], 'color': [1.00, 2.50], 'r': 0},
                           {'FeH': [-2.5, -1.5], 'color': [1.20, 2.50], 'r': 0},
                           {'FeH': [-3.0, -2.5], 'color': [1.70, 2.50], 'r': 0},
                           {'FeH': [-0.5, 0.2], 'color': [2.00, 4.90], 'r': 1},
                           {'FeH': [-1.5, -0.5], 'color': [2.00, 4.60], 'r': 1},
                           {'FeH': [-2.5, -1.5], 'color': [2.00, 3.40], 'r': 1},
                           {'FeH': [-3.0, -2.5], 'color': [2.00, 2.80], 'r': 1}],
                    'J-H':[{'FeH': [-0.5, 0.2], 'color': [0.00, 0.90]},
                           {'FeH': [-1.5, -0.5], 'color': [0.20, 0.80]},
                           {'FeH': [-2.5, -1.5], 'color': [0.30, 0.70]},
                           {'FeH': [-3.0, -2.5], 'color': [0.35, 0.65]}],
                    'J-K':[{'FeH': [-0.5, 0.2], 'color': [0.00, 1.10]},
                           {'FeH': [-1.5, -0.5], 'color': [0.20, 1.00]},
                           {'FeH': [-2.5, -1.5], 'color': [0.30, 0.90]},
                           {'FeH': [-3.0, -2.5], 'color': [0.40, 0.80]}]}
    #corrections = np.array([0.0, 0.0, 0.0, 0.0])
    # Linear rescaling (slope, intercept) of the raw temperatures, one
    # entry per color in `list_colors`.
    a_corr = np.array([2.70107158e-01, 8.86192858e-02, 2.57890150e-01])
    b_corr = np.array([3.58390280e+03, 4.48537163e+03, 3.72508078e+03])
    list_colors = ['V-K', 'J-H', 'J-K']
    T_array = np.ones(len(list_colors))*np.nan
    err_T_array = np.ones(len(list_colors))*np.nan
    T_final = 0.0
    err_T_final = 0.0
    for i, lc in enumerate(list_colors):
        c1, c2 = lc.split('-')
        if (c1 in photometry) and (c2 in photometry):
            # Extinction-corrected color (slot 4 holds A_mag).
            C1 = photometry[c1][0] - photometry[c1][4]
            C2 = photometry[c2][0] - photometry[c2][4]
            C = C1-C2
            icolor = np.where(colors == lc)[0]
            coefs = None
            err_T_int = None
            # If there is only one relation for the color
            if len(icolor) == 1:
                # Check metallicity and color ranges
                in_ranges = 0
                for k in color_ranges[lc]:
                    range_m = k['FeH']
                    range_c = k['color']
                    if (range_m[0] <= met <= range_m[1]) and (range_c[0] <= C <= range_c[1]):
                        in_ranges = 1
                if in_ranges > 0:
                    coefs = coefficients[int(icolor)]
                    err_T_int = e_Teff[int(icolor)]
            # There are two equations for the color, depending on the color value
            else:
                in_ranges = 0
                r = -99
                for k in color_ranges[lc]:
                    range_m = k['FeH']
                    range_c = k['color']
                    range_r = k['r']
                    if (range_m[0] <= met <= range_m[1]) and (range_c[0] <= C <= range_c[1]):
                        in_ranges += 1
                        r = range_r
                if in_ranges == 1:
                    coefs = coefficients[icolor[r]]
                    err_T_int = e_Teff[icolor[r]]
                elif in_ranges > 1:
                    # Both branches match: take the one with the smaller
                    # intrinsic uncertainty.
                    imin = np.argmin(e_Teff[icolor])
                    coefs = coefficients[icolor[imin]]
                    err_T_int = e_Teff[icolor[imin]]
            if coefs is not None and err_T_int is not None:
                # theta = 5040/Teff polynomial in color and metallicity.
                theta = coefs[0] + coefs[1]*C + coefs[2]*C**2 + coefs[3]*C*met\
                        + coefs[4]*met + coefs[5]*met**2
                if theta != 0.0:
                    Teff = 5040./theta + ufloat(0.0, err_T_int)
                    T_array[i] = Teff.n
                    err_T_array[i] = Teff.s
    ii = np.where(~np.isnan(T_array))[0]
    if ii.size > 0:
        # Apply the per-color linear rescaling, then average with 1/err weights.
        Tcorr = [np.polyval([a_corr[ii][i_], b_corr[ii][i_]], T_array[ii][i_])\
                 for i_ in range(len(ii))]
        Tmean = unumpy.uarray(Tcorr, err_T_array[ii])
        #T_final = np.average(T_array[ii], weights=1./err_T_array[ii])
        T_final = np.average(Tcorr, weights=1./err_T_array[ii])
        #T_final = np.median(T_array[ii])
        err_T_final = np.median(Tmean).s
        #Tmean = unumpy.uarray(T_array[ii]+corrections[ii], err_T_array[ii])
        #T_final = np.median(Tmean).n
        #err_T_final = np.median(Tmean).s
    return T_final, err_T_final
#******************************************************************************
#******************************************************************************
def correct_mann(Tc, color, inst, met):
    """Instrument-dependent temperature offset for the Mann relations.

    Args:
        Tc: temperature to correct.
        color: 'V-J' or 'V-I'.
        inst: instrument name; unknown instruments get no offset.
        met: truthy when the metallicity-dependent calibration was used
            (selects the first offset tuple), falsy otherwise.
    """
    # Offsets ordered as (harps, feros, hires, uves); the first tuple is
    # for the metallicity-dependent calibration, the second for the
    # metallicity-free one.
    corrections = {'V-J': ((41.3, 26.3, 89.7, 53.4), (-87.8, -73.3, -87.8, -48.2)),\
                   'V-I': ((0.0, 0.0, 0.0, 0.0), (0.0, 0.0, 0.0, 0.0))}
    instruments = ['harps', 'feros', 'hires', 'uves']
    m = 0 if met else 1
    x = 0
    if inst in instruments:
        # BUG FIX: the lookup was corrections[m][color], which always raised
        # KeyError because the dict is keyed by color first, then by m.
        x = corrections[color][m][instruments.index(inst)]
    return Tc + x
def coefs_mann(type_color, met=True):
    """Polynomial coefficients of the Mann Teff-color calibrations.

    Returns a 6-element list for the metallicity-dependent form
    (met=True) or a 7-element list for the metallicity-free form;
    all-zero coefficients for unknown colors.
    """
    with_met = {'V-J': [2.515, -1.054, 0.2965, -0.04150, 0.002245, 0.05262],
                'V-I': [1.901, -0.6564, 0.1471, -0.01274, 0.0, 0.04697]}
    no_met = {'V-J': [2.769, -1.421, 0.4284, -0.06133, 0.003310, 0.1333, 0.05416],
              'V-I': [1.568, -0.4381, 0.07749, -0.005610, 0.0, 0.2441, -0.09257]}
    if met:
        return with_met.get(type_color, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    return no_met.get(type_color, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
def mann(photometry, met):
    """Teff from the Mann et al. color calibrations for cool dwarfs.

    Uses V-J when available, else V-I. With a metallicity (`met` not
    None) the metallicity-dependent form is used; otherwise the form
    with a J-H color term is used.

    Returns:
        (Teff, err) in K; (0.0, 0.0) when the required colors are absent
        in the metallicity-free branch.
    """
    def _color(c1, c2):
        # Extinction-corrected color c1-c2 as a ufloat (slot 4 holds A_mag,
        # slot 1 the magnitude error).
        C1 = photometry[c1][0] - photometry[c1][4]
        C2 = photometry[c2][0] - photometry[c2][4]
        err = np.sqrt(photometry[c1][1]**2. + photometry[c2][1]**2.)
        return ufloat(C1-C2, err)
    use_met = met is not None
    if ('V' in photometry) and ('J' in photometry):
        i = coefs_mann('V-J', met=use_met)
        color = _color('V', 'J')
    elif ('V' in photometry) and ('I' in photometry):
        i = coefs_mann('V-I', met=use_met)
        color = _color('V', 'I')
    else:
        # No usable color: zero coefficients and a huge color uncertainty.
        i = [0.0]*(6 if use_met else 7)
        color = ufloat(0.0, 999.0)
    if use_met:
        T = i[0] + i[1]*color + i[2]*color**2 + i[3]*color**3 + i[4]*color**4 + i[5]*met
    else:
        if ('J' in photometry) and ('H' in photometry):
            color2 = _color('J', 'H')
            # BUG FIX: the original summed i[2]*color**2 + i[3]*color**2 +
            # i[4]*color**3 — a duplicated quadratic term that never used the
            # quartic coefficient; the met branch shows the intended powers.
            T = i[0] + i[1]*color + i[2]*color**2 + i[3]*color**3 + i[4]*color**4 +\
                i[5]*color2 + i[6]*color2**2
        else:
            # BUG FIX: was a plain float 0.0, which crashed on T.n below;
            # keep the ufloat type so the return is always (0.0, 0.0).
            T = ufloat(0.0, 0.0)
    # Calibrations are expressed in units of 3500 K.
    T = T*3500.
    return T.n, T.s
#******************************************************************************
#******************************************************************************
def coefs_casagrande(type_color, color):
    """Casagrande et al. coefficients for a color, if inside its valid range.

    `color` carries its nominal value in the `.n` attribute (a ufloat).
    Returns all-zero coefficients for unknown colors or out-of-range values.
    """
    # (lower bound, upper bound, polynomial coefficients) per color index.
    table = {
        'B-V': (0.18, 1.29, [0.5665, 0.4809, -0.0060, -0.0613, -0.0042, -0.0055]),
        'V-J': (0.61, 2.44, [0.4669, 0.3849, -0.0350, -0.0140, 0.0225, 0.0011]),
        'V-H': (0.67, 3.01, [0.5251, 0.2553, -0.0119, -0.0187, 0.0410, 0.0025]),
        'V-K': (0.78, 3.15, [0.5057, 0.2600, -0.0146, -0.0131, 0.0288, 0.0016]),
        'J-K': (0.07, 0.80, [0.6393, 0.6104, 0.0920, -0.0330, 0.0291, 0.0020]),
        'Bt-Vt': (0.19, 1.49, [0.5839, 0.4000, -0.0067, -0.0282, -0.0346, -0.0087]),
        'Vt-J': (0.77, 2.56, [0.4525, 0.3797, -0.0357, -0.0082, 0.0123, -0.0009]),
        'Vt-H': (0.77, 3.16, [0.5286, 0.2354, -0.0073, -0.0182, 0.0401, 0.0021]),
        'Vt-K': (0.99, 3.29, [0.4892, 0.2634, -0.0165, -0.0121, 0.0249, -0.0001]),
        'b-y': (0.18, 0.72, [0.5796, 0.4812, 0.5747, -0.0633, 0.0042, -0.0055]),
    }
    entry = table.get(type_color)
    if entry is not None:
        lower, upper, coefs = entry
        if lower <= color.n <= upper:
            return coefs
    return [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
def correct_casagrande(Tc, color, inst):
    """Instrument-dependent offset for Casagrande color temperatures.

    Returns Tc unchanged for instruments without a tabulated offset.
    """
    instruments = ('harps', 'feros', 'hires', 'uves')
    # Offsets per color index, ordered as `instruments`.
    corrections = {'Vt-K': [16.66, 25.67, 34.48, 31.58],
                   'V-K': [23.26, 26.81, 47.89, 52.3],
                   'Vt-H': [11.48, 18.96, 35.03, 45.29],
                   'V-H': [21.02, 23.92, 55.81, 53.8],
                   'V-J': [-7.26, -17.38, -10.37, 25.0],
                   'Vt-J': [-18.77, -4.76, -16.9, 13.75],
                   'b-y': [14.09, 24.5, 44.49, 29.27],
                   'B-V': [36.54, 65.18, 37.37, 19.43],
                   'Bt-Vt': [19.56, 19.39, 23.88, 11.53],
                   'J-K': [85.5, 124.89, 191.43, 120.83]}
    if inst not in instruments:
        return Tc
    return Tc + corrections[color][instruments.index(inst)]
def casagrande(photometry, met, inst):
    """Teff from the Casagrande et al. color calibrations.

    Evaluates every available color, keeps the estimates inside the
    3500-10000 K calibration range, and averages them with uncertainty
    propagation.

    Returns:
        (T_final, err_T_final, description) where description lists the
        per-color temperatures that entered the mean; (0.0, 100000., '')
        when no color is usable.
    """
    possible_colors = ['Vt-K', 'V-K', 'Vt-H', 'V-H', 'V-J', 'Vt-J',
                       'b-y', 'B-V', 'Bt-Vt', 'J-K']
    estimates = []
    labels = []
    for name in possible_colors:
        band1, band2 = name.split('-')
        if (band1 not in photometry) or (band2 not in photometry):
            continue
        # Extinction-corrected magnitudes with their uncertainties.
        mag1 = ufloat(photometry[band1][0] - photometry[band1][4], photometry[band1][1])
        mag2 = ufloat(photometry[band2][0] - photometry[band2][4], photometry[band2][1])
        color = mag1 - mag2
        coefs = coefs_casagrande(name, color)
        # theta = 5040/Teff as a polynomial in color and metallicity.
        theta = coefs[0] + coefs[1]*color + coefs[2]*color**2. +\
                coefs[3]*color*met + coefs[4]*met + coefs[5]*met**2.
        # Keep only estimates inside the 3500-10000 K range.
        if theta.n != 0.0 and (theta.n < 5040./3500.) and (theta.n > 5040./10000.):
            T_raw = (5040./theta).n
            T_err = (5040./theta).s
            T_corr = correct_casagrande(T_raw, name, inst)
            estimates.append(ufloat(T_corr, T_err))
            labels.append('%s: %.1f +- %.1f' % (name, T_corr, T_err))
        del theta
    T_final = 0.0
    err_T_final = 100000.
    if estimates:
        T_mean = np.mean(np.array(estimates))
        T_final = T_mean.n
        err_T_final = T_mean.s
    return T_final, err_T_final, ", ".join(labels)
#******************************************************************************
#******************************************************************************
def use_relation(photometry):
    """Choose which Teff calibration should be used for this star.

    If any available dereddened color index is at or above its tabulated
    limit (indicating a cool star), the Mann relation is selected;
    otherwise the Casagrande relation is used.

    Args:
        photometry: dict mapping band -> indexable with magnitude at [0]
            and a correction term at [4].

    Returns:
        'mann' or 'casagrande'.
    """
    limits = {'B-V': 1.340, 'Bt-Vt': 1.543, 'V-R': 0.826, 'V-I': 1.580,
              'V-K': 3.418, 'J-H': 0.557, 'H-K': 0.169}
    for color_name, limit in limits.items():
        band1, band2 = color_name.split('-')
        if band1 not in photometry or band2 not in photometry:
            continue
        mag1 = photometry[band1][0] - photometry[band1][4]
        mag2 = photometry[band2][0] - photometry[band2][4]
        if (mag1 - mag2) >= limit:
            return 'mann'
    return 'casagrande'
#******************************************************************************
#******************************************************************************
def check_relation(photometry, xmetal, exception, inst):
    """Compute Teff using the calibration chosen by use_relation(),
    with mutual fallback between the Casagrande and Mann relations.

    Args:
        photometry: dict of band -> photometric data (see casagrande()).
        xmetal: metallicity [Fe/H].
        exception: if 1, pass xmetal to mann(); otherwise call mann()
            with met=None.
        inst: instrument name forwarded to casagrande().

    Returns:
        (T_c, err_T_c, color_c, relation) where relation names the
        calibration path that produced the result.
    """
    relation = use_relation(photometry)
    T_c = 0.0
    if relation == 'casagrande':
        T_c, err_T_c, color_c = casagrande(photometry, xmetal, inst)
    # Fall back to the Mann relation when Casagrande failed (T_c == 0.0)
    # or when use_relation() selected Mann outright.
    if (T_c == 0.0) or (relation == 'mann'):
        if exception == 1:
            T_c, err_T_c = mann(photometry, xmetal)
        else:
            T_c, err_T_c = mann(photometry, met=None)
        # Mann is only accepted up to 4000 K; otherwise retry Casagrande.
        if T_c == 0.0 or T_c > 4000.:
            T_c, err_T_c, color_c = casagrande(photometry, xmetal, inst)
            relation = 'casagrande'
        else:
            color_c = 'any'
            # NOTE(review): when the Mann value is returned after a failed
            # Casagrande attempt, relation still reads 'casagrande' --
            # confirm this is intended.
    return T_c, err_T_c, color_c, relation
| |
from __future__ import division, absolute_import, print_function
import platform
import pytest
from numpy import array
from numpy.compat import long
from numpy.testing import assert_, assert_raises
from . import util
class TestReturnReal(util.F2PyTest):
    """Shared checks for f2py-wrapped routines returning real values.

    Subclasses supply the Fortran/C source (``code``) and invoke
    ``check_function`` on each wrapped routine.
    """

    def check_function(self, t):
        # Single-precision routines (t0/t4/s0/s4) are allowed a small
        # round-off error; double precision must be exact.
        if t.__doc__.split()[0] in ['t0', 't4', 's0', 's4']:
            err = 1e-5
        else:
            err = 0.0
        assert_(abs(t(234) - 234.0) <= err)
        assert_(abs(t(234.6) - 234.6) <= err)
        assert_(abs(t(long(234)) - 234.0) <= err)
        assert_(abs(t('234') - 234) <= err)
        assert_(abs(t('234.6') - 234.6) <= err)
        assert_(abs(t(-234) + 234) <= err)
        assert_(abs(t([234]) - 234) <= err)
        assert_(abs(t((234,)) - 234.) <= err)
        assert_(abs(t(array(234)) - 234.) <= err)
        assert_(abs(t(array([234])) - 234.) <= err)
        assert_(abs(t(array([[234]])) - 234.) <= err)
        # 234 does not fit in int8 ('b'): it wraps to -22, hence "+ 22".
        assert_(abs(t(array([234], 'b')) + 22) <= err)
        assert_(abs(t(array([234], 'h')) - 234.) <= err)
        assert_(abs(t(array([234], 'i')) - 234.) <= err)
        assert_(abs(t(array([234], 'l')) - 234.) <= err)
        assert_(abs(t(array([234], 'B')) - 234.) <= err)
        assert_(abs(t(array([234], 'f')) - 234.) <= err)
        assert_(abs(t(array([234], 'd')) - 234.) <= err)
        if t.__doc__.split()[0] in ['t0', 't4', 's0', 's4']:
            # Both overflow float32: the wrapped value saturates to inf.
            assert_(t(1e200) == t(1e300))  # inf
        #assert_raises(ValueError, t, array([234], 'S1'))
        assert_raises(ValueError, t, 'abc')
        assert_raises(IndexError, t, [])
        assert_raises(IndexError, t, ())
        assert_raises(Exception, t, t)
        assert_raises(Exception, t, {})
        try:
            r = t(10 ** 400)
            assert_(repr(r) in ['inf', 'Infinity'], repr(r))
        except OverflowError:
            pass
@pytest.mark.skipif(
    platform.system() == 'Darwin',
    reason="Prone to error when run with numpy/f2py/tests on mac os, "
           "but not when run in isolation")
class TestCReturnReal(TestReturnReal):
    """Tests real returns for routines implemented directly in C through
    a .pyf signature file (no Fortran compiler involved)."""
    suffix = ".pyf"
    module_name = "c_ext_return_real"
    code = """
python module c_ext_return_real
usercode \'\'\'
float t4(float value) { return value; }
void s4(float *t4, float value) { *t4 = value; }
double t8(double value) { return value; }
void s8(double *t8, double value) { *t8 = value; }
\'\'\'
interface
  function t4(value)
    real*4 intent(c) :: t4,value
  end
  function t8(value)
    real*8 intent(c) :: t8,value
  end
  subroutine s4(t4,value)
    intent(c) s4
    real*4 intent(out) :: t4
    real*4 intent(c) :: value
  end
  subroutine s8(t8,value)
    intent(c) s8
    real*8 intent(out) :: t8
    real*8 intent(c) :: value
  end
end interface
end python module c_ext_return_real
"""

    @pytest.mark.slow
    @pytest.mark.parametrize('name', 't4,t8,s4,s8'.split(','))
    def test_all(self, name):
        self.check_function(getattr(self.module, name))
class TestF77ReturnReal(TestReturnReal):
    """Tests real returns for FORTRAN 77 functions and subroutines."""
    code = """
       function t0(value)
         real value
         real t0
         t0 = value
       end
       function t4(value)
         real*4 value
         real*4 t4
         t4 = value
       end
       function t8(value)
         real*8 value
         real*8 t8
         t8 = value
       end
       function td(value)
         double precision value
         double precision td
         td = value
       end
       subroutine s0(t0,value)
         real value
         real t0
cf2py    intent(out) t0
         t0 = value
       end
       subroutine s4(t4,value)
         real*4 value
         real*4 t4
cf2py    intent(out) t4
         t4 = value
       end
       subroutine s8(t8,value)
         real*8 value
         real*8 t8
cf2py    intent(out) t8
         t8 = value
       end
       subroutine sd(td,value)
         double precision value
         double precision td
cf2py    intent(out) td
         td = value
       end
    """

    @pytest.mark.slow
    @pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(','))
    def test_all(self, name):
        self.check_function(getattr(self.module, name))
class TestF90ReturnReal(TestReturnReal):
    """Tests real returns for Fortran 90 module routines, including
    kind-parameterized reals."""
    suffix = ".f90"
    code = """
module f90_return_real
  contains
       function t0(value)
         real :: value
         real :: t0
         t0 = value
       end function t0
       function t4(value)
         real(kind=4) :: value
         real(kind=4) :: t4
         t4 = value
       end function t4
       function t8(value)
         real(kind=8) :: value
         real(kind=8) :: t8
         t8 = value
       end function t8
       function td(value)
         double precision :: value
         double precision :: td
         td = value
       end function td
       subroutine s0(t0,value)
         real :: value
         real :: t0
         !f2py intent(out) t0
         t0 = value
       end subroutine s0
       subroutine s4(t4,value)
         real(kind=4) :: value
         real(kind=4) :: t4
         !f2py intent(out) t4
         t4 = value
       end subroutine s4
       subroutine s8(t8,value)
         real(kind=8) :: value
         real(kind=8) :: t8
         !f2py intent(out) t8
         t8 = value
       end subroutine s8
       subroutine sd(td,value)
         double precision :: value
         double precision :: td
         !f2py intent(out) td
         td = value
       end subroutine sd
end module f90_return_real
"""

    @pytest.mark.slow
    @pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(','))
    def test_all(self, name):
        # F90 module routines live under the module namespace attribute.
        self.check_function(getattr(self.module.f90_return_real, name))
| |
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test of Policy Engine For Cinder."""
import os.path
import urllib2
from oslo.config import cfg
import six
from cinder import context
from cinder import exception
import cinder.openstack.common.policy
from cinder.openstack.common import policy as common_policy
from cinder import policy
from cinder import test
from cinder import utils
CONF = cfg.CONF
class PolicyFileTestCase(test.TestCase):
    """Tests that changes to the policy file on disk take effect without
    a service restart."""

    def setUp(self):
        super(PolicyFileTestCase, self).setUp()
        # since is_admin is defined by policy, create context before reset
        self.context = context.RequestContext('fake', 'fake')
        policy.reset()
        self.target = {}
        self.addCleanup(policy.reset)

    def test_modified_policy_reloads(self):
        # Write a permissive rule, enforce it, then rewrite the file with
        # a denying rule and verify enforcement now fails.
        with utils.tempdir() as tmpdir:
            tmpfilename = os.path.join(tmpdir, 'policy')
            self.flags(policy_file=tmpfilename)
            action = "example:test"
            with open(tmpfilename, "w") as policyfile:
                policyfile.write("""{"example:test": []}""")
            policy.enforce(self.context, action, self.target)
            with open(tmpfilename, "w") as policyfile:
                policyfile.write("""{"example:test": ["false:false"]}""")
            # NOTE(vish): reset stored policy cache so we don't have to
            # sleep(1)
            policy._POLICY_CACHE = {}
            self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
                              self.context, action, self.target)
class PolicyTestCase(test.TestCase):
    """Tests enforcement semantics against an in-memory rule set."""

    def setUp(self):
        super(PolicyTestCase, self).setUp()
        policy.reset()
        # NOTE(vish): preload rules to circumvent reloading from file
        policy.init()
        rules = {
            "true": [],
            "example:allowed": [],
            "example:denied": [["false:false"]],
            "example:get_http": [["http:http://www.example.com"]],
            "example:my_file": [["role:compute_admin"],
                                ["project_id:%(project_id)s"]],
            "example:early_and_fail": [["false:false", "rule:true"]],
            "example:early_or_success": [["rule:true"], ["false:false"]],
            "example:lowercase_admin": [["role:admin"], ["role:sysadmin"]],
            "example:uppercase_admin": [["role:ADMIN"], ["role:sysadmin"]],
        }
        # NOTE(vish): then overload underlying brain
        common_policy.set_brain(common_policy.Brain(rules))
        self.context = context.RequestContext('fake', 'fake', roles=['member'])
        self.target = {}
        self.addCleanup(policy.reset)

    def test_enforce_nonexistent_action_throws(self):
        action = "example:noexist"
        self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
                          self.context, action, self.target)

    def test_enforce_bad_action_throws(self):
        action = "example:denied"
        self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
                          self.context, action, self.target)

    def test_enforce_good_action(self):
        action = "example:allowed"
        policy.enforce(self.context, action, self.target)

    def test_enforce_http_true(self):
        # Stub out urllib2 so the http: rule sees an affirmative reply.
        def fakeurlopen(url, post_data):
            return six.StringIO("True")
        self.stubs.Set(urllib2, 'urlopen', fakeurlopen)
        action = "example:get_http"
        target = {}
        result = policy.enforce(self.context, action, target)
        self.assertIsNone(result)

    def test_enforce_http_false(self):
        # Negative reply from the http: check must deny the action.
        def fakeurlopen(url, post_data):
            return six.StringIO("False")
        self.stubs.Set(urllib2, 'urlopen', fakeurlopen)
        action = "example:get_http"
        target = {}
        self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
                          self.context, action, target)

    def test_templatized_enforcement(self):
        # %(project_id)s in the rule is substituted from the target dict.
        target_mine = {'project_id': 'fake'}
        target_not_mine = {'project_id': 'another'}
        action = "example:my_file"
        policy.enforce(self.context, action, target_mine)
        self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
                          self.context, action, target_not_mine)

    def test_early_AND_enforcement(self):
        action = "example:early_and_fail"
        self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
                          self.context, action, self.target)

    def test_early_OR_enforcement(self):
        action = "example:early_or_success"
        policy.enforce(self.context, action, self.target)

    def test_ignore_case_role_check(self):
        lowercase_action = "example:lowercase_admin"
        uppercase_action = "example:uppercase_admin"
        # NOTE(dprince) we mix case in the Admin role here to ensure
        # case is ignored
        admin_context = context.RequestContext('admin',
                                               'fake',
                                               roles=['AdMiN'])
        policy.enforce(admin_context, lowercase_action, self.target)
        policy.enforce(admin_context, uppercase_action, self.target)
class DefaultPolicyTestCase(test.TestCase):
    """Tests fallback to the 'default' rule for unknown actions."""

    def setUp(self):
        super(DefaultPolicyTestCase, self).setUp()
        policy.reset()
        policy.init()
        self.rules = {
            "default": [],
            "example:exist": [["false:false"]]
        }
        self._set_brain('default')
        self.context = context.RequestContext('fake', 'fake')
        self.addCleanup(policy.reset)

    def _set_brain(self, default_rule):
        # Install a Brain using self.rules with the given default rule name.
        brain = cinder.openstack.common.policy.Brain(self.rules,
                                                     default_rule)
        cinder.openstack.common.policy.set_brain(brain)

    def test_policy_called(self):
        self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
                          self.context, "example:exist", {})

    def test_not_found_policy_calls_default(self):
        # Unknown action falls back to the permissive "default" rule.
        policy.enforce(self.context, "example:noexist", {})

    def test_default_not_found(self):
        # With a nonexistent default rule, unknown actions are denied.
        self._set_brain("default_noexist")
        self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
                          self.context, "example:noexist", {})
class ContextIsAdminPolicyTestCase(test.TestCase):
    """Tests how the context_is_admin policy rule determines
    RequestContext.is_admin."""

    def setUp(self):
        super(ContextIsAdminPolicyTestCase, self).setUp()
        policy.reset()
        policy.init()

    def test_default_admin_role_is_admin(self):
        # Without custom rules, only the literal 'admin' role is admin.
        ctx = context.RequestContext('fake', 'fake', roles=['johnny-admin'])
        self.assertFalse(ctx.is_admin)
        ctx = context.RequestContext('fake', 'fake', roles=['admin'])
        self.assertTrue(ctx.is_admin)

    def test_custom_admin_role_is_admin(self):
        # define explicit rules for context_is_admin
        rules = {
            'context_is_admin': [["role:administrator"], ["role:johnny-admin"]]
        }
        brain = common_policy.Brain(rules, CONF.policy_default_rule)
        common_policy.set_brain(brain)
        ctx = context.RequestContext('fake', 'fake', roles=['johnny-admin'])
        self.assertTrue(ctx.is_admin)
        ctx = context.RequestContext('fake', 'fake', roles=['administrator'])
        self.assertTrue(ctx.is_admin)
        # default rule no longer applies
        ctx = context.RequestContext('fake', 'fake', roles=['admin'])
        self.assertFalse(ctx.is_admin)

    def test_context_is_admin_undefined(self):
        # With no context_is_admin rule defined, the built-in default
        # ('admin' role) still applies.
        rules = {
            "admin_or_owner": [["role:admin"], ["project_id:%(project_id)s"]],
            "default": [["rule:admin_or_owner"]],
        }
        brain = common_policy.Brain(rules, CONF.policy_default_rule)
        common_policy.set_brain(brain)
        ctx = context.RequestContext('fake', 'fake')
        self.assertFalse(ctx.is_admin)
        ctx = context.RequestContext('fake', 'fake', roles=['admin'])
        self.assertTrue(ctx.is_admin)
| |
#!/usr/bin/env python
from __future__ import unicode_literals
import argparse
import collections
import locale
import re
import string
import sys
import flatcat
from flatcat.exception import ArgumentException
from flatcat import utils
BND_MARKER = '\u2059' # 5-dot punctuation
SPACE_MARKER = '\u2e2a' # square 4-dot
LETTERING_BEG = '\u2e2b' # v 3-dot
LETTERING_MID = '\u2e2c' # ^ 3-dot
LETTERING_END = '\u2e2d' # + 4-dot
PY3 = sys.version_info.major == 3
LICENSE = """
Copyright (c) 2015, Stig-Arne Gronroos
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
def get_argparser():
    """Build and return the command-line parser for
    flatcat-advanced-segment."""
    parser = argparse.ArgumentParser(
        prog='flatcat-advanced-segment',
        description="""
Morfessor FlatCat advanced segmentation
""",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False)
    arg = parser.add_argument

    # Positional arguments.
    arg('model', metavar='<flatcat model>',
        help='A FlatCat model (tarball or binary)')
    arg('infile', metavar='<infile>',
        help='The input file. The type will be sniffed automatically, '
             'or can be specified manually.')
    arg('outfile', metavar='<outfile>',
        help='The output file. The type is defined by preset '
             'or can be specified manually.')

    # Options.
    arg('-e', '--encoding', dest='encoding', metavar='<encoding>',
        help='Encoding of input and output files (if none is given, '
             'both the local encoding and UTF-8 are tried).')
    arg('--category-separator', dest='catseparator', type=str, default='/',
        metavar='<string>',
        help='separator for the category tag following a morph. '
             '(default %(default)s).')
    arg('--output-format', dest='output_format', type=str,
        default=None, metavar='<id>',
        help='Output format')
    arg('--dont-remove-nonmorphemes', dest='no_rm_nonmorph', default=False,
        action='store_true',
        help='Use heuristic postprocessing to remove nonmorphemes '
             'from output segmentations.')
    arg('--passthrough-regex-file', dest='re_file', type=str,
        default=None, metavar='<file>',
        help='File containing regular expressions for tokens '
             'that should be passed through without segmentation.')
    return parser
def corpus_reader(io, infile):
    """Yield the corpus token by token, preserving separators.

    Tokens of each line are yielded with a ' ' between consecutive
    tokens and a '\\n' after every line.
    """
    for raw_line in io._read_text_file(infile, raw=True):
        first = True
        for token in raw_line.split(' '):
            if not first:
                yield ' '
            yield token
            first = False
        yield '\n'
class FlatcatWrapper(object):
    """Thin wrapper around a FlatCat model adding optional nonmorpheme
    removal and a cached top-frequency morph lookup."""

    def __init__(self, model, remove_nonmorphemes=True):
        """
        Args:
            model: a loaded FlatCat model.
            remove_nonmorphemes: if True, apply the heuristic
                postprocessor to every analysis.
        """
        self.model = model
        if remove_nonmorphemes:
            self.hpp = flatcat.categorizationscheme.HeuristicPostprocessor()
        else:
            self.hpp = None
        # Lazily computed set of the most frequent morphs (see
        # is_top_freq_morph).
        self._top_morphs = None

    def segment(self, word):
        """Viterbi-analyze a word, optionally removing nonmorphemes."""
        (analysis, cost) = self.model.viterbi_analyze(word)
        if self.hpp is not None:
            analysis = self.hpp.apply_to(analysis, self.model)
        return analysis

    def is_top_freq_morph(self, morph, threshold=5000):
        """Return True if morph is among the `threshold` most frequent
        morphs of the model.

        NOTE(review): the cached set is built for the threshold of the
        first call; later calls with a different threshold reuse it --
        confirm this is acceptable.
        """
        if self._top_morphs is None:
            morphs = sorted(
                self.model._morph_usage._contexts.items(),
                key=lambda pair: pair[1].count,
                reverse=True)
            self._top_morphs = set(m for (m, c) in morphs[:threshold])
        return morph in self._top_morphs

    def _make_morph_formatter(self, category_sep, strip_tags):
        """Return a callable formatting a categorized morph as a string,
        with or without its category tag."""
        if not strip_tags:
            def output_morph(cmorph):
                if cmorph.category is None:
                    return cmorph.morph
                return '{}{}{}'.format(cmorph.morph,
                                       category_sep,
                                       cmorph.category)
        else:
            def output_morph(cmorph):
                # Plain strings have no .morph attribute; pass them through.
                try:
                    return cmorph.morph
                except AttributeError:
                    return cmorph
        return output_morph
#analysis = flatcat.flatcat._wb_wrap(word.analysis)
# if cmorph.category == flatcat.WORD_BOUNDARY:
# continue
# out.append(self._morph_formatter(cmorph))
#formatted = ''.join(out)
# ''.join(cmorph.morph for cmorph in word.analysis)
def split_compound(morphs):
    """Group a flat morph sequence into compound parts.

    A new part begins whenever a PRE or STM morph follows anything
    other than a PRE morph (i.e. a new stem/prefix starts after a
    completed word part).

    Returns:
        A list of lists of morphs (at least one, possibly empty, part).
    """
    parts = []
    part = []
    previous_category = None
    for cmorph in morphs:
        starts_new_part = (previous_category is not None
                           and previous_category != 'PRE'
                           and cmorph.category in ('PRE', 'STM'))
        if starts_new_part:
            parts.append(part)
            part = []
        part.append(cmorph)
        previous_category = cmorph.category
    parts.append(part)
    return parts
def mark_by_tag(morphs):
    """Yield morph strings with '+' on the attaching side of affixes.

    Prefixes get a trailing '+', suffixes a leading '+', stems and
    untagged morphs are yielded bare. Any other category is a bug.
    """
    for cmorph in morphs:
        category = cmorph.category
        if category is None:
            yield cmorph.morph
        elif category == 'PRE':
            yield '{}+'.format(cmorph.morph)
        elif category == 'SUF':
            yield '+{}'.format(cmorph.morph)
        elif category == 'STM':
            yield '{}'.format(cmorph.morph)
        else:
            assert False, category
def long_to_stems(morphs):
    """Recategorize long morphs (length >= 5) as stems.

    Morphs already tagged STM are passed through unchanged, which also
    avoids unnecessary re-wrapping.
    """
    for cmorph in morphs:
        if cmorph.category != 'STM' and len(cmorph) >= 5:
            yield flatcat.CategorizedMorph(cmorph.morph, 'STM')
        else:
            yield cmorph
def postprocess(fmt, morphs, model_wrapper):
    """Render one token's morph analysis as a string in the chosen format.

    Args:
        fmt: output format identifier (see the branches below; the
            example in each branch shows the rendering of
            'alakivenkoloon' segmented as ala+kive+n / kolo+on).
        morphs: sequence of categorized morphs for one token.
        model_wrapper: FlatcatWrapper; only used by the '2016c' format
            for morph frequency lookups.

    Returns:
        The formatted string.

    Raises:
        AssertionError: for an unknown format identifier.
    """
    if fmt == 'both_sides':
        #ala+ +kive+ +n+ +kolo+ +on
        return '+ +'.join(cmorph.morph for cmorph in morphs)
    elif fmt == 'right_only':
        #ala +kive +n +kolo +on
        return ' +'.join(cmorph.morph for cmorph in morphs)
    #elif fmt == 'affix_only':
    #    #ala+ kive +n? kolo +on
    #    compound = split_compound(morphs)
    #    pass
    elif fmt == 'compound_symbol':
        #ala+ kive +n <c> kolo +on
        parts = split_compound(morphs)
        parts = [mark_by_tag(part) for part in parts]
        parts = [' '.join(part) for part in parts]
        return ' +@+ '.join(parts)
    elif fmt == 'compound_both_sides':
        #ala+ +kive+ +n> <kolo+ +on
        parts = split_compound(morphs)
        parts = [[morph.morph for morph in part]
                 for part in parts]
        parts = ['+ +'.join(part) for part in parts]
        return '@ @'.join(parts)
    elif fmt == 'compound_affix':
        #ala+ kive +n> kolo +on
        parts = split_compound(morphs)
        parts = [mark_by_tag(part) for part in parts]
        parts = [' '.join(part) for part in parts]
        return '@ '.join(parts)
    elif fmt == 'compound_modifier_affix':
        #alakiven> kolo +on
        # All compound parts except the last are joined solid; only the
        # last part keeps affix markings.
        parts = split_compound(morphs)
        out = []
        for part in parts[:-1]:
            part = [morph.morph for morph in part]
            out.append(''.join(part))
        part = mark_by_tag(parts[-1])
        out.append(' '.join(part))
        return '@ '.join(out)
    elif fmt == 'advanced':
        #alakiven+ kolo +on
        morphs = long_to_stems(morphs)
        parts = split_compound(morphs)
        out = []
        for part in parts[:-1]:
            part = [morph.morph for morph in part]
            out.append(''.join(part))
        part = mark_by_tag(parts[-1])
        out.append(' '.join(part))
        return '+ '.join(out)
    elif fmt == 'compound_splitter':
        #alakiven+ koloon (except 5-point, not plus)
        morphs = long_to_stems(morphs)
        parts = split_compound(morphs)
        out = []
        for part in parts:
            part = [morph.morph for morph in part]
            out.append(''.join(part))
        return (BND_MARKER + ' ').join(out)
    elif fmt == '2016':
        #ala +kive +n +kolo +on (except 5-point, not plus)
        return (' ' + BND_MARKER).join(
            cmorph.morph for cmorph in morphs)
    elif fmt == '2016b':
        # same as 2016, except names and numbers spelled out
        firstchar = morphs[0].morph[0]
        if firstchar.isupper() or firstchar.isdigit():
            # Spell out character by character.
            chars = ''.join(cmorph.morph for cmorph in morphs)
            return (' ' + BND_MARKER).join(chars)
        return (' ' + BND_MARKER).join(
            cmorph.morph for cmorph in morphs)
    elif fmt == '2016c':
        out = []
        for cmorph in morphs:
            morph = cmorph.morph
            if model_wrapper.is_top_freq_morph(morph, 5000):
                # include most frequent morphs in lexicon
                out.append(morph)
            elif BND_MARKER in morph:
                # avoid breaking already forcesplit
                out.append(morph)
            else:
                # spell out everything else
                out.extend([char for char in morph])
        return (' ' + BND_MARKER).join(out)
    elif fmt == '2016d':
        # similar to 2016b, but different marker scheme
        firstchar = morphs[0].morph[0]
        if firstchar == ' ':
            return ' '
        if firstchar.isupper() or firstchar.isdigit():
            chars = ''.join(cmorph.morph for cmorph in morphs)
            if len(chars) == 1:
                return SPACE_MARKER + chars
            # Mark spelled-out letterings with begin/middle/end markers.
            chars = list(chars)
            firstmarked = LETTERING_BEG + chars.pop(0)
            lastmarked = LETTERING_END + chars.pop(-1)
            midmarked = [LETTERING_MID + char for char in chars]
            marked = [firstmarked] + midmarked + [lastmarked]
            return SPACE_MARKER + (' '.join(marked))
        out = ' '.join(
            cmorph.morph for cmorph in morphs)
        if out[0] == BND_MARKER:
            # remove leading boundary markers from forcesplit
            return out[1:]
        else:
            # mark leading space
            return SPACE_MARKER + out
    else:
        assert False, 'unknown output format {}'.format(fmt)
class SegmentationCache(object):
    """Memoizing wrapper around a segmentation function.

    Keeps counts of segmented vs. unsegmented tokens and supports
    passthrough patterns that bypass segmentation entirely.
    """

    def __init__(self, seg_func, passthrough=None, limit=1000000):
        """
        Args:
            seg_func: callable mapping a word to a list of morphs.
            passthrough: compiled regex patterns; matching words are
                returned whole and untagged.
            limit: maximum number of cached words before eviction.
        """
        self.seg_func = seg_func
        self.passthrough = [] if passthrough is None else passthrough
        self.limit = limit
        self._cache = {}
        self.seg_count = 0
        self.unseg_count = 0

    def segment(self, word):
        """Segment one word, consulting and updating the cache."""
        for pattern in self.passthrough:
            if pattern.match(word):
                # Passthrough tokens are returned whole, untagged.
                return [flatcat.CategorizedMorph(word, None)]
        if len(self._cache) > self.limit:
            # Brute-force eviction: drop the whole cache at the limit.
            self._cache = {}
        try:
            seg = self._cache[word]
        except KeyError:
            seg = self._cache[word] = self.seg_func(word)
        if len(seg) > 1:
            self.seg_count += 1
        else:
            self.unseg_count += 1
        return seg

    def segment_from(self, pipe):
        """Lazily segment every word arriving from the iterable pipe."""
        return (self.segment(word) for word in pipe)
def load_model(io, modelfile):
    """Load a FlatCat model from a pickled binary or a tarball file.

    The file type is decided by the filename suffix. After loading, the
    model's HMM parameters are initialized.

    Args:
        io: FlatcatIO instance used for reading.
        modelfile: path ending in .pickled/.pickle/.bin (binary) or
            .tar.gz/.tgz (tarball).

    Raises:
        ArgumentException: for any other suffix.
    """
    suffix_is_pickle = modelfile.endswith(('.pickled', '.pickle', '.bin'))
    suffix_is_tarball = modelfile.endswith(('.tar.gz', '.tgz'))
    if not (suffix_is_pickle or suffix_is_tarball):
        raise ArgumentException(
            'This tool can only load tarball and binary models')
    if suffix_is_pickle:
        model = io.read_binary_model_file(modelfile)
    else:
        model = io.read_tarball_model_file(modelfile)
    model.initialize_hmm()
    return model
def main(args):
    """Segment the input corpus with a FlatCat model and write the output.

    Args:
        args: Namespace produced by get_argparser().parse_args().
    """
    io = flatcat.io.FlatcatIO(encoding=args.encoding,
                              category_separator=args.catseparator)
    passthrough = []
    if args.re_file is not None:
        for line in io._read_text_file(args.re_file):
            passthrough.append(
                re.compile(line))
    fmt = args.output_format
    # BUG FIX: fmt defaults to None; the previous code called
    # fmt.startswith() unconditionally, raising AttributeError whenever
    # --output-format was omitted.
    if fmt is not None and (fmt.startswith('2016')
                            or fmt == 'compound_splitter'):
        print('Passing through boundary marker')
        passthrough.append(re.compile(BND_MARKER + '.*'))
    model = load_model(io, args.model)
    model_wrapper = FlatcatWrapper(
        model,
        remove_nonmorphemes=(not args.no_rm_nonmorph))
    cache = SegmentationCache(model_wrapper.segment, passthrough)
    with io._open_text_file_write(args.outfile) as fobj:
        pipe = corpus_reader(io, args.infile)
        pipe = utils._generator_progress(pipe, 10000)
        pipe = cache.segment_from(pipe)
        # FIXME: transformations (joining/filtering) here
        pipe = (postprocess(fmt, morphs, model_wrapper)
                for morphs in pipe)
        for token in pipe:
            fobj.write(token)
    tot_count = cache.seg_count + cache.unseg_count
    # BUG FIX: guard against ZeroDivisionError when the corpus is empty.
    seg_prop = float(cache.seg_count) / float(tot_count) if tot_count else 0.0
    print('{} segmented ({}), {} unsegmented, {} total'.format(
        cache.seg_count, seg_prop, cache.unseg_count, tot_count))
if __name__ == "__main__":
    parser = get_argparser()
    try:
        args = parser.parse_args(sys.argv[1:])
        main(args)
    except ArgumentException as e:
        # Report usage errors through argparse's standard error channel.
        parser.error(e)
    # Removed dead handler: `except Exception as e: raise` was a no-op
    # re-raise; all other exceptions now propagate unchanged as before.
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A fake implementation for the `scandir` function working with
FakeFilesystem.
Works with both the function integrated into the `os` module since Python 3.5
and the standalone function available in the standalone `scandir` python
package.
"""
import os
import sys
from pyfakefs.extra_packages import use_scandir_package
from pyfakefs.helpers import to_string
if sys.version_info >= (3, 6):
BaseClass = os.PathLike
else:
BaseClass = object
class DirEntry(BaseClass):
    """Emulates os.DirEntry. Note that we did not enforce keyword only
    arguments."""

    def __init__(self, filesystem):
        """Initialize the dir entry with unset values.

        Args:
            filesystem: the fake filesystem used for implementation.
        """
        self._filesystem = filesystem
        self.name = ''      # entry name; filled in by ScanDirIter
        self.path = ''      # path relative to the scandir() argument
        self._abspath = ''  # absolute path inside the fake filesystem
        self._inode = None  # cached inode number, set lazily by stat()
        self._islink = False
        self._isdir = False
        self._statresult = None          # cached lstat-style result
        self._statresult_symlink = None  # cached stat result (follows links)

    def inode(self):
        """Return the inode number of the entry."""
        if self._inode is None:
            # A non-following stat fills in self._inode as a side effect.
            self.stat(follow_symlinks=False)
        return self._inode

    def is_dir(self, follow_symlinks=True):
        """Return True if this entry is a directory entry.

        Args:
            follow_symlinks: If True, also return True if this entry is a
                symlink pointing to a directory.

        Returns:
            True if this entry is an existing directory entry, or if
            follow_symlinks is set, and this entry points to an existing
            directory entry.
        """
        return self._isdir and (follow_symlinks or not self._islink)

    def is_file(self, follow_symlinks=True):
        """Return True if this entry is a regular file entry.

        Args:
            follow_symlinks: If True, also return True if this entry is a
                symlink pointing to a regular file.

        Returns:
            True if this entry is an existing file entry, or if
            follow_symlinks is set, and this entry points to an existing
            file entry.
        """
        return not self._isdir and (follow_symlinks or not self._islink)

    def is_symlink(self):
        """Return True if this entry is a symbolic link (even if broken)."""
        return self._islink

    def stat(self, follow_symlinks=True):
        """Return a stat_result object for this entry.

        Args:
            follow_symlinks: If False and the entry is a symlink, return the
                result for the symlink, otherwise for the object it points to.
        """
        if follow_symlinks:
            if self._statresult_symlink is None:
                file_object = self._filesystem.resolve(self._abspath)
                self._statresult_symlink = file_object.stat_result.copy()
                if self._filesystem.is_windows_fs:
                    # Fake Windows filesystems report st_nlink as 0.
                    self._statresult_symlink.st_nlink = 0
            return self._statresult_symlink
        if self._statresult is None:
            # lresolve does not follow the final symlink.
            file_object = self._filesystem.lresolve(self._abspath)
            self._inode = file_object.st_ino
            self._statresult = file_object.stat_result.copy()
            if self._filesystem.is_windows_fs:
                self._statresult.st_nlink = 0
        return self._statresult

    if sys.version_info >= (3, 6):
        # os.DirEntry implements the os.PathLike protocol since 3.6.
        def __fspath__(self):
            return self.path
class ScanDirIter:
    """Iterator for DirEntry objects returned from `scandir()`
    function."""

    def __init__(self, filesystem, path):
        """
        Args:
            filesystem: the fake filesystem used for implementation.
            path: the directory path, or (where supported) an open file
                descriptor for the directory.

        Raises:
            OSError: if path does not name a directory (via confirmdir).
        """
        self.filesystem = filesystem
        if isinstance(path, int):
            # File-descriptor arguments are only supported by os.scandir
            # on POSIX from Python 3.7 on (not by the scandir package).
            if not use_scandir_package and (
                    sys.version_info < (3, 7) or
                    self.filesystem.is_windows_fs):
                raise NotImplementedError(
                    'scandir does not support file descriptor '
                    'path argument')
            self.abspath = self.filesystem.absnormpath(
                self.filesystem.get_open_file(path).get_object().path)
            # With an fd argument, entry .path values have no prefix.
            self.path = ''
        else:
            self.abspath = self.filesystem.absnormpath(path)
            self.path = to_string(path)
        contents = self.filesystem.confirmdir(self.abspath).contents
        self.contents_iter = iter(contents)

    def __iter__(self):
        return self

    def __next__(self):
        # Entries are produced lazily, one DirEntry per directory item.
        entry = self.contents_iter.__next__()
        dir_entry = DirEntry(self.filesystem)
        dir_entry.name = entry
        dir_entry.path = self.filesystem.joinpaths(self.path,
                                                   dir_entry.name)
        dir_entry._abspath = self.filesystem.joinpaths(self.abspath,
                                                       dir_entry.name)
        dir_entry._isdir = self.filesystem.isdir(dir_entry._abspath)
        dir_entry._islink = self.filesystem.islink(dir_entry._abspath)
        return dir_entry

    if sys.version_info >= (3, 6):
        # os.scandir() acts as a context manager since Python 3.6.
        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            self.close()

        def close(self):
            # Nothing to release; provided for API compatibility.
            pass
def scandir(filesystem, path=''):
    """Return an iterator of DirEntry objects corresponding to the entries
    in the directory given by path.

    Args:
        filesystem: The fake filesystem used for implementation
        path: Path to the target directory within the fake filesystem.

    Returns:
        an iterator to an unsorted list of os.DirEntry objects for
        each entry in path.

    Raises:
        OSError: if the target is not a directory.
    """
    # The directory is validated eagerly (in ScanDirIter.__init__),
    # while the entries themselves are produced lazily.
    return ScanDirIter(filesystem, path)
def _classify_directory_contents(filesystem, root):
"""Classify contents of a directory as files/directories.
Args:
filesystem: The fake filesystem used for implementation
root: (str) Directory to examine.
Returns:
(tuple) A tuple consisting of three values: the directory examined,
a list containing all of the directory entries, and a list
containing all of the non-directory entries.
(This is the same format as returned by the `os.walk` generator.)
Raises:
Nothing on its own, but be ready to catch exceptions generated by
underlying mechanisms like `os.listdir`.
"""
dirs = []
files = []
for entry in filesystem.listdir(root):
if filesystem.isdir(filesystem.joinpaths(root, entry)):
dirs.append(entry)
else:
files.append(entry)
return root, dirs, files
def walk(filesystem, top, topdown=True, onerror=None, followlinks=False):
    """Perform an os.walk operation over the fake filesystem.

    Args:
        filesystem: The fake filesystem used for implementation
        top: The root directory from which to begin walk.
        topdown: Determines whether to return the tuples with the root as
            the first entry (`True`) or as the last, after all the child
            directory tuples (`False`).
        onerror: If not `None`, function which will be called to handle the
            `os.error` instance provided when `os.listdir()` fails.
        followlinks: If `True`, symbolic links are followed.

    Yields:
        (path, directories, nondirectories) for top and each of its
        subdirectories. See the documentation for the builtin os module
        for further details.
    """
    def do_walk(top_dir, top_most=False):
        # A symlinked directory is only descended into if followlinks is
        # set -- except for the top directory itself, which is always
        # walked.
        if not top_most and not followlinks and filesystem.islink(top_dir):
            return
        try:
            top_contents = _classify_directory_contents(filesystem, top_dir)
        except OSError as exc:
            # Listing failed; delegate error handling to the caller.
            top_contents = None
            if onerror is not None:
                onerror(exc)
        if top_contents is not None:
            if topdown:
                yield top_contents
            for directory in top_contents[1]:
                # NOTE(review): islink is checked on the bare entry name
                # rather than the joined path -- verify this is intended.
                if not followlinks and filesystem.islink(directory):
                    continue
                for contents in do_walk(filesystem.joinpaths(top_dir,
                                                             directory)):
                    yield contents
            if not topdown:
                # Bottom-up: the parent is reported after all its subtrees.
                yield top_contents
    return do_walk(to_string(top), top_most=True)
class FakeScanDirModule:
    """Uses FakeFilesystem to provide a fake `scandir` module replacement.

    .. Note:: The ``scandir`` function is a part of the standard ``os`` module
      since Python 3.5. This class handles the separate ``scandir`` module
      that is available on pypi.

    You need a fake_filesystem to use this:
    `filesystem = fake_filesystem.FakeFilesystem()`
    `fake_scandir_module = fake_filesystem.FakeScanDirModule(filesystem)`
    """

    @staticmethod
    def dir():
        """Return the list of patched function names. Used for patching
        functions imported from the module.
        """
        return 'scandir', 'walk'

    def __init__(self, filesystem):
        # The fake filesystem all delegated calls operate on.
        self.filesystem = filesystem

    def scandir(self, path='.'):
        """Return an iterator of DirEntry objects corresponding to the entries
        in the directory given by path.

        Args:
            path: Path to the target directory within the fake filesystem.

        Returns:
            an iterator to an unsorted list of os.DirEntry objects for
            each entry in path.

        Raises:
            OSError: if the target is not a directory.
        """
        # Delegates to the module-level scandir() implementation.
        return scandir(self.filesystem, path)

    def walk(self, top, topdown=True, onerror=None, followlinks=False):
        """Perform a walk operation over the fake filesystem.

        Args:
            top: The root directory from which to begin walk.
            topdown: Determines whether to return the tuples with the root as
                the first entry (`True`) or as the last, after all the child
                directory tuples (`False`).
            onerror: If not `None`, function which will be called to handle the
                `os.error` instance provided when `os.listdir()` fails.
            followlinks: If `True`, symbolic links are followed.

        Yields:
            (path, directories, nondirectories) for top and each of its
            subdirectories. See the documentation for the builtin os module
            for further details.
        """
        # Delegates to the module-level walk() implementation.
        return walk(self.filesystem, top, topdown, onerror, followlinks)
| |
import logging
import re
from django import forms
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from metashare.settings import LOG_HANDLER
from metashare.repository.models import resourceInfoType_model
from metashare.repository.search_indexes import resourceInfoType_modelIndex
# from metashare.recommendations.recommendations import get_more_from_same_creators
# get_more_from_same_projects
from haystack.forms import FacetedSearchForm
from haystack.query import SQ
from metashare.utils import prettify_camel_case_string
from metashare.settings import MEDIA_URL
# Setup logging support.
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(LOG_HANDLER)
# define special query prefixes
# (query tokens starting with these prefixes are routed through
# _process_special_query instead of the normal haystack auto_query)
MORE_FROM_SAME_CREATORS = "mfsc"
MORE_FROM_SAME_PROJECTS = "mfsp"
def is_member(user, group):
    """
    Tells whether the given user belongs to the user group named `group`.
    """
    matching_groups = user.groups.filter(name=group)
    return matching_groups.exists()
class FacetedBrowseForm(FacetedSearchForm):
    """
    A custom `FacetedSearchForm` for faceted browsing and searching.
    """
    def search(self):
        """
        A blend of its super methods with only a different base
        `SearchQuerySet` in case of empty/invalid queries.

        Returns a `SearchQuerySet` filtered by the user query, by any
        special ("more from same ...") queries and by the selected facets;
        unpublished resources are hidden from non-privileged users.
        """
        sqs = self.searchqueryset
        if self.is_valid() and self.cleaned_data.get('q'):
            # extract special queries
            special_queries, query = \
                _extract_special_queries(self.cleaned_data.get('q'))
            if query:
                sqs = sqs.auto_query(query)
            if (special_queries):
                # for each special query, get the Django internal resource ids
                # matching the query and filter the SearchQuerySet accordingly
                for _sq in special_queries:
                    _res_ids = _process_special_query(_sq)
                    if _res_ids:
                        # OR-combine all matching ids into a single SQ filter
                        _sq = SQ()
                        for _id in _res_ids:
                            _sq.add(SQ(django_id=_id), SQ.OR)
                        sqs = sqs.filter(_sq)
                    else:
                        # force empty search result if no ids are returned
                        # for a special query
                        sqs = sqs.none()
                        break
        if self.load_all:
            sqs = sqs.load_all()
        # we need to process each facet to ensure that the field name and the
        # value are quoted correctly and separately:
        for facet in [f for f in self.selected_facets if ":" in f]:
            field, value = facet.split(":", 1)
            # only add facets which are also in the search index
            # pylint: disable-msg=E1101
            if not field in resourceInfoType_modelIndex.fields:
                LOGGER.info('Ignoring unknown facet field "%s".', field)
                continue
            if value:
                sqs = sqs.narrow(u'%s:"%s"' % (field, sqs.query.clean(value)))
        # users that are neither EC members nor superusers only see items
        # with publication status "published"
        if not is_member(self.request.user, 'ecmembers') and not self.request.user.is_superuser:
            sqs = sqs.filter_and(publicationStatusFilter__exact='published')
        return sqs

    def clean(self):
        # add validation errors for unknown facet fields in the request:
        _errors = []
        for facet in self.selected_facets:
            field = facet.split(":", 1)[0]
            # pylint: disable-msg=E1101
            if not field in resourceInfoType_modelIndex.fields:
                _errors.append(
                    _("Ignoring an unknown filter from your query: %s") % field)
        if _errors:
            raise forms.ValidationError(_errors)
        return super(FacetedBrowseForm, self).clean()
def _extract_special_queries(query):
    """
    Splits the given query string into special queries and the rest.

    Returns a pair of (a) the list of extracted special query tokens and
    (b) the original query with those tokens removed and its whitespace
    normalized. Currently there are two kinds of special queries:
        more-from-creator-of:<resource-id>
        more-from-project-of:<resource-id>
    """
    # a token is "special" if it starts with one of the known prefixes
    special_queries = [token for token in query.split()
                       if token.startswith((MORE_FROM_SAME_CREATORS,
                                            MORE_FROM_SAME_PROJECTS))]
    if special_queries:
        # remove the special tokens from the original query, then collapse
        # any runs of whitespace and trim the ends
        for special in special_queries:
            query = query.replace(special, "")
        query = " ".join(query.split())
    return special_queries, query
def _process_special_query(query):
    """
    Processes the given special query (e.g. "mfsc:<storage-id>").

    Returns a list of resource ids matching the query; the ids are the
    INTERNAL Django ids, not the StorageObject identifiers!!!

    Returns an empty list for malformed queries, unknown storage
    identifiers and unknown special query types.
    """
    # guard against tokens without a ":<resource-id>" part; the previous
    # implementation raised an unhandled ValueError on them
    if ":" not in query:
        LOGGER.info('Ignoring malformed special query "%s".', query)
        return []
    # split only on the first colon so that ids containing colons do not
    # crash the unpacking
    query_type, resource_id = query.split(":", 1)
    # verify that the referenced resource exists
    try:
        res = resourceInfoType_model.objects.get(
            storage_object__identifier=resource_id)
    except resourceInfoType_model.DoesNotExist:
        LOGGER.info('Ignoring unknown storage identifier "%s" in "%s" query.',
                    resource_id, query_type)
        return []
    # The recommendation engine is currently disabled (its imports are
    # commented out at the top of this module). The previous version kept
    # the dispatch below commented out inside a try/except/else, which left
    # an unreachable `return [x.id for x in rel_res]` referencing an
    # undefined name. Until the engine is re-enabled, every special query
    # type is treated as unknown:
    # if query_type == MORE_FROM_SAME_CREATORS:
    #     rel_res = get_more_from_same_creators(res)
    # elif query_type == MORE_FROM_SAME_PROJECTS:
    #     rel_res = get_more_from_same_projects(res)
    # else: ... fall through to the "unknown" branch below
    # return [x.id for x in rel_res]
    LOGGER.info('Ignoring unknown special query type "%s".', query_type)
    return []
class LicenseSelectionForm(forms.Form):
    """
    A `Form` for presenting download licenses and selecting exactly one of them.

    `licences` is expected to map a licence name to a pair of
    (licence info object, direct-download-available flag) -- TODO confirm
    against the calling view.
    """
    def __init__(self, licences, *args, **kwargs):
        """
        Initializes the `LicenseSelectionForm` with the given licenses.
        """
        super(LicenseSelectionForm, self).__init__(*args, **kwargs)

        # The renderer class is defined inside __init__ so that it can close
        # over the `licences` mapping passed to this particular form instance.
        class _LicenseSelectionRenderer(forms.widgets.RadioFieldRenderer):
            """
            A custom `RadioSelectRenderer` for rendering license selections.
            This widget does not only contain radio buttons with license name
            labels but additionally short license information blocks for each
            license.
            """
            def __iter__(self):
                # Yields (licence info, radio input widget) pairs, one per
                # choice; the widget label is the prettified licence name.
                for i, choice in enumerate(self.choices):
                    l_name=(choice[0],prettify_camel_case_string(licences[choice[0]][0].licence))
                    yield (licences[choice[0]][0],
                           forms.widgets.RadioInput(self.name, self.value,
                                                    self.attrs.copy(), l_name, i))

            def render(self):
                # Builds an <ul> with one <li> per licence: the radio input
                # followed by the licence's restrictions block.
                return mark_safe(u'<ul>{0}\n</ul>'.format(
                    u'\n'.join([u'<li><div>{0}</div>\n{1}</li>' \
                        .format(force_unicode(w),
                                self._create_restrictions_block(l, l.id, w.choice_value))
                        for (l, w) in self])))

            def _create_restrictions_block(self, licence_info, licence_id, licence_name):
                """
                Creates an HTML block element string containing the restrictions
                of the given license information.
                """
                r_list = licence_info.get_restrictionsOfUse_display_list()
                # Look up the direct-download flag first by licence id and, if
                # that key is unknown, by licence name; the message markup is
                # identical in both branches.
                try:
                    if licences[licence_id][1]:
                        direct_download_msg = u'<img src="'+MEDIA_URL + "images/ok.png"+u' " alt="✓" style="width:20px;height:20px;"> {0}'.format(_('Direct download available'))
                    else:
                        direct_download_msg = u'<img src="'+MEDIA_URL + "images/warning.png"+u' " alt="⚠" style="width:20px;height:20px;"> {0}'.format(_('No direct download available'))
                    # direct_download_msg = u'<span style="color:orange;font-size:16pt;font-weight:bold">⚠ </span>{0}'.format(_('No direct download available'))
                except KeyError:
                    if licences[licence_name][1]:
                        direct_download_msg = u'<img src="'+MEDIA_URL + "images/ok.png"+u' " alt="✓" style="width:20px;height:20px;"> {0}'.format(_('Direct download available'))
                    else:
                        direct_download_msg = u'<img src="'+MEDIA_URL + "images/warning.png"+u' " alt="⚠" style="width:20px;height:20px;"> {0}'.format(_('No direct download available'))
                if r_list:
                    # list the conditions of use below the download message
                    result = u'<div><p>{0}</p><p>{1}</p>' \
                        .format(direct_download_msg, _('Conditions of use:'))
                    result += u'\n<ul>'
                    for restr in r_list:
                        result += u'<li style="font-style:italic">{0}</li>'.format(restr)
                    result += u'</ul>'
                else:
                    result = u'<div><p>{0}</p>' \
                        .format(direct_download_msg,)
                # result += u'<li>{0}</li></ul></div>'.format(direct_download_msg)
                return result

        # one radio choice per licence name, rendered by the class above
        self.fields['licence'] = \
            forms.ChoiceField(choices=[(name, name) for name in licences],
                              widget=forms.widgets.RadioSelect(
                                  renderer=_LicenseSelectionRenderer))
class LicenseAgreementForm(forms.Form):
    """
    A `Form` asking the user to accept a licence before downloading.
    """
    # hidden marker field so the processing view can recognize this form
    in_licence_agree_form = forms.BooleanField(
        initial=True, widget=forms.HiddenInput())
    licence_agree = forms.BooleanField(
        label=_('I agree to these licence terms and would like to download '
                'the resource.'))

    def __init__(self, licence, *args, **kwargs):
        """
        Initializes the `LicenseAgreementForm` with the given licence.
        """
        super(LicenseAgreementForm, self).__init__(*args, **kwargs)
        # carry the licence name along as a hidden field
        self.fields['licence'] = forms.CharField(
            initial=licence, widget=forms.HiddenInput())
class DownloadContactForm(forms.Form):
    """
    A `Form` for sending a contact request about downloading a resource.
    """
    userEmail = forms.EmailField(label=_("Your e-mail"))
    message = forms.CharField(widget=forms.Textarea(), label=_("Your message"))
class DownloadUnregisteredContactForm(forms.Form):
    """
    A `Form` for sending a contact request about downloading a resource,
    used for visitors without an account and therefore also asking for the
    sender's name.
    """
    userName = forms.CharField(label=_("Your name"))
    userEmail = forms.EmailField(label=_("Your e-mail"))
    message = forms.CharField(widget=forms.Textarea(), label=_("Your message"))
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Trivial type inference for simple functions.
For internal use only; no backwards-compatibility guarantees.
"""
from __future__ import absolute_import
from __future__ import print_function
import collections
import dis
import inspect
import pprint
import sys
import traceback
import types
from builtins import object
from builtins import zip
from functools import reduce
from apache_beam.typehints import Any
from apache_beam.typehints import typehints
# pylint: disable=wrong-import-order, wrong-import-position, ungrouped-imports
try: # Python 2
import __builtin__ as builtins
except ImportError: # Python 3
import builtins
# pylint: enable=wrong-import-order, wrong-import-position, ungrouped-imports
class TypeInferenceError(ValueError):
  """Raised when the return type of a callable cannot be inferred."""
def instance_to_type(o):
  """Given a Python object o, return the corresponding type hint.

  Container instances are converted recursively, element by element.
  """
  kind = type(o)
  if o is None:
    return type(None)
  if kind not in typehints.DISALLOWED_PRIMITIVE_TYPES:
    # pylint: disable=deprecated-types-field
    if sys.version_info[0] == 2 and kind == types.InstanceType:
      # Python 2 old-style instance: use its class directly.
      return o.__class__
    if kind == BoundMethod:
      return types.MethodType
    return kind
  if kind == tuple:
    return typehints.Tuple[[instance_to_type(item) for item in o]]
  if kind == list:
    return typehints.List[
        typehints.Union[[instance_to_type(item) for item in o]]
    ]
  if kind == set:
    return typehints.Set[
        typehints.Union[[instance_to_type(item) for item in o]]
    ]
  if kind == dict:
    return typehints.Dict[
        typehints.Union[[instance_to_type(k) for k in o.keys()]],
        typehints.Union[[instance_to_type(v) for v in o.values()]],
    ]
  raise TypeInferenceError('Unknown forbidden type: %s' % kind)
def union_list(xs, ys):
  """Pairwise union of two equal-length lists of types."""
  assert len(xs) == len(ys)
  return [union(a, b) for a, b in zip(xs, ys)]
class Const(object):
  """Wraps a constant value together with its inferred type hint."""

  def __init__(self, value):
    self.value = value
    self.type = instance_to_type(value)

  def __eq__(self, other):
    if not isinstance(other, Const):
      return False
    return self.value == other.value

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __hash__(self):
    return hash(self.value)

  def __repr__(self):
    # Truncate very long values to keep debug output readable.
    return 'Const[%s]' % str(self.value)[:100]

  @staticmethod
  def unwrap(x):
    """Replaces a Const with its type; passes anything else through."""
    return x.type if isinstance(x, Const) else x

  @staticmethod
  def unwrap_all(xs):
    """Applies unwrap to every element of xs."""
    return list(map(Const.unwrap, xs))
class FrameState(object):
  """Stores the state of the frame at a particular point of execution.

  Tracks the (inferred types of the) local variables and the value stack
  of the function `f` while its bytecode is being emulated.
  """
  def __init__(self, f, local_vars=None, stack=()):
    # f: the function being analysed; its code object provides the
    # constant/name tables used by the *_type helpers below.
    self.f = f
    self.co = f.__code__
    self.vars = list(local_vars)
    self.stack = list(stack)
  def __eq__(self, other):
    return isinstance(other, FrameState) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other
  def __hash__(self):
    # NOTE(review): self.vars and self.stack are lists, so this raises
    # TypeError if a FrameState is ever actually hashed -- confirm intent.
    return hash(tuple(sorted(self.__dict__.items())))
  def copy(self):
    # Shallow copy; the lists are re-created by __init__.
    return FrameState(self.f, self.vars, self.stack)
  def const_type(self, i):
    """Returns the co_consts entry at index i wrapped as a Const."""
    return Const(self.co.co_consts[i])
  def get_closure(self, i):
    # Cell variables are stored in the local vars; free variables come
    # from the function's closure cells.
    num_cellvars = len(self.co.co_cellvars)
    if i < num_cellvars:
      return self.vars[i]
    else:
      return self.f.__closure__[i - num_cellvars].cell_contents
  def closure_type(self, i):
    """Returns a TypeConstraint or Const."""
    val = self.get_closure(i)
    if isinstance(val, typehints.TypeConstraint):
      return val
    else:
      return Const(val)
  def get_global(self, i):
    # Resolve a global name to a Const; unknown names fall back to Any.
    name = self.get_name(i)
    if name in self.f.__globals__:
      return Const(self.f.__globals__[name])
    if name in builtins.__dict__:
      return Const(builtins.__dict__[name])
    return Any
  def get_name(self, i):
    return self.co.co_names[i]
  def __repr__(self):
    return 'Stack: %s Vars: %s' % (self.stack, self.vars)
  def __or__(self, other):
    # Element-wise union of two states; None acts as the identity element,
    # so `state | None` and `None | state` (via __ror__) both work.
    if self is None:
      return other.copy()
    elif other is None:
      return self.copy()
    return FrameState(self.f, union_list(self.vars, other.vars), union_list(
        self.stack, other.stack))
  def __ror__(self, left):
    return self | left
def union(a, b):
  """Returns the union of two types or Const values."""
  # Identical or falsy operands short-circuit without building a Union.
  if a == b:
    return a
  if not a:
    return b
  if not b:
    return a
  a = Const.unwrap(a)
  b = Const.unwrap(b)
  # TODO(robertwb): Work this into the Union code in a more generic way.
  if type(a) == type(b):
    # A composite whose element type is the empty union is absorbed by the
    # other operand.
    if element_type(a) == typehints.Union[()]:
      return b
    if element_type(b) == typehints.Union[()]:
      return a
  return typehints.Union[a, b]
def finalize_hints(type_hint):
  """Sets type hint for empty data structures to Any."""
  def patch_empty(constraint, _unused):
    # Only Dict constraints carry separately tracked key/value types.
    if not isinstance(constraint, typehints.DictConstraint):
      return
    empty_union = typehints.Union[()]
    if constraint.key_type == empty_union:
      constraint.key_type = Any
    if constraint.value_type == empty_union:
      constraint.value_type = Any
  if isinstance(type_hint, typehints.TypeConstraint):
    type_hint.visit(patch_empty, None)
def element_type(hint):
  """Returns the element type of a composite type.

  Falls back to Any for non-composite hints.
  """
  hint = Const.unwrap(hint)
  if isinstance(hint, typehints.SequenceTypeConstraint):
    return hint.inner_type
  if isinstance(hint, typehints.TupleHint.TupleConstraint):
    # A tuple's "element type" is the union over its member types.
    return typehints.Union[hint.tuple_types]
  return Any
def key_value_types(kv_type):
  """Returns the key and value type of a KV type.

  Anything that is not a two-element tuple constraint yields (Any, Any).
  """
  # TODO(robertwb): Unions of tuples, etc.
  # TODO(robertwb): Assert?
  is_pair = (isinstance(kv_type, typehints.TupleHint.TupleConstraint)
             and len(kv_type.tuple_types) == 2)
  if is_pair:
    return kv_type.tuple_types
  return Any, Any
# Builtins whose return type is known a priori, bypassing inference.
known_return_types = {
    len: int,
    hash: int,
}
class BoundMethod(object):
  """Used to create a bound method when we only know the type of the instance.
  """

  def __init__(self, func, type):
    """Instantiates a bound method object.

    Args:
      func (types.FunctionType): The method's underlying function
      type (type): The class of the method.
    """
    # Note: the parameter name `type` shadows the builtin but is part of
    # the public signature and therefore kept.
    self.type = type
    self.func = func
def hashable(c):
  """Returns True iff c can be hashed without raising TypeError."""
  try:
    hash(c)
  except TypeError:
    return False
  return True
def infer_return_type(c, input_types, debug=False, depth=5):
  """Analyses a callable to deduce its return type.

  Args:
    c: A Python callable to infer the return type of.
    input_types: A sequence of inputs corresponding to the input types.
    debug: Whether to print verbose debugging information.
    depth: Maximum inspection depth during type inference.

  Returns:
    A TypeConstraint that the return value of this function will (likely)
    satisfy given the specified inputs. Falls back to Any whenever the
    callable cannot be analysed.
  """
  try:
    # Short-circuit builtins with a priori known return types.
    if hashable(c) and c in known_return_types:
      return known_return_types[c]
    elif isinstance(c, types.FunctionType):
      return infer_return_type_func(c, input_types, debug, depth)
    elif isinstance(c, types.MethodType):
      if c.__self__ is not None:
        # Bound method: prepend the receiver as the implicit first argument.
        input_types = [Const(c.__self__)] + input_types
      return infer_return_type_func(c.__func__, input_types, debug, depth)
    elif isinstance(c, BoundMethod):
      # Only the instance's type is known; prepend it as the self argument.
      input_types = [c.type] + input_types
      return infer_return_type_func(c.func, input_types, debug, depth)
    elif inspect.isclass(c):
      # Calling a class constructs an instance of that class; container
      # builtins map to their parameterized Any hints.
      if c in typehints.DISALLOWED_PRIMITIVE_TYPES:
        return {
            list: typehints.List[Any],
            set: typehints.Set[Any],
            tuple: typehints.Tuple[Any, ...],
            dict: typehints.Dict[Any, Any]
        }[c]
      return c
    else:
      return Any
  except TypeInferenceError:
    if debug:
      traceback.print_exc()
    return Any
  except Exception:
    # In debug mode re-raise unexpected errors; otherwise degrade to Any.
    if debug:
      sys.stdout.flush()
      raise
    else:
      return Any
def infer_return_type_func(f, input_types, debug=False, depth=0):
  """Analyses a function to deduce its return type.

  Symbolically executes the function's bytecode, tracking the inferred
  types of locals and stack values, and unions the types observed at all
  RETURN_VALUE / YIELD_VALUE instructions.

  Args:
    f: A Python function object to infer the return type of.
    input_types: A sequence of inputs corresponding to the input types.
    debug: Whether to print verbose debugging information.
    depth: Maximum inspection depth during type inference.

  Returns:
    A TypeConstraint that the return value of this function will (likely)
    satisfy given the specified inputs.

  Raises:
    TypeInferenceError: if no type can be inferred.
  """
  if debug:
    print()
    print(f, id(f), input_types)
    dis.dis(f)
  from . import opcodes
  simple_ops = dict((k.upper(), v) for k, v in opcodes.__dict__.items())
  co = f.__code__
  code = co.co_code
  end = len(code)
  pc = 0
  extended_arg = 0  # Python 2 only.
  free = None
  # Types observed at yield/return instructions.
  yields = set()
  returns = set()
  # TODO(robertwb): Default args via inspect module.
  # Locals beyond the supplied inputs start as the empty union.
  local_vars = list(input_types) + [typehints.Union[()]] * (len(co.co_varnames)
                                                            - len(input_types))
  state = FrameState(f, local_vars)
  # states[offset] is the merged FrameState of all jumps targeting offset.
  states = collections.defaultdict(lambda: None)
  jumps = collections.defaultdict(int)
  # In Python 3, use dis library functions to disassemble bytecode and handle
  # EXTENDED_ARGs.
  is_py3 = sys.version_info[0] == 3
  if is_py3:
    ofs_table = {}  # offset -> instruction
    for instruction in dis.get_instructions(f):
      ofs_table[instruction.offset] = instruction
  # Python 2 - 3.5: 1 byte opcode + optional 2 byte arg (1 or 3 bytes).
  # Python 3.6+: 1 byte opcode + 1 byte arg (2 bytes, arg may be ignored).
  if sys.version_info >= (3, 6):
    inst_size = 2
    opt_arg_size = 0
  else:
    inst_size = 1
    opt_arg_size = 2
  # NOTE(review): last_pc is never reassigned below, so the '-->' marker in
  # the debug output never fires -- confirm whether that is intentional.
  last_pc = -1
  while pc < end:  # pylint: disable=too-many-nested-blocks
    start = pc
    if is_py3:
      instruction = ofs_table[pc]
      op = instruction.opcode
    else:
      op = ord(code[pc])
    if debug:
      print('-->' if pc == last_pc else ' ', end=' ')
      print(repr(pc).rjust(4), end=' ')
      print(dis.opname[op].ljust(20), end=' ')
    pc += inst_size
    if op >= dis.HAVE_ARGUMENT:
      if is_py3:
        arg = instruction.arg
      else:
        arg = ord(code[pc]) + ord(code[pc + 1]) * 256 + extended_arg
      extended_arg = 0
      pc += opt_arg_size
      if op == dis.EXTENDED_ARG:
        # Python 2 only; py3 EXTENDED_ARGs are folded in by dis above.
        extended_arg = arg * 65536
      if debug:
        # Pretty-print the argument according to the opcode's category.
        print(str(arg).rjust(5), end=' ')
        if op in dis.hasconst:
          print('(' + repr(co.co_consts[arg]) + ')', end=' ')
        elif op in dis.hasname:
          print('(' + co.co_names[arg] + ')', end=' ')
        elif op in dis.hasjrel:
          print('(to ' + repr(pc + arg) + ')', end=' ')
        elif op in dis.haslocal:
          print('(' + co.co_varnames[arg] + ')', end=' ')
        elif op in dis.hascompare:
          print('(' + dis.cmp_op[arg] + ')', end=' ')
        elif op in dis.hasfree:
          if free is None:
            free = co.co_cellvars + co.co_freevars
          print('(' + free[arg] + ')', end=' ')
    # Actually emulate the op.
    if state is None and states[start] is None:
      # No control reaches here (yet).
      if debug:
        print()
      continue
    # Merge the fall-through state with any states jumped to this offset.
    state |= states[start]
    opname = dis.opname[op]
    jmp = jmp_state = None
    if opname.startswith('CALL_FUNCTION'):
      if sys.version_info < (3, 6):
        # Each keyword takes up two arguments on the stack (name and value).
        standard_args = (arg & 0xFF) + 2 * (arg >> 8)
        var_args = 'VAR' in opname
        kw_args = 'KW' in opname
        pop_count = standard_args + var_args + kw_args + 1
        if depth <= 0:
          return_type = Any
        elif arg >> 8:
          # TODO(robertwb): Handle this case.
          return_type = Any
        elif isinstance(state.stack[-pop_count], Const):
          # TODO(robertwb): Handle this better.
          if var_args or kw_args:
            state.stack[-1] = Any
            state.stack[-var_args - kw_args] = Any
          # Recurse into the called function with the argument types.
          return_type = infer_return_type(state.stack[-pop_count].value,
                                          state.stack[1 - pop_count:],
                                          debug=debug,
                                          depth=depth - 1)
        else:
          return_type = Any
        state.stack[-pop_count:] = [return_type]
      else:  # Python 3.6+
        if opname == 'CALL_FUNCTION':
          pop_count = arg + 1
          if depth <= 0:
            return_type = Any
          else:
            return_type = infer_return_type(state.stack[-pop_count].value,
                                            state.stack[1 - pop_count:],
                                            debug=debug,
                                            depth=depth - 1)
        elif opname == 'CALL_FUNCTION_KW':
          # TODO(udim): Handle keyword arguments. Requires passing them by name
          # to infer_return_type.
          pop_count = arg + 2
          return_type = Any
        elif opname == 'CALL_FUNCTION_EX':
          # stack[-has_kwargs]: Map of keyword args.
          # stack[-1 - has_kwargs]: Iterable of positional args.
          # stack[-2 - has_kwargs]: Function to call.
          has_kwargs = arg & 1  # type: int
          pop_count = has_kwargs + 2
          if has_kwargs:
            # TODO(udim): Unimplemented. Requires same functionality as a
            # CALL_FUNCTION_KW implementation.
            return_type = Any
          else:
            args = state.stack[-1]
            _callable = state.stack[-2]
            if isinstance(args, typehints.ListConstraint):
              # Case where there's a single var_arg argument.
              args = [args]
            elif isinstance(args, typehints.TupleConstraint):
              args = list(args._inner_types())
            return_type = infer_return_type(_callable.value,
                                            args,
                                            debug=debug,
                                            depth=depth - 1)
        else:
          raise TypeInferenceError('unable to handle %s' % opname)
        state.stack[-pop_count:] = [return_type]
    elif opname == 'CALL_METHOD':
      pop_count = 1 + arg
      # LOAD_METHOD will return a non-Const (Any) if loading from an Any.
      if isinstance(state.stack[-pop_count], Const) and depth > 0:
        return_type = infer_return_type(state.stack[-pop_count].value,
                                        state.stack[1 - pop_count:],
                                        debug=debug,
                                        depth=depth - 1)
      else:
        return_type = typehints.Any
      state.stack[-pop_count:] = [return_type]
    elif opname in simple_ops:
      # Delegate ordinary data-flow opcodes to the opcodes module.
      if debug:
        print("Executing simple op " + opname)
      simple_ops[opname](state, arg)
    elif opname == 'RETURN_VALUE':
      returns.add(state.stack[-1])
      state = None
    elif opname == 'YIELD_VALUE':
      yields.add(state.stack[-1])
    elif opname == 'JUMP_FORWARD':
      jmp = pc + arg
      jmp_state = state
      state = None
    elif opname == 'JUMP_ABSOLUTE':
      jmp = arg
      jmp_state = state
      state = None
    elif opname in ('POP_JUMP_IF_TRUE', 'POP_JUMP_IF_FALSE'):
      # The tested value is consumed on both branches.
      state.stack.pop()
      jmp = arg
      jmp_state = state.copy()
    elif opname in ('JUMP_IF_TRUE_OR_POP', 'JUMP_IF_FALSE_OR_POP'):
      # The tested value stays on the stack on the jump branch only.
      jmp = arg
      jmp_state = state.copy()
      state.stack.pop()
    elif opname == 'FOR_ITER':
      # Jump branch: loop exhausted, iterator popped. Fall-through branch:
      # the next element (the iterable's element type) is pushed.
      jmp = pc + arg
      jmp_state = state.copy()
      jmp_state.stack.pop()
      state.stack.append(element_type(state.stack[-1]))
    else:
      raise TypeInferenceError('unable to handle %s' % opname)
    if jmp is not None:
      # TODO(robertwb): Is this guaranteed to converge?
      new_state = states[jmp] | jmp_state
      # Re-execute backward jumps (loops) until the state stabilizes, but
      # at most 5 times per jump site.
      if jmp < pc and new_state != states[jmp] and jumps[pc] < 5:
        jumps[pc] += 1
        pc = jmp
      states[jmp] = new_state
    if debug:
      print()
      print(state)
      pprint.pprint(dict(item for item in states.items() if item[1]))
  # A generator's hint is Iterable over the union of its yields; otherwise
  # the union of all observed return types.
  if yields:
    result = typehints.Iterable[reduce(union, Const.unwrap_all(yields))]
  else:
    result = reduce(union, Const.unwrap_all(returns))
  finalize_hints(result)
  if debug:
    print(f, id(f), input_types, '->', result)
  return result
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# $Id$
# =============================================================================
## @file eos.py
# (rather ugly) ``Globbing'' utility for EOS
# - it goes through creation of fuse mount point
# - it requires proper installation of EOS&Co
#
# @author Vanya BELYAEV Ivan.Belyaev@itep.ru
# @date 2015-08-15
#
# =============================================================================
""" (rather ugly) ``Globbing'' utility for EOS
- it goes through creation of fuse mount point
- it requires a proper EOS&Co installation
"""
# =============================================================================
# Module metadata, kept in the historic CVS/SVN keyword style.
__version__ = "$Revision$"
__author__ = "Vanya BELYAEV Ivan.Belyaev@itep.ru"
__date__ = "2015-08-15"
__all__ = ( 'EOS',)
# =============================================================================
## @class EOS
# (rather ugly) ``Globbing'' utility for EOS
# @code
# pattern = '/lhcb/grid/user/lhcb/user/i/ibelyaev/29/2014_03/72347/*/*EW*.mdst'
# with EOS() as eos :
# for i in eos.iglob ( pattern ) : print i
# for i in eos.iglob ( '/eos' + pattern ) : print i
# for i in eos.iglob ( 'root://eoslhcb//eos' + pattern ) : print i
# for i in eos.iglob ( 'root://eoslhcb.cern.ch//eos' + pattern ) : print i
# @endcode
# The matched name can be ``rootified'' to be directly used in ROOT.TFile.Open
# @code
# pattern = '/lhcb/grid/user/lhcb/user/i/ibelyaev/29/2014_03/72347/*/*EW*.mdst'
# with EOS() as eos :
# for i in eos.iglob ( pattern , root = True ) : print i
# @endcode
# @author Vanya BELYAEV Ivan.Belyaev@itep.ru
# @date 2015-08-15
class EOS(object):
    """(rather ugly) ``Globbing'' utility for EOS
    >>> pattern = '/lhcb/grid/user/lhcb/user/i/ibelyaev/29/2014_03/72347/*/*EW*.mdst'
    >>> with EOS() as eos :
    ...     for i in eos.iglob ( pattern ) : print i
    ...     for i in eos.iglob ( '/eos' + pattern ) : print i
    ...     for i in eos.iglob ( 'root://eoslhcb//eos' + pattern ) : print i
    ...     for i in eos.iglob ( 'root://eoslhcb.cern.ch//eos' + pattern ) : print i
    The matched name can be ``rootified'' to be directly used in ROOT.TFile.Open
    >>> pattern = '/lhcb/grid/user/lhcb/user/i/ibelyaev/29/2014_03/72347/*/*EW*.mdst'
    >>> with EOS() as eos :
    ...     for i in eos.iglob ( pattern , root = True ) : print i
    """

    def __init__(self,
                 eosmount='eosmount',
                 eosumount='eosumount',
                 setup='/afs/cern.ch/project/eos/installation/client/etc/setup'):
        """Resolve working ``eosmount``/``eosumount`` commands.

        Each command is tried, in order, as a plain executable, as an
        interactive-shell alias, and finally as an alias definition in
        ``<setup>.sh``.

        Raises:
            OSError: if no working command can be resolved.
        """
        from subprocess import Popen, PIPE

        # check that `command` runs cleanly: no stderr output, zero exit code
        def _check_(command):
            try:
                p = Popen(command, stderr=PIPE, stdout=PIPE, shell=True)
                out, err = p.communicate()
                if err.splitlines():
                    return False
                return 0 == p.returncode
            except OSError:
                return False

        # look `command` up among the interactive bash aliases
        def _alias_(command):
            # BUGFIX: the first element of the args list is argv[0]; the old
            # code passed ['-i', '-c', ...] so '-i' became the program name
            # and the interactive flag was silently lost.
            p = Popen(['bash', '-i', '-c', 'alias -p'],
                      stderr=PIPE, stdout=PIPE, executable='/bin/bash')
            out, err = p.communicate()
            if err.splitlines():
                return False
            if 0 != p.returncode:
                return False
            cmd = 'alias %s=' % command.strip()
            for line in out.splitlines():
                # only accept lines that start with "alias <command>="
                if 0 != line.find(cmd):
                    continue
                return line[len(cmd):-1].strip('"').strip("'")
            return False

        # look `command` up in the EOS client setup file
        def _read_setup_(command, setupfile):
            import os
            if not os.path.exists(setupfile):
                return None
            cmd = 'alias %s=' % command
            for line in open(setupfile, 'r'):
                if 0 != line.find(cmd):
                    continue
                return line[len(cmd):-1].strip('"').strip("'")
            return None

        # resolve `command` via direct check, alias, then setup file
        def _getcmd_(command):
            if _check_(command):
                return command
            cmd = _alias_(command)
            if cmd:
                return cmd
            cmd = _read_setup_(command, setup + '.sh')
            if not cmd:
                raise OSError("Can't get correct command for '%s'" % command)
            return cmd

        eosmount = eosmount if _check_(eosmount) else _getcmd_('eosmount')
        # BUGFIX: this line used to read "eosumount = eosmount if ...", i.e.
        # it assigned the *mount* command to eosumount whenever the supplied
        # eosumount command passed the check.
        eosumount = eosumount if _check_(eosumount) else _getcmd_('eosumount')
        # shell snippets with %s placeholders for the mount point directory
        self.cmd_start = """
        %s %%s
        ls -al %%s/lhcb
        """ % eosmount
        self.cmd_exit = """
        ls -al %%s/lhcb
        %s %%s
        """ % eosumount

    ## context manager: create EOS mount point
    def __enter__(self):
        """Create a temporary directory and fuse-mount EOS onto it."""
        import tempfile
        self.tmpdir = tempfile.mkdtemp()
        ## build proper commands:
        self.cmd_start = self.cmd_start % (self.tmpdir, self.tmpdir)
        self.cmd_exit = self.cmd_exit % (self.tmpdir, self.tmpdir)
        self.ok = False
        from subprocess import Popen, PIPE
        p = Popen(self.cmd_start, stderr=PIPE, stdout=PIPE, shell=True)
        out, err = p.communicate()
        for l in err.splitlines():
            if 0 <= l.find('failed'):
                raise OSError('Unable to create EOS mount point, check eosmount/umount')
        if 0 != p.returncode:
            raise OSError('Unable to create EOS mount point, check eosmount/umount (%s)' % p.returncode)
        ## everything fine?
        self.ok = True
        return self

    ## context manager: destroy EOS mount point
    def __exit__(self, *_):
        """Unmount EOS (if mounted) and remove the temporary directory."""
        if self.ok:
            from subprocess import Popen, PIPE
            p = Popen(self.cmd_exit, stderr=PIPE, stdout=PIPE, shell=True)
            out, err = p.communicate()
            self.ok = not err and 0 == p.returncode
        import os
        os.rmdir(self.tmpdir)

    # =========================================================================
    ## ``globbing'' iterator
    #  @code
    #  with EOS() as eos :
    #  ... for i in eos.iglob( pattern ) : print i
    #  @endcode
    def iglob(self, pattern, root=True):
        """
        ``globbing iterator''
        >>> with EOS() as eos :
        ...     for i in eos.iglob( pattern ) : print i
        """
        ## default format:
        if not root:
            fmt = '%s'
        else:
            fmt = 'root://eoslhcb.cern.ch//eos%s'  ## ROOT
        # strip a recognised EOS prefix (keeping the leading '/') and adjust
        # the output format accordingly; at most one prefix can match
        for prefix in ('/eos/',
                       'root://eoslhcb//eos/',
                       'root://eoslhcb.cern.ch//eos/'):
            if 0 == pattern.find(prefix):
                pattern = pattern[len(prefix) - 1:]
                if not root:
                    fmt = '%s%%s' % prefix[:-1]
                else:
                    fmt = 'root://eoslhcb.cern.ch//eos%s'  ## ROOT
                break
        import glob
        l = len(self.tmpdir)
        # glob below the fuse mount point, then strip the mount point off
        for name in glob.iglob(self.tmpdir + pattern):
            yield fmt % name[l:]

    # =========================================================================
    ## Get list of matched names via ``globbing''
    #  @code
    #  with EOS() as eos :
    #  ... files = eos.glob( pattern )
    #  @endcode
    def glob(self, pattern, root=True):
        """Get list of matched names via ``globbing''
        >>> with EOS() as eos :
        ...     files = eos.glob( pattern )
        """
        # BUGFIX: `self.tmpdir` used to be prepended here as well, although
        # iglob already prepends it -- so glob() never matched anything.
        return list(self.iglob(pattern, root=root))
# =============================================================================
if '__main__' == __name__ :
    # Self-test: print a banner and try to glob either the command-line
    # patterns or a default test pattern through a temporary EOS mount.
    from ostap.logger.logger import getLogger
    logger = getLogger ( 'ostap.utils.eos' )
    from ostap import banner
    logger.info ( __file__ + '\n' + banner )
    logger.info ( 80*'*' )
    logger.info ( __doc__ )
    logger.info ( 80*'*' )
    logger.info ( ' Author : %s' % __author__ )
    logger.info ( ' Version : %s' % __version__ )
    logger.info ( ' Date : %s' % __date__ )
    logger.info ( ' Symbols : %s' % list ( __all__ ) )
    logger.info ( 80*'*' )
    import sys
    args = sys.argv[1:]
    if not args :
        # fall back to a well-known test pattern
        args = [ '/lhcb/grid/user/lhcb/user/i/ibelyaev/29/2014_03/72347/*/*EW*.mdst' ]
    files = []
    try :
        with EOS() as eos :
            for pattern in args :
                for f in eos.iglob( pattern ) :
                    logger.info ( f )
    except OSError :
        # EOS mounting failed; report rather than crash
        logger.error( "EOS can't be accessed" )
    logger.info ( 80*'*' )
# =============================================================================
# logging
# =============================================================================
| |
# -*- coding: UTF-8 -*-
# Copyright 2009-2018 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
"""
Defines Lino's **Python serializer and deserializer**. See
:doc:`Specification </specs/dpy>`.
"""
from __future__ import unicode_literals
from __future__ import print_function
# from future import standard_library
# standard_library.install_aliases()
from builtins import str
from builtins import object
import six
import logging
logger = logging.getLogger(__name__)
from pkg_resources import parse_version as PV
#from io import StringIO
import os
#from os.path import dirname
import imp
#from decimal import Decimal
from unipath import Path
# from lino import AFTER17
from django.conf import settings
from django.db import models
from django.utils import translation
from django.utils.module_loading import import_string
from django.utils.encoding import force_str
#from django.db import IntegrityError
from django.core.serializers import base
from django.core.exceptions import ValidationError
#from django.core.exceptions import ObjectDoesNotExist
#from lino.utils.mldbc.fields import BabelCharField, BabelTextField
#from lino.core.choicelists import ChoiceListField
from lino.core.utils import obj2str, full_model_name
SUFFIX = '.py'
def create_mti_child(parent_model, pk, child_model, **kw):
    """Similar to :func:`lino.utils.mti.insert_child`, but for usage in
    Python dumps (generated by :manage:`dump2py`).

    The difference is very tricky. The return value here is an
    "almost normal" model instance, whose `save` and `full_clean`
    methods have been hacked. These are the only methods that will be
    called by :class:`Deserializer`. You should not use this instance
    for anything else and throw it away when the save() has been
    called.
    """
    # The FK that links child_model to this particular parent model (MTI link).
    parent_link_field = child_model._meta.parents.get(parent_model, None)
    if parent_link_field is None:
        raise ValidationError("A %s cannot be parent for a %s" % (
            parent_model.__name__, child_model.__name__))
    # Split off keyword values that belong to fields of the parent model:
    # these must be written to the existing parent row, not to the child.
    pfields = {}
    for f in parent_model._meta.fields:
        if f.name in kw:
            pfields[f.name] = kw.pop(f.name)
    # Point the implicit parent link of the new child row at the given pk.
    kw[parent_link_field.name + "_id"] = pk
    # if ignored:
    #     raise Exception(
    #         "create_mti_child() %s %s from %s : "
    #         "ignored non-local fields %s" % (
    #             child_model.__name__,
    #             pk,
    #             parent_model.__name__,
    #             ignored))
    child_obj = child_model(**kw)
    if len(pfields):
        # Update and validate the already existing parent row.
        parent_obj = parent_model.objects.get(pk=pk)
        for k, v in pfields.items():
            setattr(parent_obj, k, v)
        parent_obj.full_clean()
        parent_obj.save()

    def full_clean(*args, **kw):
        # Deliberate no-op: validation is skipped for the hacked child instance.
        pass

    def save(*args, **kw):
        # Write only the child row as a raw insert, bypassing Model.save().
        kw.update(raw=True, force_insert=True)
        child_obj.save_base(**kw)

    # Monkey-patch the only two methods the Deserializer will call.
    child_obj.save = save
    child_obj.full_clean = full_clean
    return child_obj
SUPPORT_EMPTY_FIXTURES = False  # trying, but doesn't yet work

if SUPPORT_EMPTY_FIXTURES:
    from django_site.utils import AttrDict

    class DummyDeserializedObject(base.DeserializedObject):
        # Placeholder yielded for empty fixtures so Django does not treat
        # them as an error; saving it is a no-op.
        class FakeObject(object):
            _meta = AttrDict(db_table='')
        object = FakeObject()

        def __init__(self):
            pass

        def save(self, *args, **kw):
            pass
class FakeDeserializedObject(base.DeserializedObject):
    """Imitates DeserializedObject required by loaddata.

    Unlike normal DeserializedObject, we *don't want* to bypass
    pre_save and validation methods on the individual objects.
    """

    def __init__(self, deserializer, object, **kw):
        # NOTE(review): base.DeserializedObject.__init__ takes (obj, m2m_data);
        # here `deserializer` lands in the second positional slot -- confirm
        # this is intentional.
        super(FakeDeserializedObject, self).__init__(object, deserializer, **kw)
        self.object = object
        # self.name = name
        self.deserializer = deserializer

    def save(self, *args, **kw):
        """Save the wrapped instance; failures are deferred, not fatal.
        """
        # print 'dpy.py',self.object
        # logger.info("Loading %s...",self.name)
        self.try_save(*args, **kw)
        # if self.try_save(*args,**kw):
        #     self.deserializer.saved += 1
        # else:
        #     self.deserializer.save_later.append(self)

    def try_save(self, *args, **kw):
        """Try to save the specified Model instance `obj`. Return `True`
        on success, `False` if this instance wasn't saved and should be
        deferred.
        """
        obj = self.object
        try:
            """
            """
            # Give the instance a chance to prepare itself before saving.
            m = getattr(obj, 'before_dumpy_save', None)
            if m is not None:
                m(self.deserializer)
            # Validate unless the loader runs in "quick" mode.
            if not self.deserializer.quick:
                try:
                    obj.full_clean()
                except ValidationError as e:
                    # raise Exception("{0} : {1}".format(obj2str(obj), e))
                    raise  # Exception("{0} : {1}".format(obj2str(obj), e))
            obj.save(*args, **kw)
            logger.debug("%s has been saved" % obj2str(obj))
            self.deserializer.register_success()
            return True
        # except ValidationError,e:
        # except ObjectDoesNotExist,e:
        # except (ValidationError,ObjectDoesNotExist), e:
        # except (ValidationError,ObjectDoesNotExist,IntegrityError), e:
        except Exception as e:
            if True:
                if not settings.SITE.loading_from_dump:
                    # hand-written fixtures are expected to yield in savable
                    # order
                    logger.warning("Failed to save %s from manual fixture:" % obj2str(obj))
                    raise
                # If the instance has no FK dependencies at all, deferring
                # cannot help: fail immediately.
                deps = [f.remote_field.model for f in obj._meta.fields
                        if f.remote_field and f.remote_field.model]
                if not deps:
                    logger.exception(e)
                    raise Exception(
                        "Failed to save independent %s." % obj2str(obj))
                # Otherwise defer this object and retry after its
                # dependencies have been saved.
                self.deserializer.register_failure(self, e)
                return False
    # except Exception,e:
    #     logger.exception(e)
    #     raise Exception("Failed to save %s. Abandoned." % obj2str(obj))
class Serializer(base.Serializer):
    """Serializes a QuerySet to a py stream.

    Usage: ``manage.py dumpdata --format py``

    DEPRECATED. The problem with this approach is that a serializer
    creates -by definition- one single file. And Python needs
    -understandably- to load a module completely into memory before it
    can be executed. Use :manage:`dump2py` instead.
    """

    internal_use_only = False

    def serialize(self, queryset, **options):
        # Deliberately unsupported: the dump2py management command must
        # be used instead of dumpdata.
        raise NotImplementedError("Don't use dumpdata but `dump2py`")
class FlushDeferredObjects(object):
    """Sentinel marker class.

    A fixture may yield this class object (not an instance) to request
    that all currently deferred objects get saved before going on.
    """
    pass
class LoaderBase(object):
    """Common logic for loading a Python dump.

    Tracks successfully saved instances and instances whose save failed
    and was deferred until their dependencies become available.
    """

    # When True, full_clean() validation is skipped before each save.
    quick = False
    source_version = None
    # Per-message threshold that triggers an intermediate flush of
    # deferred objects.
    max_deferred_objects = 1000

    def __init__(self):
        # logger.info("20120225 DpyLoader.__init__()")
        # model class -> {error message -> [FakeDeserializedObject, ...]}
        self.save_later = {}
        # (model class, exception class) pairs already logged with traceback
        self.reported_tracebacks = set()
        self.saved = 0
        self.count_objects = 0
        self.AFTER_LOAD_HANDLERS = []
        # populated by Migrator.after_load(), but remains empty in a DpyDeserializer
        self.before_load_handlers = []

    def flush_deferred_objects(self):
        """
        Flush the list of deferred objects.
        """
        # Loop as long as the previous pass made progress (saved > 0)
        # and objects remain deferred.
        while self.saved and self.save_later:
            try_again = []
            for msg_objlist in list(self.save_later.values()):
                for objlist in list(msg_objlist.values()):
                    try_again += objlist
            logger.info("Trying to save %d deferred objects.",
                        len(try_again))
            self.save_later = {}
            self.saved = 0
            for obj in try_again:
                obj.try_save()  # ,*args,**kw):
            logger.info("Saved %d objects.", self.saved)

    def expand(self, obj):
        """Yield FakeDeserializedObject wrappers for whatever a fixture yields:
        a model instance, a (possibly nested) iterable of them, None, or the
        FlushDeferredObjects marker."""
        if obj is None:
            pass  # ignore None values
        elif obj is FlushDeferredObjects:
            self.flush_deferred_objects()
        elif isinstance(obj, models.Model):
            yield FakeDeserializedObject(self, obj)
        elif hasattr(obj, '__iter__'):
            # if type(obj) is GeneratorType:
            # logger.info("20120225 expand iterable %r",obj)
            for o in obj:
                for so in self.expand(o):
                    yield so
        # elif isinstance(obj,MtiChildWrapper):
        # the return value of create_mti_child()
        # yield FakeDeserializedObject(self,obj)
        # obj.deserializer = self
        # yield obj
        else:
            logger.warning("Ignored unknown object %r", obj)

    def register_success(self):
        # Called by FakeDeserializedObject.try_save() after a successful save.
        self.saved += 1
        self.count_objects += 1

    def register_failure(self, obj, e):
        # Defer `obj` after a failed save, grouped by model and error message.
        msg = force_str(e)
        d = self.save_later.setdefault(obj.object.__class__, {})
        l = d.setdefault(msg, [])
        count = len(l)
        if count == 0:
            logger.info("Deferred %s : %s", obj2str(obj.object), msg)
        elif count > self.max_deferred_objects:
            # Too many identical failures: try an intermediate flush, and
            # give up if the pile keeps growing.
            self.flush_deferred_objects()
            if count > self.max_deferred_objects + 1:
                raise Exception(
                    "More than {} deferred objects".format(
                        self.max_deferred_objects))
        l.append(obj)
        # report a full traceback, but only once per model and
        # exception type:
        k = (obj.object.__class__, e.__class__)
        if k not in self.reported_tracebacks:
            logger.exception(e)
            self.reported_tracebacks.add(k)

    def initialize(self):
        """To be called after initdb and before starting to load the dumped
        data."""
        for h in self.before_load_handlers:
            logger.info("Running before_load handler %s", h.__doc__)
            h(self)

    def finalize(self):
        """Flush remaining deferred objects, run after-load handlers and
        report any instances that could not be saved at all.
        """
        self.flush_deferred_objects()
        if len(self.AFTER_LOAD_HANDLERS):
            logger.info(
                "Finalize %d after_load handlers",
                len(self.AFTER_LOAD_HANDLERS))
        for h in self.AFTER_LOAD_HANDLERS:
            logger.info("Running after_load handler %s", h.__doc__)
            h(self)
        # logger.info("Loaded %d objects", self.count_objects)
        if self.save_later:
            # Some objects could never be saved: log a summary per model
            # and error message.
            count = 0
            s = ''
            for model, msg_objects in list(self.save_later.items()):
                for msg, objects in list(msg_objects.items()):
                    if False:  # detailed content of the first object
                        s += "\n- %s %s (%d object(s), e.g. %s)" % (
                            full_model_name(model), msg, len(objects),
                            obj2str(objects[0].object, force_detailed=True))
                    else:  # pk of all objects
                        s += "\n- %s %s (%d object(s) with primary key %s)" % (
                            full_model_name(model), msg, len(objects),
                            ', '.join([str(o.object.pk) for o in objects]))
                    count += len(objects)
            msg = "Abandoning with {} unsaved instances:{}"
            logger.warning(msg.format(count, s))
            # Don't raise an exception. The unsaved instances got lost and
            # the loaddata should be done again, but meanwhile the database
            # is not necessarily invalid and may be used for further testing.
            # And anyway, loaddata would catch it and still continue.
            # raise Exception(msg)
        settings.SITE.loading_from_dump = False
        # reset to False because the same SITE might get reused by
        # Django test runner for other test cases.
class DpyLoader(LoaderBase):
    """Instantiated by :xfile:`restore.py`.
    """

    def __init__(self, globals_dict, quick=None):
        # quick=True skips full_clean() validation (see LoaderBase.quick).
        if quick is not None:
            self.quick = quick
        self.globals_dict = globals_dict
        super(DpyLoader, self).__init__()
        self.source_version = globals_dict['SOURCE_VERSION']
        # Start up the Site and let it install its migrators into us.
        site = globals_dict['settings'].SITE
        site.startup()
        site.install_migrations(self)

    def save(self, obj):
        # Expand whatever restore.py yields and try to save each instance.
        for o in self.expand(obj):
            o.try_save()
class DpyDeserializer(LoaderBase):
    """The Django deserializer for :ref:`dpy`.

    Note that this deserializer explicitly ignores fixtures whose
    source file is located in the current directory because in the case
    of `.py` files this can lead to side effects when importing them.
    See e.g. :ticket:`1029`. We consider it an odd behaviour of
    Django to search for fixtures also in the current directory (and
    not, as `documented
    <https://docs.djangoproject.com/en/1.11/howto/initial-data/#where-django-finds-fixture-files>`__,
    in the `fixtures` subdirs of plugins and the optional
    :setting:`FIXTURE_DIRS`).
    """

    def deserialize(self, fp, **options):
        """Import the fixture file `fp` as a Python module and yield the
        deserialized objects it produces."""
        # logger.info("20120225 DpyLoader.deserialize()")
        if isinstance(fp, six.string_types):
            raise NotImplementedError
        # ignore fixtures in current directory.
        p1 = Path(fp.name).parent.absolute().resolve()
        p2 = Path(os.getcwd()).absolute().resolve()
        if p1 == p2:
            return
        translation.activate(settings.SITE.get_default_language())
        # self.count += 1
        # Build a unique module name for this fixture file.
        fqname = 'lino.dpy_tmp_%s' % abs(hash(fp.name))
        if False:
            # Historical alternative naming scheme, kept for reference.
            parts = fp.name.split(os.sep)
            # parts = os.path.split(fp.name)
            print(parts)
            # fqname = parts[-1]
            fqname = '.'.join([p for p in parts if ':' not in p])
            assert fqname.endswith(SUFFIX)
            fqname = fqname[:-len(SUFFIX)]
            print(fqname)
        desc = (SUFFIX, 'r', imp.PY_SOURCE)
        # logger.info("20160817 %s...", options)
        logger.info("Loading data from %s", fp.name)
        # NOTE(review): the `imp` module is deprecated since Python 3.4;
        # kept here for compatibility with the existing loading scheme.
        module = imp.load_module(fqname, fp, fp.name, desc)
        # module = __import__(filename)
        for o in self.deserialize_module(module, **options):
            yield o

    def deserialize_module(self, module, **options):
        """Yield deserialized objects from an already imported fixture module.

        The module must define a callable ``objects`` which yields model
        instances (or iterables thereof, or FlushDeferredObjects).
        """
        self.initialize()
        empty_fixture = True
        objects = getattr(module, 'objects', None)
        if objects is None:
            logger.info("Fixture %s has no attribute 'objects'" %
                        module.__name__)
        else:
            for obj in objects():
                for o in self.expand(obj):
                    empty_fixture = False
                    yield o
        # # Since Django 1.7 no longer considers empty fixtures as an
        # # error, we don't need to use our trick of yielding the
        # # SiteConfig instance. That trick sometimes could cause side
        # # effects.
        # if empty_fixture and not AFTER17:
        #     if SUPPORT_EMPTY_FIXTURES:
        #         # avoid Django interpreting empty fixtures as an error
        #         yield DummyDeserializedObject()
        #     else:
        #         # To avoid Django interpreting empty fixtures as an
        #         # error, we yield one object which always exists: the
        #         # SiteConfig instance.
        #         # Oops, that will fail in lino_welfare if the company
        #         # pointed to by SiteConfig.job_office had been
        #         # deferred.
        #         if settings.SITE.site_config:
        #             yield FakeDeserializedObject(
        #                 self, settings.SITE.site_config)
        #         else:
        #             raise Exception("""\
        # Fixture %s decided to not create any object.
        # We're sorry, but Django doesn't like that.
        # See <https://code.djangoproject.com/ticket/18213>.
        # """ % module.__name__)
        # logger.info("Saved %d instances from %s.",self.saved,fp.name)
        self.finalize()
def Deserializer(fp, **options):
    """The Deserializer used when ``manage.py loaddata`` encounters a
    `.py` fixture.
    """
    return DpyDeserializer().deserialize(fp, **options)
class Migrator(object):
    """The SITE's Migrator class is instantiated by `install_migrations`.

    If :attr:`migration_class<lino.core.site.Site.migration_class>` is
    `None` (the default), then this class will be
    instantiated. Applications may define their own Migrator class
    which should be a subclasss of this.
    """

    def __init__(self, site, loader):
        self.site = site
        self.loader = loader

    def after_load(self, todo):
        """Declare a function to be called after all data has been loaded."""
        assert callable(todo)
        # al = self.globals_dict['AFTER_LOAD_HANDLERS']
        self.loader.AFTER_LOAD_HANDLERS.append(todo)

    def before_load(self, todo):
        """Declare a function to be called before loading dumped data."""
        assert callable(todo)
        self.loader.before_load_handlers.append(todo)

    def install_migrations(self, loader):
        """
        Install "migrators" into the given global namespace.

        Python dumps are generated with one line near the end of their
        :xfile:`restore.py` file which calls this method, passing it their
        global namespace::

            settings.SITE.install_migrations(globals())

        A dumped fixture should always call this, even if there is no
        version change and no data migration, because this also does
        certain other things:

        - set :attr:`loading_from_dump
          <lino.core.site.Site.loading_from_dump>` to `True`

        - remove any Permission and Site objects that might have been
          generated by `post_syncdb` signal if these apps are installed.
        """
        # NOTE(review): this method reads self.is_installed, self.version
        # and self.migration_class, none of which are defined on Migrator
        # itself; together with the docstring example it looks written to
        # run as a method of the Site object (cf. DpyLoader.__init__ which
        # calls site.install_migrations(self)) -- confirm the receiver.
        globals_dict = loader.globals_dict
        self.loading_from_dump = True
        # if self.is_installed('auth'):
        #     from django.contrib.auth.models import Permission
        #     Permission.objects.all().delete()
        if self.is_installed('sites'):
            from django.contrib.sites.models import Site
            Site.objects.all().delete()
        current_version = self.version
        if current_version is None:
            logger.info("Unversioned Site instance : no database migration")
            return
        if globals_dict['SOURCE_VERSION'] == current_version:
            logger.info("Source version is %s : no migration needed",
                        current_version)
            return
        # Choose the migrator: a custom class or this instance itself.
        if self.migration_class is not None:
            mc = import_string(self.migration_class)
            migrator = mc(self, loader)
        else:
            migrator = self
        # Repeatedly apply migrate_from_X() methods until no further
        # step is found, updating SOURCE_VERSION on each step.
        while True:
            from_version = globals_dict['SOURCE_VERSION']
            funcname = 'migrate_from_' + from_version.replace('.', '_')
            m = getattr(migrator, funcname, None)
            if m is not None:
                # logger.info("Found %s()", funcname)
                to_version = m(globals_dict)
                if not isinstance(to_version, six.string_types):
                    raise Exception("Oops, %s didn't return a string!" % m)
                # Each step must move strictly forward in version order.
                if PV(to_version) <= PV(from_version):
                    raise Exception(
                        "Oops, %s tries to migrate from version %s to %s ?!" %
                        (m, from_version, to_version))
                msg = "Migrating from version %s to %s" % (
                    from_version, to_version)
                if m.__doc__:
                    msg += ":\n" + m.__doc__
                logger.info(msg)
                globals_dict['SOURCE_VERSION'] = to_version
            else:
                if from_version != current_version:
                    logger.warning(
                        "No method for migrating from version %s to %s",
                        from_version, current_version)
                break
def unused_load_fixture_from_module(m, **options):
    """No longer used in unit tests to manually load a given fixture
    module.
    """
    # NOTE: a sanity check on the number of saved objects used to live
    # here (20140506) but disturbed Lino `/tutorials/tables/index`.
    loader = DpyDeserializer()
    for deserialized in loader.deserialize_module(m, **options):
        deserialized.save()
# from functools import wraps
def override(globals_dict):
    """A decorator to be applied when redefining, in a
    :meth:`migrate_from_VERSION` method, one of the
    :func:`create_APP_MODEL` functions defined in the
    :xfile:`restore.py` file of a dump.

    Raises an Exception if `globals_dict` does not already contain a
    function of the same name (you can only *override*, not introduce).
    """
    def override_decorator(func):
        if func.__name__ not in globals_dict:
            raise Exception("Cannot override {}".format(func))
        globals_dict[func.__name__] = func
        # Bug fix: return the function so that the decorated name in the
        # calling namespace keeps pointing at it.  Previously the
        # decorator implicitly returned None, silently rebinding the
        # local name to None.
        return func
    return override_decorator
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import StringIO
from oslo.config import cfg
from nova.virt.libvirt import utils as libvirt_utils
CONF = cfg.CONF
CONF.import_opt('instances_path', 'nova.compute.manager')

# Module-level state read by the fake helpers below.
files = {'console.log': True}   # fake filesystem: name -> content (see File)
disk_sizes = {}                 # path -> size
disk_backing_files = {}         # path -> backing file (see get_disk_backing_file)
disk_type = "qcow2"             # returned by get_disk_type()
def get_iscsi_initiator():
    """Fake: always report the same iSCSI initiator IQN."""
    return 'fake.initiator.iqn'
def get_fc_hbas():
    """Fake: return a list with one hard-coded Fibre Channel HBA record."""
    hba = {
        'ClassDevice': 'host1',
        'ClassDevicePath': '/sys/devices/pci0000:00/0000:00:03.0'
                           '/0000:05:00.2/host1/fc_host/host1',
        'dev_loss_tmo': '30',
        'fabric_name': '0x1000000533f55566',
        'issue_lip': '<store method only>',
        'max_npiv_vports': '255',
        'maxframe_size': '2048 bytes',
        'node_name': '0x200010604b019419',
        'npiv_vports_inuse': '0',
        'port_id': '0x680409',
        'port_name': '0x100010604b019419',
        'port_state': 'Online',
        'port_type': 'NPort (fabric via point-to-point)',
        'speed': '10 Gbit',
        'supported_classes': 'Class 3',
        'supported_speeds': '10 Gbit',
        'symbolic_name': 'Emulex 554M FV4.0.493.0 DV8.3.27',
        'tgtid_bind_type': 'wwpn (World Wide Port Name)',
        'uevent': None,
        'vport_create': '<store method only>',
        'vport_delete': '<store method only>',
    }
    return [hba]
def get_fc_hbas_info():
    """Fake: return connection info derived from the fake HBA list."""
    hba = get_fc_hbas()[0]
    return [{'port_name': hba['port_name'].replace('0x', ''),
             'node_name': hba['node_name'].replace('0x', ''),
             'host_device': hba['ClassDevice'],
             'device_path': hba['ClassDevicePath']}]
def get_fc_wwpns():
    """Fake: return the WWPN of every fake HBA, '0x' prefix stripped."""
    return [hba['port_name'].replace('0x', '') for hba in get_fc_hbas()]
def get_fc_wwnns():
    """Fake: return the WWNN of every fake HBA, '0x' prefix stripped."""
    return [hba['node_name'].replace('0x', '') for hba in get_fc_hbas()]
def create_image(disk_format, path, size):
    """Fake: pretend to create a disk image (no-op)."""
    pass


def create_cow_image(backing_file, path):
    """Fake: pretend to create a COW image (no-op)."""
    pass


def get_disk_backing_file(path):
    """Fake: look up the backing file from module-level state."""
    return disk_backing_files.get(path, None)


def get_disk_type(path):
    """Fake: return the module-level disk_type ('qcow2' by default)."""
    return disk_type


def copy_image(src, dest):
    """Fake: pretend to copy an image (no-op)."""
    pass


def resize2fs(path):
    """Fake: pretend to resize a filesystem (no-op)."""
    pass


def create_lvm_image(vg, lv, size, sparse=False):
    """Fake: pretend to create an LVM image (no-op)."""
    pass


def import_rbd_image(path, *args):
    """Fake: pretend to import an RBD image (no-op)."""
    pass


def volume_group_free_space(vg):
    """Fake: no free-space information available (returns None)."""
    pass


def remove_logical_volumes(*paths):
    """Fake: pretend to remove logical volumes (no-op)."""
    pass


def write_to_file(path, contents, umask=None):
    """Fake: pretend to write a file (no-op)."""
    pass


def chown(path, owner):
    """Fake: pretend to change ownership (no-op)."""
    pass


def extract_snapshot(disk_path, source_fmt, out_path, dest_fmt):
    """Fake: register an empty snapshot file in the fake filesystem."""
    files[out_path] = ''
class File(object):
    """Fake file object backed by the module-level ``files`` dict.

    Usable both as a context manager and via close().
    """

    def __init__(self, path, mode=None):
        # Look up by full path first, then fall back to the basename.
        if path in files:
            self.fp = StringIO.StringIO(files[path])
        else:
            self.fp = StringIO.StringIO(files[os.path.split(path)[-1]])

    def __enter__(self):
        return self.fp

    def __exit__(self, *args):
        return

    def close(self, *args, **kwargs):
        self.fp.close()
def file_open(path, mode=None):
    """Fake: open a path in the fake filesystem (returns a File)."""
    return File(path, mode)


def find_disk(virt_dom):
    """Fake: return a constant disk file name."""
    return "filename"
def load_file(path):
    """Return the contents of *path*, or '' when the path does not exist."""
    if not os.path.exists(path):
        return ''
    with open(path, 'r') as fp:
        return fp.read()
def logical_volume_info(path):
    """Fake: no LVM information available."""
    return {}


def file_delete(path):
    """Fake: pretend deletion always succeeds."""
    return True
def get_fs_info(path):
    """Fake: report a 128 GiB filesystem with 44 GiB used and 84 GiB free."""
    gib = 1024 ** 3
    return {'total': 128 * gib,
            'used': 44 * gib,
            'free': 84 * gib}
def fetch_image(context, target, image_id, user_id, project_id, max_size=0):
    """Fake: pretend to fetch an image (no-op)."""
    pass


def get_instance_path(instance, forceold=False, relative=False):
    """Pass through to the real libvirt_utils implementation."""
    return libvirt_utils.get_instance_path(instance, forceold=forceold,
                                           relative=relative)
def pick_disk_driver_name(hypervisor_version, is_block_dev=False):
    """Fake: the disk driver is always 'qemu', whatever the inputs."""
    return 'qemu'
def list_rbd_volumes(pool):
    """Fake: return a fixed list of RBD volume names (*pool* is ignored)."""
    return ['fakeinstancename.local',
            'fakeinstancename.swap',
            'fakeinstancename',
            'wronginstancename']
def remove_rbd_volumes(pool, *names):
    """Fake: pretend to remove RBD volumes (no-op)."""
    pass
| |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import math
import time
import unittest
import numpy as np
import faiss
from faiss.contrib import datasets
class EvalIVFPQAccuracy(unittest.TestCase):
def get_dataset(self, small_one=False):
if not small_one:
d = 128
nb = 100000
nt = 15000
nq = 2000
else:
d = 32
nb = 10000
nt = 1000
nq = 200
np.random.seed(123)
# generate points in a low-dim subspace to make the resutls
# look better :-)
d1 = 16
q, r = np.linalg.qr(np.random.randn(d, d))
qc = q[:d1, :]
def make_mat(n):
return np.dot(
np.random.random(size=(nb, d1)), qc).astype('float32')
return (make_mat(nt), make_mat(nb), make_mat(nq))
def test_mm(self):
# trouble with MKL+fbmake that appears only at runtime. Check it here
x = np.random.random(size=(100, 20)).astype('float32')
mat = faiss.PCAMatrix(20, 10)
mat.train(x)
mat.apply_py(x)
def do_cpu_to_gpu(self, index_key):
ts = []
ts.append(time.time())
(xt, xb, xq) = self.get_dataset(small_one=True)
nb, d = xb.shape
index = faiss.index_factory(d, index_key)
if index.__class__ == faiss.IndexIVFPQ:
# speed up test
index.pq.cp.niter = 2
index.do_polysemous_training = False
ts.append(time.time())
index.train(xt)
ts.append(time.time())
# adding some ids because there was a bug in this case;
# those need to be cast to idx_t(= int64_t), because
# on windows the numpy int default is int32
ids = (np.arange(nb) * 3 + 12345).astype('int64')
index.add_with_ids(xb, ids)
ts.append(time.time())
index.nprobe = 4
Dref, Iref = index.search(xq, 10)
ts.append(time.time())
res = faiss.StandardGpuResources()
gpu_index = faiss.index_cpu_to_gpu(res, 0, index)
ts.append(time.time())
# Validate the layout of the memory info
mem_info = res.getMemoryInfo()
assert type(mem_info) == dict
assert type(mem_info[0]['FlatData']) == tuple
assert type(mem_info[0]['FlatData'][0]) == int
assert type(mem_info[0]['FlatData'][1]) == int
gpu_index.setNumProbes(4)
Dnew, Inew = gpu_index.search(xq, 10)
ts.append(time.time())
print('times:', [t - ts[0] for t in ts])
# Give us some margin of error
self.assertGreaterEqual((Iref == Inew).sum(), Iref.size - 50)
if faiss.get_num_gpus() == 1:
return
for shard in False, True:
# test on just 2 GPUs
res = [faiss.StandardGpuResources() for i in range(2)]
co = faiss.GpuMultipleClonerOptions()
co.shard = shard
gpu_index = faiss.index_cpu_to_gpu_multiple_py(res, index, co)
faiss.GpuParameterSpace().set_index_parameter(
gpu_index, 'nprobe', 4)
Dnew, Inew = gpu_index.search(xq, 10)
# 0.99: allow some tolerance in results otherwise test
# fails occasionally (not reproducible)
self.assertGreaterEqual((Iref == Inew).sum(), Iref.size * 0.99)
def test_cpu_to_gpu_IVFPQ(self):
self.do_cpu_to_gpu('IVF128,PQ4')
def test_cpu_to_gpu_IVFFlat(self):
self.do_cpu_to_gpu('IVF128,Flat')
def test_set_gpu_param(self):
index = faiss.index_factory(12, "PCAR8,IVF10,PQ4")
res = faiss.StandardGpuResources()
gpu_index = faiss.index_cpu_to_gpu(res, 0, index)
faiss.GpuParameterSpace().set_index_parameter(gpu_index, "nprobe", 3)
class TestShardedFlat(unittest.TestCase):
    """Tests for an IndexFlat sharded over two GPUs."""

    @unittest.skipIf(faiss.get_num_gpus() < 2, "Relevant for multiple GPU only.")
    def test_sharded(self):
        d = 32
        nb = 1000
        nq = 200
        k = 10
        rs = np.random.RandomState(123)
        xb = rs.rand(nb, d).astype('float32')
        xq = rs.rand(nq, d).astype('float32')

        index_cpu = faiss.IndexFlatL2(d)

        assert faiss.get_num_gpus() > 1

        co = faiss.GpuMultipleClonerOptions()
        co.shard = True
        index = faiss.index_cpu_to_all_gpus(index_cpu, co, ngpu=2)

        index.add(xb)
        D, I = index.search(xq, k)

        index_cpu.add(xb)
        D_ref, I_ref = index_cpu.search(xq, k)

        # Sharded GPU search must agree exactly with the CPU reference.
        assert np.all(I == I_ref)

        del index
        # A fresh sharded clone must see the data added to the CPU index.
        index2 = faiss.index_cpu_to_all_gpus(index_cpu, co, ngpu=2)
        D2, I2 = index2.search(xq, k)

        assert np.all(I2 == I_ref)

        # add() on an already sharded index must raise RuntimeError.
        try:
            index2.add(xb)
        except RuntimeError:
            pass
        else:
            assert False, "this call should fail!"
class TestInterleavedIVFPQLayout(unittest.TestCase):
    """Compare the GPU interleaved IVFPQ layout against the CPU index."""

    def test_interleaved(self):
        res = faiss.StandardGpuResources()
        for bits_per_code in [4, 5, 6, 8]:
            d = 128
            nb = 10000
            nq = 20

            rs = np.random.RandomState(123)
            xb = rs.rand(nb, d).astype('float32')
            xq = rs.rand(nq, d).astype('float32')

            nlist = int(math.sqrt(nb))
            sub_q = 16
            nprobe = 16

            config = faiss.GpuIndexIVFPQConfig()
            config.interleavedLayout = True
            idx_gpu = faiss.GpuIndexIVFPQ(res, d, nlist, sub_q, bits_per_code, faiss.METRIC_L2, config)
            q = faiss.IndexFlatL2(d)
            idx_cpu = faiss.IndexIVFPQ(q, d, nlist, sub_q, bits_per_code, faiss.METRIC_L2)

            # Train and add on GPU, then copy to CPU for comparison.
            idx_gpu.train(xb)
            idx_gpu.add(xb)
            idx_gpu.copyTo(idx_cpu)

            idx_gpu.nprobe = nprobe
            idx_cpu.nprobe = nprobe

            k = 20

            # Try without precomputed codes
            d_g, i_g = idx_gpu.search(xq, k)
            d_c, i_c = idx_cpu.search(xq, k)
            self.assertGreaterEqual((i_g == i_c).sum(), i_g.size * 0.9)
            self.assertTrue(np.allclose(d_g, d_c, rtol=5e-5, atol=5e-5))

            # Try with precomputed codes (different kernel)
            idx_gpu.setPrecomputedCodes(True)
            d_g, i_g = idx_gpu.search(xq, k)
            d_c, i_c = idx_cpu.search(xq, k)
            self.assertGreaterEqual((i_g == i_c).sum(), i_g.size * 0.9)
            self.assertTrue(np.allclose(d_g, d_c, rtol=5e-5, atol=5e-5))

    def test_copy_to_cpu(self):
        res = faiss.StandardGpuResources()
        for bits_per_code in [4, 5, 6, 8]:
            d = 128
            nb = 10000
            nq = 20

            rs = np.random.RandomState(234)
            xb = rs.rand(nb, d).astype('float32')
            xq = rs.rand(nq, d).astype('float32')

            nlist = int(math.sqrt(nb))
            sub_q = 16
            # NOTE(review): this assignment clobbers the loop variable, so
            # every iteration effectively tests 8 bits per code -- confirm
            # whether the 4/5/6-bit widths were meant to be exercised here.
            bits_per_code = 8
            nprobe = 4

            config = faiss.GpuIndexIVFPQConfig()
            config.interleavedLayout = True
            idx_gpu = faiss.GpuIndexIVFPQ(res, d, nlist, sub_q, bits_per_code, faiss.METRIC_L2, config)
            q = faiss.IndexFlatL2(d)
            idx_cpu = faiss.IndexIVFPQ(q, d, nlist, sub_q, bits_per_code, faiss.METRIC_L2)

            # Train/add on GPU, copy GPU -> CPU, then compare searches.
            idx_gpu.train(xb)
            idx_gpu.add(xb)
            idx_gpu.copyTo(idx_cpu)

            idx_gpu.nprobe = nprobe
            idx_cpu.nprobe = nprobe

            # Try without precomputed codes
            d_g, i_g = idx_gpu.search(xq, 10)
            d_c, i_c = idx_cpu.search(xq, 10)
            self.assertGreaterEqual((i_g == i_c).sum(), i_g.size * 0.9)
            self.assertTrue(np.allclose(d_g, d_c))

            # Try with precomputed codes (different kernel)
            idx_gpu.setPrecomputedCodes(True)
            d_g, i_g = idx_gpu.search(xq, 10)
            d_c, i_c = idx_cpu.search(xq, 10)
            self.assertGreaterEqual((i_g == i_c).sum(), i_g.size * 0.9)
            self.assertTrue(np.allclose(d_g, d_c))

    def test_copy_to_gpu(self):
        res = faiss.StandardGpuResources()
        for bits_per_code in [4, 5, 6, 8]:
            d = 128
            nb = 10000
            nq = 20

            rs = np.random.RandomState(567)
            xb = rs.rand(nb, d).astype('float32')
            xq = rs.rand(nq, d).astype('float32')

            nlist = int(math.sqrt(nb))
            sub_q = 16
            # NOTE(review): same loop-variable clobber as in test_copy_to_cpu;
            # only 8 bits per code is effectively tested.
            bits_per_code = 8
            nprobe = 4

            config = faiss.GpuIndexIVFPQConfig()
            config.interleavedLayout = True
            idx_gpu = faiss.GpuIndexIVFPQ(res, d, nlist, sub_q, bits_per_code, faiss.METRIC_L2, config)
            q = faiss.IndexFlatL2(d)
            idx_cpu = faiss.IndexIVFPQ(q, d, nlist, sub_q, bits_per_code, faiss.METRIC_L2)

            # Train/add on CPU, copy CPU -> GPU, then compare searches.
            idx_cpu.train(xb)
            idx_cpu.add(xb)
            idx_gpu.copyFrom(idx_cpu)

            idx_gpu.nprobe = nprobe
            idx_cpu.nprobe = nprobe

            # Try without precomputed codes
            d_g, i_g = idx_gpu.search(xq, 10)
            d_c, i_c = idx_cpu.search(xq, 10)
            self.assertGreaterEqual((i_g == i_c).sum(), i_g.size * 0.9)
            self.assertTrue(np.allclose(d_g, d_c))

            # Try with precomputed codes (different kernel)
            idx_gpu.setPrecomputedCodes(True)
            d_g, i_g = idx_gpu.search(xq, 10)
            d_c, i_c = idx_cpu.search(xq, 10)
            self.assertGreaterEqual((i_g == i_c).sum(), i_g.size * 0.9)
            self.assertTrue(np.allclose(d_g, d_c))
# Make sure indices are properly stored in the IVF lists
class TestIVFIndices(unittest.TestCase):
    """Check 64-bit and 32-bit id storage for the GPU IVF index variants."""

    def test_indices_ivfflat(self):
        res = faiss.StandardGpuResources()
        d = 128
        nb = 5000
        nlist = 10

        rs = np.random.RandomState(567)
        xb = rs.rand(nb, d).astype('float32')
        xb_indices_base = np.arange(nb, dtype=np.int64)

        # Force values to not be representable in int32
        xb_indices = (xb_indices_base + 4294967296).astype('int64')

        config = faiss.GpuIndexIVFFlatConfig()
        idx = faiss.GpuIndexIVFFlat(res, d, nlist, faiss.METRIC_L2, config)
        idx.train(xb)
        idx.add_with_ids(xb, xb_indices)

        # Nearest neighbour of a database vector is itself: its id comes back.
        _, I = idx.search(xb[10:20], 5)
        self.assertTrue(np.array_equal(xb_indices[10:20], I[:, 0]))

        # Store values using 32-bit indices instead
        config.indicesOptions = faiss.INDICES_32_BIT
        idx = faiss.GpuIndexIVFFlat(res, d, nlist, faiss.METRIC_L2, config)
        idx.train(xb)
        idx.add_with_ids(xb, xb_indices)

        _, I = idx.search(xb[10:20], 5)
        # This will strip the high bit
        self.assertTrue(np.array_equal(xb_indices_base[10:20], I[:, 0]))

    def test_indices_ivfpq(self):
        res = faiss.StandardGpuResources()
        d = 128
        nb = 5000
        nlist = 10
        M = 4
        nbits = 8

        rs = np.random.RandomState(567)
        xb = rs.rand(nb, d).astype('float32')
        xb_indices_base = np.arange(nb, dtype=np.int64)

        # Force values to not be representable in int32
        xb_indices = (xb_indices_base + 4294967296).astype('int64')

        config = faiss.GpuIndexIVFPQConfig()
        idx = faiss.GpuIndexIVFPQ(res, d, nlist, M, nbits,
                                  faiss.METRIC_L2, config)
        idx.train(xb)
        idx.add_with_ids(xb, xb_indices)

        _, I = idx.search(xb[10:20], 5)
        self.assertTrue(np.array_equal(xb_indices[10:20], I[:, 0]))

        # Store values using 32-bit indices instead
        config.indicesOptions = faiss.INDICES_32_BIT
        idx = faiss.GpuIndexIVFPQ(res, d, nlist, M, nbits,
                                  faiss.METRIC_L2, config)
        idx.train(xb)
        idx.add_with_ids(xb, xb_indices)

        _, I = idx.search(xb[10:20], 5)
        # This will strip the high bit
        self.assertTrue(np.array_equal(xb_indices_base[10:20], I[:, 0]))

    def test_indices_ivfsq(self):
        res = faiss.StandardGpuResources()
        d = 128
        nb = 5000
        nlist = 10
        qtype = faiss.ScalarQuantizer.QT_4bit

        rs = np.random.RandomState(567)
        xb = rs.rand(nb, d).astype('float32')
        xb_indices_base = np.arange(nb, dtype=np.int64)

        # Force values to not be representable in int32
        xb_indices = (xb_indices_base + 4294967296).astype('int64')

        config = faiss.GpuIndexIVFScalarQuantizerConfig()
        idx = faiss.GpuIndexIVFScalarQuantizer(res, d, nlist, qtype,
                                               faiss.METRIC_L2, True, config)
        idx.train(xb)
        idx.add_with_ids(xb, xb_indices)

        _, I = idx.search(xb[10:20], 5)
        self.assertTrue(np.array_equal(xb_indices[10:20], I[:, 0]))

        # Store values using 32-bit indices instead
        config.indicesOptions = faiss.INDICES_32_BIT
        idx = faiss.GpuIndexIVFScalarQuantizer(res, d, nlist, qtype,
                                               faiss.METRIC_L2, True, config)
        idx.train(xb)
        idx.add_with_ids(xb, xb_indices)

        _, I = idx.search(xb[10:20], 5)
        # This will strip the high bit
        self.assertTrue(np.array_equal(xb_indices_base[10:20], I[:, 0]))
class TestSQ_to_gpu(unittest.TestCase):
    """Check that a scalar-quantizer flat index survives CPU -> GPU cloning."""

    def test_sq_cpu_to_gpu(self):
        res = faiss.StandardGpuResources()
        index = faiss.index_factory(32, "SQfp16")
        index.add(np.random.rand(1000, 32).astype(np.float32))
        gpu_index = faiss.index_cpu_to_gpu(res, 0, index)
        # The SQfp16 flat index must clone to a GpuIndexFlat.
        self.assertIsInstance(gpu_index, faiss.GpuIndexFlat)
class TestInvalidParams(unittest.TestCase):
    """Invalid k / nprobe values must be rejected by the GPU IVFPQ index."""

    def test_indices_ivfpq(self):
        res = faiss.StandardGpuResources()
        d = 128
        nb = 5000
        nlist = 10
        M = 4
        nbits = 8

        rs = np.random.RandomState(567)
        xb = rs.rand(nb, d).astype('float32')
        xb_indices_base = np.arange(nb, dtype=np.int64)

        # Force values to not be representable in int32
        xb_indices = (xb_indices_base + 4294967296).astype('int64')

        config = faiss.GpuIndexIVFPQConfig()
        idx = faiss.GpuIndexIVFPQ(res, d, nlist, M, nbits,
                                  faiss.METRIC_L2, config)
        idx.train(xb)
        idx.add_with_ids(xb, xb_indices)

        # invalid k (should be > 0)
        k = -5
        idx.setNumProbes(3)
        self.assertRaises(AssertionError, idx.search, xb[10:20], k)

        # invalid nprobe (should be > 0)
        self.assertRaises(RuntimeError, idx.setNumProbes, 0)
        self.assertRaises(RuntimeError, idx.setNumProbes, -3)

        # a negative nprobe set via the attribute must fail at search time
        k = 5
        idx.nprobe = -3
        self.assertRaises(RuntimeError, idx.search, xb[10:20], k)

        # valid params
        k = 5
        idx.setNumProbes(3)
        _, I = idx.search(xb[10:20], k)
        self.assertTrue(np.array_equal(xb_indices[10:20], I[:, 0]))
class TestLSQIcmEncoder(unittest.TestCase):
    """Compare the GPU ICM encoder of LocalSearchQuantizer against the CPU one."""

    @staticmethod
    def eval_codec(q, xb):
        # Total squared reconstruction error of quantizer `q` on `xb`.
        codes = q.compute_codes(xb)
        decoded = q.decode(codes)
        return ((xb - decoded) ** 2).sum()

    def subtest_gpu_encoding(self, ngpus):
        """check that the error is in the same as cpu."""
        ds = datasets.SyntheticDataset(32, 1000, 1000, 0)
        xt = ds.get_train()
        xb = ds.get_database()

        M = 4
        nbits = 8

        # CPU reference encoder.
        lsq = faiss.LocalSearchQuantizer(ds.d, M, nbits)
        lsq.train(xt)
        err_cpu = self.eval_codec(lsq, xb)

        # Same quantizer, but encoding runs on ngpus GPUs.
        lsq = faiss.LocalSearchQuantizer(ds.d, M, nbits)
        lsq.train(xt)
        lsq.icm_encoder_factory = faiss.GpuIcmEncoderFactory(ngpus)
        err_gpu = self.eval_codec(lsq, xb)

        # 13804.411 vs 13814.794, 1 gpu
        print(err_gpu, err_cpu)
        # GPU error may be at most 5% worse than the CPU error.
        self.assertLess(err_gpu, err_cpu * 1.05)

    def test_one_gpu(self):
        self.subtest_gpu_encoding(1)

    def test_multiple_gpu(self):
        ngpu = faiss.get_num_gpus()
        self.subtest_gpu_encoding(ngpu)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| |
"""
Copyright (c) 2016, Marcelo Leal
Description: Simple Azure Media Services Rest Python library
License: MIT (see LICENSE.txt file for details)
"""
# amsrest.py - azurerm functions for the AMS Rest Interface
import urllib
import requests
from .restfns import do_auth, do_get, do_post, do_put, do_delete, do_patch, do_sto_put, do_get_url
from .settings import ams_rest_endpoint, ams_auth_endpoint
# get_access_token(accountname, accountkey)
# get access token with ams
def get_access_token(accountname, accountkey):
    """Authenticate with the AMS auth endpoint and return an access token.

    Args:
        accountname: AMS account name (used as OAuth client_id).
        accountkey: AMS account key (used as OAuth client_secret, URL-encoded).
    """
    accountkey_encoded = urllib.parse.quote(accountkey, safe='')
    # NOTE(review): there is a stray space before '&scope' in the form body
    # below; the service appears to tolerate it, but confirm before changing.
    body = "grant_type=client_credentials&client_id=" + accountname + \
        "&client_secret=" + accountkey_encoded + " &scope=urn%3aWindowsAzureMediaServices"
    return do_auth(ams_auth_endpoint, body)
# get_url(access_token)
# get an specific url
def get_url(access_token, endpoint=ams_rest_endpoint, flag=True):
    """GET a specific URL via the shared helper; *flag* is passed through."""
    response = do_get_url(endpoint, access_token, flag)
    return response
# list_media_asset(access_token, oid="")
# list a media asset(s)
def list_media_asset(access_token, oid=""):
    """List all media assets, or a single asset when *oid* is supplied."""
    return helper_list(access_token, oid, '/Assets')
# list_content_keys(access_token, oid="")
# list the content key(s)
def list_content_key(access_token, oid=""):
    """List all content keys, or a single key when *oid* is supplied."""
    return helper_list(access_token, oid, '/ContentKeys')
# list_contentkey_authorization_policy(access_token, oid="")
# list content key authorization policy(ies)
def list_contentkey_authorization_policy(access_token, oid=""):
    """List content key authorization policies, or one when *oid* is given."""
    return helper_list(access_token, oid, '/ContentKeyAuthorizationPolicies')
# list_contentkey_authorization_policy_options(access_token, oid="")
# list content key authorization policy options
def list_contentkey_authorization_policy_options(access_token, oid=""):
    """List content key authorization policy options, or one when *oid* is given."""
    return helper_list(access_token, oid, '/ContentKeyAuthorizationPolicyOptions')
# list_media_processor(access_token, oid="")
# list the media processor(s)
def list_media_processor(access_token, oid=""):
    """List all media processors, or a single one when *oid* is supplied."""
    return helper_list(access_token, oid, '/MediaProcessors')
# list_asset_accesspolicy(access_token, oid="")
# list a asset access policy(ies)
def list_asset_accesspolicy(access_token, oid=""):
    """List all access policies, or a single one when *oid* is supplied."""
    return helper_list(access_token, oid, '/AccessPolicies')
# list_sas_locator(access_token, oid="")
# list a sas locator(s)
def list_sas_locator(access_token, oid=""):
    """List all locators, or a single locator when *oid* is supplied."""
    return helper_list(access_token, oid, '/Locators')
# list_media_job(access_token, oid="")
# list a media job(s)
def list_media_job(access_token, oid=""):
    """List all media jobs, or a single job when *oid* is supplied."""
    return helper_list(access_token, oid, '/Jobs')
# list_asset_delivery_policy(access_token, oid="")
# list an asset delivery policy(ies)
def list_asset_delivery_policy(access_token, oid=""):
    """List all asset delivery policies, or one when *oid* is supplied."""
    return helper_list(access_token, oid, '/AssetDeliveryPolicies')
# list_streaming_endpoint(access_token, oid="")
# list streaming endpoint(s)
def list_streaming_endpoint(access_token, oid=""):
    """List all streaming endpoints, or one when *oid* is supplied."""
    return helper_list(access_token, oid, '/StreamingEndpoints')
# delete_streaming_endpoint(access_token, oid)
# delete a streaming endpoint
def delete_streaming_endpoint(access_token, oid):
    """Delete the streaming endpoint identified by *oid*."""
    return helper_delete(access_token, oid, '/StreamingEndpoints')
# delete_asset_delivery_policy(access_token, oid)
# delete a asset delivery policy
def delete_asset_delivery_policy(access_token, oid):
    """Delete the asset delivery policy identified by *oid*."""
    return helper_delete(access_token, oid, '/AssetDeliveryPolicies')
# delete_asset_accesspolicy(access_token, oid)
# delete a asset access policy
def delete_asset_accesspolicy(access_token, oid):
    """Delete the access policy identified by *oid*."""
    return helper_delete(access_token, oid, '/AccessPolicies')
# delete_sas_locator(access_token, oid)
# delete a sas locator
def delete_sas_locator(access_token, oid):
    """Delete the locator identified by *oid*."""
    return helper_delete(access_token, oid, '/Locators')
# delete_content_key(access_token, oid)
# delete a content key
def delete_content_key(access_token, oid):
    """Delete the content key identified by *oid*."""
    return helper_delete(access_token, oid, '/ContentKeys')
# delete_contentkey_authorization_policy(access_token, oid)
# delete a content key authorization policy
def delete_contentkey_authorization_policy(access_token, oid):
    """Delete the content key authorization policy identified by *oid*."""
    return helper_delete(access_token, oid, '/ContentKeyAuthorizationPolicies')
# delete_contentkey_authorization_policy_options(access_token, oid)
# delete content key authorization policy options
def delete_contentkey_authorization_policy_options(access_token, oid):
    """Delete the content key authorization policy options identified by *oid*."""
    return helper_delete(access_token, oid, '/ContentKeyAuthorizationPolicyOptions')
# delete_media_asset(access_token, oid)
# delete a media asset
def delete_media_asset(access_token, oid):
    """Delete the media asset identified by *oid*."""
    return helper_delete(access_token, oid, '/Assets')
# create_media_asset(access_token, name, options="0")
# create a media asset
def create_media_asset(access_token, name, options="0"):
    """Create a media asset named *name*; *options* selects creation options."""
    path = '/Assets'
    payload = '{"Name": "' + name + '", "Options": "' + str(options) + '"}'
    return do_post(ams_rest_endpoint + path, path, payload, access_token)
# create_media_assetfile(access_token, parent_asset_id, name, is_primary="false", is_encrypted="false", encryption_scheme="None", encryptionkey_id="None")
# create a media assetfile
def create_media_assetfile(access_token, parent_asset_id, name, is_primary="false", is_encrypted="false", encryption_scheme="None", encryptionkey_id="None"):
    """Create an asset file entity attached to *parent_asset_id*.

    Args:
        access_token: bearer token for the AMS REST API.
        parent_asset_id: id of the asset the file belongs to.
        name: file name.
        is_primary / is_encrypted: JSON boolean strings ("true"/"false").
        encryption_scheme: "StorageEncryption" selects the encrypted payload.
        encryptionkey_id: content key id, used only with StorageEncryption.
    """
    path = '/Files'
    endpoint = ''.join([ams_rest_endpoint, path])
    # Two payload shapes: encrypted files carry the encryption metadata.
    # NOTE(review): MimeType is hard-coded to video/mp4 in both branches.
    if (encryption_scheme == "StorageEncryption"):
        body = '{ \
"IsEncrypted": "' + is_encrypted + '", \
"EncryptionScheme": "' + encryption_scheme + '", \
"EncryptionVersion": "' + "1.0" + '", \
"EncryptionKeyId": "' + encryptionkey_id + '", \
"IsPrimary": "' + is_primary + '", \
"MimeType": "video/mp4", \
"Name": "' + name + '", \
"ParentAssetId": "' + parent_asset_id + '" \
}'
    else:
        body = '{ \
"IsPrimary": "' + is_primary + '", \
"MimeType": "video/mp4", \
"Name": "' + name + '", \
"ParentAssetId": "' + parent_asset_id + '" \
}'
    return do_post(endpoint, path, body, access_token)
# create_sas_locator(access_token, asset_id, accesspolicy_id)
# create a sas locator
def create_sas_locator(access_token, asset_id, accesspolicy_id):
    """Create a SAS locator (Type 1) binding *asset_id* to *accesspolicy_id*."""
    path = '/Locators'
    endpoint = ''.join([ams_rest_endpoint, path])
    #body = '{"AccessPolicyId":"' + accesspolicy_id + '", "AssetId":"' + asset_id + '", "StartTime":"' + starttime + '", "Type":1 }'
    body = '{ \
"AccessPolicyId":"' + accesspolicy_id + '", \
"AssetId":"' + asset_id + '", \
"Type":1 \
}'
    return do_post(endpoint, path, body, access_token)
# create_asset_delivery_policy(access_token, asset_id, accesspolicy_id)
# create an asset delivery policy
def create_asset_delivery_policy(access_token, ams_account):
    """Create an asset delivery policy pointing at *ams_account*'s key delivery host.

    NOTE(review): the comment above this function in the original file advertises
    (asset_id, accesspolicy_id) parameters, but the function takes *ams_account*.
    The protocol/type codes ("4"/"3") and configuration key ("2") are numeric
    enum values — confirm their meaning against the classic AMS REST reference.
    """
    path = '/AssetDeliveryPolicies'
    endpoint = ''.join([ams_rest_endpoint, path])
    body = '{ \
"Name":"AssetDeliveryPolicy", \
"AssetDeliveryProtocol":"4", \
"AssetDeliveryPolicyType":"3", \
"AssetDeliveryConfiguration":"[{ \
\\"Key\\":\\"2\\", \
\\"Value\\":\\"https://' + ams_account + '.keydelivery.mediaservices.windows.net/\\"}]" \
}'
    return do_post(endpoint, path, body, access_token)
# create_media_task(access_token, processor_id, asset_id, content)
# create a media task
def create_media_task(access_token, processor_id, asset_id, content):
    """Create a media Task; *content* is posted verbatim as the request body.

    NOTE: processor_id and asset_id are not referenced by this function.
    """
    path = '/Tasks'
    return do_post(ams_rest_endpoint + path, path, content, access_token)
# create_media_job(access_token, processor_id, asset_id, content)
# create a media job
def create_media_job(access_token, processor_id, asset_id, content):
    """Create a media Job; *content* is posted verbatim as the request body.

    NOTE: processor_id and asset_id are not referenced by this function.
    """
    path = '/Jobs'
    return do_post(ams_rest_endpoint + path, path, content, access_token)
# create_contentkey_authorization_policy(access_token, processor_id, asset_id, content)
# create content key authorization policy
def create_contentkey_authorization_policy(access_token, content):
    """Create a content key authorization policy from a prebuilt *content* body."""
    path = '/ContentKeyAuthorizationPolicies'
    return do_post(ams_rest_endpoint + path, path, content, access_token)
# create_contentkey_authorization_policy_options(access_token, processor_id, asset_id, content)
# create content key authorization policy options
def create_contentkey_authorization_policy_options(access_token, key_delivery_type="2", name="HLS Open Authorization Policy", key_restriction_type="0"):
    """Create content key authorization policy options.

    Args:
        access_token: bearer token for the AMS REST API.
        key_delivery_type: numeric KeyDeliveryType code (default "2").
        name: name used for the single restriction entry.
        key_restriction_type: numeric KeyRestrictionType code (default "0").

    Fix: key_delivery_type and key_restriction_type were previously ignored —
    the values 2 and 0 were hard-coded in the body. They are now interpolated,
    with defaults preserving the original payload byte-for-byte.
    """
    path = '/ContentKeyAuthorizationPolicyOptions'
    endpoint = ''.join([ams_rest_endpoint, path])
    body = '{ \
"Name":"policy",\
"KeyDeliveryType":' + str(key_delivery_type) + ', \
"KeyDeliveryConfiguration":"", \
"Restrictions":[{ \
"Name":"' + name + '", \
"KeyRestrictionType":' + str(key_restriction_type) + ', \
"Requirements":null \
}] \
}'
    return do_post(endpoint, path, body, access_token, "json_only")
# create_ondemand_streaming_locator(access_token, encoded_asset_id, asset_id, pid, starttime="None")
# create an ondemand streaming locator
def create_ondemand_streaming_locator(access_token, encoded_asset_id, pid, starttime=None):
    """Create an on-demand origin (Type 2) streaming locator for an encoded asset.

    Args:
        access_token: bearer token for the AMS REST API.
        encoded_asset_id: id of the encoded asset to publish.
        pid: access policy id attached to the locator.
        starttime: optional start time; omitted from the payload when None.
    """
    path = '/Locators'
    endpoint = ''.join([ams_rest_endpoint, path])
    if(starttime == None):
        body = '{ \
"AccessPolicyId":"' + pid + '", \
"AssetId":"' + encoded_asset_id + '", \
"Type": "2" \
}'
    else:
        # Same payload plus an explicit StartTime for delayed availability.
        body = '{ \
"AccessPolicyId":"' + pid + '", \
"AssetId":"' + encoded_asset_id + '", \
"StartTime":"' + str(starttime) + '", \
"Type": "2" \
}'
    return do_post(endpoint, path, body, access_token, "json_only")
# create_asset_accesspolicy(access_token, duration)
# create an asset access policy
def create_asset_accesspolicy(access_token, name, duration, permission="1"):
    """Create an access policy.

    Args:
        access_token: bearer token for the AMS REST API.
        name: policy name.
        duration: policy duration in minutes (string).
        permission: numeric permission flags as a string (default "1").
    """
    path = '/AccessPolicies'
    endpoint = ''.join([ams_rest_endpoint, path])
    body = '{ \
"Name": "' + str(name) + '", \
"DurationInMinutes": "' + duration + '", \
"Permissions": "' + permission + '" \
}'
    return do_post(endpoint, path, body, access_token)
# create_streaming_endpoint(access_token, name, options="0")
# create a streaming endpoint
def create_streaming_endpoint(access_token, name, description="New Streaming Endpoint", scale_units="1"):
    """Create a streaming endpoint with permissive cross-site access policies.

    Args:
        access_token: bearer token for the AMS REST API.
        name: endpoint name.
        description: human-readable description.
        scale_units: number of streaming units as a string.
    """
    path = '/StreamingEndpoints'
    endpoint = ''.join([ams_rest_endpoint, path])
    # Created/LastModified/State/HostName are server-managed; they are sent
    # as placeholders the service overwrites. The embedded XML policies allow
    # access from any domain (wide open) — tighten for production use.
    body = '{ \
"Id":null, \
"Name":"' + name + '", \
"Description":"' + description + '", \
"Created":"0001-01-01T00:00:00", \
"LastModified":"0001-01-01T00:00:00", \
"State":null, \
"HostName":null, \
"ScaleUnits":"' + scale_units + '", \
"CrossSiteAccessPolicies":{ \
"ClientAccessPolicy":"<access-policy><cross-domain-access><policy><allow-from http-request-headers=\\"*\\"><domain uri=\\"http://*\\" /></allow-from><grant-to><resource path=\\"/\\" include-subpaths=\\"false\\" /></grant-to></policy></cross-domain-access></access-policy>", \
"CrossDomainPolicy":"<?xml version=\\"1.0\\"?><!DOCTYPE cross-domain-policy SYSTEM \\"http://www.macromedia.com/xml/dtds/cross-domain-policy.dtd\\"><cross-domain-policy><allow-access-from domain=\\"*\\" /></cross-domain-policy>" \
} \
}'
    return do_post(endpoint, path, body, access_token)
# scale_streaming_endpoint(access_token, streaming_endpoint_id, scale_units)
# scale a scale unit
def scale_streaming_endpoint(access_token, streaming_endpoint_id, scale_units):
    """Change the number of scale units on a streaming endpoint."""
    sub_path = "/StreamingEndpoints('" + streaming_endpoint_id + "')/Scale"
    sub_path_encoded = urllib.parse.quote(sub_path, safe='')
    payload = '{"scaleUnits": "' + str(scale_units) + '"}'
    return do_post(ams_rest_endpoint + sub_path_encoded, sub_path_encoded,
                   payload, access_token)
# link_asset_content_key(access_token, asset_id, encryptionkey_id)
# link an asset with a content key
def link_asset_content_key(access_token, asset_id, encryptionkey_id, ams_redirected_rest_endpoint):
    """Associate content key *encryptionkey_id* with asset *asset_id*."""
    sub_path = "/Assets('" + asset_id + "')/$links/ContentKeys"
    sub_path_encoded = urllib.parse.quote(sub_path, safe='')
    key_uri = ams_redirected_rest_endpoint + "ContentKeys('" + encryptionkey_id + "')"
    payload = '{"uri": "' + key_uri + '"}'
    return do_post(ams_rest_endpoint + sub_path_encoded, sub_path_encoded,
                   payload, access_token)
# link_asset_deliver_policy(access_token, asset_id, encryptionkey_id)
# link an asset with a delivery policy
def link_asset_delivery_policy(access_token, asset_id, adp_id, ams_redirected_rest_endpoint):
    """Associate delivery policy *adp_id* with asset *asset_id*."""
    sub_path = "/Assets('" + asset_id + "')/$links/DeliveryPolicies"
    sub_path_encoded = urllib.parse.quote(sub_path, safe='')
    policy_uri = ams_redirected_rest_endpoint + "AssetDeliveryPolicies('" + adp_id + "')"
    payload = '{"uri": "' + policy_uri + '"}'
    return do_post(ams_rest_endpoint + sub_path_encoded, sub_path_encoded,
                   payload, access_token)
# link_contentkey_authorization_policy(access_token, ckap_id, options_id, encryptionkey_id)
# link content key aurhorization policy with options
def link_contentkey_authorization_policy(access_token, ckap_id, options_id, ams_redirected_rest_endpoint):
    """Associate policy options *options_id* with authorization policy *ckap_id*."""
    sub_path = "/ContentKeyAuthorizationPolicies('" + ckap_id + "')/$links/Options"
    sub_path_encoded = urllib.parse.quote(sub_path, safe='')
    options_uri = ams_redirected_rest_endpoint + "ContentKeyAuthorizationPolicyOptions('" + options_id + "')"
    payload = '{"uri": "' + options_uri + '"}'
    # "1.0;NetFx" pins the DataServiceVersion expected by this link endpoint.
    return do_post(ams_rest_endpoint + sub_path_encoded, sub_path_encoded,
                   payload, access_token, "json_only", "1.0;NetFx")
# add_authorization_policy(access_token, oid)
# add a authorization policy
def add_authorization_policy(access_token, ck_id, oid):
    """Attach authorization policy *oid* to content key *ck_id*."""
    payload = '{"AuthorizationPolicyId":"' + oid + '"}'
    return helper_add(access_token, ck_id, '/ContentKeys', payload)
# update_media_assetfile(access_token, parent_asset_id, asset_id, content_length, name)
# update a media assetfile
def update_media_assetfile(access_token, parent_asset_id, asset_id, content_length, name):
    """PATCH an asset file entity with its final content length and metadata.

    Args:
        access_token: bearer token for the AMS REST API.
        parent_asset_id: id of the owning asset.
        asset_id: id of the file entity being updated.
        content_length: uploaded size in bytes.
        name: file name.
    """
    path = '/Files'
    full_path = ''.join([path, "('", asset_id, "')"])
    full_path_encoded = urllib.parse.quote(full_path, safe='')
    endpoint = ''.join([ams_rest_endpoint, full_path_encoded])
    # NOTE(review): MimeType is hard-coded to video/mp4, mirroring
    # create_media_assetfile.
    body = '{ \
"ContentFileSize": "' + str(content_length) + '", \
"Id": "' + asset_id + '", \
"MimeType": "video/mp4", \
"Name": "' + name + '", \
"ParentAssetId": "' + parent_asset_id + '" \
}'
    return do_patch(endpoint, full_path_encoded, body, access_token)
# get_delivery_url(access_token, ck_id, key_type)
# get a delivery url
def get_delivery_url(access_token, ck_id, key_type):
    """Request a key delivery URL for content key *ck_id* of the given *key_type*."""
    sub_path = "/ContentKeys('" + ck_id + "')/GetKeyDeliveryUrl"
    # NOTE(review): unlike the other entity helpers, this path is not
    # percent-encoded before use — confirm that is intentional.
    payload = '{"keyDeliveryType": "' + key_type + '"}'
    return do_post(ams_rest_endpoint + sub_path, sub_path, payload, access_token)
# encode_mezzanine_asset(access_token, processor_id, asset_id, output_assetname, json_profile)
# encode a mezzanine asset
def encode_mezzanine_asset(access_token, processor_id, asset_id, output_assetname, json_profile):
    """Submit an encoding Job for a mezzanine asset.

    Args:
        access_token: bearer token for the AMS REST API.
        processor_id: media processor performing the encode.
        asset_id: input asset id.
        output_assetname: name for both the job and its output asset.
        json_profile: encoder preset, embedded verbatim in the task Configuration.
    """
    path = '/Jobs'
    endpoint = ''.join([ams_rest_endpoint, path])
    # The input asset is referenced by its own entity URI in __metadata.uri.
    assets_path = ''.join(["/Assets", "('", asset_id, "')"])
    assets_path_encoded = urllib.parse.quote(assets_path, safe='')
    endpoint_assets = ''.join([ams_rest_endpoint, assets_path_encoded])
    body = '{ \
"Name":"' + output_assetname + '", \
"InputMediaAssets":[{ \
"__metadata":{ \
"uri":"' + endpoint_assets + '" \
} \
}], \
"Tasks":[{ \
"Configuration":\'' + json_profile + '\', \
"MediaProcessorId":"' + processor_id + '", \
"TaskBody":"<?xml version=\\"1.0\\" encoding=\\"utf-16\\"?><taskBody><inputAsset>JobInputAsset(0)</inputAsset><outputAsset assetCreationOptions=\\"0\\" assetName=\\"' + output_assetname + '\\">JobOutputAsset(0)</outputAsset></taskBody>" \
}] \
}'
    return do_post(endpoint, path, body, access_token)
# validate_mp4_asset(access_token, processor_id, asset_id, output_assetname)
# validate a mp4 asset
def validate_mp4_asset(access_token, processor_id, asset_id, output_assetname):
    """Submit a Job that runs the MP4 Preprocessor to validate an encoded MP4.

    Args:
        access_token: bearer token for the AMS REST API.
        processor_id: media processor id for the MP4 preprocessor task.
        asset_id: input (encoded) asset id.
        output_assetname: name of the output asset created by the task.
    """
    path = '/Jobs'
    endpoint = ''.join([ams_rest_endpoint, path])
    # The input asset is referenced by its own entity URI in __metadata.uri.
    assets_path = ''.join(["/Assets", "('", asset_id, "')"])
    assets_path_encoded = urllib.parse.quote(assets_path, safe='')
    endpoint_assets = ''.join([ams_rest_endpoint, assets_path_encoded])
    # The Configuration payload is the Transform Manager MP4Preprocessor task
    # definition (HLSRequired=true, SmoothRequired=false).
    body = '{ \
"Name":"ValidateEncodedMP4", \
"InputMediaAssets":[{ \
"__metadata":{ \
"uri":"' + endpoint_assets + '" \
} \
}], \
"Tasks":[{ \
"Configuration":"<?xml version=\\"1.0\\" encoding=\\"utf-8\\"?><taskDefinition xmlns=\\"http://schemas.microsoft.com/iis/media/v4/TM/TaskDefinition#\\"><name>MP4 Preprocessor</name><id>859515BF-9BA3-4BDD-A3B6-400CEF07F870</id><description xml:lang=\\"en\\" /><inputFolder /><properties namespace=\\"http://schemas.microsoft.com/iis/media/V4/TM/MP4Preprocessor#\\" prefix=\\"mp4p\\"><property name=\\"SmoothRequired\\" value=\\"false\\" /><property name=\\"HLSRequired\\" value=\\"true\\" /></properties><taskCode><type>Microsoft.Web.Media.TransformManager.MP4PreProcessor.MP4Preprocessor_Task, Microsoft.Web.Media.TransformManager.MP4Preprocessor, Version=1.0.0.0, Culture=neutral, PublicKeyToken=31bf3856ad364e35</type></taskCode></taskDefinition>", \
"MediaProcessorId":"' + processor_id + '", \
"TaskBody":"<?xml version=\\"1.0\\" encoding=\\"utf-16\\"?><taskBody><inputAsset>JobInputAsset(0)</inputAsset><outputAsset assetCreationOptions=\\"0\\" assetName=\\"' + output_assetname + '\\">JobOutputAsset(0)</outputAsset></taskBody>" \
}] \
}'
    return do_post(endpoint, path, body, access_token)
### Helpers...
# Generic functions not intended for "external" use...
def helper_add(access_token, ck_id, path, body):
    """PUT *body* to path('ck_id'); shared by the add_* wrappers."""
    resource = path + "('" + ck_id + "')"
    resource_encoded = urllib.parse.quote(resource, safe='')
    return do_put(ams_rest_endpoint + resource_encoded, resource_encoded,
                  body, access_token, "json_only", "1.0;NetFx")
def helper_list(access_token, oid, path):
    """GET a collection, or a single entity path('oid') when *oid* is non-empty."""
    resource = path if oid == "" else path + "('" + oid + "')"
    return do_get(ams_rest_endpoint + resource, resource, access_token)
def helper_delete(access_token, oid, path):
    """DELETE the entity path('oid'); shared by the delete_* wrappers."""
    resource = path + "('" + oid + "')"
    resource_encoded = urllib.parse.quote(resource, safe='')
    return do_delete(ams_rest_endpoint + resource_encoded, resource_encoded,
                     access_token)
### Aux Funcions...
# These are functions that are intended for "external" use, but are not AMS REST API's...
# Translate the numeric options/encryption of the Asset
def translate_asset_options(nr):
    """Map a numeric asset Options code (as a string) to its encryption name.

    Returns None for unrecognized codes, like the original if-chain.
    """
    names = {
        "0": "None",
        "1": "StorageEncrypted",
        "2": "CommonEncryptionProtected",
        "4": "EnvelopeEncryptionProtected",
    }
    return names.get(nr)
# Translate the numeric state of the Jobs
def translate_job_state(nr):
    """Map a numeric job State code (as a string) to its human-readable name.

    Returns None for unrecognized codes, like the original if-chain.
    """
    states = {
        "0": "Queued",
        "1": "Scheduled",
        "2": "Processing",
        "3": "Finished",
        "4": "Error",
        "5": "Canceled",
        "6": "Canceling",
    }
    return states.get(nr)
# Get specific url
def retrieve_url_content(url):
    """GET *url* directly and return the response.

    Fix: the original body referenced undefined names (endpoint, path,
    access_token) and raised NameError on every call; it now fetches the
    *url* argument using the module's `requests` import.
    """
    return requests.get(url)
### Exceptions...
# These, I think, should not be here... ;-)
# upload_block_blob(access_token, endpoint, content, content_length)
# upload a block blob
def upload_block_blob(access_token, endpoint, content, content_length):
    """Upload *content* (with its byte length) as a block blob to *endpoint*."""
    result = do_sto_put(endpoint, content, content_length, access_token)
    return result
| |
#!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
MINIFI_SUBFOLDER = '/nifi/nifi-minifi-cpp/'
APACHE_CLOSER_REPO_JSON_URL = 'https://www.apache.org/dyn/closer.cgi?as_json=1&path=/nifi/nifi-minifi-cpp'
APACHE_MIRROR_LIST = "http://www.apache.org/mirrors/"
import argparse
import sys
if sys.version_info[0] < 3:
from urllib2 import urlopen
input = raw_input
else:
from urllib.request import urlopen
import json
import os.path
import platform
import tarfile
from distutils.util import strtobool
from ftplib import FTP
def install_package(package_name):
    """Best-effort `pip install <package_name>`; return True on success.

    Fix: the original referenced an undefined name `package` instead of the
    `package_name` parameter, so every call raised NameError, which the
    bare except then silently converted into False.
    """
    try:
        import pip
        if hasattr(pip, 'main'):
            pipcode = pip.main(['install', package_name])
        else:
            # Newer pip versions removed pip.main; fall back to the
            # internal entry point (best effort only).
            pipcode = pip._internal.main(['install', package_name])
        return pipcode == 0
    except Exception:
        return False
# Prefer the third-party `distro` package for Linux distribution detection;
# if it is missing, try a best-effort pip install of it and record whether
# it ended up available (the fallback path uses platform instead).
distro_available = False
try:
    import distro
    distro_available = True
except:
    distro_available = install_package("distro")
def get_distro():
    """Return an (id, version, codename)-style triple for the current OS.

    macOS returns the fixed triple ["osx", "", "darwin"]. On Linux the
    `distro` package is used when available, otherwise
    platform.linux_distribution() (removed in newer Python releases, hence
    the broad fallback). Returns ["N/A", "N/A", "N/A"] on any failure.
    """
    if is_mac():
        return ["osx", "", "darwin"]
    try:
        if distro_available:
            return distro.linux_distribution(full_distribution_name=False)
        else:
            return platform.linux_distribution()
    except:
        return ["N/A", "N/A", "N/A"]
def is_mac():
    """Return True when running on macOS (Darwin kernel)."""
    return "Darwin" == platform.system()
def mapped_distro():
    """Return (distro, release), collapsing the Red Hat family to "rhel"."""
    info = get_distro()
    name = info[0].lower()
    codename = info[2].lower()
    if any(tag in name for tag in ("rhel", "red hat", "centos")):
        return "rhel", codename
    return name, codename
def find_closest_mirror():
    """Ask apache.org's closer.cgi for the nearest mirror's ftp URL.

    Returns the first entry of the 'ftp' mirror list, or "" (after printing
    a warning) when the lookup fails.
    """
    try:
        url = urlopen(APACHE_CLOSER_REPO_JSON_URL)
        data = json.loads(url.read().decode())
        # 'ftp' holds the preferred mirrors; take the first one.
        return data['ftp'][0]
    except Exception as e:
        print ("Failed to find closest mirror, please specify one!")
        return ""
def get_release_and_binaries_from_ftp(host, apache_dir, version = None):
    """Return (release_name, binary_names) for the requested/latest release.

    Logs in anonymously to the FTP mirror *host*, lists releases under the
    MiNiFi folder, and then lists the binaries of the chosen release.
    Exits the process (-1) when the requested *version* does not exist.
    """
    ftp = FTP(host)
    ftp.login()
    ftp.cwd(apache_dir + MINIFI_SUBFOLDER)
    # list files with ftplib
    file_list = list(filter(lambda x: any(char.isdigit() for char in x),
                            ftp.nlst("")))  # filters "." and ".." - release names contain a digit
    file_list.sort(reverse=True)
    if not version:
        # Lexicographically greatest name; assumes version strings sort
        # in release order — TODO confirm for double-digit components.
        latest_release = file_list[0]
    else:
        if version not in file_list:
            print("The specified version (" + version + ") doesn't exist. Please use one of the following: " + ", ".join(file_list))
            exit(-1)
        latest_release = version
    ftp.cwd("./" + latest_release)
    binaries = list(filter(lambda x: any(char.isdigit() for char in x), ftp.nlst("")))
    ftp.quit()
    return latest_release, binaries
def download_binary_from_ftp(host, apache_dir, release, binary):
    """Download *binary* of *release* from the mirror into the current directory.

    Returns True on success, False otherwise.

    Fix: when FTP(host) itself failed, the original `finally` block called
    `ftp.quit()` on an unbound name and raised NameError, masking the real
    failure; the connection is now only closed when it was actually opened.
    """
    successful_download = False
    ftp = None
    try:
        ftp = FTP(host)
        ftp.login()
        ftp.cwd(apache_dir + MINIFI_SUBFOLDER + release)
        print ("Downloading: ftp://" + host + "/" + MINIFI_SUBFOLDER + release + "/" + binary)
        with open(os.path.join(os.getcwd(), binary), "wb") as targetfile:
            ftp.retrbinary("RETR " + binary, targetfile.write)
        successful_download = True
    except Exception:
        print("Failed to download binary")
    finally:
        if ftp is not None:
            try:
                ftp.quit()
            except Exception:
                # Closing a dead connection is best-effort.
                pass
    return successful_download
def main(args):
    """Find, choose, download and extract a MiNiFi binary for this system.

    Returns 0 on success, 1 when the user skips or nothing matches,
    -1 on mirror/download/extraction failure.
    """
    print(get_distro())
    binaries = []
    try:
        local_repo = args.mirror if args.mirror else find_closest_mirror()
        print(local_repo)
        # Split "ftp://host/dir..." into host and remote directory.
        # NOTE(review): `dir` shadows the builtin of the same name.
        host, dir = local_repo.replace('ftp://', '').split('/', 1)
        latest_release, binaries = get_release_and_binaries_from_ftp(host, dir, args.version if args.version else None)
    except:
        print("Failed to get binaries from Apache mirror")
        return -1
    # Keep the binaries whose name mentions this distro or its release.
    # NOTE(review): mapped_distro() is loop-invariant and could be hoisted.
    matching_binaries = []
    for binary in binaries:
        distro, release = mapped_distro()
        if release and release in binary:
            matching_binaries.append(binary)
        elif distro and distro in binary:
            matching_binaries.append(binary)
    if not matching_binaries:
        print("No compatible binary found, MiNiFi needs to be compiled locally")
        return 1
    invalid_input = True
    download = None
    selected_binary = None
    if len(matching_binaries) == 1:
        # Single match: simple yes/no prompt, retried until parseable.
        print("A binary in Apache repo seems to match your system: " + matching_binaries[0])
        while invalid_input:
            try:
                download = strtobool(input("Would you like to download? [y/n]"))
                invalid_input = False
                if download:
                    selected_binary = matching_binaries[0]
            except:
                pass
    else:
        # Multiple matches: numbered menu, or "s" to skip.
        print("The following binaries in Apache repo seem to match your system: ")
        for i, item in enumerate(matching_binaries):
            print(str(i + 1) + " - " + item)
        print()
        while invalid_input:
            try:
                user_input = input("Please select one to download (1 to " + str(
                    len(matching_binaries)) + ") or \"s\" to skip and compile locally\n")
                # NOTE(review): the result of lower() is discarded, so an
                # upper-case "S" is not accepted — confirm intent.
                user_input.lower()
                if user_input == "s":
                    invalid_input = False
                    download = False
                    break
                idx = int(user_input) - 1
                if (idx < 0):
                    continue
                selected_binary = matching_binaries[idx]
                download = True
                invalid_input = False
            except:
                pass
    if not download:
        return 1
    if not download_binary_from_ftp(host, dir, latest_release, selected_binary):
        return -1
    try:
        # NOTE(review): extractall() on an untrusted archive is vulnerable to
        # path traversal; consider validating member names first.
        with tarfile.open(os.path.join(os.getcwd(), selected_binary), "r:gz") as tar:
            tar.extractall()
    except:
        print("Failed to extract tar file")
        return -1
    print("Successfully downloaded and extracted MiNiFi")
    return 0
if __name__ == '__main__':
    # CLI entry point: optional mirror URL and release version overrides.
    parser = argparse.ArgumentParser(description="Download latest MiNiFi release")
    parser.add_argument("-m", "--mirror", dest="mirror", help="user-specified apache mirror")
    parser.add_argument("-v", "--version", dest="version", help="user-specified version to be downloaded")
    args = parser.parse_args()
    main(args)
| |
from __future__ import unicode_literals
from django.test import TestCase
from django.utils import six
from .models import Article, Publication
class ManyToManyTests(TestCase):
def setUp(self):
    """Create the shared fixture: four Publications and four Articles.

    p1..p4 are publications; a1..a4 are articles linked to them through the
    m2m `publications` relation. Mixing explicit id=None with omitted ids is
    deliberate — both forms must behave identically.
    """
    # Create a couple of Publications.
    self.p1 = Publication.objects.create(id=None, title='The Python Journal')
    self.p2 = Publication.objects.create(id=None, title='Science News')
    self.p3 = Publication.objects.create(id=None, title='Science Weekly')
    self.p4 = Publication.objects.create(title='Highlights for Children')
    self.a1 = Article.objects.create(id=None, headline='Django lets you build Web apps easily')
    self.a1.publications.add(self.p1)
    self.a2 = Article.objects.create(id=None, headline='NASA uses Python')
    self.a2.publications.add(self.p1, self.p2, self.p3, self.p4)
    self.a3 = Article.objects.create(headline='NASA finds intelligent life on Earth')
    self.a3.publications.add(self.p2)
    self.a4 = Article.objects.create(headline='Oxygen-free diet works wonders')
    self.a4.publications.add(self.p2)
def test_add(self):
    """Forward m2m add(): unsaved guard, idempotence, type check, create()."""
    # Create an Article.
    # NOTE(review): 'reate' is presumably a typo for 'create'; harmless here
    # since no assertion depends on this headline — confirm before changing.
    a5 = Article(id=None, headline='Django lets you reate Web apps easily')
    # You can't associate it with a Publication until it's been saved.
    self.assertRaises(ValueError, getattr, a5, 'publications')
    # Save it!
    a5.save()
    # Associate the Article with a Publication.
    a5.publications.add(self.p1)
    self.assertQuerysetEqual(a5.publications.all(),
                             ['<Publication: The Python Journal>'])
    # Create another Article, and set it to appear in both Publications.
    a6 = Article(id=None, headline='ESA uses Python')
    a6.save()
    a6.publications.add(self.p1, self.p2)
    a6.publications.add(self.p3)
    # Adding a second time is OK (no duplicate rows are created).
    a6.publications.add(self.p3)
    self.assertQuerysetEqual(a6.publications.all(),
        [
            '<Publication: Science News>',
            '<Publication: Science Weekly>',
            '<Publication: The Python Journal>',
        ])
    # Adding an object of the wrong type raises TypeError
    with six.assertRaisesRegex(self, TypeError, "'Publication' instance expected, got <Article.*"):
        a6.publications.add(a5)
    # Add a Publication directly via publications.add by using keyword arguments.
    a6.publications.create(title='Highlights for Adults')
    self.assertQuerysetEqual(a6.publications.all(),
        [
            '<Publication: Highlights for Adults>',
            '<Publication: Science News>',
            '<Publication: Science Weekly>',
            '<Publication: The Python Journal>',
        ])
def test_reverse_add(self):
    """Adding via the reverse (article_set) end of the m2m relation."""
    # Adding via the 'other' end of an m2m
    a5 = Article(headline='NASA finds intelligent life on Mars')
    a5.save()
    self.p2.article_set.add(a5)
    self.assertQuerysetEqual(self.p2.article_set.all(),
        [
            '<Article: NASA finds intelligent life on Earth>',
            '<Article: NASA finds intelligent life on Mars>',
            '<Article: NASA uses Python>',
            '<Article: Oxygen-free diet works wonders>',
        ])
    # The forward side reflects the reverse add.
    self.assertQuerysetEqual(a5.publications.all(),
                             ['<Publication: Science News>'])
    # Adding via the other end using keywords
    self.p2.article_set.create(headline='Carbon-free diet works wonders')
    self.assertQuerysetEqual(
        self.p2.article_set.all(),
        [
            '<Article: Carbon-free diet works wonders>',
            '<Article: NASA finds intelligent life on Earth>',
            '<Article: NASA finds intelligent life on Mars>',
            '<Article: NASA uses Python>',
            '<Article: Oxygen-free diet works wonders>',
        ])
    # Index 3 in the (ordered) reverse set is 'NASA uses Python' (a2).
    a6 = self.p2.article_set.all()[3]
    self.assertQuerysetEqual(a6.publications.all(),
        [
            '<Publication: Highlights for Children>',
            '<Publication: Science News>',
            '<Publication: Science Weekly>',
            '<Publication: The Python Journal>',
        ])
def test_related_sets(self):
    """Both sides of the m2m expose the fixture links from setUp."""
    # Article objects have access to their related Publication objects.
    self.assertQuerysetEqual(self.a1.publications.all(),
                             ['<Publication: The Python Journal>'])
    self.assertQuerysetEqual(self.a2.publications.all(),
        [
            '<Publication: Highlights for Children>',
            '<Publication: Science News>',
            '<Publication: Science Weekly>',
            '<Publication: The Python Journal>',
        ])
    # Publication objects have access to their related Article objects.
    self.assertQuerysetEqual(self.p2.article_set.all(),
        [
            '<Article: NASA finds intelligent life on Earth>',
            '<Article: NASA uses Python>',
            '<Article: Oxygen-free diet works wonders>',
        ])
    self.assertQuerysetEqual(self.p1.article_set.all(),
        [
            '<Article: Django lets you build Web apps easily>',
            '<Article: NASA uses Python>',
        ])
    # A freshly re-fetched Publication sees the same reverse set.
    self.assertQuerysetEqual(Publication.objects.get(id=self.p4.id).article_set.all(),
                             ['<Article: NASA uses Python>'])
def test_selects(self):
    """Kwarg lookups across the m2m: id/pk/instance forms, __in, distinct, exclude."""
    # We can perform kwarg queries across m2m relationships
    self.assertQuerysetEqual(
        Article.objects.filter(publications__id__exact=self.p1.id),
        [
            '<Article: Django lets you build Web apps easily>',
            '<Article: NASA uses Python>',
        ])
    self.assertQuerysetEqual(
        Article.objects.filter(publications__pk=self.p1.id),
        [
            '<Article: Django lets you build Web apps easily>',
            '<Article: NASA uses Python>',
        ])
    # Filtering by raw id or by model instance must be equivalent.
    self.assertQuerysetEqual(
        Article.objects.filter(publications=self.p1.id),
        [
            '<Article: Django lets you build Web apps easily>',
            '<Article: NASA uses Python>',
        ])
    self.assertQuerysetEqual(
        Article.objects.filter(publications=self.p1),
        [
            '<Article: Django lets you build Web apps easily>',
            '<Article: NASA uses Python>',
        ])
    # Joins can yield duplicates ('NASA uses Python' is in both Science
    # publications) until distinct() is applied.
    self.assertQuerysetEqual(
        Article.objects.filter(publications__title__startswith="Science"),
        [
            '<Article: NASA finds intelligent life on Earth>',
            '<Article: NASA uses Python>',
            '<Article: NASA uses Python>',
            '<Article: Oxygen-free diet works wonders>',
        ])
    self.assertQuerysetEqual(
        Article.objects.filter(publications__title__startswith="Science").distinct(),
        [
            '<Article: NASA finds intelligent life on Earth>',
            '<Article: NASA uses Python>',
            '<Article: Oxygen-free diet works wonders>',
        ])
    # The count() function respects distinct() as well.
    self.assertEqual(Article.objects.filter(publications__title__startswith="Science").count(), 4)
    self.assertEqual(Article.objects.filter(publications__title__startswith="Science").distinct().count(), 3)
    # __in accepts ids, instances, or a mix of both.
    self.assertQuerysetEqual(
        Article.objects.filter(publications__in=[self.p1.id, self.p2.id]).distinct(),
        [
            '<Article: Django lets you build Web apps easily>',
            '<Article: NASA finds intelligent life on Earth>',
            '<Article: NASA uses Python>',
            '<Article: Oxygen-free diet works wonders>',
        ])
    self.assertQuerysetEqual(
        Article.objects.filter(publications__in=[self.p1.id, self.p2]).distinct(),
        [
            '<Article: Django lets you build Web apps easily>',
            '<Article: NASA finds intelligent life on Earth>',
            '<Article: NASA uses Python>',
            '<Article: Oxygen-free diet works wonders>',
        ])
    self.assertQuerysetEqual(
        Article.objects.filter(publications__in=[self.p1, self.p2]).distinct(),
        [
            '<Article: Django lets you build Web apps easily>',
            '<Article: NASA finds intelligent life on Earth>',
            '<Article: NASA uses Python>',
            '<Article: Oxygen-free diet works wonders>',
        ])
    # Excluding a related item works as you would expect, too (although the SQL
    # involved is a little complex).
    self.assertQuerysetEqual(Article.objects.exclude(publications=self.p2),
                             ['<Article: Django lets you build Web apps easily>'])
def test_reverse_selects(self):
# Reverse m2m queries are supported (i.e., starting at the table that
# doesn't have a ManyToManyField).
self.assertQuerysetEqual(Publication.objects.filter(id__exact=self.p1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(pk=self.p1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(
Publication.objects.filter(article__headline__startswith="NASA"),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(Publication.objects.filter(article__id__exact=self.a1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(article__pk=self.a1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(article=self.a1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(article=self.a1),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(
Publication.objects.filter(article__in=[self.a1.id, self.a2.id]).distinct(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(
Publication.objects.filter(article__in=[self.a1.id, self.a2]).distinct(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(
Publication.objects.filter(article__in=[self.a1, self.a2]).distinct(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
def test_delete(self):
# If we delete a Publication, its Articles won't be able to access it.
self.p1.delete()
self.assertQuerysetEqual(Publication.objects.all(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
])
self.assertQuerysetEqual(self.a1.publications.all(), [])
# If we delete an Article, its Publications won't be able to access it.
self.a2.delete()
self.assertQuerysetEqual(Article.objects.all(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
    def test_bulk_delete(self):
        """Queryset-level delete() clears m2m references on both sides."""
        # Bulk delete some Publications - references to deleted publications should go
        Publication.objects.filter(title__startswith='Science').delete()
        self.assertQuerysetEqual(Publication.objects.all(),
            [
                '<Publication: Highlights for Children>',
                '<Publication: The Python Journal>',
            ])
        # Articles themselves are untouched by deleting their publications.
        self.assertQuerysetEqual(Article.objects.all(),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        self.assertQuerysetEqual(self.a2.publications.all(),
            [
                '<Publication: Highlights for Children>',
                '<Publication: The Python Journal>',
            ])
        # Bulk delete some articles - references to deleted objects should go
        q = Article.objects.filter(headline__startswith='Django')
        self.assertQuerysetEqual(q, ['<Article: Django lets you build Web apps easily>'])
        q.delete()
        # After the delete, the QuerySet cache needs to be cleared,
        # and the referenced objects should be gone
        self.assertQuerysetEqual(q, [])
        self.assertQuerysetEqual(self.p1.article_set.all(),
            ['<Article: NASA uses Python>'])
    def test_remove(self):
        """remove() deletes an m2m link from either side of the relation."""
        # Removing publication from an article:
        self.assertQuerysetEqual(self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        self.a4.publications.remove(self.p2)
        self.assertQuerysetEqual(self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
            ])
        self.assertQuerysetEqual(self.a4.publications.all(), [])
        # And from the other end
        self.p2.article_set.remove(self.a3)
        self.assertQuerysetEqual(self.p2.article_set.all(),
            [
                '<Article: NASA uses Python>',
            ])
        self.assertQuerysetEqual(self.a3.publications.all(), [])
    def test_assign(self):
        """Assigning to a related manager replaces the set's members."""
        # Relation sets can be assigned. Assignment clears any existing set members
        # NOTE(review): direct assignment to a related manager was removed in
        # Django 2.0 in favor of .set() -- confirm the targeted Django version.
        self.p2.article_set = [self.a4, self.a3]
        self.assertQuerysetEqual(self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        self.assertQuerysetEqual(self.a4.publications.all(),
            ['<Publication: Science News>'])
        # Assignment also accepts primary-key values.
        self.a4.publications = [self.p3.id]
        self.assertQuerysetEqual(self.p2.article_set.all(),
            ['<Article: NASA finds intelligent life on Earth>'])
        self.assertQuerysetEqual(self.a4.publications.all(),
            ['<Publication: Science Weekly>'])
        # An alternate to calling clear() is to assign the empty set
        self.p2.article_set = []
        self.assertQuerysetEqual(self.p2.article_set.all(), [])
        self.a4.publications = []
        self.assertQuerysetEqual(self.a4.publications.all(), [])
    def test_assign_ids(self):
        """Related-manager assignment accepts primary-key values as well as
        instances."""
        # Relation sets can also be set using primary key values
        # NOTE(review): direct assignment to a related manager was removed in
        # Django 2.0 in favor of .set() -- confirm the targeted Django version.
        self.p2.article_set = [self.a4.id, self.a3.id]
        self.assertQuerysetEqual(self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        self.assertQuerysetEqual(self.a4.publications.all(),
            ['<Publication: Science News>'])
        self.a4.publications = [self.p3.id]
        self.assertQuerysetEqual(self.p2.article_set.all(),
            ['<Article: NASA finds intelligent life on Earth>'])
        self.assertQuerysetEqual(self.a4.publications.all(),
            ['<Publication: Science Weekly>'])
    def test_clear(self):
        """clear() empties the relation from either side."""
        # Relation sets can be cleared:
        self.p2.article_set.clear()
        self.assertQuerysetEqual(self.p2.article_set.all(), [])
        self.assertQuerysetEqual(self.a4.publications.all(), [])
        # And you can clear from the other end
        self.p2.article_set.add(self.a3, self.a4)
        self.assertQuerysetEqual(self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        self.assertQuerysetEqual(self.a4.publications.all(),
            [
                '<Publication: Science News>',
            ])
        self.a4.publications.clear()
        self.assertQuerysetEqual(self.a4.publications.all(), [])
        self.assertQuerysetEqual(self.p2.article_set.all(),
            ['<Article: NASA finds intelligent life on Earth>'])
| |
import cv2, sys, os, rospy, math
import numpy as np
from scipy.misc import imresize
# Make the parent directory importable so the sibling ``number_searching``
# package resolves when this file is run directly as a script.
root = os.path.dirname(os.path.abspath(__file__))
root = root+'/..'#'/number_searching'
sys.path.insert(0, root)
# print(root)
print(os.path.dirname(root))
from number_searching.grid_recognition import read_image_from_file,preprocessing_for_number_searching,filter_redundancy_boxes
from number_searching.preprocess_for_number_recognition import draw_box, region_of_interest
# Unused in this view -- presumably an input-path override; confirm callers.
file_dir = None
# When True, extra diagnostics (corner boxes, center dots) are collected/drawn.
is_debug_mode = True
# BGR color used to paint over a detected box's rim; overwritten per box by
# preprocess_for_prompt_light_identify().
draw_prompt_lights_box_color = (255,255,255)
"""
Analysis and filter contours
"""
def analysis_and_filter_contours_for_prompt_lights_searching(contours):
    """Keep only contours whose min-area rectangle looks like a prompt light.

    A candidate must be roughly 50x25 px (2:1 aspect ratio) and lie almost
    horizontally.  Returns the surviving contours, their (possibly
    angle-normalized) ``cv2.minAreaRect`` tuples and, when ``is_debug_mode``
    is set, the matching corner-point boxes.
    """
    target_ratio = 2.0 / 1.0        # expected width : height
    target_width = 50
    target_height = 25
    angle_tolerance = 6             # degrees around horizontal
    ratio_tolerance = 0.2           # +/- 20% on the aspect ratio
    dimension_tolerance = 0.4       # +/- 40% on width/height
    kept_contours = list()
    kept_rects = list()
    kept_boxes = list()
    for contour in contours:
        rect = cv2.minAreaRect(contour)
        width, height = rect[1][0], rect[1][1]
        # Normalize so width >= height by rotating the rect by 90 degrees.
        if width <= height:
            rect = (rect[0], (rect[1][1], rect[1][0]), rect[2] + 90.0)
            width, height = rect[1][0], rect[1][1]
        if height == 0:
            height = -1  # force a negative ratio so the candidate is rejected
        aspect = width / height
        angle = rect[2]
        ratio_ok = ((1.0 - ratio_tolerance) * target_ratio < aspect <
                    (1.0 + ratio_tolerance) * target_ratio)
        width_ok = ((1.0 - dimension_tolerance) * target_width < width <
                    (1.0 + dimension_tolerance) * target_width)
        height_ok = ((1.0 - dimension_tolerance) * target_height < height <
                     (1.0 + dimension_tolerance) * target_height)
        angle_ok = ((-angle_tolerance < angle < angle_tolerance) or
                    angle < (-180 + angle_tolerance) or
                    angle > (180 - angle_tolerance))
        if ratio_ok and width_ok and height_ok and angle_ok:
            kept_contours.append(contour)
            kept_rects.append(rect)
            if is_debug_mode:
                kept_boxes.append(cv2.boxPoints(rect))
    return kept_contours, kept_rects, kept_boxes
"""
Pre-processing image
"""
def preprocessing_for_prompt_lights_searching(src_img):
    """Binarize *src_img* so prompt-light contours can be extracted.

    Converts to grayscale, applies a mean adaptive threshold (15x15 window,
    offset 3) and dilates once with a 3x3 kernel to strengthen outlines.

    :param src_img: BGR source image (numpy array).
    :return: single-channel binary image of the same size.
    """
    gray = cv2.cvtColor(src_img, cv2.COLOR_BGR2GRAY)
    # NOTE(review): the original computed a 5x5 Gaussian blur here but then
    # thresholded the *unblurred* gray image, so the blur was dead code and
    # has been removed.  If the blur was meant to feed the threshold, pass
    # the blurred image as the first argument below instead.
    gray = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                 cv2.THRESH_BINARY, 15, 3)
    # Enhance outlines so findContours picks up closed shapes.
    kernel = np.ones([3, 3], np.uint8)
    gray = cv2.dilate(gray, kernel, iterations=1)
    return gray
"""
This function first classify points into groups by x distance.
Then, pick up the larget group.
"""
def prompt_light_filter_outlier_boxes_by_x_dist(contours, rects, number_boxes):
    """Group detections by horizontal proximity and keep the largest group.

    Every detection starts in its own group; any two detections whose rect
    centers lie within ``x_bin_size`` pixels of each other horizontally are
    added to each other's groups.  The biggest group wins (the prompt lights
    share a vertical column, so they cluster in x).

    :param contours: contours, parallel to *rects*.
    :param rects: ``cv2.minAreaRect``-style tuples ``((cx, cy), (w, h), angle)``.
    :param number_boxes: corner-point boxes, parallel to *rects*.
    :return: ``(contours, rects, boxes, bad)`` of the winning group; ``bad``
        is always empty and kept only for signature compatibility with the
        other filter functions.
    """
    # Robustness: avoid max() on an empty sequence for zero detections.
    if not rects:
        return [], [], [], []
    rect_groups = [[rects[i]] for i in range(len(rects))]
    box_groups = [[number_boxes[i]] for i in range(len(rects))]
    contour_groups = [[contours[i]] for i in range(len(rects))]
    x_bin_size = 10
    # Link every pair of detections that are horizontally close.
    # (The original also computed the full squared distance here, but never
    # used it -- dead code removed.)
    for i in range(len(rects)):
        for j in range(i + 1, len(rects)):
            dist_x = abs(rects[i][0][0] - rects[j][0][0])
            if dist_x < x_bin_size:
                rect_groups[i].append(rects[j])
                rect_groups[j].append(rects[i])
                box_groups[i].append(number_boxes[j])
                box_groups[j].append(number_boxes[i])
                contour_groups[i].append(contours[j])
                contour_groups[j].append(contours[i])
    # Pick the group with the most members (first one on ties).
    group_sizes = [len(group) for group in rect_groups]
    best_index = group_sizes.index(max(group_sizes))
    bad_box_indexs = list()  # kept for API compatibility; never populated here
    good_contours = contour_groups.pop(best_index)
    good_rects = rect_groups.pop(best_index)
    good_boxes = box_groups.pop(best_index)
    return good_contours, good_rects, good_boxes, bad_box_indexs
"""
This function get rid of outlier prompt light boxes
"""
def filter_outlier_boxes(contours, rects, number_boxes):
    """Keep the (up to) five detections closest to the rest of the cluster.

    Scores every rect by the sum of squared distances between its center and
    every other rect's center, then repeatedly pops the lowest-scoring entry.
    NOTE: the input lists are consumed (popped) in the process.

    :return: (contours, rects, boxes) in ascending score order, plus the
        list of the picked entries' scores.
    """
    scores = [0.0] * len(rects)
    for i in range(len(rects)):
        cx_i, cy_i = rects[i][0][0], rects[i][0][1]
        for j in range(i + 1, len(rects)):
            cx_j, cy_j = rects[j][0][0], rects[j][0][1]
            squared = abs(cx_i - cx_j) ** 2 + abs(cy_i - cy_j) ** 2
            scores[i] += squared
            scores[j] += squared
    picked_scores = list()
    picked_contours = list()
    picked_rects = list()
    picked_boxes = list()
    for _ in range(min(5, len(rects))):
        best = scores.index(min(scores))
        picked_scores.append(scores.pop(best))
        picked_contours.append(contours.pop(best))
        picked_rects.append(rects.pop(best))
        picked_boxes.append(number_boxes.pop(best))
    return picked_contours, picked_rects, picked_boxes, picked_scores
"""
This function will extract roi after the prompt lights have been found
"""
def preprocess_for_prompt_light_identify(src_img, rects, number_boxes):
    """Extract a blurred 25x50 ROI for every detected prompt-light box.

    Side effects: paints each box's rim in *src_img* with the color sampled
    at the box center, draws a debug dot at each center, and updates the
    module-global ``draw_prompt_lights_box_color``.

    :param src_img: full BGR frame; modified in place.
    :param rects: minAreaRect tuples (centers used for the debug dots).
    :param number_boxes: corner-point boxes defining each ROI.
    :return: list of 25x50 BGR patches, one per box.
    """
    global draw_prompt_lights_box_color
    number_boxes_regions_list = list()
    box_index = 0
    # src_img = cv2.GaussianBlur(src_img,(51,51),0)
    for box in number_boxes:
        # extract ROI and blur it to pick the most common color in the box
        blur = cv2.GaussianBlur(region_of_interest(src_img, box),(15,15),0)
        # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 --
        # confirm the pinned SciPy version (or port to cv2.resize).
        blur = imresize(blur, [25, 50]) # resize to 25 rows x 50 cols
        # Simply get rid of the rim: sample the patch's center pixel
        # (row 12, col 25) and remember it in the module-global color.
        draw_prompt_lights_box_color = (int(blur[(12,25)][0]),int(blur[(12,25)][1]),int(blur[(12,25)][2]))
        draw_box(src_img, box, draw_prompt_lights_box_color) # paint the rim with the sampled color
        # extract ROI for prompt lights identify (now with the rim painted out)
        blur = cv2.GaussianBlur(region_of_interest(src_img, box),(15,15),0)
        extracted_result = imresize(blur, [25, 50]) # resize
        # extracted result ready for return
        number_boxes_regions_list.append(extracted_result)
        # Debug: mark the box center on the frame.
        box_center = rects[box_index][0]
        cv2.circle(src_img, (int(round(box_center[0])), int(round(box_center[1]))), 1, (0,0,255), 5)
        # update loop variable
        box_index += 1
    return number_boxes_regions_list
"""
Major process of prompt lights searching
"""
def prompt_lights_searching(src_img):
    """Locate the prompt-light boxes in *src_img* and extract their ROIs.

    Pipeline: binarize -> findContours -> shape filter -> redundancy filter
    -> largest x-cluster -> outlier filter -> ROI extraction.  Debug overlays
    (contours, box rims, pixel values) are drawn onto *src_img* in place.

    :param src_img: BGR frame; annotated in place.
    :return: (annotated frame, list of extracted 25x50 ROI patches).
    """
    processed_img = preprocessing_for_prompt_lights_searching(src_img)
    # processed_img = preprocessing_for_number_searching(src_img)
    # src_img = np.copy(processed_img)
    # NOTE(review): 3-value unpacking matches the OpenCV 3.x findContours
    # API; OpenCV 4.x returns only (contours, hierarchy) -- confirm version.
    im2, contours, hierarchy = cv2.findContours(processed_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # cv2.drawContours(src_img, contours, -1, (255,0,0), 3)
    # cv2.fillPoly(src_img, contours, (0, 255, 0))
    # Analysis to get boxes
    contours, rects, number_boxes = analysis_and_filter_contours_for_prompt_lights_searching(contours)
    # cv2.drawContours(src_img, contours, -1, (255,0,255), 3)
    # Avoid redundancy boxes
    contours, rects, number_boxes, _ = filter_redundancy_boxes(contours, rects, number_boxes)
    # Find a largest bin in x direction
    contours, rects, number_boxes, _ = prompt_light_filter_outlier_boxes_by_x_dist(contours, rects, number_boxes)
    # Avoid outliers
    _, rects, number_boxes, _ = filter_outlier_boxes(contours, rects, number_boxes)
    # Extract info for prompt lights identify
    number_boxes_regions_list = preprocess_for_prompt_light_identify(src_img, rects, number_boxes)
    # Only annotate when exactly five lights were found (the expected count).
    if len(rects) == 5:
        for i in range(len(rects)):
            draw_box(src_img, number_boxes[i], (0,255,0)) # draw the rim
            cv2.putText(src_img, str(number_boxes_regions_list[i][(12,25)]), (int(rects[i][0][0]),int(rects[i][0][1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
            # print(src_img[rects[i][0]])
            # print("+++++++++++++")
    # Debug: outline the surviving contours.
    for i in range(len(contours)):
        cv2.drawContours(src_img, contours, i, (255,0,0),3)
        # cv2.fillPoly(src_img, list(contours[i]), (0,255,0))
    return src_img, number_boxes_regions_list
"""
Main function (for testing)
"""
if __name__ == "__main__":
    # --- Manual test harness ---------------------------------------------
    # Image-file variant (disabled; enable by removing the string quoting):
    """
    src_img = read_image_from_file()
    src_img = prompt_lights_searching(src_img)
    cv2.imshow('src_img', src_img)
    key = cv2.waitKey(0)
    """
    # Video-file variant:
    # cam = cv2.VideoCapture('./../Buff2017.mp4')
    cam = cv2.VideoCapture('./../../buff_test_video_01.mpeg')
    # Codec/writer kept for optional recording of the annotated stream.
    fourcc = cv2.VideoWriter_fourcc(*'H264')
    out = None  # cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480))
    frame_num = 1
    segment_num = 1
    frame_rate = 24
    recording = False
    while True:
        ret, frame = cam.read()
        # BUGFIX: the original `assert ret == True` crashed with an
        # AssertionError when the video ended; stop cleanly instead.
        if not ret:
            break
        src_img, number_boxes_regions_list = prompt_lights_searching(frame)
        cv2.imshow('src_img', src_img)
        for i in range(len(number_boxes_regions_list)):
            cv2.imshow(str(i), number_boxes_regions_list[i])
        # BUGFIX: cv2.waitKey requires an int delay; 1000/frame_rate is a
        # float under Python 3 (same value as // under Python 2).
        key = cv2.waitKey(1000 // frame_rate) & 0xff
        if key == ord('q'):
            break
    cv2.destroyAllWindows()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, string, sys, time, platform, random
class TYPE:
    """Character categories used to choose an output color."""
    UNDEFINED = 0  # anything not covered below
    ALPHA = 1      # letters plus '-' and '_'
    QUOTE = 2      # the double-quote character
    DIGIT = 3      # 0-9
    MATH = 4       # arithmetic/comparison characters
    SPECIAL = 5    # shell-ish punctuation (@, ?, &, ...)
class ACTION:
    """Coloring modes for Printer.determine_color()."""
    DEFAULT = 0  # normal per-character-type coloring
    QUOTE = 1    # inside a double-quoted run
    RANDOM = 2   # recolor the current word with the random color
class Printer(object):
    """Simulates a human typist on stdout.

    Characters are emitted one at a time with a drifting inter-character
    delay and are colored by character class using ANSI escape codes.
    Speeds are measured in letters per second (lps).
    """

    MAX_TYPE_SPEED = 100 # measured in letters per second ~1200wpm -> 100lps
    MIN_TYPE_SPEED = 10  # 120wpm converted to lps
    TYPE_SPEED_CHANGE_AMT = 0.5  # per-character speed drift step
    TYPE_SPEED_DEFAULT = 10
    CLEAR = '\033[2J\033[;H'  # ANSI: clear screen + home cursor
    RESET = '\033[0m'
    SHIFT_IN = "\016"   # switch terminal to the alternate character set
    SHIFT_OUT = "\017"  # switch back to the standard character set
    SPECIAL_CHARS = ['@', '?', '&', '|', '%', '!', ':', '\\']
    MATH_CHARS = ['+', '=', '>', '<', '.', '/', '*']

    def __init__(self, shift_in_chance=0):
        """:param shift_in_chance: percent chance (clamped to 0-100) that
        reset() switches the terminal to the alternate character set."""
        self.rand = Random()
        self._type_speed = None
        self.type_delay = None  # seconds slept between characters
        self.override_speed = 0
        # Clamp to a valid percentage.
        self.shift_in_chance = shift_in_chance
        if self.shift_in_chance < 0: self.shift_in_chance = 0
        if self.shift_in_chance > 100: self.shift_in_chance = 100
        self.reset()

    @property
    def type_speed(self): return self._type_speed

    @type_speed.setter
    def type_speed(self, value):
        # Clamp the requested speed, apply any caller override, and derive
        # the per-character delay.
        self._type_speed = value
        if self._type_speed > self.MAX_TYPE_SPEED: self._type_speed = self.MAX_TYPE_SPEED
        elif self._type_speed < self.MIN_TYPE_SPEED: self._type_speed = self.MIN_TYPE_SPEED
        # BUGFIX: was `self.override_speed is not 0` -- an identity test that
        # only works by accident for CPython's cached small ints (and is
        # wrong for floats); compare by value instead.
        if self.override_speed != 0:
            self._type_speed += self.override_speed
        self.type_delay = ((60.0/self.type_speed)/60.0)

    def reset(self):
        """Reset speed/colors and (maybe) toggle the alternate charset."""
        if self.shift_in_chance and self.rand.int(1, 100) <= self.shift_in_chance:
            print (self.SHIFT_IN)
        else:
            print (self.SHIFT_OUT)
        self.override_speed = 0
        self.type_speed = self.TYPE_SPEED_DEFAULT
        self.action = ACTION.DEFAULT
        self.color_list = []
        # One distinct color per character class.
        self.main_color = self.pick_color()
        self.quote_color = self.pick_color()
        self.digit_color = self.pick_color()
        self.math_color = self.pick_color()
        self.special_color = self.pick_color()
        self.random_color = self.pick_color()
        self.alpha_color = self.pick_color()

    def write(self, string, speed=None):
        """Type out *string* character by character.

        :param speed: optional additive speed override (lps), kept until the
                      next reset().
        """
        if speed: self.override_speed = speed
        for char in string:
            color = self.determine_color(char)
            sys.stdout.write('%s%s' % (color, char))
            sys.stdout.flush()
            self.typing_change()
            time.sleep(self.type_delay)

    def backspace(self, length):
        """Move the cursor back *length* characters (without erasing)."""
        for _ in range(length):
            sys.stdout.write('\b')
            sys.stdout.flush()
            self.typing_change()
            time.sleep(self.type_delay)

    def backspace_delete(self, length):
        """Erase the previous *length* characters."""
        for _ in range(length):
            sys.stdout.write('\b \b')
            sys.stdout.flush()
            self.typing_change()
            time.sleep(self.type_delay)

    def pick_color(self):
        """Pick a not-yet-used ANSI color and remember it in color_list."""
        new_color = ''
        # BUGFIX: was `if not platform.system() is 'Windows'` -- an identity
        # comparison against a string literal, which is implementation-
        # dependent; compare by value.  ANSI colors don't work on Windows,
        # so an empty color string is used there.
        if platform.system() != 'Windows':
            new_color = self.rand.unique_ansi_color(self.color_list)
        self.color_list.append(new_color)
        return new_color

    def typing_change(self):
        """Drift the typing speed; rarely (~0.1%) jump to an extreme."""
        if not self.rand.int(0, 1000):
            self.type_speed = self.MAX_TYPE_SPEED if self.rand.int(0, 1) else self.MIN_TYPE_SPEED
        else:
            self.accelerate_typing(self.rand.int(0, 1))

    def accelerate_typing(self, roll):
        """Nudge the speed up (truthy *roll*) or down (falsy *roll*)."""
        if roll:
            self.type_speed += self.TYPE_SPEED_CHANGE_AMT
        else:
            self.type_speed -= self.TYPE_SPEED_CHANGE_AMT

    def determine_color(self, char):
        """Return the ANSI color for *char*, tracking quote/random state."""
        char_type = self.determine_type(char)
        if self.action == ACTION.RANDOM:
            # Keep the random color until the current word ends.
            if char_type == TYPE.ALPHA:
                return self.random_color
            else:
                self.action = ACTION.DEFAULT
            # return self.main_color
        if self.action == ACTION.QUOTE:
            # Inside a quoted run; a closing quote ends it.
            if char_type == TYPE.QUOTE:
                self.action = ACTION.DEFAULT
            return self.quote_color
        elif char_type == TYPE.QUOTE:
            self.action = ACTION.QUOTE
            return self.quote_color
        elif char_type == TYPE.ALPHA:
            return self.alpha_color
        elif char_type == TYPE.DIGIT:
            return self.digit_color
        elif char_type == TYPE.MATH:
            return self.math_color
        elif char_type == TYPE.SPECIAL:
            return self.special_color
        elif self.action == ACTION.DEFAULT:
            # Occasionally (1 in 11 spaces) recolor the next word randomly.
            if char == " " and not self.rand.int(0, 10):
                self.action = ACTION.RANDOM
                return self.random_color
        return self.main_color

    def determine_type(self, char):
        """Classify *char* into one of the TYPE categories."""
        # TODO Detect curly brackets,
        if char.isalpha() or char == "-" or char == "_": return TYPE.ALPHA
        elif char == '\"': return TYPE.QUOTE
        elif char.isdigit(): return TYPE.DIGIT
        elif char in self.MATH_CHARS: return TYPE.MATH
        elif char in self.SPECIAL_CHARS: return TYPE.SPECIAL
        else: return TYPE.UNDEFINED
class Random(random.Random):
    """random.Random subclass with helpers for picking files, ANSI colors
    and short random strings."""

    def __init__(self):
        random.Random.__init__(self)

    def file(self, directory):
        """Return a random regular file name from *directory* (None if none)."""
        entries = os.listdir(directory)
        candidates = [entry for entry in entries
                      if os.path.isfile(os.path.join(directory, entry))]
        return self.safe_choice(candidates)

    def dir(self, directory):
        """Return a random subdirectory name from *directory* (None if none)."""
        entries = os.listdir(directory)
        candidates = [entry for entry in entries
                      if os.path.isdir(os.path.join(directory, entry))]
        return self.safe_choice(candidates)

    def safe_choice(self, color_list):
        """Like choice(), but returns None instead of raising when empty."""
        return self.choice(color_list) if len(color_list) else None

    def ansi_color(self):
        """Random bright ANSI color code in the range \\033[90m-\\033[99m."""
        return '\033[9' + str(self.randint(0, 9)) + 'm'

    def ansi_annotation(self):
        """Random (often ugly) ANSI code in the range \\033[10m-\\033[99m."""
        return '\033[' + str(self.randint(10, 99)) + 'm'

    def unique_ansi_color(self, color_list):
        """Return an ANSI code not contained in any entry of *color_list*."""
        if not len(color_list):
            return self.ansi_color()
        # After all bright colors are taken, fall back to the wider
        # annotation range so something new can always be found.
        generate = self.ansi_annotation if len(color_list) == 8 else self.ansi_color
        candidate = generate()
        while any(candidate in used for used in color_list):
            candidate = generate()
        return candidate

    def int(self, min_index, max_index):
        """Alias for randint()."""
        return self.randint(min_index, max_index)

    def string(self, length):
        """Random alphanumeric string of *length* characters."""
        pool = string.digits + string.ascii_letters
        return ''.join(self.choice(pool) for _ in range(length))
| |
import os
import shutil
from datetime import datetime
import formatting
try:
from management_tools import loggers
except ImportError as e:
print "You need the 'Management Tools' module to be installed first."
print "https://github.com/univ-of-utah-marriott-library-apple/management_tools"
raise e
def nested(origin, destination, replace=True, grain=3, persist=False, update_time=False, logger=None):
    """
    Handles the movement of files from one location to another, but the
    destination will be organized in a nested format, e.g.

        Destination > Year > Month > Day > file

    :param origin: the originating directory, which will be copied
    :param destination: the destination directory where the nesting will be
                        created
    :param replace: if attempting to move a file and it exists in the
                    destination already, should it be replaced or left alone?
    :type replace: bool
    :param grain: how deep to form the nestings
    :type grain: int
    :param persist: whether to leave the original files in-place or delete them
    :type persist: bool
    :param update_time: whether to update the timestamps on files in the
                        destination
    :type update_time: bool
    :param logger: a Management Tools logger to record information
    """
    # Ensure we have some sort of logger. Prevents errors.
    if not logger:
        logger = loggers.stream_logger(1)
    # strftime format whose output is dot-separated (the split('.') below
    # relies on this).
    date = formatting.date(grain)
    # Check that the origin actually, like... exists.
    if not os.path.isdir(origin):
        raise RuntimeError("No such origin directory: " + origin)
    # Destination should probably exist.
    if not os.path.isdir(destination):
        # NOTE(review): logged at error level although this is routine setup.
        logger.error("Creating destination directory at: " + destination)
        os.makedirs(destination)
    # Find everything in the origin directory.
    logger.info("Building payload list.")
    with ChDir(origin):
        payload = sorted([os.path.abspath(x) for x in os.listdir('.')])
    # Do all of the archival.
    with ChDir(destination):
        logger.info("Creating nested directory structure in: {}".format(os.path.abspath(destination)))
        dirs_needed = []
        file_paths = {}
        max_file_length = 0
        for file in payload:
            # For each file, pull its timestamp and split it into its different
            # parts. These will be used to create the appropriate directory
            # structure in the destination.
            time = datetime.fromtimestamp(os.path.getmtime(file))
            dirs = time.strftime(date).split('.')
            leaf = os.path.join(*dirs)
            dirs_needed.append(leaf)
            file_paths[file] = leaf
            # This is just used for pretty printing.
            if len(os.path.basename(file)) > max_file_length:
                max_file_length = len(os.path.basename(file))
        # Remove duplicates from the necessary directories. (This avoids errors
        # where a folder already exists.) Then create the nested folders.
        dirs_needed = uniquify(dirs_needed)
        for dir in dirs_needed:
            if not os.path.isdir(dir):
                logger.info("  ./" + dir)
                os.makedirs(dir)
        # Start moving/copying the files.
        # (Moving is used if the files don't need to stay in the origin.)
        logger.info("{} files to appropriate subdirectories...".format("Moving" if not persist else "Copying"))
        for file, path in file_paths.items():
            # Each file gets wrapped in a try/except block to ensure that flow
            # is not interrupted if there's an issue with one of them.
            try:
                # Set the file's destination.
                file_destination = os.path.join(path, os.path.basename(file))
                # Determine whether the file should be put in the destination.
                add = False
                if replace:
                    # Definitely add the file if we're okay with replacing it.
                    add = True
                else:
                    # If we're not okay with replacing it, ensure that the file
                    # does not exist in the destination already.
                    if not os.path.isfile(file_destination):
                        add = True
                # If we're okay with adding the file, then do the thing!
                if add:
                    logger.info("  {file:>{length}} {dash}> ./{dest}".format(
                        file = os.path.basename(file),
                        length = max_file_length,
                        dest = file_destination,
                        dash = '=' if persist else '-'
                    ))
                    # If the file exists in the destination, delete it before
                    # attempting to move a new copy there. This also accounts
                    # for symbolic links.
                    if os.path.isfile(file_destination):
                        os.remove(file_destination)
                    # Copy if persisting data; move otherwise.
                    if persist:
                        shutil.copy2(file, path)
                    else:
                        shutil.move(file, path)
                    # Update the time as needed.
                    if update_time:
                        os.utime(file_destination, None)
            except (IOError, OSError) as e:
                # These are the most likely errors.
                logger.error("{}".format(repr(e)))
                logger.error("Unable to copy file '{}' to path: {}".format(file, path))
            except (KeyboardInterrupt, SystemExit):
                logger.info("Quitting...")
                break
            except Exception as e:
                logger.error("{}".format(repr(e)))
def flat(origin, destination, replace=True, grain=3, persist=False, delimiter='.', update_time=False, logger=None):
    """
    Handles the movement of files from one location to another. The destination
    will not be organized; all files will just be dumped into it. The files will
    be renamed to indicate their origin time.

    :param origin: the originating directory, which will be copied
    :param destination: the destination directory where the nesting will be
                        created
    :param replace: if attempting to move a file and it exists in the
                    destination already, should it be replaced or left alone?
    :type replace: bool
    :param grain: how much information to incorporate in the new filename
    :type grain: int
    :param persist: whether to leave the original files in-place or delete them
    :type persist: bool
    :param delimiter: the string used to split the parts of the date in the new
                      filename
    :param update_time: whether to update the timestamps on files in the
                        destination
    :type update_time: bool
    :param logger: a Management Tools logger to record information
    """
    # Ensure we have some sort of logger. Prevents errors.
    if not logger:
        logger = loggers.stream_logger(1)
    # strftime format whose output is dot-separated (the split('.') below
    # relies on this).
    date = formatting.date(grain)
    # Check that the origin actually, like... exists.
    if not os.path.isdir(origin):
        raise RuntimeError("No such origin directory: " + origin)
    if not os.path.isdir(destination):
        logger.error("Creating destination directory at: " + destination)
        os.makedirs(destination)
    # Find everything in the origin directory.
    logger.info("Building payload list.")
    with ChDir(origin):
        payload = sorted([os.path.abspath(x) for x in os.listdir('.')])
    # Do all of the archival.
    with ChDir(destination):
        file_prefixes = {}
        max_file_length = 0
        for file in payload:
            # For each file, pull its timestamp and split it into its different
            # parts. These will be used to create the appropriate file name for
            # each file being moved.
            time = datetime.fromtimestamp(os.path.getmtime(file))
            date_parts = time.strftime(date).split('.')
            prefix = delimiter.join(date_parts)
            file_prefixes[file] = prefix
            # This is just used for pretty printing.
            if len(os.path.basename(file)) > max_file_length:
                max_file_length = len(os.path.basename(file))
        # Start moving/copying the files.
        # (Moving is used if the files don't need to stay in the origin.)
        logger.info("{} files to appropriate subdirectories...".format("Moving" if not persist else "Copying"))
        for file, prefix in file_prefixes.items():
            # Each file gets wrapped in a try/except block to ensure that flow
            # is not interrupted if there's an issue with one of them.
            try:
                # Form the new file name.
                new_name = prefix + delimiter + os.path.basename(file)
                add = False
                # Determine whether the file should be put in the destination.
                if replace:
                    # Definitely add the file if we're okay with replacing it.
                    add = True
                else:
                    # If we're not okay with replacing it, ensure that the file
                    # does not exist in the destination already.
                    if not os.path.isfile(new_name):
                        add = True
                # If we're okay with adding the file, then do the thing!
                if add:
                    logger.info("  {file:>{length}} {dash}> ./{new}".format(
                        length = max_file_length,
                        file = os.path.basename(file),
                        new = new_name,
                        dash = '=' if persist else '-'
                    ))
                    # If the file exists in the destination, delete it before
                    # attempting to move a new copy there. This also accounts
                    # for symbolic links.
                    if os.path.isfile(new_name):
                        os.remove(new_name)
                    # Copy if persisting data; move otherwise.
                    if persist:
                        shutil.copy2(file, new_name)
                    else:
                        shutil.move(file, new_name)
                    # Update the time as needed.
                    if update_time:
                        os.utime(new_name, None)
            except (IOError, OSError) as e:
                # These are the most likely errors.
                logger.error("{}".format(repr(e)))
                # BUGFIX: this handler referenced the undefined name `path`
                # (copied from nested()), which raised a NameError and masked
                # the real error; report the actual destination name instead.
                logger.error("Unable to copy file '{}' to path: {}".format(file, new_name))
            except (KeyboardInterrupt, SystemExit):
                logger.info("Quitting...")
                break
            except Exception as e:
                logger.error("{}".format(repr(e)))
def uniquify(seq, idfun=None):
    """Remove duplicates from *seq*, preserving first-appearance order.

    Based on function 'f5' by Peter Bengtsson:
    http://www.peterbe.com/plog/uniqifiers-benchmark

    :param seq: iterable to de-duplicate
    :param idfun: optional key function; items whose keys collide are
        considered duplicates (the first occurrence wins)
    :return: new list with duplicates removed
    """
    key = idfun if idfun is not None else (lambda item: item)
    seen = set()
    unique = []
    for element in seq:
        marker = key(element)
        if marker in seen:
            continue
        seen.add(marker)
        unique.append(element)
    return unique
class ChDir:
    """Context manager that switches the working directory and restores the
    previous one on exit.

    Note that the change happens in __init__, not __enter__, so merely
    constructing a ChDir also changes directory.

        # starting in old_path:
        with ChDir(new_path):
            os.getcwd()   # new_path
        os.getcwd()       # old_path again
    """

    def __init__(self, new_path):
        # Remember where we came from, then move.
        self.saved_path = os.getcwd()
        os.chdir(new_path)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Always hop back, even if the with-body raised.
        os.chdir(self.saved_path)
| |
import argparse
import os
from filelock import FileLock
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import torch.utils.data.distributed
import horovod.torch as hvd
from horovod.ray import RayExecutor
def metric_average(val, name):
    """Allreduce-average *val* across all Horovod workers and return the
    result as a Python scalar."""
    return hvd.allreduce(torch.tensor(val), name=name).item()
class Net(nn.Module):
    """Small LeNet-style CNN for 28x28 single-channel MNIST digits.

    Input: (N, 1, 28, 28); output: (N, 10) log-probabilities.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        # 20 channels * 4 * 4 spatial after two conv+pool stages = 320.
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        # BUGFIX: specify dim explicitly -- implicit-dim log_softmax is
        # deprecated and warns; the legacy default for 2-D input is dim=1,
        # so behavior is unchanged.
        return F.log_softmax(x, dim=1)
def train_fn(data_dir=None,
             seed=42,
             use_cuda=False,
             batch_size=64,
             use_adasum=False,
             lr=0.01,
             momentum=0.5,
             num_epochs=10,
             log_interval=10):
    """Per-worker Horovod training loop for MNIST.

    Runs inside each Horovod worker process. Downloads MNIST into
    ``data_dir`` under a file lock (so concurrent workers on one host do
    not race on the download), partitions it with a DistributedSampler,
    and trains the ``Net`` CNN with SGD wrapped in Horovod's
    DistributedOptimizer.

    Args:
        data_dir: dataset location; defaults to ``./data`` when falsy.
        seed: RNG seed applied to torch (and to CUDA when enabled).
        use_cuda: pin this worker to its local GPU and train there.
        batch_size: per-worker minibatch size.
        use_adasum: use the Adasum reduction op instead of averaging.
        lr: base learning rate, scaled by world size (or local size for
            GPU Adasum) below.
        momentum: SGD momentum.
        num_epochs: number of passes over the training partition.
        log_interval: batches between progress prints.

    NOTE(review): initial weights are identical across workers only because
    every worker seeds torch with the same ``seed`` — confirm this is the
    intended synchronization (there is no explicit broadcast of parameters).
    """
    # Horovod: initialize library.
    hvd.init()
    torch.manual_seed(seed)
    if use_cuda:
        # Horovod: pin GPU to local rank.
        torch.cuda.set_device(hvd.local_rank())
        torch.cuda.manual_seed(seed)
    # Horovod: limit # of CPU threads to be used per worker.
    torch.set_num_threads(1)
    kwargs = {"num_workers": 1, "pin_memory": True} if use_cuda else {}
    data_dir = data_dir or "./data"
    # Serialize the dataset download across processes on the same host.
    with FileLock(os.path.expanduser("~/.horovod_lock")):
        train_dataset = \
            datasets.MNIST(data_dir, train=True, download=True,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               transforms.Normalize((0.1307,), (0.3081,))
                           ]))
    # Horovod: use DistributedSampler to partition the training data.
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_dataset, num_replicas=hvd.size(), rank=hvd.rank())
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=batch_size, sampler=train_sampler, **kwargs)
    model = Net()
    # By default, Adasum doesn't need scaling up learning rate.
    lr_scaler = hvd.size() if not use_adasum else 1
    if use_cuda:
        # Move model to GPU.
        model.cuda()
        # If using GPU Adasum allreduce, scale learning rate by local_size.
        if use_adasum and hvd.nccl_built():
            lr_scaler = hvd.local_size()
    # Horovod: scale learning rate by lr_scaler.
    optimizer = optim.SGD(
        model.parameters(), lr=lr * lr_scaler, momentum=momentum)
    # Horovod: wrap optimizer with DistributedOptimizer.
    optimizer = hvd.DistributedOptimizer(
        optimizer,
        named_parameters=model.named_parameters(),
        op=hvd.Adasum if use_adasum else hvd.Average)
    for epoch in range(1, num_epochs + 1):
        model.train()
        # Horovod: set epoch to sampler for shuffling.
        train_sampler.set_epoch(epoch)
        for batch_idx, (data, target) in enumerate(train_loader):
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            optimizer.zero_grad()
            output = model(data)
            loss = F.nll_loss(output, target)
            loss.backward()
            optimizer.step()
            if batch_idx % log_interval == 0:
                # Horovod: use train_sampler to determine the number of
                # examples in this worker's partition.
                print("Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
                    epoch, batch_idx * len(data), len(train_sampler),
                    100. * batch_idx / len(train_loader), loss.item()))
def main(num_workers, use_gpu, **kwargs):
    """Fan ``train_fn`` out to ``num_workers`` Ray workers via Horovod.

    All extra keyword arguments are forwarded to ``train_fn`` unchanged.
    """
    run_settings = RayExecutor.create_settings(timeout_s=30)
    runner = RayExecutor(run_settings, use_gpu=use_gpu, num_workers=num_workers)
    runner.run(train_fn, kwargs=kwargs)
if __name__ == "__main__":
    # Training settings
    parser = argparse.ArgumentParser(
        description="PyTorch MNIST Example",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "--batch-size",
        type=int,
        default=64,
        metavar="N",
        help="input batch size for training (default: 64)")
    parser.add_argument(
        "--epochs",
        type=int,
        default=5,
        metavar="N",
        # Fixed: help text previously claimed a default of 10.
        help="number of epochs to train (default: 5)")
    parser.add_argument(
        "--lr",
        type=float,
        default=0.01,
        metavar="LR",
        help="learning rate (default: 0.01)")
    parser.add_argument(
        "--momentum",
        type=float,
        default=0.5,
        metavar="M",
        help="SGD momentum (default: 0.5)")
    parser.add_argument(
        "--no-cuda",
        action="store_true",
        default=False,
        help="disables CUDA training")
    parser.add_argument(
        "--seed",
        type=int,
        default=42,
        metavar="S",
        help="random seed (default: 42)")
    parser.add_argument(
        "--log-interval",
        type=int,
        default=10,
        metavar="N",
        help="how many batches to wait before logging training status")
    parser.add_argument(
        "--use-adasum",
        action="store_true",
        default=False,
        help="use adasum algorithm to do reduction")
    parser.add_argument(
        "--num-workers",
        type=int,
        default=4,
        help="Number of Ray workers to use for training.")
    parser.add_argument(
        "--data-dir",
        help="location of the training dataset in the local filesystem ("
        "will be downloaded if needed)")
    parser.add_argument(
        "--address",
        # Fixed: these keywords were misspelled `require=` and `types=`,
        # which makes argparse raise TypeError at startup.
        required=False,
        type=str,
        default=None,
        help="Address of Ray cluster.")
    parser.add_argument(
        "--server-address",
        type=str,
        default=None,
        required=False,
        help="The address of server to connect to if using "
        "Ray Client.")
    args = parser.parse_args()

    import ray
    if args.address:
        ray.init(args.address)
    elif args.server_address:
        ray.util.connect(args.server_address)
    else:
        ray.init()

    # Fixed: there is no --use-cuda flag, so args.use_cuda raised
    # AttributeError; CUDA is enabled unless --no-cuda is given.
    use_cuda = not args.no_cuda
    kwargs = {
        "data_dir": args.data_dir,
        "seed": args.seed,
        "use_cuda": use_cuda,
        "batch_size": args.batch_size,
        "use_adasum": args.use_adasum,
        "lr": args.lr,
        "momentum": args.momentum,
        # Fixed: the option is --epochs, so the parsed value is args.epochs
        # (args.num_epochs did not exist).
        "num_epochs": args.epochs,
        "log_interval": args.log_interval,
    }
    # Fixed: the settings must be expanded with ** so that main()'s **kwargs
    # forwards them to train_fn; the old `kwargs=kwargs` call nested the dict
    # one level deep and train_fn received an unexpected keyword 'kwargs'.
    main(num_workers=args.num_workers, use_gpu=use_cuda, **kwargs)
| |
#!/usr/bin/env python
# Copyright (C) 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import getopt
def write_java_head(tofile, name):
    """Write the standard header for a generated Java source file.

    Emits a checkstyle suppression line followed by a do-not-modify notice
    naming the original source file ``<name>.java``.
    """
    header = (
        "// CHECKSTYLE:OFF Generated code\n"
        "/* This file is auto-generated from {}.java. DO NOT MODIFY. */\n\n"
    ).format(name)
    tofile.write(header)
def replace_xml_head(line, name):
    """Annotate an XML declaration with a generated-from notice.

    If ``line`` contains the standard XML declaration, a comment naming the
    original layout file ``<name>.xml`` is inserted right after it; other
    lines are returned unchanged.
    """
    declaration = '<?xml version="1.0" encoding="utf-8"?>'
    notice = '<!-- This file is auto-generated from {}.xml. DO NOT MODIFY. -->'.format(name)
    return line.replace(declaration, declaration + '\n' + notice + '\n')
# ---------------------------------------------------------------------------
# Generate the AndroidX ("Support") variants of the leanback sample sources
# by copying each original file and applying ordered textual replacements.
#
# This data-driven form replaces ~340 lines of copy-pasted open/replace/write
# stanzas and fixes three defects in the original:
#   * SampleVideoSupportFragment.java and VideoSupportActivity.java were
#     generated with headers claiming they came from OnboardingDemoFragment.java;
#   * playback_activity_support.xml and
#     playback_transportcontrol_activity_support.xml headers named the wrong
#     source layout files;
#   * the NewDetailsFragment stanza applied the SearchActivity replacement twice.
# Files are now also closed via context managers instead of explicit close().
# ---------------------------------------------------------------------------

JAVA_DIR = 'src/main/java/com/example/android/leanback/'
LAYOUT_DIR = 'src/main/res/layout/'

# Replacement pairs shared by many files. Substitutions are applied per line
# in list order, so where one class name contains another the order below
# reproduces the original script's behavior exactly.
FRAGMENT_IMPORT = ('android.app.Fragment', 'androidx.fragment.app.Fragment')
ACTIVITY_IMPORT = ('android.app.Activity', 'androidx.fragment.app.FragmentActivity')
ACTIVITY_IMPORT_LINE = ('import android.app.Activity',
                        'import androidx.fragment.app.FragmentActivity')
EXTENDS_ACTIVITY = ('extends Activity', 'extends FragmentActivity')
FRAGMENT_MANAGER = ('getFragmentManager()', 'getSupportFragmentManager()')


def convert_file(src_path, dst_path, replacements, java_name=None, xml_name=None):
    """Copy src_path to dst_path line by line, applying replacements in order.

    When ``java_name`` is given, the generated-code Java header naming that
    source file is written first; when ``xml_name`` is given, each line has
    the XML declaration annotated with a generated-from notice.
    """
    with open(src_path, 'r') as src, open(dst_path, 'w') as dst:
        if java_name is not None:
            write_java_head(dst, java_name)
        for line in src:
            if xml_name is not None:
                line = replace_xml_head(line, xml_name)
            for old, new in replacements:
                line = line.replace(old, new)
            dst.write(line)


def convert_java(src_name, dst_name, replacements):
    """Generate JAVA_DIR/<dst_name>.java from JAVA_DIR/<src_name>.java."""
    convert_file(JAVA_DIR + src_name + '.java', JAVA_DIR + dst_name + '.java',
                 replacements, java_name=src_name)


def convert_xml(src_name, replacements):
    """Generate LAYOUT_DIR/<src_name>_support.xml from LAYOUT_DIR/<src_name>.xml."""
    convert_file(LAYOUT_DIR + src_name + '.xml',
                 LAYOUT_DIR + src_name + '_support.xml',
                 replacements, xml_name=src_name)


convert_java('GuidedStepActivity', 'GuidedStepSupportActivity', [
    FRAGMENT_IMPORT, ACTIVITY_IMPORT,
    ('GuidedStepFragment', 'GuidedStepSupportFragment'),
    ('GuidedStepActivity', 'GuidedStepSupportActivity'),
    EXTENDS_ACTIVITY,
])

convert_java('GuidedStepHalfScreenActivity', 'GuidedStepSupportHalfScreenActivity', [
    FRAGMENT_IMPORT, ACTIVITY_IMPORT,
    ('GuidedStepFragment', 'GuidedStepSupportFragment'),
    ('GuidedStepActivity', 'GuidedStepSupportActivity'),
    ('GuidedStepHalfScreenActivity', 'GuidedStepSupportHalfScreenActivity'),
    EXTENDS_ACTIVITY,
])

convert_java('BrowseFragment', 'BrowseSupportFragment', [
    FRAGMENT_IMPORT, ACTIVITY_IMPORT,
    ('BrowseFragment', 'BrowseSupportFragment'),
    ('GuidedStepFragment', 'GuidedStepSupportFragment'),
    ('GuidedStepActivity', 'GuidedStepSupportActivity'),
    ('getActivity().getFragmentManager()', 'getActivity().getSupportFragmentManager()'),
    ('BrowseActivity', 'BrowseSupportActivity'),
    ('DetailsActivity', 'DetailsSupportActivity'),
    ('SearchActivity', 'SearchSupportActivity'),
    ('RowsActivity', 'RowsSupportActivity'),
    ('RowsFragment', 'RowsSupportFragment'),
    ('GuidedStepHalfScreenActivity', 'GuidedStepSupportHalfScreenActivity'),
])

convert_java('BrowseActivity', 'BrowseSupportActivity', [
    ('BrowseActivity', 'BrowseSupportActivity'),
    ACTIVITY_IMPORT,
    EXTENDS_ACTIVITY,
    ('R.layout.browse', 'R.layout.browse_support'),
])

convert_xml('browse', [
    ('com.example.android.leanback.BrowseFragment',
     'com.example.android.leanback.BrowseSupportFragment'),
])

convert_java('DetailsFragment', 'DetailsSupportFragment', [
    FRAGMENT_IMPORT, ACTIVITY_IMPORT,
    ('DetailsFragment', 'DetailsSupportFragment'),
    ('DetailsActivity', 'DetailsSupportActivity'),
    ('PlaybackOverlayActivity', 'PlaybackOverlaySupportActivity'),
    ('SearchActivity', 'SearchSupportActivity'),
])

convert_java('NewDetailsFragment', 'NewDetailsSupportFragment', [
    FRAGMENT_IMPORT, ACTIVITY_IMPORT,
    ('DetailsFragment', 'DetailsSupportFragment'),
    # Undo the previous substitution for this helper, whose name keeps the
    # non-support form.
    ('DetailsSupportFragmentVideoHelper', 'DetailsFragmentVideoHelper'),
    ('VideoFragment', 'VideoSupportFragment'),
    ('PlaybackFragmentGlueHost', 'PlaybackSupportFragmentGlueHost'),
    ('DetailsActivity', 'DetailsSupportActivity'),
    ('PlaybackOverlayActivity', 'PlaybackOverlaySupportActivity'),
    # Fixed: the original applied the SearchActivity replacement twice.
    ('SearchActivity', 'SearchSupportActivity'),
    ('getRowsFragment', 'getRowsSupportFragment'),
])

convert_java('DetailsActivity', 'DetailsSupportActivity', [
    ('DetailsActivity', 'DetailsSupportActivity'),
    ACTIVITY_IMPORT,
    EXTENDS_ACTIVITY,
    FRAGMENT_MANAGER,
    ('DetailsFragment', 'DetailsSupportFragment'),
    ('NewDetailsFragment', 'NewDetailsSupportFragment'),
])

convert_java('SearchDetailsActivity', 'SearchDetailsSupportActivity', [
    # Also renames the class itself: SearchDetailsActivity contains
    # "DetailsActivity" as a substring (same trick as the original).
    ('DetailsActivity', 'DetailsSupportActivity'),
])

convert_java('SearchFragment', 'SearchSupportFragment', [
    ('SearchFragment', 'SearchSupportFragment'),
    ('DetailsActivity', 'DetailsSupportActivity'),
])

convert_java('SearchActivity', 'SearchSupportActivity', [
    ('SearchActivity', 'SearchSupportActivity'),
    EXTENDS_ACTIVITY,
    ('R.layout.search', 'R.layout.search_support'),
    ACTIVITY_IMPORT,
    FRAGMENT_MANAGER,
    ('SearchFragment', 'SearchSupportFragment'),
])

convert_xml('search', [
    ('com.example.android.leanback.SearchFragment',
     'com.example.android.leanback.SearchSupportFragment'),
])

convert_java('VerticalGridFragment', 'VerticalGridSupportFragment', [
    ('VerticalGridFragment', 'VerticalGridSupportFragment'),
    ('DetailsActivity', 'DetailsSupportActivity'),
    ('SearchActivity', 'SearchSupportActivity'),
])

convert_java('VerticalGridActivity', 'VerticalGridSupportActivity', [
    ('VerticalGridActivity', 'VerticalGridSupportActivity'),
    EXTENDS_ACTIVITY,
    ('R.layout.vertical_grid', 'R.layout.vertical_grid_support'),
    ACTIVITY_IMPORT,
    FRAGMENT_MANAGER,
    ('VerticalGridFragment', 'VerticalGridSupportFragment'),
])

convert_xml('vertical_grid', [
    ('com.example.android.leanback.VerticalGridFragment',
     'com.example.android.leanback.VerticalGridSupportFragment'),
])

convert_java('ErrorFragment', 'ErrorSupportFragment', [
    ('ErrorFragment', 'ErrorSupportFragment'),
])

convert_java('BrowseErrorActivity', 'BrowseErrorSupportActivity', [
    ('BrowseErrorActivity', 'BrowseErrorSupportActivity'),
    EXTENDS_ACTIVITY,
    ('R.layout.browse', 'R.layout.browse_support'),
    ACTIVITY_IMPORT,
    FRAGMENT_MANAGER,
    ('ErrorFragment', 'ErrorSupportFragment'),
    ('SpinnerFragment', 'SpinnerSupportFragment'),
    FRAGMENT_IMPORT,
])

convert_java('RowsFragment', 'RowsSupportFragment', [
    ('RowsFragment', 'RowsSupportFragment'),
    ('DetailsActivity', 'DetailsSupportActivity'),
])

convert_java('RowsActivity', 'RowsSupportActivity', [
    ('RowsActivity', 'RowsSupportActivity'),
    EXTENDS_ACTIVITY,
    ('R.layout.rows', 'R.layout.rows_support'),
    ACTIVITY_IMPORT,
    ('RowsFragment', 'RowsSupportFragment'),
    FRAGMENT_MANAGER,
    ('SearchActivity', 'SearchSupportActivity'),
])

convert_xml('rows', [
    ('com.example.android.leanback.RowsFragment',
     'com.example.android.leanback.RowsSupportFragment'),
])

convert_java('PlaybackFragment', 'PlaybackSupportFragment', [
    ('PlaybackFragment', 'PlaybackSupportFragment'),
    ('PlaybackActivity', 'PlaybackSupportActivity'),
])

convert_java('PlaybackActivity', 'PlaybackSupportActivity', [
    ('PlaybackActivity', 'PlaybackSupportActivity'),
    EXTENDS_ACTIVITY,
    ('R.layout.playback_activity', 'R.layout.playback_activity_support'),
    ACTIVITY_IMPORT,
])

# Fixed: the header previously claimed this came from playback_controls.xml.
convert_xml('playback_activity', [
    ('com.example.android.leanback.PlaybackFragment',
     'com.example.android.leanback.PlaybackSupportFragment'),
])

convert_java('PlaybackTransportControlFragment', 'PlaybackTransportControlSupportFragment', [
    ('PlaybackFragment', 'PlaybackSupportFragment'),
    ('PlaybackTransportControlFragment', 'PlaybackTransportControlSupportFragment'),
    ('PlaybackTransportControlActivity', 'PlaybackTransportControlSupportActivity'),
])

convert_java('PlaybackTransportControlActivity', 'PlaybackTransportControlSupportActivity', [
    ('PlaybackTransportControlActivity', 'PlaybackTransportControlSupportActivity'),
    EXTENDS_ACTIVITY,
    ('R.layout.playback_transportcontrol_activity',
     'R.layout.playback_transportcontrol_activity_support'),
    ACTIVITY_IMPORT,
])

# Fixed: the header previously named playback_transportcontrols.xml.
convert_xml('playback_transportcontrol_activity', [
    ('com.example.android.leanback.PlaybackTransportControlFragment',
     'com.example.android.leanback.PlaybackTransportControlSupportFragment'),
])

convert_xml('playback_controls', [
    ('com.example.android.leanback.PlaybackOverlayFragment',
     'com.example.android.leanback.PlaybackOverlaySupportFragment'),
])

convert_java('OnboardingActivity', 'OnboardingSupportActivity', [
    FRAGMENT_IMPORT, ACTIVITY_IMPORT,
    ('OnboardingActivity', 'OnboardingSupportActivity'),
    ('OnboardingDemoFragment', 'OnboardingDemoSupportFragment'),
    EXTENDS_ACTIVITY,
    FRAGMENT_MANAGER,
])

convert_java('OnboardingDemoFragment', 'OnboardingDemoSupportFragment', [
    FRAGMENT_IMPORT, ACTIVITY_IMPORT_LINE,
    ('OnboardingDemoFragment', 'OnboardingDemoSupportFragment'),
    ('OnboardingFragment', 'OnboardingSupportFragment'),
    ('OnboardingActivity', 'OnboardingSupportActivity'),
])

# Fixed: header previously claimed this came from OnboardingDemoFragment.java.
convert_java('SampleVideoFragment', 'SampleVideoSupportFragment', [
    FRAGMENT_IMPORT, ACTIVITY_IMPORT_LINE,
    ('SampleVideoFragment', 'SampleVideoSupportFragment'),
    ('VideoFragment', 'VideoSupportFragment'),
])

# Fixed: header previously claimed this came from OnboardingDemoFragment.java.
convert_java('VideoActivity', 'VideoSupportActivity', [
    FRAGMENT_IMPORT, ACTIVITY_IMPORT_LINE,
    ('VideoActivity', 'VideoSupportActivity'),
    EXTENDS_ACTIVITY,
    FRAGMENT_MANAGER,
    ('SampleVideoFragment', 'SampleVideoSupportFragment'),
])
| |
import subprocess
import MySQLdb
import shutil
import os
import inspect
import foundation.agent_properties
import time
def execute_servers(exec_dir):
    """Launch the clock and marketplace servers and wait for both to exit.

    Each server's stdout/stderr is redirected to a .log file in the current
    working directory. The short sleeps presumably stagger startup so the
    clock server is up before the marketplace server starts -- TODO confirm.
    Any failure is printed and swallowed (best-effort execution).

    Args:
        exec_dir: root directory containing the compiled server binaries.

    NOTE: this module is Python 2 (print statements below).
    """
    try:
        clock_command = exec_dir + '/ClockManager2/src/ClockServer'
        clock_stdout = open('clockserver_stdout.log', "w")
        clock_stderr = open('clockserver_stderr.log', "w")
        clock = subprocess.Popen(clock_command,
                                 stdout=clock_stdout,
                                 stderr=clock_stderr,
                                 shell=True)
        time.sleep(0.1)
        market_command = exec_dir + '/MarketPlaceServer/src/MarketPlaceServer'
        market_stdout = open('marketserver_stdout.log', "w")
        market_stderr = open('marketserver_stderr.log', "w")
        market = subprocess.Popen(market_command,
                                  stdout=market_stdout,
                                  stderr=market_stderr,
                                  shell=True)
        time.sleep(0.5)
        # Block until both server processes terminate.
        clock_status = clock.wait()
        market_status = market.wait()
        clock_stdout.close()
        clock_stderr.close()
        market_stdout.close()
        market_stderr.close()
        print 'Command exit Status/Return Code clock_server : ' + str(clock_status) + '\n'
        print 'Command exit Status/Return Code market_server : ' + str(market_status) + '\n'
    except Exception as e:
        print e
def execute_process(exec_dir):
    """Run the provider and consumer agents and wait for both to finish.

    Both agents run concurrently, with stdout/stderr redirected to .log
    files in the current working directory. Any failure is printed and
    swallowed (best-effort execution).

    Args:
        exec_dir: root directory containing the agents/ scripts.
    """
    try:
        provider_command = '/usr/bin/python ' + exec_dir + '/agents/ProviderExecution.py'
        provider_stdout = open('providerexecution_stdout.log', "w")
        provider_stderr = open('providerexecution_stderr.log', "w")
        provider = subprocess.Popen(provider_command,
                                    stdout=provider_stdout,
                                    stderr=provider_stderr,
                                    shell=True)
        consumer_command = '/usr/bin/python ' + exec_dir + '/agents/ConsumerExecution.py'
        consumer_stdout = open('consumerexecution_stdout.log', "w")
        consumer_stderr = open('consumerexecution_stderr.log', "w")
        consumer = subprocess.Popen(consumer_command,
                                    stdout=consumer_stdout,
                                    stderr=consumer_stderr,
                                    shell=True)
        # Block until both agent processes terminate.
        provider_status = provider.wait()
        consumer_status = consumer.wait()
        provider_stdout.close()
        provider_stderr.close()
        consumer_stdout.close()
        consumer_stderr.close()
        print 'Command exit Status/Return Code provider_execution ' + str(provider_status) + '\n'
        print 'Command exit Status/Return Code consumer_execution ' + str(consumer_status) + '\n'
    except Exception as e:
        print e
def update_consumers(cursor, consumer_id, num_consumers):
    """Set how many times the given simulation consumer must execute.

    Args:
        cursor: DB cursor used to run the update.
        consumer_id: primary key of the simulation_consumer row.
        num_consumers: new value for number_execute.

    Values are interpolated with %d, so only integers can be embedded in
    the statement.
    """
    sql = "update simulation_consumer \
           set number_execute = '%d' \
           where id = '%d' " % (num_consumers, consumer_id)
    cursor.execute(sql)
def update_periods(num_periods, db_cursor=None):
    """Set the number of bid periods for the whole simulation.

    Args:
        num_periods: new value for simulation_generalparameters.bid_periods.
        db_cursor: optional DB cursor. When omitted, falls back to the
            module-level ``cursor`` -- the original version relied on that
            global implicitly, unlike every sibling helper which takes the
            cursor as a parameter. The default keeps the existing call
            ``update_periods(num_periods)`` working unchanged.
    """
    if db_cursor is None:
        db_cursor = cursor  # legacy fallback to the module-level cursor
    sql = "update simulation_generalparameters \
           set bid_periods = '%d'" % (num_periods)
    db_cursor.execute(sql)
def update_provider(cursor, provider_id):
    """Mark a single provider as active (status 'A').

    Args:
        cursor: DB cursor used to run the update.
        provider_id: primary key of the simulation_provider row.
    """
    sql = "update simulation_provider \
           set status = 'A' \
           where id = '%d' " % (provider_id)
    cursor.execute(sql)
def inactivate_providers(cursor):
    """Mark every provider inactive (status 'I').

    Run before each configuration step so that only the step's own
    providers are re-activated afterwards via update_provider().
    """
    sql = "update simulation_provider \
           set status = 'I' "
    cursor.execute(sql)
def read_configuration_step_providers(cursor, execution_configuration):
    """Fetch the providers attached to one execution-configuration step.

    Args:
        cursor: DB cursor used to run the query.
        execution_configuration: id of the step whose providers are wanted.

    Returns:
        dict mapping row id -> {'provider_id': provider_id}.
    """
    sql = "select id, provider_id \
           from simulation_executionconfigurationproviders \
           where execution_configuration_id = '%d' " % (execution_configuration)
    cursor.execute(sql)
    return {row_id: {'provider_id': provider_id}
            for row_id, provider_id in cursor.fetchall()}
def read_configuration_steps(cursor, configuration_group):
    """Fetch the active (status 'A') steps of an execution group.

    Args:
        cursor: DB cursor used to run the query.
        configuration_group: id of the simulation_executiongroup.

    Returns:
        dict mapping step id -> {'description', 'number_consumers',
        'number_periods'}.
    """
    sql = "select id, description, number_consumers, number_periods \
           from simulation_executionconfiguration \
           where execution_group_id = '%d' \
           and status = 'A'" % (configuration_group)
    cursor.execute(sql)
    return {
        step_id: {
            'description': description,
            'number_consumers': consumers,
            'number_periods': periods,
        }
        for step_id, description, consumers, periods in cursor.fetchall()
    }
def read_consumer(cursor):
    """Return the id of the first consumer flagged for execution.

    Looks up a consumer joined with a consumerservice row whose
    ``execute`` flag is 1 (the query is LIMIT 1, so at most one row).

    Returns:
        The consumer id, or None when no consumer is flagged. The original
        version left ``consumerId`` unbound on an empty result set and
        raised NameError; returning None is the explicit fix.
    """
    sql = "select a.id \
           from simulation_consumer a, simulation_consumerservice b \
           where a.id = b.consumer_id \
           and b.execute = 1 \
           LIMIT 1 "
    cursor.execute(sql)
    for row in cursor.fetchall():
        return row[0]
    return None
def read_configuration_groups(cursor):
    """Fetch every active (status 'A') execution group.

    Returns:
        dict mapping group id -> {'name', 'description', 'steps'}; the
        'steps' entry is an empty dict, filled in later by the caller.
    """
    sql = "select id, name, description \
           from simulation_executiongroup \
           where status = 'A'"
    cursor.execute(sql)
    return {
        group_id: {'name': name, 'description': description, 'steps': {}}
        for group_id, name, description in cursor.fetchall()
    }
def removing_directory_content(directory):
    """Delete every regular file directly inside ``directory``.

    Subdirectories are left untouched. Errors on individual files are
    printed and skipped (Python 2 ``except X, e`` syntax below).
    """
    for the_file in os.listdir(directory):
        full_file_name = os.path.join(directory, the_file)
        try:
            if (os.path.isfile(full_file_name)):
                os.unlink(full_file_name)
        except Exception, e:
            print e
def copy_directory_content(src_directory, dst_directory):
    """Copy every regular file from src_directory into dst_directory.

    Subdirectories are skipped; existing destination files are overwritten
    (shutil.copy semantics).
    """
    for entry in os.listdir(src_directory):
        source = os.path.join(src_directory, entry)
        if os.path.isfile(source):
            shutil.copy(source, os.path.join(dst_directory, entry))
def copy_logs_files(src_directory, dst_directory):
    """Copy every '*.log' regular file from src_directory into dst_directory.

    Non-log entries and subdirectories are skipped; existing destination
    files are overwritten (shutil.copy semantics).
    """
    for entry in os.listdir(src_directory):
        if not entry.endswith('.log'):
            continue
        source = os.path.join(src_directory, entry)
        if os.path.isfile(source):
            shutil.copy(source, os.path.join(dst_directory, entry))
def delete_log_files(directory):
    """Delete every '*.log' file directly inside ``directory``.

    Errors on individual files are printed and skipped (Python 2
    ``except X, e`` syntax below).
    """
    for the_file in os.listdir(directory):
        if the_file.endswith('.log'):
            full_file_name = os.path.join(directory, the_file)
            try:
                if (os.path.isfile(full_file_name)):
                    os.unlink(full_file_name)
            except Exception, e:
                print e
# ---------------------------------------------------------------------------
# Orchestration: read the active execution groups/steps/providers from the
# database, then run the servers and agents once per step, collecting the
# result and log files into a per-step folder. (Python 2 script.)
# NOTE(review): database credentials are hard-coded below -- consider moving
# them to configuration.
# ---------------------------------------------------------------------------
# Open database connection
db = MySQLdb.connect("localhost","root","password","Network_Simulation" )
# Prepare a cursor object using cursor() method
cursor = db.cursor()
# Brings the consumer
consumer_id = read_consumer(cursor)
# Brings configuration groups.
groups = read_configuration_groups(cursor)
for groupId in groups:
    group = groups[groupId]
    steps = read_configuration_steps(cursor, groupId)
    group['steps'] = steps
# Brings the providers of every step.
for groupId in groups:
    group = groups[groupId]
    steps = group['steps']
    for stepId in steps:
        step = steps[stepId]
        providers = read_configuration_step_providers(cursor, stepId)
        step['providers'] = providers
# Directory of this script (the agents/ folder); its parent is the
# project root where the server binaries live.
file_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
dir_path = file_path.split('/')
dir_path.pop() # remove ./agents from the list
main_dir = '/'.join(dir_path)
result_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
result_dir = result_dir + '/' + foundation.agent_properties.result_directory
# Execute different configurations
for groupId in groups:
    group = groups[groupId]
    steps = group['steps']
    for stepId in steps:
        # Start every step with a clean slate of log files.
        delete_log_files(file_path)
        step = steps[stepId]
        num_periods = step['number_periods']
        num_consumers = step['number_consumers']
        # Activate providers that must be executed in the configuration.
        inactivate_providers(cursor)
        for provider in step['providers']:
            provider_id = ((step['providers']).get(provider)).get('provider_id')
            print 'provider_id:' + str(provider_id)
            # update the status of the provider
            update_provider(cursor, provider_id)
        # Establish the correct number of consumers
        update_consumers(cursor, consumer_id, num_consumers)
        # Updates the number of periods.
        # NOTE(review): update_periods relies on the module-level `cursor`
        # rather than taking it as a parameter like its sibling helpers.
        update_periods(num_periods)
        cursor.connection.commit()
        # Executes the software
        execute_servers(main_dir)
        execute_process(main_dir)
        # Create and copy the result files. The directory is created under the result folder.
        # The name of the folder is composed by the group and step id
        folder_name = str(groupId) + '_' + str(stepId)
        directory = result_dir + '/' + folder_name
        if not os.path.exists(directory):
            os.makedirs(directory)
        removing_directory_content(directory)
        copy_directory_content(result_dir, directory)
        copy_logs_files(file_path,directory)
| |
"""Solvers of systems of polynomial equations. """
from sympy.core import S
from sympy.polys import Poly, groebner, roots
from sympy.polys.polytools import parallel_poly_from_expr
from sympy.polys.polyerrors import ComputationFailed, PolificationFailed
from sympy.simplify import rcollect
from sympy.utilities import any, all, postfixes
from sympy.utilities.iterables import minkey
class SolveFailed(Exception):
    """Raised when a specialized solver's preconditions weren't met.

    ``solve_poly_system`` catches this and falls back to the generic
    Groebner-basis solver.
    """
def solve_poly_system(seq, *gens, **args):
    """
    Solve a system of polynomial equations.

    Example
    =======

    >>> from sympy import solve_poly_system
    >>> from sympy.abc import x, y

    >>> solve_poly_system([x*y - 2*y, 2*y**2 - x**2], x, y)
    [(0, 0), (2, -2**(1/2)), (2, 2**(1/2))]

    """
    try:
        polys, opt = parallel_poly_from_expr(seq, *gens, **args)
    except PolificationFailed as exc:
        # BUG FIX: this used the Python-2-only ``except E, exc`` form and
        # PolificationFailed was never imported, so reaching this handler
        # raised NameError instead of a useful error.  ``as`` works on
        # Python 2.6+ and 3.x alike.
        raise ComputationFailed('solve_poly_system', len(seq), exc)

    if len(polys) == len(opt.gens) == 2:
        # Fast path: two bivariate polynomials of degree <= 2 in each
        # variable can often be solved without a full recursive Groebner
        # elimination.
        f, g = polys
        a, b = f.degree_list()
        c, d = g.degree_list()

        if a <= 2 and b <= 2 and c <= 2 and d <= 2:
            try:
                return solve_biquadratic(f, g, opt)
            except SolveFailed:
                # Biquadratic solver's preconditions weren't met; fall
                # through to the generic solver.
                pass

    return solve_generic(polys, opt)
def solve_biquadratic(f, g, opt):
    """Solve a system of two bivariate quadratic polynomial equations.

    Raises ``SolveFailed`` when the Groebner basis of the pair does not
    have the expected two-element triangular shape; returns ``None`` when
    the basis is {1} (no solutions).
    """
    G = groebner([f, g])

    if len(G) == 1 and G[0].is_ground:
        return None

    if len(G) != 2:
        raise SolveFailed

    x, y = opt.gens
    p, q = G

    # p is solved for x with y symbolic; q is univariate in y after
    # trimming the leading generator.
    p = Poly(p, x, expand=False)
    p_roots = [ rcollect(expr, y) for expr in roots(p).keys() ]

    q = q.ltrim(-1)
    q_roots = roots(q).keys()

    # Pair every y-root with every x-expression evaluated at that root.
    solutions = [ (p_root.subs(y, q_root), q_root)
        for q_root in q_roots for p_root in p_roots ]

    return sorted(solutions)
def solve_generic(polys, opt):
    """
    Solve a generic system of polynomial equations.

    Returns all possible solutions over C[x_1, x_2, ..., x_m] of a
    set F = { f_1, f_2, ..., f_n } of polynomial equations, using
    Groebner basis approach. For now only zero-dimensional systems
    are supported, which means F can have at most a finite number
    of solutions.

    The algorithm works by the fact that, supposing G is the basis
    of F with respect to an elimination order (here lexicographic
    order is used), G and F generate the same ideal, they have the
    same set of solutions. By the elimination property, if G is a
    reduced, zero-dimensional Groebner basis, then there exists an
    univariate polynomial in G (in its last variable). This can be
    solved by computing its roots. Substituting all computed roots
    for the last (eliminated) variable in other elements of G, new
    polynomial system is generated. Applying the above procedure
    recursively, a finite number of solutions can be found.

    The ability of finding all solutions by this procedure depends
    on the root finding algorithms. If no solutions were found, it
    means only that roots() failed, but the system is solvable. To
    overcome this difficulty use numerical algorithms instead.

    References
    ==========

    .. [Buchberger01] B. Buchberger, Groebner Bases: A Short
    Introduction for Systems Theorists, In: R. Moreno-Diaz,
    B. Buchberger, J.L. Freire, Proceedings of EUROCAST'01,
    February, 2001

    .. [Cox97] D. Cox, J. Little, D. O'Shea, Ideals, Varieties
    and Algorithms, Springer, Second Edition, 1997, pp. 112

    """
    def is_univariate(f):
        """Returns True if 'f' is univariate in its last variable. """
        for monom in f.monoms():
            if any(m > 0 for m in monom[:-1]):
                return False
        return True

    def subs_root(f, gen, zero):
        """Replace generator with a root so that the result is nice. """
        p = f.as_expr({gen: zero})
        # Only expand when the substitution can create nested products
        # (degree >= 2); keeps low-degree results compact.
        if f.degree(gen) >= 2:
            p = p.expand(deep=False)
        return p

    def solve_reduced_system(system, gens, entry=False):
        """Recursively solves reduced polynomial systems. """
        # Base case: a single univariate equation -- just take its roots.
        if len(system) == len(gens) == 1:
            zeros = roots(system[0], gens[-1]).keys()
            return [ (zero,) for zero in zeros ]

        basis = groebner(system, gens, polys=True)

        # Basis == {1}: the ideal is the whole ring, i.e. no solutions.
        # At the top level this is reported as None ("inconsistent"), as
        # opposed to [] ("roots() found nothing").
        if len(basis) == 1 and basis[0].is_ground:
            if not entry:
                return []
            else:
                return None

        # NOTE(review): Python 2 semantics -- filter() returns a list here,
        # so len() and .pop() are valid.
        univariate = filter(is_univariate, basis)

        if len(univariate) == 1:
            f = univariate.pop()
        else:
            raise NotImplementedError("only zero-dimensional systems supported (finite number of solutions)")

        gens = f.gens
        gen = gens[-1]

        zeros = roots(f.ltrim(gen)).keys()

        if not zeros:
            return []

        if len(basis) == 1:
            return [ (zero,) for zero in zeros ]

        solutions = []

        # Back-substitute each root into the remaining basis elements and
        # recurse on the smaller system in one fewer variable.
        for zero in zeros:
            new_system = []
            new_gens = gens[:-1]

            for b in basis[:-1]:
                eq = subs_root(b, gen, zero)

                if eq is not S.Zero:
                    new_system.append(eq)

            for solution in solve_reduced_system(new_system, new_gens):
                solutions.append(solution + (zero,))

        return solutions

    result = solve_reduced_system(polys, opt.gens, entry=True)

    if result is not None:
        return sorted(result)
    else:
        return None
def solve_triangulated(polys, *gens, **args):
    """
    Solve a polynomial system using Gianni-Kalkbrenner algorithm.

    The algorithm proceeds by computing one Groebner basis in the ground
    domain and then by iteratively computing polynomial factorizations in
    appropriately constructed algebraic extensions of the ground domain.

    Example
    =======

    >>> from sympy.solvers.polysys import solve_triangulated
    >>> from sympy.abc import x, y, z

    >>> F = [x**2 + y + z - 1, x + y**2 + z - 1, x + y + z**2 - 1]

    >>> solve_triangulated(F, x, y, z)
    [(0, 0, 1), (0, 1, 0), (1, 0, 0)]

    References
    ==========

    .. [Gianni89] Patrizia Gianni, Teo Mora, Algebraic Solution of System of
    Polynomial Equations using Groebner Bases, AAECC-5 on Applied Algebra,
    Algebraic Algorithms and Error-Correcting Codes, LNCS 356 247--257, 1989

    """
    G = groebner(polys, gens, polys=True)
    G = list(reversed(G))

    domain = args.get('domain')

    if domain is not None:
        for i, g in enumerate(G):
            G[i] = g.set_domain(domain)

    # After reversal G[0] is the basis element univariate in the last
    # generator; trim the other generators and root-solve it in the ground
    # domain.
    f, G = G[0].ltrim(-1), G[1:]
    dom = f.get_domain()

    zeros = f.ground_roots()
    solutions = set([])

    for zero in zeros:
        solutions.add(((zero,), dom))

    var_seq = reversed(gens[:-1])
    vars_seq = postfixes(gens[1:])

    # Extend partial solutions one variable at a time, from the last
    # generator back to the first.
    for var, vars in zip(var_seq, vars_seq):
        _solutions = set([])

        for values, dom in solutions:
            H, mapping = [], zip(vars, values)

            for g in G:
                _vars = (var,) + vars

                if g.has_only_gens(*_vars) and g.degree(var) != 0:
                    h = g.ltrim(var).eval(mapping)

                    # Keep only polynomials whose leading coefficient did
                    # not vanish under the substitution.
                    if g.degree(var) == h.degree():
                        H.append(h)

            p = minkey(H, key=lambda h: h.degree())
            zeros = p.ground_roots()

            for zero in zeros:
                if not zero.is_Rational:
                    # Irrational root: continue in the algebraic extension
                    # it generates.
                    dom_zero = dom.algebraic_field(zero)
                else:
                    dom_zero = dom

                _solutions.add(((zero,) + values, dom_zero))

        solutions = _solutions

    solutions = list(solutions)

    for i, (solution, _) in enumerate(solutions):
        solutions[i] = solution

    return sorted(solutions)
| |
# Gambit scripts
#
# Copyright (C) USC Information Sciences Institute
# Author: Nibir Bora <nbora@usc.edu>
# URL: <http://cbg.isi.edu/>
# For license information, see LICENSE
import os
import re
import sys
import csv
import math
import nltk
#import time
import numpy
import getopt
import random
import string
import anyjson
import datetime
import psycopg2
import itertools
import cPickle as pickle
from pprint import pprint
from pymining import itemmining
from multiprocessing import Pool
from nltk.corpus import stopwords
from nltk.probability import FreqDist
from datetime import datetime, timedelta
from pytagcloud.colors import COLOR_SCHEMES
from pytagcloud import create_tag_image, make_tags
from pytagcloud.lang.counter import get_tag_counts
import settings as my
from lib import onlineldavb
sys.path.insert(0, os.path.abspath('..'))
#
# PREP INPUT
#
def prep_input():
    '''Build the tokenized-tweet input file and token legend for LDA.

    Reads tweet text for the configured time window from Postgres, strips
    stopwords, keeps only known tokens, and writes 'lda_input.dat' (one
    tokenized tweet per line) plus 'lda_legend.json' (token index -> token).
    '''
    print '\n', my.TS_START, my.TS_WINDOW, '\n'
    SQL = '''SELECT text\
    FROM {rel_tweet} \
    WHERE timestamp BETWEEN '{ts_start}'
    AND timestamp '{ts_start}' + INTERVAL '{window} days'
    '''.format(rel_tweet=my.REL_TWEET,
    ts_start=my.TS_START, window=my.TS_WINDOW)
    print 'Querying DB...'
    con = psycopg2.connect(my.DB_CONN_STRING)
    cur = con.cursor()
    cur.execute(SQL)
    recs = cur.fetchall()
    con.close()
    print '{count} records retrieved.'.format(count=len(recs))
    # Globals: the Pool worker _trim_tweet reads sw and tokens.
    global sw
    sw = stopwords.words('english')
    sw.extend(my.STOPWORDS)
    sw = list(set(sw))
    global tokens
    with open('data/' + my.DATA_FOLDER + 'tokens.json', 'rb') as fp:
        tokens = sorted(anyjson.loads(fp.read()))
    legend = dict((tokens.index(tok), tok) for tok in tokens)
    pool = Pool(processes=my.PROCESSES)
    tweets = pool.map(_trim_tweet, recs)
    tweets = filter(None, tweets)
    print '{count} tokenized tweets prepared.'.format(count=len(tweets))
    path = 'data/' + my.DATA_FOLDER + 'lda/'
    if not os.path.exists(path): os.makedirs(path)
    # Truncate the output file, then append each tokenized tweet line.
    open(path + 'lda_input' + '.dat', 'wb').close()
    with open(path + 'lda_input' + '.dat', 'ab') as fp:
        for tw in tweets:
            fp.write(tw)
    with open(path + 'lda_legend' + '.json', 'wb') as fp:
        fp.write(anyjson.dumps(legend))
def _trim_tweet(rec):
    '''Map function: tokenize one DB row into a whitespace-joined token line.

    Lowercases the tweet text, drops stopwords, intersects with the known
    token set, and returns 'tok1 tok2 ...\n' -- or None when nothing is left.
    Reads the module globals `sw` and `tokens` set up by prep_input().
    '''
    words = nltk.word_tokenize(rec[0].lower())
    words = [w for w in words if w not in sw]
    kept = tuple(set(words) & set(tokens))
    if not kept:
        return None
    return ' '.join(kept) + '\n'
def run_lda():
    '''Fan out one LDA training run per configured topic count K.

    Topic counts come from my.TOPICS_START..my.TOPICS_END (inclusive) in
    steps of my.TOPICS_STEP; each count is handled by _run_lda in a
    worker process.
    '''
    topic_counts = range(my.TOPICS_START, my.TOPICS_END+1, my.TOPICS_STEP)
    workers = Pool(processes=my.PROCESSES)
    workers.map(_run_lda, topic_counts)
def _run_lda(K=10):
    '''Map function: fit an online-VB LDA model with K topics.

    Loads the prepared corpus and token legend, feeds documents to
    OnlineLDA in batches of my.BATCH_SIZE, and saves the final lambda
    (topic-word) and gamma (doc-topic) matrices under models/<K>/.
    '''
    path = 'data/' + my.DATA_FOLDER + 'lda/'
    with open(path + 'lda_legend.json', 'rb') as fp:
        legend = anyjson.loads(fp.read())
    #vocab = legend.keys()
    vocab = legend.values()
    W = len(vocab)
    with open(path + 'lda_input.dat', 'rb') as fp:
        docset = fp.readlines()
    D = len(docset)
    print '\nRunning OLDA. Vocab size: {v}, Docs: {c}, K: {k}, D: {d}.'\
    .format(v=W, c=len(docset), k=K, d=D)
    olda = onlineldavb.OnlineLDA(vocab, K, D, 1./K, 1./K, 1024., 0.7)
    i = 0
    # NOTE(review): '<=' means the final iteration can hand an empty batch
    # to update_lambda once i == len(docset) -- confirm this is intended.
    while i <= len(docset):
        print 'K:', K, ', Batch:', i, i+my.BATCH_SIZE
        (gamma, bound) = olda.update_lambda(docset[i:i+my.BATCH_SIZE])
        i += my.BATCH_SIZE
        # Per-batch held-out perplexity estimate from the variational bound.
        (wordids, wordcts) = onlineldavb.parse_doc_list(docset, olda._vocab)
        perwordbound = bound * len(docset) / (D * sum(map(sum, wordcts)))
        print 'K = %f, rho_t = %f, held-out perplexity estimate = %f' % \
        (K, olda._rhot, numpy.exp(-perwordbound))
    path = path + 'models/' + str(K) + '/'
    if not os.path.exists(path): os.makedirs(path)
    numpy.savetxt(path + 'output_lambda.dat', olda._lambda)
    numpy.savetxt(path + 'output_gamma.dat', gamma)
def print_topics():
    '''Print the top-10 words of every topic for each trained model.'''
    path = 'data/' + my.DATA_FOLDER + 'lda/'
    with open(path + 'lda_legend.json', 'rb') as fp:
        legend = anyjson.loads(fp.read())
    #vocab = legend.keys()
    vocab = legend.values()
    K_ = range(my.TOPICS_START, my.TOPICS_END+1, my.TOPICS_STEP)
    for K in K_:
        print '\n\nK =', K, '\n'
        path = 'data/' + my.DATA_FOLDER + 'lda/' + 'models/' + str(K) + '/'
        testlambda = numpy.loadtxt(path + 'output_lambda.dat')
        for k in range(0, len(testlambda)):
            # Normalize the topic's word weights; list / scalar works here
            # because the entries are numpy floats (result is an ndarray).
            lambdak = list(testlambda[k, :])
            lambdak = lambdak / sum(lambdak)
            # Sort (weight, vocab-index) pairs by weight, descending.
            temp = zip(lambdak, range(0, len(lambdak)))
            temp = sorted(temp, key = lambda x: x[0], reverse=True)
            #print 'topic %d:' % (k)
            #for i in range(0, 53):
            #    print '%20s \t---\t %.4f' % (vocab[temp[i][1]], temp[i][0])
            print 'topic %d:' % (k), '\t', [vocab[temp[i][1]] for i in range(10)]
def make_topics():
    '''Render word-cloud images and a contact sheet for every topic count.'''
    for topic_count in range(my.TOPICS_START, my.TOPICS_END+1, my.TOPICS_STEP):
        _make_topic(topic_count)
def _make_topic(K=10):
    '''Render per-topic word-cloud PNGs for the K-topic model.

    Loads the saved lambda matrix, takes each topic's top-10 words with
    normalized integer-percent weights, renders tag clouds into both the
    word_clouds folder and the tex/gfx tree, then builds a LaTeX contact
    sheet via _make_tex.
    '''
    print '\n', K, '\n'
    path = 'data/' + my.DATA_FOLDER + 'lda/'
    with open(path + 'lda_legend.json', 'rb') as fp:
        legend = anyjson.loads(fp.read())
    #vocab = legend.keys()
    vocab = legend.values()
    testlambda = numpy.loadtxt(path + 'models/' + str(K) + '/' \
    + 'output_lambda.dat')
    # Ensure every output directory exists before rendering anything.
    tex_path = path + 'tex/'
    if not os.path.exists(tex_path): os.makedirs(tex_path)
    pdfs_path = path + 'pdfs/'
    if not os.path.exists(pdfs_path): os.makedirs(pdfs_path)
    tex_gfx_path = path + 'tex/gfx/word_clouds/' + str(K) + '/'
    if not os.path.exists(tex_gfx_path): os.makedirs(tex_gfx_path)
    word_path = path + 'word_clouds/' + str(K) + '/'
    if not os.path.exists(word_path): os.makedirs(word_path)
    for k in range(0, len(testlambda)):
        # Normalize topic weights (entries are numpy scalars, so
        # list / scalar broadcasts to an ndarray).
        lambdak = list(testlambda[k, :])
        lambdak = lambdak / sum(lambdak)
        temp = zip(lambdak, range(0, len(lambdak)))
        temp = sorted(temp, key = lambda x: x[0], reverse=True)
        words = [vocab[temp[i][1]] for i in range(10)]
        weights = [temp[i][0] for i in range(10)]
        # Integer percentage of each word within the top-10 mass.
        fracs = [int(round(100 * w / sum(weights))) for w in weights]
        print 'topic %d:' % (k), '\t', words
        text = [(words[i], fracs[i]) for i in range(len(words))]
        tags = make_tags(text, minsize=20, maxsize=50,
        colors=COLOR_SCHEMES['oldschool'])
        create_tag_image(tags, word_path + str(k) + '.png',
        size=(500, 500),
        layout=3)
        create_tag_image(tags, tex_gfx_path + str(k) + '.png',
        size=(500, 500),
        layout=3)
    _make_tex(filename=str(K) + '-topics',
    title='June 30, 6:00am +24 hrs', n_topics=K)
def _make_tex(filename, title, n_topics):
    '''Generate a LaTeX contact sheet of the topic word-cloud images.

    Writes <filename>.tex under lda/tex, runs pdflatex there, and copies
    the resulting PDF into the lda/pdfs folder.

    NOTE: single-backslash sequences like \documentclass survive because
    Python leaves unrecognized escapes intact; \\ is used only where the
    escape (\n, \b, \t, \f, ...) would otherwise be interpreted.
    '''
    head = """\documentclass[11pt,letterpaper]{report}
\usepackage[landscape,margin=0.5in]{geometry}
\usepackage{multicol}
\usepackage{nopageno}
\usepackage{graphicx}
\usepackage{pifont}
\usepackage{amsmath}
\setlength{\parindent}{0cm}
\setlength{\pdfpageheight}{60in}
\\newcommand{\subfolder}{word_clouds/%s}
\\begin{document}
\\textbf{NBA Drafts 2013 \\texttt{%s}. %s topics}
\\begin{centering}
""" % (n_topics, title, n_topics)
    foot = """\end{centering}
\end{document}
"""
    body = []
    for i in range(n_topics):
        body.append("""\\frame{\includegraphics[width=3.33in]{gfx/\subfolder/%s}}""" % i)
    body = '\n'.join(body)
    tex = '\n'.join([head, body, foot])
    path = 'data/' + my.DATA_FOLDER + 'lda/tex/'
    with open(path + filename + '.tex', 'wb') as fp:
        fp.write(tex)
    # Compile in the tex folder, then stage the PDF in the pdfs folder.
    os.system('cd ' + path + '; pdflatex ' + filename)
    pdf_path = 'data/' + my.DATA_FOLDER + 'lda/' + 'pdfs/'
    os.system('cp ' + path + filename + '.pdf' + ' ' + pdf_path)
| |
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from selenium.webdriver.remote.webelement import WebElement
try:
    # Python 2: rebind 'str' to unicode so the type checks in the tests
    # accept the unicode strings returned by the remote driver.
    str = unicode
except NameError:
    # Python 3: 'unicode' no longer exists and str is already unicode.
    pass
class ExecutingJavaScriptTests(unittest.TestCase):
    """Tests for WebDriver.execute_script argument/return-value marshalling.

    Relies on ``self.driver`` and ``self.webserver`` being attached by the
    test harness before these tests run.
    """

    def testShouldBeAbleToExecuteSimpleJavascriptAndReturnAString(self):
        self._loadPage("xhtmlTest")
        result = self.driver.execute_script("return document.title")
        self.assertTrue(type(result) == str,
                        "The type of the result is %s" % type(result))
        self.assertEqual("XHTML Test Page", result)

    def testShouldBeAbleToExecuteSimpleJavascriptAndReturnAnInteger(self):
        self._loadPage("nestedElements")
        result = self.driver.execute_script("return document.getElementsByName('checky').length")
        self.assertTrue(type(result) == int)
        self.assertTrue(int(result) > 1)

    #@Ignore(SELENESE)
    def testShouldBeAbleToExecuteSimpleJavascriptAndReturnAWebElement(self):
        self._loadPage("xhtmlTest")
        result = self.driver.execute_script("return document.getElementById('id1')")
        self.assertTrue(result is not None)
        self.assertTrue(type(result) == WebElement)
        self.assertEqual("a", result.tag_name.lower())

    def testShouldBeAbleToExecuteSimpleJavascriptAndReturnABoolean(self):
        self._loadPage("xhtmlTest")
        result = self.driver.execute_script("return true")
        self.assertTrue(result is not None)
        self.assertTrue(type(result) == bool)
        self.assertTrue(bool(result))

    #@Ignore(SELENESE, IPHONE)
    def testShouldBeAbleToExecuteSimpleJavascriptAndAStringsArray(self):
        self._loadPage("javascriptPage")
        expectedResult = []
        expectedResult.append("zero")
        expectedResult.append("one")
        expectedResult.append("two")
        result = self.driver.execute_script(
            "return ['zero', 'one', 'two']")
        self.assertEqual(expectedResult, result)

    #@Ignore(SELENESE, IPHONE)
    def testShouldBeAbleToExecuteSimpleJavascriptAndReturnAnArray(self):
        self._loadPage("javascriptPage")
        expectedResult = []
        expectedResult.append("zero")
        subList = []
        subList.append(True)
        subList.append(False)
        expectedResult.append(subList)
        result = self.driver.execute_script("return ['zero', [true, false]]")
        self.assertTrue(result is not None)
        self.assertTrue(type(result) == list)
        # BUG FIX: this was assertTrue(expectedResult, result), which only
        # checked that expectedResult is truthy (result was treated as the
        # failure message); compare the two values instead.
        self.assertEqual(expectedResult, result)

    def testPassingAndReturningAnIntShouldReturnAWholeNumber(self):
        self._loadPage("javascriptPage")
        expectedResult = 1
        result = self.driver.execute_script("return arguments[0]", expectedResult)
        self.assertTrue(type(result) == int)
        self.assertEqual(expectedResult, result)

    def testPassingAndReturningADoubleShouldReturnADecimal(self):
        self._loadPage("javascriptPage")
        expectedResult = 1.2
        result = self.driver.execute_script("return arguments[0]", expectedResult)
        self.assertTrue(type(result) == float)
        self.assertEqual(expectedResult, result)

    def testShouldThrowAnExceptionWhenTheJavascriptIsBad(self):
        self._loadPage("xhtmlTest")
        try:
            self.driver.execute_script("return squiggle()")
            self.fail("Expected an exception")
        except Exception as e:
            pass

    def testShouldBeAbleToCallFunctionsDefinedOnThePage(self):
        self._loadPage("javascriptPage")
        self.driver.execute_script("displayMessage('I like cheese')")
        text = self.driver.find_element_by_id("result").text
        self.assertEqual("I like cheese", text.strip())

    def testShouldBeAbleToPassAStringAnAsArgument(self):
        self._loadPage("javascriptPage")
        value = self.driver.execute_script(
            "return arguments[0] == 'fish' ? 'fish' : 'not fish'", "fish")
        self.assertEqual("fish", value)

    def testShouldBeAbleToPassABooleanAnAsArgument(self):
        self._loadPage("javascriptPage")
        value = bool(self.driver.execute_script("return arguments[0] == true", True))
        self.assertTrue(value)

    def testShouldBeAbleToPassANumberAnAsArgument(self):
        self._loadPage("javascriptPage")
        value = bool(self.driver.execute_script("return arguments[0] == 1 ? true : false", 1))
        self.assertTrue(value)

    def testShouldBeAbleToPassAWebElementAsArgument(self):
        self._loadPage("javascriptPage")
        button = self.driver.find_element_by_id("plainButton")
        value = self.driver.execute_script(
            "arguments[0]['flibble'] = arguments[0].getAttribute('id'); return arguments[0]['flibble']",
            button)
        self.assertEqual("plainButton", value)

    def testShouldBeAbleToPassAnArrayAsArgument(self):
        self._loadPage("javascriptPage")
        array = ["zero", 1, True, 3.14159]
        length = int(self.driver.execute_script("return arguments[0].length", array))
        self.assertEqual(len(array), length)

    def testShouldBeAbleToPassACollectionAsArgument(self):
        self._loadPage("javascriptPage")
        collection = []
        collection.append("Cheddar")
        collection.append("Brie")
        collection.append(7)
        length = int(self.driver.execute_script("return arguments[0].length", collection))
        self.assertEqual(len(collection), length)
        collection = []
        collection.append("Gouda")
        collection.append("Stilton")
        collection.append("Stilton")
        collection.append(True)
        length = int(self.driver.execute_script("return arguments[0].length", collection))
        self.assertEqual(len(collection), length)

    def testShouldThrowAnExceptionIfAnArgumentIsNotValid(self):
        self._loadPage("javascriptPage")
        try:
            # BUG FIX: this passed the undefined name 'driver', whose
            # NameError made the test pass vacuously; pass the driver object
            # itself, which is not a valid (serializable) script argument.
            self.driver.execute_script("return arguments[0]", self.driver)
            self.fail("Exception should have been thrown")
        except Exception as e:
            pass

    def testShouldBeAbleToPassInMoreThanOneArgument(self):
        self._loadPage("javascriptPage")
        result = self.driver.execute_script("return arguments[0] + arguments[1]", "one", "two")
        self.assertEqual("onetwo", result)

    def testJavascriptStringHandlingShouldWorkAsExpected(self):
        self._loadPage("javascriptPage")
        value = self.driver.execute_script("return ''")
        self.assertEqual("", value)
        value = self.driver.execute_script("return undefined")
        self.assertTrue(value is None)
        value = self.driver.execute_script("return ' '")
        self.assertEqual(" ", value)

    def testShouldBeAbleToCreateAPersistentValue(self):
        self._loadPage("formPage")
        self.driver.execute_script("document.alerts = []")
        self.driver.execute_script("document.alerts.push('hello world')")
        text = self.driver.execute_script("return document.alerts.shift()")
        self.assertEqual("hello world", text)

    def testCanPassADictionaryAsAParameter(self):
        self._loadSimplePage()
        nums = [1, 2]
        args = {"bar": "test", "foo": nums}
        res = self.driver.execute_script("return arguments[0]['foo'][1]", args)
        self.assertEqual(2, res)

    def testCanPassANone(self):
        self._loadSimplePage()
        res = self.driver.execute_script("return arguments[0] === null", None)
        self.assertTrue(res)

    def _pageURL(self, name):
        # Resolve a fixture page name against the test webserver's base URL.
        return self.webserver.where_is(name + '.html')

    def _loadSimplePage(self):
        self._loadPage("simpleTest")

    def _loadPage(self, name):
        self.driver.get(self._pageURL(name))
| |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Windows can't run .sh files, so this is a Python implementation of
update.sh. This script should replace update.sh on all platforms eventually."""
import argparse
import contextlib
import cStringIO
import os
import re
import shutil
import subprocess
import stat
import sys
import tarfile
import time
import urllib2
import zipfile
# Do NOT CHANGE this if you don't know what you're doing -- see
# https://code.google.com/p/chromium/wiki/UpdatingClang
# Reverting problematic clang rolls is safe, though.
# Note: this revision is only used for Windows. Other platforms use update.sh.
# TODO(thakis): Use the same revision on Windows and non-Windows.
# TODO(thakis): Remove update.sh, use update.py everywhere.
LLVM_WIN_REVISION = '238562'
use_head_revision = 'LLVM_FORCE_HEAD_REVISION' in os.environ
if use_head_revision:
  LLVM_WIN_REVISION = 'HEAD'

# This is incremented when pushing a new build of Clang at the same revision.
CLANG_SUB_REVISION=1

PACKAGE_VERSION = "%s-%s" % (LLVM_WIN_REVISION, CLANG_SUB_REVISION)

# Path constants. (All of these should be absolute paths.)
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
CHROMIUM_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', '..', '..'))
THIRD_PARTY_DIR = os.path.join(CHROMIUM_DIR, 'third_party')
LLVM_DIR = os.path.join(THIRD_PARTY_DIR, 'llvm')
LLVM_BOOTSTRAP_DIR = os.path.join(THIRD_PARTY_DIR, 'llvm-bootstrap')
LLVM_BOOTSTRAP_INSTALL_DIR = os.path.join(THIRD_PARTY_DIR,
                                          'llvm-bootstrap-install')
CHROME_TOOLS_SHIM_DIR = os.path.join(LLVM_DIR, 'tools', 'chrometools')
LLVM_BUILD_DIR = os.path.join(CHROMIUM_DIR, 'third_party', 'llvm-build',
                              'Release+Asserts')
COMPILER_RT_BUILD_DIR = os.path.join(LLVM_BUILD_DIR, '32bit-compiler-rt')
CLANG_DIR = os.path.join(LLVM_DIR, 'tools', 'clang')
LLD_DIR = os.path.join(LLVM_DIR, 'tools', 'lld')
COMPILER_RT_DIR = os.path.join(LLVM_DIR, 'projects', 'compiler-rt')
LLVM_BUILD_TOOLS_DIR = os.path.abspath(
    os.path.join(LLVM_DIR, '..', 'llvm-build-tools'))
# Records which PACKAGE_VERSION is currently installed (see Read/WriteStampFile).
STAMP_FILE = os.path.join(LLVM_DIR, '..', 'llvm-build', 'cr_build_revision')
BINUTILS_DIR = os.path.join(THIRD_PARTY_DIR, 'binutils')
# Clang/LLVM release version string; used to locate versioned lib/include dirs.
VERSION = '3.7.0'

# URL for pre-built binaries.
CDS_URL = 'https://commondatastorage.googleapis.com/chromium-browser-clang'

LLVM_REPO_URL='https://llvm.org/svn/llvm-project'
if 'LLVM_REPO_URL' in os.environ:
  LLVM_REPO_URL = os.environ['LLVM_REPO_URL']
def DownloadUrl(url, output_file):
"""Download url into output_file."""
CHUNK_SIZE = 4096
TOTAL_DOTS = 10
sys.stdout.write('Downloading %s ' % url)
sys.stdout.flush()
response = urllib2.urlopen(url)
total_size = int(response.info().getheader('Content-Length').strip())
bytes_done = 0
dots_printed = 0
while True:
chunk = response.read(CHUNK_SIZE)
if not chunk:
break
output_file.write(chunk)
bytes_done += len(chunk)
num_dots = TOTAL_DOTS * bytes_done / total_size
sys.stdout.write('.' * (num_dots - dots_printed))
sys.stdout.flush()
dots_printed = num_dots
print ' Done.'
def ReadStampFile():
  """Return the stamp file's contents, or '' when it cannot be opened."""
  try:
    with open(STAMP_FILE, 'r') as stamp:
      return stamp.read()
  except IOError:
    # Missing stamp simply means no build is recorded yet.
    return ''
def WriteStampFile(s):
  """Record |s| in the stamp file, creating the build directory first."""
  if not os.path.exists(LLVM_BUILD_DIR):
    os.makedirs(LLVM_BUILD_DIR)
  stamp = open(STAMP_FILE, 'w')
  try:
    stamp.write(s)
  finally:
    stamp.close()
def GetSvnRevision(svn_repo):
  """Return the current revision (as a string) of the svn repo at |svn_repo|."""
  info_text = subprocess.check_output(['svn', 'info', svn_repo], shell=True)
  match = re.search(r'Revision: (\d+)', info_text)
  return match.group(1)
def RmTree(dir):
  """Recursively delete |dir|, forcing removal of read-only entries."""
  def MakeWritableAndRetry(func, path, _):
    # Subversion checkouts contain read-only files; give the owner write
    # permission and retry the failed operation once, otherwise re-raise.
    if not os.access(path, os.W_OK):
      os.chmod(path, stat.S_IWUSR)
      return func(path)
    raise
  shutil.rmtree(dir, onerror=MakeWritableAndRetry)
def RunCommand(command, fail_hard=True):
  """Run command and return success (True) or failure; or if fail_hard is
  True, exit on failure."""
  print 'Running %s' % (str(command))
  # shell=True so composed commands like "SetupScript && cmake ..." work.
  if subprocess.call(command, shell=True) == 0:
    return True
  print 'Failed.'
  if fail_hard:
    sys.exit(1)
  return False
def CopyFile(src, dst):
  """Copy a file from src to dst."""
  shutil.copy(src, dst)
  # Logged after the fact, so the message only appears for successful copies.
  print "Copying %s to %s" % (src, dst)
def CopyDirectoryContents(src, dst, filename_filter=None):
  """Copy every file found under |src| (recursively) into the flat
  directory |dst|, optionally keeping only names matching the
  |filename_filter| regex. |dst| is created when missing."""
  if not os.path.exists(dst):
    os.makedirs(dst)
  for root, _, filenames in os.walk(src):
    for name in filenames:
      if not filename_filter or re.match(filename_filter, name):
        CopyFile(os.path.join(root, name), dst)
def Checkout(name, url, dir):
  """Checkout the SVN module at url into dir. Use name for the log message."""
  print "Checking out %s r%s into '%s'" % (name, LLVM_WIN_REVISION, dir)
  command = ['svn', 'checkout', '--force', url + '@' + LLVM_WIN_REVISION, dir]
  if RunCommand(command, fail_hard=False):
    return
  # First attempt failed (e.g. corrupt working copy): wipe the directory and
  # retry once; this time RunCommand exits the script on failure.
  if os.path.isdir(dir):
    print "Removing %s." % (dir)
    RmTree(dir)
  print "Retrying."
  RunCommand(command)
def DeleteChromeToolsShim():
  # Best-effort: ignore_errors covers the case where the shim was never created.
  shutil.rmtree(CHROME_TOOLS_SHIM_DIR, ignore_errors=True)
def CreateChromeToolsShim():
  """Hooks the Chrome tools into the LLVM build.

  Several Chrome tools have dependencies on LLVM/Clang libraries. The LLVM build
  detects implicit tools in the tools subdirectory, so this helper install a
  shim CMakeLists.txt that forwards to the real directory for the Chrome tools.

  Note that the shim directory name intentionally has no - or _. The implicit
  tool detection logic munges them in a weird way."""
  assert not any(i in os.path.basename(CHROME_TOOLS_SHIM_DIR) for i in '-_')
  os.mkdir(CHROME_TOOLS_SHIM_DIR)
  # open() (not the Python-2-only file() builtin) and a with-block so the
  # shim file is flushed/closed even if a write fails.
  with open(os.path.join(CHROME_TOOLS_SHIM_DIR, 'CMakeLists.txt'), 'w') as f:
    f.write('# Automatically generated by tools/clang/scripts/update.py. ' +
            'Do not edit.\n')
    f.write('# Since tools/clang is located in another directory, use the \n')
    f.write('# two arg version to specify where build artifacts go. CMake\n')
    f.write('# disallows reuse of the same binary dir for multiple source\n')
    # BUG FIX: this comment line was written twice into the generated file.
    f.write('# dirs, so the build artifacts need to go into a subdirectory.\n')
    f.write('if (CHROMIUM_TOOLS_SRC)\n')
    f.write('  add_subdirectory(${CHROMIUM_TOOLS_SRC} ' +
            '${CMAKE_CURRENT_BINARY_DIR}/a)\n')
    f.write('endif (CHROMIUM_TOOLS_SRC)\n')
def AddCMakeToPath():
  """Download CMake (if not already unpacked) and prepend its bin dir to PATH."""
  if sys.platform == 'win32':
    zip_name = 'cmake-3.2.2-win32-x86.zip'
    cmake_dir = os.path.join(LLVM_BUILD_TOOLS_DIR,
                             'cmake-3.2.2-win32-x86', 'bin')
  else:
    suffix = 'Darwin' if sys.platform == 'darwin' else 'Linux'
    zip_name = 'cmake310_%s.tgz' % suffix
    cmake_dir = os.path.join(LLVM_BUILD_TOOLS_DIR, 'cmake310', 'bin')
  if not os.path.exists(cmake_dir):
    if not os.path.exists(LLVM_BUILD_TOOLS_DIR):
      os.makedirs(LLVM_BUILD_TOOLS_DIR)
    # The cmake archive is smaller than 20 MB, small enough to keep in memory:
    with contextlib.closing(cStringIO.StringIO()) as f:
      DownloadUrl(CDS_URL + '/tools/' + zip_name, f)
      f.seek(0)
      if zip_name.endswith('.zip'):
        zipfile.ZipFile(f).extractall(path=LLVM_BUILD_TOOLS_DIR)
      else:
        # BUG FIX: the tarball was extracted to LLVM_BUILD_DIR, but cmake_dir
        # lives under LLVM_BUILD_TOOLS_DIR -- so cmake_dir never existed and
        # the archive was re-downloaded (and PATH pointed nowhere) every run.
        tarfile.open(mode='r:gz', fileobj=f).extractall(
            path=LLVM_BUILD_TOOLS_DIR)
  os.environ['PATH'] = cmake_dir + os.pathsep + os.environ.get('PATH', '')
vs_version = None  # memoized result of GetVSVersion()
def GetVSVersion():
  """Return (and cache in the module-global vs_version) the gyp
  MSVSVersion object for Visual Studio 2013."""
  global vs_version
  if vs_version:
    return vs_version
  # Try using the toolchain in depot_tools.
  # This sets environment variables used by SelectVisualStudioVersion below.
  sys.path.append(os.path.join(CHROMIUM_DIR, 'build'))
  import vs_toolchain
  vs_toolchain.SetEnvironmentAndGetRuntimeDllDirs()
  # Use gyp to find the MSVS installation, either in depot_tools as per above,
  # or a system-wide installation otherwise.
  sys.path.append(os.path.join(CHROMIUM_DIR, 'tools', 'gyp', 'pylib'))
  import gyp.MSVSVersion
  vs_version = gyp.MSVSVersion.SelectVisualStudioVersion('2013')
  return vs_version
def UpdateClang(args):
  """Bring clang in LLVM_BUILD_DIR up to PACKAGE_VERSION and return 0 on
  success: either unpack a prebuilt package or sync and build locally."""
  print 'Updating Clang to %s...' % PACKAGE_VERSION
  if ReadStampFile() == PACKAGE_VERSION:
    print 'Already up to date.'
    return 0
  # Reset the stamp file in case the build is unsuccessful.
  WriteStampFile('')
  if not args.force_local_build:
    cds_file = "clang-%s.tgz" % PACKAGE_VERSION
    cds_full_url = CDS_URL + '/Win/' + cds_file
    # Check if there's a prebuilt binary and if so just fetch that. That's
    # faster, and goma relies on having matching binary hashes on client and
    # server too.
    print 'Trying to download prebuilt clang'
    # clang packages are smaller than 50 MB, small enough to keep in memory.
    with contextlib.closing(cStringIO.StringIO()) as f:
      try:
        DownloadUrl(cds_full_url, f)
        f.seek(0)
        tarfile.open(mode='r:gz', fileobj=f).extractall(path=LLVM_BUILD_DIR)
        print 'clang %s unpacked' % PACKAGE_VERSION
        WriteStampFile(PACKAGE_VERSION)
        return 0
      except urllib2.HTTPError:
        # No prebuilt package for this version; fall through to local build.
        print 'Did not find prebuilt clang %s, building locally' % cds_file
  AddCMakeToPath()
  # Re-create the shim from scratch so a stale one never leaks into the build.
  DeleteChromeToolsShim();
  Checkout('LLVM', LLVM_REPO_URL + '/llvm/trunk', LLVM_DIR)
  Checkout('Clang', LLVM_REPO_URL + '/cfe/trunk', CLANG_DIR)
  Checkout('LLD', LLVM_REPO_URL + '/lld/trunk', LLD_DIR)
  Checkout('compiler-rt', LLVM_REPO_URL + '/compiler-rt/trunk', COMPILER_RT_DIR)
  CreateChromeToolsShim();
  # If building at head, define a macro that plugins can use for #ifdefing
  # out code that builds at head, but not at CLANG_REVISION or vice versa.
  cflags = cxxflags = ''
  # If building at head, define a macro that plugins can use for #ifdefing
  # out code that builds at head, but not at LLVM_WIN_REVISION or vice versa.
  if use_head_revision:
    cflags += ' -DLLVM_FORCE_HEAD_REVISION'
    cxxflags += ' -DLLVM_FORCE_HEAD_REVISION'
  base_cmake_args = ['-GNinja',
                     '-DCMAKE_BUILD_TYPE=Release',
                     '-DLLVM_ENABLE_ASSERTIONS=ON',
                     '-DLLVM_ENABLE_THREADS=OFF',
                     ]
  cc, cxx = None, None
  if args.bootstrap:
    # Stage 1: build a bootstrap clang-cl, then use it as the compiler for
    # the final build below.
    print 'Building bootstrap compiler'
    if not os.path.exists(LLVM_BOOTSTRAP_DIR):
      os.makedirs(LLVM_BOOTSTRAP_DIR)
    os.chdir(LLVM_BOOTSTRAP_DIR)
    bootstrap_args = base_cmake_args + [
        '-DLLVM_TARGETS_TO_BUILD=host',
        '-DCMAKE_INSTALL_PREFIX=' + LLVM_BOOTSTRAP_INSTALL_DIR,
        '-DCMAKE_C_FLAGS=' + cflags,
        '-DCMAKE_CXX_FLAGS=' + cxxflags,
        ]
    if cc is not None: bootstrap_args.append('-DCMAKE_C_COMPILER=' + cc)
    if cxx is not None: bootstrap_args.append('-DCMAKE_CXX_COMPILER=' + cxx)
    RunCommand(GetVSVersion().SetupScript('x64') +
               ['&&', 'cmake'] + bootstrap_args + [LLVM_DIR])
    RunCommand(GetVSVersion().SetupScript('x64') + ['&&', 'ninja'])
    if args.run_tests:
      RunCommand(GetVSVersion().SetupScript('x64') +
                 ['&&', 'ninja', 'check-all'])
    RunCommand(GetVSVersion().SetupScript('x64') + ['&&', 'ninja', 'install'])
    # TODO(thakis): Set these to clang / clang++ on posix once this script
    # is used on posix.
    cc = os.path.join(LLVM_BOOTSTRAP_INSTALL_DIR, 'bin', 'clang-cl.exe')
    cxx = os.path.join(LLVM_BOOTSTRAP_INSTALL_DIR, 'bin', 'clang-cl.exe')
    # CMake has a hard time with backslashes in compiler paths:
    # https://stackoverflow.com/questions/13050827
    cc = cc.replace('\\', '/')
    cxx = cxx.replace('\\', '/')
    print 'Building final compiler'
  # Build clang.
  binutils_incdir = ''
  if sys.platform.startswith('linux'):
    binutils_incdir = os.path.join(BINUTILS_DIR, 'Linux_x64/Release/include')
  cmake_args = base_cmake_args + [
      '-DLLVM_BINUTILS_INCDIR=' + binutils_incdir,
      '-DCMAKE_C_FLAGS=' + cflags,
      '-DCMAKE_CXX_FLAGS=' + cxxflags,
      '-DCHROMIUM_TOOLS_SRC=%s' % os.path.join(CHROMIUM_DIR, 'tools', 'clang'),
      '-DCHROMIUM_TOOLS=%s' % ';'.join(args.tools)]
  # TODO(thakis): Append this to base_cmake_args instead once compiler-rt
  # can build with clang-cl (http://llvm.org/PR23698)
  if cc is not None: cmake_args.append('-DCMAKE_C_COMPILER=' + cc)
  if cxx is not None: cmake_args.append('-DCMAKE_CXX_COMPILER=' + cxx)
  if not os.path.exists(LLVM_BUILD_DIR):
    os.makedirs(LLVM_BUILD_DIR)
  os.chdir(LLVM_BUILD_DIR)
  RunCommand(GetVSVersion().SetupScript('x64') +
             ['&&', 'cmake'] + cmake_args + [LLVM_DIR])
  RunCommand(GetVSVersion().SetupScript('x64') + ['&&', 'ninja', 'all'])
  # Do an x86 build of compiler-rt to get the 32-bit ASan run-time.
  # TODO(hans): Remove once the regular build above produces this.
  if not os.path.exists(COMPILER_RT_BUILD_DIR):
    os.makedirs(COMPILER_RT_BUILD_DIR)
  os.chdir(COMPILER_RT_BUILD_DIR)
  # TODO(thakis): Add this once compiler-rt can build with clang-cl (see
  # above).
  #if args.bootstrap:
  # The bootstrap compiler produces 64-bit binaries by default.
  #cflags += ' -m32'
  #cxxflags += ' -m32'
  compiler_rt_args = base_cmake_args + [
      '-DCMAKE_C_FLAGS=' + cflags,
      '-DCMAKE_CXX_FLAGS=' + cxxflags]
  RunCommand(GetVSVersion().SetupScript('x86') +
             ['&&', 'cmake'] + compiler_rt_args + [LLVM_DIR])
  RunCommand(GetVSVersion().SetupScript('x86') + ['&&', 'ninja', 'compiler-rt'])
  # TODO(hans): Make this (and the .gypi and .isolate files) version number
  # independent.
  asan_rt_lib_src_dir = os.path.join(COMPILER_RT_BUILD_DIR, 'lib', 'clang',
                                     VERSION, 'lib', 'windows')
  asan_rt_lib_dst_dir = os.path.join(LLVM_BUILD_DIR, 'lib', 'clang',
                                     VERSION, 'lib', 'windows')
  CopyDirectoryContents(asan_rt_lib_src_dir, asan_rt_lib_dst_dir,
                        r'^.*-i386\.lib$')
  CopyDirectoryContents(asan_rt_lib_src_dir, asan_rt_lib_dst_dir,
                        r'^.*-i386\.dll$')
  CopyFile(os.path.join(asan_rt_lib_src_dir, '..', '..', 'asan_blacklist.txt'),
           os.path.join(asan_rt_lib_dst_dir, '..', '..'))
  # Make an extra copy of the sanitizer headers, to be put on the include path
  # of the fallback compiler.
  sanitizer_include_dir = os.path.join(LLVM_BUILD_DIR, 'lib', 'clang', VERSION,
                                       'include', 'sanitizer')
  aux_sanitizer_include_dir = os.path.join(LLVM_BUILD_DIR, 'lib', 'clang',
                                           VERSION, 'include_sanitizer',
                                           'sanitizer')
  if not os.path.exists(aux_sanitizer_include_dir):
    os.makedirs(aux_sanitizer_include_dir)
  for _, _, files in os.walk(sanitizer_include_dir):
    for f in files:
      CopyFile(os.path.join(sanitizer_include_dir, f),
               aux_sanitizer_include_dir)
  # Run tests.
  if args.run_tests or use_head_revision:
    os.chdir(LLVM_BUILD_DIR)
    RunCommand(GetVSVersion().SetupScript('x64') +
               ['&&', 'ninja', 'cr-check-all'])
  if args.run_tests:
    os.chdir(LLVM_BUILD_DIR)
    RunCommand(GetVSVersion().SetupScript('x64') +
               ['&&', 'ninja', 'check-all'])
  WriteStampFile(PACKAGE_VERSION)
  print 'Clang update was successful.'
  return 0
def main():
  """Entry point for the Clang update script.

  On non-Windows platforms this delegates entirely to update.sh; on
  Windows/cygwin it parses arguments and drives UpdateClang().

  Returns:
    Process exit code (0 on success / skip).
  """
  if not sys.platform in ['win32', 'cygwin']:
    # For non-Windows, fall back to update.sh.
    # TODO(hans): Make update.py replace update.sh completely.

    # This script is called by gclient. gclient opens its hooks subprocesses
    # with (stdout=subprocess.PIPE, stderr=subprocess.STDOUT) and then does
    # custom output processing that breaks printing '\r' characters for
    # single-line updating status messages as printed by curl and wget.
    # Work around this by setting stderr of the update.sh process to stdin (!):
    # gclient doesn't redirect stdin, and while stdin itself is read-only, a
    # dup()ed sys.stdin is writable, try
    #   fd2 = os.dup(sys.stdin.fileno()); os.write(fd2, 'hi')
    # TODO: Fix gclient instead, http://crbug.com/95350
    if '--no-stdin-hack' in sys.argv:
      sys.argv.remove('--no-stdin-hack')
      stderr = None
    else:
      try:
        stderr = os.fdopen(os.dup(sys.stdin.fileno()))
      except:
        # If stdin can't be duplicated (e.g. closed), fall back to the real
        # stderr rather than failing the whole update.
        stderr = sys.stderr
    return subprocess.call(
        [os.path.join(os.path.dirname(__file__), 'update.sh')] + sys.argv[1:],
        stderr=stderr)

  parser = argparse.ArgumentParser(description='Build Clang.')
  parser.add_argument('--bootstrap', action='store_true',
                      help='first build clang with CC, then with itself.')
  parser.add_argument('--if-needed', action='store_true',
                      help="run only if the script thinks clang is needed")
  parser.add_argument('--force-local-build', action='store_true',
                      help="don't try to download prebuild binaries")
  parser.add_argument('--print-revision', action='store_true',
                      help='print current clang revision and exit.')
  parser.add_argument('--run-tests', action='store_true',
                      help='run tests after building; only for local builds')
  parser.add_argument('--tools', nargs='*',
                      help='select which chrome tools to build',
                      default=['plugins', 'blink_gc_plugin'])
  # For now, these flags are only used for the non-Windows flow, but argparser
  # gets mad if it sees a flag it doesn't recognize.
  parser.add_argument('--no-stdin-hack', action='store_true')
  args = parser.parse_args()

  # An explicit make_clang_dir= means the user manages their own clang.
  if re.search(r'\b(make_clang_dir)=', os.environ.get('GYP_DEFINES', '')):
    print 'Skipping Clang update (make_clang_dir= was set in GYP_DEFINES).'
    return 0

  if args.if_needed:
    is_clang_required = False
    # clang is always used on Mac and Linux.
    if sys.platform == 'darwin' or sys.platform.startswith('linux'):
      is_clang_required = True
    # clang requested via $GYP_DEFINES.
    if re.search(r'\b(clang|asan|lsan|msan|tsan)=1',
                 os.environ.get('GYP_DEFINES', '')):
      is_clang_required = True
    # clang previously downloaded, keep it up-to-date.
    # If you don't want this, delete third_party/llvm-build on your machine.
    if os.path.isdir(LLVM_BUILD_DIR):
      is_clang_required = True
    if not is_clang_required:
      return 0

  global LLVM_WIN_REVISION, PACKAGE_VERSION
  if args.print_revision:
    if use_head_revision:
      print GetSvnRevision(LLVM_DIR)
    else:
      print PACKAGE_VERSION
    return 0

  if LLVM_WIN_REVISION == 'HEAD':
    # Use a real revision number rather than HEAD to make sure that the stamp
    # file logic works.
    LLVM_WIN_REVISION = GetSvnRevision(LLVM_REPO_URL)
    PACKAGE_VERSION = LLVM_WIN_REVISION + '-0'

  return UpdateClang(args)
# Propagate main()'s return value as the process exit status.
if __name__ == '__main__':
  sys.exit(main())
| |
import os
import shutil
import tempfile
from mock import patch
from mock import Mock
from zope.interface import implements
from twisted.trial import unittest
from twisted.test import proto_helpers
from twisted.internet import defer, error, task, tcp
from twisted.internet.endpoints import TCP4ServerEndpoint
from twisted.internet.endpoints import serverFromString
from twisted.internet.endpoints import clientFromString
from twisted.python.failure import Failure
from twisted.internet.error import ConnectionRefusedError
from twisted.internet.interfaces import IReactorCore
from twisted.internet.interfaces import IProtocolFactory
from twisted.internet.interfaces import IProtocol
from twisted.internet.interfaces import IReactorTCP
from twisted.internet.interfaces import IListeningPort
from twisted.internet.interfaces import IAddress
from txtorcon import TorControlProtocol
from txtorcon import ITorControlProtocol
from txtorcon import TorConfig
from txtorcon import launch_tor
from txtorcon import TCPHiddenServiceEndpoint
from txtorcon import TorClientEndpoint
from txtorcon import TorNotFound
from txtorcon import TCPHiddenServiceEndpointParser
from txtorcon import IProgressProvider
from txtorcon import TorOnionAddress
from txtorcon.util import NoOpProtocolFactory
from txtorcon.endpoints import get_global_tor # FIXME
from txtorcon.endpoints import default_tcp4_endpoint_generator
import util
connectionRefusedFailure = Failure(ConnectionRefusedError())
class EndpointTests(unittest.TestCase):
def setUp(self):
from txtorcon import endpoints
endpoints._global_tor_config = None
del endpoints._global_tor_lock
endpoints._global_tor_lock = defer.DeferredLock()
self.reactor = FakeReactorTcp(self)
self.protocol = FakeControlProtocol([])
self.protocol.event_happened('INFO', 'something craaaaaaazy')
self.protocol.event_happened(
'INFO',
'connection_dir_client_reached_eof(): Uploaded rendezvous '
'descriptor (status 200 ("Service descriptor (v2) stored"))'
)
self.config = TorConfig(self.protocol)
self.protocol.answers.append(
'config/names=\nHiddenServiceOptions Virtual'
)
self.protocol.answers.append('HiddenServiceOptions')
self.patcher = patch(
'txtorcon.torconfig.find_tor_binary',
return_value='/not/tor'
)
self.patcher.start()
def tearDown(self):
from txtorcon import endpoints
endpoints._global_tor_config = None
del endpoints._global_tor_lock
endpoints._global_tor_lock = defer.DeferredLock()
self.patcher.stop()
@defer.inlineCallbacks
def test_global_tor(self):
config = yield get_global_tor(
Mock(),
_tor_launcher=lambda x, y, z: True
)
self.assertEqual(0, config.SOCKSPort)
@defer.inlineCallbacks
def test_global_tor_error(self):
config0 = yield get_global_tor(
Mock(),
_tor_launcher=lambda x, y, z: True
)
# now if we specify a control_port it should be an error since
# the above should have launched one.
try:
config1 = yield get_global_tor(Mock(), control_port=111,
_tor_launcher=lambda x, y, z: True)
self.fail()
except RuntimeError as e:
# should be an error
pass
@defer.inlineCallbacks
def test_endpoint_properties(self):
ep = yield TCPHiddenServiceEndpoint.private_tor(Mock(), 80)
self.assertEqual(None, ep.onion_private_key)
self.assertEqual(None, ep.onion_uri)
ep.hiddenservice = Mock()
ep.hiddenservice.private_key = 'mumble'
self.assertEqual('mumble', ep.onion_private_key)
@defer.inlineCallbacks
def test_private_tor(self):
m = Mock()
from txtorcon import endpoints
endpoints.launch_tor = m
ep = yield TCPHiddenServiceEndpoint.private_tor(Mock(), 80,
control_port=1234)
self.assertTrue(m.called)
@defer.inlineCallbacks
def test_private_tor_no_control_port(self):
m = Mock()
from txtorcon import endpoints
endpoints.launch_tor = m
ep = yield TCPHiddenServiceEndpoint.private_tor(Mock(), 80)
self.assertTrue(m.called)
@defer.inlineCallbacks
def test_system_tor(self):
from test_torconfig import FakeControlProtocol
def boom(*args):
# why does the new_callable thing need a callable that
# returns a callable? Feels like I must be doing something
# wrong somewhere...
def bam(*args, **kw):
return self.protocol
return bam
with patch('txtorcon.endpoints.launch_tor') as launch_mock:
with patch('txtorcon.endpoints.build_tor_connection', new_callable=boom) as btc:
client = clientFromString(
self.reactor,
"tcp:host=localhost:port=9050"
)
ep = yield TCPHiddenServiceEndpoint.system_tor(self.reactor,
client, 80)
port = yield ep.listen(NoOpProtocolFactory())
toa = port.getHost()
self.assertTrue(hasattr(toa, 'onion_uri'))
self.assertTrue(hasattr(toa, 'onion_port'))
port.startListening()
str(port)
port.tor_config
# system_tor should be connecting to a running one,
# *not* launching a new one.
self.assertFalse(launch_mock.called)
@defer.inlineCallbacks
def test_basic(self):
listen = RuntimeError("listen")
connect = RuntimeError("connect")
reactor = proto_helpers.RaisingMemoryReactor(listen, connect)
reactor.addSystemEventTrigger = Mock()
ep = TCPHiddenServiceEndpoint(reactor, self.config, 123)
self.config.bootstrap()
yield self.config.post_bootstrap
self.assertTrue(IProgressProvider.providedBy(ep))
try:
port = yield ep.listen(NoOpProtocolFactory())
self.fail("Should have been an exception")
except RuntimeError as e:
# make sure we called listenTCP not connectTCP
self.assertEqual(e, listen)
repr(self.config.HiddenServices)
def test_progress_updates(self):
config = TorConfig()
ep = TCPHiddenServiceEndpoint(self.reactor, config, 123)
self.assertTrue(IProgressProvider.providedBy(ep))
prog = IProgressProvider(ep)
ding = Mock()
prog.add_progress_listener(ding)
args = (50, "blarg", "Doing that thing we talked about.")
# kind-of cheating, test-wise?
ep._tor_progress_update(*args)
self.assertTrue(ding.called_with(*args))
@patch('txtorcon.endpoints.launch_tor')
def test_progress_updates_private_tor(self, tor):
ep = TCPHiddenServiceEndpoint.private_tor(self.reactor, 1234)
tor.call_args[1]['progress_updates'](40, 'FOO', 'foo to the bar')
return ep
def __test_progress_updates_system_tor(self):
ep = TCPHiddenServiceEndpoint.system_tor(self.reactor, 1234)
ep._tor_progress_update(40, "FOO", "foo to bar")
return ep
@patch('txtorcon.endpoints.get_global_tor')
def test_progress_updates_global_tor(self, tor):
ep = TCPHiddenServiceEndpoint.global_tor(self.reactor, 1234)
tor.call_args[1]['progress_updates'](40, 'FOO', 'foo to the bar')
return ep
def test_hiddenservice_key_unfound(self):
ep = TCPHiddenServiceEndpoint.private_tor(
self.reactor,
1234,
hidden_service_dir='/dev/null'
)
# FIXME Mock() should work somehow for this, but I couldn't
# make it "go"
class Blam(object):
@property
def private_key(self):
raise IOError("blam")
ep.hiddenservice = Blam()
self.assertEqual(ep.onion_private_key, None)
return ep
def test_multiple_listen(self):
ep = TCPHiddenServiceEndpoint(self.reactor, self.config, 123)
d0 = ep.listen(NoOpProtocolFactory())
@defer.inlineCallbacks
def more_listen(arg):
yield arg.stopListening()
d1 = ep.listen(NoOpProtocolFactory())
def foo(arg):
return arg
d1.addBoth(foo)
defer.returnValue(arg)
return
d0.addBoth(more_listen)
self.config.bootstrap()
def check(arg):
self.assertEqual('127.0.0.1', ep.tcp_endpoint._interface)
self.assertEqual(len(self.config.HiddenServices), 1)
d0.addCallback(check).addErrback(self.fail)
return d0
def test_already_bootstrapped(self):
self.config.bootstrap()
ep = TCPHiddenServiceEndpoint(self.reactor, self.config, 123)
d = ep.listen(NoOpProtocolFactory())
return d
@defer.inlineCallbacks
def test_explicit_data_dir(self):
config = TorConfig(self.protocol)
ep = TCPHiddenServiceEndpoint(self.reactor, config, 123, '/dev/null')
# make sure listen() correctly configures our hidden-serivce
# with the explicit directory we passed in above
d = ep.listen(NoOpProtocolFactory())
def foo(fail):
print "ERROR", fail
d.addErrback(foo)
port = yield d
self.assertEqual(1, len(config.HiddenServices))
self.assertEqual(config.HiddenServices[0].dir, '/dev/null')
def test_failure(self):
self.reactor.failures = 1
ep = TCPHiddenServiceEndpoint(self.reactor, self.config, 123)
d = ep.listen(NoOpProtocolFactory())
self.config.bootstrap()
d.addErrback(self.check_error)
return d
def check_error(self, failure):
self.assertEqual(failure.type, error.CannotListenError)
return None
def test_parse_via_plugin(self):
# make sure we have a valid thing from get_global_tor without
# actually launching tor
config = TorConfig()
config.post_bootstrap = defer.succeed(config)
from txtorcon import torconfig
torconfig._global_tor_config = None
get_global_tor(
self.reactor,
_tor_launcher=lambda react, config, prog: defer.succeed(config)
)
ep = serverFromString(
self.reactor,
'onion:88:localPort=1234:hiddenServiceDir=/foo/bar'
)
self.assertEqual(ep.public_port, 88)
self.assertEqual(ep.local_port, 1234)
self.assertEqual(ep.hidden_service_dir, '/foo/bar')
def test_parse_user_path(self):
# this makes sure we expand users and symlinks in
# hiddenServiceDir args. see Issue #77
# make sure we have a valid thing from get_global_tor without
# actually launching tor
config = TorConfig()
config.post_bootstrap = defer.succeed(config)
from txtorcon import torconfig
torconfig._global_tor_config = None
get_global_tor(
self.reactor,
_tor_launcher=lambda react, config, prog: defer.succeed(config)
)
ep = serverFromString(
self.reactor,
'onion:88:localPort=1234:hiddenServiceDir=~/blam/blarg'
)
# would be nice to have a fixed path here, but then would have
# to run as a known user :/
# maybe using the docker stuff to run integration tests better here?
self.assertEqual(
os.path.expanduser('~/blam/blarg'),
ep.hidden_service_dir
)
def test_parse_relative_path(self):
# this makes sure we convert a relative path to absolute
# hiddenServiceDir args. see Issue #77
# make sure we have a valid thing from get_global_tor without
# actually launching tor
config = TorConfig()
config.post_bootstrap = defer.succeed(config)
from txtorcon import torconfig
torconfig._global_tor_config = None
get_global_tor(
self.reactor,
_tor_launcher=lambda react, config, prog: defer.succeed(config)
)
orig = os.path.realpath('.')
try:
with util.TempDir() as t:
t = str(t)
os.chdir(t)
os.mkdir(os.path.join(t, 'foo'))
hsdir = os.path.join(t, 'foo', 'blam')
os.mkdir(hsdir)
ep = serverFromString(
self.reactor,
'onion:88:localPort=1234:hiddenServiceDir=foo/blam'
)
self.assertEqual(
os.path.realpath(hsdir),
ep.hidden_service_dir
)
finally:
os.chdir(orig)
class EndpointLaunchTests(unittest.TestCase):
    """Tests that onion endpoint-string parsing dispatches to the correct
    TCPHiddenServiceEndpoint factory (global vs. system vs. private tor)."""

    def setUp(self):
        self.reactor = FakeReactorTcp(self)
        self.protocol = FakeControlProtocol([])

    def test_onion_address(self):
        addr = TorOnionAddress("foo.onion", 80)
        # just want to run these and assure they don't throw
        # exceptions.
        repr(addr)
        hash(addr)

    def test_onion_parse_unix_socket(self):
        # controlPort may be a unix-socket path; parsing must not raise.
        r = Mock()
        ep = serverFromString(r, "onion:80:controlPort=/tmp/foo")

    @patch('txtorcon.TCPHiddenServiceEndpoint.system_tor')
    @patch('txtorcon.TCPHiddenServiceEndpoint.global_tor')
    @patch('txtorcon.TCPHiddenServiceEndpoint.private_tor')
    @defer.inlineCallbacks
    def test_endpoint_launch_tor(self, private_tor, global_tor, system_tor):
        """
        we just want to confirm that calling listen results in the
        spawning of a Tor process; the parsing/setup from string are
        checked elsewhere.
        """
        reactor = proto_helpers.MemoryReactor()
        ep = serverFromString(reactor, 'onion:8888')
        r = yield ep.listen(NoOpProtocolFactory())
        # No controlPort in the string -> the shared global tor is used.
        self.assertEqual(global_tor.call_count, 1)
        self.assertEqual(private_tor.call_count, 0)
        self.assertEqual(system_tor.call_count, 0)

    @patch('txtorcon.TCPHiddenServiceEndpoint.system_tor')
    @patch('txtorcon.TCPHiddenServiceEndpoint.global_tor')
    @patch('txtorcon.TCPHiddenServiceEndpoint.private_tor')
    @defer.inlineCallbacks
    def test_endpoint_connect_tor(self, private_tor, global_tor, system_tor):
        """
        similar to above test, we're confirming that an
        endpoint-string with 'controlPort=xxxx' in it calls the API
        that will connect to a running Tor.
        """
        reactor = proto_helpers.MemoryReactor()
        ep = serverFromString(
            reactor,
            'onion:8888:controlPort=9055:localPort=1234'
        )
        r = yield ep.listen(NoOpProtocolFactory())
        self.assertEqual(global_tor.call_count, 0)
        self.assertEqual(private_tor.call_count, 0)
        self.assertEqual(system_tor.call_count, 1)

        # unfortunately, we don't add the hidden-service
        # configurations until we've connected to the launched Tor
        # and bootstrapped a TorConfig object -- and that's a ton
        # of stuff to fake out. Most of that is covered by the
        # parsing tests (i.e. are we getting the right config
        # values from a server-endpoint-string)
# FIXME should probably go somewhere else, so other tests can easily use these.
class FakeProtocol(object):
implements(IProtocol)
def dataReceived(self, data):
print "DATA", data
def connectionLost(self, reason):
print "LOST", reason
def makeConnection(self, transport):
print "MAKE", transport
transport.protocol = self
def connectionMade(self):
print "MADE!"
class FakeAddress(object):
    """Stand-in IAddress with a fixed 'fakeTCP' transport type."""

    implements(IAddress)

    compareAttributes = ('type', 'host', 'port')
    type = 'fakeTCP'

    def __init__(self, host, port):
        self.host = host
        self.port = port

    def __repr__(self):
        cls_name = self.__class__.__name__
        return '%s(%r, %d)' % (cls_name, self.host, self.port)

    def __hash__(self):
        key = (self.type, self.host, self.port)
        return hash(key)
class FakeListeningPort(object):
    """Minimal IListeningPort that drives its factory's start/stop hooks."""

    implements(IListeningPort)

    def __init__(self, port):
        self.port = port

    def getHost(self):
        return FakeAddress('host', self.port)

    def startListening(self):
        self.factory.doStart()

    def stopListening(self):
        self.factory.doStop()
def port_generator():
    """Yield candidate port numbers, counting down from 65535 to 1.

    FIX: the original used the Python-2-only xrange(); a plain while-loop
    yields the identical sequence and also works under Python 3.
    """
    port = 65535
    while port > 0:
        yield port
        port -= 1
from test_torconfig import FakeReactor # FIXME put in util or something?
from test_torconfig import FakeProcessTransport # FIXME importing from other test sucks
from test_torconfig import FakeControlProtocol # FIXME
class FakeReactorTcp(FakeReactor):
    """Fake IReactorTCP handing out FakeListeningPorts and stub connectors."""

    implements(IReactorTCP)

    # Number of listenTCP() calls that should raise CannotListenError before
    # succeeding; tests set this to simulate bind failures.
    failures = 0
    # Class-level (shared) generator providing descending port numbers when
    # listenTCP is asked for port 0 ("any free port").
    _port_generator = port_generator()

    def __init__(self, test):
        self.protocol = TorControlProtocol()
        self.protocol.connectionMade = lambda: None
        # NOTE(review): this StringTransport is immediately replaced on the
        # next line; it looks vestigial.
        self.transport = proto_helpers.StringTransport()
        self.transport = FakeProcessTransport()
        self.transport.protocol = self.protocol

        def blam():
            # Simulate tor emitting bootstrap output when stdin is closed.
            self.protocol.outReceived("Bootstrap")
        self.transport.closeStdin = blam
        self.protocol.makeConnection(self.transport)
        FakeReactor.__init__(self, test, self.transport, lambda x: None)

    def listenTCP(self, port, factory, **kwargs):
        '''returns IListeningPort'''
        if self.failures > 0:
            self.failures -= 1
            raise error.CannotListenError(None, None, None)

        if port == 0:
            port = self._port_generator.next()
        p = FakeListeningPort(port)
        p.factory = factory
        p.startListening()
        return p

    def connectTCP(self, host, port, factory, timeout, bindAddress):
        '''should return IConnector'''
        r = tcp.Connector(
            host, port, factory, timeout,
            bindAddress, reactor=self
        )

        def blam(*args):
            # Stub out the real connect; just log that it was attempted.
            print "BLAAAAAM", args
        r.connect = blam
        return r
class FakeTorSocksEndpoint(object):
    """Fake SOCKS client endpoint.

    Either fails connect() with a canned Failure, or builds the factory's
    protocol and attaches it to a StringTransport.
    """

    def __init__(self, *args, **kw):
        self.host = args[1]
        self.port = args[2]
        self.transport = None
        self.failure = kw.get('failure', None)
        self.acceptPort = kw.get('acceptPort', None)

    def connect(self, fac):
        self.factory = fac
        # Refuse when an acceptPort is configured and this isn't it, or
        # (with no acceptPort) whenever a canned failure was supplied.
        if self.acceptPort:
            refused = self.port != self.acceptPort
        else:
            refused = self.failure is not None
        if refused:
            return defer.fail(self.failure)

        proto = fac.buildProtocol(None)
        conn = proto_helpers.StringTransport()
        proto.makeConnection(conn)
        self.proto = proto
        self.transport = conn
        return defer.succeed(proto)
class TestTorClientEndpoint(unittest.TestCase):
    """Tests for TorClientEndpoint's SOCKS connect and port-retry logic."""

    def test_client_connection_failed(self):
        """
        This test is equivalent to txsocksx's
        TestSOCKS4ClientEndpoint.test_clientConnectionFailed
        """
        def FailTorSocksEndpointGenerator(*args, **kw):
            kw['failure'] = connectionRefusedFailure
            return FakeTorSocksEndpoint(*args, **kw)
        endpoint = TorClientEndpoint('', 0, _proxy_endpoint_generator=FailTorSocksEndpointGenerator)
        d = endpoint.connect(None)
        return self.assertFailure(d, ConnectionRefusedError)

    def test_client_connection_failed_user_password(self):
        """
        Same as above, but with a username/password.
        """
        def FailTorSocksEndpointGenerator(*args, **kw):
            kw['failure'] = connectionRefusedFailure
            return FakeTorSocksEndpoint(*args, **kw)
        endpoint = TorClientEndpoint(
            'invalid host', 0,
            socks_username='billy', socks_password='s333cure',
            _proxy_endpoint_generator=FailTorSocksEndpointGenerator)
        d = endpoint.connect(None)
        return self.assertFailure(d, ConnectionRefusedError)

    def test_default_generator(self):
        # just ensuring the default generator doesn't blow up
        default_tcp4_endpoint_generator(None, 'foo.bar', 1234)

    def test_no_host(self):
        self.assertRaises(
            ValueError,
            TorClientEndpoint, None, None
        )

    def test_parser_basic(self):
        ep = clientFromString(None, 'tor:host=timaq4ygg2iegci7.onion:port=80:socksPort=9050')

        self.assertEqual(ep.host, 'timaq4ygg2iegci7.onion')
        self.assertEqual(ep.port, 80)
        self.assertEqual(ep.socks_port, 9050)

    def test_parser_user_password(self):
        epstring = 'tor:host=torproject.org:port=443' + \
            ':socksUsername=foo:socksPassword=bar'
        ep = clientFromString(None, epstring)

        self.assertEqual(ep.host, 'torproject.org')
        self.assertEqual(ep.port, 443)
        self.assertEqual(ep.socks_username, 'foo')
        self.assertEqual(ep.socks_password, 'bar')

    def test_default_factory(self):
        """
        This test is equivalent to txsocksx's TestSOCKS5ClientEndpoint.test_defaultFactory
        """
        def TorSocksEndpointGenerator(*args, **kw):
            return FakeTorSocksEndpoint(*args, **kw)
        endpoint = TorClientEndpoint('', 0, _proxy_endpoint_generator=TorSocksEndpointGenerator)
        endpoint.connect(None)
        self.assertEqual(endpoint.tor_socks_endpoint.transport.value(), '\x05\x01\x00')

    def test_good_port_retry(self):
        """
        This tests that our Tor client endpoint retry logic works correctly.
        We create a proxy endpoint that fires a connectionRefusedFailure
        unless the connecting port matches. We attempt to connect with the
        proxy endpoint for each port that the Tor client endpoint will try.
        """
        success_ports = TorClientEndpoint.socks_ports_to_try
        for port in success_ports:
            def TorSocksEndpointGenerator(*args, **kw):
                kw['acceptPort'] = port
                kw['failure'] = connectionRefusedFailure
                return FakeTorSocksEndpoint(*args, **kw)

            endpoint = TorClientEndpoint('', 0, _proxy_endpoint_generator=TorSocksEndpointGenerator)
            endpoint.connect(None)
            self.assertEqual(endpoint.tor_socks_endpoint.transport.value(), '\x05\x01\x00')

    def test_bad_port_retry(self):
        """
        This tests failure to connect to the ports on the "try" list.
        """
        # BUG FIX: the original `return`ed from inside the loop, so only the
        # first fail-port was ever exercised. Collect a failure-assertion
        # deferred for every port and wait for all of them. A factory
        # function also pins `accept_port` per iteration (avoiding the
        # late-binding-closure pitfall).
        def make_generator(accept_port):
            def TorSocksEndpointGenerator(*args, **kw):
                kw['acceptPort'] = accept_port
                kw['failure'] = connectionRefusedFailure
                return FakeTorSocksEndpoint(*args, **kw)
            return TorSocksEndpointGenerator

        fail_ports = [1984, 666]
        deferreds = []
        for port in fail_ports:
            endpoint = TorClientEndpoint('', 0, _proxy_endpoint_generator=make_generator(port))
            d = endpoint.connect(None)
            deferreds.append(self.assertFailure(d, ConnectionRefusedError))
        return defer.gatherResults(deferreds)

    def test_good_no_guess_socks_port(self):
        """
        This tests that if a SOCKS port is specified, we *only* attempt to
        connect to that SOCKS port.
        """
        def TorSocksEndpointGenerator(*args, **kw):
            kw['acceptPort'] = 6669
            kw['failure'] = connectionRefusedFailure
            return FakeTorSocksEndpoint(*args, **kw)
        endpoint = TorClientEndpoint('', 0, _proxy_endpoint_generator=TorSocksEndpointGenerator, socks_port=6669)
        endpoint.connect(None)
        self.assertEqual(endpoint.tor_socks_endpoint.transport.value(), '\x05\x01\x00')

    def test_bad_no_guess_socks_port(self):
        """
        This tests that our connection fails if we try to connect to an
        unavailable specified SOCKS port... even if there is a valid SOCKS
        port listening on the socks_ports_to_try list.
        """
        def TorSocksEndpointGenerator(*args, **kw):
            kw['acceptPort'] = 9050
            kw['failure'] = connectionRefusedFailure
            return FakeTorSocksEndpoint(*args, **kw)
        endpoint = TorClientEndpoint('', 0, _proxy_endpoint_generator=TorSocksEndpointGenerator, socks_port=6669)
        d = endpoint.connect(None)
        # BUG FIX: return the assertFailure deferred so trial actually waits
        # for (and checks) the expected failure instead of dropping it.
        return self.assertFailure(d, ConnectionRefusedError)
| |
#!/usr/bin/env python
"""
setup.py
========
This is as generic a setup.py template as possible. The goal is to retrieve
almost all of the information from the main module file, rather than relying
on values explicitly entered here.
## Usage
This setup.py script needs to be modified in the following ways:
- `MAIN_FILE` needs to be pointed at the main metadata file; this can be done
easily by modifying the second arg.
- `setup` kwargs need to be modified:
- `classifiers` needs to be modified to suit your project.
- `keywords` needs to be modified to suit your project.
- If you have files that need to be included (such as `LICENSE`), you need to
create a MANIFEST.in file and `include FILENAME` them.
Other than that, all the metadata should live in your main file, just like
the example below.
## Metadata Example
The following should be placed in your project module's __init__.py file:
::
__author__ = "Ivan Busquets"
__author_email__ = "ivanbusquets@gmail.com"
__copyright__ = "Copyright 2011, Ivan Busquets"
__credits__ = ["Ivan Busquets", "Sean Wallitsch", ]
__license__ = "MIT"
__version__ = "1.2"
__maintainer__ = "Sean Wallitsch"
__maintainer_email__ = "sean@grenadehop.com"
__module_name__ = "animatedSnap3D"
__short_desc__ = "An extension to Nuke's 'snap' options for animated verts"
__status__ = "Development"
__url__ = 'http://github.com/ThoriumGroup/animatedSnap3D'
Note: At this time `credits` is unused.
"""
# ==============================================================================
# IMPORTS
# ==============================================================================
from setuptools import setup, find_packages
import codecs
import os
import re
# ==============================================================================
# GLOBALS
# ==============================================================================
HERE = os.path.abspath(os.path.dirname(__file__))
MAIN_FILE = os.path.join(HERE, 'animatedSnap3D', '__init__.py')
# Get the long description from the relevant file
with codecs.open('README.rst', encoding='utf-8') as readme_file:
LONG_DESCRIPTION = readme_file.read()
# ==============================================================================
# PRIVATE FUNCTIONS
# ==============================================================================
def _find_metadata(filepath):
"""Reads all the metadata from a source file by opening manually.
Why open and read it and not import?
https://groups.google.com/d/topic/pypa-dev/0PkjVpcxTzQ/discussion
Args:
filepath : (str)
Filepath to the file containing the metadata.
Returns:
{str: str}
Dictionary with metadata keys and values.
Raises:
RuntimeError
Cannot proceed if version or module_name not found
"""
# Open in Latin-1 so that we avoid encoding errors.
# Use codecs.open for Python 2 compatibility
with codecs.open(filepath, 'r', 'latin1') as meta_file:
metadata_file = meta_file.read()
metadata = {}
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
author_match = re.search(r"^__author__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
author_email_match = re.search(r"^__author_email__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
copyright_match = re.search(r"^__copyright__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
credits_match = re.search(r"^__credits__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
license_match = re.search(r"^__license__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
maint_match = re.search(r"^__maintainer__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
maint_email_match = re.search(r"^__maintainer_email__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
module_name_match = re.search(r"^__module_name__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
short_desc_match = re.search(r"^__short_desc__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
status_match = re.search(r"^__status__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
url_match = re.search(r"^__url__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
if not version_match or not module_name_match:
raise RuntimeError("Unable to find version or module_name string.")
if author_match:
metadata['author'] = author_match.group(1)
if author_email_match:
metadata['author_email'] = author_email_match.group(1)
if copyright_match:
metadata['copyright'] = copyright_match.group(1)
if credits_match:
metadata['credits'] = credits_match.group(1)
if license_match:
metadata['license'] = license_match.group(1)
if maint_match:
metadata['maintainer'] = maint_match.group(1)
if maint_email_match:
metadata['maintainer_email'] = maint_email_match.group(1)
if module_name_match:
metadata['module_name'] = module_name_match.group(1)
if short_desc_match:
metadata['short_desc'] = short_desc_match.group(1)
if status_match:
metadata['status'] = status_match.group(1)
if version_match:
metadata['version'] = version_match.group(1)
if url_match:
metadata['url'] = url_match.group(1)
return metadata
# ==============================================================================
# MAIN
# ==============================================================================
# Extract project metadata from the module's dunder attributes, then hand
# everything to setuptools.
metadata = _find_metadata(MAIN_FILE)

setup(
    name=metadata['module_name'],
    version=metadata['version'],
    description=metadata.get('short_desc', ''),
    long_description=LONG_DESCRIPTION,

    # The project URL.
    url=metadata.get('url', ''),

    # Author & Maintainer details
    author=metadata.get('author', ''),
    author_email=metadata.get('author_email', ''),
    maintainer=metadata.get('maintainer', ''),
    maintainer_email=metadata.get('maintainer_email', ''),

    # Choose your license
    license=metadata.get('license', ''),

    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 4 - Beta',

        # Indicate who your project is intended for
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: Developers',
        'Topic :: Multimedia :: Graphics',
        'Topic :: Multimedia :: Video',
        'Topic :: Software Development :: Libraries :: Python Modules',

        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: Implementation :: CPython',

        # OS
        'Operating System :: OS Independent',

        # Language
        'Natural Language :: English',
    ],

    # What does your project relate to?
    keywords='film tv color vfx nuke',

    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages.
    packages=find_packages(exclude=['tests']),

    # If there are data files included in your packages that need to be
    # installed, specify them here. If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    package_data={},
    include_package_data=True,

    # Targeted OS
    platforms='any',
)
| |
# Copyright (c) 2006-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Whitebox tests for TCP APIs.
"""
import errno, socket, os
try:
import resource
except ImportError:
resource = None
from twisted.trial.unittest import TestCase
from twisted.python import log
from twisted.internet.tcp import ECONNABORTED, ENOMEM, ENFILE, EMFILE, ENOBUFS, EINPROGRESS, Port
from twisted.internet.protocol import ServerFactory
from twisted.python.runtime import platform
from twisted.internet.defer import maybeDeferred, gatherResults
from twisted.internet import reactor, interfaces
class PlatformAssumptionsTestCase(TestCase):
    """
    Test assumptions about platform behaviors.

    These tests confirm that the platform really does fail in the ways the
    reactor-level tests below rely on: accept(2) raising EMFILE or ENOBUFS
    once the process has exhausted its file descriptors.
    """
    # Upper bound on sockets opened while trying to exhaust descriptors;
    # lowered in setUp when the rlimit can be shrunk.
    socketLimit = 8192

    def setUp(self):
        # Sockets opened through self.socket(); closed again in tearDown.
        self.openSockets = []
        if resource is not None:
            # Shrink the soft descriptor limit so the exhaustion loop in
            # test_acceptOutOfFiles finishes quickly; restored in tearDown.
            self.originalFileLimit = resource.getrlimit(resource.RLIMIT_NOFILE)
            resource.setrlimit(resource.RLIMIT_NOFILE, (256, self.originalFileLimit[1]))
            self.socketLimit = 512

    def tearDown(self):
        # Close every socket this test opened.
        while self.openSockets:
            self.openSockets.pop().close()
        if resource is not None:
            # OS X implicitly lowers the hard limit in the setrlimit call
            # above. Retrieve the new hard limit to pass in to this
            # setrlimit call, so that it doesn't give us a permission denied
            # error.
            currentHardLimit = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
            newSoftLimit = min(self.originalFileLimit[0], currentHardLimit)
            resource.setrlimit(resource.RLIMIT_NOFILE, (newSoftLimit, currentHardLimit))

    def socket(self):
        """
        Create and return a new socket object, also tracking it so it can be
        closed in the test tear down.
        """
        s = socket.socket()
        self.openSockets.append(s)
        return s

    def test_acceptOutOfFiles(self):
        """
        Test that the platform accept(2) call fails with either L{EMFILE} or
        L{ENOBUFS} when there are too many file descriptors open.
        """
        # Make a server to which to connect
        port = self.socket()
        port.bind(('127.0.0.1', 0))
        serverPortNumber = port.getsockname()[1]
        port.listen(5)
        # Make a client to use to connect to the server
        client = self.socket()
        client.setblocking(False)
        # Use up all the rest of the file descriptors.
        for i in xrange(self.socketLimit):
            try:
                self.socket()
            except socket.error, e:
                if e.args[0] in (EMFILE, ENOBUFS):
                    # The desired state has been achieved.
                    break
                else:
                    # Some unexpected error occurred.
                    raise
        else:
            # The loop never hit EMFILE/ENOBUFS before socketLimit.
            self.fail("Could provoke neither EMFILE nor ENOBUFS from platform.")
        # Non-blocking connect is supposed to fail, but this is not true
        # everywhere (e.g. freeBSD)
        self.assertIn(client.connect_ex(('127.0.0.1', serverPortNumber)),
                      (0, EINPROGRESS))
        # Make sure that the accept call fails in the way we expect.
        exc = self.assertRaises(socket.error, port.accept)
        self.assertIn(exc.args[0], (EMFILE, ENOBUFS))
    if platform.getType() == "win32":
        test_acceptOutOfFiles.skip = (
            "Windows requires an unacceptably large amount of resources to "
            "provoke this behavior in the naive manner.")
class SelectReactorTestCase(TestCase):
    """
    Tests for select-specific failure conditions.

    Each test injects a failing accept(2) into a listening L{Port} and
    verifies the failure is logged rather than propagated.
    """
    def setUp(self):
        # Ports started via self.port(); stopped in tearDown.
        self.ports = []
        # Captured log events, inspected by _acceptFailureTest.
        self.messages = []
        log.addObserver(self.messages.append)

    def tearDown(self):
        log.removeObserver(self.messages.append)
        # Wait for every port to finish shutting down.
        return gatherResults([
            maybeDeferred(p.stopListening)
            for p in self.ports])

    def port(self, portNumber, factory, interface):
        """
        Create, start, and return a new L{Port}, also tracking it so it can
        be stopped in the test tear down.
        """
        p = Port(portNumber, factory, interface=interface)
        p.startListening()
        self.ports.append(p)
        return p

    def _acceptFailureTest(self, socketErrorNumber):
        """
        Test behavior in the face of an exception from C{accept(2)}.

        On any exception which indicates the platform is unable or unwilling
        to allocate further resources to us, the existing port should remain
        listening, a message should be logged, and the exception should not
        propagate outward from doRead.

        @param socketErrorNumber: The errno to simulate from accept.
        """
        class FakeSocket(object):
            """
            Pretend to be a socket in an overloaded system.
            """
            def accept(self):
                # Always fail with the errno under test.
                raise socket.error(
                    socketErrorNumber, os.strerror(socketErrorNumber))

        factory = ServerFactory()
        port = self.port(0, factory, interface='127.0.0.1')
        originalSocket = port.socket
        try:
            # Swap in the always-failing socket and trigger a read.
            port.socket = FakeSocket()
            port.doRead()
            # The port must have logged the failure rather than raising it.
            expectedFormat = "Could not accept new connection (%s)"
            expectedErrorCode = errno.errorcode[socketErrorNumber]
            expectedMessage = expectedFormat % (expectedErrorCode,)
            for msg in self.messages:
                if msg.get('message') == (expectedMessage,):
                    break
            else:
                self.fail("Log event for failed accept not found in "
                          "%r" % (self.messages,))
        finally:
            # Restore the real socket so tearDown can stop the port cleanly.
            port.socket = originalSocket

    def test_tooManyFilesFromAccept(self):
        """
        C{accept(2)} can fail with C{EMFILE} when there are too many open file
        descriptors in the process.  Test that this doesn't negatively impact
        any other existing connections.

        C{EMFILE} mainly occurs on Linux when the open file rlimit is
        encountered.
        """
        return self._acceptFailureTest(EMFILE)

    def test_noBufferSpaceFromAccept(self):
        """
        Similar to L{test_tooManyFilesFromAccept}, but test the case where
        C{accept(2)} fails with C{ENOBUFS}.

        This mainly occurs on Windows and FreeBSD, but may be possible on
        Linux and other platforms as well.
        """
        return self._acceptFailureTest(ENOBUFS)

    def test_connectionAbortedFromAccept(self):
        """
        Similar to L{test_tooManyFilesFromAccept}, but test the case where
        C{accept(2)} fails with C{ECONNABORTED}.

        It is not clear whether this is actually possible for TCP
        connections on modern versions of Linux.
        """
        return self._acceptFailureTest(ECONNABORTED)

    def test_noFilesFromAccept(self):
        """
        Similar to L{test_tooManyFilesFromAccept}, but test the case where
        C{accept(2)} fails with C{ENFILE}.

        This can occur on Linux when the system has exhausted (!) its supply
        of inodes.
        """
        return self._acceptFailureTest(ENFILE)
    if platform.getType() == 'win32':
        test_noFilesFromAccept.skip = "Windows accept(2) cannot generate ENFILE"

    def test_noMemoryFromAccept(self):
        """
        Similar to L{test_tooManyFilesFromAccept}, but test the case where
        C{accept(2)} fails with C{ENOMEM}.

        On Linux at least, this can sensibly occur, even in a Python program
        (which eats memory like no ones business), when memory has become
        fragmented or low memory has been filled (d_alloc calls
        kmem_cache_alloc calls kmalloc - kmalloc only allocates out of low
        memory).
        """
        return self._acceptFailureTest(ENOMEM)
    if platform.getType() == 'win32':
        test_noMemoryFromAccept.skip = "Windows accept(2) cannot generate ENOMEM"
# Both suites poke at file-descriptor-level reactor internals (Port.doRead),
# so they only make sense on reactors exposing IReactorFDSet.
if not interfaces.IReactorFDSet.providedBy(reactor):
    skipMsg = 'This test only applies to reactors that implement IReactorFDset'
    PlatformAssumptionsTestCase.skip = skipMsg
    SelectReactorTestCase.skip = skipMsg
| |
import pygame
# from collections import deque

# Initialize only the pygame subsystems this script needs.
pygame.display.init()
pygame.joystick.init()
# Use the last joystick SDL reports (the arcade stick on this setup —
# presumably; confirm against the hardware).
stick = pygame.joystick.Joystick(pygame.joystick.get_count()-1)
stick.init()
numaxes = stick.get_numaxes()
numbuttons = stick.get_numbuttons()
numhats = stick.get_numhats()
# print "numaxes:", numaxes
# Only queue the event types the readers below actually handle.
pygame.event.set_allowed([pygame.JOYBUTTONDOWN, pygame.JOYBUTTONUP, pygame.JOYAXISMOTION])
# Rolling input histories, one per player; appended to by read_event().
P1_EVENT_SEQUENCE = []
P2_EVENT_SEQUENCE = []
def read_p1(evt):
    """Translate a pygame event into a player-1 input name.

    Returns one of the direction/button name strings, or False when the
    event does not correspond to a known player-1 control.
    """
    axis_names = {6: {-1: "left", 1: "right"},
                  7: {-1: "up", 1: "down"}}
    button_names = {12: "triangle", 13: "o", 14: "x", 15: "square",
                    20: "select", 21: "start"}
    if evt.type == pygame.JOYAXISMOTION:
        # Axis values are analogue; rounding maps them onto -1/0/1.
        name = axis_names.get(evt.axis, {}).get(round(evt.value))
        if name is not None:
            return name
    elif evt.type == pygame.JOYBUTTONDOWN:
        name = button_names.get(evt.button)
        if name is not None:
            return name
    elif evt.type == pygame.JOYBUTTONUP:
        # do we actually need to track the up events?
        pass
    return False  # if nothing matched return false
def read_p2(evt):
    """Translate a pygame event into a player-2 input name.

    Returns one of the direction/button name strings, or False when the
    event does not correspond to a known player-2 control.
    """
    axis_names = {3: {-1: "up", 1: "down"},
                  2: {-1: "left", 1: "right"}}
    button_names = {0: "triangle", 1: "o", 2: "x", 3: "square",
                    8: "select", 9: "start"}
    if evt.type == pygame.JOYAXISMOTION:
        # Axis values are analogue; rounding maps them onto -1/0/1.
        name = axis_names.get(evt.axis, {}).get(round(evt.value))
        if name is not None:
            return name
    elif evt.type == pygame.JOYBUTTONDOWN:
        name = button_names.get(evt.button)
        if name is not None:
            return name
    elif evt.type == pygame.JOYBUTTONUP:
        # do we actually need to track the up events?
        pass
    return False  # if nothing matched return false
def read_event(evt):
    """Dispatch a joystick event to the per-player readers.

    Any recognized input is appended to that player's event sequence,
    which is then re-parsed for command patterns.  Returns True when the
    event was recognized for either player, False otherwise.
    """
    # Control mapping reference for this cabinet.
    # NOTE(review): these notes list p1 on axes 7/8 and p2 on axes 3/4,
    # but read_p1 checks axes 6/7 and read_p2 checks axes 2/3 — confirm
    # which is current.
    # left side (player 1)
    # up: get_axis(8) == -1
    # down: get_axis(8) == 1
    # left: get_axis(7) == -1
    # right: get_axis(7) == 1
    # get_axis(7) and get_axis(8)
    # triangle: get_button(12)
    # o: get_button(13)
    # x: get_button(14)
    # square: get_button(15)
    # select: get_button(20)
    # start: get_button(21)
    # right side (player 2)
    # up: get_axis(4) == -1
    # down: get_axis(4) == 1
    # left: get_axis(3) == -1
    # right: get_axis(3) == 1
    # get_axis(3) and get_axis(4)
    # triangle: get_button(0)
    # o: get_button(1)
    # x: get_button(2)
    # square: get_button(3)
    # select: get_button(8)
    # start: get_button(9)
    # print pygame.event.event_name(evt.type)
    # print evt
    p1 = read_p1(evt)
    # print evt, p1
    if p1 != False:
        print "p1:", p1
        P1_EVENT_SEQUENCE.append(p1)
        parse_event_sequence(P1_EVENT_SEQUENCE)
        return True
    else:
        # Only consult player 2 when the event was not a player-1 input.
        p2 = read_p2(evt)
        if p2 != False:
            print "p2:",p2
            P2_EVENT_SEQUENCE.append(p2)
            parse_event_sequence(P2_EVENT_SEQUENCE)
            return True
    return False
import shlex, subprocess
from subprocess import call,check_output
def staging_deploy():
    """Open the staging deployment page in the default browser (macOS)."""
    deploy_url = ("http://inewsdeploy.prvt.nytimes.com/projects/145/"
                  "stages/331/deployments/new?task=deploy")
    call(["open", deploy_url])
def nice_moves():
    """Turn the volume up and speak "nice moves" via macOS text-to-speech."""
    volume_cmd = ["osascript", "-e", '"set Volume 10"']
    say_cmd = ["say", "-v", 'agnes', "-r", "270", '"nice moves"']
    call(volume_cmd)
    call(say_cmd)
import json
import tweepy
# CONSUMER_KEY/CONSUMER_SECRET and OAUTH_TOKEN/OAUTH_TOKEN_SECRET come from
# the (untracked) twitter_credentials module.
from twitter_credentials import *

# Authenticated Twitter client used by tweet() below.
twitter_auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
twitter_auth.set_access_token(OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
twitter_client = tweepy.API(twitter_auth)
# Dance catalog; generate_random_tweet() uses each entry's first element as
# both the dance name and the Wikipedia path.
DANCES = json.load(open("dances.json","r"))
from random import choice
def generate_random_tweet():
    """Build a tweet announcing a randomly chosen dance style."""
    dance = choice(DANCES)
    style = dance[0]
    # The style name doubles as the Wikipedia article path.
    url = "http://en.wikipedia.org/"+str(style)
    return "We're dancing "+style+" style! "+url
def tweet(text=""):
    """Post *text* to Twitter; with no text, post a random dance tweet.

    Tweet failures (e.g. duplicate-status errors) are logged and swallowed
    so the main input loop keeps running.
    """
    if text == "":
        text = generate_random_tweet()
    print "tweeting:",text
    try:
        twitter_client.update_status(text)
    except tweepy.error.TweepError:
        print "tweeting error..."
        pass
def git_commit():
    """Commit all tracked changes in the dancing_desk repository."""
    repo_dir = "/Users/204377/Desktop/dancing_desk/"
    commit_cmd = shlex.split("git commit -am 'Dancing out a commit'")
    subprocess.Popen(commit_cmd, cwd=repo_dir)
def git_push():
    """Push master to origin and tweet a link to the latest commit."""
    repo_dir = "/Users/204377/Desktop/dancing_desk/"
    subprocess.Popen(shlex.split("git push origin master"), cwd=repo_dir)
    head_sha = subprocess.check_output(
        shlex.split("git rev-parse HEAD"), cwd=repo_dir).strip()
    tweet("The dancing desk just committed! https://github.com/albertsun/dancing_desk/commit/" + head_sha)
# Maps pattern output names to the side-effect handlers defined above.
# Pattern names with no entry here (e.g. "squiggle") are print-only.
COMMANDS = {
    "staging_deploy": staging_deploy,
    "nice_moves": nice_moves,
    "git_commit": git_commit,
    "git_push": git_push,
    "tweet": tweet
}
# Every input name read_p1/read_p2 can produce.
KEYWORDS = ["up","down","left","right","triangle","o","x","square","select","start"]
# [input-sequence, command-name] pairs; parse_event_sequence() matches each
# sequence against the tail of a player's event history.
PATTERNS = [
    [["up", "right", "down", "left"], "squiggle"],
    [["up"], "up"],
    [["x", "left", "triangle"], "tweet"],
    [["square","square","square"], "git_commit"],
    [["o","o","o"], "git_push"],
    [["select","start"], "staging_deploy"],
    [["up","up","down","down","left","right","left","right","x","o","start"], "nice_moves"]
]
def parse_event_sequence(seq):
# print seq
for pat,out in PATTERNS:
if len(seq) >= len(pat) and seq[len(seq)-len(pat):] == pat:
if COMMANDS.has_key(out): COMMANDS[out]()
print out + "!"
# Main loop: block until the next joystick event arrives, then dispatch it.
while True:
    # pygame.event.pump()
    # print [round(stick.get_axis(i)) for i in range(numaxes)]
    # buttons = [(i,stick.get_button(i)) for i in range(numbuttons)]
    # print buttons
    # print [stick.get_hat(i) for i in range(numhats)]
    # print [stick.get_axis(i) for i in range(numaxes)] + [stick.get_button(i) for i in range(numbuttons)]
    # block execution until another event comes
    read_event(pygame.event.wait())
| |
"""Keep track of all functions once seemingly completed."""
import numpy as np
def fetchMetaData(G):
    """Prep a memory efficient DFS in O(elog(e)) time.

    Args:
        G: (E, 2) integer array of directed edges (parent, child).

    Returns:
        G: the edge array sorted by parent node id.
        N: (V, 7) int16 metadata array, one row per unique node id:
           column 0 = node id, column 1 = offset of the node's first child
           edge in the sorted G, column 2 = child count.  Columns 3-6 are
           workspace used later by mDFS.
    """
    G = G[G[:, 0].argsort()]
    # pass through the array and record the slices in which children are stored
    print("Inferring unique Parents")
    uniqueNodes = 1
    GF = G.flatten()
    GF.sort()
    # Count distinct node ids by scanning adjacent entries of the sorted
    # flat endpoint list.
    for i in range(len(GF)-1):
        if GF[i] != GF[i+1]:
            uniqueNodes += 1
    # create a flat array representation of parent children relationships
    print("Creating parent children ordering")
    N = np.zeros((uniqueNodes, 7), dtype=np.int16)
    N[0][0] = GF[0]
    count = 0
    # Fill column 0 with the sorted unique node ids.
    for i in range(1, len(GF)):
        if GF[i] != GF[i-1]:
            count += 1
            N[count][0] = GF[i]
    tally = 0
    count = 0
    children = 0
    # Walk the parent-sorted edge list, recording for each parent its first
    # child offset (tally) and its number of children.
    for i in range(len(G)-1):
        children += 1
        if G[i][0] != G[i+1][0]:
            # if empty nodes exist inbetween with no children fill them
            # accordingly until we reach the parent
            while (G[i][0] != N[count][0]):
                N[count][1] = tally
                N[count][2] = 0
                count += 1
            # fill in metadata about parent
            N[count][1] = tally
            N[count][2] = children
            tally += children
            count += 1
            children = 0
    # with no redirects the last one must go somewhere and therefore has one
    # child missed in the previous loop
    #
    # if something goes wrong it's probably here
    #
    # NOTE(review): this always credits the second-to-last row of N with
    # exactly one child, which looks fragile — see the author's TODO below.
    #
    # TODO: test this out on a variety of graph scenarios
    N[len(N)-2][1] = tally
    N[len(N)-2][2] = 1
    return G, N
def binomialSearch(V, node):
    """Locate *node* in the sorted sequence *V* by halving jumps.

    Returns the index of *node*.  The jump size starts at len(V)//2 and is
    halved after every probe (never dropping below 1), moving toward the
    target from the current position.
    """
    length = len(V)
    # handle corner case where the list is odd numbered
    if V[length - 1] == node:
        return length - 1
    pos = 0
    step = 1
    span = length // 2
    # inspect remainder of list for node value
    while V[pos] != node:
        if node > V[pos]:
            step = 1
        elif node < V[pos]:
            step = -1
        else:
            break
        pos += step * span
        span //= 2
        # account for edge case where we are one away by inference but no
        # more length left to divide
        if span == 0:
            span = 1
    return pos
def mDFS(G, start, prune, order, u):
    """Given a list of sorted edges perform a DFS on a subset of nodes.

    We eventually hope to port this to CUDA so we explore the option of a
    in place sort on parent nodes to align all children with their parents
    from their we can construct an indexing which allows us to look up each
    parent child relationship.

    Args:
        G: (E, 2) array of directed edges (parent, child).
        start: node id where the traversal begins.
        prune: when truthy, only nodes whose topological position lies in
            [V, U) are marked; all others have their child count zeroed so
            the traversal skips their subtrees.
        order: (V, 2) array mapping node id (col 0) to topological position
            (col 1); only consulted when prune is set.
        u: node id whose topological position is the upper pruning bound.

    Returns:
        The sorted edge array G and metadata array N with pre numbers in
        column 4, post numbers in column 5, and mark flags in column 6.
    """
    G, N = fetchMetaData(G)
    vertices = N[:, 0]
    postpre = 0  # next pre/post visit number to assign
    parent = start
    if (prune):
        # Topological positions bounding the pruning window.
        U = order[binomialSearch(order[:, 0], u), 1]
        V = order[binomialSearch(order[:, 0], start), 1]
    # Explicit DFS stack of node ids; -1 marks unused slots.
    stack = np.zeros(len(vertices), dtype=np.int16) - 1
    counter = 0
    # Columns 4..6 (pre, post, mark) start unassigned.
    N[:, 4:] = -1
    topo = []
    # TODO: failing on nine figure this out when I get back to it
    # Loop until the start node has received its post number.
    while (N[start][5] == -1):
        # find the required information in O(Vlog(V)) time
        idx = binomialSearch(vertices, parent)
        # mark all intermediate nodes or prune them. Pruning is performed
        # by falsely reassigning child count to zero
        if (prune):
            W = order[idx, 1]
            if (W < U and W >= V):
                N[idx][6] = 1
            else:
                N[idx][2] = 0
        # assign post pres and increment
        if (N[idx][4] == -1):
            N[idx][4] = postpre
            postpre += 1
        elif (N[idx][5] == -1 and N[idx][2] == N[idx][3]):
            # all children explored: close the node
            N[idx][5] = postpre
            postpre += 1
            topo.append(parent)
        if (N[idx][2] != N[idx][3]):
            # find the lookup and increment the children searched
            lookup = N[idx][1] + N[idx][3]
            N[idx][3] += 1
            # push to stack
            stack[counter] = parent
            counter += 1
            # assign the child value as the new parent
            parent = G[lookup, 1]
        else:
            if (N[idx][2] == 0):
                # leaf (or pruned) node: close it immediately
                N[idx][5] = postpre
                postpre += 1
                topo.append(parent)
            # backtrack to the node on top of the stack
            counter -= 1
            parent = stack[counter]
            stack[counter] = -1
    print(topo)
    return G, N
def insertEdge(G, N, order, edgeToInsert):
    """Perform second pass of topological edge insertion.

    Collects the marked vertices (N column 6 == 1) whose topological
    positions lie in the affected window [V, U].

    Args:
        G: sorted (E, 2) edge array as returned by fetchMetaData.
        N: node metadata array; column 6 holds the mark flag set by mDFS.
        order: (V, 2) array mapping node id (col 0) to topological position
            (col 1).
        edgeToInsert: (u, v) pair of node ids for the new edge.
    """
    print(order)
    print(G)
    print(N)
    U = order[binomialSearch(order[:, 0], edgeToInsert[0]), 1]
    V = order[binomialSearch(order[:, 0], edgeToInsert[1]), 1]
    # might want to test this out but my hunch is that caching the list for a
    # Vlog(V) sort is cheaper than checking the sublist multiple times via
    # binomial search to get something like (V/b)log(V/b) since they're are
    # asymptotically the same but this should be checked or provided an
    # alternative
    listContainer = np.zeros(U-V+1)
    shift = 0
    topoOrder = qsort(order[:, 0], order[:, 1])[V:]
    for i in range(U-V+1):
        vertex = topoOrder[i]
        idx = binomialSearch(N[:, 0], vertex)
        marked = (N[idx, 6] == 1)
        if (marked):
            listContainer[shift] = vertex
            shift += 1
        else:
            # BUG FIX: previously printed the undefined name `toporder`,
            # which raised NameError on the first unmarked vertex; the
            # variable is `topoOrder`.
            print(topoOrder)
    print(shift, i-shift)
def qsort(A, B):
    """Return the elements of *A* reordered by ascending values of *B*.

    Placeholder for a real sort when porting to CUDA: there, carry A along
    through a C++ qsort keyed on B instead of using NumPy.  Here the two
    arrays are stacked so both rows share a dtype, the columns are permuted
    by argsort of the B row, and the reordered A row is returned.
    """
    stacked = np.vstack([A, B])
    ordering = stacked[1, :].argsort()
    return stacked[:, ordering][0]
| |
# -*- coding: utf-8 -*-
"""Project and solution classes."""
import abc
class VSConfiguration(object):
  """Visual Studio configuration.

  Base class for solution and project configurations; subclasses must
  override CopyToX64.

  Attributes:
    name (str): configuration name.
    platform (str): configuration platform.
  """

  def __init__(self, name='', platform=''):
    """Initializes a Visual Studio configuration.

    Args:
      name (Optional[str]): configuration name.
      platform (Optional[str]): configuration platform.
    """
    self.name = name
    self.platform = platform

  # NOTE(review): without abc.ABCMeta as the metaclass this decorator is not
  # enforced at instantiation time; it is documentation-only here.
  @abc.abstractmethod
  def CopyToX64(self):
    """Copies the Visual Studio solution configuration to an x64 equivalent."""
class VSConfigurations(object):
  """Visual Studio solution and project configurations.

  Configurations are stored keyed on "name|platform" identifiers, while
  the distinct names and platforms are tracked in insertion order.

  Attributes:
    names (list[str]): names of the configurations.
    platforms (list[str]): platforms of the configurations.
  """

  def __init__(self):
    """Initializes a Visual Studio configurations."""
    self._configurations = {}
    self.names = []
    self.platforms = []

  @property
  def number_of_configurations(self):
    """int: number of configurations."""
    return len(self._configurations)

  def Append(self, configuration):
    """Appends a configuration.

    Args:
      configuration (VSConfiguration): configuration.
    """
    name = configuration.name
    platform = configuration.platform
    if name not in self.names:
      self.names.append(name)
    if platform not in self.platforms:
      self.platforms.append(platform)
    self._configurations['{0:s}|{1:s}'.format(name, platform)] = configuration

  # pylint: disable=unused-argument
  def ExtendWithX64(self, output_version):
    """Extends the configurations with the x64 platform.

    Args:
      output_version (str): output Visual Studio version.
    """
    if 'x64' in self.platforms:
      return
    for configuration in list(self._configurations.values()):
      if configuration.platform == 'x64':
        continue
      self.Append(configuration.CopyToX64())

  def GetByIdentifier(self, name, platform):
    """Retrieves a specific configuration by identifier.

    The identifier is formatted as: name|platform.

    Args:
      name (str): configuration name.
      platform (Optional[str]): configuration platform.

    Returns:
      VSConfiguration: configuration.
    """
    return self._configurations['{0:s}|{1:s}'.format(name, platform)]

  def GetSorted(self, reverse=False):
    """Retrieves configurations in sorted order.

    The sorting order is first alphabetically by name,
    secondly alphabetically by platform.

    Args:
      reverse (Optional[bool]): True if the name sort order should be
          reversed. The platform sort order is not affected.

    Yields:
      VSConfiguration: configuration.
    """
    for name in sorted(self.names, reverse=reverse):
      for platform in sorted(self.platforms):
        yield self.GetByIdentifier(name, platform)
class VSProjectConfiguration(VSConfiguration):
  """Visual Studio project configuration.

  Attributes:
    additional_dependencies (list[str]): additional dependencies.
    basic_runtime_checks (str): basic runtime checks.
    character_set (str): character set.
    compile_as (str): compile as.
    data_execution_prevention (str): data execution prevention.
    debug_information_format (str): debug information format.
    detect_64bit_portability_problems (str): detect 64bit portability problems.
    enable_comdat_folding (str): enable comdat folding.
    enable_function_level_linking (str): enable function level linking.
    enable_intrinsic_functions (str): enable intrinsic functions.
    fixed_base_address (str): fixed base address.
    generate_debug_information (str): generate debug information.
    import_library (str): import library.
    include_directories (list[str]): include directories.
    librarian_ignore_defaults (str): librarian ignore defaults.
    librarian_output_file (str): librarian output file.
    library_directories (list[str]): library directories.
    link_incremental (str): link incremental.
    linker_output_directory (str): linker output directory.
    linker_output_file (str): linker output file.
    linker_values_set (bool): True if linker values are set.
    managed_extensions (str): managed extensions.
    module_definition_file (str): module definition file.
    name (str): project name.
    optimize_references (str): optimize references.
    optimization (str): optimization.
    output_type (str): output type.
    platform (str): platform.
    platform_toolset (str): platform toolset.
    precompiled_header (str): precompiled header.
    preprocessor_definitions (str): preprocessor definitions.
    randomized_base_address (str): randomized base address.
    runtime_library (str): runtime library.
    smaller_type_check (str): smaller type check.
    sub_system (str): sub system.
    target_machine (str): target machine.
    warning_as_error (str): warning as error.
    warning_level (str): warning level.
    whole_program_optimization (str): whole program optimization.
  """

  _OUTPUT_TYPE_STRINGS = {
      1: 'Application',
      2: 'DynamicLibrary',
      4: 'StaticLibrary'
  }

  def __init__(self):
    """Initializes a Visual Studio project configuration."""
    super(VSProjectConfiguration, self).__init__()

    # Note that name and platform are inherited from VSConfiguration.
    self.additional_dependencies = []
    self.basic_runtime_checks = ''
    self.character_set = ''
    self.compile_as = ''
    self.data_execution_prevention = ''
    self.debug_information_format = ''
    self.detect_64bit_portability_problems = ''
    self.enable_comdat_folding = ''
    self.enable_function_level_linking = ''
    self.enable_intrinsic_functions = ''
    self.fixed_base_address = ''
    self.generate_debug_information = ''
    self.import_library = ''
    self.include_directories = []
    self.librarian_ignore_defaults = ''
    self.librarian_output_file = ''
    self.library_directories = []
    self.link_incremental = ''
    self.linker_output_directory = ''
    self.linker_output_file = ''
    self.linker_values_set = False
    self.managed_extensions = ''
    self.module_definition_file = ''
    self.name = ''
    self.optimize_references = ''
    self.optimization = ''
    self.output_type = ''
    self.platform = ''
    self.platform_toolset = ''
    self.precompiled_header = ''
    self.preprocessor_definitions = ''
    self.randomized_base_address = ''
    self.runtime_library = ''
    self.smaller_type_check = ''
    self.sub_system = ''
    self.target_machine = ''
    self.warning_as_error = ''
    self.warning_level = ''
    self.whole_program_optimization = ''

  def _GetIntegerValueAsString(self, value, value_strings):
    """Maps an integer option value onto its string representation.

    Shared implementation of all the *_string properties below, which
    previously duplicated the same try/int/except pattern.

    Args:
      value (str): option value that should contain a decimal integer.
      value_strings (dict[int, str]): string representation per integer
          value.

    Returns:
      str: string representation, or an empty string if the value cannot
          be parsed or has no known representation.
    """
    try:
      integer_value = int(value, 10)
    except (TypeError, ValueError):
      return ''
    return value_strings.get(integer_value, '')

  @property
  def basic_runtime_checks_string(self):
    """str: basic runtime checks formatted as a string."""
    return self._GetIntegerValueAsString(
        self.basic_runtime_checks, {0: 'Default', 3: 'EnableFastChecks'})

  @property
  def character_set_string(self):
    """str: character set formatted as a string."""
    return self._GetIntegerValueAsString(self.character_set, {1: 'Unicode'})

  @property
  def compile_as_string(self):
    """str: compile formatted as a string."""
    return self._GetIntegerValueAsString(
        self.compile_as, {1: 'CompileAsC', 2: 'CompileAsCpp'})

  @property
  def data_execution_prevention_string(self):
    """str: data execution prevention formatted as a string."""
    return self._GetIntegerValueAsString(
        self.data_execution_prevention, {1: 'false', 2: 'true'})

  @property
  def debug_information_format_string(self):
    """str: debug information formatted as a string."""
    return self._GetIntegerValueAsString(
        self.debug_information_format, {3: 'ProgramDatabase'})

  @property
  def enable_comdat_folding_string(self):
    """str: enable comdat folding formatted as a string."""
    return self._GetIntegerValueAsString(
        self.enable_comdat_folding, {2: 'true'})

  @property
  def link_incremental_string(self):
    """str: link incremental formatted as a string."""
    return self._GetIntegerValueAsString(self.link_incremental, {1: 'false'})

  @property
  def optimize_references_string(self):
    """str: optimize references formatted as a string."""
    return self._GetIntegerValueAsString(self.optimize_references, {2: 'true'})

  @property
  def optimization_string(self):
    """str: optimization formatted as a string."""
    return self._GetIntegerValueAsString(
        self.optimization, {0: 'Disabled', 2: 'MaxSpeed'})

  @property
  def output_type_string(self):
    """str: output type formatted as a string."""
    return self._GetIntegerValueAsString(
        self.output_type, self._OUTPUT_TYPE_STRINGS)

  @property
  def precompiled_header_string(self):
    """str: precompiled header formatted as a string."""
    # TODO: do something with precompiled_header. For now only the parse is
    # performed and an empty string is always returned, as before.
    return self._GetIntegerValueAsString(self.precompiled_header, {})

  @property
  def randomized_base_address_string(self):
    """str: randomized base address formatted as a string."""
    return self._GetIntegerValueAsString(
        self.randomized_base_address, {1: 'false', 2: 'true'})

  @property
  def runtime_librarian_string(self):
    """str: runtime librarian formatted as a string."""
    return self._GetIntegerValueAsString(
        self.runtime_library,
        {2: 'MultiThreadedDLL', 3: 'MultiThreadedDebugDLL'})

  @property
  def sub_system_string(self):
    """str: sub system formatted as a string."""
    return self._GetIntegerValueAsString(
        self.sub_system, {0: 'NotSet', 1: 'Console'})

  @property
  def target_machine_string(self):
    """str: target machine formatted as a string."""
    # TODO: assuming here that 2 is x64.
    return self._GetIntegerValueAsString(
        self.target_machine, {1: 'MachineX86', 2: 'MachineX64'})

  @property
  def warning_level_string(self):
    """str: warning level formatted as a string."""
    return self._GetIntegerValueAsString(
        self.warning_level, {3: 'Level3', 4: 'Level4'})

  @property
  def whole_program_optimization_string(self):
    """str: whole program optimization formatted as a string."""
    return self._GetIntegerValueAsString(
        self.whole_program_optimization, {0: 'false', 1: 'true'})

  def CopyToX64(self):
    """Copies the Visual Studio project configuration to an x64 equivalent."""
    copy = VSProjectConfiguration()

    copy.additional_dependencies = list(self.additional_dependencies)
    copy.basic_runtime_checks = self.basic_runtime_checks
    copy.character_set = self.character_set
    copy.compile_as = self.compile_as
    copy.data_execution_prevention = self.data_execution_prevention
    copy.debug_information_format = self.debug_information_format
    copy.detect_64bit_portability_problems = (
        self.detect_64bit_portability_problems)
    copy.enable_comdat_folding = self.enable_comdat_folding
    copy.enable_function_level_linking = self.enable_function_level_linking
    copy.enable_intrinsic_functions = self.enable_intrinsic_functions
    copy.generate_debug_information = self.generate_debug_information
    copy.fixed_base_address = self.fixed_base_address
    copy.import_library = self.import_library
    copy.include_directories = list(self.include_directories)
    copy.librarian_ignore_defaults = self.librarian_ignore_defaults
    copy.librarian_output_file = self.librarian_output_file
    copy.library_directories = list(self.library_directories)
    copy.link_incremental = self.link_incremental
    copy.linker_output_directory = self.linker_output_directory
    copy.linker_output_file = self.linker_output_file
    copy.linker_values_set = self.linker_values_set
    copy.managed_extensions = self.managed_extensions
    copy.module_definition_file = self.module_definition_file
    copy.name = self.name
    copy.optimize_references = self.optimize_references
    copy.optimization = self.optimization
    copy.output_type = self.output_type
    # The copy always targets the x64 platform and machine.
    copy.platform = 'x64'
    copy.platform_toolset = ''
    copy.precompiled_header = self.precompiled_header
    copy.preprocessor_definitions = self.preprocessor_definitions
    copy.randomized_base_address = self.randomized_base_address
    copy.runtime_library = self.runtime_library
    copy.smaller_type_check = self.smaller_type_check
    copy.sub_system = self.sub_system
    copy.target_machine = '2'
    copy.warning_as_error = self.warning_as_error
    copy.warning_level = self.warning_level
    copy.whole_program_optimization = self.whole_program_optimization

    return copy

  def GetPlatformToolset(self, output_version):
    """Retrieves the platform toolset.

    Args:
      output_version (str): output Visual Studio version.

    Returns:
      str: platform toolset version.
    """
    platform_toolset = self.platform_toolset
    if not platform_toolset:
      if output_version == 2010 and self.platform == 'x64':
        platform_toolset = 'Windows7.1SDK'
      elif output_version == 2012:
        platform_toolset = 'v110'
      # elif output_version == 2015:
      #   platform_toolset = 'v140'
      elif output_version == 2017:
        platform_toolset = 'v141'
      elif output_version == 2019:
        platform_toolset = 'v142'
    return platform_toolset
class VSProjectInformation(object):
  """Visual Studio project information.

  Attributes:
    configurations (VSConfigurations): configurations.
    dependencies (list[str]): dependencies.
    guid (str): project identifier (GUID).
    header_files (list[str]): header files.
    keyword (str): keyword.
    name (str): project name.
    resource_files (list[str]): resource files.
    root_name_space (str): root name space.
    source_files (list[str]): source files.
    third_party_dependencies (list[str]): third party dependencies.
  """

  def __init__(self):
    """Initializes Visual Studio project information."""
    # Identification.
    self.guid = ''
    self.keyword = ''
    self.name = ''
    self.root_name_space = ''
    # Build configurations.
    self.configurations = VSConfigurations()
    # File lists.
    self.header_files = []
    self.resource_files = []
    self.source_files = []
    # Dependencies.
    self.dependencies = []
    self.third_party_dependencies = []
class VSSolutionConfiguration(VSConfiguration):
  """Visual Studio solution configuration."""

  def CopyToX64(self):
    """Copies the Visual Studio solution configuration to an x64 equivalent."""
    x64_copy = VSSolutionConfiguration()
    x64_copy.name = self.name
    x64_copy.platform = 'x64'
    return x64_copy
class VSSolutionProject(object):
  """Visual Studio solution project.

  GUIDs are stored lower-cased so later comparisons are case-insensitive.

  Attributes:
    name (str): project name.
    filename (str): name of the project file without extension.
    guid (str): project identifier (GUID).
  """

  def __init__(self, name, filename, guid):
    """Initializes a Visual Studio solution project.

    Args:
      name (str): project name.
      filename (str): name of the project file without extension.
      guid (str): project identifier (GUID).
    """
    self.dependencies = []
    self.filename = filename
    self.guid = guid.lower()
    self.name = name

  def AddDependency(self, dependency_guid):
    """Adds a dependency.

    Args:
      dependency_guid (str): project identifier (GUID) of the dependency.
    """
    normalized_guid = dependency_guid.lower()
    self.dependencies.append(normalized_guid)
| |
# -*- coding: utf-8 -*-
#
# Tunic
#
# Copyright 2014-2015 TSH Labs <projects@tshlabs.org>
#
# Available under the MIT license. See LICENSE for details.
#
"""
tunic.install
~~~~~~~~~~~~~
Perform installations on remote machines.
"""
from urlparse import urlparse
import os.path
from .core import try_repeatedly, FabRunner, ProjectBaseMixin
def _is_iterable(val):
    """Return True if ``val`` can be iterated over and is not a string.

    Strings are technically iterable but are almost never what callers
    mean when they pass a "collection", so they are explicitly excluded.
    """
    if isinstance(val, basestring):
        return False
    try:
        iter(val)
    except (ValueError, TypeError):
        return False
    return True
class VirtualEnvInstallation(ProjectBaseMixin):
    """Install one or more packages into a remote Python virtual environment.

    The target virtual environment is created on demand: if it does not
    already exist for the requested release, it is built as part of the
    install process.

    Dependency resolution either goes through the standard package index
    (PyPI) or through one or more alternative installation sources (a local
    PyPI instance, Artifactory, the local file system, etc.), in which case
    the default index is ignored. These two modes are mutually exclusive.

    See :doc:`design` for more information about the expected directory
    structure for deployments.

    .. versionadded:: 0.3.0
    """

    # pylint: disable=too-many-arguments
    def __init__(self, base, packages, sources=None, venv_path=None, runner=None):
        """Store the deploy base, the packages to install and, optionally,
        alternative dependency sources and the path to the virtualenv tool.

        :param str base: Absolute path to the root of the code deploy on
            the remote server
        :param list packages: A collection of package names to install into
            a remote virtual environment.
        :param list sources: A collection of alternative sources from which
            to install dependencies, given as strings that are either URLs
            or file paths, e.g. 'http://pypi.example.com/simple/' or
            '/tmp/build/mypackages'. Paths and URLs may be mixed in the
            same list of sources.
        :param str venv_path: Optional absolute path to the virtualenv tool
            on the remote server. Required if the virtualenv tool is not in
            the PATH on the remote server.
        :param FabRunner runner: Optional runner to use for executing
            remote and local commands to perform the installation.
        :raises ValueError: If the base directory isn't specified, if no
            packages are given, if packages is not an iterable collection of
            some kind, or if sources is specified but not an iterable
            collection of some kind.

        .. versionchanged:: 0.4.0
            Allow the path to the ``virtualenv`` script on the remote server
            to be specified.
        """
        super(VirtualEnvInstallation, self).__init__(base)
        if not packages:
            raise ValueError(
                "You must specify at least one package to install")
        if not _is_iterable(packages):
            raise ValueError("Packages must be an iterable")
        if sources is not None and not _is_iterable(sources):
            raise ValueError("Sources must be an iterable")
        self._packages = list(packages)
        self._sources = [] if sources is None else list(sources)
        self._venv_path = 'virtualenv' if venv_path is None else venv_path
        self._runner = FabRunner() if runner is None else runner

    def _get_install_sources(self):
        """Return pip arguments selecting the alternative package sources,
        or an empty string when no alternative sources were configured.
        """
        if not self._sources:
            return ''
        # Disable the default index and point pip at each configured source.
        flags = ['--no-index']
        flags.extend(
            "--find-links '{0}'".format(entry) for entry in self._sources)
        return ' '.join(flags)

    def install(self, release_id, upgrade=False):
        """Install the configured packages into a virtual environment.

        If the virtual environment for the given release ID does not exist
        on the remote system, it is created first, following the standard
        Tunic directory structure (see :doc:`design`).

        If ``upgrade=True`` is passed, packages already installed in the
        virtual environment are updated to their most recent versions.

        :param str release_id: Timestamp-based identifier for this
            deployment. If this ID corresponds to a virtual environment
            that already exists, packages will be installed into this
            environment.
        :param bool upgrade: Should packages be updated if they are
            already installed in the virtual environment.
        :return: The results of running the installation command using
            Fabric. Note that this return value is a decorated version
            of a string that contains additional meta data about the
            result of the command, in addition to the output generated.
        :rtype: str
        """
        release_path = os.path.join(self._releases, release_id)
        # Create the virtual environment for this release on demand.
        if not self._runner.exists(release_path):
            self._runner.run("{0} '{1}'".format(self._venv_path, release_path))
        pip_cmd = [os.path.join(release_path, 'bin', 'pip'), 'install']
        if upgrade:
            pip_cmd.append('--upgrade')
        source_flags = self._get_install_sources()
        if source_flags:
            pip_cmd.append(source_flags)
        pip_cmd.extend("'{0}'".format(name) for name in self._packages)
        return self._runner.run(' '.join(pip_cmd))
class StaticFileInstallation(ProjectBaseMixin):
    """Install the contents of a local directory into a remote release
    directory.

    If the remote release directory does not already exist, it will be
    created during the install process.

    See :doc:`design` for more information about the expected directory
    structure for deployments.

    .. versionadded:: 0.5.0
    """

    def __init__(self, base, local_path, runner=None):
        """Set the project base directory on the remote server and path to a
        directory of static content to be installed into a remote release
        directory.

        :param str base: Absolute path to the root of the code deploy on
            the remote server
        :param str local_path: Absolute or relative path to a local directory
            whose contents will be copied to a remote release directory.
        :param FabRunner runner: Optional runner to use for executing
            remote and local commands to perform the installation.
        :raises ValueError: If the base directory or local path isn't
            specified.
        """
        super(StaticFileInstallation, self).__init__(base)
        if not local_path:
            raise ValueError("You must specify a local path")
        self._local_path = local_path
        self._runner = runner if runner is not None else FabRunner()

    def install(self, release_id):
        """Install the contents of the local directory into a release directory.

        If the directory for the given release ID does not exist on the remote
        system, it will be created. The directory will be created according to
        the standard Tunic directory structure (see :doc:`design`).

        Note that the name and path of the local directory is irrelevant, only
        the contents of the specified directory will be transferred to the remote
        server. The contents will end up as children of the release directory on
        the remote server.

        :param str release_id: Timestamp-based identifier for this
            deployment. If this ID corresponds to a directory that already
            exists, contents of the local directory will be copied into
            this directory.
        :return: The results of the ``put`` command using Fabric. This return
            value is an iterable of the paths of all files uploaded on the remote
            server.
        """
        release_path = os.path.join(self._releases, release_id)
        if not self._runner.exists(release_path):
            self._runner.run("mkdir -p '{0}'".format(release_path))
        # Make sure to remove any user supplied globs or trailing slashes
        # so that we can ensure exactly the glob behavior we want from the
        # put command. Use rstrip (not strip) so only TRAILING characters
        # are removed: strip() would also eat a leading path separator and
        # silently turn an absolute local path into a relative one.
        local_path = self._local_path.rstrip('*').rstrip(os.path.sep)
        return self._runner.put(os.path.join(local_path, '*'), release_path)
class LocalArtifactInstallation(ProjectBaseMixin):
    """Install a single local file into a remote release directory.

    This can be useful for installing applications that are typically bundled
    as a single file, e.g. Go binaries or Java JAR files, etc.. The artifact
    can optionally be renamed as part of the installation process.

    If the remote release directory does not already exist, it will be
    created during the install process.

    See :doc:`design` for more information about the expected directory
    structure for deployments.

    .. versionadded:: 1.1.0
    """

    def __init__(self, base, local_file, remote_name=None, runner=None):
        """Set the project base directory on the remote server, local artifact (a
        single file) that should be installed remotely, and optional file name
        to rename the artifact to on the remote server.

        :param str base: Absolute path to the root of the code deploy on
            the remote server
        :param str local_file: Relative or absolute path to the local artifact
            to be installed on the remote server.
        :param str remote_name: Optional file name for the artifact after it has
            been installed on the remote server. For example, if the artifact should
            always be called 'application.jar' on the remote server but might
            be named differently ('application-1.2.3.jar') locally, you would
            specify ``remote_name='application.jar'`` for this parameter.
        :param FabRunner runner: Optional runner to use for executing
            remote and local commands to perform the installation.
        :raises ValueError: If the base directory or local file isn't
            specified.
        """
        super(LocalArtifactInstallation, self).__init__(base)
        if not local_file:
            raise ValueError("You must specify a local file path")
        self._local_file = local_file
        self._remote_name = remote_name
        self._runner = runner if runner is not None else FabRunner()

    def install(self, release_id):
        """Install the local artifact into the remote release directory, optionally
        with a different name than the artifact had locally.

        If the directory for the given release ID does not exist on the remote
        system, it will be created. The directory will be created according to
        the standard Tunic directory structure (see :doc:`design`).

        :param str release_id: Timestamp-based identifier for this deployment.
        :return: The results of the ``put`` command using Fabric. This return
            value is an iterable of the paths of all files uploaded on the remote
            server.
        """
        release_path = os.path.join(self._releases, release_id)
        if not self._runner.exists(release_path):
            self._runner.run("mkdir -p '{0}'".format(release_path))
        # The artifact can optionally be renamed when being uploaded to
        # remote server. Useful for when we need a consistent name for
        # each deploy on the remote server but the local artifact includes
        # version numbers or something.
        if self._remote_name is not None:
            destination = os.path.join(release_path, self._remote_name)
        else:
            destination = release_path
        return self._runner.put(self._local_file, destination, mirror_local_mode=True)
def download_url(url, destination, retries=None, retry_delay=None, runner=None):
    """Download the given URL with wget to the provided path.

    The command is run via Fabric on the current remote machine, so the
    destination path is interpreted on the remote machine.

    :param str url: URL to download onto the remote machine
    :param str destination: Path to download the URL to on the remote
        machine
    :param int retries: Max number of times to retry downloads after a failure
    :param float retry_delay: Number of seconds between download retries
    :param FabRunner runner: Optional runner to use for executing commands.
    :return: The results of the wget call
    """
    if runner is None:
        runner = FabRunner()

    def _fetch():
        # Quote both values so spaces in either do not break the command.
        command = "wget --quiet --output-document '{0}' '{1}'".format(
            destination, url)
        return runner.run(command)

    return try_repeatedly(_fetch, max_retries=retries, delay=retry_delay)
class HttpArtifactInstallation(ProjectBaseMixin):
    """Download and install a single file into a remote release directory.

    This is useful for installing an application that is typically bundled as a
    single file, e.g. Go binaries or Java JAR files, after downloading it from
    some sort of artifact repository (such as a company-wide file server or artifact
    store like Artifactory).

    Downloads are performed over HTTP or HTTPS using a call to ``wget`` on the remote
    machine by default. An alternate download method may be specified when creating a
    new instance of this installer by providing an alternate implementation. The
    download method is expected to conform to the following interface.

    >>> def download(url, destination, retries=None, retry_delay=None):
    ...     pass

    Where ``url`` is a URL to the artifact that should be downloaded, ``destination``
    is the absolute path on the remote machine that the artifact should be downloaded
    to, ``retries`` is the number of download attempts made after a failure, and
    ``retry_delay`` is the number of seconds between retries. The function should return
    the result of the Fabric command run (e.g. calling 'curl' or 'wget' with
    :func:`fabric.api.run`).

    If the remote release directory does not already exist, it will be
    created during the install process.

    See :doc:`design` for more information about the expected directory
    structure for deployments.

    .. versionadded:: 1.2.0
    """

    # Default retry behavior: one retry, no delay between attempts.
    _default_retries = 1
    _default_retry_delay = 0

    # pylint: disable=too-many-arguments
    def __init__(self, base, artifact_url, remote_name=None, retries=None, retry_delay=None,
                 downloader=None, runner=None):
        """Set the project base directory on the remote server, URL to the artifact
        that should be installed remotely, and optional file name to rename the artifact
        to on the remote server.

        :param str base: Absolute path to the root of the code deploy on
            the remote server
        :param str artifact_url: URL to the artifact to be downloaded and installed on
            the remote server.
        :param str remote_name: Optional file name for the artifact after it has
            been installed on the remote server. For example, if the artifact should
            always be called 'application.jar' on the remote server but might
            be named differently ('application-1.2.3.jar') locally, you would
            specify ``remote_name='application.jar'`` for this parameter.
        :param int retries: Max number of times to retry downloads after a failure. Default
            is to retry once after a failure.
        :param float retry_delay: Number of seconds between download retries. Default is not
            to wait between a failure and subsequent retry.
        :param callable downloader: Function to download the artifact with the
            interface specified above. This is primarily for unit testing but
            may be useful for users that need to be able to customize how the
            artifact HTTP store is accessed.
        :param FabRunner runner: Optional runner to use for executing
            remote and local commands to perform the installation.
        :raises ValueError: If the base directory or artifact URL isn't
            specified.

        .. versionchanged:: 1.3.0
            Added the ``retries`` and ``retry_delay`` parameters
        """
        super(HttpArtifactInstallation, self).__init__(base)
        if not artifact_url:
            raise ValueError("You must specify a URL")
        self._artifact_url = artifact_url
        self._remote_name = remote_name
        self._retries = retries if retries is not None else self._default_retries
        self._retry_delay = retry_delay if retry_delay is not None else self._default_retry_delay
        self._downloader = downloader if downloader is not None else download_url
        self._runner = runner if runner is not None else FabRunner()

    @staticmethod
    def _get_file_from_url(url):
        """Get the filename part of the path component from a URL.

        :raises ValueError: If the URL has no path component or the path
            has no final file-name component.
        """
        path = urlparse(url).path
        if not path:
            raise ValueError("Could not extract path from URL '{0}'".format(url))
        name = os.path.basename(path)
        if not name:
            raise ValueError("Could not extract file name from path '{0}'".format(path))
        return name

    def install(self, release_id):
        """Download and install an artifact into the remote release directory,
        optionally with a different name than the artifact had.

        If the directory for the given release ID does not exist on the remote
        system, it will be created. The directory will be created according to
        the standard Tunic directory structure (see :doc:`design`).

        :param str release_id: Timestamp-based identifier for this deployment.
        :return: The results of the download function being run. This return value
            should be the result of running a command with Fabric. By default
            this will be the result of running ``wget``.
        """
        release_path = os.path.join(self._releases, release_id)
        if not self._runner.exists(release_path):
            self._runner.run("mkdir -p '{0}'".format(release_path))
        # The artifact can optionally be renamed to something specific when
        # downloaded on the remote server. In that case use the provided name
        # in the download path. Otherwise, just use the last component
        # of the URL we're downloading.
        if self._remote_name is not None:
            destination = os.path.join(release_path, self._remote_name)
        else:
            destination = os.path.join(release_path, self._get_file_from_url(self._artifact_url))
        # Note that although the default implementation of a download method
        # accepts a FabRunner instance, we aren't passing our instance here.
        # The reason being, that's only needed for testing the download_url
        # method. If we were testing this class, we'd mock out the download
        # method anyway. So, it's not part of the public API of the download
        # interface and we don't deal with it here.
        return self._downloader(
            self._artifact_url,
            destination,
            retries=self._retries,
            retry_delay=self._retry_delay
        )
# pylint: disable=too-few-public-methods
class LocalArtifactTransfer(object):
    """Context manager that copies a local artifact (file or directory)
    to a remote server on entry and deletes the remote copy on exit.

    Entering the context manager yields the full path of the transferred
    file or directory on the remote server: ``remote_path`` joined with
    the right-most component of ``local_path``.

    For example, if ``/tmp/myapp`` is a local directory that contains
    several files:

    >>> transfer = LocalArtifactTransfer('/tmp/myapp', '/tmp/artifacts')
    >>> with transfer as remote_dest:
    ...     pass

    copies the ``myapp`` directory and its contents to
    ``/tmp/artifacts/myapp`` on the remote machine for the duration of
    the block, with ``remote_dest`` set to ``/tmp/artifacts/myapp``; the
    remote copy is removed when the context manager exits.

    A single local file behaves the same way: transferring
    ``/tmp/myartifact.zip`` to ``/tmp/artifacts`` yields
    ``/tmp/artifacts/myartifact.zip`` on the remote machine, which is
    likewise removed when the context manager exits.

    The destination of the artifacts must be a directory that is writable
    by the user running the deploy or that the user has permission to
    create. Only the remote copy is removed on exit; the local artifacts
    are never modified or removed.

    .. versionadded:: 0.4.0
    """

    def __init__(self, local_path, remote_path, runner=None):
        """Set the local artifact path and the remote directory that it
        should be transferred to.

        Both paths should be given without trailing slashes; any trailing
        slashes are removed.

        :param str local_path: Directory path on the local machine that
            contains the build artifacts to be transferred (without a
            trailing slash) or the path on the local machine of a single
            file.
        :param str remote_path: Directory on the remote machine that
            the build artifacts should be transferred to (without a
            trailing slash).
        :param FabRunner runner: Optional runner to use for executing
            commands to transfer artifacts.

        .. versionchanged:: 0.5.0
            Trailing slashes are now removed from ``local_path`` and
            ``remote_path``.
        """
        self._local_path = local_path.rstrip(os.path.sep)
        self._remote_path = remote_path.rstrip(os.path.sep)
        # Remote destination is the remote directory plus the final
        # component of the local path.
        basename = os.path.basename(self._local_path)
        self._remote_dest = os.path.join(self._remote_path, basename)
        self._runner = FabRunner() if runner is None else runner

    def __enter__(self):
        """Ensure the remote directory exists, upload the artifacts, and
        return the remote destination path.

        The remote destination path is the remote path joined with the
        right-most component of the local path.

        :return: The path artifacts were transferred to on the remote
            server
        :rtype: str
        """
        self._runner.run("mkdir -p '{0}'".format(self._remote_path))
        self._runner.put(self._local_path, self._remote_path)
        return self._remote_dest

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Remove the transferred artifacts from the remote server.

        Exceptions raised inside the block are not suppressed.
        """
        self._runner.run("rm -rf '{0}'".format(self._remote_dest))
        return False
| |
from mathml_presentation_query import MathMLPresentation
from mathml_content_query import MathMLContent
from query_all import Query_All
import modular, sigure, subtree
from collections import OrderedDict
import functools
import operator
import re
from lxml import etree
from MathConverter import MathConverter
# Regex matching the Lucene/Solr query-syntax special characters that must
# be backslash-escaped before a value is embedded in a query string.
re_escape = r'([+&|!(){}[\]"~*?:\\^-])'
# Regex matching the literal word 'qvar' (query-variable placeholder).
re_qvar = r'\bqvar\b'
class Query:
    """Builds Solr queries combining MathML formula features and keywords.

    Formulas are encoded both as path features (ordered/unordered operator
    and argument paths) and as hashes (subtree, sigure, modular), for
    Presentation MathML and Content MathML; each encoding becomes a
    separate Solr field query.
    """
    # Class-level defaults; shadowed by the instance attributes that
    # __init__ assigns.
    solr_url_math = ''
    n_row = 0
    def __init__(self, solrurlmath, nrow):
        """Stores the Solr math-index URL and the maximum row count."""
        self.solr_url_math = solrurlmath
        self.n_row = nrow
    def __escape(self, string):
        """Escapes Lucene special characters and drops tokens containing
        'qvar'; returns '""' (empty phrase) when nothing searchable
        remains so the enclosing field query stays syntactically valid."""
        retval = ' '.join([token for token in re.sub(re_escape, r'\\\1', string).split(' ') if 'qvar' not in token])
        if retval.strip() == "":
            return '""'
        return retval
    def __getUnicodeText(self, string):
        """Decodes a UTF-8 byte string to unicode; passes other values
        through unchanged (Python 2 semantics)."""
        if type(string) is str:
            return string.decode('utf-8')
        else:
            return string
    def get_op_arg_unif(self, paths):
        """Splits whitespace-separated path strings into operator paths,
        unification-friendly operator paths and argument paths.

        Args:
            paths: iterable of path strings whose elements are
                '#'-separated cells (e.g. 'mi#x', 'mo#+').

        Returns:
            Tuple (ops, ops_unif, args) of lists of non-empty strings.
        """
        ops = []
        ops_unif = []
        args = []
        for p in paths:
            elem = p.split()
            op = ""
            op_unif = ""
            arg = ""
            for el in elem:
                # Operator cells (<mo>) contribute to both operator buckets.
                if el.startswith("mo#") or "#mo#" in el:
                    op = ("%s %s" % (op, el)).strip()
                    op_unif = ("%s %s" % (op_unif, el)).strip()
                    continue
                # Identifier/number cells (<mi>/<mn>) are arguments; for
                # identifiers a copy with the value cell dropped also feeds
                # the unification operator bucket.
                if el.startswith("mi#") or "#mi#" in el or el.startswith("mn#") or "#mn#" in el:
                    arg = ("%s %s" % (arg, el)).strip()
                    cells = el.split("#")
                    if cells[-2] == "mi":
                        unif_concat = "#".join(cells[:-1])
                        op_unif = ("%s %s" % (op_unif, unif_concat)).strip()
                    continue
                # Any other cell contributes to all three buckets.
                op = ("%s %s" % (op, el)).strip()
                op_unif = ("%s %s" % (op_unif, el)).strip()
                arg= ("%s %s" % (arg, el)).strip()
            if op.strip() != "": ops.append(op)
            if op_unif.strip() != "": ops_unif.append(op_unif)
            if arg.strip() != "": args.append(arg)
        return ops, ops_unif, args
    def __encodeMaths_path_pres(self, mts_string):
        """Encodes Presentation MathML as ordered/unordered path features
        plus sister groups; every result is empty when no semantics
        element could be extracted."""
        procPres = MathMLPresentation('http://localhost:9000/upconvert')
        semantics, mathml_string, mathml_presentation = procPres.get_doc_with_orig(mts_string)
        opaths = []
        opaths_ops = []
        opaths_args = []
        upaths = []
        upaths_ops = []
        upaths_args = []
        sisters = []
        if semantics is not None:
            opaths, sisters = procPres.get_ordered_paths_and_sisters(semantics, False)
            upaths = map(lambda paths: ' '.join(map(self.__getUnicodeText, paths)), procPres.get_unordered_paths(opaths))
            opaths = map(lambda paths: ' '.join(map(self.__getUnicodeText, paths)), opaths)
            opaths_ops, opaths_ops_unif, opaths_args = self.get_op_arg_unif(opaths)
            upaths_ops, upaths_ops_unif, upaths_args = self.get_op_arg_unif(upaths)
        return opaths_ops, opaths_args, upaths_ops, upaths_args, sisters
    def __encodeMaths_hash_pres(self, mts_string):
        """Encodes Presentation MathML with the subtree, sigure and modular
        hash functions; every result is empty when no semantics element
        could be extracted."""
        procPres = MathMLPresentation('http://localhost:9000/upconvert')
        semantics, mathml_string, mathml_presentation = procPres.get_doc_with_orig(mts_string)
        psubhash = []
        psighash = []
        pmodhash = []
        if semantics is not None:
            psubhash = subtree.hash_string(mathml_presentation)
            psighash = sigure.hash_string(mathml_presentation)
            pmodhash = modular.hash_string_generator(2 ** 32)(mathml_presentation)
        return psubhash, psighash, pmodhash
    def __constructSolrQuery_math_path_pres(self, qmath):
        """Builds the path-based Presentation MathML field queries; only
        the first path of each kind is used and '""' is substituted when
        a kind has no paths."""
        opath_ops, opath_args, upath_ops, upath_args, sister = self.__encodeMaths_path_pres(qmath)
        # opath_query = "opaths:(%s)" % self.__escape(' '.join(opath))
        # upath_query = "upaths:(%s)" % self.__escape(' '.join(upath))
        opath_ops_query = "p_opaths_op:(%s)" % (self.__escape(opath_ops[0]) if len(opath_ops) > 0 else '""')
        opath_arg_query = "p_opaths_arg:(%s)" % (self.__escape(opath_args[0]) if len(opath_args) > 0 else '""')
        upath_ops_query = "p_upaths_op:(%s)" % (self.__escape(upath_ops[0]) if len(upath_ops) > 0 else '""')
        upath_arg_query = "p_upaths_arg:(%s)" % (self.__escape(upath_args[0]) if len(upath_args) > 0 else '""')
        # Drop sister groups that consist only of qvar placeholders.
        sister = [s for s in sister if ''.join(s).replace('qvar', '').strip() != '']
        sister_query = ' '.join(map(lambda family: 'p_sisters:("%s")' % self.__escape(' '.join(family)), sister))
        return opath_ops_query, opath_arg_query, upath_ops_query, upath_arg_query, sister_query
    def __constructSolrQuery_math_hash_pres(self, qmath):
        """Builds the hash-based Presentation MathML field queries; '-' is
        escaped for Lucene and '*' (match anything) is substituted when a
        hash list is empty."""
        psubhash, psighash, pmodhash = self.__encodeMaths_hash_pres(qmath)
        psubhash_query = "p_stree_hashes:(%s)" % (' '.join([str(val) for val in psubhash]).replace('-', '\-') if len(psubhash) > 0 else '*')
        psighash_query = "p_sigure_hashes:(%s)" % (' '.join([str(val) for val in psighash]).replace('-', '\-') if len(psighash) > 0 else '*')
        pmodhash_query = "p_mtrick_hashes:(%s)" % (' '.join([str(val) for val in pmodhash]).replace('-', '\-') if len(pmodhash) > 0 else '*')
        return psubhash_query, psighash_query, pmodhash_query
    def __constructSolrQuery_math_pres(self, query_element):
        """Combines the path- and hash-based Presentation MathML field
        queries for one query formula."""
        #construct math query
        query_op_opath = ''
        query_arg_opath = ''
        query_op_upath = ''
        query_arg_upath = ''
        query_sister = ''
        query_psubhash = ""
        query_psighash = ""
        query_pmodhash = ""
        opath_op_query, opath_arg_query, upath_op_query, upath_arg_query, sister_query = self.__constructSolrQuery_math_path_pres(query_element)
        psubhash_query, psighash_query, pmodhash_query = self.__constructSolrQuery_math_hash_pres(query_element)
        #comb:path pres
        query_op_opath = ' '.join([query_op_opath, opath_op_query]).strip()
        query_arg_opath = ' '.join([query_arg_opath, opath_arg_query]).strip()
        query_op_upath = ' '.join([query_op_upath, upath_op_query]).strip()
        query_arg_upath = ' '.join([query_arg_upath, upath_arg_query]).strip()
        query_sister = ' '.join([query_sister, sister_query]).strip()
        #comb3: hash pres
        query_psubhash = " ".join([query_psubhash, psubhash_query]).strip()
        query_psighash = " ".join([query_psighash, psighash_query]).strip()
        query_pmodhash = " ".join([query_pmodhash, pmodhash_query]).strip()
        return query_op_opath, query_arg_opath, query_op_upath, query_arg_upath, query_sister, query_psubhash, query_psighash, query_pmodhash
    def __encodeMaths_path_cont(self, mts_string):
        """Encodes Content MathML as ordered/unordered operator and
        argument paths; only the encoding of the last tree is returned."""
        procCont = MathMLContent()
        oopers = []
        oargs = []
        uopers = []
        uargs = []
        trees, cmathmls_str = procCont.encode_mathml_as_tree(mts_string)
        for tree in trees:
            ooper, oarg = procCont.encode_paths(tree)
            uoper = procCont.get_unordered_paths(ooper)
            uarg = procCont.get_unordered_paths(oarg)
            oopers.extend(map(self.__getUnicodeText, ooper))
            uopers.extend(map(self.__getUnicodeText, uoper))
            oargs.extend(map(self.__getUnicodeText, oarg))
            uargs.extend(map(self.__getUnicodeText, uarg))
        # oopers = map(lambda paths: ' '.join(paths), oopers)
        # oargs = map(lambda paths: ' '.join(paths), oargs)
        # uopers = map(lambda paths: ' '.join(paths), uopers)
        # uargs = map(lambda paths: ' '.join(paths), uargs)
        # return oopers, oargs, uopers, uargs
        # NOTE(review): raises IndexError when encode_mathml_as_tree yields
        # no trees — presumably callers always pass parseable MathML; verify.
        return oopers[-1], oargs[-1], uopers[-1], uargs[-1]
    def __encodeMaths_hash_cont(self, mts_string):
        """Encodes each Content MathML string with the subtree, sigure and
        modular hash functions."""
        procCont = MathMLContent()
        trees, cmathmls_str = procCont.encode_mathml_as_tree(mts_string)
        csubhash = []
        csighash = []
        cmodhash = []
        for cmathml_str in cmathmls_str:
            csubhash.extend(subtree.hash_string(cmathml_str))
            csighash.extend(sigure.hash_string(cmathml_str))
            cmodhash.extend(modular.hash_string_generator(2 ** 32)(cmathml_str))
        return csubhash, csighash, cmodhash
    def __constructSolrQuery_math_path_cont(self, qmath):
        """Builds the path-based Content MathML field queries."""
        ooper, oarg, uoper, uarg = self.__encodeMaths_path_cont(qmath)
        ooper_query = "c_opaths_op:(%s)" % self.__escape(' '.join(ooper))
        oarg_query = "c_opaths_arg:(%s)" % self.__escape(' '.join(oarg))
        uoper_query = "c_upaths_op:(%s)" % self.__escape(' '.join(uoper))
        uarg_query = "c_upaths_arg:(%s)" % self.__escape(' '.join(uarg))
        return ooper_query, oarg_query, uoper_query, uarg_query
    def __constructSolrQuery_math_hash_cont(self, qmath):
        """Builds the hash-based Content MathML field queries; '-' is
        escaped for Lucene and '*' is substituted when a hash list is
        empty."""
        csubhash, csighash, cmodhash = self.__encodeMaths_hash_cont(qmath)
        csubhash_query = "c_stree_hashes:(%s)" % (' '.join([str(val) for val in csubhash]).replace('-', '\-') if len(csubhash) > 0 else '*')
        csighash_query = "c_sigure_hashes:(%s)" % (' '.join([str(val) for val in csighash]).replace('-', '\-') if len(csighash) > 0 else '*')
        cmodhash_query = "c_mtrick_hashes:(%s)" % (' '.join([str(val) for val in cmodhash]).replace('-', '\-') if len(cmodhash) > 0 else '*')
        return csubhash_query, csighash_query, cmodhash_query
    def __constructSolrQuery_math_cont(self, query_element):
        """Combines the path- and hash-based Content MathML field queries
        for one query formula."""
        #construct math query
        query_ooper = ""
        query_oarg = ""
        query_uoper = ""
        query_uarg = ""
        query_csubhash = ""
        query_csighash = ""
        query_cmodhash = ""
        ooper_query, oarg_query, uoper_query, uarg_query = self.__constructSolrQuery_math_path_cont(query_element)
        csubhash_query, csighash_query, cmodhash_query = self.__constructSolrQuery_math_hash_cont(query_element)
        #comb2: path content
        query_ooper = " ".join([query_ooper, ooper_query]).strip()
        query_oarg = " ".join([query_oarg, oarg_query]).strip()
        query_uoper = " ".join([query_uoper, uoper_query]).strip()
        query_uarg = " ".join([query_uarg, uarg_query]).strip()
        #comb4: hash content
        query_csubhash = " ".join([query_csubhash, csubhash_query]).strip()
        query_csighash = " ".join([query_csighash, csighash_query]).strip()
        query_cmodhash = " ".join([query_cmodhash, cmodhash_query]).strip()
        return query_ooper, query_oarg, query_uoper, query_uarg, query_csubhash, query_csighash, query_cmodhash
    def __constructSolrQuery_words(self, keywords):
        """Builds weighted OR keyword queries over all text fields.

        Args:
            keywords: mapping of keyword term -> boost weight.
        """
        text_fields = []
        or_terms = ' OR '.join('"%s"^%s' % (term, term_weight) for term, term_weight in keywords.iteritems())
        for fld in ["contexts", "contexts_children", "nounphrases", "nounphrases_children", "descriptions", "descriptions_children"]:
            text_fields.append("%s:(%s)" % (fld, or_terms))
        return text_fields
    def __summarize_score_max(self, all_maths):
        """Summarizes per-formula scores by taking the maximum value."""
        return max(all_maths.values())
    def askSolr_all_pres(self, query):
        """Runs the combined formula + keyword query against Solr.

        Args:
            query: dict with a 'mathml' MathML string (may be '') and a
                'text' mapping of keyword -> weight.

        Returns:
            The (qmath, qdocs) pair produced by Query_All.
        """
        qall = Query_All(self.solr_url_math, self.n_row)
        fields = []
        if query["mathml"] != "":
            # NOTE(review): mc is created but never used in this method.
            mc = MathConverter()
            mathml = query['mathml']
            terms_fields = self.__constructSolrQuery_math_pres(mathml)
            fields += list(terms_fields)
        if len(query["text"]) > 0: #query['text'].strip() != "":
            text_fields = self.__constructSolrQuery_words(query['text'])
            fields += text_fields
        qmath, qdocs = qall.ask_solr_math_fqueries(fields, query["mathml"])
        return qmath, qdocs
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Thread and ThreadGroup that reraise exceptions on the main thread."""
# pylint: disable=W0212
import logging
import sys
import threading
import time
import traceback
from devil.utils import watchdog_timer
class TimeoutError(Exception):
  """Exception raised by this module when a join operation times out."""
def LogThreadStack(thread, error_log_func=logging.critical):
  """Log the current stack of a live thread.

  Args:
    thread: a threading.Thread instance.
    error_log_func: Logging function when logging errors.
  """
  banner = '*' * 80
  # Pull the thread's current frame out of the interpreter's frame table.
  frame = sys._current_frames()[thread.ident]
  error_log_func(banner)
  error_log_func('Stack dump for thread %r', thread.name)
  error_log_func(banner)
  for filename, lineno, name, line in traceback.extract_stack(frame):
    error_log_func('File: "%s", line %d, in %s', filename, lineno, name)
    if line:
      error_log_func(' %s', line.strip())
  error_log_func(banner)
class ReraiserThread(threading.Thread):
  """Thread class that can reraise exceptions."""

  def __init__(self, func, args=None, kwargs=None, name=None):
    """Initialize thread.

    Args:
      func: callable to call on a new thread.
      args: list of positional arguments for callable, defaults to empty.
      kwargs: dictionary of keyword arguments for callable, defaults to empty.
      name: thread name, defaults to Thread-N.
    """
    # Use the wrapped function's name as the thread name, unless the caller
    # supplied one or the function is an anonymous lambda.
    if not name and func.__name__ != '<lambda>':
      name = func.__name__
    super(ReraiserThread, self).__init__(name=name)
    if not args:
      args = []
    if not kwargs:
      kwargs = {}
    # Daemonize so lingering worker threads cannot keep the process alive.
    self.daemon = True
    self._func = func
    self._args = args
    self._kwargs = kwargs
    self._ret = None  # Return value of func, populated by run().
    self._exc_info = None  # sys.exc_info() tuple if func raised, else None.
    self._thread_group = None  # Owning ReraiserThreadGroup, set by Add().

  def ReraiseIfException(self):
    """Reraise exception if an exception was raised in the thread."""
    if self._exc_info:
      # Python 2-only three-argument raise: reraises the captured exception
      # with its original traceback on the calling (main) thread.
      raise self._exc_info[0], self._exc_info[1], self._exc_info[2]

  def GetReturnValue(self):
    """Reraise exception if present, otherwise get the return value."""
    self.ReraiseIfException()
    return self._ret

  # override
  def run(self):
    """Overrides Thread.run() to add support for reraising exceptions."""
    try:
      self._ret = self._func(*self._args, **self._kwargs)
    except:  # pylint: disable=W0702
      # Capture full exception state so the main thread can reraise it later.
      self._exc_info = sys.exc_info()
class ReraiserThreadGroup(object):
  """A group of ReraiserThread objects.

  Joining the group reraises any exception captured by a member thread on
  the joining (main) thread.
  """

  def __init__(self, threads=None):
    """Initialize thread group.

    Args:
      threads: a list of ReraiserThread objects; defaults to empty.
    """
    self._threads = []
    # Set when a thread from one group has called JoinAll on another. It is
    # used to detect when there is a TimeoutRetryThread active that links to
    # the current thread.
    self.blocked_parent_thread_group = None
    if threads:
      for thread in threads:
        self.Add(thread)

  def Add(self, thread):
    """Add a thread to the group.

    Args:
      thread: a ReraiserThread object.
    """
    # A thread may belong to at most one group.
    assert thread._thread_group is None
    thread._thread_group = self
    self._threads.append(thread)

  def StartAll(self, will_block=False):
    """Start all threads.

    Args:
      will_block: Whether the calling thread will subsequently block on this
        thread group. Causes the active ReraiserThreadGroup (if there is one)
        to be marked as blocking on this thread group.
    """
    if will_block:
      # Multiple threads blocking on the same outer thread should not happen
      # in practice.
      assert not self.blocked_parent_thread_group
      self.blocked_parent_thread_group = CurrentThreadGroup()
    for thread in self._threads:
      thread.start()

  def _JoinAll(self, watcher=None, timeout=None):
    """Join all threads without stack dumps.

    Reraises exceptions raised by the child threads and supports breaking
    immediately on exceptions raised on the main thread.

    Args:
      watcher: Watchdog object providing the thread timeout. If none is
        provided, the thread will never be timed out.
      timeout: An optional number of seconds to wait before timing out the
        join operation. This will not time out the threads.
    """
    if watcher is None:
      watcher = watchdog_timer.WatchdogTimer(None)
    alive_threads = self._threads[:]
    end_time = (time.time() + timeout) if timeout else None
    try:
      while alive_threads and (end_time is None or end_time > time.time()):
        for thread in alive_threads[:]:
          if watcher.IsTimedOut():
            raise TimeoutError('Timed out waiting for %d of %d threads.' %
                               (len(alive_threads), len(self._threads)))
          # Allow the main thread to periodically check for interrupts.
          thread.join(0.1)
          # Fix: isAlive() was removed in Python 3.9; is_alive() has been
          # available since Python 2.6, so this is backward-compatible.
          if not thread.is_alive():
            alive_threads.remove(thread)
      # All threads are allowed to complete before reraising exceptions.
      for thread in self._threads:
        thread.ReraiseIfException()
    finally:
      self.blocked_parent_thread_group = None

  def IsAlive(self):
    """Check whether any of the threads are still alive.

    Returns:
      Whether any of the threads are still alive.
    """
    return any(t.is_alive() for t in self._threads)

  def JoinAll(self, watcher=None, timeout=None,
              error_log_func=logging.critical):
    """Join all threads.

    Reraises exceptions raised by the child threads and supports breaking
    immediately on exceptions raised on the main thread. Unfinished threads'
    stacks will be logged on watchdog timeout.

    Args:
      watcher: Watchdog object providing the thread timeout. If none is
        provided, the thread will never be timed out.
      timeout: An optional number of seconds to wait before timing out the
        join operation. This will not time out the threads.
      error_log_func: Logging function when logging errors.
    """
    try:
      self._JoinAll(watcher, timeout)
    except TimeoutError:
      error_log_func('Timed out. Dumping threads.')
      for thread in (t for t in self._threads if t.is_alive()):
        LogThreadStack(thread, error_log_func=error_log_func)
      raise

  def GetAllReturnValues(self, watcher=None):
    """Get all return values, joining all threads if necessary.

    Args:
      watcher: same as in |JoinAll|. Only used if threads are alive.

    Returns:
      A list of the threads' return values, in the order the threads were
      added.
    """
    if any(t.is_alive() for t in self._threads):
      self.JoinAll(watcher)
    return [t.GetReturnValue() for t in self._threads]
def CurrentThreadGroup():
  """Returns the ReraiserThreadGroup that owns the running thread.

  Returns:
    The current thread group, otherwise None.
  """
  running = threading.current_thread()
  if not isinstance(running, ReraiserThread):
    return None
  return running._thread_group  # pylint: disable=no-member
def RunAsync(funcs, watcher=None):
  """Executes the given functions in parallel and returns their results.

  Args:
    funcs: List of functions to perform on their own threads.
    watcher: Watchdog object providing timeout, by default waits forever.

  Returns:
    A list of return values in the order of the given functions.
  """
  group = ReraiserThreadGroup([ReraiserThread(f) for f in funcs])
  group.StartAll(will_block=True)
  return group.GetAllReturnValues(watcher=watcher)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models as _models
T = TypeVar('T')
# Signature of the optional ``cls`` response hook passed via kwargs:
# (raw pipeline response, deserialized result, response headers) -> Any.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SeasonsOperations:
    """SeasonsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.agrifood.farming.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # Alias to the generated models module, exposed for callers.
    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        min_start_date_time: Optional[datetime.datetime] = None,
        max_start_date_time: Optional[datetime.datetime] = None,
        min_end_date_time: Optional[datetime.datetime] = None,
        max_end_date_time: Optional[datetime.datetime] = None,
        years: Optional[List[int]] = None,
        ids: Optional[List[str]] = None,
        names: Optional[List[str]] = None,
        property_filters: Optional[List[str]] = None,
        statuses: Optional[List[str]] = None,
        min_created_date_time: Optional[datetime.datetime] = None,
        max_created_date_time: Optional[datetime.datetime] = None,
        min_last_modified_date_time: Optional[datetime.datetime] = None,
        max_last_modified_date_time: Optional[datetime.datetime] = None,
        max_page_size: Optional[int] = 50,
        skip_token: Optional[str] = None,
        **kwargs: Any
    ) -> AsyncIterable["_models.SeasonListResponse"]:
        """Returns a paginated list of season resources.

        :param min_start_date_time: Minimum season start datetime, sample format: yyyy-MM-ddTHH:mm:ssZ.
        :type min_start_date_time: ~datetime.datetime
        :param max_start_date_time: Maximum season start datetime, sample format: yyyy-MM-ddTHH:mm:ssZ.
        :type max_start_date_time: ~datetime.datetime
        :param min_end_date_time: Minimum season end datetime, sample format: yyyy-MM-ddTHH:mm:ssZ.
        :type min_end_date_time: ~datetime.datetime
        :param max_end_date_time: Maximum season end datetime, sample format: yyyy-MM-ddTHH:mm:ssZ.
        :type max_end_date_time: ~datetime.datetime
        :param years: Years of the resource.
        :type years: list[int]
        :param ids: Ids of the resource.
        :type ids: list[str]
        :param names: Names of the resource.
        :type names: list[str]
        :param property_filters: Filters on key-value pairs within the Properties object.
         eg. "{testKey} eq {testValue}".
        :type property_filters: list[str]
        :param statuses: Statuses of the resource.
        :type statuses: list[str]
        :param min_created_date_time: Minimum creation date of resource (inclusive).
        :type min_created_date_time: ~datetime.datetime
        :param max_created_date_time: Maximum creation date of resource (inclusive).
        :type max_created_date_time: ~datetime.datetime
        :param min_last_modified_date_time: Minimum last modified date of resource (inclusive).
        :type min_last_modified_date_time: ~datetime.datetime
        :param max_last_modified_date_time: Maximum last modified date of resource (inclusive).
        :type max_last_modified_date_time: ~datetime.datetime
        :param max_page_size: Maximum number of items needed (inclusive).
         Minimum = 10, Maximum = 1000, Default value = 50.
        :type max_page_size: int
        :param skip_token: Skip token for getting next set of results.
        :type skip_token: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either SeasonListResponse or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.agrifood.farming.models.SeasonListResponse]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SeasonListResponse"]
        # Status codes mapped to exception types; callers may extend this
        # via the ``error_map`` keyword argument.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-03-31-preview"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Builds the initial GET request, or follows the service-provided
            # continuation link on subsequent pages.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters. Omitted (None) filters are not sent.
                query_parameters = {}  # type: Dict[str, Any]
                if min_start_date_time is not None:
                    query_parameters['minStartDateTime'] = self._serialize.query("min_start_date_time", min_start_date_time, 'iso-8601')
                if max_start_date_time is not None:
                    query_parameters['maxStartDateTime'] = self._serialize.query("max_start_date_time", max_start_date_time, 'iso-8601')
                if min_end_date_time is not None:
                    query_parameters['minEndDateTime'] = self._serialize.query("min_end_date_time", min_end_date_time, 'iso-8601')
                if max_end_date_time is not None:
                    query_parameters['maxEndDateTime'] = self._serialize.query("max_end_date_time", max_end_date_time, 'iso-8601')
                if years is not None:
                    query_parameters['years'] = [self._serialize.query("years", q, 'int') if q is not None else '' for q in years]
                if ids is not None:
                    query_parameters['ids'] = [self._serialize.query("ids", q, 'str') if q is not None else '' for q in ids]
                if names is not None:
                    query_parameters['names'] = [self._serialize.query("names", q, 'str') if q is not None else '' for q in names]
                if property_filters is not None:
                    query_parameters['propertyFilters'] = [self._serialize.query("property_filters", q, 'str') if q is not None else '' for q in property_filters]
                if statuses is not None:
                    query_parameters['statuses'] = [self._serialize.query("statuses", q, 'str') if q is not None else '' for q in statuses]
                if min_created_date_time is not None:
                    query_parameters['minCreatedDateTime'] = self._serialize.query("min_created_date_time", min_created_date_time, 'iso-8601')
                if max_created_date_time is not None:
                    query_parameters['maxCreatedDateTime'] = self._serialize.query("max_created_date_time", max_created_date_time, 'iso-8601')
                if min_last_modified_date_time is not None:
                    query_parameters['minLastModifiedDateTime'] = self._serialize.query("min_last_modified_date_time", min_last_modified_date_time, 'iso-8601')
                if max_last_modified_date_time is not None:
                    query_parameters['maxLastModifiedDateTime'] = self._serialize.query("max_last_modified_date_time", max_last_modified_date_time, 'iso-8601')
                if max_page_size is not None:
                    query_parameters['$maxPageSize'] = self._serialize.query("max_page_size", max_page_size, 'int', maximum=1000, minimum=10)
                if skip_token is not None:
                    query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # The continuation link already carries all query parameters.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                path_format_arguments = {
                    'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
                }
                url = self._client.format_url(url, **path_format_arguments)
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize('SeasonListResponse', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                # NOTE(review): error is deserialized before map_error here,
                # whereas get/create_or_update/delete call map_error first --
                # presumably equivalent in practice since map_error raises
                # only for mapped status codes; confirm against the generator.
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/seasons'}  # type: ignore

    async def get(
        self,
        season_id: str,
        **kwargs: Any
    ) -> "_models.Season":
        """Gets a specified season resource.

        :param season_id: ID of the season.
        :type season_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Season, or the result of cls(response)
        :rtype: ~azure.agrifood.farming.models.Season
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Season"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-03-31-preview"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'seasonId': self._serialize.url("season_id", season_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('Season', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/seasons/{seasonId}'}  # type: ignore

    async def create_or_update(
        self,
        season_id: str,
        season: Optional["_models.Season"] = None,
        **kwargs: Any
    ) -> "_models.Season":
        """Creates or updates a season resource.

        :param season_id: ID of the season resource.
        :type season_id: str
        :param season: Season resource payload to create or update.
        :type season: ~azure.agrifood.farming.models.Season
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Season, or the result of cls(response)
        :rtype: ~azure.agrifood.farming.models.Season
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Season"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-03-31-preview"
        # merge-patch content type: the service performs a partial update.
        content_type = kwargs.pop("content_type", "application/merge-patch+json")
        accept = "application/json"

        # Construct URL
        url = self.create_or_update.metadata['url']  # type: ignore
        path_format_arguments = {
            'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'seasonId': self._serialize.url("season_id", season_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        if season is not None:
            body_content = self._serialize.body(season, 'Season')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error)

        # 200 = updated, 201 = created; payload shape is identical.
        if response.status_code == 200:
            deserialized = self._deserialize('Season', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('Season', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create_or_update.metadata = {'url': '/seasons/{seasonId}'}  # type: ignore

    async def delete(
        self,
        season_id: str,
        **kwargs: Any
    ) -> None:
        """Deletes a specified season resource.

        :param season_id: ID of the season.
        :type season_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-03-31-preview"
        accept = "application/json"

        # Construct URL
        url = self.delete.metadata['url']  # type: ignore
        path_format_arguments = {
            'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'seasonId': self._serialize.url("season_id", season_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Successful delete returns 204 No Content.
        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {'url': '/seasons/{seasonId}'}  # type: ignore
| |
# encoding: utf-8
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Replace the Event<->CmsForm M2M (and the legacy ``_update_cms_form``
    FK) with an explicit ``EventForm`` through model, migrating data."""

    def forwards(self, orm):
        # Adding model 'EventForm'
        db.create_table('webinars_eventform', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('event', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['webinars.Event'])),
            ('cms_form', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['webinars.CmsForm'])),
            ('sync_start_at', self.gf('sanetime.dj.SaneTimeField')(default=0)),
        ))
        db.send_create_signal('webinars', ['EventForm'])

        # Data migration: copy existing M2M rows plus the legacy
        # ``_update_cms_form`` FK into the new through table.
        # NOTE(review): ``INSERT webinars_eventform ...`` omits the INTO
        # keyword, which is MySQL-only syntax -- presumably this project
        # targets MySQL exclusively; verify before running on another backend.
        db.execute("INSERT webinars_eventform (event_id, cms_form_id) SELECT event_id, cmsform_id FROM webinars_event_cms_forms");
        db.execute("INSERT webinars_eventform (event_id, cms_form_id) SELECT id, _update_cms_form_id FROM webinars_event WHERE _update_cms_form_id IS NOT NULL");

        # Deleting field 'Event._update_cms_form'
        db.delete_column('webinars_event', '_update_cms_form_id')
        # Deleting field 'Event.last_hubspot_snapshotted_at'
        db.delete_column('webinars_event', 'last_hubspot_snapshotted_at')
        # Removing M2M table for field cms_forms on 'Event'
        db.delete_table('webinars_event_cms_forms')

    def backwards(self, orm):
        # NOTE(review): rows copied into EventForm by forwards() are not
        # copied back into the recreated M2M table, so reversing is lossy.
        # Deleting model 'EventForm'
        db.delete_table('webinars_eventform')
        # Adding field 'Event._update_cms_form'
        db.add_column('webinars_event', '_update_cms_form', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', null=True, to=orm['webinars.CmsForm']), keep_default=False)
        # Adding field 'Event.last_hubspot_snapshotted_at'
        db.add_column('webinars_event', 'last_hubspot_snapshotted_at', self.gf('sanetime.dj.SaneTimeField')(default=0), keep_default=False)
        # Adding M2M table for field cms_forms on 'Event'
        db.create_table('webinars_event_cms_forms', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('event', models.ForeignKey(orm['webinars.event'], null=False)),
            ('cmsform', models.ForeignKey(orm['webinars.cmsform'], null=False))
        ))
        db.create_unique('webinars_event_cms_forms', ['event_id', 'cmsform_id'])

    # Frozen ORM state used by South to build the ``orm`` objects above.
    models = {
        'webinars.account': {
            'Meta': {'object_name': 'Account'},
            'account_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.AccountType']"}),
            'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'extra': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Hub']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'prevent_unformed_lead_import': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'webinars.accounttype': {
            'Meta': {'object_name': 'AccountType'},
            'can_api_create_event': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_api_load_event': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_api_register_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_api_report_views': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'extra_username_label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'listing_priority': ('django.db.models.fields.IntegerField', [], {}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'username_label': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'webinars.cmsform': {
            'Meta': {'object_name': 'CmsForm'},
            'guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
            'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Hub']"}),
            'is_sync_target': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'webinars.event': {
            'Meta': {'object_name': 'Event'},
            '_attended_criterium_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            '_attended_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            '_noshow_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            '_registered_criterium_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            '_registered_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Account']"}),
            'attended_campaign_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'deleted_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
            'duration': ('django.db.models.fields.IntegerField', [], {}),
            'event_forms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['webinars.CmsForm']", 'through': "orm['webinars.EventForm']", 'symmetrical': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mothballed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'noshow_campaign_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            'registrants_synced_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
            'requested_registrants_sync': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'started_registrants_sync_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'time_starts_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'timezone_starts_at': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'unknowable_registrants': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'})
        },
        'webinars.eventform': {
            'Meta': {'object_name': 'EventForm'},
            'cms_form': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.CmsForm']"}),
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'sync_start_at': ('sanetime.dj.SaneTimeField', [], {'default': '0'})
        },
        'webinars.hub': {
            'Meta': {'object_name': 'Hub'},
            '_attended_any_criterium_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            '_attended_any_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            '_registered_any_criterium_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            '_registered_any_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            '_timezone': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'events_synced_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
            'requested_events_sync': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'started_events_sync_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'uninstalled_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'})
        },
        'webinars.hubspotregistrantsnapshot': {
            'Meta': {'object_name': 'HubSpotRegistrantSnapshot'},
            'attended_any': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'attended_for': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'attended_this': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'conversion_event_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'initial_form_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'lead_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            'registered_any': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'registered_this': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
        },
        'webinars.landingpage': {
            'Meta': {'object_name': 'LandingPage'},
            'cms_form': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.CmsForm']"}),
            'form_title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'webinars.registrant': {
            'Meta': {'object_name': 'Registrant'},
            'attended_for': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'cms_form': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.CmsForm']", 'null': 'True'}),
            'conversion_event_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'deleted_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'lead_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
            'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'})
        },
        'webinars.task': {
            'Meta': {'object_name': 'Task'},
            'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'error': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'null': 'True'}),
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']", 'null': 'True'}),
            'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Hub']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'priority': ('django.db.models.fields.IntegerField', [], {}),
            'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'sync_all_registrants': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'sync_events': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'sync_specific_registrants': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'webinars.taskrunner': {
            'Meta': {'object_name': 'TaskRunner'},
            'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
        },
        'webinars.webexeventsnapshot': {
            'Meta': {'object_name': 'WebexEventSnapshot'},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Account']"}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
            'duration': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
            'time_starts_at': ('sanetime.dj.SaneTimeField', [], {}),
            'timezone_starts_at': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'webinars.webexregistrantsnapshot': {
            'Meta': {'object_name': 'WebexRegistrantSnapshot'},
            'attended_for': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
            'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
        }
    }

    complete_apps = ['webinars']
| |
import logging
import itertools
import numpy as np
from collections import OrderedDict
from collections import Mapping
from picklable_itertools.extras import equizip
import theano
from blocks.algorithms import GradientDescent, UpdatesAlgorithm
from blocks.graph import ComputationGraph
from blocks.theano_expressions import l2_norm
logger = logging.getLogger(__name__)
class TwoStepGradientDescent(GradientDescent):
    """Gradient descent that alternates between two groups of updates.

    This class takes two groups of parameter names and assigns each update
    whose parameter name matches a group to that group's theano function.
    Two functions are then run at ``process_batch`` time; each is run the
    number of times stored in ``groups_steps``.  Updates that match neither
    group (e.g. step-rule bookkeeping) are applied by both functions.

    WARNING: this means the parameters need to have names!

    Parameters
    ----------
    groups : dict
        Must contain exactly the keys ``'group1'`` and ``'group2'``, each
        mapping to an iterable of (sub)strings matched against parameter
        names.
    groups_steps : list of int
        Two-element list; number of times to run function 1 and function 2
        per batch, respectively.
    """
    def __init__(self, groups=None, groups_steps=None, **kwargs):
        # Use None defaults instead of mutable default arguments ({} / [1,1])
        # which would be shared across instances.
        groups = {} if groups is None else groups
        groups_steps = [1, 1] if groups_steps is None else groups_steps
        assert len(groups) == 2
        assert len(groups_steps) == 2
        assert all(key in ('group1', 'group2') for key in groups.keys())
        self.groups = groups
        self.groups_steps = groups_steps
        super(TwoStepGradientDescent, self).__init__(**kwargs)

    def initialize(self):
        """Compile the two alternating theano update functions."""
        logger.info("Initializing the training algorithm")
        update_values = [new_value for _, new_value in self.updates]
        logger.debug("Inferring graph inputs...")
        try:
            self.inputs = ComputationGraph(update_values).inputs
        except AttributeError:
            # Without inputs we cannot compile anything; re-raise instead of
            # silently continuing (the original continued and crashed later
            # with a confusing message).
            print("oh you silly blocks, why do you even?")
            raise
        logger.debug("Compiling training function...")
        # Partition the updates according to the names in the two groups.
        updt_group1 = [x for x in self.updates
                       if any(name in x[0].name for name in self.groups['group1'])]
        updt_group2 = [x for x in self.updates
                       if any(name in x[0].name for name in self.groups['group2'])]
        for updt in updt_group1 + updt_group2:
            # Theano variables compare by identity, so remove() drops exactly
            # the tuple found above (equivalent to pop(index(...))).
            self.updates.remove(updt)
        # Whatever is left (un-grouped updates) is applied by both functions.
        updt_group1 += self.updates
        updt_group2 += self.updates
        self._function1 = theano.function(
            self.inputs, [], updates=updt_group1, **self.theano_func_kwargs)
        self._function2 = theano.function(
            self.inputs, [], updates=updt_group2, **self.theano_func_kwargs)
        logger.info("The training algorithm is initialized")

    def process_batch(self, batch):
        """Run function 1 then function 2, each its configured number of times."""
        self._validate_source_names(batch)
        ordered_batch = [batch[v.name] for v in self.inputs]
        for _ in range(self.groups_steps[0]):
            self._function1(*ordered_batch)
        for _ in range(self.groups_steps[1]):
            self._function2(*ordered_batch)
class EntropySGD(UpdatesAlgorithm):
    """Entropy-SGD training algorithm.

    Copy pasta of the ``__init__`` method of ``GradientDescent`` -- watch out
    if a blocks update changes it.  Unfortunately, it seems that to implement
    this we need to intercept some things happening in ``__init__`` and
    cannot directly subclass ``GradientDescent``.

    Parameters
    ----------
    langevin_itr : int
        Number of inner (Langevin / SGLD) iterations per outer update.
    eta : float
        Outer-loop step size applied to the true parameters.
    alpha : float
        Exponential moving-average coefficient for the ``mu`` estimates.
    gamma0 : float
        Initial value of the coupling coefficient gamma.
    hard_limit_on_scopping : bool
        If True, gamma is clipped to 1 before scoping is applied.
        (Parameter name, typo included, kept for backward compatibility.)
    scoping : float
        Multiplicative factor applied to gamma at each outer step.
    epsilon : float
        Scale of the Gaussian noise injected in the Langevin updates.

    Remaining arguments (``cost``, ``parameters``, ``step_rule``,
    ``gradients``, ``known_grads``, ``consider_constant``) behave as in
    ``blocks.algorithms.GradientDescent``.
    """
    def __init__(self, langevin_itr, eta=0.1, alpha=0.75, gamma0=1e-4,
                 hard_limit_on_scopping=True, scoping=1.001, epsilon=1e-4,
                 cost=None, parameters=None, step_rule=None,
                 gradients=None, known_grads=None, consider_constant=None,
                 **kwargs):
        # Error message for non-deterministic gradient iteration order.
        # (Mirrors blocks.algorithms.determinism_error, which was referenced
        # here but never defined -- previously a NameError when hit.)
        determinism_error = ("Cannot infer a deterministic parameter order: "
                             "provide an OrderedDict of gradients or an "
                             "explicit parameters list.")
        self.eta = np.float32(eta)
        self.alpha = np.float32(alpha)
        self.gamma = theano.shared(np.float32(gamma0), name='ESGD_gamma')
        self.scoping = np.float32(scoping)
        self.hard_limit_on_scopping = hard_limit_on_scopping
        self.epsilon = np.float32(epsilon)
        self.langevin_itr = langevin_itr
        self.langevin_step = 1
        # Set initial values for cost, parameters, gradients.
        self.cost = cost
        self.parameters = parameters
        # Coerce lists of tuples to OrderedDict. Do not coerce Mappings,
        # as we don't want to convert dict -> OrderedDict and give it
        # an arbitrary, non-deterministic order.
        if gradients is not None and not isinstance(gradients, Mapping):
            gradients = OrderedDict(gradients)
        self.gradients = gradients

        # If we don't have gradients, we'll need to infer them from the
        # cost and the parameters, both of which must not be None.
        if not self.gradients:
            self.gradients = self._compute_gradients(known_grads,
                                                     consider_constant)
        else:
            if cost is not None:
                logger.warning(('{}: gradients already specified directly; '
                                'cost is unused.'
                                .format(self.__class__.__name__)))
            if self.parameters is None and isinstance(gradients, OrderedDict):
                # If the dictionary is ordered, it's safe to use the keys
                # as they have a deterministic order.
                self.parameters = list(self.gradients.keys())
            elif self.parameters is not None:
                # If parameters and gradients.keys() don't match we can
                # try to recover if gradients is ordered.
                if set(self.parameters) != set(self.gradients.keys()):
                    logger.warn("Specified parameters list does not match "
                                "keys in provided gradient dictionary; "
                                "using parameters inferred from gradients")
                    if not isinstance(self.gradients, OrderedDict):
                        raise ValueError(determinism_error)
                    self.parameters = list(self.gradients.keys())
            else:
                # self.parameters is not None, and gradients isn't
                # an OrderedDict. We can't do anything safe.
                raise ValueError(determinism_error)
            if known_grads:
                raise ValueError("known_grads has no effect when gradients "
                                 "are passed in")
            if consider_constant is not None:
                raise ValueError("consider_constant has no effect when "
                                 "gradients are passed in")

        # ------------ESGD interception! -----------------
        # Recreate two lists of theano shared variables mirroring the
        # parameters; they are x' and mu in the paper.
        true_parameters = []
        mu_parameters = []
        for param in self.parameters:
            new_param = theano.shared(param.get_value(),
                                      name=param.name)
            # same thing but we need a unique object
            mu_param = theano.shared(param.get_value(),
                                     name=param.name)
            true_parameters += [new_param]
            mu_parameters += [mu_param]
        self.true_parameters = true_parameters
        self.mu_parameters = mu_parameters

        new_gradients = OrderedDict()
        for true_param, param in zip(true_parameters, self.parameters):
            gradient = self.gradients[param]
            # NOTE(review): sign/order of (true_param - param) assumed to
            # match line 4 of algorithm 1 in the Entropy-SGD paper -- confirm.
            new_gradient = gradient - self.gamma * (true_param - param)
            new_gradients.update({param: new_gradient})
        # gradients now contain the ESGD step (line 4 algo 1 of the paper)
        del self.gradients
        self.gradients = new_gradients

        # The order in which the different gradient terms appears
        # here matters, as floating point addition is non-commutative (and
        # Theano's graph optimizations are not order-independent).
        # This is why we do not use .values().
        gradient_values = [self.gradients[p] for p in self.parameters]
        self.total_gradient_norm = (l2_norm(gradient_values)
                                    .copy(name="total_gradient_norm"))

        # Scale (imported from blocks.algorithms) was previously an
        # undefined name here, raising NameError when step_rule was omitted.
        self.step_rule = step_rule if step_rule else Scale()

        logger.debug("Computing parameter steps...")
        self.steps, self.step_rule_updates = (
            self.step_rule.compute_steps(self.gradients))

        # Same as gradient_values above: the order may influence a
        # bunch of things, so enforce a consistent one (don't use
        # .values()).
        step_values = [self.steps[p] for p in self.parameters]
        self.total_step_norm = (l2_norm(step_values)
                                .copy(name="total_step_norm"))

        # Once again, iterating on gradients may not be deterministically
        # ordered if it is not an OrderedDict. We add the updates here in
        # the order specified in self.parameters. Keep it this way to
        # maintain reproducibility.
        # ---- Another ESGD interception here! -----------------
        randrg = theano.tensor.shared_randomstreams.RandomStreams(seed=1234)
        eps = self.epsilon * randrg.normal(dtype=theano.config.floatX)
        eta_prime = self.step_rule.learning_rate
        slgd_eta_update = theano.tensor.sqrt(eta_prime) * eps
        kwargs.setdefault('updates', []).extend(
            itertools.chain(((parameter, parameter - self.steps[parameter] + slgd_eta_update)
                             for parameter in self.parameters),
                            self.step_rule_updates)
        )
        # mu <- (1 - alpha) * mu + alpha * x'  (running average, line 6).
        mu_updates = [(mu, np.float32(1. - self.alpha) * mu + self.alpha * x_prime)
                      for mu, x_prime in zip(self.mu_parameters, self.parameters)]
        self.mu_updates = mu_updates
        super(EntropySGD, self).__init__(**kwargs)

    def initialize(self):
        """Compile the SGLD (x') and running-average (mu) theano functions."""
        logger.info("Initializing the training algorithm")
        update_values = [new_value for _, new_value in self.updates]
        logger.debug("Inferring graph inputs...")
        try:
            self.inputs = ComputationGraph(update_values).inputs
        except AttributeError:
            # Re-raise: without inputs compilation below cannot proceed.
            print("oh you silly blocks, why do you even?")
            raise
        logger.debug("Compiling training function...")
        # line 5
        try:
            self._function_for_x_prime = theano.function(
                self.inputs, [], updates=self.updates, **self.theano_func_kwargs)
        except TypeError as e:
            print("This following error was thrown: {0}".format(e))
            print("Let's try to fix broadcastable patterns and recompile...")
            # This is ugly, but let's face the ugliness: coerce each update
            # expression to the broadcastable pattern of its target variable.
            # (A live ipdb breakpoint was removed from this loop.)
            for i, (var, updt) in enumerate(self.updates):
                if updt.broadcastable != var.broadcastable:
                    updt = theano.tensor.patternbroadcast(updt, var.broadcastable)
                self.updates[i] = (var, updt)
            self._function_for_x_prime = theano.function(
                self.inputs, [], updates=self.updates, **self.theano_func_kwargs)
        # line 6
        self._function_for_mu = theano.function(
            [], [], updates=self.mu_updates)
        logger.info("The training algorithm is initialized")

    def process_batch(self, batch):
        """Run one Langevin step; every ``langevin_itr`` steps, fold the
        mu estimate back into the true parameters and rescale gamma."""
        self._validate_source_names(batch)
        ordered_batch = [batch[v.name] for v in self.inputs]
        if self.langevin_step % self.langevin_itr == 0:
            # New langevin epoch: update the true params and reset x' and mu
            # to the true param values.
            for param, mu, true_param in zip(self.parameters, self.mu_parameters,
                                             self.true_parameters):
                true_param.set_value(true_param.get_value() -
                                     self.eta * (true_param.get_value() - mu.get_value()))
                param.set_value(true_param.get_value())
                mu.set_value(true_param.get_value())
            gamma_val = self.gamma.get_value()
            if gamma_val > 1. and self.hard_limit_on_scopping:
                gamma_val = np.float32(1.)
            self.gamma.set_value(gamma_val * self.scoping)
            self.langevin_step = 0
        self._function_for_x_prime(*ordered_batch)
        self._function_for_mu()
        self.langevin_step += 1

    def _compute_gradients(self, known_grads, consider_constant):
        """Infer gradients of ``self.cost`` w.r.t. ``self.parameters``.

        Raises ValueError when either is missing.
        """
        if self.cost is None:
            raise ValueError("can't infer gradients; no cost specified")
        elif self.parameters is None or len(self.parameters) == 0:
            raise ValueError("can't infer gradients; no parameters "
                             "specified")
        # While this strictly speaking could be a dict and not an
        # OrderedDict (because we iterate over it in the order of
        # self.parameters), this guards a little bit against
        # nondeterminism introduced by future refactoring.
        logger.info("Taking the cost gradient")
        gradients = OrderedDict(
            equizip(self.parameters, theano.tensor.grad(
                self.cost, self.parameters,
                known_grads=known_grads,
                consider_constant=consider_constant)))
        logger.info("The cost gradient computation graph is built")
        return gradients
if __name__ == '__main__':
    # Smoke test: compile a tiny theano function with an update and run it.
    import theano.tensor as T
    import numpy as np

    x = T.fmatrix('x')
    u = T.fmatrix('u')
    npw = np.random.random((64, 128)).astype(np.float32)
    w = theano.shared(npw, name='w')
    y = T.dot(x, w)
    cost = T.sqrt(T.sum(y ** 2))
    updt = [(w, w - (u - w) * (cost / 1000.))]
    f = theano.function(inputs=[x, u], outputs=[cost], updates=updt)
    # BUG FIX: f is compiled with TWO inputs (x and u) but was previously
    # called with only one, which raised a TypeError at runtime.  u must
    # have w's shape (64, 128) for the elementwise update to broadcast.
    out = f(np.random.random((100, 64)).astype(np.float32),
            np.random.random((64, 128)).astype(np.float32))
| |
# Copyright (C) 2006-2011, University of Maryland
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/ or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Author: James Krycka
"""
This module implements the InstrumentParameters class for obtaining data used
for calculating resolution.
"""
#==============================================================================
from __future__ import print_function
import sys
import wx
from numpy import inf
from ..api.ncnrdata import ANDR, NG1, NG7, XRay, NCNRLoader
from ..api.snsdata import Liquids, Magnetic, SNSLoader
from .input_list import InputListDialog
#==============================================================================
class InstrumentParameters(object):
    """
    This class is responsible for gathering instrument parameters (also known
    as resolution parameters or instrument metadata) from the user.

    Editable parameters are stored in 2 x n lists where list[0] contains
    default values by instrument and list[1] holds their current values;
    n is the number of instruments supported.
    """

    def __init__(self):
        self.instr_classes = [ANDR, NG1, NG7, XRay, Liquids, Magnetic]
        self.instr_location = ["NCNR", "NCNR", "NCNR", "NCNR", "SNS", "SNS"]

        # Get the instrument name and radiation type for each instrument.
        self.instr_names = []
        self.radiation = []
        for classname in self.instr_classes:
            self.instr_names.append(classname.instrument)
            self.radiation.append(classname.radiation)
        n = len(self.instr_classes)

        # Editable parameters are stored in 2 x n lists where list[0] contains
        # default values by instrument and list[1] holds their current values.
        # n is the number of instruments supported.  For a given instrument
        # only a subset of the parameters may be applicable.
        self.wavelength = [[None] * n, [None] * n]         # monochromatic
        self.wavelength_lo = [[None] * n, [None] * n]      # polychromatic
        self.wavelength_hi = [[None] * n, [None] * n]      # polychromatic
        self.dLoL = [[None] * n, [None] * n]               # both
        self.d_s1 = [[None] * n, [None] * n]               # both
        self.d_s2 = [[None] * n, [None] * n]               # both
        self.T = [[None] * n, [None] * n]                  # polychromatic
        self.Tlo = [[None] * n, [None] * n]                # monochromatic
        self.Thi = [[None] * n, [None] * n]                # monochromatic
        self.slit1_size = [[None] * n, [None] * n]         # polychromatic
        self.slit2_size = [[None] * n, [None] * n]         # polychromatic
        self.slit1_at_Tlo = [[None] * n, [None] * n]       # monochromatic
        self.slit2_at_Tlo = [[None] * n, [None] * n]       # monochromatic
        self.slit1_below = [[None] * n, [None] * n]        # monochromatic
        self.slit2_below = [[None] * n, [None] * n]        # monochromatic
        self.slit1_above = [[None] * n, [None] * n]        # monochromatic
        self.slit2_above = [[None] * n, [None] * n]        # monochromatic
        self.sample_width = [[None] * n, [None] * n]       # both
        self.sample_broadening = [[None] * n, [None] * n]  # both

        for i, classname in enumerate(self.instr_classes):
            self.set_default_values(i, classname)

        # Indicate that no instrument has been chosen.
        self.instr_idx = -1

    def get_instr_idx(self):
        return self.instr_idx

    def set_instr_idx(self, index):
        self.instr_idx = index

    def set_default_values(self, i, iclass):
        """Sets default values for reflectometer parameters."""
        if hasattr(iclass, 'wavelength'):
            try:
                # Polychromatic instruments provide a (lo, hi) pair.
                self.wavelength_lo[0][i], \
                self.wavelength_hi[0][i] = iclass.wavelength
            except (TypeError, ValueError):
                # Monochromatic instruments provide a single scalar value.
                self.wavelength[0][i] = iclass.wavelength
        if hasattr(iclass, 'dLoL'):
            self.dLoL[0][i] = iclass.dLoL
        if hasattr(iclass, 'T'):
            self.T[0][i] = iclass.T
        if hasattr(iclass, 'Tlo'):
            # != is the correct comparison for the float infinity sentinel
            # (the previous 'is not' relied on object identity).
            if iclass.Tlo != inf:  # TODO: resolve handling of inf
                self.Tlo[0][i] = iclass.Tlo
        if hasattr(iclass, 'Thi'):
            if iclass.Thi != inf:  # TODO: resolve handling of inf
                self.Thi[0][i] = iclass.Thi
        if hasattr(iclass, 'd_s1'):
            self.d_s1[0][i] = iclass.d_s1
        if hasattr(iclass, 'd_s2'):
            self.d_s2[0][i] = iclass.d_s2
        #if hasattr(iclass, 'sample_width'):
        #    self.sample_width[0][i] = iclass.sample_width
        if hasattr(iclass, 'sample_broadening'):
            self.sample_broadening[0][i] = iclass.sample_broadening

        self.instr_idx = i
        self.init_metadata()

    def init_metadata(self):
        """
        Sets current metadata values for instruments to their default values.
        """
        i = self.instr_idx
        self.wavelength[1][i] = self.wavelength[0][i]
        self.wavelength_lo[1][i] = self.wavelength_lo[0][i]
        self.wavelength_hi[1][i] = self.wavelength_hi[0][i]
        self.dLoL[1][i] = self.dLoL[0][i]
        self.d_s1[1][i] = self.d_s1[0][i]
        self.d_s2[1][i] = self.d_s2[0][i]
        self.T[1][i] = self.T[0][i]
        self.Tlo[1][i] = self.Tlo[0][i]
        self.Thi[1][i] = self.Thi[0][i]
        self.slit1_size[1][i] = self.slit1_size[0][i]
        self.slit2_size[1][i] = self.slit2_size[0][i]
        self.slit1_at_Tlo[1][i] = self.slit1_at_Tlo[0][i]
        self.slit2_at_Tlo[1][i] = self.slit2_at_Tlo[0][i]
        self.slit1_below[1][i] = self.slit1_below[0][i]
        self.slit2_below[1][i] = self.slit2_below[0][i]
        self.slit1_above[1][i] = self.slit1_above[0][i]
        self.slit2_above[1][i] = self.slit2_above[0][i]
        self.sample_width[1][i] = self.sample_width[0][i]
        self.sample_broadening[1][i] = self.sample_broadening[0][i]

    def edit_metadata(self):
        """Dispatches to the appropriate class of instrument."""
        # Indices 0-3 are the NCNR monochromatic instruments; 4-5 are the
        # SNS time-of-flight ones.  NOTE(review): instr_idx == -1 (none
        # chosen) also takes the monochromatic branch -- confirm intended.
        if self.instr_idx <= 3:
            self.edit_metadata_monochromatic()
        else:
            self.edit_metadata_polychromatic()

    def edit_metadata_monochromatic(self):
        """
        Allows the user to edit the values for parameters of a monochromatic
        scanning instrument.
        """
        i = self.instr_idx
        fields = [
            ["Radiation Type:", self.radiation[i], "str", 'RH2B', None,
             self.instr_names[i]+" Scanning Reflectometer"],
            ["Instrument location:", self.instr_location[i],
             "str", 'R', None],
            ["Wavelength (A):", self.wavelength[1][i],
             "float", 'REH2', None, "Instrument Settings"],
            ["Wavelength Dispersion (dLoL):", self.dLoL[1][i],
             "float", 'RE', None],
            ["Distance to Slit 1 (mm):", self.d_s1[1][i],
             "float", 'RE', None],
            ["Distance to Slit 2 (mm):", self.d_s2[1][i],
             "float", 'RE', None],
            ["Theta Lo (deg):", self.Tlo[1][i],
             "float", 'REH2', None, "Measurement Settings"],
            ["Theta Hi (deg):", self.Thi[1][i],
             "float", 'E', None],
            ["Slit 1 at Theta Lo (mm):", self.slit1_at_Tlo[1][i],
             "float", 'RE', None],
            ["Slit 2 at Theta Lo (mm):", self.slit2_at_Tlo[1][i],
             "float", 'E', None],
            ["Slit 1 below Theta Lo (mm):", self.slit1_below[1][i],
             "float", 'RE', None],
            ["Slit 2 below Theta Lo (mm):", self.slit2_below[1][i],
             "float", 'E', None],
            ["Slit 1 above Theta Hi (mm):", self.slit1_above[1][i],
             "float", 'EL', None],
            ["Slit 2 above Theta Hi (mm):", self.slit2_above[1][i],
             "float", 'E', None],
            ["Sample Width (mm):", self.sample_width[1][i],
             "float", 'E', None],
            ["Sample Broadening (deg):", self.sample_broadening[1][i],
             "float", 'E', None],
        ]

        # Get instrument and measurement parameters via a pop-up dialog box.
        # Pass in the frame object as the parent window so that the dialog box
        # will inherit font info from it instead of using system defaults.
        frame = wx.FindWindowByName("AppFrame")
        x, y = frame.GetPosition()
        dlg = InputListDialog(parent=frame,
                              title="Instrument Properties",
                              pos=(x+350, y+50),
                              itemlist=fields,
                              align=True)
        if dlg.ShowModal() == wx.ID_OK:
            results = dlg.GetResultsAltFormat()
            if len(sys.argv) > 1 and '--tracep' in sys.argv[1:]:
                print("*** Instrument (resolution) parameters:")
                print(results)

            # Skip results[0], the radiation value that is not editable
            # Skip results[1], the location value that is not editable
            (self.wavelength[1][i],
             self.dLoL[1][i],
             self.d_s1[1][i],
             self.d_s2[1][i],
             self.Tlo[1][i],
             self.Thi[1][i],
             self.slit1_at_Tlo[1][i],
             self.slit2_at_Tlo[1][i],
             self.slit1_below[1][i],
             self.slit2_below[1][i],
             self.slit1_above[1][i],
             self.slit2_above[1][i],
             self.sample_width[1][i],
             self.sample_broadening[1][i]
            ) = results[2:]
        dlg.Destroy()

    def edit_metadata_polychromatic(self):
        """
        Allows the user to edit the values for parameters of a polychromatic
        time-of-flight instrument.
        """
        i = self.instr_idx
        fields = [
            ["Radiation Type:", self.radiation[i], "str", 'RH2B', None,
             self.instr_names[i]+" Time-of-Flight Reflectometer"],
            ["Instrument location:", self.instr_location[i],
             "str", 'R', None],
            ["Wavelength Lo (A):", self.wavelength_lo[1][i],
             "float", 'REH2', None, "Instrument Settings"],
            ["Wavelength Hi (A):", self.wavelength_hi[1][i],
             "float", 'RE', None],
            ["Wavelength Dispersion (dLoL):", self.dLoL[1][i],
             "float", 'RE', None],
            ["Distance to Slit 1 (mm):", self.d_s1[1][i],
             "float", 'RE', None],
            ["Distance to Slit 2 (mm):", self.d_s2[1][i],
             "float", 'RE', None],
            ["Theta (deg):", self.T[1][i],
             "float", 'REH2', None, "Measurement Settings"],
            ["Size of Slit 1 (mm):", self.slit1_size[1][i],
             "float", 'RE', None],
            ["Size of Slit 2 (mm):", self.slit2_size[1][i],
             "float", 'RE', None],
            ["Sample Width (mm):", self.sample_width[1][i],
             "float", 'EL', None],
            ["Sample Broadening (deg):", self.sample_broadening[1][i],
             "float", 'E', None],
        ]

        # Get instrument and measurement parameters via a pop-up dialog box.
        # Pass in the frame object as the parent window so that the dialog box
        # will inherit font info from it instead of using system defaults.
        frame = wx.FindWindowByName("AppFrame")
        x, y = frame.GetPosition()
        dlg = InputListDialog(parent=frame,
                              title="Instrument Properties",
                              pos=(x+350, y+50),
                              itemlist=fields,
                              align=True)
        if dlg.ShowModal() == wx.ID_OK:
            results = dlg.GetResultsAltFormat()
            if len(sys.argv) > 1 and '--tracep' in sys.argv[1:]:
                print("*** Instrument (resolution) parameters:")
                print(results)

            # Skip results[0], the radiation value that is not editable
            # Skip results[1], the location value that is not editable
            (self.wavelength_lo[1][i],
             self.wavelength_hi[1][i],
             self.dLoL[1][i],
             self.d_s1[1][i],
             self.d_s2[1][i],
             self.T[1][i],
             self.slit1_size[1][i],
             self.slit2_size[1][i],
             self.sample_width[1][i],
             self.sample_broadening[1][i]
            ) = results[2:]
        dlg.Destroy()

    # Get methods (without corresponding set methods).
    def get_instr_names(self):
        return self.instr_names

    def get_instr_classes(self):
        return self.instr_classes

    def get_radiation(self):
        return self.radiation[self.instr_idx]

    # Get methods (with corresponding set methods).
    def get_wavelength(self):
        return self.wavelength[1][self.instr_idx]

    def get_wavelength_lo(self):
        return self.wavelength_lo[1][self.instr_idx]

    def get_wavelength_hi(self):
        return self.wavelength_hi[1][self.instr_idx]

    def get_dLoL(self):
        return self.dLoL[1][self.instr_idx]

    def get_d_s1(self):
        return self.d_s1[1][self.instr_idx]

    def get_d_s2(self):
        return self.d_s2[1][self.instr_idx]

    def get_T(self):
        return self.T[1][self.instr_idx]

    def get_Tlo(self):
        return self.Tlo[1][self.instr_idx]

    def get_Thi(self):
        return self.Thi[1][self.instr_idx]

    def get_slit1_size(self):
        return self.slit1_size[1][self.instr_idx]

    def get_slit2_size(self):
        return self.slit2_size[1][self.instr_idx]

    def get_slit1_at_Tlo(self):
        return self.slit1_at_Tlo[1][self.instr_idx]

    def get_slit2_at_Tlo(self):
        return self.slit2_at_Tlo[1][self.instr_idx]

    def get_slit1_below(self):
        return self.slit1_below[1][self.instr_idx]

    def get_slit2_below(self):
        return self.slit2_below[1][self.instr_idx]

    def get_slit1_above(self):
        return self.slit1_above[1][self.instr_idx]

    def get_slit2_above(self):
        return self.slit2_above[1][self.instr_idx]

    def get_sample_width(self):
        return self.sample_width[1][self.instr_idx]

    def get_sample_broadening(self):
        return self.sample_broadening[1][self.instr_idx]

    # Set methods (with corresponding get methods).
    # BUG FIX: several of these methods previously omitted the 'value'
    # parameter and assigned an undefined global name, raising an error on
    # every call.  All now take value=None, matching set_T et al.
    def set_wavelength(self, value=None):
        self.wavelength[1][self.instr_idx] = value

    def set_wavelength_lo(self, value=None):
        self.wavelength_lo[1][self.instr_idx] = value

    def set_wavelength_hi(self, value=None):
        self.wavelength_hi[1][self.instr_idx] = value

    def set_dLoL(self, value=None):
        self.dLoL[1][self.instr_idx] = value

    def set_d_s1(self, value=None):
        self.d_s1[1][self.instr_idx] = value

    def set_d_s2(self, value=None):
        self.d_s2[1][self.instr_idx] = value

    def set_T(self, value=None):
        self.T[1][self.instr_idx] = value

    def set_Tlo(self, value=None):
        self.Tlo[1][self.instr_idx] = value

    def set_Thi(self, value=None):
        self.Thi[1][self.instr_idx] = value

    def set_slit1_size(self, value=None):
        self.slit1_size[1][self.instr_idx] = value

    def set_slit2_size(self, value=None):
        self.slit2_size[1][self.instr_idx] = value

    def set_slit1_at_Tlo(self, value=None):
        self.slit1_at_Tlo[1][self.instr_idx] = value

    def set_slit2_at_Tlo(self, value=None):
        self.slit2_at_Tlo[1][self.instr_idx] = value

    def set_slit1_below(self, value=None):
        self.slit1_below[1][self.instr_idx] = value

    def set_slit2_below(self, value=None):
        self.slit2_below[1][self.instr_idx] = value

    def set_slit1_above(self, value=None):
        self.slit1_above[1][self.instr_idx] = value

    def set_slit2_above(self, value=None):
        self.slit2_above[1][self.instr_idx] = value

    def set_sample_width(self, value=None):
        self.sample_width[1][self.instr_idx] = value

    def set_sample_broadening(self, value=None):
        self.sample_broadening[1][self.instr_idx] = value
| |
import six
import mock
import decimal
from decimal import Decimal
if six.PY3:
from io import StringIO
else:
from StringIO import StringIO
from ..base import BaseSmartCSVTestCase
import smartcsv
class ValidValueTransformation(BaseSmartCSVTestCase):
    """Tests that a column's ``transform`` callable is applied to present
    values and skipped for missing or defaulted ones."""

    def test_required_column_with_data_is_transformed(self):
        """Should apply the transformation to the value"""
        iphone_data = {
            'title': 'iPhone 5C',
            'price': '799'
        }
        ipad_data = {
            'title': 'iPad mini',
            'price': '699'
        }
        csv_data = """
title,price
{iphone_row}
{ipad_row}
""".format(
            iphone_row="{title},{price}".format(**iphone_data),
            ipad_row="{title},{price}".format(**ipad_data)
        )
        reader = smartcsv.reader(StringIO(csv_data), columns=[
            {'name': 'title', 'required': True},
            {
                'name': 'price',
                'required': True,
                # Decimal is already a callable taking one string argument;
                # no lambda wrapper needed.
                'transform': Decimal
            },
        ])
        iphone = next(reader)
        ipad = next(reader)
        # The reader must be exhausted after the two data rows.
        self.assertRaises(StopIteration, lambda: next(reader))

        self.assertTrue(isinstance(iphone, dict) and isinstance(ipad, dict))
        self.assertModelsEquals(iphone, {
            'title': 'iPhone 5C',
            'price': Decimal('799')
        })
        self.assertModelsEquals(ipad, {
            'title': 'iPad mini',
            'price': Decimal('699')
        })

    def test_column_with_missing_data_is_not_transformed(self):
        """Shouldn't invoke the transform function if no value is passed"""
        iphone_data = {
            'title': 'iPhone 5C',
            'price': ''
        }
        ipad_data = {
            'title': 'iPad mini',
            'price': ''
        }
        csv_data = """
title,price
{iphone_row}
{ipad_row}
""".format(
            iphone_row="{title},{price}".format(**iphone_data),
            ipad_row="{title},{price}".format(**ipad_data)
        )
        mocked_transform = mock.MagicMock(return_value=True)
        reader = smartcsv.reader(StringIO(csv_data), columns=[
            {'name': 'title', 'required': True},
            {
                'name': 'price',
                'required': False,
                'transform': mocked_transform
            },
        ])
        iphone = next(reader)
        ipad = next(reader)
        self.assertRaises(StopIteration, lambda: next(reader))

        self.assertTrue(isinstance(iphone, dict) and isinstance(ipad, dict))
        self.assertModelsEquals(iphone, iphone_data)
        self.assertModelsEquals(ipad, ipad_data)
        # Missing values must never reach the transform callable.
        self.assertEqual(mocked_transform.call_count, 0)

    def test_default_value_is_not_transformed(self):
        """Shouldn't apply any transformation if the value is missing and
        the default value is being used"""
        iphone_data = {
            'title': 'iPhone 5C',
            'price': ''
        }
        ipad_data = {
            'title': 'iPad mini',
            'price': ''
        }
        csv_data = """
title,price
{iphone_row}
{ipad_row}
""".format(
            iphone_row="{title},{price}".format(**iphone_data),
            ipad_row="{title},{price}".format(**ipad_data)
        )
        mocked_transform = mock.MagicMock(return_value=True)
        reader = smartcsv.reader(StringIO(csv_data), columns=[
            {'name': 'title', 'required': True},
            {
                'name': 'price',
                'required': False,
                'transform': mocked_transform,
                'default': 899
            },
        ])
        iphone = next(reader)
        ipad = next(reader)
        self.assertRaises(StopIteration, lambda: next(reader))

        self.assertTrue(isinstance(iphone, dict) and isinstance(ipad, dict))
        self.assertModelsEquals(iphone, {
            'title': 'iPhone 5C',
            'price': 899
        })
        self.assertModelsEquals(ipad, {
            'title': 'iPad mini',
            'price': 899
        })
        # The default is used verbatim; the transform must not be invoked.
        self.assertEqual(mocked_transform.call_count, 0)
class ErrorValueTransformationTestCase(BaseSmartCSVTestCase):
    """Tests error reporting when a column ``transform`` raises."""

    def test_error_preserves_exception_with_fail_fast(self):
        """Should raise the exception that happens with the value transformation"""
        iphone_data = {
            'title': 'iPhone 5C',
            'price': 'INVALID'
        }
        ipad_data = {
            'title': 'iPad mini',
            'price': '699'
        }
        csv_data = """
title,price
{iphone_row}
{ipad_row}
""".format(
            iphone_row="{title},{price}".format(**iphone_data),
            ipad_row="{title},{price}".format(**ipad_data)
        )
        reader = smartcsv.reader(StringIO(csv_data), columns=[
            {'name': 'title', 'required': True},
            {
                'name': 'price',
                'required': True,
                # Decimal('INVALID') raises decimal.InvalidOperation, which
                # must propagate unchanged in fail-fast mode.
                'transform': Decimal
            },
        ])
        self.assertRaises(decimal.InvalidOperation, lambda: next(reader))

    def test_error_exception_is_reported_without_fail_fast(self):
        """reader.errors should contain the exception that happened with the
        value transformation"""
        iphone_data = {
            'title': 'iPhone 5C',
            'price': 'INVALID'
        }
        ipad_data = {
            'title': 'iPad mini',
            'price': '699'
        }
        iphone_row = "{title},{price}".format(**iphone_data)
        csv_data = """
title,price
{iphone_row}
{ipad_row}
""".format(
            iphone_row=iphone_row,
            ipad_row="{title},{price}".format(**ipad_data)
        )
        reader = smartcsv.reader(StringIO(csv_data), columns=[
            {'name': 'title', 'required': True},
            {
                'name': 'price',
                'required': True,
                'transform': Decimal
            },
        ], fail_fast=False)
        # The invalid iPhone row is skipped; the valid iPad row comes through.
        ipad = next(reader)
        self.assertRaises(StopIteration, lambda: next(reader))

        self.assertTrue(isinstance(ipad, dict))
        self.assertModelsEquals(ipad, {
            'title': 'iPad mini',
            'price': Decimal('699')
        })
        self.assertTrue(reader.errors is not None)
        self.assertTrue('rows' in reader.errors)
        self.assertEqual(len(reader.errors['rows']), 1)  # 1 row failing
        self.assertRowError(
            reader.errors, iphone_row, 0, 'transform')
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import sys
import logging
import time
import uuid
import traceback
from collections import deque
from datetime import timedelta
from functools import partial
try:
from urllib.parse import quote
except ImportError:
from urllib import quote # noqa
import pika
import pika.adapters
from tornado import ioloop, gen
from tornado.concurrent import Future
from totoro.base import (
TaskProducerAdapter,
TaskPublishDelegate,
TaskConsumerBase,
WaitForResultTimeoutError)
LOGGER = logging.getLogger(__name__)
def generate_consumer_tag():
    """Return a unique consumer tag of the form ``totoro_tag_0.<32 hex chars>``."""
    return 'totoro_tag_0.' + uuid.uuid4().hex
class Connection(object):
""""""
def __init__(self, url, io_loop=None):
self._url = url
self._io_loop = io_loop or ioloop.IOLoop.instance()
self._connection = None
self._channel = None
self._connected_time = None
self._consumers = dict()
self._waiting_callers = list()
self._connected_future = None
self._closed_future = Future()
self._closed_future.set_result(self)
    @property
    def connected_time(self):
        # Timestamp of the last successful connection; None before the first
        # connect.  (Presumably set by the channel-open handler, which is
        # outside this view -- confirm.)
        return self._connected_time
    @property
    def is_ready(self):
        """Truthy when the channel exists and is open.

        NOTE(review): returns ``None`` rather than ``False`` when no channel
        is set; callers rely on truthiness only.
        """
        return self._channel and self._channel.is_open
@property
def is_idle(self):
return len(self._consumers) == 0 and len(self._waiting_callers) == 0
def queue_declare(self, callback, queue='', passive=False, durable=False,
exclusive=False, auto_delete=False, nowait=False,
arguments=None):
if callback and nowait is True:
raise ValueError('Can not pass a callback if nowait is True')
if self.is_ready:
LOGGER.debug('queue_declare: queue={0}'.format(queue))
self._channel.queue_declare(callback, queue, passive, durable, exclusive,
auto_delete, nowait, arguments)
else:
self._waiting_callers.append(partial(self.queue_declare, callback, queue, passive,
durable, exclusive, auto_delete, nowait, arguments))
def basic_publish(self, exchange, routing_key, body,
properties=None, mandatory=False, immediate=False):
if self.is_ready:
LOGGER.debug('basic_publish: exchange={0} routing_key={1}'.format(exchange, routing_key))
self._channel.basic_publish(exchange, routing_key, body,
properties, mandatory, immediate)
else:
self._waiting_callers.append(partial(self.basic_publish, exchange, routing_key, body, properties,
mandatory, immediate))
def basic_consume(self, consumer_callback, queue='', no_ack=False,
exclusive=False, consumer_tag=None, arguments=None):
consumer_tag = consumer_tag or generate_consumer_tag()
if not self.is_ready:
self._waiting_callers.append(partial(self.basic_consume, consumer_callback, queue, no_ack,
exclusive, consumer_tag, arguments))
else:
LOGGER.debug('basic_consume: queue={0} consumer_tag={1}'.format(queue, consumer_tag))
consumer_tag = self._channel.basic_consume(
consumer_callback, queue, no_ack,
exclusive, consumer_tag, arguments)
self._consumers[consumer_tag] = dict(
consumer_callback=consumer_callback,
queue=queue, no_ack=no_ack,
exclusive=exclusive, arguments=arguments)
return consumer_tag
    def basic_cancel(self, callback=None, consumer_tag='', nowait=False):
        """Cancel the consumer identified by *consumer_tag*.

        Deferred until the channel is open; a logged no-op when the tag is
        unknown.  Unless *nowait*, the local consumer registry is only
        updated once the broker confirms the cancel.
        """
        LOGGER.debug('enter basic_cancel: consumer_tag={0}'.format(consumer_tag))
        if callback and nowait is True:
            raise ValueError('Can not pass a callback if nowait is True')
        if not self.is_ready:
            # Channel not open yet: replay this exact call from _restore().
            self._waiting_callers.append(partial(self.basic_cancel, callback, consumer_tag, nowait))
            return
        if consumer_tag not in self._consumers:
            LOGGER.info('Invalid consumer_tag:{0}'.format(consumer_tag))
            return
        cb = callback
        if nowait:
            # No broker confirmation will arrive; forget the consumer now.
            del self._consumers[consumer_tag]
        else:
            def callback_wrapper(method_frame):
                # Broker confirmed the cancel: drop the consumer, then chain
                # to the caller-supplied callback (if any).
                del self._consumers[consumer_tag]
                if callback:
                    callback(method_frame)
            cb = callback_wrapper
        self._channel.basic_cancel(cb, consumer_tag, nowait)
def connect(self):
if self._connected_future is None:
self._connected_future = Future()
self._connect()
return self._connected_future
def close(self):
if self._closed_future is None:
self._closed_future = Future()
self._close()
return self._closed_future
    def _close(self):
        """Ask pika to close the underlying connection.

        Completion is signalled asynchronously via ``_on_connection_closed``.
        """
        LOGGER.info('Closing connection.')
        self._connection.close()
    def _connect(self):
        """Start an asynchronous pika TornadoConnection to ``self._url``.

        Completion is signalled via ``_on_connection_open``.
        """
        LOGGER.info('Connecting to %s.', self._url)
        self._connection = pika.adapters.TornadoConnection(
            parameters=pika.URLParameters(self._url),
            on_open_callback=self._on_connection_open,
            custom_ioloop=self._io_loop)
    def _on_connection_open(self, unused_connection):
        """pika callback: connection opened — hook close handling, open a channel."""
        LOGGER.info('Connection opened.')
        LOGGER.info('Adding connection close callback.')
        self._connection.add_on_close_callback(self._on_connection_closed)
        LOGGER.info('Creating a new channel.')
        # Channel readiness is signalled via _on_channel_open.
        self._connection.channel(on_open_callback=self._on_channel_open)
def _on_connection_closed(self, connection, reply_code, reply_text):
self._connection = None
self._channel = None
if self._closed_future is None:
LOGGER.warning('Connection closed, reopening in 2 seconds: (%s) %s.', reply_code, reply_text)
self._io_loop.add_timeout(2, self._connect())
else:
LOGGER.info('Connection closed, setting the result of a `self._closed_future`.')
self._closed_future.set_result(self)
self._connected_future = None
    def _on_channel_open(self, channel):
        """pika callback: channel ready — restore state and resolve ``connect()``."""
        LOGGER.info('Channel opened.')
        self._channel = channel
        # Recorded so the pool can recycle this connection by age.
        self._connected_time = time.time()
        LOGGER.info('Adding channel close callback.')
        self._channel.add_on_close_callback(self._on_channel_closed)
        LOGGER.info('Try to restore previous channel state.')
        # Replay calls deferred while disconnected and re-register consumers.
        self._restore()
        if self._connected_future:
            LOGGER.info('Channel opened, setting the result of a `self._connected_future`.')
            self._connected_future.set_result(self)
        self._closed_future = None
    def _on_channel_closed(self, channel, reply_code, reply_text):
        """pika callback: channel closed — tear down the whole connection.

        NOTE(review): assumes ``self._connection`` is still set when this
        fires; the connection-close path clears it to None — confirm the
        two callbacks cannot race.
        """
        LOGGER.warning('Channel %i was closed: (%s) %s.', channel, reply_code, reply_text)
        self._channel = None
        self._connection.close()
def _restore(self):
if self._waiting_callers:
LOGGER.info('restore waiting callers: {0}'.format(len(self._waiting_callers)))
waiting_callers = self._waiting_callers
self._waiting_callers = list()
for caller in waiting_callers:
try:
caller()
except:
tp, value, tb = sys.exc_info()
LOGGER.warning(''.join([line.decode("unicode-escape")
for line in traceback.format_exception(tp, value, tb)]))
if self._consumers:
LOGGER.info('restore consumers: {0} - [{1}]'.format(
len(self._consumers), ','.join(self._consumers.keys())))
consumers = self._consumers
self._consumers = dict()
for consumer_tag, consumer_args in consumers.items():
try:
self.basic_consume(consumer_tag=consumer_tag, **consumer_args)
except:
LOGGER.warning('restore: call basic_consume(consumer_tag={0}) fails.'.format(consumer_tag))
tp, value, tb = sys.exc_info()
LOGGER.warning(''.join([line.decode("unicode-escape")
for line in traceback.format_exception(tp, value, tb)]))
class ConnectionPool(object):
    """Pool of AMQP ``Connection`` objects with idle/open limits and recycling.

    ``max_idle_connections`` bounds how many free connections are kept,
    ``max_open_connections`` bounds the total (0 means unlimited) and
    ``max_recycle_sec`` bounds how long a connection may be reused before it
    is closed and replaced.
    """
    # Option keys recognized in the TOTORO_AMQP_CONNECTION_POOL app config.
    CONN_OPTIONS_NAMES = (
        'max_idle_connections',
        'max_recycle_sec',
        'max_open_connections')
    @classmethod
    def get_conn_options(cls, **kwargs):
        """Merge pool options from the Celery app config over *kwargs* defaults.

        Values found in the app's ``TOTORO_AMQP_CONNECTION_POOL`` dict win
        (coerced to int); *kwargs* supply fallbacks for missing keys.
        """
        conn_options = TaskProducerAdapter.app.conf.get('TOTORO_AMQP_CONNECTION_POOL', dict())
        current_conn_options = dict()
        if isinstance(conn_options, dict):
            for n in cls.CONN_OPTIONS_NAMES:
                if n in conn_options:
                    current_conn_options[n] = int(conn_options[n])
                elif n in kwargs:
                    current_conn_options[n] = kwargs[n]
        else:
            LOGGER.warning('Invalid conn_options: {0}'.format(conn_options))
        LOGGER.info('ConnectionPool - current_conn_options: {0}'.format(
            ', '.join(['{0}={1}'.format(k, v) for k, v in current_conn_options.items()])))
        return current_conn_options
    @staticmethod
    def get_url():
        """Build the broker URL from the Celery connection, URL-quoting the vhost."""
        parts = list(TaskProducerAdapter.app.connection().as_uri(
            include_password=True).partition('://'))
        parts.extend(parts.pop(-1).partition('/'))
        # The vhost (last component) may contain non-URL-safe characters.
        parts[-1] = quote(parts[-1], safe='')
        return ''.join(str(part) for part in parts if part)
    @staticmethod
    def instance():
        """Return the process-wide singleton pool, creating it on first use."""
        if not hasattr(ConnectionPool, '_instance'):
            ConnectionPool._instance = ConnectionPool(
                ConnectionPool.get_url(),
                **ConnectionPool.get_conn_options(max_idle_connections=3, max_open_connections=10))
        return ConnectionPool._instance
    def __init__(self, url,
                 max_idle_connections=1,
                 max_recycle_sec=3600,
                 max_open_connections=0,
                 io_loop=None):
        """Initialize the pool.

        Args:
            url: broker URL passed to each new ``Connection``.
            max_idle_connections: max free connections retained.
            max_recycle_sec: max age (seconds) before a connection is recycled.
            max_open_connections: max total connections; 0 means unlimited.
            io_loop: tornado IOLoop (defaults to the singleton instance).
        """
        self.url = url
        self.max_idle = max_idle_connections
        self.max_open = max_open_connections
        self.max_recycle_sec = max_recycle_sec
        self.io_loop = io_loop or ioloop.IOLoop.instance()
        # Count of open connections, FIFO of free connections, and FIFO of
        # Futures for callers waiting on a connection to become available.
        self._opened_conns = 0
        self._free_conn = deque()
        self._waitings = deque()
    def stat(self):
        """Returns (opened connections, free connections, waiters)"""
        return self._opened_conns, len(self._free_conn), len(self._waitings)
    def get_connection(self):
        """Return a Future resolving to a ``Connection``.

        Prefers a pooled connection; otherwise opens a new one while under
        ``max_open`` (0 = unlimited); otherwise queues the caller until a
        connection is released.
        """
        now = self.io_loop.time()
        # Try to reuse in free pool
        while self._free_conn:
            conn = self._free_conn.popleft()
            # Discard idle connections that exceeded their recycle age.
            if conn.is_idle and (now - conn.connected_time) > self.max_recycle_sec:
                self._close_async(conn)
                continue
            LOGGER.debug("Reusing connection from pool: %s", self.stat())
            future = Future()
            future.set_result(conn)
            return future
        # Open new connection
        if self.max_open == 0 or self._opened_conns < self.max_open:
            self._opened_conns += 1
            LOGGER.info("Creating new connection: %s", self.stat())
            conn = Connection(self.url, self.io_loop)
            return conn.connect()
        # Wait to other connection is released.
        future = Future()
        self._waitings.append(future)
        return future
    def put_connection(self, conn):
        """Return *conn* to the pool: hand it to a waiter, keep it free, or close it.

        Still-busy (non-idle) connections are always kept; idle ones are kept
        only while young enough and while the free pool is under ``max_idle``.
        Overflowing idle connections are closed asynchronously.
        """
        if (not conn.is_idle
                or (len(self._free_conn) < self.max_idle
                    and (self.io_loop.time() - conn.connected_time) < self.max_recycle_sec)):
            if self._waitings:
                # A caller is blocked in get_connection(); hand over directly.
                fut = self._waitings.popleft()
                fut.set_result(conn)
                LOGGER.debug("Passing returned connection to waiter: %s", self.stat())
            else:
                LOGGER.info("Add connection to free pool: %s", self.stat())
                self._free_conn.append(conn)
                # Trim the free pool back down to max_idle, closing only the
                # idle connections and re-queueing the busy ones.
                max_close = len(self._free_conn) - self.max_idle
                if max_close > 0:
                    for _ in range(0, len(self._free_conn)):
                        conn = self._free_conn.popleft()
                        if conn.is_idle:
                            self._close_async(conn)
                            max_close -= 1
                            if max_close <= 0:
                                break
                        else:
                            self._free_conn.append(conn)
        else:
            self._close_async(conn)
    def _close_async(self, conn):
        """Close *conn* asynchronously; bookkeeping happens in _after_close."""
        self.io_loop.add_future(conn.close(), callback=self._after_close)
    def _after_close(self, future):
        """Post-close hook: reconnect for a waiter, or just decrement the count."""
        if self._waitings:
            # Someone is waiting: reopen the connection and hand it over
            # instead of shrinking the pool.
            conn = future.result()
            future = self._waitings.popleft()
            self.io_loop.add_future(conn.connect(), callback=lambda f: future.set_result(conn))
        else:
            self._opened_conns -= 1
            LOGGER.info("Connection closed: %s", self.stat())
class AMQPConsumer(TaskConsumerBase):
    """Consumes task results from per-task AMQP result queues."""
    def __init__(self, **kwargs):
        super(AMQPConsumer, self).__init__(**kwargs)
        self._connection_pool = ConnectionPool.instance()
    @gen.coroutine
    def _consume(self, task_id, callback, wait_timeout):
        """Wait for the result of *task_id* and invoke *callback* with it.

        Declares the task's result queue, consumes one message, then cancels
        the consumer.  When *wait_timeout* (milliseconds) elapses first,
        *callback* receives a ``WaitForResultTimeoutError`` instead.
        """
        conn = yield self._connection_pool.get_connection()
        timeout = None
        consumer_tag = generate_consumer_tag()
        consume_future = Future()
        def _basic_cancel():
            # Stop consuming and let the trailing ``yield consume_future``
            # complete the coroutine.
            conn.basic_cancel(consumer_tag=consumer_tag)
            consume_future.set_result(None)
        if wait_timeout:
            def _on_timeout():
                _basic_cancel()
                callback(WaitForResultTimeoutError(wait_timeout))
            timeout = self.io_loop.add_timeout(timedelta(milliseconds=wait_timeout), _on_timeout)
        try:
            def _on_result(reply):
                # First result wins: drop the pending timeout, then cancel.
                if timeout:
                    self.io_loop.remove_timeout(timeout)
                _basic_cancel()
                callback(reply)
            name = self.backend.rkey(task_id)
            queue_declare_future = Future()
            conn.queue_declare(lambda method_frame: queue_declare_future.set_result(None),
                               queue=name,
                               auto_delete=self.backend.auto_delete,
                               durable=self.backend.persistent,
                               arguments=self.backend.queue_arguments)
            yield queue_declare_future
            conn.basic_consume(
                consumer_callback=lambda channel, deliver, properties, reply: _on_result(reply),
                queue=name, consumer_tag=consumer_tag)
        finally:
            # Return the connection before blocking on the result so other
            # callers can multiplex on it.
            self._connection_pool.put_connection(conn)
        yield consume_future
    def _wait_for(self, task_id, callback, wait_timeout):
        """Kick off ``_consume`` and log completion (or surface its errors)."""
        def tracking(fut=None):
            if fut:
                # Re-raise any exception captured by the coroutine.
                fut.result()
            LOGGER.debug('Task({0}) is completed.'.format(task_id))
        future = self._consume(task_id, callback, wait_timeout)
        if not future.done():
            self.io_loop.add_future(future, lambda f: tracking(f))
        else:
            tracking()
class AMQPTaskPublishDelegate(TaskPublishDelegate):
    """Publishes task messages over pooled AMQP connections."""
    def __init__(self, task_producer):
        super(AMQPTaskPublishDelegate, self).__init__(task_producer)
        self._connection_pool = ConnectionPool.instance()
    @gen.coroutine
    def _publish(self, body, priority, content_type, content_encoding,
                 headers, routing_key, mandatory, immediate, exchange,
                 retry, retry_policy, **properties):
        """Publish *body* through a pooled connection, always returning the
        connection to the pool afterwards."""
        conn = yield self._connection_pool.get_connection()
        try:
            properties = pika.BasicProperties(
                content_type=content_type,
                content_encoding=content_encoding,
                headers=headers,
                priority=priority,
                **properties
            )
            conn.basic_publish(
                exchange=exchange, routing_key=routing_key,
                body=body, properties=properties,
                mandatory=mandatory, immediate=immediate)
        finally:
            self._connection_pool.put_connection(conn)
    def publish(self, task_id, body, priority, content_type, content_encoding,
                headers, routing_key, mandatory, immediate, exchange,
                declare, retry, retry_policy, **properties):
        """Declare the entities in *declare* (if any), then publish the task
        message and log when the publish coroutine completes."""
        if declare:
            maybe_declare = self.task_producer.maybe_declare
            # Plain loop instead of a list comprehension used only for its
            # side effects.
            for entity in declare:
                maybe_declare(entity)
        future = self._publish(body, priority, content_type, content_encoding,
                               headers, routing_key, mandatory, immediate, exchange,
                               retry, retry_policy, **properties)
        def tracking(fut=None):
            if fut:
                # Re-raise any exception captured by the coroutine.
                fut.result()
            LOGGER.debug('Task({0}) is published.'.format(task_id))
        if not future.done():
            self.task_producer.io_loop.add_future(future, lambda f: tracking(f))
        else:
            tracking()
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
LOG = logging.getLogger(__name__)
class ManilaShare(resource.Resource):
    """A resource that creates shared mountable file system.
    The resource creates a manila share - shared mountable filesystem that
    can be attached to any client(or clients) that has a network access and
    permission to mount filesystem. Share is a unit of storage with specific
    size that supports pre-defined share protocol and advanced security model
    (access lists, share networks and security services).
    """
    support_status = support.SupportStatus(version='5.0.0')
    _ACCESS_RULE_PROPERTIES = (
        ACCESS_TO, ACCESS_TYPE, ACCESS_LEVEL
    ) = (
        'access_to', 'access_type', 'access_level')
    _SHARE_STATUSES = (
        STATUS_CREATING, STATUS_DELETING, STATUS_ERROR, STATUS_ERROR_DELETING,
        STATUS_AVAILABLE
    ) = (
        'creating', 'deleting', 'error', 'error_deleting',
        'available'
    )
    PROPERTIES = (
        SHARE_PROTOCOL, SIZE, SHARE_SNAPSHOT, NAME, METADATA,
        SHARE_NETWORK, DESCRIPTION, SHARE_TYPE, IS_PUBLIC,
        ACCESS_RULES
    ) = (
        'share_protocol', 'size', 'snapshot', 'name', 'metadata',
        'share_network', 'description', 'share_type', 'is_public',
        'access_rules'
    )
    ATTRIBUTES = (
        AVAILABILITY_ZONE_ATTR, HOST_ATTR, EXPORT_LOCATIONS_ATTR,
        SHARE_SERVER_ID_ATTR, CREATED_AT_ATTR, SHARE_STATUS_ATTR,
        PROJECT_ID_ATTR
    ) = (
        'availability_zone', 'host', 'export_locations',
        'share_server_id', 'created_at', 'status',
        'project_id'
    )
    properties_schema = {
        SHARE_PROTOCOL: properties.Schema(
            properties.Schema.STRING,
            _('Share protocol supported by shared filesystem.'),
            required=True,
            constraints=[constraints.AllowedValues(
                ['NFS', 'CIFS', 'GlusterFS', 'HDFS'])]
        ),
        SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('Share storage size in GB.'),
            required=True
        ),
        SHARE_SNAPSHOT: properties.Schema(
            properties.Schema.STRING,
            _('Name or ID of shared file system snapshot that will be restored'
              ' and created as a new share.'),
            constraints=[constraints.CustomConstraint('manila.share_snapshot')]
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Share name.'),
            update_allowed=True
        ),
        METADATA: properties.Schema(
            properties.Schema.MAP,
            _('Metadata key-values defined for share.'),
            update_allowed=True
        ),
        SHARE_NETWORK: properties.Schema(
            properties.Schema.STRING,
            _('Name or ID of shared network defined for shared filesystem.'),
            constraints=[constraints.CustomConstraint('manila.share_network')]
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Share description.'),
            update_allowed=True
        ),
        SHARE_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Name or ID of shared filesystem type. Types defines some share '
              'filesystem profiles that will be used for share creation.'),
            constraints=[constraints.CustomConstraint("manila.share_type")]
        ),
        IS_PUBLIC: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Defines if shared filesystem is public or private.'),
            default=False,
            update_allowed=True
        ),
        ACCESS_RULES: properties.Schema(
            properties.Schema.LIST,
            _('A list of access rules that define access from IP to Share.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ACCESS_TO: properties.Schema(
                        properties.Schema.STRING,
                        _('IP or other address information about guest that '
                          'allowed to access to Share.'),
                        required=True
                    ),
                    ACCESS_TYPE: properties.Schema(
                        properties.Schema.STRING,
                        _('Type of access that should be provided to guest.'),
                        constraints=[constraints.AllowedValues(
                            ['ip', 'domain'])],
                        required=True
                    ),
                    ACCESS_LEVEL: properties.Schema(
                        properties.Schema.STRING,
                        _('Level of access that need to be provided for '
                          'guest.'),
                        constraints=[constraints.AllowedValues(['ro', 'rw'])]
                    )
                }
            ),
            update_allowed=True,
            default=[]
        )
    }
    attributes_schema = {
        AVAILABILITY_ZONE_ATTR: attributes.Schema(
            _('The availability zone of shared filesystem.'),
            type=attributes.Schema.STRING
        ),
        HOST_ATTR: attributes.Schema(
            _('Share host.'),
            type=attributes.Schema.STRING
        ),
        EXPORT_LOCATIONS_ATTR: attributes.Schema(
            _('Export locations of share.'),
            type=attributes.Schema.LIST
        ),
        SHARE_SERVER_ID_ATTR: attributes.Schema(
            _('ID of server (VM, etc...) on host that is used for '
              'exporting network file-system.'),
            type=attributes.Schema.STRING
        ),
        CREATED_AT_ATTR: attributes.Schema(
            _('Datetime when a share was created.'),
            type=attributes.Schema.STRING
        ),
        SHARE_STATUS_ATTR: attributes.Schema(
            _('Current share status.'),
            type=attributes.Schema.STRING
        ),
        PROJECT_ID_ATTR: attributes.Schema(
            _('Share project ID.'),
            type=attributes.Schema.STRING
        )
    }
    default_client_name = 'manila'
    entity = 'shares'
    def _request_share(self):
        """Fetch the current share object from manila by resource id."""
        return self.client().shares.get(self.resource_id)
    def _resolve_attribute(self, name):
        """Resolve attribute *name* from a fresh manila share object."""
        share = self._request_share()
        return six.text_type(getattr(share, name))
    def handle_create(self):
        """Create the share, resolving any names in the template to IDs."""
        # Request IDs of entities from manila
        # if name of the entity defined in template
        share_net_identity = self.properties[self.SHARE_NETWORK]
        if share_net_identity:
            share_net_identity = self.client_plugin().get_share_network(
                share_net_identity).id
        snapshot_identity = self.properties[self.SHARE_SNAPSHOT]
        if snapshot_identity:
            snapshot_identity = self.client_plugin().get_share_snapshot(
                snapshot_identity).id
        share_type_identity = self.properties[self.SHARE_TYPE]
        if share_type_identity:
            share_type_identity = self.client_plugin().get_share_type(
                share_type_identity).id
        share = self.client().shares.create(
            share_proto=self.properties[self.SHARE_PROTOCOL],
            size=self.properties[self.SIZE],
            snapshot_id=snapshot_identity,
            name=self.properties[self.NAME],
            description=self.properties[self.DESCRIPTION],
            metadata=self.properties[self.METADATA],
            share_network=share_net_identity,
            share_type=share_type_identity,
            is_public=self.properties[self.IS_PUBLIC])
        self.resource_id_set(share.id)
    def check_create_complete(self, *args):
        """Poll creation; apply access rules once the share is available."""
        share_status = self._request_share().status
        if share_status == self.STATUS_CREATING:
            return False
        elif share_status == self.STATUS_AVAILABLE:
            LOG.info(_LI('Applying access rules to created Share.'))
            # apply access rules to created share. please note that it is not
            # possible to define rules for share with share_status = creating
            access_rules = self.properties.get(self.ACCESS_RULES)
            try:
                if access_rules:
                    for rule in access_rules:
                        self.client().shares.allow(
                            share=self.resource_id,
                            access_type=rule.get(self.ACCESS_TYPE),
                            access=rule.get(self.ACCESS_TO),
                            access_level=rule.get(self.ACCESS_LEVEL))
                return True
            except Exception as ex:
                # BUG FIX: ``ex.message`` is deprecated since Python 2.6 and
                # removed in Python 3; use six.text_type(ex), consistent with
                # _resolve_attribute above.
                reason = _(
                    'Error during applying access rules to share "{0}". '
                    'The root cause of the problem is the following: {1}.'
                ).format(self.resource_id, six.text_type(ex))
                raise exception.ResourceInError(status_reason=reason)
        elif share_status == self.STATUS_ERROR:
            reason = _('Error during creation of share "{0}"').format(
                self.resource_id)
            raise exception.ResourceInError(status_reason=reason,
                                            resource_status=share_status)
        else:
            reason = _('Unknown share_status during creation of share "{0}"'
                       ).format(self.resource_id)
            raise exception.ResourceUnknownStatus(
                status_reason=reason, resource_status=share_status)
    def check_delete_complete(self, *args):
        """Poll deletion; treat a NotFound share as successfully deleted."""
        if not self.resource_id:
            return True
        try:
            share = self._request_share()
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
            return True
        else:
            # when share creation is not finished proceed listening
            if share.status == self.STATUS_DELETING:
                return False
            elif share.status in (self.STATUS_ERROR,
                                  self.STATUS_ERROR_DELETING):
                raise exception.ResourceInError(
                    status_reason=_(
                        'Error during deleting share "{0}".'
                    ).format(self.resource_id),
                    resource_status=share.status)
            else:
                reason = _('Unknown status during deleting share '
                           '"{0}"').format(self.resource_id)
                raise exception.ResourceUnknownStatus(
                    status_reason=reason, resource_status=share.status)
    def handle_check(self):
        """Verify the share is in the 'available' status."""
        share = self._request_share()
        expected_statuses = [self.STATUS_AVAILABLE]
        checks = [{'attr': 'status', 'expected': expected_statuses,
                   'current': share.status}]
        self._verify_check_conditions(checks)
    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply updates: name/description/visibility, metadata, access rules."""
        kwargs = {}
        if self.IS_PUBLIC in prop_diff:
            kwargs['is_public'] = prop_diff.get(self.IS_PUBLIC)
        if self.NAME in prop_diff:
            kwargs['display_name'] = prop_diff.get(self.NAME)
        if self.DESCRIPTION in prop_diff:
            kwargs['display_description'] = prop_diff.get(self.DESCRIPTION)
        if kwargs:
            self.client().shares.update(self.resource_id,
                                        **kwargs)
        if self.METADATA in prop_diff:
            metadata = prop_diff.get(self.METADATA)
            self.client().shares.update_all_metadata(
                self.resource_id, metadata)
        if self.ACCESS_RULES in prop_diff:
            # Reconcile access rules: deny rules that disappeared, keep the
            # ones still present, then allow the newly added rules.
            actual_old_rules = []
            for rule in self.client().shares.access_list(self.resource_id):
                old_rule = {
                    self.ACCESS_TO: getattr(rule, self.ACCESS_TO),
                    self.ACCESS_TYPE: getattr(rule, self.ACCESS_TYPE),
                    self.ACCESS_LEVEL: getattr(rule, self.ACCESS_LEVEL)
                }
                if old_rule in prop_diff[self.ACCESS_RULES]:
                    actual_old_rules.append(old_rule)
                else:
                    self.client().shares.deny(share=self.resource_id,
                                              id=rule.id)
            for rule in prop_diff[self.ACCESS_RULES]:
                if rule not in actual_old_rules:
                    self.client().shares.allow(
                        share=self.resource_id,
                        access_type=rule.get(self.ACCESS_TYPE),
                        access=rule.get(self.ACCESS_TO),
                        access_level=rule.get(self.ACCESS_LEVEL)
                    )
def resource_mapping():
    """Expose the Heat resource type implemented by this module."""
    mapping = {}
    mapping['OS::Manila::Share'] = ManilaShare
    return mapping
| |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.language.v1beta2 LanguageService API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import grpc
from google.cloud.language_v1beta2.gapic import enums
from google.cloud.language_v1beta2.gapic import language_service_client_config
from google.cloud.language_v1beta2.gapic.transports import language_service_grpc_transport
from google.cloud.language_v1beta2.proto import language_service_pb2
from google.cloud.language_v1beta2.proto import language_service_pb2_grpc
# Installed version of the google-cloud-language distribution; reported in
# the user-agent (client_info) of outgoing API requests.
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
    'google-cloud-language', ).version
class LanguageServiceClient(object):
    """
    Provides text analysis operations such as sentiment analysis and entity
    recognition.
    """
    # Default gRPC endpoint (host:port) used when no channel/transport is given.
    SERVICE_ADDRESS = 'language.googleapis.com:443'
    """The default address of the service."""
    # The name of the interface for this client. This is the key used to
    # find the method configuration in the client_config dictionary.
    _INTERFACE_NAME = 'google.cloud.language.v1beta2.LanguageService'
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
LanguageServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
    def __init__(self,
                 transport=None,
                 channel=None,
                 credentials=None,
                 client_config=language_service_client_config.config,
                 client_info=None):
        """Constructor.
        Args:
            transport (Union[~.LanguageServiceGrpcTransport,
                    Callable[[~.Credentials, type], ~.LanguageServiceGrpcTransport]): A transport
                instance, responsible for actually making the API calls.
                The default transport uses the gRPC protocol.
                This argument may also be a callable which returns a
                transport instance. Callables will be sent the credentials
                as the first argument and the default transport class as
                the second argument.
            channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
                through which to make calls. This argument is mutually exclusive
                with ``credentials``; providing both will raise an exception.
            credentials (google.auth.credentials.Credentials): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is mutually exclusive with providing a
                transport instance to ``transport``; doing so will raise
                an exception.
            client_config (dict): DEPRECATED. A dictionary of call options for
                each method. If not specified, the default configuration is used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        """
        # Raise deprecation warnings for things we want to go away.
        if client_config:
            warnings.warn('The `client_config` argument is deprecated.',
                          PendingDeprecationWarning)
        if channel:
            warnings.warn(
                'The `channel` argument is deprecated; use '
                '`transport` instead.', PendingDeprecationWarning)
        # Instantiate the transport.
        # The transport is responsible for handling serialization and
        # deserialization and actually sending data to the service.
        if transport:
            # A callable transport is given the credentials and the default
            # transport class; an instance is used as-is (and must not be
            # combined with explicit credentials).
            if callable(transport):
                self.transport = transport(
                    credentials=credentials,
                    default_class=language_service_grpc_transport.
                    LanguageServiceGrpcTransport,
                )
            else:
                if credentials:
                    raise ValueError(
                        'Received both a transport instance and '
                        'credentials; these are mutually exclusive.')
                self.transport = transport
        else:
            # No transport supplied: build the default gRPC transport
            # (honoring the deprecated `channel` argument when present).
            self.transport = language_service_grpc_transport.LanguageServiceGrpcTransport(
                address=self.SERVICE_ADDRESS,
                channel=channel,
                credentials=credentials,
            )
        # Attach the library version so it is reported in the user-agent.
        if client_info is None:
            client_info = (
                google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO)
        client_info.gapic_version = _GAPIC_LIBRARY_VERSION
        self._client_info = client_info
        # Parse out the default settings for retry and timeout for each RPC
        # from the client configuration.
        # (Ordinarily, these are the defaults specified in the `*_config.py`
        # file next to this one.)
        self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
            client_config['interfaces'][self._INTERFACE_NAME], )
        # Save a dictionary of cached API call functions.
        # These are the actual callables which invoke the proper
        # transport methods, wrapped with `wrap_method` to add retry,
        # timeout, and the like.
        self._inner_api_calls = {}
# Service calls
def analyze_sentiment(self,
document,
encoding_type=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Analyzes the sentiment of the provided text.
Example:
>>> from google.cloud import language_v1beta2
>>>
>>> client = language_v1beta2.LanguageServiceClient()
>>>
>>> # TODO: Initialize ``document``:
>>> document = {}
>>>
>>> response = client.analyze_sentiment(document)
Args:
document (Union[dict, ~google.cloud.language_v1beta2.types.Document]): Input document.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.language_v1beta2.types.Document`
encoding_type (~google.cloud.language_v1beta2.types.EncodingType): The encoding type used by the API to calculate sentence offsets for the
sentence sentiment.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.language_v1beta2.types.AnalyzeSentimentResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'analyze_sentiment' not in self._inner_api_calls:
self._inner_api_calls[
'analyze_sentiment'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.analyze_sentiment,
default_retry=self._method_configs['AnalyzeSentiment'].
retry,
default_timeout=self._method_configs['AnalyzeSentiment'].
timeout,
client_info=self._client_info,
)
request = language_service_pb2.AnalyzeSentimentRequest(
document=document,
encoding_type=encoding_type,
)
return self._inner_api_calls['analyze_sentiment'](
request, retry=retry, timeout=timeout, metadata=metadata)
def analyze_entities(self,
document,
encoding_type=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Finds named entities (currently proper names and common nouns) in the text
along with entity types, salience, mentions for each entity, and
other properties.
Example:
>>> from google.cloud import language_v1beta2
>>>
>>> client = language_v1beta2.LanguageServiceClient()
>>>
>>> # TODO: Initialize ``document``:
>>> document = {}
>>>
>>> response = client.analyze_entities(document)
Args:
document (Union[dict, ~google.cloud.language_v1beta2.types.Document]): Input document.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.language_v1beta2.types.Document`
encoding_type (~google.cloud.language_v1beta2.types.EncodingType): The encoding type used by the API to calculate offsets.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.language_v1beta2.types.AnalyzeEntitiesResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'analyze_entities' not in self._inner_api_calls:
self._inner_api_calls[
'analyze_entities'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.analyze_entities,
default_retry=self._method_configs['AnalyzeEntities'].
retry,
default_timeout=self._method_configs['AnalyzeEntities'].
timeout,
client_info=self._client_info,
)
request = language_service_pb2.AnalyzeEntitiesRequest(
document=document,
encoding_type=encoding_type,
)
return self._inner_api_calls['analyze_entities'](
request, retry=retry, timeout=timeout, metadata=metadata)
def analyze_entity_sentiment(
self,
document,
encoding_type=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Finds entities, similar to ``AnalyzeEntities`` in the text and analyzes
sentiment associated with each entity and its mentions.
Example:
>>> from google.cloud import language_v1beta2
>>>
>>> client = language_v1beta2.LanguageServiceClient()
>>>
>>> # TODO: Initialize ``document``:
>>> document = {}
>>>
>>> response = client.analyze_entity_sentiment(document)
Args:
document (Union[dict, ~google.cloud.language_v1beta2.types.Document]): Input document.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.language_v1beta2.types.Document`
encoding_type (~google.cloud.language_v1beta2.types.EncodingType): The encoding type used by the API to calculate offsets.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.language_v1beta2.types.AnalyzeEntitySentimentResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'analyze_entity_sentiment' not in self._inner_api_calls:
self._inner_api_calls[
'analyze_entity_sentiment'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.analyze_entity_sentiment,
default_retry=self.
_method_configs['AnalyzeEntitySentiment'].retry,
default_timeout=self.
_method_configs['AnalyzeEntitySentiment'].timeout,
client_info=self._client_info,
)
request = language_service_pb2.AnalyzeEntitySentimentRequest(
document=document,
encoding_type=encoding_type,
)
return self._inner_api_calls['analyze_entity_sentiment'](
request, retry=retry, timeout=timeout, metadata=metadata)
def analyze_syntax(self,
document,
encoding_type=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Analyzes the syntax of the text and provides sentence boundaries and
tokenization along with part of speech tags, dependency trees, and other
properties.
Example:
>>> from google.cloud import language_v1beta2
>>>
>>> client = language_v1beta2.LanguageServiceClient()
>>>
>>> # TODO: Initialize ``document``:
>>> document = {}
>>>
>>> response = client.analyze_syntax(document)
Args:
document (Union[dict, ~google.cloud.language_v1beta2.types.Document]): Input document.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.language_v1beta2.types.Document`
encoding_type (~google.cloud.language_v1beta2.types.EncodingType): The encoding type used by the API to calculate offsets.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.language_v1beta2.types.AnalyzeSyntaxResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'analyze_syntax' not in self._inner_api_calls:
self._inner_api_calls[
'analyze_syntax'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.analyze_syntax,
default_retry=self._method_configs['AnalyzeSyntax'].retry,
default_timeout=self._method_configs['AnalyzeSyntax'].
timeout,
client_info=self._client_info,
)
request = language_service_pb2.AnalyzeSyntaxRequest(
document=document,
encoding_type=encoding_type,
)
return self._inner_api_calls['analyze_syntax'](
request, retry=retry, timeout=timeout, metadata=metadata)
def classify_text(self,
document,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Classifies a document into categories.
Example:
>>> from google.cloud import language_v1beta2
>>>
>>> client = language_v1beta2.LanguageServiceClient()
>>>
>>> # TODO: Initialize ``document``:
>>> document = {}
>>>
>>> response = client.classify_text(document)
Args:
document (Union[dict, ~google.cloud.language_v1beta2.types.Document]): Input document.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.language_v1beta2.types.Document`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.language_v1beta2.types.ClassifyTextResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'classify_text' not in self._inner_api_calls:
self._inner_api_calls[
'classify_text'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.classify_text,
default_retry=self._method_configs['ClassifyText'].retry,
default_timeout=self._method_configs['ClassifyText'].
timeout,
client_info=self._client_info,
)
request = language_service_pb2.ClassifyTextRequest(document=document, )
return self._inner_api_calls['classify_text'](
request, retry=retry, timeout=timeout, metadata=metadata)
def annotate_text(self,
document,
features,
encoding_type=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
A convenience method that provides all syntax, sentiment, entity, and
classification features in one call.
Example:
>>> from google.cloud import language_v1beta2
>>>
>>> client = language_v1beta2.LanguageServiceClient()
>>>
>>> # TODO: Initialize ``document``:
>>> document = {}
>>>
>>> # TODO: Initialize ``features``:
>>> features = {}
>>>
>>> response = client.annotate_text(document, features)
Args:
document (Union[dict, ~google.cloud.language_v1beta2.types.Document]): Input document.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.language_v1beta2.types.Document`
features (Union[dict, ~google.cloud.language_v1beta2.types.Features]): The enabled features.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.language_v1beta2.types.Features`
encoding_type (~google.cloud.language_v1beta2.types.EncodingType): The encoding type used by the API to calculate offsets.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.language_v1beta2.types.AnnotateTextResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'annotate_text' not in self._inner_api_calls:
self._inner_api_calls[
'annotate_text'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.annotate_text,
default_retry=self._method_configs['AnnotateText'].retry,
default_timeout=self._method_configs['AnnotateText'].
timeout,
client_info=self._client_info,
)
request = language_service_pb2.AnnotateTextRequest(
document=document,
features=features,
encoding_type=encoding_type,
)
return self._inner_api_calls['annotate_text'](
request, retry=retry, timeout=timeout, metadata=metadata)
| |
"""
Transmission script of the range test.
author Jonathan Munoz (jonathan.munoz@inria.fr), January 2017
"""
import time
import logging
import threading
import sys
import sched
import Queue
import json
from datetime import datetime as dt
import datetime
import socket
from threading import Timer
import at86rf215_defs as defs
import at86rf215_driver as radio
import GpsThread as gps
import gpio_handler as gpio
FRAME_LENGTH = 2047  # filler length (bytes) used to build each TX frame
CRC_SIZE_LEGACY = 2  # CRC length (bytes) for IEEE 802.15.4-2006 frames
CRC_SIZE_154G = 2  # CRC length (bytes) for IEEE 802.15.4g frames
SECURITY_TIME = 3  # 3 seconds to give more time to TRX to complete the 400 frame bursts.
START_OFFSET = 4  # 4.5 seconds after the starting time arrives. NOTE(review): value is 4 — confirm comment.
MODEM_SUB_GHZ = 0  # selector for the sub-GHz modem
MODEM_2GHZ = 1  # selector for the 2.4 GHz modem
COUNTER_LENGTH = 2  # bytes reserved at the head of each frame for the frame counter
class LoggerTx(threading.Thread):
    """Consumer thread that records TX-experiment results to a JSON-lines file.

    Items put on ``queue`` drive the behaviour:
      * ``'Start'``      -- flush the previous cycle (if any) and reset timers
      * ``'Print last'`` -- flush the current results
      * ``tuple``        -- (last frame number, duration) of a 100-frame burst
      * ``dict``         -- radio settings (has 'frequency_0_kHz') or GPS info
      * ``float``        -- a timestamp to log
    """

    def __init__(self, queue, settings):
        # store parameters
        self.queue = queue
        self.settings = settings

        # local variables
        self.name_file = '/home/pi/range_test_outdoors/experiments_results_' + socket.gethostname() +\
            '.json'
        self.results = {'type': 'end_of_cycle_tx', 'start_time_str': time.strftime("%a, %d %b %Y %H:%M:%S UTC",
                                                                                   time.gmtime()),
                        'start_time_epoch': time.time(), 'radio_settings': None, 'GPSinfo_at_start': None,
                        'version': self.settings['version'], 'channel': None, 'frequency_0': None,
                        'burst_size': self.settings['numframes'], 'id': socket.gethostname()}

        # configure logging BEFORE the consumer thread starts, so messages
        # emitted by run() right after start() are not dropped
        logging.basicConfig(stream=sys.__stdout__, level=logging.DEBUG)

        # start the thread
        threading.Thread.__init__(self)
        self.name = 'LoggerTx'
        self.daemon = True
        self.start()

    def run(self):
        # Consume queue items forever; the thread is a daemon, so it dies
        # with the process.
        while True:
            item = self.queue.get()
            if item == 'Start':
                # flush the previous cycle only once a modulation was logged
                if self.results['radio_settings']:
                    with open(self.name_file, 'a') as f:
                        f.write(json.dumps(self.results.copy())+'\n')
                self.results['start_time_str'] = time.strftime("%a, %d %b %Y %H:%M:%S UTC", time.gmtime())
                self.results['start_time_epoch'] = time.time()
            elif item == 'Print last':
                with open(self.name_file, 'a') as f:
                    f.write(json.dumps(self.results.copy())+'\n')
            # use isinstance instead of `type(x) is T` identity checks
            elif isinstance(item, tuple):
                logging.info('Time to send the frames {0} - {1} was {2} seconds\n'.format(item[0] - 100, item[0],
                                                                                          item[1]))
            elif isinstance(item, dict):
                if item.get('frequency_0_kHz') is not None:
                    self.results['frequency_0'] = item['frequency_0_kHz']
                    self.results['channel'] = item['channel']
                    self.results['radio_settings'] = item['modulation']
                else:
                    self.results['GPSinfo_at_start'] = item
            elif isinstance(item, float):
                logging.info('Time {0}'.format(item))
            else:
                logging.error('UNKNOWN ITEM IN THE QUEUE: {0}.'.format(item))
class ExperimentTx(threading.Thread):
    """Main thread of the TX side of the range test.

    Sets up the radio, GPS, logger and GPIO helpers, then schedules bursts of
    frame transmissions for every modulation in the settings file.  The push
    button starts the sequence; pressing it again cancels/resets the current
    experiment.
    """

    def __init__(self, settings):
        # local variables
        self.settings = settings
        self.queue_tx = Queue.Queue()
        self.f_start_signal_LED = False
        self.f_reset_button = False
        self.f_exit = False
        self.f_cancel_exp = False
        self.hours = 0
        self.minutes = 0
        self.scheduler = sched.scheduler(time.time, time.sleep)
        self.list_events_sched = [None] * len(self.settings["test_settings"])
        self.schedule_time = ['time'] * len(self.settings["test_settings"])
        self.led_array_pins = [29, 31, 33, 35, 37]
        self.TRX_frame_pin = [36]
        self.radio_isr_pin = 11
        self.push_button_pin = 13
        self.scheduler_aux = None
        self.time_to_start = None
        self.started_time = None
        self.led_start_indicator = None
        self.experiment_scheduled = None
        self.experiment_tx_thread = None
        self.experiment_counter = 0
        self.modem_base_band_state = MODEM_SUB_GHZ
        self.dataLock = threading.RLock()

        # events coordinating the experiment life cycle
        self.f_reset = threading.Event()
        self.start_experiment = threading.Event()
        self.end_experiment = threading.Event()
        self.f_schedule = threading.Event()
        self.f_reset.clear()
        self.start_experiment.clear()
        self.end_experiment.clear()
        self.f_schedule.clear()
        self.radio_driver = None
        self.LoggerTx = None
        self.gps = None
        self.gpio_handler = None

        # start all the drivers
        # gps should be enabled
        self._gps_init()
        self._radio_setup()
        self._logger_init()
        self._gpio_handler_init()
        self._radio_init()
        logging.debug('INIT COMPLETE')

        # start the thread
        threading.Thread.__init__(self)
        self.name = 'ExperimentTx_'
        self.daemon = True
        self.start()

    # ====================== private =========================================

    def _radio_setup(self):
        # initialize the radio driver
        self.radio_driver = radio.At86rf215(None, None)
        self.radio_driver.spi_init()

    def _radio_init(self):
        self.radio_driver.radio_reset()
        self.radio_driver.read_isr_source()  # no functional role, just clear the pending interrupt flag

    def _gps_init(self):
        logging.debug('in of GPS init')
        # start the gps thread
        self.gps = gps.GpsThread()
        # waiting until the GPS time is valid
        logging.info('waiting for valid GPS time...')
        while self.gps.is_gps_time_valid() is False:
            time.sleep(1)
        logging.info('... time valid')
        logging.info('out of GPS init')

    def _logger_init(self):
        # initializes the LoggerTx thread
        self.LoggerTx = LoggerTx(self.queue_tx, self.settings)

    def _gpio_handler_init(self):
        # register radio/push-button callbacks and put the LEDs in a known state
        self.gpio_handler = gpio.GPIO_handler(self.radio_isr_pin, self.push_button_pin,
                                              self.radio_driver.cb_radio_isr,
                                              self._cb_push_button)
        self.gpio_handler.init_binary_pins(self.led_array_pins)
        self.gpio_handler.init_binary_pins(self.TRX_frame_pin)
        self.gpio_handler.led_off(self.TRX_frame_pin)
        self.gpio_handler.binary_counter(0, self.led_array_pins)

    def _start_time_experiment(self):
        """
        it sets the next runtime for the whole experiment sequence in hours, minutes
        current_time[3] = hours, current_time[4] = minutes, current_time[5] = seconds
        :return: hours, minutes
        """
        current_time = time.gmtime()
        # FIX: compare ints with ==/!= instead of the identity operators
        # 'is'/'is not', whose result for ints is an implementation detail.
        if current_time[5] < 50:
            if current_time[4] != 59:
                new_time = current_time[3], current_time[4] + 1
            else:
                new_time = (current_time[3] + 1) % 24, 0
        else:
            if current_time[4] == 59:
                new_time = (current_time[3] + 1) % 24, 1
            else:
                new_time = current_time[3], current_time[4] + 2
        return new_time

    def _stop_exp(self):
        """
        it makes print the last modulation results
        """
        self.queue_tx.put('Print last')
        with self.dataLock:
            self.end_experiment.set()
        logging.info('before the led_end_experiment_signal, time: {0}, thread: {1}'.format(time.time(),
                                                                                           threading.current_thread()))
        self._led_end_experiment_signal()
        logging.debug('END OF EXPERIMENTS')

    def _modem_2ghz(self):
        self.modem_base_band_state = MODEM_2GHZ

    def _execute_experiment_tx(self, item):
        """Runs one TX experiment for the modulation described by ``item``.

        :param item: one entry of settings['test_settings']
        """
        logging.info('current thread in EXPERIMENT_TX: {0}'.format(threading.current_thread()))
        logging.info('thread enumerate: {0}'.format(threading.enumerate()))
        logging.info('start time TX 100 : {0}'.format(time.time()))
        total_time = time.time()
        self.gpio_handler.led_off(self.TRX_frame_pin)

        # clean the break _execute_experiment_tx flag
        self.f_cancel_exp = False

        self.queue_tx.put(time.time() - self.started_time)
        self.gpio_handler.binary_counter(0, self.led_array_pins)

        # initialize the frame counter
        frame_counter = 0

        # reset the radio to erase previous configuration
        self.radio_driver.radio_reset()

        # re-configure the radio
        self.radio_driver.radio_write_config(defs.modulations_settings[item['modulation']])

        # select the frequency
        logging.debug('ITEM: {0}'.format(item))
        if item['modem'] == "subGHz":
            self.radio_driver.radio_off()
            self.radio_driver.radio_set_frequency((item['channel_spacing_kHz'],
                                                   item['frequency_0_kHz'],
                                                   item['channel']))
        elif item['modem'] == "2.4GHz":
            self.radio_driver.radio_off_2_4ghz()
            self.radio_driver.radio_set_frequency_2_4ghz((item['channel_spacing_kHz'],
                                                          item['frequency_0_kHz'],
                                                          item['channel']))
        else:
            # FIX: logging.CRITICAL is the level constant (an int); calling it
            # raised TypeError. logging.critical() is the logging function.
            logging.critical('ERROR')

        self.gpio_handler.binary_counter(item['index'], self.led_array_pins)
        logging.info('modulation: {0}, channel: {1}'.format(item["modulation"], item["channel"]))

        # let know to the informative class the beginning of a new experiment
        self.queue_tx.put('Start')

        # log the config name
        self.queue_tx.put(item)

        # log GPS info
        # self.queue_tx.put(self.gps.gps_info_read())

        if item['standard'] == '802.15.4g':
            # loop through packet lengths
            for frame_length in self.settings["frame_lengths_15.4g"]:
                # check if the reset button has been pressed
                with self.dataLock:
                    if self.f_cancel_exp:
                        logging.warning('BREAKING EXP')
                        break
                if item['modem'] == 'subGHz':
                    self.radio_driver.radio_trx_enable()
                else:
                    self.radio_driver.radio_trx_enable_2_4ghz()
                # send burst of frames
                for i in range(self.settings['numframes']):
                    # create frame (FIX: the filler comprehension reused the
                    # burst-loop variable `i`, which Python 2 comprehensions
                    # leak into the enclosing scope)
                    frameToSend = [frame_counter >> 8, frame_counter & 0xFF] + \
                                  [b & 0xFF for b in range(FRAME_LENGTH - COUNTER_LENGTH)]

                    # increment the frame counter
                    frame_counter += 1

                    # send frame
                    if item['modem'] == 'subGHz':
                        self.radio_driver.radio_load_packet(frameToSend[:frame_length - CRC_SIZE_154G], CRC_SIZE_154G)
                        self.radio_driver.radio_tx_now()
                    else:
                        self.radio_driver.radio_load_packet_2_4ghz(frameToSend[:frame_length - CRC_SIZE_154G],
                                                                   CRC_SIZE_154G)
                        self.radio_driver.radio_tx_now_2_4ghz()

                    # IFS
                    time.sleep(self.settings['IFS'])
                    self.gpio_handler.led_toggle(self.TRX_frame_pin)
                    with self.dataLock:
                        if self.f_cancel_exp:
                            logging.warning('BREAKING EXP')
                            break

        # standard is IEEE802.15.4-2006
        else:
            # loop through packet lengths
            for frame_length in self.settings["frame_lengths_15.4-2006"]:
                # check if the reset button has been pressed
                with self.dataLock:
                    if self.f_cancel_exp:
                        logging.warning('BREAKING EXP')
                        break
                self.radio_driver.radio_trx_enable_2_4ghz()
                # send burst of frames
                for i in range(self.settings['numframes']):
                    # create frame (same scoping fix as the 15.4g branch)
                    frameToSend = [frame_counter >> 8, frame_counter & 0xFF] + \
                                  [b & 0xFF for b in range(FRAME_LENGTH - COUNTER_LENGTH)]

                    # increment the frame counter
                    frame_counter += 1

                    # send frame
                    self.radio_driver.radio_load_packet_2_4ghz(frameToSend[:frame_length - CRC_SIZE_LEGACY],
                                                               CRC_SIZE_LEGACY)
                    self.radio_driver.radio_tx_now_2_4ghz()

                    # IFS
                    time.sleep(self.settings["IFS"])
                    self.gpio_handler.led_toggle(self.TRX_frame_pin)
                    with self.dataLock:
                        if self.f_cancel_exp:
                            logging.warning('BREAKING EXP')
                            break

        logging.info('DURATION OF {0} is: {1}'.format(item["modulation"], (time.time() - total_time)))
        self.radio_driver.radio_off_2_4ghz()
        self.radio_driver.radio_off()

    def _remove_scheduled_experiment(self):
        # cancel every event still pending in the scheduler
        events = self.scheduler.queue
        for ev in events:
            self.scheduler.cancel(ev)

    def _led_end_experiment_signal(self):
        # blink the LED array for ~20 s (or until a reset) to signal the end
        i = 0
        for led in self.led_array_pins:
            self.gpio_handler.led_off(led)
        while i < 20 and not self.f_reset.is_set():
            for led in self.led_array_pins:
                self.gpio_handler.led_toggle(led)
            time.sleep(1)
            i += 1

    def _led_start_experiment_signal(self):
        """
        it lights on a LED if the experiment will take place in the next minute
        it uses the frame receive LED to indicate whether the experiment is going to start the next minute or not.
        :return:
        """
        logging.debug('entering led_start_experiment_signal')
        while not self.f_start_signal_LED:
            now = time.gmtime()
            if self.minutes - now[4] == 1 or self.minutes - now[4] == -59:
                logging.debug('SWITCHING LIGHT UP led_start_experiment_signal')
                self.gpio_handler.led_on(self.TRX_frame_pin)
                self.f_start_signal_LED = True
                continue
            time.sleep(1)
        self.f_start_signal_LED = False
        logging.debug('OUTING led_start_experiment_signal')

    def run(self):
        logging.info('current thread: {0}'.format(threading.current_thread()))
        logging.info('WAITING FOR THE START BUTTON TO BE PRESSED')
        logging.info('thread enumerate: {0}'.format(threading.enumerate()))

        # push button signal
        self.start_experiment.wait()
        self.start_experiment.clear()

        while True:
            # gets current time and determines the running time for the experiment to start
            self.started_time = time.time()
            self.hours, self.minutes = self._start_time_experiment()
            self.time_to_start = dt.combine(dt.now(), datetime.time(self.hours, self.minutes))

            self.radio_driver.radio_off()
            self.gpio_handler.led_off(self.TRX_frame_pin)
            self.gpio_handler.binary_counter(0, self.led_array_pins)
            self.experiment_counter = 0

            # arm a timer that fires START_OFFSET seconds after the chosen minute
            self.experiment_scheduled = Timer(
                time.mktime(self.time_to_start.timetuple()) + START_OFFSET - time.time(),
                self._experiment_scheduling, ())
            self.experiment_scheduled.start()
            logging.info('time left for the experiment to start: {0}'.format(time.mktime(self.time_to_start.timetuple())
                                                                             + START_OFFSET - time.time()))
            logging.info('time to start experiment: {0}'.format(self.time_to_start))
            self.led_start_indicator = threading.Thread(target=self._led_start_experiment_signal)
            self.led_start_indicator.start()
            self.led_start_indicator.name = 'TX start led signal'
            logging.info('Experiment loaded')
            logging.info('current thread: {0}'.format(threading.current_thread()))
            logging.info('thread enumerate: {0}'.format(threading.enumerate()))
            self.f_reset.wait()
            self.f_reset.clear()
            logging.info('reset button pressed')

    # ======================== callbacks =======================================

    def _cb_push_button(self, channel=13):
        logging.info('PUSH BUTTON PRESSED')
        self.gpio_handler.clear_cb(13)
        # switch on all leds to let the user know the push button has been pressed and it got the signal.
        self.gpio_handler.binary_counter(31, self.led_array_pins)
        if not self.f_reset_button:
            # first press starts the experiment sequence
            with self.dataLock:
                self.start_experiment.set()
                self.f_reset_button = True
            logging.info('START BUTTON PRESSED')
        else:
            # subsequent presses reset/cancel the running experiment
            with self.dataLock:
                logging.info('RESET BUTTON PRESSED')
                self.f_reset.set()
                self.f_cancel_exp = True
            # prefer is_set() over the deprecated isSet() alias
            logging.info('f_reset set to true?: {0}'.format(self.f_reset.is_set()))
        # debounce before re-registering the callback
        time.sleep(5)
        self.gpio_handler.add_cb(self._cb_push_button, self.push_button_pin)

    def _experiment_scheduling(self):
        # self-rescheduling timer: run the current test setting, then arm the
        # next timer for when this one is expected to finish (+SECURITY_TIME)
        logging.info('current thread in the scheduling: {0}'.format(threading.current_thread()))
        self.time_next_experiment = self.settings['test_settings'][self.experiment_counter % len(
            self.settings['test_settings'])]['durationtx_s'] + SECURITY_TIME
        logging.info('time of next experiment: {0}, setting: {1}'.format(self.time_next_experiment, self.settings[
            'test_settings'][self.experiment_counter % len(self.settings['test_settings'])]['modulation']))
        self.experiment_scheduled = Timer(self.time_next_experiment, self._experiment_scheduling, ())
        self.experiment_scheduled.start()
        self.experiment_tx_thread = threading.Thread(target=self._execute_experiment_tx, args=[self.settings[
            'test_settings'][self.experiment_counter % len(self.settings['test_settings'])]])
        self.experiment_tx_thread.start()
        self.experiment_tx_thread.name = 'schedule TX _100 packets'
        self.experiment_counter += 1
# ============================ main ==========================================
def load_experiment_details():
    """Read the experiment configuration JSON from disk and return it as a dict."""
    path = '/home/pi/range_test/raspberry/experiment_settings_outdoors_range_test.json'
    with open(path, 'r') as config_file:
        raw = config_file.read()
    # strip line endings before parsing, mirroring the original behaviour
    return json.loads(raw.replace('\n', ' ').replace('\r', ''))
def main():
    """Entry point: run the TX experiment until the reset button is pressed."""
    # (removed an unused `f_start` flag and dead code that followed sys.exit)
    logging.basicConfig(stream=sys.__stdout__, level=logging.INFO)
    logging.info('PROGRAM STARTING...')
    experimentTx = ExperimentTx(load_experiment_details())
    # block until the push button is pressed a second time (reset)
    experimentTx.f_reset.wait()
    logging.info('PROGRAM FINISHING...')
    experimentTx.f_reset.clear()
    experimentTx.gpio_handler.clean_gpio()
    sys.exit(0)


if __name__ == '__main__':
    main()
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for gan.python.train."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.gan.python import namedtuples
from tensorflow.contrib.gan.python import train
from tensorflow.contrib.gan.python.features.python import random_tensor_pool
from tensorflow.contrib.slim.python.slim import learning as slim_learning
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.platform import test
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import coordinator
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
def generator_model(inputs):
  """Dummy generator: scales `inputs` by a trainable scalar 'dummy_g' (init 2.0)."""
  return variable_scope.get_variable('dummy_g', initializer=2.0) * inputs
class Generator(object):
  """Callable-object wrapper around `generator_model`."""
  def __call__(self, inputs):
    return generator_model(inputs)
def infogan_generator_model(inputs):
  """Dummy InfoGAN generator: scales the first element of `inputs`."""
  return variable_scope.get_variable('dummy_g', initializer=2.0) * inputs[0]
class InfoGANGenerator(object):
  """Callable-object wrapper around `infogan_generator_model`."""
  def __call__(self, inputs):
    return infogan_generator_model(inputs)
def discriminator_model(inputs, _):
  """Dummy discriminator: scales `inputs` by a trainable scalar 'dummy_d' (init 2.0)."""
  return variable_scope.get_variable('dummy_d', initializer=2.0) * inputs
class Discriminator(object):
  """Callable-object wrapper around `discriminator_model`."""
  def __call__(self, inputs, _):
    return discriminator_model(inputs, _)
def infogan_discriminator_model(inputs, _):
  """Dummy InfoGAN discriminator: (logits, list with one Categorical)."""
  return (variable_scope.get_variable('dummy_d', initializer=2.0) * inputs,
          [categorical.Categorical([1.0])])
class InfoGANDiscriminator(object):
  """Callable-object wrapper around `infogan_discriminator_model`."""
  def __call__(self, inputs, _):
    return infogan_discriminator_model(inputs, _)
def acgan_discriminator_model(inputs, _, num_classes=10):
  """Dummy ACGAN discriminator: (discrimination logits, random one-hot class)."""
  return (
      discriminator_model(inputs, _),
      array_ops.one_hot(
          # TODO(haeusser): infer batch size from input
          random_ops.random_uniform(
              [3], maxval=num_classes, dtype=dtypes.int32),
          num_classes))
class ACGANDiscriminator(object):
  """Callable-object wrapper around `acgan_discriminator_model`.

  Delegates instead of duplicating the function body, for consistency with
  the other callable wrappers in this file (Generator, Discriminator, ...).
  """
  def __call__(self, inputs, _, num_classes=10):
    return acgan_discriminator_model(inputs, _, num_classes)
def stargan_generator_model(inputs, _):
  """Dummy generator for StarGAN."""
  return variable_scope.get_variable('dummy_g', initializer=0.5) * inputs
class StarGANGenerator(object):
  """Callable-object wrapper around `stargan_generator_model`."""
  def __call__(self, inputs, _):
    return stargan_generator_model(inputs, _)
def stargan_discriminator_model(inputs, num_domains):
  """Differentiable dummy discriminator for StarGAN."""
  # Source logits: mean over flattened features; class logits: bias-free
  # linear layer with `num_domains` outputs and no activation.
  hidden = layers.flatten(inputs)
  output_src = math_ops.reduce_mean(hidden, axis=1)
  output_cls = layers.fully_connected(
      inputs=hidden,
      num_outputs=num_domains,
      activation_fn=None,
      normalizer_fn=None,
      biases_initializer=None)
  return output_src, output_cls
class StarGANDiscriminator(object):
  """Callable-object wrapper around `stargan_discriminator_model`."""
  def __call__(self, inputs, num_domains):
    return stargan_discriminator_model(inputs, num_domains)
def get_gan_model():
  """Manually constructs a `GANModel` tuple with placeholder fields."""
  # TODO(joelshor): Find a better way of creating a variable scope.
  with variable_scope.variable_scope('generator') as gen_scope:
    pass
  with variable_scope.variable_scope('discriminator') as dis_scope:
    pass
  return namedtuples.GANModel(
      generator_inputs=None,
      generated_data=None,
      generator_variables=None,
      generator_scope=gen_scope,
      generator_fn=generator_model,
      real_data=array_ops.ones([1, 2, 3]),
      discriminator_real_outputs=array_ops.ones([1, 2, 3]),
      discriminator_gen_outputs=array_ops.ones([1, 2, 3]),
      discriminator_variables=None,
      discriminator_scope=dis_scope,
      discriminator_fn=discriminator_model)
def get_callable_gan_model():
  """Like `get_gan_model` but with callable-object generator/discriminator."""
  ganmodel = get_gan_model()
  return ganmodel._replace(
      generator_fn=Generator(), discriminator_fn=Discriminator())
def create_gan_model():
  """Builds a `GANModel` via the library entry point `train.gan_model`."""
  return train.gan_model(
      generator_model,
      discriminator_model,
      real_data=array_ops.zeros([1, 2]),
      generator_inputs=random_ops.random_normal([1, 2]))
def create_callable_gan_model():
  """Like `create_gan_model` but with callable-object models."""
  return train.gan_model(
      Generator(),
      Discriminator(),
      real_data=array_ops.zeros([1, 2]),
      generator_inputs=random_ops.random_normal([1, 2]))
def get_infogan_model():
  """Manually constructs an `InfoGANModel` tuple on top of `get_gan_model`."""
  return namedtuples.InfoGANModel(
      *get_gan_model(),
      structured_generator_inputs=[constant_op.constant(0)],
      predicted_distributions=[categorical.Categorical([1.0])],
      discriminator_and_aux_fn=infogan_discriminator_model)
def get_callable_infogan_model():
  """Like `get_infogan_model` but with callable-object models."""
  return namedtuples.InfoGANModel(
      *get_callable_gan_model(),
      structured_generator_inputs=[constant_op.constant(0)],
      predicted_distributions=[categorical.Categorical([1.0])],
      discriminator_and_aux_fn=infogan_discriminator_model)
def create_infogan_model():
  """Builds an `InfoGANModel` via `train.infogan_model`."""
  return train.infogan_model(
      infogan_generator_model,
      infogan_discriminator_model,
      real_data=array_ops.zeros([1, 2]),
      unstructured_generator_inputs=[],
      structured_generator_inputs=[random_ops.random_normal([1, 2])])
def create_callable_infogan_model():
  """Like `create_infogan_model` but with callable-object models."""
  return train.infogan_model(
      InfoGANGenerator(),
      InfoGANDiscriminator(),
      real_data=array_ops.zeros([1, 2]),
      unstructured_generator_inputs=[],
      structured_generator_inputs=[random_ops.random_normal([1, 2])])
def get_acgan_model():
  """Manually constructs an `ACGANModel` tuple on top of `get_gan_model`."""
  return namedtuples.ACGANModel(
      *get_gan_model(),
      one_hot_labels=array_ops.one_hot([0, 1, 2], 10),
      discriminator_real_classification_logits=array_ops.one_hot([0, 1, 3], 10),
      discriminator_gen_classification_logits=array_ops.one_hot([0, 1, 4], 10))
def get_callable_acgan_model():
  """Like `get_acgan_model` but with callable-object models."""
  return namedtuples.ACGANModel(
      *get_callable_gan_model(),
      one_hot_labels=array_ops.one_hot([0, 1, 2], 10),
      discriminator_real_classification_logits=array_ops.one_hot([0, 1, 3], 10),
      discriminator_gen_classification_logits=array_ops.one_hot([0, 1, 4], 10))
def create_acgan_model():
  """Builds an `ACGANModel` via `train.acgan_model`."""
  return train.acgan_model(
      generator_model,
      acgan_discriminator_model,
      real_data=array_ops.zeros([1, 2]),
      generator_inputs=random_ops.random_normal([1, 2]),
      one_hot_labels=array_ops.one_hot([0, 1, 2], 10))
def create_callable_acgan_model():
  """Like `create_acgan_model` but with callable-object models."""
  return train.acgan_model(
      Generator(),
      ACGANDiscriminator(),
      real_data=array_ops.zeros([1, 2]),
      generator_inputs=random_ops.random_normal([1, 2]),
      one_hot_labels=array_ops.one_hot([0, 1, 2], 10))
def get_cyclegan_model():
  """Manually constructs a `CycleGANModel` from two `get_gan_model` tuples."""
  return namedtuples.CycleGANModel(
      model_x2y=get_gan_model(),
      model_y2x=get_gan_model(),
      reconstructed_x=array_ops.ones([1, 2, 3]),
      reconstructed_y=array_ops.zeros([1, 2, 3]))
def get_callable_cyclegan_model():
  """Like `get_cyclegan_model` but with callable-object models."""
  return namedtuples.CycleGANModel(
      model_x2y=get_callable_gan_model(),
      model_y2x=get_callable_gan_model(),
      reconstructed_x=array_ops.ones([1, 2, 3]),
      reconstructed_y=array_ops.zeros([1, 2, 3]))
def create_cyclegan_model():
  """Builds a `CycleGANModel` via `train.cyclegan_model`."""
  return train.cyclegan_model(
      generator_model,
      discriminator_model,
      data_x=array_ops.zeros([1, 2]),
      data_y=array_ops.ones([1, 2]))
def create_callable_cyclegan_model():
  """Like `create_cyclegan_model` but with callable-object models."""
  return train.cyclegan_model(
      Generator(),
      Discriminator(),
      data_x=array_ops.zeros([1, 2]),
      data_y=array_ops.ones([1, 2]))
def get_stargan_model():
"""Similar to get_gan_model()."""
# TODO(joelshor): Find a better way of creating a variable scope.
with variable_scope.variable_scope('generator') as gen_scope:
pass
with variable_scope.variable_scope('discriminator') as dis_scope:
pass
return namedtuples.StarGANModel(
input_data=array_ops.ones([1, 2, 2, 3]),
input_data_domain_label=array_ops.ones([1, 2]),
generated_data=array_ops.ones([1, 2, 2, 3]),
generated_data_domain_target=array_ops.ones([1, 2]),
reconstructed_data=array_ops.ones([1, 2, 2, 3]),
discriminator_input_data_source_predication=array_ops.ones([1]),
discriminator_generated_data_source_predication=array_ops.ones([1]),
discriminator_input_data_domain_predication=array_ops.ones([1, 2]),
discriminator_generated_data_domain_predication=array_ops.ones([1, 2]),
generator_variables=None,
generator_scope=gen_scope,
generator_fn=stargan_generator_model,
discriminator_variables=None,
discriminator_scope=dis_scope,
discriminator_fn=stargan_discriminator_model)
def get_callable_stargan_model():
  """Like `get_stargan_model`, but with class-based gen/dis callables."""
  base_model = get_stargan_model()
  return base_model._replace(
      generator_fn=StarGANGenerator(),
      discriminator_fn=StarGANDiscriminator())
def create_stargan_model():
  """Construct a StarGAN model via `train.stargan_model`."""
  images = array_ops.ones([1, 2, 2, 3])
  labels = array_ops.ones([1, 2])
  return train.stargan_model(
      stargan_generator_model, stargan_discriminator_model, images, labels)
def create_callable_stargan_model():
  """Like `create_stargan_model`, but with class-based callables."""
  images = array_ops.ones([1, 2, 2, 3])
  labels = array_ops.ones([1, 2])
  return train.stargan_model(
      StarGANGenerator(), StarGANDiscriminator(), images, labels)
def get_sync_optimizer():
  """Return a single-replica SyncReplicasOptimizer wrapping plain SGD."""
  base_optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
  return sync_replicas_optimizer.SyncReplicasOptimizer(
      base_optimizer, replicas_to_aggregate=1)
class GANModelTest(test.TestCase, parameterized.TestCase):
  """Tests for `gan_model`."""
  @parameterized.named_parameters(
      ('gan', get_gan_model, namedtuples.GANModel),
      ('callable_gan', get_callable_gan_model, namedtuples.GANModel),
      ('infogan', get_infogan_model, namedtuples.InfoGANModel),
      ('callable_infogan', get_callable_infogan_model,
       namedtuples.InfoGANModel),
      ('acgan', get_acgan_model, namedtuples.ACGANModel),
      ('callable_acgan', get_callable_acgan_model, namedtuples.ACGANModel),
      ('cyclegan', get_cyclegan_model, namedtuples.CycleGANModel),
      ('callable_cyclegan', get_callable_cyclegan_model,
       namedtuples.CycleGANModel),
      ('stargan', get_stargan_model, namedtuples.StarGANModel),
      # Fixed typo in the generated test-case name: 'callabel' -> 'callable'.
      ('callable_stargan', get_callable_stargan_model,
       namedtuples.StarGANModel)
  )
  def test_output_type(self, create_fn, expected_tuple_type):
    """Test that output type is as expected."""
    self.assertIsInstance(create_fn(), expected_tuple_type)
  def test_no_shape_check(self):
    """`check_shapes=False` must skip shape validation entirely."""
    def dummy_generator_model(_):
      return (None, None)
    def dummy_discriminator_model(data, conditioning):  # pylint: disable=unused-argument
      return 1
    # With shape checking on, the `None` generator output triggers an
    # AttributeError while inspecting shapes.
    with self.assertRaisesRegexp(AttributeError, 'object has no attribute'):
      train.gan_model(
          dummy_generator_model,
          dummy_discriminator_model,
          real_data=array_ops.zeros([1, 2]),
          generator_inputs=array_ops.zeros([1]),
          check_shapes=True)
    # With shape checking off, the same inputs must be accepted.
    train.gan_model(
        dummy_generator_model,
        dummy_discriminator_model,
        real_data=array_ops.zeros([1, 2]),
        generator_inputs=array_ops.zeros([1]),
        check_shapes=False)
class StarGANModelTest(test.TestCase):
  """Tests for `stargan_model`."""
  @staticmethod
  def create_input_and_label_tensor(batch_size, img_size, c_size, num_domains):
    """Create `num_domains` random image batches plus one-hot domain labels."""
    input_tensor_list = []
    label_tensor_list = []
    for _ in range(num_domains):
      input_tensor_list.append(
          random_ops.random_uniform((batch_size, img_size, img_size, c_size)))
      domain_idx = random_ops.random_uniform(
          [batch_size], minval=0, maxval=num_domains, dtype=dtypes.int32)
      label_tensor_list.append(array_ops.one_hot(domain_idx, num_domains))
    return input_tensor_list, label_tensor_list
  def test_generate_stargan_random_domain_target(self):
    """Random domain targets must be one-hot rows of the requested shape."""
    batch_size = 8
    domain_numbers = 3
    target_tensor = train._generate_stargan_random_domain_target(
        batch_size, domain_numbers)
    with self.test_session() as sess:
      targets = sess.run(target_tensor)
    self.assertTupleEqual((batch_size, domain_numbers), targets.shape)
    for target in targets:
      # Exactly one entry per row is 1 and no entry exceeds 1.
      self.assertEqual(1, np.sum(target))
      self.assertEqual(1, np.max(target))
  def test_stargan_model_output_type(self):
    """`stargan_model` returns a fully-populated StarGANModel."""
    batch_size = 2
    img_size = 16
    c_size = 3
    num_domains = 5
    input_tensor, label_tensor = StarGANModelTest.create_input_and_label_tensor(
        batch_size, img_size, c_size, num_domains)
    model = train.stargan_model(
        generator_fn=stargan_generator_model,
        discriminator_fn=stargan_discriminator_model,
        input_data=input_tensor,
        input_data_domain_label=label_tensor)
    self.assertIsInstance(model, namedtuples.StarGANModel)
    self.assertIsInstance(model.discriminator_variables, list)
    self.assertIsInstance(model.generator_variables, list)
    self.assertIsInstance(model.discriminator_scope,
                          variable_scope.VariableScope)
    # Fixed: was `assertTrue(a, b)`, which only checked truthiness of `a`
    # (the second argument is the failure message, not a type).
    self.assertIsInstance(model.generator_scope,
                          variable_scope.VariableScope)
    self.assertTrue(callable(model.discriminator_fn))
    self.assertTrue(callable(model.generator_fn))
  def test_stargan_model_generator_output(self):
    """Generator/reconstruction outputs keep the (stacked) input shape."""
    batch_size = 2
    img_size = 16
    c_size = 3
    num_domains = 5
    input_tensor, label_tensor = StarGANModelTest.create_input_and_label_tensor(
        batch_size, img_size, c_size, num_domains)
    model = train.stargan_model(
        generator_fn=stargan_generator_model,
        discriminator_fn=stargan_discriminator_model,
        input_data=input_tensor,
        input_data_domain_label=label_tensor)
    with self.test_session(use_gpu=True) as sess:
      sess.run(variables.global_variables_initializer())
      input_data, generated_data, reconstructed_data = sess.run(
          [model.input_data, model.generated_data, model.reconstructed_data])
      # The per-domain batches are concatenated along the batch axis.
      self.assertTupleEqual(
          (batch_size * num_domains, img_size, img_size, c_size),
          input_data.shape)
      self.assertTupleEqual(
          (batch_size * num_domains, img_size, img_size, c_size),
          generated_data.shape)
      self.assertTupleEqual(
          (batch_size * num_domains, img_size, img_size, c_size),
          reconstructed_data.shape)
  def test_stargan_model_discriminator_output(self):
    """Discriminator source/domain predictions have the expected shapes."""
    batch_size = 2
    img_size = 16
    c_size = 3
    num_domains = 5
    input_tensor, label_tensor = StarGANModelTest.create_input_and_label_tensor(
        batch_size, img_size, c_size, num_domains)
    model = train.stargan_model(
        generator_fn=stargan_generator_model,
        discriminator_fn=stargan_discriminator_model,
        input_data=input_tensor,
        input_data_domain_label=label_tensor)
    with self.test_session(use_gpu=True) as sess:
      sess.run(variables.global_variables_initializer())
      disc_input_data_source_pred, disc_gen_data_source_pred = sess.run([
          model.discriminator_input_data_source_predication,
          model.discriminator_generated_data_source_predication
      ])
      # Source ("real vs fake") predictions: one scalar per stacked example.
      self.assertEqual(1, len(disc_input_data_source_pred.shape))
      self.assertEqual(batch_size * num_domains,
                       disc_input_data_source_pred.shape[0])
      self.assertEqual(1, len(disc_gen_data_source_pred.shape))
      self.assertEqual(batch_size * num_domains,
                       disc_gen_data_source_pred.shape[0])
      input_label, disc_input_label, gen_label, disc_gen_label = sess.run([
          model.input_data_domain_label,
          model.discriminator_input_data_domain_predication,
          model.generated_data_domain_target,
          model.discriminator_generated_data_domain_predication
      ])
      # Domain labels/predictions: one distribution over domains per example.
      self.assertTupleEqual((batch_size * num_domains, num_domains),
                            input_label.shape)
      self.assertTupleEqual((batch_size * num_domains, num_domains),
                            disc_input_label.shape)
      self.assertTupleEqual((batch_size * num_domains, num_domains),
                            gen_label.shape)
      self.assertTupleEqual((batch_size * num_domains, num_domains),
                            disc_gen_label.shape)
class GANLossTest(test.TestCase, parameterized.TestCase):
  """Tests for `gan_loss`."""
  @parameterized.named_parameters(
      ('gan', get_gan_model),
      ('callable_gan', get_callable_gan_model),
      ('infogan', get_infogan_model),
      ('callable_infogan', get_callable_infogan_model),
      ('acgan', get_acgan_model),
      ('callable_acgan', get_callable_acgan_model),
  )
  def test_output_type(self, get_gan_model_fn):
    """Test output type."""
    loss = train.gan_loss(get_gan_model_fn(), add_summaries=True)
    self.assertIsInstance(loss, namedtuples.GANLoss)
    # `add_summaries=True` must register at least one summary op.
    self.assertGreater(len(ops.get_collection(ops.GraphKeys.SUMMARIES)), 0)
  @parameterized.named_parameters(
      ('cyclegan', create_cyclegan_model),
      ('callable_cyclegan', create_callable_cyclegan_model),
  )
  def test_cyclegan_output_type(self, get_gan_model_fn):
    """CycleGAN losses come back as a CycleGANLoss tuple with summaries."""
    loss = train.cyclegan_loss(get_gan_model_fn(), add_summaries=True)
    self.assertIsInstance(loss, namedtuples.CycleGANLoss)
    self.assertGreater(len(ops.get_collection(ops.GraphKeys.SUMMARIES)), 0)
  @parameterized.named_parameters(
      ('gan', create_gan_model, False),
      ('gan_one_sided', create_gan_model, True),
      ('callable_gan', create_callable_gan_model, False),
      ('callable_gan_one_sided', create_callable_gan_model, True),
      ('infogan', create_infogan_model, False),
      ('infogan_one_sided', create_infogan_model, True),
      ('callable_infogan', create_callable_infogan_model, False),
      ('callable_infogan_one_sided', create_callable_infogan_model, True),
      ('acgan', create_acgan_model, False),
      ('acgan_one_sided', create_acgan_model, True),
      ('callable_acgan', create_callable_acgan_model, False),
      ('callable_acgan_one_sided', create_callable_acgan_model, True),
  )
  def test_grad_penalty(self, create_gan_model_fn, one_sided):
    """Test gradient penalty option."""
    model = create_gan_model_fn()
    loss = train.gan_loss(model)
    loss_gp = train.gan_loss(
        model,
        gradient_penalty_weight=1.0,
        gradient_penalty_one_sided=one_sided)
    self.assertIsInstance(loss_gp, namedtuples.GANLoss)
    # Check values.
    with self.test_session(use_gpu=True) as sess:
      variables.global_variables_initializer().run()
      loss_gen_np, loss_gen_gp_np = sess.run(
          [loss.generator_loss, loss_gp.generator_loss])
      loss_dis_np, loss_dis_gp_np = sess.run(
          [loss.discriminator_loss, loss_gp.discriminator_loss])
    # The penalty leaves the generator loss untouched and strictly
    # increases the discriminator loss.
    self.assertEqual(loss_gen_np, loss_gen_gp_np)
    self.assertLess(loss_dis_np, loss_dis_gp_np)
  @parameterized.named_parameters(
      ('infogan', get_infogan_model),
      ('callable_infogan', get_callable_infogan_model),
  )
  def test_mutual_info_penalty(self, create_gan_model_fn):
    """Test mutual information penalty option."""
    # Only checks that graph construction succeeds with a tensor weight.
    train.gan_loss(
        create_gan_model_fn(),
        mutual_information_penalty_weight=constant_op.constant(1.0))
  @parameterized.named_parameters(
      ('gan', get_gan_model),
      ('callable_gan', get_callable_gan_model),
      ('infogan', get_infogan_model),
      ('callable_infogan', get_callable_infogan_model),
      ('acgan', get_acgan_model),
      ('callable_acgan', get_callable_acgan_model),
  )
  def test_regularization_helper(self, get_gan_model_fn):
    """Test regularization loss."""
    # Evaluate losses without regularization.
    no_reg_loss = train.gan_loss(get_gan_model_fn())
    with self.test_session(use_gpu=True):
      no_reg_loss_gen_np = no_reg_loss.generator_loss.eval()
      no_reg_loss_dis_np = no_reg_loss.discriminator_loss.eval()
    # Register scoped regularization losses: 3.0 under the generator scope,
    # 2.0 under the discriminator scope.
    with ops.name_scope(get_gan_model_fn().generator_scope.name):
      ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES,
                            constant_op.constant(3.0))
    with ops.name_scope(get_gan_model_fn().discriminator_scope.name):
      ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES,
                            constant_op.constant(2.0))
    # Check that losses now include the correct regularization values.
    reg_loss = train.gan_loss(get_gan_model_fn())
    with self.test_session(use_gpu=True):
      reg_loss_gen_np = reg_loss.generator_loss.eval()
      reg_loss_dis_np = reg_loss.discriminator_loss.eval()
    self.assertEqual(3.0, reg_loss_gen_np - no_reg_loss_gen_np)
    self.assertEqual(2.0, reg_loss_dis_np - no_reg_loss_dis_np)
  @parameterized.named_parameters(
      ('notcallable', create_acgan_model),
      ('callable', create_callable_acgan_model),
  )
  def test_acgan(self, create_gan_model_fn):
    """Test that ACGAN models work."""
    model = create_gan_model_fn()
    loss = train.gan_loss(model)
    loss_ac_gen = train.gan_loss(model, aux_cond_generator_weight=1.0)
    loss_ac_dis = train.gan_loss(model, aux_cond_discriminator_weight=1.0)
    self.assertIsInstance(loss, namedtuples.GANLoss)
    self.assertIsInstance(loss_ac_gen, namedtuples.GANLoss)
    self.assertIsInstance(loss_ac_dis, namedtuples.GANLoss)
    # Check values.
    with self.test_session(use_gpu=True) as sess:
      variables.global_variables_initializer().run()
      loss_gen_np, loss_ac_gen_gen_np, loss_ac_dis_gen_np = sess.run([
          loss.generator_loss, loss_ac_gen.generator_loss,
          loss_ac_dis.generator_loss
      ])
      loss_dis_np, loss_ac_gen_dis_np, loss_ac_dis_dis_np = sess.run([
          loss.discriminator_loss, loss_ac_gen.discriminator_loss,
          loss_ac_dis.discriminator_loss
      ])
    self.assertLess(loss_gen_np, loss_dis_np)
    # Auxiliary-conditional losses must evaluate to scalars.
    self.assertTrue(np.isscalar(loss_ac_gen_gen_np))
    self.assertTrue(np.isscalar(loss_ac_dis_gen_np))
    self.assertTrue(np.isscalar(loss_ac_gen_dis_np))
    self.assertTrue(np.isscalar(loss_ac_dis_dis_np))
  @parameterized.named_parameters(
      ('notcallable', create_cyclegan_model),
      ('callable', create_callable_cyclegan_model),
  )
  def test_cyclegan(self, create_gan_model_fn):
    """Test that CycleGan models work."""
    model = create_gan_model_fn()
    loss = train.cyclegan_loss(model)
    self.assertIsInstance(loss, namedtuples.CycleGANLoss)
    # Check values.
    with self.test_session(use_gpu=True) as sess:
      variables.global_variables_initializer().run()
      (loss_x2y_gen_np, loss_x2y_dis_np, loss_y2x_gen_np,
       loss_y2x_dis_np) = sess.run([
           loss.loss_x2y.generator_loss, loss.loss_x2y.discriminator_loss,
           loss.loss_y2x.generator_loss, loss.loss_y2x.discriminator_loss
       ])
    self.assertGreater(loss_x2y_gen_np, loss_x2y_dis_np)
    self.assertGreater(loss_y2x_gen_np, loss_y2x_dis_np)
    self.assertTrue(np.isscalar(loss_x2y_gen_np))
    self.assertTrue(np.isscalar(loss_x2y_dis_np))
    self.assertTrue(np.isscalar(loss_y2x_gen_np))
    self.assertTrue(np.isscalar(loss_y2x_dis_np))
  @parameterized.named_parameters(
      ('gan', create_gan_model),
      ('callable_gan', create_callable_gan_model),
      ('infogan', create_infogan_model),
      ('callable_infogan', create_callable_infogan_model),
      ('acgan', create_acgan_model),
      ('callable_acgan', create_callable_acgan_model),
  )
  def test_tensor_pool(self, create_gan_model_fn):
    """Test tensor pool option."""
    model = create_gan_model_fn()
    tensor_pool_fn = lambda x: random_tensor_pool.tensor_pool(x, pool_size=5)
    loss = train.gan_loss(model, tensor_pool_fn=tensor_pool_fn)
    self.assertIsInstance(loss, namedtuples.GANLoss)
    # Check values.
    with self.test_session(use_gpu=True) as sess:
      variables.global_variables_initializer().run()
      # Run enough iterations to cycle the pool; should not raise.
      for _ in range(10):
        sess.run([loss.generator_loss, loss.discriminator_loss])
  def test_discriminator_only_sees_pool(self):
    """Checks that discriminator only sees pooled values."""
    def checker_gen_fn(_):
      return constant_op.constant(0.0)
    model = train.gan_model(
        checker_gen_fn,
        discriminator_model,
        real_data=array_ops.zeros([]),
        generator_inputs=random_ops.random_normal([]))
    def tensor_pool_fn(_):
      return (random_ops.random_uniform([]), random_ops.random_uniform([]))
    def checker_dis_fn(inputs, _):
      """Discriminator that checks that it only sees pooled Tensors."""
      # The generator output is constant; pooled replacements are not.
      self.assertFalse(constant_op.is_constant(inputs))
      return inputs
    model = model._replace(
        discriminator_fn=checker_dis_fn)
    train.gan_loss(model, tensor_pool_fn=tensor_pool_fn)
  def test_doesnt_crash_when_in_nested_scope(self):
    """Gradient-penalty loss must build both inside and outside a scope."""
    with variable_scope.variable_scope('outer_scope'):
      gan_model = train.gan_model(
          generator_model,
          discriminator_model,
          real_data=array_ops.zeros([1, 2]),
          generator_inputs=random_ops.random_normal([1, 2]))
      # This should work inside a scope.
      train.gan_loss(gan_model, gradient_penalty_weight=1.0)
    # This should also work outside a scope.
    train.gan_loss(gan_model, gradient_penalty_weight=1.0)
class TensorPoolAdjusteModelTest(test.TestCase):
  """Tests for `train._tensor_pool_adjusted_model`."""
  def _check_tensor_pool_adjusted_model_outputs(
      self, tensor1, tensor2, pool_size):
    """Checks `tensor2` replays values previously produced by `tensor1`."""
    history_values = []
    with self.test_session(use_gpu=True) as sess:
      variables.global_variables_initializer().run()
      for i in range(2 * pool_size):
        t1, t2 = sess.run([tensor1, tensor2])
        history_values.append(t1)
        if i < pool_size:
          # For [0, pool_size), the pool is not full, tensor1 should be equal
          # to tensor2 as the pool.
          self.assertAllEqual(t1, t2)
        else:
          # For [pool_size, ?), the pool is full, tensor2 must be equal to some
          # historical values of tensor1 (which is previously stored in the
          # pool).
          self.assertTrue(any([(v == t2).all() for v in history_values]))
  def _make_new_model_and_check(self, model, pool_size):
    """Wrap `model` with a tensor pool and sanity-check the result."""
    pool_fn = lambda x: random_tensor_pool.tensor_pool(x, pool_size=pool_size)
    new_model = train._tensor_pool_adjusted_model(model, pool_fn)
    # 'Generator/dummy_g:0' and 'Discriminator/dummy_d:0'
    self.assertEqual(2, len(ops.get_collection(ops.GraphKeys.VARIABLES)))
    # The pooled output must be a different tensor from the original.
    self.assertIsNot(new_model.discriminator_gen_outputs,
                     model.discriminator_gen_outputs)
    return new_model
  def test_tensor_pool_adjusted_model_gan(self):
    """Test `_tensor_pool_adjusted_model` for gan model."""
    pool_size = 5
    model = create_gan_model()
    new_model = self._make_new_model_and_check(model, pool_size)
    # Check values.
    self._check_tensor_pool_adjusted_model_outputs(
        model.discriminator_gen_outputs, new_model.discriminator_gen_outputs,
        pool_size)
  def test_tensor_pool_adjusted_model_infogan(self):
    """Test _tensor_pool_adjusted_model for infogan model."""
    pool_size = 5
    model = create_infogan_model()
    new_model = self._make_new_model_and_check(model, pool_size)
    # Check values.
    self.assertIsNot(new_model.predicted_distributions,
                     model.predicted_distributions)
    self._check_tensor_pool_adjusted_model_outputs(
        model.discriminator_gen_outputs, new_model.discriminator_gen_outputs,
        pool_size)
  def test_tensor_pool_adjusted_model_acgan(self):
    """Test _tensor_pool_adjusted_model for acgan model."""
    pool_size = 5
    model = create_acgan_model()
    new_model = self._make_new_model_and_check(model, pool_size)
    # Check values.
    self.assertIsNot(new_model.discriminator_gen_classification_logits,
                     model.discriminator_gen_classification_logits)
    self._check_tensor_pool_adjusted_model_outputs(
        model.discriminator_gen_outputs, new_model.discriminator_gen_outputs,
        pool_size)
class GANTrainOpsTest(test.TestCase, parameterized.TestCase):
  """Tests for `gan_train_ops`."""
  @parameterized.named_parameters(
      ('gan', create_gan_model),
      ('callable_gan', create_callable_gan_model),
      ('infogan', create_infogan_model),
      ('callable_infogan', create_callable_infogan_model),
      ('acgan', create_acgan_model),
      ('callable_acgan', create_callable_acgan_model),
  )
  def test_output_type(self, create_gan_model_fn):
    """`gan_train_ops` returns a GANTrainOps tuple for every model type."""
    model = create_gan_model_fn()
    loss = train.gan_loss(model)
    g_opt = gradient_descent.GradientDescentOptimizer(1.0)
    d_opt = gradient_descent.GradientDescentOptimizer(1.0)
    train_ops = train.gan_train_ops(
        model,
        loss,
        g_opt,
        d_opt,
        summarize_gradients=True,
        colocate_gradients_with_ops=True)
    self.assertIsInstance(train_ops, namedtuples.GANTrainOps)
  # TODO(joelshor): Add a test to check that custom update op is run.
  @parameterized.named_parameters(
      ('gan', create_gan_model, False),
      ('gan_provideupdates', create_gan_model, True),
      ('callable_gan', create_callable_gan_model, False),
      ('callable_gan_provideupdates', create_callable_gan_model, True),
      ('infogan', create_infogan_model, False),
      ('infogan_provideupdates', create_infogan_model, True),
      ('callable_infogan', create_callable_infogan_model, False),
      ('callable_infogan_provideupdates', create_callable_infogan_model, True),
      ('acgan', create_acgan_model, False),
      ('acgan_provideupdates', create_acgan_model, True),
      ('callable_acgan', create_callable_acgan_model, False),
      ('callable_acgan_provideupdates', create_callable_acgan_model, True),
  )
  def test_unused_update_ops(self, create_gan_model_fn, provide_update_ops):
    """Unused UPDATE_OPS are rejected unless checking is disabled."""
    model = create_gan_model_fn()
    loss = train.gan_loss(model)
    # Add generator and discriminator update ops.
    with variable_scope.variable_scope(model.generator_scope):
      gen_update_count = variable_scope.get_variable('gen_count', initializer=0)
      gen_update_op = gen_update_count.assign_add(1)
      ops.add_to_collection(ops.GraphKeys.UPDATE_OPS, gen_update_op)
    with variable_scope.variable_scope(model.discriminator_scope):
      dis_update_count = variable_scope.get_variable('dis_count', initializer=0)
      dis_update_op = dis_update_count.assign_add(1)
      ops.add_to_collection(ops.GraphKeys.UPDATE_OPS, dis_update_op)
    # Add an update op outside the generator and discriminator scopes.
    if provide_update_ops:
      kwargs = {
          'update_ops': [
              constant_op.constant(1.0), gen_update_op, dis_update_op
          ]
      }
    else:
      ops.add_to_collection(ops.GraphKeys.UPDATE_OPS, constant_op.constant(1.0))
      kwargs = {}
    g_opt = gradient_descent.GradientDescentOptimizer(1.0)
    d_opt = gradient_descent.GradientDescentOptimizer(1.0)
    # The out-of-scope op is unused, so checking must raise.
    with self.assertRaisesRegexp(ValueError, 'There are unused update ops:'):
      train.gan_train_ops(
          model, loss, g_opt, d_opt, check_for_unused_update_ops=True, **kwargs)
    train_ops = train.gan_train_ops(
        model, loss, g_opt, d_opt, check_for_unused_update_ops=False, **kwargs)
    with self.test_session(use_gpu=True) as sess:
      sess.run(variables.global_variables_initializer())
      self.assertEqual(0, gen_update_count.eval())
      self.assertEqual(0, dis_update_count.eval())
      # Each train op runs only its own side's update op.
      train_ops.generator_train_op.eval()
      self.assertEqual(1, gen_update_count.eval())
      self.assertEqual(0, dis_update_count.eval())
      train_ops.discriminator_train_op.eval()
      self.assertEqual(1, gen_update_count.eval())
      self.assertEqual(1, dis_update_count.eval())
  @parameterized.named_parameters(
      ('gan', create_gan_model, False),
      ('callable_gan', create_callable_gan_model, False),
      ('infogan', create_infogan_model, False),
      ('callable_infogan', create_callable_infogan_model, False),
      ('acgan', create_acgan_model, False),
      ('callable_acgan', create_callable_acgan_model, False),
      ('gan_canbeint32', create_gan_model, True),
  )
  def test_sync_replicas(self, create_gan_model_fn, create_global_step):
    """Train ops built with SyncReplicasOptimizer run without side effects."""
    model = create_gan_model_fn()
    loss = train.gan_loss(model)
    num_trainable_vars = len(variables_lib.get_trainable_variables())
    if create_global_step:
      gstep = variable_scope.get_variable(
          'custom_gstep', dtype=dtypes.int32, initializer=0, trainable=False)
      ops.add_to_collection(ops.GraphKeys.GLOBAL_STEP, gstep)
    g_opt = get_sync_optimizer()
    d_opt = get_sync_optimizer()
    train_ops = train.gan_train_ops(
        model, loss, generator_optimizer=g_opt, discriminator_optimizer=d_opt)
    self.assertIsInstance(train_ops, namedtuples.GANTrainOps)
    # No new trainable variables should have been added.
    self.assertEqual(num_trainable_vars,
                     len(variables_lib.get_trainable_variables()))
    g_sync_init_op = g_opt.get_init_tokens_op(num_tokens=1)
    d_sync_init_op = d_opt.get_init_tokens_op(num_tokens=1)
    # Check that update op is run properly.
    global_step = training_util.get_or_create_global_step()
    with self.test_session(use_gpu=True) as sess:
      variables.global_variables_initializer().run()
      variables.local_variables_initializer().run()
      g_opt.chief_init_op.run()
      d_opt.chief_init_op.run()
      gstep_before = global_step.eval()
      # Start required queue runner for SyncReplicasOptimizer.
      coord = coordinator.Coordinator()
      g_threads = g_opt.get_chief_queue_runner().create_threads(sess, coord)
      d_threads = d_opt.get_chief_queue_runner().create_threads(sess, coord)
      g_sync_init_op.run()
      d_sync_init_op.run()
      train_ops.generator_train_op.eval()
      # Check that global step wasn't incremented.
      self.assertEqual(gstep_before, global_step.eval())
      train_ops.discriminator_train_op.eval()
      # Check that global step wasn't incremented.
      self.assertEqual(gstep_before, global_step.eval())
      coord.request_stop()
      coord.join(g_threads + d_threads)
class GANTrainTest(test.TestCase, parameterized.TestCase):
  """Tests for `gan_train`."""
  def _gan_train_ops(self, generator_add, discriminator_add):
    """Build GANTrainOps whose ops bump the global step by known amounts."""
    step = training_util.create_global_step()
    # Increment the global count every time a train op is run so we can count
    # the number of times they're run.
    # NOTE: `use_locking=True` is required to avoid race conditions with
    # joint training.
    train_ops = namedtuples.GANTrainOps(
        generator_train_op=step.assign_add(generator_add, use_locking=True),
        discriminator_train_op=step.assign_add(
            discriminator_add, use_locking=True),
        global_step_inc_op=step.assign_add(1))
    return train_ops
  @parameterized.named_parameters(
      ('gan', create_gan_model),
      ('callable_gan', create_callable_gan_model),
      ('infogan', create_infogan_model),
      ('callable_infogan', create_callable_infogan_model),
      ('acgan', create_acgan_model),
      ('callable_acgan', create_callable_acgan_model),
  )
  def test_run_helper(self, create_gan_model_fn):
    """End-to-end `gan_train` run that stops after two steps."""
    random_seed.set_random_seed(1234)
    model = create_gan_model_fn()
    loss = train.gan_loss(model)
    g_opt = gradient_descent.GradientDescentOptimizer(1.0)
    d_opt = gradient_descent.GradientDescentOptimizer(1.0)
    train_ops = train.gan_train_ops(model, loss, g_opt, d_opt)
    final_step = train.gan_train(
        train_ops,
        logdir='',
        hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=2)])
    self.assertTrue(np.isscalar(final_step))
    self.assertEqual(2, final_step)
  @parameterized.named_parameters(
      ('seq_train_steps', train.get_sequential_train_hooks),
      ('efficient_seq_train_steps', train.get_joint_train_hooks),
  )
  def test_multiple_steps(self, get_hooks_fn_fn):
    """Test multiple train steps."""
    train_ops = self._gan_train_ops(generator_add=10, discriminator_add=100)
    train_steps = namedtuples.GANTrainSteps(
        generator_train_steps=3, discriminator_train_steps=4)
    final_step = train.gan_train(
        train_ops,
        get_hooks_fn=get_hooks_fn_fn(train_steps),
        logdir='',
        hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=1)])
    self.assertTrue(np.isscalar(final_step))
    # 1 global-step inc + 3 generator steps (+10) + 4 discriminator steps
    # (+100).
    self.assertEqual(1 + 3 * 10 + 4 * 100, final_step)
  def test_supervisor_run_gan_model_train_ops_multiple_steps(self):
    """slim's train loop runs GANTrainOps via `get_sequential_train_steps`."""
    step = training_util.create_global_step()
    train_ops = namedtuples.GANTrainOps(
        generator_train_op=constant_op.constant(3.0),
        discriminator_train_op=constant_op.constant(2.0),
        global_step_inc_op=step.assign_add(1))
    train_steps = namedtuples.GANTrainSteps(
        generator_train_steps=3, discriminator_train_steps=4)
    final_loss = slim_learning.train(
        train_op=train_ops,
        logdir='',
        global_step=step,
        number_of_steps=1,
        train_step_fn=train.get_sequential_train_steps(train_steps))
    self.assertTrue(np.isscalar(final_loss))
    # 3 generator steps * 3.0 + 4 discriminator steps * 2.0 = 17.0.
    self.assertEqual(17.0, final_loss)
class PatchGANTest(test.TestCase, parameterized.TestCase):
  """Tests that functions work on PatchGAN style output."""
  @parameterized.named_parameters(
      ('gan', create_gan_model),
      ('callable_gan', create_callable_gan_model),
      ('infogan', create_infogan_model),
      ('callable_infogan', create_callable_infogan_model),
      ('acgan', create_acgan_model),
      ('callable_acgan', create_callable_acgan_model),
  )
  def test_patchgan(self, create_gan_model_fn):
    """Ensure that patch-based discriminators work end-to-end."""
    random_seed.set_random_seed(1234)
    model = create_gan_model_fn()
    loss = train.gan_loss(model)
    g_opt = gradient_descent.GradientDescentOptimizer(1.0)
    d_opt = gradient_descent.GradientDescentOptimizer(1.0)
    train_ops = train.gan_train_ops(model, loss, g_opt, d_opt)
    final_step = train.gan_train(
        train_ops,
        logdir='',
        hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=2)])
    # Training ran exactly the two requested steps.
    self.assertTrue(np.isscalar(final_step))
    self.assertEqual(2, final_step)
# Run all test cases when this module is executed as a script.
if __name__ == '__main__':
  test.main()
| |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import eventlet
from eventlet.green import threading
from eventlet.green import time
from eventlet import greenpool
from eventlet import semaphore
from oslo.config import cfg
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.i18n import _LE
from sahara.i18n import _LW
from sahara.openstack.common import log as logging
# Global oslo.config options registry and module-level logger.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# TODO(slukjanov): it'll be better to use common_context.RequestContext as base
class Context(object):
    """Per-request context: user/tenant identity, Keystone auth data and a
    semaphore bounding concurrent remote operations.
    """

    def __init__(self,
                 user_id=None,
                 tenant_id=None,
                 token=None,
                 service_catalog=None,
                 username=None,
                 tenant_name=None,
                 roles=None,
                 is_admin=None,
                 remote_semaphore=None,
                 auth_uri=None,
                 **kwargs):
        # Unknown keyword arguments are dropped with a warning rather than
        # rejected, for compatibility with callers passing extra fields.
        if kwargs:
            LOG.warn(_LW('Arguments dropped when creating context: %s'),
                     kwargs)
        self.user_id = user_id
        self.tenant_id = tenant_id
        self.token = token
        self.service_catalog = service_catalog
        self.username = username
        self.tenant_name = tenant_name
        self.is_admin = is_admin
        # Bounds the number of concurrent remote operations for this context.
        self.remote_semaphore = remote_semaphore or semaphore.Semaphore(
            CONF.cluster_remote_threshold)
        self.roles = roles
        if auth_uri:
            self.auth_uri = auth_uri
        else:
            self.auth_uri = _get_auth_uri()

    def clone(self):
        """Return a copy of this context (shares the remote semaphore).

        Keyword arguments are used so the copy cannot silently break if the
        __init__ parameter order ever changes (the old positional call could).
        """
        return Context(
            user_id=self.user_id,
            tenant_id=self.tenant_id,
            token=self.token,
            service_catalog=self.service_catalog,
            username=self.username,
            tenant_name=self.tenant_name,
            roles=self.roles,
            is_admin=self.is_admin,
            remote_semaphore=self.remote_semaphore,
            auth_uri=self.auth_uri)

    def to_dict(self):
        """Serialize the context; the semaphore is deliberately excluded."""
        return {
            'user_id': self.user_id,
            'tenant_id': self.tenant_id,
            'token': self.token,
            'service_catalog': self.service_catalog,
            'username': self.username,
            'tenant_name': self.tenant_name,
            'is_admin': self.is_admin,
            'roles': self.roles,
            'auth_uri': self.auth_uri,
        }

    def is_auth_capable(self):
        """Truthy iff enough data is present to authenticate with Keystone."""
        return (self.service_catalog and self.token and self.tenant_id and
                self.user_id)
def get_admin_context():
    """Return a fresh context with admin privileges and default auth data."""
    admin_ctx = Context(is_admin=True)
    return admin_ctx
# Thread-local (greenthread-local, via eventlet) storage for the current
# context, and the attribute name it is stored under.
_CTX_STORE = threading.local()
_CTX_KEY = 'current_ctx'
def has_ctx():
    """Return True if a context is bound to the current (green)thread."""
    return hasattr(_CTX_STORE, _CTX_KEY)
def ctx():
    """Return the current thread's context.

    Raises:
        IncorrectStateError: if no context is bound to this thread.
    """
    try:
        return getattr(_CTX_STORE, _CTX_KEY)
    except AttributeError:
        raise ex.IncorrectStateError(_("Context isn't available here"))
def current():
    """Alias for ctx(): return the current thread's context."""
    return ctx()
def set_ctx(new_ctx):
    """Bind `new_ctx` as the current context, or clear it when falsy."""
    if new_ctx:
        setattr(_CTX_STORE, _CTX_KEY, new_ctx)
    elif has_ctx():
        delattr(_CTX_STORE, _CTX_KEY)
def _get_auth_uri():
    """Resolve the Keystone auth URI from keystone_authtoken configuration.

    Prefers the explicitly configured auth_uri; otherwise derives it from
    identity_uri (or auth host/port/protocol) plus the identity API version.
    """
    auth_cfg = CONF.keystone_authtoken
    if auth_cfg.auth_uri is not None:
        return auth_cfg.auth_uri
    if auth_cfg.identity_uri is not None:
        identity_uri = auth_cfg.identity_uri
    else:
        identity_uri = '%s://%s:%s' % (
            auth_cfg.auth_protocol, auth_cfg.auth_host, auth_cfg.auth_port)
    version = 'v2.0' if CONF.use_identity_api_v3 is False else 'v3'
    return '%s/%s' % (identity_uri, version)
def _wrapper(ctx, thread_description, thread_group, func, *args, **kwargs):
    # Entry point for spawned greenthreads: binds `ctx` for the thread, runs
    # `func`, records the first failure on `thread_group` (later failures are
    # only logged), and always notifies the group and clears the context.
    try:
        set_ctx(ctx)
        func(*args, **kwargs)
    except BaseException as e:
        LOG.exception(
            _LE("Thread '%(thread)s' fails with exception: '%(exception)s'"),
            {'thread': thread_description, 'exception': e})
        if thread_group and not thread_group.exc:
            # Only the first exception is kept for re-raising in _wait().
            thread_group.exc = e
            thread_group.failed_thread = thread_description
    finally:
        if thread_group:
            thread_group._on_thread_exit()
        set_ctx(None)
def spawn(thread_description, func, *args, **kwargs):
    """Run `func` in a new greenthread under a clone of the current context."""
    cloned_ctx = current().clone()
    eventlet.spawn(
        _wrapper, cloned_ctx, thread_description, None, func, *args, **kwargs)
class ThreadGroup(object):
    """ThreadGroup object.

    It is advised to use ThreadGroup as a context manager instead
    of instantiating and calling _wait() manually. The __exit__()
    guarantees to exit only after all child threads are done, even if
    spawning code has thrown an exception.
    """
    def __init__(self, thread_pool_size=1000):
        """Create a group backed by a GreenPool of `thread_pool_size`."""
        self.tg = greenpool.GreenPool(size=thread_pool_size)
        self.exc = None  # first exception raised by a child thread
        self.failed_thread = None  # description of the thread that failed
        self.threads = 0  # number of live child threads
        self.cv = threading.Condition()  # guards `threads`, wakes _wait()
    def spawn(self, thread_description, func, *args, **kwargs):
        """Run `func` in the pool under a clone of the current context."""
        self.tg.spawn(_wrapper, current().clone(), thread_description,
                      self, func, *args, **kwargs)
        with self.cv:
            self.threads += 1
    def _on_thread_exit(self):
        # Called by _wrapper() when a child finishes; wakes _wait() once the
        # last child has exited.
        with self.cv:
            self.threads -= 1
            if self.threads == 0:
                # notify_all() replaces the deprecated notifyAll() alias.
                self.cv.notify_all()
    # NOTE(dmitryme): A little rationale on why we reimplemented wait():
    # * Eventlet's GreenPool.wait() can hung
    # * Oslo's ThreadGroup.wait() can exit before all threads are done
    #
    def _wait(self):
        """Using of _wait() method.

        It is preferred to use the class as a context manager and do not
        use _wait() directly, see class docstring for an explanation.
        """
        with self.cv:
            while self.threads > 0:
                self.cv.wait()
        if self.exc:
            raise ex.ThreadException(self.failed_thread, self.exc)
    def __enter__(self):
        return self
    def __exit__(self, *exc_info):
        # Renamed from `*ex`, which shadowed the `sahara.exceptions` module
        # imported as `ex` at module level.
        if not any(exc_info):
            self._wait()
        else:
            # If spawning code thrown an exception, it had higher priority
            # for us than the one thrown inside child thread (if any)
            try:
                self._wait()
            except Exception:
                # that will make __exit__ throw original exception
                pass
def sleep(seconds=0):
    """Sleep for `seconds`, yielding control to other greenthreads
    (uses eventlet's green `time`)."""
    time.sleep(seconds)
| |
import time
import random
import pickle
import os
from datetime import date
# Slack channel id the bot answers in (besides DMs); taken from the
# environment, so this is None when MARVIN_SLACK_CHANNEL_ID is unset.
RANDOM_CHANNEL_ID = os.environ.get("MARVIN_SLACK_CHANNEL_ID")
# Pickle file persisting the (week_number, animal) tuple between runs.
FILE="plugins/animal.data"
ANIMALS = ['Adelie Penguin',
'African Bush Elephant',
'African Civet',
'African Clawed Frog',
'African Forest Elephant',
'African Palm Civet',
'African Penguin',
'African Tree Toad',
'African Wild Dog',
'Albatross',
'Aldabra Giant Tortoise',
'Alligator',
'Angelfish',
'Ant',
'Anteater',
'Antelope',
'Arctic Fox',
'Arctic Hare',
'Arctic Wolf',
'Armadillo',
'Asian Elephant',
'Asian Giant Hornet',
'Asian Palm Civet',
'Asiatic Black Bear',
'Avocet',
'Axolotl',
'Aye Aye',
'Baboon',
'Bactrian Camel',
'Badger',
'Banded Palm Civet',
'Bandicoot',
'Barn Owl',
'Barnacle',
'Barracuda',
'Basking Shark',
'Bat',
'Bear',
'Bearded Dragon',
'Beaver',
'Beetle',
'Bengal Tiger',
'Binturong',
'Birds Of Paradise',
'Bison',
'Black Bear',
'Black Rhinoceros',
'Black Widow Spider',
'Blue Whale',
'Bobcat',
'Bonobo',
'Booby',
'Bornean Orang-utan',
'Borneo Elephant',
'Bottle Nosed Dolphin',
'Brown Bear',
'Budgerigar',
'Buffalo',
'Bull Shark',
'Bullfrog',
'Bumble Bee',
'Burrowing Frog',
'Butterfly',
'Butterfly Fish',
'Caiman',
'Caiman Lizard',
'Camel',
'Capybara',
'Caracal',
'Cassowary',
'Caterpillar',
'Catfish',
'Centipede',
'Chameleon',
'Chamois',
'Cheetah',
'Chicken',
'Chimpanzee',
'Chinchilla',
'Chinstrap Penguin',
'Chipmunk',
'Cichlid',
'Clouded Leopard',
'Clown Fish',
'Coati',
'Cockroach',
'Collared Peccary',
'Common Buzzard',
'Common Frog',
'Common Loon',
'Common Toad',
'Cottontop Tamarin',
'Cougar',
'Cow',
'Coyote',
'Crab',
'Crab-Eating Macaque',
'Crane',
'Crested Penguin',
'Crocodile',
'Cross River Gorilla',
'Cuscus',
'Cuttlefish',
'Darwin\'s Frog',
'Deer',
'Desert Tortoise',
'Dhole',
'Dingo',
'Discus',
'Dodo',
'Dolphin',
'Donkey',
'Dormouse',
'Dragonfly',
'Drever',
'Duck',
'Dugong',
'Dunker',
'Dusky Dolphin',
'Dwarf Crocodile',
'Eagle',
'Earwig',
'Eastern Gorilla',
'Eastern Lowland Gorilla',
'Echidna',
'Egyptian Mau',
'Electric Eel',
'Elephant',
'Elephant Seal',
'Elephant Shrew',
'Emperor Penguin',
'Emperor Tamarin',
'Emu',
'Falcon',
'Fennec Fox',
'Ferret',
'Fin Whale',
'Fire-Bellied Toad',
'Fishing Cat',
'Flamingo',
'Flounder',
'Flying Squirrel',
'Fossa',
'Fox',
'Frigatebird',
'Frilled Lizard',
'Frog',
'Fur Seal',
'Galapagos Penguin',
'Galapagos Tortoise',
'Gar',
'Gecko',
'Gentoo Penguin',
'Geoffroys Tamarin',
'Gerbil',
'Gharial',
'Giant African Land Snail',
'Giant Clam',
'Giant Panda Bear',
'Gibbon',
'Gila Monster',
'Giraffe',
'Glass Lizard',
'Glow Worm',
'Goat',
'Golden Lion Tamarin',
'Golden Oriole',
'Goose',
'Gopher',
'Gorilla',
'Grasshopper',
'Great White Shark',
'Green Bee-Eater',
'Grey Mouse Lemur',
'Grey Reef Shark',
'Grey Seal',
'Grizzly Bear',
'Grouse',
'Guinea Fowl',
'Guinea Pig',
'Guppy',
'Hammerhead Shark',
'Hamster',
'Hare',
'Harrier',
'Hedgehog',
'Hercules Beetle',
'Hermit Crab',
'Heron',
'Highland Cattle',
'Hippopotamus',
'Honey Bee',
'Horn Shark',
'Horned Frog',
'Horse',
'Horseshoe Crab',
'Howler Monkey',
'Human',
'Humboldt Penguin',
'Hummingbird',
'Humpback Whale',
'Hyena',
'Ibis',
'Iguana',
'Impala',
'Indian Elephant',
'Indian Palm Squirrel',
'Indian Rhinoceros',
'Indian Star Tortoise',
'Indochinese Tiger',
'Indri',
'Jackal',
'Jaguar',
'Japanese Chin',
'Japanese Macaque',
'Javan Rhinoceros',
'Jellyfish',
'Kakapo',
'Kangaroo',
'Keel Billed Toucan',
'Killer Whale',
'King Crab',
'King Penguin',
'Kingfisher',
'Kiwi',
'Koala',
'Komodo Dragon',
'Kudu',
'Ladybird',
'Leaf-Tailed Gecko',
'Lemming',
'Lemur',
'Leopard',
'Leopard Cat',
'Leopard Seal',
'Leopard Tortoise',
'Liger',
'Lion',
'Lionfish',
'Little Penguin',
'Lizard',
'Llama',
'Lobster',
'Long-Eared Owl',
'Lynx',
'Macaroni Penguin',
'Macaw',
'Magellanic Penguin',
'Magpie',
'Maine Coon',
'Malayan Civet',
'Malayan Tiger',
'Manatee',
'Mandrill',
'Manta Ray',
'Marine Toad',
'Markhor',
'Marsh Frog',
'Masked Palm Civet',
'Mayfly',
'Meerkat',
'Millipede',
'Minke Whale',
'Mole',
'Mongoose',
'Monitor Lizard',
'Monkey',
'Monte Iberia Eleuth',
'Moorhen',
'Moose',
'Moray Eel',
'Moth',
'Mountain Gorilla',
'Mountain Lion',
'Mouse',
'Mule',
'Newt',
'Nightingale',
'Numbat',
'Nurse Shark',
'Ocelot',
'Octopus',
'Okapi',
'Olm',
'Opossum',
'Orang-utan',
'Ostrich',
'Otter',
'Oyster',
'Pademelon',
'Panther',
'Parrot',
'Patas Monkey',
'Peacock',
'Pelican',
'Penguin',
'Pheasant',
'Pied Tamarin',
'Pig',
'Pika',
'Pike',
'Pink Fairy Armadillo',
'Piranha',
'Platypus',
'Poison Dart Frog',
'Polar Bear',
'Pond Skater',
'Poodle',
'Pool Frog',
'Porcupine',
'Possum',
'Prawn',
'Proboscis Monkey',
'Puffer Fish',
'Puffin',
'Puma',
'Purple Emperor',
'Puss Moth',
'Pygmy Hippopotamus',
'Pygmy Marmoset',
'Quail',
'Quetzal',
'Quokka',
'Quoll',
'Rabbit',
'Raccoon',
'Radiated Tortoise',
'Rat',
'Rattlesnake',
'Red Knee Tarantula',
'Red Panda',
'Red Wolf',
'Red-handed Tamarin',
'Reindeer',
'Rhinoceros',
'River Dolphin',
'River Turtle',
'Robin',
'Rock Hyrax',
'Rockhopper Penguin',
'Roseate Spoonbill',
'Royal Penguin',
'Sabre-Toothed Tiger',
'Salamander',
'Sand Lizard',
'Saola',
'Scorpion',
'Scorpion Fish',
'Sea Dragon',
'Sea Lion',
'Sea Otter',
'Sea Slug',
'Sea Squirt',
'Sea Turtle',
'Sea Urchin',
'Seahorse',
'Seal',
'Serval',
'Sheep',
'Shrimp',
'Siamese Fighting Fish',
'Siberian Tiger',
'Skunk',
'Sloth',
'Slow Worm',
'Snail',
'Snake',
'Snapping Turtle',
'Snowshoe',
'Snowy Owl',
'South China Tiger',
'Spadefoot Toad',
'Sparrow',
'Spectacled Bear',
'Sperm Whale',
'Spider Monkey',
'Spiny Dogfish',
'Sponge',
'Squid',
'Squirrel',
'Squirrel Monkey',
'Sri Lankan Elephant',
'Stag Beetle',
'Starfish',
'Stellers Sea Cow',
'Stick Insect',
'Stingray',
'Stoat',
'Striped Rocket Frog',
'Sumatran Elephant',
'Sumatran Orang-utan',
'Sumatran Rhinoceros',
'Sumatran Tiger',
'Sun Bear',
'Swan',
'Tang',
'Tapir',
'Tarsier',
'Tasmanian Devil',
'Tawny Owl',
'Termite',
'Tetra',
'Thorny Devil',
'Tiger',
'Tiger Salamander',
'Tiger Shark',
'Tortoise',
'Toucan',
'Tree Frog',
'Tropicbird',
'Tuatara',
'Turkey',
'Uakari',
'Uguisu',
'Umbrellabird',
'Vampire Bat',
'Vervet Monkey',
'Vulture',
'Wallaby',
'Walrus',
'Warthog',
'Wasp',
'Water Buffalo',
'Water Dragon',
'Water Vole',
'Weasel',
'Western Gorilla',
'Western Lowland Gorilla',
'Whale Shark',
'White Faced Capuchin',
'White Rhinoceros',
'White Tiger',
'Wild Boar',
'Wildebeest',
'Wolf',
'Wolverine',
'Wombat',
'Woodlouse',
'Woodpecker',
'Woolly Monkey',
'Wrasse',
'X-Ray Tetra',
'Yak',
'Yellow-Eyed Penguin',
'Yorkshire Terrier',
'Zebra',
'Zebra Shark',
'Zebu',
'Zonkey',
'Zorse']
# Plugin hook lists read by the bot framework — NOTE(review): assumed to be
# the rtmbot convention (crontable = scheduled jobs, outputs = queued
# [channel, message] pairs to send); confirm against the host bot.
crontable = []
outputs = []
def process_message(data):
    """Answer messages mentioning "animal" with the animal of the week.

    Handles Slack RTM events that carry a "text" key, but only in the
    configured channel (RANDOM_CHANNEL_ID) or in direct messages (Slack DM
    channel ids start with "D").  The current pick is persisted in FILE as a
    (week_number, animal) tuple and rotated once per ISO week.

    Args:
        data: Slack RTM event dict; events without "text" are ignored.
    """
    if "text" not in data:
        return
    text = data["text"]
    channel = data["channel"]
    # Guard against the env var being unset: the original called
    # channel.startswith(None), which raises TypeError.
    allowed = channel.startswith("D") or (
        RANDOM_CHANNEL_ID is not None and channel.startswith(RANDOM_CHANNEL_ID)
    )
    if not (allowed and "animal" in text):
        return
    _, weekNumber, _ = date.today().isocalendar()
    if os.path.isfile(FILE):
        # Context manager closes the handle (original leaked it).
        with open(FILE, 'rb') as fh:
            aow = pickle.load(fh)
    else:
        aow = update_aow(weekNumber, '')
    # Rotate the pick when the stored week number is stale.
    if aow[0] != weekNumber:
        aow = update_aow(weekNumber, aow[1])
    response_text = 'The animal of the week is \"{0}\"'.format(aow[1])
    outputs.append([channel, response_text])
def update_aow(weekNumber, old_animal):
    """Pick a new animal (different from *old_animal*), persist and return it.

    Args:
        weekNumber: ISO week number the pick is valid for.
        old_animal: previous pick, excluded from the new draw.

    Returns:
        Tuple (weekNumber, new_animal) — the same shape stored in FILE.
    """
    aow = (weekNumber, random_animal(old_animal))
    # Write via a context manager so the handle is flushed and closed
    # (original passed an anonymous open() to pickle.dump and leaked it).
    with open(FILE, 'wb') as fh:
        pickle.dump(aow, fh)
    return aow
def random_animal(old_animal):
    """Return a random entry of ANIMALS that differs from *old_animal*."""
    while True:
        candidate = random.choice(ANIMALS)
        if candidate != old_animal:
            return candidate
| |
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# Build definition for the Foundation product: CoreFoundation (C) plus the
# Swift overlay, built as one static-and-dynamic library.  Script,
# StaticAndDynamicLibrary, Configuration, OSType and EnvironmentType come
# from the project's build DSL, defined elsewhere.
script = Script()
foundation = StaticAndDynamicLibrary("Foundation")
foundation.GCC_PREFIX_HEADER = 'CoreFoundation/Base.subproj/CoreFoundation_Prefix.h'
# Flags for compiling the Swift sources; extended per-platform below.
swift_cflags = ['-DDEPLOYMENT_RUNTIME_SWIFT']
# Per-platform C and linker flags for the CoreFoundation portion.
if Configuration.current.target.sdk == OSType.Linux:
    foundation.CFLAGS = '-DDEPLOYMENT_TARGET_LINUX -D_GNU_SOURCE -DCF_CHARACTERSET_DATA_DIR="CoreFoundation/CharacterSets"'
    foundation.LDFLAGS = '${SWIFT_USE_LINKER} -lswiftGlibc -Wl,-Bsymbolic '
    # Linux resolves ICU/curl/libxml2 through pkg-config (see below).
    Configuration.current.requires_pkg_config = True
elif Configuration.current.target.sdk == OSType.FreeBSD:
    foundation.CFLAGS = '-DDEPLOYMENT_TARGET_FREEBSD -I/usr/local/include -I/usr/local/include/libxml2 -I/usr/local/include/curl '
    foundation.LDFLAGS = ''
elif Configuration.current.target.sdk == OSType.MacOSX:
    foundation.CFLAGS = '-DDEPLOYMENT_TARGET_MACOSX '
    foundation.LDFLAGS = '-licucore -twolevel_namespace -Wl,-alias_list,CoreFoundation/Base.subproj/DarwinSymbolAliases -sectcreate __UNICODE __csbitmaps CoreFoundation/CharacterSets/CFCharacterSetBitmaps.bitmap -sectcreate __UNICODE __properties CoreFoundation/CharacterSets/CFUniCharPropertyDatabase.data -sectcreate __UNICODE __data CoreFoundation/CharacterSets/CFUnicodeData-L.mapping -segprot __UNICODE r r '
elif Configuration.current.target.sdk == OSType.Win32 and Configuration.current.target.environ == EnvironmentType.Cygnus:
    foundation.CFLAGS = '-DDEPLOYMENT_TARGET_LINUX -D_GNU_SOURCE -mcmodel=large '
    foundation.LDFLAGS = '${SWIFT_USE_LINKER} -lswiftGlibc `icu-config --ldflags` -Wl,--allow-multiple-definition '
    swift_cflags += ['-DCYGWIN']
# Debug builds link the Onone support library and allow @testable imports.
if Configuration.current.build_mode == Configuration.Debug:
    foundation.LDFLAGS += ' -lswiftSwiftOnoneSupport '
    swift_cflags += ['-enable-testing']
# Assembler flags naming the Unicode data files embedded into the binary.
foundation.ASFLAGS = " ".join([
    '-DCF_CHARACTERSET_BITMAP=\\"CoreFoundation/CharacterSets/CFCharacterSetBitmaps.bitmap\\"',
    '-DCF_CHARACTERSET_UNICHAR_DB=\\"CoreFoundation/CharacterSets/CFUniCharPropertyDatabase.data\\"',
    '-DCF_CHARACTERSET_UNICODE_DATA_B=\\"CoreFoundation/CharacterSets/CFUnicodeData-B.mapping\\"',
    '-DCF_CHARACTERSET_UNICODE_DATA_L=\\"CoreFoundation/CharacterSets/CFUnicodeData-L.mapping\\"',
])
# For now, we do not distinguish between public and private headers (they are all private to Foundation)
# These are really part of CF, which should ultimately be a separate target
foundation.ROOT_HEADERS_FOLDER_PATH = "${PREFIX}/lib/swift"
foundation.PUBLIC_HEADERS_FOLDER_PATH = "${PREFIX}/lib/swift/CoreFoundation"
foundation.PRIVATE_HEADERS_FOLDER_PATH = "${PREFIX}/lib/swift/CoreFoundation"
foundation.PROJECT_HEADERS_FOLDER_PATH = "${PREFIX}/lib/swift/CoreFoundation"
foundation.PUBLIC_MODULE_FOLDER_PATH = "${PREFIX}/lib/swift/CoreFoundation"
# Common CF compile flags for every platform.
# FIX: prepend a separator space before joining — the Linux CFLAGS branch
# above ends without a trailing space, so the original append fused
# ...CharacterSets" with -DU_SHOW_DRAFT_API into one malformed flag.
# (Matches the '" "+" ".join' style used for the libdispatch flags below.)
foundation.CFLAGS += " " + " ".join([
    '-DU_SHOW_DRAFT_API',
    '-DCF_BUILDING_CF',
    '-DDEPLOYMENT_RUNTIME_SWIFT',
    '-fconstant-cfstrings',
    '-fexceptions',
    '-Wno-shorten-64-to-32',
    '-Wno-deprecated-declarations',
    '-Wno-unreachable-code',
    '-Wno-conditional-uninitialized',
    '-Wno-unused-variable',
    '-Wno-int-conversion',
    '-Wno-unused-function',
    '-I./',
    '-fno-common',
    '-fcf-runtime-abi=swift',
])
# The Swift overlay needs the CF headers installed into the build dir.
swift_cflags += [
    '-I${BUILD_DIR}/Foundation/${PREFIX}/lib/swift',
]
if "XCTEST_BUILD_DIR" in Configuration.current.variables:
    swift_cflags += [
        '-I${XCTEST_BUILD_DIR}',
        '-L${XCTEST_BUILD_DIR}',
    ]
# Resolve ICU, curl and libxml2 either via pkg-config (Linux) or via
# well-known sysroot locations.
if Configuration.current.requires_pkg_config:
    pkg_config_dependencies = [
        'icu-i18n',
        'icu-uc',
        'libcurl',
        'libxml-2.0',
    ]
    for package_name in pkg_config_dependencies:
        try:
            package = PkgConfig(package_name)
        except PkgConfig.Error as e:
            sys.exit("pkg-config error for package {}: {}".format(package_name, e))
        foundation.CFLAGS += ' {} '.format(' '.join(package.cflags))
        foundation.LDFLAGS += ' {} '.format(' '.join(package.ldflags))
        swift_cflags += package.swiftc_flags
else:
    foundation.CFLAGS += ''.join([
        '-I${SYSROOT}/usr/include/curl ',
        '-I${SYSROOT}/usr/include/libxml2 ',
    ])
    foundation.LDFLAGS += ''.join([
        '-lcurl ',
        '-lxml2 ',
    ])
    swift_cflags += [
        '-I${SYSROOT}/usr/include/curl',
        '-I${SYSROOT}/usr/include/libxml2',
    ]
# Android has no libpthread; logging goes through liblog instead.
triple = Configuration.current.target.triple
if triple == "armv7-none-linux-androideabi":
    foundation.LDFLAGS += '-llog '
else:
    foundation.LDFLAGS += '-lpthread '
foundation.LDFLAGS += '-ldl -lm -lswiftCore '
# Configure use of Dispatch in CoreFoundation and Foundation if libdispatch is being built
if "LIBDISPATCH_SOURCE_DIR" in Configuration.current.variables:
    foundation.CFLAGS += " "+" ".join([
        '-DDEPLOYMENT_ENABLE_LIBDISPATCH',
        '-I'+Configuration.current.variables["LIBDISPATCH_SOURCE_DIR"],
        '-I' + os.path.join(Configuration.current.variables["LIBDISPATCH_SOURCE_DIR"], 'src', 'BlocksRuntime'),
        '-I'+Configuration.current.variables["LIBDISPATCH_BUILD_DIR"]+'/tests'  # for include of dispatch/private.h in CF
    ])
    swift_cflags += ([
        '-DDEPLOYMENT_ENABLE_LIBDISPATCH',
        '-I'+Configuration.current.variables["LIBDISPATCH_SOURCE_DIR"],
        '-I'+Configuration.current.variables["LIBDISPATCH_BUILD_DIR"]+'/src/swift',
        '-Xcc -fblocks'
    ])
    # \$$ORIGIN is escaped for the generated ninja file, not for Python.
    foundation.LDFLAGS += '-ldispatch -lswiftDispatch -L'+Configuration.current.variables["LIBDISPATCH_BUILD_DIR"]+'/src -rpath \$$ORIGIN '
    foundation.LDFLAGS += '-L' + Configuration.current.variables['LIBDISPATCH_BUILD_DIR'] + ' -lBlocksRuntime '
# Swift flags are finalized here; later phases only touch LDFLAGS.
foundation.SWIFTCFLAGS = " ".join(swift_cflags)
if "XCTEST_BUILD_DIR" in Configuration.current.variables:
    foundation.LDFLAGS += '-L${XCTEST_BUILD_DIR}'
# Header install phase: the CF module map plus every CF header, split into
# the public/private sets (both currently install to the same
# CoreFoundation folder — see the note above the *_FOLDER_PATH settings).
headers = CopyHeaders(
    module = 'CoreFoundation/Base.subproj/module.modulemap',
    public = [
        'CoreFoundation/Stream.subproj/CFStream.h',
        'CoreFoundation/String.subproj/CFStringEncodingExt.h',
        'CoreFoundation/Base.subproj/SwiftRuntime/CoreFoundation.h',
        'CoreFoundation/Base.subproj/SwiftRuntime/TargetConditionals.h',
        'CoreFoundation/RunLoop.subproj/CFMessagePort.h',
        'CoreFoundation/Collections.subproj/CFBinaryHeap.h',
        'CoreFoundation/PlugIn.subproj/CFBundle.h',
        'CoreFoundation/Locale.subproj/CFCalendar.h',
        'CoreFoundation/Collections.subproj/CFBitVector.h',
        'CoreFoundation/Base.subproj/CFAvailability.h',
        'CoreFoundation/Collections.subproj/CFTree.h',
        'CoreFoundation/NumberDate.subproj/CFTimeZone.h',
        'CoreFoundation/Error.subproj/CFError.h',
        'CoreFoundation/Collections.subproj/CFBag.h',
        'CoreFoundation/PlugIn.subproj/CFPlugIn.h',
        'CoreFoundation/Parsing.subproj/CFXMLParser.h',
        'CoreFoundation/String.subproj/CFString.h',
        'CoreFoundation/Collections.subproj/CFSet.h',
        'CoreFoundation/Base.subproj/CFUUID.h',
        'CoreFoundation/NumberDate.subproj/CFDate.h',
        'CoreFoundation/Collections.subproj/CFDictionary.h',
        'CoreFoundation/Base.subproj/CFByteOrder.h',
        'CoreFoundation/AppServices.subproj/CFUserNotification.h',
        'CoreFoundation/Base.subproj/CFBase.h',
        'CoreFoundation/Preferences.subproj/CFPreferences.h',
        'CoreFoundation/Locale.subproj/CFLocale.h',
        'CoreFoundation/RunLoop.subproj/CFSocket.h',
        'CoreFoundation/Parsing.subproj/CFPropertyList.h',
        'CoreFoundation/Collections.subproj/CFArray.h',
        'CoreFoundation/RunLoop.subproj/CFRunLoop.h',
        'CoreFoundation/URL.subproj/CFURLAccess.h',
        'CoreFoundation/URL.subproj/CFURLSessionInterface.h',
        'CoreFoundation/Locale.subproj/CFDateFormatter.h',
        'CoreFoundation/RunLoop.subproj/CFMachPort.h',
        'CoreFoundation/PlugIn.subproj/CFPlugInCOM.h',
        'CoreFoundation/Base.subproj/CFUtilities.h',
        'CoreFoundation/Parsing.subproj/CFXMLNode.h',
        'CoreFoundation/URL.subproj/CFURLComponents.h',
        'CoreFoundation/URL.subproj/CFURL.h',
        'CoreFoundation/Locale.subproj/CFNumberFormatter.h',
        'CoreFoundation/String.subproj/CFCharacterSet.h',
        'CoreFoundation/NumberDate.subproj/CFNumber.h',
        'CoreFoundation/Collections.subproj/CFData.h',
        'CoreFoundation/String.subproj/CFAttributedString.h',
        'CoreFoundation/Base.subproj/CoreFoundation_Prefix.h',
        'CoreFoundation/AppServices.subproj/CFNotificationCenter.h'
    ],
    private = [
        'CoreFoundation/Base.subproj/ForSwiftFoundationOnly.h',
        'CoreFoundation/Base.subproj/ForFoundationOnly.h',
        'CoreFoundation/Base.subproj/CFAsmMacros.h',
        'CoreFoundation/String.subproj/CFBurstTrie.h',
        'CoreFoundation/Error.subproj/CFError_Private.h',
        'CoreFoundation/URL.subproj/CFURLPriv.h',
        'CoreFoundation/Base.subproj/CFLogUtilities.h',
        'CoreFoundation/PlugIn.subproj/CFBundlePriv.h',
        'CoreFoundation/StringEncodings.subproj/CFStringEncodingConverter.h',
        'CoreFoundation/Stream.subproj/CFStreamAbstract.h',
        'CoreFoundation/Base.subproj/CFInternal.h',
        'CoreFoundation/Parsing.subproj/CFXMLInputStream.h',
        'CoreFoundation/Parsing.subproj/CFXMLInterface.h',
        'CoreFoundation/PlugIn.subproj/CFPlugIn_Factory.h',
        'CoreFoundation/String.subproj/CFStringLocalizedFormattingInternal.h',
        'CoreFoundation/PlugIn.subproj/CFBundle_Internal.h',
        'CoreFoundation/StringEncodings.subproj/CFStringEncodingConverterPriv.h',
        'CoreFoundation/Collections.subproj/CFBasicHash.h',
        'CoreFoundation/StringEncodings.subproj/CFStringEncodingDatabase.h',
        'CoreFoundation/StringEncodings.subproj/CFUnicodeDecomposition.h',
        'CoreFoundation/Stream.subproj/CFStreamInternal.h',
        'CoreFoundation/PlugIn.subproj/CFBundle_BinaryTypes.h',
        'CoreFoundation/Locale.subproj/CFICULogging.h',
        'CoreFoundation/Locale.subproj/CFLocaleInternal.h',
        'CoreFoundation/StringEncodings.subproj/CFUnicodePrecomposition.h',
        'CoreFoundation/Base.subproj/CFPriv.h',
        'CoreFoundation/StringEncodings.subproj/CFUniCharPriv.h',
        'CoreFoundation/URL.subproj/CFURL.inc.h',
        'CoreFoundation/NumberDate.subproj/CFBigNumber.h',
        'CoreFoundation/StringEncodings.subproj/CFUniChar.h',
        'CoreFoundation/StringEncodings.subproj/CFStringEncodingConverterExt.h',
        'CoreFoundation/Collections.subproj/CFStorage.h',
        'CoreFoundation/Base.subproj/CFRuntime.h',
        'CoreFoundation/String.subproj/CFStringDefaultEncoding.h',
        'CoreFoundation/String.subproj/CFCharacterSetPriv.h',
        'CoreFoundation/Stream.subproj/CFStreamPriv.h',
        'CoreFoundation/StringEncodings.subproj/CFICUConverters.h',
        'CoreFoundation/String.subproj/CFRegularExpression.h',
        'CoreFoundation/String.subproj/CFRunArray.h',
        'CoreFoundation/Locale.subproj/CFDateFormatter_Private.h',
        'CoreFoundation/Locale.subproj/CFLocale_Private.h',
        'CoreFoundation/Parsing.subproj/CFPropertyList_Private.h',
        'CoreFoundation/Base.subproj/CFKnownLocations.h',
        'CoreFoundation/Base.subproj/CFOverflow.h',
        'CoreFoundation/Base.subproj/CFRuntime_Internal.h',
        'CoreFoundation/Collections.subproj/CFCollections_Internal.h',
        'CoreFoundation/RunLoop.subproj/CFMachPort_Internal.h',
        'CoreFoundation/RunLoop.subproj/CFMachPort_Lifetime.h',
        'CoreFoundation/String.subproj/CFAttributedStringPriv.h',
        'CoreFoundation/String.subproj/CFString_Internal.h',
    ],
    project = [
    ])
foundation.add_phase(headers)
# C/assembly compile phase: all of CoreFoundation plus the bundled uuid
# implementation.  The .S files embed the Unicode data named by ASFLAGS.
sources = CompileSources([
    'uuid/uuid.c',
    # 'CoreFoundation/AppServices.subproj/CFUserNotification.c',
    'CoreFoundation/Base.subproj/CFBase.c',
    'CoreFoundation/Base.subproj/CFFileUtilities.c',
    'CoreFoundation/Base.subproj/CFPlatform.c',
    'CoreFoundation/Base.subproj/CFRuntime.c',
    'CoreFoundation/Base.subproj/CFSortFunctions.c',
    'CoreFoundation/Base.subproj/CFSystemDirectories.c',
    'CoreFoundation/Base.subproj/CFUtilities.c',
    'CoreFoundation/Base.subproj/CFUUID.c',
    'CoreFoundation/Collections.subproj/CFArray.c',
    'CoreFoundation/Collections.subproj/CFBag.c',
    'CoreFoundation/Collections.subproj/CFBasicHash.c',
    'CoreFoundation/Collections.subproj/CFBinaryHeap.c',
    'CoreFoundation/Collections.subproj/CFBitVector.c',
    'CoreFoundation/Collections.subproj/CFData.c',
    'CoreFoundation/Collections.subproj/CFDictionary.c',
    'CoreFoundation/Collections.subproj/CFSet.c',
    'CoreFoundation/Collections.subproj/CFStorage.c',
    'CoreFoundation/Collections.subproj/CFTree.c',
    'CoreFoundation/Error.subproj/CFError.c',
    'CoreFoundation/Locale.subproj/CFCalendar.c',
    'CoreFoundation/Locale.subproj/CFDateFormatter.c',
    'CoreFoundation/Locale.subproj/CFLocale.c',
    'CoreFoundation/Locale.subproj/CFLocaleIdentifier.c',
    'CoreFoundation/Locale.subproj/CFLocaleKeys.c',
    'CoreFoundation/Locale.subproj/CFNumberFormatter.c',
    'CoreFoundation/NumberDate.subproj/CFBigNumber.c',
    'CoreFoundation/NumberDate.subproj/CFDate.c',
    'CoreFoundation/NumberDate.subproj/CFNumber.c',
    'CoreFoundation/NumberDate.subproj/CFTimeZone.c',
    'CoreFoundation/Parsing.subproj/CFBinaryPList.c',
    'CoreFoundation/Parsing.subproj/CFOldStylePList.c',
    'CoreFoundation/Parsing.subproj/CFPropertyList.c',
    'CoreFoundation/Parsing.subproj/CFXMLInputStream.c',
    'CoreFoundation/Parsing.subproj/CFXMLNode.c',
    'CoreFoundation/Parsing.subproj/CFXMLParser.c',
    'CoreFoundation/Parsing.subproj/CFXMLTree.c',
    'CoreFoundation/Parsing.subproj/CFXMLInterface.c',
    'CoreFoundation/PlugIn.subproj/CFBundle.c',
    'CoreFoundation/PlugIn.subproj/CFBundle_Binary.c',
    'CoreFoundation/PlugIn.subproj/CFBundle_Grok.c',
    'CoreFoundation/PlugIn.subproj/CFBundle_InfoPlist.c',
    'CoreFoundation/PlugIn.subproj/CFBundle_Locale.c',
    'CoreFoundation/PlugIn.subproj/CFBundle_Resources.c',
    'CoreFoundation/PlugIn.subproj/CFBundle_Strings.c',
    'CoreFoundation/PlugIn.subproj/CFBundle_Main.c',
    'CoreFoundation/PlugIn.subproj/CFBundle_ResourceFork.c',
    'CoreFoundation/PlugIn.subproj/CFBundle_Executable.c',
    'CoreFoundation/PlugIn.subproj/CFBundle_DebugStrings.c',
    'CoreFoundation/PlugIn.subproj/CFPlugIn.c',
    'CoreFoundation/PlugIn.subproj/CFPlugIn_Factory.c',
    'CoreFoundation/PlugIn.subproj/CFPlugIn_Instance.c',
    'CoreFoundation/PlugIn.subproj/CFPlugIn_PlugIn.c',
    'CoreFoundation/Preferences.subproj/CFApplicationPreferences.c',
    'CoreFoundation/Preferences.subproj/CFPreferences.c',
    'CoreFoundation/Preferences.subproj/CFXMLPreferencesDomain.c',
    # 'CoreFoundation/RunLoop.subproj/CFMachPort.c',
    # 'CoreFoundation/RunLoop.subproj/CFMessagePort.c',
    # 'CoreFoundation/RunLoop.subproj/CFMachPort_Lifetime.c',
    'CoreFoundation/RunLoop.subproj/CFRunLoop.c',
    'CoreFoundation/RunLoop.subproj/CFSocket.c',
    'CoreFoundation/Stream.subproj/CFConcreteStreams.c',
    'CoreFoundation/Stream.subproj/CFSocketStream.c',
    'CoreFoundation/Stream.subproj/CFStream.c',
    'CoreFoundation/String.subproj/CFBurstTrie.c',
    'CoreFoundation/String.subproj/CFCharacterSet.c',
    'CoreFoundation/String.subproj/CFString.c',
    'CoreFoundation/String.subproj/CFStringEncodings.c',
    'CoreFoundation/String.subproj/CFStringScanner.c',
    'CoreFoundation/String.subproj/CFStringUtilities.c',
    'CoreFoundation/String.subproj/CFStringTransform.c',
    'CoreFoundation/StringEncodings.subproj/CFBuiltinConverters.c',
    'CoreFoundation/StringEncodings.subproj/CFICUConverters.c',
    'CoreFoundation/StringEncodings.subproj/CFPlatformConverters.c',
    'CoreFoundation/StringEncodings.subproj/CFStringEncodingConverter.c',
    'CoreFoundation/StringEncodings.subproj/CFStringEncodingDatabase.c',
    'CoreFoundation/StringEncodings.subproj/CFUniChar.c',
    'CoreFoundation/StringEncodings.subproj/CFUnicodeDecomposition.c',
    'CoreFoundation/StringEncodings.subproj/CFUnicodePrecomposition.c',
    'CoreFoundation/URL.subproj/CFURL.c',
    'CoreFoundation/URL.subproj/CFURLAccess.c',
    'CoreFoundation/URL.subproj/CFURLComponents.c',
    'CoreFoundation/URL.subproj/CFURLComponents_URIParser.c',
    'CoreFoundation/String.subproj/CFCharacterSetData.S',
    'CoreFoundation/String.subproj/CFUnicodeData.S',
    'CoreFoundation/String.subproj/CFUniCharPropertyDatabase.S',
    'CoreFoundation/String.subproj/CFRegularExpression.c',
    'CoreFoundation/String.subproj/CFAttributedString.c',
    'CoreFoundation/String.subproj/CFRunArray.c',
    'CoreFoundation/URL.subproj/CFURLSessionInterface.c',
    'CoreFoundation/Base.subproj/CFKnownLocations.c',
])
# This code is already in libdispatch so is only needed if libdispatch is
# NOT being used
if "LIBDISPATCH_SOURCE_DIR" not in Configuration.current.variables:
    sources += (['closure/data.c', 'closure/runtime.c'])
# Headers must be installed before the C sources compile against them.
sources.add_dependency(headers)
foundation.add_phase(sources)
# Swift compile phase: the Foundation overlay proper.
swift_sources = CompileSwiftSources([
    'Foundation/NSObject.swift',
    'Foundation/AffineTransform.swift',
    'Foundation/NSArray.swift',
    'Foundation/NSAttributedString.swift',
    'Foundation/Bundle.swift',
    'Foundation/ByteCountFormatter.swift',
    'Foundation/NSCache.swift',
    'Foundation/NSCalendar.swift',
    'Foundation/NSCFArray.swift',
    'Foundation/NSCFBoolean.swift',
    'Foundation/NSCFDictionary.swift',
    'Foundation/NSCFSet.swift',
    'Foundation/NSCFString.swift',
    'Foundation/NSCharacterSet.swift',
    'Foundation/NSCFCharacterSet.swift',
    'Foundation/NSCoder.swift',
    'Foundation/NSComparisonPredicate.swift',
    'Foundation/NSCompoundPredicate.swift',
    'Foundation/NSConcreteValue.swift',
    'Foundation/NSData.swift',
    'Foundation/NSDate.swift',
    'Foundation/DateComponentsFormatter.swift',
    'Foundation/DateFormatter.swift',
    'Foundation/DateIntervalFormatter.swift',
    'Foundation/Decimal.swift',
    'Foundation/NSDecimalNumber.swift',
    'Foundation/NSDictionary.swift',
    'Foundation/EnergyFormatter.swift',
    'Foundation/NSEnumerator.swift',
    'Foundation/NSError.swift',
    'Foundation/NSExpression.swift',
    'Foundation/FileHandle.swift',
    'Foundation/FileManager.swift',
    'Foundation/FileManager_XDG.swift',
    'Foundation/Formatter.swift',
    'Foundation/NSGeometry.swift',
    'Foundation/Host.swift',
    'Foundation/HTTPCookie.swift',
    'Foundation/HTTPCookieStorage.swift',
    'Foundation/NSIndexPath.swift',
    'Foundation/NSIndexSet.swift',
    'Foundation/ISO8601DateFormatter.swift',
    'Foundation/JSONSerialization.swift',
    'Foundation/NSKeyedCoderOldStyleArray.swift',
    'Foundation/NSKeyedArchiver.swift',
    'Foundation/NSKeyedArchiverHelpers.swift',
    'Foundation/NSKeyedUnarchiver.swift',
    'Foundation/LengthFormatter.swift',
    'Foundation/NSLocale.swift',
    'Foundation/NSLock.swift',
    'Foundation/NSLog.swift',
    'Foundation/MassFormatter.swift',
    'Foundation/NSNotification.swift',
    'Foundation/NotificationQueue.swift',
    'Foundation/NSNull.swift',
    'Foundation/NSNumber.swift',
    'Foundation/NumberFormatter.swift',
    'Foundation/NSObjCRuntime.swift',
    'Foundation/Operation.swift',
    'Foundation/NSOrderedSet.swift',
    'Foundation/NSPathUtilities.swift',
    'Foundation/NSPersonNameComponents.swift',
    'Foundation/PersonNameComponentsFormatter.swift',
    'Foundation/NSPlatform.swift',
    'Foundation/Port.swift',
    'Foundation/PortMessage.swift',
    'Foundation/NSPredicate.swift',
    'Foundation/ProcessInfo.swift',
    'Foundation/Progress.swift',
    'Foundation/ProgressFraction.swift',
    'Foundation/PropertyListSerialization.swift',
    'Foundation/NSRange.swift',
    'Foundation/NSRegularExpression.swift',
    'Foundation/RunLoop.swift',
    'Foundation/Scanner.swift',
    'Foundation/NSSet.swift',
    'Foundation/NSSortDescriptor.swift',
    'Foundation/NSSpecialValue.swift',
    'Foundation/Stream.swift',
    'Foundation/NSString.swift',
    'Foundation/NSStringAPI.swift',
    'Foundation/NSSwiftRuntime.swift',
    'Foundation/Process.swift',
    'Foundation/NSTextCheckingResult.swift',
    'Foundation/Thread.swift',
    'Foundation/Timer.swift',
    'Foundation/NSTimeZone.swift',
    'Foundation/NSURL.swift',
    'Foundation/URLAuthenticationChallenge.swift',
    'Foundation/URLCache.swift',
    'Foundation/URLCredential.swift',
    'Foundation/URLCredentialStorage.swift',
    'Foundation/NSURLError.swift',
    'Foundation/URLProtectionSpace.swift',
    'Foundation/URLProtocol.swift',
    'Foundation/NSURLRequest.swift',
    'Foundation/URLResponse.swift',
    'Foundation/URLSession/Configuration.swift',
    'Foundation/URLSession/libcurl/EasyHandle.swift',
    'Foundation/URLSession/BodySource.swift',
    'Foundation/URLSession/Message.swift',
    'Foundation/URLSession/http/HTTPMessage.swift',
    'Foundation/URLSession/libcurl/MultiHandle.swift',
    'Foundation/URLSession/URLSession.swift',
    'Foundation/URLSession/URLSessionConfiguration.swift',
    'Foundation/URLSession/URLSessionDelegate.swift',
    'Foundation/URLSession/URLSessionTask.swift',
    'Foundation/URLSession/TaskRegistry.swift',
    'Foundation/URLSession/NativeProtocol.swift',
    'Foundation/URLSession/TransferState.swift',
    'Foundation/URLSession/libcurl/libcurlHelpers.swift',
    'Foundation/URLSession/http/HTTPURLProtocol.swift',
    'Foundation/UserDefaults.swift',
    'Foundation/NSUUID.swift',
    'Foundation/NSValue.swift',
    'Foundation/XMLDocument.swift',
    'Foundation/XMLDTD.swift',
    'Foundation/XMLDTDNode.swift',
    'Foundation/XMLElement.swift',
    'Foundation/XMLNode.swift',
    'Foundation/XMLParser.swift',
    'Foundation/FoundationErrors.swift',
    'Foundation/URL.swift',
    'Foundation/UUID.swift',
    'Foundation/Boxing.swift',
    'Foundation/ReferenceConvertible.swift',
    'Foundation/Date.swift',
    'Foundation/Data.swift',
    'Foundation/CharacterSet.swift',
    'Foundation/URLRequest.swift',
    'Foundation/PersonNameComponents.swift',
    'Foundation/Notification.swift',
    'Foundation/URLComponents.swift',
    'Foundation/DateComponents.swift',
    'Foundation/DateInterval.swift',
    'Foundation/IndexPath.swift',
    'Foundation/IndexSet.swift',
    'Foundation/StringEncodings.swift',
    'Foundation/ExtraStringAPIs.swift',
    'Foundation/Measurement.swift',
    'Foundation/NSMeasurement.swift',
    'Foundation/MeasurementFormatter.swift',
    'Foundation/Unit.swift',
    'Foundation/TimeZone.swift',
    'Foundation/Calendar.swift',
    'Foundation/Locale.swift',
    'Foundation/String.swift',
    'Foundation/Set.swift',
    'Foundation/Dictionary.swift',
    'Foundation/Array.swift',
    'Foundation/Bridging.swift',
    'Foundation/CGFloat.swift',
    'Foundation/Codable.swift',
    'Foundation/JSONEncoder.swift',
])
# Debug builds compile the overlay with -enable-testing (see swift_cflags
# above) so TestFoundation can use @testable import.
if Configuration.current.build_mode == Configuration.Debug:
    swift_sources.enable_testable_import = True
# CF headers must be installed before the Swift overlay compiles.
swift_sources.add_dependency(headers)
foundation.add_phase(swift_sources)
# Fixture files copied next to the TestFoundation executable.
foundation_tests_resources = CopyResources('TestFoundation', [
    'TestFoundation/Resources/Info.plist',
    'TestFoundation/Resources/NSURLTestData.plist',
    'TestFoundation/Resources/Test.plist',
    'TestFoundation/Resources/NSStringTestData.txt',
    'TestFoundation/Resources/NSString-UTF16-BE-data.txt',
    'TestFoundation/Resources/NSString-UTF16-LE-data.txt',
    'TestFoundation/Resources/NSString-UTF32-BE-data.txt',
    'TestFoundation/Resources/NSString-UTF32-LE-data.txt',
    'TestFoundation/Resources/NSString-ISO-8859-1-data.txt',
    'TestFoundation/Resources/NSXMLDocumentTestData.xml',
    'TestFoundation/Resources/PropertyList-1.0.dtd',
    'TestFoundation/Resources/NSXMLDTDTestData.xml',
    'TestFoundation/Resources/NSKeyedUnarchiver-ArrayTest.plist',
    'TestFoundation/Resources/NSKeyedUnarchiver-ComplexTest.plist',
    'TestFoundation/Resources/NSKeyedUnarchiver-ConcreteValueTest.plist',
    'TestFoundation/Resources/NSKeyedUnarchiver-EdgeInsetsTest.plist',
    'TestFoundation/Resources/NSKeyedUnarchiver-NotificationTest.plist',
    'TestFoundation/Resources/NSKeyedUnarchiver-RangeTest.plist',
    'TestFoundation/Resources/NSKeyedUnarchiver-RectTest.plist',
    'TestFoundation/Resources/NSKeyedUnarchiver-URLTest.plist',
    'TestFoundation/Resources/NSKeyedUnarchiver-UUIDTest.plist',
    'TestFoundation/Resources/NSKeyedUnarchiver-OrderedSetTest.plist',
    'TestFoundation/Resources/TestFileWithZeros.txt',
])
# TODO: Probably this should be another 'product', but for now it's simply a phase
foundation_tests = SwiftExecutable('TestFoundation', [
    'TestFoundation/main.swift',
    'TestFoundation/HTTPServer.swift',
    'Foundation/ProgressFraction.swift',
    'TestFoundation/Utilities.swift',
] + glob.glob('./TestFoundation/Test*.swift'))  # all TestSomething.swift are considered sources to the test project in the TestFoundation directory
# FIX: only add the libdispatch library search path when libdispatch is
# actually configured — the unconditional variables["LIBDISPATCH_BUILD_DIR"]
# lookup raised KeyError otherwise.  Every other use of this variable in
# this script is guarded the same way.
if "LIBDISPATCH_BUILD_DIR" in Configuration.current.variables:
    Configuration.current.extra_ld_flags += ' -L'+Configuration.current.variables["LIBDISPATCH_BUILD_DIR"]+'/src'
foundation_tests.add_dependency(foundation_tests_resources)
# Small helper executable used by the XDG/FileManager tests.
xdgTestHelper = SwiftExecutable('xdgTestHelper',
                                ['TestFoundation/xdgTestHelper/main.swift'])
xdgTestHelper.outputDirectory = 'TestFoundation'
foundation_tests.add_dependency(xdgTestHelper)
foundation.add_phase(xdgTestHelper)
foundation.add_phase(foundation_tests_resources)
foundation.add_phase(foundation_tests)
# The plutil command-line tool ships alongside the library.
plutil = SwiftExecutable('plutil', ['Tools/plutil/main.swift'])
foundation.add_phase(plutil)
script.add_product(foundation)
# Colon-separated runtime library search path handed to the test runner
# via the LIBS_DIRS ninja variable.
LIBS_DIRS = Configuration.current.build_directory.absolute()+"/Foundation/:"
if "XCTEST_BUILD_DIR" in Configuration.current.variables:
    LIBS_DIRS += "${XCTEST_BUILD_DIR}:"
if "LIBDISPATCH_BUILD_DIR" in Configuration.current.variables:
    LIBS_DIRS += Configuration.current.variables["LIBDISPATCH_BUILD_DIR"]+"/src:"
Configuration.current.variables["LIBS_DIRS"] = LIBS_DIRS
# Raw ninja text appended to the generated build file: 'install' copies the
# built libraries, swiftmodule/swiftdoc and CF headers into DSTROOT.
extra_script = """
rule InstallFoundation
    command = mkdir -p "${DSTROOT}/${PREFIX}/lib/swift/${OS}"; $
        cp "${BUILD_DIR}/Foundation/${DYLIB_PREFIX}Foundation${DYLIB_SUFFIX}" "${DSTROOT}/${PREFIX}/lib/swift/${OS}"; $
        mkdir -p "${DSTROOT}/${PREFIX}/lib/swift_static/${OS}"; $
        cp "${BUILD_DIR}/Foundation/${STATICLIB_PREFIX}Foundation${STATICLIB_SUFFIX}" "${DSTROOT}/${PREFIX}/lib/swift_static/${OS}"; $
        mkdir -p "${DSTROOT}/${PREFIX}/lib/swift/${OS}/${ARCH}"; $
        cp "${BUILD_DIR}/Foundation/Foundation.swiftmodule" "${DSTROOT}/${PREFIX}/lib/swift/${OS}/${ARCH}/"; $
        cp "${BUILD_DIR}/Foundation/Foundation.swiftdoc" "${DSTROOT}/${PREFIX}/lib/swift/${OS}/${ARCH}/"; $
        mkdir -p "${DSTROOT}/${PREFIX}/local/include"; $
        rsync -a "${BUILD_DIR}/Foundation/${PREFIX}/lib/swift/CoreFoundation" "${DSTROOT}/${PREFIX}/lib/swift/"
build ${BUILD_DIR}/.install: InstallFoundation ${BUILD_DIR}/Foundation/${DYLIB_PREFIX}Foundation${DYLIB_SUFFIX}
build install: phony | ${BUILD_DIR}/.install
"""
# 'test' does not run the suite; it prints the LD_LIBRARY_PATH command
# lines for running and for debugging TestFoundation under lldb.
extra_script += """
rule RunTestFoundation
    command = echo "**** RUNNING TESTS ****\\nexecute:\\nLD_LIBRARY_PATH=${LIBS_DIRS} ${BUILD_DIR}/TestFoundation/TestFoundation\\n**** DEBUGGING TESTS ****\\nexecute:\\nLD_LIBRARY_PATH=${LIBS_DIRS} ${BUILD_DIR}/../lldb-${OS}-${ARCH}/bin/lldb ${BUILD_DIR}/TestFoundation/TestFoundation\\n"
    description = Building Tests
build ${BUILD_DIR}/.test: RunTestFoundation | TestFoundation
build test: phony | ${BUILD_DIR}/.test
"""
script.add_text(extra_script)
script.generate()
| |
# Code to analyse the model.
import re
import os
from astropy import units as u
from tardis import constants
import numpy as np
import pandas as pd
class LastLineInteraction(object):
    """Filter and tabulate the last line interaction of Monte Carlo packets.

    Packets can be filtered by a wavelength window (applied to the packet's
    in/out frequency or to the absorbing line's wavelength, depending on
    ``packet_filter_mode``) and additionally by atomic number and ion number.
    Changing any filter property recomputes the summary tables.
    """

    @classmethod
    def from_model(cls, model, packet_filter_mode="packet_out_nu"):
        """Build an instance from a TARDIS model (runner arrays + line data)."""
        return cls(
            model.runner.last_line_interaction_in_id,
            model.runner.last_line_interaction_out_id,
            model.runner.last_line_interaction_shell_id,
            model.runner.output_nu,
            model.runner.last_interaction_in_nu,
            model.plasma.atomic_data.lines,
            packet_filter_mode,
        )

    def __init__(
        self,
        last_line_interaction_in_id,
        last_line_interaction_out_id,
        last_line_interaction_shell_id,
        output_nu,
        input_nu,
        lines,
        packet_filter_mode="packet_out_nu",
    ):
        """
        :param last_line_interaction_in_id: per-packet id of the absorbing line
            (-1 when the packet had no line interaction)
        :param last_line_interaction_out_id: per-packet id of the emitting line
        :param last_line_interaction_shell_id: per-packet shell index
        :param output_nu: per-packet output frequency in Hz
        :param input_nu: per-packet input frequency in Hz
        :param lines: atomic line data (DataFrame)
        :param packet_filter_mode: one of 'packet_out_nu', 'packet_in_nu',
            'line_in_nu'
        """
        # mask out packets which did not perform a line interaction
        # TODO mask out packets which do not escape to observer?
        mask = last_line_interaction_out_id != -1
        self.last_line_interaction_in_id = last_line_interaction_in_id[mask]
        self.last_line_interaction_out_id = last_line_interaction_out_id[mask]
        self.last_line_interaction_shell_id = last_line_interaction_shell_id[
            mask
        ]
        # Convert packet frequencies (Hz) to wavelengths in Angstrom.
        self.last_line_interaction_out_angstrom = u.Quantity(
            output_nu[mask], "Hz"
        ).to(u.Angstrom, equivalencies=u.spectral())
        self.last_line_interaction_in_angstrom = u.Quantity(
            input_nu[mask], "Hz"
        ).to(u.Angstrom, equivalencies=u.spectral())
        self.lines = lines
        # Default filter: the full wavelength range and all species.
        self._wavelength_start = 0 * u.angstrom
        self._wavelength_end = np.inf * u.angstrom
        self._atomic_number = None
        self._ion_number = None
        self.packet_filter_mode = packet_filter_mode
        self.update_last_interaction_filter()

    @property
    def wavelength_start(self):
        # Lower bound of the wavelength filter window.
        return self._wavelength_start.to("angstrom")

    @wavelength_start.setter
    def wavelength_start(self, value):
        if not isinstance(value, u.Quantity):
            raise ValueError("needs to be a Quantity")
        self._wavelength_start = value
        self.update_last_interaction_filter()

    @property
    def wavelength_end(self):
        # Upper bound of the wavelength filter window.
        return self._wavelength_end.to("angstrom")

    @wavelength_end.setter
    def wavelength_end(self, value):
        if not isinstance(value, u.Quantity):
            raise ValueError("needs to be a Quantity")
        self._wavelength_end = value
        self.update_last_interaction_filter()

    @property
    def atomic_number(self):
        # Optional atomic-number filter; None means no filtering.
        return self._atomic_number

    @atomic_number.setter
    def atomic_number(self, value):
        self._atomic_number = value
        self.update_last_interaction_filter()

    @property
    def ion_number(self):
        # Optional ion-number filter; None means no filtering.
        return self._ion_number

    @ion_number.setter
    def ion_number(self, value):
        self._ion_number = value
        self.update_last_interaction_filter()

    def update_last_interaction_filter(self):
        """Recompute last_line_in/out and the summary tables from the filters."""
        # Select packets whose relevant wavelength lies inside the window.
        if self.packet_filter_mode == "packet_out_nu":
            packet_filter = (
                self.last_line_interaction_out_angstrom > self.wavelength_start
            ) & (self.last_line_interaction_out_angstrom < self.wavelength_end)
        elif self.packet_filter_mode == "packet_in_nu":
            packet_filter = (
                self.last_line_interaction_in_angstrom > self.wavelength_start
            ) & (self.last_line_interaction_in_angstrom < self.wavelength_end)
        elif self.packet_filter_mode == "line_in_nu":
            # Filter on the wavelength of the absorbing line itself.
            line_in_nu = self.lines.wavelength.iloc[
                self.last_line_interaction_in_id
            ].values
            packet_filter = (
                line_in_nu > self.wavelength_start.to(u.angstrom).value
            ) & (line_in_nu < self.wavelength_end.to(u.angstrom).value)
        else:
            raise ValueError(
                "Invalid value of packet_filter_mode. The only values "
                "allowed are: packet_out_nu, packet_in_nu, line_in_nu"
            )
        # Line-data rows for the absorbing/emitting transition of each packet.
        self.last_line_in = self.lines.iloc[
            self.last_line_interaction_in_id[packet_filter]
        ]
        self.last_line_out = self.lines.iloc[
            self.last_line_interaction_out_id[packet_filter]
        ]
        if self.atomic_number is not None:
            self.last_line_in = self.last_line_in.xs(
                self.atomic_number, level="atomic_number", drop_level=False
            )
            self.last_line_out = self.last_line_out.xs(
                self.atomic_number, level="atomic_number", drop_level=False
            )
        if self.ion_number is not None:
            self.last_line_in = self.last_line_in.xs(
                self.ion_number, level="ion_number", drop_level=False
            )
            self.last_line_out = self.last_line_out.xs(
                self.ion_number, level="ion_number", drop_level=False
            )
        last_line_in_count = self.last_line_in.line_id.value_counts()
        last_line_out_count = self.last_line_out.line_id.value_counts()
        self.last_line_in_table = self.last_line_in.reset_index()[
            [
                "wavelength",
                "atomic_number",
                "ion_number",
                "level_number_lower",
                "level_number_upper",
            ]
        ]
        # NOTE(review): value_counts() is indexed by line_id while the table
        # carries a fresh RangeIndex after reset_index(); the assignment below
        # aligns on index labels -- confirm the intended alignment.
        self.last_line_in_table["count"] = last_line_in_count
        self.last_line_in_table.sort_values(
            by="count", ascending=False, inplace=True
        )
        self.last_line_out_table = self.last_line_out.reset_index()[
            [
                "wavelength",
                "atomic_number",
                "ion_number",
                "level_number_lower",
                "level_number_upper",
            ]
        ]
        self.last_line_out_table["count"] = last_line_out_count
        self.last_line_out_table.sort_values(
            by="count", ascending=False, inplace=True
        )

    def plot_wave_in_out(self, fig, do_clf=True, plot_resonance=True):
        """Scatter-plot absorbed vs emitted wavelength per packet.

        NOTE(review): this method references self.last_line_list_in,
        self.last_line_list_out and self.current_no_packets, none of which
        are assigned anywhere in this class (the filter step sets
        last_line_in/last_line_out and the *_table attributes), so it raises
        AttributeError as written -- confirm the intended attribute names.
        """
        if do_clf:
            fig.clf()
        ax = fig.add_subplot(111)
        wave_in = self.last_line_list_in["wavelength"]
        wave_out = self.last_line_list_out["wavelength"]
        if plot_resonance:
            # Diagonal marking wave_in == wave_out (resonant scattering).
            min_wave = np.min([wave_in.min(), wave_out.min()])
            max_wave = np.max([wave_in.max(), wave_out.max()])
            ax.plot([min_wave, max_wave], [min_wave, max_wave], "b-")
        ax.plot(wave_in, wave_out, "b.", picker=True)
        ax.set_xlabel("Last interaction Wave in")
        ax.set_ylabel("Last interaction Wave out")

        def onpick(event):
            # NOTE(review): DataFrame.ix was removed in pandas >= 1.0, and the
            # second print repeats last_line_list_in (likely meant *_out).
            print("-" * 80)
            print(
                "Line_in (%d/%d):\n%s"
                % (
                    len(event.ind),
                    self.current_no_packets,
                    self.last_line_list_in.ix[event.ind],
                )
            )
            print("\n\n")
            print(
                "Line_out (%d/%d):\n%s"
                % (
                    len(event.ind),
                    self.current_no_packets,
                    self.last_line_list_in.ix[event.ind],
                )
            )
            print("^" * 80)

        def onpress(event):
            pass

        fig.canvas.mpl_connect("pick_event", onpick)
        # NOTE(review): "on_press" is not a matplotlib event name (likely
        # "key_press_event" or "button_press_event" was intended) -- confirm.
        fig.canvas.mpl_connect("on_press", onpress)
class TARDISHistory(object):
    """
    Records the history of the model.

    Reads per-iteration snapshots (HDF5 groups named "model%03d/...") from a
    TARDIS HDF5 output file produced by an older TARDIS version.
    """

    def __init__(self, hdf5_fname, iterations=None):
        """
        :param hdf5_fname: path to the HDF5 history file
        :param iterations: optional explicit iteration numbers; when None the
            available iterations are discovered from the store's keys
        """
        self.hdf5_fname = hdf5_fname
        if iterations is None:
            iterations = []
            # `with` guarantees the store is closed even if a key is malformed.
            with pd.HDFStore(self.hdf5_fname, "r") as hdf_store:
                for key in hdf_store.keys():
                    if key.split("/")[1] == "atom_data":
                        continue
                    iterations.append(
                        int(re.match(r"model(\d+)", key.split("/")[1]).groups()[0])
                    )
            self.iterations = np.sort(np.unique(iterations))
        else:
            self.iterations = iterations
        # Atomic data tables, loaded lazily by load_atom_data().
        self.levels = None
        self.lines = None

    def load_atom_data(self):
        """Lazily load the levels and lines tables from the store."""
        if self.levels is None or self.lines is None:
            with pd.HDFStore(self.hdf5_fname, "r") as hdf_store:
                self.levels = hdf_store["atom_data/levels"]
                self.lines = hdf_store["atom_data/lines"]

    def _select_iterations(self, iterations):
        """Normalize an iterations selector into concrete iteration numbers.

        :param iterations: None (all iterations), a scalar index, or an
            index array into self.iterations
        :return: tuple (iterable of iteration numbers, bool is_scalar)
        """
        if iterations is None:
            return self.iterations, False
        if np.isscalar(iterations):
            return [self.iterations[iterations]], True
        return self.iterations[iterations], False

    def load_t_inner(self, iterations=None):
        """Return a numpy array of t_inner values for the selected iterations."""
        selected, _ = self._select_iterations(iterations)
        t_inners = []
        with pd.HDFStore(self.hdf5_fname, "r") as hdf_store:
            for iteration in selected:
                # .loc replaces the .ix indexer removed in pandas >= 1.0.
                t_inners.append(
                    hdf_store["model%03d/configuration" % iteration].loc["t_inner"]
                )
        return np.array(t_inners)

    def load_t_rads(self, iterations=None):
        """Return a DataFrame of t_rad profiles, one column per iteration."""
        t_rads_dict = {}
        selected, _ = self._select_iterations(iterations)
        with pd.HDFStore(self.hdf5_fname, "r") as hdf_store:
            for iteration in selected:
                t_rads_dict["iter%03d" % iteration] = hdf_store[
                    "model%03d/t_rads" % iteration
                ]
        return pd.DataFrame(t_rads_dict)

    def load_ws(self, iterations=None):
        """Return a DataFrame of dilution factors (ws), one column per iteration."""
        ws_dict = {}
        selected, _ = self._select_iterations(iterations)
        with pd.HDFStore(self.hdf5_fname, "r") as hdf_store:
            for iteration in selected:
                ws_dict["iter{:03d}".format(iteration)] = hdf_store[
                    "model{:03d}/ws".format(iteration)
                ]
        return pd.DataFrame(ws_dict)

    def load_level_populations(self, iterations=None):
        """Return level populations: a DataFrame for a scalar selector,
        otherwise a Panel keyed by iteration."""
        level_populations_dict = {}
        selected, is_scalar = self._select_iterations(iterations)
        with pd.HDFStore(self.hdf5_fname, "r") as hdf_store:
            for iteration in selected:
                level_populations_dict["iter%03d" % iteration] = hdf_store[
                    "model{:03d}/level_populations".format(iteration)
                ]
        if is_scalar:
            # list(...) is required on Python 3 where dict.values() is a
            # non-indexable view (the old .values()[0] raised TypeError).
            return pd.DataFrame(list(level_populations_dict.values())[0])
        # NOTE(review): pd.Panel was removed in pandas >= 0.25; this branch
        # needs a MultiIndex-DataFrame (or xarray) port on modern pandas.
        return pd.Panel(level_populations_dict)

    def load_jblues(self, iterations=None):
        """Return j_blue estimators: DataFrame for a scalar selector,
        otherwise a Panel keyed by iteration."""
        jblues_dict = {}
        selected, is_scalar = self._select_iterations(iterations)
        with pd.HDFStore(self.hdf5_fname, "r") as hdf_store:
            for iteration in selected:
                jblues_dict["iter{:03d}".format(iteration)] = hdf_store[
                    "model{:03d}/j_blues".format(iteration)
                ]
        if is_scalar:
            # Python 3 compatibility: dict.values() is a view, not a list.
            return pd.DataFrame(list(jblues_dict.values())[0])
        # NOTE(review): pd.Panel removed in pandas >= 0.25 (see above).
        return pd.Panel(jblues_dict)

    def load_ion_populations(self, iterations=None):
        """Return ion populations: DataFrame for a scalar selector,
        otherwise a Panel keyed by iteration."""
        ion_populations_dict = {}
        selected, is_scalar = self._select_iterations(iterations)
        with pd.HDFStore(self.hdf5_fname, "r") as hdf_store:
            for iteration in selected:
                ion_populations_dict["iter{:03d}".format(iteration)] = hdf_store[
                    "model{:03d}/ion_populations".format(iteration)
                ]
        if is_scalar:
            # Python 3 compatibility: dict.values() is a view, not a list.
            return pd.DataFrame(list(ion_populations_dict.values())[0])
        # NOTE(review): pd.Panel removed in pandas >= 0.25 (see above).
        return pd.Panel(ion_populations_dict)

    def load_spectrum(self, iteration, spectrum_keyword="luminosity_density"):
        """Return the spectrum stored under *spectrum_keyword* for one iteration."""
        with pd.HDFStore(self.hdf5_fname, "r") as hdf_store:
            spectrum = hdf_store[
                "model%03d/%s" % (self.iterations[iteration], spectrum_keyword)
            ]
        return spectrum

    def calculate_relative_lte_level_populations(self, species, iteration=-1):
        """LTE (Boltzmann) level populations relative to the ground state."""
        self.load_atom_data()
        t_rads = self.load_t_rads(iteration)
        beta_rads = 1 / (constants.k_B.cgs.value * t_rads.values[:, 0])
        # Label-based selection of the species' rows (was .ix).
        species_levels = self.levels.loc[species]
        relative_lte_level_populations = (
            species_levels.g.values[np.newaxis].T
            / float(species_levels.g.loc[0])
        ) * np.exp(-beta_rads * species_levels.energy.values[np.newaxis].T)
        return pd.DataFrame(
            relative_lte_level_populations, index=species_levels.index
        )

    def calculate_departure_coefficients(self, species, iteration=-1):
        """Departure coefficients (actual / LTE level populations) for a species."""
        self.load_atom_data()
        t_rads = self.load_t_rads(iteration)
        beta_rads = 1 / (constants.k_B.cgs.value * t_rads.values[:, 0])
        species_levels = self.levels.loc[species]
        species_level_populations = self.load_level_populations(iteration).loc[
            species
        ]
        departure_coefficient = (
            (species_level_populations.values * species_levels.g.loc[0])
            / (
                species_level_populations.loc[0].values
                * species_levels.g.values[np.newaxis].T
            )
        ) * np.exp(beta_rads * species_levels.energy.values[np.newaxis].T)
        return pd.DataFrame(departure_coefficient, index=species_levels.index)

    def get_last_line_interaction(self, iteration=-1):
        """Build a LastLineInteraction from one iteration's stored arrays."""
        iteration = self.iterations[iteration]
        self.load_atom_data()
        model_string = "model" + ("%03d" % iteration) + "/%s"
        with pd.HDFStore(self.hdf5_fname, "r") as hdf_store:
            last_line_interaction_in_id = hdf_store[
                model_string % "last_line_interaction_in_id"
            ].values
            last_line_interaction_out_id = hdf_store[
                model_string % "last_line_interaction_out_id"
            ].values
            last_line_interaction_shell_id = hdf_store[
                model_string % "last_line_interaction_shell_id"
            ].values
            try:
                montecarlo_nu = hdf_store[
                    model_string % "montecarlo_nus_path"
                ].values
            except KeyError:
                montecarlo_nu = hdf_store[model_string % "montecarlo_nus"].values
        # NOTE(review): the original call omitted the required input_nu
        # argument (TypeError). The stored Monte Carlo frequencies are the
        # best available stand-in for both -- confirm against the writer.
        return LastLineInteraction(
            last_line_interaction_in_id,
            last_line_interaction_out_id,
            last_line_interaction_shell_id,
            montecarlo_nu,
            montecarlo_nu,
            self.lines,
        )
| |
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <endre.karlson@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from designate import context
from designate import plugin
from designate import rpc
from designate.central import rpcapi
from designate.i18n import _LI
from designate.pool_manager.rpcapi import PoolManagerAPI
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
LOG = logging.getLogger(__name__)
class PeriodicTask(plugin.ExtensionPlugin):
    """Abstract Zone Manager periodic task
    """
    __plugin_ns__ = 'designate.zone_manager_tasks'
    __plugin_type__ = 'zone_manager_task'
    __interval__ = None

    def __init__(self):
        # Partitions owned by this worker; refreshed via on_partition_change().
        self.my_partitions = None
        self.options = cfg.CONF[self.get_canonical_name()]

    @classmethod
    def get_base_opts(cls):
        """Options shared by every periodic task (run interval, page size)."""
        options = [
            cfg.IntOpt(
                'interval',
                default=cls.__interval__,
                help='Run interval in seconds'
            ),
            cfg.IntOpt('per_page', default=100),
        ]
        return options

    @property
    def central_api(self):
        """Central RPC API singleton."""
        return rpcapi.CentralAPI.get_instance()

    def on_partition_change(self, my_partitions, members, event):
        """Refresh partitions attribute
        """
        self.my_partitions = my_partitions

    def _my_range(self):
        """Returns first and last partitions
        """
        return self.my_partitions[0], self.my_partitions[-1]

    def _filter_between(self, col):
        """Generate BETWEEN filter based on _my_range
        """
        return {col: "BETWEEN %s,%s" % self._my_range()}

    def _iter(self, method, *args, **kwargs):
        """Yield items from a paginated find-style API call, page by page."""
        kwargs.setdefault("limit", self.options.per_page)
        while True:
            items = method(*args, **kwargs)
            # Stop fetching if there's no more items
            if len(items) == 0:
                # PEP 479: `raise StopIteration` inside a generator becomes
                # RuntimeError on Python 3.7+; a bare return ends iteration.
                return
            kwargs["marker"] = items[-1].id
            for i in items:
                yield i

    def _iter_zones(self, ctxt, criterion=None):
        """Iterate zones within this worker's shard range, optionally filtered."""
        criterion = criterion or {}
        criterion.update(self._filter_between('shard'))
        return self._iter(self.central_api.find_zones, ctxt, criterion)
class DeletedZonePurgeTask(PeriodicTask):
    """Purge deleted zones that are exceeding the grace period time interval.
    Deleted zones have values in the deleted_at column.
    Purging means removing them from the database entirely.
    """
    __plugin_name__ = 'zone_purge'
    __interval__ = 3600

    def __init__(self):
        super(DeletedZonePurgeTask, self).__init__()

    @classmethod
    def get_cfg_opts(cls):
        """Register the purge task's option group and options."""
        time_threshold_opt = cfg.IntOpt(
            'time_threshold',
            default=604800,
            help="How old deleted zones should be (deleted_at) to be "
                 "purged, in seconds"
        )
        batch_size_opt = cfg.IntOpt(
            'batch_size',
            default=100,
            help='How many zones to be purged on each run'
        )
        group = cfg.OptGroup(cls.get_canonical_name())
        return [(group, cls.get_base_opts() + [time_threshold_opt,
                                               batch_size_opt])]

    def __call__(self):
        """Call the Central API to perform a purge of deleted zones based on
        expiration time and sharding range.
        """
        shard_begin, shard_end = self._my_range()
        LOG.info(
            _LI("Performing deleted zone purging for %(start)s to %(end)s"),
            {"start": shard_begin, "end": shard_end},
        )
        cutoff = timeutils.utcnow() - datetime.timedelta(
            seconds=self.options.time_threshold)
        LOG.debug("Filtering deleted zones before %s", cutoff)
        criterion = self._filter_between('shard')
        criterion['deleted'] = '!0'
        criterion['deleted_at'] = "<=%s" % cutoff
        ctxt = context.DesignateContext.get_admin_context()
        ctxt.all_tenants = True
        self.central_api.purge_zones(
            ctxt,
            criterion,
            limit=self.options.batch_size,
        )
class PeriodicExistsTask(PeriodicTask):
    """Emit a dns.domain.exists event for every zone in our shard range."""
    __plugin_name__ = 'periodic_exists'
    __interval__ = 3600

    def __init__(self):
        super(PeriodicExistsTask, self).__init__()
        self.notifier = rpc.get_notifier('zone_manager')

    @classmethod
    def get_cfg_opts(cls):
        """Register the task's option group; only the base options apply."""
        return [(cfg.OptGroup(cls.get_canonical_name()), cls.get_base_opts())]

    @staticmethod
    def _get_period(seconds):
        """Return the (start, end) audit period ending now."""
        end = timeutils.utcnow()
        return end - datetime.timedelta(seconds=seconds), end

    def __call__(self):
        shard_begin, shard_end = self._my_range()
        LOG.info(
            _LI("Emitting zone exist events for shards %(start)s to %(end)s"),
            {"start": shard_begin, "end": shard_end},
        )
        ctxt = context.DesignateContext.get_admin_context()
        ctxt.all_tenants = True
        period_start, period_end = self._get_period(self.options.interval)
        extra_data = {
            "audit_period_beginning": period_start,
            "audit_period_ending": period_end
        }
        counter = 0
        for zone in self._iter_zones(ctxt):
            counter += 1
            payload = zone.to_dict()
            payload.update(extra_data)
            self.notifier.info(ctxt, 'dns.domain.exists', payload)
        LOG.info(_LI("Finished emitting %(counter)d events for shards "
                     "%(start)s to %(end)s"),
                 {"start": shard_begin, "end": shard_end, "counter": counter})
class PeriodicSecondaryRefreshTask(PeriodicTask):
    """Trigger an AXFR for secondary zones whose refresh interval elapsed."""
    __plugin_name__ = 'periodic_secondary_refresh'
    __interval__ = 3600

    @classmethod
    def get_cfg_opts(cls):
        """Register the task's option group; only the base options apply."""
        return [(cfg.OptGroup(cls.get_canonical_name()), cls.get_base_opts())]

    def __call__(self):
        shard_begin, shard_end = self._my_range()
        LOG.info(
            _LI("Refreshing zones between for %(start)s to %(end)s"),
            {"start": shard_begin, "end": shard_end},
        )
        ctxt = context.DesignateContext.get_admin_context()
        ctxt.all_tenants = True
        # each zone can have a different refresh / expire etc interval defined
        # in the SOA at the source / master servers
        for zone in self._iter_zones(ctxt, {"type": "SECONDARY"}):
            # NOTE: If the zone isn't transferred yet, ignore it.
            if zone.transferred_at is None:
                continue
            transferred = timeutils.parse_isotime(zone.transferred_at)
            seconds = timeutils.delta_seconds(
                transferred, timeutils.utcnow(True))
            if seconds <= zone.refresh:
                continue
            LOG.debug(
                "Zone %(id)s has %(seconds)d seconds since last transfer, "
                "executing AXFR",
                {"id": zone.id, "seconds": seconds},
            )
            self.central_api.xfr_zone(ctxt, zone.id)
class PeriodicGenerateDelayedNotifyTask(PeriodicTask):
    """Generate delayed NOTIFY transactions
    Scan the database for zones with the delayed_notify flag set.
    """
    __plugin_name__ = 'delayed_notify'
    __interval__ = 5

    def __init__(self):
        super(PeriodicGenerateDelayedNotifyTask, self).__init__()

    @classmethod
    def get_cfg_opts(cls):
        """Register the task's option group and options."""
        group = cfg.OptGroup(cls.get_canonical_name())
        # NOTE: 'interval' is already registered by get_base_opts(); the
        # redundant duplicate registration has been dropped.
        options = cls.get_base_opts() + [
            cfg.IntOpt(
                'batch_size',
                default=100,
                help='How many zones to receive NOTIFY on each run'
            ),
        ]
        return [(group, options)]

    def __call__(self):
        """Fetch a list of zones with the delayed_notify flag set up to
        "batch_size"
        Call Pool Manager to emit NOTIFY transactions,
        Reset the flag.
        """
        pstart, pend = self._my_range()
        ctxt = context.DesignateContext.get_admin_context()
        ctxt.all_tenants = True
        # Select zones where "delayed_notify" is set and starting from the
        # oldest "updated_at".
        # There's an index on delayed_notify.
        criterion = self._filter_between('shard')
        criterion['delayed_notify'] = True
        zones = self.central_api.find_zones(
            ctxt,
            criterion,
            limit=self.options.batch_size,
            sort_key='updated_at',
            sort_dir='asc',
        )
        # Lazy %-interpolation: formatted only when debug logging is enabled.
        LOG.debug(
            _LI("Performing delayed NOTIFY for %(start)s to %(end)s: %(n)d"),
            dict(start=pstart, end=pend, n=len(zones)),
        )
        pm_api = PoolManagerAPI.get_instance()
        for z in zones:
            pm_api.update_zone(ctxt, z)
            z.delayed_notify = False
            self.central_api.update_zone(ctxt, z)
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import inspect
import json
import logging
import math
import os
import re
import time
from abc import ABCMeta, abstractmethod
from logging import config as logging_config
import string_utils
from dictdiffer import diff
from kubernetes import watch
from kubernetes.client.models import V1DeleteOptions
from kubernetes.client.rest import ApiException
from six import add_metaclass
from urllib3.exceptions import MaxRetryError
from . import VERSION_RX
from .exceptions import KubernetesException
@add_metaclass(ABCMeta)
class BaseObjectHelper(object):
    """Abstract helper wrapping a Kubernetes/OpenShift API client for one
    model kind (api_version + kind), providing CRUD-style operations."""
    # Shared state populated by set_model() / set_client_config():
    api_client = None              # configured API client instance
    model = None                   # model class for the current api_version/kind
    properties = None              # dict: property name -> {'class', 'immutable'}
    base_model_name = None         # model class name with the API version stripped
    base_model_name_snake = None   # snake_case form of base_model_name
    logger = logging.getLogger(__name__)
def __init__(self, api_version=None, kind=None, debug=False, reset_logfile=True, timeout=20, **auth):
self.version_rx = re.compile("V\d((alpha|beta)\d)?")
self.api_version = api_version
self.kind = kind
self.timeout = timeout # number of seconds to wait for an API request
if api_version and kind:
self.set_model(api_version, kind)
if debug:
self.enable_debug(reset_logfile)
self.set_client_config(**auth)
    @staticmethod
    @abstractmethod
    def client_from_config(config_file, context):
        """Return an API client built from a kubeconfig file and context name."""
        pass

    @classmethod
    @abstractmethod
    def available_apis(cls):
        """Return the names of API classes that may serve this model."""
        # TODO: do proper api discovery
        pass

    @staticmethod
    @abstractmethod
    def api_class_from_name(api_name):
        """Resolve an API class from its name."""
        pass

    @staticmethod
    @abstractmethod
    def model_class_from_name(model_name):
        """Resolve a model class from its name."""
        pass

    @staticmethod
    @abstractmethod
    def get_exception_class():
        """Return the exception class raised for API failures."""
        pass
def set_model(self, api_version, kind):
""" Switch the client's model """
self.api_version = api_version
self.kind = kind
self.model = self.get_model(api_version, kind)
self.properties = self.properties_from_model_class(self.model)
self.base_model_name = self.get_base_model_name(self.model.__name__)
self.base_model_name_snake = self.get_base_model_name_snake(self.base_model_name)
def set_client_config(self, **auth):
""" Convenience method for updating the configuration object, and instantiating a new client """
auth_keys = ['api_key', 'ssl_ca_cert', 'cert_file', 'key_file', 'verify_ssl']
for key in auth_keys + ['kubeconfig', 'context', 'host']:
# If a key is not defined in auth, check the environment for a K8S_AUTH_ variable.
if auth.get(key) is None:
env_value = os.getenv('K8S_AUTH_{}'.format(key.upper()), None)
if env_value is not None:
auth[key] = env_value
config_file = auth.get('kubeconfig')
context = auth.get('context')
self.api_client = self.client_from_config(config_file, context)
if auth.get('host') is not None:
self.api_client.host = auth['host']
for key in auth_keys:
if auth.get(key, None) is not None:
if key == 'api_key':
self.api_client.configuration.api_key = {'authorization': auth[key]}
else:
setattr(self.api_client.configuration, key, auth[key])
@staticmethod
def enable_debug(to_file=True, filename='KubeObjHelper.log', reset_logfile=True):
logger_config = {
'version': 1,
'level': 'DEBUG',
'propogate': False,
'loggers': {
'openshift.helper': {
'handlers': ['debug_logger'],
'level': 'DEBUG',
'propagate': False
}
}
}
if to_file:
mode = 'w' if reset_logfile else 'a'
logger_config['handlers'] = {
'debug_logger': {
'class': 'logging.FileHandler',
'level': 'DEBUG',
'filename': filename,
'mode': mode,
'encoding': 'utf-8'
}
}
else:
logger_config['handlers'] = {
'debug_logger': {
'class': 'logging.StreamHandler',
'level': 'DEBUG'
}
}
logging_config.dictConfig(logger_config)
def has_method(self, method_action):
"""
Determine if the object has a particular method.
:param method_action: string. one of 'create', 'update', 'delete', 'patch', 'list'
"""
method = None
try:
method = self.lookup_method(method_action)
except KubernetesException:
try:
method = self.lookup_method(method_action, namespace='namespace')
except KubernetesException:
return False
return method is not None
def fix_serialization(self, obj):
if obj and obj.kind == "Service":
for port in obj.spec.ports:
try:
port.target_port = int(port.target_port)
except ValueError:
pass
elif obj and obj.kind == "Route" and obj.spec.port:
try:
obj.spec.port.target_port = int(obj.spec.port.target_port)
except ValueError:
pass
return obj
def get_object(self, name=None, namespace=None):
k8s_obj = None
method_name = 'list' if self.kind.endswith('list') else 'read'
try:
get_method = self.lookup_method(method_name, namespace)
if name is None and namespace is None:
k8s_obj = get_method()
elif name and namespace is None:
k8s_obj = get_method(name)
elif namespace and not name:
k8s_obj = get_method(namespace)
else:
k8s_obj = get_method(name, namespace)
except ApiException as exc:
if exc.status != 404:
if self.base_model_name == 'Project' and exc.status == 403:
pass
else:
msg = json.loads(exc.body).get('message', exc.reason) if exc.body.startswith('{') else exc.body
raise self.get_exception_class()(msg, status=exc.status)
except MaxRetryError as ex:
raise self.get_exception_class()(str(ex.reason))
return k8s_obj
    def patch_object(self, name, namespace, k8s_obj):
        """PATCH an existing object and return the updated model.

        :param name: object name
        :param namespace: namespace, or None for cluster-scoped kinds
        :param k8s_obj: model carrying the fields to patch
        :raises: the helper's exception class on API failure
        """
        self.logger.debug('Starting patch object')
        # resource_version is cleared before patching -- presumably so a stale
        # version does not cause a conflict; confirm against the API contract.
        k8s_obj.metadata.resource_version = None
        self.__remove_creation_timestamps(k8s_obj)
        # Watch stream used to pick up the object's state after the patch.
        w, stream = self._create_stream(namespace)
        return_obj = None
        self.logger.debug("Patching object: {}".format(k8s_obj.to_str()))
        try:
            patch_method = self.lookup_method('patch', namespace)
            if namespace:
                patch_method(name, namespace, k8s_obj)
            else:
                patch_method(name, k8s_obj)
        except ApiException as exc:
            # API errors may carry a JSON body with a 'message' field.
            msg = json.loads(exc.body).get('message', exc.reason) if exc.body.startswith('{') else exc.body
            raise self.get_exception_class()(msg, status=exc.status)
        if stream is not None:
            return_obj = self._read_stream(w, stream, name)
        # Fall back to polling when the watch produced nothing; projects and
        # namespaces are always confirmed by polling.
        if not return_obj or self.kind in ('project', 'namespace'):
            return_obj = self._wait_for_response(name, namespace, 'patch')
        return self.fix_serialization(return_obj)
    def create_object(self, namespace=None, k8s_obj=None, body=None):
        """
        Send a POST request to the API. Pass either k8s_obj or body.
        :param namespace: namespace value or None
        :param k8s_obj: optional k8s object model
        :param body: optional JSON dict
        :return: new object returned from the API
        """
        self.logger.debug('Starting create object')
        # Watch stream used to pick up the created object's state.
        w, stream = self._create_stream(namespace)
        return_obj = None
        # Name of the object being created, from the model or the raw body.
        name = None
        if k8s_obj:
            name = k8s_obj.metadata.name
        elif body:
            name = body.get('metadata', {}).get('name', None)
        try:
            create_method = self.lookup_method('create', namespace)
            # Dispatch on namespaced vs cluster-scoped and model vs raw body.
            if namespace:
                if k8s_obj:
                    create_method(namespace, k8s_obj)
                else:
                    create_method(namespace, body=body)
            else:
                if k8s_obj:
                    create_method(k8s_obj)
                else:
                    create_method(body=body)
        except ApiException as exc:
            # API errors may carry a JSON body with a 'message' field.
            msg = json.loads(exc.body).get('message', exc.reason) if exc.body.startswith('{') else exc.body
            raise self.get_exception_class()(msg, status=exc.status)
        except MaxRetryError as ex:
            raise self.get_exception_class()(str(ex.reason))
        if stream is not None:
            return_obj = self._read_stream(w, stream, name)
        # Fall back to polling when the watch produced nothing; projects and
        # namespaces are always confirmed by polling.
        if not return_obj or self.kind in ('project', 'namespace'):
            return_obj = self._wait_for_response(name, namespace, 'create')
        return self.fix_serialization(return_obj)
def delete_object(self, name, namespace):
self.logger.debug('Starting delete object {0} {1} {2}'.format(self.kind, name, namespace))
delete_method = self.lookup_method('delete', namespace)
if not namespace:
try:
if 'body' in inspect.getargspec(delete_method).args:
status_obj = delete_method(name, body=V1DeleteOptions(propagation_policy='Foreground'))
else:
status_obj = delete_method(name)
except ApiException as exc:
msg = json.loads(exc.body).get('message', exc.reason)
raise self.get_exception_class()(msg, status=exc.status)
except MaxRetryError as ex:
raise self.get_exception_class()(str(ex.reason))
else:
try:
if 'body' in inspect.getargspec(delete_method).args:
status_obj = delete_method(name, namespace, body=V1DeleteOptions(propagation_policy='Foreground'))
else:
status_obj = delete_method(name, namespace)
except ApiException as exc:
msg = json.loads(exc.body).get('message', exc.reason) if exc.body.startswith('{') else exc.body
raise self.get_exception_class()(msg, status=exc.status)
except MaxRetryError as ex:
raise self.get_exception_class()(str(ex.reason))
if status_obj is None or status_obj.status == 'Failure':
msg = 'Failed to delete {}'.format(name)
if namespace is not None:
msg += ' in namespace {}'.format(namespace)
msg += ' status: {}'.format(status_obj)
raise self.get_exception_class()(msg)
self._wait_for_response(name, namespace, 'delete')
    def replace_object(self, name, namespace, k8s_obj=None, body=None):
        """ Replace an existing object. Pass in a model object or request dict().
        Will first lookup the existing object to get the resource version and
        update the request.
        """
        self.logger.debug('Starting replace object')
        existing_obj = self.get_object(name, namespace)
        if not existing_obj:
            msg = "Error: Replacing object. Unable to find {}".format(name)
            msg += " in namespace {}".format(namespace) if namespace else ""
            raise self.get_exception_class()(msg)
        if k8s_obj:
            # Reset server-managed fields, then carry over the current
            # resourceVersion that the replace call requires.
            k8s_obj.status = self.properties['status']['class']()
            self.__remove_creation_timestamps(k8s_obj)
            k8s_obj.metadata.resource_version = existing_obj.metadata.resource_version
        elif body:
            body['metadata']['resourceVersion'] = existing_obj.metadata.resource_version
        # Watch stream used to pick up the replaced object's state.
        w, stream = self._create_stream(namespace)
        return_obj = None
        try:
            replace_method = self.lookup_method('replace', namespace)
            # Dispatch on namespaced vs cluster-scoped and model vs raw body.
            if k8s_obj:
                if namespace is None:
                    replace_method(name, k8s_obj)
                else:
                    replace_method(name, namespace, k8s_obj)
            else:
                if namespace is None:
                    replace_method(name, body=body)
                else:
                    replace_method(name, namespace, body=body)
        except ApiException as exc:
            # API errors may carry a JSON body with a 'message' field.
            msg = json.loads(exc.body).get('message', exc.reason) if exc.body.startswith('{') else exc.body
            raise self.get_exception_class()(msg, status=exc.status)
        except MaxRetryError as ex:
            raise self.get_exception_class()(str(ex.reason))
        if stream is not None:
            return_obj = self._read_stream(w, stream, name)
        # Fall back to polling when the watch produced nothing; projects and
        # namespaces are always confirmed by polling.
        if not return_obj or self.kind in ('project', 'namespace'):
            return_obj = self._wait_for_response(name, namespace, 'replace')
        return self.fix_serialization(return_obj)
@staticmethod
def objects_match(obj_a, obj_b):
""" Test the equality of two objects. Returns bool, list(differences). """
match = False
diffs = []
if obj_a is None and obj_b is None:
match = True
elif not obj_a or not obj_b:
pass
elif type(obj_a).__name__ != type(obj_b).__name__:
pass
else:
dict_a = obj_a.to_dict()
dict_b = obj_b.to_dict()
diffs = list(diff(dict_a, dict_b))
match = len(diffs) == 0
return match, diffs
@classmethod
def properties_from_model_class(cls, model_class):
"""
Introspect an object, and return a dict of 'name:dict of properties' pairs. The properties include: class,
and immutable (a bool).
:param model_class: An object instantiated from openshift.client.models
:return: dict
"""
# Create a list of model properties. Each property is represented as a dict of key:value pairs
# If a property does not have a setter, it's considered to be immutable
properties = [
{'name': x,
'immutable': False if getattr(getattr(model_class, x), 'setter', None) else True
}
for x in model_class.attribute_map.keys() if isinstance(getattr(model_class, x), property)
]
result = {}
for prop in properties:
prop_kind = model_class.swagger_types[prop['name']]
if prop_kind == 'datetime':
prop_kind = 'str'
if prop_kind in ('str', 'int', 'bool', 'object', 'float'):
prop_class = eval(prop_kind)
elif prop_kind.startswith('list['):
prop_class = list
elif prop_kind.startswith('dict('):
prop_class = dict
else:
try:
prop_class = cls.model_class_from_name(prop_kind)
except AttributeError:
prop_class = cls.model_class_from_name(prop_kind)
result[prop['name']] = {
'class': prop_class,
'immutable': prop['immutable']
}
return result
def candidate_apis(self):
api_match = self.api_version.replace('/', '_').lower()
return [
api for api in self.available_apis()
if api_match in self.attribute_to_snake(api)
or not VERSION_RX.match(api)
]
def lookup_method(self, operation=None, namespace=None, method_name=None):
"""
Get the requested method (e.g. create, delete, patch, update) for
the model object.
:param operation: one of create, delete, patch, update
:param namespace: optional name of the namespace.
:return: pointer to the method
"""
if not method_name:
method_name = operation
method_name += '_namespaced_' if namespace else '_'
method_name += self.kind.replace('_list', '') if self.kind.endswith('_list') else self.kind
method = None
for api in self.candidate_apis():
api_class = self.api_class_from_name(api)
method = getattr(api_class(self.api_client), method_name, None)
if method is not None:
break
if method is None:
msg = "Did you forget to include the namespace?" if not namespace else ""
raise self.get_exception_class()(
"Error: method {0} not found for model {1}. {2}".format(method_name, self.kind, msg)
)
return method
@classmethod
def get_base_model_name(cls, model_name):
"""
Return model_name with API Version removed.
:param model_name: string
:return: string
"""
return VERSION_RX.sub('', model_name)
def get_base_model_name_snake(self, model_name):
"""
Return base model name with API version removed, and remainder converted to snake case
:param model_name: string
:return: string
"""
result = self.get_base_model_name(model_name)
return self.attribute_to_snake(result)
@staticmethod
def attribute_to_snake(name):
"""
Convert an object property name from camel to snake
:param name: string to convert
:return: string
"""
def replace(m):
m = m.group(0)
return m[0] + '_' + m[1:]
p = r'[a-z][A-Z]|' \
r'[A-Z]{2}[a-z]'
result = re.sub(p, replace, name)
return result.lower()
def get_model(self, api_version, kind):
"""
Return the model class for the requested object.
:param api_version: API version string
:param kind: The name of object type (i.e. Service, Route, Container, etc.)
:return: class
"""
# Handle API paths. In the case of 'batch/', remove it completely, otherwise, replace '/' with '_'.
api = re.sub(r'batch/', '', api_version, count=0, flags=re.IGNORECASE).replace('/', '_')
camel_kind = string_utils.snake_case_to_camel(kind)
camel_api_version = string_utils.snake_case_to_camel(api)
# capitalize the first letter of the string without lower-casing the remainder
name = (camel_kind[:1].capitalize() + camel_kind[1:]).replace("Api", "API")
api = camel_api_version[:1].capitalize() + camel_api_version[1:]
model_name = api + name
try:
model = self.model_class_from_name(model_name)
except AttributeError:
raise self.get_exception_class()(
"Error: {} was not found in client.models. "
"Did you specify the correct Kind and API Version?".format(model_name)
)
return model
def __remove_creation_timestamps(self, obj):
""" Recursively look for creation_timestamp property, and set it to None """
if hasattr(obj, 'swagger_types'):
for key, value in obj.swagger_types.items():
if key == 'creation_timestamp':
obj.creation_timestamp = None
continue
if value.startswith('dict(') or value.startswith('list['):
continue
if value in ('str', 'int', 'bool'):
continue
if getattr(obj, key) is not None:
self.__remove_creation_timestamps(getattr(obj, key))
def _wait_for_response(self, name, namespace, action):
""" Wait for an API response """
tries = 0
half = math.ceil(self.timeout / 2)
obj = None
if self.kind in ('project', 'namespace'):
# Wait for annotations to be applied
time.sleep(1)
while tries <= half:
obj = self.get_object(name, namespace)
if action == 'delete':
if not obj:
break
elif obj and not hasattr(obj, 'status'):
break
elif obj and obj.status and hasattr(obj.status, 'phase'):
if obj.status.phase == 'Active':
break
elif obj and obj.status:
break
tries += 2
time.sleep(2)
return obj
def _create_stream(self, namespace):
""" Create a stream that gets events for the our model """
w = None
stream = None
exception_class = self.get_exception_class()
try:
list_method = self.lookup_method('list', namespace)
w = watch.Watch()
w._api_client = self.api_client # monkey patch for access to OpenShift models
if namespace:
stream = w.stream(list_method, namespace, _request_timeout=self.timeout)
else:
stream = w.stream(list_method, _request_timeout=self.timeout)
except exception_class:
pass
except Exception:
raise
return w, stream
    def _read_stream(self, watcher, stream, name):
        """Consume watch events until the object called *name* reaches a
        terminal/ready state, then return it (serialization fixed up).

        Any exception while reading the stream (typically the watch request
        timeout) is logged and treated as "no result yet": the method then
        returns whatever was captured so far (usually None).
        """
        return_obj = None
        try:
            for event in stream:
                obj = None
                if event.get('object'):
                    obj_json = event['object'].to_str()
                    self.logger.debug(
                        "EVENT type: {0} object: {1}".format(event['type'], obj_json)
                    )
                    obj = event['object']
                else:
                    self.logger.debug(repr(event))
                # NOTE(review): events without an 'object' key would raise
                # here; that error is absorbed by the blanket except below.
                if event['object'].metadata.name == name:
                    if event['type'] == 'DELETED':
                        # Object was deleted
                        return_obj = obj
                        watcher.stop()
                        break
                    elif obj is not None:
                        # Object is either added or modified. Check the status and determine if we
                        # should continue waiting
                        if hasattr(obj, 'status'):
                            status = getattr(obj, 'status')
                            if hasattr(status, 'phase'):
                                # e.g. namespaces/projects report a phase
                                if status.phase == 'Active':
                                    # TODO other phase values ??
                                    # TODO test namespaces for OpenShift annotations if needed
                                    return_obj = obj
                                    watcher.stop()
                                    break
                            elif hasattr(status, 'conditions'):
                                conditions = getattr(status, 'conditions')
                                if conditions and len(conditions) > 0:
                                    # We know there is a status, but it's up to the user to determine meaning.
                                    return_obj = obj
                                    watcher.stop()
                                    break
                            elif obj.kind == 'Service' and status is not None:
                                # Services expose a status object with no phase/conditions
                                return_obj = obj
                                watcher.stop()
                                break
                            elif obj.kind == 'Route':
                                # Routes are ready once every ingress condition
                                # is within the accepted set
                                route_statuses = set()
                                for route_ingress in status.ingress:
                                    for condition in route_ingress.conditions:
                                        route_statuses.add(condition.type)
                                if route_statuses <= {'Ready', 'Admitted'}:
                                    return_obj = obj
                                    watcher.stop()
                                    break
        except Exception as exc:
            # A timeout occurred
            self.logger.debug('STREAM FAILED: {}'.format(exc))
            pass
        return self.fix_serialization(return_obj)
| |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# This test compares the output of the intake pipeline
# with known ground truth output.
# The way to add a new test is:
# - load the test timeline
# $ ./e-mission-py.bash bin/debug/load_timeline_for_day_and_user.py emission/tests/data/real_examples/iphone_2016-02-22 test-iphone-feb-22
# ...
# Loading file emission/tests/data/real_examples/iphone_2016-02-22
# After registration, test-iphone-feb-22 -> 349b4f21-7cd2-4ac6-8786-ea69142c2238
#
# Note that there is some randomness in the tests, due to
# a56adddc5dc8c94cbe98964aafb17df3bc3f724c, so we need to use a random seed
# The tests use a seed of 61297777 - if the intake pipeline is being run to
# generate ground truth, the line setting the seed in the intake pipeline
# needs to be re-instituted.
# - run the intake pipeline
# $ ./e-mission-py.bash bin/debug/intake_single_user.py -e test-iphone-feb-22
# - log in via the phone and check that all is well
# - save the ground truth
# $ ./e-mission-py.bash bin/debug/save_ground_truth.py -e test-iphone-feb-22 2016-02-22 /tmp/iphone_2016-02-22.ground_truth
# Copy it back and add the test to this file
# $ mv /tmp/iphone_2016-02-22.ground_truth emission/tests/data/real_examples
from future import standard_library
standard_library.install_aliases()
from builtins import zip
from builtins import *
import unittest
import logging
import json
import bson.json_util as bju
import attrdict as ad
import arrow
import numpy as np
# Our imports
import emission.core.get_database as edb
import emission.core.wrapper.localdate as ecwl
import emission.net.usercache.abstract_usercache_handler as enuah
import emission.analysis.plotting.geojson.geojson_feature_converter as gfc
import emission.storage.timeseries.tcquery as estt
import emission.core.common as ecc
# Test imports
import emission.tests.common as etc
class TestPipelineRealData(unittest.TestCase):
def setUp(self):
# Thanks to M&J for the number!
np.random.seed(61297777)
    def tearDown(self):
        """Clear all databases touched by the test that just ran."""
        logging.debug("Clearing related databases")
        self.clearRelatedDb()
def clearRelatedDb(self):
edb.get_timeseries_db().delete_many({"user_id": self.testUUID})
edb.get_analysis_timeseries_db().delete_many({"user_id": self.testUUID})
edb.get_usercache_db().delete_many({"user_id": self.testUUID})
def compare_result(self, result, expect):
# This is basically a bunch of asserts to ensure that the timeline is as
# expected. We are not using a recursive diff because things like the IDs
# will change from run to run. Instead, I pick out a bunch of important
# things that are highly user visible
# Since this is deterministic, we can also include things that are not that user visible :)
for rt, et in zip(result, expect):
logging.debug("Comparing %s -> %s with %s -> %s" %
(rt.properties.start_fmt_time, rt.properties.end_fmt_time,
et.properties.start_fmt_time, et.properties.end_fmt_time))
self.assertEqual(len(result), len(expect))
for rt, et in zip(result, expect):
logging.debug("======= Comparing trip =========")
logging.debug(json.dumps(rt.properties, indent=4, default=bju.default))
logging.debug(json.dumps(et.properties, indent=4, default=bju.default))
# Highly user visible
self.assertEqual(rt.properties.start_ts, et.properties.start_ts)
self.assertEqual(rt.properties.end_ts, et.properties.end_ts)
self.assertEqual(rt.properties.start_loc, et.properties.start_loc)
self.assertEqual(rt.properties.end_loc, et.properties.end_loc)
self.assertAlmostEqual(rt.properties.distance, et.properties.distance, places=2)
self.assertEqual(len(rt.features), len(et.features))
for rs, es in zip(rt.features, et.features):
logging.debug("------- Comparing trip feature ---------")
logging.debug(json.dumps(rs, indent=4, default=bju.default))
logging.debug(json.dumps(es, indent=4, default=bju.default))
self.assertEqual(rs.type, es.type)
if rs.type == "Feature":
# The first place will not have an enter time, so we can't check it
if 'enter_fmt_time' not in rs.properties:
self.assertNotIn("enter_fmt_time", es.properties)
else:
self.assertEqual(rs.properties.enter_fmt_time, es.properties.enter_fmt_time)
# Similarly, the last place will not have an exit time, so we can't check it
if 'exit_fmt_time' not in rs.properties:
self.assertNotIn("exit_fmt_time", es.properties)
else:
self.assertEqual(rs.properties.exit_fmt_time, es.properties.exit_fmt_time)
self.assertEqual(rs.properties.feature_type, es.properties.feature_type)
else:
self.assertEqual(rs.type, "FeatureCollection")
self.assertEqual(rs.features[0].properties.start_fmt_time, es.features[0].properties.start_fmt_time)
self.assertEqual(rs.features[0].properties.end_fmt_time, es.features[0].properties.end_fmt_time)
self.assertEqual(rs.features[0].properties.sensed_mode, es.features[0].properties.sensed_mode)
self.assertEqual(len(rs.features[0].properties.speeds), len(es.features[0].properties.speeds))
self.assertEqual(len(rs.features[0].geometry.coordinates), len(es.features[0].geometry.coordinates))
logging.debug(20 * "-")
logging.debug(20 * "=")
def compare_approx_result(self, result, expect, distance_fuzz, time_fuzz):
# This is basically a bunch of asserts to ensure that the timeline is as
# expected. We are not using a recursive diff because things like the IDs
# will change from run to run. Instead, I pick out a bunch of important
# things that are highly user visible
# Since this is deterministic, we can also include things that are not that user visible :)
for rt, et in zip(result, expect):
logging.debug("Comparing %s -> %s with %s -> %s" %
(rt.properties.start_fmt_time, rt.properties.end_fmt_time,
et.properties.start_fmt_time, et.properties.end_fmt_time))
self.assertEqual(len(result), len(expect))
for rt, et in zip(result, expect):
logging.debug("======= Comparing trip =========")
logging.debug(json.dumps(rt.properties, indent=4, default=bju.default))
logging.debug(json.dumps(et.properties, indent=4, default=bju.default))
# Highly user visible
self.assertAlmostEqual(rt.properties.start_ts, et.properties.start_ts, delta=time_fuzz)
self.assertAlmostEqual(rt.properties.end_ts, et.properties.end_ts, delta=time_fuzz)
self.assertLessEqual(ecc.calDistance(rt.properties.start_loc.coordinates, et.properties.start_loc.coordinates), distance_fuzz)
self.assertLessEqual(ecc.calDistance(rt.properties.end_loc.coordinates, et.properties.end_loc.coordinates), distance_fuzz)
self.assertAlmostEqual(rt.properties.distance, et.properties.distance, delta=distance_fuzz)
self.assertEqual(len(rt.features), len(et.features))
for rs, es in zip(rt.features, et.features):
logging.debug("------- Comparing trip feature ---------")
logging.debug(json.dumps(rs, indent=4, default=bju.default))
logging.debug(json.dumps(es, indent=4, default=bju.default))
self.assertEqual(rs.type, es.type)
if rs.type == "Feature":
# The first place will not have an enter time, so we can't check it
if 'enter_fmt_time' not in rs.properties:
self.assertNotIn("enter_fmt_time", es.properties)
else:
self.assertAlmostEqual(rs.properties.enter_ts, es.properties.enter_ts, delta=time_fuzz)
# Similarly, the last place will not have an exit time, so we can't check it
if 'exit_fmt_time' not in rs.properties:
self.assertNotIn("exit_fmt_time", es.properties)
else:
self.assertAlmostEqual(rs.properties.exit_ts, es.properties.exit_ts, delta=time_fuzz)
self.assertEqual(rs.properties.feature_type, es.properties.feature_type)
else:
self.assertEqual(rs.type, "FeatureCollection")
self.assertAlmostEqual(rs.features[0].properties.start_ts, es.features[0].properties.start_ts, delta=time_fuzz)
self.assertAlmostEqual(rs.features[0].properties.end_ts, es.features[0].properties.end_ts, delta=time_fuzz)
self.assertEqual(rs.features[0].properties.sensed_mode, es.features[0].properties.sensed_mode)
# Fuzz for resampled data as well
# https://github.com/e-mission/e-mission-server/issues/288#issuecomment-242450106
self.assertAlmostEqual(len(rs.features[0].properties.speeds), len(es.features[0].properties.speeds), delta=2)
self.assertAlmostEqual(len(rs.features[0].geometry.coordinates), len(es.features[0].geometry.coordinates), delta=2)
logging.debug(20 * "-")
logging.debug(20 * "=")
def testJun20(self):
# This is a fairly straightforward day. Tests mainly:
# - ordering of trips
# - handling repeated location entries with different write timestamps
# We have two identical location points with ts = 1466436483.395 and write_ts = 1466436496.4, 1466436497.047
dataFile = "emission/tests/data/real_examples/shankari_2016-06-20"
ld = ecwl.LocalDate({'year': 2016, 'month': 6, 'day': 20})
cacheKey = "diary/trips-2016-06-20"
with open(dataFile+".ground_truth") as gfp:
ground_truth = json.load(gfp, object_hook=bju.object_hook)
etc.setupRealExample(self, dataFile)
etc.runIntakePipeline(self.testUUID)
# runIntakePipeline does not run the common trips, habitica or store views to cache
# So let's manually store to the cache
# tc_query = estt.TimeComponentQuery("data.star_local_dt", ld, ld)
# enuah.UserCacheHandler.getUserCacheHandler(self.testUUID).storeTimelineToCache(tc_query)
# cached_result = edb.get_usercache_db().find_one({'user_id': self.testUUID,
# "metadata.key": cacheKey})
api_result = gfc.get_geojson_for_dt(self.testUUID, ld, ld)
# self.compare_result(cached_result, ground_truth)
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth).data)
def testJun21(self):
# This is a more complex day. Tests:
# PR #357 (spurious trip at 14:00 should be segmented and skipped)
# PR #358 (trip back from bella's house at 16:00)
dataFile = "emission/tests/data/real_examples/shankari_2016-06-21"
ld = ecwl.LocalDate({'year': 2016, 'month': 6, 'day': 21})
cacheKey = "diary/trips-2016-06-21"
with open(dataFile+".ground_truth") as gfp:
ground_truth = json.load(gfp, object_hook=bju.object_hook)
etc.setupRealExample(self, dataFile)
etc.runIntakePipeline(self.testUUID)
# runIntakePipeline does not run the common trips, habitica or store views to cache
# So let's manually store to the cache
# tc_query = estt.TimeComponentQuery("data.star_local_dt", ld, ld)
# enuah.UserCacheHandler.getUserCacheHandler(self.testUUID).storeTimelineToCache(tc_query)
# cached_result = edb.get_usercache_db().find_one({'user_id': self.testUUID,
# "metadata.key": cacheKey})
api_result = gfc.get_geojson_for_dt(self.testUUID, ld, ld)
# self.compare_result(cached_result, ground_truth)
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth).data)
def testAug10(self):
# This is a more complex day. Tests:
# PR #302 (trip to optometrist)
# PR #352 (split optometrist trip)
dataFile = "emission/tests/data/real_examples/shankari_2016-08-10"
ld = ecwl.LocalDate({'year': 2016, 'month': 8, 'day': 10})
cacheKey = "diary/trips-2016-08-10"
with open(dataFile+".ground_truth") as gfp:
ground_truth = json.load(gfp, object_hook=bju.object_hook)
etc.setupRealExample(self, dataFile)
etc.runIntakePipeline(self.testUUID)
# runIntakePipeline does not run the common trips, habitica or store views to cache
# So let's manually store to the cache
# tc_query = estt.TimeComponentQuery("data.star_local_dt", ld, ld)
# enuah.UserCacheHandler.getUserCacheHandler(self.testUUID).storeTimelineToCache(tc_query)
# cached_result = edb.get_usercache_db().find_one({'user_id': self.testUUID,
# "metadata.key": cacheKey})
api_result = gfc.get_geojson_for_dt(self.testUUID, ld, ld)
# self.compare_result(cached_result, ground_truth)
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth).data)
def testAug11(self):
# This is a more complex day. Tests:
# PR #352 (should not split trip to Oakland)
# PR #348 (trip from station to OAK DOT)
# PR #357 (trip to Radio Shack is complete and not truncated)
# PR #345 (no cleaned trips are skipped)
dataFile = "emission/tests/data/real_examples/shankari_2016-08-11"
ld = ecwl.LocalDate({'year': 2016, 'month': 8, 'day': 11})
cacheKey = "diary/trips-2016-08-11"
with open(dataFile+".ground_truth") as gfp:
ground_truth = json.load(gfp, object_hook=bju.object_hook)
etc.setupRealExample(self, dataFile)
etc.runIntakePipeline(self.testUUID)
# runIntakePipeline does not run the common trips, habitica or store views to cache
# So let's manually store to the cache
# tc_query = estt.TimeComponentQuery("data.star_local_dt", ld, ld)
# enuah.UserCacheHandler.getUserCacheHandler(self.testUUID).storeTimelineToCache(tc_query)
# cached_result = edb.get_usercache_db().find_one({'user_id': self.testUUID,
# "metadata.key": cacheKey})
api_result = gfc.get_geojson_for_dt(self.testUUID, ld, ld)
# self.compare_result(cached_result, ground_truth)
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth).data)
def testFeb22ShortTripsDistance(self):
dataFile = "emission/tests/data/real_examples/iphone_3_2016-02-22"
start_ld = ecwl.LocalDate({'year': 2016, 'month': 2, 'day': 22})
end_ld = ecwl.LocalDate({'year': 2016, 'month': 2, 'day': 22})
cacheKey = "diary/trips-2016-02-22"
with open(dataFile+".ground_truth") as gfp:
ground_truth = json.load(gfp, object_hook=bju.object_hook)
etc.setupRealExample(self, dataFile)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, end_ld)
# Although we process the day's data in two batches, we should get the same result
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth).data)
def testAug27TooMuchExtrapolation(self):
dataFile = "emission/tests/data/real_examples/shankari_2015-aug-27"
start_ld = ecwl.LocalDate({'year': 2015, 'month': 8, 'day': 27})
end_ld = ecwl.LocalDate({'year': 2015, 'month': 8, 'day': 27})
cacheKey = "diary/trips-2015-08-27"
with open(dataFile+".ground_truth") as gfp:
ground_truth = json.load(gfp, object_hook=bju.object_hook)
etc.setupRealExample(self, dataFile)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, end_ld)
# Although we process the day's data in two batches, we should get the same result
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth).data)
def testAirTripToHawaii(self):
dataFile = "emission/tests/data/real_examples/shankari_2016-07-27"
start_ld = ecwl.LocalDate({'year': 2016, 'month': 7, 'day': 27})
cacheKey = "diary/trips-2016-07-27"
with open(dataFile+".ground_truth") as gfp:
ground_truth = json.load(gfp, object_hook=bju.object_hook)
etc.setupRealExample(self, dataFile)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, start_ld)
# Although we process the day's data in two batches, we should get the same result
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth).data)
def testAirTripHawaiiEnd(self):
dataFile = "emission/tests/data/real_examples/shankari_2016-08-04"
start_ld = ecwl.LocalDate({'year': 2016, 'month': 8, 'day': 4})
cacheKey = "diary/trips-2016-07-27"
with open(dataFile+".ground_truth") as gfp:
ground_truth = json.load(gfp, object_hook=bju.object_hook)
etc.setupRealExample(self, dataFile)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, start_ld)
# Although we process the day's data in two batches, we should get the same result
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth).data)
def testAirTripFromHawaii(self):
dataFile = "emission/tests/data/real_examples/shankari_2016-08-05"
start_ld = ecwl.LocalDate({'year': 2016, 'month': 8, 'day': 0o5})
cacheKey = "diary/trips-2016-07-05"
ground_truth = json.load(open(dataFile+".ground_truth"), object_hook=bju.object_hook)
etc.setupRealExample(self, dataFile)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, start_ld)
# Although we process the day's data in two batches, we should get the same result
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth).data)
def testSunilShortTrips(self):
dataFile = "emission/tests/data/real_examples/sunil_2016-07-27"
start_ld = ecwl.LocalDate({'year': 2016, 'month': 7, 'day': 27})
cacheKey = "diary/trips-2016-07-27"
ground_truth = json.load(open(dataFile+".ground_truth"), object_hook=bju.object_hook)
etc.setupRealExample(self, dataFile)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, start_ld)
# Although we process the day's data in two batches, we should get the same result
self.assertEqual(api_result, [])
def testGabeShortTrips(self):
dataFile = "emission/tests/data/real_examples/gabe_2016-06-15"
start_ld = ecwl.LocalDate({'year': 2016, 'month': 6, 'day': 15})
cacheKey = "diary/trips-2016-06-15"
ground_truth = json.load(open(dataFile+".ground_truth"), object_hook=bju.object_hook)
etc.setupRealExample(self, dataFile)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, start_ld)
# Although we process the day's data in two batches, we should get the same result
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth).data)
def testJumpSmoothingSectionEnd(self):
dataFile = "emission/tests/data/real_examples/shankari_2016-independence_day"
start_ld = ecwl.LocalDate({'year': 2016, 'month': 8, 'day': 15})
cacheKey = "diary/trips-2016-08-15"
ground_truth = json.load(open("emission/tests/data/real_examples/shankari_2016-independence_day.ground_truth"), object_hook=bju.object_hook)
etc.setupRealExample(self, dataFile)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, start_ld)
# Although we process the day's data in two batches, we should get the same result
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth).data)
def testJumpSmoothingSectionsStraddle(self):
dataFile = "emission/tests/data/real_examples/shankari_2016-independence_day_jump_straddle"
start_ld = ecwl.LocalDate({'year': 2016, 'month': 8, 'day': 15})
cacheKey = "diary/trips-2016-08-15"
ground_truth = json.load(open("emission/tests/data/real_examples/shankari_2016-independence_day.ground_truth"), object_hook=bju.object_hook)
etc.setupRealExample(self, dataFile)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, start_ld)
# Although we process the day's data in two batches, we should get the same result
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth).data)
def testJumpSmoothingSectionStart(self):
dataFile = "emission/tests/data/real_examples/shankari_2016-independence_day_jump_bus_start"
start_ld = ecwl.LocalDate({'year': 2016, 'month': 8, 'day': 15})
cacheKey = "diary/trips-2016-08-15"
ground_truth = json.load(open("emission/tests/data/real_examples/shankari_2016-independence_day.ground_truth"), object_hook=bju.object_hook)
etc.setupRealExample(self, dataFile)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, start_ld)
# Although we process the day's data in two batches, we should get the same result
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth).data)
def testIndexLengthChange(self):
# Test for 94f67b4848611fa01c4327a0fa0cab97c2247744
dataFile = "emission/tests/data/real_examples/shankari_2015-08-23"
start_ld = ecwl.LocalDate({'year': 2015, 'month': 8, 'day': 23})
cacheKey = "diary/trips-2015-08-23"
ground_truth = json.load(open("emission/tests/data/real_examples/shankari_2015-08-23.ground_truth"), object_hook=bju.object_hook)
etc.setupRealExample(self, dataFile)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, start_ld)
# Although we process the day's data in two batches, we should get the same result
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth).data)
def testSquishedMismatchForUntrackedTime(self):
# Test for a2c0ee4e3ceafa822425ceef299dcdb01c9b32c9
dataFile = "emission/tests/data/real_examples/shankari_2015-07-22"
start_ld = ecwl.LocalDate({'year': 2015, 'month': 7, 'day': 22})
cacheKey = "diary/trips-2015-07-22"
ground_truth = json.load(open("emission/tests/data/real_examples/shankari_2015-07-22.ground_truth"), object_hook=bju.object_hook)
etc.setupRealExample(self, dataFile)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, start_ld)
# Although we process the day's data in two batches, we should get the same result
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth).data)
def testUnknownTrips(self):
# Test for a2c0ee4e3ceafa822425ceef299dcdb01c9b32c9
dataFile = "emission/tests/data/real_examples/shankari_2016-09-03"
start_ld = ecwl.LocalDate({'year': 2016, 'month': 9, 'day': 3})
cacheKey = "diary/trips-2016-09-03"
ground_truth = json.load(open("emission/tests/data/real_examples/shankari_2016-09-03.ground_truth"), object_hook=bju.object_hook)
etc.setupRealExample(self, dataFile)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, start_ld)
# Although we process the day's data in two batches, we should get the same result
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth).data)
def testIosJumpsAndUntrackedSquishing(self):
# Test for a2c0ee4e3ceafa822425ceef299dcdb01c9b32c9
dataFile = "emission/tests/data/real_examples/sunil_2016-07-20"
start_ld = ecwl.LocalDate({'year': 2016, 'month': 7, 'day': 20})
cacheKey = "diary/trips-2016-07-20"
ground_truth = json.load(open("emission/tests/data/real_examples/sunil_2016-07-20.ground_truth"), object_hook=bju.object_hook)
etc.setupRealExample(self, dataFile)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, start_ld)
# Although we process the day's data in two batches, we should get the same result
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth).data)
def testAug10MultiSyncEndDetected(self):
# Re-run, but with multiple calls to sync data
# This tests the effect of online versus offline analysis and segmentation with potentially partial data
dataFile = "emission/tests/data/real_examples/shankari_2016-08-10"
start_ld = ecwl.LocalDate({'year': 2016, 'month': 8, 'day': 9})
end_ld = ecwl.LocalDate({'year': 2016, 'month': 8, 'day': 10})
cacheKey = "diary/trips-2016-08-10"
ground_truth = json.load(open("emission/tests/data/real_examples/shankari_2016-08-910.ground_truth"),
object_hook=bju.object_hook)
logging.info("Before loading, timeseries db size = %s" % edb.get_timeseries_db().count())
all_entries = None
with open(dataFile) as secondfp:
all_entries = json.load(secondfp, object_hook = bju.object_hook)
ts_1030 = arrow.get("2016-08-10T10:30:00-07:00").timestamp
logging.debug("ts_1030 = %s, converted back = %s" % (ts_1030, arrow.get(ts_1030).to("America/Los_Angeles")))
before_1030_entries = [e for e in all_entries if ad.AttrDict(e).metadata.write_ts <= ts_1030]
after_1030_entries = [e for e in all_entries if ad.AttrDict(e).metadata.write_ts > ts_1030]
# First load all data from the 9th. Otherwise, the missed trip is the first trip,
# and we don't set the last_ts_processed
# See the code around "logging.debug("len(segmentation_points) == 0, early return")"
etc.setupRealExample(self, "emission/tests/data/real_examples/shankari_2016-08-09")
# Sync at 10:30 to capture all the points on the trip *to* the optometrist
self.entries = before_1030_entries
etc.setupRealExampleWithEntries(self)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, end_ld)
# Then sync after 10:30
self.entries = after_1030_entries
etc.setupRealExampleWithEntries(self)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, end_ld)
# Although we process the day's data in two batches, we should get the same result
self.compare_approx_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth).data, time_fuzz=60, distance_fuzz=100)
def testFeb22MultiSyncEndDetected(self):
# Re-run, but with multiple calls to sync data
# This tests the effect of online versus offline analysis and segmentation with potentially partial data
dataFile = "emission/tests/data/real_examples/iphone_2016-02-22"
start_ld = ecwl.LocalDate({'year': 2016, 'month': 2, 'day': 22})
end_ld = ecwl.LocalDate({'year': 2016, 'month': 2, 'day': 22})
cacheKey = "diary/trips-2016-02-22"
ground_truth = json.load(open(dataFile+".ground_truth"), object_hook=bju.object_hook)
logging.info("Before loading, timeseries db size = %s" % edb.get_timeseries_db().count())
all_entries = json.load(open(dataFile), object_hook = bju.object_hook)
# 18:01 because the transition was at 2016-02-22T18:00:09.623404-08:00, so right after
# 18:00
ts_1800 = arrow.get("2016-02-22T18:00:30-08:00").timestamp
logging.debug("ts_1800 = %s, converted back = %s" % (ts_1800, arrow.get(ts_1800).to("America/Los_Angeles")))
before_1800_entries = [e for e in all_entries if ad.AttrDict(e).metadata.write_ts <= ts_1800]
after_1800_entries = [e for e in all_entries if ad.AttrDict(e).metadata.write_ts > ts_1800]
# Sync at 18:00 to capture all the points on the trip *to* the optometrist
import uuid
self.testUUID = uuid.uuid4()
self.entries = before_1800_entries
etc.setupRealExampleWithEntries(self)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, end_ld)
# Then sync after 18:00
self.entries = after_1800_entries
etc.setupRealExampleWithEntries(self)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, end_ld)
# Although we process the day's data in two batches, we should get the same result
self.compare_approx_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth).data, time_fuzz=60, distance_fuzz=100)
def testAug10MultiSyncEndNotDetected(self):
# Re-run, but with multiple calls to sync data
# This tests the effect of online versus offline analysis and segmentation with potentially partial data
dataFile = "emission/tests/data/real_examples/shankari_2016-08-10"
start_ld = ecwl.LocalDate({'year': 2016, 'month': 8, 'day': 9})
end_ld = ecwl.LocalDate({'year': 2016, 'month': 8, 'day': 10})
cacheKey = "diary/trips-2016-08-10"
ground_truth = json.load(open("emission/tests/data/real_examples/shankari_2016-08-910.ground_truth"),
object_hook=bju.object_hook)
logging.info("Before loading, timeseries db size = %s" % edb.get_timeseries_db().count())
all_entries = json.load(open(dataFile), object_hook = bju.object_hook)
ts_1030 = arrow.get("2016-08-10T10:30:00-07:00").timestamp
logging.debug("ts_1030 = %s, converted back = %s" % (ts_1030, arrow.get(ts_1030).to("America/Los_Angeles")))
before_1030_entries = [e for e in all_entries if ad.AttrDict(e).metadata.write_ts <= ts_1030]
after_1030_entries = [e for e in all_entries if ad.AttrDict(e).metadata.write_ts > ts_1030]
# First load all data from the 9th. Otherwise, the missed trip is the first trip,
# and we don't set the last_ts_processed
# See the code around "logging.debug("len(segmentation_points) == 0, early return")"
etc.setupRealExample(self, "emission/tests/data/real_examples/shankari_2016-08-09")
# Sync at 10:30 to capture all the points on the trip *to* the optometrist
# Skip the last few points to ensure that the trip end is skipped
self.entries = before_1030_entries[0:-2]
etc.setupRealExampleWithEntries(self)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, end_ld)
# Then sync after 10:30
self.entries = after_1030_entries
etc.setupRealExampleWithEntries(self)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, end_ld)
# Although we process the day's data in two batches, we should get the same result
self.compare_approx_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth).data, time_fuzz=60, distance_fuzz=100)
def testJul22SplitAroundReboot(self):
dataFile_1 = "emission/tests/data/real_examples/shankari_2016-07-22"
dataFile_2 = "emission/tests/data/real_examples/shankari_2016-07-25"
start_ld_1 = ecwl.LocalDate({'year': 2016, 'month': 7, 'day': 22})
start_ld_2 = ecwl.LocalDate({'year': 2016, 'month': 7, 'day': 25})
cacheKey_1 = "diary/trips-2016-07-22"
cacheKey_2 = "diary/trips-2016-07-25"
ground_truth_1 = json.load(open(dataFile_1+".ground_truth"), object_hook=bju.object_hook)
ground_truth_2 = json.load(open(dataFile_2+".ground_truth"), object_hook=bju.object_hook)
etc.setupRealExample(self, dataFile_1)
etc.runIntakePipeline(self.testUUID)
self.entries = json.load(open(dataFile_2), object_hook = bju.object_hook)
etc.setupRealExampleWithEntries(self)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld_1, start_ld_1)
# Although we process the day's data in two batches, we should get the same result
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth_1).data)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld_2, start_ld_2)
# Although we process the day's data in two batches, we should get the same result
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth_2).data)
def testFeb22MultiSyncEndNotDetected(self):
# Re-run, but with multiple calls to sync data
# This tests the effect of online versus offline analysis and segmentation with potentially partial data
dataFile = "emission/tests/data/real_examples/iphone_2016-02-22"
start_ld = ecwl.LocalDate({'year': 2016, 'month': 2, 'day': 22})
end_ld = ecwl.LocalDate({'year': 2016, 'month': 2, 'day': 22})
cacheKey = "diary/trips-2016-02-22"
ground_truth = json.load(open(dataFile+".ground_truth"), object_hook=bju.object_hook)
logging.info("Before loading, timeseries db size = %s" % edb.get_timeseries_db().count())
all_entries = json.load(open(dataFile), object_hook = bju.object_hook)
# 18:01 because the transition was at 2016-02-22T18:00:09.623404-08:00, so right after
# 18:00
ts_1800 = arrow.get("2016-02-22T18:00:30-08:00").timestamp
logging.debug("ts_1800 = %s, converted back = %s" % (ts_1800, arrow.get(ts_1800).to("America/Los_Angeles")))
before_1800_entries = [e for e in all_entries if ad.AttrDict(e).metadata.write_ts <= ts_1800]
after_1800_entries = [e for e in all_entries if ad.AttrDict(e).metadata.write_ts > ts_1800]
# Sync at 18:00 to capture all the points on the trip *to* the optometrist
# Skip the last few points to ensure that the trip end is skipped
import uuid
self.testUUID = uuid.uuid4()
self.entries = before_1800_entries[0:-2]
etc.setupRealExampleWithEntries(self)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, end_ld)
# Then sync after 18:00
self.entries = after_1800_entries
etc.setupRealExampleWithEntries(self)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, end_ld)
# Although we process the day's data in two batches, we should get the same result
self.compare_approx_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth).data, time_fuzz=60, distance_fuzz=100)
def testOct07MultiSyncSpuriousEndDetected(self):
# Re-run, but with multiple calls to sync data
# This tests the effect of online versus offline analysis and segmentation with potentially partial data
dataFile = "emission/tests/data/real_examples/issue_436_assertion_error"
start_ld = ecwl.LocalDate({'year': 2016, 'month': 10, 'day': 0o7})
end_ld = ecwl.LocalDate({'year': 2016, 'month': 10, 'day': 0o7})
cacheKey = "diary/trips-2016-10-07"
ground_truth = json.load(open(dataFile+".ground_truth"), object_hook=bju.object_hook)
logging.info("Before loading, timeseries db size = %s" % edb.get_timeseries_db().count())
all_entries = json.load(open(dataFile), object_hook = bju.object_hook)
# 18:01 because the transition was at 2016-02-22T18:00:09.623404-08:00, so right after
# 18:00
ts_1800 = arrow.get("2016-10-07T18:33:11-07:00").timestamp
logging.debug("ts_1800 = %s, converted back = %s" % (ts_1800, arrow.get(ts_1800).to("America/Los_Angeles")))
before_1800_entries = [e for e in all_entries if ad.AttrDict(e).metadata.write_ts <= ts_1800]
after_1800_entries = [e for e in all_entries if ad.AttrDict(e).metadata.write_ts > ts_1800]
# Sync at 18:00 to capture all the points on the trip *to* the optometrist
# Skip the last few points to ensure that the trip end is skipped
import uuid
self.testUUID = uuid.uuid4()
self.entries = before_1800_entries
etc.setupRealExampleWithEntries(self)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, end_ld)
# Then sync after 18:00
self.entries = after_1800_entries
etc.setupRealExampleWithEntries(self)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, end_ld)
# Although we process the day's data in two batches, we should get the same result
self.compare_approx_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth).data, time_fuzz=60, distance_fuzz=100)
def testZeroDurationPlaceInterpolationSingleSync(self):
# Test for 545114feb5ac15caac4110d39935612525954b71
dataFile_1 = "emission/tests/data/real_examples/shankari_2016-01-12"
dataFile_2 = "emission/tests/data/real_examples/shankari_2016-01-13"
start_ld_1 = ecwl.LocalDate({'year': 2016, 'month': 1, 'day': 12})
start_ld_2 = ecwl.LocalDate({'year': 2016, 'month': 1, 'day': 13})
cacheKey_1 = "diary/trips-2016-01-12"
cacheKey_2 = "diary/trips-2016-01-13"
ground_truth_1 = json.load(open(dataFile_1+".ground_truth"), object_hook=bju.object_hook)
ground_truth_2 = json.load(open(dataFile_2+".ground_truth"), object_hook=bju.object_hook)
etc.setupRealExample(self, dataFile_1)
self.entries = json.load(open(dataFile_2), object_hook = bju.object_hook)
etc.setupRealExampleWithEntries(self)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld_1, start_ld_1)
# Although we process the day's data in two batches, we should get the same result
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth_1).data)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld_2, start_ld_2)
# Although we process the day's data in two batches, we should get the same result
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth_2).data)
def testZeroDurationPlaceInterpolationMultiSync(self):
# Test for 545114feb5ac15caac4110d39935612525954b71
dataFile_1 = "emission/tests/data/real_examples/shankari_2016-01-12"
dataFile_2 = "emission/tests/data/real_examples/shankari_2016-01-13"
start_ld_1 = ecwl.LocalDate({'year': 2016, 'month': 1, 'day': 12})
start_ld_2 = ecwl.LocalDate({'year': 2016, 'month': 1, 'day': 13})
cacheKey_1 = "diary/trips-2016-01-12"
cacheKey_2 = "diary/trips-2016-01-13"
ground_truth_1 = json.load(open(dataFile_1+".ground_truth"), object_hook=bju.object_hook)
ground_truth_2 = json.load(open(dataFile_2+".ground_truth"), object_hook=bju.object_hook)
etc.setupRealExample(self, dataFile_1)
etc.runIntakePipeline(self.testUUID)
self.entries = json.load(open(dataFile_2), object_hook = bju.object_hook)
etc.setupRealExampleWithEntries(self)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld_1, start_ld_1)
# Although we process the day's data in two batches, we should get the same result
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth_1).data)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld_2, start_ld_2)
# Although we process the day's data in two batches, we should get the same result
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth_2).data)
def testTsMismatch(self):
# Test for https://github.com/e-mission/e-mission-server/issues/457
dataFile = "emission/tests/data/real_examples/shankari_single_positional_indexer.dec-12"
start_ld = ecwl.LocalDate({'year': 2016, 'month': 12, 'day': 12})
cacheKey = "diary/trips-2016-12-12"
ground_truth = json.load(open("emission/tests/data/real_examples/shankari_single_positional_indexer.dec-12.ground_truth"), object_hook=bju.object_hook)
etc.setupRealExample(self, dataFile)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, start_ld)
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth).data)
def testOverriddenModeHack(self):
# Test for https://github.com/e-mission/e-mission-server/issues/457
dataFile = "emission/tests/data/real_examples/test_overriden_mode_hack.jul-31"
start_ld = ecwl.LocalDate({'year': 2017, 'month': 7, 'day': 31})
cacheKey = "diary/trips-2017-07-31"
ground_truth = json.load(open("emission/tests/data/real_examples/test_overriden_mode_hack.jul-31.ground_truth"), object_hook=bju.object_hook)
etc.setupRealExample(self, dataFile)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, start_ld)
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth).data)
def testJan16SpeedAssert(self):
# Test for https://github.com/e-mission/e-mission-server/issues/457
dataFile = "emission/tests/data/real_examples/another_speed_assertion_failure.jan-16"
start_ld = ecwl.LocalDate({'year': 2016, 'month': 1, 'day': 16})
cacheKey = "diary/trips-2016-01-16"
ground_truth = json.load(open("emission/tests/data/real_examples/another_speed_assertion_failure.jan-16.ground_truth"), object_hook=bju.object_hook)
etc.setupRealExample(self, dataFile)
etc.runIntakePipeline(self.testUUID)
api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, start_ld)
self.compare_result(ad.AttrDict({'result': api_result}).result,
ad.AttrDict(ground_truth).data)
if __name__ == '__main__':
    # Configure logging before running so pipeline output is captured
    etc.configLogging()
    unittest.main()
| |
# -*- coding: utf-8 -*-
"""MongoDB result store backend."""
from __future__ import absolute_import, unicode_literals
from datetime import datetime, timedelta
from kombu.exceptions import EncodeError
from kombu.utils.objects import cached_property
from kombu.utils.url import maybe_sanitize_url, urlparse
from celery import states
from celery.exceptions import ImproperlyConfigured
from celery.five import items, string_t
from .base import BaseBackend
try:
    import pymongo
except ImportError:  # pragma: no cover
    pymongo = None  # noqa
if pymongo:
    try:
        # bson was bundled inside pymongo before being split into its own
        # package; fall back to the old location for ancient installs
        from bson.binary import Binary
    except ImportError:  # pragma: no cover
        from pymongo.binary import Binary  # noqa
    from pymongo.errors import InvalidDocument  # noqa
else:  # pragma: no cover
    # Placeholders so this module still imports without pymongo installed;
    # MongoBackend.__init__ raises ImproperlyConfigured in that case.
    Binary = None  # noqa
    class InvalidDocument(Exception):  # noqa
        pass
__all__ = ('MongoBackend',)
# Serializers whose output is raw bytes and must be wrapped in bson Binary
BINARY_CODECS = frozenset(['pickle', 'msgpack'])
class MongoBackend(BaseBackend):
    """MongoDB result backend.
    Raises:
        celery.exceptions.ImproperlyConfigured:
            if module :pypi:`pymongo` is not available.
    """
    # Connection settings; overridden (in order) by the backend URL and the
    # ``mongodb_backend_settings`` configuration dict in __init__.
    mongo_host = None
    host = 'localhost'
    port = 27017
    user = None
    password = None
    database_name = 'celery'
    taskmeta_collection = 'celery_taskmeta'
    groupmeta_collection = 'celery_groupmeta'
    max_pool_size = 10
    options = None
    # Expiry is done by cleanup(), not by a MongoDB TTL index
    supports_autoexpire = False
    # Lazily-created pymongo.MongoClient (see _get_connection)
    _connection = None
    def __init__(self, app=None, **kwargs):
        """Merge connection configuration from the backend URL and the
        ``mongodb_backend_settings`` app setting (the latter wins).
        Raises:
            celery.exceptions.ImproperlyConfigured: if pymongo is missing
                or the settings value is not a dict.
        """
        self.options = {}
        super(MongoBackend, self).__init__(app, **kwargs)
        if not pymongo:
            raise ImproperlyConfigured(
                'You need to install the pymongo library to use the '
                'MongoDB backend.')
        # Set option defaults
        for key, value in items(self._prepare_client_options()):
            self.options.setdefault(key, value)
        # update conf with mongo uri data, only if uri was given
        if self.url:
            self.url = self._ensure_mongodb_uri_compliance(self.url)
            uri_data = pymongo.uri_parser.parse_uri(self.url)
            # build the hosts list to create a mongo connection
            hostslist = [
                '{0}:{1}'.format(x[0], x[1]) for x in uri_data['nodelist']
            ]
            self.user = uri_data['username']
            self.password = uri_data['password']
            self.mongo_host = hostslist
            if uri_data['database']:
                # if no database is provided in the uri, use default
                self.database_name = uri_data['database']
            self.options.update(uri_data['options'])
        # update conf with specific settings
        config = self.app.conf.get('mongodb_backend_settings')
        if config is not None:
            if not isinstance(config, dict):
                raise ImproperlyConfigured(
                    'MongoDB backend settings should be grouped in a dict')
            config = dict(config)  # don't modify original
            if 'host' in config or 'port' in config:
                # these should take over uri conf
                self.mongo_host = None
            self.host = config.pop('host', self.host)
            self.port = config.pop('port', self.port)
            self.mongo_host = config.pop('mongo_host', self.mongo_host)
            self.user = config.pop('user', self.user)
            self.password = config.pop('password', self.password)
            self.database_name = config.pop('database', self.database_name)
            self.taskmeta_collection = config.pop(
                'taskmeta_collection', self.taskmeta_collection,
            )
            self.groupmeta_collection = config.pop(
                'groupmeta_collection', self.groupmeta_collection,
            )
            # remaining keys are passed through as pymongo client options
            self.options.update(config.pop('options', {}))
            self.options.update(config)
    @staticmethod
    def _ensure_mongodb_uri_compliance(url):
        """Normalize *url* so pymongo's URI parser accepts it.
        E.g. ``srv://h`` becomes ``mongodb+srv://h`` and a bare
        ``mongodb://`` gains a ``localhost`` host.
        """
        parsed_url = urlparse(url)
        if not parsed_url.scheme.startswith('mongodb'):
            url = 'mongodb+{}'.format(url)
        if url == 'mongodb://':
            url += 'localhost'
        return url
    def _prepare_client_options(self):
        # Client option names changed between pymongo 2.x and 3.x
        if pymongo.version_tuple >= (3,):
            return {'maxPoolSize': self.max_pool_size}
        else:  # pragma: no cover
            return {'max_pool_size': self.max_pool_size,
                    'auto_start_request': False}
    def _get_connection(self):
        """Connect to the MongoDB server."""
        if self._connection is None:
            from pymongo import MongoClient
            host = self.mongo_host
            if not host:
                # The first pymongo.Connection() argument (host) can be
                # a list of ['host:port'] elements or a mongodb connection
                # URI. If this is the case, don't use self.port
                # but let pymongo get the port(s) from the URI instead.
                # This enables the use of replica sets and sharding.
                # See pymongo.Connection() for more info.
                host = self.host
                if isinstance(host, string_t) \
                   and not host.startswith('mongodb://'):
                    host = 'mongodb://{0}:{1}'.format(host, self.port)
            # don't change self.options
            conf = dict(self.options)
            conf['host'] = host
            if self.user:
                conf['username'] = self.user
            if self.password:
                conf['password'] = self.password
            self._connection = MongoClient(**conf)
        return self._connection
    def encode(self, data):
        """Serialize *data*; binary codec output is wrapped in bson Binary."""
        if self.serializer == 'bson':
            # mongodb handles serialization
            return data
        payload = super(MongoBackend, self).encode(data)
        # serializer which are in a unsupported format (pickle/binary)
        if self.serializer in BINARY_CODECS:
            payload = Binary(payload)
        return payload
    def decode(self, data):
        if self.serializer == 'bson':
            return data
        # NOTE(review): data read back from mongo is re-passed through
        # encode() before decoding — presumably to restore the Binary
        # wrapping for binary codecs; confirm against BaseBackend.decode
        payload = self.encode(data)
        return super(MongoBackend, self).decode(payload)
    def _store_result(self, task_id, result, state,
                      traceback=None, request=None, **kwargs):
        """Store return value and state of an executed task."""
        meta = self._get_result_meta(result=result, state=state,
                                     traceback=traceback, request=request)
        # Add the _id for mongodb
        meta['_id'] = task_id
        try:
            # upsert so repeated stores for the same task id overwrite
            self.collection.replace_one({'_id': task_id}, meta, upsert=True)
        except InvalidDocument as exc:
            raise EncodeError(exc)
        return result
    def _get_task_meta_for(self, task_id):
        """Get task meta-data for a task by id."""
        obj = self.collection.find_one({'_id': task_id})
        if obj:
            return self.meta_from_decoded({
                'task_id': obj['_id'],
                'status': obj['status'],
                'result': self.decode(obj['result']),
                'date_done': obj['date_done'],
                'traceback': self.decode(obj['traceback']),
                'children': self.decode(obj['children']),
            })
        # Unknown task id is reported as pending
        return {'status': states.PENDING, 'result': None}
    def _save_group(self, group_id, result):
        """Save the group result."""
        meta = {
            '_id': group_id,
            'result': self.encode([i.id for i in result]),
            'date_done': datetime.utcnow(),
        }
        self.group_collection.replace_one({'_id': group_id}, meta, upsert=True)
        return result
    def _restore_group(self, group_id):
        """Get the result for a group by id."""
        obj = self.group_collection.find_one({'_id': group_id})
        if obj:
            return {
                'task_id': obj['_id'],
                'date_done': obj['date_done'],
                'result': [
                    self.app.AsyncResult(task)
                    for task in self.decode(obj['result'])
                ],
            }
    def _delete_group(self, group_id):
        """Delete a group by id."""
        self.group_collection.delete_one({'_id': group_id})
    def _forget(self, task_id):
        """Remove result from MongoDB.
        Raises:
            pymongo.exceptions.OperationsError:
                if the task_id could not be removed.
        """
        # By using safe=True, this will wait until it receives a response from
        # the server. Likewise, it will raise an OperationsError if the
        # response was unable to be completed.
        self.collection.delete_one({'_id': task_id})
    def cleanup(self):
        """Delete expired meta-data."""
        self.collection.delete_many(
            {'date_done': {'$lt': self.app.now() - self.expires_delta}},
        )
        self.group_collection.delete_many(
            {'date_done': {'$lt': self.app.now() - self.expires_delta}},
        )
    def __reduce__(self, args=(), kwargs=None):
        # Preserve expiry and url so unpickled backends reconnect identically
        kwargs = {} if not kwargs else kwargs
        return super(MongoBackend, self).__reduce__(
            args, dict(kwargs, expires=self.expires, url=self.url))
    def _get_database(self):
        conn = self._get_connection()
        db = conn[self.database_name]
        if self.user and self.password:
            # NOTE(review): Database.authenticate was removed in pymongo 4;
            # this path assumes pymongo < 4 semantics — confirm supported range
            source = self.options.get(
                'authsource',
                self.database_name or 'admin'
            )
            if not db.authenticate(self.user, self.password, source=source):
                raise ImproperlyConfigured(
                    'Invalid MongoDB username or password.')
        return db
    @cached_property
    def database(self):
        """Get database from MongoDB connection.
        performs authentication if necessary.
        """
        return self._get_database()
    @cached_property
    def collection(self):
        """Get the meta-data task collection."""
        collection = self.database[self.taskmeta_collection]
        # Ensure an index on date_done is there, if not process the index
        # in the background. Once completed cleanup will be much faster
        collection.create_index('date_done', background=True)
        return collection
    @cached_property
    def group_collection(self):
        """Get the meta-data task collection."""
        collection = self.database[self.groupmeta_collection]
        # Ensure an index on date_done is there, if not process the index
        # in the background. Once completed cleanup will be much faster
        collection.create_index('date_done', background=True)
        return collection
    @cached_property
    def expires_delta(self):
        # self.expires is a number of seconds (set on BaseBackend)
        return timedelta(seconds=self.expires)
    def as_uri(self, include_password=False):
        """Return the backend as an URI.
        Arguments:
            include_password (bool): Password censored if disabled.
        """
        if not self.url:
            return 'mongodb://'
        if include_password:
            return self.url
        if ',' not in self.url:
            return maybe_sanitize_url(self.url)
        # Multi-host URI: only the first chunk carries the credentials
        uri1, remainder = self.url.split(',', 1)
        return ','.join([maybe_sanitize_url(uri1), remainder])
| |
#############################################################################
#
# Copyright (c) 2008 by Casey Duncan and contributors
# All Rights Reserved.
#
# This software is subject to the provisions of the MIT License
# A copy of the license should accompany this distribution.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#
#############################################################################
# $Id$
import unittest
import sys
class TestSystem(list):
    """Stand-in particle system: a list that records groups added to it."""
    def add_group(self, group):
        # Registration is modeled as a plain append
        list.append(self, group)
class TestController:
    """Controller double: records the (td, group) it was last called with."""
    group = None
    def __call__(self, td, group):
        # Store the time delta first, then the group that invoked us
        self.time_delta = td
        self.group = group
class TestUnbindingController:
    """Controller that unbinds itself when invoked"""
    group = None
    def __call__(self, td, group):
        # Record the group, then remove ourselves from its controller list;
        # used to verify groups tolerate unbinding mid-update
        self.group = group
        group.unbind_controller(self)
class TestBindingController:
    """Controller that binds another controller when invoked"""
    group = None
    def __init__(self, ctrl):
        # Controller that will be bound to the group when this one fires
        self.ctrl = ctrl
    def __call__(self, td, group):
        # Used to verify groups tolerate binding mid-update
        self.group = group
        group.bind_controller(self.ctrl)
class TestRenderer:
    """Renderer double: records that draw() ran and for which group."""
    group = None
    drawn = False
    def draw(self, group):
        # Remember the calling group, then flag that a draw happened
        self.group = group
        self.drawn = True
class TestParticle:
    """Bare particle template; tests set attributes on instances and pass
    them to ``ParticleGroup.new``."""
    velocity = (0,0,0)
class GroupTest(unittest.TestCase):
    """Behavioral tests for lepton.ParticleGroup: system registration,
    controller (un)binding, particle creation/killing, vector attribute
    access and rendering.
    NOTE: Python 2 era code — uses xrange, iterator .next() and the
    deprecated failUnless/failIf unittest aliases.
    """
    def test_group_system(self):
        import lepton
        from lepton import ParticleGroup
        # By default the group should be added to the global system
        group = ParticleGroup()
        self.failUnless(group in lepton.default_system)
        lepton.default_system.add_group(group)
        # We should be able to override the system by argument
        test_system = TestSystem()
        group = ParticleGroup(system=test_system)
        self.failUnless(group in test_system)
        self.failIf(group in lepton.default_system)
        # If the global system is overridden, the group should respect that
        original_system = lepton.default_system
        try:
            lepton.default_system = test_system
            group = ParticleGroup()
            self.failUnless(group in test_system)
        finally:
            lepton.default_system = original_system
        # If None is specified for system, group is not added
        group = ParticleGroup(system=None)
        self.failIf(group in lepton.default_system)
        self.failIf(group in test_system)
    def test_bind_controllers(self):
        from lepton import ParticleGroup
        ctrl1 = TestController()
        ctrl2 = TestController()
        ctrl3 = TestController()
        ctrl4 = TestController()
        group = ParticleGroup()
        self.failIf(group.controllers)
        # Can bind controllers in constructor and after
        group = ParticleGroup(controllers=(ctrl1, ctrl2))
        self.assertEqual(tuple(group.controllers), (ctrl1, ctrl2))
        group.bind_controller(ctrl3, ctrl4)
        self.assertEqual(tuple(group.controllers), (ctrl1, ctrl2, ctrl3, ctrl4))
        '''
        # Can bind controllers at the class level and after
        class MyGroup(ParticleGroup):
            controllers = (ctrl1, ctrl2)
        group = MyGroup(controllers=[ctrl3])
        self.assertEqual(tuple(group.controllers), (ctrl1, ctrl2, ctrl3))
        group.bind_controller(ctrl4)
        self.assertEqual(tuple(group.controllers), (ctrl1, ctrl2, ctrl3, ctrl4))
        '''
    def test_unbind_controllers(self):
        from lepton import ParticleGroup
        ctrl1 = TestController()
        ctrl2 = TestController()
        group = ParticleGroup(controllers=(ctrl1, ctrl2))
        self.failUnless(ctrl1 in group.controllers)
        self.failUnless(ctrl2 in group.controllers)
        group.unbind_controller(ctrl1)
        self.failUnless(ctrl1 not in group.controllers)
        self.failUnless(ctrl2 in group.controllers)
        group.unbind_controller(ctrl2)
        self.failUnless(ctrl1 not in group.controllers)
        self.failUnless(ctrl2 not in group.controllers)
        # Unbinding a controller that is not bound raises ValueError
        self.assertRaises(ValueError, group.unbind_controller, ctrl1)
    def test_modify_controllers_during_update(self):
        from lepton import ParticleGroup
        ctrl1 = TestController()
        ctrl2 = TestUnbindingController()
        ctrl4 = TestController()
        ctrl3 = TestBindingController(ctrl4)
        group = ParticleGroup(controllers=(ctrl1, ctrl2, ctrl3))
        self.assertEqual(len(group.controllers), 3)
        self.failUnless(ctrl1 in group.controllers)
        self.failUnless(ctrl2 in group.controllers)
        self.failUnless(ctrl3 in group.controllers)
        self.failUnless(ctrl4 not in group.controllers)
        group.update(1)
        # All three originally-bound controllers ran during this update...
        self.failUnless(ctrl1.group is group)
        self.failUnless(ctrl2.group is group)
        self.failUnless(ctrl3.group is group)
        self.failUnless(ctrl4.group is None)
        # ...and the membership changes made mid-update took effect after
        self.assertEqual(len(group.controllers), 3)
        self.failUnless(ctrl1 in group.controllers)
        self.failUnless(ctrl2 not in group.controllers)
        self.failUnless(ctrl3 in group.controllers)
        self.failUnless(ctrl4 in group.controllers)
    def test_set_renderer(self):
        from lepton import ParticleGroup
        renderer = TestRenderer()
        group = ParticleGroup()
        self.assertEqual(group.renderer, None)
        # Can set renderer after init
        group.renderer = renderer
        self.assertEqual(group.renderer, renderer)
        # Can set renderer at init
        group2 = ParticleGroup(renderer=renderer)
        self.assertEqual(group2.renderer, renderer)
        # Can set renderer back to None
        group.renderer = None
        self.assertEqual(renderer.group, None)
    def test_new_particle(self):
        from lepton import ParticleGroup, Particle
        group = ParticleGroup()
        self.assertEqual(len(group), 0)
        self.assertEqual(group.new_count(), 0)
        p1 = Particle(age=1)
        p2 = Particle(age=2)
        # New particles stay in a staging area until the next update()
        group.new(p1)
        self.assertEqual(len(group), 0)
        self.assertEqual(group.new_count(), 1)
        self.failIf(list(group))
        group.new(p2)
        self.assertEqual(len(group), 0)
        self.assertEqual(group.new_count(), 2)
        self.failIf(list(group))
        group.update(0) # incorporate new particles
        self.assertEqual(len(group), 2)
        self.assertEqual(group.new_count(), 0)
        particles = list(group)
        self.assertEqual(len(particles), 2)
        self.assertEqual(particles[0].age, 1)
        self.assertEqual(particles[1].age, 2)
        # Reused as a fixture by several tests below
        return group, particles
    def test_new_particle_kwargs(self):
        from lepton import ParticleGroup, Particle
        group = ParticleGroup()
        self.assertEqual(len(group), 0)
        self.assertEqual(group.new_count(), 0)
        p = group.new(position=(1,-1,2), age=2)
        self.assertEqual(tuple(p.position), (1, -1, 2))
        self.assertEqual(p.age, 2)
        # kwargs override values taken from the template particle
        tmpl_p = Particle(age=3, velocity=(-1,2,3))
        p = group.new(tmpl_p, age=5)
        self.assertEqual(tuple(p.velocity), (-1,2,3))
        self.assertEqual(p.age, 5)
        self.assertEqual(len(group), 0)
        self.assertEqual(group.new_count(), 2)
        self.failIf(list(group))
        group.update(0) # incorporate new particles
        self.assertEqual(len(group), 2)
        self.assertEqual(group.new_count(), 0)
    def test_particle_attrs(self):
        from lepton import ParticleGroup
        p = TestParticle()
        p.position=(1,2,3)
        p.velocity=(4,5,6)
        p.color=(7,8,9,10)
        p.size=(11,12,13)
        p.up=(-1,-2,-3)
        p.rotation=(-4,-5,-6)
        p.age=111
        p.mass=2
        group = ParticleGroup()
        # All template attributes should be copied onto the new particle
        newp = group.new(p)
        self.assertEqual(tuple(newp.position), p.position)
        self.assertEqual(tuple(newp.velocity), p.velocity)
        self.assertEqual(tuple(newp.color), p.color)
        self.assertEqual(tuple(newp.size), p.size)
        self.assertEqual(tuple(newp.up), p.up)
        self.assertEqual(tuple(newp.rotation), p.rotation)
        self.assertEqual(newp.age, p.age)
        self.assertEqual(newp.mass, p.mass)
    def test_particle_vector_swizzle(self):
        from lepton import ParticleGroup
        group = ParticleGroup()
        newp = group.new(TestParticle())
        # Vector components readable and writable via .x/.y/.z
        self.assertEqual(tuple(newp.velocity), (0,0,0))
        self.assertEqual(newp.velocity.x, 0)
        self.assertEqual(newp.velocity.y, 0)
        self.assertEqual(newp.velocity.z, 0)
        newp.velocity.x = 2
        newp.velocity.y = -2
        newp.velocity.z = 1
        self.assertEqual(tuple(newp.velocity), (2,-2,1))
        self.assertEqual(newp.velocity.x, 2)
        self.assertEqual(newp.velocity.y, -2)
        self.assertEqual(newp.velocity.z, 1)
        newp.velocity = (3,4,5)
        self.assertEqual(tuple(newp.velocity), (3,4,5))
        self.assertEqual(newp.velocity.x, 3)
        self.assertEqual(newp.velocity.y, 4)
        self.assertEqual(newp.velocity.z, 5)
        # Color components readable and writable via .r/.g/.b/.a
        self.assertEqual(tuple(newp.color), (0,0,0,0))
        self.assertEqual(newp.color.r, 0)
        self.assertEqual(newp.color.g, 0)
        self.assertEqual(newp.color.b, 0)
        self.assertEqual(newp.color.a, 0)
        newp.color.r = 1
        newp.color.g = -2
        newp.color.b = 3
        newp.color.a = -1
        self.assertEqual(tuple(newp.color), (1,-2,3,-1))
        self.assertEqual(newp.color.r, 1)
        self.assertEqual(newp.color.g, -2)
        self.assertEqual(newp.color.b, 3)
        self.assertEqual(newp.color.a, -1)
        newp.color = (5,4,3,2)
        self.assertEqual(tuple(newp.color), (5,4,3,2))
        self.assertEqual(newp.color.r, 5)
        self.assertEqual(newp.color.g, 4)
        self.assertEqual(newp.color.b, 3)
        self.assertEqual(newp.color.a, 2)
        newp.color = (6,5,4) # alpha defaults to 1
        self.assertEqual(tuple(newp.color), (6,5,4,1))
        self.assertEqual(newp.color.r, 6)
        self.assertEqual(newp.color.g, 5)
        self.assertEqual(newp.color.b, 4)
        self.assertEqual(newp.color.a, 1)
    def test_particle_vector_clamp(self):
        from lepton import ParticleGroup
        group = ParticleGroup()
        p = TestParticle()
        p.color = (2,0.5,-1,5)
        p.size = (0, 2, 0)
        newp = group.new(p)
        self.assertEqual(tuple(newp.color), p.color)
        self.assertEqual(tuple(newp.size), p.size)
        # clamp() limits each component to [min, max] without mutating
        self.assertEqual(tuple(newp.color.clamp(0, 1)), (1, 0.5, 0, 1))
        self.assertEqual(tuple(newp.size.clamp(1, 1.5)), (1, 1.5, 1))
    def test_mass_new_particles(self):
        from lepton import ParticleGroup
        # Large particle count; verifies ordering is preserved after update
        count = 12345
        group = ParticleGroup()
        p = TestParticle()
        for i in xrange(count):
            p.mass = i
            newp = group.new(p)
            self.assertEqual(newp.mass, p.mass)
        group.update(0)
        piter = iter(group)
        for i in xrange(count):
            newp = piter.next()
            self.assertEqual(newp.mass, i)
        self.assertEqual(len(group), count)
        self.assertEqual(group.killed_count(), 0)
        self.assertRaises(StopIteration, piter.next)
    def test_particle_ref_invalidation(self):
        from lepton.group import InvalidParticleRefError
        group, particles = self.test_new_particle()
        piter = iter(group)
        self.assertEqual(piter.next().age, particles[0].age)
        group.update(0) # Invalidates particle references and iter
        self.assertRaises(InvalidParticleRefError, getattr, particles[0], 'age')
        self.assertRaises(InvalidParticleRefError, getattr, particles[1], 'age')
        self.assertRaises(InvalidParticleRefError, piter.next)
    def test_kill_particle(self):
        group, particles = self.test_new_particle()
        group.kill(particles[0])
        self.assertEqual(len(group), 1)
        group.kill(particles[1])
        self.assertEqual(len(group), 0)
        self.assertEqual(list(group), [])
        # group.kill() only accepts particle ref objects
        p3 = TestParticle()
        self.assertRaises(TypeError, group.kill, p3)
        # update should reclaim killed particles
        self.assertEqual(group.killed_count(), 2)
        group.update(0)
        self.assertEqual(group.killed_count(), 0)
        self.assertEqual(len(group), 0)
    def test_len(self):
        from lepton import ParticleGroup
        group = ParticleGroup()
        self.assertEqual(len(group), 0)
        group.new(TestParticle())
        self.assertEqual(len(group), 0)
        group.new(TestParticle())
        self.assertEqual(len(group), 0)
        # New particles become visible on update
        group.update(0)
        self.assertEqual(len(group), 2)
    def test_update(self):
        group, particles = self.test_new_particle()
        ctrl1 = TestController()
        ctrl2 = TestController()
        group.bind_controller(ctrl1, ctrl2)
        group.update(0.33)
        # Both controllers received the group and the time delta
        self.failUnless(ctrl1.group is group)
        self.assertAlmostEqual(ctrl1.time_delta, 0.33)
        self.failUnless(ctrl2.group is group)
        self.assertAlmostEqual(ctrl2.time_delta, 0.33)
    def test_draw(self):
        group, particles = self.test_new_particle()
        renderer = TestRenderer()
        self.assertFalse(renderer.drawn)
        self.failUnless(renderer.group is None)
        group.renderer = renderer
        group.draw()
        # draw() delegates to the bound renderer with the group as argument
        self.assertTrue(renderer.drawn)
        self.failUnless(renderer.group is group)
if __name__=='__main__':
    # Run the particle group tests when executed directly
    unittest.main()
| |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.24 on 2019-10-08 10:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``dmd`` app.

    Renames the verbose names of three models and re-declares every
    field of the dm+d models, primarily attaching ``help_text`` to each
    column.  Generated by Django 1.11; do not hand-edit the operations
    beyond comments.
    """

    dependencies = [
        ('dmd', '0002_auto_20181007_1443'),
    ]

    operations = [
        # --- Model-level option changes (admin display names) ---
        migrations.AlterModelOptions(
            name='aping',
            options={'verbose_name': 'Excipients'},
        ),
        migrations.AlterModelOptions(
            name='ing',
            options={'verbose_name': 'Ingredients'},
        ),
        migrations.AlterModelOptions(
            name='packinfo',
            options={'verbose_name': 'Appliance Pack Information'},
        ),
        # --- Field re-declarations (help_text and FK metadata) ---
        migrations.AlterField(
            model_name='amp',
            name='abbrevnm',
            field=models.CharField(help_text='Abbreviated name', max_length=60, null=True),
        ),
        migrations.AlterField(
            model_name='amp',
            name='avail_restrict',
            field=models.ForeignKey(db_column='avail_restrictcd', help_text='Restrictions on availability', on_delete=django.db.models.deletion.CASCADE, to='dmd.AvailabilityRestriction'),
        ),
        migrations.AlterField(
            model_name='amp',
            name='bnf_code',
            field=models.CharField(help_text='BNF code', max_length=15, null=True),
        ),
        migrations.AlterField(
            model_name='amp',
            name='combprod',
            field=models.ForeignKey(db_column='combprodcd', help_text='Combination product', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.CombinationProdInd'),
        ),
        migrations.AlterField(
            model_name='amp',
            name='descr',
            field=models.CharField(help_text='Description', max_length=700),
        ),
        migrations.AlterField(
            model_name='amp',
            name='ema',
            field=models.BooleanField(help_text='EMA additional monitoring'),
        ),
        migrations.AlterField(
            model_name='amp',
            name='flavour',
            field=models.ForeignKey(db_column='flavourcd', help_text='Flavour', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.Flavour'),
        ),
        migrations.AlterField(
            model_name='amp',
            name='id',
            field=models.BigIntegerField(db_column='apid', help_text='Identifier', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='amp',
            name='invalid',
            field=models.BooleanField(help_text='Invalid'),
        ),
        migrations.AlterField(
            model_name='amp',
            name='lic_auth',
            field=models.ForeignKey(db_column='lic_authcd', help_text='Current licensing authority', on_delete=django.db.models.deletion.CASCADE, to='dmd.LicensingAuthority'),
        ),
        migrations.AlterField(
            model_name='amp',
            name='lic_auth_prev',
            field=models.ForeignKey(db_column='lic_auth_prevcd', help_text='Previous licensing authority', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='dmd.LicensingAuthority'),
        ),
        migrations.AlterField(
            model_name='amp',
            name='lic_authchange',
            field=models.ForeignKey(db_column='lic_authchangecd', help_text='Reason for change of licensing authority', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.LicensingAuthorityChangeReason'),
        ),
        migrations.AlterField(
            model_name='amp',
            name='lic_authchangedt',
            field=models.DateField(help_text='Date of change of licensing authority', null=True),
        ),
        migrations.AlterField(
            model_name='amp',
            name='nm',
            field=models.CharField(help_text='Name', max_length=255),
        ),
        migrations.AlterField(
            model_name='amp',
            name='nm_prev',
            field=models.CharField(help_text='Previous name', max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='amp',
            name='nmdt',
            field=models.DateField(help_text='Date of name applicability', null=True),
        ),
        migrations.AlterField(
            model_name='amp',
            name='parallel_import',
            field=models.BooleanField(help_text='Parallel import'),
        ),
        migrations.AlterField(
            model_name='amp',
            name='supp',
            field=models.ForeignKey(db_column='suppcd', help_text='Supplier', on_delete=django.db.models.deletion.CASCADE, to='dmd.Supplier'),
        ),
        migrations.AlterField(
            model_name='amp',
            name='vmp',
            field=models.ForeignKey(db_column='vpid', help_text='VMP', on_delete=django.db.models.deletion.CASCADE, to='dmd.VMP'),
        ),
        migrations.AlterField(
            model_name='ampp',
            name='abbrevnm',
            field=models.CharField(help_text='Abbreviated name', max_length=60, null=True),
        ),
        migrations.AlterField(
            model_name='ampp',
            name='amp',
            field=models.ForeignKey(db_column='apid', help_text='AMP', on_delete=django.db.models.deletion.CASCADE, to='dmd.AMP'),
        ),
        migrations.AlterField(
            model_name='ampp',
            name='bnf_code',
            field=models.CharField(help_text='BNF code', max_length=15, null=True),
        ),
        migrations.AlterField(
            model_name='ampp',
            name='combpack',
            field=models.ForeignKey(db_column='combpackcd', help_text='Combination pack', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.CombinationPackInd'),
        ),
        migrations.AlterField(
            model_name='ampp',
            name='disc',
            field=models.ForeignKey(db_column='disccd', help_text='Discontinued', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.DiscontinuedInd'),
        ),
        migrations.AlterField(
            model_name='ampp',
            name='discdt',
            field=models.DateField(help_text='Discontinued change date', null=True),
        ),
        migrations.AlterField(
            model_name='ampp',
            name='id',
            field=models.BigIntegerField(db_column='appid', help_text='Identifier', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='ampp',
            name='invalid',
            field=models.BooleanField(help_text='Invalid'),
        ),
        migrations.AlterField(
            model_name='ampp',
            name='legal_cat',
            field=models.ForeignKey(db_column='legal_catcd', help_text='Legal category', on_delete=django.db.models.deletion.CASCADE, to='dmd.LegalCategory'),
        ),
        migrations.AlterField(
            model_name='ampp',
            name='nm',
            field=models.CharField(help_text='Description', max_length=774),
        ),
        migrations.AlterField(
            model_name='ampp',
            name='subp',
            field=models.CharField(help_text='Sub pack info', max_length=30, null=True),
        ),
        migrations.AlterField(
            model_name='ampp',
            name='vmpp',
            field=models.ForeignKey(db_column='vppid', help_text='VMPP', on_delete=django.db.models.deletion.CASCADE, to='dmd.VMPP'),
        ),
        migrations.AlterField(
            model_name='apinfo',
            name='amp',
            field=models.OneToOneField(db_column='apid', help_text='AMP', on_delete=django.db.models.deletion.CASCADE, to='dmd.AMP'),
        ),
        migrations.AlterField(
            model_name='apinfo',
            name='colour',
            field=models.ForeignKey(db_column='colourcd', help_text='Colour', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.Colour'),
        ),
        migrations.AlterField(
            model_name='apinfo',
            name='prod_order_no',
            field=models.CharField(help_text='Product order number', max_length=20, null=True),
        ),
        migrations.AlterField(
            model_name='apinfo',
            name='sz_weight',
            field=models.CharField(help_text='Size / weight', max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='aping',
            name='amp',
            field=models.ForeignKey(db_column='apid', help_text='AMP', on_delete=django.db.models.deletion.CASCADE, to='dmd.AMP'),
        ),
        migrations.AlterField(
            model_name='aping',
            name='ing',
            field=models.ForeignKey(db_column='isid', help_text='Ingredient', on_delete=django.db.models.deletion.CASCADE, to='dmd.Ing'),
        ),
        migrations.AlterField(
            model_name='aping',
            name='strnth',
            field=models.DecimalField(decimal_places=3, help_text='Pharmaceutical strength numerical value', max_digits=10, null=True),
        ),
        migrations.AlterField(
            model_name='aping',
            name='uom',
            field=models.ForeignKey(db_column='uomcd', help_text='Pharmaceutical Strength Unit of Measure', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='dmd.UnitOfMeasure'),
        ),
        migrations.AlterField(
            model_name='availabilityrestriction',
            name='cd',
            field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='availabilityrestriction',
            name='descr',
            field=models.CharField(help_text='Description', max_length=60),
        ),
        migrations.AlterField(
            model_name='basisofname',
            name='cd',
            field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='basisofname',
            name='descr',
            field=models.CharField(help_text='Description', max_length=150),
        ),
        migrations.AlterField(
            model_name='basisofstrnth',
            name='cd',
            field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='basisofstrnth',
            name='descr',
            field=models.CharField(help_text='Description', max_length=150),
        ),
        migrations.AlterField(
            model_name='colour',
            name='cd',
            field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='colour',
            name='descr',
            field=models.CharField(help_text='Description', max_length=60),
        ),
        migrations.AlterField(
            model_name='combinationpackind',
            name='cd',
            field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='combinationpackind',
            name='descr',
            field=models.CharField(help_text='Description', max_length=60),
        ),
        migrations.AlterField(
            model_name='combinationprodind',
            name='cd',
            field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='combinationprodind',
            name='descr',
            field=models.CharField(help_text='Description', max_length=60),
        ),
        migrations.AlterField(
            model_name='controldrugcategory',
            name='cd',
            field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='controldrugcategory',
            name='descr',
            field=models.CharField(help_text='Description', max_length=60),
        ),
        migrations.AlterField(
            model_name='controlinfo',
            name='cat',
            field=models.ForeignKey(db_column='catcd', help_text='Controlled Drug category', on_delete=django.db.models.deletion.CASCADE, to='dmd.ControlDrugCategory'),
        ),
        migrations.AlterField(
            model_name='controlinfo',
            name='cat_prev',
            field=models.ForeignKey(db_column='cat_prevcd', help_text='Previous Controlled Drug information', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='dmd.ControlDrugCategory'),
        ),
        migrations.AlterField(
            model_name='controlinfo',
            name='catdt',
            field=models.DateField(help_text='Date of applicability', null=True),
        ),
        migrations.AlterField(
            model_name='controlinfo',
            name='vmp',
            field=models.OneToOneField(db_column='vpid', help_text='VMP', on_delete=django.db.models.deletion.CASCADE, to='dmd.VMP'),
        ),
        migrations.AlterField(
            model_name='dfindicator',
            name='cd',
            field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='dfindicator',
            name='descr',
            field=models.CharField(help_text='Description', max_length=20),
        ),
        migrations.AlterField(
            model_name='dform',
            name='form',
            field=models.ForeignKey(db_column='formcd', help_text='Formulation', on_delete=django.db.models.deletion.CASCADE, to='dmd.Form'),
        ),
        migrations.AlterField(
            model_name='dform',
            name='vmp',
            field=models.OneToOneField(db_column='vpid', help_text='VMP', on_delete=django.db.models.deletion.CASCADE, to='dmd.VMP'),
        ),
        migrations.AlterField(
            model_name='discontinuedind',
            name='cd',
            field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='discontinuedind',
            name='descr',
            field=models.CharField(help_text='Description', max_length=60),
        ),
        migrations.AlterField(
            model_name='dnd',
            name='cd',
            field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='dnd',
            name='descr',
            field=models.CharField(help_text='Description', max_length=60),
        ),
        migrations.AlterField(
            model_name='droute',
            name='route',
            field=models.ForeignKey(db_column='routecd', help_text='Route', on_delete=django.db.models.deletion.CASCADE, to='dmd.Route'),
        ),
        migrations.AlterField(
            model_name='droute',
            name='vmp',
            field=models.ForeignKey(db_column='vpid', help_text='VMP', on_delete=django.db.models.deletion.CASCADE, to='dmd.VMP'),
        ),
        migrations.AlterField(
            model_name='dtinfo',
            name='dt',
            field=models.DateField(help_text='Date from which applicable', null=True),
        ),
        migrations.AlterField(
            model_name='dtinfo',
            name='pay_cat',
            field=models.ForeignKey(db_column='pay_catcd', help_text='Drug Tariff payment category', on_delete=django.db.models.deletion.CASCADE, to='dmd.DtPaymentCategory'),
        ),
        migrations.AlterField(
            model_name='dtinfo',
            name='prevprice',
            field=models.IntegerField(help_text='Previous price', null=True),
        ),
        migrations.AlterField(
            model_name='dtinfo',
            name='price',
            field=models.IntegerField(help_text='Drug Tariff price', null=True),
        ),
        migrations.AlterField(
            model_name='dtinfo',
            name='vmpp',
            field=models.OneToOneField(db_column='vppid', help_text='VMPP', on_delete=django.db.models.deletion.CASCADE, to='dmd.VMPP'),
        ),
        migrations.AlterField(
            model_name='dtpaymentcategory',
            name='cd',
            field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='dtpaymentcategory',
            name='descr',
            field=models.CharField(help_text='Description', max_length=60),
        ),
        migrations.AlterField(
            model_name='flavour',
            name='cd',
            field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='flavour',
            name='descr',
            field=models.CharField(help_text='Description', max_length=60),
        ),
        migrations.AlterField(
            model_name='form',
            name='cd',
            field=models.BigIntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='form',
            name='cddt',
            field=models.DateField(help_text='Date code is applicable from', null=True),
        ),
        migrations.AlterField(
            model_name='form',
            name='cdprev',
            field=models.BigIntegerField(help_text='Previous code', null=True),
        ),
        migrations.AlterField(
            model_name='form',
            name='descr',
            field=models.CharField(help_text='Description', max_length=60),
        ),
        migrations.AlterField(
            model_name='gtin',
            name='ampp',
            field=models.OneToOneField(db_column='appid', help_text='AMPP', on_delete=django.db.models.deletion.CASCADE, to='dmd.AMPP'),
        ),
        migrations.AlterField(
            model_name='gtin',
            name='enddt',
            field=models.DateField(help_text='The date the GTIN became invalid', null=True),
        ),
        migrations.AlterField(
            model_name='gtin',
            name='gtin',
            field=models.BigIntegerField(help_text='GTIN'),
        ),
        migrations.AlterField(
            model_name='gtin',
            name='startdt',
            field=models.DateField(help_text='GTIN date'),
        ),
        migrations.AlterField(
            model_name='ing',
            name='id',
            field=models.BigIntegerField(db_column='isid', help_text='Identifier', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='ing',
            name='invalid',
            field=models.BooleanField(help_text='Invalid'),
        ),
        migrations.AlterField(
            model_name='ing',
            name='isiddt',
            field=models.DateField(help_text='Date identifier became valid', null=True),
        ),
        migrations.AlterField(
            model_name='ing',
            name='isidprev',
            field=models.BigIntegerField(help_text='Previous identifier', null=True),
        ),
        migrations.AlterField(
            model_name='ing',
            name='nm',
            field=models.CharField(help_text='Name', max_length=255),
        ),
        migrations.AlterField(
            model_name='legalcategory',
            name='cd',
            field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='legalcategory',
            name='descr',
            field=models.CharField(help_text='Description', max_length=60),
        ),
        migrations.AlterField(
            model_name='licensingauthority',
            name='cd',
            field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='licensingauthority',
            name='descr',
            field=models.CharField(help_text='Description', max_length=60),
        ),
        migrations.AlterField(
            model_name='licensingauthoritychangereason',
            name='cd',
            field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='licensingauthoritychangereason',
            name='descr',
            field=models.CharField(help_text='Description', max_length=60),
        ),
        migrations.AlterField(
            model_name='licroute',
            name='amp',
            field=models.ForeignKey(db_column='apid', help_text='AMP', on_delete=django.db.models.deletion.CASCADE, to='dmd.AMP'),
        ),
        migrations.AlterField(
            model_name='licroute',
            name='route',
            field=models.ForeignKey(db_column='routecd', help_text='Licenced route', on_delete=django.db.models.deletion.CASCADE, to='dmd.Route'),
        ),
        migrations.AlterField(
            model_name='namechangereason',
            name='cd',
            field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='namechangereason',
            name='descr',
            field=models.CharField(help_text='Description', max_length=150),
        ),
        migrations.AlterField(
            model_name='ont',
            name='form',
            field=models.ForeignKey(db_column='formcd', help_text='Form & Route', on_delete=django.db.models.deletion.CASCADE, to='dmd.OntFormRoute'),
        ),
        migrations.AlterField(
            model_name='ont',
            name='vmp',
            field=models.ForeignKey(db_column='vpid', help_text='VMP', on_delete=django.db.models.deletion.CASCADE, to='dmd.VMP'),
        ),
        migrations.AlterField(
            model_name='ontformroute',
            name='cd',
            field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='ontformroute',
            name='descr',
            field=models.CharField(help_text='Description', max_length=60),
        ),
        migrations.AlterField(
            model_name='packinfo',
            name='ampp',
            field=models.OneToOneField(db_column='appid', help_text='AMPP', on_delete=django.db.models.deletion.CASCADE, to='dmd.AMPP'),
        ),
        migrations.AlterField(
            model_name='packinfo',
            name='pack_order_no',
            field=models.CharField(help_text='Pack order number', max_length=20, null=True),
        ),
        migrations.AlterField(
            model_name='packinfo',
            name='reimb_stat',
            field=models.ForeignKey(db_column='reimb_statcd', help_text='Appliance reimbursement status', on_delete=django.db.models.deletion.CASCADE, to='dmd.ReimbursementStatus'),
        ),
        migrations.AlterField(
            model_name='packinfo',
            name='reimb_statdt',
            field=models.DateField(help_text='Date appliance reimbursement status became effective', null=True),
        ),
        migrations.AlterField(
            model_name='packinfo',
            name='reimb_statprev',
            field=models.ForeignKey(db_column='reimb_statprevcd', help_text='Appliance reimbursement previous status', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='dmd.ReimbursementStatus'),
        ),
        migrations.AlterField(
            model_name='prescribinfo',
            name='acbs',
            field=models.BooleanField(help_text='ACBS'),
        ),
        migrations.AlterField(
            model_name='prescribinfo',
            name='ampp',
            field=models.OneToOneField(db_column='appid', help_text='AMPP', on_delete=django.db.models.deletion.CASCADE, to='dmd.AMPP'),
        ),
        migrations.AlterField(
            model_name='prescribinfo',
            name='dent_f',
            field=models.BooleanField(help_text='Dental formulary'),
        ),
        migrations.AlterField(
            model_name='prescribinfo',
            name='enurse_f',
            field=models.BooleanField(help_text='Nurse extended formulary'),
        ),
        migrations.AlterField(
            model_name='prescribinfo',
            name='fp10_mda',
            field=models.BooleanField(help_text='FP10 MDA Prescription'),
        ),
        migrations.AlterField(
            model_name='prescribinfo',
            name='hosp',
            field=models.BooleanField(help_text='Hospital'),
        ),
        migrations.AlterField(
            model_name='prescribinfo',
            name='nurse_f',
            field=models.BooleanField(help_text='Nurse formulary'),
        ),
        migrations.AlterField(
            model_name='prescribinfo',
            name='padm',
            field=models.BooleanField(help_text='Personally administered'),
        ),
        migrations.AlterField(
            model_name='prescribinfo',
            name='sched_1',
            field=models.BooleanField(help_text='Schedule 1'),
        ),
        migrations.AlterField(
            model_name='prescribinfo',
            name='sched_2',
            field=models.BooleanField(help_text='Schedule 2'),
        ),
        migrations.AlterField(
            model_name='pricebasis',
            name='cd',
            field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='pricebasis',
            name='descr',
            field=models.CharField(help_text='Description', max_length=60),
        ),
        migrations.AlterField(
            model_name='priceinfo',
            name='ampp',
            field=models.OneToOneField(db_column='appid', help_text='AMPP', on_delete=django.db.models.deletion.CASCADE, to='dmd.AMPP'),
        ),
        migrations.AlterField(
            model_name='priceinfo',
            name='price',
            field=models.IntegerField(help_text='Price', null=True),
        ),
        migrations.AlterField(
            model_name='priceinfo',
            name='price_basis',
            field=models.ForeignKey(db_column='price_basiscd', help_text='Price basis', on_delete=django.db.models.deletion.CASCADE, to='dmd.PriceBasis'),
        ),
        migrations.AlterField(
            model_name='priceinfo',
            name='price_prev',
            field=models.IntegerField(help_text='Price prior to change date', null=True),
        ),
        migrations.AlterField(
            model_name='priceinfo',
            name='pricedt',
            field=models.DateField(help_text='Date of price validity', null=True),
        ),
        migrations.AlterField(
            model_name='reimbinfo',
            name='ampp',
            field=models.OneToOneField(db_column='appid', help_text='AMPP', on_delete=django.db.models.deletion.CASCADE, to='dmd.AMPP'),
        ),
        migrations.AlterField(
            model_name='reimbinfo',
            name='bb',
            field=models.BooleanField(help_text='Broken bulk'),
        ),
        migrations.AlterField(
            model_name='reimbinfo',
            name='cal_pack',
            field=models.BooleanField(help_text='Calendar pack'),
        ),
        migrations.AlterField(
            model_name='reimbinfo',
            name='disp_fees',
            field=models.IntegerField(help_text='Dispensing fees', null=True),
        ),
        migrations.AlterField(
            model_name='reimbinfo',
            name='dnd',
            field=models.ForeignKey(db_column='dndcd', help_text='Discount not deducted', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.Dnd'),
        ),
        migrations.AlterField(
            model_name='reimbinfo',
            name='fp34d',
            field=models.BooleanField(help_text='FP34D prescription item'),
        ),
        migrations.AlterField(
            model_name='reimbinfo',
            name='px_chrgs',
            field=models.IntegerField(help_text='Prescription charges', null=True),
        ),
        migrations.AlterField(
            model_name='reimbinfo',
            name='spec_cont',
            field=models.ForeignKey(db_column='spec_contcd', help_text='Special container', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.SpecCont'),
        ),
        migrations.AlterField(
            model_name='reimbursementstatus',
            name='cd',
            field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='reimbursementstatus',
            name='descr',
            field=models.CharField(help_text='Description', max_length=60),
        ),
        migrations.AlterField(
            model_name='route',
            name='cd',
            field=models.BigIntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='route',
            name='cddt',
            field=models.DateField(help_text='Date code is applicable from', null=True),
        ),
        migrations.AlterField(
            model_name='route',
            name='cdprev',
            field=models.BigIntegerField(help_text='Previous code', null=True),
        ),
        migrations.AlterField(
            model_name='route',
            name='descr',
            field=models.CharField(help_text='Description', max_length=60),
        ),
        migrations.AlterField(
            model_name='speccont',
            name='cd',
            field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='speccont',
            name='descr',
            field=models.CharField(help_text='Description', max_length=60),
        ),
        migrations.AlterField(
            model_name='supplier',
            name='cd',
            field=models.BigIntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='supplier',
            name='cddt',
            field=models.DateField(help_text='Date code is applicable from', null=True),
        ),
        migrations.AlterField(
            model_name='supplier',
            name='cdprev',
            field=models.BigIntegerField(help_text='Previous code', null=True),
        ),
        migrations.AlterField(
            model_name='supplier',
            name='descr',
            field=models.CharField(help_text='Description', max_length=80),
        ),
        migrations.AlterField(
            model_name='supplier',
            name='invalid',
            field=models.BooleanField(help_text='Invalid'),
        ),
        migrations.AlterField(
            model_name='unitofmeasure',
            name='cd',
            field=models.BigIntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='unitofmeasure',
            name='cddt',
            field=models.DateField(help_text='Date code is applicable from', null=True),
        ),
        migrations.AlterField(
            model_name='unitofmeasure',
            name='cdprev',
            field=models.BigIntegerField(help_text='Previous code', null=True),
        ),
        migrations.AlterField(
            model_name='unitofmeasure',
            name='descr',
            field=models.CharField(help_text='Description', max_length=150),
        ),
        migrations.AlterField(
            model_name='virtualproductnonavail',
            name='cd',
            field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='virtualproductnonavail',
            name='descr',
            field=models.CharField(help_text='Description', max_length=60),
        ),
        migrations.AlterField(
            model_name='virtualproductpresstatus',
            name='cd',
            field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='virtualproductpresstatus',
            name='descr',
            field=models.CharField(help_text='Description', max_length=60),
        ),
        migrations.AlterField(
            model_name='vmp',
            name='abbrevnm',
            field=models.CharField(help_text='Abbreviated name', max_length=60, null=True),
        ),
        migrations.AlterField(
            model_name='vmp',
            name='basis',
            field=models.ForeignKey(db_column='basiscd', help_text='Basis of preferred name', on_delete=django.db.models.deletion.CASCADE, to='dmd.BasisOfName'),
        ),
        migrations.AlterField(
            model_name='vmp',
            name='basis_prev',
            field=models.ForeignKey(db_column='basis_prevcd', help_text='Basis of previous name', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='dmd.BasisOfName'),
        ),
        migrations.AlterField(
            model_name='vmp',
            name='bnf_code',
            field=models.CharField(help_text='BNF code', max_length=15, null=True),
        ),
        migrations.AlterField(
            model_name='vmp',
            name='cfc_f',
            field=models.BooleanField(help_text='CFC free'),
        ),
        migrations.AlterField(
            model_name='vmp',
            name='combprod',
            field=models.ForeignKey(db_column='combprodcd', help_text='Combination product', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.CombinationProdInd'),
        ),
        migrations.AlterField(
            model_name='vmp',
            name='df_ind',
            field=models.ForeignKey(db_column='df_indcd', help_text='Dose form', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.DfIndicator'),
        ),
        migrations.AlterField(
            model_name='vmp',
            name='glu_f',
            field=models.BooleanField(help_text='Gluten free'),
        ),
        migrations.AlterField(
            model_name='vmp',
            name='id',
            field=models.BigIntegerField(db_column='vpid', help_text='Identifier', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='vmp',
            name='invalid',
            field=models.BooleanField(help_text='Invalid'),
        ),
        migrations.AlterField(
            model_name='vmp',
            name='nm',
            field=models.CharField(help_text='Name', max_length=255),
        ),
        migrations.AlterField(
            model_name='vmp',
            name='nmchange',
            field=models.ForeignKey(db_column='nmchangecd', help_text='Reason for name change', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.NamechangeReason'),
        ),
        migrations.AlterField(
            model_name='vmp',
            name='nmdt',
            field=models.DateField(help_text='Date of name applicability', null=True),
        ),
        migrations.AlterField(
            model_name='vmp',
            name='nmprev',
            field=models.CharField(help_text='Previous name', max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='vmp',
            name='non_avail',
            field=models.ForeignKey(db_column='non_availcd', help_text='Non-availability', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.VirtualProductNonAvail'),
        ),
        migrations.AlterField(
            model_name='vmp',
            name='non_availdt',
            field=models.DateField(help_text='Non-availability status date', null=True),
        ),
        migrations.AlterField(
            model_name='vmp',
            name='pres_f',
            field=models.BooleanField(help_text='Preservative free'),
        ),
        migrations.AlterField(
            model_name='vmp',
            name='pres_stat',
            field=models.ForeignKey(db_column='pres_statcd', help_text='Prescribing status', on_delete=django.db.models.deletion.CASCADE, to='dmd.VirtualProductPresStatus'),
        ),
        migrations.AlterField(
            model_name='vmp',
            name='sug_f',
            field=models.BooleanField(help_text='Sugar free'),
        ),
        migrations.AlterField(
            model_name='vmp',
            name='udfs',
            field=models.DecimalField(decimal_places=3, help_text='Unit dose form size', max_digits=10, null=True),
        ),
        migrations.AlterField(
            model_name='vmp',
            name='udfs_uom',
            field=models.ForeignKey(db_column='udfs_uomcd', help_text='Unit dose form units', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='dmd.UnitOfMeasure'),
        ),
        migrations.AlterField(
            model_name='vmp',
            name='unit_dose_uom',
            field=models.ForeignKey(db_column='unit_dose_uomcd', help_text='Unit dose unit of measure', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='dmd.UnitOfMeasure'),
        ),
        migrations.AlterField(
            model_name='vmp',
            name='vpiddt',
            field=models.DateField(help_text='Date identifier became valid', null=True),
        ),
        migrations.AlterField(
            model_name='vmp',
            name='vpidprev',
            field=models.BigIntegerField(help_text='Previous product identifier', null=True),
        ),
        migrations.AlterField(
            model_name='vmp',
            name='vtm',
            field=models.ForeignKey(db_column='vtmid', help_text='VTM', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.VTM'),
        ),
        migrations.AlterField(
            model_name='vmpp',
            name='bnf_code',
            field=models.CharField(help_text='BNF code', max_length=15, null=True),
        ),
        migrations.AlterField(
            model_name='vmpp',
            name='combpack',
            field=models.ForeignKey(db_column='combpackcd', help_text='Combination pack', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.CombinationPackInd'),
        ),
        migrations.AlterField(
            model_name='vmpp',
            name='id',
            field=models.BigIntegerField(db_column='vppid', help_text='Identifier', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='vmpp',
            name='invalid',
            field=models.BooleanField(help_text='Invalid'),
        ),
        migrations.AlterField(
            model_name='vmpp',
            name='nm',
            field=models.CharField(help_text='Description', max_length=420),
        ),
        migrations.AlterField(
            model_name='vmpp',
            name='qty_uom',
            field=models.ForeignKey(db_column='qty_uomcd', help_text='Quantity unit of measure', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='dmd.UnitOfMeasure'),
        ),
        migrations.AlterField(
            model_name='vmpp',
            name='qtyval',
            field=models.DecimalField(decimal_places=2, help_text='Quantity value', max_digits=10, null=True),
        ),
        migrations.AlterField(
            model_name='vmpp',
            name='vmp',
            field=models.ForeignKey(db_column='vpid', help_text='VMP', on_delete=django.db.models.deletion.CASCADE, to='dmd.VMP'),
        ),
        migrations.AlterField(
            model_name='vpi',
            name='basis_strnt',
            field=models.ForeignKey(db_column='basis_strntcd', help_text='Basis of pharmaceutical strength', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.BasisOfStrnth'),
        ),
        migrations.AlterField(
            model_name='vpi',
            name='bs_subid',
            field=models.BigIntegerField(help_text='Basis of strength substance identifier', null=True),
        ),
        migrations.AlterField(
            model_name='vpi',
            name='ing',
            field=models.ForeignKey(db_column='isid', help_text='Ingredient', on_delete=django.db.models.deletion.CASCADE, to='dmd.Ing'),
        ),
        migrations.AlterField(
            model_name='vpi',
            name='strnt_dnmtr_uom',
            field=models.ForeignKey(db_column='strnt_dnmtr_uomcd', help_text='Strength value denominator unit', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='dmd.UnitOfMeasure'),
        ),
        migrations.AlterField(
            model_name='vpi',
            name='strnt_dnmtr_val',
            field=models.DecimalField(decimal_places=3, help_text='Strength value denominator', max_digits=10, null=True),
        ),
        migrations.AlterField(
            model_name='vpi',
            name='strnt_nmrtr_uom',
            field=models.ForeignKey(db_column='strnt_nmrtr_uomcd', help_text='Strength value numerator unit', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='dmd.UnitOfMeasure'),
        ),
        migrations.AlterField(
            model_name='vpi',
            name='strnt_nmrtr_val',
            field=models.DecimalField(decimal_places=3, help_text='Strength value numerator', max_digits=10, null=True),
        ),
        migrations.AlterField(
            model_name='vpi',
            name='vmp',
            field=models.ForeignKey(db_column='vpid', help_text='VMP', on_delete=django.db.models.deletion.CASCADE, to='dmd.VMP'),
        ),
        migrations.AlterField(
            model_name='vtm',
            name='abbrevnm',
            field=models.CharField(help_text='Abbreviated name', max_length=60, null=True),
        ),
        migrations.AlterField(
            model_name='vtm',
            name='id',
            field=models.BigIntegerField(db_column='vtmid', help_text='Identifier', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='vtm',
            name='invalid',
            field=models.BooleanField(help_text='Invalid'),
        ),
        migrations.AlterField(
            model_name='vtm',
            name='nm',
            field=models.CharField(help_text='Name', max_length=255),
        ),
        migrations.AlterField(
            model_name='vtm',
            name='vtmiddt',
            field=models.DateField(help_text='VTM identifier date', null=True),
        ),
        migrations.AlterField(
            model_name='vtm',
            name='vtmidprev',
            field=models.BigIntegerField(help_text='Previous identifier', null=True),
        ),
    ]
| |
#!/usr/bin/env python
"""
==============
dMRI: DTI, FSL
==============
A pipeline example that uses several interfaces to perform analysis on
diffusion weighted images using FSL FDT tools.
This tutorial is based on the 2010 FSL course and uses data freely available at
the FSL website at: http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz
More details can be found at
http://www.fmrib.ox.ac.uk/fslcourse/lectures/practicals/fdt/index.htm
In order to run this tutorial you need to have FSL tools installed and
accessible from the command line. Check by calling fslinfo from the command
line.
Tell python where to find the appropriate functions.
"""
import nipype.interfaces.io as nio # Data i/o
import nipype.interfaces.fsl as fsl # fsl
import nipype.interfaces.utility as util # utility
import nipype.pipeline.engine as pe # pypeline engine
import os # system functions
from nipype.workflows.dmri.fsl.dti import create_eddy_correct_pipeline,\
create_bedpostx_pipeline
"""
Confirm package dependencies are installed. (This is only for the
tutorial, rarely would you put this in your own code.)
"""
from nipype.utils.misc import package_check
package_check('numpy', '1.3', 'tutorial1')
package_check('scipy', '0.7', 'tutorial1')
package_check('networkx', '1.0', 'tutorial1')
package_check('IPython', '0.10', 'tutorial1')
"""
Setting up workflows
--------------------
This is a generic workflow for DTI data analysis using the FSL
Data specific components
------------------------
The nipype tutorial contains data for two subjects. Subject data is in two
subdirectories, ``dwis1`` and ``dwis2``. Each subject directory contains each
of the following files: bvec, bval, diffusion weighted data, a set of target
masks, a seed file, and a transformation matrix.
Below we set some variables to inform the ``datasource`` about the
layout of our data. We specify the location of the data, the subject
sub-directories and a dictionary that maps each run to a mnemonic (or
field) for the run type (``dwi`` or ``bvals``). These fields become
the output fields of the ``datasource`` node in the pipeline.
Specify the subject directories
"""
subject_list = ['subj1']
"""
Map field names to individual subject runs
"""
info = dict(dwi=[['subject_id', 'data']],
bvecs=[['subject_id', 'bvecs']],
bvals=[['subject_id', 'bvals']],
seed_file=[['subject_id', 'MASK_average_thal_right']],
target_masks=[['subject_id', ['MASK_average_M1_right',
'MASK_average_S1_right',
'MASK_average_occipital_right',
'MASK_average_pfc_right',
'MASK_average_pmc_right',
'MASK_average_ppc_right',
'MASK_average_temporal_right']]])
infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']),
name="infosource")
"""
Here we set up iteration over all the subjects. The following line
is a particular example of the flexibility of the system. The
``datasource`` attribute ``iterables`` tells the pipeline engine that
it should repeat the analysis on each of the items in the
``subject_list``. In the current example, the entire first level
preprocessing and estimation will be repeated for each subject
contained in subject_list.
"""
infosource.iterables = ('subject_id', subject_list)
"""
Now we create a :class:`nipype.interfaces.io.DataGrabber` object and
fill in the information from above about the layout of our data. The
:class:`nipype.pipeline.engine.Node` module wraps the interface object
and provides additional housekeeping and pipeline specific
functionality.
"""
datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
outfields=list(info.keys())),
name='datasource')
datasource.inputs.template = "%s/%s"
# This needs to point to the fdt folder you can find after extracting
# http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz
datasource.inputs.base_directory = os.path.abspath('fsl_course_data/fdt/')
datasource.inputs.field_template = dict(dwi='%s/%s.nii.gz',
seed_file="%s.bedpostX/%s.nii.gz",
target_masks="%s.bedpostX/%s.nii.gz")
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True
"""
Setup for Diffusion Tensor Computation
--------------------------------------
Here we will create a generic workflow for DTI computation
"""
# Generic sub-workflow that computes the diffusion tensor from raw DWI data.
computeTensor = pe.Workflow(name='computeTensor')
"""
extract the volume with b=0 (nodif_brain)
"""
# Extract only the first volume (t_min=0, t_size=1), i.e. the b=0 image.
fslroi = pe.Node(interface=fsl.ExtractROI(), name='fslroi')
fslroi.inputs.t_min = 0
fslroi.inputs.t_size = 1
"""
create a brain mask from the nodif_brain
"""
bet = pe.Node(interface=fsl.BET(), name='bet')
bet.inputs.mask = True
# Fractional intensity threshold for BET; lower values give larger brain masks.
bet.inputs.frac = 0.34
"""
correct the diffusion weighted images for eddy_currents
"""
eddycorrect = create_eddy_correct_pipeline('eddycorrect')
# Register all volumes to volume 0 (the b=0 reference).
eddycorrect.inputs.inputnode.ref_num = 0
"""
compute the diffusion tensor in each voxel
"""
dtifit = pe.Node(interface=fsl.DTIFit(), name='dtifit')
"""
connect all the nodes for this workflow
"""
computeTensor.connect([
    (fslroi, bet, [('roi_file', 'in_file')]),
    (eddycorrect, dtifit, [('outputnode.eddy_corrected', 'dwi')]),
    (infosource, dtifit, [['subject_id', 'base_name']]),
    (bet, dtifit, [('mask_file', 'mask')])
])
"""
Setup for Tracktography
-----------------------
Here we will create a workflow to enable probabilistic tracktography
and hard segmentation of the seed region
"""
tractography = pe.Workflow(name='tractography')
tractography.base_dir = os.path.abspath('fsl_dti_tutorial')
"""
estimate the diffusion parameters: phi, theta, and so on
"""
bedpostx = create_bedpostx_pipeline()
bedpostx.get_node("xfibres").iterables = ("n_fibres", [1, 2])
flirt = pe.Node(interface=fsl.FLIRT(), name='flirt')
flirt.inputs.in_file = fsl.Info.standard_image('MNI152_T1_2mm_brain.nii.gz')
flirt.inputs.dof = 12
"""
perform probabilistic tracktography
"""
probtrackx = pe.Node(interface=fsl.ProbTrackX(), name='probtrackx')
probtrackx.inputs.mode = 'seedmask'
probtrackx.inputs.c_thresh = 0.2
probtrackx.inputs.n_steps = 2000
probtrackx.inputs.step_length = 0.5
probtrackx.inputs.n_samples = 5000
probtrackx.inputs.opd = True
probtrackx.inputs.os2t = True
probtrackx.inputs.loop_check = True
"""
perform hard segmentation on the output of probtrackx
"""
findthebiggest = pe.Node(interface=fsl.FindTheBiggest(), name='findthebiggest')
"""
connect all the nodes for this workflow
"""
tractography.add_nodes([bedpostx, flirt])
tractography.connect([(bedpostx, probtrackx, [('outputnode.thsamples', 'thsamples'),
('outputnode.phsamples', 'phsamples'),
('outputnode.fsamples', 'fsamples')
]),
(probtrackx, findthebiggest, [('targets', 'in_files')]),
(flirt, probtrackx, [('out_matrix_file', 'xfm')])
])
"""
Setup data storage area
"""
datasink = pe.Node(interface=nio.DataSink(), name='datasink')
datasink.inputs.base_directory = os.path.abspath('dtiresults')
def getstripdir(subject_id):
    """Return the per-subject working directory whose prefix the datasink strips."""
    import os
    working_root = os.path.abspath('data/workingdir/dwiproc')
    return os.path.join(working_root, '_subject_id_%s' % subject_id)
"""
Setup the pipeline that combines the two workflows: tractography and computeTensor
----------------------------------------------------------------------------------
"""
# Top-level workflow combining tensor computation and tractography.
dwiproc = pe.Workflow(name="dwiproc")
dwiproc.base_dir = os.path.abspath('fsl_dti_tutorial')
dwiproc.connect([
    (infosource, datasource, [('subject_id', 'subject_id')]),
    (datasource, computeTensor, [('dwi', 'fslroi.in_file'),
                                 ('bvals', 'dtifit.bvals'),
                                 ('bvecs', 'dtifit.bvecs'),
                                 ('dwi', 'eddycorrect.inputnode.in_file')]),
    (datasource, tractography, [('bvals', 'bedpostx.inputnode.bvals'),
                                ('bvecs', 'bedpostx.inputnode.bvecs'),
                                ('seed_file', 'probtrackx.seed'),
                                ('target_masks', 'probtrackx.target_masks')
                                ]),
    (computeTensor, tractography, [('eddycorrect.outputnode.eddy_corrected', 'bedpostx.inputnode.dwi'),
                                   ('bet.mask_file', 'bedpostx.inputnode.mask'),
                                   ('bet.mask_file', 'probtrackx.mask'),
                                   ('fslroi.roi_file', 'flirt.reference')]),
    # The (attribute, function) tuple routes subject_id through getstripdir
    # before it reaches the datasink's strip_dir input.
    (infosource, datasink, [('subject_id', 'container'),
                            (('subject_id', getstripdir), 'strip_dir')]),
    (tractography, datasink, [('findthebiggest.out_file', 'fbiggest.@biggestsegmentation')])
])
if __name__ == '__main__':
    dwiproc.run()
    dwiproc.write_graph()
| |
from __future__ import print_function
import os, sys, re, json
from collections import defaultdict
import numpy as np
import pandas as pd
try:
from word2keypress.weight_matrix import WEIGHT_MATRIX
from word2keypress.weighted_edist import (
STARTSTR, ENDSTR, KB, BLANK, SHIFT_KEY, CAPS_KEY, all_edits, _editdist)
except ImportError:
from weight_matrix import WEIGHT_MATRIX
from weighted_edist import (
STARTSTR, ENDSTR, KB, BLANK, SHIFT_KEY, CAPS_KEY, all_edits, _editdist
)
EDIT_DIST_CUTOFF = 1
# Keep only the edits whose two sides appear to be within EDIT_DIST_CUTOFF of
# each other; _editdist(...)[1] seems to flag pairs within the limit -- TODO
# confirm against weighted_edist._editdist.
WEIGHT_MATRIX = [
    (e,w) for e, w in WEIGHT_MATRIX
    if _editdist(e[0], e[1], limit=EDIT_DIST_CUTOFF)[1]
]
# giant_regex = re.compile('|'.join(
#     re.escape(l) for ((l,r),w) in WEIGHT_MATRIX))
def allowed_edits(pw_key_str):
    """
    Return the subset of WEIGHT_MATRIX applicable to @pw_key_str.

    An edit (l -> r) is applicable if @l (with BLANK padding removed)
    occurs somewhere in the key-sequence string.  The result is sorted by
    weight, heaviest first.
    """
    if not pw_key_str.startswith(STARTSTR):
        pw_key_str = '{}{}{}'.format(STARTSTR, pw_key_str, ENDSTR)
    applicable = []
    for (l, r), w in WEIGHT_MATRIX:
        if l.replace(BLANK, '') in pw_key_str:
            applicable.append(((l, r), w))
    applicable.sort(key=lambda entry: entry[1], reverse=True)
    return applicable
def edit_locs(pw_key_str, l):
    """Return the (start, end) spans where the left side @l matches @pw_key_str."""
    needle = re.escape(l.replace(BLANK, ''))
    spans = []
    for m in re.finditer('({})'.format(needle), pw_key_str):
        # Drop degenerate empty matches pinned to the string boundaries.
        if m.start() < len(pw_key_str) and m.end() > 0:
            spans.append((m.start(), m.end()))
    return spans
def apply_edit(pw_key_str, e):
    """
    Apply the edit e=(l, r) to pw_key_str at every location where l matches.

    Yields one (typo_key_str, weight_fraction) pair per match location; the
    weight fraction splits probability mass evenly across the locations.
    """
    l, r = e
    matched_indexes = edit_locs(pw_key_str, l)
    # BUGFIX: the assertion message previously referenced the undefined name
    # `pw`, so a failed assertion raised NameError instead of AssertionError.
    assert matched_indexes, "Wow!! matched index is empty for pw={}, e={}"\
        .format(pw_key_str, e)
    for pos_s, pos_e in matched_indexes:
        typo_key_str = pw_key_str[:pos_s] + r + pw_key_str[pos_e:]
        yield typo_key_str.replace(BLANK, ''), 1.0/len(matched_indexes)
def num_typos(n, ed):
    # type: (int, int) -> int
    """Estimated count of typos of a length-@n string at edit distance @ed,
    assuming 2*96 possible per-position modifications."""
    assert ed>=0, "edit distance should be no less than 0. Got = {}".format(ed)
    per_edit_choices = (2*96)**ed
    combinations = n + 1
    i = 2
    while i <= ed:
        combinations *= (n + i)
        i += 1
    return combinations * per_edit_choices
def get_prob(rpw, tpw):
    """
    Return the model probability that the real password @rpw is mistyped
    into @tpw: sum the normalized weights of all allowed-edit applications
    of @rpw that actually yield @tpw, plus a uniform smoothing term.
    """
    edits = set(all_edits(rpw, tpw, N=1, edit_cutoff=1))
    pw_key_str = STARTSTR + KB.word_to_keyseq(rpw) + ENDSTR
    E = allowed_edits(pw_key_str)
    # Normalization constant: total weight of all edits applicable to rpw.
    s = float(sum(x[1] for x in E))
    if(s==0): return 0.0
    # print("s = {} (len(E)={})".format(s, len(E)))
    # print(edits)
    # NOTE(review): this estimate is computed but never used.
    total_ed1_typos_estimate = 2*96*(len(rpw) + 1)
    # Smoothing: uniform mass over the estimated typo population.
    f = 1.0/num_typos(len(rpw), 1 if edits else 2)
    for e, w in E:
        if e not in edits: continue
        for typo_key_str, w_frac in apply_edit(pw_key_str, e):
            # NOTE(review): str.strip() removes a *set of characters* from both
            # ends, not a prefix/suffix; this relies on STARTSTR/ENDSTR being
            # single marker characters -- confirm.
            typo_key_str = typo_key_str.strip(STARTSTR).strip(ENDSTR)
            typo = KB.keyseq_to_word(typo_key_str)
            if typo == tpw:
                f += w*w_frac
    return f/s
def get_topk_typos(pw, k=10):
    """
    Return the top @k most probable typos of the word @pw as
    (typo, probability) pairs, sorted by decreasing probability.
    """
    pw_key_str = STARTSTR + KB.word_to_keyseq(pw) + ENDSTR
    # Rank edits by weight scaled by the length of the left side, so longer
    # (more specific) patterns are applied first.
    E = sorted(allowed_edits(pw_key_str), key=lambda x: x[1]*len(x[0][0]),
               reverse=True)
    tt = defaultdict(float)
    # Normalization constant over all applicable edits.
    s = float(sum(x[1] for x in E))
    # print("s = {} (len(E)={})".format(s, len(E)))
    i = 0
    debug_pw = {pw.swapcase()}  # only used by the commented-out debug print
    # Accumulate candidate typos until enough distinct ones are collected or
    # the edit list is exhausted.
    while len(tt)<k*len(pw)*10 and i <len(E):
        e, w = E[i]
        for typo_key_str, w_frac in apply_edit(pw_key_str, e):
            typo_key_str = typo_key_str.strip(STARTSTR).strip(ENDSTR)
            typo = KB.keyseq_to_word(typo_key_str)
            tt[typo] += w * w_frac/s
            # if typo in debug_pw:
            #     print("{!r} ->> {} ({}, {})".format(typo_key_str, e, w, w*w_frac/s))
        i += 1
    return sorted(tt.items(), key=lambda x: x[1], reverse=True)[:k]
def read_typos(f_name):
    """Load the typo CSV and return a 3% random sample (fixed seed) of the
    rows where the typed password differs from the real one."""
    frame = pd.read_csv(f_name, skipinitialspace=False).astype(str)
    genuine_typos = frame[frame.rpw != frame.tpw]
    sample_size = int(0.03 * len(frame.index))
    return genuine_typos.sample(sample_size, random_state=435)
def test_model_rank(train_f):
    """
    Evaluate the model on a held-out typo sample by average rank: for each
    (rpw, tpw) pair within Damerau-Levenshtein distance 1, find where tpw
    falls in the model's top-k predicted typos of rpw.  Prints statistics
    and returns the array of positive model probabilities.
    """
    from pyxdameraulevenshtein import damerau_levenshtein_distance as dldist
    d_ts = read_typos(train_f)
    a = np.array([get_prob(rpw, tpw)
                  for rpw, tpw in zip(d_ts.rpw, d_ts.tpw)
                  if dldist(rpw.lower(), tpw.lower())<=1])
    a = a[a>0]
    rank = []
    for rpw, tpw in zip(d_ts.rpw, d_ts.tpw):
        if dldist(rpw.lower(), tpw.lower())>1: continue
        k = 20
        typos = [tp for tp, w in get_topk_typos(rpw, k)]
        if tpw in typos:
            rank.append(typos.index(tpw)+1)
        else:
            # Not found among the top k: penalize with the worst rank.
            rank.append(k)
    # NOTE(review): if no pair passes the distance filter, `rank` is empty and
    # `k` is unbound here, so this line would raise.
    print("Avg_rank: ", sum(rank)/float(len(rank)*k))
    print(d_ts.shape, a.shape, a.mean(), a.std())
    return a
def test_model_likelihood(train_f):
    """
    Evaluate the model by mean log-likelihood over held-out typo pairs within
    Damerau-Levenshtein distance 1; zero-probability pairs are excluded from
    the mean.  Returns the mean log-likelihood.
    """
    from pyxdameraulevenshtein import damerau_levenshtein_distance as dldist
    d_ts = read_typos(train_f)
    ed = d_ts.apply(lambda r: dldist(r.rpw, r.tpw), axis=1)
    probs = d_ts[ed<=1].apply(lambda r: get_prob(r.rpw, r.tpw), axis=1)
    likelihood = np.log(probs[probs>0]).mean()
    return likelihood
if __name__ == '__main__':
    # Simple command-line dispatcher over the model's entry points.
    USAGE = """Usage:
    $ {} [options] [arguments]
    -allowed-edits <rpw> : returns the allowed edits of rpw
    -sample <password>: samples typos for the password from the model
    -prob <rpw> <tpw>: probability of rpw -> tpw
    -topktypos <rpw> [<n>] : returns n (default 10) typos of rpw
    -test <typo-fname> : Tests the efficacy of the model, ~/pwtypos-code/typodata/typos.csv
    -keypress <rpw> : Return the keypress representation of
    """.format(sys.argv[0])
    if len(sys.argv)<=1:
        print(USAGE)
        exit(1)
    if sys.argv[1] == '-allowed-edits':
        pw = KB.word_to_keyseq(sys.argv[2])
        # Debug sanity check on the first weight-matrix entry.
        (l,r),w = WEIGHT_MATRIX[0]
        assert l.replace(BLANK, '') in pw, "{!r}, {} {}"\
            .format(l.replace(BLANK, ''), pw, w)
        print(allowed_edits(pw))
    elif sys.argv[1] == '-sample':
        pw = sys.argv[2]
        # NOTE(review): `sample_typos` is neither defined nor imported in this
        # module, so this branch raises NameError -- confirm intended import.
        print("{} --> {}".format(pw, len(set((sample_typos(pw, 100))))))
    elif sys.argv[1] == '-topktypos':
        pw = sys.argv[2]
        n = int(sys.argv[3]) if len(sys.argv)>3 else 10
        typos = get_topk_typos(pw, n)
        print('\n'.join("{}: {:.5f}".format(x,y) for x, y in typos))
        print("{} --> {}".format(pw, len(typos)))
    elif sys.argv[1] == '-prob':
        print(get_prob(sys.argv[2], sys.argv[3]))
    elif sys.argv[1] == '-test':
        # test_model_rank(sys.argv[2])
        print("Log-Likelihood: ", test_model_likelihood(sys.argv[2]))
    elif sys.argv[1] == '-keypress':
        print(repr(KB.word_to_keyseq(sys.argv[2])))
    else:
        print(USAGE)
| |
# The MIT License (MIT)
# Copyright (c) 2016, 2017 by the ESA CCI Toolbox development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import fnmatch
import os
import os.path
import re
import sys
import urllib.parse
from collections import OrderedDict
from contextlib import contextmanager
from datetime import datetime, date, timedelta
from io import StringIO
from typing import Union, Tuple, Sequence, Optional, Iterable, Any, Dict
import numpy as np
from .sround import sround
from .undefined import UNDEFINED
__author__ = "Norman Fomferra (Brockmann Consult GmbH)"
def qualified_name_to_object(qualified_name: str, default_module_name='builtins'):
    """
    Convert a fully qualified name into a Python object.
    It is true that ``qualified_name_to_object(object_to_qualified_name(obj)) is obj``.
    >>> qualified_name_to_object('unittest.TestCase')
    <class 'unittest.case.TestCase'>
    See also :py:func:`object_to_qualified_name`.
    :param qualified_name: fully qualified name of the form [<module>'.'{<name>'.'}]<name>
    :param default_module_name: default module name to be used if the name does not contain one
    :return: the Python object
    :raise ImportError: If the module could not be imported
    :raise AttributeError: If the name could not be found
    """
    names = qualified_name.split('.')
    if len(names) == 1:
        # No module part: resolve the single name in the default module.
        module_name, attr_names = default_module_name, names
    else:
        module_name, attr_names = names[0], names[1:]
    obj = __import__(module_name)
    for attr_name in attr_names:
        obj = getattr(obj, attr_name)
    return obj
def object_to_qualified_name(value, fail=False, default_module_name='builtins') -> Union[str, None]:
    """
    Get the fully qualified name of a Python object.
    It is true that ``qualified_name_to_object(object_to_qualified_name(obj)) is obj``.
    >>> from unittest import TestCase
    >>> object_to_qualified_name(TestCase)
    'unittest.case.TestCase'
    See also :py:func:`qualified_name_to_object`.
    :param value: some Python object
    :param fail: raise ``ValueError`` if name cannot be derived.
    :param default_module_name: if this is the *value*'s module name, no module name will be returned.
    :return: fully qualified name if it can be derived, otherwise ``None`` if *fail* is ``False``.
    :raise ValueError: if *fail* is ``True`` and the name cannot be derived.
    """
    module_name = getattr(value, '__module__', None)
    if module_name == default_module_name:
        # Builtins are addressable by bare name; drop the module prefix.
        module_name = None
    name = getattr(value, '__name__', None)
    if name:
        return '%s.%s' % (module_name, name) if module_name else name
    if fail:
        raise ValueError("missing attribute '__name__'")
    return str(value)
@contextmanager
def fetch_std_streams():
    """
    A context manager that temporarily replaces ``sys.stdout`` and
    ``sys.stderr`` with in-memory buffers and yields them.
    Usage:::
        with fetch_std_streams() as stdout, stderr
            sys.stdout.write('yes')
            sys.stderr.write('oh no')
        print('fetched', stdout.getvalue())
        print('fetched', stderr.getvalue())
    :return: yields ``sys.stdout`` and ``sys.stderr`` redirected into buffers of type ``StringIO``
    """
    # Flush before swapping so pending output lands in the real streams.
    sys.stdout.flush()
    sys.stderr.flush()
    saved_streams = sys.stdout, sys.stderr
    sys.stdout, sys.stderr = StringIO(), StringIO()
    try:
        yield sys.stdout, sys.stderr
    finally:
        sys.stdout.flush()
        sys.stderr.flush()
        sys.stdout, sys.stderr = saved_streams
def encode_url_path(path_pattern: str, path_args: dict = None, query_args: dict = None) -> str:
    """
    Return an URL path with an optional query string.
    Placeholders of the form ``{name}`` in *path_pattern* are replaced by the
    URL-encoded values from *path_args* (``urllib.parse.quote_plus``); the
    query string is built from *query_args* via ``urllib.parse.urlencode``.
    :param path_pattern: The path pattern which may include any number of placeholders of the form ``{name}``
    :param path_args: The values for the placeholders in *path_pattern*
    :param query_args: The query arguments
    :return: an URL-encoded path
    """
    if path_args:
        # None values encode to the empty string.
        encoded_args = {
            key: urllib.parse.quote_plus(str(val)) if val is not None else ''
            for key, val in path_args.items()
        }
        path = path_pattern.format(**encoded_args)
    else:
        path = path_pattern
    if query_args:
        return path + '?' + urllib.parse.urlencode(query_args)
    return path
def to_datetime_range(start_datetime_or_str: Union[datetime, date, str, None],
                      end_datetime_or_str: Union[datetime, date, str, None],
                      default=None) -> Tuple[datetime, datetime]:
    """Convert a pair of date/time-like values into a (start, end) datetime
    range; a missing end falls back to the start (as an upper bound)."""
    if not start_datetime_or_str and not end_datetime_or_str:
        return default
    if not end_datetime_or_str:
        # NOTE: unreachable guard kept for safety; both-empty returns above.
        if not start_datetime_or_str:
            raise ValueError('start_datetime_or_str argument must be given')
        end_datetime_or_str = start_datetime_or_str
    return (to_datetime(start_datetime_or_str, upper_bound=False),
            to_datetime(end_datetime_or_str, upper_bound=True))
def to_datetime(datetime_or_str: Union[datetime, date, str, None], upper_bound=False, default=None) -> datetime:
    """Coerce *datetime_or_str* into a ``datetime``.

    Strings are tried against several formats of decreasing precision; with
    *upper_bound* set, coarse formats are pushed to the end of their period.
    ``None`` and blank strings yield *default*.
    :raise ValueError: if a non-blank string matches no known format
    :raise TypeError: if the argument has an unsupported type
    """
    if datetime_or_str is None:
        return default
    if isinstance(datetime_or_str, str):
        if not datetime_or_str.strip():
            return default
        candidates = (
            ("%Y-%m-%dT%H:%M:%S", timedelta()),
            ("%Y-%m-%d %H:%M:%S", timedelta()),
            ("%Y-%m-%d", timedelta(hours=24, seconds=-1)),
            ("%Y-%m", timedelta(weeks=4, seconds=-1)),
            ("%Y", timedelta(days=365, seconds=-1)),
        )
        for fmt, delta in candidates:
            try:
                parsed = datetime.strptime(datetime_or_str, fmt)
            except ValueError:
                continue
            return parsed + delta if upper_bound else parsed
        raise ValueError('Invalid date/time value: "%s"' % datetime_or_str)
    if isinstance(datetime_or_str, datetime):
        return datetime_or_str
    if isinstance(datetime_or_str, date):
        # Plain dates are anchored at noon.
        return datetime(datetime_or_str.year, datetime_or_str.month, datetime_or_str.day, 12)
    raise TypeError('datetime_or_str argument must be a string or instance of datetime.date')
def to_list(value,
            dtype: type = str,
            name: str = None,
            nullable: bool = True,
            csv: bool = True,
            strip: bool = True):
    """
    Convert *value* into a list of items of type *dtype*.
    :param value: Some value that may be a sequence or a scalar
    :param dtype: The desired target type
    :param name: An (argument) name used for ``ValueError`` messages
    :param nullable: Whether *value* can be None.
    :param csv: Whether to split *value* if it is a string containing commas.
    :param strip: Whether to strip CSV string values, used only if *csv* is True.
    :return: A list with elements of type *dtype* or None if *value* is None and *nullable* is True
    """
    if value is None:
        if nullable:
            return None
        raise ValueError('%s argument must not be None' % (name or 'some'))
    if csv and isinstance(value, str):
        parts = value.split(',')
        if strip:
            return [dtype(part.strip()) for part in parts]
        return [dtype(part) for part in parts]
    if isinstance(value, dtype):
        # Already a single item of the target type: wrap it.
        return [value]
    # noinspection PyBroadException
    try:
        return [dtype(item) for item in value]
    except Exception:
        # Not iterable (or items not convertible): treat as a scalar.
        return [dtype(value)]
# Quote characters accepted for Python string constants.
_PYTHON_QUOTE_CHARS = ['"', "'"]
def to_str_constant(s: str, quote="'") -> str:
    """
    Convert a given string into another string that is a valid Python representation of a string constant.
    :param s: the string
    :param quote: the quote character, either a single or double quote
    :return: the quoted, escaped source form of *s*
    :raise ValueError: if *s* is None or *quote* is not a single/double quote
    """
    if s is None:
        raise ValueError()
    if quote not in _PYTHON_QUOTE_CHARS:
        raise ValueError()
    return quote + s.replace('\\', '\\\\').replace(quote, "\\%s" % quote) + quote
def is_str_constant(s: str) -> bool:
    """
    Test whether a given string is a Python representation of a string constant.
    :param s: the string
    :return: True, if so.
    """
    # BUGFIX: previously written as ``s and ...`` which returned the falsy
    # *s* itself ('' or None) instead of False, violating the annotated
    # ``-> bool`` return type.
    return bool(s) and len(s) >= 2 and s[0] == s[-1] and s[0] in _PYTHON_QUOTE_CHARS
@contextmanager
def cwd(path: str):
    """
    A context manager that temporarily changes the current working directory
    to *path* and restores the previous one on exit.
    Usage:::
        print(os.getcwd())
        with cwd('./test'):
            print(os.getcwd())
        print(os.getcwd())
    :return: yields the new working directory (absolute *path* passed in)
    :raise ValueError: if *path* is None
    """
    if path is None:
        raise ValueError('path argument must be given')
    previous_dir = os.getcwd()
    try:
        os.chdir(path)
        yield os.getcwd()
    finally:
        # Always restore, even if the body (or chdir itself) raised.
        os.chdir(previous_dir)
_DATETIME64 = np.dtype('datetime64')
_ZERO_THMS_POSTFIX = 'T00:00:00'
_ZERO_MICR_POSTFIX = '.000000000'
def date_to_simple_str(v):
    """Render a datetime-like value as text, trimming trailing zero
    nanoseconds and then a zero (midnight) time component."""
    text = str(v)
    for postfix in (_ZERO_MICR_POSTFIX, _ZERO_THMS_POSTFIX):
        if text.endswith(postfix):
            text = text[:-len(postfix)]
    return text
def to_json(v):
    """
    Recursively convert *v* into a JSON-serializable value.

    Handles primitives, complex numbers (as [real, imag] pairs), types
    (as qualified names), numpy datetime64 values (as trimmed strings),
    numpy arrays (as nested lists), mappings, and generic iterables.
    Falls back to ``str(v)``.
    """
    if v is None:
        return v
    t = type(v)
    if t in {bool, int, float, str}:
        return v
    if t == complex:
        # JSON has no complex type; encode as a [real, imag] pair.
        return [v.real, v.imag]
    if isinstance(v, type):
        return object_to_qualified_name(v)
    # TODO (forman): handle dtype=uint64/int64 here, as JSON does not support 64-bit ints
    is_datetime64 = False
    try:
        is_datetime64 = np.issubdtype(v.dtype, np.datetime64)
    except AttributeError:
        # Not numpy-like (no .dtype attribute).
        pass
    if is_datetime64:
        # Convert time values to time strings
        is_scalar = False
        try:
            is_scalar = v.size == 1 and len(v.shape) == 0
        except AttributeError:
            pass
        if is_scalar:
            return date_to_simple_str(v)
        else:
            li = []
            for vi in v:
                li.append(date_to_simple_str(vi))
            return li
    if isinstance(v, np.ndarray) and not np.issubdtype(v.dtype, np.datetime64):
        try:
            return v.tolist()
        except AttributeError:
            pass
    # Numpy scalar or 0-d array: unwrap to the Python scalar.
    try:
        return v.item()
    except (AttributeError, ValueError):
        pass
    # Mapping: stringify keys, convert values recursively.
    try:
        d = OrderedDict()
        for ki, vi in v.items():
            d[str(ki)] = to_json(vi)
        return d
    except AttributeError:
        pass
    # Generic iterable: convert elements recursively.
    try:
        li = []
        for vi in v:
            li.append(to_json(vi))
        return li
    except TypeError:
        pass
    return str(v)
def filter_fileset(names: Sequence[str],
                   includes: Optional[Sequence[str]] = None,
                   excludes: Optional[Sequence[str]] = None) -> Sequence[str]:
    """
    Filter a fileset given by the sequence *names* using the wildcard patterns
    in *includes* and *excludes*.  With neither given, *names* is returned
    unchanged; otherwise the result is a set.
    :param names: The names of the fileset
    :param includes: Wildcard patterns that select the file names to be included,
    :param excludes: Wildcard patterns that select the file names to be excluded,
    :return: The filtered fileset
    """
    if includes is None and excludes is None:
        return names
    if includes is None:
        selected = set(names)
    else:
        selected = set()
        for pattern in includes:
            selected.update(fnmatch.filter(names, pattern))
    if excludes is not None:
        # Filter against a frozen snapshot while mutating the result set.
        snapshot = selected
        selected = set(snapshot)
        for pattern in excludes:
            selected.difference_update(fnmatch.filter(snapshot, pattern))
    return selected
def new_indexed_name(names: Iterable[str], pattern: str) -> str:
    """
    Return a new name that is unique in *names* and conforms to *pattern*,
    which must contain a single ``"{index}"`` substring.
    :param names: Sequence of names
    :param pattern: Naming pattern, e.g. "var_{index}"
    :return: a new name, e.g. "var_3"
    :raise ValueError: if *pattern* lacks "{index}" or yields an invalid identifier
    """
    if "{index}" not in pattern:
        raise ValueError('pattern must contain "{index}"')
    matcher = re.compile(pattern.replace("{index}", r"(\d+)"))
    # Find the highest index already taken by a matching name.
    highest = 0
    for existing in names:
        found = matcher.match(existing)
        if found and found.group(1):
            highest = max(highest, int(found.group(1)))
    candidate_index = highest + 1
    while True:
        candidate = pattern.replace("{index}", str(candidate_index))
        if not candidate.isidentifier():
            raise ValueError('pattern does not yield a valid name')
        if candidate not in names:
            return candidate
        candidate_index += 1
# Alias for the type of None, used in isinstance checks below.
NoneType = type(None)
# noinspection PyBroadException
def to_scalar(value: Any, nchars=None, ndigits=None, stringify=False) -> Any:
    """
    Convert the given *value* into a JSON-serializable, scalar value.
    If the conversion fails, UNDEFINED is returned.
    :param value: Any value.
    :param nchars: If not None and greater zero, text values will be limited to *nchars* characters.
    :param ndigits: If not None, floating point values will be rounded to *ndigits* significant digits.
    :param stringify: If True, non-primitive values will be converted to strings.
    :return: A JSON-serializable, scalar value or UNDEFINED, if the conversion fails.
    """
    is_float = False
    is_str = False
    if isinstance(value, (int, bool, NoneType)):
        return value
    elif isinstance(value, float):
        is_float = True
    elif isinstance(value, str):
        is_str = True
    elif hasattr(value, 'shape') and hasattr(value, 'dtype'):
        # Array-like value (has numpy-style shape/dtype); only size-1 values
        # can be reduced to a scalar.
        try:
            shape = value.shape
            # 'geometry' dtypes (presumably from geopandas) have no numpy
            # kind; treat them as plain objects -- TODO confirm.
            dtype = 'object' if str(value.dtype) == 'geometry' else value.dtype
            ndim = len(shape)
            size = 1
            for dim in shape:
                size *= dim
            if size > 1:
                return UNDEFINED
            if ndim >= 1:
                # Extract the single element of a size-1 array.
                index = 0 if ndim == 1 else (0,) * ndim
                try:
                    value = value[index]
                except BaseException:
                    pass
            if np.issubdtype(dtype, np.integer):
                return int(value)
            elif np.issubdtype(dtype, np.bool_):
                return bool(value)
            elif np.issubdtype(dtype, np.floating):
                value = float(value)
                is_float = True
            elif np.issubdtype(dtype, np.str_) or stringify:
                value = str(value)
                is_str = True
            else:
                return UNDEFINED
        except BaseException as e:
            # NOTE(review): prints instead of logging -- consider a logger.
            print("Error in to_scalar: " + str(e))
            return UNDEFINED
    elif stringify:
        value = str(value)
        is_str = True
    else:
        return UNDEFINED
    if is_float:
        if ndigits is not None:
            # Round to significant digits (see sround).
            return sround(value, ndigits=ndigits)
        return value
    if is_str:
        if nchars is not None and len(value) > nchars:
            # Truncate long strings, marking the cut with an ellipsis.
            return value[0: nchars] + '...'
        return value
    return UNDEFINED
def get_dependencies() -> Dict[str, str]:
    """
    Get a mapping from package names to package versions.
    Only Cate's core dependencies are listed; packages that are not
    installed are silently omitted.
    """
    # Idea stolen from xarray.print_versions
    dependencies = [
        # (MODULE_NAME, f(mod) -> mod version)
        ("cartopy", lambda mod: mod.__version__),
        ("conda", lambda mod: mod.__version__),
        ("dask", lambda mod: mod.__version__),
        ("distributed", lambda mod: mod.__version__),
        ("geopandas", lambda mod: mod.__version__),
        ("matplotlib", lambda mod: mod.__version__),
        ("numba", lambda mod: mod.__version__),
        ("numpy", lambda mod: mod.__version__),
        ("pandas", lambda mod: mod.__version__),
        ("rasterio", lambda mod: mod.__version__),
        ("scipy", lambda mod: mod.__version__),
        ("xarray", lambda mod: mod.__version__),
        ("xcube.version", lambda mod: mod.version),
        ("xcube_cci.version", lambda mod: mod.version),
        ("zarr", lambda mod: mod.__version__),
    ]
    import importlib
    dependencies_dict = {}
    for (module_name, module_version) in dependencies:
        # Report under the top-level package name (e.g. "xcube" for "xcube.version").
        module_key = module_name.split('.')[0]
        # noinspection PyBroadException
        try:
            if module_name in sys.modules:
                module = sys.modules[module_name]
            else:
                module = importlib.import_module(module_name)
        except Exception:
            # Dependency not installed: omit it from the result.
            pass
        else:
            # noinspection PyBroadException
            try:
                dependencies_dict[module_key] = module_version(module)
            except Exception as e:
                print(e)
                # Module present but version lookup failed.
                dependencies_dict[module_key] = "installed"
    return dependencies_dict
| |
# Copyright 2016-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A tool to run tests in many different ways.
from pathlib import Path
from collections import namedtuple
from copy import deepcopy
import argparse
import concurrent.futures as conc
import datetime
import enum
import io
import json
import multiprocessing
import os
import pickle
import platform
import random
import re
import signal
import subprocess
import sys
import tempfile
import textwrap
import time
import typing as T
import xml.etree.ElementTree as et
from . import build
from . import environment
from . import mlog
from .dependencies import ExternalProgram
from .mesonlib import MesonException, get_wine_shortpath, split_args, join_args
from .backend.backends import TestProtocol
if T.TYPE_CHECKING:
from .backend.backends import TestSerialisation
# GNU autotools interprets a return code of 77 from tests it executes to
# mean that the test should be skipped.
GNU_SKIP_RETURNCODE = 77
# GNU autotools interprets a return code of 99 from tests it executes to
# mean that the test failed even before testing what it is supposed to test.
GNU_ERROR_RETURNCODE = 99
def is_windows() -> bool:
    """Return True when running on native Windows or a MinGW environment."""
    system_name = platform.system().lower()
    return 'mingw' in system_name or system_name == 'windows'
def is_cygwin() -> bool:
    """Return True when running under Cygwin."""
    return 'cygwin' in platform.system().lower()
def determine_worker_count() -> int:
    """Pick the number of parallel test workers.

    Honours the MESON_TESTTHREADS environment variable when set; otherwise
    falls back to the machine's CPU count, or 1 if either source is unusable.
    """
    varname = 'MESON_TESTTHREADS'
    if varname not in os.environ:
        try:
            # cpu_count can fail in some weird environments such as Debian
            # reproducible builds.
            return multiprocessing.cpu_count()
        except Exception:
            return 1
    try:
        return int(os.environ[varname])
    except ValueError:
        print('Invalid value in {}, using 1 thread.'.format(varname))
        return 1
def add_arguments(parser: argparse.ArgumentParser) -> None:
    """Register all 'meson test' command line options on *parser*."""
    parser.add_argument('--repeat', default=1, dest='repeat', type=int,
                        help='Number of times to run the tests.')
    parser.add_argument('--no-rebuild', default=False, action='store_true',
                        help='Do not rebuild before running tests.')
    parser.add_argument('--gdb', default=False, dest='gdb', action='store_true',
                        help='Run test under gdb.')
    parser.add_argument('--gdb-path', default='gdb', dest='gdb_path',
                        help='Path to the gdb binary (default: gdb).')
    parser.add_argument('--list', default=False, dest='list', action='store_true',
                        help='List available tests.')
    parser.add_argument('--wrapper', default=None, dest='wrapper', type=split_args,
                        help='wrapper to run tests with (e.g. Valgrind)')
    parser.add_argument('-C', default='.', dest='wd',
                        # https://github.com/python/typeshed/issues/3107
                        # https://github.com/python/mypy/issues/7177
                        type=os.path.abspath,  # type: ignore
                        help='directory to cd into before running')
    parser.add_argument('--suite', default=[], dest='include_suites', action='append', metavar='SUITE',
                        help='Only run tests belonging to the given suite.')
    parser.add_argument('--no-suite', default=[], dest='exclude_suites', action='append', metavar='SUITE',
                        help='Do not run tests belonging to the given suite.')
    # NOTE: --no-stdsplit stores False into 'split' (default True).
    parser.add_argument('--no-stdsplit', default=True, dest='split', action='store_false',
                        help='Do not split stderr and stdout in test logs.')
    parser.add_argument('--print-errorlogs', default=False, action='store_true',
                        help="Whether to print failing tests' logs.")
    parser.add_argument('--benchmark', default=False, action='store_true',
                        help="Run benchmarks instead of tests.")
    parser.add_argument('--logbase', default='testlog',
                        help="Base name for log file.")
    parser.add_argument('--num-processes', default=determine_worker_count(), type=int,
                        help='How many parallel processes to use.')
    parser.add_argument('-v', '--verbose', default=False, action='store_true',
                        help='Do not redirect stdout and stderr')
    parser.add_argument('-q', '--quiet', default=False, action='store_true',
                        help='Produce less output to the terminal.')
    parser.add_argument('-t', '--timeout-multiplier', type=float, default=None,
                        help='Define a multiplier for test timeout, for example '
                        ' when running tests in particular conditions they might take'
                        ' more time to execute.')
    parser.add_argument('--setup', default=None, dest='setup',
                        help='Which test setup to use.')
    parser.add_argument('--test-args', default=[], type=split_args,
                        help='Arguments to pass to the specified test(s) or all tests')
    parser.add_argument('args', nargs='*',
                        help='Optional list of tests to run')
def returncode_to_status(retcode: int) -> str:
    """Render a subprocess return code as a human-readable status string.

    Note: We can't use `os.WIFSIGNALED(result.returncode)` and the related
    functions here because the status returned by subprocess is munged. It
    returns a negative value if the process was killed by a signal rather than
    the raw status returned by `wait()`. Also, if a shell sits between Meson
    and the actual unit test, that shell is likely to convert a termination due
    to a signal into an exit status of 128 plus the signal number.
    """
    def signal_name(signum: int) -> str:
        # Not every integer maps to a named signal on this platform.
        try:
            return signal.Signals(signum).name
        except ValueError:
            return 'SIGinvalid'

    if retcode < 0:
        signum = -retcode
        return '(killed by signal {} {})'.format(signum, signal_name(signum))

    if retcode <= 128:
        return '(exit status {})'.format(retcode)

    # Could be a plain exit status, or 128 + signal number from a shell.
    signum = retcode - 128
    return '(exit status {} or signal {} {})'.format(retcode, signum, signal_name(signum))
def env_tuple_to_str(env: T.Iterable[T.Tuple[str, str]]) -> str:
    """Format (name, value) pairs as a shell-like "NAME='value' " string."""
    pieces = ["{}='{}' ".format(name, value) for name, value in env]
    return ''.join(pieces)
class TestException(MesonException):
    """Raised for errors encountered while setting up or running tests."""
    pass
@enum.unique
class TestResult(enum.Enum):
    """Possible outcomes of a single test (or TAP subtest)."""

    OK = 'OK'
    TIMEOUT = 'TIMEOUT'
    SKIP = 'SKIP'
    FAIL = 'FAIL'
    EXPECTEDFAIL = 'EXPECTEDFAIL'
    UNEXPECTEDPASS = 'UNEXPECTEDPASS'
    ERROR = 'ERROR'

    @staticmethod
    def maxlen() -> int:
        # Width of the longest value; used to align console output columns.
        return 14  # len(UNEXPECTEDPASS)
class TAPParser:
    """Incremental parser for Test Anything Protocol (TAP) output.

    parse() consumes lines from *io* and yields namedtuples describing each
    event of interest: individual Test results, the Plan, Bailout lines,
    the TAP Version declaration, and parse Errors.
    """
    Plan = namedtuple('Plan', ['count', 'late', 'skipped', 'explanation'])
    Bailout = namedtuple('Bailout', ['message'])
    Test = namedtuple('Test', ['number', 'name', 'result', 'explanation'])
    Error = namedtuple('Error', ['message'])
    Version = namedtuple('Version', ['version'])

    # Parser states: ordinary lines, just after a test point (where a YAML
    # diagnostic block may start in TAP 13+), and inside a YAML block.
    _MAIN = 1
    _AFTER_TEST = 2
    _YAML = 3

    _RE_BAILOUT = re.compile(r'Bail out!\s*(.*)')
    _RE_DIRECTIVE = re.compile(r'(?:\s*\#\s*([Ss][Kk][Ii][Pp]\S*|[Tt][Oo][Dd][Oo])\b\s*(.*))?')
    _RE_PLAN = re.compile(r'1\.\.([0-9]+)' + _RE_DIRECTIVE.pattern)
    _RE_TEST = re.compile(r'((?:not )?ok)\s*(?:([0-9]+)\s*)?([^#]*)' + _RE_DIRECTIVE.pattern)
    _RE_VERSION = re.compile(r'TAP version ([0-9]+)')
    _RE_YAML_START = re.compile(r'(\s+)---.*')
    _RE_YAML_END = re.compile(r'\s+\.\.\.\s*')

    def __init__(self, io: T.Iterator[str]):
        # Any iterator of lines works (a file object, io.StringIO, ...).
        self.io = io

    def parse_test(self, ok: bool, num: int, name: str, directive: T.Optional[str], explanation: T.Optional[str]) -> \
            T.Generator[T.Union['TAPParser.Test', 'TAPParser.Error'], None, None]:
        """Map one 'ok'/'not ok' line (plus directive) to Test/Error events."""
        name = name.strip()
        explanation = explanation.strip() if explanation else None
        if directive is not None:
            directive = directive.upper()
            if directive == 'SKIP':
                if ok:
                    yield self.Test(num, name, TestResult.SKIP, explanation)
                    return
                # A failed SKIP falls through and is reported as FAIL below.
            elif directive == 'TODO':
                yield self.Test(num, name, TestResult.UNEXPECTEDPASS if ok else TestResult.EXPECTEDFAIL, explanation)
                return
            else:
                yield self.Error('invalid directive "{}"'.format(directive,))

        yield self.Test(num, name, TestResult.OK if ok else TestResult.FAIL, explanation)

    def parse(self) -> T.Generator[T.Union['TAPParser.Test', 'TAPParser.Error', 'TAPParser.Version', 'TAPParser.Plan', 'TAPParser.Bailout'], None, None]:
        """Run the line-oriented state machine over self.io, yielding events."""
        found_late_test = False
        bailed_out = False
        plan = None
        lineno = 0
        num_tests = 0
        yaml_lineno = None
        yaml_indent = ''
        state = self._MAIN
        version = 12
        while True:
            lineno += 1
            try:
                line = next(self.io).rstrip()
            except StopIteration:
                break

            # YAML blocks are only accepted after a test
            if state == self._AFTER_TEST:
                if version >= 13:
                    m = self._RE_YAML_START.match(line)
                    if m:
                        state = self._YAML
                        yaml_lineno = lineno
                        yaml_indent = m.group(1)
                        continue
                state = self._MAIN

            elif state == self._YAML:
                if self._RE_YAML_END.match(line):
                    state = self._MAIN
                    continue
                if line.startswith(yaml_indent):
                    continue
                # A de-indented line ends an unterminated YAML block; the
                # line itself is then reprocessed as a normal line below.
                yield self.Error('YAML block not terminated (started on line {})'.format(yaml_lineno))
                state = self._MAIN

            assert state == self._MAIN
            if line.startswith('#'):
                continue

            m = self._RE_TEST.match(line)
            if m:
                if plan and plan.late and not found_late_test:
                    yield self.Error('unexpected test after late plan')
                    found_late_test = True
                num_tests += 1
                # Test numbers are optional; fill in the running count.
                num = num_tests if m.group(2) is None else int(m.group(2))
                if num != num_tests:
                    yield self.Error('out of order test numbers')
                yield from self.parse_test(m.group(1) == 'ok', num,
                                           m.group(3), m.group(4), m.group(5))
                state = self._AFTER_TEST
                continue

            m = self._RE_PLAN.match(line)
            if m:
                if plan:
                    yield self.Error('more than one plan found')
                else:
                    count = int(m.group(1))
                    skipped = (count == 0)
                    if m.group(2):
                        if m.group(2).upper().startswith('SKIP'):
                            if count > 0:
                                yield self.Error('invalid SKIP directive for plan')
                            skipped = True
                        else:
                            yield self.Error('invalid directive for plan')
                    plan = self.Plan(count=count, late=(num_tests > 0),
                                     skipped=skipped, explanation=m.group(3))
                    yield plan
                continue

            m = self._RE_BAILOUT.match(line)
            if m:
                yield self.Bailout(m.group(1))
                bailed_out = True
                continue

            m = self._RE_VERSION.match(line)
            if m:
                # The TAP version is only accepted as the first line
                if lineno != 1:
                    yield self.Error('version number must be on the first line')
                    continue
                version = int(m.group(1))
                if version < 13:
                    yield self.Error('version number should be at least 13')
                else:
                    yield self.Version(version=version)
                continue

            if not line:
                continue

            # NOTE(review): format((lineno,)) interpolates the tuple, so the
            # message reads 'line (5,)'; probably meant format(lineno).
            yield self.Error('unexpected input at line {}'.format((lineno,)))

        if state == self._YAML:
            yield self.Error('YAML block not terminated (started on line {})'.format(yaml_lineno))

        if not bailed_out and plan and num_tests != plan.count:
            if num_tests < plan.count:
                yield self.Error('Too few tests run (expected {}, got {})'.format(plan.count, num_tests))
            else:
                yield self.Error('Too many tests run (expected {}, got {})'.format(plan.count, num_tests))
class JunitBuilder:

    """Builder for Junit test results.

    Junit is impossible to stream out, it requires attributes counting the
    total number of tests, failures, skips, and errors in the root element
    and in each test suite. As such, we use a builder class to track each
    test case, and calculate all metadata before writing it out.

    For tests with multiple results (like from a TAP test), we record the
    test as a suite with the project_name.test_name. This allows us to track
    each result separately. For tests with only one result (such as exit-code
    tests) we record each one into a suite with the name project_name. The use
    of the project_name allows us to sort subproject tests separately from
    the root project.
    """

    def __init__(self, filename: str) -> None:
        # Root <testsuites> element; totals are filled in by write().
        self.filename = filename
        self.root = et.Element(
            'testsuites', tests='0', errors='0', failures='0')
        self.suites = {}  # type: T.Dict[str, et.Element]

    def log(self, name: str, test: 'TestRun') -> None:
        """Log a single test case."""
        # GTest already produced its own JUnit XML; re-namespace its suites
        # into this report instead of rebuilding them.
        if test.junit is not None:
            for suite in test.junit.findall('.//testsuite'):
                # Assume that we don't need to merge anything here...
                suite.attrib['name'] = '{}.{}.{}'.format(test.project, name, suite.attrib['name'])

                # GTest can inject invalid attributes
                for case in suite.findall('.//testcase[@result]'):
                    del case.attrib['result']
                for case in suite.findall('.//testcase[@timestamp]'):
                    del case.attrib['timestamp']
                self.root.append(suite)
            return

        # In this case we have a test binary with multiple results.
        # We want to record this so that each result is recorded
        # separately
        if test.results:
            suitename = '{}.{}'.format(test.project, name)
            assert suitename not in self.suites, 'duplicate suite'

            suite = self.suites[suitename] = et.Element(
                'testsuite',
                name=suitename,
                tests=str(len(test.results)),
                errors=str(sum(1 for r in test.results if r is TestResult.ERROR)),
                failures=str(sum(1 for r in test.results if r in
                                 {TestResult.FAIL, TestResult.UNEXPECTEDPASS, TestResult.TIMEOUT})),
                skipped=str(sum(1 for r in test.results if r is TestResult.SKIP)),
            )

            for i, result in enumerate(test.results):
                # Both name and classname are required. Set them both to the
                # number of the test in a TAP test, as TAP doesn't give names.
                testcase = et.SubElement(suite, 'testcase', name=str(i), classname=str(i))
                if result is TestResult.SKIP:
                    et.SubElement(testcase, 'skipped')
                elif result is TestResult.ERROR:
                    et.SubElement(testcase, 'error')
                elif result is TestResult.FAIL:
                    et.SubElement(testcase, 'failure')
                elif result is TestResult.UNEXPECTEDPASS:
                    fail = et.SubElement(testcase, 'failure')
                    fail.text = 'Test unexpected passed.'
                elif result is TestResult.TIMEOUT:
                    fail = et.SubElement(testcase, 'failure')
                    fail.text = 'Test did not finish before configured timeout.'
            if test.stdo:
                out = et.SubElement(suite, 'system-out')
                out.text = test.stdo.rstrip()
            if test.stde:
                err = et.SubElement(suite, 'system-err')
                err.text = test.stde.rstrip()
        else:
            # Single-result test: group it into a per-project suite.
            if test.project not in self.suites:
                suite = self.suites[test.project] = et.Element(
                    'testsuite', name=test.project, tests='1', errors='0',
                    failures='0', skipped='0')
            else:
                suite = self.suites[test.project]
                suite.attrib['tests'] = str(int(suite.attrib['tests']) + 1)

            testcase = et.SubElement(suite, 'testcase', name=name, classname=name)
            if test.res is TestResult.SKIP:
                et.SubElement(testcase, 'skipped')
                suite.attrib['skipped'] = str(int(suite.attrib['skipped']) + 1)
            elif test.res is TestResult.ERROR:
                et.SubElement(testcase, 'error')
                suite.attrib['errors'] = str(int(suite.attrib['errors']) + 1)
            elif test.res is TestResult.FAIL:
                et.SubElement(testcase, 'failure')
                suite.attrib['failures'] = str(int(suite.attrib['failures']) + 1)
            if test.stdo:
                out = et.SubElement(testcase, 'system-out')
                out.text = test.stdo.rstrip()
            if test.stde:
                err = et.SubElement(testcase, 'system-err')
                err.text = test.stde.rstrip()

    def write(self) -> None:
        """Calculate total test counts and write out the xml result."""
        for suite in self.suites.values():
            self.root.append(suite)
            # "skipped" is really not allowed in the "testsuites" element,
            # so only the other counters are rolled up into the root.
            for attr in ['tests', 'errors', 'failures']:
                self.root.attrib[attr] = str(int(self.root.attrib[attr]) + int(suite.attrib[attr]))
        tree = et.ElementTree(self.root)
        with open(self.filename, 'wb') as f:
            tree.write(f, encoding='utf-8', xml_declaration=True)
class TestRun:
    """The outcome of one executed test: status, timing, output and command."""

    @classmethod
    def make_gtest(cls, test: 'TestSerialisation', test_env: T.Dict[str, str],
                   returncode: int, starttime: float, duration: float,
                   stdo: T.Optional[str], stde: T.Optional[str],
                   cmd: T.Optional[T.List[str]]) -> 'TestRun':
        """Build a TestRun for a GTest binary, attaching its XML report.

        Reads <name>.xml (from the test's workdir when set) and delegates
        result classification to make_exitcode().
        """
        filename = '{}.xml'.format(test.name)
        if test.workdir:
            filename = os.path.join(test.workdir, filename)
        tree = et.parse(filename)

        return cls.make_exitcode(
            test, test_env, returncode, starttime, duration, stdo, stde, cmd,
            junit=tree)

    @classmethod
    def make_exitcode(cls, test: 'TestSerialisation', test_env: T.Dict[str, str],
                      returncode: int, starttime: float, duration: float,
                      stdo: T.Optional[str], stde: T.Optional[str],
                      cmd: T.Optional[T.List[str]], **kwargs) -> 'TestRun':
        """Build a TestRun judged purely by the process exit code.

        77 means SKIP and 99 means ERROR (GNU autotools convention);
        otherwise a non-zero status is a failure, inverted for tests
        declared with should_fail.
        """
        if returncode == GNU_SKIP_RETURNCODE:
            res = TestResult.SKIP
        elif returncode == GNU_ERROR_RETURNCODE:
            res = TestResult.ERROR
        elif test.should_fail:
            res = TestResult.EXPECTEDFAIL if bool(returncode) else TestResult.UNEXPECTEDPASS
        else:
            res = TestResult.FAIL if bool(returncode) else TestResult.OK
        return cls(test, test_env, res, [], returncode, starttime, duration, stdo, stde, cmd, **kwargs)

    @classmethod
    def make_tap(cls, test: 'TestSerialisation', test_env: T.Dict[str, str],
                 returncode: int, starttime: float, duration: float,
                 stdo: str, stde: str,
                 cmd: T.Optional[T.List[str]]) -> 'TestRun':
        """Build a TestRun by parsing the test's stdout as TAP."""
        res = None    # type: T.Optional[TestResult]
        results = []  # type: T.List[TestResult]
        failed = False

        for i in TAPParser(io.StringIO(stdo)).parse():
            if isinstance(i, TAPParser.Bailout):
                results.append(TestResult.ERROR)
                failed = True
            elif isinstance(i, TAPParser.Test):
                results.append(i.result)
                if i.result not in {TestResult.OK, TestResult.EXPECTEDFAIL}:
                    failed = True
            elif isinstance(i, TAPParser.Error):
                results.append(TestResult.ERROR)
                stde += '\nTAP parsing error: ' + i.message
                failed = True

        if returncode != 0:
            # A TAP producer must exit with 0; anything else is an error
            # regardless of the individual subtest outcomes.
            res = TestResult.ERROR
            stde += '\n(test program exited with status code {})'.format(returncode,)

        if res is None:
            # Now determine the overall result of the test based on the outcome of the subcases
            if all(t is TestResult.SKIP for t in results):
                # This includes the case where num_tests is zero
                res = TestResult.SKIP
            elif test.should_fail:
                res = TestResult.EXPECTEDFAIL if failed else TestResult.UNEXPECTEDPASS
            else:
                res = TestResult.FAIL if failed else TestResult.OK

        return cls(test, test_env, res, results, returncode, starttime, duration, stdo, stde, cmd)

    def __init__(self, test: 'TestSerialisation', test_env: T.Dict[str, str],
                 res: TestResult, results: T.List[TestResult], returncode:
                 int, starttime: float, duration: float,
                 stdo: T.Optional[str], stde: T.Optional[str],
                 cmd: T.Optional[T.List[str]], *, junit: T.Optional[et.ElementTree] = None):
        assert isinstance(res, TestResult)
        self.res = res                  # overall outcome
        self.results = results          # per-subtest outcomes; may be an empty list
        self.returncode = returncode
        self.starttime = starttime
        self.duration = duration
        self.stdo = stdo
        self.stde = stde
        self.cmd = cmd
        self.env = test_env
        self.should_fail = test.should_fail
        self.project = test.project_name
        self.junit = junit              # pre-parsed JUnit XML (GTest only)

    def get_log(self) -> str:
        """Render this run as a human-readable block for the text log."""
        res = '--- command ---\n'
        if self.cmd is None:
            res += 'NONE\n'
        else:
            # Only show environment entries the test added on top of ours.
            test_only_env = set(self.env.items()) - set(os.environ.items())
            starttime_str = time.strftime("%H:%M:%S", time.gmtime(self.starttime))
            res += '{} {}{}\n'.format(
                starttime_str, env_tuple_to_str(test_only_env), ' '.join(self.cmd)
            )
        if self.stdo:
            res += '--- stdout ---\n'
            res += self.stdo
        if self.stde:
            if res[-1:] != '\n':
                res += '\n'
            res += '--- stderr ---\n'
            res += self.stde
        if res[-1:] != '\n':
            res += '\n'
        res += '-------\n\n'
        return res
def decode(stream: T.Union[None, bytes]) -> str:
    """Decode captured output bytes, tolerating non-UTF-8 data.

    Returns '' for None.  Tries UTF-8 first and falls back to a lossy
    ISO-8859-1 decode so a test's garbage output never crashes the runner.
    """
    if stream is None:
        return ''
    try:
        decoded = stream.decode('utf-8')
    except UnicodeDecodeError:
        decoded = stream.decode('iso-8859-1', errors='ignore')
    return decoded
def write_json_log(jsonlogfile: T.TextIO, test_name: str, result: TestRun) -> None:
    """Append one test result as a single JSON line to *jsonlogfile*."""
    entry = {
        'name': test_name,
        'stdout': result.stdo,
        'result': result.res.value,
        'starttime': result.starttime,
        'duration': result.duration,
        'returncode': result.returncode,
        'env': result.env,
        'command': result.cmd,
    }  # type: T.Dict[str, T.Any]
    # stderr is only recorded when the test actually produced some.
    if result.stde:
        entry['stderr'] = result.stde
    jsonlogfile.write(json.dumps(entry) + '\n')
def run_with_mono(fname: str) -> bool:
    """Return True when *fname* is a .exe that must be launched through mono."""
    is_exe = fname.endswith('.exe')
    return is_exe and not (is_windows() or is_cygwin())
def load_benchmarks(build_dir: str) -> T.List['TestSerialisation']:
    """Load the serialised benchmark definitions from a build directory.

    Raises TestException when *build_dir* does not look like a Meson
    build directory.
    """
    datafile = Path(build_dir) / 'meson-private' / 'meson_benchmark_setup.dat'
    if not datafile.is_file():
        raise TestException('Directory {!r} does not seem to be a Meson build directory.'.format(build_dir))
    with datafile.open('rb') as f:
        # pickle is safe here: the file was written by Meson itself at
        # configure time.
        return T.cast(T.List['TestSerialisation'], pickle.load(f))
def load_tests(build_dir: str) -> T.List['TestSerialisation']:
    """Load the serialised test definitions from a build directory.

    Raises TestException when *build_dir* does not look like a Meson
    build directory.
    """
    datafile = Path(build_dir) / 'meson-private' / 'meson_test_setup.dat'
    if not datafile.is_file():
        raise TestException('Directory {!r} does not seem to be a Meson build directory.'.format(build_dir))
    with datafile.open('rb') as f:
        # pickle is safe here: the file was written by Meson itself at
        # configure time.
        return T.cast(T.List['TestSerialisation'], pickle.load(f))
class SingleTestRunner:
    """Runs one test (optionally under gdb or a wrapper) and builds a TestRun."""

    def __init__(self, test: 'TestSerialisation', test_env: T.Dict[str, str],
                 env: T.Dict[str, str], options: argparse.Namespace):
        self.test = test
        self.test_env = test_env    # only the test's own additions, for logging
        self.env = env              # full merged environment for the subprocess
        self.options = options

    def _get_cmd(self) -> T.Optional[T.List[str]]:
        """Return the command to execute, or None when it cannot be run here."""
        if self.test.fname[0].endswith('.jar'):
            return ['java', '-jar'] + self.test.fname
        elif not self.test.is_cross_built and run_with_mono(self.test.fname[0]):
            return ['mono'] + self.test.fname
        elif self.test.cmd_is_built and self.test.needs_exe_wrapper:
            if self.test.exe_runner is None:
                # Can not run test on cross compiled executable
                # because there is no execute wrapper.
                return None
            elif self.test.cmd_is_built:
                # NOTE(review): this nested guard is always true here (the
                # enclosing elif already checked cmd_is_built).
                # If the command is not built (ie, its a python script),
                # then we don't check for the exe-wrapper
                if not self.test.exe_runner.found():
                    msg = ('The exe_wrapper defined in the cross file {!r} was not '
                           'found. Please check the command and/or add it to PATH.')
                    raise TestException(msg.format(self.test.exe_runner.name))
                return self.test.exe_runner.get_command() + self.test.fname
        return self.test.fname

    def run(self) -> TestRun:
        """Run the test once and return its TestRun (SKIP when not runnable)."""
        cmd = self._get_cmd()
        if cmd is None:
            skip_stdout = 'Not run because can not execute cross compiled binaries.'
            return TestRun(self.test, self.test_env, TestResult.SKIP, [], GNU_SKIP_RETURNCODE, time.time(), 0.0, skip_stdout, None, None)
        else:
            wrap = TestHarness.get_wrapper(self.options)
            if self.options.gdb:
                # An interactive gdb session must not be killed by a timeout.
                self.test.timeout = None
            return self._run_cmd(wrap + cmd + self.test.cmd_args + self.options.test_args)

    def _run_cmd(self, cmd: T.List[str]) -> TestRun:
        """Execute *cmd*, handle timeout/^C cleanup, and classify the result."""
        starttime = time.time()

        if self.test.extra_paths:
            self.env['PATH'] = os.pathsep.join(self.test.extra_paths + ['']) + self.env['PATH']
            # When the test runs through wine, WINEPATH must carry the extra
            # paths (in short form) instead.
            winecmd = []
            for c in cmd:
                winecmd.append(c)
                if os.path.basename(c).startswith('wine'):
                    self.env['WINEPATH'] = get_wine_shortpath(
                        winecmd,
                        ['Z:' + p for p in self.test.extra_paths] + self.env.get('WINEPATH', '').split(';')
                    )
                    break

        # If MALLOC_PERTURB_ is not set, or if it is set to an empty value,
        # (i.e., the test or the environment don't explicitly set it), set
        # it ourselves. We do this unconditionally for regular tests
        # because it is extremely useful to have.
        # Setting MALLOC_PERTURB_="0" will completely disable this feature.
        if ('MALLOC_PERTURB_' not in self.env or not self.env['MALLOC_PERTURB_']) and not self.options.benchmark:
            self.env['MALLOC_PERTURB_'] = str(random.randint(1, 255))

        stdout = None
        stderr = None
        if not self.options.verbose:
            stdout = tempfile.TemporaryFile("wb+")
            stderr = tempfile.TemporaryFile("wb+") if self.options.split else stdout
        if self.test.protocol is TestProtocol.TAP and stderr is stdout:
            # TAP output must be parsed from a clean stdout stream.
            stdout = tempfile.TemporaryFile("wb+")

        # Let gdb handle ^C instead of us
        if self.options.gdb:
            previous_sigint_handler = signal.getsignal(signal.SIGINT)
            # Make the meson executable ignore SIGINT while gdb is running.
            signal.signal(signal.SIGINT, signal.SIG_IGN)

        def preexec_fn() -> None:
            if self.options.gdb:
                # Restore the SIGINT handler for the child process to
                # ensure it can handle it.
                signal.signal(signal.SIGINT, signal.SIG_DFL)
            else:
                # We don't want setsid() in gdb because gdb needs the
                # terminal in order to handle ^C and not show tcsetpgrp()
                # errors avoid not being able to use the terminal.
                os.setsid()  # type: ignore

        extra_cmd = []  # type: T.List[str]
        if self.test.protocol is TestProtocol.GTEST:
            # Ask GTest for an XML report at the exact path that
            # TestRun.make_gtest() will later read it from:
            # <workdir>/<name>.xml (or <name>.xml in the cwd).
            gtestname = '{}.xml'.format(self.test.name)
            if self.test.workdir:
                gtestname = os.path.join(self.test.workdir, gtestname)
            extra_cmd.append('--gtest_output=xml:{}'.format(gtestname))

        p = subprocess.Popen(cmd + extra_cmd,
                             stdout=stdout,
                             stderr=stderr,
                             env=self.env,
                             cwd=self.test.workdir,
                             preexec_fn=preexec_fn if not is_windows() else None)
        timed_out = False
        kill_test = False
        if self.test.timeout is None:
            timeout = None
        elif self.options.timeout_multiplier is not None:
            timeout = self.test.timeout * self.options.timeout_multiplier
        else:
            timeout = self.test.timeout
        try:
            p.communicate(timeout=timeout)
        except subprocess.TimeoutExpired:
            if self.options.verbose:
                print('{} time out (After {} seconds)'.format(self.test.name, timeout))
            timed_out = True
        except KeyboardInterrupt:
            mlog.warning('CTRL-C detected while running {}'.format(self.test.name))
            kill_test = True
        finally:
            if self.options.gdb:
                # Let us accept ^C again
                signal.signal(signal.SIGINT, previous_sigint_handler)

        additional_error = None

        if kill_test or timed_out:
            # Python does not provide multiplatform support for
            # killing a process and all its children so we need
            # to roll our own.
            if is_windows():
                subprocess.run(['taskkill', '/F', '/T', '/PID', str(p.pid)])
            else:

                def _send_signal_to_process_group(pgid: int, signum: int) -> None:
                    """Send *signum* to the whole process group *pgid*."""
                    try:
                        os.killpg(pgid, signum)  # type: ignore
                    except ProcessLookupError:
                        # Sometimes (e.g. with Wine) this happens.
                        # There's nothing we can do (maybe the process
                        # already died) so carry on.
                        pass

                # Send a termination signal to the process group that setsid()
                # created - giving it a chance to perform any cleanup.
                _send_signal_to_process_group(p.pid, signal.SIGTERM)

                # Make sure the termination signal actually kills the process
                # group, otherwise retry with a SIGKILL.
                try:
                    p.communicate(timeout=0.5)
                except subprocess.TimeoutExpired:
                    _send_signal_to_process_group(p.pid, signal.SIGKILL)
            try:
                p.communicate(timeout=1)
            except subprocess.TimeoutExpired:
                # An earlier kill attempt has not worked for whatever reason.
                # Try to kill it one last time with a direct call.
                # If the process has spawned children, they will remain around.
                p.kill()
                try:
                    p.communicate(timeout=1)
                except subprocess.TimeoutExpired:
                    additional_error = 'Test process could not be killed.'
            except ValueError:
                additional_error = 'Could not read output. Maybe the process has redirected its stdout/stderr?'
        endtime = time.time()
        duration = endtime - starttime
        if additional_error is None:
            if stdout is None:
                stdo = ''
            else:
                stdout.seek(0)
                stdo = decode(stdout.read())
            if stderr is None or stderr is stdout:
                stde = ''
            else:
                stderr.seek(0)
                stde = decode(stderr.read())
        else:
            stdo = ""
            stde = additional_error
        if timed_out:
            return TestRun(self.test, self.test_env, TestResult.TIMEOUT, [], p.returncode, starttime, duration, stdo, stde, cmd)
        else:
            if self.test.protocol is TestProtocol.EXITCODE:
                return TestRun.make_exitcode(self.test, self.test_env, p.returncode, starttime, duration, stdo, stde, cmd)
            elif self.test.protocol is TestProtocol.GTEST:
                return TestRun.make_gtest(self.test, self.test_env, p.returncode, starttime, duration, stdo, stde, cmd)
            else:
                if self.options.verbose:
                    print(stdo, end='')
                return TestRun.make_tap(self.test, self.test_env, p.returncode, starttime, duration, stdo, stde, cmd)
class TestHarness:
    def __init__(self, options: argparse.Namespace):
        """Load the test (or benchmark) definitions and reset all counters."""
        self.options = options
        self.collected_logs = []  # type: T.List[str]
        self.fail_count = 0
        self.expectedfail_count = 0
        self.unexpectedpass_count = 0
        self.success_count = 0
        self.skip_count = 0
        self.timeout_count = 0
        self.is_run = False
        self.tests = None
        self.results = []  # type: T.List[TestRun]
        self.logfilename = None   # type: T.Optional[str]
        self.logfile = None       # type: T.Optional[T.TextIO]
        self.jsonlogfile = None   # type: T.Optional[T.TextIO]
        self.junit = None         # type: T.Optional[JunitBuilder]
        if self.options.benchmark:
            self.tests = load_benchmarks(options.wd)
        else:
            self.tests = load_tests(options.wd)
        # Collect the set of all suite names the loaded tests belong to.
        ss = set()
        for t in self.tests:
            for s in t.suite:
                ss.add(s)
        self.suites = list(ss)
    def __del__(self) -> None:
        # Best-effort cleanup when the harness is garbage-collected without
        # having been used as a context manager.
        self.close_logfiles()
    def __enter__(self):
        # Context-manager entry; the harness itself is the managed resource.
        return self
    def __exit__(self, exc_type, exc_value, traceback) -> None:
        # Always close the log files, even when an exception escaped.
        self.close_logfiles()
def close_logfiles(self) -> None:
for f in ['logfile', 'jsonlogfile']:
lfile = getattr(self, f)
if lfile:
lfile.close()
setattr(self, f, None)
    def merge_suite_options(self, options: argparse.Namespace, test: 'TestSerialisation') -> T.Dict[str, str]:
        """Apply the chosen test setup to *options* (mutated in place).

        Returns the environment the setup defines, layered over a copy of
        os.environ.  Exits the process when the setup name is unknown or
        when both the setup and the command line specify an exe wrapper.
        """
        if ':' in options.setup:
            # Fully qualified "project:setup" name.
            if options.setup not in self.build_data.test_setups:
                sys.exit("Unknown test setup '{}'.".format(options.setup))
            current = self.build_data.test_setups[options.setup]
        else:
            # Bare setup name: qualify it with the test's own project.
            full_name = test.project_name + ":" + options.setup
            if full_name not in self.build_data.test_setups:
                sys.exit("Test setup '{}' not found from project '{}'.".format(options.setup, test.project_name))
            current = self.build_data.test_setups[full_name]
        if not options.gdb:
            options.gdb = current.gdb
        if options.gdb:
            options.verbose = True
        if options.timeout_multiplier is None:
            options.timeout_multiplier = current.timeout_multiplier
        # if options.env is None:
        #     options.env = current.env # FIXME, should probably merge options here.
        if options.wrapper is not None and current.exe_wrapper is not None:
            sys.exit('Conflict: both test setup and command line specify an exe wrapper.')
        if options.wrapper is None:
            options.wrapper = current.exe_wrapper
        return current.env.get_env(os.environ.copy())
    def get_test_runner(self, test: 'TestSerialisation') -> SingleTestRunner:
        """Create a SingleTestRunner with per-test options and environment."""
        # Deep-copy so a setup applied for this test does not leak into the
        # options used for other tests.
        options = deepcopy(self.options)
        if not options.setup:
            options.setup = self.build_data.test_setup_default_name
        if options.setup:
            env = self.merge_suite_options(options, test)
        else:
            env = os.environ.copy()
        test_env = test.env.get_env(env)
        env.update(test_env)
        if (test.is_cross_built and test.needs_exe_wrapper and
                test.exe_runner and test.exe_runner.found()):
            env['MESON_EXE_WRAPPER'] = join_args(test.exe_runner.get_command())
        return SingleTestRunner(test, test_env, env, options)
def process_test_result(self, result: TestRun) -> None:
if result.res is TestResult.TIMEOUT:
self.timeout_count += 1
elif result.res is TestResult.SKIP:
self.skip_count += 1
elif result.res is TestResult.OK:
self.success_count += 1
elif result.res is TestResult.FAIL or result.res is TestResult.ERROR:
self.fail_count += 1
elif result.res is TestResult.EXPECTEDFAIL:
self.expectedfail_count += 1
elif result.res is TestResult.UNEXPECTEDPASS:
self.unexpectedpass_count += 1
else:
sys.exit('Unknown test result encountered: {}'.format(result.res))
    def print_stats(self, test_count: int, name_max_len: int,
                    tests: T.List['TestSerialisation'],
                    name: str, result: TestRun, i: int) -> None:
        """Print one test's one-line status and record it in the logs."""
        ok_statuses = (TestResult.OK, TestResult.EXPECTEDFAIL)
        bad_statuses = (TestResult.FAIL, TestResult.TIMEOUT,
                        TestResult.UNEXPECTEDPASS, TestResult.ERROR)
        result_str = '{num:{numlen}}/{testcount} {name:{name_max_len}} {res:{reslen}} {dur:.2f}s'.format(
            numlen=len(str(test_count)),
            num=i,
            testcount=test_count,
            name_max_len=name_max_len,
            name=name,
            reslen=TestResult.maxlen(),
            res=result.res.value,
            dur=result.duration)
        if result.res is TestResult.FAIL:
            result_str += ' ' + returncode_to_status(result.returncode)
        # In quiet mode only non-OK results reach the console.
        if not self.options.quiet or result.res not in ok_statuses:
            if result.res not in ok_statuses and mlog.colorize_console:
                if result.res in bad_statuses:
                    decorator = mlog.red
                elif result.res is TestResult.SKIP:
                    decorator = mlog.yellow
                else:
                    sys.exit('Unreachable code was ... well ... reached.')
                print(decorator(result_str).get_text(True))
            else:
                print(result_str)
        # The log files additionally get the full captured output.
        result_str += "\n\n" + result.get_log()
        if result.res in bad_statuses:
            if self.options.print_errorlogs:
                self.collected_logs.append(result_str)
        if self.logfile:
            self.logfile.write(result_str)
        if self.jsonlogfile:
            write_json_log(self.jsonlogfile, name, result)
        if self.junit:
            self.junit.log(name, result)
    def print_summary(self) -> None:
        """Print the final counters, append them to the text log and flush JUnit."""
        msg = textwrap.dedent('''
            Ok:                 {:<4}
            Expected Fail:      {:<4}
            Fail:               {:<4}
            Unexpected Pass:    {:<4}
            Skipped:            {:<4}
            Timeout:            {:<4}
            ''').format(self.success_count, self.expectedfail_count, self.fail_count,
                        self.unexpectedpass_count, self.skip_count, self.timeout_count)
        print(msg)
        if self.logfile:
            self.logfile.write(msg)
        if self.junit:
            self.junit.write()
    def print_collected_logs(self) -> None:
        """Print the stored logs of failed tests (at most 10, each truncated)."""
        if self.collected_logs:
            if len(self.collected_logs) > 10:
                print('\nThe output from 10 first failed tests:\n')
            else:
                print('\nThe output from the failed tests:\n')
            for log in self.collected_logs[:10]:
                lines = log.splitlines()
                # Keep the 4 header lines and the last 100 lines of long logs.
                if len(lines) > 104:
                    print('\n'.join(lines[0:4]))
                    print('--- Listing only the last 100 lines from a long log. ---')
                    lines = lines[-100:]
                for line in lines:
                    try:
                        print(line)
                    except UnicodeEncodeError:
                        # The console encoding cannot represent this
                        # character; degrade gracefully instead of crashing.
                        line = line.encode('ascii', errors='replace').decode()
                        print(line)
def total_failure_count(self) -> int:
return self.fail_count + self.unexpectedpass_count + self.timeout_count
    def doit(self) -> int:
        """Run the selected tests once and return the failure count.

        Raises RuntimeError when invoked twice on the same harness.
        """
        if self.is_run:
            raise RuntimeError('Test harness object can only be used once.')
        self.is_run = True
        tests = self.get_tests()
        if not tests:
            return 0
        self.run_tests(tests)
        return self.total_failure_count()
@staticmethod
def split_suite_string(suite: str) -> T.Tuple[str, str]:
if ':' in suite:
# mypy can't figure out that str.split(n, 1) will return a list of
# length 2, so we have to help it.
return T.cast(T.Tuple[str, str], tuple(suite.split(':', 1)))
else:
return suite, ""
    @staticmethod
    def test_in_suites(test: 'TestSerialisation', suites: T.List[str]) -> bool:
        """Return True when *test* belongs to any entry of *suites*.

        Each entry is either a bare 'suite_name' or a qualified
        'project_name:suite_name'.
        """
        for suite in suites:
            (prj_match, st_match) = TestHarness.split_suite_string(suite)
            for prjst in test.suite:
                (prj, st) = TestHarness.split_suite_string(prjst)

                # the SUITE can be passed as
                #     suite_name
                # or
                #     project_name:suite_name
                # so we need to select only the tests belonging to project_name

                # this if handles the first case (i.e., SUITE == suite_name);
                # in this way we can run tests belonging to different
                # (sub)projects which share the same suite_name
                if not st_match and st == prj_match:
                    return True

                # these two conditions are needed to handle the second option
                # i.e., SUITE == project_name:suite_name;
                # in this way we select only the tests of project_name
                # with suite_name
                if prj_match and prj != prj_match:
                    continue
                if st_match and st != st_match:
                    continue
                return True
        return False
def test_suitable(self, test: 'TestSerialisation') -> bool:
return ((not self.options.include_suites or
TestHarness.test_in_suites(test, self.options.include_suites)) and not
TestHarness.test_in_suites(test, self.options.exclude_suites))
def get_tests(self) -> T.List['TestSerialisation']:
if not self.tests:
print('No tests defined.')
return []
if self.options.include_suites or self.options.exclude_suites:
tests = []
for tst in self.tests:
if self.test_suitable(tst):
tests.append(tst)
else:
tests = self.tests
# allow specifying test names like "meson test foo1 foo2", where test('foo1', ...)
if self.options.args:
tests = [t for t in tests if t.name in self.options.args]
if not tests:
print('No suitable tests defined.')
return []
return tests
def open_log_files(self) -> None:
    """Open the .txt, .json and JUnit log files under meson-logs.

    Skipped entirely when logging is disabled (no logbase) or when output
    goes to the console (verbose mode).
    """
    if not self.options.logbase or self.options.verbose:
        return
    namebase = None
    logfile_base = os.path.join(self.options.wd, 'meson-logs', self.options.logbase)
    # Qualify the file name with the wrapper/setup so different runs do
    # not overwrite each other's logs.
    if self.options.wrapper:
        namebase = os.path.basename(self.get_wrapper(self.options)[0])
    elif self.options.setup:
        namebase = self.options.setup.replace(":", "_")
    if namebase:
        logfile_base += '-' + namebase.replace(' ', '_')
    self.junit = JunitBuilder(logfile_base + '.junit.xml')
    self.logfilename = logfile_base + '.txt'
    self.jsonlogfilename = logfile_base + '.json'
    self.jsonlogfile = open(self.jsonlogfilename, 'w', encoding='utf-8', errors='replace')
    # 'surrogateescape' lets arbitrary (possibly non-UTF-8) test output
    # round-trip into the text log without raising.
    self.logfile = open(self.logfilename, 'w', encoding='utf-8', errors='surrogateescape')
    self.logfile.write('Log of Meson test suite run on {}\n\n'.format(datetime.datetime.now().isoformat()))
    inherit_env = env_tuple_to_str(os.environ.items())
    self.logfile.write('Inherited environment: {}\n\n'.format(inherit_env))
@staticmethod
def get_wrapper(options: argparse.Namespace) -> T.List[str]:
    """Build the command-line prefix (gdb and/or user wrapper) for a test."""
    cmd = []  # type: T.List[str]
    if options.gdb:
        cmd = [options.gdb_path, '--quiet', '--nh']
        if options.repeat > 1:
            # Repeated runs should not stop at the gdb prompt each time.
            cmd += ['-ex', 'run', '-ex', 'quit']
        # Signal the end of arguments to gdb
        cmd.append('--args')
    if options.wrapper:
        cmd += options.wrapper
    return cmd
def get_pretty_suite(self, test: 'TestSerialisation') -> str:
    """Return a display name of the form 'project:suite1+suite2 / name'.

    Falls back to the bare test name when there is at most one suite in
    the whole run or the test declares no suites.
    """
    if len(self.suites) <= 1 or not test.suite:
        return test.name
    prefix = TestHarness.split_suite_string(test.suite[0])[0]
    joined = "+".join(TestHarness.split_suite_string(s)[1] for s in test.suite)
    if joined:
        prefix += ":"
    return prefix + joined + " / " + test.name
def run_tests(self, tests: T.List['TestSerialisation']) -> None:
    """Run *tests*, serially or through a thread pool, honouring --repeat.

    Results are printed as they complete and written to the log files
    opened by open_log_files(). The working directory is changed to
    options.wd for the duration of the run and always restored.
    """
    executor = None
    futures = []  # type: T.List[T.Tuple[conc.Future[TestRun], int, int, T.List[TestSerialisation], str, int]]
    test_count = len(tests)
    # Column width for aligned per-test status output.
    name_max_len = max([len(self.get_pretty_suite(test)) for test in tests])
    self.open_log_files()
    startdir = os.getcwd()
    if self.options.wd:
        os.chdir(self.options.wd)
    self.build_data = build.load(os.getcwd())
    try:
        for _ in range(self.options.repeat):
            for i, test in enumerate(tests, 1):
                visible_name = self.get_pretty_suite(test)
                single_test = self.get_test_runner(test)
                # Serial tests (and any test under gdb, or when only one
                # process is allowed) must first wait for all outstanding
                # parallel jobs to finish.
                if not test.is_parallel or self.options.num_processes == 1 or single_test.options.gdb:
                    self.drain_futures(futures)
                    futures = []
                    res = single_test.run()
                    self.process_test_result(res)
                    self.print_stats(test_count, name_max_len, tests, visible_name, res, i)
                else:
                    # Create the pool lazily so a fully serial run never
                    # pays for it.
                    if not executor:
                        executor = conc.ThreadPoolExecutor(max_workers=self.options.num_processes)
                    f = executor.submit(single_test.run)
                    futures.append((f, test_count, name_max_len, tests, visible_name, i))
                # With --repeat, stop at the first recorded failure
                # (break out of both loops).
                if self.options.repeat > 1 and self.fail_count:
                    break
            if self.options.repeat > 1 and self.fail_count:
                break
        self.drain_futures(futures)
        self.print_summary()
        self.print_collected_logs()
        if self.logfilename:
            print('Full log written to {}'.format(self.logfilename))
    finally:
        os.chdir(startdir)
def drain_futures(self, futures: T.List[T.Tuple['conc.Future[TestRun]', int, int, T.List['TestSerialisation'], str, int]]) -> None:
    """Wait for the queued parallel test futures and report their results.

    Each element is the (future, test_count, name_max_len, tests, name, i)
    tuple queued by run_tests().
    """
    for x in futures:
        (result, test_count, name_max_len, tests, name, i) = x
        if self.options.repeat > 1 and self.fail_count:
            # Best-effort: cancel() only succeeds for jobs that have not
            # started yet.
            # NOTE(review): if the cancel succeeds, the result.result()
            # calls below raise CancelledError -- confirm this is the
            # intended abort path.
            result.cancel()
        if self.options.verbose:
            # Block here so verbose output appears in submission order.
            result.result()
        self.process_test_result(result.result())
        self.print_stats(test_count, name_max_len, tests, name, result.result(), i)
def run_special(self) -> int:
    '''Tests run by the user, usually something like "under gdb 1000 times".'''
    if self.is_run:
        raise RuntimeError('Can not use run_special after a full run.')
    selected = self.get_tests()
    if not selected:
        # Nothing to do counts as success.
        return 0
    self.run_tests(selected)
    return self.total_failure_count()
def list_tests(th: TestHarness) -> bool:
    """Print the pretty name of every selected test.

    Returns True (a failing status) when no tests were selected.
    """
    selected = th.get_tests()
    for test in selected:
        print(th.get_pretty_suite(test))
    return not selected
def rebuild_all(wd: str) -> bool:
    """Rebuild the project in *wd* with ninja before running the tests.

    Returns True on success (or when no ninja backend is present, in which
    case the rebuild is skipped), False when the rebuild fails.
    """
    if not (Path(wd) / 'build.ninja').is_file():
        print('Only ninja backend is supported to rebuild tests before running them.')
        return True
    ninja = environment.detect_ninja()
    if not ninja:
        print("Can't find ninja, can't rebuild test.")
        return False
    if subprocess.run([ninja, '-C', wd]).returncode != 0:
        print('Could not rebuild {}'.format(wd))
        return False
    return True
def run(options: argparse.Namespace) -> int:
    """Top-level driver for 'meson test'; returns the process exit code."""
    if options.benchmark:
        # Benchmarks are timing-sensitive, so never run them in parallel.
        options.num_processes = 1
    if options.verbose and options.quiet:
        print('Can not be both quiet and verbose at the same time.')
        return 1
    check_bin = None
    if options.gdb:
        # gdb is interactive, so force console output.
        options.verbose = True
        if options.wrapper:
            print('Must not specify both a wrapper and gdb at the same time.')
            return 1
        check_bin = 'gdb'
    if options.wrapper:
        check_bin = options.wrapper[0]
    # Verify the wrapper/gdb binary exists before starting any tests.
    if check_bin is not None:
        exe = ExternalProgram(check_bin, silent=True)
        if not exe.found():
            print('Could not find requested program: {!r}'.format(check_bin))
            return 1
    if not options.list and not options.no_rebuild:
        if not rebuild_all(options.wd):
            # We return 125 here in case the build failed.
            # The reason is that exit code 125 tells `git bisect run` that the current commit should be skipped.
            # Thus users can directly use `meson test` to bisect without needing to handle the does-not-build case separately in a wrapper script.
            return 125
    with TestHarness(options) as th:
        try:
            if options.list:
                return list_tests(th)
            if not options.args:
                return th.doit()
            return th.run_special()
        except TestException as e:
            print('Meson test encountered an error:\n')
            # MESON_FORCE_BACKTRACE turns the friendly message into a full
            # traceback for debugging.
            if os.environ.get('MESON_FORCE_BACKTRACE'):
                raise e
            else:
                print(e)
            return 1
def run_with_args(args: T.List[str]) -> int:
    """Parse an explicit argv and run the test command with it."""
    parser = argparse.ArgumentParser(prog='meson test')
    add_arguments(parser)
    return run(parser.parse_args(args))
| |
# Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Asserts-like functions that raise actual exceptions.
These asserts are convenient helpers to quickly validate parameters. If an
assert fails, it will by default raise a ValueError exception (this may be
overridden when the assert is defined). (The Google style guide prohibits using
`assert` when validating arguments to a function; see
https://google.github.io/styleguide/pyguide.html#244-decision for additional
context.)
The following asserts are currently defined:
- asserts.none(a): Check that the value is equal to None.
- asserts.not_none(a): Check that the value is not equal to None.
- asserts.eq(a, b): Check that two values are equal.
- asserts.ne(a, b): Check that two values are not equal.
- asserts.instance(a, instances): Check that a has a type within `instances`.
- asserts.subclass(a, subclasses): Check that a is a subclass of `subclasses`.
- asserts.le(a, b): Check that `a <= b`.
- asserts.lt(a, b): Check that `a < b`.
- asserts.ge(a, b): Check that `a >= b`.
- asserts.gt(a, b): Check that `a > b`.
- asserts.in_set(a, elements): Check that a belongs to the set of `elements`.
- asserts.between(a, min, max): Check that `min </= a </= max`.
They can be used as follows:
asserts.eq(p.atten_dim, p.model_dim // p.dim_per_head)
If the assert statement does not fit on a single line, it is preferable to
explicitly set `value_str`-like argument to get nice error message, e.g.:
asserts.between(
p.dropout_rate,
min_value=0.,
max_value=1.,
left_strict=True,
right_strict=False,
value_str=f'p.dropout_rate={p.dropout_rate}')
"""
import inspect
from typing import Any, List, Optional, Sequence, Type
def _retrieve_argnames(assert_name: str) -> Optional[List[str]]:
"""Retrieves the argnames of the upper level caller function.
The expected usage is within an assert-like function from this module:
It first extracts the corresponding line call as a string, and, second,
retrieves the corresponding function arguments as a list of strings.
Note that this only works when the function call fits on a single line,
since the inspect module can only return a single line number for each frame.
Args:
assert_name: name of the assert function from which this helper was called.
Returns:
A list of arguments as strings. For instance, if the original user code with
the assert function looks like:
asserts.eq(p.atten_dim, p.model_dim // p.dim_per_head)
it returns:
['p.atten_dim', 'p.model_dim // p.dim_per_head']
"""
# Retrieve the code line as a string with the assert's call.
frame = inspect.stack()[2].frame
code_context = inspect.getframeinfo(frame).code_context[0].strip()
first_p = code_context.find(f'{assert_name}(') + len(assert_name) + 1
if first_p == -1:
return None
# Parse all the functions arguments from the assert's call. E.g.:
# Input: " asserts.eq(alpha, beta, ...)\n"
# Output: ['alpha', 'beta', ...]
code_context = code_context[first_p:]
open_p = 1
last_start_index = 0
current_index = 0
args = []
while open_p > 0 and current_index < len(code_context):
current_char = code_context[current_index]
if current_char == '(':
open_p += 1
elif current_char == ',' and open_p == 1:
args.append(code_context[last_start_index:current_index].strip())
last_start_index = current_index + 1
elif current_char == ')':
if open_p == 1:
args.append(code_context[last_start_index:current_index].strip())
break
else:
open_p -= 1
current_index += 1
return args or None
def _get_value_str(value: Any, arguments: Sequence[str], index: int = 0) -> str:
"""Returns the `value_str` given parsed `arguments` and the desired `index`.
Args:
value: The input value to generate a string representation for.
arguments: The input sequence of arguments as returned by
`_retrieve_argnames()`.
index: The index of the argument in `arguments` correspoding to value.
Returns:
The corresponding `value_str` representation.
"""
if arguments and len(arguments) > index:
return f'{arguments[index]}={value}'
return f'{value}'
def none(value: Any,
         *,
         value_str: Optional[str] = None,
         msg: Optional[str] = None,
         exception_type: Type[Exception] = ValueError) -> None:
  """Checks that `value` is None and raises `exception_type` otherwise.

  Args:
    value: The element to compare against None.
    value_str: Optional string representation of `value` used in the default
      exception message.
    msg: Optional exception message overriding the default one.
    exception_type: Type of exception to raise.
  """
  if value is None:
    return
  if not msg:
    if value_str is None:
      # Recover the caller's argument expression for a readable message.
      value_str = _get_value_str(value, _retrieve_argnames('none'))
    msg = f'`{value_str}` must be `None`.'
  raise exception_type(msg)
def not_none(value: Any,
             *,
             value_str: Optional[str] = None,
             msg: Optional[str] = None,
             exception_type: Type[Exception] = ValueError) -> None:
  """Checks that `value` is not None and raises `exception_type` otherwise.

  Args:
    value: The element to compare against None.
    value_str: Optional string representation of `value` used in the default
      exception message.
    msg: Optional exception message overriding the default one.
    exception_type: Type of exception to raise.
  """
  if value is not None:
    return
  if not msg:
    if value_str is None:
      # Recover the caller's argument expression for a readable message.
      value_str = _get_value_str(value, _retrieve_argnames('not_none'))
    msg = f'`{value_str}` must not be `None`.'
  raise exception_type(msg)
def eq(value1: Any,
       value2: Any,
       *,
       value_str1: Optional[str] = None,
       value_str2: Optional[str] = None,
       msg: Optional[str] = None,
       exception_type: Type[Exception] = ValueError) -> None:
  """Checks that `value1 == value2` and raises `exception_type` otherwise.

  Args:
    value1: The first element to compare.
    value2: The second element to compare.
    value_str1: Optional string representation of `value1` for the default
      exception message.
    value_str2: Optional string representation of `value2` for the default
      exception message.
    msg: Optional exception message overriding the default one.
    exception_type: Type of exception to raise.
  """
  if value1 == value2:
    return
  if not msg:
    if value_str1 is None or value_str2 is None:
      # Only pay for the stack inspection when a representation is missing.
      parsed = _retrieve_argnames('eq')
      if value_str1 is None:
        value_str1 = _get_value_str(value1, parsed, index=0)
      if value_str2 is None:
        value_str2 = _get_value_str(value2, parsed, index=1)
    msg = f'`{value_str1}` must be equal to `{value_str2}`.'
  raise exception_type(msg)
def ne(value1: Any,
       value2: Any,
       *,
       value_str1: Optional[str] = None,
       value_str2: Optional[str] = None,
       msg: Optional[str] = None,
       exception_type: Type[Exception] = ValueError) -> None:
  """Checks that `value1 != value2` and raises `exception_type` otherwise.

  Args:
    value1: The first element to compare.
    value2: The second element to compare.
    value_str1: Optional string representation of `value1` for the default
      exception message.
    value_str2: Optional string representation of `value2` for the default
      exception message.
    msg: Optional exception message overriding the default one.
    exception_type: Type of exception to raise.
  """
  if value1 != value2:
    return
  if not msg:
    if value_str1 is None or value_str2 is None:
      # Only pay for the stack inspection when a representation is missing.
      parsed = _retrieve_argnames('ne')
      if value_str1 is None:
        value_str1 = _get_value_str(value1, parsed, index=0)
      if value_str2 is None:
        value_str2 = _get_value_str(value2, parsed, index=1)
    msg = f'`{value_str1}` must not be equal to `{value_str2}`.'
  raise exception_type(msg)
def instance(value: Any,
             instances: Any,
             *,
             value_str: Optional[str] = None,
             msg: Optional[str] = None,
             exception_type: Type[Exception] = ValueError) -> None:
  """Checks that `value` is an instance of (one of) `instances`.

  Raises `exception_type` otherwise.

  Args:
    value: The element whose type is checked.
    instances: A type or a tuple of types.
    value_str: Optional string representation of `value` used in the default
      exception message.
    msg: Optional exception message overriding the default one.
    exception_type: Type of exception to raise.
  """
  if isinstance(value, instances):
    return
  if not msg:
    if value_str is None:
      # Recover the caller's argument expression for a readable message.
      value_str = _get_value_str(value, _retrieve_argnames('instance'))
    msg = f'`{value_str}` must be of type `{instances}`.'
  raise exception_type(msg)
def subclass(value: Any,
             subclasses: Any,
             *,
             value_str: Optional[str] = None,
             msg: Optional[str] = None,
             exception_type: Type[Exception] = ValueError) -> None:
  """Checks that `value` is a subclass of one of the provided `subclasses`.

  Raises an exception otherwise.

  Args:
    value: The class to check.
    subclasses: A class or a tuple of classes.
    value_str: Optional string representation of the `value` element used in the
      exception message overriding the default one.
    msg: Optional exception message overriding the default one.
    exception_type: Type of exception to raise.
  """
  if issubclass(value, subclasses):
    return
  if msg:
    error_msg = msg
  else:
    if value_str is None:
      # Bug fix: this previously passed 'subclasses' (the parameter name),
      # so _retrieve_argnames could never find the `subclass(` call site
      # and the recovered argument expression was always lost.
      arguments = _retrieve_argnames('subclass')
      value_str = _get_value_str(value, arguments)
    error_msg = f'`{value_str}` must be a subclass of `{subclasses}`.'
  raise exception_type(error_msg)
def le(value1: Any,
       value2: Any,
       *,
       value_str1: Optional[str] = None,
       value_str2: Optional[str] = None,
       msg: Optional[str] = None,
       exception_type: Type[Exception] = ValueError) -> None:
  """Checks that `value1 <= value2` and raises `exception_type` otherwise.

  Args:
    value1: The left-hand side of the comparison.
    value2: The right-hand side of the comparison.
    value_str1: Optional string representation of `value1` for the default
      exception message.
    value_str2: Optional string representation of `value2` for the default
      exception message.
    msg: Optional exception message overriding the default one.
    exception_type: Type of exception to raise.
  """
  if value1 <= value2:
    return
  if not msg:
    if value_str1 is None or value_str2 is None:
      # Only pay for the stack inspection when a representation is missing.
      parsed = _retrieve_argnames('le')
      if value_str1 is None:
        value_str1 = _get_value_str(value1, parsed, index=0)
      if value_str2 is None:
        value_str2 = _get_value_str(value2, parsed, index=1)
    msg = f'`{value_str1}` must be less than or equal to `{value_str2}`.'
  raise exception_type(msg)
def lt(value1: Any,
       value2: Any,
       *,
       value_str1: Optional[str] = None,
       value_str2: Optional[str] = None,
       msg: Optional[str] = None,
       exception_type: Type[Exception] = ValueError) -> None:
  """Checks that `value1 < value2` and raises `exception_type` otherwise.

  Args:
    value1: The left-hand side of the comparison.
    value2: The right-hand side of the comparison.
    value_str1: Optional string representation of `value1` for the default
      exception message.
    value_str2: Optional string representation of `value2` for the default
      exception message.
    msg: Optional exception message overriding the default one.
    exception_type: Type of exception to raise.
  """
  if value1 < value2:
    return
  if not msg:
    if value_str1 is None or value_str2 is None:
      # Only pay for the stack inspection when a representation is missing.
      parsed = _retrieve_argnames('lt')
      if value_str1 is None:
        value_str1 = _get_value_str(value1, parsed, index=0)
      if value_str2 is None:
        value_str2 = _get_value_str(value2, parsed, index=1)
    msg = f'`{value_str1}` must be strictly less than `{value_str2}`.'
  raise exception_type(msg)
def ge(value1: Any,
       value2: Any,
       *,
       value_str1: Optional[str] = None,
       value_str2: Optional[str] = None,
       msg: Optional[str] = None,
       exception_type: Type[Exception] = ValueError) -> None:
  """Checks that `value1 >= value2` and raises `exception_type` otherwise.

  Args:
    value1: The left-hand side of the comparison.
    value2: The right-hand side of the comparison.
    value_str1: Optional string representation of `value1` for the default
      exception message.
    value_str2: Optional string representation of `value2` for the default
      exception message.
    msg: Optional exception message overriding the default one.
    exception_type: Type of exception to raise.
  """
  if value1 >= value2:
    return
  if not msg:
    if value_str1 is None or value_str2 is None:
      # Only pay for the stack inspection when a representation is missing.
      parsed = _retrieve_argnames('ge')
      if value_str1 is None:
        value_str1 = _get_value_str(value1, parsed, index=0)
      if value_str2 is None:
        value_str2 = _get_value_str(value2, parsed, index=1)
    msg = f'`{value_str1}` must be greater than or equal to `{value_str2}`.'
  raise exception_type(msg)
def gt(value1: Any,
       value2: Any,
       *,
       value_str1: Optional[str] = None,
       value_str2: Optional[str] = None,
       msg: Optional[str] = None,
       exception_type: Type[Exception] = ValueError) -> None:
  """Checks that `value1 > value2` and raises `exception_type` otherwise.

  Args:
    value1: The left-hand side of the comparison.
    value2: The right-hand side of the comparison.
    value_str1: Optional string representation of `value1` for the default
      exception message.
    value_str2: Optional string representation of `value2` for the default
      exception message.
    msg: Optional exception message overriding the default one.
    exception_type: Type of exception to raise.
  """
  if value1 > value2:
    return
  if not msg:
    if value_str1 is None or value_str2 is None:
      # Only pay for the stack inspection when a representation is missing.
      parsed = _retrieve_argnames('gt')
      if value_str1 is None:
        value_str1 = _get_value_str(value1, parsed, index=0)
      if value_str2 is None:
        value_str2 = _get_value_str(value2, parsed, index=1)
    msg = f'`{value_str1}` must be strictly greater than `{value_str2}`.'
  raise exception_type(msg)
def in_set(value: Any,
           elements: Sequence[Any],
           *,
           value_str: Optional[str] = None,
           msg: Optional[str] = None,
           exception_type: Type[Exception] = ValueError) -> None:
  """Checks that `value` is one of the valid `elements`.

  Raises `exception_type` otherwise.

  Args:
    value: The element to look up.
    elements: The collection of valid elements.
    value_str: Optional string representation of `value` used in the default
      exception message.
    msg: Optional exception message overriding the default one.
    exception_type: Type of exception to raise.
  """
  if value in elements:
    return
  if not msg:
    if value_str is None:
      # Recover the caller's argument expression for a readable message.
      value_str = _get_value_str(value, _retrieve_argnames('in_set'))
    msg = f'`{value_str}` must be within `{elements}`.'
  raise exception_type(msg)
def between(value: Any,
            min_value: Any,
            max_value: Any,
            *,
            left_strict: bool = False,
            right_strict: bool = False,
            value_str: Optional[str] = None,
            msg: Optional[str] = None,
            exception_type: Type[Exception] = ValueError) -> None:
  """Checks that `value` lies in the range [min_value, max_value].

  Each bound is strict (`<`) or inclusive (`<=`) depending on `left_strict`
  and `right_strict`. Raises `exception_type` when out of range.

  Args:
    value: The element to range-check.
    min_value: The minimum value of the valid range.
    max_value: The maximum value of the valid range.
    left_strict: Whether the left inequality is strict.
    right_strict: Whether the right inequality is strict.
    value_str: Optional string representation of `value` used in the default
      exception message.
    msg: Optional exception message overriding the default one.
    exception_type: Type of exception to raise.
  """
  lower_ok = (min_value < value) if left_strict else (min_value <= value)
  if lower_ok:
    upper_ok = (value < max_value) if right_strict else (value <= max_value)
    if upper_ok:
      return
  if not msg:
    if value_str is None:
      # Recover the caller's argument expression for a readable message.
      value_str = _get_value_str(value, _retrieve_argnames('between'))
    # Interval notation mirrors the strictness flags: '(' / ')' exclude
    # the bound, '[' / ']' include it.
    left_bracket = '(' if left_strict else '['
    right_bracket = ')' if right_strict else ']'
    msg = (f'`{value_str}` must be in the range '
           f'`{left_bracket}{min_value}, {max_value}{right_bracket}`.')
  raise exception_type(msg)
| |
# MIT License
#
# Copyright (c) 2017
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
#
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
import time
import numpy as np
import scipy.stats as st
from sklearn.metrics import auc, roc_auc_score, confusion_matrix
from sklearn.preprocessing import label_binarize
from OSMBase import OSMBaseModel
from OSMModelData import OSMModelData
# ============================================================================
# The Classification Results Presentation Object.
# ============================================================================
class OSMClassification(OSMBaseModel):
def __init__(self, args, log):
    """Initialize the classification results presentation layer.

    Args:
        args: Parsed runtime arguments (shallow-copied reference).
        log: Logger instance shared with the rest of the application.
    """
    super(OSMClassification, self).__init__(args, log)
    # Shallow copies of the runtime environment.
    self.log = log
    self.args = args
    # Maintain a vector of statistics; one entry is appended per call to
    # model_classification_results().
    self.test_statistics_history = []
    self.train_statistics_history = []
#####################################################################################
#
# Virtual member functions called from OSMBase
#
#####################################################################################
def model_is_classifier(self):
    """Virtual override from OSMBase: this model family is a classifier."""
    return True
def model_classification_results(self):
    """Evaluate the model on train and test sets, then log/report/persist.

    Populates the train_*/test_* attributes, appends one entry per set to
    the statistics histories, and triggers logging, graphics and the stats
    file output.
    """
    self.train_predictions = self.model_prediction(self.data.training())  # Returns a dict. with "prediction" and "actual"
    self.train_probability = self.model_probability(self.data.training())  # Returns a dict. with "probability"
    self.train_objective = self.model_evaluate(self.data.training())
    self.train_stats = self.model_accuracy(self.train_predictions, self.train_probability)  # dictionary of stats
    self.train_statistics_history.append({"STATISTICS": self.train_stats, "OBJECTIVE": self.train_objective})
    self.test_predictions = self.model_prediction(self.data.testing())  # Returns a dict. with "prediction" and "actual"
    self.test_probability = self.model_probability(self.data.testing())  # Returns a dict. with "probability"
    self.test_objective = self.model_evaluate(self.data.testing())
    self.test_stats = self.model_accuracy(self.test_predictions, self.test_probability)  # dictionary of stats
    self.test_statistics_history.append({"STATISTICS": self.test_stats, "OBJECTIVE": self.test_objective})
    # Send statistics to the console and log file.
    self.model_log_statistics()
    # Generate graphics (only if the virtual function defined at model level).
    self.model_graphics()
    # Append statistics to the stats file.
    self.model_write_statistics()
def model_log_statistics(self):
    """Write the train and test statistics to the application log."""
    self.log_train_statistics(self.data.training(),
                              self.train_stats,
                              self.train_predictions,
                              self.train_probability,
                              self.train_objective)
    self.log_test_statistics(self.data.testing(),
                             self.test_stats,
                             self.test_predictions,
                             self.test_probability,
                             self.test_objective)
def model_write_statistics(self):
    """Append train and test statistics to their per-directory stats files."""
    self.write_statistics(self.data.training(),
                          self.train_stats,
                          self.train_predictions,
                          self.train_probability,
                          self.train_objective,
                          self.args.trainDirectory)
    self.write_statistics(self.data.testing(),
                          self.test_stats,
                          self.test_predictions,
                          self.test_probability,
                          self.test_objective,
                          self.args.testDirectory)
def model_training_summary(self):
    """Persist the accumulated per-epoch statistics and model analytics."""
    self.write_training_statistics(self.train_statistics_history, self.args.trainDirectory)
    self.write_training_statistics(self.test_statistics_history, self.args.testDirectory)
    self.write_model_analytics(self.model_analytics(self.data.testing()), self.args.testDirectory)
    self.write_model_analytics(self.model_analytics(self.data.training()), self.args.trainDirectory)
def model_accuracy(self, predictions, probability):
    """Compute classification statistics: AUCs, confusion matrix, ranks.

    Args:
        predictions: dict with "prediction" and "actual" text-label lists.
        probability: dict with "probability", a per-compound list of
            per-class probabilities (first column assumed "active").

    Returns:
        A dict of statistics consumed by the logging/reporting helpers.
    """
    classes = self.model_enumerate_classes()
    predict_text = predictions["prediction"]
    actual_text = predictions["actual"]
    probabilities = probability["probability"]
    # 1 - p inverts so the most probable "active" compound gets rank 1.
    inv_probability = [1 - x[0] for x in probabilities]  # only interested in the first column ("active")
    # Sort rankings.
    probability_ranks = st.rankdata(inv_probability, method="average")
    actual_one_hot = label_binarize(actual_text, classes)
    predict_one_hot = label_binarize(predict_text, classes)
    # For binary problems label_binarize yields a single column; in that
    # case roc_auc_score needs the positive-class probabilities only.
    if len(classes) == 2 and actual_one_hot.shape[1] == 1:
        auc_probs = [x[1] for x in probabilities]
    else:
        auc_probs = probabilities
    try:
        class_auc = roc_auc_score(actual_one_hot, auc_probs, average=None, sample_weight=None)
        macro_auc = roc_auc_score(actual_one_hot, auc_probs, average="macro", sample_weight=None)
        micro_auc = roc_auc_score(actual_one_hot, auc_probs, average="micro", sample_weight=None)
    except ValueError:
        # roc_auc_score raises e.g. when only one class is present in the
        # actual labels; flag the statistics as unavailable with -1.
        class_auc = [-1] * len(classes)
        macro_auc = -1
        micro_auc = -1
    # Per-class AUC is not meaningful for the single-column binary case.
    if len(classes) == 2 and actual_one_hot.shape[1] == 1:
        mod_class_auc = None
    else:
        mod_class_auc = class_auc
    confusion = confusion_matrix(actual_text, predict_text)
    epoch = self.model_epochs()
    # Return the model analysis statistics in a dictionary.
    # NOTE(review): "predict_one_host" looks like a typo for
    # "predict_one_hot", but downstream readers may rely on this exact
    # key -- confirm before renaming.
    return {"classes": classes, "actual_one_hot": actual_one_hot, "predict_one_host": predict_one_hot
            , "prob_rank": probability_ranks, "actual_text": actual_text, "predict_text": predict_text
            , "confusion": confusion, "class_auc": mod_class_auc, "macro_auc": macro_auc
            , "micro_auc": micro_auc, "epoch": epoch}
def model_prediction_records(self, data, statistics, predictions, probabilities):
    """Assemble per-compound prediction rows, sorted by active probability.

    Each row is [ID, actual label, predicted label, probability rank,
    SMILE, per-class probability list].
    """
    classes = self.model_enumerate_classes()
    ids = data.get_field("ID")
    smiles = data.get_field("SMILE")
    records = []
    for idx in range(len(ids)):
        per_class_probs = [probabilities["probability"][idx][cls_idx]
                           for cls_idx in range(len(classes))]
        records.append([ids[idx],
                        statistics["actual_text"][idx],
                        statistics["predict_text"][idx],
                        statistics["prob_rank"][idx],
                        smiles[idx],
                        per_class_probs])
    # Highest probability of the first ("active") class comes first.
    records.sort(key=lambda record: -record[5][0])
    return records
#####################################################################################
#
# Local member functions
#
#####################################################################################
def log_train_statistics(self, data, statistics, predictions, probabilities, objective):
    """Write the training-set AUC statistics and objectives to the log."""
    self.log.info("Training Compounds macro AUC: %f", statistics["macro_auc"])
    self.log.info("Training Compounds micro AUC: %f", statistics["micro_auc"])
    class_auc = statistics["class_auc"]
    # class_auc is None for the single-column binary case.
    if class_auc is not None:
        for aclass, auc in zip(statistics["classes"], class_auc):
            self.log.info("Training Compounds Class %s AUC: %f", aclass, auc)
    for idx, objective_value in enumerate(objective):
        self.log.info("Train Model Objective-%d, %f", idx, objective_value)
# Display the classification results and write to the log file.
def log_test_statistics(self, data, statistics, predictions, probabilities, objective):
    """Display all the calculated statistics for each model run.

    Logs the model variables, objectives, AUCs, a formatted confusion
    matrix and the ranked per-compound predictions.
    """
    independent_list = []
    for var in self.model_arguments()["INDEPENDENT"]:
        independent_list.append(var["VARIABLE"])
    dependent_var = self.model_arguments()["DEPENDENT"]["VARIABLE"]
    self.log.info("Dependent (Target) Variable: %s", dependent_var)
    for var in independent_list:
        self.log.info("Independent (Input) Variable(s): %s", var)
    self.log.info("Training Epochs: %d", self.model_epochs())
    for idx in range(len(objective)):
        self.log.info("Test Model Objective-%d, %f", idx, objective[idx])
    self.log.info("Test Compounds macro AUC: %f", statistics["macro_auc"])
    self.log.info("Test Compounds micro AUC: %f", statistics["micro_auc"])
    # class_auc is None for the single-column binary case.
    if statistics["class_auc"] is not None:
        for aclass, auc in zip(statistics["classes"], statistics["class_auc"]):
            self.log.info("Test Class %s AUC: %f", aclass, auc)
    self.log.info("+++++++++++++++ Confusion matrix +++++++++++++++++++++++")
    # Header row: predicted class names, fixed-width columns.
    line = "true/predict "
    for a_class in statistics["classes"]:
        line += "{:10s}".format(a_class)
    self.log.info(line)
    # One row per true class.
    for rowidx in range(len(statistics["confusion"])):
        line = "{:10s}".format(statistics["classes"][rowidx])
        for colidx in range(len(statistics["confusion"][rowidx])):
            line += "{:8d}".format(statistics["confusion"][rowidx][colidx])
        self.log.info(line)
    self.log.info("ID, Actual Class, Pred. Class, Prob. Rank, Prob. %s", statistics["classes"][0])
    self.log.info("===================================================================================")
    # Records arrive sorted by descending first-class probability.
    sorted_records = self.model_prediction_records(data, statistics, predictions, probabilities)
    for record in sorted_records:
        line = "{:10s} {:10s} {:10s} {:3.1f} {:8.7f}".format(record[0], record[1],
                                                             record[2], record[3], record[5][0])
        self.log.info(line)
# Open the statistics file and append the model results statistics.
    def write_statistics(self, data, statistics, predictions, probabilities, objective, directory):
        """Append a classification results block to the statistics CSV file.

        Sections written, in order: model header (name, variables, load file,
        epochs, timestamps, objectives), test statistics (AUCs, optional
        per-class AUCs, confusion matrix), then one row per predicted compound.
        Any IOError is logged rather than raised (best-effort reporting).
        """
        stats_filename = os.path.join(directory, self.args.statsFilename)
        independent_list = []
        for var in self.model_arguments()["INDEPENDENT"]:
            independent_list.append(var["VARIABLE"])
        dependent_var = self.model_arguments()["DEPENDENT"]["VARIABLE"]
        try:
            # Append mode: successive runs accumulate in the same file.
            with open(stats_filename, 'a') as stats_file:
                line = "****************,Classification,******************\n"
                stats_file.write(line)
                line = "Model, {}\n".format(self.model_name())
                stats_file.write(line)
                line = "DependentVar(Target), {}\n".format(dependent_var)
                stats_file.write(line)
                for var in independent_list:
                    line = "IndependentVar(Input), {}\n".format(var)
                    stats_file.write(line)
                line = "Load File, {}\n".format(self.args.loadFilename)
                stats_file.write(line)
                line = "TrainingEpochs, {}\n".format(self.model_epochs())
                stats_file.write(line)
                line = "Runtime, {}\n".format(time.asctime(time.localtime(time.time())))
                stats_file.write(line)
                # NOTE(review): time.clock() was removed in Python 3.8; under
                # Python 3 this should become time.process_time() — confirm the
                # interpreter version this project targets.
                line = "CPUtime, {}\n".format(time.clock())
                stats_file.write(line)
                for idx in range(len(objective)):
                    line = "ModelObjective-{}, {}\n".format(idx,objective[idx])
                    stats_file.write(line)
                line = "++++++++++++++++,Test_Statistics,++++++++++++++++\n"
                stats_file.write(line)
                line = "Macro AUC, {}\n".format(statistics["macro_auc"])
                stats_file.write(line)
                line = "Micro AUC, {}\n".format(statistics["micro_auc"])
                stats_file.write(line)
                # Per-class AUCs are optional (may be None).
                if statistics["class_auc"] is not None:
                    for aclass, auc in zip(statistics["classes"], statistics["class_auc"]):
                        line = "Class {} AUC, {}\n".format(aclass, auc)
                        stats_file.write(line)
                stats_file.write("Confusion matrix\n")
                line = "true/predict"
                for a_class in statistics["classes"]:
                    line += ",{}".format(a_class)
                line += "\n"
                stats_file.write(line)
                # One row per true class; columns follow the header class order.
                for rowidx in range(len(statistics["confusion"])):
                    line = "{}".format(statistics["classes"][rowidx])
                    for colidx in range(len(statistics["confusion"][rowidx])):
                        line += ",{}".format(statistics["confusion"][rowidx][colidx])
                    line += "\n"
                    stats_file.write(line)
                sorted_records = self.model_prediction_records(data, statistics, predictions, probabilities)
                line = "++++++++++++++++,Compound_Statistics,++++++++++++++++\n"
                stats_file.write(line)
                line = "ID, Actual_Class, Pred_Class"
                for cls in statistics["classes"]:
                    line += ", Prob_" + cls
                line += ", SMILE\n"
                stats_file.write(line)
                # record layout (from usage here): [0]=id, [1]=actual class,
                # [2]=predicted class, [4]=SMILE, [5]=per-class probabilities.
                for record in sorted_records:
                    line = "{}, {}, {}".format(record[0], record[1], record[2])
                    for cls_idx in range(len(statistics["classes"])):
                        line += ", {}".format(record[5][cls_idx])
                    line += ", {}\n".format(record[4])
                    stats_file.write(line)
        except IOError:
            self.log.error("Problem writing to statistics file %s, check path and permissions", stats_filename)
def write_training_statistics(self, statistics_vector, directory):
stats_filename = os.path.join(directory, self.args.statsFilename)
try:
with open(stats_filename, 'a') as stats_file:
line = "++++++++++++++++++,Training_Summary,+++++++++++++++++++\n"
stats_file.write(line)
for statistics in statistics_vector:
micro_AUC = statistics["STATISTICS"]["micro_auc"]
macro_AUC = statistics["STATISTICS"]["macro_auc"]
epoch = statistics["STATISTICS"]["epoch"]
line = "epoch, {}, micro AUC, {}, macro AUC, {}".format(epoch, micro_AUC, macro_AUC)
objective = statistics["OBJECTIVE"]
for idx in range(len(objective)):
line += ",Objective-{}, {}".format(idx, objective[idx])
stats_file.write(line+"\n")
except IOError:
self.log.error("Problem writing to statistics file %s, check path and permissions", stats_filename)
def write_model_analytics(self, sensitivity_dict, directory):
if sensitivity_dict is None: return
if "1Q_SENSITIVITY" in sensitivity_dict:
self.write_step_analytic_type(directory, sensitivity_dict["1Q_SENSITIVITY"])
if "MEDIAN_SENSITIVITY" in sensitivity_dict:
self.write_step_analytic_type(directory, sensitivity_dict["MEDIAN_SENSITIVITY"])
if "3Q_SENSITIVITY" in sensitivity_dict:
self.write_step_analytic_type(directory, sensitivity_dict["3Q_SENSITIVITY"])
if "DERIVATIVE" in sensitivity_dict:
self.write_analytic_type(directory, sensitivity_dict["DERIVATIVE"], "DERIVATIVE")
if "PARTIAL" in sensitivity_dict:
self.write_partial_type(directory, sensitivity_dict["PARTIAL"])
def write_analytic_type(self, directory, sensitivity_vector, title):
stats_filename = os.path.join(directory, self.args.statsFilename)
try:
with open(stats_filename, 'a') as stats_file:
line = "++++++++++++++++++,{},+++++++++++++++++++\n".format(title)
stats_file.write(line)
for field_sens in sensitivity_vector:
line = 'field,{},description,"{}",index,{},sensitivity,{}\n'.format(field_sens[0],
field_sens[1],
field_sens[2],
field_sens[3])
stats_file.write(line)
except IOError:
self.log.error("Problem writing to sensitivity file %s, check path and permissions", stats_filename)
    def write_step_analytic_type(self, directory, sensitivity_step_list):
        """Append the step-sensitivity table to the statistics file.

        Each entry of sensitivity_step_list is indexed as: [0]=field,
        [1]=description, [2]=index, [3]=sum(abs()) value, [4]=a numpy-like
        vector of step_size further statistics. IOErrors are logged, not raised.
        """
        stats_filename = os.path.join(directory, self.args.statsFilename)
        try:
            with open(stats_filename, 'a') as stats_file:
                line = "++++++++++++++++++,{},+++++++++++++++++++\n".format("STEP SENSITIVITY")
                stats_file.write(line)
                line = "field,description,index,sum(abs()),min,max,step,percentile,mean,Prob(sum(abs())),Prob(sum())"
                # All entries are assumed to share the same vector length; the
                # first entry's vector defines the column count.
                step_size = sensitivity_step_list[0][4].shape[0]
                field_size = len(sensitivity_step_list)
                # Column bookkeeping: a data row has 1 + step_size value columns
                # (the [3] scalar plus the whole vector), and the header already
                # names 8 of them (sum(abs()) .. Prob(sum())), so step_size-7
                # extra Prob(step) headers make the counts line up.
                for step in range(step_size-7):
                    line += ",Prob({})".format(step)
                stats_file.write(line+"\n")
                for field in range(field_size):
                    # Description is quoted because it may contain commas.
                    line = '{},"{}",{},{}'.format(sensitivity_step_list[field][0],
                                                  sensitivity_step_list[field][1],
                                                  sensitivity_step_list[field][2],
                                                  sensitivity_step_list[field][3])
                    for step in range(step_size):
                        line += ",{}".format(sensitivity_step_list[field][4][step])
                    stats_file.write(line+"\n")
        except IOError:
            self.log.error("Problem writing to step sensitivity file %s, check path and permissions", stats_filename)
def write_partial_type(self, directory, partial_list):
stats_filename = os.path.join(directory, self.args.statsFilename)
try:
with open(stats_filename, 'a') as stats_file:
line = "++++++++++++++++++,{},+++++++++++++++++++\n".format("PARTIAL")
stats_file.write(line)
line = "field,description"
for field in partial_list[0]:
line += ",{}".format(field)
stats_file.write(line+"\n")
partial_size = partial_list[2].shape[0]
for row in range(partial_size):
line = '{},"{}"'.format(partial_list[0][row], partial_list[1][row])
for col in range(partial_size):
line += ",{}".format(partial_list[2][row][col])
stats_file.write(line+"\n")
except IOError:
self.log.error("Problem writing to partial derivative file %s, check path and permissions", stats_filename)
def model_enumerate_classes(self):
training_classes = OSMModelData.enumerate_classes(self.data.training().target_data())
test_classes = OSMModelData.enumerate_classes(self.data.testing().target_data())
if not set(test_classes) <= set(training_classes):
self.log.error("There are test classes %s not in the set of training classes %s",
",".join(test_classes), ",".join(training_classes))
sys.exit()
return training_classes
| |
import contextlib
import re
import uuid
from urllib.parse import quote
from django.conf import settings
from django.contrib.auth import SESSION_KEY
from django.contrib.auth.middleware import AuthenticationMiddleware
from django.contrib.auth.models import AnonymousUser
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.exceptions import ImproperlyConfigured
from django.db import transaction
from django.http import (
Http404,
HttpResponse,
HttpResponsePermanentRedirect,
HttpResponseRedirect,
JsonResponse,
)
from django.middleware import common
from django.template.response import TemplateResponse
from django.utils.cache import (
add_never_cache_headers,
get_max_age,
patch_cache_control,
patch_vary_headers,
)
from django.utils.crypto import constant_time_compare
from django.utils.deprecation import MiddlewareMixin
from django.utils.encoding import force_str, iri_to_uri
from django.utils.translation import activate, gettext_lazy as _
from django.urls import is_valid_path
from django_statsd.clients import statsd
from rest_framework import permissions
import MySQLdb as mysql
import olympia.core.logger
from olympia import amo
from olympia.accounts.utils import redirect_for_login
from olympia.accounts.verify import (
check_and_update_fxa_access_token,
IdentificationError,
)
from . import urlresolvers
from .reverse import set_url_prefix
from .templatetags.jinja_helpers import urlparams
log = olympia.core.logger.getLogger('amo.middleware')
auth_path = re.compile('%saccounts/authenticate/?$' % settings.DRF_API_REGEX)
class LocaleAndAppURLMiddleware(MiddlewareMixin):
    """
    1. search for locale first
    2. see if there are acceptable apps
    3. save those matched parameters in the request
    4. strip them from the URL so we can do stuff
    """

    def process_request(self, request):
        # Find locale, app
        prefixer = urlresolvers.Prefixer(request)
        # Always use a 302 redirect to avoid users being stuck in case of
        # accidental misconfiguration.
        redirect_type = HttpResponseRedirect
        set_url_prefix(prefixer)
        full_path = prefixer.fix(prefixer.shortened_path)
        # Legacy '/mobile' app prefix: send to '/android' instead.
        if prefixer.app == amo.MOBILE.short and request.path.rstrip('/').endswith(
            '/' + amo.MOBILE.short
        ):
            return redirect_type(request.path.replace('/mobile', '/android'))
        # Explicit ?lang= / ?app= overrides: redirect to the canonical
        # prefixed URL (unless the path is exempt from locale/app prefixes).
        if ('lang' in request.GET or 'app' in request.GET) and not re.match(
            settings.SUPPORTED_NONAPPS_NONLOCALES_REGEX, prefixer.shortened_path
        ):
            # Blank out the locale so that we can set a new one. Remove
            # lang/app from query params so we don't have an infinite loop.
            prefixer.locale = ''
            new_path = prefixer.fix(prefixer.shortened_path)
            query = request.GET.dict()
            query.pop('app', None)
            query.pop('lang', None)
            return redirect_type(urlparams(new_path, **query))
        # Path was missing (or had a non-canonical) locale/app prefix:
        # redirect to the fixed path, preserving the query string.
        if full_path != request.path:
            query_string = request.META.get('QUERY_STRING', '')
            full_path = quote(full_path.encode('utf-8'))
            if query_string:
                query_string = force_str(query_string, errors='ignore')
                full_path = f'{full_path}?{query_string}'
            response = redirect_type(full_path)
            # Cache the redirect for a year.
            if not settings.DEBUG:
                patch_cache_control(response, max_age=60 * 60 * 24 * 365)
            # Vary on Accept-Language or User-Agent if we changed the locale or
            # app.
            old_app = prefixer.app
            old_locale = prefixer.locale
            # NOTE: the local '_' here (remainder of the split path) shadows
            # the module-level gettext alias within this function only.
            new_locale, new_app, _ = prefixer.split_path(full_path)
            if old_locale != new_locale:
                patch_vary_headers(response, ['Accept-Language'])
            if old_app != new_app:
                patch_vary_headers(response, ['User-Agent'])
            return response
        # No redirect needed: strip the prefix from path_info and activate
        # the resolved language for the rest of the request.
        request.path_info = '/' + prefixer.shortened_path
        request.LANG = prefixer.locale or prefixer.get_language()
        activate(request.LANG)
class AuthenticationMiddlewareWithoutAPI(AuthenticationMiddleware):
    """
    Like AuthenticationMiddleware, but disabled for the API, which uses its
    own authentication mechanism.
    """

    def process_request(self, request):
        # Non-API requests (and the authentication endpoint itself) get the
        # regular session-based authentication.
        if not request.is_api or auth_path.match(request.path):
            return super().process_request(request)
        # API requests authenticate elsewhere; force anonymous here.
        request.user = AnonymousUser()
class NoVarySessionMiddleware(SessionMiddleware):
    """
    SessionMiddleware sets Vary: Cookie anytime request.session is accessed.
    request.session is accessed indirectly anytime request.user is touched.
    We always touch request.user to see if the user is authenticated, so every
    request would be sending vary, so we'd get no caching.
    We skip the cache in Zeus if someone has an AMOv3+ cookie, so varying on
    Cookie at this level only hurts us.
    """

    def process_response(self, request, response):
        # In read-only mode skip session processing entirely.
        if settings.READ_ONLY:
            return response
        # Snapshot the Vary header before SessionMiddleware can touch it.
        saved_vary = response.get('Vary', None) if hasattr(response, 'get') else None
        processed = super().process_response(request, response)
        # Restore the pre-existing Vary value, or drop whatever the parent set.
        if saved_vary:
            processed['Vary'] = saved_vary
        else:
            del processed['Vary']
        return processed
class RemoveSlashMiddleware(MiddlewareMixin):
    """
    Middleware that tries to remove a trailing slash if there was a 404.
    If the response is a 404 because url resolution failed, we'll look for a
    better url without a trailing slash.
    """

    def process_response(self, request, response):
        slashless_would_resolve = (
            response.status_code == 404
            and request.path_info.endswith('/')
            and not is_valid_path(request.path_info)
            and is_valid_path(request.path_info[:-1])
        )
        if not slashless_would_resolve:
            return response
        # Use request.path because we munged app/locale in path_info.
        redirect_url = request.path[:-1]
        if request.GET:
            with safe_query_string(request):
                redirect_url += '?' + request.META.get('QUERY_STRING', '')
        return HttpResponsePermanentRedirect(redirect_url)
@contextlib.contextmanager
def safe_query_string(request):
    """
    Temporarily turn the QUERY_STRING into a unicode- and ascii-safe string.
    We need unicode so it can be combined with a reversed URL, but it has to be
    ascii to go in a Location header. iri_to_uri seems like a good compromise.
    The original value is always restored on exit.
    """
    original = request.META.get('QUERY_STRING', '')
    try:
        request.META['QUERY_STRING'] = iri_to_uri(original)
        yield
    finally:
        request.META['QUERY_STRING'] = original
class CommonMiddleware(common.CommonMiddleware):
    """Django's CommonMiddleware, but run with an ascii-safe QUERY_STRING."""

    def process_request(self, request):
        with safe_query_string(request):
            result = super().process_request(request)
        return result
class NonAtomicRequestsForSafeHttpMethodsMiddleware(MiddlewareMixin):
    """
    Middleware to make the view non-atomic if the HTTP method used is safe,
    in order to avoid opening and closing a useless transaction.
    """

    def process_view(self, request, view_func, view_args, view_kwargs):
        # This relies on undocumented django APIs:
        # - transaction.get_connection().in_atomic_block lets us make sure
        #   we're not messing with a transaction that has already started
        #   (which happens in tests using the regular TestCase class).
        # - transaction._non_atomic_requests() flags the view in place. The
        #   public non_atomic_requests() (without the '_') returns a *new*
        #   view, which a middleware can't substitute — we must modify the
        #   existing one and return None so the remaining middlewares run.
        if request.method not in ('HEAD', 'GET', 'OPTIONS', 'TRACE'):
            return None
        if transaction.get_connection().in_atomic_block:
            return None
        transaction._non_atomic_requests(view_func, using='default')
        return None
class ReadOnlyMiddleware(MiddlewareMixin):
    """Middleware that announces a downtime which for us usually means
    putting the site into read only mode.
    Supports issuing `Retry-After` header.
    """

    ERROR_MSG = _(
        'Some features are temporarily disabled while we '
        "perform website maintenance. We'll be back to "
        'full capacity shortly.'
    )

    def render_html_error(self, request):
        """Return the read-only HTML page as a pre-rendered 503 response."""
        response = TemplateResponse(request, 'amo/read-only.html', status=503)
        # render() is normally called behind the scenes by django's base
        # handler inside get_response(); we may bypass that path here, so
        # force the rendering ourselves.
        response.render()
        return response

    def render_readonly_api_error(self, request):
        """JSON flavour of the read-only error for API clients."""
        return JsonResponse({'error': self.ERROR_MSG}, status=503)

    def process_request(self, request):
        if not settings.READ_ONLY:
            return
        if request.is_api:
            # Safe (read) API methods still work in read-only mode.
            if request.method not in permissions.SAFE_METHODS:
                return self.render_readonly_api_error(request)
        elif request.method == 'POST':
            return self.render_html_error(request)

    def process_exception(self, request, exception):
        if not settings.READ_ONLY:
            return
        # A database write slipped through and hit the read-only MySQL.
        if isinstance(exception, mysql.OperationalError):
            if request.is_api:
                return self.render_readonly_api_error(request)
            return self.render_html_error(request)
class SetRemoteAddrFromForwardedFor(MiddlewareMixin):
    """
    Set REMOTE_ADDR from HTTP_X_FORWARDED_FOR if the request came from the CDN.
    If the request came from the CDN, the client IP will always be in
    HTTP_X_FORWARDED_FOR in second to last position (CDN puts it last, but the
    load balancer then appends the IP from where it saw the request come from,
    which is the CDN server that forwarded the request). In addition, the CDN
    will set HTTP_X_REQUEST_VIA_CDN to a secret value known to us so we can
    verify the request did originate from the CDN.
    If the request didn't come from the CDN and is a direct origin request, the
    client IP will be in last position in HTTP_X_FORWARDED_FOR but in this case
    nginx already sets REMOTE_ADDR so we don't need to use it.
    """

    def is_request_from_cdn(self, request):
        # Constant-time check of the shared secret to avoid timing leaks.
        return settings.SECRET_CDN_TOKEN and constant_time_compare(
            request.META.get('HTTP_X_REQUEST_VIA_CDN'), settings.SECRET_CDN_TOKEN
        )

    def process_request(self, request):
        forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
        if not (forwarded_for and self.is_request_from_cdn(request)):
            return
        # The client IP is second to last, so two right-splits are enough.
        try:
            client_ip = forwarded_for.rsplit(sep=',', maxsplit=2)[-2].strip()
            if not client_ip:
                raise IndexError
            request.META['REMOTE_ADDR'] = client_ip
        except IndexError:
            # Shouldn't happen, must be a misconfiguration, raise an error
            # rather than potentially use/record incorrect IPs.
            raise ImproperlyConfigured('Invalid HTTP_X_FORWARDED_FOR')
class RequestIdMiddleware(MiddlewareMixin):
    """Attach a unique id to every incoming request.

    The id is exposed through the `X-AMO-Request-ID` response header so a
    request can be correlated across system layers, e.g. logs and sentry.
    """

    def process_request(self, request):
        # hex form keeps the id compact and header/log friendly.
        request.request_id = uuid.uuid4().hex

    def process_response(self, request, response):
        request_id = getattr(request, 'request_id', None)
        if request_id:
            response['X-AMO-Request-ID'] = request_id
        return response
class CacheControlMiddleware:
    """Add Cache-Control: max-age=xxx to responses that should be cached and
    Cache-Control: s-maxage=0 to responses that should not.

    Only API, unauthenticated responses to "safe" HTTP verbs are cached — or
    responses that already carried a max-age before reaching this middleware,
    in which case their existing Cache-Control header is left intact.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        response = self.get_response(request)
        max_age_from_response = get_max_age(response)
        cacheable_request = (
            request.is_api
            and request.method in ('GET', 'HEAD')
            and 'HTTP_AUTHORIZATION' not in request.META
            and 'disable_caching' not in request.GET
        )
        cacheable_response = (
            not response.cookies
            and 200 <= response.status_code < 400
            and not max_age_from_response
        )
        if cacheable_request and cacheable_response:
            patch_cache_control(response, max_age=settings.API_CACHE_DURATION)
        elif max_age_from_response is None:
            # No pre-existing max-age: explicitly forbid shared caching.
            patch_cache_control(response, s_maxage=0)
        return response
class LBHeartbeatMiddleware:
    """Answer /__lbheartbeat__ with a plain, uncached 200 for the balancer.
    Must be placed above CommonMiddleware to work with ELB.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        if request.path != '/__lbheartbeat__':
            return self.get_response(request)
        heartbeat = HttpResponse(status=200)
        add_never_cache_headers(heartbeat)
        return heartbeat
class TokenValidMiddleware:
    """Check that the FxA auth tokens haven't expired, refreshing them when
    necessary; redirect to login when the refresh fails.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        # API requests are validated in SessionIDAuthentication instead.
        if not getattr(request, 'is_api', False):
            if SESSION_KEY in request.session:
                try:
                    check_and_update_fxa_access_token(request)
                except IdentificationError:
                    log.info(f'Failed refreshing access_token for {request.user.id}')
                    return redirect_for_login(request)
            else:
                # No SESSION_KEY means the session is definitely anonymous.
                request.user = AnonymousUser()
        return self.get_response(request)
class GraphiteMiddlewareNoAuth(MiddlewareMixin):
    """Like django-statsd's GraphiteMiddleware, but without the request.auth.*
    pings that would force us to evaluate request.user."""

    def process_response(self, request, response):
        # Count every response by its status code.
        statsd.incr('response.%s' % response.status_code)
        return response

    def process_exception(self, request, exception):
        # 404s become regular responses; anything else counts as a 500.
        if isinstance(exception, Http404):
            return
        statsd.incr('response.500')
| |
# -*- coding: utf-8 -*-
from __future__ import with_statement
from cms.api import create_page, add_plugin
from cms.models import Page
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.plugins.text.models import Text
from cms.tests.plugins import PluginsTestBaseCase
from cms.test_utils.util.context_managers import SettingsOverride
from cms.utils.copy_plugins import copy_plugins_to
URL_CMS_MOVE_PLUGIN = u'/en/admin/cms/page/%d/move-plugin/'
class NestedPluginsTestCase(PluginsTestBaseCase):
def copy_placeholders_and_check_results(self, placeholders):
"""
This function is not itself a test; rather, it can be used by any test
that has created placeholders. It will check that whatever the plugin
structure in the placeholder, it will be copied accurately when they are
copied.
placeholders is a list of placeholders
"""
for original_placeholder in placeholders:
# get the plugins
original_plugins = original_placeholder.get_plugins()
# copy them to a new placeholder
copied_placeholder = Placeholder.objects.create(slot=original_placeholder.slot)
copy_plugins_to(
original_placeholder.get_plugins(),
copied_placeholder
)
copied_plugins = copied_placeholder.get_plugins()
# we should find the same number of plugins in both placeholders
self.assertEquals(
original_plugins.count(),
copied_plugins.count()
)
# quick check: make sure the two querysets match:
for original, copy in zip(original_plugins, copied_plugins):
self.assertEquals(
Text.objects.get(id=original.id).body,
Text.objects.get(id=copy.id).body
)
# Now build a *tree* of the plugins, and match those - it's not
# enough just to compare querysets as above; we should *also* check
# that when we build a tree, the various nodes are assembled as we
# would expect. We will pump the trees into a pair of lists:
original_plugins_list = []
copied_plugins_list = []
# This function builds the tree of plugins, starting from its roots.
# In that respect it's like many of the plugin tree-building
# routines elsewhere in the system.
def plugin_list_from_tree(roots, plugin_list):
for plugin in roots:
plugin_list.append(plugin)
# recurse over the set of nodes
plugin_list_from_tree(plugin.get_children(), plugin_list)
# build the tree for each set of plugins
plugin_list_from_tree(original_plugins.filter(level=0), original_plugins_list)
plugin_list_from_tree(copied_plugins.filter(level=0), copied_plugins_list)
# Check that each pair of items in the two lists match, in lots of
# different ways
for original, copy in zip(original_plugins_list, copied_plugins_list):
original_text_plugin = Text.objects.get(id=original.id)
copied_text_plugin = Text.objects.get(id=copy.id)
# This first one is a sanity test, just to prove that we aren't
# simply comparing *exactly the same items* in all these tests.
# It could happen...
self.assertNotEquals(original.id, copy.id)
self.assertEquals(
original_text_plugin.body,
copied_text_plugin.body
)
self.assertEquals(
original_text_plugin.level,
copied_text_plugin.level
)
self.assertEquals(
original_text_plugin.position,
copied_text_plugin.position
)
self.assertEquals(
original_text_plugin.rght,
copied_text_plugin.rght
)
self.assertEquals(
original_text_plugin.lft,
copied_text_plugin.lft
)
self.assertEquals(
original_text_plugin.get_descendant_count(),
copied_text_plugin.get_descendant_count()
)
self.assertEquals(
original_text_plugin.get_ancestors().count(),
copied_text_plugin.get_ancestors().count()
)
# just in case the test method that called us wants it:
return copied_placeholder
    def test_plugin_deep_nesting_and_copying(self):
        """
        Create a deeply-nested plugin structure, tests its properties, and tests
        that it is copied accurately when the placeholder containing them is
        copied.
        The structure below isn't arbitrary, but has been designed to test
        various conditions, including:
        * nodes four levels deep
        * multiple successive level increases
        * multiple successive level decreases
        * successive nodes on the same level followed by level changes
        * multiple level decreases between successive nodes
        * siblings with and without children
        * nodes and branches added to the tree out of sequence
        First we create the structure (indentation = nesting depth, order is
        the depth-first traversal the assertions below verify):
        11
        1
            2
                12
                4
                    10
                8
            3
                9
        5
            6
            7
            13
        14
        and then we move it all around.
        """
        placeholder = Placeholder(slot=u"some_slot")
        placeholder.save() # a good idea, if not strictly necessary
        # plugin in placeholder
        plugin_1 = add_plugin(placeholder, u"TextPlugin", u"en",
                              body=u"01",
                              )
        plugin_1.save()
        # IMPORTANT: plugins must be reloaded, before they can be assigned
        # as a parent. Otherwise, the MPTT structure doesn't seem to rebuild
        # properly.
        # child of plugin_1
        plugin_2 = add_plugin(placeholder, u"TextPlugin", u"en",
                              body=u"02",
                              )
        plugin_1 = self.reload(plugin_1)
        plugin_2.parent = plugin_1
        plugin_2.save()
        # plugin_2 should be plugin_1's only child
        # for a single item we use assertItemsEqual
        self.assertItemsEqual(
            CMSPlugin.objects.get(id=plugin_1.pk).get_children(),
            [CMSPlugin.objects.get(id=plugin_2.pk)])
        # create a second child of plugin_1
        plugin_3 = add_plugin(placeholder, u"TextPlugin", u"en",
                              body=u"03",
                              )
        plugin_1 = self.reload(plugin_1)
        plugin_3.parent = plugin_1
        plugin_3.save()
        # plugin_2 & plugin_3 should be plugin_1's children
        # for multiple items we use assertSequenceEqual, because
        # assertItemsEqual may re-order the list without warning
        self.assertSequenceEqual(
            CMSPlugin.objects.get(id=plugin_1.pk).get_children(),
            [
                CMSPlugin.objects.get(id=plugin_2.pk),
                CMSPlugin.objects.get(id=plugin_3.pk),
            ])
        # child of plugin_2
        plugin_4 = add_plugin(placeholder, u"TextPlugin", u"en",
                              body=u"04",
                              )
        plugin_2 = self.reload(plugin_2)
        plugin_4.parent = plugin_2
        plugin_4.save()
        # plugin_4 should be plugin_2's child
        self.assertItemsEqual(
            CMSPlugin.objects.get(id=plugin_2.pk).get_children(),
            [CMSPlugin.objects.get(id=plugin_4.pk)])
        # 2,3 & 4 should be descendants of 1
        self.assertSequenceEqual(
            CMSPlugin.objects.get(id=plugin_1.pk).get_descendants(),
            [
                # note tree_id ordering of MPTT reflected here:
                CMSPlugin.objects.get(id=plugin_2.pk),
                CMSPlugin.objects.get(id=plugin_4.pk),
                CMSPlugin.objects.get(id=plugin_3.pk),
            ],
        )
        # create a second root plugin
        plugin_5 = add_plugin(placeholder, u"TextPlugin", u"en",
                              # force this to first-child, to make the tree more challenging
                              position='first-child',
                              body=u"05",
                              )
        plugin_5.save()
        # child of plugin_5
        plugin_6 = add_plugin(placeholder, u"TextPlugin", u"en",
                              body=u"06",
                              )
        plugin_5 = self.reload(plugin_5)
        plugin_6.parent = plugin_5
        plugin_6.save()
        # plugin_6 should be plugin_5's child
        self.assertItemsEqual(
            CMSPlugin.objects.get(id=plugin_5.pk).get_children(),
            [CMSPlugin.objects.get(id=plugin_6.pk)])
        # child of plugin_6
        # NOTE(review): despite the comment above, the code below assigns
        # plugin_5 (not plugin_6) as plugin_7's parent — which is what the
        # following assertions expect.
        plugin_7 = add_plugin(placeholder, u"TextPlugin", u"en",
                              body=u"07",
                              )
        plugin_5 = self.reload(plugin_5)
        plugin_7.parent = plugin_5
        plugin_7.save()
        # plugin_7 should be plugin_5's child
        self.assertSequenceEqual(
            CMSPlugin.objects.get(id=plugin_5.pk).get_children(),
            [
                CMSPlugin.objects.get(id=plugin_6.pk),
                CMSPlugin.objects.get(id=plugin_7.pk)
            ])
        # 6 & 7 should be descendants of 5
        self.assertSequenceEqual(
            CMSPlugin.objects.get(id=plugin_5.pk).get_descendants(),
            [
                CMSPlugin.objects.get(id=plugin_6.pk),
                CMSPlugin.objects.get(id=plugin_7.pk),
            ])
        # another child of plugin_2
        plugin_8 = add_plugin(placeholder, u"TextPlugin", u"en",
                              body=u"08",
                              )
        plugin_2 = self.reload(plugin_2)
        plugin_8.parent = plugin_2
        plugin_8.save()
        # plugin_4 should be plugin_2's child
        self.assertSequenceEqual(
            CMSPlugin.objects.get(id=plugin_2.pk).get_children(),
            [
                CMSPlugin.objects.get(id=plugin_4.pk),
                CMSPlugin.objects.get(id=plugin_8.pk),
            ])
        # child of plugin_3
        plugin_9 = add_plugin(placeholder, u"TextPlugin", u"en",
                              body=u"09",
                              )
        plugin_3 = self.reload(plugin_3)
        plugin_9.parent = plugin_3
        plugin_9.save()
        # plugin_9 should be plugin_3's child
        self.assertItemsEqual(
            CMSPlugin.objects.get(id=plugin_3.pk).get_children(),
            [CMSPlugin.objects.get(id=plugin_9.pk)])
        # child of plugin_4
        plugin_10 = add_plugin(placeholder, u"TextPlugin", u"en",
                               body=u"10",
                               )
        plugin_4 = self.reload(plugin_4)
        plugin_10.parent = plugin_4
        plugin_10.save()
        # plugin_10 should be plugin_4's child
        self.assertItemsEqual(
            CMSPlugin.objects.get(id=plugin_4.pk).get_children(),
            [CMSPlugin.objects.get(id=plugin_10.pk)])
        original_plugins = placeholder.get_plugins()
        self.assertEquals(original_plugins.count(), 10)
        # elder sibling of plugin_1
        plugin_1 = self.reload(plugin_1)
        plugin_11 = add_plugin(placeholder, u"TextPlugin", u"en",
                               body=u"11",
                               target=plugin_1,
                               position="left"
                               )
        plugin_11.save()
        # inserting plugin_11 must not disturb plugin_1's children
        self.assertSequenceEqual(
            CMSPlugin.objects.get(id=plugin_1.pk).get_children(),
            [
                CMSPlugin.objects.get(id=plugin_2.pk),
                CMSPlugin.objects.get(id=plugin_3.pk)
            ])
        # elder sibling of plugin_4
        plugin_4 = self.reload(plugin_4)
        plugin_12 = add_plugin(placeholder, u"TextPlugin", u"en",
                               body=u"12",
                               target=plugin_4,
                               position="left"
                               )
        plugin_12.save()
        self.assertSequenceEqual(
            CMSPlugin.objects.get(id=plugin_2.pk).get_children(),
            [
                CMSPlugin.objects.get(id=plugin_12.pk),
                CMSPlugin.objects.get(id=plugin_4.pk),
                CMSPlugin.objects.get(id=plugin_8.pk)
            ])
        # younger sibling of plugin_7
        plugin_7 = self.reload(plugin_7)
        plugin_13 = add_plugin(placeholder, u"TextPlugin", u"en",
                               body=u"13",
                               target=plugin_7,
                               position="right"
                               )
        plugin_13.save()
        self.assertSequenceEqual(
            CMSPlugin.objects.get(id=plugin_5.pk).get_children(),
            [
                CMSPlugin.objects.get(id=plugin_6.pk),
                CMSPlugin.objects.get(id=plugin_7.pk),
                CMSPlugin.objects.get(id=plugin_13.pk)
            ])
        # new sibling of plugin_5
        plugin_5 = self.reload(plugin_5)
        plugin_14 = add_plugin(placeholder, u"TextPlugin", u"en",
                               body=u"14"
                               )
        plugin_14.save()
        # all four roots, in tree order
        self.assertSequenceEqual(
            CMSPlugin.objects.filter(level=0),
            [
                CMSPlugin.objects.get(id=plugin_11.pk),
                CMSPlugin.objects.get(id=plugin_1.pk),
                CMSPlugin.objects.get(id=plugin_5.pk),
                CMSPlugin.objects.get(id=plugin_14.pk)
            ])
        self.assertEquals(CMSPlugin.objects.get(id=plugin_11.pk).tree_id, 1)
        # verify the whole structure copies correctly before any moves
        self.copy_placeholders_and_check_results([placeholder])
        # now let's move plugins around in the tree
        # move plugin_2 before plugin_11
        plugin_2 = self.reload(plugin_2)
        plugin_2.move_to(target=plugin_1, position="left")
        plugin_2.save()
        self.assertEquals(CMSPlugin.objects.get(id=plugin_2.pk).tree_id, 1)
        self.copy_placeholders_and_check_results([placeholder])
        # move plugin_6 after plugin_7
        plugin_6 = self.reload(plugin_6)
        plugin_7 = self.reload(plugin_7)
        plugin_6.move_to(target=plugin_7, position="right")
        plugin_6.save()
        self.copy_placeholders_and_check_results([placeholder])
        # move plugin_3 before plugin_2
        plugin_2 = self.reload(plugin_2)
        plugin_3 = self.reload(plugin_3)
        plugin_3.move_to(target=plugin_2, position="left")
        plugin_3.save()
        self.copy_placeholders_and_check_results([placeholder])
        # make plugin_3 plugin_2's first-child
        plugin_2 = self.reload(plugin_2)
        plugin_3 = self.reload(plugin_3)
        plugin_3.move_to(target=plugin_2, position="first-child")
        plugin_3.save()
        self.copy_placeholders_and_check_results([placeholder])
        # make plugin_7 plugin_2's first-child
        # NOTE(review): the comment above doesn't match the move below, which
        # places plugin_7 to the right of plugin_3 (making it plugin_3's
        # next sibling, i.e. plugin_2's second child).
        plugin_2 = self.reload(plugin_2)
        plugin_7 = self.reload(plugin_7)
        plugin_7.move_to(target=plugin_3, position="right")
        plugin_7.save()
        self.copy_placeholders_and_check_results([placeholder, ])
def test_nested_plugin_on_page(self):
    """
    Validate a textplugin with a nested link plugin
    mptt values are correctly showing a parent child relationship
    of a nested plugin
    """
    with SettingsOverride(CMS_PERMISSION=False):
        # setup page 1
        page_one = create_page(u"Three Placeholder", u"col_three.html", u"en",
                               position=u"last-child", published=True, in_navigation=True)
        page_one_ph_two = page_one.placeholders.get(slot=u"col_left")
        # add a plugin
        pre_nesting_body = u"<p>the nested text plugin with a link inside</p>"
        text_plugin = add_plugin(page_one_ph_two, u"TextPlugin", u"en", body=pre_nesting_body)
        # prepare nesting plugin
        page_one_ph_two = self.reload(page_one_ph_two)
        text_plugin = self.reload(text_plugin)
        link_plugin = add_plugin(page_one_ph_two, u"LinkPlugin", u"en", target=text_plugin)
        link_plugin.name = u"django-cms Link"
        link_plugin.url = u"https://www.django-cms.org"
        # mptt does not update the parent/child relationship in the
        # add_plugin call when a target is present, so set it explicitly
        # (this is not the topic of the test)
        link_plugin.parent = text_plugin
        link_plugin.save()
        # reloading needs to be done after every save
        link_plugin = self.reload(link_plugin)
        text_plugin = self.reload(text_plugin)
        # mptt related insertion correct?
        msg = u"parent plugin right is not updated, child not inserted correctly"
        self.assertTrue(text_plugin.rght > link_plugin.rght, msg=msg)
        msg = u"link has no parent"
        self.assertIsNotNone(link_plugin.parent, msg=msg)
        msg = u"parent plugin left is not updated, child not inserted correctly"
        self.assertTrue(text_plugin.lft < link_plugin.lft, msg=msg)
        msg = u"child level is not bigger than parent level"
        self.assertTrue(text_plugin.level < link_plugin.level, msg=msg)
        # add the link plugin to the body
        # emulate the editor in admin that adds some txt for the nested plugin
        in_txt = u"""<img id="plugin_obj_%s" title="Link" alt="Link" src="/static/cms/images/plugins/link.png">"""
        nesting_body = u"%s<p>%s</p>" % (text_plugin.body, (in_txt % (link_plugin.id)))
        text_plugin.body = nesting_body
        text_plugin.save()
        text_plugin = self.reload(text_plugin)
        # none of the descendants should have a placeholder other then my own one
        self.assertEqual(text_plugin.get_descendants().exclude(placeholder=text_plugin.placeholder).count(), 0)
        post_add_plugin_count = CMSPlugin.objects.count()
        self.assertEqual(post_add_plugin_count, 2)
def test_copy_page_nested_plugin(self):
    """
    Test to verify that page copy with a nested plugin works
    page one - 3 placeholder
        col_sidebar: 1 text plugin
        col_left: 1 text plugin with nested link plugin
        col_right: no plugin
    page two (copy target)
    Verify copied page, placeholders, plugins and body text
    """
    with SettingsOverride(CMS_PERMISSION=False):
        # setup page 1
        page_one = create_page(u"Three Placeholder", u"col_three.html", u"en",
                               position=u"last-child", published=True, in_navigation=True)
        page_one_ph_one = page_one.placeholders.get(slot=u"col_sidebar")
        page_one_ph_two = page_one.placeholders.get(slot=u"col_left")
        page_one_ph_three = page_one.placeholders.get(slot=u"col_right")
        # add the text plugin to placeholder one
        text_plugin_en = add_plugin(page_one_ph_one, u"TextPlugin", u"en", body="Hello World")
        self.assertEqual(text_plugin_en.id, CMSPlugin.objects.all()[0].id)
        self.assertEqual(text_plugin_en.get_children().count(), 0)
        pre_add_plugin_count = CMSPlugin.objects.count()
        self.assertEqual(pre_add_plugin_count, 1)
        ###
        # add a plugin to placeholder two
        ###
        pre_nesting_body = u"<p>the nested text plugin with a link inside</p>"
        text_plugin_two = add_plugin(page_one_ph_two, u"TextPlugin", u"en", body=pre_nesting_body)
        text_plugin_two = self.reload(text_plugin_two)
        # prepare nesting plugin
        page_one_ph_two = self.reload(page_one_ph_two)
        text_plugin_two = self.reload(text_plugin_two)
        link_plugin = add_plugin(page_one_ph_two, u"LinkPlugin", u"en", target=text_plugin_two)
        link_plugin.name = u"django-cms Link"
        link_plugin.url = u"https://www.django-cms.org"
        # mptt does not set the parent on add_plugin with a target; set it by hand
        link_plugin.parent = text_plugin_two
        link_plugin.save()
        link_plugin = self.reload(link_plugin)
        text_plugin_two = self.reload(text_plugin_two)
        in_txt = """<img id="plugin_obj_%s" title="Link" alt="Link" src="/static/cms/images/plugins/link.png">"""
        nesting_body = "%s<p>%s</p>" % (text_plugin_two.body, (in_txt % (link_plugin.id)))
        # emulate the editor in admin that adds some txt for the nested plugin
        text_plugin_two.body = nesting_body
        text_plugin_two.save()
        text_plugin_two = self.reload(text_plugin_two)
        # the link is attached as a child?
        self.assertEqual(text_plugin_two.get_children().count(), 1)
        post_add_plugin_count = CMSPlugin.objects.filter(placeholder__page__publisher_is_draft=True).count()
        self.assertEqual(post_add_plugin_count, 3)
        page_one.save()
        # get the plugins from the original page
        page_one = self.reload(page_one)
        page_one_ph_one = page_one.placeholders.get(slot=u"col_sidebar")
        page_one_ph_two = page_one.placeholders.get(slot=u"col_left")
        page_one_ph_three = page_one.placeholders.get(slot=u"col_right")
        # verify that the plugins got created
        org_placeholder_one_plugins = page_one_ph_one.get_plugins()
        self.assertEqual(len(org_placeholder_one_plugins), 1)
        org_placeholder_two_plugins = page_one_ph_two.get_plugins()
        self.assertEqual(len(org_placeholder_two_plugins), 2)
        org_placeholder_three_plugins = page_one_ph_three.get_plugins()
        self.assertEqual(len(org_placeholder_three_plugins), 0)
        self.assertEqual(page_one.placeholders.count(), 3)
        placeholder_count = Placeholder.objects.filter(page__publisher_is_draft=True).count()
        self.assertEqual(placeholder_count, 3)
        self.assertEqual(CMSPlugin.objects.filter(placeholder__page__publisher_is_draft=True).count(), 3)
        ##
        # setup page_copy_target page
        ##
        page_copy_target = create_page("Three Placeholder - page copy target", "col_three.html", "en",
                                       position="last-child", published=True, in_navigation=True)
        all_page_count = Page.objects.drafts().count()
        pre_copy_placeholder_count = Placeholder.objects.filter(page__publisher_is_draft=True).count()
        self.assertEqual(pre_copy_placeholder_count, 6)
        # copy the page
        superuser = self.get_superuser()
        with self.login_user_context(superuser):
            page_two = self.copy_page(page_one, page_copy_target)
            # validate the expected pages,placeholders,plugins,pluginbodies
            after_copy_page_plugin_count = CMSPlugin.objects.filter(placeholder__page__publisher_is_draft=True).count()
            self.assertEqual(after_copy_page_plugin_count, 6)
            # check the amount of copied stuff
            after_copy_page_count = Page.objects.drafts().count()
            after_copy_placeholder_count = Placeholder.objects.filter(page__publisher_is_draft=True).count()
            self.assertGreater(after_copy_page_count, all_page_count, u"no new page after copy")
            self.assertGreater(after_copy_page_plugin_count, post_add_plugin_count, u"plugin count is not grown")
            self.assertGreater(after_copy_placeholder_count, pre_copy_placeholder_count, u"placeholder count is not grown")
            self.assertEqual(after_copy_page_count, 3, u"no new page after copy")
            # original placeholder
            page_one = self.reload(page_one)
            page_one_ph_one = page_one.placeholders.get(slot=u"col_sidebar")
            page_one_ph_two = page_one.placeholders.get(slot=u"col_left")
            page_one_ph_three = page_one.placeholders.get(slot=u"col_right")
            # check if there are multiple pages assigned to this placeholders
            found_page = page_one_ph_one.page if page_one_ph_one else None
            self.assertEqual(found_page, page_one)
            found_page = page_one_ph_two.page if page_one_ph_two else None
            self.assertEqual(found_page, page_one)
            found_page = page_one_ph_three.page if page_one_ph_three else None
            self.assertEqual(found_page, page_one)
            page_two = self.reload(page_two)
            page_two_ph_one = page_two.placeholders.get(slot=u"col_sidebar")
            page_two_ph_two = page_two.placeholders.get(slot=u"col_left")
            page_two_ph_three = page_two.placeholders.get(slot=u"col_right")
            # check if there are multiple pages assigned to this placeholders
            found_page = page_two_ph_one.page if page_two_ph_one else None
            self.assertEqual(found_page, page_two)
            found_page = page_two_ph_two.page if page_two_ph_two else None
            self.assertEqual(found_page, page_two)
            found_page = page_two_ph_three.page if page_two_ph_three else None
            self.assertEqual(found_page, page_two)
            # check the stored placeholders org vs copy
            msg = 'placehoder ids copy:%s org:%s copied page %s are identical - tree broken' % (page_two_ph_one.pk, page_one_ph_one.pk, page_two.pk)
            self.assertNotEqual(page_two_ph_one.pk, page_one_ph_one.pk, msg)
            msg = 'placehoder ids copy:%s org:%s copied page %s are identical - tree broken' % (page_two_ph_two.pk, page_one_ph_two.pk, page_two.pk)
            self.assertNotEqual(page_two_ph_two.pk, page_one_ph_two.pk, msg)
            msg = 'placehoder ids copy:%s org:%s copied page %s are identical - tree broken' % (page_two_ph_three.pk, page_one_ph_three.pk, page_two.pk)
            self.assertNotEqual(page_two_ph_three.pk, page_one_ph_three.pk, msg)
            # get the plugins from the original page
            org_placeholder_one_plugins = page_one_ph_one.get_plugins()
            self.assertEqual(len(org_placeholder_one_plugins), 1)
            org_placeholder_two_plugins = page_one_ph_two.get_plugins()
            self.assertEqual(len(org_placeholder_two_plugins), 2)
            org_placeholder_three_plugins = page_one_ph_three.get_plugins()
            self.assertEqual(len(org_placeholder_three_plugins), 0)
            # get the plugins from the copied page
            copied_placeholder_one_plugins = page_two_ph_one.get_plugins()
            self.assertEqual(len(copied_placeholder_one_plugins), 1)
            copied_placeholder_two_plugins = page_two_ph_two.get_plugins()
            self.assertEqual(len(copied_placeholder_two_plugins), 2)
            copied_placeholder_three_plugins = page_two_ph_three.get_plugins()
            self.assertEqual(len(copied_placeholder_three_plugins), 0)
            # verify the plugins got copied
            # placeholder 1
            count_plugins_copied = len(copied_placeholder_one_plugins)
            count_plugins_org = len(org_placeholder_one_plugins)
            msg = u"plugin count %s %s for placeholder one not equal" % (count_plugins_copied, count_plugins_org)
            self.assertEqual(count_plugins_copied, count_plugins_org, msg)
            # placeholder 2
            count_plugins_copied = len(copied_placeholder_two_plugins)
            count_plugins_org = len(org_placeholder_two_plugins)
            msg = u"plugin count %s %s for placeholder two not equal" % (count_plugins_copied, count_plugins_org)
            self.assertEqual(count_plugins_copied, count_plugins_org, msg)
            # placeholder 3
            count_plugins_copied = len(copied_placeholder_three_plugins)
            count_plugins_org = len(org_placeholder_three_plugins)
            msg = u"plugin count %s %s for placeholder three not equal" % (count_plugins_copied, count_plugins_org)
            self.assertEqual(count_plugins_copied, count_plugins_org, msg)
            # verify the body of text plugin with nested link plugin
            # org to copied
            org_nested_text_plugin = None
            # do this iteration to find the real text plugin with the attached link
            # the inheritance mechanism for the cmsplugins works through
            # (tuple)get_plugin_instance()
            for x in org_placeholder_two_plugins:
                if x.plugin_type == u"TextPlugin":
                    instance = x.get_plugin_instance()[0]
                    if instance.body.startswith(pre_nesting_body):
                        org_nested_text_plugin = instance
                        break
            copied_nested_text_plugin = None
            for x in copied_placeholder_two_plugins:
                if x.plugin_type == u"TextPlugin":
                    instance = x.get_plugin_instance()[0]
                    if instance.body.startswith(pre_nesting_body):
                        copied_nested_text_plugin = instance
                        break
            msg = u"orginal nested text plugin not found"
            self.assertIsNotNone(org_nested_text_plugin, msg=msg)
            msg = u"copied nested text plugin not found"
            self.assertIsNotNone(copied_nested_text_plugin, msg=msg)
            # get the children ids of the texplugin with a nested link
            # to check if the body of the text is genrated correctly
            org_link_child_plugin = org_nested_text_plugin.get_children()[0]
            copied_link_child_plugin = copied_nested_text_plugin.get_children()[0]
            # validate the textplugin body texts
            msg = u"org plugin and copied plugin are the same"
            self.assertNotEqual(org_link_child_plugin.id, copied_link_child_plugin.id, msg)
            needle = u"plugin_obj_%s"
            msg = u"child plugin id differs to parent in body plugin_obj_id"
            # linked child is in body
            self.assertIn(needle % (org_link_child_plugin.id), org_nested_text_plugin.body, msg)
            msg = u"copy: child plugin id differs to parent in body plugin_obj_id"
            self.assertIn(needle % (copied_link_child_plugin.id), copied_nested_text_plugin.body, msg)
            # really nothing else
            msg = u"child link plugin id differs to parent body plugin_obj_id"
            self.assertNotIn(needle % (copied_link_child_plugin.id), org_nested_text_plugin.body, msg)
            msg = u"copy: child link plugin id differs to parent body plugin_obj_id"
            self.assertNotIn(needle % (org_link_child_plugin.id), copied_nested_text_plugin.body, msg)
            # now reverse lookup the placeholders from the plugins
            org_placeholder = org_link_child_plugin.placeholder
            copied_placeholder = copied_link_child_plugin.placeholder
            msg = u"placeholder of the orginal plugin and copied plugin are the same"
            self.assertNotEqual(org_placeholder.id, copied_placeholder.id, msg)
def test_copy_page_nested_plugin_moved_parent_plugin(self):
    """
    Test to verify that page copy with a nested plugin works
    when a plugin with child got moved to another placeholder
    page one - 3 placeholder
        col_sidebar: 1 text plugin
        col_left: 1 text plugin with nested link plugin
        col_right: no plugin
    page two (copy target)
    step2: move the col_left text plugin to col_right
        col_sidebar: 1 text plugin
        col_left: no plugin
        col_right: 1 text plugin with nested link plugin
    verify the copied page structure
    """
    with SettingsOverride(CMS_PERMISSION=False):
        # setup page 1
        page_one = create_page(u"Three Placeholder", u"col_three.html", u"en",
                               position=u"last-child", published=True, in_navigation=True)
        page_one_ph_one = page_one.placeholders.get(slot=u"col_sidebar")
        page_one_ph_two = page_one.placeholders.get(slot=u"col_left")
        page_one_ph_three = page_one.placeholders.get(slot=u"col_right")
        # add the text plugin to placeholder one
        text_plugin_en = add_plugin(page_one_ph_one, u"TextPlugin", u"en", body=u"Hello World")
        self.assertEqual(text_plugin_en.id, CMSPlugin.objects.all()[0].id)
        self.assertEqual(text_plugin_en.get_children().count(), 0)
        pre_add_plugin_count = CMSPlugin.objects.count()
        self.assertEqual(pre_add_plugin_count, 1)
        # add a plugin to placeholder two
        pre_nesting_body = u"<p>the nested text plugin with a link inside</p>"
        text_plugin_two = add_plugin(page_one_ph_two, u"TextPlugin", u"en", body=pre_nesting_body)
        text_plugin_two = self.reload(text_plugin_two)
        # prepare nesting plugin
        page_one_ph_two = self.reload(page_one_ph_two)
        text_plugin_two = self.reload(text_plugin_two)
        link_plugin = add_plugin(page_one_ph_two, u"LinkPlugin", u"en", target=text_plugin_two)
        link_plugin.name = u"django-cms Link"
        link_plugin.url = u"https://www.django-cms.org"
        link_plugin.parent = text_plugin_two
        link_plugin.save()
        # reload after every save
        link_plugin = self.reload(link_plugin)
        text_plugin_two = self.reload(text_plugin_two)
        in_txt = u"""<img id="plugin_obj_%s" title="Link" alt="Link" src="/static/cms/images/plugins/link.png">"""
        nesting_body = "%s<p>%s</p>" % (text_plugin_two.body, (in_txt % (link_plugin.id)))
        # emulate the editor in admin that adds some txt for the nested plugin
        text_plugin_two.body = nesting_body
        text_plugin_two.save()
        text_plugin_two = self.reload(text_plugin_two)
        # the link is attached as a child?
        self.assertEqual(text_plugin_two.get_children().count(), 1)
        post_add_plugin_count = CMSPlugin.objects.count()
        self.assertEqual(post_add_plugin_count, 3)
        page_one.save()
        # get the plugins from the original page
        page_one = self.reload(page_one)
        page_one_ph_one = page_one.placeholders.get(slot=u"col_sidebar")
        page_one_ph_two = page_one.placeholders.get(slot=u"col_left")
        page_one_ph_three = page_one.placeholders.get(slot=u"col_right")
        # verify the plugins got created
        org_placeholder_one_plugins = page_one_ph_one.get_plugins()
        self.assertEqual(len(org_placeholder_one_plugins), 1)
        org_placeholder_two_plugins = page_one_ph_two.get_plugins()
        self.assertEqual(len(org_placeholder_two_plugins), 2)
        org_placeholder_three_plugins = page_one_ph_three.get_plugins()
        self.assertEqual(len(org_placeholder_three_plugins), 0)
        self.assertEqual(page_one.placeholders.count(), 3)
        placeholder_count = Placeholder.objects.filter(page__publisher_is_draft=True).count()
        self.assertEqual(placeholder_count, 3)
        self.assertEqual(CMSPlugin.objects.count(), 3)
        # setup page_copy_target
        page_copy_target = create_page("Three Placeholder - page copy target", "col_three.html", "en",
                                       position="last-child", published=True, in_navigation=True)
        all_page_count = Page.objects.drafts().count()
        pre_copy_placeholder_count = Placeholder.objects.filter(page__publisher_is_draft=True).count()
        self.assertEqual(pre_copy_placeholder_count, 6)
        superuser = self.get_superuser()
        with self.login_user_context(superuser):
            # now move the parent text plugin to another placeholder
            post_data = {
                u'placeholder': u"col_right",
                u'placeholder_id': u"%s" % (page_one_ph_three.id),
                u'ids': u"%s" % (text_plugin_two.id),
                u'plugin_id': u"%s" % (text_plugin_two.id),
            }
            edit_url = URL_CMS_MOVE_PLUGIN % (page_one.id)
            response = self.client.post(edit_url, post_data)
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.content, u'ok')
            # check if the plugin got moved
            page_one = self.reload(page_one)
            text_plugin_two = self.reload(text_plugin_two)
            page_one_ph_one = page_one.placeholders.get(slot=u"col_sidebar")
            page_one_ph_two = page_one.placeholders.get(slot=u"col_left")
            page_one_ph_three = page_one.placeholders.get(slot=u"col_right")
            org_placeholder_one_plugins = page_one_ph_one.get_plugins()
            self.assertEqual(len(org_placeholder_one_plugins), 1)
            org_placeholder_two_plugins = page_one_ph_two.get_plugins()
            # the plugin got moved and child got moved
            self.assertEqual(len(org_placeholder_two_plugins), 0)
            org_placeholder_three_plugins = page_one_ph_three.get_plugins()
            self.assertEqual(len(org_placeholder_three_plugins), 2)
            # copy the page
            page_two = self.copy_page(page_one, page_copy_target)
            # validate the expected pages,placeholders,plugins,pluginbodies
            after_copy_page_plugin_count = CMSPlugin.objects.count()
            self.assertEqual(after_copy_page_plugin_count, 6)
            after_copy_page_count = Page.objects.drafts().count()
            after_copy_placeholder_count = Placeholder.objects.filter(page__publisher_is_draft=True).count()
            self.assertGreater(after_copy_page_count, all_page_count, u"no new page after copy")
            self.assertGreater(after_copy_page_plugin_count, post_add_plugin_count, u"plugin count is not grown")
            self.assertGreater(after_copy_placeholder_count, pre_copy_placeholder_count, u"placeholder count is not grown")
            self.assertEqual(after_copy_page_count, 3, u"no new page after copy")
            # validate the structure
            # orginal placeholder
            page_one = self.reload(page_one)
            page_one_ph_one = page_one.placeholders.get(slot=u"col_sidebar")
            page_one_ph_two = page_one.placeholders.get(slot=u"col_left")
            page_one_ph_three = page_one.placeholders.get(slot=u"col_right")
            # check if there are multiple pages assigned to this placeholders
            found_page = page_one_ph_one.page if page_one_ph_one else None
            self.assertEqual(found_page, page_one)
            found_page = page_one_ph_two.page if page_one_ph_two else None
            self.assertEqual(found_page, page_one)
            found_page = page_one_ph_three.page if page_one_ph_three else None
            self.assertEqual(found_page, page_one)
            page_two = self.reload(page_two)
            page_two_ph_one = page_two.placeholders.get(slot=u"col_sidebar")
            page_two_ph_two = page_two.placeholders.get(slot=u"col_left")
            page_two_ph_three = page_two.placeholders.get(slot=u"col_right")
            # check if there are multiple pages assigned to this placeholders
            found_page = page_two_ph_one.page if page_two_ph_one else None
            self.assertEqual(found_page, page_two)
            found_page = page_two_ph_two.page if page_two_ph_two else None
            self.assertEqual(found_page, page_two)
            found_page = page_two_ph_three.page if page_two_ph_three else None
            self.assertEqual(found_page, page_two)
            # check the stored placeholders org vs copy
            msg = u'placehoder ids copy:%s org:%s copied page %s are identical - tree broken' % (page_two_ph_one.pk, page_one_ph_one.pk, page_two.pk)
            self.assertNotEqual(page_two_ph_one.pk, page_one_ph_one.pk, msg)
            msg = u'placehoder ids copy:%s org:%s copied page %s are identical - tree broken' % (page_two_ph_two.pk, page_one_ph_two.pk, page_two.pk)
            self.assertNotEqual(page_two_ph_two.pk, page_one_ph_two.pk, msg)
            msg = u'placehoder ids copy:%s org:%s copied page %s are identical - tree broken' % (page_two_ph_three.pk, page_one_ph_three.pk, page_two.pk)
            self.assertNotEqual(page_two_ph_three.pk, page_one_ph_three.pk, msg)
            # get the plugins from the original page
            org_placeholder_one_plugins = page_one_ph_one.get_plugins()
            self.assertEqual(len(org_placeholder_one_plugins), 1)
            org_placeholder_two_plugins = page_one_ph_two.get_plugins()
            self.assertEqual(len(org_placeholder_two_plugins), 0)
            org_placeholder_three_plugins = page_one_ph_three.get_plugins()
            self.assertEqual(len(org_placeholder_three_plugins), 2)
            # get the plugins from the copied page
            copied_placeholder_one_plugins = page_two_ph_one.get_plugins()
            self.assertEqual(len(copied_placeholder_one_plugins), 1)
            copied_placeholder_two_plugins = page_two_ph_two.get_plugins()
            self.assertEqual(len(copied_placeholder_two_plugins), 0)
            copied_placeholder_three_plugins = page_two_ph_three.get_plugins()
            self.assertEqual(len(copied_placeholder_three_plugins), 2)
            # verify the plugins got copied
            # placeholder 1
            count_plugins_copied = len(copied_placeholder_one_plugins)
            count_plugins_org = len(org_placeholder_one_plugins)
            msg = u"plugin count %s %s for placeholder one not equal" % (count_plugins_copied, count_plugins_org)
            self.assertEqual(count_plugins_copied, count_plugins_org, msg)
            # placeholder 2
            count_plugins_copied = len(copied_placeholder_two_plugins)
            count_plugins_org = len(org_placeholder_two_plugins)
            msg = u"plugin count %s %s for placeholder two not equal" % (count_plugins_copied, count_plugins_org)
            self.assertEqual(count_plugins_copied, count_plugins_org, msg)
            # placeholder 3
            count_plugins_copied = len(copied_placeholder_three_plugins)
            count_plugins_org = len(org_placeholder_three_plugins)
            msg = u"plugin count %s %s for placeholder three not equal" % (count_plugins_copied, count_plugins_org)
            self.assertEqual(count_plugins_copied, count_plugins_org, msg)
            # verify the body of text plugin with nested link plugin
            # org to copied
            org_nested_text_plugin = None
            # do this iteration to find the real text plugin with the attached link
            # the inheritance mechanism for the cmsplugins works through
            # (tuple)get_plugin_instance()
            for x in org_placeholder_three_plugins:
                if x.plugin_type == u"TextPlugin":
                    instance = x.get_plugin_instance()[0]
                    if instance.body.startswith(pre_nesting_body):
                        org_nested_text_plugin = instance
                        break
            copied_nested_text_plugin = None
            for x in copied_placeholder_three_plugins:
                if x.plugin_type == u"TextPlugin":
                    instance = x.get_plugin_instance()[0]
                    if instance.body.startswith(pre_nesting_body):
                        copied_nested_text_plugin = instance
                        break
            msg = u"orginal nested text plugin not found"
            self.assertIsNotNone(org_nested_text_plugin, msg=msg)
            msg = u"copied nested text plugin not found"
            self.assertIsNotNone(copied_nested_text_plugin, msg=msg)
            # get the children ids of the texplugin with a nested link
            # to check if the body of the text is generated correctly
            org_link_child_plugin = org_nested_text_plugin.get_children()[0]
            copied_link_child_plugin = copied_nested_text_plugin.get_children()[0]
            # validate the textplugin body texts
            msg = u"org plugin and copied plugin are the same"
            self.assertNotEqual(org_link_child_plugin.id, copied_link_child_plugin.id, msg)
            needle = u"plugin_obj_%s"
            msg = u"child plugin id differs to parent in body plugin_obj_id"
            # linked child is in body
            self.assertIn(needle % (org_link_child_plugin.id), org_nested_text_plugin.body, msg)
            msg = u"copy: child plugin id differs to parent in body plugin_obj_id"
            self.assertIn(needle % (copied_link_child_plugin.id), copied_nested_text_plugin.body, msg)
            # really nothing else
            msg = u"child link plugin id differs to parent body plugin_obj_id"
            self.assertNotIn(needle % (copied_link_child_plugin.id), org_nested_text_plugin.body, msg)
            msg = u"copy: child link plugin id differs to parent body plugin_obj_id"
            self.assertNotIn(needle % (org_link_child_plugin.id), copied_nested_text_plugin.body, msg)
            # now reverse lookup the placeholders from the plugins
            org_placeholder = org_link_child_plugin.placeholder
            copied_placeholder = copied_link_child_plugin.placeholder
            msg = u"placeholder of the orginal plugin and copied plugin are the same"
            self.assertNotEqual(org_placeholder.id, copied_placeholder.id, msg)
| |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import division
import os
from ....interfaces import fsl as fsl # fsl
from ....interfaces import utility as util # utility
from ....pipeline import engine as pe # pypeline engine
from ....interfaces import freesurfer as fs # freesurfer
from ....interfaces import spm as spm
from ...smri.freesurfer.utils import create_getmask_flow
from .... import LooseVersion
def getthreshop(thresh):
    """Build fslmaths op strings thresholding at 10% of each run's 98th percentile.

    *thresh* is a list of per-run stat pairs (2nd, 98th percentile) as produced
    by ``fsl.ImageStats(op_string='-p 2 -p 98')``.
    """
    ops = []
    for stats in thresh:
        ops.append('-thr %.10f -Tmin -bin' % (0.1 * stats[1]))
    return ops
def pickfirst(files):
    """Return the first element when *files* is a list, else *files* unchanged."""
    return files[0] if isinstance(files, list) else files
def pickmiddle(files):
    """Return, for each 4D image in *files*, the index of its middle volume."""
    from nibabel import load
    import numpy as np
    # shape[3] is the number of volumes in the 4D run
    return [int(np.ceil(load(fname).shape[3] / 2)) for fname in files]
def pickvol(filenames, fileidx, which):
    """Map a volume selector ('first'|'middle'|'last') to a 0-based volume
    index into the 4D image ``filenames[fileidx]``."""
    from nibabel import load
    import numpy as np
    selector = which.lower()
    if selector == 'first':
        return 0
    if selector == 'middle':
        return int(np.ceil(load(filenames[fileidx]).shape[3] / 2))
    if selector == 'last':
        return load(filenames[fileidx]).shape[3] - 1
    raise Exception('unknown value for volume selection : %s' % which)
def getbtthresh(medianvals):
    """Return SUSAN brightness thresholds: 75% of each run's median intensity."""
    thresholds = []
    for median in medianvals:
        thresholds.append(0.75 * median)
    return thresholds
def chooseindex(fwhm):
    """Select Merge input 0 (unsmoothed data) when fwhm < 1, else input 1 (smoothed)."""
    return [0] if fwhm < 1 else [1]
def getmeanscale(medianvals):
    """Build fslmaths '-mul' op strings that scale each run's median to 10000."""
    ops = []
    for median in medianvals:
        ops.append('-mul %.10f' % (10000. / median))
    return ops
def getusans(x):
    """Package (image, median) pairs as SUSAN usans: one [(file, 0.75*median)] per run."""
    usans = []
    for pair in x:
        usans.append([(pair[0], 0.75 * pair[1])])
    return usans
def tolist(x):
    """Wrap *x* in a single-element list (used to feed util.Merge inputs)."""
    return [x]


def highpass_operand(x):
    """Build the fslmaths temporal-filter op string from a highpass HWHM (in TRs)."""
    return '-bptf %.10f -1' % x
def create_parallelfeat_preproc(name='featpreproc', highpass=True):
    """Preprocess each run with FSL independently of the others

    Parameters
    ----------

    ::

      name : name of workflow (default: featpreproc)
      highpass : boolean (default: True)

    Inputs::

        inputspec.func : functional runs (filename or list of filenames)
        inputspec.fwhm : fwhm for smoothing with SUSAN
        inputspec.highpass : HWHM in TRs (if created with highpass=True)

    Outputs::

        outputspec.reference : volume to which runs are realigned
        outputspec.motion_parameters : motion correction parameters
        outputspec.realigned_files : motion corrected files
        outputspec.motion_plots : plots of motion correction parameters
        outputspec.mask : mask file used to mask the brain
        outputspec.smoothed_files : smoothed functional data
        outputspec.highpassed_files : highpassed functional data (if highpass=True)
        outputspec.mean : mean file

    Example
    -------

    >>> preproc = create_parallelfeat_preproc()
    >>> preproc.inputs.inputspec.func = ['f3.nii', 'f5.nii']
    >>> preproc.inputs.inputspec.fwhm = 5
    >>> preproc.inputs.inputspec.highpass = 128./(2*2.5)
    >>> preproc.base_dir = '/tmp'
    >>> preproc.run() # doctest: +SKIP

    >>> preproc = create_parallelfeat_preproc(highpass=False)
    >>> preproc.inputs.inputspec.func = 'f3.nii'
    >>> preproc.inputs.inputspec.fwhm = 5
    >>> preproc.base_dir = '/tmp'
    >>> preproc.run() # doctest: +SKIP
    """
    featpreproc = pe.Workflow(name=name)

    # Set up nodes defining all inputs and outputs of the preprocessing
    # workflow; the highpass variant carries two extra fields.
    if highpass:
        inputnode = pe.Node(interface=util.IdentityInterface(fields=['func',
                                                                     'fwhm',
                                                                     'highpass']),
                            name='inputspec')
        outputnode = pe.Node(interface=util.IdentityInterface(fields=['reference',
                                                                      'motion_parameters',
                                                                      'realigned_files',
                                                                      'motion_plots',
                                                                      'mask',
                                                                      'smoothed_files',
                                                                      'highpassed_files',
                                                                      'mean']),
                             name='outputspec')
    else:
        inputnode = pe.Node(interface=util.IdentityInterface(fields=['func',
                                                                     'fwhm']),
                            name='inputspec')
        outputnode = pe.Node(interface=util.IdentityInterface(fields=['reference',
                                                                      'motion_parameters',
                                                                      'realigned_files',
                                                                      'motion_plots',
                                                                      'mask',
                                                                      'smoothed_files',
                                                                      'mean']),
                             name='outputspec')

    """
    Convert functional images to float representation. Since there can
    be more than one functional run we use a MapNode to convert each
    run.
    """
    img2float = pe.MapNode(interface=fsl.ImageMaths(out_data_type='float',
                                                    op_string='',
                                                    suffix='_dtype'),
                           iterfield=['in_file'],
                           name='img2float')
    featpreproc.connect(inputnode, 'func', img2float, 'in_file')

    """
    Extract the middle volume of each run as the reference
    """
    extract_ref = pe.MapNode(interface=fsl.ExtractROI(t_size=1),
                             iterfield=['in_file', 't_min'],
                             name='extractref')
    featpreproc.connect(img2float, 'out_file', extract_ref, 'in_file')
    featpreproc.connect(img2float, ('out_file', pickmiddle), extract_ref, 't_min')
    featpreproc.connect(extract_ref, 'roi_file', outputnode, 'reference')

    """
    Realign each functional run to its own reference volume
    """
    motion_correct = pe.MapNode(interface=fsl.MCFLIRT(save_mats=True,
                                                      save_plots=True),
                                name='realign',
                                iterfield=['in_file', 'ref_file'])
    featpreproc.connect(img2float, 'out_file', motion_correct, 'in_file')
    featpreproc.connect(extract_ref, 'roi_file', motion_correct, 'ref_file')
    featpreproc.connect(motion_correct, 'par_file', outputnode, 'motion_parameters')
    featpreproc.connect(motion_correct, 'out_file', outputnode, 'realigned_files')

    """
    Plot the estimated motion parameters
    """
    plot_motion = pe.MapNode(interface=fsl.PlotMotionParams(in_source='fsl'),
                             name='plot_motion',
                             iterfield=['in_file'])
    plot_motion.iterables = ('plot_type', ['rotations', 'translations'])
    featpreproc.connect(motion_correct, 'par_file', plot_motion, 'in_file')
    featpreproc.connect(plot_motion, 'out_file', outputnode, 'motion_plots')

    """
    Extract the mean volume of each functional run
    """
    meanfunc = pe.MapNode(interface=fsl.ImageMaths(op_string='-Tmean',
                                                   suffix='_mean'),
                          iterfield=['in_file'],
                          name='meanfunc')
    featpreproc.connect(motion_correct, 'out_file', meanfunc, 'in_file')

    """
    Strip the skull from the mean functional to generate a mask
    """
    meanfuncmask = pe.MapNode(interface=fsl.BET(mask=True,
                                                no_output=True,
                                                frac=0.3),
                              iterfield=['in_file'],
                              name='meanfuncmask')
    featpreproc.connect(meanfunc, 'out_file', meanfuncmask, 'in_file')

    """
    Mask the functional runs with the extracted mask
    """
    maskfunc = pe.MapNode(interface=fsl.ImageMaths(suffix='_bet',
                                                   op_string='-mas'),
                          iterfield=['in_file', 'in_file2'],
                          name='maskfunc')
    featpreproc.connect(motion_correct, 'out_file', maskfunc, 'in_file')
    featpreproc.connect(meanfuncmask, 'mask_file', maskfunc, 'in_file2')

    """
    Determine the 2nd and 98th percentile intensities of each functional run
    """
    getthresh = pe.MapNode(interface=fsl.ImageStats(op_string='-p 2 -p 98'),
                           iterfield=['in_file'],
                           name='getthreshold')
    featpreproc.connect(maskfunc, 'out_file', getthresh, 'in_file')

    """
    Threshold each run of the functional data at 10% of the 98th percentile
    """
    threshold = pe.MapNode(interface=fsl.ImageMaths(out_data_type='char',
                                                    suffix='_thresh'),
                           iterfield=['in_file', 'op_string'],
                           name='threshold')
    featpreproc.connect(maskfunc, 'out_file', threshold, 'in_file')
    # getthreshop builds the '-thr ... -Tmin -bin' op string per run
    featpreproc.connect(getthresh, ('out_stat', getthreshop), threshold, 'op_string')

    """
    Determine the median value of the functional runs using the mask
    """
    medianval = pe.MapNode(interface=fsl.ImageStats(op_string='-k %s -p 50'),
                           iterfield=['in_file', 'mask_file'],
                           name='medianval')
    featpreproc.connect(motion_correct, 'out_file', medianval, 'in_file')
    featpreproc.connect(threshold, 'out_file', medianval, 'mask_file')

    """
    Dilate the mask
    """
    dilatemask = pe.MapNode(interface=fsl.ImageMaths(suffix='_dil',
                                                     op_string='-dilF'),
                            iterfield=['in_file'],
                            name='dilatemask')
    featpreproc.connect(threshold, 'out_file', dilatemask, 'in_file')
    featpreproc.connect(dilatemask, 'out_file', outputnode, 'mask')

    """
    Mask the motion corrected functional runs with the dilated mask
    """
    maskfunc2 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask',
                                                    op_string='-mas'),
                           iterfield=['in_file', 'in_file2'],
                           name='maskfunc2')
    featpreproc.connect(motion_correct, 'out_file', maskfunc2, 'in_file')
    featpreproc.connect(dilatemask, 'out_file', maskfunc2, 'in_file2')

    """
    Smooth each run using SUSAN with the brightness threshold set to 75%
    of the median value for each run and a mask constituting the mean
    functional
    """
    smooth = create_susan_smooth()
    featpreproc.connect(inputnode, 'fwhm', smooth, 'inputnode.fwhm')
    featpreproc.connect(maskfunc2, 'out_file', smooth, 'inputnode.in_files')
    featpreproc.connect(dilatemask, 'out_file', smooth, 'inputnode.mask_file')

    """
    Mask the smoothed data with the dilated mask
    """
    maskfunc3 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask',
                                                    op_string='-mas'),
                           iterfield=['in_file', 'in_file2'],
                           name='maskfunc3')
    featpreproc.connect(smooth, 'outputnode.smoothed_files', maskfunc3, 'in_file')
    featpreproc.connect(dilatemask, 'out_file', maskfunc3, 'in_file2')

    concatnode = pe.Node(interface=util.Merge(2),
                         name='concat')
    featpreproc.connect(maskfunc2, ('out_file', tolist), concatnode, 'in1')
    featpreproc.connect(maskfunc3, ('out_file', tolist), concatnode, 'in2')

    """
    The following nodes select smooth or unsmoothed data depending on the
    fwhm. This is because SUSAN defaults to smoothing the data with about the
    voxel size of the input data if the fwhm parameter is less than 1/3 of the
    voxel size.
    """
    selectnode = pe.Node(interface=util.Select(), name='select')
    featpreproc.connect(concatnode, 'out', selectnode, 'inlist')
    featpreproc.connect(inputnode, ('fwhm', chooseindex), selectnode, 'index')
    featpreproc.connect(selectnode, 'out', outputnode, 'smoothed_files')

    """
    Scale each run so that its median value is set to 10000
    """
    meanscale = pe.MapNode(interface=fsl.ImageMaths(suffix='_gms'),
                           iterfield=['in_file', 'op_string'],
                           name='meanscale')
    featpreproc.connect(selectnode, 'out', meanscale, 'in_file')
    # getmeanscale builds the '-mul ...' scaling factor per run
    featpreproc.connect(medianval, ('out_stat', getmeanscale), meanscale, 'op_string')

    """
    Perform temporal highpass filtering on the data
    """
    if highpass:
        # NOTE: named highpass_filter (not highpass) so the boolean parameter
        # is not shadowed by the node; the original code rebound `highpass`
        # here and relied on the node being truthy in the test below.
        highpass_filter = pe.MapNode(interface=fsl.ImageMaths(suffix='_tempfilt'),
                                     iterfield=['in_file'],
                                     name='highpass')
        featpreproc.connect(inputnode, ('highpass', highpass_operand), highpass_filter, 'op_string')
        featpreproc.connect(meanscale, 'out_file', highpass_filter, 'in_file')
        featpreproc.connect(highpass_filter, 'out_file', outputnode, 'highpassed_files')

    """
    Generate a mean functional image from each run
    """
    meanfunc3 = pe.MapNode(interface=fsl.ImageMaths(op_string='-Tmean',
                                                    suffix='_mean'),
                           iterfield=['in_file'],
                           name='meanfunc3')
    if highpass:
        featpreproc.connect(highpass_filter, 'out_file', meanfunc3, 'in_file')
    else:
        featpreproc.connect(meanscale, 'out_file', meanfunc3, 'in_file')
    featpreproc.connect(meanfunc3, 'out_file', outputnode, 'mean')

    return featpreproc
def create_featreg_preproc(name='featpreproc', highpass=True, whichvol='middle'):
    """Create a FEAT preprocessing workflow with registration to one volume of the first run
    Parameters
    ----------
    ::
    name : name of workflow (default: featpreproc)
    highpass : boolean (default: True)
    whichvol : which volume of the first run to register to ('first', 'middle', 'last', 'mean')
    Inputs::
    inputspec.func : functional runs (filename or list of filenames)
    inputspec.fwhm : fwhm for smoothing with SUSAN
    inputspec.highpass : HWHM in TRs (if created with highpass=True)
    Outputs::
    outputspec.reference : volume to which runs are realigned
    outputspec.motion_parameters : motion correction parameters
    outputspec.realigned_files : motion corrected files
    outputspec.motion_plots : plots of motion correction parameters
    outputspec.mask : mask file used to mask the brain
    outputspec.smoothed_files : smoothed functional data
    outputspec.highpassed_files : highpassed functional data (if highpass=True)
    outputspec.mean : mean file
    Example
    -------
    >>> preproc = create_featreg_preproc()
    >>> preproc.inputs.inputspec.func = ['f3.nii', 'f5.nii']
    >>> preproc.inputs.inputspec.fwhm = 5
    >>> preproc.inputs.inputspec.highpass = 128./(2*2.5)
    >>> preproc.base_dir = '/tmp'
    >>> preproc.run() # doctest: +SKIP
    >>> preproc = create_featreg_preproc(highpass=False, whichvol='mean')
    >>> preproc.inputs.inputspec.func = 'f3.nii'
    >>> preproc.inputs.inputspec.fwhm = 5
    >>> preproc.base_dir = '/tmp'
    >>> preproc.run() # doctest: +SKIP
    """
    # FSL 5.0.7 changed temporal filtering to remove the image mean; the
    # version flag (507) selects the "add the mean back" branch at the end of
    # this workflow (see the comment near meanfunc4/addmean below).
    version = 0
    if fsl.Info.version() and \
            LooseVersion(fsl.Info.version()) > LooseVersion('5.0.6'):
        version = 507
    featpreproc = pe.Workflow(name=name)
    """
    Set up a node to define all inputs required for the preprocessing workflow
    """
    # The 'highpass' input field and 'highpassed_files' output field exist
    # only when highpass filtering was requested at construction time.
    if highpass:
        inputnode = pe.Node(interface=util.IdentityInterface(fields=['func',
                                                                     'fwhm',
                                                                     'highpass']),
                            name='inputspec')
        outputnode = pe.Node(interface=util.IdentityInterface(fields=['reference',
                                                                      'motion_parameters',
                                                                      'realigned_files',
                                                                      'motion_plots',
                                                                      'mask',
                                                                      'smoothed_files',
                                                                      'highpassed_files',
                                                                      'mean']),
                             name='outputspec')
    else:
        inputnode = pe.Node(interface=util.IdentityInterface(fields=['func',
                                                                     'fwhm']),
                            name='inputspec')
        outputnode = pe.Node(interface=util.IdentityInterface(fields=['reference',
                                                                      'motion_parameters',
                                                                      'realigned_files',
                                                                      'motion_plots',
                                                                      'mask',
                                                                      'smoothed_files',
                                                                      'mean']),
                             name='outputspec')
    """
    Set up a node to define outputs for the preprocessing workflow
    """
    """
    Convert functional images to float representation. Since there can
    be more than one functional run we use a MapNode to convert each
    run.
    """
    img2float = pe.MapNode(interface=fsl.ImageMaths(out_data_type='float',
                                                    op_string='',
                                                    suffix='_dtype'),
                           iterfield=['in_file'],
                           name='img2float')
    featpreproc.connect(inputnode, 'func', img2float, 'in_file')
    """
    Extract the middle (or what whichvol points to) volume of the first run as the reference
    """
    if whichvol != 'mean':
        # NOTE(review): 'iterfield' is a MapNode argument; on a plain pe.Node
        # it is presumably ignored — confirm against the nipype version in use.
        extract_ref = pe.Node(interface=fsl.ExtractROI(t_size=1),
                              iterfield=['in_file'],
                              name='extractref')
        # pickvol (module-level helper) resolves 'first'/'middle'/'last' to a
        # concrete volume index of the first run.
        featpreproc.connect(img2float, ('out_file', pickfirst), extract_ref, 'in_file')
        featpreproc.connect(img2float, ('out_file', pickvol, 0, whichvol), extract_ref, 't_min')
        featpreproc.connect(extract_ref, 'roi_file', outputnode, 'reference')
    """
    Realign the functional runs to the reference (`whichvol` volume of first run)
    """
    motion_correct = pe.MapNode(interface=fsl.MCFLIRT(save_mats=True,
                                                      save_plots=True,
                                                      interpolation='spline'),
                                name='realign',
                                iterfield=['in_file'])
    featpreproc.connect(img2float, 'out_file', motion_correct, 'in_file')
    if whichvol != 'mean':
        featpreproc.connect(extract_ref, 'roi_file', motion_correct, 'ref_file')
    else:
        # With whichvol == 'mean' MCFLIRT computes its own mean reference
        # volume; the first run's mean image becomes the output reference.
        motion_correct.inputs.mean_vol = True
        featpreproc.connect(motion_correct, ('mean_img', pickfirst), outputnode, 'reference')
    featpreproc.connect(motion_correct, 'par_file', outputnode, 'motion_parameters')
    featpreproc.connect(motion_correct, 'out_file', outputnode, 'realigned_files')
    """
    Plot the estimated motion parameters
    """
    # Iterables produce one rotation plot and one translation plot per run.
    plot_motion = pe.MapNode(interface=fsl.PlotMotionParams(in_source='fsl'),
                             name='plot_motion',
                             iterfield=['in_file'])
    plot_motion.iterables = ('plot_type', ['rotations', 'translations'])
    featpreproc.connect(motion_correct, 'par_file', plot_motion, 'in_file')
    featpreproc.connect(plot_motion, 'out_file', outputnode, 'motion_plots')
    """
    Extract the mean volume of the first functional run
    """
    meanfunc = pe.Node(interface=fsl.ImageMaths(op_string='-Tmean',
                                                suffix='_mean'),
                       name='meanfunc')
    featpreproc.connect(motion_correct, ('out_file', pickfirst), meanfunc, 'in_file')
    """
    Strip the skull from the mean functional to generate a mask
    """
    meanfuncmask = pe.Node(interface=fsl.BET(mask=True,
                                             no_output=True,
                                             frac=0.3),
                           name='meanfuncmask')
    featpreproc.connect(meanfunc, 'out_file', meanfuncmask, 'in_file')
    """
    Mask the functional runs with the extracted mask
    """
    maskfunc = pe.MapNode(interface=fsl.ImageMaths(suffix='_bet',
                                                   op_string='-mas'),
                          iterfield=['in_file'],
                          name='maskfunc')
    featpreproc.connect(motion_correct, 'out_file', maskfunc, 'in_file')
    featpreproc.connect(meanfuncmask, 'mask_file', maskfunc, 'in_file2')
    """
    Determine the 2nd and 98th percentile intensities of each functional run
    """
    getthresh = pe.MapNode(interface=fsl.ImageStats(op_string='-p 2 -p 98'),
                           iterfield=['in_file'],
                           name='getthreshold')
    featpreproc.connect(maskfunc, 'out_file', getthresh, 'in_file')
    """
    Threshold the first run of the functional data at 10% of the 98th percentile
    """
    threshold = pe.MapNode(interface=fsl.ImageMaths(out_data_type='char',
                                                    suffix='_thresh'),
                           iterfield=['in_file', 'op_string'],
                           name='threshold')
    featpreproc.connect(maskfunc, 'out_file', threshold, 'in_file')
    """
    Define a function to get 10% of the intensity
    """
    # getthreshop (module-level helper) turns the percentile pair into the
    # fslmaths op_string implementing the 10%-of-98th-percentile threshold.
    featpreproc.connect(getthresh, ('out_stat', getthreshop), threshold, 'op_string')
    """
    Determine the median value of the functional runs using the mask
    """
    medianval = pe.MapNode(interface=fsl.ImageStats(op_string='-k %s -p 50'),
                           iterfield=['in_file', 'mask_file'],
                           name='medianval')
    featpreproc.connect(motion_correct, 'out_file', medianval, 'in_file')
    featpreproc.connect(threshold, 'out_file', medianval, 'mask_file')
    """
    Dilate the mask
    """
    dilatemask = pe.MapNode(interface=fsl.ImageMaths(suffix='_dil',
                                                     op_string='-dilF'),
                            iterfield=['in_file'],
                            name='dilatemask')
    featpreproc.connect(threshold, 'out_file', dilatemask, 'in_file')
    featpreproc.connect(dilatemask, 'out_file', outputnode, 'mask')
    """
    Mask the motion corrected functional runs with the dilated mask
    """
    maskfunc2 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask',
                                                    op_string='-mas'),
                           iterfield=['in_file', 'in_file2'],
                           name='maskfunc2')
    featpreproc.connect(motion_correct, 'out_file', maskfunc2, 'in_file')
    featpreproc.connect(dilatemask, 'out_file', maskfunc2, 'in_file2')
    """
    Smooth each run using SUSAN with the brightness threshold set to 75%
    of the median value for each run and a mask constituting the mean
    functional
    """
    smooth = create_susan_smooth()
    featpreproc.connect(inputnode, 'fwhm', smooth, 'inputnode.fwhm')
    featpreproc.connect(maskfunc2, 'out_file', smooth, 'inputnode.in_files')
    featpreproc.connect(dilatemask, 'out_file', smooth, 'inputnode.mask_file')
    """
    Mask the smoothed data with the dilated mask
    """
    maskfunc3 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask',
                                                    op_string='-mas'),
                           iterfield=['in_file', 'in_file2'],
                           name='maskfunc3')
    featpreproc.connect(smooth, 'outputnode.smoothed_files', maskfunc3, 'in_file')
    featpreproc.connect(dilatemask, 'out_file', maskfunc3, 'in_file2')
    # Offer both the unsmoothed (in1) and smoothed (in2) streams so the
    # select node below can choose between them based on fwhm.
    concatnode = pe.Node(interface=util.Merge(2),
                         name='concat')
    featpreproc.connect(maskfunc2, ('out_file', tolist), concatnode, 'in1')
    featpreproc.connect(maskfunc3, ('out_file', tolist), concatnode, 'in2')
    """
    The following nodes select smooth or unsmoothed data depending on the
    fwhm. This is because SUSAN defaults to smoothing the data with about the
    voxel size of the input data if the fwhm parameter is less than 1/3 of the
    voxel size.
    """
    selectnode = pe.Node(interface=util.Select(), name='select')
    featpreproc.connect(concatnode, 'out', selectnode, 'inlist')
    featpreproc.connect(inputnode, ('fwhm', chooseindex), selectnode, 'index')
    featpreproc.connect(selectnode, 'out', outputnode, 'smoothed_files')
    """
    Scale the median value of the run is set to 10000
    """
    # Grand-mean scaling: each run is multiplied so its median becomes 10000.
    meanscale = pe.MapNode(interface=fsl.ImageMaths(suffix='_gms'),
                           iterfield=['in_file', 'op_string'],
                           name='meanscale')
    featpreproc.connect(selectnode, 'out', meanscale, 'in_file')
    """
    Define a function to get the scaling factor for intensity normalization
    """
    featpreproc.connect(medianval, ('out_stat', getmeanscale), meanscale, 'op_string')
    """
    Generate a mean functional image from the first run
    """
    # NOTE(review): 'iterfield' on a plain pe.Node again (see extractref);
    # and despite the bare string above, the input is picked from the
    # mean-scaled files via pickfirst below.
    meanfunc3 = pe.Node(interface=fsl.ImageMaths(op_string='-Tmean',
                                                 suffix='_mean'),
                        iterfield=['in_file'],
                        name='meanfunc3')
    featpreproc.connect(meanscale, ('out_file', pickfirst), meanfunc3, 'in_file')
    featpreproc.connect(meanfunc3, 'out_file', outputnode, 'mean')
    """
    Perform temporal highpass filtering on the data
    """
    if highpass:
        # NOTE: the 'highpass' parameter name is rebound here from the
        # constructor's boolean to the MapNode object.
        highpass = pe.MapNode(interface=fsl.ImageMaths(suffix='_tempfilt'),
                              iterfield=['in_file'],
                              name='highpass')
        featpreproc.connect(inputnode, ('highpass', highpass_operand), highpass, 'op_string')
        featpreproc.connect(meanscale, 'out_file', highpass, 'in_file')
        if version < 507:
            featpreproc.connect(highpass, 'out_file', outputnode, 'highpassed_files')
        else:
            """
            Add back the mean removed by the highpass filter operation as of FSL 5.0.7
            """
            meanfunc4 = pe.MapNode(interface=fsl.ImageMaths(op_string='-Tmean',
                                                            suffix='_mean'),
                                   iterfield=['in_file'],
                                   name='meanfunc4')
            featpreproc.connect(meanscale, 'out_file', meanfunc4, 'in_file')
            addmean = pe.MapNode(interface=fsl.BinaryMaths(operation='add'),
                                 iterfield=['in_file', 'operand_file'],
                                 name='addmean')
            featpreproc.connect(highpass, 'out_file', addmean, 'in_file')
            featpreproc.connect(meanfunc4, 'out_file', addmean, 'operand_file')
            featpreproc.connect(addmean, 'out_file', outputnode, 'highpassed_files')
    return featpreproc
def create_susan_smooth(name="susan_smooth", separate_masks=True):
    """Create a SUSAN smoothing workflow
    Parameters
    ----------
    ::
    name : name of workflow (default: susan_smooth)
    separate_masks : separate masks for each run
    Inputs::
    inputnode.in_files : functional runs (filename or list of filenames)
    inputnode.fwhm : fwhm for smoothing with SUSAN
    inputnode.mask_file : mask used for estimating SUSAN thresholds (but not for smoothing)
    Outputs::
    outputnode.smoothed_files : functional runs (filename or list of filenames)
    Example
    -------
    >>> smooth = create_susan_smooth()
    >>> smooth.inputs.inputnode.in_files = 'f3.nii'
    >>> smooth.inputs.inputnode.fwhm = 5
    >>> smooth.inputs.inputnode.mask_file = 'mask.nii'
    >>> smooth.run() # doctest: +SKIP
    """
    susan_smooth = pe.Workflow(name=name)
    """
    Set up a node to define all inputs required for the preprocessing workflow
    """
    inputnode = pe.Node(interface=util.IdentityInterface(fields=['in_files',
                                                                 'fwhm',
                                                                 'mask_file']),
                        name='inputnode')
    """
    Smooth each run using SUSAN with the brightness threshold set to 75%
    of the median value for each run and a mask consituting the mean
    functional
    """
    smooth = pe.MapNode(interface=fsl.SUSAN(),
                        iterfield=['in_file', 'brightness_threshold', 'usans'],
                        name='smooth')
    """
    Determine the median value of the functional runs using the mask
    """
    # separate_masks toggles whether 'mask_file' is iterated per run (a mask
    # per run) or broadcast as a single mask shared by all runs.
    if separate_masks:
        median = pe.MapNode(interface=fsl.ImageStats(op_string='-k %s -p 50'),
                            iterfield=['in_file', 'mask_file'],
                            name='median')
    else:
        median = pe.MapNode(interface=fsl.ImageStats(op_string='-k %s -p 50'),
                            iterfield=['in_file'],
                            name='median')
    susan_smooth.connect(inputnode, 'in_files', median, 'in_file')
    susan_smooth.connect(inputnode, 'mask_file', median, 'mask_file')
    """
    Mask the motion corrected functional runs with the dilated mask
    """
    if separate_masks:
        mask = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask',
                                                   op_string='-mas'),
                          iterfield=['in_file', 'in_file2'],
                          name='mask')
    else:
        mask = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask',
                                                   op_string='-mas'),
                          iterfield=['in_file'],
                          name='mask')
    susan_smooth.connect(inputnode, 'in_files', mask, 'in_file')
    susan_smooth.connect(inputnode, 'mask_file', mask, 'in_file2')
    """
    Determine the mean image from each functional run
    """
    meanfunc = pe.MapNode(interface=fsl.ImageMaths(op_string='-Tmean',
                                                   suffix='_mean'),
                          iterfield=['in_file'],
                          name='meanfunc2')
    susan_smooth.connect(mask, 'out_file', meanfunc, 'in_file')
    """
    Merge the median values with the mean functional images into a coupled list
    """
    # axis='hstack' pairs each mean image with its median value, the shape
    # expected by getusans when building SUSAN's 'usans' input.
    merge = pe.Node(interface=util.Merge(2, axis='hstack'),
                    name='merge')
    susan_smooth.connect(meanfunc, 'out_file', merge, 'in1')
    susan_smooth.connect(median, 'out_stat', merge, 'in2')
    """
    Define a function to get the brightness threshold for SUSAN
    """
    # getbtthresh/getusans are module-level helpers defined elsewhere in
    # this file.
    susan_smooth.connect(inputnode, 'fwhm', smooth, 'fwhm')
    susan_smooth.connect(inputnode, 'in_files', smooth, 'in_file')
    susan_smooth.connect(median, ('out_stat', getbtthresh), smooth, 'brightness_threshold')
    susan_smooth.connect(merge, ('out', getusans), smooth, 'usans')
    outputnode = pe.Node(interface=util.IdentityInterface(fields=['smoothed_files']),
                         name='outputnode')
    susan_smooth.connect(smooth, 'smoothed_file', outputnode, 'smoothed_files')
    return susan_smooth
def create_fsl_fs_preproc(name='preproc', highpass=True, whichvol='middle'):
    """Create a FEAT preprocessing workflow together with freesurfer
    Parameters
    ----------
    ::
    name : name of workflow (default: preproc)
    highpass : boolean (default: True)
    whichvol : which volume of the first run to register to ('first', 'middle', 'mean')
    Inputs::
    inputspec.func : functional runs (filename or list of filenames)
    inputspec.fwhm : fwhm for smoothing with SUSAN
    inputspec.highpass : HWHM in TRs (if created with highpass=True)
    inputspec.subject_id : freesurfer subject id
    inputspec.subjects_dir : freesurfer subjects dir
    Outputs::
    outputspec.reference : volume to which runs are realigned
    outputspec.motion_parameters : motion correction parameters
    outputspec.realigned_files : motion corrected files
    outputspec.motion_plots : plots of motion correction parameters
    outputspec.mask_file : mask file used to mask the brain
    outputspec.smoothed_files : smoothed functional data
    outputspec.highpassed_files : highpassed functional data (if highpass=True)
    outputspec.reg_file : bbregister registration files
    outputspec.reg_cost : bbregister registration cost files
    Example
    -------
    >>> preproc = create_fsl_fs_preproc(whichvol='first')
    >>> preproc.inputs.inputspec.highpass = 128./(2*2.5)
    >>> preproc.inputs.inputspec.func = ['f3.nii', 'f5.nii']
    >>> preproc.inputs.inputspec.subjects_dir = '.'
    >>> preproc.inputs.inputspec.subject_id = 's1'
    >>> preproc.inputs.inputspec.fwhm = 6
    >>> preproc.run() # doctest: +SKIP
    """
    featpreproc = pe.Workflow(name=name)
    """
    Set up a node to define all inputs required for the preprocessing workflow
    """
    # As in create_featreg_preproc, the 'highpass' input and
    # 'highpassed_files' output exist only when highpass=True; this variant
    # additionally carries freesurfer subject information and bbregister
    # outputs.
    if highpass:
        inputnode = pe.Node(interface=util.IdentityInterface(fields=['func',
                                                                     'fwhm',
                                                                     'subject_id',
                                                                     'subjects_dir',
                                                                     'highpass']),
                            name='inputspec')
        outputnode = pe.Node(interface=util.IdentityInterface(fields=['reference',
                                                                      'motion_parameters',
                                                                      'realigned_files',
                                                                      'motion_plots',
                                                                      'mask_file',
                                                                      'smoothed_files',
                                                                      'highpassed_files',
                                                                      'reg_file',
                                                                      'reg_cost'
                                                                      ]),
                             name='outputspec')
    else:
        inputnode = pe.Node(interface=util.IdentityInterface(fields=['func',
                                                                     'fwhm',
                                                                     'subject_id',
                                                                     'subjects_dir'
                                                                     ]),
                            name='inputspec')
        outputnode = pe.Node(interface=util.IdentityInterface(fields=['reference',
                                                                      'motion_parameters',
                                                                      'realigned_files',
                                                                      'motion_plots',
                                                                      'mask_file',
                                                                      'smoothed_files',
                                                                      'reg_file',
                                                                      'reg_cost'
                                                                      ]),
                             name='outputspec')
    """
    Set up a node to define outputs for the preprocessing workflow
    """
    """
    Convert functional images to float representation. Since there can
    be more than one functional run we use a MapNode to convert each
    run.
    """
    img2float = pe.MapNode(interface=fsl.ImageMaths(out_data_type='float',
                                                    op_string='',
                                                    suffix='_dtype'),
                           iterfield=['in_file'],
                           name='img2float')
    featpreproc.connect(inputnode, 'func', img2float, 'in_file')
    """
    Extract the first volume of the first run as the reference
    """
    if whichvol != 'mean':
        # NOTE(review): 'iterfield' is a MapNode argument on a plain pe.Node
        # here — confirm against the nipype version in use.
        extract_ref = pe.Node(interface=fsl.ExtractROI(t_size=1),
                              iterfield=['in_file'],
                              name='extractref')
        featpreproc.connect(img2float, ('out_file', pickfirst), extract_ref, 'in_file')
        featpreproc.connect(img2float, ('out_file', pickvol, 0, whichvol), extract_ref, 't_min')
        featpreproc.connect(extract_ref, 'roi_file', outputnode, 'reference')
    """
    Realign the functional runs to the reference (1st volume of first run)
    """
    # Unlike create_featreg_preproc (spline), this variant uses sinc
    # interpolation for MCFLIRT.
    motion_correct = pe.MapNode(interface=fsl.MCFLIRT(save_mats=True,
                                                      save_plots=True,
                                                      interpolation='sinc'),
                                name='realign',
                                iterfield=['in_file'])
    featpreproc.connect(img2float, 'out_file', motion_correct, 'in_file')
    if whichvol != 'mean':
        featpreproc.connect(extract_ref, 'roi_file', motion_correct, 'ref_file')
    else:
        motion_correct.inputs.mean_vol = True
        featpreproc.connect(motion_correct, 'mean_img', outputnode, 'reference')
    featpreproc.connect(motion_correct, 'par_file', outputnode, 'motion_parameters')
    featpreproc.connect(motion_correct, 'out_file', outputnode, 'realigned_files')
    """
    Plot the estimated motion parameters
    """
    plot_motion = pe.MapNode(interface=fsl.PlotMotionParams(in_source='fsl'),
                             name='plot_motion',
                             iterfield=['in_file'])
    plot_motion.iterables = ('plot_type', ['rotations', 'translations'])
    featpreproc.connect(motion_correct, 'par_file', plot_motion, 'in_file')
    featpreproc.connect(plot_motion, 'out_file', outputnode, 'motion_plots')
    """Get the mask from subject for each run
    """
    # create_getmask_flow (defined elsewhere) derives a brain mask from the
    # freesurfer subject via bbregister.
    maskflow = create_getmask_flow()
    featpreproc.connect([(inputnode, maskflow, [('subject_id', 'inputspec.subject_id'),
                                                ('subjects_dir', 'inputspec.subjects_dir')])])
    maskflow.inputs.inputspec.contrast_type = 't2'
    if whichvol != 'mean':
        featpreproc.connect(extract_ref, 'roi_file', maskflow, 'inputspec.source_file')
    else:
        featpreproc.connect(motion_correct, ('mean_img', pickfirst), maskflow, 'inputspec.source_file')
    """
    Mask the functional runs with the extracted mask
    """
    maskfunc = pe.MapNode(interface=fsl.ImageMaths(suffix='_bet',
                                                   op_string='-mas'),
                          iterfield=['in_file'],
                          name='maskfunc')
    featpreproc.connect(motion_correct, 'out_file', maskfunc, 'in_file')
    featpreproc.connect(maskflow, ('outputspec.mask_file', pickfirst), maskfunc, 'in_file2')
    """
    Smooth each run using SUSAN with the brightness threshold set to 75%
    of the median value for each run and a mask consituting the mean
    functional
    """
    # separate_masks=False: one freesurfer-derived mask shared by all runs.
    smooth = create_susan_smooth(separate_masks=False)
    featpreproc.connect(inputnode, 'fwhm', smooth, 'inputnode.fwhm')
    featpreproc.connect(maskfunc, 'out_file', smooth, 'inputnode.in_files')
    featpreproc.connect(maskflow, ('outputspec.mask_file', pickfirst), smooth, 'inputnode.mask_file')
    """
    Mask the smoothed data with the dilated mask
    """
    maskfunc3 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask',
                                                    op_string='-mas'),
                           iterfield=['in_file'],
                           name='maskfunc3')
    featpreproc.connect(smooth, 'outputnode.smoothed_files', maskfunc3, 'in_file')
    featpreproc.connect(maskflow, ('outputspec.mask_file', pickfirst), maskfunc3, 'in_file2')
    concatnode = pe.Node(interface=util.Merge(2),
                         name='concat')
    featpreproc.connect(maskfunc, ('out_file', tolist), concatnode, 'in1')
    featpreproc.connect(maskfunc3, ('out_file', tolist), concatnode, 'in2')
    """
    The following nodes select smooth or unsmoothed data depending on the
    fwhm. This is because SUSAN defaults to smoothing the data with about the
    voxel size of the input data if the fwhm parameter is less than 1/3 of the
    voxel size.
    """
    selectnode = pe.Node(interface=util.Select(), name='select')
    featpreproc.connect(concatnode, 'out', selectnode, 'inlist')
    featpreproc.connect(inputnode, ('fwhm', chooseindex), selectnode, 'index')
    featpreproc.connect(selectnode, 'out', outputnode, 'smoothed_files')
    """
    Scale the median value of the run is set to 10000
    """
    # Grand-mean scaling: each run is multiplied so its median becomes 10000.
    meanscale = pe.MapNode(interface=fsl.ImageMaths(suffix='_gms'),
                           iterfield=['in_file', 'op_string'],
                           name='meanscale')
    featpreproc.connect(selectnode, 'out', meanscale, 'in_file')
    """
    Determine the median value of the functional runs using the mask
    """
    medianval = pe.MapNode(interface=fsl.ImageStats(op_string='-k %s -p 50'),
                           iterfield=['in_file'],
                           name='medianval')
    featpreproc.connect(motion_correct, 'out_file', medianval, 'in_file')
    featpreproc.connect(maskflow, ('outputspec.mask_file', pickfirst), medianval, 'mask_file')
    """
    Define a function to get the scaling factor for intensity normalization
    """
    featpreproc.connect(medianval, ('out_stat', getmeanscale), meanscale, 'op_string')
    """
    Perform temporal highpass filtering on the data
    """
    if highpass:
        # NOTE: unlike create_featreg_preproc, there is no FSL >= 5.0.7
        # mean-restoration branch here.
        highpass = pe.MapNode(interface=fsl.ImageMaths(suffix='_tempfilt'),
                              iterfield=['in_file'],
                              name='highpass')
        featpreproc.connect(inputnode, ('highpass', highpass_operand), highpass, 'op_string')
        featpreproc.connect(meanscale, 'out_file', highpass, 'in_file')
        featpreproc.connect(highpass, 'out_file', outputnode, 'highpassed_files')
    featpreproc.connect(maskflow, ('outputspec.mask_file', pickfirst), outputnode, 'mask_file')
    featpreproc.connect(maskflow, 'outputspec.reg_file', outputnode, 'reg_file')
    featpreproc.connect(maskflow, 'outputspec.reg_cost', outputnode, 'reg_cost')
    return featpreproc
def create_reg_workflow(name='registration'):
    """Create a FEAT preprocessing workflow
    Parameters
    ----------
    ::
    name : name of workflow (default: 'registration')
    Inputs::
    inputspec.source_files : files (filename or list of filenames to register)
    inputspec.mean_image : reference image to use
    inputspec.anatomical_image : anatomical image to coregister to
    inputspec.target_image : registration target
    Outputs::
    outputspec.func2anat_transform : FLIRT transform
    outputspec.anat2target_transform : FLIRT+FNIRT transform
    outputspec.transformed_files : transformed files in target space
    outputspec.transformed_mean : mean image in target space
    Example
    -------
    """
    register = pe.Workflow(name=name)
    # Note: the input spec also includes 'target_image_brain' (skull-stripped
    # target for the affine step) and 'config_file' (FNIRT configuration),
    # which the docstring above omits.
    inputnode = pe.Node(interface=util.IdentityInterface(fields=['source_files',
                                                                 'mean_image',
                                                                 'anatomical_image',
                                                                 'target_image',
                                                                 'target_image_brain',
                                                                 'config_file']),
                        name='inputspec')
    outputnode = pe.Node(interface=util.IdentityInterface(fields=['func2anat_transform',
                                                                  'anat2target_transform',
                                                                  'transformed_files',
                                                                  'transformed_mean',
                                                                  ]),
                         name='outputspec')
    """
    Estimate the tissue classes from the anatomical image. But use spm's segment
    as FSL appears to be breaking.
    """
    stripper = pe.Node(fsl.BET(), name='stripper')
    register.connect(inputnode, 'anatomical_image', stripper, 'in_file')
    fast = pe.Node(fsl.FAST(), name='fast')
    register.connect(stripper, 'out_file', fast, 'in_files')
    """
    Binarize the segmentation
    """
    binarize = pe.Node(fsl.ImageMaths(op_string='-nan -thr 0.5 -bin'),
                       name='binarize')
    # Index 2 of FAST's partial_volume_files is the white-matter map, which
    # becomes the wm_seg input for the BBR registration below.
    pickindex = lambda x, i: x[i]
    register.connect(fast, ('partial_volume_files', pickindex, 2),
                     binarize, 'in_file')
    """
    Calculate rigid transform from mean image to anatomical image
    """
    mean2anat = pe.Node(fsl.FLIRT(), name='mean2anat')
    mean2anat.inputs.dof = 6
    register.connect(inputnode, 'mean_image', mean2anat, 'in_file')
    register.connect(stripper, 'out_file', mean2anat, 'reference')
    """
    Now use bbr cost function to improve the transform
    """
    mean2anatbbr = pe.Node(fsl.FLIRT(), name='mean2anatbbr')
    mean2anatbbr.inputs.dof = 6
    mean2anatbbr.inputs.cost = 'bbr'
    # NOTE(review): os.getenv('FSLDIR') returns None if FSL is not configured,
    # which would make os.path.join raise TypeError — confirm FSLDIR is
    # guaranteed to be set when this builder runs.
    mean2anatbbr.inputs.schedule = os.path.join(os.getenv('FSLDIR'),
                                                'etc/flirtsch/bbr.sch')
    register.connect(inputnode, 'mean_image', mean2anatbbr, 'in_file')
    register.connect(binarize, 'out_file', mean2anatbbr, 'wm_seg')
    register.connect(inputnode, 'anatomical_image', mean2anatbbr, 'reference')
    register.connect(mean2anat, 'out_matrix_file',
                     mean2anatbbr, 'in_matrix_file')
    """
    Calculate affine transform from anatomical to target
    """
    anat2target_affine = pe.Node(fsl.FLIRT(), name='anat2target_linear')
    anat2target_affine.inputs.searchr_x = [-180, 180]
    anat2target_affine.inputs.searchr_y = [-180, 180]
    anat2target_affine.inputs.searchr_z = [-180, 180]
    register.connect(stripper, 'out_file', anat2target_affine, 'in_file')
    register.connect(inputnode, 'target_image_brain',
                     anat2target_affine, 'reference')
    """
    Calculate nonlinear transform from anatomical to target
    """
    anat2target_nonlinear = pe.Node(fsl.FNIRT(), name='anat2target_nonlinear')
    anat2target_nonlinear.inputs.fieldcoeff_file = True
    register.connect(anat2target_affine, 'out_matrix_file',
                     anat2target_nonlinear, 'affine_file')
    register.connect(inputnode, 'anatomical_image',
                     anat2target_nonlinear, 'in_file')
    register.connect(inputnode, 'config_file',
                     anat2target_nonlinear, 'config_file')
    register.connect(inputnode, 'target_image',
                     anat2target_nonlinear, 'ref_file')
    """
    Transform the mean image. First to anatomical and then to target
    """
    # ApplyWarp composes the functional->anatomical premat with the
    # anatomical->target warp field in a single resampling step.
    warpmean = pe.Node(fsl.ApplyWarp(interp='spline'), name='warpmean')
    register.connect(inputnode, 'mean_image', warpmean, 'in_file')
    register.connect(mean2anatbbr, 'out_matrix_file', warpmean, 'premat')
    register.connect(inputnode, 'target_image', warpmean, 'ref_file')
    register.connect(anat2target_nonlinear, 'fieldcoeff_file',
                     warpmean, 'field_file')
    """
    Transform the remaining images. First to anatomical and then to target
    """
    warpall = pe.MapNode(fsl.ApplyWarp(interp='spline'),
                         iterfield=['in_file'],
                         nested=True,
                         name='warpall')
    register.connect(inputnode, 'source_files', warpall, 'in_file')
    register.connect(mean2anatbbr, 'out_matrix_file', warpall, 'premat')
    register.connect(inputnode, 'target_image', warpall, 'ref_file')
    register.connect(anat2target_nonlinear, 'fieldcoeff_file',
                     warpall, 'field_file')
    """
    Assign all the output files
    """
    register.connect(warpmean, 'out_file', outputnode, 'transformed_mean')
    register.connect(warpall, 'out_file', outputnode, 'transformed_files')
    register.connect(mean2anatbbr, 'out_matrix_file',
                     outputnode, 'func2anat_transform')
    register.connect(anat2target_nonlinear, 'fieldcoeff_file',
                     outputnode, 'anat2target_transform')
    return register
# ---- (stray file-concatenation separator; commented out so the module stays parseable) ----
#
# Copyright (C) 2014 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import heapq
import logging
import threading
_g_logger = logging.getLogger(__name__)
class UserCallback(object):
    """
    This object is a handle to an event which was registered in an EventSpace.
    It can be used to cancel a registered event, check the event's status, or
    examine the results of an event that has completed.
    """

    def __init__(self, func, args, kwargs, delay, in_thread):
        """
        :param func: The callable to invoke when the event fires.
        :param args: Positional arguments for func, or None for none.
        :param kwargs: Keyword arguments for func, or None for none.
        :param delay: Seconds from now before the callback becomes ready.
        :param in_thread: If True the EventSpace runs the callback in its
            own thread (see EventSpace._build_ready_list).
        """
        self._func = func
        self._canceled = False
        self._called = False
        self._calling = False
        # Normalize optional argument containers so call() can always unpack.
        self._args = args if args is not None else []
        self._kwargs = kwargs if kwargs is not None else {}
        self._in_thread = in_thread
        # Set by the EventSpace when the callback runs in its own thread;
        # join() is a no-op while this is None.
        self._run_thread = None
        self._time_ready = (datetime.datetime.now() +
                            datetime.timedelta(seconds=delay))
        # RLock: accessor methods may be called from within the callback.
        self._lock = threading.RLock()
        self._rc = None
        self._exception = None

    def get_time_ready(self):
        """
        :return: The time at which this callback will be ready to be called.
        """
        with self._lock:
            return self._time_ready

    def __repr__(self):
        return str(self._func)

    def __lt__(self, other):
        # Ordering by ready time lets EventSpace keep callbacks in a heap.
        return self._time_ready < other.get_time_ready()

    def call(self):
        """
        Invoke the wrapped callable unless the callback was canceled.

        The return value is stored for get_rc(); any exception raised by the
        callable is caught and stored for get_exception().  The callable
        itself runs with the lock released so it may safely re-enter this
        object or its EventSpace.
        """
        _g_logger.debug("UserCallback calling %s", self._func.__name__)
        with self._lock:
            if self._canceled:
                return
            self._calling = True
        try:
            self._rc = self._func(*self._args, **self._kwargs)
        except Exception as ex:
            _g_logger.error("UserCallback function %(func_name)s threw "
                            "exception %(ex)s" %
                            {'func_name': self._func.__name__,
                             'ex': str(ex)})
            self._exception = ex
        else:
            _g_logger.debug("UserCallback function %s returned successfully."
                            % self._func.__name__)
        finally:
            # Single unlocked bool store; readers tolerate the tiny window
            # between the function returning and this flag being set.
            self._called = True

    def _cancel(self):
        """
        :returns a boolean saying if the call was canceled or not.
        If true is returned the call was not and will not be called.
        If false is returned the call already happened or will happen
        """
        with self._lock:
            # Pull the ready time up to "now" so a sleeping poll() re-examines
            # (and discards) this entry promptly.
            self._time_ready = datetime.datetime.now()
            self._canceled = True
            return not self._calling

    def is_ready(self, tm=None):
        """
        :param tm: The time to check the ready time against.  If None the
            current time will be used.
        :return: A bool indicating if this callback object is ready to be
            called.  If the associated delay has expired the callback is
            ready to be called and True is returned.
        """
        if tm is None:
            tm = datetime.datetime.now()
        with self._lock:
            return self._time_ready <= tm

    def in_thread(self):
        """:return: True if this callback should run in its own thread."""
        return self._in_thread

    def get_rc(self):
        """
        Get the return value from the called function.  If this is called
        before the callback has run it will return None in all cases.
        :return: The return code from the function or None
        """
        with self._lock:
            return self._rc

    def get_exception(self):
        """
        If the callback threw an exception when called, this method
        retrieves that exception.  If no exception was raised, or the
        callback has not yet been called, this will be None.
        :return: The exception raised by the callback, or None
        """
        with self._lock:
            return self._exception

    def has_run(self):
        """
        :return: bool indicating if the callback has run yet
        There is a small window of time where this is true but the callback
        has not actually run yet.  A true value here means that it will
        inevitably run imminently
        """
        with self._lock:
            return self._called

    def join(self):
        """
        When a callback is run in its own thread the user may want to join
        that thread.  This method allows for this.
        """
        if self._run_thread:
            self._run_thread.join()
            self._run_thread = None
class EventSpace(object):
"""
A class for managing and running events.
This class allows a user to register events to be called either at a later
time from a new thread or from the top level polling function. This allows
    functions to be scheduled for later execution outside of the registering
    function's call stack.  The events are managed by this object.  In some
    cases they can be canceled.  poll() must be called on this object in
    order for the events to occur.  The object is thread safe.  poll() can
be called in a thread and the registered event callbacks are called
unlocked and thus they can safely manipulate the event space which is
managing them.
"""
def __init__(self):
"""
:return:
"""
self._q = []
self._cond = threading.Condition()
self._done = False
self._running_threads = []
self._done_threads = []
def register_callback(self, func, args=None, kwargs=None, delay=0,
in_thread=False):
"""
:param func: The callable object (typically a function or a method)
which will be called later by the event system.
:param args: The list of arguments that will be passed to the callable
func object as *args
:param kwargs:
The dictionary of arguments will be passed to the callable func object
as **kwargs
:param delay: The number or seconds to wait before calling func. More
time may expire but at least the given number of seconds will pass.
:param in_thread: Run the callback in its own thread.
:return: A UserCallback object which is a handle to this callback
registration. It can be used to inspect and manage that event.
"""
self._cond.acquire()
try:
if self._done:
raise Exception("We cannot register callbacks because this "
"space has been stopped.")
ub = UserCallback(func, args, kwargs, delay, in_thread)
heapq.heappush(self._q, ub)
self._cond.notify()
return ub
finally:
self._cond.release()
def stop(self):
"""
Stop this event space. Once this is called no future events can be
registered and this cannot be reused. Outstanding events will be
abandoned.
:return: None
"""
self._cond.acquire()
try:
self._done = True
self._cond.notify()
finally:
self._cond.release()
def cancel_callback(self, ub):
"""
This will attempt to safely cancel a callback. In some cases the
callback will be too far into the registration process to successfully
cancel, or it may have already been run. If the cancel is successful
True will be returned, otherwise False.
:param ub: The handle to the registered func to cancel.
:return: A boolean indicating if the event was successfully canceled.
"""
self._cond.acquire()
try:
rc = ub._cancel()
self._cond.notify()
return rc
finally:
self._cond.release()
def _run_threaded(self, ub):
this_thread = threading.currentThread()
try:
ub.call()
finally:
self._cond.acquire()
try:
self._running_threads.remove(this_thread)
self._done_threads.append(this_thread)
self._cond.notifyAll()
finally:
self._cond.release()
def _build_ready_list(self, now, end_time):
# get everything that is ready right now while under lock. It nothing
# is ready a time to sleep is returned
ready_list = []
done = False
first_not_ready = None
while not done:
head_ub = heapq.nsmallest(1, self._q)
if head_ub:
if head_ub[0].is_ready(tm=now):
ub = heapq.heappop(self._q)
if ub.in_thread():
_run_thread = threading.Thread(
target=self._run_threaded,
args=(ub,))
self._running_threads.append(_run_thread)
_run_thread.start()
else:
ready_list.append(ub)
else:
first_not_ready = head_ub[0]
done = True
else:
done = True
if ready_list:
sleep_time = 0.0
else:
if first_not_ready:
ready_time = min(end_time, head_ub[0].get_time_ready())
else:
ready_time = end_time
td = ready_time - now
sleep_time = max(0.0, td.total_seconds())
return ready_list, sleep_time
def _clear_done_threads(self):
# This should only be called locked
for t in self._done_threads[:]:
t.join()
self._done_threads.remove(t)
    def poll(self, timeblock=5.0):
        """
        Poll the event space, dispatching callbacks that become ready.

        Callbacks registered with in_thread=True are started in their own
        threads (by _build_ready_list); all other ready callbacks are called
        directly from this call stack, outside of the lock.
        :param timeblock: The amount of time to wait for events to be ready
        :return: A boolean is returned to indicate if an event was called
            or not
        """
        now = datetime.datetime.now()
        end_time = now + datetime.timedelta(seconds=timeblock)
        done = False
        any_called = False
        while not done:
            self._cond.acquire()
            try:
                # if this event space was shutdown exit out immediately
                if self._done:
                    return any_called
                # reap threads of callbacks that finished since last pass
                self._clear_done_threads()
                ready_to_unlock = False
                while not ready_to_unlock and not done:
                    ready_list, sleep_time =\
                        self._build_ready_list(now, end_time)
                    if not ready_list:
                        self._cond.wait(sleep_time)
                        # it is possible for the lock to wake up before the
                        # blocking time. In this case we should see if any
                        # new callbacks are ready.
                    else:
                        # We have something to call so we need to unlock
                        ready_to_unlock = True
                    # check to see if time expired here to end all loops
                    now = datetime.datetime.now()
                    done = end_time < now
            finally:
                self._cond.release()
            # call everything that was found ready. We may want to kick
            # these out in threads in the event that they block
            for ready_ub in ready_list:
                any_called = True
                ready_ub.call()
        return any_called
def wakeup(self, cancel_all=False):
"""
Wake up a call to poll even if no callbacks are ready
:param cancel_all: If this is set all registered callbacks will be
cancelled.
:return:
"""
self._cond.acquire()
try:
if cancel_all:
for ub in self._q:
ub._cancel()
self._cond.notifyAll()
finally:
self._cond.release()
    def reset(self):
        """Cancel all pending callbacks, wait for running callback threads
        to finish, and return the event space to a fresh, usable state.

        Unlike stop(), registration is re-enabled once the reset completes.
        """
        # first disallow any new registrations and cancel anything that has
        # not yet started
        self._cond.acquire()
        try:
            self._done = True
            # canceling everything in the list will mark everything that is
            # not already effectively running to never run
            for ub in self._q:
                ub._cancel()
            self._q = []
            # wait for in-flight callback threads; _run_threaded moves each
            # finished thread to _done_threads and notifies this condition
            while len(self._running_threads) > 0:
                self._cond.wait()
            self._clear_done_threads()
            # now that everything is clear allow new registrations again
            self._done = False
        finally:
            self._cond.release()
| |
import numpy as np
from math import ceil
from NN.Errors import *
from NN.TF.Optimizers import *
class Layer:
    """Base class for all network layers.

    ``shape`` is a pair: shape[0] describes the previous layer and shape[1]
    this layer.  Subclasses implement ``_activate`` with the actual
    nonlinearity.
    """
    # Shared timer; @LayerTiming.timeit profiles activate() calls.
    LayerTiming = Timing()
    def __init__(self, shape, **kwargs):
        """
        :param shape: shape[0] = units of previous layer
                      shape[1] = units of current layer (self)
        """
        self._shape = shape
        self.parent = None
        # presumably toggled by the owning network when this layer follows a
        # conv output that must be flattened — TODO confirm with callers
        self.is_fc = False
        self.is_sub_layer = False
        # Both keys are mandatory; a missing key raises KeyError here.
        self.apply_bias = kwargs["apply_bias"]
        self.position = kwargs["position"]
    def __str__(self):
        return self.__class__.__name__
    def __repr__(self):
        return str(self)
    def init(self, sess):
        # Hook for session-time initialization; no-op by default.
        pass
    @property
    def name(self):
        return str(self)
    @property
    def root(self):
        # A plain Layer is its own root; SubLayer overrides this.
        return self
    @property
    def shape(self):
        return self._shape
    @shape.setter
    def shape(self, value):
        self._shape = value
    @property
    def params(self):
        # One-tuple so all layers' params unpack uniformly.
        return self._shape,
    @property
    def info(self):
        return "Layer : {:<16s} - {}{}".format(
            self.name, self.shape[1], " (apply_bias: False)" if not self.apply_bias else "")
    def get_special_params(self, sess):
        # Hook: extra state to persist (see Normalize); default none.
        pass
    def set_special_params(self, dic):
        # Restore state previously produced by get_special_params().
        for key, value in dic.items():
            setattr(self, key, value)
    # Core
    @LayerTiming.timeit(level=1, prefix="[Core] ")
    def activate(self, x, w, bias=None, predict=False):
        """Apply weights/bias (as configured) and the layer nonlinearity."""
        if self.is_fc:
            # Flatten everything after the batch dimension before matmul.
            fc_shape = np.prod(x.get_shape()[1:])  # type: int
            x = tf.reshape(x, [-1, int(fc_shape)])
        if self.is_sub_layer:
            # Sub-layers consume the parent's output directly (no weights).
            if not self.apply_bias:
                return self._activate(x, predict)
            return self._activate(x + bias, predict)
        if not self.apply_bias:
            return self._activate(tf.matmul(x, w), predict)
        return self._activate(tf.matmul(x, w) + bias, predict)
    def _activate(self, x, predict):
        # Overridden by concrete layers with the actual nonlinearity.
        pass
class SubLayer(Layer):
    """A layer that rides on a parent layer (e.g. dropout, normalization)."""

    def __init__(self, parent, shape, **kwargs):
        Layer.__init__(self, shape, **kwargs)
        self.parent = parent
        self.description = ""
        self.is_sub_layer = True

    @property
    def root(self):
        """Follow the parent chain up to the owning root layer."""
        node = self.parent
        while node.parent:
            node = node.parent
        return node

    def get_params(self):
        # Concrete sub-layers report their constructor parameters here.
        pass

    @property
    def params(self):
        return self.get_params()

    @property
    def info(self):
        return "Layer : {:<16s} - {} {}".format(self.name, self.shape[1], self.description)

    def _activate(self, x, predict):
        raise NotImplementedError("Please implement activation function for " + self.name)
class ConvLayer(Layer):
    """Base class for convolutional layers."""
    LayerTiming = Timing()
    def __init__(self, shape, stride=1, padding=None, parent=None, **kwargs):
        """
        :param shape: shape[0] = shape of previous layer c x h x w
                      shape[1] = shape of current layer's weight f x c x h x w
        :param stride: stride
        :param padding: zero-padding; "SAME", "VALID", or an int (manual
            zero-padding followed by a VALID convolution)
        """
        if parent is not None:
            # Inherit the shape from the (root of the) parent layer.
            _parent = parent.root if parent.is_sub_layer else parent
            shape = _parent.shape
        Layer.__init__(self, shape, **kwargs)
        self._stride = stride
        # Default to TF "SAME" padding when nothing was given.
        if padding is None:
            padding = "SAME"
        if isinstance(padding, str):
            if padding.upper() == "VALID":
                self._padding = 0
                self._pad_flag = "VALID"
            else:
                self._padding = self._pad_flag = "SAME"
        elif isinstance(padding, int):
            # Integer padding: pad manually in activate(), convolve as VALID.
            self._padding = padding
            self._pad_flag = "VALID"
        else:
            raise BuildLayerError("Padding should be 'SAME' or 'VALID' or integer")
        self.parent = parent
        if len(shape) == 1:
            # Shape not fully known yet; feed_shape() fills these in later.
            self.n_channels, self.n_filters, self.out_h, self.out_w = None, None, None, None
        else:
            self.feed_shape(shape)
    def feed_shape(self, shape):
        """Record the layer shape and derive the output spatial size."""
        self._shape = shape
        self.n_channels, height, width = shape[0]
        self.n_filters, filter_height, filter_width = shape[1]
        if self._pad_flag == "VALID":
            self.out_h = ceil((height - filter_height + 1) / self._stride)
            self.out_w = ceil((width - filter_width + 1) / self._stride)
        else:
            self.out_h = ceil(height / self._stride)
            self.out_w = ceil(width / self._stride)
    @property
    def params(self):
        return self._shape, self._stride, self._padding
    @property
    def stride(self):
        return self._stride
    @property
    def padding(self):
        return self._padding
    @property
    def pad_flag(self):
        # "SAME" or "VALID" — what is actually passed to the TF conv op.
        return self._pad_flag
    @property
    def info(self):
        return "Layer : {:<16s} - {:<14s} - strides: {:2d} - padding: {:6s}{} - out: {}".format(
            self.name, str(self.shape[1]), self.stride, self.pad_flag,
            " " * 5 if self.pad_flag == "SAME" else " ({:2d})".format(self.padding),
            (self.n_filters, self.out_h, self.out_w)
        )
class ConvPoolLayer(ConvLayer):
    """Base class for pooling layers; subclasses return the TF pooling op."""
    LayerTiming = Timing()
    @property
    def params(self):
        return (self._shape[0], self._shape[1][1:]), self._stride, self._padding
    def feed_shape(self, shape):
        # Pooling preserves the channel count: synthesize a weight shape
        # whose filter count equals the input channel count.
        shape = (shape[0], (shape[0][0], *shape[1]))
        ConvLayer.feed_shape(self, shape)
    @LayerTiming.timeit(level=1, prefix="[Core] ")
    def activate(self, x, w, bias=None, predict=False):
        pool_height, pool_width = self._shape[1][1:]
        if self._pad_flag == "VALID" and self._padding > 0:
            # Apply manual zero-padding for integer padding values.
            _pad = [self._padding] * 2
            x = tf.pad(x, [[0, 0], _pad, _pad, [0, 0]], "CONSTANT")
        # _activate returns the pooling *function* (e.g. tf.nn.max_pool),
        # which is then called here with the pooling window and strides.
        return self._activate(None)(
            x, ksize=[1, pool_height, pool_width, 1],
            strides=[1, self._stride, self._stride, 1], padding=self._pad_flag)
    def _activate(self, x, *args):
        raise NotImplementedError("Please implement activation function for " + self.name)
# noinspection PyProtectedMember
class ConvMeta(type):
    """Metaclass fusing a ConvLayer base with an activation Layer base.

    It injects __init__/_conv/_activate/activate into the new class so the
    activation layer's nonlinearity is applied to the convolution output.
    """
    def __new__(mcs, *args, **kwargs):
        name, bases, attr = args[:3]
        conv_layer, layer = bases

        def __init__(self, shape, stride=1, padding="SAME", **_kwargs):
            conv_layer.__init__(self, shape, stride, padding, **_kwargs)

        def _conv(self, x, w):
            return tf.nn.conv2d(x, w, strides=[1, self.stride, self.stride, 1], padding=self._pad_flag)

        def _activate(self, x, w, bias, predict):
            res = self._conv(x, w) + bias if self.apply_bias else self._conv(x, w)
            return layer._activate(self, res, predict)

        def activate(self, x, w, bias=None, predict=False):
            if self._pad_flag == "VALID" and self._padding > 0:
                _pad = [self._padding] * 2
                x = tf.pad(x, [[0, 0], _pad, _pad, [0, 0]], "CONSTANT")
            return self.LayerTiming.timeit(level=1, func_name="activate", cls_name=name, prefix="[Core] ")(
                _activate)(self, x, w, bias, predict)

        # BUG FIX: the original test was
        #   str(value).find("function") >= 0 or str(value).find("property")
        # and str.find returns -1 (truthy!) when absent, so *every* local
        # (name, bases, attr, conv_layer, ...) leaked into the class dict.
        # Copy only the locally defined functions (and property objects).
        for key, value in locals().items():
            if (callable(value) and not isinstance(value, type)) \
                    or isinstance(value, property):
                attr[key] = value
        return type(name, bases, attr)
# noinspection PyProtectedMember
class ConvSubMeta(type):
    """Metaclass fusing a ConvLayer base with a SubLayer base
    (used by ConvDrop / ConvNorm)."""
    def __new__(mcs, *args, **kwargs):
        name, bases, attr = args[:3]
        conv_layer, sub_layer = bases

        def __init__(self, parent, shape, *_args, **_kwargs):
            conv_layer.__init__(self, None, parent=parent, **_kwargs)
            self.out_h, self.out_w = parent.out_h, parent.out_w
            sub_layer.__init__(self, parent, shape, *_args, **_kwargs)
            self._shape = ((shape[0][0], self.out_h, self.out_w), shape[0])
            if name == "ConvNorm":
                self.tf_gamma = tf.Variable(tf.ones(self.n_filters), name="norm_scale")
                self.tf_beta = tf.Variable(tf.zeros(self.n_filters), name="norm_beta")

        def _activate(self, x, predict):
            return sub_layer._activate(self, x, predict)

        # noinspection PyUnusedLocal
        def activate(self, x, w, bias=None, predict=False):
            return self.LayerTiming.timeit(level=1, func_name="activate", cls_name=name, prefix="[Core] ")(
                _activate)(self, x, predict)

        @property
        def params(self):
            return sub_layer.get_params(self)

        # BUG FIX: same defect as ConvMeta — str.find returns -1 (truthy)
        # when absent, leaking every local into the class dict.  Copy only
        # the functions defined above plus the `params` property object.
        for key, value in locals().items():
            if (callable(value) and not isinstance(value, type)) \
                    or isinstance(value, property):
                attr[key] = value
        return type(name, bases, attr)
# Activation Layers
class Tanh(Layer):
    """Layer applying the tanh nonlinearity."""
    def _activate(self, x, predict):
        return tf.tanh(x)
class Sigmoid(Layer):
    """Layer applying the sigmoid nonlinearity."""
    def _activate(self, x, predict):
        return tf.nn.sigmoid(x)
class ELU(Layer):
    """Layer applying the exponential linear unit nonlinearity."""
    def _activate(self, x, predict):
        return tf.nn.elu(x)
class ReLU(Layer):
    """Layer applying the rectified linear unit nonlinearity."""
    def _activate(self, x, predict):
        return tf.nn.relu(x)
class Softplus(Layer):
    """Layer applying the softplus nonlinearity."""
    def _activate(self, x, predict):
        return tf.nn.softplus(x)
class Identical(Layer):
    """Identity layer: passes its input through unchanged."""
    def _activate(self, x, predict):
        return x
class CF0910(Layer):
    """Layer clipping activations to the interval [0, 6] (a.k.a. ReLU6)."""
    def _activate(self, x, predict):
        return tf.minimum(tf.maximum(x, 0), 6)
# Convolution Layers
class ConvTanh(ConvLayer, Tanh, metaclass=ConvMeta):
    """Convolutional layer with tanh activation (methods built by ConvMeta)."""
    pass
class ConvSigmoid(ConvLayer, Sigmoid, metaclass=ConvMeta):
    """Convolutional layer with sigmoid activation (built by ConvMeta)."""
    pass
class ConvELU(ConvLayer, ELU, metaclass=ConvMeta):
    """Convolutional layer with ELU activation (built by ConvMeta)."""
    pass
class ConvReLU(ConvLayer, ReLU, metaclass=ConvMeta):
    """Convolutional layer with ReLU activation (built by ConvMeta)."""
    pass
class ConvSoftplus(ConvLayer, Softplus, metaclass=ConvMeta):
    """Convolutional layer with softplus activation (built by ConvMeta)."""
    pass
class ConvIdentical(ConvLayer, Identical, metaclass=ConvMeta):
    """Convolutional layer with identity activation (built by ConvMeta)."""
    pass
class ConvCF0910(ConvLayer, CF0910, metaclass=ConvMeta):
    """Convolutional layer with clipped [0, 6] activation (built by ConvMeta)."""
    pass
# Pooling Layers
class MaxPool(ConvPoolLayer):
    """Max-pooling layer."""
    def _activate(self, x, *args):
        # Returns the TF op; ConvPoolLayer.activate supplies the arguments.
        return tf.nn.max_pool
class AvgPool(ConvPoolLayer):
    """Average-pooling layer."""
    def _activate(self, x, *args):
        # Returns the TF op; ConvPoolLayer.activate supplies the arguments.
        return tf.nn.avg_pool
# Special Layers
class Dropout(SubLayer):
    """Sub-layer that randomly drops units while training.

    :param keep_prob: probability of keeping each unit; must satisfy
        0 < keep_prob < 1.
    """
    def __init__(self, parent, shape, keep_prob=0.5, **kwargs):
        # BUG FIX: the original check was `keep_prob < 0 or keep_prob >= 1`,
        # which accepted 0 even though the message requires a *positive*
        # float (and tf.nn.dropout cannot keep zero units anyway).
        if not 0 < keep_prob < 1:
            raise BuildLayerError("(Dropout) Keep probability of Dropout should be a positive float smaller than 1")
        SubLayer.__init__(self, parent, shape, **kwargs)
        self._keep_prob = keep_prob
        self.description = "(Keep prob: {})".format(keep_prob)
    def get_params(self):
        return self._keep_prob,
    def _activate(self, x, predict):
        # Apply dropout only at training time; pass through for prediction.
        if not predict:
            return tf.nn.dropout(x, self._keep_prob)
        return x
class Normalize(SubLayer):
    """Batch-normalization sub-layer with an optional fused activation.

    Maintains running mean/variance as non-trainable TF variables, updated
    with an exponential moving average (controlled by ``momentum``) during
    training and used as-is at prediction time.
    """
    def __init__(self, parent, shape, activation="Identical", eps=1e-8, momentum=0.9, **kwargs):
        SubLayer.__init__(self, parent, shape, **kwargs)
        self._eps, self._activation = eps, activation
        # Plain (non-TF) running stats; populated via set_special_params().
        self.rm = self.rv = None
        # TF variables for the running stats; created lazily in _activate().
        self.tf_rm = self.tf_rv = None
        self.tf_gamma = tf.Variable(tf.ones(self.shape[1]), name="norm_scale")
        self.tf_beta = tf.Variable(tf.zeros(self.shape[1]), name="norm_beta")
        self._momentum = momentum
        self.description = "(eps: {}, momentum: {}, activation: {})".format(eps, momentum, activation)
    def init(self, sess):
        # Rebuild the TF running-stat variables from restored values.
        if self.rm is not None:
            self.tf_rm = tf.Variable(self.rm, trainable=False, name="norm_mean")
        if self.rv is not None:
            self.tf_rv = tf.Variable(self.rv, trainable=False, name="norm_var")
        # NOTE(review): if rm/rv were never restored, tf_rm/tf_rv may still
        # be None here — presumably init() is only called after a restore;
        # confirm with callers.
        sess.run(tf.variables_initializer([self.tf_rm, self.tf_rv]))
    def get_special_params(self, sess):
        # Running stats are extra state to persist alongside weights.
        with sess.as_default():
            return {
                "rm": self.tf_rm.eval(), "rv": self.tf_rv.eval(),
            }
    def get_params(self):
        return self._activation, self._eps, self._momentum
    # noinspection PyTypeChecker
    def _activate(self, x, predict):
        if self.tf_rm is None or self.tf_rv is None:
            # Lazily create the running stats sized to the last input axis.
            shape = x.get_shape()[-1]
            self.tf_rm = tf.Variable(tf.zeros(shape), trainable=False, name="norm_mean")
            self.tf_rv = tf.Variable(tf.ones(shape), trainable=False, name="norm_var")
        if not predict:
            # Batch moments over all axes except the last (feature) axis.
            _sm, _sv = tf.nn.moments(x, list(range(len(x.get_shape()) - 1)))
            # Exponential moving-average updates of the running stats.
            _rm = tf.assign(self.tf_rm, self._momentum * self.tf_rm + (1 - self._momentum) * _sm)
            _rv = tf.assign(self.tf_rv, self._momentum * self.tf_rv + (1 - self._momentum) * _sv)
            # Normalize with the *batch* stats, after forcing the updates.
            with tf.control_dependencies([_rm, _rv]):
                _norm = tf.nn.batch_normalization(x, _sm, _sv, self.tf_beta, self.tf_gamma, self._eps)
        else:
            _norm = tf.nn.batch_normalization(x, self.tf_rm, self.tf_rv, self.tf_beta, self.tf_gamma, self._eps)
        # Optional fused activation applied after normalization.
        if self._activation == "ReLU":
            return tf.nn.relu(_norm)
        if self._activation == "Sigmoid":
            return tf.nn.sigmoid(_norm)
        return _norm
class ConvDrop(ConvLayer, Dropout, metaclass=ConvSubMeta):
    """Dropout attached to a convolutional parent (built by ConvSubMeta)."""
    pass
class ConvNorm(ConvLayer, Normalize, metaclass=ConvSubMeta):
    """Batch-norm attached to a convolutional parent (built by ConvSubMeta)."""
    pass
# Cost Layers
class CostLayer(Layer):
    """Base class for cost (loss) layers."""
    @property
    def info(self):
        return "Cost : {:<16s}".format(self.name)
    def calculate(self, y, y_pred):
        # Note the argument order: _activate receives (prediction, target).
        return self._activate(y_pred, y)
class CrossEntropy(CostLayer):
    """Mean softmax cross-entropy loss; expects logits and one-hot labels."""
    def _activate(self, x, y):
        # x: logits, y: targets (see CostLayer.calculate for the swap).
        return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=x, labels=y))
class MSE(CostLayer):
    """Mean squared error loss."""
    def _activate(self, x, y):
        return tf.reduce_mean(tf.square(x - y))
# Factory
class LayerFactory:
    """Builds layer instances from their string names."""
    # Layers that can stand on their own.
    available_root_layers = {
        # Normal Layers
        "Tanh": Tanh, "Sigmoid": Sigmoid,
        "ELU": ELU, "ReLU": ReLU, "Softplus": Softplus,
        "Identical": Identical,
        "CF0910": CF0910,
        # Cost Layers
        "CrossEntropy": CrossEntropy, "MSE": MSE,
        # Conv Layers
        "ConvTanh": ConvTanh, "ConvSigmoid": ConvSigmoid,
        "ConvELU": ConvELU, "ConvReLU": ConvReLU, "ConvSoftplus": ConvSoftplus,
        "ConvIdentical": ConvIdentical,
        "ConvCF0910": ConvCF0910,
        "MaxPool": MaxPool, "AvgPool": AvgPool
    }
    # Sub-layers that must be attached to a parent layer.
    available_special_layers = {
        "Dropout": Dropout,
        "Normalize": Normalize,
        "ConvDrop": ConvDrop,
        "ConvNorm": ConvNorm
    }
    def get_root_layer_by_name(self, name, *args, **kwargs):
        """Instantiate a root layer, or return None for special layers.

        :raises BuildNetworkError: if *name* is completely unknown.
        """
        if name in self.available_special_layers:
            return None
        # FIX: don't rebind `name` to the class object; look it up once.
        if name not in self.available_root_layers:
            raise BuildNetworkError("Undefined layer '{}' found".format(name))
        return self.available_root_layers[name](*args, **kwargs)
    def get_layer_by_name(self, name, parent, current_dimension, *args, **kwargs):
        """Instantiate any layer by name.

        :return: (layer, None) for root layers, or
                 (layer, (parent_units, current_dimension)) for special ones.
        """
        _layer = self.get_root_layer_by_name(name, *args, **kwargs)
        if _layer:
            return _layer, None
        _current, _next = parent.shape[1], current_dimension
        # FIX: the original had separate `if args` branches, but passing an
        # empty *args is identical, so a single call suffices.
        _layer = self.available_special_layers[name](
            parent, (_current, _next), *args, **kwargs)
        return _layer, (_current, _next)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops.bijectors import AffineLinearOperator
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import normal
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.util import deprecation
__all__ = [
    "MultivariateNormalLinearOperator",
]
# Docstring fragment appended to `_log_prob`/`_prob` below via
# `distribution_util.AppendDocstring`.
_mvn_sample_note = """
`value` is a batch vector with compatible shape if `value` is a `Tensor` whose
shape can be broadcast up to either:
```python
self.batch_shape + self.event_shape
```
or
```python
[M1, ..., Mm] + self.batch_shape + self.event_shape
```
"""
# TODO(b/35290280): Import in `../../__init__.py` after adding unit-tests.
class MultivariateNormalLinearOperator(
    transformed_distribution.TransformedDistribution):
  """The multivariate normal distribution on `R^k`.

  The Multivariate Normal distribution is defined over `R^k` and parameterized
  by a (batch of) length-`k` `loc` vector (aka "mu") and a (batch of) `k x k`
  `scale` matrix; `covariance = scale @ scale.T`, where `@` denotes
  matrix-multiplication.

  #### Mathematical Details

  The probability density function (pdf) is,

  ```none
  pdf(x; loc, scale) = exp(-0.5 ||y||**2) / Z,
  y = inv(scale) @ (x - loc),
  Z = (2 pi)**(0.5 k) |det(scale)|,
  ```

  where:

  * `loc` is a vector in `R^k`,
  * `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,
  * `Z` denotes the normalization constant, and,
  * `||y||**2` denotes the squared Euclidean norm of `y`.

  The MultivariateNormal distribution is a member of the [location-scale
  family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
  constructed as,

  ```none
  X ~ MultivariateNormal(loc=0, scale=1)  # Identity scale, zero shift.
  Y = scale @ X + loc
  ```

  #### Examples

  ```python
  import tensorflow_probability as tfp
  tfd = tfp.distributions

  # Initialize a single 3-variate Gaussian.
  mu = [1., 2, 3]
  cov = [[ 0.36,  0.12,  0.06],
         [ 0.12,  0.29, -0.13],
         [ 0.06, -0.13,  0.26]]
  scale = tf.cholesky(cov)
  # ==> [[ 0.6,  0. ,  0. ],
  #      [ 0.2,  0.5,  0. ],
  #      [ 0.1, -0.3,  0.4]])
  mvn = tfd.MultivariateNormalLinearOperator(
      loc=mu,
      scale=tf.linalg.LinearOperatorLowerTriangular(scale))

  # Covariance agrees with cholesky(cov) parameterization.
  mvn.covariance().eval()
  # ==> [[ 0.36,  0.12,  0.06],
  #      [ 0.12,  0.29, -0.13],
  #      [ 0.06, -0.13,  0.26]]

  # Compute the pdf of an`R^3` observation; return a scalar.
  mvn.prob([-1., 0, 1]).eval()  # shape: []

  # Initialize a 2-batch of 3-variate Gaussians.
  mu = [[1., 2, 3],
        [11, 22, 33]]              # shape: [2, 3]
  scale_diag = [[1., 2, 3],
                [0.5, 1, 1.5]]     # shape: [2, 3]
  mvn = tfd.MultivariateNormalLinearOperator(
      loc=mu,
      scale=tf.linalg.LinearOperatorDiag(scale_diag))

  # Compute the pdf of two `R^3` observations; return a length-2 vector.
  x = [[-0.9, 0, 0.1],
       [-10, 0, 9]]     # shape: [2, 3]
  mvn.prob(x).eval()    # shape: [2]
  ```
  """

  # BUG FIX: the original message read "use `tfp.distributions` instead of
  # `tfp.distributions`" — a tautology; the second reference is the old
  # `tf.distributions` namespace.
  @deprecation.deprecated(
      "2018-10-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.distributions`.",
      warn_once=True)
  def __init__(self,
               loc=None,
               scale=None,
               validate_args=False,
               allow_nan_stats=True,
               name="MultivariateNormalLinearOperator"):
    """Construct Multivariate Normal distribution on `R^k`.

    The `batch_shape` is the broadcast shape between `loc` and `scale`
    arguments.

    The `event_shape` is given by last dimension of the matrix implied by
    `scale`. The last dimension of `loc` (if provided) must broadcast with
    this.

    Recall that `covariance = scale @ scale.T`.

    Additional leading dimensions (if any) will index batches.

    Args:
      loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
        implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]`
        where `b >= 0` and `k` is the event size.
      scale: Instance of `LinearOperator` with same `dtype` as `loc` and shape
        `[B1, ..., Bb, k, k]`.
      validate_args: Python `bool`, default `False`. Whether to validate input
        with asserts. If `validate_args` is `False`, and the inputs are
        invalid, correct behavior is not guaranteed.
      allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to give Ops created by the initializer.

    Raises:
      ValueError: if `scale` is unspecified.
      TypeError: if not `scale.dtype.is_floating`
    """
    parameters = dict(locals())
    if scale is None:
      raise ValueError("Missing required `scale` parameter.")
    if not scale.dtype.is_floating:
      raise TypeError("`scale` parameter must have floating-point dtype.")
    with ops.name_scope(name, values=[loc] + scale.graph_parents) as name:
      # Since expand_dims doesn't preserve constant-ness, we obtain the
      # non-dynamic value if possible.
      loc = ops.convert_to_tensor(loc, name="loc") if loc is not None else loc
      batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
          loc, scale)
      # The MVN is realized as an affine transform of a standard Normal.
      super(MultivariateNormalLinearOperator, self).__init__(
          distribution=normal.Normal(
              loc=array_ops.zeros([], dtype=scale.dtype),
              scale=array_ops.ones([], dtype=scale.dtype)),
          bijector=AffineLinearOperator(
              shift=loc, scale=scale, validate_args=validate_args),
          batch_shape=batch_shape,
          event_shape=event_shape,
          validate_args=validate_args,
          name=name)
      self._parameters = parameters

  @property
  def loc(self):
    """The `loc` `Tensor` in `Y = scale @ X + loc`."""
    return self.bijector.shift

  @property
  def scale(self):
    """The `scale` `LinearOperator` in `Y = scale @ X + loc`."""
    return self.bijector.scale

  @distribution_util.AppendDocstring(_mvn_sample_note)
  def _log_prob(self, x):
    return super(MultivariateNormalLinearOperator, self)._log_prob(x)

  @distribution_util.AppendDocstring(_mvn_sample_note)
  def _prob(self, x):
    return super(MultivariateNormalLinearOperator, self)._prob(x)

  def _mean(self):
    shape = self.batch_shape.concatenate(self.event_shape)
    has_static_shape = shape.is_fully_defined()
    if not has_static_shape:
      shape = array_ops.concat([
          self.batch_shape_tensor(),
          self.event_shape_tensor(),
      ], 0)
    if self.loc is None:
      return array_ops.zeros(shape, self.dtype)
    if has_static_shape and shape == self.loc.get_shape():
      return array_ops.identity(self.loc)
    # Add dummy tensor of zeros to broadcast.  This is only necessary if shape
    # != self.loc.shape, but we could not determine if this is the case.
    return array_ops.identity(self.loc) + array_ops.zeros(shape, self.dtype)

  def _covariance(self):
    if distribution_util.is_diagonal_scale(self.scale):
      # Diagonal scale: covariance is diag(scale_diag**2).
      return array_ops.matrix_diag(math_ops.square(self.scale.diag_part()))
    else:
      return self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)

  def _variance(self):
    if distribution_util.is_diagonal_scale(self.scale):
      return math_ops.square(self.scale.diag_part())
    elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
          self.scale.is_self_adjoint):
      return array_ops.matrix_diag_part(
          self.scale.matmul(self.scale.to_dense()))
    else:
      return array_ops.matrix_diag_part(
          self.scale.matmul(self.scale.to_dense(), adjoint_arg=True))

  def _stddev(self):
    if distribution_util.is_diagonal_scale(self.scale):
      return math_ops.abs(self.scale.diag_part())
    elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
          self.scale.is_self_adjoint):
      return math_ops.sqrt(array_ops.matrix_diag_part(
          self.scale.matmul(self.scale.to_dense())))
    else:
      return math_ops.sqrt(array_ops.matrix_diag_part(
          self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)))

  def _mode(self):
    # For a Gaussian the mode coincides with the mean.
    return self._mean()
@kullback_leibler.RegisterKL(MultivariateNormalLinearOperator,
                             MultivariateNormalLinearOperator)
# BUG FIX: the original deprecation message read "use `tfp.distributions`
# instead of `tfp.distributions`" — the second reference is the old
# `tf.distributions` namespace.
@deprecation.deprecated(
    "2018-10-01",
    "The TensorFlow Distributions library has moved to "
    "TensorFlow Probability "
    "(https://github.com/tensorflow/probability). You "
    "should update all references to use `tfp.distributions` "
    "instead of `tf.distributions`.",
    warn_once=True)
def _kl_brute_force(a, b, name=None):
  """Batched KL divergence `KL(a || b)` for multivariate Normals.

  With `X`, `Y` both multivariate Normals in `R^k` with means `mu_a`, `mu_b`
  and covariance `C_a`, `C_b` respectively,

  ```
  KL(a || b) = 0.5 * ( L - k + T + Q ),
  L := Log[Det(C_b)] - Log[Det(C_a)]
  T := trace(C_b^{-1} C_a),
  Q := (mu_b - mu_a)^T C_b^{-1} (mu_b - mu_a),
  ```

  This `Op` computes the trace by solving `C_b^{-1} C_a`. Although efficient
  methods for solving systems with `C_b` may be available, a dense version of
  (the square root of) `C_a` is used, so performance is `O(B s k**2)` where
  `B` is the batch size, and `s` is the cost of solving `C_b x = y` for
  vectors `x` and `y`.

  Args:
    a: Instance of `MultivariateNormalLinearOperator`.
    b: Instance of `MultivariateNormalLinearOperator`.
    name: (optional) name to use for created ops. Default "kl_mvn".

  Returns:
    Batchwise `KL(a || b)`.
  """
  def squared_frobenius_norm(x):
    """Helper to make KL calculation slightly more readable."""
    # http://mathworld.wolfram.com/FrobeniusNorm.html
    # The gradient of KL[p,q] is not defined when p==q. The culprit is
    # linalg_ops.norm, i.e., we cannot use the commented out code.
    # return math_ops.square(linalg_ops.norm(x, ord="fro", axis=[-2, -1]))
    return math_ops.reduce_sum(math_ops.square(x), axis=[-2, -1])

  # TODO(b/35041439): See also b/35040945. Remove this function once LinOp
  # supports something like:
  #   A.inverse().solve(B).norm(order='fro', axis=[-1, -2])
  def is_diagonal(x):
    """Helper to identify if `LinearOperator` has only a diagonal component."""
    return (isinstance(x, linalg.LinearOperatorIdentity) or
            isinstance(x, linalg.LinearOperatorScaledIdentity) or
            isinstance(x, linalg.LinearOperatorDiag))

  with ops.name_scope(name, "kl_mvn", values=[a.loc, b.loc] +
                      a.scale.graph_parents + b.scale.graph_parents):
    # Calculation is based on:
    # http://stats.stackexchange.com/questions/60680/kl-divergence-between-two-multivariate-gaussians
    # and,
    # https://en.wikipedia.org/wiki/Matrix_norm#Frobenius_norm
    # i.e.,
    #   If Ca = AA', Cb = BB', then
    #   tr[inv(Cb) Ca] = tr[inv(B)' inv(B) A A']
    #                  = tr[inv(B) A A' inv(B)']
    #                  = tr[(inv(B) A) (inv(B) A)']
    #                  = sum_{ij} (inv(B) A)_{ij}**2
    #                  = ||inv(B) A||_F**2
    # where ||.||_F is the Frobenius norm and the second equality follows from
    # the cyclic permutation property.
    if is_diagonal(a.scale) and is_diagonal(b.scale):
      # Using `stddev` because it handles expansion of Identity cases.
      b_inv_a = (a.stddev() / b.stddev())[..., array_ops.newaxis]
    else:
      b_inv_a = b.scale.solve(a.scale.to_dense())
    kl_div = (b.scale.log_abs_determinant()
              - a.scale.log_abs_determinant()
              + 0.5 * (
                  - math_ops.cast(a.scale.domain_dimension_tensor(), a.dtype)
                  + squared_frobenius_norm(b_inv_a)
                  + squared_frobenius_norm(b.scale.solve(
                      (b.mean() - a.mean())[..., array_ops.newaxis]))))
    # The KL is a scalar per batch member; pin the static shape when known.
    kl_div.set_shape(array_ops.broadcast_static_shape(
        a.batch_shape, b.batch_shape))
    return kl_div
| |
# This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Test file manipulation functionality of Item.
"""
import shutil
import os
import stat
from os.path import join
import _common
from _common import unittest
from _common import item, touch
import beets.library
from beets import util
class MoveTest(_common.TestCase):
    """File-manipulation tests for Item.move(): move, copy, and link."""
    def setUp(self):
        super(MoveTest, self).setUp()
        # make a temporary file
        self.path = join(self.temp_dir, 'temp.mp3')
        shutil.copy(join(_common.RSRC, 'full.mp3'), self.path)
        # add it to a temporary library
        self.lib = beets.library.Library(':memory:')
        self.i = beets.library.Item.from_path(self.path)
        self.lib.add(self.i)
        # set up the destination
        self.libdir = join(self.temp_dir, 'testlibdir')
        os.mkdir(self.libdir)
        self.lib.directory = self.libdir
        self.lib.path_formats = [('default',
                                  join('$artist', '$album', '$title'))]
        self.i.artist = 'one'
        self.i.album = 'two'
        self.i.title = 'three'
        # Expected destination under the path format above.
        self.dest = join(self.libdir, 'one', 'two', 'three.mp3')
        self.otherdir = join(self.temp_dir, 'testotherdir')
    def test_move_arrives(self):
        self.i.move()
        self.assertExists(self.dest)
    def test_move_to_custom_dir(self):
        self.i.move(basedir=self.otherdir)
        self.assertExists(join(self.otherdir, 'one', 'two', 'three.mp3'))
    def test_move_departs(self):
        self.i.move()
        self.assertNotExists(self.path)
    def test_move_in_lib_prunes_empty_dir(self):
        self.i.move()
        old_path = self.i.path
        self.assertExists(old_path)
        # Changing the artist changes the destination; the second move
        # should remove the now-empty old directory.
        self.i.artist = 'newArtist'
        self.i.move()
        self.assertNotExists(old_path)
        self.assertNotExists(os.path.dirname(old_path))
    def test_copy_arrives(self):
        self.i.move(copy=True)
        self.assertExists(self.dest)
    def test_copy_does_not_depart(self):
        self.i.move(copy=True)
        self.assertExists(self.path)
    def test_move_changes_path(self):
        self.i.move()
        self.assertEqual(self.i.path, util.normpath(self.dest))
    def test_copy_already_at_destination(self):
        # A copy onto itself must leave the path unchanged.
        self.i.move()
        old_path = self.i.path
        self.i.move(copy=True)
        self.assertEqual(self.i.path, old_path)
    def test_move_already_at_destination(self):
        self.i.move()
        old_path = self.i.path
        self.i.move(copy=False)
        self.assertEqual(self.i.path, old_path)
    def test_read_only_file_copied_writable(self):
        # Make the source file read-only.
        os.chmod(self.path, 0444)
        try:
            self.i.move(copy=True)
            self.assertTrue(os.access(self.i.path, os.W_OK))
        finally:
            # Make everything writable so it can be cleaned up.
            os.chmod(self.path, 0777)
            os.chmod(self.i.path, 0777)
    def test_move_avoids_collision_with_existing_file(self):
        # Make a conflicting file at the destination.
        dest = self.i.destination()
        os.makedirs(os.path.dirname(dest))
        touch(dest)
        self.i.move()
        # The item must land next to, but not on top of, the conflict.
        self.assertNotEqual(self.i.path, dest)
        self.assertEqual(os.path.dirname(self.i.path),
                         os.path.dirname(dest))
    def test_link_arrives(self):
        self.i.move(link=True)
        self.assertExists(self.dest)
        self.assertTrue(os.path.islink(self.dest))
        self.assertEqual(os.readlink(self.dest), self.path)
    def test_link_does_not_depart(self):
        self.i.move(link=True)
        self.assertExists(self.path)
    def test_link_changes_path(self):
        self.i.move(link=True)
        self.assertEqual(self.i.path, util.normpath(self.dest))
class HelperTest(_common.TestCase):
    """Sanity checks for the pure path helpers in beets.util."""

    def test_ancestry_works_on_file(self):
        self.assertEqual(util.ancestry('/a/b/c'), ['/', '/a', '/a/b'])

    def test_ancestry_works_on_dir(self):
        self.assertEqual(util.ancestry('/a/b/c/'),
                         ['/', '/a', '/a/b', '/a/b/c'])

    def test_ancestry_works_on_relative(self):
        self.assertEqual(util.ancestry('a/b/c'), ['a', 'a/b'])

    def test_components_works_on_file(self):
        self.assertEqual(util.components('/a/b/c'), ['/', 'a', 'b', 'c'])

    def test_components_works_on_dir(self):
        self.assertEqual(util.components('/a/b/c/'), ['/', 'a', 'b', 'c'])

    def test_components_works_on_relative(self):
        self.assertEqual(util.components('a/b/c'), ['a', 'b', 'c'])
class AlbumFileTest(_common.TestCase):
    """Tests that album-level move/copy operations update item files."""

    def setUp(self):
        super(AlbumFileTest, self).setUp()
        # Make library and item.
        self.lib = beets.library.Library(':memory:')
        self.lib.path_formats = \
            [('default', join('$albumartist', '$album', '$title'))]
        self.libdir = os.path.join(self.temp_dir, 'testlibdir')
        self.lib.directory = self.libdir
        self.i = item(self.lib)
        # Make a file for the item.
        self.i.path = self.i.destination()
        util.mkdirall(self.i.path)
        touch(self.i.path)
        # Make an album.
        self.ai = self.lib.add_album((self.i,))
        # Alternate destination dir.
        self.otherdir = os.path.join(self.temp_dir, 'testotherdir')

    def test_albuminfo_move_changes_paths(self):
        self.ai.album = 'newAlbumName'
        self.ai.move()
        self.ai.store()
        self.i.load()
        # assertTrue replaces the deprecated TestCase.assert_ alias.
        self.assertTrue('newAlbumName' in self.i.path)

    def test_albuminfo_move_moves_file(self):
        oldpath = self.i.path
        self.ai.album = 'newAlbumName'
        self.ai.move()
        self.ai.store()
        self.i.load()
        self.assertFalse(os.path.exists(oldpath))
        self.assertTrue(os.path.exists(self.i.path))

    def test_albuminfo_move_copies_file(self):
        oldpath = self.i.path
        self.ai.album = 'newAlbumName'
        self.ai.move(True)
        self.ai.store()
        self.i.load()
        # Copy: both the old and the new file exist.
        self.assertTrue(os.path.exists(oldpath))
        self.assertTrue(os.path.exists(self.i.path))

    def test_albuminfo_move_to_custom_dir(self):
        self.ai.move(basedir=self.otherdir)
        self.i.load()
        self.ai.store()
        self.assertTrue('testotherdir' in self.i.path)
class ArtFileTest(_common.TestCase):
    """Tests that album art files follow their albums around on disk."""

    def setUp(self):
        super(ArtFileTest, self).setUp()
        # Make library and item.
        self.lib = beets.library.Library(':memory:')
        self.libdir = os.path.join(self.temp_dir, 'testlibdir')
        self.lib.directory = self.libdir
        self.i = item(self.lib)
        self.i.path = self.i.destination()
        # Make a music file.
        util.mkdirall(self.i.path)
        touch(self.i.path)
        # Make an album.
        self.ai = self.lib.add_album((self.i,))
        # Make an art file too.
        self.art = self.lib.get_album(self.i).art_destination('something.jpg')
        touch(self.art)
        self.ai.artpath = self.art
        self.ai.store()
        # Alternate destination dir.
        self.otherdir = os.path.join(self.temp_dir, 'testotherdir')

    def test_art_deleted_when_items_deleted(self):
        self.assertTrue(os.path.exists(self.art))
        self.ai.remove(True)
        self.assertFalse(os.path.exists(self.art))

    def test_art_moves_with_album(self):
        self.assertTrue(os.path.exists(self.art))
        oldpath = self.i.path
        self.ai.album = 'newAlbum'
        self.ai.move()
        self.i.load()
        self.assertNotEqual(self.i.path, oldpath)
        self.assertFalse(os.path.exists(self.art))
        newart = self.lib.get_album(self.i).art_destination(self.art)
        self.assertTrue(os.path.exists(newart))

    def test_art_moves_with_album_to_custom_dir(self):
        # Move the album to another directory.
        self.ai.move(basedir=self.otherdir)
        self.ai.store()
        self.i.load()
        # Art should be in new directory.
        self.assertNotExists(self.art)
        newart = self.lib.get_album(self.i).artpath
        self.assertExists(newart)
        self.assertTrue('testotherdir' in newart)

    def test_setart_copies_image(self):
        os.remove(self.art)
        newart = os.path.join(self.libdir, 'newart.jpg')
        touch(newart)
        i2 = item()
        i2.path = self.i.path
        i2.artist = 'someArtist'
        ai = self.lib.add_album((i2,))
        i2.move(True)
        self.assertEqual(ai.artpath, None)
        ai.set_art(newart)
        self.assertTrue(os.path.exists(ai.artpath))

    def test_setart_to_existing_art_works(self):
        os.remove(self.art)
        # Original art.
        newart = os.path.join(self.libdir, 'newart.jpg')
        touch(newart)
        i2 = item()
        i2.path = self.i.path
        i2.artist = 'someArtist'
        ai = self.lib.add_album((i2,))
        i2.move(True)
        ai.set_art(newart)
        # Set the art again.
        ai.set_art(ai.artpath)
        self.assertTrue(os.path.exists(ai.artpath))

    def test_setart_to_existing_but_unset_art_works(self):
        newart = os.path.join(self.libdir, 'newart.jpg')
        touch(newart)
        i2 = item()
        i2.path = self.i.path
        i2.artist = 'someArtist'
        ai = self.lib.add_album((i2,))
        i2.move(True)
        # Copy the art to the destination.
        artdest = ai.art_destination(newart)
        shutil.copy(newart, artdest)
        # Set the art again.
        ai.set_art(artdest)
        self.assertTrue(os.path.exists(ai.artpath))

    def test_setart_to_conflicting_file_gets_new_path(self):
        newart = os.path.join(self.libdir, 'newart.jpg')
        touch(newart)
        i2 = item()
        i2.path = self.i.path
        i2.artist = 'someArtist'
        ai = self.lib.add_album((i2,))
        i2.move(True)
        # Make a file at the destination.
        artdest = ai.art_destination(newart)
        touch(artdest)
        # Set the art.
        ai.set_art(newart)
        self.assertNotEqual(artdest, ai.artpath)
        self.assertEqual(os.path.dirname(artdest),
                         os.path.dirname(ai.artpath))

    def test_setart_sets_permissions(self):
        os.remove(self.art)
        newart = os.path.join(self.libdir, 'newart.jpg')
        touch(newart)
        # 0o-prefixed octal literals are valid on Python 2.6+ and
        # Python 3; the legacy 0400 form is a Python 3 syntax error.
        os.chmod(newart, 0o400)  # read-only
        try:
            i2 = item()
            i2.path = self.i.path
            i2.artist = 'someArtist'
            ai = self.lib.add_album((i2,))
            i2.move(True)
            ai.set_art(newart)
            mode = stat.S_IMODE(os.stat(ai.artpath).st_mode)
            self.assertTrue(mode & stat.S_IRGRP)
            self.assertTrue(os.access(ai.artpath, os.W_OK))
        finally:
            # Make everything writable so it can be cleaned up.
            os.chmod(newart, 0o777)
            os.chmod(ai.artpath, 0o777)

    def test_move_last_file_moves_albumart(self):
        oldartpath = self.lib.albums()[0].artpath
        self.assertExists(oldartpath)
        self.ai.album = 'different_album'
        self.ai.store()
        self.ai.items()[0].move()
        artpath = self.lib.albums()[0].artpath
        self.assertTrue('different_album' in artpath)
        self.assertExists(artpath)
        self.assertNotExists(oldartpath)

    def test_move_not_last_file_does_not_move_albumart(self):
        i2 = item()
        i2.albumid = self.ai.id
        self.lib.add(i2)
        oldartpath = self.lib.albums()[0].artpath
        self.assertExists(oldartpath)
        self.i.album = 'different_album'
        self.i.album_id = None  # detach from album
        self.i.move()
        artpath = self.lib.albums()[0].artpath
        self.assertFalse('different_album' in artpath)
        self.assertEqual(artpath, oldartpath)
        self.assertExists(oldartpath)
class RemoveTest(_common.TestCase):
    """Tests for removing items and pruning their emptied directories."""

    def setUp(self):
        super(RemoveTest, self).setUp()
        # Build an in-memory library with one on-disk item in an album.
        self.lib = beets.library.Library(':memory:')
        self.libdir = os.path.join(self.temp_dir, 'testlibdir')
        self.lib.directory = self.libdir
        self.i = item(self.lib)
        self.i.path = self.i.destination()
        util.mkdirall(self.i.path)
        touch(self.i.path)
        self.ai = self.lib.add_album((self.i,))

    def test_removing_last_item_prunes_empty_dir(self):
        item_dir = os.path.dirname(self.i.path)
        self.assertExists(item_dir)
        self.i.remove(True)
        self.assertNotExists(item_dir)

    def test_removing_last_item_preserves_nonempty_dir(self):
        item_dir = os.path.dirname(self.i.path)
        touch(os.path.join(item_dir, 'dummy.txt'))
        self.i.remove(True)
        self.assertExists(item_dir)

    def test_removing_last_item_prunes_dir_with_blacklisted_file(self):
        item_dir = os.path.dirname(self.i.path)
        touch(os.path.join(item_dir, '.DS_Store'))
        self.i.remove(True)
        self.assertNotExists(item_dir)

    def test_removing_without_delete_leaves_file(self):
        old_path = self.i.path
        self.i.remove(False)
        self.assertExists(old_path)

    def test_removing_last_item_preserves_library_dir(self):
        self.i.remove(True)
        self.assertExists(self.libdir)

    def test_removing_item_outside_of_library_deletes_nothing(self):
        self.lib.directory = os.path.join(self.temp_dir, 'xxx')
        item_dir = os.path.dirname(self.i.path)
        self.i.remove(True)
        self.assertExists(item_dir)

    def test_removing_last_item_in_album_with_albumart_prunes_dir(self):
        artfile = os.path.join(self.temp_dir, 'testart.jpg')
        touch(artfile)
        self.ai.set_art(artfile)
        self.ai.store()
        item_dir = os.path.dirname(self.i.path)
        self.i.remove(True)
        self.assertNotExists(item_dir)
class SoftRemoveTest(_common.TestCase):
    """Tests that we can "delete" nonexistent files without error."""

    def setUp(self):
        super(SoftRemoveTest, self).setUp()
        self.path = os.path.join(self.temp_dir, 'testfile')
        touch(self.path)

    def test_soft_remove_deletes_file(self):
        util.remove(self.path, True)
        self.assertNotExists(self.path)

    def test_soft_remove_silent_on_no_file(self):
        missing = self.path + 'XXX'
        try:
            util.remove(missing, True)
        except OSError:
            self.fail('OSError when removing path')
class SafeMoveCopyTest(_common.TestCase):
    """Tests for util.move/util.copy collision and self-reference handling."""

    def setUp(self):
        super(SafeMoveCopyTest, self).setUp()
        self.path = os.path.join(self.temp_dir, 'testfile')
        self.otherpath = os.path.join(self.temp_dir, 'testfile2')
        for existing in (self.path, self.otherpath):
            touch(existing)
        self.dest = self.path + '.dest'

    def test_successful_move(self):
        util.move(self.path, self.dest)
        self.assertExists(self.dest)
        self.assertNotExists(self.path)

    def test_successful_copy(self):
        util.copy(self.path, self.dest)
        self.assertExists(self.dest)
        self.assertExists(self.path)

    def test_unsuccessful_move(self):
        # Moving onto an existing file must raise, not clobber.
        with self.assertRaises(util.FilesystemError):
            util.move(self.path, self.otherpath)

    def test_unsuccessful_copy(self):
        with self.assertRaises(util.FilesystemError):
            util.copy(self.path, self.otherpath)

    def test_self_move(self):
        util.move(self.path, self.path)
        self.assertExists(self.path)

    def test_self_copy(self):
        util.copy(self.path, self.path)
        self.assertExists(self.path)
class PruneTest(_common.TestCase):
    """Tests for util.prune_dirs."""

    def setUp(self):
        super(PruneTest, self).setUp()
        self.base = os.path.join(self.temp_dir, 'testdir')
        self.sub = os.path.join(self.base, 'subdir')
        os.mkdir(self.base)
        os.mkdir(self.sub)

    def test_prune_existent_directory(self):
        util.prune_dirs(self.sub, self.base)
        self.assertExists(self.base)
        self.assertNotExists(self.sub)

    def test_prune_nonexistent_directory(self):
        # Pruning may start from a path that does not exist.
        util.prune_dirs(os.path.join(self.sub, 'another'), self.base)
        self.assertExists(self.base)
        self.assertNotExists(self.sub)
class WalkTest(_common.TestCase):
    """Tests for util.sorted_walk ordering and ignore patterns."""

    def setUp(self):
        super(WalkTest, self).setUp()
        self.base = os.path.join(self.temp_dir, 'testdir')
        os.mkdir(self.base)
        # Create files out of order to exercise the sorting.
        touch(os.path.join(self.base, 'y'))
        touch(os.path.join(self.base, 'x'))
        os.mkdir(os.path.join(self.base, 'd'))
        touch(os.path.join(self.base, 'd', 'z'))

    def test_sorted_files(self):
        walked = list(util.sorted_walk(self.base))
        self.assertEqual(len(walked), 2)
        self.assertEqual(walked[0], (self.base, ['d'], ['x', 'y']))
        self.assertEqual(walked[1],
                         (os.path.join(self.base, 'd'), [], ['z']))

    def test_ignore_file(self):
        walked = list(util.sorted_walk(self.base, ('x',)))
        self.assertEqual(len(walked), 2)
        self.assertEqual(walked[0], (self.base, ['d'], ['y']))
        self.assertEqual(walked[1],
                         (os.path.join(self.base, 'd'), [], ['z']))

    def test_ignore_directory(self):
        walked = list(util.sorted_walk(self.base, ('d',)))
        self.assertEqual(len(walked), 1)
        self.assertEqual(walked[0], (self.base, [], ['x', 'y']))

    def test_ignore_everything(self):
        walked = list(util.sorted_walk(self.base, ('*',)))
        self.assertEqual(len(walked), 1)
        self.assertEqual(walked[0], (self.base, [], []))
class UniquePathTest(_common.TestCase):
    """Tests for util.unique_path suffix-numbering on collisions."""

    def setUp(self):
        super(UniquePathTest, self).setUp()
        self.base = os.path.join(self.temp_dir, 'testdir')
        os.mkdir(self.base)
        for name in ('x.mp3', 'x.1.mp3', 'x.2.mp3', 'y.mp3'):
            touch(os.path.join(self.base, name))

    def test_new_file_unchanged(self):
        candidate = util.unique_path(os.path.join(self.base, 'z.mp3'))
        self.assertEqual(candidate, os.path.join(self.base, 'z.mp3'))

    def test_conflicting_file_appends_1(self):
        candidate = util.unique_path(os.path.join(self.base, 'y.mp3'))
        self.assertEqual(candidate, os.path.join(self.base, 'y.1.mp3'))

    def test_conflicting_file_appends_higher_number(self):
        candidate = util.unique_path(os.path.join(self.base, 'x.mp3'))
        self.assertEqual(candidate, os.path.join(self.base, 'x.3.mp3'))

    def test_conflicting_file_with_number_increases_number(self):
        candidate = util.unique_path(os.path.join(self.base, 'x.1.mp3'))
        self.assertEqual(candidate, os.path.join(self.base, 'x.3.mp3'))
class MkDirAllTest(_common.TestCase):
    """Tests for util.mkdirall: creates ancestors but never the leaf."""

    def test_parent_exists(self):
        target = os.path.join(self.temp_dir, 'foo', 'bar', 'baz', 'qux.mp3')
        util.mkdirall(target)
        self.assertTrue(os.path.isdir(
            os.path.join(self.temp_dir, 'foo', 'bar', 'baz')
        ))

    def test_child_does_not_exist(self):
        target = os.path.join(self.temp_dir, 'foo', 'bar', 'baz', 'qux.mp3')
        util.mkdirall(target)
        # Only the ancestors are created; the leaf itself must not be.
        self.assertTrue(not os.path.exists(target))
def suite():
    """Collect all tests in this module into a single test suite."""
    return unittest.TestLoader().loadTestsFromName(__name__)
# Run the module's suite when executed directly.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| |
# std
from collections import defaultdict
from functools import wraps
# 3rd party
from pysnmp.entity.rfc3413.oneliner import cmdgen
import pysnmp.proto.rfc1902 as snmp_type
from pysnmp.smi import builder
from pysnmp.smi.exval import noSuchInstance, noSuchObject
# project
from checks.network_checks import NetworkCheck, Status
from config import _is_affirmative
# Additional types that are not part of the SNMP protocol. cf RFC 2856
(CounterBasedGauge64, ZeroBasedCounter64) = builder.MibBuilder().importSymbols(
    "HCNUM-TC",
    "CounterBasedGauge64",
    "ZeroBasedCounter64")

# Metric type that we support.
# Values of these pysnmp classes are submitted as rates (see
# SnmpCheck.submit_metric); the comparison is done by class *name*
# because isSameTypeWith conflates e.g. CounterBasedGauge64/Counter64.
SNMP_COUNTERS = frozenset([
    snmp_type.Counter32.__name__,
    snmp_type.Counter64.__name__,
    ZeroBasedCounter64.__name__])

# Values of these classes are submitted as gauges.
SNMP_GAUGES = frozenset([
    snmp_type.Gauge32.__name__,
    snmp_type.Unsigned32.__name__,
    CounterBasedGauge64.__name__,
    snmp_type.Integer.__name__,
    snmp_type.Integer32.__name__])

# Number of OIDs queried per SNMP request when "oid_batch_size" is not
# set in init_config.
DEFAULT_OID_BATCH_SIZE = 10
def reply_invalid(oid):
    """Return True when *oid* is one of pysnmp's "no such instance" /
    "no such object" error markers rather than a real value."""
    return any(marker.isSameTypeWith(oid)
               for marker in (noSuchInstance, noSuchObject))
class SnmpCheck(NetworkCheck):
    """Agent check that polls devices over SNMP and submits the configured
    OIDs / MIB symbols as rates and gauges."""

    SOURCE_TYPE_NAME = 'system'
    cmd_generator = None
    # pysnmp default values
    DEFAULT_RETRIES = 5
    DEFAULT_TIMEOUT = 1
    # Service check name reported for each instance.
    SC_STATUS = 'snmp.can_check'

    def __init__(self, name, init_config, agentConfig, instances):
        # Default each instance's name to its IP and suppress the
        # per-instance events emitted by the base NetworkCheck.
        for instance in instances:
            if 'name' not in instance:
                instance['name'] = instance['ip_address']
            instance['skip_event'] = True
        # Set OID batch size.
        # NOTE(review): init_config is dereferenced here *before* the
        # None-check below — a None init_config would crash on this line.
        self.oid_batch_size = int(init_config.get("oid_batch_size", DEFAULT_OID_BATCH_SIZE))
        # Load Custom MIB directory
        self.mibs_path = None
        self.ignore_nonincreasing_oid = False
        if init_config is not None:
            self.mibs_path = init_config.get("mibs_folder")
            self.ignore_nonincreasing_oid = _is_affirmative(
                init_config.get("ignore_nonincreasing_oid", False))
        # Create SNMP command generator and aliases
        self.create_command_generator(self.mibs_path, self.ignore_nonincreasing_oid)
        NetworkCheck.__init__(self, name, init_config, agentConfig, instances)

    def _load_conf(self, instance):
        """Extract (ip_address, tags, metrics, timeout, retries) from an
        instance config, applying the class defaults."""
        tags = instance.get("tags", [])
        ip_address = instance["ip_address"]
        metrics = instance.get('metrics', [])
        timeout = int(instance.get('timeout', self.DEFAULT_TIMEOUT))
        retries = int(instance.get('retries', self.DEFAULT_RETRIES))
        return ip_address, tags, metrics, timeout, retries

    def snmp_logger(self, func):
        """
        Decorator to log, with DEBUG level, SNMP commands
        """
        @wraps(func)
        def wrapper(*args, **kwargs):
            # args[0] and args[1] are the auth data and transport target,
            # so args[2:] is the list of OIDs being queried.
            self.log.debug("Running SNMP command {0} on OIDS {1}"
                           .format(func.__name__, args[2:]))
            result = func(*args, **kwargs)
            # The last element of the pysnmp result tuple is the var binds.
            self.log.debug("Returned vars: {0}".format(result[-1]))
            return result
        return wrapper

    def create_command_generator(self, mibs_path, ignore_nonincreasing_oid):
        '''
        Create a command generator to perform all the snmp query.
        If mibs_path is not None, load the mibs present in the custom mibs
        folder. (Need to be in pysnmp format)
        '''
        self.cmd_generator = cmdgen.CommandGenerator()
        self.cmd_generator.ignoreNonIncreasingOid = ignore_nonincreasing_oid
        if mibs_path is not None:
            mib_builder = self.cmd_generator.snmpEngine.msgAndPduDsp.\
                mibInstrumController.mibBuilder
            mib_sources = mib_builder.getMibSources() + \
                (builder.DirMibSource(mibs_path), )
            mib_builder.setMibSources(*mib_sources)
        # Set aliases for snmpget and snmpgetnext with logging
        self.snmpget = self.snmp_logger(self.cmd_generator.getCmd)
        self.snmpgetnext = self.snmp_logger(self.cmd_generator.nextCmd)

    @classmethod
    def get_auth_data(cls, instance):
        '''
        Generate a Security Parameters object based on the instance's
        configuration.
        See http://pysnmp.sourceforge.net/docs/current/security-configuration.html
        '''
        if "community_string" in instance:
            # SNMP v1 - SNMP v2
            # See http://pysnmp.sourceforge.net/docs/current/security-configuration.html
            if int(instance.get("snmp_version", 2)) == 1:
                return cmdgen.CommunityData(instance['community_string'],
                                            mpModel=0)
            return cmdgen.CommunityData(instance['community_string'], mpModel=1)
        elif "user" in instance:
            # SNMP v3: HMAC-MD5 auth and DES privacy are assumed unless
            # authProtocol/privProtocol override them below.
            user = instance["user"]
            auth_key = None
            priv_key = None
            auth_protocol = None
            priv_protocol = None
            if "authKey" in instance:
                auth_key = instance["authKey"]
                auth_protocol = cmdgen.usmHMACMD5AuthProtocol
            if "privKey" in instance:
                priv_key = instance["privKey"]
                auth_protocol = cmdgen.usmHMACMD5AuthProtocol
                priv_protocol = cmdgen.usmDESPrivProtocol
            if "authProtocol" in instance:
                auth_protocol = getattr(cmdgen, instance["authProtocol"])
            if "privProtocol" in instance:
                priv_protocol = getattr(cmdgen, instance["privProtocol"])
            return cmdgen.UsmUserData(user,
                                      auth_key,
                                      priv_key,
                                      auth_protocol,
                                      priv_protocol)
        else:
            raise Exception("An authentication method needs to be provided")

    @classmethod
    def get_transport_target(cls, instance, timeout, retries):
        '''
        Generate a Transport target object based on the instance's configuration
        '''
        if "ip_address" not in instance:
            raise Exception("An IP address needs to be specified")
        ip_address = instance["ip_address"]
        port = int(instance.get("port", 161))  # Default SNMP port
        return cmdgen.UdpTransportTarget((ip_address, port), timeout=timeout, retries=retries)

    def raise_on_error_indication(self, error_indication, instance):
        """Raise (and record on the instance, for the service check) any
        transport-level SNMP error."""
        if error_indication:
            message = "{0} for instance {1}".format(error_indication,
                                                    instance["ip_address"])
            instance["service_check_error"] = message
            raise Exception(message)

    def check_table(self, instance, oids, lookup_names, timeout, retries):
        '''
        Perform a snmpwalk on the domain specified by the oids, on the device
        configured in instance.
        lookup_names is a boolean to specify whether or not to use the mibs to
        resolve the name and values.
        Returns a dictionary:
        dict[oid/metric_name][row index] = value
        In case of scalar objects, the row index is just 0
        '''
        # UPDATE: We used to perform only a snmpgetnext command to fetch metric values.
        # It returns the wrong value when the OID passed is referring to a specific leaf.
        # For example:
        # snmpgetnext -v2c -c public localhost:11111 1.36.1.2.1.25.4.2.1.7.222
        # iso.3.6.1.2.1.25.4.2.1.7.224 = INTEGER: 2
        # SOLUTION: perform a snmget command and fallback with snmpgetnext if not found
        transport_target = self.get_transport_target(instance, timeout, retries)
        auth_data = self.get_auth_data(instance)
        first_oid = 0
        all_binds = []
        results = defaultdict(dict)
        # Query the OIDs in batches of self.oid_batch_size.
        while first_oid < len(oids):
            # Start with snmpget command
            error_indication, error_status, error_index, var_binds = self.snmpget(
                auth_data,
                transport_target,
                *(oids[first_oid:first_oid + self.oid_batch_size]),
                lookupValues=lookup_names,
                lookupNames=lookup_names)
            first_oid = first_oid + self.oid_batch_size
            # Raise on error_indication
            self.raise_on_error_indication(error_indication, instance)
            # Split the replies into real values and "no such ..." markers.
            missing_results = []
            complete_results = []
            for var in var_binds:
                result_oid, value = var
                if reply_invalid(value):
                    oid_tuple = result_oid.asTuple()
                    oid = ".".join([str(i) for i in oid_tuple])
                    missing_results.append(oid)
                else:
                    complete_results.append(var)
            if missing_results:
                # If we didn't catch the metric using snmpget, try snmpnext
                error_indication, error_status, error_index, var_binds_table = self.snmpgetnext(
                    auth_data,
                    transport_target,
                    *missing_results,
                    lookupValues=lookup_names,
                    lookupNames=lookup_names)
                # Raise on error_indication
                self.raise_on_error_indication(error_indication, instance)
                if error_status:
                    message = "{0} for instance {1}".format(error_status.prettyPrint(),
                                                            instance["ip_address"])
                    instance["service_check_error"] = message
                    self.warning(message)
                # nextCmd returns a table: one list of var binds per row.
                for table_row in var_binds_table:
                    complete_results.extend(table_row)
            all_binds.extend(complete_results)
        # Index the results either by MIB symbol + row index (lookup_names)
        # or flat by dotted OID string.
        for result_oid, value in all_binds:
            if lookup_names:
                _, metric, indexes = result_oid.getMibSymbol()
                results[metric][indexes] = value
            else:
                oid = result_oid.asTuple()
                matching = ".".join([str(i) for i in oid])
                # NOTE: raw OIDs are stored flat (oid -> value), not in the
                # nested dict[oid][row] form used for MIB lookups.
                results[matching] = value
        self.log.debug("Raw results: {0}".format(results))
        return results

    def _check(self, instance):
        '''
        Perform two series of SNMP requests, one for all that have MIB associated
        and should be looked up and one for those specified by oids
        '''
        ip_address, tags, metrics, timeout, retries = self._load_conf(instance)
        tags += ['snmp_device:{0}'.format(ip_address)]
        table_oids = []
        raw_oids = []
        # Check the metrics completely defined
        for metric in metrics:
            if 'MIB' in metric:
                try:
                    assert "table" in metric or "symbol" in metric
                    to_query = metric.get("table", metric.get("symbol"))
                    table_oids.append(cmdgen.MibVariable(metric["MIB"], to_query))
                except Exception as e:
                    self.log.warning("Can't generate MIB object for variable : %s\n"
                                     "Exception: %s", metric, e)
            elif 'OID' in metric:
                raw_oids.append(metric['OID'])
            else:
                raise Exception('Unsupported metric in config file: %s' % metric)
        try:
            if table_oids:
                self.log.debug("Querying device %s for %s oids", ip_address, len(table_oids))
                table_results = self.check_table(instance, table_oids, True, timeout, retries)
                self.report_table_metrics(metrics, table_results, tags)
            if raw_oids:
                self.log.debug("Querying device %s for %s oids", ip_address, len(raw_oids))
                raw_results = self.check_table(instance, raw_oids, False, timeout, retries)
                self.report_raw_metrics(metrics, raw_results, tags)
        except Exception as e:
            if "service_check_error" not in instance:
                instance["service_check_error"] = "Fail to collect metrics: {0}".format(e)
            self.warning(instance["service_check_error"])
            # NOTE(review): this return value is always discarded — the
            # return statements in the finally block below override it
            # (a `return` in `finally` wins over any pending return or
            # exception). The CRITICAL status is therefore never reported.
            return [(self.SC_STATUS, Status.CRITICAL, instance["service_check_error"])]
        finally:
            # Report service checks
            # NOTE(review): this local is never used before the returns below.
            tags = ["snmp_device:%s" % ip_address]
            if "service_check_error" in instance:
                return [(self.SC_STATUS, Status.DOWN, instance["service_check_error"])]
            return [(self.SC_STATUS, Status.UP, None)]

    def report_as_service_check(self, sc_name, status, instance, msg=None):
        """Translate a (status, message) pair into an agent service check,
        tagged with the device IP plus the instance's own tags."""
        sc_tags = ['snmp_device:{0}'.format(instance["ip_address"])]
        custom_tags = instance.get('tags', [])
        tags = sc_tags + custom_tags
        self.service_check(sc_name,
                           NetworkCheck.STATUS_TO_SERVICE_CHECK[status],
                           tags=tags,
                           message=msg
                           )

    def report_raw_metrics(self, metrics, results, tags):
        '''
        For all the metrics that are specified as oid,
        the conf oid is going to be a prefix of the oid sent back by the device
        Use the instance configuration to find the name to give to the metric

        Submit the results to the aggregator.
        '''
        for metric in metrics:
            if 'OID' in metric:
                queried_oid = metric['OID']
                # First result whose OID starts with the configured prefix.
                for oid in results:
                    if oid.startswith(queried_oid):
                        value = results[oid]
                        break
                else:
                    self.log.warning("No matching results found for oid %s",
                                     queried_oid)
                    continue
                name = metric.get('name', 'unnamed_metric')
                self.submit_metric(name, value, tags)

    def report_table_metrics(self, metrics, results, tags):
        '''
        For each of the metrics specified as needing to be resolved with mib,
        gather the tags requested in the instance conf for each row.

        Submit the results to the aggregator.
        '''
        for metric in metrics:
            if 'table' in metric:
                # Partition the configured metric_tags into index-based and
                # column-based lookups.
                index_based_tags = []
                column_based_tags = []
                for metric_tag in metric.get('metric_tags', []):
                    tag_key = metric_tag['tag']
                    if 'index' in metric_tag:
                        index_based_tags.append((tag_key, metric_tag.get('index')))
                    elif 'column' in metric_tag:
                        column_based_tags.append((tag_key, metric_tag.get('column')))
                    else:
                        self.log.warning("No indication on what value to use for this tag")
                for value_to_collect in metric.get("symbols", []):
                    for index, val in results[value_to_collect].items():
                        metric_tags = tags + self.get_index_tags(index, results,
                                                                 index_based_tags,
                                                                 column_based_tags)
                        self.submit_metric(value_to_collect, val, metric_tags)
            elif 'symbol' in metric:
                name = metric['symbol']
                # NOTE(review): indexing items() like a list is Python-2-only;
                # a Python 3 port needs list(results[name].items()).
                result = results[name].items()
                if len(result) > 1:
                    self.log.warning("Several rows corresponding while the metric is supposed to be a scalar")
                    continue
                val = result[0][1]
                self.submit_metric(name, val, tags)
            elif 'OID' in metric:
                pass  # This one is already handled by the other batch of requests
            else:
                raise Exception('Unsupported metric in config file: %s' % metric)

    def get_index_tags(self, index, results, index_tags, column_tags):
        '''
        Gather the tags for this row of the table (index) based on the
        results (all the results from the query).
        index_tags and column_tags are the tags to gather.
         - Those specified in index_tags contain the tag_group name and the
           index of the value we want to extract from the index tuple.
           cf. 1 for ipVersion in the IP-MIB::ipSystemStatsTable for example
         - Those specified in column_tags contain the name of a column, which
           could be a potential result, to use as a tag
           cf. ifDescr in the IF-MIB::ifTable for example
        '''
        tags = []
        for idx_tag in index_tags:
            tag_group = idx_tag[0]
            try:
                # Config indexes are 1-based; the index tuple is 0-based.
                tag_value = index[idx_tag[1] - 1].prettyPrint()
            except IndexError:
                self.log.warning("Not enough indexes, skipping this tag")
                continue
            tags.append("{0}:{1}".format(tag_group, tag_value))
        for col_tag in column_tags:
            tag_group = col_tag[0]
            try:
                tag_value = results[col_tag[1]][index]
            except KeyError:
                self.log.warning("Column %s not present in the table, skipping this tag", col_tag[1])
                continue
            if reply_invalid(tag_value):
                self.log.warning("Can't deduct tag from column for tag %s",
                                 tag_group)
                continue
            tag_value = tag_value.prettyPrint()
            tags.append("{0}:{1}".format(tag_group, tag_value))
        return tags

    def submit_metric(self, name, snmp_value, tags=[]):
        '''
        Convert the values reported as pysnmp-Managed Objects to values and
        report them to the aggregator

        NOTE(review): the mutable default `tags=[]` is an anti-pattern; it is
        harmless here because the list is only read, never mutated.
        '''
        if reply_invalid(snmp_value):
            # Metrics not present in the queried object
            self.log.warning("No such Mib available: %s" % name)
            return
        metric_name = self.normalize(name, prefix="snmp")
        # Ugly hack but couldn't find a cleaner way
        # Proper way would be to use the ASN1 method isSameTypeWith but it
        # wrongfully returns True in the case of CounterBasedGauge64
        # and Counter64 for example
        snmp_class = snmp_value.__class__.__name__
        if snmp_class in SNMP_COUNTERS:
            value = int(snmp_value)
            self.rate(metric_name, value, tags)
            return
        if snmp_class in SNMP_GAUGES:
            value = int(snmp_value)
            self.gauge(metric_name, value, tags)
            return
        self.log.warning("Unsupported metric type %s", snmp_class)
| |
# -*- coding: utf-8 -*-
# Copyright (c)2013 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pyrax.client import BaseClient
import pyrax.exceptions as exc
from pyrax.manager import BaseManager
from pyrax.resource import BaseResource
import pyrax.utils as utils
# Constants to represent the 'special' network IDs.
PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000"   # public (internet)
SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111"  # private (ServiceNet)
# Placeholder networks that are not real, user-deletable resources.
PSEUDO_NETWORKS = (PUBLIC_NET_ID, SERVICE_NET_ID)
def _get_server_networks(network, public=False, private=False, key=None):
    """Build the list of network-ID dicts that Cloud Servers expects,
    optionally including the public and/or ServiceNet pseudo-networks.
    The dict key defaults to "net-id" (what novaclient expects)."""
    key = key or "net-id"
    networks = [{key: utils.get_id(network)}]
    if public:
        networks.append({key: PUBLIC_NET_ID})
    if private:
        networks.append({key: SERVICE_NET_ID})
    return networks
class CloudNetwork(BaseResource):
    """
    This represents a network in the cloud. It can be either an isolated
    network, the public network, or the ServiceNet network.

    While resources generally use 'name' as the text identifier, the Cloud
    Networks API uses 'label' instead. This module aliases the attributes and
    methods so that you can use either in your code.
    """
    id = None
    cidr = None
    label = None

    def _get_name(self):
        # 'name' is an alias for the API's 'label' attribute.
        return self.label

    def _set_name(self, name):
        self.label = name

    name = property(_get_name, _set_name)

    @property
    def is_isolated(self):
        """Returns True if this is a user-defined network."""
        return self.id not in PSEUDO_NETWORKS

    def get(self):
        """Refresh from the API; a no-op for the pseudo-networks."""
        if not self.is_isolated:
            # These are placeholders, not actual networks
            return
        return super(CloudNetwork, self).get()

    def delete(self):
        """
        Wraps the standard delete() method to catch expected exceptions and
        raise the appropriate pyrax exceptions.
        """
        try:
            return super(CloudNetwork, self).delete()
        except exc.Forbidden as e:
            # Network is in use
            raise exc.NetworkInUse("Cannot delete a network in use by a server.")

    def get_server_networks(self, public=False, private=False, key=None):
        """
        Creates the dict of network UUIDs required by Cloud Servers when
        creating a new server with isolated networks. By default, the UUID
        values are returned with the key of "net-id", which is what novaclient
        expects. Other tools may require different values, such as 'uuid'. If
        that is the case, pass the desired key as the 'key' parameter.

        By default only this network is included. If you wish to create a
        server that has either the public (internet) or private (ServiceNet)
        networks, you have to pass those parameters in with values of True.
        """
        return _get_server_networks(self, public=public, private=private,
                key=key)
class CloudNetworkManager(BaseManager):
    """
    Does nothing special, but is used in testing.
    """
    def _create_body(self, name, label=None, cidr=None):
        """
        Used to create the dict required to create a network. Accepts either
        'label' or 'name' as the keyword parameter for the label attribute.
        """
        return {"network": {
                "label": label or name,
                "cidr": cidr,
                }}
class CloudNetworkClient(BaseClient):
    """
    This is the base client for creating and managing Cloud Networks.
    """
    def __init__(self, *args, **kwargs):
        super(CloudNetworkClient, self).__init__(*args, **kwargs)
        self.name = "Cloud Networks"
        # Constants to represent the 'special' network IDs, mirrored on the
        # client instance for convenience.
        self.PUBLIC_NET_ID = PUBLIC_NET_ID
        self.SERVICE_NET_ID = SERVICE_NET_ID
        self.PSEUDO_NETWORKS = PSEUDO_NETWORKS

    def _configure_manager(self):
        """
        Creates the Manager instance to handle networks.
        """
        self._manager = CloudNetworkManager(self, resource_class=CloudNetwork,
                response_key="network", uri_base="os-networksv2")

    def create(self, label=None, name=None, cidr=None):
        """
        Wraps the basic create() call to handle specific failures.
        """
        try:
            return super(CloudNetworkClient, self).create(label=label,
                    name=name, cidr=cidr)
        except exc.BadRequest as e:
            # The API distinguishes failures only by message text, so
            # dispatch on substrings of it.
            # NOTE(review): 'e.message' is Python-2-style; confirm before
            # any Python 3 port.
            msg = e.message
            if "too many networks" in msg:
                raise exc.NetworkCountExceeded("Cannot create network; the "
                        "maximum number of isolated networks already exist.")
            elif "does not contain enough" in msg:
                raise exc.NetworkCIDRInvalid("Networks must contain two or "
                        "more hosts; the CIDR '%s' is too restrictive." % cidr)
            elif "CIDR is malformed" in msg:
                raise exc.NetworkCIDRMalformed("The CIDR '%s' is not valid." % cidr)
            else:
                # Something unexpected
                raise

    def delete(self, network):
        """
        Wraps the standard delete() method to catch expected exceptions and
        raise the appropriate pyrax exceptions.
        """
        try:
            return super(CloudNetworkClient, self).delete(network)
        except exc.Forbidden as e:
            # Network is in use
            raise exc.NetworkInUse("Cannot delete a network in use by a server.")

    def find_network_by_label(self, label):
        """
        This is inefficient; it gets all the networks and then filters on
        the client side to find the matching name.
        """
        networks = self.list()
        match = [network for network in networks
                if network.label == label]
        if not match:
            raise exc.NetworkNotFound("No network with the label '%s' exists" %
                    label)
        elif len(match) > 1:
            raise exc.NetworkLabelNotUnique("There were %s matches for the label "
                    "'%s'." % (len(match), label))
        return match[0]
    # Create an alias using 'name'
    find_network_by_name = find_network_by_label

    def get_server_networks(self, network, public=False, private=False,
            key=None):
        """
        Creates the dict of network UUIDs required by Cloud Servers when
        creating a new server with isolated networks. By default, the UUID
        values are returned with the key of "net-id", which is what novaclient
        expects. Other tools may require different values, such as 'uuid'. If
        that is the case, pass the desired key as the 'key' parameter.

        By default only this network is included. If you wish to create a
        server that has either the public (internet) or private (ServiceNet)
        networks, you have to pass those parameters in with values of True.
        """
        return _get_server_networks(network, public=public, private=private,
                key=key)
| |
"""
Tado component to create some sensors for each zone.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.tado/
"""
import logging
from homeassistant.const import TEMP_CELSIUS
from homeassistant.helpers.entity import Entity
from homeassistant.components.tado import (DATA_TADO)
_LOGGER = logging.getLogger(__name__)
# Keys used for the per-sensor records registered with the tado data store.
ATTR_DATA_ID = 'data_id'
ATTR_DEVICE = 'device'
ATTR_ID = 'id'
ATTR_NAME = 'name'
ATTR_ZONE = 'zone'
# One sensor of each of these variables is created per HEATING zone.
SENSOR_TYPES = ['temperature', 'humidity', 'power',
                'link', 'heating', 'tado mode', 'overlay']
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the sensor platform."""
    tado = hass.data[DATA_TADO]
    try:
        zones = tado.get_zones()
    except RuntimeError:
        _LOGGER.error("Unable to get zone info from mytado")
        return False
    # One sensor per (heating zone, variable) pair.
    sensors = [
        create_zone_sensor(tado, zone, zone['name'], zone['id'], variable)
        for zone in zones
        if zone['type'] == 'HEATING'
        for variable in SENSOR_TYPES
    ]
    me_data = tado.get_me()
    home = me_data['homes'][0]
    sensors.append(create_device_sensor(
        tado, me_data, home['name'], home['id'], "tado bridge status"))
    if not sensors:
        return False
    add_devices(sensors, True)
    return True
def create_zone_sensor(tado, zone, name, zone_id, variable):
    """Create a zone sensor."""
    data_id = 'zone {} {}'.format(name, zone_id)
    registration = {
        ATTR_ZONE: zone,
        ATTR_NAME: name,
        ATTR_ID: zone_id,
        ATTR_DATA_ID: data_id,
    }
    tado.add_sensor(data_id, registration)
    return TadoSensor(tado, name, zone_id, variable, data_id)
def create_device_sensor(tado, device, name, device_id, variable):
    """Create a device sensor."""
    data_id = 'device {} {}'.format(name, device_id)
    registration = {
        ATTR_DEVICE: device,
        ATTR_NAME: name,
        ATTR_ID: device_id,
        ATTR_DATA_ID: data_id,
    }
    tado.add_sensor(data_id, registration)
    return TadoSensor(tado, name, device_id, variable, data_id)
class TadoSensor(Entity):
    """Representation of a tado Sensor.

    ``update()`` reads the latest record for this sensor's data_id from
    the shared tado data store and maps it onto state/attributes.
    """

    def __init__(self, store, zone_name, zone_id, zone_variable, data_id):
        """Initialize of the Tado Sensor."""
        self._store = store
        self.zone_name = zone_name
        self.zone_id = zone_id
        self.zone_variable = zone_variable
        self._unique_id = '{} {}'.format(zone_variable, zone_id)
        self._data_id = data_id
        self._state = None
        self._state_attributes = None

    @property
    def unique_id(self):
        """Return the unique id."""
        return self._unique_id

    @property
    def name(self):
        """Return the name of the sensor."""
        return '{} {}'.format(self.zone_name, self.zone_variable)

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return self._state_attributes

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        if self.zone_variable == "temperature":
            return self.hass.config.units.temperature_unit
        elif self.zone_variable == "humidity":
            return '%'
        elif self.zone_variable == "heating":
            return '%'
        # Remaining variables (power, link, ...) are unit-less.

    @property
    def icon(self):
        """Icon for the sensor."""
        if self.zone_variable == "temperature":
            return 'mdi:thermometer'
        elif self.zone_variable == "humidity":
            return 'mdi:water-percent'

    def update(self):
        """Update method called when should_poll is true."""
        self._store.update()
        data = self._store.get_data(self._data_id)
        if data is None:
            # Typo fixed: message previously read "Recieved".
            _LOGGER.debug("Received no data for zone %s", self.zone_name)
            return
        # tado reports temperatures in celsius; convert to the configured unit.
        unit = TEMP_CELSIUS
        # pylint: disable=R0912
        if self.zone_variable == 'temperature':
            if 'sensorDataPoints' in data:
                sensor_data = data['sensorDataPoints']
                temperature = float(
                    sensor_data['insideTemperature']['celsius'])
                self._state = self.hass.config.units.temperature(
                    temperature, unit)
                self._state_attributes = {
                    "time":
                        sensor_data['insideTemperature']['timestamp'],
                    "setting": 0  # setting is used in climate device
                }
                # temperature setting will not exist when device is off
                if 'temperature' in data['setting'] and \
                        data['setting']['temperature'] is not None:
                    temperature = float(
                        data['setting']['temperature']['celsius'])
                    self._state_attributes["setting"] = \
                        self.hass.config.units.temperature(
                            temperature, unit)
        elif self.zone_variable == 'humidity':
            if 'sensorDataPoints' in data:
                sensor_data = data['sensorDataPoints']
                self._state = float(
                    sensor_data['humidity']['percentage'])
                self._state_attributes = {
                    "time": sensor_data['humidity']['timestamp'],
                }
        elif self.zone_variable == 'power':
            if 'setting' in data:
                self._state = data['setting']['power']
        elif self.zone_variable == 'link':
            if 'link' in data:
                self._state = data['link']['state']
        elif self.zone_variable == 'heating':
            if 'activityDataPoints' in data:
                activity_data = data['activityDataPoints']
                self._state = float(
                    activity_data['heatingPower']['percentage'])
                self._state_attributes = {
                    "time": activity_data['heatingPower']['timestamp'],
                }
        elif self.zone_variable == 'tado bridge status':
            if 'connectionState' in data:
                self._state = data['connectionState']['value']
        elif self.zone_variable == 'tado mode':
            if 'tadoMode' in data:
                self._state = data['tadoMode']
        elif self.zone_variable == 'overlay':
            # An active overlay means a manual override is in effect.
            if 'overlay' in data and data['overlay'] is not None:
                self._state = True
                self._state_attributes = {
                    "termination": data['overlay']['termination']['type'],
                }
            else:
                self._state = False
                self._state_attributes = {}
| |
from django.db import connection, transaction
from django.db.backends import util
from django.db.models import signals, get_model
from django.db.models.fields import AutoField, Field, IntegerField, PositiveIntegerField, PositiveSmallIntegerField, FieldDoesNotExist
from django.db.models.related import RelatedObject
from django.db.models.query import QuerySet
from django.db.models.query_utils import QueryWrapper
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy, string_concat, ungettext, ugettext as _
from django.utils.functional import curry
from django.core import exceptions
from django import forms
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
# Marker string that lets a model reference itself in a relation.
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
# Maps (app_label, model_name) -> [(cls, field, operation), ...] for string
# relations whose target model has not been loaded yet; drained by
# do_pending_lookups() when class_prepared fires.
pending_lookups = {}
def add_lazy_relation(cls, field, relation, operation):
    """
    Adds a lookup on ``cls`` when a related field is defined using a string,
    i.e.::
        class MyModel(Model):
            fk = ForeignKey("AnotherModel")
    This string can be:
        * RECURSIVE_RELATIONSHIP_CONSTANT (i.e. "self") to indicate a
          recursive relation.
        * The name of a model (i.e "AnotherModel") to indicate another model
          in the same app.
        * An app-label and model name (i.e. "someapp.AnotherModel") to
          indicate another model in a different app.
    If the other model hasn't yet been loaded -- almost a given if you're
    using lazy relationships -- then the relation won't be set up until the
    class_prepared signal fires at the end of model initialization.
    operation is the work performed once the relation can be resolved.
    """
    if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
        # "self": the relation points back at the declaring model.
        app_label, model_name = cls._meta.app_label, cls.__name__
    else:
        try:
            # "app.Model" style reference.
            app_label, model_name = relation.split(".")
        except ValueError:
            # Bare model name: assume a model in the current app.
            app_label, model_name = cls._meta.app_label, relation
    # If the target model is already loaded, resolve right away; otherwise
    # queue the operation until class_prepared fires for that model.
    model = get_model(app_label, model_name, False)
    if model:
        operation(field, model, cls)
    else:
        pending_lookups.setdefault((app_label, model_name), []).append(
            (cls, field, operation))
def do_pending_lookups(sender, **kwargs):
    """
    Handle any pending relations to the sending model. Sent from class_prepared.
    """
    lookup_key = (sender._meta.app_label, sender.__name__)
    for model_cls, rel_field, resolve in pending_lookups.pop(lookup_key, []):
        resolve(rel_field, sender, model_cls)
signals.class_prepared.connect(do_pending_lookups)
#HACK
class RelatedField(object):
    # Mixin shared by ForeignKey, OneToOneField and ManyToManyField: resolves
    # (possibly lazy/string) relation targets and prepares lookup values.
    def contribute_to_class(self, cls, name):
        # Hook called by the model metaclass when the field is added to a model.
        sup = super(RelatedField, self)
        # Add an accessor to allow easy determination of the related query path for this field
        self.related_query_name = curry(self._get_related_query_name, cls._meta)
        if hasattr(sup, 'contribute_to_class'):
            sup.contribute_to_class(cls, name)
        if not cls._meta.abstract and self.rel.related_name:
            # Allow "%(class)s" placeholders in related_name.
            self.rel.related_name = self.rel.related_name % {'class': cls.__name__.lower()}
        other = self.rel.to
        if isinstance(other, basestring):
            # Target given as a string: defer until that model is loaded.
            def resolve_related_class(field, model, cls):
                field.rel.to = model
                field.do_related_class(model, cls)
            add_lazy_relation(cls, self, other, resolve_related_class)
        else:
            self.do_related_class(other, cls)
    def set_attributes_from_rel(self):
        # Fill in name/verbose_name/field_name defaults from the target model.
        self.name = self.name or (self.rel.to._meta.object_name.lower() + '_' + self.rel.to._meta.pk.name)
        if self.verbose_name is None:
            self.verbose_name = self.rel.to._meta.verbose_name
        self.rel.field_name = self.rel.field_name or self.rel.to._meta.pk.name
    def do_related_class(self, other, cls):
        # Finish setup once the target model class is known.
        self.set_attributes_from_rel()
        related = RelatedObject(other, cls, self)
        if not cls._meta.abstract:
            self.contribute_to_related_class(other, related)
    def get_db_prep_lookup(self, lookup_type, value):
        # If we are doing a lookup on a Related Field, we must be
        # comparing object instances. The value should be the PK of value,
        # not value itself.
        def pk_trace(value):
            # Value may be a primary key, or an object held in a relation.
            # If it is an object, then we need to get the primary key value for
            # that object. In certain conditions (especially one-to-one relations),
            # the primary key may itself be an object - so we need to keep drilling
            # down until we hit a value that can be used for a comparison.
            v, field = value, None
            try:
                while True:
                    v, field = getattr(v, v._meta.pk.name), v._meta.pk
            except AttributeError:
                pass
            if field:
                if lookup_type in ('range', 'in'):
                    v = [v]
                v = field.get_db_prep_lookup(lookup_type, v)
                if isinstance(v, list):
                    v = v[0]
            return v
        if hasattr(value, 'as_sql'):
            # If the value has a relabel_aliases method, it will need to
            # be invoked before the final SQL is evaluated
            if hasattr(value, 'relabel_aliases'):
                return value
            sql, params = value.as_sql()
            return QueryWrapper(('(%s)' % sql), params)
        # FIXME: lt and gt are explicitally allowed to make
        # get_(next/prev)_by_date work; other lookups are not allowed since that
        # gets messy pretty quick. This is a good candidate for some refactoring
        # in the future.
        if lookup_type in ['exact', 'gt', 'lt']:
            return [pk_trace(value)]
        if lookup_type in ('range', 'in'):
            return [pk_trace(v) for v in value]
        elif lookup_type == 'isnull':
            return []
        raise TypeError, "Related Field has invalid lookup: %s" % lookup_type
    def _get_related_query_name(self, opts):
        # This method defines the name that can be used to identify this
        # related object in a table-spanning query. It uses the lower-cased
        # object_name by default, but this can be overridden with the
        # "related_name" option.
        return self.rel.related_name or opts.object_name.lower()
class SingleRelatedObjectDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # a single "remote" value, on the class pointed to by a related field.
    # In the example "place.restaurant", the restaurant attribute is a
    # SingleRelatedObjectDescriptor instance.
    def __init__(self, related):
        self.related = related
        self.cache_name = '_%s_cache' % related.get_accessor_name()
    def __get__(self, instance, instance_type=None):
        # Return the related object, caching it on the instance after the
        # first database lookup.
        if instance is None:
            return self
        try:
            return getattr(instance, self.cache_name)
        except AttributeError:
            params = {'%s__pk' % self.related.field.name: instance._get_pk_val()}
            rel_obj = self.related.model._default_manager.get(**params)
            setattr(instance, self.cache_name, rel_obj)
            return rel_obj
    def __set__(self, instance, value):
        if instance is None:
            raise AttributeError, "%s must be accessed via instance" % self.related.opts.object_name
        # The similarity of the code below to the code in
        # ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch
        # of small differences that would make a common base class convoluted.
        # If null=True, we can assign null here, but otherwise the value needs
        # to be an instance of the related class.
        if value is None and self.related.field.null == False:
            raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
                    (instance._meta.object_name, self.related.get_accessor_name()))
        elif value is not None and not isinstance(value, self.related.model):
            raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
                    (value, instance._meta.object_name,
                    self.related.get_accessor_name(), self.related.opts.object_name))
        # Set the value of the related field
        setattr(value, self.related.field.rel.get_related_field().attname, instance)
        # Since we already know what the related object is, seed the related
        # object caches now, too. This avoids another db hit if you get the
        # object you just set.
        setattr(instance, self.cache_name, value)
class ReverseSingleRelatedObjectDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # a single "remote" value, on the class that defines the related field.
    # In the example "choice.poll", the poll attribute is a
    # ReverseSingleRelatedObjectDescriptor instance.
    def __init__(self, field_with_rel):
        self.field = field_with_rel
    def __get__(self, instance, instance_type=None):
        # Return the related object, caching it on the instance after the
        # first database lookup.
        if instance is None:
            return self
        cache_name = self.field.get_cache_name()
        try:
            return getattr(instance, cache_name)
        except AttributeError:
            val = getattr(instance, self.field.attname)
            if val is None:
                # If NULL is an allowed value, return it.
                if self.field.null:
                    return None
                raise self.field.rel.to.DoesNotExist
            other_field = self.field.rel.get_related_field()
            if other_field.rel:
                params = {'%s__pk' % self.field.rel.field_name: val}
            else:
                params = {'%s__exact' % self.field.rel.field_name: val}
            # If the related manager indicates that it should be used for
            # related fields, respect that.
            rel_mgr = self.field.rel.to._default_manager
            if getattr(rel_mgr, 'use_for_related_fields', False):
                rel_obj = rel_mgr.get(**params)
            else:
                rel_obj = QuerySet(self.field.rel.to).get(**params)
            setattr(instance, cache_name, rel_obj)
            return rel_obj
    def __set__(self, instance, value):
        if instance is None:
            # BUG FIX: this previously read ``self._field.name``, but the
            # attribute is stored as ``self.field`` -- the typo raised a
            # confusing secondary AttributeError instead of this message.
            raise AttributeError("%s must be accessed via instance" %
                    self.field.name)
        # If null=True, we can assign null here, but otherwise the value needs
        # to be an instance of the related class.
        if value is None and self.field.null == False:
            raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
                                (instance._meta.object_name, self.field.name))
        elif value is not None and not isinstance(value, self.field.rel.to):
            raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
                                (value, instance._meta.object_name,
                                 self.field.name, self.field.rel.to._meta.object_name))
        # Set the value of the related field
        try:
            val = getattr(value, self.field.rel.get_related_field().attname)
        except AttributeError:
            val = None
        setattr(instance, self.field.attname, val)
        # Since we already know what the related object is, seed the related
        # object cache now, too. This avoids another db hit if you get the
        # object you just set.
        setattr(instance, self.field.get_cache_name(), value)
class ForeignRelatedObjectsDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # multiple "remote" values and have a ForeignKey pointed at them by
    # some other model. In the example "poll.choice_set", the choice_set
    # attribute is a ForeignRelatedObjectsDescriptor instance.
    def __init__(self, related):
        self.related = related # RelatedObject instance
    def __get__(self, instance, instance_type=None):
        # Build (on every access) a manager whose queryset is filtered down
        # to the objects whose FK points at ``instance``.
        if instance is None:
            return self
        rel_field = self.related.field
        rel_model = self.related.model
        # Dynamically create a class that subclasses the related
        # model's default manager.
        superclass = self.related.model._default_manager.__class__
        class RelatedManager(superclass):
            def get_query_set(self):
                return superclass.get_query_set(self).filter(**(self.core_filters))
            def add(self, *objs):
                # Point each object's FK at ``instance`` and save it.
                for obj in objs:
                    setattr(obj, rel_field.name, instance)
                    obj.save()
            add.alters_data = True
            def create(self, **kwargs):
                kwargs.update({rel_field.name: instance})
                return super(RelatedManager, self).create(**kwargs)
            create.alters_data = True
            def get_or_create(self, **kwargs):
                # Update kwargs with the related object that this
                # ForeignRelatedObjectsDescriptor knows about.
                kwargs.update({rel_field.name: instance})
                return super(RelatedManager, self).get_or_create(**kwargs)
            get_or_create.alters_data = True
            # remove() and clear() are only provided if the ForeignKey can have a value of null.
            if rel_field.null:
                def remove(self, *objs):
                    val = getattr(instance, rel_field.rel.get_related_field().attname)
                    for obj in objs:
                        # Is obj actually part of this descriptor set?
                        if getattr(obj, rel_field.attname) == val:
                            setattr(obj, rel_field.name, None)
                            obj.save()
                        else:
                            raise rel_field.rel.to.DoesNotExist, "%r is not related to %r." % (obj, instance)
                remove.alters_data = True
                def clear(self):
                    for obj in self.all():
                        setattr(obj, rel_field.name, None)
                        obj.save()
                clear.alters_data = True
        manager = RelatedManager()
        attname = rel_field.rel.get_related_field().name
        manager.core_filters = {'%s__%s' % (rel_field.name, attname):
                getattr(instance, attname)}
        manager.model = self.related.model
        return manager
    def __set__(self, instance, value):
        if instance is None:
            raise AttributeError, "Manager must be accessed via instance"
        manager = self.__get__(instance)
        # If the foreign key can support nulls, then completely clear the related set.
        # Otherwise, just move the named objects into the set.
        if self.related.field.null:
            manager.clear()
        manager.add(*value)
def create_many_related_manager(superclass, through=False):
    """Creates a manager that subclasses 'superclass' (which is a Manager)
    and adds behavior for many-to-many related objects."""
    # NOTE(review): the default for ``through`` is False, but add/remove below
    # are only defined when ``through is None`` -- callers are expected to pass
    # the rel's actual ``through`` value explicitly; confirm this is intended.
    class ManyRelatedManager(superclass):
        def __init__(self, model=None, core_filters=None, instance=None, symmetrical=None,
                join_table=None, source_col_name=None, target_col_name=None):
            super(ManyRelatedManager, self).__init__()
            self.core_filters = core_filters
            self.model = model
            self.symmetrical = symmetrical
            self.instance = instance
            self.join_table = join_table
            self.source_col_name = source_col_name
            self.target_col_name = target_col_name
            self.through = through
            # The m2m rows reference the instance's PK, so it must be saved.
            self._pk_val = self.instance._get_pk_val()
            if self._pk_val is None:
                raise ValueError("%r instance needs to have a primary key value before a many-to-many relationship can be used." % instance.__class__.__name__)
        def get_query_set(self):
            return superclass.get_query_set(self)._next_is_sticky().filter(**(self.core_filters))
        # If the ManyToMany relation has an intermediary model,
        # the add and remove methods do not exist.
        if through is None:
            def add(self, *objs):
                self._add_items(self.source_col_name, self.target_col_name, *objs)
                # If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
                if self.symmetrical:
                    self._add_items(self.target_col_name, self.source_col_name, *objs)
            add.alters_data = True
            def remove(self, *objs):
                self._remove_items(self.source_col_name, self.target_col_name, *objs)
                # If this is a symmetrical m2m relation to self, remove the mirror entry in the m2m table
                if self.symmetrical:
                    self._remove_items(self.target_col_name, self.source_col_name, *objs)
            remove.alters_data = True
        def clear(self):
            self._clear_items(self.source_col_name)
            # If this is a symmetrical m2m relation to self, clear the mirror entry in the m2m table
            if self.symmetrical:
                self._clear_items(self.target_col_name)
        clear.alters_data = True
        def create(self, **kwargs):
            # This check needs to be done here, since we can't later remove this
            # from the method lookup table, as we do with add and remove.
            if through is not None:
                raise AttributeError, "Cannot use create() on a ManyToManyField which specifies an intermediary model. Use %s's Manager instead." % through
            new_obj = super(ManyRelatedManager, self).create(**kwargs)
            self.add(new_obj)
            return new_obj
        create.alters_data = True
        def get_or_create(self, **kwargs):
            obj, created = \
                super(ManyRelatedManager, self).get_or_create(**kwargs)
            # We only need to add() if created because if we got an object back
            # from get() then the relationship already exists.
            if created:
                self.add(obj)
            return obj, created
        get_or_create.alters_data = True
        def _add_items(self, source_col_name, target_col_name, *objs):
            # join_table: name of the m2m link table
            # source_col_name: the PK colname in join_table for the source object
            # target_col_name: the PK colname in join_table for the target object
            # *objs - objects to add. Either object instances, or primary keys of object instances.
            # If there aren't any objects, there is nothing to do.
            if objs:
                # Check that all the objects are of the right type
                new_ids = set()
                for obj in objs:
                    if isinstance(obj, self.model):
                        new_ids.add(obj._get_pk_val())
                    else:
                        new_ids.add(obj)
                # Add the newly created or already existing objects to the join table.
                # First find out which items are already added, to avoid adding them twice
                cursor = connection.cursor()
                cursor.execute("SELECT %s FROM %s WHERE %s = %%s AND %s IN (%s)" % \
                    (target_col_name, self.join_table, source_col_name,
                    target_col_name, ",".join(['%s'] * len(new_ids))),
                    [self._pk_val] + list(new_ids))
                existing_ids = set([row[0] for row in cursor.fetchall()])
                # Add the ones that aren't there already
                for obj_id in (new_ids - existing_ids):
                    cursor.execute("INSERT INTO %s (%s, %s) VALUES (%%s, %%s)" % \
                        (self.join_table, source_col_name, target_col_name),
                        [self._pk_val, obj_id])
                transaction.commit_unless_managed()
        def _remove_items(self, source_col_name, target_col_name, *objs):
            # source_col_name: the PK colname in join_table for the source object
            # target_col_name: the PK colname in join_table for the target object
            # *objs - objects to remove
            # If there aren't any objects, there is nothing to do.
            if objs:
                # Check that all the objects are of the right type
                old_ids = set()
                for obj in objs:
                    if isinstance(obj, self.model):
                        old_ids.add(obj._get_pk_val())
                    else:
                        old_ids.add(obj)
                # Remove the specified objects from the join table
                cursor = connection.cursor()
                cursor.execute("DELETE FROM %s WHERE %s = %%s AND %s IN (%s)" % \
                    (self.join_table, source_col_name,
                    target_col_name, ",".join(['%s'] * len(old_ids))),
                    [self._pk_val] + list(old_ids))
                transaction.commit_unless_managed()
        def _clear_items(self, source_col_name):
            # source_col_name: the PK colname in join_table for the source object
            cursor = connection.cursor()
            cursor.execute("DELETE FROM %s WHERE %s = %%s" % \
                (self.join_table, source_col_name),
                [self._pk_val])
            transaction.commit_unless_managed()
    return ManyRelatedManager
class ManyRelatedObjectsDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # multiple "remote" values and have a ManyToManyField pointed at them by
    # some other model (rather than having a ManyToManyField themselves).
    # In the example "publication.article_set", the article_set attribute is a
    # ManyRelatedObjectsDescriptor instance.
    def __init__(self, related):
        self.related = related # RelatedObject instance
    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self
        # Dynamically create a class that subclasses the related
        # model's default manager.
        rel_model = self.related.model
        superclass = rel_model._default_manager.__class__
        RelatedManager = create_many_related_manager(superclass, self.related.field.rel.through)
        qn = connection.ops.quote_name
        # This is the reverse side of the m2m, so source/target columns are
        # swapped relative to ReverseManyRelatedObjectsDescriptor below.
        manager = RelatedManager(
            model=rel_model,
            core_filters={'%s__pk' % self.related.field.name: instance._get_pk_val()},
            instance=instance,
            symmetrical=False,
            join_table=qn(self.related.field.m2m_db_table()),
            source_col_name=qn(self.related.field.m2m_reverse_name()),
            target_col_name=qn(self.related.field.m2m_column_name())
        )
        return manager
    def __set__(self, instance, value):
        if instance is None:
            raise AttributeError, "Manager must be accessed via instance"
        through = getattr(self.related.field.rel, 'through', None)
        if through is not None:
            raise AttributeError, "Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s's Manager instead." % through
        # Replace the entire related set with ``value``.
        manager = self.__get__(instance)
        manager.clear()
        manager.add(*value)
class ReverseManyRelatedObjectsDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # multiple "remote" values and have a ManyToManyField defined in their
    # model (rather than having another model pointed *at* them).
    # In the example "article.publications", the publications attribute is a
    # ReverseManyRelatedObjectsDescriptor instance.
    def __init__(self, m2m_field):
        self.field = m2m_field
    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self
        # Dynamically create a class that subclasses the related
        # model's default manager.
        rel_model=self.field.rel.to
        superclass = rel_model._default_manager.__class__
        RelatedManager = create_many_related_manager(superclass, self.field.rel.through)
        qn = connection.ops.quote_name
        # Symmetry only applies to a self-referential m2m accessed from the
        # same model that defines it.
        manager = RelatedManager(
            model=rel_model,
            core_filters={'%s__pk' % self.field.related_query_name(): instance._get_pk_val()},
            instance=instance,
            symmetrical=(self.field.rel.symmetrical and instance.__class__ == rel_model),
            join_table=qn(self.field.m2m_db_table()),
            source_col_name=qn(self.field.m2m_column_name()),
            target_col_name=qn(self.field.m2m_reverse_name())
        )
        return manager
    def __set__(self, instance, value):
        if instance is None:
            raise AttributeError, "Manager must be accessed via instance"
        through = getattr(self.field.rel, 'through', None)
        if through is not None:
            raise AttributeError, "Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s's Manager instead." % through
        # Replace the entire related set with ``value``.
        manager = self.__get__(instance)
        manager.clear()
        manager.add(*value)
class ManyToOneRel(object):
    """Holds the metadata describing the target of a many-to-one relation."""
    def __init__(self, to, field_name, related_name=None,
            limit_choices_to=None, lookup_overrides=None, parent_link=False):
        try:
            to._meta
        except AttributeError:
            # Not a model class, so it must be a lazy string reference.
            assert isinstance(to, basestring), "'to' must be either a model, a model name or the string %r" % RECURSIVE_RELATIONSHIP_CONSTANT
        self.to = to
        self.field_name = field_name
        self.related_name = related_name
        if limit_choices_to is None:
            limit_choices_to = {}
        self.limit_choices_to = limit_choices_to
        self.lookup_overrides = lookup_overrides or {}
        self.multiple = True
        self.parent_link = parent_link
    def get_related_field(self):
        """
        Returns the Field in the 'to' object to which this relationship is
        tied.
        """
        info = self.to._meta.get_field_by_name(self.field_name)
        # info is (field, model, direct, m2m); a non-direct entry means the
        # name resolves to a reverse relation, not a concrete field.
        if not info[2]:
            raise FieldDoesNotExist("No related field named '%s'" %
                    self.field_name)
        return info[0]
class OneToOneRel(ManyToOneRel):
    """ManyToOneRel variant for one-to-one relations."""
    def __init__(self, to, field_name, related_name=None,
            limit_choices_to=None, lookup_overrides=None, parent_link=False):
        super(OneToOneRel, self).__init__(to, field_name,
                related_name=related_name,
                limit_choices_to=limit_choices_to,
                lookup_overrides=lookup_overrides,
                parent_link=parent_link)
        # A one-to-one relation points at exactly one remote object.
        self.multiple = False
class ManyToManyRel(object):
    """Holds the metadata for one side of a many-to-many relation."""
    def __init__(self, to, related_name=None, limit_choices_to=None,
            symmetrical=True, through=None):
        self.to = to
        self.related_name = related_name
        # An unrestricted relation is represented by an empty filter dict.
        if limit_choices_to is None:
            limit_choices_to = {}
        self.limit_choices_to = limit_choices_to
        self.symmetrical = symmetrical
        # Many-to-many relations always relate to multiple objects.
        self.multiple = True
        self.through = through
class ForeignKey(RelatedField, Field):
    """A many-to-one relation: each instance holds a key to one 'to' row."""
    # '' is never a meaningful FK value; get_db_prep_save() maps it to NULL.
    empty_strings_allowed = False
    def __init__(self, to, to_field=None, rel_class=ManyToOneRel, **kwargs):
        try:
            to_name = to._meta.object_name.lower()
        except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
            assert isinstance(to, basestring), "%s(%r) is invalid. First parameter to ForeignKey must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
        else:
            assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
            # Default the target field to the related model's primary key.
            to_field = to_field or to._meta.pk.name
        kwargs['verbose_name'] = kwargs.get('verbose_name', None)
        kwargs['rel'] = rel_class(to, to_field,
            related_name=kwargs.pop('related_name', None),
            limit_choices_to=kwargs.pop('limit_choices_to', None),
            lookup_overrides=kwargs.pop('lookup_overrides', None),
            parent_link=kwargs.pop('parent_link', False))
        Field.__init__(self, **kwargs)
        # FK columns are used in joins constantly; always index them.
        self.db_index = True
    def get_attname(self):
        """Name of the attribute holding the raw key value, e.g. 'poll_id'."""
        return '%s_id' % self.name
    def get_validator_unique_lookup_type(self):
        return '%s__%s__exact' % (self.name, self.rel.get_related_field().name)
    def get_default(self):
        "Here we check if the default value is an object and return the to_field if so."
        field_default = super(ForeignKey, self).get_default()
        if isinstance(field_default, self.rel.to):
            return getattr(field_default, self.rel.get_related_field().attname)
        return field_default
    def get_db_prep_save(self, value):
        # FIX: compare to None with ``is`` -- ``== None`` invokes custom
        # __eq__ methods and can misfire for objects comparing equal to None.
        if value == '' or value is None:
            # An unset FK is stored as NULL.
            return None
        else:
            return self.rel.get_related_field().get_db_prep_save(value)
    def value_to_string(self, obj):
        if not obj:
            # In required many-to-one fields with only one available choice,
            # select that one available choice. Note: For SelectFields
            # we have to check that the length of choices is *2*, not 1,
            # because SelectFields always have an initial "blank" value.
            if not self.blank and self.choices:
                choice_list = self.get_choices_default()
                if len(choice_list) == 2:
                    return smart_unicode(choice_list[1][0])
        return Field.value_to_string(self, obj)
    def contribute_to_class(self, cls, name):
        super(ForeignKey, self).contribute_to_class(cls, name)
        # Attribute access on the field name returns the related instance.
        setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
        if isinstance(self.rel.to, basestring):
            target = self.rel.to
        else:
            target = self.rel.to._meta.db_table
        cls._meta.duplicate_targets[self.column] = (target, "o2m")
    def contribute_to_related_class(self, cls, related):
        setattr(cls, related.get_accessor_name(), ForeignRelatedObjectsDescriptor(related))
    def formfield(self, **kwargs):
        defaults = {
            'form_class': forms.ModelChoiceField,
            'queryset': self.rel.to._default_manager.complex_filter(
                self.rel.limit_choices_to),
            'to_field_name': self.rel.field_name,
        }
        defaults.update(kwargs)
        return super(ForeignKey, self).formfield(**defaults)
    def db_type(self):
        # The database column type of a ForeignKey is the column type
        # of the field to which it points. An exception is if the ForeignKey
        # points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
        # in which case the column type is simply that of an IntegerField.
        # If the database needs similar types for key fields however, the only
        # thing we can do is making AutoField an IntegerField.
        rel_field = self.rel.get_related_field()
        if (isinstance(rel_field, AutoField) or
                (not connection.features.related_fields_match_type and
                isinstance(rel_field, (PositiveIntegerField,
                    PositiveSmallIntegerField)))):
            return IntegerField().db_type()
        return rel_field.db_type()
class OneToOneField(ForeignKey):
    """
    A OneToOneField is essentially the same as a ForeignKey, with the exception
    that always carries a "unique" constraint with it and the reverse relation
    always returns the object pointed to (since there will only ever be one),
    rather than returning a list.
    """
    def __init__(self, to, to_field=None, **kwargs):
        # Force the unique constraint that distinguishes o2o from a FK.
        kwargs['unique'] = True
        super(OneToOneField, self).__init__(to, to_field, OneToOneRel, **kwargs)
    def contribute_to_related_class(self, cls, related):
        # The reverse accessor returns a single object, not a manager.
        setattr(cls, related.get_accessor_name(),
                SingleRelatedObjectDescriptor(related))
    def formfield(self, **kwargs):
        # A parent_link o2o is an implementation detail of model
        # inheritance and should not appear in forms.
        if self.rel.parent_link:
            return None
        return super(OneToOneField, self).formfield(**kwargs)
class ManyToManyField(RelatedField, Field):
    """A many-to-many relation to ``to``, stored either in an auto-created
    join table or in an explicit ``through`` intermediary model.
    """

    def __init__(self, to, **kwargs):
        try:
            assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
        except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
            # BUG FIX: was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit. Only AttributeError signals
            # that ``to`` is a string reference rather than a model class.
            assert isinstance(to, basestring), "%s(%r) is invalid. First parameter to ManyToManyField must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
        kwargs['verbose_name'] = kwargs.get('verbose_name', None)
        kwargs['rel'] = ManyToManyRel(to,
            related_name=kwargs.pop('related_name', None),
            limit_choices_to=kwargs.pop('limit_choices_to', None),
            symmetrical=kwargs.pop('symmetrical', True),
            through=kwargs.pop('through', None))
        self.db_table = kwargs.pop('db_table', None)
        if kwargs['rel'].through is not None:
            # An explicit intermediary model owns the join table.
            self.creates_table = False
            assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."
        else:
            self.creates_table = True
        Field.__init__(self, **kwargs)
        msg = ugettext_lazy('Hold down "Control", or "Command" on a Mac, to select more than one.')
        self.help_text = string_concat(self.help_text, ' ', msg)

    def get_choices_default(self):
        """Choices without the blank option: m2m has no 'empty' choice."""
        return Field.get_choices(self, include_blank=False)

    def _get_m2m_db_table(self, opts):
        "Function that can be curried to provide the m2m table name for this relation"
        if self.rel.through is not None:
            return self.rel.through_model._meta.db_table
        elif self.db_table:
            return self.db_table
        else:
            # Auto-generate, truncated to the backend's identifier limit.
            return util.truncate_name('%s_%s' % (opts.db_table, self.name),
                                      connection.ops.max_name_length())

    def _get_m2m_column_name(self, related):
        "Function that can be curried to provide the source column name for the m2m table"
        try:
            return self._m2m_column_name_cache
        except AttributeError:
            # BUG FIX: was a bare ``except:``; only a missing cache
            # attribute should trigger recomputation.
            if self.rel.through is not None:
                for f in self.rel.through_model._meta.fields:
                    if hasattr(f,'rel') and f.rel and f.rel.to == related.model:
                        self._m2m_column_name_cache = f.column
                        break
            # If this is an m2m relation to self, avoid the inevitable name clash
            elif related.model == related.parent_model:
                self._m2m_column_name_cache = 'from_' + related.model._meta.object_name.lower() + '_id'
            else:
                self._m2m_column_name_cache = related.model._meta.object_name.lower() + '_id'
            # Return the newly cached value
            return self._m2m_column_name_cache

    def _get_m2m_reverse_name(self, related):
        "Function that can be curried to provide the related column name for the m2m table"
        try:
            return self._m2m_reverse_name_cache
        except AttributeError:
            # BUG FIX: was a bare ``except:``; see _get_m2m_column_name.
            if self.rel.through is not None:
                found = False
                for f in self.rel.through_model._meta.fields:
                    if hasattr(f,'rel') and f.rel and f.rel.to == related.parent_model:
                        if related.model == related.parent_model:
                            # If this is an m2m-intermediate to self,
                            # the first foreign key you find will be
                            # the source column. Keep searching for
                            # the second foreign key.
                            if found:
                                self._m2m_reverse_name_cache = f.column
                                break
                            else:
                                found = True
                        else:
                            self._m2m_reverse_name_cache = f.column
                            break
            # If this is an m2m relation to self, avoid the inevitable name clash
            elif related.model == related.parent_model:
                self._m2m_reverse_name_cache = 'to_' + related.parent_model._meta.object_name.lower() + '_id'
            else:
                self._m2m_reverse_name_cache = related.parent_model._meta.object_name.lower() + '_id'
            # Return the newly cached value
            return self._m2m_reverse_name_cache

    def isValidIDList(self, field_data, all_data):
        "Validates that the value is a valid list of foreign keys"
        mod = self.rel.to
        try:
            pks = map(int, field_data.split(','))
        except ValueError:
            # the CommaSeparatedIntegerField validator will catch this error
            return
        objects = mod._default_manager.in_bulk(pks)
        if len(objects) != len(pks):
            badkeys = [k for k in pks if k not in objects]
            raise exceptions.ValidationError(
                ungettext("Please enter valid %(self)s IDs. The value %(value)r is invalid.",
                    "Please enter valid %(self)s IDs. The values %(value)r are invalid.",
                    len(badkeys)) % {
                    'self': self.verbose_name,
                    'value': len(badkeys) == 1 and badkeys[0] or tuple(badkeys),
                })

    def value_to_string(self, obj):
        """Serialize the related objects' primary keys to a unicode string."""
        data = ''
        if obj:
            qs = getattr(obj, self.name).all()
            data = [instance._get_pk_val() for instance in qs]
        else:
            # In required many-to-many fields with only one available choice,
            # select that one available choice.
            if not self.blank:
                choices_list = self.get_choices_default()
                if len(choices_list) == 1:
                    data = [choices_list[0][0]]
        return smart_unicode(data)

    def contribute_to_class(self, cls, name):
        """Hook this m2m into the model class: descriptor, table-name
        accessor and lazy ``through`` resolution."""
        # To support multiple relations to self, it's useful to have a non-None
        # related name on symmetrical relations for internal reasons. The
        # concept doesn't make a lot of sense externally ("you want me to
        # specify *what* on my non-reversible relation?!"), so we set it up
        # automatically. The funky name reduces the chance of an accidental
        # clash.
        if self.rel.symmetrical and self.rel.to == "self" and self.rel.related_name is None:
            self.rel.related_name = "%s_rel_+" % name
        super(ManyToManyField, self).contribute_to_class(cls, name)
        # Add the descriptor for the m2m relation
        setattr(cls, self.name, ReverseManyRelatedObjectsDescriptor(self))
        # Set up the accessor for the m2m table name for the relation
        self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
        # Populate some necessary rel arguments so that cross-app relations
        # work correctly.
        if isinstance(self.rel.through, basestring):
            def resolve_through_model(field, model, cls):
                field.rel.through_model = model
            add_lazy_relation(cls, self, self.rel.through, resolve_through_model)
        elif self.rel.through:
            self.rel.through_model = self.rel.through
            self.rel.through = self.rel.through._meta.object_name
        if isinstance(self.rel.to, basestring):
            target = self.rel.to
        else:
            target = self.rel.to._meta.db_table
        cls._meta.duplicate_targets[self.column] = (target, "m2m")

    def contribute_to_related_class(self, cls, related):
        """Install the reverse descriptor and m2m column-name accessors."""
        # m2m relations to self do not have a ManyRelatedObjectsDescriptor,
        # as it would be redundant - unless the field is non-symmetrical.
        if related.model != related.parent_model or not self.rel.symmetrical:
            # Add the descriptor for the m2m relation
            setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(related))
        # Set up the accessors for the column names on the m2m table
        self.m2m_column_name = curry(self._get_m2m_column_name, related)
        self.m2m_reverse_name = curry(self._get_m2m_reverse_name, related)

    def set_attributes_from_rel(self):
        # An m2m field has no column of its own, so there is nothing to copy
        # from the related field.
        pass

    def value_from_object(self, obj):
        "Returns the value of this field in the given model instance."
        return getattr(obj, self.attname).all()

    def save_form_data(self, instance, data):
        setattr(instance, self.attname, data)

    def formfield(self, **kwargs):
        """Build the default form field: a ModelMultipleChoiceField over
        the related model's default manager."""
        defaults = {'form_class': forms.ModelMultipleChoiceField, 'queryset': self.rel.to._default_manager.complex_filter(self.rel.limit_choices_to)}
        defaults.update(kwargs)
        # If initial is passed in, it's a list of related objects, but the
        # MultipleChoiceField takes a list of IDs.
        if defaults.get('initial') is not None:
            defaults['initial'] = [i._get_pk_val() for i in defaults['initial']]
        return super(ManyToManyField, self).formfield(**defaults)

    def db_type(self):
        # A ManyToManyField is not represented by a single column,
        # so return None.
        return None
| |
#!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
#-*- coding:utf-8 -*-
import datetime
import os
import shutil
import sys
import yaml
def gen_report_header(car_type, p, output_dir):
    """Generate the C++ header for a *report* protocol ``p``.

    Fills template/report_protocol.h.tpl and writes <name>.h into
    ``output_dir``. One const accessor is declared per signal in
    ``p["vars"]``.
    """
    report_header_tpl_file = "template/report_protocol.h.tpl"
    FMT = get_tpl_fmt(report_header_tpl_file)
    report_header_file = output_dir + "%s.h" % p["name"]
    with open(report_header_file, 'w') as h_fp:
        fmt_val = {}
        fmt_val["car_type_lower"] = car_type.lower()
        fmt_val["car_type_upper"] = car_type.upper()
        fmt_val["protocol_name_upper"] = p["name"].upper()
        fmt_val["classname"] = p["name"].replace('_', '').capitalize()
        func_declare_list = []
        for var in p["vars"]:
            fmt = """
  // config detail: %s
  %s %s(const std::uint8_t* bytes, const int32_t length) const;"""
            returntype = var["type"]
            if var["type"] == "enum":
                # Enum signals return the protocol-scoped enum type.
                returntype = p["name"].capitalize(
                ) + "::" + var["name"].capitalize() + "Type"
            declare = fmt % (str(var), returntype, var["name"].lower())
            func_declare_list.append(declare)
        fmt_val["func_declare_list"] = "\n".join(func_declare_list)
        h_fp.write(FMT % fmt_val)
def gen_report_cpp(car_type, p, output_dir):
    """Generate the C++ source for a *report* protocol ``p``.

    For each signal, emits a parse accessor (raw-bit extraction followed
    by offset/precision conversion) and a line that copies the parsed
    value into the chassis detail proto.
    """
    report_cpp_tpl_file = "template/report_protocol.cc.tpl"
    FMT = get_tpl_fmt(report_cpp_tpl_file)
    report_cpp_file = output_dir + "%s.cc" % p["name"]
    with open(report_cpp_file, 'w') as fp:
        fmt_val = {}
        fmt_val["car_type_lower"] = car_type
        fmt_val["protocol_name_lower"] = p["name"]
        classname = p["name"].replace('_', '').capitalize()
        fmt_val["classname"] = classname
        fmt_val["id_upper"] = p["id"].upper()
        set_var_to_protocol_list = []
        func_impl_list = []
        for var in p["vars"]:
            # Normalised in place: later generators rely on the lowercase name.
            var["name"] = var["name"].lower()
            returntype = var["type"]
            if var["type"] == "enum":
                returntype = p["name"].capitalize(
                ) + "::" + var["name"].capitalize() + "Type"
            # gen func top
            fmt = """
  // config detail: %s
  %s %s::%s(const std::uint8_t* bytes, int32_t length) const {"""
            impl = fmt % (str(var), returntype, classname, var["name"])
            byte_info = get_byte_info(var)
            impl = impl + gen_parse_value_impl(var, byte_info)
            impl = impl + gen_report_value_offset_precision(var, p)
            impl = impl + "}"
            func_impl_list.append(impl)
            proto_set_fmt = "  chassis->mutable_%s()->mutable_%s()->set_%s(%s(bytes, length));"
            func_name = var["name"]
            proto_set = proto_set_fmt % (car_type, p["name"], var["name"],
                                         func_name)
            set_var_to_protocol_list.append(proto_set)
        fmt_val["set_var_to_protocol_list"] = "\n".join(
            set_var_to_protocol_list)
        fmt_val["func_impl_list"] = "\n".join(func_impl_list)
        fp.write(FMT % fmt_val)
def gen_report_value_offset_precision(var, p):
    """Build the tail of a report accessor: sign-extend the raw value,
    apply precision/offset scaling and return the typed result."""
    body = ""
    if var["is_signed_var"]:
        # x is held in an int32_t: shift left then arithmetic-shift right
        # to sign-extend a len-bit raw value.
        shift = 32 - var["len"]
        body += "\n  x <<= %d;\n  x >>= %d;\n" % (shift, shift)
    ret_type = var["type"]
    if var["type"] == "enum":
        ret_type = p["name"].capitalize() + "::" + var["name"].capitalize(
        ) + "Type"
    body += "\n  " + ret_type + " ret = "
    if var["type"] == "enum":
        body += " static_cast<" + ret_type + ">(x);\n"
    else:
        body += "x"
    if var["precision"] != 1.0:
        body += " * %f" % var["precision"]
    if var["offset"] != 0.0:
        body += " + %f" % (var["offset"])
    body += ";\n"
    return body + "  return ret;\n"
def gen_parse_value_impl(var, byte_info):
    """Build the C++ statements that read a signal's raw bits from the
    CAN frame, accumulating them msb-byte-first into int32_t x."""
    byte_decl = "\n  Byte t%d(bytes + %d);\n"
    pieces = []
    for idx, info in enumerate(byte_info):
        pieces.append(byte_decl % (idx, info["byte"]))
        if idx == 0:
            # Most significant byte: declare the accumulator x.
            pieces.append("  int32_t x = t%d.get_byte(%d, %d);\n" %
                          (idx, info["start_bit"], info["len"]))
        elif idx == 1:
            # Second byte: declare the scratch variable t as well.
            pieces.append(
                "  int32_t t = t%d.get_byte(%d, %d);\n  x <<= %d;\n  x |= t;\n" %
                (idx, info["start_bit"], info["len"], info["len"]))
        else:
            pieces.append(
                "  t = t%d.get_byte(%d, %d);\n  x <<= %d;\n  x |= t;\n" %
                (idx, info["start_bit"], info["len"], info["len"]))
    return "".join(pieces)
def gen_control_header(car_type, p, output_dir):
    """Generate the C++ header for a *control* protocol ``p``.

    Declares, per signal: a fluent public setter, a private raw-encode
    helper, and a private member variable holding the pending value.
    """
    control_header_tpl_file = "template/control_protocol.h.tpl"
    FMT = get_tpl_fmt(control_header_tpl_file)
    control_header_file = output_dir + "%s.h" % p["name"]
    with open(control_header_file, 'w') as h_fp:
        fmt_val = {}
        fmt_val["car_type_lower"] = car_type
        fmt_val["car_type_upper"] = car_type.upper()
        fmt_val["protocol_name_upper"] = p["name"].upper()
        classname = p["name"].replace('_', '').capitalize()
        fmt_val["classname"] = classname
        declare_public_func_list = []
        declare_private_func_list = []
        declare_private_var_list = []
        fmtpub = "\n  // config detail: %s\n  %s* set_%s(%s %s);"
        fmtpri = "\n  // config detail: %s\n  void set_p_%s(uint8_t* data, %s %s);"
        for var in p["vars"]:
            returntype = var["type"]
            if var["type"] == "enum":
                # Enum signals use the protocol-scoped enum type.
                returntype = p["name"].capitalize(
                ) + "::" + var["name"].capitalize() + "Type"
            private_var = ""
            public_func_declare = fmtpub % (str(var), classname,
                                            var["name"].lower(), returntype,
                                            var["name"].lower())
            private_func_declare = fmtpri % (str(var), var["name"].lower(),
                                             returntype, var["name"].lower())
            private_var = "  %s %s_;" % (returntype, var["name"].lower())
            declare_private_var_list.append(private_var)
            declare_public_func_list.append(public_func_declare)
            declare_private_func_list.append(private_func_declare)
        fmt_val["declare_public_func_list"] = "\n".join(
            declare_public_func_list)
        fmt_val["declare_private_func_list"] = "\n".join(
            declare_private_func_list)
        fmt_val["declare_private_var_list"] = "\n".join(
            declare_private_var_list)
        h_fp.write(FMT % fmt_val)
def get_byte_info(var):
    """
    doc string: https://wenku.baidu.com/view/3fe9a7a4dd3383c4bb4cd293.html
    u can reference this link to known the difference between motorola and intel encoding
    return : the byte info of a variable in the protocol how many bytes are, and every byte use
             how many bits, and bit start position
             for the purpose of easily parsing value from CAN frame, the byte_info is arranged
             from msb byte to lsb byte order

    BUG FIX / portability: the byte index used ``bit / 8``, which is only
    floor division under Python 2; ``bit // 8`` keeps the index an int on
    both Python 2 and 3 without changing Python 2 behavior.
    """
    bit = var["bit"]
    byte_info = []
    left_len = var["len"]
    byte_idx = bit // 8
    bit_start = bit % 8
    if var["order"] == "motorola":
        # Motorola (big-endian): bit numbering counts down inside a byte.
        while left_len > 0:
            info = {}
            info["byte"] = byte_idx
            info["len"] = min(bit_start + 1, left_len)
            # start_bit is always the lowest bit
            info["start_bit"] = bit_start - info["len"] + 1
            byte_info.append(info)
            left_len = left_len - info["len"]
            byte_idx = byte_idx + 1
            bit_start = 7
    else:
        # Intel (little-endian): bits count up from start_bit.
        while left_len > 0:
            info = {}
            info["byte"] = byte_idx
            info["len"] = min(8 - bit_start, left_len)
            info["start_bit"] = bit_start
            byte_info.append(info)
            left_len = left_len - info["len"]
            byte_idx = byte_idx + 1
            bit_start = 0
    # byte_info is always construct with msb(most significant bit) byte to lsb byte
    byte_info.reverse()
    return byte_info
def gen_control_decode_offset_precision(var):
    """Build the C++ statements that clamp a control value to its
    physical range and convert it to the raw integer representation
    (undoing the signal's offset and precision)."""
    name = var["name"].lower()
    range_info = get_range_info(var)
    if var["type"] == "double":
        # Doubles must be clamped against floating-point literals.
        for bound in ("low", "high"):
            if range_info[bound].find(".") == -1:
                range_info[bound] = "%s.0" % range_info[bound]
    lines = "\n"
    if var["type"] not in ("enum", "bool"):
        lines += "  %s = ProtocolData::BoundedValue(%s, %s, %s);\n" % (
            name, range_info["low"], range_info["high"], name)
    lines += "  int x ="
    if var["offset"] != 0.0:
        lines += " (%s - %f)" % (name, var["offset"])
    else:
        lines += " %s" % name
    if var["precision"] != 1.0:
        lines += " / %f" % var["precision"]
    return lines + ";\n"
def gen_control_encode_one_byte_value_impl(var, byte_info):
    """Build the C++ snippet that writes a signal fitting in one byte.

    only has int and double, int can hold all the value whatever it is
    signed or unsigned
    """
    template = """
  Byte to_set(data + %d);
  to_set.set_value(x, %d, %d);
  """
    return template % (byte_info["byte"], byte_info["start_bit"],
                       byte_info["len"])
def get_range_info(var):
    """Parse ``physical_range`` of the form ``[low|high]`` into a dict
    with string keys ``low`` and ``high``; empty dict when absent."""
    if "physical_range" not in var.keys():
        return {}
    low_part, high_part = var["physical_range"].split('|')
    return {
        "low": low_part.split('[')[1],
        "high": high_part.split(']')[0],
    }
def gen_control_encode_value_impl(var, byte_info):
    """Build the C++ snippet that writes a multi-byte signal into the CAN
    frame, one masked byte at a time (byte_info ordered lsb to msb)."""
    chunk = """
  t = x & %s;
  Byte to_set%d(data + %d);
  to_set%d.set_value(t, %d, %d);
  """
    out = "  uint8_t t = 0;\n"
    prev_len = 0
    for idx, info in enumerate(byte_info):
        if idx != 0:
            # Drop the bits already written in the previous iteration.
            out += "  x >>= %d;\n" % prev_len
        mask = "0x%X" % ((1 << info["len"]) - 1)
        out += chunk % (mask, idx, info["byte"], idx, info["start_bit"],
                        info["len"])
        prev_len = info["len"]
    return out
def gen_control_value_func_impl(classname, var, p):
    """Generate the ``set_<var>`` / ``set_p_<var>`` implementation pair
    for one control signal of class ``classname``.

    Returns the generated C++ source, or "" for signals wider than 32
    bits, which this generator does not support.
    """
    impl = ""
    if var["len"] > 32:
        # BUG FIX: the original referenced the undefined name
        # ``class_name`` (NameError on this path); the parameter is
        # ``classname``. Also uses print() so the statement is valid on
        # both Python 2 and 3.
        print("This generator not support big than four bytes var." +
              "protocol classname: %s, var_name:%s " % (classname, var["name"]))
        return impl
    fmt = """
%(classname)s* %(classname)s::set_%(var_name)s(
    %(var_type)s %(var_name)s) {
  %(var_name)s_ = %(var_name)s;
  return this;
 }

// config detail: %(config)s
void %(classname)s::set_p_%(var_name)s(uint8_t* data,
    %(var_type)s %(var_name)s) {"""
    fmt_val = {}
    fmt_val["classname"] = classname
    fmt_val["var_name"] = var["name"].lower()
    returntype = var["type"]
    if var["type"] == "enum":
        returntype = p["name"].capitalize() + "::" + var["name"].capitalize(
        ) + "Type"
    fmt_val["var_type"] = returntype
    fmt_val["config"] = str(var)
    impl = impl + fmt % fmt_val
    impl = impl + gen_control_decode_offset_precision(var)
    # get lsb to msb order
    byte_info = get_byte_info(var)
    byte_info.reverse()
    if len(byte_info) == 1:
        impl = impl + gen_control_encode_one_byte_value_impl(var, byte_info[0])
    else:
        impl = impl + gen_control_encode_value_impl(var, byte_info)
    return impl + "}\n"
def gen_control_cpp(car_type, p, output_dir):
    """Generate the C++ source for a *control* protocol ``p``.

    Emits, per signal: the setter/encoder implementation pair, a call
    that writes the pending member value into the frame, and a member
    initializer with a type-appropriate zero value.
    """
    control_cpp_tpl_file = "template/control_protocol.cc.tpl"
    FMT = get_tpl_fmt(control_cpp_tpl_file)
    control_cpp_file = output_dir + "%s.cc" % p["name"]
    with open(control_cpp_file, 'w') as fp:
        fmt_val = {}
        fmt_val["car_type_lower"] = car_type
        fmt_val["protocol_name_lower"] = p["name"]
        fmt_val["id_upper"] = p["id"].upper()
        classname = p["name"].replace('_', '').capitalize()
        fmt_val["classname"] = classname
        set_private_var_list = []
        set_private_var_init_list = []
        set_func_impl_list = []
        for var in p["vars"]:
            func_impl = gen_control_value_func_impl(classname, var, p)
            set_func_impl_list.append(func_impl)
            set_private_var = "  set_p_%s(data, %s_);" % (var["name"].lower(),
                                                          var["name"].lower())
            set_private_var_list.append(set_private_var)
            init_val = "0"
            if var["type"] == "double":
                init_val = "0.0"
            elif var["type"] == "bool":
                init_val = "false"
            elif var["type"] == "enum":
                # Prefer the enum member mapped to raw value 0; otherwise
                # fall back to an arbitrary member of the enum mapping.
                if 0 in var["enum"]:
                    init_val = p["name"].capitalize(
                    ) + "::" + var["enum"][0].upper()
                else:
                    init_val = p["name"].capitalize(
                    ) + "::" + var["enum"].values()[0].upper()
            set_private_var_init_list.append("  %s_ = %s;" %
                                             (var["name"].lower(), init_val))
        fmt_val["set_private_var_list"] = "\n".join(set_private_var_list)
        fmt_val["set_private_var_init_list"] = "\n".join(
            set_private_var_init_list)
        fmt_val["set_func_impl_list"] = "\n".join(set_func_impl_list)
        fp.write(FMT % fmt_val)
def get_tpl_fmt(tpl_file):
    """Return the entire content of *tpl_file* as a single string."""
    with open(tpl_file, 'r') as tpl_fp:
        content = tpl_fp.read()
    return content
def gen_build_file(car_type, work_dir):
    """Generate the BUILD file for the generated protocol sources from
    template/protocol_BUILD.tpl."""
    build_fmt = get_tpl_fmt("template/protocol_BUILD.tpl")
    with open(work_dir + "BUILD", "w") as build_fp:
        build_fp.write(build_fmt % {"car_type": car_type.lower()})
def gen_protocols(protocol_conf_file, protocol_dir):
    """Generate one .h/.cc pair per protocol described in the YAML config
    file, plus the BUILD file, into ``protocol_dir``.

    FIX: the Python 2-only ``print`` statements are replaced with
    ``print(...)`` calls, which behave identically on Python 2 and 3.
    """
    print("Generating protocols")
    if not os.path.exists(protocol_dir):
        os.makedirs(protocol_dir)
    with open(protocol_conf_file, 'r') as fp:
        # NOTE(review): yaml.load without an explicit Loader can execute
        # arbitrary tags; fine for trusted in-repo configs, but consider
        # yaml.safe_load.
        content = yaml.load(fp)
        protocols = content["protocols"]
        car_type = content["car_type"]
        for p_name in protocols:
            p = protocols[p_name]
            if p["protocol_type"] == "report":
                gen_report_header(car_type, p, protocol_dir)
                gen_report_cpp(car_type, p, protocol_dir)
            elif p["protocol_type"] == "control":
                gen_control_header(car_type, p, protocol_dir)
                gen_control_cpp(car_type, p, protocol_dir)
            else:
                print("Unknown protocol_type:%s" % p["protocol_type"])
        gen_build_file(car_type, protocol_dir)
if __name__ == "__main__":
    # Entry point: read the top-level YAML config, wipe and recreate the
    # per-vehicle protocol output directory, then generate everything.
    if len(sys.argv) != 2:
        print("Usage:\npython %s some_config.yml" % sys.argv[0])
        sys.exit(1)
    with open(sys.argv[1], 'r') as fp:
        # NOTE(review): yaml.load without a Loader; use yaml.safe_load
        # for untrusted input.
        conf = yaml.load(fp)
    protocol_conf = conf["protocol_conf"]
    protocol_dir = conf["output_dir"] + "vehicle/" + conf["car_type"].lower(
    ) + "/protocol/"
    # BUG FIX: the original referenced an undefined name ``output_dir``
    # in the next two calls, raising NameError before any generation
    # happened; the computed path is ``protocol_dir``.
    shutil.rmtree(protocol_dir, True)
    os.makedirs(protocol_dir)
    gen_protocols(protocol_conf, protocol_dir)
| |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division
import os
import tempfile
import time
from helpers import unittest
import luigi
import luigi.notifications
import luigi.scheduler
import luigi.six as six
import luigi.worker
# Route notifications to debug mode and share one scratch directory for
# all task outputs in this test module.
luigi.notifications.DEBUG = True
tempdir = tempfile.mkdtemp()
class DummyTask(luigi.Task):
    """Trivial task: running it just touches its output file in tempdir."""
    # NOTE(review): the parameter is named task_id, same as luigi's task
    # identifier notion — presumably deliberate in these tests; confirm.
    task_id = luigi.Parameter()
    def run(self):
        f = self.output().open('w')
        f.close()
    def output(self):
        return luigi.LocalTarget(os.path.join(tempdir, str(self)))
class FactorTask(luigi.Task):
    """Task whose dependencies are the factor pairs of ``product``,
    yielding a small dependency tree for graph tests."""
    product = luigi.Parameter()
    def requires(self):
        # For every divisor, require both the divisor and its cofactor.
        for factor in range(2, self.product):
            if self.product % factor == 0:
                yield FactorTask(factor)
                yield FactorTask(self.product // factor)
        return
    def run(self):
        f = self.output().open('w')
        f.close()
    def output(self):
        return luigi.LocalTarget(os.path.join(tempdir, 'luigi_test_factor_%d' % self.product))
class BadReqTask(luigi.Task):
    """Task whose requires() raises AssertionError when succeed is False,
    so the failing dependency's own deps can never be inspected."""
    succeed = luigi.BoolParameter()
    def requires(self):
        assert self.succeed
        yield BadReqTask(False)
    def run(self):
        pass
    def complete(self):
        # Never complete, so the scheduler always tries to expand deps.
        return False
class FailingTask(luigi.Task):
    """Task whose run() always raises, to exercise failure reporting."""
    task_id = luigi.Parameter()
    def run(self):
        raise Exception("Error Message")
class SchedulerVisualisationTest(unittest.TestCase):
    def setUp(self):
        """Create a fresh in-process scheduler for each test."""
        self.scheduler = luigi.scheduler.CentralPlannerScheduler()
    def tearDown(self):
        # Nothing to clean up: the scheduler is in-process and the shared
        # tempdir is left for the OS to reclaim.
        pass
def _assert_complete(self, tasks):
for t in tasks:
self.assert_(t.complete())
    def _build(self, tasks):
        """Run ``tasks`` to completion against ``self.scheduler`` with a
        single in-process worker."""
        with luigi.worker.Worker(scheduler=self.scheduler, worker_processes=1) as w:
            for t in tasks:
                w.add(t)
            w.run()
    def _remote(self):
        """Return the scheduler under test (stand-in for a remote one)."""
        return self.scheduler
def _test_run(self, workers):
tasks = [DummyTask(i) for i in range(20)]
self._build(tasks, workers=workers)
self._assert_complete(tasks)
    def test_graph(self):
        """Two independent DONE tasks appear in the scheduler graph with
        no deps and start times bracketed by the build window."""
        start = time.time()
        tasks = [DummyTask(task_id=1), DummyTask(task_id=2)]
        self._build(tasks)
        self._assert_complete(tasks)
        end = time.time()
        remote = self._remote()
        graph = remote.graph()
        self.assertEqual(len(graph), 2)
        self.assert_(DummyTask(task_id=1).task_id in graph)
        d1 = graph[DummyTask(task_id=1).task_id]
        self.assertEqual(d1[u'status'], u'DONE')
        self.assertEqual(d1[u'deps'], [])
        self.assertGreaterEqual(d1[u'start_time'], start)
        self.assertLessEqual(d1[u'start_time'], end)
        d2 = graph[DummyTask(task_id=2).task_id]
        self.assertEqual(d2[u'status'], u'DONE')
        self.assertEqual(d2[u'deps'], [])
        self.assertGreaterEqual(d2[u'start_time'], start)
        self.assertLessEqual(d2[u'start_time'], end)
    def test_large_graph_truncate(self):
        """dep_graph on a 101-node chain is truncated to max_graph_nodes,
        keeping the nodes closest to the queried root."""
        class LinearTask(luigi.Task):
            idx = luigi.IntParameter()
            def requires(self):
                if self.idx > 0:
                    yield LinearTask(self.idx - 1)
            def complete(self):
                return False
        root_task = LinearTask(100)
        self.scheduler = luigi.scheduler.CentralPlannerScheduler(max_graph_nodes=10)
        self._build([root_task])
        graph = self.scheduler.dep_graph(root_task.task_id)
        self.assertEqual(10, len(graph))
        expected_nodes = [LinearTask(i).task_id for i in range(100, 90, -1)]
        six.assertCountEqual(self, expected_nodes, graph)
    def test_large_inverse_graph_truncate(self):
        """inverse_dep_graph truncation keeps the nodes closest to the
        queried leaf (upstream direction)."""
        class LinearTask(luigi.Task):
            idx = luigi.IntParameter()
            def requires(self):
                if self.idx > 0:
                    yield LinearTask(self.idx - 1)
            def complete(self):
                return False
        root_task = LinearTask(100)
        self.scheduler = luigi.scheduler.CentralPlannerScheduler(max_graph_nodes=10)
        self._build([root_task])
        graph = self.scheduler.inverse_dep_graph(LinearTask(0).task_id)
        self.assertEqual(10, len(graph))
        expected_nodes = [LinearTask(i).task_id for i in range(10)]
        six.assertCountEqual(self, expected_nodes, graph)
    def test_truncate_graph_with_full_levels(self):
        """On a binary tree, truncation retains whole breadth-first
        levels: nodes 1..10 of the heap-numbered tree."""
        class BinaryTreeTask(luigi.Task):
            idx = luigi.IntParameter()
            def requires(self):
                # NOTE(review): map() is lazy on Python 3 — presumably
                # luigi accepts any iterable here; confirm.
                if self.idx < 100:
                    return map(BinaryTreeTask, (self.idx * 2, self.idx * 2 + 1))
        root_task = BinaryTreeTask(1)
        self.scheduler = luigi.scheduler.CentralPlannerScheduler(max_graph_nodes=10)
        self._build([root_task])
        graph = self.scheduler.dep_graph(root_task.task_id)
        self.assertEqual(10, len(graph))
        expected_nodes = [BinaryTreeTask(i).task_id for i in range(1, 11)]
        six.assertCountEqual(self, expected_nodes, graph)
    def test_truncate_graph_with_multiple_depths(self):
        """Every node also depends directly on the leaf, so the truncated
        graph contains the 9 nodes nearest the root plus that leaf."""
        class LinearTask(luigi.Task):
            idx = luigi.IntParameter()
            def requires(self):
                if self.idx > 0:
                    yield LinearTask(self.idx - 1)
                yield LinearTask(0)
            def complete(self):
                return False
        root_task = LinearTask(100)
        self.scheduler = luigi.scheduler.CentralPlannerScheduler(max_graph_nodes=10)
        self._build([root_task])
        graph = self.scheduler.dep_graph(root_task.task_id)
        self.assertEqual(10, len(graph))
        expected_nodes = [LinearTask(i).task_id for i in range(100, 91, -1)] +\
                         [LinearTask(0).task_id]
        self.maxDiff = None
        six.assertCountEqual(self, expected_nodes, graph)
    def _assert_all_done(self, tasks):
        """Assert every task dict in ``tasks`` has status DONE."""
        self._assert_all(tasks, u'DONE')
    def _assert_all(self, tasks, status):
        """Assert every task dict (values of ``tasks``) has ``status``."""
        for task in tasks.values():
            self.assertEqual(task[u'status'], status)
    def test_dep_graph_single(self):
        """A task with no factors yields a one-node, dep-free graph."""
        self._build([FactorTask(1)])
        remote = self._remote()
        dep_graph = remote.dep_graph(FactorTask(product=1).task_id)
        self.assertEqual(len(dep_graph), 1)
        self._assert_all_done(dep_graph)
        d1 = dep_graph.get(FactorTask(product=1).task_id)
        self.assertEqual(type(d1), type({}))
        self.assertEqual(d1[u'deps'], [])
    def test_dep_graph_not_found(self):
        """dep_graph for a task the scheduler never saw is empty."""
        self._build([FactorTask(1)])
        remote = self._remote()
        dep_graph = remote.dep_graph(FactorTask(product=5).task_id)
        self.assertEqual(len(dep_graph), 0)
    def test_inverse_dep_graph_not_found(self):
        """inverse_dep_graph for an unknown task id is empty."""
        self._build([FactorTask(1)])
        remote = self._remote()
        dep_graph = remote.inverse_dep_graph('FactorTask(product=5)')
        self.assertEqual(len(dep_graph), 0)
    def test_dep_graph_tree(self):
        """Factoring 30 produces the tree 30 -> (2, 15), 15 -> (3, 5)."""
        self._build([FactorTask(30)])
        remote = self._remote()
        dep_graph = remote.dep_graph(FactorTask(product=30).task_id)
        self.assertEqual(len(dep_graph), 5)
        self._assert_all_done(dep_graph)
        d30 = dep_graph[FactorTask(product=30).task_id]
        self.assertEqual(sorted(d30[u'deps']), sorted([FactorTask(product=15).task_id, FactorTask(product=2).task_id]))
        d2 = dep_graph[FactorTask(product=2).task_id]
        self.assertEqual(sorted(d2[u'deps']), [])
        d15 = dep_graph[FactorTask(product=15).task_id]
        self.assertEqual(sorted(d15[u'deps']), sorted([FactorTask(product=3).task_id, FactorTask(product=5).task_id]))
        d3 = dep_graph[FactorTask(product=3).task_id]
        self.assertEqual(sorted(d3[u'deps']), [])
        d5 = dep_graph[FactorTask(product=5).task_id]
        self.assertEqual(sorted(d5[u'deps']), [])
    def test_dep_graph_missing_deps(self):
        """A dependency whose requires() raised shows up as UNKNOWN."""
        self._build([BadReqTask(True)])
        dep_graph = self._remote().dep_graph(BadReqTask(succeed=True).task_id)
        self.assertEqual(len(dep_graph), 2)
        suc = dep_graph[BadReqTask(succeed=True).task_id]
        self.assertEqual(suc[u'deps'], [BadReqTask(succeed=False).task_id])
        fail = dep_graph[BadReqTask(succeed=False).task_id]
        self.assertEqual(fail[u'name'], 'UNKNOWN')
        self.assertEqual(fail[u'status'], 'UNKNOWN')
    def test_dep_graph_diamond(self):
        """Factoring 12 produces a diamond: 12 -> (2, 6), 6 -> (2, 3);
        the shared node 2 appears only once."""
        self._build([FactorTask(12)])
        remote = self._remote()
        dep_graph = remote.dep_graph(FactorTask(product=12).task_id)
        self.assertEqual(len(dep_graph), 4)
        self._assert_all_done(dep_graph)
        d12 = dep_graph[FactorTask(product=12).task_id]
        self.assertEqual(sorted(d12[u'deps']), sorted([FactorTask(product=2).task_id, FactorTask(product=6).task_id]))
        d6 = dep_graph[FactorTask(product=6).task_id]
        self.assertEqual(sorted(d6[u'deps']), sorted([FactorTask(product=2).task_id, FactorTask(product=3).task_id]))
        d3 = dep_graph[FactorTask(product=3).task_id]
        self.assertEqual(sorted(d3[u'deps']), [])
        d2 = dep_graph[FactorTask(product=2).task_id]
        self.assertEqual(sorted(d2[u'deps']), [])
    def test_task_list_single(self):
        """task_list filters by status; a lone DONE task shows up only
        under DONE (and under the unfiltered listing)."""
        self._build([FactorTask(7)])
        remote = self._remote()
        tasks_done = remote.task_list('DONE', '')
        self.assertEqual(len(tasks_done), 1)
        self._assert_all_done(tasks_done)
        t7 = tasks_done.get(FactorTask(product=7).task_id)
        self.assertEqual(type(t7), type({}))
        self.assertEqual(remote.task_list('', ''), tasks_done)
        self.assertEqual(remote.task_list('FAILED', ''), {})
        self.assertEqual(remote.task_list('PENDING', ''), {})
    def test_task_list_failed(self):
        """A task whose run() raised is listed only under FAILED."""
        self._build([FailingTask(8)])
        remote = self._remote()
        failed = remote.task_list('FAILED', '')
        self.assertEqual(len(failed), 1)
        f8 = failed.get(FailingTask(task_id=8).task_id)
        self.assertEqual(f8[u'status'], u'FAILED')
        self.assertEqual(remote.task_list('DONE', ''), {})
        self.assertEqual(remote.task_list('PENDING', ''), {})
    def test_task_list_upstream_status(self):
        """Pending tasks are sub-filtered by upstream status:
        UPSTREAM_MISSING_INPUT for A (incomplete external) and its
        dependent C; UPSTREAM_FAILED for D and E (downstream of F)."""
        class A(luigi.ExternalTask):
            pass
        class B(luigi.ExternalTask):
            def complete(self):
                return True
        class C(luigi.Task):
            def requires(self):
                return [A(), B()]
        class F(luigi.Task):
            def run(self):
                raise Exception()
        class D(luigi.Task):
            def requires(self):
                return [F()]
        class E(luigi.Task):
            def requires(self):
                return [C(), D()]
        self._build([E()])
        remote = self._remote()
        done = remote.task_list('DONE', '')
        self.assertEqual(len(done), 1)
        db = done.get(B().task_id)
        self.assertEqual(db['status'], 'DONE')
        missing_input = remote.task_list('PENDING', 'UPSTREAM_MISSING_INPUT')
        self.assertEqual(len(missing_input), 2)
        pa = missing_input.get(A().task_id)
        self.assertEqual(pa['status'], 'PENDING')
        self.assertEqual(remote._upstream_status(A().task_id, {}), 'UPSTREAM_MISSING_INPUT')
        pc = missing_input.get(C().task_id)
        self.assertEqual(pc['status'], 'PENDING')
        self.assertEqual(remote._upstream_status(C().task_id, {}), 'UPSTREAM_MISSING_INPUT')
        upstream_failed = remote.task_list('PENDING', 'UPSTREAM_FAILED')
        self.assertEqual(len(upstream_failed), 2)
        pe = upstream_failed.get(E().task_id)
        self.assertEqual(pe['status'], 'PENDING')
        self.assertEqual(remote._upstream_status(E().task_id, {}), 'UPSTREAM_FAILED')
        pe = upstream_failed.get(D().task_id)
        self.assertEqual(pe['status'], 'PENDING')
        self.assertEqual(remote._upstream_status(D().task_id, {}), 'UPSTREAM_FAILED')
        pending = dict(missing_input)
        pending.update(upstream_failed)
        self.assertEqual(remote.task_list('PENDING', ''), pending)
        self.assertEqual(remote.task_list('PENDING', 'UPSTREAM_RUNNING'), {})
        failed = remote.task_list('FAILED', '')
        self.assertEqual(len(failed), 1)
        fd = failed.get(F().task_id)
        self.assertEqual(fd['status'], 'FAILED')
        # NOTE(review): 'all' shadows the builtin; rename if this block
        # is ever edited.
        all = dict(pending)
        all.update(done)
        all.update(failed)
        self.assertEqual(remote.task_list('', ''), all)
        self.assertEqual(remote.task_list('RUNNING', ''), {})
    def test_task_search(self):
        """task_search('Task') matches both families and groups the
        results by status."""
        self._build([FactorTask(8)])
        self._build([FailingTask(8)])
        remote = self._remote()
        all_tasks = remote.task_search('Task')
        self.assertEqual(len(all_tasks), 2)
        self._assert_all(all_tasks['DONE'], 'DONE')
        self._assert_all(all_tasks['FAILED'], 'FAILED')
def test_fetch_error(self):
    """fetch_error returns the task id plus the message and traceback."""
    self._build([FailingTask(8)])
    failed_id = FailingTask(task_id=8).task_id
    error = self._remote().fetch_error(failed_id)
    self.assertEqual(failed_id, error["taskId"])
    # The error payload should carry the message and a full traceback.
    for fragment in ("Error Message", "Runtime error", "Traceback"):
        self.assertTrue(fragment in error["error"])
def test_inverse_deps(self):
    """inverse_dep_graph(X) walks upward from X through everything that requires it."""
    class X(luigi.Task):
        pass

    class Y(luigi.Task):
        def requires(self):
            return [X()]

    class Z(luigi.Task):
        id = luigi.Parameter()

        def requires(self):
            return [Y()]

    class ZZ(luigi.Task):
        def requires(self):
            return [Z(1), Z(2)]

    # Forward dependency chain: ZZ -> {Z(1), Z(2)} -> Y -> X.
    self._build([ZZ()])
    dep_graph = self._remote().inverse_dep_graph(X().task_id)

    def assert_has_deps(task_id, deps):
        # Helper: in the *inverse* graph a node's 'deps' are its dependents.
        self.assertTrue(task_id in dep_graph, '%s not in dep_graph %s' % (task_id, dep_graph))
        task = dep_graph[task_id]
        self.assertEqual(sorted(task['deps']), sorted(deps), '%s does not have deps %s' % (task_id, deps))

    assert_has_deps(X().task_id, [Y().task_id])
    assert_has_deps(Y().task_id, [Z(id=1).task_id, Z(id=2).task_id])
    assert_has_deps(Z(id=1).task_id, [ZZ().task_id])
    assert_has_deps(Z(id=2).task_id, [ZZ().task_id])
    # ZZ is the root: nothing requires it, so its inverse deps are empty.
    assert_has_deps(ZZ().task_id, [])
def test_simple_worker_list(self):
    """A single worker that ran one task reports that task and zero counts."""
    class X(luigi.Task):
        def run(self):
            self._complete = True

        def complete(self):
            return getattr(self, '_complete', False)

    task = X()
    self._build([task])

    workers = self._remote().worker_list()
    self.assertEqual(1, len(workers))
    worker = workers[0]
    self.assertEqual(task.task_id, worker['first_task'])
    # Everything finished, so nothing is pending, unique or running.
    expected = (('num_pending', 0), ('num_uniques', 0),
                ('num_running', 0), ('workers', 1))
    for key, value in expected:
        self.assertEqual(value, worker[key])
def test_worker_list_pending_uniques(self):
    """Two workers sharing a pending dependency count it as pending but not unique."""
    class X(luigi.Task):
        def complete(self):
            return False

    class Y(X):
        def requires(self):
            return X()

    class Z(Y):
        pass

    # w1 schedules {X, Y}; w2 schedules {X, Z} -- X is shared between them.
    w1 = luigi.worker.Worker(scheduler=self.scheduler, worker_processes=1)
    w2 = luigi.worker.Worker(scheduler=self.scheduler, worker_processes=1)
    w1.add(Y())
    w2.add(Z())

    workers = self._remote().worker_list()
    self.assertEqual(2, len(workers))
    for worker in workers:
        # Each worker has two pending tasks, but only one (Y or Z) is
        # unique to it; the shared X does not count as unique.
        self.assertEqual(2, worker['num_pending'])
        self.assertEqual(1, worker['num_uniques'])
        self.assertEqual(0, worker['num_running'])
def test_worker_list_running(self):
    """With 3 worker processes, 3 of the 4 scheduled tasks show as running."""
    class X(luigi.Task):
        n = luigi.IntParameter()

    w = luigi.worker.Worker(worker_id='w', scheduler=self.scheduler, worker_processes=3)
    w.add(X(0))
    w.add(X(1))
    w.add(X(2))
    w.add(X(3))

    # Claim three units of work -- one per available worker process.
    self.scheduler.get_work(worker='w')
    self.scheduler.get_work(worker='w')
    self.scheduler.get_work(worker='w')

    workers = self._remote().worker_list()
    self.assertEqual(1, len(workers))
    worker = workers[0]
    # Three tasks handed out, one left pending (and unique to this worker).
    self.assertEqual(3, worker['num_running'])
    self.assertEqual(1, worker['num_pending'])
    self.assertEqual(1, worker['num_uniques'])
# Allow running this test module directly via the unittest runner.
if __name__ == '__main__':
    unittest.main()
| |
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distrubuted in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
L{SFTPFile}
"""
from binascii import hexlify
from collections import deque
import socket
import threading
import time
from paramiko.common import *
from paramiko.sftp import *
from paramiko.file import BufferedFile
from paramiko.sftp_attr import SFTPAttributes
class SFTPFile (BufferedFile):
    """
    Proxy object for a file on the remote server, in client mode SFTP.
    Instances of this class may be used as context managers in the same way
    that built-in Python file objects are.
    """

    # Some sftp servers will choke if you send read/write requests larger than
    # this size.
    MAX_REQUEST_SIZE = 32768

    def __init__(self, sftp, handle, mode='r', bufsize=-1):
        BufferedFile.__init__(self)
        # Owning SFTPClient and the opaque server-issued handle for this file.
        self.sftp = sftp
        self.handle = handle
        BufferedFile._set_mode(self, mode, bufsize)
        # Pipelining: when True, writes do not wait for per-request status.
        self.pipelined = False
        # Prefetch state; chunks arrive asynchronously via _async_response.
        self._prefetching = False
        self._prefetch_done = False
        self._prefetch_data = {}      # file offset -> received chunk of bytes
        self._prefetch_reads = []     # (offset, length) requests still in flight
        self._saved_exception = None  # async error, re-raised on next file op
        self._reqs = deque()          # outstanding pipelined write request ids

    def __del__(self):
        # GC may run in any thread; close without waiting for a response.
        self._close(async=True)

    def close(self):
        self._close(async=False)

    # NOTE(review): the parameter name 'async' is a reserved word from
    # Python 3.7 on; this module is Python 2 code (see the old
    # 'except Exception, x' syntax in _async_response below).
    def _close(self, async=False):
        # We allow double-close without signaling an error, because real
        # Python file objects do.  However, we must protect against actually
        # sending multiple CMD_CLOSE packets, because after we close our
        # handle, the same handle may be re-allocated by the server, and we
        # may end up mysteriously closing some random other file.  (This is
        # especially important because we unconditionally call close() from
        # __del__.)
        if self._closed:
            return
        self.sftp._log(DEBUG, 'close(%s)' % hexlify(self.handle))
        if self.pipelined:
            # Drain any outstanding write responses before closing.
            self.sftp._finish_responses(self)
        BufferedFile.close(self)
        try:
            if async:
                # GC'd file handle could be called from an arbitrary thread -- don't wait for a response
                self.sftp._async_request(type(None), CMD_CLOSE, self.handle)
            else:
                self.sftp._request(CMD_CLOSE, self.handle)
        except EOFError:
            # may have outlived the Transport connection
            pass
        except (IOError, socket.error):
            # may have outlived the Transport connection
            pass

    def _data_in_prefetch_requests(self, offset, size):
        """Return True if the byte range (offset, size) is fully covered by
        prefetch requests that are still outstanding."""
        # Only requests starting at or before 'offset' can cover its start.
        k = [i for i in self._prefetch_reads if i[0] <= offset]
        if len(k) == 0:
            return False
        k.sort(lambda x, y: cmp(x[0], y[0]))
        buf_offset, buf_size = k[-1]
        if buf_offset + buf_size <= offset:
            # prefetch request ends before this one begins
            return False
        if buf_offset + buf_size >= offset + size:
            # inclusive
            return True
        # well, we have part of the request.  see if another chunk has the rest.
        return self._data_in_prefetch_requests(buf_offset + buf_size, offset + size - buf_offset - buf_size)

    def _data_in_prefetch_buffers(self, offset):
        """
        if a block of data is present in the prefetch buffers, at the given
        offset, return the offset of the relevant prefetch buffer.  otherwise,
        return None.  this guarantees nothing about the number of bytes
        collected in the prefetch buffer so far.
        """
        k = [i for i in self._prefetch_data.keys() if i <= offset]
        if len(k) == 0:
            return None
        # The closest buffer at or below 'offset' is the only candidate.
        index = max(k)
        buf_offset = offset - index
        if buf_offset >= len(self._prefetch_data[index]):
            # it's not here
            return None
        return index

    def _read_prefetch(self, size):
        """
        read data out of the prefetch buffer, if possible.  if the data isn't
        in the buffer, return None.  otherwise, behaves like a normal read.
        """
        # while not closed, and haven't fetched past the current position, and haven't reached EOF...
        while True:
            offset = self._data_in_prefetch_buffers(self._realpos)
            if offset is not None:
                break
            if self._prefetch_done or self._closed:
                break
            # Block until another prefetched chunk arrives over the channel.
            self.sftp._read_response()
            self._check_exception()
        if offset is None:
            self._prefetching = False
            return None
        prefetch = self._prefetch_data[offset]
        del self._prefetch_data[offset]

        # Trim the chunk so it starts at the current file position, putting
        # any leading bytes back into the buffer map...
        buf_offset = self._realpos - offset
        if buf_offset > 0:
            self._prefetch_data[offset] = prefetch[:buf_offset]
            prefetch = prefetch[buf_offset:]
        # ...and likewise any surplus past 'size'.
        if size < len(prefetch):
            self._prefetch_data[self._realpos + size] = prefetch[size:]
            prefetch = prefetch[:size]
        return prefetch

    def _read(self, size):
        # Cap each wire request so the server doesn't choke (see MAX_REQUEST_SIZE).
        size = min(size, self.MAX_REQUEST_SIZE)
        if self._prefetching:
            data = self._read_prefetch(size)
            if data is not None:
                return data
        # Fall back to a synchronous read at the current position.
        t, msg = self.sftp._request(CMD_READ, self.handle, long(self._realpos), int(size))
        if t != CMD_DATA:
            raise SFTPError('Expected data')
        return msg.get_string()

    def _write(self, data):
        # may write less than requested if it would exceed max packet size
        chunk = min(len(data), self.MAX_REQUEST_SIZE)
        self._reqs.append(self.sftp._async_request(type(None), CMD_WRITE, self.handle, long(self._realpos), str(data[:chunk])))
        # When not pipelined (or the backlog is large and responses are
        # waiting), drain all outstanding write statuses now.
        if not self.pipelined or (len(self._reqs) > 100 and self.sftp.sock.recv_ready()):
            while len(self._reqs):
                req = self._reqs.popleft()
                t, msg = self.sftp._read_response(req)
                if t != CMD_STATUS:
                    raise SFTPError('Expected status')
                # convert_status already called
        return chunk

    def settimeout(self, timeout):
        """
        Set a timeout on read/write operations on the underlying socket or
        ssh L{Channel}.
        @see: L{Channel.settimeout}
        @param timeout: seconds to wait for a pending read/write operation
            before raising C{socket.timeout}, or C{None} for no timeout
        @type timeout: float
        """
        self.sftp.sock.settimeout(timeout)

    def gettimeout(self):
        """
        Returns the timeout in seconds (as a float) associated with the socket
        or ssh L{Channel} used for this file.
        @see: L{Channel.gettimeout}
        @rtype: float
        """
        return self.sftp.sock.gettimeout()

    def setblocking(self, blocking):
        """
        Set blocking or non-blocking mode on the underiying socket or ssh
        L{Channel}.
        @see: L{Channel.setblocking}
        @param blocking: 0 to set non-blocking mode; non-0 to set blocking
            mode.
        @type blocking: int
        """
        self.sftp.sock.setblocking(blocking)

    def seek(self, offset, whence=0):
        # Flush buffered writes before moving the file pointer.
        self.flush()
        if whence == self.SEEK_SET:
            self._realpos = self._pos = offset
        elif whence == self.SEEK_CUR:
            self._pos += offset
            self._realpos = self._pos
        else:
            # SEEK_END: offset is relative to the remote file size.
            self._realpos = self._pos = self._get_size() + offset
        # The read buffer refers to the old position; invalidate it.
        self._rbuffer = ''

    def stat(self):
        """
        Retrieve information about this file from the remote system.  This is
        exactly like L{SFTP.stat}, except that it operates on an already-open
        file.
        @return: an object containing attributes about this file.
        @rtype: SFTPAttributes
        """
        t, msg = self.sftp._request(CMD_FSTAT, self.handle)
        if t != CMD_ATTRS:
            raise SFTPError('Expected attributes')
        return SFTPAttributes._from_msg(msg)

    def chmod(self, mode):
        """
        Change the mode (permissions) of this file.  The permissions are
        unix-style and identical to those used by python's C{os.chmod}
        function.
        @param mode: new permissions
        @type mode: int
        """
        self.sftp._log(DEBUG, 'chmod(%s, %r)' % (hexlify(self.handle), mode))
        attr = SFTPAttributes()
        attr.st_mode = mode
        self.sftp._request(CMD_FSETSTAT, self.handle, attr)

    def chown(self, uid, gid):
        """
        Change the owner (C{uid}) and group (C{gid}) of this file.  As with
        python's C{os.chown} function, you must pass both arguments, so if you
        only want to change one, use L{stat} first to retrieve the current
        owner and group.
        @param uid: new owner's uid
        @type uid: int
        @param gid: new group id
        @type gid: int
        """
        self.sftp._log(DEBUG, 'chown(%s, %r, %r)' % (hexlify(self.handle), uid, gid))
        attr = SFTPAttributes()
        attr.st_uid, attr.st_gid = uid, gid
        self.sftp._request(CMD_FSETSTAT, self.handle, attr)

    def utime(self, times):
        """
        Set the access and modified times of this file.  If
        C{times} is C{None}, then the file's access and modified times are set
        to the current time.  Otherwise, C{times} must be a 2-tuple of numbers,
        of the form C{(atime, mtime)}, which is used to set the access and
        modified times, respectively.  This bizarre API is mimicked from python
        for the sake of consistency -- I apologize.
        @param times: C{None} or a tuple of (access time, modified time) in
            standard internet epoch time (seconds since 01 January 1970 GMT)
        @type times: tuple(int)
        """
        if times is None:
            times = (time.time(), time.time())
        self.sftp._log(DEBUG, 'utime(%s, %r)' % (hexlify(self.handle), times))
        attr = SFTPAttributes()
        attr.st_atime, attr.st_mtime = times
        self.sftp._request(CMD_FSETSTAT, self.handle, attr)

    def truncate(self, size):
        """
        Change the size of this file.  This usually extends
        or shrinks the size of the file, just like the C{truncate()} method on
        python file objects.
        @param size: the new size of the file
        @type size: int or long
        """
        self.sftp._log(DEBUG, 'truncate(%s, %r)' % (hexlify(self.handle), size))
        attr = SFTPAttributes()
        attr.st_size = size
        self.sftp._request(CMD_FSETSTAT, self.handle, attr)

    def check(self, hash_algorithm, offset=0, length=0, block_size=0):
        """
        Ask the server for a hash of a section of this file.  This can be used
        to verify a successful upload or download, or for various rsync-like
        operations.
        The file is hashed from C{offset}, for C{length} bytes.  If C{length}
        is 0, the remainder of the file is hashed.  Thus, if both C{offset}
        and C{length} are zero, the entire file is hashed.
        Normally, C{block_size} will be 0 (the default), and this method will
        return a byte string representing the requested hash (for example, a
        string of length 16 for MD5, or 20 for SHA-1).  If a non-zero
        C{block_size} is given, each chunk of the file (from C{offset} to
        C{offset + length}) of C{block_size} bytes is computed as a separate
        hash.  The hash results are all concatenated and returned as a single
        string.
        For example, C{check('sha1', 0, 1024, 512)} will return a string of
        length 40.  The first 20 bytes will be the SHA-1 of the first 512 bytes
        of the file, and the last 20 bytes will be the SHA-1 of the next 512
        bytes.
        @param hash_algorithm: the name of the hash algorithm to use (normally
            C{"sha1"} or C{"md5"})
        @type hash_algorithm: str
        @param offset: offset into the file to begin hashing (0 means to start
            from the beginning)
        @type offset: int or long
        @param length: number of bytes to hash (0 means continue to the end of
            the file)
        @type length: int or long
        @param block_size: number of bytes to hash per result (must not be less
            than 256; 0 means to compute only one hash of the entire segment)
        @type block_size: int
        @return: string of bytes representing the hash of each block,
            concatenated together
        @rtype: str
        @note: Many (most?) servers don't support this extension yet.
        @raise IOError: if the server doesn't support the "check-file"
            extension, or possibly doesn't support the hash algorithm
            requested
        @since: 1.4
        """
        t, msg = self.sftp._request(CMD_EXTENDED, 'check-file', self.handle,
                                    hash_algorithm, long(offset), long(length), block_size)
        # The reply echoes the extension name and algorithm before the digest.
        ext = msg.get_string()
        alg = msg.get_string()
        data = msg.get_remainder()
        return data

    def set_pipelined(self, pipelined=True):
        """
        Turn on/off the pipelining of write operations to this file.  When
        pipelining is on, paramiko won't wait for the server response after
        each write operation.  Instead, they're collected as they come in.
        At the first non-write operation (including L{close}), all remaining
        server responses are collected.  This means that if there was an error
        with one of your later writes, an exception might be thrown from
        within L{close} instead of L{write}.
        By default, files are I{not} pipelined.
        @param pipelined: C{True} if pipelining should be turned on for this
            file; C{False} otherwise
        @type pipelined: bool
        @since: 1.5
        """
        self.pipelined = pipelined

    def prefetch(self):
        """
        Pre-fetch the remaining contents of this file in anticipation of
        future L{read} calls.  If reading the entire file, pre-fetching can
        dramatically improve the download speed by avoiding roundtrip latency.
        The file's contents are incrementally buffered in a background thread.
        The prefetched data is stored in a buffer until read via the L{read}
        method.  Once data has been read, it's removed from the buffer.  The
        data may be read in a random order (using L{seek}); chunks of the
        buffer that haven't been read will continue to be buffered.
        @since: 1.5.1
        """
        size = self.stat().st_size
        # queue up async reads for the rest of the file
        chunks = []
        n = self._realpos
        while n < size:
            chunk = min(self.MAX_REQUEST_SIZE, size - n)
            chunks.append((n, chunk))
            n += chunk
        if len(chunks) > 0:
            self._start_prefetch(chunks)

    def readv(self, chunks):
        """
        Read a set of blocks from the file by (offset, length).  This is more
        efficient than doing a series of L{seek} and L{read} calls, since the
        prefetch machinery is used to retrieve all the requested blocks at
        once.
        @param chunks: a list of (offset, length) tuples indicating which
            sections of the file to read
        @type chunks: list(tuple(long, int))
        @return: a list of blocks read, in the same order as in C{chunks}
        @rtype: list(str)
        @since: 1.5.4
        """
        self.sftp._log(DEBUG, 'readv(%s, %r)' % (hexlify(self.handle), chunks))

        read_chunks = []
        for offset, size in chunks:
            # don't fetch data that's already in the prefetch buffer
            if self._data_in_prefetch_buffers(offset) or self._data_in_prefetch_requests(offset, size):
                continue

            # break up anything larger than the max read size
            while size > 0:
                chunk_size = min(size, self.MAX_REQUEST_SIZE)
                read_chunks.append((offset, chunk_size))
                offset += chunk_size
                size -= chunk_size

        self._start_prefetch(read_chunks)
        # now we can just devolve to a bunch of read()s :)
        for x in chunks:
            self.seek(x[0])
            yield self.read(x[1])

    ### internals...

    def _get_size(self):
        # Best-effort: a failed FSTAT (e.g. dead connection) is reported as 0.
        try:
            return self.stat().st_size
        except:
            return 0

    def _start_prefetch(self, chunks):
        self._prefetching = True
        self._prefetch_done = False
        self._prefetch_reads.extend(chunks)

        # Fire the requests from a daemon thread so a long queue can't block us.
        t = threading.Thread(target=self._prefetch_thread, args=(chunks,))
        t.setDaemon(True)
        t.start()

    def _prefetch_thread(self, chunks):
        # do these read requests in a temporary thread because there may be
        # a lot of them, so it may block.
        for offset, length in chunks:
            self.sftp._async_request(self, CMD_READ, self.handle, long(offset), int(length))

    def _async_response(self, t, msg):
        if t == CMD_STATUS:
            # save exception and re-raise it on next file operation
            try:
                self.sftp._convert_status(msg)
            except Exception, x:
                self._saved_exception = x
            return
        if t != CMD_DATA:
            raise SFTPError('Expected data')
        data = msg.get_string()
        # Responses arrive in request order, so pair with the oldest request.
        offset, length = self._prefetch_reads.pop(0)
        self._prefetch_data[offset] = data
        if len(self._prefetch_reads) == 0:
            self._prefetch_done = True

    def _check_exception(self):
        "if there's a saved exception, raise & clear it"
        if self._saved_exception is not None:
            x = self._saved_exception
            self._saved_exception = None
            raise x

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()
| |
from numpy import eye, array, sort, empty
from scipy.linalg import block_diag, eigvals
from scipy.signal.filter_design import _cplxpair
from numpy.testing import (assert_almost_equal, assert_array_almost_equal,
assert_array_equal)
from pytest import raises as assert_raises
from harold import lqr, ackermann, State, Transfer, haroldcompanion
from harold._static_ctrl_design import _get_pole_reps
def test_lqr_arguments():
    """Input validation and a minimal scalar run of lqr()."""
    # First arg is not LTI
    assert_raises(ValueError, lqr, 1, 1)
    # Static Gain
    assert_raises(ValueError, lqr, State(1), 1)
    # Wrong string
    assert_raises(ValueError, lqr, Transfer(1, [1, 1]), 1, weight_on='asdf')
    # Scalar problem data still goes end to end.
    model = Transfer(1, [1, 1])
    gain, ric, poles = lqr(model, 3)
    assert_almost_equal(array([gain[0, 0], ric[0, 0], poles[0]]),
                        [1, 1, -2+0j])
def test_simple_lqr():
    """Checks continuous-time LQR gains against known reference values."""
    # Example taken from M. de Oliveira's MAE280B lecture notes
    H = State([[0, 0, 1, 0],
               [0, 0, 0, 1],
               [4.03428022844288e-06, 0, 0, 0.0515652322798669],
               [0, 0, -0.000104315254033883, 0]],
              [[0, 0], [1e-5/3, 0], [0, 0], [0, 0.01]],
              eye(4))
    k, _, _ = lqr(H[:, 1], eye(4))
    # NOTE: H is mutated in place before the second design -- order matters.
    H.a = H.a.T
    f, _, _ = lqr(H[:, 0], block_diag(0, 0, 1e-5, 1e-5), 0.1)
    assert_almost_equal(k, array([[1.00554916, -1, 52.52180106, 18.51107167]]))
    assert_almost_equal(f, array([[-577.370350, 173.600463,
                                   0.383744946, 0.050228534]]), decimal=5)
def test_simple_lqry():
    """LQR with output weighting on a scalar system."""
    model = State(1, 1, 1, 1)
    gain, ric, poles = lqr(model, Q=3, weight_on='output')
    assert_almost_equal(array([gain[0, 0], ric[0, 0], poles[0]]),
                        [1.5, 3, -0.5+0j])
    # An S term whose shape disagrees with the system is rejected.
    assert_raises(ValueError, lqr, model, Q=3, S=eye(2), weight_on='output')
def test_simple_dlqr():
    """Checks discrete-time (dt=0.1) LQR gains against known reference values."""
    # Example taken from M. de Oliveira's MAE280B lecture notes
    H = State([[0, 0, 1, 0],
               [0, 0, 0, 1],
               [4.03428022844288e-06, 0, 0, 0.0515652322798669],
               [0, 0, -0.000104315254033883, 0]],
              [[0, 0], [1e-5/3, 0], [0, 0], [0, 0.01]],
              eye(4), dt=0.1)
    k, _, _ = lqr(H[:, 1], eye(4))
    # NOTE: H is mutated in place before the second design -- order matters.
    H.a = H.a.T
    f, _, _ = lqr(H[:, 0], block_diag(0, 0, 1e-5, 1e-5), 0.1)
    assert_almost_equal(k, array([[0, 0, -2.08727337333631e-06, 0]]))
    assert_almost_equal(f, array([[1.71884123e-11, 0, 0, -1.79301359e-15]]))
def test_ackermann_args():
    """Argument checking for ackermann()."""
    # Pole placement via Ackermann needs a single-input system.
    multi_input = State(eye(2), eye(2), eye(2))
    assert_raises(ValueError, ackermann, multi_input, [1, 2])
    # The number of requested poles must match the state dimension.
    siso = State(eye(2), [[1], [0]], [1, 0])
    assert_raises(ValueError, ackermann, siso, [1, 2, 3])
def test_ackermann_controllable():
    """Placed poles of a controllable pair match the requested ones."""
    A = haroldcompanion([1, 6, 5, 1])
    B = eye(3)[:, [-1]]
    desired = [-10, -9, -8]
    K = ackermann((A, B), desired)
    placed = eigvals(A - B@K)
    assert_array_almost_equal(array(desired, dtype=complex), sort(placed))
def test_ackermann_uncontrollable():
    """ackermann() must refuse an uncontrollable (A, B) pair."""
    # Appending an uncoupled state makes the pair uncontrollable.
    A = block_diag(haroldcompanion([1, 6, 5, 1]), 1)
    B = eye(4)[:, [-2]]
    assert_raises(ValueError, ackermann, (A, B), [-10, -9, -8, -7])
def byersnash_A_B_test_pairs():
    """Yields (A, B) state/input matrix pairs used as pole-placement benchmarks.

    Each pair is annotated with the publication it was taken from; the last
    entry is the deliberately ill-conditioned Byers-Nash (1989) example.
    """
    ABs = [
        # Chemical Reactor (Munro 1979)
        (array([[1.38, -0.2077, 6.715, -5.676],
                [-0.5814, -4.29, 0, 0.675],
                [1.067, 4.273, -6.654, 5.893],
                [0.048, 4.273, 1.343, -2.104]]),
         array([[0, 0],
                [5.679, 0],
                [1.136, -3.146],
                [1.136, 0]])),
        # Distillation Column (Klein, Moore 1977)
        (array([[-0.1094, 0.0628, 0, 0, 0],
                [1.306, -2.132, 0.9807, 0, 0],
                [0, 1.595, -3.149, 1.547, 0],
                [0, 0.0355, 2.632, -4.257, 1.855],
                [0, 0.0023, 0, 0.1636, -0.1625]]),
         array([[0, 0],
                [0.638, 0],
                [0.0838, -0.1396],
                [0.1004, -0.206],
                [0.0063, -0.0128]])),
        # Nuclear rocket engine (Davison, Chow 1974)
        (array([[-65.0, 65, -19.5, 19.5],
                [0.1, -0.1, 0, 0],
                [1, 0, -0.5, -1],
                [0, 0, 0.4, 0]]),
         array([[65., 0],
                [0, 0],
                [0, 0],
                [0, 0.4]])),
        # MIMO system (Atkinson, 1985)
        (array([[0, 1, 0],
                [0, 0, 1],
                [-6, -11, -6]]),
         array([[1, 1],
                [0, 1],
                [1, 1]])),
        # Drum boiler (Bengtsson 1973)
        (array([[-0.129, 0, 0.396, 0.25, 0.00191],
                [0.0329, 0, -0.00779, 0.0122, -0.621],
                [0.00718, 0, -0.1, 0.000887, -0.0385],
                [0.00411, 0, 0, -0.0822, 0],
                [0.00351, 0, 0.0035, 0.00426, -0.0743]]),
         array([[0, 0.1390],
                [0, 0.0359],
                [0, -0.0989],
                [0.0249, 0],
                [0, -0.00534]])),
        # Miminis random example #1
        (array([[5.8765, 9.3456, 4.5634, 9.3520],
                [6.6526, 0.5867, 3.5829, 0.6534],
                [0.0000, 9.6738, 7.4876, 4.7654],
                [0.0000, 0.0000, 6.6784, 2.5678]]),
         array([[3.9878, 0.5432],
                [0.0000, 2.7650],
                [0.0000, 0.0000],
                [0.0000, 0.0000]])),
        # Miminis random example #2
        (array([[.5257, .8544, .5596, .5901, .0259, .6213, .7227, .5617],
                [.9931, .0643, .1249, .3096, .5174, .3455, .8977, .4682],
                [.6489, .8279, .7279, .2552, .3917, .7065, .2428, .7795],
                [.9923, .9262, .2678, .6252, .2414, .5211, .4338, .9677],
                [.0000, .5667, .5465, .1157, .5064, .2870, .7901, .9809],
                [.0000, .0000, .8672, .6117, .4236, .6503, .5069, .8187],
                [.0000, .0000, .0000, .0000, .2894, .0881, .5233, .4257],
                [.0000, .0000, .0000, .0000, .0000, .4499, .5597, .2462]]),
         array([[0.9230, 0.3950, 0.8325],
                [0.0000, 0.0366, 0.6105],
                [0.0000, 0.0000, 0.1871],
                [0.0000, 0.0000, 0.0000],
                [0.0000, 0.0000, 0.0000],
                [0.0000, 0.0000, 0.0000],
                [0.0000, 0.0000, 0.0000],
                [0.0000, 0.0000, 0.0000]])),
        # Aircraft control example I (Kautsky and Nichols 1983)
        (array([[0, 1, 0, 0],
                [1.40e-4, -2.04, -1.95, -1.33e-2],
                [-2.51e-4, 1, -1.32, -2.38e-2],
                [-5.61e-1, 0, 0.358, -2.79e-1]]),
         array([[0, 0, 0],
                [-5.33, 6.45e-3, -2.67e-1],
                [-1.60e-1, -1.16e-2, -2.51e-1],
                [0, 1.06e-1, 8.62e-2]])),
        # Aircraft control example II (Kautsky and Nichols 1983)
        (array([[0, 1, 0, 0],
                [5.32e-7, -4.18e-1, -0.12, -2.32e-2],
                [-4.62e-9, 1, -0.752, -2.39e-2],
                [-5.61e-1, 0, 0.3, -1.74e-2]]),
         array([[0, 0],
                [-1.72e-1, 7.45e-6],
                [-2.82e-2, -7.78e-5],
                [0, 3.69e-3]])),
        # Symmetric example (Kautsky and Nichols 1983)
        (array([[-3.624, 4.9567e-2, -2.4564e-1, 1.3853e-2],
                [3.3486e-1, -1.8875, -8.1251e-1, -2.8102e-1],
                [-1.9958e-1, -1.1335, -2.2039, -4.5523e-1],
                [1.3784e-1, -4.7140e-1, -3.3229e-1, -4.0605]]),
         array([[2.3122e-1, 3.0761e-1, 3.6164e-1, 3.3217e-1],
                [8.8339e-1, 2.1460e-1, 5.6642e-1, 5.0153e-1]]).T),
        # Ad-hoc ill-conditioned example (Byers and Nash 1989)
        (array([[0, 0, 0, 0],
                [1, 10, 100, 1000],
                [0, 1, 10, 100],
                [0, 0, 1, 10]]),
         array([[1, 0],
                [0, 1],
                [0, 0],
                [0, 0]]))
    ]

    # Return a generator
    return (x for x in ABs)
def _test_get_pole_reps():
    """Exercises _get_pole_reps over mixes of real/complex and repeated poles.

    NOTE(review): the leading underscore keeps pytest from collecting this
    function -- presumably disabled deliberately; confirm before renaming.
    _get_pole_reps returns (pr, nc, nr): repetition index ranges for complex
    (pr[0]) and real (pr[1]) poles, and the complex/real pole counts.
    """
    # Only complex
    p = array([1.+1j, 1-1j, 2.+1j, 2-1j])
    pr, nc, nr = _get_pole_reps(p)
    for x in range(2):
        assert_array_equal(pr[x], empty((0, 2)))
    assert nc == 4
    assert nr == 0

    # Only real
    p = array([1, 2, 3])
    pr, nc, nr = _get_pole_reps(p)
    for x in range(2):
        assert_array_equal(pr[x], empty((0, 2)))
    assert nc == 0
    assert nr == 3

    # Mixed, no reps
    p = array([1.+1j, 1-1j, 3])
    pr, nc, nr = _get_pole_reps(p)
    for x in range(2):
        assert_array_equal(pr[x], empty((0, 2)))
    assert nc == 2
    assert nr == 1

    # Mixed, complex reps
    p = array([1.+1j, 1-1j, 1.+1j, 1-1j, 3])
    p = _cplxpair(p).conj()
    pr, nc, nr = _get_pole_reps(p)
    assert_array_equal(pr[0], array([[0, 2]]))
    assert_array_equal(pr[1], empty((0, 2)))
    assert nc == 4
    assert nr == 1

    # Mixed real reps
    p = array([1.+1j, 1-1j, 1., 1])
    p = _cplxpair(p).conj()
    pr, nc, nr = _get_pole_reps(p)
    assert_array_equal(pr[0], empty((0, 2)))
    assert_array_equal(pr[1], array([[2, 4]]))
    assert nc == 2
    assert nr == 2

    # Mixed real reps, real dangling
    p = array([1.+1j, 1-1j, 1., 1, 0.54, 3.8])
    p = _cplxpair(p).conj()
    pr, nc, nr = _get_pole_reps(p)
    assert_array_equal(pr[0], empty((0, 2)))
    assert_array_equal(pr[1], array([[3, 5]]))
    assert nc == 2
    assert nr == 4

    # Mixed complex reps, complex dangling
    p = array([1.+1j, 1-1j, 1.+1j, 1-1j, 0.+1j, 0-1j, 0.5, 3.])
    p = _cplxpair(p).conj()
    pr, nc, nr = _get_pole_reps(p)
    assert_array_equal(pr[0], array([[1, 3]]))
    assert_array_equal(pr[1], empty((0, 2)))
    assert nc == 6
    assert nr == 2

    # Mixed reps and dangling
    p = array([1.+1j, 1-1j, 1.+1j, 1-1j,
               2.+1j, 2-1j,
               3.+1j, 3-1j, 3.+1j, 3-1j, 3.+1j, 3-1j,
               4.+1j, 4-1j,
               0,
               0.5, 0.5,
               3.,
               6, 6, 6])
    p = _cplxpair(p).conj()
    pr, nc, nr = _get_pole_reps(p)
    assert_array_equal(pr[0], array([[0, 2],
                                     [3, 6]]))
    assert_array_equal(pr[1], array([[15, 17],
                                     [18, 21]]))
    assert nc == 14
    assert nr == 7
| |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common classes and methods for managing Courses."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import copy
import logging
import os
import pickle
import sys
from tools import verify
import yaml
from models import MemcacheManager
import progress
import transforms
import vfs
COURSE_MODEL_VERSION_1_2 = '1.2'
COURSE_MODEL_VERSION_1_3 = '1.3'
def deep_dict_merge(real_values_dict, default_values_dict):
    """Merges default and real value dictionaries recursively.

    Args:
        real_values_dict: dict with the authoritative values; may be None or
            empty, in which case only defaults are returned.
        default_values_dict: dict supplying fallbacks for keys missing from
            real_values_dict at any nesting level.

    Returns:
        A new dict; neither input is modified. Values copied in from the
        defaults are deep-copied, so mutating the merged result can never
        leak back into the (typically shared, module-level) defaults.
    """

    def _deep_merge(real_values, default_values):
        """Updates real with default values recursively."""
        # Recursively merge sub-dictionaries present on both sides.
        for key, value in real_values.items():
            default_value = default_values.get(key)
            if (default_value and isinstance(
                    value, dict) and isinstance(default_value, dict)):
                _deep_merge(value, default_value)

        # Copy over values present only in the defaults. Deep-copy so the
        # result does not alias the defaults dictionary's subtrees (the
        # original code copied references, letting callers corrupt the
        # shared defaults by mutating the merged result).
        for key, value in default_values.items():
            if key not in real_values:
                real_values[key] = copy.deepcopy(value)

    result = {}
    if real_values_dict:
        result = copy.deepcopy(real_values_dict)
    _deep_merge(result, default_values_dict)
    return result
# Here are the defaults for a new course.
DEFAULT_COURSE_YAML_DICT = {
'course': {
'title': 'UNTITLED COURSE',
'locale': 'en_US',
'main_image': {},
'now_available': False},
'base': {
'show_gplus_button': True},
'institution': {
'logo': {},
'url': ''},
'preview': {},
'unit': {},
'reg_form': {
'can_register': True,
'additional_registration_fields': (
'<!-- reg_form.additional_registration_fields -->')}
}
# Here are the defaults for an existing course.
DEFAULT_EXISTING_COURSE_YAML_DICT = deep_dict_merge(
{'course': {
'now_available': True}},
DEFAULT_COURSE_YAML_DICT)
# Here is the default course.yaml for a new course.
EMPTY_COURSE_YAML = u"""# my new course.yaml
course:
title: 'New Course by %s'
now_available: False
"""
# Here are the default assessment weights corresponding to the sample course.
DEFAULT_LEGACY_ASSESSMENT_WEIGHTS = {'Pre': 0, 'Mid': 30, 'Fin': 70}
def is_editable_fs(app_context):
    """Returns True when the app context's file system is datastore-backed (writable)."""
    fs_impl = app_context.fs.impl
    return isinstance(fs_impl, vfs.DatastoreBackedFileSystem)
def copy_attributes(source, target, converter):
    """Copies source object attributes into a target using a converter.

    Args:
        source: object whose attributes are read.
        target: object whose attributes are assigned.
        converter: mapping of source attribute name to either a falsy value
            (meaning "skip this attribute") or a (target_name, target_type)
            pair used to rename and coerce the value.
    """
    for source_name, spec in converter.items():
        if not spec:
            # A falsy entry marks an attribute that must not be copied.
            continue
        target_name, target_type = spec[0], spec[1]
        raw_value = getattr(source, source_name)
        setattr(target, target_name, target_type(raw_value))
def load_csv_course(app_context):
    """Loads course data from the CSV files.

    Returns:
        A (units, lessons) pair of datastore model objects, or (None, None)
        when either CSV file is missing.
    """
    logging.info('Initializing datastore from CSV files.')

    data_home = app_context.get_data_home()
    unit_file = os.path.join(data_home, 'unit.csv')
    lesson_file = os.path.join(data_home, 'lesson.csv')

    # Both files must exist before we attempt a load.
    if not (app_context.fs.isfile(unit_file) and
            app_context.fs.isfile(lesson_file)):
        return None, None

    # First pass: parse into verifier objects and check integrity.
    units = verify.read_objects_from_csv_stream(
        app_context.fs.open(unit_file), verify.UNITS_HEADER, verify.Unit)
    lessons = verify.read_objects_from_csv_stream(
        app_context.fs.open(lesson_file), verify.LESSONS_HEADER, verify.Lesson)
    verifier = verify.Verifier()
    verifier.verify_unit_fields(units)
    verifier.verify_lesson_fields(lessons)
    verifier.verify_unit_lesson_relationships(units, lessons)
    assert verifier.errors == 0
    assert verifier.warnings == 0

    # Second pass: re-read the same files into datastore model objects.
    units = verify.read_objects_from_csv_stream(
        app_context.fs.open(unit_file), verify.UNITS_HEADER, Unit12,
        converter=verify.UNIT_CSV_TO_DB_CONVERTER)
    lessons = verify.read_objects_from_csv_stream(
        app_context.fs.open(lesson_file), verify.LESSONS_HEADER, Lesson12,
        converter=verify.LESSON_CSV_TO_DB_CONVERTER)
    return units, lessons
def index_units_and_lessons(course):
    """Index all 'U' type units and their lessons. Indexes are 1-based."""
    next_unit_index = 1
    for unit in course.get_units():
        # Only 'U' type units are indexed; links and assessments are not.
        if unit.type != verify.UNIT_TYPE_UNIT:
            continue
        unit._index = next_unit_index  # pylint: disable-msg=protected-access
        next_unit_index += 1
        # Lessons restart their numbering inside each unit.
        for position, lesson in enumerate(
                course.get_lessons(unit.unit_id), start=1):
            lesson._index = position  # pylint: disable-msg=protected-access
class AbstractCachedObject(object):
    """Abstract serializable versioned object that can stored in memcache."""

    @classmethod
    def _make_key(cls):
        # The course content files may change between deployment. To avoid
        # reading old cached values by the new version of the application we
        # add deployment version to the key. Now each version of the application
        # can put/get its own version of the course and the deployment.
        return 'course:model:pickle:%s:%s' % (
            cls.VERSION, os.environ.get('CURRENT_VERSION_ID'))

    @classmethod
    def new_memento(cls):
        """Creates new empty memento instance; must be pickle serializable."""
        raise Exception('Not implemented')

    @classmethod
    def instance_from_memento(cls, unused_app_context, unused_memento):
        """Creates instance from serializable memento."""
        raise Exception('Not implemented')

    @classmethod
    def memento_from_instance(cls, unused_instance):
        """Creates serializable memento from instance."""
        raise Exception('Not implemented')

    @classmethod
    def load(cls, app_context):
        """Loads instance from memcache; does not fail on errors."""
        try:
            binary_data = MemcacheManager.get(
                cls._make_key(),
                namespace=app_context.get_namespace_name())
            if binary_data:
                memento = cls.new_memento()
                memento.deserialize(binary_data)
                return cls.instance_from_memento(app_context, memento)
        except Exception as e:  # pylint: disable-msg=broad-except
            # Cache failures are deliberately non-fatal: log and return None
            # so the caller rebuilds the object from source data.
            logging.error(
                'Failed to load object \'%s\' from memcache. %s',
                cls._make_key(), e)
            return None

    @classmethod
    def save(cls, app_context, instance):
        """Saves instance to memcache."""
        MemcacheManager.set(
            cls._make_key(),
            cls.memento_from_instance(instance).serialize(),
            namespace=app_context.get_namespace_name())

    @classmethod
    def delete(cls, app_context):
        """Deletes instance from memcache."""
        MemcacheManager.delete(
            cls._make_key(),
            namespace=app_context.get_namespace_name())

    def serialize(self):
        """Saves instance to a pickle representation."""
        return pickle.dumps(self.__dict__)

    def deserialize(self, binary_data):
        """Loads instance from a pickle representation."""
        # NOTE(review): pickle.loads is only safe because the data comes from
        # our own memcache writes; never feed this untrusted input.
        adict = pickle.loads(binary_data)
        if not self.version == adict.get('version'):
            raise Exception('Expected version %s, found %s.' % (
                self.version, adict.get('version')))
        self.__dict__.update(adict)
class Unit12(object):
    """An object to represent a Unit, Assessment or Link (version 1.2)."""

    def __init__(self):
        self.unit_id = ''  # primary key
        self.type = ''  # one of the verify.UNIT_TYPE_* values
        self.title = ''
        self.release_date = ''
        self.now_available = False

        # 'U' type units carry an automatically computed 1-based index.
        self._index = None

    @property
    def href(self):
        # In the 1.2 model a link unit stores its target URL in unit_id.
        assert self.type == verify.UNIT_TYPE_LINK
        return self.unit_id

    @property
    def index(self):
        # Only plain 'U' units are indexed.
        assert self.type == verify.UNIT_TYPE_UNIT
        return self._index
class Lesson12(object):
    """An object to represent a Lesson (version 1.2)."""

    def __init__(self):
        self.lesson_id = 0  # primary key
        self.unit_id = 0  # unit.unit_id of the parent unit
        self.title = ''
        self.objectives = ''
        self.video = ''
        self.notes = ''
        self.duration = ''
        self.activity = ''
        self.activity_title = ''

        # Lessons carry an automatically computed 1-based index within
        # their parent unit.
        self._index = None

    @property
    def now_available(self):
        # The 1.2 model has no draft state: every lesson is available.
        return True

    @property
    def index(self):
        return self._index
class CachedCourse12(AbstractCachedObject):
    """A representation of a CourseModel12 optimized for storing in memcache."""

    # Cache schema version; also mixed into the memcache key.
    VERSION = COURSE_MODEL_VERSION_1_2

    def __init__(self, units=None, lessons=None, unit_id_to_lessons=None):
        self.version = self.VERSION
        self.units = units
        self.lessons = lessons
        self.unit_id_to_lessons = unit_id_to_lessons

    @classmethod
    def new_memento(cls):
        """Returns a fresh, empty memento; see AbstractCachedObject."""
        return CachedCourse12()

    @classmethod
    def instance_from_memento(cls, app_context, memento):
        """Rebuilds a CourseModel12 from a previously cached memento."""
        return CourseModel12(
            app_context, units=memento.units, lessons=memento.lessons,
            unit_id_to_lessons=memento.unit_id_to_lessons)

    @classmethod
    def memento_from_instance(cls, course):
        """Captures the state of a CourseModel12 into a cacheable memento."""
        return CachedCourse12(
            units=course.units, lessons=course.lessons,
            unit_id_to_lessons=course.unit_id_to_lessons)
class CourseModel12(object):
    """A course defined in terms of CSV files (version 1.2).

    This model is read-only at runtime: content lives in CSV files and
    static assets, and a parsed copy is held in memcache via CachedCourse12.
    """

    VERSION = COURSE_MODEL_VERSION_1_2

    @classmethod
    def load(cls, app_context):
        """Loads course data into a model, via memcache when possible."""
        course = CachedCourse12.load(app_context)
        if not course:
            units, lessons = load_csv_course(app_context)
            if units and lessons:
                course = CourseModel12(app_context, units, lessons)
            if course:
                # Populate the cache so the CSV parse is skipped next time.
                CachedCourse12.save(app_context, course)
        return course

    @classmethod
    def _make_unit_id_to_lessons_lookup_dict(cls, lessons):
        """Creates an index of unit.unit_id to a list of its lessons."""
        unit_id_to_lessons = {}
        for lesson in lessons:
            # setdefault replaces the manual "if key not in dict" dance.
            unit_id_to_lessons.setdefault(
                str(lesson.unit_id), []).append(lesson)
        return unit_id_to_lessons

    def __init__(
        self, app_context,
        units=None, lessons=None, unit_id_to_lessons=None):
        """Initializes the model from pre-parsed units/lessons.

        Args:
            app_context: application context owning this course.
            units: optional list of Unit12 objects.
            lessons: optional list of Lesson12 objects.
            unit_id_to_lessons: optional precomputed lookup index; rebuilt
                from lessons when not given.
        """
        self._app_context = app_context
        self._units = []
        self._lessons = []
        self._unit_id_to_lessons = {}
        if units:
            self._units = units
        if lessons:
            self._lessons = lessons
        if unit_id_to_lessons:
            self._unit_id_to_lessons = unit_id_to_lessons
        else:
            self._unit_id_to_lessons = (
                self._make_unit_id_to_lessons_lookup_dict(self._lessons))
        index_units_and_lessons(self)

    @property
    def app_context(self):
        return self._app_context

    @property
    def units(self):
        return self._units

    @property
    def lessons(self):
        return self._lessons

    @property
    def unit_id_to_lessons(self):
        return self._unit_id_to_lessons

    def get_units(self):
        """Returns a shallow copy so callers cannot mutate internal state."""
        return self._units[:]

    def get_lessons(self, unit_id):
        """Returns the lessons belonging to a unit (empty list if none)."""
        return self._unit_id_to_lessons.get(str(unit_id), [])

    def find_unit_by_id(self, unit_id):
        """Finds a unit given its id; returns None when not found."""
        for unit in self._units:
            if str(unit.unit_id) == str(unit_id):
                return unit
        return None

    def get_assessment_filename(self, unit_id):
        """Returns assessment base filename."""
        unit = self.find_unit_by_id(unit_id)
        assert unit and verify.UNIT_TYPE_ASSESSMENT == unit.type
        return 'assets/js/assessment-%s.js' % unit.unit_id

    def get_activity_filename(self, unit_id, lesson_id):
        """Returns activity base filename."""
        return 'assets/js/activity-%s.%s.js' % (unit_id, lesson_id)

    def find_lesson_by_id(self, unit, lesson_id):
        """Finds a lesson given its id (a 1-based index in this model).

        NOTE(review): raises IndexError for an unknown id rather than
        returning None like the 1.3 model does; callers appear to rely on
        valid ids here.
        """
        index = int(lesson_id) - 1
        return self.get_lessons(unit.unit_id)[index]

    def to_json(self):
        """Creates JSON representation of this instance."""
        adict = copy.deepcopy(self)
        # The app context is not serializable and is not course content.
        del adict._app_context
        return transforms.dumps(
            adict,
            indent=4, sort_keys=True,
            default=lambda o: o.__dict__)
class Unit13(object):
    """An object to represent a Unit, Assessment or Link (version 1.3)."""

    def __init__(self):
        self.unit_id = 0  # primary key
        self.type = ''  # one of the verify.UNIT_TYPE_* values
        self.title = ''
        self.release_date = ''
        self.now_available = False

        # 'U' type units carry an automatically computed 1-based index.
        self._index = None

        # Only meaningful when unit.type == verify.UNIT_TYPE_LINK.
        self.href = None

        # Only meaningful when unit.type == verify.UNIT_TYPE_ASSESSMENT.
        self.weight = 0

    @property
    def index(self):
        # Only plain 'U' units are indexed.
        assert self.type == verify.UNIT_TYPE_UNIT
        return self._index
class Lesson13(object):
    """An object to represent a Lesson (version 1.3)."""

    def __init__(self):
        self.lesson_id = 0  # primary key
        self.unit_id = 0  # unit.unit_id of the parent unit
        self.title = ''
        self.objectives = ''
        self.video = ''
        self.notes = ''
        self.duration = ''
        self.now_available = False
        self.has_activity = False
        self.activity_title = ''

        # Lessons carry an automatically computed 1-based index within
        # their parent unit.
        self._index = None

    @property
    def index(self):
        return self._index

    @property
    def activity(self):
        """A symbolic name to old attribute."""
        # Kept for compatibility with code written against the 1.2 model.
        return self.has_activity
class PersistentCourse13(object):
    """A representation of a Course13 optimized for persistence."""

    COURSES_FILENAME = 'data/course.json'

    def __init__(self, next_id=None, units=None, lessons=None):
        self.version = CourseModel13.VERSION
        self.next_id = next_id
        self.units = units
        self.lessons = lessons

    def to_dict(self):
        """Saves object attributes into a dict."""
        result = {}
        result['version'] = str(self.version)
        result['next_id'] = int(self.next_id)
        result['units'] = [
            transforms.instance_to_dict(unit) for unit in self.units]
        result['lessons'] = [
            transforms.instance_to_dict(lesson) for lesson in self.lessons]
        return result

    def _from_dict(self, adict):
        """Loads instance attributes from the dict."""
        self.next_id = int(adict.get('next_id'))

        self.units = []
        for unit_dict in adict.get('units') or []:
            unit = Unit13()
            transforms.dict_to_instance(unit_dict, unit)
            self.units.append(unit)

        self.lessons = []
        for lesson_dict in adict.get('lessons') or []:
            lesson = Lesson13()
            transforms.dict_to_instance(lesson_dict, lesson)
            self.lessons.append(lesson)

    @classmethod
    def save(cls, app_context, course):
        """Saves course to datastore."""
        persistent = PersistentCourse13(
            next_id=course.next_id,
            units=course.units, lessons=course.lessons)

        fs = app_context.fs.impl
        filename = fs.physical_to_logical(cls.COURSES_FILENAME)
        app_context.fs.put(filename, vfs.FileStreamWrapped(
            None, persistent.serialize()))

    @classmethod
    def load(cls, app_context):
        """Loads course from datastore; returns None when absent."""
        fs = app_context.fs.impl
        filename = fs.physical_to_logical(cls.COURSES_FILENAME)
        if not app_context.fs.isfile(filename):
            return None
        persistent = PersistentCourse13()
        persistent.deserialize(app_context.fs.get(filename))
        return CourseModel13(
            app_context, next_id=persistent.next_id,
            units=persistent.units, lessons=persistent.lessons)

    def serialize(self):
        """Saves instance to a JSON representation."""
        return transforms.dumps(self.to_dict()).encode('utf-8')

    def deserialize(self, binary_data):
        """Loads instance from a JSON representation."""
        adict = transforms.loads(binary_data.decode('utf-8'))
        if adict.get('version') != self.version:
            raise Exception('Expected version %s, found %s.' % (
                self.version, adict.get('version')))
        self._from_dict(adict)
class CachedCourse13(AbstractCachedObject):
    """A representation of a CourseModel13 optimized for storing in memcache."""

    # Cache schema version; also mixed into the memcache key.
    VERSION = COURSE_MODEL_VERSION_1_3

    def __init__(
        self, next_id=None, units=None, lessons=None,
        unit_id_to_lesson_ids=None):
        self.version = self.VERSION
        self.next_id = next_id
        self.units = units
        self.lessons = lessons

        # This is almost the same as PersistentCourse13 above, but it also
        # stores additional indexes used for performance optimizations. There
        # is no need to persist these indexes in durable storage, but it is
        # nice to have them in memcache.
        self.unit_id_to_lesson_ids = unit_id_to_lesson_ids

    @classmethod
    def new_memento(cls):
        """Returns a fresh, empty memento; see AbstractCachedObject."""
        return CachedCourse13()

    @classmethod
    def instance_from_memento(cls, app_context, memento):
        """Rebuilds a CourseModel13 from a previously cached memento."""
        return CourseModel13(
            app_context, next_id=memento.next_id,
            units=memento.units, lessons=memento.lessons,
            unit_id_to_lesson_ids=memento.unit_id_to_lesson_ids)

    @classmethod
    def memento_from_instance(cls, course):
        """Captures the state of a CourseModel13 into a cacheable memento."""
        return CachedCourse13(
            next_id=course.next_id,
            units=course.units, lessons=course.lessons,
            unit_id_to_lesson_ids=course.unit_id_to_lesson_ids)
class CourseModel13(object):
    """A course defined in terms of objects (version 1.3).

    Unlike the 1.2 model, this model is mutable: units and lessons can be
    added, updated, moved and deleted. Mutations are tracked in the
    _dirty_*/_deleted_* lists and applied to durable storage on save().
    """

    VERSION = COURSE_MODEL_VERSION_1_3

    @classmethod
    def load(cls, app_context):
        """Loads course from memcache or persistence."""
        course = CachedCourse13.load(app_context)
        if not course:
            course = PersistentCourse13.load(app_context)
            if course:
                # Repopulate the cache after a cache miss.
                CachedCourse13.save(app_context, course)
        return course

    @classmethod
    def _make_unit_id_to_lessons_lookup_dict(cls, lessons):
        """Creates an index of unit.unit_id to a list of its lesson ids."""
        unit_id_to_lesson_ids = {}
        for lesson in lessons:
            # setdefault replaces the manual "if key not in dict" dance.
            unit_id_to_lesson_ids.setdefault(
                str(lesson.unit_id), []).append(str(lesson.lesson_id))
        return unit_id_to_lesson_ids

    def __init__(
        self, app_context, next_id=None, units=None, lessons=None,
        unit_id_to_lesson_ids=None):
        """Initializes the model.

        Args:
            app_context: application context owning this course.
            next_id: next sequential entity id to allocate (defaults to 1).
            units: optional list of Unit13 objects.
            lessons: optional list of Lesson13 objects.
            unit_id_to_lesson_ids: optional precomputed lookup index;
                recomputed when not given.
        """
        # Init default values.
        self._app_context = app_context
        self._next_id = 1  # a counter for creating sequential entity ids
        self._units = []
        self._lessons = []
        self._unit_id_to_lesson_ids = {}

        # These arrays keep dirty objects in the current transaction.
        self._dirty_units = []
        self._dirty_lessons = []
        self._deleted_units = []
        self._deleted_lessons = []

        # Set provided values.
        if next_id:
            self._next_id = next_id
        if units:
            self._units = units
        if lessons:
            self._lessons = lessons
        if unit_id_to_lesson_ids:
            self._unit_id_to_lesson_ids = unit_id_to_lesson_ids
        else:
            self._index()

    @property
    def app_context(self):
        return self._app_context

    @property
    def next_id(self):
        return self._next_id

    @property
    def units(self):
        return self._units

    @property
    def lessons(self):
        return self._lessons

    @property
    def unit_id_to_lesson_ids(self):
        return self._unit_id_to_lesson_ids

    def _get_next_id(self):
        """Allocates next id in sequence."""
        next_id = self._next_id
        self._next_id += 1
        return next_id

    def _index(self):
        """Indexes units and lessons."""
        self._unit_id_to_lesson_ids = self._make_unit_id_to_lessons_lookup_dict(
            self._lessons)
        index_units_and_lessons(self)

    def is_dirty(self):
        """Checks if course object has been modified and needs to be saved."""
        # Return a real boolean rather than leaking the internal lists.
        return bool(self._dirty_units or self._dirty_lessons)

    def _flush_deleted_objects(self):
        """Delete files owned by deleted objects."""

        # TODO(psimakov): handle similarly add_unit() and set_assessment()

        # To delete an activity/assessment one must look up its filename. This
        # requires a valid unit/lesson. If unit was deleted it's no longer
        # found in _units, same for lesson. So we temporarily install deleted
        # unit/lesson array instead of actual. We also temporarily empty
        # so _unit_id_to_lesson_ids is not accidentally used. This is a hack,
        # and we will improve it as object model gets more complex, but for
        # now it works fine.
        units = self._units
        lessons = self._lessons
        unit_id_to_lesson_ids = self._unit_id_to_lesson_ids
        try:
            self._units = self._deleted_units
            self._lessons = self._deleted_lessons
            self._unit_id_to_lesson_ids = None

            # Delete owned assessments.
            for unit in self._deleted_units:
                if verify.UNIT_TYPE_ASSESSMENT == unit.type:
                    self._delete_assessment(unit)

            # Delete owned activities.
            for lesson in self._deleted_lessons:
                if lesson.has_activity:
                    self._delete_activity(lesson)
        finally:
            # Restore the real object lists no matter what happened above.
            self._units = units
            self._lessons = lessons
            self._unit_id_to_lesson_ids = unit_id_to_lesson_ids

    def _update_dirty_objects(self):
        """Update files owned by course."""
        fs = self.app_context.fs

        # Update state of owned assessments.
        for unit in self._dirty_units:
            unit = self.find_unit_by_id(unit.unit_id)
            if not unit or verify.UNIT_TYPE_ASSESSMENT != unit.type:
                continue
            path = fs.impl.physical_to_logical(
                self.get_assessment_filename(unit.unit_id))
            if fs.isfile(path):
                # Only the draft flag changes; the file content stays as is.
                fs.put(
                    path, None, metadata_only=True,
                    is_draft=not unit.now_available)

        # Update state of owned activities.
        for lesson in self._dirty_lessons:
            lesson = self.find_lesson_by_id(None, lesson.lesson_id)
            if not lesson or not lesson.has_activity:
                continue
            path = fs.impl.physical_to_logical(
                self.get_activity_filename(None, lesson.lesson_id))
            if fs.isfile(path):
                fs.put(
                    path, None, metadata_only=True,
                    is_draft=not lesson.now_available)

    def save(self):
        """Saves course to datastore and evicts the memcache copy."""
        self._flush_deleted_objects()
        self._update_dirty_objects()

        self._dirty_units = []
        self._dirty_lessons = []
        self._deleted_units = []
        self._deleted_lessons = []

        self._index()
        PersistentCourse13.save(self._app_context, self)
        CachedCourse13.delete(self._app_context)

    def get_units(self):
        """Returns a shallow copy so callers cannot mutate internal state."""
        return self._units[:]

    def get_lessons(self, unit_id):
        """Returns the lessons of a unit, in index order."""
        lesson_ids = self._unit_id_to_lesson_ids.get(str(unit_id))
        lessons = []
        if lesson_ids:
            for lesson_id in lesson_ids:
                lessons.append(self.find_lesson_by_id(None, lesson_id))
        return lessons

    def get_assessment_filename(self, unit_id):
        """Returns assessment base filename."""
        unit = self.find_unit_by_id(unit_id)
        assert unit
        assert verify.UNIT_TYPE_ASSESSMENT == unit.type
        return 'assets/js/assessment-%s.js' % unit.unit_id

    def get_activity_filename(self, unused_unit_id, lesson_id):
        """Returns activity base filename, or None if lesson has none."""
        lesson = self.find_lesson_by_id(None, lesson_id)
        assert lesson
        if lesson.has_activity:
            return 'assets/js/activity-%s.js' % lesson_id
        return None

    def find_unit_by_id(self, unit_id):
        """Finds a unit given its id; returns None when not found."""
        for unit in self._units:
            if str(unit.unit_id) == str(unit_id):
                return unit
        return None

    def find_lesson_by_id(self, unused_unit, lesson_id):
        """Finds a lesson given its id; returns None when not found."""
        for lesson in self._lessons:
            if str(lesson.lesson_id) == str(lesson_id):
                return lesson
        return None

    def add_unit(self, unit_type, title):
        """Adds a brand new unit."""
        assert unit_type in verify.UNIT_TYPES

        unit = Unit13()
        unit.type = unit_type
        unit.unit_id = self._get_next_id()
        unit.title = title
        unit.now_available = False

        self._units.append(unit)
        self._index()

        self._dirty_units.append(unit)
        return unit

    def add_lesson(self, unit, title):
        """Adds brand new lesson to a unit."""
        unit = self.find_unit_by_id(unit.unit_id)
        assert unit

        lesson = Lesson13()
        lesson.lesson_id = self._get_next_id()
        lesson.unit_id = unit.unit_id
        lesson.title = title
        lesson.now_available = False

        self._lessons.append(lesson)
        self._index()

        self._dirty_lessons.append(lesson)
        return lesson

    def move_lesson_to(self, lesson, unit):
        """Moves a lesson to another unit."""
        unit = self.find_unit_by_id(unit.unit_id)
        assert unit
        assert verify.UNIT_TYPE_UNIT == unit.type

        lesson = self.find_lesson_by_id(None, lesson.lesson_id)
        assert lesson
        lesson.unit_id = unit.unit_id

        self._index()
        return lesson

    def _delete_activity(self, lesson):
        """Deletes the activity file owned by a lesson, if present."""
        filename = self._app_context.fs.impl.physical_to_logical(
            self.get_activity_filename(None, lesson.lesson_id))
        if self.app_context.fs.isfile(filename):
            self.app_context.fs.delete(filename)
            return True
        return False

    def _delete_assessment(self, unit):
        """Deletes the assessment file owned by a unit, if present."""
        filename = self._app_context.fs.impl.physical_to_logical(
            self.get_assessment_filename(unit.unit_id))
        if self.app_context.fs.isfile(filename):
            self.app_context.fs.delete(filename)
            return True
        return False

    def delete_lesson(self, lesson):
        """Deletes a lesson; returns True when it existed."""
        lesson = self.find_lesson_by_id(None, lesson.lesson_id)
        if not lesson:
            return False
        self._lessons.remove(lesson)
        self._index()
        self._deleted_lessons.append(lesson)
        self._dirty_lessons.append(lesson)
        return True

    def delete_unit(self, unit):
        """Deletes a unit and all of its lessons; returns True when found."""
        unit = self.find_unit_by_id(unit.unit_id)
        if not unit:
            return False
        for lesson in self.get_lessons(unit.unit_id):
            self.delete_lesson(lesson)
        self._units.remove(unit)
        self._index()
        self._deleted_units.append(unit)
        self._dirty_units.append(unit)
        return True

    def update_unit(self, unit):
        """Updates an existing unit; returns it, or False when not found."""
        existing_unit = self.find_unit_by_id(unit.unit_id)
        if not existing_unit:
            return False
        existing_unit.title = unit.title
        existing_unit.release_date = unit.release_date
        existing_unit.now_available = unit.now_available

        # href and weight only apply to specific unit types.
        if verify.UNIT_TYPE_LINK == existing_unit.type:
            existing_unit.href = unit.href
        if verify.UNIT_TYPE_ASSESSMENT == existing_unit.type:
            existing_unit.weight = unit.weight

        self._dirty_units.append(existing_unit)
        return existing_unit

    def update_lesson(self, lesson):
        """Updates an existing lesson; returns it, or False when not found."""
        existing_lesson = self.find_lesson_by_id(
            lesson.unit_id, lesson.lesson_id)
        if not existing_lesson:
            return False
        existing_lesson.title = lesson.title
        existing_lesson.unit_id = lesson.unit_id
        existing_lesson.objectives = lesson.objectives
        existing_lesson.video = lesson.video
        existing_lesson.notes = lesson.notes
        existing_lesson.activity_title = lesson.activity_title

        self._index()

        self._dirty_lessons.append(existing_lesson)
        return existing_lesson

    def reorder_units(self, order_data):
        """Reorder the units and lessons based on the order data given.

        Args:
            order_data: list of dict. Format is
                The order_data is in the following format:
                [
                    {'id': 0, 'lessons': [{'id': 0}, {'id': 1}, {'id': 2}]},
                    {'id': 1},
                    {'id': 2, 'lessons': [{'id': 0}, {'id': 1}]}
                    ...
                ]
        """
        reordered_units = []
        unit_ids = set()
        for unit_data in order_data:
            unit_id = unit_data['id']
            unit = self.find_unit_by_id(unit_id)
            assert unit
            reordered_units.append(self.find_unit_by_id(unit_id))
            unit_ids.add(unit_id)
        # The new order must be a complete permutation of the old one.
        assert len(unit_ids) == len(self._units)
        self._units = reordered_units

        reordered_lessons = []
        lesson_ids = set()
        for unit_data in order_data:
            unit_id = unit_data['id']
            unit = self.find_unit_by_id(unit_id)
            assert unit
            if verify.UNIT_TYPE_UNIT != unit.type:
                continue
            for lesson_data in unit_data['lessons']:
                lesson_id = lesson_data['id']
                reordered_lessons.append(
                    self.find_lesson_by_id(None, lesson_id))
                lesson_ids.add((unit_id, lesson_id))
        assert len(lesson_ids) == len(self._lessons)
        self._lessons = reordered_lessons

        self._index()

    def set_assessment_content(self, unit, assessment_content, errors=None):
        """Updates the content of an assessment.

        The content is parsed and schema-verified; problems are appended to
        errors and the file is not written.
        """
        if errors is None:
            errors = []

        path = self._app_context.fs.impl.physical_to_logical(
            self.get_assessment_filename(unit.unit_id))

        root_name = 'assessment'
        try:
            content, noverify_text = verify.convert_javascript_to_python(
                assessment_content, root_name)
            assessment = verify.evaluate_python_expression_from_text(
                content, root_name, verify.Assessment().scope, noverify_text)
        except Exception:  # pylint: disable-msg=broad-except
            errors.append('Unable to parse %s:\n%s' % (
                root_name,
                str(sys.exc_info()[1])))
            return

        verifier = verify.Verifier()
        try:
            verifier.verify_assessment_instance(assessment, path)
        except verify.SchemaException:
            errors.append('Error validating %s\n' % root_name)
            return

        fs = self.app_context.fs
        fs.put(
            path, vfs.string_to_stream(assessment_content),
            is_draft=not unit.now_available)

    def set_activity_content(self, lesson, activity_content, errors=None):
        """Updates the content of an activity.

        The content is parsed and schema-verified; problems are appended to
        errors and the file is not written.
        """
        if errors is None:
            errors = []

        path = self._app_context.fs.impl.physical_to_logical(
            self.get_activity_filename(lesson.unit_id, lesson.lesson_id))

        root_name = 'activity'
        try:
            content, noverify_text = verify.convert_javascript_to_python(
                activity_content, root_name)
            activity = verify.evaluate_python_expression_from_text(
                content, root_name, verify.Activity().scope, noverify_text)
        except Exception:  # pylint: disable-msg=broad-except
            errors.append('Unable to parse %s:\n%s' % (
                root_name,
                str(sys.exc_info()[1])))
            return

        verifier = verify.Verifier()
        try:
            verifier.verify_activity_instance(activity, path)
        except verify.SchemaException:
            errors.append('Error validating %s\n' % root_name)
            return

        fs = self.app_context.fs
        fs.put(
            path, vfs.string_to_stream(activity_content),
            is_draft=not lesson.now_available)

    def import_from(self, src_course, errors):
        """Imports a content of another course into this course."""

        def copy_unit12_into_unit13(src_unit, dst_unit):
            """Copies unit object attributes between versions."""
            assert dst_unit.type == src_unit.type

            dst_unit.title = src_unit.title
            dst_unit.release_date = src_unit.release_date
            dst_unit.now_available = src_unit.now_available

            if verify.UNIT_TYPE_LINK == dst_unit.type:
                dst_unit.href = src_unit.href

            # Copy over the assessment. Note that we copy files directly and
            # avoid all logical validations of their content. This is done for
            # a purpose - at this layer we don't care what is in those files.
            if verify.UNIT_TYPE_ASSESSMENT == dst_unit.type:
                # NOTE(review): the weight lookup is keyed by dst_unit.unit_id
                # (a newly allocated numeric id), while the legacy weights
                # table presumably uses 1.2 assessment ids -- confirm this is
                # intentional before relying on imported weights.
                if dst_unit.unit_id in DEFAULT_LEGACY_ASSESSMENT_WEIGHTS:
                    dst_unit.weight = (
                        DEFAULT_LEGACY_ASSESSMENT_WEIGHTS[dst_unit.unit_id])
                src_filename = os.path.join(
                    src_course.app_context.get_home(),
                    src_course.get_assessment_filename(src_unit.unit_id))
                if src_course.app_context.fs.isfile(src_filename):
                    astream = src_course.app_context.fs.open(src_filename)
                    if astream:
                        dst_filename = os.path.join(
                            self.app_context.get_home(),
                            self.get_assessment_filename(dst_unit.unit_id))
                        self.app_context.fs.put(dst_filename, astream)

        def copy_lesson12_into_lesson13(
            src_unit, src_lesson, unused_dst_unit, dst_lesson):
            """Copies lessons object attributes between versions."""
            dst_lesson.objectives = src_lesson.objectives
            dst_lesson.video = src_lesson.video
            dst_lesson.notes = src_lesson.notes
            dst_lesson.duration = src_lesson.duration
            dst_lesson.has_activity = src_lesson.activity
            dst_lesson.activity_title = src_lesson.activity_title

            # Old model does not have this flag, but all lessons are available.
            dst_lesson.now_available = True

            # Copy over the activity. Note that we copy files directly and
            # avoid all logical validations of their content. This is done for
            # a purpose - at this layer we don't care what is in those files.
            if src_lesson.activity:
                src_filename = os.path.join(
                    src_course.app_context.get_home(),
                    src_course.get_activity_filename(
                        src_unit.unit_id, src_lesson.lesson_id))
                if src_course.app_context.fs.isfile(src_filename):
                    astream = src_course.app_context.fs.open(src_filename)
                    if astream:
                        dst_filename = os.path.join(
                            self.app_context.get_home(),
                            self.get_activity_filename(
                                None, dst_lesson.lesson_id))
                        self.app_context.fs.put(dst_filename, astream)

        if not is_editable_fs(self._app_context):
            errors.append(
                'Target course %s must be '
                'on read-write media.' % self.app_context.raw)
            return None, None

        if self.get_units():
            errors.append(
                'Target course %s must be '
                'empty.' % self.app_context.raw)
            return None, None

        # Iterate over course structure and assets and import each item.
        for unit in src_course.get_units():
            new_unit = self.add_unit(unit.type, unit.title)
            copy_unit12_into_unit13(unit, new_unit)
            for lesson in src_course.get_lessons(unit.unit_id):
                new_lesson = self.add_lesson(new_unit, lesson.title)
                copy_lesson12_into_lesson13(unit, lesson, new_unit, new_lesson)

        return src_course, self

    def to_json(self):
        """Creates JSON representation of this instance."""
        persistent = PersistentCourse13(
            next_id=self._next_id, units=self._units, lessons=self._lessons)
        return transforms.dumps(
            persistent.to_dict(),
            indent=4, sort_keys=True,
            default=lambda o: o.__dict__)
class Course(object):
"""Manages a course and all of its components."""
@classmethod
def get_environ(cls, app_context):
"""Returns currently defined course settings as a dictionary."""
course_yaml = None
course_yaml_dict = None
course_data_filename = app_context.get_config_filename()
if app_context.fs.isfile(course_data_filename):
course_yaml = app_context.fs.open(course_data_filename)
if not course_yaml:
return DEFAULT_COURSE_YAML_DICT
try:
course_yaml_dict = yaml.safe_load(
course_yaml.read().decode('utf-8'))
except Exception as e: # pylint: disable-msg=broad-except
logging.info(
'Error: course.yaml file at %s not accessible, '
'loading defaults. %s', course_data_filename, e)
if not course_yaml_dict:
return DEFAULT_COURSE_YAML_DICT
return deep_dict_merge(
course_yaml_dict, DEFAULT_EXISTING_COURSE_YAML_DICT)
@property
def version(self):
return self._model.VERSION
@classmethod
def create_new_default_course(cls, app_context):
return CourseModel13(app_context)
@classmethod
def custom_new_default_course_for_test(cls, app_context):
# There is an expectation in our tests of automatic import
# of data/*.csv files. This method can be used in tests to achieve
# exactly that.
model = CourseModel12.load(app_context)
if model:
return model
return CourseModel13(app_context)
@classmethod
def _load(cls, app_context):
"""Loads course data from persistence storage into this instance."""
if not is_editable_fs(app_context):
model = CourseModel12.load(app_context)
if model:
return model
else:
model = CourseModel13.load(app_context)
if model:
return model
return cls.create_new_default_course(app_context)
def __init__(self, handler, app_context=None):
self._app_context = app_context if app_context else handler.app_context
self._namespace = self._app_context.get_namespace_name()
self._model = self._load(self._app_context)
self._tracker = None
@property
def app_context(self):
return self._app_context
def to_json(self):
return self._model.to_json()
def get_progress_tracker(self):
if not self._tracker:
self._tracker = progress.UnitLessonCompletionTracker(self)
return self._tracker
def get_units(self):
return self._model.get_units()
def get_lessons(self, unit_id):
return self._model.get_lessons(unit_id)
def save(self):
return self._model.save()
def find_unit_by_id(self, unit_id):
return self._model.find_unit_by_id(unit_id)
def find_lesson_by_id(self, unit, lesson_id):
return self._model.find_lesson_by_id(unit, lesson_id)
def is_last_assessment(self, unit):
"""Checks whether the given unit is the last of all the assessments."""
for current_unit in reversed(self.get_units()):
if current_unit.type == verify.UNIT_TYPE_ASSESSMENT:
return current_unit.unit_id == unit.unit_id
return False
def add_unit(self):
"""Adds new unit to a course."""
return self._model.add_unit('U', 'New Unit')
def add_link(self):
"""Adds new link (other) to a course."""
return self._model.add_unit('O', 'New Link')
def add_assessment(self):
"""Adds new assessment to a course."""
return self._model.add_unit('A', 'New Assessment')
def add_lesson(self, unit):
return self._model.add_lesson(unit, 'New Lesson')
def update_unit(self, unit):
return self._model.update_unit(unit)
def update_lesson(self, lesson):
return self._model.update_lesson(lesson)
def move_lesson_to(self, lesson, unit):
return self._model.move_lesson_to(lesson, unit)
def delete_unit(self, unit):
return self._model.delete_unit(unit)
def delete_lesson(self, lesson):
return self._model.delete_lesson(lesson)
def get_score(self, student, assessment_id):
"""Gets a student's score for a particular assessment."""
assert self.is_valid_assessment_id(assessment_id)
scores = transforms.loads(student.scores) if student.scores else {}
return scores.get(assessment_id) if scores else None
def get_overall_score(self, student):
"""Gets the overall course score for a student."""
score_list = self.get_all_scores(student)
overall_score = 0
total_weight = 0
for unit in score_list:
total_weight += unit['weight']
overall_score += unit['weight'] * unit['score']
if total_weight == 0:
return None
return int(float(overall_score) / total_weight)
def get_overall_result(self, student):
"""Gets the overall result based on a student's score profile."""
score = self.get_overall_score(student)
if score is None:
return None
# This can be replaced with a custom definition for an overall result
# string.
return 'pass' if self.get_overall_score(student) >= 70 else 'fail'
def get_all_scores(self, student):
"""Gets all score data for a student.
Args:
student: the student whose scores should be retrieved.
Returns:
an array of dicts, each representing an assessment. Each dict has
the keys 'id', 'title', 'weight' and 'score' (if available),
representing the unit id, the assessment title, the weight
contributed by the assessment to the final score, and the
assessment score.
"""
assessment_list = self.get_assessment_list()
scores = transforms.loads(student.scores) if student.scores else {}
assessment_score_list = []
for unit in assessment_list:
# Compute the weight for this assessment.
weight = 0
if hasattr(unit, 'weight'):
weight = unit.weight
elif unit.unit_id in DEFAULT_LEGACY_ASSESSMENT_WEIGHTS:
weight = DEFAULT_LEGACY_ASSESSMENT_WEIGHTS[unit.unit_id]
assessment_score_list.append({
'id': str(unit.unit_id),
'title': unit.title,
'weight': weight,
'score': (scores[str(unit.unit_id)]
if str(unit.unit_id) in scores else 0),
})
return assessment_score_list
def get_assessment_list(self):
"""Returns a list of units that are assessments."""
# TODO(psimakov): Streamline this so that it does not require a full
# iteration on each request, probably by modifying the index() method.
assessment_list = []
for unit in self.get_units():
if verify.UNIT_TYPE_ASSESSMENT == unit.type:
assessment_list.append(unit)
return copy.deepcopy(assessment_list)
def get_assessment_filename(self, unit_id):
return self._model.get_assessment_filename(unit_id)
def get_activity_filename(self, unit_id, lesson_id):
return self._model.get_activity_filename(unit_id, lesson_id)
def reorder_units(self, order_data):
return self._model.reorder_units(order_data)
def set_assessment_content(self, unit, assessment_content, errors=None):
return self._model.set_assessment_content(
unit, assessment_content, errors)
def set_activity_content(self, lesson, activity_content, errors=None):
return self._model.set_activity_content(
lesson, activity_content, errors)
def is_valid_assessment_id(self, assessment_id):
"""Tests whether the given assessment id is valid."""
for unit in self.get_units():
if (verify.UNIT_TYPE_ASSESSMENT == unit.type and
str(assessment_id) == str(unit.unit_id)):
return True
return False
def is_valid_unit_lesson_id(self, unit_id, lesson_id):
"""Tests whether the given unit id and lesson id are valid."""
for unit in self.get_units():
if str(unit.unit_id) == str(unit_id):
for lesson in self.get_lessons(unit_id):
if str(lesson.lesson_id) == str(lesson_id):
return True
return False
def import_from(self, app_context, errors=None):
"""Import course structure and assets from another courses."""
src_course = Course(None, app_context=app_context)
if errors is None:
errors = []
# Import 1.2 -> 1.3
if (src_course.version == CourseModel12.VERSION and
self.version == CourseModel13.VERSION):
return self._model.import_from(src_course, errors)
# import 1.3 -> 1.3
if (src_course.version == CourseModel13.VERSION and
self.version == CourseModel13.VERSION):
return self._model.import_from(src_course, errors)
errors.append(
'Import of '
'course %s (version %s) into '
'course %s (version %s) '
'is not supported.' % (
app_context.raw, src_course.version,
self.app_context.raw, self.version))
return None, None
    def init_new_course_settings(self, title, admin_email):
        """Initializes a new course.yaml file if it does not yet exist.

        Args:
            title: course title to embed in the generated settings file.
            admin_email: email address(es) granted admin rights.

        Returns:
            True if a new course.yaml was written; False if one already exists.
        """
        fs = self.app_context.fs.impl
        course_yaml = fs.physical_to_logical('/course.yaml')
        if fs.isfile(course_yaml):
            # Never overwrite an existing settings file.
            return False
        # Double single quotes to escape them inside a YAML single-quoted scalar.
        title = title.replace('\'', '\'\'')
        course_yaml_text = u"""# my new course.yaml
course:
  title: '%s'
  admin_user_emails: '[%s]'
  now_available: False
""" % (title, admin_email)
        fs.put(course_yaml, vfs.string_to_stream(course_yaml_text))
        return True
| |
"""
Support for Google Play Music Desktop Player.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.gpmdp/
"""
import logging
import json
import socket
import time
import voluptuous as vol
from homeassistant.components.media_player import (
MEDIA_TYPE_MUSIC, SUPPORT_NEXT_TRACK, SUPPORT_PREVIOUS_TRACK,
SUPPORT_PAUSE, SUPPORT_VOLUME_SET, SUPPORT_SEEK, SUPPORT_PLAY,
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.const import (
STATE_PLAYING, STATE_PAUSED, STATE_OFF, CONF_HOST, CONF_PORT, CONF_NAME)
import homeassistant.helpers.config_validation as cv
from homeassistant.util.json import load_json, save_json
REQUIREMENTS = ['websocket-client==0.37.0']
# Tracks an in-progress configurator request, keyed by 'gpmdp'.
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
DEFAULT_HOST = 'localhost'
DEFAULT_NAME = 'GPM Desktop Player'
DEFAULT_PORT = 5672
# NOTE(review): the file name spells 'gpmpd', not 'gpmdp'; kept as-is for
# backward compatibility with previously saved auth codes.
GPMDP_CONFIG_FILE = 'gpmpd.conf'
SUPPORT_GPMDP = SUPPORT_PAUSE | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | \
    SUPPORT_SEEK | SUPPORT_VOLUME_SET | SUPPORT_PLAY
# Maps the player's numeric playback state to Home Assistant states.
PLAYBACK_DICT = {'0': STATE_PAUSED,  # Stopped
                 '1': STATE_PAUSED,
                 '2': STATE_PLAYING}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
def request_configuration(hass, config, url, add_devices_callback):
    """Request configuration steps from the user."""
    configurator = hass.components.configurator
    if 'gpmdp' in _CONFIGURING:
        # A pairing request is already pending; surface the failure in the UI.
        configurator.notify_errors(
            _CONFIGURING['gpmdp'], "Failed to register, please try again.")
        return
    from websocket import create_connection
    websocket = create_connection((url), timeout=1)
    # Ask the player to start pairing; it displays a pin to the user.
    websocket.send(json.dumps({'namespace': 'connect',
                               'method': 'connect',
                               'arguments': ['Home Assistant']}))
    # pylint: disable=unused-argument
    def gpmdp_configuration_callback(callback_data):
        """Handle configuration changes."""
        while True:
            from websocket import _exceptions
            try:
                msg = json.loads(websocket.recv())
            except _exceptions.WebSocketConnectionClosedException:
                continue
            # Skip unrelated messages until the player asks for the pin.
            if msg['channel'] != 'connect':
                continue
            if msg['payload'] != "CODE_REQUIRED":
                continue
            # Exchange the user-supplied pin for a permanent auth code.
            pin = callback_data.get('pin')
            websocket.send(json.dumps({'namespace': 'connect',
                                       'method': 'connect',
                                       'arguments': ['Home Assistant', pin]}))
            tmpmsg = json.loads(websocket.recv())
            if tmpmsg['channel'] == 'time':
                # A 'time' message here means playback updates interleaved
                # with the handshake; pairing cannot complete.
                _LOGGER.error("Error setting up GPMDP. Please pause "
                              "the desktop player and try again")
                break
            code = tmpmsg['payload']
            if code == 'CODE_REQUIRED':
                continue
            # Pairing succeeded: set up the platform and persist the code.
            setup_gpmdp(hass, config, code,
                        add_devices_callback)
            save_json(hass.config.path(GPMDP_CONFIG_FILE), {"CODE": code})
            websocket.send(json.dumps({'namespace': 'connect',
                                       'method': 'connect',
                                       'arguments': ['Home Assistant', code]}))
            websocket.close()
            break
    _CONFIGURING['gpmdp'] = configurator.request_config(
        DEFAULT_NAME, gpmdp_configuration_callback,
        description=(
            'Enter the pin that is displayed in the '
            'Google Play Music Desktop Player.'),
        submit_caption="Submit",
        fields=[{'id': 'pin', 'name': 'Pin Code', 'type': 'number'}]
    )
def setup_gpmdp(hass, config, code, add_devices):
    """Set up gpmdp."""
    url = 'ws://{}:{}'.format(config.get(CONF_HOST), config.get(CONF_PORT))
    if not code:
        # No auth code yet: ask the user for the pairing pin first.
        request_configuration(hass, config, url, add_devices)
        return
    pending = _CONFIGURING.pop('gpmdp', None)
    if pending is not None:
        # Pairing finished; dismiss the configurator card.
        hass.components.configurator.request_done(pending)
    add_devices([GPMDP(config.get(CONF_NAME), url, code)], True)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the GPMDP platform."""
    codeconfig = load_json(hass.config.path(GPMDP_CONFIG_FILE))
    if codeconfig:
        # Reuse the auth code saved by a previous pairing.
        code = codeconfig.get('CODE')
    else:
        if discovery_info is not None and 'gpmdp' in _CONFIGURING:
            # Discovered while a pairing dialog is already open: ignore.
            return
        code = None
    setup_gpmdp(hass, config, code, add_devices)
class GPMDP(MediaPlayerDevice):
    """Representation of a GPMDP."""
    def __init__(self, name, url, code):
        """Initialize the media player."""
        from websocket import create_connection
        # Keep the factory so get_ws() can lazily (re)connect.
        self._connection = create_connection
        self._url = url
        self._authorization_code = code
        self._name = name
        self._status = STATE_OFF
        self._ws = None
        self._title = None
        self._artist = None
        self._albumart = None
        self._seek_position = None
        self._duration = None
        self._volume = None
        # Monotonically increasing id used to match requests to replies.
        self._request_id = 0
    def get_ws(self):
        """Check if the websocket is setup and connected."""
        if self._ws is None:
            try:
                self._ws = self._connection((self._url), timeout=1)
                msg = json.dumps({'namespace': 'connect',
                                  'method': 'connect',
                                  'arguments': ['Home Assistant',
                                                self._authorization_code]})
                self._ws.send(msg)
            except (socket.timeout, ConnectionRefusedError,
                    ConnectionResetError):
                # Player unreachable; report "no socket" to callers.
                self._ws = None
        return self._ws
    def send_gpmdp_msg(self, namespace, method, with_id=True):
        """Send ws messages to GPMDP and verify request id in response."""
        from websocket import _exceptions
        try:
            websocket = self.get_ws()
            if websocket is None:
                self._status = STATE_OFF
                return
            self._request_id += 1
            websocket.send(json.dumps({'namespace': namespace,
                                       'method': method,
                                       'requestID': self._request_id}))
            if not with_id:
                # Fire-and-forget command; no reply expected.
                return
            while True:
                # Drain messages until the reply matching our id arrives.
                msg = json.loads(websocket.recv())
                if 'requestID' in msg:
                    if msg['requestID'] == self._request_id:
                        return msg
        except (ConnectionRefusedError, ConnectionResetError,
                _exceptions.WebSocketTimeoutException,
                _exceptions.WebSocketProtocolException,
                _exceptions.WebSocketPayloadException,
                _exceptions.WebSocketConnectionClosedException):
            # Drop the socket so the next call reconnects from scratch.
            self._ws = None
    def update(self):
        """Get the latest details from the player."""
        time.sleep(1)
        playstate = self.send_gpmdp_msg('playback', 'getPlaybackState')
        if playstate is None:
            return
        self._status = PLAYBACK_DICT[str(playstate['value'])]
        time_data = self.send_gpmdp_msg('playback', 'getCurrentTime')
        if time_data is not None:
            # Player reports milliseconds; HA expects seconds.
            self._seek_position = int(time_data['value'] / 1000)
        track_data = self.send_gpmdp_msg('playback', 'getCurrentTrack')
        if track_data is not None:
            self._title = track_data['value']['title']
            self._artist = track_data['value']['artist']
            self._albumart = track_data['value']['albumArt']
            self._duration = int(track_data['value']['duration'] / 1000)
        volume_data = self.send_gpmdp_msg('volume', 'getVolume')
        if volume_data is not None:
            # Player volume is 0..100; HA volume is 0..1.
            self._volume = volume_data['value'] / 100
    @property
    def media_content_type(self):
        """Content type of current playing media."""
        return MEDIA_TYPE_MUSIC
    @property
    def state(self):
        """Return the state of the device."""
        return self._status
    @property
    def media_title(self):
        """Title of current playing media."""
        return self._title
    @property
    def media_artist(self):
        """Artist of current playing media (Music track only)."""
        return self._artist
    @property
    def media_image_url(self):
        """Image url of current playing media."""
        return self._albumart
    @property
    def media_seek_position(self):
        """Time in seconds of current seek position."""
        return self._seek_position
    @property
    def media_duration(self):
        """Time in seconds of current song duration."""
        return self._duration
    @property
    def volume_level(self):
        """Volume level of the media player (0..1)."""
        return self._volume
    @property
    def name(self):
        """Return the name of the device."""
        return self._name
    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_GPMDP
    def media_next_track(self):
        """Send media_next command to media player."""
        self.send_gpmdp_msg('playback', 'forward', False)
    def media_previous_track(self):
        """Send media_previous command to media player."""
        self.send_gpmdp_msg('playback', 'rewind', False)
    def media_play(self):
        """Send media_play command to media player."""
        self.send_gpmdp_msg('playback', 'playPause', False)
        self._status = STATE_PLAYING
        self.schedule_update_ha_state()
    def media_pause(self):
        """Send media_pause command to media player."""
        self.send_gpmdp_msg('playback', 'playPause', False)
        self._status = STATE_PAUSED
        self.schedule_update_ha_state()
    def media_seek(self, position):
        """Send media_seek command to media player."""
        websocket = self.get_ws()
        if websocket is None:
            return
        # Position is given in seconds; the player expects milliseconds.
        websocket.send(json.dumps({'namespace': 'playback',
                                   'method': 'setCurrentTime',
                                   'arguments': [position*1000]}))
        self.schedule_update_ha_state()
    def volume_up(self):
        """Send volume_up command to media player."""
        websocket = self.get_ws()
        if websocket is None:
            return
        websocket.send('{"namespace": "volume", "method": "increaseVolume"}')
        self.schedule_update_ha_state()
    def volume_down(self):
        """Send volume_down command to media player."""
        websocket = self.get_ws()
        if websocket is None:
            return
        websocket.send('{"namespace": "volume", "method": "decreaseVolume"}')
        self.schedule_update_ha_state()
    def set_volume_level(self, volume):
        """Set volume on media player, range(0..1)."""
        websocket = self.get_ws()
        if websocket is None:
            return
        # HA volume is 0..1; the player expects 0..100.
        websocket.send(json.dumps({'namespace': 'volume',
                                   'method': 'setVolume',
                                   'arguments': [volume*100]}))
        self.schedule_update_ha_state()
| |
import h2.exceptions
import time
import enum
from mitmproxy import connections # noqa
from mitmproxy import exceptions
from mitmproxy import http
from mitmproxy import flow
from mitmproxy.proxy.protocol import base
from mitmproxy.proxy.protocol.websocket import WebSocketLayer
from mitmproxy.net import websockets
class _HttpTransmissionLayer(base.Layer):
    """Base layer defining the read/write interface HttpLayer relies on."""
    def read_request_headers(self, flow):
        raise NotImplementedError()
    def read_request_body(self, request):
        raise NotImplementedError()
    def send_request(self, request):
        raise NotImplementedError()
    def read_response_headers(self):
        raise NotImplementedError()
    def read_response_body(self, request, response):
        raise NotImplementedError()
        # The unreachable yield makes this stub a generator function, so the
        # contract for overrides is "iterable of body chunks".
        yield "this is a generator"  # pragma: no cover
    def read_response(self, request):
        """Read headers plus the fully-assembled body as one response."""
        response = self.read_response_headers()
        response.data.content = b"".join(
            self.read_response_body(request, response)
        )
        return response
    def send_response(self, response):
        """Send a complete (non-streamed) response to the peer."""
        if response.data.content is None:
            raise exceptions.HttpException("Cannot assemble flow with missing content")
        self.send_response_headers(response)
        self.send_response_body(response, [response.data.content])
    def send_response_headers(self, response):
        raise NotImplementedError()
    def send_response_body(self, response, chunks):
        raise NotImplementedError()
    def check_close_connection(self, f):
        raise NotImplementedError()
class ConnectServerConnection:
    """
    "Fake" ServerConnection representing state after a CONNECT request
    was issued to an upstream proxy.
    """
    def __init__(self, address, ctx):
        # The CONNECTed destination; everything else is proxied via ctx.
        self.address = address
        self._ctx = ctx
    @property
    def via(self):
        """The real (upstream) server connection the tunnel runs through."""
        return self._ctx.server_conn
    def __getattr__(self, attr):
        # Any attribute we do not define is delegated to the upstream
        # connection.
        return getattr(self.via, attr)
    def connected(self):
        """Report whether the underlying upstream connection is open."""
        return self.via.connected()
class UpstreamConnectLayer(base.Layer):
    """Layer that tunnels traffic through an upstream proxy via CONNECT."""
    def __init__(self, ctx, connect_request):
        super().__init__(ctx)
        self.connect_request = connect_request
        # Present the CONNECT target as our server connection; reads/writes
        # are delegated to the upstream proxy connection.
        self.server_conn = ConnectServerConnection(
            (connect_request.host, connect_request.port),
            self.ctx
        )
    def __call__(self):
        layer = self.ctx.next_layer(self)
        layer()
    def _send_connect_request(self):
        # Issue CONNECT to the upstream proxy and require a 200 reply.
        self.log("Sending CONNECT request", "debug", [
            "Proxy Server: {}".format(self.ctx.server_conn.address),
            "Connect to: {}:{}".format(self.connect_request.host, self.connect_request.port)
        ])
        self.send_request(self.connect_request)
        resp = self.read_response(self.connect_request)
        if resp.status_code != 200:
            raise exceptions.ProtocolException("Reconnect: Upstream server refuses CONNECT request")
    def connect(self):
        # Only (re)establish the tunnel when the proxy link is down.
        if not self.server_conn.connected():
            self.ctx.connect()
            self._send_connect_request()
        else:
            pass  # swallow the message
    def change_upstream_proxy_server(self, address):
        self.log("Changing upstream proxy to {} (CONNECTed)".format(repr(address)), "debug")
        if address != self.server_conn.via.address:
            self.ctx.set_server(address)
    def set_server(self, address):
        # Re-point the tunnel: drop the proxy link and rewrite the CONNECT
        # target so the next connect() re-tunnels to the new address.
        if self.ctx.server_conn.connected():
            self.ctx.disconnect()
        self.connect_request.host = address[0]
        self.connect_request.port = address[1]
        self.server_conn.address = address
def is_ok(status):
    """Return True when *status* is a 2xx (success) HTTP status code."""
    return not (status < 200 or status >= 300)
class HTTPMode(enum.Enum):
    """Operating mode of the HTTP proxy layer."""
    regular = 1
    transparent = 2
    upstream = 3
# At this point, we see only a subset of the proxy modes
# Maps each mode to the request-line forms it accepts (see RFC 7230 5.3).
MODE_REQUEST_FORMS = {
    HTTPMode.regular: ("authority", "absolute"),
    HTTPMode.transparent: ("relative",),
    HTTPMode.upstream: ("authority", "absolute"),
}
def validate_request_form(mode, request):
    """Raise HttpException if the request's first-line form is invalid for *mode*."""
    if request.first_line_format == "absolute" and request.scheme != "http":
        raise exceptions.HttpException(
            "Invalid request scheme: %s" % request.scheme
        )
    allowed_request_forms = MODE_REQUEST_FORMS[mode]
    if request.first_line_format in allowed_request_forms:
        return
    if mode == HTTPMode.transparent:
        err_message = (
            "Mitmproxy received an {} request even though it is not running in regular mode. "
            "This usually indicates a misconfiguration, please see "
            "http://docs.mitmproxy.org/en/stable/modes.html for details."
        ).format("HTTP CONNECT" if request.first_line_format == "authority" else "absolute-form")
    else:
        err_message = "Invalid HTTP request form (expected: %s, got: %s)" % (
            " or ".join(allowed_request_forms), request.first_line_format
        )
    raise exceptions.HttpException(err_message)
class HttpLayer(base.Layer):
    """Drives the HTTP request/response cycle between client and server."""
    if False:
        # mypy type hints
        server_conn = None  # type: connections.ServerConnection
    def __init__(self, ctx, mode):
        super().__init__(ctx)
        self.mode = mode
        self.__initial_server_conn = None
        "Contains the original destination in transparent mode, which needs to be restored"
        "if an inline script modified the target server for a single http request"
        # We cannot rely on server_conn.tls_established,
        # see https://github.com/mitmproxy/mitmproxy/issues/925
        self.__initial_server_tls = None
        # Requests happening after CONNECT do not need Proxy-Authorization headers.
        self.connect_request = False
    def __call__(self):
        # Remember the original destination/TLS state so per-flow rewrites
        # in transparent mode can be restored for later requests.
        if self.mode == HTTPMode.transparent:
            self.__initial_server_tls = self.server_tls
            self.__initial_server_conn = self.server_conn
        while True:
            flow = http.HTTPFlow(
                self.client_conn,
                self.server_conn,
                live=self,
                mode=self.mode.name
            )
            # _process_flow returns True to keep handling more requests.
            if not self._process_flow(flow):
                return
    def handle_regular_connect(self, f):
        """Answer a CONNECT in regular mode and hand off to the next layer."""
        self.connect_request = True
        try:
            self.set_server((f.request.host, f.request.port))
            if f.response:
                resp = f.response
            else:
                resp = http.make_connect_response(f.request.data.http_version)
            self.send_response(resp)
            if is_ok(resp.status_code):
                layer = self.ctx.next_layer(self)
                layer()
        except (
            exceptions.ProtocolException, exceptions.NetlibException
        ) as e:
            # HTTPS tasting means that ordinary errors like resolution
            # and connection errors can happen here.
            self.send_error_response(502, repr(e))
            f.error = flow.Error(str(e))
            self.channel.ask("error", f)
            return False
        return False
    def handle_upstream_connect(self, f):
        """Forward a CONNECT to the upstream proxy and tunnel on success."""
        self.establish_server_connection(
            f.request.host,
            f.request.port,
            f.request.scheme
        )
        self.send_request(f.request)
        f.response = self.read_response_headers()
        f.response.data.content = b"".join(
            self.read_response_body(f.request, f.response)
        )
        self.send_response(f.response)
        if is_ok(f.response.status_code):
            layer = UpstreamConnectLayer(self, f.request)
            return layer()
        return False
    def _process_flow(self, f):
        """Handle one request/response cycle; False ends the connection."""
        try:
            try:
                request = self.read_request_headers(f)
            except exceptions.HttpReadDisconnect:
                # don't throw an error for disconnects that happen
                # before/between requests.
                return False
            f.request = request
            if request.first_line_format == "authority":
                # The standards are silent on what we should do with a CONNECT
                # request body, so although it's not common, it's allowed.
                f.request.data.content = b"".join(
                    self.read_request_body(f.request)
                )
                f.request.timestamp_end = time.time()
                self.channel.ask("http_connect", f)
                if self.mode is HTTPMode.regular:
                    return self.handle_regular_connect(f)
                elif self.mode is HTTPMode.upstream:
                    return self.handle_upstream_connect(f)
                else:
                    msg = "Unexpected CONNECT request."
                    self.send_error_response(400, msg)
                    raise exceptions.ProtocolException(msg)
            self.channel.ask("requestheaders", f)
            if request.headers.get("expect", "").lower() == "100-continue":
                # TODO: We may have to use send_response_headers for HTTP2
                # here.
                self.send_response(http.expect_continue_response)
                request.headers.pop("expect")
            request.data.content = b"".join(self.read_request_body(request))
            request.timestamp_end = time.time()
            validate_request_form(self.mode, request)
        except exceptions.HttpException as e:
            # We optimistically guess there might be an HTTP client on the
            # other end
            self.send_error_response(400, repr(e))
            raise exceptions.ProtocolException(
                "HTTP protocol error in client request: {}".format(e)
            )
        self.log("request", "debug", [repr(request)])
        # set first line format to relative in regular mode,
        # see https://github.com/mitmproxy/mitmproxy/issues/1759
        if self.mode is HTTPMode.regular and request.first_line_format == "absolute":
            request.first_line_format = "relative"
        # update host header in reverse proxy mode
        if self.config.options.mode == "reverse":
            f.request.host_header = self.config.upstream_server.address[0]
        # Determine .scheme, .host and .port attributes for inline scripts. For
        # absolute-form requests, they are directly given in the request. For
        # authority-form requests, we only need to determine the request
        # scheme. For relative-form requests, we need to determine host and
        # port as well.
        if self.mode is HTTPMode.transparent:
            # Setting request.host also updates the host header, which we want
            # to preserve
            host_header = f.request.host_header
            f.request.host = self.__initial_server_conn.address[0]
            f.request.port = self.__initial_server_conn.address[1]
            f.request.host_header = host_header  # set again as .host overwrites this.
            f.request.scheme = "https" if self.__initial_server_tls else "http"
        self.channel.ask("request", f)
        try:
            if websockets.check_handshake(request.headers) and websockets.check_client_version(request.headers):
                # We only support RFC6455 with WebSocket version 13
                # allow inline scripts to manipulate the client handshake
                self.channel.ask("websocket_handshake", f)
            if not f.response:
                self.establish_server_connection(
                    f.request.host,
                    f.request.port,
                    f.request.scheme
                )
                def get_response():
                    self.send_request(f.request)
                    f.response = self.read_response_headers()
                try:
                    get_response()
                except exceptions.NetlibException as e:
                    self.log(
                        "server communication error: %s" % repr(e),
                        level="debug"
                    )
                    # In any case, we try to reconnect at least once. This is
                    # necessary because it might be possible that we already
                    # initiated an upstream connection after clientconnect that
                    # has already been expired, e.g consider the following event
                    # log:
                    # > clientconnect (transparent mode destination known)
                    # > serverconnect (required for client tls handshake)
                    # > read n% of large request
                    # > server detects timeout, disconnects
                    # > read (100-n)% of large request
                    # > send large request upstream
                    if isinstance(e, exceptions.Http2ProtocolException):
                        # do not try to reconnect for HTTP2
                        raise exceptions.ProtocolException(
                            "First and only attempt to get response via HTTP2 failed."
                        )
                    self.disconnect()
                    self.connect()
                    get_response()
                # call the appropriate script hook - this is an opportunity for
                # an inline script to set f.stream = True
                self.channel.ask("responseheaders", f)
                if f.response.stream:
                    f.response.data.content = None
                else:
                    f.response.data.content = b"".join(
                        self.read_response_body(f.request, f.response)
                    )
                f.response.timestamp_end = time.time()
                # no further manipulation of self.server_conn beyond this point
                # we can safely set it as the final attribute value here.
                f.server_conn = self.server_conn
            else:
                # response was set by an inline script.
                # we now need to emulate the responseheaders hook.
                self.channel.ask("responseheaders", f)
            self.log("response", "debug", [repr(f.response)])
            self.channel.ask("response", f)
            if not f.response.stream:
                # no streaming:
                # we already received the full response from the server and can
                # send it to the client straight away.
                self.send_response(f.response)
            else:
                # streaming:
                # First send the headers and then transfer the response incrementally
                self.send_response_headers(f.response)
                chunks = self.read_response_body(
                    f.request,
                    f.response
                )
                if callable(f.response.stream):
                    chunks = f.response.stream(chunks)
                self.send_response_body(f.response, chunks)
                f.response.timestamp_end = time.time()
            if self.check_close_connection(f):
                return False
            # Handle 101 Switching Protocols
            if f.response.status_code == 101:
                # Handle a successful HTTP 101 Switching Protocols Response,
                # received after e.g. a WebSocket upgrade request.
                # Check for WebSocket handshake
                is_websocket = (
                    websockets.check_handshake(f.request.headers) and
                    websockets.check_handshake(f.response.headers)
                )
                if is_websocket and not self.config.options.websocket:
                    self.log(
                        "Client requested WebSocket connection, but the protocol is disabled.",
                        "info"
                    )
                if is_websocket and self.config.options.websocket:
                    layer = WebSocketLayer(self, f)
                else:
                    layer = self.ctx.next_layer(self)
                layer()
                return False  # should never be reached
        except (exceptions.ProtocolException, exceptions.NetlibException) as e:
            self.send_error_response(502, repr(e))
            if not f.response:
                f.error = flow.Error(str(e))
                self.channel.ask("error", f)
                return False
            else:
                raise exceptions.ProtocolException(
                    "Error in HTTP connection: %s" % repr(e)
                )
        finally:
            if f:
                f.live = False
        return True
    def send_error_response(self, code, message, headers=None) -> None:
        """Best-effort: try to deliver an HTTP error response to the client."""
        try:
            response = http.make_error_response(code, message, headers)
            self.send_response(response)
        except (exceptions.NetlibException, h2.exceptions.H2Error, exceptions.Http2ProtocolException):
            self.log("Failed to send error response to client: {}".format(message), "debug")
    def change_upstream_proxy_server(self, address):
        # Make set_upstream_proxy_server always available,
        # even if there's no UpstreamConnectLayer
        if hasattr(self.ctx, "change_upstream_proxy_server"):
            self.ctx.change_upstream_proxy_server(address)
        elif address != self.server_conn.address:
            self.log("Changing upstream proxy to {} (not CONNECTed)".format(repr(address)), "debug")
            self.set_server(address)
    def establish_server_connection(self, host: str, port: int, scheme: str):
        """Connect to (host, port), respecting the proxy mode and TLS scheme."""
        tls = (scheme == "https")
        if self.mode is HTTPMode.regular or self.mode is HTTPMode.transparent:
            # If there's an existing connection that doesn't match our expectations, kill it.
            address = (host, port)
            if address != self.server_conn.address or tls != self.server_tls:
                self.set_server(address)
                self.set_server_tls(tls, address[0])
            # Establish connection is neccessary.
            if not self.server_conn.connected():
                self.connect()
        else:
            if not self.server_conn.connected():
                self.connect()
            if tls:
                raise exceptions.HttpProtocolException("Cannot change scheme in upstream proxy mode.")
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Deterministic distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.util import deprecation
# Public API of this module.
__all__ = [
    "Deterministic",
    "VectorDeterministic",
]
@six.add_metaclass(abc.ABCMeta)
class _BaseDeterministic(distribution.Distribution):
  """Base class for Deterministic distributions."""
  @deprecation.deprecated(
      "2018-10-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.contrib.distributions`.",
      warn_once=True)
  def __init__(self,
               loc,
               atol=None,
               rtol=None,
               is_vector=False,
               validate_args=False,
               allow_nan_stats=True,
               name="_BaseDeterministic"):
    """Initialize a batch of `_BaseDeterministic` distributions.
    The `atol` and `rtol` parameters allow for some slack in `pmf`, `cdf`
    computations, e.g. due to floating-point error.
    ```
    pmf(x; loc)
      = 1, if Abs(x - loc) <= atol + rtol * Abs(loc),
      = 0, otherwise.
    ```
    Args:
      loc: Numeric `Tensor`. The point (or batch of points) on which this
        distribution is supported.
      atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
        shape. The absolute tolerance for comparing closeness to `loc`.
        Default is `0`.
      rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
        shape. The relative tolerance for comparing closeness to `loc`.
        Default is `0`.
      is_vector: Python `bool`. If `True`, this is for `VectorDeterministic`,
        else `Deterministic`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    Raises:
      ValueError: If `loc` is a scalar.
    """
    parameters = dict(locals())
    with ops.name_scope(name, values=[loc, atol, rtol]) as name:
      loc = ops.convert_to_tensor(loc, name="loc")
      if is_vector and validate_args:
        msg = "Argument loc must be at least rank 1."
        if loc.get_shape().ndims is not None:
          # Rank is known statically; check it at graph-build time.
          if loc.get_shape().ndims < 1:
            raise ValueError(msg)
        else:
          # Rank unknown statically; defer the check to runtime.
          loc = control_flow_ops.with_dependencies(
              [check_ops.assert_rank_at_least(loc, 1, message=msg)], loc)
      self._loc = loc
      super(_BaseDeterministic, self).__init__(
          dtype=self._loc.dtype,
          reparameterization_type=distribution.NOT_REPARAMETERIZED,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          parameters=parameters,
          graph_parents=[self._loc],
          name=name)
      self._atol = self._get_tol(atol)
      self._rtol = self._get_tol(rtol)
      # Avoid using the large broadcast with self.loc if possible.
      if rtol is None:
        self._slack = self.atol
      else:
        self._slack = self.atol + self.rtol * math_ops.abs(self.loc)
  def _get_tol(self, tol):
    """Convert `tol` to a tensor of `loc`'s dtype (0 if None), optionally
    asserting non-negativity when `validate_args` is set."""
    if tol is None:
      return ops.convert_to_tensor(0, dtype=self.loc.dtype)
    tol = ops.convert_to_tensor(tol, dtype=self.loc.dtype)
    if self.validate_args:
      tol = control_flow_ops.with_dependencies([
          check_ops.assert_non_negative(
              tol, message="Argument 'tol' must be non-negative")
      ], tol)
    return tol
  @property
  def loc(self):
    """Point (or batch of points) at which this distribution is supported."""
    return self._loc
  @property
  def atol(self):
    """Absolute tolerance for comparing points to `self.loc`."""
    return self._atol
  @property
  def rtol(self):
    """Relative tolerance for comparing points to `self.loc`."""
    return self._rtol
  def _mean(self):
    # A point mass: the mean is `loc` itself.
    return array_ops.identity(self.loc)
  def _variance(self):
    # A point mass has zero variance.
    return array_ops.zeros_like(self.loc)
  def _mode(self):
    return self.mean()
  def _sample_n(self, n, seed=None):  # pylint: disable=unused-argument
    # Sampling is deterministic: tile `loc` along a new leading sample axis.
    n_static = tensor_util.constant_value(ops.convert_to_tensor(n))
    if n_static is not None and self.loc.get_shape().ndims is not None:
      ones = [1] * self.loc.get_shape().ndims
      multiples = [n_static] + ones
    else:
      ones = array_ops.ones_like(array_ops.shape(self.loc))
      multiples = array_ops.concat(([n], ones), axis=0)
    return array_ops.tile(self.loc[array_ops.newaxis, ...], multiples=multiples)
class Deterministic(_BaseDeterministic):
  """Scalar `Deterministic` distribution on the real line.
  The scalar `Deterministic` distribution is parameterized by a [batch] point
  `loc` on the real line. The distribution is supported at this point only,
  and corresponds to a random variable that is constant, equal to `loc`.
  See [Degenerate rv](https://en.wikipedia.org/wiki/Degenerate_distribution).
  #### Mathematical Details
  The probability mass function (pmf) and cumulative distribution function (cdf)
  are
  ```none
  pmf(x; loc) = 1, if x == loc, else 0
  cdf(x; loc) = 1, if x >= loc, else 0
  ```
  #### Examples
  ```python
  # Initialize a single Deterministic supported at zero.
  constant = tf.contrib.distributions.Deterministic(0.)
  constant.prob(0.)
  ==> 1.
  constant.prob(2.)
  ==> 0.
  # Initialize a [2, 2] batch of scalar constants.
  loc = [[0., 1.], [2., 3.]]
  x = [[0., 1.1], [1.99, 3.]]
  constant = tf.contrib.distributions.Deterministic(loc)
  constant.prob(x)
  ==> [[1., 0.], [0., 1.]]
  ```
  """
  @deprecation.deprecated(
      "2018-10-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.contrib.distributions`.",
      warn_once=True)
  def __init__(self,
               loc,
               atol=None,
               rtol=None,
               validate_args=False,
               allow_nan_stats=True,
               name="Deterministic"):
    """Initialize a scalar `Deterministic` distribution.
    The `atol` and `rtol` parameters allow for some slack in `pmf`, `cdf`
    computations, e.g. due to floating-point error.
    ```
    pmf(x; loc)
      = 1, if Abs(x - loc) <= atol + rtol * Abs(loc),
      = 0, otherwise.
    ```
    Args:
      loc: Numeric `Tensor` of shape `[B1, ..., Bb]`, with `b >= 0`.
        The point (or batch of points) on which this distribution is supported.
      atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
        shape. The absolute tolerance for comparing closeness to `loc`.
        Default is `0`.
      rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
        shape. The relative tolerance for comparing closeness to `loc`.
        Default is `0`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    """
    super(Deterministic, self).__init__(
        loc,
        atol=atol,
        rtol=rtol,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        name=name)
  def _batch_shape_tensor(self):
    return array_ops.shape(self.loc)
  def _batch_shape(self):
    return self.loc.get_shape()
  def _event_shape_tensor(self):
    # Scalar distribution: the event shape is empty.
    return constant_op.constant([], dtype=dtypes.int32)
  def _event_shape(self):
    return tensor_shape.scalar()
  def _prob(self, x):
    # 1 where x is within `_slack` (atol + rtol*|loc|) of loc, else 0.
    return math_ops.cast(
        math_ops.abs(x - self.loc) <= self._slack, dtype=self.dtype)
  def _cdf(self, x):
    # Step function: 1 where x >= loc (minus slack), else 0.
    return math_ops.cast(x >= self.loc - self._slack, dtype=self.dtype)
class VectorDeterministic(_BaseDeterministic):
  """Vector `Deterministic` distribution on `R^k`.

  The `VectorDeterministic` distribution is parameterized by a [batch] point
  `loc in R^k`. The distribution is supported at this point only,
  and corresponds to a random variable that is constant, equal to `loc`.

  See [Degenerate rv](https://en.wikipedia.org/wiki/Degenerate_distribution).

  #### Mathematical Details

  The probability mass function (pmf) is

  ```none
  pmf(x; loc)
    = 1, if All[Abs(x - loc) <= atol + rtol * Abs(loc)],
    = 0, otherwise.
  ```

  #### Examples

  ```python
  tfd = tf.contrib.distributions

  # Initialize a single VectorDeterministic supported at [0., 2.] in R^2.
  constant = tfd.VectorDeterministic([0., 2.])
  constant.prob([0., 2.])
  ==> 1.
  constant.prob([0., 3.])
  ==> 0.

  # Initialize a [3] batch of constants on R^2.
  loc = [[0., 1.], [2., 3.], [4., 5.]]
  constant = tfd.VectorDeterministic(loc)
  constant.prob([[0., 1.], [1.9, 3.], [3.99, 5.]])
  ==> [1., 0., 0.]
  ```
  """

  @deprecation.deprecated(
      "2018-10-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.contrib.distributions`.",
      warn_once=True)
  def __init__(self,
               loc,
               atol=None,
               rtol=None,
               validate_args=False,
               allow_nan_stats=True,
               name="VectorDeterministic"):
    """Initialize a `VectorDeterministic` distribution on `R^k`, for `k >= 0`.

    Note that there is only one point in `R^0`, the "point" `[]`. So if `k = 0`
    then `self.prob([]) == 1`.

    The `atol` and `rtol` parameters allow for some slack in `pmf`
    computations, e.g. due to floating-point error.

    ```
    pmf(x; loc)
      = 1, if All[Abs(x - loc) <= atol + rtol * Abs(loc)],
      = 0, otherwise
    ```

    Args:
      loc: Numeric `Tensor` of shape `[B1, ..., Bb, k]`, with `b >= 0`, `k >= 0`
        The point (or batch of points) on which this distribution is supported.
      atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
        shape. The absolute tolerance for comparing closeness to `loc`.
        Default is `0`.
      rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
        shape. The relative tolerance for comparing closeness to `loc`.
        Default is `0`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    """
    # `is_vector=True` tells the shared base class that the trailing
    # dimension of `loc` is the event dimension, not a batch dimension.
    super(VectorDeterministic, self).__init__(
        loc,
        atol=atol,
        rtol=rtol,
        is_vector=True,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        name=name)

  def _batch_shape_tensor(self):
    # All but the rightmost dimension of `loc` index batch members.
    return array_ops.shape(self.loc)[:-1]

  def _batch_shape(self):
    # Static counterpart of `_batch_shape_tensor`.
    return self.loc.get_shape()[:-1]

  def _event_shape_tensor(self):
    # BUG FIX: slice (`[-1:]`) rather than index (`[-1]`) so this returns a
    # 1-D shape vector `[k]`, consistent with the static `_event_shape`
    # below. Indexing yielded a scalar, which is not a valid shape tensor
    # for downstream shape concatenation.
    return array_ops.shape(self.loc)[-1:]

  def _event_shape(self):
    # Static counterpart: the rank-1 TensorShape `[k]`.
    return self.loc.get_shape()[-1:]

  def _prob(self, x):
    # When requested, check that `x` is at least a vector and that its
    # trailing dimension matches this distribution's event size `k`.
    if self.validate_args:
      is_vector_check = check_ops.assert_rank_at_least(x, 1)
      right_vec_space_check = check_ops.assert_equal(
          self.event_shape_tensor(),
          array_ops.gather(array_ops.shape(x), array_ops.rank(x) - 1),
          message=
          "Argument 'x' not defined in the same space R^k as this distribution")
      with ops.control_dependencies([is_vector_check]):
        with ops.control_dependencies([right_vec_space_check]):
          x = array_ops.identity(x)
    # Mass 1 iff every coordinate of `x` is within the slack band of `loc`.
    return math_ops.cast(
        math_ops.reduce_all(math_ops.abs(x - self.loc) <= self._slack, axis=-1),
        dtype=self.dtype)
| |
import sys
from unittest import TestCase
import unittest
import os
from importlib import reload
from mock import Mock, call
from mockextras import stub
sys.path = [os.path.abspath(os.path.join('..', os.pardir))] + sys.path
from digesters.digestion_processor import DigestionProcessor
from digesters.linkedin.linkedin_invitation_digester import LinkedinInvitationDigester
# RFC-2822-style header prefixed to every expected digest email in these
# tests. The multipart boundary suffix must match the digester's boundary,
# whose random part is pinned to "-5678" inside the test method.
MAIL_HDR = """From: "Linkedin" <ph@example.com>
Content-Transfer-Encoding: 8bit
Content-Type: multipart/alternative; boundary="---NOTIFICATION_BOUNDARY-5678"
MIME-Version: 1.0
This is a multi-part message in MIME format.
-----NOTIFICATION_BOUNDARY-5678
Content-Type: text/html; charset="utf-8"
Content-Transfer-Encoding: 8bit
"""
class InvitationsStore(object):
    """Argument-capturing matcher used in mock expectations.

    Passed where a mock expects the stored-invitations value: `__eq__`
    always reports a match while stashing the actual argument in
    `self.invitations`, so the test can inspect it afterwards.
    """

    def __init__(self, cls=object):
        self._cls = cls
        # Populated with whatever value this object is compared against.
        self.invitations = None

    def __eq__(self, other):
        # Record the value under comparison, then claim equality.
        self.invitations = other
        return True

    def __ne__(self, other):
        # Mirror of __eq__: never report inequality.
        return False

    def __repr__(self):
        return "InvitationsStore(..)"
class TestLinkedinInvitations(TestCase):
    """Tests that LinkedIn invitation notifications roll up into one digest email."""

    def __init__(self, methodName='runTest'):
        super(TestLinkedinInvitations, self).__init__(methodName)
        # Python 2 relic: reload(sys) used to precede sys.setdefaultencoding.
        reload(sys)
        # sys.setdefaultencoding('utf8')

    def test_two_related_invitations_can_be_rolled_up(self):
        """Feeds three notifications through the processor and checks the rewritten digest.

        Two notifications arrive after the "most recently seen" timestamp and
        one before it, so the digest body shows a red "New Invitations Since
        You Last Checked" divider between them. The expected HTML below must
        match the digester's output byte-for-byte.
        """
        expected_payload = """<html><body><span>You have previously read invitations up to: Apr 01 2016 06:13 PM</span>
<table>
<tr style="background-color: #acf;">
<th colspan="2">Invitations</th>
</tr>
<tr style="">
<td><img style="max-width:100px;height:auto" src="https://upload.wikimedia.org/wikipedia/commons/8/85/Border_collie.jpg"/></td>
<td>
<strong>Michael McAuliffe</strong><br>
Hi Paul,<br/>
I\'d like to join your LinkedIn network.<br/>
Michael McAuliffe<br/>
Managing Director<br/>
View profile: https://www.linkedin.com/comm/profile/view?id=AAsAAAJUVTYBWkKAZwppmyYBjwgm1AI0nKRyTwA&authType=name&authToken=eAsV&invAcpt=2197625_I6132985453227360256_500&midToken=AQHQ1w5V4ws4wA&trk=eml-email_m2m_invite_single_01-hero-3-prof%7Ecta&trkEmail=eml-email_m2m_invite_single_01-hero-3-prof%7Ecta-null-1b3p5%7Einqenmmy%7Eir<br/>
<br>
<a href="https://www.linkedin.com/comm/people/invite-accept?mboxid=I6132985453227360256_500&sharedKey=48j6iM8P&fr=false&invitationId=6132985418842456064&fe=true&midToken=AQHQ1w5V4ws4wA&trk=eml-email_m2m_invite_single_01-hero-0-accept%7Ecta&trkEmail=eml-email_m2m_invite_single_01-hero-0-accept%7Ecta-null-1b3p5%7Einqenmmy%7Eir">Accept Invitation</a>
<a href="https://www.linkedin.com/comm/profile/view?id=AAsAAAJUVTYBWkKAZwppmyYBjwgm1AI0nKRyTwA&authType=name&authToken=eAsV&invAcpt=2197625_I6132985453227360256_500&midToken=AQHQ1w5V4ws4wA&trk=eml-email_m2m_invite_single_01-hero-3-prof%7Ecta&trkEmail=eml-email_m2m_invite_single_01-hero-3-prof%7Ecta-null-1b3p5%7Einqenmmy%7Eir">View Profile</a>
</td>
</tr> <tr style="background-color: #def;">
<td><img style="max-width:100px;height:auto" src="https://upload.wikimedia.org/wikipedia/commons/8/85/Border_collie.jpg"/></td>
<td>
<strong>Foo Bar</strong><br>
Hi Paul,<br/>
I\'d like to join your LinkedIn network.<br/>
FOO BAR<br/>
Vice President<br/>
View profile: https://www.linkedin.com/comm/profile/view?id=AAsAAAFlvJcBCnnIlLvQhDO6ZBU5rdb7fAb_-IU&authType=name&authToken=95up&invAcpt=2197625_I6132926076281774083_500&midToken=AQHQ1w5V4ws4wA&trk=eml-email_m2m_invite_single_01-hero-3-prof%7Ecta&trkEmail=eml-email_m2m_invite_single_01-hero-3-prof%7Ecta-null-1b3p5%7Einq68641%7E1h<br/>
<br>
<a href="https://www.linkedin.com/comm/people/invite-accept?mboxid=I6132926076281774083_500&sharedKey=w447gWge&fr=false&invitationId=6132926046288310272&fe=true&midToken=AQHQ1w5V4ws4wA&trk=eml-email_m2m_invite_single_01-hero-0-accept%7Ecta&trkEmail=eml-email_m2m_invite_single_01-hero-0-accept%7Ecta-null-1b3p5%7Einq68641%7E1h">Accept Invitation</a>
<a href="https://www.linkedin.com/comm/profile/view?id=AAsAAAFlvJcBCnnIlLvQhDO6ZBU5rdb7fAb_-IU&authType=name&authToken=95up&invAcpt=2197625_I6132926076281774083_500&midToken=AQHQ1w5V4ws4wA&trk=eml-email_m2m_invite_single_01-hero-3-prof%7Ecta&trkEmail=eml-email_m2m_invite_single_01-hero-3-prof%7Ecta-null-1b3p5%7Einq68641%7E1h">View Profile</a>
</td>
</tr> <tr><td colspan="2" style="border-bottom: 1pt solid red; border-top: 1pt solid red;"><center>^ New Invitations Since You Last Checked ^</center></td></tr> <tr style="">
<td><img style="max-width:100px;height:auto" src="https://upload.wikimedia.org/wikipedia/commons/8/85/Border_collie.jpg"/></td>
<td>
<strong>Aaaa Bbbb</strong><br>
Hi Paul,<br/>
I\'d like to join your LinkedIn network.<br/>
Aaaa Bbbb<br/>
Managing Director<br/>
View profile: https://www.linkedin.com/comm/profile/view?id=AAsAAAJUVTYBWkKAZwppmyYBjwgm1AI0nKRyTwA&authType=name&authToken=eAsV&invAcpt=2197625_I6132985453227360256_500&midToken=AQHQ1w5V4ws4wA&trk=eml-email_m2m_invite_single_01-hero-3-prof%7Ecta&trkEmail=eml-email_m2m_invite_single_01-hero-3-prof%7Ecta-null-1b3p5%7Einqenmmy%7Eir<br/>
<br>
<a href="https://www.linkedin.com/comm/people/invite-accept?mboxid=I6132985453227360256_500&sharedKey=48j6iM8P&fr=false&invitationId=6132985418842456064&fe=true&midToken=AQHQ1w5V4ws4wA&trk=eml-email_m2m_invite_single_01-hero-0-accept%7Ecta&trkEmail=eml-email_m2m_invite_single_01-hero-0-accept%7Ecta-null-1b3p5%7Einqenmmy%7Eir">Accept Invitation</a>
<a href="https://www.linkedin.com/comm/profile/view?id=AAsAAAJUVTYBWkKAZwppmyYBjwgm1AI0nKRyTwA&authType=name&authToken=eAsV&invAcpt=2197625_I6132985453227360256_500&midToken=AQHQ1w5V4ws4wA&trk=eml-email_m2m_invite_single_01-hero-3-prof%7Ecta&trkEmail=eml-email_m2m_invite_single_01-hero-3-prof%7Ecta-null-1b3p5%7Einqenmmy%7Eir">View Profile</a>
</td>
</tr>
</table></body></html>"""
        notification_store = {}
        final_invitations_store = InvitationsStore()
        store_writer = Mock()
        # Stubbed reads: no previously stored invitations; "most recently
        # seen" timestamp 1459548811 is Apr 01 2016 06:13 PM.
        store_writer.get_from_binary.side_effect = stub(
            (call('linkedin-invitations'), notification_store),
            (call('most-recently-seen'), 1459548811)
        )
        # Stubbed writes: capture what the digester persists; the
        # InvitationsStore matcher records the actual invitations dict.
        store_writer.store_as_binary.side_effect = stub(
            (call('linkedin-invitations', final_invitations_store), True),
            (call('most-recently-seen', 1459548811), True)
        )
        expected_message = ("Subject: Invitation Digest: 1 new invitation(s)\n" + MAIL_HDR + expected_payload + \
                            "\n\n-----NOTIFICATION_BOUNDARY-5678")
        digest_inbox_proxy = Mock()
        digest_inbox_proxy.delete_previous_message.side_effect = stub((call(), True))
        digest_inbox_proxy.append.side_effect = stub((call(expected_message), True))
        digesters = []
        digester = LinkedinInvitationDigester(store_writer)  ## What we are testing
        digester.notification_boundary_rand = "-5678"  # no random number for the email's notification boundary
        digesters.append(digester)
        digestion_processor = DigestionProcessor(None, None, digesters, False, "ph@example.com", False, "INBOX")
        unmatched_to_move = []
        to_delete_from_notification_folder = []
        # Second notification is a clone of the first with a different sender
        # and an earlier date, so it lands before "most recently seen".
        notification_2_content = INCOMING_1.replace("Michael McAuliffe", "Aaaa Bbbb").replace("2 May 2016", "1 May 2016")
        digestion_processor.process_incoming_notification(1234, digesters, INCOMING_1, to_delete_from_notification_folder, unmatched_to_move, False)
        digestion_processor.process_incoming_notification(1235, digesters, notification_2_content, to_delete_from_notification_folder, unmatched_to_move, False)
        digestion_processor.process_incoming_notification(1236, digesters, FOOBAR, to_delete_from_notification_folder, unmatched_to_move, False)
        digester.rewrite_digest_emails(digest_inbox_proxy, has_previous_message=True,
                                       previously_seen=False, sender_to_implicate="ph@example.com")
        # The previous digest is deleted and replaced with the new rollup.
        self.assertEqual(digest_inbox_proxy.mock_calls, [call.delete_previous_message(), call.append(expected_message)])
        calls = store_writer.mock_calls
        # All three parsed invitations (keyed by their epoch timestamps) are
        # persisted; the "most recently seen" marker is written back unchanged.
        self.assertEqual(calls, [
            call.get_from_binary('linkedin-invitations'),
            call.get_from_binary('most-recently-seen'),
            call.store_as_binary('linkedin-invitations',
                                 {1462203530: {
                                     'spiel': "Hi Paul,\nI'd like to join your LinkedIn network.\nFOO BAR\nVice President\nView profile: https://www.linkedin.com/comm/profile/view?id=AAsAAAFlvJcBCnnIlLvQhDO6ZBU5rdb7fAb_-IU&authType=name&authToken=95up&invAcpt=2197625_I6132926076281774083_500&midToken=AQHQ1w5V4ws4wA&trk=eml-email_m2m_invite_single_01-hero-3-prof%7Ecta&trkEmail=eml-email_m2m_invite_single_01-hero-3-prof%7Ecta-null-1b3p5%7Einq68641%7E1h\n",
                                     'accept_url': 'https://www.linkedin.com/comm/people/invite-accept?mboxid=I6132926076281774083_500&sharedKey=w447gWge&fr=false&invitationId=6132926046288310272&fe=true&midToken=AQHQ1w5V4ws4wA&trk=eml-email_m2m_invite_single_01-hero-0-accept%7Ecta&trkEmail=eml-email_m2m_invite_single_01-hero-0-accept%7Ecta-null-1b3p5%7Einq68641%7E1h',
                                     'who': 'Foo Bar',
                                     'img_src': 'https://upload.wikimedia.org/wikipedia/commons/8/85/Border_collie.jpg',
                                     'profile_url': 'https://www.linkedin.com/comm/profile/view?id=AAsAAAFlvJcBCnnIlLvQhDO6ZBU5rdb7fAb_-IU&authType=name&authToken=95up&invAcpt=2197625_I6132926076281774083_500&midToken=AQHQ1w5V4ws4wA&trk=eml-email_m2m_invite_single_01-hero-3-prof%7Ecta&trkEmail=eml-email_m2m_invite_single_01-hero-3-prof%7Ecta-null-1b3p5%7Einq68641%7E1h'},
                                  1462131286: {
                                     'accept_url': 'https://www.linkedin.com/comm/people/invite-accept?mboxid=I6132985453227360256_500&sharedKey=48j6iM8P&fr=false&invitationId=6132985418842456064&fe=true&midToken=AQHQ1w5V4ws4wA&trk=eml-email_m2m_invite_single_01-hero-0-accept%7Ecta&trkEmail=eml-email_m2m_invite_single_01-hero-0-accept%7Ecta-null-1b3p5%7Einqenmmy%7Eir',
                                     'line_here': True, 'who': 'Aaaa Bbbb',
                                     'spiel': "Hi Paul,\nI'd like to join your LinkedIn network.\nAaaa Bbbb\nManaging Director\nView profile: https://www.linkedin.com/comm/profile/view?id=AAsAAAJUVTYBWkKAZwppmyYBjwgm1AI0nKRyTwA&authType=name&authToken=eAsV&invAcpt=2197625_I6132985453227360256_500&midToken=AQHQ1w5V4ws4wA&trk=eml-email_m2m_invite_single_01-hero-3-prof%7Ecta&trkEmail=eml-email_m2m_invite_single_01-hero-3-prof%7Ecta-null-1b3p5%7Einqenmmy%7Eir\n",
                                     'img_src': 'https://upload.wikimedia.org/wikipedia/commons/8/85/Border_collie.jpg',
                                     'profile_url': 'https://www.linkedin.com/comm/profile/view?id=AAsAAAJUVTYBWkKAZwppmyYBjwgm1AI0nKRyTwA&authType=name&authToken=eAsV&invAcpt=2197625_I6132985453227360256_500&midToken=AQHQ1w5V4ws4wA&trk=eml-email_m2m_invite_single_01-hero-3-prof%7Ecta&trkEmail=eml-email_m2m_invite_single_01-hero-3-prof%7Ecta-null-1b3p5%7Einqenmmy%7Eir'},
                                  1462217686: {
                                     'spiel': "Hi Paul,\nI'd like to join your LinkedIn network.\nMichael McAuliffe\nManaging Director\nView profile: https://www.linkedin.com/comm/profile/view?id=AAsAAAJUVTYBWkKAZwppmyYBjwgm1AI0nKRyTwA&authType=name&authToken=eAsV&invAcpt=2197625_I6132985453227360256_500&midToken=AQHQ1w5V4ws4wA&trk=eml-email_m2m_invite_single_01-hero-3-prof%7Ecta&trkEmail=eml-email_m2m_invite_single_01-hero-3-prof%7Ecta-null-1b3p5%7Einqenmmy%7Eir\n",
                                     'accept_url': 'https://www.linkedin.com/comm/people/invite-accept?mboxid=I6132985453227360256_500&sharedKey=48j6iM8P&fr=false&invitationId=6132985418842456064&fe=true&midToken=AQHQ1w5V4ws4wA&trk=eml-email_m2m_invite_single_01-hero-0-accept%7Ecta&trkEmail=eml-email_m2m_invite_single_01-hero-0-accept%7Ecta-null-1b3p5%7Einqenmmy%7Eir',
                                     'who': 'Michael McAuliffe',
                                     'img_src': 'https://upload.wikimedia.org/wikipedia/commons/8/85/Border_collie.jpg',
                                     'profile_url': 'https://www.linkedin.com/comm/profile/view?id=AAsAAAJUVTYBWkKAZwppmyYBjwgm1AI0nKRyTwA&authType=name&authToken=eAsV&invAcpt=2197625_I6132985453227360256_500&midToken=AQHQ1w5V4ws4wA&trk=eml-email_m2m_invite_single_01-hero-3-prof%7Ecta&trkEmail=eml-email_m2m_invite_single_01-hero-3-prof%7Ecta-null-1b3p5%7Einqenmmy%7Eir'}}
                                 ),
            call.store_as_binary('most-recently-seen', 1459548811)])
        # Everything matched a digester; all three source notifications are queued for deletion.
        self.assertEqual(len(unmatched_to_move), 0)
        self.assertEqual(str(to_delete_from_notification_folder), "[1234, 1235, 1236]")
        self.assertEqual(len(final_invitations_store.invitations), 3)
INCOMING_1 = """From: Michael McAuliffe <invitations@linkedin.com>
Message-ID: <543052688.1354069.1462217686519.JavaMail.app@lva1-app3333.prod.linkedin.com>
Subject: Paul, please add me to your LinkedIn network
Content-Type: multipart/alternative;
boundary="----=_Part_1354067_596425972.1462217686511"
To: Paul Hammant <Paul@Hammant.org>
Date: Mon, 2 May 2016 19:34:46 +0000 (UTC)
MIME-Version: 1.0
------=_Part_1354067_596425972.1462217686511
Content-Type: text/plain;charset=UTF-8
Content-Transfer-Encoding: quoted-printable
Content-ID: text-body
Hi Paul,
I'd like to join your LinkedIn network.
Michael McAuliffe
Managing Director
View profile: https://www.linkedin.com/comm/profile/view?id=3DAAsAAAJUVTYBW=
kKAZwppmyYBjwgm1AI0nKRyTwA&authType=3Dname&authToken=3DeAsV&invAcpt=3D21976=
25_I6132985453227360256_500&midToken=3DAQHQ1w5V4ws4wA&trk=3Deml-email_m2m_i=
nvite_single_01-hero-3-prof%7Ecta&trkEmail=3Deml-email_m2m_invite_single_01=
-hero-3-prof%7Ecta-null-1b3p5%7Einqenmmy%7Eir
Accept: https://www.linkedin.com/comm/people/invite-accept?mboxid=3DI613298=
5453227360256_500&sharedKey=3D48j6iM8P&fr=3Dfalse&invitationId=3D6132985418=
842456064&fe=3Dtrue&midToken=3DAQHQ1w5V4ws4wA&trk=3Deml-email_m2m_invite_si=
ngle_01-hero-0-accept%7Ecta&trkEmail=3Deml-email_m2m_invite_single_01-hero-=
0-accept%7Ecta-null-1b3p5%7Einqenmmy%7Eir
.....................................
Unsubscribe: https://www.linkedin.com/e/v2?e=3D1b3p5-inqenmmy-ir&t=3Dlun&mi=
dToken=3DAQHQ1w5V4ws4wA&ek=3Demail_m2m_invite_single_01&li=3D10&m=3Dunsub&t=
s=3Dunsub&loid=3DAQEYGYigaB9xLgAAAVRy9zzAiKA6TSjt5qtue7m6n7C7x-67Bc2z6DzP-H=
IHX3HVoujjV4VE5dFOCqIvFw&eid=3D1b3p5-inqenmmy-ir
Help: https://www.linkedin.com/e/v2?e=3D1b3p5-inqenmmy-ir&a=3DcustomerServi=
ceUrl&midToken=3DAQHQ1w5V4ws4wA&ek=3Demail_m2m_invite_single_01&li=3D9&m=3D=
footer&ts=3Dhelp&articleId=3D67
You are receiving Invitation emails.
This email was intended for Paul Hammant (Senior Director of Engineering at=
HedgeServ).
Learn why we included this: https://www.linkedin.com/e/v2?e=3D1b3p5-inqenmm=
y-ir&a=3DcustomerServiceUrl&midToken=3DAQHQ1w5V4ws4wA&ek=3Demail_m2m_invite=
_single_01&articleId=3D4788
=C2=A9 2016 LinkedIn Corporation, 2029 Stierlin Court, Mountain View CA 940=
43. LinkedIn and the LinkedIn logo are registered trademarks of LinkedIn.
------=_Part_1354067_596425972.1462217686511
Content-Type: text/html;charset=UTF-8
Content-Transfer-Encoding: quoted-printable
Content-ID: html-body
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.=
w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"><html xmlns=3D"http://www.w3.=
org/1999/xhtml" lang=3D"en" xml:lang=3D"en"><head>
<meta http-equiv=3D"Content-Type" content=3D"text/html; charset=3Dutf-8"> <=
meta name=3D"HandheldFriendly" content=3D"true"> <meta name=3D"viewport" co=
ntent=3D"width=3Ddevice-width; initial-scale=3D0.666667; maximum-scale=3D0.=
666667; user-scalable=3D0"> <meta name=3D"viewport" content=3D"width=3Ddevi=
ce-width"> <title></title> <!--[if mso]><style type=3D"text/css">body {font=
-family: 'Helvetica Neue', Helvetica, Arial, sans-serif;}.phoenix-email-con=
tainer {width: 512px !important;}</style><![endif]--> <!--[if IE]><style ty=
pe=3D"text/css">.phoenix-email-container {width: 512px !important;}</style>=
<![endif]--> <style type=3D"text/css">@media only screen and (max-width:32e=
m) { .phoenix-email-container { width:100% !important; } } @media only scre=
en and (max-width:20em) {} @media only screen and (max-device-width:30em) {=
} @media screen and (device-width:30em) and (device-height:22.5em), screen =
and (device-width:22.5em) and (device-height:30em), screen and (device-widt=
h:20em) and (device-height:15em) {} @media screen and (-webkit-min-device-p=
ixel-ratio:0) {} @media screen and (max-device-width:25.88em) and (max-devi=
ce-height:48.5em) {} </style> </head> <body style=3D"padding:0;margin:0 aut=
o;-webkit-text-size-adjust:100%;width:100% !important;-ms-text-size-adjust:=
100%;font-family:'Helvetica Neue',Helvetica,Arial,sans-serif;"> <div style=
=3D"overflow:hidden;color:transparent;visibility:hidden;mso-hide:all;width:=
0;font-size:0;opacity:0;height:0;"> Hi Paul, I'd like to join your LinkedIn=
network. </div> <table align=3D"center" border=3D"0" cellspacing=3D"0" cel=
lpadding=3D"0" width=3D"100%" bgcolor=3D"#EDF0F3" style=3D"background-color=
:#EDF0F3;table-layout:fixed;-webkit-text-size-adjust:100%;mso-table-rspace:=
0pt;mso-table-lspace:0pt;-ms-text-size-adjust:100%;"> <tbody> <tr> <td alig=
n=3D"center" style=3D"-webkit-text-size-adjust:100%;mso-table-rspace:0pt;ms=
o-table-lspace:0pt;-ms-text-size-adjust:100%;"> <center style=3D"width:100%=
;"> <table border=3D"0" class=3D"phoenix-email-container" cellspacing=3D"0"=
cellpadding=3D"0" width=3D"512" bgcolor=3D"#FFFFFF" style=3D"background-co=
lor:#FFFFFF;margin:0 auto;max-width:512px;-webkit-text-size-adjust:100%;mso=
-table-rspace:0pt;width:inherit;mso-table-lspace:0pt;-ms-text-size-adjust:1=
00%;"> <tbody> <tr> <td bgcolor=3D"#F6F8FA" style=3D"background-color:#F6F8=
FA;padding:12px;-webkit-text-size-adjust:100%;mso-table-rspace:0pt;mso-tabl=
e-lspace:0pt;-ms-text-size-adjust:100%;border-bottom:1px solid #ECECEC;"> <=
table border=3D"0" cellspacing=3D"0" cellpadding=3D"0" width=3D"100%" style=
=3D"-webkit-text-size-adjust:100%;mso-table-rspace:0pt;width:100% !importan=
t;mso-table-lspace:0pt;-ms-text-size-adjust:100%;min-width:100% !important;=
"> <tbody> <tr> <td align=3D"left" valign=3D"middle" style=3D"-webkit-text-=
size-adjust:100%;mso-table-rspace:0pt;mso-table-lspace:0pt;-ms-text-size-ad=
just:100%;"><a href=3D"https://www.linkedin.com/comm/nhome/?midToken=3DAQHQ=
1w5V4ws4wA&trk=3Deml-email_m2m_invite_single_01-header-4-home&trkEm=
ail=3Deml-email_m2m_invite_single_01-header-4-home-null-1b3p5%7Einqenmmy%7E=
ir" style=3D"cursor:pointer;color:#008CC9;-webkit-text-size-adjust:100%;dis=
play:inline-block;text-decoration:none;-ms-text-size-adjust:100%;"> <img al=
t=3D"LinkedIn" border=3D"0" src=3D"https://static.licdn.com/scds/common/u/i=
mages/email/phoenix/logos/logo_phoenix_header_blue_78x66_v1.png" height=3D"=
34" width=3D"40" style=3D"outline:none;-ms-interpolation-mode:bicubic;color=
:#FFFFFF;text-decoration:none;"></a></td> <td valign=3D"middle" width=3D"10=
0%" align=3D"right" style=3D"padding:0 0 0 10px;-webkit-text-size-adjust:10=
0%;mso-table-rspace:0pt;mso-table-lspace:0pt;-ms-text-size-adjust:100%;"><a=
href=3D"https://www.linkedin.com/comm/profile/view?id=3DAAsAAAAhiHkB2Xl5Qq=
Gw01CP-K2o5AvAA-e9my0&midToken=3DAQHQ1w5V4ws4wA&trk=3Deml-email_m2m=
_invite_single_01-header-6-profile&trkEmail=3Deml-email_m2m_invite_sing=
le_01-header-6-profile-null-1b3p5%7Einqenmmy%7Eir" style=3D"cursor:pointer;=
margin:0;color:#008CC9;-webkit-text-size-adjust:100%;display:inline-block;t=
ext-decoration:none;-ms-text-size-adjust:100%;"> <span style=3D"word-wrap:b=
reak-word;color:#4C4C4C;word-break:break-word;font-weight:400;-ms-word-brea=
k:break-all;font-size:14px;line-height:1.429;overflow-wrap:break-word;">Pau=
l Hammant</span></a></td> <td valign=3D"middle" width=3D"40" style=3D"-webk=
it-text-size-adjust:100%;mso-table-rspace:0pt;padding-left:10px;mso-table-l=
space:0pt;-ms-text-size-adjust:100%;"> <a href=3D"https://www.linkedin.com/=
comm/profile/view?id=3DAAsAAAAhiHkB2Xl5QqGw01CP-K2o5AvAA-e9my0&midToken=
=3DAQHQ1w5V4ws4wA&trk=3Deml-email_m2m_invite_single_01-header-6-profile=
&trkEmail=3Deml-email_m2m_invite_single_01-header-6-profile-null-1b3p5%=
7Einqenmmy%7Eir" style=3D"border-radius:50%;cursor:pointer;color:#008CC9;-w=
ebkit-text-size-adjust:100%;display:inline-block;text-decoration:none;-ms-t=
ext-size-adjust:100%;"><img alt=3D"" border=3D"0" height=3D"36" width=3D"36=
" src=3D"https://media.licdn.com/mpr/mpr/shrinknp_100_100/p/6/005/095/3cc/2=
4a8290.jpg" style=3D"border-radius:50%;outline:none;-ms-interpolation-mode:=
bicubic;color:#FFFFFF;text-decoration:none;"></a></td> <td width=3D"1" styl=
e=3D"-webkit-text-size-adjust:100%;mso-table-rspace:0pt;mso-table-lspace:0p=
t;-ms-text-size-adjust:100%;"> </td> </tr> </tbody> </table></td> </tr=
> <tr> <td style=3D"-webkit-text-size-adjust:100%;mso-table-rspace:0pt;mso-=
table-lspace:0pt;-ms-text-size-adjust:100%;"> <table border=3D"0" cellspaci=
ng=3D"0" cellpadding=3D"0" width=3D"100%" style=3D"-webkit-text-size-adjust=
:100%;mso-table-rspace:0pt;mso-table-lspace:0pt;-ms-text-size-adjust:100%;"=
> <tbody> <tr> <td style=3D"-webkit-text-size-adjust:100%;mso-table-rspace:=
0pt;mso-table-lspace:0pt;-ms-text-size-adjust:100%;"> <table border=3D"0" c=
ellspacing=3D"0" cellpadding=3D"0" width=3D"100%" style=3D"-webkit-text-siz=
e-adjust:100%;mso-table-rspace:0pt;mso-table-lspace:0pt;-ms-text-size-adjus=
t:100%;"> <tbody> <tr> <td style=3D"padding:24px 24px 36px 24px;-webkit-tex=
t-size-adjust:100%;mso-table-rspace:0pt;mso-table-lspace:0pt;-ms-text-size-=
adjust:100%;"> <table border=3D"0" cellspacing=3D"0" cellpadding=3D"0" widt=
h=3D"100%" style=3D"-webkit-text-size-adjust:100%;mso-table-rspace:0pt;mso-=
table-lspace:0pt;-ms-text-size-adjust:100%;"> <tbody> <tr> <td align=3D"lef=
t" style=3D"-webkit-text-size-adjust:100%;mso-table-rspace:0pt;mso-table-ls=
pace:0pt;-ms-text-size-adjust:100%;"> <p style=3D"margin:0;word-wrap:break-=
word;color:#4C4C4C;word-break:break-word;font-weight:400;-ms-word-break:bre=
ak-all;font-size:16px;line-height:1.5;overflow-wrap:break-word;">Hi Paul, I=
'd like to join your LinkedIn network.</p></td> </tr> <tr> <td align=3D"lef=
t" style=3D"padding:22px 0 16px 0;-webkit-text-size-adjust:100%;mso-table-r=
space:0pt;mso-table-lspace:0pt;-ms-text-size-adjust:100%;"> <table border=
=3D"0" cellspacing=3D"0" cellpadding=3D"0" width=3D"100%" style=3D"-webkit-=
text-size-adjust:100%;mso-table-rspace:0pt;mso-table-lspace:0pt;-ms-text-si=
ze-adjust:100%;"> <tbody> <tr> <td valign=3D"top" style=3D"padding:0 15px 0=
0;-webkit-text-size-adjust:100%;mso-table-rspace:0pt;mso-table-lspace:0pt;=
-ms-text-size-adjust:100%;"><a href=3D"https://www.linkedin.com/comm/profil=
e/view?id=3DAAsAAAJUVTYBWkKAZwppmyYBjwgm1AI0nKRyTwA&authType=3Dname&=
;authToken=3DeAsV&invAcpt=3D2197625_I6132985453227360256_500&midTok=
en=3DAQHQ1w5V4ws4wA&trk=3Deml-email_m2m_invite_single_01-hero-1-prof%7E=
photo&trkEmail=3Deml-email_m2m_invite_single_01-hero-1-prof%7Ephoto-nul=
l-1b3p5%7Einqenmmy%7Eir" style=3D"cursor:pointer;color:#008CC9;-webkit-text=
-size-adjust:100%;display:inline-block;text-decoration:none;-ms-text-size-a=
djust:100%;"><img src=3D"https://media.licdn.com/mpr/mpr/shrinknp_100_100/A=
AEAAQAAAAAAAALZAAAAJDFhMTQ5YTQ4LWUxNmYtNDhhNy05ZjIyLWI3NTdiNzZkZDFmNw.jpg" =
alt=3D"" height=3D"70" width=3D"70" style=3D"border-radius:50%;outline:none=
;-ms-interpolation-mode:bicubic;color:#FFFFFF;text-decoration:none;"></a></=
td> <td valign=3D"top" width=3D"100%" style=3D"-webkit-text-size-adjust:100=
%;mso-table-rspace:0pt;mso-table-lspace:0pt;-ms-text-size-adjust:100%;"><a =
href=3D"https://www.linkedin.com/comm/profile/view?id=3DAAsAAAJUVTYBWkKAZwp=
pmyYBjwgm1AI0nKRyTwA&authType=3Dname&authToken=3DeAsV&invAcpt=
=3D2197625_I6132985453227360256_500&midToken=3DAQHQ1w5V4ws4wA&trk=
=3Deml-email_m2m_invite_single_01-hero-2-prof%7Ename&trkEmail=3Deml-ema=
il_m2m_invite_single_01-hero-2-prof%7Ename-null-1b3p5%7Einqenmmy%7Eir" styl=
e=3D"cursor:pointer;color:#008CC9;-webkit-text-size-adjust:100%;display:inl=
ine-block;text-decoration:none;-ms-text-size-adjust:100%;"> <span style=3D"=
word-wrap:break-word;color:#262626;word-break:break-word;font-weight:700;-m=
s-word-break:break-all;font-size:16px;line-height:1.5;overflow-wrap:break-w=
ord;">Michael McAuliffe</span></a> <p style=3D"margin:0;word-wrap:break-wor=
d;color:#737373;word-break:break-word;font-weight:400;-ms-word-break:break-=
all;font-size:14px;line-height:1.429;overflow-wrap:break-word;">Managing Di=
rector</p> <p style=3D"margin:0;color:#737373;font-weight:400;font-size:14p=
x;line-height:1.429;">Charlotte, North Carolina Area</p></td> </tr> </tbody=
> </table></td> </tr> <tr> <td dir=3D"rtl" align=3D"left" style=3D"-webkit-=
text-size-adjust:100%;mso-table-rspace:0pt;mso-table-lspace:0pt;-ms-text-si=
ze-adjust:100%;direction:rtl !important;text-align:left !important;"> <!--[=
if mso]><table border=3D"0" cellpadding=3D"0" cellspacing=3D"0" width=3D"au=
to"><tr><td style=3D"padding:12px 0 0 0;"><![endif]--><span style=3D"displa=
y:inline-block;margin-top:12px;"> <table border=3D"0" cellpadding=3D"0" cel=
lspacing=3D"0" style=3D"-webkit-text-size-adjust:100%;mso-table-rspace:0pt;=
display:inline-block;mso-table-lspace:0pt;-ms-text-size-adjust:100%;"> <tbo=
dy> <tr> <td align=3D"center" valign=3D"middle" style=3D"-webkit-text-size-=
adjust:100%;mso-table-rspace:0pt;mso-table-lspace:0pt;-ms-text-size-adjust:=
100%;"><a href=3D"https://www.linkedin.com/comm/people/invite-accept?mboxid=
=3DI6132985453227360256_500&sharedKey=3D48j6iM8P&fr=3Dfalse&inv=
itationId=3D6132985418842456064&fe=3Dtrue&midToken=3DAQHQ1w5V4ws4wA=
&trk=3Deml-email_m2m_invite_single_01-hero-0-accept%7Ecta&trkEmail=
=3Deml-email_m2m_invite_single_01-hero-0-accept%7Ecta-null-1b3p5%7Einqenmmy=
%7Eir" target=3D"_blank" style=3D"cursor:pointer;word-wrap:normal;color:#00=
8CC9;word-break:normal;white-space:nowrap;-webkit-text-size-adjust:100%;dis=
play:block;text-decoration:none;-ms-text-size-adjust:100%;overflow-wrap:nor=
mal;"> <table border=3D"0" cellspacing=3D"0" cellpadding=3D"0" width=3D"aut=
o" style=3D"-webkit-text-size-adjust:100%;mso-table-rspace:0pt;mso-table-ls=
pace:0pt;-ms-text-size-adjust:100%;"> <tbody> <tr> <td bgcolor=3D"#008CC9" =
style=3D"padding:6px 16px;color:#FFFFFF;-webkit-text-size-adjust:100%;font-=
weight:500;font-size:16px;-ms-text-size-adjust:100%;border-color:#008CC9;ba=
ckground-color:#008CC9;border-radius:2px;mso-table-rspace:0pt;mso-table-lsp=
ace:0pt;border-width:1px;border-style:solid;"><a href=3D"https://www.linked=
in.com/comm/people/invite-accept?mboxid=3DI6132985453227360256_500&shar=
edKey=3D48j6iM8P&fr=3Dfalse&invitationId=3D6132985418842456064&=
fe=3Dtrue&midToken=3DAQHQ1w5V4ws4wA&trk=3Deml-email_m2m_invite_sing=
le_01-hero-0-accept%7Ecta&trkEmail=3Deml-email_m2m_invite_single_01-her=
o-0-accept%7Ecta-null-1b3p5%7Einqenmmy%7Eir" target=3D"_blank" style=3D"cur=
sor:pointer;color:#FFFFFF;-webkit-text-size-adjust:100%;display:inline-bloc=
k;text-decoration:none;-ms-text-size-adjust:100%;">Accept</a></td> </tr> </=
tbody> </table></a></td> </tr> </tbody> </table></span> <!--[if mso]></td><=
td style=3D"padding-top:12px;"><![endif]--><span style=3D"display:inline-bl=
ock;margin-top:12px;margin-right:12px;"> <table border=3D"0" cellpadding=3D=
"0" cellspacing=3D"0" style=3D"-webkit-text-size-adjust:100%;mso-table-rspa=
ce:0pt;display:inline-block;mso-table-lspace:0pt;-ms-text-size-adjust:100%;=
"> <tbody> <tr> <td align=3D"center" valign=3D"middle" style=3D"-webkit-tex=
t-size-adjust:100%;mso-table-rspace:0pt;mso-table-lspace:0pt;-ms-text-size-=
adjust:100%;"><a href=3D"https://www.linkedin.com/comm/profile/view?id=3DAA=
sAAAJUVTYBWkKAZwppmyYBjwgm1AI0nKRyTwA&authType=3Dname&authToken=3De=
AsV&invAcpt=3D2197625_I6132985453227360256_500&midToken=3DAQHQ1w5V4=
ws4wA&trk=3Deml-email_m2m_invite_single_01-hero-3-prof%7Ecta&trkEma=
il=3Deml-email_m2m_invite_single_01-hero-3-prof%7Ecta-null-1b3p5%7Einqenmmy=
%7Eir" target=3D"_blank" style=3D"cursor:pointer;word-wrap:normal;color:#00=
8CC9;word-break:normal;white-space:nowrap;-webkit-text-size-adjust:100%;dis=
play:block;text-decoration:none;-ms-text-size-adjust:100%;overflow-wrap:nor=
mal;"> <table border=3D"0" cellspacing=3D"0" cellpadding=3D"0" width=3D"aut=
o" style=3D"-webkit-text-size-adjust:100%;mso-table-rspace:0pt;mso-table-ls=
pace:0pt;-ms-text-size-adjust:100%;"> <tbody> <tr> <td style=3D"border-radi=
us:2px;padding:6px 16px;color:#4C4C4C;-webkit-text-size-adjust:100%;mso-tab=
le-rspace:0pt;font-weight:500;mso-table-lspace:0pt;font-size:16px;-ms-text-=
size-adjust:100%;border-color:#737373;border-width:1px;border-style:solid;"=
><a href=3D"https://www.linkedin.com/comm/profile/view?id=3DAAsAAAJUVTYBWkK=
AZwppmyYBjwgm1AI0nKRyTwA&authType=3Dname&authToken=3DeAsV&invAc=
pt=3D2197625_I6132985453227360256_500&midToken=3DAQHQ1w5V4ws4wA&trk=
=3Deml-email_m2m_invite_single_01-hero-3-prof%7Ecta&trkEmail=3Deml-emai=
l_m2m_invite_single_01-hero-3-prof%7Ecta-null-1b3p5%7Einqenmmy%7Eir" target=
=3D"_blank" style=3D"cursor:pointer;color:#4C4C4C;-webkit-text-size-adjust:=
100%;display:inline-block;text-decoration:none;-ms-text-size-adjust:100%;">=
View profile</a></td> </tr> </tbody> </table></a></td> </tr> </tbody> </tab=
le></span> <!--[if mso]></td></tr></table><![endif]--></td> </tr> </tbody> =
</table></td> </tr> </tbody> </table></td> </tr> </tbody> </table></td> </t=
r> <tr> <td style=3D"-webkit-text-size-adjust:100%;mso-table-rspace:0pt;mso=
-table-lspace:0pt;-ms-text-size-adjust:100%;"> <table border=3D"0" cellspac=
ing=3D"0" cellpadding=3D"0" width=3D"100%" bgcolor=3D"#EDF0F3" align=3D"cen=
ter" style=3D"background-color:#EDF0F3;padding:0 24px;color:#999999;-webkit=
-text-size-adjust:100%;mso-table-rspace:0pt;mso-table-lspace:0pt;-ms-text-s=
ize-adjust:100%;text-align:center;"> <tbody> <tr> <td align=3D"center" styl=
e=3D"padding:16px 0 0 0;-webkit-text-size-adjust:100%;mso-table-rspace:0pt;=
mso-table-lspace:0pt;-ms-text-size-adjust:100%;text-align:center;"> <table =
align=3D"center" border=3D"0" cellspacing=3D"0" cellpadding=3D"0" width=3D"=
100%" style=3D"-webkit-text-size-adjust:100%;mso-table-rspace:0pt;mso-table=
-lspace:0pt;-ms-text-size-adjust:100%;"> <tbody> <tr> <td valign=3D"middle"=
align=3D"center" style=3D"padding:0 0 16px 0;-webkit-text-size-adjust:100%=
;mso-table-rspace:0pt;vertical-align:middle;mso-table-lspace:0pt;-ms-text-s=
ize-adjust:100%;text-align:center;"><a href=3D"https://www.linkedin.com/e/v=
2?e=3D1b3p5-inqenmmy-ir&t=3Dlun&midToken=3DAQHQ1w5V4ws4wA&ek=3D=
email_m2m_invite_single_01&li=3D10&m=3Dunsub&ts=3Dunsub&loi=
d=3DAQEYGYigaB9xLgAAAVRy9zzAiKA6TSjt5qtue7m6n7C7x-67Bc2z6DzP-HIHX3HVoujjV4V=
E5dFOCqIvFw&eid=3D1b3p5-inqenmmy-ir" style=3D"cursor:pointer;color:#737=
373;-webkit-text-size-adjust:100%;text-decoration:underline;display:inline-=
block;-ms-text-size-adjust:100%;"> <span style=3D"color:#737373;font-weight=
:400;text-decoration:underline;font-size:12px;line-height:1.333;">Unsubscri=
be</span></a> | <a href=3D"https://www.linkedin.com/e=
/v2?e=3D1b3p5-inqenmmy-ir&a=3DcustomerServiceUrl&midToken=3DAQHQ1w5=
V4ws4wA&ek=3Demail_m2m_invite_single_01&li=3D9&m=3Dfooter&t=
s=3Dhelp&articleId=3D67" style=3D"cursor:pointer;color:#737373;-webkit-=
text-size-adjust:100%;text-decoration:underline;display:inline-block;-ms-te=
xt-size-adjust:100%;"> <span style=3D"color:#737373;font-weight:400;text-de=
coration:underline;font-size:12px;line-height:1.333;">Help</span></a></td> =
</tr> </tbody> </table></td> </tr> <tr> <td style=3D"-webkit-text-size-adju=
st:100%;mso-table-rspace:0pt;mso-table-lspace:0pt;-ms-text-size-adjust:100%=
;"> <table border=3D"0" cellspacing=3D"0" cellpadding=3D"0" width=3D"100%" =
style=3D"-webkit-text-size-adjust:100%;mso-table-rspace:0pt;mso-table-lspac=
e:0pt;-ms-text-size-adjust:100%;"> <tbody> <tr> <td align=3D"center" style=
=3D"padding:0 0 12px 0;-webkit-text-size-adjust:100%;mso-table-rspace:0pt;m=
so-table-lspace:0pt;-ms-text-size-adjust:100%;text-align:center;"> <p style=
=3D"margin:0;color:#737373;font-weight:400;font-size:12px;line-height:1.333=
;">You are receiving Invitation emails.</p></td> </tr> <tr> <td align=3D"ce=
nter" style=3D"padding:0 0 12px 0;-webkit-text-size-adjust:100%;mso-table-r=
space:0pt;mso-table-lspace:0pt;-ms-text-size-adjust:100%;text-align:center;=
"> <p style=3D"margin:0;word-wrap:break-word;color:#737373;word-break:break=
-word;font-weight:400;-ms-word-break:break-all;font-size:12px;line-height:1=
.333;overflow-wrap:break-word;">This email was intended for Paul Hammant (S=
enior Director of Engineering at HedgeServ). <a href=3D"https://www.linkedi=
n.com/e/v2?e=3D1b3p5-inqenmmy-ir&a=3DcustomerServiceUrl&midToken=3D=
AQHQ1w5V4ws4wA&ek=3Demail_m2m_invite_single_01&articleId=3D4788" st=
yle=3D"cursor:pointer;color:#737373;-webkit-text-size-adjust:100%;text-deco=
ration:underline;display:inline-block;-ms-text-size-adjust:100%;">Learn why=
we included this.</a></p></td> </tr> <tr> <td align=3D"center" style=3D"pa=
dding:0 0 8px 0;-webkit-text-size-adjust:100%;mso-table-rspace:0pt;mso-tabl=
e-lspace:0pt;-ms-text-size-adjust:100%;text-align:center;"><a href=3D"https=
://www.linkedin.com/comm/nhome/?midToken=3DAQHQ1w5V4ws4wA&trk=3Deml-ema=
il_m2m_invite_single_01-footer-8-home&trkEmail=3Deml-email_m2m_invite_s=
ingle_01-footer-8-home-null-1b3p5%7Einqenmmy%7Eir" style=3D"cursor:pointer;=
color:#737373;-webkit-text-size-adjust:100%;text-decoration:underline;displ=
ay:inline-block;-ms-text-size-adjust:100%;"><img alt=3D"LinkedIn" border=3D=
"0" height=3D"14" src=3D"https://static.licdn.com/scds/common/u/images/emai=
l/phoenix/logos/logo_phoenix_footer_gray_197x48_v1.png" width=3D"58" style=
=3D"outline:none;-ms-interpolation-mode:bicubic;color:#FFFFFF;display:block=
;text-decoration:none;"></a></td> </tr> <tr> <td align=3D"center" style=3D"=
padding:0 0 12px 0;-webkit-text-size-adjust:100%;mso-table-rspace:0pt;mso-t=
able-lspace:0pt;-ms-text-size-adjust:100%;text-align:center;"> <p style=3D"=
margin:0;color:#737373;font-weight:400;font-size:12px;line-height:1.333;">=
=C2=A9 2016 LinkedIn Corporation, 2029 Stierlin Court, Mountain View CA 940=
43. LinkedIn and the LinkedIn logo are registered trademarks of LinkedIn.</=
p></td> </tr> </tbody> </table></td> </tr> </tbody> </table></td> </tr> </t=
body> </table> </center></td> </tr> </tbody> </table> <img src=3D"http://ww=
w.linkedin.com/emimp/1b3p5-inqenmmy-ir.gif" style=3D"outline:none;-ms-inter=
polation-mode:bicubic;color:#FFFFFF;text-decoration:none;width:1px;height:1=
px;"> </body> </html>=
------=_Part_1354067_596425972.1462217686511--"""
# Complete raw LinkedIn invitation email (RFC 2822 headers plus a
# multipart/alternative body whose text and HTML parts are quoted-printable
# encoded).  Used verbatim as a parsing fixture — do not re-wrap, re-indent
# or otherwise normalize this text.
FOOBAR = """From: Foo Bar <invitations@linkedin.com>
Message-ID: <311349338.1161874.1462203530637.JavaMail.app@lva1-app3333.prod.linkedin.com>
Subject: Paul, please add me to your LinkedIn network
Content-Type: multipart/alternative;
boundary="----=_Part_1161867_773496138.1462203530633"
To: Paul Hammant <Paul@Hammant.org>
Date: Mon, 2 May 2016 15:38:50 +0000 (UTC)
MIME-Version: 1.0
------=_Part_1161867_773496138.1462203530633
Content-Type: text/plain;charset=UTF-8
Content-Transfer-Encoding: quoted-printable
Content-ID: text-body
Hi Paul,
I'd like to join your LinkedIn network.
FOO BAR
Vice President
View profile: https://www.linkedin.com/comm/profile/view?id=3DAAsAAAFlvJcBC=
nnIlLvQhDO6ZBU5rdb7fAb_-IU&authType=3Dname&authToken=3D95up&invAcpt=3D21976=
25_I6132926076281774083_500&midToken=3DAQHQ1w5V4ws4wA&trk=3Deml-email_m2m_i=
nvite_single_01-hero-3-prof%7Ecta&trkEmail=3Deml-email_m2m_invite_single_01=
-hero-3-prof%7Ecta-null-1b3p5%7Einq68641%7E1h
Accept: https://www.linkedin.com/comm/people/invite-accept?mboxid=3DI613292=
6076281774083_500&sharedKey=3Dw447gWge&fr=3Dfalse&invitationId=3D6132926046=
288310272&fe=3Dtrue&midToken=3DAQHQ1w5V4ws4wA&trk=3Deml-email_m2m_invite_si=
ngle_01-hero-0-accept%7Ecta&trkEmail=3Deml-email_m2m_invite_single_01-hero-=
0-accept%7Ecta-null-1b3p5%7Einq68641%7E1h
.....................................
Unsubscribe: https://www.linkedin.com/e/v2?e=3D1b3p5-inq68641-1h&t=3Dlun&mi=
dToken=3DAQHQ1w5V4ws4wA&ek=3Demail_m2m_invite_single_01&li=3D10&m=3Dunsub&t=
s=3Dunsub&loid=3DAQFdQYOv_FVJAgAAAVRyHzzTguEadC55UC3CMRW6tuWfby8cpsHi1xOrXC=
RZJ4MzTaYlDlG2VPl5Zx_ohw&eid=3D1b3p5-inq68641-1h
Help: https://www.linkedin.com/e/v2?e=3D1b3p5-inq68641-1h&a=3DcustomerServi=
ceUrl&midToken=3DAQHQ1w5V4ws4wA&ek=3Demail_m2m_invite_single_01&li=3D9&m=3D=
footer&ts=3Dhelp&articleId=3D67
You are receiving Invitation emails.
This email was intended for Paul Hammant (Senior Director of Engineering at=
HedgeServ).
Learn why we included this: https://www.linkedin.com/e/v2?e=3D1b3p5-inq6864=
1-1h&a=3DcustomerServiceUrl&midToken=3DAQHQ1w5V4ws4wA&ek=3Demail_m2m_invite=
_single_01&articleId=3D4788
=C2=A9 2016 LinkedIn Corporation, 2029 Stierlin Court, Mountain View CA 940=
43. LinkedIn and the LinkedIn logo are registered trademarks of LinkedIn.
------=_Part_1161867_773496138.1462203530633
Content-Type: text/html;charset=UTF-8
Content-Transfer-Encoding: quoted-printable
Content-ID: html-body
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.=
w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"><html xmlns=3D"http://www.w3.=
org/1999/xhtml" lang=3D"en" xml:lang=3D"en"><head>
<meta http-equiv=3D"Content-Type" content=3D"text/html; charset=3Dutf-8"> <=
meta name=3D"HandheldFriendly" content=3D"true"> <meta name=3D"viewport" co=
ntent=3D"width=3Ddevice-width; initial-scale=3D0.666667; maximum-scale=3D0.=
666667; user-scalable=3D0"> <meta name=3D"viewport" content=3D"width=3Ddevi=
ce-width"> <title></title> <!--[if mso]><style type=3D"text/css">body {font=
-family: 'Helvetica Neue', Helvetica, Arial, sans-serif;}.phoenix-email-con=
tainer {width: 512px !important;}</style><![endif]--> <!--[if IE]><style ty=
pe=3D"text/css">.phoenix-email-container {width: 512px !important;}</style>=
<![endif]--> <style type=3D"text/css">@media only screen and (max-width:32e=
m) { .phoenix-email-container { width:100% !important; } } @media only scre=
en and (max-width:20em) {} @media only screen and (max-device-width:30em) {=
} @media screen and (device-width:30em) and (device-height:22.5em), screen =
and (device-width:22.5em) and (device-height:30em), screen and (device-widt=
h:20em) and (device-height:15em) {} @media screen and (-webkit-min-device-p=
ixel-ratio:0) {} @media screen and (max-device-width:25.88em) and (max-devi=
ce-height:48.5em) {} </style> </head> <body style=3D"padding:0;margin:0 aut=
o;-webkit-text-size-adjust:100%;width:100% !important;-ms-text-size-adjust:=
100%;font-family:'Helvetica Neue',Helvetica,Arial,sans-serif;"> <div style=
=3D"overflow:hidden;color:transparent;visibility:hidden;mso-hide:all;width:=
0;font-size:0;opacity:0;height:0;"> Hi Paul, I'd like to join your LinkedIn=
network. </div> <table align=3D"center" border=3D"0" cellspacing=3D"0" cel=
lpadding=3D"0" width=3D"100%" bgcolor=3D"#EDF0F3" style=3D"background-color=
:#EDF0F3;table-layout:fixed;-webkit-text-size-adjust:100%;mso-table-rspace:=
0pt;mso-table-lspace:0pt;-ms-text-size-adjust:100%;"> <tbody> <tr> <td alig=
n=3D"center" style=3D"-webkit-text-size-adjust:100%;mso-table-rspace:0pt;ms=
o-table-lspace:0pt;-ms-text-size-adjust:100%;"> <center style=3D"width:100%=
;"> <table border=3D"0" class=3D"phoenix-email-container" cellspacing=3D"0"=
cellpadding=3D"0" width=3D"512" bgcolor=3D"#FFFFFF" style=3D"background-co=
lor:#FFFFFF;margin:0 auto;max-width:512px;-webkit-text-size-adjust:100%;mso=
-table-rspace:0pt;width:inherit;mso-table-lspace:0pt;-ms-text-size-adjust:1=
00%;"> <tbody> <tr> <td bgcolor=3D"#F6F8FA" style=3D"background-color:#F6F8=
FA;padding:12px;-webkit-text-size-adjust:100%;mso-table-rspace:0pt;mso-tabl=
e-lspace:0pt;-ms-text-size-adjust:100%;border-bottom:1px solid #ECECEC;"> <=
table border=3D"0" cellspacing=3D"0" cellpadding=3D"0" width=3D"100%" style=
=3D"-webkit-text-size-adjust:100%;mso-table-rspace:0pt;width:100% !importan=
t;mso-table-lspace:0pt;-ms-text-size-adjust:100%;min-width:100% !important;=
"> <tbody> <tr> <td align=3D"left" valign=3D"middle" style=3D"-webkit-text-=
size-adjust:100%;mso-table-rspace:0pt;mso-table-lspace:0pt;-ms-text-size-ad=
just:100%;"><a href=3D"https://www.linkedin.com/comm/nhome/?midToken=3DAQHQ=
1w5V4ws4wA&trk=3Deml-email_m2m_invite_single_01-header-4-home&trkEm=
ail=3Deml-email_m2m_invite_single_01-header-4-home-null-1b3p5%7Einq68641%7E=
1h" style=3D"cursor:pointer;color:#008CC9;-webkit-text-size-adjust:100%;dis=
play:inline-block;text-decoration:none;-ms-text-size-adjust:100%;"> <img al=
t=3D"LinkedIn" border=3D"0" src=3D"https://static.licdn.com/scds/common/u/i=
mages/email/phoenix/logos/logo_phoenix_header_blue_78x66_v1.png" height=3D"=
34" width=3D"40" style=3D"outline:none;-ms-interpolation-mode:bicubic;color=
:#FFFFFF;text-decoration:none;"></a></td> <td valign=3D"middle" width=3D"10=
0%" align=3D"right" style=3D"padding:0 0 0 10px;-webkit-text-size-adjust:10=
0%;mso-table-rspace:0pt;mso-table-lspace:0pt;-ms-text-size-adjust:100%;"><a=
href=3D"https://www.linkedin.com/comm/profile/view?id=3DAAsAAAAhiHkB2Xl5Qq=
Gw01CP-K2o5AvAA-e9my0&midToken=3DAQHQ1w5V4ws4wA&trk=3Deml-email_m2m=
_invite_single_01-header-6-profile&trkEmail=3Deml-email_m2m_invite_sing=
le_01-header-6-profile-null-1b3p5%7Einq68641%7E1h" style=3D"cursor:pointer;=
margin:0;color:#008CC9;-webkit-text-size-adjust:100%;display:inline-block;t=
ext-decoration:none;-ms-text-size-adjust:100%;"> <span style=3D"word-wrap:b=
reak-word;color:#4C4C4C;word-break:break-word;font-weight:400;-ms-word-brea=
k:break-all;font-size:14px;line-height:1.429;overflow-wrap:break-word;">Pau=
l Hammant</span></a></td> <td valign=3D"middle" width=3D"40" style=3D"-webk=
it-text-size-adjust:100%;mso-table-rspace:0pt;padding-left:10px;mso-table-l=
space:0pt;-ms-text-size-adjust:100%;"> <a href=3D"https://www.linkedin.com/=
comm/profile/view?id=3DAAsAAAAhiHkB2Xl5QqGw01CP-K2o5AvAA-e9my0&midToken=
=3DAQHQ1w5V4ws4wA&trk=3Deml-email_m2m_invite_single_01-header-6-profile=
&trkEmail=3Deml-email_m2m_invite_single_01-header-6-profile-null-1b3p5%=
7Einq68641%7E1h" style=3D"border-radius:50%;cursor:pointer;color:#008CC9;-w=
ebkit-text-size-adjust:100%;display:inline-block;text-decoration:none;-ms-t=
ext-size-adjust:100%;"><img alt=3D"" border=3D"0" height=3D"36" width=3D"36=
" src=3D"https://media.licdn.com/mpr/mpr/shrinknp_100_100/p/6/005/095/3cc/2=
4a8290.jpg" style=3D"border-radius:50%;outline:none;-ms-interpolation-mode:=
bicubic;color:#FFFFFF;text-decoration:none;"></a></td> <td width=3D"1" styl=
e=3D"-webkit-text-size-adjust:100%;mso-table-rspace:0pt;mso-table-lspace:0p=
t;-ms-text-size-adjust:100%;"> </td> </tr> </tbody> </table></td> </tr=
> <tr> <td style=3D"-webkit-text-size-adjust:100%;mso-table-rspace:0pt;mso-=
table-lspace:0pt;-ms-text-size-adjust:100%;"> <table border=3D"0" cellspaci=
ng=3D"0" cellpadding=3D"0" width=3D"100%" style=3D"-webkit-text-size-adjust=
:100%;mso-table-rspace:0pt;mso-table-lspace:0pt;-ms-text-size-adjust:100%;"=
> <tbody> <tr> <td style=3D"-webkit-text-size-adjust:100%;mso-table-rspace:=
0pt;mso-table-lspace:0pt;-ms-text-size-adjust:100%;"> <table border=3D"0" c=
ellspacing=3D"0" cellpadding=3D"0" width=3D"100%" style=3D"-webkit-text-siz=
e-adjust:100%;mso-table-rspace:0pt;mso-table-lspace:0pt;-ms-text-size-adjus=
t:100%;"> <tbody> <tr> <td style=3D"padding:24px 24px 36px 24px;-webkit-tex=
t-size-adjust:100%;mso-table-rspace:0pt;mso-table-lspace:0pt;-ms-text-size-=
adjust:100%;"> <table border=3D"0" cellspacing=3D"0" cellpadding=3D"0" widt=
h=3D"100%" style=3D"-webkit-text-size-adjust:100%;mso-table-rspace:0pt;mso-=
table-lspace:0pt;-ms-text-size-adjust:100%;"> <tbody> <tr> <td align=3D"lef=
t" style=3D"-webkit-text-size-adjust:100%;mso-table-rspace:0pt;mso-table-ls=
pace:0pt;-ms-text-size-adjust:100%;"> <p style=3D"margin:0;word-wrap:break-=
word;color:#4C4C4C;word-break:break-word;font-weight:400;-ms-word-break:bre=
ak-all;font-size:16px;line-height:1.5;overflow-wrap:break-word;">Hi Paul, I=
'd like to join your LinkedIn network.</p></td> </tr> <tr> <td align=3D"lef=
t" style=3D"padding:22px 0 16px 0;-webkit-text-size-adjust:100%;mso-table-r=
space:0pt;mso-table-lspace:0pt;-ms-text-size-adjust:100%;"> <table border=
=3D"0" cellspacing=3D"0" cellpadding=3D"0" width=3D"100%" style=3D"-webkit-=
text-size-adjust:100%;mso-table-rspace:0pt;mso-table-lspace:0pt;-ms-text-si=
ze-adjust:100%;"> <tbody> <tr> <td valign=3D"top" width=3D"100%" style=3D"-=
webkit-text-size-adjust:100%;mso-table-rspace:0pt;mso-table-lspace:0pt;-ms-=
text-size-adjust:100%;"><a href=3D"https://www.linkedin.com/comm/profile/vi=
ew?id=3DAAsAAAFlvJcBCnnIlLvQhDO6ZBU5rdb7fAb_-IU&authType=3Dname&aut=
hToken=3D95up&invAcpt=3D2197625_I6132926076281774083_500&midToken=
=3DAQHQ1w5V4ws4wA&trk=3Deml-email_m2m_invite_single_01-hero-2-prof%7Ena=
me&trkEmail=3Deml-email_m2m_invite_single_01-hero-2-prof%7Ename-null-1b=
3p5%7Einq68641%7E1h" style=3D"cursor:pointer;color:#008CC9;-webkit-text-siz=
e-adjust:100%;display:inline-block;text-decoration:none;-ms-text-size-adjus=
t:100%;"> <span style=3D"word-wrap:break-word;color:#262626;word-break:brea=
k-word;font-weight:700;-ms-word-break:break-all;font-size:16px;line-height:=
1.5;overflow-wrap:break-word;">FOO BAR</span></a> <p style=3D"margin:0;wo=
rd-wrap:break-word;color:#737373;word-break:break-word;font-weight:400;-ms-=
word-break:break-all;font-size:14px;line-height:1.429;overflow-wrap:break-w=
ord;">Vice President</p> <p style=3D"margin:0;color:#737373;font-weight:400=
;font-size:14px;line-height:1.429;">Greater New York City Area</p></td> </t=
r> </tbody> </table></td> </tr> <tr> <td dir=3D"rtl" align=3D"left" style=
=3D"-webkit-text-size-adjust:100%;mso-table-rspace:0pt;mso-table-lspace:0pt=
;-ms-text-size-adjust:100%;direction:rtl !important;text-align:left !import=
ant;"> <!--[if mso]><table border=3D"0" cellpadding=3D"0" cellspacing=3D"0"=
width=3D"auto"><tr><td style=3D"padding:12px 0 0 0;"><![endif]--><span sty=
le=3D"display:inline-block;margin-top:12px;"> <table border=3D"0" cellpaddi=
ng=3D"0" cellspacing=3D"0" style=3D"-webkit-text-size-adjust:100%;mso-table=
-rspace:0pt;display:inline-block;mso-table-lspace:0pt;-ms-text-size-adjust:=
100%;"> <tbody> <tr> <td align=3D"center" valign=3D"middle" style=3D"-webki=
t-text-size-adjust:100%;mso-table-rspace:0pt;mso-table-lspace:0pt;-ms-text-=
size-adjust:100%;"><a href=3D"https://www.linkedin.com/comm/people/invite-a=
ccept?mboxid=3DI6132926076281774083_500&sharedKey=3Dw447gWge&fr=3Df=
alse&invitationId=3D6132926046288310272&fe=3Dtrue&midToken=3DAQ=
HQ1w5V4ws4wA&trk=3Deml-email_m2m_invite_single_01-hero-0-accept%7Ecta&a=
mp;trkEmail=3Deml-email_m2m_invite_single_01-hero-0-accept%7Ecta-null-1b3p5=
%7Einq68641%7E1h" target=3D"_blank" style=3D"cursor:pointer;word-wrap:norma=
l;color:#008CC9;word-break:normal;white-space:nowrap;-webkit-text-size-adju=
st:100%;display:block;text-decoration:none;-ms-text-size-adjust:100%;overfl=
ow-wrap:normal;"> <table border=3D"0" cellspacing=3D"0" cellpadding=3D"0" w=
idth=3D"auto" style=3D"-webkit-text-size-adjust:100%;mso-table-rspace:0pt;m=
so-table-lspace:0pt;-ms-text-size-adjust:100%;"> <tbody> <tr> <td bgcolor=
=3D"#008CC9" style=3D"padding:6px 16px;color:#FFFFFF;-webkit-text-size-adju=
st:100%;font-weight:500;font-size:16px;-ms-text-size-adjust:100%;border-col=
or:#008CC9;background-color:#008CC9;border-radius:2px;mso-table-rspace:0pt;=
mso-table-lspace:0pt;border-width:1px;border-style:solid;"><a href=3D"https=
://www.linkedin.com/comm/people/invite-accept?mboxid=3DI6132926076281774083=
_500&sharedKey=3Dw447gWge&fr=3Dfalse&invitationId=3D61329260462=
88310272&fe=3Dtrue&midToken=3DAQHQ1w5V4ws4wA&trk=3Deml-email_m2=
m_invite_single_01-hero-0-accept%7Ecta&trkEmail=3Deml-email_m2m_invite_=
single_01-hero-0-accept%7Ecta-null-1b3p5%7Einq68641%7E1h" target=3D"_blank"=
style=3D"cursor:pointer;color:#FFFFFF;-webkit-text-size-adjust:100%;displa=
y:inline-block;text-decoration:none;-ms-text-size-adjust:100%;">Accept</a><=
/td> </tr> </tbody> </table></a></td> </tr> </tbody> </table></span> <!--[i=
f mso]></td><td style=3D"padding-top:12px;"><![endif]--><span style=3D"disp=
lay:inline-block;margin-top:12px;margin-right:12px;"> <table border=3D"0" c=
ellpadding=3D"0" cellspacing=3D"0" style=3D"-webkit-text-size-adjust:100%;m=
so-table-rspace:0pt;display:inline-block;mso-table-lspace:0pt;-ms-text-size=
-adjust:100%;"> <tbody> <tr> <td align=3D"center" valign=3D"middle" style=
=3D"-webkit-text-size-adjust:100%;mso-table-rspace:0pt;mso-table-lspace:0pt=
;-ms-text-size-adjust:100%;"><a href=3D"https://www.linkedin.com/comm/profi=
le/view?id=3DAAsAAAFlvJcBCnnIlLvQhDO6ZBU5rdb7fAb_-IU&authType=3Dname&am=
p;authToken=3D95up&invAcpt=3D2197625_I6132926076281774083_500&midTo=
ken=3DAQHQ1w5V4ws4wA&trk=3Deml-email_m2m_invite_single_01-hero-3-prof%7=
Ecta&trkEmail=3Deml-email_m2m_invite_single_01-hero-3-prof%7Ecta-null-1=
b3p5%7Einq68641%7E1h" target=3D"_blank" style=3D"cursor:pointer;word-wrap:n=
ormal;color:#008CC9;word-break:normal;white-space:nowrap;-webkit-text-size-=
adjust:100%;display:block;text-decoration:none;-ms-text-size-adjust:100%;ov=
erflow-wrap:normal;"> <table border=3D"0" cellspacing=3D"0" cellpadding=3D"=
0" width=3D"auto" style=3D"-webkit-text-size-adjust:100%;mso-table-rspace:0=
pt;mso-table-lspace:0pt;-ms-text-size-adjust:100%;"> <tbody> <tr> <td style=
=3D"border-radius:2px;padding:6px 16px;color:#4C4C4C;-webkit-text-size-adju=
st:100%;mso-table-rspace:0pt;font-weight:500;mso-table-lspace:0pt;font-size=
:16px;-ms-text-size-adjust:100%;border-color:#737373;border-width:1px;borde=
r-style:solid;"><a href=3D"https://www.linkedin.com/comm/profile/view?id=3D=
AAsAAAFlvJcBCnnIlLvQhDO6ZBU5rdb7fAb_-IU&authType=3Dname&authToken=
=3D95up&invAcpt=3D2197625_I6132926076281774083_500&midToken=3DAQHQ1=
w5V4ws4wA&trk=3Deml-email_m2m_invite_single_01-hero-3-prof%7Ecta&tr=
kEmail=3Deml-email_m2m_invite_single_01-hero-3-prof%7Ecta-null-1b3p5%7Einq6=
8641%7E1h" target=3D"_blank" style=3D"cursor:pointer;color:#4C4C4C;-webkit-=
text-size-adjust:100%;display:inline-block;text-decoration:none;-ms-text-si=
ze-adjust:100%;">View profile</a></td> </tr> </tbody> </table></a></td> </t=
r> </tbody> </table></span> <!--[if mso]></td></tr></table><![endif]--></td=
> </tr> </tbody> </table></td> </tr> </tbody> </table></td> </tr> </tbody> =
</table></td> </tr> <tr> <td style=3D"-webkit-text-size-adjust:100%;mso-tab=
le-rspace:0pt;mso-table-lspace:0pt;-ms-text-size-adjust:100%;"> <table bord=
er=3D"0" cellspacing=3D"0" cellpadding=3D"0" width=3D"100%" bgcolor=3D"#EDF=
0F3" align=3D"center" style=3D"background-color:#EDF0F3;padding:0 24px;colo=
r:#999999;-webkit-text-size-adjust:100%;mso-table-rspace:0pt;mso-table-lspa=
ce:0pt;-ms-text-size-adjust:100%;text-align:center;"> <tbody> <tr> <td alig=
n=3D"center" style=3D"padding:16px 0 0 0;-webkit-text-size-adjust:100%;mso-=
table-rspace:0pt;mso-table-lspace:0pt;-ms-text-size-adjust:100%;text-align:=
center;"> <table align=3D"center" border=3D"0" cellspacing=3D"0" cellpaddin=
g=3D"0" width=3D"100%" style=3D"-webkit-text-size-adjust:100%;mso-table-rsp=
ace:0pt;mso-table-lspace:0pt;-ms-text-size-adjust:100%;"> <tbody> <tr> <td =
valign=3D"middle" align=3D"center" style=3D"padding:0 0 16px 0;-webkit-text=
-size-adjust:100%;mso-table-rspace:0pt;vertical-align:middle;mso-table-lspa=
ce:0pt;-ms-text-size-adjust:100%;text-align:center;"><a href=3D"https://www=
.linkedin.com/e/v2?e=3D1b3p5-inq68641-1h&t=3Dlun&midToken=3DAQHQ1w5=
V4ws4wA&ek=3Demail_m2m_invite_single_01&li=3D10&m=3Dunsub&t=
s=3Dunsub&loid=3DAQFdQYOv_FVJAgAAAVRyHzzTguEadC55UC3CMRW6tuWfby8cpsHi1x=
OrXCRZJ4MzTaYlDlG2VPl5Zx_ohw&eid=3D1b3p5-inq68641-1h" style=3D"cursor:p=
ointer;color:#737373;-webkit-text-size-adjust:100%;text-decoration:underlin=
e;display:inline-block;-ms-text-size-adjust:100%;"> <span style=3D"color:#7=
37373;font-weight:400;text-decoration:underline;font-size:12px;line-height:=
1.333;">Unsubscribe</span></a> | <a href=3D"https://w=
ww.linkedin.com/e/v2?e=3D1b3p5-inq68641-1h&a=3DcustomerServiceUrl&m=
idToken=3DAQHQ1w5V4ws4wA&ek=3Demail_m2m_invite_single_01&li=3D9&=
;m=3Dfooter&ts=3Dhelp&articleId=3D67" style=3D"cursor:pointer;color=
:#737373;-webkit-text-size-adjust:100%;text-decoration:underline;display:in=
line-block;-ms-text-size-adjust:100%;"> <span style=3D"color:#737373;font-w=
eight:400;text-decoration:underline;font-size:12px;line-height:1.333;">Help=
</span></a></td> </tr> </tbody> </table></td> </tr> <tr> <td style=3D"-webk=
it-text-size-adjust:100%;mso-table-rspace:0pt;mso-table-lspace:0pt;-ms-text=
-size-adjust:100%;"> <table border=3D"0" cellspacing=3D"0" cellpadding=3D"0=
" width=3D"100%" style=3D"-webkit-text-size-adjust:100%;mso-table-rspace:0p=
t;mso-table-lspace:0pt;-ms-text-size-adjust:100%;"> <tbody> <tr> <td align=
=3D"center" style=3D"padding:0 0 12px 0;-webkit-text-size-adjust:100%;mso-t=
able-rspace:0pt;mso-table-lspace:0pt;-ms-text-size-adjust:100%;text-align:c=
enter;"> <p style=3D"margin:0;color:#737373;font-weight:400;font-size:12px;=
line-height:1.333;">You are receiving Invitation emails.</p></td> </tr> <tr=
> <td align=3D"center" style=3D"padding:0 0 12px 0;-webkit-text-size-adjust=
:100%;mso-table-rspace:0pt;mso-table-lspace:0pt;-ms-text-size-adjust:100%;t=
ext-align:center;"> <p style=3D"margin:0;word-wrap:break-word;color:#737373=
;word-break:break-word;font-weight:400;-ms-word-break:break-all;font-size:1=
2px;line-height:1.333;overflow-wrap:break-word;">This email was intended fo=
r Paul Hammant (Senior Director of Engineering at HedgeServ). <a href=3D"ht=
tps://www.linkedin.com/e/v2?e=3D1b3p5-inq68641-1h&a=3DcustomerServiceUr=
l&midToken=3DAQHQ1w5V4ws4wA&ek=3Demail_m2m_invite_single_01&art=
icleId=3D4788" style=3D"cursor:pointer;color:#737373;-webkit-text-size-adju=
st:100%;text-decoration:underline;display:inline-block;-ms-text-size-adjust=
:100%;">Learn why we included this.</a></p></td> </tr> <tr> <td align=3D"ce=
nter" style=3D"padding:0 0 8px 0;-webkit-text-size-adjust:100%;mso-table-rs=
pace:0pt;mso-table-lspace:0pt;-ms-text-size-adjust:100%;text-align:center;"=
><a href=3D"https://www.linkedin.com/comm/nhome/?midToken=3DAQHQ1w5V4ws4wA&=
amp;trk=3Deml-email_m2m_invite_single_01-footer-8-home&trkEmail=3Deml-e=
mail_m2m_invite_single_01-footer-8-home-null-1b3p5%7Einq68641%7E1h" style=
=3D"cursor:pointer;color:#737373;-webkit-text-size-adjust:100%;text-decorat=
ion:underline;display:inline-block;-ms-text-size-adjust:100%;"><img alt=3D"=
LinkedIn" border=3D"0" height=3D"14" src=3D"https://static.licdn.com/scds/c=
ommon/u/images/email/phoenix/logos/logo_phoenix_footer_gray_197x48_v1.png" =
width=3D"58" style=3D"outline:none;-ms-interpolation-mode:bicubic;color:#FF=
FFFF;display:block;text-decoration:none;"></a></td> </tr> <tr> <td align=3D=
"center" style=3D"padding:0 0 12px 0;-webkit-text-size-adjust:100%;mso-tabl=
e-rspace:0pt;mso-table-lspace:0pt;-ms-text-size-adjust:100%;text-align:cent=
er;"> <p style=3D"margin:0;color:#737373;font-weight:400;font-size:12px;lin=
e-height:1.333;">=C2=A9 2016 LinkedIn Corporation, 2029 Stierlin Court, Mou=
ntain View CA 94043. LinkedIn and the LinkedIn logo are registered trademar=
ks of LinkedIn.</p></td> </tr> </tbody> </table></td> </tr> </tbody> </tabl=
e></td> </tr> </tbody> </table> </center></td> </tr> </tbody> </table> <img=
src=3D"http://www.linkedin.com/emimp/1b3p5-inq68641-1h.gif" style=3D"outli=
ne:none;-ms-interpolation-mode:bicubic;color:#FFFFFF;text-decoration:none;w=
idth:1px;height:1px;"> </body> </html>=
------=_Part_1161867_773496138.1462203530633--"""
# Script entry point: discover and run the unittest test cases defined
# earlier in this module.  `unittest` is imported at the top of the module
# (outside this excerpt) — TODO confirm.  The guard body was unindented in
# the source (a whitespace-mangling artifact), which is a SyntaxError;
# restored here.
if __name__ == '__main__':
    unittest.main()
| |
#!/bin/env python
# Automatically translated python version of
# OpenSceneGraph example program "osgparticleeffects"
# !!! This program will need manual tuning before it will work. !!!
import math
import sys

from osgpypp import osg
from osgpypp import osgDB
from osgpypp import osgParticle
from osgpypp import osgText
from osgpypp import osgUtil
from osgpypp import osgViewer
# Translated from file 'osgparticleeffects.cpp'
# OpenSceneGraph example, osgparticleeffects.
#*
#* Permission is hereby granted, free of charge, to any person obtaining a copy
#* of this software and associated documentation files (the "Software"), to deal
#* in the Software without restriction, including without limitation the rights
#* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#* copies of the Software, and to permit persons to whom the Software is
#* furnished to do so, subject to the following conditions:
#*
#* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#* THE SOFTWARE.
#
#include <osgViewer/Viewer>
#include <osg/Group>
#include <osg/Geode>
#include <osg/ShapeDrawable>
#include <osg/Texture2D>
#include <osg/PositionAttitudeTransform>
#include <osg/MatrixTransform>
#include <osg/io_utils>
#include <osgUtil/Optimizer>
#include <osgDB/ReadFile>
#include <osgText/Text>
#include <osgParticle/ExplosionEffect>
#include <osgParticle/ExplosionDebrisEffect>
#include <osgParticle/SmokeEffect>
#include <osgParticle/SmokeTrailEffect>
#include <osgParticle/FireEffect>
# for the grid data..
#include "../osghangglide/terrain_coords.h"
# Global wind vector applied to every particle effect (1 unit/s along +X).
wind = osg.Vec3(1.0,0.0,0.0)
def createAnimationPath(center, radius, looptime):
    """Build a looping circular osg.AnimationPath.

    The path circles *center* at distance *radius*, completes one lap every
    *looptime* seconds, and banks the model 30 degrees into the turn.

    Fixes the machine translation from C++: `(float)`/`(double)` casts and
    the untranslated `for(int i=0;i<numSamples;++i)` loop were not valid
    Python, and `sinf`/`cosf` are replaced by `math.sin`/`math.cos`.
    """
    animationPath = osg.AnimationPath()
    animationPath.setLoopMode(osg.AnimationPath.LOOP)

    numSamples = 40
    yaw = 0.0
    # numSamples-1 steps so the last control point closes the loop.
    yaw_delta = 2.0 * osg.PI / (float(numSamples) - 1.0)
    roll = osg.inDegrees(30.0)

    time = 0.0
    time_delta = looptime / float(numSamples)
    for _ in range(numSamples):
        position = center + osg.Vec3(math.sin(yaw) * radius,
                                     math.cos(yaw) * radius, 0.0)
        # Bank into the turn, then face along the direction of travel.
        rotation = osg.Quat(roll, osg.Vec3(0.0, 1.0, 0.0)) * \
            osg.Quat(-(yaw + osg.inDegrees(90.0)), osg.Vec3(0.0, 0.0, 1.0))
        animationPath.insert(time,
                             osg.AnimationPath.ControlPoint(position, rotation))
        yaw += yaw_delta
        time += time_delta
    return animationPath
def createMovingModel(center, radius):
    """Return a Group holding a glider and a cessna flying a circular path.

    Both models are scaled relative to *radius* and animated along the same
    10-second looping path centred on *center* (the glider at half rate).
    """
    loop_seconds = 10.0
    path = createAnimationPath(center, radius, loop_seconds)

    model = osg.Group()

    glider = osgDB.readNodeFile("glider.osgt")
    if glider:
        bound = glider.getBound()
        fit = radius / bound.radius() * 0.15

        # Centre, scale and orient the glider once; this transform is static.
        placed = osg.MatrixTransform()
        placed.setDataVariance(osg.Object.STATIC)
        placed.setMatrix(
            osg.Matrix.translate(-bound.center())
            * osg.Matrix.scale(fit, fit, fit)
            * osg.Matrix.rotate(osg.inDegrees(-90.0), 0.0, 0.0, 1.0))
        placed.addChild(glider)

        # Dynamic transform that carries the glider around the path.
        mover = osg.PositionAttitudeTransform()
        mover.setDataVariance(osg.Object.DYNAMIC)
        mover.getOrCreateStateSet().setMode(GL_NORMALIZE, osg.StateAttribute.ON)
        mover.setUpdateCallback(osg.AnimationPathCallback(path, 0.0, 0.5))
        mover.addChild(placed)
        model.addChild(mover)

    cessna = osgDB.readNodeFile("cessna.osgt")
    if cessna:
        bound = cessna.getBound()
        fit = radius / bound.radius() * 0.15

        placed = osg.MatrixTransform()
        placed.getOrCreateStateSet().setMode(GL_NORMALIZE, osg.StateAttribute.ON)
        placed.setDataVariance(osg.Object.STATIC)
        placed.setMatrix(
            osg.Matrix.translate(-bound.center())
            * osg.Matrix.scale(fit, fit, fit)
            * osg.Matrix.rotate(osg.inDegrees(180.0), 0.0, 0.0, 1.0))
        placed.addChild(cessna)

        # The cessna flies the same path at full rate (1.0 vs the glider's 0.5).
        mover = osg.MatrixTransform()
        mover.setDataVariance(osg.Object.DYNAMIC)
        mover.setUpdateCallback(osg.AnimationPathCallback(path, 0.0, 1.0))
        mover.addChild(placed)
        model.addChild(mover)

    return model
def computeTerrainIntersection(subgraph, x, y):
    """Drop a vertical ray through (x, y) and return where it hits *subgraph*.

    The ray spans the subgraph's bounding sphere vertically; if nothing is
    hit, a point at ground level z=0 is returned.
    """
    bound = subgraph.getBound()
    z_top = bound.center().z() + bound.radius()
    z_bottom = bound.center().z() - bound.radius()

    intersector = osgUtil.LineSegmentIntersector(
        osg.Vec3(x, y, z_bottom), osg.Vec3(x, y, z_top))
    visitor = osgUtil.IntersectionVisitor(intersector)
    subgraph.accept(visitor)

    if intersector.containsIntersections():
        return intersector.getFirstIntersection().getWorldIntersectPoint()
    return osg.Vec3(x, y, 0.0)
#######################################
# MAIN SCENE GRAPH BUILDING FUNCTION
#######################################
def _addParticleEffectsAt(root, terrainGeode, x, y, scale):
    """Spawn one explosion/debris/smoke/fire cluster on the terrain at (x, y)."""
    position = computeTerrainIntersection(terrainGeode, x, y)
    effects = [
        osgParticle.ExplosionEffect(position, scale),
        osgParticle.ExplosionDebrisEffect(position, scale),
        osgParticle.SmokeEffect(position, scale),
        osgParticle.FireEffect(position, scale),
    ]
    for effect in effects:
        effect.setWind(wind)
        root.addChild(effect)

def build_world(root):
    """Populate *root* with the textured terrain height field, two static
    particle-effect clusters and the circling glider/cessna models.

    Fixes the machine translation from C++: the untranslated
    `for(unsigned int r=0;r<39;++r)` loops were not valid Python, and the
    duplicated effect-cluster code is factored into _addParticleEffectsAt.
    """
    terrainGeode = osg.Geode()

    # Terrain skin: a single texture stretched over the height field.
    stateset = osg.StateSet()
    image = osgDB.readImageFile("Images/lz.rgb")
    if image:
        texture = osg.Texture2D()
        texture.setImage(image)
        stateset.setTextureAttributeAndModes(0, texture, osg.StateAttribute.ON)
    terrainGeode.setStateSet(stateset)

    size = 1000  # overall terrain extent (labelled "10km" in the C++ example)
    scale = size / 39.0
    z_scale = scale * 3.0
    grid = osg.HeightField()
    grid.allocate(38, 39)
    grid.setXInterval(scale)
    grid.setYInterval(scale)
    # `vertex` is the height table from ../osghangglide/terrain_coords.h in
    # the C++ original — TODO: port that data before running this script.
    for r in range(39):
        for c in range(38):
            grid.setHeight(c, r, z_scale * vertex[r + c * 39][2])
    terrainGeode.addDrawable(osg.ShapeDrawable(grid))
    root.addChild(terrainGeode)

    # Two fixed particle-effect clusters: a large one and a small one.
    _addParticleEffectsAt(root, terrainGeode, 100.0, 100.0, 10.0)
    _addParticleEffectsAt(root, terrainGeode, 200.0, 100.0, 1.0)

    # The animated models.
    root.addChild(createMovingModel(osg.Vec3(500.0, 500.0, 500.0), 300.0))
# class to handle events with a pick
# class to handle events with a pick
class PickHandler(osgGA.GUIEventHandler):
    """Event handler that spawns particle effects where the user clicks.

    On a mouse PUSH the scene is intersected under the cursor and
    explosion, debris, smoke and fire effects are inserted at the hit
    point.  If any node on the hit path is flagged DYNAMIC the effects
    are parented next to the hit node so they track its movement;
    otherwise they are simply added under the scene root.
    """

    def handle(self, ea, aa):
        # Only react to button presses; never consume the event.
        if ea.getEventType() == osgGA.GUIEventAdapter.PUSH:
            # NOTE(review): `aa` is used as the viewer here (the C++
            # original did dynamic_cast<osgViewer::Viewer*>(&aa)) --
            # confirm this matches the Python binding in use.
            self.pick(aa, ea)
        return False

    def pick(self, viewer, ea):
        import random  # stand-in for the C rand()/RAND_MAX idiom

        root = viewer.getSceneData()  # expected to be an osg.Group
        if not root:
            return

        intersections = osgUtil.LineSegmentIntersector.Intersections()
        if not viewer.computeIntersections(ea, intersections):
            return

        # Intersections are ordered nearest-first; take the first hit.
        hit = next(iter(intersections))

        # Effects must track the model only when some node on the hit
        # path is marked DYNAMIC.
        handleMovingModels = False
        for node in hit.nodePath:
            # asTransform() returns None for non-Transform nodes --
            # replaces the C++ dynamic_cast<osg::Transform*>.
            transform = node.asTransform()
            if transform and transform.getDataVariance() == osg.Object.DYNAMIC:
                handleMovingModels = True

        position = (hit.getLocalIntersectPoint() if handleMovingModels
                    else hit.getWorldIntersectPoint())
        scale = 10.0 * random.random()
        intensity = 1.0

        explosion = osgParticle.ExplosionEffect(position, scale, intensity)
        explosionDebri = osgParticle.ExplosionDebrisEffect(position, scale, intensity)
        fire = osgParticle.FireEffect(position, scale, intensity)
        # Bug fix: the garbled port ran both assignments unconditionally,
        # so the SmokeTrailEffect was always overwritten.  The original
        # chose exactly one of the two.
        if handleMovingModels:
            smoke = osgParticle.SmokeTrailEffect(position, scale, intensity)
        else:
            smoke = osgParticle.SmokeEffect(position, scale, intensity)

        effects = (explosion, explosionDebri, smoke, fire)
        for effect in effects:
            effect.setWind(wind)  # module-level wind vector

        effectsGroup = osg.Group()
        for effect in effects:
            effectsGroup.addChild(effect)

        if handleMovingModels:
            # insert particle effects alongside the hit node, therefore able
            # to track that node's movement; however, this requires inserting
            # the ParticleSystem itself into the root of the scene graph
            # separately from the main particle effects group which contains
            # the emitters and programs.
            explosion.setUseLocalParticleSystem(False)
            explosionDebri.setUseLocalParticleSystem(False)
            smoke.setUseLocalParticleSystem(False)
            fire.setUseLocalParticleSystem(False)

            # Find a place to insert the particle effects group alongside the
            # hit node: either reuse a sole pre-existing Group parent, or
            # splice a new Group in above the hit node.
            hitNode = hit.nodePath[-1]
            parents = hitNode.getParents()
            insertGroup = None
            numGroupsFound = 0
            for parent in parents:
                # typeid() equality in the C++ original: an exact osg.Group,
                # not a subclass.
                if type(parent) == osg.Group:
                    numGroupsFound += 1
                    insertGroup = parent
            if numGroupsFound == len(parents) and numGroupsFound == 1 and insertGroup:
                # hit node's parent is a single osg.Group: just reuse it.
                insertGroup.addChild(effectsGroup)
            else:
                # no appropriate osg.Group parent: insert one above the node.
                insertGroup = osg.Group()
                for parent in parents:
                    parent.replaceChild(hitNode, insertGroup)
                insertGroup.addChild(hitNode)
                insertGroup.addChild(effectsGroup)

            # finally insert the particle systems into a Geode attached to
            # the scene root so they can be rendered.
            geode = osg.Geode()
            geode.addDrawable(explosion.getParticleSystem())
            geode.addDrawable(explosionDebri.getParticleSystem())
            geode.addDrawable(smoke.getParticleSystem())
            geode.addDrawable(fire.getParticleSystem())
            root.addChild(geode)
        else:
            # static geometry: a single attachment point suffices.
            root.addChild(effectsGroup)
# function used in debugging
def insertParticle(root, center, radius):
    """Debug helper: attach a random particle-effect cluster near *center*.

    Picks a point offset from *center* by up to +/-radius in X and Y
    (Z offset is 0) and adds the standard explosion/debris/smoke/fire
    effects to *root* at that point.

    Args:
        root   -- osg.Group to receive the effects group.
        center -- osg.Vec3 around which the random position is chosen.
        radius -- maximum absolute offset in X and Y.
    """
    import random  # replaces the C rand()/RAND_MAX idiom

    handleMovingModels = False

    def signed_unit():
        # Uniform in [-1.0, 1.0), mirroring ((rand()/RAND_MAX)-0.5)*2.
        return (random.random() - 0.5) * 2.0

    position = center + osg.Vec3(radius * signed_unit(),
                                 radius * signed_unit(),
                                 0.0)
    scale = 10.0 * random.random()
    intensity = 1.0

    explosion = osgParticle.ExplosionEffect(position, scale, intensity)
    explosionDebri = osgParticle.ExplosionDebrisEffect(position, scale, intensity)
    fire = osgParticle.FireEffect(position, scale, intensity)
    # Bug fix: the garbled port ran both assignments unconditionally, so
    # SmokeTrailEffect was always overwritten; the original chose one.
    if handleMovingModels:
        smoke = osgParticle.SmokeTrailEffect(position, scale, intensity)
    else:
        smoke = osgParticle.SmokeEffect(position, scale, intensity)

    effectsGroup = osg.Group()
    for effect in (explosion, explosionDebri, smoke, fire):
        effect.setWind(wind)  # module-level wind vector
        effectsGroup.addChild(effect)
    root.addChild(effectsGroup)
#######################################
# main()
#######################################
def main(argv):
    """Build the particle-effects demo scene and run the viewer.

    Args:
        argv -- command-line arguments (currently unused, kept for the
                conventional main(sys.argv) entry-point signature).
    Returns:
        The viewer's run-loop exit status.
    """
    # construct the viewer.
    viewer = osgViewer.Viewer()

    # register the pick handler
    viewer.addEventHandler(PickHandler())

    root = osg.Group()
    build_world(root)

    optimizer = osgUtil.Optimizer()
    optimizer.optimize(root)

    # add a viewport to the viewer and attach the scene graph.
    viewer.setSceneData(root)
    return viewer.run()

if __name__ == "__main__":
    import sys  # local import: `sys` is not imported at the top of this chunk
    main(sys.argv)
| |
from pyspark.sql import SparkSession
from pyspark.sql.types import DataType, NullType, AtomicType, IntegralType, NumericType, FractionalType, \
StringType, BinaryType, BooleanType, DateType, TimestampType, DecimalType, DoubleType, \
FloatType, ByteType, IntegerType, LongType, ShortType, ArrayType, MapType, StructField, \
StructType
def sql_types_example(spark):
    """Exercise the public API of every class in ``pyspark.sql.types``.

    For each type, prints the results of ``fromInternal``/``toInternal``
    round-trips plus ``json``/``jsonValue``/``needConversion``/
    ``simpleString`` and the ``typeName`` classmethod, so the printed
    output doubles as a smoke test of the types module API.  The
    repeated per-type sequence from the original is factored into the
    ``show_common`` helper; the print order is unchanged.

    Args:
        spark -- active SparkSession (not used directly; kept for parity
                 with the other example scripts in this suite).
    """
    from datetime import datetime

    def show_common(instance, py_internal, sql_value):
        """Print the API calls shared by every DataType instance."""
        python_obj = instance.fromInternal(py_internal)
        print(python_obj, type(python_obj))
        sql_obj = instance.toInternal(sql_value)
        print(sql_obj, type(sql_obj))
        print(instance.json())
        print(instance.jsonValue())
        print(instance.needConversion())
        print(instance.simpleString())

    # Base, abstract and simple scalar types all share the same sequence.
    for cls in (DataType, NullType, AtomicType, NumericType, IntegralType,
                FractionalType, StringType, BinaryType, BooleanType):
        show_common(cls(), 1, 1)
        print(cls.typeName())

    # DateType: fromInternal takes days-since-epoch; toInternal a datetime.
    today = datetime.today()
    show_common(DateType(), 1000, today)
    print(DateType.typeName())

    # TimestampType: fromInternal takes microseconds-since-epoch.
    show_common(TimestampType(), 365000000, today)
    print(TimestampType.typeName())

    # Remaining numeric scalar types follow the simple sequence again.
    for cls in (DecimalType, DoubleType, FloatType, ByteType, IntegerType,
                LongType, ShortType):
        show_common(cls(), 1, 1)
        print(cls.typeName())

    # ArrayType wraps an element type and also supports fromJson().
    show_common(ArrayType(DataType()), 1, 1)
    print(ArrayType.typeName())
    print(ArrayType.fromJson({"containsNull": True, "elementType": "string"}))

    # MapType pairs a key type with a value type.
    show_common(MapType(DataType(), DataType()), 1, 1)
    print(MapType.typeName())
    print(MapType.fromJson({"valueContainsNull": True, "keyType": "string",
                            "valueType": "integer"}))

    # StructField: a named, typed slot inside a StructType (no typeName()).
    show_common(StructField("first_struct", DataType()), 1, 1)
    print(StructField.fromJson({"metadata": None, "nullable": True,
                                "name": "first_struct", "type": "string"}))

    # StructType: inspect internals, round-trip a row, and mutate via add().
    # (The original also created an unused `string_type` local; removed.)
    st = StructType([StructField("first_struct", StringType()),
                     StructField("second_struct", DataType())])
    print("------")
    print(st.names)
    print(st.fields)
    print(st._needConversion)         # private per-field conversion flags
    print(st._needSerializeAnyField)  # private serialization shortcut flag
    python_obj = st.fromInternal(["first_struct", "second_struct"])
    print(python_obj, type(python_obj))
    sql_obj = st.toInternal(["first_struct", "second_struct"])
    print(sql_obj, type(sql_obj))
    print(st.json())
    print(st.jsonValue())
    print(st.needConversion())
    print(st.simpleString())
    print(st.fieldNames())
    fields = {
        "fields": [
            {"metadata": None, "nullable": True, "name": "first", "type": "string"},
            {"metadata": None, "nullable": True, "name": "second", "type": "integer"}
        ]
    }
    print(st.fromJson(fields))
    st.add(StructField("first_struct", StringType()))
    print("st.add success!")
    print("Finish running types module API")
if __name__ == "__main__":
    # Assemble the session step by step instead of one chained expression.
    builder = SparkSession.builder
    builder = builder.appName("Python Spark SQL Types example")
    builder = builder.config("spark.some.config.option", "some-value")
    spark = builder.getOrCreate()
    sql_types_example(spark)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.