gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
import logging
import sys
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models.loading import get_model
from haystack.backends import BaseSearchBackend, BaseSearchQuery, log_query, EmptyResults
from haystack.constants import ID, DJANGO_CT, DJANGO_ID
from haystack.exceptions import MissingDependency, MoreLikeThisError
from haystack.models import SearchResult
from haystack.utils import get_identifier
try:
from django.db.models.sql.query import get_proxied_model
except ImportError:
# Likely on Django 1.0
get_proxied_model = None
try:
from pysolr import Solr, SolrError
except ImportError:
raise MissingDependency("The 'solr' backend requires the installation of 'pysolr'. Please refer to the documentation.")
BACKEND_NAME = 'solr'
class SearchBackend(BaseSearchBackend):
    """Solr search backend for Haystack, speaking to the server via ``pysolr``.

    Requires the ``HAYSTACK_SOLR_URL`` setting; honors the optional
    ``HAYSTACK_SOLR_TIMEOUT`` (seconds, default 10) and
    ``HAYSTACK_INCLUDE_SPELLING`` settings.
    """
    # Words reserved by Solr for special use.
    RESERVED_WORDS = (
        'AND',
        'NOT',
        'OR',
        'TO',
    )

    # Characters reserved by Solr for special use.
    # The '\\' must come first, so as not to overwrite the other slash replacements.
    RESERVED_CHARACTERS = (
        '\\', '+', '-', '&&', '||', '!', '(', ')', '{', '}',
        '[', ']', '^', '"', '~', '*', '?', ':',
    )

    def __init__(self, site=None):
        super(SearchBackend, self).__init__(site)

        if not hasattr(settings, 'HAYSTACK_SOLR_URL'):
            raise ImproperlyConfigured('You must specify a HAYSTACK_SOLR_URL in your settings.')

        timeout = getattr(settings, 'HAYSTACK_SOLR_TIMEOUT', 10)
        self.conn = Solr(settings.HAYSTACK_SOLR_URL, timeout=timeout)
        self.log = logging.getLogger('haystack')

    def update(self, index, iterable, commit=True):
        """Prepare every object in ``iterable`` through ``index`` and add the docs to Solr.

        I/O and Solr failures are logged, not raised (best-effort indexing).
        """
        docs = []

        try:
            for obj in iterable:
                docs.append(index.full_prepare(obj))
        except UnicodeDecodeError:
            # Preserve the historical best-effort behavior: report and keep
            # whatever was prepared before the bad object.
            sys.stderr.write("Chunk failed.\n")

        if len(docs) > 0:
            try:
                self.conn.add(docs, commit=commit, boost=index.get_field_weights())
            except (IOError, SolrError) as e:
                self.log.error("Failed to add documents to Solr: %s", e)

    def remove(self, obj_or_string, commit=True):
        """Delete the document identified by ``obj_or_string`` from the index."""
        solr_id = get_identifier(obj_or_string)

        try:
            kwargs = {
                'commit': commit,
                ID: solr_id
            }
            self.conn.delete(**kwargs)
        except (IOError, SolrError) as e:
            self.log.error("Failed to remove document '%s' from Solr: %s", solr_id, e)

    def clear(self, models=None, commit=True):
        """Clear the whole index, or only the documents of ``models`` when given.

        ``models`` used to default to a shared mutable ``[]``; ``None`` avoids
        that pitfall while keeping identical call semantics.
        """
        if models is None:
            models = []

        try:
            if not models:
                # *:* matches all docs in Solr
                self.conn.delete(q='*:*', commit=commit)
            else:
                models_to_delete = []

                for model in models:
                    models_to_delete.append("%s:%s.%s" % (DJANGO_CT, model._meta.app_label, model._meta.module_name))

                self.conn.delete(q=" OR ".join(models_to_delete), commit=commit)

            # Run an optimize post-clear. http://wiki.apache.org/solr/FAQ#head-9aafb5d8dff5308e8ea4fcf4b71f19f029c4bb99
            self.conn.optimize()
        except (IOError, SolrError) as e:
            if len(models):
                self.log.error("Failed to clear Solr index of models '%s': %s", ','.join(models_to_delete), e)
            else:
                self.log.error("Failed to clear Solr index: %s", e)

    @log_query
    def search(self, query_string, sort_by=None, start_offset=0, end_offset=None,
               fields='', highlight=False, facets=None, date_facets=None, query_facets=None,
               narrow_queries=None, spelling_query=None,
               limit_to_registered_models=None, result_class=None, **kwargs):
        """Run ``query_string`` against Solr and return a processed result dict.

        Returns at least ``results`` and ``hits``; facet counts and a spelling
        suggestion are included when requested/configured.  The extra
        ``**kwargs`` are accepted for interface compatibility but are NOT
        forwarded to Solr (matching the original behavior, where the local
        dict shadowed them).
        """
        if len(query_string) == 0:
            return {
                'results': [],
                'hits': 0,
            }

        # Local name is ``params`` (not ``kwargs``) so the incoming
        # ``**kwargs`` is no longer silently shadowed.
        params = {
            'fl': '* score',
        }

        if fields:
            params['fl'] = fields

        if sort_by is not None:
            params['sort'] = sort_by

        if start_offset is not None:
            params['start'] = start_offset

        if end_offset is not None:
            params['rows'] = end_offset - start_offset

        if highlight is True:
            params['hl'] = 'true'
            params['hl.fragsize'] = '200'

        if getattr(settings, 'HAYSTACK_INCLUDE_SPELLING', False) is True:
            params['spellcheck'] = 'true'
            params['spellcheck.collate'] = 'true'
            params['spellcheck.count'] = 1

            if spelling_query:
                params['spellcheck.q'] = spelling_query

        if facets is not None:
            params['facet'] = 'on'
            params['facet.field'] = facets

        if date_facets is not None:
            params['facet'] = 'on'
            params['facet.date'] = date_facets.keys()
            params['facet.date.other'] = 'none'

            for key, value in date_facets.items():
                params["f.%s.facet.date.start" % key] = self.conn._from_python(value.get('start_date'))
                params["f.%s.facet.date.end" % key] = self.conn._from_python(value.get('end_date'))
                gap_by_string = value.get('gap_by').upper()
                gap_string = "%d%s" % (value.get('gap_amount'), gap_by_string)

                if value.get('gap_amount') != 1:
                    # Solr wants plural gap units when the amount is not 1.
                    gap_string += "S"

                params["f.%s.facet.date.gap" % key] = '+%s/%s' % (gap_string, gap_by_string)

        if query_facets is not None:
            params['facet'] = 'on'
            params['facet.query'] = ["%s:%s" % (field, value) for field, value in query_facets]

        if limit_to_registered_models is None:
            limit_to_registered_models = getattr(settings, 'HAYSTACK_LIMIT_TO_REGISTERED_MODELS', True)

        if limit_to_registered_models:
            # Using narrow queries, limit the results to only models registered
            # with the current site.
            if narrow_queries is None:
                narrow_queries = set()

            registered_models = self.build_registered_models_list()

            if len(registered_models) > 0:
                narrow_queries.add('%s:(%s)' % (DJANGO_CT, ' OR '.join(registered_models)))

        if narrow_queries is not None:
            params['fq'] = list(narrow_queries)

        try:
            raw_results = self.conn.search(query_string, **params)
        except (IOError, SolrError) as e:
            self.log.error("Failed to query Solr using '%s': %s", query_string, e)
            raw_results = EmptyResults()

        return self._process_results(raw_results, highlight=highlight, result_class=result_class)

    def more_like_this(self, model_instance, additional_query_string=None,
                       start_offset=0, end_offset=None,
                       limit_to_registered_models=None, result_class=None, **kwargs):
        """Fetch documents similar to ``model_instance`` via Solr's MLT handler."""
        # Handle deferred models.
        if get_proxied_model and hasattr(model_instance, '_deferred') and model_instance._deferred:
            model_klass = get_proxied_model(model_instance._meta)
        else:
            model_klass = type(model_instance)

        index = self.site.get_index(model_klass)
        field_name = index.get_content_field()
        params = {
            'fl': '*,score',
        }

        if start_offset is not None:
            params['start'] = start_offset

        if end_offset is not None:
            params['rows'] = end_offset

        narrow_queries = set()

        if limit_to_registered_models is None:
            limit_to_registered_models = getattr(settings, 'HAYSTACK_LIMIT_TO_REGISTERED_MODELS', True)

        if limit_to_registered_models:
            # Using narrow queries, limit the results to only models registered
            # with the current site.  (``narrow_queries`` is always a set here,
            # so the old redundant ``is None`` re-initialization was dropped.)
            registered_models = self.build_registered_models_list()

            if len(registered_models) > 0:
                narrow_queries.add('%s:(%s)' % (DJANGO_CT, ' OR '.join(registered_models)))

        if additional_query_string:
            narrow_queries.add(additional_query_string)

        if narrow_queries:
            params['fq'] = list(narrow_queries)

        query = "%s:%s" % (ID, get_identifier(model_instance))

        try:
            raw_results = self.conn.more_like_this(query, field_name, **params)
        except (IOError, SolrError) as e:
            self.log.error("Failed to fetch More Like This from Solr for document '%s': %s", query, e)
            raw_results = EmptyResults()

        return self._process_results(raw_results, result_class=result_class)

    def _process_results(self, raw_results, highlight=False, result_class=None):
        """Convert a pysolr response into Haystack's standard result dict.

        Documents whose model is unknown or not registered are dropped and
        subtracted from the hit count.
        """
        if not self.site:
            from haystack import site
        else:
            site = self.site

        results = []
        hits = raw_results.hits
        facets = {}
        spelling_suggestion = None

        if result_class is None:
            result_class = SearchResult

        if hasattr(raw_results, 'facets'):
            facets = {
                'fields': raw_results.facets.get('facet_fields', {}),
                'dates': raw_results.facets.get('facet_dates', {}),
                'queries': raw_results.facets.get('facet_queries', {}),
            }

            for key in ['fields']:
                for facet_field in facets[key]:
                    # Convert to a two-tuple, as Solr's json format returns a list of
                    # pairs.
                    facets[key][facet_field] = zip(facets[key][facet_field][::2], facets[key][facet_field][1::2])

        if getattr(settings, 'HAYSTACK_INCLUDE_SPELLING', False) is True:
            if hasattr(raw_results, 'spellcheck'):
                if len(raw_results.spellcheck.get('suggestions', [])):
                    # For some reason, it's an array of pairs. Pull off the
                    # collated result from the end.
                    spelling_suggestion = raw_results.spellcheck.get('suggestions')[-1]

        indexed_models = site.get_indexed_models()

        for raw_result in raw_results.docs:
            app_label, model_name = raw_result[DJANGO_CT].split('.')
            additional_fields = {}
            model = get_model(app_label, model_name)

            if model and model in indexed_models:
                # Hoisted out of the per-key loop: the index for a given model
                # does not change between keys.
                index = site.get_index(model)

                for key, value in raw_result.items():
                    string_key = str(key)

                    if string_key in index.fields and hasattr(index.fields[string_key], 'convert'):
                        additional_fields[string_key] = index.fields[string_key].convert(value)
                    else:
                        additional_fields[string_key] = self.conn._to_python(value)

                del(additional_fields[DJANGO_CT])
                del(additional_fields[DJANGO_ID])
                del(additional_fields['score'])

                if raw_result[ID] in getattr(raw_results, 'highlighting', {}):
                    additional_fields['highlighted'] = raw_results.highlighting[raw_result[ID]]

                result = result_class(app_label, model_name, raw_result[DJANGO_ID], raw_result['score'], searchsite=self.site, **additional_fields)
                results.append(result)
            else:
                hits -= 1

        return {
            'results': results,
            'hits': hits,
            'facets': facets,
            'spelling_suggestion': spelling_suggestion,
        }

    def build_schema(self, fields):
        """Map Haystack field definitions onto Solr schema field dicts.

        Returns ``(content_field_name, schema_fields)`` where ``schema_fields``
        is a list of dicts suitable for rendering a Solr ``schema.xml``.
        """
        content_field_name = ''
        schema_fields = []

        for field_name, field_class in fields.items():
            field_data = {
                'field_name': field_class.index_fieldname,
                'type': 'text',
                'indexed': 'true',
                'stored': 'true',
                'multi_valued': 'false',
            }

            if field_class.document is True:
                content_field_name = field_class.index_fieldname

            # DRL_FIXME: Perhaps move to something where, if none of these
            #            checks succeed, call a custom method on the form that
            #            returns, per-backend, the right type of storage?
            if field_class.field_type in ['date', 'datetime']:
                field_data['type'] = 'date'
            elif field_class.field_type == 'integer':
                field_data['type'] = 'slong'
            elif field_class.field_type == 'float':
                field_data['type'] = 'sfloat'
            elif field_class.field_type == 'boolean':
                field_data['type'] = 'boolean'
            elif field_class.field_type == 'ngram':
                field_data['type'] = 'ngram'
            elif field_class.field_type == 'edge_ngram':
                field_data['type'] = 'edge_ngram'

            if field_class.is_multivalued:
                field_data['multi_valued'] = 'true'

            if field_class.stored is False:
                field_data['stored'] = 'false'

            # Do this last to override `text` fields.
            if field_class.indexed is False:
                field_data['indexed'] = 'false'

                # If it's text and not being indexed, we probably don't want
                # to do the normal lowercase/tokenize/stemming/etc. dance.
                if field_data['type'] == 'text':
                    field_data['type'] = 'string'

            # If it's a ``FacetField``, make sure we don't postprocess it.
            if hasattr(field_class, 'facet_for'):
                # If it's text, it ought to be a string.
                if field_data['type'] == 'text':
                    field_data['type'] = 'string'

            schema_fields.append(field_data)

        return (content_field_name, schema_fields)
class SearchQuery(BaseSearchQuery):
    """Solr flavor of ``BaseSearchQuery``.

    Translates Haystack filter expressions into Lucene/Solr query syntax and
    executes them through a ``SearchBackend``.
    """

    def __init__(self, site=None, backend=None):
        super(SearchQuery, self).__init__(site, backend)

        # Reuse a caller-supplied backend when available; otherwise create one.
        if backend is None:
            self.backend = SearchBackend(site=site)
        else:
            self.backend = backend

    def matching_all_fragment(self):
        """Return the fragment that matches every document in the index."""
        return '*:*'

    def build_query_fragment(self, field, filter_type, value):
        """Translate one ``(field, filter_type, value)`` triple into Solr syntax."""
        # Handle when we've got a ``ValuesListQuerySet``...
        if hasattr(value, 'values_list'):
            value = list(value)

        if not isinstance(value, (set, list, tuple)):
            # Convert whatever we find to what pysolr wants.
            value = self.backend.conn._from_python(value)

        # Check to see if it's a phrase for an exact match.
        if ' ' in value:
            value = '"%s"' % value

        index_fieldname = self.backend.site.get_index_fieldname(field)

        # 'content' is a special reserved word, much like 'pk' in
        # Django's ORM layer. It indicates 'no special field'.
        if field == 'content':
            return value

        if filter_type == 'in':
            choices = ['%s:"%s"' % (index_fieldname, self.backend.conn._from_python(v))
                       for v in value]
            return "(%s)" % " OR ".join(choices)

        if filter_type == 'range':
            lower = self.backend.conn._from_python(value[0])
            upper = self.backend.conn._from_python(value[1])
            return "%s:[%s TO %s]" % (index_fieldname, lower, upper)

        templates = {
            'exact': "%s:%s",
            'gt': "%s:{%s TO *}",
            'gte': "%s:[%s TO *]",
            'lt': "%s:{* TO %s}",
            'lte': "%s:[* TO %s]",
            'startswith': "%s:%s*",
        }
        return templates[filter_type] % (index_fieldname, value)

    def run(self, spelling_query=None):
        """Builds and executes the query. Returns a list of search results."""
        final_query = self.build_query()
        search_kwargs = {
            'start_offset': self.start_offset,
            'result_class': self.result_class,
        }

        if self.order_by:
            # '-field' means descending order in Solr's 'field desc' syntax.
            clauses = ['%s desc' % field[1:] if field.startswith('-')
                       else '%s asc' % field
                       for field in self.order_by]
            search_kwargs['sort_by'] = ", ".join(clauses)

        if self.end_offset is not None:
            search_kwargs['end_offset'] = self.end_offset

        if self.highlight:
            search_kwargs['highlight'] = self.highlight

        if self.facets:
            search_kwargs['facets'] = list(self.facets)

        if self.date_facets:
            search_kwargs['date_facets'] = self.date_facets

        if self.query_facets:
            search_kwargs['query_facets'] = self.query_facets

        if self.narrow_queries:
            search_kwargs['narrow_queries'] = self.narrow_queries

        if spelling_query:
            search_kwargs['spelling_query'] = spelling_query

        results = self.backend.search(final_query, **search_kwargs)
        self._results = results.get('results', [])
        self._hit_count = results.get('hits', 0)
        self._facet_counts = self.post_process_facets(results)
        self._spelling_suggestion = results.get('spelling_suggestion', None)

    def run_mlt(self):
        """Builds and executes a More Like This query for the stored instance."""
        if self._more_like_this is False or self._mlt_instance is None:
            raise MoreLikeThisError("No instance was provided to determine 'More Like This' results.")

        additional_query_string = self.build_query()
        mlt_kwargs = {
            'start_offset': self.start_offset,
            'result_class': self.result_class,
        }

        if self.end_offset is not None:
            mlt_kwargs['end_offset'] = self.end_offset - self.start_offset

        results = self.backend.more_like_this(self._mlt_instance,
                                              additional_query_string,
                                              **mlt_kwargs)
        self._results = results.get('results', [])
        self._hit_count = results.get('hits', 0)
| |
# -*- coding: utf-8 -*-
#==============================================================================
# module : LeCroy64Xi.py
# author : Pierre Heidmann
# license : MIT license
#==============================================================================
"""
This module defines drivers for LeCroy64Xi using VISA library.
:Contains:
LeCroy64Xi
"""
from threading import Lock
from contextlib import contextmanager
from ..driver_tools import (BaseInstrument, InstrIOError, secure_communication,
instrument_property)
from ..visa_tools import VisaInstrument
from inspect import cleandoc
import time
import struct
import numpy as np
class LeCroyChannel(BaseInstrument):
"""
"""
def __init__(self, LeCroy64Xi, channel_num, caching_allowed=True,
caching_permissions={}):
super(LeCroyChannel, self).__init__(None, caching_allowed,
caching_permissions)
self._LeCroy64Xi = LeCroy64Xi
self._channel = channel_num
self.descriptor_start = 21
self.data = {}
@contextmanager
def secure(self):
i = 0
while not self._LeCroy64Xi.lock.acquire():
time.sleep(0.1)
i += 1
if i > 50:
raise InstrIOError
try:
yield
finally:
self._LeCroy64Xi.lock.release()
@instrument_property
@secure_communication()
def verticalbase(self):
''' Get vertical sensitivity in Volts/div of the channel
Output:
value (str) : Vertical base in V.
'''
with self.secure():
# check if the channel studied is not a trace channel
if len(self._channel) == 1:
result = self._LeCroy64Xi.ask('C{}:VDIV?'
.format(self._channel))
result = result.replace('C{}:VDIV '.format(self._channel), '')
return result
else:
mes = '{} is a trace and not a channel'.format(self._channel)
raise InstrIOError(mes)
@verticalbase.setter
@secure_communication()
def verticalbase(self, value):
''' Set vertical sensitivity in Volts/div of the channel
Input:
value (str) : Vertical base in V. (UV (microvolts), MV (milivolts),
V (volts) or KV (kilovolts))
(Example: '20E-3', '20 MV')
Output:
None
'''
with self.secure():
# check if the channel studied is not a trace channel
if len(self._channel) == 1:
self._LeCroy64Xi.write('C{}:VDIV {}'
.format(self._channel, value))
result = self._LeCroy64Xi.ask('C{}:VDIV?'
.format(self._channel))
result = result.replace('C{}:VDIV '.format(self._channel), '')
result = result.replace('V', '')
result = float(result)
if value[-2:] == ' V':
value_expected = float(value[:-2])
elif value[-2:] == 'UV':
value_expected = float(value[:-3])*1e-6
elif value[-2:] == 'MV':
value_expected = float(value[:-3])*1e-3
elif value[-2:] == 'KV':
value_expected = float(value[:-3])*1e3
else:
value_expected = float(value)
if result != value_expected:
raise InstrIOError(cleandoc('''Instrument did not set
correctly the verticalbase'''))
else:
mes = '{} is a trace and not a channel'.format(self._channel)
raise InstrIOError(mes)
@instrument_property
@secure_communication()
def vertical_offset(self):
''' Get vertical offset in Volts of the channel
Input:
None
Output:
value (str) : Vertical offset in V.
'''
with self.secure():
# check if the channel studied is not a trace channel
if len(self._channel) == 1:
result = self._LeCroy64Xi.ask('C{}:OFST?'
.format(self._channel))
result = result.replace('C{}:OFST '.format(self._channel), '')
result = result.replace('V', '')
return result
else:
mes = '{} is a trace and not a channel'.format(self._channel)
raise InstrIOError(mes)
@vertical_offset.setter
@secure_communication()
def vertical_offset(self, value):
''' Set vertical offset in Volts of the channel
Input:
value (str) : Vertical offset in V. (UV (microvolts), MV (milivolts),
V (volts) or KV (kilovolts))
(Example: '20E-3', '20 MV')
Output:
None
'''
with self.secure():
# check if the channel studied is not a trace channel
if len(self._channel) == 1:
self._LeCroy64Xi.write('C{}:OFST {}'
.format(self._channel, value))
result = self._LeCroy64Xi.ask('C{}:OFST?'
.format(self._channel))
result = result.replace('C{}:OFST '.format(self._channel), '')
result = result.replace('V', '')
result = float(result)
if value[-2:] == ' V':
value_expected = float(value[:-2])
elif value[-2:] == 'UV':
value_expected = float(value[:-3])*1e-6
elif value[-2:] == 'MV':
value_expected = float(value[:-3])*1e-3
elif value[-2:] == 'KV':
value_expected = float(value[:-3])*1e3
else:
value_expected = float(value)
if result != value_expected:
raise InstrIOError(cleandoc('''Instrument did not set
correctly the verticalbase'''))
else:
mes = '{} is a trace and not a channel'.format(self._channel)
raise InstrIOError(mes)
@secure_communication()
def sweep(self):
''' Get the number of sweeps of the channel
Input:
None
Output:
value (str)
'''
instr = self._LeCroy64Xi
with self.secure():
# check if the channel studied is not a trace channel
if len(self._channel) == 1:
cmd = 'VBS? "return=app.Acquisition.C{}.Out.Result.Sweeps"'
result = instr.ask_for_values(cmd.format(self._channel))
if result:
return result[0]
else:
raise InstrIOError('LeCraoy failed to return sweep')
else:
mes = '{} is a trace and not a channel'.format(self._channel)
raise InstrIOError(mes)
@secure_communication()
def do_save_data(self, destination='HDD', mode='OFF', format='ASCII'):
''' Store a trace in ASCII format in internal memory
Input:
destination = {'CARD', 'FLPY', 'HDD', 'M1', 'M2', 'M3', 'M4'}
mode = {'OFF', 'WRAP', 'FILL'}
format = {'BINARY', 'SPREADSHEET', 'MATLAB', 'MATHCAD'}
Output:
None
'''
with self.secure():
# check if the channel studied is not a trace channel
if len(self._channel) == 1:
self._LeCroy64Xi.write('STST C{},{},AUTO,{},FORMAT,{}; STO'
.format(self._channel, destination,
mode, format))
else:
self._LeCroy64Xi.write('STST {},{},AUTO,{},FORMAT,{}; STO'
.format(self._channel, destination,
mode, format))
@secure_communication()
def add_save_data_func(self):
''' Adds save_ch[n]_data functions, based on _do_save_data(channel).
n = (1,2,3,4) for 4 channels.
'''
with self.secure():
func = lambda: self.do_save_data(self._channel)
setattr(self, 'save_ch{}_data'.format(self._channel), func)
@secure_communication()
def read_data_complete(self, hires):
'''
Input:
{'True', 'Yes', 'No', 'False'}
Output:
Library self.data :
many parameters in string
vertical values data : 'Volt_Value_array'
horizontal values data : 'SingleSweepTimesValuesArray' or
'SEQNCEWaveformTimesValuesArray'
'''
if hires in ('True', 'Yes'):
self._LeCroy64Xi.write('CFMT DEF9,WORD,BIN')
result = self._LeCroy64Xi.ask('CFMT?')
if result != 'CFMT DEF9,WORD,BIN':
mes = 'Instrument did not set the WORD mode'
raise InstrIOError(mes)
elif hires in ('No', 'False'):
self._LeCroy64Xi.write('CFMT DEF9,BYTE,BIN')
result = self._LeCroy64Xi.ask('CFMT?')
if result != 'CFMT DEF9,BYTE,BIN':
mes = 'Instrument did not set the BYTE mode'
raise InstrIOError(mes)
else:
mes = "{} is not an allowed input. Input:{'True', 'Yes', 'No', 'False'}".format(hires)
raise InstrIOError(mes)
if len(self._channel) == 1:
databyte = bytearray(self._LeCroy64Xi.ask('C{}:WF?'.format(self._channel)))
else:
databyte = bytearray(self._LeCroy64Xi.ask('{}:WF?'.format(self._channel)))
databyte = databyte[self.descriptor_start:]
self.data['COMM_TYPE'] = struct.unpack('<b', databyte[32:33]) # /COMM_TYPE: enum ; chosen by remote command COMM_FORMAT
self.data['COMM_ORDER'] = struct.unpack('<b', databyte[34:35]) # COMM_ORDER: enum
# 'The following variables of this basic wave descriptor block specify
# ' the block lengths of all blocks of which the entire waveform (as it is
# ' currently being read) is composed. If a block length is zero, this
# ' block is (currently) not present.
#
# ' Blocks and arrays that are present will be found in the same order
# ' as their descriptions below.
# BLOCKS:
self.data['WAVE_DESCRIPTOR'] = struct.unpack('<i', databyte[36:40]) # WAVE_DESCRIPTOR: long ; length in bytes of block WAVEDESC
self.data['USER_TEXT'] = struct.unpack('<i', databyte[40:44]) # USER_TEXT: long ; length in bytes of block USERTEXT
self.data['RES_DESC1'] = struct.unpack('<i', databyte[44:48]) # RES_DESC1: long
# ARRAYS:
self.data['TRIGTIME_ARRAY'] = struct.unpack('<i', databyte[48:52]) # TRIGTIME_ARRAY: long ; length in bytes of TRIGTIME array
self.data['RIS_TIME_ARRAY'] = struct.unpack('<i', databyte[52:56]) # RIS_TIME_ARRAY: long ; length in bytes of RIS_TIME array
self.data['RES_ARRAY1'] = struct.unpack('<i', databyte[56:60]) # RES_ARRAY1: long ; an expansion entry is reserved
self.data['WAVE_ARRAY_1'] = struct.unpack('<i', databyte[60:64]) # WAVE_ARRAY_1: long ; length in bytes of 1st simple data array. In transmitted waveform, represent the number of transmitted bytes in accordance with the NP parameter of the WFSU remote command and the used format (see COMM_TYPE).
self.data['WAVE_ARRAY_2'] = struct.unpack('<i', databyte[64:68]) # WAVE_ARRAY_2: long ; length in bytes of 2nd simple data array
self.data['RES_ARRAY2'] = struct.unpack('<i', databyte[68:72]) # RES_ARRAY2: long
self.data['RES_ARRAY3'] = struct.unpack('<i', databyte[72:76]) # RES_ARRAY3: long ; 2 expansion entries are reserved
# The following variables identify the instrument
self.data['INSTRUMENT_NAME'] = databyte[76:92] # INSTRUMENT_NAME: string
self.data['INSTRUMENT_NUMBER'] = struct.unpack('<i', databyte[92:96]) # INSTRUMENT_NUMBER: long
self.data['TRACE_LABEL'] = databyte[96:112] # /TRACE_LABEL: string ; identifies the waveform.
# '<112> RESERVED1: word
# '<114> RESERVED2: word ; 2 expansion entries
# The following variables describe the waveform and the time at which the waveform was generated.
self.data['WAVE_ARRAY_COUNT'] = struct.unpack('<i', databyte[116:120]) # WAVE_ARRAY_COUNT: long ; number of data points in the data array. If there are two data arrays (FFT or Extrema), this number applies to each array separately.
self.data['PNTS_PER_SCREEN'] = struct.unpack('<i', databyte[120:124]) # PNTS_PER_SCREEN: long ; nominal number of data points on the screen
self.data['FIRST_VALID_PNT'] = struct.unpack('<i', databyte[124:128]) # FIRST_VALID_PNT: long ; count of number of points to skip before first good point FIRST_VALID_POINT = 0 for normal waveforms.
self.data['LAST_VALID_PNT'] = struct.unpack('<i', databyte[128:132]) # LAST_VALID_PNT: long ; index of last good data point in record before padding (blanking) was started. LAST_VALID_POINT = WAVE_ARRAY_COUNT-1 except for aborted sequence and rollmode acquisitions
self.data['FIRST_POINT'] = struct.unpack('<i', databyte[132:136]) # FIRST_POINT: long ; for input and output, indicates the offset relative to the beginning of the trace buffer. Value is the same as the FP parameter of the WFSU remote command.
self.data['STARTING_FACTOR'] = struct.unpack('<i', databyte[136:140]) # SPARSING_FACTOR: long ; for input and output, indicates the sparsing into the transmitted data block. Value is the same as the SP parameter of the WFSU remote command.
self.data['SEGMENT_INDEX'] = struct.unpack('<i', databyte[140:144]) # SEGMENT_INDEX: long ; for input and output, indicates the index of the transmitted segment. Value is the same as the SN parameter of the WFSU remote command.
self.data['SUBARRAY_COUNT'] = struct.unpack('<i', databyte[144:148]) # SUBARRAY_COUNT: long ; for Sequence, acquired segment count, between 0 and NOM_SUBARRAY_COUNT
self.data['SWEEPS_PER_ACQ'] = struct.unpack('<i', databyte[148:152]) # SWEEPS_PER_ACQ: long ; for Average or Extrema, number of sweeps accumulated else 1
self.data['POINTS_PER_PAIR'] = struct.unpack('<h', databyte[152:154]) # POINTS_PER_PAIR: word ; for Peak Detect waveforms (which always include data points in DATA_ARRAY_1 and min/max pairs in DATA_ARRAY_2). Value is the number of data points for each min/max pair.
self.data['PAIR_OFFSET'] = struct.unpack('<h', databyte[154:156]) # PAIR_OFFSET: word ; for Peak Detect waveforms only Value is the number of data points by which the first min/max pair in DATA_ARRAY_2 is offset relative to the first data value in DATA_ARRAY_1.
self.data['VERTICAL_GAIN'] = struct.unpack('<f', databyte[156:160]) # VERTICAL_GAIN: float
self.data['VERTICAL_OFFSET'] = struct.unpack('<f', databyte[160:164]) # VERTICAL_OFFSET: float ; to get floating values from raw data : VERTICAL_GAIN * data - VERTICAL_OFFSET
self.data['MAX_VALUE'] = struct.unpack('<f', databyte[164:168]) # MAX_VALUE: float ; maximum allowed value. It corresponds to the upper edge of the grid.
self.data['MIN_VALUE'] = struct.unpack('<f', databyte[168:172]) # MIN_VALUE: float ; minimum allowed value. It corresponds to the lower edge of the grid.
self.data['NOMINAL_BITS'] = struct.unpack('<h', databyte[172:174]) # NOMINAL_BITS: word ; a measure of the intrinsic precision of the observation: ADC data is 8 bit averaged data is 10-12 bit, etc.
self.data['NOM_SUBARRAY_COUNT'] = struct.unpack('<h', databyte[174:176]) # NOM_SUBARRAY_COUNT: word ; for Sequence, nominal segment count else 1
self.data['HORIZ_INTERVAL'] = struct.unpack('<f', databyte[176:180]) # HORIZ_INTERVAL: float ; sampling interval for time domain waveforms
self.data['HORIZ_OFFSET'] = struct.unpack('<d', databyte[180:188]) # HORIZ_OFFSET: double ; trigger offset for the first sweep of the trigger, seconds between the trigger and the first data point
self.data['PIXEL_OFFSET'] = struct.unpack('<d', databyte[188:196]) # PIXEL_OFFSET: double ; needed to know how to display the waveform
self.data['VERTUNIT'] = databyte[196:244] # VERTUNIT: unit_definition ; units of the vertical axis;INSTRUMENT_NAME: string
self.data['HORUNIT'] = databyte[244:292] # HORUNIT: unit_definition ; units of the horizontal axis
self.data['HORIZ_UNCERTAINTY'] = struct.unpack('<f', databyte[292:296]) # HORIZ_UNCERTAINTY: float ; uncertainty from one acquisition to the next, of the horizontal offset in seconds
self.data['TRIGGER_TIME_seconds'] = struct.unpack('<d', databyte[296:304]) # TRIGGER_TIME: time_stamp ; time of the trigger
self.data['TRIGGER_TIME_minutes'] = struct.unpack('<b', databyte[304:305]) # TRIGGER_TIME: time_stamp ; time of the trigger
self.data['TRIGGER_TIME_hours'] = struct.unpack('<b', databyte[305:306]) # TRIGGER_TIME: time_stamp ; time of the trigger
self.data['TRIGGER_TIME_days'] = struct.unpack('<b', databyte[306:307]) # TRIGGER_TIME: time_stamp ; time of the trigger
self.data['TRIGGER_TIME_months'] = struct.unpack('<b', databyte[307:308]) # TRIGGER_TIME: time_stamp ; time of the trigger
self.data['TRIGGER_TIME_year'] = struct.unpack('<h', databyte[308:310]) # TRIGGER_TIME: time_stamp ; time of the trigger
self.data['ACQ_DURATION'] = struct.unpack('<f', databyte[312:316]) # ACQ_DURATION: float ; duration of the acquisition (in sec) in multi-trigger waveforms. (e.g. sequence, RIS, or averaging)
self.data['RECORD_TYPE'] = struct.unpack('<h', databyte[316:318])
# RECORD_TYPE:
# 'enum
# '_0(single_sweep)
# '_1(interleaved)
# '_2(histogram)
# '_3(graph)
# '_4(filter_coefficient)
# '_5(complex)
# '_6(extrema)
# '_7(sequence_obsolete)
# '_8(centered_RIS)
# '_9(peak_)
self.data['PROCESSING_DONE'] = struct.unpack('<h', databyte[318:320])
# PROCESSING_DONE:
# 'enum
# '_0 no_processing
# '_1 fir_filter
# '_2 interpolated
# '_3 sparsed
# '_4 autoscaled
# '_5 no_result
# '_6 rolling
# '_7 cumulative
self.data['RIS_SWEEPS'] = struct.unpack('<h', databyte[322:324]) # RIS_SWEEPS: word ; for RIS, the number of sweeps else 1
# The following variables describe the basic acquisition conditions used when the waveform was acquired
self.data['TIMEBASE'] = struct.unpack('<h', databyte[324:326])
# TIMEBASE: enum
# '_0 1_ps/div
# '_1 2_ps/div
# '_2 5_ps/div
# '_3 10_ps/div
# '_4 20_ps/div
# '_5 50_ps/div
# '_6 100_ps/div
# '_7 200_ps/div
# '_8 500_ps/div
# '_9 1_ns/div
# '_10 2_ns/div
# '_11 5_ns/div
# '_12 10_ns/div
# '_13 20_ns/div
# '_14 50_ns/div
# '_15 100_ns/div
# '_16 200_ns/div
# '_17 500_ns/div
# '_18 1_us/div
# '_19 2_us/div
# '_20 5_us/div
# '_21 10_us/div
# '_22 20_us/div
# '_23 50_us/div
# '_24 100_us/div
# '_25 200_us/div
# '_26 500_us/div
# '_27 1_ms/div
# '_28 2_ms/div
# '_29 5_ms/div
# '_30 10_ms/div
# '_31 20_ms/div
# '_32 50_ms/div
# '_33 100_ms/div
# '_34 200_ms/div
# '_35 500_ms/div
# '_36 1_s/div
# '_37 2_s/div
# '_38 5_s/div
# '_39 10_s/div
# '_40 20_s/div
# '_41 50_s/div
# '_42 100_s/div
# '_43 200_s/div
# '_44 500_s/div
# '_45 1_ks/div
# '_46 2_ks/div
# '_47 5_ks/div
# '_100(EXTERNAL)
self.data['VERT_COUPLING'] = struct.unpack('<h', databyte[326:328])
# VERT_COUPLING: enum
# '_0(DC_50_Ohms)
# '_1(ground)
# '_2(DC_1MOhm)
# '_3(ground)
# '_4(AC, _1MOhm)
self.data['PROBE_ATT'] = struct.unpack('<f', databyte[328:332]) # PROBE_ATT: float
self.data['FIXED_VERT_GAIN'] = struct.unpack('<h', databyte[332:334])
# FIXED_VERT_GAIN: enum
# '_0 1_uV/div
# '_1 2_uV/div
# '_2 5_uV/div
# '_3 10_uV/div
# '_4 20_uV/div
# '_5 50_uV/div
# '_6 100_uV/div
# '_7 200_uV/div
# '_8 500_uV/div
# '_9 1_mV/div
# '_10 2_mV/div
# '_11 5_mV/div
# '_12 10_mV/div
# '_13 20_mV/div
# '_14 50_mV/div
# '_15 100_mV/div
# '_16 200_mV/div
# '_17 500_mV/div
# '_18 1_V/div
# '_19 2_V/div
# '_20 5_V/div
# '_21 10_V/div
# '_22 20_V/div
# '_23 50_V/div
# '_24 100_V/div
# '_25 200_V/div
# '_26 500_V/div
# '_27 1_kV/div
self.data['BANDWIDTH_LIMIT'] = struct.unpack('<h', databyte[334:336])
# BANDWIDTH_LIMIT: enum
# '_0(off)
# '_1 on
self.data['VERTICAL_VERNIER'] = struct.unpack('<f', databyte[336:340]) # VERTICAL_VERNIER: float
self.data['TACQ_VERT_OFFET'] = struct.unpack('<f', databyte[340:344]) # ACQ_VERT_OFFSET: float
self.data['WAVE_SOURCE'] = struct.unpack('<h', databyte[344:346])
# WAVE_SOURCE: enum
# '_0(CHANNEL_1)
# '_1(CHANNEL_2)
# '_2(CHANNEL_3)
# '_3(CHANNEL_4)
# Get the vertical values :
waveform_size = self.data['WAVE_ARRAY_COUNT'][0]
waveform_starting_point = self.data['WAVE_DESCRIPTOR'][0] + self.data['TRIGTIME_ARRAY'][0]
self.data['Volt_Value_array'] = np.empty(waveform_size)
if hires in ('Yes', 'True'):
Values16 = np.empty(waveform_size, dtype=np.int16)
for i in range(0, waveform_size-1):
Values16[i] = struct.unpack('<h', databyte[(waveform_starting_point+2*i):(waveform_starting_point+2*i+2)])[0]
self.data['Volt_Value_array'][i] = self.data['VERTICAL_GAIN'][0] * Values16[i] + self.data['VERTICAL_OFFSET'][0]
else:
Values8 = np.empty(waveform_size, dtype=np.int8)
for i in range(0, waveform_size-1):
Values8[i] = struct.unpack('<b', databyte[(waveform_starting_point+i):(waveform_starting_point+i+1)])[0]
self.data['Volt_Value_array'][i] = self.data['VERTICAL_GAIN'][0] * Values8[i] + self.data['VERTICAL_OFFSET'][0]
# Get the horizontal values :
# Single Sweep waveforms: x[i] = HORIZ_INTERVAL x i + HORIZ_OFFSET
if self.data['TRIGTIME_ARRAY'][0] == 0: # if the TrigArray lentgh is null, it tells us, it's a simple single sweep waveform
self.data['SingleSweepTimesValuesArray'] = np.empty(waveform_size)
for i in range(0,waveform_size-1):
self.data['SingleSweepTimesValuesArray'][i] = self.data['HORIZ_INTERVAL'][0] * i + self.data['HORIZ_OFFSET'][0]
else:
self.data['TrigTimeCount'] = np.empty(self.data['TRIGTIME_ARRAY'][0] / 16)
self.data['TrigTimeOffset'] = np.empty(self.data['TRIGTIME_ARRAY'][0] / 16)
for i in range(0, self.data['TRIGTIME_ARRAY'][0] / 16 - 1):
self.data['TrigTimeCount'][i] = struct.unpack('<d', databyte[(self.data['WAVE_DESCRIPTOR'][0]+i*16):(self.data['WAVE_DESCRIPTOR'][0]+8+i*16)])[0]
self.data['TrigTimeOffset'][i] = struct.unpack('<d', databyte[(self.data['WAVE_DESCRIPTOR'][0]+8+i*16):(self.data['WAVE_DESCRIPTOR'][0]+16+i*16)])[0]
self.data['SEQNCEWaveformTimesValuesArray'] = np.empty(waveform_size)
# Array of horizontal values
for n in range(0, len(self.data['TrigTimeCount']) - 1):
for i in range(0, waveform_size / len(self.data['TrigTimeCount']) - 1):
self.data['SEQNCEWaveformTimesValuesArray'][n * (waveform_size / len(self.data['TrigTimeCount'])) + i] = self.data['HORIZ_INTERVAL'][0] * i + self.data['TrigTimeOffset'][n]
return self.data
@secure_communication()
def read_data_cfast(self, hires):
    '''Transfer one waveform from the scope in binary CFMT mode and decode it.

    Input:
        hires: {'True', 'Yes', 'No', 'False'}
            'True'/'Yes' selects 16-bit (WORD) sample transfers,
            'No'/'False' 8-bit (BYTE) ones.
    Output:
        Library self.data :
            many WAVEDESC descriptor parameters; each is stored as the raw
            struct.unpack tuple, so read the value with [0]
            vertical values data : 'Volt_Value_array'
            horizontal values data : 'SingleSweepTimesValuesArray' or
            'SEQNCEWaveformTimesValuesArray'
    Raises:
        InstrIOError: if the scope refuses the requested transfer format
            or `hires` is not one of the allowed values.
    '''
    # --- select the transfer format on the scope ---------------------------
    if hires in ('True', 'Yes'):
        self._LeCroy64Xi.write('CFMT DEF9,WORD,BIN')
        result = self._LeCroy64Xi.ask('CFMT?')
        if result != 'CFMT DEF9,WORD,BIN':
            mes = 'Instrument did not set the WORD mode'
            raise InstrIOError(mes)
    elif hires in ('No', 'False'):
        self._LeCroy64Xi.write('CFMT DEF9,BYTE,BIN')
        result = self._LeCroy64Xi.ask('CFMT?')
        if result != 'CFMT DEF9,BYTE,BIN':
            mes = 'Instrument did not set the BYTE mode'
            raise InstrIOError(mes)
    else:
        # BUGFIX: the braces of the literal set must be doubled ({{ }}),
        # otherwise str.format raises a KeyError here instead of letting
        # the intended InstrIOError propagate.
        mes = ("{} is not an allowed input. "
               "Input:{{'True', 'Yes', 'No', 'False'}}".format(hires))
        raise InstrIOError(mes)

    # --- fetch the raw waveform block --------------------------------------
    # Real channels ('1'..'4') are addressed as 'C<n>', traces ('TA'..) by
    # their plain name.
    if len(self._channel) == 1:
        databyte = bytearray(self._LeCroy64Xi.ask('C{}:WF?'.format(self._channel)))
    else:
        databyte = bytearray(self._LeCroy64Xi.ask('{}:WF?'.format(self._channel)))
    # Drop everything preceding the WAVEDESC template block.
    databyte = databyte[self.descriptor_start:]

    # --- decode the WAVEDESC descriptor ------------------------------------
    # (key, struct format, start, stop): byte offsets are relative to the
    # start of WAVEDESC and follow the LeCroy remote-control manual's
    # waveform template. Values are stored as raw unpack tuples (use [0]).
    descriptor_fields = (
        ('WAVE_DESCRIPTOR', '<i', 36, 40),     # length in bytes of WAVEDESC
        ('TRIGTIME_ARRAY', '<i', 48, 52),      # length in bytes of TRIGTIME array
        ('WAVE_ARRAY_COUNT', '<i', 116, 120),  # number of data points
        ('PNTS_PER_SCREEN', '<i', 120, 124),   # nominal points on screen
        ('FIRST_VALID_PNT', '<i', 124, 128),   # points to skip before first good one
        ('LAST_VALID_PNT', '<i', 128, 132),    # index of last good point before padding
        ('FIRST_POINT', '<i', 132, 136),       # offset rel. to trace buffer (WFSU FP)
        ('STARTING_FACTOR', '<i', 136, 140),   # manual name: SPARSING_FACTOR (WFSU SP)
        ('SEGMENT_INDEX', '<i', 140, 144),     # transmitted segment index (WFSU SN)
        ('SUBARRAY_COUNT', '<i', 144, 148),    # acquired segment count (sequence)
        ('SWEEPS_PER_ACQ', '<i', 148, 152),    # sweeps accumulated (average/extrema)
        ('POINTS_PER_PAIR', '<h', 152, 154),   # peak-detect: points per min/max pair
        ('PAIR_OFFSET', '<h', 154, 156),       # peak-detect: offset of first pair
        ('VERTICAL_GAIN', '<f', 156, 160),
        ('VERTICAL_OFFSET', '<f', 160, 164),   # volts = GAIN * raw + OFFSET (below)
        ('MAX_VALUE', '<f', 164, 168),         # upper edge of the grid
        ('MIN_VALUE', '<f', 168, 172),         # lower edge of the grid
        ('NOMINAL_BITS', '<h', 172, 174),      # intrinsic precision of the data
        ('NOM_SUBARRAY_COUNT', '<h', 174, 176),  # nominal segment count else 1
        ('HORIZ_INTERVAL', '<f', 176, 180),    # sampling interval (s)
        ('HORIZ_OFFSET', '<d', 180, 188),      # trigger-to-first-point delay (s)
        ('PIXEL_OFFSET', '<d', 188, 196),      # display positioning
        ('HORIZ_UNCERTAINTY', '<f', 292, 296),  # shot-to-shot offset uncertainty (s)
        ('TRIGGER_TIME_seconds', '<d', 296, 304),
        ('TRIGGER_TIME_minutes', '<b', 304, 305),
        ('TRIGGER_TIME_hours', '<b', 305, 306),
        ('TRIGGER_TIME_days', '<b', 306, 307),
        ('TRIGGER_TIME_months', '<b', 307, 308),
        ('TRIGGER_TIME_year', '<h', 308, 310),
        ('ACQ_DURATION', '<f', 312, 316),      # multi-trigger acquisition length (s)
        ('RECORD_TYPE', '<h', 316, 318),       # enum: 0 single_sweep ... 9 peak
        ('PROCESSING_DONE', '<h', 318, 320),   # enum: 0 none, 1 fir, 2 interp, ...
        ('RIS_SWEEPS', '<h', 322, 324),        # RIS sweep count else 1
        ('TIMEBASE', '<h', 324, 326),          # enum: 0=1 ps/div .. 47=5 ks/div, 100=ext
        ('VERT_COUPLING', '<h', 326, 328),     # enum: 0 DC50, 1 gnd, 2 DC1M, 3 gnd, 4 AC1M
        ('PROBE_ATT', '<f', 328, 332),
        ('FIXED_VERT_GAIN', '<h', 332, 334),   # enum: 0=1 uV/div .. 27=1 kV/div
        ('BANDWIDTH_LIMIT', '<h', 334, 336),   # enum: 0 off, 1 on
        ('VERTICAL_VERNIER', '<f', 336, 340),
        ('TACQ_VERT_OFFET', '<f', 340, 344),   # sic key: manual name ACQ_VERT_OFFSET
        ('WAVE_SOURCE', '<h', 344, 346),       # enum: 0..3 -> CHANNEL_1..4
    )
    for key, fmt, start, stop in descriptor_fields:
        self.data[key] = struct.unpack(fmt, databyte[start:stop])
    # Raw 48-byte unit-definition strings, kept unparsed.
    self.data['VERTUNIT'] = databyte[196:244]   # units of the vertical axis
    self.data['HORUNIT'] = databyte[244:292]    # units of the horizontal axis

    # --- decode the vertical (voltage) values ------------------------------
    waveform_size = self.data['WAVE_ARRAY_COUNT'][0]
    waveform_starting_point = (self.data['WAVE_DESCRIPTOR'][0]
                               + self.data['TRIGTIME_ARRAY'][0])
    gain = self.data['VERTICAL_GAIN'][0]
    offset = self.data['VERTICAL_OFFSET'][0]
    self.data['Volt_Value_array'] = np.empty(waveform_size)
    # BUGFIX: the loops below used range(0, N-1), which leaves the last
    # element of each np.empty array uninitialized; range(N) covers it all.
    if hires in ('Yes', 'True'):
        for i in range(waveform_size):
            raw = struct.unpack(
                '<h',
                databyte[(waveform_starting_point + 2 * i):
                         (waveform_starting_point + 2 * i + 2)])[0]
            self.data['Volt_Value_array'][i] = gain * raw + offset
    else:
        for i in range(waveform_size):
            raw = struct.unpack(
                '<b',
                databyte[(waveform_starting_point + i):
                         (waveform_starting_point + i + 1)])[0]
            self.data['Volt_Value_array'][i] = gain * raw + offset

    # --- build the horizontal (time) values --------------------------------
    # Single sweep waveforms: x[i] = HORIZ_INTERVAL * i + HORIZ_OFFSET
    if self.data['TRIGTIME_ARRAY'][0] == 0:
        # A null TRIGTIME array length means a simple single sweep waveform.
        self.data['SingleSweepTimesValuesArray'] = np.empty(waveform_size)
        for i in range(waveform_size):
            self.data['SingleSweepTimesValuesArray'][i] = (
                self.data['HORIZ_INTERVAL'][0] * i
                + self.data['HORIZ_OFFSET'][0])
    else:
        # Sequence mode: the TRIGTIME array holds one (trigger time,
        # trigger offset) pair of doubles (16 bytes) per segment.
        n_seg = self.data['TRIGTIME_ARRAY'][0] // 16
        desc_len = self.data['WAVE_DESCRIPTOR'][0]
        self.data['TrigTimeCount'] = np.empty(n_seg)
        self.data['TrigTimeOffset'] = np.empty(n_seg)
        for i in range(n_seg):
            base = desc_len + 16 * i
            self.data['TrigTimeCount'][i] = struct.unpack(
                '<d', databyte[base:base + 8])[0]
            self.data['TrigTimeOffset'][i] = struct.unpack(
                '<d', databyte[base + 8:base + 16])[0]
        self.data['SEQNCEWaveformTimesValuesArray'] = np.empty(waveform_size)
        # Per-segment time axis, offset by that segment's trigger offset.
        seg_size = waveform_size // n_seg
        for n in range(n_seg):
            for i in range(seg_size):
                self.data['SEQNCEWaveformTimesValuesArray'][n * seg_size + i] = (
                    self.data['HORIZ_INTERVAL'][0] * i
                    + self.data['TrigTimeOffset'][n])
    return self.data
class LeCroy64Xi(VisaInstrument):
    """ Python driver for the LeCroy Waverunner 64Xi digital oscilloscope.

    Channel objects are created lazily by `get_channel` and cached in
    ``self.channels``.
    """
    caching_permissions = {'defined_channels': True}

    def __init__(self, connection_info, caching_allowed=True,
                 caching_permissions=None, auto_open=True):
        """ Open the connection to the instrument.

        BUGFIX: `caching_permissions` used to default to a mutable dict
        ({}), which is shared by every instance created without the
        argument; the None-sentinel idiom avoids that.
        """
        if caching_permissions is None:
            caching_permissions = {}
        super(LeCroy64Xi, self).__init__(connection_info,
                                         caching_allowed,
                                         caching_permissions,
                                         auto_open)
        self.channels = {}  # channel id -> LeCroyChannel cache
        self.lock = Lock()

    def get_channel(self, num):
        """ Return the (cached) channel object for `num`.

        Returns None when `num` is not one of `defined_channels`.
        """
        if num not in self.defined_channels:
            return None
        if num not in self.channels:
            self.channels[num] = LeCroyChannel(self, num)
        return self.channels[num]

    @instrument_property
    @secure_communication()
    def defined_channels(self):
        """ {'1', '2', '3', '4'} are the real channels of the instrument.
        {'TA', 'TB', 'TC', 'TD'} are the traces calculated from the
        channels; they are only useful for the property do_save_data.
        Same thing for 'ALL_DISPLAYED'.
        """
        defined_channels = ['1', '2', '3', '4', 'TA', 'TB', 'TC', 'TD',
                            'ALL_DISPLAYED']
        return defined_channels

    @instrument_property
    @secure_communication()
    def trigger_mode(self):
        ''' Get the current trigger mode of the scope.
        '''
        mode = self.ask('TRMD?')
        if mode is not None:
            # Strip the echoed header, keep only the mode token.
            return mode.replace('TRMD ', '')
        else:
            # BUGFIX: the message used to name the wrong model (354A).
            mes = 'LeCroy 64Xi did not return its trigger mode'
            raise InstrIOError(mes)

    @trigger_mode.setter
    @secure_communication()
    def trigger_mode(self, value):
        ''' Set the trigger mode and read it back to check it stuck.

        Input:
            {'AUTO','NORM','SINGLE','STOP'}
        '''
        self.write('TRMD {}'.format(value))
        result = self.ask('TRMD?')
        result = result.replace('TRMD ', '')
        if result != value:
            raise InstrIOError(cleandoc('''Instrument did not set correctly
                        the trigger mode'''))

    @secure_communication()
    def auto_setup(self):
        ''' Adjust vertical, timebase and trigger parameters automatically.

        Input:
            None
        Output:
            None
        '''
        self.write('ASET')

    @instrument_property
    @secure_communication()
    def auto_calibrate(self):
        ''' Report whether the instrument is in auto-calibrate mode.
        '''
        answer = self.ask('ACAL?')
        if answer is not None:
            return answer.replace('ACAL ', '')
        else:
            # BUGFIX: the message used to name the wrong model (354A).
            mes = 'LeCroy 64Xi did not return its answer'
            raise InstrIOError(mes)

    @auto_calibrate.setter
    @secure_communication()
    def auto_calibrate(self, value):
        ''' Switch the auto-calibrate mode on or off.

        Input:
            {'ON', 'Yes', 'OFF', 'No'}
        '''
        if value in ('ON', 'Yes'):
            self.write('ACAL ON')
            result = self.ask('ACAL?').replace('ACAL ', '')
            if result != 'ON':
                raise InstrIOError(cleandoc('''Instrument did not set correctly
                        the auto calibrate mode'''))
        elif value in ('OFF', 'No'):
            self.write('ACAL OFF')
            result = self.ask('ACAL?').replace('ACAL ', '')
            if result != 'OFF':
                raise InstrIOError(cleandoc('''Instrument did not set correctly
                        the auto calibrate mode'''))
        else:
            mes = '{} is not an allowed value'.format(value)
            raise InstrIOError(mes)

    @instrument_property
    @secure_communication()
    def timebase(self):
        ''' Get the time base.

        Input:
            None
        Output:
            value (str) : Timebase in S
        '''
        result = self.ask('TDIV?')
        # BUGFIX: the None check used to happen only *after* calling
        # result.replace(...), which would already have raised on None.
        if result is not None:
            return result.replace('TDIV ', '')
        else:
            mes = 'LeCroy 64Xi did not return its timebase'
            raise InstrIOError(mes)

    @timebase.setter
    @secure_communication()
    def timebase(self, value):
        ''' Modify the timebase setting and verify the read-back.

        Input:
            value (str): Timebase in S. (NS (nanosec), US (microsec),
                MS (milisec), S (sec) or KS (kilosec))
                (Example: '50E-6', '50 MS')
        Output:
            None
        '''
        self.write('TDIV {}'.format(value))
        result = self.ask('TDIV?')
        result = result.replace('TDIV ', '')
        # NOTE(review): removes every 'S' character; assumes the scope
        # answers with a numeric value plus an 'S' unit suffix -- confirm.
        result = result.replace('S', '')
        result = float(result)
        # NOTE(review): the [:-3] slices assume a space before the 2-char
        # unit (e.g. '50 MS') -- confirm against real instrument answers.
        if value[-2:] == ' S':
            value_expected = float(value[:-2])
        elif value[-2:] == 'NS':
            # BUGFIX: the NS case promised by the docstring was missing.
            value_expected = float(value[:-3])*1e-9
        elif value[-2:] == 'US':
            value_expected = float(value[:-3])*1e-6
        elif value[-2:] == 'MS':
            value_expected = float(value[:-3])*1e-3
        elif value[-2:] == 'KS':
            value_expected = float(value[:-3])*1e3
        else:
            value_expected = float(value)
        if result != value_expected:
            raise InstrIOError(cleandoc('''Instrument did not set correctly
                        the timebase'''))

    @instrument_property
    @secure_communication()
    def memory_size(self):
        ''' Get the current maximum memory length used to capture waveforms.

        Input:
            None
        Output:
            result(float) : maximum memory size in Samples
        '''
        result = self.ask('MSIZ?')
        result = result.replace('MSIZ ', '')
        result = result.replace(' SAMPLE', '')
        return float(result)

    @memory_size.setter
    @secure_communication()
    def memory_size(self, msize):
        ''' Set the current maximum memory length used to capture waveforms.

        Input:
            msize(float) : Max. memory length size in Samples.
        Output:
            None
        '''
        self.write('MSIZ {}'.format(msize))
        result = self.ask('MSIZ?')
        result = result.replace('MSIZ ', '')
        result = float(result.replace(' SAMPLE', ''))
        if result != float(msize):
            raise InstrIOError(cleandoc('''Instrument did not set correctly
                        the memory size'''))

    @secure_communication()
    def screen_dump(self, file, type='JPEG', background='BLACK', dir='E:\\',
                    area='FULLSCREEN'):
        ''' Initiate a screen dump.

        (Parameter names `file`, `type` and `dir` shadow builtins but are
        kept for backward compatibility with keyword callers.)

        Input:
            file(str) : destination filename, auto incremented
            type(str) : image type (PSD, BMP, BMPCOMP, JPEG (default), PNG,
                TIFF)
            background(str) : background color (BLACK (default), WHITE)
            dir(str) : destination directory (E:\\ is the default shared
                folder)
            area(str) : hardcopy area (GRIDAREAONLY, DSOWINDOW, FULLSCREEN)
        Output:
            None
        '''
        mes = cleandoc('''HCSU DEV, {}, BCKG, {}, DEST, FILE, DIR, {}, FILE, {}
                       , AREA, {}; SCDP'''.format(type, background,
                                                  dir, file, area))
        self.write(mes)

    @secure_communication()
    def sequence(self, segments, max_size):
        ''' Set the sequence mode on and set number of segments, maximum
        memory size.

        Input:
            segments(int) : number of segments. max: 2000.
            max_size(float) : maximum memory length. Format:
                {10e3, 10.0e3, 11e+3, 25K, 10M (mili), 10MA (mega))
        Output:
            None
        '''
        self.write('SEQ ON, {}, {}'.format(segments, max_size))

    @secure_communication()
    def clear_sweeps(self):
        ''' Restart the cumulative processing.

        Input:
            None
        Output:
            None
        '''
        self.write('CLSW')
# Registry of the drivers exported by this module (driver name -> class).
DRIVERS = {'LeCroy64Xi': LeCroy64Xi}
| |
# -*- coding: ascii -*-
#
# Copyright 2005-2013
# Andr\xe9 Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Configuration Handling
======================
This module handles configuration loading and provides an easy API
for accessing it.
"""
__author__ = u"Andr\xe9 Malo"
__docformat__ = "restructuredtext en"
import os as _os
import re as _re
import sys as _sys
from wtf import Error
class ConfigurationError(Error):
    """ Base error for all configuration handling failures """
class ConfigurationIOError(ConfigurationError):
    """ Config file could not be read (I/O error) """
class ParseError(ConfigurationError):
    """
    Raised when a config file line cannot be parsed

    :CVariables:
     - `_MESSAGE`: Format string used to build the error text

    :IVariables:
     - `filename`: File in which the error was found
     - `lineno`: Number of the offending line

    :Types:
     - `_MESSAGE`: ``str``
     - `filename`: ``basestring``
     - `lineno`: ``int``
    """
    _MESSAGE = "Parse error in %(filename)r, line %(lineno)s"

    def __init__(self, filename, lineno):
        """
        Initialization

        :Parameters:
         - `filename`: File in which the error was found
         - `lineno`: Number of the offending line

        :Types:
         - `filename`: ``basestring``
         - `lineno`: ``int``
        """
        ConfigurationError.__init__(self, filename, lineno)
        self.filename, self.lineno = filename, lineno
        self._param = {'filename': filename, 'lineno': lineno}

    def __str__(self):
        """ Build the human readable message from `_MESSAGE` """
        return self._MESSAGE % self._param
class ContinuationError(ParseError):
    """ A line continuation without a previous option line occurred """
    _MESSAGE = "Invalid line continuation in %(filename)r, line %(lineno)s"
class OptionSyntaxError(ParseError):
    """ An option line could not be parsed (no ``name [:=] value`` shape) """
    _MESSAGE = "Option syntax error in %(filename)r, line %(lineno)s"
class RecursiveIncludeError(ParseError):
    """
    Raised when a config file ends up including itself, directly or not

    :IVariables:
     - `included`: The file whose inclusion closed the cycle

    :Types:
     - `included`: ``basestring``
    """
    _MESSAGE = "Recursive include detected in %(filename)r, line " \
        "%(lineno)d: %(included)r"

    def __init__(self, filename, lineno, included):
        """
        Initialization

        :Parameters:
         - `filename`: Name of the file containing the include directive
         - `lineno`: Line number of the include directive
         - `included`: The recursively included file

        :Types:
         - `filename`: ``basestring``
         - `lineno`: ``int``
         - `included`: ``basestring``
        """
        ParseError.__init__(self, filename, lineno)
        self.included = included
        # Expose the included file to the _MESSAGE format dict as well.
        self._param['included'] = included
class OptionTypeError(ParseError):
    """ An option type could not be recognized """
    # NOTE: this message carries no %(...) placeholders, so the
    # filename/lineno stored by ParseError do not appear in the text.
    _MESSAGE = "Failed option type conversion"
class Parser(object):
    """
    Simplified config file parser

    The ``ConfigParser`` module does too much magic (partially
    not even documented). Further we don't need all the set and
    save stuff here, so we write our own - clean - variant.
    This variant just reads the stuff and does not apply any
    typing or transformation. It also uses a better design...

    :IVariables:
     - `_config`: Config instance to feed
     - `_charset`: Default config charset

    :Types:
     - `_config`: `Config`
     - `_charset`: ``str``
    """

    def __init__(self, config, charset='latin-1'):
        """
        Initialization

        :Parameters:
         - `config`: Config instance
         - `charset`: Default config charset

        :Types:
         - `config`: `Config`
         - `charset`: ``str``
        """
        self._config, self._charset = config, charset

    def parse(self, fp, filename, _included=None):
        """
        Reads from `fp` until EOF and parses line by line

        :Parameters:
         - `fp`: The stream to read from
         - `filename`: The filename used for relative includes and
           error messages
         - `_included`: Set of already included filenames for recursion check

        :Types:
         - `fp`: ``file``
         - `filename`: ``basestring``
         - `_included`: ``set``

        :Exceptions:
         - `ContinuationError`: An invalid line continuation occured
         - `OptionSyntaxError`: An option line could not be parsed
         - `IOError`: An I/O error occured while reading the stream
        """
        # pylint: disable = R0912, R0914, R0915
        lineno, section, option = 0, None, None
        root_section, charset, includes = None, self._charset, ()

        # speed / readability enhancements: bind hot lookups to locals
        config = self._config
        readline = fp.readline
        is_comment = self._is_comment
        try_section = self._try_section
        parse = self._parse_option
        make_section = self._make_section

        def handle_root(root_section):
            """ Handle root section: pull charset / include directives.

            Returns the (possibly updated) charset, the list of files to
            include, and None to reset the root section.
            """
            if root_section is None:
                return charset, includes, None
            self._cast(root_section)
            _charset, _includes = charset, []
            if u'charset' in root_section:
                _charset = list(root_section.charset)
                if len(_charset) != 1:
                    # NOTE(review): ContinuationError.__init__ expects
                    # (filename, lineno); constructing it with a single
                    # string raises TypeError instead -- confirm intent.
                    raise ContinuationError("Invalid charset declaration")
                _charset = _charset[0].encode('ascii')
            if u'include' in root_section:
                _includes = list(root_section.include)
            return _charset, _includes, None

        while True:
            line = readline()
            if not line:
                break
            # Bytes are decoded with the charset active at that point;
            # a root-level charset declaration affects later lines only.
            line = line.decode(charset)
            lineno += 1

            # skip blank lines and comments
            if line.strip() and not is_comment(line):
                # section header?
                header = try_section(line)
                if header is not None:
                    charset, includes, root_section = \
                        handle_root(root_section)
                    option = None  # reset for the next continuation line
                    header = header.strip()
                    if header in config:
                        section = config[header]
                    else:
                        config[header] = section = make_section()
                # line continuation?
                elif line[0].isspace():
                    if option is None:
                        raise ContinuationError(filename, lineno)
                    option.append(line.strip())
                # must be a new option
                else:
                    # NOTE(review): parse() may return (None, None); the
                    # .strip() below would then raise AttributeError
                    # rather than OptionSyntaxError -- confirm.
                    name, value = parse(line)
                    name = name.strip()
                    if not name:
                        raise OptionSyntaxError(filename, lineno)
                    option = [value]
                    if section is None:
                        if root_section is None:
                            root_section = make_section()
                        section = root_section
                    section[name] = option

        charset, includes, root_section = handle_root(root_section)
        basedir = _os.path.abspath(_os.path.dirname(filename))

        # recode includes to updated charset
        includes = [item.encode(self._charset).decode(charset)
                    for item in includes]
        if not isinstance(basedir, unicode):
            fsenc = _sys.getfilesystemencoding()
            includes = [item.encode(fsenc) for item in includes]

        # Recursion guard: `oldseen` holds every file already on the
        # include stack, `seen` the ones included by this file.
        oldseen = _included
        if oldseen is None:
            oldseen = set()
        seen = set()
        for fname in includes:
            fname = _os.path.normpath(_os.path.join(basedir, fname))
            rpath = _os.path.realpath(fname)
            if rpath in oldseen:
                raise RecursiveIncludeError(filename, lineno, fname)
            elif rpath not in seen:
                seen.add(rpath)
                fp = file(fname, 'rb')
                try:
                    self.parse(fp, fname, oldseen | seen)
                finally:
                    fp.close()

        # Only the outermost parse() call casts the collected sections.
        if _included is None:
            for _, section in config:
                self._cast(section)

    def _cast(self, section):
        """
        Cast the options of a section to python types

        :Parameters:
         - `section`: The section to process

        :Types:
         - `section`: `Section`
        """
        # pylint: disable = R0912
        # Tokenizer: optionally ;/# prefixed quoted strings, or bare words.
        tokre = _re.compile(ur'''
              [;#]?"[^"\\]*(\\.[^"\\]*)*"
            | [;#]?'[^'\\]*(\\.[^'\\]*)*'
            | \S+
        ''', _re.X).finditer
        # Matches \xNN, \uNNNN and \UNNNNNNNN escapes inside quoted values.
        escsub = _re.compile(ur'''(\\(?:
              x[\da-fA-F]{2}
            | u[\da-fA-F]{4}
            | U[\da-fA-F]{8}
        ))''', _re.X).sub

        def escsubber(match):
            """ Substitution function """
            return match.group(1).encode('ascii').decode('unicode_escape')

        make_option, make_section = self._make_option, self._make_section
        for name, value in section:
            newvalue = []
            for match in tokre(u' '.join(value)):
                val = match.group(0)
                if val.startswith('#'):
                    continue
                if (val.startswith(u'"') and val.endswith(u'"')) or \
                        (val.startswith(u"'") and val.endswith(u"'")):
                    # Quoted token: strip the quotes and resolve escapes.
                    val = escsub(escsubber, val[1:-1])
                else:
                    # Bare token: try bool, then float, else keep string.
                    try:
                        val = human_bool(val)
                    except ValueError:
                        try:
                            val = float(val)
                        except ValueError:
                            #raise OptionTypeError(val)
                            pass
                newvalue.append(val)
            option = make_option(newvalue)

            # nest dotted options (a.b.c = x -> section a -> b -> c = x)
            if u'.' in name:
                parts, sect = name.split(u'.'), section
                parts.reverse()
                while parts:
                    part = parts.pop()
                    if parts:
                        if part not in sect:
                            sect[part] = make_section()
                        sect = sect[part]
                    else:
                        sect[part] = option
                del section[name]
            else:
                section[name] = option

    def _is_comment(self, line):
        """
        Decide if `line` is comment

        :Parameters:
         - `line`: The line to inspect

        :Types:
         - `line`: ``str``

        :return: Is `line` is comment line?
        :rtype: ``bool``
        """
        return line.startswith(u'#') or line.startswith(u';')

    def _try_section(self, line):
        """
        Try to extract a section header from `line`

        :Parameters:
         - `line`: The line to process

        :Types:
         - `line`: ``str``

        :return: The section header name or ``None``
        :rtype: ``str``
        """
        if line.startswith(u'['):
            pos = line.find(u']')
            if pos > 1:  # one name char minimum
                return line[1:pos]
        return None

    def _parse_option(self, line):
        """
        Parse `line` as option (``name [:=] value``)

        :Parameters:
         - `line`: The line to process

        :Types:
         - `line`: ``str``

        :return: The name and the value (both ``None`` if an error occured)
        :rtype: ``tuple``
        """
        pose = line.find('=')
        posc = line.find(':')
        # The earlier of '=' / ':' wins; fall back to whichever exists.
        pos = min(pose, posc)
        if pos < 0:
            pos = max(pose, posc)
        if pos > 0:  # name must not be empty
            return (line[:pos], line[pos + 1:])
        return (None, None)

    def _make_section(self):
        """
        Make a new `Section` instance

        :return: The new `Section` instance
        :rtype: `Section`
        """
        return Section()

    def _make_option(self, valuelist):
        """
        Make a new option value

        The function will do the right thing[tm] in order to determine
        the correct option type based on `valuelist`.

        :Parameters:
         - `valuelist`: List of values of that option

        :Types:
         - `valuelist`: ``list``

        :return: Option type appropriate for the valuelist
        :rtype: any
        """
        if not valuelist:
            valuelist = [None]
        if len(valuelist) > 1:
            return valuelist
        else:
            return TypedIterOption(valuelist[0])
class TypedIterOption(object):
    """ Option, typed dynamically

    Provides an iterator of the single value list
    """

    def __new__(cls, value):
        """
        Create the final option type

        This gives the type a new name, inherits from the original type
        (where possible) and adds an ``__iter__`` method in order to
        be able to iterate over the one-value-list.

        The following type conversions are done:

        ``bool``
            Will be converted to ``int``

        :Parameters:
         - `value`: The value to decorate

        :Types:
         - `value`: any

        :return: subclass of ``type(value)``
        :rtype: any
        """
        # NOTE(review): this initial value is always overwritten by the
        # dict(...) assignment below.
        space = {}
        if value is None:
            # None cannot be subclassed; substitute an empty unicode
            # string whose iterator yields nothing.
            newcls = unicode
            value = u''

            def itermethod(self):
                """ Single value list iteration method """
                # pylint: disable = W0613
                return iter([])
        else:
            newcls = type(value)

            def itermethod(self):
                """ Single value list iteration method """
                # pylint: disable = W0613
                # `value` is captured by the closure, not read from self.
                yield value

            if newcls is bool:
                # bool cannot be subclassed; int preserves the value.
                newcls = int

        def reducemethod(self, _cls=cls):
            """ Mixed Pickles """
            # pylint: disable = W0613
            # Re-create the option through the factory when unpickling.
            return (_cls, (value,))

        space = dict(
            __module__=cls.__module__,
            __iter__=itermethod,
            __reduce__=reducemethod,
        )
        # Build a one-off subclass of the value's type and instantiate it.
        cls = type(cls.__name__, (newcls,), space)
        return cls(value)
class Config(object):
    """
    Config access class

    Sections are stored in ``__config_sections__`` and are reachable both
    by subscription (``config['name']``) and, via `__getattr__`, by dotted
    attribute access (``config.name``).

    :IVariables:
     - `ROOT`: The current working directory at startup time

    :Types:
     - `ROOT`: ``str``
    """

    def __init__(self, root):
        """
        Initialization

        :Parameters:
         - `root`: The current working directory at startup time

        :Types:
         - `root`: ``str``
        """
        self.ROOT = root
        # Mapping of unicode section name -> Section instance.
        self.__config_sections__ = {}

    def __iter__(self):
        """ Return (sectionname, section) tuples of parsed sections """
        return iter(self.__config_sections__.items())

    def __setitem__(self, name, value):
        """
        Set a section

        :Parameters:
         - `name`: Section name
         - `value`: Section instance

        :Types:
         - `name`: ``unicode``
         - `value`: `Section`
        """
        self.__config_sections__[unicode(name)] = value

    def __getitem__(self, name):
        """
        Get section by key

        :Parameters:
         - `name`: The section name

        :Types:
         - `name`: ``basestring``

        :return: Section object
        :rtype: `Section`

        :Exceptions:
         - `KeyError`: section not found
        """
        return self.__config_sections__[unicode(name)]

    def __contains__(self, name):
        """
        Determine if a section named `name` exists

        :Parameters:
         - `name`: The section name

        :Types:
         - `name`: ``unicode``

        :return: Does the section exist?
        :rtype: ``bool``
        """
        return unicode(name) in self.__config_sections__

    def __getattr__(self, name):
        """
        Return section by dotted notation

        Only called for names not found the normal way, so regular
        attributes like ``ROOT`` are unaffected.

        :Parameters:
         - `name`: The section name

        :Types:
         - `name`: ``str``

        :return: Section object
        :rtype: `Section`

        :Exceptions:
         - `AttributeError`: section not found
        """
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)
class Section(object):
    """
    Config section container

    Options are reachable by subscription, by attribute (``__getattr__``)
    and with a default through calling (``section('name', default)``).

    :IVariables:
     - `__section_options__`: Option dict

    :Types:
     - `__section_options__`: ``dict``
    """

    def __init__(self):
        """ Initialization """
        # Mapping of unicode option name -> option value (or sub-Section).
        self.__section_options__ = {}

    def __iter__(self):
        """ (Name, Value) tuple iterator """
        return iter(self.__section_options__.items())

    def __setitem__(self, name, value):
        """
        Set a new option

        :Parameters:
         - `name`: Option name
         - `value`: Option value

        :Types:
         - `name`: ``unicode``
         - `value`: any
        """
        self.__section_options__[unicode(name)] = value

    def __getitem__(self, name):
        """
        Return a config option by key

        :Parameters:
         - `name`: The key to look up

        :Types:
         - `name`: ``unicode``

        :return: The value of the option
        :rtype: any

        :Exceptions:
         - `KeyError`: No suitable option could be found
        """
        return self.__section_options__[unicode(name)]

    def __delitem__(self, name):
        """
        Delete option

        :Parameters:
         - `name`: Option key to process

        :Types:
         - `name`: ``unicode``

        :Exceptions:
         - `KeyError`: Option did not exist
        """
        del self.__section_options__[unicode(name)]

    def __getattr__(self, name):
        """
        Get option in dotted notation

        Only called for names not found as regular attributes.

        :Parameters:
         - `name`: Option key to look up

        :Types:
         - `name`: ``str``

        :return: The value of the option
        :rtype: any

        :Exceptions:
         - `AttributeError`: No suitable option could be found
        """
        try:
            return self[unicode(name)]
        except KeyError:
            raise AttributeError(name)

    def __call__(self, name, default=None):
        """
        Get option or default value

        :Parameters:
         - `name`: The option key to look up
         - `default`: Default value

        :Types:
         - `name`: ``unicode``
         - `default`: any

        :return: The value of the option
        :rtype: any
        """
        try:
            return self[unicode(name)]
        except KeyError:
            return default

    def __contains__(self, name):
        """
        Determine whether `name` is an available option key

        :Parameters:
         - `name`: The option key to look up

        :Types:
         - `name`: ``unicode``

        :return: Is `name` an available option?
        :rtype: ``bool``
        """
        return unicode(name) in self.__section_options__
def merge_sections(*sections):
"""
Merge sections together
:Parameters:
`sections` : ``tuple``
The sections to merge, later sections take more priority
:Return: The merged section
:Rtype: `Section`
:Exceptions:
- `TypeError`: Either one of the section was not a section or the
sections contained unmergable attributes (subsections vs. plain
values)
"""
result = Section()
for section in sections:
if not isinstance(section, Section):
raise TypeError("Expected Section, found %r" % (section,))
for key, value in dict(section).iteritems():
if isinstance(value, Section) and key in result:
value = merge_sections(result[key], value)
result[key] = value
return result
def human_bool(value):
"""
Interpret human readable boolean value
``True``
``yes``, ``true``, ``on``, any number other than ``0``
``False``
``no``, ``false``, ``off``, ``0``, empty, ``none``
The return value is not a boolean on purpose. It's a number, so you
can pass more than just boolean values (by passing a number)
:Parameters:
- `value`: The value to interpret
:Types:
- `value`: ``str``
:return: ``number``
:rtype: ``int``
"""
if not value:
value = 0
else:
self = human_bool
value = str(value).lower()
if value in self.yes: # pylint: disable = E1101
value = 1
elif value in self.no: # pylint: disable = E1101
value = 0
else:
value = int(value)
return value
# pylint: disable = W0612
human_bool.yes = dict.fromkeys("yes true on 1".split())
human_bool.no = dict.fromkeys("no false off 0 none".split())
# pylint: enable = W0612
def dump(config, stream=None):
"""
Dump config object
:Parameters:
`stream` : ``file``
The stream to dump to. If omitted or ``None``, it's dumped to
``sys.stdout``.
"""
# pylint: disable = R0912
def subsection(basename, section):
""" Determine option list from subsection """
opts = []
if basename is None:
make_base = lambda s: s
else:
make_base = lambda s: ".".join((basename, s))
for opt_name, opt_value in section:
opt_name = make_base(opt_name)
if isinstance(opt_value, Section):
opts.extend(subsection(opt_name, opt_value))
else:
opts.append((opt_name, opt_value))
return opts
def pretty(name, value):
""" Pretty format a value list """
value = tuple(value)
if len(value) == 0:
return u''
elif len(value) == 1:
return cast(value[0])
result = u" ".join(cast(item) for item in value)
if len(u"%s = %s" % (name, result)) < 80:
return result
return u"\n " + u"\n ".join(cast(item) for item in value)
def cast(value):
""" Format output by type """
if isinstance(value, float):
return unicode(value)
elif isinstance(value, unicode):
return u"'%s'" % value.replace(u'\\', u'\\\\').encode(
'unicode_escape').decode(
'ascii').replace(
u"'", u"\\'"
)
return repr(value).decode('ascii')
if stream is None:
stream = _sys.stdout
print >> stream, "# ----8<------- WTF config dump -------------"
print >> stream, "# This is, what the WTF systems gets to see"
print >> stream, "# after loading and merging all config files."
print >> stream
print >> stream, "charset = %r" % 'utf-8'
for section_name, section in sorted(config):
if section_name is None:
continue
print >> stream
print >> stream, (u"[%s]" % section_name).encode('utf-8')
for opt_name, opt_value in sorted(subsection(None, section)):
print >> stream, u"%s = %s" % (
opt_name, pretty(opt_name, opt_value)
)
print >> stream
print >> stream, "# ------------- WTF config dump ------->8----"
def load(name, charset='latin-1'):
    """
    Load configuration

    It is not a failure if the file does not exist.

    :Parameters:
     - `name`: The name of the file
     - `charset`: Default charset of config files

    :Types:
     - `name`: ``basestring``
     - `charset`: ``str``

    :return: A config object
    :rtype: `Config`
    """
    # The config root is the current working directory.
    config = Config(_os.path.normpath(_os.path.abspath(_os.getcwd())))
    parser = Parser(config, charset)
    try:
        fp = file(name, 'rb')
        try:
            parser.parse(fp, name)
        finally:
            fp.close()
    except IOError:
        # NOTE(review): despite the docstring above, a missing file ends up
        # here and raises ConfigurationIOError -- confirm whether callers
        # rely on that or the docstring is stale.
        # Re-raise as ConfigurationIOError, preserving the original
        # traceback (Python 2 three-argument raise).
        e = _sys.exc_info()
        try:
            raise ConfigurationIOError, e[1], e[2]
        finally:
            del e
    return config
| |
"""
wait_server.py
Server that does a hanging get. Doesn't need to be a WSGI app I think.
Override SimpelHttpServer.
Only things like /<roll-name/1/ can hang.
Even index.html should be static?
There is a thread that reads from a Queue. And then some event does it.o
Hanger() -- this is an object that hangs at the right moment.
Blocker()
"""
import os
import re
import sys
import threading
from common import util
from common import httpd
import jsontemplate
log = util.Logger(util.ANSI_BLUE)
# TODO:
# - The "Directory Listing" page should let you go back up.
# - link to help text?
# - use same <head> everywhere -- reuse templates
# - doctype, charset, etc.
HOME_PAGE = jsontemplate.Template("""\
<html>
<head>
<title>webpipe home</title>
<link href="/static/webpipe.css" rel="stylesheet">
</head>
<body>
<p align="right">
<a href="plugins/">plugins</a>
- <a href="/">home</a>
<p>
<h2>webpipe</h2>
<div id="scrolls">
<p>
Active Scroll: <a href="s/{active_scroll|htmltag}">{active_scroll}</a>
</p>
<h4>Old Scrolls</h4>
{.repeated section scrolls}
<a href="s/{@|htmltag}">{@}</a> <br/>
{.end}
<p>
(<a href="s/">browse</a>)
</p>
</div>
</body>
</html>
""", default_formatter='html')
# TODO: put file system paths here? So people can easily find their plugins.
PLUGINS_PAGE = jsontemplate.Template("""\
<p align="right">
<a href="/">home</a>
<p>
<h2>webpipe Plugins</h2>
<h3>User plugins installed in ~/webpipe</h3>
<p><i>User plugins override packaged plugins.</i></p>
<ul>
{.repeated section user}
<li>{name} {.if test static} <a href="{name}/static/">static</a> {.end} </li>
{.end}
</ul>
<h3>Packaged Plugins</h3>
<ul>
{.repeated section package}
<li>{name} {.if test static} <a href="{name}/static/">static</a> {.end} </li>
{.end}
</ul>
""", default_formatter='html')
# /s//<session>/<partnum>.html
PATH_RE = re.compile(r'/s/(\S+)/(\d+).html$')
def _ListPlugins(root_dir):
"""
Returns a template data dictionary. Plugins are directories. Plugins with
'static' dirs are marked.
"""
plugin_root = os.path.join(root_dir, 'plugins')
data = []
for name in os.listdir(plugin_root):
path = os.path.join(plugin_root, name)
if not os.path.isdir(path):
continue
p = os.path.join(path, 'static')
s = os.path.isdir(p)
# e.g. {name: csv, static: True}
data.append({'name': name, 'static': s})
data.sort(key=lambda d: d['name'])
return data
class WaitingRequestHandler(httpd.BaseRequestHandler):
"""
differences:
- block on certain paths
- don't always serve in the current directory, let the user do it.
- daemon threads so we don't block the process
- what about cache headers? I think I saw a bug where the browser would
cache instead of waiting.
"""
server_version = "webpipe"
root_dir = None # from BaseRequestHandler, not used
user_dir = None # initialize to ~/webpipe
package_dir = None # initialize to /<package>/webpipe
waiters = None
active_scroll = None
def send_webpipe_index(self):
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
s_root = os.path.join(self.user_dir, 's')
scrolls = os.listdir(s_root)
scrolls.sort(reverse=True)
# Show the active one separately
try:
scrolls.remove(self.active_scroll)
except ValueError:
pass
h = HOME_PAGE.expand({'scrolls': scrolls, 'active_scroll': self.active_scroll})
self.wfile.write(h)
def send_plugins_index(self):
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
# Session are saved on disk; allow the user to choose one.
u = _ListPlugins(self.user_dir)
p = _ListPlugins(self.package_dir)
html = PLUGINS_PAGE.expand({'user': u, 'package': p})
self.wfile.write(html)
def url_to_fs_path(self, url):
"""Translate a URL to a local file system path.
By default, we just treat URLs as paths relative to self.user_dir.
If it returns None, then a 404 is generated, without looking at disk.
Called from send_head() (see SimpleHTTPServer).
NOTE: This is adapted from Python stdlib SimpleHTTPServer.py.
"""
# Disallow path traversal with '..'
parts = [p for p in url.split('/') if p and p not in ('.', '..')]
if not parts: # corresponds to /, which is already handled by send_webpipe_index
return None
first_part = parts[0]
rest = parts[1:]
if first_part == 'static':
return os.path.join(self.package_dir, *parts)
if first_part == 's':
return os.path.join(self.user_dir, *parts)
if first_part == 'plugins':
# looking for ['plugins', <anything>, 'static'].
# Note these can be files OR directories. Directories will be listed.
if len(parts) >= 3 and parts[2] == 'static':
packaged_res = os.path.join(self.package_dir, *parts)
user_res = os.path.join(self.user_dir, *parts)
# Return the one that exists, starting with the user dir.
if os.path.exists(user_res):
return user_res
if os.path.exists(packaged_res):
return packaged_res
return None
def do_GET(self):
"""Serve a GET request."""
if self.path == '/':
self.send_webpipe_index()
return
if self.path == '/plugins':
# As is done in send_head
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return
if self.path == '/plugins/':
self.send_plugins_index()
return
m = PATH_RE.match(self.path)
if m:
session, num = m.groups()
num = int(num)
waiter = self.waiters.get(session)
if waiter is not None:
log('PATH: %s', self.path)
log('MaybeWait session %r, part %d', session, num)
result = waiter.MaybeWait(num)
log('Done %d', num)
# result could be:
# 404: too big
# 503: 503
# Serve static file.
# NOTE: url_to_fs_path is called in send_head.
f = self.send_head()
# f is None if the file doesn't exist, and send_error(404) was called.
if f:
self.copyfile(f, self.wfile)
f.close()
WAIT_OK, WAIT_TOO_BIG, WAIT_TOO_BUSY = range(3)
class SequenceWaiter(object):
"""
Call Notify() for every item. Then you can call MaybeWait(n) for.
"""
def __init__(self, max_waiters=None):
# If this limit it
self.max_waiters = max_waiters
# even, odd scheme. When one event is notified, the other is reset.
self.events = [threading.Event(), threading.Event()]
self.lock = threading.Lock() # protects self.events
self.counter = 1
def SetCounter(self, n):
# TODO: Make this a constructor param?
assert self.counter == 1, "Only call before using"
self.counter = n
def MaybeWait(self, n):
"""
Returns:
success.
200 it's OK to proceed (we may have waited)
404: index is too big?
503: maximum waiters exceeded.
"""
i = self.counter
# Block for the next item
if i > n:
#print '%d / %d' % (i, n)
#print self.items
return WAIT_OK
elif i == n:
log('Waiting for event %d (%d)', i, i % 2)
self.events[i % 2].wait() # wait for it to be added
return WAIT_OK
else:
return WAIT_TOO_BIG
def Notify(self):
# *Atomically* increment counter and add event event N+1.
with self.lock:
n = self.counter
self.counter += 1
# instantiate a new event in the other space
self.events[self.counter % 2] = threading.Event()
# unblock all MaybeWait() calls
self.events[n % 2].set()
def Length(self):
return self.counter
| |
import wx
from wx.lib.masked.ipaddrctrl import IpAddrCtrl
from spacq.devices.config import device_tree, ConnectionError, DeviceConfig
from ...tool.box import load_pickled, save_pickled, Dialog, MessageDialog
from .resource_tree import DeviceResourcesPanel
"""
Device configuration for and through the GUI.
"""
class DeviceConfigPanel(wx.Panel):
"""
Set up a device for consumption of its resources.
"""
def __init__(self, parent, connection_callback=None, *args, **kwargs):
wx.Panel.__init__(self, parent, *args, **kwargs)
self.connection_callback = connection_callback
# Implementation info.
## Find all the available devices.
self.device_tree = device_tree()
self.manufacturers = [''] + sorted(self.device_tree.keys())
self.models = ['']
## Chosen values.
self.manufacturer = None
self.model = None
# Panel.
panel_box = wx.BoxSizer(wx.VERTICAL)
## Address.
address_static_box = wx.StaticBox(self, label='Address')
address_box = wx.StaticBoxSizer(address_static_box, wx.VERTICAL)
address_sizer = wx.BoxSizer(wx.HORIZONTAL)
address_box.Add(address_sizer, flag=wx.EXPAND)
panel_box.Add(address_box, flag=wx.EXPAND|wx.ALL, border=5)
### Ethernet.
ethernet_static_box = wx.StaticBox(self)
ethernet_box = wx.StaticBoxSizer(ethernet_static_box, wx.VERTICAL)
address_sizer.Add(ethernet_box, proportion=1)
self.address_mode_eth = wx.RadioButton(self, label='Ethernet', style=wx.RB_GROUP)
ethernet_box.Add(self.address_mode_eth)
ethernet_sizer = wx.FlexGridSizer(rows=2, cols=2, hgap=5)
ethernet_box.Add(ethernet_sizer, flag=wx.EXPAND)
ethernet_sizer.Add(wx.StaticText(self, label='IP address:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.ip_address_input = IpAddrCtrl(self)
ethernet_sizer.Add(self.ip_address_input, flag=wx.CENTER)
### GPIB.
self.gpib_static_box = wx.StaticBox(self)
gpib_box = wx.StaticBoxSizer(self.gpib_static_box, wx.VERTICAL)
address_sizer.Add(gpib_box, proportion=1)
self.address_mode_gpib = wx.RadioButton(self, label='GPIB')
gpib_box.Add(self.address_mode_gpib)
gpib_sizer = wx.FlexGridSizer(rows=3, cols=2, hgap=5)
gpib_box.Add(gpib_sizer, flag=wx.EXPAND)
gpib_sizer.Add(wx.StaticText(self, label='Board:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.gpib_board_input = wx.SpinCtrl(self, min=0, max=100, initial=0)
gpib_sizer.Add(self.gpib_board_input, flag=wx.CENTER)
gpib_sizer.Add(wx.StaticText(self, label='PAD:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.gpib_pad_input = wx.SpinCtrl(self, min=1, max=30, initial=1)
gpib_sizer.Add(self.gpib_pad_input, flag=wx.CENTER)
gpib_sizer.Add(wx.StaticText(self, label='SAD:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.gpib_sad_input = wx.SpinCtrl(self, min=0, max=30, initial=0)
gpib_sizer.Add(self.gpib_sad_input, flag=wx.CENTER)
### USB.
usb_static_box = wx.StaticBox(self)
usb_box = wx.StaticBoxSizer(usb_static_box, wx.VERTICAL)
address_box.Add(usb_box, flag=wx.EXPAND)
self.address_mode_usb = wx.RadioButton(self, label='USB')
usb_box.Add(self.address_mode_usb)
usb_sizer = wx.BoxSizer(wx.HORIZONTAL)
usb_box.Add(usb_sizer, flag=wx.EXPAND)
usb_sizer.Add(wx.StaticText(self, label='USB resource: '),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.usb_resource_input = wx.TextCtrl(self, size=(300, -1))
usb_sizer.Add(self.usb_resource_input, proportion=1)
## Implementation.
implementation_static_box = wx.StaticBox(self, label='Implementation')
implementation_box = wx.StaticBoxSizer(implementation_static_box, wx.HORIZONTAL)
panel_box.Add(implementation_box, flag=wx.EXPAND|wx.ALL, border=5)
self.manufacturer_input = wx.Choice(self, choices=self.manufacturers)
self.Bind(wx.EVT_CHOICE, self.OnManufacturer, self.manufacturer_input)
implementation_box.Add(self.manufacturer_input, proportion=1)
self.model_input = wx.Choice(self, choices=self.models)
self.Bind(wx.EVT_CHOICE, self.OnModel, self.model_input)
implementation_box.Add(self.model_input, proportion=1)
self.mock_input = wx.CheckBox(self, label='Mock')
implementation_box.Add(self.mock_input, flag=wx.CENTER)
## Connection buttons.
button_box = wx.BoxSizer(wx.HORIZONTAL)
panel_box.Add(button_box, flag=wx.CENTER|wx.ALL, border=5)
self.connect_button = wx.Button(self, label='Connect')
self.Bind(wx.EVT_BUTTON, self.OnConnect, self.connect_button)
button_box.Add(self.connect_button)
self.disconnect_button = wx.Button(self, label='Disconnect')
self.Bind(wx.EVT_BUTTON, self.OnDisconnect, self.disconnect_button)
button_box.Add(self.disconnect_button)
self.SetSizerAndFit(panel_box)
def get_address_mode(self):
if self.address_mode_eth.Value:
return DeviceConfig.address_modes.ethernet
elif self.address_mode_gpib.Value:
return DeviceConfig.address_modes.gpib
elif self.address_mode_usb.Value:
return DeviceConfig.address_modes.usb
def GetValue(self):
dev_cfg = DeviceConfig(name=self.name)
# Address mode.
dev_cfg.address_mode = self.get_address_mode()
## Ethernet.
possible_address = self.ip_address_input.GetAddress()
if self.ip_address_input.IsValid() and len(possible_address) > 6:
dev_cfg.ip_address = possible_address
else:
dev_cfg.ip_address = None
## GPIB.
dev_cfg.gpib_board = self.gpib_board_input.Value
dev_cfg.gpib_pad = self.gpib_pad_input.Value
dev_cfg.gpib_sad = self.gpib_sad_input.Value
## USB.
possible_resource = self.usb_resource_input.Value
if possible_resource:
dev_cfg.usb_resource = possible_resource
else:
dev_cfg.usb_resource = None
# Implementation.
dev_cfg.manufacturer = self.manufacturer
dev_cfg.model = self.model
dev_cfg.mock = self.mock_input.Value
# Device.
dev_cfg.device = self.device
# Resource labels.
dev_cfg.resource_labels = self.resource_labels
return dev_cfg
def SetValue(self, dev_cfg):
self.name = dev_cfg.name
# Address mode.
if dev_cfg.address_mode == DeviceConfig.address_modes.ethernet:
self.address_mode_eth.Value = True
elif dev_cfg.address_mode == DeviceConfig.address_modes.gpib:
self.address_mode_gpib.Value = True
elif dev_cfg.address_mode == DeviceConfig.address_modes.usb:
self.address_mode_usb.Value = True
## Ethernet.
if dev_cfg.ip_address:
self.ip_address_input.SetValue(dev_cfg.ip_address)
## GPIB.
self.gpib_board_input.Value = dev_cfg.gpib_board
self.gpib_pad_input.Value = dev_cfg.gpib_pad
self.gpib_sad_input.Value = dev_cfg.gpib_sad
## USB.
if dev_cfg.usb_resource:
self.usb_resource_input.Value = dev_cfg.usb_resource
# Implementation.
if dev_cfg.manufacturer is not None:
self.manufacturer_input.StringSelection = dev_cfg.manufacturer
self.OnManufacturer()
if dev_cfg.model is not None:
self.model_input.StringSelection = dev_cfg.model
self.OnModel()
self.mock_input.Value = dev_cfg.mock
# Device.
self.device = dev_cfg.device
if self.device is not None:
self.connect_button.Disable()
self.disconnect_button.Enable()
else:
self.connect_button.Enable()
self.disconnect_button.Disable()
# Resource labels.
self.resource_labels = dev_cfg.resource_labels
if self.device is not None and self.connection_callback is not None:
self.connection_callback(self.device, self.resource_labels)
def OnManufacturer(self, evt=None):
self.manufacturer = self.manufacturers[self.manufacturer_input.Selection]
self.models = ['']
if self.manufacturer:
self.models.extend(self.device_tree[self.manufacturer].keys())
else:
self.manufacturer = None
self.model_input.SetItems(self.models)
def OnModel(self, evt=None):
self.model = self.models[self.model_input.Selection]
if self.model:
model = self.device_tree[self.manufacturer][self.model]
if 'real' in model and 'mock' not in model:
self.mock_input.Value = False
self.mock_input.Disable()
elif 'real' not in model and 'mock' in model:
self.mock_input.Value = True
self.mock_input.Disable()
else:
self.mock_input.Enable()
else:
self.model = None
def OnDisconnect(self, evt=None):
self.device = None
if self.connection_callback is not None:
self.connection_callback(None, {})
self.disconnect_button.Disable()
def OnConnect(self, evt=None):
dev_cfg = self.GetValue()
try:
dev_cfg.connect()
except ConnectionError as e:
MessageDialog(self, str(e), 'Connection error').Show()
return
self.device = dev_cfg.device
if self.connection_callback is not None:
self.connection_callback(self.device, self.resource_labels)
self.connect_button.Disable()
class DeviceConfigDialog(Dialog):
"""
A dialog for configuring a device, including connection and resources.
"""
def __init__(self, parent, ok_callback, *args, **kwargs):
Dialog.__init__(self, parent, style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER,
*args, **kwargs)
self.ok_callback = ok_callback
# Dialog.
dialog_box = wx.BoxSizer(wx.VERTICAL)
## Tabs.
self.notebook = wx.Notebook(self)
dialog_box.Add(self.notebook, proportion=1, flag=wx.EXPAND)
self.resources_panel = DeviceResourcesPanel(self.notebook)
self.connection_panel = DeviceConfigPanel(self.notebook,
connection_callback=self.resources_panel.set_device)
self.notebook.AddPage(self.connection_panel, 'Connection')
self.notebook.AddPage(self.resources_panel, 'Resources')
## End buttons.
button_box = wx.BoxSizer(wx.HORIZONTAL)
dialog_box.Add(button_box, flag=wx.CENTER|wx.TOP, border=10)
### OK, cancel.
dialog_button_box = wx.BoxSizer(wx.HORIZONTAL)
button_box.Add(dialog_button_box)
ok_button = wx.Button(self, wx.ID_OK)
self.Bind(wx.EVT_BUTTON, self.OnOk, ok_button)
dialog_button_box.Add(ok_button)
cancel_button = wx.Button(self, wx.ID_CANCEL)
dialog_button_box.Add(cancel_button)
### Save, load.
save_button_box = wx.BoxSizer(wx.HORIZONTAL)
button_box.Add(save_button_box, flag=wx.LEFT, border=20)
save_button = wx.Button(self, wx.ID_SAVE, label='Save...')
self.Bind(wx.EVT_BUTTON, self.OnSave, save_button)
save_button_box.Add(save_button)
load_button = wx.Button(self, wx.ID_OPEN, label='Load...')
self.Bind(wx.EVT_BUTTON, self.OnLoad, load_button)
save_button_box.Add(load_button)
self.SetSizerAndFit(dialog_box)
def GetValue(self):
dev_cfg = self.connection_panel.GetValue()
labels, resources = self.resources_panel.GetValue()
dev_cfg.resources = resources
# Preserve labels between device instances.
if dev_cfg.device is not None:
dev_cfg.resource_labels = labels
return dev_cfg
def SetValue(self, dev_cfg):
self.connection_panel.SetValue(dev_cfg)
self.resources_panel.SetValue(dev_cfg.resource_labels, dev_cfg.resources)
def OnOk(self, evt=None):
if self.ok_callback(self):
self.Destroy()
def OnSave(self, evt=None):
try:
save_pickled(self, self.GetValue(), extension='dev',
file_type='Device configuration')
except IOError as e:
MessageDialog(self, str(e), 'Save error').Show()
return
def OnLoad(self, evt=None):
try:
value = load_pickled(self, extension='dev', file_type='Device configuration')
try:
if value is not None:
value.name = self.connection_panel.name
self.SetValue(value)
except Exception as e:
raise IOError('Could not set values.', e)
except IOError as e:
MessageDialog(self, str(e), 'Load error').Show()
return
| |
# yellowbrick.colors
# Colors and color helpers brought in from a different library.
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Fri Jun 24 17:02:53 2016 -0400
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: colors.py [c6aff34] benjamin@bengfort.com $
"""
Colors and color helpers brought in from an alternate library.
See https://bl.ocks.org/mbostock/5577023
"""
##########################################################################
## Imports
##########################################################################
import random
import warnings
import numpy as np
import matplotlib as mpl
import matplotlib.cm as cm
from copy import copy
from yellowbrick.exceptions import YellowbrickValueError
# Check to see if matplotlib is at least sorta up to date
from distutils.version import LooseVersion
mpl_ge_150 = LooseVersion(mpl.__version__) >= "1.5.0"
##########################################################################
## Color Utilities
##########################################################################
def get_color_cycle():
"""
Returns the current color cycle from matplotlib.
"""
if mpl_ge_150:
cyl = mpl.rcParams['axes.prop_cycle']
# matplotlib 1.5 verifies that axes.prop_cycle *is* a cycler
# but no garuantee that there's a `color` key.
# so users could have a custom rcParams w/ no color...
try:
return [x['color'] for x in cyl]
except KeyError:
pass # just return axes.color style below
return mpl.rcParams['axes.color_cycle']
def resolve_colors(n_colors=None, colormap=None, colors=None):
"""
Generates a list of colors based on common color arguments, for example
the name of a colormap or palette or another iterable of colors. The list
is then truncated (or multiplied) to the specific number of requested
colors.
Parameters
----------
n_colors : int, default: None
Specify the length of the list of returned colors, which will either
truncate or multiple the colors available. If None the length of the
colors will not be modified.
colormap : str, yellowbrick.style.palettes.ColorPalette, matplotlib.cm, default: None
The name of the matplotlib color map with which to generate colors.
colors : iterable, default: None
A collection of colors to use specifically with the plot. Overrides colormap if both are specified.
Returns
-------
colors : list
A list of colors that can be used in matplotlib plots.
Notes
-----
This function was originally based on a similar function in the pandas
plotting library that has been removed in the new version of the library.
"""
# Work with the colormap if specified and colors is not
if colormap is not None and colors is None:
# Must import here to avoid recursive import
from .palettes import PALETTES, ColorPalette
if isinstance(colormap, str):
try:
# try to get colormap from PALETTES first
_colormap = PALETTES.get(colormap, None)
if _colormap is None:
colormap = cm.get_cmap(colormap)
n_colors = n_colors or len(get_color_cycle())
_colors = list(map(colormap, np.linspace(0, 1, num=n_colors)))
else:
_colors = ColorPalette(_colormap).as_rgb()
n_colors = n_colors or len(_colors)
except ValueError as e:
raise YellowbrickValueError(e)
# if yellowbrick color palette is provided as colormap
elif isinstance(colormap, ColorPalette):
_colors = colormap.as_rgb()
n_colors = n_colors or len(_colors)
# if matplotlib color palette is provided as colormap
elif isinstance(colormap, mpl.colors.Colormap):
n_colors = n_colors or len(get_color_cycle())
_colors = list(map(colormap, np.linspace(0, 1, num=n_colors)))
else:
raise YellowbrickValueError(
"Colormap type {} is not recognized. Possible types are: {}"
.format(type(colormap), ', '.join(['yellowbrick.style.ColorPalette,',
'matplotlib.cm,',
'str'])))
# Work with the color list
elif colors is not None:
# Warn if both colormap and colors is specified.
if colormap is not None:
warnings.warn(
"both colormap and colors specified; using colors"
)
_colors = list(colors) # Ensure colors is a list
# Get the default colors
else:
_colors = get_color_cycle()
# Truncate or multiple the color list according to the number of colors
if n_colors is not None and len(_colors) != n_colors:
_colors = [
_colors[idx % len(_colors)] for idx in np.arange(n_colors)
]
return _colors
class ColorMap(object):
"""
A helper for mapping categorical values to colors on demand.
"""
def __init__(self, colors='flatui', shuffle=False):
"""
Specify either a list of colors or one of the color names. If shuffle
is True then the colors will be shuffled randomly.
"""
self.mapping = {}
self.colors = colors
if shuffle:
random.shuffle(self._colors)
@property
def colors(self):
return self._colors
@colors.setter
def colors(self, value):
"""
Converts color strings into a color listing.
"""
if isinstance(value, str):
# Must import here to avoid recursive import
from .palettes import PALETTES
if value not in PALETTES:
raise YellowbrickValueError(
"'{}' is not a registered color palette".format(value)
)
self._colors = copy(PALETTES[value])
elif isinstance(value, list):
self._colors = value
else:
self._colors = list(value)
def __call__(self, category):
if category not in self.mapping:
if self.colors:
self.mapping[category] = self.colors.pop()
else:
raise YellowbrickValueError(
"Not enough colors for this many categories!"
)
return self.mapping[category]
| |
from rpython.rlib import jit_libffi, clibffi, unroll
from rpython.rtyper.lltypesystem import rffi, lltype
from space import unwind, LTypeError, Object, Integer, Float, to_float, to_int, String
# Simple, platform independent concepts are put up
# here, so they won't take space elsewhere.
class Type(Object):
parameter = None # Some of the C types are parametric
# it's ok.
size = 0 # These fields remain zero if it's an
align = 0 # opaque type.
def cast_to_ffitype(self):
raise unwind(LTypeError(u".cast_to_ffitype method missing"))
def load(self, offset, copy):
raise unwind(LTypeError(u".load method missing"))
def store(self, pool, offset, value):
raise unwind(LTypeError(u".store method missing"))
def typecheck(self, other):
return self is other
def on_getattr(self, mem, name):
if name == u"to":
return self.load(mem.pointer, False)
if name == u"str" and self.size == 1:
s = rffi.charp2str(rffi.cast(rffi.CCHARP, mem.pointer))
return String(s.decode('utf-8'))
raise Object.getattr(mem, name)
def on_setattr(self, mem, name, value):
if name == u"to":
return self.store(mem.pool, mem.pointer, value)
raise Object.setattr(mem, name, value)
# Many systems are sensitive to memory alignment
def align(x, a):
return x + (a - x % a) % a
def sizeof(tp):
assert isinstance(tp, Type)
if tp.size == 0 or tp.align == 0:
raise unwind(LTypeError(u"cannot determine size of opaque type"))
return tp.size
# This is something rpython's allocator is doing, and
# it looks nice enough. Allocations are treated as arrays,
# in parametric records the parameter is treated as an array.
def sizeof_a(tp, n):
assert isinstance(tp, Type)
if tp.size == 0 or tp.align == 0:
raise unwind(LTypeError(u"cannot determine size of opaque type"))
if tp.parameter is not None:
return tp.size + sizeof(tp.parameter)*n
else:
return tp.size * n
signed_types = unroll.unrolling_iterable([rffi.LONG, rffi.INT, rffi.SHORT, rffi.CHAR, rffi.LONGLONG])
unsigned_types = unroll.unrolling_iterable([rffi.ULONG, rffi.UINT, rffi.USHORT, rffi.UCHAR, rffi.ULONGLONG])
floating_types = unroll.unrolling_iterable([rffi.FLOAT, rffi.DOUBLE])
class Signed(Type):
def __init__(self, size=8):
assert isinstance(size, int)
self.align = size
self.size = size
def cast_to_ffitype(self):
for rtype in signed_types:
if self.size == rffi.sizeof(rtype):
return clibffi.cast_type_to_ffitype(rtype)
else:
raise unwind(LTypeError(u"undefined ffi type: %s" % self.repr()))
def load(self, offset, copy):
for rtype in signed_types:
if self.size == rffi.sizeof(rtype):
return Integer(rffi.cast(rffi.LONG, rffi.cast(rffi.CArrayPtr(rtype), offset)[0]))
else:
raise unwind(LTypeError(u"undefined ffi type: %s" % self.repr()))
def store(self, pool, offset, value):
for rtype in signed_types:
if self.size == rffi.sizeof(rtype):
pnt = rffi.cast(rffi.CArrayPtr(rtype), offset)
pnt[0] = rffi.cast(rtype, to_int(value))
break
else:
raise unwind(LTypeError(u"undefined ffi type: %s" % self.repr()))
return value
def typecheck(self, other):
if isinstance(other, Signed) and self.size == other.size:
return True
if isinstance(other, Unsigned) and self.size == other.size:
return True
return False
def repr(self):
return u"<signed %d>" % self.size
class Unsigned(Type):
def __init__(self, size=8):
assert isinstance(size, int)
self.align = size
self.size = size
def cast_to_ffitype(self):
for rtype in unsigned_types:
if self.size == rffi.sizeof(rtype):
return clibffi.cast_type_to_ffitype(rtype)
else:
raise unwind(LTypeError(u"undefined ffi type: %s" % self.repr()))
def load(self, offset, copy):
for rtype in unsigned_types:
if self.size == rffi.sizeof(rtype):
return Integer(rffi.cast(rffi.LONG, rffi.cast(rffi.CArrayPtr(rtype), offset)[0]))
else:
raise unwind(LTypeError(u"undefined ffi type: %s" % self.repr()))
def store(self, pool, offset, value):
for rtype in unsigned_types:
if self.size == rffi.sizeof(rtype):
pnt = rffi.cast(rffi.CArrayPtr(rtype), offset)
pnt[0] = rffi.cast(rtype, to_int(value))
break
else:
raise unwind(LTypeError(u"undefined ffi type: %s" % self.repr()))
return value
def repr(self):
return u"<unsigned %d>" % self.size
def typecheck(self, other):
if isinstance(other, Signed) and self.size == other.size:
return True
if isinstance(other, Unsigned) and self.size == other.size:
return True
return False
class Floating(Type):
    """Fixed-width floating point FFI type; size and alignment in bytes."""
    def __init__(self, size=4):
        self.align = size
        self.size = size
    def cast_to_ffitype(self):
        # Find the platform rffi floating type with a matching byte width.
        for rtype in floating_types:
            if self.size == rffi.sizeof(rtype):
                return clibffi.cast_type_to_ffitype(rtype)
        else:
            # for/else: reached only when no rffi type matched self.size.
            raise unwind(LTypeError(u"undefined ffi type: %s" % self.repr()))
    def load(self, offset, copy):
        # Loaded values are widened to DOUBLE before boxing as Float.
        for rtype in floating_types:
            if self.size == rffi.sizeof(rtype):
                return Float(rffi.cast(rffi.DOUBLE, rffi.cast(rffi.CArrayPtr(rtype), offset)[0]))
        else:
            raise unwind(LTypeError(u"undefined ffi type: %s" % self.repr()))
    def store(self, pool, offset, value):
        number = to_float(value)
        for rtype in floating_types:
            if self.size == rffi.sizeof(rtype):
                pnt = rffi.cast(rffi.CArrayPtr(rtype), offset)
                pnt[0] = rffi.cast(rtype, number)
                break
        else:
            raise unwind(LTypeError(u"undefined ffi type: %s" % self.repr()))
        return value
    def repr(self):
        return u"<floating %d>" % self.size
    def typecheck(self, other):
        # Floats only match other floating types of identical width.
        if isinstance(other, Floating) and self.size == other.size:
            return True
        return False
# The idea here is that you can shadow the ffi types
# with your own classes or objects, changing their
# behavior.
class Shadow(Type):
    """Wrap a base FFI type with a user object that hooks load/store."""
    def __init__(self, basetype, obj):
        self.basetype = basetype
        self.parameter = basetype.parameter
        self.size = basetype.size
        self.align = basetype.align
        self.obj = obj
    def cast_to_ffitype(self):
        # Raw FFI representation is exactly the base type's.
        return self.basetype.cast_to_ffitype()
    def load(self, offset, copy):
        value = self.basetype.load(offset, copy)
        # Let the shadowing object transform the freshly loaded value.
        return self.obj.callattr(u"load", [value])
    def store(self, pool, offset, value):
        # Let the shadowing object transform the value before storing.
        value = self.obj.callattr(u"store", [value])
        return self.basetype.store(pool, offset, value)
    def on_getattr(self, mem, name):
        return self.basetype.on_getattr(mem, name)
    def on_setattr(self, mem, name, value):
        return self.basetype.on_setattr(mem, name, value)
def to_type(obj):
    """Coerce `obj` into a Type.

    A plain Type is returned unchanged; anything else is wrapped in a
    Shadow around the Type produced from its `shadow` attribute.
    """
    if not isinstance(obj, Type):
        base = to_type(obj.getattr(u"shadow"))
        return Shadow(base, obj)
    return obj
def unshadow(ctype):
    """Return the shadowing object of a Shadow, or `ctype` unchanged."""
    if not isinstance(ctype, Shadow):
        return ctype
    return ctype.obj
| |
#!/usr/bin/env python
from __future__ import division
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
from scipy.signal import filtfilt
from numpy import nonzero, diff
import pyqtgraph as pg
from recorder import SoundCardDataSource
# Based on function from numpy 1.8
def rfftfreq(n, d=1.0):
    """
    Return the Discrete Fourier Transform sample frequencies
    (for usage with rfft, irfft).
    The returned float array `f` contains the frequency bin centers in cycles
    per unit of the sample spacing (with zero at the start). For instance, if
    the sample spacing is in seconds, then the frequency unit is cycles/second.
    Given a window length `n` and a sample spacing `d`::
      f = [0, 1, ..., n/2-1, n/2] / (d*n) if n is even
      f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) if n is odd
    Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`)
    the Nyquist frequency component is considered to be positive.
    Parameters
    ----------
    n : int
        Window length.  Builtin and NumPy integer scalars are accepted.
    d : scalar, optional
        Sample spacing (inverse of the sampling rate). Defaults to 1.
    Returns
    -------
    f : ndarray
        Array of length ``n//2 + 1`` containing the sample frequencies.
    Raises
    ------
    ValueError
        If `n` is not an integer.
    """
    # Generalized: also accept NumPy integer scalars (e.g. values derived
    # from ndarray.shape), which the original isinstance(int) rejected.
    if not isinstance(n, (int, np.integer)):
        raise ValueError("n should be an integer")
    val = 1.0 / (n * d)
    N = n // 2 + 1
    results = np.arange(0, N, dtype=int)
    return results * val
def fft_slices(x):
    """Row-wise Hann-windowed amplitude spectra of a (Nslices, Npts) array.

    Each row is windowed, transformed with rfft, normalised by the window
    energy, one-sided scaled (all bins except DC and Nyquist doubled), and
    returned as the square root of the PSD.
    """
    n_slices, n_pts = x.shape
    win = np.hanning(n_pts)
    # FFT of every windowed row at once.
    spectra = np.fft.rfft(win[np.newaxis, :] * x, axis=1)
    # Normalised power spectral density.
    psd = abs(spectra) ** 2 / (np.abs(win) ** 2).sum()
    # One-sided scaling: double everything but the DC and Nyquist bins.
    psd[:, 1:-1] *= 2
    return psd ** 0.5
def find_peaks(Pxx):
# filter parameters
b, a = [0.01], [1, -0.99]
Pxx_smooth = filtfilt(b, a, abs(Pxx))
peakedness = abs(Pxx) / Pxx_smooth
# find peaky regions which are separated by more than 10 samples
peaky_regions = nonzero(peakedness > 1)[0]
edge_indices = nonzero(diff(peaky_regions) > 10)[0] # RH edges of peaks
edges = [0] + [(peaky_regions[i] + 5) for i in edge_indices]
if len(edges) < 2:
edges += [len(Pxx) - 1]
peaks = []
for i in range(len(edges) - 1):
j, k = edges[i], edges[i+1]
peaks.append(j + np.argmax(peakedness[j:k]))
return peaks
def fft_buffer(x):
    """Hann-windowed amplitude spectrum (sqrt of one-sided PSD) of 1-D x."""
    win = np.hanning(x.shape[0])
    spectrum = np.fft.rfft(win * x)
    # Normalise the power by the window energy.
    psd = abs(spectrum) ** 2 / (np.abs(win) ** 2).sum()
    # One-sided scaling: double everything but the DC and Nyquist bins.
    psd[1:-1] *= 2
    return psd ** 0.5
class LiveFFTWindow(pg.GraphicsWindow):
    """Two-pane live display: waveform (top) and FFT spectrum (bottom)."""
    def __init__(self, recorder):
        super(LiveFFTWindow, self).__init__(title="Live FFT")
        self.recorder = recorder
        self.paused = False
        self.logScale = False
        self.showPeaks = False
        self.downsample = True
        # Setup plots
        self.p1 = self.addPlot()
        self.p1.setLabel('bottom', 'Time', 's')
        self.p1.setLabel('left', 'Amplitude')
        self.p1.setTitle("")
        self.p1.setLimits(xMin=0, yMin=-1, yMax=1)
        self.ts = self.p1.plot(pen='y')
        self.nextRow()
        self.p2 = self.addPlot()
        self.p2.setLabel('bottom', 'Frequency', 'Hz')
        self.p2.setLimits(xMin=0, yMin=0)
        self.spec = self.p2.plot(pen=(50, 100, 200),
                                 brush=(50,100,200),
                                 fillLevel=-100)
        # Show note lines: octaves of A (440 Hz) up to the Nyquist frequency.
        A = 440.0
        notePen = pg.mkPen((0, 200, 50, 50))
        while A < (self.recorder.fs / 2):
            self.p2.addLine(x=A, pen=notePen)
            A *= 2
        # Lines for marking peaks
        self.peakMarkers = []
        # Data ranges
        self.resetRanges()
        # Timer to update plots once per captured audio chunk.
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.update)
        # NOTE(review): true division yields a float here; QTimer.start
        # takes milliseconds as int -- confirm PyQt coerces as expected.
        interval_ms = 1000 * (self.recorder.chunk_size / self.recorder.fs)
        print("Updating graphs every %.1f ms" % interval_ms)
        self.timer.start(interval_ms)
    def resetRanges(self):
        """Recompute axes/limits after a scale toggle or buffer resize."""
        self.timeValues = self.recorder.timeValues
        self.freqValues = rfftfreq(len(self.timeValues),
                                   1./self.recorder.fs)
        self.p1.setRange(xRange=(0, self.timeValues[-1]), yRange=(-1, 1))
        self.p1.setLimits(xMin=0, xMax=self.timeValues[-1], yMin=-1, yMax=1)
        if self.logScale:
            self.p2.setRange(xRange=(0, self.freqValues[-1] / 2),
                             yRange=(-60, 20))
            self.p2.setLimits(xMax=self.freqValues[-1], yMin=-60, yMax=20)
            self.spec.setData(fillLevel=-100)
            self.p2.setLabel('left', 'PSD', 'dB / Hz')
        else:
            self.p2.setRange(xRange=(0, self.freqValues[-1] / 2),
                             yRange=(0, 50))
            self.p2.setLimits(xMax=self.freqValues[-1], yMax=50)
            self.spec.setData(fillLevel=0)
            self.p2.setLabel('left', 'PSD', '1 / Hz')
    def plotPeaks(self, Pxx):
        """Label spectral peaks above a fixed threshold, reusing TextItems."""
        # find peaks bigger than a certain threshold
        peaks = [p for p in find_peaks(Pxx) if Pxx[p] > 0.3]
        if self.logScale:
            Pxx = 20*np.log10(Pxx)
        # Label peaks
        old = self.peakMarkers
        self.peakMarkers = []
        for p in peaks:
            # Recycle a marker from the previous frame when available.
            if old:
                t = old.pop()
            else:
                t = pg.TextItem(color=(150, 150, 150, 150))
                self.p2.addItem(t)
            self.peakMarkers.append(t)
            t.setText("%.1f Hz" % self.freqValues[p])
            t.setPos(self.freqValues[p], Pxx[p])
        # Remove any leftover markers from the previous frame.
        for t in old:
            self.p2.removeItem(t)
            del t
    def update(self):
        """Timer callback: refresh waveform and spectrum from the recorder."""
        if self.paused:
            return
        data = self.recorder.get_buffer()
        # Exponential time weighting emphasizes the newest samples.
        weighting = np.exp(self.timeValues / self.timeValues[-1])
        Pxx = fft_buffer(weighting * data[:, 0])
        if self.downsample:
            downsample_args = dict(autoDownsample=False,
                                   downsampleMethod='subsample',
                                   downsample=10)
        else:
            downsample_args = dict(autoDownsample=True)
        self.ts.setData(x=self.timeValues, y=data[:, 0], **downsample_args)
        self.spec.setData(x=self.freqValues,
                          y=(20*np.log10(Pxx) if self.logScale else Pxx))
        if self.showPeaks:
            self.plotPeaks(Pxx)
    def keyPressEvent(self, event):
        """Keyboard controls: space=pause, l=log scale, d=downsample,
        +/- = grow/shrink buffer, p = peak labels."""
        text = event.text()
        if text == " ":
            self.paused = not self.paused
            self.p1.setTitle("PAUSED" if self.paused else "")
        elif text == "l":
            self.logScale = not self.logScale
            self.resetRanges()
        elif text == "d":
            self.downsample = not self.downsample
        elif text == "+":
            self.recorder.num_chunks *= 2
            self.resetRanges()
        elif text == "-":
            # NOTE(review): with `from __future__ import division` this
            # makes num_chunks a float -- confirm the recorder tolerates
            # non-integer values, else this should be //=.
            self.recorder.num_chunks /= 2
            self.resetRanges()
        elif text == "p":
            self.showPeaks = not self.showPeaks
        else:
            super(LiveFFTWindow, self).keyPressEvent(event)
# Setup plots
#QtGui.QApplication.setGraphicsSystem('opengl')
app = QtGui.QApplication([])
#pg.setConfigOptions(antialias=True)
# Setup recorder
#FS = 12000
#FS = 22000
# Sampling rate in Hz.  NOTE(review): 44000 is unusual (CD rate is 44100);
# confirm the sound card actually supports this exact rate.
FS = 44000
recorder = SoundCardDataSource(num_chunks=3,
                               sampling_rate=FS,
                               chunk_size=4*1024)
win = LiveFFTWindow(recorder)
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
    import sys
    # Only block on exec_() when not in an interactive PyQt session.
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
| |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
    # Expose the PY2 names `__builtin__` and `long` on Python 3 so the
    # generated code below can reference them unconditionally.
    import builtins as __builtin__

    long = int
elif six.PY2:
    import __builtin__
class state(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-interfaces - based on the path /interfaces/interface/routed-vlan/ipv4/state. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: Top level IPv4 operational state data
    """

    # NOTE: generated code -- change the YANG model and regenerate rather
    # than hand-editing this class.
    __slots__ = ("_path_helper", "_extmethods", "__enabled", "__mtu")

    _yang_name = "state"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False

        self._extmethods = False
        # leaf "enabled": boolean, operational (config false), default true.
        self.__enabled = YANGDynClass(
            base=YANGBool,
            default=YANGBool("true"),
            is_leaf=True,
            yang_name="enabled",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/interfaces/ip",
            defining_module="openconfig-if-ip",
            yang_type="boolean",
            is_config=False,
        )
        # leaf "mtu": uint16 restricted to the range 68..65535, config false.
        self.__mtu = YANGDynClass(
            base=RestrictedClassType(
                base_type=RestrictedClassType(
                    base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
                ),
                restriction_dict={"range": ["68..max"]},
            ),
            is_leaf=True,
            yang_name="mtu",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/interfaces/ip",
            defining_module="openconfig-if-ip",
            yang_type="uint16",
            is_config=False,
        )

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            # Copy every changed element from the supplied template object.
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Path of this container in the YANG tree, derived from the parent
        # chain when attached, otherwise the static schema path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return ["interfaces", "interface", "routed-vlan", "ipv4", "state"]

    def _get_enabled(self):
        """
        Getter method for enabled, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/enabled (boolean)
        YANG Description: Controls whether IPv4 is enabled or disabled on this
        interface.  When IPv4 is enabled, this interface is
        connected to an IPv4 stack, and the interface can send
        and receive IPv4 packets.
        """
        return self.__enabled

    def _set_enabled(self, v, load=False):
        """
        Setter method for enabled, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/enabled (boolean)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_enabled is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_enabled() directly.
        YANG Description: Controls whether IPv4 is enabled or disabled on this
        interface.  When IPv4 is enabled, this interface is
        connected to an IPv4 stack, and the interface can send
        and receive IPv4 packets.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=YANGBool,
                default=YANGBool("true"),
                is_leaf=True,
                yang_name="enabled",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/interfaces/ip",
                defining_module="openconfig-if-ip",
                yang_type="boolean",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """enabled must be of a type compatible with boolean""",
                    "defined-type": "boolean",
                    "generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='boolean', is_config=False)""",
                }
            )

        self.__enabled = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_enabled(self):
        # Reset the leaf back to its generated default.
        self.__enabled = YANGDynClass(
            base=YANGBool,
            default=YANGBool("true"),
            is_leaf=True,
            yang_name="enabled",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/interfaces/ip",
            defining_module="openconfig-if-ip",
            yang_type="boolean",
            is_config=False,
        )

    def _get_mtu(self):
        """
        Getter method for mtu, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/mtu (uint16)
        YANG Description: The size, in octets, of the largest IPv4 packet that the
        interface will send and receive.
        The server may restrict the allowed values for this leaf,
        depending on the interface's type.
        If this leaf is not configured, the operationally used MTU
        depends on the interface's type.
        """
        return self.__mtu

    def _set_mtu(self, v, load=False):
        """
        Setter method for mtu, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/mtu (uint16)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_mtu is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_mtu() directly.
        YANG Description: The size, in octets, of the largest IPv4 packet that the
        interface will send and receive.
        The server may restrict the allowed values for this leaf,
        depending on the interface's type.
        If this leaf is not configured, the operationally used MTU
        depends on the interface's type.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=RestrictedClassType(
                        base_type=int,
                        restriction_dict={"range": ["0..65535"]},
                        int_size=16,
                    ),
                    restriction_dict={"range": ["68..max"]},
                ),
                is_leaf=True,
                yang_name="mtu",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/interfaces/ip",
                defining_module="openconfig-if-ip",
                yang_type="uint16",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """mtu must be of a type compatible with uint16""",
                    "defined-type": "uint16",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['68..max']}), is_leaf=True, yang_name="mtu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='uint16', is_config=False)""",
                }
            )

        self.__mtu = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_mtu(self):
        # Reset the leaf back to its generated default (no value).
        self.__mtu = YANGDynClass(
            base=RestrictedClassType(
                base_type=RestrictedClassType(
                    base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
                ),
                restriction_dict={"range": ["68..max"]},
            ),
            is_leaf=True,
            yang_name="mtu",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/interfaces/ip",
            defining_module="openconfig-if-ip",
            yang_type="uint16",
            is_config=False,
        )

    # Read-only public properties (state container: config false).
    enabled = __builtin__.property(_get_enabled)
    mtu = __builtin__.property(_get_mtu)

    _pyangbind_elements = OrderedDict([("enabled", enabled), ("mtu", mtu)])
| |
import collections
import copy
import os
import os.path as osp
import time
import warnings
import chainer
import numpy as np
import skimage.io
import skimage.util
import tqdm
from . import datasets
from . import utils
class Trainer(object):

    """Training class for FCN models.

    Deprecated: prefer ``chainer.training.Trainer``.

    Parameters
    ----------
    device: int
        GPU id, negative values represents use of CPU.
    model: chainer.Chain
        NN model.
    optimizer: chainer.Optimizer
        Optimizer.
    iter_train: chainer.Iterator
        Dataset iterator for training dataset.
    iter_valid: chainer.Iterator
        Dataset iterator for validation dataset.
    out: str
        Log output directory.
    max_iter: int
        Max iteration to stop training iterations.
    interval_validate: None or int
        Validation interval in iterations; if None, validation is never
        run. (default: 4000)

    Returns
    -------
    None
    """

    def __init__(
            self,
            device,
            model,
            optimizer,
            iter_train,
            iter_valid,
            out,
            max_iter,
            interval_validate=4000):
        warnings.warn('fcn.Trainer is deprecated.\n'
                      'Please use chainer.training.Trainer.')
        self.device = device
        self.model = model
        self.optimizer = optimizer
        self.iter_train = iter_train
        self.iter_valid = iter_valid
        self.out = out
        self.epoch = 0
        self.iteration = 0
        self.max_iter = max_iter
        self.interval_validate = interval_validate
        self.stamp_start = None  # set when train() starts

        # Column order of the CSV log; _write_log fills missing keys with ''
        # so training and validation rows share the same file.
        self.log_headers = [
            'epoch',
            'iteration',
            'elapsed_time',
            'train/loss',
            'train/acc',
            'train/acc_cls',
            'train/mean_iu',
            'train/fwavacc',
            'valid/loss',
            'valid/acc',
            'valid/acc_cls',
            'valid/mean_iu',
            'valid/fwavacc',
        ]
        if not osp.exists(self.out):
            os.makedirs(self.out)
        with open(osp.join(self.out, 'log.csv'), 'w') as f:
            f.write(','.join(self.log_headers) + '\n')

    def validate(self, n_viz=9):
        """Validate current model using validation dataset.

        Parameters
        ----------
        n_viz: int
            Number of examples visualized in the saved tile image.

        Returns
        -------
        None
        """
        iter_valid = copy.copy(self.iter_valid)
        losses, lbl_trues, lbl_preds = [], [], []
        vizs = []
        dataset = iter_valid.dataset
        desc = 'valid [iteration=%08d]' % self.iteration
        for batch in tqdm.tqdm(iter_valid, desc=desc, total=len(dataset),
                               ncols=80, leave=False):
            img, lbl_true = zip(*batch)
            # list() so this works on Python 3, where map() returns a
            # one-shot iterator.
            batch = list(map(datasets.transform_lsvrc2012_vgg16, batch))
            with chainer.no_backprop_mode(), \
                    chainer.using_config('train', False):
                in_vars = utils.batch_to_vars(batch, device=self.device)
                loss = self.model(*in_vars)
            losses.append(float(loss.data))
            score = self.model.score
            lbl_pred = chainer.functions.argmax(score, axis=1)
            lbl_pred = chainer.cuda.to_cpu(lbl_pred.data)
            for im, lt, lp in zip(img, lbl_true, lbl_pred):
                lbl_trues.append(lt)
                lbl_preds.append(lp)
                if len(vizs) < n_viz:
                    viz = utils.visualize_segmentation(
                        lbl_pred=lp, lbl_true=lt,
                        img=im, n_class=self.model.n_class)
                    vizs.append(viz)
        # save visualization
        out_viz = osp.join(self.out, 'visualizations_valid',
                           'iter%08d.jpg' % self.iteration)
        if not osp.exists(osp.dirname(out_viz)):
            os.makedirs(osp.dirname(out_viz))
        viz = utils.get_tile_image(vizs)
        skimage.io.imsave(out_viz, viz)
        # generate log
        acc = utils.label_accuracy_score(
            lbl_trues, lbl_preds, self.model.n_class)
        self._write_log(**{
            'epoch': self.epoch,
            'iteration': self.iteration,
            'elapsed_time': time.time() - self.stamp_start,
            'valid/loss': np.mean(losses),
            'valid/acc': acc[0],
            'valid/acc_cls': acc[1],
            'valid/mean_iu': acc[2],
            'valid/fwavacc': acc[3],
        })
        self._save_model()

    def _write_log(self, **kwargs):
        # Append one CSV row; headers absent from kwargs become ''.
        log = collections.defaultdict(str)
        log.update(kwargs)
        with open(osp.join(self.out, 'log.csv'), 'a') as f:
            f.write(','.join(str(log[h]) for h in self.log_headers) + '\n')

    def _save_model(self):
        # Snapshot the model weights as <ClassName>_iterNNNNNNNN.npz.
        out_model_dir = osp.join(self.out, 'models')
        if not osp.exists(out_model_dir):
            os.makedirs(out_model_dir)
        model_name = self.model.__class__.__name__
        out_model = osp.join(out_model_dir, '%s_iter%08d.npz' %
                             (model_name, self.iteration))
        chainer.serializers.save_npz(out_model, self.model)

    def train(self):
        """Train the network using the training dataset.

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        self.stamp_start = time.time()
        for iteration, batch in tqdm.tqdm(enumerate(self.iter_train),
                                          desc='train', total=self.max_iter,
                                          ncols=80):
            self.epoch = self.iter_train.epoch
            self.iteration = iteration

            ############
            # validate #
            ############

            if self.interval_validate and \
                    self.iteration % self.interval_validate == 0:
                self.validate()

            #########
            # train #
            #########

            # BUGFIX: materialize with list().  On Python 3, map() is a
            # one-shot iterator that batch_to_vars would exhaust, leaving
            # nothing for the zip(*batch) below.
            batch = list(map(datasets.transform_lsvrc2012_vgg16, batch))
            in_vars = utils.batch_to_vars(batch, device=self.device)
            self.model.zerograds()  # kept: cleargrads() has different semantics
            loss = self.model(*in_vars)

            if loss is not None:
                loss.backward()
                self.optimizer.update()

                # BUGFIX: zip() objects are not subscriptable on Python 3.
                lbl_true = list(zip(*batch))[1]
                lbl_pred = chainer.functions.argmax(self.model.score, axis=1)
                lbl_pred = chainer.cuda.to_cpu(lbl_pred.data)
                acc = utils.label_accuracy_score(
                    lbl_true, lbl_pred, self.model.n_class)
                self._write_log(**{
                    'epoch': self.epoch,
                    'iteration': self.iteration,
                    'elapsed_time': time.time() - self.stamp_start,
                    'train/loss': float(loss.data),
                    'train/acc': acc[0],
                    'train/acc_cls': acc[1],
                    'train/mean_iu': acc[2],
                    'train/fwavacc': acc[3],
                })

            if iteration >= self.max_iter:
                self._save_model()
                break
| |
# docket - code related to revlog "docket"
#
# Copyright 2021 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
### Revlog docket file
#
# The revlog is stored on disk using multiple files:
#
# * a small docket file, containing metadata and a pointer,
#
# * an index file, containing fixed width information about revisions,
#
# * a data file, containing variable width data for these revisions,
from __future__ import absolute_import
import errno
import os
import random
import struct
from .. import (
encoding,
error,
node,
pycompat,
util,
)
from . import (
constants,
)
def make_uid(id_size=8):
    """Return a new unique identifier of `id_size` ascii characters.

    The identifier is random.  Hex-encoding doubles the length, so only
    half as many random bytes are drawn.
    """
    raw = os.urandom(id_size // 2)
    return node.hex(raw)
# some special test logic to avoid annoying random output in the tests:
# when HGTEST_UUIDFILE is set, make_uid becomes deterministic, seeded from
# (and persisting state to) that file.
stable_docket_file = encoding.environ.get(b'HGTEST_UUIDFILE')
if stable_docket_file:

    def make_uid(id_size=8):
        # Read the previous "uid" back as the seed for the next one.
        try:
            with open(stable_docket_file, mode='rb') as f:
                seed = f.read().strip()
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            seed = b'04'  # chosen by a fair dice roll. guaranteed to be random
        if pycompat.ispy3:
            iter_seed = iter(seed)
        else:
            # pytype: disable=wrong-arg-types
            iter_seed = (ord(c) for c in seed)
            # pytype: enable=wrong-arg-types
        # some basic circular sum hashing on 64 bits
        int_seed = 0
        low_mask = int('1' * 35, 2)
        for i in iter_seed:
            high_part = int_seed >> 35
            low_part = (int_seed & low_mask) << 28
            int_seed = high_part + low_part + i
        r = random.Random()
        if pycompat.ispy3:
            # version=1 keeps the same sequence as the Python 2 seeding.
            r.seed(int_seed, version=1)
        else:
            r.seed(int_seed)
        # once we drop python 3.8 support we can simply use r.randbytes
        raw = r.getrandbits(id_size * 4)
        assert id_size == 8
        p = struct.pack('>L', raw)
        new = node.hex(p)
        # Persist the freshly generated uid; it seeds the next call.
        with open(stable_docket_file, 'wb') as f:
            f.write(new)
        return new
# Docket format
#
# * 4 bytes: revlog version
# | This is mandatory as docket must be compatible with the previous
# | revlog index header.
# * 1 byte: size of index uuid
# * 1 byte: number of outdated index uuids
# * 1 byte: size of data uuid
# * 1 byte: number of outdated data uuids
# * 1 byte: size of sidedata uuid
# * 1 byte: number of outdated sidedata uuids
# * 4 bytes: size of index-data
# * 4 bytes: pending size of index-data
# * 4 bytes: size of data
# * 4 bytes: pending size of data
# * 4 bytes: size of sidedata
# * 4 bytes: pending size of sidedata
# * 1 byte: default compression header
#
# NOTE(review): the six size fields use struct code 'L', which is 4 bytes
# with the '>' standard-size prefix, although earlier comments described
# them as 8 bytes.  Confirm whether 'Q' was intended -- changing the format
# would break compatibility with existing on-disk dockets.  Field order
# above matches the tuple built in RevlogDocket._serialize.
S_HEADER = struct.Struct(constants.INDEX_HEADER_FMT + b'BBBBBBLLLLLLc')
# Per outdated-uuid entry:
# * 1 byte: size of the uuid
# * 4 bytes: size of the associated file ('L'; see NOTE above)
S_OLD_UID = struct.Struct('>BL')
class RevlogDocket(object):
    """metadata associated with revlog"""

    def __init__(
        self,
        revlog,
        use_pending=False,
        version_header=None,
        index_uuid=None,
        older_index_uuids=(),
        data_uuid=None,
        older_data_uuids=(),
        sidedata_uuid=None,
        older_sidedata_uuids=(),
        index_end=0,
        pending_index_end=0,
        data_end=0,
        pending_data_end=0,
        sidedata_end=0,
        pending_sidedata_end=0,
        default_compression_header=None,
    ):
        self._version_header = version_header
        # a docket opened with pending data must never be written back
        self._read_only = bool(use_pending)
        self._dirty = False
        self._radix = revlog.radix
        self._path = revlog._docket_file
        self._opener = revlog.opener
        self._index_uuid = index_uuid
        self._older_index_uuids = older_index_uuids
        self._data_uuid = data_uuid
        self._older_data_uuids = older_data_uuids
        self._sidedata_uuid = sidedata_uuid
        self._older_sidedata_uuids = older_sidedata_uuids
        # a uuid must not appear in more than one "older" list
        assert not set(older_index_uuids) & set(older_data_uuids)
        assert not set(older_data_uuids) & set(older_sidedata_uuids)
        assert not set(older_index_uuids) & set(older_sidedata_uuids)
        # these asserts should be True as long as we have a single index filename
        assert index_end <= pending_index_end
        assert data_end <= pending_data_end
        assert sidedata_end <= pending_sidedata_end
        self._initial_index_end = index_end
        self._pending_index_end = pending_index_end
        self._initial_data_end = data_end
        self._pending_data_end = pending_data_end
        self._initial_sidedata_end = sidedata_end
        self._pending_sidedata_end = pending_sidedata_end
        # pending readers see data past the "official" end offsets
        if use_pending:
            self._index_end = self._pending_index_end
            self._data_end = self._pending_data_end
            self._sidedata_end = self._pending_sidedata_end
        else:
            self._index_end = self._initial_index_end
            self._data_end = self._initial_data_end
            self._sidedata_end = self._initial_sidedata_end
        self.default_compression_header = default_compression_header

    def index_filepath(self):
        """file path to the current index file associated to this docket"""
        # very simplistic version at first
        if self._index_uuid is None:
            self._index_uuid = make_uid()
        return b"%s-%s.idx" % (self._radix, self._index_uuid)

    def new_index_file(self):
        """switch index file to a new UID

        The previous index UID is moved to the "older" list."""
        old = (self._index_uuid, self._index_end)
        self._older_index_uuids.insert(0, old)
        self._index_uuid = make_uid()
        return self.index_filepath()

    def old_index_filepaths(self, include_empty=True):
        """yield file path to older index files associated to this docket"""
        # very simplistic version at first
        for uuid, size in self._older_index_uuids:
            if include_empty or size > 0:
                yield b"%s-%s.idx" % (self._radix, uuid)

    def data_filepath(self):
        """file path to the current data file associated to this docket"""
        # very simplistic version at first
        if self._data_uuid is None:
            self._data_uuid = make_uid()
        return b"%s-%s.dat" % (self._radix, self._data_uuid)

    def new_data_file(self):
        """switch data file to a new UID

        The previous data UID is moved to the "older" list."""
        old = (self._data_uuid, self._data_end)
        self._older_data_uuids.insert(0, old)
        self._data_uuid = make_uid()
        return self.data_filepath()

    def old_data_filepaths(self, include_empty=True):
        """yield file path to older data files associated to this docket"""
        # very simplistic version at first
        for uuid, size in self._older_data_uuids:
            if include_empty or size > 0:
                yield b"%s-%s.dat" % (self._radix, uuid)

    def sidedata_filepath(self):
        """file path to the current sidedata file associated to this docket"""
        # very simplistic version at first
        if self._sidedata_uuid is None:
            self._sidedata_uuid = make_uid()
        return b"%s-%s.sda" % (self._radix, self._sidedata_uuid)

    def new_sidedata_file(self):
        """switch sidedata file to a new UID

        The previous sidedata UID is moved to the "older" list."""
        old = (self._sidedata_uuid, self._sidedata_end)
        self._older_sidedata_uuids.insert(0, old)
        self._sidedata_uuid = make_uid()
        return self.sidedata_filepath()

    def old_sidedata_filepaths(self, include_empty=True):
        """yield file path to older sidedata files associated to this docket"""
        # very simplistic version at first
        for uuid, size in self._older_sidedata_uuids:
            if include_empty or size > 0:
                yield b"%s-%s.sda" % (self._radix, uuid)

    # The three *_end properties mark the docket dirty on change, so a
    # later write() knows the on-disk copy is stale.
    @property
    def index_end(self):
        return self._index_end

    @index_end.setter
    def index_end(self, new_size):
        if new_size != self._index_end:
            self._index_end = new_size
            self._dirty = True

    @property
    def data_end(self):
        return self._data_end

    @data_end.setter
    def data_end(self, new_size):
        if new_size != self._data_end:
            self._data_end = new_size
            self._dirty = True

    @property
    def sidedata_end(self):
        return self._sidedata_end

    @sidedata_end.setter
    def sidedata_end(self, new_size):
        if new_size != self._sidedata_end:
            self._sidedata_end = new_size
            self._dirty = True

    def write(self, transaction, pending=False, stripping=False):
        """write the modification of disk if any

        This make the new content visible to all process"""
        if not self._dirty:
            return False
        else:
            if self._read_only:
                msg = b'writing read-only docket: %s'
                msg %= self._path
                raise error.ProgrammingError(msg)
            if not stripping:
                # XXX we could, leverage the docket while stripping. However it
                # is not powerful enough at the time of this comment
                transaction.addbackup(self._path, location=b'store')
            with self._opener(self._path, mode=b'w', atomictemp=True) as f:
                f.write(self._serialize(pending=pending))
            # if pending we still need to the write final data eventually
            self._dirty = pending
            return True

    def _serialize(self, pending=False):
        # A "pending" serialization publishes the initial (pre-transaction)
        # ends as official while keeping the real ends for pending readers.
        if pending:
            official_index_end = self._initial_index_end
            official_data_end = self._initial_data_end
            official_sidedata_end = self._initial_sidedata_end
        else:
            official_index_end = self._index_end
            official_data_end = self._data_end
            official_sidedata_end = self._sidedata_end

        # this assert should be True as long as we have a single index filename
        assert official_data_end <= self._data_end
        assert official_sidedata_end <= self._sidedata_end
        data = (
            self._version_header,
            len(self._index_uuid),
            len(self._older_index_uuids),
            len(self._data_uuid),
            len(self._older_data_uuids),
            len(self._sidedata_uuid),
            len(self._older_sidedata_uuids),
            official_index_end,
            self._index_end,
            official_data_end,
            self._data_end,
            official_sidedata_end,
            self._sidedata_end,
            self.default_compression_header,
        )
        s = []
        s.append(S_HEADER.pack(*data))
        # per section: fixed-size (len, size) records first, then the
        # variable-size uuid blobs -- parse_docket relies on this order.
        s.append(self._index_uuid)
        for u, size in self._older_index_uuids:
            s.append(S_OLD_UID.pack(len(u), size))
        for u, size in self._older_index_uuids:
            s.append(u)
        s.append(self._data_uuid)
        for u, size in self._older_data_uuids:
            s.append(S_OLD_UID.pack(len(u), size))
        for u, size in self._older_data_uuids:
            s.append(u)
        s.append(self._sidedata_uuid)
        for u, size in self._older_sidedata_uuids:
            s.append(S_OLD_UID.pack(len(u), size))
        for u, size in self._older_sidedata_uuids:
            s.append(u)
        return b''.join(s)
def default_docket(revlog, version_header):
    """Build a fresh, dirty docket for `revlog`, or None.

    Only revlog formats that use a docket (REVLOGV2, CHANGELOGV2) get one;
    any other version returns None.
    """
    supported = (constants.REVLOGV2, constants.CHANGELOGV2)
    if (version_header & 0xFFFF) not in supported:
        return None
    compression = util.compengines[revlog._compengine].revlogheader()
    docket = RevlogDocket(
        revlog,
        version_header=version_header,
        default_compression_header=compression,
    )
    docket._dirty = True
    return docket
def _parse_old_uids(get_data, count):
    """Read `count` (uuid, file_size) entries through the `get_data` closure.

    All fixed-size headers are consumed first, then the variable-size uuid
    blobs, mirroring the layout written by RevlogDocket._serialize.
    """
    headers = [
        S_OLD_UID.unpack(get_data(S_OLD_UID.size)) for _ in range(count)
    ]
    return [
        (get_data(uid_size), file_size) for uid_size, file_size in headers
    ]
def parse_docket(revlog, data, use_pending=False):
    """given some docket data return a docket object for the given revlog"""
    header = S_HEADER.unpack(data[: S_HEADER.size])

    # this is a mutable closure capture used in `get_data`
    offset = [S_HEADER.size]

    def get_data(size):
        """utility closure to access the `size` next bytes"""
        if offset[0] + size > len(data):
            # XXX better class
            msg = b"docket is too short, expected %d got %d"
            msg %= (offset[0] + size, len(data))
            raise error.Abort(msg)
        raw = data[offset[0] : offset[0] + size]
        offset[0] += size
        return raw

    # NOTE: the consumption order below must mirror the layout produced by
    # RevlogDocket._serialize: header fields interleaved with the
    # variable-size uuid blobs for index, data, then sidedata.
    iheader = iter(header)

    version_header = next(iheader)

    index_uuid_size = next(iheader)
    index_uuid = get_data(index_uuid_size)

    older_index_uuid_count = next(iheader)
    older_index_uuids = _parse_old_uids(get_data, older_index_uuid_count)

    data_uuid_size = next(iheader)
    data_uuid = get_data(data_uuid_size)

    older_data_uuid_count = next(iheader)
    older_data_uuids = _parse_old_uids(get_data, older_data_uuid_count)

    sidedata_uuid_size = next(iheader)
    sidedata_uuid = get_data(sidedata_uuid_size)

    older_sidedata_uuid_count = next(iheader)
    older_sidedata_uuids = _parse_old_uids(get_data, older_sidedata_uuid_count)

    index_size = next(iheader)

    pending_index_size = next(iheader)

    data_size = next(iheader)

    pending_data_size = next(iheader)

    sidedata_size = next(iheader)

    pending_sidedata_size = next(iheader)

    default_compression_header = next(iheader)

    docket = RevlogDocket(
        revlog,
        use_pending=use_pending,
        version_header=version_header,
        index_uuid=index_uuid,
        older_index_uuids=older_index_uuids,
        data_uuid=data_uuid,
        older_data_uuids=older_data_uuids,
        sidedata_uuid=sidedata_uuid,
        older_sidedata_uuids=older_sidedata_uuids,
        index_end=index_size,
        pending_index_end=pending_index_size,
        data_end=data_size,
        pending_data_end=pending_data_size,
        sidedata_end=sidedata_size,
        pending_sidedata_end=pending_sidedata_size,
        default_compression_header=default_compression_header,
    )
    return docket
| |
from flask import Blueprint, request, render_template, session, redirect, url_for
from src.models.categories.categories import CategoryModel, CategoryAddForm
from src.models.startlist.startlist import StartlistModel, StartlistNameModel
from src.models.timedb.timedb import TimeDbModel
from src.models.timedb.timydb import TimyDbModel
import src.models.startlist.startlist_processing as startlist_processing
import src.models.startlist.startlist_alg as startlist_alg
from sqlalchemy import Time
import time
import datetime
import random
from pprint import pprint as pp
# Blueprint grouping every startlist-related view; registered by the app.
startlist_blueprint = Blueprint('startlist', __name__)
@startlist_blueprint.route('/', methods=['GET', 'POST'])
def startlist():
    """Render the overview page listing every startlist entry."""
    rows, row_count = startlist_processing.get_startlist_all_frontend()
    return render_template(
        'startlist/startlist_all.html',
        data=rows,
        length=row_count,
    )
@startlist_blueprint.route('/list_all', methods=['GET', 'POST'])
def startlist_menu():
    """Show a selection menu of all startlists (id, name pairs)."""
    choices = []
    for entry in StartlistNameModel.list_all():
        choices.append((entry.id, entry.name))
    return render_template('startlist/startlist_one_menu.html', data=choices)
@startlist_blueprint.route('/startlist_one', methods=['POST'])
def startlist_one():
    """Display a single startlist chosen from the selection menu."""
    selected_id = request.form['startlist_select']
    selected = StartlistNameModel.get_by_id(selected_id)
    rows = startlist_processing.startlist_generate(selected_id)
    row_count = startlist_processing.startlist_generate_length(selected_id)
    return render_template(
        'startlist/startlist_one.html',
        startlist_name=selected.name,
        data=rows,
        length=row_count,
    )
@startlist_blueprint.route('/list_all_edit', methods=['GET', 'POST'])
def startlist_menu_edit():
    """Show a selection menu of all startlists for editing."""
    choices = []
    for entry in StartlistNameModel.list_all():
        choices.append((entry.id, entry.name))
    return render_template('startlist/startlist_one_menu_edit.html', data=choices)
@startlist_blueprint.route('/startlist_one_edit', methods=['POST'])
def startlist_one_edit():
    """Render the edit view for one startlist, remembering its id in the session."""
    selected_id = request.form['startlist_select']
    # stored so startlist_one_edit_save() knows which startlist to update
    session['startlist_id'] = selected_id
    selected = StartlistNameModel.get_by_id(selected_id)
    rows = startlist_processing.startlist_generate(selected_id)
    row_count = startlist_processing.startlist_generate_length(selected_id)
    rounds, line_count = startlist_processing.startlist_get_rounds_lines(selected_id)
    return render_template(
        'startlist/startlist_one_edit.html',
        startlist_name=selected.name,
        data=rows,
        length=row_count,
        rounds=rounds,
        line_count=line_count,
    )
@startlist_blueprint.route('/startlist_one_edit_save', methods=['POST'])
def startlist_one_edit_save():
    """Persist the edited startlist values and return to the edit menu."""
    edited_id = session['startlist_id']
    parsed_values = startlist_processing.parse_request_form(request.form)
    startlist_processing.update_startlist_records(edited_id, parsed_values)
    return redirect(url_for('startlist.startlist_menu_edit'))
@startlist_blueprint.route('/next', methods=['GET', 'POST'])
def next_round():
    """Store the measured times of the current round and advance the wizard.

    Validates the submitted line->time assignments, maps every time back to
    the startlist record of its starting line, and saves the converted
    timedelta on each record.  On a validation problem the user is sent back
    to the wizard with ``session['wrong_entry']`` set so the wizard renders
    the matching error template.
    """
    if request.method == "POST":
        # Verification that the received values are unique: there cannot
        # be two times assigned to the same starting line.
        if startlist_processing.wizard_input_verification(request.form) is False:
            # lets 'startlist.wizard' know which template to generate
            session['wrong_entry'] = 1
            return redirect(url_for('startlist.wizard'))
        results_position = startlist_processing.wizard_process_received_form(request.form)
        results_id = []
        for _, _, start_position, _, startlist_id in session['startlist_round']:
            results_id.append((startlist_id, results_position[start_position]))
        for startlist_id, time_measured in results_id:
            found_runner = StartlistModel.get_by_startlist_id(startlist_id)
            # If an athlete does not finish, "DNF" may be entered (any case,
            # surrounding whitespace tolerated).  He is assigned the maximum
            # enterable time so he also sorts to the end of the results.
            if time_measured.strip().lower() == "dnf":
                time_measured = "59:59.59"
            try:
                found_runner.time_measured = convert_time_to_delta(time_measured)
                found_runner.save_to_db()
            except ValueError:
                session['wrong_entry'] = 2
                return redirect(url_for('startlist.wizard'))
        plus_session_counter()
    return redirect(url_for('startlist.wizard'))
def convert_time_to_delta(time_entered):
    """Parse a "MM:SS.ff" time string into a datetime.timedelta.

    Leading/trailing whitespace is ignored.  Raises ValueError when the
    string does not match the expected minutes:seconds.fraction format.
    """
    stamp = datetime.datetime.strptime(
        "1 Jan 1970 {}".format(time_entered.strip()),
        '%d %b %Y %M:%S.%f',
    )
    # difference to the epoch yields the elapsed time as a timedelta
    return stamp - datetime.datetime.utcfromtimestamp(0)
@startlist_blueprint.route('/create_wizard', methods=['GET', 'POST'])
def wizard_start():
    """Entry page of the measuring wizard: offers startlists not yet measured."""
    # reset any state left over from a previous wizard run
    clearsession()
    unmeasured = [
        (entry.id, entry.name)
        for entry in StartlistNameModel.list_all()
        if not entry.measured_flag
    ]
    return render_template('startlist/create_new_wizard.html', data=unmeasured)
@startlist_blueprint.route('/get_times', methods=['POST'])
def get_times_from_db():
    """Placeholder endpoint: fetch stored times for a starting position.

    TODO: return real data; currently it only logs what it would use and
    answers with a fixed string.
    """
    # default must be an int (not the string '0') to match type=int
    position = request.form.get('position', 0, type=int)
    times = list(TimeDbModel.list_all())
    print(position)
    print(times)
    return "Hello World"
@startlist_blueprint.route('/wizard', methods=['GET', 'POST'])
def wizard():
    """Run one round of the measuring wizard for the selected startlist.

    Keeps the selected startlist and the per-round state in the session,
    loads the measured times from the external (Timy) database, and renders
    the wizard page -- or one of the error pages when next_round() flagged
    a bad entry via ``session['wrong_entry']``.
    """
    if request.method == 'POST':
        try:
            session['startlist_selected'] = request.form['startlist_select']
        except KeyError:
            # form did not carry a selection; fall back to the session value
            print("error - method wizard")
    try:
        startlist_selected = session['startlist_selected']
    except KeyError:
        # nothing selected yet -> restart the wizard from its entry page
        return redirect(url_for('.wizard_start'))
    startlist_instance = StartlistNameModel.get_by_id(startlist_selected)
    # it does nothing if session['counter'] already exists
    init_session_counter()
    if session['counter'] > startlist_instance.startlist_rounds:
        # all rounds measured: flag the startlist as containing times
        startlist_instance.measured_flag = True
        startlist_instance.save_to_db()
        return redirect(url_for('.wizard_start'))
    found_records = StartlistModel.get_records_by_startlist_id_and_round_number(
        startlist_selected,
        session['counter']
    )
    startlist_round = []
    for stm, ptm in found_records:
        startlist_round.append(
            (ptm.last_name, ptm.first_name, stm.start_position, stm.start_round, stm.id)
        )
    # stored so next_round() can easily map starting lines to startlist ids
    session['startlist_round'] = startlist_round
    startlist_lines = len(startlist_round)
    # times measured by the external database; newest entries, one per line
    db_times_ext = [str(item.time_measured)[2:-4] for item in TimyDbModel.list_all()][-startlist_lines:]
    session['random_times'] = db_times_ext
    progress_now_int = int(round(session['counter'] * 100 / startlist_instance.startlist_rounds))
    # all three templates below take exactly the same context
    context = dict(
        name=startlist_instance.name,
        startlist=startlist_round,
        progress_now=progress_now_int,
        startlist_lines=startlist_lines,
        random_times=db_times_ext,
        rounds_count=startlist_instance.startlist_rounds,
    )
    # Was the page reloaded because next_round() rejected the previous input?
    wrong_entry = session.get('wrong_entry', 0)
    if wrong_entry == 1:
        session['wrong_entry'] = 0
        return render_template('startlist/wizard_wrong_lines_assigned.html', **context)
    if wrong_entry == 2:
        session['wrong_entry'] = 0
        return render_template('startlist/wizard_wrong_time_entered.html', **context)
    return render_template('startlist/wizard.html', **context)
@startlist_blueprint.route('/clear')
def clearsession():
    """Drop all session state (wizard selection, counters, cached times).

    Also called internally by wizard_start(), which ignores the return
    value.  As a routed view it must return a valid response -- the old
    ``return True`` made GET /clear fail with a 500.
    """
    session.clear()
    return redirect(url_for('.wizard_start'))
def init_session_counter():
    """Ensure the round counter exists in the session, starting at 1."""
    if 'counter' not in session:
        session['counter'] = 1
def plus_session_counter():
    """Advance the round counter by one, creating it at 1 when missing."""
    if 'counter' in session:
        session['counter'] += 1
    else:
        session['counter'] = 1
def minus_session_counter():
    """Decrease the round counter by one, creating it at 1 when missing."""
    if 'counter' in session:
        session['counter'] -= 1
    else:
        session['counter'] = 1
@startlist_blueprint.route('/results', methods=['GET', 'POST'])
def results():
    """Menu of startlists that already carry measured times."""
    finished = []
    for entry in StartlistNameModel.list_measured_all():
        finished.append((entry.id, entry.name))
    return render_template('startlist/results_finished_startlists_menu.html', data=finished)
@startlist_blueprint.route('/result_startlist', methods=['POST'])
def results_specific_startlist():
    """Show the result list of one finished startlist."""
    chosen_id = request.form['startlist_select']
    chosen = StartlistNameModel.get_by_id(chosen_id)
    result_rows = startlist_processing.result_list_generate(chosen_id)
    return render_template(
        'startlist/results_specific_startlist.html',
        startlist_name=chosen.name,
        data=result_rows,
    )
@startlist_blueprint.route('/results_all', methods=['GET'])
def results_all():
    """Show the combined results of every finished startlist."""
    combined = startlist_processing.results_all()
    return render_template('startlist/results_finished_startlists.html', data=combined)
@startlist_blueprint.route('/findrunner', methods=['GET', 'POST'])
def find_runner():
    """Placeholder page for searching a runner (feature currently unused)."""
    # NOT USED AT THE MOMENT
    return render_template('startlist/find_runner.html')
@startlist_blueprint.route('/addtime', methods=['GET', 'POST'])
def add_time():
    """Manually attach a measured time to a participant.

    GET renders the entry form; POST parses the submitted "MM:SS.ff" time,
    stores it on the participant's startlist record and shows a
    confirmation page (or an error page for malformed input).
    """
    if request.method == 'POST':
        try:
            user_id = int(request.form['participant'])
            # Reuse the shared parser instead of duplicating the
            # epoch/strptime logic (keeps time handling consistent).
            delta = convert_time_to_delta(request.form['time'])
        except ValueError:
            return render_template('startlist/add_time_wrong.html')
        found_runner = StartlistModel.get_by_participant_id(user_id)
        found_runner.time_measured = delta
        found_runner.save_to_db()
        # the confirmation template displays the parsed wall-clock time
        time_converted = datetime.datetime.utcfromtimestamp(0) + delta
        return render_template('startlist/add_time_added.html', time=time_converted)
    return render_template('startlist/add_time.html')
@startlist_blueprint.route('/create_category', methods=['GET'])
def create_startlist_category():
    """Form for creating a new startlist from a registered category."""
    categories = []
    for category in CategoryModel.list_all():
        categories.append((category.id, category.category_name))
    return render_template('startlist/create_new_list_category.html', categories=categories)
@startlist_blueprint.route('/startlist_created_cat', methods=['POST'])
def generate_startlist_category():
    """Create a new startlist from a category and assign its lines/rounds.

    Reads the name, number of starting lines and category from the form,
    persists the new StartlistNameModel, then lets startlist_processing
    distribute the category's athletes over lines and rounds.
    """
    # route only accepts POST, so no request.method check is needed
    startlist_name = request.form['startlist_name'].strip()
    startlist_lines = request.form['startlist_lines']
    startlist_category = request.form['startlist_category']
    new_startlist = StartlistNameModel(startlist_name, startlist_lines)
    # the first save assigns the primary key that process() needs below
    new_startlist.save_to_db()
    new_startlist.startlist_rounds = startlist_processing.process(
        new_startlist.id,
        startlist_category,
        int(startlist_lines)
    )
    new_startlist.save_to_db()
    return redirect(url_for('.create_startlist_category'))
@startlist_blueprint.route('/create_classification', methods=['GET'])
def create_startlist_classification():
    """Form for building a classification startlist from finished lists."""
    # TODO pass number of athletes in each finished startlist to the template.
    finished = []
    for entry in StartlistNameModel.list_measured_all():
        finished.append((entry.id, entry.name))
    return render_template('startlist/create_new_list_classification.html', startlist_finished=finished)
@startlist_blueprint.route('/startlist_created_class', methods=['POST'])
def generate_startlist_classfication():
    """Create a classification startlist from a finished startlist's results.

    Takes the top N times of the chosen finished startlist (ignoring DNF
    entries), optionally reorders them "from the middle" when there is only
    a single classification run, and generates the new startlist's records.

    NOTE: the function keeps its historical name typo ("classfication")
    because the endpoint name is derived from it and url_for() callers
    depend on it.
    """
    # route only accepts POST, so no request.method check is needed
    startlist_finished_id = request.form['startlist_select']
    startlist_name = request.form['startlist_name'].strip()
    startlist_top_times = int(request.form['startlist_top_times'])
    startlist_lines = request.form['startlist_lines']
    new_startlist = StartlistNameModel(startlist_name, startlist_lines)
    # the first save assigns the primary key needed below
    new_startlist.save_to_db()
    # best N results of the finished startlist, fastest first
    ordered_results = list(
        StartlistModel.get_records_by_startlist_id_order_by_time(startlist_finished_id)
    )[:startlist_top_times]
    # Drop the Participant half of each tuple and skip athletes carrying the
    # sentinel time 59:59.59 -- datetime.timedelta(0, 3599, 590000) -- which
    # marks a DNF.  This form was chosen for code simplicity: at the UZE
    # sprinter event no athlete can genuinely reach such a time.
    dnf_sentinel = datetime.timedelta(0, 3599, 590000)
    ordered_records = [
        startlist_record
        for startlist_record, _ in ordered_results
        if startlist_record.time_measured != dnf_sentinel
    ]
    # With only one classification run, re-order so the fastest athletes
    # start in the middle of the field.
    if startlist_top_times <= int(startlist_lines):
        ordered_records = startlist_alg.order_from_the_middle(ordered_records)
    # generate the record instances, startline numbers and round assignment
    new_startlist.startlist_rounds = startlist_processing.process_classification(
        new_startlist.id,
        ordered_records,
        int(startlist_lines)
    )
    new_startlist.save_to_db()
    return redirect(url_for('.create_startlist_classification'))
def time_random(number_of_random_times):
    """Return a list of pseudo-random "MM:SS.ssss" time strings.

    Minutes run consecutively starting at 10; seconds are uniform in
    [10.0, 60.0), rounded to four decimal places.
    """
    count = int(number_of_random_times)
    return [
        "{0}:{1}".format(minute, round(random.uniform(10.0, 60.0), 4))
        for minute in range(10, 10 + count)
    ]
| |
#!/usr/bin/env python
# Copyright (c) 2013 The MITRE Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""
Scan data streams with Yara using various algorithms.
"""
# Standard Imports
import os
import sys
import errno
import logging
import argparse
import binascii
# Libary Imports
import yara
class ProcessorException(Exception):
    """Raised for errors while configuring, compiling or loading yara rules."""
    pass
class Processor(object):
    """
    A wrapper to Yara.

    Scans data either all at once ('raw' mode) or incrementally in
    fixed-size or sliding-window buffers, accumulating formatted match
    results across calls.
    """
    def __init__(self, rule_files, processing_mode='raw',
                 compiled=False, **kwargs):
        """
        Default initializer.
        Keyword arguments:
        rule_files -- (List) Filepaths to yara rule files.
                      (Ex. ['/path/to/file1', '/path/to/file2'])
        processing_mode -- (String) Mode used in processing data. Allowed
                           options include; fixed_buffer, sliding_window,
                           and raw. Default is raw mode.
        compiled -- (Boolean) If True, treat the provided rule file as compiled.
        Optional arguments:
        "fixed_buffer" processing mode:
            Data will be processed by yara in fixed sized buffers.
            buffer_size -- (Integer) Amount of data to buffer
                           before processing in bytes. Default is
                           1024 bytes.
        "sliding_window" processing mode:
            Data will be processed by yara in fixed sized buffers, but it
            is possible for buffers to "overlap" by controlling the buffer
            increment.
            buffer_size -- (Integer) Amount of data to process in bytes.
                           Default is 1024 bytes.
            window_step -- (Integer) Amount to increment window per chunk.
                           Default is 1 byte.
        Raises IOError for missing/unreadable rule files and
        ProcessorException for rule loading/configuration problems.
        """
        # Get handle to logger
        self.logger = logging.getLogger('yaraprocessor')
        # Validate all file names to ensure they exist and can be read
        for f in rule_files:
            if os.path.isfile(f):
                try:
                    with open(f):
                        pass
                except IOError:
                    raise IOError((errno.EACCES, 'Cannot open/read file.', f))
            else:
                raise IOError((errno.ENOENT, 'Cannot find file.', f))
        if not compiled:
            # uncompiled: _rule_files becomes a {namespace: path} dict
            self._rule_files = self._prepare_rules(rule_files)
            # Try to load the rules into yara
            try:
                self._rules = yara.compile(filepaths=self._rule_files)
            except yara.SyntaxError as e:
                err = ('Rule syntax error. If using compiled rules, you must '
                       'pass the "compiled" argument. Original error: %s' % e)
                raise ProcessorException(err)
            except yara.Error:
                raise
        else: # rules are compiled
            try:
                # yara.load only accepts a single file
                assert(len(rule_files) == 1)
            except AssertionError:
                err = ('Compiled rules must be compiled to one file. Loading '
                       'from compiled rules does not support multiple rule files.')
                raise ProcessorException(err)
            # NOTE: in compiled mode _rule_files is a single path string,
            # not the dict used in the uncompiled branch above
            self._rule_files = rule_files[0]
            try:
                self._rules = yara.load(self._rule_files)
            except yara.Error as e:
                err = ('Generic error loading compiled rules. '
                       'Original error: %s' % e)
                raise ProcessorException(err)
        # Validate that the processing mode is supported
        self._allowed_modes = ['raw', 'fixed_buffer', 'sliding_window']
        if not processing_mode.lower() in self._allowed_modes:
            raise ProcessorException("%s is not a supported processing mode." \
                                     % processing_mode)
        self._processing_mode = processing_mode
        # Optional arguments with defaults
        self._buffer_size = kwargs.get('buffer_size', 1024)
        self._window_step = kwargs.get('window_step', 1)
        # Set window_step to buffer size when processing in fixed buffer mode
        # This makes the analysis code simpler
        if self._processing_mode == 'fixed_buffer':
            self._window_step = self._buffer_size
        # Attribute used to hold data and results to be processed
        self._raw_results = []        # yara match objects, as returned
        self._formatted_results = []  # dicts derived from the match objects
        self.data = ''
        # Private variables for buffering and window processing
        self._current = ''      # full stream received so far (non-raw modes)
        self._next = None
        self._window_index = 0  # index into _current already fed to analyze()
        self._offset = 0        # running offset of analyzed data in the stream
    def __str__(self):
        """
        Pretty way to print a processor.
        """
        # NOTE(review): when rules were loaded pre-compiled, _rule_files is a
        # plain string and .values() would raise AttributeError -- confirm
        # __str__ is only used with uncompiled rule sets.
        s = 'Processor ' + __name__
        if self._rule_files:
            s += ' running with rules ' + ' '.join(self._rule_files.values())
        return s
    def _prepare_rules(self, rules):
        """
        Convert a list of rule files to a dict of rule files.
        Keyword arguments:
        rules -- list of rule files as fully qualified paths
        Yara expects a dictionary of {Namespaces:filepaths}. Returns a
        dictionary of rule files.
        """
        results = {}
        # each file gets a generated namespace: RuleFile0, RuleFile1, ...
        for i, fn in enumerate(rules):
            results['RuleFile%s' % i] = fn
        return results
    def _window(self, sequence, size=2, step=1):
        """
        Returns a sliding window (of width n) over data from the iterable.
        The window increments by 'step'.
        s -> (s0,s1,...s[n-1]), (s0+step,s1+step,...,sn), ...
        """
        i = 0
        while True:
            result = sequence[i: i + size]
            # an empty slice means we ran off the end; note that trailing
            # windows shorter than `size` are still yielded before that
            if not result:
                break
            else:
                i = i + step
                yield result
    def analyze(self, data=None):
        """
        Analyze data with yara.
        Calls yara's "match" function on self.data and
        returns the results returned by match.
        """
        if not data:
            data = self.data
        for r in self._rules.match(data=data):
            result = {'result': r.rule,
                      'strings': [],
                      'subtype': 'scan_result'}
            # each yara string hit is a (offset, identifier, bytes) tuple;
            # report offsets relative to the whole stream, not the chunk
            for s in r.strings:
                result['strings'].append({'offset': self._offset + s[0],
                                          'rule_id': s[1],
                                          'string': binascii.hexlify(s[2])})
            self._raw_results.append(r)
            self._formatted_results.append(result)
        # raw mode advances the stream offset here; the buffered modes
        # advance it chunk-by-chunk in the data setter instead
        if self._processing_mode == 'raw':
            self._offset += len(data)
        return self.results
    @property
    def results(self):
        """
        Get the analysis results.
        """
        return self._formatted_results
    def clear_results(self):
        """
        Clear the current set of results.
        """
        self._raw_results = []
        self._formatted_results = []
    @property
    def data(self):
        """
        Get the data to be analyzed by yara.
        """
        return self._current
    @data.setter
    def data(self, value):
        """
        Set the data to be analyzed by yara.
        This behaves differently based on the processing mode
        being used.
        If set to "raw", data is a simple buffer.
        If set to "fixed_buffer", data will be buffered until that size
        is reached. When reached, the data will automatically be analyzed,
        and the buffer will be cleared. If data is larger than the fixed_buffer
        any extra will be buffered into the next chunk.
        If set to "sliding_window", data will be buffered similar to
        "fixed_buffer" mode. However, the analysis window will increment
        based on the buffer size. For example, with a buffer size set to 5,
        a data stream of '123456789' would be analyzed in the following chunks:
        12345
        23456
        34567
        45678
        56789
        The option "window_step" controls the increment between windows. For
        example, a window step of 2 changes the above example to:
        12345
        34567
        56789
        """
        self._current = value
        if self._processing_mode != 'raw':
            # only analyze once at least one full buffer of not-yet-analyzed
            # data has accumulated past _window_index
            if self._current and \
               len(self._current[self._window_index:]) >= self._buffer_size:
                for chunk in self._window(self._current[self._window_index:],
                                          size=self._buffer_size,
                                          step=self._window_step):
                    # Analyze each chunk and concatenate the results
                    self.analyze(''.join(chunk))
                    if self._processing_mode == 'fixed_buffer':
                        self._offset += len(chunk)
                    elif self._processing_mode == 'sliding_window':
                        self._offset += self._window_step
                # Update the index
                self._window_index = len(self._current)
if __name__ == '__main__':
    """
    Helper code used to test yaraprocessor.
    """
    # Setup logging
    logger = logging.getLogger('yaraprocessor')
    logger.setLevel(logging.DEBUG)
    consoleHandler = logging.StreamHandler(stream=sys.stdout)
    consoleHandler.setFormatter(logging.Formatter(
        '%(asctime)s %(name)-12s %(levelname)-8s%(message)s'))
    logger.addHandler(consoleHandler)
    # Parse command line arguments
    parser = argparse.ArgumentParser(description="Analyze data with Yara")
    parser.add_argument(
        '--mode',
        choices=['raw', 'fixed_buffer', 'sliding_window'],
        default='raw',
        help='Set the operating mode for yara. Default is "raw".')
    parser.add_argument(
        '--input',
        nargs='?',
        type=argparse.FileType('r'),
        required=True,
        help='File to read data from for analysis.')
    parser.add_argument(
        '--rules',
        nargs='*',
        required=True,
        help='Rule files for use in Yara.')
    parser.add_argument(
        '--compiled',
        action='store_true',
        help='Treat provided rule file as compiled. Note, all rules must \
              be compiled to a single file.'
    )
    parser.add_argument(
        '--size',
        type=int,
        default=5,
        help='If using fixed_buffer or sliding_window mode, \
              set the size of the buffer/window. Default is 5.')
    parser.add_argument(
        '--step',
        type=int,
        default=1,
        help='Window step. Default is 1.')
    args = parser.parse_args()
    # read the whole input up front; the modes below feed it to yara
    data = args.input.read()
    logger.debug('Building Processor with rules:')
    for i, each in enumerate(args.rules):
        logger.debug(' %i) %s' % (i, each))
    if args.compiled:
        logger.debug('Treating rule file as compiled.')
    logger.debug('Operating in %s mode.' % args.mode)
    if args.mode != 'raw':
        logger.debug('Buffer/Window size is %s' % args.size)
        logger.debug('Window step is %s' % args.step)
        p = Processor(args.rules, processing_mode=args.mode,
                      compiled=args.compiled, buffer_size=args.size,
                      window_step=args.step)
        # assigning to .data triggers buffered analysis automatically
        p.data += data
    else:
        p = Processor(args.rules, compiled=args.compiled)
        p.data += data
    # NOTE(review): in the buffered modes this analyze() call re-scans the
    # whole accumulated buffer (analyze() falls back to self.data), which
    # may duplicate results already produced by the setter -- confirm.
    results = p.analyze()
    if p.results:
        for match in p.results:
            logger.debug('Match found; %s', match)
| |
# tags.py - read tag info from local repository
#
# Copyright 2009 Matt Mackall <mpm@selenic.com>
# Copyright 2009 Greg Ward <greg@gerg.ca>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
# Currently this module only deals with reading and caching tags.
# Eventually, it could take care of updating (adding/removing/moving)
# tags too.
from node import nullid, bin, hex, short
from i18n import _
import encoding
import error
import errno
def findglobaltags(ui, repo, alltags, tagtypes):
    '''Find global tags in repo by reading .hgtags from every head that
    has a distinct version of it, using a cache to avoid excess work.
    Updates the dicts alltags, tagtypes in place: alltags maps tag name
    to (node, hist) pair (see _readtags() below), and tagtypes maps tag
    name to tag type ("global" in this case).'''
    # This is so we can be lazy and assume alltags contains only global
    # tags when we pass it to _writetagcache().
    assert len(alltags) == len(tagtypes) == 0, \
        "findglobaltags() should be called first"
    (heads, tagfnode, cachetags, shouldwrite) = _readtagcache(ui, repo)
    if cachetags is not None:
        # cache was fully up to date: no need to look at any head
        assert not shouldwrite
        # XXX is this really 100% correct? are there oddball special
        # cases where a global tag should outrank a local tag but won't,
        # because cachetags does not contain rank info?
        _updatetags(cachetags, 'global', alltags, tagtypes)
        return
    seen = set() # set of fnode
    fctx = None
    for head in reversed(heads): # oldest to newest
        assert head in repo.changelog.nodemap, \
            "tag cache returned bogus head %s" % short(head)
        fnode = tagfnode.get(head)
        # only read each distinct .hgtags revision once
        if fnode and fnode not in seen:
            seen.add(fnode)
            # reuse the previous filectx when possible rather than doing a
            # fresh lookup for every head
            if not fctx:
                fctx = repo.filectx('.hgtags', fileid=fnode)
            else:
                fctx = fctx.filectx(fnode)
            filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
            _updatetags(filetags, 'global', alltags, tagtypes)
    # and update the cache (if necessary)
    if shouldwrite:
        _writetagcache(ui, repo, heads, tagfnode, alltags)
def readlocaltags(ui, repo, alltags, tagtypes):
    '''Read local tags in repo. Update alltags and tagtypes.'''
    try:
        data = repo.opener.read("localtags")
    except IOError, inst:
        # a missing localtags file simply means there are no local tags
        if inst.errno != errno.ENOENT:
            raise
        return
    # localtags is in the local encoding; re-encode to UTF-8 on
    # input for consistency with the rest of this module.
    filetags = _readtags(
        ui, repo, data.splitlines(), "localtags",
        recode=encoding.fromlocal)
    _updatetags(filetags, "local", alltags, tagtypes)
def _readtags(ui, repo, lines, fn, recode=None):
    '''Read tag definitions from a file (or any source of lines).
    Return a mapping from tag name to (node, hist): node is the node id
    from the last line read for that name, and hist is the list of node
    ids previously associated with it (in file order). All node ids are
    binary, not hex.'''
    filetags = {} # map tag name to (node, hist)
    count = 0
    def warn(msg):
        # count is read from the enclosing scope at call time
        ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
    for line in lines:
        count += 1
        if not line:
            continue
        parts = line.split(" ", 1)
        if len(parts) != 2:
            warn(_("cannot parse entry"))
            continue
        nodehex, name = parts
        name = name.strip()
        if recode:
            name = recode(name)
        try:
            nodebin = bin(nodehex)
        except TypeError:
            warn(_("node '%s' is not well formed") % nodehex)
            continue
        # push any previously seen node for this tag onto its history
        if name in filetags:
            prev, hist = filetags[name]
            hist.append(prev)
        else:
            hist = []
        filetags[name] = (nodebin, hist)
    return filetags
def _updatetags(filetags, tagtype, alltags, tagtypes):
    '''Incorporate the tag info read from one file into the two
    dictionaries, alltags and tagtypes, that contain all tag
    info (global across all heads plus local).'''
    for name, nodehist in filetags.iteritems():
        if name not in alltags:
            # first time we see this tag: take it as-is
            alltags[name] = nodehist
            tagtypes[name] = tagtype
            continue
        # we prefer alltags[name] if:
        #  it supersedes us OR
        #  mutual supersedes and it has a higher rank
        # otherwise we win because we're tip-most
        anode, ahist = nodehist
        bnode, bhist = alltags[name]
        if (bnode != anode and anode in bhist and
            (bnode not in ahist or len(bhist) > len(ahist))):
            anode = bnode
        else:
            tagtypes[name] = tagtype
        # merge histories so no previously-seen node is forgotten
        ahist.extend([n for n in bhist if n not in ahist])
        alltags[name] = anode, ahist
# The tag cache only stores info about heads, not the tag contents
# from each head. I.e. it doesn't try to squeeze out the maximum
# performance, but is simpler and has a better chance of actually
# working correctly. And this gives the biggest performance win: it
# avoids looking up .hgtags in the manifest for every head, and it
# can avoid calling heads() at all if there have been no changes to
# the repo.
def _readtagcache(ui, repo):
    '''Read the tag cache and return a tuple (heads, fnodes, cachetags,
    shouldwrite). If the cache is completely up-to-date, cachetags is a
    dict of the form returned by _readtags(); otherwise, it is None and
    heads and fnodes are set. In that case, heads is the list of all
    heads currently in the repository (ordered from tip to oldest) and
    fnodes is a mapping from head to .hgtags filenode. If those two are
    set, caller is responsible for reading tag info from each head.'''
    try:
        cachefile = repo.opener('cache/tags', 'r')
        # force reading the file for static-http
        cachelines = iter(cachefile)
    except IOError:
        # no cache file yet; fall through with cachefile = None
        cachefile = None
    # The cache file consists of lines like
    #   <headrev> <headnode> [<tagnode>]
    # where <headrev> and <headnode> redundantly identify a repository
    # head from the time the cache was written, and <tagnode> is the
    # filenode of .hgtags on that head. Heads with no .hgtags file will
    # have no <tagnode>. The cache is ordered from tip to oldest (which
    # is part of why <headrev> is there: a quick visual check is all
    # that's required to ensure correct order).
    #
    # This information is enough to let us avoid the most expensive part
    # of finding global tags, which is looking up <tagnode> in the
    # manifest for each head.
    cacherevs = [] # list of headrev
    cacheheads = [] # list of headnode
    cachefnode = {} # map headnode to filenode
    if cachefile:
        try:
            for line in cachelines:
                # a blank line separates the head section from the tags
                if line == "\n":
                    break
                line = line.split()
                cacherevs.append(int(line[0]))
                headnode = bin(line[1])
                cacheheads.append(headnode)
                if len(line) == 3:
                    fnode = bin(line[2])
                    cachefnode[headnode] = fnode
        except Exception:
            # corruption of the tags cache, just recompute it
            ui.warn(_('.hg/cache/tags is corrupt, rebuilding it\n'))
            cacheheads = []
            cacherevs = []
            cachefnode = {}
    tipnode = repo.changelog.tip()
    tiprev = len(repo.changelog) - 1
    # Case 1 (common): tip is the same, so nothing has changed.
    # (Unchanged tip trivially means no changesets have been added.
    # But, thanks to localrepository.destroyed(), it also means none
    # have been destroyed by strip or rollback.)
    if cacheheads and cacheheads[0] == tipnode and cacherevs[0] == tiprev:
        # cachelines has been consumed up to the blank separator, so the
        # remaining lines are exactly the cached tag entries
        tags = _readtags(ui, repo, cachelines, cachefile.name)
        cachefile.close()
        return (None, None, tags, False)
    if cachefile:
        cachefile.close() # ignore rest of file
    repoheads = repo.heads()
    # Case 2 (uncommon): empty repo; get out quickly and don't bother
    # writing an empty cache.
    if repoheads == [nullid]:
        return ([], {}, {}, False)
    # Case 3 (uncommon): cache file missing or empty.
    # Case 4 (uncommon): tip rev decreased. This should only happen
    # when we're called from localrepository.destroyed(). Refresh the
    # cache so future invocations will not see disappeared heads in the
    # cache.
    # Case 5 (common): tip has changed, so we've added/replaced heads.
    # As it happens, the code to handle cases 3, 4, 5 is the same.
    # N.B. in case 4 (nodes destroyed), "new head" really means "newly
    # exposed".
    if not len(repo.file('.hgtags')):
        # No tags have ever been committed, so we can avoid a
        # potentially expensive search.
        return (repoheads, cachefnode, None, True)
    newheads = [head
                for head in repoheads
                if head not in set(cacheheads)]
    # Now we have to lookup the .hgtags filenode for every new head.
    # This is the most expensive part of finding tags, so performance
    # depends primarily on the size of newheads. Worst case: no cache
    # file, so newheads == repoheads.
    for head in reversed(newheads):
        cctx = repo[head]
        try:
            fnode = cctx.filenode('.hgtags')
            cachefnode[head] = fnode
        except error.LookupError:
            # no .hgtags file on this head
            pass
    # Caller has to iterate over all heads, but can use the filenodes in
    # cachefnode to get to each .hgtags revision quickly.
    return (repoheads, cachefnode, None, True)
def _writetagcache(ui, repo, heads, tagfnode, cachetags):
    """Persist the tag cache.

    Format: one line per head ("rev hexnode [hexfnode]"), a blank
    separator line, then one "hexnode tagname" line per global tag.
    Any failure to open or close the cache file is silently ignored --
    the cache is purely an optimization and will be rebuilt next time.
    """
    try:
        cachefile = repo.opener('cache/tags', 'w', atomictemp=True)
    except (OSError, IOError):
        return

    current_heads = repo.heads()    # for sanity checks below
    for head in heads:
        # temporary sanity checks; these can probably be removed
        # once this code has been in crew for a few weeks
        assert head in repo.changelog.nodemap, \
               'trying to write non-existent node %s to tag cache' % short(head)
        assert head in current_heads, \
               'trying to write non-head %s to tag cache' % short(head)
        assert head != nullid, \
               'trying to write nullid to tag cache'

        # This can't fail because of the first assert above.  When/if we
        # remove that assert, we might want to catch LookupError here
        # and downgrade it to a warning.
        headrev = repo.changelog.rev(head)

        # Heads without a .hgtags filenode get the short two-field form.
        filenode = tagfnode.get(head)
        if filenode:
            cachefile.write('%d %s %s\n' % (headrev, hex(head), hex(filenode)))
        else:
            cachefile.write('%d %s\n' % (headrev, hex(head)))

    # Tag names in the cache are in UTF-8 -- which is the whole reason
    # we keep them in UTF-8 throughout this module.  If we converted
    # them local encoding on input, we would lose info writing them to
    # the cache.
    cachefile.write('\n')
    for tagname, (tagnode, _hist) in cachetags.iteritems():
        cachefile.write("%s %s\n" % (hex(tagnode), tagname))

    try:
        cachefile.close()
    except (OSError, IOError):
        pass
| |
import sys
import numpy
from collections import OrderedDict
import scipy.constants as codata
from PyQt5.QtWidgets import QApplication
from orangewidget import gui
from orangewidget.settings import Setting
from oasys.widgets import gui as oasysgui, congruence
from oasys.widgets.exchange import DataExchangeObject
from orangecontrib.xoppy.widgets.gui.ow_xoppy_widget import XoppyWidget
from orangecontrib.xoppy.util import srundplug
from syned.widget.widget_decorator import WidgetDecorator
import syned.beamline.beamline as synedb
import syned.storage_ring.magnetic_structures.insertion_device as synedid
class OWtc_slit(XoppyWidget):
    """XOPPY widget computing undulator tuning curves as flux through a slit.

    The undulator deflection parameter K is scanned between KMIN and KMAX
    and, for every requested harmonic, srundplug.tuning_curves_on_slit
    reports the photon energy and flux at the flux peak together with the
    emitted power.  Results are plotted per harmonic and exported on the
    standard XOPPY data-exchange channel.

    Fix vs. previous revision: ``plot_histo`` was defined twice verbatim
    (the first definition was dead code, shadowed by the second); the
    duplicate has been removed.
    """

    name = "TC-SLIT"
    id = "orange.widgets.data_tc_slit"
    description = "Undulator Tuning Curves (Flux on a slit)"
    icon = "icons/xoppy_xtc.png"
    priority = 6
    category = ""
    keywords = ["xoppy", "tc_slit"]

    # Persisted settings: electron beam, undulator and slit geometry.
    USEEMITTANCES = Setting(1)
    ELECTRONENERGY = Setting(6.037)
    ELECTRONENERGYSPREAD = Setting(0.001)
    ELECTRONCURRENT = Setting(0.2)
    ELECTRONBEAMSIZEH = Setting(4.99e-05)
    ELECTRONBEAMSIZEV = Setting(3.45e-06)
    ELECTRONBEAMDIVERGENCEH = Setting(0.000107)
    ELECTRONBEAMDIVERGENCEV = Setting(1.16e-06)
    PERIODID = Setting(0.042)
    NPERIODS = Setting(38)
    DISTANCE = Setting(26.0)
    GAPH = Setting(0.00258)
    GAPV = Setting(0.00195)
    KMIN = Setting(0.001)
    KMAX = Setting(3.0)
    KPOINTS = Setting(10)
    HARMONICS = Setting("1")  # sequence of harmonics, separated by ","
    METHOD = Setting(2)  # 0 = US, 1 = URGENT, 2 = SRW

    inputs = WidgetDecorator.syned_input_data()

    def __init__(self):
        super().__init__(show_script_tab=True)

    def build_gui(self):
        """Create one labelled control per setting in the control area."""
        box = oasysgui.widgetBox(self.controlArea, self.name + " Input Parameters", orientation="vertical", width=self.CONTROL_AREA_WIDTH-5)

        idx = -1

        # use emittances combo
        idx += 1
        box1 = gui.widgetBox(box)
        gui.comboBox(box1, self, "USEEMITTANCES",
                     label=self.unitLabels()[idx], addSpace=False,
                     items=['No', 'Yes'],
                     valueType=int, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 0
        idx += 1
        box1 = gui.widgetBox(box)
        self.id_ELECTRONENERGY = oasysgui.lineEdit(box1, self, "ELECTRONENERGY",
                     label=self.unitLabels()[idx], addSpace=False,
                     valueType=float, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 1
        idx += 1
        box1 = gui.widgetBox(box)
        self.id_ELECTRONENERGYSPREAD = oasysgui.lineEdit(box1, self, "ELECTRONENERGYSPREAD",
                     label=self.unitLabels()[idx], addSpace=False,
                     valueType=float, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 2
        idx += 1
        box1 = gui.widgetBox(box)
        self.id_ELECTRONCURRENT = oasysgui.lineEdit(box1, self, "ELECTRONCURRENT",
                     label=self.unitLabels()[idx], addSpace=False,
                     valueType=float, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 3
        idx += 1
        box1 = gui.widgetBox(box)
        self.id_ELECTRONBEAMSIZEH = oasysgui.lineEdit(box1, self, "ELECTRONBEAMSIZEH",
                     label=self.unitLabels()[idx], addSpace=False,
                     valueType=float, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 4
        idx += 1
        box1 = gui.widgetBox(box)
        self.id_ELECTRONBEAMSIZEV = oasysgui.lineEdit(box1, self, "ELECTRONBEAMSIZEV",
                     label=self.unitLabels()[idx], addSpace=False,
                     valueType=float, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 5
        idx += 1
        box1 = gui.widgetBox(box)
        self.id_ELECTRONBEAMDIVERGENCEH = oasysgui.lineEdit(box1, self, "ELECTRONBEAMDIVERGENCEH",
                     label=self.unitLabels()[idx], addSpace=False,
                     valueType=float, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 6
        idx += 1
        box1 = gui.widgetBox(box)
        self.id_ELECTRONBEAMDIVERGENCEV = oasysgui.lineEdit(box1, self, "ELECTRONBEAMDIVERGENCEV",
                     label=self.unitLabels()[idx], addSpace=False,
                     valueType=float, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 7
        idx += 1
        box1 = gui.widgetBox(box)
        self.id_PERIODID = oasysgui.lineEdit(box1, self, "PERIODID",
                     label=self.unitLabels()[idx], addSpace=False,
                     valueType=float, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 8
        idx += 1
        box1 = gui.widgetBox(box)
        self.id_NPERIODS = oasysgui.lineEdit(box1, self, "NPERIODS",
                     label=self.unitLabels()[idx], addSpace=False,
                     valueType=int, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 9
        idx += 1
        box1 = gui.widgetBox(box)
        oasysgui.lineEdit(box1, self, "DISTANCE",
                     label=self.unitLabels()[idx], addSpace=False,
                     valueType=float, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 10
        idx += 1
        box1 = gui.widgetBox(box)
        oasysgui.lineEdit(box1, self, "GAPH",
                     label=self.unitLabels()[idx], addSpace=False,
                     valueType=float, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 11
        idx += 1
        box1 = gui.widgetBox(box)
        oasysgui.lineEdit(box1, self, "GAPV",
                     label=self.unitLabels()[idx], addSpace=False,
                     valueType=float, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 12
        idx += 1
        box1 = gui.widgetBox(box)
        oasysgui.lineEdit(box1, self, "KMIN",
                     label=self.unitLabels()[idx], addSpace=False,
                     valueType=float, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 13
        idx += 1
        box1 = gui.widgetBox(box)
        self.id_KMAX = oasysgui.lineEdit(box1, self, "KMAX",
                     label=self.unitLabels()[idx], addSpace=False,
                     valueType=float, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 14
        idx += 1
        box1 = gui.widgetBox(box)
        oasysgui.lineEdit(box1, self, "KPOINTS",
                     label=self.unitLabels()[idx], addSpace=False,
                     valueType=int, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 15
        idx += 1
        box1 = gui.widgetBox(box)
        # TODO: add validator (comma-separated list of positive integers)
        oasysgui.lineEdit(box1, self, "HARMONICS",
                     label=self.unitLabels()[idx], addSpace=False,
                     valueType=str, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

        # widget index 16
        idx += 1
        box1 = gui.widgetBox(box)
        gui.comboBox(box1, self, "METHOD",
                     label=self.unitLabels()[idx], addSpace=False,
                     items=['US', 'URGENT', 'SRW'],
                     valueType=int, orientation="horizontal", labelWidth=250)
        self.show_at(self.unitFlags()[idx], box1)

    def unitLabels(self):
        """Labels shown next to each control, in build_gui order."""
        return ["Use emittances","Electron Energy [GeV]", "Electron Energy Spread", "Electron Current [A]",
                "Electron Beam Size H [m]", "Electron Beam Size V [m]",
                "Electron Beam Divergence H [rad]", "Electron Beam Divergence V [rad]",
                "Period ID [m]", "Number of periods",
                "Distance to slit [m]", "Slit gap H [m]", "Slit gap V [m]",
                "K Min", "K Max", "Number of K Points", "harmonics (e.g: 1,3)", "calculation code"]

    def unitFlags(self):
        """Visibility expressions for each control, in build_gui order."""
        return ["True", "True", "self.USEEMITTANCES == 1 and self.METHOD != 1", "True",
                "self.USEEMITTANCES == 1", "self.USEEMITTANCES == 1",
                "self.USEEMITTANCES == 1", "self.USEEMITTANCES == 1",
                "True", "True",
                "True", "True", "True",
                "True", "True", "True", "True", "True"]

    def get_help_name(self):
        return 'tc_slit'

    def check_fields(self):
        """Validate/normalize the user inputs; raises on invalid values."""
        self.ELECTRONENERGY = congruence.checkStrictlyPositiveNumber(self.ELECTRONENERGY, "Electron Energy")
        # Energy spread is not used by URGENT (METHOD == 1).
        if not self.METHOD == 1: self.ELECTRONENERGYSPREAD = congruence.checkPositiveNumber(self.ELECTRONENERGYSPREAD, "Electron Energy Spread")
        self.ELECTRONCURRENT = congruence.checkStrictlyPositiveNumber(self.ELECTRONCURRENT, "Electron Current")
        self.ELECTRONBEAMSIZEH = congruence.checkPositiveNumber(self.ELECTRONBEAMSIZEH, "Electron Beam Size H")
        self.ELECTRONBEAMSIZEV = congruence.checkPositiveNumber(self.ELECTRONBEAMSIZEV, "Electron Beam Size V")
        self.ELECTRONBEAMDIVERGENCEH = congruence.checkPositiveNumber(self.ELECTRONBEAMDIVERGENCEH, "Electron Beam Divergence H")
        self.ELECTRONBEAMDIVERGENCEV = congruence.checkPositiveNumber(self.ELECTRONBEAMDIVERGENCEV, "Electron Beam Divergence V")
        self.PERIODID = congruence.checkStrictlyPositiveNumber(self.PERIODID, "Period ID")
        self.NPERIODS = congruence.checkStrictlyPositiveNumber(self.NPERIODS, "Number of Periods")
        self.DISTANCE = congruence.checkPositiveNumber(self.DISTANCE, "Distance to slit")
        self.GAPH = congruence.checkPositiveNumber(self.GAPH, "Slit gap H")
        self.GAPV = congruence.checkPositiveNumber(self.GAPV, "Slit gap V")
        self.KMIN = congruence.checkPositiveNumber(self.KMIN, "K Min")
        self.KMAX = congruence.checkStrictlyPositiveNumber(self.KMAX, "K Max")
        congruence.checkLessThan(self.KMIN, self.KMAX, "K Min", "K Max")
        self.KPOINTS = congruence.checkStrictlyPositiveNumber(self.KPOINTS, "Number of K Points")
        # TODO: validate HARMONICS (comma-separated positive integers).

    def do_xoppy_calculation(self):
        return self.xoppy_calc_tc_slit()

    def extract_data_from_xoppy_output(self, calculation_output):
        """Repack tuning-curve arrays into a per-harmonic table list.

        Table columns: 0 energy at flux peak [eV], 1 flux
        [photons/s/0.1%bw], 2 spectral power [W/eV], 3 K value, 4 power.
        Returns a DataExchangeObject carrying ``xoppy_data_harmonics``.
        """
        K_scan, harmonics, P_scan, energy_values_at_flux_peak, flux_values = calculation_output

        harmonics_data = []
        for ih in range(len(harmonics)):
            harmonic_number = int(harmonics[ih])

            data = numpy.zeros((K_scan.size, 5))
            data[:, 0] = numpy.array(energy_values_at_flux_peak[:, ih])
            data[:, 1] = numpy.array(flux_values[:, ih])
            # spectral power from flux in 0.1%bw: flux * e * 1e3
            data[:, 2] = numpy.array(flux_values[:, ih]) * codata.e * 1e3
            data[:, 3] = numpy.array(K_scan)
            data[:, 4] = numpy.array(P_scan)
            harmonics_data.append([harmonic_number, data])

        # send exchange
        calculated_data = DataExchangeObject("XOPPY", self.get_data_exchange_widget_name())

        try:
            calculated_data.add_content("xoppy_data_harmonics", harmonics_data)
            calculated_data.add_content("plot_x_col", 1)
            calculated_data.add_content("plot_y_col", 2)
        except:
            pass
        try:
            # NOTE(review): 4 labels for a 5-column table -- confirm intended.
            calculated_data.add_content("labels",["Photon energy [eV]","Flux [photons/s/0.1%bw]","Ky","Power [W]"])
        except:
            pass

        return calculated_data

    def plot_histo(self, x, y, progressBarValue, tabs_canvas_index, plot_canvas_index, title="", xtitle="", ytitle="",
                   log_x=False, log_y=False, harmonic=1, color='blue', control=True):
        """Add one harmonic's curve to a plot tab.

        Each harmonic gets a deterministic RGB color derived from its
        number; curves accumulate (replace=False) so all harmonics share
        the same plot.  (A verbatim duplicate of this method was removed.)
        """
        h_title = "Harmonic " + str(harmonic)

        # Two-digit hex channels clamped to 255, varying with harmonic.
        hex_r = hex(min(255, 128 + harmonic*10))[2:].upper()
        hex_g = hex(min(255, 20 + harmonic*15))[2:].upper()
        hex_b = hex(min(255, harmonic*10))[2:].upper()
        if len(hex_r) == 1: hex_r = "0" + hex_r
        if len(hex_g) == 1: hex_g = "0" + hex_g
        if len(hex_b) == 1: hex_b = "0" + hex_b

        super().plot_histo(x, y, progressBarValue, tabs_canvas_index, plot_canvas_index, h_title, xtitle, ytitle,
                           log_x, log_y, color="#" + hex_r + hex_g + hex_b, replace=False, control=control)

        self.plot_canvas[plot_canvas_index].setGraphTitle(title)
        self.plot_canvas[plot_canvas_index].setDefaultPlotLines(True)
        self.plot_canvas[plot_canvas_index].setDefaultPlotPoints(True)

    def plot_results(self, calculated_data, progressBarValue=80):
        """Plot every harmonic on every result tab from exchange data."""
        if not self.view_type == 0:
            if not calculated_data is None:
                self.view_type_combo.setEnabled(False)

                xoppy_data_harmonics = calculated_data.get_content("xoppy_data_harmonics")

                titles = self.getTitles()
                xtitles = self.getXTitles()
                ytitles = self.getYTitles()

                progress_bar_step = (100-progressBarValue)/len(titles)

                for index in range(0, len(titles)):
                    x_index, y_index = self.getVariablesToPlot()[index]
                    log_x, log_y = self.getLogPlot()[index]

                    if not self.plot_canvas[index] is None:
                        self.plot_canvas[index].clear()

                    try:
                        for h_index in range(0, len(xoppy_data_harmonics)):
                            self.plot_histo(xoppy_data_harmonics[h_index][1][:, x_index],
                                            xoppy_data_harmonics[h_index][1][:, y_index],
                                            progressBarValue + ((index+1)*progress_bar_step),
                                            tabs_canvas_index=index,
                                            plot_canvas_index=index,
                                            title=titles[index],
                                            xtitle=xtitles[index],
                                            ytitle=ytitles[index],
                                            log_x=log_x,
                                            log_y=log_y,
                                            harmonic=xoppy_data_harmonics[h_index][0],
                                            control=True)

                        # Dummy white point so the legend hint is shown.
                        self.plot_canvas[index].addCurve(numpy.zeros(1),
                                                         numpy.array([max(xoppy_data_harmonics[h_index][1][:, y_index])]),
                                                         "Click on curve to highlight it",
                                                         xlabel=xtitles[index], ylabel=ytitles[index],
                                                         symbol='', color='white')
                    except Exception as e:
                        self.view_type_combo.setEnabled(True)
                        raise Exception("Data not plottable: bad content\n" + str(e))

                self.view_type_combo.setEnabled(True)
                self.tabs.setCurrentIndex(0)
            else:
                raise Exception("Empty Data")

    def get_data_exchange_widget_name(self):
        return "TC_SLIT"

    def getTitles(self):
        return ["Flux on slit","Spectral power on slit","Kv","Total power on slit"]

    def getXTitles(self):
        return ["Energy (eV)","Energy (eV)","Energy (eV)","Kv"]

    def getYTitles(self):
        return ["Flux (photons/s/0.1%bw)","Spectral power (W/eV)","Kv","Total power (W)"]

    def getVariablesToPlot(self):
        # (x column, y column) pairs into the per-harmonic data table.
        return [(0, 1), (0, 2), (0, 3), (3, 4)]

    def getLogPlot(self):
        return [(False, False), (False, False), (False, False), (False, False)]

    def xoppy_calc_tc_slit(self):
        """Run the tuning-curve scan and emit the reproduction script.

        Returns (K_scan, harmonics, power_array,
        energy_values_at_flux_peak, flux_values) as produced by
        srundplug.tuning_curves_on_slit.
        """
        bl = OrderedDict()
        bl['ElectronBeamDivergenceH'] = self.ELECTRONBEAMDIVERGENCEH
        bl['ElectronBeamDivergenceV'] = self.ELECTRONBEAMDIVERGENCEV
        bl['ElectronBeamSizeH'] = self.ELECTRONBEAMSIZEH
        bl['ElectronBeamSizeV'] = self.ELECTRONBEAMSIZEV
        bl['ElectronCurrent'] = self.ELECTRONCURRENT
        bl['ElectronEnergy'] = self.ELECTRONENERGY
        bl['ElectronEnergySpread'] = self.ELECTRONENERGYSPREAD
        bl['NPeriods'] = self.NPERIODS
        bl['PeriodID'] = self.PERIODID
        bl['distance'] = self.DISTANCE
        bl['gapH'] = self.GAPH
        bl['gapV'] = self.GAPV
        bl['gapHcenter'] = 0.0
        bl['gapVcenter'] = 0.0

        zero_emittance = not self.USEEMITTANCES

        if self.METHOD == 0:
            code = "us"
        elif self.METHOD == 1:
            code = "urgent"
        elif self.METHOD == 2:
            code = "srw"

        harmonics = str(self.HARMONICS).split(",")

        K_scan,harmonics,power_array, energy_values_at_flux_peak,flux_values = srundplug.tuning_curves_on_slit(bl,
                Kmin=self.KMIN,
                Kmax=self.KMAX,
                Kpoints=self.KPOINTS,
                harmonics=harmonics,
                zero_emittance=zero_emittance,
                do_plot_peaks=False,
                code=code)

        if zero_emittance:
            print("\nNo emittance calculation")
        print("Done")

        # write python script in standard output
        dict_parameters = {
            'ElectronBeamDivergenceH' : self.ELECTRONBEAMDIVERGENCEH,
            'ElectronBeamDivergenceV' : self.ELECTRONBEAMDIVERGENCEV,
            'ElectronBeamSizeH'       : self.ELECTRONBEAMSIZEH,
            'ElectronBeamSizeV'       : self.ELECTRONBEAMSIZEV,
            'ElectronCurrent'         : self.ELECTRONCURRENT,
            'ElectronEnergy'          : self.ELECTRONENERGY,
            'ElectronEnergySpread'    : self.ELECTRONENERGYSPREAD,
            'NPeriods'                : self.NPERIODS,
            'PeriodID'                : self.PERIODID,
            'distance'                : self.DISTANCE,
            'gapH'                    : self.GAPH,
            'gapV'                    : self.GAPV,
            'gapHcenter'              : 0.0,
            'gapVcenter'              : 0.0,
            'HARMONICS'               : self.HARMONICS,
            'Kmin'                    : self.KMIN,
            'Kmax'                    : self.KMAX,
            'Kpoints'                 : self.KPOINTS,
            'harmonics'               : harmonics,
            'zero_emittance'          : zero_emittance,
            'do_plot_peaks'           : False,
            'code'                    : code,
        }
        self.xoppy_script.set_code(self.script_template().format_map(dict_parameters))

        return K_scan,harmonics,power_array,energy_values_at_flux_peak,flux_values

    def script_template(self):
        """Template (filled by str.format_map) reproducing the calculation."""
        return """
#
# script to make the calculations (created by XOPPY:tc_slit)
#
from collections import OrderedDict
from orangecontrib.xoppy.util import srundplug
bl = OrderedDict()
bl['ElectronBeamDivergenceH'] = {ElectronBeamDivergenceH}
bl['ElectronBeamDivergenceV'] = {ElectronBeamDivergenceV}
bl['ElectronBeamSizeH'] = {ElectronBeamSizeH}
bl['ElectronBeamSizeV'] = {ElectronBeamSizeV}
bl['ElectronCurrent'] = {ElectronCurrent}
bl['ElectronEnergy'] = {ElectronEnergy}
bl['ElectronEnergySpread'] = {ElectronEnergySpread}
bl['NPeriods'] = {NPeriods}
bl['PeriodID'] = {PeriodID}
bl['distance'] = {distance}
bl['gapH'] = {gapH}
bl['gapV'] = {gapV}
bl['gapHcenter'] = 0.0
bl['gapVcenter'] = 0.0
harmonics = "{HARMONICS}".split(",")
K_scan,harmonics,power_array, energy_values_at_flux_peak,flux_values = srundplug.tuning_curves_on_slit(bl,
        Kmin={Kmin},
        Kmax={Kmax},
        Kpoints={Kpoints},
        harmonics=harmonics,
        zero_emittance={zero_emittance},
        do_plot_peaks={do_plot_peaks},
        code="{code}")
from srxraylib.plot.gol import plot
plot(energy_values_at_flux_peak,flux_values,xtitle="Photon energy [eV]",ytitle="Flux on slit [photons/s/0.1%bw]")
#
# end script
#
"""

    def receive_syned_data(self, data):
        """Fill machine/ID fields from a syned Beamline and lock them."""
        if isinstance(data, synedb.Beamline):
            if not data._light_source is None and isinstance(data._light_source._magnetic_structure, synedid.InsertionDevice):
                light_source = data._light_source

                self.ELECTRONENERGY = light_source._electron_beam._energy_in_GeV
                self.ELECTRONENERGYSPREAD = light_source._electron_beam._energy_spread
                self.ELECTRONCURRENT = light_source._electron_beam._current

                x, xp, y, yp = light_source._electron_beam.get_sigmas_all()

                self.ELECTRONBEAMSIZEH = x
                self.ELECTRONBEAMSIZEV = y
                self.ELECTRONBEAMDIVERGENCEH = xp
                self.ELECTRONBEAMDIVERGENCEV = yp
                self.PERIODID = light_source._magnetic_structure._period_length
                self.NPERIODS = light_source._magnetic_structure._number_of_periods
                self.KMAX = light_source._magnetic_structure._K_vertical

                self.set_enabled(False)
            else:
                self.set_enabled(True)
        else:
            self.set_enabled(True)

    def set_enabled(self, value):
        """Enable (True) or disable the fields syned data can overwrite."""
        enabled = (value == True)
        for widget in (self.id_ELECTRONENERGY,
                       self.id_ELECTRONENERGYSPREAD,
                       self.id_ELECTRONBEAMSIZEH,
                       self.id_ELECTRONBEAMSIZEV,
                       self.id_ELECTRONBEAMDIVERGENCEH,
                       self.id_ELECTRONBEAMDIVERGENCEV,
                       self.id_ELECTRONCURRENT,
                       self.id_PERIODID,
                       self.id_NPERIODS,
                       self.id_KMAX):
            widget.setEnabled(enabled)
if __name__ == "__main__":
    # Manual smoke test: run the widget standalone.
    application = QApplication(sys.argv)
    widget = OWtc_slit()
    widget.show()
    application.exec()
    widget.saveSettings()
| |
import collections
import json as jsonlib
import os
import random
import re
from operator import attrgetter
from urlparse import urljoin
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.forms import CheckboxInput
from django.utils import translation
from django.utils.encoding import smart_unicode
from django.template import defaultfilters
import caching.base as caching
import jinja2
import six
from babel.support import Format
from jingo import register, env
# Needed to make sure our own |f filter overrides jingo's one.
from jingo import helpers # noqa
from jingo_minify.helpers import (_build_html, _get_compiled_css_url, get_path,
is_external)
from tower import ugettext as _, strip_whitespace
import amo
from amo import utils, urlresolvers
from constants.licenses import PERSONA_LICENSES_IDS
from translations.query import order_by_translation
from translations.helpers import truncate
# Yanking filters from Django.
register.filter(defaultfilters.slugify)
# Registering some utils as filters:
# urlparams is kept as a module-level name too -- it is called directly
# by helpers below (e.g. url(), login_link()).
urlparams = register.filter(utils.urlparams)
register.filter(utils.epoch)
register.filter(utils.isotime)
register.function(dict)
register.function(utils.randslice)
@register.filter
def link(item):
    """Render *item* as an anchor to its URL, escaping the name."""
    href = item.get_url_path()
    text = jinja2.escape(item.name)
    return jinja2.Markup("""<a href="%s">%s</a>""" % (href, text))
@register.filter
def xssafe(value):
    """
    Like |safe but for strings with interpolation.

    By using |xssafe you assert that you have written tests proving an
    XSS can't happen here.
    """
    markup = jinja2.Markup(value)
    return markup
@register.filter
def babel_datetime(dt, format='medium'):
    """Locale-aware datetime formatting; '' for falsy input."""
    if not dt:
        return ''
    return _get_format().datetime(dt, format=format)
@register.filter
def babel_date(date, format='medium'):
    """Locale-aware date formatting; '' for falsy input."""
    if not date:
        return ''
    return _get_format().date(date, format=format)
@register.function
def locale_url(url):
    """Take a URL and give it the locale prefix."""
    prefixer = urlresolvers.get_url_prefix()
    script = prefixer.request.META['SCRIPT_NAME']
    return '/'.join([script, prefixer.locale, url.lstrip('/')])
@register.inclusion_tag('includes/refinements.html')
@jinja2.contextfunction
def refinements(context, items, title, thing):
    """Render the refinements include with the current template context."""
    ctx = dict(context.items())
    ctx.update(items=items, title=title, thing=thing)
    return ctx
@register.function
def url(viewname, *args, **kwargs):
    """Helper for Django's ``reverse`` in templates."""
    # Helper-specific options are popped so reverse() only sees view kwargs.
    add_prefix = kwargs.pop('add_prefix', True)
    host = kwargs.pop('host', '')
    src = kwargs.pop('src', '')
    reversed_path = urlresolvers.reverse(viewname,
                                         args=args,
                                         kwargs=kwargs,
                                         add_prefix=add_prefix)
    full_url = '%s%s' % (host, reversed_path)
    if src:
        full_url = urlparams(full_url, src=src)
    return full_url
@register.function
def services_url(viewname, *args, **kwargs):
    """Helper for ``url`` with host=SERVICES_URL."""
    kwargs['host'] = settings.SERVICES_URL
    return url(viewname, *args, **kwargs)
@register.filter
def paginator(pager):
    """Render the standard paginator HTML for *pager*."""
    rendered = Paginator(pager).render()
    return rendered
@register.filter
def impala_paginator(pager):
    """Render the Impala-style paginator template."""
    template = env.get_template('amo/impala/paginator.html')
    return jinja2.Markup(template.render({'pager': pager}))
@register.filter
def mobile_paginator(pager):
    """Render the mobile paginator template."""
    template = env.get_template('amo/mobile/paginator.html')
    return jinja2.Markup(template.render({'pager': pager}))
@register.filter
def mobile_impala_paginator(pager):
    """Impala-style paginator that is easier to mobilefy."""
    template = env.get_template('amo/mobile/impala_paginator.html')
    return jinja2.Markup(template.render({'pager': pager}))
@register.function
def is_mobile(app):
    """True when *app* is the mobile application constant."""
    return app == amo.MOBILE
@register.function
def sidebar(app):
    """Populates the sidebar with (categories, types)."""
    # Deferred import: avoids a circular import with addons.models.
    from addons.models import Category
    if app is None:
        return [], []
    # We muck with query to make order_by and extra_order_by play nice.
    q = Category.objects.filter(application=app.id, weight__gte=0,
                                type=amo.ADDON_EXTENSION)
    categories = order_by_translation(q, 'name')
    # Weight takes precedence over the translated-name ordering.
    categories.query.extra_order_by.insert(0, 'weight')
    Type = collections.namedtuple('Type', 'id name url')
    base = urlresolvers.reverse('home')
    # 99 is a pseudo-type id for Collections (not a real add-on type).
    types = [Type(99, _('Collections'), base + 'collections/')]
    # Add-on types that get their own browse landing page.
    shown_types = {
        amo.ADDON_PERSONA: urlresolvers.reverse('browse.personas'),
        amo.ADDON_DICT: urlresolvers.reverse('browse.language-tools'),
        amo.ADDON_SEARCH: urlresolvers.reverse('browse.search-tools'),
        amo.ADDON_THEME: urlresolvers.reverse('browse.themes'),
    }
    # Dictionaries get a friendlier title than the default ADDON_TYPES one.
    titles = dict(amo.ADDON_TYPES,
                  **{amo.ADDON_DICT: _('Dictionaries & Language Packs')})
    for type_, url in shown_types.items():
        if type_ in app.types:
            types.append(Type(type_, titles[type_], url))
    return categories, sorted(types, key=lambda x: x.name)
class Paginator(object):
    """Compute the visible page window for a pager and render it.

    Construction annotates the pager in place with ``page_range``,
    ``dotted_upper`` and ``dotted_lower`` for the template.
    """

    def __init__(self, pager):
        self.pager = pager

        self.max = 10                   # widest window of page links shown
        self.span = (self.max - 1) / 2  # links on each side of the current page

        self.page = pager.number
        self.num_pages = pager.paginator.num_pages
        self.count = pager.paginator.count

        pager.page_range = self.range()
        pager.dotted_upper = self.num_pages not in pager.page_range
        pager.dotted_lower = 1 not in pager.page_range

    def range(self):
        """Return a list of page numbers to show in the paginator."""
        page, total, span = self.page, self.num_pages, self.span
        if total < self.max:
            # Everything fits: show all pages.
            lo, hi = 0, total
        elif page < span + 1:
            # Near the start: pin the window to the left edge.
            lo, hi = 0, span * 2
        elif page > total - span:
            # Near the end: pin the window to the right edge.
            lo, hi = total - span * 2, total
        else:
            # Middle: center the window on the current page.
            lo, hi = page - span, page + span - 1
        return range(max(lo + 1, 1), min(total, hi) + 1)

    def render(self):
        ctx = {'pager': self.pager, 'num_pages': self.num_pages,
               'count': self.count}
        return jinja2.Markup(env.get_template('amo/paginator.html').render(ctx))
def _get_format():
    """Babel Format object for the locale of the active Django language."""
    active = translation.get_language()
    return Format(utils.get_locale_from_lang(active))
@register.filter
def numberfmt(num, format=None):
    """Locale-aware decimal number formatting."""
    fmt = _get_format()
    return fmt.decimal(num, format)
@register.filter
def currencyfmt(num, currency):
    """Locale-aware currency formatting; '' for None."""
    if num is None:
        return ''
    return _get_format().currency(num, currency)
def page_name(app=None):
    """Determine the correct page name for the given app (or no app)."""
    if not app:
        return _('Add-ons')
    return _(u'Add-ons for {0}').format(app.pretty)
@register.function
@jinja2.contextfunction
def login_link(context):
    """Login URL that returns the user to the current page afterwards."""
    next_url = context['request'].path
    qs = context['request'].GET.urlencode()
    if qs:
        next_url += '?' + qs
    return urlparams(urlresolvers.reverse('users.login'), to=next_url)
@register.function
@jinja2.contextfunction
def page_title(context, title):
    """Suffix *title* with the site-wide page name for the current app."""
    base_title = page_name(context['request'].APP)
    return u'%s :: %s' % (smart_unicode(title), base_title)
@register.function
@jinja2.contextfunction
def breadcrumbs(context, items=None, add_default=True, crumb_size=40):
    """
    show a list of breadcrumbs. If url is None, it won't be a link.
    Accepts: [(url, label)]

    *items* defaults to None instead of a shared ``list()`` created at
    definition time (mutable-default pitfall); behavior is unchanged.
    """
    if items is None:
        items = []
    if add_default:
        app = context['request'].APP
        crumbs = [(urlresolvers.reverse('home'), page_name(app))]
    else:
        crumbs = []

    # add user-defined breadcrumbs
    if items:
        try:
            crumbs += items
        except TypeError:
            # A single (url, label) tuple rather than a list of them.
            crumbs.append(items)

    crumbs = [(url, truncate(label, crumb_size)) for (url, label) in crumbs]
    c = {'breadcrumbs': crumbs}
    t = env.get_template('amo/breadcrumbs.html').render(c)
    return jinja2.Markup(t)
@register.function
@jinja2.contextfunction
def impala_breadcrumbs(context, items=None, add_default=True, crumb_size=40):
    """
    show a list of breadcrumbs. If url is None, it won't be a link.
    Accepts: [(url, label)]

    *items* defaults to None instead of a shared ``list()`` created at
    definition time (mutable-default pitfall); behavior is unchanged.
    """
    if items is None:
        items = []
    if add_default:
        base_title = page_name(context['request'].APP)
        crumbs = [(urlresolvers.reverse('home'), base_title)]
    else:
        crumbs = []

    # add user-defined breadcrumbs
    if items:
        try:
            crumbs += items
        except TypeError:
            # A single (url, label) tuple rather than a list of them.
            crumbs.append(items)

    crumbs = [(url, truncate(label, crumb_size)) for (url, label) in crumbs]
    c = {'breadcrumbs': crumbs, 'has_home': add_default}
    t = env.get_template('amo/impala/breadcrumbs.html').render(c)
    return jinja2.Markup(t)
@register.filter
def json(s):
    """Serialize *s* as a JSON string."""
    return jsonlib.dumps(s)
@register.filter
def absolutify(url, site=None):
    """Takes a URL and prepends the SITE_URL"""
    # Already absolute (any scheme starting with 'http') -- pass through.
    if url.startswith('http'):
        return url
    return urljoin(site or settings.SITE_URL, url)
@register.filter
def strip_controls(s):
    """
    Strips control characters from a string.
    """
    # Map every control character except \n (10) and \r (13) to None.
    control_trans = {n: None for n in xrange(32) if n not in [10, 13]}
    stripped = unicode(s).translate(control_trans)
    # Preserve markup-safety of the input.
    return jinja2.Markup(stripped) if isinstance(s, jinja2.Markup) else stripped
@register.filter
def strip_html(s, just_kidding=False):
    """Strips HTML. Confirm lets us opt out easily."""
    if just_kidding:
        return s
    if not s:
        return ''
    # Two passes, as before: the second sub catches tag-like text that the
    # first pass may have exposed (presumably intentional -- verify).
    once = re.sub(r'<.*?>', '', smart_unicode(s, errors='ignore'))
    return re.sub(r'<.*?>', '', once)
@register.filter
def external_url(url):
    """Bounce a URL off outgoing.mozilla.org."""
    bounced = urlresolvers.get_outgoing_url(unicode(url))
    return bounced
@register.filter
def shuffle(sequence):
    """Shuffle a sequence IN PLACE and return it."""
    random.shuffle(sequence)
    return sequence
@register.function
def license_link(license):
    """Link to a code license, including icon where applicable."""
    # If passed in an integer, try to look up the License.
    from versions.models import License
    if isinstance(license, (long, int)):
        if license in PERSONA_LICENSES_IDS:
            # Grab built-in license.
            license = PERSONA_LICENSES_IDS[license]
        else:
            # Grab custom license.
            qs = License.objects.filter(id=license)
            if not qs.exists():
                return ''
            license = qs[0]
    elif not license:
        return ''

    if not getattr(license, 'builtin', True):
        return _('Custom License')

    template = env.get_template('amo/license_link.html')
    return jinja2.Markup(template.render({'license': license}))
@register.function
def field(field, label=None, **attrs):
    """Render a form field with its errors, label and widget."""
    if label is not None:
        field.label = label
    # HTML from Django is already escaped.
    markup = u'%s<p>%s%s</p>' % (field.errors, field.label_tag(),
                                 field.as_widget(attrs=attrs))
    return jinja2.Markup(markup)
@register.inclusion_tag('amo/category-arrow.html')
@jinja2.contextfunction
def category_arrow(context, key, prefix):
    """Render the category-arrow include with key/prefix added."""
    ctx = dict(context.items())
    ctx.update(key=key, prefix=prefix)
    return ctx
@register.filter
def timesince(time):
    """Localized '<delta> ago' string; empty for falsy input."""
    if not time:
        return u''
    ago = defaultfilters.timesince(time)
    # L10n: relative time in the past, like '4 days ago'
    return _(u'{0} ago').format(ago)
@register.inclusion_tag('amo/recaptcha.html')
@jinja2.contextfunction
def recaptcha(context, form):
    """Render the reCAPTCHA include with *form* in the context."""
    ctx = dict(context.items())
    ctx.update(form=form)
    return ctx
@register.filter
def is_choice_field(value):
    """True if *value* is a bound field rendered with a CheckboxInput.

    Returns an explicit False (instead of the previous implicit None,
    which was equally falsy) when *value* lacks ``field.widget``.
    """
    try:
        return isinstance(value.field.widget, CheckboxInput)
    except AttributeError:
        return False
@register.inclusion_tag('amo/mobile/sort_by.html')
def mobile_sort_by(base_url, options=None, selected=None, extra_sort_opts=None,
                   search_filter=None):
    """Render the mobile sort-by widget; returns locals() as context.

    Bug fix: the extras are now concatenated into a NEW sequence.  The
    previous ``options += search_filter.extras`` mutated
    ``search_filter.opts`` in place when it was a list, appending the
    extras again on every render.
    """
    if search_filter:
        selected = search_filter.field
        options = search_filter.opts
        if hasattr(search_filter, 'extras'):
            options = options + search_filter.extras
    if extra_sort_opts:
        options_dict = dict(options + extra_sort_opts)
    else:
        options_dict = dict(options)
    if selected in options_dict:
        current = options_dict[selected]
    else:
        selected, current = options[0]  # Default to the first option.
    return locals()
@register.function
@jinja2.contextfunction
def cache_buster(context, url):
    """Append a build-id querystring so clients re-fetch after deploys."""
    if 'BUILD_ID' in context:
        build = context['BUILD_ID']
    elif url.endswith('.js'):
        build = context['BUILD_ID_JS']
    elif url.endswith('.css'):
        build = context['BUILD_ID_CSS']
    else:
        build = context['BUILD_ID_IMG']
    return utils.urlparams(url, b=build)
@register.function
@jinja2.contextfunction
def media(context, url):
    """Get a MEDIA_URL link with a cache buster querystring."""
    busted = cache_buster(context, url)
    return urljoin(settings.MEDIA_URL, busted)
@register.function
@jinja2.contextfunction
def static(context, url):
    """Get a STATIC_URL link with a cache buster querystring."""
    busted = cache_buster(context, url)
    return urljoin(settings.STATIC_URL, busted)
@register.function
@jinja2.evalcontextfunction
def attrs(ctx, *args, **kw):
    """Render keyword/dict arguments as XML attributes via |xmlattr."""
    merged = dict(*args, **kw)
    return jinja2.filters.do_xmlattr(ctx, merged)
@register.function
@jinja2.contextfunction
def side_nav(context, addon_type, category=None):
    """Cached side navigation, keyed per app/type/category."""
    app = context['request'].APP.id
    cat = str(category.id) if category else 'all'
    cache_key = 'side-nav-%s-%s-%s' % (app, addon_type, cat)
    return caching.cached(lambda: _side_nav(context, addon_type, category),
                          cache_key)
def _side_nav(context, addon_type, cat):
    """Build the side-nav markup for one addon type (uncached)."""
    # Prevent helpers generating circular imports.
    from addons.models import Category, Addon

    request = context['request']

    qs = Category.objects.filter(weight__gte=0)
    # Personas are app-agnostic; everything else is filtered per app.
    if addon_type != amo.ADDON_PERSONA:
        qs = qs.filter(application=request.APP.id)
    categories = sorted(qs.filter(type=addon_type),
                        key=attrgetter('weight', 'name'))

    base_url = cat.get_url_path() if cat else Addon.get_type_url(addon_type)

    ctx = dict(request=request, base_url=base_url, categories=categories,
               addon_type=addon_type, amo=amo)
    return jinja2.Markup(env.get_template('amo/side_nav.html').render(ctx))
@register.function
@jinja2.contextfunction
def site_nav(context):
    """Cached render of the global site navigation for the current app."""
    cache_key = 'site-nav-%s' % context['request'].APP.id
    return caching.cached(lambda: _site_nav(context), cache_key)
def _site_nav(context):
    """Build the site-nav markup (uncached)."""
    # Prevent helpers from generating circular imports.
    from addons.models import Category
    request = context['request']
    by_weight_then_name = attrgetter('weight', 'name')
    extensions = sorted(
        Category.objects.filter(application=request.APP.id, weight__gte=0,
                                type=amo.ADDON_EXTENSION),
        key=by_weight_then_name)
    personas = sorted(
        Category.objects.filter(weight__gte=0, type=amo.ADDON_PERSONA),
        key=by_weight_then_name)
    ctx = dict(request=request, amo=amo, extensions=extensions,
               personas=personas)
    return jinja2.Markup(env.get_template('amo/site_nav.html').render(ctx))
@register.function
def loc(s):
    """A noop function for strings that are not ready to be localized.

    Acts as a marker so these strings can later be swapped for a real
    gettext call; whitespace is normalized on the way through.
    """
    return strip_whitespace(s)
@register.function
def site_event_type(type):  # `type` shadows the builtin but is the public kwarg name.
    """Return the human-readable label for a site-event type id."""
    return amo.SITE_EVENT_CHOICES[type]
@register.function
@jinja2.contextfunction
def remora_url(context, url, lang=None, app=None, prefix=''):
    """Wrapper for urlresolvers.remora_url, filling lang/app from context."""
    if lang is None:
        # Derive a dashed locale (e.g. "pt-BR") from the context language.
        context_lang = context['LANG']
        if context_lang:
            lang = translation.to_locale(context_lang).replace('_', '-')
    if app is None:
        try:
            app = context['APP'].short
        except (AttributeError, KeyError):
            # No usable app in the context; leave `app` as None.
            pass
    return urlresolvers.remora_url(url=url, lang=lang, app=app, prefix=prefix)
@register.function
@jinja2.contextfunction
def hasOneToOne(context, obj, attr):
    """True if the one-to-one relation ``obj.<attr>`` exists in the DB."""
    try:
        getattr(obj, attr)
    except ObjectDoesNotExist:
        return False
    return True
@register.function
def no_results_amo():
    # This prints a "No results found" message. That's all. Carry on.
    return jinja2.Markup(env.get_template('amo/no_results.html').render())
@register.filter
def f(string, *args, **kwargs):
    """This overrides jingo.helpers.f to convert input to unicode if needed.

    This is needed because of
    https://github.com/jbalogh/jingo/pull/54#issuecomment-36728948
    """
    text = (string if isinstance(string, six.text_type)
            else six.text_type(string))
    return text.format(*args, **kwargs)
def _relative_to_absolute(url):
    """
    Prepends relative URLs with STATIC_URL to turn those inline-able.

    This method is intended to be used as a ``replace`` parameter of
    ``re.sub``.
    """
    target = url.group(1).strip('"\'')
    is_absolute = target.startswith(('data:', 'http:', 'https:', '//'))
    if not is_absolute:
        target = target.replace('../../', settings.STATIC_URL)
    return 'url(%s)' % target
@register.function
def inline_css(bundle, media=False, debug=None):
    """
    If we are in debug mode, just output a single style tag for each css file.
    If we are not in debug mode, return a style that contains bundle-min.css.
    Forces a regular css() call for external URLs (no inline allowed).

    Extracted from jingo-minify and re-registered, see:
    https://github.com/jsocol/jingo-minify/pull/41
    Added: turns relative links to absolute ones using STATIC_URL.
    """
    if debug is None:
        debug = getattr(settings, 'TEMPLATE_DEBUG', False)
    if debug:
        items = [_get_compiled_css_url(i)
                 for i in settings.MINIFY_BUNDLES['css'][bundle]]
    else:
        items = ['css/%s-min.css' % bundle]
    if not media:
        media = getattr(settings, 'CSS_MEDIA_DEFAULT', 'screen,projection,tv')
    contents = []
    for css in items:
        if is_external(css):
            # NOTE(review): returning here drops any non-external items
            # accumulated so far -- presumably bundles are either fully
            # external or fully local; confirm before relying on a mix.
            return _build_html([css], '<link rel="stylesheet" media="%s" '
                                      'href="%%s" />' % media)
        with open(get_path(css), 'r') as f:
            css_content = f.read()
        # Rewrite relative url(...) references so they still resolve once
        # the CSS is inlined into the page instead of served from its dir.
        css_parsed = re.sub(r'url\(([^)]*?)\)',
                            _relative_to_absolute,
                            css_content)
        contents.append(css_parsed)
    return _build_html(contents, '<style type="text/css" media="%s">%%s'
                                 '</style>' % media)
# A (temporary?) copy of this is in services/utils.py. See bug 1055654.
def user_media_path(what):
    """Make it possible to override storage paths in settings.

    By default, all storage paths are in the MEDIA_ROOT.
    This is backwards compatible.
    """
    override_key = "{0}_PATH".format(what.upper())
    fallback = os.path.join(settings.MEDIA_ROOT, what)
    return getattr(settings, override_key, fallback)
# A (temporary?) copy of this is in services/utils.py. See bug 1055654.
def user_media_url(what):
    """
    Generate default media url, and make possible to override it from
    settings.
    """
    override_key = "{0}_URL".format(what.upper().replace('-', '_'))
    fallback = '%s%s/' % (settings.MEDIA_URL, what)
    return getattr(settings, override_key, fallback)
def id_to_path(pk):
    """
    Generate a path from an id, to distribute folders in the file system.

    1      => 1/1/1
    12     => 2/12/12
    123456 => 6/56/123456
    """
    pk = str(pk)
    last_digit = pk[-1]
    # For single-digit ids the middle component is the id itself.
    last_two = pk[-2:] if len(pk) >= 2 else pk
    return os.path.join(last_digit, last_two, pk)
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Import a trackable object from a SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from tensorflow.core.protobuf import graph_debug_info_pb2
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import values_util
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.saved_model import function_deserialization
from tensorflow.python.saved_model import load_options
from tensorflow.python.saved_model import load_v1_in_v2
from tensorflow.python.saved_model import loader_impl
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.saved_model import revived_types
from tensorflow.python.saved_model import utils_impl as saved_model_utils
from tensorflow.python.training.saving import checkpoint_options
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.training.tracking import graph_view
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
def _unused_handle():
  """Returns a placeholder as a handle that is not supposed to be accessed."""
  error_message = ("Trying to access a placeholder that is not supposed to be "
                   "executed. This means you are executing a graph generated "
                   "from the cross-replica context in an in-replica context.")
  # The Assert's condition defaults to False, so running anything that
  # depends on this placeholder surfaces the explanatory error above.
  assert_op = control_flow_ops.Assert(
      array_ops.placeholder_with_default(False, shape=()),
      [error_message])
  with ops.control_dependencies([assert_op]):
    return array_ops.placeholder(dtype=dtypes.resource)
class _WrapperFunction(function.ConcreteFunction):
  """Wraps a concrete function so it works in and out of replica contexts.

  The `_captured_inputs` needed in the in-replica and the cross-replica case
  differ. When `load()` runs under a tf.distribute.strategy scope, the
  captured inputs are distributed variables: in-replica we must resolve each
  one to the handle of its local component, while cross-replica the call is
  only constructing a graph that will never actually run (e.g. while building
  a functional model), so each capture is replaced by a guarded, never-used
  placeholder (see `_unused_handle`).
  """

  def __init__(self, concrete_function):
    # Shallow-copy every attribute from the wrapped concrete function.
    self.__dict__.update(vars(concrete_function))

  def _call_flat(self, args, captured_inputs, cancellation_manager=None):
    in_replica = (ds_context.get_replica_context() is not None or
                  values_util.is_saving_non_distributed())
    if in_replica:
      # Resolve each captured distributed variable to a resource handle. In
      # the replica context, var.handle is the replica-local handle; when
      # saving a non-distributed version of the model it is the primary
      # variable's handle, since only one copy of a replicated variable is
      # saved.
      resolved = [
          x.handle if distribute_utils.is_distributed_variable(x) else x
          for x in captured_inputs
      ]
    else:
      # Cross-replica context: the graph being built must never execute, so
      # swap each distributed variable for an unused placeholder.
      resolved = [
          _unused_handle() if distribute_utils.is_distributed_variable(x)
          else x
          for x in captured_inputs
      ]
    return super(_WrapperFunction, self)._call_flat(args, resolved,
                                                    cancellation_manager)
class Loader(object):
  """Helper class to load an object-based SavedModel."""

  def __init__(self, object_graph_proto, saved_model_proto, export_dir,
               ckpt_options, filters):
    meta_graph = saved_model_proto.meta_graphs[0]
    self._asset_file_def = meta_graph.asset_file_def
    # Per-op attribute map, used by _recreate_constant to pull tensor values.
    self._operation_attributes = {
        node.name: node.attr for node in meta_graph.graph_def.node}
    self._proto = object_graph_proto
    self._export_dir = export_dir
    self._concrete_functions = (
        function_deserialization.load_function_def_library(
            meta_graph.graph_def.library))
    self._checkpoint_options = ckpt_options
    # Stores user-defined node_filters argument.
    self._node_filters = filters
    # Stores map of string paths to integers.
    self._node_path_to_id = self._convert_node_paths_to_ints()
    self._loaded_nodes = {}
    if isinstance(filters, dict):
      # If node_filters is a dict, then the values may contain already created
      # trackable objects. In this case, create a dictionary mapping node IDs to
      # the already created nodes. This dict will be updated in
      # `_retrieve_all_filtered_nodes` with tracked dependencies.
      for node_path, node in filters.items():
        if isinstance(node, tuple):
          self._loaded_nodes[self._node_path_to_id[node_path]] = node
        else:
          self._loaded_nodes[self._node_path_to_id[node_path]] = (node, setattr)

    # Get a list of all integer node ids to load, or None if all nodes should be
    # loaded. This list includes ids of child nodes.
    self._filtered_nodes = self._retrieve_all_filtered_nodes()

    for name, concrete_function in self._concrete_functions.items():
      # Wrap all the concrete function so that they are capable of dealing with
      # both in replica and cross replica cases.
      self._concrete_functions[name] = _WrapperFunction(concrete_function)

    self._load_all()
    self._restore_checkpoint()

    # In graph mode, resource initializers must be registered so common
    # session helpers (e.g. ManagedSession) run them.
    for node in self._nodes:
      if isinstance(node, tracking.CapturableResource):
        init_op = node._initialize()  # pylint: disable=protected-access
        if not context.executing_eagerly():
          ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)

  def _convert_node_paths_to_ints(self):
    """Maps all string node paths in node_filters to the int node ids."""
    if self._node_filters is None:
      return None
    path_to_int = {}
    for node_id in self._node_filters:
      int_node_id = None
      if isinstance(node_id, str):
        node_path = node_id.split(".")
        if node_path[0] != "root":
          raise ValueError(
              "When passing string identifiers to node_filters, the first name"
              " must be root.")
        int_node_id = 0
        # Walk the object graph one attribute name at a time to resolve the
        # dotted path into the integer id of the final node.
        for n, name in enumerate(node_path[1:]):
          int_node_id = self._find_node_child(
              int_node_id, name, ".".join(node_path[:n+2]))
        path_to_int[node_id] = int_node_id
      else:
        raise TypeError("Elements in node_filters must be strings.")
    return path_to_int

  def _retrieve_all_filtered_nodes(self):
    """Traverses through the object graph to get the IDs of all nodes to load.

    As a side-effect, if node_filters is a dictionary that contains already-
    created objects, then the dependencies tracked by those objects will be
    added to node_filters.

    Returns:
      List of all nodes to load, or None if all nodes should be loaded.
    """
    if self._node_filters is None:
      return None  # All nodes should be loaded.

    all_filtered_nodes = set()
    nodes_to_visit = list(self._node_filters)

    # Breadth-first traversal over the children of every filtered node.
    while nodes_to_visit:
      node_path = nodes_to_visit.pop(0)
      node_id = self._node_path_to_id[node_path]
      if node_id in all_filtered_nodes:
        continue
      all_filtered_nodes.add(node_id)

      node, setter = self._loaded_nodes.get(node_id, (None, None))
      if node is not None:
        if not isinstance(node, base.Trackable):
          raise TypeError(
              "Error when processing dictionary values passed to nodes_to_load."
              "Object at {} is expected to be a checkpointable TensorFlow "
              "object (e.g. tf.Variable, tf.Module or Keras layer)."
              .format(node_path))
        node._maybe_initialize_trackable()  # pylint: disable=protected-access

      for reference in self._proto.nodes[node_id].children:
        child_object, _ = self._loaded_nodes.get(
            reference.node_id, (None, None))

        # See if node already tracks the child reference, in which case add the
        # child to the loaded_nodes dict.
        if child_object is None and node is not None:
          child_object = node._lookup_dependency(reference.local_name)  # pylint: disable=protected-access
          if isinstance(child_object, data_structures.TrackableDataStructure):
            # Make setattr a noop to avoid overwriting already existing data
            # structures.
            setter = lambda *args: None
          self._loaded_nodes[reference.node_id] = (child_object, setter)

        child_path = "{}.{}".format(node_path, reference.local_name)
        self._node_path_to_id[child_path] = reference.node_id
        nodes_to_visit.append(child_path)

    # The root (node 0) reaches everything, so filtering is moot.
    if 0 in all_filtered_nodes:
      return None
    return all_filtered_nodes

  def _find_node_child(self, node_id, child_name, path):
    """Returns the id of the child of `node_id` named `child_name`."""
    for reference in self._proto.nodes[node_id].children:
      if reference.local_name == child_name:
        return reference.node_id
    raise ValueError("unable to find node {}".format(path))

  def _load_all(self):
    """Loads all nodes and functions from the SavedModel and their edges."""
    self._load_nodes()
    self._load_edges()
    # TODO(b/124045874): There are limitations with functions whose captures
    # trigger other functions to be executed. For now it is only guaranteed to
    # work if the captures of a function only trigger functions without
    # captures.
    self._setup_functions_structures()
    self._setup_functions_captures()

    self._create_saveable_object_factories()

  def _create_saveable_object_factories(self):
    """Attaches factories for restored SaveableObjects to each loaded node."""
    for node_id, proto in self._iter_all_nodes():
      node = self.get(node_id)
      node._self_saveable_object_factories = {}  # pylint: disable=protected-access
      for name, saveable_object_proto in proto.saveable_objects.items():
        node._self_saveable_object_factories[name] = (  # pylint: disable=protected-access
            saveable_object_util.restored_saved_object_factory(
                self.get(saveable_object_proto.save_function),
                self.get(saveable_object_proto.restore_function)))

  def _load_edges(self):
    """Adds edges from objects to other objects and functions."""
    for node_id, object_proto in self._iter_all_nodes():
      self._add_object_graph_edges(object_proto, node_id)

    # If root object isn't loaded, then create edges from the root for
    # checkpoint compatibility.
    if self._filtered_nodes is not None and 0 not in self._filtered_nodes:
      root = self.get(0)
      for node_path in self._node_filters:
        loaded_node = self._nodes[self._node_path_to_id[node_path]]
        path = node_path.split(".")
        current_node = root
        # Rebuild the attribute chain root.<a>.<b>... with dummy objects so
        # each filtered node sits at its original checkpoint position.
        for name in path[1:-1]:
          if not hasattr(current_node, name):
            setattr(current_node, name, self._recreate_base_user_object()[0])
          current_node = getattr(current_node, name)
        if not hasattr(current_node, path[-1]):
          setattr(current_node, path[-1], loaded_node)

  def _add_object_graph_edges(self, proto, node_id):
    """Adds edges from an object to its children."""
    obj = self._nodes[node_id]
    setter = self._node_setters[node_id]

    for reference in proto.children:
      setter(obj, reference.local_name, self._nodes[reference.node_id])
      # Note: if an object has an attribute `__call__` add a class method
      # that allows `obj()` syntax to work. This is done per-instance to
      # allow `callable` to be used to find out if an object is callable.
      if reference.local_name == "__call__" and not callable(obj):
        setattr(type(obj), "__call__", _call_attribute)

  def _setup_functions_structures(self):
    """Setup structure for inputs and outputs of restored functions."""
    coder = nested_structure_coder.StructureCoder()
    for name, proto in sorted(self._proto.concrete_functions.items()):
      concrete_function = self._concrete_functions[name]
      # By setting the structured_outputs directly, we can rely on this
      # function_lib.ConcreteFunction object to perform the output repacking
      # logic. The only limitation of that logic is that it only works
      # with output that is convertible to Tensors and the conversion
      # always happens. For example tf.TensorShape([2, 3]) will be
      # converted to Tensor representing [2, 3].
      original_outputs = coder.decode_proto(proto.output_signature)

      # The original_outputs here had Tensors converted to TensorSpecs, so
      # the restored function's structured_outputs field will not be
      # exactly the same. Fortunately the repacking logic cares only about
      # the structure; and the unpacking logic cares only about structure
      # and types.
      concrete_function._func_graph.structured_outputs = original_outputs  # pylint: disable=protected-access
      concrete_function._func_graph.structured_input_signature = (  # pylint: disable=protected-access
          coder.decode_proto(proto.canonicalized_input_signature))
      concrete_function._initialize_function_spec()  # pylint: disable=protected-access

  def _setup_functions_captures(self):
    """Setup captures and variables in restored functions."""
    concrete_functions = sorted(self._proto.concrete_functions.items())
    for name, proto in concrete_functions:
      concrete_function = self._concrete_functions[name]
      bound_inputs = [
          self._get_tensor_from_node(node_id, name)
          for node_id in proto.bound_inputs]
      bound_variables = [
          self._nodes[node_id]
          for node_id in proto.bound_inputs
          if self._proto.nodes[node_id].WhichOneof("kind") == "variable"
      ]
      # TODO(andresp): This is only injecting the captured inputs into the
      # concrete function, note that we did not modify the FuncGraph
      # itself.
      concrete_function._captured_inputs = bound_inputs  # pylint: disable=protected-access
      concrete_function._func_graph.variables = bound_variables  # pylint: disable=protected-access
      if bound_inputs:
        for bound_input, internal_capture in zip(
            bound_inputs, concrete_function.inputs[-len(bound_inputs):]):
          if distribute_utils.is_distributed_variable(bound_input):
            concrete_function.graph.capture_distributed_variable(
                bound_input, internal_capture)
          else:
            concrete_function.graph.replace_capture(bound_input,
                                                    internal_capture)
            if internal_capture.dtype == dtypes.resource:
              if resource_variable_ops.is_resource_variable(bound_input):
                try:
                  handle = bound_input.handle
                except ValueError:
                  # For mirrored variables we'll copy handle data for components
                  # as they get captured.
                  pass
                else:
                  custom_gradient.copy_handle_data(handle, internal_capture)
              else:
                custom_gradient.copy_handle_data(bound_input, internal_capture)
            # Setting "captures" first means "capture" won't create a new
            # placeholder for this input.
            concrete_function.graph.capture(bound_input)

  def _get_tensor_from_node(self, node_id, fn_name):
    """Resolves a node id into a tensor to be captured for a function."""
    if self._node_filters is not None and self._nodes[node_id] is None:
      raise ValueError(
          "Error when processing nodes_to_load. Function \"{}\" requires "
          "inputs/variables that are not loaded when nodes_to_load={}"
          .format(fn_name, self._node_filters))

    with ops.init_scope():
      obj = self._nodes[node_id]
      if distribute_utils.is_distributed_variable(obj):
        return obj
      elif resource_variable_ops.is_resource_variable(obj):
        return obj.handle
      elif isinstance(obj, tracking.Asset):
        return obj.asset_path
      elif tensor_util.is_tensor(obj):
        return obj
      elif isinstance(obj, tracking.CapturableResource):
        # Note: this executes restored functions in the CapturableResource.
        return obj.resource_handle
      raise ValueError("Can't convert node %s to tensor" % (type(obj)))

  def _initialize_loaded_nodes(self):
    """Returns (nodes, node_setters) dicts seeded from pre-created objects."""
    nodes = {}
    node_setters = {}
    for node_id, (node, setter) in self._loaded_nodes.items():
      nodes[node_id] = node
      node_setters[node_id] = setter
    return nodes, node_setters

  def _iter_all_nodes(self):
    """Yields (node_id, proto) for every node that should be loaded."""
    if self._filtered_nodes is None:
      return enumerate(self._proto.nodes)
    else:
      return [(node_id, self._proto.nodes[node_id])
              for node_id in self._filtered_nodes]

  def _load_nodes(self):
    """Load all saved objects."""
    # `nodes` maps from node ids to recreated objects
    # `node_setters` maps from node ids to setter functions
    # (same signature as setattr) for setting dependencies.
    nodes, node_setters = self._initialize_loaded_nodes()

    # Figure out which objects are slot variables. These objects are created
    # with Optimizer.add_slot rather than _recreate_variable.
    slot_variable_node_ids = set()
    for _, proto in self._iter_all_nodes():
      for slot_variable_proto in proto.slot_variables:
        slot_variable_node_ids.add(slot_variable_proto.slot_variable_node_id)

    # Re-create everything except slot variables.
    for node_id, proto in self._iter_all_nodes():
      if node_id in slot_variable_node_ids or nodes.get(node_id) is not None:
        # Defer recreating slot variables so we can use the public Optimizer
        # interface.
        continue
      node, setter = self._recreate(proto, node_id)
      nodes[node_id] = node
      node_setters[node_id] = setter

    # Now that we have created the variables being optimized, we have enough
    # information to re-create slot variables for them.
    for node_id, proto in self._iter_all_nodes():
      optimizer_object = nodes[node_id]
      for slot_variable_proto in proto.slot_variables:
        optimized_variable = nodes[
            slot_variable_proto.original_variable_node_id]
        slot_variable = optimizer_object.add_slot(
            var=optimized_variable,
            slot_name=slot_variable_proto.slot_name)
        nodes[slot_variable_proto.slot_variable_node_id] = slot_variable
        node_setters[slot_variable_proto.slot_variable_node_id] = setattr

    # If root object is not loaded, add a dummy root object for checkpoint
    # compatibility.
    if 0 not in nodes:
      nodes[0] = self._recreate_base_user_object()[0]

    self._nodes = [nodes.get(node_id)
                   for node_id in range(len(self._proto.nodes))]
    self._node_setters = node_setters

  @property
  def _expect_partial_checkpoint(self):
    """Whether to expect that some objects aren't loaded.

    This should be set to True in subclasses of the Loader class which generate
    a trackable object with an object graph that is different from the graph
    in the SavedModel. Setting this property to True suppresses the warnings
    that are printed out when there are unused parts of the checkpoint or
    object.

    Returns:
      boolean
    """
    return False

  def _restore_checkpoint(self):
    """Load state from checkpoint into the deserialized objects."""
    variables_path = saved_model_utils.get_variables_path(self._export_dir)
    # TODO(andresp): Clean use of private methods of TrackableSaver.
    # pylint: disable=protected-access
    saver = util.TrackableSaver(graph_view.ObjectGraphView(self.get(0)))
    with ops.device("CPU"):
      saver._file_prefix_placeholder = constant_op.constant(variables_path)
    if self._expect_partial_checkpoint:
      load_status = saver.restore(variables_path,
                                  self._checkpoint_options).expect_partial()
    else:
      load_status = saver.restore(variables_path, self._checkpoint_options)
    load_status.assert_existing_objects_matched()
    checkpoint = load_status._checkpoint

    # When running in eager mode, the `restore` call above has already run and
    # restored the state of trackables, call `position.restore_ops()` will
    # return an empty list as there is nothing left to do. In graph mode, that
    # will return the list of ops that must run to restore the object on that
    # position. We have to wire them in the initializers of the objects so that
    # they get initialized properly when using common practices (e.g. the ones
    # used by ManagedSession) without further user action.
    for object_id, obj in dict(checkpoint.object_by_proto_id).items():
      position = base.CheckpointPosition(checkpoint=checkpoint,
                                         proto_id=object_id)
      restore_ops = position.restore_ops()
      if restore_ops:
        if resource_variable_ops.is_resource_variable(obj):
          if len(restore_ops) == 1:
            obj._initializer_op = restore_ops[0]
          else:
            obj._initializer_op = control_flow_ops.group(*restore_ops)
        elif isinstance(obj, lookup_ops.LookupInterface):
          # We don't need to check for eager execution here, since this code
          # path should only be taken if we are restoring in graph mode.
          ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, restore_ops)
        else:
          raise NotImplementedError(
              ("Missing functionality to restore state of object "
               "%r from the checkpoint." % obj))

  def adjust_debug_info_func_names(self, debug_info):
    """Rewrite func names in the debug info by using the concrete func names."""
    output_debug_info = graph_debug_info_pb2.GraphDebugInfo()
    output_debug_info.files[:] = debug_info.files
    for key in debug_info.traces:
      node, func = key.split("@")
      new_func = ""
      if func in self._concrete_functions:
        new_func = self._concrete_functions[func].function_def.signature.name
      output_debug_info.traces[node + "@" + new_func].CopyFrom(
          debug_info.traces[key])
    return output_debug_info

  def get(self, node_id):
    """Returns the loaded node for `node_id` (an int id or a string path)."""
    if isinstance(node_id, str):
      node_id = self._node_path_to_id[node_id]
    return self._nodes[node_id]

  def _recreate(self, proto, node_id):
    """Creates a Python object from a SavedObject protocol buffer."""
    # Each entry returns a (trackable object, setter function) pair.
    factory = {
        "user_object": (
            lambda: self._recreate_user_object(proto.user_object, node_id)),
        "asset": lambda: self._recreate_asset(proto.asset),
        "function": lambda: self._recreate_function(proto.function),
        "bare_concrete_function": functools.partial(
            self._recreate_bare_concrete_function,
            proto.bare_concrete_function),
        "variable": lambda: self._recreate_variable(proto.variable),
        "constant": lambda: self._recreate_constant(proto.constant),
        "resource": lambda: self._recreate_resource(proto.resource),
    }
    kind = proto.WhichOneof("kind")
    if kind not in factory:
      raise ValueError("Unknown SavedObject type: %r" % kind)
    return factory[kind]()

  def _recreate_user_object(self, proto, node_id):
    """Instantiates a SavedUserObject."""
    looked_up = revived_types.deserialize(proto)
    if looked_up is None:
      return self._recreate_base_user_object(proto, node_id)
    return looked_up

  def _recreate_base_user_object(self, proto=None, node_id=None):
    del proto, node_id
    # Note: each user object has its own class. This allows making each one
    # individually callable by adding a `__call__` method to the classes of
    # the objects instances that have a `__call__` property.

    class _UserObject(tracking.AutoTrackable):
      pass

    return _UserObject(), setattr

  def _recreate_asset(self, proto):
    """Recreates an Asset pointing at the exported assets directory."""
    filename = os.path.join(
        saved_model_utils.get_assets_dir(self._export_dir),
        self._asset_file_def[proto.asset_file_def_index].filename)
    asset = tracking.Asset(filename)
    if not context.executing_eagerly():
      ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, asset.asset_path)
    return asset, setattr

  def _recreate_function(self, proto):
    """Recreates a polymorphic function from its SavedFunction proto."""
    return function_deserialization.recreate_function(
        proto, self._concrete_functions), setattr

  def _recreate_bare_concrete_function(self, proto):
    """Recreates a concrete function that was saved without a function spec."""
    return function_deserialization.setup_bare_concrete_function(
        proto, self._concrete_functions), setattr

  def _recreate_variable(self, proto):
    """Recreates a variable from a SavedVariable proto (left uninitialized)."""
    name = proto.name if proto.name else None
    if name is not None:
      dbg_name = name
    else:
      dbg_name = "<variable loaded from saved model>"
    synchronization, aggregation, trainable = (
        variables.validate_synchronization_aggregation_trainable(
            proto.synchronization, proto.aggregation, proto.trainable,
            name=dbg_name))

    def uninitialized_variable_creator(next_creator, **kwargs):
      """A variable creator that creates uninitialized variables."""
      del next_creator
      return resource_variable_ops.UninitializedVariable(**kwargs)

    # Create a variable_creator_scope that creates uninitialized variables with
    # a lower priority such that a potential distributed variable_creator_scope
    # can take precedence.
    with ops.get_default_graph()._variable_creator_scope(  # pylint: disable=protected-access
        uninitialized_variable_creator,
        priority=50):
      return variables.Variable(
          shape=proto.shape,
          dtype=proto.dtype,
          name=name,
          trainable=trainable,
          synchronization=synchronization,
          aggregation=aggregation), setattr

  def _recreate_constant(self, proto):
    """Recreates a constant from the tensor stored in the graph def."""
    tensor_proto = self._operation_attributes[proto.operation]["value"].tensor
    ndarray = tensor_util.MakeNdarray(tensor_proto)
    if dtypes.as_dtype(tensor_proto.dtype) == dtypes.string:
      # NOTE(review): string constants are pinned to CPU -- presumably
      # because string tensors aren't supported on accelerators; confirm.
      with ops.device("CPU"):
        imported_constant = constant_op.constant(ndarray)
    else:
      imported_constant = constant_op.constant(ndarray)
    return imported_constant, setattr

  def _recreate_resource(self, proto):
    """Recreates a SavedResource as a _RestoredResource stub."""
    return _RestoredResource(device=proto.device), setattr
# TODO(b/124205571,b/124092991): Solve destruction of resources.
class _RestoredResource(tracking.TrackableResource):
  """Restored SavedResource."""

  def __init__(self, device=""):
    super(_RestoredResource, self).__init__(device=device)
    self._destroy_resource_fn = None

  def _create_resource(self):
    # NOTE(review): stub that always raises -- presumably the loader attaches
    # the restored "_create_resource" function after construction; confirm.
    raise RuntimeError()

  def _initialize(self):
    # NOTE(review): same as `_create_resource`: expected to be replaced by
    # the restored "_initialize" function after load.
    raise RuntimeError()

  @property
  def _destroy_resource(self):
    return self._destroy_resource_fn

  @_destroy_resource.setter
  def _destroy_resource(self, destroy_resource_fn):
    # Registering a destroy function also creates a deleter, which presumably
    # invokes it when this object is torn down (see
    # tracking.CapturableResourceDeleter) -- confirm.
    self._resource_deleter = tracking.CapturableResourceDeleter(
        destroy_resource_fn)
    self._destroy_resource_fn = destroy_resource_fn

  def _list_functions_for_serialization(self, unused_serialization_cache):
    # Overwrite this method to avoid the implementation of
    # base class to re-wrap the polymorphic functions into
    # another layer of `tf.function`.
    functions = {
        "_create_resource": self._create_resource,
        "_initialize": self._initialize,
    }
    if self._destroy_resource:
      functions.update(_destroy_resource=self._destroy_resource)
    return functions
def _call_attribute(instance, *args, **kwargs):
  """Class-level `__call__` shim that forwards to the instance's attribute."""
  call = instance.__call__
  return call(*args, **kwargs)
@tf_export("__internal__.saved_model.load_partial", v1=[])
def load_partial(export_dir, filters, tags=None, options=None):
  """Partially load a SavedModel (saved from V2).

  Similar to `tf.saved_model.load`, except that `filters` selects which
  nodes of the saved object graph are loaded.
  `tf.saved_model.load_partial(export_dir, ["root"])` is equivalent to
  `tf.saved_model.load(export_dir)`.

  Only SavedModels written by TensorFlow V2 (`tf.saved_model.save` or
  Keras) are supported; SavedModels produced by the Estimator API cannot
  be loaded with filters.

  In TensorFlow V2 a SavedModel stores the saved object's **object graph**:
  nodes (`tf.Module`, `tf.Variable`, `tf.function`, Keras layers, etc.)
  connected by edges named after the attributes linking the objects.  Each
  filter entry is a node path of the form `root.{attribute_name}...`; the
  selected nodes and all their recursive descendants are loaded.  Different
  parts of the model may be loaded under different distribution strategies
  by calling this function once per strategy scope (experimental).

  Args:
    export_dir: The SavedModel directory to load from.
    filters: A list or dictionary where each element or key is a string
      path to nodes that should be loaded.  When a dictionary value is not
      None, that existing object is used in place of a freshly restored
      node.
    tags: A tag or sequence of tags identifying the MetaGraph to load.
      Optional if the SavedModel contains a single MetaGraph, as for those
      exported from `tf.saved_model.save`.
    options: `tf.saved_model.LoadOptions` object that specifies options for
      loading.

  Returns:
    A dictionary mapping node paths from the filter to loaded objects.
  """
  loaded_nodes = load_internal(export_dir, tags, options, filters=filters)
  return loaded_nodes
@tf_export("saved_model.load", v1=["saved_model.load_v2"])
def load(export_dir, tags=None, options=None):
  """Load a SavedModel from `export_dir`.

  Signatures associated with the SavedModel are available as functions:

  ```python
  imported = tf.saved_model.load(path)
  f = imported.signatures["serving_default"]
  print(f(x=tf.constant([[1.]])))
  ```

  Objects exported with `tf.saved_model.save` additionally have their
  trackable objects and functions available as attributes of the result.

  Keras models are trackable and can be loaded here, but the returned
  object is not a Keras model (no `.fit`/`.predict`); `.variables`,
  `.trainable_variables` and `.__call__` remain available.  Use
  `tf.keras.models.load_model` to recover the Keras object.

  SavedModels from `tf.estimator.Estimator` or the 1.x APIs carry a flat
  graph instead of `tf.function` objects; they are loaded with a
  compatibility wrapper exposing `.signatures`, `.prune(feeds, fetches)`,
  `.variables`, `.graph`, and `.restore(save_path)` (see
  `tf.compat.v1.wrap_function` for details on pruning).

  When consuming SavedModels produced asynchronously by another process,
  wait for "saved_model_dir/saved_model.pb" — written atomically as the
  last `tf.saved_model.save` file operation — rather than for the
  directory itself.

  Args:
    export_dir: The SavedModel directory to load from.
    tags: A tag or sequence of tags identifying the MetaGraph to load.
      Optional if the SavedModel contains a single MetaGraph, as for those
      exported from `tf.saved_model.save`.
    options: `tf.saved_model.LoadOptions` object that specifies options for
      loading.

  Returns:
    A trackable object with a `signatures` attribute mapping from signature
    keys to functions, plus the trackable objects, functions, and debug
    info the SavedModel was exported with.

  Raises:
    ValueError: If `tags` don't match a MetaGraph in the SavedModel.
  """
  loaded = load_internal(export_dir, tags, options)
  return loaded["root"]
def load_internal(export_dir, tags=None, options=None, loader_cls=Loader,
                  filters=None):
  """Loader implementation.

  Shared backend for `load` and `load_partial`.

  Args:
    export_dir: SavedModel directory to read.
    tags: Optional tag or sequence of tags selecting the MetaGraph.
    options: Optional `tf.saved_model.LoadOptions`.
    loader_cls: Class used to restore the TF2 object graph (seam for tests
      and alternative loaders).
    filters: Optional node-path filters (see `load_partial`).

  Returns:
    A dict mapping node paths to loaded objects: one entry per filter path
    when `filters` is truthy, otherwise `{"root": root}`.
  """
  options = options or load_options.LoadOptions()
  if tags is not None and not isinstance(tags, set):
    # Supports e.g. tags=SERVING and tags=[SERVING]. Sets aren't considered
    # sequences for nest.flatten, so we put those through as-is.
    tags = nest.flatten(tags)
  saved_model_proto, debug_info = (
      loader_impl.parse_saved_model_with_debug_info(export_dir))
  if (len(saved_model_proto.meta_graphs) == 1 and
      saved_model_proto.meta_graphs[0].HasField("object_graph_def")):
    # Exactly one MetaGraph with an object graph: a TF2-style SavedModel.
    meta_graph_def = saved_model_proto.meta_graphs[0]
    if (tags is not None
        and set(tags) != set(meta_graph_def.meta_info_def.tags)):
      raise ValueError(
          ("The SavedModel at {} has one MetaGraph with tags {}, but got an "
           "incompatible argument tags={} to tf.saved_model.load. You may omit "
           "it, pass 'None', or pass matching tags.")
          .format(export_dir, meta_graph_def.meta_info_def.tags, tags))
    object_graph_proto = meta_graph_def.object_graph_def
    ckpt_options = checkpoint_options.CheckpointOptions(
        experimental_io_device=options.experimental_io_device)
    with ops.init_scope():
      try:
        loader = loader_cls(object_graph_proto, saved_model_proto, export_dir,
                            ckpt_options, filters)
      except errors.NotFoundError as err:
        # Surface missing-checkpoint errors with a hint about cross-device
        # loading, since that is a common cause of NotFoundError here.
        raise FileNotFoundError(
            str(err) + "\n If trying to load on a different device from the "
            "computational device, consider using setting the "
            "`experimental_io_device` option on tf.saved_model.LoadOptions "
            "to the io_device such as '/job:localhost'."
        )
      root = loader.get(0)
      if isinstance(loader, Loader):
        # Only the stock Loader knows how to remap debug-info function names.
        root.graph_debug_info = loader.adjust_debug_info_func_names(debug_info)
    root.tensorflow_version = meta_graph_def.meta_info_def.tensorflow_version
    root.tensorflow_git_version = (
        meta_graph_def.meta_info_def.tensorflow_git_version)
  else:
    # No object graph: a TF1 / Estimator SavedModel loaded via the compat path.
    if filters:
      raise ValueError("SavedModels saved from Tensorflow V1 or Estimator (any "
                       "version) cannot be loaded with node filters.")
    with ops.init_scope():
      root = load_v1_in_v2.load(export_dir, tags)
      root.graph_debug_info = debug_info
  if filters:
    return {node_id: loader.get(node_id) for node_id in filters}
  else:
    return {"root": root}
| |
#!/usr/bin/python
# Copyright (c) weykent <weykent@weasyl.com>
# See COPYING for details.
import errno
import functools
import hashlib
import os
import shutil
import stat
import tempfile
import traceback
# Permission templates for managed files (applied before the umask).
EXECUTABLE = 0o777
NONEXECUTABLE = 0o666
# The st_mode bits chmod can actually set: permissions plus suid/sgid/sticky.
SETTABLE_MASK = 0o7777


def settable_mode(m):
    """Strip the file-type bits from *m*, keeping only chmod-settable bits."""
    return m & SETTABLE_MASK
def first_directory(directories):
    """Return the first entry of *directories* that exists and is a directory.

    Missing entries are skipped; any other lstat failure propagates.
    Returns None when no candidate qualifies.  Uses lstat, so a symlink to
    a directory does not count.
    """
    for candidate in directories:
        try:
            info = os.lstat(candidate)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        else:
            if stat.S_ISDIR(info.st_mode):
                return candidate
    return None
def hash_file(path, chunksize=4096):
    """Return (sha256 hexdigest, st_mode) of *path*, or (None, None) if absent.

    Reads the file in *chunksize* pieces so large files never have to fit
    in memory; any open() failure other than ENOENT propagates.
    """
    try:
        infile = open(path, 'rb')
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        return None, None
    digest = hashlib.sha256()
    with infile:
        for chunk in iter(lambda: infile.read(chunksize), b''):
            digest.update(chunk)
        # Grab the mode while the descriptor is still open.
        mode = os.fstat(infile.fileno()).st_mode
    return digest.hexdigest(), mode
def makedirs_exist_ok(path):
    """Create *path* (with parents); an already-existing path is not an error."""
    try:
        os.makedirs(path)
    except OSError as error:
        already_there = error.errno == errno.EEXIST
        if not already_there:
            raise
class FileDoesNotExistError(Exception):
    """Raised when a file expected to already exist (content=True) is missing."""
    pass
class FileRecord(object):
    """Desired state of one regular file.

    ``content`` semantics: None means the file must not exist; True means
    the file must already exist and only its mode is enforced; any other
    value is the exact data the file must contain.
    """

    def __init__(self, path, mode, content=None):
        self.path = path
        self.mode = mode
        self.content = content
        self.must_change = False
        self.changed = False

    def __repr__(self):
        return '<%s %#x: %r @%r(%o)>' % (
            type(self).__name__, id(self), self.content, self.path, self.mode)

    def _must_change_p(self):
        """Compare desired state against the filesystem; True when out of sync."""
        on_disk_hash, on_disk_mode = hash_file(self.path)
        if on_disk_hash is None:
            # File absent: a change is needed exactly when presence is desired.
            return self.content is not None
        if self.content is None:
            # File present but must be removed.
            return True
        if self.content is True:
            matches = True
        else:
            matches = (
                hashlib.sha256(self.content).hexdigest() == on_disk_hash)
        return not matches or self.mode != settable_mode(on_disk_mode)

    def check_if_must_change(self):
        self.must_change = self._must_change_p()

    def commit(self):
        """Apply the desired state recorded by check_if_must_change."""
        if not self.must_change:
            return
        if self.content is None:
            # Desired state: absent.  An already-missing file is fine.
            try:
                os.unlink(self.path)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
        elif self.content is True:
            # Desired state: present with the right mode.
            try:
                filestat = os.lstat(self.path)
            except OSError as e:
                if e.errno == errno.ENOENT:
                    raise FileDoesNotExistError(self.path)
                raise
            if not stat.S_ISLNK(filestat.st_mode):
                os.chmod(self.path, self.mode)
        else:
            # Desired state: exact content.  Write a temp file in the same
            # directory, then rename over the target for atomic replacement.
            target_dir = os.path.dirname(self.path)
            makedirs_exist_ok(target_dir)
            scratch = tempfile.NamedTemporaryFile(
                dir=target_dir, prefix='.tmp', suffix='~', delete=False)
            with scratch:
                scratch.write(self.content)
                os.chmod(scratch.name, self.mode)
            os.rename(scratch.name, self.path)
        self.changed = True
class PathAlreadyExistsError(Exception):
    """Raised when a non-symlink already occupies a path meant for a symlink."""
    pass
class LinkRecord(object):
    """Desired state of one symlink.

    A ``target`` of None means the link must not exist.  ``dir_ok`` allows
    a real directory at the path to satisfy the record instead of a link.
    """

    def __init__(self, path, target=None, dir_ok=False):
        self.path = path
        self.target = target
        self.dir_ok = dir_ok
        self.must_change = False
        self.changed = False

    def __repr__(self):
        return '<%s %#x: %r dir_ok:%s @%r>' % (
            type(self).__name__, id(self), self.target, self.dir_ok, self.path)

    def _must_change_p(self):
        """True when the on-disk symlink disagrees with the desired target."""
        try:
            on_disk_target = os.readlink(self.path)
        except OSError as e:
            if e.errno == errno.ENOENT:
                # Nothing at the path: change needed only if a link is desired.
                return self.target is not None
            if e.errno == errno.EINVAL:
                # Something non-symlink is at the path.
                if self.dir_ok and os.path.isdir(self.path):
                    return False
                raise PathAlreadyExistsError(self.path)
            raise
        return self.target != on_disk_target

    def check_if_must_change(self):
        self.must_change = self._must_change_p()

    def commit(self):
        if not self.must_change:
            return
        try:
            os.unlink(self.path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        if self.target is not None:
            makedirs_exist_ok(os.path.dirname(self.path))
            os.symlink(self.target, self.path)
        self.changed = True
class NotAThingError(Exception):
    """Raised when a path slated for removal has an unexpected file type."""
    pass
class RemoveThing(object):
    """Removes a filesystem object of a particular type, if present.

    ``stat_type`` names a predicate in the ``stat`` module (e.g. 'S_ISREG');
    ``remover`` is the callable that performs the actual deletion.
    """

    def __init__(self, path, stat_type, remover):
        self.path = path
        self.stat_type = stat_type
        self.remover = remover
        self.must_change = False
        self.changed = False

    def __repr__(self):
        return '<%s %#x: %r(%r, %r)>' % (
            type(self).__name__, id(self), self.path, self.stat_type,
            self.remover)

    def _must_change_p(self):
        """True when something of the expected type exists at the path."""
        try:
            info = os.lstat(self.path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
            # Already gone: nothing to remove.
            return False
        if not getattr(stat, self.stat_type)(info.st_mode):
            raise NotAThingError(self.path, 'does not match', self.stat_type)
        return True

    def check_if_must_change(self):
        self.must_change = self._must_change_p()

    def commit(self):
        if not self.must_change:
            return
        self.remover(self.path)
        self.changed = True
# Prebound removal records: `rm` deletes a regular file, `rmdir` deletes an
# entire directory tree (via shutil.rmtree).
rm = functools.partial(RemoveThing, stat_type='S_ISREG', remover=os.unlink)
rmdir = functools.partial(
    RemoveThing, stat_type='S_ISDIR', remover=shutil.rmtree)
def main(module_cls):
    """Build the Ansible module object and delegate to _main.

    Any unhandled exception from _main is reported through fail_json with
    a full traceback instead of crashing the module process.
    """
    spec = dict(
        name=dict(required=True),
        sv_directory=dict(type='list', default=['/etc/sv']),
        service_directory=dict(type='list', default=['/service', '/etc/service']),
        init_d_directory=dict(type='list', default=['/etc/init.d']),
        runscript=dict(required=True),
        log_runscript=dict(),
        supervise_link=dict(),
        log_supervise_link=dict(),
        state=dict(
            choices=['present', 'absent', 'down'], default='present'),
        extra_files=dict(type='dict', default={}),
        extra_scripts=dict(type='dict', default={}),
        envdir=dict(type='dict'),
        lsb_service=dict(choices=['present', 'absent']),
        umask=dict(type='int', default=0o022),
    )
    module = module_cls(argument_spec=spec, supports_check_mode=True)
    try:
        _main(module)
    except Exception:
        module.fail_json(
            msg='unhandled exception', traceback=traceback.format_exc())
def _main(module):
    """Compute and apply the desired state of a runit service.

    Builds a list of file, symlink, and removal records describing the
    service's sv directory, the service-directory link, and the optional
    LSB init.d link; checks each record against the filesystem; and commits
    the required changes unless running in check mode.
    """
    def first_directory_or_fail(name):
        # Resolve a list-of-candidate-directories parameter to the first
        # existing directory, failing the module run when none exist.
        directories = module.params[name]
        ret = first_directory(directories)
        if ret is None:
            module.fail_json(
                msg='no extant directory found for %r out of %r' % (
                    name, directories))
        return ret
    sv_directory = first_directory_or_fail('sv_directory')
    service_directory = first_directory_or_fail('service_directory')
    name = module.params['name']
    state = module.params['state']
    umask = module.params['umask']
    sv = functools.partial(os.path.join, sv_directory, name)
    # Record constructors with the umask already folded into the mode.
    exe = functools.partial(FileRecord, mode=EXECUTABLE & ~umask)
    nexe = functools.partial(FileRecord, mode=NONEXECUTABLE & ~umask)
    outfiles = []
    outfiles.append(exe(sv('run'), content=module.params['runscript']))
    # Directories whose unmanaged contents are scheduled for removal below.
    directories_to_clear = []
    directories_to_clear.append(sv())
    if module.params['log_runscript'] is None:
        if module.params['log_supervise_link'] is not None:
            module.fail_json(
                msg='log_supervise_link must be specified with log_runscript')
        outfiles.append(rmdir(sv('log')))
    else:
        outfiles.append(
            exe(sv('log', 'run'), content=module.params['log_runscript']))
        directories_to_clear.append(sv('log'))
    # dict.items() behaves identically on Python 2 here and, unlike the
    # Python 2-only iteritems(), also works on Python 3.
    for filename, content in module.params['extra_files'].items():
        outfiles.append(nexe(sv(filename), content=content))
    for filename, content in module.params['extra_scripts'].items():
        outfiles.append(exe(sv(filename), content=content))
    envdir = module.params['envdir']
    if envdir is None:
        outfiles.append(rmdir(sv('env')))
    else:
        for key, value in module.params['envdir'].items():
            outfiles.append(nexe(sv('env', key), content=value))
        directories_to_clear.append(sv('env'))
    # An empty 'down' file tells runit not to start the service automatically.
    outfiles.append(nexe(sv('down'), content='' if state == 'down' else None))
    def do_supervise_link(param, *segments):
        # A None target removes the link; dir_ok lets runit's own supervise
        # directory satisfy the path in that case.
        target = module.params[param]
        outfiles.append(LinkRecord(
            sv(*segments), target=target, dir_ok=target is None))
    do_supervise_link('supervise_link', 'supervise')
    do_supervise_link('log_supervise_link', 'log', 'supervise')
    outfiles.append(LinkRecord(
        os.path.join(service_directory, name),
        target=None if state == 'absent' else sv()))
    lsb_service = module.params['lsb_service']
    if state == 'absent':
        if lsb_service == 'present':
            module.fail_json(
                msg="lsb_service can't be set to present if state=absent")
    else:
        init_d_directory = first_directory(module.params['init_d_directory'])
        if init_d_directory is None:
            if lsb_service is not None:
                module.fail_json(
                    msg='no /etc/init.d and lsb_service=%r' % (lsb_service,))
        else:
            should_create_lsb = lsb_service == 'present' or lsb_service is None
            outfiles.append(LinkRecord(
                os.path.join(init_d_directory, name),
                target='/usr/bin/sv' if should_create_lsb else None))
    paths_set = {outfile.path for outfile in outfiles}
    if len(paths_set) != len(outfiles):
        module.fail_json(msg='duplicate file paths specified')
    paths_set.update(directories_to_clear)
    # Schedule removal of any file inside a managed directory we don't own.
    for to_clear in directories_to_clear:
        try:
            directory_paths = os.listdir(to_clear)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
            continue
        directory_paths = {os.path.join(to_clear, p) for p in directory_paths}
        outfiles.extend(rm(path) for path in directory_paths - paths_set)
    for outfile in outfiles:
        outfile.check_if_must_change()
    paths = {outfile.path: outfile.must_change for outfile in outfiles}
    if not any(outfile.must_change for outfile in outfiles):
        module.exit_json(paths=paths, changed=False)
    elif module.check_mode:
        # Report what would change without touching the filesystem.
        module.exit_json(paths=paths, changed=True)
    for outfile in outfiles:
        outfile.commit()
    module.exit_json(paths=paths, changed=True)
# This is some gross-ass ansible magic. Unfortunately noqa can't be applied for
# E265, so it had to be disabled in setup.cfg.
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
# AnsibleModule is defined by the boilerplate Ansible splices in at the marker
# above, so it does not exist at lint time (hence the noqa below).
if __name__ == '__main__':  # pragma: nocover
    main(AnsibleModule)  # noqa
| |
"""
Parse XML file containing MSDN documentation.
Authors: Moritz Raabe, William Ballenthin
Copyright 2014 Mandiant, A FireEye Company
TODO: License
Based on zynamics' code at
https://code.google.com/p/zynamics/source/browse/?repo=msdn-ida-plugin
"""
import os.path
import sys
import xml.sax.handler
import itertools
import logging
class ParsingException(Exception):
    """Raised when the MSDN XML document contains an unexpected element."""

    def __init__(self, message):
        super(ParsingException, self).__init__(message)
        # Keep an explicit attribute so callers can read .message directly.
        self.message = message
class Argument:
    """One parameter of an MSDN-documented function."""

    def __init__(self):
        self.name = ""
        self.description = ""
        self.constants = []  # Constant objects documented for this argument
        self.enums = []  # names of the enumerations this argument draws from
        self._logger = logging.getLogger(__name__ + '.' + self.__class__.__name__)

    def __str__(self):
        # NOTE: returns encoded bytes — a Python 2 idiom kept for compatibility.
        return ("(%s, %s): %s" % (self.name, self.enums, self.description)).encode("ISO-8859-1")

    def __repr__(self):
        return self.__str__()

    def get_constant(self, name):
        """Return the constant named *name*, or None when absent."""
        return next(
            (const for const in self.constants if const.name == name), None)

    def merge(self, new_argument):
        """Fold *new_argument* into this one; non-empty newer data wins."""
        if self.name != new_argument.name:
            return
        if new_argument.description:
            self._logger.debug('    Overwriting argument description')
            self.description = new_argument.description
        if new_argument.constants:
            for constant in new_argument.constants:
                current_const = self.get_constant(constant.name)
                if not current_const:
                    # Constant not in list yet
                    self._logger.debug('     Adding new constant ' + constant.name)
                    self.constants.append(constant)
                    continue
                # Constant possibly needs to be updated
                current_const.merge(constant)
        if new_argument.enums:
            self._logger.debug('    Merging argument enums, resulting in [' + \
                               ', '.join(self.enums) + ']')
            self.enums += new_argument.enums
class Constant:
    """One named constant value that an argument may take."""

    def __init__(self):
        self.name = ""
        self.value = ""
        self.description = ""
        self._logger = logging.getLogger(__name__ + '.' + self.__class__.__name__)

    def __str__(self):
        # NOTE: returns encoded bytes — a Python 2 idiom kept for compatibility.
        return ("(%s, %s)" % (self.name, self.value)).encode("ISO-8859-1")

    def __repr__(self):
        return self.__str__()

    def merge(self, new_constant):
        """Fold *new_constant* into this one; non-empty newer fields win."""
        if self.name != new_constant.name:
            return
        self._logger.debug('   Working on constant ' + self.name)
        if new_constant.value:
            self._logger.debug('     Overwriting constant value')
            self.value = new_constant.value
        if new_constant.description:
            self._logger.debug('     Overwriting constant description')
            self.description = new_constant.description
class Function:
    """An MSDN-documented API function with its arguments and return info."""

    def __init__(self):
        self.name = ""
        self.dll = ""
        self.description = ""
        self.arguments = []
        self.returns = ""
        self._logger = logging.getLogger(__name__ + '.' + self.__class__.__name__)

    def __str__(self):
        # NOTE: returns encoded bytes — a Python 2 idiom kept for compatibility.
        return ("%s -- %s" % (self.name, self.arguments)).encode("ISO-8859-1")

    def __repr__(self):
        return self.__str__()

    def get_argument(self, name):
        """Return the argument named *name*, or None when absent."""
        for arg in self.arguments:
            if arg.name == name:
                return arg
        return None

    def merge(self, new_function):
        """
        Merge two function objects. Information found in the second function
        instance will overwrite previously obtained data.

        Argument:
        new_function -- function object that will overwrite previous data
        """
        if self.name != new_function.name:
            return
        self._logger.debug('Merging function ' + self.name)
        if new_function.dll:
            self._logger.debug('   Overwriting DLL info')
            # BUG FIX: previously assigned the whole Function object
            # (self.dll = new_function); copy just the DLL name string.
            self.dll = new_function.dll
        if new_function.description:
            self._logger.debug('   Overwriting function description')
            self.description = new_function.description
        if new_function.arguments:
            for arg in new_function.arguments:
                self._logger.debug('   Working on argument ' + arg.name)
                current_arg = self.get_argument(arg.name)
                if not current_arg:
                    # Argument not in list yet
                    self._logger.debug('     Adding argument ' + arg.name + ' to arguments')
                    self.arguments.append(arg)
                    continue
                # Argument possibly needs to be updated
                current_arg.merge(arg)
        if new_function.returns:
            self._logger.debug('   Overwriting function return value')
            self.returns = new_function.returns
class FunctionHandler(xml.sax.handler.ContentHandler):
    """SAX content handler that assembles Function objects from MSDN XML.

    A flat state machine: ``current_step`` records which element the parser
    is currently inside so that ambiguous child tags such as ``name`` and
    ``description`` can be routed to the function, argument, or constant
    being built.  Completed functions accumulate in ``self.functions``.
    """
    # Distinct state identifiers drawn from one shared counter; the numeric
    # values are arbitrary — only equality comparisons matter.
    c = itertools.count()
    IN_FUNCTION = next(c)
    IN_FUNCTION_NAME = next(c)
    IN_DLL = next(c)
    IN_FUNCTION_DESCRIPTION = next(c)
    IN_ARGUMENTS = next(c)
    IN_ARGUMENT = next(c)
    IN_ARGUMENT_NAME = next(c)
    IN_ARGUMENT_DESCRIPTION = next(c)
    IN_RETURNS = next(c)
    IN_CONSTANTS = next(c)
    IN_CONSTANT = next(c)
    IN_CONSTANT_NAME = next(c)
    IN_CONSTANT_VALUE = next(c)
    IN_CONSTANT_DESCRIPTION = next(c)
    def __init__(self):
        # NOTE(review): inTitle and mapping appear unused anywhere in this
        # class — confirm before removing.
        self.inTitle = 0
        self.mapping = {}
        self.current_step = 0
        self.functions = []  # completed Function objects, in document order
        self._logger = logging.getLogger(__name__ + '.' + self.__class__.__name__)
    def startElement(self, name, attributes):
        """Advance the state machine on an opening tag.

        Raises ParsingException for any tag not valid in the current state.
        """
        if name == "msdn":
            pass
        elif name == "functions":
            pass
        elif name == "function":
            self.current_step = FunctionHandler.IN_FUNCTION
            self.function = Function()
        elif self.current_step == FunctionHandler.IN_FUNCTION and name == "name":
            self.current_step = FunctionHandler.IN_FUNCTION_NAME
        elif self.current_step == FunctionHandler.IN_ARGUMENT and name == "name":
            self.current_step = FunctionHandler.IN_ARGUMENT_NAME
        elif name == "dll":
            self.current_step = FunctionHandler.IN_DLL
        elif self.current_step == FunctionHandler.IN_FUNCTION and name == "description":
            self.current_step = FunctionHandler.IN_FUNCTION_DESCRIPTION
        elif self.current_step == FunctionHandler.IN_ARGUMENT and name == "description":
            self.current_step = FunctionHandler.IN_ARGUMENT_DESCRIPTION
        elif self.current_step == FunctionHandler.IN_CONSTANT and name == "name":
            self.current_step = FunctionHandler.IN_CONSTANT_NAME
        elif self.current_step == FunctionHandler.IN_CONSTANT and name == "value":
            self.current_step = FunctionHandler.IN_CONSTANT_VALUE
        elif self.current_step == FunctionHandler.IN_CONSTANT and name == "description":
            self.current_step = FunctionHandler.IN_CONSTANT_DESCRIPTION
        elif name == "arguments":
            self.current_step = FunctionHandler.IN_ARGUMENTS
        elif name == "argument":
            self.current_step = FunctionHandler.IN_ARGUMENT
            self.current_argument = Argument()
        elif self.current_step == FunctionHandler.IN_CONSTANTS and name == "constant":
            self.current_step = FunctionHandler.IN_CONSTANT
            self.current_constant = Constant()
        elif name == "constants":
            self.current_step = FunctionHandler.IN_CONSTANTS
            # Enum names arrive as a comma-separated attribute on <constants>.
            self.current_argument.enums = []
            if "enums" in attributes.getNames():
                # NOTE(review): encode() yields bytes on Python 3, which would
                # break the str split below — this code targets Python 2.
                enums = attributes.getValue('enums').encode('utf-8')
                if enums:
                    self.current_argument.enums = enums.split(',')
        elif name == "returns":
            self.current_step = FunctionHandler.IN_RETURNS
        else:
            self._logger.warning('Error START: ' + name)
            raise ParsingException('start')
    def characters(self, data):
        """Append character data to whichever text field is currently open.

        SAX may deliver a single text node in multiple chunks, hence the
        concatenation rather than plain assignment.
        """
        if self.current_step == FunctionHandler.IN_FUNCTION_NAME:
            self.function.name = self.function.name + data
        elif self.current_step == FunctionHandler.IN_DLL:
            self.function.dll = self.function.dll + data
        elif self.current_step == FunctionHandler.IN_FUNCTION_DESCRIPTION:
            self.function.description = self.function.description + data
        elif self.current_step == FunctionHandler.IN_ARGUMENT_NAME:
            self.current_argument.name = self.current_argument.name + data
        elif self.current_step == FunctionHandler.IN_ARGUMENT_DESCRIPTION:
            self.current_argument.description = self.current_argument.description + \
                data
        elif self.current_step == FunctionHandler.IN_RETURNS:
            self.function.returns = self.function.returns + data
        elif self.current_step == FunctionHandler.IN_CONSTANT_NAME:
            self.current_constant.name = self.current_constant.name + data
        elif self.current_step == FunctionHandler.IN_CONSTANT_VALUE:
            self.current_constant.value = self.current_constant.value + data
        elif self.current_step == FunctionHandler.IN_CONSTANT_DESCRIPTION:
            self.current_constant.description = self.current_constant.description + \
                data
    def endElement(self, name):
        """Pop the state machine back out on a closing tag.

        Raises ParsingException for any tag not valid in the current state.
        """
        if name in ["functions", "msdn"]:
            pass
        elif name == "function":
            self.functions.append(self.function)
        elif self.current_step in [FunctionHandler.IN_ARGUMENT_NAME, FunctionHandler.IN_ARGUMENT_DESCRIPTION]:
            self.current_step = FunctionHandler.IN_ARGUMENT
        elif self.current_step in [FunctionHandler.IN_CONSTANT_NAME, FunctionHandler.IN_CONSTANT_VALUE, FunctionHandler.IN_CONSTANT_DESCRIPTION]:
            self.current_step = FunctionHandler.IN_CONSTANT
        elif name in ["name", "dll", "description", "arguments", "returns", "constants"]:
            self.current_step = FunctionHandler.IN_FUNCTION
        elif name == "argument":
            # NOTE(review): restores IN_ARGUMENT rather than IN_ARGUMENTS
            # (compare the "constant" branch below, which restores
            # IN_CONSTANTS); harmless with well-formed input, but the
            # asymmetry looks unintentional — confirm.
            self.current_step = FunctionHandler.IN_ARGUMENT
            self.function.arguments.append(self.current_argument)
        elif name == "constant":
            self.current_step = FunctionHandler.IN_CONSTANTS
            self.current_argument.constants.append(self.current_constant)
        else:
            self._logger.warning('Error END: ' + name)
            raise ParsingException('end')
g_logger = logging.getLogger(__name__)


def parse(xmlfile):
    """Parse *xmlfile* and return the list of Function objects it defines.

    Argument:
    xmlfile -- xml data file storing the MSDN information
    """
    g_logger.info('Starting parsing ' + xmlfile)
    sax_parser = xml.sax.make_parser()
    try:
        handler = FunctionHandler()
    except ParsingException as exc:
        g_logger.warning(exc.message)
        return None  # TODO critical?
    sax_parser.setContentHandler(handler)
    sax_parser.parse(xmlfile)
    return handler.functions
| |
#!/usr/bin/env python
from ..common import *
from ..extractor import VideoExtractor
class YouTube(VideoExtractor):
name = "YouTube"
# YouTube media encoding options, in descending quality order.
# http://en.wikipedia.org/wiki/YouTube#Quality_and_codecs. Retrieved July 17, 2014.
stream_types = [
{'itag': '38', 'container': 'MP4', 'video_resolution': '3072p', 'video_encoding': 'H.264', 'video_profile': 'High', 'video_bitrate': '3.5-5', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
#{'itag': '85', 'container': 'MP4', 'video_resolution': '1080p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '3-4', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
{'itag': '46', 'container': 'WebM', 'video_resolution': '1080p', 'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '192'},
{'itag': '37', 'container': 'MP4', 'video_resolution': '1080p', 'video_encoding': 'H.264', 'video_profile': 'High', 'video_bitrate': '3-4.3', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
#{'itag': '102', 'container': 'WebM', 'video_resolution': '720p', 'video_encoding': 'VP8', 'video_profile': '3D', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '192'},
{'itag': '45', 'container': 'WebM', 'video_resolution': '720p', 'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '2', 'audio_encoding': 'Vorbis', 'audio_bitrate': '192'},
#{'itag': '84', 'container': 'MP4', 'video_resolution': '720p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '2-3', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
{'itag': '22', 'container': 'MP4', 'video_resolution': '720p', 'video_encoding': 'H.264', 'video_profile': 'High', 'video_bitrate': '2-3', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
{'itag': '120', 'container': 'FLV', 'video_resolution': '720p', 'video_encoding': 'H.264', 'video_profile': 'Main@L3.1', 'video_bitrate': '2', 'audio_encoding': 'AAC', 'audio_bitrate': '128'}, # Live streaming only
{'itag': '44', 'container': 'WebM', 'video_resolution': '480p', 'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '1', 'audio_encoding': 'Vorbis', 'audio_bitrate': '128'},
{'itag': '35', 'container': 'FLV', 'video_resolution': '480p', 'video_encoding': 'H.264', 'video_profile': 'Main', 'video_bitrate': '0.8-1', 'audio_encoding': 'AAC', 'audio_bitrate': '128'},
#{'itag': '101', 'container': 'WebM', 'video_resolution': '360p', 'video_encoding': 'VP8', 'video_profile': '3D', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '192'},
#{'itag': '100', 'container': 'WebM', 'video_resolution': '360p', 'video_encoding': 'VP8', 'video_profile': '3D', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '128'},
{'itag': '43', 'container': 'WebM', 'video_resolution': '360p', 'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '0.5', 'audio_encoding': 'Vorbis', 'audio_bitrate': '128'},
{'itag': '34', 'container': 'FLV', 'video_resolution': '360p', 'video_encoding': 'H.264', 'video_profile': 'Main', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '128'},
#{'itag': '82', 'container': 'MP4', 'video_resolution': '360p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '96'},
{'itag': '18', 'container': 'MP4', 'video_resolution': '270p/360p', 'video_encoding': 'H.264', 'video_profile': 'Baseline', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '96'},
{'itag': '6', 'container': 'FLV', 'video_resolution': '270p', 'video_encoding': 'Sorenson H.263', 'video_profile': '', 'video_bitrate': '0.8', 'audio_encoding': 'MP3', 'audio_bitrate': '64'},
#{'itag': '83', 'container': 'MP4', 'video_resolution': '240p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '96'},
{'itag': '13', 'container': '3GP', 'video_resolution': '', 'video_encoding': 'MPEG-4 Visual', 'video_profile': '', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': ''},
{'itag': '5', 'container': 'FLV', 'video_resolution': '240p', 'video_encoding': 'Sorenson H.263', 'video_profile': '', 'video_bitrate': '0.25', 'audio_encoding': 'MP3', 'audio_bitrate': '64'},
{'itag': '36', 'container': '3GP', 'video_resolution': '240p', 'video_encoding': 'MPEG-4 Visual', 'video_profile': 'Simple', 'video_bitrate': '0.175', 'audio_encoding': 'AAC', 'audio_bitrate': '36'},
{'itag': '17', 'container': '3GP', 'video_resolution': '144p', 'video_encoding': 'MPEG-4 Visual', 'video_profile': 'Simple', 'video_bitrate': '0.05', 'audio_encoding': 'AAC', 'audio_bitrate': '24'},
]
    def decipher(js, s):
        """Recover a playable signature from the scrambled signature *s*.

        Finds the deciphering function in the player JavaScript *js*,
        translates it (and the helpers it calls) into Python source via
        regex rewriting, then executes the result.

        SECURITY NOTE(review): `exec` runs code derived from a remote,
        untrusted JavaScript file.  The regex translation constrains what
        can appear, but executing translated remote code is inherently
        risky — treat with care.
        """
        def tr_js(code):
            # Line-level JS -> Python translation for the tiny subset of
            # JavaScript used by the signature helpers (array slicing,
            # splicing, joining, and swapping).
            code = re.sub(r'function', r'def', code)
            code = re.sub(r'\$', '_dollar', code)
            code = re.sub(r'\{', r':\n\t', code)
            code = re.sub(r'\}', r'\n', code)
            code = re.sub(r'var\s+', r'', code)
            code = re.sub(r'(\w+).join\(""\)', r'"".join(\1)', code)
            code = re.sub(r'(\w+).length', r'len(\1)', code)
            code = re.sub(r'(\w+).slice\((\w+)\)', r'\1[\2:]', code)
            code = re.sub(r'(\w+).splice\((\w+),(\w+)\)', r'del \1[\2:\2+\3]', code)
            code = re.sub(r'(\w+).split\(""\)', r'list(\1)', code)
            return code
        # f1 is the name of the top-level decipher function in the player JS.
        f1 = match1(js, r'\w+\.sig\|\|([$\w]+)\(\w+\.\w+\)')
        f1def = match1(js, r'(function %s\(\w+\)\{[^\{]+\})' % re.escape(f1))
        # Strip object-qualified helper calls (obj.fn(a,0)) down to bare names.
        f1def = re.sub(r'([$\w]+\.)([$\w]+\(\w+,\d+\))', r'\2', f1def)
        code = tr_js(f1def)
        # Translate every helper f1 calls; helpers come in one- and
        # two-argument forms, so try the two-argument pattern first.
        f2s = set(re.findall(r'([$\w]+)\(\w+,\d+\)', f1def))
        for f2 in f2s:
            f2e = re.escape(f2)
            f2def = re.search(r'[^$\w]%s:function\((\w+,\w+)\)(\{[^\{\}]+\})' % f2e, js)
            if f2def:
                f2def = 'function {}({}){}'.format(f2e, f2def.group(1), f2def.group(2))
            else:
                f2def = re.search(r'[^$\w]%s:function\((\w+)\)(\{[^\{\}]+\})' % f2e, js)
                # Pad the one-argument form with a dummy second parameter.
                f2def = 'function {}({},b){}'.format(f2e, f2def.group(1), f2def.group(2))
            f2 = re.sub(r'\$', '_dollar', f2)
            code = code + 'global %s\n' % f2 + tr_js(f2def)
        code = code + 'sig=%s(s)' % re.sub(r'\$', '_dollar', f1)
        exec(code, globals(), locals())
        # NOTE(review): reading `sig` back through locals() after exec is
        # fragile on Python 3 (function locals are snapshots) — confirm this
        # still works on the supported interpreter versions.
        return locals()['sig']
def get_url_from_vid(vid):
    """Return the youtu.be short link for the given video ID."""
    short_link = 'http://youtu.be/{}'.format(vid)
    return short_link
def get_vid_from_url(url):
    """Extract the video ID from any of the known YouTube URL shapes."""
    # Path-based forms first, in the same priority order as before.
    for pattern in (r'youtu\.be/([^/]+)',
                    r'youtube\.com/embed/([^/?]+)',
                    r'youtube\.com/v/([^/?]+)'):
        vid = match1(url, pattern)
        if vid:
            return vid
    # Fall back to the query string: ?v=..., or a nested ?u=...&v=... form.
    return parse_query_param(url, 'v') or \
        parse_query_param(parse_query_param(url, 'u'), 'v')
def get_playlist_id_from_url(url):
    """Extract the playlist ID (?list=... or legacy ?p=...) from a URL."""
    pid = None
    for param in ('list', 'p'):
        pid = parse_query_param(url, param)
        if pid:
            break
    return pid
def download_playlist_by_url(self, url, **kwargs):
    """Download every video of a YouTube playlist, in playlist index order.

    Scrapes the playlist page for /watch links, sorts them by their 'index'
    query parameter, then downloads each video with a fresh extractor instance.
    Aborts via log.wtf if the URL does not contain a playlist ID.
    """
    self.url = url
    playlist_id = self.__class__.get_playlist_id_from_url(self.url)
    if playlist_id is None:
        log.wtf('[Failed] Unsupported URL pattern.')
    video_page = get_content('http://www.youtube.com/playlist?list=%s' % playlist_id)
    # Local import keeps html.parser off the common (single-video) code path.
    from html.parser import HTMLParser
    # Keep only /watch links that carry an 'index' param, sorted by that index.
    # NOTE(review): HTMLParser().unescape is deprecated in modern Python;
    # html.unescape() is the replacement -- confirm supported versions.
    videos = sorted([HTMLParser().unescape(video)
                     for video in re.findall(r'<a href="(/watch\?[^"]+)"', video_page)
                     if parse_query_param(video, 'index')],
                    key=lambda video: parse_query_param(video, 'index'))
    self.title = re.search(r'<meta name="title" content="([^"]+)"', video_page).group(1)
    self.p_playlist()
    for video in videos:
        vid = parse_query_param(video, 'v')
        index = parse_query_param(video, 'index')
        # Fresh instance per video so per-video parsed state is not shared.
        self.__class__().download_by_url(self.__class__.get_url_from_vid(vid), index=index, **kwargs)
def prepare(self, **kwargs):
    """Resolve the video ID, fetch metadata, and populate self.streams.

    Queries get_video_info first; when the signature is ciphered or the video
    is restricted (error 150), falls back to scraping the watch page for the
    ytplayer.config JSON. Delegates to download_playlist_by_url (then exits)
    if the URL turns out to be a playlist.
    """
    assert self.url or self.vid
    if not self.vid and self.url:
        self.vid = self.__class__.get_vid_from_url(self.url)
        if self.vid is None:
            # Not a single video -- treat as a playlist and stop here.
            self.download_playlist_by_url(self.url, **kwargs)
            exit(0)
    video_info = parse.parse_qs(get_content('http://www.youtube.com/get_video_info?video_id={}'.format(self.vid)))
    if 'status' not in video_info:
        log.wtf('[Failed] Unknown status.')
    elif video_info['status'] == ['ok']:
        if 'use_cipher_signature' not in video_info or video_info['use_cipher_signature'] == ['False']:
            # Plain signature: everything needed is in the get_video_info reply.
            self.title = parse.unquote_plus(video_info['title'][0])
            stream_list = video_info['url_encoded_fmt_stream_map'][0].split(',')
        else:
            # Parse video page instead
            video_page = get_content('http://www.youtube.com/watch?v=%s' % self.vid)
            ytplayer_config = json.loads(re.search('ytplayer.config\s*=\s*([^\n]+?});', video_page).group(1))
            self.title = ytplayer_config['args']['title']
            # html5player JS is needed later to decipher the 's' parameter.
            self.html5player = 'http:' + ytplayer_config['assets']['js']
            stream_list = ytplayer_config['args']['url_encoded_fmt_stream_map'].split(',')
    elif video_info['status'] == ['fail']:
        if video_info['errorcode'] == ['150']:
            video_page = get_content('http://www.youtube.com/watch?v=%s' % self.vid)
            # Note the different regex anchor (});ytplayer) on this page variant.
            ytplayer_config = json.loads(re.search('ytplayer.config\s*=\s*([^\n]+});ytplayer', video_page).group(1))
            if 'title' in ytplayer_config['args']:
                # 150 Restricted from playback on certain sites
                # Parse video page instead
                self.title = ytplayer_config['args']['title']
                self.html5player = 'http:' + ytplayer_config['assets']['js']
                stream_list = ytplayer_config['args']['url_encoded_fmt_stream_map'].split(',')
            else:
                log.wtf('[Error] The uploader has not made this video available in your country.')
                #self.title = re.search('<meta name="title" content="([^"]+)"', video_page).group(1)
                #stream_list = []
        elif video_info['errorcode'] == ['100']:
            log.wtf('[Failed] This video does not exist.', exit_code=int(video_info['errorcode'][0]))
        else:
            log.wtf('[Failed] %s' % video_info['reason'][0], exit_code=int(video_info['errorcode'][0]))
    else:
        log.wtf('[Failed] Invalid status.')
    # Index each stream by its itag; 'sig'/'s' are mutually exclusive ways the
    # signature may arrive (plain vs ciphered).
    for stream in stream_list:
        metadata = parse.parse_qs(stream)
        stream_itag = metadata['itag'][0]
        self.streams[stream_itag] = {
            'itag': metadata['itag'][0],
            'url': metadata['url'][0],
            'sig': metadata['sig'][0] if 'sig' in metadata else None,
            's': metadata['s'][0] if 's' in metadata else None,
            'quality': metadata['quality'][0],
            'type': metadata['type'][0],
            'mime': metadata['type'][0].split(';')[0],
            'container': mime_to_container(metadata['type'][0].split(';')[0]),
        }
def extract(self, **kwargs):
    """Pick a stream (requested itag or best available), sign its URL, and
    record the final source URL and size on self.streams[stream_id]."""
    if not self.streams_sorted:
        # No stream is available
        return
    if 'stream_id' in kwargs and kwargs['stream_id']:
        # Extract the stream
        stream_id = kwargs['stream_id']
        if stream_id not in self.streams:
            log.e('[Error] Invalid video format.')
            log.e('Run \'-i\' command with no specific video format to view all available formats.')
            exit(2)
    else:
        # Extract stream with the best quality
        stream_id = self.streams_sorted[0]['itag']
    src = self.streams[stream_id]['url']
    if self.streams[stream_id]['sig'] is not None:
        # Plain signature: append as-is.
        sig = self.streams[stream_id]['sig']
        src += '&signature={}'.format(sig)
    elif self.streams[stream_id]['s'] is not None:
        # Ciphered signature: decipher it using the html5player JS.
        s = self.streams[stream_id]['s']
        js = get_content(self.html5player)
        sig = self.__class__.decipher(js, s)
        src += '&signature={}'.format(sig)
    self.streams[stream_id]['src'] = [src]
    self.streams[stream_id]['size'] = urls_size(self.streams[stream_id]['src'])
# Module-level extractor instance; you-get dispatches to these two callables.
site = YouTube()
download = site.download_by_url
download_playlist = site.download_playlist_by_url
| |
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import time
import unittest
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import InvalidElementStateException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
def not_available_on_remote(func):
    """Decorator that turns a test into a no-op when run against a remote driver.

    Bug fixed: the original compared ``type(self.driver)`` (a class object)
    against the string ``'remote'``, which is always False, so the guard never
    fired; it also left a debug ``print`` in place. Remote drivers live in the
    ``selenium.webdriver.remote`` package, so check the driver class's module
    path instead.
    """
    def testMethod(self):
        if 'remote' in type(self.driver).__module__:
            # Skip: functionality not available on remote drivers.
            return None
        return func(self)
    return testMethod
def throwSERE(driver):
    """Wait condition that always raises, to exercise ignored_exceptions."""
    raise StaleElementReferenceException("test")
class WebDriverWaitTest(unittest.TestCase):
    """Integration tests for WebDriverWait and the expected_conditions helpers.

    Relies on fixtures injected by the test runner: ``self.driver`` (a live
    WebDriver session) and ``self.webserver`` (serves the static test pages
    loaded via ``_loadPage``). Timeouts are kept short so failure paths
    (expected TimeoutException) stay fast.
    """

    def testShouldExplicitlyWaitForASingleElement(self):
        self._loadPage("dynamic")
        add = self.driver.find_element_by_id("adder")
        add.click()
        WebDriverWait(self.driver, 3).until(EC.presence_of_element_located((By.ID, "box0")))  # All is well if this doesn't throw.

    def testShouldStillFailToFindAnElementWithExplicitWait(self):
        self._loadPage("dynamic")
        try:
            WebDriverWait(self.driver, 0.7).until(EC.presence_of_element_located((By.ID, "box0")))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException:
            pass
        except Exception as e:
            self.fail("Expected TimeoutException but got " + str(e))

    def testShouldExplicitlyWaituntilAtLeastOneElementIsFoundWhenSearchingForMany(self):
        self._loadPage("dynamic")
        add = self.driver.find_element_by_id("adder")
        add.click()
        add.click()
        elements = WebDriverWait(self.driver, 2).until(EC.presence_of_all_elements_located((By.CLASS_NAME, "redbox")))
        self.assertTrue(len(elements) >= 1)

    def testShouldFailToFindElementsWhenExplicitWaiting(self):
        self._loadPage("dynamic")
        with self.assertRaises(TimeoutException):
            WebDriverWait(self.driver, 0.7).until(EC.presence_of_all_elements_located((By.CLASS_NAME, "redbox")))

    def testShouldWaitUntilAtLeastOneVisibleElementsIsFoundWhenSearchingForMany(self):
        self._loadPage("hidden_partially")
        add_visible = self.driver.find_element_by_id("addVisible")
        add_hidden = self.driver.find_element_by_id("addHidden")
        add_visible.click()
        add_visible.click()
        add_hidden.click()
        # Two visible + one hidden box were added; only the visible ones count.
        elements = WebDriverWait(self.driver, 2).until(EC.visibility_of_any_elements_located((By.CLASS_NAME, "redbox")))
        self.assertTrue(len(elements) == 2)

    def testShouldFailToFindVisibleElementsWhenExplicitWaiting(self):
        self._loadPage("hidden_partially")
        with self.assertRaises(TimeoutException):
            WebDriverWait(self.driver, 0.7).until(EC.visibility_of_any_elements_located((By.CLASS_NAME, "redbox")))

    def testShouldWaitOnlyAsLongAsTimeoutSpecifiedWhenImplicitWaitsAreSet(self):
        self._loadPage("dynamic")
        # Explicit wait timeout must win over the implicit wait.
        self.driver.implicitly_wait(0.5)
        try:
            start = time.time()
            try:
                WebDriverWait(self.driver, 1).until(EC.presence_of_element_located((By.ID, "box0")))
                self.fail("Expected TimeoutException to have been thrown")
            except TimeoutException:
                pass
            self.assertTrue(time.time() - start < 1.5,
                            "Expected to take just over 1 second to execute, but took %f" %
                            (time.time() - start))
        finally:
            # Reset so the implicit wait doesn't leak into other tests.
            self.driver.implicitly_wait(0)

    def testShouldWaitAtLeastOnce(self):
        self._loadPage("simpleTest")
        # Even with a 0 timeout, the condition must be evaluated once.
        elements = WebDriverWait(self.driver, 0).until(lambda d: d.find_elements_by_tag_name('h1'))
        self.assertTrue(len(elements) >= 1)

    def testWaitUntilNotReturnsIfEvaluatesToFalse(self):
        self.assertFalse(WebDriverWait(self.driver, 1).until_not(lambda d: False))

    def testWaitShouldStillFailIfProduceIgnoredException(self):
        ignored = (InvalidElementStateException, StaleElementReferenceException)
        try:
            WebDriverWait(self.driver, 1, 0.7, ignored_exceptions=ignored).until(throwSERE)
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException:
            pass

    def testWaitShouldStillFailIfProduceChildOfIgnoredException(self):
        # StaleElementReferenceException subclasses WebDriverException.
        ignored = (WebDriverException)
        try:
            WebDriverWait(self.driver, 1, 0.7, ignored_exceptions=ignored).until(throwSERE)
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException:
            pass

    def testWaitUntilNotShouldNotFailIfProduceIgnoredException(self):
        ignored = (InvalidElementStateException, StaleElementReferenceException)
        self.assertTrue(WebDriverWait(self.driver, 1, 0.7, ignored_exceptions=ignored).until_not(throwSERE))

    def testExpectedConditionTitleIs(self):
        self._loadPage("blank")
        WebDriverWait(self.driver, 1).until(EC.title_is("blank"))
        self.driver.execute_script("setTimeout(function(){document.title='not blank'}, 200)")
        WebDriverWait(self.driver, 1).until(EC.title_is("not blank"))
        self.assertEqual(self.driver.title, 'not blank')
        try:
            WebDriverWait(self.driver, 0.7).until(EC.title_is("blank"))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException:
            pass

    def testExpectedConditionTitleContains(self):
        self._loadPage("blank")
        self.driver.execute_script("setTimeout(function(){document.title='not blank'}, 200)")
        WebDriverWait(self.driver, 1).until(EC.title_contains("not"))
        self.assertEqual(self.driver.title, 'not blank')
        try:
            WebDriverWait(self.driver, 0.7).until(EC.title_contains("blanket"))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException:
            pass

    def testExpectedConditionVisibilityOfElementLocated(self):
        self._loadPage("javascriptPage")
        try:
            WebDriverWait(self.driver, 0.7).until(EC.visibility_of_element_located((By.ID, 'clickToHide')))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException:
            pass
        self.driver.find_element_by_id('clickToShow').click()
        element = WebDriverWait(self.driver, 5).until(EC.visibility_of_element_located((By.ID, 'clickToHide')))
        self.assertTrue(element.is_displayed())

    def testExpectedConditionVisibilityOf(self):
        self._loadPage("javascriptPage")
        hidden = self.driver.find_element_by_id('clickToHide')
        try:
            WebDriverWait(self.driver, 0.7).until(EC.visibility_of(hidden))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException:
            pass
        self.driver.find_element_by_id('clickToShow').click()
        element = WebDriverWait(self.driver, 5).until(EC.visibility_of(hidden))
        self.assertTrue(element.is_displayed())

    def testExpectedConditionTextToBePresentInElement(self):
        self._loadPage('booleanAttributes')
        try:
            WebDriverWait(self.driver, 0.7).until(EC.text_to_be_present_in_element((By.ID, 'unwrappable'), 'Expected'))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException:
            pass
        self.driver.execute_script("setTimeout(function(){var el = document.getElementById('unwrappable'); el.textContent = el.innerText = 'Unwrappable Expected text'}, 200)")
        WebDriverWait(self.driver, 1).until(EC.text_to_be_present_in_element((By.ID, 'unwrappable'), 'Expected'))
        self.assertEqual('Unwrappable Expected text', self.driver.find_element_by_id('unwrappable').text)

    def testExpectedConditionTextToBePresentInElementValue(self):
        self._loadPage('booleanAttributes')
        try:
            WebDriverWait(self.driver, 1).until(EC.text_to_be_present_in_element_value((By.ID, 'inputRequired'), 'Expected'))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException:
            pass
        self.driver.execute_script("setTimeout(function(){document.getElementById('inputRequired').value = 'Example Expected text'}, 200)")
        WebDriverWait(self.driver, 1).until(EC.text_to_be_present_in_element_value((By.ID, 'inputRequired'), 'Expected'))
        self.assertEqual('Example Expected text', self.driver.find_element_by_id('inputRequired').get_attribute('value'))

    def testExpectedConditionFrameToBeAvailableAndSwitchToItByName(self):
        self._loadPage("blank")
        try:
            WebDriverWait(self.driver, 1).until(EC.frame_to_be_available_and_switch_to_it('myFrame'))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException:
            pass
        self.driver.execute_script("setTimeout(function(){var f = document.createElement('iframe'); f.id='myFrame'; f.src = '" + self._pageURL('iframeWithAlert') + "'; document.body.appendChild(f)}, 200)")
        WebDriverWait(self.driver, 1).until(EC.frame_to_be_available_and_switch_to_it('myFrame'))
        self.assertEqual('click me', self.driver.find_element_by_id('alertInFrame').text)

    def testExpectedConditionFrameToBeAvailableAndSwitchToItByLocator(self):
        self._loadPage("blank")
        try:
            WebDriverWait(self.driver, 1).until(EC.frame_to_be_available_and_switch_to_it((By.ID, 'myFrame')))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException:
            pass
        self.driver.execute_script("setTimeout(function(){var f = document.createElement('iframe'); f.id='myFrame'; f.src = '" + self._pageURL('iframeWithAlert') + "'; document.body.appendChild(f)}, 200)")
        WebDriverWait(self.driver, 1).until(EC.frame_to_be_available_and_switch_to_it((By.ID, 'myFrame')))
        self.assertEqual('click me', self.driver.find_element_by_id('alertInFrame').text)

    def testExpectedConditionInvisiblityOfElementLocated(self):
        self._loadPage("javascriptPage")
        self.driver.execute_script("delayedShowHide(0, true)")
        try:
            WebDriverWait(self.driver, 0.7).until(EC.invisibility_of_element_located((By.ID, 'clickToHide')))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException:
            pass
        self.driver.execute_script("delayedShowHide(200, false)")
        element = WebDriverWait(self.driver, 0.7).until(EC.invisibility_of_element_located((By.ID, 'clickToHide')))
        self.assertFalse(element.is_displayed())

    def testExpectedConditionElementToBeClickable(self):
        self._loadPage("javascriptPage")
        try:
            WebDriverWait(self.driver, 0.7).until(EC.element_to_be_clickable((By.ID, 'clickToHide')))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException:
            pass
        self.driver.execute_script("delayedShowHide(200, true)")
        WebDriverWait(self.driver, 0.7).until(EC.element_to_be_clickable((By.ID, 'clickToHide')))
        element = self.driver.find_element_by_id('clickToHide')
        element.click()
        WebDriverWait(self.driver, 3.5).until(EC.invisibility_of_element_located((By.ID, 'clickToHide')))
        self.assertFalse(element.is_displayed())

    def testExpectedConditionStalenessOf(self):
        self._loadPage('dynamicallyModifiedPage')
        element = self.driver.find_element_by_id('element-to-remove')
        try:
            WebDriverWait(self.driver, 0.7).until(EC.staleness_of(element))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException:
            pass
        self.driver.find_element_by_id('buttonDelete').click()
        self.assertEqual('element', element.text)
        WebDriverWait(self.driver, 0.7).until(EC.staleness_of(element))
        try:
            element.text
            self.fail("Expected StaleReferenceException to have been thrown")
        except StaleElementReferenceException:
            pass

    def testExpectedConditionElementToBeSelected(self):
        self._loadPage("formPage")
        element = self.driver.find_element_by_id('checky')
        try:
            WebDriverWait(self.driver, 0.7).until(EC.element_to_be_selected(element))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException:
            pass
        self.driver.execute_script("setTimeout(function(){document.getElementById('checky').checked = true}, 200)")
        WebDriverWait(self.driver, 0.7).until(EC.element_to_be_selected(element))
        self.assertTrue(element.is_selected())

    def testExpectedConditionElementLocatedToBeSelected(self):
        self._loadPage("formPage")
        element = self.driver.find_element_by_id('checky')
        try:
            WebDriverWait(self.driver, 0.7).until(EC.element_located_to_be_selected((By.ID, 'checky')))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException:
            pass
        self.driver.execute_script("setTimeout(function(){document.getElementById('checky').checked = true}, 200)")
        WebDriverWait(self.driver, 0.7).until(EC.element_located_to_be_selected((By.ID, 'checky')))
        self.assertTrue(element.is_selected())

    def testExpectedConditionElementSelectionStateToBe(self):
        self._loadPage("formPage")
        element = self.driver.find_element_by_id('checky')
        WebDriverWait(self.driver, 0.7).until(EC.element_selection_state_to_be(element, False))
        self.assertFalse(element.is_selected())
        try:
            WebDriverWait(self.driver, 0.7).until(EC.element_selection_state_to_be(element, True))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException:
            pass
        self.driver.execute_script("setTimeout(function(){document.getElementById('checky').checked = true}, 200)")
        WebDriverWait(self.driver, 0.7).until(EC.element_selection_state_to_be(element, True))
        self.assertTrue(element.is_selected())

    def testExpectedConditionElementLocatedSelectionStateToBe(self):
        self._loadPage("formPage")
        element = self.driver.find_element_by_id('checky')
        WebDriverWait(self.driver, 0.7).until(EC.element_located_selection_state_to_be((By.ID, 'checky'), False))
        self.assertFalse(element.is_selected())
        try:
            WebDriverWait(self.driver, 0.7).until(EC.element_located_selection_state_to_be((By.ID, 'checky'), True))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException:
            pass
        self.driver.execute_script("setTimeout(function(){document.getElementById('checky').checked = true}, 200)")
        WebDriverWait(self.driver, 0.7).until(EC.element_located_selection_state_to_be((By.ID, 'checky'), True))
        self.assertTrue(element.is_selected())

    def testExpectedConditionAlertIsPresent(self):
        if self.driver.capabilities['browserName'] == 'phantomjs':
            pytest.xfail("phantomjs driver does not support alerts")
        self._loadPage('blank')
        try:
            WebDriverWait(self.driver, 0.7).until(EC.alert_is_present())
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException:
            pass
        self.driver.execute_script("setTimeout(function(){alert('alerty')}, 200)")
        WebDriverWait(self.driver, 0.7).until(EC.alert_is_present())
        alert = self.driver.switch_to.alert
        self.assertEqual('alerty', alert.text)
        alert.dismiss()

    # --- helpers -----------------------------------------------------------

    def _pageURL(self, name):
        # Resolve a test-page name to its URL on the fixture web server.
        return self.webserver.where_is(name + '.html')

    def _loadSimplePage(self):
        self._loadPage("simpleTest")

    def _loadPage(self, name):
        self.driver.get(self._pageURL(name))
| |
"""
Exception classes
=================
"""
from __future__ import annotations
import logging
import sys
from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Tuple
from montreal_forced_aligner.helper import TerminalPrinter, comma_join
if TYPE_CHECKING:
from montreal_forced_aligner.dictionary.pronunciation import PronunciationDictionaryMixin
from montreal_forced_aligner.models import G2PModel
from montreal_forced_aligner.textgrid import CtmInterval
# Public API of this module for `import *` and documentation tooling.
__all__ = [
    "MFAError",
    "SoxError",
    "G2PError",
    "PyniniAlignmentError",
    "ConfigError",
    "LMError",
    "LanguageModelNotFoundError",
    "ModelExtensionError",
    "ThirdpartyError",
    "TrainerError",
    "ModelError",
    "CorpusError",
    "ModelLoadError",
    "CorpusReadError",
    "ArgumentError",
    "AlignerError",
    "AlignmentError",
    "AlignmentExportError",
    "NoSuccessfulAlignments",
    "KaldiProcessingError",
    "TextParseError",
    "TextGridParseError",
    "DictionaryError",
    "NoDefaultSpeakerDictionaryError",
    "DictionaryPathError",
    "DictionaryFileError",
    "FileArgumentNotFoundError",
    "PretrainedModelNotFoundError",
    "MultipleModelTypesFoundError",
    "ModelTypeNotSupportedError",
    "PronunciationAcousticMismatchError",
    "PronunciationOrthographyMismatchError",
]
class MFAError(Exception):
    """
    Base exception class for all MFA errors.

    Parameters
    ----------
    base_error_message: str
        First line of the formatted error message
    """

    def __init__(self, base_error_message: str, *args, **kwargs):
        # Fixed: forward to Exception.__init__ so e.args is populated and the
        # exception pickles/repr's normally (the original dropped this call).
        super().__init__(base_error_message, *args)
        self.printer = TerminalPrinter()
        self.message_lines: List[str] = [base_error_message]

    @property
    def message(self) -> str:
        """Full formatted message, all lines joined with newlines."""
        return "\n".join(self.printer.format_info_lines(self.message_lines))

    def __str__(self) -> str:
        """Output the error"""
        return "\n".join(
            self.printer.format_info_lines(
                [self.printer.error_text(type(self).__name__) + f": {self.message}"]
            )
        )
class PlatformError(MFAError):
    """
    Raised when requested functionality is unavailable on the current platform

    Parameters
    ----------
    functionality_name: str
        Name of the unavailable functionality
    """

    def __init__(self, functionality_name):
        super().__init__("")
        name = self.printer.emphasized_text(functionality_name)
        platform = self.printer.error_text(sys.platform)
        self.message_lines = [f"Functionality for {name} is not available on {platform}."]
        if sys.platform == "win32":
            # Point Windows users at the WSL-based install path.
            self.message_lines.extend(
                [
                    "",
                    f" If you'd like to use {name} on Windows, please follow the MFA installation "
                    f"instructions for the Windows Subsystem for Linux (WSL).",
                ]
            )
class ThirdpartyError(MFAError):
    """
    Exception class for errors in third party binary (usually Kaldi or OpenFst)

    Parameters
    ----------
    binary_name: str
        Name of third party binary
    open_fst: bool, optional
        Flag for the error having to do with OpenFst
    open_blas: bool, optional
        Flag for the error having to do with the BLAS library
    libc: bool, optional
        Flag for the error having to do with the system libraries
    sox: bool, optional
        Flag for the error having to do with SoX
    """

    def __init__(self, binary_name, open_fst=False, open_blas=False, libc=False, sox=False):
        super().__init__("")
        self.message_lines = [f"Could not find '{self.printer.error_text(binary_name)}'."]
        self.message_lines.append(
            "Please ensure that you have installed MFA's conda dependencies and are in the correct environment."
        )
        # The flags are mutually exclusive by construction of the elif chain;
        # the first True flag (in declaration order) wins.
        if open_fst:
            self.message_lines.append(
                f"Please ensure that you are in an environment that has the {self.printer.emphasized_text('openfst')} conda package installed, "
                f"or that the {self.printer.emphasized_text('openfst')} binaries are on your path if you compiled them yourself."
            )
        elif open_blas:
            self.message_lines.append(
                f"Try installing {self.printer.emphasized_text('openblas')} via system package manager or verify it's on your system path?"
            )
        elif libc:
            self.message_lines.append(
                f"You likely have a different version of {self.printer.emphasized_text('glibc')} than the packages binaries use. "
                f"Try compiling {self.printer.emphasized_text('Kaldi')} on your machine and collecting the binaries via the "
                f"{self.printer.pass_text('mfa thirdparty kaldi')} command."
            )
        elif sox:
            # For SoX the "could not find" preamble doesn't apply (the binary
            # exists but lacks format support), so the message is replaced.
            self.message_lines = []
            self.message_lines.append(
                f"Your version of {self.printer.emphasized_text('sox')} does not support the file format in your corpus. "
                f"Try installing another version of {self.printer.emphasized_text('sox')} with support for {self.printer.error_text(binary_name)}."
            )
# Model Errors
# Model Errors
class ModelError(MFAError):
    """
    Base class for errors involving MFA model archives
    """

    pass
class ModelLoadError(ModelError):
    """
    Raised when a model archive cannot be parsed as an MFA model

    Parameters
    ----------
    path: str
        Path of the model archive
    """

    def __init__(self, path: str):
        super().__init__("")
        archive = self.printer.error_text(path)
        self.message_lines = [f"The archive {archive} could not be parsed as an MFA model"]
# Dictionary Errors
# Dictionary Errors
class DictionaryError(MFAError):
    """
    Base class for errors when constructing dictionary objects
    """

    pass
class NoDefaultSpeakerDictionaryError(DictionaryError):
    """
    Raised when a MultispeakerDictionary is missing its "default" dictionary
    """

    def __init__(self):
        super().__init__("")
        missing = self.printer.error_text("default")
        self.message_lines = [f'No "{missing}" dictionary was found.']
class DictionaryPathError(DictionaryError):
    """
    Raised when the given dictionary path does not exist

    Parameters
    ----------
    input_path: str
        Path of the pronunciation dictionary
    """

    def __init__(self, input_path: str):
        super().__init__("")
        shown = self.printer.error_text(input_path)
        self.message_lines = [f"The specified path for the dictionary ({shown}) was not found."]
class DictionaryFileError(DictionaryError):
    """
    Raised when the given dictionary path exists but is not a file

    Parameters
    ----------
    input_path: str
        Path of the pronunciation dictionary
    """

    def __init__(self, input_path: str):
        super().__init__("")
        shown = self.printer.error_text(input_path)
        self.message_lines = [f"The specified path for the dictionary ({shown}) is not a file."]
# Corpus Errors
# Corpus Errors
class CorpusError(MFAError):
    """
    Base class for errors when constructing Corpus objects
    """

    pass
class CorpusReadError(CorpusError):
    """
    Raised when a corpus file cannot be read

    Parameters
    ----------
    file_name: str
        File name that was not readable
    """

    def __init__(self, file_name: str):
        super().__init__("")
        name = self.printer.error_text(file_name)
        self.message_lines = [f"There was an error reading {name}."]
class TextParseError(CorpusReadError):
    """
    Raised when a lab/txt transcript file cannot be decoded

    Parameters
    ----------
    file_name: str
        File name that had the error
    """

    def __init__(self, file_name: str):
        super().__init__("")
        name = self.printer.error_text(file_name)
        self.message_lines = [
            f"There was an error decoding {name}, maybe try resaving it as utf8?"
        ]
class TextGridParseError(CorpusReadError):
    """
    Raised when a TextGrid file cannot be parsed

    Parameters
    ----------
    file_name: str
        File name that had the error
    error: str
        Error in TextGrid file
    """

    def __init__(self, file_name: str, error: str):
        super().__init__("")
        self.file_name = file_name
        self.error = error
        # Unlike sibling classes, this appends to (not replaces) the base lines.
        header = f"Reading {self.printer.emphasized_text(file_name)} has the following error:"
        self.message_lines += [header, "", "", self.error]
class SoxError(CorpusReadError):
    """
    Raised for errors when calling or locating SoX
    """

    pass
# Aligner Errors
# Aligner Errors
class AlignerError(MFAError):
    """
    Base class for errors raised during alignment
    """

    pass
class AlignmentError(MFAError):
    """
    Raised when alignment jobs fail; points the user at the Kaldi error logs

    Parameters
    ----------
    error_logs: list[str]
        List of Kaldi log files with errors
    """

    def __init__(self, error_logs: List[str]):
        super().__init__("")
        count = self.printer.error_text(len(error_logs))
        self.message_lines = [
            f"There were {count} job(s) with errors. For more information, please see:",
            "",
            "",
        ]
        self.message_lines.extend(self.printer.error_text(p) for p in error_logs)
class AlignmentExportError(AlignmentError):
    """
    Raised when exporting alignments (CTM processing) fails

    Parameters
    ----------
    error_dict: dict[tuple[str, int], str]
        Error dictionary mapping export stage and job to the error encountered
    """

    def __init__(self, error_dict: Dict[Tuple[str, int], str]):
        # Bypass AlignmentError.__init__, which expects a list of log paths.
        MFAError.__init__(self, "Error was encountered in processing CTMs:")
        self.message_lines += ["", ""]
        for stage_job, error in error_dict.items():
            self.message_lines += [f"{stage_job}:", error]
class CtmError(AlignmentError):
    """
    Raised when a CTM interval cannot be parsed

    Parameters
    ----------
    ctm: CtmInterval
        CTM interval that was not parsed correctly
    """

    def __init__(self, ctm: CtmInterval):
        # Bypass AlignmentError.__init__, which expects a list of log paths.
        MFAError.__init__(self, f"Error was encountered in processing CTM interval: {ctm}")
class NoSuccessfulAlignments(AlignerError):
    """
    Raised when no utterance could be aligned at all
    """

    pass
class PronunciationAcousticMismatchError(AlignerError):
    """
    Raised when the dictionary contains phones missing from the acoustic model

    Parameters
    ----------
    missing_phones: Collection[str]
        Phones that are not in the acoustic model
    """

    def __init__(self, missing_phones: Collection[str]):
        super().__init__("There were phones in the dictionary that do not have acoustic models: ")
        highlighted = [f"{self.printer.error_text(p)}" for p in sorted(missing_phones)]
        self.message_lines.append(comma_join(highlighted))
class PronunciationOrthographyMismatchError(AlignerError):
    """
    Raised when the corpus contains graphemes the G2P model does not cover

    Parameters
    ----------
    g2p_model: :class:`~montreal_forced_aligner.models.G2PModel`
        Specified G2P model
    dictionary: :class:`~montreal_forced_aligner.dictionary.pronunciation.PronunciationDictionaryMixin`
        Specified dictionary
    """

    def __init__(self, g2p_model: G2PModel, dictionary: PronunciationDictionaryMixin):
        super().__init__(
            "There were graphemes in the corpus that are not covered by the G2P model:"
        )
        uncovered = dictionary.graphemes - set(g2p_model.meta["graphemes"])
        self.message_lines.append(
            comma_join([f"{self.printer.error_text(g)}" for g in sorted(uncovered)])
        )
# Command line exceptions
# Command line exceptions
class ArgumentError(MFAError):
    """
    Base class for command-line argument parsing errors
    """

    pass
class FileArgumentNotFoundError(ArgumentError):
    """
    Raised when a file given on the command line does not exist

    Parameters
    ----------
    path: str
        Path not found
    """

    def __init__(self, path):
        super().__init__("")
        shown = self.printer.error_text(path)
        self.message_lines = [f'Could not find "{shown}".']
class PretrainedModelNotFoundError(ArgumentError):
    """
    Raised when a named pretrained model cannot be found

    Parameters
    ----------
    name: str
        Model name
    model_type: str, optional
        Model type searched
    available: list[str], optional
        List of models that were found
    """

    def __init__(
        self, name: str, model_type: Optional[str] = None, available: Optional[List[str]] = None
    ):
        super().__init__("")
        suffix = f" for {model_type}" if model_type else ""
        self.message_lines = [
            f'Could not find a model named "{self.printer.error_text(name)}"{suffix}.'
        ]
        if available:
            shown = comma_join([f"{self.printer.pass_text(x)}" for x in available])
            self.message_lines.append(f"Available: {shown}.")
class MultipleModelTypesFoundError(ArgumentError):
    """
    Raised when several model types match a given model name

    Parameters
    ----------
    name: str
        Model name
    possible_model_types: list[str]
        List of model types that have a model with the given name
    """

    def __init__(self, name: str, possible_model_types: List[str]):
        super().__init__("")
        self.message_lines = [f'Found multiple model types for "{self.printer.error_text(name)}":']
        highlighted = [f"{self.printer.error_text(t)}" for t in possible_model_types]
        self.message_lines.append(", ".join(highlighted))
        self.message_lines.append("Please specify a model type to inspect.")
class ModelExtensionError(ArgumentError):
    """
    Raised when a model path does not carry a supported file extension

    Parameters
    ----------
    name: str
        Model name
    model_type: str
        Model type
    extensions: list[str]
        Extensions that the model supports
    """

    def __init__(self, name: str, model_type: str, extensions: List[str]):
        super().__init__("")
        suffix = f" for {model_type}" if model_type else ""
        self.message_lines = [
            f'The path "{self.printer.error_text(name)}" does not have the correct extensions{suffix}.'
        ]
        if extensions:
            shown = comma_join([f"{self.printer.pass_text(x)}" for x in extensions])
            self.message_lines.append(f" Possible extensions: {shown}.")
class ModelTypeNotSupportedError(ArgumentError):
    """
    Raised when an unsupported model type is requested

    Parameters
    ----------
    model_type: str
        Model type
    model_types: list[str]
        List of supported model types
    """

    def __init__(self, model_type, model_types):
        super().__init__("")
        self.message_lines = [
            f'The model type "{self.printer.error_text(model_type)}" is not supported.'
        ]
        if model_types:
            shown = comma_join([f"{self.printer.pass_text(x)}" for x in sorted(model_types)])
            self.message_lines.append(f" Possible model types: {shown}.")
class ConfigError(MFAError):
    """
    Exception class for errors in configuration

    Base class for configuration failures (e.g. RootDirectoryError).
    """

    pass
class RootDirectoryError(ConfigError):
    """
    Exception class for errors using the MFA_ROOT_DIR

    Parameters
    ----------
    temporary_directory: str
        Directory that could not be created
    variable: str
        Name of the environment variable the user can set to override it
    """

    def __init__(self, temporary_directory, variable):
        super().__init__("")
        # Fixed: the original message opened "(tried ..." without ever
        # closing the parenthesis.
        self.message_lines = [
            f"Could not create a root MFA temporary directory (tried {self.printer.error_text(temporary_directory)}). ",
            f"Please specify a write-able directory via the {self.printer.emphasized_text(variable)} environment variable.",
        ]
class TrainerError(MFAError):
    """
    Exception class for errors in trainers
    """

    pass
class G2PError(MFAError):
    """
    Exception class for errors in G2P (grapheme-to-phoneme) processing
    """

    pass
class PyniniAlignmentError(G2PError):
    """
    Exception class for errors in alignment for Pynini training
    """

    def __init__(self, error_dict: Dict[str, Exception]):
        super().__init__("The following Pynini alignment jobs encountered errors:")
        self.message_lines.extend(["", ""])
        indent = self.printer.indent_string
        # One highlighted key line followed by the stringified exception.
        for job_name, exc in error_dict.items():
            self.message_lines.append(indent + self.printer.error_text(job_name))
            self.message_lines.append(indent + self.printer.emphasized_text(str(exc)))
class PyniniGenerationError(G2PError):
    """
    Exception class for errors generating pronunciations with Pynini
    """

    def __init__(self, error_dict: Dict[str, Exception]):
        super().__init__("The following words had errors in running G2P:")
        self.message_lines.extend(["", ""])
        indent = self.printer.indent_string
        # One highlighted word line followed by the stringified exception.
        for word, exc in error_dict.items():
            self.message_lines.append(indent + self.printer.error_text(word))
            self.message_lines.append(indent + self.printer.emphasized_text(str(exc)))
class LMError(MFAError):
    """
    Exception class for errors in language models
    """

    pass
class LanguageModelNotFoundError(LMError):
    """
    Exception class for a language model not being found
    """

    def __init__(self):
        # Fixed message; this error carries no extra context.
        super().__init__("Could not find a suitable language model.")
class KaldiProcessingError(MFAError):
    """
    Exception class for when a Kaldi binary has an exception

    Parameters
    ----------
    error_logs: list[str]
        List of Kaldi logs that had errors
    log_file: str, optional
        Overall log file to find more information
    """

    def __init__(self, error_logs: List[str], log_file: Optional[str] = None):
        super().__init__(
            f"There were {len(error_logs)} job(s) with errors when running Kaldi binaries."
        )
        if log_file is not None:
            self.message_lines.append(
                f" For more details, please check {self.printer.error_text(log_file)}"
            )
        self.error_logs = error_logs
        self.log_file = log_file

    def _build_message_lines(self) -> List[str]:
        """Construct message lines from the current error logs and log file."""
        lines = [
            f"There were {len(self.error_logs)} job(s) with errors when running Kaldi binaries."
        ]
        if self.log_file is not None:
            lines.append(
                f" For more details, please check {self.printer.error_text(self.log_file)}"
            )
        return lines

    def update_log_file(self, logger: logging.Logger) -> None:
        """
        Update the log file output

        Parameters
        ----------
        logger: logging.Logger
            Logger whose first handler's file becomes the new log file
        """
        if logger.handlers:
            self.log_file = logger.handlers[0].baseFilename
        # Rebuild via the shared helper instead of duplicating the
        # message-construction logic inline (previous duplication drifted
        # easily from __init__).
        self.message_lines = self._build_message_lines()
| |
"""
Geckoboard decorators.
"""
from __future__ import absolute_import
from collections import OrderedDict
from functools import wraps
from hashlib import md5
from xml.dom.minidom import Document
import base64
import json
from Crypto import Random
from Crypto.Cipher import AES
from django.conf import settings
from django.http import HttpResponse, HttpResponseForbidden
from django.utils.decorators import available_attrs
from django.views.decorators.csrf import csrf_exempt
import six
# Annotation types for the Geckoboard text widget (see TextWidgetDecorator).
# NOTE(review): the numeric codes are presumably fixed by the Geckoboard
# API; the non-sequential numbering (INFO=2, WARN=1) looks intentional.
TEXT_NONE = 0
TEXT_INFO = 2
TEXT_WARN = 1
class WidgetDecorator(object):
    """
    Geckoboard widget decorator.

    The decorated view must return a data structure suitable for
    serialization to XML or JSON for Geckoboard. See the Geckoboard
    API docs or the source of extending classes for details.

    If the ``GECKOBOARD_API_KEY`` setting is used, the request must
    contain the correct API key, or a 403 Forbidden response is
    returned.

    If the ``encrypted`` argument is set to True, then the data will be
    encrypted using ``GECKOBOARD_PASSWORD`` (JSON only).
    """
    def __new__(cls, *args, **kwargs):
        # Supports both bare usage (@widget) and parameterized usage
        # (@widget(encrypted=True)): consume our own keyword arguments,
        # keep the remainder as extra payload merged into every response.
        obj = object.__new__(cls)
        obj._encrypted = None
        if 'encrypted' in kwargs:
            obj._encrypted = kwargs.pop('encrypted')
        obj._format = None
        if 'format' in kwargs:
            obj._format = kwargs.pop('format')
        obj.data = kwargs
        try:
            # Bare usage: the view function arrives as the sole positional
            # argument, so decorate it immediately.
            return obj(args[0])
        except IndexError:
            # Parameterized usage: return the instance; it will be called
            # with the view function next.
            return obj
    def __call__(self, view_func):
        def _wrapped_view(request, *args, **kwargs):
            if not _is_api_key_correct(request):
                return HttpResponseForbidden("Geckoboard API key incorrect")
            view_result = view_func(request, *args, **kwargs)
            data = self._convert_view_result(view_result)
            try:
                # NOTE(review): this mutates decorator-level state that is
                # shared across requests; assumes the converted result is a
                # mapping (non-mappings fall through to plain assignment).
                self.data.update(data)
            except ValueError:
                self.data = data
            content, content_type = _render(request, self.data, self._encrypted, self._format)
            return HttpResponse(content, content_type=content_type)
        wrapper = wraps(view_func, assigned=available_attrs(view_func))
        return csrf_exempt(wrapper(_wrapped_view))
    def _convert_view_result(self, data):
        # Extending classes do view result mangling here.
        return data

# Lowercase alias so the decorator reads naturally as ``@widget``.
widget = WidgetDecorator
class NumberWidgetDecorator(WidgetDecorator):
    """
    Geckoboard Number widget decorator.

    The decorated view must return a tuple `(current, [previous])`, where
    `current` is the current value and `previous` is the previous value
    of the measured quantity.
    """

    def _convert_view_result(self, result):
        # Normalize to a list, then wrap each non-dict entry as {'value': x}.
        values = list(result) if isinstance(result, (tuple, list)) else [result]
        items = [entry if isinstance(entry, dict) else {'value': entry}
                 for entry in values]
        return {'item': items}

number_widget = NumberWidgetDecorator
class RAGWidgetDecorator(WidgetDecorator):
    """
    Geckoboard Red-Amber-Green (RAG) widget decorator.

    The decorated view must return a tuple with three tuples `(value,
    [text])`. The `value` parameters are the numbers shown in red,
    amber and green (in that order). The `text` parameters are optional
    and will be displayed next to the respective values in the
    dashboard.
    """

    def _convert_view_result(self, result):
        items = []
        for entry in result:
            if not isinstance(entry, (tuple, list)):
                entry = [entry]
            item = OrderedDict()
            # A None value renders as an empty string in the widget.
            item['value'] = '' if entry[0] is None else entry[0]
            if len(entry) > 1:
                item['text'] = entry[1]
            items.append(item)
        return {'item': items}

rag_widget = RAGWidgetDecorator
class TextWidgetDecorator(WidgetDecorator):
    """
    Geckoboard Text widget decorator.

    The decorated view must return a list of tuples `(message, [type])`.
    The `message` parameters are strings that will be shown in the
    widget. The `type` parameters are optional and tell Geckoboard how
    to annotate the messages. Use ``TEXT_INFO`` for informational
    messages, ``TEXT_WARN`` for warnings and ``TEXT_NONE`` for plain
    text (the default).
    """

    def _convert_view_result(self, result):
        if not isinstance(result, (tuple, list)):
            result = [result]
        items = []
        for entry in result:
            if not isinstance(entry, (tuple, list)):
                entry = [entry]
            item = OrderedDict()
            item['text'] = entry[0]
            # Missing or None annotation type falls back to plain text.
            explicit = len(entry) > 1 and entry[1] is not None
            item['type'] = entry[1] if explicit else TEXT_NONE
            items.append(item)
        return {'item': items}

text_widget = TextWidgetDecorator
class PieChartWidgetDecorator(WidgetDecorator):
    """
    Geckoboard Pie chart decorator.

    The decorated view must return a list of tuples `(value, label,
    color)`. The color parameter is a string 'RRGGBB[TT]' representing
    red, green, blue and optionally transparency.
    """

    def _convert_view_result(self, result):
        items = []
        for entry in result:
            if not isinstance(entry, (tuple, list)):
                entry = [entry]
            item = OrderedDict()
            item['value'] = entry[0]
            # Label and colour are optional positional extras.
            for pos, key in ((1, 'label'), (2, 'colour')):
                if len(entry) > pos:
                    item[key] = entry[pos]
            items.append(item)
        return {'item': items}

pie_chart = PieChartWidgetDecorator
class LineChartWidgetDecorator(WidgetDecorator):
    """
    Geckoboard Line chart decorator.

    The decorated view must return a tuple `(values, x_axis, y_axis,
    [color])`. The `values` parameter is a list of data points. The
    `x-axis` parameter is a label string or a list of strings, that will
    be placed on the X-axis. The `y-axis` parameter works similarly for
    the Y-axis. If there are more than one axis label, they are placed
    evenly along the axis. The optional `color` parameter is a string
    ``'RRGGBB[TT]'`` representing red, green, blue and optionally
    transparency.
    """

    def _convert_view_result(self, result):
        data = OrderedDict()
        data['item'] = list(result[0])
        data['settings'] = OrderedDict()

        def _axis_labels(value):
            # A single label may arrive as a bare string (or None).
            if value is None:
                value = ''
            return value if isinstance(value, (tuple, list)) else [value]

        if len(result) > 1:
            data['settings']['axisx'] = _axis_labels(result[1])
        if len(result) > 2:
            data['settings']['axisy'] = _axis_labels(result[2])
        if len(result) > 3:
            data['settings']['colour'] = result[3]
        return data

line_chart = LineChartWidgetDecorator
class GeckOMeterWidgetDecorator(WidgetDecorator):
    """
    Geckoboard Geck-O-Meter decorator.

    The decorated view must return a tuple `(value, min, max)`. The
    `value` parameter represents the current value. The `min` and `max`
    parameters represent the minimum and maximum value respectively.
    They are either a value, or a tuple `(value, text)`. If used, the
    `text` parameter will be displayed next to the minimum or maximum
    value.
    """

    def _convert_view_result(self, result):
        value, lower, upper = result
        data = OrderedDict()
        data['item'] = value
        data['max'] = OrderedDict()
        data['min'] = OrderedDict()
        # Each bound may be a bare value or a (value, text) pair.
        for key, bound in (('max', upper), ('min', lower)):
            if not isinstance(bound, (tuple, list)):
                bound = [bound]
            data[key]['value'] = bound[0]
            if len(bound) > 1:
                data[key]['text'] = bound[1]
        return data

geck_o_meter = GeckOMeterWidgetDecorator
class FunnelWidgetDecorator(WidgetDecorator):
    """
    Geckoboard Funnel decorator.

    The decorated view must return a dictionary with at least an `items`
    entry: `{'items': [(100, '100 %'), (50, '50 %')]}`.

    Optional keys are:
    type: 'standard' (default) or 'reverse'. Determines the
        order of the colours.
    percentage: 'show' (default) or 'hide'. Determines whether or
        not the percentage value is shown.
    sort: `False` (default) or `True`. Sort the entries by
        value or not.
    """

    def _convert_view_result(self, result):
        entries = result.get('items', [])
        if result.get('sort'):
            # Largest values first when sorting was requested.
            entries.sort(reverse=True)
        converted = OrderedDict()
        converted["item"] = [{"value": value, "label": text} for value, text in entries]
        converted["type"] = result.get('type', 'standard')
        converted["percentage"] = result.get('percentage', 'show')
        return converted

funnel = FunnelWidgetDecorator
class BulletWidgetDecorator(WidgetDecorator):
    """
    Geckoboard Bullet graph decorator.

    See http://support.geckoboard.com/entries/274940-custom-chart-widget-type-definitions
    for more information.

    The decorated method must return a dictionary containing these keys:

    Required keys:
    label: Main label, eg. "Revenue 2011 YTD".
    axis_points: Points on the axis, eg. [0, 200, 400, 600, 800, 1000].
    current: Current value range, eg. 500 or [100, 500]. A singleton
        500 is internally converted to [0, 500].
    comparative: Comparative value, eg. 600.

    Optional keys:
    orientation: One of 'horizontal' or 'vertical'. Defaults to horizontal.
    sublabel: Appears below main label.
    red: Red start and end, eg. [0,100]. Defaults are calculated
        from axis_points.
    amber: Amber start and end, eg. [0,100]. Defaults are calculated
        from axis_points.
    green: Green start and end, eg. [0,100]. Defaults are calculated
        from axis_points.
    projected: Projected value range, eg. 900 or [100, 900]. A singleton
        900 is internally converted to [0, 900].
    auto_scale: If true then values will be scaled down if they
        do not fit into Geckoboard's UI, eg. a value of 1100
        is represented as 1.1. If scaling takes place the sublabel
        is suffixed with that information. Default is true.
    """

    def _convert_view_result(self, result):
        # Check required keys. We do not do type checking since this level of
        # competence is assumed.
        for key in ('label', 'axis_points', 'current', 'comparative'):
            if key not in result:
                raise RuntimeError("Key %s is required" % key)
        # Handle singleton current and projected
        current = result['current']
        projected = result.get('projected', None)
        if not isinstance(current, (list, tuple)):
            current = [0, current]
        if (projected is not None) and not isinstance(projected, (list, tuple)):
            projected = [0, projected]
        # If red, amber and green are not *all* supplied calculate defaults
        axis_points = result['axis_points']
        red = result.get('red', None)
        amber = result.get('amber', None)
        green = result.get('green', None)
        if (red is None) or (amber is None) or (green is None):
            if axis_points:
                # Split the axis span into three equal bands: red (low),
                # amber (middle), green (high), with non-overlapping ends.
                max_point = max(axis_points)
                min_point = min(axis_points)
                third = (max_point - min_point) // 3
                red = (min_point, min_point + third - 1)
                amber = (min_point + third, max_point - third - 1)
                green = (max_point - third, max_point)
            else:
                red = amber = green = (0, 0)
        # Scan axis points for largest value and scale to avoid overflow in
        # Geckoboard's UI.
        auto_scale = result.get('auto_scale', True)
        if auto_scale and axis_points:
            scale_label_map = {1000000000: 'billions', 1000000: 'millions',
                               1000: 'thousands'}
            scale = 1
            value = max(axis_points)
            # Pick the largest unit the maximum axis value reaches.
            for n in (1000000000, 1000000, 1000):
                if value >= n:
                    scale = n
                    break
            # Little fixedpoint helper.
            # todo: use a fixedpoint library
            def scaler(value, scale):
                return float('%.2f' % (value*1.0 / scale))
            # Apply scale to all values
            if scale > 1:
                axis_points = [scaler(v, scale) for v in axis_points]
                current = (scaler(current[0], scale), scaler(current[1], scale))
                if projected is not None:
                    projected = (scaler(projected[0], scale),
                                 scaler(projected[1], scale))
                red = (scaler(red[0], scale), scaler(red[1], scale))
                amber = (scaler(amber[0], scale), scaler(amber[1], scale))
                green = (scaler(green[0], scale), scaler(green[1], scale))
                result['comparative'] = scaler(result['comparative'], scale)
                # Suffix sublabel
                sublabel = result.get('sublabel', '')
                if sublabel:
                    result['sublabel'] = '%s (%s)' % (sublabel,
                                                      scale_label_map[scale])
                else:
                    result['sublabel'] = scale_label_map[scale].capitalize()
        # Assemble structure
        data = dict(
            orientation=result.get('orientation', 'horizontal'),
            item=dict(
                label=result['label'],
                axis=dict(point=axis_points),
                range=dict(
                    red=dict(start=red[0], end=red[1]),
                    amber=dict(start=amber[0], end=amber[1]),
                    green=dict(start=green[0], end=green[1])
                ),
                measure=dict(current=dict(start=current[0], end=current[1])),
                comparative=dict(point=result['comparative'])
            )
        )
        # Add optional items
        if 'sublabel' in result:
            data['item']['sublabel'] = result['sublabel']
        if projected is not None:
            data['item']['measure']['projected'] = dict(start=projected[0],
                                                        end=projected[1])
        return data

bullet = BulletWidgetDecorator
def _is_api_key_correct(request):
    """Return whether the Geckoboard API key on the request is correct.

    If no ``GECKOBOARD_API_KEY`` setting is configured, every request is
    accepted. Geckoboard sends the key as the username portion of an HTTP
    Basic authorization header; the password portion is ignored.
    """
    api_key = getattr(settings, 'GECKOBOARD_API_KEY', None)
    if api_key is None:
        return True
    auth = request.META.get('HTTP_AUTHORIZATION', '').split()
    if len(auth) == 2:
        # Django exposes META header values as text on Python 3; the old
        # comparison against b'basic' could never match there.  Normalize
        # to text so both Python 2 and 3 behave.
        scheme = auth[0].lower()
        if isinstance(scheme, bytes):
            scheme = scheme.decode('latin1')
        if scheme == 'basic':
            # b64decode always yields bytes; decode before comparing with
            # the (text) setting value.
            request_key = base64.b64decode(auth[1]).split(b':')[0]
            return request_key.decode('utf-8') == api_key
    return False
def _derive_key_and_iv(password, salt, key_length, iv_length):
d = d_i = b''
while len(d) < key_length + iv_length:
d_i = md5(d_i + password + salt).digest()
d += d_i
return d[:key_length], d[key_length:key_length+iv_length]
def _encrypt(data):
    """Equivalent to OpenSSL using 256 bit AES in CBC mode"""
    BS = AES.block_size
    def pad(s):
        # PKCS#7-style padding: append n bytes of value n so the length
        # becomes a multiple of the block size.
        n = BS - len(s) % BS
        char = chr(n).encode('utf8')
        return s + n * char
    password = settings.GECKOBOARD_PASSWORD
    # OpenSSL "Salted__" layout: 8-byte magic followed by an 8-byte salt.
    salt = Random.new().read(BS - len('Salted__'))
    key, iv = _derive_key_and_iv(password, salt, 32, BS)
    cipher = AES.new(key, AES.MODE_CBC, iv)
    encrypted = b'Salted__' + salt + cipher.encrypt(pad(data))
    return base64.b64encode(encrypted)
def _render(request, data, encrypted, format=None):
    """
    Render the data to Geckoboard. If the `format` parameter is passed
    to the widget it defines the output format. Otherwise the output
    format is based on the `format` request parameter.

    A `format` parameter of ``json`` or ``2`` renders JSON output, any
    other value renders XML.
    """
    # Widget-level format wins; fall back to POST, then GET.
    chosen = format or request.POST.get('format', '') or request.GET.get('format', '')
    if chosen in ('json', '2'):
        return _render_json(data, encrypted)
    return _render_xml(data, encrypted)
def _render_json(data, encrypted=False):
data_json = json.dumps(data).encode('utf8')
if encrypted:
data_json = _encrypt(data_json)
return data_json, 'application/json'
def _render_xml(data, encrypted=False):
if encrypted:
raise ValueError("encryption requested for XML output but unsupported")
doc = Document()
root = doc.createElement('root')
doc.appendChild(root)
_build_xml(doc, root, data)
return doc.toxml(), 'application/xml'
def _build_xml(doc, parent, data):
    """Dispatch on the data's type to build the matching XML nodes."""
    if isinstance(data, dict):
        _build_dict_xml(doc, parent, data)
    elif isinstance(data, (tuple, list)):
        _build_list_xml(doc, parent, data)
    else:
        _build_str_xml(doc, parent, data)
def _build_str_xml(doc, parent, data):
    """Append ``data`` to ``parent`` as a text node, coercing it to text."""
    text = six.text_type(data)
    parent.appendChild(doc.createTextNode(text))
def _build_list_xml(doc, parent, data):
    """Build XML for every element of a sequence under the same parent."""
    for entry in data:
        _build_xml(doc, parent, entry)
def _build_dict_xml(doc, parent, data):
    """Build one child element per key; sequence values repeat the tag."""
    for tag in sorted(data.keys()):  # order tags for testing ease
        value = data[tag]
        # A list/tuple value produces one element per entry; anything else
        # produces a single element.
        entries = value if isinstance(value, (list, tuple)) else [value]
        for entry in entries:
            elem = doc.createElement(tag)
            _build_xml(doc, elem, entry)
            parent.appendChild(elem)
class GeckoboardException(Exception):
    """
    Represents an error with the Geckoboard decorators.
    """
    # NOTE(review): not raised anywhere in the code visible here; kept as
    # the module's public base exception for callers to catch.
| |
"""Read genome build configurations from Galaxy *.loc and bcbio-nextgen resource files.
"""
from six.moves import configparser
import glob
import os
import sys
from xml.etree import ElementTree
import six
import toolz as tz
import yaml
from bcbio import utils
from bcbio.cwl import cwlutils
from bcbio.distributed import objectstore
from bcbio.log import logger
from bcbio.ngsalign import star
from bcbio.pipeline import alignment
from bcbio.provenance import do
from bcbio.rnaseq import gtf
# ## bcbio-nextgen genome resource files
def get_resources(genome, ref_file, data):
    """Retrieve genome information from a genome-references.yaml file.

    genome -- genome build name; a "-test" suffix is stripped when locating
        the resource file
    ref_file -- reference file path; the <genome>-resources.yaml file is
        expected to live in the same directory
    data -- sample data dictionary, passed on for annotation preparation

    Raises IOError when the resource file does not exist.
    """
    base_dir = os.path.normpath(os.path.dirname(ref_file))
    resource_file = os.path.join(base_dir, "%s-resources.yaml" % genome.replace("-test", ""))
    if not os.path.exists(resource_file):
        raise IOError("Did not find resource file for %s: %s\n"
                      "To update bcbio_nextgen.py with genome resources for standard builds, run:\n"
                      "bcbio_nextgen.py upgrade -u skip"
                      % (genome, resource_file))
    with open(resource_file) as in_handle:
        resources = yaml.safe_load(in_handle)
    def resource_file_path(x):
        # Expand values that name files relative to the reference directory
        # into absolute paths; leave everything else untouched.
        if isinstance(x, six.string_types) and os.path.exists(os.path.join(base_dir, x)):
            return os.path.normpath(os.path.join(base_dir, x))
        return x
    cleaned = utils.dictapply(resources, resource_file_path)
    return ensure_annotations(cleaned, data)
def add_required_resources(resources):
    """Add default or empty values for required resources referenced in CWL.

    Every key path a CWL workflow may reference must exist; missing or
    falsy entries are filled with None.
    """
    required = [["variation", "cosmic"], ["variation", "clinvar"], ["variation", "dbsnp"],
                ["variation", "lcr"], ["variation", "polyx"],
                ["variation", "encode_blacklist"], ["variation", "gc_profile"],
                ["variation", "germline_het_pon"],
                ["variation", "train_hapmap"], ["variation", "train_indels"],
                ["variation", "editing"], ["variation", "exac"], ["variation", "esp"],
                ["variation", "gnomad_exome"],
                ["variation", "1000g"], ["aliases", "human"]]
    for key_path in required:
        if not tz.get_in(key_path, resources):
            resources = tz.update_in(resources, key_path, lambda _: None)
    return resources
def ensure_annotations(resources, data):
    """Prepare any potentially missing annotations for downstream processing in a local directory.

    Derives a gene BED file from the rnaseq transcript GFF when one is
    configured, writing it under the work directory's
    inputs/data/annotations folder.
    """
    transcript_gff = tz.get_in(["rnaseq", "transcripts"], resources)
    if transcript_gff and utils.file_exists(transcript_gff):
        out_dir = os.path.join(tz.get_in(["dirs", "work"], data),
                               "inputs", "data", "annotations")
        resources["rnaseq"]["gene_bed"] = gtf.gtf_to_bed(transcript_gff, out_dir)
    return resources
# ## Utilities
def abs_file_paths(xs, base_dir=None, ignore_keys=None, fileonly_keys=None, cur_key=None,
                   do_download=True):
    """Normalize any file paths found in a subdirectory of configuration input.

    base_dir -- directory to normalize relative paths to
    ignore_keys -- algorithm key names to ignore normalize for (keywords, not files/directories)
    fileonly_keys -- algorithm key names to only expand files (not directories)
    cur_key -- current key when calling recursively
    do_download -- whether remote inputs should be retrieved locally
    """
    ignore_keys = set([]) if ignore_keys is None else set(ignore_keys)
    fileonly_keys = set([]) if fileonly_keys is None else set(fileonly_keys)
    if base_dir is None:
        base_dir = os.getcwd()
    orig_dir = os.getcwd()
    os.chdir(base_dir)
    input_dir = os.path.join(base_dir, "inputs")
    # Restore the original working directory even if normalization raises;
    # previously an exception left the process chdir'd into base_dir.
    try:
        if isinstance(xs, dict):
            out = {}
            for k, v in xs.items():
                if k not in ignore_keys and v and isinstance(v, six.string_types):
                    # The literal string "none" is treated as a null value.
                    if v.lower() == "none":
                        out[k] = None
                    else:
                        out[k] = abs_file_paths(v, base_dir, ignore_keys, fileonly_keys, k,
                                                do_download=do_download)
                elif isinstance(v, (list, tuple)):
                    out[k] = [abs_file_paths(x, base_dir, ignore_keys, fileonly_keys, k,
                                             do_download=do_download)
                              for x in v]
                else:
                    out[k] = v
        elif isinstance(xs, six.string_types):
            if os.path.exists(xs) or (do_download and objectstore.is_remote(xs)):
                # Remote inputs are fetched into the local inputs directory.
                dl = objectstore.download(xs, input_dir)
                if dl and cur_key not in ignore_keys and not (cur_key in fileonly_keys
                                                              and not os.path.isfile(dl)):
                    out = os.path.normpath(os.path.join(base_dir, dl))
                else:
                    out = xs
            else:
                out = xs
        else:
            out = xs
    finally:
        os.chdir(orig_dir)
    return out
# ## Galaxy integration -- *.loc files
def _get_galaxy_loc_file(name, galaxy_dt, ref_dir, galaxy_base):
    """Retrieve Galaxy *.loc file for the given reference/aligner name.

    Returns a (loc_file, need_remap) pair.  Preference order: the file
    named in the data table configuration, then the aligner-specific loc
    file, falling back to the base location file (which requires remapping
    paths for the specific aligner).
    """
    if "file" in galaxy_dt and os.path.exists(os.path.join(galaxy_base, galaxy_dt["file"])):
        return os.path.join(galaxy_base, galaxy_dt["file"]), False
    if alignment.TOOLS[name].galaxy_loc_file is None:
        loc_file = os.path.join(ref_dir, alignment.BASE_LOCATION_FILE)
        need_remap = True
    else:
        loc_file = os.path.join(ref_dir, alignment.TOOLS[name].galaxy_loc_file)
        need_remap = False
    if not os.path.exists(loc_file):
        loc_file = os.path.join(ref_dir, alignment.BASE_LOCATION_FILE)
        need_remap = True
    return loc_file, need_remap
def _galaxy_loc_iter(loc_file, galaxy_dt, need_remap=False):
    """Iterator returning genome build and references from Galaxy *.loc file.

    Yields (dbkey, reference path) tuples.  Column positions come from the
    data table configuration when present; otherwise the first field is the
    dbkey and the last field the path (after stripping an optional leading
    "index" marker).
    """
    if "column" in galaxy_dt:
        dbkey_i = galaxy_dt["column"].index("dbkey")
        path_i = galaxy_dt["column"].index("path")
    else:
        dbkey_i = None
    if os.path.exists(loc_file):
        with open(loc_file) as in_handle:
            for line in in_handle:
                # Skip blank lines and comments.
                if line.strip() and not line.startswith("#"):
                    parts = [x.strip() for x in line.strip().split("\t")]
                    # Detect and report spaces instead of tabs
                    if len(parts) == 1:
                        parts = [x.strip() for x in line.strip().split(" ") if x.strip()]
                        if len(parts) > 1:
                            raise IOError("Galaxy location file uses spaces instead of "
                                          "tabs to separate fields: %s" % loc_file)
                    if dbkey_i is not None and not need_remap:
                        dbkey = parts[dbkey_i]
                        cur_ref = parts[path_i]
                    else:
                        # Heuristic layout: optional "index" marker, then
                        # dbkey first and reference path last.
                        if parts[0] == "index":
                            parts = parts[1:]
                        dbkey = parts[0]
                        cur_ref = parts[-1]
                    yield (dbkey, cur_ref)
def _get_ref_from_galaxy_loc(name, genome_build, loc_file, galaxy_dt, need_remap,
                             galaxy_config, data):
    """Retrieve reference genome file from Galaxy *.loc file.

    Reads from tool_data_table_conf.xml information for the index if it
    exists, otherwise uses heuristics to find line based on most common setups.
    """
    refs = [ref for dbkey, ref in _galaxy_loc_iter(loc_file, galaxy_dt, need_remap)
            if dbkey == genome_build]
    # Remapping need is re-derived from the tool definition here, overriding
    # the caller-supplied flag.
    remap_fn = alignment.TOOLS[name].remap_index_fn
    need_remap = remap_fn is not None
    if len(refs) == 0:
        raise ValueError("Did not find genome build %s in bcbio installation: %s" %
                         (genome_build, os.path.normpath(loc_file)))
    else:
        # Duplicate dbkeys: the most recently added entry wins.
        cur_ref = refs[-1]
    # Find genome directory and check for packed wf tarballs
    cur_ref_norm = os.path.normpath(utils.add_full_path(cur_ref, galaxy_config["tool_data_path"]))
    base_dir_i = cur_ref_norm.find("/%s/" % genome_build)
    base_dir = os.path.join(cur_ref_norm[:base_dir_i], genome_build)
    for tarball in glob.glob(os.path.join(base_dir, "*-wf.tar.gz")):
        cwlutils.unpack_tarballs(tarball, {"dirs": {"work": base_dir}}, use_subdir=False)
    if need_remap:
        assert remap_fn is not None, "%s requires remapping function from base location file" % name
        cur_ref = os.path.normpath(utils.add_full_path(cur_ref, galaxy_config["tool_data_path"]))
        cur_ref = remap_fn(os.path.abspath(cur_ref))
    return cur_ref
def _get_galaxy_tool_info(galaxy_base):
"""Retrieve Galaxy tool-data information from defaults or galaxy config file.
"""
ini_file = os.path.join(galaxy_base, "universe_wsgi.ini")
info = {"tool_data_table_config_path": os.path.join(galaxy_base, "tool_data_table_conf.xml"),
"tool_data_path": os.path.join(galaxy_base, "tool-data")}
config = configparser.ConfigParser()
config.read(ini_file)
if "app:main" in config.sections():
for option in config.options("app:main"):
if option in info:
info[option] = os.path.join(galaxy_base, config.get("app:main", option))
return info
def _get_galaxy_data_table(name, dt_config_file):
"""Parse data table config file for details on tool *.loc location and columns.
"""
out = {}
if os.path.exists(dt_config_file):
tdtc = ElementTree.parse(dt_config_file)
for t in tdtc.getiterator("table"):
if t.attrib.get("name", "") in [name, "%s_indexes" % name]:
out["column"] = [x.strip() for x in t.find("columns").text.split(",")]
out["file"] = t.find("file").attrib.get("path", "")
return out
def get_refs(genome_build, aligner, galaxy_base, data):
    """Retrieve the reference genome file location from galaxy configuration.

    Returns a dictionary keyed by tool name ("fasta" for samtools plus the
    requested aligner) with "base" and "indexes" entries, plus derived
    rtg/twobit locations when present on disk.
    """
    out = {}
    name_remap = {"samtools": "fasta"}
    if genome_build:
        galaxy_config = _get_galaxy_tool_info(galaxy_base)
        for name in [x for x in ("samtools", aligner) if x]:
            galaxy_dt = _get_galaxy_data_table(name, galaxy_config["tool_data_table_config_path"])
            loc_file, need_remap = _get_galaxy_loc_file(name, galaxy_dt, galaxy_config["tool_data_path"],
                                                        galaxy_base)
            cur_ref = _get_ref_from_galaxy_loc(name, genome_build, loc_file, galaxy_dt, need_remap,
                                               galaxy_config, data)
            base = os.path.normpath(utils.add_full_path(cur_ref, galaxy_config["tool_data_path"]))
            # Expand directories unless we are an aligner like minimap2 that uses the seq directory
            if os.path.isdir(base) and not (need_remap and os.path.basename(base) == "seq"):
                indexes = sorted(glob.glob(os.path.join(base, "*")))
            elif name != "samtools":
                indexes = sorted(glob.glob("%s*" % utils.splitext_plus(base)[0]))
            else:
                indexes = []
            name = name_remap.get(name, name)
            out[name] = {}
            if os.path.exists(base) and os.path.isfile(base):
                out[name]["base"] = base
            if indexes:
                out[name]["indexes"] = indexes
            # For references, add compressed inputs and indexes if they exist
            if name == "fasta" and "base" in out[name] and os.path.exists(out[name]["base"] + ".gz"):
                indexes = [out[name]["base"] + ".gz.fai", out[name]["base"] + ".gz.gzi",
                           utils.splitext_plus(out[name]["base"])[0] + ".dict"]
                out[name + "gz"] = {"base": out[name]["base"] + ".gz",
                                    "indexes": [x for x in indexes if os.path.exists(x)]}
    # add additional indices relative to the base
    if tz.get_in(["fasta", "base"], out):
        ref_dir, ref_filebase = os.path.split(out["fasta"]["base"])
        # RTG SDF directory sits alongside the sequence directory.
        rtg_dir = os.path.normpath(os.path.join(ref_dir, os.path.pardir, "rtg",
                                                "%s.sdf" % (os.path.splitext(ref_filebase)[0])))
        out["rtg"] = {"base": os.path.join(rtg_dir, "mainIndex"),
                      "indexes": [x for x in glob.glob(os.path.join(rtg_dir, "*"))
                                  if not x.endswith("/mainIndex")]}
        twobit = os.path.normpath(os.path.join(ref_dir, os.path.pardir, "ucsc",
                                               "%s.2bit" % (os.path.splitext(ref_filebase)[0])))
        if os.path.exists(twobit):
            out["twobit"] = twobit
    return out
def get_builds(galaxy_base):
    """Retrieve configured genome builds and reference files, using Galaxy configuration files.

    Allows multiple dbkey specifications in the same file, using the most recently added.
    """
    name = "samtools"
    galaxy_config = _get_galaxy_tool_info(galaxy_base)
    galaxy_dt = _get_galaxy_data_table(name, galaxy_config["tool_data_table_config_path"])
    loc_file, need_remap = _get_galaxy_loc_file(name, galaxy_dt, galaxy_config["tool_data_path"],
                                                galaxy_base)
    assert not need_remap, "Should not need to remap reference files"
    # Later entries overwrite earlier ones for duplicate dbkeys.
    fnames = {}
    for dbkey, fname in _galaxy_loc_iter(loc_file, galaxy_dt):
        fnames[dbkey] = fname
    return [(dbkey, fnames[dbkey]) for dbkey in sorted(fnames)]
| |
'''
keras mlp regression
'''
from __future__ import print_function
import numpy as np
from undecorated import undecorated
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Activation, Input
from keras import layers
from keras.callbacks import ModelCheckpoint
def res_block(input_tensor, stage, block):
    """Residual block: two 100-unit dense layers with an identity shortcut.

    stage/block only affect layer naming (res<stage><block>_branch...).
    """
    prefix = 'res' + str(stage) + block + '_branch'
    out = Dense(100, name=prefix + '2a')(input_tensor)
    out = Activation('relu')(out)
    out = Dense(100, name=prefix + '2c')(out)
    out = layers.add([out, input_tensor])
    return Activation('relu')(out)
def bc_dec(func):
    """Decorator applying a boundary-condition transform on the unit square.

    In the interior, subtracts the boundary term x1*sin(pi*x0) from func(x)
    and divides by the distance-to-boundary factor x0(x0-1)x1(x1-1).  On the
    boundary that factor is zero, so the boundary term itself is returned.
    """
    def wrapped(x):
        weight = x[0] * (x[0] - 1) * x[1] * (x[1] - 1)
        boundary = x[1] * np.sin(np.pi * x[0])
        if weight != 0:
            return (func(x) - boundary) / weight
        return boundary + weight
    return wrapped
# @bc_dec
def analytic_solution(x):
    """Analytic solution sampled at x = (x0, x1) on the unit square."""
    norm = np.exp(np.pi) - np.exp(-np.pi)
    growth = np.exp(np.pi * x[1]) - np.exp(-np.pi * x[1])
    return (1 / norm) * np.sin(np.pi * x[0]) * growth
nx = 30
ny = 30
x_space = np.linspace(0, 1, nx)
y_space = np.linspace(0, 1, ny)
print('generate sample data from analytic solution')
x_bc_l = np.asarray([[0, y] for y in y_space])
x_bc_r = np.asarray([[1, y] for y in y_space])
x_bc_b = np.asarray([[x, 0] for x in x_space])
x_bc_t = np.asarray([[x, 1] for x in x_space])
n_train = 1024 * 100
x_train = np.random.rand(n_train, 2)
x_train = np.concatenate((x_train, x_bc_l, x_bc_r, x_bc_b, x_bc_t))
y_train = [analytic_solution(x) for x in x_train]
y_train = np.reshape(np.asarray(y_train), (-1, 1))
print('Building model...')
batch_size = 1024
epochs = 500
vsplit = 0.01
# Functional-API model: 2-D coordinate input -> Dense(100) -> three
# residual blocks -> single linear output.
inputs = Input(shape=(2,))
# a layer instance is callable on a tensor, and returns a tensor
x = Dense(100, activation='relu')(inputs)
x = res_block(x, stage=1, block='a')
# NOTE(review): blocks 'b' and 'c' both use stage=2 — confirm this is
# intentional and does not collide on layer names inside res_block.
x = res_block(x, stage=2, block='b')
x = res_block(x, stage=2, block='c')
predictions = Dense(1, activation='linear')(x)
# This creates a model that includes the Input layer and all hidden layers.
model = Model(inputs=inputs, outputs=predictions)
# compile model
from keras import optimizers
adam = optimizers.Adam(lr=0.000001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.99)
# BUG FIX: the configured optimizer instance above was previously ignored
# because the string 'adam' (Keras default hyper-parameters) was passed to
# compile(); pass the instance so lr/decay actually take effect.
model.compile(loss='mse',
              optimizer=adam,
              metrics=['accuracy'])
# checkpoint (save the best model based on validation loss, every 10 epochs)
# filepath = "./tmp/weights-improvement-{epoch:02d}-{val_loss:.2e}.hdf5"
filepath = "./tmp/weights.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min', period=10)
callbacks_list = [checkpoint]
# fit the model; vsplit holds out 1% of the samples for validation.
history = model.fit(
    x_train, y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=2,
    validation_split=vsplit,
    callbacks=callbacks_list)
# score = model.evaluate(x_test, y_test,
# batch_size=batch_size, verbose=1)
# print('Test score:', score[0])
# print('Test accuracy:', score[1])
if vsplit:
    # summarize history for loss
    fig = plt.figure()
    # plt.plot(history.history['loss'])
    # plt.plot(history.history['val_loss'])
    # Log-scale y axis makes the MSE decay easier to read.
    plt.semilogy(history.history['loss'])
    plt.semilogy(history.history['val_loss'])
    plt.title('mse')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()
# visualisation
# 1. analytical solution evaluated on the nx-by-ny grid
x_test = np.zeros((ny * nx, 2))
surface = np.zeros((ny, nx))
for i, x in enumerate(x_space):
    for j, y in enumerate(y_space):
        # NOTE(review): flat index i * nx + j is only correct because
        # nx == ny here — confirm before making the grid non-square.
        x_test[i * nx + j] = [x, y]
        # undecorated() strips the @bc_dec wrapper so the raw analytic
        # solution is evaluated — assumes 'undecorated' is imported
        # earlier in this file; TODO confirm.
        surface[i][j] = undecorated(analytic_solution)([x, y])
###
fig = plt.figure()
ax = fig.gca(projection='3d')
X, Y = np.meshgrid(x_space, y_space)
surf = ax.plot_surface(X, Y, surface, rstride=1, cstride=1, cmap=cm.viridis,
                       linewidth=0, antialiased=False)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.set_zlim(0, 2)
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
plt.colorbar(surf)
# 2.test solution (network prediction on the same grid)
surface_predict = model.predict(x_test).reshape(ny, nx)
fig = plt.figure()
ax = fig.gca(projection='3d')
X, Y = np.meshgrid(x_space, y_space)
surf_pdt = ax.plot_surface(X, Y, surface_predict, rstride=1, cstride=1, cmap=cm.viridis,
                           linewidth=0, antialiased=False)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.set_zlim(0, 2)
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
plt.colorbar(surf_pdt)
# 3.error surface (absolute difference, note the tight z limit)
fig = plt.figure()
ax = fig.gca(projection='3d')
X, Y = np.meshgrid(x_space, y_space)
surf_error = ax.plot_surface(X, Y, abs(surface_predict - surface), rstride=1, cstride=1, cmap=cm.viridis,
                             linewidth=0, antialiased=False)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.set_zlim(0, 0.01)
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
plt.colorbar(surf_error)
# 4.Plot actual vs prediction for training set
fig = plt.figure()
plt.plot(surface.reshape(-1), surface_predict.reshape(-1), 'ro')
# Compute R-Square value for training set
from sklearn.metrics import r2_score
TestR2Value = r2_score(surface.reshape(-1), surface_predict.reshape(-1))
print("Training Set R-Square=", TestR2Value)
| |
#!/usr/bin/env python
#
# Map weather service icon codes to my limited set held on the e-ink device
#
def interpret_icons(service, id):
    """Map a weather service's condition code to the limited icon set
    held on the e-ink device.

    Parameters:
        service: weather provider name, "openweathermap" or
            "weatherunderground".
        id: the service-specific condition code, as a string.

    Returns a dict with keys "label" (human-readable condition text) and
    "icon" (one of RAIN, CLOUD, MOST, MIXED, SUN).

    Raises ValueError for an unknown service and KeyError for an unknown
    condition code.  (The original implementation printed an error for a
    bad service and then crashed with a NameError; it also contained a
    duplicate "clear" entry, removed here.)
    """
    # (label, icon) pairs keyed by condition code, one table per service.
    if service == "openweathermap":
        table = {
            "200": ("thunderstorm with light rain", "RAIN"),
            "201": ("thunderstorm with rain", "RAIN"),
            "202": ("thunderstorm with heavy rain", "RAIN"),
            "210": ("light thunderstorm", "RAIN"),
            "211": ("thunderstorm", "RAIN"),
            "212": ("heavy thunderstorm", "RAIN"),
            "221": ("ragged thunderstorm", "RAIN"),
            "230": ("thunderstorm with light drizzle", "RAIN"),
            "231": ("thunderstorm with drizzle", "RAIN"),
            "232": ("thunderstorm with heavy drizzle", "RAIN"),
            "300": ("light intensity drizzle", "RAIN"),
            "301": ("drizzle", "RAIN"),
            "302": ("heavy intensity drizzle", "RAIN"),
            "310": ("light intensity drizzle rain", "RAIN"),
            "311": ("drizzle rain", "RAIN"),
            "312": ("heavy intensity drizzle rain", "RAIN"),
            "313": ("shower rain and drizzle", "RAIN"),
            "314": ("heavy shower rain and drizzle", "RAIN"),
            "321": ("shower drizzle", "RAIN"),
            "500": ("light rain", "RAIN"),
            "501": ("moderate rain", "RAIN"),
            "502": ("heavy intensity rain", "RAIN"),
            "503": ("very heavy rain", "RAIN"),
            "504": ("extreme rain", "RAIN"),
            "511": ("freezing rain", "RAIN"),
            "520": ("light intensity shower rain", "RAIN"),
            "521": ("shower rain", "RAIN"),
            "522": ("heavy intensity shower rain", "RAIN"),
            "531": ("ragged shower rain", "RAIN"),
            "600": ("light snow", "RAIN"),
            "601": ("snow", "RAIN"),
            "602": ("heavy snow", "RAIN"),
            "611": ("sleet", "RAIN"),
            "612": ("shower sleet", "RAIN"),
            "615": ("light rain and snow", "RAIN"),
            "616": ("rain and snow", "RAIN"),
            "620": ("light shower snow", "RAIN"),
            "621": ("shower snow", "RAIN"),
            "622": ("heavy shower snow", "RAIN"),
            "701": ("mist", "RAIN"),
            "711": ("smoke", "CLOUD"),
            "721": ("haze", "MOST"),
            "731": ("sand, dust whirls", "MOST"),
            "741": ("fog", "MOST"),
            "751": ("sand", "MOST"),
            "761": ("dust", "MOST"),
            "762": ("volcanic ash", "MOST"),
            "771": ("squalls", "MIXED"),
            "781": ("tornado", "MIXED"),
            "800": ("clear sky", "SUN"),
            "801": ("few clouds", "CLOUD"),
            "802": ("scattered clouds", "MOST"),
            "803": ("broken clouds", "MOST"),
            "804": ("overcast clouds", "CLOUD"),
            "900": ("tornado", "MIXED"),
            "901": ("tropical storm", "MIXED"),
            "902": ("hurricane", "MIXED"),
            "903": ("cold", "MIXED"),
            "904": ("hot", "SUN"),
            "905": ("windy", "MIXED"),
            "906": ("hail", "MIXED"),
            "951": ("calm", "SUN"),
            "952": ("light breeze", "MIXED"),
            "953": ("gentle breeze", "MIXED"),
            "954": ("moderate breeze", "MIXED"),
            "955": ("fresh breeze", "MIXED"),
            "956": ("strong breeze", "MIXED"),
            "957": ("high wind, near gale", "MIXED"),
            "958": ("gale", "MIXED"),
            "959": ("severe gale", "MIXED"),
            "960": ("storm", "MIXED"),
            "961": ("violent storm", "MIXED"),
            "962": ("hurricane", "MIXED"),
        }
    elif service == "weatherunderground":
        table = {
            "partlycloudy": ("Partly cloudy", "MIXED"),
            "mostlycloudy": ("Mostly cloudy", "MOST"),
            "clear": ("Clear", "SUN"),
            "cloudy": ("Cloudy", "CLOUD"),
        }
    else:
        raise ValueError("Error: bad service selected: %r" % (service,))
    label, icon = table[id]
    return {"label": label, "icon": icon}
| |
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.axes_grid1 import make_axes_locatable
import math
from concise.preprocessing.sequence import DNA, RNA, AMINO_ACIDS
from concise.utils.letters import all_letters
from collections import OrderedDict
from matplotlib import pyplot
from matplotlib.patches import PathPatch
from matplotlib.path import Path
from descartes.patch import Polygon, PolygonPath, PolygonPatch
from shapely.wkt import loads as load_wkt
from shapely import affinity
import re
def heatmap(w, vmin=None, vmax=None, diverge_color=False,
            ncol=1,
            plot_name=None, vocab=["A", "C", "G", "T"], figsize=(6, 2)):
    """Plot a heatmap from weight matrix w

    vmin, vmax = z axis range
    diverge_color = Should we use diverging colors?
    plot_name = plot_title
    vocab = vocabulary (corresponds to the first axis)

    A 3-D w draws one subplot per slice along the last axis, laid out in
    ncol columns.  Returns the matplotlib Figure.
    """
    # Generate y and x values from the dimension lengths
    assert len(vocab) == w.shape[0]
    plt_y = np.arange(w.shape[0] + 1) + 0.5
    plt_x = np.arange(w.shape[1] + 1) - 0.5
    z_min = w.min()
    z_max = w.max()
    if vmin is None:
        vmin = z_min
    if vmax is None:
        vmax = z_max
    if diverge_color:
        color_map = plt.cm.RdBu
    else:
        color_map = plt.cm.Blues
    fig = plt.figure(figsize=figsize)
    # multiple axis
    if len(w.shape) == 3:
        n_plots = w.shape[2]
        nrow = math.ceil(n_plots / ncol)
    else:
        n_plots = 1
        nrow = 1
        ncol = 1
    for i in range(n_plots):
        if len(w.shape) == 3:
            w_cur = w[:, :, i]
        else:
            w_cur = w
        ax = plt.subplot(nrow, ncol, i + 1)
        plt.tight_layout()
        im = ax.pcolormesh(plt_x, plt_y, w_cur, cmap=color_map,
                           vmin=vmin, vmax=vmax, edgecolors="white")
        ax.grid(False)
        ax.set_yticklabels([""] + vocab, minor=False)
        ax.yaxis.set_major_locator(MaxNLocator(integer=True))
        ax.set_xticks(np.arange(w_cur.shape[1] + 1))
        ax.set_xlim(plt_x.min(), plt_x.max())
        ax.set_ylim(plt_y.min(), plt_y.max())
        # nice scale location:
        # http://stackoverflow.com/questions/18195758/set-matplotlib-colorbar-size-to-match-graph
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        fig.colorbar(im, cax=cax)
        if plot_name is not None:
            # BUG FIX: was `n_plots > 0` (always true), so a single-panel
            # plot got a spurious " 0" appended to its title.
            if n_plots > 1:
                pln = plot_name + " {0}".format(i)
            else:
                pln = plot_name
            ax.set_title(pln)
        ax.set_aspect('equal')
    return fig
# -----------------------------------------------------------------------
#
#
# Code adoped from https://github.com/kundajelab/dragonn, (c) 2016 Kundaje Lab
def standardize_polygons_str(data_str):
    """Given a POLYGON string, standardize the coordinates to a 1x1 grid.

    Input : data_str (a WKT-style "POLYGON ((...))" string)
    Output: tuple of shapely polygon objects rescaled into the unit square
    """
    # find all of the polygons in the letter (for instance an A
    # needs to be constructed from 2 polygons)
    path_strs = re.findall(r"\(\(([^\)]+?)\)\)", data_str.strip())
    # convert the data into a numpy array
    polygons_data = []
    for path_str in path_strs:
        data = np.array([
            tuple(map(float, x.split())) for x in path_str.strip().split(",")])
        polygons_data.append(data)
    # standardize the coordinates.
    # BUG FIX: np.vstack was previously fed bare generator expressions,
    # which modern NumPy (>= 1.16) rejects; pass lists instead.
    min_coords = np.vstack([data.min(0) for data in polygons_data]).min(0)
    max_coords = np.vstack([data.max(0) for data in polygons_data]).max(0)
    for data in polygons_data:
        # shift to the origin, then scale each axis into [0, 1]
        data[:, ] -= min_coords
        data[:, ] /= (max_coords - min_coords)
    polygons = []
    for data in polygons_data:
        polygons.append(load_wkt(
            "POLYGON((%s))" % ",".join(" ".join(map(str, x)) for x in data)))
    return tuple(polygons)
# ----------------------
# Pre-computed, unit-square-normalized polygon outlines for every
# drawable letter (built once at import time).
letter_polygons = {k: standardize_polygons_str(v) for k, v in all_letters.items()}
# Letter -> display-color mapping for each supported vocabulary.
VOCABS = {"DNA": OrderedDict([("A", "green"),
                              ("C", "blue"),
                              ("G", "orange"),
                              ("T", "red")]),
          "RNA": OrderedDict([("A", "green"),
                              ("C", "blue"),
                              ("G", "orange"),
                              ("U", "red")]),
          "AA": OrderedDict([('A', '#CCFF00'),
                             ('B', "orange"),
                             ('C', '#FFFF00'),
                             ('D', '#FF0000'),
                             ('E', '#FF0066'),
                             ('F', '#00FF66'),
                             ('G', '#FF9900'),
                             ('H', '#0066FF'),
                             ('I', '#66FF00'),
                             ('K', '#6600FF'),
                             ('L', '#33FF00'),
                             ('M', '#00FF00'),
                             ('N', '#CC00FF'),
                             ('P', '#FFCC00'),
                             ('Q', '#FF00CC'),
                             ('R', '#0000FF'),
                             ('S', '#FF3300'),
                             ('T', '#FF6600'),
                             ('V', '#99FF00'),
                             ('W', '#00CCFF'),
                             ('Y', '#00FFCC'),
                             ('Z', 'blue')]),
          "RNAStruct": OrderedDict([("P", "red"),
                                    ("H", "green"),
                                    ("I", "blue"),
                                    ("M", "orange"),
                                    ("E", "violet")]),
          }
# make sure things are in order (re-key each vocab in its canonical order)
VOCABS["AA"] = OrderedDict((k, VOCABS["AA"][k]) for k in AMINO_ACIDS)
VOCABS["DNA"] = OrderedDict((k, VOCABS["DNA"][k]) for k in DNA)
VOCABS["RNA"] = OrderedDict((k, VOCABS["RNA"][k]) for k in RNA)
def add_letter_to_axis(ax, let, col, x, y, height):
    """Add 'let' with position x,y and height height to matplotlib axis 'ax'.

    A letter is a tuple of 1 or 2 polygons; two-polygon letters (those
    with a hole, e.g. 'A' or 'O') draw the second polygon in white.
    """
    if len(let) not in (1, 2):
        raise ValueError("3 or more Polygons are not supported")
    fill_colors = [col, "white"][:len(let)]
    for poly, fill in zip(let, fill_colors):
        # Stretch vertically to the requested height, then move into place.
        stretched = affinity.scale(poly, yfact=height, origin=(0, 0, 0))
        placed = affinity.translate(stretched, xoff=x, yoff=y)
        ax.add_patch(PolygonPatch(placed, edgecolor=fill, facecolor=fill))
    return
# TODO - add figsize???
def seqlogo(letter_heights, vocab="DNA", ax=None):
    """Make a logo plot

    # Arguments
        letter_heights: "motif length" x "vocabulary size" numpy array
    Can also contain negative values.
        vocab: str, Vocabulary name. Can be: DNA, RNA, AA, RNAStruct.
        ax: matplotlib axis
    """
    ax = ax or plt.gca()
    assert letter_heights.shape[1] == len(VOCABS[vocab])
    x_range = [1, letter_heights.shape[0]]
    # NOTE: the original also built clipped pos_heights/neg_heights copies
    # of letter_heights here but never used them; removed as dead code.
    for x_pos, heights in enumerate(letter_heights):
        # Sort by height so letters stack outward from the baseline.
        letters_and_heights = sorted(zip(heights, list(VOCABS[vocab].keys())))
        y_pos_pos = 0.0
        y_neg_pos = 0.0
        for height, letter in letters_and_heights:
            color = VOCABS[vocab][letter]
            polygons = letter_polygons[letter]
            if height > 0:
                add_letter_to_axis(ax, polygons, color, 0.5 + x_pos, y_pos_pos, height)
                y_pos_pos += height
            else:
                # Negative heights grow downward from the baseline.
                add_letter_to_axis(ax, polygons, color, 0.5 + x_pos, y_neg_pos, height)
                y_neg_pos += height
    # if add_hline:
    #     ax.axhline(color="black", linewidth=1)
    ax.set_xlim(x_range[0] - 1, x_range[1] + 1)
    ax.grid(False)
    ax.set_xticks(list(range(*x_range)) + [x_range[-1]])
    ax.set_aspect(aspect='auto', adjustable='box')
    ax.autoscale_view()
def seqlogo_fig(letter_heights, vocab="DNA", figsize=(10, 2), ncol=1, plot_name=None):
    """Plot one or more sequence logos in a single figure.

    # Arguments
        letter_heights: 2D array (positions x vocab), or 3D array whose
            last axis indexes multiple motifs, each drawn in its own subplot.
        vocab: vocabulary name passed through to seqlogo.
        figsize: figure size passed to plt.figure.
        ncol: number of subplot columns for multi-motif input.
        plot_name: Title of the plot. Can be a list of names
    """
    fig = plt.figure(figsize=figsize)
    if len(letter_heights.shape) == 3:
        n_plots = letter_heights.shape[2]
        nrow = math.ceil(n_plots / ncol)
        if isinstance(plot_name, list):
            assert len(plot_name) == n_plots
    else:
        n_plots = 1
        nrow = 1
        ncol = 1
    for i in range(n_plots):
        if len(letter_heights.shape) == 3:
            w_cur = letter_heights[:, :, i]
        else:
            w_cur = letter_heights
        ax = plt.subplot(nrow, ncol, i + 1)
        plt.tight_layout()
        # plot the motif
        seqlogo(w_cur, vocab, ax)
        # add the title
        if plot_name is not None:
            if isinstance(plot_name, list):
                pln = plot_name[i]
            # BUG FIX: was `n_plots > 0` (always true), which appended a
            # spurious " 0" to the title of single-motif plots.
            elif n_plots > 1:
                pln = plot_name + " {0}".format(i)
            else:
                pln = plot_name
            ax.set_title(pln)
    return fig
| |
# Created By: Virgil Dupras
# Created On: 2006/03/19
# Copyright 2010 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
import sys
import objc
from jobprogress import job
from hscommon.cocoa.inter import signature, PyFairware
from hscommon.cocoa.objcmin import NSObject
from core import app_cocoa, design
# Fix py2app imports which chokes on relative imports
from core import app, fs_utils, sqlfs
from core.sqlfs import _sql, music, strings, utils
from hsfs import auto, stats, tree, music
from hsfs.phys import music
from hsaudiotag import aiff, flac, genres, id3v1, id3v2, mp4, mpeg, ogg, wma
class PyMassRenamePanel(NSObject):
    """Objective-C bridge for the mass-rename panel.

    Thin wrapper delegating to design.MassRenamePanel; trailing
    underscores in method names map to Objective-C selector colons
    via PyObjC, so these names must not change.
    """
    def setRefDir(self,refdir):
        #You MUST call this before starting to use the class
        self.panel = design.MassRenamePanel(refdir)
    def changeExampleSong(self):
        self.panel.ChangeExample()
    def getExampleDisplayAfter(self):
        return self.panel.example_after
    def getExampleDisplayBefore(self):
        return self.panel.example_before
    def getModel(self):
        return self.panel.model
    def getWhitespace(self):
        return self.panel.whitespace
    def setCustomModel_(self,model):
        self.panel.custom_model = model
    def setModelSelectedRow_(self,row):
        self.panel.model_index = row
    def setWhitespaceSelectedRow_(self,row):
        self.panel.whitespace_index = row
class PySplitPanel(NSObject):
    """Objective-C bridge for the collection-splitting panel.

    Thin wrapper delegating to design.SplittingPanel; trailing
    underscores in method names map to Objective-C selector colons
    via PyObjC, so these names must not change.
    """
    def setRefDir(self,refdir):
        #You MUST call this before starting to use the class
        self.panel = design.SplittingPanel(refdir)
    def getGroupingExample(self):
        return self.panel.example
    def getCapacity(self):
        return self.panel.capacity
    def getGroupingLevel(self):
        return self.panel.grouping_level
    def getModel(self):
        return self.panel.model
    def setCapacitySelectedRow_(self,row):
        self.panel.capacity_index = row
    def setCustomCapacity_(self,capacity):
        self.panel.custom_capacity = capacity
    def setCustomModel_(self,model):
        self.panel.custom_model = model
    def setGroupingLevel_(self,level):
        self.panel.grouping_level = level
    def setModelSelectedRow_(self,row):
        self.panel.model_index = row
class PyApp(NSObject):
    # NOTE(review): empty stub — presumably required so the Cocoa
    # runtime/nib can resolve the class name; confirm before removing.
    pass #fake class
class PyMusicGuru(PyFairware):
    """Objective-C bridge exposing the musicGuru core app to the Cocoa UI.

    Every method is a thin pass-through to self.py (an
    app_cocoa.MusicGuru instance).  Trailing underscores in method names
    map to Objective-C selector colons via PyObjC, so the names must not
    change.  Methods are grouped by the #--- section comments below.
    """
    def init(self):
        self = super(PyMusicGuru,self).init()
        self.py = app_cocoa.MusicGuru()
        return self
    #---Locations
    def addLocationWithPath_name_removeable_(self, path, name, removeable):
        return self.py.AddLocation(path, name, removeable)
    def canAddLocationWithPath_name_(self,path,name):
        return self.py.CanAddLocation(path,name)
    def setPath_ofLocationNamed_(self, path, name):
        self.py.set_location_path(name, path)
    def locationNamesInBoard_writable_(self,in_board,writable):
        return self.py.GetLocationNames(in_board,writable)
    def removeLocationNamed_(self,name):
        self.py.RemoveLocationNamed(name)
    def toggleLocation_(self,index):
        self.py.ToggleLocationIndex(index)
    def updateLocationNamed_(self, name):
        self.py.update_location(name)
    #---Board
    def conflictCount(self):
        return len(self.py.board.allconflicts)
    def emptyBoard(self):
        self.py.board.Empty()
    def getBoardStats(self):
        return self.py.board.stats_line
    def getMassRenamePanel(self):
        # Builds a fresh bridge panel bound to the current board.
        result = PyMassRenamePanel.alloc().init()
        result.setRefDir(self.py.board)
        return result
    def getSplitPanel(self):
        # Builds a fresh bridge panel bound to the current board.
        result = PySplitPanel.alloc().init()
        result.setRefDir(self.py.board)
        return result
    def isNodeConflicted_(self,node_path):
        return self.py.IsNodeConflicted(node_path)
    def isBoardSplitted(self):
        return self.py.board.splitted
    def massRenameWithModel_whitespaceType_(self, model, whitespace):
        self.py.MassRename(model, whitespace)
    def moveConflicts(self):
        #Returns true is at least one conflict has been moved
        return self.py.board.MoveConflicts()
    def moveConflictsAndOriginals(self):
        #Returns true is at least one conflict has been moved
        return self.py.board.MoveConflicts(True)
    def moveToIgnoreBox_(self,node_paths):
        self.py.MoveToIgnoreBox(node_paths)
    def newFolderIn_(self,node_path):
        return self.py.CreateFolderInNode(node_path)
    def performDragFrom_withNodes_to_withNode_(self,source_tag,source_node_paths,dest_tag,dest_node_path):
        return self.py.PerformDrag(source_tag,source_node_paths,dest_tag,dest_node_path)
    def removeEmptyFolders(self):
        self.py.RemoveEmptyDirs()
    def renameNode_to_(self,node_path,new_name):
        return self.py.RenameNode(node_path,new_name)
    def selectBoardSongs_(self,node_paths):
        self.py.SelectBoardSongs(node_paths)
    def splitWithModel_capacity_groupingLevel_(self, model, capacity, grouping_level):
        # No-op if the board is already split.
        if self.py.board.splitted:
            return
        self.py.Split(model, capacity, grouping_level)
    def switchConflictAndOriginal_(self,node_path):
        self.py.SwitchConflictAndOriginal(node_path)
    def unsplit(self):
        self.py.board.Unsplit()
    #---Materialize
    def copyOrMove_toPath_onNeedCDPanel_(self,copy,destination,panel):
        self.py.CopyOrMove(copy, destination, panel)
    def renameInRespectiveLocations(self):
        self.py.RenameInRespectiveLocations()
    #---Misc
    def isNodeContainer_(self,node_path):
        return self.py.IsNodeContainer(node_path)
    def updateCollection(self):
        self.py.UpdateCollection()
    #---Data
    # PyObjC type signature: returns int, takes an int tag.
    @signature('i@:i')
    def getOutlineViewMaxLevel_(self, tag):
        return 0 # no max level
    # PyObjC type signature: returns object, takes int tag and object path.
    @signature('@@:i@')
    def getOutlineView_childCountsForPath_(self, tag, node_path):
        return self.py.GetOutlineViewChildCounts(tag, node_path)
    def getOutlineView_valuesForIndexes_(self,tag,node_path):
        return self.py.GetOutlineViewValues(tag,node_path)
    def getOutlineView_markedAtIndexes_(self,tag,node_path):
        return False
    def getTableViewCount_(self,tag):
        return self.py.GetTableViewCount(tag)
    def getTableViewMarkedIndexes_(self,tag):
        return self.py.GetTableViewMarkedIndexes(tag)
    def getTableView_valuesForRow_(self,tag,row):
        return self.py.GetTableViewValues(tag,row)
    #---Worker
    def getJobProgress(self):
        return self.py.progress.last_progress
    def getJobDesc(self):
        return self.py.progress.last_desc
    def cancelJob(self):
        self.py.progress.job_cancelled = True
    def jobCompleted_(self, jobid):
        pass
    #---Registration
    def appName(self):
        return "musicGuru"
| |
# Copyright 2010 Harry Bock <hbock@ele.uri.edu>
# See LICENSE for licensing information.
# Active test scheduler and circuit manager for TorBEL.
# We come from the __future__.
from __future__ import with_statement
import os
import time
import random
import resource
import threading
from copy import copy
from collections import deque
from datetime import datetime, timedelta
from torbel import config
from torbel.controller import reactor
from torbel.logger import *
from twisted.internet import task
log = get_logger("torbel.scheduler")
class TestScheduler:
    """ Abstract base class for all test schedulers. """
    # Owning torbel controller; set in __init__.
    controller = None
    # Human-readable name, overridden by subclasses.
    name = "Abstract"
    def __init__(self, controller, max_pending_factor = 0.5, export_interval = 5):
        self.controller = controller
        self.terminated = False
        ## Circuit dictionaries.
        # Established circuits under test.
        self.circuits = {}
        # Circuits in the process of being built.
        self.pending_circuits = {}
        self.pending_circuit_lock = threading.Lock()
        self.pending_circuit_cond = threading.Condition(self.pending_circuit_lock)
        # Circuit failure metrics and bookkeeping.
        self.retry_routers = set()
        self.circuit_fail_count = 0
        self.circuit_retry_success_count = 0
        # Base max running circuits on the total number of file descriptors
        # we can have open (hard limit returned by getrlimit) and the maximum
        # number of file descriptors per circuit, adjusting for possible pending
        # circuits, TorCtl connection, stdin/out, and other files.
        # NOTE(review): written for Python 2 — `/` below is integer division
        # there; under Python 3 this would produce a float.
        max_files = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
        circuit_limit = max_files / len(controller.test_ports)
        self.max_running_circuits = min(config.max_built_circuits, circuit_limit)
        self.max_pending_circuits = int(self.max_running_circuits * max_pending_factor)
        # Export interval is in minutes; converted to seconds in start().
        self.export_interval = export_interval
        self.init()
    def init(self):
        """ Initialization routine for a custom scheduler. Don't override
        __init__. """
        pass
    def start(self):
        """ Start the scheduler. """
        # Start the export looping task, but don't run it immediately -
        # wait until the first export_interval (converted to seconds) to pass.
        reactor.callLater(self.export_interval * 60, self.export)
    def export(self):
        """ Force the controller to export all test data. """
        # Re-schedules itself, so this repeats every export_interval minutes.
        self.controller.export()
        reactor.callLater(self.export_interval * 60, self.export)
    def next(self):
        """ Return a set of routers to be tested. May block until enough routers
        are deemed ready by the scheduler. """
        log.verbose2("Request more circuits. (%d pending, %d running).",
                     len(self.pending_circuits),
                     len(self.circuits))
        with self.pending_circuit_cond:
            # Block until we have less than ten circuits built or
            # waiting to be built.
            while len(self.pending_circuits) >= self.max_pending_circuits or \
                    len(self.circuits) >= self.max_running_circuits:
                self.pending_circuit_cond.wait(3.0)
                # We're done here.
                if self.terminated:
                    return []
                self.controller.feed_watchdog()
            # Get what the child scheduler class wants to test.
            return list(self.fetch_next_tests())
    def new_consensus(self, cache):
        """ Called when a NEWCONSENSUS event occurs. cache is a dictionary
        of the entire consensus, keyed by router ID hash. """
        pass
    def new_descriptor(self, router):
        """ Called when a NEWDESC event occurs. router is the new descriptor
        RouterRecord object. """
        pass
    def fetch_next_tests(self):
        """ Scheduler-specific interface that returns a list of
        routers to retry and test. TestScheduler.next() takes these
        results and performs rate-limiting, so it may not always test
        every router returned by this method.

        Return value is of the form (new_tests, retry_tests), where
        retry_tests is a list of routers that recently failed
        unexpectedly and should be tested again.
        """
        # NOTE(review): NotImplementedError would be the conventional
        # exception here; also, the concrete subclasses below return a
        # single set/list, not the (new_tests, retry_tests) pair the
        # docstring describes — confirm which contract is intended.
        raise ValueError("You must implement fetch_next_tests()!")
    def retry_soon(self, router):
        """ Inidcate to the scheduler that the controller was not able
        to complete a stream test or circuit to router, but the result
        may indicate a temporary failure. The scheduler should retry
        all tests to router as soon as possible."""
        self.retry_routers.add(router)
    def retry_later(self, router):
        """ Indicate to the scheduler that the controller was not able to
        complete a stream test due to a possibly temporary failure, and that
        it should retry at a longer interval than retry_soon. """
        # Default behavior is to use the retry_soon behavior unless
        # implemented otherwise.
        self.retry_soon(router)
    def stop(self):
        """ Stop the scheduler. """
        with self.pending_circuit_cond:
            self.pending_circuit_cond.notify()
            self.terminated = True
    def circ_pending(self, circ_id, router):
        # Record a circuit that has been requested but not yet built.
        with self.pending_circuit_lock:
            self.pending_circuits[circ_id] = router
    def circ_built(self, event):
        # Handle a CIRC BUILT event: promote the circuit from pending to
        # running and start the connection test.
        circ_id = event.circ_id
        with self.pending_circuit_cond:
            if circ_id in self.pending_circuits:
                router = self.pending_circuits[circ_id]
                del self.pending_circuits[circ_id]
                # Notify scheduler thread that we have
                # completed building a circuit and we could
                # need to pre-build more.
                self.pending_circuit_cond.notify()
            else:
                return
        # If we BUILT a circuit to this router, it is not unreachable.
        # TODO: But it could still be that all streams through it fail,
        # with inconclusive error messages. We have to handle that!
        router.unreachable = False
        # If we succeeded in building this router on retry,
        # reset its failure count to give it a clean slate.
        if router.retry:
            self.circuit_retry_success_count += 1
            router.retry = False
            log.verbose1("Retry for %s successful after %d failures (%d/%d %.2f%%)!",
                         router.nickname, router.circuit_failures,
                         self.circuit_retry_success_count,
                         self.circuit_fail_count + self.circuit_retry_success_count,
                         100 * float(self.circuit_retry_success_count) / \
                             (self.circuit_fail_count + self.circuit_retry_success_count))
            router.circuit_failures = 0
        log.verbose1("Successfully built circuit %d for %s.",
                     circ_id, router.nickname)
        with self.pending_circuit_lock:
            self.circuits[circ_id] = router
            self.controller.connect_test(router)
    def circ_closed(self, event):
        # Handle a CIRC CLOSED event for both running and pending circuits;
        # a premature FINISHED close of an in-progress test is re-routed
        # through circ_failed.
        circ_id = event.circ_id
        handle_as_failed = False
        with self.pending_circuit_cond:
            if circ_id in self.circuits:
                router = self.circuits[circ_id]
                # FINISHED = "The circuit has expired for being dirty or old."
                # (tor-spec.txt 5.4, "Tearing down circuits"). Treat this as
                # an error condition if we have not yet completed the test.
                if event.reason == "FINISHED":
                    if router.current_test and router.current_test.circ_id == circ_id:
                        handle_as_failed = True
                # Normal circuit closing close.
                else:
                    log.verbose2("Closed circuit %d (%s).", circ_id,
                                 self.circuits[circ_id].nickname)
                del self.circuits[circ_id]
            elif circ_id in self.pending_circuits:
                # Pending circuit closed before being built (can this happen?)
                log.debug("Pending circuit closed (%d)?", circ_id)
                router = self.pending_circuits[circ_id]
                del self.pending_circuits[circ_id]
                # Not technically an explicit failure, but the circuit is already
                # closed, so don't bother doing it again.
                # Only close if this circuit belongs to the current test.
                if router.current_test and router.current_test.circ_id == circ_id:
                    self.controller.test_cleanup(router, circ_failed = True)
                self.pending_circuit_cond.notify()
        if handle_as_failed:
            self.circ_failed(event)
    def circ_failed(self, event):
        # Handle a CIRC FAILED event: record the failure, clean up the
        # current test, and queue the router for retry where appropriate.
        circ_id = event.circ_id
        retry = False
        # We sometimes get a CIRC FAILED event after calling close_circuit,
        # so we should probably ignore these messages to make sure we don't
        # go in circles retrying the circuit build.
        if event.reason == "REQUESTED":
            return
        with self.pending_circuit_cond:
            if circ_id in self.circuits:
                log.verbose1("Established test circuit %d failed: %s", circ_id, event.reason)
                router = self.circuits[circ_id]
                router.circuit_failures += 1
                del self.circuits[circ_id]
                retry = True
            elif circ_id in self.pending_circuits:
                # Circuit failed without being built.
                # Delete from pending_circuits and notify
                # CircuitBuilder that the pending_circuits dict
                # has changed.
                self.circuit_fail_count += 1
                router = self.pending_circuits[circ_id]
                if len(event.path) >= 1:
                    router.circuit_failures += 1
                    log.verbose1("Circ to %s failed (r:%s remr:%s). %d failures",
                                 router.nickname, event.reason, event.remote_reason,
                                 router.circuit_failures)
                else:
                    # We failed to extend to the entry guard. This more than
                    # likely means we have a bad guard. Remove this guard.
                    log.debug("Bad guard: circuit to %s failed (reason %s).",
                              router.nickname, event.reason)
                    if router.guard:
                        self.controller.remove_guard(router.guard)
                # Remove from pending circuits.
                del self.pending_circuits[circ_id]
                if router.current_test and router.current_test.circ_id == circ_id:
                    self.controller.test_cleanup(router, circ_failed = True)
                self.pending_circuit_cond.notify()
        # Don't call retry_soon within a pending_circuit_lock critical section.
        # This will cause a deadlock race condition due to trying to acquire
        # locks in ConservativeScheduler out of order.
        # TODO: Do locking better.
        if retry:
            # Append this router to our failure list, and let the scheduler
            # decide if testing should be re-tried.
            self.retry_soon(router)
    def retry_candidates(self):
        """ Return a list of circuits that have recently failed and are candidates
        for retrying the test. """
        control = self.controller
        with self.pending_circuit_lock:
            # Cap retries at half the pending-circuit budget.
            max_retry = min(self.max_pending_circuits / 2,
                            len(self.retry_routers))
            # Look through the circuit failure queue and determine
            # which should be retried and which should wait until the next
            # run-through of testing.
            retry_set = set()
            retry_not_ready = []
            while len(retry_set) < max_retry and len(self.retry_routers) > 0:
                router = self.retry_routers.pop()
                if router.circuit_failures >= 3:
                    log.debug("%s: Too many failures.", router.nickname)
                    # If we fail too many times, set the router unreachable
                    # flag so we don't export its data.
                    router.unreachable = True
                elif router.is_ready():
                    retry_set.add(router)
                    router.retry = True
                # If a router is not ready to be retried (currently under test),
                # put it back on the retry list.
                else:
                    retry_not_ready.append(router)
            for router in retry_not_ready:
                self.retry_routers.add(router)
        return retry_set
    def print_stats(self):
        # Log current circuit counts for debugging.
        log.debug("%d pending circuits, %d running circuits.",
                  len(self.pending_circuits),
                  len(self.circuits))
    def lock_state(self):
        # Snapshot of lock states for deadlock debugging.
        return [("pending_circuit_lock", self.pending_circuit_lock.locked())]
class HammerScheduler(TestScheduler):
    """ The Hammer test scheduler hath no mercy. This scheduler will
    continually test every router it knows about. Very good for
    stress-testing torbel and the Tor network itself, bad in practice."""
    name = "HAMMER"
    def fetch_next_tests(self):
        # Return every ready router, oldest test first, rate-limited to
        # the number of free pending-circuit slots (minus retries).
        control = self.controller
        retry = self.retry_candidates()
        # Filter testable routers and sort them by the time their last test
        # started.
        with self.controller.consensus_cache_lock:
            ready = sorted(filter(lambda router: router.is_ready(),
                                  self.controller.router_cache.values()),
                           key = lambda r: r.last_test.start_time)
        # Only return up to self.max_pending_circuits routers to test.
        available_pending = self.max_pending_circuits - len(self.pending_circuits)
        return set(ready[:(available_pending - len(retry))]) | retry
class ConservativeScheduler(TestScheduler):
    """ Implement meeee! """
    name = "Conservative"
    def init(self):
        # Queue of routers awaiting their first/next test, fed by
        # NEWCONSENSUS and NEWDESC events.
        self.router_count = 0
        self.router_list = deque()
        self.new_router_lock = threading.Lock()
        self.new_router_cond = threading.Condition(self.new_router_lock)
    def new_consensus(self, cache):
        # Add NEWCONSENSUS data to our test cache.
        # TODO: This will cause lots of duplicate tests if we get a NEWCONSENSUS
        # soon after starting TorBEL, but it really shouldn't be a problem with
        # how fast we test.
        # NOTE(review): Python 2 semantics — cache.values() is a list there;
        # under Python 3 copy() of a dict view would not be shuffleable.
        cache_values = copy(cache.values())
        self.router_count = len(cache_values)
        # Shuffle so testing order doesn't follow consensus order.
        random.shuffle(cache_values)
        with self.new_router_cond:
            for router in cache_values:
                self.router_list.append(router)
            self.new_router_cond.notify()
    def new_descriptor(self, router):
        # Append new descriptor to our list and notify a (possibly)
        # sleeping fetch_next_tests.
        with self.new_router_cond:
            self.router_list.append(router)
            self.new_router_cond.notify()
    def retry_soon(self, router):
        # Call parent class and notify possibly sleeping fetch_next_tests
        # call to get things rolling again.
        TestScheduler.retry_soon(self, router)
        with self.new_router_cond:
            self.new_router_cond.notify()
    def fetch_next_tests(self):
        # Block until at least one router (new or retry) is testable,
        # then return up to the available circuit budget, oldest-tested
        # first.
        testable = 0
        with self.new_router_cond:
            # Only return up to self.max_pending_circuits routers to test.
            while not self.terminated and testable == 0:
                # Start with our retry candidates.
                test_set = self.retry_candidates()
                # Grab the number of available test circuits...
                with self.pending_circuit_lock:
                    available = self.max_pending_circuits - len(self.pending_circuits) - len(test_set)
                # If we have available circuits, grab as many routers to test as
                # possible.
                if available > 0:
                    for i in range(min(len(self.router_list), available)):
                        candidate = self.router_list.popleft()
                        test_set.add(candidate)
                # If we don't have any routers to test, sleep until we are
                # notified of new routers.
                testable = len(test_set)
                if testable == 0:
                    self.new_router_cond.wait()
                    # Boom, bail.
                    if self.terminated:
                        return []
        return sorted(list(test_set), key = lambda r: r.last_test.end_time)
    def retry_later(self, router):
        """ Retry router in five minutes. """
        reactor.callLater(5 * 60, lambda: self.retry_soon(router))
    def print_stats(self):
        TestScheduler.print_stats(self)
        log.debug("new_router_lock.locked(): %s", self.new_router_lock.locked())
        log.debug("%d pending new tests, %d pending retries.",
                  len(self.router_list), len(self.retry_routers))
    def stop(self):
        # Notify new_router_cond first.
        TestScheduler.stop(self)
        with self.new_router_cond:
            self.new_router_cond.notify()
    def lock_state(self):
        # Extend parent's lock snapshot with our own lock.
        return [("new_router_lock", self.new_router_lock.locked())] + \
            TestScheduler.lock_state(self)
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Buildbot master utility functions.
"""
import json
import errno
import logging
import os
import sys
import time
BUILD_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(BUILD_DIR, 'scripts'))
from tools import mastermap
from common import find_depot_tools # pylint: disable=W0611
import subprocess2
def sublists(superlist, n):
  """Breaks a list into list of sublists, each of length no more than n."""
  return [superlist[start:start + n] for start in range(0, len(superlist), n)]
def pid_exists(pid):
  """Returns True if there is a process in the system with given |pid|."""
  try:
    # Signal 0 performs error checking only; it never touches the process.
    os.kill(pid, 0)
    return True
  except OSError as error:
    if error.errno == errno.ESRCH:
      return False
    if error.errno == errno.EPERM:
      # The process exists but belongs to someone else.
      return True
    raise
def is_master_alive(master, path):
  """Reads master's *.pid file and checks for corresponding PID in the system.

  If there is no such process, removes stale *.pid file and returns False.

  Args:
    master: master name, used only for log messages.
    path: directory containing the master's twistd.pid file.

  Returns:
    True - *.pid file exists and corresponding process is running.
    False - *.pid file doesn't exist or there is no such process.
  """
  pid_path = os.path.join(path, 'twistd.pid')
  contents = None
  try:
    with open(pid_path) as f:
      contents = f.read()
    if pid_exists(int(contents.strip())):
      return True
    # Process is gone but the pid file remains: fall through and delete it.
    logging.warning('Ghost twistd.pid for %s, removing it', master)
  except IOError as error:
    # Missing pid file simply means the master is not running.
    if error.errno == errno.ENOENT:
      return False
    raise
  except ValueError:
    # Pid file did not contain an integer.
    logging.warning('Corrupted twistd.pid for %s, removing it: %r',
                    master, contents)
  remove_file(pid_path)
  return False
def remove_file(path):
  """Best-effort removal of the file at |path|.

  Missing files and files that cannot be deleted are silently ignored.
  """
  try:
    os.remove(path)
  except OSError:
    return
def start_master(master, path, dry_run=False):
  """Asynchronously starts the |master| at given |path|.

  If |dry_run| is True, will start the master in a limited mode suitable only
  for integration testing purposes.

  Returns:
    True - the master was successfully started.
    False - the master failed to start, details are in the log.
  """
  try:
    env = os.environ.copy()
    if dry_run:
      # Ask ChromiumGitPoller not to pull git repos.
      # NOTE(review): '0' looks odd for an opt-out flag -- presumably the
      # consumer keys on the variable's presence, not its value; confirm.
      env['NO_REVISION_AUDIT'] = '0'
      env['POLLER_DRY_RUN'] = '1'
    # 'make start' daemonizes the master; a zero exit here does not guarantee
    # it keeps running (see wait_for_start for the follow-up probe).
    subprocess2.check_output(
        ['make', 'start'], timeout=120, cwd=path, env=env,
        stderr=subprocess2.STDOUT)
  except subprocess2.CalledProcessError as e:
    logging.error('Error: cannot start %s' % master)
    print e
    return False
  return True
def stop_master(master, path, force=False):
  """Issues 'stop' command and waits for master to terminate. If |force| is True
  will try to kill master process if it fails to terminate in time by itself.

  Returns:
    True - master was stopped, killed or wasn't running.
    False - master is still running.
  """
  if terminate_master(master, path, 'stop', timeout=10):
    return True
  if force:
    logging.warning('Master %s failed to stop in time, killing it', master)
    if terminate_master(master, path, 'kill', timeout=2):
      return True
    logging.warning('Master %s is still running', master)
  else:
    logging.warning('Master %s failed to stop in time', master)
  return False
def terminate_master(master, path, command, timeout=10):
  """Executes 'make |command|' and waits for master to stop running or until
  |timeout| seconds pass.

  |command| is expected to be 'stop' or 'kill' (see stop_master).

  Returns:
    True - the master was terminated or wasn't running.
    False - the command failed, or master failed to terminate in time.
  """
  if not is_master_alive(master, path):
    return True
  try:
    env = os.environ.copy()
    # NOTE(review): same NO_REVISION_AUDIT='0' value as start_master --
    # presumably presence of the variable is what matters; confirm.
    env['NO_REVISION_AUDIT'] = '0'
    subprocess2.check_output(
        ['make', command], timeout=5, cwd=path, env=env,
        stderr=subprocess2.STDOUT)
  except subprocess2.CalledProcessError as e:
    # The make target may fail because the master died between the liveness
    # check and the command; re-check before reporting failure.
    if not is_master_alive(master, path):
      return True
    logging.warning('Master %s was not terminated: \'make %s\' failed: %s',
                    master, command, e)
    return False
  return wait_for_termination(master, path, timeout=timeout)
def wait_for_termination(master, path, timeout=10):
  """Waits for master to finish running and cleans up pid file.

  Polls every 0.1s for at most |timeout| seconds.

  Returns:
    True - master has stopped or wasn't running.
    False - master failed to terminate in time.
  """
  started = time.time()
  deadline = started + timeout
  while True:
    now = time.time()
    if now > deadline:
      return False
    if not is_master_alive(master, path):
      logging.info('Master %s stopped in %.1f sec.', master, now - started)
      return True
    time.sleep(0.1)
def search_for_exceptions(path):
  """Looks in twistd.log for an exception.

  Returns:
    A string with the exception context (up to 15 lines before the
    '--- <exception caught here> ---' marker through the next blank line, or
    at most 10 lines after it) if an exception is found, False otherwise.
  """
  twistd_log = os.path.join(path, 'twistd.log')
  with open(twistd_log) as f:
    lines = f.readlines()
    stripped_lines = [l.strip() for l in lines]
    try:
      i = stripped_lines.index('--- <exception caught here> ---')
      # Found an exception at line 'i'! Now find line 'j', the number
      # of lines from 'i' where there's a blank line. If we cannot find
      # a blank line, then we will show up to 10 lines from i.
      # NOTE(review): the [i:-1] slice never examines the final line, so a
      # blank line only at EOF falls back to the 10-line default -- confirm
      # this is intended.
      try:
        j = stripped_lines[i:-1].index('')
      except ValueError:
        j = 10
      # Print from either 15 lines back from i or the start of the log
      # text to j lines after i.
      return ''.join(lines[max(i-15, 0):i+j])
    except ValueError:
      # No exception marker anywhere in the log.
      pass
  return False
def json_probe(sensitive, allports):
  """Looks through the port range and finds a master listening.

  Args:
    sensitive: Indicates whether partial success (a server answering but not
        returning valid master JSON) should be reported.
    allports: iterable of candidate ports to probe on localhost.

  Returns (port, name) or None.
  """
  procs = {}
  # Probe in batches of 30 so we do not spawn hundreds of curls at once.
  for ports in sublists(allports, 30):
    for port in ports:
      # urllib2 does not play nicely with threading. Using curl lets us avoid
      # the GIL.
      procs[port] = subprocess2.Popen(
          ['curl', '-fs', '-m2', 'http://localhost:%d/json/project' % port],
          stdin=subprocess2.VOID,
          stdout=subprocess2.PIPE,
          stderr=subprocess2.VOID)
    for port in ports:
      stdout, _ = procs[port].communicate()
      if procs[port].returncode != 0:
        # Nothing listening (or request failed) on this port.
        continue
      try:
        data = json.loads(stdout) or {}
        if not data or (not 'projectName' in data and not 'title' in data):
          logging.debug('Didn\'t get valid data from port %d' % port)
          if sensitive:
            # Report that *something* is listening even though it does not
            # look like a buildbot master.
            return (port, None)
          continue
        name = data.get('projectName', data.get('title'))
        return (port, name)
      except ValueError:
        logging.warning('Didn\'t get valid data from port %d' % port)
        # presume this is some other type of server
        # E.g. X20 on a dev workstation.
        continue
  return None
def wait_for_start(master, name, path, ports):
  """Waits for ~30s for the masters to open its web server.

  Polls |ports| until the master's JSON endpoint answers, checking twistd.log
  for exceptions while waiting.

  Returns:
    A falsy value on success, or an error string (exception text from the
    log, a name mismatch message, or a timeout message) on failure.
  """
  logging.info("Waiting for master %s on ports %s" % (name, ports))
  # 300 iterations x 0.1s sleep on the failure path ~= 30 seconds.
  for i in range(300):
    result = json_probe(False, ports)
    if result is None:
      # Not up yet; bail out early if the log already shows an exception.
      exception = search_for_exceptions(path)
      if exception:
        return exception
      time.sleep(0.1)
      continue
    port, got_name = result  # pylint: disable=unpacking-non-sequence
    if got_name != name:
      return 'Wrong %s name, expected %s, got %s on port %d' % (
          master, name, got_name, port)
    logging.info("Found master %s on port %s, iteration %d" % (name, port, i))
    # The server is now answering /json requests. Check that the log file
    # doesn't have any other exceptions just in case there was some other
    # unexpected error.
    return search_for_exceptions(path)
  return 'Didn\'t find open port for %s' % master
def check_for_no_masters():
  """Returns True iff no master (or other server) answers on the well-known
  master port ranges, logging what was found otherwise."""
  # Standard buildbot master port ranges, minus ports known to be used by
  # non-master services.
  ports = range(8000, 8099) + range(8200, 8299) + range(9000, 9099)
  ports = [x for x in ports if x not in mastermap.PORT_BLACKLIST]
  # sensitive=True: report anything listening, master or not.
  result = json_probe(True, ports)
  if result is None:
    return True
  if result[1] is None:
    logging.error('Something is listening on port %d' % result[0])
    return False
  logging.error('Found unexpected master %s on port %d' %
                (result[1], result[0]))
  return False
| |
# coding=utf-8
# Copyright 2022 TF.Text Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gather_with_default op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow_text.python.ops import pointer_ops
def _MakeTestTensor(shape, prefix=b'v'):
"""Constructs a string tensor with the specified shape, for testing."""
if not shape:
return prefix
return [
_MakeTestTensor(shape[1:], b'%s%s' % (prefix, ('%s' % i).encode('ascii')))
for i in range(shape[0])
]
@test_util.run_all_in_graph_and_eager_modes
class GatherWithDefaultOpTest(test_util.TensorFlowTestCase,
                              parameterized.TestCase):
  """Tests for pointer_ops.gather_with_default.

  gather_with_default behaves like tf.gather except that an index of -1
  selects a caller-supplied default value instead of raising an error; the
  tests below cross-check against tf.gather whenever no -1 index is present.
  """

  def testDocStringExample(self):
    gathered = pointer_ops.gather_with_default(['a', 'b', 'c', 'd'],
                                               [2, 0, -1, 2, -1], '_')
    self.assertAllEqual(gathered, [b'c', b'a', b'_', b'c', b'_'])

  @parameterized.parameters(
      (_MakeTestTensor([8]), -1, b'_'),
      (_MakeTestTensor([8]), 0, b'_'),
      (_MakeTestTensor([8]), 1, b'_'),
      (_MakeTestTensor([8]), 6, b'_'),
      (_MakeTestTensor([8]), 7, b'_'),
  )
  def testScalarIndicesWith1DParams(self, params, indices, default):
    indices_t = constant_op.constant(indices, dtype=dtypes.int32)
    params_t = constant_op.constant(params)
    assert isinstance(indices, int)
    gathered = pointer_ops.gather_with_default(params_t, indices_t, default)
    expected = default if indices == -1 else params[indices]
    self.assertAllEqual(expected, gathered)
    # When there are no -1 indices, check that behavior matches tf.gather.
    if indices != -1:
      self.assertAllEqual(gathered, array_ops.gather(params_t, indices_t))

  @parameterized.parameters(
      (_MakeTestTensor([3, 2]), -1, [b'_', b'_']),
      (_MakeTestTensor([3, 2]), 0, [b'_', b'_']),
      (_MakeTestTensor([3, 2]), 1, [b'_', b'_']),
      (_MakeTestTensor([3, 2]), 2, [b'_', b'_']),
  )
  def testScalarIndicesWith2DParams(self, params, indices, default):
    # With 2D params, the default must match the shape of a params row.
    indices_t = constant_op.constant(indices, dtype=dtypes.int32)
    params_t = constant_op.constant(params)
    assert isinstance(indices, int)
    gathered = pointer_ops.gather_with_default(params_t, indices_t, default)
    expected = default if indices == -1 else params[indices]
    self.assertAllEqual(gathered, expected)
    # When there are no -1 indices, check that behavior matches tf.gather.
    if indices != -1:
      self.assertAllEqual(gathered, array_ops.gather(params_t, indices_t))

  @parameterized.parameters(
      # 1D params
      (_MakeTestTensor([8]), [], '_'),
      (_MakeTestTensor([8]), [0], '_'),
      (_MakeTestTensor([8]), [-1], '_'),
      (_MakeTestTensor([8]), [6], '_'),
      (_MakeTestTensor([8]), [2, 0, 2, -1, 5, -1], '_'),
      (_MakeTestTensor([8]), [2, 0, 2, 1, 5, 3], '_'),
      # 2D params
      (_MakeTestTensor([3, 2]), [], ['_', '_'], [0, 2]),
      (_MakeTestTensor([3, 2]), [0], ['_', '_']),
      (_MakeTestTensor([3, 2]), [1], ['_', '_']),
      (_MakeTestTensor([3, 2]), [-1], ['_', '_']),
      (_MakeTestTensor([3, 2]), [2], ['_', '_']),
      (_MakeTestTensor([3, 2]), [1, 0, -1, 2, -1], ['_', '_']),
      (_MakeTestTensor([3, 2]), [1, 0, 1, 2, 0], ['_', '_']),
  )
  def testVectorIndices(self, params, indices, default, expected_shape=None):
    # expected_shape is only needed when `expected` is empty and its shape
    # cannot be inferred from the (empty) python list.
    indices_t = constant_op.constant(indices, dtype=dtypes.int32)
    params_t = constant_op.constant(params)
    gathered = pointer_ops.gather_with_default(params_t, indices_t, default)
    expected = [default if i == -1 else params[i] for i in indices]
    expected = constant_op.constant(expected, shape=expected_shape)
    self.assertAllEqual(gathered, expected)
    # When there are no -1 indices, check that behavior matches tf.gather.
    if not any(i == -1 for i in indices):
      self.assertAllEqual(gathered, array_ops.gather(params_t, indices_t))

  @parameterized.parameters(
      # 1D params
      (_MakeTestTensor([8]), [], '_'),
      (_MakeTestTensor([8]), [[0]], '_'),
      (_MakeTestTensor([8]), [[-1]], '_'),
      (_MakeTestTensor([8]), [[6]], '_'),
      (_MakeTestTensor([8]), [[2, 0], [2, -1], [5, -1]], '_'),
      (_MakeTestTensor([8]), [[2, 0], [2, 1], [5, 2]], '_'),
      # 2D params
      (_MakeTestTensor([3, 2]), [], ['_', '_'], [0, 2]),
      (_MakeTestTensor([3, 2]), [[0]], ['_', '_']),
      (_MakeTestTensor([3, 2]), [[1]], ['_', '_']),
      (_MakeTestTensor([3, 2]), [[-1]], ['_', '_']),
      (_MakeTestTensor([3, 2]), [[2]], ['_', '_']),
      (_MakeTestTensor([3, 2]), [[1, 0], [-1, 2], [-1, -1]], ['_', '_']),
      (_MakeTestTensor([3, 2]), [[1, 0], [1, 2], [0, 0]], ['_', '_']),
  )
  def test2DIndices(self, params, indices, default, expected_shape=None):
    indices_t = constant_op.constant(indices, dtype=dtypes.int32)
    params_t = constant_op.constant(params)
    gathered = pointer_ops.gather_with_default(params_t, indices_t, default)
    expected = [[default if i == -1 else params[i]
                 for i in indices_row]
                for indices_row in indices]
    expected = constant_op.constant(expected, shape=expected_shape)
    self.assertAllEqual(gathered, expected)
    # When there are no -1 indices, check that behavior matches tf.gather.
    if not any(i == -1 for index_row in indices for i in index_row):
      self.assertAllEqual(gathered, array_ops.gather(params_t, indices_t))

  def testAxisGreaterThan0(self):
    params = [['a0', 'a1', 'a2', 'a3', 'a4'],
              ['b0', 'b1', 'b2', 'b3', 'b4'],
              ['c0', 'c1', 'c2', 'c3', 'c4']]  # pyformat: disable
    indices = [2, 0, -1, 4, -1]
    gathered = pointer_ops.gather_with_default(params, indices, '__', axis=1)
    expected = [[b'a2', b'a0', b'__', b'a4', b'__'],
                [b'b2', b'b0', b'__', b'b4', b'__'],
                [b'c2', b'c0', b'__', b'c4', b'__']]  # pyformat: disable
    self.assertAllEqual(gathered, expected)

  def testNegativeAxis(self):
    # Each pair (a, b) below uses a positive axis and the equivalent negative
    # axis; both must produce the same result.
    params_1d = _MakeTestTensor(shape=[3])
    params_2d = _MakeTestTensor(shape=[3, 3])
    params_3d = _MakeTestTensor(shape=[3, 3, 3])
    indices = [2, 0, -1, 1, -1]
    gathered1a = pointer_ops.gather_with_default(
        params_1d, indices, '__', axis=0)
    gathered1b = pointer_ops.gather_with_default(
        params_1d, indices, '__', axis=-1)
    expected1 = [b'v2', b'v0', b'__', b'v1', b'__']
    gathered2a = pointer_ops.gather_with_default(
        params_2d, indices, ['__', '__', '__'], axis=0)
    gathered2b = pointer_ops.gather_with_default(
        params_2d, indices, ['__', '__', '__'], axis=-2)
    expected2 = [[b'v20', b'v21', b'v22'],
                 [b'v00', b'v01', b'v02'],
                 [b'__', b'__', b'__'],
                 [b'v10', b'v11', b'v12'],
                 [b'__', b'__', b'__']]  # pyformat: disable
    gathered3a = pointer_ops.gather_with_default(
        params_2d, indices, '__', axis=1)
    gathered3b = pointer_ops.gather_with_default(
        params_2d, indices, '__', axis=-1)
    expected3 = [[b'v02', b'v00', b'__', b'v01', b'__'],
                 [b'v12', b'v10', b'__', b'v11', b'__'],
                 [b'v22', b'v20', b'__', b'v21', b'__']]  # pyformat: disable
    gathered4a = pointer_ops.gather_with_default(
        params_3d, indices, '__', axis=2)
    gathered4b = pointer_ops.gather_with_default(
        params_3d, indices, '__', axis=-1)
    expected4 = [[[b'v002', b'v000', b'__', b'v001', b'__'],
                  [b'v012', b'v010', b'__', b'v011', b'__'],
                  [b'v022', b'v020', b'__', b'v021', b'__']],
                 [[b'v102', b'v100', b'__', b'v101', b'__'],
                  [b'v112', b'v110', b'__', b'v111', b'__'],
                  [b'v122', b'v120', b'__', b'v121', b'__']],
                 [[b'v202', b'v200', b'__', b'v201', b'__'],
                  [b'v212', b'v210', b'__', b'v211', b'__'],
                  [b'v222', b'v220', b'__', b'v221', b'__']]]
    self.assertAllEqual(gathered1a, expected1)
    self.assertAllEqual(gathered1b, expected1)
    self.assertAllEqual(gathered2a, expected2)
    self.assertAllEqual(gathered2b, expected2)
    self.assertAllEqual(gathered3a, expected3)
    self.assertAllEqual(gathered3b, expected3)
    self.assertAllEqual(gathered4a, expected4)
    self.assertAllEqual(gathered4b, expected4)

  def testAxisGreaterThan0_BehaviorMatchesTfGather(self):
    params = [['a1', 'a2', 'a3', 'a4'], ['b1', 'b2', 'b3', 'b4'],
              ['c1', 'c2', 'c3', 'c4']]
    indices = [2, 0, 2, 1]
    gathered = pointer_ops.gather_with_default(params, indices, '__', axis=1)
    expected = array_ops.gather(params, indices, axis=1)
    self.assertAllEqual(gathered, expected)

  def testBadDefaultShape(self):
    # The default's shape must match the shape of a single params slice.
    with self.assertRaises((ValueError, errors.InvalidArgumentError)):
      pointer_ops.gather_with_default(
          params=[0, 1, 2, 3], indices=[0], default=[0])
    with self.assertRaises((ValueError, errors.InvalidArgumentError)):
      pointer_ops.gather_with_default(
          params=[[0, 1], [2, 3]], indices=[0], default=0)

  def testBadDefaultDtype(self):
    with self.assertRaisesRegexp(
        TypeError,
        'Expected int32.*|Cannot convert .*'):
      pointer_ops.gather_with_default(
          params=[0, 1, 2, 3], indices=[0], default='a')

  def testBadAxis(self):
    with self.assertRaises((ValueError, errors.InvalidArgumentError)):
      pointer_ops.gather_with_default(
          params=[0, 1, 2, 3], indices=[0], default=-1, axis=1)
    with self.assertRaises((ValueError, errors.InvalidArgumentError)):
      pointer_ops.gather_with_default(
          params=[[0, 1], [2, 3]], indices=[0], default=[0, 0], axis=2)

  def testIndexOutOfRange(self):
    # Note: because of the way gather_with_default is implemented, these
    # error messages will report values and ranges that are one greater than
    # those that were supplied to gather_with_default.
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 r'indices\[0\] = .* is not in .*'):
      self.evaluate(
          pointer_ops.gather_with_default(
              params=[0, 1, 2, 3], indices=[4], default=0))
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 r'indices\[0\] = .* is not in .*'):
      self.evaluate(
          pointer_ops.gather_with_default(
              params=[0, 1, 2, 3], indices=[-2], default=0))
# Run the test suite when executed as a script.
if __name__ == '__main__':
  test.main()
| |
#!/usr/bin/env python3
"""
Converts the LibreOffice `download.lst` file into a Nix expression.
Requires an environment variable named `downloadList` identifying the path
of the input file, and writes the result to stdout.
todo - Ideally we would move as much as possible into derivation dependencies.
"""
import collections, itertools, json, re, subprocess, sys, os
def main():
    """Parse download.lst, prefetch each package, and print a Nix list of
    attribute sets (name/url/sha256/md5/md5name) to stdout.

    Progress and diagnostics go to stderr.
    """
    packages = list(get_packages())
    for x in packages:
        print(x, file=sys.stderr)
    print('[')
    for x in packages:
        md5 = x['md5']
        upstream_sha256 = x['sha256']
        # Prefer the upstream-declared sha256; fall back to md5 for older
        # download.lst entries that only provide an MD5SUM.
        if upstream_sha256:
            hash = upstream_sha256
            hashtype = 'sha256'
        else:
            hash = md5
            hashtype = 'md5'
        tarball = x['tarball']
        url = construct_url(x)
        print('url: {}'.format(url), file=sys.stderr)
        # nix-prefetch-url downloads into the Nix store and returns the path.
        path = download(url, tarball, hash, hashtype)
        print('path: {}'.format(path), file=sys.stderr)
        # Always emit a sha256 computed from the actual bytes, even for
        # md5-only entries.
        sha256 = get_sha256(path)
        print('sha256: {}'.format(sha256), file=sys.stderr)
        print(' {')
        print(' name = "{}";'.format(tarball))
        print(' url = "{}";'.format(url))
        print(' sha256 = "{}";'.format(sha256))
        print(' md5 = "{}";'.format(md5))
        # md5name mirrors LibreOffice's "<hash>-<tarball>" naming scheme.
        print(' md5name = "{}-{}";'.format(md5 or upstream_sha256,tarball))
        print(' }')
    print(']')
def construct_url(x):
    """Build the upstream source URL for package |x|.

    Brief entries use the bare tarball name; legacy entries prefix the
    tarball with its md5 hash.
    """
    base = 'http://dev-www.libreoffice.org/src/'
    subdir = x.get('subdir', '')
    if x['brief']:
        return '{}{}{}'.format(base, subdir, x['tarball'])
    return '{}{}{}-{}'.format(base, subdir, x['md5'], x['tarball'])
def download(url, name, hash, hashtype):
    """Fetch |url| into the Nix store via nix-prefetch-url and return the
    resulting store path.

    |hash|/|hashtype| let nix verify the download; |name| fixes the store
    name of the result.
    """
    cmd = ['nix-prefetch-url', url, hash, '--print-path',
           '--type', hashtype, '--name', name]
    proc = subprocess.run(cmd, stdout=subprocess.PIPE, check=True,
                          universal_newlines=True)
    # With --print-path, the first output line is the hash and the second is
    # the store path.
    return proc.stdout.split('\n')[1].strip()
def get_sha256(path):
    """Return the SHA-256 hex digest of the file at |path|.

    Computed with hashlib instead of shelling out to `sha256sum`, which
    keeps the script working on systems without GNU coreutils and avoids
    a subprocess per package.
    """
    import hashlib
    digest = hashlib.sha256()
    with open(path, 'rb') as f:
        # Read in 64 KiB chunks so large tarballs are not loaded whole.
        for chunk in iter(lambda: f.read(1 << 16), b''):
            digest.update(chunk)
    return digest.hexdigest()
def get_packages():
    """
    All of the package data: What's parsed from download.lst,
    plus our additions.
    """
    parsed = get_packages_from_download_list()
    return apply_additions(parsed, get_additions())
def get_additions():
    """
    A mapping from package name (the all-caps identifiers used in
    `download.lst`) to a dict of additional attributes to set on the package.

    Read from `libreoffice-srcs-additions.json` in the current directory.
    """
    with open('./libreoffice-srcs-additions.json') as f:
        return json.load(f)
def apply_additions(xs, additions):
    """Yield each package dict with its extra attributes merged in.

    Attributes already present on the package take precedence over the
    additions.
    """
    for package in xs:
        extra = additions.get(package['name'], {})
        merged = dict(extra)
        merged.update(package)
        yield merged
def get_packages_from_download_list():
    """
    The result of parsing `download.lst`: A list of dicts containing keys
    'name', 'tarball', 'md5', 'brief'.
    """
    def lines():
        # Parse, expand $(VAR) references, and interpret each line, skipping
        # anything that is not a recognized package attribute.
        for x in sub_symbols(parse_lines(get_lines())):
            interpretation = interpret(x)
            if interpretation == 'unrecognized':
                print_skipped_line(x)
            else:
                yield dict_merge([x,
                                  interpretation])

    def cluster(xs):
        """
        Groups lines according to their order within the file, to support
        packages that are listed in `download.lst` more than once.
        """
        keys = ['tarball', 'md5', 'sha256', 'brief']
        a = {k: [x for x in xs if k in x['attrs']] for k in keys}
        # The n-th occurrence of each attribute belongs to the n-th listing
        # of the package.
        return zip(*[a[k] for k in keys])

    def packages():
        # Merge the per-line attribute fragments of each package; keep the
        # smallest line index so output order matches the file.
        for (name, group) in groupby(lines(), lambda x: x['name']):
            for xs in cluster(group):
                yield {'name': name,
                       'attrs': dict_merge(x['attrs'] for x in xs),
                       'index': min(x['index'] for x in xs)}

    for x in sorted(packages(), key=lambda x: x['index']):
        yield dict_merge([{'name': x['name']},
                          x['attrs']])
def dict_merge(xs):
    """Merge an iterable of dicts; earlier dicts win on key conflicts.

    >>> dict_merge([{1: 2}, {3: 4}, {3: 5}])
    {1: 2, 3: 4}
    """
    merged = {}
    for mapping in reversed(list(xs)):
        merged.update(mapping)
    return merged
def groupby(xs, f):
    """Group the items of xs by key function f, yielding (key, group) pairs.

    >>> list(groupby([1, 2, 3, 4], lambda x: x % 2))
    [(0, [2, 4]), (1, [1, 3])]
    """
    # itertools.groupby only groups adjacent items, so sort by key first.
    for (_, members) in itertools.groupby(sorted(xs, key=f), f):
        members = list(members)
        yield (f(members[0]), members)
def get_lines():
    """Read the file named by the $downloadList environment variable and
    return its lines (without trailing newlines)."""
    download_list = os.getenv('downloadList')
    with open(download_list) as f:
        return f.read().splitlines()
def print_skipped_line(x):
    """Report an unparseable/unrecognized download.lst line on stderr."""
    message = 'Skipped line {}: {}'.format(x['index'], x['original'])
    print(message, file=sys.stderr)
def parse_lines(lines):
    """
    Input: List of strings (the lines from `download.lst`)
    Output: Iterator of dicts with keys 'key', 'value', and 'index'
    """
    for (index, line) in enumerate(lines):
        # Keep the position and raw text around for error reporting.
        x = { 'index': index, 'original': line }
        result = parse_line(line)
        if result == 'nothing':
            # Blank line or comment: silently ignored.
            pass
        elif result == 'unrecognized':
            print_skipped_line(x)
        else:
            yield dict_merge([x,
                              result])
def parse_line(line):
    """
    Parse one `download.lst` line of the form `export KEY := VALUE`.

    Input: A string
    Output: One of 1. A dict with keys 'key', 'value'
                   2. 'nothing' (if the line contains no information)
                   3. 'unrecognized' (if parsing failed)
    """
    # Raw strings: '\s' in a plain literal is an invalid escape sequence
    # (DeprecationWarning, eventually an error) on modern Python.
    if re.match(r'\s*(#.*)?$', line):
        return 'nothing'
    match = re.match(r'\s*export\s+([^:\s]+)\s*:=\s*(.*)$', line)
    if match:
        return {
            'key': match.group(1),
            'value': match.group(2).strip()
        }
    else:
        return 'unrecognized'
def sub_symbols(xs):
    """
    Do substitution of variables across all lines.

    >>> list(sub_symbols([{'key': 'a', 'value': 'x'},
    ...                   {'key': 'c', 'value': '$(a)yz'}]))  # doctest: +SKIP
    [{'key': 'a', 'value': 'x'}, {'key': 'c', 'value': 'xyz'}]
    """
    xs = list(xs)
    # If a key is defined more than once, the last definition wins here.
    symbols = {x['key']: x for x in xs}
    def get_value(k):
        # Unknown variables expand to the empty string.
        x = symbols.get(k)
        return x['value'] if x is not None else ''
    for x in xs:
        # dict_merge keeps the first occurrence of a key, so the substituted
        # 'value' overrides the original one.
        yield dict_merge([{'value': sub_str(x['value'], get_value)},
                          x])
def sub_str(string, func):
    """Expand every $(var) reference in |string| using func(var).

    If func returns None for a variable, the variable name itself is kept.

    >>> sub_str("x = $(x)", lambda k: {'x': 'a'}[k])
    'x = a'
    """
    def expand(match):
        name = match.group(1)
        value = func(name)
        if value is None:
            return name
        return value
    return re.sub(r'\$\(([^\$\(\)]+)\)', expand, string)
def interpret(x):
    """
    Input: Dict with keys 'key' and 'value'
    Output: One of 1. Dict with keys 'name' and 'attrs'
                   2. 'unrecognized' (if interpretation failed)
    """
    # Order matters: md5-prefixed tarballs must be tried before the plain
    # tarball/jar interpreters.
    interpreters = (interpret_md5, interpret_sha256, interpret_tarball_with_md5,
                    interpret_tarball, interpret_jar)
    for interpreter in interpreters:
        outcome = interpreter(x)
        if outcome is not None:
            return outcome
    return 'unrecognized'
def interpret_md5(x):
    """
    Recognize `<NAME>_MD5SUM := <hash>` entries; None if the key does not match.

    >>> interpret_md5({'key': 'ODFGEN_MD5SUM',
    ...                'value': '32572ea48d9021bbd6fa317ddb697abc'})
    {'name': 'ODFGEN', 'attrs': {'md5': '32572ea48d9021bbd6fa317ddb697abc', 'sha256': ''}}
    """
    match = re.match('^(.*)_MD5SUM$', x['key'])
    if match is None:
        return None
    return {'name': match.group(1),
            'attrs': {'md5': x['value'], 'sha256': ''}}
def interpret_sha256(x):
    """Recognize `<NAME>_SHA256SUM := <hash>` entries; None if no match."""
    match = re.match('^(.*)_SHA256SUM$', x['key'])
    if match is None:
        return None
    return {'name': match.group(1),
            'attrs': {'sha256': x['value'], 'md5': ''}}
def interpret_tarball(x):
    """
    Recognize `<NAME>_TARBALL := <file>` entries ("brief" naming scheme);
    None if the key does not match.

    >>> interpret_tarball({'key': 'FREEHAND_TARBALL',
    ...                    'value': 'libfreehand-0.1.1.tar.bz2'})
    {'name': 'FREEHAND', 'attrs': {'tarball': 'libfreehand-0.1.1.tar.bz2', 'brief': True}}
    """
    match = re.match('^(.*)_TARBALL$', x['key'])
    if match is None:
        return None
    return {'name': match.group(1),
            'attrs': {'tarball': x['value'], 'brief': True}}
def interpret_jar(x):
    """Recognize `<NAME>_JAR := <file>` entries; None if the key does not
    match. Jars use the same attrs as brief tarballs."""
    match = re.match('^(.*)_JAR$', x['key'])
    if match is None:
        return None
    return {'name': match.group(1),
            'attrs': {'tarball': x['value'], 'brief': True}}
def interpret_tarball_with_md5(x):
    """
    Recognize `<NAME>_TARBALL := <md5>-<file>` entries (old naming scheme);
    None unless both the key and the md5-prefixed value match.

    >>> interpret_tarball_with_md5({'key': 'CLUCENE_TARBALL', 'value':
    ...     '48d647fbd8ef8889e5a7f422c1bfda94-clucene-core-2.3.3.4.tar.gz'}
    ...     )['name']
    'CLUCENE'
    """
    key_match = re.match('^(.*)_(TARBALL|JAR)$', x['key'])
    value_match = re.match('(?P<md5>[0-9a-fA-F]{32})-(?P<tarball>.+)$',
                           x['value'])
    if not (key_match and value_match):
        return None
    return {'name': key_match.group(1),
            'attrs': {'tarball': value_match.group('tarball'),
                      'md5': value_match.group('md5'),
                      'sha256': '',
                      'brief': False}}
main()
| |
import os
import textwrap
from io import StringIO
import numpy as np
from mock import patch
from nose.tools import eq_, ok_
import neurom.io as io
import neurom.io.neurolucida as nasc
from neurom.core.dataformat import COLS
from neurom.io.datawrapper import DataWrapper
# Locate the shared test_data directory relative to this test file.
_path = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(_path, '../../../test_data')
NEUROLUCIDA_PATH = os.path.join(DATA_PATH, 'neurolucida')
def test__match_section():
    """_match_section returns None when nothing in the section matches."""
    section = [0, 1, 2, 3, 4, 'something']
    match = {'Foo': 'Bar', }
    # no match in first 5
    eq_(nasc._match_section(section, match), None)
def test__get_tokens():
    """_get_tokens splits parens and keeps quoted strings as single tokens."""
    cases = [
        (u'((()))',
         ['(', '(', '(', ')', ')', ')']),
        (u'(Baz("Bar"("Foo")))',
         ['(', 'Baz', '(', '"Bar"', '(', '"Foo"', ')', ')', ')']),
        (u'(Baz("Cell Bar Body"("Foo")))',
         ['(', 'Baz', '(', '"Cell Bar Body"', '(', '"Foo"', ')', ')', ')']),
    ]
    for text, expected in cases:
        eq_(list(nasc._get_tokens(StringIO(text))), expected)
def test__parse_section():
    """_parse_section turns a flat token stream into nested lists."""
    with patch('neurom.io.neurolucida._match_section') as mock_match:
        mock_match.return_value = False  # want all sections
        token_iter = iter(['(', '(', '(', ')', ')', ')'])
        section = nasc._parse_section(token_iter)
        eq_(section, [[[[]]]])
        token_iter = iter(['(', 'Baz', '(', '"Bar"', '(', '"Foo"', ')', ')', ')'])
        section = nasc._parse_section(token_iter)
        eq_(section, [['Baz',
                       ['"Bar"',
                        ['"Foo"',
                         ]]]])
def test__parse_sections():
    """Marker sections (FilledCircle) are skipped; tree sections are kept."""
    string_section = textwrap.dedent(
        u'''(FilledCircle
(Color RGB (64, 0, 128))
(Name "Marker 11")
(Set "axons")
( -189.59 55.67 28.68 0.12) ; 1
) ; End of markers
( (Color Yellow)
(Axon)
(Set "axons")
( -40.54 -113.20 -36.61 0.12) ; Root
( -40.54 -113.20 -36.61 0.12) ; 1, R
Generated
) ; End of tree
''')
    morph_fd = StringIO(string_section)
    sections = nasc._parse_sections(morph_fd)
    eq_(len(sections), 1)  # FilledCircle is ignored
    # Color/Set metadata is stripped; points and the trailing keyword remain.
    eq_(sections[0], [['Axon'],
                      ['-40.54', '-113.20', '-36.61', '0.12'],
                      ['-40.54', '-113.20', '-36.61', '0.12'],
                      'Generated'])
def test__flatten_section():
    """_flatten_subsection assigns sequential IDs and correct parent IDs."""
    # Each flattened row is [X, Y, Z, R, TYPE, ID, PARENT_ID].
    # Simple unbranched run of 5 points plus a trailing keyword.
    subsection = [['0', '0', '0', '0'],
                  ['1', '1', '1', '1'],
                  ['2', '2', '2', '2'],
                  ['3', '3', '3', '3'],
                  ['4', '4', '4', '4'],
                  'Generated',
                  ]
    ret = np.array([row for row in nasc._flatten_subsection(subsection, 0, offset=0, parent=-1)])
    # correct parents
    ok_(np.allclose(ret[:, COLS.P], np.arange(-1, 4)))
    ok_(np.allclose(ret[:, COLS.ID], np.arange(0, 5)))

    # Bifurcation: two children separated by '|' share the fork point as parent.
    subsection = [['-1', '-1', '-1', '-1'],
                  [['0', '0', '0', '0'],
                   ['1', '1', '1', '1'],
                   ['2', '2', '2', '2'],
                   ['3', '3', '3', '3'],
                   ['4', '4', '4', '4'],
                   '|',
                   ['1', '2', '3', '4'],
                   ['1', '2', '3', '4'],
                   ['1', '2', '3', '4'],
                   ['1', '2', '3', '4'],
                   ['1', '2', '3', '4'], ]
                  ]
    ret = np.array([row for row in nasc._flatten_subsection(subsection, 0, offset=0, parent=-1)])
    # correct parents
    eq_(ret[0, COLS.P], -1.)
    eq_(ret[1, COLS.P], 0.0)
    eq_(ret[6, COLS.P], 0.0)
    ok_(np.allclose(ret[:, COLS.ID], np.arange(0, 11)))  # correct ID

    # Try a non-standard bifurcation, ie: missing '|' separator
    subsection = [['-1', '-1', '-1', '-1'],
                  [['0', '0', '0', '0'],
                   ['1', '1', '1', '1'], ]
                  ]
    ret = np.array([row for row in nasc._flatten_subsection(subsection, 0, offset=0, parent=-1)])
    eq_(ret.shape, (3, 7))

    # try multifurcation
    subsection = [['-1', '-1', '-1', '-1'],
                  [['0', '0', '0', '0'],
                   ['1', '1', '1', '1'],
                   '|',
                   ['2', '2', '2', '2'],
                   ['3', '3', '3', '3'],
                   '|',
                   ['4', '4', '4', '4'],
                   ['5', '5', '5', '5'], ]
                  ]
    ret = np.array([row for row in nasc._flatten_subsection(subsection, 0, offset=0, parent=-1)])
    # correct parents: all three branches hang off point 0
    eq_(ret[0, COLS.P], -1.)
    eq_(ret[1, COLS.P], 0.0)
    eq_(ret[3, COLS.P], 0.0)
    eq_(ret[5, COLS.P], 0.0)
    ok_(np.allclose(ret[:, COLS.ID], np.arange(0, 7)))  # correct ID
def test__extract_section():
    """Smoke test: _extract_section accepts known and unknown section types.

    NOTE(review): there are no assertions here -- this only checks that
    neither call raises; consider asserting on the returned data.
    """
    section = ['"CellBody"',
               ['CellBody'],
               ['-1', '-1', '-1', '-1'],
               ['1', '1', '1', '1'],
               ]
    section = nasc._extract_section(section)
    # unknown type
    section = ['"Foo"',
               ['Bar'],
               ['-1', '-1', '-1', '-1'],
               ['1', '1', '1', '1'],
               ]
    section = nasc._extract_section(section)
def test_sections_to_raw_data():
    """Soma + axon + dendrite sections flatten into one raw data block;
    non-neurite sections are ignored."""
    # from my h5 example neuron
    # https://developer.humanbrainproject.eu/docs/projects/morphology-documentation/0.0.2/h5v1.html
    soma = ['"CellBody"',
            ['CellBody'],
            ['1', '1', '0', '.1'],
            ['-1', '1', '0', '.1'],
            ['-1', '-1', '0', '.1'],
            ['1', '-1', '0', '.1'],
            ]
    axon = [['Axon'],
            ['0', '5', '0', '.1'],
            ['2', '9', '0', '.1'],
            ['0', '13', '0', '.1'],
            ['2', '13', '0', '.1'],
            ['4', '13', '0', '.1'],
            ]
    # Dendrite ends in a bifurcation ('|'-separated children).
    dendrite = [['Dendrite'],
                ['3', '-4', '0', '.1'],
                ['3', '-6', '0', '.1'],
                ['3', '-8', '0', '.1'],
                ['3', '-10', '0', '.1'],
                [['0', '-10', '0', '.1'],
                 '|',
                 ['6', '-10', '0', '.1'],
                 ]
                ]
    fake_neurite = [['This is not ', ], ['a neurite']]
    sections = [soma, fake_neurite, axon, dendrite, ]
    raw_data = nasc._sections_to_raw_data(sections)
    # 4 soma + 5 axon + 6 dendrite points = 15 rows, 7 columns each.
    eq_(raw_data.shape, (15, 7))
    ok_(np.allclose(raw_data[:, COLS.ID], np.arange(0, 15)))  # correct ID
    # 3 is ID of end of the soma, 2 sections attach to this
    ok_(np.count_nonzero(raw_data[:, COLS.P] == 3), 2)
# what I think the
# https://developer.humanbrainproject.eu/docs/projects/morphology-documentation/0.0.2/h5v1.html
# would look like
# Hand-written Neurolucida ASCII fixture used by test_read below.
MORPH_ASC = textwrap.dedent(
    u'''\
; Generated by the hand of mgevaert
("CellBody"
(CellBody)
(1 1 0 0) ; 1, 1
(-1 1 0 0) ; 1, 2
(-1 -1 0 0) ; 1, 3
(1 -1 0 0) ; 1, 4
);
((Axon)
(0 5 0 2)
(2 9 0 2)
(0 13 0 2)
(2 13 0 2)
(4 13 0 2)
)
((Dendrite)
(3 -4 0 2)
(3 -6 0 2)
(3 -8 0 2)
(3 -10 0 2)
(
(3 -10 0 2)
(0 -10 0 2)
(-3 -10 0 2)
|
(3 -10 0 2)
(6 -10 0 2)
(9 -10 0 2)
)
)
''')
def test_read():
    """Load MORPH_ASC through the generic io entry point with the asc reader."""
    rdw = io.load_data(StringIO(MORPH_ASC), reader='asc')
    raw_data = rdw.data_block
    eq_(raw_data.shape, (19, 7))
    ok_(np.allclose(raw_data[:, COLS.ID], np.arange(0, 19)))  # correct ID
    # 3 is ID of end of the soma, 2 sections attach to this.
    # Was ok_(..., 2): ok_ treats the second argument as a failure message,
    # so the count was never actually compared with 2.
    eq_(np.count_nonzero(raw_data[:, COLS.P] == 3), 2)
def test_load_neurolucida_ascii():
    """Load a sample .asc file from disk and sanity-check the wrapper."""
    f = os.path.join(NEUROLUCIDA_PATH, 'sample.asc')
    # renamed from `ascii`: that name shadows the builtin ascii()
    data = io.load_data(f)
    ok_(isinstance(data, DataWrapper))
    eq_(len(data.data_block), 18)
| |
#!/usr/bin/env python
import unittest
from M2Crypto import RSA
import base64
import json
import grequests
import os
import shutil
import six
import subprocess
import tempfile
import time
import uuid
# Under test
from le_server import Server
#
# These are the same as current defaults (see DEFAULTS in le_server.py)
# but let's specify them here as inputs and use them in case the prod
# defaults change but we want to keep them different from the test
# inputs.
#
user_input_port = 9999
user_input_path_to_certs = './certs'
# This config file sets the Let's Encrypt CA to a staging server.
user_input_path_to_config = './config_staging'
user_input_processes = 1
# This is set on Circle CI only to allow preserving logs as artifacts:
preserved_log_dir = os.environ.get('PRESERVED_LOG_DIR')
#
# The domain and successful credentials are set as environment variables
# in prod as well as local (by sourcing a hidden credentials file before running tests)
# as well as on Circle CI and Heroku through environment variables in the service's
# settings.
#
test_domain = os.environ.get('DNS_DOMAIN')
test_plotly_api_domain = os.environ.get('PLOTLY_API_DOMAIN')
#
# Successful credentials are set as environment variables in prod as well as local
# (by sourcing a hidden credentials file before running tests) as well as
# on Circle CI and Heroku through environment variables in the service's
# settings.
#
correct_username = os.environ['PLOTLY_USERNAME']
#
# These are some fake inputs for our tests.
# NOTE(review): this username is 30 characters long although the comment
# historically claimed a 25-char maximum -- confirm the real limit; only
# the first 7 characters end up in generated subdomains anyway.
#
fake_username = 'NananaNananaNananaNananaBatman'
fake_api_key = 'f4K3-4Pi-k3Y'
fake_access_token = 'f4K3-4CC355-t0k3N'
#
# Quick Mocks to avoid using other functions than those under test per test
#
mocked_path_to_config = os.path.join(
    os.getcwd(), os.path.relpath(user_input_path_to_config))
# Random but fixed for the whole test run, so derived values below agree.
mocked_get_hash = str(uuid.uuid4())
mocked_build_subdomain = fake_username[:7] + '-' + mocked_get_hash[:25]
mocked_build_host = mocked_build_subdomain + '.' + test_domain
mocked_encoded_api_key = base64.b64encode(
    six.b('{0}:{1}'.format(fake_username, fake_api_key))).decode('utf8')
def mock_create_certs(cert_dir):
    """Create a fake key/cert pair for mocked_build_host under cert_dir.

    The "cert" written here is really a second copy of the private key:
    get_cert_and_key() never inspects file contents, so this is enough.
    If get_cert_and_key() ever starts validating certificates, this
    helper must be improved too.

    Returns the (key_path, cert_path) tuple.
    """
    cert_folder = os.path.join(os.getcwd(), cert_dir, mocked_build_host)
    os.mkdir(cert_folder)
    key_path = os.path.join(cert_folder, 'privkey.pem')
    cert_path = os.path.join(cert_folder, 'fullchain.pem')
    rsa_key = RSA.gen_key(2048, 65537)
    for target in (key_path, cert_path):
        rsa_key.save_key(target, cipher=None)
    return key_path, cert_path
#
# Constants
#
# Plotly API route used to verify a user's credentials.
CURRENT = '/v2/users/current'
class TestServerPerformance(unittest.TestCase):
    """Concurrency tests run against a real out-of-process server."""

    def setUp(self):
        # Run the real server in a subprocess so concurrent requests are
        # handled the way they would be in production.
        self.server_process = subprocess.Popen(['python', 'le_server.py', '--path_to_config', './config_staging'], stdout=subprocess.PIPE)
        # Let the server start up.
        time.sleep(5)

    def tearDown(self):
        # Previously the spawned server was never cleaned up, leaking a
        # process (and its stdout pipe) for every test.
        self.server_process.terminate()
        self.server_process.wait()

    @unittest.skip('This test has too many intermittent failures. Need to improve this.')
    def test_concurrent_requests(self):
        start_time = time.time()

        # A simple task to do to each response object
        def verify_response(res):
            self.assertTrue((time.time() - start_time) < 120)
            response_object = json.loads(res.content)
            # Returns the certificate, the key and the subdomain used.
            self.assertIn('cert', response_object)
            self.assertIn('key', response_object)
            self.assertIn('subdomain', response_object)
            self.assertIsNotNone(response_object.get('cert'))
            self.assertIsNotNone(response_object.get('key'))
            self.assertIn(correct_username[:7], response_object.get('subdomain'))
            # print() call instead of Py2 print statement: keeps the module
            # importable under Python 3 (the file already uses six for 2/3).
            print('All good.')
        N = 2
        # NOTE(review): correct_api_key is not defined anywhere in this
        # module (only correct_username is), so this test would raise
        # NameError if un-skipped -- confirm the intended env var.
        async_requests = (grequests.post('http://localhost:8080/certificate', data=json.dumps({
            'credentials': {
                'username': correct_username,
                'api_key': correct_api_key,
                'plotly_api_domain': test_plotly_api_domain
            }
        })) for _ in range(N))
        async_responses = grequests.map(async_requests)
        print(async_responses)
        for res in async_responses:
            verify_response(res)
class TestServerRoutes(unittest.TestCase):
    """
    # # #
    # Integration tests that test the full flow when hitting a route.
    #
    """
    def setUp(self):
        # Preserve logs as CI artifacts when requested, otherwise use a
        # throwaway directory.
        if preserved_log_dir:
            self.path_to_logs = preserved_log_dir
        else:
            self.path_to_logs = tempfile.mkdtemp()
        self.server = Server({
            'port': user_input_port,
            'path_to_config': user_input_path_to_config,
            'path_to_certs': user_input_path_to_certs,
            'processes': user_input_processes,
            'path_to_logs': self.path_to_logs})

    # Delete certificates folder after each test to start from a clean state.
    def tearDown(self):
        try:
            shutil.rmtree(os.path.join(os.getcwd(), user_input_path_to_certs))
        except OSError:
            # Best effort: the folder may not exist when a test created no
            # certificates. The previous bare `except:` also swallowed
            # KeyboardInterrupt and every other error.
            pass
        if not preserved_log_dir:
            shutil.rmtree(self.path_to_logs)

    def test_ping(self):
        with self.server.app.test_client() as app_under_test:
            res = app_under_test.get('/ping')
            self.assertEqual(res.data, 'pong')

    def test_certificate_post_success(self):
        with self.server.app.test_client() as app_under_test:
            start_time = time.time()
            res = app_under_test.post('/certificate', data=json.dumps({
                'credentials': {
                    'username': correct_username,
                }
            }))
            # The full certificate round-trip must finish within 2 minutes.
            self.assertTrue((time.time() - start_time) < 120)
            response_object = json.loads(res.data)
            # Returns the certificate, the key and the subdomain used.
            self.assertIn('cert', response_object)
            self.assertIn('key', response_object)
            self.assertIn('subdomain', response_object)
            self.assertIsNotNone(response_object.get('cert'))
            self.assertIsNotNone(response_object.get('key'))
            self.assertIn(correct_username[:7], response_object.get('subdomain'))

    # Failing case: no username
    def test_certificate_post_error_no_username(self):
        with self.server.app.test_client() as app_under_test:
            res = app_under_test.post('/certificate', data=json.dumps({
                'credentials': {}
            }))
            self.assertTrue('error' in json.loads(res.data))
class TestServerFunctions(unittest.TestCase):
    """
    # # #
    # Functional tests that test specific functions of the server.
    #
    """
    def setUp(self):
        # Fresh throwaway directories per test; removed in tearDown.
        self.path_to_logs = tempfile.mkdtemp()
        self.path_to_certs = tempfile.mkdtemp()
        self.server = Server({
            'port': user_input_port,
            'path_to_config': user_input_path_to_config,
            'path_to_certs': self.path_to_certs,
            'processes': user_input_processes,
            'path_to_logs': self.path_to_logs})
    def tearDown(self):
        shutil.rmtree(self.path_to_logs)
        shutil.rmtree(self.path_to_certs)
    def test_constructor(self):
        # Built with a *relative* certs path to check it gets absolutized.
        server = Server({
            'port': user_input_port,
            'path_to_config': user_input_path_to_config,
            'path_to_certs': 'fake/cert/path',
            'processes': user_input_processes,
            'path_to_logs': self.path_to_logs
        })
        self.assertEqual(server.port, user_input_port)
        self.assertEqual(server.domain, test_domain)
        self.assertEqual(server.processes, user_input_processes)
        self.assertEqual(server.path_to_certs,
                         os.path.join(os.getcwd(), 'fake/cert/path/'))
        self.assertEqual(
            self.server.dehydrated_command, [
                os.getcwd() + '/dehydrated-0.3.1/dehydrated',
                '-c',
                '-n',
                '-f',
                mocked_path_to_config])
    def test_build_host(self):
        self.assertEqual(
            self.server.build_host(mocked_build_subdomain),
            mocked_build_subdomain + '.' + test_domain)
    def test_build_subdomain(self):
        build_subdomain_under_test = self.server.build_subdomain(fake_username)
        self.assertEqual(
            len(mocked_build_subdomain), len(build_subdomain_under_test))
        # Pin the hash so the generated subdomains become deterministic.
        def fake_get_hash():
            return mocked_get_hash
        self.server.get_hash = fake_get_hash
        exp_hash = mocked_get_hash[:25]
        # Usernames are lowercased, truncated to 7 chars, and sanitized
        # (dots/underscores to dashes, leading dash prefixed with 'p').
        self.assertEqual(self.server.build_subdomain('Cats'),
                         'cats-{}'.format(exp_hash))
        self.assertEqual(self.server.build_subdomain('LongCatIsLong'),
                         'longcat-{}'.format(exp_hash))
        self.assertEqual(self.server.build_subdomain('dash-ok'),
                         'dash-ok-{}'.format(exp_hash))
        self.assertEqual(self.server.build_subdomain('no.dot'),
                         'no-dot-{}'.format(exp_hash))
        self.assertEqual(self.server.build_subdomain('no_under'),
                         'no-unde-{}'.format(exp_hash))
        self.assertEqual(self.server.build_subdomain('-no-leading'),
                         'pno-lea-{}'.format(exp_hash))
        self.assertEqual(self.server.build_subdomain('-'),
                         'p-{}'.format(exp_hash))
    def test_get_path(self):
        server = Server({
            'port': user_input_port,
            'path_to_config': user_input_path_to_config,
            'path_to_certs': 'fake/cert/path',
            'processes': user_input_processes,
            'path_to_logs': self.path_to_logs
        })
        expected_key_path = os.path.join(os.getcwd(), 'fake/cert/path',
                                         mocked_build_host, 'privkey.pem')
        self.assertEqual(server.get_key_path(mocked_build_subdomain),
                         expected_key_path)
        expected_cert_path = os.path.join(os.getcwd(), 'fake/cert/path',
                                          mocked_build_host, 'fullchain.pem')
        self.assertEqual(server.get_cert_path(mocked_build_subdomain),
                         expected_cert_path)
    def test_get_cert_and_key(self):
        # Create fake certs
        test_key, test_cert = mock_create_certs(self.path_to_certs)
        # Check certs exist
        self.assertTrue(self.server.get_cert_and_key(mocked_build_subdomain))
    def test_get_cert_and_key_error(self):
        # No certs created: the lookup itself must raise.
        with self.assertRaises(Exception) as context:
            certs = self.server.get_cert_and_key(mocked_build_subdomain)
        self.assertIn('key not found:', str(context.exception))
    def test_delete_certs_folder_if_exists(self):
        # Create fake certs
        test_key, test_cert = mock_create_certs(self.path_to_certs)
        # Delete them
        self.server.delete_certs_folder_if_exists(mocked_build_subdomain)
        self.assertFalse(os.path.exists(test_key))
        self.assertFalse(os.path.exists(test_cert))
    def test_encode_api_key(self):
        # Expected value is base64("username:api_key"), HTTP Basic style.
        expected_key = base64.b64encode(
            six.b('{0}:{1}'.format(fake_username, fake_api_key))).decode('utf8')
        encoded_key = self.server.encode_api_key(fake_username, fake_api_key)
        self.assertNotEqual(encoded_key, fake_api_key)
        self.assertIsNotNone(encoded_key)
        self.assertEqual(expected_key, encoded_key)
    def test_get_headers(self):
        # Correct credentials with api_key
        credentials = {'username': fake_username, 'api_key': fake_api_key}
        headers_under_test = self.server.get_headers(credentials)
        self.assertTrue('authorization' in headers_under_test)
        self.assertEqual('Basic ' + self.server.encode_api_key(
            fake_username, fake_api_key), headers_under_test['authorization'])
        # Correct credentials with access_token
        credentials = {
            'username': fake_username,
            'access_token': fake_access_token}
        headers_under_test = self.server.get_headers(credentials)
        self.assertTrue('authorization' in headers_under_test)
        self.assertEqual(
            'Bearer ' + fake_access_token,
            headers_under_test['authorization'])
        # No key or token
        credentials = {'username': fake_username}
        self.assertFalse(
            'authorization' in self.server.get_headers(credentials))
    def test_get_hash(self):
        # Same length as a uuid4 string, but unique on every call.
        self.assertEqual(len(self.server.get_hash()), len(mocked_get_hash))
        # Make sure it's not always returning the same hash
        self.assertNotEqual(self.server.get_hash(), self.server.get_hash())
    def test_user_is_verified(self):
        # Failing case: no username
        credentials = {
            'api_key': fake_api_key,
            'plotly_api_domain': test_plotly_api_domain
        }
        self.assertFalse(self.server.user_is_verified(credentials))
        # Successful case
        credentials = {
            'username': correct_username,
        }
        self.assertTrue(self.server.user_is_verified(credentials))
# TODO: Add tests for catching TimeoutError and ProcessError in
# server.execute_letsencrypt_client()
if __name__ == '__main__':
    unittest.main()
| |
"""Test the search module"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.fixes import sp_version
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneLabelOut
from sklearn.model_selection import LeavePLabelOut
from sklearn.model_selection import LabelKFold
from sklearn.model_selection import LabelShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import ParameterSampler
# TODO Import from sklearn.exceptions once merged.
from sklearn.base import ChangedBehaviorWarning
from sklearn.model_selection._validation import FitFailedWarning
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
    """Dummy classifier to test the parameter search algorithms"""

    def __init__(self, foo_param=0):
        self.foo_param = foo_param

    def fit(self, X, Y):
        # Nothing is learned; only the input lengths must agree.
        assert_true(len(X) == len(Y))
        return self

    def predict(self, T):
        # "Prediction" is just the number of samples handed in.
        return T.shape[0]

    # Every prediction-like method behaves identically.
    predict_proba = predict
    decision_function = predict
    transform = predict

    def score(self, X=None, Y=None):
        # Score depends only on the hyper-parameter, never on the data:
        # any foo_param above 1 counts as a "perfect" model.
        return 1. if self.foo_param > 1 else 0.

    def get_params(self, deep=False):
        return {'foo_param': self.foo_param}

    def set_params(self, **params):
        self.foo_param = params['foo_param']
        return self
class LinearSVCNoScore(LinearSVC):
    """A LinearSVC classifier that has no score method."""
    @property
    def score(self):
        # Property that always raises: accessing .score behaves as if the
        # attribute did not exist at all.
        raise AttributeError
# Tiny linearly separable 2-class dataset shared by many tests below.
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
    """Check that iterating a grid yields exactly the indexed items."""
    indexed = [grid[i] for i in range(len(grid))]
    assert_equal(list(grid), indexed)
def test_parameter_grid():
    # Test basic properties of ParameterGrid.
    params1 = {"foo": [1, 2, 3]}
    grid1 = ParameterGrid(params1)
    assert_true(isinstance(grid1, Iterable))
    assert_true(isinstance(grid1, Sized))
    assert_equal(len(grid1), 3)
    assert_grid_iter_equals_getitem(grid1)
    # len is the product of the value-list lengths: 2 * 3
    params2 = {"foo": [4, 2],
               "bar": ["ham", "spam", "eggs"]}
    grid2 = ParameterGrid(params2)
    assert_equal(len(grid2), 6)
    # loop to assert we can iterate over the grid multiple times
    for i in xrange(2):
        # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
        points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
        assert_equal(points,
                     set(("bar", x, "foo", y)
                         for x, y in product(params2["bar"], params2["foo"])))
    assert_grid_iter_equals_getitem(grid2)
    # Special case: empty grid (useful to get default estimator settings)
    empty = ParameterGrid({})
    assert_equal(len(empty), 1)
    assert_equal(list(empty), [{}])
    assert_grid_iter_equals_getitem(empty)
    assert_raises(IndexError, lambda: empty[1])
    # A list of grids: empty entries contribute one (empty) candidate each.
    has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
    assert_equal(len(has_empty), 4)
    assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
    assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
    # Test that the best estimator contains the right value for foo_param
    clf = MockClassifier()
    # verbose=3 exercises the logging path; stdout is captured below.
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
    # make sure it selects the smallest parameter in case of ties
    # (foo_param 2 and 3 both score 1.0 with MockClassifier)
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    grid_search.fit(X, y)
    sys.stdout = old_stdout
    assert_equal(grid_search.best_estimator_.foo_param, 2)
    for i, foo_i in enumerate([1, 2, 3]):
        assert_true(grid_search.grid_scores_[i][0]
                    == {'foo_param': foo_i})
    # Smoke test the score etc:
    grid_search.score(X, y)
    grid_search.predict_proba(X)
    grid_search.decision_function(X)
    grid_search.transform(X)
    # Test exception handling on scoring
    grid_search.scoring = 'sklearn'
    assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
    # Test grid-search on classifier that has no score function.
    clf = LinearSVC(random_state=0)
    X, y = make_blobs(random_state=0, centers=2)
    Cs = [.1, 1, 10]
    clf_no_score = LinearSVCNoScore(random_state=0)
    grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
    grid_search.fit(X, y)
    grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
                                        scoring='accuracy')
    # smoketest grid search
    grid_search_no_score.fit(X, y)
    # check that best params are equal
    assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
    # check that we can call score and that it gives the correct result
    assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
    # giving no scoring function raises an error
    grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
    assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
                         [[1]])
def test_grid_search_score_method():
    """Exercise how search.score() interacts with the `scoring` parameter."""
    X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
                               random_state=0)
    clf = LinearSVC(random_state=0)
    grid = {'C': [.1]}
    search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
    search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
    search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
                                              scoring='roc_auc').fit(X, y)
    search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
    # Check warning only occurs in situation where behavior changed:
    # estimator requires score method to compete with scoring parameter
    score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
    score_accuracy = assert_warns(ChangedBehaviorWarning,
                                  search_accuracy.score, X, y)
    score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
                                            X, y)
    score_auc = assert_warns(ChangedBehaviorWarning,
                             search_auc.score, X, y)
    # ensure the test is sane
    assert_true(score_auc < 1.0)
    assert_true(score_accuracy < 1.0)
    assert_not_equal(score_auc, score_accuracy)
    # scores computed with the same metric must agree no matter how the
    # metric was selected
    assert_almost_equal(score_accuracy, score_no_scoring)
    assert_almost_equal(score_auc, score_no_score_auc)
def test_grid_search_labels():
    # Check if ValueError (when labels is None) propagates to GridSearchCV
    # And also check if labels is correctly passed to the cv object
    rng = np.random.RandomState(0)
    X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
    labels = rng.randint(0, 3, 15)
    clf = LinearSVC(random_state=0)
    grid = {'C': [1]}
    label_cvs = [LeaveOneLabelOut(), LeavePLabelOut(2), LabelKFold(),
                 LabelShuffleSplit()]
    for cv in label_cvs:
        gs = GridSearchCV(clf, grid, cv=cv)
        # label-based splitters must refuse to run without labels ...
        assert_raise_message(ValueError,
                             "The labels parameter should not be None",
                             gs.fit, X, y)
        # ... and accept them when provided
        gs.fit(X, y, labels)
    non_label_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
    for cv in non_label_cvs:
        # (removed a leftover debug `print(cv)` here)
        gs = GridSearchCV(clf, grid, cv=cv)
        # Should not raise an error
        gs.fit(X, y)
def test_trivial_grid_scores():
    """A "grid" with a single point must still populate grid_scores_.

    Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
    """
    estimator = MockClassifier()
    for search in (GridSearchCV(estimator, {'foo_param': [1]}),
                   RandomizedSearchCV(estimator, {'foo_param': [0]},
                                      n_iter=1)):
        search.fit(X, y)
        assert_true(hasattr(search, "grid_scores_"))
def test_no_refit():
    """With refit=False the search still exposes the selected parameters."""
    search = GridSearchCV(MockClassifier(), {'foo_param': [1, 2, 3]},
                          refit=False)
    search.fit(X, y)
    assert_true(hasattr(search, "best_params_"))
def test_grid_search_error():
    """Fitting with X and y of different lengths must raise ValueError."""
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
    search = GridSearchCV(LinearSVC(), {'C': [0.1, 1.0]})
    # 180 samples in X against 200 labels: inconsistent lengths.
    assert_raises(ValueError, search.fit, X_[:180], y_)
def test_grid_search_iid():
    # test the iid parameter
    # noise-free simple 2d-data
    X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
                      cluster_std=0.1, shuffle=False, n_samples=80)
    # split dataset into two folds that are not iid
    # first one contains data of all 4 blobs, second only from two.
    # dtype=bool (builtin): the np.bool alias is deprecated since NumPy 1.20
    # and removed in 1.24; behavior is identical.
    mask = np.ones(X.shape[0], dtype=bool)
    mask[np.where(y == 1)[0][::2]] = 0
    mask[np.where(y == 2)[0][::2]] = 0
    # this leads to perfect classification on one fold and a score of 1/3 on
    # the other
    svm = SVC(kernel='linear')
    # create "cv" for splits
    cv = [[mask, ~mask], [~mask, mask]]
    # once with iid=True (default)
    grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
    grid_search.fit(X, y)
    first = grid_search.grid_scores_[0]
    assert_equal(first.parameters['C'], 1)
    assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
    # for first split, 1/4 of dataset is in test, for second 3/4.
    # take weighted average
    assert_almost_equal(first.mean_validation_score,
                        1 * 1. / 4. + 1. / 3. * 3. / 4.)
    # once with iid=False
    grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
                               iid=False)
    grid_search.fit(X, y)
    first = grid_search.grid_scores_[0]
    assert_equal(first.parameters['C'], 1)
    # scores are the same as above
    assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
    # averaged score is just mean of scores
    assert_almost_equal(first.mean_validation_score,
                        np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
    """A one-point grid must yield the same model as fitting directly."""
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
    search = GridSearchCV(SVC(),
                          {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]})
    search.fit(X_, y_)
    direct = SVC(C=1.0, kernel="rbf", gamma=0.1)
    direct.fit(X_, y_)
    assert_array_equal(direct.dual_coef_, search.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
    """Non-list, empty, and non-1d parameter values must all be rejected."""
    bad_grids = (
        {"C": 1.0},                          # scalar instead of a list
        {"C": []},                           # empty candidate list
        {"C": np.ones(6).reshape(3, 2)},     # 2-d array of candidates
    )
    for param_dict in bad_grids:
        clf = SVC()
        assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
    # Test that grid search works with both dense and sparse matrices
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
    clf = LinearSVC()
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    cv.fit(X_[:180], y_[:180])
    y_pred = cv.predict(X_[180:])
    C = cv.best_estimator_.C
    # same search again, with the same data converted to sparse
    X_ = sp.csr_matrix(X_)
    clf = LinearSVC()
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    cv.fit(X_[:180].tocoo(), y_[:180])
    y_pred2 = cv.predict(X_[180:])
    C2 = cv.best_estimator_.C
    # predictions should mostly agree and the selected C must be identical
    assert_true(np.mean(y_pred == y_pred2) >= .9)
    assert_equal(C, C2)
def test_grid_search_sparse_scoring():
    """Grid search with a named/custom scorer behaves the same on sparse X."""
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
    clf = LinearSVC()
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
    cv.fit(X_[:180], y_[:180])
    y_pred = cv.predict(X_[180:])
    C = cv.best_estimator_.C
    # same search on the sparse version of the data
    X_ = sp.csr_matrix(X_)
    clf = LinearSVC()
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
    cv.fit(X_[:180], y_[:180])
    y_pred2 = cv.predict(X_[180:])
    C2 = cv.best_estimator_.C
    assert_array_equal(y_pred, y_pred2)
    assert_equal(C, C2)
    # Smoke test the score
    # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
    #                            cv.score(X_[:180], y[:180]))
    # test loss where greater is worse
    def f1_loss(y_true_, y_pred_):
        return -f1_score(y_true_, y_pred_)
    F1Loss = make_scorer(f1_loss, greater_is_better=False)
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
    cv.fit(X_[:180], y_[:180])
    y_pred3 = cv.predict(X_[180:])
    C3 = cv.best_estimator_.C
    # minimizing the loss must select the same model as maximizing f1
    assert_equal(C, C3)
    assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
    # Test that grid search works when the input features are given in the
    # form of a precomputed kernel matrix
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
    # compute the training kernel matrix corresponding to the linear kernel
    K_train = np.dot(X_[:180], X_[:180].T)
    y_train = y_[:180]
    clf = SVC(kernel='precomputed')
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    cv.fit(K_train, y_train)
    assert_true(cv.best_score_ >= 0)
    # compute the test kernel matrix (test samples vs training samples)
    K_test = np.dot(X_[180:], X_[:180].T)
    y_test = y_[180:]
    y_pred = cv.predict(K_test)
    assert_true(np.mean(y_pred == y_test) >= 0)
    # test error is raised when the precomputed kernel is not array-like
    # or sparse
    assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
    """A non-square precomputed training kernel matrix must be rejected."""
    kernel = np.zeros((10, 20))
    targets = np.ones((10, ))
    search = GridSearchCV(SVC(kernel='precomputed'), {'C': [0.1, 1.0]})
    assert_raises(ValueError, search.fit, kernel, targets)
def test_grid_search_precomputed_kernel_error_kernel_function():
    # Test that grid search returns an error when using a kernel_function
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)

    # plain def instead of an assigned lambda (PEP 8, E731)
    def kernel_function(x1, x2):
        return np.dot(x1, x2.T)

    clf = SVC(kernel=kernel_function)
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
    """Broken classifier that cannot be fit twice"""
    def __init__(self, parameter=None):
        self.parameter = parameter
    def fit(self, X, y):
        # Fitting an already-fit instance trips this assertion on purpose;
        # used by test_refit below.
        assert_true(not hasattr(self, 'has_been_fit_'))
        self.has_been_fit_ = True
    def predict(self, X):
        # Constant prediction: one zero per sample.
        return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
    """Regression test for bug in refitting

    Simulates re-fitting a broken estimator; this used to break with
    sparse SVMs.
    """
    features = np.arange(100).reshape(10, 10)
    labels = np.array([0] * 5 + [1] * 5)
    search = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
                          scoring="precision", refit=True)
    search.fit(features, labels)
def test_gridsearch_nd():
    """GridSearchCV must pass n-dimensional X and y through unchanged."""
    X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
    y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)

    # plain defs instead of assigned lambdas (PEP 8, E731)
    def check_X(x):
        return x.shape[1:] == (5, 3, 2)

    def check_y(x):
        return x.shape[1:] == (7, 11)

    clf = CheckingClassifier(check_X=check_X, check_y=check_y)
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
    # NOTE(review): score() here uses the module-level 2-D X/y rather than
    # the 4-D/3-D data the search was fit on -- confirm this is intended.
    grid_search.fit(X_4d, y_3d).score(X, y)
    assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
    """GridSearchCV accepts X given as a plain Python list."""
    features = np.arange(100).reshape(10, 10)
    labels = np.array([0] * 5 + [1] * 5)
    # the checking classifier verifies that X really arrives as a list
    checker = CheckingClassifier(check_X=lambda x: isinstance(x, list))
    search = GridSearchCV(checker, {'foo_param': [1, 2, 3]},
                          cv=KFold(n_folds=3))
    search.fit(features.tolist(), labels).score(features, labels)
    assert_true(hasattr(search, "grid_scores_"))
def test_y_as_list():
    """GridSearchCV accepts y given as a plain Python list."""
    features = np.arange(100).reshape(10, 10)
    labels = np.array([0] * 5 + [1] * 5)
    # the checking classifier verifies that y really arrives as a list
    checker = CheckingClassifier(check_y=lambda x: isinstance(x, list))
    search = GridSearchCV(checker, {'foo_param': [1, 2, 3]},
                          cv=KFold(n_folds=3))
    search.fit(features, labels.tolist()).score(features, labels)
    assert_true(hasattr(search, "grid_scores_"))
@ignore_warnings
def test_pandas_input():
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    # also test against real pandas types when pandas is installed
    try:
        from pandas import Series, DataFrame
        types.append((DataFrame, Series))
    except ImportError:
        pass
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)
    for InputFeatureType, TargetType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        # the checking classifier asserts the frame/series types survive
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
        grid_search.fit(X_df, y_ser).score(X_df, y_ser)
        grid_search.predict(X_df)
        assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
    """Grid search works for an unsupervised estimator (KMeans)."""
    X, y = make_blobs(random_state=0)
    km = KMeans(random_state=0)
    # Scoring against y with ARI: the true cluster count (3) wins.
    search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
                          scoring='adjusted_rand_score')
    search.fit(X, y)
    assert_equal(search.best_params_["n_clusters"], 3)
    # Without a scorer and without y, KMeans' own score selects 4.
    search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
    search.fit(X)
    assert_equal(search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
    """Search works for estimators without predict via a custom scorer.

    Slight duplication of a test from KDE.
    """
    def bandwidth_scorer(estimator, X):
        # only bandwidth .1 is rewarded
        return 42 if estimator.bandwidth == .1 else 0

    X, _ = make_blobs(cluster_std=.1, random_state=1,
                      centers=[[0, 1], [1, 0], [0, 0]])
    search = GridSearchCV(KernelDensity(),
                          param_grid=dict(bandwidth=[.01, .1, 1]),
                          scoring=bandwidth_scorer)
    search.fit(X)
    assert_equal(search.best_params_['bandwidth'], .1)
    assert_equal(search.best_score_, 42)
def test_param_sampler():
    # test basic properties of param sampler
    param_distributions = {"kernel": ["rbf", "linear"],
                           "C": uniform(0, 1)}
    sampler = ParameterSampler(param_distributions=param_distributions,
                               n_iter=10, random_state=0)
    samples = [x for x in sampler]
    assert_equal(len(samples), 10)
    for sample in samples:
        # discrete values come from the given list, continuous from uniform(0, 1)
        assert_true(sample["kernel"] in ["rbf", "linear"])
        assert_true(0 <= sample["C"] <= 1)
    # test that repeated calls yield identical parameters
    param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
    sampler = ParameterSampler(param_distributions=param_distributions,
                               n_iter=3, random_state=0)
    assert_equal([x for x in sampler], [x for x in sampler])
    # seeding scipy distributions is only possible from scipy 0.16 on
    if sp_version >= (0, 16):
        param_distributions = {"C": uniform(0, 1)}
        sampler = ParameterSampler(param_distributions=param_distributions,
                                   n_iter=10, random_state=0)
        assert_equal([x for x in sampler], [x for x in sampler])
def test_randomized_search_grid_scores():
    # Make a dataset with a lot of noise to get various kind of prediction
    # errors across CV folds and parameter settings
    X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
                               random_state=0)
    # XXX: as of today (scipy 0.12) it's not possible to set the random seed
    # of scipy.stats distributions: the assertions in this test should thus
    # not depend on the randomization
    params = dict(C=expon(scale=10),
                  gamma=expon(scale=0.1))
    n_cv_iter = 3
    n_search_iter = 30
    search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
                                param_distributions=params, iid=False)
    search.fit(X, y)
    # one grid_scores_ entry per sampled candidate
    assert_equal(len(search.grid_scores_), n_search_iter)
    # Check consistency of the structure of each cv_score item
    for cv_score in search.grid_scores_:
        assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
        # Because we set iid to False, the mean_validation score is the
        # mean of the fold mean scores instead of the aggregate sample-wise
        # mean score
        assert_almost_equal(np.mean(cv_score.cv_validation_scores),
                            cv_score.mean_validation_score)
        assert_equal(list(sorted(cv_score.parameters.keys())),
                     list(sorted(params.keys())))
    # Check the consistency with the best_score_ and best_params_ attributes
    sorted_grid_scores = list(sorted(search.grid_scores_,
                              key=lambda x: x.mean_validation_score))
    best_score = sorted_grid_scores[-1].mean_validation_score
    assert_equal(search.best_score_, best_score)
    tied_best_params = [s.parameters for s in sorted_grid_scores
                        if s.mean_validation_score == best_score]
    assert_true(search.best_params_ in tied_best_params,
                "best_params_={0} is not part of the"
                " tied best models: {1}".format(
                    search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
    """Fold scores in grid_scores_ must match manually computed scorer values."""
    # test that correct scores are used
    clf = LinearSVC(random_state=0)
    X, y = make_blobs(random_state=0, centers=2)
    Cs = [.1, 1, 10]
    for score in ['f1', 'roc_auc']:
        grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
        grid_search.fit(X, y)
        # NOTE(review): StratifiedKFold(n_folds=3) mixes API generations --
        # splitters that expose .split() spell this n_splits; confirm against
        # the scikit-learn version this suite targets.
        cv = StratifiedKFold(n_folds=3)
        for C, scores in zip(Cs, grid_search.grid_scores_):
            clf.set_params(C=C)
            scores = scores[2]  # get the separate runs from grid scores
            i = 0
            for train, test in cv.split(X, y):
                # Refit on the same fold by hand and recompute the metric.
                clf.fit(X[train], y[train])
                if score == "f1":
                    correct_score = f1_score(y[test], clf.predict(X[test]))
                elif score == "roc_auc":
                    dec = clf.decision_function(X[test])
                    correct_score = roc_auc_score(y[test], dec)
                assert_almost_equal(correct_score, scores[i])
                i += 1
def test_pickle():
    """Smoke test: fitted search estimators must survive pickling."""
    base_clf = MockClassifier()

    fitted_grid = GridSearchCV(base_clf, {'foo_param': [1, 2, 3]}, refit=True)
    fitted_grid.fit(X, y)
    pickle.dumps(fitted_grid)  # must not raise

    fitted_random = RandomizedSearchCV(base_clf, {'foo_param': [1, 2, 3]},
                                       refit=True, n_iter=3)
    fitted_random.fit(X, y)
    pickle.dumps(fitted_random)  # must not raise
def test_grid_search_with_multioutput_data():
    """Search (grid and randomized) must work with multi-output estimators."""
    X, y = make_multilabel_classification(return_indicator=True,
                                          random_state=0)
    est_parameters = {"max_depth": [1, 2, 3, 4]}
    cv = KFold(random_state=0)
    estimators = [DecisionTreeRegressor(random_state=0),
                  DecisionTreeClassifier(random_state=0)]

    def check_search(estimator, search):
        # Refit the estimator on each fold by hand and compare against the
        # per-fold scores the search object recorded.
        search.fit(X, y)
        for parameters, _, fold_scores in search.grid_scores_:
            estimator.set_params(**parameters)
            for fold, (train, test) in enumerate(cv.split(X, y)):
                estimator.fit(X[train], y[train])
                assert_almost_equal(estimator.score(X[test], y[test]),
                                    fold_scores[fold])

    # Test with grid search cv
    for est in estimators:
        check_search(est, GridSearchCV(est, est_parameters, cv=cv))
    # Test with a randomized search
    for est in estimators:
        check_search(est, RandomizedSearchCV(est, est_parameters,
                                             cv=cv, n_iter=3))
def test_predict_proba_disabled():
    """GridSearchCV must not expose predict_proba when the estimator lacks it."""
    features = np.arange(20).reshape(5, -1)
    labels = [0, 0, 1, 1, 1]
    search = GridSearchCV(SVC(probability=False), {}, cv=2).fit(features, labels)
    assert_false(hasattr(search, "predict_proba"))
def test_grid_search_allows_nans():
    """GridSearchCV should accept NaN inputs when the pipeline imputes them."""
    features = np.arange(20, dtype=np.float64).reshape(5, -1)
    features[2, :] = np.nan
    labels = [0, 0, 1, 1, 1]
    pipeline = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    GridSearchCV(pipeline,
                 {'classifier__foo_param': [1, 2, 3]},
                 cv=2).fit(features, labels)
class FailingClassifier(BaseEstimator):
    """Classifier that raises a ValueError on fit().

    Used to exercise the error_score handling of the search estimators.
    """

    # fit() fails if and only if ``parameter`` equals this value.
    FAILING_PARAMETER = 2

    def __init__(self, parameter=None):
        self.parameter = parameter

    def fit(self, X, y=None):
        if self.parameter == FailingClassifier.FAILING_PARAMETER:
            raise ValueError("Failing classifier failed as required")
        # Fix: scikit-learn estimators must return self from fit() so calls
        # can be chained; the original implicitly returned None.
        return self

    def predict(self, X):
        # Constant prediction, one value per sample.
        return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
    """error_score must replace the scores of failing fits and warn."""
    # GridSearchCV with on_error != 'raise'
    # Ensures that a warning is raised and score reset where appropriate.
    X, y = make_classification(n_samples=20, n_features=10, random_state=0)
    clf = FailingClassifier()

    # refit=False because we only want to check that errors caused by fits
    # to individual folds will be caught and warnings raised instead. If
    # refit was done, then an exception would be raised on refit and not
    # caught by grid_search (expected behavior), and this would cause an
    # error in this test.
    gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
                      refit=False, error_score=0.0)
    assert_warns(FitFailedWarning, gs.fit, X, y)

    # Ensure that grid scores were set to zero as required for those fits
    # that are expected to fail.
    assert all(np.all(this_point.cv_validation_scores == 0.0)
               for this_point in gs.grid_scores_
               if this_point.parameters['parameter'] ==
               FailingClassifier.FAILING_PARAMETER)

    # With error_score=NaN the failed fits must be reported as NaN instead.
    gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
                      refit=False, error_score=float('nan'))
    assert_warns(FitFailedWarning, gs.fit, X, y)
    assert all(np.all(np.isnan(this_point.cv_validation_scores))
               for this_point in gs.grid_scores_
               if this_point.parameters['parameter'] ==
               FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
    """With error_score='raise', fold failures must propagate to the caller."""
    X, y = make_classification(n_samples=20, n_features=10, random_state=0)
    # refit=False because we want to test the behaviour of the grid search
    # part only.
    search = GridSearchCV(FailingClassifier(), [{'parameter': [0, 1, 2]}],
                          scoring='accuracy', refit=False,
                          error_score='raise')
    # FailingClassifier issues a ValueError so this is what we look for.
    assert_raises(ValueError, search.fit, X, y)
def test_parameters_sampler_replacement():
    """ParameterSampler must sample without replacement and bound n_iter."""
    # raise error if n_iter too large
    params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
    sampler = ParameterSampler(params, n_iter=7)
    assert_raises(ValueError, list, sampler)

    # degenerates to GridSearchCV if n_iter the same as grid_size
    sampler = ParameterSampler(params, n_iter=6)
    samples = list(sampler)
    assert_equal(len(samples), 6)
    for values in ParameterGrid(params):
        assert_true(values in samples)

    # test sampling without replacement in a large grid
    params = {'a': range(10), 'b': range(10), 'c': range(10)}
    sampler = ParameterSampler(params, n_iter=99, random_state=42)
    samples = list(sampler)
    assert_equal(len(samples), 99)
    # All 99 sampled points must be pairwise distinct.
    hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
                        for p in samples]
    assert_equal(len(set(hashable_samples)), 99)

    # doesn't go into infinite loops (distributions can repeat values)
    params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
    sampler = ParameterSampler(params_distribution, n_iter=7)
    samples = list(sampler)
    assert_equal(len(samples), 7)
| |
import __builtin__
import os
import sys
import re
import code
import atexit
import getopt
import pydoc
import signal
import burst
from burst.conf import CONF_DIR
from burst.color import *
try:
import readline
import rlcompleter
has_readline = True
except ImportError:
has_readline = False
try:
import termios
import fcntl
import struct
has_termios = True
except ImportError:
has_termios = False
term_width = None
def _usage():
    # Print the command-line help and exit with status 0 (Python 2 print
    # statement; this module targets Python 2).
    print """Usage: burst [-bhlrv] [-s session_name]
-b: no graphical banner
-h: print this help message
-l: list existing sessions
-v: print the version and exit
-s: create or load a session
-r: put the session in read-only"""
    sys.exit(0)
def _save_history():
    """Persist the readline history to the Burst config directory."""
    history_path = os.path.join(CONF_DIR, ".history")
    try:
        readline.write_history_file(history_path)
    except IOError:
        # Best effort only: an unwritable config dir must not kill the shell.
        pass
def _load_history():
    """Restore the readline history saved by a previous Burst run."""
    history_path = os.path.join(CONF_DIR, ".history")
    try:
        readline.read_history_file(history_path)
    except IOError:
        # No history file yet (e.g. first run) -- nothing to restore.
        pass

# Save the history automatically when the interpreter exits.
atexit.register(_save_history)
def _get_term_width():
    """Return the terminal width in columns, falling back to 80."""
    if has_termios:
        # Query stdin, stdout and stderr in turn: any of them may be a tty.
        for fd in (0, 1, 2):
            try:
                winsize = fcntl.ioctl(fd, termios.TIOCGWINSZ, '????')
                return struct.unpack('hh', winsize)[1]
            except:
                pass
    return 80
def _update_term_width(snum, frame):
    """Signal-handler-shaped hook: refresh the global term_width from conf."""
    global term_width
    if not conf.term_width:
        term_width = 0
    elif conf.term_width == "auto":
        term_width = _get_term_width()
    else:
        term_width = int(conf.term_width)
class ColorPrompt(object):
    """Interactive prompt that colours the session name by its state."""

    def __str__(self):
        name = burst.session.session_name
        prompt = '\001{}\002'.format(info('\002>>> \001'))
        if name != "default":
            # Colour encodes the session state: stealthy for read-only,
            # error for unsaved changes, warning otherwise.
            if burst.session.session_readonly:
                colourise = stealthy
            elif burst.session.should_save():
                colourise = error
            else:
                colourise = warning
            prompt = '\001{}\002 '.format(colourise('\002' + name + '\001')) + prompt
        return prompt
class BurstInteractiveConsole(code.InteractiveConsole):
    """InteractiveConsole with short aliases: p -> print, v/w -> (external_)view."""

    re_print_alias = re.compile(r'^p\s(.*)')
    re_view_alias = re.compile(r'^v\s(.*)')
    re_extview_alias = re.compile(r'^w\s(.*)')

    def push(self, line):
        # Expand each alias before handing the line to the real interpreter.
        aliases = ((self.re_print_alias, r'print \1'),
                   (self.re_view_alias, r'view(\1)'),
                   (self.re_extview_alias, r'external_view(\1)'))
        for pattern, replacement in aliases:
            if pattern.match(line):
                line = pattern.sub(replacement, line)
        code.InteractiveConsole.push(self, line)
def help(obj=None):
    # Shadows the builtin help(): with no argument print the Burst overview,
    # otherwise defer to pydoc (the stock help stays available as
    # python_help(), installed by interact()).
    if not obj:
        print """Welcome to Burst!
If this is your first time using Burst, you should check the
quickstart at http://securusglobal.github.com/burst/.
Here are the basic functions of Burst, type 'help(function)'
for a complete description of these functions:
* proxy: Start a HTTP proxy on port 8080.
* create: Create a HTTP request based on a URL.
* inject: Inject or fuzz a request.
Burst have few classes which worth having a look at, typing 'help(class)':
* Request
* Response
* RequestSet
There are also few interesting global objects, 'help(object)':
* conf
* history
Please, report any bug or comment to tw@securusglobal.com"""
    else:
        pydoc.help(obj)
def interact(local_dict=None):
    """Set up the Burst environment and run the interactive console.

    local_dict: optional extra names merged into the session namespace
    (only used when scripted).
    """
    # Expose every name from burst.all as builtins so the user shell can
    # call them without imports; keep the stock help as python_help.
    burst_builtins = __import__("all", globals(), locals(), ".").__dict__
    __builtin__.__dict__.update(burst_builtins)
    __builtin__.__dict__["help"] = help
    __builtin__.__dict__["python_help"] = pydoc.help
    banner = """ ___ _
| _ )_ _ _ _ ___| |_
| _ \ || | '_(_ -< _|
|___/\_,_|_| /__/\__|
"""
    # Parse arguments
    try:
        opts = getopt.getopt(sys.argv[1:], "s:bhlvr")
        for opt, param in opts[0]:
            if opt == "-h":
                _usage()
            elif opt == "-s":
                burst.session.session_name = param
            elif opt == "-l":
                burst.session.list_sessions()
                sys.exit(0)
            elif opt == "-v":
                print "Burst {}, Copyright (c) 2013 Securus Global".format(burst.__version__)
                sys.exit(0)
            elif opt == "-b":
                banner = "Burst {}".format(burst.__version__)
            elif opt == "-r":
                burst.session.session_readonly = True
        # Leftover positional arguments are not accepted.
        if opts[1]:
            _usage()
    except getopt.GetoptError:
        _usage()
    # First time setup
    if not burst.conf.check_config_dir():
        print "Generating SSL certificate..."
        burst.cert.generate_ca_cert()
    banner += "\nWelcome to Burst, type help() for more information"
    # Load user plugins
    burst.conf.load_plugins()
    # Could we find the payloads?
    if not burst.injection.payloads:
        print warning("No payload found for the injection, check burst/payloads")
    # Load user default configuration, if any
    conf.load()
    # Import config from the environment
    conf.import_env()
    # Load the session, session configuration takes precedence
    # over global configuration. There is no condition, by default,
    # load the "default" session.
    burst.session.load_session()
    # Experimental: Insert provided local variables
    # (only used when scripted)
    if local_dict:
        burst.session.session_dict.update(local_dict)
    # Setup autocompletion if readline
    if has_readline:
        class BurstCompleter(rlcompleter.Completer):
            # Completer aware of both builtins and the session namespace.
            def global_matches(self, text):
                matches = []
                n = len(text)
                for word in dir(__builtin__) + burst.session.session_dict.keys():
                    if word[:n] == text and word != "__builtins__":
                        matches.append(word)
                return matches

            def attr_matches(self, text):
                # Split "expr.attr_prefix" and complete on dir(expr).
                m = re.match(r"([\w\[\]\-]+(\.[\w\[\]]+)*)\.(\w*)", text)
                if m:
                    expr, attr = m.group(1, 3)
                else:
                    return
                try:
                    thisobject = eval(expr)
                except:
                    # Fall back to the session namespace for session names.
                    thisobject = eval(expr, burst.session.session_dict)
                words = dir(thisobject)
                if hasattr(thisobject, "__class__"):
                    words = words + rlcompleter.get_class_members(thisobject.__class__)
                matches = []
                n = len(attr)
                for word in words:
                    if word[:n] == attr and word != "__builtins__":
                        matches.append("{}.{}".format(expr, word))
                return matches

        readline.set_completer_delims(" \t\n`~!@#$%^&*()=+{}\\|;:'\",<>/?")
        readline.set_completer(BurstCompleter().complete)
        readline.parse_and_bind("tab: complete")
        _load_history()
    # Hooked window resizing
    _update_term_width(None, None)
    signal.signal(signal.SIGWINCH, _update_term_width)
    # And run the interpreter!
    sys.ps1 = ColorPrompt()
    atexit.register(burst.session.autosave_session)
    aci = BurstInteractiveConsole(burst.session.session_dict)
    aci.interact(banner)
| |
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
# Minimum fee (in BTC) below which change is simply left to the miners.
BASE_FEE=Decimal("0.001")

def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    value = Decimal("20000000.00000003")
    roundtripped = json.loads(json.dumps(float(value)))
    if int(roundtripped*1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the bitcoin data directory"""
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Bitcoin/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "Bitcoin")
    # Anything else is assumed to follow the Unix dotfile convention.
    return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
    """Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
    from ConfigParser import SafeConfigParser  # Python 2 module name

    class FakeSecHead(object):
        # bitcoin.conf has no [section] headers, but ConfigParser requires
        # one: this wrapper injects a fake "[all]" section on the first read.
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'

        def readline(self):
            if self.sechead:
                # Emit the fake header exactly once.
                try: return self.sechead
                finally: self.sechead = None
            else:
                s = self.fp.readline()
                if s.find('#') != -1:
                    # Strip trailing '#' comments, which ConfigParser rejects.
                    s = s[0:s.find('#')].strip() +"\n"
                return s

    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a bitcoin JSON-RPC server.

    config: settings dict from bitcoin.conf; must contain rpcuser and
    rpcpassword, may contain testnet and rpcport.  Exits the process with
    status 1 on connection failure or testnet-setting mismatch.
    """
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0)  # 0/1 in config file, convert to True/False
    if 'rpcport' not in config:
        config['rpcport'] = 149876 if testnet else 49876
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch
        # connection errors, but also make sure the bitcoind we're talking to
        # is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except SystemExit:
        # Fix: the original bare "except:" also caught the SystemExit raised
        # by sys.exit(1) in the mismatch branch above and reported a bogus
        # connection error. Let deliberate exits propagate untouched.
        raise
    except Exception:
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(bitcoind):
    """Ensure the wallet is unlocked, prompting for the passphrase if needed.

    Returns True when the wallet is unencrypted or unlocked past the current
    time, False when the entered passphrase was wrong.
    """
    info = bitcoind.getinfo()
    if 'unlocked_until' not in info:
        return True  # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            # Unlock only briefly (5 seconds): long enough to sign the tx.
            bitcoind.walletpassphrase(passphrase, 5)
        except Exception:
            # Fix: was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit raised at the passphrase prompt.
            sys.stderr.write("Wrong passphrase\n")

    info = bitcoind.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
    """Group the node's unspent outputs by receiving address.

    Returns {address: {"total": Decimal, "outputs": [unspent, ...],
                       "account": str}}.
    """
    # Map addresses to account labels first; listunspent has no account info.
    account_of = dict()
    for info in bitcoind.listreceivedbyaddress(0):
        account_of[info["address"]] = info["account"]

    summary = dict()
    for output in bitcoind.listunspent(0):
        # listunspent doesn't give addresses, so look up the funding tx:
        rawtx = bitcoind.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        script = vout["scriptPubKey"]
        # This code only deals with ordinary pay-to-bitcoin-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if script["type"] not in ("pubkeyhash", "scripthash"):
            continue
        address = script["addresses"][0]
        entry = summary.get(address)
        if entry is None:
            summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : account_of.get(address, "")
                }
        else:
            entry["total"] += vout["value"]
            entry["outputs"].append(output)
    return summary
def select_coins(needed, inputs):
    """Greedily pick inputs until their sum covers `needed`.

    Returns (outputs, change): outputs is a list of {"txid", "vout"} dicts,
    change = gathered - needed (negative when the inputs ran out).
    """
    # Feel free to improve this, this is good enough for my simple needs:
    picked = []
    gathered = Decimal("0.0")
    for candidate in inputs:
        if gathered >= needed:
            break
        picked.append({ "txid":candidate["txid"], "vout":candidate["vout"]})
        gathered += candidate["amount"]
    return (picked, gathered-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
    """Build and sign a raw transaction spending amount+fee BTC.

    Coins are gathered from fromaddresses; change above BASE_FEE is sent
    back to the last from-address.  Returns the signed transaction hex.
    Exits the process if funds are insufficient or signing fails.
    """
    all_coins = list_available(bitcoind)

    total_available = Decimal("0.0")
    needed = amount+fee
    potential_inputs = []
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        potential_inputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]

    if total_available < needed:
        sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
        sys.exit(1)

    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to bitcoind.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE:  # don't bother with zero or tiny change
        change_address = fromaddresses[-1]
        if change_address in outputs:
            outputs[change_address] += float(change_amount)
        else:
            outputs[change_address] = float(change_amount)

    rawtx = bitcoind.createrawtransaction(inputs, outputs)
    signed_rawtx = bitcoind.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    txdata = signed_rawtx["hex"]

    return txdata
def compute_amount_in(bitcoind, txinfo):
    """Sum the values of the outputs spent by txinfo's inputs (in BTC)."""
    total = Decimal("0.0")
    for vin in txinfo['vin']:
        # Look up the transaction that funded this input and take the value
        # of the specific output it spends.
        funding = bitcoind.getrawtransaction(vin['txid'], 1)
        total = total + funding['vout'][vin['vout']]['value']
    return total
def compute_amount_out(txinfo):
    """Sum the values of all outputs of the decoded transaction (in BTC)."""
    return sum((vout['value'] for vout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
    """Abort (exit 1) if the transaction's implied fee looks unreasonable.

    Checks that the fee (inputs minus outputs) does not exceed max_fee and
    that non-trivial transactions include at least BASE_FEE.
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = bitcoind.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(bitcoind, txinfo)
        total_out = compute_amount_out(txinfo)
        # Fix: the checks below referenced an undefined name `fee`
        # (NameError at runtime); the fee is what goes to miners: in - out.
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))

        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """Command-line entry point: list available funds, or build and send a spend."""
    import optparse

    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get bitcoins from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send bitcoins to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of bitcoin.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")

    (options, args) = parser.parse_args()

    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    bitcoind = connect_JSON(config)

    if options.amount is None:
        # No amount given: just report the spendable balance per address.
        address_summary = list_available(bitcoind)
        for address,info in address_summary.iteritems():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(bitcoind) == False:
            pass # Keep asking for passphrase until they get it right
        txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
        # Reject the transaction if its implied fee exceeds 1% of the amount.
        sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = bitcoind.sendrawtransaction(txdata)
            print(txid)

if __name__ == '__main__':
    main()
| |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import bz2
import io
import json
import pytest
from sanic import Sanic, response
import cloudevents.exceptions as cloud_exceptions
from cloudevents.http import (
CloudEvent,
from_http,
is_binary,
is_structured,
to_binary,
to_structured,
)
from cloudevents.sdk import converters
# Each entry is missing exactly one of the four required binary-mode
# headers (ce-id / ce-source / ce-type / ce-specversion).
invalid_test_headers = [
    {
        "ce-source": "<event-source>",
        "ce-type": "cloudevent.event.type",
        "ce-specversion": "1.0",
    },
    {
        "ce-id": "my-id",
        "ce-type": "cloudevent.event.type",
        "ce-specversion": "1.0",
    },
    {"ce-id": "my-id", "ce-source": "<event-source>", "ce-specversion": "1.0"},
    {
        "ce-id": "my-id",
        "ce-source": "<event-source>",
        "ce-type": "cloudevent.event.type",
    },
]

# Structured-mode bodies, each missing one required attribute.
invalid_cloudevent_request_body = [
    {
        "source": "<event-source>",
        "type": "cloudevent.event.type",
        "specversion": "1.0",
    },
    {"id": "my-id", "type": "cloudevent.event.type", "specversion": "1.0"},
    {"id": "my-id", "source": "<event-source>", "specversion": "1.0"},
    {
        "id": "my-id",
        "source": "<event-source>",
        "type": "cloudevent.event.type",
    },
]

# Payload reused by the round-trip tests below.
test_data = {"payload-content": "Hello World!"}
# Minimal echo server: parses each incoming request as a CloudEvent and
# mirrors the event data back, with the event attributes as response headers.
app = Sanic(__name__)


@app.route("/event", ["POST"])
async def echo(request):
    decoder = None
    if "binary-payload" in request.headers:
        # Caller flagged a non-JSON payload: pass the bytes through untouched.
        decoder = lambda x: x
    event = from_http(
        dict(request.headers), request.body, data_unmarshaller=decoder
    )
    # Binary-ish data is returned verbatim; anything else is re-encoded JSON.
    data = (
        event.data
        if isinstance(event.data, (bytes, bytearray, memoryview))
        else json.dumps(event.data).encode()
    )
    return response.raw(data, headers={k: event[k] for k in event})
@pytest.mark.parametrize("body", invalid_cloudevent_request_body)
def test_missing_required_fields_structured(body):
    """Structured payloads lacking a required attribute must be rejected."""
    headers = {"Content-Type": "application/cloudevents+json"}
    with pytest.raises(cloud_exceptions.MissingRequiredFields):
        from_http(headers, json.dumps(body))
@pytest.mark.parametrize("headers", invalid_test_headers)
def test_missing_required_fields_binary(headers):
    """Binary requests lacking a required ce-* header must be rejected."""
    payload = json.dumps(test_data)
    with pytest.raises(cloud_exceptions.MissingRequiredFields):
        from_http(headers, payload)
@pytest.mark.parametrize("headers", invalid_test_headers)
def test_missing_required_fields_empty_data_binary(headers):
    """Regression test for issue #115: same header check with an empty body."""
    with pytest.raises(cloud_exceptions.MissingRequiredFields):
        from_http(headers, None)
@pytest.mark.parametrize("specversion", ["1.0", "0.3"])
def test_emit_binary_event(specversion):
    """POST a binary-mode event to the echo server and verify the round trip."""
    headers = {
        "ce-id": "my-id",
        "ce-source": "<event-source>",
        "ce-type": "cloudevent.event.type",
        "ce-specversion": specversion,
        "Content-Type": "text/plain",
    }
    data = json.dumps(test_data)
    _, r = app.test_client.post("/event", headers=headers, data=data)

    # Convert byte array to dict
    # e.g. r.body = b'{"payload-content": "Hello World!"}'
    body = json.loads(r.body.decode("utf-8"))

    # Check response fields
    for key in test_data:
        assert body[key] == test_data[key], body
    for key in headers:
        if key != "Content-Type":
            # Drop the "ce-" prefix: the server echoes attributes unprefixed.
            attribute_key = key[3:]
            assert r.headers[attribute_key] == headers[key]
    assert r.status_code == 200
@pytest.mark.parametrize("specversion", ["1.0", "0.3"])
def test_emit_structured_event(specversion):
    """POST a structured-mode event to the echo server and verify the data."""
    headers = {"Content-Type": "application/cloudevents+json"}
    body = {
        "id": "my-id",
        "source": "<event-source>",
        "type": "cloudevent.event.type",
        "specversion": specversion,
        "data": test_data,
    }
    _, r = app.test_client.post(
        "/event", headers=headers, data=json.dumps(body)
    )

    # Convert byte array to dict
    # e.g. r.body = b'{"payload-content": "Hello World!"}'
    body = json.loads(r.body.decode("utf-8"))

    # Check response fields
    for key in test_data:
        assert body[key] == test_data[key]
    assert r.status_code == 200
@pytest.mark.parametrize(
    "converter", [converters.TypeBinary, converters.TypeStructured]
)
@pytest.mark.parametrize("specversion", ["1.0", "0.3"])
def test_roundtrip_non_json_event(converter, specversion):
    """Round-trip a bz2-compressed (non-JSON) payload through the echo server.

    Identity (un)marshallers are used on both ends since the payload is not
    JSON-serializable text.
    """
    # 2000 bytes cycling over the values 0..19; write() returns the byte count.
    input_data = io.BytesIO()
    for _ in range(100):
        for j in range(20):
            assert 1 == input_data.write(j.to_bytes(1, byteorder="big"))
    compressed_data = bz2.compress(input_data.getvalue())
    attrs = {"source": "test", "type": "t"}
    event = CloudEvent(attrs, compressed_data)

    if converter == converters.TypeStructured:
        headers, data = to_structured(event, data_marshaller=lambda x: x)
    elif converter == converters.TypeBinary:
        headers, data = to_binary(event, data_marshaller=lambda x: x)

    headers["binary-payload"] = "true"  # Decoding hint for server
    _, r = app.test_client.post("/event", headers=headers, data=data)

    assert r.status_code == 200
    for key in attrs:
        assert r.headers[key] == attrs[key]
    assert compressed_data == r.body, r.body
@pytest.mark.parametrize("specversion", ["1.0", "0.3"])
def test_missing_ce_prefix_binary_event(specversion):
    """Headers with a mangled ce- prefix must not parse as a binary event."""
    valid_headers = {
        "ce-id": "my-id",
        "ce-source": "<event-source>",
        "ce-type": "cloudevent.event.type",
        "ce-specversion": specversion,
    }
    # breaking prefix e.g. e-id instead of ce-id
    prefixed_headers = {name[1:]: value
                        for name, value in valid_headers.items()}

    with pytest.raises(cloud_exceptions.MissingRequiredFields):
        # CloudEvent constructor throws TypeError if missing required field
        # and NotImplementedError because structured calls aren't
        # implemented. In this instance one of the required keys should have
        # prefix e-id instead of ce-id therefore it should throw
        from_http(prefixed_headers, json.dumps(test_data))
@pytest.mark.parametrize("specversion", ["1.0", "0.3"])
def test_valid_binary_events(specversion):
    """Parse many distinct binary events and verify attributes stay paired."""
    # Test creating multiple cloud events
    events_queue = []
    headers = {}
    num_cloudevents = 30
    for i in range(num_cloudevents):
        headers = {
            "ce-id": f"id{i}",
            "ce-source": f"source{i}.com.test",
            "ce-type": "cloudevent.test.type",
            "ce-specversion": specversion,
        }
        data = {"payload": f"payload-{i}"}
        events_queue.append(from_http(headers, json.dumps(data)))

    # Each parsed event must still carry its own id/source/payload.
    for i, event in enumerate(events_queue):
        data = event.data
        assert event["id"] == f"id{i}"
        assert event["source"] == f"source{i}.com.test"
        assert event["specversion"] == specversion
        assert event.data["payload"] == f"payload-{i}"
@pytest.mark.parametrize("specversion", ["1.0", "0.3"])
def test_structured_to_request(specversion):
    """to_structured() must serialize all attributes and data into the body."""
    attributes = {
        "specversion": specversion,
        "type": "word.found.name",
        "id": "96fb5f0b-001e-0108-6dfe-da6e2806f124",
        "source": "pytest",
    }
    data = {"message": "Hello World!"}

    headers, body_bytes = to_structured(CloudEvent(attributes, data))
    assert isinstance(body_bytes, bytes)
    body = json.loads(body_bytes)

    assert headers["content-type"] == "application/cloudevents+json"
    for name, value in attributes.items():
        assert body[name] == value
    assert body["data"] == data, f"|{body_bytes}|| {body}"
@pytest.mark.parametrize("specversion", ["1.0", "0.3"])
def test_binary_to_request(specversion):
    """to_binary() must put data in the body and attributes in ce-* headers."""
    attributes = {
        "specversion": specversion,
        "type": "word.found.name",
        "id": "96fb5f0b-001e-0108-6dfe-da6e2806f124",
        "source": "pytest",
    }
    payload = {"message": "Hello World!"}

    headers, body_bytes = to_binary(CloudEvent(attributes, payload))
    decoded = json.loads(body_bytes)

    for name, value in payload.items():
        assert decoded[name] == value
    for name, value in attributes.items():
        assert headers["ce-" + name] == value
@pytest.mark.parametrize("specversion", ["1.0", "0.3"])
def test_empty_data_structured_event(specversion):
    """Structured events with missing or empty data must parse to data=None."""
    # Testing if cloudevent breaks when no structured data field present
    attributes = {
        "specversion": specversion,
        "datacontenttype": "application/cloudevents+json",
        "type": "word.found.name",
        "id": "96fb5f0b-001e-0108-6dfe-da6e2806f124",
        "time": "2018-10-23T12:28:22.4579346Z",
        "source": "<source-url>",
    }

    event = from_http(
        {"content-type": "application/cloudevents+json"}, json.dumps(attributes)
    )
    assert event.data is None

    attributes["data"] = ""
    # Data of empty string will be marshalled into None
    event = from_http(
        {"content-type": "application/cloudevents+json"}, json.dumps(attributes)
    )
    assert event.data is None
@pytest.mark.parametrize("specversion", ["1.0", "0.3"])
def test_empty_data_binary_event(specversion):
    """Binary events with no body must yield event.data of None."""
    headers = {
        "Content-Type": "application/octet-stream",
        "ce-specversion": specversion,
        "ce-type": "word.found.name",
        "ce-id": "96fb5f0b-001e-0108-6dfe-da6e2806f124",
        "ce-time": "2018-10-23T12:28:22.4579346Z",
        "ce-source": "<source-url>",
    }
    assert from_http(headers, None).data is None
    # Data of empty string will be marshalled into None
    assert from_http(headers, "").data is None
@pytest.mark.parametrize("specversion", ["1.0", "0.3"])
def test_valid_structured_events(specversion):
    """Parse many distinct structured events and verify attributes stay paired."""
    # Test creating multiple cloud events
    events_queue = []
    num_cloudevents = 30
    for i in range(num_cloudevents):
        event = {
            "id": f"id{i}",
            "source": f"source{i}.com.test",
            "type": "cloudevent.test.type",
            "specversion": specversion,
            "data": {"payload": f"payload-{i}"},
        }
        events_queue.append(
            from_http(
                {"content-type": "application/cloudevents+json"},
                json.dumps(event),
            )
        )

    # Each parsed event must still carry its own id/source/payload.
    for i, event in enumerate(events_queue):
        assert event["id"] == f"id{i}"
        assert event["source"] == f"source{i}.com.test"
        assert event["specversion"] == specversion
        assert event.data["payload"] == f"payload-{i}"
@pytest.mark.parametrize("specversion", ["1.0", "0.3"])
def test_structured_no_content_type(specversion):
    """Without a content-type header a JSON body is parsed as structured."""
    payload = {
        "id": "id",
        "source": "source.com.test",
        "type": "cloudevent.test.type",
        "specversion": specversion,
        "data": test_data,
    }
    event = from_http({}, json.dumps(payload))

    assert event["id"] == "id"
    assert event["source"] == "source.com.test"
    assert event["specversion"] == specversion
    for name, expected in test_data.items():
        assert event.data[name] == expected
def test_is_binary():
    """is_binary() keys off the ce-* headers, not the content type."""
    binary_headers = {
        "ce-id": "my-id",
        "ce-source": "<event-source>",
        "ce-type": "cloudevent.event.type",
        "ce-specversion": "1.0",
        "Content-Type": "text/plain",
    }
    assert is_binary(binary_headers)

    structured_headers = {"Content-Type": "application/cloudevents+json"}
    assert not is_binary(structured_headers)
    assert not is_binary({})
@pytest.mark.parametrize("specversion", ["1.0", "0.3"])
def test_cloudevent_repr(specversion):
    """An event parsed from headers alone must be printable."""
    headers = {
        "Content-Type": "application/octet-stream",
        "ce-specversion": specversion,
        "ce-type": "word.found.name",
        "ce-id": "96fb5f0b-001e-0108-6dfe-da6e2806f124",
        "ce-time": "2018-10-23T12:28:22.4579346Z",
        "ce-source": "<source-url>",
    }
    event = from_http(headers, "")
    # Testing to make sure event is printable. I could run event.__repr__()
    # but we had issues in the past where event.__repr__() could run but
    # print(event) would fail.
    print(event)
@pytest.mark.parametrize("specversion", ["1.0", "0.3"])
def test_none_data_cloudevent(specversion):
    """Events created without data must still serialize both ways."""
    attributes = {
        "source": "<my-url>",
        "type": "issue.example",
        "specversion": specversion,
    }
    event = CloudEvent(attributes)
    # Neither serialization may raise when data is absent.
    to_binary(event)
    to_structured(event)
def test_wrong_specversion():
    """Unknown specversion values in structured payloads must be rejected."""
    headers = {"Content-Type": "application/cloudevents+json"}
    payload = json.dumps(
        {
            "specversion": "0.2",
            "type": "word.found.name",
            "id": "96fb5f0b-001e-0108-6dfe-da6e2806f124",
            "source": "<my-source>",
        }
    )
    with pytest.raises(cloud_exceptions.InvalidRequiredFields) as raised:
        from_http(headers, payload)
    assert "Found invalid specversion 0.2" in str(raised.value)
def test_invalid_data_format_structured_from_http():
    """Non-string structured payloads (here: an int) must be rejected."""
    headers = {"Content-Type": "application/cloudevents+json"}
    data = 20
    with pytest.raises(cloud_exceptions.InvalidStructuredJSON) as raised:
        from_http(headers, data)
    assert "Expected json of type (str, bytes, bytearray)" in str(raised.value)
def test_wrong_specversion_to_request():
    """Serializing an event whose specversion was mutated to 0.2 must fail."""
    event = CloudEvent({"source": "s", "type": "t"}, None)
    with pytest.raises(cloud_exceptions.InvalidRequiredFields) as e:
        # mutate after construction to bypass creation-time validation
        event["specversion"] = "0.2"
        to_binary(event)
    assert "Unsupported specversion: 0.2" in str(e.value)
def test_is_structured():
    """is_structured() keys off the application/cloudevents+json content type."""
    headers = {
        "Content-Type": "application/cloudevents+json",
    }
    assert is_structured(headers)
    # Binary-mode ce-* headers are not structured mode.
    headers = {
        "ce-id": "my-id",
        "ce-source": "<event-source>",
        "ce-type": "cloudevent.event.type",
        "ce-specversion": "1.0",
        "Content-Type": "text/plain",
    }
    assert not is_structured(headers)
def test_empty_json_structured():
    """An empty structured body yields a missing-required-fields error."""
    headers = {"Content-Type": "application/cloudevents+json"}
    data = ""
    with pytest.raises(cloud_exceptions.MissingRequiredFields) as e:
        from_http(headers, data)
    assert "Failed to read specversion from both headers and data" in str(
        e.value
    )
def test_uppercase_headers_with_none_data_binary():
    """Header names are case-insensitive and None data round-trips as None."""
    headers = {
        "Ce-Id": "my-id",
        "Ce-Source": "<event-source>",
        "Ce-Type": "cloudevent.event.type",
        "Ce-Specversion": "1.0",
    }
    event = from_http(headers, None)
    # Attributes are keyed by the lowercased header name minus the "ce-" prefix.
    for key in headers:
        assert event[key.lower()[3:]] == headers[key]
    assert event.data is None
    _, new_data = to_binary(event)
    assert new_data is None
def test_generic_exception():
    """Every from_http/to_binary failure derives from GenericException.

    Each failure is caught as the GenericException base class and then checked
    against the expected concrete subclass. The errisinstance() results must be
    asserted: pytest's ExceptionInfo.errisinstance() merely returns a bool, so
    the original bare calls performed no verification at all.
    """
    headers = {"Content-Type": "application/cloudevents+json"}
    data = json.dumps(
        {
            "specversion": "1.0",
            "source": "s",
            "type": "t",
            "id": "1234-1234-1234",
            "data": "",
        }
    )
    # No headers and no body: required fields are missing.
    with pytest.raises(cloud_exceptions.GenericException) as e:
        from_http({}, None)
    assert e.errisinstance(cloud_exceptions.MissingRequiredFields)

    # Body of an unsupported type (int) in structured mode.
    with pytest.raises(cloud_exceptions.GenericException) as e:
        from_http({}, 123)
    assert e.errisinstance(cloud_exceptions.InvalidStructuredJSON)

    # A raising data_unmarshaller is wrapped in DataUnmarshallerError.
    with pytest.raises(cloud_exceptions.GenericException) as e:
        from_http(headers, data, data_unmarshaller=lambda x: 1 / 0)
    assert e.errisinstance(cloud_exceptions.DataUnmarshallerError)

    # A raising data_marshaller is wrapped in DataMarshallerError.
    with pytest.raises(cloud_exceptions.GenericException) as e:
        event = from_http(headers, data)
        to_binary(event, data_marshaller=lambda x: 1 / 0)
    assert e.errisinstance(cloud_exceptions.DataMarshallerError)
def test_non_dict_data_no_headers_bug():
    """Regression test: non-dict structured data without ce headers (issue #116)."""
    # Test for issue #116
    headers = {"Content-Type": "application/cloudevents+json"}
    data = "123"
    with pytest.raises(cloud_exceptions.MissingRequiredFields) as e:
        from_http(headers, data)
    assert "Failed to read specversion from both headers and data" in str(
        e.value
    )
    assert "The following deserialized data has no 'get' method" in str(e.value)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import compat
class VariablesTestCase(test.TestCase):
  """Behavioral tests for `variables.Variable` (graph-mode ref variables)."""

  def testInitialization(self):
    # Variables get auto-generated names, scalar shape, and cannot be read
    # until the global initializer has run.
    with self.test_session():
      var0 = variables.Variable(0.0)
      self.assertEqual("Variable:0", var0.name)
      self.assertEqual([], var0.get_shape())
      self.assertEqual([], var0.get_shape())
      self.assertEqual([], var0.shape)

      var1 = variables.Variable(1.1)
      self.assertEqual("Variable_1:0", var1.name)
      self.assertEqual([], var1.get_shape())
      self.assertEqual([], var1.get_shape())
      self.assertEqual([], var1.shape)

      with self.assertRaisesOpError("Attempting to use uninitialized value"):
        var0.eval()

      with self.assertRaisesOpError("Attempting to use uninitialized value"):
        var1.eval()

      variables.global_variables_initializer().run()
      self.assertAllClose(0.0, var0.eval())
      self.assertAllClose(1.1, var1.eval())

  def testInitializationOrder(self):
    # Variables may be initialized from other variables' initialized_value(),
    # which guarantees a valid initialization order.
    with self.test_session():
      rnd = variables.Variable(random_ops.random_uniform([3, 6]), name="rnd")
      self.assertEqual("rnd:0", rnd.name)
      self.assertEqual([3, 6], rnd.get_shape())
      self.assertEqual([3, 6], rnd.get_shape())
      self.assertEqual([3, 6], rnd.shape)

      dep = variables.Variable(rnd.initialized_value(), name="dep")
      self.assertEqual("dep:0", dep.name)
      self.assertEqual([3, 6], dep.get_shape())
      self.assertEqual([3, 6], dep.get_shape())
      self.assertEqual([3, 6], dep.shape)

      # Currently have to set the shape manually for Add.
      added_val = rnd.initialized_value() + dep.initialized_value() + 2.0
      added_val.set_shape(rnd.get_shape())

      depdep = variables.Variable(added_val, name="depdep")
      self.assertEqual("depdep:0", depdep.name)
      self.assertEqual([3, 6], depdep.get_shape())
      self.assertEqual([3, 6], depdep.get_shape())
      self.assertEqual([3, 6], depdep.shape)

      variables.global_variables_initializer().run()
      self.assertAllClose(rnd.eval(), dep.eval())
      self.assertAllClose(rnd.eval() + dep.eval() + 2.0, depdep.eval())

  def testIterable(self):
    # Variables are not iterable, regardless of their shape.
    with self.assertRaisesRegexp(TypeError, "not iterable"):
      for _ in variables.Variable(0.0):
        pass
    with self.assertRaisesRegexp(TypeError, "not iterable"):
      for _ in variables.Variable([0.0, 1.0]):
        pass

  def testAssignments(self):
    # Each eval() of an assign op applies the assignment and returns the
    # new value; the variable only changes when the op is actually run.
    with self.test_session():
      var = variables.Variable(0.0)
      plus_one = var.assign_add(1.0)
      minus_one = var.assign_sub(2.0)
      four = var.assign(4.0)
      variables.global_variables_initializer().run()
      self.assertAllClose(0.0, var.eval())

      self.assertAllClose(1.0, plus_one.eval())
      self.assertAllClose(1.0, var.eval())

      self.assertAllClose(-1.0, minus_one.eval())
      self.assertAllClose(-1.0, var.eval())

      self.assertAllClose(4.0, four.eval())
      self.assertAllClose(4.0, var.eval())

  def testResourceAssignments(self):
    # Same assignment semantics for resource variables.
    with self.test_session(use_gpu=True):
      var = resource_variable_ops.ResourceVariable(0.0)
      plus_one = var.assign_add(1.0)
      minus_one = var.assign_sub(2.0)
      four = var.assign(4.0)
      variables.global_variables_initializer().run()
      self.assertAllClose(0.0, var.eval())

      plus_one.eval()
      self.assertAllClose(1.0, var.eval())

      minus_one.eval()
      self.assertAllClose(-1.0, var.eval())

      four.eval()
      self.assertAllClose(4.0, var.eval())

  def testZeroSizeStringAssign(self):
    # Assigning a zero-length string variable back to itself works.
    with self.test_session() as sess:
      array = variables.Variable(
          initial_value=array_ops.zeros((0,), dtype=dtypes.string),
          name="foo",
          trainable=False,
          collections=[ops.GraphKeys.LOCAL_VARIABLES])
      sess.run(variables.local_variables_initializer())
      old_value = array.value()
      copy_op = array.assign(old_value)
      self.assertEqual([], list(sess.run(copy_op)))

  def _countUpToTest(self, dtype):
    # count_up_to returns the pre-increment value and raises once the
    # limit is reached, leaving the variable at the limit.
    with self.test_session():
      zero = constant_op.constant(0, dtype=dtype)
      var = variables.Variable(zero)
      count_up_to = var.count_up_to(3)

      variables.global_variables_initializer().run()
      self.assertEqual(0, var.eval())

      self.assertEqual(0, count_up_to.eval())
      self.assertEqual(1, var.eval())

      self.assertEqual(1, count_up_to.eval())
      self.assertEqual(2, var.eval())

      self.assertEqual(2, count_up_to.eval())
      self.assertEqual(3, var.eval())

      with self.assertRaisesOpError("Reached limit of 3"):
        count_up_to.eval()
      self.assertEqual(3, var.eval())

      with self.assertRaisesOpError("Reached limit of 3"):
        count_up_to.eval()
      self.assertEqual(3, var.eval())

  def testCountUpToInt32(self):
    self._countUpToTest(dtypes.int32)

  def testCountUpToInt64(self):
    self._countUpToTest(dtypes.int64)

  def testControlDepsNone(self):
    with self.test_session():
      c = constant_op.constant(1.0)
      with ops.control_dependencies([c]):
        # d get the control dep.
        d = constant_op.constant(2.0)
        # variables do not.
        var_x = variables.Variable(2.0)
      self.assertEqual([c.op], d.op.control_inputs)
      self.assertEqual([], var_x.initializer.control_inputs)
      self.assertEqual([], var_x.value().op.control_inputs)
      self.assertEqual([], var_x._ref().op.control_inputs)  # pylint: disable=protected-access

  def testControlFlow(self):
    with self.test_session() as sess:
      v0 = variables.Variable(0, name="v0")
      var_dict = {}

      # Call get_variable in each of the cond clauses.
      def var_in_then_clause():
        v1 = variables.Variable(1, name="v1")
        var_dict["v1"] = v1
        return v1 + v0

      def var_in_else_clause():
        v2 = variables.Variable(2, name="v2")
        var_dict["v2"] = v2
        return v2 + v0

      add = control_flow_ops.cond(
          math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
      v1 = var_dict["v1"]
      v2 = var_dict["v2"]
      # We should be able to initialize and run v1 and v2 without initializing
      # v0, even if the variable was created with a control dep on v0.
      sess.run(v1.initializer)
      self.assertEqual([1], sess.run(v1))
      sess.run(v2.initializer)
      self.assertEqual([2], sess.run(v2))
      # v0 should still be uninitialized.
      with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
        sess.run(v0)
      # We should not be able to run 'add' yet.
      with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
        sess.run(add)
      # If we initialize v0 we should be able to run 'add'.
      sess.run(v0.initializer)
      sess.run(add)

  def testControlFlowInitialization(self):
    """Expects an error if an initializer is in a control-flow scope."""

    def cond(i, _):
      return i < 10

    def body(i, _):
      zero = array_ops.zeros([], dtype=dtypes.int32)
      v = variables.Variable(initial_value=zero)
      return (i + 1, v.read_value())

    with self.assertRaisesRegexp(ValueError, "inside a control-flow"):
      control_flow_ops.while_loop(cond, body, [0, 0])

  def testUseVariableAsTensor(self):
    # A Variable is implicitly convertible to a Tensor in math ops.
    with self.test_session():
      var_x = variables.Variable(2.0)
      var_y = variables.Variable(3.0)
      variables.global_variables_initializer().run()
      self.assertAllClose(2.0, var_x.eval())
      self.assertAllClose(3.0, var_y.eval())
      self.assertAllClose(5.0, math_ops.add(var_x, var_y).eval())

  def testZeroSizeVarSameAsConst(self):
    # Matmul with a zero-size variable behaves like the constant equivalent.
    with self.test_session():
      zero_size_var = variables.Variable(array_ops.zeros([0, 2]))
      zero_size_const = array_ops.ones([2, 0])
      variable_mul = math_ops.matmul(zero_size_const, zero_size_var)
      const_mul = math_ops.matmul(
          zero_size_const, zero_size_const, transpose_b=True)
      variables.global_variables_initializer().run()
      variable_output = variable_mul.eval()
      self.assertAllClose(const_mul.eval(), variable_output)
      self.assertAllClose([[0., 0.], [0., 0.]], variable_output)

  def testCachingDevice(self):
    with self.test_session():
      var = variables.Variable(2.0)
      self.assertEqual(var.device, var.value().device)
      self.assertEqual(var.device, var.initialized_value().device)

      var_cached = variables.Variable(2.0, caching_device="/job:foo")
      # caching_device affects where the value is read, not where the
      # variable itself lives.
      self.assertFalse(var_cached.device.startswith("/job:foo"))
      self.assertTrue(var_cached.value().device.startswith("/job:foo"))

  def testCollections(self):
    with self.test_session():
      var_x = variables.Variable(2.0)
      var_y = variables.Variable(2.0, trainable=False)
      var_z = variables.Variable(2.0, trainable=True)
      var_t = variables.Variable(
          2.0,
          trainable=True,
          collections=[
              ops.GraphKeys.TRAINABLE_VARIABLES, ops.GraphKeys.GLOBAL_VARIABLES
          ])
      self.assertEqual([var_x, var_y, var_z, var_t],
                       variables.global_variables())
      self.assertEqual([var_x, var_z, var_t], variables.trainable_variables())

  def testCollectionsWithScope(self):
    # Collection queries can be filtered by name scope.
    with self.test_session():
      with ops.name_scope("scope_1"):
        var_x = variables.Variable(2.0)
      with ops.name_scope("scope_2"):
        var_y = variables.Variable(2.0)

      self.assertEqual([var_x, var_y], variables.global_variables())
      self.assertEqual([var_x], variables.global_variables("scope_1"))
      self.assertEqual([var_y], variables.global_variables("scope_2"))

      self.assertEqual([var_x, var_y], variables.trainable_variables())
      self.assertEqual([var_x], variables.trainable_variables("scope_1"))
      self.assertEqual([var_y], variables.trainable_variables("scope_2"))

  def testOperators(self):
    # Exercises the full arithmetic/comparison/logical operator overloads,
    # including the reflected (r-) variants.
    with self.test_session():
      var_f = variables.Variable([2.0])
      add = var_f + 0.0
      radd = 1.0 + var_f
      sub = var_f - 1.0
      rsub = 1.0 - var_f
      mul = var_f * 10.0
      rmul = 10.0 * var_f
      div = var_f / 10.0
      rdiv = 10.0 / var_f
      lt = var_f < 3.0
      rlt = 3.0 < var_f
      le = var_f <= 2.0
      rle = 2.0 <= var_f
      gt = var_f > 3.0
      rgt = 3.0 > var_f
      ge = var_f >= 2.0
      rge = 2.0 >= var_f
      neg = -var_f
      abs_v = abs(var_f)

      var_i = variables.Variable([20])
      mod = var_i % 7
      rmod = 103 % var_i

      var_b = variables.Variable([True, False])
      and_v = operator.and_(var_b, [True, True])
      or_v = operator.or_(var_b, [False, True])
      xor_v = operator.xor(var_b, [False, False])
      invert_v = ~var_b

      rnd = np.random.rand(4, 4).astype("f")
      var_t = variables.Variable(rnd)
      slice_v = var_t[2, 0:0]

      var_m = variables.Variable([[2.0, 3.0]])
      matmul = var_m.__matmul__([[10.0], [20.0]])
      rmatmul = var_m.__rmatmul__([[10.0], [20.0]])

      variables.global_variables_initializer().run()
      self.assertAllClose([2.0], add.eval())
      self.assertAllClose([3.0], radd.eval())
      self.assertAllClose([1.0], sub.eval())
      self.assertAllClose([-1.0], rsub.eval())
      self.assertAllClose([20.0], mul.eval())
      self.assertAllClose([20.0], rmul.eval())
      self.assertAllClose([0.2], div.eval())
      self.assertAllClose([5.0], rdiv.eval())
      self.assertAllClose([-2.0], neg.eval())
      self.assertAllClose([2.0], abs_v.eval())
      self.assertAllClose([True], lt.eval())
      self.assertAllClose([False], rlt.eval())
      self.assertAllClose([True], le.eval())
      self.assertAllClose([True], rle.eval())
      self.assertAllClose([False], gt.eval())
      self.assertAllClose([True], rgt.eval())
      self.assertAllClose([True], ge.eval())
      self.assertAllClose([True], rge.eval())

      self.assertAllClose([6], mod.eval())
      self.assertAllClose([3], rmod.eval())

      self.assertAllClose([True, False], and_v.eval())
      self.assertAllClose([True, True], or_v.eval())
      self.assertAllClose([True, False], xor_v.eval())
      self.assertAllClose([False, True], invert_v.eval())

      self.assertAllClose(rnd[2, 0:0], slice_v.eval())

      self.assertAllClose([[80.0]], matmul.eval())
      self.assertAllClose([[20.0, 30.0], [40.0, 60.0]], rmatmul.eval())

  def testSession(self):
    with self.test_session() as sess:
      var = variables.Variable([1, 12])
      variables.global_variables_initializer().run()
      self.assertAllClose([1, 12], sess.run(var))

  def testDevicePlacement(self):
    # The initializer and initial value are placed on the variable's device.
    with self.test_session() as sess:
      with ops.device("/cpu:0"):
        var = variables.Variable([1, 12])
      init_value = var.initialized_value()
      init_op = variables.global_variables_initializer()
      self.assertEqual(var.op.device, init_value.device)
      self.assertEqual(var.op.device, init_op.device)
      sess.run(init_op)

  def testColocation(self):
    # Assign ops are colocated with the variable, overriding the outer device.
    with ops.device("/job:ps"):
      var = variables.Variable(0, name="v")
    with ops.device("/job:worker/task:7"):
      assign_op = var.assign(1)
    self.assertDeviceEqual("/job:ps", assign_op.device)
    self.assertEqual([b"loc:@v"], assign_op.op.colocation_groups())

  def testInitializerFunction(self):
    # A callable initial_value is only materialized through the initializer.
    value = [[-42], [133.7]]
    shape = [2, 1]
    with self.test_session():
      initializer = lambda: constant_op.constant(value)

      v1 = variables.Variable(initializer, dtype=dtypes.float32)
      self.assertEqual(shape, v1.get_shape())
      self.assertEqual(shape, v1.shape)
      self.assertAllClose(value, v1.initial_value.eval())
      with self.assertRaises(errors_impl.FailedPreconditionError):
        v1.eval()

      v2 = variables.Variable(
          math_ops.negative(v1.initialized_value()), dtype=dtypes.float32)
      self.assertEqual(v1.get_shape(), v2.get_shape())
      self.assertEqual(v1.shape, v2.shape)
      self.assertAllClose(np.negative(value), v2.initial_value.eval())

      with self.assertRaises(errors_impl.FailedPreconditionError):
        v2.eval()
      variables.global_variables_initializer().run()
      self.assertAllClose(np.negative(value), v2.eval())

  def testConstraintArg(self):
    # A callable constraint is accepted; a non-callable raises ValueError.
    constraint = lambda x: x
    v = variables.Variable(
        lambda: constant_op.constant(1.),
        constraint=constraint)
    self.assertEqual(v.constraint, constraint)

    constraint = 0
    with self.assertRaises(ValueError):
      v = variables.Variable(
          lambda: constant_op.constant(1.),
          constraint=constraint)

  def testNoRefDataRace(self):
    with self.test_session():
      a = variables.Variable([1, 2, 3], dtype=dtypes.float32)
      b = variables.Variable(a.initialized_value() + 2)
      c = variables.Variable(b.initialized_value() + 2)
      variables.global_variables_initializer().run()
      self.assertAllEqual(a.eval(), [1, 2, 3])
      self.assertAllEqual(b.eval(), [3, 4, 5])
      self.assertAllEqual(c.eval(), [5, 6, 7])

  def testInitializerFunctionDevicePlacement(self):
    # With a callable initializer, the initializer's ops are colocated with
    # the variable rather than pinned to the device scope.
    with self.test_session():
      initializer = lambda: constant_op.constant(42.0)
      with ops.device("/cpu:100"):
        v1 = variables.Variable(initializer, dtype=dtypes.float32, name="v1")
      expected_device = "/device:CPU:100"
      expected_group_v1 = [b"loc:@v1"]
      self.assertEqual(expected_device, v1.op.device)
      self.assertEqual(expected_group_v1, v1.op.colocation_groups())
      for i in v1.initializer.inputs:
        self.assertEqual(expected_group_v1, i.op.colocation_groups())

      v2 = variables.Variable(initializer, dtype=dtypes.float32, name="v2")
      expected_group_v2 = [b"loc:@v2"]
      self.assertEqual(expected_group_v2, v2.op.colocation_groups())
      for i in v2.initializer.inputs:
        self.assertEqual(expected_group_v2, i.op.colocation_groups())

  def testVariableDefInitializedInstances(self):
    with ops.Graph().as_default(), self.test_session() as sess:
      v_def = variables.Variable(
          initial_value=constant_op.constant(3.0)).to_proto()

    with ops.Graph().as_default(), self.test_session() as sess:
      # v describes a VariableDef-based variable without an initial value.
      v = variables.Variable(variable_def=v_def)
      self.assertEqual(3.0, sess.run(v.initialized_value()))

      # initialized_value should not rerun the initializer_op if the variable
      # has already been initialized elsewhere.
      sess.run(v.assign(1.0))
      self.assertEqual(1.0, v.initialized_value().eval())

    v_def.ClearField("initial_value_name")
    with ops.Graph().as_default(), self.test_session() as sess:
      # Restoring a legacy VariableDef proto that does not have
      # initial_value_name set should still work.
      v = variables.Variable(variable_def=v_def)
      # We should also be able to re-export the variable to a new meta graph.
      self.assertProtoEquals(v_def, v.to_proto())
      # But attempts to use initialized_value will result in errors.
      with self.assertRaises(ValueError):
        sess.run(v.initialized_value())

  def testLoad(self):
    # load() writes a new value directly via the session feed mechanism.
    with self.test_session():
      var = variables.Variable(np.zeros((5, 5), np.float32))
      variables.global_variables_initializer().run()
      var.load(np.ones((5, 5), np.float32))
      self.assertAllClose(np.ones((5, 5), np.float32), var.eval())

  def testRepr(self):
    var = variables.Variable(np.zeros((5, 5), np.float32), name='noop')
    self.assertEqual(
        "<tf.Variable 'noop:0' shape=(5, 5) dtype=float32_ref>",
        repr(var))
class IsInitializedTest(test.TestCase):
  """Tests for variables.report_uninitialized_variables()."""

  def testNoVars(self):
    # No variables in the graph => empty report.
    with ops.Graph().as_default(), self.test_session() as sess:
      uninited = variables.report_uninitialized_variables()
      self.assertEqual(0, sess.run(uninited).size)

  def testAssertVariablesInitialized(self):
    with ops.Graph().as_default(), self.test_session() as sess:
      v = variables.Variable([1, 2], name="v")
      w = variables.Variable([3, 4], name="w")
      _ = v, w
      uninited = variables.report_uninitialized_variables()
      self.assertAllEqual(np.array([b"v", b"w"]), sess.run(uninited))
      variables.global_variables_initializer().run()
      self.assertEqual(0, sess.run(uninited).size)

  def testVariableList(self):
    # The report shrinks as each variable gets initialized.
    with ops.Graph().as_default(), self.test_session() as sess:
      v = variables.Variable([1, 2], name="v")
      w = variables.Variable([3, 4], name="w")
      uninited = variables.report_uninitialized_variables()
      self.assertAllEqual(np.array([b"v", b"w"]), sess.run(uninited))
      sess.run(w.initializer)
      self.assertAllEqual(np.array([b"v"]), sess.run(uninited))
      v.initializer.run()
      self.assertEqual(0, sess.run(uninited).size)

  def testZeroSizeVarInitialized(self):
    with ops.Graph().as_default(), self.test_session() as sess:
      v = variables.Variable(array_ops.zeros([0, 2]), name="v")
      uninited = variables.report_uninitialized_variables()
      v.initializer.run()  # not strictly necessary
      self.assertEqual(0, sess.run(uninited).size)

  def testTrainingWithZeroSizeVar(self):
    # A zero-size variable must not break gradient descent on other vars.
    with ops.Graph().as_default(), self.test_session() as sess:
      a = variables.Variable(array_ops.zeros([0, 2]))
      b = variables.Variable(array_ops.ones([2, 2]))
      objective = math_ops.reduce_sum(b + math_ops.matmul(
          a, a, transpose_a=True))
      variables.global_variables_initializer().run()
      do_opt = gradient_descent.GradientDescentOptimizer(0.1).minimize(
          objective)
      sess.run([do_opt])
      self.assertAllClose([[0.9, 0.9], [0.9, 0.9]], b.eval())
class ObsoleteIsInitializedTest(test.TestCase):
  """Tests for the older variables.assert_variables_initialized() API."""

  def testNoVars(self):
    # No variables => nothing to assert, so the op is None.
    with ops.Graph().as_default():
      self.assertEqual(None, variables.assert_variables_initialized())

  def testVariables(self):
    with ops.Graph().as_default(), self.test_session() as sess:
      v = variables.Variable([1, 2])
      w = variables.Variable([3, 4])
      _ = v, w
      inited = variables.assert_variables_initialized()
      with self.assertRaisesOpError("Attempting to use uninitialized value"):
        sess.run(inited)
      variables.global_variables_initializer().run()
      sess.run(inited)

  def testVariableList(self):
    # The check can be restricted to an explicit variable list.
    with ops.Graph().as_default(), self.test_session() as sess:
      v = variables.Variable([1, 2])
      w = variables.Variable([3, 4])
      inited = variables.assert_variables_initialized([v])
      with self.assertRaisesOpError("Attempting to use uninitialized value"):
        inited.op.run()
      sess.run(w.initializer)
      # Initializing w alone does not satisfy the check on [v].
      with self.assertRaisesOpError("Attempting to use uninitialized value"):
        inited.op.run()
      v.initializer.run()
      inited.op.run()
class PartitionedVariableTest(test.TestCase):
  """Tests for variables.PartitionedVariable construction and validation."""

  def testPartitionedVariable(self):
    with ops.Graph().as_default():
      v0 = variables.Variable([0])
      v1 = variables.Variable([1])
      # Both slices share the same full name (v0.name) so they describe
      # two partitions of one logical variable of shape [2].
      v0._set_save_slice_info(
          variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
      v1._set_save_slice_info(
          variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1]))
      partitions = [2]

      # Pass variable_list as [v1, v0] to ensure they are properly
      # re-sorted to [v0, v1] based on their slice info offsets.
      partitioned_variable = variables.PartitionedVariable(
          name="two_vars",
          shape=[2],
          dtype=v0.dtype,
          variable_list=[v1, v0],
          partitions=partitions)

      concatenated = ops.convert_to_tensor(partitioned_variable)
      num_partitions = len(partitioned_variable)
      iterated_partitions = list(partitioned_variable)
      self.assertEqual(2, num_partitions)
      self.assertEqual([v0, v1], iterated_partitions)
      self.assertEqual([2], concatenated.get_shape())
      self.assertEqual([2], concatenated.shape)

  def testPartitionedVariableFailures(self):
    with ops.Graph().as_default():
      # Empty variable_list is rejected.
      with self.assertRaisesRegexp(ValueError, "empty"):
        variables.PartitionedVariable(
            name="fail",
            shape=2,
            dtype=dtypes.int32,
            variable_list=[],
            partitions=[])

      # Every member must carry save_slice_info.
      with self.assertRaisesRegexp(ValueError, "must have a save_slice_info"):
        v0 = variables.Variable([0])
        partitions = [1]
        variables.PartitionedVariable(
            name="two_vars",
            shape=[1],
            dtype=v0.dtype,
            variable_list=[v0],
            partitions=partitions)

      # The declared shape must match the slices' full shape.
      with self.assertRaisesRegexp(ValueError, "full shapes must match"):
        v0 = variables.Variable([0])
        v1 = variables.Variable([1])
        v0._set_save_slice_info(
            variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
        v1._set_save_slice_info(
            variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1]))
        partitions = [2]

        variables.PartitionedVariable(
            name="two_vars",
            shape=[3],
            dtype=v0.dtype,
            variable_list=[v1, v0],
            partitions=partitions)

      # Partition counts must be positive.
      with self.assertRaisesRegexp(ValueError, "must be positive"):
        v0 = variables.Variable([0])
        v0._set_save_slice_info(
            variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
        partitions = [0]

        variables.PartitionedVariable(
            name="two_vars",
            shape=[2],
            dtype=v0.dtype,
            variable_list=[v0],
            partitions=partitions)
class VariableContainerTest(test.TestCase):
  """Tests that ops.container scopes set the 'container' attr on variables."""

  def testContainer(self):
    with ops.Graph().as_default():
      v0 = variables.Variable([0])
      with ops.container("l1"):
        v1 = variables.Variable([1])
        with ops.container("l2"):
          v2 = variables.Variable([2])
          # An explicit container= argument overrides the scope.
          special_v = gen_state_ops._variable(
              shape=[1],
              dtype=dtypes.float32,
              name="VariableInL3",
              container="l3",
              shared_name="")
        # Back in the "l1" scope after the nested block closes.
        v3 = variables.Variable([3])
      # Outside all container scopes: empty container attr.
      v4 = variables.Variable([4])
      self.assertEqual(compat.as_bytes(""), v0.op.get_attr("container"))
      self.assertEqual(compat.as_bytes("l1"), v1.op.get_attr("container"))
      self.assertEqual(compat.as_bytes("l2"), v2.op.get_attr("container"))
      self.assertEqual(
          compat.as_bytes("l3"), special_v.op.get_attr("container"))
      self.assertEqual(compat.as_bytes("l1"), v3.op.get_attr("container"))
      self.assertEqual(compat.as_bytes(""), v4.op.get_attr("container"))
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| |
import json
from couchdbkit.exceptions import ResourceNotFound
from django.contrib.humanize.templatetags.humanize import naturaltime
from casexml.apps.case.dbaccessors import get_open_case_ids_in_domain
from casexml.apps.case.util import iter_cases
from corehq.apps.cloudcare.exceptions import RemoteAppError
from corehq.apps.hqcase.dbaccessors import get_case_ids_in_domain, \
get_case_ids_in_domain_by_owner
from corehq.apps.users.models import CouchUser
from casexml.apps.case.models import CommCareCase, CASE_STATUS_ALL, CASE_STATUS_CLOSED, CASE_STATUS_OPEN
from corehq.apps.app_manager.models import (
ApplicationBase,
get_app,
)
from corehq.util.soft_assert import soft_assert
from dimagi.utils.couch.safe_index import safe_index
from casexml.apps.phone.caselogic import get_footprint, get_related_cases
from datetime import datetime
from corehq.elastic import get_es
import urllib
from django.utils.translation import ugettext as _
from dimagi.utils.parsing import json_format_date
from touchforms.formplayer.models import EntrySession
from django.core.urlresolvers import reverse
CLOUDCARE_API_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S' # todo: add '.%fZ'?
def api_closed_to_status(closed_string):
    """Map the legacy string closed-flag ('any'/'true'/'false') to a CASE_STATUS_* constant."""
    # legacy api support
    if closed_string == 'any':
        return CASE_STATUS_ALL
    if closed_string == 'true':
        return CASE_STATUS_CLOSED
    if closed_string == 'false':
        return CASE_STATUS_OPEN
    # match the original dict-lookup behavior for unknown values
    raise KeyError(closed_string)
def closed_to_status(closed_bool):
    """Map an optional boolean closed-flag (None/True/False) to a CASE_STATUS_* constant."""
    status_by_closed = {
        None: CASE_STATUS_ALL,
        True: CASE_STATUS_CLOSED,
        False: CASE_STATUS_OPEN,
    }
    return status_by_closed[closed_bool]
def status_to_closed_flags(status):
    """Map a CASE_STATUS_* constant to the list of matching `closed` boolean values."""
    flags_by_status = {
        CASE_STATUS_ALL: [True, False],
        CASE_STATUS_CLOSED: [True],
        CASE_STATUS_OPEN: [False],
    }
    return flags_by_status[status]
class CaseAPIResult(object):
    """
    The result of a case API query. Useful for abstracting out the difference
    between an id-only representation and a full_blown one.
    """

    def __init__(self, id=None, couch_doc=None, id_only=False, lite=True, sanitize=True):
        """Either `id` or `couch_doc` may be given; the other is derived lazily.

        :param id: the case id, if already known
        :param couch_doc: the CommCareCase document, if already fetched
        :param id_only: if True, to_json() returns just the id
        :param lite: passed through to the couch doc's get_json()
        :param sanitize: replace None property values with '' in the JSON
        """
        self._id = id
        self._couch_doc = couch_doc
        self.id_only = id_only
        self.lite = lite
        self.sanitize = sanitize

    def __getitem__(self, key):
        if key == 'case_id':
            return self.id
        else:
            return self.case_json.__getitem__(key)

    @property
    def id(self):
        # lazily pull the id off the couch doc if we were only given the doc
        if self._id is None:
            self._id = self._couch_doc['_id']
        return self._id

    @property
    def couch_doc(self):
        # lazily fetch the full case doc if we were only given the id
        if self._couch_doc is None:
            self._couch_doc = CommCareCase.get(self._id)
        return self._couch_doc

    @property
    def case_json(self):
        # local renamed from `json` so it no longer shadows the module-level
        # `json` import
        case_json = self.couch_doc.get_json(lite=self.lite)
        if self.sanitize:
            # This ensures that any None value will be encoded as "" instead of null
            # This fixes http://manage.dimagi.com/default.asp?158655 because mobile chokes on null
            # (note: only nested dicts are sanitized recursively; Nones inside
            # lists are left untouched)
            def _sanitize(props):
                for key, val in props.items():
                    if val is None:
                        props[key] = ''
                    elif isinstance(val, dict):
                        props[key] = _sanitize(val)
                return props
            case_json = _sanitize(case_json)
        return case_json

    def to_json(self):
        return self.id if self.id_only else self.case_json
class CaseAPIHelper(object):
    """
    Simple config object for querying the APIs
    """
    def __init__(self, domain, status=CASE_STATUS_OPEN, case_type=None, ids_only=False,
                 footprint=False, strip_history=False, filters=None, include_children=False):
        # status must be one of the CASE_STATUS_* constants
        if status not in [CASE_STATUS_ALL, CASE_STATUS_CLOSED, CASE_STATUS_OPEN]:
            raise ValueError("invalid case status %s" % status)
        self.domain = domain
        self.status = status
        self.case_type = case_type
        self.ids_only = ids_only
        self.wrap = not ids_only  # if we're just querying IDs we don't need to wrap the docs
        self.footprint = footprint
        self.strip_history = strip_history
        self.filters = filters
        self.include_children = include_children

    def _case_results(self, case_id_list):
        """Turn a list of case ids into CaseAPIResults, applying filters,
        footprint expansion, and child inclusion as configured."""

        def _filter(res):
            # True if the case matches every configured path=value filter.
            if self.filters:
                for path, val in self.filters.items():
                    actual_val = safe_index(res.case_json, path.split("/"))
                    if actual_val != val:
                        # closed=false => case.closed == False
                        if val in ('null', 'true', 'false'):
                            # retry the comparison with the JSON-decoded value
                            if actual_val != json.loads(val):
                                return False
                        else:
                            return False
            return True

        if not self.ids_only or self.filters or self.footprint or self.include_children:
            # optimization hack - we know we'll need the full cases eventually
            # so just grab them now.
            base_results = [CaseAPIResult(couch_doc=case, id_only=self.ids_only)
                            for case in iter_cases(case_id_list, self.strip_history, self.wrap)]
        else:
            base_results = [CaseAPIResult(id=id, id_only=True) for id in case_id_list]

        if self.filters and not self.footprint:
            base_results = filter(_filter, base_results)

        if not self.footprint and not self.include_children:
            return base_results

        # footprint/children require expanding the case list and re-wrapping
        case_list = [res.couch_doc for res in base_results]
        if self.footprint:
            case_list = get_footprint(
                case_list,
                self.domain,
                strip_history=self.strip_history,
            ).values()

        if self.include_children:
            case_list = get_related_cases(
                case_list,
                self.domain,
                strip_history=self.strip_history,
                search_up=False,
            ).values()

        return [CaseAPIResult(couch_doc=case, id_only=self.ids_only) for case in case_list]

    def get_all(self):
        """Return results for every case in the domain matching the status/type."""
        status = self.status or CASE_STATUS_ALL
        if status == CASE_STATUS_ALL:
            case_ids = get_case_ids_in_domain(self.domain, type=self.case_type)
        elif status == CASE_STATUS_OPEN:
            case_ids = get_open_case_ids_in_domain(self.domain, type=self.case_type)
        elif status == CASE_STATUS_CLOSED:
            _assert = soft_assert('@'.join(['droberts', 'dimagi.com']))
            _assert(False, "I'm surprised CaseAPIHelper "
                    "ever gets called with status=closed")
            # this is rare so we don't care if it requires two calls to get
            # all the ids
            case_ids = (
                set(get_case_ids_in_domain(self.domain, type=self.case_type))
                - set(get_open_case_ids_in_domain(self.domain, type=self.case_type))
            )
        else:
            raise ValueError("Invalid value for 'status': '%s'" % status)
        return self._case_results(case_ids)

    def get_owned(self, user_id):
        """Return results for cases owned by the user (or their owner groups)."""
        try:
            user = CouchUser.get_by_user_id(user_id, self.domain)
        except KeyError:
            user = None
        try:
            owner_ids = user.get_owner_ids()
        except AttributeError:
            # user was None (or has no owner ids); fall back to the raw id
            owner_ids = [user_id]

        closed = {
            CASE_STATUS_OPEN: False,
            CASE_STATUS_CLOSED: True,
            CASE_STATUS_ALL: None,
        }[self.status]

        ids = get_case_ids_in_domain_by_owner(
            self.domain, owner_id__in=owner_ids, closed=closed)

        return self._case_results(ids)
# todo: Make these api functions use generators for streaming
# so that a limit call won't fetch more docs than it needs to
# This could be achieved with something like CommCareCase.paging_view that
# returns a generator but internally batches couch requests
# potentially doubling the batch-size each time in case it really is a lot of data
def get_filtered_cases(domain, status, user_id=None, case_type=None,
                       filters=None, footprint=False, ids_only=False,
                       strip_history=True, include_children=False):
    """Query cases in a domain, scoped to a user's owned cases when user_id
    is given, otherwise across the whole domain."""
    # a filter value of None means don't filter
    filters = dict((k, v) for k, v in (filters or {}).items() if v is not None)
    helper = CaseAPIHelper(domain, status, case_type=case_type, ids_only=ids_only,
                           footprint=footprint, strip_history=strip_history,
                           filters=filters, include_children=include_children)

    if user_id:
        return helper.get_owned(user_id)
    else:
        return helper.get_all()
class ElasticCaseQuery(object):
    # this class is currently pretty customized to serve exactly
    # this API. one day it may be worth reconciling our ES interfaces
    # but today is not that day.
    # To be replaced by CaseES framework.

    # Filter keys that control paging and date ranges rather than case
    # properties; these are excluded from scrubbed_filters.
    RESERVED_KEYS = ('date_modified_start', 'date_modified_end',
                     'server_date_modified_start', 'server_date_modified_end',
                     'limit', 'offset')
def __init__(self, domain, filters):
self.domain = domain
self.filters = filters
self.offset = int(filters.get('offset', 0))
self.limit = int(filters.get('limit', 50))
self._date_modified_start = filters.get("date_modified_start", None)
self._date_modified_end = filters.get("date_modified_end", None)
self._server_date_modified_start = filters.get("server_date_modified_start", None)
self._server_date_modified_end = filters.get("server_date_modified_end", None)
@property
def uses_modified(self):
return bool(self._date_modified_start or self._date_modified_end)
@property
def uses_server_modified(self):
return bool(self._server_date_modified_start or self._server_date_modified_end)
@property
def date_modified_start(self):
return self._date_modified_start or json_format_date(datetime(1970, 1, 1))
@property
def date_modified_end(self):
return self._date_modified_end or json_format_date(datetime.max)
@property
def server_date_modified_start(self):
return self._server_date_modified_start or json_format_date(datetime(1970, 1, 1))
@property
def server_date_modified_end(self):
return self._server_date_modified_end or json_format_date(datetime.max)
@property
def scrubbed_filters(self):
return dict( (k, v) for k, v in self.filters.items()
if k not in self.RESERVED_KEYS and not k.endswith('__full') )
def _modified_params(self, key, start, end):
return {
'range': {
key: {
'from': start,
'to': end
}
}
}
@property
def modified_params(self, ):
return self._modified_params('modified_on',
self.date_modified_start,
self.date_modified_end)
@property
def server_modified_params(self):
return self._modified_params('server_modified_on',
self.server_date_modified_start,
self.server_date_modified_end)
def get_terms(self):
yield {'term': {'domain.exact': self.domain}}
if self.uses_modified:
yield self.modified_params
if self.uses_modified:
yield self.modified_params
if self.uses_server_modified:
yield self.server_modified_params
for k, v in self.scrubbed_filters.items():
yield {'term': {k: v.lower()}}
def get_query(self):
return {
'query': {
'bool': {
'must': list(self.get_terms())
}
},
'sort': {
'modified_on': {'order': 'asc'}
},
'from': self.offset,
'size': self.offset + self.limit,
}
def es_filter_cases(domain, filters=None):
    """
    Filter cases using elastic search
    (Domain, Filters?) -> [CommCareCase]
    """
    # Bug fix: ElasticCaseQuery calls filters.get(...), so passing the
    # default None through crashed with AttributeError; normalize to {}.
    q = ElasticCaseQuery(domain, filters if filters is not None else {})
    res = get_es().get('hqcases/_search', data=q.get_query())
    # this is ugly, but for consistency / ease of deployment just
    # use this to return everything in the expected format for now
    return [CommCareCase.wrap(r["_source"]) for r in res['hits']['hits'] if r["_source"]]
def get_filters_from_request(request, limit_top_level=None):
    """
    limit_top_level lets you specify a whitelist of top-level properties you can include in the filters,
    properties with a / in them are always included in the filters
    """
    def _decode(thing):
        # Best-effort URL-decode; fall back to the raw value on any failure.
        try:
            return urllib.unquote(thing)
        except Exception:
            return thing
    # super weird hack: force decoding keys because sometimes (only seen in
    # production) django doesn't do this for us.
    filters = {}
    for key, value in request.REQUEST.items():
        filters[_decode(key)] = value
    if limit_top_level is not None:
        # Slash-qualified (nested) properties always pass the whitelist.
        filters = dict((key, val) for key, val in filters.items()
                       if '/' in key or key in limit_top_level)
    for system_property in ['user_id', 'closed', 'format', 'footprint',
                            'ids_only', 'include_children', 'use_cache']:
        filters.pop(system_property, None)
    return filters
def get_cloudcare_apps(domain):
    """Return the raw couch doc for every cloudcare-enabled app in ``domain``."""
    apps = ApplicationBase.view('cloudcare/cloudcare_apps',
                                startkey=[domain], endkey=[domain, {}])
    return [app._doc for app in apps]
def get_app_json(app):
    """Serialize ``app`` to JSON with its post_url added; falsy apps pass through as None."""
    if not app:
        return None
    serialized = app.to_json()
    serialized['post_url'] = app.post_url
    return serialized
def look_up_app_json(domain, app_id):
    """Fetch ``app_id`` within ``domain`` and return its JSON; remote apps are rejected."""
    app = get_app(domain, app_id)
    if app.is_remote_app():
        raise RemoteAppError()
    # Sanity check: the fetched app must actually belong to this domain.
    assert app.domain == domain
    return get_app_json(app)
def get_cloudcare_app(domain, app_name):
    """Look up a cloudcare app by its display name; raise ResourceNotFound when absent."""
    matches = [app for app in get_cloudcare_apps(domain)
               if app['name'] == app_name]
    if matches:
        return look_up_app_json(domain, matches[0]['_id'])
    raise ResourceNotFound(_("Not found application by name: %s") % app_name)
def get_open_form_sessions(user, skip=0, limit=10):
    """Return up to ``limit`` of the user's open form sessions as JSON dicts,
    most recent activity first, skipping the first ``skip`` sessions.
    """
    def session_to_json(sess):
        return {
            'id': sess.session_id,
            'app_id': sess.app_id,
            'name': sess.session_name,
            'display': u'{name} ({when})'.format(name=sess.session_name, when=naturaltime(sess.last_activity_date)),
            'created_date': sess.created_date.strftime(CLOUDCARE_API_DATETIME_FORMAT),
            'last_activity_date': sess.last_activity_date.strftime(CLOUDCARE_API_DATETIME_FORMAT),
        }
    # Bug fix: queryset slices are [start:stop], not [start:count].  The old
    # [skip:limit] returned fewer rows than requested (or none at all)
    # whenever skip > 0; e.g. skip=10, limit=10 produced the empty slice
    # [10:10] instead of rows 10-19.
    return [session_to_json(sess) for sess in EntrySession.objects.filter(
        last_activity_date__isnull=False,
        user=user,
    ).order_by('-last_activity_date')[skip:skip + limit]]
def get_cloudcare_form_url(domain, app_build_id=None, module_id=None, form_id=None, case_id=None):
    """Build a cloudcare URL, appending only the path segments that were supplied."""
    url = reverse("cloudcare_main", args=[domain, ""])
    # Idiom fix (PEP 8 E711): compare to None with identity, not equality,
    # so objects with custom __eq__ cannot change routing.  Falsy-but-real
    # ids (0, '') are still appended, exactly as with `!= None`.
    if app_build_id is not None:
        url += "view/" + str(app_build_id)
    if module_id is not None:
        url += "/" + str(module_id)
    if form_id is not None:
        url += "/" + str(form_id)
    if case_id is not None:
        url += "/case/" + str(case_id)
    return url
| |
import zlib
import io
from socket import timeout as SocketTimeout
from .packages import six
from ._collections import HTTPHeaderDict
from .exceptions import ProtocolError, DecodeError, ReadTimeoutError
from .packages.six import string_types as basestring, binary_type
from .connection import HTTPException, BaseSSLError
from .util.response import is_fp_closed
class DeflateDecoder(object):
    """Incrementally inflate ``Content-Encoding: deflate`` bodies.

    Servers disagree on whether "deflate" means an RFC 1950 zlib-wrapped
    stream or a raw RFC 1951 stream.  Try the zlib-wrapped form first; if
    the stream fails before producing any output, replay the buffered
    bytes through a raw-deflate decompressor.
    """

    def __init__(self):
        self._first_try = True
        self._data = binary_type()
        self._obj = zlib.decompressobj()

    def __getattr__(self, name):
        # Delegate everything else (flush, unused_data, ...) to the
        # underlying zlib decompressor object.
        return getattr(self._obj, name)

    def decompress(self, data):
        if not data:
            return data

        if not self._first_try:
            return self._obj.decompress(data)

        # Buffer the stream so it can be replayed raw if zlib framing fails.
        self._data += data
        try:
            decompressed = self._obj.decompress(data)
            if decompressed:
                # Bug fix: once the zlib-wrapped attempt has produced
                # output, the format is settled — stop buffering input.
                # Previously _first_try was never cleared on success, so
                # _data grew without bound for the whole response body.
                self._first_try = False
                self._data = None
            return decompressed
        except zlib.error:
            self._first_try = False
            self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
            try:
                return self.decompress(self._data)
            finally:
                self._data = None
class GzipDecoder(object):
    """Incremental gzip (RFC 1952) body decoder backed by zlib."""

    def __init__(self):
        # wbits = 16 + MAX_WBITS tells zlib to expect a gzip header/trailer.
        self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)

    def __getattr__(self, name):
        # Delegate unknown attributes (flush, unused_data, ...) to zlib's object.
        return getattr(self._obj, name)

    def decompress(self, data):
        # Empty chunks pass through unchanged.
        return data if not data else self._obj.decompress(data)
def _get_decoder(mode):
    """Map a Content-Encoding token to a decoder: 'gzip' -> GzipDecoder, anything else -> DeflateDecoder."""
    return GzipDecoder() if mode == 'gzip' else DeflateDecoder()
class HTTPResponse(io.IOBase):
    """
    HTTP Response container.

    Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
    loaded and decoded on-demand when the ``data`` property is accessed. This
    class is also compatible with the Python standard library's :mod:`io`
    module, and can hence be treated as a readable object in the context of that
    framework.

    Extra parameters for behaviour not present in httplib.HTTPResponse:

    :param preload_content:
        If True, the response's body will be preloaded during construction.

    :param decode_content:
        If True, the body is decoded while reading, based on the
        'content-encoding' header (e.g. 'gzip', 'deflate'); if False the raw
        wire bytes are returned.  (Docstring corrected: the previous wording
        described the opposite of what ``read()`` implements.)

    :param original_response:
        When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
        object, it's convenient to include the original for debug purposes. It's
        otherwise unused.
    """

    # Content-Encoding values read() knows how to decode transparently.
    CONTENT_DECODERS = ['gzip', 'deflate']
    REDIRECT_STATUSES = [301, 302, 303, 307, 308]

    def __init__(self, body='', headers=None, status=0, version=0, reason=None,
                 strict=0, preload_content=True, decode_content=True,
                 original_response=None, pool=None, connection=None):
        self.headers = HTTPHeaderDict()
        if headers:
            self.headers.update(headers)
        self.status = status
        self.version = version
        self.reason = reason
        self.strict = strict
        self.decode_content = decode_content

        self._decoder = None          # created lazily in read()
        self._body = None
        self._fp = None
        self._original_response = original_response
        self._fp_bytes_read = 0       # raw (wire) bytes consumed so far

        # A string body is stored directly; a file-like body is kept as
        # self._fp below and consumed through read().
        if body and isinstance(body, (basestring, binary_type)):
            self._body = body

        self._pool = pool
        self._connection = connection

        if hasattr(body, 'read'):
            self._fp = body

        if preload_content and not self._body:
            self._body = self.read(decode_content=decode_content)

    def get_redirect_location(self):
        """
        Should we redirect and where to?

        :returns: Truthy redirect location string if we got a redirect status
            code and valid location. ``None`` if redirect status and no
            location. ``False`` if not a redirect status code.
        """
        if self.status in self.REDIRECT_STATUSES:
            return self.headers.get('location')
        return False

    def release_conn(self):
        # Hand the underlying connection back to its pool; no-op unless this
        # response was constructed with both a pool and a connection.
        if not self._pool or not self._connection:
            return
        self._pool._put_conn(self._connection)
        self._connection = None

    @property
    def data(self):
        # For backwards-compat with urllib3 0.4 and earlier.
        if self._body:
            return self._body
        if self._fp:
            return self.read(cache_content=True)
        # NOTE(review): implicitly returns None when there is neither a
        # cached body nor a file object to read from.

    def tell(self):
        """
        Obtain the number of bytes pulled over the wire so far. May differ from
        the amount of content returned by :meth:``HTTPResponse.read`` if bytes
        are encoded on the wire (e.g, compressed).
        """
        return self._fp_bytes_read

    def read(self, amt=None, decode_content=None, cache_content=False):
        """
        Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
        parameters: ``decode_content`` and ``cache_content``.

        :param amt:
            How much of the content to read. If specified, caching is skipped
            because it doesn't make sense to cache partial content as the full
            response.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.

        :param cache_content:
            If True, will save the returned data such that the same result is
            returned despite of the state of the underlying file object. This
            is useful if you want the ``.data`` property to continue working
            after having ``.read()`` the file object. (Overridden if ``amt`` is
            set.)
        """
        # Note: content-encoding value should be case-insensitive, per RFC 7230
        # Section 3.2
        content_encoding = self.headers.get('content-encoding', '').lower()
        if self._decoder is None:
            if content_encoding in self.CONTENT_DECODERS:
                self._decoder = _get_decoder(content_encoding)
        if decode_content is None:
            decode_content = self.decode_content

        if self._fp is None:
            return

        flush_decoder = False

        try:
            try:
                if amt is None:
                    # cStringIO doesn't like amt=None
                    data = self._fp.read()
                    flush_decoder = True
                else:
                    cache_content = False
                    data = self._fp.read(amt)
                    if amt != 0 and not data:  # Platform-specific: Buggy versions of Python.
                        # Close the connection when no data is returned
                        #
                        # This is redundant to what httplib/http.client _should_
                        # already do. However, versions of python released before
                        # December 15, 2012 (http://bugs.python.org/issue16298) do
                        # not properly close the connection in all cases. There is
                        # no harm in redundantly calling close.
                        self._fp.close()
                        flush_decoder = True

            except SocketTimeout:
                # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
                # there is yet no clean way to get at it from this context.
                raise ReadTimeoutError(self._pool, None, 'Read timed out.')

            except BaseSSLError as e:
                # FIXME: Is there a better way to differentiate between SSLErrors?
                if 'read operation timed out' not in str(e):  # Defensive:
                    # This shouldn't happen but just in case we're missing an edge
                    # case, let's avoid swallowing SSL errors.
                    raise

                raise ReadTimeoutError(self._pool, None, 'Read timed out.')

            except HTTPException as e:
                # This includes IncompleteRead.
                raise ProtocolError('Connection broken: %r' % e, e)

            self._fp_bytes_read += len(data)

            try:
                if decode_content and self._decoder:
                    data = self._decoder.decompress(data)
            except (IOError, zlib.error) as e:
                raise DecodeError(
                    "Received response with content-encoding: %s, but "
                    "failed to decode it." % content_encoding, e)

            # At EOF, drain any bytes still buffered inside the decoder.
            if flush_decoder and decode_content and self._decoder:
                buf = self._decoder.decompress(binary_type())
                data += buf + self._decoder.flush()

            if cache_content:
                self._body = data

            return data

        finally:
            # Return the connection once the wrapped httplib response is done.
            if self._original_response and self._original_response.isclosed():
                self.release_conn()

    def stream(self, amt=2**16, decode_content=None):
        """
        A generator wrapper for the read() method. A call will block until
        ``amt`` bytes have been read from the connection or until the
        connection is closed.

        :param amt:
            How much of the content to read. The generator will return up to
            much data per iteration, but may return less. This is particularly
            likely when using compressed data. However, the empty string will
            never be returned.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        """
        while not is_fp_closed(self._fp):
            data = self.read(amt=amt, decode_content=decode_content)

            if data:
                yield data

    @classmethod
    def from_httplib(ResponseCls, r, **response_kw):
        """
        Given an :class:`httplib.HTTPResponse` instance ``r``, return a
        corresponding :class:`urllib3.response.HTTPResponse` object.

        Remaining parameters are passed to the HTTPResponse constructor, along
        with ``original_response=r``.
        """
        # Copy headers, deferring Set-Cookie so each cookie value is added
        # individually below instead of through the generic path.
        headers = HTTPHeaderDict()
        for k, v in r.getheaders():
            if k.lower() != 'set-cookie':
                headers.add(k, v)

        if six.PY3:  # Python 3:
            cookies = r.msg.get_all('set-cookie') or tuple()
        else:  # Python 2:
            cookies = r.msg.getheaders('set-cookie')

        for cookie in cookies:
            headers.add('set-cookie', cookie)

        # HTTPResponse objects in Python 3 don't have a .strict attribute
        strict = getattr(r, 'strict', 0)
        return ResponseCls(body=r,
                           headers=headers,
                           status=r.status,
                           version=r.version,
                           reason=r.reason,
                           strict=strict,
                           original_response=r,
                           **response_kw)

    # Backwards-compatibility methods for httplib.HTTPResponse
    def getheaders(self):
        return self.headers

    def getheader(self, name, default=None):
        return self.headers.get(name, default)

    # Overrides from io.IOBase
    def close(self):
        if not self.closed:
            self._fp.close()

    @property
    def closed(self):
        if self._fp is None:
            return True
        elif hasattr(self._fp, 'closed'):
            return self._fp.closed
        elif hasattr(self._fp, 'isclosed'):  # Python 2
            return self._fp.isclosed()
        else:
            return True

    def fileno(self):
        if self._fp is None:
            raise IOError("HTTPResponse has no file to get a fileno from")
        elif hasattr(self._fp, "fileno"):
            return self._fp.fileno()
        else:
            raise IOError("The file-like object this HTTPResponse is wrapped "
                          "around has no file descriptor")

    def flush(self):
        if self._fp is not None and hasattr(self._fp, 'flush'):
            return self._fp.flush()

    def readable(self):
        # This method is required for `io` module compatibility.
        return True

    def readinto(self, b):
        # This method is required for `io` module compatibility.
        temp = self.read(len(b))
        if len(temp) == 0:
            return 0
        else:
            b[:len(temp)] = temp
            return len(temp)
| |
from __future__ import division, absolute_import, print_function
import sys
import warnings
import itertools
import operator
import platform
import pytest
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_almost_equal, assert_allclose,
assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data,
assert_warns
)
# Every concrete numeric scalar type; used to cross-check scalar math
# against the equivalent array/ufunc operations throughout this file.
types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
         np.int_, np.uint, np.longlong, np.ulonglong,
         np.single, np.double, np.longdouble, np.csingle,
         np.cdouble, np.clongdouble]
# Concrete subclasses registered under the abstract float/complex hierarchies.
floating_types = np.floating.__subclasses__()
complex_floating_types = np.complexfloating.__subclasses__()
# This compares scalarmath against ufuncs.
class TestTypes(object):
def test_types(self):
for atype in types:
a = atype(1)
assert_(a == 1, "error with %r: got %r" % (atype, a))
def test_type_add(self):
# list of types
for k, atype in enumerate(types):
a_scalar = atype(3)
a_array = np.array([3], dtype=atype)
for l, btype in enumerate(types):
b_scalar = btype(1)
b_array = np.array([1], dtype=btype)
c_scalar = a_scalar + b_scalar
c_array = a_array + b_array
# It was comparing the type numbers, but the new ufunc
# function-finding mechanism finds the lowest function
# to which both inputs can be cast - which produces 'l'
# when you do 'q' + 'b'. The old function finding mechanism
# skipped ahead based on the first argument, but that
# does not produce properly symmetric results...
assert_equal(c_scalar.dtype, c_array.dtype,
"error with types (%d/'%c' + %d/'%c')" %
(k, np.dtype(atype).char, l, np.dtype(btype).char))
def test_type_create(self):
for k, atype in enumerate(types):
a = np.array([1, 2, 3], atype)
b = atype([1, 2, 3])
assert_equal(a, b)
def test_leak(self):
# test leak of scalar objects
# a leak would show up in valgrind as still-reachable of ~2.6MB
for i in range(200000):
np.add(1, 1)
class TestBaseMath(object):
def test_blocked(self):
# test alignments offsets for simd instructions
# alignments for vz + 2 * (vs - 1) + 1
for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]:
for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt,
type='binary',
max_size=sz):
exp1 = np.ones_like(inp1)
inp1[...] = np.ones_like(inp1)
inp2[...] = np.zeros_like(inp2)
assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg)
assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)
np.add(inp1, inp2, out=out)
assert_almost_equal(out, exp1, err_msg=msg)
inp2[...] += np.arange(inp2.size, dtype=dt) + 1
assert_almost_equal(np.square(inp2),
np.multiply(inp2, inp2), err_msg=msg)
# skip true divide for ints
if dt != np.int32 or (sys.version_info.major < 3 and not sys.py3kwarning):
assert_almost_equal(np.reciprocal(inp2),
np.divide(1, inp2), err_msg=msg)
inp1[...] = np.ones_like(inp1)
np.add(inp1, 2, out=out)
assert_almost_equal(out, exp1 + 2, err_msg=msg)
inp2[...] = np.ones_like(inp2)
np.add(2, inp2, out=out)
assert_almost_equal(out, exp1 + 2, err_msg=msg)
def test_lower_align(self):
# check data that is not aligned to element size
# i.e doubles are aligned to 4 bytes on i386
d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
o = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
assert_almost_equal(d + d, d * 2)
np.add(d, d, out=o)
np.add(np.ones_like(d), d, out=o)
np.add(d, np.ones_like(d), out=o)
np.add(np.ones_like(d), d)
np.add(d, np.ones_like(d))
class TestPower(object):
    def test_small_types(self):
        for t in [np.int8, np.int16, np.float16]:
            result = t(3) ** 4
            assert_(result == 81, "error with %r: got %r" % (t, result))

    def test_large_types(self):
        for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]:
            result = t(51) ** 4
            msg = "error with %r: got %r" % (t, result)
            if np.issubdtype(t, np.integer):
                assert_(result == 6765201, msg)
            else:
                assert_almost_equal(result, 6765201, err_msg=msg)

    def test_integers_to_negative_integer_power(self):
        # Note that the combination of uint64 with a signed integer
        # has common type np.float64. The other combinations should all
        # raise a ValueError for integer ** negative integer.
        exponents = [np.array(-1, dt)[()] for dt in 'bhilq']

        def check(bases, expected):
            for base, exponent in itertools.product(bases, exponents):
                if base.dtype.name != 'uint64':
                    assert_raises(ValueError, operator.pow, base, exponent)
                else:
                    res = operator.pow(base, exponent)
                    assert_(res.dtype.type is np.float64)
                    assert_almost_equal(res, expected)

        # 1 ** -1 possible special case
        check([np.array(1, dt)[()] for dt in 'bhilqBHILQ'], 1.)
        # -1 ** -1 possible special case
        check([np.array(-1, dt)[()] for dt in 'bhilq'], -1.)
        # 2 ** -1 perhaps generic
        check([np.array(2, dt)[()] for dt in 'bhilqBHILQ'], .5)

    def test_mixed_types(self):
        typelist = [np.int8, np.int16, np.float16,
                    np.float32, np.float64, np.int8,
                    np.int16, np.int32, np.int64]
        for t1 in typelist:
            for t2 in typelist:
                result = t1(3) ** t2(2)
                msg = ("error with %r and %r:"
                       "got %r, expected %r") % (t1, t2, result, 9)
                if np.issubdtype(np.dtype(result), np.integer):
                    assert_(result == 9, msg)
                else:
                    assert_almost_equal(result, 9, err_msg=msg)

    def test_modular_power(self):
        # modular power is not implemented, so ensure it errors
        a, b, c = 5, 4, 10
        expected = pow(a, b, c)
        for t in (np.int32, np.float32, np.complex64):
            # note that 3-operand power only dispatches on the first argument
            assert_raises(TypeError, operator.pow, t(a), b, c)
            assert_raises(TypeError, operator.pow, np.array(t(a)), b, c)
def floordiv_and_mod(x, y):
    """Return ``(x // y, x % y)``, mirroring divmod's tuple shape."""
    quotient = x // y
    remainder = x % y
    return (quotient, remainder)
def _signs(dt):
if dt in np.typecodes['UnsignedInteger']:
return (+1,)
else:
return (+1, -1)
class TestModulus(object):
    """Tests for // and % (and divmod) across integer and float scalar
    dtypes, including sign conventions and signed-zero results.  Kept
    byte-identical: the assertions are sensitive to exact sign/zero
    semantics and statement order."""

    def test_modulus_basic(self):
        dt = np.typecodes['AllInteger'] + np.typecodes['Float']
        for op in [floordiv_and_mod, divmod]:
            for dt1, dt2 in itertools.product(dt, dt):
                for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):
                    fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
                    msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
                    a = np.array(sg1*71, dtype=dt1)[()]
                    b = np.array(sg2*19, dtype=dt2)[()]
                    div, rem = op(a, b)
                    # Fundamental identity: a == div*b + rem.
                    assert_equal(div*b + rem, a, err_msg=msg)
                    # The remainder takes the sign of the divisor.
                    if sg2 == -1:
                        assert_(b < rem <= 0, msg)
                    else:
                        assert_(b > rem >= 0, msg)

    def test_float_modulus_exact(self):
        # test that float results are exact for small integers. This also
        # holds for the same integers scaled by powers of two.
        nlst = list(range(-127, 0))
        plst = list(range(1, 128))
        dividend = nlst + [0] + plst
        divisor = nlst + plst
        arg = list(itertools.product(dividend, divisor))
        tgt = list(divmod(*t) for t in arg)

        a, b = np.array(arg, dtype=int).T
        # convert exact integer results from Python to float so that
        # signed zero can be used, it is checked.
        tgtdiv, tgtrem = np.array(tgt, dtype=float).T
        tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)
        tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)

        for op in [floordiv_and_mod, divmod]:
            for dt in np.typecodes['Float']:
                msg = 'op: %s, dtype: %s' % (op.__name__, dt)
                fa = a.astype(dt)
                fb = b.astype(dt)
                # use list comprehension so a_ and b_ are scalars
                div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)])
                assert_equal(div, tgtdiv, err_msg=msg)
                assert_equal(rem, tgtrem, err_msg=msg)

    def test_float_modulus_roundoff(self):
        # gh-6127
        dt = np.typecodes['Float']
        for op in [floordiv_and_mod, divmod]:
            for dt1, dt2 in itertools.product(dt, dt):
                for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
                    fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
                    msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
                    a = np.array(sg1*78*6e-8, dtype=dt1)[()]
                    b = np.array(sg2*6e-8, dtype=dt2)[()]
                    div, rem = op(a, b)
                    # Equal assertion should hold when fmod is used
                    assert_equal(div*b + rem, a, err_msg=msg)
                    if sg2 == -1:
                        assert_(b < rem <= 0, msg)
                    else:
                        assert_(b > rem >= 0, msg)

    def test_float_modulus_corner_cases(self):
        # Check remainder magnitude.
        for dt in np.typecodes['Float']:
            b = np.array(1.0, dtype=dt)
            a = np.nextafter(np.array(0.0, dtype=dt), -b)
            rem = operator.mod(a, b)
            assert_(rem <= b, 'dt: %s' % dt)
            rem = operator.mod(-a, -b)
            assert_(rem >= -b, 'dt: %s' % dt)

        # Check nans, inf
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in remainder")
            for dt in np.typecodes['Float']:
                fone = np.array(1.0, dtype=dt)
                fzer = np.array(0.0, dtype=dt)
                finf = np.array(np.inf, dtype=dt)
                fnan = np.array(np.nan, dtype=dt)
                rem = operator.mod(fone, fzer)
                assert_(np.isnan(rem), 'dt: %s' % dt)
                # MSVC 2008 returns NaN here, so disable the check.
                #rem = operator.mod(fone, finf)
                #assert_(rem == fone, 'dt: %s' % dt)
                rem = operator.mod(fone, fnan)
                assert_(np.isnan(rem), 'dt: %s' % dt)
                rem = operator.mod(finf, fone)
                assert_(np.isnan(rem), 'dt: %s' % dt)
class TestComplexDivision(object):
    def test_zero_division(self):
        # Division by complex zero: inf-valued results for finite/inf
        # numerators, nan for nan or zero numerators.
        with np.errstate(all="ignore"):
            for t in [np.complex64, np.complex128]:
                zero = t(0.0)
                for numerator in (t(1.0),
                                  t(complex(np.inf, np.inf)),
                                  t(complex(np.inf, np.nan)),
                                  t(complex(np.nan, np.inf))):
                    assert_(np.isinf(numerator / zero))
                assert_(np.isnan(t(complex(np.nan, np.nan)) / zero))
                assert_(np.isnan(t(0.) / zero))

    def test_signed_zeros(self):
        with np.errstate(all="ignore"):
            for t in [np.complex64, np.complex128]:
                # tupled (numerator, denominator, expected)
                # for testing as expected == numerator/denominator
                data = (
                    (( 0.0,-1.0), ( 0.0, 1.0), (-1.0,-0.0)),
                    (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),
                    (( 0.0,-1.0), (-0.0,-1.0), ( 1.0, 0.0)),
                    (( 0.0,-1.0), (-0.0, 1.0), (-1.0, 0.0)),
                    (( 0.0, 1.0), ( 0.0,-1.0), (-1.0, 0.0)),
                    (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),
                    ((-0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),
                    ((-0.0, 1.0), ( 0.0,-1.0), (-1.0,-0.0))
                )
                for num, den, expected in data:
                    result = t(complex(*num)) / t(complex(*den))
                    # check real and imag parts separately to avoid comparison
                    # in array context, which does not account for signed zeros
                    assert_equal(result.real, expected[0])
                    assert_equal(result.imag, expected[1])

    def test_branches(self):
        with np.errstate(all="ignore"):
            for t in [np.complex64, np.complex128]:
                # tupled (numerator, denominator, expected)
                # for testing as expected == numerator/denominator
                cases = []
                # trigger branch: real(fabs(denom)) > imag(fabs(denom))
                # followed by else condition as neither are == 0
                cases.append((( 2.0, 1.0), ( 2.0, 1.0), (1.0, 0.0)))
                # trigger branch: real(fabs(denom)) > imag(fabs(denom))
                # followed by if condition as both are == 0
                # is performed in test_zero_division(), so this is skipped
                # trigger else if branch: real(fabs(denom)) < imag(fabs(denom))
                cases.append((( 1.0, 2.0), ( 1.0, 2.0), (1.0, 0.0)))
                for num, den, expected in cases:
                    result = t(complex(*num)) / t(complex(*den))
                    # check real and imag parts separately to avoid comparison
                    # in array context, which does not account for signed zeros
                    assert_equal(result.real, expected[0])
                    assert_equal(result.imag, expected[1])
class TestConversion(object):
    """Conversions between numpy scalars and Python ints, plus relational
    operator behaviour across scalar types.  Kept byte-identical: warning
    counts and overflow semantics here are numpy-version sensitive."""

    def test_int_from_long(self):
        # int() of each element must reproduce the exact Python integers.
        l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18]
        li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18]
        for T in [None, np.float64, np.int64]:
            a = np.array(l, dtype=T)
            assert_equal([int(_m) for _m in a], li)
        a = np.array(l[:3], dtype=np.uint64)
        assert_equal([int(_m) for _m in a], li[:3])

    def test_iinfo_long_values(self):
        # NOTE(review): relies on legacy behaviour -- np.typeDict and
        # out-of-range integer conversion were deprecated/removed in newer
        # numpy releases; confirm the targeted numpy version.
        for code in 'bBhH':
            # max + 1 wraps around to min for the narrow integer dtypes here.
            res = np.array(np.iinfo(code).max + 1, dtype=code)
            tgt = np.iinfo(code).min
            assert_(res == tgt)

        for code in np.typecodes['AllInteger']:
            res = np.array(np.iinfo(code).max, dtype=code)
            tgt = np.iinfo(code).max
            assert_(res == tgt)

        for code in np.typecodes['AllInteger']:
            res = np.typeDict[code](np.iinfo(code).max)
            tgt = np.iinfo(code).max
            assert_(res == tgt)

    def test_int_raise_behaviour(self):
        def overflow_error_func(dtype):
            np.typeDict[dtype](np.iinfo(dtype).max + 1)

        # Constructing past the dtype's max must raise for native-width ints.
        for code in 'lLqQ':
            assert_raises(OverflowError, overflow_error_func, code)

    def test_int_from_infinite_longdouble(self):
        # gh-627
        x = np.longdouble(np.inf)
        assert_raises(OverflowError, int, x)
        with suppress_warnings() as sup:
            sup.record(np.ComplexWarning)
            x = np.clongdouble(np.inf)
            assert_raises(OverflowError, int, x)
            # complex -> int conversion must have warned exactly once
            assert_equal(len(sup.log), 1)

    @pytest.mark.skipif(not IS_PYPY, reason="Test is PyPy only (gh-9972)")
    def test_int_from_infinite_longdouble___int__(self):
        x = np.longdouble(np.inf)
        assert_raises(OverflowError, x.__int__)
        with suppress_warnings() as sup:
            sup.record(np.ComplexWarning)
            x = np.clongdouble(np.inf)
            assert_raises(OverflowError, x.__int__)
            assert_equal(len(sup.log), 1)

    @pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
                        reason="long double is same as double")
    @pytest.mark.skipif(platform.machine().startswith("ppc64"),
                        reason="IBM double double")
    def test_int_from_huge_longdouble(self):
        # Produce a longdouble that would overflow a double,
        # use exponent that avoids bug in Darwin pow function.
        exp = np.finfo(np.double).maxexp - 1
        huge_ld = 2 * 1234 * np.longdouble(2) ** exp
        huge_i = 2 * 1234 * 2 ** exp
        assert_(huge_ld != np.inf)
        assert_equal(int(huge_ld), huge_i)

    def test_int_from_longdouble(self):
        # int() truncates toward zero.
        x = np.longdouble(1.5)
        assert_equal(int(x), 1)
        x = np.longdouble(-10.5)
        assert_equal(int(x), -10)

    def test_numpy_scalar_relational_operators(self):
        # All integer
        for dt1 in np.typecodes['AllInteger']:
            assert_(1 > np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,))
            assert_(not 1 < np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,))

            for dt2 in np.typecodes['AllInteger']:
                assert_(np.array(1, dtype=dt1)[()] > np.array(0, dtype=dt2)[()],
                        "type %s and %s failed" % (dt1, dt2))
                assert_(not np.array(1, dtype=dt1)[()] < np.array(0, dtype=dt2)[()],
                        "type %s and %s failed" % (dt1, dt2))

        #Unsigned integers
        for dt1 in 'BHILQP':
            assert_(-1 < np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,))
            assert_(not -1 > np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,))
            assert_(-1 != np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,))

            #unsigned vs signed
            for dt2 in 'bhilqp':
                assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()],
                        "type %s and %s failed" % (dt1, dt2))
                assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()],
                        "type %s and %s failed" % (dt1, dt2))
                assert_(np.array(1, dtype=dt1)[()] != np.array(-1, dtype=dt2)[()],
                        "type %s and %s failed" % (dt1, dt2))

        #Signed integers and floats
        for dt1 in 'bhlqp' + np.typecodes['Float']:
            assert_(1 > np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,))
            assert_(not 1 < np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,))
            assert_(-1 == np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,))

            for dt2 in 'bhlqp' + np.typecodes['Float']:
                assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()],
                        "type %s and %s failed" % (dt1, dt2))
                assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()],
                        "type %s and %s failed" % (dt1, dt2))
                assert_(np.array(-1, dtype=dt1)[()] == np.array(-1, dtype=dt2)[()],
                        "type %s and %s failed" % (dt1, dt2))

    def test_scalar_comparison_to_none(self):
        # Scalars should just return False and not give a warnings.
        # The comparisons are flagged by pep8, ignore that.
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', FutureWarning)
            assert_(not np.float32(1) == None)
            assert_(not np.str_('test') == None)
            # This is dubious (see below):
            assert_(not np.datetime64('NaT') == None)

            assert_(np.float32(1) != None)
            assert_(np.str_('test') != None)
            # This is dubious (see below):
            assert_(np.datetime64('NaT') != None)
        assert_(len(w) == 0)

        # For documentation purposes, this is why the datetime is dubious.
        # At the time of deprecation this was no behaviour change, but
        # it has to be considered when the deprecations are done.
        assert_(np.equal(np.datetime64('NaT'), None))
#class TestRepr(object):
# def test_repr(self):
# for t in types:
# val = t(1197346475.0137341)
# val_repr = repr(val)
# val2 = eval(val_repr)
# assert_equal( val, val2 )
class TestRepr(object):
    def _test_type_repr(self, t):
        """Check repr/eval round-trips for hand-built tiny float bit patterns."""
        finfo = np.finfo(t)
        last_fraction_bit_idx = finfo.nexp + finfo.nmant
        last_exponent_bit_idx = finfo.nexp
        storage_bytes = np.dtype(t).itemsize*8
        # could add some more types to the list below
        # Values from http://en.wikipedia.org/wiki/IEEE_754
        bit_for_case = {'small denorm': last_fraction_bit_idx,
                        'small norm': last_exponent_bit_idx}
        for which in ['small denorm', 'small norm']:
            raw = np.array([0x00]*storage_bytes, dtype=np.uint8)
            byte, bitpos = divmod(bit_for_case[which], 8)
            raw[byte] = 1 << (7 - bitpos)
            val = raw.view(t)[0]
            val_repr = repr(val)
            roundtripped = t(eval(val_repr))
            # tiny denormals may print as zero; tolerate that one case
            if not (roundtripped == 0 and val < 1e-100):
                assert_equal(val, roundtripped)

    def test_float_repr(self):
        # long double test cannot work, because eval goes through a python
        # float
        for t in [np.float32, np.float64]:
            self._test_type_repr(t)
if not IS_PYPY:
# sys.getsizeof() is not valid on PyPy
class TestSizeOf(object):
def test_equal_nbytes(self):
for type in types:
x = type(0)
assert_(sys.getsizeof(x) > x.nbytes)
def test_error(self):
d = np.float32()
assert_raises(TypeError, d.__sizeof__, "a")
class TestMultiply(object):
    def test_seq_repeat(self):
        # Test that basic sequences get repeated when multiplied with
        # numpy integers. And errors are raised when multiplied with others.
        # Some of this behaviour may be controversial and could be open for
        # change.
        accepted_types = set(np.typecodes["AllInteger"])
        deprecated_types = set('?')
        forbidden_types = (
            set(np.typecodes["All"]) - accepted_types - deprecated_types)
        forbidden_types -= set('V')  # can't default-construct void scalars

        for seq_type in (list, tuple):
            seq = seq_type([1, 2, 3])
            for code in accepted_types:
                multiplier = np.dtype(code).type(2)
                assert_equal(seq * multiplier, seq * int(multiplier))
                assert_equal(multiplier * seq, int(multiplier) * seq)

            for code in deprecated_types:
                multiplier = np.dtype(code).type()
                assert_equal(
                    assert_warns(DeprecationWarning, operator.mul, seq, multiplier),
                    seq * int(multiplier))
                assert_equal(
                    assert_warns(DeprecationWarning, operator.mul, multiplier, seq),
                    int(multiplier) * seq)

            for code in forbidden_types:
                multiplier = np.dtype(code).type()
                assert_raises(TypeError, operator.mul, seq, multiplier)
                assert_raises(TypeError, operator.mul, multiplier, seq)

    def test_no_seq_repeat_basic_array_like(self):
        # Test that an array-like which does not know how to be multiplied
        # does not attempt sequence repeat (raise TypeError).
        # See also gh-7428.
        class ArrayLike(object):
            def __init__(self, arr):
                self.arr = arr

            def __array__(self):
                return self.arr

        # Test for simple ArrayLike above and memoryviews (original report)
        for arr_like in (ArrayLike(np.ones(3)), memoryview(np.ones(3))):
            assert_array_equal(arr_like * np.float32(3.), np.full(3, 3.))
            assert_array_equal(np.float32(3.) * arr_like, np.full(3, 3.))
            assert_array_equal(arr_like * np.int_(3), np.full(3, 3))
            assert_array_equal(np.int_(3) * arr_like, np.full(3, 3))
class TestNegative(object):
    """Unary minus on numpy scalars."""

    def test_exceptions(self):
        # Negating a boolean scalar is not allowed.
        flag = np.ones((), dtype=np.bool_)[()]
        assert_raises(TypeError, operator.neg, flag)

    def test_result(self):
        # For every integer/float scalar type, x + (-x) == 0.
        codes = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning)
            for code in codes:
                one = np.ones((), dtype=code)[()]
                assert_equal(operator.neg(one) + one, 0)
class TestSubtract(object):
    """Binary subtraction on numpy scalars."""

    def test_exceptions(self):
        # Subtracting boolean scalars is not allowed.
        flag = np.ones((), dtype=np.bool_)[()]
        assert_raises(TypeError, operator.sub, flag, flag)

    def test_result(self):
        # For every integer/float scalar type, x - x == 0.
        codes = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning)
            for code in codes:
                one = np.ones((), dtype=code)[()]
                assert_equal(operator.sub(one, one), 0)
class TestAbs(object):
    """abs() and np.abs() on real and complex floating scalars."""

    def _test_abs_func(self, absfunc):
        """Run *absfunc* over every floating scalar type, checking a few
        characteristic values."""
        for tp in floating_types + complex_floating_types:
            assert_equal(absfunc(tp(-1.5)), 1.5)
            # assert_equal() checks zero signedness
            assert_equal(absfunc(tp(0.0)), 0.0)
            assert_equal(absfunc(tp(-0.0)), 0.0)
            # Type boundary values: |max| == max, |tiny| == tiny,
            # |min| == -min.
            for source, sign in ((np.finfo(tp).max, 1),
                                 (np.finfo(tp).tiny, 1),
                                 (np.finfo(tp).min, -1)):
                val = tp(source)
                assert_equal(absfunc(val), sign * val.real)

    def test_builtin_abs(self):
        self._test_abs_func(abs)

    def test_numpy_abs(self):
        self._test_abs_func(np.abs)
| |
#
# ElementTree
# $Id: HTMLTreeBuilder.py 3265 2007-09-06 20:42:00Z fredrik $
#
# a simple tree builder, for HTML input
#
# history:
# 2002-04-06 fl created
# 2002-04-07 fl ignore IMG and HR end tags
# 2002-04-07 fl added support for 1.5.2 and later
# 2003-04-13 fl added HTMLTreeBuilder alias
# 2004-12-02 fl don't feed non-ASCII charrefs/entities as 8-bit strings
# 2004-12-05 fl don't feed non-ASCII CDATA as 8-bit strings
#
# Copyright (c) 1999-2004 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2007 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
##
# Tools to build element trees from HTML files.
##
import htmlentitydefs
import re, string, sys
import mimetools, StringIO
import ElementTree
# Tags that may be implicitly closed by a following start tag of the
# same kind (see HTMLTreeBuilder.handle_starttag/handle_endtag).
AUTOCLOSE = "p", "li", "tr", "th", "td", "head", "body"
# Tags whose end tags are ignored; the parser closes these elements
# immediately after the start tag.
IGNOREEND = "img", "hr", "meta", "link", "br"
# Predicate returning a match object when text contains non-ASCII bytes.
if sys.version[:3] == "1.5":
    is_not_ascii = re.compile(r"[\x80-\xff]").search # 1.5.2
else:
    # eval() hides the unicode literal from the Python 1.5.2 compiler.
    is_not_ascii = re.compile(eval(r'u"[\u0080-\uffff]"')).search
# Python versions before 2.2 do not ship HTMLParser; emulate the parts
# of its interface we use on top of sgmllib.
try:
    from HTMLParser import HTMLParser
except ImportError:
    from sgmllib import SGMLParser
    # hack to use sgmllib's SGMLParser to emulate 2.2's HTMLParser
    class HTMLParser(SGMLParser):
        # the following only works as long as this class doesn't
        # provide any do, start, or end handlers
        def unknown_starttag(self, tag, attrs):
            self.handle_starttag(tag, attrs)
        def unknown_endtag(self, tag):
            self.handle_endtag(tag)
##
# ElementTree builder for HTML source code. This builder converts an
# HTML document or fragment to an ElementTree.
# <p>
# The parser is relatively picky, and requires balanced tags for most
# elements. However, elements belonging to the following group are
# automatically closed: P, LI, TR, TH, and TD. In addition, the
# parser automatically inserts end tags immediately after the start
# tag, and ignores any end tags for the following group: IMG, HR,
# META, and LINK.
#
# @keyparam builder Optional builder object. If omitted, the parser
# uses the standard <b>elementtree</b> builder.
# @keyparam encoding Optional character encoding, if known. If omitted,
# the parser looks for META tags inside the document. If no tags
# are found, the parser defaults to ISO-8859-1. Note that if your
# document uses a non-ASCII compatible encoding, you must decode
# the document before parsing.
#
# @see elementtree.ElementTree
class HTMLTreeBuilder(HTMLParser):
    # FIXME: shouldn't this class be named Parser, not Builder?

    ##
    # Feeds HTML through the HTMLParser interface and assembles an
    # element tree via the given (or a default) TreeBuilder.
    #
    # @keyparam builder Optional target builder object.
    # @keyparam encoding Optional source encoding; defaults to
    #     ISO-8859-1 and may be updated by META content-type directives
    #     encountered while parsing.

    def __init__(self, builder=None, encoding=None):
        self.__stack = []  # currently open tags, innermost last
        if builder is None:
            builder = ElementTree.TreeBuilder()
        self.__builder = builder
        self.encoding = encoding or "iso-8859-1"
        HTMLParser.__init__(self)

    ##
    # Flushes parser buffers, and return the root element.
    #
    # @return An Element instance.

    def close(self):
        HTMLParser.close(self)
        return self.__builder.close()

    ##
    # (Internal) Handles start tags.

    def handle_starttag(self, tag, attrs):
        if tag == "meta":
            # look for encoding directives
            http_equiv = content = None
            for k, v in attrs:
                if k == "http-equiv":
                    http_equiv = string.lower(v)
                elif k == "content":
                    content = v
            if http_equiv == "content-type" and content:
                # use mimetools to parse the http header
                header = mimetools.Message(
                    StringIO.StringIO("%s: %s\n\n" % (http_equiv, content))
                    )
                encoding = header.getparam("charset")
                if encoding:
                    self.encoding = encoding
        if tag in AUTOCLOSE:
            # an open element of the same kind is implicitly closed
            if self.__stack and self.__stack[-1] == tag:
                self.handle_endtag(tag)
        self.__stack.append(tag)
        attrib = {}
        if attrs:
            # attribute names are normalized to lower case
            for k, v in attrs:
                attrib[string.lower(k)] = v
        self.__builder.start(tag, attrib)
        if tag in IGNOREEND:
            # these elements never receive an explicit end tag
            self.__stack.pop()
            self.__builder.end(tag)

    ##
    # (Internal) Handles end tags.

    def handle_endtag(self, tag):
        if tag in IGNOREEND:
            return
        # NOTE(review): a stray end tag with an empty stack raises
        # IndexError; the parser is documented as requiring balanced
        # tags for most elements.
        lasttag = self.__stack.pop()
        if tag != lasttag and lasttag in AUTOCLOSE:
            self.handle_endtag(lasttag)
        self.__builder.end(tag)

    ##
    # (Internal) Handles character references.

    def handle_charref(self, char):
        # hexadecimal references may use either prefix case, e.g.
        # &#x41; or &#X41; (HTMLParser passes the name without '&#')
        if char[:1] in ("x", "X"):
            char = int(char[1:], 16)
        else:
            char = int(char)
        if 0 <= char < 128:
            self.__builder.data(chr(char))
        else:
            self.__builder.data(unichr(char))

    ##
    # (Internal) Handles entity references.

    def handle_entityref(self, name):
        entity = htmlentitydefs.entitydefs.get(name)
        if entity:
            if len(entity) == 1:
                entity = ord(entity)
            else:
                # entitydefs stores multi-byte entities as "&#NNN;"
                entity = int(entity[2:-1])
            if 0 <= entity < 128:
                self.__builder.data(chr(entity))
            else:
                self.__builder.data(unichr(entity))
        else:
            self.unknown_entityref(name)

    ##
    # (Internal) Handles character data.

    def handle_data(self, data):
        if isinstance(data, type('')) and is_not_ascii(data):
            # convert to unicode, but only if necessary
            data = unicode(data, self.encoding, "ignore")
        self.__builder.data(data)

    ##
    # (Hook) Handles unknown entity references. The default action
    # is to ignore unknown entities.

    def unknown_entityref(self, name):
        pass # ignore by default; override if necessary
##
# An alias for the <b>HTMLTreeBuilder</b> class.

# Kept so callers can use the generic name used elsewhere in elementtree.
TreeBuilder = HTMLTreeBuilder
##
# Parse an HTML document or document fragment.
#
# @param source A filename or file object containing HTML data.
# @param encoding Optional character encoding, if known. If omitted,
# the parser looks for META tags inside the document. If no tags
# are found, the parser defaults to ISO-8859-1.
# @return An ElementTree instance
def parse(source, encoding=None):
    # Build a fresh tree-builder/parser for this document and delegate
    # the actual reading to ElementTree.parse.
    target = HTMLTreeBuilder(encoding=encoding)
    return ElementTree.parse(source, target)
if __name__ == "__main__":
    import sys
    # Command-line helper: parse the HTML file named in argv[1] and
    # dump the resulting element tree to stdout.
    ElementTree.dump(parse(open(sys.argv[1])))
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for Chromium media component.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
import re
# This line is 'magic' in that git-cl looks for it to decide whether to
# use Python3 instead of Python2 when running the code in this file.
USE_PYTHON3 = True

# Well-defined simple classes containing only <= 4 ints, or <= 2 floats.
BASE_TIME_TYPES = [
    'base::Time',
    'base::TimeDelta',
    'base::TimeTicks',
]

# Matches pass-by-const-reference of the types above, e.g. "const base::Time&".
BASE_TIME_TYPES_RE = re.compile(r'\bconst (%s)&' % '|'.join(BASE_TIME_TYPES))
def _FilterFile(affected_file):
"""Return true if the file could contain code requiring a presubmit check."""
return affected_file.LocalPath().endswith(
('.h', '.cc', '.cpp', '.cxx', '.mm'))
def _CheckForUseOfWrongClock(input_api, output_api):
  """Make sure new lines of media code don't use a clock susceptible to skew."""

  # Explicit references to the base::Time type (or base::Clock/DefaultClock),
  # whether in using decls, typedefs, or calls to static methods.
  base_time_type_pattern = r'(^|\W)base::(Time|Clock|DefaultClock)(\W|$)'

  # References to base::Time class members, such as a call to base::Time::Now.
  base_time_member_pattern = r'(^|\W)(Time|Clock|DefaultClock)::'

  # "using base::Time" declarations must not trigger a warning; it is
  # perfectly reasonable for code to be written like this:
  #
  #   using base::Time;
  #   ...
  #   int64_t foo_us = foo_s * Time::kMicrosecondsPerSecond;
  using_base_time_decl_pattern = r'^\s*using\s+(::)?base::Time\s*;'

  # References to the kXXX constants in the base::Time class are likewise
  # exempt from the warning.
  base_time_konstant_pattern = r'(^|\W)Time::k\w+'

  problem_re = input_api.re.compile(
      r'(' + base_time_type_pattern + r')|(' + base_time_member_pattern + r')')
  exception_re = input_api.re.compile(
      r'(' + using_base_time_decl_pattern + r')|(' +
      base_time_konstant_pattern + r')')

  problems = []
  for f in input_api.AffectedSourceFiles(_FilterFile):
    for line_number, line in f.ChangedContents():
      if problem_re.search(line) and not exception_re.search(line):
        problems.append(
            '  %s:%d\n    %s' % (f.LocalPath(), line_number, line.strip()))

  if not problems:
    return []
  return [output_api.PresubmitPromptOrNotify(
      'You added one or more references to the base::Time class and/or one\n'
      'of its member functions (or base::Clock/DefaultClock). In media\n'
      'code, it is rarely correct to use a clock susceptible to time skew!\n'
      'Instead, could you use base::TimeTicks to track the passage of\n'
      'real-world time?\n\n' +
      '\n'.join(problems))]
def _CheckForHistogramOffByOne(input_api, output_api):
  """Make sure histogram enum maxes are used properly"""

  # A general-purpose chunk of regex to match whitespace and/or comments
  # that may be interspersed with the code we're interested in:
  comment = r'/\*.*?\*/|//[^\n]*'
  whitespace = r'(?:[\n\t ]|(?:' + comment + r'))*'

  # The name is assumed to be a literal string.
  histogram_name = r'"[^"]*"'

  # This can be an arbitrary expression, so just ensure it isn't a ; to prevent
  # matching past the end of this statement.
  histogram_value = r'[^;]*'

  # In parens so we can retrieve it for further checks.
  histogram_max = r'([^;,]*)'

  # This should match a uma histogram enumeration macro expression.
  # Group 1 is the max argument; optional group 2 captures the
  # PRESUBMIT_IGNORE_UMA_MAX opt-out comment after the statement.
  uma_macro_re = input_api.re.compile(
      r'\bUMA_HISTOGRAM_ENUMERATION\(' + whitespace + histogram_name + r',' +
      whitespace + histogram_value + r',' + whitespace + histogram_max +
      whitespace + r'\)' + whitespace + r';(?:' + whitespace +
      r'\/\/ (PRESUBMIT_IGNORE_UMA_MAX))?')

  # Accepts max arguments that look like "SomethingMax + 1" / "FOO_MAX + 1".
  uma_max_re = input_api.re.compile(r'.*(?:Max|MAX).* \+ 1')

  problems = []

  for f in input_api.AffectedSourceFiles(_FilterFile):
    contents = input_api.ReadFile(f)

    # We want to match across lines, but still report a line number, so we keep
    # track of the line we're on as we search through the file.
    line_number = 1

    # We search the entire file, then check if any violations are in the changed
    # areas, this is inefficient, but simple. A UMA_HISTOGRAM_ENUMERATION call
    # will often span multiple lines, so finding a match looking just at the
    # deltas line-by-line won't catch problems.
    match = uma_macro_re.search(contents)
    while match:
      line_number += contents.count('\n', 0, match.start())
      max_arg = match.group(1) # The third argument.

      if (not uma_max_re.match(max_arg) and match.group(2) !=
          'PRESUBMIT_IGNORE_UMA_MAX'):
        uma_range = range(match.start(), match.end() + 1)
        # Check if any part of the match is in the changed lines:
        for num, line in f.ChangedContents():
          if line_number <= num <= line_number + match.group().count('\n'):
            problems.append('%s:%d' % (f, line_number))
            break

      # Strip off the file contents up to the end of the match and update the
      # line number.
      contents = contents[match.end():]
      line_number += match.group().count('\n')
      match = uma_macro_re.search(contents)

  if problems:
    return [output_api.PresubmitError(
      'UMA_HISTOGRAM_ENUMERATION reports in src/media/ are expected to adhere\n'
      'to the following guidelines:\n'
      ' - The max value (3rd argument) should be an enum value equal to the\n'
      '   last valid value, e.g. FOO_MAX = LAST_VALID_FOO.\n'
      ' - 1 must be added to that max value.\n'
      'Contact dalecurtis@chromium.org if you have questions.' , problems)]

  return []
def _CheckPassByValue(input_api, output_api):
  """Warn when base::Time (or a derived type) is passed by const reference.

  These are small value types and should be passed by value; see
  base/time/time.h.
  """
  offenders = [
      '%s:%d' % (f, line_number)
      for f in input_api.AffectedSourceFiles(_FilterFile)
      for line_number, line in f.ChangedContents()
      if BASE_TIME_TYPES_RE.search(line)
  ]
  if offenders:
    return [output_api.PresubmitError(
        'base::Time and derived classes should be passed by value and not by\n'
        'const ref, see base/time/time.h for more information.', offenders)]
  return []
def _CheckForUseOfLazyInstance(input_api, output_api):
  """Check that base::LazyInstance is not used."""
  lazy_instance_re = re.compile(r'(^|\W)base::LazyInstance<')
  hits = []
  for f in input_api.AffectedSourceFiles(_FilterFile):
    hits.extend(
        '%s:%d' % (f, num)
        for num, text in f.ChangedContents()
        if lazy_instance_re.search(text))
  if not hits:
    return []
  return [output_api.PresubmitError(
      'base::LazyInstance is deprecated; use a thread safe static.', hits)]
def _CheckNoLoggingOverrideInHeaders(input_api, output_api):
"""Checks to make sure no .h files include logging_override_if_enabled.h."""
files = []
pattern = input_api.re.compile(
r'^#include\s*"media/base/logging_override_if_enabled.h"',
input_api.re.MULTILINE)
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if not f.LocalPath().endswith('.h'):
continue
contents = input_api.ReadFile(f)
if pattern.search(contents):
files.append(f)
if len(files):
return [output_api.PresubmitError(
'Do not #include "logging_override_if_enabled.h" in header files, '
'since it overrides DVLOG() in every file including the header. '
'Instead, only include it in source files.',
files) ]
return []
def _CheckForNoV4L2AggregateInitialization(input_api, output_api):
  """Warn about struct v4l2_* variables initialized with a braced-init-list."""
  v4l2_aggregate_initializer_re = re.compile(r'(^|\W)struct.+v4l2_.+=.+{+}+;')
  hits = []
  for f in input_api.AffectedSourceFiles(_FilterFile):
    for num, text in f.ChangedContents():
      if v4l2_aggregate_initializer_re.search(text):
        hits.append('%s:%d' % (f, num))
  if not hits:
    return []
  return [output_api.PresubmitPromptWarning(
      'Avoid initializing V4L2 structures with braced-init-lists, i.e. as '
      'aggregates. V4L2 structs often contain unions of various sized members: '
      'when a union is initialized by aggregate initialization, only the first '
      'non-static member is initialized, leaving other members unitialized if '
      'they are larger. Use memset instead.',
      hits)]
def _CheckChange(input_api, output_api):
  """Run all media presubmit checks (in order) and collect their results."""
  checks = (
      _CheckForUseOfWrongClock,
      _CheckPassByValue,
      _CheckForHistogramOffByOne,
      _CheckForUseOfLazyInstance,
      _CheckNoLoggingOverrideInHeaders,
      _CheckForNoV4L2AggregateInitialization,
  )
  results = []
  for check in checks:
    results.extend(check(input_api, output_api))
  return results
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit entry point invoked by depot_tools at upload time."""
  results = _CheckChange(input_api, output_api)
  return results
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit entry point invoked by depot_tools at commit time."""
  results = _CheckChange(input_api, output_api)
  return results
| |
import logging
from .pi30 import pi30
# Module-level logger for this protocol implementation.
log = logging.getLogger("pi41")
# New / overriden commands for the PI41 protocol.  Each entry maps a
# command name to its metadata: description/help text, command "type",
# an ordered list of response field definitions of the form
# [decode-type, label, unit-or-options], canned "test_responses" used by
# the test suite, and (optionally) a regex matching the full command.
NEW_COMMANDS = {
    "QP2GS": {
        "name": "QP2GS",
        "description": "Parallel Information inquiry",
        "help": " -- example: QP2GS1 queries the values of various metrics from instance 1 of parallel setup Inverters (numbers from 0)",
        "type": "QUERY",
        "response": [
            ["int", "Parallel instance number??", ""],
            ["int", "Serial number", ""],
            [
                "keyed",
                "Work mode",
                {
                    "P": "Power On Mode",
                    "S": "Standby Mode",
                    "L": "Line Mode",
                    "B": "Battery Mode",
                    "F": "Fault Mode",
                    "H": "Power Saving Mode",
                },
            ],
            [
                "keyed",
                "Fault code",
                {
                    "00": "No fault",
                    "01": "Fan is locked",
                    "02": "Over temperature",
                    "03": "Battery voltage is too high",
                    "04": "Battery voltage is too low",
                    "05": "Output short circuited or Over temperature",
                    "06": "Output voltage is too high",
                    "07": "Over load time out",
                    "08": "Bus voltage is too high",
                    "09": "Bus soft start failed",
                    "11": "Main relay failed",
                    "51": "Over current inverter",
                    "52": "Bus soft start failed",
                    "53": "Inverter soft start failed",
                    "54": "Self-test failed",
                    "55": "Over DC voltage on output of inverter",
                    "56": "Battery connection is open",
                    "57": "Current sensor failed",
                    "58": "Output voltage is too low",
                    "60": "Inverter negative power",
                    "71": "Parallel version different",
                    "72": "Output circuit failed",
                    "80": "CAN communication failed",
                    "81": "Parallel host line lost",
                    "82": "Parallel synchronized signal lost",
                    "83": "Parallel battery voltage detect different",
                    "84": "Parallel Line voltage or frequency detect different",
                    "85": "Parallel Line input current unbalanced",
                    "86": "Parallel output setting different",
                },
            ],
            ["float", "L2 AC input voltage", "V"],
            ["float", "L2 AC input frequency", "Hz"],
            ["float", "L2 AC output voltage", "V"],
            ["float", "L2 AC output frequency", "Hz"],
            ["int", "L2 AC output apparent power", "VA"],
            ["int", "L2 AC output active power", "W"],
            ["int", "L2 Load percentage", "%"],
            ["float", "L2 Battery voltage", "V"],
            ["int", "L2 Battery charging current", "A"],
            ["int", "L2 Battery capacity", "%"],
            ["float", "PV2 Input Voltage", "V"],
            ["int", "PV2 charging current", "A"],
            [
                "flags",
                "Inverter Status",
                [
                    "is_l2_scc_ok",
                    "is_l2_ac_charging",
                    "is_l2_scc_charging",
                    "is_battery_over_voltage",
                    "is_battery_under_voltage",
                    "is_l2_line_lost",
                    "is_l2_load_on",
                    "is_configuration_changed",
                ],
            ],
        ],
        "test_responses": [
            b"(11 92911906100045 L 00 124.2 59.98 124.2 59.98 0149 0130 005 56.1 000 100 000.0 00 01000010\xA9\xA8\r",
        ],
        "regex": "QP2GS(\\d)$",
    },
    "QPGS": {
        "name": "QPGS",
        "description": "Parallel Information inquiry LV5048",
        "help": " -- example: QPGS1 queries the values of various metrics from instance 1 of parallel setup Inverters (numbers from 0)",
        "type": "QUERY",
        "response": [
            ["option", "Parallel instance number", ["Not valid", "valid"]],
            ["int", "Serial number", ""],
            [
                "keyed",
                "Work mode",
                {
                    "P": "Power On Mode",
                    "S": "Standby Mode",
                    "L": "Line Mode",
                    "B": "Battery Mode",
                    "F": "Fault Mode",
                    "H": "Power Saving Mode",
                },
            ],
            [
                "keyed",
                "Fault code",
                {
                    "00": "No fault",
                    "01": "Fan is locked",
                    "02": "Over temperature",
                    "03": "Battery voltage is too high",
                    "04": "Battery voltage is too low",
                    "05": "Output short circuited or Over temperature",
                    "06": "Output voltage is too high",
                    "07": "Over load time out",
                    "08": "Bus voltage is too high",
                    "09": "Bus soft start failed",
                    "11": "Main relay failed",
                    "51": "Over current inverter",
                    "52": "Bus soft start failed",
                    "53": "Inverter soft start failed",
                    "54": "Self-test failed",
                    "55": "Over DC voltage on output of inverter",
                    "56": "Battery connection is open",
                    "57": "Current sensor failed",
                    "58": "Output voltage is too low",
                    "60": "Inverter negative power",
                    "71": "Parallel version different",
                    "72": "Output circuit failed",
                    "80": "CAN communication failed",
                    "81": "Parallel host line lost",
                    "82": "Parallel synchronized signal lost",
                    "83": "Parallel battery voltage detect different",
                    "84": "Parallel Line voltage or frequency detect different",
                    "85": "Parallel Line input current unbalanced",
                    "86": "Parallel output setting different",
                },
            ],
            ["float", "L1 AC input voltage", "V"],
            ["float", "L1 AC input frequency", "Hz"],
            ["float", "L1 AC output voltage", "V"],
            ["float", "L1 AC output frequency", "Hz"],
            ["int", "L1 AC output apparent power", "VA"],
            ["int", "L1 AC output active power", "W"],
            ["int", "L1 Load percentage", "%"],
            ["float", "Battery voltage", "V"],
            ["int", "Battery charging current", "A"],
            ["int", "Battery capacity", "%"],
            ["float", "PV1 Input Voltage", "V"],
            ["int", "Total charging current", "A"],
            ["int", "Total AC output apparent power", "VA"],
            ["int", "Total output active power", "W"],
            ["int", "Total AC output percentage", "%"],
            [
                "flags",
                "Inverter Status",
                [
                    "is_l1_scc_ok",
                    "is_l1_ac_charging_on",
                    "is_l1_scc_charging_on",
                    "is_battery_over_voltage",
                    "is_battery_under_voltage",
                    "is_l1_line_off",
                    "is_l1_load_on",
                    "is_configuration_changed",
                ],
            ],
            [
                "option",
                "Output mode",
                [
                    "Standalone?",
                    "Parallel output 0 degrees",
                    "Phase 1 of 3 Phase output",
                    "Phase 2 of 3 Phase output",
                    "Phase 3 of 3 Phase output",
                    "Parallel output 120 degrees",
                    "Parallel output 180 degrees",
                ],
            ],
            [
                "option",
                "Charger source priority",
                ["Utility first", "Solar first", "Solar + Utility", "Solar only"],
            ],
            ["int", "Max charger current", "A"],
            ["int", "Max charger range", "A"],
            ["int", "Max AC charger current", "A"],
            ["int", "PV1 charging current", "A"],
            ["int", "Battery discharge current", "A"],
        ],
        "test_responses": [
            b"(1 92911906100045 L 00 122.9 59.98 122.9 59.98 0331 0272 013 56.1 004 100 000.0 004 01577 01400 009 01000010 6 0 060 220 40 00 000\xC7\xC2\r",
        ],
        "regex": "QPGS(\\d)$",
    },
    "QPIGS": {
        "name": "QPIGS",
        "description": "General Status Parameters inquiry LV5048",
        "help": " -- queries the value of various metrics from the Inverter",
        "type": "QUERY",
        "response": [
            ["float", "L1 AC Input Voltage", "V"],
            ["float", "L1 AC Input Frequency", "Hz"],
            ["float", "L1 AC Output Voltage", "V"],
            ["float", "L1 AC Output Frequency", "Hz"],
            ["int", "L1 AC Output Apparent Power", "VA"],
            ["int", "L1 AC Output Active Power", "W"],
            ["int", "L1 AC Output Load", "%"],
            ["int", "BUS Voltage", "V"],
            ["float", "Battery Voltage", "V"],
            ["int", "Battery Charging Current", "A"],
            ["int", "Battery Capacity", "%"],
            ["int", "Inverter Heat Sink Temperature", "Deg_C"],
            ["float", "PV Input Current for Battery", "A"],
            ["float", "PV Input Voltage", "V"],
            ["float", "Battery Voltage from SCC", "V"],
            ["int", "Battery Discharge Current", "A"],
            [
                "flags",
                "Inverter Status",
                [
                    "is_l1_scc_ok",
                    "is_l1_ac_charging_on",
                    "is_l1_scc_charging_on",
                    "is_battery_over",
                    "is_battery_under",
                    "is_l1_line_not_ok",
                    "is_load_on",
                    "is_configuration_changed",
                ],
            ],
            ["int", "RSV1", "A"],
            ["int", "RSV2", "A"],
            ["int", "PV Input Power", "W"],
            ["flags", "Device Status2", ["is_charging_to_float", "is_switched_on", "is_reserved"]],
        ],
        "test_responses": [
            b"(000.0 00.0 230.0 49.9 0161 0119 003 460 57.50 012 100 0069 0014 103.8 57.45 00000 00110110 00 00 00856 010\x24\x8C\r",
        ],
    },
    "QPIGS2": {
        "name": "QPIGS2",
        "description": "General Status Parameters inquiry",
        "help": " -- queries the value of various metrics from the Inverter",
        "type": "QUERY",
        "response": [
            ["float", "L2 AC Input Voltage", "V"],
            ["float", "L2 AC Input Frequency", "Hz"],
            ["float", "L2 AC Output Voltage", "V"],
            ["float", "L2 AC Output Frequency", "Hz"],
            ["int", "L2 AC Output Apparent Power", "VA"],
            ["int", "L2 AC Output Active Power", "W"],
            ["int", "L2 AC Output Load", "%"],
            ["int", "PV2 Battery Charging Current", "A"],
            ["float", "PV2 Input Voltage", "V"],
            ["float", "L2 Battery Voltage", "V"],
            [
                "flags",
                "Device Status",
                [
                    "is_l2_scc_ok",
                    "is_l2_ac_charging_on",
                    "is_l2_scc_charging_on",
                    "reserved",
                    "is_l2_line_not_ok",
                    "is_load_on",
                    "reserved",
                ],
            ],
        ],
        "test_responses": [
            b"",
        ],
    },
    "QPIRI": {
        "name": "QPIRI",
        "description": "Current Settings inquiry for LV5048",
        "help": " -- queries the current settings from the Inverter",
        "type": "QUERY",
        "response": [
            ["float", "AC Input Voltage", "V"],
            ["float", "AC Input Current", "A"],
            ["float", "AC Output Voltage", "V"],
            ["float", "AC Output Frequency", "Hz"],
            ["float", "AC Output Current", "A"],
            ["int", "AC Output Apparent Power", "VA"],
            ["int", "AC Output Active Power", "W"],
            ["float", "Battery Voltage", "V"],
            ["float", "Battery Recharge Voltage", "V"],
            ["float", "Battery Under Voltage", "V"],
            ["float", "Battery Bulk Charge Voltage", "V"],
            ["float", "Battery Float Charge Voltage", "V"],
            ["option", "Battery Type", ["AGM", "Flooded", "User"]],
            ["int", "Max AC Charging Current", "A"],
            ["int", "Max Charging Current", "A"],
            ["option", "Input Voltage Range", ["Appliance", "UPS"]],
            ["option", "Output Source Priority", ["Utility first", "Solar first", "SBU first"]],
            [
                "option",
                "Charger Source Priority",
                ["Utility first", "Solar first", "Solar + Utility", "Only solar charging permitted"],
            ],
            ["int", "Max Parallel Units", "units"],
            ["keyed", "Machine Type", {"00": "Grid tie", "01": "Off Grid", "10": "Hybrid"}],
            ["option", "Topology", ["transformerless", "transformer"]],
            [
                "option",
                "Output Mode",
                [
                    "Standalone?",
                    "Parallel output 0 degrees",
                    "Phase 1 of 3 Phase output",
                    "Phase 2 of 3 Phase output",
                    "Phase 3 of 3 Phase output",
                    "Parallel output 120 degrees",
                    "Parallel output 180 degrees",
                ],
            ],
            ["float", "Battery Redischarge Voltage", "V"],
            [
                "option",
                "PV OK Condition",
                [
                    "As long as one unit of inverters has connect PV, parallel system will consider PV OK",
                    "Only All of inverters have connect PV, parallel system will consider PV OK",
                ],
            ],
            [
                "option",
                "PV Power Balance",
                [
                    "PV input max current will be the max charged current",
                    "PV input max power will be the sum of the max charged power and loads power",
                ],
            ],
            ["int", "Max Charging Time at CV Stage", "min"],
        ],
        "test_responses": [
            b"(230.0 21.7 230.0 50.0 21.7 5000 4000 48.0 46.0 42.0 56.4 54.0 0 10 010 1 0 0 6 01 0 0 54.0 0 1 60\x83\xAA\r",
            b"(120.0 20.8 120.0 60.0 20.8 2500 2500 48.0 47.0 43.0 56.1 56.1 2 40 060 0 0 0 9 01 0 6 55.0 0 1 000\xBE\xAC\r",
            b"(NAK\x73\x73\r",
        ],
    },
}
class pi41(pi30):
    """PI41 protocol: PI30 plus parallel-inverter (QPGS/QP2GS) commands."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__()
        self._protocol_id = b"PI41"
        # Overlay the PI41-specific / overridden command definitions on
        # top of the inherited PI30 command table.
        self.COMMANDS.update(NEW_COMMANDS)
        self.STATUS_COMMANDS = ["QPIGS", "Q1"]
        self.SETTINGS_COMMANDS = ["QPIRI", "QFLAG"]
        self.DEFAULT_COMMAND = "QDI"
| |
#!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DocsClient extends gdata.client.GDClient to streamline DocList API calls."""
__author__ = 'e.bidelman (Eric Bidelman)'
import mimetypes
import urllib
import atom.data
import atom.http_core
import gdata.client
import gdata.docs.data
import gdata.gauth
# Feed URI templates
DOCLIST_FEED_URI = '/feeds/default/private/full/'
# The %s placeholder in each template is the document's resource id.
FOLDERS_FEED_TEMPLATE = DOCLIST_FEED_URI + '%s/contents'
ACL_FEED_TEMPLATE = DOCLIST_FEED_URI + '%s/acl'
REVISIONS_FEED_TEMPLATE = DOCLIST_FEED_URI + '%s/revisions'
class DocsClient(gdata.client.GDClient):
"""Client extension for the Google Documents List API."""
host = 'docs.google.com' # default server for the API
api_version = '3.0' # default major version for the service.
auth_service = 'writely'
auth_scopes = gdata.gauth.AUTH_SCOPES['writely']
  def __init__(self, auth_token=None, **kwargs):
    """Constructs a new client for the DocList API.

    Args:
      auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
          OAuthToken which authorizes this client to edit the user's data.
      kwargs: The other parameters to pass to gdata.client.GDClient
          constructor.
    """
    gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs)
  def get_file_content(self, uri, auth_token=None, **kwargs):
    """Fetches the file content from the specified uri.

    This method is useful for downloading/exporting a file within environments
    like Google App Engine, where the user does not have the ability to write
    the file to a local disk.

    Args:
      uri: str The full URL to fetch the file contents from.
      auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
          OAuthToken which authorizes this client to edit the user's data.
      kwargs: Other parameters to pass to self.request().

    Returns:
      The binary file content.

    Raises:
      gdata.client.RequestError: on error response from server.
    """
    server_response = self.request('GET', uri, auth_token=auth_token, **kwargs)
    if server_response.status != 200:
      # Python 2 style raise: error info is packed into a dict payload.
      raise gdata.client.RequestError, {'status': server_response.status,
                                        'reason': server_response.reason,
                                        'body': server_response.read()}
    return server_response.read()

  GetFileContent = get_file_content
def _download_file(self, uri, file_path, auth_token=None, **kwargs):
"""Downloads a file to disk from the specified URI.
Note: to download a file in memory, use the GetFileContent() method.
Args:
uri: str The full URL to download the file from.
file_path: str The full path to save the file to.
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the user's data.
kwargs: Other parameters to pass to self.get_file_content().
Raises:
gdata.client.RequestError: on error response from server.
"""
f = open(file_path, 'wb')
try:
f.write(self.get_file_content(uri, auth_token=auth_token, **kwargs))
except gdata.client.RequestError, e:
f.close()
raise e
f.flush()
f.close()
_DownloadFile = _download_file
def get_doclist(self, uri=None, limit=None, auth_token=None, **kwargs):
"""Retrieves the main doclist feed containing the user's items.
Args:
uri: str (optional) A URI to query the doclist feed.
limit: int (optional) A maximum cap for the number of results to
return in the feed. By default, the API returns a maximum of 100
per page. Thus, if you set limit=5000, you will get <= 5000
documents (guarenteed no more than 5000), and will need to follow the
feed's next links (feed.GetNextLink()) to the rest. See
get_everything(). Similarly, if you set limit=50, only <= 50
documents are returned. Note: if the max-results parameter is set in
the uri parameter, it is chosen over a value set for limit.
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the user's data.
kwargs: Other parameters to pass to self.get_feed().
Returns:
gdata.docs.data.DocList feed.
"""
if uri is None:
uri = DOCLIST_FEED_URI
if isinstance(uri, (str, unicode)):
uri = atom.http_core.Uri.parse_uri(uri)
# Add max-results param if it wasn't included in the uri.
if limit is not None and not 'max-results' in uri.query:
uri.query['max-results'] = limit
return self.get_feed(uri, desired_class=gdata.docs.data.DocList,
auth_token=auth_token, **kwargs)
GetDocList = get_doclist
def get_doc(self, resource_id, etag=None, auth_token=None, **kwargs):
"""Retrieves a particular document given by its resource id.
Args:
resource_id: str The document/item's resource id. Example spreadsheet:
'spreadsheet%3A0A1234567890'.
etag: str (optional) The document/item's etag value to be used in a
conditional GET. See http://code.google.com/apis/documents/docs/3.0/
developers_guide_protocol.html#RetrievingCached.
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the user's data.
kwargs: Other parameters to pass to self.get_entry().
Returns:
A gdata.docs.data.DocsEntry object representing the retrieved entry.
Raises:
ValueError if the resource_id is not a valid format.
"""
match = gdata.docs.data.RESOURCE_ID_PATTERN.match(resource_id)
if match is None:
raise ValueError, 'Invalid resource id: %s' % resource_id
return self.get_entry(
DOCLIST_FEED_URI + resource_id, etag=etag,
desired_class=gdata.docs.data.DocsEntry,
auth_token=auth_token, **kwargs)
GetDoc = get_doc
def get_everything(self, uri=None, auth_token=None, **kwargs):
"""Retrieves the user's entire doc list.
The method makes multiple HTTP requests (by following the feed's next links)
in order to fetch the user's entire document list.
Args:
uri: str (optional) A URI to query the doclist feed with.
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the user's data.
kwargs: Other parameters to pass to self.GetDocList().
Returns:
A list of gdata.docs.data.DocsEntry objects representing the retrieved
entries.
"""
if uri is None:
uri = DOCLIST_FEED_URI
feed = self.GetDocList(uri=uri, auth_token=auth_token, **kwargs)
entries = feed.entry
while feed.GetNextLink() is not None:
feed = self.GetDocList(
feed.GetNextLink().href, auth_token=auth_token, **kwargs)
entries.extend(feed.entry)
return entries
GetEverything = get_everything
def get_acl_permissions(self, resource_id, auth_token=None, **kwargs):
"""Retrieves a the ACL sharing permissions for a document.
Args:
resource_id: str The document/item's resource id. Example for pdf:
'pdf%3A0A1234567890'.
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the user's data.
kwargs: Other parameters to pass to self.get_feed().
Returns:
A gdata.docs.data.AclFeed object representing the document's ACL entries.
Raises:
ValueError if the resource_id is not a valid format.
"""
match = gdata.docs.data.RESOURCE_ID_PATTERN.match(resource_id)
if match is None:
raise ValueError, 'Invalid resource id: %s' % resource_id
return self.get_feed(
ACL_FEED_TEMPLATE % resource_id, desired_class=gdata.docs.data.AclFeed,
auth_token=auth_token, **kwargs)
GetAclPermissions = get_acl_permissions
def get_revisions(self, resource_id, auth_token=None, **kwargs):
"""Retrieves the revision history for a document.
Args:
resource_id: str The document/item's resource id. Example for pdf:
'pdf%3A0A1234567890'.
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the user's data.
kwargs: Other parameters to pass to self.get_feed().
Returns:
A gdata.docs.data.RevisionFeed representing the document's revisions.
Raises:
ValueError if the resource_id is not a valid format.
"""
match = gdata.docs.data.RESOURCE_ID_PATTERN.match(resource_id)
if match is None:
raise ValueError, 'Invalid resource id: %s' % resource_id
return self.get_feed(
REVISIONS_FEED_TEMPLATE % resource_id,
desired_class=gdata.docs.data.RevisionFeed, auth_token=auth_token,
**kwargs)
GetRevisions = get_revisions
  def create(self, doc_type, title, folder_or_id=None, writers_can_invite=None,
             auth_token=None, **kwargs):
    """Creates a new item in the user's doclist.

    Args:
      doc_type: str The type of object to create. For example: 'document',
          'spreadsheet', 'folder', 'presentation'.
      title: str A title for the document.
      folder_or_id: gdata.docs.data.DocsEntry or str (optional) Folder entry
          or the resource id of a folder to create the object under. Note: A
          valid resource id for a folder is of the form: folder%3Afolder_id.
      writers_can_invite: bool (optional) False prevents collaborators from
          being able to invite others to edit or view the document.
      auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
          OAuthToken which authorizes this client to edit the user's data.
      kwargs: Other parameters to pass to self.post().

    Returns:
      gdata.docs.data.DocsEntry containing information newly created item.
    """
    entry = gdata.docs.data.DocsEntry(title=atom.data.Title(text=title))
    # The kind category tells the server which type of item to create.
    entry.category.append(gdata.docs.data.make_kind_category(doc_type))
    # Accept either a ready-made WritersCanInvite element or a plain bool
    # (which the API expects serialized as the string 'true'/'false').
    if isinstance(writers_can_invite, gdata.docs.data.WritersCanInvite):
      entry.writers_can_invite = writers_can_invite
    elif isinstance(writers_can_invite, bool):
      entry.writers_can_invite = gdata.docs.data.WritersCanInvite(
          value=str(writers_can_invite).lower())
    uri = DOCLIST_FEED_URI
    if folder_or_id is not None:
      if isinstance(folder_or_id, gdata.docs.data.DocsEntry):
        # Verify that we're uploading the resource into a folder.
        if folder_or_id.get_document_type() == gdata.docs.data.FOLDER_LABEL:
          uri = folder_or_id.content.src
        else:
          raise gdata.client.Error, 'Trying to upload item to a non-folder.'
      else:
        # A bare resource id: build the folder-contents feed URI from it.
        uri = FOLDERS_FEED_TEMPLATE % folder_or_id
    return self.post(entry, uri, auth_token=auth_token, **kwargs)

  Create = create
def copy(self, source_entry, title, auth_token=None, **kwargs):
"""Copies a native Google document, spreadsheet, or presentation.
Note: arbitrary file types and PDFs do not support this feature.
Args:
source_entry: gdata.docs.data.DocsEntry An object representing the source
document/folder.
title: str A title for the new document.
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the user's data.
kwargs: Other parameters to pass to self.post().
Returns:
A gdata.docs.data.DocsEntry of the duplicated document.
"""
entry = gdata.docs.data.DocsEntry(
title=atom.data.Title(text=title),
id=atom.data.Id(text=source_entry.GetSelfLink().href))
return self.post(entry, DOCLIST_FEED_URI, auth_token=auth_token, **kwargs)
Copy = copy
  def move(self, source_entry, folder_entry=None,
           keep_in_folders=False, auth_token=None, **kwargs):
    """Moves an item into a different folder (or to the root document list).

    Args:
      source_entry: gdata.docs.data.DocsEntry An object representing the
          source document/folder.
      folder_entry: gdata.docs.data.DocsEntry (optional) An object
          representing the destination folder. If None, set keep_in_folders
          to True to remove the item from all parent folders.
      keep_in_folders: boolean (optional) If True, the source entry
          is not removed from any existing parent folders it is in.
      auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
          OAuthToken which authorizes this client to edit the user's data.
      kwargs: Other parameters to pass to self.post().

    Returns:
      A gdata.docs.data.DocsEntry of the moved entry or True if just moving
      the item out of all folders (e.g. Move(source_entry)).
    """
    entry = gdata.docs.data.DocsEntry(id=source_entry.id)
    # Remove the item from any folders it is already in.
    # NOTE(review): these deletes are not rolled back if the post below
    # fails, so a failed move can leave the item in fewer folders than
    # before -- confirm callers tolerate this.
    if not keep_in_folders:
      for folder in source_entry.InFolders():
        self.delete(
            '%s/contents/%s' % (folder.href, source_entry.resource_id.text),
            force=True)
    # If we're moving the resource into a folder, verify it is a folder entry.
    if folder_entry is not None:
      if folder_entry.get_document_type() == gdata.docs.data.FOLDER_LABEL:
        return self.post(entry, folder_entry.content.src,
                         auth_token=auth_token, **kwargs)
      else:
        raise gdata.client.Error, 'Trying to move item into a non-folder.'
    # No destination folder: the item was only removed from its parents.
    return True

  Move = move
def upload(self, media, title, folder_or_uri=None, content_type=None,
auth_token=None, **kwargs):
"""Uploads a file to Google Docs.
Args:
media: A gdata.data.MediaSource object containing the file to be
uploaded or a string of the filepath.
title: str The title of the document on the server after being
uploaded.
folder_or_uri: gdata.docs.data.DocsEntry or str (optional) An object with
a link to the folder or the uri to upload the file to.
Note: A valid uri for a folder is of the form:
/feeds/default/private/full/folder%3Afolder_id/contents
content_type: str (optional) The file's mimetype. If not provided, the
one in the media source object is used or the mimetype is inferred
from the filename (if media is a string). When media is a filename,
it is always recommended to pass in a content type.
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the user's data.
kwargs: Other parameters to pass to self.post().
Returns:
A gdata.docs.data.DocsEntry containing information about uploaded doc.
"""
uri = None
if folder_or_uri is not None:
if isinstance(folder_or_uri, gdata.docs.data.DocsEntry):
# Verify that we're uploading the resource into to a folder.
if folder_or_uri.get_document_type() == gdata.docs.data.FOLDER_LABEL:
uri = folder_or_uri.content.src
else:
raise gdata.client.Error, 'Trying to upload item to a non-folder.'
else:
uri = folder_or_uri
else:
uri = DOCLIST_FEED_URI
# Create media source if media is a filepath.
if isinstance(media, (str, unicode)):
mimetype = mimetypes.guess_type(media)[0]
if mimetype is None and content_type is None:
raise ValueError, ("Unknown mimetype. Please pass in the file's "
"content_type")
else:
media = gdata.data.MediaSource(file_path=media,
content_type=content_type)
entry = gdata.docs.data.DocsEntry(title=atom.data.Title(text=title))
return self.post(entry, uri, media_source=media,
desired_class=gdata.docs.data.DocsEntry,
auth_token=auth_token, **kwargs)
Upload = upload
  def download(self, entry_or_id_or_url, file_path, extra_params=None,
               auth_token=None, **kwargs):
    """Downloads a file from the Document List to local disk.

    Note: to download a file in memory, use the GetFileContent() method.

    Args:
      entry_or_id_or_url: gdata.docs.data.DocsEntry or string representing a
          resource id or URL to download the document from (such as the
          content src link).
      file_path: str The full path to save the file to.
      extra_params: dict (optional) A map of any further parameters to
          control how the document is downloaded/exported. For example,
          exporting a spreadsheet as a .csv:
          extra_params={'gid': 0, 'exportFormat': 'csv'}
      auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
          OAuthToken which authorizes this client to edit the user's data.
      kwargs: Other parameters to pass to self._download_file().

    Raises:
      gdata.client.RequestError if the download URL is malformed or the
          server's response was not successful.
      ValueError if entry_or_id_or_url was a resource id for a filetype
          in which the download link cannot be manually constructed (e.g.
          pdf).
    """
    # Resolve the argument to a concrete URL: entry -> its content src,
    # resource id -> constructed content link, anything else -> used as-is.
    if isinstance(entry_or_id_or_url, gdata.docs.data.DocsEntry):
      url = entry_or_id_or_url.content.src
    else:
      if gdata.docs.data.RESOURCE_ID_PATTERN.match(entry_or_id_or_url):
        url = gdata.docs.data.make_content_link_from_resource_id(
            entry_or_id_or_url)
      else:
        url = entry_or_id_or_url
    if extra_params is not None:
      # Only URLs with an /Export? endpoint accept an exportFormat param.
      if 'exportFormat' in extra_params and url.find('/Export?') == -1:
        raise gdata.client.Error, ('This entry type cannot be exported '
                                   'as a different format.')
      # gid selects a single grid and only applies to spreadsheet URLs.
      if 'gid' in extra_params and url.find('spreadsheets') == -1:
        raise gdata.client.Error, 'gid param is not valid for this doc type.'
      # NOTE(review): appending with '&' assumes the url already carries a
      # query string -- confirm this holds for every caller-supplied URL.
      url += '&' + urllib.urlencode(extra_params)
    self._download_file(url, file_path, auth_token=auth_token, **kwargs)

  Download = download
def export(self, entry_or_id_or_url, file_path, gid=None, auth_token=None,
**kwargs):
"""Exports a document from the Document List in a different format.
Args:
entry_or_id_or_url: gdata.docs.data.DocsEntry or string representing a
resource id or URL to download the document from (such as the content
src link).
file_path: str The full path to save the file to. The export
format is inferred from the the file extension.
gid: str (optional) grid id for downloading a single grid of a
spreadsheet. The param should only be used for .csv and .tsv
spreadsheet exports.
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the user's data.
kwargs: Other parameters to pass to self.download().
Raises:
gdata.client.RequestError if the download URL is malformed or the server's
response was not successful.
"""
extra_params = {}
match = gdata.docs.data.FILE_EXT_PATTERN.match(file_path)
if match:
extra_params['exportFormat'] = match.group(1)
if gid is not None:
extra_params['gid'] = gid
self.download(entry_or_id_or_url, file_path, extra_params,
auth_token=auth_token, **kwargs)
Export = export
class DocsQuery(gdata.client.Query):
  """Builds query URLs for the Google Documents List API.

  Every constructor argument is optional and, when set, is emitted as the
  corresponding query parameter by modify_request():

    title / title_exact ('title', 'title-exact'): search terms for a
        document's title, and whether the (case-insensitive) match must be
        exact ('true'/'false').
    opened_min / opened_max ('opened-min', 'opened-max'): RFC 3339 bounds on
        the last time the current user opened a document, e.g.
        '2005-08-09T09:57:00-08:00'.
    edited_min / edited_max ('edited-min', 'edited-max'): RFC 3339 bounds on
        the last time the current user edited a document (content or
        metadata).
    owner: a single email address to match the document owner.
    writer / reader: a single email address or comma separated list of
        addresses that can write to / read the documents.
    show_folders / show_deleted ('showfolders', 'showdeleted'):
        'true'/'false'; whether folders and trashed documents are included.
        Both default to false on the server.
    ocr: 'true'/'false'; attempt OCR on a .jpg, .png, or .gif upload. See
        OCR in the Protocol Guide:
        http://code.google.com/apis/documents/docs/3.0/developers_guide_protocol.html#OCR
    target_language / source_language ('targetLanguage', 'sourceLanguage'):
        document-translation parameters; see Document Translation in the
        Protocol Guide:
        http://code.google.com/apis/documents/docs/3.0/developers_guide_protocol.html#DocumentTranslation
    convert: 'true'/'false'; whether uploads of document types are converted
        to a native Google Docs format. Server default is 'true'.
  """

  # (attribute name, query parameter name) pairs, in the order the
  # parameters are appended to the request.
  _QUERY_PARAMS = (
      ('convert', 'convert'),
      ('title', 'title'),
      ('title_exact', 'title-exact'),
      ('opened_min', 'opened-min'),
      ('opened_max', 'opened-max'),
      ('edited_min', 'edited-min'),
      ('edited_max', 'edited-max'),
      ('owner', 'owner'),
      ('writer', 'writer'),
      ('reader', 'reader'),
      ('show_folders', 'showfolders'),
      ('show_deleted', 'showdeleted'),
      ('ocr', 'ocr'),
      ('target_language', 'targetLanguage'),
      ('source_language', 'sourceLanguage'),
  )

  def __init__(self, title=None, title_exact=None, opened_min=None,
               opened_max=None, edited_min=None, edited_max=None, owner=None,
               writer=None, reader=None, show_folders=None,
               show_deleted=None, ocr=None, target_language=None,
               source_language=None, convert=None, **kwargs):
    """Constructs a query URL for the Google Documents List API.

    See the class docstring for the meaning of each keyword; any extra
    kwargs are forwarded to gdata.client.Query.
    """
    gdata.client.Query.__init__(self, **kwargs)
    self.title = title
    self.title_exact = title_exact
    self.opened_min = opened_min
    self.opened_max = opened_max
    self.edited_min = edited_min
    self.edited_max = edited_max
    self.owner = owner
    self.writer = writer
    self.reader = reader
    self.show_folders = show_folders
    self.show_deleted = show_deleted
    self.ocr = ocr
    self.target_language = target_language
    self.source_language = source_language
    self.convert = convert

  def modify_request(self, http_request):
    """Appends every set (non-None) query parameter to http_request."""
    for attr_name, param_name in self._QUERY_PARAMS:
      gdata.client._add_query_param(param_name, getattr(self, attr_name),
                                    http_request)
    gdata.client.Query.modify_request(self, http_request)

  ModifyRequest = modify_request
| |
# Copyright 2015 Alan Vezina. All rights reserved.
import argparse
from csv import DictReader
import json
import time
from functools import reduce
from email_hunter import EmailHunterClient
# Seconds to sleep between consecutive API calls when processing a CSV file.
THROTTLE = 0.2
def reduce_sources(sources):
    """Join the 'uri' values of a list of source dicts into one string.

    Args:
        sources: iterable of dicts, each carrying a 'uri' key.

    Returns:
        str: the uris joined by ';' ('' for an empty iterable).
    """
    # A generator expression replaces the original append-based reduce();
    # same output, one pass, no intermediate list mutation.
    return ';'.join(source['uri'] for source in sources)
def validate_search_file(reader: DictReader):
    """Check that the CSV behind *reader* has the 'domain' column required by search."""
    if 'domain' in reader.fieldnames:
        return True
    print('domain column is required')
    return False
def validate_generate_file(reader: DictReader):
    """Check that the CSV behind *reader* has the columns generate needs.

    Requires 'domain', 'first_name', and 'last_name' columns and prints one
    message per missing column.

    Returns:
        bool: True only when all required columns are present.
    """
    valid = True
    field_names = reader.fieldnames
    if 'domain' not in field_names:
        print('domain column is required')
        valid = False  # bug fix: the flag was previously never cleared
    if 'first_name' not in field_names:
        print('first_name column is required')
        valid = False
    if 'last_name' not in field_names:
        print('last_name column is required')
        valid = False
    return valid
def validate_exist_file(reader: DictReader):
    """Check that the CSV behind *reader* has the 'email' column required by exist."""
    if 'email' in reader.fieldnames:
        return True
    print('email column is required')
    return False
def search(client: EmailHunterClient, domain, offset, type_, print_header=True, is_file_output=False):
    """Search a domain for email addresses and print one line per result.

    Output is comma-separated when is_file_output is True, otherwise
    tab-separated. The header is printed at most once, just before the first
    result. Request errors are reported on stdout instead of being raised.
    """
    if is_file_output:
        header, line_format = 'domain,email,type,sources', '{},{},{},{}'
    else:
        header, line_format = 'Domain\tEmail\tType\tSources', '{}\t{}\t{}\t{}'
    try:
        results = client.search(domain, offset, type_)
    except Exception as exc:
        print('Error during search request: {}'.format(exc))
        return
    for item in results:
        address = item['value']
        kind = item['type']
        sources = reduce_sources(item['sources'])
        if print_header:
            print(header)
            print_header = False
        print(line_format.format(domain, address, kind, sources))
def generate(client: EmailHunterClient, domain, first_name, last_name, print_header=True, is_file_output=False):
    """Guess the most likely email for a person at a domain and print it.

    Produces one CSV line (with an optional header) when is_file_output is
    True, otherwise a labeled multi-line report. Request errors are reported
    on stdout instead of being raised.
    """
    try:
        email, score = client.generate(domain, first_name, last_name)
    except Exception as exc:
        print('Error during request: {}'.format(exc))
        return
    if is_file_output:
        if print_header:
            print('domain,first_name,last_name,email,score')
        print('{},{},{},{},{}'.format(domain, first_name, last_name, email, score))
        return
    print('Domain:\t{}'.format(domain))
    print('First Name:\t{}'.format(first_name))
    print('Last Name:\t{}'.format(last_name))
    print('Email:\t{}'.format(email))
    print('Score:\t{}'.format(score))
def exist(client: EmailHunterClient, email, print_header=True, is_file_output=False):
    """Check whether an email address exists and print the result.

    Produces one CSV line (with an optional header) when is_file_output is
    True, otherwise a labeled report with the raw sources as pretty JSON.
    Request errors are reported on stdout instead of being raised.
    """
    try:
        exist_, sources = client.exist(email)
    except Exception as exc:
        print('Error during exist request: {}'.format(exc))
        return
    if is_file_output:
        if print_header:
            print('email,exist,sources')
        print('{},{},{}'.format(email, exist_, reduce_sources(sources)))
    else:
        print('Email:\t{}'.format(email))
        print('Exist:\t{}'.format(exist_))
        print('Sources:\t{}'.format(json.dumps(sources, indent=2)))
def handle_search_file(client: EmailHunterClient, reader: DictReader):
    """Run the search command once per CSV row, throttling between requests."""
    if not validate_search_file(reader):
        return
    for index, row in enumerate(reader):
        # Only the first row prints the CSV header.
        search(client, row['domain'].strip(), row.get('offset', 0),
               row.get('type'), print_header=(index == 0), is_file_output=True)
        time.sleep(THROTTLE)
def handle_generate_file(client: EmailHunterClient, reader: DictReader):
    """Run the generate command once per CSV row, throttling between requests."""
    if not validate_generate_file(reader):
        return
    for index, row in enumerate(reader):
        # Only the first row prints the CSV header.
        generate(client, row['domain'].strip(), row['first_name'].strip(),
                 row['last_name'].strip(), print_header=(index == 0),
                 is_file_output=True)
        time.sleep(THROTTLE)
def handle_exist_file(client: EmailHunterClient, reader: DictReader):
    """Run the exist command once per CSV row, throttling between requests."""
    if not validate_exist_file(reader):
        return
    for index, row in enumerate(reader):
        # Only the first row prints the CSV header.
        exist(client, row['email'], print_header=(index == 0),
              is_file_output=True)
        time.sleep(THROTTLE)
def handle_cli(command, api_key, domain=None, offset=0, type=None, first_name=None, last_name=None, email=None,
               file=None):
    """Dispatch a CLI command ('search', 'generate', or 'exist') to the API.

    Parameter names mirror the argparse destinations (including the
    builtin-shadowing 'type' and 'file') because main() invokes this with
    **vars(args). When file is given it is opened as a CSV and every row is
    processed; otherwise the scalar arguments drive a single request.
    """
    client = EmailHunterClient(api_key)
    reader = None
    if file is not None:
        file = open(file)
        reader = DictReader(file)
    try:
        if command == 'search':
            if file:
                handle_search_file(client, reader)
            elif domain:
                print('Searching {} for emails'.format(domain))
                if offset:
                    print('Offset: {}'.format(offset))
                if type:
                    print('Type: {}'.format(type))
                search(client, domain, offset, type)
            else:
                # Bug fix: the message previously referred to the generate
                # command even though this is the search branch.
                print('domain is required when using the search command')
        elif command == 'generate':
            if file:
                handle_generate_file(client, reader)
            else:
                valid = True
                if not domain:
                    print('domain is required when using the generate command')
                    valid = False  # bug fix: flag was never cleared, so
                if not first_name:  # generate() ran with missing arguments
                    print('first_name is required when using the generate command')
                    valid = False
                if not last_name:
                    print('last_name is required when using the generate command')
                    valid = False
                if valid:
                    print('Finding email for {}, {}, {}'.format(domain, first_name, last_name))
                    generate(client, domain, first_name, last_name)
        elif command == 'exist':
            if file:
                handle_exist_file(client, reader)
            elif email:
                print('Checking if {} exists'.format(email))
                exist(client, email)
            else:
                print('email is required when using the exist command')
        else:
            print('Invalid command {}'.format(command))
    finally:
        # Bug fix: close the CSV file even when a handler raises.
        if file:
            file.close()
def main():
    """Parse the command-line arguments and dispatch them to handle_cli()."""
    parser = argparse.ArgumentParser(description='Email Hunter CLI')
    file_help = 'Path to a CSV to be used with the specified command. CSV must have a column for each argument used'
    # (name, help) pairs; the two positionals come first and stay in order.
    for name, help_text in (
            ('command', 'The API command to run. Choices: search, exist, or generate'),
            ('api_key', 'The API key for your account'),
            ('--domain', 'Required for search and generate commands'),
            ('--offset', 'Optional, used with search command.'),
            ('--type', 'Optional, used with search command'),
            ('--first_name', 'Required for generate command'),
            ('--last_name', 'Required for generate command'),
            ('--email', 'Required for exist command'),
            ('--file', file_help)):
        parser.add_argument(name, help=help_text)
    handle_cli(**vars(parser.parse_args()))
if __name__ == '__main__':
    # Script entry point; keeps the module importable without side effects.
    main()
| |
# LayerMapping -- A Django Model/OGR Layer Mapping Utility
"""
The LayerMapping class provides a way to map the contents of OGR
vector files (e.g. SHP files) to Geographic-enabled Django models.
For more information, please consult the GeoDjango documentation:
http://geodjango.org/docs/layermapping.html
"""
import sys
from datetime import date, datetime
from decimal import Decimal
from django.core.exceptions import ObjectDoesNotExist
from django.db import connections, DEFAULT_DB_ALIAS
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.gdal import CoordTransform, DataSource, \
OGRException, OGRGeometry, OGRGeomType, SpatialReference
from django.contrib.gis.gdal.field import \
OFTDate, OFTDateTime, OFTInteger, OFTReal, OFTString, OFTTime
from django.db import models, transaction
from django.contrib.localflavor.us.models import USStateField
# LayerMapping exceptions.
class LayerMapError(Exception):
    """Base class for all LayerMapping-specific errors."""

class InvalidString(LayerMapError):
    """LayerMapping error involving a string value."""

class InvalidDecimal(LayerMapError):
    """LayerMapping error involving a decimal value."""

class InvalidInteger(LayerMapError):
    """LayerMapping error involving an integer value."""

class MissingForeignKey(LayerMapError):
    """LayerMapping error involving a foreign key relation."""
class LayerMapping(object):
    "A class that maps OGR Layers to GeoDjango Models."

    # Acceptable 'base' types for a multi-geometry type, keyed by the OGR
    # geometry type number of the corresponding single-geometry type.
    MULTI_TYPES = {1 : OGRGeomType('MultiPoint'),
                   2 : OGRGeomType('MultiLineString'),
                   3 : OGRGeomType('MultiPolygon'),
                   OGRGeomType('Point25D').num : OGRGeomType('MultiPoint25D'),
                   OGRGeomType('LineString25D').num : OGRGeomType('MultiLineString25D'),
                   OGRGeomType('Polygon25D').num : OGRGeomType('MultiPolygon25D'),
                   }
    # Acceptable Django field types and corresponding acceptable OGR
    # counterparts (a tuple means any of the listed OGR types is accepted).
    FIELD_TYPES = {
        models.AutoField : OFTInteger,
        models.IntegerField : (OFTInteger, OFTReal, OFTString),
        models.FloatField : (OFTInteger, OFTReal),
        models.DateField : OFTDate,
        models.DateTimeField : OFTDateTime,
        models.EmailField : OFTString,
        models.TimeField : OFTTime,
        models.DecimalField : (OFTInteger, OFTReal),
        models.CharField : OFTString,
        models.SlugField : OFTString,
        models.TextField : OFTString,
        models.URLField : OFTString,
        USStateField : OFTString,
        models.BigIntegerField : (OFTInteger, OFTReal, OFTString),
        models.SmallIntegerField : (OFTInteger, OFTReal, OFTString),
        models.PositiveSmallIntegerField : (OFTInteger, OFTReal, OFTString),
        }
    # The acceptable transaction modes, mapped to the transaction decorator
    # that implements each mode.
    TRANSACTION_MODES = {'autocommit' : transaction.autocommit,
                         'commit_on_success' : transaction.commit_on_success,
                         }
def __init__(self, model, data, mapping, layer=0,
source_srs=None, encoding=None,
transaction_mode='commit_on_success',
transform=True, unique=None, using=DEFAULT_DB_ALIAS):
"""
A LayerMapping object is initialized using the given Model (not an instance),
a DataSource (or string path to an OGR-supported data file), and a mapping
dictionary. See the module level docstring for more details and keyword
argument usage.
"""
# Getting the DataSource and the associated Layer.
if isinstance(data, basestring):
self.ds = DataSource(data)
else:
self.ds = data
self.layer = self.ds[layer]
self.using = using
self.spatial_backend = connections[using].ops
# Setting the mapping & model attributes.
self.mapping = mapping
self.model = model
# Checking the layer -- intitialization of the object will fail if
# things don't check out before hand.
self.check_layer()
# Getting the geometry column associated with the model (an
# exception will be raised if there is no geometry column).
if self.spatial_backend.mysql:
transform = False
else:
self.geo_field = self.geometry_field()
# Checking the source spatial reference system, and getting
# the coordinate transformation object (unless the `transform`
# keyword is set to False)
if transform:
self.source_srs = self.check_srs(source_srs)
self.transform = self.coord_transform()
else:
self.transform = transform
# Setting the encoding for OFTString fields, if specified.
if encoding:
# Making sure the encoding exists, if not a LookupError
# exception will be thrown.
from codecs import lookup
lookup(encoding)
self.encoding = encoding
else:
self.encoding = None
if unique:
self.check_unique(unique)
transaction_mode = 'autocommit' # Has to be set to autocommit.
self.unique = unique
else:
self.unique = None
# Setting the transaction decorator with the function in the
# transaction modes dictionary.
if transaction_mode in self.TRANSACTION_MODES:
self.transaction_decorator = self.TRANSACTION_MODES[transaction_mode]
self.transaction_mode = transaction_mode
else:
raise LayerMapError('Unrecognized transaction mode: %s' % transaction_mode)
if using is None:
pass
#### Checking routines used during initialization ####
def check_fid_range(self, fid_range):
"This checks the `fid_range` keyword."
if fid_range:
if isinstance(fid_range, (tuple, list)):
return slice(*fid_range)
elif isinstance(fid_range, slice):
return fid_range
else:
raise TypeError
else:
return None
    def check_layer(self):
        """
        This checks the Layer metadata, and ensures that it is compatible
        with the mapping information and model. Unlike previous revisions,
        there is no need to increment through each feature in the Layer.

        Side effects: populates `self.fields` (mapping of model field name to
        either a model field instance or, for ForeignKey mappings, the related
        model class), and sets `self.geom_field` / `self.coord_dim` when a
        geometry field is mapped.  Raises LayerMapError for any mismatch.
        """
        # The geometry field of the model is set here.
        # TODO: Support more than one geometry field / model. However, this
        # depends on the GDAL Driver in use.
        self.geom_field = False
        self.fields = {}
        # Getting lists of the field names and the field types available in
        # the OGR Layer.
        ogr_fields = self.layer.fields
        ogr_field_types = self.layer.field_types
        # Function for determining if the OGR mapping field is in the Layer.
        def check_ogr_fld(ogr_map_fld):
            # Returns the index of `ogr_map_fld` within the Layer's fields,
            # raising LayerMapError if the field does not exist there.
            try:
                idx = ogr_fields.index(ogr_map_fld)
            except ValueError:
                raise LayerMapError('Given mapping OGR field "%s" not found in OGR Layer.' % ogr_map_fld)
            return idx
        # No need to increment through each feature in the model, simply check
        # the Layer metadata against what was given in the mapping dictionary.
        for field_name, ogr_name in self.mapping.items():
            # Ensuring that a corresponding field exists in the model
            # for the given field name in the mapping.
            try:
                model_field = self.model._meta.get_field(field_name)
            except models.fields.FieldDoesNotExist:
                raise LayerMapError('Given mapping field "%s" not in given Model fields.' % field_name)
            # Getting the string name for the Django field class (e.g., 'PointField').
            fld_name = model_field.__class__.__name__
            if isinstance(model_field, GeometryField):
                if self.geom_field:
                    raise LayerMapError('LayerMapping does not support more than one GeometryField per model.')
                # Getting the coordinate dimension of the geometry field.
                coord_dim = model_field.dim
                try:
                    # 3D geometry fields map to the OGR '25D' geometry types.
                    if coord_dim == 3:
                        gtype = OGRGeomType(ogr_name + '25D')
                    else:
                        gtype = OGRGeomType(ogr_name)
                except OGRException:
                    raise LayerMapError('Invalid mapping for GeometryField "%s".' % field_name)
                # Making sure that the OGR Layer's Geometry is compatible.
                ltype = self.layer.geom_type
                if not (ltype.name.startswith(gtype.name) or self.make_multi(ltype, model_field)):
                    raise LayerMapError('Invalid mapping geometry; model has %s%s, '
                                        'layer geometry type is %s.' %
                                        (fld_name, (coord_dim == 3 and '(dim=3)') or '', ltype))
                # Setting the `geom_field` attribute w/the name of the model field
                # that is a Geometry.  Also setting the coordinate dimension
                # attribute.
                self.geom_field = field_name
                self.coord_dim = coord_dim
                fields_val = model_field
            elif isinstance(model_field, models.ForeignKey):
                if isinstance(ogr_name, dict):
                    # Is every given related model mapping field in the Layer?
                    rel_model = model_field.rel.to
                    for rel_name, ogr_field in ogr_name.items():
                        idx = check_ogr_fld(ogr_field)
                        try:
                            rel_field = rel_model._meta.get_field(rel_name)
                        except models.fields.FieldDoesNotExist:
                            # NOTE(review): `rel_model` is a class, so
                            # `rel_model.__class__.__name__` yields 'ModelBase';
                            # `rel_model.__name__` was likely intended here.
                            raise LayerMapError('ForeignKey mapping field "%s" not in %s fields.' %
                                                (rel_name, rel_model.__class__.__name__))
                    # For FK mappings the related model class itself is stored.
                    fields_val = rel_model
                else:
                    raise TypeError('ForeignKey mapping must be of dictionary type.')
            else:
                # Is the model field type supported by LayerMapping?
                if not model_field.__class__ in self.FIELD_TYPES:
                    raise LayerMapError('Django field type "%s" has no OGR mapping (yet).' % fld_name)
                # Is the OGR field in the Layer?
                idx = check_ogr_fld(ogr_name)
                ogr_field = ogr_field_types[idx]
                # Can the OGR field type be mapped to the Django field type?
                if not issubclass(ogr_field, self.FIELD_TYPES[model_field.__class__]):
                    raise LayerMapError('OGR field "%s" (of type %s) cannot be mapped to Django %s.' %
                                        (ogr_field, ogr_field.__name__, fld_name))
                fields_val = model_field
            self.fields[field_name] = fields_val
def check_srs(self, source_srs):
"Checks the compatibility of the given spatial reference object."
if isinstance(source_srs, SpatialReference):
sr = source_srs
elif isinstance(source_srs, self.spatial_backend.spatial_ref_sys()):
sr = source_srs.srs
elif isinstance(source_srs, (int, basestring)):
sr = SpatialReference(source_srs)
else:
# Otherwise just pulling the SpatialReference from the layer
sr = self.layer.srs
if not sr:
raise LayerMapError('No source reference system defined.')
else:
return sr
def check_unique(self, unique):
"Checks the `unique` keyword parameter -- may be a sequence or string."
if isinstance(unique, (list, tuple)):
# List of fields to determine uniqueness with
for attr in unique:
if not attr in self.mapping: raise ValueError
elif isinstance(unique, basestring):
# Only a single field passed in.
if unique not in self.mapping: raise ValueError
else:
raise TypeError('Unique keyword argument must be set with a tuple, list, or string.')
#### Keyword argument retrieval routines ####
def feature_kwargs(self, feat):
"""
Given an OGR Feature, this will return a dictionary of keyword arguments
for constructing the mapped model.
"""
# The keyword arguments for model construction.
kwargs = {}
# Incrementing through each model field and OGR field in the
# dictionary mapping.
for field_name, ogr_name in self.mapping.items():
model_field = self.fields[field_name]
if isinstance(model_field, GeometryField):
# Verify OGR geometry.
try:
val = self.verify_geom(feat.geom, model_field)
except OGRException:
raise LayerMapError('Could not retrieve geometry from feature.')
elif isinstance(model_field, models.base.ModelBase):
# The related _model_, not a field was passed in -- indicating
# another mapping for the related Model.
val = self.verify_fk(feat, model_field, ogr_name)
else:
# Otherwise, verify OGR Field type.
val = self.verify_ogr_field(feat[ogr_name], model_field)
# Setting the keyword arguments for the field name with the
# value obtained above.
kwargs[field_name] = val
return kwargs
def unique_kwargs(self, kwargs):
"""
Given the feature keyword arguments (from `feature_kwargs`) this routine
will construct and return the uniqueness keyword arguments -- a subset
of the feature kwargs.
"""
if isinstance(self.unique, basestring):
return {self.unique : kwargs[self.unique]}
else:
return dict((fld, kwargs[fld]) for fld in self.unique)
#### Verification routines used in constructing model keyword arguments. ####
def verify_ogr_field(self, ogr_field, model_field):
"""
Verifies if the OGR Field contents are acceptable to the Django
model field. If they are, the verified value is returned,
otherwise the proper exception is raised.
"""
if (isinstance(ogr_field, OFTString) and
isinstance(model_field, (models.CharField, models.TextField))):
if self.encoding:
# The encoding for OGR data sources may be specified here
# (e.g., 'cp437' for Census Bureau boundary files).
val = unicode(ogr_field.value, self.encoding)
else:
val = ogr_field.value
if len(val) > model_field.max_length:
raise InvalidString('%s model field maximum string length is %s, given %s characters.' %
(model_field.name, model_field.max_length, len(val)))
elif isinstance(ogr_field, OFTReal) and isinstance(model_field, models.DecimalField):
try:
# Creating an instance of the Decimal value to use.
d = Decimal(str(ogr_field.value))
except:
raise InvalidDecimal('Could not construct decimal from: %s' % ogr_field.value)
# Getting the decimal value as a tuple.
dtup = d.as_tuple()
digits = dtup[1]
d_idx = dtup[2] # index where the decimal is
# Maximum amount of precision, or digits to the left of the decimal.
max_prec = model_field.max_digits - model_field.decimal_places
# Getting the digits to the left of the decimal place for the
# given decimal.
if d_idx < 0:
n_prec = len(digits[:d_idx])
else:
n_prec = len(digits) + d_idx
# If we have more than the maximum digits allowed, then throw an
# InvalidDecimal exception.
if n_prec > max_prec:
raise InvalidDecimal('A DecimalField with max_digits %d, decimal_places %d must round to an absolute value less than 10^%d.' %
(model_field.max_digits, model_field.decimal_places, max_prec))
val = d
elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(model_field, models.IntegerField):
# Attempt to convert any OFTReal and OFTString value to an OFTInteger.
try:
val = int(ogr_field.value)
except:
raise InvalidInteger('Could not construct integer from: %s' % ogr_field.value)
else:
val = ogr_field.value
return val
def verify_fk(self, feat, rel_model, rel_mapping):
"""
Given an OGR Feature, the related model and its dictionary mapping,
this routine will retrieve the related model for the ForeignKey
mapping.
"""
# TODO: It is expensive to retrieve a model for every record --
# explore if an efficient mechanism exists for caching related
# ForeignKey models.
# Constructing and verifying the related model keyword arguments.
fk_kwargs = {}
for field_name, ogr_name in rel_mapping.items():
fk_kwargs[field_name] = self.verify_ogr_field(feat[ogr_name], rel_model._meta.get_field(field_name))
# Attempting to retrieve and return the related model.
try:
return rel_model.objects.get(**fk_kwargs)
except ObjectDoesNotExist:
raise MissingForeignKey('No ForeignKey %s model found with keyword arguments: %s' % (rel_model.__name__, fk_kwargs))
    def verify_geom(self, geom, model_field):
        """
        Verifies the geometry -- will construct and return a GeometryCollection
        if necessary (for example if the model field is MultiPolygonField while
        the mapped shapefile only contains Polygons).

        Returns the WKT of the (possibly promoted and/or transformed) geometry.
        """
        # Downgrade a 3D geom to a 2D one, if necessary.
        # NOTE: this mutates the passed-in OGR geometry in place.
        if self.coord_dim != geom.coord_dim:
            geom.coord_dim = self.coord_dim
        if self.make_multi(geom.geom_type, model_field):
            # Constructing a multi-geometry type to contain the single geometry
            multi_type = self.MULTI_TYPES[geom.geom_type.num]
            g = OGRGeometry(multi_type)
            g.add(geom)
        else:
            g = geom
        # Transforming the geometry with our Coordinate Transformation object,
        # but only if the class variable `transform` is set w/a CoordTransform
        # object.
        if self.transform: g.transform(self.transform)
        # Returning the WKT of the geometry.
        return g.wkt
#### Other model methods ####
    def coord_transform(self):
        """
        Returns the CoordTransform object mapping the source SRS to the SRS
        of the model's geometry column (looked up in the spatial_ref_sys
        table via the geometry field's SRID).
        """
        SpatialRefSys = self.spatial_backend.spatial_ref_sys()
        try:
            # Getting the target spatial reference system
            target_srs = SpatialRefSys.objects.get(srid=self.geo_field.srid).srs
            # Creating the CoordTransform object
            return CoordTransform(self.source_srs, target_srs)
        except Exception, msg:
            # Any failure (missing spatial_ref_sys row, GDAL error, ...) is
            # re-raised as a LayerMapError carrying the underlying message.
            raise LayerMapError('Could not translate between the data source and model geometry: %s' % msg)
def geometry_field(self):
"Returns the GeometryField instance associated with the geographic column."
# Use the `get_field_by_name` on the model's options so that we
# get the correct field instance if there's model inheritance.
opts = self.model._meta
fld, model, direct, m2m = opts.get_field_by_name(self.geom_field)
return fld
def make_multi(self, geom_type, model_field):
"""
Given the OGRGeomType for a geometry and its associated GeometryField,
determine whether the geometry should be turned into a GeometryCollection.
"""
return (geom_type.num in self.MULTI_TYPES and
model_field.__class__.__name__ == 'Multi%s' % geom_type.django)
    def save(self, verbose=False, fid_range=False, step=False,
             progress=False, silent=False, stream=sys.stdout, strict=False):
        """
        Saves the contents from the OGR DataSource Layer into the database
        according to the mapping dictionary given at initialization.

        Keyword Parameters:
         verbose:
           If set, information will be printed subsequent to each model save
           executed on the database.

         fid_range:
           May be set with a slice or tuple of (begin, end) feature ID's to map
           from the data source.  In other words, this keyword enables the user
           to selectively import a subset range of features in the geographic
           data source.

         step:
           If set with an integer, transactions will occur at every step
           interval.  For example, if step=1000, a commit would occur after
           the 1,000th feature, the 2,000th feature etc.

         progress:
           When this keyword is set, status information will be printed giving
           the number of features processed and successfully saved.  By default,
           progress information will be printed every 1000 features processed,
           however, this default may be overridden by setting this keyword with an
           integer for the desired interval.

         stream:
           Status information will be written to this file handle.  Defaults to
           using `sys.stdout`, but any object with a `write` method is supported.

         silent:
           By default, non-fatal error notifications are printed to stdout, but
           this keyword may be set to disable these notifications.

         strict:
           Execution of the model mapping will cease upon the first error
           encountered.  The default behavior is to attempt to continue.
        """
        # Getting the default Feature ID range.
        default_range = self.check_fid_range(fid_range)
        # Setting the progress interval, if requested.
        if progress:
            if progress is True or not isinstance(progress, int):
                progress_interval = 1000
            else:
                progress_interval = progress
        # Defining the 'real' save method, utilizing the transaction
        # decorator created during initialization.
        @self.transaction_decorator
        def _save(feat_range=default_range, num_feat=0, num_saved=0):
            # Processes (a slice of) the layer's features, returning the
            # cumulative (num_saved, num_feat) counters.
            if feat_range:
                layer_iter = self.layer[feat_range]
            else:
                layer_iter = self.layer
            for feat in layer_iter:
                num_feat += 1
                # Getting the keyword arguments
                try:
                    kwargs = self.feature_kwargs(feat)
                except LayerMapError, msg:
                    # Something borked the validation
                    if strict: raise
                    elif not silent:
                        stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
                else:
                    # Constructing the model using the keyword args
                    is_update = False
                    if self.unique:
                        # If we want unique models on a particular field, handle the
                        # geometry appropriately.
                        try:
                            # Getting the keyword arguments and retrieving
                            # the unique model.
                            u_kwargs = self.unique_kwargs(kwargs)
                            m = self.model.objects.using(self.using).get(**u_kwargs)
                            is_update = True
                            # Getting the geometry (in OGR form), creating
                            # one from the kwargs WKT, adding in additional
                            # geometries, and update the attribute with the
                            # just-updated geometry WKT.
                            geom = getattr(m, self.geom_field).ogr
                            new = OGRGeometry(kwargs[self.geom_field])
                            for g in new: geom.add(g)
                            setattr(m, self.geom_field, geom.wkt)
                        except ObjectDoesNotExist:
                            # No unique model exists yet, create.
                            m = self.model(**kwargs)
                    else:
                        m = self.model(**kwargs)
                    try:
                        # Attempting to save.
                        m.save(using=self.using)
                        num_saved += 1
                        if verbose: stream.write('%s: %s\n' % (is_update and 'Updated' or 'Saved', m))
                    except SystemExit:
                        # Never swallow an explicit exit request.
                        raise
                    except Exception, msg:
                        if self.transaction_mode == 'autocommit':
                            # Rolling back the transaction so that other model saves
                            # will work.
                            transaction.rollback_unless_managed()
                        if strict:
                            # Bailing out if the `strict` keyword is set.
                            if not silent:
                                stream.write('Failed to save the feature (id: %s) into the model with the keyword arguments:\n' % feat.fid)
                                stream.write('%s\n' % kwargs)
                            raise
                        elif not silent:
                            stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))
                # Printing progress information, if requested.
                if progress and num_feat % progress_interval == 0:
                    stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))
            # Only used for status output purposes -- incremental saving uses the
            # values returned here.
            return num_saved, num_feat
        nfeat = self.layer.num_feat
        if step and isinstance(step, int) and step < nfeat:
            # Incremental saving is requested at the given interval (step)
            if default_range:
                raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
            beg, num_feat, num_saved = (0, 0, 0)
            indices = range(step, nfeat, step)
            n_i = len(indices)
            for i, end in enumerate(indices):
                # Constructing the slice to use for this step; the last slice is
                # special (e.g, [100:] instead of [90:100]).
                if i+1 == n_i: step_slice = slice(beg, None)
                else: step_slice = slice(beg, end)
                try:
                    num_feat, num_saved = _save(step_slice, num_feat, num_saved)
                    beg = end
                except:
                    # Bare except is acceptable here: the failing slice is
                    # reported and the exception immediately re-raised.
                    stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
                    raise
        else:
            # Otherwise, just calling the previously defined _save() function.
            _save()
| |
from JumpScale import j
import random
from JumpScale.tools.cuisine.CuisinePackage import LOCK_NAME, LOCK_TIMEOUT
from JumpScale.sal.fs.SystemFS import FileLock
app = j.tools.cuisine._getBaseAppClass()
class CuisineDockerCompose(app):
    """
    Cuisine module that installs docker / docker-compose on the target host
    and creates/manages containers through the `jsdocker` command-line tool.
    """

    NAME = "docker-compose"

    def __init__(self, executor, cuisine):
        self._executor = executor
        self._cuisine = cuisine

    def _init(self):
        """Make sure the docker daemon is running on the target."""
        try:
            self._cuisine.core.run("service docker start")
        except Exception as e:
            # In some containerized environments the daemon is usable but the
            # service script complains about cgroups -- treat as success.
            if 'cgroup is already mounted' in e.__str__():
                return
            raise e

    def install(self, reset=False):
        """
        Install docker and docker-compose, then start the daemon.
        No-op when already installed, unless `reset` is True.
        """
        if reset is False and self.isInstalled():
            return
        if self._cuisine.core.isUbuntu:
            # The get.docker.com installer needs a sane locale.
            self._cuisine.bash.environSet('LC_ALL', 'C.UTF-8')
            self._cuisine.bash.environSet('LANG', 'C.UTF-8')
            if not self._cuisine.core.command_check('docker'):
                C = """
                wget -qO- https://get.docker.com/ | sh
                """
                with FileLock(LOCK_NAME, locktimeout=LOCK_TIMEOUT):
                    self._cuisine.core.run(C)
            if not self._cuisine.core.command_check('docker-compose'):
                C = """
                curl -L https://github.com/docker/compose/releases/download/1.8.0-rc1/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
                chmod +x /usr/local/bin/docker-compose
                """
                with FileLock(LOCK_NAME, locktimeout=LOCK_TIMEOUT):
                    self._cuisine.core.run(C)
        if self._cuisine.core.isArch:
            self._cuisine.package.install("docker")
            self._cuisine.package.install("docker-compose")
        self._init()

    def ubuntuBuild(self, push=False):
        """Build (and optionally push) the jumpscale/ubuntu1604 image."""
        self._init()
        dest = self._cuisine.development.git.pullRepo('https://github.com/Jumpscale/dockers.git', ssh=False)
        path = self._cuisine.core.joinpaths(dest, 'js8/x86_64/01_ubuntu1604')
        C = """
        set -ex
        cd %s
        docker build -t jumpscale/ubuntu1604 --no-cache .
        """ % path
        self._cuisine.core.execute_bash(C)
        if push:
            C = """
            set -ex
            cd %s
            docker push jumpscale/ubuntu1604
            """ % path
            self._cuisine.core.execute_bash(C)

    def resetPasswd(self, dockerCuisineObject):
        # change passwd to a random GUID
        dockerCuisineObject.user.passwd("root", j.data.idgenerator.generateGUID())

    def dockerStart(self, name="ubuntu1", image='jumpscale/ubuntu1604_all',
                    ports='', volumes=None, pubkey=None, weave=False, ssh=True, weavePeer=None):
        """
        will return dockerCuisineObj: is again a cuisine obj on which all kinds of actions can be executed
        @param ports e.g. 2022,2023
        @param volumes e.g. format: "/var/insidemachine:/var/inhost # /var/1:/var/1
        @param ports e.g. format "22:8022 80:8080" the first arg e.g. 22 is the port in the container
        @param weave If weave is available on node, weave will be used by default. To make sure weave is available, set to True
        """
        if weave:
            self._cuisine.systemservices.weave.install(start=True, peer=weavePeer)
        self._init()
        if ssh and not '22:' in ports:
            port = "2202"
            while port in ports:
                # BUGFIX: random.randint returns an int; cast to str so the
                # `in` test and the formatting below keep working on a retry
                # (the original compared an int against a string).
                port = str(random.randint(1000, 9999))
            # BUGFIX: keep entries space-separated per the documented
            # "22:8022 80:8080" format instead of concatenating directly onto
            # any pre-existing mapping.
            if ports:
                ports += ' '
            ports += '22:%s' % port
        cmd = "jsdocker create --name {name} --image {image}".format(name=name, image=image)
        if pubkey:
            cmd += " --pubkey '%s'" % pubkey
        if ports:
            cmd += " --ports '%s'" % ports
        if volumes:
            cmd += " --volumes '%s'" % volumes
        # if aydofs:
        #     cmd += " --aysfs"
        self._cuisine.core.run(cmd, profile=True)
        cmd = "jsdocker list --name {name} --parsable".format(name=name)
        _, out, _ = self._cuisine.core.run(cmd, profile=True)
        info = j.data.serializer.json.loads(out)
        port = info[0]["port"]
        # BUGFIX: inspect the container we just created instead of the
        # hard-coded name 'ahah'.
        _, out, _ = self._cuisine.core.run("docker inspect %s | grep \"IPAddress\"| cut -d '\"' -f 4 " % name)
        host = out.splitlines()[0]
        dockerexecutor = Cuisinedockerobj(name, host, "22", self._cuisine)
        cuisinedockerobj = j.tools.cuisine.get(dockerexecutor)
        # NEED TO MAKE SURE WE CAN GET ACCESS TO THIS DOCKER WITHOUT OPENING PORTS; we know can using docker exec
        # ON DOCKER HOST (which is current cuisine)
        return cuisinedockerobj

    def getDocker(self, name):
        # Not implemented yet.
        pass
class Cuisinedockerobj:
    """
    Minimal executor-like object addressing a docker container; commands are
    run through `docker exec` on the host cuisine.
    """

    def __init__(self, name, addr, port, cuisineDockerHost):
        self.id = 'docker:%s:%s' % (cuisineDockerHost.id, name)
        self.addr = addr
        self.port = port
        self.name = name
        self.login = "root"
        self._cuisineDockerHost = cuisineDockerHost
        # Lazily populated by the `cuisine` property.
        self._cuisine = None

    def execute(self, cmds, die=True, checkok=None, asynchronous=False, showout=True, timeout=0, env=None):
        """
        Run `cmds` inside the container via `docker exec` on the host cuisine.

        BUGFIX: the parameter previously named `async` became a reserved
        keyword in Python 3.7, making this file a SyntaxError; it was renamed
        to `asynchronous` (still accepted, still unused, positionally
        compatible).  `timeout` is likewise accepted but not forwarded.
        """
        if env is None:
            # Avoid the shared mutable default argument `env={}`.
            env = {}
        return self._cuisineDockerHost.core.run("docker exec %s bash -c '%s'" % (self.name, cmds.replace("'", "'\"'\"'")),
                                                die=die, checkok=checkok, showout=showout, env=env)

    executeRaw = execute

    @property
    def cuisine(self):
        # BUGFIX: cache the cuisine object; the original returned a fresh one
        # on every access and never populated self._cuisine.
        if not self._cuisine:
            self._cuisine = j.tools.cuisine.get(self)
        return self._cuisine
# def archBuild(self):
# C = """
# FROM base/archlinux:latest
#
# def archBuild(self):
# C = """
# FROM base/archlinux:latest
#
# MAINTAINER "Matthias Adler" <macedigital@gmail.com> / kristof de spiegeleer
#
# RUN pacman -S --debug --noconfirm archlinux-keyring
#
# RUN pacman -S --needed --noconfirm git iproute2 iputils procps-ng tar which licenses util-linux
# RUN pacman -S --noconfirm curl wget ssh mc
#
#
# # remove unneeded pkgs, update and clean cache
# # RUN pacman -Rss --noconfirm cronie device-mapper dhcpcd diffutils file nano vi texinfo usbutils gcc pinentry; \
#
# # RUN pacman -Syu --force --noconfirm; pacman -Scc --noconfirm
#
# # remove man pages and locale data
# RUN rm -rf /archlinux/usr/share/locale && rm -rf /archlinux/usr/share/man
#
# # clean unneeded services
# RUN (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \
# rm -f /lib/systemd/system/multi-user.target.wants/*;\
# rm -f /lib/systemd/system/graphical.target.wants/*; \
# rm -f /etc/systemd/system/*.wants/*;\
# rm -f /lib/systemd/system/local-fs.target.wants/*; \
# rm -f /lib/systemd/system/sockets.target.wants/*udev*; \
# rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \
# rm -f /lib/systemd/system/basic.target.wants/*;\
# rm -f /lib/systemd/system/anaconda.target.wants/*;
#
# # switch default target from graphical to multi-user
# RUN systemctl set-default multi-user.target
#
# # systemd inside a container
# ENV container docker
# VOLUME [ "/sys/fs/cgroup" ]
#
# CMD ["/usr/sbin/init"]
#
# """
# self._cuisine.core.run("rm -rf $tmpDir/docker;mkdir $tmpDir/docker")
# self._cuisine.core.file_write("$tmpDir/docker/Dockerfile", C)
#
# C = """
# set -ex
# cd $tmpDir/docker
# docker build -t arch .
# """
# self._cuisine.core.execute_bash(C)
| |
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2012-2013 Esteban Tovagliari, Jupiter Jazz Limited
# Copyright (c) 2014-2015 Esteban Tovagliari, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import math
import signal
import sys
import time
import threading
import appleseed as asr
def build_project():
    """
    Build and return a complete appleseed test project: a cube mesh with a
    gray diffuse material, a point light, a constant-color sky environment,
    a pinhole camera and a 640x480 frame.
    """
    # Create an empty project.
    project = asr.Project('test project')
    paths = project.get_search_paths()
    paths.append('data')
    project.set_search_paths(paths)
    # Add default configurations to the project.
    project.add_default_configurations()
    # Set the number of samples. This is basically the quality parameter: the higher the number
    # of samples, the smoother the image but the longer the rendering time.
    # todo: fix.
    # NOTE(review): `conf` and `params` below are built but never applied to
    # the configuration, so this sample-count setting currently has no effect.
    conf = project.configurations()['final']
    params = {'uniform_pixel_renderer' : {'samples' : 25}}
    # Create a scene.
    scene = asr.Scene()
    # Create an assembly.
    assembly = asr.Assembly("assembly")
    #------------------------------------------------------------------------
    # Materials
    #------------------------------------------------------------------------
    # Create a color called "gray" and insert it into the assembly.
    GrayReflectance = [0.5, 0.5, 0.5]
    assembly.colors().insert(asr.ColorEntity("gray", { 'color_space' : 'srgb' }, GrayReflectance))
    # Create a BRDF called "diffuse_gray_brdf" and insert it into the assembly.
    assembly.bsdfs().insert(asr.BSDF("lambertian_brdf", "diffuse_gray_brdf", { 'reflectance' : 'gray' }))
    # Create a physical surface shader and insert it into the assembly.
    assembly.surface_shaders().insert(asr.SurfaceShader("physical_surface_shader", "physical_surface_shader"))
    # Create a material called "gray_material" and insert it into the assembly.
    assembly.materials().insert(asr.Material("gray_material", { "surface_shader" : "physical_surface_shader",
                                                                "bsdf" : "diffuse_gray_brdf" }))
    #------------------------------------------------------------------------
    # Geometry
    #------------------------------------------------------------------------
    # Load the scene geometry from disk.
    objects = asr.MeshObjectReader.read(project.get_search_paths(), "cube", { 'filename' : 'scene.obj' })
    # Insert all the objects into the assembly.
    # (Note: `object` shadows the builtin here; harmless in this scope.)
    for object in objects:
        # Create an instance of this object and insert it into the assembly.
        instance_name = object.get_name() + "_inst"
        material_names = { "default" : "gray_material", "default2" : "gray_material" }
        instance = asr.ObjectInstance(instance_name, {}, object.get_name(), asr.Transformd(asr.Matrix4d.identity()), material_names)
        assembly.object_instances().insert(instance)
        # Insert this object into the scene.
        assembly.objects().insert(object)
    #------------------------------------------------------------------------
    # Light
    #------------------------------------------------------------------------
    # Create a color called "light_intensity" and insert it into the assembly.
    LightRadiance = [1.0, 1.0, 1.0]
    assembly.colors().insert(asr.ColorEntity("light_intensity", { 'color_space' : 'srgb', 'multiplier' : 30.0 }, LightRadiance))
    # Create a point light called "light" and insert it into the assembly.
    light = asr.Light("point_light", "light", { 'intensity' : 'light_intensity' })
    light.set_transform(asr.Transformd(asr.Matrix4d.translation(asr.Vector3d(0.6, 2.0, 1.0))))
    assembly.lights().insert(light)
    # Create an instance of the assembly and insert it into the scene.
    assembly_inst = asr.AssemblyInstance("assembly_inst", {}, assembly.get_name())
    assembly_inst.transform_sequence().set_transform(0.0, asr.Transformd(asr.Matrix4d.identity()))
    scene.assembly_instances().insert(assembly_inst)
    # Insert the assembly into the scene.
    scene.assemblies().insert(assembly)
    #------------------------------------------------------------------------
    # Environment
    #------------------------------------------------------------------------
    # Create a color called "sky_radiance" and insert it into the scene.
    SkyRadiance = [0.75, 0.80, 1.0]
    scene.colors().insert(asr.ColorEntity("sky_radiance", { 'color_space' : 'srgb', 'multiplier' : 0.5 }, SkyRadiance))
    # Create an environment EDF called "sky_edf" and insert it into the scene.
    scene.environment_edfs().insert(asr.EnvironmentEDF("constant_environment_edf", "sky_edf", { 'radiance' : 'sky_radiance' }))
    # Create an environment shader called "sky_shader" and insert it into the scene.
    scene.environment_shaders().insert(asr.EnvironmentShader("edf_environment_shader", "sky_shader", { 'environment_edf' : 'sky_edf' }))
    # Create an environment called "sky" and bind it to the scene.
    scene.set_environment(asr.Environment("sky", { "environment_edf" : "sky_edf", "environment_shader" : "sky_shader" }))
    #------------------------------------------------------------------------
    # Camera
    #------------------------------------------------------------------------
    # Create a pinhole camera with film dimensions 0.980 x 0.735 in (24.892 x 18.669 mm).
    params = { 'film_dimensions' : asr.Vector2f(0.024892, 0.018669), 'focal_length' : 0.035 }
    camera = asr.Camera("pinhole_camera", "camera", params)
    # Place and orient the camera. By default cameras are located in (0.0, 0.0, 0.0)
    # and are looking toward Z- (0.0, 0.0, -1.0).
    mat = asr.Matrix4d.rotation(asr.Vector3d(1.0, 0.0, 0.0), math.radians(-20.0))
    mat = mat * asr.Matrix4d.translation(asr.Vector3d(0.0, 0.8, 11.0))
    camera.transform_sequence().set_transform(0.0, asr.Transformd(mat))
    # Bind the camera to the scene.
    scene.set_camera(camera)
    #------------------------------------------------------------------------
    # Frame
    #------------------------------------------------------------------------
    # Create a frame and bind it to the project.
    params = { 'camera' : scene.get_camera().get_name(),
               'resolution' : asr.Vector2i(640, 480),
               'color_space' : 'srgb' }
    project.set_frame(asr.Frame("beauty", params))
    # Bind the scene to the project.
    project.set_scene(scene)
    return project
class RendererController(asr.IRendererController):
    """Renderer controller that allows aborting a render (e.g. on Ctrl-C)."""

    def __init__(self):
        super(RendererController, self).__init__()
        self.__abort = False

    def abort_rendering(self):
        """Request that the renderer stop as soon as possible."""
        sys.stdout.write("Aborting rendering\n")
        sys.stdout.flush()
        self.__abort = True

    def on_rendering_begin(self):
        # Called before rendering begins.
        pass

    def on_rendering_success(self):
        # Called after rendering has succeeded.
        pass

    def on_rendering_abort(self):
        # Called after rendering was aborted.
        pass

    def on_frame_begin(self):
        # Called before rendering a single frame.
        pass

    def on_frame_end(self):
        # Called after rendering a single frame.
        pass

    def on_progress(self):
        # Called continuously during rendering.
        pass

    def get_status(self):
        # Report whether the renderer should keep going or stop.
        return (asr.IRenderControllerStatus.AbortRendering
                if self.__abort
                else asr.IRenderControllerStatus.ContinueRendering)
class TileCallback(asr.ITileCallback):
    """Tile callback that prints one dot per rendered tile as progress."""

    def __init__(self):
        super(TileCallback, self).__init__()

    def pre_render(self, x, y, width, height):
        # Called before a region is rendered; nothing to do.
        pass

    def post_render_tile(self, frame, tile_x, tile_y):
        # Called after a tile is rendered: emit a progress dot.
        sys.stdout.write('.')

    def post_render(self, frame):
        # Called after the whole frame is rendered; nothing to do.
        pass
class RenderThread(threading.Thread):
    """Runs the master renderer on a dedicated thread."""

    def __init__(self, renderer):
        super(RenderThread, self).__init__()
        self.__renderer = renderer

    def run(self):
        # Thread body: delegate to the renderer.
        self.__renderer.render()
RENDER_ON_THREAD = True
def main():
    """Build the test project, render it, and write the image and project to disk."""
    # Create a log target that outputs to stderr, and binds it to the renderer's global logger.
    # Eventually you will want to redirect log messages to your own target.
    # For this you will need to subclass appleseed.ILogTarget.
    log_target = asr.ConsoleLogTarget(sys.stderr)
    # It is important to keep log_target alive, as the global logger does not
    # take ownership of it. In this example, we do that by removing the log target
    # when no longer needed, at the end of this function.
    asr.global_logger().add_target(log_target)
    # Build the project.
    project = build_project()
    # Create the master renderer.
    renderer_controller = RendererController()
    # Catch Control-C.
    signal.signal(signal.SIGINT, lambda signal, frame: renderer_controller.abort_rendering())
    tile_callback = TileCallback()
    renderer = asr.MasterRenderer(project,
                                  project.configurations()['final'].get_inherited_parameters(),
                                  renderer_controller,
                                  tile_callback)
    # Render the frame.
    if RENDER_ON_THREAD:
        render_thread = RenderThread(renderer)
        render_thread.start()
        # BUGFIX: `is_alive()` replaces the deprecated camelCase `isAlive()`,
        # which was removed in Python 3.9.  Join with a timeout so the main
        # thread stays responsive to SIGINT.
        while render_thread.is_alive():
            render_thread.join(0.5)  # seconds
    else:
        renderer.render()
    # Save the frame to disk.
    project.get_frame().write_main_image("output/test.png")
    # Save the project to disk.
    asr.ProjectFileWriter().write(project, "output/test.appleseed")
    # Remove the log target we added previously.
    asr.global_logger().remove_target(log_target)

if __name__ == "__main__":
    main()
| |
import datetime
import io
import re
from os import path
import json
from django.contrib.gis.geos import Point
from django.urls import reverse
from django.utils import timezone
from openpyxl import load_workbook
from rest_framework import status
from main.models import Dataset, Record
from main.tests.api import helpers
from main.tests.test_data_package import clone
from main.utils_species import NoSpeciesFacade
class TestPermissions(helpers.BaseUserTestCase):
    """
    Test Permissions on the record API end-points.
    Get: authenticated
    Update: admin, custodians
    Create: admin, custodians
    Delete: admin, custodians
    """
    species_facade_class = NoSpeciesFacade

    @staticmethod
    def schema_with_species_name():
        """Build a species-observation schema with required Species Name,
        When (observationDate), Latitude and Longitude fields."""
        schema_fields = [
            {
                "name": "Species Name",
                "type": "string",
                "constraints": helpers.REQUIRED_CONSTRAINTS
            },
            {
                "name": "When",
                "type": "date",
                "constraints": helpers.REQUIRED_CONSTRAINTS,
                "format": "any",
                "biosys": {
                    'type': 'observationDate'
                }
            },
            {
                "name": "Latitude",
                "type": "number",
                "constraints": helpers.REQUIRED_CONSTRAINTS
            },
            {
                "name": "Longitude",
                "type": "number",
                "constraints": helpers.REQUIRED_CONSTRAINTS
            },
        ]
        schema = helpers.create_schema_from_fields(schema_fields)
        return schema

    def _more_setup(self):
        # set the HerbieFacade class
        from main.api.views import SpeciesMixin
        SpeciesMixin.species_facade_class = self.species_facade_class
        project = self.project_1
        client = self.data_engineer_1_client
        schema = self.schema_with_species_name()
        self.ds_1 = self._create_dataset_with_schema(project, client, schema,
                                                     dataset_type=Dataset.TYPE_SPECIES_OBSERVATION)
        self.record_1 = self._create_default_record()

    def _create_default_record(self):
        """Replace all records of ds_1 with a single valid record and return it."""
        ds = self.ds_1
        client = self.custodian_1_client
        data = {
            'Species Name': 'Chubby Bat',
            'Latitude': -32.0,
            'Longitude': 115.75,
            'When': '2018-01-31'
        }
        payload = {
            "dataset": ds.pk,
            "data": data
        }
        url = reverse('api:record-list')
        ds.record_queryset.delete()
        self.assertEqual(
            client.post(url, data=payload, format='json').status_code,
            status.HTTP_201_CREATED
        )
        return ds.record_queryset.first()

    def test_get(self):
        """Read access: any authenticated user; anonymous rejected."""
        urls = [
            reverse('api:record-list'),
            reverse('api:record-detail', kwargs={'pk': self.record_1.pk})
        ]
        access = {
            "forbidden": [self.anonymous_client],
            "allowed": [self.readonly_client, self.custodian_1_client, self.custodian_2_client, self.admin_client]
        }
        for client in access['forbidden']:
            for url in urls:
                self.assertIn(
                    client.get(url).status_code,
                    [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]
                )
        for client in access['allowed']:
            for url in urls:
                self.assertEqual(
                    client.get(url).status_code,
                    status.HTTP_200_OK
                )

    def test_create(self):
        """
        Admin and custodians
        :return:
        """
        urls = [reverse('api:record-list')]
        ds = self.ds_1
        rec = self.record_1
        data = {
            "dataset": rec.dataset.pk,
            "data": rec.data,
        }
        access = {
            "forbidden": [self.anonymous_client, self.readonly_client, self.custodian_2_client],
            "allowed": [self.admin_client, self.custodian_1_client]
        }
        for client in access['forbidden']:
            for url in urls:
                self.assertIn(
                    client.post(url, data, format='json').status_code,
                    [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]
                )
        for client in access['allowed']:
            for url in urls:
                count = ds.record_queryset.count()
                self.assertEqual(
                    client.post(url, data, format='json').status_code,
                    status.HTTP_201_CREATED
                )
                # a successful create must add exactly one record
                self.assertEqual(ds.record_queryset.count(), count + 1)

    def test_bulk_create(self):
        """
        Cannot create bulk with this end point
        :return:
        """
        urls = [reverse('api:record-list')]
        rec = self.record_1
        ds = self.ds_1
        data = [
            {
                "dataset": rec.dataset.pk,
                "data": rec.data
            },
            {
                "dataset": rec.dataset.pk,
                "data": rec.data
            }
        ]
        # bulk payloads are rejected for everyone, hence the empty 'allowed' list
        access = {
            "forbidden": [self.anonymous_client, self.readonly_client, self.custodian_2_client,
                          self.admin_client, self.custodian_1_client],
            "allowed": []
        }
        for client in access['forbidden']:
            for url in urls:
                self.assertIn(
                    client.post(url, data, format='json').status_code,
                    [status.HTTP_400_BAD_REQUEST, status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]
                )
        for client in access['allowed']:
            for url in urls:
                count = ds.record_queryset.count()
                self.assertEqual(
                    client.post(url, data, format='json').status_code,
                    status.HTTP_201_CREATED
                )
                self.assertEqual(ds.record_queryset.count(), count + len(data))

    def test_update(self):
        """
        admin + custodian of project for site 1
        :return:
        """
        rec = self.record_1
        previous_data = clone(rec.data)
        updated_data = clone(previous_data)
        updated_data['Longitude'] = '118.78'
        urls = [reverse('api:record-detail', kwargs={'pk': rec.pk})]
        data = {
            "data": updated_data,
        }
        access = {
            "forbidden": [self.anonymous_client, self.readonly_client, self.custodian_2_client],
            "allowed": [self.admin_client, self.custodian_1_client]
        }
        for client in access['forbidden']:
            for url in urls:
                self.assertIn(
                    client.patch(url, data, format='json').status_code,
                    [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]
                )
        for client in access['allowed']:
            for url in urls:
                # reset the record to its previous state before each client's patch
                rec.data = previous_data
                rec.save()
                self.assertEqual(
                    client.patch(url, data, format='json').status_code,
                    status.HTTP_200_OK
                )
                rec.refresh_from_db()
                self.assertEqual(rec.data, updated_data)

    def test_delete(self):
        """
        Currently admin + custodian
        :return:
        """
        rec = self.record_1
        urls = [reverse('api:record-detail', kwargs={'pk': rec.pk})]
        data = None
        access = {
            "forbidden": [self.anonymous_client, self.readonly_client, self.custodian_2_client],
            "allowed": [self.admin_client, self.custodian_1_client]
        }
        for client in access['forbidden']:
            for url in urls:
                self.assertIn(
                    client.delete(url, data, format='json').status_code,
                    [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]
                )
        for client in access['allowed']:
            for url in urls:
                # The local `rec` instance keeps its pk after a server-side delete,
                # so save() re-inserts the row and the same detail URL stays valid.
                rec.save()
                count = rec.dataset.record_queryset.count()
                self.assertEqual(
                    client.delete(url, data, format='json').status_code,
                    status.HTTP_204_NO_CONTENT
                )
                # BUG FIX: was `self.assertTrue(Dataset.objects.count(), count - 1)`,
                # which always passes (assertTrue's second argument is only the failure
                # message) and counted datasets, which a record delete never changes.
                # The intended check is that exactly one record was removed.
                self.assertEqual(rec.dataset.record_queryset.count(), count - 1)

    def test_options(self):
        """OPTIONS: any authenticated user; anonymous rejected."""
        urls = [
            reverse('api:record-list'),
            reverse('api:record-detail', kwargs={'pk': 1})
        ]
        access = {
            "forbidden": [self.anonymous_client],
            "allowed": [self.readonly_client, self.custodian_1_client, self.custodian_2_client, self.admin_client]
        }
        for client in access['forbidden']:
            for url in urls:
                self.assertIn(
                    client.options(url).status_code,
                    [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]
                )
        # authenticated
        for client in access['allowed']:
            for url in urls:
                self.assertEqual(
                    client.options(url).status_code,
                    status.HTTP_200_OK
                )
class TestDataValidation(helpers.BaseUserTestCase):
    """Validation of record payloads against the dataset schema
    (required date, geometry and species name, strict-mode column checks)."""
    species_facade_class = NoSpeciesFacade

    @staticmethod
    def schema_with_species_name():
        """Build a species-observation schema with required Species Name,
        When (observationDate), Latitude and Longitude fields."""
        schema_fields = [
            {
                "name": "Species Name",
                "type": "string",
                "constraints": helpers.REQUIRED_CONSTRAINTS
            },
            {
                "name": "When",
                "type": "date",
                "constraints": helpers.REQUIRED_CONSTRAINTS,
                "format": "any",
                "biosys": {
                    'type': 'observationDate'
                }
            },
            {
                "name": "Latitude",
                "type": "number",
                "constraints": helpers.REQUIRED_CONSTRAINTS
            },
            {
                "name": "Longitude",
                "type": "number",
                "constraints": helpers.REQUIRED_CONSTRAINTS
            },
        ]
        schema = helpers.create_schema_from_fields(schema_fields)
        return schema

    def _more_setup(self):
        # set the HerbieFacade class
        from main.api.views import SpeciesMixin
        SpeciesMixin.species_facade_class = self.species_facade_class
        project = self.project_1
        # NOTE: removed unused local `client = self.custodian_1_client`;
        # the dataset is created with the data engineer client below.
        schema = self.schema_with_species_name()
        self.ds_1 = self._create_dataset_with_schema(
            project, self.data_engineer_1_client, schema,
            dataset_type=Dataset.TYPE_SPECIES_OBSERVATION)

    def _create_default_record(self):
        """Replace all records of ds_1 with a single valid record and return it."""
        ds = self.ds_1
        client = self.custodian_1_client
        data = {
            'Species Name': 'Chubby Bat',
            'Latitude': -32.0,
            'Longitude': 115.75,
            'When': '2018-01-31'
        }
        payload = {
            "dataset": ds.pk,
            "data": data
        }
        url = reverse('api:record-list')
        ds.record_queryset.delete()
        self.assertEqual(
            client.post(url, data=payload, format='json').status_code,
            status.HTTP_201_CREATED
        )
        return ds.record_queryset.first()

    def test_create_one_happy_path(self):
        """
        Test the create of one record
        :return:
        """
        ds = self.ds_1
        client = self.custodian_1_client
        data = {
            'Species Name': 'Chubby Bat',
            'Latitude': -32.0,
            'Longitude': 115.75,
            'When': '2018-01-31'
        }
        payload = {
            "dataset": ds.pk,
            "data": data
        }
        url = reverse('api:record-list')
        ds.record_queryset.delete()
        self.assertEqual(
            client.post(url, payload, format='json').status_code,
            status.HTTP_201_CREATED
        )
        self.assertEqual(ds.record_queryset.count(), 1)

    def test_empty_not_allowed(self):
        """An empty data dict must be rejected and create nothing."""
        ds = self.ds_1
        client = self.custodian_1_client
        payload = {
            "dataset": ds.pk,
            "data": {}
        }
        url = reverse('api:record-list')
        count = ds.record_queryset.count()
        self.assertEqual(
            client.post(url, payload, format='json').status_code,
            status.HTTP_400_BAD_REQUEST
        )
        self.assertEqual(ds.record_queryset.count(), count)

    def test_create_column_not_in_schema(self):
        """
        Test that if we introduce a column not in the schema it will not validate in strict mode
        """
        ds = self.ds_1
        client = self.custodian_1_client
        data = {
            'Species Name': 'Chubby Bat',
            'Latitude': -32.0,
            'Longitude': 115.75,
            'When': '2018-01-31',
            'Extra Column': 'Extra Value'
        }
        payload = {
            "dataset": ds.pk,
            "data": data
        }
        url = helpers.set_strict_mode(reverse('api:record-list'))
        ds.record_queryset.delete()
        self.assertEqual(
            client.post(url, data=payload, format='json').status_code,
            status.HTTP_400_BAD_REQUEST
        )
        self.assertEqual(ds.record_queryset.count(), 0)

    def test_update_column_not_in_schema(self):
        """
        Test that updating a record with column not in the schema it will not validate in strict mode
        :return:
        """
        ds = self.ds_1
        client = self.custodian_1_client
        record = self._create_default_record()
        incorrect_data = clone(record.data)
        incorrect_data['Extra Column'] = "Extra Value"
        data = {
            "dataset": record.dataset.pk,
            "data": incorrect_data
        }
        url = reverse('api:record-detail', kwargs={"pk": record.pk})
        # set strict mode
        url = helpers.set_strict_mode(url)
        count = ds.record_queryset.count()
        # both PUT and PATCH must be rejected
        self.assertEqual(
            client.put(url, data, format='json').status_code,
            status.HTTP_400_BAD_REQUEST
        )
        self.assertEqual(ds.record_queryset.count(), count)
        self.assertEqual(
            client.patch(url, data, format='json').status_code,
            status.HTTP_400_BAD_REQUEST
        )
        self.assertEqual(ds.record_queryset.count(), count)

    def test_date_error(self):
        """
        An observation must have a date
        :return:
        """
        ds = self.ds_1
        record = self._create_default_record()
        date_column = ds.schema.observation_date_field.name
        new_data = clone(record.data)
        url_post = reverse('api:record-list')
        url_update = reverse('api:record-detail', kwargs={'pk': record.pk})
        valid_values = ['15/08/2008']
        for value in valid_values:
            new_data[date_column] = value
            data = {
                "dataset": record.dataset.pk,
                "data": new_data
            }
            client = self.custodian_1_client
            count = ds.record_queryset.count()
            self.assertEqual(
                client.post(url_post, data, format='json').status_code,
                status.HTTP_201_CREATED
            )
            self.assertEqual(ds.record_queryset.count(), count + 1)
        invalid_values = [None, '', 'not a date']
        for value in invalid_values:
            new_data[date_column] = value
            data = {
                "dataset": record.dataset.pk,
                "data": new_data
            }
            client = self.custodian_1_client
            count = ds.record_queryset.count()
            # invalid date must be rejected on create and on both update verbs
            self.assertEqual(
                client.post(url_post, data, format='json').status_code,
                status.HTTP_400_BAD_REQUEST
            )
            self.assertEqual(
                client.put(url_update, data, format='json').status_code,
                status.HTTP_400_BAD_REQUEST
            )
            self.assertEqual(
                client.patch(url_update, data, format='json').status_code,
                status.HTTP_400_BAD_REQUEST
            )
            self.assertEqual(ds.record_queryset.count(), count)

    def test_geometry_error(self):
        """
        An observation must have a valid geometry
        :return:
        """
        ds = self.ds_1
        record = self._create_default_record()
        lat_column = ds.schema.latitude_field.name
        new_data = clone(record.data)
        url_post = reverse('api:record-list')
        url_update = reverse('api:record-detail', kwargs={'pk': record.pk})
        valid_values = [-34.125]
        for value in valid_values:
            new_data[lat_column] = value
            data = {
                "dataset": record.dataset.pk,
                "data": new_data
            }
            client = self.custodian_1_client
            count = ds.record_queryset.count()
            self.assertEqual(
                client.post(url_post, data, format='json').status_code,
                status.HTTP_201_CREATED
            )
            self.assertEqual(ds.record_queryset.count(), count + 1)
        invalid_values = [None, '', 'not a valid latitude']
        for value in invalid_values:
            new_data[lat_column] = value
            data = {
                "dataset": record.dataset.pk,
                "data": new_data
            }
            client = self.custodian_1_client
            count = ds.record_queryset.count()
            # invalid latitude must be rejected on create and on both update verbs
            self.assertEqual(
                client.post(url_post, data, format='json').status_code,
                status.HTTP_400_BAD_REQUEST
            )
            self.assertEqual(
                client.put(url_update, data, format='json').status_code,
                status.HTTP_400_BAD_REQUEST
            )
            self.assertEqual(
                client.patch(url_update, data, format='json').status_code,
                status.HTTP_400_BAD_REQUEST
            )
            self.assertEqual(ds.record_queryset.count(), count)

    def test_species_name(self):
        """A species observation must have a non-empty string species name."""
        ds = self.ds_1
        record = self._create_default_record()
        column = ds.schema.species_name_parser.species_name_field.name
        new_data = clone(record.data)
        url_post = reverse('api:record-list')
        url_update = reverse('api:record-detail', kwargs={'pk': record.pk})
        valid_values = ['Canis Lupus', 'chubby bat', 'anything']
        for value in valid_values:
            new_data[column] = value
            data = {
                "dataset": record.dataset.pk,
                "data": new_data
            }
            client = self.custodian_1_client
            count = ds.record_queryset.count()
            self.assertEqual(
                client.post(url_post, data, format='json').status_code,
                status.HTTP_201_CREATED
            )
            self.assertEqual(ds.record_queryset.count(), count + 1)
        invalid_values = [None, '', 125]
        for value in invalid_values:
            new_data[column] = value
            data = {
                "dataset": record.dataset.pk,
                "data": new_data
            }
            client = self.custodian_1_client
            count = ds.record_queryset.count()
            self.assertEqual(
                client.post(url_post, data, format='json').status_code,
                status.HTTP_400_BAD_REQUEST
            )
            self.assertEqual(
                client.put(url_update, data, format='json').status_code,
                status.HTTP_400_BAD_REQUEST
            )
            self.assertEqual(
                client.patch(url_update, data, format='json').status_code,
                status.HTTP_400_BAD_REQUEST
            )
            self.assertEqual(ds.record_queryset.count(), count)
class TestDateTimeAndGeometryExtraction(helpers.BaseUserTestCase):
    """Check that the observation date and the point geometry are extracted
    from the record data and persisted on the Record model."""
    species_facade_class = NoSpeciesFacade

    @staticmethod
    def schema_with_species_name():
        """Build a species-observation schema with required Species Name,
        When (observationDate), Latitude and Longitude fields."""
        schema_fields = [
            {
                "name": "Species Name",
                "type": "string",
                "constraints": helpers.REQUIRED_CONSTRAINTS
            },
            {
                "name": "When",
                "type": "date",
                "constraints": helpers.REQUIRED_CONSTRAINTS,
                "format": "any",
                "biosys": {
                    'type': 'observationDate'
                }
            },
            {
                "name": "Latitude",
                "type": "number",
                "constraints": helpers.REQUIRED_CONSTRAINTS
            },
            {
                "name": "Longitude",
                "type": "number",
                "constraints": helpers.REQUIRED_CONSTRAINTS
            },
        ]
        schema = helpers.create_schema_from_fields(schema_fields)
        return schema

    def _more_setup(self):
        # set the HerbieFacade class
        from main.api.views import SpeciesMixin
        SpeciesMixin.species_facade_class = self.species_facade_class

    def test_create(self):
        """
        Test that the date and geometry are extracted from the data
        and saved in DB
        """
        project = self.project_1
        client = self.custodian_1_client
        schema = self.schema_with_species_name()
        ds = self._create_dataset_with_schema(
            project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
        )
        data = {
            'Species Name': 'Chubby Bat',
            'Latitude': -32.0,
            'Longitude': 115.75,
            'When': '2018-01-31'
        }
        # clear all records
        ds.record_queryset.delete()
        self.assertEqual(ds.record_queryset.count(), 0)
        payload = {
            "dataset": ds.pk,
            "data": data
        }
        url = reverse('api:record-list')
        self.assertEqual(
            client.post(url, data=payload, format='json').status_code,
            status.HTTP_201_CREATED
        )
        self.assertEqual(ds.record_queryset.count(), 1)
        record = ds.record_queryset.first()
        # the stored datetime, viewed in local time, must match the 'When' value
        expected_date = datetime.date(2018, 1, 31)
        self.assertEqual(timezone.localtime(record.datetime).date(), expected_date)
        # geometry is a Point built as (longitude, latitude) -> (x, y)
        geometry = record.geometry
        self.assertIsInstance(geometry, Point)
        self.assertEqual((115.75, -32.0), (geometry.x, geometry.y))

    def test_update(self):
        """
        Test that the date and geometry are extracted from the data
        and saved in DB after a PATCH of the record data
        """
        project = self.project_1
        client = self.custodian_1_client
        schema = self.schema_with_species_name()
        ds = self._create_dataset_with_schema(
            project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
        )
        data = {
            'Species Name': 'Chubby Bat',
            'Latitude': -32.0,
            'Longitude': 115.75,
            'When': '2018-01-31'
        }
        # clear all records
        ds.record_queryset.delete()
        self.assertEqual(ds.record_queryset.count(), 0)
        payload = {
            "dataset": ds.pk,
            "data": data
        }
        url = reverse('api:record-list')
        self.assertEqual(
            client.post(url, data=payload, format='json').status_code,
            status.HTTP_201_CREATED
        )
        self.assertEqual(ds.record_queryset.count(), 1)
        record = ds.record_queryset.first()
        # date and lat/lon
        # change lat/lon
        data = {
            'Species Name': 'Chubby Bat',
            'Latitude': 22.222,
            'Longitude': 111.111,
            'When': '2017-12-24'
        }
        payload = {
            "data": data
        }
        url = reverse('api:record-detail', kwargs={"pk": record.pk})
        self.assertEqual(
            client.patch(url, data=payload, format='json').status_code,
            status.HTTP_200_OK
        )
        record.refresh_from_db()
        # the PATCH must have re-extracted both the datetime and the geometry
        expected_date = datetime.date(2017, 12, 24)
        self.assertEqual(timezone.localtime(record.datetime).date(), expected_date)
        geometry = record.geometry
        self.assertIsInstance(geometry, Point)
        self.assertEqual((111.111, 22.222), (geometry.x, geometry.y))
class TestSpeciesNameExtraction(helpers.BaseUserTestCase):
    """Check that the species name is extracted from the record data and
    stored (trimmed) on Record.species_name, even without species validation."""
    species_facade_class = NoSpeciesFacade

    @staticmethod
    def schema_with_species_name():
        """Build a species-observation schema with required Species Name,
        When (observationDate), Latitude and Longitude fields."""
        schema_fields = [
            {
                "name": "Species Name",
                "type": "string",
                "constraints": helpers.REQUIRED_CONSTRAINTS
            },
            {
                "name": "When",
                "type": "date",
                "constraints": helpers.REQUIRED_CONSTRAINTS,
                "format": "any",
                "biosys": {
                    'type': 'observationDate'
                }
            },
            {
                "name": "Latitude",
                "type": "number",
                "constraints": helpers.REQUIRED_CONSTRAINTS
            },
            {
                "name": "Longitude",
                "type": "number",
                "constraints": helpers.REQUIRED_CONSTRAINTS
            },
        ]
        schema = helpers.create_schema_from_fields(schema_fields)
        return schema

    def _more_setup(self):
        # set the HerbieFacade class
        from main.api.views import SpeciesMixin
        SpeciesMixin.species_facade_class = self.species_facade_class

    def test_create(self):
        """
        Test that the species name is extracted from the data and saved in DB even if the species is not valid
        """
        project = self.project_1
        client = self.custodian_1_client
        schema = self.schema_with_species_name()
        ds = self._create_dataset_with_schema(
            project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
        )
        data = {
            'Species Name': 'Chubby Bat',
            'Latitude': -32.0,
            'Longitude': 115.75,
            'When': '2018-01-31'
        }
        # clear all records
        ds.record_queryset.delete()
        self.assertEqual(ds.record_queryset.count(), 0)
        payload = {
            "dataset": ds.pk,
            "data": data
        }
        url = reverse('api:record-list')
        self.assertEqual(
            client.post(url, data=payload, format='json').status_code,
            status.HTTP_201_CREATED
        )
        self.assertEqual(ds.record_queryset.count(), 1)
        self.assertEqual(ds.record_queryset.first().species_name, 'Chubby Bat')

    def test_update(self):
        """
        Test species name extraction after a PUT method
        """
        project = self.project_1
        client = self.custodian_1_client
        schema = self.schema_with_species_name()
        ds = self._create_dataset_with_schema(
            project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
        )
        data = {
            'Species Name': 'Chubby Bat',
            'Latitude': -32.0,
            'Longitude': 115.75,
            'When': '2018-01-31'
        }
        payload = {
            "dataset": ds.pk,
            "data": data
        }
        url = reverse('api:record-list')
        self.assertEqual(
            client.post(url, payload, format='json').status_code,
            status.HTTP_201_CREATED
        )
        self.assertEqual(ds.record_queryset.count(), 1)
        record = ds.record_queryset.first()
        self.assertEqual(record.species_name, 'Chubby Bat')
        # update the species_name
        # note the surrounding whitespace: extraction is expected to trim it
        data = {
            'Species Name': ' Canis lupus ',
            'Latitude': -32.0,
            'Longitude': 115.75,
            'When': '2018-01-31'
        }
        payload = {
            "dataset": ds.pk,
            "data": data
        }
        url = reverse('api:record-detail', kwargs={"pk": record.pk})
        self.assertEqual(
            client.put(url, payload, format='json').status_code,
            status.HTTP_200_OK
        )
        record.refresh_from_db()
        self.assertEqual(record.species_name, 'Canis lupus')

    def test_patch(self):
        """
        Test species name extraction after a PATCH method
        """
        project = self.project_1
        client = self.custodian_1_client
        schema = self.schema_with_species_name()
        ds = self._create_dataset_with_schema(
            project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
        )
        data = {
            'Species Name': 'Chubby Bat',
            'Latitude': -32.0,
            'Longitude': 115.75,
            'When': '2018-01-31'
        }
        payload = {
            "dataset": ds.pk,
            "data": data
        }
        url = reverse('api:record-list')
        self.assertEqual(
            client.post(url, payload, format='json').status_code,
            status.HTTP_201_CREATED
        )
        self.assertEqual(ds.record_queryset.count(), 1)
        record = ds.record_queryset.first()
        self.assertEqual(record.species_name, 'Chubby Bat')
        # update the species_name
        # trailing whitespace is expected to be trimmed by the extraction
        data = {
            'Species Name': 'Canis lupus ',
            'Latitude': -32.0,
            'Longitude': 115.75,
            'When': '2018-01-31'
        }
        payload = {
            "data": data
        }
        url = reverse('api:record-detail', kwargs={"pk": record.pk})
        self.assertEqual(
            client.patch(url, payload, format='json').status_code,
            status.HTTP_200_OK
        )
        record.refresh_from_db()
        self.assertEqual(record.species_name, 'Canis lupus')
class TestNameIDFromSpeciesName(helpers.BaseUserTestCase):
    """
    Test that we retrieve the name id from the species facade
    """
    # LightSpeciesFacade provides a static species_name -> name_id mapping
    species_facade_class = helpers.LightSpeciesFacade

    @staticmethod
    def schema_with_species_name():
        """Build a species-observation schema with required Species Name,
        When (observationDate), Latitude and Longitude fields."""
        schema_fields = [
            {
                "name": "Species Name",
                "type": "string",
                "constraints": helpers.REQUIRED_CONSTRAINTS
            },
            {
                "name": "When",
                "type": "date",
                "constraints": helpers.REQUIRED_CONSTRAINTS,
                "format": "any",
                "biosys": {
                    'type': 'observationDate'
                }
            },
            {
                "name": "Latitude",
                "type": "number",
                "constraints": helpers.REQUIRED_CONSTRAINTS
            },
            {
                "name": "Longitude",
                "type": "number",
                "constraints": helpers.REQUIRED_CONSTRAINTS
            },
        ]
        schema = helpers.create_schema_from_fields(schema_fields)
        return schema

    def _more_setup(self):
        # set the HerbieFacade class
        from main.api.views import SpeciesMixin
        SpeciesMixin.species_facade_class = self.species_facade_class

    def test_create(self):
        """
        Test that the name_id is retrieved from the species facade from the species_name
        :return:
        """
        project = self.project_1
        client = self.custodian_1_client
        schema = self.schema_with_species_name()
        ds = self._create_dataset_with_schema(
            project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
        )
        data = {
            'Latitude': -32.0,
            'Longitude': 115.75,
            'When': '2018-01-31'
        }
        # exercise only the first two known species to keep the test fast
        for species_name, name_id in list(helpers.LightSpeciesFacade().name_id_by_species_name().items())[:2]:
            ds.record_queryset.delete()
            self.assertEqual(ds.record_queryset.count(), 0)
            data['Species Name'] = species_name
            payload = {
                "dataset": ds.pk,
                "data": data
            }
            url = reverse('api:record-list')
            self.assertEqual(
                client.post(url, payload, format='json').status_code,
                status.HTTP_201_CREATED
            )
            self.assertEqual(ds.record_queryset.count(), 1)
            self.assertEqual(ds.record_queryset.first().name_id, name_id)

    def test_update(self):
        """
        Test that the name_id is retrieved from the species facade from the species_name
        """
        project = self.project_1
        client = self.custodian_1_client
        schema = self.schema_with_species_name()
        ds = self._create_dataset_with_schema(
            project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
        )
        # create a record with a wrong species name. Should have name_id = -1
        data = {
            'Species Name': 'Chubby Bat',
            'Latitude': -32.0,
            'Longitude': 115.75,
            'When': '2018-01-31'
        }
        payload = {
            "dataset": ds.pk,
            "data": data
        }
        url = reverse('api:record-list')
        self.assertEqual(
            client.post(url, payload, format='json').status_code,
            status.HTTP_201_CREATED
        )
        self.assertEqual(ds.record_queryset.count(), 1)
        record = ds.record_queryset.first()
        self.assertEqual(record.name_id, -1)
        # update the species_name
        data = {
            'Species Name': 'Canis lupus',
            'Latitude': -32.0,
            'Longitude': 115.75,
            'When': '2018-01-31'
        }
        payload = {
            "dataset": ds.pk,
            "data": data
        }
        # 25454 is the facade's name_id for 'Canis lupus'
        expected_name_id = 25454
        url = reverse('api:record-detail', kwargs={"pk": record.pk})
        self.assertEqual(
            client.put(url, payload, format='json').status_code,
            status.HTTP_200_OK
        )
        record.refresh_from_db()
        self.assertEqual(record.name_id, expected_name_id)

    def test_patch(self):
        """
        Same as above but with a patch method instead of put
        """
        project = self.project_1
        client = self.custodian_1_client
        schema = self.schema_with_species_name()
        ds = self._create_dataset_with_schema(
            project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
        )
        # create a record with a wrong species name. Should have name_id = -1
        data = {
            'Species Name': 'Chubby Bat',
            'Latitude': -32.0,
            'Longitude': 115.75,
            'When': '2018-01-31'
        }
        payload = {
            "dataset": ds.pk,
            "data": data
        }
        url = reverse('api:record-list')
        self.assertEqual(
            client.post(url, payload, format='json').status_code,
            status.HTTP_201_CREATED
        )
        self.assertEqual(ds.record_queryset.count(), 1)
        record = ds.record_queryset.first()
        self.assertEqual(record.name_id, -1)
        # update the species_name
        data = {
            'Species Name': 'Canis lupus',
            'Latitude': -32.0,
            'Longitude': 115.75,
            'When': '2018-01-31'
        }
        payload = {
            "data": data
        }
        # 25454 is the facade's name_id for 'Canis lupus'
        expected_name_id = 25454
        url = reverse('api:record-detail', kwargs={"pk": record.pk})
        self.assertEqual(
            client.patch(url, payload, format='json').status_code,
            status.HTTP_200_OK
        )
        record.refresh_from_db()
        self.assertEqual(record.name_id, expected_name_id)
class TestExport(helpers.BaseUserTestCase):
    """Tests for the xlsx export of records (output=xlsx on the record list
    end-point): content, headers and access rules."""

    def setUp(self):
        super(TestExport, self).setUp()
        rows = [
            ['When', 'Species Name', 'How Many', 'Latitude', 'Longitude', 'Comments'],
            ['2018-02-07', 'Canis lupus', 1, -32.0, 115.75, ''],
            ['2018-01-12', 'Chubby bat', 10, -32.0, 115.75, 'Awesome'],
            ['2018-02-02', 'Canis dingo', 2, -32.0, 115.75, 'Watch out kids'],
            ['2018-02-10', 'Unknown', 3, -32.0, 115.75, 'Canis?'],
        ]
        self.ds_1 = self._create_dataset_and_records_from_rows(rows)
        self.assertEqual(self.ds_1.type, Dataset.TYPE_SPECIES_OBSERVATION)

    def test_happy_path_no_filter(self):
        """An unfiltered export returns one xlsx sheet named after the dataset,
        with the schema headers and one row per record."""
        client = self.custodian_1_client
        dataset = self.ds_1
        all_records = Record.objects.filter(dataset=dataset)
        self.assertTrue(all_records.count() > 0)
        url = reverse('api:record-list')
        query = {
            'dataset__id': dataset.pk,
            'output': 'xlsx'
        }
        try:
            resp = client.get(url, query)
        except Exception as e:
            self.fail("Export should not raise an exception: {}".format(e))
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        # check headers
        self.assertEqual(resp.get('content-type'),
                         'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
        content_disposition = resp.get('content-disposition')
        # should be something like:
        # 'attachment; filename=DatasetName_YYYY_MM_DD-HHMMSS.xlsx
        match = re.match('attachment; filename=(.+)', content_disposition)
        self.assertIsNotNone(match)
        filename, ext = path.splitext(match.group(1))
        self.assertEqual(ext, '.xlsx')
        # BUG FIX: the startswith() result was previously discarded, so the
        # filename-prefix check never asserted anything.
        self.assertTrue(filename.startswith(dataset.name))
        # read content
        wb = load_workbook(io.BytesIO(resp.content), read_only=True)
        # one datasheet named from dataset
        sheet_names = wb.sheetnames
        self.assertEqual(1, len(sheet_names))
        self.assertEqual(dataset.name, sheet_names[0])
        ws = wb[dataset.name]
        rows = list(ws.rows)
        expected_records = Record.objects.filter(dataset=dataset)
        # one header row plus one row per record
        self.assertEqual(len(rows), expected_records.count() + 1)
        headers = [c.value for c in rows[0]]
        schema = dataset.schema
        # all the columns of the schema should be in the excel
        self.assertEqual(schema.headers, headers)

    def test_permission_ok_for_not_custodian(self):
        """Export is a read action. Should be authorised for every logged-in user."""
        client = self.custodian_2_client
        dataset = self.ds_1
        url = reverse('api:record-list')
        query = {
            'dataset__id': dataset.pk,
            'output': 'xlsx'
        }
        try:
            resp = client.get(url, query)
        except Exception as e:
            self.fail("Export should not raise an exception: {}".format(e))
        self.assertEqual(resp.status_code, status.HTTP_200_OK)

    def test_permission_denied_if_not_logged_in(self):
        """Must be logged-in."""
        client = self.anonymous_client
        dataset = self.ds_1
        url = reverse('api:record-list')
        query = {
            'dataset__id': dataset.pk,
            'output': 'xlsx'
        }
        try:
            resp = client.get(url, query)
        except Exception as e:
            self.fail("Export should not raise an exception: {}".format(e))
        self.assertEqual(resp.status_code, status.HTTP_401_UNAUTHORIZED)
class TestSpeciesNameFromNameID(helpers.BaseUserTestCase):
"""
Use case:
The schema doesn't include a Species Name but just a Name Id column.
Test that using the upload (excel) or API the species name is collected from herbie and populated.
The test suite uses a mock herbie facade with a static species_name -> nameId dict
@see helpers.SOME_SPECIES_NAME_NAME_ID_MAP
"""
species_facade_class = helpers.LightSpeciesFacade
def _more_setup(self):
# set the HerbieFacade class
from main.api.views import SpeciesMixin
SpeciesMixin.species_facade_class = self.species_facade_class
@staticmethod
def schema_with_name_id():
schema_fields = [
{
"name": "Name Id",
"type": "integer",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "When",
"type": "date",
"constraints": helpers.REQUIRED_CONSTRAINTS,
"format": "any",
"biosys": {
'type': 'observationDate'
}
},
{
"name": "Latitude",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "Longitude",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
]
schema = helpers.create_schema_from_fields(schema_fields)
return schema
def test_species_name_collected_upload(self):
"""
Happy path: upload excel with a valid Name Id.
:return:
"""
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_name_id()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
)
# data
csv_data = [
['Name Id', 'When', 'Latitude', 'Longitude'],
[25454, '01/01/2017', -32.0, 115.75], # "Canis lupus"
['24204', '02/02/2017', -33.0, 116.0] # "Vespadelus douglasorum"
]
file_ = helpers.rows_to_xlsx_file(csv_data)
self.assertEqual(0, Record.objects.filter(dataset=dataset).count())
url = reverse('api:dataset-upload', kwargs={'pk': dataset.pk})
with open(file_, 'rb') as fp:
payload = {
'file': fp
}
resp = client.post(url, payload, format='multipart')
self.assertEqual(status.HTTP_200_OK, resp.status_code)
records = Record.objects.filter(dataset=dataset)
self.assertEqual(records.count(), len(csv_data) - 1)
for r in records:
self.assertTrue(r.name_id > 0)
self.assertIsNotNone(r.species_name)
canis_lupus = records.filter(name_id=25454).first()
self.assertIsNotNone(canis_lupus)
self.assertEqual(canis_lupus.species_name, "Canis lupus")
vespadelus = records.filter(name_id=24204).first()
self.assertIsNotNone(vespadelus)
self.assertEqual(vespadelus.species_name, "Vespadelus douglasorum")
def test_species_name_collected_api_create(self):
"""
Same as above: testing that the species name is collected when using the API create
:return:
"""
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_name_id()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
)
record_data = {
'Name Id': 25454, # "Canis lupus"
'When': '12/12/2017',
'Latitude': -32.0,
'Longitude': 115.756
}
payload = {
'dataset': dataset.pk,
'data': record_data
}
url = reverse('api:record-list')
resp = client.post(url, payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
record = Record.objects.filter(id=resp.json().get('id')).first()
self.assertIsNotNone(record)
self.assertEqual(record.name_id, 25454)
self.assertEqual(record.species_name, "Canis lupus")
def test_species_name_collected_api_update(self):
"""
Updating the Name Id should update the species name
:return:
"""
# create record
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_name_id()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
)
record_data = {
'Name Id': 25454, # "Canis lupus"
'When': '12/12/2017',
'Latitude': -32.0,
'Longitude': 115.756
}
payload = {
'dataset': dataset.pk,
'data': record_data
}
url = reverse('api:record-list')
resp = client.post(url, payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
record = Record.objects.filter(id=resp.json().get('id')).first()
self.assertIsNotNone(record)
self.assertEqual(record.name_id, 25454)
self.assertEqual(record.species_name, "Canis lupus")
# patch Name Id
new_name_id = 24204
record_data['Name Id'] = new_name_id
expected_species_name = 'Vespadelus douglasorum'
url = reverse('api:record-detail', kwargs={'pk': record.pk})
payload = {
'data': record_data
}
resp = client.patch(url, payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_200_OK)
record.refresh_from_db()
self.assertEqual(record.name_id, new_name_id)
self.assertEqual(record.species_name, expected_species_name)
def test_wrong_id_rejected_upload(self):
    """
    A row whose Name Id is not in the species list is treated as an error:
    the upload returns a 400 and only the rows with a valid Name Id are kept.
    """
    dataset = self._create_dataset_with_schema(
        self.project_1,
        self.data_engineer_1_client,
        self.schema_with_name_id(),
        dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
    )
    client = self.custodian_1_client
    rows = [
        ['Name Id', 'When', 'Latitude', 'Longitude'],
        [99934, '01/01/2017', -32.0, 115.75],  # wrong
        ['24204', '02/02/2017', -33.0, 116.0]  # "Vespadelus douglasorum"
    ]
    xlsx_path = helpers.rows_to_xlsx_file(rows)
    self.assertEqual(0, Record.objects.filter(dataset=dataset).count())
    url = reverse('api:dataset-upload', kwargs={'pk': dataset.pk})
    with open(xlsx_path, 'rb') as fp:
        resp = client.post(url, {'file': fp}, format='multipart')
    self.assertEqual(status.HTTP_400_BAD_REQUEST, resp.status_code)
    records = Record.objects.filter(dataset=dataset)
    # Only the valid row should have been imported.
    self.assertEqual(records.count(), 1)
    vespadelus = records.filter(name_id=24204).first()
    self.assertIsNotNone(vespadelus)
    self.assertEqual(vespadelus.species_name, "Vespadelus douglasorum")
def test_wrong_id_rejected_api_create(self):
    """
    Creating a record through the API with an unknown Name Id returns a 400
    and no record is created.
    """
    dataset = self._create_dataset_with_schema(
        self.project_1,
        self.data_engineer_1_client,
        self.schema_with_name_id(),
        dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
    )
    client = self.custodian_1_client
    payload = {
        'dataset': dataset.pk,
        'data': {
            'Name Id': 9999,  # wrong
            'When': '12/12/2017',
            'Latitude': -32.0,
            'Longitude': 115.756
        }
    }
    resp = client.post(reverse('api:record-list'), payload, format='json')
    self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
    self.assertEqual(Record.objects.filter(dataset=dataset).count(), 0)
class TestSpeciesNameAndNameID(helpers.BaseUserTestCase):
    """
    Use case: the schema includes both a Species Name and a Name Id column.

    Verify that the Name Id takes precedence over the Species Name.
    The test suite uses a mock herbie facade with a static
    species_name -> Name Id dict.
    @see helpers.SOME_SPECIES_NAME_NAME_ID_MAP
    """
    species_facade_class = helpers.LightSpeciesFacade

    def _more_setup(self):
        # Install the mock HerbieFacade on the API views.
        from main.api.views import SpeciesMixin
        SpeciesMixin.species_facade_class = self.species_facade_class

    @staticmethod
    def schema_with_name_id_and_species_name():
        """Build a schema with optional Name Id / Species Name columns plus
        required observation date and coordinates."""
        optional = helpers.NOT_REQUIRED_CONSTRAINTS
        required = helpers.REQUIRED_CONSTRAINTS
        fields = [
            {"name": "Name Id", "type": "integer", "constraints": optional},
            {"name": "Species Name", "type": "string", "constraints": optional},
            {
                "name": "When",
                "type": "date",
                "constraints": required,
                "format": "any",
                "biosys": {'type': 'observationDate'}
            },
            {"name": "Latitude", "type": "number", "constraints": required},
            {"name": "Longitude", "type": "number", "constraints": required},
        ]
        return helpers.create_schema_from_fields(fields)

    def test_species_name_collected_upload(self):
        """
        Happy path: upload an xlsx with valid Name Ids; the species name stored
        on the record comes from the Name Id, not the Species Name column.
        """
        dataset = self._create_dataset_with_schema(
            self.project_1,
            self.data_engineer_1_client,
            self.schema_with_name_id_and_species_name(),
            dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
        )
        client = self.custodian_1_client
        rows = [
            ['Name Id', 'Species Name', 'When', 'Latitude', 'Longitude'],
            [25454, 'Chubby Bat', '01/01/2017', -32.0, 115.75],  # "Canis lupus"
            ['24204', 'French Frog', '02/02/2017', -33.0, 116.0]  # "Vespadelus douglasorum"
        ]
        xlsx_path = helpers.rows_to_xlsx_file(rows)
        self.assertEqual(0, Record.objects.filter(dataset=dataset).count())
        url = reverse('api:dataset-upload', kwargs={'pk': dataset.pk})
        with open(xlsx_path, 'rb') as fp:
            resp = client.post(url, {'file': fp}, format='multipart')
        self.assertEqual(status.HTTP_200_OK, resp.status_code)
        records = Record.objects.filter(dataset=dataset)
        self.assertEqual(records.count(), len(rows) - 1)
        for record in records:
            self.assertTrue(record.name_id > 0)
            self.assertIsNotNone(record.species_name)
        canis_lupus = records.filter(name_id=25454).first()
        self.assertIsNotNone(canis_lupus)
        self.assertEqual(canis_lupus.species_name, "Canis lupus")
        vespadelus = records.filter(name_id=24204).first()
        self.assertIsNotNone(vespadelus)
        self.assertEqual(vespadelus.species_name, "Vespadelus douglasorum")

    def test_nameId_collected_upload(self):
        """
        If the Name Id is not provided it is resolved from the species list
        using the Species Name column.
        """
        dataset = self._create_dataset_with_schema(
            self.project_1,
            self.data_engineer_1_client,
            self.schema_with_name_id_and_species_name(),
            dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
        )
        client = self.custodian_1_client
        rows = [
            ['Name Id', 'Species Name', 'When', 'Latitude', 'Longitude'],
            ['', 'Canis lupus', '01/01/2017', -32.0, 115.75],  # "Canis lupus"
            ['', 'Vespadelus douglasorum', '02/02/2017', -33.0, 116.0]  # "Vespadelus douglasorum"
        ]
        xlsx_path = helpers.rows_to_xlsx_file(rows)
        self.assertEqual(0, Record.objects.filter(dataset=dataset).count())
        url = reverse('api:dataset-upload', kwargs={'pk': dataset.pk})
        with open(xlsx_path, 'rb') as fp:
            resp = client.post(url, {'file': fp}, format='multipart')
        self.assertEqual(status.HTTP_200_OK, resp.status_code)
        records = Record.objects.filter(dataset=dataset)
        self.assertEqual(records.count(), len(rows) - 1)
        for record in records:
            self.assertTrue(record.name_id > 0)
            self.assertIsNotNone(record.species_name)
        canis_lupus = records.filter(name_id=25454).first()
        self.assertIsNotNone(canis_lupus)
        self.assertEqual(canis_lupus.species_name, "Canis lupus")
        vespadelus = records.filter(name_id=24204).first()
        self.assertIsNotNone(vespadelus)
        self.assertEqual(vespadelus.species_name, "Vespadelus douglasorum")

    def test_species_name_collected_api_create(self):
        """
        Same as the upload case: the species name is resolved from the Name Id
        when creating a record through the API.
        """
        dataset = self._create_dataset_with_schema(
            self.project_1,
            self.data_engineer_1_client,
            self.schema_with_name_id_and_species_name(),
            dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
        )
        client = self.custodian_1_client
        record_data = {
            'Name Id': 25454,  # "Canis lupus"
            'Species Name': 'Chubby Bat',
            'When': '12/12/2017',
            'Latitude': -32.0,
            'Longitude': 115.756
        }
        resp = client.post(
            reverse('api:record-list'),
            {'dataset': dataset.pk, 'data': record_data},
            format='json'
        )
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        record = Record.objects.filter(id=resp.json().get('id')).first()
        self.assertIsNotNone(record)
        self.assertEqual(record.name_id, 25454)
        self.assertEqual(record.species_name, "Canis lupus")

    def test_species_name_collected_api_update(self):
        """
        Updating the Name Id through a PATCH should refresh the stored
        species name accordingly.
        """
        dataset = self._create_dataset_with_schema(
            self.project_1,
            self.data_engineer_1_client,
            self.schema_with_name_id_and_species_name(),
            dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
        )
        client = self.custodian_1_client
        record_data = {
            'Name Id': 25454,  # "Canis lupus"
            'Species Name': 'Chubby Bat',
            'When': '12/12/2017',
            'Latitude': -32.0,
            'Longitude': 115.756
        }
        resp = client.post(
            reverse('api:record-list'),
            {'dataset': dataset.pk, 'data': record_data},
            format='json'
        )
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        record = Record.objects.filter(id=resp.json().get('id')).first()
        self.assertIsNotNone(record)
        self.assertEqual(record.name_id, 25454)
        self.assertEqual(record.species_name, "Canis lupus")
        # TODO: the species name in the data is not updated. Should we?
        self.assertEqual(record.data.get('Species Name'), 'Chubby Bat')

        # PATCH the record with a different Name Id.
        new_name_id = 24204
        expected_species_name = 'Vespadelus douglasorum'
        record_data['Name Id'] = new_name_id
        resp = client.patch(
            reverse('api:record-detail', kwargs={'pk': record.pk}),
            {'data': record_data},
            format='json'
        )
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        record.refresh_from_db()
        self.assertEqual(record.name_id, new_name_id)
        self.assertEqual(record.species_name, expected_species_name)
class TestCompositeSpeciesName(helpers.BaseUserTestCase):
    """
    Test for species name composed from Genus, Species, infra_rank, infra_name
    columns.
    """
    species_facade_class = helpers.LightSpeciesFacade

    @staticmethod
    def schema_with_4_columns_genus():
        """Schema with Genus/Species plus infra-specific rank and name columns."""
        optional = helpers.NOT_REQUIRED_CONSTRAINTS
        required = helpers.REQUIRED_CONSTRAINTS
        fields = [
            {"name": "Genus", "type": "string", "constraints": required},
            {"name": "Species", "type": "string", "constraints": required},
            {"name": "InfraSpecific Rank", "type": "string", "constraints": optional},
            {"name": "InfraSpecific Name", "type": "string", "constraints": required},
            {
                "name": "When",
                "type": "date",
                "constraints": required,
                "format": "any",
                "biosys": {'type': 'observationDate'}
            },
            {"name": "Latitude", "type": "number", "constraints": required},
            {"name": "Longitude", "type": "number", "constraints": required},
        ]
        return helpers.create_schema_from_fields(fields)

    @staticmethod
    def schema_with_2_columns_genus():
        """Schema with only the required Genus and Species columns."""
        required = helpers.REQUIRED_CONSTRAINTS
        fields = [
            {"name": "Genus", "type": "string", "constraints": required},
            {"name": "Species", "type": "string", "constraints": required},
            {
                "name": "When",
                "type": "date",
                "constraints": required,
                "format": "any",
                "biosys": {'type': 'observationDate'}
            },
            {"name": "Latitude", "type": "number", "constraints": required},
            {"name": "Longitude", "type": "number", "constraints": required},
        ]
        return helpers.create_schema_from_fields(fields)

    @staticmethod
    def schema_with_genus_and_species_name_no_required():
        """Schema with optional, biosys-tagged SpeciesName, Genus and Species
        columns (no species column is required)."""
        optional = helpers.NOT_REQUIRED_CONSTRAINTS
        required = helpers.REQUIRED_CONSTRAINTS
        fields = [
            {
                "name": "SpeciesName",
                "type": "string",
                "constraints": optional,
                "biosys": {"type": "speciesName"}
            },
            {
                "name": "Genus",
                "type": "string",
                "constraints": optional,
                "biosys": {"type": "genus"}
            },
            {
                "name": "Species",
                "type": "string",
                "constraints": optional,
                "biosys": {"type": "species"}
            },
            {
                "name": "When",
                "type": "date",
                "constraints": required,
                "format": "any",
                "biosys": {'type': 'observationDate'}
            },
            {"name": "Latitude", "type": "number", "constraints": required},
            {"name": "Longitude", "type": "number", "constraints": required},
        ]
        return helpers.create_schema_from_fields(fields)

    def _more_setup(self):
        # Install the mock HerbieFacade on the API views.
        from main.api.views import SpeciesMixin
        SpeciesMixin.species_facade_class = self.species_facade_class
        self.client = self.custodian_1_client

    def assert_create_dataset(self, schema):
        """
        Create a species-observation dataset from the given schema and return
        it, failing the test (with the underlying error) if creation raises.
        """
        try:
            return self._create_dataset_with_schema(
                self.project_1,
                self.data_engineer_1_client,
                schema,
                dataset_type=Dataset.TYPE_SPECIES_OBSERVATION
            )
        except Exception as e:
            # Include the exception in the failure message; previously it was
            # silently discarded, making failures hard to diagnose.
            self.fail(
                'Species Observation dataset creation failed for schema '
                '{schema}: {error}'.format(schema=schema, error=e)
            )

    def test_genus_species_only_happy_path(self):
        dataset = self.assert_create_dataset(self.schema_with_2_columns_genus())
        rows = [
            ['Genus', 'Species', 'When', 'Latitude', 'Longitude'],
            ['Canis', 'lupus', '2018-01-25', -32.0, 115.75],
        ]
        resp = self._upload_records_from_rows(rows, dataset_pk=dataset.pk)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        record = Record.objects.filter(pk=resp.json()[0]['recordId']).first()
        self.assertEqual(record.species_name, 'Canis lupus')
        self.assertEqual(record.name_id, 25454)

    def test_genus_species_and_infra_specifics_happy_path(self):
        dataset = self.assert_create_dataset(self.schema_with_4_columns_genus())
        rows = [
            ['Genus', 'Species', 'InfraSpecific Rank', 'InfraSpecific Name', 'When', 'Latitude', 'Longitude'],
            ['Canis', 'lupus', 'subsp. familiaris ', ' rank naughty dog ', '2018-01-25', -32.0, 115.75],
        ]
        resp = self._upload_records_from_rows(rows, dataset_pk=dataset.pk)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        record = Record.objects.filter(pk=resp.json()[0]['recordId']).first()
        # The parts are joined with single spaces; a species not in the list
        # ends up with name_id == -1.
        expected_species_name = 'Canis lupus subsp. familiaris rank naughty dog'
        self.assertEqual(record.species_name, expected_species_name)
        self.assertEqual(record.name_id, -1)

    def test_validation_missing_species(self):
        dataset = self.assert_create_dataset(self.schema_with_2_columns_genus())
        payload = {
            'dataset': dataset.pk,
            'data': {
                'Genus': "Canis",
                'When': '2018-01-25',
                'Latitude': -32.0,
                'Longitude': 115.75
            }
        }
        resp = self.client.post(helpers.url_post_record_strict(), payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        received_json = resp.json()
        # Expect exactly one error on 'data', for the Species field.
        self.assertIn('data', received_json)
        errors = received_json.get('data')
        self.assertIsInstance(errors, list)
        self.assertEqual(len(errors), 1)
        # The error should be of the form "Species::msg".
        pattern = re.compile(r"^Species::(.+)$")
        self.assertTrue(pattern.match(errors[0]))

    def test_genus_required_error(self):
        """
        If genus is set to be required and not provided it should not throw an
        exception but return a 400 with a field error message.
        see https://decbugs.com/view.php?id=6907 for details
        """
        dataset = self.assert_create_dataset(self.schema_with_2_columns_genus())
        self.assertTrue(dataset.schema.get_field_by_name('Genus').required)
        # Provide 3 records with no Genus (rows 2, 3 and 4).
        rows = [
            ['Genus', 'Species', 'When', 'Latitude', 'Longitude'],
            [None, 'lupus', '2018-01-25', -32.0, 115.75],
            ['', 'lupus', '2018-01-25', -32.0, 115.75],
            [' ', 'lupus', '2018-01-25', -32.0, 115.75]
        ]
        resp = self._upload_records_from_rows(rows, dataset_pk=dataset.pk, strict=False)
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        received = resp.json()
        # Expect an array with one report per failed row.
        self.assertIsInstance(received, list)
        self.assertEqual(len(received), 3)
        expected_row_report = {
            'row': 3,
            'errors': {'Genus': 'Field "Genus" has constraint "required" which is not satisfied for value "None"'},
            'warnings': {}}
        for row_report in received:
            self.assertIn('errors', row_report)
            errors = row_report.get('errors')
            self.assertIn('Genus', errors)
            self.assertEqual(errors.get('Genus'), expected_row_report['errors']['Genus'])

    def test_species_required_error(self):
        """
        If species (with genus) is set to be required and not provided it
        should not throw an exception but return a 400 with a field error
        message.
        see https://decbugs.com/view.php?id=6907 for details
        """
        dataset = self.assert_create_dataset(self.schema_with_2_columns_genus())
        self.assertTrue(dataset.schema.get_field_by_name('Genus').required)
        # Provide 3 records with no Species (rows 2, 3 and 4).
        rows = [
            ['Genus', 'Species', 'When', 'Latitude', 'Longitude'],
            ['Canis', '', '2018-01-25', -32.0, 115.75],
            ['Canis', None, '2018-01-25', -32.0, 115.75],
            ['Canis', ' ', '2018-01-25', -32.0, 115.75]
        ]
        resp = self._upload_records_from_rows(rows, dataset_pk=dataset.pk, strict=False)
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        received = resp.json()
        # Expect an array with one report per failed row.
        self.assertIsInstance(received, list)
        self.assertEqual(len(received), 3)
        expected_row_report = {
            'row': 3,
            'errors': {'Species': 'Field "Species" has constraint "required" which is not satisfied for value "None"'},
            'warnings': {}}
        for row_report in received:
            self.assertIn('errors', row_report)
            errors = row_report.get('errors')
            self.assertIn('Species', errors)
            self.assertEqual(errors.get('Species'), expected_row_report['errors']['Species'])

    def test_species_name_and_genus_requirement(self):
        """
        If the schema has speciesName and genus/species we should not impose
        any requirement: the user may choose either way to enter a species.
        """
        self.assert_create_dataset(self.schema_with_genus_and_species_name_no_required())

    def test_species_name_tag_precedence(self):
        """
        If the schema has Species Name and genus/species and the Species Name
        column is biosys tagged as type speciesName, it takes precedence over
        genus/species.
        @see https://youtrack.gaiaresources.com.au/youtrack/issue/BIOSYS-305
        Given I have a species observation dataset with fields |Genus|Species|Species Name|
        And the Species Name field is tagged with the Biosys type 'SpeciesName'
        And Genus and Species fields have no Biosys type
        When I enter |Pteropyus|vampyrus|Canis lupus|
        Then the species extracted should be Canis lupus and not Pteropyus vampyrus
        """
        schema = self.schema_with_genus_and_species_name_no_required()
        # Remove the biosys tag from Genus and Species.
        for field in schema['fields']:
            if field['name'] in ['Genus', 'Species']:
                del field['biosys']
        dataset = self.assert_create_dataset(schema)
        rows = [
            ['Genus', 'Species', 'SpeciesName', 'When', 'Latitude', 'Longitude'],
            ['Pteropyus', 'vampyrus', 'Canis lupus', '2018-01-25', -32.0, 115.75],
        ]
        resp = self._upload_records_from_rows(rows, dataset_pk=dataset.pk, strict=False)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        record = Record.objects.filter(pk=resp.json()[0]['recordId']).first()
        self.assertEqual(record.species_name, 'Canis lupus')
class TestPatch(helpers.BaseUserTestCase):
    """Tests for partial updates (PATCH) of record flags."""

    def _create_single_record(self):
        """Create a species observation dataset with one record and return it."""
        rows = [
            ['Species Name', 'When', 'Latitude', 'Longitude', 'Comments'],
            ['Chubby bat', '2018-06-01', -32, 115.75, 'It is huge!']
        ]
        dataset = self._create_dataset_and_records_from_rows(rows)
        self.assertEqual(dataset.type, Dataset.TYPE_SPECIES_OBSERVATION)
        record = dataset.record_set.all().last()
        self.assertIsNotNone(record)
        return record

    def test_patch_validated(self):
        """
        Test that we can patch just the 'validated' flag without touching the
        record data.
        """
        record = self._create_single_record()
        self.assertFalse(record.validated)
        previous_data = json.dumps(record.data)
        # patch
        url = reverse('api:record-detail', kwargs={"pk": record.pk})
        client = self.custodian_1_client
        payload = {
            'validated': True
        }
        resp = client.patch(url, payload)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        record.refresh_from_db()
        self.assertTrue(record.validated)
        # Bug fix: this previously used assertTrue(x, msg), which always
        # passed; assertEqual actually verifies the data was not modified.
        self.assertEqual(json.dumps(record.data), previous_data)

    def test_patch_locked(self):
        """
        Test that we can patch just the 'locked' flag without touching the
        record data.
        """
        record = self._create_single_record()
        self.assertFalse(record.locked)
        previous_data = json.dumps(record.data)
        # patch
        url = reverse('api:record-detail', kwargs={"pk": record.pk})
        client = self.custodian_1_client
        payload = {
            'locked': True
        }
        resp = client.patch(url, payload)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        record.refresh_from_db()
        self.assertTrue(record.locked)
        # Bug fix: see test_patch_validated -- assertEqual instead of
        # assertTrue(x, msg).
        self.assertEqual(json.dumps(record.data), previous_data)
| |
"""Views for searching."""
from collections import OrderedDict
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from django.shortcuts import render
from haystack.generic_views import SearchView
from haystack.query import SearchQuerySet
from reviewboard.accounts.mixins import (CheckLoginRequiredViewMixin,
UserProfileRequiredViewMixin)
from reviewboard.avatars import avatar_services
from reviewboard.reviews.models import ReviewRequest
from reviewboard.search import search_backend_registry
from reviewboard.search.forms import RBSearchForm
from reviewboard.site.urlresolvers import local_site_reverse
from reviewboard.site.mixins import CheckLocalSiteAccessViewMixin
class RBSearchView(CheckLoginRequiredViewMixin,
                   CheckLocalSiteAccessViewMixin,
                   UserProfileRequiredViewMixin,
                   SearchView):
    """The Review Board search view."""

    template_name = 'search/results.html'
    disabled_template_name = 'search/search_disabled.html'

    form_class = RBSearchForm

    load_all = False

    # Haystack's SearchMixin normally sets this to an instance, at which point
    # the backend loads and is reused for all queries. That assumes the
    # backend never changes, which does not hold for us, so it is cleared here
    # and assigned per-request in dispatch() instead.
    queryset = None

    ADJACENT_PAGES = 5

    @property
    def paginate_by(self):
        """The number of search results per page."""
        return search_backend_registry.results_per_page

    def dispatch(self, request, local_site=None, *args, **kwargs):
        """Dispatch the view.

        If search is disabled, the search will not be performed and the user
        will be informed.

        Args:
            request (django.http.HttpRequest):
                The current HTTP request.

            local_site (reviewboard.site.models.LocalSite):
                The LocalSite on which the search is being performed.

            *args (tuple, unused):
                Ignored positional arguments.

            **kwargs (dict, unused):
                Ignored keyword arguments.

        Returns:
            django.http.HttpResponse:
            The HTTP response for the search.
        """
        if not search_backend_registry.search_enabled:
            return render(request, self.disabled_template_name)

        self.queryset = SearchQuerySet()

        form_cls = self.get_form_class()
        form = form_cls(user=request.user,
                        local_site=local_site,
                        **self.get_form_kwargs())

        if not form.is_valid():
            return self.form_invalid(form)

        query = form.cleaned_data.get(self.search_field, '')

        if not query:
            # Nothing to search for; send the user to the dashboard-like
            # all-review-requests page instead.
            return HttpResponseRedirect(
                local_site_reverse('all-review-requests',
                                   local_site=local_site),
            )

        if query.isdigit():
            # An all-digit query is assumed to be a review request ID that the
            # user wants to jump to. This mirrors behavior we've had since
            # Review Board 1.7.
            try:
                review_request = ReviewRequest.objects.for_id(query,
                                                              local_site)
            except ReviewRequest.DoesNotExist:
                pass
            else:
                if review_request.is_accessible_by(self.request.user,
                                                   local_site=local_site,
                                                   request=request):
                    return HttpResponseRedirect(
                        review_request.get_absolute_url())

        return self.form_valid(form)

    def get_context_data(self, form=None, **kwargs):
        """Return context data for rendering the view.

        Args:
            form (reviewboard.search.forms.RBSearchForm):
                The search form instance. Included in the returned dictionary.

            **kwargs (dict):
                Additional context to be added to the returned dictionary.

        Returns:
            dict:
            The context dictionary.
        """
        context = super(RBSearchView, self).get_context_data(form=form,
                                                             **kwargs)
        paginator = context['paginator']
        page_obj = context['page_obj']
        object_list = context['object_list']

        # Build the window of page numbers around the current page.
        first_page = max(1, page_obj.number - self.ADJACENT_PAGES)
        last_page = min(paginator.num_pages,
                        page_obj.number + self.ADJACENT_PAGES)
        page_numbers = list(range(first_page, last_page + 1))

        active_filters = form.cleaned_data.get('model_filter',
                                               [form.FILTER_ALL])

        filter_types = OrderedDict()

        for filter_id, filter_type in form.FILTER_TYPES.items():
            filter_types[filter_id] = dict(
                active=(filter_id in active_filters),
                **filter_type)

        context.update({
            'filter_types': filter_types,
            'hits_returned': len(object_list),
            'last_page_num': paginator.num_pages - 1,
            'page_numbers': page_numbers,
            'show_first_page': 1 not in page_numbers,
            'show_last_page': paginator.num_pages not in page_numbers,
        })

        return context

    def render_to_response(self, context, **response_kwargs):
        """Render the search page.

        Args:
            context (dict):
                A dictionary of context from :py:meth:`get_context_data`.

            **response_kwargs (dict);
                Keyword arguments to be passed to the response class.

        Returns:
            django.http.HttpResponse:
            The rendered response.
        """
        show_users = False

        # Users only need to be fetched when avatars are enabled and an
        # active filter includes the User model.
        if avatar_services.avatars_enabled:
            show_users = any(
                filter_type['active'] and User in filter_type['models']
                for filter_type in context['filter_types'].values()
            )

        if show_users:
            page_obj = context['page_obj']

            user_pks = set()

            for result in page_obj:
                if result.content_type() == 'auth.user':
                    user_pks.add(int(result.pk))

            users = dict(
                (user.pk, user)
                for user in (
                    User.objects
                    .filter(pk__in=user_pks)
                    .select_related('profile')
                )
            )

            for result in page_obj:
                if result.content_type() == 'auth.user':
                    result.user = users[int(result.pk)]

        return super(RBSearchView, self).render_to_response(context,
                                                            **response_kwargs)
| |
"""
Detect physical devices that can be used by chutes.
This module detects physical devices (for now just network interfaces) that can
be used by chutes. This includes WAN interfaces for Internet connectivity and
WiFi interfaces which can host APs.
It also makes sure certain entries exist in the system UCI files for these
devices, for example "wifi-device" sections. These are shared between chutes,
so they only need to be added when missing.
"""
import netifaces
import operator
import os
import re
import subprocess
import six
from paradrop.base.output import out
from paradrop.base import constants, settings
from paradrop.base.exceptions import DeviceNotFoundException
from paradrop.lib.utils import datastruct, pdos, uci
# Sysfs locations for wireless PHYs and network interfaces.
IEEE80211_DIR = "/sys/class/ieee80211"
SYS_DIR = "/sys/class/net"

# Interfaces that should never be reported as usable devices.
EXCLUDE_IFACES = set(["lo"])

# Strings that identify a virtual interface.
VIF_MARKERS = [".", "veth"]

# Matches various ways of specifying WiFi devices (phy0, wlan0).
# Fixed: use a raw string so "\d" is a regex class, not an invalid
# string escape (DeprecationWarning on Python 3.6+).
WIFI_DEV_REF = re.compile(r"([a-z]+)(\d+)")

# Set of wifi-interface mode values that are handled by Paradrop rather than
# UCI configuration system.
WIFI_NONSTANDARD_MODES = set(["airshark"])
def isVirtual(ifname):
    """
    Test if an interface is a virtual one.

    FIXME: This just tests for the presence of certain strings in the
    interface name, so it is not very robust.
    """
    return any(marker in ifname for marker in VIF_MARKERS)
def isWAN(ifname):
    """
    Test if an interface is a WAN interface.

    Scans /proc/net/route for a line where the interface has destination
    "00000000" (the default route).
    """
    route_line = re.compile(r"(\w+)\s+(\w+)*")
    for line in pdos.readFile("/proc/net/route"):
        match = route_line.match(line)
        if match is None:
            continue
        if match.group(1) == ifname and match.group(2) == "00000000":
            return True
    return False
def isWireless(ifname):
    """
    Test if an interface is a wireless device (has a sysfs 'wireless' entry).
    """
    return pdos.exists("{}/{}/wireless".format(SYS_DIR, ifname))
def detectSystemDevices():
    """
    Detect devices on the system.

    The result is three lists stored in a dictionary, indexed by 'wan',
    'wifi', and 'lan'. Other devices may be supported by adding additional
    lists.

    Within each list, a device is represented by a dictionary. For all
    devices, the 'name' and 'mac' fields are defined. For WiFi devices,
    'phy' is defined in addition. Later, we may fill in more device
    information (e.g. what channels a WiFi card supports).
    """
    devices = {'wan': [], 'wifi': [], 'lan': []}
    for dev in listSystemDevices():
        # The 'type' key selects the bucket and is dropped from the entry.
        devices[dev.pop('type')].append(dev)
    return devices
def readSysFile(path):
    """
    Read a sysfs-style file and return its whitespace-stripped contents.

    Returns None when the file is missing or unreadable. Previously a bare
    `except:` was used, which also swallowed KeyboardInterrupt/SystemExit;
    now only I/O errors are caught.
    """
    try:
        with open(path, 'r') as source:
            return source.read().strip()
    except (IOError, OSError):
        return None
def getMACAddress(ifname):
    """Return the MAC address of a network interface, or None if unreadable."""
    return readSysFile("{}/{}/address".format(SYS_DIR, ifname))
def getPhyMACAddress(phy):
    """Return the MAC address of a wireless phy, or None if unreadable."""
    return readSysFile("{}/{}/macaddress".format(IEEE80211_DIR, phy))
def getWirelessPhyName(ifname):
    """Return the phy name (e.g. "phy0") for a wireless interface, or None."""
    return readSysFile("{}/{}/phy80211/name".format(SYS_DIR, ifname))
class SysReader(object):
    """
    Reads information about a wireless physical device (phy) from sysfs.
    """
    # Matches PCI bus IDs such as "0000:04:00.0".
    PCI_BUS_ID = re.compile(r"\d+:\d+:\d+\.\d+")
    # Matches USB bus IDs such as "1-1:1.0" or "1-1.4:1.0".
    USB_BUS_ID = re.compile(r"\d+\-\d+(\.\d+)*:\d+\.\d+")

    def __init__(self, phy):
        self.phy = phy
        self.device_path = "{}/{}/device".format(IEEE80211_DIR, phy)

    def getDeviceId(self, default="????"):
        """
        Return the device ID for the device.

        This is a four-digit hexadecimal number. For example, our Qualcomm
        802.11n chips have device ID 002a. Returns `default` when the sysfs
        file cannot be read.
        """
        path = os.path.join(self.device_path, "device")
        device = readSysFile(path)
        if device is None:
            device = default
        return device

    def getSlotName(self, default="????"):
        """
        Return the PCI/USB slot name for the device.

        Example: "pci/0000:04:00.0" or "usb/1-1:1.0". Returns `default` when
        no matching bus ID is found in the driver directory.
        """
        path = os.path.join(self.device_path, "driver")
        for fname in os.listdir(path):
            match = SysReader.PCI_BUS_ID.match(fname)
            if match is not None:
                return "pci/" + fname

            match = SysReader.USB_BUS_ID.match(fname)
            if match is not None:
                return "usb/" + fname

        return default

    def getVendorId(self, default="????"):
        """
        Return the vendor ID for the device.

        This is a four-digit hexadecimal number. For example, our Qualcomm
        802.11n chips have vendor ID 168c. Returns `default` when the sysfs
        file cannot be read.
        """
        path = os.path.join(self.device_path, "vendor")
        vendor = readSysFile(path)
        if vendor is None:
            vendor = default
        return vendor

    def read_uevent(self):
        """
        Read the device uevent file and return the contents as a dictionary.

        Fixed: split only on the first '=' (values may themselves contain
        '='), and strip the trailing newline from each value.
        """
        result = dict()
        path = os.path.join(self.device_path, "uevent")
        with open(path, "r") as source:
            for line in source:
                key, value = line.split("=", 1)
                result[key] = value.rstrip("\n")
        return result
def listWiFiDevices():
    """
    List WiFi devices on the system.

    Returns a list of dictionaries (one per physical device) sorted by
    PCI/USB slot, each carrying 'name', 'type', 'mac', 'phy', 'vendor',
    'device', 'slot', 'primary_interface', and a slot-based 'id' that is
    stable across reboots.
    """
    # Collect information about the physical devices (e.g. phy0 -> MAC
    # address, device type, PCI slot, etc.).
    devices = dict()
    try:
        for phy in pdos.listdir(IEEE80211_DIR):
            mac = getPhyMACAddress(phy)
            reader = SysReader(phy)

            devices[phy] = {
                'name': "wifi{}".format(mac.replace(':', '')),
                'type': 'wifi',
                'mac': mac,
                'phy': phy,
                'vendor': reader.getVendorId(),
                'device': reader.getDeviceId(),
                'slot': reader.getSlotName()
            }
    except OSError:
        # Most likely there are simply no WiFi devices on this system.
        pass

    # Collect the interfaces corresponding to each physical device
    # (e.g. phy0 -> wlan0, vwlan0.0000, etc.).
    interfaces = dict((phy, []) for phy in devices.keys())
    for ifname in pdos.listdir(SYS_DIR):
        try:
            phy = readSysFile("{}/{}/phy80211/name".format(SYS_DIR, ifname))
            ifindex = int(readSysFile("{}/{}/ifindex".format(SYS_DIR, ifname)))
            interfaces[phy].append({
                'ifname': ifname,
                'ifindex': ifindex
            })
        except Exception:
            # An error here usually means it was not a wireless interface.
            # Fixed: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            pass

    # The primary interface is the one created when the device was first
    # added; Linux assigns monotonically increasing ifindex values, so sort
    # by ifindex and take the first.
    for phy, device in six.iteritems(devices):
        if len(interfaces[phy]) > 0:
            interfaces[phy].sort(key=operator.itemgetter('ifindex'))
            device['primary_interface'] = interfaces[phy][0]['ifname']
        else:
            device['primary_interface'] = None

    # Sort the device list by PCI/USB slot to create an ordering that is
    # stable across reboots and somewhat stable across hardware swaps, then
    # assign ids per bus type.
    result = sorted(devices.values(), key=operator.itemgetter('slot'))

    counters = {'pci': 0, 'usb': 0, 'other': 0}
    for dev in result:
        if dev['slot'].startswith("pci"):
            bus = 'pci'
        elif dev['slot'].startswith("usb"):
            bus = 'usb'
        else:
            bus = 'other'
        dev['id'] = "{}-wifi-{}".format(bus, counters[bus])
        counters[bus] += 1

    return result
def listSystemDevices():
    """
    Detect network devices on the system.

    Returns a list of dictionaries, each describing one device with at least
    'name', 'mac', and 'type' ('wan', 'wifi', or 'lan') keys.
    """
    devices = list()

    for ifname in pdos.listdir(SYS_DIR):
        if ifname in EXCLUDE_IFACES:
            continue

        # Only physical interfaces are of interest.
        if isVirtual(ifname):
            continue

        # More special cases to ignore for now: bridges, docker and sit
        # interfaces.
        if ifname.startswith(("br", "docker", "sit")):
            continue

        if isWAN(ifname):
            dev_type = 'wan'
        elif isWireless(ifname):
            # Wireless devices are detected separately (listWiFiDevices).
            continue
        else:
            dev_type = 'lan'

        devices.append({
            'name': ifname,
            'mac': getMACAddress(ifname),
            'type': dev_type
        })

    devices.extend(listWiFiDevices())
    return devices
def resetWirelessDevice(phy, primary_interface):
    """
    Reset a wireless device's interfaces to clean state.

    This will rename, delete, or add an interface as necessary to make sure
    only the primary interface exists, e.g. "wlan0" for a wireless device,
    e.g. phy0.
    """
    have_primary = False
    leftovers = list()

    for ifname in pdos.listdir(SYS_DIR):
        if ifname in EXCLUDE_IFACES:
            continue
        if getWirelessPhyName(ifname) != phy:
            continue
        if ifname == primary_interface:
            have_primary = True
        else:
            leftovers.append(ifname)

    for ifname in leftovers:
        if have_primary:
            # Primary interface already exists; extra interfaces are removed.
            subprocess.call(['iw', 'dev', ifname, 'del'])
        else:
            # No primary interface yet: rename this one to become it.
            subprocess.call(['ip', 'link', 'set', 'dev', ifname, 'down',
                             'name', primary_interface])
            have_primary = True

    if not have_primary:
        # Nothing to rename, so create the primary interface from scratch.
        subprocess.call(['iw', 'phy', phy, 'interface', 'add',
                         primary_interface, 'type', 'managed'])
def flushWirelessInterfaces(phy):
    """
    Remove all virtual interfaces associated with a wireless device.

    This should be used before giving a chute exclusive access to a device
    (e.g. monitor mode), so that it does not inherit unexpected interfaces.
    """
    matching = [ifname for ifname in pdos.listdir(SYS_DIR)
                if ifname not in EXCLUDE_IFACES
                and getWirelessPhyName(ifname) == phy]
    for ifname in matching:
        subprocess.call(['iw', 'dev', ifname, 'del'])
def setConfig(chuteName, sections, filepath):
    """
    Write a chute's configuration sections to a UCI file.

    The file is rewritten only when the new sections differ from what is
    already stored for the chute; otherwise only a backup copy is made.
    """
    cfgFile = uci.UCIConfig(filepath)

    # Set the name in the comment field.
    for config, options in sections:
        config['comment'] = chuteName

    oldSections = cfgFile.getChuteConfigs(chuteName)

    if uci.chuteConfigsMatch(oldSections, sections):
        # No changes, but still save a backup of the file.
        cfgFile.backup(backupToken="paradrop")
    else:
        cfgFile.delConfigs(oldSections)
        cfgFile.addConfigs(sections)
        cfgFile.save(backupToken="paradrop", internalid=chuteName)
def readHostconfigWifi(wifi, networkDevices, builder):
    """
    Translate wifi device definitions from the host configuration into
    UCI wifi-device sections on the given builder.
    """
    for dev in wifi:
        # The preferred method is the id field, which may hold many kinds of
        # identifiers (MAC address, phy, interface, or index); it is resolved
        # via resolveWirelessDevRef to a MAC address-based name. Index-based
        # names mean host configurations can be copied to different machines
        # and still resolve unambiguously to devices.
        #
        # Older configuration files may instead use macaddr, phy, or
        # interface, which are likewise converted to a MAC-based name.
        if 'id' in dev:
            mac = resolveWirelessDevRef(dev['id'], networkDevices)['mac']
        elif 'macaddr' in dev:
            mac = dev['macaddr']
        elif 'phy' in dev:
            mac = getPhyMACAddress(dev['phy'])
        elif 'interface' in dev:
            mac = getPhyMACAddress(getWirelessPhyName(dev['interface']))
        else:
            raise Exception("Missing name or address field in wifi device definition.")

        name = "wifi{}".format(mac.replace(":", ""))

        # Copy all fields except the identifying ones (id, interface, phy).
        options = {key: value for key, value in dev.items()
                   if key not in ('id', 'interface', 'phy')}

        # pdconf identifies the device most reliably by MAC address.
        options['macaddr'] = mac

        # type is a required field, so supply a default when it is missing.
        options.setdefault('type', 'auto')

        builder.add("wireless", "wifi-device", options, name=name)
def resolveWirelessDevRef(name, networkDevices):
    """
    Resolve a WiFi device reference (wlan0, phy0, 00:11:22:33:44:55, etc.) to
    the name of the device section as used by pdconf (wifiXXXXXXXXXXXX).

    Unambiguous naming is preferred going forward (either wifiXX or the MAC
    address), but to maintain backward compatibility, we attempt to resolve
    either wlanX or phyX to the MAC address of the device that currently uses
    that name.
    """
    for device in networkDevices['wifi']:
        # Accepted identifiers for this device:
        # - MAC-based, e.g. wifi001122334455 or 00:11:22:33:44:55
        # - index-based with deterministic ordering, e.g. pci-wifi-0
        # - ambiguous, e.g. phy0 or wlan0
        identifiers = {
            device['name'],
            device['mac'],
            device['id'],
            device['phy'],
        }
        if device['primary_interface'] is not None:
            identifiers.add(device['primary_interface'])

        if name in identifiers:
            return device

    raise DeviceNotFoundException("Could not resolve wireless device {}".format(name))
def readHostconfigWifiInterfaces(wifiInterfaces, networkDevices, builder):
    """
    Translate wifi-interface definitions from the host configuration into
    UCI wifi-iface sections on the given builder.

    Interfaces using nonstandard modes (e.g. Airshark) are skipped here
    because they are handled outside of the UCI system.
    """
    for iface in wifiInterfaces:
        # We handle nonstandard modes (e.g. Airshark) separately rather than
        # through the UCI system.
        if iface.get('mode', None) in WIFI_NONSTANDARD_MODES:
            continue

        options = iface.copy()

        # There are various ways the host configuration file may have specified
        # the WiFi device (wlan0, phy0, pci-wifi-0, 00:11:22:33:44:55, etc.).
        # Try to resolve that to a device name that pdconf will recognize.
        #
        # This is best-effort: on any failure (missing 'device' key or an
        # unresolvable reference) the options pass through unchanged. Catch
        # Exception rather than using a bare except so KeyboardInterrupt and
        # SystemExit are not swallowed.
        try:
            device = resolveWirelessDevRef(options['device'], networkDevices)
            options['device'] = device['name']
        except Exception:
            pass

        builder.add("wireless", "wifi-iface", options)
def handleMissingWiFi(hostConfig):
    """
    Take appropriate action in response to missing WiFi devices.

    Depending on the host configuration, we may either emit a warning or
    reboot the system.
    """
    # Missing WiFi devices - check what we should do.
    action = datastruct.getValue(hostConfig, "system.onMissingWiFi")

    if action == "warn":
        out.warn("Missing WiFi devices.")
    elif action == "reboot":
        out.warn("Missing WiFi devices, system will be rebooted.")
        subprocess.call(["shutdown", "-r", "now"])
def checkSystemDevices(update):
    """
    Check whether expected devices are present.

    This may reboot the machine if devices are missing and the host config is
    set to do that.
    """
    devices = update.cache_get('networkDevices')
    hostConfig = update.cache_get('hostConfig')

    if not devices['wifi']:
        # No WiFi devices were detected; respond per host configuration.
        handleMissingWiFi(hostConfig)
def readHostconfigVlan(vlanInterfaces, builder):
    """
    Translate VLAN interface definitions from the host configuration into
    UCI network, dhcp, and firewall sections on the given builder.
    """
    for interface in vlanInterfaces:
        name = interface['name']

        options = {'proto': interface['proto']}
        if interface['proto'] == 'static':
            options['ipaddr'] = interface['ipaddr']
            options['netmask'] = interface['netmask']

        # TODO: Support VLANs on interfaces other than the lan bridge.
        options['ifname'] = ["br-lan.{}".format(interface['id'])]
        builder.add("network", "interface", options, name=name)

        if 'dhcp' in interface:
            dhcp = interface['dhcp']

            builder.add("dhcp", "dnsmasq", {'interface': [name]})

            builder.add("dhcp", "dhcp", {
                'interface': name,
                'start': dhcp['start'],
                'limit': dhcp['limit'],
                'leasetime': dhcp['leasetime']
            }, name=name)

            # Allow DNS (53) and DHCP (67) requests from the VLAN.
            for port in (53, 67):
                builder.add("firewall", "rule", {
                    'src': name,
                    'proto': 'udp',
                    'dest_port': port,
                    'target': 'ACCEPT'
                })

        # Make a zone entry with defaults.
        zone = datastruct.getValue(interface, "firewall.defaults", {}).copy()
        zone['name'] = name
        zone['network'] = [name]
        builder.add("firewall", "zone", zone)

        # Add forwarding entries.
        for rule in datastruct.getValue(interface, "firewall.forwarding", []):
            builder.add("firewall", "forwarding", rule)

        for rule in datastruct.getValue(interface, "firewall.rules", []):
            builder.add("firewall", "rule", rule)
class UCIBuilder(object):
    """
    UCIBuilder helps aggregate UCI configuration sections for writing to files.
    """
    # Names of the UCI files that this builder manages.
    FILES = ["dhcp", "network", "firewall", "qos", "wireless"]

    def __init__(self):
        # Map each managed file name to its list of (config, options) tuples.
        self.contents = {}
        for fname in UCIBuilder.FILES:
            self.contents[fname] = []

    def add(self, file_, type_, options, name=None):
        """
        Add a new configuration section.
        """
        config = {"type": type_}
        if name is not None:
            config["name"] = name
        self.contents[file_].append((config, options))

    def getSections(self, file_):
        """
        Get sections associated with a single file.

        Returns: list of tuples, [(config, options)]
        """
        return self.contents[file_]

    def write(self):
        """
        Write all of the configuration sections to files.
        """
        for fname in UCIBuilder.FILES:
            setConfig(constants.RESERVED_CHUTE_NAME, self.contents[fname],
                      uci.getSystemPath(fname))
def select_brlan_address(hostConfig):
    """
    Select IP address and netmask to use for LAN bridge.

    Behavior depends on the proto field, which can either be 'auto' or
    'static'. When proto is set to 'auto', we check the WAN interface address
    and choose either 10.0.0.1 or 192.168.0.1 to avoid conflict. Otherwise,
    when proto is set to 'static', we use the specified address.
    """
    proto = datastruct.getValue(hostConfig, 'lan.proto', 'auto')
    netmask = datastruct.getValue(hostConfig, 'lan.netmask', '255.255.255.0')

    if proto != 'auto':
        return hostConfig['lan']['ipaddr'], netmask

    wan_ifname = datastruct.getValue(hostConfig, 'wan.interface', 'eth0')
    ipv4_addrs = netifaces.ifaddresses(wan_ifname).get(netifaces.AF_INET, [])

    # Avoid conflicting with the WAN subnet: if WAN already holds a 10.x
    # address, fall back to the 192.168 range.
    if any(entry['addr'].startswith("10.") for entry in ipv4_addrs):
        return "192.168.0.1", netmask
    return "10.0.0.1", netmask
#
# Chute update functions
#
def getSystemDevices(update):
    """
    Detect devices on the system.

    Store device information in cache key "networkDevices" as well as
    "networkDevicesByName".
    """
    devices = detectSystemDevices()

    byName = {}
    for _dtype, dlist in six.iteritems(devices):
        for dev in dlist:
            name = dev['name']
            if name in byName:
                out.warn("Multiple network devices named {}".format(name))
            byName[name] = dev

    update.cache_set('networkDevices', devices)
    update.cache_set('networkDevicesByName', byName)
def setSystemDevices(update):
    """
    Initialize system configuration files.

    This section should only be run for host configuration updates.

    Creates basic sections that all chutes require such as the "wan"
    interface, the "lan" bridge, loopback, and firewall/QoS defaults, then
    writes them all to the system UCI files in one pass.
    """
    hostConfig = update.cache_get('hostConfig')
    networkDevices = update.cache_get('networkDevices')

    builder = UCIBuilder()

    # This section defines the default input, output, and forward policies for
    # the firewall.
    options = datastruct.getValue(hostConfig, "firewall.defaults", {})
    builder.add("firewall", "defaults", options)

    def zoneFirewallSettings(name):
        # Create zone entry with defaults (input, output, forward policies and
        # other configuration).
        #
        # Make a copy of the object from hostconfig because we modify it.
        options = datastruct.getValue(hostConfig,
                name+".firewall.defaults", {}).copy()
        options['name'] = name
        options['network'] = [name]
        builder.add("firewall", "zone", options)

        # Add forwarding entries (rules that allow traffic to move from one
        # zone to another).
        rules = datastruct.getValue(hostConfig, name+".firewall.forwarding", [])
        for rule in rules:
            builder.add("firewall", "forwarding", rule)

    if 'wan' in hostConfig:
        # WAN always uses DHCP on the configured physical interface.
        options = dict()
        options['ifname'] = hostConfig['wan']['interface']
        options['proto'] = "dhcp"
        builder.add("network", "interface", options, name="wan")

        zoneFirewallSettings("wan")

        # QoS section is emitted disabled by default.
        options = {
            "enabled": 0
        }
        builder.add("qos", "interface", options, name="wan")

    if 'lan' in hostConfig:
        # LAN is a bridge over the configured interfaces with a static
        # address chosen to avoid conflicting with the WAN subnet.
        options = dict()
        options['type'] = "bridge"
        options['bridge_empty'] = "1"

        options['proto'] = 'static'
        options['ipaddr'], options['netmask'] = select_brlan_address(hostConfig)
        options['ifname'] = hostConfig['lan']['interfaces']

        builder.add("network", "interface", options, name="lan")

        if 'dhcp' in hostConfig['lan']:
            dhcp = hostConfig['lan']['dhcp']

            options = {
                'interface': 'lan',
                'domain': settings.LOCAL_DOMAIN
            }
            builder.add("dhcp", "dnsmasq", options)

            options = {
                'interface': 'lan',
                'start': dhcp['start'],
                'limit': dhcp['limit'],
                'leasetime': dhcp['leasetime']
            }
            builder.add("dhcp", "dhcp", options, name="lan")

            # Map the local domain name to the router's LAN address.
            # NOTE(review): this reads hostConfig['lan']['ipaddr'] directly,
            # which presumably exists only with proto 'static' — confirm
            # behavior when lan.proto is 'auto'.
            options = {
                'name': settings.LOCAL_DOMAIN,
                'ip': hostConfig['lan']['ipaddr']
            }
            builder.add("dhcp", "domain", options)

        zoneFirewallSettings("lan")

        # QoS section is emitted disabled by default.
        options = {
            "enabled": 0
        }
        builder.add("qos", "interface", options, name="lan")

    # Automatically generate loopback section. There is generally not much to
    # configure for loopback, but we could add support to the host
    # configuration.
    options = {
        'ifname': ['lo'],
        'proto': 'static',
        'ipaddr': '127.0.0.1',
        'netmask': '255.0.0.0'
    }
    builder.add("network", "interface", options, name="loopback")

    # Fully-open firewall zone for loopback traffic.
    options = {
        'name': 'loopback',
        'masq': '0',
        'conntrack': '1',
        'input': 'ACCEPT',
        'forward': 'ACCEPT',
        'output': 'ACCEPT',
        'network': ['loopback']
    }
    builder.add("firewall", "zone", options)

    # Translate wifi device and interface sections from the host config.
    wifi = hostConfig.get('wifi', [])
    try:
        readHostconfigWifi(wifi, networkDevices, builder)
    except DeviceNotFoundException:
        # A referenced WiFi device is missing; warn or reboot per config.
        handleMissingWiFi(hostConfig)

    wifiInterfaces = hostConfig.get('wifi-interfaces', [])
    readHostconfigWifiInterfaces(wifiInterfaces, networkDevices, builder)

    vlanInterfaces = hostConfig.get('vlan-interfaces', [])
    readHostconfigVlan(vlanInterfaces, builder)

    # Add additional firewall rules.
    rules = datastruct.getValue(hostConfig, "firewall.rules", [])
    for rule in rules:
        builder.add("firewall", "rule", rule)

    # Write all of the changes to UCI files at once.
    builder.write()
def get_hardware_serial():
    """
    Get hardware serial number.

    The most reliable way we have that works across many hardware platforms is
    to check the eth0 MAC address.

    Returns a numeric serial number, or 0 if the MAC address is unavailable.
    """
    addr = getMACAddress("eth0")
    if addr is None:
        return 0

    # Strip separator characters before parsing as hexadecimal. The previous
    # implementation used addr.translate(None, ":.- "), which is a Python 2
    # only str API and raises TypeError on Python 3 (this module targets both
    # via six).
    digits = "".join(ch for ch in addr if ch not in ":.- ")
    return int(digits, 16)
def get_machine_id():
    """
    Return unique machine identifier.

    This is software-based but fairly standardized from the /etc/machine-id
    file. We can potentially rely on this for uniquely identifying a node.
    """
    # Prefer the systemd location, then fall back to the dbus one.
    for path in ("/etc/machine-id", "/var/lib/dbus/machine-id"):
        if os.path.isfile(path):
            return readSysFile(path)
    return None
| |
# -*- coding: utf-8 -*-
from collections import OrderedDict
from cms.constants import LEFT, REFRESH_PAGE
from cms.models import UserSettings, Placeholder
from cms.toolbar.items import Menu, ToolbarAPIMixin, ButtonList
from cms.toolbar_pool import toolbar_pool
from cms.utils import get_language_from_request
from cms.utils.compat.dj import installed_apps
from cms.utils.conf import get_cms_setting
from cms.utils.i18n import force_language
from django import forms
from django.conf import settings
from django.contrib.auth import login, logout, REDIRECT_FIELD_NAME
from django.contrib.auth.forms import AuthenticationForm
from django.core.urlresolvers import resolve, Resolver404
from django.http import HttpResponseRedirect, HttpResponse
from django.middleware.csrf import get_token
class CMSToolbarLoginForm(AuthenticationForm):
    """
    Login form shown in the CMS toolbar.

    A default form prefix of "cms" is applied (unless the caller supplies
    one) so the field names do not collide with other forms on the page, and
    both widgets carry the HTML5 required attribute.
    """
    def __init__(self, *args, **kwargs):
        # Set the default prefix BEFORE calling the parent constructor.
        # Previously this assignment happened after super().__init__ had
        # already consumed kwargs, so mutating kwargs had no effect and the
        # prefix was never applied to the form.
        kwargs['prefix'] = kwargs.get('prefix', 'cms')
        super(CMSToolbarLoginForm, self).__init__(*args, **kwargs)
        self.fields['username'].widget = forms.TextInput(
            attrs={'required': 'required'})
        self.fields['password'].widget = forms.PasswordInput(
            attrs={'required': 'required'})
class CMSToolbar(ToolbarAPIMixin):
    """
    The default CMS Toolbar.

    Aggregates the per-app toolbars registered with ``toolbar_pool``,
    tracks edit/build mode state from the session, and exposes the public
    API used by toolbar consumers (menus, buttons, object binding).
    """
    # Class-level (shared) list; presumably a registry populated by toolbar
    # consumers — confirm before converting to an instance attribute.
    watch_models = []

    def __init__(self, request):
        super(CMSToolbar, self).__init__()
        self.right_items = []
        self.left_items = []
        self.populated = False
        self.post_template_populated = False
        self.menus = {}
        self.obj = None
        self.redirect_url = None
        self.request = None
        self.is_staff = None
        self.edit_mode = None
        self.edit_mode_url_on = get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')
        self.edit_mode_url_off = get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF')
        self.disable_url = get_cms_setting('CMS_TOOLBAR_URL__DISABLE')
        self.build_mode = None
        self.use_draft = None
        self.show_toolbar = None
        self.login_form = None
        self.clipboard = None
        self.language = None
        self.toolbar_language = None
        self.simple_structure_mode = get_cms_setting('TOOLBAR_SIMPLE_STRUCTURE_MODE')
        self.show_toolbar = True
        self.init_toolbar(request)

        # Determine the application (module) that serves the current URL so
        # per-app toolbars can tell whether they are "current".
        with force_language(self.language):
            try:
                decorator = resolve(self.request.path_info).func
                try:
                    # If the original view is decorated we try to extract the
                    # real function's module instead of the decorator's one.
                    if decorator and getattr(decorator, 'func_closure', False):
                        # python 2
                        self.app_name = decorator.func_closure[0].cell_contents.__module__
                    elif decorator and getattr(decorator, '__closure__', False):
                        # python 3
                        self.app_name = decorator.__closure__[0].cell_contents.__module__
                    else:
                        raise AttributeError()
                except (TypeError, AttributeError):
                    # no decorator
                    self.app_name = decorator.__module__
            except Resolver404:
                self.app_name = ""
        toolbars = toolbar_pool.get_toolbars()

        # Trim the dotted module path down to the longest prefix that is an
        # installed app, e.g. "myapp.views.public" -> "myapp".
        parts = self.app_name.split('.')
        while parts:
            path = '.'.join(parts)
            if path in installed_apps():
                self.app_name = path
                break
            parts.pop()

        self.toolbars = OrderedDict()
        for key in toolbars:
            toolbar = toolbars[key](self.request, self, toolbars[key].check_current_app(key, self.app_name), self.app_name)
            self.toolbars[key] = toolbar

    def init_toolbar(self, request):
        """
        (Re)bind this toolbar to a request and derive per-request state
        (staff status, edit/build mode, language, user settings).
        """
        self.request = request
        self.is_staff = self.request.user.is_staff
        self.edit_mode = self.is_staff and self.request.session.get('cms_edit', False)
        self.build_mode = self.is_staff and self.request.session.get('cms_build', False)
        # Note: edit_mode and build_mode already include is_staff, so this
        # parses as (is_staff and edit_mode) or build_mode without changing
        # the result.
        self.use_draft = self.is_staff and self.edit_mode or self.build_mode
        self.show_toolbar = self.is_staff or self.request.session.get('cms_edit', False)
        self.login_form = CMSToolbarLoginForm(request=request)
        if self.request.session.get('cms_toolbar_disabled', False):
            self.show_toolbar = False
        if settings.USE_I18N:
            self.language = get_language_from_request(request)
        else:
            self.language = settings.LANGUAGE_CODE

        # We need to store the current language in case the user's preferred
        # language is different.
        self.toolbar_language = self.language

        user_settings = self.get_user_settings()
        if user_settings:
            if (settings.USE_I18N and user_settings.language in dict(settings.LANGUAGES)) or (
                    not settings.USE_I18N and user_settings.language == settings.LANGUAGE_CODE):
                self.toolbar_language = user_settings.language
            else:
                user_settings.language = self.language
                user_settings.save()
            self.clipboard = user_settings.clipboard
        if hasattr(self, 'toolbars'):
            for key, toolbar in self.toolbars.items():
                self.toolbars[key].request = self.request

    def get_user_settings(self):
        """
        Return the staff user's UserSettings, creating them (with a fresh
        clipboard placeholder) on first access. Returns None for non-staff.
        """
        user_settings = None
        if self.is_staff:
            try:
                user_settings = UserSettings.objects.select_related('clipboard').get(user=self.request.user)
            except UserSettings.DoesNotExist:
                placeholder = Placeholder.objects.create(slot="clipboard")
                user_settings = UserSettings.objects.create(
                    clipboard=placeholder,
                    language=self.language,
                    user=self.request.user,
                )
        return user_settings

    def render_addons(self, context):
        """Concatenate addon markup from all registered toolbars."""
        addons = []
        for toolbar in self.toolbars.values():
            addons.extend(toolbar.render_addons(context))
        return ''.join(addons)

    def post_template_render_addons(self, context):
        """Concatenate post-template addon markup from all toolbars."""
        addons = []
        for toolbar in self.toolbars.values():
            addons.extend(toolbar.post_template_render_addons(context))
        return ''.join(addons)

    @property
    def csrf_token(self):
        token = get_token(self.request)
        return token

    # Public API

    def get_menu(self, key, verbose_name=None, side=LEFT, position=None):
        """Return the menu registered under key, or None."""
        self.populate()
        if key in self.menus:
            return self.menus[key]
        return None

    def get_or_create_menu(self, key, verbose_name=None, side=LEFT, position=None):
        """
        Return the menu registered under key, updating its name/side/position
        if requested, or create and register a new one.
        """
        self.populate()
        if key in self.menus:
            menu = self.menus[key]
            if verbose_name:
                menu.name = verbose_name
            if menu.side != side:
                menu.side = side
            if position:
                self.remove_item(menu)
                self.add_item(menu, position=position)
            return menu
        menu = Menu(verbose_name, self.csrf_token, side=side)
        self.menus[key] = menu
        self.add_item(menu, position=position)
        return menu

    def add_button(self, name, url, active=False, disabled=False, extra_classes=None, extra_wrapper_classes=None,
                   side=LEFT, position=None):
        """Add a single plain button (wrapped in its own ButtonList)."""
        self.populate()
        item = ButtonList(extra_classes=extra_wrapper_classes, side=side)
        item.add_button(name, url, active=active, disabled=disabled, extra_classes=extra_classes)
        self.add_item(item, position=position)
        return item

    def add_modal_button(self, name, url, active=False, disabled=False, extra_classes=None, extra_wrapper_classes=None,
                         side=LEFT, position=None, on_close=REFRESH_PAGE):
        """Add a button that opens its URL in a modal dialog."""
        self.populate()
        item = ButtonList(extra_classes=extra_wrapper_classes, side=side)
        item.add_modal_button(name, url, active=active, disabled=disabled, extra_classes=extra_classes, on_close=on_close)
        self.add_item(item, position=position)
        return item

    def add_sideframe_button(self, name, url, active=False, disabled=False, extra_classes=None, extra_wrapper_classes=None,
                             side=LEFT, position=None, on_close=None):
        """Add a button that opens its URL in the side frame."""
        self.populate()
        item = ButtonList(extra_classes=extra_wrapper_classes, side=side)
        item.add_sideframe_button(name, url, active=active, disabled=disabled, extra_classes=extra_classes, on_close=on_close)
        self.add_item(item, position=position)
        return item

    def add_button_list(self, identifier=None, extra_classes=None, side=LEFT, position=None):
        """Add and return an empty ButtonList for the caller to fill."""
        self.populate()
        item = ButtonList(identifier, extra_classes=extra_classes, side=side)
        self.add_item(item, position=position)
        return item

    def set_object(self, obj):
        # First caller wins; subsequent calls do not replace the bound object.
        if not self.obj:
            self.obj = obj

    def get_object_model(self):
        """Return "app_label.modelname" for the bound object, or ''."""
        if self.obj:
            return "{0}.{1}".format(self.obj._meta.app_label, self.obj._meta.object_name).lower()
        return ''

    def get_object_pk(self):
        """Return the bound object's primary key, or ''."""
        if self.obj:
            return self.obj.pk
        return ''

    def get_object_public_url(self):
        """Return the bound object's public URL, or '' if unavailable."""
        if self.obj:
            with force_language(self.language):
                # Best-effort: the object may not implement get_public_url.
                # Catch Exception (not a bare except) so KeyboardInterrupt
                # and SystemExit propagate.
                try:
                    return self.obj.get_public_url()
                except Exception:
                    pass
        return ''

    def get_object_draft_url(self):
        """
        Return the bound object's draft URL, falling back to its absolute
        URL, or '' if neither is available.
        """
        if self.obj:
            with force_language(self.language):
                try:
                    return self.obj.get_draft_url()
                except Exception:
                    # Fall back to the generic URL; still best-effort.
                    try:
                        return self.obj.get_absolute_url()
                    except Exception:
                        pass
        return ''

    # Internal API

    def _add_item(self, item, position):
        if item.right:
            target = self.right_items
        else:
            target = self.left_items
        if position is not None:
            target.insert(position, item)
        else:
            target.append(item)

    def _remove_item(self, item):
        if item in self.right_items:
            self.right_items.remove(item)
        elif item in self.left_items:
            self.left_items.remove(item)
        else:
            raise KeyError("Item %r not found" % item)

    def _item_position(self, item):
        if item.right:
            return self.right_items.index(item)
        else:
            return self.left_items.index(item)

    def get_left_items(self):
        self.populate()
        return self.left_items

    def get_right_items(self):
        self.populate()
        return self.right_items

    def populate(self):
        """
        Get the CMS items on the toolbar
        """
        if self.populated:
            return
        self.populated = True
        # never populate the toolbar on is_staff=False
        # FIXME: In 3.1 we should really update the request/staff status
        # when toolbar is used in the cms_toolbar templatetag
        if not self.request.user.is_staff:
            return
        if self.request.session.get('cms_log_latest', False):
            del self.request.session['cms_log_latest']
        self._call_toolbar('populate')

    def post_template_populate(self):
        self.populate()
        if self.post_template_populated:
            return
        self.post_template_populated = True
        # FIXME: In 3.1 we should really update the request/staff status
        # when toolbar is used in the cms_toolbar templatetag
        if not self.request.user.is_staff:
            return
        self._call_toolbar('post_template_populate')

    def request_hook(self):
        """
        Give each toolbar a chance to intercept the request; then handle the
        toolbar's own login/logout actions. May return an HttpResponse.
        """
        response = self._call_toolbar('request_hook')
        if isinstance(response, HttpResponse):
            return response
        if self.request.method != 'POST':
            return self._request_hook_get()
        else:
            return self._request_hook_post()

    def _request_hook_get(self):
        # Logout requested via the toolbar.
        if 'cms-toolbar-logout' in self.request.GET:
            logout(self.request)
            return HttpResponseRedirect(self.request.path_info)

    def _request_hook_post(self):
        # login hook
        if 'cms-toolbar-login' in self.request.GET:
            self.login_form = CMSToolbarLoginForm(request=self.request, data=self.request.POST)
            if self.login_form.is_valid():
                login(self.request, self.login_form.user_cache)
                if REDIRECT_FIELD_NAME in self.request.GET:
                    return HttpResponseRedirect(self.request.GET[REDIRECT_FIELD_NAME])
                else:
                    return HttpResponseRedirect(self.request.path_info)
            else:
                if REDIRECT_FIELD_NAME in self.request.GET:
                    return HttpResponseRedirect(self.request.GET[REDIRECT_FIELD_NAME]+"?cms-toolbar-login-error=1")

    def _call_toolbar(self, func_name):
        """
        Call func_name on every registered toolbar, basic toolbars first,
        returning early if any of them produces an HttpResponse.
        """
        with force_language(self.toolbar_language):
            first = ('cms.cms_toolbars.BasicToolbar', 'cms.cms_toolbars.PlaceholderToolbar')
            for key in first:
                toolbar = self.toolbars.get(key)
                if not toolbar:
                    continue
                result = getattr(toolbar, func_name)()
                if isinstance(result, HttpResponse):
                    return result

            for key in self.toolbars:
                if key in first:
                    continue
                toolbar = self.toolbars[key]
                result = getattr(toolbar, func_name)()
                if isinstance(result, HttpResponse):
                    return result
| |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twistedchecker.core.runner}.
"""
import sys
import os
import operator
from functools import reduce
from pylint.reporters.text import TextReporter
from io import StringIO
from twisted.trial import unittest
import twistedchecker
from twistedchecker.core.runner import Runner
from twistedchecker.checkers.header import HeaderChecker
from twistedchecker.test.test_exceptionfinder import (
createTestFiles as createTestFilesForFindingExceptions)
class RunnerTestCase(unittest.TestCase):
"""
Test for twistedchecker.core.runner.Runner.
"""
debug = False
def setUp(self):
    """
    Redirect stdout and stderr to temporary C{StringIO} streams.
    """
    self.outputStream = StringIO()
    self.errorStream = StringIO()
    self.patch(sys, "stdout", self.outputStream)
    self.patch(sys, "stderr", self.errorStream)
def clearOutputStream(self):
    """
    Replace the captured output stream with a fresh, empty one.
    """
    self.outputStream = StringIO()
def makeRunner(self):
    """
    Return a runner instance whose output goes to this test's stream.
    """
    instance = Runner()
    instance.setOutput(self.outputStream)
    return instance
def _loadAllowedMessages(self):
    """
    Load allowed messages from test files.
    """
    pathTests = os.path.join(twistedchecker.abspath, "functionaltests")

    # Collect every Python file under the functional tests directory.
    testfiles = []
    for pathDir, _, files in os.walk(pathTests):
        testfiles.extend(os.path.join(pathDir, f)
                         for f in files if f.endswith(".py"))

    messagesAllowed = set(Runner.allowedMessagesFromPylint)
    for testfile in testfiles:
        with open(testfile) as f:
            firstline = f.readline().strip()
            # A header like "# enable: W9001,W9002" adds those messages.
            if (firstline.startswith("#") and "enable" in firstline
                    and ":" in firstline):
                messages = firstline.split(":")[1].strip().split(",")
                messagesAllowed.update(messages)
    return messagesAllowed
def test_findUselessCheckers(self):
    """
    Test for method findUselessCheckers
    """
    runner = Runner()
    registeredCheckers = sum(list(runner.linter._checkers.values()), [])

    # Locate the header checker among the registered checkers.
    headerCheckers = [c for c in registeredCheckers
                      if type(c) == HeaderChecker]
    self.assertTrue(headerCheckers)
    headerChecker = headerCheckers[0]

    uselessCheckers = runner.findUselessCheckers(
        list(headerChecker.msgs.keys())[:1])

    # Every checker except the header checker should be deemed useless.
    self.assertEqual(len(uselessCheckers) + 1, len(registeredCheckers))
    self.assertTrue(headerChecker not in uselessCheckers)
def test_unregisterChecker(self):
    """
    Test for method unregisterChecker.

    Remove HeaderChecker from registered,
    and make sure it was removed.
    """
    runner = Runner()
    registeredCheckers = sum(list(runner.linter._checkers.values()), [])

    # Make sure an instance of HeaderChecker is in registered checkers.
    headerCheckers = [c for c in registeredCheckers
                      if type(c) == HeaderChecker]
    self.assertTrue(headerCheckers)
    headerChecker = headerCheckers[0]

    # Make sure it is in the option providers.
    self.assertTrue(headerChecker in runner.linter.options_providers)

    runner.unregisterChecker(headerChecker)

    # Make sure the instance of HeaderChecker was removed.
    registeredCheckers = sum(list(runner.linter._checkers.values()), [])
    self.assertFalse(headerChecker in registeredCheckers)
    # Could not check reports because HeaderChecker is not recorded in
    # that list.
    # Make sure it was removed from option providers.
    self.assertFalse(headerChecker in runner.linter.options_providers)
def test_restrictCheckers(self):
    """
    Test for method restrictCheckers.

    Manually set allowed messages,
    then check for the result of registered checkers
    after run this method.
    """
    runner = Runner()
    runner.restrictCheckers(list(HeaderChecker.msgs.keys())[:1])

    # Only HeaderChecker should remain among the registered checkers.
    remaining = sum(list(runner.linter._checkers.values()), [])
    self.assertEqual(len(remaining), 1)
    self.assertEqual(type(remaining[0]), HeaderChecker)
def test_allMessagesAreRegistered(self):
    """
    A test to assume all tests are registered to reporter.
    """
    linter = Runner().linter
    messagesFromTests = self._loadAllowedMessages()
    messagesFromReporter = linter.reporter.messagesAllowed

    # Messages explicitly disabled in the config file are excluded.
    disabledSetting = linter.cfgfile_parser.get("TWISTEDCHECKER", "disable")
    messagesDisabled = set(disabledSetting.replace(" ", "").split(","))

    self.assertEqual(messagesFromTests - messagesDisabled,
                     messagesFromReporter)
def test_runVersion(self):
    """
    Pass argument "--version" to C{runner.run}; it should show version
    information (proving that pylint was invoked) and then exit.
    """
    runner = Runner()
    runner.setOutput(self.outputStream)
    exitResult = self.assertRaises(SystemExit, runner.run, ["--version"])
    self.assertTrue(self.outputStream.getvalue().count("Python") > 0,
                    msg="failed to call pylint")
    self.assertIsNone(runner.diffOption)
    self.assertEqual(0, exitResult.code)
def test_runNoError(self):
    """
    When checked file is clean and has no errors it exit with code 0
    without any other output.
    """
    runner = Runner()
    runner.setOutput(self.outputStream)
    # The twistedchecker/checkers/__init__.py is assumed to be clean.
    target = "twistedchecker.checkers.__init__"
    exitResult = self.assertRaises(SystemExit, runner.run, [target])
    self.assertEqual('', self.outputStream.getvalue())
    self.assertEqual(0, exitResult.code)
def test_runWithErrors(self):
    """
    When checked file is not clean it will exit with non zero exit code.
    """
    runner = Runner()
    runner.setOutput(self.outputStream)
    # The comments functional test is assumed to have at least one error.
    target = "twistedchecker.functionaltests.comments"
    exitResult = self.assertRaises(SystemExit, runner.run, [target])
    self.assertNotEqual(0, exitResult.code)
def test_parseWarnings(self):
    """
    Test for twistedchecker.core.runner.Runner.parseWarnings.
    """
    # Raw pylint-style output: module banner lines followed by warnings.
    textWarnings = """
************* Module foo
W9001: 1,0: Missing copyright header
************* Module bar
W9002: 1,0: Missing a reference to test module in header
C0111: 10,0: Missing docstring
""".strip()

    # Expected result: warnings grouped into a set per module name.
    warningsCorrect = {
        "foo": {"W9001: 1,0: Missing copyright header", },
        "bar": {"W9002: 1,0: Missing a reference "
                "to test module in header",
                "C0111: 10,0: Missing docstring"
                }
    }
    warnings = Runner().parseWarnings(textWarnings)
    self.assertEqual(warnings, warningsCorrect)
def test_runDiffNoWarnings(self):
    """
    When running in diff mode set path to result file and exit with 0 if
    no warnings were found.
    """
    runner = self.makeRunner()

    # Replace showDiffResults with a recorder so we can check it was called.
    showDiffResultsCalls = []
    runner.showDiffResults = lambda: showDiffResultsCalls.append(True)

    exitResult = self.assertRaises(
        SystemExit,
        runner.run, ['--diff', 'path/to/previous.results', 'target'])

    self.assertEqual('path/to/previous.results', runner.diffOption)
    # Called exactly once.
    self.assertEqual([True], showDiffResultsCalls)
    # Nothing in stderr or stdout.
    self.assertEqual('', self.outputStream.getvalue())
    self.assertEqual('', self.errorStream.getvalue())
    self.assertEqual(0, exitResult.code)
def test_runDiffWarnings(self):
    """
    Exit with 1 when warnings are found in diff mode.
    """
    runner = self.makeRunner()
    # A non-zero return from showDiffResults signals new warnings.
    runner.showDiffResults = lambda: 3
    exitResult = self.assertRaises(
        SystemExit,
        runner.run, ['--diff', 'path/to/previous.results', 'target'])
    self.assertEqual(1, exitResult.code)
def test_showDiffResultsReadFail(self):
    """
    Show an error and exit with 1 when failing to read diff result file.
    """
    runner = self.makeRunner()
    runner.diffOption = 'no/such/file'

    result = runner.showDiffResults()

    self.assertEqual(1, result)
    # The error message goes to stderr only; stdout stays empty.
    self.assertEqual('', self.outputStream.getvalue())
    self.assertEqual(
        "Error: Failed to read result file 'no/such/file'.\n",
        self.errorStream.getvalue(),
    )
def test_showDiffResultEmpty(self):
    """
    Return 0 when both sources are empty.
    """
    runner = self.makeRunner()
    runner.prepareDiff()
    runner._readDiffFile = lambda: ''

    self.assertEqual(0, runner.showDiffResults())
    self.assertEqual('', self.outputStream.getvalue())
    self.assertEqual('', self.errorStream.getvalue())
def test_showDiffResultNoChanges(self):
"""
Return 0 when both sources have same content.
"""
runner = self.makeRunner()
runner.prepareDiff()
content = """
************* Module foo
W9001: 1,0: Missing copyright header
""".strip()
runner._readDiffFile = lambda: content
runner.streamForDiff.write(content)
result = runner.showDiffResults()
self.assertEqual(0, result)
self.assertEqual('', self.outputStream.getvalue())
self.assertEqual('', self.errorStream.getvalue())
    def test_showDiffResultChanges(self):
        """
        Return 1 and print only the newly introduced warnings when the
        current result contains warnings that are missing from the
        previous result.
        """
        runner = self.makeRunner()
        runner.prepareDiff()
        previous = """
************* Module foo
W9001: 1,0: Missing copyright header
""".strip()
        new = """
************* Module foo
W9001: 1,0: Missing copyright header
W9001: 2,0: Missing copyright header
""".strip()
        expectedOutput = """
************* Module foo
W9001: 2,0: Missing copyright header
""".lstrip()
        runner._readDiffFile = lambda: previous
        runner.streamForDiff.write(new)
        result = runner.showDiffResults()
        self.assertEqual(1, result)
        self.assertEqual(expectedOutput, self.outputStream.getvalue())
        self.assertEqual('', self.errorStream.getvalue())
def test_formatWarnings(self):
"""
Test for twistedchecker.core.runner.Runner.formatWarnings.
"""
warnings = {
"foo": {"W9001: 1,0: Missing copyright header", },
"bar": {"W9002: 1,0: Missing a reference "
"to test module in header",
"C0111: 10,0: Missing docstring"
}
}
resultCorrect = """
************* Module bar
W9002: 1,0: Missing a reference to test module in header
C0111: 10,0: Missing docstring
************* Module foo
W9001: 1,0: Missing copyright header
""".strip()
result = Runner().formatWarnings(warnings)
self.assertEqual(result, resultCorrect)
def test_generateDiff(self):
"""
Test for twistedchecker.core.runner.Runner.generateDiff.
"""
oldWarnings = {
"foo": {"W9001: 1,0: Missing copyright header"},
"bar": {
"W9002: 1,0: Missing a reference to test module in header",
"C0111: 10,0: Missing docstring"
}
}
newWarnings = {
"foo": {
"W9001: 1,0: Missing copyright header",
"C0301: 10,0: Line too long"
},
"bar": {
"W9002: 1,0: Missing a reference to test module in header",
"C0111: 10,0: Missing docstring"
},
"baz": {
"W9001: 1,0: Missing copyright header"
}
}
diffCorrect = {
"foo": {"C0301: 10,0: Line too long"},
"baz": {"W9001: 1,0: Missing copyright header"}
}
# Make sure generated diff is correct.
diff = Runner().generateDiff(oldWarnings, newWarnings)
self.assertEqual(diff, diffCorrect)
def test_getPathList(self):
"""
Test for twistedchecker.core.runner.Runner.getPathList.
"""
workingDir = os.getcwd()
pathTwistedchecker = os.path.dirname(twistedchecker.__path__[0])
inputList = [os.path.join("twistedchecker","functionaltests"),
"twistedchecker.core.util"]
correctPaths = [os.path.join("twistedchecker","functionaltests"),
os.path.join("twistedchecker","core","util.py")]
os.chdir(pathTwistedchecker)
result = Runner().getPathList(inputList)
# transform them to relative path.
result = [os.path.relpath(path) for path in result]
os.chdir(workingDir)
self.assertEqual(result, correctPaths)
def test_setNameExceptions(self):
"""
Test for twistedchecker.core.runner.Runner.setNameExceptions.
"""
pathTestFiles = createTestFilesForFindingExceptions(self.mktemp())
self.clearOutputStream()
runner = Runner()
runner.setOutput(self.outputStream)
runner.linter.set_reporter(TextReporter())
runner.linter.config.msg_template = "{line}:{msg_id}"
runner.linter.open()
# Limit messages.
runner.linter.disable_noerror_messages()
# Enable invalid function names.
runner.linter.enable("C0103")
# Enable invalid method names.
runner.linter.enable("C9302")
workingDir = os.getcwd()
os.chdir(os.path.dirname(pathTestFiles))
moduleName = os.path.basename(pathTestFiles)
exitResult = self.assertRaises(SystemExit, runner.run, [moduleName])
os.chdir(workingDir)
predictResult = "************* Module temp.test\n7:C9302\n11:C0103\n14:C0103\n15:C9302\n"
outputResult = self.outputStream.getvalue()
self.assertEqual(outputResult, predictResult)
self.assertEqual(16, exitResult.code)
| |
#!/usr/bin/python
# TODO: document here purpose of this experiment
# Tested functions: sha3 and estream finalists
# Settings: basic settings for CPU computations, used in many papers (as EACirc 2.0 paper)
# Purpose: finding rounds counts for usable testbed
# ReadMe:
# 1. fork this script and rewrite it for your purpose
# 2. document your purpose in the header, so others can easily get into your experiment
# Rewriting:
# a) choose functions and rounds in dictionaries estream and sha
#   b) go through the text variables holding the actual configuration file
#      i) change constants in the text, if your experiment doesn't need to vary them
#      ii) for variadic changes, create a list/dict with your parameters and split the surrounding text parts; add a for loop in main and print the values you are iterating through; probably change the directory tree generation
import os
# used funs in batch
# USE exactly the string from estream_fun_names_id or worry about ID's of funs!
estream = {
    'Grain': [2, 3, 4],
    'HC-128': list(range(0, 2)),
    # 'MICKEY': list(range(0, 10)),
    'Rabbit': list(range(0, 2)),
    'Salsa20': [2, 3, 4],
    'SOSEMANUK': list(range(0, 2)),
    # 'Trivium': list(range(0, 10)),
}
sha = {
    'BLAKE': [0, 1, 2, 3],
    'Grostl': [2, 3, 4, 5],
    'JH': [6, 7, 8],
    'Keccak': list(range(1, 5)),
    'MD6': [8, 9, 10, 11],
    'Skein': [2, 3, 4, 5],
}
class Fun_args:
    """Per-cipher settings for the block-cipher batch.

    rounds     -- list of round counts to generate configurations for
    block_size -- cipher block size in bytes
    key_size   -- cipher key size in bytes
    """
    def __init__(self, rounds, block_size, key_size):
        self.rounds = rounds
        self.block_size = block_size
        self.key_size = key_size
# Block ciphers: round counts to test plus block/key sizes (in bytes).
block = {
    'TEA': Fun_args([3, 4, 5, 6], 8, 16),  # 4 is max
    'AES': Fun_args([1, 2, 3, 4], 16, 16),  # 3 is max
    'RC4': Fun_args([1], 16, 16),  # RC4 is not round based :(
    'SINGLE-DES': Fun_args([3, 4, 5, 6], 8, 8),
    'TRIPLE-DES': Fun_args([2, 3, 4, 5], 8, 24)
}
header = """{
"""
# notes
main1 = """
"seed" : null,
"num-of-epochs" : 300,
"significance-level" : 1,
"tv-size" : 16,
"tv-count" : 1000,
"""
# stream-a
streamB = """
"stream-b" : {
"type" : "pcg32-stream"
},
"""
backend1 = """
"backend" : {
"type" : "circuit",
"solver" : "global-search",
"function-set" : [ "NOP", "CONS", "NOT",
"AND", "NAND", "OR", "XOR", "NOR",
"SHIL", "SHIR", "ROTL", "ROTR",
"MASK" ],
"num-of-generations": 100,
"initializer" : {
"type" : "basic-initializer"
},
"mutator" : {
"type" : "basic-mutator",
"changes-of-functions" : 2,
"changes-of-arguments" : 2,
"changes-of-connectors" : 3
},
"evaluator" : {
"type" : "categories-evaluator",
"num-of-categories" : 8
}
}
}
"""
if __name__ == "__main__":
# mkdirs estream, sha (from current dir - run from target dir)
if not os.path.exists("./estream"):
os.makedirs("./estream")
if not os.path.exists("./sha"):
os.makedirs("./sha")
# for estream
for fun, rounds in estream.items():
fun_path = './estream/' + fun
if not os.path.exists(fun_path):
os.makedirs(fun_path)
for r in rounds:
f = open(fun_path + '/' + fun + '_r' + ("%02d" % r) + '.json', 'w')
f.write(header)
f.write(' \"notes\" : \"' + fun_path + '_r' + ("%02d" % r) + '\",')
f.write(main1)
# stream-a
f.write(' \"stream-a\" : {\n')
f.write(' \"type\" : \"estream\",\n')
f.write(' \"generator\" : \"pcg32\",\n')
f.write(' \"init-frequency\" : \"only-once\",\n')
f.write(' \"algorithm\" : \"' + fun + '\",\n')
f.write(' \"round\" : ' + str(r) + ',\n')
f.write(""" "plaintext-type" : {
"type" : "counter"
},\n""")
f.write(' \"key-type\" : \"random\",\n')
f.write(' \"iv-type\" : \"zeros\"\n')
f.write(' },')
f.write(streamB)
f.write(backend1)
f.close()
for fun, rounds in sha.items():
fun_path = './sha/' + fun
if not os.path.exists(fun_path):
os.makedirs(fun_path)
for r in rounds:
f = open(fun_path + '/' + fun + '_r' + ("%02d" % r) + '.json', 'w')
f.write(header)
f.write(' \"notes\" : \"' + fun_path + '_r' + ("%02d" % r) + '\",')
f.write(main1)
# stream-a
f.write(' \"stream-a\" : {\n')
f.write(' \"type\" : \"sha3\",\n')
f.write(""" "source" : {
"type" : "counter"
},\n""")
f.write(' \"algorithm\" : \"' + fun + '\",\n')
f.write(' \"round\" : ' + str(r) + ',\n')
f.write(' \"hash-bitsize\" : 256\n')
f.write(' },\n')
f.write(streamB)
f.write(backend1)
f.close()
for fun, fun_args in block.items():
fun_path = './block/' + fun
if not os.path.exists(fun_path):
os.makedirs(fun_path)
for r in fun_args.rounds:
f = open(fun_path + '/' + fun + '_r' + ("%02d" % r) + '.json', 'w')
f.write(header)
f.write(' \"notes\" : \"' + fun_path + '_r' + ("%02d" % r) + '\",')
f.write(main1)
# stream-a
f.write(' \"stream-a\" : {\n')
f.write(' \"type\" : \"block\",\n')
f.write(' \"generator\" : \"pcg32\",\n')
f.write(' \"init-frequency\" : \"only-once\",\n')
f.write(' \"algorithm\" : \"' + fun + '\",\n')
f.write(' \"round\" : ' + str(r) + ',\n')
f.write(' \"block-size\" : ' + str(fun_args.block_size) + ',\n')
f.write(""" "plaintext" : {
"type" : "counter"
},\n""")
f.write(' \"key-size\" : ' + str(fun_args.key_size) + ',\n')
f.write(""" "key" : {
"type" : "pcg32-stream"
},\n""")
f.write(' \"mode\" : \"ECB\",\n')
f.write(""" "iv" : {
"type" : "false-stream"
}\n""")
f.write(' },\n')
f.write(streamB)
f.write(backend1)
f.close()
# for rnd-rnd
f = open('rnd_rnd.json', 'w')
f.write(header)
f.write(' \"notes\" : \"Big testbed: rnd-rnd\",')
f.write(main1)
# stream-a
f.write(' \"stream-a\" : {\n')
f.write(' \"type\" : \"pcg32-stream\"\n')
f.write(' },')
f.write(streamB)
f.write(backend1)
f.close()
| |
"""
kombu.connection
================
Broker connection and pools.
:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import socket
from copy import copy
from itertools import count
from Queue import Empty
from kombu import exceptions
from kombu.transport import get_transport_cls
from kombu.utils import retry_over_time
from kombu.utils.compat import OrderedDict, LifoQueue as _LifoQueue
from kombu.utils.functional import wraps
# Debug-logging switches.  NOTE(review): any non-empty string value --
# including "0" or "no" -- is truthy here; confirm that is intended.
_LOG_CONNECTION = os.environ.get("KOMBU_LOG_CONNECTION", False)
_LOG_CHANNEL = os.environ.get("KOMBU_LOG_CHANNEL", False)
#: Connection info -> URI
URI_FORMAT = """\
%(transport)s://%(userid)s@%(hostname)s%(port)s%(virtual_host)s\
"""
class BrokerConnection(object):
    """A connection to the broker.
    :keyword hostname: Hostname/address of the server to connect to.
      Default is ``"localhost"``.
    :keyword userid: Username. Default is ``"guest"``.
    :keyword password: Password. Default is ``"guest"``.
    :keyword virtual_host: Virtual host. Default is ``"/"``.
    :keyword port: Port of the server. Default is transport specific.
    :keyword insist: Insist on connecting to a server.
      In a configuration with multiple load-sharing servers, the insist
      option tells the server that the client is insisting on a connection
      to the specified server. Default is ``False``.
    :keyword ssl: Use ssl to connect to the server. Default is ``False``.
    :keyword transport: Transport class to use. Can be a class,
      or a string specifying the path to the class. (e.g.
      ``kombu.transport.pyamqplib.Transport``), or one of the aliases:
      ``amqplib``, ``pika``, ``redis``, ``memory``.
    :keyword connect_timeout: Timeout in seconds for connecting to the
      server. May not be suported by the specified transport.
    :keyword transport_options: A dict of additional connection arguments to
      pass to alternate kombu channel implementations. Consult the transport
      documentation for available options.
    **Usage**
    Creating a connection::
        >>> conn = BrokerConnection("rabbit.example.com")
    The connection is established lazily when needed. If you need the
    connection to be established, then force it to do so using
    :meth:`connect`::
        >>> conn.connect()
    Remember to always close the connection::
        >>> conn.release()
    """
    URI_FORMAT = URI_FORMAT
    #: Class-level defaults; overridden per instance by __init__ when the
    #: corresponding argument is supplied.
    port = None
    virtual_host = "/"
    connect_timeout = 5
    #: Internal state, managed by connect()/_close().
    _closed = None
    _connection = None
    _transport = None
    _logger = None
    def __init__(self, hostname="localhost", userid=None,
            password=None, virtual_host="/", port=None, insist=False,
            ssl=False, transport=None, connect_timeout=5, backend_cls=None,
            transport_options=None, **kwargs):
        self.hostname = hostname
        self.userid = userid
        self.password = password
        self.virtual_host = virtual_host or self.virtual_host
        self.port = port or self.port
        self.insist = insist
        self.connect_timeout = connect_timeout or self.connect_timeout
        self.ssl = ssl
        # backend_cls argument will be removed shortly.
        self.transport_cls = transport or backend_cls
        if transport_options is None:
            transport_options = {}
        self.transport_options = transport_options
        if _LOG_CONNECTION:
            # Only import/create the logger when connection logging is
            # enabled via the KOMBU_LOG_CONNECTION environment variable.
            from kombu.utils.log import get_logger
            self._logger = get_logger("kombu.connection")
    def _debug(self, msg, ident="[Kombu connection:0x%(id)x] ", **kwargs):
        # No-op unless a logger was set up in __init__.  The ident prefix
        # embeds id(self) so concurrent connections can be told apart.
        if self._logger:
            self._logger.debug((ident + unicode(msg)) % {"id": id(self)},
                               **kwargs)
    def connect(self):
        """Establish connection to server immediately."""
        self._closed = False
        # Reading the property triggers the actual (otherwise lazy)
        # connection establishment.
        return self.connection
    def channel(self):
        """Request a new channel."""
        self._debug("create channel")
        chan = self.transport.create_channel(self.connection)
        if _LOG_CHANNEL:
            # Wrap the channel so each method call is logged.
            from kombu.utils.debug import Logwrapped
            return Logwrapped(chan, "kombu.channel",
                              "[Kombu channel:%(channel_id)s] ")
        return chan
    def drain_events(self, **kwargs):
        """Wait for a single event from the server.
        :keyword timeout: Timeout in seconds before we give up.
            Raises :exc:`socket.timeout` if the timeout is execeded.
        Usually used from an event loop.
        """
        return self.transport.drain_events(self.connection, **kwargs)
    def _close(self):
        # Best-effort teardown: errors raised while closing are
        # deliberately swallowed (the peer may already be gone).
        if self._connection:
            try:
                self.transport.close_connection(self._connection)
            except self.transport.connection_errors + (AttributeError,
                                                       socket.error):
                pass
            self._connection = None
            self._debug("closed")
        if self._transport:
            # Break the transport <-> client reference cycle.
            self._transport.client = None
            self._transport = None
        self._closed = True
    def release(self):
        """Close the connection (if open)."""
        self._close()
    # ``close`` is an alias kept for API compatibility.
    close = release
    def ensure_connection(self, errback=None, max_retries=None,
            interval_start=2, interval_step=2, interval_max=30):
        """Ensure we have a connection to the server.
        If not retry establishing the connection with the settings
        specified.
        :keyword errback: Optional callback called each time the connection
          can't be established. Arguments provided are the exception
          raised and the interval that will be slept ``(exc, interval)``.
        :keyword max_retries: Maximum number of times to retry.
          If this limit is exceeded the connection error will be re-raised.
        :keyword interval_start: The number of seconds we start sleeping for.
        :keyword interval_step: How many seconds added to the interval
          for each retry.
        :keyword interval_max: Maximum number of seconds to sleep between
          each retry.
        """
        retry_over_time(self.connect, self.connection_errors, (), {},
                        errback, max_retries,
                        interval_start, interval_step, interval_max)
        return self
    def ensure(self, obj, fun, errback=None, max_retries=None,
            interval_start=1, interval_step=1, interval_max=1):
        """Ensure operation completes, regardless of any channel/connection
        errors occuring.
        Will retry by establishing the connection, and reapplying
        the function.
        :param fun: Method to apply.
        :keyword errback: Optional callback called each time the connection
          can't be established. Arguments provided are the exception
          raised and the interval that will be slept ``(exc, interval)``.
        :keyword max_retries: Maximum number of times to retry.
          If this limit is exceeded the connection error will be re-raised.
        :keyword interval_start: The number of seconds we start sleeping for.
        :keyword interval_step: How many seconds added to the interval
          for each retry.
        :keyword interval_max: Maximum number of seconds to sleep between
          each retry.
        **Example**
        This is an example ensuring a publish operation::
            >>> def errback(exc, interval):
            ...     print("Couldn't publish message: %r. Retry in %ds" % (
            ...             exc, interval))
            >>> publish = conn.ensure(producer, producer.publish,
            ...                       errback=errback, max_retries=3)
            >>> publish(message, routing_key)
        """
        max_retries = max_retries or 0
        @wraps(fun)
        def _insured(*args, **kwargs):
            # got_connection records that we already reconnected once and
            # revived ``obj``; a failure after that is re-raised as-is.
            got_connection = 0
            for retries in count(0):
                try:
                    return fun(*args, **kwargs)
                except self.connection_errors + self.channel_errors, exc:
                    self._debug("ensure got exception: %r" % (exc, ),
                                exc_info=sys.exc_info())
                    if got_connection or \
                            max_retries and retries > max_retries:
                        raise
                    errback and errback(exc, 0)
                    # Drop the dead connection and re-establish it.
                    self._connection = None
                    self.close()
                    remaining_retries = max_retries and \
                            max(max_retries - retries, 1)
                    self.ensure_connection(errback,
                                           remaining_retries,
                                           interval_start,
                                           interval_step,
                                           interval_max)
                    # Give the caller's object a fresh channel before the
                    # next attempt.
                    obj.revive(self.channel())
                    got_connection += 1
        _insured.func_name = _insured.__name__ = "%s(insured)" % fun.__name__
        return _insured
    def create_transport(self):
        return self.get_transport_cls()(client=self)
    create_backend = create_transport   # FIXME
    def get_transport_cls(self):
        """Get the currently used transport class."""
        transport_cls = self.transport_cls
        if not transport_cls or isinstance(transport_cls, basestring):
            # Resolve aliases / dotted paths to an actual class.
            transport_cls = get_transport_cls(transport_cls)
        return transport_cls
    def clone(self, **kwargs):
        """Create a copy of the connection with the same connection
        settings."""
        return self.__class__(**dict(self.info(), **kwargs))
    def info(self):
        """Get connection info."""
        # Default to the amqplib transport when none was configured.
        transport_cls = self.transport_cls or "amqplib"
        port = self.port or self.transport.default_port
        return OrderedDict((("hostname", self.hostname),
                            ("userid", self.userid),
                            ("password", self.password),
                            ("virtual_host", self.virtual_host),
                            ("port", port),
                            ("insist", self.insist),
                            ("ssl", self.ssl),
                            ("transport", transport_cls),
                            ("transport_options", self.transport_options),
                            ("connect_timeout", self.connect_timeout)))
    def __hash__(self):
        # Relies on info() returning an OrderedDict with a stable key
        # order, so equal settings hash equally.
        return hash("|".join(map(str, self.info().itervalues())))
    def as_uri(self):
        # Render the connection settings using URI_FORMAT.
        fields = self.info()
        port = fields["port"]
        if port:
            fields["port"] = ":%s" % (port, )
        vhost = fields["virtual_host"]
        if not vhost.startswith('/'):
            fields["virtual_host"] = '/' + vhost
        return self.URI_FORMAT % fields
    def Pool(self, limit=None, preload=None):
        """Pool of connections.
        See :class:`ConnectionPool`.
        :keyword limit: Maximum number of active connections.
          Default is no limit.
        :keyword preload: Number of connections to preload
          when the pool is created. Default is 0.
        *Example usage*::
            >>> pool = connection.Pool(2)
            >>> c1 = pool.acquire()
            >>> c2 = pool.acquire()
            >>> c3 = pool.acquire()
            Traceback (most recent call last):
              File "<stdin>", line 1, in <module>
              File "kombu/connection.py", line 354, in acquire
              raise ConnectionLimitExceeded(self.limit)
            kombu.exceptions.ConnectionLimitExceeded: 2
            >>> c1.release()
            >>> c3 = pool.acquire()
        """
        return ConnectionPool(self, limit, preload)
    def ChannelPool(self, limit=None, preload=None):
        """Pool of channels.
        See :class:`ChannelPool`.
        :keyword limit: Maximum number of active channels.
          Default is no limit.
        :keyword preload: Number of channels to preload
          when the pool is created. Default is 0.
        *Example usage*::
            >>> pool = connection.ChannelPool(2)
            >>> c1 = pool.acquire()
            >>> c2 = pool.acquire()
            >>> c3 = pool.acquire()
            Traceback (most recent call last):
              File "<stdin>", line 1, in <module>
              File "kombu/connection.py", line 354, in acquire
              raise ChannelLimitExceeded(self.limit)
            kombu.connection.ChannelLimitExceeded: 2
            >>> c1.release()
            >>> c3 = pool.acquire()
        """
        return ChannelPool(self, limit, preload)
    def SimpleQueue(self, name, no_ack=None, queue_opts=None,
            exchange_opts=None, channel=None, **kwargs):
        """Create new :class:`~kombu.simple.SimpleQueue`, using a channel
        from this connection.
        If ``name`` is a string, a queue and exchange will be automatically
        created using that name as the name of the queue and exchange,
        also it will be used as the default routing key.
        :param name: Name of the queue/or a :class:`~kombu.entity.Queue`.
        :keyword no_ack: Disable acknowledgements. Default is false.
        :keyword queue_opts: Additional keyword arguments passed to the
          constructor of the automatically created
          :class:`~kombu.entity.Queue`.
        :keyword exchange_opts: Additional keyword arguments passed to the
          constructor of the automatically created
          :class:`~kombu.entity.Exchange`.
        :keyword channel: Channel to use. If not specified a new channel
           from the current connection will be used. Remember to call
           :meth:`~kombu.simple.SimpleQueue.close` when done with the
           object.
        """
        from kombu.simple import SimpleQueue
        channel_autoclose = False
        if channel is None:
            # We own this channel, so the SimpleQueue should close it.
            channel = self.channel()
            channel_autoclose = True
        return SimpleQueue(channel, name, no_ack, queue_opts, exchange_opts,
                           channel_autoclose=channel_autoclose, **kwargs)
    def SimpleBuffer(self, name, no_ack=None, queue_opts=None,
            exchange_opts=None, channel=None, **kwargs):
        """Create new :class:`~kombu.simple.SimpleQueue` using a channel
        from this connection.
        Same as :meth:`SimpleQueue`, but configured with buffering
        semantics. The resulting queue and exchange will not be durable, also
        auto delete is enabled. Messages will be transient (not persistent),
        and acknowledgements are disabled (``no_ack``).
        """
        from kombu.simple import SimpleBuffer
        channel_autoclose = False
        if channel is None:
            # We own this channel, so the SimpleBuffer should close it.
            channel = self.channel()
            channel_autoclose = True
        return SimpleBuffer(channel, name, no_ack, queue_opts, exchange_opts,
                            channel_autoclose=channel_autoclose, **kwargs)
    def _establish_connection(self):
        self._debug("establishing connection...")
        conn = self.transport.establish_connection()
        self._debug("connection established: %r" % (conn, ))
        return conn
    def __repr__(self):
        """``x.__repr__() <==> repr(x)``"""
        return "<BrokerConnection: %s at 0x%x>" % (self.as_uri(), id(self))
    def __copy__(self):
        """``x.__copy__() <==> copy(x)``"""
        return self.clone()
    def __reduce__(self):
        # NOTE(review): the info() values are passed positionally to
        # __init__ on unpickling; transport_options/connect_timeout appear
        # swapped relative to __init__'s parameter order -- verify.
        return (self.__class__, tuple(self.info().values()), None)
    def __enter__(self):
        return self
    def __exit__(self, *args):
        self.release()
    @property
    def connection(self):
        """The underlying connection object.
        .. warning::
            This instance is transport specific, so do not
            depend on the interface of this object.
        """
        if self._closed:
            return
        # (Re-)establish lazily when there is no live, verified connection.
        if not self._connection or not \
                self.transport.verify_connection(self._connection):
            self._connection = self._establish_connection()
            self._closed = False
        return self._connection
    @property
    def host(self):
        """The host as a hostname/port pair separated by colon."""
        return ":".join([self.hostname, str(self.port)])
    @property
    def transport(self):
        # Created lazily on first use.
        if self._transport is None:
            self._transport = self.create_transport()
        return self._transport
    @property
    def connection_errors(self):
        """List of exceptions that may be raised by the connection."""
        return self.transport.connection_errors
    @property
    def channel_errors(self):
        """List of exceptions that may be raised by the channel."""
        return self.transport.channel_errors
#: Backwards-compatible alias for :class:`BrokerConnection`.
Connection = BrokerConnection
class Resource(object):
    """Abstract pool of reusable resources.

    Subclasses provide ``new()`` (create a resource), :meth:`setup`
    (pre-populate the pool) and a ``LimitExceeded`` exception class.
    """
    def __init__(self, limit=None, preload=None):
        self.limit = limit
        self.preload = preload or 0
        # LIFO queue: the most recently returned resource is handed out
        # first.
        self._resource = _LifoQueue()
        # Resources currently checked out of the pool.
        self._dirty = set()
        self.setup()
    def setup(self):
        raise NotImplementedError("subclass responsibilty")
    def _add_when_empty(self):
        # Queue was empty: either we are at the limit (raise), or grow
        # the pool by one fresh resource.
        if self.limit and len(self._dirty) >= self.limit:
            raise self.LimitExceeded(self.limit)
        # All taken, put new on the queue and
        # try get again, this way the first in line
        # will get the resource.
        self._resource.put_nowait(self.new())
    def acquire(self, block=False, timeout=None):
        """Acquire resource.
        :keyword block: If the limit is exceeded,
          block until there is an available item.
        :keyword timeout: Timeout to wait
          if ``block`` is true. Default is :const:`None` (forever).
        :raises LimitExceeded: if block is false
          and the limit has been exceeded.
        """
        if self.limit:
            while 1:
                try:
                    resource = self._resource.get(block=block, timeout=timeout)
                except Empty:
                    self._add_when_empty()
                else:
                    resource = self.prepare(resource)
                    self._dirty.add(resource)
                    break
        else:
            # Unbounded pool: create resources on demand.
            resource = self.prepare(self.new())
        @wraps(self.release)
        def _release():
            self.release(resource)
        # Convenience so callers can write ``resource.release()``.
        resource.release = _release
        return resource
    def prepare(self, resource):
        # Hook: turn a pooled entry into a usable resource.
        return resource
    def close_resource(self, resource):
        resource.close()
    def release_resource(self, resource):
        # Hook called after a resource was returned to the pool.
        pass
    def release(self, resource):
        """Release resource so it can be used by another thread.
        The caller is responsible for discarding the object,
        and to never use the resource again. A new resource must
        be acquired if so needed.
        """
        if self.limit:
            self._dirty.discard(resource)
            self._resource.put_nowait(resource)
            self.release_resource(resource)
        else:
            # Not pooling -- just close it.
            self.close_resource(resource)
    def force_close_all(self):
        """Closes and removes all resources in the pool (also those in use).
        Can be used to close resources from parent processes
        after fork (e.g. sockets/connections).
        """
        dirty = self._dirty
        resource = self._resource
        while 1:
            try:
                dres = dirty.pop()
            except KeyError:
                break
            self.close_resource(dres)
        # NOTE: reaches into the Queue's internals (mutex/queue) to drain
        # it while holding the lock.
        resource.mutex.acquire()
        try:
            while 1:
                try:
                    res = resource.queue.pop()
                except IndexError:
                    break
                self.close_resource(res)
        finally:
            resource.mutex.release()
class PoolChannelContext(object):
    """Context manager yielding a ``(connection, channel)`` pair taken
    from a connection pool.

    On exit the channel is closed and the connection is returned to
    the pool.
    """
    def __init__(self, pool, block=False):
        self.pool = pool
        self.block = block
    def __enter__(self):
        connection = self.pool.acquire(block=self.block)
        channel = connection.channel()
        self.conn, self.chan = connection, channel
        return connection, channel
    def __exit__(self, *exc_info):
        # Close the channel first, then hand the connection back.
        self.chan.close()
        self.conn.release()
class ConnectionPool(Resource):
    """Pool of broker connections cloned from a template connection."""
    LimitExceeded = exceptions.ConnectionLimitExceeded
    def __init__(self, connection, limit=None, preload=None):
        self.connection = connection
        super(ConnectionPool, self).__init__(limit=limit,
                                             preload=preload)
    def new(self):
        # Each pooled entry is an independent copy of the template
        # connection.
        return copy(self.connection)
    def release_resource(self, resource):
        resource._debug("released")
    def acquire_channel(self, block=False):
        # Context manager yielding (connection, channel); see
        # PoolChannelContext.
        return PoolChannelContext(self, block)
    def setup(self):
        if self.limit:
            for i in xrange(self.limit):
                conn = self.new()
                if i < self.preload:
                    # Only the first ``preload`` connections are
                    # established eagerly.
                    conn.connect()
                self._resource.put_nowait(conn)
    def prepare(self, resource):
        resource._debug("acquired")
        # Make sure the connection is established before handing it out.
        resource.connect()
        return resource
class ChannelPool(Resource):
    """Pool of channels created from a single connection."""
    LimitExceeded = exceptions.ChannelLimitExceeded

    def __init__(self, connection, limit=None, preload=None):
        self.connection = connection
        super(ChannelPool, self).__init__(limit=limit,
                                          preload=preload)

    def new(self):
        # Return the (unbound) channel factory; prepare() turns it into
        # an actual channel the first time it is acquired.
        return self.connection.channel

    def setup(self):
        # Bug fix: guard against ``limit=None`` -- ``xrange(None)`` raises
        # TypeError, so an unlimited ChannelPool crashed on construction.
        # This mirrors the guard in ConnectionPool.setup.
        if self.limit:
            channel = self.new()
            for i in xrange(self.limit):
                # Pre-create the first ``preload`` channels; store the
                # factory for the rest so channels are made lazily.
                self._resource.put_nowait(
                        i < self.preload and channel() or channel)

    def prepare(self, channel):
        # Entries may be either a channel or a channel factory.
        if callable(channel):
            channel = channel()
        return channel
| |
#!/usr/bin/env python
"""
wrapper program to run transcriptome assembly using TransriptSkimmer,
stringtie, and cufflinks on sequencing read alignment data.
In advance settings it is good to give the options specific to an
organism.
Requirement:
TransriptSkimmer -
cufflinks -
stringtie -
python libraries:
pysam
"""
import os
import sys
import pysam
import shutil
import subprocess
def run_stringtie(org_name, read_map_dir, result_dir, trans_pred_file="_tmp_strtie_genes.gff"):
    """
    run stringtie program on mapped reads without genome annotation

    @args org_name: organism short name used to locate the alignment file
    @type org_name: str
    @args read_map_dir: directory containing the read alignment BAM files
    @type read_map_dir: str
    @args result_dir: directory where stringtie is executed
    @type result_dir: str
    @args trans_pred_file: gtf file with transcript prediction
    @type trans_pred_file: str

    example: stringtie H_sapiens_Aligned_mmr_sortbyCoord.bam -o H_sapiens_stringtie_genes.gff -f 0.7 -m 400 -j 10 -c 10
    """
    #TODO
    # create a function which handles the sorting of reads in a bam file according to the coordinates.
    #
    ## probe for the binary; OSError means it is not on the $PATH (the
    ## previous bare "except" also hid KeyboardInterrupt and real bugs).
    try:
        subprocess.call(["stringtie"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError:
        exit("Please make sure that the `stringtie` binary is in your $PATH")
    print("preparing stringtie run for organism %s" % org_name)
    ## prefer the coordinate-sorted mmr alignment, fall back to unsorted
    bam_file = "%s/%s_Aligned_mmr_sortbyCoord.bam" % (read_map_dir, org_name)
    if not os.path.isfile(bam_file):
        sys.stdout.write("failed to fetch sorted mmr BAM file for organism: %s, trying to get the mmr file...\n" % org_name)
        bam_file = "%s/%s_Aligned_mmr.bam" % (read_map_dir, org_name)
        if not os.path.isfile(bam_file):
            exit("error: failed to fetch mmr BAM file for organism %s" % org_name)
    ## sorting, indexing the bam file
    ## NOTE(review): this runs even when the _sortbyCoord file was found,
    ## producing a doubly-suffixed copy -- confirm that is intended.
    file_prefix, ext = os.path.splitext(bam_file)
    sorted_bam = "%s_sortbyCoord" % file_prefix
    sys.stdout.write("trying to sort based by the coordinates with output prefix as: %s\n" % sorted_bam)
    if not os.path.isfile("%s.bam" % sorted_bam):
        pysam.sort(bam_file, sorted_bam)
    bam_file = "%s.bam" % sorted_bam
    print('using bam file from %s' % bam_file)
    ## stringtie needs an index next to the BAM file
    if not os.path.exists(bam_file + ".bai"):
        pysam.index(bam_file)
    strtie_run="stringtie %s \
    -o %s \
    -f 0.7 \
    -m 400 \
    -j 10 \
    -c 10 \
    " % (bam_file, trans_pred_file)
    print('\trun stringtie as: %s' % strtie_run)
    try:
        os.chdir(result_dir)
        process = subprocess.Popen(strtie_run, shell=True)
        returncode = process.wait()
        if returncode != 0:
            raise Exception("Exit status return code = %i" % returncode)
    except Exception as e:
        exit('Error running stringtie.\n%s' % str(e))
def run_cufflinks(org_db, num_cpus=4):
    """
    run cufflinks program on mapped reads

    @args org_db: organism details dict providing short_name,
        max_intron_len, read_assembly_dir and read_map_dir entries
    @type org_db: dict
    @args num_cpus: number of worker threads for cufflinks (default 4)
    @type num_cpus: int
    """
    ## probe for the binary; OSError means it is not on the $PATH (the
    ## previous bare "except" also hid KeyboardInterrupt and real bugs).
    try:
        subprocess.call(["cufflinks"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError:
        exit("Please make sure that the `Cufflinks` binary is in your $PATH")
    org_name = org_db['short_name']
    print("preparing for cufflinks run for organism %s" % org_name)
    min_intron_length = 20
    min_isoform_frac = 0.25
    max_intron_length = org_db['max_intron_len']
    result_dir = org_db['read_assembly_dir']
    ## prefer the coordinate-sorted mmr alignment, fall back to unsorted
    bam_file = "%s/%s_Aligned_mmr_sortbyCoord.bam" % (org_db['read_map_dir'], org_name)
    if not os.path.isfile(bam_file):
        sys.stdout.write("failed to fetch sorted mmr BAM file for organism: %s, trying to get the mmr file...\n" % org_name)
        bam_file = "%s/%s_Aligned_mmr.bam" % (org_db['read_map_dir'], org_name)
        if not os.path.isfile(bam_file):
            exit("error: failed to fetch mmr BAM file for organism %s" % org_name)
    ## sorting, indexing the bam file
    ## NOTE(review): this runs even when the _sortbyCoord file was found,
    ## producing a doubly-suffixed copy -- confirm that is intended.
    file_prefix, ext = os.path.splitext(bam_file)
    sorted_bam = "%s_sortbyCoord" % file_prefix
    sys.stdout.write("trying to sort based by the coordinates with output prefix as: %s\n" % sorted_bam)
    if not os.path.isfile("%s.bam" % sorted_bam):
        pysam.sort(bam_file, sorted_bam)
    bam_file = "%s.bam" % sorted_bam
    print('using bam file from %s' % bam_file)
    ## cufflinks needs an index next to the BAM file
    if not os.path.exists(bam_file + ".bai"):
        pysam.index(bam_file)
    ## always use quiet mode to avoid problems with storing log output.
    cli_cuff = "cufflinks -q --no-update-check \
    -F %.2f \
    -I %d \
    --min-intron-length %d \
    --library-type fr-unstranded \
    -p %d \
    -o %s \
    %s" % (min_isoform_frac, max_intron_length, min_intron_length, num_cpus, result_dir, bam_file)
    sys.stdout.write('\trun cufflinks as: %s \n' % cli_cuff)
    try:
        os.chdir(result_dir)
        process = subprocess.Popen(cli_cuff, shell=True)
        returncode = process.wait()
        if returncode != 0:
            raise Exception("Exit status return code = %i" % returncode)
    except Exception as e:
        ## unlike run_stringtie this reports the failure without exiting
        print('Error running cufflinks.\n%s' % str(e))
def run_trsk(org_db, out_gff_file="_tmp_trsk_genes.gff"):
    """
    run TransriptSkimmer with mapped reads and genome sequence

    @args org_db: organism annotation details (paths, exon/intron length limits)
    @type org_db: dict
    @args out_gff_file: name of the gff file written by TranscriptSkimmer
    @type out_gff_file: str
    """
    try:
        subprocess.call(["infer_genes"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError:
        ## OSError is what subprocess raises when the binary is missing; the
        ## previous bare except also swallowed KeyboardInterrupt/SystemExit.
        exit("Please make sure that the `TranscriptSkimmer` binary is in your $PATH")

    org_name = org_db['short_name']
    sys.stdout.write("preparing for TransriptSkimmer run for organism %s\n" % org_name)

    genome_seq_file = org_db['fasta']
    if not os.path.isfile(genome_seq_file):
        exit('error: failed to fetch genome sequence file %s for organism %s' % (genome_seq_file, org_name))
    sys.stdout.write("using genome sequence file %s\n" % genome_seq_file)

    ## expect the mmr result file in ex: A_thaliana/read_mapping/A_thaliana_Aligned_mmr_sortbyCoord.bam
    bam_file = "%s/%s_Aligned_mmr_sortbyCoord.bam" % (org_db['read_map_dir'], org_name)
    if not os.path.isfile(bam_file):
        sys.stdout.write("warning: failed to fetch sorted mmr BAM file for organism: %s, trying to get the unsorted mmr file\n" % org_name)
        bam_file = "%s/%s_Aligned_mmr.bam" % (org_db['read_map_dir'], org_name)
        if not os.path.isfile(bam_file):
            exit("error: failed to fetch mmr BAM file for organism %s" % org_name)

        ## sorting, indexing the bam file
        file_prefix, ext = os.path.splitext(bam_file)
        sorted_bam = "%s_sortbyCoord" % file_prefix
        sys.stdout.write("trying to sort based on the coordinates with output prefix as: %s\n" % sorted_bam)
        if not os.path.isfile("%s.bam" % sorted_bam):
            pysam.sort(bam_file, sorted_bam)
        bam_file = "%s.bam" % sorted_bam

    sys.stdout.write("using bam file %s\n" % bam_file)
    if not os.path.exists(bam_file + ".bai"):
        pysam.index(bam_file)

    ##FIXME to be included
    max_intergenic_region = 10000

    max_exon_length = org_db['max_exon_len']
    result_dir = org_db['read_assembly_dir']
    max_intron_length = org_db['max_intron_len']

    ## TranscriptSkimmer needs a genome information object of the fasta file
    gio_path_temp = os.path.join(result_dir, "temp_gio")
    make_gio(genome_seq_file, gio_path_temp)
    gio_file = "%s/genome.config" % gio_path_temp

    options="-maxel %d \
    -nss \
    -reglen 0.66 \
    -maxic %d \
    -minic 20 \
    -maxin %d \
    -mm 4 \
    -exm 3 \
    -indt 150 \
    -exd 20 \
    -tf 0.5 \
    -inscf 3 \
    -excut 3 \
    -toff 100 \
    -el 15" % (max_exon_length, max_intergenic_region, max_intron_length)

    cli_trsk = "infer_genes -gio %s -bam %s -gff %s %s" % (gio_file, bam_file, out_gff_file, options)
    sys.stdout.write('\trun TransriptSkimmer as: %s \n' % cli_trsk)
    try:
        os.chdir(result_dir)
        process = subprocess.Popen(cli_trsk, shell=True)
        returncode = process.wait()
        if returncode != 0:
            ## py3-compatible raise form (was: raise Exception, "...")
            raise Exception("Exit status return code = %i" % returncode)
    except Exception as e:
        print('Error running TranscriptSkimmer.\n%s' % str(e))

    ## cleaning
    shutil.rmtree(gio_path_temp)
def make_gio(in_file_name, gio_path):
    """
    make_gio builds a genome information object for an input fasta file.

    For each ">name" record it writes genome/<name>.flat (lower-cased
    sequence with newlines removed) and genome/<name>.dna (header plus
    lower-cased sequence lines), then a genome.config index file.

    takes 2 arguments:

    @args in_file_name: is the input file in fasta format
    @type in_file_name: str
    @args gio_path: is the directory to which the genome information object will be written to
    @type gio_path: dir
    """
    try:
        ## open() instead of the py2-only file() builtin
        f_in = open(in_file_name, "r")
    except Exception as msg:
        print(msg)
        print("cannot open infile '" + in_file_name + "'")
        sys.exit(1)

    write_dna = 1
    flat_path = os.path.join(gio_path, "genome")
    try:
        if os.path.exists(flat_path):
            print("directory " + flat_path + " exists already.")
        else:
            os.makedirs(flat_path)
    except Exception as msg:
        print(msg)
        print("cannot create path '" + flat_path + "'")
        sys.exit(1)

    f_out = None
    f_out_dna = None
    contig_list = []

    for line in f_in:
        if line.isspace():
            print("warning: wrong format. ignoring empty line in file '" + in_file_name + "'")
            continue
        if line[0].isspace():
            print("wrong format: leading white space in file '" + in_file_name + "'")
            sys.exit(1)

        if line.startswith(">"):
            ## close the previous contig's files before starting a new one
            if f_out is not None:
                f_out.close()
            if f_out_dna is not None:
                f_out_dna.close()
            ## split() already discards the newline; robust when the header
            ## is the last line of a file without a trailing newline
            contig_list.append(line[1:].split()[0])
            out_name = os.path.join(flat_path, contig_list[-1] + ".flat")
            out_dna_name = os.path.join(flat_path, contig_list[-1] + ".dna")
            try:
                f_out = open(out_name, "w")
                if write_dna == 1:
                    f_out_dna = open(out_dna_name, "w")
                    f_out_dna.write(line)
            except Exception as msg:
                print(msg)
                print("cannot open file '" + out_name + "'")
                sys.exit(1)
        else:
            if f_out is None:
                ## sequence data appeared before any ">" header
                print("improper input format. No header in first line")
                sys.exit(1)
            try:
                ## rstrip("\n") instead of line[0:-1]: the old slice dropped
                ## the final base when the file had no trailing newline
                f_out.write(line.rstrip("\n").lower())
                if write_dna == 1:
                    f_out_dna.write(line.lower())
            except Exception as msg:
                print(msg)
                print("cannot write to file '" + out_name + "'")
                sys.exit(1)

    f_in.close()
    ## bug fix: the old code called f_out.close() unconditionally (crash on
    ## an empty fasta) and never closed the trailing .dna handle
    if f_out is not None:
        f_out.close()
    if f_out_dna is not None:
        f_out_dna.close()

    try:
        print("creating file '" + os.path.join(gio_path, "genome.config") + "'")
        f_conf = open(os.path.join(gio_path, "genome.config"), "w")
        f_conf.write("BASEDIR " + os.path.abspath(gio_path) + "\n\n")
        f_conf.write("CONTIGS " + str(len(contig_list)) + "\n")
        for c in contig_list:
            f_conf.write(c + "\tgenome/" + c + ".flat\tgenome/" + c + ".dna\n")
        f_conf.write("\nALPHABET acgt\n\n")
        f_conf.write("ESTFILES 0\n\n")
        f_conf.write("CDNAFILES 0\n\n")
        f_conf.write("ANNOTATIONFILES 0\n")
        f_conf.close()
    except Exception as msg:
        print(msg)
        print("cannot create file '" + os.path.join(gio_path, "genome.config") + "'")
        sys.exit(1)
| |
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import os
import gevent
import logging
import kazoo.client
import kazoo.exceptions
import kazoo.handlers.gevent
import kazoo.recipe.election
from kazoo.client import KazooState
from kazoo.retry import KazooRetry, ForceRetryError
from kazoo.recipe.counter import Counter
from bitarray import bitarray
from cfgm_common.exceptions import ResourceExhaustionError,\
ResourceExistsError, OverQuota
from gevent.lock import BoundedSemaphore
import datetime
import uuid
import sys
import socket
LOG_DIR = '/var/log/contrail/'
class IndexAllocator(object):
    """Allocator of integer indexes out of configured ranges, persisted in
    ZooKeeper.

    Every allocated index is recorded both as a child znode of ``path``
    (named with the zero-padded decimal index) and as a set bit in an
    in-memory bitarray, where a bit position is the 0-based offset of the
    index within the concatenated, sorted allocation ranges.
    """

    def __init__(self, zookeeper_client, path, size=0, start_idx=0,
                 reverse=False, alloc_list=None, max_alloc=0):
        """
        :param zookeeper_client: client used to persist allocations
        :param path: znode directory under which index nodes are created
        :param size: size of the single default range (when alloc_list is None)
        :param start_idx: first index of the single default range
        :param reverse: when True, allocate from the high end of the ranges
        :param alloc_list: optional list of {'start':..,'end':..} dicts that
            overrides size/start_idx
        :param max_alloc: cap on allocatable indexes (0 means "total range size")
        """
        self._size = size
        self._start_idx = start_idx
        if alloc_list is None:
            self._alloc_list = [{'start': start_idx, 'end': start_idx+size}]
        else:
            # keep ranges sorted by start so bit offsets map monotonically
            sorted_alloc_list = sorted(alloc_list, key=lambda k: k['start'])
            self._alloc_list = sorted_alloc_list

        size = self._get_range_size(self._alloc_list)

        if max_alloc == 0:
            self._max_alloc = size
        else:
            self._max_alloc = max_alloc

        self._zookeeper_client = zookeeper_client
        self._path = path
        self._in_use = bitarray('0')
        self._reverse = reverse
        # rebuild the local in-use bitmap from indexes already present in
        # ZooKeeper (e.g. after a process restart)
        for idx in self._zookeeper_client.get_children(path):
            idx_int = self._get_bit_from_zk_index(int(idx))
            if idx_int >= 0:
                self._set_in_use(self._in_use, idx_int)
        # end for idx
    # end __init__

    # Given a set of ranges (alloc_list), return
    # the cumulative count of the ranges.
    def _get_range_size(self, alloc_list):
        """Return the total number of indexes covered by ``alloc_list``;
        raises when two consecutive (sorted) ranges overlap."""
        alloc_count = len(alloc_list)
        size = 0

        # check for overlap in alloc_list --TODO
        for alloc_idx in range(0, alloc_count - 1):
            idx_start_addr = alloc_list[alloc_idx]['start']
            idx_end_addr = alloc_list[alloc_idx]['end']
            next_start_addr = alloc_list[alloc_idx+1]['start']
            if next_start_addr <= idx_end_addr:
                raise Exception(
                    'Allocation Lists Overlapping: %s' % (alloc_list))
            size += idx_end_addr - idx_start_addr + 1
        size += (alloc_list[alloc_count-1]['end'] -
                 alloc_list[alloc_count-1]['start'] + 1)
        return size

    def _has_ranges_shrunk(self, old_list, new_list):
        """Return True when ``new_list`` drops a pool or narrows any pool
        (pools compared pairwise by position; both lists sorted)."""
        if len(old_list) > len(new_list):
            return True
        for old_pool, new_pool in zip(old_list, new_list):
            if (new_pool['start'] > old_pool['start'] or
                new_pool['end'] < old_pool['end']):
                return True
        return False

    # Reallocates the indexes to a new set of indexes provided by
    # the user.
    # Limitation -
    # 1. No. of alloc pools needs to be constant
    #    For example, [10-20] cannot become [10-20],[25-30]
    # 2. Every alloc pool can only expand but not shrink
    #    For ex, [10-20] can become [9-20] or [10-22] or [9-22]
    #    but not [15-17]
    #
    def reallocate(self, new_alloc_list):
        """Remap the in-use bitmap onto an expanded set of ranges."""
        sorted_alloc_list = sorted(new_alloc_list,
                                   key=lambda k: k['start'])
        if self._has_ranges_shrunk(self._alloc_list, sorted_alloc_list):
            raise Exception('Indexes allocated cannot be shrunk: %s' %
                            (self._alloc_list))
        size = self._get_range_size(sorted_alloc_list)
        self._max_alloc = size

        # NOTE(review): self._alloc_list is never updated to
        # sorted_alloc_list here, so later bit<->index translations still
        # use the old ranges while _max_alloc reflects the new ones --
        # looks unintended; confirm against callers.
        new_in_use = bitarray(0)
        for idx, bitval in enumerate(self._in_use):
            if not bitval:
                continue
            zk_idx = self._get_zk_index_from_bit(idx)
            idx_int = self._get_bit_from_zk_index(zk_idx, sorted_alloc_list)
            if idx_int >= 0:
                self._set_in_use(new_in_use, idx_int)
        self._in_use = new_in_use
        # end for idx

    def _get_zk_index_from_bit(self, idx, alloc_list=None):
        """Translate bitmap offset ``idx`` into its zk index value.

        Raises ResourceExhaustionError when the offset lies beyond all
        ranges.
        """
        if not alloc_list:
            alloc_list = self._alloc_list
        size = idx
        if self._reverse:
            # offset 0 maps to the highest index of the highest range
            for alloc in reversed(alloc_list):
                size -= alloc['end'] - alloc['start'] + 1
                if size < 0:
                    return alloc['start'] - size - 1
        else:
            for alloc in alloc_list:
                size -= alloc['end'] - alloc['start'] + 1
                if size < 0:
                    return alloc['end']+size + 1

        raise ResourceExhaustionError(
            'Cannot get zk index from bit %s' % (idx))
    # end _get_zk_index

    def _get_bit_from_zk_index(self, idx, alloc_list=None):
        """Translate zk index value ``idx`` into its bitmap offset, or -1
        when the index lies outside every range."""
        if not alloc_list:
            alloc_list = self._alloc_list
        size = 0
        if self._reverse:
            for alloc in reversed(alloc_list):
                if alloc['start'] <= idx <= alloc['end']:
                    return alloc['end'] - idx + size
                size += alloc['end'] - alloc['start'] + 1
            pass
        else:
            for alloc in alloc_list:
                if alloc['start'] <= idx <= alloc['end']:
                    return idx - alloc['start'] + size
                size += alloc['end'] - alloc['start'] + 1
        return -1
    # end _get_bit_from_zk_index

    def _set_in_use(self, array, bitnum):
        """Set bit ``bitnum`` in ``array``, extending the array if needed."""
        # if the index is higher than _max_alloc, do not use the bitarray, in
        # order to reduce the size of the bitarray. Otherwise, set the bit
        # corresponding to idx to 1 and extend the _in_use bitarray if needed
        if bitnum > self._max_alloc:
            return
        if bitnum >= array.length():
            temp = bitarray(bitnum - array.length())
            temp.setall(0)
            temp.append('1')
            array.extend(temp)
        else:
            array[bitnum] = 1
    # end _set_in_use

    def _reset_in_use(self, bitnum):
        """Clear bit ``bitnum`` in the in-use bitmap if it is tracked."""
        # if the index is higher than _max_alloc, do not use the bitarray, in
        # order to reduce the size of the bitarray. Otherwise, set the bit
        # corresponding to idx to 1 and extend the _in_use bitarray if needed
        if bitnum > self._max_alloc:
            return
        if bitnum >= self._in_use.length():
            return
        else:
            self._in_use[bitnum] = 0
    # end _reset_in_use

    def set_in_use(self, idx):
        """Mark zk index ``idx`` allocated in the local bitmap only."""
        bit_idx = self._get_bit_from_zk_index(idx)
        if bit_idx < 0:
            return
        self._set_in_use(self._in_use, bit_idx)
    # end set_in_use

    def reset_in_use(self, idx):
        """Clear zk index ``idx`` in the local bitmap only."""
        bit_idx = self._get_bit_from_zk_index(idx)
        if bit_idx < 0:
            return
        self._reset_in_use(bit_idx)
    # end reset_in_use

    def get_alloc_count(self):
        """Return the number of locally-tracked allocated indexes."""
        return self._in_use.count()
    # end get_alloc_count

    def _alloc_from_pools(self, pools=None):
        """Claim and return the first free bitmap offset within ``pools``;
        raises ResourceExhaustionError when every pool is full."""
        if not pools:
            raise ResourceExhaustionError()

        if self._reverse:
            pools = list(reversed(pools))
        for pool in pools:
            last_idx = self._in_use.length() - 1
            pool_start = pool['start']
            pool_end = pool['end']
            pool_size = pool_end - pool_start + 1
            if self._reverse:
                start_zk_idx = pool_end
                end_zk_idx = pool_start
            else:
                start_zk_idx = pool_start
                end_zk_idx = pool_end
            start_bit_idx = self._get_bit_from_zk_index(start_zk_idx)
            end_bit_idx = self._get_bit_from_zk_index(end_zk_idx)

            # if bitarray is less then start_bit_index,
            # extend bit array to start_bit_idx and use that idx
            if last_idx < start_bit_idx:
                temp = bitarray(start_bit_idx - last_idx)
                temp.setall(0)
                self._in_use.extend(temp)
                self._in_use[start_bit_idx] = 1
                return start_bit_idx

            # if bitarray is in between start_bit_idx and end_bit_idx
            if last_idx >= start_bit_idx and last_idx <= end_bit_idx:
                # we need to slice part of bitarray from
                # start of the pool and end of array
                pool_bitarray = self._in_use[start_bit_idx:]
            else:
                pool_bitarray = self._in_use[
                    start_bit_idx:end_bit_idx+1]
            if pool_bitarray.all():
                if last_idx >= end_bit_idx:
                    continue
                idx = self._in_use.length()
                self._in_use.append(1)
            else:
                idx = pool_bitarray.index(0)
                idx += start_bit_idx
                self._in_use[idx] = 1
            return idx

        raise ResourceExhaustionError()
    # end _alloc_from_pools

    def alloc(self, value=None, pools=None):
        """Allocate a free index (optionally restricted to ``pools``),
        create its znode holding ``value``, and return the zk index."""
        if pools:
            idx = self._alloc_from_pools(pools)
        else:
            # Allocates a index from the allocation list
            if self._in_use.all():
                idx = self._in_use.length()
                if idx > self._max_alloc:
                    raise ResourceExhaustionError()
                self._in_use.append(1)
            else:
                idx = self._in_use.index(0)
                self._in_use[idx] = 1

        idx = self._get_zk_index_from_bit(idx)
        try:
            # Create a node at path and return its integer value
            id_str = "%(#)010d" % {'#': idx}
            self._zookeeper_client.create_node(self._path + id_str, value)
            return idx
        except ResourceExistsError:
            # another process claimed the same index first; retry
            return self.alloc(value, pools)
    # end alloc

    def reserve(self, idx, value=None):
        """Reserve the specific index ``idx`` with ``value``.

        Returns None when idx is outside [start_idx, start_idx+size);
        returns idx on success or idempotent re-reserve; re-raises
        ResourceExistsError on a conflicting existing value.
        """
        # Reserves the requested index if available
        if not self._start_idx <= idx < self._start_idx + self._size:
            return None
        try:
            # Create a node at path and return its integer value
            id_str = "%(#)010d" % {'#': idx}
            self._zookeeper_client.create_node(self._path + id_str, value)
            self.set_in_use(idx)
            return idx
        except ResourceExistsError:
            self.set_in_use(idx)
            existing_value = self.read(idx)
            if (value == existing_value):
                # idempotent reserve
                return idx
            msg = 'For index %s reserve conflicts with existing value %s.' \
                  %(idx, existing_value)
            self._zookeeper_client.syslog(msg, level='notice')
            raise
    # end reserve

    def delete(self, idx):
        """Delete the znode for ``idx`` and clear its local bit."""
        id_str = "%(#)010d" % {'#': idx}
        self._zookeeper_client.delete_node(self._path + id_str)
        bit_idx = self._get_bit_from_zk_index(idx)
        if 0 <= bit_idx < self._in_use.length():
            self._in_use[bit_idx] = 0
    # end delete

    def read(self, idx):
        """Return the value stored for ``idx`` (marking it in-use locally
        as a side effect), or None when no znode exists."""
        id_str = "%(#)010d" % {'#': idx}
        id_val = self._zookeeper_client.read_node(self._path+id_str)
        if id_val is not None:
            bit_idx = self._get_bit_from_zk_index(idx)
            if bit_idx >= 0:
                self._set_in_use(self._in_use, bit_idx)
        return id_val
    # end read

    def empty(self):
        """Return True when no index is marked in-use locally."""
        return not self._in_use.any()
    # end empty

    @classmethod
    def delete_all(cls, zookeeper_client, path):
        """Recursively delete every index znode under ``path``."""
        try:
            zookeeper_client.delete_node(path, recursive=True)
        except kazoo.exceptions.NotEmptyError:
            #TODO: Add retries for NotEmptyError
            zookeeper_client.syslog("NotEmptyError while deleting %s" % path)
    # end delete_all

#end class IndexAllocator
class ZookeeperCounter(Counter):
    """Kazoo Counter recipe with an upper bound enforced on increments."""

    def __init__(self, client, path, max_count=sys.maxint, default=0):
        super(ZookeeperCounter, self).__init__(client, path, default)
        self.max_count = max_count
        self._ensure_node()

    def _inner_change(self, value):
        count, version = self._value()
        # Increments always apply; decrements apply only while the stored
        # count is still positive (never drive the counter negative).
        if count > 0 or value > 0:
            count += value
        # Quota is checked on increments only, so deletes cannot fail.
        if value > 0 and count > self.max_count:
            raise OverQuota()
        try:
            self.client.set(
                self.path, repr(count).encode('ascii'), version=version)
        except kazoo.exceptions.BadVersionError:  # pragma: nocover
            raise ForceRetryError()
# end class ZookeeperCounter
class ZookeeperClient(object):
    """Gevent-friendly wrapper around kazoo's KazooClient.

    Adds rotating-file logging, unbounded CRUD retries, connection-state
    reporting via sandesh, and convenience node/lock/election helpers.
    """

    def __init__(self, module, server_list, logging_fn=None, zk_timeout=400,
                 log_response_time=None):
        """
        :param module: name used for the logger and the on-disk log file
        :param server_list: comma-separated zookeeper host:port list
        :param logging_fn: optional callable used instead of self.syslog
        :param zk_timeout: kazoo session timeout
        :param log_response_time: optional callable(duration, oper) invoked
            after each wrapped CRUD call
        """
        # logging
        # NOTE(review): this relies on logging.handlers being importable
        # through the plain `import logging` at the top of the file --
        # confirm an `import logging.handlers` happens somewhere in the
        # process before this runs.
        logger = logging.getLogger(module)
        logger.setLevel(logging.DEBUG)
        try:
            handler = logging.handlers.RotatingFileHandler(
                LOG_DIR + module + '-zk.log', maxBytes=10*1024*1024, backupCount=5)
        except IOError:
            print "Cannot open log file in %s" %(LOG_DIR)
        else:
            log_format = logging.Formatter('%(asctime)s [%(name)s]: %(message)s',
                                           datefmt='%m/%d/%Y %I:%M:%S %p')
            handler.setFormatter(log_format)
            logger.addHandler(handler)

        if logging_fn:
            self.log = logging_fn
        else:
            self.log = self.syslog
        self.log_response_time = log_response_time
        # KazooRetry to retry keeper CRUD operations
        self._retry = KazooRetry(max_tries=None, max_delay=300,
                                 sleep_func=gevent.sleep)

        self._zk_client = kazoo.client.KazooClient(
            server_list,
            timeout=zk_timeout,
            handler=kazoo.handlers.gevent.SequentialGeventHandler(),
            logger=logger,
            connection_retry=self._retry,
            command_retry=self._retry)

        self._zk_client.add_listener(self._zk_listener)
        self._logger = logger
        self._election = None
        self._server_list = server_list

        self._conn_state = None
        self._sandesh_connection_info_update(status='INIT', message='')
        self._lost_cb = None
        self._suspend_cb = None

        # wrap the CRUD entry points so their latency can be reported
        self.delete_node = self._response_time(self.delete_node, "DELETE")
        self.create_node = self._response_time(self.create_node, "CREATE")
        self.read_node = self._response_time(self.read_node, "READ")
        self.get_children= self._response_time(self.get_children, "GET_CHILDREN")
        self.exists = self._response_time(self.exists, "EXISTS")
        self.connect()
    # end __init__

    def _response_time(self, func, oper):
        """Return ``func`` wrapped to report its call duration (as a
        timedelta) to self.log_response_time under label ``oper``."""
        def wrapper(*args, **kwargs):
            # Measure the time
            self.start_time = datetime.datetime.now()
            val = func(*args, **kwargs)
            self.end_time = datetime.datetime.now()
            if self.log_response_time:
                self.log_response_time(self.end_time - self.start_time, oper)
            return val
        # Measure the time again
        return wrapper

    # start
    def connect(self):
        """Block (retrying every second) until the kazoo session starts."""
        while True:
            try:
                self._zk_client.start()
                break
            except gevent.event.Timeout as e:
                self._zk_client.close()
                # Update connection info
                self._sandesh_connection_info_update(status='DOWN',
                                                     message=str(e))
                gevent.sleep(1)
            # Zookeeper is also throwing exception due to delay in master election
            except Exception as e:
                self._zk_client.stop()
                self._zk_client.close()
                # Update connection info
                self._sandesh_connection_info_update(status='DOWN',
                                                     message=str(e))
                gevent.sleep(1)
        # Update connection info
        self._sandesh_connection_info_update(status='UP', message='')

    # end

    def is_connected(self):
        """Return True while the kazoo session state is CONNECTED."""
        return self._zk_client.state == KazooState.CONNECTED
    # end is_connected

    def syslog(self, msg, *args, **kwargs):
        """Log ``msg`` on the module logger.

        The 'level' kwarg may be a logger method name ('info', 'error',
        ...) or a sandesh integer level, which is translated first.
        """
        if not self._logger:
            return
        level = kwargs.get('level', 'info')
        if isinstance(level, int):
            from pysandesh.sandesh_logger import SandeshLogger
            level = SandeshLogger.get_py_logger_level(level)
            self._logger.log(level, msg)
            return
        log_method = getattr(self._logger, level, self._logger.info)
        log_method(msg)
    # end syslog

    def set_lost_cb(self, lost_cb=None):
        # set a callback to be called when kazoo state is lost
        # set to None for default action
        self._lost_cb = lost_cb
    # end set_lost_cb

    def set_suspend_cb(self, suspend_cb=None):
        # set a callback to be called when kazoo state is suspend
        # set to None for default action
        self._suspend_cb = suspend_cb
    # end set_suspend_cb

    def _zk_listener(self, state):
        """kazoo state listener: publish state changes and run callbacks."""
        if state == KazooState.CONNECTED:
            if self._election:
                self._election.cancel()
            # Update connection info
            self._sandesh_connection_info_update(status='UP', message='')
        elif state == KazooState.LOST:
            # Lost the session with ZooKeeper Server
            # Best of option we have is to exit the process and restart all
            # over again
            self._sandesh_connection_info_update(status='DOWN',
                                                 message='Connection to Zookeeper lost')
            if self._lost_cb:
                self._lost_cb()
            else:
                os._exit(2)
        elif state == KazooState.SUSPENDED:
            # Update connection info
            self._sandesh_connection_info_update(status='INIT',
                message = 'Connection to zookeeper lost. Retrying')
            if self._suspend_cb:
                self._suspend_cb()

    # end

    def master_election(self, path, identifier, func, *args, **kwargs):
        """Run ``func`` under a leader election at ``path`` (blocks until
        this contender, named ``identifier``, wins the election)."""
        self._election = self._zk_client.Election(path, identifier)
        self._election.run(func, *args, **kwargs)
    # end master_election

    def quota_counter(self, path, max_count=sys.maxint, default=0):
        """Return a ZookeeperCounter at ``path`` capped at ``max_count``."""
        return ZookeeperCounter(self._zk_client, path, max_count,
                                default=default)

    def create_node(self, path, value=None):
        """Create ``path`` (with parents) holding str(value); a random
        uuid is stored when value is None.

        Returns True on an idempotent re-create of the same value; raises
        ResourceExistsError when the node holds a different value.
        """
        try:
            if value is None:
                value = uuid.uuid4()
            retry = self._retry.copy()
            retry(self._zk_client.create, path, str(value), makepath=True)
        except kazoo.exceptions.NodeExistsError:
            current_value = self.read_node(path)
            if current_value == value:
                return True;
            raise ResourceExistsError(path, str(current_value), 'zookeeper')
    # end create_node

    def delete_node(self, path, recursive=False):
        """Delete ``path``; a missing node is silently ignored."""
        try:
            retry = self._retry.copy()
            retry(self._zk_client.delete, path, recursive=recursive)
        except kazoo.exceptions.NoNodeError:
            pass
    # end delete_node

    def read_node(self, path, include_timestamp=False):
        """Return the node's data (or the (data, stat) tuple when
        include_timestamp is True); None on any failure."""
        try:
            retry = self._retry.copy()
            value = retry(self._zk_client.get, path)
            if include_timestamp:
                return value
            return value[0]
        except Exception:
            return None
    # end read_node

    def get_children(self, path):
        """Return the node's children, or [] on any failure."""
        try:
            retry = self._retry.copy()
            return retry(self._zk_client.get_children, path)
        except Exception:
            return []
    # end read_node

    def exists(self, path):
        """Return the node's stat when it exists; [] on any failure."""
        try:
            retry = self._retry.copy()
            return retry(self._zk_client.exists, path)
        except Exception:
            return []
    # end exists

    def _sandesh_connection_info_update(self, status, message):
        """Publish zookeeper connection state to sandesh and log up/down
        transitions."""
        from pysandesh.connection_info import ConnectionState
        from pysandesh.gen_py.process_info.ttypes import ConnectionStatus
        from pysandesh.gen_py.process_info.ttypes import ConnectionType as ConnType
        from pysandesh.gen_py.sandesh.ttypes import SandeshLevel

        new_conn_state = getattr(ConnectionStatus, status)
        ConnectionState.update(conn_type = ConnType.ZOOKEEPER,
                name = 'Zookeeper', status = new_conn_state,
                message = message,
                server_addrs = self._server_list.split(','))

        if (self._conn_state and self._conn_state != ConnectionStatus.DOWN and
                new_conn_state == ConnectionStatus.DOWN):
            msg = 'Connection to Zookeeper down: %s' %(message)
            self.log(msg, level=SandeshLevel.SYS_ERR)
        if (self._conn_state and self._conn_state != new_conn_state and
                new_conn_state == ConnectionStatus.UP):
            msg = 'Connection to Zookeeper ESTABLISHED'
            self.log(msg, level=SandeshLevel.SYS_NOTICE)

        self._conn_state = new_conn_state
    # end _sandesh_connection_info_update

    def lock(self, path, identifier=None):
        """Return a kazoo exclusive Lock at ``path``; the contender
        identifier defaults to hostname-pid."""
        if not identifier:
            identifier = '%s-%s' % (socket.gethostname(), os.getpid())
        return self._zk_client.Lock(path, identifier)

    def read_lock(self, path, identifier=None):
        """Return a kazoo shared ReadLock at ``path``."""
        if not identifier:
            identifier = '%s-%s' % (socket.gethostname(), os.getpid())
        return self._zk_client.ReadLock(path, identifier)

    def write_lock(self, path, identifier=None):
        """Return a kazoo exclusive WriteLock at ``path``."""
        if not identifier:
            identifier = '%s-%s' % (socket.gethostname(), os.getpid())
        return self._zk_client.WriteLock(path, identifier)
| |
# -*- coding: utf-8 -*-
import logging
import itertools
import math
import urllib
import httplib as http
from modularodm import Q
from flask import request
from framework import utils
from framework import sentry
from framework.auth.core import User
from framework.flask import redirect # VOL-aware redirect
from framework.routing import proxy_url
from framework.exceptions import HTTPError
from framework.auth.forms import SignInForm
from framework.forms import utils as form_utils
from framework.auth.forms import RegistrationForm
from framework.auth.forms import ResetPasswordForm
from framework.auth.forms import ForgotPasswordForm
from framework.auth.decorators import collect_auth
from framework.auth.decorators import must_be_logged_in
from website.models import Guid
from website.models import Node
from website.util import rubeus
from website.util import sanitize
from website.project import model
from website.util import web_url_for
from website.util import permissions
from website.project import new_dashboard
from website.settings import ALL_MY_PROJECTS_ID
from website.settings import ALL_MY_REGISTRATIONS_ID
logger = logging.getLogger(__name__)
def _render_node(node, auth=None):
    """Serialize one node for dashboard rendering.

    :param node: node to serialize
    :param auth: optional auth context; auth.user may be None when a
        public project is viewed anonymously
    :return: dict of summary fields for the client
    """
    perm = None
    if auth and auth.user:
        perm_list = node.get_permissions(auth.user)
        if perm_list:
            perm = permissions.reduce_permissions(perm_list)
    return {
        'title': node.title,
        'id': node._primary_key,
        'url': node.url,
        'api_url': node.api_url,
        'primary': node.primary,
        'date_modified': utils.iso8601format(node.date_modified),
        'category': node.category,
        'permissions': perm,  # A string, e.g. 'admin', or None,
        'archiving': node.archiving,
    }
def _render_nodes(nodes, auth=None, show_path=False):
    """Serialize a collection of nodes for dashboard rendering.

    :param nodes: iterable of nodes
    :param auth: optional auth context forwarded to _render_node
    :param show_path: whether the client should show node paths
    :return: dict with 'nodes' and 'show_path' keys
    """
    rendered = [_render_node(each, auth) for each in nodes]
    return {
        'nodes': rendered,
        'show_path': show_path,
    }
@collect_auth
def index(auth):
    """Redirect to dashboard if user is logged in, else show homepage.
    """
    if not auth.user:
        return {}
    return redirect(web_url_for('dashboard'))
def find_dashboard(user):
    """Return the user's dashboard folder node, creating it when absent."""
    folders = Node.find_for_user(user, subquery=Q('is_dashboard', 'eq', True))
    if not folders.count():
        # first visit: lazily create the dashboard, then re-query
        new_dashboard(user)
        folders = Node.find_for_user(user, Q('is_dashboard', 'eq', True))
    return folders[0]
@must_be_logged_in
def get_dashboard(auth, nid=None, **kwargs):
    """Return hgrid data for the dashboard folder identified by ``nid``.

    ``nid`` may be None (the user's root dashboard), one of the smart
    folder ids, or a concrete node id.
    """
    user = auth.user
    if nid is None:
        root = find_dashboard(user)
        data = [rubeus.to_project_root(root, auth, **kwargs)]
    elif nid == ALL_MY_PROJECTS_ID:
        data = get_all_projects_smart_folder(**kwargs)
    elif nid == ALL_MY_REGISTRATIONS_ID:
        data = get_all_registrations_smart_folder(**kwargs)
    else:
        data = rubeus.to_project_hgrid(Node.load(nid), auth, **kwargs)

    return {
        'data': data,
        'timezone': user.timezone,
        'locale': user.locale,
        'id': user._id,
    }
@must_be_logged_in
def get_all_projects_smart_folder(auth, **kwargs):
    """Return hgrid roots for every top-level project the user contributes to."""
    # TODO: Unit tests
    user = auth.user
    query = (
        Q('is_deleted', 'eq', False) &
        Q('is_registration', 'eq', False) &
        Q('is_folder', 'eq', False)
    )
    nodes = Node.find_for_user(user, subquery=query).sort('title')
    # only render nodes whose parent is not itself in the result set
    contributed_keys = nodes.get_keys()
    return [
        rubeus.to_project_root(node, auth, **kwargs)
        for node in nodes
        if node.parent_id not in contributed_keys
    ]
@must_be_logged_in
def get_all_registrations_smart_folder(auth, **kwargs):
    """Return hgrid roots for the user's visible top-level registrations."""
    # TODO: Unit tests
    user = auth.user
    query = (
        Q('is_deleted', 'eq', False) &
        Q('is_registration', 'eq', True) &
        Q('is_folder', 'eq', False)
    )
    sorted_nodes = Node.find_for_user(user, subquery=query).sort('-title')
    # Note(hrybacki): is_retracted and is_pending_embargo are property methods
    # and cannot be directly queried
    visible = [
        node for node in sorted_nodes
        if not node.is_retracted and not node.is_pending_embargo
    ]
    keys = [node._id for node in visible]
    return [
        rubeus.to_project_root(node, auth, **kwargs)
        for node in visible
        if node.ids_above.isdisjoint(keys)
    ]
@must_be_logged_in
def get_dashboard_nodes(auth):
    """Get summary information about the current user's dashboard nodes.

    :param-query no_components: Exclude components from response.
        NOTE: By default, components will only be shown if the current user
        is contributor on a comonent but not its parent project. This query
        parameter forces ALL components to be excluded from the request.
    :param-query permissions: Filter upon projects for which the current user
        has the specified permissions. Examples: 'write', 'admin'
    """
    user = auth.user

    project_query = (
        Q('category', 'eq', 'project') &
        Q('is_deleted', 'eq', False) &
        Q('is_registration', 'eq', False) &
        Q('is_folder', 'eq', False)
    )
    nodes = Node.find_for_user(user, subquery=project_query)

    if request.args.get('no_components') in [True, 'true', 'True', '1', 1]:
        comps = []
    else:
        comps = Node.find_for_user(  # NOTE - this used to be a find on nodes above. Does this mess it up?
            user,
            (
                # components only
                Q('category', 'ne', 'project') &
                # exclude deleted nodes
                Q('is_deleted', 'eq', False) &
                # exclude registrations
                Q('is_registration', 'eq', False)
            )
        )
    nodes = list(nodes) + list(comps)

    response_nodes = nodes
    if request.args.get('permissions'):
        perm = request.args['permissions'].strip().lower()
        if perm not in permissions.PERMISSIONS:
            raise HTTPError(http.BAD_REQUEST, dict(
                message_short='Invalid query parameter',
                message_long='{0} is not in {1}'.format(perm, permissions.PERMISSIONS)
            ))
        response_nodes = [
            node for node in nodes
            if node.has_permission(user, permission=perm)
        ]
    return _render_nodes(response_nodes, auth)
@must_be_logged_in
def dashboard(auth):
    """Render context for the logged-in user's dashboard page."""
    user = auth.user
    folder = find_dashboard(user)
    return {
        'addons_enabled': user.get_addon_names(),
        'dashboard_id': folder._id,
    }
def validate_page_num(page, pages):
    """Raise HTTPError(400) when ``page`` falls outside [0, pages)."""
    out_of_range = page < 0 or (pages and page >= pages)
    if out_of_range:
        raise HTTPError(http.BAD_REQUEST, data=dict(
            message_long='Invalid value for "page".'
        ))
def paginate(items, total, page, size):
    """Return (iterator over the requested page of ``items``, page count)."""
    pages = math.ceil(total / float(size))
    validate_page_num(page, pages)
    offset = page * size
    page_items = itertools.islice(items, offset, offset + size)
    return page_items, pages
@must_be_logged_in
def watched_logs_get(**kwargs):
    """Return a paginated JSON view of the current user's watched logs."""
    user = kwargs['auth'].user

    def _int_arg(name, default):
        # parse an integer query argument or reject the request
        try:
            return int(request.args.get(name, default))
        except ValueError:
            raise HTTPError(http.BAD_REQUEST, data=dict(
                message_long='Invalid value for "%s".' % name
            ))

    page = _int_arg('page', 0)
    size = _int_arg('size', 10)

    total = sum(1 for x in user.get_recent_log_ids())
    paginated_logs, pages = paginate(user.get_recent_log_ids(), total, page, size)
    logs = (model.NodeLog.load(log_id) for log_id in paginated_logs)
    return {
        "logs": [serialize_log(log) for log in logs],
        "total": total,
        "pages": pages,
        "page": page
    }
def serialize_log(node_log, auth=None, anonymous=False):
    '''Return a dictionary representation of the log.'''
    if isinstance(node_log.user, User):
        user = node_log.user.serialize()
    else:
        # foreign (non-registered) user: only a display name is available
        user = {'fullname': node_log.foreign_user}
    contributors = [
        node_log._render_log_contributor(each)
        for each in node_log.params.get("contributors", [])
    ]
    node = node_log.node.serialize(auth) if node_log.node else None
    return {
        'id': str(node_log._primary_key),
        'user': user,
        'contributors': contributors,
        'action': node_log.action,
        'params': sanitize.unescape_entities(node_log.params),
        'date': utils.iso8601format(node_log.date),
        'node': node,
        'anonymous': anonymous
    }
def reproducibility():
    """Send visitors to the reproducibility project wiki."""
    target = '/ezcuj/wiki'
    return redirect(target)
def registration_form():
    """Serialize a fresh registration form as JSON."""
    form = RegistrationForm(prefix='register')
    return form_utils.jsonify(form)
def signin_form():
    """Serialize a fresh sign-in form as JSON."""
    form = SignInForm()
    return form_utils.jsonify(form)
def forgot_password_form():
    """Serialize a fresh forgot-password form as JSON."""
    form = ForgotPasswordForm(prefix='forgot_password')
    return form_utils.jsonify(form)
def reset_password_form():
    """Serialize a fresh reset-password form as JSON."""
    form = ResetPasswordForm()
    return form_utils.jsonify(form)
# GUID ###
def _build_guid_url(base, suffix=None):
    """Join ``base`` and an optional ``suffix`` into a normalized,
    slash-wrapped unicode GUID URL."""
    parts = [part.strip('/') for part in (base, suffix) if part]
    url = '/'.join(parts)
    if not isinstance(url, unicode):
        url = url.decode('utf-8')
    return u'/{0}/'.format(url)
def resolve_guid(guid, suffix=None):
    """Load GUID by primary key, look up the corresponding view function in the
    routing table, and return the return value of the view function without
    changing the URL.

    :param str guid: GUID primary key
    :param str suffix: Remainder of URL after the GUID
    :return: Return value of proxied view function
    :raises: HTTPError(404) when the GUID (or its referent) cannot be resolved
    """
    # Look up GUID
    guid_object = Guid.load(guid)
    if guid_object:

        # verify that the object implements a GuidStoredObject-like interface. If a model
        # was once GuidStoredObject-like but that relationship has changed, it's
        # possible to have referents that are instances of classes that don't
        # have a deep_url attribute or otherwise don't behave as
        # expected.
        if not hasattr(guid_object.referent, 'deep_url'):
            sentry.log_message(
                'Guid `{}` resolved to an object with no deep_url'.format(guid)
            )
            raise HTTPError(http.NOT_FOUND)
        referent = guid_object.referent
        if referent is None:
            logger.error('Referent of GUID {0} not found'.format(guid))
            raise HTTPError(http.NOT_FOUND)
        if not referent.deep_url:
            raise HTTPError(http.NOT_FOUND)
        # serve the referent's view in place without redirecting the browser
        url = _build_guid_url(urllib.unquote(referent.deep_url), suffix)
        return proxy_url(url)

    # GUID not found; try lower-cased and redirect if exists
    guid_object_lower = Guid.load(guid.lower())
    if guid_object_lower:
        return redirect(
            _build_guid_url(guid.lower(), suffix)
        )

    # GUID not found
    raise HTTPError(http.NOT_FOUND)
##### Redirects #####
# Redirect /about/ to OSF wiki page
# https://github.com/CenterForOpenScience/osf.io/issues/3862
# https://github.com/CenterForOpenScience/community/issues/294
def redirect_about(**kwargs):
    """Redirect /about/ to the OSF wiki home page."""
    target = 'https://osf.io/4znzp/wiki/home/'
    return redirect(target)
def redirect_howosfworks(**kwargs):
    """Redirect /howosfworks/ to the getting-started page."""
    target = '/getting-started/'
    return redirect(target)
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for jacobian and batch_jacobian ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training as keras_training
from tensorflow.python.layers import layers as tf_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops as tf_control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients as gradient_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.ops.parallel_for import control_flow_ops
from tensorflow.python.ops.parallel_for import gradients
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class FullyConnectedModel(object):
  """A simple stack of fully-connected layers with ReLU activations."""

  def __init__(self, activation_size, num_layers):
    # Build `num_layers` dense layers, all of width `activation_size`.
    self._layers = []
    for _ in range(num_layers):
      self._layers.append(tf_layers.Dense(activation_size, activation=nn.relu))

  def __call__(self, inp):
    """Apply each layer in order to `inp` and return the final activation."""
    out = inp
    for dense in self._layers:
      out = dense(out)
    return out
def fully_connected_model_fn(batch_size, activation_size, num_layers):
  """Build a fully-connected model; return its (input, output) tensors."""
  network = FullyConnectedModel(activation_size, num_layers)
  inputs = random_ops.random_normal([batch_size, activation_size])
  outputs = network(inputs)
  return inputs, outputs
def lstm_model_fn(batch_size, state_size, steps, inputs_size=None):
  """Statically unroll a BasicLSTMCell over random per-step inputs.

  Returns the c component of the zero initial state and of the final state.
  """
  if inputs_size is None:
    inputs_size = state_size
  step_inputs = []
  for _ in range(steps):
    step_inputs.append(random_ops.random_normal([batch_size, inputs_size]))
  cell = rnn_cell.BasicLSTMCell(state_size)
  initial_state = cell.zero_state(batch_size, dtypes.float32)
  state = initial_state
  for step_input in step_inputs:
    _, state = cell(step_input, state)
  return initial_state.c, state.c
def dynamic_lstm_model_fn(batch_size, state_size, max_steps):
  """Build a dynamic_rnn over a BasicLSTMCell; return (inputs, rnn outputs).

  Inputs and sequence lengths are baked in as constants so that multiple
  session.run calls produce the same result.
  """
  input_values = np.random.rand(batch_size, max_steps, state_size)
  inputs = constant_op.constant(input_values, dtype=dtypes.float32)
  length_values = np.random.randint(0, size=[batch_size], high=max_steps + 1)
  sequence_length = constant_op.constant(length_values, dtype=dtypes.int32)
  cell = rnn_cell.BasicLSTMCell(state_size)
  zero_state = cell.zero_state(batch_size, dtypes.float32)
  rnn_outputs = rnn.dynamic_rnn(
      cell,
      inputs,
      sequence_length=sequence_length,
      initial_state=zero_state)
  return inputs, rnn_outputs
def create_fc_batch_jacobian(batch_size, activation_size, num_layers):
  """Return (pfor, while) batch jacobians of a fully-connected model."""
  inp, out = fully_connected_model_fn(batch_size, activation_size, num_layers)
  return (gradients.batch_jacobian(out, inp, use_pfor=True),
          gradients.batch_jacobian(out, inp, use_pfor=False))
def create_lstm_batch_jacobian(batch_size, state_size, steps, inputs_size=None):
  """Return (pfor, while) batch jacobians of an unrolled LSTM."""
  inp, out = lstm_model_fn(batch_size, state_size, steps,
                           inputs_size=inputs_size)
  return (gradients.batch_jacobian(out, inp, use_pfor=True),
          gradients.batch_jacobian(out, inp, use_pfor=False))
def create_dynamic_lstm_batch_jacobian(batch_size, state_size, max_steps):
  """Return the pfor batch jacobian and statically-unrolled while gradients."""
  inp, (_, final_state) = dynamic_lstm_model_fn(batch_size, state_size,
                                                max_steps)
  pfor_jacobian = gradients.batch_jacobian(final_state.c, inp, use_pfor=True)
  # use_pfor=False does not work above given the current limitations on
  # implementation of while_loop, so the while-based computation is unrolled
  # statically: one gradients() call per dimension of the final c-state.
  while_gradients = []
  for dim in range(state_size):
    state_slice = array_ops.gather(final_state.c, dim, axis=1)
    while_gradients.append(gradient_ops.gradients(state_slice, inp)[0])
  return pfor_jacobian, while_gradients
def create_lstm_batch_hessian(batch_size, state_size, steps):
  """Return (pfor, while) batch hessians of an unrolled LSTM."""
  inp, output = lstm_model_fn(batch_size, state_size, steps)
  jac = gradients.batch_jacobian(output, inp, use_pfor=True)
  jac = array_ops.reshape(jac, [batch_size, -1])
  pfor_hessian = gradients.batch_jacobian(jac, inp, use_pfor=True)
  # TODO(agarwal): using two nested while_loop doesn't seem to work here.
  # Hence the pfor jacobian is reused when computing the while hessian.
  while_hessian = gradients.batch_jacobian(jac, inp, use_pfor=False)
  return pfor_hessian, while_hessian
def create_lstm_hessian(batch_size, state_size, steps):
  """Return (pfor, while) hessians of an unrolled LSTM w.r.t. its weights."""
  _, output = lstm_model_fn(batch_size, state_size, steps)
  weights = variables.trainable_variables()
  pfor_jacobians = gradients.jacobian(output, weights, use_pfor=True)
  pfor_hessians = []
  for jac in pfor_jacobians:
    pfor_hessians.append(gradients.jacobian(jac, weights, use_pfor=True))
  # TODO(agarwal): using two nested while_loop doesn't seem to work here.
  # Hence the pfor jacobians are reused for computing the while hessians.
  while_hessians = []
  for jac in pfor_jacobians:
    while_hessians.append(gradients.jacobian(jac, weights, use_pfor=False))
  return pfor_hessians, while_hessians
def create_fc_per_eg_grad(batch_size, activation_size, num_layers):
  """Return (pfor, while) per-example weight gradients of a small FC net."""
  inp = random_ops.random_normal([batch_size, activation_size])
  hidden_layers = []
  for _ in range(num_layers):
    hidden_layers.append(tf_layers.Dense(activation_size, activation=nn.relu))
  projection = tf_layers.Dense(1)

  def model_fn(activation):
    # Forward pass through the stack, project to a scalar, take l2 loss.
    for dense in hidden_layers:
      activation = dense(activation)
    loss = nn.l2_loss(projection(activation))
    return gradient_ops.gradients(loss, variables.trainable_variables())

  def loop_fn(i):
    example = array_ops.expand_dims(array_ops.gather(inp, i), 0)
    return model_fn(example)

  pfor_outputs = control_flow_ops.pfor(loop_fn, batch_size)
  loop_fn_dtypes = [v.dtype for v in variables.trainable_variables()]
  while_outputs = control_flow_ops.for_loop(loop_fn, loop_fn_dtypes, batch_size)
  return pfor_outputs, while_outputs
def create_lstm_per_eg_grad(batch_size, state_size, steps, inputs_size=None):
  """Return (pfor, while) per-example weight gradients of an unrolled LSTM."""
  if inputs_size is None:
    inputs_size = state_size
  inputs = []
  for _ in range(steps):
    inputs.append(random_ops.random_normal([batch_size, inputs_size]))
  cell = rnn_cell.BasicLSTMCell(state_size)
  init_state = cell.zero_state(batch_size, dtypes.float32)

  def model_fn(inps, state):
    # Unroll the cell over this example's inputs and take l2 loss on c.
    for inp in inps:
      _, state = cell(inp, state)
    loss = nn.l2_loss(state.c)
    return gradient_ops.gradients(loss, variables.trainable_variables())

  def loop_fn(i):
    per_eg_inputs = [
        array_ops.expand_dims(array_ops.gather(x, i), 0) for x in inputs
    ]
    per_eg_state = rnn_cell.LSTMStateTuple(
        *[array_ops.expand_dims(array_ops.gather(x, i), 0) for x in init_state])
    return model_fn(per_eg_inputs, per_eg_state)

  pfor_outputs = control_flow_ops.pfor(loop_fn, batch_size)
  loop_fn_dtypes = [v.dtype for v in variables.trainable_variables()]
  while_outputs = control_flow_ops.for_loop(loop_fn, loop_fn_dtypes, batch_size)
  return pfor_outputs, while_outputs
# Importing the code from tensorflow_models seems to cause errors. Hence we
# duplicate the model definition here.
# TODO(agarwal): Use the version in tensorflow_models/official instead.
class Mnist(keras_training.Model):
  # Two conv+maxpool stages followed by two dense layers with dropout.

  def __init__(self, data_format):
    """Creates a model for classifying a hand-written digit.

    Args:
      data_format: Either 'channels_first' or 'channels_last'.
    """
    super(Mnist, self).__init__()
    # Input reshape target depends on where the channel axis lives.
    if data_format == "channels_first":
      self._input_shape = [-1, 1, 28, 28]
    else:
      assert data_format == "channels_last"
      self._input_shape = [-1, 28, 28, 1]
    self.conv1 = tf_layers.Conv2D(
        32, 5, padding="same", data_format=data_format, activation=nn.relu)
    self.conv2 = tf_layers.Conv2D(
        64, 5, padding="same", data_format=data_format, activation=nn.relu)
    self.fc1 = tf_layers.Dense(1024, activation=nn.relu)
    self.fc2 = tf_layers.Dense(10)
    self.dropout = tf_layers.Dropout(0.4)
    self.max_pool2d = tf_layers.MaxPooling2D(
        (2, 2), (2, 2), padding="same", data_format=data_format)

  def __call__(self, inputs, training):
    """Add operations to classify a batch of input images.

    Args:
      inputs: A Tensor representing a batch of input images.
      training: A boolean. Set to True to add operations required only when
        training the classifier (here: enables dropout).

    Returns:
      A logits Tensor with shape [<batch_size>, 10].
    """
    y = array_ops.reshape(inputs, self._input_shape)
    y = self.conv1(y)
    y = self.max_pool2d(y)
    y = self.conv2(y)
    y = self.max_pool2d(y)
    y = tf_layers.flatten(y)
    y = self.fc1(y)
    y = self.dropout(y, training=training)
    return self.fc2(y)
def create_mnist_autobatch(batch_size, data_format, training):
  """Return (pfor, while, manual) logits of the Mnist model on random images."""
  images = random_ops.random_uniform([batch_size, 28, 28])
  model = Mnist(data_format)
  # Manual batching: run the whole batch through the model at once.
  manual = model(images, training=training)

  def per_example(i):
    return model(array_ops.gather(images, i), training=training)

  pfor_outputs = control_flow_ops.pfor(per_example, batch_size)
  while_outputs = control_flow_ops.for_loop(
      per_example, dtypes.float32, batch_size)
  return pfor_outputs, while_outputs, manual
def create_mnist_per_eg_grad(batch_size, data_format, training):
  """Return (pfor, while) per-example loss gradients of the Mnist model."""
  images = random_ops.random_uniform([batch_size, 28, 28])
  # Random one-hot labels, fixed at graph-construction time.
  sparse_labels = np.random.randint(
      low=0, high=10, size=[batch_size]).astype(np.int32)
  onehot_labels = np.zeros((batch_size, 10)).astype(np.float32)
  onehot_labels[np.arange(batch_size), sparse_labels] = 1.
  model = Mnist(data_format)

  def per_example_grad(i):
    example = array_ops.gather(images, i)
    label = array_ops.gather(onehot_labels, i)
    logits = array_ops.reshape(model(example, training=training), [-1])
    loss = losses.softmax_cross_entropy(
        logits=logits, onehot_labels=label, reduction=losses.Reduction.NONE)
    return gradient_ops.gradients(loss, variables.trainable_variables())

  pfor_outputs = control_flow_ops.pfor(per_example_grad, batch_size)
  while_outputs = control_flow_ops.for_loop(
      per_example_grad,
      [dtypes.float32] * len(variables.trainable_variables()), batch_size)
  return pfor_outputs, while_outputs
def create_mnist_batch_jacobian(batch_size, data_format, training):
  """Return (pfor, while) batch jacobians of Mnist logits w.r.t. the images."""
  images = random_ops.random_uniform([batch_size, 28, 28])
  model = Mnist(data_format)
  logits = model(images, training=training)
  return (gradients.batch_jacobian(logits, images, use_pfor=True),
          gradients.batch_jacobian(logits, images, use_pfor=False))
def create_mnist_per_eg_jacobian(batch_size, data_format, training):
  """Return (pfor, while) per-example weight jacobians of the Mnist model."""
  images = random_ops.random_uniform([batch_size, 28, 28])
  model = Mnist(data_format)

  def per_example_jacobian(i, use_pfor):
    example = array_ops.gather(images, i)
    logits = array_ops.reshape(model(example, training=training), [-1])
    return gradients.jacobian(
        logits, variables.trainable_variables(), use_pfor=use_pfor)

  pfor_outputs = control_flow_ops.pfor(
      functools.partial(per_example_jacobian, use_pfor=True),
      batch_size)
  while_outputs = control_flow_ops.for_loop(
      functools.partial(per_example_jacobian, use_pfor=False),
      [dtypes.float32] * len(variables.trainable_variables()), batch_size)
  return pfor_outputs, while_outputs
def create_fc_per_eg_jacobians(batch_size, activation_size, num_layers):
  """Return (full-batch, pfor per-example, while per-example) FC jacobians."""
  model = FullyConnectedModel(activation_size=activation_size,
                              num_layers=num_layers)
  inp = random_ops.random_normal([batch_size, activation_size])
  output = model(inp)
  full_jacobians = gradients.jacobian(output, variables.trainable_variables())

  def per_example_jacobian(i, use_pfor):
    example = array_ops.expand_dims(array_ops.gather(inp, i), 0)
    flat_out = array_ops.reshape(model(example), [-1])
    return gradients.jacobian(
        flat_out, variables.trainable_variables(), use_pfor=use_pfor)

  per_eg_jacobians_pfor = control_flow_ops.pfor(
      functools.partial(per_example_jacobian, use_pfor=True),
      batch_size)
  per_eg_jacobians_while = control_flow_ops.for_loop(
      functools.partial(per_example_jacobian, use_pfor=False),
      [dtypes.float32] * len(variables.trainable_variables()), batch_size)
  return full_jacobians, per_eg_jacobians_pfor, per_eg_jacobians_while
@test_util.run_v1_only("b/122612051")
class GradientsTest(test.TestCase):
  # Checks that pfor-based jacobian/batch_jacobian computations agree with
  # while_loop-based (or manually unrolled) reference computations.

  def run_and_assert_equal(self, targets1, targets2, atol=1e-4, rtol=1e-4):
    """Evaluates both target structures and asserts element-wise closeness."""
    targets1 = nest.flatten(targets1)
    targets2 = nest.flatten(targets2)
    assert len(targets1) == len(targets2)
    init = variables.global_variables_initializer()
    self.evaluate(init)
    # Evaluate everything in one run so both sides see identical randomness.
    outputs = self.evaluate(targets1 + targets2)
    n = len(outputs) // 2
    for i in range(n):
      self.assertAllClose(outputs[i], outputs[i + n], rtol=rtol, atol=atol)

  def test_no_path(self):
    # When the output does not depend on the input, the jacobian is None.
    for grad_func in [gradients.jacobian, gradients.batch_jacobian]:
      for use_pfor in [True, False]:
        x = constant_op.constant([[1.0]])
        y = constant_op.constant([[2.0]])
        self.assertIsNone(grad_func(y, x, use_pfor=use_pfor))

  def test_jacobian_fixed_shape(self):
    x = random_ops.random_uniform([2, 2])
    y = math_ops.matmul(x, x, transpose_a=True)
    jacobian_pfor = gradients.jacobian(y, x, use_pfor=True)
    jacobian_while = gradients.jacobian(y, x, use_pfor=False)
    # Reference: one gradients() call per output element.
    answer = ops.convert_to_tensor([[
        gradient_ops.gradients(y[0][0], x)[0],
        gradient_ops.gradients(y[0][1], x)[0]
    ], [
        gradient_ops.gradients(y[1][0], x)[0],
        gradient_ops.gradients(y[1][1], x)[0]
    ]])
    self.run_and_assert_equal(answer, jacobian_pfor)
    self.run_and_assert_equal(answer, jacobian_while)

  def test_jacobian_scan_shape(self):
    # Shape x: [3, 4]
    x = random_ops.random_uniform([3, 4])
    elems = random_ops.random_uniform([6])
    # Shape y: [6, 3, 4]
    y = functional_ops.scan(lambda a, e: a + e, elems, initializer=x)
    jacobian = gradients.jacobian(y, x)
    expected_shape = [6, 3, 4, 3, 4]
    self.assertAllEqual(expected_shape, jacobian.shape.as_list())

  def test_jacobian_while_loop_shape(self):
    # Shape x: [3, 4]
    x = random_ops.random_uniform([3, 4])
    # NOTE(review): the condition `i > 5.` is False at i == 0, so this loop
    # body never executes and y == x here — possibly intended to be `i < 5.`.
    # The static shape check below holds either way; confirm intent upstream.
    _, y = tf_control_flow_ops.while_loop(lambda i, a: i > 5.,
                                          lambda i, a: (i + 1, a + i),
                                          (constant_op.constant(0.), x))
    # Shape y: [2, 3]
    y = y[:2, :3]
    jacobian = gradients.jacobian(y, x)
    expected_shape = [2, 3, 3, 4]
    self.assertAllEqual(expected_shape, jacobian.shape.as_list())

  def test_jacobian_unknown_shape(self):
    with self.cached_session() as sess:
      x = array_ops.placeholder(dtypes.float32, shape=[None, None])
      y = math_ops.matmul(x, x, transpose_a=True)
      jacobian_pfor = gradients.jacobian(y, x, use_pfor=True)
      jacobian_while = gradients.jacobian(y, x, use_pfor=False)
      answer = ops.convert_to_tensor([[
          gradient_ops.gradients(y[0][0], x)[0],
          gradient_ops.gradients(y[0][1], x)[0]
      ], [
          gradient_ops.gradients(y[1][0], x)[0],
          gradient_ops.gradients(y[1][1], x)[0]
      ]])
      ans, pfor_value, while_value = sess.run(
          [answer, jacobian_pfor, jacobian_while],
          feed_dict={x: [[1, 2], [3, 4]]})
      self.assertAllClose(ans, pfor_value)
      self.assertAllClose(ans, while_value)

  def test_jacobian_parallel_iterations(self):
    # Result must not depend on the parallel_iterations setting.
    x = constant_op.constant([[1., 2], [3, 4]])
    y = math_ops.matmul(x, x)
    self.assertAllClose(gradients.jacobian(y, x, parallel_iterations=2),
                        gradients.jacobian(y, x, parallel_iterations=3))

  def test_batch_jacobian_bad_shapes(self):
    # Mismatched leading (batch) dimensions must be rejected.
    x = random_ops.random_uniform([2, 2])
    y = random_ops.random_uniform([3, 2])
    with self.assertRaisesRegexp(ValueError, "Need first dimension of output"):
      gradients.batch_jacobian(y, x, use_pfor=True)

  def test_batch_jacobian_bad_unknown_shapes(self):
    # With unknown static shapes the batch-dim check happens at run time.
    with self.cached_session() as sess:
      x = array_ops.placeholder(dtypes.float32)
      y = array_ops.concat([x, x], axis=0)
      jacobian = gradients.batch_jacobian(y, x)
      with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                   "assertion failed"):
        sess.run(jacobian, feed_dict={x: [[1, 2], [3, 4]]})

  def test_batch_jacobian_fixed_shape(self):
    x = random_ops.random_uniform([2, 3, 5])
    y = x * x
    batch_jacobian_pfor = gradients.batch_jacobian(y, x, use_pfor=True)
    batch_jacobian_while = gradients.batch_jacobian(y, x, use_pfor=False)
    # d(x*x)/dx = 2x, elementwise, hence diagonal per-example jacobians.
    two_x = 2 * x
    answer = array_ops.stack(
        [array_ops.diag(two_x[0]),
         array_ops.diag(two_x[1])])
    self.run_and_assert_equal(answer, batch_jacobian_pfor)
    self.run_and_assert_equal(answer, batch_jacobian_while)

  def test_batch_jacobian_unknown_shape(self):
    with self.cached_session() as sess:
      x = array_ops.placeholder(dtypes.float32)
      y = x * x
      batch_jacobian_pfor = gradients.batch_jacobian(y, x, use_pfor=True)
      batch_jacobian_while = gradients.batch_jacobian(y, x, use_pfor=False)
      two_x = 2 * x
      answer = array_ops.stack(
          [array_ops.diag(two_x[0]),
           array_ops.diag(two_x[1])])
      ans, pfor_value, while_value = sess.run(
          [answer, batch_jacobian_pfor, batch_jacobian_while],
          feed_dict={x: [[1, 2], [3, 4]]})
      self.assertAllClose(ans, pfor_value)
      self.assertAllClose(ans, while_value)

  def test_batch_jacobian_parallel_iterations(self):
    x = constant_op.constant([[1., 2], [3, 4]])
    w = constant_op.constant([[1., 2, 3, 4], [5, 6, 7, 8]])
    y = math_ops.matmul(x, w)
    self.assertAllClose(gradients.batch_jacobian(y, x, parallel_iterations=2),
                        gradients.batch_jacobian(y, x, parallel_iterations=3))

  def test_fc_batch_jacobian(self):
    pfor_jacobian, while_jacobian = create_fc_batch_jacobian(8, 4, 2)
    self.run_and_assert_equal(pfor_jacobian, while_jacobian)

  def test_lstm_batch_jacobian(self):
    pfor_jacobian, while_jacobian = create_lstm_batch_jacobian(8, 4, 2,
                                                               inputs_size=128)
    self.run_and_assert_equal(pfor_jacobian, while_jacobian)

  @test_util.disable_xla("This test never passed for XLA")
  def DISABLED_test_dynamic_lstm_batch_jacobian(self):
    pfor_jacobian, while_gradients = create_dynamic_lstm_batch_jacobian(8, 4, 3)
    with session.Session() as sess:
      init = variables.global_variables_initializer()
      self.evaluate(init)
      pfor = self.evaluate(pfor_jacobian)
      for i in range(4):
        while_i = sess.run(while_gradients[i])
        self.assertAllClose(while_i, pfor[:, i, ...])

  def test_lstm_hessian(self):
    pfor_hessian, while_hessian = create_lstm_hessian(2, 2, 2)
    self.run_and_assert_equal(pfor_hessian, while_hessian)

  def test_lstm_batch_hessian(self):
    pfor_hessian, while_hessian = create_lstm_batch_hessian(2, 2, 2)
    self.run_and_assert_equal(pfor_hessian, while_hessian)

  def test_fc_per_eg_grad(self):
    pfor_outputs, while_outputs = create_fc_per_eg_grad(8, 4, 2)
    self.run_and_assert_equal(pfor_outputs, while_outputs)

  def test_lstm_per_eg_grad(self):
    pfor_outputs, while_outputs = create_lstm_per_eg_grad(8, 4, 2)
    self.run_and_assert_equal(pfor_outputs, while_outputs)

  def test_mnist_per_eg_grad(self):
    # It looks like CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED
    # configuration of Winograd can cause low precision output resulting in
    # tests failing. So we disable that here.
    os.environ["TF_ENABLE_WINOGRAD_NONFUSED"] = "0"
    data_format = ("channels_first"
                   if test.is_gpu_available() else "channels_last")
    # Note that we are setting training=False here so that dropout produces
    # the same result with pfor and with while_loop.
    pfor_outputs, while_outputs = create_mnist_per_eg_grad(
        4, data_format, training=False)
    self.run_and_assert_equal(pfor_outputs, while_outputs, rtol=1e-3)
    os.environ.pop("TF_ENABLE_WINOGRAD_NONFUSED", None)

  def test_mnist_per_eg_jacobian(self):
    # It looks like CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED
    # configuration of Winograd can cause low precision output resulting in
    # tests failing. So we disable that here.
    os.environ["TF_ENABLE_WINOGRAD_NONFUSED"] = "0"
    data_format = ("channels_first"
                   if test.is_gpu_available() else "channels_last")
    # Note that we are setting training=False here so that dropout produces
    # the same result with pfor and with while_loop.
    pfor_outputs, while_outputs = create_mnist_per_eg_jacobian(
        2, data_format, training=False)
    self.run_and_assert_equal(pfor_outputs, while_outputs, rtol=1e-3)
    os.environ.pop("TF_ENABLE_WINOGRAD_NONFUSED", None)

  def test_fc_jacobian(self):
    jacobians, per_eg_jacobians_pfor, per_eg_jacobians_while = (
        create_fc_per_eg_jacobians(batch_size=8,
                                   activation_size=4,
                                   num_layers=2))
    self.run_and_assert_equal(jacobians, per_eg_jacobians_pfor,
                              rtol=2e-3, atol=1e-3)
    self.run_and_assert_equal(jacobians, per_eg_jacobians_while,
                              rtol=2e-3, atol=1e-3)

  def test_indexed_slice(self):
    # embedding_lookup produces IndexedSlices gradients.
    inp = random_ops.random_uniform([3, 2])
    output = nn.embedding_lookup(inp, [0, 2])
    pfor_jacobian = gradients.jacobian(output, inp, use_pfor=True)
    while_jacobian = gradients.jacobian(output, inp, use_pfor=False)
    self.run_and_assert_equal(while_jacobian, pfor_jacobian)
class GradientsBenchmarks(test.Benchmark):
  # Wall-time benchmarks comparing pfor- and while_loop-based jacobian and
  # per-example gradient computations on the model builders defined above.

  def _run(self, targets, iters, name=None):
    """Times `iters` evaluations of `targets` and reports the average in ms."""

    def _done(t):
      # Note that we don't use tf.control_dependencies since that will not make
      # sure that the computation on GPU has actually finished. So we fetch the
      # first element of the output, and assume that this will not be called on
      # empty tensors.
      return array_ops.gather(array_ops.reshape(t, [-1]), 0)

    targets = [_done(x) for x in nest.flatten(targets)]
    sess = session.Session()
    with sess:
      init = variables.global_variables_initializer()
      self.evaluate(init)
      # Warm-up run before timing.
      self.evaluate(targets)
      begin = time.time()
      for _ in range(iters):
        self.evaluate(targets)
      end = time.time()
    avg_time_ms = (1000 * (end - begin)) / iters
    self.report_benchmark(iters=iters, wall_time=avg_time_ms, name=name)
    return avg_time_ms

  def benchmark_fc_batch_jacobian(self):
    with ops.Graph().as_default():
      pfor_jacobian, while_jacobian = create_fc_batch_jacobian(100, 32, 20)
      self._run(pfor_jacobian, 100, name="fc_batch_jacobian_pfor")
      self._run(while_jacobian, 20, name="fc_batch_jacobian_while")

  def benchmark_lstm_batch_jacobian(self):
    with ops.Graph().as_default():
      pfor_jacobian, while_jacobian = create_lstm_batch_jacobian(
          100, 32, 8, inputs_size=128)
      self._run(pfor_jacobian, 100, name="lstm_batch_jacobian_pfor")
      self._run(while_jacobian, 20, name="lstm_batch_jacobian_while")

  def benchmark_lstm_hessian(self):
    with ops.Graph().as_default():
      pfor_hessian, while_hessian = create_lstm_hessian(2, 2, 10)
      self._run(pfor_hessian, 20, name="lstm_hessian_pfor")
      self._run(while_hessian, 3, name="lstm_hessian_while_pfor")

  def benchmark_lstm_batch_hessian(self):
    with ops.Graph().as_default():
      pfor_hessian, while_hessian = create_lstm_batch_hessian(4, 4, 10)
      self._run(pfor_hessian, 100, name="lstm_batch_hessian_pfor")
      self._run(while_hessian, 20, name="lstm_batch_hessian_while_pfor")

  def benchmark_fc_per_eg_grad(self):
    with ops.Graph().as_default():
      pfor_outputs, while_outputs = create_fc_per_eg_grad(100, 32, 3)
      self._run(pfor_outputs, 100, name="fc_per_eg_grad_pfor")
      self._run(while_outputs, 20, name="fc_per_eg_grad_while")

  def benchmark_lstm_per_eg_grad(self):
    with ops.Graph().as_default():
      pfor_outputs, while_outputs = create_lstm_per_eg_grad(100, 32, 8)
      self._run(pfor_outputs, 100, name="lstm_per_eg_grad_pfor")
      self._run(while_outputs, 20, name="lstm_per_eg_grad_while")

  def benchmark_mnist_autobatch(self):
    with ops.Graph().as_default():
      data_format = ("channels_first"
                     if test.is_gpu_available() else "channels_last")
      pfor_outputs, while_outputs, manual = create_mnist_autobatch(
          100, data_format, training=False)
      self._run(pfor_outputs, 100, name="mnist_pfor")
      self._run(while_outputs, 20, name="mnist_while")
      self._run(manual, 100, name="mnist_manual")

  def benchmark_mnist_per_eg_grad(self):
    with ops.Graph().as_default():
      data_format = ("channels_first"
                     if test.is_gpu_available() else "channels_last")
      pfor_outputs, while_outputs = create_mnist_per_eg_grad(
          128, data_format, training=True)
      self._run(pfor_outputs, 20, name="mnist_per_eg_grad_pfor")
      self._run(while_outputs, 20, name="mnist_per_eg_grad_while")

  def benchmark_mnist_per_eg_jacobian(self):
    with ops.Graph().as_default():
      if test.is_gpu_available():
        data_format = "channels_first"
      else:
        data_format = "channels_last"
      pfor_outputs, while_outputs = create_mnist_per_eg_jacobian(
          16, data_format, training=True)
      self._run(pfor_outputs, 20, name="mnist_per_eg_jacobian_pfor")
      self._run(while_outputs, 20, name="mnist_per_eg_jacobian_while")

  def benchmark_mnist_batch_jacobian(self):
    with ops.Graph().as_default():
      if test.is_gpu_available():
        data_format = "channels_first"
      else:
        data_format = "channels_last"
      pfor_outputs, while_outputs = create_mnist_batch_jacobian(
          128, data_format, training=True)
      self._run(pfor_outputs, 20, name="mnist_batch_jacobian_pfor")
      self._run(while_outputs, 20, name="mnist_batch_jacobian_while")

  def benchmark_fc_per_eg_jacobian(self):
    with ops.Graph().as_default():
      jacobians, per_eg_jacobians_pfor, per_eg_jacobians_while = (
          create_fc_per_eg_jacobians(batch_size=128,
                                     activation_size=32,
                                     num_layers=3))
      self._run(jacobians, 30, name="fc_jacobians_pfor")
      self._run(per_eg_jacobians_pfor, 100,
                name="fc_per_eg_jacobians_pfor")
      self._run(per_eg_jacobians_while, 10,
                name="fc_per_eg_jacobians_while")
if __name__ == "__main__":
  # Entry point: delegates to the TensorFlow test runner.
  test.main()
| |
#!/usr/bin/env python
# Author: Josh Yelon
#
# This is a tutorial to show one of the simplest applications
# of copy-to-texture: motion trails.
#
from direct.showbase.ShowBase import ShowBase
from panda3d.core import GraphicsOutput
from panda3d.core import Filename, Texture
from panda3d.core import CardMaker
from panda3d.core import NodePath, TextNode
from panda3d.core import AmbientLight, DirectionalLight
from direct.showbase.DirectObject import DirectObject
from direct.gui.OnscreenText import OnscreenText
from direct.task.Task import Task
from direct.actor.Actor import Actor
from random import uniform
import sys
import os
def addInstructions(pos, msg):
    """Create one line of on-screen instruction text at vertical offset `pos`."""
    text_pos = (0.06, -pos - 0.03)
    return OnscreenText(text=msg, parent=base.a2dTopLeft, style=1,
                        fg=(1, 1, 1, 1), pos=text_pos,
                        align=TextNode.ALeft, scale=0.05)
class MotionTrails(ShowBase):
    # Demo application: each frame the window is copied into a texture
    # (self.tex) which is redrawn behind (self.bcard) or in front of
    # (self.fcard) the scene, producing motion-trail feedback effects.
    # The chooseEffect* methods configure the cards plus the copy rate
    # (self.clickrate, in copies per second) and the next copy time
    # (self.nextclick).

    def __init__(self):
        # Initialize the ShowBase class from which we inherit, which will
        # create a window and set up everything we need for rendering into it.
        ShowBase.__init__(self)
        self.disableMouse()
        self.camera.setPos(0, -26, 4)
        self.setBackgroundColor(0, 0, 0)

        # Create a texture into which we can copy the main window.
        # We set it to RTMTriggeredCopyTexture mode, which tells it that we
        # want it to copy the window contents into a texture every time we
        # call self.win.triggerCopy().
        self.tex = Texture()
        self.tex.setMinfilter(Texture.FTLinear)
        self.win.addRenderTexture(self.tex,
                                  GraphicsOutput.RTMTriggeredCopyTexture)

        # Set the initial color to clear the texture to, before rendering it.
        # This is necessary because we don't clear the texture while rendering,
        # and otherwise the user might see garbled random data from GPU memory.
        self.tex.setClearColor((0, 0, 0, 1))
        self.tex.clearImage()

        # Create another 2D camera. Tell it to render before the main camera.
        self.backcam = self.makeCamera2d(self.win, sort=-10)
        self.background = NodePath("background")
        self.backcam.reparentTo(self.background)
        self.background.setDepthTest(0)
        self.background.setDepthWrite(0)
        self.backcam.node().getDisplayRegion(0).setClearDepthActive(0)

        # Obtain two texture cards. One renders before the dragon, the other
        # after.
        self.bcard = self.win.getTextureCard()
        self.bcard.reparentTo(self.background)
        self.bcard.setTransparency(1)
        self.fcard = self.win.getTextureCard()
        self.fcard.reparentTo(self.render2d)
        self.fcard.setTransparency(1)

        # Initialize one of the nice effects.
        self.chooseEffectGhost()

        # Add the task that initiates the screenshots.
        taskMgr.add(self.takeSnapShot, "takeSnapShot")

        # Create some black squares on top of which we will
        # place the instructions.
        blackmaker = CardMaker("blackmaker")
        blackmaker.setColor(0, 0, 0, 1)
        blackmaker.setFrame(-1.00, -0.50, 0.65, 1.00)
        instcard = NodePath(blackmaker.generate())
        instcard.reparentTo(self.render2d)
        blackmaker.setFrame(-0.5, 0.5, -1.00, -0.85)
        titlecard = NodePath(blackmaker.generate())
        titlecard.reparentTo(self.render2d)

        # Panda does its best to hide the differences between DirectX and
        # OpenGL. But there are a few differences that it cannot hide.
        # One such difference is that when OpenGL copies from a
        # visible window to a texture, it gets it right-side-up. When
        # DirectX does it, it gets it upside-down. There is nothing panda
        # can do to compensate except to expose a flag and let the
        # application programmer deal with it. You should only do this
        # in the rare event that you're copying from a visible window
        # to a texture.
        if self.win.getGsg().getCopyTextureInverted():
            print("Copy texture is inverted.")
            self.bcard.setScale(1, 1, -1)
            self.fcard.setScale(1, 1, -1)

        # Put up the instructions
        title = OnscreenText(text="Panda3D: Tutorial - Motion Trails",
                             fg=(1, 1, 1, 1), parent=base.a2dBottomCenter,
                             pos=(0, 0.1), scale=.08)

        instr0 = addInstructions(0.06, "Press ESC to exit")
        instr1 = addInstructions(0.12, "Press 1: Ghost effect")
        instr2 = addInstructions(0.18, "Press 2: PaintBrush effect")
        instr3 = addInstructions(0.24, "Press 3: Double Vision effect")
        instr4 = addInstructions(0.30, "Press 4: Wings of Blue effect")
        instr5 = addInstructions(0.36, "Press 5: Whirlpool effect")

        # Enable the key events
        self.accept("escape", sys.exit, [0])
        self.accept("1", self.chooseEffectGhost)
        self.accept("2", self.chooseEffectPaintBrush)
        self.accept("3", self.chooseEffectDoubleVision)
        self.accept("4", self.chooseEffectWingsOfBlue)
        self.accept("5", self.chooseEffectWhirlpool)

    def takeSnapShot(self, task):
        """Task: trigger a window-to-texture copy self.clickrate times/second."""
        if task.time > self.nextclick:
            self.nextclick += 1.0 / self.clickrate
            # Don't try to catch up if we fell behind; skip ahead instead.
            if self.nextclick < task.time:
                self.nextclick = task.time
            self.win.triggerCopy()
        return Task.cont

    def chooseEffectGhost(self):
        """Show the front card at near-full alpha, copying 30 times/second."""
        self.setBackgroundColor(0, 0, 0, 1)
        self.bcard.hide()
        self.fcard.show()
        self.fcard.setColor(1.0, 1.0, 1.0, 0.99)
        self.fcard.setScale(1.00)
        self.fcard.setPos(0, 0, 0)
        self.fcard.setR(0)
        self.clickrate = 30
        self.nextclick = 0

    def chooseEffectPaintBrush(self):
        """Show only the back card, copying every frame."""
        self.setBackgroundColor(0, 0, 0, 1)
        self.bcard.show()
        self.fcard.hide()
        self.bcard.setColor(1, 1, 1, 1)
        self.bcard.setScale(1.0)
        self.bcard.setPos(0, 0, 0)
        self.bcard.setR(0)
        self.clickrate = 10000
        self.nextclick = 0

    def chooseEffectDoubleVision(self):
        """Show both cards, horizontally offset in opposite directions."""
        self.setBackgroundColor(0, 0, 0, 1)
        self.bcard.show()
        self.bcard.setColor(1, 1, 1, 1)
        self.bcard.setScale(1.0)
        self.bcard.setPos(-0.05, 0, 0)
        self.bcard.setR(0)
        self.fcard.show()
        self.fcard.setColor(1, 1, 1, 0.60)
        self.fcard.setScale(1.0)
        self.fcard.setPos(0.05, 0, 0)
        self.fcard.setR(0)
        self.clickrate = 10000
        self.nextclick = 0

    def chooseEffectWingsOfBlue(self):
        """Show the back card tinted and stretched, copying 30 times/second."""
        self.setBackgroundColor(0, 0, 0, 1)
        self.fcard.hide()
        self.bcard.show()
        self.bcard.setColor(1.0, 0.90, 1.0, 254.0 / 255.0)
        self.bcard.setScale(1.1, 1, 0.95)
        self.bcard.setPos(0, 0, 0.05)
        self.bcard.setR(0)
        self.clickrate = 30
        self.nextclick = 0

    def chooseEffectWhirlpool(self):
        """Show the back card slightly shrunk and rotated, copying every frame."""
        self.setBackgroundColor(0, 0, 0, 1)
        self.bcard.show()
        self.fcard.hide()
        self.bcard.setColor(1, 1, 1, 1)
        self.bcard.setScale(0.999)
        self.bcard.setPos(0, 0, 0)
        self.bcard.setR(1)
        self.clickrate = 10000
        self.nextclick = 0
# Create the demo application.
t = MotionTrails()

# Load the dancer model, attach it to the scene, and loop its animation.
character = Actor()
character.loadModel('models/dancer')
character.reparentTo(t.render)
character.loadAnims({'win': 'models/dancer'})
character.loop('win')
# character.hprInterval(15, LPoint3(360, 0,0)).loop()

# put some lighting on the model
dlight = DirectionalLight('dlight')
alight = AmbientLight('alight')
dlnp = t.render.attachNewNode(dlight)
alnp = t.render.attachNewNode(alight)
dlight.setColor((1.0, 0.9, 0.8, 1))
alight.setColor((0.2, 0.3, 0.4, 1))
dlnp.setHpr(0, -60, 0)
t.render.setLight(dlnp)
t.render.setLight(alnp)

# Enter the main loop (blocks until the window is closed).
t.run()
| |
"""SCons.Tool.icl
Tool-specific initialization for the Intel C/C++ compiler.
Supports Linux and Windows compilers, v7 and up.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import math, sys, os.path, glob, string, re
is_windows = sys.platform == 'win32'
is_linux = sys.platform == 'linux2'
if is_windows:
import SCons.Tool.msvc
elif is_linux:
import SCons.Tool.gcc
import SCons.Util
import SCons.Warnings
# Exceptions for this tool
# Exception hierarchy for this tool: everything derives from IntelCError so
# callers can catch the whole family with one except clause.
class IntelCError(SCons.Errors.InternalError):
    """Base class for all Intel-compiler-tool errors."""
    pass
class MissingRegistryError(IntelCError): # missing registry entry
    pass
class MissingDirError(IntelCError):     # dir not found
    pass
class NoRegistryModuleError(IntelCError): # can't read registry at all
    pass
def uniquify(s):
    """Return a sequence holding exactly one copy of each distinct element of s.

    Order is not guaranteed.  Elements must be hashable, since a dictionary
    is used internally to detect duplicates."""
    return dict.fromkeys(s).keys()
def linux_ver_normalize(vstr):
    """Normalize a Linux Intel compiler version string.

    Intel switched from "80"-style numbers to "9.0"-style in 2005, so a
    value above 60 is treated as old-style.  Always returns an old-style
    float such as 80 or 90, for compatibility with Windows.
    Shades of Y2K!"""
    # Dotted build numbers like 9.1.026 normalize to 91.026.
    parts = re.match(r'([0-9]+)\.([0-9]+)\.([0-9]+)', vstr)
    if parts is not None:
        major, minor, build = parts.groups()
        return 10 * float(major) + float(minor) + float(build) / 1000.
    value = float(vstr)
    if is_windows:
        return value
    # Linux: new-style numbers (< 60) are scaled up to the old form.
    if value < 60:
        return value * 10.0
    return value
def check_abi(abi):
    """Check for valid ABI (application binary interface) name,
    and map into canonical one.

    Returns None for a falsy input; raises SCons.Errors.UserError for an
    unrecognized name.
    NOTE(review): if neither is_windows nor is_linux is true, valid_abis is
    never bound and the lookup raises NameError - callers (generate) only
    invoke this after checking the platform, confirm that stays true."""
    if not abi:
        return None
    abi = abi.lower()
    # valid_abis maps input name to canonical name
    if is_windows:
        valid_abis = {'ia32'  : 'ia32',
                      'x86'   : 'ia32',
                      'ia64'  : 'ia64',
                      'em64t' : 'ia32e',
                      'amd64' : 'ia32e'}
    if is_linux:
        valid_abis = {'ia32'   : 'ia32',
                      'x86'    : 'ia32',
                      'x86_64' : 'x86_64',
                      'em64t'  : 'x86_64',
                      'amd64'  : 'x86_64'}
    try:
        abi = valid_abis[abi]
    except KeyError:
        raise SCons.Errors.UserError, \
            "Intel compiler: Invalid ABI %s, valid values are %s"% \
            (abi, valid_abis.keys())
    return abi
def vercmp(a, b):
    """Compare strings as floats,
    but Intel changed Linux naming convention at 9.0.

    Note the deliberately reversed operands (b before a): used as a sort
    comparison function this orders most-recent version first."""
    return cmp(linux_ver_normalize(b), linux_ver_normalize(a))
def get_version_from_list(v, vlist):
    """Look up version string v in vlist (list of strings) and return the
    matching entry, or None.  On Linux the match is fuzzy (numerically
    within 0.001 after normalization) but the original, non-normalized
    entry from vlist is returned."""
    if is_windows:
        # Windows: plain membership test.
        if v in vlist:
            return v
        return None
    # Linux: compare normalized numbers within a small tolerance.
    fuzz = 0.001
    target = linux_ver_normalize(v)
    for candidate in vlist:
        if math.fabs(linux_ver_normalize(candidate) - target) < fuzz:
            return candidate
    # No candidate was close enough.
    return None
def get_intel_registry_value(valuename, version=None, abi=None):
    """
    Return a value from the Intel compiler registry tree. (Windows only)

    Raises MissingRegistryError when the key or the value is absent.
    NOTE(review): the defaults of None would make the string concatenation
    below raise TypeError - callers always pass both arguments; confirm.
    """
    # Open the key:
    K = 'Software\\Intel\\Compilers\\C++\\' + version + '\\'+abi.upper()
    try:
        k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE, K)
    except SCons.Util.RegError:
        raise MissingRegistryError, \
            "%s was not found in the registry, for Intel compiler version %s, abi='%s'"%(K, version,abi)
    # Get the value:
    try:
        v = SCons.Util.RegQueryValueEx(k, valuename)[0]
        return v # or v.encode('iso-8859-1', 'replace') to remove unicode?
    except SCons.Util.RegError:
        raise MissingRegistryError, \
            "%s\\%s was not found in the registry."%(K, valuename)
def get_all_compiler_versions():
    """Returns a sorted list of strings, like "70" or "80" or "9.0"
    with most recent compiler version first.

    Windows: enumerates the Intel registry tree and keeps only versions
    whose ProductDir still exists on disk.  Linux: globs the conventional
    /opt install locations.  Returns [] when nothing is found.
    """
    versions=[]
    if is_windows:
        keyname = 'Software\\Intel\\Compilers\\C++'
        try:
            k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,
                                        keyname)
        except WindowsError:
            # No Intel registry tree at all.
            return []
        i = 0
        versions = []
        try:
            # Arbitrary cap of 100 subkeys; RegEnumKey raises when exhausted.
            while i < 100:
                subkey = SCons.Util.RegEnumKey(k, i) # raises EnvironmentError
                # Check that this refers to an existing dir.
                # This is not 100% perfect but should catch common
                # installation issues like when the compiler was installed
                # and then the install directory deleted or moved (rather
                # than uninstalling properly), so the registry values
                # are still there.
                ok = False
                for try_abi in ('IA32', 'IA32e', 'IA64'):
                    try:
                        d = get_intel_registry_value('ProductDir', subkey, try_abi)
                    except MissingRegistryError:
                        continue # not found in reg, keep going
                    if os.path.exists(d): ok = True
                if ok:
                    versions.append(subkey)
                else:
                    # Registry points to nonexistent dir. Ignore this version.
                    print "Ignoring "+str(get_intel_registry_value('ProductDir', subkey, 'IA32'))
                i = i + 1
        except EnvironmentError:
            # no more subkeys
            pass
    elif is_linux:
        for d in glob.glob('/opt/intel_cc_*'):
            # Typical dir here is /opt/intel_cc_80.
            versions.append(re.search(r'cc_(.*)$', d).group(1))
        for d in glob.glob('/opt/intel/cc*/*'):
            # Typical dir here is /opt/intel/cc/9.0 for IA32,
            # /opt/intel/cce/9.0 for EMT64 (AMD64)
            versions.append(re.search(r'([0-9.]+)$', d).group(1))
    versions = uniquify(versions)       # remove dups
    versions.sort(vercmp)               # newest first (see vercmp)
    return versions
def get_intel_compiler_top(version, abi):
    """
    Return the main path to the top-level dir of the Intel compiler,
    using the given version.
    The compiler will be in <top>/bin/icl.exe (icc on linux),
    the include dir is <top>/include, etc.

    Raises NoRegistryModuleError, MissingRegistryError or MissingDirError
    when the install cannot be located or its layout looks wrong.
    """
    if is_windows:
        if not SCons.Util.can_read_reg:
            raise NoRegistryModuleError, "No Windows registry module was found"
        top = get_intel_registry_value('ProductDir', version, abi)
        if not os.path.exists(os.path.join(top, "Bin", "icl.exe")):
            raise MissingDirError, \
                "Can't find Intel compiler in %s"%(top)
    elif is_linux:
        # first dir is new (>=9.0) style, second is old (8.0) style.
        dirs=('/opt/intel/cc/%s', '/opt/intel_cc_%s')
        if abi == 'x86_64':
            dirs=('/opt/intel/cce/%s',)  # 'e' stands for 'em64t', aka x86_64 aka amd64
        top=None
        for d in dirs:
            if os.path.exists(os.path.join(d%version, "bin", "icc")):
                top = d%version
                break
        if not top:
            # NOTE(review): `top` is None here, so the message prints
            # "in None" - probably meant to list `dirs` instead.
            raise MissingDirError, \
                "Can't find version %s Intel compiler in %s (abi='%s')"%(version,top, abi)
    return top
def generate(env, version=None, abi=None, topdir=None, verbose=0):
    """Add Builders and construction variables for Intel C/C++ compiler
    to an Environment.
    args:
      version: (string) compiler version to use, like "80"
      abi:     (string) 'win32' or whatever Itanium version wants
      topdir:  (string) compiler top dir, like
                 "c:\Program Files\Intel\Compiler70"
                 If topdir is used, version and abi are ignored.
      verbose: (int) if >0, prints compiler version used.
    """
    if not (is_linux or is_windows):
        # can't handle this platform
        return
    # Start from the platform's base tool (msvc/gcc) and override below.
    if is_windows:
        SCons.Tool.msvc.generate(env)
    elif is_linux:
        SCons.Tool.gcc.generate(env)
    # if version is unspecified, use latest
    vlist = get_all_compiler_versions()
    if not version:
        if vlist:
            version = vlist[0]
    else:
        # User may have specified '90' but we need to get actual dirname '9.0'.
        # get_version_from_list does that mapping.
        v = get_version_from_list(version, vlist)
        if not v:
            raise SCons.Errors.UserError, \
                "Invalid Intel compiler version %s: "%version + \
                "installed versions are %s"%(', '.join(vlist))
        version = v
    # if abi is unspecified, use ia32
    # alternatives are ia64 for Itanium, or amd64 or em64t or x86_64 (all synonyms here)
    abi = check_abi(abi)
    if abi is None:
        if is_linux:
            # Check if we are on 64-bit linux, default to 64 then.
            uname_m = os.uname()[4]
            if uname_m == 'x86_64':
                abi = 'x86_64'
            else:
                abi = 'ia32'
        else:
            # XXX: how would we do the same test on Windows?
            abi = "ia32"
    # Locate the install dir unless the caller supplied topdir explicitly.
    if version and not topdir:
        try:
            topdir = get_intel_compiler_top(version, abi)
        except (SCons.Util.RegError, IntelCError):
            topdir = None
    if not topdir:
        # Normally this is an error, but it might not be if the compiler is
        # on $PATH and the user is importing their env.
        class ICLTopDirWarning(SCons.Warnings.Warning):
            pass
        if is_linux and not env.Detect('icc') or \
           is_windows and not env.Detect('icl'):
            SCons.Warnings.enableWarningClass(ICLTopDirWarning)
            SCons.Warnings.warn(ICLTopDirWarning,
                                "Failed to find Intel compiler for version='%s', abi='%s'"%
                                (str(version), str(abi)))
        else:
            # should be cleaned up to say what this other version is
            # since in this case we have some other Intel compiler installed
            SCons.Warnings.enableWarningClass(ICLTopDirWarning)
            SCons.Warnings.warn(ICLTopDirWarning,
                                "Can't find Intel compiler top dir for version='%s', abi='%s'"%
                                (str(version), str(abi)))
    if topdir:
        if verbose:
            print "Intel C compiler: using version '%s' (%g), abi %s, in '%s'"%\
                  (version, linux_ver_normalize(version),abi,topdir)
            if is_linux:
                # Show the actual compiler version by running the compiler.
                os.system('%s/bin/icc --version'%topdir)
        env['INTEL_C_COMPILER_TOP'] = topdir
        if is_linux:
            # Prepend the install's subdirs to the relevant env paths.
            paths={'INCLUDE'         : 'include',
                   'LIB'             : 'lib',
                   'PATH'            : 'bin',
                   'LD_LIBRARY_PATH' : 'lib'}
            for p in paths:
                env.PrependENVPath(p, os.path.join(topdir, paths[p]))
        if is_windows:
            #       env key    reg valname   default subdir of top
            paths=(('INCLUDE', 'IncludeDir', 'Include'),
                   ('LIB'    , 'LibDir',     'Lib'),
                   ('PATH'   , 'BinDir',     'Bin'))
            # Each path has a registry entry, use that or default to subdir
            for p in paths:
                try:
                    path=get_intel_registry_value(p[1], version, abi)
                    # These paths may have $(ICInstallDir)
                    # which needs to be substituted with the topdir.
                    path=path.replace('$(ICInstallDir)', topdir + os.sep)
                except IntelCError:
                    # Couldn't get it from registry: use default subdir of topdir
                    env.PrependENVPath(p[0], os.path.join(topdir, p[2]))
                else:
                    env.PrependENVPath(p[0], string.split(path, os.pathsep))
                    # print "ICL %s: %s, final=%s"%(p[0], path, str(env['ENV'][p[0]]))
    if is_windows:
        env['CC']   = 'icl'
        env['CXX']  = 'icl'
        env['LINK'] = 'xilink'
    else:
        env['CC']  = 'icc'
        env['CXX'] = 'icpc'
        # Don't reset LINK here;
        # use smart_link which should already be here from link.py.
        #env['LINK'] = '$CC'
        env['AR'] = 'xiar'
        env['LD'] = 'xild' # not used by default
    # This is not the exact (detailed) compiler version,
    # just the major version as determined above or specified
    # by the user. It is a float like 80 or 90, in normalized form for Linux
    # (i.e. even for Linux 9.0 compiler, still returns 90 rather than 9.0)
    if version:
        env['INTEL_C_COMPILER_VERSION']=linux_ver_normalize(version)
    if is_windows:
        # Look for license file dir
        # in system environment, registry, and default location.
        envlicdir = os.environ.get("INTEL_LICENSE_FILE", '')
        K = ('SOFTWARE\Intel\Licenses')
        try:
            k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE, K)
            reglicdir = SCons.Util.RegQueryValueEx(k, "w_cpp")[0]
        except (AttributeError, SCons.Util.RegError):
            reglicdir = ""
        defaultlicdir = r'C:\Program Files\Common Files\Intel\Licenses'
        licdir = None
        for ld in [envlicdir, reglicdir]:
            if ld and os.path.exists(ld):
                licdir = ld
                break
        if not licdir:
            licdir = defaultlicdir
            if not os.path.exists(licdir):
                class ICLLicenseDirWarning(SCons.Warnings.Warning):
                    pass
                SCons.Warnings.enableWarningClass(ICLLicenseDirWarning)
                SCons.Warnings.warn(ICLLicenseDirWarning,
                                    "Intel license dir was not found."
                                    " Tried using the INTEL_LICENSE_FILE environment variable (%s), the registry (%s) and the default path (%s)."
                                    " Using the default path as a last resort."
                                    % (envlicdir, reglicdir, defaultlicdir))
        env['ENV']['INTEL_LICENSE_FILE'] = licdir
def exists(env):
    """Report whether an Intel C compiler looks available on this system."""
    # Unsupported platform: report absence immediately.
    if not (is_linux or is_windows):
        return 0
    found = None
    try:
        found = get_all_compiler_versions()
    except (SCons.Util.RegError, IntelCError):
        pass
    if found:
        return True
    # Nothing registered in the usual places - fall back to probing the
    # PATH for the compiler executable itself.
    if is_windows:
        return env.Detect('icl')
    if is_linux:
        return env.Detect('icc')
    return False
# end of file
| |
# coding=utf-8
from __future__ import print_function
from abc import abstractmethod
import copy
from hooky import List, Dict
import xml.parsers.expat
class HandlerError(Exception):
    """Raised by the expat handlers for XML this parser cannot process
    (e.g. a DOCTYPE with an internal subset)."""
    pass
def parse(xmlstr, debug=False):
    """
    parse XML string to Xl object
    :param xmlstr:
    :type xmlstr: str
    :param debug: when true, trace each expat callback on stdout
    :type debug: bool
    :return: object of :class:`Xl`
    :rtype: Xl
    """
    xl = Xl()
    # One-element lists are used as mutable cells (instead of `nonlocal`,
    # for Python 2 compatibility); index [0] holds the real value.
    out_element = [None]   # the root element, set by the first start_element
    elements = []          # stack of currently-open elements
    ns_list = []           # stack of (uri, prefix) pairs currently in scope
    s = ['']               # character data accumulated since the last tag
    def start_element(name, attrs):
        # Flush pending text, then push a new Element onto the stack.
        # `name` arrives as "uri tag" because namespace_separator=' '.
        _do_string()
        print('start_element', name) if debug else None
        attributes = {}
        l = name.rsplit(' ', 1)
        tag = l[-1]
        uri = None
        if len(l) > 1:
            uri = l[0]
        for key, value in attrs.items():
            l = key.rsplit(' ', 1)
            attr_name = l[-1]
            attr_uri = None
            if len(l) > 1:
                attr_uri = l[0]
            attributes[(attr_uri, attr_name)] = value
        # Snapshot the namespace declarations currently in scope.
        prefixes = {}
        for _uri, _prefix in ns_list:
            prefixes[_uri] = _prefix
        e = Element(tag=(uri, tag), attributes=attributes, prefixes=prefixes)
        if elements:
            elements[-1].children.append(e)
        elements.append(e)
        # nonlocal out_element
        if not out_element[0]:
            out_element[0] = e
    def end_element(name):
        _do_string()
        print('end_element', name) if debug else None
        elements.pop()
    def start_namespace(prefix, uri):
        print('start_namespace') if debug else None
        ns_list.append((uri, prefix))
    def end_namespace(prefix):
        print('end_namespace') if debug else None
        ns_list.pop()
    def character_data_handler(data):
        print('Character html: "{}"'.format(data)) if debug else None
        # nonlocal s
        s[0] += data
    def _do_string():
        # Attach accumulated character data to the innermost open element.
        # nonlocal s
        if s[0] and elements:
            string = s[0]
            elements[-1].children.append(string)
            s[0] = ''
    def start_doc_type_decl(doc_type_name, system_id, public_id, has_internal_subset):
        xl.doc_type = DocType(doc_type_name=doc_type_name, system_id=system_id, public_id=public_id)
        if has_internal_subset == 1:
            raise HandlerError('Has internal subset, cannot handler it!')
    def decl_handler(version, encoding, standalone):
        # expat reports standalone as 1/0/-1; map to True/False/None.
        standalone_ = None
        if standalone == 1:
            standalone_ = True
        elif standalone == 0:
            standalone_ = False
        elif standalone == -1:
            standalone_ = None
        xl.header = Header(version=version, encoding=encoding, standalone=standalone_)
    # Fixed: the encoding was misspelled 'UTF=8', which is not a valid
    # encoding name and makes expat fail when parsing.
    p = xml.parsers.expat.ParserCreate(namespace_separator=' ', encoding='UTF-8')
    p.XmlDeclHandler = decl_handler
    p.StartDoctypeDeclHandler = start_doc_type_decl
    # internal dtd
    # p.EntityDeclHandler = entity_decl_handler
    p.StartElementHandler = start_element
    p.EndElementHandler = end_element
    p.StartNamespaceDeclHandler = start_namespace
    p.EndNamespaceDeclHandler = end_namespace
    p.CharacterDataHandler = character_data_handler
    p.Parse(xmlstr, 1)
    xl.root = out_element[0]
    return xl
class ObjectAttributeError(Exception):
    """Raised for invalid attribute access on the XML object model."""
    pass
def _name_check(name):
    # Placeholder: XML name validation is not implemented yet.
    pass
def _nsmap_check(nsmap):
    # Placeholder: namespace-map validation is not implemented yet.
    pass
def _nsuri_check(uri, namespaces):
if uri is None:
pass
elif uri not in namespaces.keys():
raise Exception
def clean_whitespaces(element):
    """
    :param element:
    :return: A copy of the element in which every text node, at every depth,
        has had leading/trailing whitespace stripped; text nodes that become
        empty are dropped.  The input element is not modified.
    """
    if not isinstance(element, Element):
        raise TypeError
    result = Element(tag=copy.deepcopy(element.tag),
                     attributes=copy.deepcopy(element.attributes),
                     prefixes=copy.deepcopy(element.prefixes))
    for node in element.children:
        if isinstance(node, Element):
            # Recurse into child elements.
            result.children.append(clean_whitespaces(node))
        elif isinstance(node, str):
            stripped = node.strip()
            if stripped:
                result.children.append(stripped)
    return result
def _is_straight_line(element):
    """Return True when the element's subtree is a single chain: every level
    has at most one child, ending in a leaf or a text node."""
    children = element.children
    if len(children) == 0:
        return True
    if len(children) > 1:
        return False
    only_child = children[0]
    if isinstance(only_child, Element):
        return _is_straight_line(only_child)
    # Single text child: still a straight line.
    return True
def pretty_insert(element, start_indent=0, step=4, dont_do_when_one_child=True):
    """
    Modify the copy of the element, to make it looks more pretty and clear.
    :param element:
    :type element: Element
    :param start_indent: indent (spaces) of this element's own line
    :type start_indent: int
    :param step: extra spaces added per nesting level
    :type step: int
    :param dont_do_when_one_child: leave single-chain subtrees on one line
    :type dont_do_when_one_child: bool
    :return: object of :class:`Element`
    """
    new_element = Element(tag=copy.deepcopy(element.tag),
                          attributes=copy.deepcopy(element.attributes),
                          prefixes=copy.deepcopy(element.prefixes))
    # Whitespace inserted before each child at the next nesting depth.
    _indent_text = '\n' + ' ' * (start_indent + step)
    if _is_straight_line(element) and dont_do_when_one_child:
        # At most one child per level: keep the subtree inline.
        new_element.children = copy.deepcopy(element.children)
    elif element.children:
        for child in element.children:
            if isinstance(child, str):
                new_text = _indent_text + child
                new_element.children.append(new_text)
            elif isinstance(child, Element):
                new_element.children.append(_indent_text)
                new_element.children.append(pretty_insert(element=child,
                                                          start_indent=start_indent + step,
                                                          step=step,
                                                          dont_do_when_one_child=dont_do_when_one_child,
                                                          ))
        # Put the closing tag on its own line at the parent's indent.
        new_element.children.append('\n' + ' ' * start_indent)
    return new_element
# Reserved namespace URI that is always bound to the 'xml' prefix.
URI_XML = 'http://www.w3.org/XML/1998/namespace'
class XLError(Exception):
    """Base error type for this XML library."""
    pass
class Xl(object):
    """In-memory XML document: optional header (XML declaration), optional
    doctype, and the root element."""
    def __init__(self, header=None, doc_type=None, root=None):
        """
        :param header:
        :type header: Header
        :param doc_type:
        :type doc_type: DocType
        :param root:
        :type root: Element
        """
        # Fixed: was `header or header`, a redundant expression that always
        # evaluates to `header` anyway; store the argument directly.
        self.header = header
        """object of :class:`Header`"""
        self.doc_type = doc_type
        """object of :class:`DocType`"""
        self.root = root
        """object of :class:`Element`"""
    def string(self):
        """To xml string: header line, doctype line, then the root element."""
        s = ''
        if self.header:
            s += self.header.string() + '\n'
        if self.doc_type:
            s += self.doc_type.string() + '\n'
        s += self.root.string()
        return s
class _Node(object):
    """Abstract base for everything that can serialize itself to XML text."""
    @abstractmethod
    def string(self):
        """Return this node's XML string form."""
        pass
class Header(_Node):
    """
    Handle XML header node (the `<?xml ... ?>` declaration).
    """
    def __init__(self, version=None, encoding=None, standalone=None):
        """
        :param version: defaults to '1.0'
        :type version: str
        :param encoding: defaults to 'utf-8'
        :type encoding: str
        :param standalone: True/False for yes/no, None to omit
        :type standalone: bool
        """
        self.version = version or '1.0'
        self.encoding = encoding or 'utf-8'
        self.standalone = standalone
    def string(self):
        """Serialize the declaration; empty string when nothing is set."""
        if not (self.version or self.encoding or self.standalone):
            return ''
        pieces = ['<?xml']
        if self.version:
            pieces.append(' version="{}"'.format(self.version))
        if self.encoding:
            pieces.append(' encoding="{}"'.format(self.encoding))
        if self.standalone is not None:
            pieces.append(' standalone="{}"'.format('yes' if self.standalone else 'no'))
        pieces.append(' ?>')
        return ''.join(pieces)
class DocType(_Node):
    """
    Handle XML doc type node.
    """
    def __init__(self, doc_type_name, system_id, public_id):
        """Store the doctype name with its PUBLIC and SYSTEM identifiers."""
        self.doc_type_name = doc_type_name
        self.system_id = system_id
        self.public_id = public_id
    def string(self):
        """Serialize as `<!DOCTYPE name "public_id" "system_id">`."""
        pieces = [
            '<!DOCTYPE',
            ' {}'.format(self.doc_type_name),
            ' "{}"'.format(self.public_id),
            ' "{}"'.format(self.system_id),
            '>',
        ]
        return ''.join(pieces)
class Element(_Node):
    """
    Handle XML element node: tag, attributes, namespace prefixes, children,
    and serialization back to XML text.
    """
    def __init__(self, tag=None, attributes=None, prefixes=None):
        """
        :param tag:
        :type tag: tuple or str
        :param attributes:
        :type attributes: _Attributes or dict
        :param prefixes:
        :type prefixes: _Prefixes or dict
        """
        _Node.__init__(self)
        self._tag = None
        self.tag = tag
        """tuple object of length 2.
        First in the tuple is the url of the namespaces,
        the second is the xml element tag you know ordinarily.
        """
        self._attributes = _Attributes(attributes) if attributes else _Attributes()
        """dict-like.
        Store xml attribute names and values in *keys* and *values*
        """
        self._prefixes = _Prefixes(prefixes) if prefixes else _Prefixes()
        """dict-like.
        Store xml namespaces urls and prefixes in *keys* and *values*
        Ignore this is fine, because you will get automatic prefixes for the namespaces.
        """
        self._children = _Children()
        """list-like.
        Store children Node"""
    @property
    def tag(self):
        # (uri, localname) tuple; uri is None for un-namespaced elements.
        return self._tag
    @tag.setter
    def tag(self, value):
        # Normalize a bare string into (None, string), then validate: uri
        # may be None/str/unicode, the local name must be str/unicode.
        if not isinstance(value, tuple):
            value = (None, value)
        if len(value) != 2:
            raise ValueError
        if value[0] is not None:
            if isinstance(value[0], str):
                pass
            elif isinstance(value[0], unicode):  # Python 2 text type
                pass
            else:
                raise ValueError
        if isinstance(value[1], str):
            pass
        elif isinstance(value[1], unicode):
            pass
        else:
            raise ValueError
        self._tag = value
    @property
    def prefixes(self):
        # Namespace URI -> prefix declarations local to this element.
        return self._prefixes
    @prefixes.setter
    def prefixes(self, value):
        self._prefixes = value
    @property
    def attributes(self):
        # (uri, name) -> value mapping of XML attributes.
        return self._attributes
    @attributes.setter
    def attributes(self, value):
        self._attributes = value
    @property
    def children(self):
        # Ordered child nodes: Element instances and text strings.
        return self._children
    @children.setter
    def children(self, value):
        self._children = value
        # NOTE(review): also writes the instance __dict__, which shadows this
        # property on the instance afterwards - confirm this is intentional.
        self.__dict__['children'] = value
    def string(self, inherited_prefixes=None):
        """to string, you may want to see :class:`Xl.string`"""
        if self.tag[1] is None:
            raise TypeError
        inherited_prefixes = inherited_prefixes or _Prefixes()
        auto_prefixs = _Prefixes()
        def make_a_auto_prefix(_uri):
            # Invent 'prefix0', 'prefix1', ... skipping every prefix already
            # used locally, inherited, or auto-generated so far.
            _prefix_num = 0
            while 'prefix' + str(_prefix_num) in \
                    [one for one in self.prefixes.values()] + \
                    [one for one in inherited_prefixes.values()] + \
                    [one for one in auto_prefixs.values()]:
                _prefix_num += 1
            _prefix = 'prefix' + str(_prefix_num)
            auto_prefixs[_uri] = _prefix
            return _prefix
        def get_prefix(_uri):
            # Resolve a prefix for _uri: own declarations first, then
            # inherited, then previously auto-generated ones.
            if _uri in self.prefixes.keys():
                _prefix = self.prefixes[_uri]
            elif _uri in inherited_prefixes.keys():
                _prefix = inherited_prefixes[_uri]
            elif _uri in auto_prefixs.keys():
                _prefix = auto_prefixs[_uri]
            else:
                raise ValueError
            return _prefix
        s = '<'
        ################################################################################################################
        # processing xml tag
        if self.tag[0] is not None:
            try:
                prefix = get_prefix(self.tag[0])
            except ValueError:
                prefix = make_a_auto_prefix(self.tag[0])
            if prefix is not None:
                full_name = '{}:{}'.format(prefix, self.tag[1])
            else:
                full_name = self.tag[1]
        else:
            full_name = self.tag[1]
        s += full_name
        ################################################################################################################
        # processing xml attributes
        _attrs_string_list = []
        for attr_name, attr_value in self.attributes.items():
            if attr_name[0] is not None:
                try:
                    prefix = get_prefix(attr_name[0])
                except ValueError:
                    prefix = make_a_auto_prefix(attr_name[0])
                _attrs_string_list.append('{}:{}="{}"'.format(prefix, attr_name[1], attr_value))
            else:
                _attrs_string_list.append('{}="{}"'.format(attr_name[1], attr_value))
        if _attrs_string_list:
            s += ' '
            s += ' '.join(_attrs_string_list)
        ################################################################################################################
        # processing xml prefixes
        _prefix_string_list = []
        for url, prefix in self.prefixes.items():
            # The reserved xml namespace is never declared explicitly, and
            # declarations already inherited unchanged are not repeated.
            if url == URI_XML:
                continue
            if url in inherited_prefixes.keys() and prefix == inherited_prefixes[url]:
                continue
            if url:
                if prefix:
                    _prefix_string_list.append('xmlns:{}="{}"'.format(prefix, url))
                else:
                    _prefix_string_list.append('xmlns="{}"'.format(url))
        if _prefix_string_list:
            s += ' '
            s += ' '.join(_prefix_string_list)
        ################################################################################################################
        # processing children
        if self.children:
            s += '>'
            # Children inherit our declarations plus any auto-generated ones.
            prefixes_for_subs = inherited_prefixes.copy()
            prefixes_for_subs.update(self.prefixes)
            prefixes_for_subs.update(auto_prefixs)
            for child in self.children:
                if isinstance(child, Element):
                    s += child.string(inherited_prefixes=prefixes_for_subs)
                # elif isinstance(child, Text):
                #    s += child.string()
                elif isinstance(child, str):
                    s += _escape(child)
            s += '</{}>'.format(full_name)
        else:
            # No children: self-closing form.
            s += ' />'
        return s
class _Prefixes(Dict):
    # Mapping of namespace URI -> prefix string.  Built on hooky's Dict so
    # additions are validated via the _before_add hook.
    def _before_add(self, key=None, item=None):
        # The reserved XML namespace must always map to the 'xml' prefix.
        if key == URI_XML and item != 'xml':
            raise ValueError
        # A prefix may not be bound to two different URIs at once.
        if item in self.values() and (key not in self.keys() or item != self[key]):
            raise ValueError
    def __init__(self, prefixes=None):
        Dict.__init__(self)
        # Pre-seed the always-present xml namespace binding.
        self[URI_XML] = 'xml'
        if prefixes:
            self.update(prefixes)
class _Attributes(Dict):
    # Mapping of (namespace_uri, attribute_name) -> value.  Bare-string keys
    # are normalized to (None, name) on assignment.
    def __init__(self, attributes=None):
        Dict.__init__(self)
        if attributes:
            self.update(attributes)
    def __setitem__(self, key, value):
        # Normalize and validate the key before storing: uri may be
        # None/str/unicode, the attribute name must be str/unicode.
        if not isinstance(key, tuple):
            key = (None, key)
        if key[0] is not None:
            if isinstance(key[0], str):
                pass
            elif isinstance(key[0], unicode):  # Python 2 text type
                pass
            else:
                raise KeyError
        if isinstance(key[1], str):
            pass
        elif isinstance(key[1], unicode):
            pass
        else:
            raise KeyError
        # Namespace declarations are managed via prefixes, not attributes.
        if key == (None, 'xmlns'):
            raise AttributeError
        Dict.__setitem__(self, key, value)
class _Children(List):
    # Ordered list of child nodes: _Node instances or text strings.
    def __init__(self):
        List.__init__(self)
    def _before_add(self, key=None, item=None):
        # Only serializable nodes and text may become children.
        if isinstance(item, (_Node, str)):
            pass
        elif isinstance(item, unicode):  # Python 2 text type
            pass
        else:
            raise TypeError('{} is not legal'.format(item.__class__.__name__))
def _escape(string):
"""
:param string:
:type string: str
:return:
:rtype: str
"""
s = ''
for char in string:
if char == '&':
s += '&'
elif char == '<':
s += '<'
elif char == '>':
s += '>'
else:
s += str(char)
return s
| |
#
# Copyright (C) 2015 Jason Mar
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import os
import os.path
import errno
import sqlite3
import time
import json
import sqlite3
import urllib
import urllib2
import cookielib
import boto3
import botocore
import wordpress_xmlrpc
from botocore.exceptions import ClientError
from wordpress_xmlrpc import Client
from wordpress_xmlrpc import WordPressPost
from wordpress_xmlrpc.methods import media
from wordpress_xmlrpc.methods import posts
from urllib2 import HTTPError
from urllib2 import URLError
# Perform all media library migration steps
def perform_migration(kwargs):
    """Run the whole WordPress -> S3 media migration, skipping any stage
    whose kwargs['state'] flag marks it as already completed:
    1) fetch + dedupe media metadata, 2) download the media files,
    3) upload them to S3, 4) rewrite post links from wp_host to S3.
    NOTE(review): prepare_media_items / upload_files / replace_images are
    defined elsewhere in this file."""
    # Get the authenticated connection objects
    kwargs = init(kwargs)
    # Download and deduplicate media library metadata
    if not kwargs['state']['metadata_loaded']:
        prepare_media_items(kwargs)
    # Download the media library
    if not kwargs['state']['media_downloaded']:
        download_media_items(kwargs)
    # Upload the media library to S3
    if not kwargs['state']['media_uploaded']:
        upload_files(kwargs)
    # Edit the posts
    if not kwargs['state']['posts_edited']:
        replace_images(kwargs)
    kwargs['db'].close()
    print 'Finished migrating media items from ' + kwargs['wp_host'] + ' to ' + kwargs['s3_host'] + kwargs['s3_bucket']
# Template configuration for perform_migration: copy this dict and override
# the credentials, hosts, and paths for your own blog/bucket.
default_kwargs = {
    "wp_uri"  : 'https://blogname.wordpress.com/xmlrpc.php',
    "wp_user" : 'user@wordpress.com',
    "wp_pass" : 'password',
    "wp_db"   : 'wp.sqlite3',
    "wp_host" : 'blogname.files.wordpress.com',
    "s3_host" : 's3-us-west-2.amazonaws.com',
    "s3_bucket" : 'blogname',
    "wp_upload_dir" : r'C:\tmp\wp-upload',
    # Browser-like headers used for the login POST and media downloads.
    "http_headers" : {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.93 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.8'
    },
    "state" : { # Edit state if you need to skip certain steps
        "metadata_loaded" : False,  # False => fetch all media items and save to sqlite database
        "media_downloaded" : False, # False => download all media items using links in database
        "media_uploaded" : False,   # False => upload all files from wp_upload_dir to s3 bucket
        "posts_edited" : False      # False => fetch all posts, replace wp_host with s3_host/s3_bucket, and apply changes
    }
}
# login to wordpress.com
def login(kwargs):
    """Log in to wordpress.com with the credentials in kwargs and return a
    dict holding an urllib2 opener whose cookie jar carries the session.

    :raises HTTPError: re-raised (after printing) if the login POST fails.
    """
    cookie_file = 'cookiejar.txt'  # NOTE(review): never used - dead variable?
    cookies = cookielib.CookieJar()
    cookie_handler = urllib2.HTTPCookieProcessor(cookies)
    redirect_handler = urllib2.HTTPRedirectHandler()
    opener = urllib2.build_opener(redirect_handler, cookie_handler)
    state = {"opener": opener}
    uri = "https://wordpress.com/wp-login.php"
    # Form fields expected by wp-login.php.
    opts = {
        'log': kwargs['wp_user'],
        'pwd': kwargs['wp_pass'],
        'rememberme': 'forever',
        'wp-submit': 'Log In',
        'testcookie': 1
    }
    # Prepare POST request
    post_data = urllib.urlencode(opts)
    request = urllib2.Request(uri, post_data, kwargs['http_headers'])
    # Submit POST
    try:
        response = opener.open(request)
    except HTTPError as e:
        print(e.code)
        print(e.read())
        raise
    print 'successfully logged in'
    cookies.extract_cookies(response, request)
    return state
# Download data from a uri to a file
def uri2file(uri, file, kwargs):
print 'downloading ' + uri + ' to ' + file
# Prepare GET request
request = urllib2.Request(uri, None, kwargs['http_headers'])
try:
# Submit request
response = kwargs['opener'].open(request)
# Write data to file
with open(file, "wb") as of:
of.write(response.read())
except HTTPError as e:
print 'request for ' + uri + ' failed with HTTPError: ' + str(e.code) + ' ' + e.msg
except URLError as e:
print 'request for ' + uri + ' failed URLError: ' + e.reason
except TypeError as e:
print 'request for ' + uri + ' failed with TypeError: ' + e.msg
except:
print 'request for ' + uri + ' failed'
# Obtain authenticated connection object
def init(kwargs):
    """Open every connection the migration needs (wordpress.com session,
    XML-RPC client, S3, sqlite) and return kwargs merged with them.

    The returned dict adds: 'opener', 'wp_client', 's3', 'db', 'db_cursor'.
    """
    print 'Creating Wordpress.com session'
    opener = login(kwargs)
    print 'Connecting to Wordpress XMLRPC Endpoint ' + kwargs['wp_uri']
    wp_client = Client(kwargs['wp_uri'], kwargs['wp_user'], kwargs['wp_pass'])
    print 'Connecting to Amazon S3'
    s3 = boto3.resource('s3')
    print 'Connecting to SQLite3 Database ' + kwargs['wp_db']
    db = sqlite3.connect(kwargs['wp_db'])
    db_cursor = db.cursor()
    print 'All connections have been initialized'
    res = {"wp_client": wp_client, "db": db, "db_cursor": db_cursor, "s3": s3}
    res.update(kwargs)
    # login() returned {"opener": opener}; merge it in as the 'opener' key.
    res.update(opener)
    return res
def get_wp_media_library(wp_client):
    """Fetch the complete media library over XML-RPC, paging 100 items at a
    time until an empty page comes back.

    :param wp_client: authenticated wordpress_xmlrpc Client
    :return: list of media item objects, in server order
    """
    items = []
    offset = 0
    page_size = 100
    while True:
        request = media.GetMediaLibrary({"number": page_size, "offset": offset})
        batch = wp_client.call(request)
        offset += page_size
        items.extend(batch)
        # An empty page means the library is exhausted.
        if len(batch) == 0:
            break
    return items
def create_media_table(db, cursor):
    """Create the WPMEDIA table used to stage media-library metadata.

    :param db: open sqlite3 connection (committed after the DDL runs)
    :param cursor: cursor belonging to `db`
    """
    ddl = '''
    CREATE TABLE WPMEDIA (
        id text,
        parent int,
        title text,
        description text,
        caption text,
        date_created long,
        link text,
        thumbnail text,
        metadata text
    )
    '''
    cursor.execute(ddl)
    db.commit()
def insert_media_items(media_items, db, db_cursor):
    """Insert WordPress media-item metadata into the WPMEDIA table,
    committing in batches of 100 rows plus a final commit.

    :param media_items: iterable of media item objects (id, parent, title,
        description, caption, date_created, link, thumbnail, metadata)
    :param db: open sqlite3 connection
    :param db_cursor: cursor belonging to `db`
    """
    insertMediaSQL='''
    INSERT INTO WPMEDIA (
        id,
        parent,
        title,
        description,
        caption,
        date_created,
        link,
        thumbnail,
        metadata
    ) VALUES (?,?,?,?,?,?,?,?,?)
    '''
    # Fixed: the row counter was re-initialized to 0 inside the loop and
    # never incremented, so `n % 100 == 0` held on every row and forced a
    # commit per insert instead of one per 100-row batch.
    n = 0
    for x in media_items:
        dt = x.date_created
        # get unix time (int() rather than the Python-2-only long(); both
        # truncate the float from mktime and sqlite stores both as INTEGER)
        ts = int(time.mktime((dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, 0, 0, 0)))
        # convert metadata dict to string
        metadata = json.dumps(x.metadata)
        row = (x.id, x.parent, x.title, x.description, x.caption, ts, x.link, x.thumbnail, metadata)
        # Insert the data
        db_cursor.execute(insertMediaSQL, row)
        n += 1
        if n % 100 == 0:
            db.commit()
    db.commit()
def get_distinct_records(db, db_cursor):
    """Deduplicate the WPMEDIA rows into a new WPMEDIA1 table, then drop
    the raw WPMEDIA table.

    :param db: open sqlite3 connection (committed at the end)
    :param db_cursor: cursor belonging to `db`
    """
    dedupe_sql = '''
    CREATE TABLE WPMEDIA1 AS
    SELECT id, parent, title, description, caption, date_created, link, thumbnail, metadata
    FROM WPMEDIA
    GROUP BY id, parent, title, description, caption, date_created, link, thumbnail, metadata
    '''
    # Build the deduplicated copy first, then discard the raw table.
    db_cursor.execute(dedupe_sql)
    db_cursor.execute("DROP TABLE WPMEDIA")
    db.commit()
def mkdir_p(dir):
    """Create `dir` and any missing parents, like `mkdir -p`; an already
    existing directory is not an error."""
    try:
        os.makedirs(dir)
    except OSError as exc: # Python >2.5
        # Only tolerate "already exists as a directory"; re-raise the rest
        # (permission errors, a file in the way, ...).
        if not (exc.errno == errno.EEXIST and os.path.isdir(dir)):
            raise
def download_media_items(kwargs):
    """Download every media file linked from WPMEDIA1 into wp_upload_dir,
    preserving the YYYY/MM layout of the original links; files already
    present on disk are skipped."""
    kwargs['db_cursor'].execute("SELECT LINK FROM WPMEDIA1")
    rs = kwargs['db_cursor'].fetchall()
    n = 0
    m = len(rs)
    log1 = ' of ' + str(m) + ': '
    for r in rs:
        media_uri = r[0]
        if media_uri.lower().startswith("http"):
            split = media_uri.split("/")
            # get YYYY/MM from original media link
            outdir = kwargs['wp_upload_dir'] + os.path.sep + split[-3] + os.path.sep + split[-2] + os.path.sep
            filename = split[-1]
            mkdir_p(outdir)
            outfile = os.path.join(outdir, filename)
            # Check whether file exists
            if os.path.isfile(outfile):
                print str(n) + log1 + outfile + ' already exists'
            else:
                print str(n) + log1 + outfile
                # Download the file
                uri2file(media_uri, outfile, kwargs)
            n += 1
        else:
            print 'invalid link: ' + media_uri
# bucket_exists(s3_bucket: str, s3: s3.ServiceResource): Boolean
def bucket_exists(s3_bucket, s3):
    """Return True/False for whether the S3 bucket exists, judged by the
    HTTP status of a HEAD BUCKET request (200 = exists, 404 = missing).

    NOTE(review): the bare `raise` for other statuses (e.g. 403) re-raises
    the pending ClientError under Python 2, but if head_bucket *returned*
    an odd status there is no active exception to re-raise - confirm.
    """
    try:
        # Send HEAD BUCKET request and get response
        response = s3.meta.client.head_bucket(Bucket=s3_bucket)['ResponseMetadata']
        status = response['HTTPStatusCode']
    except ClientError as e:
        # Collect error message
        response = e.response['ResponseMetadata']
        status = response['HTTPStatusCode']
    if status == 200:
        # The bucket exists
        return True
    elif status == 404:
        # The bucket probably doesn't exist
        return False
    else:
        # The bucket may already exist but we aren't authorized (403)
        raise
# key_exists(s3_bucket, key_name: str, s3: s3.ServiceResource): Boolean
def key_exists(s3_bucket, key_name, s3):
    """Return True if `key_name` exists in `s3_bucket`, False on 404.

    Any other status (e.g. 403: the object exists but we aren't authorized)
    re-raises the original ClientError.
    """
    error = None
    try:
        # Send HEAD OBJECT request and collect response
        response = s3.meta.client.head_object(Bucket=s3_bucket, Key=key_name)['ResponseMetadata']
        status = response['HTTPStatusCode']
    except ClientError as e:
        # Keep a reference so we can re-raise reliably below: a bare `raise`
        # after the except block only works on Python 2 -- on Python 3 the
        # exception is cleared when the block exits.
        error = e
        status = e.response['ResponseMetadata']['HTTPStatusCode']
    if status == 200:
        # The object exists
        return True
    elif status == 404:
        # The object probably doesn't exist
        return False
    elif error is not None:
        # The object may already exist and we aren't authorized (403)
        raise error
    else:
        # Successful HEAD with an unexpected status -- should not happen.
        raise RuntimeError('unexpected HEAD OBJECT status: %s' % status)
# Upload files in directory to AWS S3 bucket
# ls(dir: str): [(file_path, key_name)]
def ls(dir):
    """Walk `dir` and return a list of (file_path, key_name) tuples.
    key_name is the file's path relative to `dir`, with '/' separators,
    suitable for use as an S3 key (ends up as 'YYYY/MM/<file>').
    """
    print('finding files in ' + dir)
    prefix_len = len(dir) + 1
    keys = []
    for dirpath, _subdirs, filenames in os.walk(dir):
        for fname in filenames:
            # Strip the base directory and normalise separators to '/'.
            key_name = dirpath[prefix_len:].replace(os.path.sep, '/') + '/' + fname
            # Pair the S3 key with the file's full local path.
            keys.append((os.path.join(dirpath, fname), key_name))
    print(str(len(keys)) + ' files found')
    return keys
# Upload files in directory to AWS S3 bucket
# upload_dir_to_bucket(keys: (str,str), s3_bucket: str, s3: S3.Client): int
def upload_dir_to_bucket(keys, s3_bucket, s3):
    """Upload each (file_path, key_name) pair in `keys` to `s3_bucket`,
    skipping keys that already exist.  Returns the number of files uploaded.
    """
    n_uploaded = 0
    for file_path, key_name in keys:
        if upload_if_not_exists(s3_bucket, key_name, file_path, s3):
            n_uploaded += 1
            # Progress marker every 100 successful uploads.
            if n_uploaded % 100 == 0:
                print(str(n_uploaded) + ' files uploaded')
    print(str(n_uploaded) + ' files uploaded')
    return n_uploaded
# upload_if_not_exists(s3_bucket: str, key_name: str, infile: str, s3: s3.ServiceResource): Boolean
def upload_if_not_exists(s3_bucket, key_name, infile, s3):
    """Upload `infile` to `s3_bucket` under `key_name` unless the key is
    already present.  Returns True when an upload happened, else False.
    """
    # Check if key already exists in bucket
    if key_exists(s3_bucket, key_name, s3):
        print(key_name + ' already exists')
        return False
    # Specify the target key (renamed from `object`, which shadowed a builtin)
    target = s3.Object(s3_bucket, key_name)
    print('uploading ' + infile + ' as ' + key_name + ' in ' + s3_bucket)
    # Use a context manager so the file handle is closed even if put() fails;
    # the original version leaked the open file object.
    with open(infile, 'rb') as body:
        target.put(ACL='public-read', Body=body)
    return True
# Replaces string in post
# replace_str_in_post(old: str, new: str, post: wordpress_xmlrpc.WordPressPost, wp_client: wordpress_xmlrpc.Client): Boolean
def replace_str_in_post(old, new, post, wp_client):
    """Replace `old` with `new` in `post.content` and push the edit to the
    wordpress XML-RPC server.  Returns True when an edit was made.
    """
    # Guard: nothing to do for empty posts.
    if len(post.content) == 0:
        print('post id ' + str(post.id) + ' ' + post.slug + ' has content of length 0')
        return False
    # Guard: nothing to do when the target string is absent.
    if post.content.find(old) == -1:
        print('post id ' + str(post.id) + ' \"' + post.slug + '\" did not need replacement')
        return False
    # Replace the wordpress.com media hostname with the s3 uri
    post.content = post.content.replace(old, new)
    # Post the edited content to the wordpress xmlrpc server
    wp_client.call(posts.EditPost(post.id, post))
    return True
# Replaces uri in wordpress posts
#replace_uri_in_posts(old: str, new: str, wp_client: wordpress_xmlrpc.Client): int
def replace_uri_in_posts(old, new, wp_client):
    """Replace `old` with `new` across all wordpress posts, fetched in
    batches of 100 via XML-RPC.  Returns the number of posts modified.
    """
    offset = 0
    batch_size = 100
    modified = 0
    while True:
        # Fetch the next page of posts from the XML-RPC server.
        batch = wp_client.call(posts.GetPosts({"number": batch_size, "offset": offset}))
        offset += batch_size
        if len(batch) == 0:
            break
        for post in batch:
            # Replace the hostname; True means the post was edited.
            if replace_str_in_post(old, new, post, wp_client):
                print('modified post_id ' + str(post.id) + ' "' + post.slug + '"')
                modified += 1
                if modified % 100 == 0:
                    print(str(modified) + ' posts modified')
            else:
                print('post_id ' + str(post.id) + ' was not modified')
    print(str(modified) + ' posts modified')
    return modified
# get_post_ids(cursor: sqlite3.Cursor): [int]
def get_post_ids(cursor):
    """Return the parent-post ids of all deduplicated media items.
    Ids may repeat: one row per media item, not per post.
    """
    cursor.execute('SELECT PARENT FROM WPMEDIA1')
    rows = cursor.fetchall()
    print(str(len(rows)) + ' posts found')
    # Each row is a 1-tuple; unwrap into a flat list of ids.
    return [row[0] for row in rows]
# Downloads Media Library and deduplicates records
def prepare_media_items(kwargs):
    """Fetch the wordpress media library into sqlite and deduplicate it.
    Expects kwargs to provide 'wp_client', 'db' and 'db_cursor'.
    """
    db, cursor = kwargs['db'], kwargs['db_cursor']
    media_items = get_wp_media_library(kwargs['wp_client'])
    create_media_table(db, cursor)
    insert_media_items(media_items, db, cursor)
    get_distinct_records(db, cursor)
    # Report how many unique media items survived deduplication.
    cursor.execute("SELECT COUNT(1) FROM WPMEDIA1")
    n_items = cursor.fetchone()[0]
    print(str(n_items) + ' distinct media items')
# Uploads media library to Amazon S3
def upload_files(kwargs):
    """Upload everything under the local wordpress upload dir to S3.
    Returns the number of files uploaded (0 when the dir is empty).
    """
    # List files to be uploaded
    keys = ls(kwargs['wp_upload_dir'])
    if not keys:
        return 0
    print('uploading files in ' + kwargs['wp_upload_dir'] + ' to ' + kwargs['s3_bucket'])
    return upload_dir_to_bucket(keys, kwargs['s3_bucket'], kwargs['s3'])
# Edits posts with new image URIs
def replace_images(kwargs):
    """Rewrite media URIs in all posts, replacing the wordpress hostname
    with the S3 bucket URI.  Returns the number of posts modified.
    """
    # Replace wordpress media hostname with s3 bucket uri
    old = kwargs['wp_host']
    new = kwargs['s3_host'] + '/' + kwargs['s3_bucket']
    print('replacing ' + old + ' with ' + new + ' in all posts')
    n_replaced = replace_uri_in_posts(old, new, kwargs['wp_client'])
    # The count was previously computed and discarded; return it so callers
    # can report progress (backward compatible: callers ignoring the None
    # return are unaffected).
    return n_replaced
# EOF
| |
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import pathlib2
import tempfile
from ..external import six
from ..external.six.moves.urllib.parse import urlparse # pylint: disable=import-error, no-name-in-module
from .._protos.public.modeldb.versioning import VersioningService_pb2 as _VersioningService
from .._internal_utils import _artifact_utils
from .._internal_utils import _utils
from . import _dataset
class S3(_dataset._Dataset):
    """
    Captures metadata about S3 objects.
    If your S3 object requires additional information to identify it, such as its version ID, you
    can use :meth:`S3.location`.
    Parameters
    ----------
    paths : list
        List of S3 URLs of the form ``"s3://<bucket-name>"`` or ``"s3://<bucket-name>/<key>"``, or
        objects returned by :meth:`S3.location`.
    enable_mdb_versioning : bool, default False
        Whether to upload the data itself to ModelDB to enable managed data versioning.
    Examples
    --------
    .. code-block:: python
        from verta.dataset import S3
        dataset1 = S3([
            "s3://verta-starter/census-train.csv",
            "s3://verta-starter/census-test.csv",
        ])
        dataset2 = S3([
            "s3://verta-starter",
        ])
        dataset3 = S3([
            S3.location("s3://verta-starter/census-train.csv",
                        version_id="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"),
        ])
    .. describe:: dataset += other
        Updates the dataset, adding paths from ``other``.
    .. describe:: dataset + other + ...
        Returns a new dataset with paths from the dataset and all others.
    """
    # Canonical "s3://<bucket>/<key>" template used for component paths.
    _S3_PATH = "s3://{}/{}"
    def __init__(self, paths, enable_mdb_versioning=False):
        # Accept a single path/location as well as a list of them.
        if isinstance(paths, (six.string_types, S3Location)):
            paths = [paths]
        super(S3, self).__init__(enable_mdb_versioning=enable_mdb_versioning)
        for path in paths:
            # convert paths to S3Location
            if isinstance(path, six.string_types):
                s3_loc = S3Location(path)
            elif isinstance(path, S3Location):
                s3_loc = path
            else:
                raise TypeError(
                    "`paths` must contain either str or S3Location,"
                    " not {} ({})".format(type(path), path)
                )
            self._add_components(self._get_s3_components(s3_loc))
    @classmethod
    def _from_proto(cls, blob_msg):
        """Rebuild an S3 blob from its protobuf message (inverse of _as_proto)."""
        obj = cls._create_empty()
        obj._add_components([
            _dataset.S3Component._from_proto(component_msg)
            for component_msg
            in blob_msg.dataset.s3.components
        ])
        return obj
    def _as_proto(self):
        """Serialize this blob's components into a Versioning Blob message."""
        blob_msg = _VersioningService.Blob()
        for component in self._components_map.values():
            component_msg = component._as_proto()
            blob_msg.dataset.s3.components.append(component_msg)
        return blob_msg
    @classmethod
    def _get_s3_components(cls, s3_loc):
        # Generator yielding an S3Component for each object covered by
        # `s3_loc` (a single object, or every latest object in a
        # bucket/folder).  Requires boto3 and live AWS credentials.
        try:
            import boto3
        except ImportError:
            e = ImportError("Boto 3 is not installed; try `pip install boto3`")
            six.raise_from(e, None)
        s3 = boto3.client('s3')
        if (s3_loc.key is None  # bucket
                or s3_loc.key.endswith('/')):  # folder
            # NOTE(review): list_object_versions is paginated (~1000 keys per
            # call) and only the first page is read here -- confirm datasets
            # stay below that size.
            if s3_loc.key is None:
                # TODO: handle `bucket_name` not found
                obj_versions = s3.list_object_versions(Bucket=s3_loc.bucket)
            else:
                obj_versions = s3.list_object_versions(Bucket=s3_loc.bucket, Prefix=s3_loc.key)
            if 'Versions' not in obj_versions:  # boto3 doesn't error, so we have to catch this
                s3_path = cls._S3_PATH.format(s3_loc.bucket, s3_loc.key)
                raise ValueError("folder {} not found".format(s3_path))
            for obj in obj_versions['Versions']:
                if obj['Key'].endswith('/'):  # folder, not object
                    continue
                if not obj['IsLatest']:
                    continue
                yield cls._s3_obj_to_component(obj, s3_loc.bucket, obj['Key'])
        else:
            # TODO: handle `key` not found
            if s3_loc.version_id is not None:
                # TODO: handle `version_id` not found
                obj = s3.head_object(Bucket=s3_loc.bucket, Key=s3_loc.key, VersionId=s3_loc.version_id)
            else:
                obj = s3.head_object(Bucket=s3_loc.bucket, Key=s3_loc.key)
            yield cls._s3_obj_to_component(obj, s3_loc.bucket, s3_loc.key)
    @classmethod
    def _s3_obj_to_component(cls, obj, bucket_name, key):
        # `obj` comes from either list_object_versions ('Size') or
        # head_object ('ContentLength'), hence the or-chain for size.
        component = _dataset.S3Component(
            path=cls._S3_PATH.format(bucket_name, key),
            size=obj.get('Size') or obj.get('ContentLength') or 0,
            last_modified=_utils.timestamp_to_ms(_utils.ensure_timestamp(obj['LastModified'])),
            md5=obj['ETag'].strip('"'),  # NOTE: ETag for multipart is not MD5 https://stackoverflow.com/a/19304527
        )
        if obj.get('VersionId', 'null') != 'null':  # S3's API returns 'null' when there's no version ID
            component.s3_version_id = obj['VersionId']
        return component
    @staticmethod
    def location(path, version_id=None):
        """
        Returns an object describing an S3 location that can be passed into a new :class:`S3`.
        Parameters
        ----------
        path : str
            S3 URL of the form ``"s3://<bucket-name>"`` or ``"s3://<bucket-name>/<key>"``.
        version_id : str, optional
            ID of an S3 object version.
        Returns
        -------
        :class:`S3Location`
            A location in S3.
        Raises
        ------
        ValueError
            If `version_id` is provided but `path` represents a bucket rather than a single object.
        """
        return S3Location(path, version_id)
    def _prepare_components_to_upload(self):
        """
        Downloads files from S3 and tracks them for upload to ModelDB.
        This method does nothing if ModelDB-managed versioning was not enabled.
        """
        if not self._mdb_versioned:
            return
        try:
            import boto3
        except ImportError:
            e = ImportError("Boto 3 is not installed; try `pip install boto3`")
            six.raise_from(e, None)
        s3 = boto3.client('s3')
        # download files to local disk
        for component in self._components_map.values():
            s3_loc = S3Location(component.path, component.s3_version_id)
            # download to file in ~/.verta/temp/
            tempdir = os.path.join(_utils.HOME_VERTA_DIR, "temp")
            pathlib2.Path(tempdir).mkdir(parents=True, exist_ok=True)
            print("downloading {} from S3".format(component.path))
            # delete=False: the file must outlive this block; it is removed
            # later by _clean_up_uploaded_components().
            with tempfile.NamedTemporaryFile('w+b', dir=tempdir, delete=False) as tempf:
                s3.download_fileobj(
                    Bucket=s3_loc.bucket,
                    Key=s3_loc.key,
                    ExtraArgs={'VersionId': s3_loc.version_id} if s3_loc.version_id else None,
                    Fileobj=tempf,
                )
            print("download complete")
            # track which downloaded file this component corresponds to
            component._local_path = tempf.name
            # add MDB path to component blob
            with open(tempf.name, 'rb') as f:
                artifact_hash = _artifact_utils.calc_sha256(f)
            component._internal_versioned_path = artifact_hash + '/' + s3_loc.key
    def _clean_up_uploaded_components(self):
        """
        Deletes temporary files that had been downloaded for ModelDB-managed versioning.
        This method does nothing if ModelDB-managed versioning was not enabled.
        """
        if not self._mdb_versioned:
            return
        for component in self._components_map.values():
            if component._local_path and os.path.isfile(component._local_path):
                os.remove(component._local_path)
    def add(self, paths):
        """
        Adds `paths` to this dataset.
        Parameters
        ----------
        paths : list
            List of S3 URLs of the form ``"s3://<bucket-name>"`` or ``"s3://<bucket-name>/<key>"``, or
            objects returned by :meth:`S3.location`.
        """
        # re-use logic in __init__
        other = self.__class__(
            paths=paths,
            enable_mdb_versioning=self._mdb_versioned,
        )
        self += other
class S3Location(object):
    """A parsed S3 URL: bucket name, optional object key, optional version ID."""
    # TODO: handle prefixes
    def __init__(self, path, version_id=None):
        self.bucket, self.key = self._parse_s3_url(path)
        # A version ID only makes sense for a single object, not a whole bucket.
        if version_id is not None and self.key is None:
            raise ValueError(
                "`version_id` can only be provided if"
                " `path` specifies a single S3 object"
            )
        self.version_id = version_id
    @staticmethod
    def _parse_s3_url(path):
        """Split an s3:// URL into (bucket, key); key is None for a bare bucket."""
        parsed = urlparse(path, allow_fragments=False)
        if parsed.scheme != 's3':
            raise ValueError("`path` \"{}\" must be either \"s3://<bucket-name>\""
                             " or \"s3://<bucket-name>/<key>\"".format(path))
        # Drop the single leading '/' that urlparse leaves on the key.
        key = parsed.path
        if key.startswith('/'):
            key = key[1:]
        # An empty key means the URL named only a bucket.
        return parsed.netloc, (key or None)
| |
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Type, TypeVar, Union, overload
from .class_validators import gather_all_validators
from .error_wrappers import ValidationError
from .errors import DataclassTypeError
from .fields import Field, FieldInfo, Required, Undefined
from .main import create_model, validate_model
from .typing import resolve_annotations
from .utils import ClassAttribute
if TYPE_CHECKING:
from .config import BaseConfig
from .main import BaseModel
from .typing import CallableGenerator, NoArgAnyCallable
DataclassT = TypeVar('DataclassT', bound='Dataclass')
class Dataclass:
    # Typing-only description of the interface that `_process_class` stamps
    # onto a decorated class.  The method bodies are placeholders: real
    # implementations are assigned at decoration time (see `_process_class`).
    __pydantic_model__: Type[BaseModel]
    __initialised__: bool
    __post_init_original__: Optional[Callable[..., None]]
    __processed__: Optional[ClassAttribute]
    __has_field_info_default__: bool  # whether or not a `pydantic.Field` is used as default value
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        pass
    # Replaced with classmethod(_get_validators) by _process_class.
    @classmethod
    def __get_validators__(cls: Type['Dataclass']) -> 'CallableGenerator':
        pass
    # Replaced with classmethod(_validate_dataclass) by _process_class.
    @classmethod
    def __validate__(cls: Type['DataclassT'], v: Any) -> 'DataclassT':
        pass
    def __call__(self: 'DataclassT', *args: Any, **kwargs: Any) -> 'DataclassT':
        pass
def _validate_dataclass(cls: Type['DataclassT'], v: Any) -> 'DataclassT':
if isinstance(v, cls):
return v
elif isinstance(v, (list, tuple)):
return cls(*v)
elif isinstance(v, dict):
return cls(**v)
# In nested dataclasses, v can be of type `dataclasses.dataclass`.
# But to validate fields `cls` will be in fact a `pydantic.dataclasses.dataclass`,
# which inherits directly from the class of `v`.
elif is_builtin_dataclass(v) and cls.__bases__[0] is type(v):
import dataclasses
return cls(**dataclasses.asdict(v))
else:
raise DataclassTypeError(class_name=cls.__name__)
def _get_validators(cls: Type['Dataclass']) -> 'CallableGenerator':
yield cls.__validate__
def setattr_validate_assignment(self: 'Dataclass', name: str, value: Any) -> None:
    """__setattr__ replacement that validates assignments once the dataclass
    has finished initialising (i.e. after __post_init__ set __initialised__).
    """
    if self.__initialised__:
        # Validate against a snapshot of the other current field values.
        values = dict(self.__dict__)
        values.pop(name, None)
        field = self.__pydantic_model__.__fields__.get(name, None)
        if field:
            value, error_ = field.validate(value, values, loc=name, cls=self.__class__)
            if error_:
                raise ValidationError([error_], self.__class__)
    object.__setattr__(self, name, value)
def is_builtin_dataclass(_cls: Type[Any]) -> bool:
    """
    `dataclasses.is_dataclass` is True if one of the class parents is a `dataclass`.
    This is why we also add a class attribute `__processed__` to only consider 'direct' built-in dataclasses
    """
    import dataclasses
    if hasattr(_cls, '__processed__'):
        # Already wrapped by pydantic -- not a plain stdlib dataclass.
        return False
    return dataclasses.is_dataclass(_cls)
def _generate_pydantic_post_init(
    post_init_original: Optional[Callable[..., None]], post_init_post_parse: Optional[Callable[..., None]]
) -> Callable[..., None]:
    # Build the __post_init__ that the generated dataclass will call: it runs
    # the user's original __post_init__ (if any), validates the instance's
    # fields through the companion pydantic model, merges the coerced values
    # back, and finally runs the optional __post_init_post_parse__ hook.
    def _pydantic_post_init(self: 'Dataclass', *initvars: Any) -> None:
        if post_init_original is not None:
            post_init_original(self, *initvars)
        if getattr(self, '__has_field_info_default__', False):
            # We need to remove `FieldInfo` values since they are not valid as input
            # It's ok to do that because they are obviously the default values!
            input_data = {k: v for k, v in self.__dict__.items() if not isinstance(v, FieldInfo)}
        else:
            input_data = self.__dict__
        d, _, validation_error = validate_model(self.__pydantic_model__, input_data, cls=self.__class__)
        if validation_error:
            raise validation_error
        # Merge the validated/coerced values onto the instance in one shot;
        # object.__setattr__ bypasses setattr_validate_assignment and frozen.
        object.__setattr__(self, '__dict__', {**getattr(self, '__dict__', {}), **d})
        # From here on, assignments go through validation (see
        # setattr_validate_assignment).
        object.__setattr__(self, '__initialised__', True)
        if post_init_post_parse is not None:
            post_init_post_parse(self, *initvars)
    return _pydantic_post_init
def _process_class(
    _cls: Type[Any],
    init: bool,
    repr: bool,
    eq: bool,
    order: bool,
    unsafe_hash: bool,
    frozen: bool,
    config: Optional[Type[Any]],
) -> Type['Dataclass']:
    # Turn `_cls` into a validating dataclass: install a pydantic-aware
    # __post_init__, run it through stdlib @dataclass, then attach the
    # companion pydantic model and validator hooks.
    import dataclasses
    post_init_original = getattr(_cls, '__post_init__', None)
    # Ignore a __post_init__ we installed ourselves on a previous pass.
    if post_init_original and post_init_original.__name__ == '_pydantic_post_init':
        post_init_original = None
    if not post_init_original:
        post_init_original = getattr(_cls, '__post_init_original__', None)
    post_init_post_parse = getattr(_cls, '__post_init_post_parse__', None)
    _pydantic_post_init = _generate_pydantic_post_init(post_init_original, post_init_post_parse)
    # If the class is already a dataclass, __post_init__ will not be called automatically
    # so no validation will be added.
    # We hence create dynamically a new dataclass:
    # ```
    # @dataclasses.dataclass
    # class NewClass(_cls):
    #   __post_init__ = _pydantic_post_init
    # ```
    # with the exact same fields as the base dataclass
    # and register it on module level to address pickle problem:
    # https://github.com/samuelcolvin/pydantic/issues/2111
    if is_builtin_dataclass(_cls):
        uniq_class_name = f'_Pydantic_{_cls.__name__}_{id(_cls)}'
        _cls = type(
            # for pretty output new class will have the name as original
            _cls.__name__,
            (_cls,),
            {
                '__annotations__': resolve_annotations(_cls.__annotations__, _cls.__module__),
                '__post_init__': _pydantic_post_init,
                # attrs for pickle to find this class
                '__module__': __name__,
                '__qualname__': uniq_class_name,
            },
        )
        globals()[uniq_class_name] = _cls
    else:
        _cls.__post_init__ = _pydantic_post_init
    cls: Type['Dataclass'] = dataclasses.dataclass(  # type: ignore
        _cls, init=init, repr=repr, eq=eq, order=order, unsafe_hash=unsafe_hash, frozen=frozen
    )
    # Marker so is_builtin_dataclass() treats this class as already processed.
    cls.__processed__ = ClassAttribute('__processed__', True)
    # Mirror each dataclass field into a pydantic field definition.
    field_definitions: Dict[str, Any] = {}
    for field in dataclasses.fields(cls):
        default: Any = Undefined
        default_factory: Optional['NoArgAnyCallable'] = None
        field_info: FieldInfo
        if field.default is not dataclasses.MISSING:
            default = field.default
        elif field.default_factory is not dataclasses.MISSING:
            default_factory = field.default_factory
        else:
            default = Required
        if isinstance(default, FieldInfo):
            field_info = default
            # Remembered so _pydantic_post_init can strip FieldInfo defaults.
            cls.__has_field_info_default__ = True
        else:
            field_info = Field(default=default, default_factory=default_factory, **field.metadata)
        field_definitions[field.name] = (field.type, field_info)
    validators = gather_all_validators(cls)
    # Companion pydantic model used for all validation of this dataclass.
    cls.__pydantic_model__ = create_model(
        cls.__name__, __config__=config, __module__=_cls.__module__, __validators__=validators, **field_definitions
    )
    cls.__initialised__ = False
    cls.__validate__ = classmethod(_validate_dataclass)  # type: ignore[assignment]
    cls.__get_validators__ = classmethod(_get_validators)  # type: ignore[assignment]
    if post_init_original:
        cls.__post_init_original__ = post_init_original
    if cls.__pydantic_model__.__config__.validate_assignment and not frozen:
        cls.__setattr__ = setattr_validate_assignment  # type: ignore[assignment]
    return cls
@overload
def dataclass(
    *,
    init: bool = True,
    repr: bool = True,
    eq: bool = True,
    order: bool = False,
    unsafe_hash: bool = False,
    frozen: bool = False,
    config: Type[Any] = None,
) -> Callable[[Type[Any]], Type['Dataclass']]:
    # Overload: decorator-with-arguments form `@dataclass(...)`.
    ...
@overload
def dataclass(
    _cls: Type[Any],
    *,
    init: bool = True,
    repr: bool = True,
    eq: bool = True,
    order: bool = False,
    unsafe_hash: bool = False,
    frozen: bool = False,
    config: Type[Any] = None,
) -> Type['Dataclass']:
    # Overload: bare decorator form `@dataclass`.
    ...
def dataclass(
    _cls: Optional[Type[Any]] = None,
    *,
    init: bool = True,
    repr: bool = True,
    eq: bool = True,
    order: bool = False,
    unsafe_hash: bool = False,
    frozen: bool = False,
    config: Type[Any] = None,
) -> Union[Callable[[Type[Any]], Type['Dataclass']], Type['Dataclass']]:
    """
    Like the python standard lib dataclasses but with type validation.
    Arguments are the same as for standard dataclasses, except for validate_assignment which has the same meaning
    as Config.validate_assignment.
    """
    def wrap(cls: Type[Any]) -> Type['Dataclass']:
        return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, config)
    if _cls is None:
        # Called as `@dataclass(...)`: hand back the decorator itself.
        return wrap
    # Called as a bare `@dataclass`: process immediately.
    return wrap(_cls)
def make_dataclass_validator(_cls: Type[Any], config: Type['BaseConfig']) -> 'CallableGenerator':
    """
    Create a pydantic.dataclass from a builtin dataclass to add type validation
    and yield the validators
    It retrieves the parameters of the dataclass and forwards them to the newly created dataclass
    """
    params = _cls.__dataclass_params__
    # Forward the stdlib dataclass's own options (init, repr, eq, ...) verbatim.
    forwarded = {name: getattr(params, name) for name in params.__slots__}
    for validator in _get_validators(dataclass(_cls, config=config, **forwarded)):
        yield validator
| |
# -*- coding: utf-8 -*-
"""
If you call sender.py from the command line, it will send 3 test packets.
See argparse for options.
Otherwise, this module is loaded to provide the ServerSender class.
"""
from __future__ import print_function
import socket
import argparse
import time
import numpy as np
from contextlib import closing
from Crypto.Cipher import AES
from auxiliaries import set_verbosity, Config, PublicKey
from globalvalues import DEFAULT_HOSTNAME, DEFAULT_SENDER_MODE
from globalvalues import DEFAULT_UDP_PORT, DEFAULT_TCP_PORT
from globalvalues import TESTING_UDP_PORT, TESTING_TCP_PORT
from globalvalues import DEFAULT_CONFIG, DEFAULT_PUBLICKEY, DEFAULT_AESKEY
from globalvalues import NETWORK_LED_BLINK_PERIOD_S
TCP_TIMEOUT = 5
D3S_PREPEND_STR = '{:05d}'
class ServerSender(object):
    """
    Provides ServerSender.send_cpm() for sending UDP packets to the DoseNet
    server.
    """
    def __init__(self,
                 manager=None,
                 address=DEFAULT_HOSTNAME,
                 port=None,
                 config=None,
                 publickey=None,
                 aes=None,
                 verbosity=1,
                 logfile=None,
                 mode=None,
                 ):
        """
        network_status, config, publickey, aes loaded from manager
        if not provided.
        address and port take system defaults, although without config and
        publickey, address and port will not be used.
        """
        self.v = verbosity
        if manager and logfile is None:
            # Inherit the manager's logfile so all output lands in one place.
            set_verbosity(self, logfile=manager.logfile)
        else:
            set_verbosity(self, logfile=logfile)
        self.address = address
        self.handle_input(
            manager, mode, port, config, publickey, aes)
    def handle_input(
            self, manager, mode, port, config, publickey, aes):
        # Resolve mode, port, config, publickey and AES key, falling back to
        # the manager's values and then to module-level defaults.
        # TODO: this stuff is messy. Is there a cleaner way using exceptions?
        if manager is None:
            self.vprint(1, 'ServerSender starting without Manager object')
        self.manager = manager
        try:
            if mode is None:
                self.mode = DEFAULT_SENDER_MODE
            elif mode == 'udp_test':
                self.mode = 'udp_test'
            elif mode == 'tcp_test':
                self.mode = 'tcp_test'
            elif mode.lower() == 'udp':
                self.mode = 'udp'
            elif mode.lower() == 'tcp':
                self.mode = 'tcp'
            else:
                raise RuntimeError(
                    'Invalid ServerSender mode (choose TCP or UDP)')
        except AttributeError:
            # if mode is not a string or None, then mode.lower() raises this
            raise RuntimeError('Invalid ServerSender mode (choose TCP or UDP)')
        # Pick the port matching the chosen mode unless one was given explicitly.
        if self.mode == 'udp':
            if port is None:
                self.port = DEFAULT_UDP_PORT
            else:
                self.port = port
            self.vprint(3, 'ServerSender using UDP for {}:{}'.format(
                self.address, self.port))
        elif self.mode == 'tcp':
            if port is None:
                self.port = DEFAULT_TCP_PORT
            else:
                self.port = port
            self.vprint(3, 'ServerSender using TCP for {}:{}'.format(
                self.address, self.port))
        elif self.mode == 'udp_test':
            if port is None:
                self.port = TESTING_UDP_PORT
            else:
                self.port = port
            self.vprint(3, 'ServerSender using UDP for {}:{}'.format(
                self.address, self.port))
        elif self.mode == 'tcp_test':
            if port is None:
                self.port = TESTING_TCP_PORT
            else:
                self.port = port
            self.vprint(3, 'ServerSender using TCP for {}:{}'.format(
                self.address, self.port))
        if config is None:
            if manager is None:
                self.vprint(1, 'ServerSender starting without config file')
                self.config = None
            else:
                self.config = manager.config
        else:
            self.config = config
        if publickey is None:
            if manager is None:
                self.vprint(1, 'ServerSender starting without publickey file')
                self.encrypter = None
            elif manager.publickey is None:
                self.vprint(1, 'ServerSender starting without publickey file')
                self.encrypter = None
            else:
                self.encrypter = manager.publickey.encrypter
        else:
            self.encrypter = publickey.encrypter
        if aes is None:
            if manager is None:
                self.vprint(2, 'ServerSender starting without AES key')
                self.aes = None
            elif manager.aes is None:
                self.vprint(2, 'ServerSender starting without AES key')
                self.aes = None
            else:
                self.aes = manager.aes
        else:
            self.aes = aes
    def construct_packet(self, sensor_type, cpm, cpm_error, error_code=0):
        """
        Construct the raw packet string. (basically copied from old code)
        hash,ID,cpm,cpm_error,error_code
        """
        try:
            raw_packet = ','.join(
                [str(self.config.hash),
                 str(self.config.ID),
                 str(sensor_type),
                 str(cpm),
                 str(cpm_error),
                 str(error_code)]
            )
        except AttributeError:  # on self.config.hash
            raise MissingFile('Missing or broken Config object')
        else:
            self.vprint(3, 'Constructed packet')
            return raw_packet
    def construct_packet_new(self, sensor_type, timestamp, cpm, cpm_error, error_code=0):
        """
        New protocol version of construct packet.
        hash,ID,timestamp,cpm,cpm_error,error_code
        """
        try:
            raw_packet = ','.join(
                [str(self.config.hash),
                 str(self.config.ID),
                 str(sensor_type),
                 str(timestamp),
                 str(cpm),
                 str(cpm_error),
                 str(error_code)]
            )
        except AttributeError:  # on self.config.hash
            raise MissingFile('Missing or broken Config object')
        else:
            self.vprint(3, 'Constructed packet')
            return raw_packet
    def construct_packet_new_D3S(self, sensor_type, timestamp, spectra, error_code=0):
        """
        TCP version of construct packet.
        """
        # convert spectra to a string representation that won't interfere with
        # injector's parsing (no commas)
        spectra_str = str(spectra).replace(',', ';')
        try:
            raw_packet = ','.join(
                [str(self.config.hash),
                 str(self.config.ID),
                 str(sensor_type),
                 str(timestamp),
                 spectra_str,
                 str(error_code)]
            )
        except AttributeError:  # on self.config.hash
            raise MissingFile('Missing or broken Config object')
        else:
            self.vprint(3, 'Constructed packet')
            return raw_packet
    def construct_packet_new_AQ(self, sensor_type, timestamp, average_data, error_code=0):
        """
        Construct an air-quality packet:
        hash,ID,sensor_type,timestamp,average_data,error_code
        """
        # Semicolons keep the data list from clashing with the comma-separated
        # packet format.
        avgdata_str = str(average_data).replace(',', ';')
        try:
            raw_packet = ','.join(
                [str(self.config.hash),
                 str(self.config.ID),
                 str(sensor_type),
                 str(timestamp),
                 avgdata_str,
                 str(error_code)]
            )
        except AttributeError:  # on self.config.hash
            raise MissingFile('Missing or broken Config object')
        else:
            self.vprint(3, 'Constructed packet')
            return raw_packet
    def construct_packet_new_CO2(self, sensor_type, timestamp, average_data, error_code=0):
        """
        Construct a CO2 packet; same layout as construct_packet_new_AQ.
        """
        avgdata_str = str(average_data).replace(',', ';')
        try:
            raw_packet = ','.join(
                [str(self.config.hash),
                 str(self.config.ID),
                 str(sensor_type),
                 str(timestamp),
                 avgdata_str,
                 str(error_code)]
            )
        except AttributeError:
            raise MissingFile('Missing or broken Config object')
        else:
            self.vprint(3, 'Constructed packet')
            return raw_packet
    def construct_packet_new_weather(self, sensor_type, timestamp, average_data, error_code=0):
        """
        Construct a weather packet; same layout as construct_packet_new_AQ.
        """
        avgdata_str = str(average_data).replace(',', ';')
        try:
            raw_packet = ','.join(
                [str(self.config.hash),
                 str(self.config.ID),
                 str(sensor_type),
                 str(timestamp),
                 avgdata_str,
                 str(error_code)]
            )
        except AttributeError:
            raise MissingFile('Missing or broken Config object')
        else:
            self.vprint(3, 'Constructed packet')
            return raw_packet
    def construct_log_packet(self, msg_code, msg_text):
        """
        Send a message to be recorded in the server log database.
        hash,ID,"LOG",msg_code,msg_text
        """
        if not isinstance(msg_code, int):
            raise TypeError('msg_code should be an int')
        try:
            raw_packet = ','.join(
                [str(self.config.hash),
                 str(self.config.ID),
                 'LOG',
                 str(msg_code),
                 str(msg_text)]
            )
        except AttributeError:  # on self.config.hash
            raise MissingFile('Missing or broken Config object')
        else:
            self.vprint(3, 'Constructed log packet')
            return raw_packet
    def encrypt_packet(self, raw_packet):
        """Encrypt the raw packet"""
        self.vprint(3, 'Encrypting packet: {}'.format(raw_packet))
        try:
            encrypted = self.encrypter.encrypt_message(raw_packet)[0]
        except AttributeError:
            raise MissingFile('Missing or broken PublicKey object')
        else:
            return encrypted
    def encrypt_packet_aes(self, raw_packet):
        """Encrypt with AES (for D3S). Prepend message length."""
        self.vprint(3, 'AES encrypting packet: {}'.format(raw_packet))
        try:
            # AES requires the plaintext length to be a multiple of the block
            # size; pad with spaces when needed.
            block_size = 16
            pad_length = block_size - (len(raw_packet) % block_size)
            if pad_length == block_size:
                encrypted = self.aes.encrypt(raw_packet)
            else:
                pad = ' ' * pad_length
                encrypted = self.aes.encrypt(raw_packet + pad)
        except AttributeError:
            raise MissingFile('Missing or broken AES object')
        else:
            prepend = D3S_PREPEND_STR.format(len(encrypted))
            # prepend does NOT include its own string in the message length
            full_packet = prepend + encrypted
            return full_packet
    def send_data(self, encrypted):
        """
        Send data according to self.mode
        """
        self.vprint(3, 'Trying to send data by {}'.format(self.mode))
        if self.mode == 'udp' or self.mode == 'udp_test':
            self.send_udp(encrypted)
        elif self.mode == 'tcp' or self.mode == 'tcp_test':
            self.send_tcp(encrypted)
    def send_udp(self, encrypted):
        """
        Send the encrypted packet over UDP
        """
        self.vprint(3, 'Sending encrypted UDP packet to {}:{}'.format(
            self.address, self.port))
        # closing() guarantees the socket is released even on error.
        with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as s:
            s.sendto(encrypted, (self.address, self.port))
            self.vprint(
                3, 'UDP packet sent successfully (no client-side error)')
    def send_tcp(self, encrypted):
        """
        Send the encrypted packet over TCP
        """
        self.vprint(3, 'Sending encrypted TCP packet to {}:{}'.format(
            self.address, self.port))
        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
            s.settimeout(TCP_TIMEOUT)   # generous timeout
            s.connect((self.address, self.port))
            s.sendall(encrypted)
            # The server replies with "<branch>,<update-flag>"; see
            # handle_return_packet.
            received = s.recv(1024)
            self.vprint(3, 'TCP received {}'.format(received))
            branch, flag = self.handle_return_packet(received)
            if branch is not None:
                self.vprint(3, 'Branch: {}'.format(branch))
                self.vprint(3, 'Update flag: {}'.format(flag))
                if self.manager:
                    self.manager.branch = branch
                    self.manager.quit_after_interval = flag
                else:
                    self.vprint(
                        1, 'No manager, not saving branch and updateflag')
            else:
                self.vprint(2, 'Bad or missing return packet!')
        self.vprint(3, 'TCP packet sent successfully')
    def send_cpm(self, sensor_type, cpm, cpm_error, error_code=0):
        """Construct, encrypt, and send the packet"""
        packet = self.construct_packet(sensor_type, cpm, cpm_error, error_code=error_code)
        encrypted = self.encrypt_packet(packet)
        self.send_data(encrypted)
    def send_cpm_new(self, sensor_type, timestamp, cpm, cpm_error, error_code=0):
        """
        New protocol for send_cpm
        """
        packet = self.construct_packet_new(
            sensor_type, timestamp, cpm, cpm_error, error_code=error_code)
        encrypted = self.encrypt_packet(packet)
        self.send_data(encrypted)
    def send_log(self, msg_code, msg_text):
        """
        Send a log message
        """
        packet = self.construct_log_packet(msg_code, msg_text)
        encrypted = self.encrypt_packet(packet)
        self.send_data(encrypted)
    def send_spectra_new_D3S(self, sensor_type, timestamp, spectra, error_code=0):
        """
        TCP for sending spectra
        """
        packet = self.construct_packet_new_D3S(
            sensor_type, timestamp, spectra, error_code=error_code)
        # D3S spectra use AES, not the public-key encrypter.
        encrypted = self.encrypt_packet_aes(packet)
        self.send_data(encrypted)
    def send_data_new_AQ(self, sensor_type, timestamp, average_data, error_code=0):
        """
        Protocol for sending average air quality data
        """
        packet = self.construct_packet_new_AQ(
            sensor_type, timestamp, average_data, error_code=error_code)
        encrypted = self.encrypt_packet(packet)
        self.send_data(encrypted)
    def send_data_new_CO2(self, sensor_type, timestamp, average_data, error_code=0):
        """
        Protocol for sending the average CO2 data
        """
        packet = self.construct_packet_new_CO2(
            sensor_type, timestamp, average_data, error_code=error_code)
        encrypted = self.encrypt_packet(packet)
        self.send_data(encrypted)
    def send_data_new_weather(self, sensor_type, timestamp, average_data, error_code=0):
        """
        Protocol for sending the average weather data
        """
        packet = self.construct_packet_new_weather(
            sensor_type, timestamp, average_data, error_code=error_code)
        encrypted = self.encrypt_packet(packet)
        self.send_data(encrypted)
    def handle_return_packet(self, received):
        """
        Extracts the git tag from sender and puts it into a list.
        """
        # Expected payload: "<branch>,<update-flag>".  Any malformed input
        # (wrong field count, non-integer flag, or None) yields (None, None).
        try:
            received = [x.strip() for x in received.split(',')]
            branch = received[0]
            if int(received[1]) == 0:
                flag = False
            else:
                flag = True
            # NOTE(review): the length check runs after the fields are parsed;
            # a >2-field packet is still rejected, just later than expected.
            assert len(received) == 2
        except (AttributeError, IndexError, ValueError, AssertionError):
            return None, None
        else:
            return branch, flag
class PacketError(Exception):
    """Base exception for packet-related errors in this module."""
    pass
class MissingFile(PacketError):
    """PacketError subclass for a missing input file (not raised in this chunk
    of the module — presumably raised by off-screen code; confirm)."""
    pass
def send_test_packets(
        mode=DEFAULT_SENDER_MODE,
        config=DEFAULT_CONFIG,
        publickey=DEFAULT_PUBLICKEY,
        address=DEFAULT_HOSTNAME,
        port=None,
        n=3,
        encrypt=True,
        raw_packet=None):
    """
    Send n (default 3) test packets to the DoseNet server.
    """
    sleep_time = 2  # seconds between packets
    try:
        config_obj = Config(config)
    except IOError:
        # config file doesn't exist; proceed without it
        config_obj = None
    key_obj = PublicKey(publickey)
    sender = ServerSender(
        mode=mode, address=address, port=port,
        config=config_obj, publickey=key_obj, verbosity=3)
    try:
        station_id = config_obj.ID
    except AttributeError:
        # no config object -> unknown station
        station_id = '?'
    if raw_packet is None:
        raw_packet = 'Test packet from station {} by mode {}'.format(
            station_id, mode)
    packet_to_send = sender.encrypt_packet(raw_packet) if encrypt else raw_packet
    for _ in xrange(n):
        sender.send_data(packet_to_send)
        time.sleep(sleep_time)
def send_log_message(
        mode=DEFAULT_SENDER_MODE,
        config=DEFAULT_CONFIG,
        publickey=DEFAULT_PUBLICKEY,
        address=DEFAULT_HOSTNAME,
        port=None,
        msgcode=0,
        message='ServerSender test'):
    """
    Send a log message to the server.

    Falls back gracefully when the config file (station ID) or public key
    (encryption) is unavailable; with both present it uses the standard
    ServerSender.send_log path.
    """
    try:
        config_obj = Config(config)
    except IOError:
        # file doesn't exist
        config_obj = None
    try:
        key_obj = PublicKey(publickey)
    except IOError:
        # file doesn't exist
        key_obj = None
    sender = ServerSender(
        mode=mode, address=address, port=port,
        config=config_obj, publickey=key_obj, verbosity=3)
    # Bug fixes vs. original:
    #  * str.join takes a single iterable — the original passed multiple
    #    arguments (TypeError) and a non-string msgcode.
    #  * the ID-presence test checked `config` (the path string, never None
    #    by default) instead of `config_obj` (None when the file is missing).
    if key_obj is None:
        # no encryption
        if config_obj is None:
            # no ID
            packet = ','.join([str(msgcode), message])
            sender.send_data(packet)
        else:
            # has ID
            packet = ','.join(
                [sender.config.hash, sender.config.ID, str(msgcode), message])
            sender.send_data(packet)
    else:
        # encryption
        if config_obj is None:
            # no ID
            packet = ','.join([str(msgcode), message])
            encrypted = sender.encrypt_packet(packet)
            sender.send_data(encrypted)
        else:
            # standard, full functionality
            sender.send_log(msgcode, message)
def send_test_d3s_packet(
        config=DEFAULT_CONFIG,
        publickey=DEFAULT_PUBLICKEY,
        aeskey=DEFAULT_AESKEY,
        port=None,
        encrypt=True):
    """
    Send a test packet in the format of the D3S data.

    Bug fix vs. original: the "no publickey / no AES key, can't encrypt"
    blocks executed unconditionally, so encryption was always disabled even
    when both keys loaded successfully. They now fire only when the
    corresponding key is actually missing.
    """
    try:
        config_obj = Config(config)
    except IOError:
        config_obj = None
    try:
        key_obj = PublicKey(publickey)
    except IOError:
        key_obj = None
    if key_obj is None and encrypt:
        print("no publickey, can't encrypt")
        encrypt = False
    try:
        with open(aeskey, 'r') as aesfile:
            aeskey = aesfile.read()
        aes = AES.new(aeskey, mode=AES.MODE_ECB)
    except IOError:
        aes = None
    if aes is None and encrypt:
        print("no AES key, can't encrypt")
        encrypt = False
    sender = ServerSender(
        port=port, config=config_obj, publickey=key_obj, aes=aes, verbosity=3)
    # Small random test spectrum (4096 channels of 0-2 counts).
    spectrum = [int(np.random.random() * 3) for _ in xrange(4096)]
    # NOTE(review): send_spectra_new_D3S passes (sensor_type, timestamp,
    # spectra) to construct_packet_new_D3S; here only (timestamp, spectrum)
    # are passed — confirm the constructor's defaults make this valid.
    raw_packet = sender.construct_packet_new_D3S(time.time(), spectrum)
    if encrypt:
        packet_to_send = sender.encrypt_packet_aes(raw_packet)
    else:
        packet_to_send = raw_packet
    try:
        sender.send_data(packet_to_send)
    except socket.timeout:
        print('timeout!')
    return packet_to_send
# CLI entry point: parse network/crypto options and send one log message.
if __name__ == '__main__':
    # send a test log entry
    parser = argparse.ArgumentParser(
        description='Sender for UDP/TCP data packets. ' +
        'Normally called from manager.py. ' +
        'Called directly, it will send a log message to the server.')
    parser.add_argument('--mode', '-n', choices=['udp', 'tcp', 'UDP', 'TCP', 'udp_test', 'tcp_test', 'UDP_test', 'TCP_test'],
                        default=DEFAULT_SENDER_MODE,
                        help='Network protocol to use')
    parser.add_argument('--config', '-g', type=str, default=DEFAULT_CONFIG,
                        help='config file location')
    parser.add_argument('--publickey', '-k', type=str,
                        default=DEFAULT_PUBLICKEY,
                        help='publickey file location')
    parser.add_argument('--hostname', '-a', type=str, default=DEFAULT_HOSTNAME,
                        help='hostname (web address or IP)')
    parser.add_argument('--port', '-p', type=int, default=None,
                        help='port')
    parser.add_argument('--msgcode', '-c', type=int, default=0,
                        help='message code for log')
    parser.add_argument('--message', '-m', type=str,
                        default='ServerSender test',
                        help='message text for log')
    args = parser.parse_args()
    # Delegate to send_log_message, which degrades gracefully when the
    # config or publickey file is missing.
    send_log_message(
        mode=args.mode, address=args.hostname, port=args.port,
        config=args.config, publickey=args.publickey,
        msgcode=args.msgcode, message=args.message)
| |
#! /usr/bin/env python
"""
This script takes an input file in USGS/EPA WQX xml format and creates a multi-indexed
Pandas Dataframe that contains time series of water quality data and discharge combined
with layers that contain meta data for each data value. You can call the script from
the command line using WQXtoPandas [input WQX start file], or import the runWQXtoPandas
function for calling within a Python session.
"""
from __future__ import print_function
import sys
import xlrd
import os
import requests
from glob import glob
from math import ceil
import pickle as pickle
from lxml import etree
from pandas import DataFrame, to_datetime, concat, ExcelWriter
from olm.USGS.PhreeqcPandas import processMidf
# import functions from olm package
from olm.USGS.siteListExtraction import extractSitesFromXML
from olm.USGS.siteListExtraction import extractSitesFromText
from olm.USGS.DataRetrieval import querySiteList, GetDailyDischarge, GetSiteData
from olm.USGS.dataSlice import extractValues
def WQXtoPandas(
    xmlLocation,
    charDict,
    outputPath=".",
    fromFile=False,
    outputDirName="Processed-Sites",
    RUN_PHREEQC=False,
    PHREEQC_PATH="/home/mcoving/phreeqc-2.18.0/bin/",
    DATABASE_FILE="/home/mcoving/phreeqc-2.18.0/database/phreeqc.dat",
    LOG_FILE="Result.log",
    START_FILE=None,
    splittag="",
    bracket_charge_balance=False,
):
    """
    Processes a WQX xml data file and loads data for each site in the WQX file into
    Pandas data objects that are stored in directories for each site.
    Parameters
    ----------
    xmlLocation : string
        Content depends on mode in which WQXtoPandas is run. When fromFile is set to
        False (input methods 2 or 3 in excel file) this string contains the html for
        a query to the USGS NWIS database to obtain an xml file of the desired data.
        Alternatively, if fromFile is True (input method 1 in excel file) then this
        string contains the name of the xml file from which to read the data.
    charDict : dict
        A dictionary containing information about the characteristics to be processed.
        Keys are EPA SRS characteristic names. Each entry in the dictionary is a second
        dictionary that contains keys IsRequired, pcode, fraction, and quality. These
        entries tell WQXtoPandas whether a given characteristic is required in order to
        process a sample, and whether a specific pcode, fraction, or quality should be
        required. See excel example file for more details.
    outputPath : string
        path to directory that will contain output directory
    fromFile : boolean
        True if data will be read from an xml file already present on computer. False
        if xml file should be queried from NWIS. (Default=False)
    outputDirName : string
        Name of output directory where all site data will be written out.
        (Default='Processed-Sites')
    RUN_PHREEQC : boolean
        Set to true if samples should be processed through PHREEQC. (Default=False)
    PHREEQC_PATH : string
        Path to PHREEQC executable (folder only, not executable file name)
    DATABASE_FILE : string
        Path to database file that PHREEQC should use, including database file name.
    LOG_FILE : string
        Name of log file that WQXtoPandas will create. (Default='Result.log')
    START_FILE : string
        Name of xls start file that was used to run this instance of WQXtoPandas. Name
        will be written out in log file.
    splittag : string
        Suffix appended to the log and cached-xml file names when a site list
        has been split into multiple queries (e.g. '.0', '.1'). (Default='')
    bracket_charge_balance : bool
        If set to true, WQXtoPandas will alternately force charge balance on calcium and
        alkalinity, while the latter is not physically meaningful, this provides a useful
        estimate of uncertainty for cases with high charge balance errors. This is most
        useful for water that is very dilute or with high organic content, such that
        titrated alkalinity values are artificially high.
    Returns
    -------
    Returns 0 if execution successful. Returns -1 in case of error.
    Notes
    -----
    Designed to be run through convenience function runWQXtoPandas().
    """
    try:
        # Check to see if output directory exists
        absOutputDirPath = os.path.abspath(outputPath)
        sitesdir = os.path.join(absOutputDirPath, outputDirName)
        print("sitesdir", sitesdir)
        if not (os.path.exists(sitesdir)):
            try:
                os.makedirs(sitesdir)
            except os.error:
                print(
                    (
                        "Problem creating output directory. Check output path name: "
                        + outputPath
                    )
                )
                return -1
        # create xml tree
        if fromFile:
            # read from file
            print("xmlLocation", xmlLocation)
            wqxtree = etree.ElementTree(file=xmlLocation)
        else:
            # check whether we already have a matching xml file
            xmlSaveFile = LOG_FILE + splittag + ".xml"
            if os.path.isfile(xmlSaveFile):
                goodAnswer = False
                while not (goodAnswer):
                    answer = input(
                        "An xml file ("
                        + xmlSaveFile
                        + ") already exists. \n Use this instead of html query (y or n)?"
                    )
                    if answer.startswith("y"):
                        # read from file
                        wqxtree = etree.ElementTree(file=xmlSaveFile)
                        goodAnswer = True
                        queryXML = False
                    elif answer.startswith("n"):
                        goodAnswer = True
                        queryXML = True
            else:
                queryXML = True
            # If we don't have a matching xml file, or we want to obtain a new one, then get the new xml
            if queryXML:
                print("Obtaining xml file from USGS NWIS using html query...")
                # parse from html query
                print("XML query string: ", xmlLocation)
                r = requests.get(xmlLocation)
                if not r.ok:
                    # There is some problem with the xml query
                    print("Response: ", str(r))
                    print("Reason: ", r.reason)
                    print("Warning: ", r.headers["Warning"])
                # write to xml file
                try:
                    # write xml to file
                    xmlFile = open(xmlSaveFile, "w")
                    print(r.text, file=xmlFile)
                    xmlFile.close()
                    wqxtree = etree.ElementTree(file=xmlSaveFile)
                except IOError:
                    print(
                        (
                            "Problem writing to xml file to store html query: "
                            + xmlSaveFile
                        )
                    )
                    return -1
        # begin parsing XML tree
        root = wqxtree.getroot()
        # get namespace map
        NSMAP = root.nsmap
        WQX = "{%s}" % NSMAP[None]
        # iterate over all <Activity> tags within file and process each sample
        samples_processed = []
        samples_not_processed = []
        sitesDict = {}
        # NOTE(review): getiterator() is deprecated (removed from the stdlib
        # ElementTree in Python 3.9); iter() is the modern spelling — confirm
        # the installed lxml still provides it.
        for activity in wqxtree.getiterator(tag=WQX + "Activity"):
            processThisSample = True
            reason = ""
            description = activity.find(WQX + "ActivityDescription")
            if description != None:
                datetext = description.findtext(WQX + "ActivityStartDate")
                starttime = description.find(WQX + "ActivityStartTime")
                if starttime != None:
                    timetext = starttime.findtext(WQX + "Time")
                    timezone = starttime.findtext(WQX + "TimeZoneCode")
                else:
                    timetext = ""
                    timezone = ""
                location = description.findtext(WQX + "MonitoringLocationIdentifier")
                if location[:5] == "USGS-":
                    USGS = True
                else:
                    USGS = False
                descriptionDict = {
                    "location": location,
                    "date": datetext,
                    "time": timetext,
                    "timezone": timezone,
                }
            else:
                descriptionDict = None
                processThisSample = False
                reason = "No description"
            # NOTE(review): if description was None, `location`/`datetext` are
            # unset (NameError on the first activity) or stale from a previous
            # iteration — confirm intended behavior.
            print(("Processing sample from " + location + " on " + datetext))
            # create null sample dict
            sampleDict = {}
            sampleMetaDict = {}
            # iterate though all results for this activity
            for result in activity.getiterator(tag=WQX + "Result"):
                if processThisSample:
                    try:
                        resultdesc = result.find(WQX + "ResultDescription")
                        characteristic = resultdesc.findtext(WQX + "CharacteristicName")
                        if characteristic in charDict:
                            samplefraction = resultdesc.findtext(
                                WQX + "ResultSampleFractionText"
                            )
                            pcode = resultdesc.findtext(WQX + "USGSPCode")
                            quality = resultdesc.findtext(
                                WQX + "ResultStatusIdentifier"
                            )
                            measure = resultdesc.find(WQX + "ResultMeasure")
                            count = 1.0
                            detection = resultdesc.findtext(
                                WQX + "ResultDetectionConditionText"
                            )
                            # print('detection=',detection)
                            if not (measure is None) or not (detection is None):
                                if not (measure is None):
                                    value = measure.findtext(WQX + "ResultMeasureValue")
                                    # print('initial value = ',value)
                                    units = measure.findtext(WQX + "MeasureUnitCode")
                                    # EPA system does not have detection info.
                                    # Check for < in value text.
                                    if "<" in str(value):
                                        value = value[1:]
                                        nondetect = True
                                    else:
                                        nondetect = False
                                elif not (detection is None):
                                    # print("entering nondetect...")
                                    nondetect = True
                                    value = None
                                    labinfo = result.find(WQX + "ResultLabInformation")
                                    if not (labinfo == None):
                                        # print("labinfo present")
                                        quantLimitMeasure = labinfo.find(
                                            WQX + "ResultDetectionQuantitationLimit"
                                        )
                                        if not (quantLimitMeasure == None):
                                            # print("Quant limit present")
                                            nondetectmeasure = quantLimitMeasure.find(
                                                WQX
                                                + "DetectionQuantitationLimitMeasure"
                                            )
                                            if not (nondetectmeasure == None):
                                                # print("Quant limit measure present")
                                                value = nondetectmeasure.findtext(
                                                    WQX + "MeasureValue"
                                                )
                                    # print('measurevalue=',value)
                                    # print(quantLimitMeasure)
                                    # print("nondetect value=",value)
                                # split pcode into list
                                tempPcodeList = charDict[characteristic]["pcode"].split(
                                    ";"
                                )
                                # print("tempPcodeList="+str(tempPcodeList))
                                # Map each acceptable pcode to its priority
                                # (list order = preference order).
                                pcodeDict = {}
                                for codePriority, code in enumerate(tempPcodeList):
                                    code = code.strip()
                                    if code != "":
                                        pcodeDict[code] = codePriority
                                # Check whether characteristic meets criteria
                                # for inclusion, otherwise don't add to sampleDict
                                addCharacteristic = True
                                if charDict[characteristic]["fraction"] != "0":
                                    # test for correct fraction
                                    if (
                                        charDict[characteristic]["fraction"]
                                        != samplefraction
                                    ):
                                        addCharacteristic = False
                                if addCharacteristic:
                                    if USGS:
                                        if charDict[characteristic]["pcode"] != "0":
                                            # test for correct pcode
                                            # print("pcode = "+pcode)
                                            # print("pcodeList = "+str(pcodeList))
                                            # print("pcode in list="+str(pcode in pcodeList))
                                            if not (pcode in pcodeDict):
                                                addCharacteristic = False
                                if addCharacteristic:
                                    if charDict[characteristic]["quality"] != "0":
                                        # test for correct data quality
                                        if (
                                            charDict[characteristic]["quality"]
                                            != quality
                                        ):
                                            addCharacteristic = False
                                # end of characteristic criteria check
                                # Process duplicate characteristics
                                if addCharacteristic:
                                    if characteristic in sampleDict:
                                        if USGS:
                                            priorPcode = sampleMetaDict[characteristic][
                                                "pcode"
                                            ]
                                            # if there are already multiple pcodes get only first one
                                            priorPcode = priorPcode.split(";")[0]
                                            averageValue = False
                                            if len(pcodeDict) > 1:
                                                thisPcodePriority = pcodeDict[pcode]
                                                priorPcodePriority = pcodeDict[
                                                    priorPcode
                                                ]
                                                if (
                                                    thisPcodePriority
                                                    > priorPcodePriority
                                                ):
                                                    # previous characteristic remains
                                                    addCharacteristic = False
                                                elif (
                                                    thisPcodePriority
                                                    == priorPcodePriority
                                                ):
                                                    averageValue = True
                                            else:
                                                averageValue = True
                                            if averageValue:
                                                priorUnits = sampleMetaDict[
                                                    characteristic
                                                ]["units"]
                                                # Only average if we have the same units
                                                if units == priorUnits:
                                                    # Check if this or prior was non-detect
                                                    if sampleMetaDict[characteristic][
                                                        "nondetect"
                                                    ]:
                                                        if nondetect:
                                                            # If both are non-detect, no need to add
                                                            averageValue = False
                                                            addCharacteristic = False
                                                        else:
                                                            # If prior was non-detect, but this one isn't
                                                            # Add this one instead
                                                            averageValue = False
                                                            addCharacteristic = True
                                                    elif nondetect:
                                                        # This one is non-detect, prior was not.
                                                        averageValue = False
                                                        addCharacteristic = False
                                                    if averageValue:
                                                        # average this value with existing values
                                                        count = sampleMetaDict[
                                                            characteristic
                                                        ]["count"]
                                                        count += 1.0
                                                        oldvalue = float(
                                                            sampleDict[characteristic]
                                                        )
                                                        # Running mean over all
                                                        # equal-priority values.
                                                        newvalue = (
                                                            oldvalue * (count - 1.0)
                                                            + float(value)
                                                        ) / count
                                                        value = str(newvalue)
                                                        pcode = (
                                                            priorPcode + "; " + pcode
                                                        )
                                                        # Changed this behavior to not allow different units
                                                        # units = priorUnits + '; ' + units
                                                else:
                                                    # Do not add if units are different
                                                    addCharacteristic = False
                                if addCharacteristic:
                                    sampleDict[characteristic] = value
                                    sampleMetaDict[characteristic] = {
                                        "samplefraction": samplefraction,
                                        "units": units,
                                        "pcode": pcode,
                                        "quality": quality,
                                        "count": count,
                                        "nondetect": nondetect,
                                    }
                    except etree.XMLSyntaxError as detail:
                        print("File contains invalid XML syntax: ", detail)
                        processThisSample = False
                        reason = "Entry contains invalid XML syntax."
            # end results loop
            # check whether sample has all the required constituents
            if processThisSample:
                for characteristic in charDict.keys():
                    if charDict[characteristic]["IsRequired"] != "0":
                        if not (characteristic in sampleDict):
                            processThisSample = False
                            reason += characteristic + " not available. "
            if processThisSample:
                # check to see whether site directory exists, if not, create it
                sampledir = os.path.join(sitesdir, location)
                if not (os.path.exists(sampledir)):
                    try:
                        os.makedirs(sampledir)
                    except os.error:
                        print(("Problem creating location directory: " + sampledir))
                        processThisSample = False
                        reason = "Problem creating location directory: " + sampledir
            if processThisSample:
                # Pull daily discharge data from USGS website
                good_discharge_value = False
                num_Q_tries = 0
                if not USGS:
                    # We do not have a USGS site, do not query discharge
                    num_Q_tries = 99
                    dischargeDict = None
                # Try 5 times to retrieve discharge value
                while (not good_discharge_value) and num_Q_tries <= 5:
                    dischargeDict = GetDailyDischarge(
                        location, datetext
                    )  # currently hard-wired to pcode 00060 (daily discharge, cfs)
                    if dischargeDict != -1:
                        good_discharge_value = True
                    else:
                        num_Q_tries += 1
                        dischargeDict = None
                if dischargeDict is not None:
                    sampleDict["Stream flow, mean. daily"] = dischargeDict["discharge"]
                    sampleMetaDict["Stream flow, mean. daily"] = {
                        "units": "cfs",
                        "pcode": "00060",
                        "quality": dischargeDict["quality"],
                        "count": 1,
                        "samplefraction": None,
                        "nondetect": False,
                    }
                    descriptionDict["name"] = dischargeDict["name"]
                else:
                    # Possibly allow this sample to be thrown out if no mean daily discharge, and/or similar for instantaneous discharge
                    sampleDict["Stream flow, mean. daily"] = None
                    sampleMetaDict["Stream flow, mean. daily"] = {
                        "units": "cfs",
                        "pcode": "00060",
                        "quality": None,
                        "count": 1,
                        "samplefraction": None,
                        "nondetect": False,
                    }
                # Create data frame row for this sample date
                if descriptionDict["time"] != "":
                    rowdate = to_datetime(datetext + " " + descriptionDict["time"])
                else:
                    rowdate = to_datetime(datetext)
                # Create Multiindex Dataframe to contain sample meta data
                # (top level = layer name: data, time, pcode, quality, ...;
                # second level = characteristic name).
                sampleMultiindexRow = concat(
                    {
                        "data": DataFrame(sampleDict, index=[rowdate], dtype="float"),
                        "time": DataFrame(
                            descriptionDict["time"],
                            index=[rowdate],
                            columns=list(sampleMetaDict.keys()),
                        ),
                        "timezone": DataFrame(
                            descriptionDict["timezone"],
                            index=[rowdate],
                            columns=list(sampleMetaDict.keys()),
                        ),
                        "pcode": DataFrame(
                            [extractValues(sampleMetaDict, ["pcode"])["values"]],
                            index=[rowdate],
                            columns=list(sampleMetaDict.keys()),
                        ),
                        "quality": DataFrame(
                            [extractValues(sampleMetaDict, ["quality"])["values"]],
                            index=[rowdate],
                            columns=list(sampleMetaDict.keys()),
                        ),
                        "fraction": DataFrame(
                            [
                                extractValues(sampleMetaDict, ["samplefraction"])[
                                    "values"
                                ]
                            ],
                            index=[rowdate],
                            columns=list(sampleMetaDict.keys()),
                        ),
                        "units": DataFrame(
                            [extractValues(sampleMetaDict, ["units"])["values"]],
                            index=[rowdate],
                            columns=list(sampleMetaDict.keys()),
                        ),
                        "count": DataFrame(
                            [extractValues(sampleMetaDict, ["count"])["values"]],
                            index=[rowdate],
                            columns=list(sampleMetaDict.keys()),
                        ),
                        "nondetect": DataFrame(
                            [extractValues(sampleMetaDict, ["nondetect"])["values"]],
                            index=[rowdate],
                            columns=list(sampleMetaDict.keys()),
                        ),
                    },
                    axis=1,
                )
                # sampleMetaRow = Series(sampleMetaDict, index=[to_datetime(datetext)], dtype='object')
                # Previous solution was reading/writing from pickle files
                # New solution will keep all data in memory until end.
                # This could cause memory problems with large data sets
                # Test whether a df for this location already exists
                if location in sitesDict:
                    # tempDF = sitesDict[location]
                    # sitesDict[location] = tempDF.append(sampleRow)
                    tempMultiindex = sitesDict[location]
                    sitesDict[location] = concat(
                        [tempMultiindex, sampleMultiindexRow], axis=0
                    )
                else:
                    sitesDict[location] = sampleMultiindexRow
            # add one to number of samples processed
            if processThisSample:
                samples_processed.append(location + " " + datetext)
            else:
                samples_not_processed.append(location + " " + datetext + " - " + reason)
        print(("Number of Samples Processed = " + str(len(samples_processed))))
        print(("Number of Samples Not Processed = " + str(len(samples_not_processed))))
        # Write out individual site data pickle and csv files in each site directory
        print("Writing out site data files...")
        for location, midf in sitesDict.items():
            print(location)
            pickleFile = os.path.join(sitesdir, location, location + "-Dataframe.pkl")
            # NOTE(review): file handles passed to pickle.dump via open() here
            # are never explicitly closed — relies on garbage collection.
            pickle.dump(midf, open(pickleFile, "wb"))
            midx = midf.keys()
            with ExcelWriter(pickleFile[:-3] + "xlsx") as writer:
                # One worksheet per top-level layer (data, units, pcode, ...).
                for sheet in midx.droplevel(level=1).drop_duplicates().values:
                    midf[sheet].to_excel(writer, sheet_name=sheet)
            # Retrieve and store site description metadata
            siteDescriptionDataDF = GetSiteData(location)
            siteDescriptionDataFileName = os.path.join(
                sitesdir, location, location + "-Site-Description.pkl"
            )
            pickle.dump(siteDescriptionDataDF, open(siteDescriptionDataFileName, "wb"))
            siteDescriptionDataDF.to_csv(siteDescriptionDataFileName[:-3] + "csv")
        # Process sites through PHREEQC
        if RUN_PHREEQC:
            print("Processing site water chemisty data in PHREEQC...")
            for location, midf in sitesDict.items():
                phreeqc_df = processMidf(
                    midf, os.path.join(sitesdir, location), PHREEQC_PATH, DATABASE_FILE
                )
                phreeqc_site_file = os.path.join(
                    sitesdir, location, location + "-PHREEQC.pkl"
                )
                try:
                    pickle.dump(phreeqc_df, open(phreeqc_site_file, "wb"))
                    phreeqc_df.to_csv(phreeqc_site_file[:-3] + "csv")
                except IOError:
                    print("Problem writing out PHREEQC data file.")
            if bracket_charge_balance:
                # Re-run PHREEQC twice per site, forcing charge balance on Ca
                # and then on alkalinity, to bracket uncertainty (see docstring).
                for location, midf in sitesDict.items():
                    # Force balance on Calcium
                    phreeqc_df_ca = processMidf(
                        midf,
                        os.path.join(sitesdir, location),
                        PHREEQC_PATH,
                        DATABASE_FILE,
                        force_balance="Ca",
                    )
                    phreeqc_site_file_ca = os.path.join(
                        sitesdir, location, location + "-PHREEQC-Ca.pkl"
                    )
                    try:
                        pickle.dump(phreeqc_df_ca, open(phreeqc_site_file_ca, "wb"))
                        phreeqc_df_ca.to_csv(phreeqc_site_file_ca[:-3] + "csv")
                    except IOError:
                        print("Problem writing out PHREEQC Ca data file.")
                    # Force balance on Alkalinity
                    phreeqc_df_alk = processMidf(
                        midf,
                        os.path.join(sitesdir, location),
                        PHREEQC_PATH,
                        DATABASE_FILE,
                        force_balance="Alk",
                    )
                    phreeqc_site_file_alk = os.path.join(
                        sitesdir, location, location + "-PHREEQC-Alk.pkl"
                    )
                    try:
                        pickle.dump(phreeqc_df_alk, open(phreeqc_site_file_alk, "wb"))
                        phreeqc_df_alk.to_csv(phreeqc_site_file_alk[:-3] + "csv")
                    except IOError:
                        print("Problem writing out PHREEQC Alk data file.")
        # Create log file
        print(("Writing log file: " + LOG_FILE + splittag))
        try:
            log_file = open(LOG_FILE + splittag, "w")
            print("Start file = " + START_FILE, file=log_file)
            print(
                "Number of Samples Processed = " + str(len(samples_processed)),
                file=log_file,
            )
            print(
                "Number of Samples Not Processed = " + str(len(samples_not_processed)),
                file=log_file,
            )
            print("###############", file=log_file)
            print("Characteristics", file=log_file)
            print("###############", file=log_file)
            printColumnNames = True
            for key, flags in charDict.items():
                if printColumnNames:
                    names = ["characteristic"]  # + '\t'
                    for column in flags.keys():
                        names.append(str(column))
                    print(str("\t".join(names)), file=log_file)
                    printColumnNames = False
                columns = [key]
                for column in flags.keys():
                    if isinstance(flags[column], str):
                        columns.append(flags[column])
                print(str("\t".join(columns)), file=log_file)
            print("###############", file=log_file)
            print("Samples processed", file=log_file)
            print("###############", file=log_file)
            for line in samples_processed:
                print(line, file=log_file)
            print("###############", file=log_file)
            print("Samples not processed", file=log_file)
            print("###############", file=log_file)
            for line in samples_not_processed:
                print(line, file=log_file)
        except IOError:
            print(("Problem opening log file: " + LOG_FILE))
            return -1
    # exceptions for parsing of xml file
    except IOError:
        print("Error opening xml file. Does it exist?")
    # Note: can throw this error when discharge values are not read correctly,
    # I should fix this, 6/16/2014
    except etree.XMLSyntaxError as detail:
        print("File contains invalid XML syntax: ", detail)
    except requests.exceptions.RequestException as detail:
        print("Error retrieving data by xml query: ", detail)
    return 0
def runWQXtoPandas(startfilename, autosplitnum=20):
    """
    Runs WQXtoPandas on an excel format input file where parameters can be set for an automatic query of data from
    the USGS NWIS database.
    Parameters
    ----------
    startfilename : string
        A string containing the name of the excel file to be used for input parameters to WQXtoPandas
    autosplitnum : int (optional)
        The number of sites at which a NWIS query is split into multiple queries. (default=20)
    Returns
    -------
    None
    Notes
    -----
    Can be run from within a python shell or script, or as a standalone script from the command line where the start
    file name is provided as the first command line argument (e.g. WQXtoPandas <start file name> <autosplitnum>).
    """
    # PHREEQC input file path
    PHREEQC_INPUT_PATH = "./"
    # NOTE(review): num_samples/num_processed are never updated or returned
    # in this function — apparently vestigial.
    num_samples = 0
    num_processed = 0
    # Strict type check: callers must pass an actual int (a numeric string
    # from sys.argv is rejected here).
    if not (type(autosplitnum) == int):
        print("autosplitnum must be an integer.")
        return -1
    print(("Processing: " + startfilename))
    try:
        # open start file
        startfile = xlrd.open_workbook(startfilename)
        # open sheet
        sheet = startfile.sheet_by_index(0)
        # parse start file to determine what should be done
        characteristicsBlockStarted = False
        settingsDict = {}
        charDict = {}
        for rownum in range(sheet.nrows):
            line = sheet.row_values(rownum)
            # NOTE(review): assumes the first cell of every row is non-empty;
            # an empty first cell would raise IndexError on line[0][0].
            if not (line[0][0] == "#"):  # ignore comments
                if not (characteristicsBlockStarted):  # read script settings
                    if not (line[0] == "Characteristic"):
                        settingsDict[line[0]] = line[1]
                    else:  # grab the characteristic block column headings
                        column_headings = line[1:]
                        characteristicsBlockStarted = True
                else:  # we are in the characteristics block
                    charDict[line[0]] = dict(list(zip(column_headings, line[1:])))
        DATABASE_FILE = os.path.join(
            settingsDict["Path to chemical database"],
            settingsDict["Name of chemical database"],
        )
        LOG_FILE = os.path.join(
            settingsDict["Path to output directory"],
            settingsDict["Name of output directory"],
            settingsDict["Log file name"],
        )
        RUN_PHREEQC = settingsDict["Run PHREEQC?"] == "Yes"
        bracket_charge_balance = settingsDict["Force balance on Ca and Alk"] == "Yes"
        if settingsDict["Input method"] == "1":
            # We already have an XML file to process that contains water quality data
            # Check whether a wildcard was used and more than one xml file is available
            xml_file_string = os.path.join(
                settingsDict["Path to output directory"],
                settingsDict["Name of output directory"],
                settingsDict["Input file"],
            )
            xml_list = glob(xml_file_string)
            if xml_list == []:
                print("Empty xml file list. Check path for xml file.")
                print("xml file string =", xml_file_string)
                return -1
            n_xml = len(xml_list)
            if n_xml > 1:
                for xml_file in xml_list:
                    WQXtoPandas(
                        xml_file,
                        charDict,
                        outputPath=settingsDict["Path to output directory"],
                        outputDirName=settingsDict["Name of output directory"],
                        fromFile=True,
                        RUN_PHREEQC=RUN_PHREEQC,
                        bracket_charge_balance=bracket_charge_balance,
                        PHREEQC_PATH=settingsDict["Path to PHREEQC"],
                        DATABASE_FILE=DATABASE_FILE,
                        LOG_FILE=LOG_FILE,
                        START_FILE=startfilename,
                    )
            else:
                WQXtoPandas(
                    # settingsDict["Input file"], # appears to be a path bug
                    xml_list[0],
                    charDict,
                    outputPath=settingsDict["Path to output directory"],
                    outputDirName=settingsDict["Name of output directory"],
                    fromFile=True,
                    RUN_PHREEQC=RUN_PHREEQC,
                    bracket_charge_balance=bracket_charge_balance,
                    PHREEQC_PATH=settingsDict["Path to PHREEQC"],
                    DATABASE_FILE=DATABASE_FILE,
                    LOG_FILE=LOG_FILE,
                    START_FILE=startfilename,
                )
        elif settingsDict["Input method"] == "2":
            # We will use a list of sites from a NWIS XML file and query these
            # sites for water quality data
            # First extract site list from XML file
            try:
                siteList = extractSitesFromXML(settingsDict["Input file"])
            except IOError:
                print(
                    "Problem extracting sites from XML file "
                    + settingsDict["Input file"]
                    + " check to see if file name is correct and file is in right location."
                )
                return -1
            charList = []
            # collect list of characteristics to query
            for key in charDict.keys():
                charList.append(str(key))
            if len(siteList) > autosplitnum:
                # We have too long of a list and should split into multiple queries
                n_groups = int(ceil(len(siteList) / float(autosplitnum)))
                for i in range(n_groups):  # this doesn't work for even division cases
                    shortList = siteList[
                        i * autosplitnum : i * autosplitnum + autosplitnum
                    ]
                    queryText = querySiteList(shortList, charList)
                    if queryText != None:
                        WQXtoPandas(
                            queryText,
                            charDict,
                            outputPath=settingsDict["Path to output directory"],
                            outputDirName=settingsDict["Name of output directory"],
                            fromFile=False,
                            RUN_PHREEQC=RUN_PHREEQC,
                            bracket_charge_balance=bracket_charge_balance,
                            PHREEQC_PATH=settingsDict["Path to PHREEQC"],
                            DATABASE_FILE=DATABASE_FILE,
                            splittag="." + str(i),
                            LOG_FILE=LOG_FILE,
                            START_FILE=startfilename,
                        )
            else:
                # get html for query
                queryText = querySiteList(siteList, charList)
                if queryText != None:
                    WQXtoPandas(
                        queryText,
                        charDict,
                        outputPath=settingsDict["Path to output directory"],
                        outputDirName=settingsDict["Name of output directory"],
                        fromFile=False,
                        RUN_PHREEQC=RUN_PHREEQC,
                        bracket_charge_balance=bracket_charge_balance,
                        PHREEQC_PATH=settingsDict["Path to PHREEQC"],
                        DATABASE_FILE=DATABASE_FILE,
                        LOG_FILE=LOG_FILE,
                        START_FILE=startfilename,
                    )
        elif settingsDict["Input method"] == "3":
            # We will use a list of sites from a text file and query these
            # sites for water quality data
            # First extract site list from text file
            try:
                siteList = extractSitesFromText(settingsDict["Input file"])
            except IOError:
                print(
                    "Problem extracting sites from text file "
                    + settingsDict["Input file"]
                    + " check to see if file name is correct and file is in right location."
                )
                return -1
            if siteList != -1:
                charList = []
                # collect list of characteristics to query
                for key in charDict.keys():
                    charList.append(str(key))
                if len(siteList) > autosplitnum:
                    # We have too long of a list and should split into multiple queries
                    n_groups = int(ceil(len(siteList) / float(autosplitnum)))
                    for i in range(n_groups):
                        shortList = siteList[
                            i * autosplitnum : i * autosplitnum + autosplitnum
                        ]
                        queryText = querySiteList(shortList, charList)
                        if queryText != None:
                            WQXtoPandas(
                                queryText,
                                charDict,
                                outputPath=settingsDict["Path to output directory"],
                                outputDirName=settingsDict["Name of output directory"],
                                fromFile=False,
                                RUN_PHREEQC=RUN_PHREEQC,
                                bracket_charge_balance=bracket_charge_balance,
                                PHREEQC_PATH=settingsDict["Path to PHREEQC"],
                                splittag="." + str(i),
                                DATABASE_FILE=DATABASE_FILE,
                                LOG_FILE=LOG_FILE,
                                START_FILE=startfilename,
                            )
                else:
                    # get html for query
                    queryText = querySiteList(siteList, charList)
                    if queryText != None:
                        WQXtoPandas(
                            queryText,
                            charDict,
                            outputPath=settingsDict["Path to output directory"],
                            outputDirName=settingsDict["Name of output directory"],
                            fromFile=False,
                            RUN_PHREEQC=RUN_PHREEQC,
                            bracket_charge_balance=bracket_charge_balance,
                            PHREEQC_PATH=settingsDict["Path to PHREEQC"],
                            DATABASE_FILE=DATABASE_FILE,
                            LOG_FILE=LOG_FILE,
                            START_FILE=startfilename,
                        )
            else:
                print("Problem obtaining site list.")
        else:
            print(
                (
                    'Problem with "Input Method" of start file: '
                    + settingsDict["Input method"]
                )
            )
    except IOError:
        print("Problem reading start file. Check file name.")
# Run as script
if __name__ == "__main__":
    # First command-line argument: name of the excel start file.
    startfilename = sys.argv[1]
    if len(sys.argv) > 2:
        # Cast the optional second argument to int: sys.argv values are
        # strings, and runWQXtoPandas rejects a non-integer autosplitnum.
        runWQXtoPandas(startfilename, autosplitnum=int(sys.argv[2]))
    else:
        runWQXtoPandas(startfilename)
| |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement).
# The auto-generated code below refers to the PY2 names `__builtin__` and
# `long`, so alias them to their PY3 equivalents when running on Python 3.
if six.PY3:
    import builtins as __builtin__
    long = int
elif six.PY2:
    import __builtin__
# Auto-generated pyangbind binding: regenerate from the YANG model rather than
# editing this class by hand.
class state(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/mpls/global/reserved-label-blocks/reserved-label-block/state. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: State parameters relating to the label block.
    """
    # Restrict instances to the helper slots plus the (name-mangled) storage
    # for the three leaves defined below.
    __slots__ = (
        "_path_helper", "_extmethods", "__local_id", "__lower_bound", "__upper_bound"
    )
    _yang_name = "state"
    _pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # local-id leaf: plain string, operational state (is_config=False).
        self.__local_id = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="local-id",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="string",
            is_config=False,
        )
        # lower-bound leaf: oc-mplst:mpls-label union -- either a 32-bit label
        # number further restricted to 16..1048575, or one of the well-known
        # reserved-label names below.
        self.__lower_bound = YANGDynClass(
            base=[
                RestrictedClassType(
                    base_type=RestrictedClassType(
                        base_type=long,
                        restriction_dict={"range": ["0..4294967295"]},
                        int_size=32,
                    ),
                    restriction_dict={"range": ["16..1048575"]},
                ),
                RestrictedClassType(
                    base_type=six.text_type,
                    restriction_type="dict_key",
                    restriction_arg={
                        "IPV4_EXPLICIT_NULL": {"value": 0},
                        "ROUTER_ALERT": {"value": 1},
                        "IPV6_EXPLICIT_NULL": {"value": 2},
                        "IMPLICIT_NULL": {"value": 3},
                        "ENTROPY_LABEL_INDICATOR": {"value": 7},
                        "NO_LABEL": {},
                    },
                ),
            ],
            is_leaf=True,
            yang_name="lower-bound",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-mplst:mpls-label",
            is_config=False,
        )
        # upper-bound leaf: same union type as lower-bound.
        self.__upper_bound = YANGDynClass(
            base=[
                RestrictedClassType(
                    base_type=RestrictedClassType(
                        base_type=long,
                        restriction_dict={"range": ["0..4294967295"]},
                        int_size=32,
                    ),
                    restriction_dict={"range": ["16..1048575"]},
                ),
                RestrictedClassType(
                    base_type=six.text_type,
                    restriction_type="dict_key",
                    restriction_arg={
                        "IPV4_EXPLICIT_NULL": {"value": 0},
                        "ROUTER_ALERT": {"value": 1},
                        "IPV6_EXPLICIT_NULL": {"value": 2},
                        "IMPLICIT_NULL": {"value": 3},
                        "ENTROPY_LABEL_INDICATOR": {"value": 7},
                        "NO_LABEL": {},
                    },
                ),
            ],
            is_leaf=True,
            yang_name="upper-bound",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-mplst:mpls-label",
            is_config=False,
        )
        # A single positional argument is treated as another container of the
        # same shape: every element that reports itself as changed is copied
        # into this instance via its _set_* method.
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        # Delegate to the parent when attached to a tree; otherwise return the
        # absolute YANG path of this container.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "mpls",
                "global",
                "reserved-label-blocks",
                "reserved-label-block",
                "state",
            ]
    def _get_local_id(self):
        """
        Getter method for local_id, mapped from YANG variable /network_instances/network_instance/mpls/global/reserved_label_blocks/reserved_label_block/state/local_id (string)
        YANG Description: A local identifier for the global label block allocation.
        """
        return self.__local_id
    def _set_local_id(self, v, load=False):
        """
        Setter method for local_id, mapped from YANG variable /network_instances/network_instance/mpls/global/reserved_label_blocks/reserved_label_block/state/local_id (string)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_local_id is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_local_id() directly.
        YANG Description: A local identifier for the global label block allocation.
        """
        # Unwrap a previously-wrapped value so it is revalidated below.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=six.text_type,
                is_leaf=True,
                yang_name="local-id",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="string",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """local_id must be of a type compatible with string""",
                    "defined-type": "string",
                    "generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="local-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)""",
                }
            )
        self.__local_id = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_local_id(self):
        # Reset local_id to a fresh, unchanged default instance.
        self.__local_id = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="local-id",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="string",
            is_config=False,
        )
    def _get_lower_bound(self):
        """
        Getter method for lower_bound, mapped from YANG variable /network_instances/network_instance/mpls/global/reserved_label_blocks/reserved_label_block/state/lower_bound (oc-mplst:mpls-label)
        YANG Description: Lower bound of the global label block. The block is defined to include
        this label.
        """
        return self.__lower_bound
    def _set_lower_bound(self, v, load=False):
        """
        Setter method for lower_bound, mapped from YANG variable /network_instances/network_instance/mpls/global/reserved_label_blocks/reserved_label_block/state/lower_bound (oc-mplst:mpls-label)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_lower_bound is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_lower_bound() directly.
        YANG Description: Lower bound of the global label block. The block is defined to include
        this label.
        """
        # Unwrap a previously-wrapped value so it is revalidated below.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=[
                    RestrictedClassType(
                        base_type=RestrictedClassType(
                            base_type=long,
                            restriction_dict={"range": ["0..4294967295"]},
                            int_size=32,
                        ),
                        restriction_dict={"range": ["16..1048575"]},
                    ),
                    RestrictedClassType(
                        base_type=six.text_type,
                        restriction_type="dict_key",
                        restriction_arg={
                            "IPV4_EXPLICIT_NULL": {"value": 0},
                            "ROUTER_ALERT": {"value": 1},
                            "IPV6_EXPLICIT_NULL": {"value": 2},
                            "IMPLICIT_NULL": {"value": 3},
                            "ENTROPY_LABEL_INDICATOR": {"value": 7},
                            "NO_LABEL": {},
                        },
                    ),
                ],
                is_leaf=True,
                yang_name="lower-bound",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="oc-mplst:mpls-label",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """lower_bound must be of a type compatible with oc-mplst:mpls-label""",
                    "defined-type": "oc-mplst:mpls-label",
                    "generated-type": """YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': ['16..1048575']}),RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'IPV4_EXPLICIT_NULL': {'value': 0}, 'ROUTER_ALERT': {'value': 1}, 'IPV6_EXPLICIT_NULL': {'value': 2}, 'IMPLICIT_NULL': {'value': 3}, 'ENTROPY_LABEL_INDICATOR': {'value': 7}, 'NO_LABEL': {}},),], is_leaf=True, yang_name="lower-bound", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-mplst:mpls-label', is_config=False)""",
                }
            )
        self.__lower_bound = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_lower_bound(self):
        # Reset lower_bound to a fresh, unchanged default instance.
        self.__lower_bound = YANGDynClass(
            base=[
                RestrictedClassType(
                    base_type=RestrictedClassType(
                        base_type=long,
                        restriction_dict={"range": ["0..4294967295"]},
                        int_size=32,
                    ),
                    restriction_dict={"range": ["16..1048575"]},
                ),
                RestrictedClassType(
                    base_type=six.text_type,
                    restriction_type="dict_key",
                    restriction_arg={
                        "IPV4_EXPLICIT_NULL": {"value": 0},
                        "ROUTER_ALERT": {"value": 1},
                        "IPV6_EXPLICIT_NULL": {"value": 2},
                        "IMPLICIT_NULL": {"value": 3},
                        "ENTROPY_LABEL_INDICATOR": {"value": 7},
                        "NO_LABEL": {},
                    },
                ),
            ],
            is_leaf=True,
            yang_name="lower-bound",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-mplst:mpls-label",
            is_config=False,
        )
    def _get_upper_bound(self):
        """
        Getter method for upper_bound, mapped from YANG variable /network_instances/network_instance/mpls/global/reserved_label_blocks/reserved_label_block/state/upper_bound (oc-mplst:mpls-label)
        YANG Description: Upper bound for the global label block. The block is defined to include
        this label.
        """
        return self.__upper_bound
    def _set_upper_bound(self, v, load=False):
        """
        Setter method for upper_bound, mapped from YANG variable /network_instances/network_instance/mpls/global/reserved_label_blocks/reserved_label_block/state/upper_bound (oc-mplst:mpls-label)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_upper_bound is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_upper_bound() directly.
        YANG Description: Upper bound for the global label block. The block is defined to include
        this label.
        """
        # Unwrap a previously-wrapped value so it is revalidated below.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=[
                    RestrictedClassType(
                        base_type=RestrictedClassType(
                            base_type=long,
                            restriction_dict={"range": ["0..4294967295"]},
                            int_size=32,
                        ),
                        restriction_dict={"range": ["16..1048575"]},
                    ),
                    RestrictedClassType(
                        base_type=six.text_type,
                        restriction_type="dict_key",
                        restriction_arg={
                            "IPV4_EXPLICIT_NULL": {"value": 0},
                            "ROUTER_ALERT": {"value": 1},
                            "IPV6_EXPLICIT_NULL": {"value": 2},
                            "IMPLICIT_NULL": {"value": 3},
                            "ENTROPY_LABEL_INDICATOR": {"value": 7},
                            "NO_LABEL": {},
                        },
                    ),
                ],
                is_leaf=True,
                yang_name="upper-bound",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="oc-mplst:mpls-label",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """upper_bound must be of a type compatible with oc-mplst:mpls-label""",
                    "defined-type": "oc-mplst:mpls-label",
                    "generated-type": """YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': ['16..1048575']}),RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'IPV4_EXPLICIT_NULL': {'value': 0}, 'ROUTER_ALERT': {'value': 1}, 'IPV6_EXPLICIT_NULL': {'value': 2}, 'IMPLICIT_NULL': {'value': 3}, 'ENTROPY_LABEL_INDICATOR': {'value': 7}, 'NO_LABEL': {}},),], is_leaf=True, yang_name="upper-bound", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-mplst:mpls-label', is_config=False)""",
                }
            )
        self.__upper_bound = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_upper_bound(self):
        # Reset upper_bound to a fresh, unchanged default instance.
        self.__upper_bound = YANGDynClass(
            base=[
                RestrictedClassType(
                    base_type=RestrictedClassType(
                        base_type=long,
                        restriction_dict={"range": ["0..4294967295"]},
                        int_size=32,
                    ),
                    restriction_dict={"range": ["16..1048575"]},
                ),
                RestrictedClassType(
                    base_type=six.text_type,
                    restriction_type="dict_key",
                    restriction_arg={
                        "IPV4_EXPLICIT_NULL": {"value": 0},
                        "ROUTER_ALERT": {"value": 1},
                        "IPV6_EXPLICIT_NULL": {"value": 2},
                        "IMPLICIT_NULL": {"value": 3},
                        "ENTROPY_LABEL_INDICATOR": {"value": 7},
                        "NO_LABEL": {},
                    },
                ),
            ],
            is_leaf=True,
            yang_name="upper-bound",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-mplst:mpls-label",
            is_config=False,
        )
    # Read-only public accessors: is_config=False means getters only.
    local_id = __builtin__.property(_get_local_id)
    lower_bound = __builtin__.property(_get_lower_bound)
    upper_bound = __builtin__.property(_get_upper_bound)
    _pyangbind_elements = OrderedDict(
        [
            ("local_id", local_id),
            ("lower_bound", lower_bound),
            ("upper_bound", upper_bound),
        ]
    )
# NOTE(review): this auto-generated class (from openconfig-network-instance-l2)
# rebinds the name ``state`` defined earlier in this file at the same scope --
# confirm the generator intended the later definition to win.
class state(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/mpls/global/reserved-label-blocks/reserved-label-block/state. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: State parameters relating to the label block.
    """
    # Restrict instances to the helper slots plus the (name-mangled) storage
    # for the three leaves defined below.
    __slots__ = (
        "_path_helper", "_extmethods", "__local_id", "__lower_bound", "__upper_bound"
    )
    _yang_name = "state"
    _pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # local-id leaf: plain string, operational state (is_config=False).
        self.__local_id = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="local-id",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="string",
            is_config=False,
        )
        # lower-bound leaf: oc-mplst:mpls-label union -- either a 32-bit label
        # number further restricted to 16..1048575, or one of the well-known
        # reserved-label names below.
        self.__lower_bound = YANGDynClass(
            base=[
                RestrictedClassType(
                    base_type=RestrictedClassType(
                        base_type=long,
                        restriction_dict={"range": ["0..4294967295"]},
                        int_size=32,
                    ),
                    restriction_dict={"range": ["16..1048575"]},
                ),
                RestrictedClassType(
                    base_type=six.text_type,
                    restriction_type="dict_key",
                    restriction_arg={
                        "IPV4_EXPLICIT_NULL": {"value": 0},
                        "ROUTER_ALERT": {"value": 1},
                        "IPV6_EXPLICIT_NULL": {"value": 2},
                        "IMPLICIT_NULL": {"value": 3},
                        "ENTROPY_LABEL_INDICATOR": {"value": 7},
                        "NO_LABEL": {},
                    },
                ),
            ],
            is_leaf=True,
            yang_name="lower-bound",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-mplst:mpls-label",
            is_config=False,
        )
        # upper-bound leaf: same union type as lower-bound.
        self.__upper_bound = YANGDynClass(
            base=[
                RestrictedClassType(
                    base_type=RestrictedClassType(
                        base_type=long,
                        restriction_dict={"range": ["0..4294967295"]},
                        int_size=32,
                    ),
                    restriction_dict={"range": ["16..1048575"]},
                ),
                RestrictedClassType(
                    base_type=six.text_type,
                    restriction_type="dict_key",
                    restriction_arg={
                        "IPV4_EXPLICIT_NULL": {"value": 0},
                        "ROUTER_ALERT": {"value": 1},
                        "IPV6_EXPLICIT_NULL": {"value": 2},
                        "IMPLICIT_NULL": {"value": 3},
                        "ENTROPY_LABEL_INDICATOR": {"value": 7},
                        "NO_LABEL": {},
                    },
                ),
            ],
            is_leaf=True,
            yang_name="upper-bound",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-mplst:mpls-label",
            is_config=False,
        )
        # A single positional argument is treated as another container of the
        # same shape: every element that reports itself as changed is copied
        # into this instance via its _set_* method.
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        # Delegate to the parent when attached to a tree; otherwise return the
        # absolute YANG path of this container.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "mpls",
                "global",
                "reserved-label-blocks",
                "reserved-label-block",
                "state",
            ]
    def _get_local_id(self):
        """
        Getter method for local_id, mapped from YANG variable /network_instances/network_instance/mpls/global/reserved_label_blocks/reserved_label_block/state/local_id (string)
        YANG Description: A local identifier for the global label block allocation.
        """
        return self.__local_id
    def _set_local_id(self, v, load=False):
        """
        Setter method for local_id, mapped from YANG variable /network_instances/network_instance/mpls/global/reserved_label_blocks/reserved_label_block/state/local_id (string)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_local_id is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_local_id() directly.
        YANG Description: A local identifier for the global label block allocation.
        """
        # Unwrap a previously-wrapped value so it is revalidated below.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=six.text_type,
                is_leaf=True,
                yang_name="local-id",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="string",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """local_id must be of a type compatible with string""",
                    "defined-type": "string",
                    "generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="local-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)""",
                }
            )
        self.__local_id = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_local_id(self):
        # Reset local_id to a fresh, unchanged default instance.
        self.__local_id = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="local-id",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="string",
            is_config=False,
        )
    def _get_lower_bound(self):
        """
        Getter method for lower_bound, mapped from YANG variable /network_instances/network_instance/mpls/global/reserved_label_blocks/reserved_label_block/state/lower_bound (oc-mplst:mpls-label)
        YANG Description: Lower bound of the global label block. The block is defined to include
        this label.
        """
        return self.__lower_bound
    def _set_lower_bound(self, v, load=False):
        """
        Setter method for lower_bound, mapped from YANG variable /network_instances/network_instance/mpls/global/reserved_label_blocks/reserved_label_block/state/lower_bound (oc-mplst:mpls-label)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_lower_bound is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_lower_bound() directly.
        YANG Description: Lower bound of the global label block. The block is defined to include
        this label.
        """
        # Unwrap a previously-wrapped value so it is revalidated below.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=[
                    RestrictedClassType(
                        base_type=RestrictedClassType(
                            base_type=long,
                            restriction_dict={"range": ["0..4294967295"]},
                            int_size=32,
                        ),
                        restriction_dict={"range": ["16..1048575"]},
                    ),
                    RestrictedClassType(
                        base_type=six.text_type,
                        restriction_type="dict_key",
                        restriction_arg={
                            "IPV4_EXPLICIT_NULL": {"value": 0},
                            "ROUTER_ALERT": {"value": 1},
                            "IPV6_EXPLICIT_NULL": {"value": 2},
                            "IMPLICIT_NULL": {"value": 3},
                            "ENTROPY_LABEL_INDICATOR": {"value": 7},
                            "NO_LABEL": {},
                        },
                    ),
                ],
                is_leaf=True,
                yang_name="lower-bound",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="oc-mplst:mpls-label",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """lower_bound must be of a type compatible with oc-mplst:mpls-label""",
                    "defined-type": "oc-mplst:mpls-label",
                    "generated-type": """YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': ['16..1048575']}),RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'IPV4_EXPLICIT_NULL': {'value': 0}, 'ROUTER_ALERT': {'value': 1}, 'IPV6_EXPLICIT_NULL': {'value': 2}, 'IMPLICIT_NULL': {'value': 3}, 'ENTROPY_LABEL_INDICATOR': {'value': 7}, 'NO_LABEL': {}},),], is_leaf=True, yang_name="lower-bound", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-mplst:mpls-label', is_config=False)""",
                }
            )
        self.__lower_bound = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_lower_bound(self):
        # Reset lower_bound to a fresh, unchanged default instance.
        self.__lower_bound = YANGDynClass(
            base=[
                RestrictedClassType(
                    base_type=RestrictedClassType(
                        base_type=long,
                        restriction_dict={"range": ["0..4294967295"]},
                        int_size=32,
                    ),
                    restriction_dict={"range": ["16..1048575"]},
                ),
                RestrictedClassType(
                    base_type=six.text_type,
                    restriction_type="dict_key",
                    restriction_arg={
                        "IPV4_EXPLICIT_NULL": {"value": 0},
                        "ROUTER_ALERT": {"value": 1},
                        "IPV6_EXPLICIT_NULL": {"value": 2},
                        "IMPLICIT_NULL": {"value": 3},
                        "ENTROPY_LABEL_INDICATOR": {"value": 7},
                        "NO_LABEL": {},
                    },
                ),
            ],
            is_leaf=True,
            yang_name="lower-bound",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-mplst:mpls-label",
            is_config=False,
        )
    def _get_upper_bound(self):
        """
        Getter method for upper_bound, mapped from YANG variable /network_instances/network_instance/mpls/global/reserved_label_blocks/reserved_label_block/state/upper_bound (oc-mplst:mpls-label)
        YANG Description: Upper bound for the global label block. The block is defined to include
        this label.
        """
        return self.__upper_bound
    def _set_upper_bound(self, v, load=False):
        """
        Setter method for upper_bound, mapped from YANG variable /network_instances/network_instance/mpls/global/reserved_label_blocks/reserved_label_block/state/upper_bound (oc-mplst:mpls-label)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_upper_bound is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_upper_bound() directly.
        YANG Description: Upper bound for the global label block. The block is defined to include
        this label.
        """
        # Unwrap a previously-wrapped value so it is revalidated below.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=[
                    RestrictedClassType(
                        base_type=RestrictedClassType(
                            base_type=long,
                            restriction_dict={"range": ["0..4294967295"]},
                            int_size=32,
                        ),
                        restriction_dict={"range": ["16..1048575"]},
                    ),
                    RestrictedClassType(
                        base_type=six.text_type,
                        restriction_type="dict_key",
                        restriction_arg={
                            "IPV4_EXPLICIT_NULL": {"value": 0},
                            "ROUTER_ALERT": {"value": 1},
                            "IPV6_EXPLICIT_NULL": {"value": 2},
                            "IMPLICIT_NULL": {"value": 3},
                            "ENTROPY_LABEL_INDICATOR": {"value": 7},
                            "NO_LABEL": {},
                        },
                    ),
                ],
                is_leaf=True,
                yang_name="upper-bound",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="oc-mplst:mpls-label",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """upper_bound must be of a type compatible with oc-mplst:mpls-label""",
                    "defined-type": "oc-mplst:mpls-label",
                    "generated-type": """YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': ['16..1048575']}),RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'IPV4_EXPLICIT_NULL': {'value': 0}, 'ROUTER_ALERT': {'value': 1}, 'IPV6_EXPLICIT_NULL': {'value': 2}, 'IMPLICIT_NULL': {'value': 3}, 'ENTROPY_LABEL_INDICATOR': {'value': 7}, 'NO_LABEL': {}},),], is_leaf=True, yang_name="upper-bound", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-mplst:mpls-label', is_config=False)""",
                }
            )
        self.__upper_bound = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_upper_bound(self):
        # Reset upper_bound to a fresh, unchanged default instance.
        self.__upper_bound = YANGDynClass(
            base=[
                RestrictedClassType(
                    base_type=RestrictedClassType(
                        base_type=long,
                        restriction_dict={"range": ["0..4294967295"]},
                        int_size=32,
                    ),
                    restriction_dict={"range": ["16..1048575"]},
                ),
                RestrictedClassType(
                    base_type=six.text_type,
                    restriction_type="dict_key",
                    restriction_arg={
                        "IPV4_EXPLICIT_NULL": {"value": 0},
                        "ROUTER_ALERT": {"value": 1},
                        "IPV6_EXPLICIT_NULL": {"value": 2},
                        "IMPLICIT_NULL": {"value": 3},
                        "ENTROPY_LABEL_INDICATOR": {"value": 7},
                        "NO_LABEL": {},
                    },
                ),
            ],
            is_leaf=True,
            yang_name="upper-bound",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-mplst:mpls-label",
            is_config=False,
        )
    # Read-only public accessors: is_config=False means getters only.
    local_id = __builtin__.property(_get_local_id)
    lower_bound = __builtin__.property(_get_lower_bound)
    upper_bound = __builtin__.property(_get_upper_bound)
    _pyangbind_elements = OrderedDict(
        [
            ("local_id", local_id),
            ("lower_bound", lower_bound),
            ("upper_bound", upper_bound),
        ]
    )
| |
import tempfile
import shutil
import os
from django.test import TransactionTestCase
from django.core.files.uploadedfile import UploadedFile
from django.contrib.auth.models import Group
from django.core.exceptions import ValidationError
from django.db.utils import IntegrityError
from hs_core import hydroshare
from hs_core.hydroshare import utils
from hs_core.models import CoreMetaData, Creator, Contributor, Coverage, Rights, Title, Language, \
Publisher, Identifier, Type, Subject, Description, Date, Format, Relation, Source
from hs_core.testing import MockIRODSTestCaseMixin
from hs_modflow_modelinstance.models import MODFLOWModelInstanceMetaData, ModelOutput, ExecutedBy, \
StudyArea, GridDimensions, StressPeriod, GroundWaterFlow, BoundaryCondition, ModelCalibration, \
ModelInput, GeneralElements
# cmd to run tests: ./hsctl managepy test --keepdb hs_modflow_modelinstance/tests
class TestMODFLOWModelInstanceMetaData(MockIRODSTestCaseMixin, TransactionTestCase):
def setUp(self):
super(TestMODFLOWModelInstanceMetaData, self).setUp()
self.group, _ = Group.objects.get_or_create(name='Resource Author')
self.user = hydroshare.create_account(
'user1@nowhere.com',
username='user1',
first_name='Creator_FirstName',
last_name='Creator_LastName',
superuser=False,
groups=[self.group]
)
self.res = hydroshare.create_resource(
resource_type='MODFLOWModelInstanceResource',
owner=self.user,
title='Test MODFLOW Model Instance Resource'
)
self.resGenModelProgram = hydroshare.create_resource(
resource_type='ModelProgramResource',
owner=self.user,
title='Model MODFLOW Program Resource'
)
self.resMODFLOWModelProgram = hydroshare.create_resource(
resource_type='ModelProgramResource',
owner=self.user,
title='Model Program Resource 2'
)
self.temp_dir = tempfile.mkdtemp()
d = 'hs_modflow_modelinstance/tests/modflow_example/'
self.file_list = []
self.file_names = []
self.sample_nam_name = 'example.nam'
self.sample_nam_name2 = 'example2.nam'
for file in os.listdir(d):
self.file_names.append(file)
target_temp_file = os.path.join(self.temp_dir, file)
shutil.copy("{}{}".format(d, file), target_temp_file)
if self.sample_nam_name == file:
self.sample_nam_obj = open(target_temp_file, 'r')
elif self.sample_nam_name2 == file:
self.sample_nam_obj2 = open(target_temp_file, 'r')
else:
self.file_list.append(target_temp_file)
self.file_name = "MIR.txt"
temp_text_file = os.path.join(self.temp_dir, self.file_name)
text_file = open(temp_text_file, 'w')
text_file.write("Model Instance resource files")
text_file.close()
self.text_file_obj = open(temp_text_file, 'r')
self.file_name_2 = "MIR.csv"
temp_text_file_2 = os.path.join(self.temp_dir, self.file_name_2)
text_file = open(temp_text_file_2, 'w')
text_file.write("Model,Instance,resource,files")
text_file.close()
self.text_file_obj_2 = open(temp_text_file_2, 'r')
def tearDown(self):
super(TestMODFLOWModelInstanceMetaData, self).tearDown()
if os.path.exists(self.temp_dir):
shutil.rmtree(self.temp_dir)
def test_allowed_file_types(self):
# test allowed file type is '.*'
self.assertEqual(self.res.get_supported_upload_file_types(), '.*')
# there should not be any content file
self.assertEqual(self.res.files.all().count(), 0)
# Upload any file type should pass both the file pre add check post add check
files = [UploadedFile(file=self.text_file_obj, name=self.text_file_obj.name)]
utils.resource_file_add_pre_process(resource=self.res, files=files, user=self.user,
extract_metadata=False)
utils.resource_file_add_process(resource=self.res, files=files, user=self.user,
extract_metadata=False)
# there should one content file
self.assertEqual(self.res.files.all().count(), 1)
# check that there are no extended metadata elements at this point
self.assertEqual(self.res.metadata.model_output, None)
self.assertEqual(self.res.metadata.executed_by, None)
# Upload any other file type should pass both the file pre add check post add check
files = [UploadedFile(file=self.text_file_obj_2, name=self.text_file_obj_2.name)]
utils.resource_file_add_pre_process(resource=self.res, files=files, user=self.user,
extract_metadata=True)
utils.resource_file_add_process(resource=self.res, files=files, user=self.user,
extract_metadata=True)
# there should two content files
self.assertEqual(self.res.files.all().count(), 2)
# check that there are no extended metadata elements at this point
self.assertEqual(self.res.metadata.model_output, None)
self.assertEqual(self.res.metadata.executed_by, None)
def test_extended_metadata_CRUD(self):
    """Exercise create, update, and delete for every MODFLOW-specific
    metadata element.

    Covers three behaviors per element type: normal CRUD, uniqueness of
    single-instance elements (second create raises IntegrityError), and
    choice-field validation (bad package/type names raise ValidationError).
    The steps are strictly order-dependent: each section builds on the
    elements created by the previous one.
    """
    # test the core metadata at this point
    # there should be a title element
    self.assertEqual(self.res.metadata.title.value, 'Test MODFLOW Model Instance Resource')
    # there should be a creator element
    self.assertEqual(self.res.metadata.creators.count(), 1)
    # there should be a type element
    self.assertNotEqual(self.res.metadata.type, None)
    # there should be an identifier element
    self.assertEqual(self.res.metadata.identifiers.count(), 1)
    # there should be a rights element
    self.assertNotEqual(self.res.metadata.rights, None)
    # there shouldn't be any source element
    self.assertEqual(self.res.metadata.sources.count(), 0)
    # there shouldn't be any relation element
    self.assertEqual(self.res.metadata.relations.count(), 0)
    # there shouldn't be any abstract element
    self.assertEqual(self.res.metadata.description, None)
    # there shouldn't be any coverage element
    self.assertEqual(self.res.metadata.coverages.all().count(), 0)
    # there shouldn't be any format element
    self.assertEqual(self.res.metadata.formats.all().count(), 0)
    # there shouldn't be any subject element
    self.assertEqual(self.res.metadata.subjects.all().count(), 0)
    # there shouldn't be any contributor element
    self.assertEqual(self.res.metadata.contributors.all().count(), 0)
    # check that there are no extended metadata elements at this point
    self.assertEqual(self.res.metadata.model_output, None)
    self.assertEqual(self.res.metadata.executed_by, None)
    self.assertEqual(self.res.metadata.study_area, None)
    self.assertEqual(self.res.metadata.grid_dimensions, None)
    self.assertEqual(self.res.metadata.stress_period, None)
    self.assertEqual(self.res.metadata.ground_water_flow, None)
    self.assertEqual(self.res.metadata.boundary_condition, None)
    self.assertEqual(self.res.metadata.model_calibration, None)
    self.assertEqual(len(self.res.metadata.model_inputs), 0)
    self.assertEqual(self.res.metadata.general_elements, None)
    # ---------------- create ----------------
    # create ModelOutput
    self.res.metadata.create_element('ModelOutput', includes_output=False)
    modeloutput_element = self.res.metadata.model_output
    self.assertEqual(modeloutput_element.includes_output, False)
    # multiple ModelOutput elements are not allowed - it would raise an exception
    with self.assertRaises(IntegrityError):
        self.res.metadata.create_element('ModelOutput', includes_output=True)
    with self.assertRaises(IntegrityError):
        self.res.metadata.create_element('ModelOutput', includes_output=False)
    # delete and recreate with the opposite flag to cover both values
    self.res.metadata.delete_element('ModelOutput',
                                     self.res.metadata.model_output.id)
    self.assertEqual(self.res.metadata.model_output, None)
    self.res.metadata.create_element('ModelOutput', includes_output=True)
    modeloutput_element = self.res.metadata.model_output
    self.assertEqual(modeloutput_element.includes_output, True)
    # multiple ModelOutput elements are not allowed - it would raise an exception
    # NOTE(review): the second create below never executes because the first
    # one raises inside the same assertRaises block - presumably intentional
    # mirroring of the two separate checks above, but worth confirming
    with self.assertRaises(IntegrityError):
        self.res.metadata.create_element('ModelOutput', includes_output=True)
        self.res.metadata.create_element('ModelOutput', includes_output=False)
    # create ExecutedBy
    self.res.metadata.create_element('ExecutedBy', model_name=self.resGenModelProgram.short_id)
    modelparam_element = self.res.metadata.executed_by
    # ExecutedBy resolves the program resource's short id into its title
    # and a foreign key to the model program resource
    self.assertEqual(modelparam_element.model_name,
                     self.resGenModelProgram.metadata.title.value)
    self.assertEqual(modelparam_element.model_program_fk, self.resGenModelProgram)
    # multiple ExecutedBy elements are not allowed - it would raise an exception
    with self.assertRaises(IntegrityError):
        self.res.metadata.create_element('ExecutedBy',
                                         model_name=self.resMODFLOWModelProgram.short_id)
    # create StudyArea
    self.res.metadata.create_element('StudyArea',
                                     totalLength='a',
                                     totalWidth='b',
                                     maximumElevation='c',
                                     minimumElevation='d')
    modelparam_element = self.res.metadata.study_area
    self.assertNotEqual(modelparam_element, None)
    self.assertEqual(modelparam_element.totalLength, 'a')
    self.assertEqual(modelparam_element.totalWidth, 'b')
    self.assertEqual(modelparam_element.maximumElevation, 'c')
    self.assertEqual(modelparam_element.minimumElevation, 'd')
    # try to create another StudyArea - it would raise an exception
    with self.assertRaises(IntegrityError):
        self.res.metadata.create_element('StudyArea',
                                         totalLength='b',
                                         totalWidth='c',
                                         maximumElevation='d',
                                         minimumElevation='e')
    # create GridDimensions
    self.res.metadata.create_element('GridDimensions',
                                     numberOfLayers='a',
                                     typeOfRows='Regular',
                                     numberOfRows='c',
                                     typeOfColumns='Irregular',
                                     numberOfColumns='e')
    modelparam_element = self.res.metadata.grid_dimensions
    self.assertNotEqual(modelparam_element, None)
    self.assertEqual(modelparam_element.numberOfLayers, 'a')
    self.assertEqual(modelparam_element.typeOfRows, 'Regular')
    self.assertEqual(modelparam_element.numberOfRows, 'c')
    self.assertEqual(modelparam_element.typeOfColumns, 'Irregular')
    self.assertEqual(modelparam_element.numberOfColumns, 'e')
    # try to create another GridDimensions - it would raise an exception
    with self.assertRaises(IntegrityError):
        self.res.metadata.create_element('GridDimensions',
                                         numberOfLayers='b',
                                         typeOfRows='Irregular',
                                         numberOfRows='c',
                                         typeOfColumns='Regular',
                                         numberOfColumns='z')
    # try with row/column types outside the allowed choices - raises exception
    with self.assertRaises(ValidationError):
        self.res.metadata.create_element('GridDimensions',
                                         numberOfLayers='b',
                                         typeOfRows='catspajamas',
                                         numberOfRows='c',
                                         typeOfColumns='Regular',
                                         numberOfColumns='z')
    with self.assertRaises(ValidationError):
        self.res.metadata.create_element('GridDimensions',
                                         numberOfLayers='b',
                                         typeOfRows='Irregular',
                                         numberOfRows='c',
                                         typeOfColumns='beach',
                                         numberOfColumns='z')
    # create StressPeriod
    self.res.metadata.create_element('StressPeriod',
                                     stressPeriodType='Steady',
                                     steadyStateValue='a',
                                     transientStateValueType='Daily',
                                     transientStateValue='b')
    modelparam_element = self.res.metadata.stress_period
    self.assertNotEqual(modelparam_element, None)
    self.assertEqual(modelparam_element.stressPeriodType, 'Steady')
    self.assertEqual(modelparam_element.steadyStateValue, 'a')
    self.assertEqual(modelparam_element.transientStateValueType, 'Daily')
    self.assertEqual(modelparam_element.transientStateValue, 'b')
    # try to create another StressPeriod - it would raise an exception
    with self.assertRaises(IntegrityError):
        self.res.metadata.create_element('StressPeriod',
                                         stressPeriodType='Steady',
                                         steadyStateValue='a',
                                         transientStateValueType='Daily',
                                         transientStateValue='b')
    # try with invalid stress-period type choices - raises exception
    with self.assertRaises(ValidationError):
        self.res.metadata.create_element('StressPeriod',
                                         stressPeriodType='Steady',
                                         steadyStateValue='a',
                                         transientStateValueType='Daly',
                                         transientStateValue='b')
    with self.assertRaises(ValidationError):
        self.res.metadata.create_element('StressPeriod',
                                         stressPeriodType='Bready',
                                         steadyStateValue='a',
                                         transientStateValueType='Daily',
                                         transientStateValue='b')
    # create GroundWaterFlow
    self.res.metadata.create_element('GroundWaterFlow',
                                     flowPackage='BCF6',
                                     flowParameter='Transmissivity')
    modelparam_element = self.res.metadata.ground_water_flow
    self.assertNotEqual(modelparam_element, None)
    self.assertEqual(modelparam_element.flowPackage, 'BCF6')
    self.assertEqual(modelparam_element.flowParameter, 'Transmissivity')
    # try to create another GroundWaterFlow - it would raise an exception
    with self.assertRaises(IntegrityError):
        self.res.metadata.create_element('GroundWaterFlow',
                                         flowPackage='BCF6',
                                         flowParameter='Transmissivity')
    # try with invalid flow package/parameter choices - raises exception
    with self.assertRaises(ValidationError):
        self.res.metadata.create_element('GroundWaterFlow',
                                         flowPackage='BCFd6',
                                         flowParameter='Transmissivity')
    with self.assertRaises(ValidationError):
        self.res.metadata.create_element('GroundWaterFlow',
                                         flowPackage='BCF6',
                                         flowParameter='Tranasmissivity')
    # create BoundaryCondition
    # try with invalid boundary-condition package names - raises exception
    with self.assertRaises(ValidationError):
        self.res.metadata.create_element('BoundaryCondition',
                                         specified_head_boundary_packages=['BFH'],
                                         specified_flux_boundary_packages=['FHB'],
                                         head_dependent_flux_boundary_packages=['mmm'])
    with self.assertRaises(ValidationError):
        self.res.metadata.create_element('BoundaryCondition',
                                         specified_head_boundary_packages=['BFH'],
                                         specified_flux_boundary_packages=['mmm'],
                                         head_dependent_flux_boundary_packages=['SFR'])
    with self.assertRaises(ValidationError):
        self.res.metadata.create_element('BoundaryCondition',
                                         specified_head_boundary_packages=['mmm'],
                                         specified_flux_boundary_packages=['FHB'],
                                         head_dependent_flux_boundary_packages=['SFR'])
    spec_hd_bd_pkgs = ['CHD', 'FHB']
    spec_fx_bd_pkgs = ['RCH', 'WEL']
    hd_dep_fx_pkgs = ['MNW2', 'GHB', 'LAK']
    self.res.metadata.create_element('BoundaryCondition',
                                     specified_head_boundary_packages=spec_hd_bd_pkgs,
                                     specified_flux_boundary_packages=spec_fx_bd_pkgs,
                                     head_dependent_flux_boundary_packages=hd_dep_fx_pkgs,
                                     other_specified_head_boundary_packages='JMS',
                                     other_specified_flux_boundary_packages='MMM',
                                     other_head_dependent_flux_boundary_packages='JLG')
    modelparam_element = self.res.metadata.boundary_condition
    self.assertNotEqual(modelparam_element, None)
    # check specified_head_boundary_packages
    added_packages = modelparam_element.get_specified_head_boundary_packages()
    for intended_package in spec_hd_bd_pkgs:
        self.assertIn(intended_package, added_packages)
    # check specified_flux_boundary_packages
    added_packages = modelparam_element.get_specified_flux_boundary_packages()
    for intended_package in spec_fx_bd_pkgs:
        self.assertIn(intended_package, added_packages)
    # check head_dependent_flux_boundary_packages
    added_packages = modelparam_element.get_head_dependent_flux_boundary_packages()
    for intended_package in hd_dep_fx_pkgs:
        self.assertIn(intended_package, added_packages)
    # check the free-text "other" package fields
    self.assertEqual(modelparam_element.other_specified_head_boundary_packages, 'JMS')
    self.assertEqual(modelparam_element.other_specified_flux_boundary_packages, 'MMM')
    self.assertEqual(modelparam_element.other_head_dependent_flux_boundary_packages, 'JLG')
    # try to create another BoundaryCondition - it would raise an exception
    with self.assertRaises(IntegrityError):
        self.res.metadata.create_element('BoundaryCondition',
                                         specified_head_boundary_packages=spec_hd_bd_pkgs,
                                         specified_flux_boundary_packages=spec_fx_bd_pkgs,
                                         head_dependent_flux_boundary_packages=hd_dep_fx_pkgs)
    # create ModelCalibration
    self.res.metadata.create_element('ModelCalibration',
                                     calibratedParameter='a',
                                     observationType='b',
                                     observationProcessPackage='RVOB',
                                     calibrationMethod='c')
    modelparam_element = self.res.metadata.model_calibration
    self.assertNotEqual(modelparam_element, None)
    self.assertEqual(modelparam_element.calibratedParameter, 'a')
    self.assertEqual(modelparam_element.observationType, 'b')
    self.assertEqual(modelparam_element.observationProcessPackage, 'RVOB')
    self.assertEqual(modelparam_element.calibrationMethod, 'c')
    # try to create another ModelCalibration - it would raise an exception
    with self.assertRaises(IntegrityError):
        self.res.metadata.create_element('ModelCalibration',
                                         calibratedParameter='aa',
                                         observationType='b',
                                         observationProcessPackage='RVOB',
                                         calibrationMethod='c')
    # try with an invalid observation process package - raises exception
    with self.assertRaises(ValidationError):
        self.res.metadata.create_element('ModelCalibration',
                                         calibratedParameter='a',
                                         observationType='b',
                                         observationProcessPackage='RVoB',
                                         calibrationMethod='c')
    # create ModelInput - unlike the elements above, multiples are allowed
    self.res.metadata.create_element('ModelInput',
                                     inputType='a',
                                     inputSourceName='b',
                                     inputSourceURL='http://www.RVOB.com')
    modelparam_elements = self.res.metadata.model_inputs
    self.assertEqual(len(modelparam_elements), 1)
    modelparam_element = modelparam_elements[0]
    self.assertNotEqual(modelparam_element, None)
    self.assertEqual(modelparam_element.inputType, 'a')
    self.assertEqual(modelparam_element.inputSourceName, 'b')
    self.assertEqual(modelparam_element.inputSourceURL, 'http://www.RVOB.com')
    # create another ModelInput
    self.res.metadata.create_element('ModelInput',
                                     inputType='aa',
                                     inputSourceName='bd',
                                     inputSourceURL='http://www.RVOBs.com')
    modelparam_elements = self.res.metadata.model_inputs
    self.assertEqual(len(modelparam_elements), 2)
    # index 0 holds the newest input - presumably newest-first ordering;
    # TODO(review): confirm against the model_inputs property definition
    modelparam_element = modelparam_elements[0]
    self.assertNotEqual(modelparam_element, None)
    self.assertEqual(modelparam_element.inputType, 'aa')
    self.assertEqual(modelparam_element.inputSourceName, 'bd')
    self.assertEqual(modelparam_element.inputSourceURL, 'http://www.RVOBs.com')
    # create GeneralElements
    # try with invalid solver/package choices - raises exception
    with self.assertRaises(ValidationError):
        self.res.metadata.create_element('GeneralElements',
                                         modelParameter='BCF6',
                                         modelSolver='DsE4',
                                         output_control_package=['LMT6'],
                                         subsidencePackage='SUB')
    with self.assertRaises(ValidationError):
        self.res.metadata.create_element('GeneralElements',
                                         modelParameter='BCF6',
                                         modelSolver='DE4',
                                         output_control_package=['LMt6'],
                                         subsidencePackage='SUB')
    with self.assertRaises(ValidationError):
        self.res.metadata.create_element('GeneralElements',
                                         modelParameter='BCF6',
                                         modelSolver='DE4',
                                         output_control_package=['LMT6'],
                                         subsidencePackage='SaUB')
    ot_ctl_pkgs = ['LMT6', 'OC']
    self.res.metadata.create_element('GeneralElements',
                                     modelParameter='BCF6',
                                     modelSolver='DE4',
                                     output_control_package=ot_ctl_pkgs,
                                     subsidencePackage='SUB')
    modelparam_element = self.res.metadata.general_elements
    self.assertNotEqual(modelparam_element, None)
    self.assertEqual(modelparam_element.modelParameter, 'BCF6')
    self.assertEqual(modelparam_element.modelSolver, 'DE4')
    # check output_control_package
    added_packages = modelparam_element.get_output_control_package()
    for intended_package in ot_ctl_pkgs:
        self.assertIn(intended_package, added_packages)
    self.assertEqual(modelparam_element.subsidencePackage, 'SUB')
    # try to create another GeneralElements - it would raise an exception
    with self.assertRaises(IntegrityError):
        self.res.metadata.create_element('GeneralElements',
                                         modelParameter='BCF6',
                                         modelSolver='DE4',
                                         output_control_package=['LMT6'],
                                         subsidencePackage='SUB')
    # ---------------- update ----------------
    # update ModelOutput
    self.res.metadata.update_element('ModelOutput',
                                     self.res.metadata.model_output.id,
                                     includes_output=False)
    self.assertEqual(self.res.metadata.model_output.includes_output, False)
    self.res.metadata.update_element('ModelOutput',
                                     self.res.metadata.model_output.id,
                                     includes_output=True)
    self.assertEqual(self.res.metadata.model_output.includes_output, True)
    # update ExecutedBy
    self.res.metadata.update_element('ExecutedBy',
                                     self.res.metadata.executed_by.id,
                                     model_name=self.resMODFLOWModelProgram.short_id)
    modelparam_element = self.res.metadata.executed_by
    self.assertEqual(modelparam_element.model_name,
                     self.resMODFLOWModelProgram.metadata.title.value)
    self.assertEqual(modelparam_element.model_program_fk, self.resMODFLOWModelProgram)
    # update StudyArea - the numeric inputs come back as strings
    self.res.metadata.update_element('StudyArea',
                                     self.res.metadata.study_area.id,
                                     totalLength=33,
                                     totalWidth=2533,
                                     maximumElevation=12,
                                     minimumElevation=-148)
    modelparam_element = self.res.metadata.study_area
    self.assertNotEqual(modelparam_element, None)
    self.assertEqual(modelparam_element.totalLength, '33')
    self.assertEqual(modelparam_element.totalWidth, '2533')
    self.assertEqual(modelparam_element.maximumElevation, '12')
    self.assertEqual(modelparam_element.minimumElevation, '-148')
    # update GridDimensions
    self.res.metadata.update_element('GridDimensions',
                                     self.res.metadata.grid_dimensions.id,
                                     numberOfLayers='b',
                                     typeOfRows='Irregular',
                                     numberOfRows='d',
                                     typeOfColumns='Regular',
                                     numberOfColumns='f')
    modelparam_element = self.res.metadata.grid_dimensions
    self.assertNotEqual(modelparam_element, None)
    self.assertEqual(modelparam_element.numberOfLayers, 'b')
    self.assertEqual(modelparam_element.typeOfRows, 'Irregular')
    self.assertEqual(modelparam_element.numberOfRows, 'd')
    self.assertEqual(modelparam_element.typeOfColumns, 'Regular')
    self.assertEqual(modelparam_element.numberOfColumns, 'f')
    # try updating with invalid row/column type choices - raises exception
    with self.assertRaises(ValidationError):
        self.res.metadata.update_element('GridDimensions',
                                         self.res.metadata.grid_dimensions.id,
                                         numberOfLayers='b',
                                         typeOfRows='catspajamas',
                                         numberOfRows='c',
                                         typeOfColumns='Regular',
                                         numberOfColumns='z')
    with self.assertRaises(ValidationError):
        self.res.metadata.update_element('GridDimensions',
                                         self.res.metadata.grid_dimensions.id,
                                         numberOfLayers='b',
                                         typeOfRows='Irregular',
                                         numberOfRows='c',
                                         typeOfColumns='beach',
                                         numberOfColumns='z')
    # update StressPeriod
    self.res.metadata.update_element('StressPeriod',
                                     self.res.metadata.stress_period.id,
                                     stressPeriodType='Transient',
                                     steadyStateValue='555',
                                     transientStateValueType='Annually',
                                     transientStateValue='123')
    modelparam_element = self.res.metadata.stress_period
    self.assertNotEqual(modelparam_element, None)
    self.assertEqual(modelparam_element.stressPeriodType, 'Transient')
    self.assertEqual(modelparam_element.steadyStateValue, '555')
    self.assertEqual(modelparam_element.transientStateValueType, 'Annually')
    self.assertEqual(modelparam_element.transientStateValue, '123')
    # try updating with invalid stress-period type choices - raises exception
    with self.assertRaises(ValidationError):
        self.res.metadata.update_element('StressPeriod',
                                         self.res.metadata.stress_period.id,
                                         stressPeriodType='Transsdfasient',
                                         steadyStateValue='555',
                                         transientStateValueType='Annually',
                                         transientStateValue='123')
    with self.assertRaises(ValidationError):
        self.res.metadata.update_element('StressPeriod',
                                         self.res.metadata.stress_period.id,
                                         stressPeriodType='Transient',
                                         steadyStateValue='555',
                                         transientStateValueType='Annfadsfually',
                                         transientStateValue='123')
    # update GroundWaterFlow
    self.res.metadata.update_element('GroundWaterFlow',
                                     self.res.metadata.ground_water_flow.id,
                                     flowPackage='UPW',
                                     flowParameter='Hydraulic Conductivity')
    modelparam_element = self.res.metadata.ground_water_flow
    self.assertNotEqual(modelparam_element, None)
    self.assertEqual(modelparam_element.flowPackage, 'UPW')
    self.assertEqual(modelparam_element.flowParameter, 'Hydraulic Conductivity')
    # try updating with invalid flow package/parameter choices - raises exception
    with self.assertRaises(ValidationError):
        self.res.metadata.update_element('GroundWaterFlow',
                                         self.res.metadata.ground_water_flow.id,
                                         flowPackage='UPsW',
                                         flowParameter='Hydraulic Conductivity')
    with self.assertRaises(ValidationError):
        self.res.metadata.update_element('GroundWaterFlow',
                                         self.res.metadata.ground_water_flow.id,
                                         flowPackage='UPW',
                                         flowParameter='Hydraalic Conductivity')
    # update BoundaryCondition
    # try updating with invalid package names - raises exception
    with self.assertRaises(ValidationError):
        self.res.metadata.update_element('BoundaryCondition',
                                         self.res.metadata.boundary_condition.id,
                                         specified_head_boundary_packages=['BFH'],
                                         specified_flux_boundary_packages=['FHB'],
                                         head_dependent_flux_boundary_packages=['mmm'])
    with self.assertRaises(ValidationError):
        self.res.metadata.update_element('BoundaryCondition',
                                         self.res.metadata.boundary_condition.id,
                                         specified_head_boundary_packages=['BFH'],
                                         specified_flux_boundary_packages=['mmm'],
                                         head_dependent_flux_boundary_packages=['SFR'])
    with self.assertRaises(ValidationError):
        self.res.metadata.update_element('BoundaryCondition',
                                         self.res.metadata.boundary_condition.id,
                                         specified_head_boundary_packages=['mmm'],
                                         specified_flux_boundary_packages=['FHB'],
                                         head_dependent_flux_boundary_packages=['SFR'])
    spec_hd_bd_pkgs = ['BFH']
    spec_fx_bd_pkgs = ['FHB']
    hd_dep_fx_pkgs = ['RIV', 'DAFG', 'DRT']
    self.res.metadata.update_element('BoundaryCondition',
                                     self.res.metadata.boundary_condition.id,
                                     specified_head_boundary_packages=spec_hd_bd_pkgs,
                                     specified_flux_boundary_packages=spec_fx_bd_pkgs,
                                     head_dependent_flux_boundary_packages=hd_dep_fx_pkgs,
                                     other_specified_head_boundary_packages="AAA",
                                     other_specified_flux_boundary_packages="BBB",
                                     other_head_dependent_flux_boundary_packages="CCC")
    modelparam_element = self.res.metadata.boundary_condition
    self.assertNotEqual(modelparam_element, None)
    # check specified_head_boundary_packages
    added_packages = modelparam_element.get_specified_head_boundary_packages()
    for intended_package in spec_hd_bd_pkgs:
        self.assertIn(intended_package, added_packages)
    # check specified_flux_boundary_packages
    added_packages = modelparam_element.get_specified_flux_boundary_packages()
    for intended_package in spec_fx_bd_pkgs:
        self.assertIn(intended_package, added_packages)
    # check head_dependent_flux_boundary_packages
    added_packages = modelparam_element.get_head_dependent_flux_boundary_packages()
    for intended_package in hd_dep_fx_pkgs:
        self.assertIn(intended_package, added_packages)
    # check the free-text "other" package fields
    self.assertEqual(modelparam_element.other_specified_head_boundary_packages, 'AAA')
    self.assertEqual(modelparam_element.other_specified_flux_boundary_packages, 'BBB')
    self.assertEqual(modelparam_element.other_head_dependent_flux_boundary_packages, 'CCC')
    # update ModelCalibration
    self.res.metadata.update_element('ModelCalibration',
                                     self.res.metadata.model_calibration.id,
                                     calibratedParameter='b',
                                     observationType='c',
                                     observationProcessPackage='OBS',
                                     calibrationMethod='d')
    modelparam_element = self.res.metadata.model_calibration
    self.assertNotEqual(modelparam_element, None)
    self.assertEqual(modelparam_element.calibratedParameter, 'b')
    self.assertEqual(modelparam_element.observationType, 'c')
    self.assertEqual(modelparam_element.observationProcessPackage, 'OBS')
    self.assertEqual(modelparam_element.calibrationMethod, 'd')
    # try updating with an invalid observation process package - raises exception
    with self.assertRaises(ValidationError):
        self.res.metadata.update_element('ModelCalibration',
                                         self.res.metadata.model_calibration.id,
                                         calibratedParameter='a',
                                         observationType='b',
                                         observationProcessPackage='dtarb',
                                         calibrationMethod='c')
    # update ModelInput (index 1: the older of the two inputs)
    self.res.metadata.update_element('ModelInput',
                                     self.res.metadata.model_inputs[1].id,
                                     inputType='b',
                                     inputSourceName='c',
                                     inputSourceURL='http://www.RVOB.com')
    modelparam_elements = self.res.metadata.model_inputs
    self.assertEqual(len(modelparam_elements), 2)
    modelparam_element = modelparam_elements[1]
    self.assertNotEqual(modelparam_element, None)
    self.assertEqual(modelparam_element.inputType, 'b')
    self.assertEqual(modelparam_element.inputSourceName, 'c')
    self.assertEqual(modelparam_element.inputSourceURL, 'http://www.RVOB.com')
    # update the other ModelInput
    self.res.metadata.update_element('ModelInput',
                                     self.res.metadata.model_inputs[0].id,
                                     inputType='bb',
                                     inputSourceName='cc',
                                     inputSourceURL='http://www.RVOBss.com')
    modelparam_elements = self.res.metadata.model_inputs
    self.assertEqual(len(modelparam_elements), 2)
    modelparam_element = modelparam_elements[0]
    self.assertNotEqual(modelparam_element, None)
    self.assertEqual(modelparam_element.inputType, 'bb')
    self.assertEqual(modelparam_element.inputSourceName, 'cc')
    self.assertEqual(modelparam_element.inputSourceURL, 'http://www.RVOBss.com')
    # update GeneralElements
    # try updating with invalid solver/package choices - raises exception
    with self.assertRaises(ValidationError):
        self.res.metadata.update_element('GeneralElements',
                                         self.res.metadata.general_elements.id,
                                         modelParameter='BCF6',
                                         modelSolver='DsE4',
                                         output_control_package=['LMT6'],
                                         subsidencePackage='SUB')
    with self.assertRaises(ValidationError):
        self.res.metadata.update_element('GeneralElements',
                                         self.res.metadata.general_elements.id,
                                         modelParameter='BCF6',
                                         modelSolver='DE4',
                                         output_control_package=['LMTd6'],
                                         subsidencePackage='SUB')
    with self.assertRaises(ValidationError):
        self.res.metadata.update_element('GeneralElements',
                                         self.res.metadata.general_elements.id,
                                         modelParameter='BCF6',
                                         modelSolver='DE4',
                                         output_control_package=['LMT6'],
                                         subsidencePackage='SaUB')
    ot_ctl_pkgs = ['GAGE', 'MNWI']
    self.res.metadata.update_element('GeneralElements',
                                     self.res.metadata.general_elements.id,
                                     modelParameter='hydraulic conductivity',
                                     modelSolver='PCGN',
                                     output_control_package=ot_ctl_pkgs,
                                     subsidencePackage='SWT')
    modelparam_element = self.res.metadata.general_elements
    self.assertNotEqual(modelparam_element, None)
    self.assertEqual(modelparam_element.modelParameter, 'hydraulic conductivity')
    self.assertEqual(modelparam_element.modelSolver, 'PCGN')
    # check output_control_package
    added_packages = modelparam_element.get_output_control_package()
    for intended_package in ot_ctl_pkgs:
        self.assertIn(intended_package, added_packages)
    self.assertEqual(modelparam_element.subsidencePackage, 'SWT')
    # ---------------- delete ----------------
    # check that all extended metadata elements exist at this point
    self.assertNotEqual(self.res.metadata.model_output, None)
    self.assertNotEqual(self.res.metadata.executed_by, None)
    self.assertNotEqual(self.res.metadata.study_area, None)
    self.assertNotEqual(self.res.metadata.grid_dimensions, None)
    self.assertNotEqual(self.res.metadata.stress_period, None)
    self.assertNotEqual(self.res.metadata.ground_water_flow, None)
    self.assertNotEqual(self.res.metadata.boundary_condition, None)
    self.assertNotEqual(self.res.metadata.model_calibration, None)
    self.assertNotEqual(self.res.metadata.model_inputs, None)
    self.assertNotEqual(self.res.metadata.general_elements, None)
    # delete all elements
    self.res.metadata.delete_element('ModelOutput',
                                     self.res.metadata.model_output.id)
    self.res.metadata.delete_element('ExecutedBy',
                                     self.res.metadata.executed_by.id)
    self.res.metadata.delete_element('StudyArea',
                                     self.res.metadata.study_area.id)
    self.res.metadata.delete_element('GridDimensions',
                                     self.res.metadata.grid_dimensions.id)
    self.res.metadata.delete_element('StressPeriod',
                                     self.res.metadata.stress_period.id)
    self.res.metadata.delete_element('GroundWaterFlow',
                                     self.res.metadata.ground_water_flow.id)
    self.res.metadata.delete_element('BoundaryCondition', self.res.metadata.boundary_condition.id)
    self.res.metadata.delete_element('ModelCalibration',
                                     self.res.metadata.model_calibration.id)
    # always delete index 0 - the model_inputs list is re-read each pass,
    # so remaining inputs shift down as elements are removed
    for items in range(len(self.res.metadata.model_inputs)):
        self.res.metadata.delete_element('ModelInput', self.res.metadata.model_inputs[0].id)
    self.res.metadata.delete_element('GeneralElements',
                                     self.res.metadata.general_elements.id)
    # make sure they are deleted
    self.assertEqual(self.res.metadata.model_output, None)
    self.assertEqual(self.res.metadata.executed_by, None)
    self.assertEqual(self.res.metadata.study_area, None)
    self.assertEqual(self.res.metadata.grid_dimensions, None)
    self.assertEqual(self.res.metadata.stress_period, None)
    self.assertEqual(self.res.metadata.ground_water_flow, None)
    self.assertEqual(self.res.metadata.boundary_condition, None)
    self.assertEqual(self.res.metadata.model_calibration, None)
    self.assertEqual(len(self.res.metadata.model_inputs), 0)
    self.assertEqual(self.res.metadata.general_elements, None)
def test_delete_if_empty(self):
    """Updating every field of an element to a blank/placeholder value
    should delete the element itself.
    """
    # (element name, metadata attribute, populate kwargs, blanking kwargs)
    element_specs = [
        ('GridDimensions', 'grid_dimensions',
         dict(numberOfLayers=5555, typeOfRows='Irregular', numberOfRows=6666,
              typeOfColumns='Regular', numberOfColumns=7777),
         dict(numberOfLayers='', typeOfRows='Choose a type', numberOfRows='',
              typeOfColumns='Choose a type', numberOfColumns='')),
        ('StressPeriod', 'stress_period',
         dict(stressPeriodType='Steady and Transient', steadyStateValue=8888,
              transientStateValueType='Monthly', transientStateValue=9999),
         dict(stressPeriodType='Choose a type', steadyStateValue='',
              transientStateValueType='Choose a type', transientStateValue='')),
        ('GroundwaterFlow', 'ground_water_flow',
         dict(flowPackage='LPF', flowParameter='Hydraulic Conductivity'),
         dict(flowPackage='Choose a package', flowParameter='Choose a package')),
        ('BoundaryCondition', 'boundary_condition',
         dict(specified_head_boundary_packages=['CHD', 'FHB'],
              other_specified_head_boundary_packages='something',
              specified_flux_boundary_packages=['FHB', 'WEL'],
              other_specified_flux_boundary_packages='something1',
              head_dependent_flux_boundary_packages=['RIV', 'MNW1'],
              other_head_dependent_flux_boundary_packages='something2'),
         dict(specified_head_boundary_packages=[],
              other_specified_head_boundary_packages='',
              specified_flux_boundary_packages=[],
              other_specified_flux_boundary_packages='',
              head_dependent_flux_boundary_packages=[],
              other_head_dependent_flux_boundary_packages='')),
        ('ModelCalibration', 'model_calibration',
         dict(calibratedParameter='test parameter',
              observationType='test observation type',
              observationProcessPackage='GBOB',
              calibrationMethod='test calibration method'),
         dict(calibratedParameter='', observationType='',
              observationProcessPackage='Choose a package',
              calibrationMethod='')),
        ('GeneralElements', 'general_elements',
         dict(modelParameter='test model parameter', modelSolver='SIP',
              output_control_package=['HYD', 'OC'], subsidencePackage='SWT'),
         dict(modelParameter='', modelSolver='Choose a solver',
              output_control_package=[], subsidencePackage='Choose a package')),
    ]
    # create the elements and make sure they all exist
    for name, _, populated, _ in element_specs:
        self.res.metadata.create_element(name, **populated)
    for _, attr, _, _ in element_specs:
        self.assertNotEqual(getattr(self.res.metadata, attr), None)
    # update each element so that every field is blank or a placeholder
    for name, attr, _, blanked in element_specs:
        self.res.metadata.update_element(name,
                                         getattr(self.res.metadata, attr).id,
                                         **blanked)
    # blanking every field should have removed each element entirely
    for _, attr, _, _ in element_specs:
        self.assertEqual(getattr(self.res.metadata, attr), None)
def test_public_or_discoverable(self):
    """The resource becomes eligible for public/discoverable status only
    once it has a content file, an abstract, and a subject keyword.
    """
    resource = self.res
    # brand new resource: no files and incomplete required metadata
    self.assertFalse(resource.has_required_content_files())
    self.assertFalse(resource.metadata.has_all_required_elements())
    self.assertFalse(resource.can_be_public_or_discoverable)
    # attach the text file - satisfies the content-file requirement only
    to_add = [UploadedFile(file=self.text_file_obj, name=self.text_file_obj.name)]
    utils.resource_file_add_pre_process(resource=resource, files=to_add,
                                        user=self.user,
                                        extract_metadata=False)
    utils.resource_file_add_process(resource=resource, files=to_add,
                                    user=self.user,
                                    extract_metadata=False)
    self.assertTrue(resource.has_required_content_files())
    self.assertFalse(resource.can_be_public_or_discoverable)
    # an abstract alone is still not enough ...
    resource.metadata.create_element('Description', abstract="test abstract")
    self.assertTrue(resource.has_required_content_files())
    self.assertFalse(resource.can_be_public_or_discoverable)
    # ... but adding a subject completes the generically required metadata
    resource.metadata.create_element('Subject', value="test subject")
    self.assertTrue(resource.has_required_content_files())
    self.assertTrue(resource.can_be_public_or_discoverable)
def test_can_have_multiple_content_files(self):
    """A model instance resource may hold more than one content file."""
    self.assertTrue(self.res.can_have_multiple_files())
def test_can_upload_multiple_content_files(self):
    """More than one file may be uploaded in a single request."""
    self.assertTrue(self.res.allow_multiple_file_upload())
def test_get_xml(self):
    """Every metadata value set here must appear in the serialized XML.

    Populates core and all MODFLOW-specific extended elements with
    distinctive marker values, then checks each marker is present in
    the output of metadata.get_xml().
    """
    self.res.metadata.create_element('Description', abstract="test abstract")
    self.res.metadata.create_element('Subject', value="test subject")
    self.res.metadata.create_element('ModelOutput', includes_output=True)
    self.res.metadata.create_element('ExecutedBy', model_name=self.resGenModelProgram.short_id)
    self.res.metadata.create_element('StudyArea',
                                     totalLength=1111,
                                     totalWidth=2222,
                                     maximumElevation=3333,
                                     minimumElevation=4444)
    self.res.metadata.create_element('GridDimensions',
                                     numberOfLayers=5555,
                                     typeOfRows='Irregular',
                                     numberOfRows=6666,
                                     typeOfColumns='Regular',
                                     numberOfColumns=7777)
    self.res.metadata.create_element('StressPeriod',
                                     stressPeriodType='Steady and Transient',
                                     steadyStateValue=8888,
                                     transientStateValueType='Monthly',
                                     transientStateValue=9999)
    self.res.metadata.create_element('GroundwaterFlow',
                                     flowPackage='LPF',
                                     flowParameter='Hydraulic Conductivity')
    self.res.metadata.create_element('BoundaryCondition',
                                     specified_head_boundary_packages=['CHD', 'FHB'],
                                     specified_flux_boundary_packages=['FHB', 'WEL'],
                                     head_dependent_flux_boundary_packages=['RIV', 'MNW1'])
    self.res.metadata.create_element('ModelCalibration',
                                     calibratedParameter='test parameter',
                                     observationType='test observation type',
                                     observationProcessPackage='GBOB',
                                     calibrationMethod='test calibration method')
    self.res.metadata.create_element('ModelInput',
                                     inputType='test input type',
                                     inputSourceName='test source name',
                                     inputSourceURL='http://www.test.com')
    self.res.metadata.create_element('GeneralElements',
                                     modelParameter='test model parameter',
                                     modelSolver='SIP',
                                     output_control_package=['HYD', 'OC'],
                                     subsidencePackage='SWT')
    xml_doc = self.res.metadata.get_xml()
    # check that each specific metadata value made it into the xml doc.
    # assertIn reports the missing value on failure (unlike
    # assertTrue('x' in xml_doc)); the accidental duplicate 'FHB' check
    # in the original has been dropped.
    expected_values = (
        '1111', '2222', '3333', '4444',                       # StudyArea
        '5555', 'Irregular', '6666', 'Regular', '7777',       # GridDimensions
        'Steady and Transient', '8888', 'Monthly', '9999',    # StressPeriod
        'LPF', 'Hydraulic Conductivity',                      # GroundwaterFlow
        'CHD', 'FHB', 'WEL', 'RIV', 'MNW1',                   # BoundaryCondition
        'test parameter', 'test observation type', 'GBOB',
        'test calibration method',                            # ModelCalibration
        'test input type', 'test source name',
        'http://www.test.com',                                # ModelInput
        'test model parameter', 'SIP', 'HYD', 'OC', 'SWT',    # GeneralElements
    )
    for expected in expected_values:
        self.assertIn(expected, xml_doc)
def test_metadata_on_content_file_delete(self):
    """Metadata should remain intact after all content files are deleted."""
    # upload the two .nam files
    files = [UploadedFile(file=self.sample_nam_obj, name=self.sample_nam_obj.name)]
    utils.resource_file_add_pre_process(resource=self.res, files=files, user=self.user,
                                        extract_metadata=False)
    utils.resource_file_add_process(resource=self.res, files=files, user=self.user,
                                    extract_metadata=False)
    files = [UploadedFile(file=self.sample_nam_obj2, name=self.sample_nam_obj2.name)]
    utils.resource_file_add_pre_process(resource=self.res, files=files, user=self.user,
                                        extract_metadata=False)
    utils.resource_file_add_process(resource=self.res, files=files, user=self.user,
                                    extract_metadata=False)
    # upload the supporting files; 'with' closes each handle (the
    # original version leaked the open file objects)
    for f in self.file_list:
        with open(f, 'r') as f_obj:
            files = [UploadedFile(file=f_obj, name=f_obj.name)]
            utils.resource_file_add_pre_process(resource=self.res, files=files,
                                                user=self.user, extract_metadata=False)
            utils.resource_file_add_process(resource=self.res, files=files,
                                            user=self.user, extract_metadata=False)
    # create metadata elements
    self.res.metadata.create_element('Description', abstract="test abstract")
    self.res.metadata.create_element('Subject', value="test subject")
    self.res.metadata.create_element('ModelOutput', includes_output=True)
    self.res.metadata.create_element('ExecutedBy', model_name=self.resGenModelProgram.short_id)
    self.res.metadata.create_element('StudyArea',
                                     totalLength=1111,
                                     totalWidth=2222,
                                     maximumElevation=3333,
                                     minimumElevation=4444)
    self.res.metadata.create_element('GridDimensions',
                                     numberOfLayers=5555,
                                     typeOfRows='Irregular',
                                     numberOfRows=6666,
                                     typeOfColumns='Regular',
                                     numberOfColumns=7777)
    self.res.metadata.create_element('StressPeriod',
                                     stressPeriodType='Steady and Transient',
                                     steadyStateValue=8888,
                                     transientStateValueType='Monthly',
                                     transientStateValue=9999)
    self.res.metadata.create_element('GroundwaterFlow',
                                     flowPackage='LPF',
                                     flowParameter='Hydraulic Conductivity')
    self.res.metadata.create_element('BoundaryCondition',
                                     specified_head_boundary_packages=['CHD', 'FHB'],
                                     specified_flux_boundary_packages=['FHB', 'WEL'],
                                     head_dependent_flux_boundary_packages=['RIV', 'MNW1'])
    self.res.metadata.create_element('ModelCalibration',
                                     calibratedParameter='test parameter',
                                     observationType='test observation type',
                                     observationProcessPackage='GBOB',
                                     calibrationMethod='test calibration method')
    self.res.metadata.create_element('ModelInput',
                                     inputType='test input type',
                                     inputSourceName='test source name',
                                     inputSourceURL='http://www.test.com')
    self.res.metadata.create_element('GeneralElements',
                                     modelParameter='test model parameter',
                                     modelSolver='SIP',
                                     output_control_package=['HYD', 'OC'],
                                     subsidencePackage='SWT')
    # there should be 12 content files
    self.assertEqual(self.res.files.all().count(), 12)
    # there should be 11 format elements (the 2 nam files share one)
    self.assertEqual(self.res.metadata.formats.all().count(), 11)
    # delete content files that we added above
    for f in self.file_names:
        hydroshare.delete_resource_file(self.res.short_id, f, self.user)
    # there should be no content file left
    self.assertEqual(self.res.files.all().count(), 0)
    # there should be no format element left
    self.assertEqual(self.res.metadata.formats.all().count(), 0)
    # core metadata should survive file deletion
    # (assertNotEqual replaces the deprecated assertNotEquals alias)
    self.assertNotEqual(self.res.metadata.title, None)
    # there should be an abstract element
    self.assertNotEqual(self.res.metadata.description, None)
    # there should be one creator element
    self.assertEqual(self.res.metadata.creators.all().count(), 1)
    # extended metadata elements should survive file deletion too
    self.assertNotEqual(self.res.metadata.model_output, None)
    self.assertNotEqual(self.res.metadata.executed_by, None)
    self.assertNotEqual(self.res.metadata.study_area, None)
    self.assertNotEqual(self.res.metadata.grid_dimensions, None)
    self.assertNotEqual(self.res.metadata.stress_period, None)
    self.assertNotEqual(self.res.metadata.ground_water_flow, None)
    self.assertNotEqual(self.res.metadata.boundary_condition, None)
    self.assertNotEqual(self.res.metadata.model_calibration, None)
    self.assertNotEqual(self.res.metadata.model_inputs, None)
    self.assertNotEqual(self.res.metadata.general_elements, None)
def test_metadata_delete_on_resource_delete(self):
    """All metadata elements must be deleted along with the resource."""

    def upload(f_obj):
        # run the standard two-step upload for one open file object
        files = [UploadedFile(file=f_obj, name=f_obj.name)]
        utils.resource_file_add_pre_process(resource=self.res, files=files,
                                            user=self.user, extract_metadata=False)
        utils.resource_file_add_process(resource=self.res, files=files,
                                        user=self.user, extract_metadata=False)

    # upload the two .nam files and every supporting file; 'with' closes
    # the handles the original version leaked
    upload(self.sample_nam_obj)
    upload(self.sample_nam_obj2)
    for f in self.file_list:
        with open(f, 'r') as f_obj:
            upload(f_obj)

    # create metadata elements
    self.res.metadata.create_element('Description', abstract="test abstract")
    self.res.metadata.create_element('Subject', value="test subject")
    self.res.metadata.create_element('ModelOutput', includes_output=True)
    self.res.metadata.create_element('ExecutedBy', model_name=self.resGenModelProgram.short_id)
    self.res.metadata.create_element('StudyArea',
                                     totalLength=1111,
                                     totalWidth=2222,
                                     maximumElevation=3333,
                                     minimumElevation=4444)
    self.res.metadata.create_element('GridDimensions',
                                     numberOfLayers=5555,
                                     typeOfRows='Irregular',
                                     numberOfRows=6666,
                                     typeOfColumns='Regular',
                                     numberOfColumns=7777)
    self.res.metadata.create_element('StressPeriod',
                                     stressPeriodType='Steady and Transient',
                                     steadyStateValue=8888,
                                     transientStateValueType='Monthly',
                                     transientStateValue=9999)
    self.res.metadata.create_element('GroundwaterFlow',
                                     flowPackage='LPF',
                                     flowParameter='Hydraulic Conductivity')
    self.res.metadata.create_element('BoundaryCondition',
                                     specified_head_boundary_packages=['CHD', 'FHB'],
                                     specified_flux_boundary_packages=['FHB', 'WEL'],
                                     head_dependent_flux_boundary_packages=['RIV', 'MNW1'])
    self.res.metadata.create_element('ModelCalibration',
                                     calibratedParameter='test parameter',
                                     observationType='test observation type',
                                     observationProcessPackage='GBOB',
                                     calibrationMethod='test calibration method')
    self.res.metadata.create_element('ModelInput',
                                     inputType='test input type',
                                     inputSourceName='test source name',
                                     inputSourceURL='http://www.test.com')
    self.res.metadata.create_element('GeneralElements',
                                     modelParameter='test model parameter',
                                     modelSolver='SIP',
                                     output_control_package=['HYD', 'OC'],
                                     subsidencePackage='SWT')
    self.res.metadata.create_element('Contributor', name="user2")

    core_metadata_obj = self.res.metadata

    def assert_exists(element_cls, should_exist):
        # check whether any element_cls rows belong to this resource's
        # metadata container
        qs = element_cls.objects.filter(object_id=core_metadata_obj.id)
        if should_exist:
            self.assertTrue(qs.exists())
        else:
            self.assertFalse(qs.exists())

    # core elements created above (or automatically with the resource).
    # NOTE: the original comments wrongly said "no Contributor" while
    # asserting its presence; Contributor IS created above.
    core_present = [Creator, Contributor, Identifier, Type, Title, Description,
                    Date, Subject, Format, Language, Rights]
    # core elements never created in this test
    core_absent = [Source, Relation, Publisher, Coverage]
    # MODFLOW-specific extended elements created above
    extended = [ModelOutput, ExecutedBy, StudyArea, GridDimensions, StressPeriod,
                GroundWaterFlow, BoundaryCondition, ModelCalibration, ModelInput,
                GeneralElements]

    # before resource delete
    self.assertEqual(CoreMetaData.objects.all().count(), 3)
    for element_cls in core_present:
        assert_exists(element_cls, True)
    for element_cls in core_absent:
        assert_exists(element_cls, False)
    # resource specific metadata
    self.assertEqual(MODFLOWModelInstanceMetaData.objects.all().count(), 1)
    for element_cls in extended:
        assert_exists(element_cls, True)

    # delete resource: its metadata container and every element go away
    hydroshare.delete_resource(self.res.short_id)
    self.assertEqual(CoreMetaData.objects.all().count(), 2)
    self.assertEqual(MODFLOWModelInstanceMetaData.objects.all().count(), 0)
    for element_cls in core_present + core_absent + extended:
        assert_exists(element_cls, False)
def test_bulk_metadata_update(self):
    """Test the update() method of the MODFLOWModelInstanceMetaData class."""
    # check that there are no extended metadata elements at this point
    self.assertEqual(self.res.metadata.model_output, None)
    self.assertEqual(self.res.metadata.executed_by, None)
    self.assertEqual(self.res.metadata.study_area, None)
    self.assertEqual(self.res.metadata.grid_dimensions, None)
    self.assertEqual(self.res.metadata.stress_period, None)
    self.assertEqual(self.res.metadata.ground_water_flow, None)
    self.assertEqual(self.res.metadata.boundary_condition, None)
    self.assertEqual(self.res.metadata.model_calibration, None)
    self.assertEqual(len(self.res.metadata.model_inputs), 0)
    self.assertEqual(self.res.metadata.general_elements, None)
    # create modeloutput element using the update()
    self.res.metadata.update([{'modeloutput': {'includes_output': False}}])
    self.assertNotEqual(self.res.metadata.model_output, None)
    # update() must also modify the now-existing element
    self.res.metadata.update([{'modeloutput': {'includes_output': True}}])
    self.assertEqual(self.res.metadata.model_output.includes_output, True)
    # test that we can also update core metadata using update()
    # there should be a creator element
    self.assertEqual(self.res.metadata.creators.count(), 1)
    self.res.metadata.update([{'creator': {'name': 'Second Creator'}},
                              {'creator': {'name': 'Third Creator'}}])
    # there should be 2 creators at this point (the previously existing
    # creator gets deleted as part of the update() call)
    self.assertEqual(self.res.metadata.creators.count(), 2)
    # test multiple updates in a single call to update(); a list literal
    # replaces the original append-by-append construction of static data
    metadata = [
        {'executedby': {'model_name': self.resGenModelProgram.short_id}},
        {'studyarea': {'totalLength': 'a', 'totalWidth': 'b',
                       'maximumElevation': 'c', 'minimumElevation': 'd'}},
        {'griddimensions': {'numberOfLayers': 'a', 'typeOfRows': 'Regular',
                            'numberOfRows': 'c', 'typeOfColumns': 'Irregular',
                            'numberOfColumns': 'e'}},
        {'stressperiod': {'stressPeriodType': 'Steady', 'steadyStateValue': 'a',
                          'transientStateValueType': 'Daily',
                          'transientStateValue': 'b'}},
        {'groundwaterflow': {'flowPackage': 'BCF6',
                             'flowParameter': 'Transmissivity'}},
        {'boundarycondition': {'specified_head_boundary_packages': ['FHB'],
                               'specified_flux_boundary_packages': ['RCH'],
                               'head_dependent_flux_boundary_packages': ['GHB'],
                               'other_specified_head_boundary_packages': 'JMS',
                               'other_head_dependent_flux_boundary_packages':
                                   'JLG'}},
        {'modelinput': {'inputType': 'a', 'inputSourceName': 'b',
                        'inputSourceURL': 'http://www.RVOB.com'}},
        {'generalelements': {'modelParameter': 'BCF6', 'modelSolver': 'DE4',
                             'output_control_package': ['LMT6'],
                             'subsidencePackage': 'SUB'}},
        {'modelcalibration': {'calibratedParameter': 'a', 'observationType': 'b',
                              'observationProcessPackage': 'RVOB',
                              'calibrationMethod': 'c'}},
    ]
    self.res.metadata.update(metadata)
    # check that there are extended metadata elements at this point
    self.assertNotEqual(self.res.metadata.model_output, None)
    self.assertNotEqual(self.res.metadata.executed_by, None)
    self.assertNotEqual(self.res.metadata.study_area, None)
    self.assertNotEqual(self.res.metadata.grid_dimensions, None)
    self.assertNotEqual(self.res.metadata.stress_period, None)
    self.assertNotEqual(self.res.metadata.ground_water_flow, None)
    self.assertNotEqual(self.res.metadata.boundary_condition, None)
    self.assertNotEqual(self.res.metadata.model_calibration, None)
    self.assertNotEqual(len(self.res.metadata.model_inputs), 0)
    self.assertNotEqual(self.res.metadata.general_elements, None)
| |
# Copyright (C) 2003-2009 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Some unit tests for SSHClient.
"""
import socket
from tempfile import mkstemp
import threading
import unittest
import weakref
import warnings
import os
from tests.util import test_path
import paramiko
from paramiko.common import PY2
class NullServer (paramiko.ServerInterface):
    """Minimal in-process SSH server used by the SSHClient tests."""

    def get_allowed_auths(self, username):
        # 'slowdive' may also authenticate by password; everyone else
        # only by public key
        if username == 'slowdive':
            return 'publickey,password'
        return 'publickey'

    def check_auth_password(self, username, password):
        if (username == 'slowdive') and (password == 'pygmalion'):
            return paramiko.AUTH_SUCCESSFUL
        return paramiko.AUTH_FAILED

    def check_auth_publickey(self, username, key):
        # accept only the test DSS key, matched by its fingerprint
        if (key.get_name() == 'ssh-dss') and key.get_fingerprint() == b'\x44\x78\xf0\xb9\xa2\x3c\xc5\x18\x20\x09\xff\x75\x5b\xc1\xd2\x6c':
            return paramiko.AUTH_SUCCESSFUL
        return paramiko.AUTH_FAILED

    def check_channel_request(self, kind, chanid):
        return paramiko.OPEN_SUCCEEDED

    def check_channel_exec_request(self, channel, command):
        # the tests only ever exec 'yes'; direct boolean expression
        # replaces the redundant if/return-False/return-True chain
        return command == 'yes'
class SSHClientTest (unittest.TestCase):
    def setUp(self):
        # listening socket the fake server thread accepts on; port 0
        # lets the OS pick a free port
        self.sockl = socket.socket()
        self.sockl.bind(('localhost', 0))
        self.sockl.listen(1)
        self.addr, self.port = self.sockl.getsockname()
        self.event = threading.Event()

    def tearDown(self):
        for attr in "tc ts socks sockl".split():
            if hasattr(self, attr):
                getattr(self, attr).close()

    def _run(self):
        # server side: accept one connection and serve NullServer on it
        self.socks, addr = self.sockl.accept()
        self.ts = paramiko.Transport(self.socks)
        host_key = paramiko.RSAKey.from_private_key_file(test_path('test_rsa.key'))
        self.ts.add_server_key(host_key)
        server = NullServer()
        self.ts.start_server(self.event, server)

    def _connect(self, **connect_kwargs):
        # start the server thread, connect an SSHClient as 'slowdive'
        # with the given credentials, and verify authentication
        threading.Thread(target=self._run).start()
        host_key = paramiko.RSAKey.from_private_key_file(test_path('test_rsa.key'))
        public_host_key = paramiko.RSAKey(data=host_key.asbytes())
        self.tc = paramiko.SSHClient()
        self.tc.get_host_keys().add('[%s]:%d' % (self.addr, self.port), 'ssh-rsa', public_host_key)
        self.tc.connect(self.addr, self.port, username='slowdive', **connect_kwargs)
        self._verify_authenticated()

    def _verify_authenticated(self):
        # wait for the server handshake to complete and check auth state
        self.event.wait(1.0)
        self.assertTrue(self.event.isSet())
        self.assertTrue(self.ts.is_active())
        self.assertEqual('slowdive', self.ts.get_username())
        self.assertEqual(True, self.ts.is_authenticated())

    def _exec_and_check(self):
        # run 'yes' over the session channel and verify that stdout and
        # stderr round-trip correctly
        stdin, stdout, stderr = self.tc.exec_command('yes')
        schan = self.ts.accept(1.0)
        schan.send('Hello there.\n')
        schan.send_stderr('This is on stderr.\n')
        schan.close()
        self.assertEqual('Hello there.\n', stdout.readline())
        self.assertEqual('', stdout.readline())
        self.assertEqual('This is on stderr.\n', stderr.readline())
        self.assertEqual('', stderr.readline())
        stdin.close()
        stdout.close()
        stderr.close()

    def test_1_client(self):
        """
        verify that the SSHClient stuff works too.
        """
        self._connect(password='pygmalion')
        self._exec_and_check()

    def test_2_client_dsa(self):
        """
        verify that SSHClient works with a DSA key.
        """
        self._connect(key_filename=test_path('test_dss.key'))
        self._exec_and_check()

    def test_3_multiple_key_files(self):
        """
        verify that SSHClient accepts and tries multiple key files.
        """
        self._connect(key_filename=[test_path('test_rsa.key'), test_path('test_dss.key')])

    def test_4_auto_add_policy(self):
        """
        verify that SSHClient's AutoAddPolicy works.
        """
        threading.Thread(target=self._run).start()
        host_key = paramiko.RSAKey.from_private_key_file(test_path('test_rsa.key'))
        public_host_key = paramiko.RSAKey(data=host_key.asbytes())
        self.tc = paramiko.SSHClient()
        self.tc.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        # no host keys known up front; the policy should add one
        self.assertEqual(0, len(self.tc.get_host_keys()))
        self.tc.connect(self.addr, self.port, username='slowdive', password='pygmalion')
        self._verify_authenticated()
        self.assertEqual(1, len(self.tc.get_host_keys()))
        self.assertEqual(public_host_key, self.tc.get_host_keys()['[%s]:%d' % (self.addr, self.port)]['ssh-rsa'])

    def test_5_save_host_keys(self):
        """
        verify that SSHClient correctly saves a known_hosts file.
        """
        warnings.filterwarnings('ignore', 'tempnam.*')
        host_key = paramiko.RSAKey.from_private_key_file(test_path('test_rsa.key'))
        public_host_key = paramiko.RSAKey(data=host_key.asbytes())
        fd, localname = mkstemp()
        os.close(fd)
        client = paramiko.SSHClient()
        # assertEqual replaces the deprecated assertEquals alias
        self.assertEqual(0, len(client.get_host_keys()))
        host_id = '[%s]:%d' % (self.addr, self.port)
        client.get_host_keys().add(host_id, 'ssh-rsa', public_host_key)
        self.assertEqual(1, len(client.get_host_keys()))
        self.assertEqual(public_host_key, client.get_host_keys()[host_id]['ssh-rsa'])
        client.save_host_keys(localname)
        # 'hosts_file' avoids rebinding 'fd' (an int descriptor above)
        # to a file object
        with open(localname) as hosts_file:
            assert host_id in hosts_file.read()
        os.unlink(localname)

    def test_6_cleanup(self):
        """
        verify that when an SSHClient is collected, its transport (and the
        transport's packetizer) is closed.
        """
        # Unclear why this is borked on Py3, but it is, and does not seem worth
        # pursuing at the moment.
        if not PY2:
            return
        threading.Thread(target=self._run).start()
        host_key = paramiko.RSAKey.from_private_key_file(test_path('test_rsa.key'))
        public_host_key = paramiko.RSAKey(data=host_key.asbytes())
        self.tc = paramiko.SSHClient()
        self.tc.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.assertEqual(0, len(self.tc.get_host_keys()))
        self.tc.connect(self.addr, self.port, username='slowdive', password='pygmalion')
        self.event.wait(1.0)
        self.assertTrue(self.event.isSet())
        self.assertTrue(self.ts.is_active())
        p = weakref.ref(self.tc._transport.packetizer)
        self.assertTrue(p() is not None)
        self.tc.close()
        del self.tc
        # hrm, sometimes p isn't cleared right away. why is that?
        #st = time.time()
        #while (time.time() - st < 5.0) and (p() is not None):
        #    time.sleep(0.1)
        # instead of dumbly waiting for the GC to collect, force a collection
        # to see whether the SSHClient object is deallocated correctly
        import gc
        gc.collect()
        self.assertTrue(p() is None)
| |
import os
import requests
import sys
# accumulates (url, status_code, reason) tuples for failed downloads
download_errors = []
# rule names; also used as sub-folder names for the downloaded files
ALLOWED = "allowed"
WARNING = "warning"
# Per-library version tables, keyed by rule (ALLOWED / WARNING).
ANGULARJS_VERSIONS = {
    WARNING: [
        "1.5.5",
    ],
}
BACKBONE_VERSIONS = {
    ALLOWED: [
        "1.0.0",
        "1.1.0",
        "1.1.1",
        "1.1.2",
    ]
}
DOJO_VERSIONS = {
    ALLOWED: [
        "1.5.0",
        "1.5.1",
        "1.6.0",
        "1.6.1",
        "1.7.0",
        "1.7.1",
        "1.7.2",
        "1.7.3",
        "1.8.0",
        "1.8.1",
        "1.8.2",
        "1.8.3",
        "1.8.4",
        "1.8.5",
        "1.8.6",
        "1.9.0",
        "1.9.1",
        "1.9.2",
        "1.9.3",
    ]
}
JQUERY_VERSIONS = {
    ALLOWED: [
        "1.5",
        "1.5.1",
        "1.5.2",
        "1.6",
        "1.6.1",
        "1.6.2",
        "1.6.3",
        "1.6.4",
        "1.7",
        "1.7.1",
        "1.7.2",
        "1.8.0",
        "1.8.1",
        "1.8.2",
        "1.8.3",
        "1.9.0",
        "1.9.1",
        "1.10.0",
        "1.10.1",
        "1.10.2",
        "1.11.0",
        "1.11.1",
        "1.11.2",
        "1.11.3",
        "1.12.0",
        "1.12.1",
        "1.12.2",
        "1.12.3",
        "1.12.4",
        "2.0.0",
        "2.0.1",
        "2.0.2",
        "2.0.3",
        "2.1.0",
        "2.1.1",
        "2.1.2",
        "2.1.3",
        "2.1.4",
        "2.2.0",
        "2.2.1",
        "2.2.2",
        "2.2.3",
        "2.2.4",
    ]
}
JQUERYUI_VERSIONS = {
    ALLOWED: [
        "1.8.8",
        "1.8.9",
        "1.8.10",
        "1.8.11",
        "1.8.12",
        "1.8.13",
        "1.8.14",
        "1.8.15",
        "1.8.16",
        "1.8.17",
        "1.8.18",
        "1.8.19",
        "1.8.20",
        "1.8.21",
        "1.8.22",
        "1.8.23",
        "1.8.24",
        "1.9.0",
        "1.9.1",
        "1.9.2",
        "1.10.0",
        "1.10.1",
        "1.10.2",
        "1.10.3",
        "1.10.4",
        "1.11.0",
        "1.11.1",
        "1.11.2",
        "1.11.3",
        "1.11.4",
    ]
}
MOMENTJS_VERSIONS = {
    ALLOWED: [
        "2.9.0",
        "2.10.2",
        "2.10.3",
        "2.10.5",
        "2.10.6",
    ]
}
MOOTOOLS_VERSIONS = {
    ALLOWED: [
        "1.3.1",
        "1.3.2",
        "1.4.0",
        "1.4.1",
        "1.4.2",
        "1.4.3",
        "1.4.4",
        "1.4.5",
        "1.5.0",
    ]
}
PROTOTYPE_VERSIONS = {
    ALLOWED: [
        "1.7.1.0",
        "1.7.2.0",
    ]
}
UNDERSCORE_VERSIONS = {
    ALLOWED: [
        "1.1.4",
        "1.1.5",
        "1.1.7",
        "1.2.0",
        "1.2.1",
        "1.2.2",
        "1.2.3",
        "1.2.4",
        "1.3.0",
        "1.3.1",
        "1.3.2",
        "1.3.3",
        "1.4.0",
        "1.4.1",
        "1.4.2",
        "1.4.3",
        "1.4.4",
        "1.5.0",
        "1.5.1",
        "1.5.2",
        "1.6.0",
        "1.7.0",
        "1.8.0",
        "1.8.1",
        "1.8.2",
        "1.8.3",
    ]
}
def process(url, rule, file):
dest_folder = os.path.join(os.path.abspath(os.path.dirname(__file__)),
sys.argv[1], rule)
if not os.path.exists(dest_folder):
os.makedirs(dest_folder)
destination = os.path.join(dest_folder, file)
if os.path.exists(destination):
return
try:
response = requests.get(url)
response.raise_for_status()
with open(destination, "wb") as code:
code.write(response.content)
print "Downloaded: {}".format(url)
except requests.exceptions.HTTPError as e:
global download_errors
download_errors.append((response.url, response.status_code, response.reason))
def get_pattern(prefix, url_pattern, library):
    # Fetch every listed version of one library, one file per version,
    # filed under its rule ("allowed"/"warning") folder.
    for rule in library:
        for version in library[rule]:
            url = url_pattern % version
            local_name = "%s.%s.%s" % (prefix, version, url.split("/")[-1])
            process(url, rule, local_name)
def get_patterns():
    """Download every tracked third-party library release.

    For most libraries both the plain and the minified build are fetched.
    The (prefix, URL pattern, version table) triples below are processed in
    order, matching the original call sequence.
    """
    downloads = [
        # AngularJS
        ("angularjs", "https://code.angularjs.org/%s/angular.js", ANGULARJS_VERSIONS),
        ("angularjs", "https://code.angularjs.org/%s/angular.min.js", ANGULARJS_VERSIONS),
        # Backbone
        ("backbone", "https://raw.githubusercontent.com/jashkenas/backbone/%s/backbone.js", BACKBONE_VERSIONS),
        ("backbone", "https://raw.githubusercontent.com/jashkenas/backbone/%s/backbone-min.js", BACKBONE_VERSIONS),
        # Dojo Toolkit
        ("dojo", "https://download.dojotoolkit.org/release-%s/dojo.js", DOJO_VERSIONS),
        ("dojo", "https://download.dojotoolkit.org/release-%s/dojo.js.uncompressed.js", DOJO_VERSIONS),
        # jQuery
        ("jquery", "https://code.jquery.com/jquery-%s.js", JQUERY_VERSIONS),
        ("jquery", "https://code.jquery.com/jquery-%s.min.js", JQUERY_VERSIONS),
        # jQueryUI
        ("jqueryui", "https://code.jquery.com/ui/%s/jquery-ui.min.js", JQUERYUI_VERSIONS),
        ("jqueryui", "https://code.jquery.com/ui/%s/jquery-ui.js", JQUERYUI_VERSIONS),
        # moment.js
        ("moment", "https://raw.githubusercontent.com/moment/moment/%s/moment.js", MOMENTJS_VERSIONS),
        ("moment", "https://raw.githubusercontent.com/moment/moment/%s/min/moment.min.js", MOMENTJS_VERSIONS),
        # MooTools
        ("mootools", "https://ajax.googleapis.com/ajax/libs/mootools/%s/mootools-yui-compressed.js", MOOTOOLS_VERSIONS),
        ("mootools", "https://ajax.googleapis.com/ajax/libs/mootools/%s/mootools.js", MOOTOOLS_VERSIONS),
        # Prototype.js (minified build is not published separately)
        ("prototype", "https://ajax.googleapis.com/ajax/libs/prototype/%s/prototype.js", PROTOTYPE_VERSIONS),
        # Underscore
        ("underscore", "https://raw.github.com/documentcloud/underscore/%s/underscore.js", UNDERSCORE_VERSIONS),
        ("underscore", "https://raw.github.com/documentcloud/underscore/%s/underscore-min.js", UNDERSCORE_VERSIONS),
    ]
    for prefix, url_pattern, versions in downloads:
        get_pattern(prefix, url_pattern, versions)
# CryptoJS component source files (names without the .js extension).  Unlike
# the *_VERSIONS tables above, the release (3.1.2) is pinned in the URL and
# the substituted value is the component file name.
CRYPTO_FILES = {
 ALLOWED: ["aes", "cipher-core", "core", "enc-base64", "enc-utf16", "evpkdf",
           "format-hex", "hmac", "lib-typedarrays", "md5", "mode-cfb",
           "mode-ctr-gladman", "mode-ctr", "mode-ecb", "mode-ofb",
           "pad-ansix923", "pad-iso10126", "pad-iso97971", "pad-nopadding",
           "pad-zeropadding", "pbkdf2", "rabbit-legacy", "rabbit", "rc4",
           "ripemd160", "sha1", "sha224", "sha256", "sha3", "sha384",
           "sha512", "tripledes", "x64-core"]
}
get_pattern(
 "crypto_js", "https://crypto-js.googlecode.com/svn/tags/3.1.2/src/%s.js",
 CRYPTO_FILES)
# Kick off all downloads; failures accumulate in download_errors (appended
# to by process() for any non-2xx response).
print "Downloading third-party library files..."
get_patterns()
# Report every failed URL, then exit non-zero so callers can detect failure.
if download_errors:
 for url, code, reason in download_errors:
  print "Failed: {} is '{}' ({}).".format(url, reason, code)
 print "Some files failed to download, please check the output above...Exiting."
 sys.exit(1)
print "Downloading third-party library files complete."
| |
"""
Load npy xy, plot and save
"""
import os, sys
import matplotlib
#matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import matplotlib.cm as mpl_cm
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from matplotlib import cm
# Render all figure text through LaTeX with the Computer Modern serif font.
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rc('font', family = 'serif', serif = 'cmr10')
import numpy as np
from datetime import timedelta
import datetime
import imp
import re
from textwrap import wrap
# Helper module loaded by absolute path; its main() presumably maps an
# experiment id (e.g. 'djznw') to a human-readable legend label -- see usage
# in main() below.
model_name_convert_legend = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_legend.py')
#unrotate = imp.load_source('util', '/home/pwille/python_scripts/modules/unrotate_pole.py')
###############
# Things to change
top_dir='/nfs/a90/eepdw/Data/Rain_Land_Sea_Diurnal'
pp_file = 'avg.5216'
# Analysis domain bounds, baked into the observation file names below.
lon_max = 101.866
lon_min = 64.115
lat_max= 33.
lat_min=-6.79
trmm_dir = '/nfs/a90/eepdw/Data/Observations/Satellite/TRMM/Diurnal/'
trmm_file = "trmm_diurnal_average_lat_%s_%s_lon_%s_%s.npz" % (lat_min,lat_max, lon_min, lon_max)
#############
# Make own time x-axis
# Hourly points from 06:30 on 21 Aug 2011 to 06:30 on 22 Aug 2011.
d = matplotlib.dates.drange(datetime.datetime(2011, 8, 21, 6,30), datetime.datetime(2011, 8, 22, 6, 30), timedelta(hours=1))
formatter = matplotlib.dates.DateFormatter('%H:%M')
def main():
 """Plot domain-averaged diurnal rainfall cycles for the model experiments
 against TRMM, CMORPH and GSMAP satellite retrievals, producing one figure
 for each averaging domain ('land', 'sea', 'total')."""
 #experiment_ids = ['djznw', 'djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] # All 12
 experiment_ids_p = ['djznw', 'djzny', 'djznq', 'dklzq', 'dkmbq', 'dkjxq' ] # Most of Params
 experiment_ids_e = ['dklwu', 'dklyu', 'djzns', 'dkbhu', 'djznu', 'dkhgu'] # Most of Explicit
 #experiment_ids = ['djzny', 'djznq', 'djzns', 'djznw', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq' ]
 #plt.ion()
 NUM_COLOURS = 15
 cmap=cm.get_cmap(cm.Set1, NUM_COLOURS)
 #cgen = (cmap(1.*i/NUM_COLORS) for i in range(NUM_COLORS))
 # 'total' files hold a plain series; 'land'/'sea' files hold the series
 # wrapped in an extra leading axis (hence the plotnp[0] below).
 for ls in ['land', 'sea', 'total']:
  fig = plt.figure(figsize=(12,6))
  ax = fig.add_subplot(111)
  legendEntries=[]
  legendtext=[]
  ## Satellite retrievals ##
  # Hours 06-22 are placed on 21 Aug and 00-06 on 22 Aug so the series
  # follows the 06:30->06:30 x-axis window defined at module level.
  # NOTE(review): hour 6 satisfies both branches and is appended twice --
  # confirm this duplication is intended.
  plot_trmm = np.load('%s%s_%s' % (trmm_dir, ls, trmm_file))
  dates_trmm=[]
  p=[]
  for dp in plot_trmm['hour']:
   print dp
   if ((int(dp)<23) & (int(dp)>=6)):
    dates_trmm.append(datetime.datetime(2011, 8, 21, int(dp), 0))
    p.append(plot_trmm['mean'][plot_trmm['hour']==dp])
   if ((int(dp)>=0) & (int(dp)<=6)):
    dates_trmm.append(datetime.datetime(2011, 8, 22, int(dp), 0))
    p.append(plot_trmm['mean'][plot_trmm['hour']==dp])
  #print dates_trmm
  # Sort the (date, value) pairs chronologically before plotting.
  a = np.argsort(dates_trmm,axis=0)
  d_trmm = np.array(dates_trmm)[a]
  pl = (np.array(p)[a])
  #pl=np.sort(pl,axis=1)
  l, = plt.plot_date(d_trmm, pl, label='TRMM', linewidth=2, linestyle='-', marker='', markersize=2, fmt='', color='#262626')
  legendEntries.append(l)
  legendtext.append('TRMM')
  cmorph_dir = '/nfs/a90/eepdw/Data/Observations/Satellite/CMORPH/Diurnal/'
  cmorph_file = "CMORPH_diurnal_average_lat_%s_%s_lon_%s_%s.npz" % (lat_min,lat_max, lon_min, lon_max)
  plot_cmorph = np.load('%s%s_%s' % (cmorph_dir, ls, cmorph_file))
  dates_cmorph=[]
  p=[]
  for dp in plot_cmorph['hour']:
   print dp
   if ((int(dp)<23) & (int(dp)>=6)):
    dates_cmorph.append(datetime.datetime(2011, 8, 21, int(dp), 0))
    p.append(plot_cmorph['mean'][plot_cmorph['hour']==dp])
   if ((int(dp)>=0) & (int(dp)<=6)):
    dates_cmorph.append(datetime.datetime(2011, 8, 22, int(dp), 0))
    p.append(plot_cmorph['mean'][plot_cmorph['hour']==dp])
  a = np.argsort(dates_cmorph,axis=0)
  d_cmorph = np.array(dates_cmorph)[a]
  pl = (np.array(p)[a])
  #pl=np.sort(pl,axis=1)
  l, = plt.plot_date(d_cmorph, pl, label='CMORPH', linewidth=2, linestyle='--', marker='', markersize=2, fmt='', color='black')
  legendEntries.append(l)
  legendtext.append('CMORPH')
  l0=plt.legend(legendEntries, legendtext,title='', frameon=False, prop={'size':8}, loc=9, bbox_to_anchor=(0.21, 0,1, 1))
  gsmap_dir = '/nfs/a90/eepdw/Data/Observations/Satellite/GSMAP_Aug_Sep_2011/Diurnal/'
  gsmap_file = "GSMAP_diurnal_average_rainfall_lat_%s_%s_lon_%s_%s.npz" % (lat_min,lat_max, lon_min, lon_max)
  plot_gsmap = np.load('%s%s_%s' % (gsmap_dir, ls, gsmap_file))
  dates_gsmap=[]
  p=[]
  # NOTE(review): GSMAP includes hour 23 (<=23) while TRMM/CMORPH exclude
  # it (<23) -- confirm whether that difference is deliberate.
  for dp in plot_gsmap['hour']:
   print dp
   if ((int(dp)<=23) & (int(dp)>=6)):
    dates_gsmap.append(datetime.datetime(2011, 8, 21, int(dp), 0))
    p.append(plot_gsmap['mean'][plot_gsmap['hour']==dp])
   if ((int(dp)>=0) & (int(dp)<=6)):
    dates_gsmap.append(datetime.datetime(2011, 8, 22, int(dp), 0))
    p.append(plot_gsmap['mean'][plot_gsmap['hour']==dp])
  #print dates_trmm
  a = np.argsort(dates_gsmap,axis=0)
  d_gsmap = np.array(dates_gsmap)[a]
  pl = (np.array(p)[a])
  #pl=np.sort(pl,axis=1)
  l, = plt.plot_date(d_gsmap, pl, label='GSMAP', linewidth=2, linestyle=':', marker='', markersize=2, fmt='', color='black')
  legendEntries.append(l)
  legendtext.append('GSMAP')
  # Re-create the satellite legend so it now includes GSMAP as well.
  l0=plt.legend(legendEntries, legendtext,title='', frameon=False, prop={'size':8}, loc=9, bbox_to_anchor=(0.21, 0,1, 1))
  # Change the legend label colors to almost black
  texts = l0.texts
  for t in texts:
   t.set_color('#262626')
  legendEntries=[]
  legendtext=[]
  # Parametrised experiments.  The if-chain below is a hard-coded
  # per-experiment colour/linewidth/linestyle table shared (duplicated)
  # with the explicit-experiment loop further down.
  for c, experiment_id in enumerate(experiment_ids_p):
   expmin1 = experiment_id[:-1]
   if (experiment_id=='djznw'):
    print experiment_id
    colour = cmap(1.*1/NUM_COLOURS)
    linewidth=0.2
    linestylez='--'
   if (experiment_id=='djzny'):
    print experiment_id
    colour = cmap(1.*3/NUM_COLOURS)
    linewidth=0.5
    linestylez='--'
   if ((experiment_id=='djznq') or (experiment_id=='dkjxq')):
    print experiment_id
    colour = cmap(1.*5/NUM_COLOURS)
    linewidth=0.8
    if (experiment_id=='djznq'):
     linestylez='--'
    if (experiment_id=='dkjxq'):
     linestylez=':'
   if ((experiment_id=='dklzq') or (experiment_id=='dklwu')):
    print experiment_id
    colour = cmap(1.*7/NUM_COLOURS)
    linewidth=1
    if (experiment_id=='dklzq'):
     linestylez='--'
    if (experiment_id=='dklwu'):
     linestylez='-'
   if ((experiment_id=='dklyu') or (experiment_id=='dkmbq')):
    print experiment_id
    colour = cmap(1.*9/NUM_COLOURS)
    linewidth=1.3
    if (experiment_id=='dkmbq'):
     linestylez='--'
    if (experiment_id=='dklyu'):
     linestylez='-'
   if (experiment_id=='djzns'):
    print experiment_id
    colour = cmap(1.*11/NUM_COLOURS)
    linewidth=1.6
    linestylez='-'
   if ((experiment_id=='dkbhu')or (experiment_id=='dkhgu')):
    print experiment_id
    colour = cmap(1.*13/NUM_COLOURS)
    linewidth=1.9
    if (experiment_id=='dkbhu'):
     linestylez='-'
    if (experiment_id=='dkhgu'):
     linestylez=':'
   if (experiment_id=='djznu'):
    print experiment_id
    colour = cmap(1.*15/NUM_COLOURS)
    linewidth=2.
    linestylez='-'
   # Missing data files are tolerated: the experiment is simply left off
   # the plot and the exception is printed.
   try:
    plotnp = np.load('%s/%s/%s/%s_%s_rainfall_diurnal_np_domain_constrain.npy' % (top_dir, expmin1, experiment_id, pp_file, ls))
    if (ls != 'total'):
     l, = plt.plot_date(d, plotnp[0]*3600, label=model_name_convert_legend.main(experiment_id), linewidth=linewidth, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour)
    else:
     l, = plt.plot_date(d, plotnp*3600, label=model_name_convert_legend.main(experiment_id), linewidth=linewidth, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour)
    legendEntries.append(l)
    legendtext.append('%s' % (model_name_convert_legend.main(experiment_id)))
   except Exception, e:
    print e
    pass
  l1=plt.legend(legendEntries, legendtext, title='Parametrised', loc=9, frameon=False, prop={'size':8}, bbox_to_anchor=(0, 0,1, 1))
  # Change the legend label colors to almost black
  texts = l1.texts
  for t in texts:
   t.set_color('#262626')
  legendEntries=[]
  legendtext=[]
  # NOTE(review): c1 is assigned but never used.
  c1=0
  # Explicit experiments -- same style table as above.
  for c, experiment_id in enumerate(experiment_ids_e):
   if (experiment_id=='djznw'):
    print experiment_id
    colour = cmap(1.*1/NUM_COLOURS)
    linewidth=0.2
    linestylez='--'
   if (experiment_id=='djzny'):
    print experiment_id
    colour = cmap(1.*3/NUM_COLOURS)
    linewidth=0.5
    linestylez='--'
   if ((experiment_id=='djznq') or (experiment_id=='dkjxq')):
    print experiment_id
    colour = cmap(1.*5/NUM_COLOURS)
    linewidth=0.8
    if (experiment_id=='djznq'):
     linestylez='--'
    if (experiment_id=='dkjxq'):
     linestylez=':'
   if ((experiment_id=='dklzq') or (experiment_id=='dklwu')):
    print experiment_id
    colour = cmap(1.*7/NUM_COLOURS)
    linewidth=1
    if (experiment_id=='dklzq'):
     linestylez='--'
    if (experiment_id=='dklwu'):
     linestylez='-'
   if ((experiment_id=='dklyu') or (experiment_id=='dkmbq')):
    print experiment_id
    colour = cmap(1.*9/NUM_COLOURS)
    linewidth=1.3
    if (experiment_id=='dkmbq'):
     linestylez='--'
    if (experiment_id=='dklyu'):
     linestylez='-'
   if (experiment_id=='djzns'):
    print experiment_id
    colour = cmap(1.*11/NUM_COLOURS)
    linewidth=1.6
    linestylez='-'
   if ((experiment_id=='dkbhu')or (experiment_id=='dkhgu')):
    print experiment_id
    colour = cmap(1.*13/NUM_COLOURS)
    linewidth=1.9
    if (experiment_id=='dkbhu'):
     linestylez='-'
    if (experiment_id=='dkhgu'):
     linestylez=':'
   if (experiment_id=='djznu'):
    print experiment_id
    colour = cmap(1.*15/NUM_COLOURS)
    linewidth=2.
    linestylez='-'
   expmin1 = experiment_id[:-1]
   try:
    plotnp = np.load('%s/%s/%s/%s_%s_rainfall_diurnal_np_domain_constrain.npy' % (top_dir, expmin1, experiment_id, pp_file, ls))
    #plotnp = np.sort(pnp, axis=1)
    if (ls != 'total'):
     l, = plt.plot_date(d, plotnp[0]*3600, label='%s' % (model_name_convert_legend.main(experiment_id)), linewidth=linewidth, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour)
    else:
     l, = plt.plot_date(d, plotnp*3600, label='%s' % (model_name_convert_legend.main(experiment_id)), linewidth=linewidth, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour)
    legendEntries.append(l)
    legendtext.append('%s' % (model_name_convert_legend.main(experiment_id)))
   except Exception, e:
    print e
    pass
  l2=plt.legend(legendEntries, legendtext, title='Explicit', loc=9, frameon=False, bbox_to_anchor=(0.11, 0,1, 1), prop={'size':8})
  # Re-add the earlier legends (a new plt.legend() replaces the current one).
  plt.gca().add_artist(l1)
  plt.gca().add_artist(l0)
  plt.gca().xaxis.set_major_formatter(formatter)
  # Change the legend label colors to almost black
  texts = l2.texts
  for t in texts:
   t.set_color('#262626')
  plt.xlabel('Time (UTC)')
  plt.ylabel('mm/h')
  title="Domain Averaged Rainfall - %s" % ls
  # Wrap long titles and strip quote/paren characters that the LaTeX text
  # renderer would mishandle.
  t=re.sub('(.{68} )', '\\1\n', str(title), 0, re.DOTALL)
  t = re.sub(r'[(\']', ' ', t)
  t = re.sub(r'[\',)]', ' ', t)
  pp_filenodot= pp_file.replace(".", "")
  # Bit of formatting
  # Set colour of axis lines
  spines_to_keep = ['bottom', 'left']
  for spine in spines_to_keep:
   ax.spines[spine].set_linewidth(0.5)
   ax.spines[spine].set_color('#262626')
  # Remove top and right axes lines ("spines")
  spines_to_remove = ['top', 'right']
  for spine in spines_to_remove:
   ax.spines[spine].set_visible(False)
  # Get rid of ticks. The position of the numbers is informative enough of
  # the position of the value.
  ax.xaxis.set_ticks_position('none')
  ax.yaxis.set_ticks_position('none')
  # Change the labels to the off-black
  ax.xaxis.label.set_color('#262626')
  ax.yaxis.label.set_color('#262626')
  if not os.path.exists('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/'): os.makedirs('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/')
  #plt.savefig('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/%s_%s_latlon_dkbhu_notitle.png' % (pp_filenodot, ls), format='png', bbox_inches='tight')
  plt.title('\n'.join(wrap('%s' % (t.title()), 1000,replace_whitespace=False)), fontsize=16, color='#262626')
  plt.show()
  #plt.savefig('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/%s_%s_latlon_dkbhu.png' % (pp_filenodot, ls), format='png', bbox_inches='tight')
  plt.close()
# Script entry point: generate and display the diurnal-cycle figures.
if __name__ == '__main__':
   main()
| |
# -*- coding: utf-8 -*-
# Copyright 2015 Sameer Suhas Marathe
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: eulerlib.numtheory
:synopsis: Number theory related functions.
.. moduleauthor:: Sameer Marathe
"""
# Public API of this module; Divisors is a class, the rest are functions.
__all__ = ["is_square", "gcd", "lcm", "lcm_n", "nCr", "nPr", "digital_sum",
           "digital_root","Divisors"]
# Prime-list helper used by the Divisors class.
from .prime_numbers import primes
def is_square(num):
    """Determines if a positive integer *num* is the perfect square.

    :param num: Integer to be checked
    :returns: A tuple (is_square, root)

    .. note::
        * *is_square* is *True* if *num* is a perfect square.
        * The integer *root* <= (square root of *num*).

    Uses `Digit-by-digit algorithm
    <http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Digit-by-
    digit_calculation>`_ in base 10.
    """
    if num == 0:
        # By construction 0 is reported as non-square (remainder forced
        # non-zero), matching the "positive integer" contract above.
        return (False, 0)
    # Split num into base-100 "digit pairs", most significant first.
    pairs = []
    z = num
    while z != 0:
        z, rem = divmod(z, 100)
        pairs.append(rem)
    pairs.reverse()
    root = 0        # integer-square-root digits found so far
    remainder = 0
    for pair in pairs:
        current = 100 * remainder + pair
        # Largest digit x such that (20*root + x) * x <= current.
        x = 0
        while (20 * root + (x + 1)) * (x + 1) <= current:
            x += 1
        remainder = current - (20 * root + x) * x
        root = root * 10 + x
    return (remainder == 0, root)
def gcd(a,b):
    """Calculates Greatest Common Divisor of two integers.

    :param a: First integer
    :param b: Second integer
    :returns: Greatest Common Divisor (GCD) of *a* and *b*

    Uses `Euclid's algorithm
    <http://en.wikipedia.org/wiki/Greatest_common_divisor#
    Using_Euclid.27s_algorithm>`_.
    """
    # Trivial cases first, in the same order as the original checks.
    if a == b:
        return a
    if a == 0:
        return b
    if b == 0:
        return a
    if 1 in (a, b):
        return 1
    big, small = (a, b) if a > b else (b, a)
    r = big % small
    # Euclid: replace (big, small) by (small, remainder) until it divides.
    while r:
        big, small, r = small, r, small % r
    return small
def lcm(a,b):
    """Calculates the `Least Common Multiple
    <http://en.wikipedia.org/wiki/Least_common_multiple>`_ (LCM) of two
    integers.

    :param a: First integer
    :param b: Second integer
    :returns: Least Common Multiple (LCM) of *a* and *b*; 0 when both
              arguments are 0.
    """
    lcm_ab = 0
    if a != 0 or b != 0:
        # |a*b| is an exact multiple of gcd(a, b), so use floor division:
        # plain '/' would return a float on Python 3 (this module already
        # uses '//' elsewhere, e.g. in Divisors.prime_factors).
        lcm_ab = abs(a * b) // gcd(a, b)
    return lcm_ab
def lcm_n(num_list):
    """Calculate the `Least Common Multiple`_ of a list of
    integers.

    :param num_list: A list of integers *[i1,i2,i3,...,in]*
    :returns: LCM of the integers in *num_list*

    Uses the associative property LCM(a,b,c) = LCM(*LCM(a,b)*,c)
    """
    # Degenerate inputs (fewer than two entries, or entries summing to
    # zero) yield 0, exactly as the original guard did.
    if len(num_list) <= 1 or sum(num_list) == 0:
        return 0
    running = num_list[0]
    for value in num_list[1:]:
        running = lcm(running, value)
    return running
def nCr(n,r):
    """Calculate ways of selecting *r* members out of a collection of *n*
    members.

    :param n: Size of the collection
    :param r: Number of members to be selected from the collection
    :returns: n!/[r!(n-r)!]; 0 for invalid input (n < 0, r < 0 or r > n)

    .. note::
        :sup:`n` C :sub:`r` is typically read as *n combination r*. Order of
        members *is not* important in the combination. E.g. There are :sup:`4`
        C :sub:`2` = 6 ways of selecting two members out of a collection
        (A, B, C, D) => AB, AC, AD, BC, BD, CD.
    """
    result = 0
    if n >= 0 and r >= 0 and r <= n:
        num = 1
        denom = 1
        for i in range(r):
            num *= (n - i)      # n * (n-1) * ... * (n-r+1)
            denom *= (i + 1)    # r!
        # num is always an exact multiple of denom, so use floor division:
        # plain '/' would return a float on Python 3.
        result = num // denom
    return result
def nPr(n,r):
    """Calculate number of permutations of length *r* out of a collection of
    *n* members (No repeated members).

    :param n: Size of the collection
    :param r: Number of members to be permutated.
    :returns: n!/[(n-r)!]; 0 for invalid input (n < 0, r < 0 or r > n)

    .. note::
        :sup:`n` P :sub:`r` is typically read as *n permutation r*. Order of
        members *is* important in the permutation. E.g. There are :sup:`4` P
        :sub:`2` = 12 permutations of length 2 out of a collection
        (A, B, C, D) => AB, AC, AD, BA, BC, BD, CA, CB, CD, DA, DB, DC.
    """
    result = 0
    if n >= 0 and r >= 0 and r <= n:
        # n!/(n-r)! == n * (n-1) * ... * (n-r+1).  The original carried an
        # unused 'denom' accumulator (never updated past 1) and divided by
        # it with '/', which returns a float on Python 3; both removed.
        num = 1
        for i in range(r):
            num *= (n - i)
        result = num
    return result
def digital_sum(num):
    """Calculate the `digital sum <http://en.wikipedia.org/wiki/Digit_sum>`_
    of a number *num* in base 10.

    :param num: Number for which digital sum is to be calculated. *num* should
                be in base 10.
    :returns: Digital sum of *num* in base 10.
    """
    from .etc import num_to_list
    # Work on the magnitude so negative inputs sum their digits too.
    magnitude = num if num >= 0 else abs(num)
    return sum(num_to_list(magnitude))
def digital_root(num):
    """Calculate the `digital root
    <http://en.wikipedia.org/wiki/Digital_root>`_ of a number *num* in base 10.

    :param num: Number for which digital root is to be calculated. *num*
                should be in base 10.
    :returns: Digital root of *num* in base 10.
    """
    value = abs(num) if num < 0 else num
    # Collapse to the digit sum repeatedly until one digit remains.
    while value > 9:
        value = digital_sum(value)
    return value
class Divisors:
    """Implements methods related to prime factors and divisors.

    :param maxnum: Upper limit for the list of primes. (default = 1000)
    """
    def __init__(self,maxnum=1000):
        """Constructor for *Divisors* class
        """
        self.limit = maxnum
        # Primes up to maxnum (from eulerlib.prime_numbers); membership
        # tests against this table gate every method below.
        self.primes_table = primes(maxnum)
        # Per-method memoization caches, keyed by num.
        self.sigma_table = {}
        self.primefact_table = {}
        self.pfactonly_table = {}
        self.divisors_table = {}

    def sigma_function(self,num):
        """Calculates the `divisor functions
        <http://en.wikipedia.org/wiki/Divisor_function>`_ (sigma functions).

        :param num: Integer for which sigma functions are needed.
        :returns: A tuple (sigma0,sigma1,s(n)); an empty tuple when *num*
                  is out of range (num < 1 or num > self.limit).

        .. note::
            * sigma0 = number of divisors of *num*.
            * sigma1 = sum of *all* divisors of *num*
            * s(n) = sum of *proper* divisors of *num*
              (includes 1, excludes *num*)
        """
        if num in self.sigma_table:
            return self.sigma_table[num]
        elif ((num < 1) or (num > self.limit)):
            return ()
        elif num == 1:
            return (1,1,0)
        elif (num in self.primes_table):
            # A prime p has exactly the divisors {1, p}.
            return (2,num+1,1)
        else:
            sigma0 = 1
            sigma1 = 1
            # Multiplicative formula over num = p1**a1 * ... * pk**ak.
            pfs = self.prime_factors(num)
            for (pi,ai) in pfs:
                sigma0 = sigma0*(ai+1)
                # temp = 1 + pi + pi**2 + ... + pi**ai
                temp = 0
                for i in range(ai+1):
                    temp = temp + pi**i
                sigma1 = sigma1*temp
            result = (sigma0,sigma1,sigma1-num)
            self.sigma_table[num] = result
            return result

    def prime_factors(self,num):
        """Returns the `prime factors`_ *pf* :sub:`i` of *num* and the maximum
        power *a* :sub:`i` for each prime factor *pf* :sub:`i`.

        :param num: An integer for which prime factors are needed
        :returns: A list of tuples [(pf1,a1),...(pfi,ai)]; empty when *num*
                  is out of range (num < 2 or num > self.limit).

        .. note::
            num = (pf1**a1)*(pf2**a2)..*(pfi**ai)
        """
        if num in self.primefact_table:
            return self.primefact_table[num]
        elif ((num < 2) or (num > self.limit)):
            return []
        elif num in self.primes_table:
            self.primefact_table[num] = [(num,1)]
            return [(num,1)]
        else:
            result = []
            tnum = num
            for prime in self.primes_table:
                if(tnum%prime==0):
                    # Find ai, the largest power of `prime` dividing tnum;
                    # pdiv ends up equal to prime**ai.
                    ai = 2
                    pdiv = prime*prime
                    while(tnum%pdiv==0):
                        ai += 1
                        pdiv *= prime
                    ai -= 1
                    pdiv //= prime
                    result.append((prime,ai))
                    tnum //= pdiv
                    if(tnum in self.primes_table):
                        # Remaining cofactor is itself prime: done.
                        result.append((tnum,1))
                        break
                    elif(tnum==1):
                        break
            self.primefact_table[num] = result
            return result

    def prime_factors_only(self,num):
        """Returns the `prime factors
        <http://en.wikipedia.org/wiki/Prime_factor>`_ *pf* :sub:`i` of *num*.

        :param num: An integer for which prime factors are needed
        :returns: A list [pf1,pf2,...pfi] of prime factors of *num*
        """
        if num in self.pfactonly_table:
            return self.pfactonly_table[num]
        elif ((num < 2) or (num > self.limit)):
            return []
        elif num in self.primes_table:
            self.pfactonly_table[num] = [num]
            return [num]
        else:
            result = []
            tnum = num
            for prime in self.primes_table:
                if(tnum%prime==0):
                    result.append(prime)
                    # Strip the full power of `prime` out of tnum.
                    pdiv = prime*prime
                    while(tnum%pdiv == 0):
                        pdiv *= prime
                    pdiv //= prime
                    tnum //= pdiv
                    if(tnum in self.primes_table):
                        result.append(tnum)
                        break
                    elif(tnum == 1):
                        break
            self.pfactonly_table[num] = result
            return result

    def divisors(self,num):
        """Returns a list of ALL divisors of *num* (including 1 and num).

        :param num: An integer for which divisors are needed.
        :returns: A sorted list [d1,d2,...dn] of divisors of *num*
        """
        # NOTE: divisors_table is written but (as in the original) not read
        # here, so each call returns a fresh list.
        result = []
        if (num < 1) or (num > self.limit):
            return result
        result.append(1)
        if (num == 1):
            return result
        elif (num in self.primes_table):
            result.append(num)
            self.divisors_table[num] = result
            return result
        else:
            # Build divisors multiplicatively: for each prime power pi**i
            # multiply it into every divisor found so far.
            pfs = self.prime_factors(num)
            for (pi,ai) in pfs:
                newdivs = []
                for i in range(ai):
                    fact = pi**(i+1)
                    for div in result:
                        newdivs.append(div*fact)
                result += newdivs
                newdivs = []
            result.sort()
            self.divisors_table[num] = result
            return result

    def phi(self,num):
        """Returns the number of `totatives
        <http://en.wikipedia.org/wiki/Totative>`_ of *num*

        :param num: Integer for which number of totatives are needed.
        :returns: Number of totatives of *num*

        .. note::
            A totative of an integer *num* is any integer *i* such that,
            0 < i < n and *GCD(i,num) == 1*.

        Uses `Euler's totient function
        <http://en.wikipedia.org/wiki/Euler%27s_totient_function>`_.
        """
        if(num < 1):
            return 0
        if(num == 1):
            return 1
        if(num in self.primes_table):
            return num-1
        pfs = self.prime_factors_only(num)
        prod = num
        for pfi in pfs:
            # prod is divisible by pfi here, so divide first with floor
            # division: the original 'prod*(pfi-1)/pfi' returned a float
            # on Python 3 even though the value is always an integer.
            prod = prod // pfi * (pfi - 1)
        return prod
| |
# Generated by the pRPC protocol buffer compiler plugin. DO NOT EDIT!
# source: proto/api/swarming.proto
import base64
import zlib
from google.protobuf import descriptor_pb2
# Includes description of the proto/api/swarming.proto and all of its transitive
# dependencies. Includes source code info.
FILE_DESCRIPTOR_SET = descriptor_pb2.FileDescriptorSet()
FILE_DESCRIPTOR_SET.ParseFromString(zlib.decompress(base64.b64decode(
'eJzUvQl8ZFd1J8yr0lJ66la/Vu/ltvtZGNRtS+rF7Xa7jYnVLdkWtCUhqTEexpGfqp6kiktVlX'
'pV3RYkk0xWEtaPkMVfWBJIGAIhG5NhmZmEQPwFCJMQZ1gM/ICfE7OEj8kCCYSB4Tv/c869776S'
'1N04k0y+/Ehbdd5999177rnnnHu26//eW7v8/Y1mvVU/GjUqR5NLUXOtUlsZY9Bgv/198Xjxup'
'V6faUaH+VHS+3lo+V2M2pV6jVpXDzY+TxpNdullj491Pm0VVmLk1a01pAGQx/3/OBsvTV5Ma61'
'krn4+9v0dHCP37NUby1Wyvu90DvcN9dNv6bKg9f4fY1oJV5MKi+K9+foSfdcAYB5+j14re/zw1'
'b9obi2P8/vcfMFAAZv8336bLO1iAHs76LH/SeKYzK6MTO6sQUzurk+bo3fg7f4hbhWlhe7r/hi'
'L7XFr6Hv83c6E0sa9VoSD476PTFDaGZ56mnPmIPrMdN+ThsNPtPfUYsfbi06M8vxzLYDPGtmN3'
'TS9+dbTeplNqo0BwM//1C8rrjDn4O7/e6LUbUd69vyY+iMPyBvna8krS3e3Ov3cOOEXs0TUH8N'
'vSPn52m4Wy0VrUYSJwmRCR7JV/sUQo9pPI16vZrQOqFT+TF4wu8hHLbaCa/PAKG5Aznz/HRhvR'
'HPaUv+EP+1uJas8PL08doR5N5kBQgstZtNQuZiK0oewmB6BIEKXiAoDeh23y/TotUwvmR/L6/N'
'NZnPZzE15zQfPOx3VWrL9f0FJo7dnaOeomdz3GLob7v8XoXQJ7cl7UajGlM/rajK+Os/sW8Dac'
'3zfprLNB7c7/dejJv4vCLX/Bw85PcTacTNWlRdrDR0I/gGNNUYPOIHUbu1Sv1USlErLi9Ggu++'
'uR0Z+DgWpK8UJYtAZ6KknyXYc+PzWJNkrkDt+K/B5/iDtWiNui1FpdXYvNzDGD2YeXkazc6hlf'
'QR1CxA+3rAP1CqNMpE/qWHiNYT6VO7lEUayo5nanZiVho7He9FJwpOUvjgUb9rtZ60dN2yyz27'
'up4QHqqThI7W+hw3JE7QW44vVkq0G/o2IZGOd0zbwWf526pR0lpM4ri2SCP3r8hEfLSfp+YLye'
'Cz/e2VchVcr1aK8Xr/FV/vxwvzaL+QDH2/P5Ad2OCg3wVU647lvzdQY+67ocYBP2dJjf4a+nd+'
'wVDG4PX+tlp7bSluLlZa8VrCH83P9QtsCiCMx7L0/Bz/TePpr1fLNB/hu/kro0yaM+td93d0EN'
'emc97su4RtXqt2El/tl/vxwoUk5k8/4vl7NqXCTUew9TY2Y8tfbmxXFmOZsb3T8wtGxEAgspCR'
'nrwrC0RuzQJxyM8Tl1cSCTrZ3Rwe0t7q5hd4AgMnDmwq55iTSzsIdxkP+LjwowIDiI0P3ef3Ek'
'EtNOMYAimJm4QmRaT+Arxcof3dUkzqr8GDfh9QnRAPiY1aYAFDt/s9E9KOEL4aJatmefA3CxdC'
'/OLSeosFIJaiD5CzAAx9r7+NRjUXL8ckSUoxSB3sslIjjNFv7aqfYFMKGrwpM8r+E7syeJGhmK'
'EPrfj9Dimhe2WEiw4h9Sts+vL0ROjl3dSIWquKhgIAs/R76Ky7X4hJNDfnEZk+ch19/NccjbZO'
'7KBSA1cYfIY/UK1fom3faFbqTWI73FVhbjtDZxU4OO8HpfStxRZRBHc9cOJwlrOnjdy/mYJ2lL'
'KAwWF/R7WyVqGRNuvEgEnv0H00wOBZAx38Hv+gNGzViY8tlupr9AMycS1eqzfXmQ7zcwe4zQKa'
'nDMt7uUGQ/f4OzpGM7jT3z49s7A4Pzt5buquqcmJ4GmDBb9remZ6MvDw1/iFhZkgRzzTf87M2c'
'WZs8+ZPLcQ5Ife2usPQBmhwTXiZqtCo7vZ94WeGu1Wolt0d6f8xaaY62MaQzOa0oB9abFZp306'
'yC8e6HzR0u3cNvP2HDUnxtDPQlc/m2NBt38rMTvno7H99jZX9rN2t7XUZ0IjSZBKfdAv1iCqlQ'
'n10AzNTxB/M67SweNivFi6VFZNr9/Azl1inZPUnGa0GDVXROOgrc6QcQKghyQuNeOWbude6mHb'
'XL/AeEOTIhesQt9xmxWYagcIPu+0zOqLmykDl9EXj/j5uHaR1IA8i9iNb/EbaDN4mrhi7SLvOA'
'j+K36GTioXsR2TwTO0jCll7t/GRLB/qz015zYevMvfGT8cl9o45rF8qLdb+7crGXWKiAk9D84F'
'9p0FeYVG71fqtoOBK3XQV6mbN0llWmkSg16kvVCpl/fvuNK7/dx8llsPXkffLcdrjXoLUw94BR'
'0IyIw+wuS9U8hMfxLX6iLp2ty/i1nQzgy6SIw25/jx0Cc8vw+bdb5KCh6IoWF3re7T7CplN/ac'
'05zl8MONikxDpcJl5uk0HrzR33kpIua1XG/SjiM5AC6b59nuwIO76s1zCgZLTD+6yJJOhOxACr'
'6HoENv7fb7MVxzFr/V7+eTU1JlvVeOrXs3zI4xMee3zJ/JYNEvWNZvDuuG69NoILap3WJUKtXb'
'qifQaBQ8LlCogbTp6DhytQqPL81ZTzEyrDur87Uiyx74b8B40XulHf4evMHfnjkNMfnTsTEDxD'
'mWPlddY9qmcyz/GDzpF5px0q62yktKuNl9N8cPJ86eW16Zsy0H9/m95ohaEA2mJWfTG/yBRpQ5'
'wvbx820C1RPskL9dWzXbfOjuN+oBgHNtHLtDYqLE4m0/gRwOAdNervP7uYX2sVP0JYCkhwl/V6'
'O9lLSXFmv1VmUZaADZ+psoM7Ptpfn20tygtJ92mg/e7Q/CaNCgliR5q3GT1aVtV2Qw9NIsvbNg'
'Xhm63++Rz2AlWvVGpWTMEPwDIgELljGd9AEiRiEiUSx2OWpFRicyv4feWfB92QdYHzoK9zZlR+'
'gG379hC+iOmTMNO4k3910Rb9ZgdeVTiGOwusPfFi2R0FTmfTXHBG3/T7B34TVjGmQby2WX0jYd'
'HPG7cZ6PefcNbMZZ8HROGg1O+gP8xyL23wpUtAK/dt3mr53TVnPbE/cn6IJ0j0U5hPJ26p7rI8'
'g0A2hMgxmrEfM+JvLuucAxHAn71+NQ/+WOQ8rwSB1WhTwhcs8bhhc3n69Q8PTSaqVabsJUINs0'
'Id6DpjvMA9mrrNGU43K7QQrXcrNumFC/wu4ikMtUdmSYyh6/R/d4oKyL9zcsPmxxoUMM7/4NFh'
'+ofvRwrsBGFfqLjqX9JEBICK3xNhZ18+BG+Ze2mXNf4JPfwyTGSvVyzIJ3kNQYApyj34Njqaje'
'fRkF2ArwcX8H1F/5Kfrvnivpv9vpjRl+gRVgOmYbprzIdr29m3RgeDgb97aZ9vgFE0DBoGnL4y'
'rU9GoFFKbnN2UPWyva26W9OQ2SkDFmMdWzt37Vthx6d97f0bEYgwf8QqnOZgUx4uagdcNqUB68'
'0x+Qk1GdxrwaR2VlCJfZ2dv5hRltjx7qJDGbaQ9XVHG28wu2h5PEIOJWu6E8cONONy3F2CeNSf'
'0ttOKoWa5fqin/u9KLtj0zGXSSjrlnix7m0cx0Q0zG/Tk47e80XaY99XJP12/oaUFb2s6CVgdk'
'6M05f+eGj2bYrnf1bPcOnC1hTKXzw5quSXZ+fCRboKd2SH0lAxoc9buw/3VNDmygPPsONyMq6H'
'cOhrogh7Y4F9p3/fRoSHwxT7tUyW9/5162r6DR0Fc9f/dmOH2quNJvb7pBO77dOdP8dz/TU3QG'
'rsZRjSi+axNWek6e2VdN46Hn+Ds3LNlTnPHQpL/NXcWn2s1z/cGNk32qnb3Z8/sddD/VxTxGlF'
'uvGjZ0sHM1YZSoqP9hjluSCMqv1lu6lJd/AQ3ZJpRdpKc65V/3ZMdnGNW/3ok3aOJZOGQ7KVkZ'
'03+BAGL3J11HpAtbWrSRWFl38AO2tUhb0u/leQ+bbeTH0DP8fucwBWEb16Klaqy2Rv1FW2ObK6'
'+h98Of4xg47W+2HdQu1kvpeZxOSCnkxs96/vaMK5J623sWBr+F8YUL84sXpl3LX7/fe+/U/PzU'
'9N2BR4J2z/MujM+NTy9MTU9OLJ69f3F+cu75k3NBjkY+2PGIugzypOJdO0Mt7pkcn1i8d3xqem'
'Fyenz63OTi5AsWJuemx88HXejVNsE40IgfddPBduCemfmFxbnJszMzCxhEDyyQZy/M3x/0Dm7z'
'C3OTPIKJoAD41MT5yaDvxlfm/W2uhZ5QUkTHk8+fnF5YXLh/drJjkrv8HXg+PXkfTYgmOzNNk9'
'3v73ZHs3jX+NT5C3OTNFcaFZ7cMzPz3MXJubmZOZpnQJ80sPMzd9O0CCOA2KEvYiY0J205f8+F'
'hYmZ+6ZpRjv8fkAmJs9PLtB4eg3AIL4Ak+zU9PzC3IVzNCBM0ifFeJcF0crN0cTG558b9GPc9g'
'Ghhx9hLbYNHvT32ycXZifGFyYZ3+dmJiaD7aAC+5SmfO/UtDYIBjBj9E5N752VQe7GsjFsA4L2'
'YPz86LlT589T2703nvO7YHyiTRBcoAXrQD/NjqHz943js6A0+iCDFibvnZ2ZG5+7P8jd+BFlJp'
'mD0eDT/UP8LVDv5OI5+ufumbn7Ny6w0+heIGoHBmObz05OT+DLQQY6d2F6GtCQ2MA+C10gKp+f'
'AilNwCZ+LPNw8gWT5y4sEAXJwzszD6eJAucW6XV5OHvjn+XVDMdnQ6IYZ5RT088fPz+lezAzPv'
'1BBDNxYRbQnWiSDpZWUn8s2o01P7lwYTa4ng6OBzY8W5gcn2NSHOKV07VHX0+HlV8XHb+Pgrow'
'/g2LfgyvTlxYsIDjZldNTM2Pz87SJ2glTgxu9/tm5yZpVUFEN+NnSlN34ufC1L3EP2YuLATjdM'
'bbaX8uzk+dnyTOEZwd9P0epaxzIBTdJjSm2QsL88EEkMGooQaz+DH5gtkpfP154BjnwH7w7hxG'
'PD2DPTJzYY46nsfnz88AWffQ8wWsh3lI63rP+IV5jPIC9tH8c6doThMb8fD8E/N+DzGf8dmpwS'
'm/R6JnBq/d1HtowoWK1231WIJuhp529ui/GV2pj5VW6Uhcaa+N1ZsrR6vtUhr4dNTGQt1O/99Y'
'es7Pfo5IK+gPnhb8vR94/qe7Ctv41+CJt3rhuXpjvVlZWW2FJ44dPx0urMbh+QvnpsLxdmu13k'
'zGwvFqNeQGSUgnQxz4ymN+SFs4rC+HrdVKEib1drMUhzjyhvRzBeeDWlwO27Vy3KQmcTjegNIU'
'nq+UYprDSKgWgvDE2DGfGkStsBTVwqU4XK7TS2Glxm+dnzo3OT0/GS5XSBvwT/xKnkZH/fP0Qp'
'bLIf1RqtfoeytxeKnSWvXD1VarkZw5enSFfrWXxkr1taOiTxAuEvfPpWp96ehalLTipoKPluOL'
'LQTvHG3Ga/VWfKnefIgGevTicZLcrRNH/TBKwrV2aRX/rbQw1wRuEJLKGMjhWr3Fj4+MhVOtcD'
'XCYxpaVKUJreJ43grLFR4rnVQT7m1e14xbR9L5pQrQ1iYEkw4ehxEhRJ6WK8vL0s0yYSmSeIVw'
'jdBe9QlzzXCJBsC2obFsx0Amu/1Y6mPdMFREPdSbLTQyxhg/1DCPMyF/MaGHNDn0W3F+W98ILX'
'GlNUL/VisPxeHc2ckx3y8UcsF2oq7d9FchGKC/JgAs9Nu/84WnBQH9fZj/9oKd9Pcz+e9cMEh/'
'H/H3FHqozT76+2DgnegNaQcl1PU2gOmFfQF17D+Lf+Wo4YEgF9xRHAllqxCZttrNWhLG5ifcaj'
'RLWqN6jWdDfQXmbervQNATbHMgOYJsD651IHmCHA5u828sdNH3DtHAbg284sFQdy1jX3b6mAyC'
'R9vFvR+i0e6n0XbpaK+n0e6m0eLdSpN2E94MpyYwPho60fXFOBQjTBLCFsaj1bcL3fx+wYF4BO'
'kLdjiQPEEGg13+r+YU5NHgc8He4mty4UwDCxdVx3gL8/5drsTVMr6eNOJSZXmd6WUteriy1l4L'
'xawImjFDooZLsa9Ypm2+JC+ILYjGyjxEflEv60SsTdhAsFGTVjPSzb11/7bjCtGjpTlEDdK+Wu'
'aXbeQkyPLYiPN92j3Er8o0j3LMYNu/n52A/YyDXY+wC0T1OhCgrhDsdCB5guwO9vif8RSUC8bo'
'pf3F/+ZdFrtqSceUGM+VEs9D2Cg2KD3TIY5gP1er9Usg24g4Hu3KejshhlCtAsP3rcY100tcHk'
'nxMRKmdnbmHMb4TayFul+LWsRg4oejUqvKq+Zn+x6mTd5caXNgQoqWHKFlLEN0oKgxIrpdDiRP'
'kL3BPv+VBi354AS9dFPxhxysTEbNagVI4DEJwdMqMIfh7UpLXCtV2wnxSUx0SllVZqqVmvAxQm'
'4ZG7pda1WqIQ3f7Pioueny5gs9PKRrHIhHkIPEfVIIhn0kuNH/dwrpCk7RO0eKNWca99Z5rUrg'
'xVvPZPLhy86kHC9Hhh5BAGqL5w6dUXfRqE9lRt1Foz5Fo77BgeQJMkwc9UZ+eoYY1LgyKNEbQG'
'WbMSiPWxeCA36Df4FB3UFfO1R80LBTYJM2bROyDEKJFI9avVpfQRBeWG+SsBohIWQRAopvgsyJ'
'+lbqRMAkVOqXasx9q5Bssfl+YL5IOMY3XUgPQfqVvjxlc3fQtis6kDxBrg2u83/VU5AX3EndXF'
't8jSeagmy9ZtyA4oKRKe8wBMSetAzbZY4RP9wyO9MyDct6OGQYpDc0NAIlYC2OSNTU6pbNWc5d'
'F91Ht70zX7CZO+1+8pTN3En7ab8DyRPkmuCg/wtY3lxwnpZ0hpb07z1aUzshUhmiBlyPWN+Ewy'
'cwn0j/ttw4AjvQ5s96KF4fkXk8e4SYQSlutEQJw3BJN6mX0Q12V0zKEWkRxJkqJSVitEkDO9Cq'
'0kriKth3A/4+sO0EzDeqsembEJQkhMwRy+R84UXEghNSP0SA0JCYYNCG/k5sE1IwCH2k/Q2GCF'
'eiRdP+DAFj1c8TAZPyz79AwPcScoVj51Rm3mvRnVNiupfQvc2B5AmyIwhsL14wTe/ssi2waNOZ'
'Xjxu00eaSArJE2QnjeW1sidfQIv2vbRoL89d1aKxCLCw5DKrZ5EtEeyXX0f/CgtpAm7+xRaTFw'
'689wW0cHsZ5XleuPvtwuV14e63KM/rwt1vFy6vC3c/L9wjnoK84AHmAi/x+OxiN63ZkjRLGtWY'
'AWbHPxbe016LwO2iMsxtsgbQ9GWdktV6m3jKWvRQzMp/LCsHT7s5tiD4CCyEFnKYPjSsO5+HRp'
'zuAcvp8kpWDxCn2+lAMIFB5QV5JasHmBf8d8yxK1ghsnq9R3T1Wx7rjuU4KTUrSzFIxCr+ouaG'
'Z+NShLNE9sll1HlHm2cGV67HSW0Y55mLMfQxmh5TGk2P2DqtuBwZQqW7tTprD4I0qH+l1ahGvJ'
'R0cGiHtI/TEbM8VXKA2Fohcuj3/5EnyfSwxqryFz3VkPlclSWmMCo164lwdjvB5Woc0xDujms4'
'fZGms0QHqXJYr4nKY8yj4SWcyvhVoKTZriUj4VJbNtFwwvI6Yv5NarrM6l75OKtNJL8roqbi9W'
'SstLxiqcVKiaia1KkFKQJlaZqGzlkJgf0yXCkbQulS0l+zpN+lpL9m9fwuJf011vMfNxjzgiZr'
'oq8XjGmOCmNunM59jQgY47Epb2nyIPVU5LQPG7EcJzXoFCBBH2NvTDRQxZwfgmeMthsrzYgWlq'
'lGgj9AZLX4UkfnY/7Q4XCaQG2syjqWypzaS82l9gof2m89ferWm29JMYKN0sxgxOPpGiW0SzdK'
'k5XQD+YUlAteRC8dLL4rF87iaG8YJM0ag1qKq3XitLSJx2hEzTWmlogGuoKVvViJ0qUFumjduR'
'NDgw4J8ANDA2lXMvVVKKfyLRXPeqihl4Tk5KFKgjVSICqNqn5OyAiRxmXdVjCyWNpejauN5XZV'
'qKlJykgFu0r3efJPoEh826VJqMIvssyrSw8GL7LMy8jNFxFN7nMgeYIUSX1tKCQf/Duos6Rgnl'
'ON19gt2jA3Ob+US8GigYFeWq3Q6cVs14oMN/LDUtxEtGcYrSHMjs9TqQrdpYo/vrnDgXgECUj4'
'pBCM6wApmK81u6kr+BGPt9NPe+G8kyISIpCCFlh1SMt+7dDMXDaKkvk4DjMOF4ty6JGqErQceY'
'PHcUTT1j59f6cZHSGfx1dwQB5A2BIpKA8Q9sQjZl7dwU94LCF/0rNLgEgaIkKxGNXK1fRcTxOi'
'Hc36guUWrELULMMdIRgTu5I1d6YWPbuZmO6BnQhng/VwhQ5GNbNSZrjdZnAFB+QBBOU4BeUBgk'
'R8V7fCeoKf9/ic9uZu5n2kwYhCkpL3mEvqTFovJkI/E75QNIEHfpCel/iI11yXneabWTCn4qUu'
'VSOVGWmsqTnCgzXSiciu6cSFhfDwBItHP7zA1tAFOgocUcsoFIVmvZpuav4KvwwEElONSyohgW'
'0WyC1titM6NdY4XB5ubA2EJV4n5nT0Ekky04xn7SttnTEkRqLZmXlG6DPf6dQ2MR7LsI0ZWkyo'
'YgpmBk6bpBmJZRr/jDbWx1YqraM3GZsrqcAxOF6cGq7L9dLRiZj2Mk1tdCIG4YytlZ9ORDOarh'
'x93FnGey/ML2zQPkkEse0D+0a1PFatO1Q9P5xEC9sbs70NuipjVnvB0seE4xLC9EHxtXBmjj4m'
'CwZtRywsoO+6c15ENxuYgaHmHmI0TLsuiMm5n47lKcgDqEin/hSUBwjH/o+Yvd0bvA5d7Sr+np'
'fdBg7PIg6mVmImvA0k7nAzUPSIn/JcOR0Y5d6h763Jm08uxDwrpYw+Tf+bnlm4DMmbWfYSLnhO'
'fQ7IA8ino1cKygOEs9cb88Qgnha8xSMt+a+gJf9f+Q6eqyqPrhDjIrFzVHRUeNtlpdFaA8YrZZ'
'Os7TOj5ocsi7YQRSN46sPuRj1AFV+t1x+CvlkzLH6apk74KhPlMtpoH2ItXM+1ij/s4KV6m5Qu'
'GiebUmSju2o6HqUuB5PiDvFDyrlLy2t1sPQ1kEgkHZGWCEZPvPIuSB4dMbH/yAjdWv2SH6qfjV'
'UXQziENMftP5K1ltDBk1QgfY2Gsh1rRMtIq9RNa3aCf0Llf6uIpuuJ49HLlbKzACM0SVbK6YtM'
'IN3WAs5v7XdAOYAgIN7vKcwLfgeNBopv8lwLNY7GJWwQe3K28yTtt6I6WdqetjawNkYEj2OSL/'
'jSEUL5bWW0rSQmJrrcwouh5o7wqYwOZ2mqtyqItbpwJ1KI/RPbwrNRWWkznatnptHngHIAbQu2'
'+68zc80F/8ljtXeJmcAl2oLf345IOWxVNrXYbzoc1RHdyZPIu1iB2thURhJfnS5/y6233Xw8nQ'
'UWgwe4zwHxmKEnvtXMIh/8FzQ6UPwFc5yhU4ZlaDSnNtF4VG2trotNPOJ1UzMqcWeVkdRw2Jn9'
'sDmn1+kcD3vo1cxfj0IbJm/HD4WSR7vbAeUA2kd0+fq8wrqCD6HRDcXP5raaUqTuRGIPzBQT2s'
'EMMGnvNLNKrdwuxWUiPg2mwiQmZbMmZuBn/HCUKb0Vr5H0p9k1Lc/binNDbajXw9XKyuqIZfN8'
'xAvxDIk4hmWVcDSBZRefYc2H7fFsJMGsOJQPv+BGDYnuzbepO1JJjHQ43CDNOm6xmSo0PPHISB'
'i3Sv+klTlxU2gizXQXjYRKLcK1avQWYRGSPF1GaNC8QoccUA6goeDp/ntzCusO/lz2149998so'
'KKrUZDGvvIJzKiYgMxIAzhnEthtQNun9yvI699kiPacOqzzHa6LtWeFl7BQyCxeFGrYf6ol9Kz'
'SXK0m0tlRZaeP8BVwjoRWryCuKoDX2v1wVCzh967FbT9ycIhpa/Z9nWUA3IfrPhQV8ybCAnuBx'
'NNpTfLtn5sLOBHbAJ5bsmvFSvc4TBM1y9l9J2F61zqZ3Y/AEtRhJaZotRSXSPOqEj7hWb4PwSQ'
'FmN+ISG/NgAG3SzFLhdWX2/RQwAgWPJxs4oBxAu4ip3Keg3uBzaNNfPGkJQ7TPq5MqQXiP7oFO'
'yQKdirvucUA5gPpIMX3MLEgheBKNdhR/zi7IsAkLGWbyqjOvYeWcNj1bjzngABYR52FG+YSbpR'
'pfhA6CLubBnebXE1CYw53+d2H+tuM333br6XTqBZr6k0YBN6AcQNtJw3yhgvqCLwnmn2MmbtgJ'
'aM7ljLzdr2qw9nt9NIQvZbHfR0P4kmD/Tz0+7f8NNNpfypFG+/975X47JkRzpikVgh3+j7M5g/'
'W/fwAWbix+1QvdqhyhZvSKmr7cjNlwshY+Z35mOkSKm6hvyjcTYa80XHY/uuYa1p3tlDOa8zJC'
'emru2Y39hwnb5i7xaaC2Ttod64Oia7QTZvNL69ZkR+pH0iItu5kYVwHr6G0xQ0Pjvsgr4atlVF'
'yU5gs7DRqI8f2DUWkNyAPoQPAMB5QH6HBwxH+ZQaAXfEsY5jeZREjQKrPfwgLs6qrWfm6KPhhu'
'ukwyQLYtERCd2stsCwcF0iHCJzFSjkSDboJ/GuMI240NwZFUbP0zWg2w09bKT5eRpHiE0fhbqT'
'mpWzXob2FbBQ4oDxDY7Ms8heWCH8mx7vliksoqqqdmw6hcbmJaRPYXKxL5tUn4C/Ydrz3sX+H0'
'+MIwsErIwTkgTrtD63OyQeMKHyOmZi+ehFCn/55y6AEaB4+n4IA8gPpY4TSgPEBQOO9TUD74Mb'
'x2XfEuDujTZF8OnsAiVlrrHecehiI+gaa4GcmYb5FCK10XHJAHUJ8aLbrVqEqgg8G1fklBXcFL'
'BLFzIUe6mr3GXjPi/4phGgArMcbrAG/WRjuZlHXwOUIwcUaHmAn+jAvyAOp38AXb6EsEX+8y69'
'4dvBzvnSi+2cuOj5NaZExJeJhHsARFvNQ0LIrZ/pHNBux3cEJG+VJci5crGuLldkSLMhKyC26p'
'XSH+wwf3VUNPqaELI3Lm3E2ofXl2zt2EhpdjzkUH5AF0TTDqgPIAHQuO+79i0NATvBpdfU/x1R'
'1oQCZNaDLwLr9EukLESqpVsEkSNcqLaPymi60mp895kmxKqFlGls4PlrNXZ6fcI0PvD0IH5AF0'
'fXDGAeUBuiN4tj+moN7gZ9FTsXhtOLWpvFgVTdJ0AbMUvxE4IA+gncEeB5QHaD/tiZ80uC0Ej+'
'C9ZxYvbvEljcHElqipbaLuDiEj2S8nqqGNMVtB4YnGKhxG7i4pEPoeyaKvQLN6JGcNj92qHRGo'
'GFzvgPIA3UCyaFlBfcFr0dNY8YIbemP2KolVJ4nBmK3q4UO1+iU5WZgUbBsNFyGECKcz2g7OmP'
'togPylaxyQB9DB4LADygN0E1H5LQrygzfgtaPFG+RMXBH/ryVdwmUJPmFUG3O+59P33pD9nk/f'
'ewO+d8QB5QEaCcb83xLu82s50tUeha72hlyYLVnmaimdy09rapwIQgWbLTe32Mx3zYc7dcdn1T'
'TBqm+CApzTEN60UR6kC5Ue0tdI20JklzgcnIEaA6R06LSkr18ixoYwsYwxtb7EslH5XAsKB72z'
'Au9nheMMoQxikxLOEA7yMY9/Qxn8DWB+sPiHXojUO+GbmXAuhIMrckORZ5jeXYwIVm9xljPuV4'
's85pzaStCl7QwfSndhGi3ATjBSDunFqFI1WuAI8ay12NEKERmSpKEh6AnBIWbf9ahz/zdS2dmj'
'2t1vQHZud0B5gIJgp/+SnMK84D/mWD3+h3929VjJ6F+jhsyYoD3GuNjvgBg9RkPuUc2OQNCQv2'
'NwmAt+j9l28Su5y2t0bvjDYZT4WCMMH9FjkvrkWHOJktj1AGbsB46fIf0YrO+OW7RCm2Crzjr2'
'OWwdeuzXPSs71v2m3/G1zLd4aTs2+cavZ7ZFJensUMdn28i+uBpltkeV2d/Lkj8o+/dA/v0OKA'
'/QAJ0O7yRQb/ABcNQPg6Me46OHWrq2UBsdolYOA+n8AXCYwN/BP8FgPsi6KH+0V3cmg3odkAdQ'
'QfXGXt2ZHxS90fTkBR/KceyaaYOTx4eyPXnSqhBsc0B5gHY4Y8oFf4zXRmwbKGN/nEqgXkXXH0'
'MCDTugPEA3Bjf5JwlUCP4U6PpzoOsGB12OFrsJiiDq/xQj3MfDKTCKPsI8mD9UUBR9JF29gqLo'
'IynzKiiKPiLMy/TkBX+WoqigKPqzFEUFRdGfpSgqKIr+LEVRgVH0GOsatg1Q9FiKooKi6LFUSB'
'cURY9BSI/6txOoL/gYUPQpoOgmB0WuhrslpqBzfAwDvZZH1ceY+niKqT7F1MdTTPUppj6eYqpP'
'MfXxFFN9PPJP4LU9tg0w9YlsT560MsfYPsUUgXCMNT3lgsdTnPfp3ns8xXmfYurxFOd9iqnHU5'
'z3MeCTKc4ZQDj/ZIrzPj0AfjLFeZ8eAD8pOH8unz0+A5y/P084vz3r3ZRQqkRDpqOkjnifdduG'
'jbkSVFZW96En3XXT9E7xT6zB5zCk64vPyLgPVd+kz2ziQjTR5fzmQQeUA+gQHSTe4CnMC76Q4z'
'ya5zG9pPo57CRsY8ffLLU02k2tgDyFMVsSdDPL7O3iSDCZIZrJwGIUnlpjSmVTQ6neUDatll8z'
'aM8McacDygGEhJX/18wjF3yFV674SbEjrkZlcRmrZWKZFBw4izgyHiZGIznUDejYdWUIvgl61Q'
'OgzI+UoDIHULpdRGWJQ25GrXrTSBLX4OSHWjYtjDgF8bIoVBkmEc/NZt1GKDtIwbrxfPc6IEYB'
'or3eZZCSD/6Odx1O/g5S2FUvXW+BjsvO2b/ypP/JE8S++ztzFDWgHEDgBa/OK6wr+GaOQ0N+OC'
'8TxMyq9ZWVrAFZQomWtIEcPKiVcMfqupm7MEt6INaXEROeIEZkPa3QjGOWOOr+Q2EuUbyZ0CPe'
'A7aUWuKn9H6J0LcqT9eNBz6DRT2ZyNbusHI3o0oiahDt8nCibTXxUl0C7MUHRy/NzZ4Lv6+tCX'
'zoULtNjwLok0iyESFWqrp+e3gxSee+ojkWV7V8QGKMMqHO0sEqxasy4IByACGS5cOGNruD74jR'
'7D3qdalVWpVIuM2qpN7AAcYGOxuzkWjMhgSbSBaKBphKcz/VMm2IAsxTuv2vemLKroHC1BHnaM'
'NGPTQzhBzg+ex2QDmAoFK9WEE9wY/mmVy/z3oxV9st7hsuQIkFj9YllS/r4KypSfq7nIEzRBxH'
'+fMDDigHENblb8269AYvyfMJ9Xe98Lm1+qVqXE4T+tio4BqHWYaV42osMkyUbj7aJ5pRIa5xgh'
'GB85mWz242fOYhxN3rkI2x3tfO6b2Js+rfXY+jpolkz8SzEFlf4vMbnyquzll27JZjp29NMQM1'
'mie93QHlAIIK8+MGM4XgpYKZllm9tUqC8NAxDTvnOKWlOK4hdwqFcds1Y5pmQ4EZOWaE0Oi46U'
't8UjmOynT8jq1jQiL7FcWQS846QqN9aXa0cO29VEb7upzC+oLX5Jn3/0EmNggcGhXi0+AwJMm2'
'iOjYSmSNncaOJPGxNB+RifCUWJ+gRpyPhfdhJTlelhlkHeN3OmBhb0OKYGHh/Ink6jWJexjjjX'
'qVjRSajna7cEPuXssQs5XCyjKdHalHUUkctCkWoe0ygnY4oBxAg7SHf9msuR88kme14pVXhcVm'
'K+u6vuLU0kZAZMdVGNLcz2xr5mbIN7DDhu2OB7nHAeUA2k+6wH80M+kPXp/n+I43Xnkmyny4/A'
'DEnOZClDnnPKURqAKGbW/MitgCBUaMxnaZbITLJny1nybH497rgHIAHSAt/ffN5LYFb0KjEDEV'
'V1wmO5nLM9PsIE18Cf2Ud5Ff4WeXDxrHmGmYMV8JE7R5ya7ZXydA8+QpFB1QDqBrg0PIBhXY9u'
'AtefZ9/czVkGO9wbZJNfj8C5LkdkRB5m34ooByAF1Dp8uvm9kMBL/NS1u8pGcPCHnRAP5l9tCJ'
'tRDlacJqZTkurZeQN6b5wRZrmaSBMTUqJbFz/kJmO3y1W348GxrBEyf88NQDB5QDaBft4q8bFr'
'4jeE+eI0efzF32THN4s5I1R7bS7X2otVXrznbCbb7rg46/4aSzyUFnIdVpM0sHFZvdKKmFLlvq'
'5qkeIq6CHq5AvztofRj1+xxQDqAiHad/xqxPELxP1Ll/nzMaAcnFUmWJBOFDlWrV6IyGmF2nh4'
'Y7grgiV9DbkgqHpQbQSGjrCbHzC+19YbNsKqUjB7Obw7aS0Ei4oajQkX/BvR8Q7t6XVU8Cwt37'
'oJ4M+u+GI8QPPpgPnhZ8HCaTt3ipPcRxg9hAcMcRazCoC209cfRs1aiQ47NTTIacWny2svK8do'
'ywczbYOwmaErwuE8CjYdjCZe9zLC10VDD/YRORMaxGMghbGryxuPpsoPkQpnsTG4gYQHo+g65x'
'QB5AqMqQgvIAoSzDBzyFecFHWEEv/icvnK9FDdJtbYjpWRPbAy6QnhM5xjJ1NlyKhxG4JToQa2'
'SJ6aeVzaaxGqmRSr50aJRs6IBpXjCr5887HxJtcpEirurDofbMtaXcuhrEffVl8Fy6HRBPr0eT'
'Hnw17xEIcd7PUVAueCzPx8LbwgUNzZRkOc25PcyDPKIhHs0KHfVVE3WNCNoXbKjmxGNAHkA71P'
'TtGxtqns9przMLkQ8+xtu/+NJMmpw4C6+QKaeVMrZMlEvtg08pT46HR6dnHmDBAXkA9amx1ldD'
'JYH2kO7018go6Q/+Ctvuf2HbfagrnFDrZBSeU4fDuHU4zBN/xxY7HFlzMdEJtOhSfES9b82YYx'
'yVnkY0HJJz5ixUMuTGKkm9ykd7KXvl07eJ6phmzoRTST2qtlJdxiSjrFVWmpFGCsydnRw9Nz4v'
'JWgedItDP+hbNMqtPOAYpnkHz7W2FLzM4efi1BHxN5xo7I34nBWkdatN0lR2KqFQgtSVkLTZ/9'
'0hYfo9zSMz6WP4+Kh83IfRgA0j7BnlYSEzjOW61gqQefH5nNteBFNMS29JQh0hTOdjDF9t9uoR'
'eTYfiqu84CBRU8PUhiidGBQtKiFCKberYj8Cs4Ty/ld5joZ8v8e/wS3/GoS7u/jbnrPi4lK00f'
'0WydqClmo5RhLaYVqaRjtZPWKdZFxMSnLYVS4h5n7ILMEQFpN/8Q+iuuXKw2PmO3fzuoTnqvU2'
'5x1+X1xqsT+JzTxGfBN6QqGmnWYStP/+Ot1//crh/zrPyewpKA8Qstl/00zfC74q03+dKPCr8c'
'NS1iLmimirrAfVUgREzdIqqu3owsUPN2z0DAfizd8zPnrcDw+fPAYGiR1KRwtWFPDkxC2nwsOn'
'TrqPRjKFA0J705ZEw/kZp3S/+mm+mp2qJ7NwpwpG/lWZ6n/JKywXfFt46FvzHOogn9Hv6gSz5u'
'Y0zcX1nAAtbBSKqiu4nWR1TWM416AtsPKTPoHsyvRUrdeT2PZYlgQ4IYNwiBQaQtHokP1eZNGm'
'Xx0Dm7QljRjbY+ghaS9zD6P0JiYidDa68qJKw+1Nn6YDHDOVrpgHpWVSlMasLj3SsTo2zTMWC7'
'wZ+t3nZofEuk1/2V60vkNiZwOEmYgHrZHH9R4MEpFTTEzdWXcYaL+dXXdQ77dTEdOvsvPbImJQ'
'8WRb8MNdJGF+oivwmAHgVEsQVLbjGKJtzAB+rIutaK/PWUNCFL5QrlZ74IUcHzi2FL0oro5Jyc'
'IxW8tj7OIJvYLtATkvoN4hUZSfygfIWD4SPNyoRjWN2ddgUFoBlEmqyvpUxPW5WXFF/joPRKsm'
'jqbVFW+9tbS8dPJkdPr0LbfeGt0aleNbyuUTJ48dO3niluVT5VuOnTx1W1w6VT4q73NX2stRO5'
'GjF08obDGdHPPVp5+/9ZabR8/fettxWYptym0YZwUH5AFknK7blNsQyDhdt/Fy/XiXHmIUQD0x'
'qNcBcauCLuo23cwEMou6PXgJFvUVZlFxuH9JF0cPcPr9dl7Ul3ZxpOFPeuFd7ao45bFGujCpYy'
'h8sCF8Njn6Yv2LjhY/eNQ0ILD58wcfpOWJx2gP2leMRB1lvdx5SQuP2bv9BHvbFXsvTbG3XbH3'
'UmBvjwPKA4SgxmcryAte3sWsepT3lBCeJM8kLQ4YYYbNh16U7eMi2WPph6ECvzz7YU86NZxzuy'
'KbQMw5gc2B4DXA9psI28W3eVZJg4DMePERYyNxqCHf2qR5WbXOM2c2uBWm0jRgFUMfP3d+2Ild'
'Tb1VUErVh2SLjPj8hE32YpWX6h9S7wC0AcPGa0Abu/w/5tkwbTwitPFuz6buMVM0eltmXjUbFM'
'ehP2xSlwC7ZgWKNB9TSFRBOo4IdQyJNiVlTmF3oP1UPXrDi/EVKDQ/OMTa28zEzOGLUbmyRurD'
'mfB8pfaQGIdL7VS3twnu8jEN9dJFHVBqeiRd1AGlpkdSahpQanpEqOmcgrzgF7vYHH8ifD478c'
'2Ejb2wo7ZauibO17F/fzH7dU96NkETA0pSBIKj9BU5heWCNzIrKH7LC3H/HE+dRESJFP310NzS'
'Z+w8qvvidha0GnEitsyw1fy7FBsyVP/PckjLi2hvt7GJHLNNDUvmLyjSUUAMfkk7LK5Px/osy0'
'FzxDedRmlVXaNNlKq0I23cGCpdSJmo1PtncATx9sYsJrFGb+yy4m1AxdsbhRN+HCeoHcFvY3O+'
'B5vz/8EJKq1R5UYhMfmm7tZm3HY0LrVkjIcdFy1yFKJmFWlBRVvZR+0sONdL0VyzcFq3tAEaYp'
'pJP2KrSJnMrYVVE/uTvl+xAShYkWfhxiF6+OyjP4ALDX/AFgiU1+Dqq9VFcUiythnDg1LasXJt'
'RM9mzieZQthB5KeTY48D52fY+lpcCVOXOO1AzbH8EmJlnbmniV/i/LXtjGk5URo1td/4IYcL61'
'y4Lh13UCubrZikT5xVHuGDvg+XhVCuKHLpqQooTGMYV2JZSJNYpOcX5ZswOP52F0epPY9/gm2+'
'Q/SkO8MLUhbMyFS7ImNOvd3zdF5scSkFrooswzl57DYTpbhDmdc7UqLfoczrHakisUOZ1ztEkf'
'ifnsK84N3CPr70T2QfmWA9BEfUN7CQccYczuzGMEmnFxDoGoRNGnIraStEdSw/TFKG+vmoF9ky'
'wD22v9R7ADc5SkNiz1Z2WUlHh7FmnSgX8t1BG7juu7No8wQjhlfsUK77buEVvwleEQQfBK/4C/'
'CK16fWFsaPGracKz3ZryTh1Y22s+VpX62hcIYa630NkbUCjR624yrJs02ScY+dPn3zKSd9ERnw'
'dBKambchWXzgPo/7ftVM0IpXcP9jWCXahkc3vK9SK9cvJZunnJ665fQp9PAc0sJnlvjwfPlXbr'
'35xOnjp/HKhMTrUnMSxO2HsYxX+tRJvFdaadaRI67vbfqRUydP3nYzZ5WDtal/A3eLcgkSLmuY'
'DpM3+lpUmpnf8sO3MJZQLv5SzFXlV1FyJCV/pLs3OXbM9jqi00o7h1VikzU6fvzk8ePCA2A4/6'
'DoToP8E2W5PwS6+/OuIM90JrVxPUB7ic5eaEFgFx/uCrqCvcW7wnvSWAs+1I9kq/SOhc69shxu'
'Upei1rrdIKyIvva4ndM3ufudHeAcwIjDu90Be8GfoO324jPD6bpL5CoKRFVnhafGtga3T8+8Xu'
'gA5wDuD7b5dzvgXPAR+dQJLU4dZzYV8eVmnbYQJ3up9qBKYcdn2fa+8bP6BXz2PgecDx5D213F'
's/zZLej/Ppf3GBVAk5d0FEnHMGDF5a4HOsA5gBGn85jHcCz4J0Stfq8nW1hLAesVrkZQKBeRkH'
'+OydCgzjHag2IQUGuB76JOLTIoaJY6lSMWzGLVTdrNi2D5Nt/IEeJE9lLGgx5WOfs2rYhuyvJb'
'/6rhtIEKKJ5VjwPyAOpV7TpQAfUJ0a5PK8gLPoXXhovDV2S0St6mM5zS+N2dDoi7GwyGHFAeoG'
'cEz/R/wixALvhcFwdU3Eybfa2iaoIz03rNlL/mpK/YnfKJO90aXGx0M+sAMYVEl40CAmkIcD6U'
'MjOAKvu59FAfKDV/TtSJFJQHCJUozisoHzyB144Ub3cnwBc7OcWe5KZ1rprGBnvOZNxk7eCTeC'
'I7DFDzExjGDQ6Iv4r6XjdxhNznIST/B4TkNYwRZs+mxkh6jt2umeDUvDvo8yc0OfhpwRdFL7mZ'
'lCTL4W43FfOF0UCm0tlwVISaKVGuLktTO5n7GXRAOYAgzR+0uclfFnvAzAYOe7uWQ0nNjhh+5C'
'iOMi1o8zYjvYldErnj8Mw3djigHEAwD5g554KvyEmS5pyY434tvjTC9V8qF0nFQfjrFog0HXME'
'cZcNQcilXePc+CYQ+c7g77E4X+qmxflJL7OxhpEVN6zdi9JGn6vQwfZFzkE666uFtUSNDvyIrS'
'ccaWbSqiSf2BdWwreksi8hnJ58Ae6FOhOeOOYP9YVTxpsBqbmT5vH3XZxk954c/wZV/K8u9iG+'
'JQfvUlWrPrNvB6VIo4Y5VaDqC0df2jPnRueUY6e+gnPqQdxsyt/hi00f9O3BN+N5Mo4x65nRY7'
'Djt8qMwTnhqtuJhbZW7oeLV++4iEwYnF2YcAbi5lIFwZjD+nA4NSZblUC28U51XjP2+hyQB5Cv'
'PtOdyoIJBJ/pyz2FecGPdNN7Ty/+gLGV2aDwzeeYxdaYuu9M9cOy7eMqbMK6eg84EwFj5wENOC'
'Ae447gWgeUBygkXv+oIZ9c8NPdLEzekcsQPWzvNJysRe1q7XALHDWUMcSJhU34elJfbl2SKpyy'
'G2pxDJ/Q4ciWpyitHyHe65yYhRKxzlq4LUslmoVvsZyMWGubnw5Ec0f41nonKCAt/cDX+3VWRG'
'/EW5junAVAdD4j0gX1ANTP0XwG5AG0T4XtThVVBIKw/Z+GuPLBq9DVCA6g89Y8kilgYHe2bq2N'
'e1sXQdr7cO1WK3pBAhBq3J7wFreg1qS2G9gcmMNzye8wtedIBKwG+ZvQnc3MBVdEpXsy7sBknj'
'D5qiwmkar0qm5bBmGnSlsCXaMZdDtV2hIIGXSxgrqCn+nmCMIFvnzAYRai8a1y6JvRIbk6oOUq'
'll902N3K2WoV/BnzIRfUDVC/aloC8gAa1HRFAeUBQnmNN5rF7w5+rptV3Vfhogi1NWxY3jQEly'
'WU5XaV2sZ7SZqcKqB2G+ck17TSyNZf65iqZoY4gao6RjPKggPyADKGYQHlAYLq+utmdj3BL3Zz'
'gtf/7aEuC6kFMWJEzaU1PGaOB7DJrle5HmOcu2rCsplmWVt3/cssTlJ7o13rrBx05on6FL+YXd'
'geWKWzC4tcg1/Ewh50QHmAkHz2gIJ6g9d1s5w+H87HpWbcEh8KK2yCfteiRAcqIsqYJzTimIfM'
'RSdlrnrmDLWXxsUf6HVAHkAFR5RxldVuFmX3KKgQvKGbNftbja/W/cII9shiwgOWm1FNUUy+wq'
'3t7twCDYH76nFAHkC9qpsLKA8QdPPPG6roC36tm5XzPyTNK60NzHlErsLEEUhyE4Ypp2qKFHRo'
'flco+js+PTGi2TXOvUpSM89+Xu3NUvYqLm+iX27K5Oi8MxBO1i5WmvWaXsRk5t5H9PRrWXpCPY'
'xf67YFOwTkAWQqBQsoDxBOEt+rID94O3o6VLzX/VjqOpLo75bsgs2E9OZjt5/0aaxvz44VtTTe'
'jrHucUAeQHud4aOWxtvB1K7zv96rsP7gA7LAT/ZK+AwrSNaUChuo7oTY7P1IC4zHm0zOxjpxeW'
'txGGK60TLWd61ehoKYYoLVhdnxhXtGwtn7F+6ZmZa/TXUVn5+NcqvNvpZIzAHflMS3ZqQF8irJ'
'GT+8MVzAxVEJ/DFSm4HUDJ7YqDF9mIdofK8JMWTLb7RE3KmNPJEbw1mdfcXGIJsztZT4UhWTxk'
'hDox1hVrSejMr39EaXkBa+EUnc8uEHb38wY6t78Az/np2Zn3rBEV9LaKNwNnYReyCt1blzWRgP'
'61bPV2gsF721LIoSJAVJfpLvViFmQUKDX5R+GXdh+MLwMH39jiGswZBea5Tc8cKh5Xqdfg4tRc'
'2hB46MoGWoLc+R0jxzb2d7qUqBtuEDuv/FKM6eKRoTfw1v3XEUAzjaqqclyGh/kGA8St88s+VD'
'GsmZG/A++knHsHVvMiIayyzPl3daVL0UrSeZ8wijSmz0RCrUR3lUqUUquPJ77IqKWrDsMnHESS'
'lqpNqccUAcroyRJDRv2ODlB8fGHjwy5ljHueQffXr8roXJOeNjBjk9SOvz4Jbqm1Rnl6BZua/I'
'nMpqkitn68sbZtBPXOQDWS7ST1zkA1mOh9i5D4DjPd0B5QF6JnG8lxgxsS34Y8PyJrSOP4sHUo'
'3KnRYkm4xgT6MO63Y9DFdX61PdC2Z021DToVvjvg3IA2jAmdY21HQQTvhzZg7bgz/DeyeKLyJG'
'0HmDotYQd1x5KtmW4mWu9J0muLjx97T5WCZGLWty0PfNXWZEAzZOnuRTX7hQWZPQRTPW7TQjHl'
'nRAXkAXaO1LASUB+hocFwq1gM2EHwU792EopZXMSVN+UxIF6NlOlyra9SpaLRlZGDVYBehv+Nm'
'80g6dfbfptVcV9taLbbSShMRHDzEUZNlfdm/PDpM2oCDiwHCxUezuECsx0eBi2c4oDxAh4Mb/V'
'cYXOwIPoH3Ros/GE53oADzWzEnMDYM26U0ybvzU3fjwmYz5UTTDyMfT5AhISFfY2NH6X9O1USt'
'Sz7KF38getCOcQfN5BPZmcD7+onsyWkHbMw4OY34/83YBYLgs91sbPzPuXCh2Y71BhE1V9VjvV'
'2pJBOordtcGLeIZC1ucalhXqaE4y1FtRQ7gCm2LyowUkTS06QShV4yYqB8tkdikq+OYZNMk7R5'
'JLhHR063Nv3MrLmRWnrIpek06gnXCyLhUo2jsoYn6m3TLsHYcBU5Fst9xyxWuDMnqSCNoyau4e'
'rGAenGn83qxnCBfRa68U4HlAdod7DXf7GCdgZPCs/7PhUjWxnzWGeSAFqpiCw1LUBIEY0MWtcc'
'z2bMifK+gg5I/5XPu6BugNzzD6yST2YPtjtpIk8K57tJQYPBF9HTzmIRbqQN1lpjHNbGRLdfzK'
'JrEHZroKvfAeUBGggClMbook/8TXfwtOCnegKv+CnPifk3Ky1fSq9LW7bB76k5Q+6WbdfsbWNO'
'VdXlZiS5h5pdf18mb5gzRDjFma+Br2I3ZE6tsgb8HXPlLTiD77Sv16AAJGl2UDr2ZRJoWFhjDA'
'ZC/qab77b9dI5/wxj89W5ODf1QjjWghrk3xnWQ8clt3bFnTzmXAxE/toV1IX2k9FtL7+3W+3LG'
'nFOS+sQqiSZNJzwPBL7UaKZtBK4J766srcVleCc5QLlWsbdhsQlKk1sqNi4+MxwTwZDE7gEt9U'
'CgDmEtPOzcAH9kTMunZ/dr2xmrHAw1K4KYgya441zgXDegVDmo5mLGb+CAPIB2avrToJqLCXRd'
'cIhvzBpkq9838dqNxUXBtRmAxRX7rCWjACKc5YGpSM3NEXuoYdJ8NWEt9h09XJM3naHCIPzNlP'
'EPqkH4m6kIG1SD8De7ubzaFwwJ5YIf7uE8zI/mhL4jG33kjFCUDE13T9dBlou23MWoUnUq6Kcy'
'WCaREjwWX9eApjI7OT0xNX03n834erYab52UcsZ008kTByNpjxnSY8mj5D0s17yae5mdbTeSce'
'q2YsfG4ZCUTRllUrIZQGIj4MZ3RVW9UPJivVIO2ygvBJ8Q7TZdTWeJYOpjVPc4IA+gXi2KN6j2'
'YQLhJpSvewrLBy+RJXrSc7wPkfVKiWuJnaqY4my6b50jgK1oawQeUKX4X2QYKn3y7iaNa61R5w'
'wUUC5bfRBXwapY6NyXEiWm3LBkdem9YhrrQ9RDYon4H5fITRN0gDyEl2Ghsp9aBj4dlMHtylMv'
'OCAPoD4HZXlBEFD2o6DqXcEre0gsfA1i4de9zdMqMTyODdTUfuEPyLyIahvtbua2Vl8lyDK9cQ'
'n2ulGerxRVFE+hw/OVCbY5C1MuWbR2PRMph7s50zJZQxPhvM0zOiOHHdlQI8p39YcUCxWLmE1K'
'LpXg1lZpsYvwRGhAwM3fMFJYWjwCZD6j+JlceF6vrnX2RMQBz4b1VppOzlMI88IaTHEpTdlEEH'
'tna5MVsO9K+PpZ6bul8BVLBJhPmrpOElZvs5Zjgc/3LAonj80FQajwyUWEbF3HJN6Uk4bjLaiF'
'2nSDF1E+ght9O088p9OsZ7mjUQmYMY6KvT1WodqlQoVApq71LhUqBNqjNZB3qVAh0NODG9i5sY'
'tZxet62DW+ICY148jQu31toFv2jijMTGKyHevoEunpMNoc50mduOUWZ8gICOQP9Tog/nZB5eAu'
'lSQEgjv9l3sUlgveLmzqlT1WF2MSnkEp8xMgc65WHlWd4yGn4TFto1oVi2MWDkrVTG6zhqLSKb'
'BxZzQcrtFiDZ9xisTZMOPsruBLDKvV9D4+J1hrTPtCHYsz3IdUtODSO/L6COvfibA3Ylsi9NIC'
'7WLPEaNVqPNFt6Phs1BuNozXSD4+WzqXLFhTN65znOCJLecGUdX/Tdd8v+hh3HM6rA0WtYdF7W'
'E4vdkUnBZC6EjmXgB4UOT/TJo736puqxJ19OfckdqK05jtznFzlGk6MSkhZorOGRuMiqURXxUK'
'3NEmDinLM+pik6RzpJ6+mB+lgccd30VfnBZXlpWourc8chDG4aTOO1o3KUdexOUjY1Yz43Lvta'
'RdaWlWRIzLfY1YuoNl/HDIpdHlFqNoi8nXfPfjapJlHcZuG+gAb08F2i7VAd6eCrRdqgO8XQTa'
'ioLywe/0sIv4YHqTr4bNO2UWT5wW1uBmcRPXjpD+dia9I5J9I9bTIocC8234XvlT1zggDyBTvX'
'SXilwCwff6S57CuoJ39nBM9ys8GQVjBVy/XEka1Wg9bLSbtNCxIaYzriqhSnGFBNvDOCnQ6qzX'
'2zbbH/mgSh/fz1UP7L0tfprzyZkUI7LJopW0yrEZNZy078xiH07ad/bYEPFd6qQlEELE/9BIzu'
'7gDxgnxd+hcxa6BhfFZeFnePTD5gZ42R/p9d6XMikOthL34QrxF2iRazh0lEyO/JFURwL3lJia'
'duJYSfh+8tRgz1XYTfos0GdeY0bos5nfxFzwNcKs+0SSZqHHqjFrPNdp6RKMyP50s8uB8ySG0U'
'F2ZfwwrmNiX1p8JfMCY5Fo5A+y0rBbUGvMC7vUs0ygQY2j2KWeZQKh/OOUgnqC9wu1nRbzgq3Z'
'ZTDNehJxU3NR0RgbfbnGaJQdFly878+SBVy878+SBVy87xeyGFdQb/BoD7tSj7mXdSDQoVqx9z'
'zqSTcup8Nzvg037qPZb8ON+yi+vdcB5QFCVahfNtutEHyghyP2XqnbjZhAdS2j4Got+UurdV5G'
'Lq3D92yZ/ADR0JBmthLjZqESAmCruMpN1B29wHkTeZoljLQqX3XNmRwcxB/ITg4O4g9gcgMOKA'
'/QTtJUn6OgvuBDgtjbrEJ4RoxZE2c1kF+YW/aum1TDdAYBhyv3tt0BeQANOBiGw/VDguHXGwz7'
'wWOicFVS75LRXLkqDIMNhBgxmOsQ0d1QiBu7bSFu3rlTEzZksr3GWQ+H1dwrmsPwsWHa/ieGZC'
'1XK7RStM/krqpszU07Zp/Q+1gWvXDPPtZjE+Z2qXv2MdHUXplXWH/wKcHvt3PhbMRy1Sresto6'
'Rtwiv6yh12K7dutSOvQsRwK+IMDGoTgmSFNkx52zTNlRKxAy6pwHM5dSt1t1yDOpaCqHM3ZPpT'
'daC63C2yBGXJ+pktSIdBgdiTFmUNAfUgUDaWKVsp2SvRcD+TNy3ON8czp643TIMWUNRuGivgl2'
'bA96vqMQRWZOnLlRd6VuPy3lp7JLCR/Zp7CU+xxQHiCEUnzLEOm24AlZyr/yNl9KHK6+y2W0c+'
'agwFrqwUBou2plYinKfkr0EFzCbRpnnidXphNBYQeZHB8eCYdPsAqGs80RB2/bEJedxRv8ck9k'
'2Sf8ck/I5r5XQduDvxSB/qxwDo5UkRqwXdbsjVlp2fxGi2tXNOko20zYp6PTMV/YTuP4y+w44E'
'37yx4bILVLvWkEQuXC5yloIPh8D0d/3emMA0v21McyQGP5fHYs8GZ9vsfePbVLvVkE2hcc8OcV'
'tCP4El4bKp4N5+MOtiNrhaVrLyXtJfrZIL0FDFcsGYkNiu5gv/BIfSk7GnikvoTRHHRAeYAOBd'
'f7jxsmFQRfxXs3F/8ob/2M5bjK7kQ5vi7pjb+J8aCZXI6s5xAFSE1duOaGOxg7bJ7awJZ3Za9c'
'kpZFdAJIYcRqrvtsDVK3J9fQ4xMfXFWJXgtl00gSuSIRNWHo1Wo9otOIc6GfukobzRhufzW78p'
'RFxhoHIdzCinYkA9psUMFONtBgxB0w2z1K1aiSlmtVEwxwJCKf83qMf5XU1aajwJIEw7VAE1OT'
'E9lCmSWOp9LpXcKpr5IkbROKE4Vn52cmHKIIiCh4cYsOyAPI3HO1S71jBDoWnPDfABvD7uClvc'
'HTgp/tDbziK3rC2fbSPFEiV9jQIjftpaPzljg5HgrHTDYzKJWirqUEqQix1vlgP5KxPdEZeLzR'
'mOQSRRuOeqzc4ojqh6RbxclR2RBj9J9qJYHlAmDDNTcZl/l+tMn1fUN3zcycHZ8bixqNpIEadf'
'W1oXQB3WHAuQw7lx86H+b8PZqVdnPnnaafFX3dmAjRr2sbdi/9S6MhMHat7sRBETJTOKKTo+II'
'HlUVk/vSEW0yrY2z6hy1yAKgx0/rYczOzTznKAOTowszs3ywO+PDeLHCYwMfiDIMKcG1IqOVaG'
'20Ua9WSuujSxXZMf/WmDyon/TH6Kh+K8SnXPhazNY9Rdu4oO2MovUyWHX7YDrYnEa0XtrVYTp7'
'DeCySsHMNUBq892N0h+9XJGmzD9h8n1Fb4AqjvNSukS2htShQOZQ0nLdIOkukeoTfjhkV+NZlf'
'KzzWrg7yHdz/wVEjn8nYID8gAyiv5utWkSCIl+r/IU5gWvxnv7iz9komS1PpzmCnNw3RBwsMhm'
'qiEERRGptuWGO+UBat1PFQxrU7jkRNVmDPCbBtbuVvvnq7Nz8WSYfXRCSUF5gPaSdjatoFzwGr'
'y2t3hHOL/JHJCyBDvMFWdgPgGrw2uyAwG+XoOB7HRAeYCQpvpTfQTbE/wmmOTrCsQkv1Fwld+K'
'uauDf7EDyXVWRNYW59aGNZvZnHac/pyClrCvmWC1w2rN52hbtjwe1WiKZa5PZLTzw3rGoCNA1q'
'JV4wvJiJWEh5fTYMJw8gWzU3OTE0dGrHMPX9VOxAhYSzO2av9qThK386t63xvcuDpkcRdhAOty'
'S3YtNlo1jxe5tWr9jGScbACTcrAZzMgBctMyuWMq6mI1SQxLSYmGxEbL96r1OkeWRHpDnD2f0q'
'mh87jmOE4vmQJ1xonI2WIXpqfxp/qLa76EptATbWQ1Li2kjosGTSFTYDkyjqNFwf0i5xqqFFdY'
'sqhDJNWpXhtdqdsDhtqveJCLi9MXzp9fXMTprMX1sMKkAmlEc1rk2wRhKqlJ7WIJQBiGKtGs0O'
'Rw5URbK38SD7+U8G2DsKqlo03ce9C0clIMiczYasYrHNiBEPZMT4jpasG22UR+QksiYyorNaO/'
'mnH7zsARACq5sWCP8NLZ75XkPKodZNHB4WP+1mJmqbLCxtSjcu0HvTcKDjUKhjVqHj4dIOdzvh'
'VBWQnkJDLeLOJoD3Gr3+zl6ls7+CfE0W/1WrVvj3rDGLTdAXkADejBaY9KDgIhs+QuBXnB7/ay'
'Qfw6xDbGDl1G9nTLJvE9GvvYIS9N1wic4J6ucUDcubF371F2TyDYu1/rKSwXvLuX4yB/2usYgr'
'lZyOawSSFk4ocj5qSiG2Vx5vmTc/dMjk8szk8uXOD7hSd4/Ya4D6HTUT1Y868hJ21d3Cq6R7VQ'
'lRkxLKTvzs6LK5302jq9e1R6EAh1ej+UU1g+eG8vxzS+KyfzurTB4RDhXhiusgUdD0eH9MZ4rU'
'ZjLcucdoUMnyTMMC2H1aFLUxBa1HJJTHHujcTtMBhLZGyraSUpN86JGDxqU8N6rdfb2LOh2fZE'
'qfPGMWGDQkX/2frrG68f9bk6dVkr42SOSc4qwJvy3uwqwJvy3l57++ke9aYQ6KZgRCog7GFvyq'
'O9nEHx3k7qcirJgy/GXD8GN6yMaCDRBtpamByfm5i5b9ohL8sER8OU0obk9AcNCQGS5rQrZn4J'
'BFVTK3FeYaaCMHS03BQnmN5YiQPuKnWSONjAPauPZrEB/8ujwMYNDigPEHJfxhXUHfwRXjtcPB'
'ZOGGOvE+GmZ9H44ct8G1cr/1GW98DRQKBrNAp9jzoaCPRM2voPK6gn+LCodfeG5zQ7I3EvWGa8'
'HGYjuo1m0hUYCc/N3Dt7fnIBhK36ywis67D07g8nbWjE5qypx3y73wF5AG1TBXCP+iU+LArghw'
'zx9AYf6eUk33d5WhmrFa9wDqAWWNKJpKp/epyXojwVCePh+No6Fht1sJfXU/GntbKdkD4n9j5N'
'2qosO7aVMus/8KQu6wU5jpVGmINJfE6xgHubeTa7HJAH0G5Nq92jHhICIa327w0jKwSf6uWQ5r'
'/IpQZ8qFsSu5E6246p50B9wxq3JgSOWlpa0GDMNndjoyt62Y/vqjjGI6chY8fd4BcpoKYiwtqj'
'amUp1RUZPrpsbmg1oSbEZEhlO87OQrd4msuYRZvdxD9YgnEddp62lq228dKtptFzRUjaAAoJjD'
'P3jAoB+eG58YXJu2fm7l9cIIVzfmpyemFxYmZ60lkweH0Y8b0OyAPI1OHco14fAqEGxeNmwfqC'
'J3o5mvWPcuEUvJDm8845RVwJHEiZhIedKEA1Npji6auVxKrvKS937jvXACyVQ5uGE9rk4SlzY5'
'lv36MDf6W6SRhhxYY32k1nUTb5gslzFxamZqYZZbwT7LPpSeLWi1DiFZ32uKVxk+bGlagjxs9I'
'urIbcZNx6zNqYSjPLgq8YE9gUYoOKA8QbmV5qeElfvAk3ttRfNGGqv02cJh1gsyltTbkVqNFOH'
'XZXGrt8GwrL5bSG2I3ucLTDBAZiTycbgfkAWQq7+9Rl9eT4JID/tfMNPqDLwtLfMIjrZitoVqc'
'0uylehvhbll3yLhznDO1LHXyalLFMdGUvzOuQGzm1chN7JOLvC/HY5lnllrCMOVCSneQMCJIyk'
'QZ9ywvtYVlgK0QZdJZiIP0bHyFwQTSsXjeLqgboH5HhMDVRKBBvTF+j7qaCHQD8dP/bLbntuBr'
'opJ827Phgxm3UDg1kTjbT/mtur/da4czWqHEVGXvM27GuHgvUhGFxLgOV5MUbNXLC/UbGpGfaT'
'gm+Y428k84pS2+Oixn9GGJ6BUHbaUZDrsMR717esYfJtF9o0bZlOUGc1lv67HdnHK30Up8LbsS'
'cF59LbsScF59DSvxdAeUBwiJcS9W0PbgG6xFFb9PBkIHdeEBNveFz0BpzAYzJ437de2+TLTfhc'
'ttj7q6vpGapfaoq+sbvdbVtUddXd/oZVfXtIIGgm9a+5iaGJRoNrqFNSIwLXFnYnSdgcDP9c3s'
'QODn+mavdX/vUT8XgXD10G8aQt4R/EiBHfuvy4XjJZzgVfNQ2U8r20nGjgjZYGOUm0hiE7tHZD'
'tiom467SUmWSqNRvVTQWtlgaqPwoCMbWljuq6uHDHeeqli6vVUJNVk3SFy/kqpTipf0qjrYbG5'
'vqhqBXHgJl8AEuuVuGoLOu6gegehmlFWcEAeQKba8x514hEIV8y9y6A6CF5aYP/mF4Rn2JtA4M'
'pgzDcqNUgNFFuZzdRpdkNEy2mVGeDhIojHlnOREjy26EvFKcxjBQy8mbVRs2P9lPeL5s63eoiz'
'agi1I5LW0JGsaptmOHDItsbEtNoNX7yNeiYj7nCHbEoTd8c7UAdzGGQELl5ZqiAmXgIzSZAzx5'
'CrZQ024SNj3LkgDyATOrxHfWQEghv35UbU7QxeUdBaRfMo/UxYL5nIM3M0Mvdu2loZrkyGg77R'
'1jsaklRmhyt1XhyhYZ79iBPgbY6jfNtnOuqdNBEe0E4H5AE0qLWKBJQHCLWKfkBBg8FrCuwbeB'
'6ur+CwhfhhGPHr5Tgb2XWvOE6t21LTM0aPszqWJi774pbl0xcWaThxAiI4ACEd9iDM71maRwbW'
'awq2MKqA8gDtCfb59ytoV/DzBT523JPW59IUvFQGZmpgcWktaSK1tTKltZwx7SJUcud9DsgDyH'
'ewu4vGRCBkFd6ooN3BI3jthuIBW7RevtdR7IXbIkC9YGtLCcgDaEdwnQPKA3Q9Sao7FLQneG2B'
'4wfoAGyCtbjkQ1x2A2MzkXrOd/fQd1+b/S4siK/Fdw86oDxAiBRA3MLe4JcKwdOCX4e745yNOn'
'd4Skfx90ucZ2YrdZfbzY4yGTBc7qXPUreId/+cx79huXxzgcPt/kSO1Ny3cavbCsDZL/LH+G4X'
'jRTqvMvlSje5ZOJxwEaIhyTtJck3kZtHlbKxjKYkIA9CzWQ2T6PsGx1LBy2hc4r/verEe3NK8H'
'vVFPvmlMnvVVPsmwtcs+/fKMgL3lLgGj9TkoMoyDeFs27qVJzd4aWnh5Ia0h2K36vmWe59uwPi'
'Dw4oUexV8+xbClx+Z11BueBtQvGrVle9Mjm4975JgYa2eIloH18ECt1qYFJd1wgSZ8ww3r0t5d'
'x71Rr7toKtDbZXrbFvA+c+5IDyAA3RnkLJ2X3Bb4K2PwjavokOxGlMt/ByM1Rx6jgiBDS8D8b3'
'AlfJfDH/BAn/jqg9pDZyHUxzu3ZaLcbGz9bCGy7MT5grIa1e7iZLi7XU77SUanHNhDQPs5D7lL'
'r4870OyAPIpJXsU+oiEIIVv5xTmBe8E++NFR/P6bitEGOXTOYOy1S3paNkSWUYPadd0+IrvnS3'
'mypTxqAJq0rSbmptE9aslHePNRxVQ+qH2pcqNfVLIfOvydFr2vHhI87tK+ZuLHNjVr25Yi/70N'
'uy5F9zRdYZvjwrvSfLVhoxfyzSXI/iWj/IwqPuGBrrt1fvOH38+O3lZumO8nJ0/GTp1OlTN5eP'
'n4xuue3UbUtLJ5fj6Njp0ydP3Xb6lvLp26K4fOupdJmw3xjdRQfEK3BNcMQB5QEaCUb9uoJywX'
'tklb43nDHLY0LPNyyTvfrK1AOU0gIgHD2ySFtfk4E6Dbr7dEu9JztS0Mp7siPFlnqPjPRlnsLy'
'we/hveuKL85IYXj3WKdZtPSl3o0xSR3NFAvilnZgY5kjVWoR8rd29JgRwkXA49nlgDyAdmuC/T'
'51ERAIleN+zsyjK3if8N0f8zZMxOhhVzUX0/ipTMfxLZixwsz/vuyMYOZ/H2Z00AHlAQLPLimo'
'O3i0wDaauX9GfMMR8Gh2dHAEPFqw9uR96gggEOzJFQX1BB8osF/sBf/sGOwxH9vrgDyA9qnbaJ'
'9a/gkEt9HNBNoffBji4uMQF09PN2Flg9xgVKqY2A+HAnjwAX+Uf0JM/EmBnS0HO5wtldSzomPl'
'5jTWP0n34X5l7H9SsI6V/crYCQTHyk0K8oKPyDGlGJ6Tm0WalTU23GzyFb5yM121/cqXPoJVu8'
'4B5QG6nhbyFgXlgsfkEHFDKv9XrMzX2W38Ht+umSqj+5W7PAZldJcDygOEKKBjCsoH/73AObaH'
'5A4Ua86UMqqbfAoMgN/Z7YA8gExC6H5lAARCQugRBXUFH2MaKe7nO5xAfwgp3ewb2JIfS9Wo/b'
'olPwY1aqcDygMEV9Ip+nEg+CQI6i9AUM+8HEE5hz7Q1AHq+ZOgqYNMUweYpj59tTR1QGnq0ylN'
'HVCa+nRKUweUpj4tNDWsIC/4jKBkL6Ok3dgMIQeUnj6TIuSA0tNnUoQcUHr6jCDkhIJywedkfa'
'/PrG+7tsUKH1Bi+ly6wgeUmD6XrvABJabPyQqPKSgfPCGS6trQlgEm1TmOai4vTD8EUnoiPWof'
'UFJ6omCLtRxQUnpCZMmzCFQMPo9l/iKWeeRyy1zK7FJd7CJCy4WB7OCfWOwv8GLzB4u6nF9Il7'
'Ooy/mFdDmLupxfkOVEkMc1wZcxrP+BYZ263LC22tY6wGtg3C5wVcxR/okBfuVqqfEaHf5X0uFf'
'o8P/Sjr8a3T4X5HhzxDoYPC3GP7XMPzvudzwhYrOSZlfS0TNlKJ0Hgfpk39bYHfNKP/EPL56tf'
'M4qPP4ajqPgzqPr6bzOKjz+KrMY4JA1wZfxzx+uI/mcfKyy5BhQU3dezr4a1E2pcDVCEb5Jwb/'
'j1c7+Gt18P+YDv5aHfw/poO/Vgf/jzL4dQV5wbcLbCxf5SHCr1DJHP9qfMOTFN/RA6A4omgSUp'
'BbKV/cOmUTMp/I5QdyD1Va/MoZM5jMt9PteK0ymW8XbHLjtcpkCITkxgcUlAu+I1bT81uM2bpL'
'zLg4V7NcKSMFTYy+HAwDH0qCuzhjF5fgR9/Jjgt4+g7GtccB5QFC0NUZAl0X/Ggfrr8EIdx4WT'
'ah/InUTaNlXEe909s4jI7yTyz/j/dd5fJfp8vPLxQdkAeQWf7rdPkJhOX/MWjJh4KfxpgfwZhb'
'GR0ZN5uwcxW6kJkMrKV86RKeLKhQyz5MteiN53FVUZud8vAQqoD3WR3rEM/+ZVc7+0M6+5elsz'
'+ks39ZOvtDOvuX9VniP8SL+qq+/wPEf0iJnz++0wHxeAzxH1LiJ5Ah/kNM/K/u+2ci/kNK/K/O'
'jovD0Pss8R9S4icQiB8XEYXBa0FIvwJCeq5rVZf0dnURVwhv7pDNQHEb2sNsaZk6OpPNUgV9hL'
'Byyu7YwT9BH6/rs+p/qOYTBvU6IA+ggk4kVAogEPQV05MXvB6vXWfbIL799dmePGlVUB0h1HUh'
'EHQErqkT8sK8Ce8NFv9Wi+ostTFx4//Qq3ajxFwMPMzJS61o1NwBjQoxmSAMPuJX1Txn7/NGDh'
'FfOqo+CiDN1uB169gnRKiWHI4fO/aQD+fbSjwWzmsMgjpvqlFzJb6ecy4QBgvT41Hco5kc5Sds'
'4jFx0nUuzO2zA78ai9tYyYeRQOh7UxZ9QPKbgL7tDigPEJLF3ymHybeBft4H+nlzTjgPjoPnTI'
'xIhxHtFifuQONIKppjl4ZsmUQiieUWN6LT+LA5ad47Pv9c3ifZkB2GH9FqbX6mBMdKPRxG4Paw'
'E7kt3ZigltvdYM4REzOE+vqcxYtMc1ilOVp9xNYoVbMPvPscYeOcjmVXmNKZHB6Pa3GgvhLqum'
'krXM8/sTfeDvw/sxgQD76YlrYZk/tf8ro1uNH1DigH0A3BM/xzCvKC30KbA8UT4dkKsu90GCbi'
'aF4ds1zFJI6Y6GT2zqc8081OB5QDaHew359UUC54h/C0k2kkmhtoJG5A8c3yZVTibaxo4SzTM8'
'bMHQ06IO57D+3dYQXlg9+Vj+3NfEytvE53QO7vZrvDZVy/K919MaewruC/Mi6LH0NpUOKpbLrn'
'hAUmOvFEtGumvKcAb+c8SBs8x7kRaf6Jz1b/Sg33SFXbyGNVD+96pjygm5aXUpUkTazFuLCrkq'
'yplZq3eyIJj344ZKL9hjKecI7W41DfJfcmQpPzoH43FPNMw/nQPRvA0xDcjjSi8shmhdO4mtmI'
'hBOLqVx6TviuRkzE7GODfZzJGdcHHFAOoIN0UDymoO7g92U9Djl1CRB0oCFHGmnp9AsD1+9n+8'
'X1VL8v/d6soJ7gD6TfobRfkqqsI3A1Dc4PrZOUdbqGXeoPsl33UNd/IF2/zOOiH38E5vdZn5jf'
'Dzg7q4PnOZeLGlak5CW8YUFjsjigx423S4MpwqVKa039Fxt4rHIU4PiPwFEG/GfyT3CUDwoj2G'
'M5SobN7jTt6FVuudsB5QDCBQuf6VKYF3yCGULx17syGzAtm6hb3Tr4nJBOLYDZjKOGG9vOdaqn'
'9LpVGpdGTBly9jUe0VS3tH2kHJZrLEo4d6OiKmdcpRYxH9e4ena9ZYk0vHdqfp5jmJ1auVFnec'
'SOHASna5C7U/ogSUMZx9JWnBEll8REpjxmWo9Gi0ev1G3IQVqpXj+YxkdKnMK6uesHM0KSZPJQ'
'Ra5BkbhXZ/jh4SSOxTkqSTZcLehIykySSqutue82AELITmKXsQpMfyb/SR5GUrxTB6RJRXq1IB'
'Oyr3nXvD6O6/nEdWJXVqJPO2bJeyYlQ89QWJ8DygG0LQj8L+UUlgs+K2Lg7bnLk6HwQnNrsAkM'
'RhiFrf9pAox9DXrjRRUt2wRNRysIT22ljHXrjla5VIrvZDKsSxbWBj6eoQJOJeNkSoPxynLmS2'
'kVMD/NoQNzhuvcqUbLfRhXpgRtcT3pmIv3dZSHH9n05tRbbj55Il0R7PnPpoK0S+XyZ0WQPu4p'
'LB98QRjDf/C2WJFURrNqZ6LekrRuHQebOeFTkvfWxPZfiyXdr14tu2/5aZDtRufIwvjcAvP1DY'
'8IxZvRpKtUOjQJVeILWZqEKvEFoclfMRjoCv6qjyO8KxkEuLHDkQmVtaUcOIJCFAAEesn1X1zt'
'WsrbISjbhDJyKkrsXsZyFct5622nbr3l1nQyEBE8zv0OKAfQNcEh/1fzCusO/q6PHRhfEo2+Iz'
'NHE21tHFjFKZM7Fs5IRBC31FtJnEC+al2LcQmrkUQysag5Jf9dD1RFL+/z3YwgiSB0Yy2YfHD9'
'WqKywMRTYY+VVutSuTVp1Ru2HrGqxL5KkchR2lkaxVw1orXqJru0a9U4cRmgXlKvATe+CYjSOZ'
'lD3SUtsi75ynTy68hnvarVPH385pPHnc0J5YcX6hoHlAPoOrkYT2A9wTf6ODrip7LsUnC/yY0D'
'2fhAFTBWAD/EKSFNU/1ba3qwnacpCVOEgjEpBOCkaJoQAcnSNIEDdRMmwHEB+Ge0sT62UmnZiI'
'HNggWcGwNWmlEJgeajZvg0kMXooWiURjtKew/gUSIr/I2Rj5Yhr67mpgxcIn3qWIpt6IOMyO0O'
'KAdQQGrT2w22e4PvsCLFtoTM3hnZsHFM+q8lGmwMZytp7q5v5DK0F+v21uUjIc8XdEo9klYrEo'
'5iQgg1/WAlsfH6JgNgweZOy+UccQt3/0ipjegSbRol9Wy9Wil/QrKR0LTCrIuGV7t6AdNJw8h4'
'YoRtc0A5gOCOfKdhr4Xgx332F/1Q5mRgRTJmucqFXtLDlO5M7FkTrOfcS2Zi7J0cGEPqZU6Ubt'
'pLH2CmkaovJDee0Sk3NrE9OOIDCUI89L0OKAfQgeBa/2cM1fQFL/V5j37cy05Q6psa7mvyrkQX'
'kSVjtunwOxN55m+4DC+bXgN+B+EUVSVNi/DXLPNlloal6dFj4sKCHx6eiLlMywWuz7xAW59rOo'
'4jbK+02qHS1GKrROkx8uqUj2MnTh9zpBXyeBgt2x1QDiDsuE8Z2vCDV/p8yHmDgzubqewQiChN'
'GNhDcdwwFswaSgaYOlDGkkMyC5dMbsoJtTvi/qsIMyvR7liVEuEpyZh+L3JMtiGeq+Q6t9yc4g'
'BJQDy9nQ4oBxCsMC8zErs/+Dk02lX8SG7jBoFBQpKHTaSS7nuxQsYP64YxymxTkuWIB+gNmVAj'
'pVa8kdq2vgTbxUS/NnYJe/iTwH6w7xB1fUUsb/iWDGJ8dgoosao3R3YyB4TpU0JBYynKxHOhIx'
'Xu3tb4Pvxtr78UaQ7BXeeqFUmz3TDhcb6ZOmtZyP1c51oxjXrmVjlWDfjS0KtftNvSRUPaEa9H'
'vwPKATRAivTPm02/LXijLNqvXdWmd4+lG6LGtcLWsRA3DpkVMdbDjrSOtGBqNgGeQ2ozZ55MOq'
'Wc5W6mTa/2bOWBcg26FKhQk61jBEJk20ammU0Y7GSaSBZ6YxZ/2wh/bxT8PWk2/vbgLYK/P+/A'
'H68eHVUlEE9maKpOS4GMmt95oYCN9l/US4g22DCUiaBYhUTP6p5qda5K5NadMlWujaISOopKyM'
'qIYc+k1fDVoWmClU4TYcFZbGwnbLxFsPEuQ00DwW/47BH7loMNzIWD9A3zqmnhN1G7TVEkc18V'
'p1ThtirePuYyPEJRpV7eeAOMg71K/V8L2q52u55KEYzkKsbdLgeUA2hvUPQ/bMhtR/AONAqK7/'
'E257FrUU1CvXWGhukNS/XZYTC5/9PosTNEmhPPp+CAcgD1Bzv8XzBUFQTv9jnK7NPOpKFpudQg'
'lkVJBUp9YMv1dk1oz4mP5WKFlwlmzF6zqBHaehM2yLNUaZTNT76WXW6Q1gtv4GFD4XQ1CKalNN'
'wL9VIbN3Tl+OIm2snVSGbkLDFuAgeUA2hXsM//tKGancF7WXwX3+RljM+uS5dt0KYOTwfLZWRr'
'+qPm6fvhcrtWsjcW0iQrtAKxFM9zzqqSHyQifOMtYjGsiKkKmYxWNlVvs+nbnZwa+U48vz4HlA'
'MI1pEvGiQMBo8KEj6WtQ+xk95cDldbR0bm7dA20kpZyraqsJAhKzdqRCW9c6XTwCwyb5UOMGzs'
'c7SBhLsyFV3EyBDJxZJxDfZXX+49YCd8540/TvLAskZQdFp7nW2FTKpHswgZJIQ8KghZU9Cu4A'
'M+O5z/bYoOO2d1R7gMRRjIVjxFC8HgRKMJsjCxO4NCKhV/0AXlANpOeuW7uxW2O/ioHEL+Q7dz'
'62iU6NA098yYKM0gXfsj7UhsoRGbXKd+UVs2w6afZm/FMsu6AeXCTXybUm02ib5HS1BP2O2UtF'
'H+K7YZ/E4VHdlw5q4tWxPL5j869nDJd4qcnMDOOmCunb2y7KcjPwwKOiL1lNfqkiLCBbFSgoHd'
'aZFWatGdLl9J2KqLBHB9yc4xxdjo2R9gXRJ80ZBjVx4L745rcZMJhrhz9JCt9mpNWr5b2SshGY'
'8i6PW0gotj2jfXp8mCVNhwifIeUaWshep8seMZ5CWKPfHTyT1mcnrQwrG03UZb9fooaeS4v4WY'
'GBsUIoys7OvtaKRaYqkjTaEw6YKb7keHwpHH99HsUXE3UfhH5aj4GcOH9gSPi8b4Ri9L4SLBjC'
'DX27psSRHU/6s34Ua1K5e0m6VVDrEQSwqKIJpDEny4rEDb+zC5fi0fEOm95UgXupOpcXRGSzu7'
'apl0yrGmIK3w8ayquIfQ8Lioig8raG/wKbQ5WLwjw4wvhwPine3l5UqJz07f3663oquXmc6JHv'
'mH/O3dDigH0L7gGv+DZpn2BZ/xOd/y5d6m/NHZJVdt9NE9tKXRJzI9ogLFFsafp6AlIFuNJ1N0'
'QDmArg2u99sEuD74Cz94WvB5OJHjp3wrQHohMjVqcD0k09Vco5SY3s4tm/Dm62lg9GEErt7JP+'
'EtflKk0tHwrmq0Iute5gJ/oqm6Q+EgD2xLE4J2vQZzcR89DsgDqFdjiK7XYK4nfY4hQjz4UPAl'
'IODvgYDr0gTbuQ0JtjryIVTZ9jnu+Xv4J0b+ZZFcY+n7MM4gm1EKvoZDRoeKGpWxUnOsHF8cko'
'EP6cC/bLRhA/IAMiVJh3TgX/Y5ie9ncgrzgq+J9enf5zji2a2eRWzUWcaL9ZLRFnRIKSzhVLdR'
'YyUfZVO3W1d4FHWsl+J4+fgQ7piRAlSXJxaW3Kzi8DJtSE/W2zJTJsiHfynoWFa/HN9xlziDlw'
'q7OkfSsFYqNeOUHEvRiVC8r2XR6f1/7T1LcFtHcgJpytSTbMOMk1XRlj2BTBlYAQ8/Ul6RordA'
'EiSfRBFcfMRIKsUACZDCGsRDAFAUraj2sLW7h61KlXPawx62ctjD5paq5L73VOWWa3LMMZVKqn'
'JM/2bee/hQ4uayB6ok+6HfTE93z7yZ7p6eHpaUJUcFIhKK99/46V/fuwy93Hez1k9vWZ/yDkGS'
'IHvHB0l9KNEmyMwHkudRv48sWtM61HTmuvWuJCy/HlKh6GRR/5z5yJpq19pu7/oEwKeK/GPlpy'
'HrT0C69gDOlfc0xh2E7ISepKXEoduqtQ/poKMhsH/aafSStOIZYjt7/xsK/d3E5MbOyu8mPt3g'
'yjtSw94FZe4Bli9j1fs/+9yaDn8K30AvHLL++dr0Nfoxk/n9NUVV9t2WWqGQxJ5KKEYGY4s333'
'Dmw134Q9qFP8LsMaswA7AjP5NK/UAqKKe9byty3khmaoxR6ZKmYiYy+CoaLfzeeoGszUJEguMi'
'e0kYPMUGruOU7lenSEDFjBZ73NshyF6zXeMrjY567JLDqVVcc5Y6cusmD3+c78BDNbEvnz07r0'
'3KDHPfJU6D9SbrHBQLcdToc/pspb4/QBiFFgpF7CjDc+pg9tTEPubkY/taYhZdDrCvMybqEE5/'
'i7JH6ZED7VHqeVy0xxABjflkoYnw3xVNdFgeIf8vOqyR1r9OWcyG0hG6N+nKOiNqnfvXUn7qDV'
'PbsgeIiPU04B9bbdd7R3JvYtQNZVJHVC7mMWI3pU6nB6osQCmoBYigoGyWSb8XuIjEksAV96BP'
'OxTauWsumyNzj3Ji90ErplHUM3dAbTolVSqsl3dzxbyC551i4ZGzll9TK4/hZR5M5p3HRWdjs6'
'w2C1tr+WJJ5bbXALpdLjorlXKhWLJUJFeCqhF6k9t+jCoz2AElVSgqBwxuB7ABejCTy06+FFfO'
'9upWhVMmAgawGsqW2nIeOmCXq3IhTs0O11OFdfUwX1zdhJ+5FWfLKT+mBted8jY2tl4o4rb3Tq'
'5YdlYrW7mi2qkUdwqlvELO1pzS6lYOE3+CerMNbSow3bfLqrSZ29oKMmqpwu52vojU+9lUK3mg'
'MreylcemiM81MAxWy8iQ97QKwgMCt+Jg4OzkVx14QocusJMrPo4L0lL+RxUoBS/VWu5hbgO4i7'
'5JKtAxq5Vi/iFSDaIoVVZKZadcKefVRqGwRsIu5YuPnNV8aUltFUoksEopD4Ss5co5ahpwgLjg'
'PTyvVEoOCY6224uVHfQ4x6CXd9GpoVZzUHeNJFzYRm5xrOQLxceIFuVAPRBXu5t5gBdRqCStHI'
'qhBFJbLfuLQYMgRNwrM3yq7fzGlrOR3wZrEV4XEM2uU8rHoMMcDEJDnCjm3Rw0WiGusaOALouf'
'fUM3Tv2pnHWVW3vkIOVSGkZAyZHhQmJb3RSZw/DH3UYFq8l1eMIMDJfCSwicnpNnhN6Ep88I+p'
'k8I/RzeNok6FV5RugcPMUJGpJnhN6CJ2rMPOPTF/AUIaglzwiNwtOfE/Rzef5NGDSCS+EDXgFn'
'vwvDIDenR3zBlDXF0TRxddB82agnWo32IcxYvU6ND5mga98Up3xyrA8f8zUzogpwfjnKmaDXB/'
'0CLzwEtUAuM6fsGDwFSt7LgXtram20CIGKeq2rb6JEl5vcDhKp104j5IyMHMEk+Dyi0fgsE8yA'
'C+bwUYdnbFnqdMTovrnR1OI87aa0XBhKCr8RlYTJasdGvc4RkDpFBEqE/Bk1DxEozKQ6IKIOzL'
'0vm0fsyrmdSKfiqVQKDJ0a3417U+XZL9vDWFXJdpxeBG3jqIM5/w0ZnFTCTy4tgJ1e47ju0ips'
'y2rt8cN5FJeVbdtLg+8wDsf/xjRk0lXIW35t9EXdrcuIwfySvMD699JAJRoAUoWfdQX6pRsB7T'
's61NA9lVK3bg3i+kqlYuqVvj5jqNLtZZVeGnorTS/j6Q/5I4VeqwbezT2SgK9GEnDvbAISZxBw'
'exQBvu7PeN3v9Rf1v/fzttdh5x8FY/t6/BjhV/4uXw52OVA0KIQlr5IeAL5O91cYGgVenaCcA2'
'POL2Kvwkjpet3rFfzKX3BMG7dHtzFyCPl6MDvuA8a9QQq9pD1EPO5EkbunoHm1dc/1UejDBaMw'
'8fWWs3EFxikg7i2nU7HgZwbVdGvRgVf2OrReNqj69RjNPfdLsEY/hDkKo+wooJYgbPKwS8jQjw'
'YZOyj4eBYtA3L9ChR5rveF8USPu4e3wMR9qa6kIN2OpTNLofPrpYr0IioqLhEsIqKnZHcWpz9t'
'7GMolN7x8u7SlqJiBHmrDO7hewfMLLMu4fa5vnkrsCWWNasVYUoFcIkDEtP5at6bAUGhKCLZXi'
'Qut4ZlA4ti2odMcNGZSI/EUdhsPbrSiBfxDGAFA62JByr9eN8WLeCEwWZZ11BDAJXjgLIq/VuI'
'fqLj58dhyV1W4kBb3bJOZuvTDSRJGebTwwGeyKYX4gtf3sFVDv9ZuB7fHgByiDEd4zFX+zboIu'
'3jtmQq2+fPhy2VRUvdSSERSRj7YDPBD3hIPu/Cc2ZePe8m4duA5+ydBTuzoPBDSeIKCyD6THm5'
'taywZnB6ilh81wcJAWQ6/IEPMgkQTFz280kBhcJd8uT9z4SWS0DhMefRAhqPT+Hxi8/y5Ke/MF'
'BwOAq2xodUNLZuQP/Sd7yAZKvSLVVJ+cZXznfcXpPiRcjfelij5yoRJAV58OupoUek+BqULK5x'
'Si7eTnzb6LoS5qKPuwWw6Qt8LcMeWrGoU+rg3wE6B0fM3bt34/KPR4sP4BspXvehI6wb6L4Q9Q'
'0eqfQgkwAJhz80XrDffm59MugF4wjMcT6wX4WsyyUqMbNkXeY0etdDajJ6NXNz0Lllc0F7nUrl'
'wTo/LUqV2R9ZV33gmbA1+U3jlFxpV4r4OBO3pkjA5Ea7mvmzIeSP8G2RCy1O/CAU+YcJa4qAQJ'
'nVPm61vmYEiPT9zOwQgm0oQuU3LxWvtPWPmZvWNZ5Nv/baD0GRqww1hXj2lkKTSDgWYigXAktn'
'z3U1Ge9AkWlsCmFc4B5hARFJkSli9Xtj5Cjo4clwid4aqXuZ6g5zian7DJct/WPlsvXON7DARJ'
'asK6bEjG1dZnVfenSc0KXU9z+2rhghzrxvWXTtzqPcViUfvrTyk9EOz6vMjHZ3Jt/S3cmMn8vZ'
'+ftPxdn5lxfOzgtn54Wz88LZeeHsvHB2/jE4O9d9zs71Nzg74z5nZ/xczs5/JB09nIEfC5g0Y1'
'JVee2tDrg69ZGbOq99tLjH9f14km6e1TbL3OzYofMrpzApyT2QuFhLGgnfHc64qB9jsuC4aRyN'
'NJyJMQXOcafjdiW/LGjWrJcb4thKD1qlGCqMsakcaG/Qsx/0fsmww95Pz0KQtBpshHOAex2Xq5'
'YYcLX+QLu0cHpxsH33sMFnDM29lbR+ax6MHaBJ0iefyNwcQI1lTV8AoVRGSPMs0AxdQ3vPGKDz'
'dOwzriqwIvCFT9gNeM/iuG7wW3eXqf5HAetuPvyn4Y8D1t086Eif4Yk+PMC5DCPnIYycf5pQVd'
'LxBgbOmHZ96WXQ9m6y2FDBJvOJtOi48ZPE6biqixmv4nSJgL6UXfpRRiMf+9YLth5mOVnY2PVh'
'zh2Avd/YF783herp9AaoxmEKnVobM8nX9nrk+RYHu7zwhaLgmRVMv/LGvhTp6K7UxyGwJ0MkSD'
'TDbtGvaejJH0JPbIUnZz8irKiHG/Klz6gc1Pwh1LxGOehDNAZy4XfCN2ZnQRPy9QOKdqA29ziW'
'fs8HCQHkfZizPMgkQD4Of2LFBRIKr0KdT2Y/CbZQd48xLm6wDTQ6sbwfghiuhr/ng0wCZBbGmW'
'5jIpwf1YZ4wwbbwEPC+UAbSGU+0AaeC89TGwmBTIY3oM712RvBNmSgDTUyCY1ghWkfJAQQvK/Y'
'gyBSnag0REexHWJEDTGi59PBdjCJqBNgBr8zJ8AMphB1iBndzlT4AXX7QDswEBu0uyPDz9cOZu'
'h9EOh8PML7IND5mFLpAXX+32M43KVwCT73R/C5/zqkqsasq/LODwa9txp9THgBH3BDuz9d72sQ'
'N6Qeid6ZZ/1xkK/0uC3q49nfU7B5KlZF3FX6qKZoKJcoAckN+oWfRoXivt9T28GvYcrMdhU6/O'
'lBJgDyQTjMee4mwk/ZUpx9oarGIhbuT7oYKor5MOikgU/0cq/AgTfjns1YEDMVq3W7tVOZLHBo'
'P6V8ZMvTnALhUvgZRVAmsO8H2nzDvE/VYZA+o3hsD3IZIFeBbw8SAsiHMgQnZF54hkPQOIr+K2'
'19Nugo6putvTG+oiXritk3OXfA1M/GBEy9b1BqF0LmLV0Iht5zeRH+M2ldIcfB34Yu3AgXboQL'
'N8KFG+HCjXDhRvhjcCM4PjeC8wY3QtLnRkiey43w7zfIjfCLkKyBs/9yA4a5FxARMAk7blPS4P'
'Lp56EoJoJ/SwZZl/PkWiayKR7cRz9POJU1eneRDX1GpMOhmnwUEm27jgt2KixglfKqOmrW2zS1'
'u21L3cfzcLAepOMqfffLVFzP2DD/tfAmtH210aUkobW2F5fFZi8mHmjX5VTaiFKYggumyTodJ6'
'UNWU40SGu/hDHQasu7u8QfHjG31RYmOzMsQ4lI74gSFUVg7tUHDTHXXMcyWUDpOBMef6PEKXKR'
'HiwtHVxkeWU/7vH9kE8z8wmYt6FXmm1ACzgQ+7Po2doH9meSSsa08tvVcV60lYlb6wn6W06lFu'
'nvE2T9LvxJpDOJbLqcyS4u3IW/9l3954mtVk4pLg5WJ7lySFgk7KCuYO4mvG5Ijm/xKTtg+kWj'
'2+f+5dVJPS2ur1oqm83e9Xg5OTmxm43+ASmL3YN9/Icl7P7LfoziTMQcfat4tIGAJPjinb9QVZ'
'RMNFYdjj0yeqgEyXh6dK/R/1o6OErVcUMtFhtZjsZ7NBVbeqsgKaHpENN9HTXcg3rt1EebuFrw'
'1QtMiPZCWgwUv9V/EVdE0NIfytILu/8Cf53FERcCHWRfIhUCHGbHcrjbbGczqrrR6JdOe/0GBf'
'bkeuvNVqMc7Ih1ZytfhoVYHfSFjHF1bh30NaUVWKTuzAPB+99gyFY0GmVI7KBv1082YeJYg0GD'
'tWLq3j2VzcTUXyt6t+We6FdabskkTKBAb9096RFK/FiAVX+EjW0K8CyVvjP8GRlsWD19Z35+/s'
'vsnZQ3bfBpXVVpN19qLDCZDWKx/7DOjDL/IAoWStJEf8XADPKR84YRjHhQXBrPnA8PDYBYYADM'
'jx0A92svaqrKHWlLPkAs8rDZAgXdNwAot+QRQaErx1c4Y5hDPS+wtd04WTlutkAljsaQsZJISJ'
'pgwcR0wJxSWGabeYe5GDmXksx6TEfoIOv2HmImWjwZLLxBBg5d5N63wYj1sS1QWCNOgPxAmTM5'
'9Qh/M8uAzYZJI4+DjWHRmI/zIPdSGH9Ex3B6Zyyn0l9azxgMFxzVUTr+zxuF8OGvev0efev4P5'
'+czggA1KkzcAE61/rDTck5aLkB2dIha9BY5BXqDa8TryjYG/4P0/Pr8itcvF8vvgIdAv4Ln+nr'
'p/YrVJfwk3397EnEktBDrs35eU4wAasXHse6wAFqAfXmId6VydF70lJcUVOg0XNj8Btb41g8ap'
'L0EoyOSnQ4KhyX7RNXY8MU4KyTeeGHli9k0WwcHLqUkRLUBF012rQbtgDTo7W9WNwK3OjJLUWe'
'RHRYZfCQKY4D0kSjEVAAI7GlANRihfGvjpt4+QHvNPTdLA+GHhnnzW+9o9giSvSyoDYZpdTc3B'
'rubllIhtzJ3cEU/joJ1MBQknA1X1OdWtd34htDyvQR9do+Zd3Yc/vPqU2TB9Pw0BuiQ1GQ4gF8'
'g7GhHCiRTCr9Ja4O6YVyKr2YTS2mF+xUGsTHoxsWGfxtlpdOrQd6N5Wk9t22pzcvxBVik4y0OC'
'2VaNOO0xX7VbWawuVRdr9Yy6OULjTYeTz25Y5v1J/r8D31XadUKNFHFo2NUFDtI/dbmFFr9HU1'
'2olKCRM59pK7jb2kR0qy2JCjD8mNlrtXa31dIBp6SSQo6WskRk6s526d9jV5puENLCZJVU1ksn'
'6oaoZkF0y4xYMRo1gEpqowaxxQVR9HQLXd4ZkNeckkW809TA9Aarf9vH/UuklPum6MnC+WGci6'
'EXTFqC/mHifmjhJz9fLc5uLcw8W5kj138OQLMCya3zROmj3aXaW+8noJxjNju+/W+TDHFz2gFU'
'SjlZp1nqzq8hMWnGdRdlnKPPdjqEnU40OC7IVap0kdoqFsRTCtyWHcxKduYC6zBn8tFUNBunvk'
'KqwJn5gDAXMc0B3jB6C+tmmnAodQ4ICIT/4U8/ue7LiCjYu+9994Qb+/DFHU79+E/FsvvshfHP'
'ckaOjFfb+qZY3WtYJhnmNsI2uUcfQkEPb5oS9ql2h81wcKIWhabhzWO7sAwsDd/9C8hcLfhShy'
'919DeMw7YcJizxe/a6ttqWiMTjkxxLf/GmTkVeWsfpSNqO1vk08QcEVJBchGO3QSGsvaozAoPz'
'EkvVDZUTLCXcrvgjIKMfv6vhEdHAsgX3Ts/wGm6OJR')))
# Index the descriptor set by .proto file name; for each file keep its
# FileDescriptorProto and a name -> ServiceDescriptorProto map.
_INDEX = {}
for _file_proto in FILE_DESCRIPTOR_SET.file:
    _INDEX[_file_proto.name] = {
        'descriptor': _file_proto,
        'services': dict((_svc.name, _svc) for _svc in _file_proto.service),
    }
# Convenience bundle describing the BotAPI service of swarming.proto.
_SWARMING_ENTRY = _INDEX[u'proto/api/swarming.proto']
BotAPIServiceDescription = {
    'file_descriptor_set': FILE_DESCRIPTOR_SET,
    'file_descriptor': _SWARMING_ENTRY['descriptor'],
    'service_descriptor': _SWARMING_ENTRY['services'][u'BotAPI'],
}
| |
#!/usr/bin/env python
'''
SVM and KNearest digit recognition.
Sample loads a dataset of handwritten digits from '../data/digits.png'.
Then it trains a SVM and KNearest classifiers on it and evaluates
their accuracy.
The following preprocessing is applied to the dataset:
- Moment-based image deskew (see deskew())
- Digit images are split into 4 10x10 cells and 16-bin
histogram of oriented gradients is computed for each
cell
- Transform histograms to space with Hellinger metric (see [1] (RootSIFT))
[1] R. Arandjelovic, A. Zisserman
"Three things everyone should know to improve object retrieval"
http://www.robots.ox.ac.uk/~vgg/publications/2012/Arandjelovic12/arandjelovic12.pdf
'''
# Python 2/3 compatibility
from __future__ import print_function
# built-in modules
from multiprocessing.pool import ThreadPool
import cv2
import numpy as np
from numpy.linalg import norm
SZ = 20 # size of each digit is SZ x SZ
CLASS_N = 10
DIGITS_FN = 'samples/data/digits.png'
def split2d(img, cell_size, flatten=True):
    """Cut *img* into a grid of cell_size == (sx, sy) tiles.

    Returns an array of shape (-1, sy, sx) when *flatten* is true,
    otherwise (rows, cols, sy, sx).
    """
    height, width = img.shape[:2]
    cell_w, cell_h = cell_size
    row_bands = np.vsplit(img, height // cell_h)
    grid = np.array([np.hsplit(band, width // cell_w) for band in row_bands])
    if flatten:
        grid = grid.reshape(-1, cell_h, cell_w)
    return grid
def deskew(img):
    """Straighten a digit image using its central image moments.

    The skew is estimated as mu11/mu02 and removed with an inverse affine
    warp; images with negligible vertical variance are returned as a copy.
    """
    moments = cv2.moments(img)
    if abs(moments['mu02']) < 1e-2:
        # Too little variance to estimate the skew reliably.
        return img.copy()
    skew = moments['mu11'] / moments['mu02']
    warp = np.float32([[1, skew, -0.5 * SZ * skew],
                       [0, 1, 0]])
    return cv2.warpAffine(img, warp, (SZ, SZ),
                          flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
class StatModel(object):
    """Serialization mixin shared by the classifier wrappers below."""

    def load(self, fn):
        # Known bug: https://github.com/opencv/opencv/issues/4969
        self.model.load(fn)

    def save(self, fn):
        self.model.save(fn)
class KNearest(StatModel):
    """k-nearest-neighbours digit classifier backed by cv2.ml."""

    def __init__(self, k=3):
        self.k = k
        self.model = cv2.ml.KNearest_create()

    def train(self, samples, responses):
        self.model.train(samples, cv2.ml.ROW_SAMPLE, responses)

    def predict(self, samples):
        # findNearest returns (retval, results, neighbours, dists);
        # index 1 holds the predicted label per sample.
        return self.model.findNearest(samples, self.k)[1].ravel()
class SVM(StatModel):
    """RBF-kernel C-SVC support vector machine backed by cv2.ml."""

    def __init__(self, C=1, gamma=0.5):
        model = cv2.ml.SVM_create()
        model.setKernel(cv2.ml.SVM_RBF)
        model.setType(cv2.ml.SVM_C_SVC)
        model.setC(C)
        model.setGamma(gamma)
        self.model = model

    def train(self, samples, responses):
        self.model.train(samples, cv2.ml.ROW_SAMPLE, responses)

    def predict(self, samples):
        _retval, results = self.model.predict(samples)
        return results.ravel()
def evaluate_model(model, digits, samples, labels):
    """Score *model* on (samples, labels).

    Returns (error_rate, confusion) where confusion is a 10x10 matrix of
    true-label x predicted-label counts.  *digits* is unused here but kept
    for interface compatibility with callers.
    """
    predicted = model.predict(samples)
    error_rate = np.mean(labels != predicted)
    confusion = np.zeros((10, 10), np.int32)
    for truth, guess in zip(labels, predicted):
        confusion[int(truth), int(guess)] += 1
    return error_rate, confusion
def preprocess_simple(digits):
    """Flatten each SZ x SZ digit and scale pixel values into [0, 1]."""
    flat = np.reshape(np.float32(digits), (-1, SZ * SZ))
    return flat / 255.0
def preprocess_hog(digits):
    """Compute a Hellinger-normalised HOG descriptor per digit image.

    Gradient orientation is quantised into 16 bins and a magnitude-weighted
    histogram is accumulated over each of the four 10x10 quadrants.  The
    concatenated 64-bin histogram is then mapped to the Hellinger kernel
    space (L1-normalise, sqrt, L2-normalise -- the RootSIFT trick [1]).
    """
    bin_n = 16   # orientation bins over [0, 2*pi); hoisted out of the loop
    eps = 1e-7   # guards the normalisations against all-zero histograms
    samples = []
    for img in digits:
        gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
        gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
        mag, ang = cv2.cartToPolar(gx, gy)
        # Renamed from 'bin' to avoid shadowing the builtin bin().
        bins = np.int32(bin_n * ang / (2 * np.pi))
        bin_cells = bins[:10, :10], bins[10:, :10], bins[:10, 10:], bins[10:, 10:]
        mag_cells = mag[:10, :10], mag[10:, :10], mag[:10, 10:], mag[10:, 10:]
        hists = [np.bincount(b.ravel(), m.ravel(), bin_n)
                 for b, m in zip(bin_cells, mag_cells)]
        hist = np.hstack(hists)
        # Transform to the Hellinger kernel.
        hist /= hist.sum() + eps
        hist = np.sqrt(hist)
        hist /= norm(hist) + eps
        samples.append(hist)
    return np.float32(samples)
from tests_common import NewOpenCVTests
class digits_test(NewOpenCVTests):
    """End-to-end accuracy regression test for KNearest and SVM digit classifiers."""

    def load_digits(self, fn):
        # The digits sheet groups samples by class, so labels can be assigned
        # as equal-sized consecutive runs of 0..CLASS_N-1.
        digits_img = self.get_sample(fn, 0)
        digits = split2d(digits_img, (SZ, SZ))
        labels = np.repeat(np.arange(CLASS_N), len(digits)/CLASS_N)
        return digits, labels

    def test_digits(self):
        digits, labels = self.load_digits(DIGITS_FN)

        # shuffle digits -- the fixed seed keeps the train/test split (and
        # therefore the reference confusion matrices below) reproducible.
        rand = np.random.RandomState(321)
        shuffle = rand.permutation(len(digits))
        digits, labels = digits[shuffle], labels[shuffle]

        # Deskew, then extract HOG descriptors.
        digits2 = list(map(deskew, digits))
        samples = preprocess_hog(digits2)

        # 90/10 train/test split.
        train_n = int(0.9*len(samples))
        _digits_train, digits_test = np.split(digits2, [train_n])
        samples_train, samples_test = np.split(samples, [train_n])
        labels_train, labels_test = np.split(labels, [train_n])
        errors = list()
        confusionMatrixes = list()

        # Classifier 1: k-nearest neighbours.
        model = KNearest(k=4)
        model.train(samples_train, labels_train)
        error, confusion = evaluate_model(model, digits_test, samples_test, labels_test)
        errors.append(error)
        confusionMatrixes.append(confusion)

        # Classifier 2: RBF SVM.
        model = SVM(C=2.67, gamma=5.383)
        model.train(samples_train, labels_train)
        error, confusion = evaluate_model(model, digits_test, samples_test, labels_test)
        errors.append(error)
        confusionMatrixes.append(confusion)

        # Tolerances: error rates may exceed the reference by at most eps;
        # confusion matrices may differ by at most 2% of the test-set size
        # (L1 norm) from the reference runs recorded below.
        eps = 0.001
        normEps = len(samples_test) * 0.02

        # Reference confusion matrix for KNearest (rows: truth, cols: prediction).
        confusionKNN = [[45,  0,  0,  0,  0,  0,  0,  0,  0,  0],
                        [ 0, 57,  0,  0,  0,  0,  0,  0,  0,  0],
                        [ 0,  0, 59,  1,  0,  0,  0,  0,  1,  0],
                        [ 0,  0,  0, 43,  0,  0,  0,  1,  0,  0],
                        [ 0,  0,  0,  0, 38,  0,  2,  0,  0,  0],
                        [ 0,  0,  0,  2,  0, 48,  0,  0,  1,  0],
                        [ 0,  1,  0,  0,  0,  0, 51,  0,  0,  0],
                        [ 0,  0,  1,  0,  0,  0,  0, 54,  0,  0],
                        [ 0,  0,  0,  0,  0,  1,  0,  0, 46,  0],
                        [ 1,  1,  0,  1,  1,  0,  0,  0,  2, 42]]

        # Reference confusion matrix for the SVM.
        confusionSVM = [[45,  0,  0,  0,  0,  0,  0,  0,  0,  0],
                        [ 0, 57,  0,  0,  0,  0,  0,  0,  0,  0],
                        [ 0,  0, 59,  2,  0,  0,  0,  0,  0,  0],
                        [ 0,  0,  0, 43,  0,  0,  0,  1,  0,  0],
                        [ 0,  0,  0,  0, 40,  0,  0,  0,  0,  0],
                        [ 0,  0,  0,  1,  0, 50,  0,  0,  0,  0],
                        [ 0,  0,  0,  0,  1,  0, 51,  0,  0,  0],
                        [ 0,  0,  1,  0,  0,  0,  0, 54,  0,  0],
                        [ 0,  0,  0,  0,  0,  0,  0,  0, 47,  0],
                        [ 0,  1,  0,  1,  0,  0,  0,  0,  1, 45]]

        self.assertLess(cv2.norm(confusionMatrixes[0] - confusionKNN, cv2.NORM_L1), normEps)
        self.assertLess(cv2.norm(confusionMatrixes[1] - confusionSVM, cv2.NORM_L1), normEps)

        self.assertLess(errors[0] - 0.034, eps)
        self.assertLess(errors[1] - 0.018, eps)
if __name__ == '__main__':
    # Delegate to the shared OpenCV test bootstrap (argument parsing + unittest run).
    NewOpenCVTests.bootstrap()
| |
# Copyright 2013 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack.compute import pci
from nova.api.openstack import wsgi
from nova import context
from nova import exception
from nova import objects
from nova.objects import fields
from nova.objects import pci_device_pool
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.objects import test_pci_device
from nova.tests import uuidsentinel as uuids
# Canonical PCI pool statistics used by the hypervisor tests below.
pci_stats = [dict(count=3,
                  vendor_id="8086",
                  product_id="1520",
                  numa_node=1)]
# Compute node whose device pools are built from the stats above.
fake_compute_node = objects.ComputeNode(
    pci_device_pools=pci_device_pool.from_pci_stats(pci_stats))
class FakeResponse(wsgi.ResponseObject):
    """ResponseObject stand-in; inherits all behaviour unchanged."""
class PciServerControllerTestV21(test.NoDBTestCase):
    """Tests for the os-pci server view extension (show/detail)."""

    def setUp(self):
        super(PciServerControllerTestV21, self).setUp()
        self.controller = pci.PciServerController()
        # Minimal server body that the controller is expected to annotate
        # with an 'os-pci:pci_devices' entry.
        self.fake_obj = {'server': {'addresses': {},
                                    'id': 'fb08',
                                    'name': 'a3',
                                    'status': 'ACTIVE',
                                    'tenant_id': '9a3af784c',
                                    'user_id': 'e992080ac0',
                                    }}
        self.fake_list = {'servers': [{'addresses': {},
                                       'id': 'fb08',
                                       'name': 'a3',
                                       'status': 'ACTIVE',
                                       'tenant_id': '9a3af784c',
                                       'user_id': 'e992080ac',
                                       }]}
        # Claim and allocate a PCI device on the fake instance so the
        # controller has something to report.
        self._create_fake_instance()
        self._create_fake_pci_device()
        self.pci_device.claim(self.inst.uuid)
        self.pci_device.allocate(self.inst)

    def _create_fake_instance(self):
        # Bare Instance with an empty device list; devices are attached in setUp.
        self.inst = objects.Instance()
        self.inst.uuid = uuids.instance
        self.inst.pci_devices = objects.PciDeviceList()

    def _create_fake_pci_device(self):
        # Stub the DB layer so get_by_dev_addr returns the canned fake device.
        def fake_pci_device_get_by_addr(ctxt, id, addr):
            return test_pci_device.fake_db_dev

        ctxt = context.get_admin_context()
        self.stub_out('nova.db.pci_device_get_by_addr',
                      fake_pci_device_get_by_addr)
        self.pci_device = objects.PciDevice.get_by_dev_addr(ctxt, 1, 'a')

    def test_show(self):
        def fake_get_db_instance(id):
            return self.inst

        resp = FakeResponse(self.fake_obj, '')
        req = fakes.HTTPRequest.blank('/os-pci/1', use_admin_context=True)
        self.stubs.Set(req, 'get_db_instance', fake_get_db_instance)
        self.controller.show(req, resp, '1')
        # The allocated fake device (id=1) must appear on the server body.
        self.assertEqual([{'id': 1}],
                         resp.obj['server']['os-pci:pci_devices'])

    def test_detail(self):
        def fake_get_db_instance(id):
            return self.inst

        resp = FakeResponse(self.fake_list, '')
        req = fakes.HTTPRequest.blank('/os-pci/detail',
                                      use_admin_context=True)
        self.stubs.Set(req, 'get_db_instance', fake_get_db_instance)
        self.controller.detail(req, resp)
        self.assertEqual([{'id': 1}],
                         resp.obj['servers'][0]['os-pci:pci_devices'])
class PciHypervisorControllerTestV21(test.NoDBTestCase):
    """Tests for the os-pci hypervisor view extension (show/detail)."""

    @staticmethod
    def _fake_hypervisor():
        # Fresh dict per call so the two fixtures never share nested state.
        return {
            'id': 1,
            'service': {'id': 1, 'host': "compute1"},
            'hypervisor_type': "xen",
            'hypervisor_version': 3,
            'hypervisor_hostname': "hyper1",
        }

    def setUp(self):
        super(PciHypervisorControllerTestV21, self).setUp()
        self.controller = pci.PciHypervisorController()
        self.fake_objs = {'hypervisors': [self._fake_hypervisor()]}
        self.fake_obj = {'hypervisor': self._fake_hypervisor()}

    def test_show(self):
        req = fakes.HTTPRequest.blank('/os-hypervisors/1',
                                      use_admin_context=True)
        resp = FakeResponse(self.fake_obj, '')
        self.stubs.Set(req, 'get_db_compute_node',
                       lambda id: fake_compute_node)
        self.controller.show(req, resp, '1')
        self.assertIn('os-pci:pci_stats', resp.obj['hypervisor'])
        self.assertEqual(pci_stats[0],
                         resp.obj['hypervisor']['os-pci:pci_stats'][0])

    def test_detail(self):
        req = fakes.HTTPRequest.blank('/os-hypervisors/detail',
                                      use_admin_context=True)
        resp = FakeResponse(self.fake_objs, '')
        self.stubs.Set(req, 'get_db_compute_node',
                       lambda id: fake_compute_node)
        self.controller.detail(req, resp)
        self.assertIn('os-pci:pci_stats', resp.obj['hypervisors'][0])
        self.assertEqual(pci_stats[0],
                         resp.obj['hypervisors'][0]['os-pci:pci_stats'][0])
class PciControlletestV21(test.NoDBTestCase):
    """Tests for PciController show/index/detail.

    NOTE(review): the lower-case 'testV21' in the class name is a historical
    typo; it is kept because external references and test discovery rely on
    the existing name.
    """

    def setUp(self):
        super(PciControlletestV21, self).setUp()
        self.controller = pci.PciController()

    def test_show(self):
        def fake_pci_device_get_by_id(context, id):
            return test_pci_device.fake_db_dev

        self.stub_out('nova.db.pci_device_get_by_id',
                      fake_pci_device_get_by_id)
        req = fakes.HTTPRequest.blank('/os-pci/1', use_admin_context=True)
        result = self.controller.show(req, '1')
        expected = {'pci_device': {'address': 'a',
                                   'compute_node_id': 1,
                                   'dev_id': 'i',
                                   'extra_info': {},
                                   'dev_type': fields.PciDeviceType.STANDARD,
                                   'id': 1,
                                   'server_uuid': None,
                                   'label': 'l',
                                   'product_id': 'p',
                                   'status': 'available',
                                   'vendor_id': 'v'}}
        self.assertEqual(expected, result)

    def test_show_error_id(self):
        def fake_pci_device_get_by_id(context, id):
            raise exception.PciDeviceNotFoundById(id=id)

        self.stub_out('nova.db.pci_device_get_by_id',
                      fake_pci_device_get_by_id)
        req = fakes.HTTPRequest.blank('/os-pci/0', use_admin_context=True)
        self.assertRaises(exc.HTTPNotFound, self.controller.show, req, '0')

    def _fake_compute_node_get_all(self, context):
        # One minimal compute node is enough for the listing paths.
        return [objects.ComputeNode(id=1,
                                    service_id=1,
                                    host='fake',
                                    cpu_info='cpu_info',
                                    disk_available_least=100)]

    def _fake_pci_device_get_all_by_node(self, context, node):
        return [test_pci_device.fake_db_dev, test_pci_device.fake_db_dev_1]

    def _assert_devices_equal(self, expected_devs, actual_devs, keys):
        # Compare only the listed keys pairwise; also pin the list length so
        # an extra or missing device fails loudly.
        self.assertEqual(len(expected_devs), len(actual_devs))
        for expected, actual in zip(expected_devs, actual_devs):
            for key in keys:
                self.assertEqual(expected[key], actual[key])

    def test_index(self):
        self.stubs.Set(self.controller.host_api, 'compute_node_get_all',
                       self._fake_compute_node_get_all)
        self.stub_out('nova.db.pci_device_get_all_by_node',
                      self._fake_pci_device_get_all_by_node)
        req = fakes.HTTPRequest.blank('/os-pci', use_admin_context=True)
        result = self.controller.index(req)
        expected = [test_pci_device.fake_db_dev, test_pci_device.fake_db_dev_1]
        self._assert_devices_equal(expected, result['pci_devices'],
                                   ('vendor_id', 'id', 'status', 'address'))

    def test_detail(self):
        self.stubs.Set(self.controller.host_api, 'compute_node_get_all',
                       self._fake_compute_node_get_all)
        self.stub_out('nova.db.pci_device_get_all_by_node',
                      self._fake_pci_device_get_all_by_node)
        req = fakes.HTTPRequest.blank('/os-pci/detail',
                                      use_admin_context=True)
        result = self.controller.detail(req)
        expected = [test_pci_device.fake_db_dev, test_pci_device.fake_db_dev_1]
        self._assert_devices_equal(expected, result['pci_devices'],
                                   ('vendor_id', 'id', 'label', 'dev_id'))
class PciControllerPolicyEnforcementV21(test.NoDBTestCase):
    """Verify each os-pci action is rejected when policy denies it."""

    def setUp(self):
        super(PciControllerPolicyEnforcementV21, self).setUp()
        self.controller = pci.PciController()
        self.req = fakes.HTTPRequest.blank('')

    def _test_policy_failed(self, action, *args):
        rule_name = "os_compute_api:os-pci:%s" % action
        # Deny the action for the request's project, then expect rejection.
        self.policy.set_rules({rule_name: "project:non_fake"})
        handler = getattr(self.controller, action)
        exc = self.assertRaises(exception.PolicyNotAuthorized,
                                handler, self.req, *args)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())

    def test_index_policy_failed(self):
        self._test_policy_failed('index')

    def test_detail_policy_failed(self):
        self._test_policy_failed('detail')

    def test_show_policy_failed(self):
        self._test_policy_failed('show', 1)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._snapshots_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_grant_access_request_initial, build_list_by_resource_group_request, build_list_request, build_revoke_access_request_initial, build_update_request_initial
T = TypeVar('T')
# Optional per-call callback: receives the raw pipeline response, the
# deserialized model and the response headers, and may replace the result.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SnapshotsOperations:
"""SnapshotsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # client: pipeline client shared with the parent service client.
        # config: client configuration (subscription id, polling interval, ...).
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        snapshot_name: str,
        snapshot: "_models.Snapshot",
        **kwargs: Any
    ) -> "_models.Snapshot":
        """Issue the initial PUT of the create-or-update LRO and return the
        Snapshot deserialized from the immediate (200/202) response."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Snapshot"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(snapshot, 'Snapshot')

        request = build_create_or_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            snapshot_name=snapshot_name,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Both 200 (completed synchronously) and 202 (accepted, LRO pending)
        # carry a Snapshot body.
        if response.status_code == 200:
            deserialized = self._deserialize('Snapshot', pipeline_response)

        if response.status_code == 202:
            deserialized = self._deserialize('Snapshot', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'}  # type: ignore
    @distributed_trace_async
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        snapshot_name: str,
        snapshot: "_models.Snapshot",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.Snapshot"]:
        """Creates or updates a snapshot.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param snapshot_name: The name of the snapshot that is being created. The name can't be changed
         after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
         max name length is 80 characters.
        :type snapshot_name: str
        :param snapshot: Snapshot object supplied in the body of the Put disk operation.
        :type snapshot: ~azure.mgmt.compute.v2020_12_01.models.Snapshot
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either Snapshot or the result of
         cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2020_12_01.models.Snapshot]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Snapshot"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial PUT and keep the raw
            # pipeline response so the poller can drive the LRO from it.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                snapshot_name=snapshot_name,
                snapshot=snapshot,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserializer invoked by the poller once the operation reaches
            # a terminal state.
            response = pipeline_response.http_response
            deserialized = self._deserialize('Snapshot', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its saved state.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'}  # type: ignore
async def _update_initial(
    self,
    resource_group_name: str,
    snapshot_name: str,
    snapshot: "_models.SnapshotUpdate",
    **kwargs: Any
) -> "_models.Snapshot":
    """Issue the initial PATCH request of the snapshot-update LRO and
    return the deserialized Snapshot from the 200/202 response."""
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Snapshot"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

    # Serialize the update payload and build the raw HTTP request.
    body_content = self._serialize.body(snapshot, 'SnapshotUpdate')
    http_request = build_update_request_initial(
        subscription_id=self._config.subscription_id,
        resource_group_name=resource_group_name,
        snapshot_name=snapshot_name,
        content_type=content_type,
        json=body_content,
        template_url=self._update_initial.metadata['url'],
    )
    http_request = _convert_request(http_request)
    http_request.url = self._client.format_url(http_request.url)

    pipeline_response = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (200, 202):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # Both 200 and 202 carry a Snapshot body, so a single deserialize suffices.
    deserialized = self._deserialize('Snapshot', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'}  # type: ignore
@distributed_trace_async
async def begin_update(
    self,
    resource_group_name: str,
    snapshot_name: str,
    snapshot: "_models.SnapshotUpdate",
    **kwargs: Any
) -> AsyncLROPoller["_models.Snapshot"]:
    """Updates (patches) a snapshot.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param snapshot_name: The name of the snapshot that is being created. The name can't be changed
     after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
     max name length is 80 characters.
    :type snapshot_name: str
    :param snapshot: Snapshot object supplied in the body of the Patch snapshot operation.
    :type snapshot: ~azure.mgmt.compute.v2020_12_01.models.SnapshotUpdate
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either Snapshot or the result of
     cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2020_12_01.models.Snapshot]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Snapshot"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial PATCH when we are not resuming from a saved token.
    if cont_token is None:
        raw_result = await self._update_initial(
            resource_group_name=resource_group_name,
            snapshot_name=snapshot_name,
            snapshot=snapshot,
            content_type=content_type,
            # Pass the raw pipeline response through unchanged so the poller
            # can read the LRO headers itself.
            cls=lambda x,y,z: x,
            **kwargs
        )
    # error_map was consumed by the initial call; don't forward it to polling.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final LRO response into a Snapshot (or hand the
        # raw response to the caller-supplied cls).
        response = pipeline_response.http_response
        deserialized = self._deserialize('Snapshot', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # polling: True -> default ARM poller; False -> no polling;
    # anything else -> caller-provided polling method.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its continuation token.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'}  # type: ignore
@distributed_trace_async
async def get(
    self,
    resource_group_name: str,
    snapshot_name: str,
    **kwargs: Any
) -> "_models.Snapshot":
    """Gets information about a snapshot.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param snapshot_name: The name of the snapshot that is being created. The name can't be changed
     after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
     max name length is 80 characters.
    :type snapshot_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Snapshot, or the result of cls(response)
    :rtype: ~azure.mgmt.compute.v2020_12_01.models.Snapshot
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Snapshot"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    # Build and normalize the GET request.
    http_request = build_get_request(
        subscription_id=self._config.subscription_id,
        resource_group_name=resource_group_name,
        snapshot_name=snapshot_name,
        template_url=self.get.metadata['url'],
    )
    http_request = _convert_request(http_request)
    http_request.url = self._client.format_url(http_request.url)

    pipeline_response = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Anything other than 200 is surfaced as an ARM-formatted error.
    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('Snapshot', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'}  # type: ignore
async def _delete_initial(
    self,
    resource_group_name: str,
    snapshot_name: str,
    **kwargs: Any
) -> None:
    """Issue the initial DELETE request of the snapshot-delete LRO."""
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    http_request = build_delete_request_initial(
        subscription_id=self._config.subscription_id,
        resource_group_name=resource_group_name,
        snapshot_name=snapshot_name,
        template_url=self._delete_initial.metadata['url'],
    )
    http_request = _convert_request(http_request)
    http_request.url = self._client.format_url(http_request.url)

    pipeline_response = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200/202/204 are all acceptable outcomes for the initial delete call.
    if response.status_code not in (200, 202, 204):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'}  # type: ignore
@distributed_trace_async
async def begin_delete(
    self,
    resource_group_name: str,
    snapshot_name: str,
    **kwargs: Any
) -> AsyncLROPoller[None]:
    """Deletes a snapshot.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param snapshot_name: The name of the snapshot that is being created. The name can't be changed
     after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
     max name length is 80 characters.
    :type snapshot_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial DELETE when we are not resuming from a saved token.
    if cont_token is None:
        raw_result = await self._delete_initial(
            resource_group_name=resource_group_name,
            snapshot_name=snapshot_name,
            # Hand the raw pipeline response to the poller unchanged.
            cls=lambda x,y,z: x,
            **kwargs
        )
    # error_map was consumed by the initial call; don't forward it to polling.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Delete returns no body; only the optional custom deserializer runs.
        if cls:
            return cls(pipeline_response, None, {})

    # polling: True -> default ARM poller; False -> no polling;
    # anything else -> caller-provided polling method.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its continuation token.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'}  # type: ignore
@distributed_trace
def list_by_resource_group(
    self,
    resource_group_name: str,
    **kwargs: Any
) -> AsyncIterable["_models.SnapshotList"]:
    """Lists snapshots under a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either SnapshotList or the result of cls(response)
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_12_01.models.SnapshotList]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SnapshotList"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # First page: use the operation's template URL. Subsequent pages:
        # use the service-provided next_link verbatim.
        if not next_link:
            request = build_list_by_resource_group_request(
                subscription_id=self._config.subscription_id,
                resource_group_name=resource_group_name,
                template_url=self.list_by_resource_group.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            request = build_list_by_resource_group_request(
                subscription_id=self._config.subscription_id,
                resource_group_name=resource_group_name,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            # next_link is a complete URL; the request is always a GET.
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Pull this page's items and the link to the next page.
        deserialized = self._deserialize("SnapshotList", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page, raising an ARM-formatted error on non-200.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots'}  # type: ignore
@distributed_trace
def list(
    self,
    **kwargs: Any
) -> AsyncIterable["_models.SnapshotList"]:
    # NOTE: the method name 'list' shadows the builtin, but it is the public
    # operation name and cannot be changed.
    """Lists snapshots under a subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either SnapshotList or the result of cls(response)
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_12_01.models.SnapshotList]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SnapshotList"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # First page: use the operation's template URL. Subsequent pages:
        # use the service-provided next_link verbatim.
        if not next_link:
            request = build_list_request(
                subscription_id=self._config.subscription_id,
                template_url=self.list.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            request = build_list_request(
                subscription_id=self._config.subscription_id,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            # next_link is a complete URL; the request is always a GET.
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Pull this page's items and the link to the next page.
        deserialized = self._deserialize("SnapshotList", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page, raising an ARM-formatted error on non-200.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/snapshots'}  # type: ignore
async def _grant_access_initial(
    self,
    resource_group_name: str,
    snapshot_name: str,
    grant_access_data: "_models.GrantAccessData",
    **kwargs: Any
) -> Optional["_models.AccessUri"]:
    """Issue the initial POST of the grant-access LRO.

    Returns an AccessUri when the service replies 200, or None on 202
    (operation still in progress).
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.AccessUri"]]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

    # Serialize the access request and build the raw HTTP request.
    body_content = self._serialize.body(grant_access_data, 'GrantAccessData')
    http_request = build_grant_access_request_initial(
        subscription_id=self._config.subscription_id,
        resource_group_name=resource_group_name,
        snapshot_name=snapshot_name,
        content_type=content_type,
        json=body_content,
        template_url=self._grant_access_initial.metadata['url'],
    )
    http_request = _convert_request(http_request)
    http_request.url = self._client.format_url(http_request.url)

    pipeline_response = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (200, 202):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # Only a 200 carries an AccessUri body; a 202 has nothing to deserialize.
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('AccessUri', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

_grant_access_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess'}  # type: ignore
@distributed_trace_async
async def begin_grant_access(
    self,
    resource_group_name: str,
    snapshot_name: str,
    grant_access_data: "_models.GrantAccessData",
    **kwargs: Any
) -> AsyncLROPoller["_models.AccessUri"]:
    """Grants access to a snapshot.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param snapshot_name: The name of the snapshot that is being created. The name can't be changed
     after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
     max name length is 80 characters.
    :type snapshot_name: str
    :param grant_access_data: Access data object supplied in the body of the get snapshot access
     operation.
    :type grant_access_data: ~azure.mgmt.compute.v2020_12_01.models.GrantAccessData
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either AccessUri or the result of
     cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2020_12_01.models.AccessUri]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.AccessUri"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial POST when we are not resuming from a saved token.
    if cont_token is None:
        raw_result = await self._grant_access_initial(
            resource_group_name=resource_group_name,
            snapshot_name=snapshot_name,
            grant_access_data=grant_access_data,
            content_type=content_type,
            # Hand the raw pipeline response to the poller unchanged.
            cls=lambda x,y,z: x,
            **kwargs
        )
    # error_map was consumed by the initial call; don't forward it to polling.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final LRO response into an AccessUri (or hand the
        # raw response to the caller-supplied cls).
        response = pipeline_response.http_response
        deserialized = self._deserialize('AccessUri', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # This LRO resolves its final state via the Location header.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its continuation token.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_grant_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess'}  # type: ignore
async def _revoke_access_initial(
    self,
    resource_group_name: str,
    snapshot_name: str,
    **kwargs: Any
) -> None:
    """Issue the initial POST of the revoke-access LRO."""
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    http_request = build_revoke_access_request_initial(
        subscription_id=self._config.subscription_id,
        resource_group_name=resource_group_name,
        snapshot_name=snapshot_name,
        template_url=self._revoke_access_initial.metadata['url'],
    )
    http_request = _convert_request(http_request)
    http_request.url = self._client.format_url(http_request.url)

    pipeline_response = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Revoke returns no body; 200 and 202 are both acceptable.
    if response.status_code not in (200, 202):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_revoke_access_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess'}  # type: ignore
@distributed_trace_async
async def begin_revoke_access(
    self,
    resource_group_name: str,
    snapshot_name: str,
    **kwargs: Any
) -> AsyncLROPoller[None]:
    """Revokes access to a snapshot.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param snapshot_name: The name of the snapshot that is being created. The name can't be changed
     after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
     max name length is 80 characters.
    :type snapshot_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial POST when we are not resuming from a saved token.
    if cont_token is None:
        raw_result = await self._revoke_access_initial(
            resource_group_name=resource_group_name,
            snapshot_name=snapshot_name,
            # Hand the raw pipeline response to the poller unchanged.
            cls=lambda x,y,z: x,
            **kwargs
        )
    # error_map was consumed by the initial call; don't forward it to polling.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Revoke returns no body; only the optional custom deserializer runs.
        if cls:
            return cls(pipeline_response, None, {})

    # This LRO resolves its final state via the Location header.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its continuation token.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_revoke_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess'}  # type: ignore
| |
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import os
import sys
from os.path import basename, exists, join as joinpath, normpath
from os.path import isdir, isfile, islink
# Root of the SPEC CPU2000 distribution tree (benchmark binaries and data
# sets); can be overridden with the M5_CPU2000 environment variable.
spec_dist = os.environ.get('M5_CPU2000', '/dist/m5/cpu2000')
def copyfiles(srcdir, dstdir):
    """Mirror the benchmark input tree *srcdir* into *dstdir*.

    Directories are created as needed; a file is copied only when the
    destination is missing or differs by filecmp's shallow (stat-based)
    comparison, so repeated runs are cheap.  Also creates 'input' and
    'output' symlinks pointing at *dstdir* itself, since some SPEC
    benchmarks expect to run from one directory up.
    """
    from filecmp import cmp as filecmp
    from shutil import copyfile

    srcdir = normpath(srcdir)
    dstdir = normpath(dstdir)

    if not isdir(dstdir):
        # makedirs (rather than mkdir) so a nested destination works even
        # when its intermediate path components do not exist yet.
        os.makedirs(dstdir)

    for root, dirs, files in os.walk(srcdir):
        root = normpath(root)
        # Strip the srcdir prefix to get the path relative to the tree root.
        prefix = os.path.commonprefix([root, srcdir])
        root = root[len(prefix):]
        if root.startswith('/'):
            root = root[1:]

        for entry in dirs:
            newdir = joinpath(dstdir, root, entry)
            if not isdir(newdir):
                os.mkdir(newdir)

        for entry in files:
            dest = normpath(joinpath(dstdir, root, entry))
            src = normpath(joinpath(srcdir, root, entry))
            # Shallow cmp compares (mode, size, mtime) only — good enough
            # to detect a stale copy without reading file contents.
            if not isfile(dest) or not filecmp(src, dest):
                copyfile(src, dest)

    # some of the spec benchmarks expect to be run from one directory up.
    # just create some symlinks that solve the problem
    inlink = joinpath(dstdir, 'input')
    outlink = joinpath(dstdir, 'output')
    if not exists(inlink):
        os.symlink('.', inlink)
    if not exists(outlink):
        os.symlink('.', outlink)
class Benchmark(object):
    """Description of one SPEC CPU2000 benchmark run.

    Subclasses may provide class attributes (name, binary, args, output,
    simpoint, stdin, stdout) and define one method per supported input
    set (e.g. ref/test/train); __init__ looks the requested input set up
    by name and calls it to finish configuring the instance.
    """

    def __init__(self, isa, os, input_set):
        # NOTE: the 'os' parameter is the target OS name used to locate the
        # binary under spec_dist; it shadows the os module, which this
        # method does not use.
        if not hasattr(self.__class__, 'name'):
            self.name = self.__class__.__name__

        if not hasattr(self.__class__, 'binary'):
            self.binary = self.name

        if not hasattr(self.__class__, 'args'):
            self.args = []

        if not hasattr(self.__class__, 'output'):
            self.output = '%s.out' % self.name

        if not hasattr(self.__class__, 'simpoint'):
            self.simpoint = None

        try:
            func = getattr(self.__class__, input_set)
        except AttributeError:
            # raise Exc(arg) works on both Python 2 and 3; the old
            # "raise Exc, arg" statement form is a Python 3 syntax error.
            raise AttributeError(
                'The benchmark %s does not have the %s input set' %
                (self.name, input_set))

        executable = joinpath(spec_dist, 'binaries', isa, os, self.binary)
        if not isfile(executable):
            raise AttributeError('%s not found' % executable)
        self.executable = executable

        # root of tree for input & output data files
        data_dir = joinpath(spec_dist, 'data', self.name)
        # optional subtree with files shared across input sets
        all_dir = joinpath(data_dir, 'all')
        # dirs for input & output files for this input set
        inputs_dir = joinpath(data_dir, input_set, 'input')
        outputs_dir = joinpath(data_dir, input_set, 'output')
        # keep around which input set was specified
        self.input_set = input_set

        if not isdir(inputs_dir):
            raise AttributeError('%s not found' % inputs_dir)

        self.inputs_dir = [ inputs_dir ]
        if isdir(all_dir):
            self.inputs_dir += [ joinpath(all_dir, 'input') ]
        if isdir(outputs_dir):
            self.outputs_dir = outputs_dir

        # Default stdin/stdout redirections: <name>.in / <name>.out if those
        # files exist, otherwise None (unless the subclass overrode them).
        if not hasattr(self.__class__, 'stdin'):
            self.stdin = joinpath(inputs_dir, '%s.in' % self.name)
            if not isfile(self.stdin):
                self.stdin = None

        if not hasattr(self.__class__, 'stdout'):
            self.stdout = joinpath(outputs_dir, '%s.out' % self.name)
            if not isfile(self.stdout):
                self.stdout = None

        # Let the input-set hook (ref/test/train/...) apply its settings.
        func(self, isa, os)

    def makeProcessArgs(self, **kwargs):
        """Build the keyword dict for an m5 Process; kwargs override defaults."""
        # set up default args for Process object
        process_args = {}
        process_args['cmd'] = [ self.name ] + self.args
        process_args['executable'] = self.executable
        if self.stdin:
            process_args['input'] = self.stdin
        if self.stdout:
            process_args['output'] = self.stdout
        if self.simpoint:
            process_args['simpoint'] = self.simpoint
        # explicit keywords override defaults
        process_args.update(kwargs)
        return process_args

    def makeProcess(self, **kwargs):
        """Create the m5 Process for this benchmark, staging its input files
        into the working directory first."""
        process_args = self.makeProcessArgs(**kwargs)

        # figure out working directory: use m5's outdir unless
        # overridden by Process's cwd param
        cwd = process_args.get('cwd')
        if not cwd:
            from m5 import options
            cwd = options.outdir
            process_args['cwd'] = cwd
        if not isdir(cwd):
            os.makedirs(cwd)

        # copy input files to working directory
        for d in self.inputs_dir:
            copyfiles(d, cwd)

        # generate Process object
        from m5.objects import Process
        return Process(**process_args)

    def __str__(self):
        return self.name
class DefaultBenchmark(Benchmark):
    """Benchmark with the standard SPEC input sets; each hook is a no-op,
    so subclasses only override the sets they need to customize."""

    def ref(self, isa, os):
        pass

    def test(self, isa, os):
        pass

    def train(self, isa, os):
        pass
class MinneDefaultBenchmark(DefaultBenchmark):
    """DefaultBenchmark that also accepts the smred/mdred/lgred reduced
    input sets; each hook is a no-op by default."""

    def smred(self, isa, os):
        pass

    def mdred(self, isa, os):
        pass

    def lgred(self, isa, os):
        pass
class ammp(MinneDefaultBenchmark):
    """SPEC CPU2000 188.ammp (C); supports the smred/mdred/lgred reduced sets."""
    name = 'ammp'
    number = 188
    lang = 'C'
    simpoint = 108*100E6  # simulation point handed to Process (see makeProcessArgs)
class applu(MinneDefaultBenchmark):
    """SPEC CPU2000 173.applu (Fortran 77)."""
    name = 'applu'
    number = 173
    lang = 'F77'
    simpoint = 2179*100E6  # simulation point handed to Process
class apsi(MinneDefaultBenchmark):
    """SPEC CPU2000 301.apsi (Fortran 77)."""
    name = 'apsi'
    number = 301
    lang = 'F77'
    simpoint = 3408*100E6  # simulation point handed to Process
class art(DefaultBenchmark):
    """SPEC CPU2000 179.art (C).

    Every input set uses the same scanfile/trainfile and the same flag
    layout, differing only in stride, scan window, and object count, so
    the argument list is built by one helper instead of being repeated
    verbatim in each input-set method.
    """
    name = 'art'
    number = 179
    lang = 'C'

    def _window_args(self, stride, startx, starty, endx, endy, objects):
        # Common '-scanfile c756hel.in -trainfile1 a10.img ...' layout.
        return [ '-scanfile', 'c756hel.in',
                 '-trainfile1', 'a10.img',
                 '-stride', stride,
                 '-startx', startx,
                 '-starty', starty,
                 '-endx', endx,
                 '-endy', endy,
                 '-objects', objects ]

    def test(self, isa, os):
        self.args = self._window_args('2', '134', '220', '139', '225', '1')
        self.output = 'test.out'

    def train(self, isa, os):
        self.args = self._window_args('2', '134', '220', '184', '240', '3')
        self.output = 'train.out'

    def lgred(self, isa, os):
        self.args = self._window_args('5', '134', '220', '184', '240', '1')
        self.output = 'lgred.out'
class art110(art):
    """art reference run #1: adds a second training file and scans the
    window starting at (110, 200)."""
    def ref(self, isa, os):
        self.args = [ '-scanfile', 'c756hel.in',
                      '-trainfile1', 'a10.img',
                      '-trainfile2', 'hc.img',
                      '-stride', '2',
                      '-startx', '110',
                      '-starty', '200',
                      '-endx', '160',
                      '-endy', '240',
                      '-objects', '10' ]
        self.output = 'ref.1.out'
        self.simpoint = 340*100E6  # simulation point handed to Process
class art470(art):
    """art reference run #2: same training files as art110 but scans the
    window starting at (470, 140)."""
    def ref(self, isa, os):
        self.args = [ '-scanfile', 'c756hel.in',
                      '-trainfile1', 'a10.img',
                      '-trainfile2', 'hc.img',
                      '-stride', '2',
                      '-startx', '470',
                      '-starty', '140',
                      '-endx', '520',
                      '-endy', '180',
                      '-objects', '10' ]
        self.output = 'ref.2.out'
        self.simpoint = 365*100E6  # simulation point handed to Process
class equake(DefaultBenchmark):
    """SPEC CPU2000 183.equake (C); lgred is accepted but needs no extra args."""
    name = 'equake'
    number = 183
    lang = 'C'
    simpoint = 812*100E6  # simulation point handed to Process

    def lgred(self, isa, os): pass
class facerec(MinneDefaultBenchmark):
    """SPEC CPU2000 187.facerec (Fortran)."""
    name = 'facerec'
    number = 187
    lang = 'F'
    simpoint = 375*100E6  # simulation point handed to Process
class fma3d(MinneDefaultBenchmark):
    """SPEC CPU2000 191.fma3d (Fortran)."""
    name = 'fma3d'
    number = 191
    lang = 'F'
    simpoint = 2541*100E6  # simulation point handed to Process
class galgel(MinneDefaultBenchmark):
    """SPEC CPU2000 178.galgel (Fortran)."""
    name = 'galgel'
    number = 178
    lang = 'F'
    simpoint = 2491*100E6  # simulation point handed to Process
class lucas(MinneDefaultBenchmark):
    """SPEC CPU2000 189.lucas (Fortran)."""
    name = 'lucas'
    number = 189
    lang = 'F'
    simpoint = 545*100E6  # simulation point handed to Process
class mesa(Benchmark):
    """SPEC CPU2000 177.mesa (C); input sets differ only in the frame count."""
    name = 'mesa'
    number = 177
    lang = 'C'
    stdin = None  # no stdin file; suppresses Benchmark's default <name>.in lookup

    def __set_args(self, frames):
        # Shared argument builder: every input set varies only '-frames'.
        self.args = [ '-frames', frames, '-meshfile', '%s.in' % self.name,
                      '-ppmfile', '%s.ppm' % self.name ]

    def test(self, isa, os):
        self.__set_args('10')

    def train(self, isa, os):
        self.__set_args('500')

    def ref(self, isa, os):
        self.__set_args('1000')
        self.simpoint = 1135*100E6  # simulation point handed to Process

    def lgred(self, isa, os):
        self.__set_args('1')
class mgrid(MinneDefaultBenchmark):
    """Benchmark 172 'mgrid' (Fortran 77)."""
    name = 'mgrid'
    number = 172
    lang = 'F77'
    simpoint = 3292*100E6
class sixtrack(DefaultBenchmark):
    """Benchmark 200 'sixtrack' (Fortran 77); lgred is overridden as a no-op."""
    name = 'sixtrack'
    number = 200
    lang = 'F77'
    simpoint = 3043*100E6
    def lgred(self, isa, os): pass
class swim(MinneDefaultBenchmark):
    """Benchmark 171 'swim' (Fortran 77)."""
    name = 'swim'
    number = 171
    lang = 'F77'
    simpoint = 2079*100E6
class wupwise(DefaultBenchmark):
    """Benchmark 168 'wupwise' (Fortran 77); lgred is overridden as a no-op."""
    name = 'wupwise'
    number = 168
    lang = 'F77'
    simpoint = 3237*100E6
    def lgred(self, isa, os): pass
class bzip2(DefaultBenchmark):
    """Benchmark 256 'bzip2' (C); ref/lgred inputs come from the subclasses."""
    name = 'bzip2'
    number = 256
    lang = 'C'

    def test(self, isa, os):
        self.args = ['input.random']

    def train(self, isa, os):
        self.args = ['input.compressed']
class bzip2_source(bzip2):
    """bzip2 on the 'source' input."""
    def ref(self, isa, os):
        self.simpoint = 977*100E6
        self.args = [ 'input.source', '58' ]
    def lgred(self, isa, os):
        self.args = [ 'input.source', '1' ]
class bzip2_graphic(bzip2):
    """bzip2 on the 'graphic' input."""
    def ref(self, isa, os):
        self.simpoint = 718*100E6
        self.args = [ 'input.graphic', '58' ]
    def lgred(self, isa, os):
        self.args = [ 'input.graphic', '1' ]
class bzip2_program(bzip2):
    """bzip2 on the 'program' input."""
    def ref(self, isa, os):
        self.simpoint = 458*100E6
        self.args = [ 'input.program', '58' ]
    def lgred(self, isa, os):
        self.args = [ 'input.program', '1' ]
class crafty(MinneDefaultBenchmark):
    """Benchmark 186 'crafty' (C)."""
    name = 'crafty'
    number = 186
    lang = 'C'
    simpoint = 774*100E6
class eon(MinneDefaultBenchmark):
    """Benchmark 252 'eon' (C++); subclasses supply the render-mode args."""
    name = 'eon'
    number = 252
    lang = 'CXX'
    stdin = None
class eon_kajiya(eon):
    """eon with the 'kajiya' control file."""
    args = [ 'chair.control.kajiya', 'chair.camera', 'chair.surfaces',
             'chair.kajiya.ppm', 'ppm', 'pixels_out.kajiya']
    output = 'kajiya_log.out'
class eon_cook(eon):
    """eon with the 'cook' control file."""
    args = [ 'chair.control.cook', 'chair.camera', 'chair.surfaces',
             'chair.cook.ppm', 'ppm', 'pixels_out.cook' ]
    output = 'cook_log.out'
class eon_rushmeier(eon):
    """eon with the 'rushmeier' control file."""
    args = [ 'chair.control.rushmeier', 'chair.camera', 'chair.surfaces',
             'chair.rushmeier.ppm', 'ppm', 'pixels_out.rushmeier' ]
    output = 'rushmeier_log.out'
    simpoint = 403*100E6
class gap(DefaultBenchmark):
    """Benchmark 254 'gap' (C); input sets differ only in the -m memory size."""
    name = 'gap'
    number = 254
    lang = 'C'

    def __set_args(self, size):
        # Common flags; only the workspace size varies between input sets.
        self.args = ['-l', './', '-q', '-m', size]

    def test(self, isa, os):
        self.__set_args('64M')

    def train(self, isa, os):
        self.__set_args('128M')

    def ref(self, isa, os):
        self.simpoint = 674*100E6
        self.__set_args('192M')

    def lgred(self, isa, os):
        self.__set_args('64M')

    def mdred(self, isa, os):
        self.__set_args('64M')

    def smred(self, isa, os):
        self.__set_args('64M')
class gcc(DefaultBenchmark):
    """Benchmark 176 'gcc' (C): compile one preprocessed unit to assembly."""
    name = 'gcc'
    number = 176
    lang = 'C'

    def __compile_unit(self, unit):
        # Every input set takes the form "<unit>.i -o <unit>.s".
        self.args = ['%s.i' % unit, '-o', '%s.s' % unit]

    def test(self, isa, os):
        self.__compile_unit('cccp')

    def train(self, isa, os):
        self.__compile_unit('cp-decl')

    def smred(self, isa, os):
        self.__compile_unit('c-iterate')

    def mdred(self, isa, os):
        self.__compile_unit('rdlanal')

    def lgred(self, isa, os):
        self.__compile_unit('cp-decl')
class gcc_166(gcc):
    """gcc compiling 166.i."""
    def ref(self, isa, os):
        self.simpoint = 389*100E6
        self.args = [ '166.i', '-o', '166.s' ]
class gcc_200(gcc):
    """gcc compiling 200.i."""
    def ref(self, isa, os):
        self.simpoint = 736*100E6
        self.args = [ '200.i', '-o', '200.s' ]
class gcc_expr(gcc):
    """gcc compiling expr.i."""
    def ref(self, isa, os):
        self.simpoint = 36*100E6
        self.args = [ 'expr.i', '-o', 'expr.s' ]
class gcc_integrate(gcc):
    """gcc compiling integrate.i."""
    def ref(self, isa, os):
        self.simpoint = 4*100E6
        self.args = [ 'integrate.i', '-o', 'integrate.s' ]
class gcc_scilab(gcc):
    """gcc compiling scilab.i."""
    def ref(self, isa, os):
        self.simpoint = 207*100E6
        self.args = [ 'scilab.i', '-o', 'scilab.s' ]
class gzip(DefaultBenchmark):
    """Benchmark 164 'gzip' (C); ref/reduced inputs come from the subclasses."""
    name = 'gzip'
    number = 164
    lang = 'C'

    def test(self, isa, os):
        self.args = ['input.compressed', '2']

    def train(self, isa, os):
        self.args = ['input.combined', '32']
class gzip_source(gzip):
    """gzip on the 'source' input; all reduced sets use a block count of 1."""
    def ref(self, isa, os):
        self.simpoint = 334*100E6
        self.args = [ 'input.source', '1' ]
    def smred(self, isa, os):
        self.args = [ 'input.source', '1' ]
    def mdred(self, isa, os):
        self.args = [ 'input.source', '1' ]
    def lgred(self, isa, os):
        self.args = [ 'input.source', '1' ]
class gzip_log(gzip):
    """gzip on the 'log' input; all reduced sets use a block count of 1."""
    def ref(self, isa, os):
        self.simpoint = 265*100E6
        self.args = [ 'input.log', '60' ]
    def smred(self, isa, os):
        self.args = [ 'input.log', '1' ]
    def mdred(self, isa, os):
        self.args = [ 'input.log', '1' ]
    def lgred(self, isa, os):
        self.args = [ 'input.log', '1' ]
class gzip_graphic(gzip):
    """gzip on the 'graphic' input; all reduced sets use a block count of 1."""
    def ref(self, isa, os):
        self.simpoint = 653*100E6
        self.args = [ 'input.graphic', '60' ]
    def smred(self, isa, os):
        self.args = [ 'input.graphic', '1' ]
    def mdred(self, isa, os):
        self.args = [ 'input.graphic', '1' ]
    def lgred(self, isa, os):
        self.args = [ 'input.graphic', '1' ]
class gzip_random(gzip):
    """gzip on the 'random' input; all reduced sets use a block count of 1."""
    def ref(self, isa, os):
        self.simpoint = 623*100E6
        self.args = [ 'input.random', '60' ]
    def smred(self, isa, os):
        self.args = [ 'input.random', '1' ]
    def mdred(self, isa, os):
        self.args = [ 'input.random', '1' ]
    def lgred(self, isa, os):
        self.args = [ 'input.random', '1' ]
class gzip_program(gzip):
    """gzip on the 'program' input; all reduced sets use a block count of 1."""
    def ref(self, isa, os):
        self.simpoint = 1189*100E6
        self.args = [ 'input.program', '60' ]
    def smred(self, isa, os):
        self.args = [ 'input.program', '1' ]
    def mdred(self, isa, os):
        self.args = [ 'input.program', '1' ]
    def lgred(self, isa, os):
        self.args = [ 'input.program', '1' ]
class mcf(MinneDefaultBenchmark):
    """Benchmark 181 'mcf' (C)."""
    name = 'mcf'
    number = 181
    lang = 'C'
    args = [ 'mcf.in' ]
    simpoint = 553*100E6
class parser(MinneDefaultBenchmark):
    """Benchmark 197 'parser' (C)."""
    name = 'parser'
    number = 197
    lang = 'C'
    args = [ '2.1.dict', '-batch' ]
    simpoint = 1146*100E6
class perlbmk(DefaultBenchmark):
    """Benchmark 253 'perlbmk' (C); subclasses pick the Perl script to run."""
    name = 'perlbmk'
    number = 253
    lang = 'C'
    def test(self, isa, os):
        self.args = [ '-I.', '-I', 'lib', 'test.pl' ]
        self.stdin = 'test.in'
class perlbmk_diffmail(perlbmk):
    """perlbmk running diffmail.pl."""
    def ref(self, isa, os):
        self.simpoint = 141*100E6
        self.args = [ '-I', 'lib', 'diffmail.pl', '2', '550', '15', '24',
                      '23', '100' ]
    def train(self, isa, os):
        self.args = [ '-I', 'lib', 'diffmail.pl', '2', '350', '15', '24',
                      '23', '150' ]
class perlbmk_scrabbl(perlbmk):
    """perlbmk running scrabbl.pl (train input only)."""
    def train(self, isa, os):
        self.args = [ '-I.', '-I', 'lib', 'scrabbl.pl' ]
        self.stdin = 'scrabbl.in'
class perlbmk_makerand(perlbmk):
    """perlbmk running makerand.pl; reduced sets use pre-reduced scripts."""
    def ref(self, isa, os):
        self.simpoint = 11*100E6
        self.args = [ '-I', 'lib', 'makerand.pl' ]
    def lgred(self, isa, os):
        self.args = [ '-I.', '-I', 'lib', 'lgred.makerand.pl' ]
    def mdred(self, isa, os):
        self.args = [ '-I.', '-I', 'lib', 'mdred.makerand.pl' ]
    def smred(self, isa, os):
        self.args = [ '-I.', '-I', 'lib', 'smred.makerand.pl' ]
class perlbmk_perfect(perlbmk):
    """perlbmk running perfect.pl."""
    def ref(self, isa, os):
        self.simpoint = 5*100E6
        self.args = [ '-I', 'lib', 'perfect.pl', 'b', '3', 'm', '4' ]
    def train(self, isa, os):
        self.args = [ '-I', 'lib', 'perfect.pl', 'b', '3' ]
class perlbmk_splitmail1(perlbmk):
    """perlbmk running splitmail.pl with the first argument set."""
    def ref(self, isa, os):
        self.simpoint = 405*100E6
        self.args = [ '-I', 'lib', 'splitmail.pl', '850', '5', '19',
                      '18', '1500' ]
class perlbmk_splitmail2(perlbmk):
    """perlbmk running splitmail.pl with the second argument set."""
    def ref(self, isa, os):
        self.args = [ '-I', 'lib', 'splitmail.pl', '704', '12', '26',
                      '16', '836' ]
class perlbmk_splitmail3(perlbmk):
    """perlbmk running splitmail.pl with the third argument set."""
    def ref(self, isa, os):
        self.args = [ '-I', 'lib', 'splitmail.pl', '535', '13', '25',
                      '24', '1091' ]
class perlbmk_splitmail4(perlbmk):
    """perlbmk running splitmail.pl with the fourth argument set."""
    def ref(self, isa, os):
        self.args = [ '-I', 'lib', 'splitmail.pl', '957', '12', '23',
                      '26', '1014' ]
class twolf(Benchmark):
    """Benchmark 300 'twolf' (C); each input set is selected by its name."""
    name = 'twolf'
    number = 300
    lang = 'C'
    stdin = None

    def __select(self, input_name):
        # twolf takes a single argument naming the input set.
        self.args = [input_name]

    def test(self, isa, os):
        self.__select('test')

    def train(self, isa, os):
        self.__select('train')

    def ref(self, isa, os):
        self.simpoint = 1066*100E6
        self.__select('ref')

    def smred(self, isa, os):
        self.__select('smred')

    def mdred(self, isa, os):
        self.__select('mdred')

    def lgred(self, isa, os):
        self.__select('lgred')
class vortex(Benchmark):
    """Benchmark 255 'vortex' (C).

    The input database is endian-specific: the constructor selects the
    'lendian' or 'bendian' database from the target ISA, and every
    non-reference input set then runs on '<endian>.raw'.
    """
    name = 'vortex'
    number = 255
    lang = 'C'
    stdin = None

    def __init__(self, isa, os, input_set):
        if isa in ('alpha', 'arm', 'thumb', 'aarch64'):
            self.endian = 'lendian'
        elif isa in ('sparc', 'sparc32'):
            self.endian = 'bendian'
        else:
            # Call-form raise: the old "raise AttributeError, msg" statement
            # is Python-2-only syntax; this form works on both 2 and 3.
            raise AttributeError("unknown ISA %s" % isa)
        super(vortex, self).__init__(isa, os, input_set)

    def __default_run(self):
        # All non-ref input sets share the same database and output file.
        self.args = ['%s.raw' % self.endian]
        self.output = 'vortex.out'

    def test(self, isa, os):
        self.__default_run()

    def train(self, isa, os):
        self.__default_run()

    def smred(self, isa, os):
        self.__default_run()

    def mdred(self, isa, os):
        self.__default_run()

    def lgred(self, isa, os):
        self.__default_run()
class vortex1(vortex):
    """vortex on the first reference database."""
    def ref(self, isa, os):
        self.args = [ '%s1.raw' % self.endian ]
        self.output = 'vortex1.out'
        self.simpoint = 271*100E6
class vortex2(vortex):
    """vortex on the second reference database."""
    def ref(self, isa, os):
        self.simpoint = 1024*100E6
        self.args = [ '%s2.raw' % self.endian ]
        self.output = 'vortex2.out'
class vortex3(vortex):
    """vortex on the third reference database."""
    def ref(self, isa, os):
        self.simpoint = 564*100E6
        self.args = [ '%s3.raw' % self.endian ]
        self.output = 'vortex3.out'
class vpr(MinneDefaultBenchmark):
    """Benchmark 175 'vpr' (C); place/route variants supply the arguments."""
    name = 'vpr'
    number = 175
    lang = 'C'
    # not sure about vpr minnespec place.in
class vpr_place(vpr):
    """vpr running the placement phase only."""
    args = [ 'net.in', 'arch.in', 'place.out', 'dum.out', '-nodisp',
             '-place_only', '-init_t', '5', '-exit_t', '0.005',
             '-alpha_t', '0.9412', '-inner_num', '2' ]
    output = 'place_log.out'
class vpr_route(vpr):
    """vpr running the routing phase only."""
    simpoint = 476*100E6
    args = [ 'net.in', 'arch.in', 'place.in', 'route.out', '-nodisp',
             '-route_only', '-route_chan_width', '15',
             '-pres_fac_mult', '2', '-acc_fac', '1',
             '-first_iter_pres_fac', '4', '-initial_pres_fac', '8' ]
    output = 'route_log.out'
# Every benchmark class exported by this module (only concrete input
# variants for bzip2/gcc/gzip/perlbmk/vortex/vpr, not their bases).
# NOTE: 'all' shadows the builtin of the same name; kept for compatibility.
all = [ ammp, applu, apsi, art, art110, art470, equake, facerec, fma3d, galgel,
        lucas, mesa, mgrid, sixtrack, swim, wupwise, bzip2_source,
        bzip2_graphic, bzip2_program, crafty, eon_kajiya, eon_cook,
        eon_rushmeier, gap, gcc_166, gcc_200, gcc_expr, gcc_integrate,
        gcc_scilab, gzip_source, gzip_log, gzip_graphic, gzip_random,
        gzip_program, mcf, parser, perlbmk_diffmail, perlbmk_makerand,
        perlbmk_perfect, perlbmk_splitmail1, perlbmk_splitmail2,
        perlbmk_splitmail3, perlbmk_splitmail4, twolf, vortex1, vortex2,
        vortex3, vpr_place, vpr_route ]

__all__ = [ x.__name__ for x in all ]

# When run as a script, instantiate every benchmark for alpha/tru64 and
# dump its process arguments for the ref/test/train input sets.
if __name__ == '__main__':
    from pprint import pprint
    for bench in all:
        for input_set in 'ref', 'test', 'train':
            print 'class: %s' % bench.__name__
            x = bench('alpha', 'tru64', input_set)
            print '%s: %s' % (x, input_set)
            pprint(x.makeProcessArgs())
            print
| |
from threeML.minimizer.minimization import LocalMinimizer, CannotComputeErrors, FitFailed, CannotComputeCovariance
from threeML.io.dict_with_pretty_print import DictWithPrettyPrint
from threeML.io.detect_notebook import is_inside_notebook
from iminuit import Minuit
from iminuit.frontends.console import ConsoleFrontend
from iminuit.frontends.html import HtmlFrontend
import collections
import numpy as np
class MINOSFailed(Exception):
    """Raised when the MINOS error-estimation step cannot complete."""
# This is a function to add a method to a class
# We will need it in the MinuitMinimizer
def add_method(self, method, name=None):
    """
    Attach *method* to the class of *self*.

    :param self: any instance; the method is set on self.__class__, so it
                 becomes available on every instance of that class
    :param method: a plain function taking the instance as first argument
    :param name: attribute name to use; defaults to the function's own name
    :return: None
    """

    if name is None:
        # func_name is Python-2-only; __name__ exists on both 2 and 3.
        name = method.__name__

    setattr(self.__class__, name, method)
def _get_frontend():
    """
    Pick the iminuit frontend matching the execution context.

    :return: an HtmlFrontend inside a notebook, a ConsoleFrontend otherwise
    """

    return HtmlFrontend() if is_inside_notebook() else ConsoleFrontend()
class MinuitMinimizer(LocalMinimizer):
    """Local minimizer backed by (i)Minuit's MIGRAD/HESSE/MINOS algorithms."""

    # The only option a user may override through the setup dictionary
    valid_setup_keys = ('ftol',)

    # NOTE: this class is built to be able to work both with iMinuit and with a boost interface to SEAL
    # minuit, i.e., it does not rely on functionality that iMinuit provides which is not of the original
    # minuit. This makes the implementation a little bit more cumbersome, but more adaptable if we want
    # to switch back to the bare bone SEAL minuit

    def __init__(self, function, parameters, verbosity=0):

        # This will contain the results of the last call to Migrad
        self._last_migrad_results = None

        super(MinuitMinimizer, self).__init__(function, parameters, verbosity)

    def _setup(self, user_setup_dict):
        """Build the iminuit.Minuit instance from the internal parameter specification."""

        # Prepare the dictionary for the parameters which will be used by iminuit
        iminuit_init_parameters = collections.OrderedDict()

        # List of variable names that will be used for iminuit.
        variable_names_for_iminuit = []

        # NOTE: we use the internal_ versions of value, min_value and max_value because they don't have
        # units, and they are transformed to make the fit easier (for example in log scale)
        for parameter_path, (value, delta, minimum, maximum) in self._internal_parameters.items():

            current_name = self._parameter_name_to_minuit_name(parameter_path)

            variable_names_for_iminuit.append(current_name)

            # Initial value
            iminuit_init_parameters['%s' % current_name] = value

            # Initial delta
            iminuit_init_parameters['error_%s' % current_name] = delta

            # Limits
            iminuit_init_parameters['limit_%s' % current_name] = (minimum,
                                                                  maximum)

            # This is useless, since all parameters here are free,
            # but do it anyway for clarity
            iminuit_init_parameters['fix_%s' % current_name] = False

        # This is to tell Minuit that we are dealing with likelihoods,
        # not chi square
        iminuit_init_parameters['errordef'] = 0.5

        iminuit_init_parameters['print_level'] = self.verbosity

        iminuit_init_parameters['frontend'] = _get_frontend()

        # We need to make a function with the parameters as explicit
        # variables in the calling sequence, so that Minuit will be able
        # to probe the parameter's names
        var_spelled_out = ",".join(variable_names_for_iminuit)

        # A dictionary to keep a way to convert from var. name to
        # variable position in the function calling sequence
        # (will use this in contours)
        self.name_to_position = {k: i for i, k in enumerate(variable_names_for_iminuit)}

        # Write and compile the code for such function
        code = 'def _f(self, %s):\n return self.function(%s)' % (var_spelled_out, var_spelled_out)
        exec code

        # Add the function just created as a method of the class
        # so it will be able to use the 'self' pointer
        add_method(self, _f, "_f")

        # Finally we can instance the Minuit class
        self.minuit = Minuit(self._f, **iminuit_init_parameters)

        # Make sure we got this right (some versions of iminuit does not understand the keyword in the setup)
        self.minuit.errordef = 0.5

        if user_setup_dict is not None:

            if 'ftol' in user_setup_dict:

                self.minuit.tol = user_setup_dict['ftol']

            else:

                # Do nothing and leave the default in iminuit
                pass

        self._best_fit_parameters = None
        self._function_minimum_value = None

    @staticmethod
    def _parameter_name_to_minuit_name(parameter):
        """
        Translate the name of the parameter to the format accepted by Minuit

        :param parameter: the parameter name, of the form source.component.shape.parname
        :return: a minuit-friendly name for the parameter, such as source_component_shape_parname
        """

        return parameter.replace(".", "_")

    # Override this because minuit uses different names
    def restore_best_fit(self):
        """
        Set the parameters back to their best fit value

        :return: none
        """

        # Reset the internal value of all parameters
        super(MinuitMinimizer, self).restore_best_fit()

        # Update also the internal iminuit dictionary
        for k, par in self.parameters.items():

            minuit_name = self._parameter_name_to_minuit_name(k)

            self.minuit.values[minuit_name] = par._get_internal_value()

    def _is_fit_ok(self):
        """
        iMinuit provides the method migrad_ok(). However, that method also checks for a valid Hessian matrix, which
        is a stricter requirement than just asking that the fit has converged. That is why we implement this method

        :return: whether the fit converged or not
        """

        assert self._last_migrad_results is not None, "MIGRAD has not been run yet."

        # Converged means: EDM below threshold and a valid parameter set
        if not self._last_migrad_results[0]['is_above_max_edm'] and \
                self._last_migrad_results[0]['has_valid_parameters']:

            return True

        else:

            return False

    def _print_current_status(self):
        """
        To be used to print info before raising an exception

        :return:
        """

        print("Last status:\n")
        print(self._last_migrad_results[0])
        print("\n")

        # Print params to get some info about the failure
        self.minuit.print_param()

    def _minimize(self):
        """
        Minimize the function using MIGRAD

        :param compute_covar: whether to compute the covariance (and error estimates) or not
        :return: best_fit: a dictionary containing the parameters at their best fit values
                 function_minimum : the value for the function at the minimum

                 NOTE: if the minimization fails, the dictionary will be empty and the function_minimum will be set
                 to minimization.FIT_FAILED
        """

        # Try a maximum of 10 times and break as soon as the fit is ok
        self._last_migrad_results = self.minuit.migrad(resume=False)

        for i in range(9):

            if self._is_fit_ok():

                break

            else:

                # Try again
                self._last_migrad_results = self.minuit.migrad()

        if not self._is_fit_ok():

            self._print_current_status()

            raise FitFailed("MIGRAD call failed. This is usually due to unconstrained parameters.")

        else:

            # Gather the optimized values for all parameters from the internal
            # iminuit dictionary
            best_fit_values = []

            for k, par in self.parameters.items():

                minuit_name = self._parameter_name_to_minuit_name(k)

                best_fit_values.append(self.minuit.values[minuit_name])

            return best_fit_values, self._last_migrad_results[0]['fval']

    # Override the default _compute_covariance_matrix
    def _compute_covariance_matrix(self, best_fit_values):
        # (Re)compute the Hessian at the best fit before reading the matrix
        self.minuit.hesse()

        try:

            covariance = np.array(self.minuit.matrix(correlation=False))

        except RuntimeError:

            # Covariance computation has failed
            # Print current status
            self._print_current_status()

            raise CannotComputeCovariance("HESSE failed. Most probably some of your parameters are unconstrained.")

        return covariance

    def get_errors(self):
        """
        Compute asymmetric errors using MINOS (slow, but accurate) and print them.

        NOTE: this should be called immediately after the minimize() method

        :return: a dictionary containing the asymmetric errors for each parameter.
        """

        self.restore_best_fit()

        if not self._is_fit_ok():
            raise CannotComputeErrors("MIGRAD results not valid, cannot compute errors.")

        try:

            self.minuit.minos()

        except:

            self._print_current_status()

            raise MINOSFailed("MINOS has failed. This is not necessarily a problem if:\n\n"
                              "* There are unconstrained parameters (the error is undefined). This is usually signaled "
                              "by an approximated error, printed after the fit, larger than the best fit value\n\n"
                              "* The fit is very difficult, because of high correlation between parameters. This is "
                              "signaled by values close to 1.0 or -1.0 in the correlation matrix printed after the "
                              "fit step.\n\n"
                              "In this cases you can check the contour plots with get_contours(). If you are using a "
                              "user-defined model, you can also try to reformulate your model with less correlated "
                              "parameters.")

        # Make a list for the results
        errors = collections.OrderedDict()

        for k, par in self.parameters.items():

            minuit_name = self._parameter_name_to_minuit_name(k)

            minus_error = self.minuit.merrors[(minuit_name, -1)]
            plus_error = self.minuit.merrors[(minuit_name, 1)]

            if par.has_transformation():

                # Need to transform in the external reference
                best_fit_value_internal = self._fit_results.loc[par.path, 'value']

                _, minus_error_external = par.internal_to_external_delta(best_fit_value_internal, minus_error)
                _, plus_error_external = par.internal_to_external_delta(best_fit_value_internal, plus_error)

            else:

                minus_error_external = minus_error
                plus_error_external = plus_error

            errors[k] = ((minus_error_external, plus_error_external))

        return errors
| |
# Copyright 2022 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cirq
# Tag used to mark operations the transformer must leave in place when a
# TransformerContext lists it in tags_to_ignore.
NO_COMPILE_TAG = "no_compile_tag"
def assert_optimizes(before, after, measure_only_moment=True, with_context=False):
    """Apply synchronize_terminal_measurements to *before* and require *after*."""
    if with_context:
        context = cirq.TransformerContext(tags_to_ignore=(NO_COMPILE_TAG,))
        transformed_circuit = cirq.synchronize_terminal_measurements(
            before, context=context, after_other_operations=measure_only_moment
        )
    else:
        transformed_circuit = cirq.synchronize_terminal_measurements(
            before, after_other_operations=measure_only_moment
        )
    cirq.testing.assert_same_circuits(transformed_circuit, after)
def test_no_move():
    # A circuit with no measurements is returned unchanged.
    q1 = cirq.NamedQubit('q1')
    circuit = cirq.Circuit([cirq.Moment([cirq.H(q1)])])
    assert_optimizes(before=circuit, after=circuit)
def test_simple_align():
    """Terminal measurements are gathered into one final moment; with a
    context that ignores NO_COMPILE_TAG the circuit is left unchanged."""
    q1 = cirq.NamedQubit('q1')
    q2 = cirq.NamedQubit('q2')
    before = cirq.Circuit(
        [
            cirq.Moment([cirq.H(q1), cirq.H(q2)]),
            cirq.Moment([cirq.measure(q1).with_tags(NO_COMPILE_TAG), cirq.Z(q2)]),
            cirq.Moment([cirq.measure(q2)]),
        ]
    )
    after = cirq.Circuit(
        [
            cirq.Moment([cirq.H(q1), cirq.H(q2)]),
            cirq.Moment([cirq.Z(q2)]),
            cirq.Moment([cirq.measure(q1).with_tags(NO_COMPILE_TAG), cirq.measure(q2)]),
        ]
    )
    assert_optimizes(before=before, after=after)
    assert_optimizes(before=before, after=before, with_context=True)
def test_simple_partial_align():
    """Only the tagged measurement moves to its own final moment; with a
    context that ignores NO_COMPILE_TAG the circuit is left unchanged."""
    q1 = cirq.NamedQubit('q1')
    q2 = cirq.NamedQubit('q2')
    before = cirq.Circuit(
        [
            cirq.Moment([cirq.measure(q1), cirq.Z(q2)]),
            cirq.Moment([cirq.Z(q1), cirq.measure(q2).with_tags(NO_COMPILE_TAG)]),
        ]
    )
    after = cirq.Circuit(
        [
            cirq.Moment([cirq.measure(q1), cirq.Z(q2)]),
            cirq.Moment([cirq.Z(q1)]),
            cirq.Moment([cirq.measure(q2).with_tags(NO_COMPILE_TAG)]),
        ]
    )
    assert_optimizes(before=before, after=after)
    assert_optimizes(before=before, after=before, with_context=True)
def test_slide_forward_one():
    """Measurements sharing a moment with H(q1) are pushed into a new final
    moment; when the tagged measurement is ignored, only measure(q3) moves."""
    q1 = cirq.NamedQubit('q1')
    q2 = cirq.NamedQubit('q2')
    q3 = cirq.NamedQubit('q3')
    before = cirq.Circuit(
        [
            cirq.Moment([cirq.H(q1), cirq.measure(q2).with_tags(NO_COMPILE_TAG), cirq.measure(q3)]),
        ]
    )
    after = cirq.Circuit(
        [
            cirq.Moment([cirq.H(q1)]),
            cirq.Moment([cirq.measure(q2).with_tags(NO_COMPILE_TAG), cirq.measure(q3)]),
        ]
    )
    after_no_compile = cirq.Circuit(
        [
            cirq.Moment([cirq.H(q1), cirq.measure(q2).with_tags(NO_COMPILE_TAG)]),
            cirq.Moment([cirq.measure(q3)]),
        ]
    )
    assert_optimizes(before=before, after=after)
    assert_optimizes(before=before, after=after_no_compile, with_context=True)
def test_no_slide_forward_one():
    # With after_other_operations=False, measurements may share the final
    # moment with other gates, so nothing is moved.
    q1 = cirq.NamedQubit('q1')
    q2 = cirq.NamedQubit('q2')
    q3 = cirq.NamedQubit('q3')
    circuit = cirq.Circuit(
        [cirq.Moment([cirq.H(q1), cirq.measure(q2), cirq.measure(q3)])]
    )
    assert_optimizes(before=circuit, after=circuit, measure_only_moment=False)
def test_blocked_shift_one():
    """The tagged terminal measurement is moved past H(q1) into a new final
    moment; with a NO_COMPILE_TAG-ignoring context the circuit is unchanged."""
    q1 = cirq.NamedQubit('q1')
    q2 = cirq.NamedQubit('q2')
    before = cirq.Circuit(
        [
            cirq.Moment([cirq.H(q1), cirq.H(q2)]),
            cirq.Moment([cirq.measure(q1), cirq.Z(q2)]),
            cirq.Moment([cirq.H(q1), cirq.measure(q2).with_tags(NO_COMPILE_TAG)]),
        ]
    )
    after = cirq.Circuit(
        [
            cirq.Moment([cirq.H(q1), cirq.H(q2)]),
            cirq.Moment([cirq.measure(q1), cirq.Z(q2)]),
            cirq.Moment([cirq.H(q1)]),
            cirq.Moment([cirq.measure(q2).with_tags(NO_COMPILE_TAG)]),
        ]
    )
    assert_optimizes(before=before, after=after)
    assert_optimizes(before=before, after=before, with_context=True)
def test_complex_move():
    """Both tagged terminal measurements (q2, q3) are gathered into a single
    new final moment; with a NO_COMPILE_TAG-ignoring context the circuit is
    left unchanged."""
    q1 = cirq.NamedQubit('q1')
    q2 = cirq.NamedQubit('q2')
    q3 = cirq.NamedQubit('q3')
    before = cirq.Circuit(
        [
            cirq.Moment([cirq.H(q1), cirq.H(q2)]),
            cirq.Moment([cirq.measure(q1), cirq.Z(q2)]),
            cirq.Moment([cirq.H(q1), cirq.measure(q2).with_tags(NO_COMPILE_TAG)]),
            cirq.Moment([cirq.H(q3)]),
            cirq.Moment([cirq.X(q1), cirq.measure(q3).with_tags(NO_COMPILE_TAG)]),
        ]
    )
    after = cirq.Circuit(
        [
            cirq.Moment([cirq.H(q1), cirq.H(q2)]),
            cirq.Moment([cirq.measure(q1), cirq.Z(q2)]),
            cirq.Moment([cirq.H(q1)]),
            cirq.Moment([cirq.H(q3)]),
            cirq.Moment([cirq.X(q1)]),
            cirq.Moment(
                [
                    cirq.measure(q2).with_tags(NO_COMPILE_TAG),
                    cirq.measure(q3).with_tags(NO_COMPILE_TAG),
                ]
            ),
        ]
    )
    assert_optimizes(before=before, after=after)
    assert_optimizes(before=before, after=before, with_context=True)
def test_complex_move_no_slide():
    """With after_other_operations=False the measurements join the last
    moment instead of getting a dedicated one; with the tag-ignoring context
    the circuit is left unchanged."""
    q1 = cirq.NamedQubit('q1')
    q2 = cirq.NamedQubit('q2')
    q3 = cirq.NamedQubit('q3')
    before = cirq.Circuit(
        [
            cirq.Moment([cirq.H(q1), cirq.H(q2)]),
            cirq.Moment([cirq.measure(q1), cirq.Z(q2)]),
            cirq.Moment([cirq.H(q1), cirq.measure(q2).with_tags(NO_COMPILE_TAG)]),
            cirq.Moment([cirq.H(q3)]),
            cirq.Moment([cirq.X(q1), cirq.measure(q3)]),
        ]
    )
    after = cirq.Circuit(
        [
            cirq.Moment(cirq.H(q1), cirq.H(q2)),
            cirq.Moment(cirq.measure(q1), cirq.Z(q2)),
            cirq.Moment(cirq.H(q1)),
            cirq.Moment(cirq.H(q3)),
            cirq.Moment(cirq.X(q1), cirq.measure(q2).with_tags(NO_COMPILE_TAG), cirq.measure(q3)),
        ]
    )
    assert_optimizes(before=before, after=after, measure_only_moment=False)
    assert_optimizes(before=before, after=before, measure_only_moment=False, with_context=True)
def test_multi_qubit():
    # A multi-qubit measurement followed by a gate on one of its qubits is
    # left unchanged.
    qubits = cirq.LineQubit.range(2)
    circuit = cirq.Circuit(cirq.measure(*qubits, key='m'), cirq.H(qubits[1]))
    assert_optimizes(before=circuit, after=circuit)
def test_classically_controlled_op():
    # A measurement feeding a classically controlled gate is left unchanged.
    q0, q1 = cirq.LineQubit.range(2)
    operations = [
        cirq.H(q0),
        cirq.measure(q0, key='m'),
        cirq.X(q1).with_classical_controls('m'),
    ]
    circuit = cirq.Circuit(operations)
    assert_optimizes(before=circuit, after=circuit)
| |
"""
RDS Module. Can be used to perform various RDS operations
"""
from __future__ import print_function
import os
import datetime
import logging
import time
import sys
from ConfigParser import ConfigParser, NoOptionError
import boto3
import botocore
import pytz
from . import read_config, ASIAQ_CONFIG
from .disco_alarm_config import DiscoAlarmsConfig
from .disco_alarm import DiscoAlarm
from .disco_aws_util import is_truthy
from .disco_creds import DiscoS3Bucket
from .disco_route53 import DiscoRoute53
from .exceptions import TimeoutError, RDSEnvironmentError
from .resource_helper import keep_trying
DEFAULT_CONFIG_FILE_RDS = "disco_rds.ini"
RDS_STATE_POLL_INTERVAL = 30  # seconds
RDS_DELETE_TIMEOUT = 1800  # seconds. From observation, it takes about 15-20 mins to delete an RDS instance
RDS_SNAPSHOT_DELETE_TIMEOUT = 60  # seconds. From observation, this takes approximately 0 seconds
RDS_STARTUP_TIMEOUT = 600  # seconds. Time we allow RDS to get IP address before we give up
RDS_RESTORE_TIMEOUT = 1200  # seconds. Time we allow RDS to be restored and available to try and modify it
# Fallback license model per engine family, used when none is configured
DEFAULT_LICENSE = {
    'oracle': 'bring-your-own-license',
    'postgres': 'postgresql-license'
}
# Fallback TCP port per engine family, used when none is configured
DEFAULT_PORT = {
    'postgres': 5432,
    'oracle': 1521
}
class DiscoRDS(object):
"""Class for doing RDS operations on a given environment"""
    def __init__(self, vpc):
        """Initialize class"""
        self.config_aws = read_config()
        self.config_rds = read_config(config_file=DEFAULT_CONFIG_FILE_RDS)
        self.client = boto3.client('rds')
        self.vpc = vpc
        self.vpc_name = vpc.environment_name
        # staging/production read an environment-qualified domain-name option;
        # every other environment falls back to the default domain name.
        if self.vpc_name not in ['staging', 'production']:
            self.domain_name = self.config_aws.get('disco_aws', 'default_domain_name')
        else:
            self.domain_name = self.config_aws.get('disco_aws',
                                                   'default_domain_name@{0}'.format(self.vpc_name))
def config_with_default(self, section, param, default=None):
"""Read the RDS config file and extract the parameter value, or return default if missing"""
try:
return self.config_rds.get(section, param)
except NoOptionError:
return default
def config_integer(self, section, param, default=None):
"""
Read the RDS config file and extract an integer value. If the value is not found and no
default is provided, raise NoOptionError.
"""
try:
return int(self.config_rds.get(section, param))
except NoOptionError:
if default is not None:
return int(default)
else:
raise
def config_truthy(self, section, param, default='True'):
"""Read the RDS config file and extract the boolean value, or return default if missing"""
return is_truthy(self.config_with_default(section, param, default))
    def get_master_password(self, instance_identifier):
        """
        Get the Master Password for instance stored in the S3 bucket
        """
        s3_password_key = 'rds/{0}/master_user_password'.format(instance_identifier)
        # Uses the first credential bucket configured for this environment
        bucket_name = self.vpc.get_credential_buckets_from_env_name(self.config_aws, self.vpc_name)[0]
        return DiscoS3Bucket(bucket_name).get_key(s3_password_key)
    def get_instance_parameters(self, instance_identifier):
        """Read the config file and extract the Instance related parameters"""
        # The config section is named after the instance identifier itself
        section = instance_identifier
        db_engine = self.config_rds.get(section, 'engine')
        # e.g. "oracle-se1" -> "oracle"; keys the per-family defaults below
        engine_family = db_engine.split('-')[0]
        default_license = DEFAULT_LICENSE.get(engine_family)
        default_port = DEFAULT_PORT.get(engine_family)
        instance_params = {
            'AllocatedStorage': self.config_integer(section, 'allocated_storage'),
            'AutoMinorVersionUpgrade': self.config_truthy(section, 'auto_minor_version_upgrade'),
            'CharacterSetName': self.config_with_default(section, 'character_set_name'),
            'DBInstanceClass': self.config_rds.get(section, 'db_instance_class'),
            'DBInstanceIdentifier': instance_identifier,
            'DBParameterGroupName': section,
            'DBSubnetGroupName': section,
            'Engine': db_engine,
            'EngineVersion': self.config_rds.get(section, 'engine_version'),
            'Iops': self.config_integer(section, 'iops', 0),
            'LicenseModel': self.config_with_default(section, 'license_model', default_license),
            'MasterUserPassword': self.get_master_password(instance_identifier),
            'MasterUsername': self.config_rds.get(section, 'master_username'),
            'MultiAZ': self.config_truthy(section, 'multi_az'),
            'Port': self.config_integer(section, 'port', default_port),
            'PubliclyAccessible': self.config_truthy(section, 'publicly_accessible', 'False'),
            'VpcSecurityGroupIds': [self.get_rds_security_group_id()],
            'StorageEncrypted': self.config_truthy(section, 'storage_encrypted')}
        return instance_params
def get_rds_security_group_id(self):
"""
Returns the intranet security group id for the VPC for the current environment
"""
security_groups = self.vpc.get_all_security_groups_for_vpc()
intranet = [sg for sg in security_groups if sg.tags and sg.tags.get("meta_network") == "intranet"][0]
return intranet.id
    def update_cluster(self, instance_identifier):
        """
        Run the RDS Cluster update
        """
        instance_params = self.get_instance_parameters(instance_identifier)
        # Identifier is "<env>-<database>"; the database part keys the alarms
        database_class = instance_identifier.split('-')[1]
        # EAFP probe: a ClientError from describe means the instance is absent
        try:
            self.client.describe_db_instances(DBInstanceIdentifier=instance_identifier)
            instance_exists = True
        except botocore.exceptions.ClientError:
            instance_exists = False
        if instance_exists:
            self.modify_db_instance(instance_params)
        else:
            self.recreate_db_subnet_group(instance_params["DBSubnetGroupName"])
            # Process the Engine-specific Parameters for the Instance
            group_name = instance_params["DBParameterGroupName"]
            group_family = self.get_db_parameter_group_family(
                instance_params["Engine"], instance_params["EngineVersion"])
            logging.debug("creating parameter group %s with family %s", group_name, group_family)
            self.recreate_db_parameter_group(database_class, group_name, group_family)
            self.create_db_instance(instance_params)
        # Create/Update CloudWatch Alarms for this instance
        self.spinup_alarms(database_class)
        # Create a DNS record for this instance
        self.setup_dns(instance_identifier)
def _get_instance_address(self, instance_identifier):
"""
Obtains the instance end point for the given RDS instance
"""
instance_info = self.client.describe_db_instances(DBInstanceIdentifier=instance_identifier)
return instance_info['DBInstances'][0]['Endpoint']['Address']
    def setup_dns(self, instance_identifier):
        """
        Setup Domain Name Lookup using Route 53
        """
        start_time = time.time()
        # The endpoint is not available until the instance is up; retry until
        # RDS_STARTUP_TIMEOUT expires.
        instance_endpoint = keep_trying(RDS_STARTUP_TIMEOUT, self._get_instance_address, instance_identifier)
        logging.info("Waited %s seconds for RDS to get an address", time.time() - start_time)
        disco_route53 = DiscoRoute53()
        instance_record_name = '{0}.{1}.'.format(instance_identifier, self.domain_name)
        # Delete and recreate DNS record for this Instance
        disco_route53.delete_record(self.domain_name, instance_record_name, 'CNAME')
        disco_route53.create_record(self.domain_name, instance_record_name, 'CNAME', instance_endpoint)
    def spinup_alarms(self, database_class):
        """
        Configure alarms for this RDS instance. The alarms are configured in disco_alarms.ini
        """
        logging.debug("Configuring Cloudwatch alarms ")
        disco_alarm_config = DiscoAlarmsConfig(self.vpc_name)
        disco_alarm = DiscoAlarm()
        instance_alarms = disco_alarm_config.get_alarms(database_class)
        disco_alarm.create_alarms(instance_alarms)
def update_all_clusters_in_vpc(self):
"""
Updates every RDS instance in the current VPC to match the configuration
"""
sections = [section for section in self.config_rds.sections()
if section.split("-")[0] == self.vpc_name]
logging.debug("The following RDS clusters will be updated: %s", ", ".join(sections))
for section in sections:
self.update_cluster(section)
def recreate_db_subnet_group(self, db_subnet_group_name):
    """
    Create the DB Subnet Group, dropping any pre-existing group of the same name first.

    DB subnet groups must contain at least one subnet in at least two AZs in the region.
    @db_subnet_group_name: String. The name for the DB subnet group.
                           This value is stored as a lowercase string.
    """
    try:
        self.client.delete_db_subnet_group(DBSubnetGroupName=db_subnet_group_name)
    except Exception as err:  # group may simply not exist yet; best-effort delete
        logging.debug("Not deleting subnet group '%s': %s", db_subnet_group_name, repr(err))
    db_subnet_group_description = 'Subnet Group for VPC {0}'.format(self.vpc_name)
    subnets = self.vpc.vpc.connection.get_all_subnets(filters=self.vpc.vpc_filter())
    # Use tags.get() so subnets missing the 'meta_network' tag are skipped
    # instead of raising KeyError and aborting the whole group creation.
    subnet_ids = [str(subnet.id) for subnet in subnets
                  if subnet.tags.get('meta_network') == 'intranet']
    self.client.create_db_subnet_group(DBSubnetGroupName=db_subnet_group_name,
                                       DBSubnetGroupDescription=db_subnet_group_description,
                                       SubnetIds=subnet_ids)
def get_final_snapshot(self, db_instance_identifier):
    """
    Return information on the instance's final DB snapshot, or None if there
    is none. The snapshot can be used to restore a deleted DB instance.
    """
    snapshot_name = '{}-final-snapshot'.format(db_instance_identifier)
    try:
        described = self.client.describe_db_snapshots(DBSnapshotIdentifier=snapshot_name)
    except botocore.exceptions.ClientError:
        # AWS raises ClientError when the snapshot does not exist
        return None
    for snapshot in described["DBSnapshots"]:
        return snapshot
    return None
def delete_keys(self, dictionary, keys):
    """Return a shallow copy of the given dict with every key in *keys* removed."""
    pruned = dictionary.copy()
    for unwanted in keys:
        # Like the original dict, raises KeyError if the key is absent
        del pruned[unwanted]
    return pruned
def create_db_instance(self, instance_params):
    """
    Create the relational database instance.

    If a final snapshot exists for the given DB instance identifier, restore
    from that snapshot; otherwise create a brand new DB instance.
    """
    instance_identifier = instance_params['DBInstanceIdentifier']
    final_snapshot = self.get_final_snapshot(instance_identifier)
    if final_snapshot:
        logging.info("Restoring RDS cluster from snapshot: %s", final_snapshot["DBSnapshotIdentifier"])
        # These parameters are not accepted by restore_db_instance_from_db_snapshot
        restore_params = self.delete_keys(instance_params, [
            "AllocatedStorage", "CharacterSetName", "DBParameterGroupName", "StorageEncrypted",
            "EngineVersion", "MasterUsername", "MasterUserPassword", "VpcSecurityGroupIds"])
        restore_params["DBSnapshotIdentifier"] = final_snapshot["DBSnapshotIdentifier"]
        self.client.restore_db_instance_from_db_snapshot(**restore_params)
        # Apply the full configuration once the restored instance accepts modifications
        keep_trying(RDS_RESTORE_TIMEOUT, self.modify_db_instance, instance_params)
    else:
        if instance_params['Engine'] == 'postgres':
            # For Postgres, we don't need this parameter at creation
            instance_params = self.delete_keys(instance_params, ["CharacterSetName"])
        logging.info("Creating new RDS cluster %s", instance_identifier)
        self.client.create_db_instance(**instance_params)
def modify_db_instance(self, instance_params, apply_immediately=True):
    """
    Push configuration changes to an existing DB instance.

    Parameters that RDS only accepts at creation time are stripped before
    calling modify_db_instance.
    """
    logging.info("Updating RDS cluster %s", instance_params["DBInstanceIdentifier"])
    creation_only = ["Engine", "LicenseModel", "DBSubnetGroupName", "PubliclyAccessible",
                     "MasterUsername", "Port", "CharacterSetName", "StorageEncrypted"]
    mutable_params = self.delete_keys(instance_params, creation_only)
    self.client.modify_db_instance(ApplyImmediately=apply_immediately, **mutable_params)
def get_db_instances(self, status=None):
    """
    Return all RDS clusters/instances in the current VPC.

    If *status* is given (a state name or a list of state names), only
    instances currently in one of those states are returned.
    """
    response = self.client.describe_db_instances()  # filters are "not currently implemented"
    if not status:
        wanted_states = None
    elif isinstance(status, list):
        wanted_states = status
    else:
        wanted_states = [status]
    matches = []
    for instance in response["DBInstances"]:
        if instance["DBSubnetGroup"]["VpcId"] != self.vpc.vpc.id:
            continue
        if wanted_states and instance["DBInstanceStatus"] not in wanted_states:
            continue
        matches.append(instance)
    return matches
def _wait_for_db_instance_deletions(self, timeout=RDS_DELETE_TIMEOUT):
    """
    Block until no RDS instance in this VPC is in the 'deleting' state.

    Polls every RDS_STATE_POLL_INTERVAL seconds and raises TimeoutError if
    instances are still deleting after *timeout* seconds.
    """
    instances_waiting_for = []  # last set of instance ids we logged, to avoid log spam
    time_passed = 0
    while True:
        instance_dicts = self.get_db_instances(status="deleting")
        instances = sorted([instance["DBInstanceIdentifier"] for instance in instance_dicts])
        if not instances:
            return
        # Check the timeout only after confirming something is still deleting,
        # so we never fail when the work actually finished in time.
        if time_passed >= timeout:
            raise TimeoutError(
                "Timed out waiting for RDS clusters to finish deleting after {}s.".format(time_passed))
        # Log only when the set of still-deleting instances changes
        if instances != instances_waiting_for:
            logging.info("Waiting for deletion of RDS clusters: %s", ", ".join(instances))
            instances_waiting_for = instances
        time.sleep(RDS_STATE_POLL_INTERVAL)
        time_passed += RDS_STATE_POLL_INTERVAL
def delete_db_instance(self, instance_identifier, skip_final_snapshot=False):
    """ Delete an RDS instance/cluster. Final snapshot is automatically taken. """
    logging.info("Deleting RDS cluster %s", instance_identifier)
    if skip_final_snapshot:
        # Destructive path: no snapshot will be taken, so make the operator
        # confirm by typing the instance's AllocatedStorage value.
        allocated_storage = self.client.describe_db_instances(DBInstanceIdentifier=instance_identifier)[
            "DBInstances"][0]["AllocatedStorage"]
        ansi_color_red = "\033[91m"
        ansi_color_none = "\033[0m"
        print(ansi_color_red + "CAREFUL! All tables in " + instance_identifier +
              " will be dropped and no backup taken. Data will be irrecoverable." + ansi_color_none)
        # NOTE(review): raw_input is Python 2 only — this code path assumes Python 2
        response = raw_input("Confirm by typing the amount of allocated storage that will be dropped: ")
        if response == str(allocated_storage):
            self.client.delete_db_instance(DBInstanceIdentifier=instance_identifier,
                                           SkipFinalSnapshot=True)
            print("Done")
        else:
            print(("User input did not match the AllocatedStorage value for {}. Chickening out.".format(
                instance_identifier)))
            sys.exit(1)
    else:
        final_snapshot = "%s-final-snapshot" % instance_identifier
        # Best-effort removal of any stale snapshot holding the final-snapshot name
        try:
            self.client.delete_db_snapshot(DBSnapshotIdentifier=final_snapshot)
        except botocore.exceptions.ClientError:
            pass
        # Retried for up to RDS_SNAPSHOT_DELETE_TIMEOUT — presumably to ride out
        # the old snapshot's deletion still being in progress
        keep_trying(
            RDS_SNAPSHOT_DELETE_TIMEOUT,
            self.client.delete_db_instance,
            DBInstanceIdentifier=instance_identifier,
            FinalDBSnapshotIdentifier=final_snapshot)
def delete_all_db_instances(self, wait=True):
    """
    Delete every RDS instance/cluster in the VPC. After issuing the delete
    commands for all instances, optionally wait for the deletions to finish.
    """
    good_states = ["available", "backing-up", "creating"]
    instances = [i for i in self.get_db_instances() if i["DBInstanceStatus"] != "deleting"]
    bad_state_descriptions = []
    for instance in instances:
        if instance["DBInstanceStatus"] not in good_states:
            bad_state_descriptions.append(
                "{} ({})".format(instance["DBInstanceIdentifier"], instance["DBInstanceStatus"]))
    if bad_state_descriptions:
        # Refuse to delete anything unless every instance can be snapshotted
        raise RDSEnvironmentError("Cowardly refusing to delete the following RDS clusters because their"
                                  " state does not allow for a snapshot to be taken: {}".format(
                                      ", ".join(bad_state_descriptions)))
    for instance in instances:
        self.delete_db_instance(instance["DBInstanceIdentifier"])
    if wait:
        self._wait_for_db_instance_deletions()
def create_db_parameter_group(self, db_parameter_group_name, db_parameter_group_family):
    """
    Create a new DB parameter group, initially populated with the default
    parameters of the database engine. Custom values must be applied
    afterwards via modify_db_parameter_group.
    """
    description = 'Custom params-{0}'.format(db_parameter_group_name)
    self.client.create_db_parameter_group(DBParameterGroupName=db_parameter_group_name,
                                          DBParameterGroupFamily=db_parameter_group_family,
                                          Description=description)
def modify_db_parameter_group(self, db_parameter_group_name, parameters):
    """
    Apply custom values to a DB parameter group, one API call per parameter.

    *parameters* is an iterable of tuples; only the first two items (name,
    value) of each tuple are used, the rest are filled with fixed defaults.
    """
    for parameter in parameters:
        param_dict = {'ParameterName': parameter[0],
                      'ParameterValue': parameter[1],
                      'Description': 'Description',
                      'Source': 'engine-default',
                      'ApplyType': 'static',
                      'DataType': 'string',
                      'AllowedValues': 'somevalues',
                      'IsModifiable': True,
                      'MinimumEngineVersion': 'someversion',
                      'ApplyMethod': 'pending-reboot'}
        self.client.modify_db_parameter_group(DBParameterGroupName=db_parameter_group_name,
                                              Parameters=[param_dict])
def recreate_db_parameter_group(self, database_class, db_parameter_group_name,
                                db_parameter_group_family):
    """
    Drop and recreate the DB parameter group, applying any custom parameters.

    Custom parameters are read from ./rds/engine_specific/{database_class}.ini;
    if that file doesn't exist, we'll use default RDS parameters.
    """
    try:
        self.client.delete_db_parameter_group(DBParameterGroupName=db_parameter_group_name)
    except Exception as err:  # the group may not exist yet; best-effort delete
        logging.debug("Not deleting DB parameter group '%s': %s", db_parameter_group_name, repr(err))
    # DB Parameter Group Name must be created first, using RDS defaults
    self.create_db_parameter_group(db_parameter_group_name, db_parameter_group_family)
    # Extract the Custom Values from the config file
    custom_param_file = os.path.join(ASIAQ_CONFIG,
                                     'rds', 'engine_specific',
                                     '{0}.ini'.format(database_class))
    if os.path.isfile(custom_param_file):
        custom_config = ConfigParser()
        custom_config.read(custom_param_file)
        # Custom parameters are listed under a section named after the VPC
        custom_db_params = custom_config.items(self.vpc_name)
        logging.info("Updating RDS db_parameter_group %s (family: %s, #params: %s)",
                     db_parameter_group_name, db_parameter_group_family, len(custom_db_params))
        self.modify_db_parameter_group(db_parameter_group_name, custom_db_params)
def get_db_parameter_group_family(self, engine, engine_version):
    """
    Derive the DB parameter group family from engine name and engine version.

    The family is the lowercased engine name joined with the major.minor
    version; patch level and deeper subdivisions are ignored. The rules here
    are heuristic, and may need to be tweaked.

    Rules:
    * oracle/sqlserver (engines that contain dashes): {engine}-{major}.{minor}
    * others (no dashes): {engine}{major}.{minor}
    """
    version_parts = engine_version.split('.', 2)
    separator = '-' if '-' in engine else ''
    return '{0}{1}{2}.{3}'.format(engine.lower(), separator,
                                  version_parts[0], version_parts[1])
def cleanup_snapshots(self, days):
    """
    Delete all manual snapshots older than *days* days (CLI default is 30).

    Automated snapshots are managed (and expired) by RDS itself, so only
    snapshots of type 'manual' are considered.
    """
    # "now" is loop-invariant: compute it once instead of once per snapshot
    today = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
    snapshots = self.client.describe_db_snapshots(SnapshotType='manual')
    for snapshot in snapshots['DBSnapshots']:
        snapshot_age = (today - snapshot['SnapshotCreateTime']).days
        if snapshot_age > days:
            snapshot_id = snapshot['DBSnapshotIdentifier']
            logging.info("Deleting Snapshot %s since its older than %d", snapshot_id, days)
            self.client.delete_db_snapshot(DBSnapshotIdentifier=snapshot_id)
| |
"""Support for AlarmDecoder devices."""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.helpers.discovery import load_platform
from homeassistant.util import dt as dt_util
from homeassistant.components.binary_sensor import DEVICE_CLASSES_SCHEMA
REQUIREMENTS = ['alarmdecoder==1.13.2']

_LOGGER = logging.getLogger(__name__)

DOMAIN = 'alarmdecoder'

# Key under which the AlarmDecoder controller is stored in hass.data
DATA_AD = 'alarmdecoder'

# Configuration keys for the device section
CONF_DEVICE = 'device'
CONF_DEVICE_BAUD = 'baudrate'
CONF_DEVICE_HOST = 'host'
CONF_DEVICE_PATH = 'path'
CONF_DEVICE_PORT = 'port'
CONF_DEVICE_TYPE = 'type'
CONF_PANEL_DISPLAY = 'panel_display'

# Configuration keys for individual zones
CONF_ZONE_NAME = 'name'
CONF_ZONE_TYPE = 'type'
CONF_ZONE_LOOP = 'loop'
CONF_ZONE_RFID = 'rfid'
CONF_ZONES = 'zones'
CONF_RELAY_ADDR = 'relayaddr'
CONF_RELAY_CHAN = 'relaychan'

# Defaults used when optional device settings are omitted
DEFAULT_DEVICE_TYPE = 'socket'
DEFAULT_DEVICE_HOST = 'localhost'
DEFAULT_DEVICE_PORT = 10000
DEFAULT_DEVICE_PATH = '/dev/ttyUSB0'
DEFAULT_DEVICE_BAUD = 115200
DEFAULT_PANEL_DISPLAY = False
DEFAULT_ZONE_TYPE = 'opening'

# Dispatcher signal names used to fan AlarmDecoder events out to the platforms
SIGNAL_PANEL_MESSAGE = 'alarmdecoder.panel_message'
SIGNAL_PANEL_ARM_AWAY = 'alarmdecoder.panel_arm_away'
SIGNAL_PANEL_ARM_HOME = 'alarmdecoder.panel_arm_home'
SIGNAL_PANEL_DISARM = 'alarmdecoder.panel_disarm'
SIGNAL_ZONE_FAULT = 'alarmdecoder.zone_fault'
SIGNAL_ZONE_RESTORE = 'alarmdecoder.zone_restore'
SIGNAL_RFX_MESSAGE = 'alarmdecoder.rfx_message'
SIGNAL_REL_MESSAGE = 'alarmdecoder.rel_message'

# The device section accepts exactly one of these three shapes
DEVICE_SOCKET_SCHEMA = vol.Schema({
    vol.Required(CONF_DEVICE_TYPE): 'socket',
    vol.Optional(CONF_DEVICE_HOST, default=DEFAULT_DEVICE_HOST): cv.string,
    vol.Optional(CONF_DEVICE_PORT, default=DEFAULT_DEVICE_PORT): cv.port})
DEVICE_SERIAL_SCHEMA = vol.Schema({
    vol.Required(CONF_DEVICE_TYPE): 'serial',
    vol.Optional(CONF_DEVICE_PATH, default=DEFAULT_DEVICE_PATH): cv.string,
    # NOTE(review): baudrate is validated as a string here even though the
    # default is an int — confirm whether cv.positive_int was intended
    vol.Optional(CONF_DEVICE_BAUD, default=DEFAULT_DEVICE_BAUD): cv.string})
DEVICE_USB_SCHEMA = vol.Schema({
    vol.Required(CONF_DEVICE_TYPE): 'usb'})

ZONE_SCHEMA = vol.Schema({
    vol.Required(CONF_ZONE_NAME): cv.string,
    vol.Optional(CONF_ZONE_TYPE,
                 default=DEFAULT_ZONE_TYPE): vol.Any(DEVICE_CLASSES_SCHEMA),
    vol.Optional(CONF_ZONE_RFID): cv.string,
    vol.Optional(CONF_ZONE_LOOP):
        vol.All(vol.Coerce(int), vol.Range(min=1, max=4)),
    # Relay address and channel must be given together or not at all
    vol.Inclusive(CONF_RELAY_ADDR, 'relaylocation',
                  'Relay address and channel must exist together'): cv.byte,
    vol.Inclusive(CONF_RELAY_CHAN, 'relaylocation',
                  'Relay address and channel must exist together'): cv.byte})

CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_DEVICE): vol.Any(
            DEVICE_SOCKET_SCHEMA, DEVICE_SERIAL_SCHEMA,
            DEVICE_USB_SCHEMA),
        vol.Optional(CONF_PANEL_DISPLAY,
                     default=DEFAULT_PANEL_DISPLAY): cv.boolean,
        vol.Optional(CONF_ZONES): {vol.Coerce(int): ZONE_SCHEMA},
    }),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
    """Set up for the AlarmDecoder devices."""
    from alarmdecoder import AlarmDecoder
    from alarmdecoder.devices import (SocketDevice, SerialDevice, USBDevice)

    conf = config.get(DOMAIN)

    # True once a connection is established; gates the auto-reconnect logic
    restart = False
    device = conf.get(CONF_DEVICE)
    display = conf.get(CONF_PANEL_DISPLAY)
    zones = conf.get(CONF_ZONES)

    device_type = device.get(CONF_DEVICE_TYPE)
    host = DEFAULT_DEVICE_HOST
    port = DEFAULT_DEVICE_PORT
    path = DEFAULT_DEVICE_PATH
    baud = DEFAULT_DEVICE_BAUD

    def stop_alarmdecoder(event):
        """Handle the shutdown of AlarmDecoder."""
        _LOGGER.debug("Shutting down alarmdecoder")
        nonlocal restart
        # Clear the flag first so handle_closed_connection won't reconnect
        restart = False
        controller.close()

    def open_connection(now=None):
        """Open a connection to AlarmDecoder."""
        from alarmdecoder.util import NoDeviceError
        nonlocal restart
        try:
            controller.open(baud)
        except NoDeviceError:
            # Schedule another attempt instead of failing setup outright
            _LOGGER.debug("Failed to connect. Retrying in 5 seconds")
            hass.helpers.event.track_point_in_time(
                open_connection, dt_util.utcnow() + timedelta(seconds=5))
            return
        _LOGGER.debug("Established a connection with the alarmdecoder")
        restart = True

    def handle_closed_connection(event):
        """Restart after unexpected loss of connection."""
        nonlocal restart
        if not restart:
            # Deliberate shutdown (stop_alarmdecoder cleared the flag)
            return
        restart = False
        _LOGGER.warning("AlarmDecoder unexpectedly lost connection.")
        hass.add_job(open_connection)

    def handle_message(sender, message):
        """Handle message from AlarmDecoder."""
        hass.helpers.dispatcher.dispatcher_send(
            SIGNAL_PANEL_MESSAGE, message)

    def handle_rfx_message(sender, message):
        """Handle RFX message from AlarmDecoder."""
        hass.helpers.dispatcher.dispatcher_send(
            SIGNAL_RFX_MESSAGE, message)

    def zone_fault_callback(sender, zone):
        """Handle zone fault from AlarmDecoder."""
        hass.helpers.dispatcher.dispatcher_send(
            SIGNAL_ZONE_FAULT, zone)

    def zone_restore_callback(sender, zone):
        """Handle zone restore from AlarmDecoder."""
        hass.helpers.dispatcher.dispatcher_send(
            SIGNAL_ZONE_RESTORE, zone)

    def handle_rel_message(sender, message):
        """Handle relay message from AlarmDecoder."""
        hass.helpers.dispatcher.dispatcher_send(
            SIGNAL_REL_MESSAGE, message)

    controller = False
    if device_type == 'socket':
        host = device.get(CONF_DEVICE_HOST)
        port = device.get(CONF_DEVICE_PORT)
        controller = AlarmDecoder(SocketDevice(interface=(host, port)))
    elif device_type == 'serial':
        path = device.get(CONF_DEVICE_PATH)
        baud = device.get(CONF_DEVICE_BAUD)
        controller = AlarmDecoder(SerialDevice(interface=path))
    elif device_type == 'usb':
        # USB devices are not supported: the controller is created but never
        # assigned, and setup bails out immediately.
        AlarmDecoder(USBDevice.find())
        return False

    # Wire panel events to Home Assistant dispatcher signals
    controller.on_message += handle_message
    controller.on_rfx_message += handle_rfx_message
    controller.on_zone_fault += zone_fault_callback
    controller.on_zone_restore += zone_restore_callback
    controller.on_close += handle_closed_connection
    controller.on_relay_changed += handle_rel_message

    hass.data[DATA_AD] = controller

    open_connection()
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_alarmdecoder)

    load_platform(hass, 'alarm_control_panel', DOMAIN, conf, config)
    if zones:
        load_platform(
            hass, 'binary_sensor', DOMAIN, {CONF_ZONES: zones}, config)
    if display:
        load_platform(hass, 'sensor', DOMAIN, conf, config)
    return True
| |
# -*- coding: utf-8 -*-
from cms.models.placeholderpluginmodel import PlaceholderReference
from cms.utils.urlutils import admin_reverse
from django.contrib.admin.helpers import AdminForm
from django.utils.decorators import method_decorator
import json
from django.views.decorators.clickjacking import xframe_options_sameorigin
from cms.constants import PLUGIN_COPY_ACTION, PLUGIN_MOVE_ACTION
from cms.exceptions import PluginLimitReached
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.plugin_pool import plugin_pool
from cms.utils import get_cms_setting
from cms.utils.compat.dj import force_unicode
from cms.utils.plugins import requires_reload, has_reached_plugin_limit
from django.contrib.admin import ModelAdmin
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.template.defaultfilters import force_escape, escapejs
from django.utils.translation import ugettext as _
from django.conf import settings
from django.views.decorators.http import require_POST
import warnings
from django.template.response import TemplateResponse
from django.contrib.admin.util import get_deleted_objects
from django.core.exceptions import PermissionDenied
from django.db import router
from django.http import HttpResponseRedirect
from cms.utils import copy_plugins, permissions, get_language_from_request
from cms.utils.i18n import get_language_list
from cms.utils.transaction import wrap_transaction
class FrontendEditableAdminMixin(object):
    """Admin mixin adding a single-field edit view used by the frontend editor."""

    # Names of model fields that may be edited through the frontend view
    frontend_editable_fields = []

    def get_urls(self):
        """
        Register the url for the single field edit view
        """
        from django.conf.urls import patterns, url
        info = "%s_%s" % (self.model._meta.app_label, self.model._meta.module_name)
        pat = lambda regex, fn: url(regex, self.admin_site.admin_view(fn), name='%s_%s' % (info, fn.__name__))
        url_patterns = patterns(
            '',
            pat(r'edit-field/([0-9]+)/([a-z\-]+)/$', self.edit_field),
        )
        return url_patterns + super(FrontendEditableAdminMixin, self).get_urls()

    def _get_object_for_single_field(self, object_id, language):
        # Quick and dirty way to retrieve objects for django-hvad
        # Cleaner implementation will extend this method in a child mixin
        try:
            return self.model.objects.language(language).get(pk=object_id)
        except AttributeError:
            return self.model.objects.get(pk=object_id)

    def edit_field(self, request, object_id, language):
        """
        Render (GET) or process (POST) a form containing only the requested
        whitelisted fields of the object identified by *object_id*.
        """
        obj = self._get_object_for_single_field(object_id, language)
        opts = obj.__class__._meta
        saved_successfully = False
        cancel_clicked = request.POST.get("_cancel", False)
        raw_fields = request.GET.get("edit_fields")
        # Only fields whitelisted in frontend_editable_fields may be edited
        fields = [field for field in raw_fields.split(",") if field in self.frontend_editable_fields]
        if not fields:
            context = {
                'opts': opts,
                'message': force_unicode(_("Field %s not found")) % raw_fields
            }
            return render_to_response('admin/cms/page/plugin/error_form.html', context, RequestContext(request))
        if not request.user.has_perm("%s_change" % self.model._meta.module_name):
            context = {
                'opts': opts,
                'message': force_unicode(_("You do not have permission to edit this item"))
            }
            return render_to_response('admin/cms/page/plugin/error_form.html', context, RequestContext(request))
        # Dynamically creates the form class with only the whitelisted fields
        # enabled
        form_class = self.get_form(request, obj, fields=fields)
        if not cancel_clicked and request.method == 'POST':
            form = form_class(instance=obj, data=request.POST)
            if form.is_valid():
                form.save()
                saved_successfully = True
        else:
            form = form_class(instance=obj)
        admin_form = AdminForm(form, fieldsets=[(None, {'fields': fields})], prepopulated_fields={},
                               model_admin=self)
        media = self.media + admin_form.media
        context = {
            'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
            'title': opts.verbose_name,
            'plugin': None,
            'plugin_id': None,
            'adminform': admin_form,
            'add': False,
            'is_popup': True,
            'media': media,
            'opts': opts,
            'change': True,
            'save_as': False,
            'has_add_permission': False,
            'window_close_timeout': 10,
        }
        if cancel_clicked:
            # cancel button was clicked
            context.update({
                'cancel': True,
            })
            return render_to_response('admin/cms/page/plugin/confirm_form.html', context, RequestContext(request))
        if not cancel_clicked and request.method == 'POST' and saved_successfully:
            # Successful save: the confirm template closes the popup
            return render_to_response('admin/cms/page/plugin/confirm_form.html', context, RequestContext(request))
        return render_to_response('admin/cms/page/plugin/change_form.html', context, RequestContext(request))
class PlaceholderAdminMixin(object):
def get_urls(self):
    """
    Register the plugin specific urls (add/edit/copy/remove/move)
    """
    from django.conf.urls import patterns, url
    info = "%s_%s" % (self.model._meta.app_label, self.model._meta.module_name)

    def pat(regex, fn):
        # Wrap each view in admin_view and give it a namespaced url name
        return url(regex, self.admin_site.admin_view(fn), name='%s_%s' % (info, fn.__name__))

    url_patterns = patterns(
        '',
        pat(r'copy-plugins/$', self.copy_plugins),
        pat(r'add-plugin/$', self.add_plugin),
        pat(r'edit-plugin/([0-9]+)/$', self.edit_plugin),
        pat(r'delete-plugin/([0-9]+)/$', self.delete_plugin),
        pat(r'clear-placeholder/([0-9]+)/$', self.clear_placeholder),
        pat(r'move-plugin/$', self.move_plugin),
    )
    return url_patterns + super(PlaceholderAdminMixin, self).get_urls()
def has_add_plugin_permission(self, request, placeholder, plugin_type):
    """True if the user may add a plugin of *plugin_type* to *placeholder*."""
    return bool(permissions.has_plugin_permission(request.user, plugin_type, "add")
                and placeholder.has_add_permission(request))
def has_copy_plugin_permission(self, request, source_placeholder, target_placeholder, plugins):
    """True if the user may copy *plugins* from source to target placeholder."""
    if not source_placeholder.has_add_permission(request):
        return False
    if not target_placeholder.has_add_permission(request):
        return False
    # Every plugin type being copied must also be individually addable
    return all(permissions.has_plugin_permission(request.user, plugin.plugin_type, "add")
               for plugin in plugins)
def has_change_plugin_permission(self, request, plugin):
    """True if the user may edit *plugin* inside its placeholder."""
    return bool(permissions.has_plugin_permission(request.user, plugin.plugin_type, "change")
                and plugin.placeholder.has_change_permission(request))
def has_move_plugin_permission(self, request, plugin, target_placeholder):
    """True if the user may move *plugin* into *target_placeholder*."""
    return bool(permissions.has_plugin_permission(request.user, plugin.plugin_type, "change")
                and target_placeholder.has_change_permission(request))
def has_delete_plugin_permission(self, request, plugin):
    """True if the user may delete *plugin* from its placeholder."""
    if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "delete"):
        return False
    return bool(plugin.placeholder.has_delete_permission(request))
def has_clear_placeholder_permission(self, request, placeholder):
    """True if the user may clear (delete every plugin from) *placeholder*."""
    return bool(placeholder.has_delete_permission(request))
def post_add_plugin(self, request, placeholder, plugin):
    """Hook called after a plugin is added; subclasses may override."""
    pass

def post_copy_plugins(self, request, source_placeholder, target_placeholder, plugins):
    """Hook called after plugins are copied; subclasses may override."""
    pass

def post_edit_plugin(self, request, plugin):
    """Hook called after a plugin is edited; subclasses may override."""
    pass

def post_move_plugin(self, request, source_placeholder, target_placeholder, plugin):
    """Hook called after a plugin is moved; subclasses may override."""
    pass

def post_delete_plugin(self, request, plugin):
    """Hook called after a plugin is deleted; subclasses may override."""
    pass

def post_clear_placeholder(self, request, placeholder):
    """Hook called after a placeholder is cleared; subclasses may override."""
    pass

def get_placeholder_template(self, request, placeholder):
    """Return the template for *placeholder*; base implementation knows none."""
    pass
@method_decorator(require_POST)
@xframe_options_sameorigin
def add_plugin(self, request):
    """
    POST request should have the following data:
    - placeholder_id
    - plugin_type
    - plugin_language
    - plugin_parent (optional)

    Returns a JSON payload with the edit/delete urls and breadcrumb of the
    newly created (still empty) plugin.
    """
    parent = None
    plugin_type = request.POST['plugin_type']
    placeholder_id = request.POST.get('placeholder_id', None)
    placeholder = get_object_or_404(Placeholder, pk=placeholder_id)
    parent_id = request.POST.get('plugin_parent', None)
    language = request.POST.get('plugin_language') or get_language_from_request(request)
    if not self.has_add_plugin_permission(request, placeholder, plugin_type):
        return HttpResponseForbidden(force_unicode(_('You do not have permission to add a plugin')))
    try:
        has_reached_plugin_limit(placeholder, plugin_type, language,
                                 template=self.get_placeholder_template(request, placeholder))
    except PluginLimitReached as er:
        return HttpResponseBadRequest(er)
    # page add-plugin
    if not parent_id:
        # Append at the end of the placeholder's plugin list by default
        position = request.POST.get('plugin_order',
                                    CMSPlugin.objects.filter(language=language, placeholder=placeholder).count())
    # in-plugin add-plugin
    else:
        parent = get_object_or_404(CMSPlugin, pk=parent_id)
        # The parent's placeholder wins over the one posted by the client
        placeholder = parent.placeholder
        position = request.POST.get('plugin_order',
                                    CMSPlugin.objects.filter(language=language, parent=parent).count())
    # placeholder (non-page) add-plugin
    # Sanity check to make sure we're not getting bogus values from JavaScript:
    if settings.USE_I18N:
        if not language or not language in [lang[0] for lang in settings.LANGUAGES]:
            return HttpResponseBadRequest(force_unicode(_("Language must be set to a supported language!")))
        if parent and parent.language != language:
            return HttpResponseBadRequest(force_unicode(_("Parent plugin language must be same as language!")))
    else:
        language = settings.LANGUAGE_CODE
    plugin = CMSPlugin(language=language, plugin_type=plugin_type, position=position, placeholder=placeholder)
    if parent:
        # Recompute the position relative to the parent before attaching
        plugin.position = CMSPlugin.objects.filter(parent=parent).count()
        plugin.parent_id = parent.pk
    plugin.save()
    self.post_add_plugin(request, placeholder, plugin)
    response = {
        'url': force_unicode(
            admin_reverse("%s_%s_edit_plugin" % (self.model._meta.app_label, self.model._meta.module_name),
                          args=[plugin.pk])),
        'delete': force_unicode(
            admin_reverse("%s_%s_delete_plugin" % (self.model._meta.app_label, self.model._meta.module_name),
                          args=[plugin.pk])),
        'breadcrumb': plugin.get_breadcrumb(),
    }
    return HttpResponse(json.dumps(response), content_type='application/json')
@method_decorator(require_POST)
@xframe_options_sameorigin
@wrap_transaction
def copy_plugins(self, request):
    """
    POST request should have the following data:
    - source_language
    - source_placeholder_id
    - source_plugin_id (optional)
    - target_language
    - target_placeholder_id
    - target_plugin_id (optional, new parent)

    Returns a JSON description of the target placeholder's resulting plugin tree.
    """
    source_language = request.POST['source_language']
    source_placeholder_id = request.POST['source_placeholder_id']
    source_plugin_id = request.POST.get('source_plugin_id', None)
    target_language = request.POST['target_language']
    target_placeholder_id = request.POST['target_placeholder_id']
    target_plugin_id = request.POST.get('target_plugin_id', None)
    source_placeholder = get_object_or_404(Placeholder, pk=source_placeholder_id)
    target_placeholder = get_object_or_404(Placeholder, pk=target_placeholder_id)
    if not target_language or not target_language in get_language_list():
        return HttpResponseBadRequest(force_unicode(_("Language must be set to a supported language!")))
    if source_plugin_id:
        source_plugin = get_object_or_404(CMSPlugin, pk=source_plugin_id)
        reload_required = requires_reload(PLUGIN_COPY_ACTION, [source_plugin])
        if source_plugin.plugin_type == "PlaceholderPlugin":
            # if it is a PlaceholderReference plugin only copy the plugins it references
            inst, cls = source_plugin.get_plugin_instance(self)
            plugins = inst.placeholder_ref.get_plugins_list()
        else:
            # Copy the plugin together with its whole subtree; the treebeard
            # path prefix + depth filter selects the plugin and all descendants
            plugins = list(
                source_placeholder.cmsplugin_set.filter(
                    path__startswith=source_plugin.path,
                    depth__gte=source_plugin.depth).order_by('path')
            )
    else:
        # No specific plugin given: copy all of the source language's plugins
        plugins = list(
            source_placeholder.cmsplugin_set.filter(language=source_language).order_by('path'))
        reload_required = requires_reload(PLUGIN_COPY_ACTION, plugins)
    if not self.has_copy_plugin_permission(request, source_placeholder, target_placeholder, plugins):
        return HttpResponseForbidden(force_unicode(_('You do not have permission to copy these plugins.')))
    if target_placeholder.pk == request.toolbar.clipboard.pk and not source_plugin_id and not target_plugin_id:
        # if we copy a whole placeholder to the clipboard create PlaceholderReference plugin instead and fill it
        # the content of the source_placeholder.
        ref = PlaceholderReference()
        ref.name = source_placeholder.get_label()
        ref.plugin_type = "PlaceholderPlugin"
        ref.language = target_language
        ref.placeholder = target_placeholder
        ref.save()
        ref.copy_from(source_placeholder, source_language)
    else:
        copy_plugins.copy_plugins_to(plugins, target_placeholder, target_language, target_plugin_id)
    plugin_list = CMSPlugin.objects.filter(language=target_language, placeholder=target_placeholder).order_by(
        'path')
    reduced_list = []
    for plugin in plugin_list:
        # Serialize just enough for the frontend to rebuild its plugin tree
        reduced_list.append(
            {
                'id': plugin.pk, 'type': plugin.plugin_type, 'parent': plugin.parent_id,
                'position': plugin.position, 'desc': force_unicode(plugin.get_short_description()),
                'language': plugin.language, 'placeholder_id': plugin.placeholder_id
            }
        )
    self.post_copy_plugins(request, source_placeholder, target_placeholder, plugins)
    json_response = {'plugin_list': reduced_list, 'reload': reload_required}
    return HttpResponse(json.dumps(json_response), content_type='application/json')
@xframe_options_sameorigin
def edit_plugin(self, request, plugin_id):
    """
    Render (GET) or process (POST) the change form of a single plugin,
    delegating the heavy lifting to the plugin admin's add_view/change_view.
    """
    plugin_id = int(plugin_id)
    cms_plugin = get_object_or_404(CMSPlugin.objects.select_related('placeholder'), pk=plugin_id)
    instance, plugin_admin = cms_plugin.get_plugin_instance(self.admin_site)
    if not self.has_change_plugin_permission(request, cms_plugin):
        return HttpResponseForbidden(force_unicode(_("You do not have permission to edit this plugin")))
    plugin_admin.cms_plugin_instance = cms_plugin
    try:
        plugin_admin.placeholder = cms_plugin.placeholder
    except Placeholder.DoesNotExist:
        pass
    if request.method == "POST":
        # set the continue flag, otherwise will plugin_admin make redirect to list
        # view, which actually doesn't exist
        request.POST['_continue'] = True
    if request.POST.get("_cancel", False):
        # cancel button was clicked
        context = {
            'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
            'plugin': cms_plugin,
            'is_popup': True,
            "type": cms_plugin.get_plugin_name(),
            'plugin_id': plugin_id,
            'icon': force_escape(escapejs(cms_plugin.get_instance_icon_src())),
            'alt': force_escape(escapejs(cms_plugin.get_instance_icon_alt())),
            'cancel': True,
        }
        instance = cms_plugin.get_plugin_instance()[0]
        if instance:
            context['name'] = force_unicode(instance)
        else:
            # cancelled before any content was added to plugin
            cms_plugin.delete()
            context.update({
                "deleted": True,
                'name': force_unicode(cms_plugin),
            })
        return render_to_response('admin/cms/page/plugin/confirm_form.html', context, RequestContext(request))
    if not instance:
        # instance doesn't exist, call add view
        response = plugin_admin.add_view(request)
    else:
        # already saved before, call change view
        # we actually have the instance here, but since i won't override
        # change_view method, is better if it will be loaded again, so
        # just pass id to plugin_admin
        response = plugin_admin.change_view(request, str(plugin_id))
    if request.method == "POST" and plugin_admin.object_successfully_changed:
        self.post_edit_plugin(request, plugin_admin.saved_object)
        saved_object = plugin_admin.saved_object
        context = {
            'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
            'plugin': saved_object,
            'is_popup': True,
            'name': force_unicode(saved_object),
            "type": saved_object.get_plugin_name(),
            'plugin_id': plugin_id,
            'icon': force_escape(saved_object.get_instance_icon_src()),
            'alt': force_escape(saved_object.get_instance_icon_alt()),
        }
        return render_to_response('admin/cms/page/plugin/confirm_form.html', context, RequestContext(request))
    return response
@method_decorator(require_POST)
@xframe_options_sameorigin
def move_plugin(self, request):
    """
    Move a plugin to a (possibly different) placeholder, parent and position.

    POST request with following parameters:
    -plugin_id
    -placeholder_id
    -plugin_language (optional)
    -plugin_parent (optional)
    -plugin_order (array, optional)

    Returns a JSON response with a 'reload' flag on success, 403 when the
    user lacks move permission, or 400 on an invalid parent / bad order
    list / plugin limit being reached.
    """
    plugin = CMSPlugin.objects.get(pk=int(request.POST['plugin_id']))
    placeholder = Placeholder.objects.get(pk=request.POST['placeholder_id'])
    parent_id = request.POST.get('plugin_parent', None)
    language = request.POST.get('plugin_language', None)
    source_placeholder = plugin.placeholder
    # Normalise the optional parent id: empty string / missing -> no parent.
    if not parent_id:
        parent_id = None
    else:
        parent_id = int(parent_id)
    # Fall back to the plugin's own language when none was posted.
    if not language and plugin.language:
        language = plugin.language
    order = request.POST.getlist("plugin_order[]")
    if not self.has_move_plugin_permission(request, plugin, placeholder):
        return HttpResponseForbidden(force_unicode(_("You have no permission to move this plugin")))
    # Moving into a different placeholder may hit that placeholder's
    # configured plugin limit; reject the move if it does.
    if not placeholder == source_placeholder:
        try:
            template = self.get_placeholder_template(request, placeholder)
            has_reached_plugin_limit(placeholder, plugin.plugin_type, plugin.language, template=template)
        except PluginLimitReached as er:
            return HttpResponseBadRequest(er)
    if parent_id:
        if plugin.parent_id != parent_id:
            # Re-parent: the new parent must live in the target placeholder
            # and use the same language as the plugin being moved.
            parent = CMSPlugin.objects.get(pk=parent_id)
            if parent.placeholder_id != placeholder.pk:
                return HttpResponseBadRequest(force_unicode('parent must be in the same placeholder'))
            if parent.language != language:
                return HttpResponseBadRequest(force_unicode('parent must be in the same language as plugin_language'))
            plugin.parent_id = parent.pk
            plugin.save()
            plugin.move(parent, pos='last-child')
    else:
        # No parent requested: make the plugin a root node at the end of
        # the plugin tree.
        sibling = CMSPlugin.get_last_root_node()
        plugin.parent_id = None
        plugin.save()
        plugin.move(sibling, pos='right')
    # Re-fetch to pick up tree fields changed by move(), then re-home the
    # plugin and all of its descendants in the target placeholder/language.
    plugin = CMSPlugin.objects.get(pk=plugin.pk)
    for child in [plugin] + list(plugin.get_descendants()):
        child.placeholder = placeholder
        child.language = language
        child.save()
    # Re-number sibling positions: either follow the explicit order posted
    # by the client, or simply assign sequential positions.
    plugins = CMSPlugin.objects.filter(parent=parent_id, placeholder=placeholder, language=language).order_by('position')
    x = 0
    for level_plugin in plugins:
        if order:
            x = 0
            found = False
            for pk in order:
                if level_plugin.pk == int(pk):
                    level_plugin.position = x
                    level_plugin.save()
                    found = True
                    break
                x += 1
            if not found:
                return HttpResponseBadRequest('order parameter did not have all plugins of the same level in it')
        else:
            level_plugin.position = x
            level_plugin.save()
            x += 1
    self.post_move_plugin(request, source_placeholder, placeholder, plugin)
    json_response = {'reload': requires_reload(PLUGIN_MOVE_ACTION, [plugin])}
    return HttpResponse(json.dumps(json_response), content_type='application/json')
@xframe_options_sameorigin
def delete_plugin(self, request, plugin_id):
    """
    Delete a single plugin, with a confirmation step.

    GET renders a confirmation page listing everything that would be
    cascade-deleted; POST performs the deletion and redirects to the
    admin index. Returns 403 when the user lacks delete permission.
    """
    plugin = get_object_or_404(CMSPlugin.objects.select_related('placeholder'), pk=plugin_id)
    if not self.has_delete_plugin_permission(request, plugin):
        return HttpResponseForbidden(force_unicode(_("You do not have permission to delete this plugin")))
    plugin_cms_class = plugin.get_plugin_class()
    plugin_class = plugin_cms_class.model
    opts = plugin_class._meta
    using = router.db_for_write(plugin_class)
    app_label = opts.app_label
    # Collect related objects that would be cascade-deleted, plus missing
    # permissions / protected relations that block the deletion.
    (deleted_objects, perms_needed, protected) = get_deleted_objects(
        [plugin], opts, request.user, self.admin_site, using)
    if request.POST:  # The user has already confirmed the deletion.
        if perms_needed:
            raise PermissionDenied(_("You do not have permission to delete this plugin"))
        obj_display = force_unicode(plugin)
        self.log_deletion(request, plugin, obj_display)
        plugin.delete()
        self.message_user(request, _('The %(name)s plugin "%(obj)s" was deleted successfully.') % {
            'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj_display)})
        self.post_delete_plugin(request, plugin)
        return HttpResponseRedirect(admin_reverse('index', current_app=self.admin_site.name))
    plugin_name = force_unicode(plugin_pool.get_plugin(plugin.plugin_type).name)
    if perms_needed or protected:
        title = _("Cannot delete %(name)s") % {"name": plugin_name}
    else:
        title = _("Are you sure?")
    context = {
        "title": title,
        "object_name": plugin_name,
        "object": plugin,
        "deleted_objects": deleted_objects,
        "perms_lacking": perms_needed,
        "protected": protected,
        "opts": opts,
        "app_label": app_label,
    }
    return TemplateResponse(request, "admin/cms/page/plugin/delete_confirmation.html", context,
                            current_app=self.admin_site.name)
@xframe_options_sameorigin
def clear_placeholder(self, request, placeholder_id):
    """
    Remove every plugin from a placeholder, with a confirmation step.

    GET renders a confirmation page; POST performs the clearing and
    redirects to the admin index. Returns 403 on missing permission.
    """
    placeholder = get_object_or_404(Placeholder, pk=placeholder_id)
    if not self.has_clear_placeholder_permission(request, placeholder):
        return HttpResponseForbidden(force_unicode(_("You do not have permission to clear this placeholder")))
    # Optionally restrict the operation to a single language.
    language = request.GET.get('language', None)
    plugins = placeholder.get_plugins(language)
    opts = Placeholder._meta
    using = router.db_for_write(Placeholder)
    app_label = opts.app_label
    # Collect cascade-deleted objects / missing permissions / protected
    # relations for the confirmation page and the permission check.
    (deleted_objects, perms_needed, protected) = get_deleted_objects(
        plugins, opts, request.user, self.admin_site, using)
    obj_display = force_unicode(placeholder)
    if request.POST:  # The user has already confirmed the deletion.
        if perms_needed:
            return HttpResponseForbidden(force_unicode(_("You do not have permission to clear this placeholder")))
        self.log_deletion(request, placeholder, obj_display)
        placeholder.clear()
        self.message_user(request, _('The placeholder "%(obj)s" was cleared successfully.') % {
            'obj': force_unicode(obj_display)})
        self.post_clear_placeholder(request, placeholder)
        return HttpResponseRedirect(admin_reverse('index', current_app=self.admin_site.name))
    if perms_needed or protected:
        title = _("Cannot delete %(name)s") % {"name": obj_display}
    else:
        title = _("Are you sure?")
    context = {
        "title": title,
        "object_name": _("placeholder"),
        "object": placeholder,
        "deleted_objects": deleted_objects,
        "perms_lacking": perms_needed,
        "protected": protected,
        "opts": opts,
        "app_label": app_label,
    }
    return TemplateResponse(request, "admin/cms/page/plugin/delete_confirmation.html", context,
                            current_app=self.admin_site.name)
class PlaceholderAdmin(PlaceholderAdminMixin, ModelAdmin):
    """Deprecated shim kept for backwards compatibility; emits a
    DeprecationWarning on construction."""

    def __init__(self, *args, **kwargs):
        deprecation_message = (
            "Class PlaceholderAdmin is deprecated and will be removed in 3.1. "
            "Instead, combine PlaceholderAdminMixin with admin.ModelAdmin.")
        warnings.warn(deprecation_message, DeprecationWarning)
        super(PlaceholderAdmin, self).__init__(*args, **kwargs)
class FrontendEditableAdmin(FrontendEditableAdminMixin):
    """Deprecated shim kept for backwards compatibility; emits a
    DeprecationWarning on construction."""

    def __init__(self, *args, **kwargs):
        deprecation_message = (
            "Class FrontendEditableAdmin is deprecated and will be removed in 3.1. "
            "Instead, use FrontendEditableAdminMixin.")
        warnings.warn(deprecation_message, DeprecationWarning)
        super(FrontendEditableAdmin, self).__init__(*args, **kwargs)
| |
#!/usr/bin/env python
'''
The reporter module is in charge of producing the HTML Report as well as
provide plugins with common HTML Rendering functions
'''
import cgi
from tornado.template import Loader
from framework.dependency_management.dependency_resolver import BaseComponent
from framework.dependency_management.interfaces import ReporterInterface
from framework.lib.general import *
from framework.interface.html.filter import sanitiser
class Reporter(BaseComponent, ReporterInterface):
    """Produce the HTML report and provide plugins with common HTML
    rendering helpers (transaction tables, link lists, search boxes),
    all rendered through tornado templates from POUTPUT_TEMPLATES_DIR."""

    COMPONENT_NAME = "reporter"

    def __init__(self):
        self.register_in_service_locator()
        self.config = self.get_component("config")
        self.resource = self.get_component("resource")
        self.transaction = self.get_component("transaction")
        self.plugin_handler = self.get_component("plugin_handler")
        # Resolved later in init(): the requester component may not be
        # registered yet when the reporter is constructed.
        self.requester = None
        self.Init = False
        self.Sanitiser = sanitiser.HTMLSanitiser()
        self.Loader = Loader(self.config.FrameworkConfigGet('POUTPUT_TEMPLATES_DIR'))
        # Maximum number of command-output lines rendered inline.
        self.mNumLinesToShow = 15
        self.CounterList = []

    def init(self):
        """Late-bind the requester component (see __init__)."""
        self.requester = self.get_component("requester")

    def TransactionTableFromIDs(self, TransactionIDs, NumLinesReq=15, NumLinesRes=15):
        """Draws a table of HTTP Transactions.

        NumLinesReq/NumLinesRes are accepted for interface compatibility
        but are currently unused.
        """
        transactions = self.transaction.GetByIDs(TransactionIDs)
        return self.TransactionTableForTransactions(transactions)

    def TransactionTableForURL(self, UseCache, URL, Method=None, Data=None):
        """Draw a transaction table for a single URL (fetching it if needed)."""
        transaction = self.requester.GetTransaction(UseCache, URL, method=Method, data=Data)
        return self.TransactionTableForTransactions([transaction])

    def TransactionTableForURLList(self, UseCache, URLList, Method=None, Data=None):
        """Draw a transaction table for a list of URLs."""
        transactions = self.requester.GetTransactions(UseCache, URLList, method=Method, data=Data)
        return self.TransactionTableForTransactions(transactions)

    def TransactionTableForTransactions(self, Transactions):
        """Render the transaction table template for the given transactions."""
        return self.Loader.load("transaction_table.html").generate(TransactionList=Transactions)

    def unicode(self, *args):
        """Coerce to unicode, passing through input that already is unicode."""
        try:
            return unicode(*args)
        except TypeError:
            return args[0]  # Input is already Unicode

    def sanitize_html(self, RawHTML):
        """Strip dangerous markup from third-party HTML."""
        return self.Sanitiser.CleanThirdPartyHTML(RawHTML)

    def reset_loader(self):
        """Clear tornado's template cache so edited templates are reloaded."""
        return self.Loader.reset()

    # ----------------------------------- Methods exported from plugin_helper.py ---------------------------------
    def CommandTable(self, Command):
        """Render a single command in a table."""
        return self.Loader.load("command_table.html").generate(Command=Command)

    def LinkList(self, LinkListName, Links):
        """
        Wrapper to allow rendering a bunch of links -without name- as resource
        links with name = link
        """
        return self.Loader.load("link_list.html").generate(LinkListName=LinkListName, Links=Links)

    def ResourceLinkList(self, ResourceListName, ResourceList):
        """
        Draws a list of named resource links.
        """
        return self.Loader.load("resource_link_list.html").generate(ResourceListName=ResourceListName,
                                                                    ResourceList=ResourceList)

    def TabbedResourceLinkList(self, ResourcesList):
        """
        ResourceList = [
        "ResourceListName", [["Name1","Resource1"],["Name2","Resource2"]]
        ]
        """
        TabData = []
        Resources = []
        for ResourceListName, ResourceList in ResourcesList:
            # Tab ids become HTML element ids, so spaces must be removed.
            TabID = ResourceListName.replace(' ', '_')
            TabData.append([ResourceListName, TabID])
            Resources.append([TabID, ResourceList])
        return self.Loader.load("tabbed_resource_link_list.html").generate(TabData=TabData, Resources=Resources)

    def ListPostProcessing(self, ResourceListName, LinkList, HTMLLinkList):
        return self.Loader.load("list_post_processing.html").generate(ResourceListName=ResourceListName,
                                                                      LinkList=LinkList, HTMLLinkList=HTMLLinkList)

    def RequestLinkList(self, ResourceListName, LinkList):
        return self.Loader.load("request_link_list.html").generate(ResourceListName=ResourceListName, LinkList=LinkList)

    def VulnerabilitySearchBox(self, SearchStr):
        """
        Draws an HTML Search box for defined Vuln Search resources
        """
        VulnSearchResources = self.resource.GetResources('VulnSearch')
        return self.Loader.load("vulnerability_search_box.html").generate(SearchStr=SearchStr,
                                                                          VulnSearchResources=VulnSearchResources)

    def SuggestedCommandBox(self, PluginOutputDir, CommandCategoryList, Header=''):
        """
        Draws HTML tabs for a list of TabName => Resource Group (i.e. how to run hydra, etc)
        """
        TitleList = []
        CommandList = []
        for item in CommandCategoryList:
            TitleList.append(item[0])
            CommandList.append(self.resource.GetResources(item[1]))
        # TODO: Fix up the plugin
        return self.Loader.load("suggested_command_box.html").generate(Header=Header, TitleList=TitleList,
                                                                       CommandList=CommandList)

    def CommandDump(self, Name, CommandIntro, ModifiedCommand, RelativeFilePath, OutputIntro, TimeStr):
        """Render a command plus (up to mNumLinesToShow lines of) its output file."""
        AbsPath = self.plugin_handler.RetrieveAbsPath(RelativeFilePath)
        # Bug fix: close the output file deterministically instead of leaking
        # the handle until garbage collection.
        with open(AbsPath, "r") as output_file:
            OutputLines = output_file.readlines()
        longOutput = (len(OutputLines) > self.mNumLinesToShow)
        if longOutput:
            # Truncate long output to keep the report readable.
            OutputLines = ''.join(OutputLines[0:self.mNumLinesToShow])
        else:
            OutputLines = ''.join(OutputLines)
        table_vars = {
            "Name": Name,
            "CommandIntro": CommandIntro,
            "ModifiedCommand": ModifiedCommand,
            "FilePath": RelativeFilePath,
            "OutputIntro": OutputIntro,
            "OutputLines": OutputLines,
            "TimeStr": TimeStr,
            "mNumLinesToShow": self.mNumLinesToShow,
            "longOutput": longOutput
        }
        return self.Loader.load("command_dump.html").generate(**table_vars)

    def URLsFromStr(self, TimeStr, VisitURLs, URLList, NumFound):
        """Render the scrape summary followed by the list of scraped URLs."""
        html_content = self.Loader.load("urls_from_str.html").generate(TimeStr=TimeStr, VisitURLs=VisitURLs,
                                                                       NumURLs=len(URLList), NumFound=NumFound)
        if URLList:
            html_content += self.LinkList("URLs Scraped", URLList)
        return html_content

    def Robots(self, NotStr, NumLines, NumAllow, NumDisallow, NumSitemap, SavePath, EntriesList, NumAddedURLs):
        """Render the robots.txt analysis summary plus browsable entry lists."""
        vars = {
            "robots_found": NotStr,
            "num_lines": NumLines,
            "num_allow": NumAllow,
            "num_disallow": NumDisallow,
            "num_sitemap": NumSitemap,
            "save_path": SavePath
        }
        TestResult = self.Loader.load("robots.html").generate(**vars)
        # robots.txt contains some entries, show browsable list! :)
        if NumDisallow > 0 or NumAllow > 0 or NumSitemap > 0:
            for Display, Links in EntriesList:
                if Links:  # Filters empty lists
                    TestResult += self.ResourceLinkList(Display, Links)
        return TestResult

    def HtmlString(self, String):
        """Identity helper: lets plugins pass raw HTML through the reporter API."""
        return String

    # ---------------------- Grep Plugin Outputs -------------------- #
    def ResponseBodyMatches(self, ResponseRegexpName):
        """Render regex matches found in response bodies."""
        RegexpName, GrepOutputs, TransactionIDS, match_percent = self.transaction.SearchByRegexName(ResponseRegexpName,
                                                                                                   stats=True)
        variables = {
            "name": RegexpName.replace("RESPONSE_REGEXP_FOR_", "").replace('_', ' '),
            "matches": GrepOutputs,
            "transaction_ids": TransactionIDS,
            "match_percent": match_percent
        }
        return self.Loader.load("response_matches.html").generate(**variables)

    def ResponseHeaderMatches(self, HeaderRegexpName):
        """Render regex matches found in response headers (HTML part only)."""
        return self.ResearchHeaders(HeaderRegexpName)[0]

    def ResearchHeaders(self, RegexName):
        """Search headers by regex name; return [rendered_html, raw_matches]."""
        regex_name, grep_outputs, transaction_ids, match_percent = self.transaction.SearchByRegexName(RegexName,
                                                                                                     stats=True)
        # [[unique_matches, matched_transactions, matched_percentage]]
        searches = self.Loader.load("header_searches.html").generate(match_percent=match_percent, matches=grep_outputs,
                                                                     transaction_ids=transaction_ids)
        return [searches, grep_outputs]

    def FingerprintData(self):
        """Render fingerprint header matches with vuln-search boxes appended."""
        HeaderTable, matches = self.ResearchHeaders('HEADERS_FOR_FINGERPRINT')
        for item in matches:
            # Add Vulnerability search boxes after table
            HeaderTable += self.VulnerabilitySearchBox(item[1])
        return HeaderTable

    def TopTransactionsBySpeed(self, Order):
        """Draw a transaction table of the fastest/slowest transactions."""
        transactions = self.transaction.GetTopTransactionsBySpeed(Order)
        return self.TransactionTableForTransactions(transactions)

    def CookieAttributeAnalysis(self, CookieValueList, Header2TransacDict):
        """Render a per-cookie table of attributes (value, flags, ...).

        NOTE(review): this method references self.Render, which is never
        initialised anywhere in this class, and the `vars` template context
        built below is never rendered. It looks broken/dead -- confirm
        whether any caller still uses it before relying on it.
        """
        vars = {
            "Cookies": [{
                "Name": Cookie.split('=')[0],
                "Link": Header2TransacDict[self.config.Get('HEADERS_FOR_COOKIES').lower() + Cookie],
                "Attribs": Cookie.replace(Cookie.split('=')[0] + "=", "").replace("; ", ";").split(";"),
            } for Cookie in CookieValueList],
        }
        Table = self.Render.CreateTable({'class': 'report_intro'})
        SetCookie = self.config.Get('HEADERS_FOR_COOKIES').lower()
        PossibleCookieAttributes = self.config.Get('COOKIE_ATTRIBUTES').split(',')
        for Cookie in CookieValueList:
            CookieName = Cookie.split('=')[0]
            CookieLink = self.Render.DrawButtonLink(cgi.escape(CookieName), Header2TransacDict[SetCookie + Cookie])
            CookieAttribs = Cookie.replace(CookieName + "=", "").replace("; ", ";").split(";")
            Table.CreateCustomRow('<tr><th colspan="2">Cookie: %s</th></tr>' % CookieLink)
            Table.CreateRow(['Attribute', 'Value'], True)
            NotFoundStr = "<b>Not Found</b>"
            if CookieAttribs[0]:
                CookieValue = CookieAttribs[0]
            else:
                CookieValue = NotFoundStr
            Table.CreateRow(['Value', CookieValue])
            for Attrib in PossibleCookieAttributes:
                DisplayAttribute = NotFoundStr
                for PresentAttrib in CookieAttribs:
                    # Avoid false positives due to cookie contents
                    if PresentAttrib.lower().startswith(Attrib.lower()):
                        DisplayAttribute = PresentAttrib
                        break
                Table.CreateRow([Attrib, DisplayAttribute])
        if Table.GetNumRows() == 0:
            return ""  # No Attributes found
        return "<h3>Cookie Attribute Analysis</h3>%s" % Table.Render()
| |
#!/usr/bin/env python
#NOTE: must u+x this file for things to work with the bm daemon!!!!
import sys
sys.path.append("/home/fhuici/research/eu/demons/svn/Sources/blockmon/main/node/daemon/")
from txjsonrpc.web import jsonrpc
from twisted.web import server
from twisted.internet import reactor
from txjsonrpc.web.jsonrpc import Proxy
import xml.dom.minidom
import xmlrpclib, pickle
from SimpleXMLRPCServer import SimpleXMLRPCServer
from core.returnvalue import *
from core.bmparser import CompositionParser
import os
import random
import commands
from composition import CompositionManager
from core.bmlogging import setup_logging
import imp
blockmon = imp.load_dynamic('blockmon','../libblockmonlib.so')
class BMProcessManager:
    """\brief Controls a running blockmon process. Note that this class/file can be
              used directly as an executable (the method used by the blockmon daemon
              to spawn blockmon processes) or by creating an instance of the class
              (the method used for the blockmon CLI). For the former, the manager runs
              an XML-RPC server which the blockmon daemon uses to communicate with it.
              Further note that all xml-rpc operations return a pickled ReturnValue object.
    """
    # Class-level flag shared across instances: whether the blockmon
    # schedulers/timers are currently running in this process.
    bm_running = False

    def __init__(self, comp=None, bm_logger=None, port=None, is_comp_str=False):
        """\brief Initializes class
        \param comp (\c string) The composition
        \param bm_logger (\c logging.logger) The bm logger
        \param port (\c string) The port to run the xml-rpc server on
        \parm is_comp_str (\c bool) Whether the composition or a file path
        """
        self.__composition = comp
        # When `comp` is a file path, read the composition XML from disk.
        if comp and not is_comp_str:
            f = open(comp, "r")
            self.__composition = f.read()
            f.close()
        self.__logger = bm_logger
        self.__port = None
        if port:
            self.__port = int(port)
        # Set in serve(); when non-None, RPC results are pickled before return.
        self.__server = None

    def set_composition(self, comp):
        self.__composition = comp

    def set_logger(self, logger):
        self.__logger = logger

    def serve(self):
        """\brief Starts up a composition as well as the xml-rpc server
        """
        self.start_composition()
        # The blockmon daemon talks to this process over a local XML-RPC server.
        self.__server = SimpleXMLRPCServer(("localhost", self.__port))
        self.__server.register_function(self.update_composition, "update_composition")
        self.__server.register_function(self.stop_composition, "stop_composition")
        self.__server.register_function(self.read_variables, "read_variables")
        self.__server.register_function(self.write_variables, "write_variables")
        self.__logger.info("Starting Blockmon process with pid=" + str(os.getpid()) +\
                           " and listening on localhost:" + str(self.__port))
        # Blocks forever serving requests.
        self.__server.serve_forever()

    def start_composition(self, comp=None):
        """\brief Starts up a composition
        \param comp (\c string) The composition. If None self.__composition is used
        \return (\c ReturnValue) The result of the operation
        """
        if comp:
            self.__composition = comp
        self.__parser = CompositionParser(self.__composition)
        self.__comp_id = self.__parser.parse_comp_id()
        self.__comp_mngr = CompositionManager(self.__comp_id, blockmon, self.__logger)
        self.__comp_mngr.install(xml.dom.minidom.parseString(self.__composition))
        self.start_bm()
        return ReturnValue(ReturnValue.CODE_SUCCESS, "", None)

    def update_composition(self, comp):
        """\brief Updates up a composition
        \param comp (\c string) The composition
        \return (\c ReturnValue) The result of the operation
        """
        # Stop, reconfigure in place, then restart the schedulers/timers.
        self.stop_bm()
        self.__composition = comp
        self.__comp_mngr.reconfigure(xml.dom.minidom.parseString(comp))
        self.start_bm()
        r = ReturnValue(ReturnValue.CODE_SUCCESS, "", None)
        # Over XML-RPC the result must be pickled (see class docstring).
        if self.__server:
            return pickle.dumps(r)
        return r

    def is_running(self):
        return self.bm_running

    def stop_composition(self):
        """\brief Stops the composition
        \return (\c ReturnValue) The result of the operation
        """
        self.stop_bm()
        self.__comp_mngr.remove()
        r = ReturnValue(ReturnValue.CODE_SUCCESS, "", None)
        if self.__server:
            return pickle.dumps(r)
        return r

    def read_variables(self, variables):
        """\brief Reads variables from blocks
        \param variables (\c [VariableInfo]) The variables to read, pickled.
        \return (\c ReturnValue) The result of the operation
        """
        # Over XML-RPC the argument arrives pickled; unpickle it first.
        if self.__server:
            variables = pickle.loads(variables)
        for v in variables:
            value = self.__comp_mngr.read_block_var(v.get_block_name(), v.get_name())
            v.set_value(value)
        r = ReturnValue(ReturnValue.CODE_SUCCESS, "", variables)
        if self.__server:
            return pickle.dumps(r)
        return r

    def write_variables(self, variables):
        """\brief Writes values to block variables
        \param variables (\c [VariableInfo]) The variables to write to, pickled.
        \return (\c ReturnValue) The result of the operation
        """
        if self.__server:
            variables = pickle.loads(variables)
        for v in variables:
            self.__comp_mngr.write_block_var(v.get_block_name(),\
                                             v.get_name(),\
                                             v.get_value())
        r = ReturnValue(ReturnValue.CODE_SUCCESS, "", None)
        if self.__server:
            return pickle.dumps(r)
        return r

    @staticmethod
    def start_bm():
        """\brief Starts all blockmon schedulers and timers
        """
        if (BMProcessManager.bm_running):
            raise Exception('blockmon already running')
        else:
            BMProcessManager.bm_running = True
            blockmon.start_schedulers()
            blockmon.start_timer()

    @staticmethod
    def stop_bm():
        """\brief Stops all blockmon schedulers and timers
        """
        # No-op when blockmon is not running.
        if (BMProcessManager.bm_running):
            blockmon.stop_schedulers()
            blockmon.stop_timer()
            BMProcessManager.bm_running = False
class BMProcessInfo:
    """\brief Convenience class for storing information about a running blockmon process
    """
    def __init__(self, proc, comp, logfile, port=None):
        """\brief Initializes class
        \param proc (\c subprocess.Popen) The process
        \param comp (\c string) The composition XML
        \param logfile (\c string) The path to the process' log file
        \param port (\c int) The port the process' json-rpc server is running on
        """
        self.__proc = proc
        self.__comp = comp
        self.__logfile = logfile
        self.__port = port

    def get_pid(self):
        """\brief Returns the pid, or None when no process is attached."""
        if not self.__proc:
            return None
        return self.__proc.pid

    def get_comp(self):
        return self.__comp

    def get_port(self):
        return self.__port

    def set_port(self, p):
        self.__port = p

    def get_logfile(self):
        return self.__logfile

    def get_proc(self):
        return self.__proc

    def __str__(self):
        # Bug fix: the original concatenated the fields with no separators,
        # producing unreadable output like "pid=12port=34logfile=...".
        return "BMProcessInfo: pid=" + str(self.get_pid()) + \
               ", port=" + str(self.get_port()) + \
               ", logfile=" + str(self.get_logfile()) + \
               "\n\tcomposition:\n" + str(self.get_comp())
########################################################################
# MAIN EXECUTION
########################################################################
if __name__ == "__main__":
    # Usage: <script> <composition file> <log file> <xml-rpc port>
    # Bug fix: three positional arguments (argv[1..3]) are required, but the
    # original only checked for two and then crashed reading sys.argv[3].
    if (len(sys.argv) < 4):
        os._exit(1)
    # Setup manager
    compfile = sys.argv[1]
    logfile = sys.argv[2]
    process_port = sys.argv[3]
    setup_logging(logfile)
    from core.bmlogging import bm_logger
    # Start server
    mngr = BMProcessManager(compfile, bm_logger, process_port)
    mngr.serve()
| |
import click
from helium_commander import (
Client,
Label,
Sensor,
Element,
device_sort_option,
device_mac_option,
metadata_filter_option,
ResourceParamType
)
from helium_commander.commands import metadata, timeseries
from collections import namedtuple
pass_client = click.make_pass_decorator(Client)
label_includes = [Sensor, Element]
LabelActionResources = namedtuple('LabelResourceActions',
                                  ['add', 'remove', 'replace'])


def lookup_label_action_resources(client, cls, mac=False, **kwargs):
    """Resolve the add/remove/replace id lists in `kwargs` into resources.

    Each action resolves to None (no change requested), [] (clear,
    requested via the literal id "none"), or a list of looked-up
    resources of type `cls`.
    """
    known = cls.all(client)

    def resolve(action):
        requested = kwargs.pop(action, None)
        if not requested:
            return None  # No change
        if 'none' in requested:
            return []  # Empty out the resources
        return [cls.lookup(client, entry, resources=known, mac=mac)
                for entry in requested]

    return LabelActionResources(add=resolve('add'),
                                remove=resolve('remove'),
                                replace=resolve('replace'))
# Top-level click group: every `label ...` subcommand below attaches to this.
# The docstring doubles as the group's --help text.
@click.group()
def cli():
    """Operations on labels of sensors.
    """
    pass
@cli.command()
@click.argument('label', required=False)
@metadata_filter_option
@pass_client
def list(client, label, **kwargs):
    """List labels.
    Lists information for a given LABEL or all labels in the
    organization.
    """
    # A specific label id shows just that label; otherwise list all
    # labels, optionally filtered by metadata.
    if not label:
        meta = kwargs.get('metadata') or None
        found = Label.where(client, include=label_includes, metadata=meta)
    else:
        found = [Label.lookup(client, label, include=label_includes)]
    Label.display(client, found, include=label_includes)


cli.add_command(timeseries.cli(Label, history=False,
                               writable=False, device=False))
@cli.command()
@click.option('--sensors',
              type=ResourceParamType(metavar='SENSOR'),
              help="Add sensors to a label")
@click.option('--elements',
              type=ResourceParamType(metavar='ELEMENT'),
              help="Add elements to a label")
@click.argument('name')
@click.pass_context
def create(ctx, name, sensors, elements):
    """Create a label.
    Creates a label with a given NAME and an (optional) list of
    sensors and elements associated with that label.
    """
    client = ctx.find_object(Client)

    def resolve(cls, id_reps):
        # Resolve a (possibly empty) list of id representations against
        # all known resources of the given class.
        if not id_reps:
            return []
        known = cls.all(client)
        return [cls.lookup(client, entry, resources=known)
                for entry in id_reps]

    sensors = resolve(Sensor, sensors)
    elements = resolve(Element, elements)
    label = Label.create(client, attributes={
        'name': name
    })
    if sensors:
        label.update_sensors(sensors)
    if elements:
        label.update_elements(elements)
    # Re-fetch so the display includes the attached resources.
    label = Label.find(client, label.id, include=label_includes)
    Label.display(client, [label], include=label_includes)
@cli.command()
@click.argument('label', nargs=-1)
@pass_client
def delete(client, label):
    """Delete one or more labels.
    Deletes the LABELs with the given ids
    """
    known = Label.all(client)
    resolved = [Label.lookup(client, entry, resources=known)
                for entry in label]
    for item in resolved:
        item.delete()
        click.echo("Deleted {} ".format(item.id))
@cli.command()
@click.argument('label')
@click.option('--name',
              help="the new name for the label")
@pass_client
def update(client, label, name):
    """Update a label.
    Changes basic attributes on a label.
    To add or remove sensors or elements from a label see the `label
    element` and `label sensor` commands.
    """
    target = Label.lookup(client, label)
    if name:
        target.update(attributes={'name': name})
    # Re-fetch so the display reflects the updated attributes.
    refreshed = Label.find(client, target.id, include=label_includes)
    Label.display(client, [refreshed], include=label_includes)


cli.add_command(metadata.cli(Label))
@cli.command()
@click.argument('label')
@click.option('--add',
              type=ResourceParamType(metavar='SENSOR'),
              help="Add sensors to a label")
@click.option('--remove',
              type=ResourceParamType(metavar='SENSOR'),
              help="Remove sensors from a label")
@click.option('--replace',
              type=ResourceParamType(metavar='SENSOR'),
              help="Replace all sensors in a label")
@device_sort_option
@device_mac_option
@pass_client
def sensor(client, label, mac, **kwargs):
    """List sensors for a label.
    List sensors for a given LABEL.
    Add, remove or replace sensors from the LABEL by using the --add,
    --remove or --replace arguments respectively. Note that you can
    specify "none" with these to indicate an empty list.
    """
    target = Label.lookup(client, label)
    actions = lookup_label_action_resources(client, Sensor,
                                            mac=mac, **kwargs)
    # None means "no change"; an empty list clears the association.
    for resources, apply_change in ((actions.add, target.add_sensors),
                                    (actions.remove, target.remove_sensors),
                                    (actions.replace, target.update_sensors)):
        if resources is not None:
            apply_change(resources)
    Sensor.display(client, target.sensors(), **kwargs)
@cli.command()
@click.argument('label')
# Bug fix: the option metavars, help strings and docstring below said
# "sensors" (copy-pasted from the sensor command) even though this command
# operates on elements.
@click.option('--add',
              type=ResourceParamType(metavar='ELEMENT'),
              help="Add elements to a label")
@click.option('--remove',
              type=ResourceParamType(metavar='ELEMENT'),
              help="Remove elements from a label")
@click.option('--replace',
              type=ResourceParamType(metavar='ELEMENT'),
              help="Replace all elements in a label")
@device_sort_option
@device_mac_option
@pass_client
def element(client, label, mac, **kwargs):
    """List elements for a label.
    List elements for a given LABEL.
    Add, remove or replace elements from the LABEL by using the --add,
    --remove or --replace arguments respectively. Note that you can
    specify "none" with these to indicate an empty list.
    """
    label = Label.lookup(client, label)
    actions = lookup_label_action_resources(client, Element,
                                            mac=mac, **kwargs)
    # None means "no change"; an empty list clears the association.
    if actions.add is not None:
        label.add_elements(actions.add)
    if actions.remove is not None:
        label.remove_elements(actions.remove)
    if actions.replace is not None:
        label.update_elements(actions.replace)
    elements = label.elements()
    Element.display(client, elements, **kwargs)
| |
import io
import math
from PIL import Image as BaseImage, ImageOps
import urllib.error
import urllib.parse
import urllib.request
import functools
from .. import consts
from .misc import utils, abc
from .accounts.records import Record
import jinja2
from bson.objectid import ObjectId
from pymongo import MongoClient
client = MongoClient()
class Image(abc.Scorable, abc.Item):
    ''' Represents a single image.
    The implementation is similar to `posts.Post`. '''
    # Upload formats accepted by default ('GIF' may be allowed per-call).
    _allowed = {'JPEG', 'PNG'}
    type = consts.CONTENT_IMAGE
    _db, _collection = consts.MONGO['images']
    collection = client[_db][_collection]

    def __init__(self, image=None, file=False):
        ''' Getting truthy `file` requires loading the image from disc. Don't do
        that unless you're sure you need the image object. '''
        self._fields = {}
        self.derived = []
        if image:
            image = ObjectId(image)
        data = self.collection.find_one({self.pk: image}) or {}
        if data and file:
            data['file'] = self._load_file(data['name'])
        self._init_setfields(self, data)

    def _prepare(self):
        # Lazily wrap a raw owner id into a Record instance.
        if self.owner and not isinstance(self.owner, Record):
            self._setfields(self, {'owner': Record(id=self.owner)})

    @classmethod
    def _load_file(cls, file):
        ''' Return a `PIL.Image` for `file`, which is either a stored image
        name or an already-open binary file object. '''
        # Check if we can skip loading the file from disk
        if not isinstance(file, io.IOBase):
            full_name = '{0}-{1}'.format(consts.ORIGINAL_IMAGE, file)
            file = (consts.MEDIA_IMAGES / full_name).open('rb')
        with file:
            # Same here, don't modify the original object
            content = file.read()
            file.seek(0)
            return BaseImage.open(io.BytesIO(content))

    @staticmethod
    def _download(url):
        ''' Fetch `url` and return its body as a BytesIO buffer. '''
        # Prepend a scheme when missing so urlopen accepts the URL.
        if not urllib.parse.urlparse(url)[0]:
            url = 'http://{}'.format(url)
        return io.BytesIO(urllib.request.urlopen(url, timeout=10).read())

    @classmethod
    def _store_n_link(cls, acct, file, allow_gif=False):
        ''' Validate `file`, write the original plus resized copies to disk,
        insert a Mongo document linking the image to `acct`, and return
        that document's data dict (with `owner`/`file` swapped in). '''
        # `file` argument must be provided
        content = file.read()
        # Keep the original object unmodified.
        # It won't be used anywhere in this function
        file.seek(0)
        if len(content) > consts.MAX_IMAGE_SIZE:
            raise ValueError('Image is too large')
        try:
            # Try to get image type
            img = BaseImage.open(io.BytesIO(content))
            if img.format not in cls._allowed.union({'GIF'} if allow_gif else set()):
                raise ValueError
        except (IOError, ValueError) as e:
            raise ValueError('Invalid image type') from None
        name = '{}.{}'.format(utils.unique_id()[0], img.format.lower())
        sizes = (consts.ORIGINAL_IMAGE, consts.SQUARE_THUMBNAIL, consts.SHRINKED_IMAGE)
        names = [consts.MEDIA_IMAGES / '{}-{}'.format(x, name) for x in sizes]
        consts.MEDIA_IMAGES.mkdir(parents=True, exist_ok=True)
        # Save full image without changin' a byte
        with names[0].open('wb') as unmodified:
            unmodified.write(content)
        # Construct `PIL.Image` instance and make a thumbnail and a shrinked copy
        # Thumbnails are always square
        ImageOps.fit(img, (100, 100), BaseImage.ANTIALIAS).save(str(names[1]), quality=100)
        # Shrinked image is a fixed-width image derived from the full-size image
        # Don't modify GIF images
        if consts.SHRINKED_WIDTH < img.size[0] and img.format != 'GIF':
            nh = math.ceil(consts.SHRINKED_WIDTH / img.size[0] * img.size[1])
            shrinked = ImageOps.fit(img, (consts.SHRINKED_WIDTH, nh), BaseImage.ANTIALIAS)
            shrinked.save(str(names[2]), quality=100)
        else:
            with names[2].open('wb') as shrinked:
                shrinked.write(content)
        # Link the image to `acct`, create a new `Image` instance and return it
        data = {'name': name, 'owner': acct.id, 'id': utils.unique_id()[0], 'score': 0}
        cls.collection.insert_one(data)
        data['owner'] = acct
        data['file'] = img
        return data

    @classmethod
    def fromdata(cls, data, file=False):
        ''' Build an `Image` instance from an existing data dict. '''
        if file:
            data['file'] = cls._load_file(data['name'])
        # I think it's okay to go against DRY now
        return cls._init_setfields(cls(), data)

    def setavatar(self):
        ''' `Image.setavatar` (as well as `Image.setcover`) performs only file IO
        operations. No database stuff.'''
        path = consts.MEDIA_IMAGES / '{0}-{1}'.format(consts.AVATAR , self.name)
        if not path.exists():
            new = ImageOps.fit(self.file, (500, 500), BaseImage.ANTIALIAS)
            new.save(str(path), quality=100)

    def setcover(self):
        # Crop/resize to the configured cover aspect ratio; file IO only.
        path = consts.MEDIA_IMAGES / '{0}-{1}'.format(consts.COVER_IMAGE , self.name)
        if not path.exists():
            ratio = consts.COVER_RATIO[1] / consts.COVER_RATIO[0]
            nh = math.ceil(self.file.size[0] * ratio)
            cr = ImageOps.fit(self.file, (self.file.size[0], nh), BaseImage.ANTIALIAS)
            cr.save(str(path), quality=100)

    @classmethod
    def delete(cls, acct, images):
        ''' Delete the given image ids owned by `acct`: unlink every size
        variant on disk, remove the Mongo documents and decrement the
        account's image counter. '''
        if not isinstance(acct, Record):
            raise TypeError
        if not images:
            raise ValueError('Nothing to delete')
        ins = cls.instances(images)
        # Make a list excluding not `acct`'s images
        valid = [x for x in ins if x.owner == acct]
        for name in (x.name for x in valid):
            for file in consts.MEDIA_IMAGES.glob('*-' + name):
                file.unlink()
        with acct:
            op = cls.collection.delete_many({cls.pk: {'$in': [x.id for x in valid]}})
            acct.images -= op.deleted_count

    @classmethod
    def new(cls, acct, images, allow_gif=False):
        ''' `images` is an iterable of file objects or strings.
        Strings are treated as URLs '''
        if not isinstance(acct, Record):
            raise TypeError
        if not images:
            raise ValueError('No data supplied')
        with acct:
            for img in images:
                if isinstance(img, str):
                    # _download doesn't track exceptions
                    try:
                        img = cls._download(img)
                    except (urllib.error.URLError,) as e:
                        # Unreachable URL: skip this entry silently.
                        continue
                try:
                    data = cls._store_n_link(acct, img, allow_gif)
                except (ValueError, IOError) as e:  # Invalid image
                    continue
                else:
                    acct.images += 1
                    yield cls.fromdata(data)

    def __repr__(self):
        pat = '<Image object at {addr:#x}: {self.pk}={self.id}>'
        mapping = {'addr': id(self), 'self': self}
        return pat.format(**mapping)
def urls_dict(name, types=()):
    """Map each requested image type to the URL of `name` in that type."""
    return {img_type: utils.image_url(name=name, type=img_type)
            for img_type in types}
def raw(acct, number=50, files=False):
    """Yield up to `number` newest Image instances owned by `acct`.

    With ``files=True`` each instance also gets its file loaded.
    """
    if not isinstance(acct, Record):
        raise TypeError
    cursor = Image.collection.find({'owner': acct.id})
    # Newest first via reverse natural order
    cursor = cursor.sort([('$natural', -1)]).limit(number)
    for document in cursor:
        yield Image.fromdata(document, file=files)
| |
import numpy
import chainer
from chainer import configuration
from chainer import cuda
from chainer import function
from chainer.utils import argument
from chainer.utils import conv
from chainer.utils import type_check
from chainer import variable
# Module-level cuDNN handles and algorithm-preference constants; only bound
# when a cuDNN-enabled CUDA install is available.
if cuda.cudnn_enabled:
    cudnn = cuda.cudnn
    libcudnn = cuda.cudnn.cudnn
    _cudnn_version = libcudnn.getVersion()
    # Forward pass: let cuDNN pick the fastest algorithm fitting the
    # workspace limit.
    _fwd_pref = libcudnn.CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT
    # The backward-pass preference enums only exist in cuDNN >= v3.
    if _cudnn_version >= 3000:
        _bwd_filter_pref = \
            libcudnn.CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT
        _bwd_data_pref = \
            libcudnn.CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT
def _check_cudnn_acceptable_type(x_dtype, W_dtype):
    """Return True when cuDNN can handle this dtype pair.

    Input and weight dtypes must match; float16 additionally requires
    cuDNN >= v3.
    """
    if x_dtype != W_dtype:
        return False
    return _cudnn_version >= 3000 or x_dtype != numpy.float16
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
class Convolution2DFunction(function.Function):

    """Two-dimensional convolution as a chainer ``Function``.

    On GPU the cuDNN path is used when available and acceptable for the
    input dtypes; otherwise (and always on CPU) an im2col + tensordot
    fallback is used.  ``self.col`` produced by the forward fallback is
    reused in the backward pass.
    """

    def __init__(self, stride=1, pad=0, cover_all=False, requires_x_grad=True,
                 **kwargs):
        # The removed `deterministic` kwarg is rejected with a pointer to
        # the config-based replacement.
        argument.check_unexpected_kwargs(
            kwargs, deterministic="deterministic argument is not "
            "supported anymore. "
            "Use chainer.using_config('cudnn_deterministic', value) "
            "context where value is either `True` or `False`.")
        argument.assert_kwargs_empty(kwargs)
        self.sy, self.sx = _pair(stride)   # vertical / horizontal stride
        self.ph, self.pw = _pair(pad)      # vertical / horizontal padding
        self.cover_all = cover_all
        self.requires_x_grad = requires_x_grad

    def check_type_forward(self, in_types):
        """Validate dtypes and shapes of ``(x, W[, b])``."""
        n_in = in_types.size()
        type_check.expect(2 <= n_in, n_in <= 3)
        x_type = in_types[0]
        w_type = in_types[1]
        type_check.expect(
            x_type.dtype.kind == 'f',
            w_type.dtype.kind == 'f',
            x_type.ndim == 4,
            w_type.ndim == 4,
            x_type.shape[1] == w_type.shape[1],
        )
        if type_check.eval(n_in) == 3:
            b_type = in_types[2]
            type_check.expect(
                b_type.dtype == x_type.dtype,
                b_type.ndim == 1,
                b_type.shape[0] == w_type.shape[0],
            )

    def forward_cpu(self, inputs):
        """CPU forward pass via im2col + tensordot."""
        x, W = inputs[:2]
        b = inputs[2] if len(inputs) == 3 else None
        if not type_check.same_types(*inputs):
            if b is not None:
                raise ValueError('numpy and cupy must not be used together\n'
                                 'type(W): {0}, type(x): {1}, type(b): {2}'
                                 .format(type(W), type(x), type(b)))
            else:
                raise ValueError('numpy and cupy must not be used together\n'
                                 'type(W): {0}, type(x): {1}'
                                 .format(type(W), type(x)))
        kh, kw = W.shape[2:]
        # Kept on self for reuse in backward_cpu.
        self.col = conv.im2col_cpu(
            x, kh, kw, self.sy, self.sx, self.ph, self.pw,
            cover_all=self.cover_all)
        y = numpy.tensordot(
            self.col, W, ((1, 2, 3), (1, 2, 3))).astype(x.dtype, copy=False)
        if b is not None:
            y += b
        # (n, out_h, out_w, out_c) -> (n, out_c, out_h, out_w)
        return numpy.rollaxis(y, 3, 1),

    def forward_gpu(self, inputs):
        """GPU forward pass: cuDNN when usable, else im2col on device."""
        x, W = inputs[:2]
        b = inputs[2] if len(inputs) == 3 else None
        if not type_check.same_types(*inputs):
            if b is not None:
                raise ValueError('numpy and cupy must not be used together\n'
                                 'type(W): {0}, type(x): {1}, type(b): {2}'
                                 .format(type(W), type(x), type(b)))
            else:
                raise ValueError('numpy and cupy must not be used together\n'
                                 'type(W): {0}, type(x): {1}'
                                 .format(type(W), type(x)))
        out_c, _, kh, kw = W.shape
        n, c, h, w = x.shape
        out_h = conv.get_conv_outsize(h, kh, self.sy, self.ph,
                                      cover_all=self.cover_all)
        assert out_h > 0, 'Height in the output should be positive.'
        out_w = conv.get_conv_outsize(w, kw, self.sx, self.pw,
                                      cover_all=self.cover_all)
        assert out_w > 0, 'Width in the output should be positive.'
        y = cuda.cupy.empty((n, out_c, out_h, out_w), dtype=x.dtype)
        # cuDNN cannot express cover_all, hence the extra condition.
        if (not self.cover_all and chainer.should_use_cudnn('>=auto') and
                _check_cudnn_acceptable_type(x.dtype, W.dtype)):
            x = cuda.cupy.ascontiguousarray(x)
            W = cuda.cupy.ascontiguousarray(W)
            if b is not None:
                b = cuda.cupy.ascontiguousarray(b)
            handle = cudnn.get_handle()
            x_desc = cudnn.create_tensor_descriptor(x)
            y_desc = cudnn.create_tensor_descriptor(y)
            # Descriptors are kept on self so backward_gpu can reuse them.
            self.filter_desc = cudnn.create_filter_descriptor(W)
            self.conv_desc = cudnn.create_convolution_descriptor(
                (self.ph, self.pw), (self.sy, self.sx), x.dtype)
            if b is not None:
                self.bias_desc = cudnn.create_tensor_descriptor(
                    b[None, :, None, None])
            workspace_size = cuda.get_max_workspace_size()
            workspace = cuda.cupy.empty((workspace_size,), dtype='b')
            algo = libcudnn.getConvolutionForwardAlgorithm(
                handle, x_desc.value, self.filter_desc.value,
                self.conv_desc.value, y_desc.value, _fwd_pref,
                workspace_size)
            oz_dtype = 'd' if x.dtype == 'd' else 'f'
            one = numpy.array(1, dtype=oz_dtype).ctypes
            zero = numpy.array(0, dtype=oz_dtype).ctypes
            libcudnn.convolutionForward(
                handle, one.data, x_desc.value, x.data.ptr,
                self.filter_desc.value, W.data.ptr, self.conv_desc.value,
                algo, workspace.data.ptr, workspace_size, zero.data,
                y_desc.value, y.data.ptr)
            # TODO(beam2d): Support unshared bias
            if b is not None:
                cudnn.add_tensor(
                    handle, one.data, self.bias_desc.value, b.data.ptr,
                    one.data, y_desc.value, y.data.ptr)
        else:
            # Implementation using im2col
            self.col = conv.im2col_gpu(
                x, kh, kw, self.sy, self.sx, self.ph, self.pw,
                cover_all=self.cover_all)
            y = cuda.cupy.tensordot(
                self.col, W, ((1, 2, 3), (1, 2, 3))).astype(x.dtype,
                                                            copy=False)
            # TODO(beam2d): Support unshared bias
            if b is not None:
                y += b
            y = cuda.cupy.rollaxis(y, 3, 1)
        return y,

    def backward_cpu(self, inputs, grad_outputs):
        """CPU backward pass; gradients come from tensordot with the
        ``self.col`` buffer cached by forward_cpu."""
        x, W = inputs[:2]
        b = inputs[2] if len(inputs) == 3 else None
        if not type_check.same_types(*inputs):
            if b is not None:
                raise ValueError('numpy and cupy must not be used together\n'
                                 'type(W): {0}, type(x): {1}, type(b): {2}'
                                 .format(type(W), type(x), type(b)))
            else:
                raise ValueError('numpy and cupy must not be used together\n'
                                 'type(W): {0}, type(x): {1}'
                                 .format(type(W), type(x)))
        gy = grad_outputs[0]
        h, w = x.shape[2:]
        gW = numpy.tensordot(
            gy, self.col, ((0, 2, 3), (0, 4, 5))).astype(W.dtype, copy=False)
        if not self.requires_x_grad:
            gx = None
        else:
            gcol = numpy.tensordot(W, gy, (0, 1)).astype(x.dtype, copy=False)
            gcol = numpy.rollaxis(gcol, 3)
            gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw,
                                 h, w)
        if b is None:
            return gx, gW
        else:
            # Bias gradient: sum over batch and spatial axes.
            gb = gy.sum(axis=(0, 2, 3))
            return gx, gW, gb

    def backward_gpu(self, inputs, grad_outputs):
        """GPU backward pass: cuDNN (v2 or v3+ API) when usable, else
        the tensordot/col2im fallback."""
        x, W = inputs[:2]
        b = inputs[2] if len(inputs) == 3 else None
        if not type_check.same_types(*inputs):
            if b is not None:
                raise ValueError('numpy and cupy must not be used together\n'
                                 'type(W): {0}, type(x): {1}, type(b): {2}'
                                 .format(type(W), type(x), type(b)))
            else:
                raise ValueError('numpy and cupy must not be used together\n'
                                 'type(W): {0}, type(x): {1}'
                                 .format(type(W), type(x)))
        gy = grad_outputs[0]
        _, out_c, out_h, out_w = gy.shape
        n, c, h, w = x.shape
        kh, kw = W.shape[2:]
        gW = cuda.cupy.empty_like(W)
        gx = None
        if (not self.cover_all and chainer.should_use_cudnn('>=auto') and
                _check_cudnn_acceptable_type(x.dtype, W.dtype)):
            x = cuda.cupy.ascontiguousarray(x)
            W = cuda.cupy.ascontiguousarray(W)
            gy = cuda.cupy.ascontiguousarray(gy)
            handle = cudnn.get_handle()
            x_desc = cudnn.create_tensor_descriptor(x)
            gy_desc = cudnn.create_tensor_descriptor(gy)
            oz_dtype = 'd' if x.dtype == 'd' else 'f'
            one = numpy.array(1, dtype=oz_dtype).ctypes
            zero = numpy.array(0, dtype=oz_dtype).ctypes
            if _cudnn_version >= 3000:
                workspace_size = cuda.get_max_workspace_size()
                workspace = cuda.cupy.empty((workspace_size,), dtype='b')
                if configuration.config.cudnn_deterministic:
                    # ALGO_1 is the deterministic choice.
                    algo = cuda.cupy.cuda.cudnn.CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1  # NOQA
                else:
                    algo = libcudnn.getConvolutionBackwardFilterAlgorithm(
                        handle, x_desc.value, gy_desc.value,
                        self.conv_desc.value, self.filter_desc.value,
                        _bwd_filter_pref, workspace_size)
                libcudnn.convolutionBackwardFilter_v3(
                    handle, one.data, x_desc.value, x.data.ptr,
                    gy_desc.value, gy.data.ptr, self.conv_desc.value,
                    algo, workspace.data.ptr, workspace_size,
                    zero.data, self.filter_desc.value, gW.data.ptr)
                if self.requires_x_grad:
                    if configuration.config.cudnn_deterministic:
                        algo = cuda.cupy.cuda.cudnn.CUDNN_CONVOLUTION_BWD_DATA_ALGO_1  # NOQA
                    else:
                        algo = libcudnn.getConvolutionBackwardDataAlgorithm(
                            handle, self.filter_desc.value, gy_desc.value,
                            self.conv_desc.value, x_desc.value, _bwd_data_pref,
                            workspace_size)
                    gx = cuda.cupy.empty_like(x)
                    libcudnn.convolutionBackwardData_v3(
                        handle, one.data, self.filter_desc.value, W.data.ptr,
                        gy_desc.value, gy.data.ptr, self.conv_desc.value,
                        algo, workspace.data.ptr, workspace_size,
                        zero.data, x_desc.value, gx.data.ptr)
            else:
                # cuDNN < v3 has no deterministic-algorithm selection.
                if configuration.config.cudnn_deterministic:
                    raise ValueError(
                        "`cudnn_deterministic` option must be False "
                        "if the backpropagation of "
                        "chainer.functions.Convolution2D "
                        "uses cuDNN and cuDNN versions < v3. "
                        "Turn off cudnn_deterministic option with "
                        "`chainer.using_config('cudnn_deterministic', False)` "
                        "context.")
                libcudnn.convolutionBackwardFilter_v2(
                    handle, one.data, x_desc.value, x.data.ptr,
                    gy_desc.value, gy.data.ptr, self.conv_desc.value,
                    zero.data, self.filter_desc.value, gW.data.ptr)
                if self.requires_x_grad:
                    gx = cuda.cupy.empty_like(x)
                    libcudnn.convolutionBackwardData_v2(
                        handle, one.data, self.filter_desc.value, W.data.ptr,
                        gy_desc.value, gy.data.ptr, self.conv_desc.value,
                        zero.data, x_desc.value, gx.data.ptr)
            if b is not None:
                gb = cuda.cupy.empty_like(b)
                libcudnn.convolutionBackwardBias(
                    handle, one.data, gy_desc.value, gy.data.ptr,
                    zero.data, self.bias_desc.value, gb.data.ptr)
        else:
            gW = cuda.cupy.tensordot(
                gy, self.col, ((0, 2, 3), (0, 4, 5))).astype(W.dtype,
                                                             copy=False)
            if self.requires_x_grad:
                gcol = cuda.cupy.tensordot(W, gy, (0, 1)).astype(x.dtype,
                                                                 copy=False)
                gcol = cuda.cupy.rollaxis(gcol, 3)
                gx = conv.col2im_gpu(
                    gcol, self.sy, self.sx, self.ph, self.pw, h, w)
            if b is not None:
                gb = gy.sum(axis=(0, 2, 3))
        if b is None:
            return gx, gW
        else:
            return gx, gW, gb
def convolution_2d(x, W, b=None, stride=1, pad=0, cover_all=False, **kwargs):
    """convolution_2d(x, W, b=None, stride=1, pad=0, cover_all=False)

    Two-dimensional convolution function.

    Computes correlations between filters ``W`` of shape
    :math:`(c_O, c_I, h_K, w_K)` and patches of the input ``x`` of shape
    :math:`(n, c_I, h_I, w_I)`, optionally adding the bias ``b`` of
    length :math:`c_O` to all spatial locations of the output.  Patches
    are taken at positions shifted by multiples of ``stride`` starting
    from ``(-h_P, -w_P)``, where ``pad`` gives :math:`(h_P, w_P)`.

    The output size is

    .. math::

       h_O &= (h_I + 2h_P - h_K) / s_Y + 1,\\\\
       w_O &= (w_I + 2w_P - w_K) / s_X + 1,

    and with ``cover_all=True`` an additional stride is applied so every
    spatial location is covered:

    .. math::

       h_O &= (h_I + 2h_P - h_K + s_Y - 1) / s_Y + 1,\\\\
       w_O &= (w_I + 2w_P - w_K + s_X - 1) / s_X + 1.

    The output can be non-deterministic when cuDNN is used.  Setting
    ``chainer.configuration.config.cudnn_deterministic`` to ``True``
    (cuDNN >= v3) forces a deterministic algorithm.

    .. warning::

        ``deterministic`` argument is not supported anymore since v2.
        Instead, use ``chainer.using_config('cudnn_deterministic', value)``
        (value is either ``True`` or ``False``).
        See :func:`chainer.using_config`.

    Args:
        x: Input variable of shape :math:`(n, c_I, h_I, w_I)`.
        W: Weight variable of shape :math:`(c_O, c_I, h_K, w_K)`.
        b: Optional bias variable of length :math:`c_O`.
        stride (int or pair of ints): Stride of filter applications;
            ``stride=s`` and ``stride=(s, s)`` are equivalent.
        pad (int or pair of ints): Spatial padding width; ``pad=p`` and
            ``pad=(p, p)`` are equivalent.
        cover_all (bool): If ``True``, all spatial locations are
            convoluted into some output pixels.

    Returns:
        ~chainer.Variable: Output of shape :math:`(n, c_O, h_O, w_O)`.

    .. seealso:: :class:`~chainer.links.Convolution2D`
    """
    argument.check_unexpected_kwargs(
        kwargs, deterministic="deterministic argument is not "
        "supported anymore. "
        "Use chainer.using_config('cudnn_deterministic', value) "
        "context where value is either `True` or `False`.")
    argument.assert_kwargs_empty(kwargs)
    # x only needs a gradient when it is a Variable that asks for one.
    requires_x_grad = isinstance(x, variable.Variable) and x.requires_grad
    func = Convolution2DFunction(stride, pad, cover_all, requires_x_grad)
    call_args = (x, W) if b is None else (x, W, b)
    return func(*call_args)
| |
import argparse
from datetime import datetime
import os
import os.path
import Queue
import signal
import subprocess
import sys
import threading
import time
from slackclient import SlackClient
from patterns import *
TIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S'


def enqueue_output(out, queue):
    """Drain `out` line-by-line into `queue`, closing the stream at EOF.

    Runs as a daemon-thread target so the blocking readline never stalls
    the main loop.
    """
    for raw_line in iter(out.readline, b''):
        queue.put(raw_line)
    out.close()
class MinecraftBot:
    """ A Slack-bot for reporting on and interacting with a multiplayer Minecraft server.

    The bot will read the latest.log and report to the Slack channel player-centered events, such as:

    - Joins
    - Departs
    - Achievements
    - Deaths
    - In-game chat

    In addition, the bot will respond to commands. Currently, the commands understood are:

    - list: list the currently logged-in players
    """

    READ_WEBSOCKET_DELAY = 1  # 1 second delay between reading from firehose

    def __init__(self, bot_id, slack_client, server_directory, channel="#general"):
        """ Create a Minecraft bot. It requires a few bits to get going:

        - bot_id: the non-human-readable id of the bot
        - slack_client: an initialized SlackClient (created with your bot's API key)
        - server_directory: the path to the Minecraft server's directory
        - channel: the name of the channel to send messages (defaults to #general)
        """
        self.bot_id = bot_id
        self.slack_client = slack_client
        self.server_directory = server_directory
        # Subprocess handle; None means "server not running".
        self.server_process = None
        # Lines of server stdout, filled by a daemon reader thread.
        self.server_message_queue = Queue.Queue()
        self.server_thread = None
        self.server_version = None
        self.server_port = None
        # Persists the newest log timestamp seen, so restarts don't re-report.
        self.most_recent_timestamp_file = os.path.join(self.server_directory, 'latest_timestamp.txt')
        self.channel = channel
        self.most_recent_timestamp = self.find_most_recent_timestamp()
        self.current_players = set()
        # Slack command word -> handler method.
        self.commands = {
            'info': self.command_server_info,
            'launch': self.command_launch_server,
            'list': self.command_list_current_players,
            'restart': self.command_restart_server,
            'stop': self.command_stop_server,
            'whitelist': self.command_whitelist_user,
        }
        # Compiled log pattern -> handler for its captured groups.
        self.log_parsers = {
            version_pattern: self.handle_version,
            port_pattern: self.handle_port,
            chat_pattern: self.handle_chat,
            join_pattern: self.handle_join,
            left_pattern: self.handle_left,
            died_pattern: self.handle_broadcast,
            achievement_pattern: self.handle_broadcast,
        }
        self.launch_args = [
            'java',
            '-Xmx1024M',
            '-Xms1024M',
            '-Dlog4j.configurationFile={}'.format(os.path.join(self.server_directory, 'custom-log4j2.xml')),
            '-jar',
            os.path.join(self.server_directory, 'current.jar'),
            'nogui'
        ]
        # Shut the server down cleanly on Ctrl-C.
        signal.signal(signal.SIGINT, self.handle_signal)

    # Bot/server
    def run(self):
        """ The main loop - read from the Slack RTM firehose, and also keep an eye on the server's stdout """
        if self.slack_client.rtm_connect():
            while True:
                slack_lines = self.slack_client.rtm_read()
                for slack_line in slack_lines:
                    command, channel = self.parse_slack_line(slack_line)
                    if command and channel:
                        # first, respond to in-Slack messages
                        self.handle_command(command, channel)
                time.sleep(self.READ_WEBSOCKET_DELAY)
                if self.server_process and self.server_process.poll() is None:
                    # then, read server output and respond
                    try:
                        line = self.server_message_queue.get_nowait()
                    except Queue.Empty:
                        continue
                    else:
                        print line.strip()
                        # Only report lines newer than what we've seen before.
                        line_datetime = datetime.min
                        timestamp_match = timestamp_pattern.match(line)
                        if timestamp_match:
                            line_datetime = datetime.strptime(timestamp_match.group(1), TIMESTAMP_FORMAT)
                        if line_datetime > self.most_recent_timestamp:
                            for pattern, handler in self.log_parsers.items():
                                maybe_match = pattern.match(line)
                                if maybe_match:
                                    handler(maybe_match.groups())
                time.sleep(1)
        else:
            print("Connection failed. Invalid Slack token or bot ID?")

    def launch_server(self):
        """ Launch the server """
        if not self.server_process:
            os.chdir(self.server_directory)
            self.server_process = subprocess.Popen(self.launch_args, bufsize=4096, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            # Daemon thread forwards server stdout into the message queue.
            self.server_thread = threading.Thread(target=enqueue_output, args=(self.server_process.stdout, self.server_message_queue))
            self.server_thread.daemon = True
            self.server_thread.start()

    def stop_server(self):
        """ Stop the server """
        if self.server_process:
            self.server_process.terminate()
            self.server_process = None
            self.current_players.clear()

    def find_most_recent_timestamp(self):
        """ Set the most recent timestamp seen by the bot """
        try:
            with open(self.most_recent_timestamp_file) as f:
                timestamp_float = float(f.read().strip())
            most_recent_timestamp = datetime.fromtimestamp(timestamp_float)
        except IOError, e:
            # No file yet -> treat everything in the log as new.
            most_recent_timestamp = datetime.min
        return most_recent_timestamp

    def remember_timestamp(self, timestamp):
        """ Record the timestamp from a given log line as its mktime float """
        most_recent_timestamp = datetime.strptime(timestamp, TIMESTAMP_FORMAT)
        seconds_timestamp = time.mktime(most_recent_timestamp.timetuple())
        with open(self.most_recent_timestamp_file, 'w') as f:
            f.write(str(seconds_timestamp))

    def post_message(self, message, channel=None):
        """ Post a message to the the channel as our bot """
        self.slack_client.api_call(
            'chat.postMessage',
            channel=channel or self.channel,
            text=message,
            as_user=True,
            link_names=True,
            username=self.bot_id)

    def parse_slack_line(self, message):
        """ Parse the output of the Slack Real-Time Messaging firehose

        Look for messages directed at our bot and return the relevant information
        """
        if message:
            text = message.get('text')
            if text:
                print(u"Channel output: {}".format(text))
                maybe_match = slack_pattern.match(text)
                if maybe_match and maybe_match.group('addressee') == self.bot_id:
                    # return text after the @ mention, whitespace removed
                    print(u"Found a command for me! {}".format(maybe_match.groups()))
                    return maybe_match.group('command').lower(), message['channel']
        # Not addressed to us (or not a text message at all).
        return None, None

    def handle_command(self, command, channel):
        """ Process the command given from a channel and write back to same.

        If the command is not understood, return a helpful message.
        """
        command_args = command.split(' ')
        command = command_args[0]
        args = command_args[1:] if len(command_args) > 1 else []
        handler = self.commands.get(command, self.command_unknown)
        response = handler(*args)
        self.post_message(response, channel)

    def handle_signal(self, signum, frame):
        """ Handle SIGINT, primarily """
        response = "Stopping bot, shutting down server!" if self.server_process else "Stopping bot, goodbye!"
        self.post_message(response)
        self.stop_server()
        sys.exit()

    # Commands
    def command_unknown(self, *args, **kw):
        """ The default response for commands the bot doesn't understand. """
        return "So far, the commands I recognize are: {}! Tell @zacbir to add more smarts!".format(
            ', '.join(['`{}`'.format(x) for x in sorted(self.commands)])
        )

    def command_list_current_players(self, *args, **kw):
        """ List the currently logged in users """
        if not self.server_process:
            return "The server isn't running right now"
        current_players = self.current_players
        response = "There {} currently {} player{} logged into the server{}.".format(
            'is' if len(current_players) == 1 else 'are',
            len(current_players),
            '' if len(current_players) == 1 else 's',
            '' if len(current_players) == 0 else ': {}'.format(', '.join(current_players)))
        return response

    def command_launch_server(self, *args, **kw):
        """ Command handler for launching the server """
        response = "Server already running" if self.server_process else "Launching Server"
        self.launch_server()
        return response

    def command_stop_server(self, *args, **kw):
        """ Command handler for stopping the server """
        response = "Stopping server" if self.server_process else "Server not running"
        self.stop_server()
        return response

    def command_restart_server(self, *args, **kw):
        """ Restart the server, stopping it first, if necessary """
        response = "Restarting server" if self.server_process else "Starting server"
        self.stop_server()
        self.launch_server()
        return response

    def command_server_info(self, *args, **kw):
        """ Report information about the server """
        response = "I'm running Minecraft server version {} on port {}".format(self.server_version, self.server_port) if self.server_process else "Server not running"
        return response

    def command_whitelist_user(self, *args, **kw):
        """ Whitelist a user """
        # NOTE(review): assumes args[0] exists and that writing to stdin
        # without a trailing newline is enough for the server to execute
        # the command — confirm both.
        user = args[0]
        self.server_process.stdin.write('/whitelist add {}'.format(user))
        return "Whitelisted {}".format(user)

    # Log line parsers
    def handle_version(self, groups):
        """ Remember the server version announced in the log. """
        timestamp, version = groups
        self.server_version = version
        self.remember_timestamp(timestamp)

    def handle_port(self, groups):
        """ Remember the port the server reports listening on. """
        timestamp, port = groups
        self.server_port = port
        self.remember_timestamp(timestamp)

    def handle_chat(self, groups):
        """ Relay in-game chat to Slack. """
        timestamp, user, message = groups
        self.post_message('{} said: {}'.format(user, message))
        self.remember_timestamp(timestamp)

    def handle_join(self, groups):
        """ Announce a join and track the player as online. """
        timestamp, user, message = groups
        self.post_message('{} {}'.format(user, message))
        self.remember_timestamp(timestamp)
        self.current_players.add(user)

    def handle_left(self, groups):
        """ Announce a departure and stop tracking the player. """
        timestamp, user, message = groups
        self.post_message('{} {}'.format(user, message))
        self.remember_timestamp(timestamp)
        self.current_players.discard(user)

    def handle_broadcast(self, groups):
        """ Relay a death/achievement broadcast verbatim. """
        timestamp, user, message = groups
        self.post_message('{} {}'.format(user, message))
        self.remember_timestamp(timestamp)
if __name__ == '__main__':
    # CLI entry point: server directory and channel come from arguments,
    # credentials from the environment (SLACK_API_KEY / SLACK_BOT_ID).
    parser = argparse.ArgumentParser(description='Launch a bot to communicate information about a shared Minecraft server to Slack')
    parser.add_argument('-d', '--directory', help="The directory where the Minecraft server lives")
    parser.add_argument('-c', '--channel', help="The name of the channel where the Minecraft bot should report to")
    args = parser.parse_args()
    api_key = os.environ.get('SLACK_API_KEY')
    slack_client = SlackClient(api_key)
    channel = args.channel or '#general'
    minecraft_bot = MinecraftBot(
        os.environ.get('SLACK_BOT_ID'),
        slack_client,
        args.directory,
        channel = channel)
    minecraft_bot.run()
| |
#
#
# File: utilities.py
#
#
#
#
import datetime
import time
import string
import urllib2
import math
import redis
import base64
import json
import py_cf
import os
import copy
import load_files
import rabbit_cloud_status_publish
from eto.eto import *
from eto.cimis_request import *
import load_files
#
#
# This class deletes legacy CIMIS emails sent to the Lacima Ranch mailbox.
# Emails are no longer used as a data channel; an API key is now used to access the data.
#
#
#
class Delete_Cimis_Email():
    """Deletes legacy CIMIS e-mails sent to the ranch mailbox.

    E-mail is no longer used to deliver data (an API key is used now),
    so everything accumulating in the inbox is purged.
    """

    def __init__(self, email_data):
        # email_data: dict with "imap_username"/"imap_password", or None
        # when no mailbox is configured (then the callback is a no-op).
        self.email_data = email_data

    def delete_email_files(self, chainFlowHandle, chainOjb, parameters, event):
        """Chain-flow callback: flag and expunge every message in the
        Gmail inbox."""
        # BUG FIX: `imaplib` was used but never imported anywhere in this
        # module, so this method raised NameError at runtime.
        import imaplib
        if self.email_data != None:
            IMAP_SERVER = 'imap.gmail.com'
            IMAP_PORT = '993'
            imap_username = self.email_data["imap_username"]
            imap_password = self.email_data["imap_password"]
            self.imap = imaplib.IMAP4_SSL(IMAP_SERVER, IMAP_PORT)
            self.imap.login(imap_username, imap_password)
            self.imap.select('Inbox')
            status, data = self.imap.search(None, 'ALL')
            count = sum(1 for num in data[0].split())
            print ("count",count)
            if count > 0 :
                self.imap.select('Inbox')
                status, data = self.imap.search(None, 'ALL')
                for num in data[0].split():
                    self.imap.store(num, '+FLAGS', r'\Deleted')
                self.imap.expunge()
class System_Monitoring():
    """Queues system-action schedules (system_actions.json) onto the
    sprinkler control queue when their time window is active."""

    def __init__(self, redis_handle ):
        self.redis_handle = redis_handle
        self.app_files = load_files.APP_FILES(redis_handle)

    def check_schedule_flag( self, schedule_name ):
        """Return True when the schedule has NOT been run yet (its
        SYSTEM_COMPLETED entry is [0, ...] or missing/unparseable)."""
        data = self.redis_handle.hget("SYSTEM_COMPLETED", schedule_name)
        try:
            data = json.loads( data)
        except:
            data = [ 0 , -3 ]
        if int(data[0]) == 0 :
            return_value = True
        else:
            return_value = False
        return return_value

    def match_time( self, compare, value ):
        """True when [hour, minute] `compare` is <= `value`."""
        return_value = False
        if compare[0] < value[0]:
            return_value = True
        if (compare[0] == value[0]) and ( compare[1] <= value[1] ):
            return_value = True
        return return_value

    def determine_start_time( self, start_time,end_time ):
        """True when the current wall-clock time falls inside
        [start_time, end_time], including windows that wrap midnight."""
        return_value = False
        temp = datetime.datetime.today()
        st_array = [ temp.hour, temp.minute ]
        if self.match_time( start_time,end_time ) == True:
            if ( self.match_time( start_time, st_array) and
                 self.match_time( st_array, end_time )) == True:
                return_value = True
        else:
            # this is a wrap around case
            if self.match_time( start_time,st_array) :
                return_value = True
            if self.match_time(st_array,end_time):
                return_value = True
        return return_value

    def clear_done_flag( self, *arg ):
        """Reset the done flag of every action whose window is inactive,
        so it can fire again next time the window opens."""
        dow_array = [ 1,2,3,4,5,6,0]
        dow = datetime.datetime.today().weekday()
        dow = dow_array[dow]
        sprinkler_ctrl = self.app_files.load_file("system_actions.json")
        for j in sprinkler_ctrl:
            name = j["name"]
            if self.determine_start_time( j["start_time"],j["end_time"]) == False:
                temp_1 = json.dumps( [0,-1] )
                self.redis_handle.hset( "SYSTEM_COMPLETED", name,temp_1 )

    def check_for_active_schedule( self, *args):
        """Push any due, not-yet-run system action onto the sprinkler
        control queue and mark it as completed."""
        temp = datetime.datetime.today()
        dow_array = [ 1,2,3,4,5,6,0]
        dow = datetime.datetime.today().weekday()
        dow = dow_array[dow]
        st_array = [temp.hour,temp.minute]
        sprinkler_ctrl = self.app_files.load_file("system_actions.json")
        for j in sprinkler_ctrl:
            name = j["name"]
            command = j["command_string"]
            print "checking schedule",name
            if j["dow"][dow] != 0 :
                start_time = j["start_time"]
                end_time = j["end_time"]
                if self.determine_start_time( start_time,end_time ):
                    print "made it past start time",start_time,end_time
                    if self.check_schedule_flag( name ):
                        print "queue in schedule ",name
                        temp = {}
                        temp["command"] = command
                        temp["schedule_name"] = name
                        temp["step"] = 0
                        temp["run_time"] = 0
                        scratch = json.dumps(temp)
                        self.redis_handle.lpush("QUEUES:SPRINKLER:CTRL", base64.b64encode(scratch) )
                        # NOTE(review): comment said "+hour" but 60*3600 s
                        # is 60 hours — confirm whether 60*60 was intended.
                        temp = [1,time.time()+60*3600 ] # +hour prevents a race condition
                        self.redis_handle.hset( "SYSTEM_COMPLETED",name,json.dumps(temp) )
class Schedule_Monitoring():
def __init__(self, redis_handle ):
self.redis_handle = redis_handle
self.app_files = load_files.APP_FILES(redis_handle)
def check_schedule_flag( self, schedule_name ):
data = self.redis_handle.hget("SCHEDULE_COMPLETED", schedule_name)
try:
data = json.loads( data)
except:
data = [ 0 , -3 ]
if int(data[0]) == 0 :
return_value = True
else:
return_value = False
return return_value
def match_time( self, compare, value ):
return_value = False
if compare[0] < value[0]:
return_value = True
if (compare[0] == value[0]) and ( compare[1] <= value[1] ):
return_value = True
return return_value
def determine_start_time( self, start_time,end_time ):
return_value = False
temp = datetime.datetime.today()
st_array = [ temp.hour, temp.minute ]
if self.match_time( start_time,end_time ) == True:
if ( self.match_time( start_time, st_array) and
self.match_time( st_array, end_time )) == True:
return_value = True
else:
# this is a wrap around case
if self.match_time( start_time,st_array) :
return_value = True
if self.match_time(st_array,end_time):
return_value = True
return return_value
def clear_done_flag( self, *arg ):
dow_array = [ 1,2,3,4,5,6,0]
dow = datetime.datetime.today().weekday()
dow = dow_array[dow]
sprinkler_ctrl = self.app_files.load_file("sprinkler_ctrl.json")
for j in sprinkler_ctrl:
name = j["name"]
if self.determine_start_time( j["start_time"],j["end_time"]) == False:
temp_1 = json.dumps( [0,-1] )
self.redis_handle.hset( "SCHEDULE_COMPLETED", name,temp_1 )
def check_for_active_schedule( self, *args):
temp = datetime.datetime.today()
dow_array = [ 1,2,3,4,5,6,0]
dow = datetime.datetime.today().weekday()
dow = dow_array[dow]
st_array = [temp.hour,temp.minute]
rain_day = self.redis_handle.hget("CONTROL_VARIABLES" ,"rain_day" )
try:
rain_day = int( rain_day )
except:
rain_day = 0
self.redis_handle.set("CONTROL_VARIABLES", "rain_day", rain_day)
if rain_day != 0:
return
sprinkler_ctrl = self.app_files.load_file("sprinkler_ctrl.json")
for j in sprinkler_ctrl:
name = j["name"]
print "checking schedule",name
if j["dow"][dow] != 0 :
start_time = j["start_time"]
end_time = j["end_time"]
if self.determine_start_time( start_time,end_time ):
print "made it past start time",start_time,end_time
if self.check_schedule_flag( name ):
print "queue in schedule ",name
temp = {}
temp["command"] = "QUEUE_SCHEDULE"
temp["schedule_name"] = name
temp["step"] = 0
temp["run_time"] = 0
scratch = json.dumps(temp)
self.redis_handle.lpush("QUEUES:SPRINKLER:CTRL", base64.b64encode(scratch) )
temp = [1,time.time()+60*3600 ] # +hour prevents a race condition
self.redis_handle.hset( "SCHEDULE_COMPLETED",name,json.dumps(temp) )
class Ntpd():
    """Wraps a one-shot system-clock sync for use as a chain-flow step."""

    def __init__( self ):
        pass

    def get_time( self, chainFlowHandle, chainObj, parameters, event ):
        """Chain-flow callback: shell out to ntpdate to sync the clock
        from pool.ntp.org (blocks until the query finishes)."""
        os.system("ntpdate -b -s -u pool.ntp.org")
if __name__ == "__main__":
    import time
    import construct_graph
    import io_control.construct_classes
    import io_control.new_instrument
    from linux_acquisition import construct_linux_acquisition_class
    from linux_acquisition import add_chains
    # site graph: locates the data-store and io-server nodes for this system
    gm = construct_graph.Graph_Management("PI_1","main_remote","LaCima_DataStore")
    cimis_email_data = gm.match_relationship( "CIMIS_EMAIL", json_flag = True )[0]
    delete_cimis_email = Delete_Cimis_Email(cimis_email_data)
    data_store_nodes = gm.find_data_stores()
    io_server_nodes = gm.find_io_servers()
    # find ip and port for redis data store
    data_server_ip = data_store_nodes[0]["ip"]
    data_server_port = data_store_nodes[0]["port"]
    # db 12 feeds the linux acquisition classes; db 0 is the control store
    redis_new_handle = redis.StrictRedis( host = data_server_ip, port=data_server_port, db = 12 )
    redis_handle = redis.StrictRedis( host = data_server_ip, port=data_server_port, db = 0 )
    io_server_ip = io_server_nodes[0]["ip"]
    io_server_port = io_server_nodes[0]["port"]
    # find ip and port for ip server
    instrument = io_control.new_instrument.Modbus_Instrument()
    instrument.set_ip(ip= io_server_ip, port = int(io_server_port))
    linux_monitoring = construct_linux_acquisition_class( redis_new_handle, gm, instrument )
    action = System_Monitoring( redis_handle )
    sched = Schedule_Monitoring( redis_handle )
    ntpd = Ntpd()
    #
    # Adding chains
    #
    cf = py_cf.CF_Interpreter()
    # daily: delete downloaded CIMIS email data in the 9:00-10:00 window
    cf.define_chain("delete_cimis_email_data",True)
    cf.insert_link( "link_1","WaitTod",["*",9,"*","*" ])
    cf.insert_link( "link_2","One_Step",[delete_cimis_email.delete_email_files])
    cf.insert_link( "link_3","WaitTod",["*",10,"*","*" ])
    cf.insert_link( "link_4","Reset",[])
    # every minute: queue schedules whose windows are active
    cf.define_chain( "plc_auto_mode", True )
    cf.insert_link( "link_2", "One_Step", [ action.check_for_active_schedule ] )
    cf.insert_link( "link_1", "One_Step", [ sched.check_for_active_schedule ] )
    cf.insert_link( "link_2", "WaitEvent",[ "MINUTE_TICK" ] )
    cf.insert_link( "link_3", "Reset",[] )
    # every minute: clear completion flags of schedules outside their window
    cf.define_chain("clear_done_flag",True)
    cf.insert_link( "link_2", "One_Step", [action.clear_done_flag ] )
    cf.insert_link( "link_2", "One_Step", [sched.clear_done_flag ] )
    cf.insert_link( "link_1", "WaitEvent",[ "MINUTE_TICK" ] )
    cf.insert_link( "link_3", "Reset",[] )
    #
    #
    # internet time update
    #
    #
    # hourly: step the system clock from an NTP pool server
    cf.define_chain("ntpd",True)
    cf.insert_link( "link_9","Log",["ntpd"] )
    cf.insert_link( "link_1", "One_Step", [ntpd.get_time] )
    cf.insert_link( "link_10", "Log",["got time"] )
    cf.insert_link( "link_2", "WaitEvent",[ "HOUR_TICK" ] )
    cf.insert_link( "link_3", "Reset",[] )
    # fan TIME_TICK out into MINUTE/HOUR/DAY tick events for the chains above
    cf.define_chain("linux_test",True)
    cf.insert_link( "linkxx","Log",["test chain start"])
    cf.insert_link( "link_0", "SendEvent", ["MINUTE_TICK",1] )
    cf.insert_link( "link_1", "WaitEvent", ["TIME_TICK"] )
    cf.insert_link( "link_2", "SendEvent", [ "HOUR_TICK",1 ] )
    cf.insert_link( "link_3", "WaitEventCount", ["TIME_TICK",2,0])
    cf.insert_link( "link_4", "SendEvent", [ "DAY_TICK", 1] )
    add_chains(cf, linux_monitoring)
    # build and run the chain-flow environment (blocks forever)
    cf_environ = py_cf.Execute_Cf_Environment( cf )
    cf_environ.execute()
| |
# Copyright 2014-2016 Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import functools
import json
import os
import re
import subprocess
from glob import glob
from os.path import (abspath, basename, dirname, expanduser, isdir, isfile,
join, realpath)
from platform import system, uname
from threading import Thread
from platformio import __apiurl__, __version__, exception
# pylint: disable=wrong-import-order
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
class AsyncPipe(Thread):
    """OS pipe whose read end is drained by a background thread.

    ``fileno()`` returns the *write* end, so an instance can be handed to
    ``subprocess.Popen`` as stdout/stderr; every line received is stored
    in an internal buffer and, when given, forwarded to ``outcallback``.
    """
    def __init__(self, outcallback=None):
        Thread.__init__(self)
        # optional per-line callback; without it, lines are echoed (see run)
        self.outcallback = outcallback
        self._fd_read, self._fd_write = os.pipe()
        self._pipe_reader = os.fdopen(self._fd_read)
        self._buffer = []
        self.start()
    def get_buffer(self):
        """Return the list of stripped lines captured so far."""
        return self._buffer
    def fileno(self):
        # file-like protocol: expose the write end to the producer
        return self._fd_write
    def run(self):
        # drain the read end until EOF (i.e. until close() closes the writer)
        for line in iter(self._pipe_reader.readline, ""):
            line = line.strip()
            self._buffer.append(line)
            if self.outcallback:
                self.outcallback(line)
            else:
                print line
        self._pipe_reader.close()
    def close(self):
        """Close the write end and wait for the reader thread to drain."""
        os.close(self._fd_write)
        self.join()
class cd(object):
    """Context manager that temporarily switches the working directory.

    The directory to restore is captured at construction time and
    reinstated when the ``with`` block exits, even on exceptions.
    """

    def __init__(self, new_path):
        self.prev_path = os.getcwd()
        self.new_path = new_path

    def __enter__(self):
        os.chdir(self.new_path)

    def __exit__(self, etype, value, traceback):
        os.chdir(self.prev_path)
class memoized(object):
    '''
    Decorator. Caches a function's return value each time it is called.
    If called later with the same arguments, the cached value is returned
    (not reevaluated).

    https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize
    '''

    def __init__(self, func):
        self.func = func
        self.cache = {}

    def __call__(self, *args):
        # EAFP: actually try to use the args tuple as a dict key.
        # BUG FIX: the old isinstance(args, collections.Hashable) guard was
        # wrong -- a tuple is always an instance of Hashable even when it
        # contains an unhashable element (e.g. a list), so the cache lookup
        # raised TypeError instead of falling back to a plain call.
        try:
            return self.cache[args]
        except KeyError:
            value = self.func(*args)
            self.cache[args] = value
            return value
        except TypeError:
            # uncacheable arguments (a list, for instance):
            # better to not cache than blow up.
            return self.func(*args)

    def __repr__(self):
        '''Return the function's docstring.'''
        return self.func.__doc__

    def __get__(self, obj, objtype):
        '''Support instance methods.'''
        return functools.partial(self.__call__, obj)
def singleton(cls):
    """Class decorator: every call returns one shared instance (PEP 318).

    From http://www.python.org/dev/peps/pep-0318/#examples
    """
    _instances = {}

    def get_instance(*args, **kwargs):
        # build lazily on first use, then always hand back the same object
        try:
            return _instances[cls]
        except KeyError:
            _instances[cls] = cls(*args, **kwargs)
            return _instances[cls]

    return get_instance
def get_systype():
    """Return a lowercase "<system>_<machine>" tag, e.g. "linux_x86_64".

    Falls back to just the system name when the machine field is empty.
    """
    sysname, _, _, _, machine = uname()[:5]
    sysname = sysname.lower()
    if machine:
        return "%s_%s" % (sysname, machine.lower())
    return sysname
def pioversion_to_intstr(version=None):
    """Return the leading numeric components of a version string as ints.

    :param version: version string to parse; defaults to the running
        PlatformIO ``__version__`` (so the original zero-argument call
        keeps working unchanged)
    :return: list of up to three ints, e.g. "2.11.0-dev" -> [2, 11, 0]
    """
    if version is None:
        version = __version__
    vermatch = re.match(r"^([\d\.]+)", version)
    assert vermatch
    return [int(i) for i in vermatch.group(1).split(".")[:3]]
def _get_projconf_option_dir(name, default=None):
    """Resolve a directory option: env var, then platformio.ini, then default.

    ``PLATFORMIO_<NAME>`` in the environment wins outright; otherwise the
    ``[platformio]`` section of the project config is consulted (with ``~``
    expanded and the path made absolute); when neither source has the
    option, *default* is returned unchanged.
    """
    _env_name = "PLATFORMIO_%s" % name.upper()
    if _env_name in os.environ:
        return os.getenv(_env_name)
    try:
        config = get_project_config()
        if (config.has_section("platformio") and
                config.has_option("platformio", name)):
            option_dir = config.get("platformio", name)
            if option_dir.startswith("~"):
                option_dir = expanduser(option_dir)
            return abspath(option_dir)
    except exception.NotPlatformProject:
        # not inside a project directory -- fall through to the default
        pass
    return default
def get_home_dir():
    """Return the PlatformIO home directory, creating it when absent.

    Resolution (env var / platformio.ini / fallback ~/.platformio) is
    delegated to _get_projconf_option_dir.
    """
    fallback = join(expanduser("~"), ".platformio")
    home_dir = _get_projconf_option_dir("home_dir", fallback)
    if not isdir(home_dir):
        os.makedirs(home_dir)
    assert isdir(home_dir)
    return home_dir
def get_lib_dir():
    """Global library storage directory (default: <home>/lib, configurable)."""
    return _get_projconf_option_dir(
        "lib_dir",
        join(get_home_dir(), "lib")
    )
def get_source_dir():
    """Directory containing the installed platformio package sources."""
    return dirname(realpath(__file__))
def get_project_dir():
    """The current project directory (simply the current working directory)."""
    return os.getcwd()
def get_projectsrc_dir():
    """Project source directory (default: <project>/src, configurable)."""
    return _get_projconf_option_dir(
        "src_dir",
        join(get_project_dir(), "src")
    )
def get_projectlib_dir():
    """Project-local library directory: <project>/lib (not configurable)."""
    return join(get_project_dir(), "lib")
def get_pioenvs_dir():
    """Build output directory (default: <project>/.pioenvs, configurable)."""
    return _get_projconf_option_dir(
        "envs_dir",
        join(get_project_dir(), ".pioenvs")
    )
def get_projectdata_dir():
    """Project data directory (default: <project>/data, configurable)."""
    return _get_projconf_option_dir(
        "data_dir",
        join(get_project_dir(), "data")
    )
def get_project_config():
    """Parse and return platformio.ini from the current project directory.

    :raises exception.NotPlatformProject: when the file does not exist
    """
    ini_path = join(get_project_dir(), "platformio.ini")
    if not isfile(ini_path):
        raise exception.NotPlatformProject(get_project_dir())
    parser = ConfigParser()
    parser.read(ini_path)
    return parser
def change_filemtime(path, time):
    """Set both the access and modification timestamps of *path* to *time*."""
    timestamps = (time, time)
    os.utime(path, timestamps)
def is_ci():
    """Return True when running under a CI service (environment CI=true)."""
    ci_flag = os.getenv("CI", "")
    return ci_flag.lower() == "true"
def exec_command(*args, **kwargs):
    """Run a subprocess and capture its output.

    Extra **kwargs are passed through to subprocess.Popen; stdout/stderr
    default to PIPE and may instead be AsyncPipe instances, whose buffered
    lines are joined into the result after the process exits.

    :return: dict with "out", "err" and "returncode" keys
    :raises exception.AbortedByUser: on Ctrl-C while waiting
    """
    result = {
        "out": None,
        "err": None,
        "returncode": None
    }

    default = dict(
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=system() == "Windows"
    )
    default.update(kwargs)
    kwargs = default

    p = subprocess.Popen(*args, **kwargs)
    try:
        result['out'], result['err'] = p.communicate()
        result['returncode'] = p.returncode
    except KeyboardInterrupt:
        raise exception.AbortedByUser()
    finally:
        # always close AsyncPipes so their reader threads terminate
        for s in ("stdout", "stderr"):
            if isinstance(kwargs[s], AsyncPipe):
                kwargs[s].close()

    for s in ("stdout", "stderr"):
        if isinstance(kwargs[s], AsyncPipe):
            result[s[3:]] = "\n".join(kwargs[s].get_buffer())

    for k, v in result.iteritems():
        if v and isinstance(v, basestring):
            # BUG FIX: str.strip() returns a new string; the old code called
            # it and discarded the result, so "out"/"err" kept their
            # surrounding whitespace.  Store the stripped value back.
            result[k] = v.strip()

    return result
def get_serialports():
    """Return available serial ports as dicts with port/description/hwid keys.

    :raises exception.GetSerialPortsError: when pyserial is not installed
    """
    try:
        from serial.tools.list_ports import comports
    except ImportError:
        raise exception.GetSerialPortsError(os.name)
    result = [{"port": p, "description": d, "hwid": h}
              for p, d, h in comports() if p]
    # fix for PySerial
    # (on Darwin, when comports() yields nothing, fall back to /dev/tty.*)
    if not result and system() == "Darwin":
        for p in glob("/dev/tty.*"):
            result.append({"port": p, "description": "", "hwid": ""})
    return result
def get_logicaldisks():
    """Enumerate mounted disks as dicts with "disk" and "name" keys.

    Windows: parse `wmic logicaldisk` output (drive letter + volume name).
    Elsewhere: parse `df` output (mount point; name is its basename).
    """
    if system() == "Windows":
        output = exec_command(
            ["wmic", "logicaldisk", "get", "name,VolumeName"]).get("out")
        pattern = re.compile(r"^([A-Z]{1}\:)\s*(\S+)?")
        entries = []
        for raw_line in output.split("\n"):
            found = pattern.match(raw_line.strip())
            if found:
                entries.append(
                    {"disk": found.group(1), "name": found.group(2)})
        return entries

    output = exec_command(["df"]).get("out")
    pattern = re.compile(r"\d+\%\s+([a-z\d\-_/]+)$", flags=re.I)
    entries = []
    for raw_line in output.split("\n"):
        found = pattern.search(raw_line.strip())
        if found:
            entries.append({"disk": found.group(1),
                            "name": basename(found.group(1))})
    return entries
def get_request_defheaders():
    """Build the default HTTP headers (User-Agent) for PlatformIO API calls."""
    import requests
    agent = "PlatformIO/%s CI/%d %s" % (
        __version__, int(is_ci()), requests.utils.default_user_agent())
    return {"User-Agent": agent}
def get_api_result(path, params=None, data=None):
    """Call the PlatformIO API and return the decoded JSON result.

    POSTs when *data* is given, otherwise GETs.

    :raises exception.APIRequestError: on HTTP errors (preferring the
        API's own error title when present), connection failures, or a
        non-JSON response body
    """
    import requests
    result = None
    r = None

    try:
        if data:
            r = requests.post(__apiurl__ + path, params=params, data=data,
                              headers=get_request_defheaders())
        else:
            r = requests.get(__apiurl__ + path, params=params,
                             headers=get_request_defheaders())
        # decode the body *before* raise_for_status so an API-supplied
        # error message can be surfaced in the HTTPError branch below
        result = r.json()
        r.raise_for_status()
    except requests.exceptions.HTTPError as e:
        if result and "errors" in result:
            raise exception.APIRequestError(result['errors'][0]['title'])
        else:
            raise exception.APIRequestError(e)
    except requests.exceptions.ConnectionError:
        raise exception.APIRequestError(
            "Could not connect to PlatformIO Registry Service")
    except ValueError:
        # r.json() failed -- the server returned something that isn't JSON
        raise exception.APIRequestError(
            "Invalid response: %s" % r.text.encode("utf-8"))
    finally:
        if r:
            r.close()
    return result
@memoized
def _lookup_boards():
    """Load and merge all board definition JSON files (result is cached).

    Built-in boards from <source>/boards are read first; files from
    <home>/boards are merged afterwards and may override built-ins.
    """
    boards = {}
    bdirs = [join(get_source_dir(), "boards")]
    if isdir(join(get_home_dir(), "boards")):
        bdirs.append(join(get_home_dir(), "boards"))

    for bdir in bdirs:
        # sorted() keeps the merge/override order deterministic
        for json_file in sorted(os.listdir(bdir)):
            if not json_file.endswith(".json"):
                continue
            with open(join(bdir, json_file)) as f:
                boards.update(json.load(f))
    return boards
def get_boards(type_=None):
    """Return all known board definitions, or a single one by type.

    :raises exception.UnknownBoard: when *type_* is not a known board
    """
    boards = _lookup_boards()
    if type_ is None:
        return boards
    if type_ not in boards:
        raise exception.UnknownBoard(type_)
    return boards[type_]
@memoized
def _lookup_frameworks():
    """Discover framework build scripts and their metadata (result is cached).

    Each ``builder/scripts/frameworks/<type>.py`` must start with a
    triple-quoted header whose first line is the framework name, last line
    its URL, and the lines in between its description.
    """
    frameworks = {}
    frameworks_path = join(
        get_source_dir(), "builder", "scripts", "frameworks")

    # module names, minus the ".py" suffix, skipping __init__ and friends
    frameworks_list = [f[:-3] for f in os.listdir(frameworks_path)
                       if not f.startswith("__") and f.endswith(".py")]
    for _type in frameworks_list:
        script_path = join(frameworks_path, "%s.py" % _type)
        with open(script_path) as f:
            fcontent = f.read()
            assert '"""' in fcontent
            _doc_start = fcontent.index('"""') + 3
            fdoc = fcontent[
                _doc_start:fcontent.index('"""', _doc_start)].strip()
            doclines = [l.strip() for l in fdoc.splitlines() if l.strip()]
            frameworks[_type] = {
                "name": doclines[0],
                "description": " ".join(doclines[1:-1]),
                "url": doclines[-1],
                "script": script_path
            }
    return frameworks
def get_frameworks(type_=None):
    """Return all known framework descriptions, or a single one by type.

    :raises exception.UnknownFramework: when *type_* is unknown
    """
    frameworks = _lookup_frameworks()
    if type_ is None:
        return frameworks
    # fix: removed the trailing "return frameworks" that followed this
    # if/else -- both branches return, so it was unreachable dead code.
    if type_ not in frameworks:
        raise exception.UnknownFramework(type_)
    return frameworks[type_]
def where_is_program(program, envpath=None):
    """Locate *program* on disk.

    Tries the OS lookup command ("where" on Windows, "which" elsewhere)
    first, then scans the PATH directories directly (also checking a
    ".exe" suffix); falls back to returning *program* unchanged.

    :param envpath: optional PATH value to search instead of the
        inherited one
    """
    # BUG FIX: work on a copy -- the old "env = os.environ" was an alias,
    # so assigning env['PATH'] permanently changed PATH for the whole
    # process, not just for this lookup.
    env = os.environ.copy()
    if envpath:
        env['PATH'] = envpath

    # try OS's built-in commands
    try:
        result = exec_command(
            ["where" if "windows" in get_systype() else "which", program],
            env=env
        )
        if result['returncode'] == 0 and isfile(result['out'].strip()):
            return result['out'].strip()
    except OSError:
        pass

    # look up in $PATH
    for bin_dir in env.get("PATH", "").split(os.pathsep):
        if isfile(join(bin_dir, program)):
            return join(bin_dir, program)
        elif isfile(join(bin_dir, "%s.exe" % program)):
            return join(bin_dir, "%s.exe" % program)

    return program
| |
import os
import sys
import numpy as np
from eval.ner.readers.brown import prepare_cluster_to_word_map as brown_map
from hmtm import HMTM
from inference.sum_product import SumProduct
__author__ = 'sim'
class HMRTM(HMTM):
"""
Hidden Markov Relation Tree Model
"""
    def __init__(self, N, M, R=None, params=None, writeout=False, brown_init_path=None, x_dict=None, approx=False,
                 dirname=None, omit_class_cond=False, omit_emis_cond=False):
        """
        Relation-conditioned hidden Markov tree model.

        :param N: number of states
        :param M: number of observation symbols
        :param R: number of dep relations (relation-specific HMTM only)
        :param params: numpy objects
            -initial_probs
            -transition_probs
            -final_probs
            -emission_probs)
        :param writeout: save hmm details to a file
        :param brown_init_path: path to brown clusters used to initialize
            emission params (random init when None)
        :param x_dict: wordrep vocabulary; required with brown_init_path
        :param approx: use approximate sum-product inference
        :param dirname: output directory name (required)
        :param omit_class_cond: do not condition the class variable on the relation variable
        :param omit_emis_cond: do not condition the output/emission variable on the relation variable
        """
        if dirname is None:
            sys.exit("Output dirname not given.")
        self.dirname = dirname
        self.N = N
        self.start_N = None  # for split-merge
        self.M = M
        self.R = R
        self.omit_class_cond = omit_class_cond
        self.omit_emis_cond = omit_emis_cond
        # initial state probability vector
        # (relation axis R only present when the variable is conditioned)
        if self.omit_class_cond:
            self.initial_probs = np.zeros(N, 'f')
            self.transition_probs = np.zeros([N, N], 'f')
            self.final_probs = np.zeros(N, 'f')
        else:
            self.initial_probs = np.zeros([N, R], 'f')
            self.transition_probs = np.zeros([N, N, R], 'f')
            self.final_probs = np.zeros([N, R], 'f')
        if self.omit_emis_cond:
            self.emission_probs = np.zeros([M, N], 'f')
        else:
            self.emission_probs = np.zeros([M, N, R], 'f')
        self.params_fixed_path = None
        self.params_fixed_type = None  # random init or trained init; set by experimental script
        self.brown_init_path = brown_init_path
        if not params:
            if brown_init_path is None:
                self.initialize_params()
                self.params_exist = False
            else:
                if x_dict is None:
                    sys.exit("wordrep vocab missing")
                self.initialize_brown_params(self.brown_init_path, x_dict, dist_even=True)
                self.params_exist = False
        else:
            # caller supplied trained matrices; coerce them to float32
            try:
                (self.initial_probs,
                 self.transition_probs,
                 self.final_probs,
                 self.emission_probs) = params
                self.initial_probs = self.initial_probs.astype('f', copy=False)
                self.transition_probs = self.transition_probs.astype('f', copy=False)
                self.final_probs = self.final_probs.astype('f', copy=False)
                self.emission_probs = self.emission_probs.astype('f', copy=False)
                self.params_exist = True
            except ValueError:
                print("Number of provided model parameters not right.")
        # for updates in em_multiprocess
        self.total_ll = 0.0
        # Count matrices; use 64 dtype here to avoid overflow
        # NOTE(review): always allocated with the relation axis R even when
        # omit_class_cond/omit_emis_cond is set; clear_counts() reallocates
        # them with the reduced shapes later -- confirm intentional.
        self.initial_counts = np.zeros([self.N, self.R])
        self.transition_counts = np.zeros([self.N, self.N, self.R])
        self.final_counts = np.zeros([self.N, self.R])
        self.emission_counts = np.zeros([self.M, self.N, self.R])
        # storing log likelihoods per iteration
        self.lls = []
        self.sanity_check_init()
        self.inference = SumProduct(approximate=approx)
        # experiment bookkeeping, filled in later by driver scripts:
        self.max_iter = None
        self.n_proc = None
        self.n_sent = None
        self.data_name = None
        self.data_n_tokens = None
        #online EM:
        self.minibatch_size = None
        self.alpha = None
        self.a = None
        self.permute = None
        self.posttypes = None
        self.hmm_type = None
        self.writeout = writeout
    def sanity_check_init(self, logger=None):
        """ Verify dimensions and column-stochasticness

        Asserts every parameter matrix has the shape implied by
        omit_class_cond/omit_emis_cond, and that probability columns sum
        to one (transition and final probs checked jointly, since they are
        tied).  On failure the offending sums are logged, or printed when
        no logger is given.
        """
        if self.omit_class_cond:
            assert self.initial_probs.shape == (self.N,)
            assert self.transition_probs.shape == (self.N, self.N)
            assert self.final_probs.shape == (self.N,)
        else:
            assert self.initial_probs.shape == (self.N, self.R)
            assert self.transition_probs.shape == (self.N, self.N, self.R)
            assert self.final_probs.shape == (self.N, self.R)
        if self.omit_emis_cond:
            assert self.emission_probs.shape == (self.M, self.N)
        else:
            assert self.emission_probs.shape == (self.M, self.N, self.R)
        if self.omit_class_cond:
            # should be 1 up to some numerical precision:
            assert np.isclose(np.sum(self.initial_probs), 1, atol=1e-02), logger.debug(
                np.sum(self.initial_probs)) if logger is not None else print(np.sum(self.initial_probs))
            # combined transition and final probs must sum to one:
            stacked_probs = np.vstack((self.transition_probs, self.final_probs))
        else:
            for r in range(self.R):
                assert np.isclose(np.sum(self.initial_probs[:, r]), 1, atol=1e-02), logger.debug(
                    np.sum(self.initial_probs[:, r])) if logger is not None else print(np.sum(self.initial_probs[:, r]))
                # combined transition and final probs must sum to one:
                stacked_probs = np.vstack((self.transition_probs[:, :, r], self.final_probs[:, r]))
        # NOTE(review): this check sits *outside* the loop above, so in the
        # relation-conditioned case only the last relation's stack is
        # verified -- confirm whether it should be indented into the loop.
        assert np.allclose(np.sum(stacked_probs, 0), 1, atol=1e-02), logger.debug(
            np.sum(stacked_probs, 0)) if logger is not None else print(np.sum(stacked_probs, 0))
        if self.omit_emis_cond:
            assert np.allclose(np.sum(self.emission_probs, 0), 1, atol=1e-02), logger.debug(
                np.sum(self.emission_probs, 0)) if logger is not None else print(np.sum(self.emission_probs, 0))
        else:
            for r in range(self.R):
                assert np.allclose(np.sum(self.emission_probs[:, :, r], 0), 1, atol=1e-02), logger.debug(
                    np.sum(self.emission_probs[:, :, r], 0)) if logger is not None else print(
                    np.sum(self.emission_probs[:, :, r], 0))
def init_rand_params(self):
if self.omit_class_cond:
initial_probs = np.random.rand(self.N).astype('f')
transition_probs = np.random.rand(self.N, self.N).astype('f')
final_probs = np.random.rand(self.N).astype('f')
else:
initial_probs = np.random.rand(self.N, self.R).astype('f')
transition_probs = np.random.rand(self.N, self.N, self.R).astype('f')
final_probs = np.random.rand(self.N, self.R).astype('f')
if self.omit_emis_cond:
emission_probs = np.random.rand(self.M, self.N).astype('f')
else:
emission_probs = np.random.rand(self.M, self.N, self.R).astype('f')
return initial_probs, transition_probs, final_probs, emission_probs
def normalize_params(self, initial_probs, transition_probs, final_probs, emission_probs):
if self.omit_class_cond:
self.initial_probs = initial_probs / np.sum(initial_probs)
sums = np.sum(transition_probs, 0) + final_probs # sum along columns
self.transition_probs = transition_probs / sums # sums gets broadcast
self.final_probs = final_probs / sums
else:
self.initial_probs = np.zeros([self.N, self.R], 'f')
self.transition_probs = np.zeros([self.N, self.N, self.R], 'f')
self.final_probs = np.zeros([self.N, self.R], 'f')
for r in range(self.R):
self.initial_probs[:, r] = initial_probs[:, r] / np.sum(initial_probs[:, r])
# don't forget to add final_probs to transition_probs
sums = np.sum(transition_probs[:, :, r], 0) + final_probs[:, r] # sum along columns
self.transition_probs[:, :, r] = transition_probs[:, :, r] / sums
self.final_probs[:, r] = final_probs[:, r] / sums
if self.omit_emis_cond:
sums = np.sum(emission_probs, 0) # sum along columns
self.emission_probs = emission_probs / sums
else:
self.emission_probs = np.zeros([self.M, self.N, self.R], 'f')
for r in range(self.R):
sums = np.sum(emission_probs[:, :, r], 0) # sum along columns
self.emission_probs[:, :, r] = emission_probs[:, :, r] / sums
def clear_counts(self, smoothing=1e-8):
""" Clear the count tables for another iteration.
Smoothing might be preferred to avoid "RuntimeWarning: divide by zero encountered in log"
"""
# use 64 dtype here to avoid overflow
if self.omit_class_cond:
self.initial_counts = np.zeros(self.N)
self.transition_counts = np.zeros([self.N, self.N])
self.final_counts = np.zeros(self.N)
else:
self.initial_counts = np.zeros([self.N, self.R])
self.transition_counts = np.zeros([self.N, self.N, self.R])
self.final_counts = np.zeros([self.N, self.R])
if self.omit_emis_cond:
self.emission_counts = np.zeros([self.M, self.N])
else:
self.emission_counts = np.zeros([self.M, self.N, self.R])
self.initial_counts.fill(smoothing)
self.transition_counts.fill(smoothing)
self.final_counts.fill(smoothing)
self.emission_counts.fill(smoothing)
    def treerepr_scores(self, tree):
        """
        Tree-analogue to trellis_scores; potentials depend on the relation

        Attaches log-probability potentials to the tree: initial potentials
        on leaves, transition potentials on inner edges, final potentials
        on edges into the root, emission potentials on all non-root nodes.
        :param tree: tree graph
        """
        if self.omit_class_cond:
            # every leaf gets initial_probs
            for leaf in tree.get_leaves():
                leaf.set_initial_potentials(np.log(self.initial_probs))
            # every edge gets transition_probs
            for edge in tree.get_edges_not_to_root():
                edge.set_potentials(np.log(self.transition_probs))
            # every edge to # root gets final_probs
            for edge in tree.get_edges_to_root():
                edge.set_potentials(np.log(self.final_probs))
        else:
            # every leaf gets initial_probs (sliced by the leaf's relation)
            for leaf in tree.get_leaves():
                leaf.set_initial_potentials(np.log(self.initial_probs[:, leaf.rel]))
            # every edge gets transition_probs (sliced by the parent's relation)
            for edge in tree.get_edges_not_to_root():
                edge.set_potentials(np.log(self.transition_probs[:, :, edge.parent.rel]))
            # every edge to # root gets final_probs
            for edge in tree.get_edges_to_root():
                edge.set_potentials(
                    np.log(self.final_probs[:, edge.child.rel]))  # because trans and final probs are tied (
                # should sum to 1 columwise when stacked, we have final probs conditioned on child's rel
        if self.omit_emis_cond:
            # every node except root gets emission_probs
            for node in tree.get_nonroots():
                node.set_potentials(np.log(self.emission_probs[node.get_name(), :]))
        else:
            # every node except root gets emission_probs
            for node in tree.get_nonroots():
                node.set_potentials(np.log(self.emission_probs[node.get_name(), :, node.rel]))
    def update_counts_from_tree(self, tree):
        """
        In E-step:
        Update the count matrices with partials from one tree

        Leaf posteriors feed the initial counts, edge posteriors the
        transition/final counts, node posteriors the emission counts.
        BUG: can overflow because of the large log posteriors in the case of a huge tree
        get extremely big when taking exp
        TODO: fix by postponing the exp from compute_posteriors() until compute_parameters()
        """
        if self.omit_class_cond:
            self.initial_counts += sum([leaf.posterior for leaf in tree.get_leaves()])
            for edge in tree.get_edges_not_to_root():
                self.transition_counts += edge.posterior
            self.final_counts += sum([edge.posterior for edge in tree.get_edges_to_root()])
        else:
            # relation-conditioned: accumulate into the slice per relation
            for leaf in tree.get_leaves():
                self.initial_counts[:, leaf.rel] += leaf.posterior
            for edge in tree.get_edges_not_to_root():
                self.transition_counts[:, :, edge.parent.rel] += edge.posterior
            for edge in tree.get_edges_to_root():
                self.final_counts[:, edge.child.rel] += edge.posterior
        if self.omit_emis_cond:
            for node in tree.get_nonroots():
                self.emission_counts[node.get_name(), :] += node.posterior
        else:
            for node in tree.get_nonroots():
                self.emission_counts[node.get_name(), :, node.rel] += node.posterior
    def compute_online_parameters(self, t):
        """
        In M-step of online EM: normalize the counts; interpolate between the old parameters
        and the contribution of new probs.
        (1-eta_t)*param^(t-1) + eta_t*probs
        Note: different from Liang and Klein 2009, and Cappe 2009 in that we interpolate probs directly
        Doesn't exploit the sparsity of the counts.
        :param t: minibatch (update) number
        """
        # stepsize
        eta = self.compute_eta(t)
        # guard against NaNs leaking in from the E-step accumulation
        assert not np.isnan(self.initial_counts.sum())
        assert not np.isnan(self.transition_counts.sum())
        assert not np.isnan(self.emission_counts.sum())
        assert not np.isnan(self.final_counts.sum())
        if self.omit_class_cond:
            self.initial_probs = (
                (1 - eta) * self.initial_probs + eta * (self.initial_counts / np.sum(self.initial_counts))).astype('f')
            # transition and final probs share one normalizer (tied columns)
            sums = np.sum(self.transition_counts, 0) + self.final_counts
            self.transition_probs = ((1 - eta) * self.transition_probs + eta * (self.transition_counts / sums)).astype(
                'f')
            self.final_probs = ((1 - eta) * self.final_probs + eta * (self.final_counts / sums)).astype('f')
        else:
            for r in range(self.R):
                self.initial_probs[:, r] = ((1 - eta) * self.initial_probs[:, r] + eta * (
                    self.initial_counts[:, r] / np.sum(self.initial_counts[:, r]))).astype('f')
                sums = np.sum(self.transition_counts[:, :, r], 0) + self.final_counts[:, r]
                self.transition_probs[:, :, r] = (
                    (1 - eta) * self.transition_probs[:, :, r] + eta * (self.transition_counts[:, :, r] / sums)).astype(
                    'f')
                self.final_probs[:, r] = (
                    (1 - eta) * self.final_probs[:, r] + eta * (self.final_counts[:, r] / sums)).astype('f')
        if self.omit_emis_cond:
            self.emission_probs = (
                (1 - eta) * self.emission_probs + eta * (
                    self.emission_counts / np.sum(self.emission_counts, 0))).astype('f')
        else:
            for r in range(self.R):
                self.emission_probs[:, :, r] = ((1 - eta) * self.emission_probs[:, :, r] + eta * (
                    self.emission_counts[:, :, r] / np.sum(self.emission_counts[:, :, r], 0))).astype('f')
    def em_process_multiseq(self, trees):
        """
        Makes a local copy of count matrices, the worker updates them for all trees
        and finally returns them as yet another partial counts.

        Returns (initial, transition, final, emission, total_ll) or None on
        KeyboardInterrupt (swallowed so worker pools shut down quietly).
        NOTE(review): the four assignments below bind *aliases*, not copies,
        so self.*_counts are mutated too.  Harmless in fork-based worker
        processes (separate memory), but confirm before calling in-process.
        """
        try:
            total_ll = 0
            initial_counts = self.initial_counts
            transition_counts = self.transition_counts
            final_counts = self.final_counts
            emission_counts = self.emission_counts
            c = 0
            for c, tree in enumerate(trees, 1):
                # prepare tree representation
                self.treerepr_scores(tree)
                # obtain node and edge posteriors and ll:
                self.inference.compute_posteriors(tree, self.N)
                if self.omit_class_cond:
                    initial_counts += sum([leaf.posterior for leaf in tree.get_leaves()])
                    for edge in tree.get_edges_not_to_root():
                        transition_counts += edge.posterior
                    final_counts += sum([edge.posterior for edge in tree.get_edges_to_root()])
                else:
                    # relation-conditioned accumulation (cf. update_counts_from_tree)
                    for leaf in tree.get_leaves():
                        initial_counts[:, leaf.rel] += leaf.posterior
                    for edge in tree.get_edges_not_to_root():
                        transition_counts[:, :, edge.parent.rel] += edge.posterior
                    for edge in tree.get_edges_to_root():
                        final_counts[:, edge.child.rel] += edge.posterior
                if self.omit_emis_cond:
                    for node in tree.get_nonroots():
                        emission_counts[node.get_name(), :] += node.posterior
                else:
                    for node in tree.get_nonroots():
                        emission_counts[node.get_name(), :, node.rel] += node.posterior
                total_ll += tree.get_ll()
                # free per-tree structures as we go
                tree.clear_tree()
            return initial_counts, transition_counts, final_counts, emission_counts, total_ll
        except KeyboardInterrupt:
            pass
def compute_parameters(self, logger):
"""
In M-step: normalize the counts to obtain true parameters.
"""
if logger is not None:
logger.info("Recomputing parameters.")
if self.omit_class_cond:
self.initial_probs = (self.initial_counts / np.sum(self.initial_counts)).astype(
'f') # probs should be 32 dtype
sums = np.sum(self.transition_counts, 0) + self.final_counts
self.transition_probs = (self.transition_counts / sums).astype('f')
self.final_probs = (self.final_counts / sums).astype('f')
else:
for r in range(self.R):
self.initial_probs[:, r] = (self.initial_counts[:, r] / np.sum(self.initial_counts[:, r])).astype(
'f') # probs should be 32 dtype
sums = np.sum(self.transition_counts[:, :, r], 0) + self.final_counts[:, r]
self.transition_probs[:, :, r] = (self.transition_counts[:, :, r] / sums).astype('f')
self.final_probs[:, r] = (self.final_counts[:, r] / sums).astype('f')
if self.omit_emis_cond:
self.emission_probs = (self.emission_counts / np.sum(self.emission_counts, 0)).astype('f')
else:
for r in range(self.R):
self.emission_probs[:, :, r] = (
self.emission_counts[:, :, r] / np.sum(self.emission_counts[:, :, r], 0)).astype('f')
    def split_params(self, noise_amount):
        """
        Split states in two. Each state parameters are copied and some noise added.

        Every state is duplicated (via repeat) along its state axes, and
        each copy is perturbed multiplicatively with gaussian noise of
        stddev *noise_amount*, scaled by the parameter values themselves so
        small probabilities receive proportionally small noise.  Updates
        self.N to the doubled size and returns the (unnormalized) matrices.
        """
        split_dim = self.N * 2
        if self.omit_class_cond:
            initial_probs_split = self.initial_probs.repeat(2, axis=0)  # split along columns
            r = np.random.normal(0, noise_amount, initial_probs_split.shape)  # noise
            initial_probs_split += initial_probs_split * r  # downscale r according to individual values in initial_probs...
            transition_probs_split = self.transition_probs.repeat(2, axis=1).repeat(2,
                                                                                    axis=0)  # split along columns then rows
            r = np.random.normal(0, noise_amount, transition_probs_split.shape)  # noise
            transition_probs_split += transition_probs_split * r  # downscale r according to individual values in initial_probs...
            final_probs_split = self.final_probs.repeat(2, axis=0)  # split along columns
            r = np.random.normal(0, noise_amount, final_probs_split.shape)  # noise
            final_probs_split += final_probs_split * r  # downscale r according to individual values in initial_probs...
        else:
            # relation-conditioned: split each relation slice independently
            initial_probs_split = np.zeros([split_dim, self.R], 'f')
            transition_probs_split = np.zeros([split_dim, split_dim, self.R], 'f')
            final_probs_split = np.zeros([split_dim, self.R], 'f')
            for rel in range(self.R):
                initial_probs_split[:, rel] = self.initial_probs[:, rel].repeat(2, axis=0)  # split along columns
                r = np.random.normal(0, noise_amount, initial_probs_split[:, rel].shape)  # noise
                initial_probs_split[:, rel] += initial_probs_split[:,
                                               rel] * r  # downscale r according to individual values in initial_probs...
                transition_probs_split[:, :, rel] = self.transition_probs[:, :, rel].repeat(2, axis=1).repeat(2,
                                                                                                              axis=0)  # split along columns then rows
                r = np.random.normal(0, noise_amount, transition_probs_split[:, :, rel].shape)  # noise
                transition_probs_split[:, :, rel] += transition_probs_split[:, :,
                                                     rel] * r  # downscale r according to individual values in initial_probs...
                final_probs_split[:, rel] = self.final_probs[:, rel].repeat(2, axis=0)  # split along columns
                r = np.random.normal(0, noise_amount, final_probs_split[:, rel].shape)  # noise
                final_probs_split[:, rel] += final_probs_split[:,
                                             rel] * r  # downscale r according to individual values in initial_probs...
        if self.omit_emis_cond:
            emission_probs_split = self.emission_probs.repeat(2, axis=1)  # split along columns
            r = np.random.normal(0, noise_amount, emission_probs_split.shape)  # noise
            emission_probs_split += emission_probs_split * r  # downscale r according to individual values in initial_probs...
        else:
            emission_probs_split = np.zeros([self.M, split_dim, self.R], 'f')
            for rel in range(self.R):
                emission_probs_split[:, :, rel] = self.emission_probs[:, :, rel].repeat(2,
                                                                                        axis=1)  # split along columns
                r = np.random.normal(0, noise_amount, emission_probs_split[:, :, rel].shape)  # noise
                emission_probs_split[:, :, rel] += emission_probs_split[:, :,
                                                   rel] * r  # downscale r according to individual values in initial_probs...
        # all four matrices must agree on the new state dimension
        assert initial_probs_split.shape[0] == final_probs_split.shape[0] == transition_probs_split.shape[0] == \
               emission_probs_split.shape[1]
        self.N = transition_probs_split.shape[0]
        return initial_probs_split, transition_probs_split, final_probs_split, emission_probs_split
    def initialize_brown_params(self, brown_init_path, x_dict, c_factor=1000, dist_even=True):
        """ init parameters to be non-random column-stochastic matrices
        based on brown clusters
        Concerns emission params only (for now) although transitions could be approximated somehow as well.
        Assume for now that n of clusters = state size.
        Some words might not be found in clusters.
        For w belonging to cluster c_x, we put most of the prob mass to w|c_x, and distribute remaining prob mass
        unevenly or evenly among all other c_y.
        First initialize randomly, then all w entries belonging to c_x are multiplied by c_factor; finally, normalize.

        :param brown_init_path: path to the brown clustering output file
        :param x_dict: wordrep vocabulary mapping words to ids
        :param c_factor: multiplicative boost for words in their own cluster
        :param dist_even: start emissions from one shared random constant so
            the leftover mass is spread evenly over the other clusters
        """
        initial_probs, transition_probs, final_probs, emission_probs = self.init_rand_params()
        c_to_w = brown_map(brown_init_path)
        # one brown cluster per hidden state
        assert len(c_to_w) == self.N
        if dist_even:
            if self.omit_emis_cond:
                emission_probs = np.zeros((self.M, self.N)).astype('f') + np.random.rand()
            else:
                emission_probs = np.zeros((self.M, self.N, self.R)).astype('f') + np.random.rand()
        for c, c_id in enumerate(c_to_w):
            w_ids = self.get_label_ids(c_to_w[c_id], x_dict)  # x_dict.get_label_name(w_id)
            # boost this cluster's own words (per relation when conditioned)
            if self.omit_emis_cond:
                emission_probs[w_ids, c] *= c_factor
            else:
                for r in range(self.R):
                    emission_probs[w_ids, c, r] *= c_factor
        self.normalize_params(initial_probs, transition_probs, final_probs, emission_probs)
def posterior_cont_type_decode_corpus(self, dataset, rep_dataset, logger=None, ignore_rel=None):
"""Run posterior_decode at corpus level,
return continuous rep per type (avg. over posteriors in all
instances). """
if self.posttypes is None:
if self.dirname is not None:
assert len(dataset.wordrep_dict) == len(rep_dataset.x_dict)
posttype_f = "{}posttype{}.npy".format(self.dirname, ignore_rel or "")
self.posttypes = np.load(posttype_f) if os.path.exists(posttype_f) else self.obtain_posttypes(
posttype_f, rep_dataset, len(dataset.wordrep_dict), logger=logger, ignore_rel=ignore_rel)
assert self.posttypes.shape == (len(dataset.wordrep_dict), self.N, self.R)
else:
sys.exit("dirname not set properly")
if logger is not None: logger.info("Decoding on eval datasets.")
# assign posteriors to types in dataset
for seq in dataset.seq_list:
if seq.t is None:
print("seq.t is None")
seq.u = None
continue
seq.u = {}
for node in seq.t.get_nonroots():
post = self.posttypes[node.name, :, node.rel]
if not np.isnan(
np.sum(post)) and node.rel is not ignore_rel: # second check probably redundant as isnan anyway
seq.u[node.index] = post
seq.t = None
def obtain_posttypes_cumul(self, posttype_f, rep_dataset, n_types, logger=None, ignore_rel=None):
super().obtain_posttypes(posttype_f=posttype_f, rep_dataset=rep_dataset, n_types=n_types, logger=logger)
def obtain_posttypes(self, posttype_f, rep_dataset, n_types, logger=None, ignore_rel=None):
if logger is not None: logger.info("Obtaining posterior type counts.")
# obtain type posteriors
type_posteriors = np.zeros((n_types, self.N, self.R))
type_freq = np.zeros((n_types, self.R))
for count, tree in enumerate(rep_dataset.train):
# posteriors is dict with keys starting at 1
if tree is None:
# print("tree is None")
continue
if logger is not None:
if count % 1000 == 0:
logger.debug(count)
posteriors = self.posterior_decode(tree, cont=True, ignore_rel=ignore_rel)
for node in tree.get_nonroots():
if node.index in posteriors:
type_posteriors[node.name, :, node.rel] += posteriors[node.index]
type_freq[node.name, node.rel] += 1
# normalize
for r in range(self.R):
type_posteriors[:, :, r] /= type_freq[:, r].reshape(-1, 1) # yields NaNs, avoided by the parent method
np.save(posttype_f, type_posteriors)
return type_posteriors
def write_add(self, out):
out.write("Number of relations: {}\n".format(self.R))
out.write("Omit class conditioning: {}\n".format(self.omit_class_cond))
out.write("Omit emis conditioning: {}\n".format(self.omit_emis_cond))
| |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from oslo.config import cfg
import webob
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import config_drive
from nova.api.openstack.compute.plugins.v3 import servers
from nova.compute import api as compute_api
from nova.compute import flavors
from nova import db
from nova.network import manager
from nova.openstack.common import jsonutils
from nova.openstack.common import rpc
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.image import fake
# Module-level handles shared by all test cases below.
CONF = cfg.CONF
# Canonical instance UUID served by the fakes; fake_gen_uuid returns it
# so every "created" instance gets a predictable id.
FAKE_UUID = fakes.FAKE_UUID
def fake_gen_uuid():
    """Deterministic stand-in for uuid.uuid4 used by the stubs below."""
    return FAKE_UUID
def return_security_group(context, instance_id, security_group_id):
    """No-op stub for db.instance_add_security_group."""
    return None
class ConfigDriveTest(test.TestCase):
    """Verify that the os-config-drive extension exposes a
    ``config_drive`` attribute on server show and detail responses."""

    def setUp(self):
        super(ConfigDriveTest, self).setUp()
        ext_info = plugins.LoadedExtensionInfo()
        self.Controller = config_drive.ConfigDriveController(
            extension_info=ext_info)
        # Replace networking, rate limiting and the image service with
        # in-memory fakes so the WSGI app can run in isolation.
        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)
        fake.stub_out_image_service(self.stubs)

    def test_show(self):
        self.stubs.Set(db, 'instance_get',
                       fakes.fake_instance_get())
        self.stubs.Set(db, 'instance_get_by_uuid',
                       fakes.fake_instance_get())
        req = webob.Request.blank('/v3/servers/1')
        req.headers['Content-Type'] = 'application/json'
        response = req.get_response(fakes.wsgi_app_v3(
            init_only=('servers', 'os-config-drive')))
        # assertEqual / assertIn instead of the deprecated assertEquals
        # and assertTrue(x in y): failures report the actual values.
        self.assertEqual(response.status_int, 200)
        res_dict = jsonutils.loads(response.body)
        self.assertIn('config_drive', res_dict['server'])

    def test_detail_servers(self):
        self.stubs.Set(db, 'instance_get_all_by_filters',
                       fakes.fake_instance_get_all_by_filters())
        self.stubs.Set(db, 'instance_get_by_uuid',
                       fakes.fake_instance_get())
        req = fakes.HTTPRequestV3.blank('/v3/servers/detail')
        res = req.get_response(fakes.wsgi_app_v3(
            init_only=('servers', 'os-config-drive')))
        server_dicts = jsonutils.loads(res.body)['servers']
        self.assertNotEqual(len(server_dicts), 0)
        for server_dict in server_dicts:
            self.assertIn('config_drive', server_dict)
class ServersControllerCreateTest(test.TestCase):
    # Server-create tests for the config_drive attribute: the compute,
    # db and rpc layers are stubbed out to an in-memory instance cache
    # so controller.create() runs end-to-end without real services.
    def setUp(self):
        """Shared implementation for tests below that create instance."""
        super(ServersControllerCreateTest, self).setUp()
        self.flags(verbose=True,
                   enable_instance_password=True)
        # In-memory "database" of created instances, keyed both ways.
        self.instance_cache_num = 0
        self.instance_cache_by_id = {}
        self.instance_cache_by_uuid = {}
        ext_info = plugins.LoadedExtensionInfo()
        self.controller = servers.ServersController(extension_info=ext_info)
        # Second controller with os-config-drive blacklisted, so tests
        # can check the attribute is ignored when the extension is off.
        CONF.set_override('extensions_blacklist', 'os-config-drive',
                          'osapi_v3')
        self.no_config_drive_controller = servers.ServersController(
            extension_info=ext_info)
        def instance_create(context, inst):
            # Fake db.instance_create: fabricate a plausible instance
            # record and remember it in the per-test caches.
            inst_type = flavors.get_flavor_by_flavor_id(3)
            image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
            def_image_ref = 'http://localhost/images/%s' % image_uuid
            self.instance_cache_num += 1
            instance = {
                'id': self.instance_cache_num,
                'display_name': inst['display_name'] or 'test',
                'uuid': FAKE_UUID,
                'instance_type': dict(inst_type),
                'access_ip_v4': '1.2.3.4',
                'access_ip_v6': 'fead::1234',
                'image_ref': inst.get('image_ref', def_image_ref),
                'user_id': 'fake',
                'project_id': 'fake',
                'reservation_id': inst['reservation_id'],
                "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
                "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
                "config_drive": None,
                "progress": 0,
                "fixed_ips": [],
                "task_state": "",
                "vm_state": "",
            }
            self.instance_cache_by_id[instance['id']] = instance
            self.instance_cache_by_uuid[instance['uuid']] = instance
            return instance
        def instance_get(context, instance_id):
            """Stub for compute/api create() pulling in instance after
            scheduling
            """
            return self.instance_cache_by_id[instance_id]
        def instance_update(context, uuid, values):
            instance = self.instance_cache_by_uuid[uuid]
            instance.update(values)
            return instance
        def rpc_call_wrapper(context, topic, msg, timeout=None):
            """Stub out the scheduler creating the instance entry."""
            if (topic == CONF.scheduler_topic and
                    msg['method'] == 'run_instance'):
                request_spec = msg['args']['request_spec']
                num_instances = request_spec.get('num_instances', 1)
                instances = []
                for x in xrange(num_instances):
                    instances.append(instance_create(context,
                        request_spec['instance_properties']))
                return instances
        def server_update(context, instance_uuid, params):
            inst = self.instance_cache_by_uuid[instance_uuid]
            inst.update(params)
            return (inst, inst)
        def fake_method(*args, **kwargs):
            pass
        def project_get_networks(context, user_id):
            return dict(id='1', host='localhost')
        def queue_get_for(context, *args):
            return 'network_topic'
        # Wire all of the stubs above into the db/rpc/network layers.
        fakes.stub_out_rate_limiting(self.stubs)
        fakes.stub_out_key_pair_funcs(self.stubs)
        fake.stub_out_image_service(self.stubs)
        fakes.stub_out_nw_api(self.stubs)
        self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
        self.stubs.Set(db, 'instance_add_security_group',
                       return_security_group)
        self.stubs.Set(db, 'project_get_networks',
                       project_get_networks)
        self.stubs.Set(db, 'instance_create', instance_create)
        self.stubs.Set(db, 'instance_system_metadata_update',
                       fake_method)
        self.stubs.Set(db, 'instance_get', instance_get)
        self.stubs.Set(db, 'instance_update', instance_update)
        self.stubs.Set(rpc, 'cast', fake_method)
        self.stubs.Set(rpc, 'call', rpc_call_wrapper)
        self.stubs.Set(db, 'instance_update_and_get_original',
                       server_update)
        self.stubs.Set(rpc, 'queue_get_for', queue_get_for)
        self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
                       fake_method)
    def _test_create_extra(self, params, no_image=False,
                           override_controller=None):
        # POST a minimal server body merged with `params`; callers assert
        # inside their compute_api.API.create stubs.
        image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
        if no_image:
            server.pop('imageRef', None)
        server.update(params)
        body = dict(server=server)
        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        if override_controller:
            server = override_controller.create(req, body).obj['server']
        else:
            server = self.controller.create(req, body).obj['server']
    def test_create_instance_with_config_drive_disabled(self):
        # With the extension blacklisted, config_drive must not reach
        # compute_api.API.create.
        config_drive = [{'config_drive': 'foo'}]
        params = {'config_drive': config_drive}
        old_create = compute_api.API.create
        def create(*args, **kwargs):
            self.assertNotIn('config_drive', kwargs)
            return old_create(*args, **kwargs)
        self.stubs.Set(compute_api.API, 'create', create)
        self._test_create_extra(params,
            override_controller=self.no_config_drive_controller)
    def test_create_instance_with_config_drive(self):
        # With the extension enabled, config_drive must be forwarded.
        def create(*args, **kwargs):
            self.assertIn('config_drive', kwargs)
            return old_create(*args, **kwargs)
        old_create = compute_api.API.create
        self.stubs.Set(compute_api.API, 'create', create)
        image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        flavor_ref = 'http://localhost/v3/flavors/3'
        body = {
            'server': {
                'name': 'config_drive_test',
                'imageRef': image_href,
                'flavorRef': flavor_ref,
                'metadata': {
                    'hello': 'world',
                    'open': 'stack',
                },
                'personality': {},
                'config_drive': "true",
            },
        }
        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        res = self.controller.create(req, body).obj
        server = res['server']
        self.assertEqual(FAKE_UUID, server['id'])
    def test_create_instance_with_bad_config_drive(self):
        # A non-boolean config_drive value (here, a random UUID) must be
        # rejected with 400.
        image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        flavor_ref = 'http://localhost/v3/flavors/3'
        body = {
            'server': {
                'name': 'config_drive_test',
                'imageRef': image_href,
                'flavorRef': flavor_ref,
                'metadata': {
                    'hello': 'world',
                    'open': 'stack',
                },
                'personality': {},
                'config_drive': image_href,
            },
        }
        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, req, body)
    def test_create_instance_without_config_drive(self):
        # Omitting config_drive entirely is valid.
        image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        flavor_ref = 'http://localhost/v3/flavors/3'
        body = {
            'server': {
                'name': 'config_drive_test',
                'imageRef': image_href,
                'flavorRef': flavor_ref,
                'metadata': {
                    'hello': 'world',
                    'open': 'stack',
                },
                'personality': {},
            },
        }
        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        res = self.controller.create(req, body).obj
        server = res['server']
        self.assertEqual(FAKE_UUID, server['id'])
class TestServerCreateRequestXMLDeserializer(test.TestCase):
    """XML deserialization of server-create requests carrying the
    ``config_drive`` attribute."""

    def setUp(self):
        super(TestServerCreateRequestXMLDeserializer, self).setUp()
        ext_info = plugins.LoadedExtensionInfo()
        controller = servers.ServersController(extension_info=ext_info)
        self.deserializer = servers.CreateDeserializer(controller)

    def test_request_with_config_drive(self):
        serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
        name="config_drive_test"
        imageRef="1"
        flavorRef="1"
        config_drive="true"/>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "server": {
                "name": "config_drive_test",
                "imageRef": "1",
                "flavorRef": "1",
                "config_drive": "true"
            },
        }
        # assertEqual: assertEquals is a deprecated unittest alias.
        self.assertEqual(request['body'], expected)
| |
import functools
import itertools
import math
import operator
import pdb

import matplotlib.pyplot as plt
import numpy as np
######################################################################
#
# Learning algorithms and helpers
#
######################################################################
# Ordinary least squares regression
# X is an n x d matrix
# Y is an n x 1 matrix
# returns weights: d x 1 matrix
def ols(X, y):
    """Ordinary least squares.

    X: n x d np.matrix of inputs; y: n x 1 np.matrix of targets.
    Returns the d x 1 weight matrix (X^T X)^-1 X^T y.
    """
    gram = X.T * X
    return gram.I * X.T * y
# "Ridge" regression with lambda = l; otherwise like ols
# Penalizes the offset weight, which is sometimes undesirable
def olsr(X, y, l = 0):
    """Ridge regression with penalty l; reduces to ols when l == 0.

    Note: the identity penalty also shrinks the offset weight, which is
    sometimes undesirable.
    """
    d = X.shape[1]
    reg = l * np.identity(d)
    return (X.T * X + reg).I * X.T * y
# Given an order for a polynomial feature space, d
# return a *function* that maps a single value into a vector of d+1 features
def polynomialFeatures(d):
    """Return a feature map for an order-d polynomial basis.

    The returned function takes a 1x1 matrix [[x]] and yields the
    1 x (d+1) matrix [1, x, x^2, ..., x^d].
    """
    def phi(xi):
        x = xi[0, 0]
        return np.matrix([[x ** k for k in range(d + 1)]])
    return phi
# Given an order for a polynomial feature space, order
# return a *function* that maps an input vector of dimension d to
# an output vector of dimension D by taking all combinations
def polynomialFeaturesN(order):
    """Return a feature map over all monomials up to degree `order`.

    The returned function takes a 1 x d input row and produces one
    feature per index multiset (combinations with replacement), the
    empty product contributing the constant 1.
    """
    def phi(xi):
        d = xi.shape[1]
        feats = []
        for deg in range(order + 1):
            for idx in itertools.combinations_with_replacement(range(d), deg):
                feats.append(mul(xi[0, j] for j in idx))
        return np.matrix([feats])
    return phi
def mul(seq):
    """Product of the elements of `seq`; 1 for an empty sequence.

    Uses functools.reduce rather than the bare builtin `reduce`, which
    only exists on Python 2 — functools.reduce works on both 2 and 3.
    """
    return functools.reduce(operator.mul, seq, 1)
# Given X, an array of training examples, and b, a bandwidth,
# return a *function* that maps a single value into a vector of features
def RBFs(X, b):
    """Return a feature map of Gaussian RBFs with bandwidth b.

    One RBF is centered on each row of the training matrix X; a trailing
    constant-1 offset feature is appended.
    """
    def kernel(u, v):
        return np.exp(-((np.linalg.norm(u - v) / b) ** 2))
    def phi(xi):
        return np.matrix([kernel(xi, center) for center in X] + [1.0])
    return phi
# Given a function, such as the one generated by polynomialFeatures,
# which takes a d-dimensional vector and returns a D-dimensional
# vector, and an n x d matrix X, return an n x D matrix of feature
# vectors
def applyFeatureFun(phi, X):
    """Map each row of the n x d matrix X through phi and stack the
    resulting feature rows into an n x D matrix."""
    rows = [phi(X[i, :]) for i in range(X.shape[0])]
    return np.vstack(rows)
# Given a D x 1 weight vector and a "feature function" that takes a
# d-dimensional vector into a D x 1 vector, return a **function** that
# takes a d-dimensional vector into a scalar regression value, but
# mapping the input into the high-dimensional feature space and then
# taking the dot product with the weights.
def makeRegressor(w, phi):
    """Return a predictor x -> phi(x) . w, yielding a plain scalar."""
    return lambda x: (phi(x) * w)[0, 0]
def makeLogisticRegressor(w, phi):
    """Return a predictor x -> sigmoid(phi(x) . w), a value in (0, 1)."""
    return lambda x: s((phi(x) * w)[0, 0])
# Given an n x d matrix X of input values and an n x 1 matrix y of
# target values, and a function that maps a row of X into a
# prediction, return the root mean squared error of the predictor
# applied to the X values.
def rmse(X, y, predictor):
    """Root mean squared error of `predictor` over the rows of X
    against the n x 1 target matrix y."""
    n = len(y)
    return np.sqrt(sse(X, y, predictor) / n)
# As for rmse, but just return the sum squared error.
def sse(X, y, predictor):
    """Sum of squared errors of `predictor` over the rows of X
    against the n x 1 target matrix y."""
    preds = [predictor(X[i, :]) for i in range(X.shape[0])]
    p = np.matrix(preds).T
    assert p.shape == y.shape  # catch predictors returning wrong shapes
    return np.sum(np.square(p - y))
# Generic gradient descent:
# - f is a function mapping a vector of parameters to a score
# (not strictly necessary)
# - df is a function mapping a vector of parameters to the gradient at
# that point
# - x0 is an initial parameter vector
# - step_size is the step size
# - terminates after max_iter iterations, or when parameters have not changed
# by more than eps from one iteration to the next
# Returns: the final parameter vector, a list of scores (one per iteration)
# and a list of parameter vectors (one per iteration)
def gd(f, df, x0, step_size = .01, max_iter = 1000, eps = .00001,
       lrDecay = 1.0):
    """Generic gradient descent.

    f: parameters -> score (recorded, not strictly necessary);
    df: parameters -> gradient; x0: initial parameter vector.
    Steps are step_size * gradient, with step_size multiplied by
    lrDecay each iteration.  Terminates after max_iter iterations or
    when no component of the parameters moved by more than eps.
    Returns (final parameters, per-iteration scores, per-iteration
    parameter vectors).

    Fix: x is initialized to x0 so that max_iter == 0 returns
    (x0, [f(x0)], [x0]) instead of raising NameError.
    """
    x = prev_x = x0
    prev_f = f(x0)
    fs = [prev_f]; xs = [prev_x]
    for i in range(max_iter):
        x = prev_x - step_size * df(prev_x)
        if np.all(abs(x - prev_x) < eps): return x, fs, xs
        fs.append(f(x)); xs.append(x)
        prev_x = x
        step_size *= lrDecay
    return x, fs, xs
# Linear regression using gradient descent.
# X and y are as for ordinary least squares;
# w0 is the initial weight vector
# max_iter is as for gd
# l is used to do ridge regression
# Returns: final weight vector, a list of scores, list of weight vectors
def gdLinReg(X, y, l = 0, step_size = 0.01, w0 = None, max_iter = 1000):
    """Linear (ridge) regression fit by batch gradient descent.

    X and y are as for ordinary least squares; l is the ridge penalty;
    w0 defaults to the all-ones d x 1 vector.  Returns gd's
    (final weights, scores, weight history) triple.
    """
    # w is d by 1; X is n by d; result is d by 1.
    n = float(len(y))
    def objective(w):
        return np.sum(np.square(X * w - y)) / n + l * float(w.T * w)
    def gradient(w):
        return 2 * np.sum(np.multiply(X * w - y, X), axis = 0).T / n + l * w
    if w0 is None:
        w0 = np.matrix(np.ones(X.shape[1])).T
    return gd(objective, gradient, w0, step_size = step_size, max_iter = max_iter)
# Sigmoid function
def s(z):
    """Logistic sigmoid 1 / (1 + e^-z); elementwise on numpy arrays."""
    ez = np.exp(-z)
    return 1.0 / (1 + ez)
# Logistic regression using gradient descent.
# X is a matrix of feature vectors; y is a vector of labels in {0, 1}
# w0 is the initial weight vector
# max_iter is as for gd
# l currently unused, but can be used for regularization
# Returns: final weight vector, a list of scores, list of weight vectors
def gdLogReg(X, y, l = 0, step_size = 0.01, w0 = None, max_iter = 1000,
             eps = .00001):
    """Logistic regression fit by batch gradient descent.

    X is a matrix of feature vectors; y holds labels in {0, 1}; l adds
    an L2 penalty to the negative log-likelihood.  w0 defaults to a
    tiny-constant d x 1 vector.  Returns gd's result triple.
    """
    # w is d by 1; X is n by d; result is d by 1.
    n = float(len(y))
    def gradient(w):
        return np.sum(np.multiply(s(X * w) - y, X), axis = 0).T + l * w
    def neg_log_lik(w):
        z = s(X * w)
        ll = np.multiply(y, np.log(z)) + np.multiply(1.0 - y, np.log(1.0 - z))
        return -np.sum(ll) + l * float(w.T * w)
    if w0 is None:
        w0 = np.matrix(np.ones(X.shape[1])).T * 0.0000001
    return gd(neg_log_lik, gradient, w0, step_size = step_size,
              max_iter = max_iter, eps = eps)
# Stochastic gradient descent
# X is an n x d matrix of input examples
# y is an n x 1 matrix of outputs (we could combine X and Y if we wanted to)
# f is a function taking a parameter vector, a 1 x d input example,
# and a 1 x 1 output value, and returning an error; we want to
# minimize this error in expectation over the whole data set
# df has the same inputs as f, but returns a gradient vector of the same
# dimension as w0
# if earlyTermination is true, then terminate if params move by less
# than epsilon; otherwise, terminate after max_iter iterations.
# Returns last parameter vector, list of scores, list of param
# vectors, the iteration on which termination happened, and a Boolean
# indicating whether termination was early or not.
def sgd(X, y, f, df, w0, step_size = .01, max_iter = 20000, eps = .0000001,
        earlyTermination = False):
    """Stochastic gradient descent.

    X: n x d examples; y: n x 1 outputs.  f(w, Xj, yj) is the per-example
    error; df(w, Xj, yj) its gradient (same shape as w0).  Each iteration
    steps on one uniformly random example.  With earlyTermination, stop
    once the parameters move by less than eps per component.
    Returns (final params, scores, param history, iteration index at
    termination, and True iff the full max_iter iterations ran).

    Fix: w and i are initialized before the loop so max_iter == 0
    returns (w0, [], [w0], -1, True) instead of raising NameError.
    """
    n = y.shape[0]
    w = prev_w = w0
    i = -1  # iteration index reported when the loop never runs
    fs = []; ws = [prev_w]
    for i in range(max_iter):
        j = np.random.randint(n)
        Xj = X[j]; yj = y[j]
        w = prev_w - step_size * df(prev_w, Xj, yj)
        if earlyTermination and np.all(abs(w - prev_w) < eps):
            return w, fs, ws, i, False
        fs.append(f(prev_w, Xj, yj)); ws.append(w)
        prev_w = w
    return w, fs, ws, i, True
# Linear regression computed by stochastic gradient descent.
# Parameters are as for gdLinReg.
# Returns last parameter vector, list of scores, list of param
# vectors, the iteration on which termination happened, and a Boolean
# indicating whether termination was early or not.
def sgdLinReg(X, y, l = 0, step_size = 0.01, w0 = None, max_iter = 20000):
    """Linear regression fit by stochastic gradient descent.

    Parameters are as for gdLinReg (l currently unused); the return
    convention is sgd's five-tuple.
    """
    def loss_j(w, Xj, yj):
        return np.square(Xj * w - yj)
    def grad_j(w, Xj, yj):
        return 2 * ((Xj * w - yj) * Xj).T
    if w0 is None:
        w0 = np.matrix(np.ones(X.shape[1])).T
    return sgd(X, y, loss_j, grad_j, w0, step_size = step_size, max_iter = max_iter)
######################################################################
#
# Data
#
######################################################################
# Uses data set from Chapter 1 of Bishop, stored in file
# "curvefitting.txt". They are 10 points drawn from (x, sin(2 pi x))
# with noise added (but I'm not sure how much.)
# If random is not False, it should be an integer, and instead of
# returning data from the file, we will generate a new random data set
# of that size, with 0 mean, 0.2 stdev Gaussian noise.
# if addOnes is true, return: n x 1 matrix X, n x 2 matrix F (with
# column of 1's added) and n x 1 matrix Y.
def getCurveData(addOnes = False, random = False):
    """Bishop Ch.1 curve data (curvefitting.txt), or a fresh sample.

    If `random` is an integer k, return k+1 evenly spaced x in [0, 1]
    with y = sin(2 pi x) plus N(0, 0.2) noise instead of reading the
    file.  With addOnes, also return the n x 2 feature matrix [1, x]:
    (X, F, y); otherwise just (X, y).
    """
    if random:
        denom = float(random)
        X = np.matrix([[i / denom] for i in range(random + 1)])
        noise = np.random.normal(scale = 0.2, size = (random + 1, 1))
        y = np.matrix([[np.sin(2 * np.pi * X[i, 0])] for i in range(X.shape[0])]) + noise
    else:
        data = np.loadtxt('curvefitting.txt')
        X = np.matrix(data[0]).T
        y = np.matrix(data[1]).T
    if addOnes:
        F = np.append(np.ones_like(X), X, 1)
        return X, F, y
    return X, y
def superSimpleSeparable(addOnes = False):
    """Four linearly separable 2-d points with labels [1, 1, 0, 0].

    With addOnes, a constant-1 column is prepended to X.
    """
    pts = [[2, 3],
           [3, 2],
           [9, 10],
           [10, 9]]
    X = np.matrix(pts)
    y = np.matrix([[1, 1, 0, 0]]).T
    if addOnes:
        X = np.append(np.ones_like(y), X, 1)
    return X, y
def superSimpleSeparable2(addOnes = False):
    """Another tiny separable 2-d data set, labels [1, 0, 1, 0].

    With addOnes, a constant-1 column is prepended to X.
    """
    pts = [[2, 5],
           [3, 2],
           [9, 6],
           [12, 5]]
    X = np.matrix(pts)
    y = np.matrix([[1, 0, 1, 0]]).T
    if addOnes:
        X = np.append(np.ones_like(y), X, 1)
    return X, y
def xor(addOnes = False):
    """The classic non-linearly-separable XOR data set (4 points).

    With addOnes, a constant-1 column is prepended to X.
    """
    pts = [[1, 1],
           [2, 2],
           [1, 2],
           [2, 1]]
    X = np.matrix(pts)
    y = np.matrix([[1, 1, 0, 0]]).T
    if addOnes:
        X = np.append(np.ones_like(y), X, 1)
    return X, y
def xor_more(addOnes = False):
    """XOR plus four extra points (8 total), labels alternating in
    pairs.  With addOnes, a constant-1 column is prepended to X."""
    pts = [[1, 1], [2, 2], [1, 2], [2, 1],
           [2, 3], [4, 1], [1, 3], [3, 3]]
    X = np.matrix(pts)
    y = np.matrix([[1, 1, 0, 0, 1, 1, 0, 0]]).T
    if addOnes:
        X = np.append(np.ones_like(y), X, 1)
    return X, y
def multimodalData(modes = None, numPerMode = 20,
                   numModes = 2,
                   modeCov = None):
    """Sample a 2-d data set of Gaussian clusters with alternating
    binary labels (mode index mod 2).

    modes: optional array of cluster centers; when None, numModes
    centers are themselves drawn from N(0, 20 * modeCov).
    modeCov defaults to the 2x2 identity.  Using None as the default
    (instead of `modeCov = np.eye(2, 2)`) avoids the mutable-default
    pitfall of sharing one array object across all calls; behavior is
    unchanged for every caller.
    Returns (n x 2 X matrix, n x 1 label matrix).
    """
    if modeCov is None:
        modeCov = np.eye(2, 2)
    Xs = []
    Ys = []
    if modes is None:
        modes = np.random.multivariate_normal([0, 0], modeCov * 20,
                                              numModes)
    for (i, mode) in enumerate(modes):
        Xs.extend(np.random.multivariate_normal(mode, modeCov, numPerMode))
        Ys.extend([[i % 2]] * numPerMode)
    return np.matrix(Xs), np.matrix(Ys)
# Get the Blog data
def getBlogData(n):
    """Load the BlogFeedback train/test splits from a hard-coded
    directory.

    n: number of training rows to keep, or None to keep them all.
    Returns (X, y, XTest, yTest) as np.matrix objects.
    NOTE(review): the data directory is machine-specific; parameterize
    it if this is ever used outside the author's setup.
    """
    dir = '/Users/lpk/Desktop/BlogFeedback_data/'
    X = np.matrix(np.genfromtxt(dir+'x_train.csv',
                                delimiter = ','))
    # `is None`, not `== None`: identity test for the None sentinel
    # (== would invoke numpy's broadcast comparison semantics).
    if n is None:
        n = X.shape[0]
    else:
        X = X[:n]
    y = np.matrix(np.genfromtxt(dir+'y_train.csv',
                                delimiter = ',')).T[:n]
    XTest = np.matrix(np.genfromtxt(dir+'x_test.csv',
                                    delimiter = ','))
    yTest = np.matrix(np.genfromtxt(dir+'y_test.csv',
                                    delimiter = ',')).T
    return X, y, XTest, yTest
######################################################################
#
# Plotting stuff
#
######################################################################
def tidyPlot(xmin, xmax, ymin, ymax, center = False, title = None,
             xlabel = None, ylabel = None):
    """Create a new interactive figure with tidied axes and return the
    Axes.  With center=True the spines cross at the origin; otherwise
    the top/right spines are hidden.  Limits get a small margin so
    boundary points are not clipped."""
    plt.ion()
    plt.figure(facecolor="white")
    ax = plt.subplot()
    if center:
        # Axis-cross style: spines through zero, ticks on the spines.
        ax.spines['left'].set_position('zero')
        ax.spines['right'].set_color('none')
        ax.spines['bottom'].set_position('zero')
        ax.spines['top'].set_color('none')
        ax.spines['left'].set_smart_bounds(True)
        ax.spines['bottom'].set_smart_bounds(True)
        ax.xaxis.set_ticks_position('bottom')
        ax.yaxis.set_ticks_position('left')
    else:
        # Plain style: just drop the top/right box edges.
        ax.spines["top"].set_visible(False)
        ax.spines["right"].set_visible(False)
        ax.get_xaxis().tick_bottom()
        ax.get_yaxis().tick_left()
    # Margin so data exactly on the limits stays visible.
    eps = .05
    plt.xlim(xmin-eps, xmax+eps)
    plt.ylim(ymin-eps, ymax+eps)
    if title: ax.set_title(title)
    if xlabel: ax.set_xlabel(xlabel)
    if ylabel: ax.set_ylabel(ylabel)
    return ax
def plotData(ax, x, y, style = 'ro', c = None, label = None):
    """Plot (x, y) on ax and force a redraw.

    An explicit `style` string takes priority over a color `c`; when
    both are None, matplotlib's defaults apply.
    """
    if style is not None:
        ax.plot(x, y, style, label = label)
    elif c is not None:
        ax.plot(x, y, c = c, label = label)
    else:
        ax.plot(x, y, label = label)
    plt.show()
# w is (c, a, b)
# ax + by + c = 0
# y = -(a/b) x - (c/b)
def plotLineABC(ax, w, xmin, xmax):
    """Plot the line a*x + b*y + c = 0 on ax, where w = (c, a, b);
    solved for y: y = -(a/b) x - (c/b)."""
    c, a, b = float(w[0]), float(w[1]), float(w[2])
    slope = -a / b
    intercept = -c / b
    plotFun(ax, lambda x: slope * x + intercept, xmin, xmax)
# w is a (1 x 2) matrix
def plotLine(ax, w, xmin, xmax, nPts = 100):
    """Plot y = w[1] * x + w[0] on ax, where w is a (1 x 2) matrix
    of (intercept, slope)."""
    intercept, slope = float(w[0]), float(w[1])
    plotFun(ax, lambda x: slope * x + intercept, xmin, xmax, nPts)
def plotFun(ax, f, xmin, xmax, nPts = 100, label = None):
    """Plot scalar function f (which takes a 1x1 matrix) over
    [xmin, xmax] with nPts sample points, then force a redraw."""
    xs = np.linspace(xmin, xmax, nPts)
    ys = np.vstack([f(np.matrix([[v]])) for v in xs])
    ax.plot(xs, ys, label = label)
    plt.show()
def smooth(n, vals):
    """Sliding-window sums of width n over vals (an unnormalized box
    filter): each output is the sum of the n most recent values."""
    window = sum(vals[0:n])
    out = [window]
    for i in range(n, len(vals)):
        window += vals[i] - vals[i - n]
        out.append(window)
    return out
######################################################################
#
# Tests
#
######################################################################
######################################################################
#
# Ordinary least squares in 2D (one constant input dimension)
def t1():
    """Demo: ordinary least squares in 2-D (constant + x feature);
    prints the weights and plots the fitted line over the data."""
    X, F, y = getCurveData(True)
    w = ols(F, y)
    print 'w', w.T
    xmin, xmax = float(min(X)), float(max(X))
    ymin, ymax = float(min(y)), float(max(y))
    ax = tidyPlot(xmin, xmax, ymin, ymax, xlabel = 'x', ylabel = 'y')
    plotData(ax, X, y)
    plotLine(ax, w, xmin, xmax)
######################################################################
#
# Ordinary least squares in polynomial feature spaces
# Plots predictors
def t2(ds = range(1, 10)):
    """Demo: OLS in polynomial feature spaces of each order in ds;
    plots every fitted predictor and prints its training RMSE."""
    X, y = getCurveData()
    xmin, xmax = float(min(X)), float(max(X))
    ymin, ymax = float(min(y)), float(max(y))
    ax = tidyPlot(xmin, xmax, ymin-1, ymax+1, xlabel = 'x', ylabel = 'y')
    plotData(ax, X, y)
    for d in ds:
        phi = polynomialFeatures(d)
        phiD = applyFeatureFun(phi, X)
        w = ols(phiD, y)
        predictor = makeRegressor(w, phi)
        plotFun(ax, predictor, xmin, xmax, label = str(d))
        print 'Order', d, 'Training RMSE', rmse(X, y, predictor)
        print '    w', w.T
    ax.legend(loc="upper left", bbox_to_anchor=(1,1))
######################################################################
#
# Ordinary least squares in polynomial feature spaces
# Plots train and test error versus order of polynomial basis
def t3(ds = range(1, 10)):
    """Demo: OLS in polynomial feature spaces; plots train and test
    RMSE versus the polynomial order (overfitting illustration)."""
    X, y = getCurveData()
    XTest, yTest = getCurveData(random = 10)
    xmin, xmax = float(min(X)), float(max(X))
    ymin, ymax = float(min(y)), float(max(y))
    ax = tidyPlot(xmin, xmax, ymin-1, ymax+1, xlabel = 'x', ylabel = 'y')
    plotData(ax, X, y)
    plotData(ax, XTest, yTest, style = 'go')
    trainErr = []
    testErr = []
    for d in ds:
        phi = polynomialFeatures(d)
        phiD = applyFeatureFun(phi, X)
        w = ols(phiD, y)
        predictor = makeRegressor(w, phi)
        plotFun(ax, predictor, xmin, xmax)
        trainErr.append(rmse(X, y, predictor))
        testErr.append(rmse(XTest, yTest, predictor))
        print 'Order', d, 'Training RMSE', trainErr[-1]
        print '      Test RMSE', testErr[-1]
    # Second figure: error curves against polynomial order.
    ax = tidyPlot(0, len(ds), 0, max(max(testErr), max(trainErr)), center =True,
                  xlabel = 'Polynomial order', ylabel = 'RMSE')
    ax.plot(ds, trainErr, label = 'train err')
    ax.plot(ds, testErr, label = 'test err')
    ax.legend()
######################################################################
#
# Ordinary least squares in RBF feature spaces
# Plots train and test error versus bandwidth
# LPK: these are good b values (.01, .05, .1, .5, 1, 2, 4)
def tRBF(bs = []):
    """Demo: ridge-regularized OLS in RBF feature spaces; plots the
    predictors and, when several bandwidths are given, train/test RMSE
    versus bandwidth."""
    X, y = getCurveData()
    XTest, yTest = getCurveData(random = 10)
    xmin, xmax = float(min(X)), float(max(X))
    ymin, ymax = float(min(y)), float(max(y))
    ax = tidyPlot(xmin, xmax, ymin-1, ymax+1, xlabel = 'x', ylabel = 'y')
    plotData(ax, X, y)
    trainErr = []
    testErr = []
    for b in bs:
        phi = RBFs(X, b)
        phiD = applyFeatureFun(phi, X)
        # Tiny ridge term keeps the kernel Gram matrix invertible.
        w = olsr(phiD, y, 1e-5)
        predictor = makeRegressor(w, phi)
        plotFun(ax, predictor, xmin, xmax, label = str(b))
        trainErr.append(rmse(X, y, predictor))
        testErr.append(rmse(XTest, yTest, predictor))
        print 'Bandwidth', b, 'Training RMSE', trainErr[-1]
        print '     Test RMSE', testErr[-1]
        print '     w', w.T
    ax.legend(loc = 'best')
    if len(bs) > 1:
        ax = tidyPlot(min(bs), max(bs), 0, max(max(testErr), max(trainErr)),
                      xlabel = 'Bandwidth', ylabel = 'RMSE')
        ax.plot(bs, trainErr, label = 'train err')
        ax.plot(bs, testErr, label = 'test err')
        ax.legend(loc = 'best')
######################################################################
#
# Ordinary least squares in polynomial feature spaces
# Plots train and test error versus size of training set
def t4(trainSizes = (10, 15, 20, 30, 50, 100), showData = False):
    """Demo: OLS with a fixed order-9 polynomial basis; plots train and
    test RMSE versus the size of the (randomly generated) training
    set.  With showData, each training set gets its own figure."""
    XTest, yTest = getCurveData(random = 100)
    xmin, xmax = float(np.min(XTest)), float(np.max(XTest))
    ymin, ymax = float(np.min(yTest)), float(np.max(yTest))
    trainErr = []
    testErr = []
    phi = polynomialFeatures(9)
    if not showData:
        ax = tidyPlot(xmin, xmax, ymin-1, ymax+1, xlabel = 'x', ylabel = 'y',
                      title = 'Predictors for different train sizes')
    for ts in trainSizes:
        X, y = getCurveData(random = ts-1)
        if showData:
            # Fresh figure per training size.
            ax = tidyPlot(xmin, xmax, ymin-1, ymax+1, xlabel = 'x', ylabel = 'y',
                          title = 'Train size = ' + str(ts))
            plotData(ax, X, y)
        phiD = applyFeatureFun(phi, X)
        w = ols(phiD, y)
        predictor = makeRegressor(w, phi)
        plotFun(ax, predictor, xmin, xmax, label=str(ts))
        trainErr.append(rmse(X, y, predictor))
        testErr.append(rmse(XTest, yTest, predictor))
        print 'Train size', ts, 'Training RMSE', trainErr[-1]
        print '     Test RMSE', testErr[-1]
        print '     w', w.T
    ax.legend(loc="upper left", bbox_to_anchor=(1,1))
    if len(trainSizes) > 1:
        ax = tidyPlot(0, max(trainSizes), 0, max(testErr), center = True,
                      xlabel = 'Training set size', ylabel = 'RMSE')
        ax.plot(trainSizes, trainErr, label = 'train err')
        ax.plot(trainSizes, testErr, label = 'test err')
        ax.legend(loc="upper right")
######################################################################
#
# Ridge regression in polynomial feature spaces
# Plot train and test error versus ridge parameter lambda
def t5(logLambdaValues = (-50, -40, -30, -20, -15, -10, -1, 0, 1, 10)):
    """Demo: ridge regression in an order-9 polynomial basis; plots the
    predictors and train/test RMSE versus log(lambda)."""
    XTest, yTest = getCurveData(random = 100)
    xmin, xmax = float(np.min(XTest)), float(np.max(XTest))
    ymin, ymax = float(np.min(yTest)), float(np.max(yTest))
    trainErr = []
    testErr = []
    phi = polynomialFeatures(9)
    ax = tidyPlot(xmin, xmax, ymin-1, ymax+1,
                  xlabel = 'x', ylabel = 'y',
                  title = 'Predictors for different lambda values')
    X, y = getCurveData()
    phiD = applyFeatureFun(phi, X)
    plotData(ax, X, y)
    for llv in logLambdaValues:
        w = olsr(phiD, y, np.exp(llv))
        predictor = makeRegressor(w, phi)
        plotFun(ax, predictor, xmin, xmax, label=str(llv))
        trainErr.append(rmse(X, y, predictor))
        testErr.append(rmse(XTest, yTest, predictor))
        print 'Log lambda', llv, 'Training RMSE', trainErr[-1]
        print '     Test RMSE', testErr[-1]
        print w.T
    ax.legend(loc="upper left", bbox_to_anchor=(1,1))
    if len(logLambdaValues) > 1:
        ax = tidyPlot(min(logLambdaValues), max(logLambdaValues),
                      0, max(testErr), center = True,
                      xlabel = 'Log lambda', ylabel = 'RMSE')
        ax.plot(logLambdaValues, trainErr, label = 'train err')
        ax.plot(logLambdaValues, testErr, label = 'test err')
        ax.legend(loc="best")
######################################################################
#
# Batch gradient descent in 2D feature space
# Plots error as a function of iteration number for different step sizes
# Individual plots of trajectory of w during optimization
def t6(learning_rates = (.0001, .001, .01, .05, .07, .075, .1)):
    """Demo: batch gradient descent on 2-D linear regression; plots
    error versus iteration for each step size, plus one trajectory
    plot of w per step size."""
    X, F, y = getCurveData(True)
    ax = tidyPlot(0, 1000, 2, 8,
                  xlabel = 'iteration', ylabel = 'err')
    for lr in learning_rates:
        w, scores, vals = gdLinReg(F, y, step_size = lr)
        plotData(ax, range(len(scores)), scores, style = None,
                 label = str(lr))
        print 'lr', lr, 'w', w.T, 'err', scores[-1]
        # Trajectory of (w0, w1) during this optimization run.
        dmin = min(-1.5, np.min(vals)); dmax = max(1.5, np.max(vals))
        nax = tidyPlot(dmin, dmax, dmin, dmax,
                       xlabel = 'w0', ylabel = 'w1',
                       title = 'step size = '+str(lr), center = True)
        plotData(nax, [float(xd) for (xd, yd) in vals],
                 [float(yd) for (xd, yd) in vals],
                 style = 'bo-')
    ax.legend(loc="upper left", bbox_to_anchor=(1,1))
######################################################################
#
# Stochastic gradient descent in 2D feature space
# Plots error as a function of iteration number for different step sizes
# Individual plots of trajectory of w during optimization
def t7(learning_rates = (.0001, .001, .01, .1, .5, 1)):
    """Demo: stochastic gradient descent on 2-D linear regression;
    plots smoothed error versus iteration per step size, plus one
    trajectory plot of w per step size."""
    X, F, y = getCurveData(True)
    ax = tidyPlot(0, 1000, 0, 100,
                  xlabel = 'iteration', ylabel = 'err')
    for lr in learning_rates:
        w, scores, vals, iters, failed = sgdLinReg(F, y, step_size = lr)
        # Box-filter the noisy per-sample scores for readability.
        smoothScores = smooth(len(y)*4, [float(s) for s in scores])
        plotData(ax, range(len(smoothScores)), smoothScores,
                 style = None, label = str(lr))
        print 'lr', lr, 'w', w.T, 'err', smoothScores[-1]
        dmin = min(-1.5, np.min(vals)); dmax = max(1.5, np.max(vals))
        nax = tidyPlot(dmin, dmax, dmin, dmax,
                       xlabel = 'w0', ylabel = 'w1',
                       title = 'step size = '+str(lr), center = True)
        plotData(nax, [float(xd) for (xd, yd) in vals],
                 [float(yd) for (xd, yd) in vals],
                 style = 'bo-')
    ax.legend(loc="best")
######################################################################
#
# Batch gradient descent in polynomial feature space
# Plots train and test error as a function of iteration number
def t8(order = 9):
    """Batch gradient descent in polynomial feature space.

    Fits a degree-`order` polynomial regressor with batch GD, plots the
    predictor at sampled iterations, and plots train/test RMSE versus
    iteration number.
    """
    # Reference weight vectors from earlier runs; only `zeros` is used below.
    goodw = np.matrix(\
        [[ 3.49512436e-01, 2.32326298e+02, -5.32086773e+03, 4.85596769e+04,
           -2.31598783e+05, 6.39931981e+05, -1.06162004e+06, 1.04222515e+06,
           -5.57590377e+05, 1.25180846e+05]]).T
    zeros = np.matrix(np.zeros([order+1, 1]))
    medw = np.matrix(\
        [[ 3.0e-01, 2.0e+02, -5.0e+03, 5.0e+04,
           -2.0e+05, 6.0e+05, -1.0e+06, 1.0e+06,
           -5.0e+05, 1.0e+05]]).T
    X, y = getCurveData()
    xmin, xmax = float(min(X)), float(max(X))
    ymin, ymax = float(min(y)), float(max(y))
    XTest, yTest = getCurveData(random = 100)
    phi = polynomialFeatures(order)
    phiD = applyFeatureFun(phi, X)
    lr = 0.005
    # NOTE(review): `lr` above is unused -- the call hard-codes step_size=.01.
    w, scores, vals = gdLinReg(phiD, y, step_size = .01, w0 = zeros,
                               max_iter = 10000)
    print 'w', w.T, 's', scores[-1], 'iter', len(scores)
    iVals = []; trainVals = []; testVals = []
    axPred = tidyPlot(xmin, xmax, ymin-1, ymax+1, xlabel = 'x', ylabel = 'y')
    # Sample every 100th weight iterate; plot its predictor and record errors.
    for i in range(0, len(vals), 100):
        predictor = makeRegressor(vals[i], phi)
        plotFun(axPred, predictor, xmin, xmax, label = 'iters = '+str(i))
        iVals.append(i)
        trainVals.append(rmse(X, y, predictor))
        testVals.append(rmse(XTest, yTest, predictor))
    plotData(axPred, X, y)
    axPred.legend(loc="upper left", bbox_to_anchor=(1,1))
    ax = tidyPlot(0, np.max(iVals), 0, 1, #np.max(testVals),
                  xlabel = 'iteration', ylabel = 'rmse')
    plotData(ax, iVals, trainVals, label = 'training error', style = 'bo-')
    plotData(ax, iVals, testVals, label = 'testing error', style = 'ro-')
    ax.legend(loc="best")
def t9(order = 9):
    """Stochastic gradient descent in polynomial feature space.

    Like t8 but with SGD from a random initial weight vector; samples the
    predictor every 5000 updates and plots train/test RMSE.
    """
    zeros = np.matrix(np.zeros([order+1, 1]))
    # Reference weight vectors from earlier runs; unused below (randw is used).
    goodw = np.matrix(\
        [[ 3.49512436e-01, 2.32326298e+02, -5.32086773e+03, 4.85596769e+04,
           -2.31598783e+05, 6.39931981e+05, -1.06162004e+06, 1.04222515e+06,
           -5.57590377e+05, 1.25180846e+05]]).T
    medw = np.matrix(\
        [[ 3.0e-01, 2.0e+02, -5.0e+03, 5.0e+04,
           -2.0e+05, 6.0e+05, -1.0e+06, 1.0e+06,
           -5.0e+05, 1.0e+05]]).T
    randw = np.matrix(np.random.randn(order+1,1))
    X, y = getCurveData()
    xmin, xmax = float(min(X)), float(max(X))
    ymin, ymax = float(min(y)), float(max(y))
    XTest, yTest = getCurveData(random = 100)
    phi = polynomialFeatures(order)
    phiD = applyFeatureFun(phi, X)
    lr = 0.05
    w, scores, vals, iters, failed = sgdLinReg(phiD, y, step_size = lr,
                                               max_iter = 100000,
                                               w0 = randw)
    print 'w', w.T, 's', scores[-1], 'iter', len(scores)
    iVals = []; trainVals = []; testVals = []
    axPred = tidyPlot(xmin, xmax, ymin-1, ymax+1, xlabel = 'x', ylabel = 'y')
    # Sample every 5000th weight iterate; plot its predictor and record errors.
    for i in range(0, len(vals), 5000):
        predictor = makeRegressor(vals[i], phi)
        plotFun(axPred, predictor, xmin, xmax, label = 'iters = '+str(i))
        iVals.append(i)
        trainVals.append(rmse(X, y, predictor))
        testVals.append(rmse(XTest, yTest, predictor))
    plotData(axPred, X, y)
    ax = tidyPlot(0, np.max(iVals), 0, 1, #np.max(testVals),
                  xlabel = 'iteration', ylabel = 'rmse')
    plotData(ax, iVals, trainVals, label = 'training error', style = 'bo-')
    plotData(ax, iVals, testVals, label = 'testing error', style = 'ro-')
    ax.legend(loc="upper left", bbox_to_anchor=(1,1))
# Blog feedback data, with analytic ridge regression
def t10(n = None,
logLambdaValues = [-14, -12, -10, -8, -6, -4, -2, 0, 2, 4, 5, 6, 7,
8, 9, 10, 12, 14, 16, 18, 20]):
X, y, XTest, yTest = getBlogData(n = n)
print X.shape, y.shape
iVals = []; trainVals = []; testVals = []
for llv in logLambdaValues:
w = olsr(X, y, np.exp(llv))
predictor = lambda x: x * w
iVals.append(llv)
trainVals.append(rmse(X, y, predictor))
testVals.append(rmse(XTest, yTest, predictor))
print llv, trainVals[-1], testVals[-1]
ax = tidyPlot(np.min(iVals), np.max(iVals), 28, 40, #np.max(testVals),
xlabel = 'Log lambda', ylabel = 'rmse',
title = 'Training data size = '+str(n))
plotData(ax, iVals, trainVals, label = 'training error', style = 'bo-')
plotData(ax, iVals, testVals, label = 'testing error', style = 'ro-')
ax.legend(loc="upper left", bbox_to_anchor=(1,1))
# Blog feedback data, with gradient regression.
# Add ridge!
# lambda = 80; rmse = 31.8; mse = 1024
# lambda = ? ; mse = 1015 (seems untrustworthy)
# log lambda = 2.8
# lambda 100, MSE 894
# lr = .000000005 good for whole data set (or divide by 5)
def t11(lr = .000000005, n = None):
X, y, XTest, yTest = getBlogData(n = n)
print X.shape, y.shape
iVals = []; trainVals = []; testVals = []
#w, scores, vals = gdLinReg(X, y, step_size = lr, max_iter = 20000)
w, scores, vals, iters, failed = \
sgdLinReg(X, y,
step_size = lr, max_iter = 2000000)
print 's', scores[-1], 'iter', len(scores)
iVals = []; trainVals = []; testVals = []
for i in range(0, len(vals), 50000):
predictor = lambda x: float(x * vals[i])
iVals.append(i)
trainVals.append(rmse(X, y, predictor))
testVals.append(rmse(XTest, yTest, predictor))
print trainVals
print testVals
ax = tidyPlot(0, np.max(iVals), 0, 400, #np.max(testVals),
xlabel = 'iteration', ylabel = 'rmse')
plotData(ax, iVals, trainVals, label = 'training error', style = 'bo-')
plotData(ax, iVals, testVals, label = 'testing error', style = 'ro-')
ax.legend(loc="upper left", bbox_to_anchor=(1,1))
def tGD(step_sizes = [.01, .1, .2, .3]):
def f(x):
return (2 * x + 3)**2
def df(x):
return 2 * 2 * (2 * x + 3)
x0 = 0
for ss in step_sizes:
x, fs, xs = gd(f, df, x0, step_size = ss)
print 'ss', ss, 'x', x
nax = tidyPlot(-4, 1, 0, 15,
xlabel = 'x', ylabel = 'f(x)',
title = 'step size = '+str(ss), center = True)
plotFun(nax, f, -4, 1)
plotData(nax, xs, fs, style = 'ro-')
def tGD2(x0 = 0):
    """1-D gradient descent on a quartic with two local minima, from x0."""
    def f(x):
        return (x - 2) * (x - 3) * (x + 3) * (x + 1)
    def df(x):
        # Derivative of the expanded quartic x^4 - x^3 - 11x^2 + 9x + 18.
        return 9 - (22 * x) - (3 * x**2) + (4 * x**3)
    step = 0.01
    x, fs, xs = gd(f, df, x0, step_size = step)
    axes = tidyPlot(-4, 4, -25, 25,
                    xlabel = 'x', ylabel = 'f(x)',
                    title = 'step size = '+str(step), center = True)
    plotFun(axes, f, -4, 4)
    plotData(axes, xs, fs, style = 'ro-')
def tLogReg(X, y, d = 1, max_iter = 5000, convPlot = False,
            quiet = False, stepSize = .01, l = 0):
    """Fit logistic regression with degree-d polynomial features and plot.

    X, y: data matrix and 0/1 labels.
    d: polynomial feature degree.
    max_iter: gradient-descent iteration cap.
    convPlot: if True, also plot the NLL convergence curve.
    quiet: if False, print labels and fitted probabilities.
    stepSize: gradient-descent step size.
    l: L2 regularization weight.
    Returns (w, fs, ws): final weights, NLL trace, weight trace.
    """
    phi = polynomialFeaturesN(d)
    phiD = applyFeatureFun(phi, X)
    w, fs, ws = gdLogReg(phiD, y, step_size = stepSize, max_iter = max_iter,
                         l = l)
    print 'nll', fs[-1], 'num iters', len(fs)
    print w
    # Pad the plot window slightly beyond the data range.
    eps = .1
    xmin = np.min(X[:,0]) - eps; xmax = np.max(X[:,0]) + eps
    ymin = np.min(X[:,1]) - eps; ymax = np.max(X[:,1]) + eps
    ax = tidyPlot(xmin, xmax, ymin, ymax, xlabel = 'x', ylabel = 'y')
    predictor = makeLogisticRegressor(w, phi) # sigmoid
    def fizz(xx, yy):
        # Evaluate the fitted classifier at a single (xx, yy) point.
        return predictor(np.matrix([[xx, yy]]))
    res = 30 # resolution of plot
    # Heat map of the classifier's output over the bounding box.
    ima = np.array([[fizz(xi, yi) for xi in np.linspace(xmin, xmax, res)] \
                    for yi in np.linspace(ymin, ymax, res)])
    im = ax.imshow(np.flipud(ima), interpolation = 'none',
                   extent = [xmin, xmax, ymin, ymax],
                   cmap = 'viridis')
    plt.colorbar(im)
    # NOTE(review): the comprehension variable `l` shadows the regularization
    # parameter `l`; harmless here because `l` is not used afterwards.
    colors = [('r' if l == 0 else 'g') for l in y]
    ax.scatter(X[:,0], X[:,1], c = colors, marker = 'o', s=80,
               edgecolors = 'none')
    if not quiet:
        z = s(phiD*w)
        print y
        print z
    if convPlot:
        pl = len(fs) #min(500,len(fs))
        iters = range(0, pl, 100)
        pfs = [fs[i] for i in iters]
        nax = tidyPlot(0, pl, 0, max(pfs))
        plotData(nax, iters, pfs, style = 'r-')
    return w, fs, ws
# No features, will step through as separator moves
def tLogReg1(interactive = False, data2 = True):
    """Logistic regression on raw (already-augmented) features.

    Animates the separator over the first 50 GD iterates, then plots the
    final separator and the NLL convergence curve.

    interactive: pause for keyboard input between separator plots.
    data2: choose between the two built-in separable data sets.
    """
    X, y = superSimpleSeparable2(True) if data2 else superSimpleSeparable(True)
    w, fs, ws = gdLogReg(X, y, step_size = .005, max_iter = 5000)
    xmin = np.min(X[:,0]); xmax = np.max(X[:,0])
    # NOTE(review): the data-driven limits above are immediately overridden.
    xmin = -10; xmax = 20
    ax = tidyPlot(xmin, xmax, xmin, xmax, xlabel = 'x', ylabel = 'y')
    colors = [('r' if l == 0 else 'g') for l in y]
    ax.scatter(X[:,1], X[:,2], c = colors, marker = 'o', s=50,
               edgecolors = 'none')
    # Step through the separator's movement during early optimization.
    for i in range(0, 50, 1):
        plotLineABC(ax, ws[i], xmin, xmax)
        if interactive:
            raw_input('go?')
    plotLineABC(ax, w, xmin, xmax)
    ax.scatter(X[:,1], X[:,2], c = colors, marker = 'o', s=50,
               edgecolors = 'none')
    print 'nll', fs[-1], 'num iters', len(fs)
    print w
    z = s(X*w)
    print y
    print z
    # Convergence plot over the first 500 iterations.
    pl = 500 # len(fs)
    nax = tidyPlot(0, pl, 0, 5)
    iters = range(pl)
    plotData(nax, iters, fs[:pl], style = 'r-')
# Now, with polynomial features. Separable and xor
def tLogReg2(d = 1, max_iter = 5000, easy = False, convPlot = False):
    """Polynomial-feature logistic regression on separable data or xor."""
    if easy:
        data, labels = superSimpleSeparable2()
    else:
        data, labels = xor()
    tLogReg(data, labels, d, max_iter, convPlot)
# Now, with polynomial features. More complicated data!
def tLogReg3(d = 1, max_iter = 5000, convPlot = False, stepSize = .01):
    """Polynomial-feature logistic regression on the richer xor_more data."""
    data, labels = xor_more()
    tLogReg(data, labels, d, max_iter, convPlot, stepSize = stepSize)
# Noisy xor
def tLogReg4(d, max_iter = 5000, convPlot = False, modeMult = 2.0):
    """Noisy xor: four Gaussian modes at the xor corners, scaled by modeMult."""
    corners = np.array([[1, 1], [1, -1], [-1, -1], [-1, 1]])
    X, y = multimodalData(corners * modeMult)
    tLogReg(X, y, d, max_iter, convPlot, quiet = True, stepSize = .0001)
# General data to play with
def tLogReg5(d, modes = None, max_iter = 5000, convPlot = False, numModes = 2):
    """Logistic regression on generated multimodal data, for experimenting."""
    data, labels = multimodalData(modes, numModes = numModes)
    tLogReg(data, labels, d, max_iter, convPlot, quiet = True,
            stepSize = .000001)
# Import-time confirmation that this module loaded successfully.
print 'Loaded learn.py'
| |
#!/usr/bin/env python3
# The MIT License (MIT)
#
# Copyright (c) 2016 Ivor Wanders
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .interface import SerialInterface
from . import message
import socketserver
import argparse
import threading
import time
import logging
class IR_Control:
    """Bridges a serial-attached IR transceiver and message handling.

    Polls the serial interface in a loop, dispatches received IR codes to
    ir_received() (to be overridden by a subclass), and can send IR codes
    back out over serial.
    """

    def __init__(self, interface, serial_port, baudrate):
        # interface: SerialInterface used for all serial I/O.
        self.i = interface
        self.serial_port = serial_port
        self.baudrate = baudrate
        self.log = logging.getLogger("IR_Control")
        # Flag polled by loop(); cleared by stop() to exit the loop.
        self.running = True

    def stop(self):
        """Request that loop() terminate."""
        self.running = False

    # blocks reading from serial port and acting appropriately.
    def loop(self):
        """Poll the serial interface until stop() is called."""
        while(self.running):
            if (not self.i.is_serial_connected()):
                # Port dropped: log, try to reconnect, back off a second.
                self.log.error("No serial port!")
                self.i.connect(self.serial_port, self.baudrate)
                time.sleep(1)
            a = self.i.get_message()
            if (a):
                self.received_serial(a)
            else:
                # Nothing pending; short sleep avoids busy-waiting.
                time.sleep(0.001)

    # processes received messages from serial
    def received_serial(self, msg):
        """Dispatch one message read from the serial interface."""
        # receives messages from the interface.
        if (msg.msg_type == msg.type.action_IR_received):
            # convert it into a ir_message
            ir_code = message.IR(**dict(msg.ir_specification))
            self.ir_received(ir_code)

    # sends a message over the serial port
    def send_serial(self, msg):
        """Queue a message for transmission on the serial interface."""
        self.i.put_message(msg)

    # send an IR code with the hardware.
    def send_ir(self, ir_code):
        """Build an action_IR_send message from ir_code and send it."""
        self.log.debug("sending ir {}".format(ir_code))
        # create the message
        msg = message.Msg()
        msg.msg_type = msg.type.action_IR_send
        try:
            msg.ir_specification.from_dict(ir_code.raw())
        except TypeError as e:
            self.log.error("Conversion failed: {} ".format(str(e)))
        # NOTE(review): on conversion failure the message is still sent with
        # an empty/partial ir_specification -- confirm this is intended.
        self.send_serial(msg)

    # This method is called when an IR code is received from the serial port.
    def ir_received(self, ir_code):
        raise NotImplementedError("Subclass should implement this.")
# This object actually deals with the interaction and configuration file
# it is up to you to change this to suit your needs... or use this and modify
# the configuration file.
class Interactor(IR_Control):
    """Glue between received IR codes and configured actions.

    Builds name<->code lookups from a configuration object and runs the
    action configured for a received code's name.
    """

    def __init__(self, *args, **kwargs):
        super(Interactor, self).__init__(*args, **kwargs)
        self.log = logging.getLogger("Interactor")

    def load_config(self, conf):
        """Build the name<->code lookups and the action table from conf."""
        self.ir_by_name = {}
        self.ir_by_code = {}
        ir_codes = conf.get_codes()
        for code in ir_codes:
            name = ir_codes[code]
            # store lookup for name -> ir_code and ir_code -> name.
            self.ir_by_name[name] = code
            self.ir_by_code[code.tuple()] = name
        # store actions per name.
        self.ir_actions = conf.get_actions()

    # called when an ir code is received from the serial port.
    def ir_received(self, ir_code):
        if (ir_code.tuple() in self.ir_by_code):
            # if it is in the list, convert to ir_name
            ir_name = self.ir_by_code[ir_code.tuple()]
            self.log.debug("IR name known: {}".format(ir_name))
            # try to perform the action:
            self.perform_action(ir_name)
        else:
            self.log.debug("IR code not known:\n{}".format(
                ir_code.config_print()))

    # When an IR code is received and we have a name for this, this performs
    # the action associated to that name.
    def perform_action(self, action_name):
        if (action_name not in self.ir_actions):
            return
        self.log.info("Action found for {}.".format(action_name))
        action = self.ir_actions[action_name]
        # call the action, with the interactor and action_name argument.
        action(self, action_name)

    # send an IR code by name.
    def send_ir_by_name(self, name):
        if name in self.ir_by_name:
            self.send_ir(self.ir_by_name[name])
        else:
            # Logger.warn() is deprecated; warning() is the supported name.
            self.log.warning("Tried to send unknown {} ir code".format(name))

    # this method is called when something is passed via the TCP socket.
    def incoming_external_command(self, cmd):
        cmd = str(cmd, 'ascii')
        self.log.debug("Incoming command: {}".format(cmd))
        self.send_ir_by_name(cmd)
        # self.perform_action(cmd)
class TCPCommandHandler(socketserver.StreamRequestHandler):
    """Handles one TCP connection carrying a single command."""

    def handle(self):
        # Read one command (up to 1 KiB) and strip surrounding whitespace.
        data = self.request.recv(1024).strip()
        # Forward the raw bytes to the manager set via setManager().
        self.server.mcu_manager_.incoming_external_command(data)
        # No explicit self.finish() here: the socketserver framework calls
        # finish() itself after handle() returns, so calling it was redundant.
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """TCP server that handles each connection in its own thread."""

    def setManager(self, manager):
        # Manager (an Interactor) that incoming commands are forwarded to
        # by the request handler via self.server.mcu_manager_.
        self.mcu_manager_ = manager
def start(conf):
    """Parse CLI arguments, wire up serial + TCP, and run the IR loop.

    conf: configuration object providing get_codes() / get_actions(),
          passed to Interactor.load_config().
    """
    parser = argparse.ArgumentParser(description="Control MCU at serial port.")
    parser.add_argument('--serial', '-s', help="The serial port to use.",
                        default="/dev/ttyUSB0")
    parser.add_argument('--baudrate', '-r', help="The baudrate for the port.",
                        default=9600, type=int)
    parser.add_argument('--verbose', '-v', help="Print all communication.",
                        action="store_true", default=False)
    # type=int is required: without it a user-supplied --tcpport arrives as a
    # string (the int default only applies when the flag is absent) and
    # TCPServer rejects a non-integer port.
    parser.add_argument('--tcpport', '-p', help="The port used for the tcp"
                        " socket.",
                        default=9999, type=int)
    parser.add_argument('--tcphost', '-b', help="The host/ip on which to bind"
                        " the tcp socket receiving the IR commands.",
                        default="127.0.0.1")
    # parse the arguments.
    args = parser.parse_args()

    # start the serial interface
    a = SerialInterface(packet_size=message.PACKET_SIZE)
    a.connect(serial_port=args.serial, baudrate=args.baudrate)
    a.start()  # start the interface

    # pretty elaborate logging...
    logger_interface = logging.getLogger("interface")
    logger_IR_control = logging.getLogger("IR_control")
    logger_interactor = logging.getLogger("Interactor")
    if (args.verbose):
        logger_interface.setLevel(logging.DEBUG)
        logger_IR_control.setLevel(logging.DEBUG)
        logger_interactor.setLevel(logging.DEBUG)
    else:
        logger_interactor.setLevel(logging.WARN)
        logger_interface.setLevel(logging.WARN)
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(name)s - %(asctime)s - %(levelname)s'
                                  ' - %(message)s')
    ch.setFormatter(formatter)
    logger_interface.addHandler(ch)
    logger_IR_control.addHandler(ch)
    logger_interactor.addHandler(ch)

    # start the Interactor 'glue' object.
    m = Interactor(a, serial_port=args.serial, baudrate=args.baudrate)
    m.load_config(conf)

    # This is only for the TCP server to facilitate sending IR codes from the
    # terminal easily.
    server = ThreadedTCPServer((args.tcphost, args.tcpport), TCPCommandHandler)
    server.setManager(m)
    server_thread = threading.Thread(target=server.serve_forever)
    server_thread.daemon = True
    server_thread.start()

    # loop the IR_Control object such that the correct actions are performed
    try:
        m.loop()
    except KeyboardInterrupt as e:
        m.stop()
        a.stop()
        logger_IR_control.error("Received interrupt signal, stopping.")
| |
# Copyright 2011,2012,2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A utility module for handling some mundane parts of ARP
"""
"""
TODO
----
arp_responder should be refactored to use this. Also, it should be possible
to have a simple ARP learner which keeps an ARP table without responding...
"""
from pox.core import core
import pox
log = core.getLogger()
from pox.lib.packet.ethernet import ethernet, ETHER_BROADCAST
from pox.lib.packet.arp import arp
from pox.lib.addresses import EthAddr, IPAddr
from pox.lib.util import dpid_to_str, str_to_bool
from pox.lib.revent import EventHalt, Event, EventMixin
import pox.openflow.libopenflow_01 as of
def send_arp_reply (reply_to, mac, src_mac = None, src_ip = None):
    """Send an ARP reply back out the port a request arrived on.

    reply_to: PacketIn event carrying the ARP request being answered.
    mac: EthAddr (or string) to advertise as the answer.
    src_mac: Ethernet/ARP source MAC; defaults to `mac`.
    src_ip: ARP protocol source address; defaults to 0.0.0.0.
    """
    # reply_to should be a PacketIn event
    arpp = reply_to.parsed.find('arp')
    mac = EthAddr(mac)
    if src_mac is None:
        src_mac = mac
    else:
        src_mac = EthAddr(src_mac)
    r = arp()
    r.opcode = r.REPLY
    # Mirror the requester as the destination of the reply.
    r.hwdst = arpp.hwsrc
    r.protodst = arpp.protosrc
    r.hwsrc = EthAddr(src_mac)
    r.protosrc = IPAddr("0.0.0.0") if src_ip is None else IPAddr(src_ip)
    e = ethernet(type=ethernet.ARP_TYPE, src=src_mac, dst=r.hwdst)
    e.payload = r
    # Packet-out through the port the request came in on.
    msg = of.ofp_packet_out()
    msg.data = e.pack()
    msg.actions.append(of.ofp_action_output(port = reply_to.port))
    msg.in_port = of.OFPP_NONE
    reply_to.connection.send(msg)
def send_arp_request (connection, ip, port = of.OFPP_FLOOD,
                      src_mac = None, src_ip = None):
    """Emit an ARP request for `ip` out of `port` on `connection`.

    The source MAC defaults to one derived from the switch DPID; the
    source IP defaults to 0.0.0.0.
    """
    if src_mac is None:
        sender_mac = _dpid_to_mac(connection.dpid)
    else:
        sender_mac = EthAddr(src_mac)
    query = arp()
    query.opcode = query.REQUEST
    query.hwsrc = sender_mac
    query.hwdst = ETHER_BROADCAST
    query.protosrc = IPAddr("0.0.0.0") if src_ip is None else IPAddr(src_ip)
    query.protodst = IPAddr(ip)
    frame = ethernet(type=ethernet.ARP_TYPE, src=sender_mac, dst=query.hwdst)
    frame.payload = query
    po = of.ofp_packet_out()
    po.data = frame.pack()
    po.actions.append(of.ofp_action_output(port = port))
    po.in_port = of.OFPP_NONE
    connection.send(po)
def _dpid_to_mac (dpid):
    """Derive a MAC address from a datapath ID (low 48 bits, hex-encoded)."""
    # Should maybe look at internal port MAC instead?
    low48 = dpid & 0xffFFffFFffFF
    return EthAddr("%012x" % (low48,))
class ARPRequest (Event):
    """Raised for every ARP request seen; a listener may set .reply to an
    EthAddr to have the helper answer the request on its behalf."""

    def __init__ (self, con, arpp, reply_from, eat_packet, port):
        super(ARPRequest,self).__init__()
        self.connection = con
        self.request = arpp          # The ARP packet itself
        self.reply_from = reply_from # MAC any generated reply is sent from
        self.eat_packet = eat_packet # Halt further PacketIn handling?
        self.port = port             # Ingress port
        self.ip = arpp.protosrc
        self.reply = None            # Listener sets this to desired EthAddr

    @property
    def dpid (self):
        return self.connection.dpid

    def __str__ (self):
        return "ARPRequest for %s on %s" % (self.ip, dpid_to_str(self.dpid))
class ARPReply (Event):
    """Raised for every ARP reply seen by the helper."""

    def __init__ (self, con, arpp, eat_packet, port):
        super(ARPReply,self).__init__()
        self.connection = con
        self.reply = arpp            # The ARP reply packet
        self.eat_packet = eat_packet # Halt further PacketIn handling?
        self.port = port             # Ingress port

    @property
    def dpid (self):
        return self.connection.dpid

    def __str__ (self):
        return "ARPReply for %s on %s" % (self.reply.protodst,
                                          dpid_to_str(self.dpid))
class ARPHelper (EventMixin):
    """Watches ARP traffic, raises ARPRequest/ARPReply events, and answers
    requests on behalf of listeners that set a reply address."""
    _eventMixin_events = set([ARPRequest,ARPReply])
    _rule_priority = 0x7000 # Pretty high

    def __init__ (self, no_flow, eat_packets):
        core.addListeners(self)
        # Install an ARP-to-controller flow on each switch unless disabled.
        self._install_flow = not no_flow
        # Default for halting other PacketIn handlers on ARP packets.
        self.eat_packets = eat_packets

    def send_arp_request (self, *args, **kw):
        """Convenience passthrough to the module-level send_arp_request."""
        return send_arp_request(*args, **kw)

    def send_arp_reply (self, *args, **kw):
        """Convenience passthrough to the module-level send_arp_reply."""
        return send_arp_reply(*args, **kw)

    def _handle_GoingUpEvent (self, event):
        core.openflow.addListeners(self)
        log.debug("Up...")

    def _handle_ConnectionUp (self, event):
        # Push a high-priority rule sending all ARP to the controller.
        if self._install_flow:
            fm = of.ofp_flow_mod()
            fm.priority = self._rule_priority
            fm.match.dl_type = ethernet.ARP_TYPE
            fm.actions.append(of.ofp_action_output(port=of.OFPP_CONTROLLER))
            event.connection.send(fm)

    def _handle_PacketIn (self, event):
        """Raise events for IPv4-over-Ethernet ARP packets; optionally
        answer requests and halt further event processing."""
        dpid = event.connection.dpid
        inport = event.port
        packet = event.parsed
        a = packet.find('arp')
        if not a: return
        # Only IPv4-over-Ethernet ARP is handled.
        if a.prototype != arp.PROTO_TYPE_IP:
            return
        if a.hwtype != arp.HW_TYPE_ETHERNET:
            return
        if a.opcode == arp.REQUEST:
            log.debug("%s ARP request %s => %s", dpid_to_str(dpid),
                      a.protosrc, a.protodst)
            ev = ARPRequest(event.connection,a,_dpid_to_mac(dpid),self.eat_packets,
                            inport)
            self.raiseEvent(ev)
            if ev.reply is not None:
                # A listener supplied an answer; build and send the reply,
                # mirroring the request's hardware/protocol fields.
                r = arp()
                r.hwtype = a.hwtype
                r.prototype = a.prototype
                r.hwlen = a.hwlen
                r.protolen = a.protolen
                r.opcode = arp.REPLY
                r.hwdst = a.hwsrc
                r.protodst = a.protosrc
                r.protosrc = a.protodst
                r.hwsrc = EthAddr(ev.reply)
                e = ethernet(type=packet.type, src=ev.reply_from, dst=a.hwsrc)
                e.payload = r
                log.debug("%s answering ARP for %s" % (dpid_to_str(dpid),
                                                       str(r.protosrc)))
                msg = of.ofp_packet_out()
                msg.data = e.pack()
                # Send back out the port the request came in on.
                msg.actions.append(of.ofp_action_output(port =
                                                        of.OFPP_IN_PORT))
                msg.in_port = inport
                event.connection.send(msg)
            return EventHalt if ev.eat_packet else None
        elif a.opcode == arp.REPLY:
            log.debug("%s ARP reply %s => %s", dpid_to_str(dpid),
                      a.protosrc, a.hwsrc)
            ev = ARPReply(event.connection,a,self.eat_packets,inport)
            self.raiseEvent(ev)
            return EventHalt if ev.eat_packet else None
        return EventHalt if self.eat_packets else None
def launch (no_flow=False, eat_packets=True):
    """POX component entry point; registers a singleton ARPHelper.

    Arguments arrive as strings from the POX command line, hence the
    str_to_bool conversions.
    """
    core.registerNew(ARPHelper, str_to_bool(no_flow), str_to_bool(eat_packets))
| |
"""
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import argparse
import datetime
import errno
import logging
import os
import sys
import cleanup
import closure
from preprocess import Preprocess
def main(args):
  """Preprocess a font for use as a TachyFont.

  Cleans the input font, dumps closure/cmap/glyph data, tars the pieces,
  and copies the results into the output directory, skipping stages whose
  outputs are already newer than their inputs (unless --force).

  Args:
    args: list, command line arguments.

  Raises:
    ValueError: if the log level is invalid.
    OSError: if the build directory cannot be created.

  Returns:
    Status of the operation (0 on success, shell status otherwise).
  """
  parser = argparse.ArgumentParser(prog='pyprepfnt')
  parser.add_argument('fontfile', help='Input font file')
  parser.add_argument('output_dir', help='Output directory')
  parser.add_argument('--force', default=False, action='store_true',
                      help='Force preprocessing even if the timestamps indicate'
                      ' it is not necessary')
  parser.add_argument('--hinting', default=False, action='store_true',
                      help='Retain hinting if set, else strip hinting')
  parser.add_argument('--reuse_clean', default=False, action='store_true',
                      help='Reuse the "clean" file if possible')
  parser.add_argument('--log', default='WARNING',
                      help='Set the logging level; eg, --log=INFO')
  parser.add_argument('--verbose', default=False, action='store_true',
                      help='Report internal operations')
  cmd_args = parser.parse_args(args)

  loglevel = getattr(logging, cmd_args.log.upper(), None)
  if not isinstance(loglevel, int):
    # Report the string the user actually passed; `loglevel` is None here,
    # so the previous message ('... %s' % loglevel) always printed "None".
    raise ValueError('Invalid log level: %s' % cmd_args.log)
  log = logging.getLogger()
  logging_handler = logging.StreamHandler(sys.stdout)
  logging_handler.setLevel(loglevel)
  formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
  logging_handler.setFormatter(formatter)
  log.addHandler(logging_handler)
  log.setLevel(loglevel)

  verbose = cmd_args.verbose
  force_preprocessing = cmd_args.force
  log.debug('force_preprocessing = ' + str(force_preprocessing))

  fontfile = cmd_args.fontfile
  fonttime = os.path.getmtime(fontfile)
  # TODO(bstell) use Logger
  basename = os.path.basename(fontfile)
  log.info('preprocess %s = %d bytes' % (cmd_args.fontfile,
                                         os.path.getsize(cmd_args.fontfile)))
  filename, extension = os.path.splitext(basename)
  cur_time = datetime.datetime.now()
  build_dir = 'tmp-%s' % filename
  if not cmd_args.reuse_clean:
    # Unique (timestamp + pid) scratch directory per run.
    build_dir = ('%s-%04d-%02d-%02d-%02d-%02d-%02d.%d' %
                 (build_dir, cur_time.year, cur_time.month, cur_time.day,
                  cur_time.hour, cur_time.minute, cur_time.second, os.getpid()))
  output_dir = cmd_args.output_dir
  log.debug('TAR file: ' + output_dir)
  try:
    os.makedirs(build_dir)
  except OSError as exception:
    # An existing directory is fine (expected with --reuse_clean).
    if exception.errno != errno.EEXIST:
      log.error('failed to create build_dir (' + build_dir + ')')
      raise

  log.debug('if reuse_clean then we should compare the source font and final tar')
  cleanfile = filename + '_clean' + extension
  cleanfilepath = build_dir + '/' + cleanfile
  # Decide if we are building the cleaned up version of the font.
  rebuild_clean = not cmd_args.reuse_clean
  cleanfile_exists = os.path.isfile(cleanfilepath)
  if force_preprocessing or not cleanfile_exists:
    rebuild_clean = True
  else:
    cleantime = os.path.getmtime(cleanfilepath)
    if cleantime <= fonttime:
      rebuild_clean = True
  log.debug('rebuild_clean = ' + str(rebuild_clean))
  if rebuild_clean:
    log.debug('cleaned version: ' + cleanfilepath)
    cleanup.cleanup(fontfile, cmd_args.hinting, cleanfilepath, verbose)
    closure.dump_closure_map(cleanfilepath, build_dir)
  else:
    log.debug('reuse cleaned up version: ' + cleanfilepath)
  # Get the latest cleaned up font timestamp.
  cleantime = os.path.getmtime(cleanfilepath)

  # Decide if we are rebuilding the tar file.
  tachyfont_file = filename + '.TachyFont.tar'
  tarfilepath = build_dir + '/' + tachyfont_file
  rebuild_tar = False
  tarfile_exists = os.path.isfile(tarfilepath)
  log.debug('file %s exists: %s' % (tarfilepath, tarfile_exists))
  if force_preprocessing or not tarfile_exists:
    rebuild_tar = True
  else:
    tartime = os.path.getmtime(tarfilepath)
    if tartime <= cleantime:
      rebuild_tar = True
  log.debug('rebuild_tar = ' + str(rebuild_tar))
  if rebuild_tar:
    log.debug('start preprocess')
    preprocess = Preprocess(cleanfilepath, build_dir, verbose)
    log.debug('build base')
    preprocess.base_font()
    log.debug('dump cmap')
    preprocess.cmap_dump()
    log.debug('build glyph data')
    preprocess.serial_glyphs()
    log.debug('write sha-1 fingerprint')
    preprocess.sha1_fingerprint()
    log.debug('create tar file')
    sub_files = ('base closure_data closure_idx codepoints gids glyph_data '
                 'glyph_table sha1_fingerprint')
    tar_cmd = 'cd %s; tar cf %s %s' % (build_dir, tachyfont_file, sub_files)
    log.debug('tar_cmd: ' + tar_cmd)
    status = os.system(tar_cmd)
    log.debug('tar command status: ' + str(status))
    if status:
      log.error('tar command status: ' + str(status))
      return status
  else:
    log.debug('no need to rebuild intermediate tar file: ' + tarfilepath)
  # Get the latest cleaned up tar timestamp.
  tartime = os.path.getmtime(tarfilepath)

  # Decide if we are copying over the tar file.
  copy_tar = False
  tarcopy_filepath = output_dir + '/' + tachyfont_file
  tarcopy_exists = os.path.isfile(tarcopy_filepath)
  if force_preprocessing or not tarcopy_exists:
    copy_tar = True
  else:
    tarcopytime = os.path.getmtime(tarcopy_filepath)
    if tarcopytime <= tartime:
      copy_tar = True
  log.debug('copy_tar = ' + str(copy_tar))
  if copy_tar:
    log.debug('cp the files to the output directory')
    log.info('cleaned: %s = %d' % (cleanfile, os.path.getsize(cleanfilepath)))
    log.info('Tar: %s/%s' % (output_dir, tachyfont_file))
    cp_cmd = ('cp %s/%s %s/%s %s' %
              (build_dir, tachyfont_file, build_dir, cleanfile, output_dir))
    log.debug('cp_cmd: ' + cp_cmd)
    status = os.system(cp_cmd)
    log.debug('cp status ' + str(status))
    if status:
      log.error('cp status = ' + str(status))
      return status
  else:
    log.debug('the existing tar file is up to date: ' + tarfilepath)

  if cmd_args.reuse_clean:
    log.debug('leaving the build directory: ' + build_dir)
    status = 0
  else:
    log.debug('cleanup the build directory')
    rm_cmd = ('rm -rf %s' % build_dir)
    log.debug('rm_cmd: ' + rm_cmd)
    status = os.system(rm_cmd)
    log.debug('rm status ' + str(status))
    if status:
      log.error('rm status = ' + str(status))
      return status

  log.debug('command status = ' + str(status))
  if status != 0:
    log.info('preprocessing FAILED')
  return status
if __name__ == '__main__':
  # Run preprocessing with the CLI arguments and propagate its exit status.
  cmd_status = main(sys.argv[1:])
  sys.exit(cmd_status)
| |
"""Support for GTFS (Google/General Transport Format Schema)."""
from __future__ import annotations
import datetime
import logging
import os
import threading
from typing import Any, Callable
import pygtfs
from sqlalchemy.sql import text
import voluptuous as vol
from homeassistant.components.sensor import (
PLATFORM_SCHEMA as SENSOR_PLATFORM_SCHEMA,
SensorEntity,
)
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_NAME,
CONF_OFFSET,
DEVICE_CLASS_TIMESTAMP,
STATE_UNKNOWN,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.util import slugify
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)

# Keys used in the sensor's extra state attributes.
ATTR_ARRIVAL = "arrival"
ATTR_BICYCLE = "trip_bikes_allowed_state"
ATTR_DAY = "day"
ATTR_FIRST = "first"
ATTR_DROP_OFF_DESTINATION = "destination_stop_drop_off_type_state"
ATTR_DROP_OFF_ORIGIN = "origin_stop_drop_off_type_state"
ATTR_INFO = "info"
ATTR_OFFSET = CONF_OFFSET
ATTR_LAST = "last"
ATTR_LOCATION_DESTINATION = "destination_station_location_type_name"
ATTR_LOCATION_ORIGIN = "origin_station_location_type_name"
ATTR_PICKUP_DESTINATION = "destination_stop_pickup_type_state"
ATTR_PICKUP_ORIGIN = "origin_stop_pickup_type_state"
ATTR_ROUTE_TYPE = "route_type_name"
ATTR_TIMEPOINT_DESTINATION = "destination_stop_timepoint_exact"
ATTR_TIMEPOINT_ORIGIN = "origin_stop_timepoint_exact"
ATTR_WHEELCHAIR = "trip_wheelchair_access_available"
ATTR_WHEELCHAIR_DESTINATION = "destination_station_wheelchair_boarding_available"
ATTR_WHEELCHAIR_ORIGIN = "origin_station_wheelchair_boarding_available"

# Platform configuration keys.
CONF_DATA = "data"
CONF_DESTINATION = "destination"
CONF_ORIGIN = "origin"
CONF_TOMORROW = "include_tomorrow"

DEFAULT_NAME = "GTFS Sensor"
DEFAULT_PATH = "gtfs"

# Mappings from GTFS integer field values to presentation values.
BICYCLE_ALLOWED_DEFAULT = STATE_UNKNOWN
BICYCLE_ALLOWED_OPTIONS = {1: True, 2: False}
DROP_OFF_TYPE_DEFAULT = STATE_UNKNOWN
DROP_OFF_TYPE_OPTIONS = {
    0: "Regular",
    1: "Not Available",
    2: "Call Agency",
    3: "Contact Driver",
}
ICON = "mdi:train"
# Icons keyed by (extended) GTFS route type; see ROUTE_TYPE_OPTIONS below.
ICONS = {
    0: "mdi:tram",
    1: "mdi:subway",
    2: "mdi:train",
    3: "mdi:bus",
    4: "mdi:ferry",
    5: "mdi:train-variant",
    6: "mdi:gondola",
    7: "mdi:stairs",
    100: "mdi:train",
    101: "mdi:train",
    102: "mdi:train",
    103: "mdi:train",
    104: "mdi:train-car",
    105: "mdi:train",
    106: "mdi:train",
    107: "mdi:train",
    108: "mdi:train",
    109: "mdi:train",
    110: "mdi:train-variant",
    111: "mdi:train-variant",
    112: "mdi:train-variant",
    113: "mdi:train-variant",
    114: "mdi:train-variant",
    115: "mdi:train-variant",
    116: "mdi:train-variant",
    117: "mdi:train-variant",
    200: "mdi:bus",
    201: "mdi:bus",
    202: "mdi:bus",
    203: "mdi:bus",
    204: "mdi:bus",
    205: "mdi:bus",
    206: "mdi:bus",
    207: "mdi:bus",
    208: "mdi:bus",
    209: "mdi:bus",
    400: "mdi:subway-variant",
    401: "mdi:subway-variant",
    402: "mdi:subway",
    403: "mdi:subway-variant",
    404: "mdi:subway-variant",
    405: "mdi:subway-variant",
    700: "mdi:bus",
    701: "mdi:bus",
    702: "mdi:bus",
    703: "mdi:bus",
    704: "mdi:bus",
    705: "mdi:bus",
    706: "mdi:bus",
    707: "mdi:bus",
    708: "mdi:bus",
    709: "mdi:bus",
    710: "mdi:bus",
    711: "mdi:bus",
    712: "mdi:bus-school",
    713: "mdi:bus-school",
    714: "mdi:bus",
    715: "mdi:bus",
    716: "mdi:bus",
    800: "mdi:bus",
    900: "mdi:tram",
    901: "mdi:tram",
    902: "mdi:tram",
    903: "mdi:tram",
    904: "mdi:tram",
    905: "mdi:tram",
    906: "mdi:tram",
    1000: "mdi:ferry",
    1100: "mdi:airplane",
    1200: "mdi:ferry",
    1300: "mdi:airplane",
    1400: "mdi:gondola",
    1500: "mdi:taxi",
    1501: "mdi:taxi",
    1502: "mdi:ferry",
    1503: "mdi:train-variant",
    1504: "mdi:bicycle-basket",
    1505: "mdi:taxi",
    1506: "mdi:car-multiple",
    1507: "mdi:taxi",
    1700: "mdi:train-car",
    1702: "mdi:horse-variant",
}
LOCATION_TYPE_DEFAULT = "Stop"
LOCATION_TYPE_OPTIONS = {
    0: "Station",
    1: "Stop",
    2: "Station Entrance/Exit",
    3: "Other",
}
PICKUP_TYPE_DEFAULT = STATE_UNKNOWN
PICKUP_TYPE_OPTIONS = {
    0: "Regular",
    1: "None Available",
    2: "Call Agency",
    3: "Contact Driver",
}
# Human-readable names for (extended) GTFS route types.
ROUTE_TYPE_OPTIONS = {
    0: "Tram",
    1: "Subway",
    2: "Rail",
    3: "Bus",
    4: "Ferry",
    5: "Cable Tram",
    6: "Aerial Lift",
    7: "Funicular",
    100: "Railway Service",
    101: "High Speed Rail Service",
    102: "Long Distance Trains",
    103: "Inter Regional Rail Service",
    104: "Car Transport Rail Service",
    105: "Sleeper Rail Service",
    106: "Regional Rail Service",
    107: "Tourist Railway Service",
    108: "Rail Shuttle (Within Complex)",
    109: "Suburban Railway",
    110: "Replacement Rail Service",
    111: "Special Rail Service",
    112: "Lorry Transport Rail Service",
    113: "All Rail Services",
    114: "Cross-Country Rail Service",
    115: "Vehicle Transport Rail Service",
    116: "Rack and Pinion Railway",
    117: "Additional Rail Service",
    200: "Coach Service",
    201: "International Coach Service",
    202: "National Coach Service",
    203: "Shuttle Coach Service",
    204: "Regional Coach Service",
    205: "Special Coach Service",
    206: "Sightseeing Coach Service",
    207: "Tourist Coach Service",
    208: "Commuter Coach Service",
    209: "All Coach Services",
    400: "Urban Railway Service",
    401: "Metro Service",
    402: "Underground Service",
    403: "Urban Railway Service",
    404: "All Urban Railway Services",
    405: "Monorail",
    700: "Bus Service",
    701: "Regional Bus Service",
    702: "Express Bus Service",
    703: "Stopping Bus Service",
    704: "Local Bus Service",
    705: "Night Bus Service",
    706: "Post Bus Service",
    707: "Special Needs Bus",
    708: "Mobility Bus Service",
    709: "Mobility Bus for Registered Disabled",
    710: "Sightseeing Bus",
    711: "Shuttle Bus",
    712: "School Bus",
    713: "School and Public Service Bus",
    714: "Rail Replacement Bus Service",
    715: "Demand and Response Bus Service",
    716: "All Bus Services",
    800: "Trolleybus Service",
    900: "Tram Service",
    901: "City Tram Service",
    902: "Local Tram Service",
    903: "Regional Tram Service",
    904: "Sightseeing Tram Service",
    905: "Shuttle Tram Service",
    906: "All Tram Services",
    1000: "Water Transport Service",
    1100: "Air Service",
    1200: "Ferry Service",
    1300: "Aerial Lift Service",
    1400: "Funicular Service",
    1500: "Taxi Service",
    1501: "Communal Taxi Service",
    1502: "Water Taxi Service",
    1503: "Rail Taxi Service",
    1504: "Bike Taxi Service",
    1505: "Licensed Taxi Service",
    1506: "Private Hire Service Vehicle",
    1507: "All Taxi Services",
    1700: "Miscellaneous Service",
    1702: "Horse-drawn Carriage",
}
TIMEPOINT_DEFAULT = True
TIMEPOINT_OPTIONS = {0: False, 1: True}
WHEELCHAIR_ACCESS_DEFAULT = STATE_UNKNOWN
WHEELCHAIR_ACCESS_OPTIONS = {1: True, 2: False}
WHEELCHAIR_BOARDING_DEFAULT = STATE_UNKNOWN
WHEELCHAIR_BOARDING_OPTIONS = {1: True, 2: False}

# Sensor platform configuration schema.
PLATFORM_SCHEMA = SENSOR_PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_ORIGIN): cv.string,
        vol.Required(CONF_DESTINATION): cv.string,
        vol.Required(CONF_DATA): cv.string,
        vol.Optional(CONF_NAME): cv.string,
        vol.Optional(CONF_OFFSET, default=0): cv.time_period,
        vol.Optional(CONF_TOMORROW, default=False): cv.boolean,
    }
)
def get_next_departure(
    schedule: Any,
    start_station_id: Any,
    end_station_id: Any,
    offset: datetime.timedelta,
    include_tomorrow: bool = False,
) -> dict:
    """Get the next departure for the given schedule.

    Queries the pygtfs sqlite database for departures from
    ``start_station_id`` to ``end_station_id`` on services running
    yesterday, today and (optionally) tomorrow, builds a sorted lookup
    timetable, and returns a dict describing the first departure strictly
    after ``now + offset`` — or an empty dict when none is found.
    """
    # All comparisons below are done in naive local time.
    now = dt_util.now().replace(tzinfo=None) + offset
    now_date = now.strftime(dt_util.DATE_STR_FORMAT)
    yesterday = now - datetime.timedelta(days=1)
    yesterday_date = yesterday.strftime(dt_util.DATE_STR_FORMAT)
    tomorrow = now + datetime.timedelta(days=1)
    tomorrow_date = tomorrow.strftime(dt_util.DATE_STR_FORMAT)

    # Fetch all departures for yesterday, today and optionally tomorrow,
    # up to an overkill maximum in case of a departure every minute for those
    # days.
    limit = 24 * 60 * 60 * 2
    tomorrow_select = tomorrow_where = tomorrow_order = ""
    if include_tomorrow:
        # Three service days instead of two -> scale the row limit by 3/2.
        limit = int(limit / 2 * 3)
        tomorrow_name = tomorrow.strftime("%A").lower()
        tomorrow_select = f"calendar.{tomorrow_name} AS tomorrow,"
        tomorrow_where = f"OR calendar.{tomorrow_name} = 1"
        tomorrow_order = f"calendar.{tomorrow_name} DESC,"

    # NOTE: the interpolated fragments are derived from strftime("%A") only
    # (weekday column names), so the f-string cannot inject arbitrary SQL;
    # user-supplied values are passed as bound parameters below.
    sql_query = f"""
        SELECT trip.trip_id, trip.route_id,
               time(origin_stop_time.arrival_time) AS origin_arrival_time,
               time(origin_stop_time.departure_time) AS origin_depart_time,
               date(origin_stop_time.departure_time) AS origin_depart_date,
               origin_stop_time.drop_off_type AS origin_drop_off_type,
               origin_stop_time.pickup_type AS origin_pickup_type,
               origin_stop_time.shape_dist_traveled AS origin_dist_traveled,
               origin_stop_time.stop_headsign AS origin_stop_headsign,
               origin_stop_time.stop_sequence AS origin_stop_sequence,
               origin_stop_time.timepoint AS origin_stop_timepoint,
               time(destination_stop_time.arrival_time) AS dest_arrival_time,
               time(destination_stop_time.departure_time) AS dest_depart_time,
               destination_stop_time.drop_off_type AS dest_drop_off_type,
               destination_stop_time.pickup_type AS dest_pickup_type,
               destination_stop_time.shape_dist_traveled AS dest_dist_traveled,
               destination_stop_time.stop_headsign AS dest_stop_headsign,
               destination_stop_time.stop_sequence AS dest_stop_sequence,
               destination_stop_time.timepoint AS dest_stop_timepoint,
               calendar.{yesterday.strftime("%A").lower()} AS yesterday,
               calendar.{now.strftime("%A").lower()} AS today,
               {tomorrow_select}
               calendar.start_date AS start_date,
               calendar.end_date AS end_date
        FROM trips trip
        INNER JOIN calendar calendar
                   ON trip.service_id = calendar.service_id
        INNER JOIN stop_times origin_stop_time
                   ON trip.trip_id = origin_stop_time.trip_id
        INNER JOIN stops start_station
                   ON origin_stop_time.stop_id = start_station.stop_id
        INNER JOIN stop_times destination_stop_time
                   ON trip.trip_id = destination_stop_time.trip_id
        INNER JOIN stops end_station
                   ON destination_stop_time.stop_id = end_station.stop_id
        WHERE (calendar.{yesterday.strftime("%A").lower()} = 1
               OR calendar.{now.strftime("%A").lower()} = 1
               {tomorrow_where}
               )
        AND start_station.stop_id = :origin_station_id
        AND end_station.stop_id = :end_station_id
        AND origin_stop_sequence < dest_stop_sequence
        AND calendar.start_date <= :today
        AND calendar.end_date >= :today
        ORDER BY calendar.{yesterday.strftime("%A").lower()} DESC,
                 calendar.{now.strftime("%A").lower()} DESC,
                 {tomorrow_order}
                 origin_stop_time.departure_time
        LIMIT :limit
        """
    result = schedule.engine.execute(
        text(sql_query),
        origin_station_id=start_station_id,
        end_station_id=end_station_id,
        today=now_date,
        limit=limit,
    )

    # Create lookup timetable for today and possibly tomorrow, taking into
    # account any departures from yesterday scheduled after midnight,
    # as long as all departures are within the calendar date range.
    timetable = {}
    yesterday_start = today_start = tomorrow_start = None
    yesterday_last = today_last = ""
    for row in result:
        if row["yesterday"] == 1 and yesterday_date >= row["start_date"]:
            # "first" is None for yesterday's service day: only the
            # past-midnight spill-over departures are kept (those whose
            # depart date differs from the first yesterday row).
            extras = {"day": "yesterday", "first": None, "last": False}
            if yesterday_start is None:
                yesterday_start = row["origin_depart_date"]
            if yesterday_start != row["origin_depart_date"]:
                # Departure crossed midnight -> index it under today's date.
                idx = f"{now_date} {row['origin_depart_time']}"
                timetable[idx] = {**row, **extras}
                yesterday_last = idx
        if row["today"] == 1:
            extras = {"day": "today", "first": False, "last": False}
            if today_start is None:
                today_start = row["origin_depart_date"]
                extras["first"] = True
            if today_start == row["origin_depart_date"]:
                idx_prefix = now_date
            else:
                # Today's service day runs past midnight into tomorrow.
                idx_prefix = tomorrow_date
            idx = f"{idx_prefix} {row['origin_depart_time']}"
            timetable[idx] = {**row, **extras}
            today_last = idx
        if (
            "tomorrow" in row
            and row["tomorrow"] == 1
            and tomorrow_date <= row["end_date"]
        ):
            extras = {"day": "tomorrow", "first": False, "last": None}
            if tomorrow_start is None:
                tomorrow_start = row["origin_depart_date"]
                extras["first"] = True
            if tomorrow_start == row["origin_depart_date"]:
                idx = f"{tomorrow_date} {row['origin_depart_time']}"
                timetable[idx] = {**row, **extras}

    # Flag last departures.
    for idx in filter(None, [yesterday_last, today_last]):
        timetable[idx]["last"] = True
    _LOGGER.debug("Timetable: %s", sorted(timetable.keys()))

    # Pick the first timetable entry strictly after "now".
    item = {}
    for key in sorted(timetable.keys()):
        if dt_util.parse_datetime(key) > now:
            item = timetable[key]
            _LOGGER.debug(
                "Departure found for station %s @ %s -> %s", start_station_id, key, item
            )
            break
    if item == {}:
        return {}

    # Format arrival and departure dates and times, accounting for the
    # possibility of times crossing over midnight.
    origin_arrival = now
    if item["origin_arrival_time"] > item["origin_depart_time"]:
        origin_arrival -= datetime.timedelta(days=1)
    origin_arrival_time = (
        f"{origin_arrival.strftime(dt_util.DATE_STR_FORMAT)} "
        f"{item['origin_arrival_time']}"
    )
    origin_depart_time = f"{now_date} {item['origin_depart_time']}"
    dest_arrival = now
    if item["dest_arrival_time"] < item["origin_depart_time"]:
        dest_arrival += datetime.timedelta(days=1)
    dest_arrival_time = (
        f"{dest_arrival.strftime(dt_util.DATE_STR_FORMAT)} "
        f"{item['dest_arrival_time']}"
    )
    dest_depart = dest_arrival
    if item["dest_depart_time"] < item["dest_arrival_time"]:
        dest_depart += datetime.timedelta(days=1)
    dest_depart_time = (
        f"{dest_depart.strftime(dt_util.DATE_STR_FORMAT)} "
        f"{item['dest_depart_time']}"
    )
    depart_time = dt_util.parse_datetime(origin_depart_time)
    arrival_time = dt_util.parse_datetime(dest_arrival_time)
    origin_stop_time = {
        "Arrival Time": origin_arrival_time,
        "Departure Time": origin_depart_time,
        "Drop Off Type": item["origin_drop_off_type"],
        "Pickup Type": item["origin_pickup_type"],
        "Shape Dist Traveled": item["origin_dist_traveled"],
        "Headsign": item["origin_stop_headsign"],
        "Sequence": item["origin_stop_sequence"],
        "Timepoint": item["origin_stop_timepoint"],
    }
    destination_stop_time = {
        "Arrival Time": dest_arrival_time,
        "Departure Time": dest_depart_time,
        "Drop Off Type": item["dest_drop_off_type"],
        "Pickup Type": item["dest_pickup_type"],
        "Shape Dist Traveled": item["dest_dist_traveled"],
        "Headsign": item["dest_stop_headsign"],
        "Sequence": item["dest_stop_sequence"],
        "Timepoint": item["dest_stop_timepoint"],
    }
    return {
        "trip_id": item["trip_id"],
        "route_id": item["route_id"],
        "day": item["day"],
        "first": item["first"],
        "last": item["last"],
        "departure_time": depart_time,
        "arrival_time": arrival_time,
        "origin_stop_time": origin_stop_time,
        "destination_stop_time": destination_stop_time,
    }
def setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    add_entities: Callable[[list], None],
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the GTFS sensor.

    Verifies that the configured GTFS feed file/folder exists, opens (or
    creates) the derived sqlite database via pygtfs, imports the feed on
    first use, and registers one GTFSDepartureSensor entity.
    """
    gtfs_dir = hass.config.path(DEFAULT_PATH)
    data = config[CONF_DATA]
    origin = config.get(CONF_ORIGIN)
    destination = config.get(CONF_DESTINATION)
    name = config.get(CONF_NAME)
    offset: datetime.timedelta = config[CONF_OFFSET]
    include_tomorrow = config[CONF_TOMORROW]
    # exist_ok avoids the exists()/makedirs() check-then-act race and is the
    # idiomatic "ensure directory" form.
    os.makedirs(gtfs_dir, exist_ok=True)
    if not os.path.exists(os.path.join(gtfs_dir, data)):
        _LOGGER.error("The given GTFS data file/folder was not found")
        return
    (gtfs_root, _) = os.path.splitext(data)
    # check_same_thread=False: the sqlite connection is used outside the
    # creating thread (update() runs under an entity lock).
    sqlite_file = f"{gtfs_root}.sqlite?check_same_thread=False"
    joined_path = os.path.join(gtfs_dir, sqlite_file)
    gtfs = pygtfs.Schedule(joined_path)
    # pylint: disable=no-member
    if not gtfs.feeds:
        # First run for this feed: import the raw GTFS data into sqlite.
        pygtfs.append_feed(gtfs, os.path.join(gtfs_dir, data))
    add_entities(
        [GTFSDepartureSensor(gtfs, name, origin, destination, offset, include_tomorrow)]
    )
class GTFSDepartureSensor(SensorEntity):
    """Implementation of a GTFS departure sensor.

    State is the next departure time (UTC, ISO 8601); extra attributes
    expose agency, stop, route, trip and stop-time metadata from the feed.
    """
    _attr_device_class = DEVICE_CLASS_TIMESTAMP
    def __init__(
        self,
        gtfs: Any,
        name: Any | None,
        origin: Any,
        destination: Any,
        offset: datetime.timedelta,
        include_tomorrow: bool,
    ) -> None:
        """Initialize the sensor."""
        self._pygtfs = gtfs
        self.origin = origin
        self.destination = destination
        self._include_tomorrow = include_tomorrow
        self._offset = offset
        self._custom_name = name
        self._available = False
        self._icon = ICON
        self._name = ""
        self._state: str | None = None
        self._attributes: dict[str, Any] = {}
        # Cached feed records; resolved lazily in update() and reused
        # across refreshes until the departure changes.
        self._agency = None
        self._departure: dict[str, Any] = {}
        self._destination = None
        self._origin = None
        self._route = None
        self._trip = None
        # NOTE(review): lock serializes update(); presumably it guards the
        # shared sqlite/pygtfs access — confirm against platform threading.
        self.lock = threading.Lock()
        self.update()
    @property
    def name(self) -> str:
        """Return the name of the sensor."""
        return self._name
    @property
    def native_value(self) -> str | None:
        """Return the state of the sensor."""
        return self._state
    @property
    def available(self) -> bool:
        """Return True if entity is available."""
        return self._available
    @property
    def extra_state_attributes(self) -> dict:
        """Return the state attributes."""
        return self._attributes
    @property
    def icon(self) -> str:
        """Icon to use in the frontend, if any."""
        return self._icon
    def update(self) -> None:
        """Get the latest data from GTFS and update the states."""
        with self.lock:
            # Fetch valid stop information once
            if not self._origin:
                stops = self._pygtfs.stops_by_id(self.origin)
                if not stops:
                    self._available = False
                    _LOGGER.warning("Origin stop ID %s not found", self.origin)
                    return
                self._origin = stops[0]
            if not self._destination:
                stops = self._pygtfs.stops_by_id(self.destination)
                if not stops:
                    self._available = False
                    _LOGGER.warning(
                        "Destination stop ID %s not found", self.destination
                    )
                    return
                self._destination = stops[0]
            self._available = True
            # Fetch next departure
            self._departure = get_next_departure(
                self._pygtfs,
                self.origin,
                self.destination,
                self._offset,
                self._include_tomorrow,
            )
            # Define the state as a UTC timestamp with ISO 8601 format
            if not self._departure:
                self._state = None
            else:
                self._state = dt_util.as_utc(
                    self._departure["departure_time"]
                ).isoformat()
            # Fetch trip and route details once, unless updated
            if not self._departure:
                self._trip = None
            else:
                trip_id = self._departure["trip_id"]
                if not self._trip or self._trip.trip_id != trip_id:
                    _LOGGER.debug("Fetching trip details for %s", trip_id)
                    self._trip = self._pygtfs.trips_by_id(trip_id)[0]
                route_id = self._departure["route_id"]
                if not self._route or self._route.route_id != route_id:
                    _LOGGER.debug("Fetching route details for %s", route_id)
                    self._route = self._pygtfs.routes_by_id(route_id)[0]
            # Fetch agency details exactly once
            if self._agency is None and self._route:
                _LOGGER.debug("Fetching agency details for %s", self._route.agency_id)
                try:
                    self._agency = self._pygtfs.agencies_by_id(self._route.agency_id)[0]
                except IndexError:
                    _LOGGER.warning(
                        "Agency ID '%s' was not found in agency table, "
                        "you may want to update the routes database table "
                        "to fix this missing reference",
                        self._route.agency_id,
                    )
                    # False (not None) marks the lookup as attempted, so the
                    # "exactly once" guard above never retries it.
                    self._agency = False
            # Assign attributes, icon and name
            self.update_attributes()
            if self._route:
                self._icon = ICONS.get(self._route.route_type, ICON)
            else:
                self._icon = ICON
            name = (
                f"{getattr(self._agency, 'agency_name', DEFAULT_NAME)} "
                f"{self.origin} to {self.destination} next departure"
            )
            if not self._departure:
                name = f"{DEFAULT_NAME}"
            self._name = self._custom_name or name
    def update_attributes(self) -> None:
        """Update state attributes."""
        # Add departure information
        if self._departure:
            self._attributes[ATTR_ARRIVAL] = dt_util.as_utc(
                self._departure["arrival_time"]
            ).isoformat()
            self._attributes[ATTR_DAY] = self._departure["day"]
            # "first"/"last" may be None (unknown for that service day);
            # drop the attribute rather than publish None.
            if self._departure[ATTR_FIRST] is not None:
                self._attributes[ATTR_FIRST] = self._departure["first"]
            elif ATTR_FIRST in self._attributes:
                del self._attributes[ATTR_FIRST]
            if self._departure[ATTR_LAST] is not None:
                self._attributes[ATTR_LAST] = self._departure["last"]
            elif ATTR_LAST in self._attributes:
                del self._attributes[ATTR_LAST]
        else:
            # No departure: remove stale departure-derived attributes.
            if ATTR_ARRIVAL in self._attributes:
                del self._attributes[ATTR_ARRIVAL]
            if ATTR_DAY in self._attributes:
                del self._attributes[ATTR_DAY]
            if ATTR_FIRST in self._attributes:
                del self._attributes[ATTR_FIRST]
            if ATTR_LAST in self._attributes:
                del self._attributes[ATTR_LAST]
        # Add contextual information
        self._attributes[ATTR_OFFSET] = self._offset.total_seconds() / 60
        if self._state is None:
            self._attributes[ATTR_INFO] = (
                "No more departures"
                if self._include_tomorrow
                else "No more departures today"
            )
        elif ATTR_INFO in self._attributes:
            del self._attributes[ATTR_INFO]
        if self._agency:
            self._attributes[ATTR_ATTRIBUTION] = self._agency.agency_name
        elif ATTR_ATTRIBUTION in self._attributes:
            del self._attributes[ATTR_ATTRIBUTION]
        # Add extra metadata
        # The "key" sentinel checks whether a record's fields were already
        # appended on a previous refresh; stops/agency never change, so
        # they are appended at most once.
        key = "agency_id"
        if self._agency and key not in self._attributes:
            self.append_keys(self.dict_for_table(self._agency), "Agency")
        key = "origin_station_stop_id"
        if self._origin and key not in self._attributes:
            self.append_keys(self.dict_for_table(self._origin), "Origin Station")
            self._attributes[ATTR_LOCATION_ORIGIN] = LOCATION_TYPE_OPTIONS.get(
                self._origin.location_type, LOCATION_TYPE_DEFAULT
            )
            self._attributes[ATTR_WHEELCHAIR_ORIGIN] = WHEELCHAIR_BOARDING_OPTIONS.get(
                self._origin.wheelchair_boarding, WHEELCHAIR_BOARDING_DEFAULT
            )
        key = "destination_station_stop_id"
        if self._destination and key not in self._attributes:
            self.append_keys(
                self.dict_for_table(self._destination), "Destination Station"
            )
            self._attributes[ATTR_LOCATION_DESTINATION] = LOCATION_TYPE_OPTIONS.get(
                self._destination.location_type, LOCATION_TYPE_DEFAULT
            )
            self._attributes[
                ATTR_WHEELCHAIR_DESTINATION
            ] = WHEELCHAIR_BOARDING_OPTIONS.get(
                self._destination.wheelchair_boarding, WHEELCHAIR_BOARDING_DEFAULT
            )
        # Manage Route metadata
        key = "route_id"
        if not self._route and key in self._attributes:
            self.remove_keys("Route")
        elif self._route and (
            key not in self._attributes or self._attributes[key] != self._route.route_id
        ):
            self.append_keys(self.dict_for_table(self._route), "Route")
            self._attributes[ATTR_ROUTE_TYPE] = ROUTE_TYPE_OPTIONS[
                self._route.route_type
            ]
        # Manage Trip metadata
        key = "trip_id"
        if not self._trip and key in self._attributes:
            self.remove_keys("Trip")
        elif self._trip and (
            key not in self._attributes or self._attributes[key] != self._trip.trip_id
        ):
            self.append_keys(self.dict_for_table(self._trip), "Trip")
            self._attributes[ATTR_BICYCLE] = BICYCLE_ALLOWED_OPTIONS.get(
                self._trip.bikes_allowed, BICYCLE_ALLOWED_DEFAULT
            )
            self._attributes[ATTR_WHEELCHAIR] = WHEELCHAIR_ACCESS_OPTIONS.get(
                self._trip.wheelchair_accessible, WHEELCHAIR_ACCESS_DEFAULT
            )
        # Manage Stop Times metadata
        prefix = "origin_stop"
        if self._departure:
            self.append_keys(self._departure["origin_stop_time"], prefix)
            self._attributes[ATTR_DROP_OFF_ORIGIN] = DROP_OFF_TYPE_OPTIONS.get(
                self._departure["origin_stop_time"]["Drop Off Type"],
                DROP_OFF_TYPE_DEFAULT,
            )
            self._attributes[ATTR_PICKUP_ORIGIN] = PICKUP_TYPE_OPTIONS.get(
                self._departure["origin_stop_time"]["Pickup Type"], PICKUP_TYPE_DEFAULT
            )
            self._attributes[ATTR_TIMEPOINT_ORIGIN] = TIMEPOINT_OPTIONS.get(
                self._departure["origin_stop_time"]["Timepoint"], TIMEPOINT_DEFAULT
            )
        else:
            self.remove_keys(prefix)
        prefix = "destination_stop"
        if self._departure:
            self.append_keys(self._departure["destination_stop_time"], prefix)
            self._attributes[ATTR_DROP_OFF_DESTINATION] = DROP_OFF_TYPE_OPTIONS.get(
                self._departure["destination_stop_time"]["Drop Off Type"],
                DROP_OFF_TYPE_DEFAULT,
            )
            self._attributes[ATTR_PICKUP_DESTINATION] = PICKUP_TYPE_OPTIONS.get(
                self._departure["destination_stop_time"]["Pickup Type"],
                PICKUP_TYPE_DEFAULT,
            )
            self._attributes[ATTR_TIMEPOINT_DESTINATION] = TIMEPOINT_OPTIONS.get(
                self._departure["destination_stop_time"]["Timepoint"], TIMEPOINT_DEFAULT
            )
        else:
            self.remove_keys(prefix)
    @staticmethod
    def dict_for_table(resource: Any) -> dict:
        """Return a dictionary for the SQLAlchemy resource given."""
        return {
            col: getattr(resource, col) for col in resource.__table__.columns.keys()
        }
    def append_keys(self, resource: dict, prefix: str | None = None) -> None:
        """Properly format key val pairs to append to attributes."""
        for attr, val in resource.items():
            # Skip empty values and the internal pygtfs feed_id column.
            if val == "" or val is None or attr == "feed_id":
                continue
            key = attr
            if prefix and not key.startswith(prefix):
                key = f"{prefix} {key}"
            key = slugify(key)
            self._attributes[key] = val
    def remove_keys(self, prefix: str) -> None:
        """Remove attributes whose key starts with prefix."""
        self._attributes = {
            k: v for k, v in self._attributes.items() if not k.startswith(prefix)
        }