text stringlengths 4 1.02M | meta dict |
|---|---|
"""Grab bag file for transaction."""
import logging
from google.appengine.api import datastore_errors
from google.appengine.ext import ndb
from google.appengine.runtime import apiproxy_errors
__all__ = [
'CommitError',
'transaction',
'transaction_async',
]
class CommitError(Exception):
  """Raised when a transaction failed, although it may actually have committed.

  Callers that need certainty should run a follow-up transaction to check
  whether the original one actually went through.
  """
@ndb.tasklet
def transaction_async(callback, **ctx_options):
  """Runs an ndb transaction, funnelling every failure into CommitError.

  Arguments:
    callback: function to run in the transaction. See
      https://cloud.google.com/appengine/docs/python/ndb/functions for more
      details.

  Defaults the ``retries`` option to 1 instead of ndb's usual 3.
  """
  ctx_options.setdefault('retries', 1)
  try:
    result = yield ndb.transaction_async(callback, **ctx_options)
  except (
      datastore_errors.InternalError,
      datastore_errors.Timeout,
      datastore_errors.TransactionFailedError) as exc:
    # https://cloud.google.com/appengine/docs/python/datastore/transactions
    # states the result is ambiguous, it could have succeeded.
    logging.info('Transaction likely failed: %s', exc)
    raise CommitError(exc)
  except (
      apiproxy_errors.CancelledError,
      datastore_errors.BadRequestError,
      RuntimeError) as exc:
    logging.info('Transaction failure: %s', exc)
    raise CommitError(exc)
  # ndb.Return is never caught by the clauses above, so raising it outside
  # the try block is equivalent to the original placement.
  raise ndb.Return(result)
def transaction(callback, **ctx_options):
  """Blocking wrapper around transaction_async(); returns its result."""
  return transaction_async(callback, **ctx_options).get_result()
| {
"content_hash": "67d577d092560d6fce3b6187f67e2338",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 79,
"avg_line_length": 28.35593220338983,
"alnum_prop": 0.7184698147041243,
"repo_name": "madecoste/swarming",
"id": "7ec08a19af400d046fb96f123aba3a938e20a284",
"size": "1840",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "appengine/components/components/datastore_utils/txn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3014"
},
{
"name": "HTML",
"bytes": "249103"
},
{
"name": "JavaScript",
"bytes": "925519"
},
{
"name": "Protocol Buffer",
"bytes": "8868"
},
{
"name": "Python",
"bytes": "1495031"
},
{
"name": "Shell",
"bytes": "1267"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
#http://www.w3.org/TR/2009/WD-xmlschema11-2-20091203/type-hierarchy-200901.longdesc.html
#https://www.w3.org/TR/xmlschema11-2/#built-in-datatypes
class Type(object):
    """Abstract base for XSD datatype detectors.

    Subclasses implement ``_isType`` (membership test) and optionally
    ``convert`` (normalise the value before passing it to sub-detectors).
    """

    def type(self):
        """Return the detector's name (its class name)."""
        return self.__class__.__name__

    def convert(self, value):
        """Return the value unchanged; subclasses may normalise it."""
        return value

    def detectType(self, value):
        """Return the detected type name(s) for ``value``; abstract here."""
        raise NotImplementedError('Must provide implementation in subclass.')

    def _isType(self, value):
        """Return whether ``value`` belongs to this type; abstract here."""
        raise NotImplementedError('Must provide implementation in subclass.')
class SingleSubType(Type):
    """Type detector that optionally delegates to one child detector."""

    def __init__(self, subType=None):
        self._subType = subType

    def detectType(self, value):
        """Return a list of detected type names, or None if not this type."""
        if not self._isType(value):
            return None
        detected = [self.type()]
        # Convert unconditionally (mirrors the original flow, in case a
        # subclass's convert() has side effects).
        converted = self.convert(value)
        if self._subType is not None:
            child_result = self._subType.detectType(converted)
            if child_result:
                # Child detectors normally return lists; be defensive about
                # scalar returns.
                if isinstance(child_result, list):
                    detected.extend(child_result)
                else:
                    detected.append(child_result)
        return detected
class MultiSubType(Type):
    """Type detector that optionally delegates to several child detectors."""

    def __init__(self, subTypes=None):
        self._subTypes = subTypes

    def detectType(self, value):
        """Return a list of detected type names, or None if not this type."""
        if not self._isType(value):
            return None
        detected = [self.type()]
        converted = self.convert(value)
        if self._subTypes is not None:
            for child in self._subTypes:
                child_result = child.detectType(converted)
                if not child_result:
                    continue
                # Child detectors normally return lists; be defensive about
                # scalar returns.
                if isinstance(child_result, list):
                    detected.extend(child_result)
                else:
                    detected.append(child_result)
        return detected
| {
"content_hash": "81dfe2bf221371a729349d31c364c302",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 88,
"avg_line_length": 26.774647887323944,
"alnum_prop": 0.5318253550762756,
"repo_name": "ODInfoBiz/pyyacp",
"id": "c9b423485827f4efaae6aca9de9481c827f9c768",
"size": "1943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyyacp/profiler/xsd11/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1612220"
},
{
"name": "HTML",
"bytes": "3374006"
},
{
"name": "JavaScript",
"bytes": "3349321"
},
{
"name": "Python",
"bytes": "159798"
}
],
"symlink_target": ""
} |
"""Makes figure with PMM composites of extreme examples for GridRad model.
PMM = probability-matched means
"Extreme examples" include best hits, best correct nulls, worst misses, worst
false alarms, high-probability examples (regardless of true label), and
low-probability examples (regardless of true label).
"""
import pickle
import os.path
import argparse
import numpy
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
from gewittergefahr.deep_learning import cnn
from gewittergefahr.deep_learning import model_interpretation
from gewittergefahr.deep_learning import training_validation_io as trainval_io
from gewittergefahr.plotting import imagemagick_utils
from gewittergefahr.scripts import plot_input_examples as plot_examples
# Only these radar heights (metres above ground level) are plotted;
# `_read_composite` subsets each composite's radar matrix to these heights.
RADAR_HEIGHTS_M_AGL = numpy.array([2000, 6000, 10000], dtype=int)

# Keys expected in each input Pickle file's dictionary.
MODEL_FILE_KEY = model_interpretation.MODEL_FILE_KEY
MEAN_PREDICTOR_MATRICES_KEY = model_interpretation.MEAN_PREDICTOR_MATRICES_KEY
MEAN_SOUNDING_PRESSURES_KEY = model_interpretation.MEAN_SOUNDING_PRESSURES_KEY

# Plotting constants (font sizes in points).
COLOUR_BAR_LENGTH = 0.25
PANEL_NAME_FONT_SIZE = 30
COLOUR_BAR_FONT_SIZE = 25
SOUNDING_FONT_SIZE = 30

# ImageMagick settings used to overlay figure titles.
CONVERT_EXE_NAME = '/usr/bin/convert'
TITLE_FONT_SIZE = 150
TITLE_FONT_TYPE = 'DejaVu-Sans-Bold'

# Border widths (pixels) for whitespace-trimming.
# NOTE(review): these two constants appear unused in this script — confirm
# before removing.
BORDER_WIDTH_SANS_TITLE_PX = 300
BORDER_WIDTH_WITH_TITLE_PX = 10

FIGURE_RESOLUTION_DPI = 300
CONCAT_FIGURE_SIZE_PX = int(1e7)

# Names of the command-line input arguments.
INPUT_FILES_ARG_NAME = 'input_composite_file_names'
COMPOSITE_NAMES_ARG_NAME = 'composite_names'
OUTPUT_DIR_ARG_NAME = 'output_dir_name'

INPUT_FILES_HELP_STRING = (
    'List of paths to input files. Each should contain a PMM composite over '
    'many examples (storm objects). Specifically, each should be a Pickle file'
    ' with one dictionary, containing the keys "{0:s}" and "{1:s}".'
).format(MEAN_PREDICTOR_MATRICES_KEY, MODEL_FILE_KEY)
COMPOSITE_NAMES_HELP_STRING = (
    'List of PMM-composite names (one per input file). The list should be '
    'space-separated. In each list item, underscores will be replaced with '
    'spaces.')
OUTPUT_DIR_HELP_STRING = (
    'Name of output directory (figures will be saved here).')

# Command-line interface definition.
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
    '--' + INPUT_FILES_ARG_NAME, type=str, nargs='+', required=True,
    help=INPUT_FILES_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
    '--' + COMPOSITE_NAMES_ARG_NAME, type=str, nargs='+', required=True,
    help=COMPOSITE_NAMES_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
    '--' + OUTPUT_DIR_ARG_NAME, type=str, required=True,
    help=OUTPUT_DIR_HELP_STRING)
def _read_composite(pickle_file_name):
    """Reads PMM composite of examples (storm objects) from Pickle file.

    T = number of input tensors to model
    H_s = number of sounding heights

    :param pickle_file_name: Path to input file.
    :return: mean_predictor_matrices: length-T list of numpy arrays, where the
        [i]th item has dimensions of the [i]th input tensor to the model.
    :return: model_metadata_dict: Dictionary returned by
        `cnn.read_model_metadata`.
    :return: mean_sounding_pressures_pa: numpy array (length H_s) of
        sounding pressures.
    """

    print('Reading data from: "{0:s}"...'.format(pickle_file_name))

    # Context manager guarantees the file is closed even if unpickling
    # raises; the previous explicit open/close leaked the handle on error.
    with open(pickle_file_name, 'rb') as file_handle:
        composite_dict = pickle.load(file_handle)

    mean_predictor_matrices = composite_dict[MEAN_PREDICTOR_MATRICES_KEY]
    mean_sounding_pressures_pa = composite_dict[MEAN_SOUNDING_PRESSURES_KEY]

    # Add a leading axis of length 1 to each matrix — presumably the
    # example/batch axis expected downstream (TODO confirm against
    # plot_input_examples).
    for i in range(len(mean_predictor_matrices)):
        mean_predictor_matrices[i] = numpy.expand_dims(
            mean_predictor_matrices[i], axis=0
        )

    model_file_name = composite_dict[MODEL_FILE_KEY]
    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0]
    )

    print('Reading metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY][
        trainval_io.UPSAMPLE_REFLECTIVITY_KEY
    ] = False

    # Subset the first predictor matrix to the heights in
    # RADAR_HEIGHTS_M_AGL, and record those heights in the metadata so the
    # plotting code labels panels consistently.
    all_radar_heights_m_agl = model_metadata_dict[
        cnn.TRAINING_OPTION_DICT_KEY][trainval_io.RADAR_HEIGHTS_KEY]
    good_flags = numpy.array(
        [h in RADAR_HEIGHTS_M_AGL for h in all_radar_heights_m_agl], dtype=bool
    )
    good_indices = numpy.where(good_flags)[0]

    mean_predictor_matrices[0] = mean_predictor_matrices[0][
        ..., good_indices, :]
    model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY][
        trainval_io.RADAR_HEIGHTS_KEY
    ] = RADAR_HEIGHTS_M_AGL

    return (mean_predictor_matrices, model_metadata_dict,
            mean_sounding_pressures_pa)
def _overlay_text(
        image_file_name, x_offset_from_center_px, y_offset_from_top_px,
        text_string):
    """Overlays text on image, in place, via ImageMagick's `convert`.

    :param image_file_name: Path to image file.
    :param x_offset_from_center_px: Center-relative x-coordinate (pixels).
    :param y_offset_from_top_px: Top-relative y-coordinate (pixels).
    :param text_string: String to overlay.
    :raises: ValueError: if ImageMagick command (which is ultimately a Unix
        command) fails.
    """

    command_string = (
        '"{0:s}" "{1:s}" -gravity north -pointsize {2:d} -font "{3:s}" '
        '-fill "rgb(0, 0, 0)" -annotate {4:+d}{5:+d} "{6:s}" "{1:s}"'
    ).format(
        CONVERT_EXE_NAME, image_file_name, TITLE_FONT_SIZE, TITLE_FONT_TYPE,
        x_offset_from_center_px, y_offset_from_top_px, text_string
    )

    # A non-zero exit code means the convert call failed.
    if os.system(command_string) != 0:
        raise ValueError(imagemagick_utils.ERROR_STRING)
def _plot_composite(
        composite_file_name, composite_name_abbrev, composite_name_verbose,
        output_dir_name):
    """Plots one composite (one radar figure plus one sounding figure).

    :param composite_file_name: Path to input file. Will be read by
        `_read_composite`.
    :param composite_name_abbrev: Abbreviated name for composite. Will be used
        in names of output files.
    :param composite_name_verbose: Verbose name for composite. Will be used as
        figure title.
    :param output_dir_name: Path to output directory. Figures will be saved
        here.
    :return: radar_figure_file_name: Path to file with radar figure for this
        composite.
    :return: sounding_figure_file_name: Path to file with sounding figure for
        this composite.
    """

    mean_predictor_matrices, model_metadata_dict, mean_sounding_pressures_pa = (
        _read_composite(composite_file_name)
    )

    radar_field_names = model_metadata_dict[
        cnn.TRAINING_OPTION_DICT_KEY][trainval_io.RADAR_FIELDS_KEY]
    radar_heights_m_agl = model_metadata_dict[
        cnn.TRAINING_OPTION_DICT_KEY][trainval_io.RADAR_HEIGHTS_KEY]

    num_radar_fields = len(radar_field_names)
    num_radar_heights = len(radar_heights_m_agl)

    # Plot radar images and sounding for the composite (one panel row per
    # radar height).
    handle_dict = plot_examples.plot_one_example(
        list_of_predictor_matrices=mean_predictor_matrices,
        model_metadata_dict=model_metadata_dict, pmm_flag=True,
        plot_sounding=True,
        sounding_pressures_pascals=mean_sounding_pressures_pa,
        allow_whitespace=True, plot_panel_names=True,
        panel_name_font_size=PANEL_NAME_FONT_SIZE,
        add_titles=False, label_colour_bars=True,
        colour_bar_length=COLOUR_BAR_LENGTH,
        colour_bar_font_size=COLOUR_BAR_FONT_SIZE,
        sounding_font_size=SOUNDING_FONT_SIZE,
        num_panel_rows=num_radar_heights)

    # Save the sounding figure, then resize, overlay the title, and trim.
    sounding_figure_file_name = '{0:s}/{1:s}_sounding.jpg'.format(
        output_dir_name, composite_name_abbrev)
    print('Saving figure to: "{0:s}"...'.format(sounding_figure_file_name))

    sounding_figure_object = handle_dict[plot_examples.SOUNDING_FIGURE_KEY]
    sounding_figure_object.savefig(
        sounding_figure_file_name, dpi=FIGURE_RESOLUTION_DPI,
        pad_inches=0, bbox_inches='tight')
    pyplot.close(sounding_figure_object)

    imagemagick_utils.resize_image(
        input_file_name=sounding_figure_file_name,
        output_file_name=sounding_figure_file_name,
        output_size_pixels=CONCAT_FIGURE_SIZE_PX)
    # First trim leaves a border wide enough for the title text.
    imagemagick_utils.trim_whitespace(
        input_file_name=sounding_figure_file_name,
        output_file_name=sounding_figure_file_name,
        border_width_pixels=TITLE_FONT_SIZE + 25)
    _overlay_text(
        image_file_name=sounding_figure_file_name,
        x_offset_from_center_px=0, y_offset_from_top_px=0,
        text_string=composite_name_verbose)
    imagemagick_utils.trim_whitespace(
        input_file_name=sounding_figure_file_name,
        output_file_name=sounding_figure_file_name,
        border_width_pixels=10)

    # Save one radar panel per field, then concatenate them side by side.
    radar_figure_objects = handle_dict[plot_examples.RADAR_FIGURES_KEY]
    panel_file_names = [None] * num_radar_fields

    for j in range(num_radar_fields):
        panel_file_names[j] = '{0:s}/{1:s}_{2:s}.jpg'.format(
            output_dir_name, composite_name_abbrev,
            radar_field_names[j].replace('_', '-')
        )

        print('Saving figure to: "{0:s}"...'.format(panel_file_names[j]))
        radar_figure_objects[j].savefig(
            panel_file_names[j], dpi=FIGURE_RESOLUTION_DPI,
            pad_inches=0, bbox_inches='tight'
        )
        pyplot.close(radar_figure_objects[j])

    radar_figure_file_name = '{0:s}/{1:s}_radar.jpg'.format(
        output_dir_name, composite_name_abbrev)
    print('Concatenating panels to: "{0:s}"...'.format(radar_figure_file_name))

    imagemagick_utils.concatenate_images(
        input_file_names=panel_file_names,
        output_file_name=radar_figure_file_name,
        num_panel_rows=1, num_panel_columns=num_radar_fields,
        border_width_pixels=50)
    imagemagick_utils.resize_image(
        input_file_name=radar_figure_file_name,
        output_file_name=radar_figure_file_name,
        output_size_pixels=CONCAT_FIGURE_SIZE_PX)
    # Same resize/trim/title/trim pipeline as for the sounding figure.
    imagemagick_utils.trim_whitespace(
        input_file_name=radar_figure_file_name,
        output_file_name=radar_figure_file_name,
        border_width_pixels=TITLE_FONT_SIZE + 25)
    _overlay_text(
        image_file_name=radar_figure_file_name,
        x_offset_from_center_px=0, y_offset_from_top_px=0,
        text_string=composite_name_verbose)
    imagemagick_utils.trim_whitespace(
        input_file_name=radar_figure_file_name,
        output_file_name=radar_figure_file_name,
        border_width_pixels=10)

    return radar_figure_file_name, sounding_figure_file_name
def _run(composite_file_names, composite_names, output_dir_name):
    """Makes figure with extreme examples for GridRad model.

    This is effectively the main method.

    :param composite_file_names: See documentation at top of file.
    :param composite_names: Same.
    :param output_dir_name: Same.
    """

    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=output_dir_name)

    # There must be exactly one composite name per input file.
    num_composites = len(composite_file_names)
    error_checking.assert_is_numpy_array(
        numpy.array(composite_names),
        exact_dimensions=numpy.array([num_composites], dtype=int)
    )

    radar_panel_file_names = []
    sounding_panel_file_names = []

    # Plot each composite; underscores become hyphens in file names and
    # spaces in figure titles.
    for this_file_name, this_composite_name in zip(
            composite_file_names, composite_names):

        this_radar_file_name, this_sounding_file_name = _plot_composite(
            composite_file_name=this_file_name,
            composite_name_abbrev=this_composite_name.replace(
                '_', '-').lower(),
            composite_name_verbose=this_composite_name.replace('_', ' '),
            output_dir_name=output_dir_name)

        radar_panel_file_names.append(this_radar_file_name)
        sounding_panel_file_names.append(this_sounding_file_name)
        print('\n')

    radar_figure_file_name = '{0:s}/radar_concat.jpg'.format(output_dir_name)
    print('Concatenating panels to: "{0:s}"...'.format(radar_figure_file_name))

    # Arrange panels in a roughly square grid.
    num_panel_rows = int(numpy.floor(numpy.sqrt(num_composites)))
    num_panel_columns = int(numpy.ceil(float(num_composites) / num_panel_rows))

    imagemagick_utils.concatenate_images(
        input_file_names=radar_panel_file_names,
        output_file_name=radar_figure_file_name, num_panel_rows=num_panel_rows,
        num_panel_columns=num_panel_columns, border_width_pixels=100)
    imagemagick_utils.trim_whitespace(
        input_file_name=radar_figure_file_name,
        output_file_name=radar_figure_file_name,
        border_width_pixels=10)

    sounding_figure_file_name = '{0:s}/sounding_concat.jpg'.format(
        output_dir_name)
    print('Concatenating panels to: "{0:s}"...'.format(
        sounding_figure_file_name
    ))
    imagemagick_utils.concatenate_images(
        input_file_names=sounding_panel_file_names,
        output_file_name=sounding_figure_file_name,
        num_panel_rows=num_panel_rows, num_panel_columns=num_panel_columns,
        border_width_pixels=10)
if __name__ == '__main__':
    # Parse command-line args and hand them to the real main method.
    ARGS = INPUT_ARG_PARSER.parse_args()

    _run(
        composite_file_names=getattr(ARGS, INPUT_FILES_ARG_NAME),
        composite_names=getattr(ARGS, COMPOSITE_NAMES_ARG_NAME),
        output_dir_name=getattr(ARGS, OUTPUT_DIR_ARG_NAME))
| {
"content_hash": "53f713341e2177cadbbf9f1b17efe282",
"timestamp": "",
"source": "github",
"line_count": 368,
"max_line_length": 80,
"avg_line_length": 36.369565217391305,
"alnum_prop": 0.6792438732815301,
"repo_name": "thunderhoser/GewitterGefahr",
"id": "3edcd44d4d8b364da07f1a144f144eab740356ca",
"size": "13384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gewittergefahr/prediction_paper_2019/make_extreme_gridrad_figure.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "31275"
},
{
"name": "Python",
"bytes": "5661041"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import unittest
from pyoauth2.provider import AuthorizationProvider
class MockAuthorizationProvider(AuthorizationProvider):
    """Minimal concrete subclass of AuthorizationProvider, used by the tests
    below to exercise the base class's response helpers unmodified."""
    pass
class AuthorizationProviderTest(unittest.TestCase):
    """Unit tests for AuthorizationProvider's error-response helpers.

    Uses ``assertEqual`` rather than the deprecated ``assertEquals`` alias,
    which was removed in Python 3.12.
    """

    def setUp(self):
        self.provider = MockAuthorizationProvider()

    def test_make_redirect_error_response(self):
        """Errors are appended to the redirect URI's query string."""
        response = self.provider._make_redirect_error_response(
            'https://test.example.com/oauthredirect?param=1234',
            'some_error')

        self.assertEqual(302, response.status_code)
        self.assertEqual('https://test.example.com/oauthredirect?'
                         'param=1234&error=some_error',
                         response.headers['Location'])

    def test_make_json_error_response(self):
        """JSON error responses carry a 400 status and an error payload."""
        response = self.provider._make_json_error_response('some_error')
        self.assertEqual(400, response.status_code)

        # ``response.json`` may be a method or a plain attribute depending
        # on the response implementation; handle both.
        try:
            response_json = response.json()
        except TypeError:
            response_json = response.json

        self.assertEqual({'error': 'some_error'}, response_json)

    def test_get_authorization_code_invalid_response_type(self):
        """An unsupported response type redirects back with an error param."""
        response = self.provider.get_authorization_code(
            'foo', 'client12345', 'https://example.com/oauth')
        self.assertEqual(302, response.status_code)
        self.assertEqual('https://example.com/oauth?'
                         'error=unsupported_response_type',
                         response.headers['Location'])
| {
"content_hash": "c1e9237b29cfef0cb7a9ee77e1c7fdc2",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 77,
"avg_line_length": 36.116279069767444,
"alnum_prop": 0.6297488731487444,
"repo_name": "iamjakob/pyoauth2",
"id": "330a870ec53372971d409dd9f67d64ec81607201",
"size": "1553",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pyoauth2/tests/test_provider.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34829"
}
],
"symlink_target": ""
} |
from django.core.mail.backends import locmem
from base import BaseEmailBackend
class EmailBackend(locmem.EmailBackend, BaseEmailBackend):
    """In-memory email backend that renders template messages before
    delegating storage to Django's locmem backend."""

    def send_messages(self, email_messages):
        """Render the messages, then delegate to the parent backend.

        Returns the parent backend's result (Django email backends report
        the number of messages sent); the previous implementation dropped
        this return value.
        """
        email_messages = self.render_messages(email_messages)
        return super(EmailBackend, self).send_messages(email_messages)
| {
"content_hash": "5b7a9b15b87cdca4a4840db4e577ea96",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 63,
"avg_line_length": 38.875,
"alnum_prop": 0.77491961414791,
"repo_name": "benoitbar/django-template-mail",
"id": "87fd0941385de853fc14caa37e8ac9140c79ae53",
"size": "311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "templatemail/backends/locmem.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5544"
}
],
"symlink_target": ""
} |
from __future__ import division, unicode_literals
from future.builtins import str
from copy import copy
from django.contrib.contenttypes.fields import GenericRelation
from django.core.exceptions import ImproperlyConfigured, AppRegistryNotReady
from django.db.models import IntegerField, CharField, FloatField
from django.db.models.signals import post_save, post_delete
from mezzanine.utils.deprecation import get_related_model
class BaseGenericRelation(GenericRelation):
    """
    Extends ``GenericRelation`` to:

    - Add a consistent default value for ``object_id_field`` and
      check for a ``default_related_model`` attribute which can be
      defined on subclasses as a default for the ``to`` argument.

    - Add one or more custom fields to the model that the relation
      field is applied to, and then call a ``related_items_changed``
      method each time related items are saved or deleted, so that a
      calculated value can be stored against the custom fields since
      aggregates aren't available for GenericRelation instances.
    """

    # Mapping of field names to model fields that will be added. Keys may
    # contain a single "%s" placeholder, filled with the relation field's
    # name in ``contribute_to_class``.
    fields = {}

    def __init__(self, *args, **kwargs):
        """
        Set up some defaults and check for a ``default_related_model``
        attribute for the ``to`` argument.
        """
        kwargs.setdefault("object_id_field", "object_pk")
        to = getattr(self, "default_related_model", None)
        # Avoid having both a positional arg and a keyword arg for
        # the parameter ``to``
        if to and not args:
            kwargs.setdefault("to", to)
        try:
            # Check if ``related_model`` has been modified by a subclass
            self.related_model
        except (AppRegistryNotReady, AttributeError):
            # if not, all is good
            super(BaseGenericRelation, self).__init__(*args, **kwargs)
        else:
            # otherwise, warn the user to stick to the new (as of 4.0)
            # ``default_related_model`` attribute
            raise ImproperlyConfigured("BaseGenericRelation changed the "
                "way it handled a default ``related_model`` in mezzanine "
                "4.0. Please override ``default_related_model`` instead "
                "and do not tamper with django's ``related_model`` "
                "property anymore.")

    def contribute_to_class(self, cls, name):
        """
        Add each of the names and fields in the ``fields`` attribute
        to the model the relationship field is applied to, and set up
        the related item save and delete signals for calling
        ``related_items_changed``.
        """
        for field in cls._meta.many_to_many:
            if isinstance(field, self.__class__):
                # NOTE(review): the %s arguments below repeat
                # ``cls.__name__``, so the rendered message reads oddly
                # ("Cls.Cls, name.field"); left unchanged here since it is
                # only an error string.
                e = "Multiple %s fields are not supported (%s.%s, %s.%s)" % (
                    self.__class__.__name__, cls.__name__, cls.__name__,
                    name, field.name)
                raise ImproperlyConfigured(e)
        self.related_field_name = name
        super(BaseGenericRelation, self).contribute_to_class(cls, name)
        # Not applicable to abstract classes, and in fact will break.
        if not cls._meta.abstract:
            for (name_string, field) in self.fields.items():
                if "%s" in name_string:
                    name_string = name_string % name
                # Skip fields that already exist on the model.
                extant_fields = cls._meta._forward_fields_map
                if name_string in extant_fields:
                    continue
                if field.verbose_name is None:
                    field.verbose_name = self.verbose_name
                cls.add_to_class(name_string, copy(field))
            # Add a getter function to the model we can use to retrieve
            # the field/manager by name.
            getter_name = "get_%s_name" % self.__class__.__name__.lower()
            cls.add_to_class(getter_name, lambda self: name)
            # Recalculate the custom fields whenever a related item is
            # saved or deleted.
            sender = get_related_model(self)
            post_save.connect(self._related_items_changed, sender=sender)
            post_delete.connect(self._related_items_changed, sender=sender)

    def _related_items_changed(self, **kwargs):
        """
        Ensure that the given related item is actually for the model
        this field applies to, and pass the instance to the real
        ``related_items_changed`` handler.
        """
        for_model = kwargs["instance"].content_type.model_class()
        if for_model and issubclass(for_model, self.model):
            instance_id = kwargs["instance"].object_pk
            try:
                instance = for_model.objects.get(id=instance_id)
            except self.model.DoesNotExist:
                # Instance itself was deleted - signals are irrelevant.
                return
            if hasattr(instance, "get_content_model"):
                instance = instance.get_content_model()
            related_manager = getattr(instance, self.related_field_name)
            self.related_items_changed(instance, related_manager)

    def related_items_changed(self, instance, related_manager):
        """
        Can be implemented by subclasses - called whenever the
        state of related items change, eg they're saved or deleted.
        The instance for this field and the related manager for the
        field are passed as arguments.
        """
        pass

    def value_from_object(self, obj):
        """
        Returns the value of this field in the given model instance.
        See: https://code.djangoproject.com/ticket/22552
        """
        return getattr(obj, self.attname).all()
class CommentsField(BaseGenericRelation):
    """
    Maintains a denormalised comment total in the
    ``COMMENTS_FIELD_NAME_count`` field, refreshed whenever a comment is
    saved or deleted.
    """

    default_related_model = "generic.ThreadedComment"
    fields = {"%s_count": IntegerField(editable=False, default=0)}

    def related_items_changed(self, instance, related_manager):
        """
        Writes the comment count onto the instance. Managers may expose a
        ``count_queryset`` method implementing custom count logic;
        otherwise the plain queryset count is used.
        """
        try:
            num_comments = related_manager.count_queryset()
        except AttributeError:
            num_comments = related_manager.count()

        count_field_name = list(self.fields)[0] % self.related_field_name
        setattr(instance, count_field_name, num_comments)
        instance.save()
class KeywordsField(BaseGenericRelation):
    """
    Stores the keywords as a single string into the
    ``KEYWORDS_FIELD_NAME_string`` field for convenient access when
    searching.
    """

    default_related_model = "generic.AssignedKeyword"
    fields = {"%s_string": CharField(editable=False, blank=True,
                                     max_length=500)}

    def __init__(self, *args, **kwargs):
        """
        Mark the field as editable so that it can be specified in
        admin class fieldsets and pass validation, and also so that
        it shows up in the admin form.
        """
        super(KeywordsField, self).__init__(*args, **kwargs)
        self.editable = True

    def formfield(self, **kwargs):
        """
        Provide the custom form widget for the admin, since there
        isn't a form field mapped to ``GenericRelation`` model fields.
        """
        # NOTE(review): deferred import, presumably to avoid an import
        # cycle with mezzanine.generic.forms — confirm.
        from mezzanine.generic.forms import KeywordsWidget
        kwargs["widget"] = KeywordsWidget
        return super(KeywordsField, self).formfield(**kwargs)

    def save_form_data(self, instance, data):
        """
        The ``KeywordsWidget`` field will return data as a string of
        comma separated IDs for the ``Keyword`` model - convert these
        into actual ``AssignedKeyword`` instances. Also delete
        ``Keyword`` instances if their last related ``AssignedKeyword``
        instance is being removed.
        """
        from mezzanine.generic.models import Keyword
        related_manager = getattr(instance, self.name)
        # Get a list of Keyword IDs being removed.
        old_ids = [str(a.keyword_id) for a in related_manager.all()]
        new_ids = data.split(",")
        removed_ids = set(old_ids) - set(new_ids)
        # Remove current AssignedKeyword instances.
        related_manager.all().delete()
        # Convert the data into AssignedKeyword instances.
        if data:
            data = [related_manager.create(keyword_id=i) for i in new_ids]
        # Remove keywords that are no longer assigned to anything.
        Keyword.objects.delete_unused(removed_ids)
        super(KeywordsField, self).save_form_data(instance, data)

    def contribute_to_class(self, cls, name):
        """
        Swap out any reference to ``KeywordsField`` with the
        ``KEYWORDS_FIELD_string`` field in ``search_fields``.
        """
        super(KeywordsField, self).contribute_to_class(cls, name)
        string_field_name = list(self.fields.keys())[0] % \
                self.related_field_name
        if hasattr(cls, "search_fields") and name in cls.search_fields:
            try:
                # search_fields may be a dict mapping names to weights...
                weight = cls.search_fields[name]
            except TypeError:
                # search_fields is a sequence.
                index = cls.search_fields.index(name)
                search_fields_type = type(cls.search_fields)
                cls.search_fields = list(cls.search_fields)
                cls.search_fields[index] = string_field_name
                cls.search_fields = search_fields_type(cls.search_fields)
            else:
                # ...in which case the weight is carried over to the
                # replacement field name.
                del cls.search_fields[name]
                cls.search_fields[string_field_name] = weight

    def related_items_changed(self, instance, related_manager):
        """
        Stores the keywords as a single string for searching.
        """
        assigned = related_manager.select_related("keyword")
        keywords = " ".join([str(a.keyword) for a in assigned])
        string_field_name = list(self.fields.keys())[0] % \
                self.related_field_name
        # Avoid a redundant save when the string is unchanged.
        if getattr(instance, string_field_name) != keywords:
            setattr(instance, string_field_name, keywords)
            instance.save()
class RatingField(BaseGenericRelation):
    """
    Maintains denormalised rating statistics in the
    ``RATING_FIELD_NAME_count``, ``RATING_FIELD_NAME_sum`` and
    ``RATING_FIELD_NAME_average`` fields, refreshed whenever a rating is
    saved or deleted.
    """

    default_related_model = "generic.Rating"
    fields = {"%s_count": IntegerField(default=0, editable=False),
              "%s_sum": IntegerField(default=0, editable=False),
              "%s_average": FloatField(default=0, editable=False)}

    def related_items_changed(self, instance, related_manager):
        """
        Recomputes and stores the rating count, sum and average.
        """
        num_ratings = 0
        rating_sum = 0
        for rating in related_manager.all():
            num_ratings += 1
            rating_sum += rating.value

        # Average defaults to 0 when there are no ratings at all.
        rating_average = rating_sum / num_ratings if num_ratings else 0

        field_prefix = self.related_field_name
        setattr(instance, "%s_count" % field_prefix, num_ratings)
        setattr(instance, "%s_sum" % field_prefix, rating_sum)
        setattr(instance, "%s_average" % field_prefix, rating_average)
        instance.save()
| {
"content_hash": "b9cd2ae88cc5cc52b3a4027db71942ee",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 77,
"avg_line_length": 42.14661654135338,
"alnum_prop": 0.6124342163946125,
"repo_name": "wbtuomela/mezzanine",
"id": "d27e48402c8065d176d6c264390df68457e31b2e",
"size": "11211",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "mezzanine/generic/fields.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "60317"
},
{
"name": "HTML",
"bytes": "79259"
},
{
"name": "JavaScript",
"bytes": "453209"
},
{
"name": "Python",
"bytes": "706912"
}
],
"symlink_target": ""
} |
"""Test the functionality of the detect_repo module.
This will consist of the following functional test:
1. Determine if an OSS-Fuzz projects main repo can be detected from example
commits.
2. Determine if an OSS-Fuzz project main repo can be detected from a
repo name.
"""
import os
import re
import sys
import tempfile
import unittest
import detect_repo
# Appending to path for access to repo_manager module.
# pylint: disable=wrong-import-position
sys.path.append(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
import repo_manager
import test_repos
# pylint: enable=wrong-import-position
class DetectRepoIntegrationTest(unittest.TestCase):
  """Class to test the functionality of the detect_repo module."""

  def test_infer_main_repo_from_commit(self):
    """Tests that the main repo can be inferred based on an example commit."""
    with tempfile.TemporaryDirectory() as tmp_dir:
      # Construct example repo's to check for commits.
      for example_repo in test_repos.TEST_REPOS:
        # Clone each example repo into tmp_dir so detect_repo can search it.
        repo_manager.RepoManager(example_repo.git_url, tmp_dir)
        self.check_with_repo(example_repo.git_url,
                             example_repo.git_repo_name,
                             tmp_dir,
                             commit=example_repo.old_commit)

  def test_infer_main_repo_from_name(self):
    """Tests that the main project repo can be inferred from a repo name."""
    with tempfile.TemporaryDirectory() as tmp_dir:
      for example_repo in test_repos.TEST_REPOS:
        repo_manager.RepoManager(example_repo.git_url, tmp_dir)
        # No commit given, so detect_repo must fall back to the repo name.
        self.check_with_repo(example_repo.git_url, example_repo.git_repo_name,
                             tmp_dir)

  def check_with_repo(self, repo_origin, repo_name, tmp_dir, commit=None):
    """Checks the detect repo's main method for a specific set of inputs.

    Args:
      repo_origin: URL of the git repo.
      repo_name: The name of the directory it is cloned to.
      tmp_dir: The location of the directory of git repos to be searched.
      commit: The commit that should be used to look up the repo.
    """
    command = ['python3', 'detect_repo.py', '--src_dir', tmp_dir]
    if commit:
      command += ['--example_commit', commit]
    else:
      command += ['--repo_name', repo_name]
    out, _ = detect_repo.execute(command,
                                 location=os.path.dirname(
                                     os.path.realpath(__file__)))
    # On success, detect_repo prints "Detected repo: <origin> <path>".
    match = re.search(r'\bDetected repo: ([^ ]+) ([^ ]+)', out.rstrip())
    if match and match.group(1) and match.group(2):
      self.assertEqual(match.group(1), repo_origin)
      self.assertEqual(match.group(2), os.path.join(tmp_dir, repo_name))
    else:
      # Nothing detected: the expected origin/name must be None for the
      # check to pass.
      self.assertIsNone(repo_origin)
      self.assertIsNone(repo_name)
if __name__ == '__main__':
  # Run all test methods in this module via unittest's CLI entry point.
  unittest.main()
| {
"content_hash": "8909f4bf185ea084d764707dab5bac57",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 78,
"avg_line_length": 36.22784810126582,
"alnum_prop": 0.6477987421383647,
"repo_name": "FeliciaLim/oss-fuzz",
"id": "4886522ac3dd59d1130f32b90ea010e4f73fd5d9",
"size": "3437",
"binary": false,
"copies": "1",
"ref": "refs/heads/opus",
"path": "infra/base-images/base-builder/detect_repo_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "7113"
},
{
"name": "C++",
"bytes": "29021"
},
{
"name": "Groovy",
"bytes": "8689"
},
{
"name": "HTML",
"bytes": "603"
},
{
"name": "Python",
"bytes": "25585"
},
{
"name": "Shell",
"bytes": "70002"
}
],
"symlink_target": ""
} |
# Re-export the public inference API at package level so callers can do
# ``from graphmodels.inference import SumProductInference`` directly.
from .inference import InferenceStrategy, SumProductInference, NaiveInference, random_ordering
"content_hash": "ce89d32cc795e3e20d178f99368c5914",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 94,
"avg_line_length": 94,
"alnum_prop": 0.8829787234042553,
"repo_name": "DLunin/pygraphmodels",
"id": "4055af9ae8a8d2c9123d5e21fd09d46a748d9645",
"size": "94",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphmodels/inference/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "9868"
},
{
"name": "Cuda",
"bytes": "12568"
},
{
"name": "Jupyter Notebook",
"bytes": "701970"
},
{
"name": "Makefile",
"bytes": "2047"
},
{
"name": "Python",
"bytes": "49367"
}
],
"symlink_target": ""
} |
import random, time, os, datetime
import boto
from boto.sdb.persist.object import SDBObject
from boto.sdb.persist.property import *
class Identifier(object):
    """Generates identifiers of the form ``<prefix><hex-timestamp>-<8 hex chars>``."""
    _hex_digits = '0123456789abcdef'
    @classmethod
    def gen(cls, prefix):
        """Return a new pseudo-unique identifier starting with *prefix*.

        BUG FIX: the original body referenced an undefined name ``ts`` and
        ignored ``prefix`` entirely, so every call raised NameError.  The
        identifier now combines *prefix*, the current UNIX time rendered in
        hex, and an 8-character random hex suffix.
        """
        ts = '%s%x' % (prefix, int(time.time()))
        suffix = ''.join(random.choice(cls._hex_digits) for _ in range(8))
        return ts + '-' + suffix
class Version(SDBObject):
    """A named, timestamped version of a PartitionDB (persisted in SimpleDB)."""
    # SimpleDB-persisted attributes (descriptor-backed).
    name = StringProperty()
    pdb = ObjectProperty(ref_class=SDBObject)
    date = DateTimeProperty()
    def __init__(self, id=None, manager=None):
        # When id is None a brand-new Version is created with a generated
        # name and the current timestamp; otherwise the persistence layer
        # rehydrates an existing record.
        SDBObject.__init__(self, id, manager)
        if id == None:
            self.name = Identifier.gen('v')
            self.date = datetime.datetime.now()
            print 'created Version %s' % self.name
    def partitions(self):
        """
        Return an iterator containing all Partition objects related to this Version.
        @rtype: iterator of L{Partitions<boto.mapreduce.partitiondb.Partition>}
        @return: The Partitions in this Version
        """
        return self.get_related_objects('version', Partition)
    def add_partition(self, name=None):
        """
        Add a new Partition to this Version.
        @type name: string
        @param name: The name of the new Partition (optional)
        @rtype: L{Partition<boto.mapreduce.partitiondb.Partition>}
        @return: The new Partition object
        """
        p = Partition(manager=self.manager, name=name)
        p.version = self
        p.pdb = self.pdb
        p.save()
        return p
    def get_s3_prefix(self):
        # The version's prefix nests under the owning PartitionDB's prefix.
        if not self.pdb:
            raise ValueError, 'pdb attribute must be set to compute S3 prefix'
        return self.pdb.get_s3_prefix() + self.name + '/'
class PartitionDB(SDBObject):
    """A versioned database of partitions, persisted in SimpleDB with
    partition payloads stored in an S3 bucket."""
    name = StringProperty()
    bucket_name = StringProperty()
    versions = ObjectListProperty(ref_class=Version)
    def __init__(self, id=None, manager=None, name='', bucket_name=''):
        SDBObject.__init__(self, id, manager)
        # name/bucket_name only apply to brand-new objects; existing ones
        # are rehydrated by the persistence layer.
        if id == None:
            self.name = name
            self.bucket_name = bucket_name
    def get_s3_prefix(self):
        # All keys for this database live under "<name>/".
        return '%s/' % self.name
    def add_version(self):
        """
        Create a fresh Version, attach it to this PartitionDB and make it
        the current version.
        @rtype: L{Version<boto.mapreduce.partitiondb.Version>}
        @return: The newly created Version object.
        """
        version = Version()
        version.pdb = self
        version.save()
        self.versions.append(version)
        return version
    def revert(self):
        """
        Revert to the previous version: the current Version is removed from
        the version list and its predecessor becomes current.  Neither the
        Version object nor its Partitions are deleted.
        @rtype: L{Version<boto.mapreduce.partitiondb.Version>}
        @return: The Version object that was current before the revert.
        """
        current = self.current_version()
        if current:
            self.versions.remove(current)
        return current
    def current_version(self):
        """
        Get the currently active Version of this PartitionDB object.
        @rtype: L{Version<boto.mapreduce.partitiondb.Version>}
        @return: The current Version object, or None when no Versions exist.
        """
        if self.versions and len(self.versions) > 0:
            return self.versions[-1]
        return None
class Partition(SDBObject):
    """A single data partition: a SimpleDB record whose payload lives in S3."""
    def __init__(self, id=None, manager=None, name=None):
        # name only applies to brand-new objects (id is None).
        SDBObject.__init__(self, id, manager)
        if id == None:
            self.name = name
    # SimpleDB-persisted attributes.  NOTE: declared after __init__ in the
    # original source; declaration order is preserved here.
    name = StringProperty()
    version = ObjectProperty(ref_class=Version)
    pdb = ObjectProperty(ref_class=PartitionDB)
    data = S3KeyProperty()
    def get_key_name(self):
        # Full S3 key: "<pdb prefix><version name>/<partition name>".
        return self.version.get_s3_prefix() + self.name
    def upload(self, path, bucket_name=None):
        # Upload the local file at ``path`` to S3 (defaulting to the owning
        # PartitionDB's bucket), then record the resulting key and save.
        if not bucket_name:
            bucket_name = self.version.pdb.bucket_name
        s3 = self.manager.get_s3_connection()
        bucket = s3.lookup(bucket_name)
        directory, filename = os.path.split(path)
        self.name = filename
        key = bucket.new_key(self.get_key_name())
        key.set_contents_from_filename(path)
        self.data = key
        self.save()
    def delete(self):
        # Remove the backing S3 object (if any) before the SDB record.
        if self.data:
            self.data.delete()
        SDBObject.delete(self)
| {
"content_hash": "3c77c18817e139115f28317cae21b813",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 100,
"avg_line_length": 30.526666666666667,
"alnum_prop": 0.6018781393317318,
"repo_name": "marshall/pynaries",
"id": "6443d0293c12030b8155996deb44af50aaebba5e",
"size": "5686",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "boto/mapreduce/partitiondb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1098305"
}
],
"symlink_target": ""
} |
import json
import nltk
import codecs
import random
from news_item import NewsItem
from pymongo import MongoClient
from nltk.classify import apply_features
DEBUG = True
def _load_stop_words():
    """Return the Portuguese NLTK stop words as a set for O(1) membership tests."""
    return set(nltk.corpus.stopwords.words('portuguese'))
class Classificador:
    """
    Machine-learning script that learns from the news texts extracted by
    Scrapy and determines the category for a given document.
    """
    def __init__(self):
        # MongoDB connection: database ``articles_news``, collection ``articles``.
        self.client = MongoClient()
        self.db = self.client.articles_news
        self.news = self.db.articles
        self.stop_words = _load_stop_words()
        # Training starts immediately on construction.
        self.classify()
    def identify_top_words(self, all_words):
        # Keep the 1000 most frequent (lower-cased) words as the feature vocabulary.
        freq_dist = nltk.FreqDist(w.lower() for w in all_words)
        return list(freq_dist)[:1000]
    def collect_news(self):
        # Load at most 10000 documents from MongoDB, wrapped as NewsItem.
        news_list = []
        contador = 0
        for news in self.news.find():
            if contador < 10000:
                ns = NewsItem(news, self.stop_words)
                news_list.append(ns)
            else:
                break
            contador += 1
        return news_list
    def collect_all_words(self, news_list):
        # Concatenate the word lists of every collected news item.
        all_words = []
        for news in news_list:
            all_words.extend(news.all_words)
        return all_words
    def classificar(self, word):
        # Classify a single feature dict with the trained model.
        return self.classifier.classify(word)
    def features(self, top_words):
        # NOTE(review): this method reads ``self.all_words`` and ``self.url``,
        # which are never assigned on Classificador (see __init__); it looks
        # copied from NewsItem.features and would raise AttributeError if
        # called on this class — confirm before relying on it.
        word_set = set(self.all_words)
        features = {}
        features['url'] = self.url
        for w in top_words:
            features["w_%s" % w] = (w in word_set)
        return features
    def classify(self):
        # Full pipeline: load documents, build the vocabulary, shuffle,
        # build feature sets and train a Naive Bayes classifier.
        print(u"Coletando as Notícias")
        news_items = self.collect_news()
        print(u"Coletando todas as palavras")
        all_words = self.collect_all_words(news_items)
        print(u"Coletando as principais palavras")
        top_words = self.identify_top_words(all_words)
        print(u"Embaralhando")
        random.shuffle(news_items)
        print(u"Gerando conjunto de treinamento")
        featuresets = []
        for item in news_items:
            item_features = item.features(top_words)
            tup = (item_features, item.category)
            featuresets.append(tup)
        # All but the first 1000 examples are used for training; the first
        # 1000 were reserved for the (commented-out) test set below.
        train_set = featuresets[1000:]
        #test_set = featuresets[:1000]
        print('Featuresets tamanho: ' + str(len(featuresets)))
        print("Treinando...")
        self.classifier = nltk.NaiveBayesClassifier.train(train_set)
        print("Treinamento Completo complete")
        if DEBUG:
            # Smoke-test the trained model against a fixed JSON file of articles.
            arquivo_teste = codecs.open("doc_test_2.json", "r", encoding="utf-8")
            items_news = json.loads(arquivo_teste.read())
            list_test = []
            for item in items_news:
                news = NewsItem(item, self.stop_words)
                list_test.append(news)
            for i in list_test:
                feat = i.features(top_words)
                print(u"{} - {}".format(i.title, self.classificar(feat)))
        #print(nltk.classify.accuracy(self.classifier, test_set))
if __name__ == "__main__":
    # Construct-and-run: training happens inside __init__.
    Classificador()
| {
"content_hash": "ea4c5b9b305c8523098617675ad457ac",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 90,
"avg_line_length": 29.128440366972477,
"alnum_prop": 0.5861417322834646,
"repo_name": "nolram/news_crawler",
"id": "414f425549e05f4f31c66d63bf2c5b6cdb9c0677",
"size": "3201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "classificador/nltk_classificador.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16111"
},
{
"name": "Shell",
"bytes": "136"
}
],
"symlink_target": ""
} |
from GameHistory import *
from GameBoard import *
"""class yang menginisiasi menu utama MNK Game"""
class GameMNK:
    """Main menu window for the MNK Game.

    Collects the board parameters (m, n, and k-in-a-row to win) and can
    launch the game board, the history viewer, or the about dialog.
    """
    def __init__(self, screenWidth, screenHeight):
        self.window = Tk()
        self.window.resizable(False, False)
        self.screenWidth = screenWidth
        self.screenHeight = screenHeight
        self.fixWindowSize()
        self.window.title(" MNK Game ")
        self.window.iconbitmap(r'MNK-Game.ico')
        self.inputFrame = Frame(self.window)
        self.bttnFrame = Frame(self.window)
        self.color1 = "blue"
        self.color2 = "yellow"
        # Tk variables backing the three entry fields.
        self.m = StringVar()
        self.n = StringVar()
        self.k = StringVar()
        self.gametitleLabel = Label(self.window, text="MNK Game", font='times 42 bold')
        self.labelM = Label(self.inputFrame, text="M : ")
        self.labelN = Label(self.inputFrame, text="N : ")
        self.labelK = Label(self.inputFrame, text="K : ")
        self.rowEnt = Entry(self.inputFrame, textvariable=self.m)
        self.colEnt = Entry(self.inputFrame, textvariable=self.n)
        self.requiretowin = Entry(self.inputFrame, textvariable=self.k)
        self.startBttn = Button(self.window, text="Mulai Game", command=self.generateBoard)
        self.historyBttn = Button(self.bttnFrame, text="Riwayat", command=self.gameHistory)
        self.aboutBttn = Button(self.bttnFrame, text="Tentang", command=self.gameAbout)
        self.gametitleLabel.pack(padx=5, pady=20)
        self.inputFrame.pack(padx=5, pady=100)
        self.startBttn.pack(padx=5, pady=50)
        self.bttnFrame.pack(padx=5, pady=50)
        self.labelM.grid(row=1, column=1)
        self.rowEnt.grid(row=1, column=2)
        self.labelN.grid(row=2, column=1)
        self.colEnt.grid(row=2, column=2)
        self.labelK.grid(row=3, column=1)
        self.requiretowin.grid(row=3, column=2)
        self.historyBttn.grid(row=2, column=1)
        self.aboutBttn.grid(row=2, column=2)
        self.centerofscreen(self.window)
        self.window.mainloop()  # blocks until the window is closed
    def centerofscreen(self, toplevel):
        """Place *toplevel* at the center of the screen."""
        toplevel.update_idletasks()
        w = toplevel.winfo_screenwidth()
        h = toplevel.winfo_screenheight()
        size = tuple(int(_) for _ in toplevel.geometry().split('+')[0].split('x'))
        x = w / 2 - size[0] / 2
        y = h / 2 - size[1] / 2
        toplevel.geometry("%dx%d+%d+%d" % (size + (x, y)))
    def generateBoard(self):
        """Validate the m/n/k inputs and open a new game board window."""
        # BUG FIX: the int() conversions used to run unprotected, so a blank
        # or non-numeric entry crashed this handler before the inner
        # try/except could catch it.  Parse all three values once, up front.
        try:
            m = int(self.m.get())
            n = int(self.n.get())
            k = int(self.k.get())
        except ValueError:
            messagebox.showwarning("Info", "Dimohon agar mengisi input dengan benar.")
            return
        if self.checkInput():
            return
        # Pick a tile size that keeps the whole board on screen.
        if m <= 5 or n <= 5:
            tilesize = 60
        elif m <= 15 or n <= 15:
            tilesize = 37
        elif m <= 30 or n <= 30:
            tilesize = 19
        elif m <= 40 or n <= 40:
            tilesize = 14
        elif m <= 50 or n <= 50:
            tilesize = 10
        else:
            tilesize = 5
        try:
            self.GenerateBoard = GameBoard(m, n, k, tilesize)
        except ValueError:
            messagebox.showwarning("Info", "Dimohon agar mengisi input dengan benar.")
    def gameHistory(self):
        """Open the game history window."""
        GameHistory()
    def gameAbout(self):
        """Show program credits."""
        messagebox.showinfo("Info",
                            "Created with love by " + "\n" + "Arga Ghulam Ahmad, 1606821601" + "\n" + "Fasilkom, Universitas Indonesia")
    def fixWindowSize(self):
        """Constrain the window between 800x600 and the physical screen size."""
        self.window.minsize(width=800, height=600)
        self.window.maxsize(width=self.screenWidth, height=self.screenHeight)
    def checkInput(self):
        """Validate m, n and k; return True when the input is rejected."""
        if (int(self.m.get()) != int(self.n.get())):
            messagebox.showwarning("Input Value M or N is different", "Nilai m atau n direkomendasikan sama.")
            return True
        if (int(self.m.get()) < 3 or int(self.n.get()) < 3):
            messagebox.showwarning("Input Value M or N is too low", "Nilai m atau n Minimal adalah tiga")
            return True
        if (int(self.m.get()) > 110 or int(self.n.get()) > 110):
            # BUG FIX: the message used to claim "max: 250" for m while the
            # check above enforces 110 for both dimensions.
            messagebox.showwarning("Input Value M or N is Overload", "Nilai m (max: 110) atau n (max: 110) telah melampaui batas maksimum!")
            return True
        if (int(self.k.get()) > int(self.m.get()) or int(self.k.get()) > int(self.n.get())):
            messagebox.showwarning("Input Value K is Overload", "Nilai k telah melampaui batas maksimum!")
            return True
        return False  # explicit success value (was an implicit None)
"content_hash": "9eaa50a7d740ae0919f16fc9534db848",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 139,
"avg_line_length": 41.8859649122807,
"alnum_prop": 0.5979057591623037,
"repo_name": "argaghulamahmad/MNK-Game",
"id": "acc9597f8bf95f417d2f8f649b9a275b1e674f03",
"size": "4775",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GameMNK.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16303"
}
],
"symlink_target": ""
} |
"""
Add to SockJS-tornado the multi-room support
"""
from sockjs.tornado import SockJSConnection
from jsonParser import Parser
from structure import Struct
import logging
try:
import json
except ImportError:
import simplejson as json
# Limit import
__all__ = ["SockJSDefaultHandler", "SockJSRoomHandler"]
class SockJSDefaultHandler(SockJSConnection):
    """Default handler: decodes JSON messages and dispatches each one to
    the matching ``on_<name>`` method on this connection."""
    _parser = Parser()
    def on_message(self, data):
        """Parse *data* (a JSON string) and call ``on_<name>`` with its payload.

        The payload is first wrapped in a Struct for attribute access; if
        the handler rejects that (e.g. transaction requests), it is retried
        with the raw payload.  The handler's non-None return value is sent
        back to the client.
        """
        data = json.loads(data)
        if data["name"] is not None:
            logging.debug("%s: receiving message %s" % (data["name"], data["data"]))
            fct = getattr(self, "on_" + data["name"])
            try:
                res = fct(Struct(data["data"]))
            except Exception:
                # BUG FIX: was a bare ``except:`` which also swallowed
                # SystemExit/KeyboardInterrupt.  Retry with the raw payload
                # (some handlers cannot take the Struct wrapper).
                res = fct(data["data"])
            if res is not None:
                self.write_message(res)
        else:
            logging.error("SockJSDefaultHandler: data.name was null")
    def publish(self, name, data, userList):
        """Broadcast *data* under message *name* to every user in *userList*."""
        self.broadcast(userList, {
            "name": name,
            "data": SockJSDefaultHandler._parser.encode(data)
        })
class SockJSRoomHandler(SockJSDefaultHandler):
    """Handler that groups connections into named rooms and broadcasts
    messages to room members."""
    # Class-wide registry mapping "<HandlerClassName><roomId>" to the set
    # of connections currently in that room.
    _room = {}
    def _gcls(self):
        """Return this handler's class name (used to namespace room keys)."""
        return self.__class__.__name__
    def join(self, _id):
        """Add this connection to the room *_id*."""
        SockJSRoomHandler._room.setdefault(self._gcls() + _id, set()).add(self)
    def leave(self, _id):
        """Remove this connection from room *_id*, deleting empty rooms."""
        key = self._gcls() + _id
        registry = SockJSRoomHandler._room
        if key in registry:
            registry[key].remove(self)
            if not registry[key]:
                del registry[key]
    def getRoom(self, _id):
        """Return the set of members of room *_id*, or None if it does not exist."""
        return SockJSRoomHandler._room.get(self._gcls() + _id)
    def publishToRoom(self, roomId, name, data, userList=None):
        """Broadcast *data* under *name* to every member of *roomId*
        (or to the explicit *userList* when given)."""
        if userList is None:
            userList = self.getRoom(roomId)
        logging.debug("%s: broadcasting (name: %s, data: %s, number of users: %s)" % (self._gcls(), name, data, len(userList)))
        self.broadcast(userList, {
            "name": name,
            "data": SockJSRoomHandler._parser.encode(data)
        })
    def publishToOther(self, roomId, name, data):
        """Broadcast to every member of *roomId* except this connection."""
        members = self.getRoom(roomId)
        others = [member for member in members if member is not self]
        self.publishToRoom(roomId, name, data, others)
    def publishToMyself(self, roomId, name, data):
        """Broadcast to this connection only."""
        self.publishToRoom(roomId, name, data, [self])
    def isInRoom(self, _id):
        """Return True when this connection is a member of room *_id*."""
        members = SockJSRoomHandler._room.get(self._gcls() + _id)
        return members is not None and self in members
| {
"content_hash": "f63f67c402005ab07ea8652a361ae6d4",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 127,
"avg_line_length": 33.84761904761905,
"alnum_prop": 0.5796285875070343,
"repo_name": "Deisss/python-sockjsroom",
"id": "08128b65a1f23f99bcf86430374e490d4d415943",
"size": "3601",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sockjsroom/socketHandler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "460"
},
{
"name": "HTML",
"bytes": "1409"
},
{
"name": "JavaScript",
"bytes": "6796"
},
{
"name": "Python",
"bytes": "13230"
}
],
"symlink_target": ""
} |
from tempest.api.compute import base
class BaseFloatingIPsTest(base.BaseV2ComputeTest):
    """Shared base for floating-IP compute tests."""

    @classmethod
    def setUpClass(cls):
        # Floating IP actions might need a full network configuration
        cls.set_network_resources(network=True,
                                  subnet=True,
                                  router=True,
                                  dhcp=True)
        super(BaseFloatingIPsTest, cls).setUpClass()
| {
"content_hash": "02b47b7dd90ee1af9270f09827d3dcff",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 69,
"avg_line_length": 34.09090909090909,
"alnum_prop": 0.672,
"repo_name": "vedujoshi/os_tempest",
"id": "fd76e62e3f321d17fd7e4f5f3303678a81705e96",
"size": "1011",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tempest/api/compute/floating_ips/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3096313"
},
{
"name": "Shell",
"bytes": "8664"
}
],
"symlink_target": ""
} |
__version__=''' $Id: signsandsymbols.py 3959 2012-09-27 14:39:39Z robin $ '''
__doc__="""This file is a collection of widgets to produce some common signs and symbols.
Widgets include:
- ETriangle (an equilateral triangle),
- RTriangle (a right angled triangle),
- Octagon,
- Crossbox,
- Tickbox,
- SmileyFace,
- StopSign,
- NoEntry,
- NotAllowed (the red roundel from 'no smoking' signs),
- NoSmoking,
- DangerSign (a black exclamation point in a yellow triangle),
- YesNo (returns a tickbox or a crossbox depending on a testvalue),
- FloppyDisk,
- ArrowOne, and
- ArrowTwo
"""
from reportlab.lib import colors
from reportlab.lib.validators import *
from reportlab.lib.attrmap import *
from reportlab.graphics import shapes
from reportlab.graphics.widgetbase import Widget
from reportlab.graphics import renderPDF
class _Symbol(Widget):
    """Abstract base widget
    possible attributes:
    'x', 'y', 'size', 'fillColor', 'strokeColor'
    """
    _nodoc = 1
    # Declarative validation map consumed by the reportlab widget machinery.
    # NOTE(review): the desc for ``dy`` says "x coordinate adjustment" —
    # looks like a copy-paste slip; the runtime string is left untouched.
    _attrMap = AttrMap(
        x = AttrMapValue(isNumber,desc='symbol x coordinate'),
        y = AttrMapValue(isNumber,desc='symbol y coordinate'),
        dx = AttrMapValue(isNumber,desc='symbol x coordinate adjustment'),
        dy = AttrMapValue(isNumber,desc='symbol x coordinate adjustment'),
        size = AttrMapValue(isNumber),
        fillColor = AttrMapValue(isColorOrNone),
        strokeColor = AttrMapValue(isColorOrNone),
        strokeWidth = AttrMapValue(isNumber),
        )
    def __init__(self):
        # Guard against direct instantiation of the abstract base class.
        assert self.__class__.__name__!='_Symbol', 'Abstract class _Symbol instantiated'
        self.x = self.y = self.dx = self.dy = 0
        self.size = 100
        self.fillColor = colors.red
        self.strokeColor = None
        self.strokeWidth = 0.1
    def demo(self):
        # Render a fresh instance of the concrete subclass, with its class
        # name as a caption, into a 200x100 drawing.
        D = shapes.Drawing(200, 100)
        s = float(self.size)
        ob = self.__class__()
        ob.x=50
        ob.y=0
        ob.draw()
        D.add(ob)
        D.add(shapes.String(ob.x+(s/2),(ob.y-12),
                            ob.__class__.__name__, fillColor=colors.black, textAnchor='middle',
                            fontSize=10))
        return D
class ETriangle(_Symbol):
    """This draws an equilateral triangle.
    possible attributes:
    'x', 'y', 'size', 'fillColor', 'strokeColor'
    """
    def __init__(self):
        # BUG FIX: the original body was ``pass``, which left instances
        # without the x/y/size/fillColor/strokeColor attributes that
        # draw() reads, so drawing failed with AttributeError.  Initialize
        # the shared defaults from the base class instead.
        _Symbol.__init__(self)
    def draw(self):
        """Return a Group containing the triangle at (x, y) with side *size*."""
        s = float(self.size)  # abbreviate as we will use this a lot
        g = shapes.Group()
        triangle = shapes.Polygon(points = [
                    self.x, self.y,
                    self.x+s, self.y,
                    self.x+(s/2),self.y+s],
               fillColor = self.fillColor,
               strokeColor = self.strokeColor,
               strokeWidth=s/50.)
        g.add(triangle)
        return g
class RTriangle(_Symbol):
    """This draws a right-angled triangle.
    possible attributes:
    'x', 'y', 'size', 'fillColor', 'strokeColor'
    """
    def __init__(self):
        self.x = 0
        self.y = 0
        self.size = 100
        self.fillColor = colors.green
        self.strokeColor = None
    def draw(self):
        """Return a Group with the right triangle (right angle at (x, y))."""
        side = float(self.size)
        group = shapes.Group()
        # Vertices: origin, one side along x, one side along y.
        group.add(shapes.Polygon(
            points=[self.x, self.y,
                    self.x + side, self.y,
                    self.x, self.y + side],
            fillColor=self.fillColor,
            strokeColor=self.strokeColor,
            strokeWidth=side / 50.))
        return group
class Octagon(_Symbol):
    """This widget draws an Octagon.
    possible attributes:
    'x', 'y', 'size', 'fillColor', 'strokeColor'
    """
    def __init__(self):
        self.x = 0
        self.y = 0
        self.size = 100
        self.fillColor = colors.yellow
        self.strokeColor = None
    def draw(self):
        """Return a Group with the octagon inscribed in the size x size box."""
        s = float(self.size)
        third = s / 3
        # Corners listed counter-clockwise starting from the bottom edge.
        corners = [self.x + third, self.y,
                   self.x, self.y + third,
                   self.x, self.y + (third * 2),
                   self.x + third, self.y + s,
                   self.x + (third * 2), self.y + s,
                   self.x + s, self.y + (third * 2),
                   self.x + s, self.y + third,
                   self.x + (third * 2), self.y]
        group = shapes.Group()
        group.add(shapes.Polygon(points=corners,
                                 strokeColor=self.strokeColor,
                                 fillColor=self.fillColor,
                                 strokeWidth=10))
        return group
class Crossbox(_Symbol):
    """This draws a black box with a red cross in it - a 'checkbox'.
    possible attributes:
    'x', 'y', 'size', 'crossColor', 'strokeColor', 'crosswidth'
    """
    _attrMap = AttrMap(BASE=_Symbol,
        crossColor = AttrMapValue(isColorOrNone),
        crosswidth = AttrMapValue(isNumber),
        )
    def __init__(self):
        self.x = 0
        self.y = 0
        self.size = 100
        self.fillColor = colors.white
        self.crossColor = colors.red
        self.strokeColor = colors.black
        self.crosswidth = 10
    def draw(self):
        """Return a Group: the outlined box plus the two cross diagonals."""
        s = float(self.size)
        group = shapes.Group()
        group.add(shapes.Rect(self.x + 1, self.y + 1, s - 2, s - 2,
                              fillColor=self.fillColor,
                              strokeColor=self.strokeColor,
                              strokeWidth=2))
        # The two diagonals, as fractions of the box size.
        for xa, ya, xb, yb in ((0.15, 0.15, 0.85, 0.85),
                               (0.15, 0.85, 0.85, 0.15)):
            group.add(shapes.Line(self.x + (s * xa), self.y + (s * ya),
                                  self.x + (s * xb), self.y + (s * yb),
                                  fillColor=self.crossColor,
                                  strokeColor=self.crossColor,
                                  strokeWidth=self.crosswidth))
        return group
class Tickbox(_Symbol):
    """This draws a black box with a red tick in it - another 'checkbox'.
    possible attributes:
    'x', 'y', 'size', 'tickColor', 'strokeColor', 'tickwidth'
    """
    _attrMap = AttrMap(BASE=_Symbol,
        tickColor = AttrMapValue(isColorOrNone),
        tickwidth = AttrMapValue(isNumber),
        )
    def __init__(self):
        self.x = 0
        self.y = 0
        self.size = 100
        self.tickColor = colors.red
        self.strokeColor = colors.black
        self.fillColor = colors.white
        self.tickwidth = 10
    def draw(self):
        """Return a Group: the outlined box plus the tick polyline."""
        s = float(self.size)
        group = shapes.Group()
        group.add(shapes.Rect(self.x + 1, self.y + 1, s - 2, s - 2,
                              fillColor=self.fillColor,
                              strokeColor=self.strokeColor,
                              strokeWidth=2))
        # Tick vertices as in the original (the middle point is repeated).
        tick_points = [self.x + (s * 0.15), self.y + (s * 0.35),
                       self.x + (s * 0.35), self.y + (s * 0.15),
                       self.x + (s * 0.35), self.y + (s * 0.15),
                       self.x + (s * 0.85), self.y + (s * 0.85)]
        group.add(shapes.PolyLine(points=tick_points,
                                  fillColor=self.tickColor,
                                  strokeColor=self.tickColor,
                                  strokeWidth=self.tickwidth))
        return group
class SmileyFace(_Symbol):
    """This draws a classic smiley face.
    possible attributes:
    'x', 'y', 'size', 'fillColor'
    """
    def __init__(self):
        _Symbol.__init__(self)
        self.x = 0
        self.y = 0
        self.size = 100
        self.fillColor = colors.yellow
        self.strokeColor = colors.black
    def draw(self):
        # general widget bits
        s = float(self.size) # abbreviate as we will use this a lot
        g = shapes.Group()
        # face outline
        g.add(shapes.Circle(cx=self.x+(s/2), cy=self.y+(s/2), r=s/2,
                            fillColor=self.fillColor, strokeColor=self.strokeColor,
                            strokeWidth=max(s/38.,self.strokeWidth)))
        # the two eyes
        for i in (1,2):
            g.add(shapes.Ellipse(self.x+(s/3)*i,self.y+(s/3)*2, s/30, s/10,
                                 fillColor=self.strokeColor, strokeColor = self.strokeColor,
                                 strokeWidth=max(s/38.,self.strokeWidth)))
        # calculate a pointslist for the mouth by sampling an ellipse arc
        # THIS IS A HACK! - don't use if there is a 'shapes.Arc'
        centerx=self.x+(s/2)
        centery=self.y+(s/2)
        radius=s/3
        yradius = radius
        xradius = radius
        startangledegrees=200
        endangledegrees=340
        degreedelta = 1
        pointslist = []
        a = pointslist.append  # bound method: avoids attribute lookup per point
        from math import sin, cos, pi
        degreestoradians = pi/180.0
        radiansdelta = degreedelta*degreestoradians
        startangle = startangledegrees*degreestoradians
        endangle = endangledegrees*degreestoradians
        # normalize so the sweep runs forward from start to end
        while endangle<startangle:
            endangle = endangle+2*pi
        angle = startangle
        while angle<endangle:
            x = centerx + cos(angle)*radius
            y = centery + sin(angle)*yradius
            a(x); a(y)
            angle = angle+radiansdelta
        # make the mouth
        smile = shapes.PolyLine(pointslist,
                                fillColor = self.strokeColor,
                                strokeColor = self.strokeColor,
                                strokeWidth = max(s/38.,self.strokeWidth))
        g.add(smile)
        return g
class StopSign(_Symbol):
    """This draws a (British) stop sign.
    possible attributes:
    'x', 'y', 'size'
    """
    _attrMap = AttrMap(BASE=_Symbol,
        stopColor = AttrMapValue(isColorOrNone,desc='color of the word stop'),
        )
    def __init__(self):
        self.x = 0
        self.y = 0
        self.size = 100
        self.strokeColor = colors.black
        self.fillColor = colors.orangered
        self.stopColor = colors.ghostwhite
    def draw(self):
        # general widget bits
        s = float(self.size)  # abbreviate as we will use this a lot
        g = shapes.Group()
        # stop-sign specific bits
        athird=s/3
        # black octagonal outline
        outerOctagon = shapes.Polygon(points=[self.x+athird, self.y,
                                              self.x, self.y+athird,
                                              self.x, self.y+(athird*2),
                                              self.x+athird, self.y+s,
                                              self.x+(athird*2), self.y+s,
                                              self.x+s, self.y+(athird*2),
                                              self.x+s, self.y+athird,
                                              self.x+(athird*2), self.y],
                                      strokeColor = self.strokeColor,
                                      fillColor = None,
                                      strokeWidth=1)
        g.add(outerOctagon)
        # filled octagon inset by s/75 on every side
        innerOctagon = shapes.Polygon(points=[self.x+athird+(s/75), self.y+(s/75),
                                              self.x+(s/75), self.y+athird+(s/75),
                                              self.x+(s/75), self.y+(athird*2)-(s/75),
                                              self.x+athird+(s/75), self.y+s-(s/75),
                                              self.x+(athird*2)-(s/75), (self.y+s)-(s/75),
                                              (self.x+s)-(s/75), self.y+(athird*2)-(s/75),
                                              (self.x+s)-(s/75), self.y+athird+(s/75),
                                              self.x+(athird*2)-(s/75), self.y+(s/75)],
                                      strokeColor = None,
                                      fillColor = self.fillColor,
                                      strokeWidth=0)
        g.add(innerOctagon)
        # the word STOP, centered; skipped when stopColor is None
        if self.stopColor:
            g.add(shapes.String(self.x+(s*0.5),self.y+(s*0.4),
                                'STOP', fillColor=self.stopColor, textAnchor='middle',
                                fontSize=s/3, fontName="Helvetica-Bold"))
        return g
class NoEntry(_Symbol):
    """This draws a (British) No Entry sign - a red circle with a white line on it.
    possible attributes:
    'x', 'y', 'size'
    """
    _attrMap = AttrMap(BASE=_Symbol,
        innerBarColor = AttrMapValue(isColorOrNone,desc='color of the inner bar'),
        )
    def __init__(self):
        self.x = 0
        self.y = 0
        self.size = 100
        self.strokeColor = colors.black
        self.fillColor = colors.orangered
        self.innerBarColor = colors.ghostwhite
    def draw(self):
        # general widget bits
        s = float(self.size)  # abbreviate as we will use this a lot
        g = shapes.Group()
        # thin outline ring; each part is skipped when its color is None
        if self.strokeColor:
            g.add(shapes.Circle(cx = (self.x+(s/2)), cy = (self.y+(s/2)), r = s/2, fillColor = None, strokeColor = self.strokeColor, strokeWidth=1))
        # red disc, slightly inset from the outline
        if self.fillColor:
            g.add(shapes.Circle(cx = (self.x+(s/2)), cy =(self.y+(s/2)), r = ((s/2)-(s/50)), fillColor = self.fillColor, strokeColor = None, strokeWidth=0))
        innerBarColor = self.innerBarColor
        # horizontal white bar across the middle
        if innerBarColor:
            g.add(shapes.Rect(self.x+(s*0.1), self.y+(s*0.4), width=s*0.8, height=s*0.2, fillColor = innerBarColor, strokeColor = innerBarColor, strokeLineCap = 1, strokeWidth = 0))
        return g
class NotAllowed(_Symbol):
    """This draws a 'forbidden' roundel (as used in the no-smoking sign).
    possible attributes:
    'x', 'y', 'size'
    """
    _attrMap = AttrMap(BASE=_Symbol,
        )
    def __init__(self):
        self.x = 0
        self.y = 0
        self.size = 100
        self.strokeColor = colors.red
        self.fillColor = colors.white
    def draw(self):
        # general widget bits
        s = float(self.size)  # abbreviate as we will use this a lot
        g = shapes.Group()
        strokeColor = self.strokeColor
        # outer ring of the roundel
        outerCircle = shapes.Circle(cx = (self.x+(s/2)), cy = (self.y+(s/2)), r = (s/2)-(s/10), fillColor = self.fillColor, strokeColor = strokeColor, strokeWidth=s/10.)
        g.add(outerCircle)
        # The diagonal bar is approximated by sampling an ellipse arc into a
        # polyline (a hack predating shapes.Arc); degreedelta=90 yields the
        # few sample points that form the bar.
        centerx=self.x+s
        centery=self.y+(s/2)-(s/6)
        radius=s-(s/6)
        yradius = radius/2
        xradius = radius/2
        startangledegrees=100
        endangledegrees=-80
        degreedelta = 90
        pointslist = []
        a = pointslist.append
        from math import sin, cos, pi
        degreestoradians = pi/180.0
        radiansdelta = degreedelta*degreestoradians
        startangle = startangledegrees*degreestoradians
        endangle = endangledegrees*degreestoradians
        # normalize so the sweep runs forward from start to end
        while endangle<startangle:
            endangle = endangle+2*pi
        angle = startangle
        while angle<endangle:
            x = centerx + cos(angle)*radius
            y = centery + sin(angle)*yradius
            a(x); a(y)
            angle = angle+radiansdelta
        crossbar = shapes.PolyLine(pointslist, fillColor = strokeColor, strokeColor = strokeColor, strokeWidth = s/10.)
        g.add(crossbar)
        return g
class NoSmoking(NotAllowed):
    """This draws a no-smoking sign.
    possible attributes:
    'x', 'y', 'size'
    """
    def __init__(self):
        NotAllowed.__init__(self)
    def draw(self):
        """Return the NotAllowed roundel with a cigarette inserted under the bar."""
        s = float(self.size)
        g = NotAllowed.draw(self)
        # White cigarette body laid across the roundel; inserted before the
        # crossbar so the bar stays on top.
        body_x = self.x + (s / 2) - (s / 3.5)
        body_y = self.y + (s / 2) - (s / 32)
        g.insert(-1, shapes.Rect(x=body_x, y=body_y,
                                 width=(s / 2), height=(s / 16),
                                 fillColor=colors.ghostwhite,
                                 strokeColor=colors.gray,
                                 strokeWidth=0))
        # Three identical glowing-tip stripes just past the cigarette's end.
        stripe_x = body_x + (s / 2) + (s / 64)
        for _ in range(3):
            g.insert(-1, shapes.Rect(x=stripe_x, y=body_y,
                                     width=(s / 80), height=(s / 16),
                                     fillColor=colors.orangered,
                                     strokeColor=None,
                                     strokeWidth=0))
            stripe_x = stripe_x + (s / 35)
        return g
class DangerSign(_Symbol):
    """This draws a 'danger' sign: a yellow box with a black exclamation point.
    possible attributes:
    'x', 'y', 'size', 'strokeColor', 'fillColor', 'strokeWidth'
    """
    def __init__(self):
        self.x = 0
        self.y = 0
        self.size = 100
        self.strokeColor = colors.black
        self.fillColor = colors.gold
        # default stroke width scales with the default size
        self.strokeWidth = self.size*0.125
    def draw(self):
        """Return a Group: warning triangle plus exclamation mark and dot."""
        # general widget bits
        s = float(self.size)  # abbreviate as we will use this a lot
        g = shapes.Group()
        # CLEANUP: ew/ae were assigned twice in a row in the original;
        # the duplicate assignments were dead code and have been removed.
        ew = self.strokeWidth  # width of the exclamation mark
        ae = s*0.125           # (ae = 'an eighth')
        # black outline triangle
        outerTriangle = shapes.Polygon(points = [
                    self.x, self.y,
                    self.x+s, self.y,
                    self.x+(s/2),self.y+s],
               fillColor = None,
               strokeColor = self.strokeColor,
               strokeWidth=0)
        g.add(outerTriangle)
        # filled triangle slightly inset from the outline
        innerTriangle = shapes.Polygon(points = [
                    self.x+(s/50), self.y+(s/75),
                    (self.x+s)-(s/50), self.y+(s/75),
                    self.x+(s/2),(self.y+s)-(s/50)],
               fillColor = self.fillColor,
               strokeColor = None,
               strokeWidth=0)
        g.add(innerTriangle)
        # exclamation-mark stem (slightly wider at the top)
        exmark = shapes.Polygon(points=[
                    ((self.x+s/2)-ew/2), self.y+ae*2.5,
                    ((self.x+s/2)+ew/2), self.y+ae*2.5,
                    ((self.x+s/2)+((ew/2))+(ew/6)), self.y+ae*5.5,
                    ((self.x+s/2)-((ew/2))-(ew/6)), self.y+ae*5.5],
               fillColor = self.strokeColor,
               strokeColor = None)
        g.add(exmark)
        # exclamation-mark dot
        exdot = shapes.Polygon(points=[
                    ((self.x+s/2)-ew/2), self.y+ae,
                    ((self.x+s/2)+ew/2), self.y+ae,
                    ((self.x+s/2)+ew/2), self.y+ae*2,
                    ((self.x+s/2)-ew/2), self.y+ae*2],
               fillColor = self.strokeColor,
               strokeColor = None)
        g.add(exdot)
        return g
class YesNo(_Symbol):
    """This widget draw a tickbox or crossbox depending on 'testValue'.
    If this widget is supplied with a 'True' or 1 as a value for
    testValue, it will use the tickbox widget. Otherwise, it will
    produce a crossbox.
    possible attributes:
    'x', 'y', 'size', 'tickcolor', 'crosscolor', 'testValue'
    """
    _attrMap = AttrMap(BASE=_Symbol,
        tickcolor = AttrMapValue(isColor),
        crosscolor = AttrMapValue(isColor),
        testValue = AttrMapValue(isBoolean),
        )
    def __init__(self):
        self.x = 0
        self.y = 0
        self.size = 100
        self.tickcolor = colors.green
        self.crosscolor = colors.red
        self.testValue = 1
    def draw(self):
        # Delegate to Tickbox or Crossbox depending on testValue.  Note this
        # returns the delegate widget itself, not a shapes.Group.
        if self.testValue:
            yn=Tickbox()
            yn.tickColor=self.tickcolor
        else:
            yn=Crossbox()
            yn.crossColor=self.crosscolor
        yn.x=self.x
        yn.y=self.y
        yn.size=self.size
        yn.draw()
        return yn
    def demo(self):
        # Show both states side by side with captions underneath.
        D = shapes.Drawing(200, 100)
        yn = YesNo()
        yn.x = 15
        yn.y = 25
        yn.size = 70
        yn.testValue = 0
        yn.draw()
        D.add(yn)
        yn2 = YesNo()
        yn2.x = 120
        yn2.y = 25
        yn2.size = 70
        yn2.testValue = 1
        yn2.draw()
        D.add(yn2)
        labelFontSize = 8
        D.add(shapes.String(yn.x+(yn.size/2),(yn.y-(1.2*labelFontSize)),
                            'testValue=0', fillColor=colors.black, textAnchor='middle',
                            fontSize=labelFontSize))
        D.add(shapes.String(yn2.x+(yn2.size/2),(yn2.y-(1.2*labelFontSize)),
                            'testValue=1', fillColor=colors.black, textAnchor='middle',
                            fontSize=labelFontSize))
        labelFontSize = 10
        D.add(shapes.String(yn.x+85,(yn.y-20),
                            self.__class__.__name__, fillColor=colors.black, textAnchor='middle',
                            fontSize=labelFontSize))
        return D
class FloppyDisk(_Symbol):
    """Draw an icon of a 3.5-inch floppy disk.

    possible attributes:
    'x', 'y', 'size', 'diskColor'
    """

    _attrMap = AttrMap(BASE=_Symbol,
        diskColor = AttrMapValue(isColor),
        )

    def __init__(self):
        self.x = 0
        self.y = 0
        self.size = 100
        self.diskColor = colors.black

    def draw(self):
        s = float(self.size)  # shorthand used throughout
        g = shapes.Group()

        # Disk casing.
        g.add(shapes.Rect(x=self.x, y=self.y + (s / 100), width=s,
                          height=s - (s / 100),
                          fillColor=self.diskColor,
                          strokeColor=None, strokeWidth=0))
        # Paper label with a blue top stripe.
        g.add(shapes.Rect(x=self.x + (s * 0.1), y=(self.y + s) - (s * 0.5),
                          width=s * 0.8, height=s * 0.48,
                          fillColor=colors.whitesmoke,
                          strokeColor=None, strokeWidth=0))
        g.add(shapes.Rect(x=self.x + (s * 0.1), y=(self.y + s) - (s * 0.1),
                          width=s * 0.8, height=s * 0.08,
                          fillColor=colors.royalblue,
                          strokeColor=None, strokeWidth=0))
        # Three ruled writing lines on the label.
        for frac in (0.6, 0.7, 0.8):
            g.add(shapes.Line(x1=self.x + (s * 0.15), y1=self.y + (frac * s),
                              x2=self.x + (s * 0.85), y2=self.y + (frac * s),
                              fillColor=colors.black,
                              strokeColor=colors.black, strokeWidth=0))
        # Metal shutter with its slot.
        g.add(shapes.Rect(x=self.x + (s * 0.2), y=(self.y),
                          width=s * 0.5, height=s * 0.35,
                          fillColor=colors.silver,
                          strokeColor=None, strokeWidth=0))
        g.add(shapes.Rect(x=self.x + (s * 0.28), y=(self.y) + (s * 0.035),
                          width=s * 0.12, height=s * 0.28,
                          fillColor=self.diskColor,
                          strokeColor=None, strokeWidth=0))
        return g
class ArrowOne(_Symbol):
    """Draw a right-pointing arrow (style one).

    possible attributes:
    'x', 'y', 'size', 'fillColor'
    """

    def __init__(self):
        self.x = 0
        self.y = 0
        self.size = 100
        self.fillColor = colors.red
        self.strokeWidth = 0
        self.strokeColor = None

    def draw(self):
        s = float(self.size)  # shorthand used throughout
        group = shapes.Group()
        x = self.x
        y = self.y
        half = s / 2
        third = s / 3
        fifth = s / 5
        # Shaft plus head, as one polygon outline.
        outline = [
            x, y + third,
            x, y + 2 * third,
            x + half, y + 2 * third,
            x + half, y + 4 * fifth,
            x + s, y + half,
            x + half, y + fifth,
            x + half, y + third,
        ]
        group.add(shapes.Polygon(points=outline,
                                 fillColor=self.fillColor,
                                 strokeColor=self.strokeColor,
                                 strokeWidth=self.strokeWidth))
        return group
class ArrowTwo(ArrowOne):
    """Draw a right-pointing arrow (style two).

    possible attributes:
    'x', 'y', 'size', 'fillColor'
    """

    def __init__(self):
        self.x = 0
        self.y = 0
        self.size = 100
        self.fillColor = colors.blue
        self.strokeWidth = 0
        self.strokeColor = None

    def draw(self):
        s = float(self.size)  # shorthand used throughout
        group = shapes.Group()
        x = self.x
        y = self.y
        half = s / 2
        third = s / 3
        unit = s / 24  # the outline is laid out on a 24-unit grid
        # Thin shaft plus head, as one polygon outline.
        outline = [
            x, y + 11 * unit,
            x, y + 13 * unit,
            x + 18.75 * unit, y + 13 * unit,
            x + 2 * third, y + 2 * third,
            x + s, y + half,
            x + 2 * third, y + third,
            x + 18.75 * unit, y + 11 * unit,
        ]
        group.add(shapes.Polygon(points=outline,
                                 fillColor=self.fillColor,
                                 strokeColor=self.strokeColor,
                                 strokeWidth=self.strokeWidth))
        return group
def test():
    """Produce a PDF showcasing every sign and symbol in this module.

    Writes 'signsandsymbols.pdf' to the current directory.
    """
    labelFontSize = 10
    D = shapes.Drawing(450, 650)

    def _add_labelled(widget, x, y, caption=None):
        """Place *widget* at (x, y) and add a centred caption below it.

        The caption defaults to the widget's class name.
        """
        widget.x = x
        widget.y = y
        D.add(widget)
        # Fix: the original used tb.size for the YesNo caption's x offset,
        # which only worked because every sample happens to be size 70.
        D.add(shapes.String(widget.x + (widget.size / 2),
                            (widget.y - (1.2 * labelFontSize)),
                            caption or widget.__class__.__name__,
                            fillColor=colors.black, textAnchor='middle',
                            fontSize=labelFontSize))

    # Top row.
    _add_labelled(Crossbox(), 20, 530)
    _add_labelled(Tickbox(), 170, 530)
    yn = YesNo()
    # '*' points at the footnote explaining YesNo's behaviour.
    _add_labelled(yn, 320, 530, yn.__class__.__name__ + '*')
    D.add(shapes.String(130, 6,
        "(The 'YesNo' widget returns a tickbox if testvalue=1, and a crossbox if testvalue=0)",
        fillColor=colors.black, textAnchor='middle',
        fontSize=labelFontSize * 0.75))

    # Remaining rows.
    _add_labelled(StopSign(), 20, 400)
    _add_labelled(NoEntry(), 170, 400)
    _add_labelled(SmileyFace(), 320, 400)
    _add_labelled(DangerSign(), 20, 270)
    _add_labelled(NotAllowed(), 170, 270)
    _add_labelled(NoSmoking(), 320, 270)
    _add_labelled(ArrowOne(), 20, 140)
    _add_labelled(ArrowTwo(), 170, 140)
    _add_labelled(FloppyDisk(), 320, 140)

    renderPDF.drawToFile(D, 'signsandsymbols.pdf', 'signsandsymbols.py')
    # print(...) with a single argument behaves the same on Python 2 and 3.
    print('wrote file: signsandsymbols.pdf')


if __name__ == '__main__':
    test()
| {
"content_hash": "08b908b0cacbd3332eb8f1703c6f37d0",
"timestamp": "",
"source": "github",
"line_count": 927,
"max_line_length": 181,
"avg_line_length": 33.33980582524272,
"alnum_prop": 0.49330227140361094,
"repo_name": "nickpack/reportlab",
"id": "34e92bf87f8965145310a30857aef733474b0740",
"size": "31213",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/reportlab/graphics/widgets/signsandsymbols.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "782870"
},
{
"name": "C++",
"bytes": "1390"
},
{
"name": "Java",
"bytes": "6333"
},
{
"name": "Python",
"bytes": "3275159"
},
{
"name": "Shell",
"bytes": "1736"
}
],
"symlink_target": ""
} |
from fabric.api import env, require, task
from denim import paths, system, utils
from denim.constants import DeployUser
def manage(cmd, args=None, revision=None, noinput=True, use_sudo=True, user=DeployUser):
    """
    Run a django manage.py command.

    :param cmd: the command to run.
    :param args: arguments to append.
    :param revision: version name that is being worked on.
    :param noinput: Do not ask for input.
    :param use_sudo: run the command via sudo.
    :param user: user to run the command as.
    """
    # Copy the argument list so we never mutate a list owned by the caller
    # (the original inserted into it in place); also accepts any iterable.
    args = list(args) if args else []
    if noinput:
        args.insert(0, '--noinput')
    args.insert(0, cmd)
    with paths.cd_package(revision):
        utils.run_as('python%s manage.py %s' % (env.get('python_version', 2), ' '.join(args)), use_sudo, user)
def test_deploy(revision=None):
    """
    Call manage.py validate to ensure deployment is working correctly.

    :param revision: version name that is being worked on; defaults to the
        current revision.
    """
    manage('validate', revision=revision, noinput=False)
def collectstatic(revision=None, user=None):
    """
    Collect static files.

    :param revision: version name that is being worked on.
    :param user: user to run the command as.
    """
    manage('collectstatic',
           revision=revision, user=user)
def syncdb(revision=None, **kwargs):
    """
    Run a database sync.

    :param revision: version name that is being worked on.
    :param kwargs: extra options forwarded to :func:`manage`.
    """
    manage('syncdb', revision=revision, **kwargs)
def createsuperuser(username='', revision=None, **kwargs):
    """
    Create a Django superuser (interactive; never passes --noinput).

    :param username: optional username for the new superuser.
    :param revision: version name that is being worked on.
    :param kwargs: extra options forwarded to :func:`manage`.
    """
    manage('createsuperuser', [username], revision=revision, noinput=False, **kwargs)
def link_settings(revision=None, user=None, **kwargs):
    """
    Symlink the environment-specific settings file into place as
    local_settings.py inside the deployed package.
    """
    require('deploy_env')
    # Resolve both ends of the link inside the package for this revision.
    source = paths.package_path(
        revision, '%(package_name)s/deployment/settings_%(deploy_env)s.py' % env)
    target = paths.package_path(
        revision, '%(package_name)s/local_settings.py' % env)
    system.create_symlink(source, target, user=user, **kwargs)
| {
"content_hash": "b4eb408e5ede9ab0192dcd47003d9c85",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 110,
"avg_line_length": 25.63235294117647,
"alnum_prop": 0.6483075157773953,
"repo_name": "timsavage/denim",
"id": "7d0504d807467a200f0242ae8dbb9fd7d37f8bba",
"size": "1767",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "denim/django/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "89692"
}
],
"symlink_target": ""
} |
from a10sdk.common.A10BaseClass import A10BaseClass
class Connect(A10BaseClass):
    """Connect to the license manager to activate.

    Supports CRUD operations and inherits from `common/A10BaseClass`;
    this is the "PARENT" class for this module.

    :param connect: {"description": "Connect to license manager to activate", "partition-visibility": "shared", "default": 0, "type": "number", "format": "flag", "optional": true}
    :param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`

    URL for this object::

        `https://<Hostname|Ip address>//axapi/v3/license-manager/connect`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = []
        self.b_key = "connect"
        self.a10_url = "/axapi/v3/license-manager/connect"
        self.DeviceProxy = ""
        # Attributes mirrored from the AXAPI schema.
        self.connect = ""
        self.uuid = ""
        # Seed any schema attribute supplied by the caller.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
| {
"content_hash": "f71b3e9f43861af7e139eefe2c8d0c7f",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 179,
"avg_line_length": 33.24324324324324,
"alnum_prop": 0.6357723577235772,
"repo_name": "a10networks/a10sdk-python",
"id": "204902fc337380e6073bced5a1b5aacd6f290c7a",
"size": "1230",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10sdk/core/A10_license/license_manager_connect.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6956372"
}
],
"symlink_target": ""
} |
import json
import datetime
from httpretty import HTTPretty
from social.p3 import urlencode
from social.actions import do_disconnect
from social.tests.models import User
from social.tests.backends.oauth import OAuth1Test, OAuth2Test
from social.tests.backends.open_id import OpenIdTest
class GoogleOAuth2Test(OAuth2Test):
    """Exercise the Google OAuth2 backend against canned HTTP responses."""
    backend_path = 'social.backends.google.GoogleOAuth2'
    user_data_url = 'https://www.googleapis.com/plus/v1/people/me'
    expected_username = 'foo'
    # Canned token-endpoint response.
    access_token_body = json.dumps({
        'access_token': 'foobar',
        'token_type': 'bearer'
    })
    # Canned Google+ people/me payload the backend parses for user details.
    user_data_body = json.dumps({
        'aboutMe': 'About me text',
        'cover': {
            'coverInfo': {
                'leftImageOffset': 0,
                'topImageOffset': 0
            },
            'coverPhoto': {
                'height': 629,
                'url': 'https://lh5.googleusercontent.com/-ui-GqpNh5Ms/'
                       'AAAAAAAAAAI/AAAAAAAAAZw/a7puhHMO_fg/photo.jpg',
                'width': 940
            },
            'layout': 'banner'
        },
        'displayName': 'Foo Bar',
        'emails': [{
            'type': 'account',
            'value': 'foo@bar.com'
        }],
        'etag': '"e-tag string"',
        'gender': 'male',
        'id': '101010101010101010101',
        'image': {
            'url': 'https://lh5.googleusercontent.com/-ui-GqpNh5Ms/'
                   'AAAAAAAAAAI/AAAAAAAAAZw/a7puhHMO_fg/photo.jpg',
        },
        'isPlusUser': True,
        'kind': 'plus#person',
        'language': 'en',
        'name': {
            'familyName': 'Bar',
            'givenName': 'Foo'
        },
        'objectType': 'person',
        'occupation': 'Software developer',
        'organizations': [{
            'name': 'Org name',
            'primary': True,
            'type': 'school'
        }],
        'placesLived': [{
            'primary': True,
            'value': 'Anyplace'
        }],
        'url': 'https://plus.google.com/101010101010101010101',
        'urls': [{
            'label': 'http://foobar.com',
            'type': 'otherProfile',
            'value': 'http://foobar.com',
        }],
        'verified': False
    })

    def test_login(self):
        self.do_login()

    def test_partial_pipeline(self):
        self.do_partial_pipeline()

    def test_with_unique_user_id(self):
        # Same flow, but the backend should key the user on the immutable
        # Google account id instead of the username/email.
        self.strategy.set_settings({
            'SOCIAL_AUTH_GOOGLE_OAUTH2_USE_UNIQUE_USER_ID': True,
        })
        self.do_login()
class GoogleOAuth2DeprecatedAPITest(GoogleOAuth2Test):
    """Same OAuth2 flow, but against the deprecated v1 userinfo endpoint."""
    user_data_url = 'https://www.googleapis.com/oauth2/v1/userinfo'
    # Canned legacy userinfo payload (flat structure, unlike people/me).
    user_data_body = json.dumps({
        'family_name': 'Bar',
        'name': 'Foo Bar',
        'picture': 'https://lh5.googleusercontent.com/-ui-GqpNh5Ms/'
                   'AAAAAAAAAAI/AAAAAAAAAZw/a7puhHMO_fg/photo.jpg',
        'locale': 'en',
        'gender': 'male',
        'email': 'foo@bar.com',
        'birthday': '0000-01-22',
        'link': 'https://plus.google.com/101010101010101010101',
        'given_name': 'Foo',
        'id': '101010101010101010101',
        'verified_email': True
    })

    def test_login(self):
        # Opt in to the deprecated API before running the shared flow.
        self.strategy.set_settings({
            'SOCIAL_AUTH_GOOGLE_OAUTH2_USE_DEPRECATED_API': True
        })
        self.do_login()

    def test_partial_pipeline(self):
        self.strategy.set_settings({
            'SOCIAL_AUTH_GOOGLE_OAUTH2_USE_DEPRECATED_API': True
        })
        self.do_partial_pipeline()

    def test_with_unique_user_id(self):
        self.strategy.set_settings({
            'SOCIAL_AUTH_GOOGLE_OAUTH2_USE_UNIQUE_USER_ID': True,
            'SOCIAL_AUTH_GOOGLE_OAUTH2_USE_DEPRECATED_API': True
        })
        self.do_login()
class GoogleOAuth1Test(OAuth1Test):
    """Exercise the legacy Google OAuth1 backend."""
    backend_path = 'social.backends.google.GoogleOAuth'
    user_data_url = 'https://www.googleapis.com/userinfo/email'
    expected_username = 'foobar'
    access_token_body = json.dumps({
        'access_token': 'foobar',
        'token_type': 'bearer'
    })
    # Canned request-token step response (OAuth1 three-legged flow).
    request_token_body = urlencode({
        'oauth_token_secret': 'foobar-secret',
        'oauth_token': 'foobar',
        'oauth_callback_confirmed': 'true'
    })
    user_data_body = urlencode({
        'email': 'foobar@gmail.com',
        'isVerified': 'true',
        'id': '101010101010101010101'
    })

    def test_login(self):
        self.do_login()

    def test_partial_pipeline(self):
        self.do_partial_pipeline()

    def test_with_unique_user_id(self):
        self.strategy.set_settings({
            'SOCIAL_AUTH_GOOGLE_OAUTH_USE_UNIQUE_USER_ID': True
        })
        self.do_login()

    def test_with_anonymous_key_and_secret(self):
        # No consumer key/secret configured: the backend should fall back
        # to Google's anonymous OAuth1 consumer and still log in.
        self.strategy.set_settings({
            'SOCIAL_AUTH_GOOGLE_OAUTH_KEY': None,
            'SOCIAL_AUTH_GOOGLE_OAUTH_SECRET': None
        })
        self.do_login()
# Timestamp shared between the canned response's nonce fields so the
# OpenID library accepts the response as fresh.
JANRAIN_NONCE = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')


class GoogleOpenIdTest(OpenIdTest):
    """Exercise the Google OpenID backend against a canned XRDS discovery
    document and a signed id_res response."""
    backend_path = 'social.backends.google.GoogleOpenId'
    expected_username = 'FooBar'
    # XRDS discovery document listing Google's OpenID endpoints
    # (base endpoint plus the mail/gmail/googlemail/profiles variants).
    discovery_body = ''.join([
        '<?xml version="1.0" encoding="UTF-8"?>',
        '<xrds:XRDS xmlns:xrds="xri://$xrds" xmlns="xri://$xrd*($v*2.0)">',
        '<XRD>',
        '<Service priority="0">',
        '<Type>http://specs.openid.net/auth/2.0/signon</Type>',
        '<Type>http://openid.net/srv/ax/1.0</Type>',
        '<Type>'
        'http://specs.openid.net/extensions/ui/1.0/mode/popup'
        '</Type>',
        '<Type>http://specs.openid.net/extensions/ui/1.0/icon</Type>',
        '<Type>http://specs.openid.net/extensions/pape/1.0</Type>',
        '<URI>https://www.google.com/accounts/o8/ud</URI>',
        '</Service>',
        '<Service priority="10">',
        '<Type>http://specs.openid.net/auth/2.0/signon</Type>',
        '<Type>http://openid.net/srv/ax/1.0</Type>',
        '<Type>'
        'http://specs.openid.net/extensions/ui/1.0/mode/popup'
        '</Type>',
        '<Type>http://specs.openid.net/extensions/ui/1.0/icon</Type>',
        '<Type>http://specs.openid.net/extensions/pape/1.0</Type>',
        '<URI>https://www.google.com/accounts/o8/ud?source=mail</URI>',
        '</Service>',
        '<Service priority="10">',
        '<Type>http://specs.openid.net/auth/2.0/signon</Type>',
        '<Type>http://openid.net/srv/ax/1.0</Type>',
        '<Type>'
        'http://specs.openid.net/extensions/ui/1.0/mode/popup'
        '</Type>',
        '<Type>http://specs.openid.net/extensions/ui/1.0/icon</Type>',
        '<Type>http://specs.openid.net/extensions/pape/1.0</Type>',
        '<URI>'
        'https://www.google.com/accounts/o8/ud?source=gmail.com'
        '</URI>',
        '</Service>',
        '<Service priority="10">',
        '<Type>http://specs.openid.net/auth/2.0/signon</Type>',
        '<Type>http://openid.net/srv/ax/1.0</Type>',
        '<Type>'
        'http://specs.openid.net/extensions/ui/1.0/mode/popup'
        '</Type>',
        '<Type>http://specs.openid.net/extensions/ui/1.0/icon</Type>',
        '<Type>http://specs.openid.net/extensions/pape/1.0</Type>',
        '<URI>'
        'https://www.google.com/accounts/o8/ud?source=googlemail.com'
        '</URI>',
        '</Service>',
        '<Service priority="10">',
        '<Type>http://specs.openid.net/auth/2.0/signon</Type>',
        '<Type>http://openid.net/srv/ax/1.0</Type>',
        '<Type>'
        'http://specs.openid.net/extensions/ui/1.0/mode/popup'
        '</Type>',
        '<Type>http://specs.openid.net/extensions/ui/1.0/icon</Type>',
        '<Type>http://specs.openid.net/extensions/pape/1.0</Type>',
        '<URI>https://www.google.com/accounts/o8/ud?source=profiles</URI>',
        '</Service>',
        '</XRD>',
        '</xrds:XRDS>'
    ])
    # Canned positive assertion (id_res) with AX attributes carrying the
    # user's name and email; the nonce must match JANRAIN_NONCE above.
    server_response = urlencode({
        'janrain_nonce': JANRAIN_NONCE,
        'openid.assoc_handle': 'assoc-handle',
        'openid.claimed_id': 'https://www.google.com/accounts/o8/id?'
                             'id=some-google-id',
        'openid.ext1.mode': 'fetch_response',
        'openid.ext1.type.email': 'http://axschema.org/contact/email',
        'openid.ext1.type.first_name': 'http://axschema.org/namePerson/first',
        'openid.ext1.type.last_name': 'http://axschema.org/namePerson/last',
        'openid.ext1.type.old_email': 'http://schema.openid.net/contact/email',
        'openid.ext1.value.email': 'foo@bar.com',
        'openid.ext1.value.first_name': 'Foo',
        'openid.ext1.value.last_name': 'Bar',
        'openid.ext1.value.old_email': 'foo@bar.com',
        'openid.identity': 'https://www.google.com/accounts/o8/id?'
                           'id=some-google-id',
        'openid.mode': 'id_res',
        'openid.ns': 'http://specs.openid.net/auth/2.0',
        'openid.ns.ext1': 'http://openid.net/srv/ax/1.0',
        'openid.op_endpoint': 'https://www.google.com/accounts/o8/ud',
        'openid.response_nonce': JANRAIN_NONCE + 'by95cT34vX7p9g',
        'openid.return_to': 'http://myapp.com/complete/google/?'
                            'janrain_nonce=' + JANRAIN_NONCE,
        'openid.sig': 'brT2kmu3eCzb1gQ1pbaXdnWioVM=',
        'openid.signed': 'op_endpoint,claimed_id,identity,return_to,'
                         'response_nonce,assoc_handle,ns.ext1,ext1.mode,'
                         'ext1.type.old_email,ext1.value.old_email,'
                         'ext1.type.first_name,ext1.value.first_name,'
                         'ext1.type.last_name,ext1.value.last_name,'
                         'ext1.type.email,ext1.value.email'
    })

    def test_login(self):
        self.do_login()

    def test_partial_pipeline(self):
        self.do_partial_pipeline()
class GoogleRevokeTokenTest(GoogleOAuth2Test):
    """Check that disconnecting a user revokes the stored OAuth2 token."""

    def test_revoke_token(self):
        self.strategy.set_settings({
            'SOCIAL_AUTH_GOOGLE_OAUTH2_REVOKE_TOKENS_ON_DISCONNECT': True
        })
        self.do_login()
        user = User.get(self.expected_username)
        user.password = 'password'
        backend = self.backend
        # Stub the revoke endpoint so do_disconnect can "revoke" offline.
        HTTPretty.register_uri(self._method(backend.REVOKE_TOKEN_METHOD),
                               backend.REVOKE_TOKEN_URL,
                               status=200)
        do_disconnect(self.strategy, user)
| {
"content_hash": "0a577df17ff2f5bb8d2b6b00c9186983",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 79,
"avg_line_length": 36.975265017667844,
"alnum_prop": 0.5449159021406728,
"repo_name": "duoduo369/python-social-auth",
"id": "bef2d088bb38323947b35ad90a2d45f604108d13",
"size": "10464",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "social/tests/backends/test_google.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "54"
},
{
"name": "Python",
"bytes": "562718"
},
{
"name": "Shell",
"bytes": "67"
}
],
"symlink_target": ""
} |
"""empty message
Revision ID: 1bd8afe10204
Revises: 10d978d3a8e
Create Date: 2013-09-26 21:32:34.543861
"""
# revision identifiers, used by Alembic.
revision = '1bd8afe10204'  # this migration's id
down_revision = '10d978d3a8e'  # parent migration this one builds on
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    """Drop the role tables and the login-tracking columns from users."""
    ### commands auto generated by Alembic - please adjust! ###
    # Association table first, so its foreign keys don't block 'roles'.
    for table in (u'roles_users', u'roles'):
        op.drop_table(table)
    for column in (u'last_login_at', u'login_count', u'current_login_at',
                   u'current_login_ip', u'last_login_ip'):
        op.drop_column('users', column)
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Restore the login-tracking columns removed by upgrade().
    op.add_column('users', sa.Column(u'last_login_ip', mysql.VARCHAR(length=100), nullable=True))
    op.add_column('users', sa.Column(u'current_login_ip', mysql.VARCHAR(length=100), nullable=True))
    op.add_column('users', sa.Column(u'current_login_at', sa.DATETIME(), nullable=True))
    op.add_column('users', sa.Column(u'login_count', mysql.INTEGER(display_width=11), nullable=True))
    op.add_column('users', sa.Column(u'last_login_at', sa.DATETIME(), nullable=True))
    # Re-create the 'roles' table and the user<->role association table.
    op.create_table(u'roles',
        sa.Column(u'id', mysql.INTEGER(display_width=11), nullable=False),
        sa.Column(u'name', mysql.VARCHAR(length=80), nullable=True),
        sa.Column(u'description', mysql.VARCHAR(length=255), nullable=True),
        sa.PrimaryKeyConstraint(u'id'),
        mysql_default_charset=u'utf8',
        mysql_engine=u'InnoDB'
    )
    op.create_table(u'roles_users',
        sa.Column(u'user_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
        sa.Column(u'role_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
        sa.ForeignKeyConstraint(['role_id'], [u'roles.id'], name=u'roles_users_ibfk_1'),
        sa.ForeignKeyConstraint(['user_id'], [u'users.id'], name=u'roles_users_ibfk_2'),
        sa.PrimaryKeyConstraint(),
        mysql_default_charset=u'utf8',
        mysql_engine=u'InnoDB'
    )
    ### end Alembic commands ###
| {
"content_hash": "c12962d9ac4690a5b396f32e34e5daa2",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 101,
"avg_line_length": 40.283018867924525,
"alnum_prop": 0.688056206088993,
"repo_name": "dedalusj/PaperChase",
"id": "11521ede36563ad718c9c2f56104ec359d9d86de",
"size": "2135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/alembic/versions/1bd8afe10204_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "24393"
},
{
"name": "JavaScript",
"bytes": "54250"
},
{
"name": "Python",
"bytes": "83893"
}
],
"symlink_target": ""
} |
"""
Description: Singleton implementation using decorators
@author: Paul Bodean
@date: 23.01.2018
"""
from selenium import webdriver
from singleton_decorator import singleton
def my_singleton(cls):
    """Class decorator that makes *cls* produce one shared instance.

    Fixes the original's signature/docstring mismatch: it took ``*args``
    and indexed ``args[0]`` while documenting a ``cls`` parameter. As a
    decorator it is always called with exactly one argument (the class),
    so the explicit parameter is backward-compatible.

    :param cls: class to be wrapped as a singleton
    :return: factory returning the unique instance, creating it (and
        forwarding any constructor arguments) on first use only
    """
    instances = {}

    def get_instance(*args, **kwargs):
        if cls not in instances:
            instances[cls] = cls(*args, **kwargs)
        # else: uncomment this part in order to raise a warning related
        # to re-instantiation:
        #     raise UserWarning("An instantiation already exists!")
        return instances[cls]

    return get_instance
@my_singleton
class MyDriver:
    """Chrome WebDriver factory wrapped by the local my_singleton decorator,
    so every caller receives the same MyDriver instance."""

    @staticmethod
    def get_driver():
        # Launches a new Chrome browser session on each call.
        return webdriver.Chrome()
@singleton
class Driver:
    """Same factory as MyDriver, but using the third-party
    singleton_decorator package instead of the hand-rolled decorator."""

    @staticmethod
    def get_driver():
        # Launches a new Chrome browser session on each call.
        return webdriver.Chrome()
| {
"content_hash": "617a98b3a6d509e6294161a9339f2f2d",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 89,
"avg_line_length": 21.048780487804876,
"alnum_prop": 0.660486674391657,
"repo_name": "paulbodean88/automation-design-patterns",
"id": "232024fdf36e7f84ec08131bf350a384f1211dfb",
"size": "863",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/singleton/singleton_decor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40634"
}
],
"symlink_target": ""
} |
import sys
import re
import requests
from cachecontrol import CacheControl
from bs4 import BeautifulSoup
session = requests.session()
# Wrap the session so repeated identical requests can be served from an
# HTTP cache instead of hitting the network again.
cached_session = CacheControl(session)

# URL for the chatbot
URL = "https://kakko.pandorabots.com/pandora/talk?botid=f6a012073e345a08&skin=chat"

# Regex pattern to get the appropriate data: captures the text between a
# closing </b> and the following <br> in the response HTML.
PATTERN = re.compile("</b>((.|\n)*?)<br>")
def ask_mitsuku(message):
    """POST *message* to the Mitsuku chat endpoint and return the reply."""
    response = cached_session.post(URL, data={'message': message})
    # The conversation is rendered inside the first <p> element; PATTERN
    # captures each speaker's text between </b> and <br> markers.
    first_paragraph = str(BeautifulSoup(response.content, 'html.parser').p)
    matches = re.findall(PATTERN, first_paragraph)
    # matches[0] is the echoed user message; matches[1] is the bot's reply,
    # whose first tuple element is the captured group.
    return matches[1][0]
def main():
    """Entry point: expects exactly one CLI argument, the query string."""
    # sys.argv[0] is the script name, so two entries means one query.
    if len(sys.argv) != 2:
        print('Incorrect number of arguments; please pass in only one string that contains the query')
        # sys.exit(1) instead of bare exit(): exits non-zero on misuse and
        # does not rely on the site module's interactive helper.
        sys.exit(1)
    print(ask_mitsuku(sys.argv[1]))


# Execute main
if __name__ == '__main__':
    main()
| {
"content_hash": "f6633665c3e0becd92f2f577f1a38e7c",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 95,
"avg_line_length": 23.204545454545453,
"alnum_prop": 0.7110675808031341,
"repo_name": "christopher18/Celsearch",
"id": "a7c2bdcce8cfc4a38b9ae2f4f67fba1ba6854ef6",
"size": "1098",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/celSearch/api/scripts/query_mitsuku.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "13117"
},
{
"name": "JavaScript",
"bytes": "3015"
},
{
"name": "Python",
"bytes": "1756"
}
],
"symlink_target": ""
} |
from flask import Flask
from redis import Redis
import os
app = Flask(__name__)
# Hostname "redis" is expected to resolve to the Redis service on the
# container network — presumably the docker-compose service name; verify
# against the compose file.
redis = Redis(host="redis", port=6379)
@app.route('/')
def hello():
    """Bump the Redis hit counter and report the running total."""
    redis.incr('hits')
    hit_count = redis.get('hits')
    return 'Hello Docker Book reader! I have been seen {0} times'.format(hit_count)
if __name__ == "__main__":
    # Bind to all interfaces so the container port mapping reaches the app.
    app.run(host="0.0.0.0", debug=True)
| {
"content_hash": "d832ada702995aa9f3b440370c585e21",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 91,
"avg_line_length": 22.266666666666666,
"alnum_prop": 0.6377245508982036,
"repo_name": "abelwuxin/docker-study",
"id": "6dd62973dc3827d4660181d79dd43f4909a27dbd",
"size": "334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compose/composeapp/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1259"
},
{
"name": "JavaScript",
"bytes": "1031"
},
{
"name": "Nginx",
"bytes": "415"
},
{
"name": "Ruby",
"bytes": "528"
}
],
"symlink_target": ""
} |
import pytest
from matchpy.expressions.constraints import CustomConstraint
from matchpy.expressions.expressions import Symbol, Pattern, Operation, Arity, Wildcard
from matchpy.matching.many_to_one import ManyToOneMatcher
from .common import *
from .utils import MockConstraint
def test_add_duplicate_pattern():
    # Adding the exact same pattern twice must be de-duplicated.
    pattern = Pattern(f(a))
    matcher = ManyToOneMatcher()
    matcher.add(pattern)
    matcher.add(pattern)
    assert len(matcher.patterns) == 1


def test_add_duplicate_pattern_with_different_constraint():
    # Same expression but a different constraint is a distinct pattern.
    pattern1 = Pattern(f(a))
    pattern2 = Pattern(f(a), MockConstraint(False))
    matcher = ManyToOneMatcher()
    matcher.add(pattern1)
    matcher.add(pattern2)
    assert len(matcher.patterns) == 2
def test_different_constraints():
    # Constraints on the string length of x select which pattern matches.
    c1 = CustomConstraint(lambda x: len(str(x)) > 1)
    c2 = CustomConstraint(lambda x: len(str(x)) == 1)
    pattern1 = Pattern(f(x_), c1)
    pattern2 = Pattern(f(x_), c2)
    pattern3 = Pattern(f(x_, b), c1)
    pattern4 = Pattern(f(x_, b), c2)
    matcher = ManyToOneMatcher(pattern1, pattern2, pattern3, pattern4)

    subject = f(a)
    results = list(matcher.match(subject))
    assert len(results) == 1
    assert results[0][0] == pattern2
    assert results[0][1] == {'x': a}

    subject = f(Symbol('longer'), b)
    results = sorted(matcher.match(subject))
    assert len(results) == 1
    assert results[0][0] == pattern3
    assert results[0][1] == {'x': Symbol('longer')}


def test_different_constraints_with_match_on_operation():
    # NOTE(review): this body is identical to test_different_constraints
    # above — presumably one of the two was meant to cover a different
    # case; verify against upstream history.
    c1 = CustomConstraint(lambda x: len(str(x)) > 1)
    c2 = CustomConstraint(lambda x: len(str(x)) == 1)
    pattern1 = Pattern(f(x_), c1)
    pattern2 = Pattern(f(x_), c2)
    pattern3 = Pattern(f(x_, b), c1)
    pattern4 = Pattern(f(x_, b), c2)
    matcher = ManyToOneMatcher(pattern1, pattern2, pattern3, pattern4)

    subject = f(a)
    results = list(matcher.match(subject))
    assert len(results) == 1
    assert results[0][0] == pattern2
    assert results[0][1] == {'x': a}

    subject = f(Symbol('longer'), b)
    results = sorted(matcher.match(subject))
    assert len(results) == 1
    assert results[0][0] == pattern3
    assert results[0][1] == {'x': Symbol('longer')}
def test_different_constraints_no_match_on_operation():
    # Neither constraint accepts c, so nothing matches.
    c1 = CustomConstraint(lambda x: x == a)
    c2 = CustomConstraint(lambda x: x == b)
    pattern1 = Pattern(f(x_), c1)
    pattern2 = Pattern(f(x_), c2)
    matcher = ManyToOneMatcher(pattern1, pattern2)

    subject = f(c)
    results = list(matcher.match(subject))
    assert len(results) == 0


def test_different_constraints_on_commutative_operation():
    # Same length-based constraints, but on a commutative head f_c.
    c1 = CustomConstraint(lambda x: len(str(x)) > 1)
    c2 = CustomConstraint(lambda x: len(str(x)) == 1)
    pattern1 = Pattern(f_c(x_), c1)
    pattern2 = Pattern(f_c(x_), c2)
    pattern3 = Pattern(f_c(x_, b), c1)
    pattern4 = Pattern(f_c(x_, b), c2)
    matcher = ManyToOneMatcher(pattern1, pattern2, pattern3, pattern4)

    subject = f_c(a)
    results = list(matcher.match(subject))
    assert len(results) == 1
    assert results[0][0] == pattern2
    assert results[0][1] == {'x': a}

    subject = f_c(Symbol('longer'), b)
    results = sorted(matcher.match(subject))
    assert len(results) == 1
    assert results[0][0] == pattern3
    assert results[0][1] == {'x': Symbol('longer')}

    subject = f_c(a, b)
    results = list(matcher.match(subject))
    assert len(results) == 1
    assert results[0][0] == pattern4
    assert results[0][1] == {'x': a}
@pytest.mark.parametrize('c1', [True, False])
@pytest.mark.parametrize('c2', [True, False])
def test_different_pattern_same_constraint(c1, c2):
    constr1 = CustomConstraint(lambda x: c1)
    constr2 = CustomConstraint(lambda x: c2)
    constr3 = CustomConstraint(lambda x: True)
    patterns = [
        Pattern(f2(x_, a), constr3),
        Pattern(f(a, a, x_), constr3),
        Pattern(f(a, x_), constr1),
        Pattern(f(x_, a), constr2),
        Pattern(f(a, x_, b), constr1),
        Pattern(f(x_, a, b), constr1),
    ]
    subject = f(a, a)
    matcher = ManyToOneMatcher(*patterns)
    results = list(matcher.match(subject))
    # Only the two binary f patterns can match f(a, a); each contributes
    # one match iff its constraint flag is True.
    assert len(results) == int(c1) + int(c2)
def test_same_commutative_but_different_pattern():
    # Two patterns sharing the same commutative subterm f_c(x_) must still
    # be distinguished by their second argument.
    pattern1 = Pattern(f(f_c(x_), a))
    pattern2 = Pattern(f(f_c(x_), b))
    matcher = ManyToOneMatcher(pattern1, pattern2)

    subject = f(f_c(a), a)
    result = list(matcher.match(subject))
    assert result == [(pattern1, {'x': a})]

    subject = f(f_c(a), b)
    result = list(matcher.match(subject))
    assert result == [(pattern2, {'x': a})]


def test_grouped():
    # grouped() buckets matches by substitution: the two identical a-patterns
    # share one bucket, the wildcard pattern gets its own.
    pattern1 = Pattern(a, MockConstraint(True))
    pattern2 = Pattern(a, MockConstraint(True))
    pattern3 = Pattern(x_, MockConstraint(True))
    matcher = ManyToOneMatcher(pattern1, pattern2, pattern3)
    result = [[p for p, _ in ps] for ps in matcher.match(a).grouped()]
    assert len(result) == 2
    for res in result:
        if len(res) == 2:
            assert pattern1 in res
            assert pattern2 in res
        elif len(res) == 1:
            assert pattern3 in res
        else:
            assert False, "Wrong number of grouped matches"
def test_same_pattern_different_label():
    # One pattern registered under two labels yields one match per label.
    pattern = Pattern(a)
    matcher = ManyToOneMatcher()
    matcher.add(pattern, 42)
    matcher.add(pattern, 23)
    result = sorted((l, sorted(map(tuple, s.items()))) for l, s in matcher.match(a))
    assert result == [(23, []), (42, [])]


def test_different_pattern_same_label():
    # Distinct patterns may share a label; both matches carry that label.
    matcher = ManyToOneMatcher()
    matcher.add(Pattern(a), 42)
    matcher.add(Pattern(x_), 42)
    result = sorted((l, sorted(map(tuple, s.items()))) for l, s in matcher.match(a))
    assert result == [(42, []), (42, [('x', a)])]


def test_different_pattern_different_label():
    matcher = ManyToOneMatcher()
    matcher.add(Pattern(a), 42)
    matcher.add(Pattern(x_), 23)
    result = sorted((l, sorted(map(tuple, s.items()))) for l, s in matcher.match(a))
    assert result == [(23, [('x', a)]), (42, [])]
def test_one_identity_optional_commutativity():
    # Build a small arithmetic term language; one_identity lets optional
    # wildcards with default values absorb missing operands.
    Int = Operation.new('Int', Arity.binary)
    Add = Operation.new('+', Arity.variadic, 'Add', infix=True, associative=True, commutative=True, one_identity=True)
    Mul = Operation.new('*', Arity.variadic, 'Mul', infix=True, associative=True, commutative=True, one_identity=True)
    Pow = Operation.new('^', Arity.binary, 'Pow', infix=True)

    class Integer(Symbol):
        # Symbol whose name is the decimal rendering of its value.
        def __init__(self, value):
            super().__init__(str(value))

    i0 = Integer(0)
    i1 = Integer(1)
    i2 = Integer(2)

    x_, m_, a_ = map(Wildcard.dot, 'xma')
    x, m = map(Symbol, 'xm')
    # Optional wildcards with additive (0) / multiplicative (1) defaults.
    a0_ = Wildcard.optional('a', i0)
    b1_ = Wildcard.optional('b', i1)
    c0_ = Wildcard.optional('c', i0)
    d1_ = Wildcard.optional('d', i1)
    m1_ = Wildcard.optional('m', i1)
    n1_ = Wildcard.optional('n', i1)

    pattern22 = Pattern(Int(Mul(Pow(Add(a0_, Mul(b1_, x_)), m1_), Pow(Add(c0_, Mul(d1_, x_)), n1_)), x_))
    pattern23 = Pattern(Int(Mul(Pow(Add(a_, Mul(b1_, x_)), m1_), Pow(Add(c0_, Mul(d1_, x_)), n1_)), x_))
    matcher = ManyToOneMatcher()
    matcher.add(pattern22, 22)
    matcher.add(pattern23, 23)

    subject = Int(Mul(Pow(Add(Mul(b, x), a), i2), Pow(x, i2)), x)
    result = sorted((l, sorted(map(tuple, s.items()))) for l, s in matcher.match(subject))
    # Pattern 22 matches in two symmetric ways (commutativity of Mul);
    # pattern 23 requires a non-default 'a' and matches once.
    assert result == [
        (22, [('a', i0), ('b', i1), ('c', a), ('d', b), ('m', i2), ('n', i2), ('x', x)]),
        (22, [('a', a), ('b', b), ('c', i0), ('d', i1), ('m', i2), ('n', i2), ('x', x)]),
        (23, [('a', a), ('b', b), ('c', i0), ('d', i1), ('m', i2), ('n', i2), ('x', x)]),
    ]
from .test_matching import PARAM_MATCHES, PARAM_PATTERNS
@pytest.mark.parametrize('subject, patterns', PARAM_PATTERNS.items())
def test_many_to_one(subject, patterns):
    """The many-to-one matcher must reproduce every one-to-one match, exactly as often."""
    wrapped = [Pattern(p) for p in patterns]
    matcher = ManyToOneMatcher(*wrapped)
    matches = list(matcher.match(subject))
    for pattern in wrapped:
        for expected_match in PARAM_MATCHES[subject, pattern.expression]:
            assert (pattern, expected_match) in matches, "Subject {!s} and pattern {!s} did not yield the match {!s} but were supposed to".format(
                subject, pattern, expected_match
            )
            # Remove every occurrence so duplicates are accounted for exactly.
            while (pattern, expected_match) in matches:
                matches.remove((pattern, expected_match))
    assert matches == [], "Subject {!s} and pattern {!s} yielded unexpected matches".format(
        subject, pattern
    )
| {
"content_hash": "65b97253013048cda98c7a20898db738",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 146,
"avg_line_length": 32.71647509578544,
"alnum_prop": 0.6124838974118749,
"repo_name": "wheerd/patternmatcher",
"id": "eca1747eceb2bc8fd4ea3fe71dd552bffa48d495",
"size": "8563",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_matching_many_to_one.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "728"
},
{
"name": "Makefile",
"bytes": "428"
},
{
"name": "Python",
"bytes": "318242"
}
],
"symlink_target": ""
} |
from RGT.XML.SVG.baseContainerNode import BaseContainerNode
from RGT.XML.SVG.Attribs.conditionalProcessingAttributes import ConditionalProcessingAttributes
from RGT.XML.SVG.Attribs.graphicalEventAttributes import GraphicalEventAttributes
from types import StringType
from RGT.XML.SVG.basicSvgNode import BasicSvgNode
class SwitchNode(BaseContainerNode, ConditionalProcessingAttributes, GraphicalEventAttributes):
    """DOM wrapper for the SVG 'switch' element.

    Tracks the set of SVG child element types the spec allows inside a
    'switch', and exposes typed accessors for its two direct attributes.
    """

    svgNodeType = BasicSvgNode.SVG_SWITCH_NODE

    # Attribute-name constants for this element.
    ATTRIBUTE_EXTERNAL_RESOURCES_REQUIRED = 'externalResourcesRequired'
    ATTRIBUTE_TRANSFORM = 'transform'

    def __init__(self, ownerDoc):
        BaseContainerNode.__init__(self, ownerDoc, 'switch')
        ConditionalProcessingAttributes.__init__(self)
        GraphicalEventAttributes.__init__(self)
        # Add groups of allowed child element types.
        self._allowedSvgChildNodes.update(self.SVG_GROUP_ANIMATION_ELEMENTS,
                                          self.SVG_GROUP_DESCRIPTIVE_ELEMENTS,
                                          self.SVG_GROUP_SHAPE_ELEMENTS)
        # Add individual allowed elements.
        self._allowedSvgChildNodes.update(
            {self.SVG_A_NODE, self.SVG_FOREIGN_OBJECT_NODE, self.SVG_G_NODE, self.SVG_IMAGE_NODE,
             self.SVG_SVG_NODE, self.SVG_SWITCH_NODE, self.SVG_TEXT_NODE, self.SVG_USE_NODE})

    def setExternalResourcesRequired(self, data):
        """Set the 'externalResourcesRequired' attribute.

        data must be 'true' or 'false'; None is a no-op.
        Raises ValueError for any other value.
        """
        allowedValues = ['true', 'false']
        if data is not None:
            if data not in allowedValues:
                # BUG FIX: the original built the message with a manual loop and
                # concatenated the last value directly to "are allowed" (no
                # space); use join() and a proper separator instead.
                raise ValueError('Value not allowed, only ' + ', '.join(allowedValues) + ' are allowed')
            self._setNodeAttribute(self.ATTRIBUTE_EXTERNAL_RESOURCES_REQUIRED, data)

    def setTransform(self, data):
        """Set the 'transform' attribute; non-string values are converted with str()."""
        if data is not None:
            if type(data) is not StringType:
                data = str(data)
            self._setNodeAttribute(self.ATTRIBUTE_TRANSFORM, data)

    def getExternalResourcesRequired(self):
        """Return the 'externalResourcesRequired' attribute value, or None if unset."""
        node = self._getNodeAttribute(self.ATTRIBUTE_EXTERNAL_RESOURCES_REQUIRED)
        if node is not None:
            return node.nodeValue
        return None

    def getTransform(self):
        """Return the 'transform' attribute value, or None if unset."""
        node = self._getNodeAttribute(self.ATTRIBUTE_TRANSFORM)
        if node is not None:
            return node.nodeValue
        return None
"content_hash": "012ec8393f7e136f59b7abaf58be84f9",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 113,
"avg_line_length": 44,
"alnum_prop": 0.6483471074380165,
"repo_name": "danrg/RGT-tool",
"id": "0c6d07464721a9d01254a0edc513d2bf89d7aa88",
"size": "2420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/RGT/XML/SVG/switchNode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "83200"
},
{
"name": "HTML",
"bytes": "93970"
},
{
"name": "JavaScript",
"bytes": "111380"
},
{
"name": "Python",
"bytes": "788710"
},
{
"name": "SQLPL",
"bytes": "722"
}
],
"symlink_target": ""
} |
import sys
import logging
import warnings
from datetime import timedelta
from celery import routes
from celery.loaders import load_settings
DEFAULT_PROCESS_LOG_FMT = """
[%(asctime)s: %(levelname)s/%(processName)s] %(message)s
""".strip()
DEFAULT_LOG_FMT = '[%(asctime)s: %(levelname)s] %(message)s'
DEFAULT_TASK_LOG_FMT = " ".join("""
[%(asctime)s: %(levelname)s/%(processName)s]
[%(task_name)s(%(task_id)s)] %(message)s
""".strip().split())
LOG_LEVELS = dict(logging._levelNames)
LOG_LEVELS["FATAL"] = logging.FATAL
LOG_LEVELS[logging.FATAL] = "FATAL"
settings = load_settings()
# Fallback value for every supported setting; _get() returns the value from
# here when the user configuration does not define the key.
_DEFAULTS = {
    "CELERY_RESULT_BACKEND": "database",
    "CELERY_ALWAYS_EAGER": False,
    "CELERY_EAGER_PROPAGATES_EXCEPTIONS": False,
    "CELERY_TASK_RESULT_EXPIRES": timedelta(days=1),
    "CELERY_SEND_EVENTS": False,
    "CELERY_IGNORE_RESULT": False,
    "CELERY_STORE_ERRORS_EVEN_IF_IGNORED": False,
    "CELERY_TASK_SERIALIZER": "pickle",
    "CELERY_DISABLE_RATE_LIMITS": False,
    "CELERYD_TASK_TIME_LIMIT": None,
    "CELERYD_TASK_SOFT_TIME_LIMIT": None,
    "CELERYD_MAX_TASKS_PER_CHILD": None,
    "CELERY_ROUTES": None,
    "CELERY_CREATE_MISSING_QUEUES": True,
    "CELERY_DEFAULT_ROUTING_KEY": "celery",
    "CELERY_DEFAULT_QUEUE": "celery",
    "CELERY_DEFAULT_EXCHANGE": "celery",
    "CELERY_DEFAULT_EXCHANGE_TYPE": "direct",
    "CELERY_DEFAULT_DELIVERY_MODE": 2,  # persistent
    "BROKER_CONNECTION_TIMEOUT": 4,
    "BROKER_CONNECTION_RETRY": True,
    "BROKER_CONNECTION_MAX_RETRIES": 100,
    "CELERY_ACKS_LATE": False,
    "CELERYD_POOL_PUTLOCKS": True,
    "CELERYD_POOL": "celery.concurrency.processes.TaskPool",
    "CELERYD_MEDIATOR": "celery.worker.controllers.Mediator",
    "CELERYD_ETA_SCHEDULER": "celery.worker.controllers.ScheduleController",
    "CELERYD_LISTENER": "celery.worker.listener.CarrotListener",
    "CELERYD_CONCURRENCY": 0,  # defaults to cpu count
    "CELERYD_PREFETCH_MULTIPLIER": 4,
    "CELERYD_LOG_FORMAT": DEFAULT_PROCESS_LOG_FMT,
    "CELERYD_TASK_LOG_FORMAT": DEFAULT_TASK_LOG_FMT,
    "CELERYD_LOG_COLOR": False,
    "CELERYD_LOG_LEVEL": "WARN",
    "CELERYD_LOG_FILE": None,  # stderr
    "CELERYBEAT_SCHEDULE_FILENAME": "celerybeat-schedule",
    "CELERYBEAT_MAX_LOOP_INTERVAL": 5 * 60,  # five minutes.
    "CELERYBEAT_LOG_LEVEL": "INFO",
    "CELERYBEAT_LOG_FILE": None,  # stderr
    "CELERYMON_LOG_LEVEL": "INFO",
    "CELERYMON_LOG_FILE": None,  # stderr
    "CELERYMON_LOG_FORMAT": DEFAULT_LOG_FMT,
    "CELERY_BROADCAST_QUEUE": "celeryctl",
    "CELERY_BROADCAST_EXCHANGE": "celeryctl",
    "CELERY_BROADCAST_EXCHANGE_TYPE": "fanout",
    "CELERY_EVENT_QUEUE": "celeryevent",
    "CELERY_EVENT_EXCHANGE": "celeryevent",
    "CELERY_EVENT_EXCHANGE_TYPE": "direct",
    "CELERY_EVENT_ROUTING_KEY": "celeryevent",
    "CELERY_EVENT_SERIALIZER": "json",
    "CELERY_RESULT_EXCHANGE": "celeryresults",
    "CELERY_RESULT_EXCHANGE_TYPE": "direct",
    "CELERY_RESULT_SERIALIZER": "pickle",
    "CELERY_RESULT_PERSISTENT": False,
    "CELERY_MAX_CACHED_RESULTS": 5000,
    "CELERY_TRACK_STARTED": False,

    # Default e-mail settings.
    "SERVER_EMAIL": "celery@localhost",
    "EMAIL_HOST": "localhost",
    "EMAIL_PORT": 25,
    "ADMINS": (),
}
def isatty(fh):
    """Return whether ``fh`` reports being attached to a terminal.

    Fixes bug with mod_wsgi: the mod_wsgi.Log object has no ``isatty``
    attribute, so the attribute is probed before being called.
    """
    probe = getattr(fh, "isatty", None)
    return probe and fh.isatty()
# Warning text emitted when a deprecated setting alias is read by _get().
_DEPRECATION_FMT = """
%s is deprecated in favor of %s and is scheduled for removal in celery v1.4.
""".strip()
def _get(name, default=None, compat=None):
    """Look up setting ``name``, trying deprecated ``compat`` aliases as fallbacks.

    Reading a value through an alias (any name other than ``name`` itself)
    emits a DeprecationWarning. When no alias is set, the explicit
    ``default`` is returned, or failing that the value from ``_DEFAULTS``.
    """
    if default is None:
        default = _DEFAULTS.get(name)
    aliases = [name] + (compat or [])
    for position, alias in enumerate(aliases):
        try:
            value = getattr(settings, alias)
        except AttributeError:
            continue
        if position:  # position 0 is the canonical name; the rest are deprecated.
            warnings.warn(DeprecationWarning(_DEPRECATION_FMT % (alias, name)))
        return value
    return default
# <--- Task <- -- --- - ----- -- #

ALWAYS_EAGER = _get("CELERY_ALWAYS_EAGER")
EAGER_PROPAGATES_EXCEPTIONS = _get("CELERY_EAGER_PROPAGATES_EXCEPTIONS")
RESULT_BACKEND = _get("CELERY_RESULT_BACKEND", compat=["CELERY_BACKEND"])
CELERY_BACKEND = RESULT_BACKEND  # FIXME Remove in 1.4
CACHE_BACKEND = _get("CELERY_CACHE_BACKEND") or _get("CACHE_BACKEND")
CACHE_BACKEND_OPTIONS = _get("CELERY_CACHE_BACKEND_OPTIONS") or {}
TASK_SERIALIZER = _get("CELERY_TASK_SERIALIZER")
TASK_RESULT_EXPIRES = _get("CELERY_TASK_RESULT_EXPIRES")
IGNORE_RESULT = _get("CELERY_IGNORE_RESULT")
TRACK_STARTED = _get("CELERY_TRACK_STARTED")
ACKS_LATE = _get("CELERY_ACKS_LATE")

# Make sure TASK_RESULT_EXPIRES is a timedelta.
# (An integer setting value is interpreted as a number of seconds.)
if isinstance(TASK_RESULT_EXPIRES, int):
    TASK_RESULT_EXPIRES = timedelta(seconds=TASK_RESULT_EXPIRES)

# <--- SQLAlchemy <- -- --- - ----- -- #

RESULT_DBURI = _get("CELERY_RESULT_DBURI")
RESULT_ENGINE_OPTIONS = _get("CELERY_RESULT_ENGINE_OPTIONS")

# <--- Client <- -- --- - ----- -- #

MAX_CACHED_RESULTS = _get("CELERY_MAX_CACHED_RESULTS")
# <--- Worker <- -- --- - ----- -- #

SEND_EVENTS = _get("CELERY_SEND_EVENTS")
DEFAULT_RATE_LIMIT = _get("CELERY_DEFAULT_RATE_LIMIT")
DISABLE_RATE_LIMITS = _get("CELERY_DISABLE_RATE_LIMITS")
CELERYD_TASK_TIME_LIMIT = _get("CELERYD_TASK_TIME_LIMIT")
CELERYD_TASK_SOFT_TIME_LIMIT = _get("CELERYD_TASK_SOFT_TIME_LIMIT")
CELERYD_MAX_TASKS_PER_CHILD = _get("CELERYD_MAX_TASKS_PER_CHILD")
STORE_ERRORS_EVEN_IF_IGNORED = _get("CELERY_STORE_ERRORS_EVEN_IF_IGNORED")
CELERY_SEND_TASK_ERROR_EMAILS = _get("CELERY_SEND_TASK_ERROR_EMAILS", False,
                                     compat=["SEND_CELERY_TASK_ERROR_EMAILS"])
CELERYD_LOG_FORMAT = _get("CELERYD_LOG_FORMAT",
                          compat=["CELERYD_DAEMON_LOG_FORMAT"])
CELERYD_TASK_LOG_FORMAT = _get("CELERYD_TASK_LOG_FORMAT")
CELERYD_LOG_FILE = _get("CELERYD_LOG_FILE")
# Colors are only enabled by default when logging to an attached terminal.
CELERYD_LOG_COLOR = _get("CELERYD_LOG_COLOR",
                         CELERYD_LOG_FILE is None and isatty(sys.stderr))
CELERYD_LOG_LEVEL = _get("CELERYD_LOG_LEVEL",
                         compat=["CELERYD_DAEMON_LOG_LEVEL"])
# Convert the level name (e.g. "WARN") to its numeric logging level.
CELERYD_LOG_LEVEL = LOG_LEVELS[CELERYD_LOG_LEVEL.upper()]
CELERYD_CONCURRENCY = _get("CELERYD_CONCURRENCY")
CELERYD_PREFETCH_MULTIPLIER = _get("CELERYD_PREFETCH_MULTIPLIER")
CELERYD_POOL_PUTLOCKS = _get("CELERYD_POOL_PUTLOCKS")
CELERYD_POOL = _get("CELERYD_POOL")
CELERYD_LISTENER = _get("CELERYD_LISTENER")
CELERYD_MEDIATOR = _get("CELERYD_MEDIATOR")
CELERYD_ETA_SCHEDULER = _get("CELERYD_ETA_SCHEDULER")
# :--- Email settings <- -- --- - ----- -- #

ADMINS = _get("ADMINS")
SERVER_EMAIL = _get("SERVER_EMAIL")
EMAIL_HOST = _get("EMAIL_HOST")
EMAIL_HOST_USER = _get("EMAIL_HOST_USER")
EMAIL_HOST_PASSWORD = _get("EMAIL_HOST_PASSWORD")
EMAIL_PORT = _get("EMAIL_PORT")

# :--- Broker connections <- -- --- - ----- -- #

BROKER_HOST = _get("BROKER_HOST")
BROKER_PORT = _get("BROKER_PORT")
BROKER_USER = _get("BROKER_USER")
BROKER_PASSWORD = _get("BROKER_PASSWORD")
BROKER_VHOST = _get("BROKER_VHOST")
BROKER_USE_SSL = _get("BROKER_USE_SSL")
BROKER_INSIST = _get("BROKER_INSIST")
BROKER_CONNECTION_TIMEOUT = _get("BROKER_CONNECTION_TIMEOUT",
                                 compat=["CELERY_BROKER_CONNECTION_TIMEOUT"])
BROKER_CONNECTION_RETRY = _get("BROKER_CONNECTION_RETRY",
                               compat=["CELERY_BROKER_CONNECTION_RETRY"])
BROKER_CONNECTION_MAX_RETRIES = _get("BROKER_CONNECTION_MAX_RETRIES",
                                     compat=["CELERY_BROKER_CONNECTION_MAX_RETRIES"])
# CARROT_BACKEND is the historical name for the transport backend setting.
BROKER_BACKEND = _get("BROKER_BACKEND") or _get("CARROT_BACKEND")
# <--- Message routing <- -- --- - ----- -- #

DEFAULT_QUEUE = _get("CELERY_DEFAULT_QUEUE")
DEFAULT_ROUTING_KEY = _get("CELERY_DEFAULT_ROUTING_KEY")
DEFAULT_EXCHANGE = _get("CELERY_DEFAULT_EXCHANGE")
DEFAULT_EXCHANGE_TYPE = _get("CELERY_DEFAULT_EXCHANGE_TYPE")
DEFAULT_DELIVERY_MODE = _get("CELERY_DEFAULT_DELIVERY_MODE")
# When no queues are configured, a single default queue is declared using
# the CELERY_DEFAULT_* values above.
QUEUES = _get("CELERY_QUEUES") or {DEFAULT_QUEUE: {
    "exchange": DEFAULT_EXCHANGE,
    "exchange_type": DEFAULT_EXCHANGE_TYPE,
    "binding_key": DEFAULT_ROUTING_KEY}}
CREATE_MISSING_QUEUES = _get("CELERY_CREATE_MISSING_QUEUES")
ROUTES = routes.prepare(_get("CELERY_ROUTES") or [])

# :--- Broadcast queue settings <- -- --- - ----- -- #

BROADCAST_QUEUE = _get("CELERY_BROADCAST_QUEUE")
BROADCAST_EXCHANGE = _get("CELERY_BROADCAST_EXCHANGE")
BROADCAST_EXCHANGE_TYPE = _get("CELERY_BROADCAST_EXCHANGE_TYPE")

# :--- Event queue settings <- -- --- - ----- -- #

EVENT_QUEUE = _get("CELERY_EVENT_QUEUE")
EVENT_EXCHANGE = _get("CELERY_EVENT_EXCHANGE")
EVENT_EXCHANGE_TYPE = _get("CELERY_EVENT_EXCHANGE_TYPE")
EVENT_ROUTING_KEY = _get("CELERY_EVENT_ROUTING_KEY")
EVENT_SERIALIZER = _get("CELERY_EVENT_SERIALIZER")

# :--- AMQP Backend settings <- -- --- - ----- -- #

RESULT_EXCHANGE = _get("CELERY_RESULT_EXCHANGE")
RESULT_EXCHANGE_TYPE = _get("CELERY_RESULT_EXCHANGE_TYPE")
RESULT_SERIALIZER = _get("CELERY_RESULT_SERIALIZER")
RESULT_PERSISTENT = _get("CELERY_RESULT_PERSISTENT")

# :--- Celery Beat <- -- --- - ----- -- #

CELERYBEAT_LOG_LEVEL = _get("CELERYBEAT_LOG_LEVEL")
CELERYBEAT_LOG_FILE = _get("CELERYBEAT_LOG_FILE")
CELERYBEAT_SCHEDULE_FILENAME = _get("CELERYBEAT_SCHEDULE_FILENAME")
CELERYBEAT_MAX_LOOP_INTERVAL = _get("CELERYBEAT_MAX_LOOP_INTERVAL")

# :--- Celery Monitor <- -- --- - ----- -- #

CELERYMON_LOG_LEVEL = _get("CELERYMON_LOG_LEVEL")
CELERYMON_LOG_FILE = _get("CELERYMON_LOG_FILE")
def _init_queues(queues):
    """Convert configuration mapping to a table of queues digestible
    by a :class:`carrot.messaging.ConsumerSet`.

    Any option missing from a queue's definition is filled in from the
    CELERY_DEFAULT_* settings.
    """

    def _defaults(opts):
        # Fill in each missing option (setdefault is a no-op when present).
        opts.setdefault("exchange", DEFAULT_EXCHANGE)
        opts.setdefault("exchange_type", DEFAULT_EXCHANGE_TYPE)
        # BUG FIX: binding_key previously defaulted to DEFAULT_EXCHANGE;
        # it must default to the routing key, matching the default QUEUES
        # declaration built from CELERY_DEFAULT_ROUTING_KEY.
        opts.setdefault("binding_key", DEFAULT_ROUTING_KEY)
        # The routing key mirrors the binding key unless set explicitly.
        opts.setdefault("routing_key", opts.get("binding_key"))
        return opts

    return dict((queue, _defaults(opts)) for queue, opts in queues.items())
def get_queues():
    """Return the configured QUEUES normalized by :func:`_init_queues`."""
    return _init_queues(QUEUES)
| {
"content_hash": "3a1edf3bd1734479727d752cbc363815",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 78,
"avg_line_length": 41.276679841897234,
"alnum_prop": 0.629129560471129,
"repo_name": "mitsuhiko/celery",
"id": "b2b25cf29854f0995b099533d19900caba13a9b1",
"size": "10443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "celery/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "624101"
},
{
"name": "Shell",
"bytes": "5742"
}
],
"symlink_target": ""
} |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HM6_if_IsolatedLHS(HimesisPreConditionPatternLHS):
    # Generated pre-condition pattern LHS: an empty himesis graph (no nodes,
    # no edges) whose constraint always allows the rule to apply.

    def __init__(self):
        """
        Creates the himesis graph representing the AToM3 model HM6_if_IsolatedLHS.
        """
        # Flag this instance as compiled now
        self.is_compiled = True
        super(HM6_if_IsolatedLHS, self).__init__(name='HM6_if_IsolatedLHS', num_nodes=0, edges=[])

        # Add the edges
        self.add_edges([])

        # Set the graph attributes
        self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
        # Constraint source code stored as a string; evaluated by the engine.
        self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
#   returning True enables the rule to be applied,
#   returning False forbids the rule from being applied.
#===============================================================================
return True
"""
        self["name"] = """"""
        # Deterministic GUID derived from the model name.
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'M6_if')

        # Set the node attributes

        # Add the attribute equations
        self["equations"] = []

    def constraint(self, PreNode, graph):
        """
        Executable constraint code.
        @param PreNode: Function taking an integer as parameter
        and returns the node corresponding to that label.
        """
        #===============================================================================
        # This code is executed after the nodes in the LHS have been matched.
        # You can access a matched node labelled n by: PreNode('n').
        # To access attribute x of node n, use: PreNode('n')['x'].
        # The given constraint must evaluate to a boolean expression:
        #   returning True enables the rule to be applied,
        #   returning False forbids the rule from being applied.
        #===============================================================================
        return True
| {
"content_hash": "1c2c27a398951f36c2b8b2b4ce61ba61",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 125,
"avg_line_length": 43.3859649122807,
"alnum_prop": 0.48281439547108773,
"repo_name": "levilucio/SyVOLT",
"id": "2d79dbebf7748741c0c5b4f7d648a8c9fc871408",
"size": "2473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GM2AUTOSAR_MM/Properties/from_eclipse/HM6_if_IsolatedLHS.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
from csvkit import CSVKitReader, CSVKitDictReader
from csvkit.cli import CSVFileType, CSVKitUtility
class CSVPy(CSVKitUtility):
    # Help text shown by the argument parser.
    description = 'Load a CSV file into a CSVKitReader object and then drops into a Python shell.'
    # CSVKitUtility flags that do not apply to this tool.
    override_flags = ['l', 'f', 'zero']

    def add_arguments(self):
        """Register the command-line arguments for this utility."""
        self.argparser.add_argument('file', metavar="FILE", type=CSVFileType(),
                                    help='The CSV file to operate on.')
        self.argparser.add_argument('--dict', dest='as_dict', action='store_true',
                                    help='Use CSVKitDictReader instead of CSVKitReader.')

    def main(self):
        """Open the CSV file and drop into an interactive shell with `reader` bound."""
        # Attempt reading filename, will cause lazy loader to access file and raise error if it does not exist
        filename = self.args.file.name

        if self.args.as_dict:
            reader_class = CSVKitDictReader
        else:
            reader_class = CSVKitReader

        reader = reader_class(self.args.file, **self.reader_kwargs)

        welcome_message = 'Welcome! "%s" has been loaded in a %s object named "reader".' % (filename, reader_class.__name__)

        try:
            # Prefer IPython when it is installed.
            # NOTE(review): IPython.frontend.* is an old import path — verify
            # against the IPython versions this project supports.
            from IPython.frontend.terminal.embed import InteractiveShellEmbed
            ipy = InteractiveShellEmbed(banner1=welcome_message)
            ipy()
        except ImportError:
            # Fall back to the stdlib interactive interpreter.
            import code
            code.interact(welcome_message, local={ 'reader': reader })
def launch_new_instance():
    """Console-script entry point: build the utility and run it."""
    CSVPy().main()


if __name__ == "__main__":
    launch_new_instance()
| {
"content_hash": "718d788c956fafee26ecd06571ad4bde",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 124,
"avg_line_length": 36.90243902439025,
"alnum_prop": 0.6338400528750826,
"repo_name": "jsvine/csvkit",
"id": "a840f2f4ae5f634c4c8a71fe8fefb9f1d8bf9984",
"size": "1536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "csvkit/utilities/csvpy.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
'''
Given an array with n objects colored red, white or blue, sort them so that objects of the same color are adjacent, with the colors in the order red, white and blue.
Here, we will use the integers 0, 1, and 2 to represent the color red, white, and blue respectively.
Note:
Please do the sorting in a one-pass algorithm
'''
class Solution:
    """Sort an array of color codes (0 = red, 1 = white, 2 = blue) in place."""

    # @param data A list of
    def switch(self, data, index_src, index_dest):
        """Swap data[index_src] and data[index_dest] in place."""
        if index_src == index_dest:
            return
        # Tuple swap instead of a temporary variable.
        data[index_src], data[index_dest] = data[index_dest], data[index_src]

    # @param A a list of integers
    # @return nothing, sort in place
    def sortColors(self, A):
        """Single-pass, O(1)-space three-way partition (Dutch national flag).

        Invariants: A[:low] == 0, A[low:mid] == 1, A[high+1:] == 2; the
        region A[mid:high+1] is unclassified.

        IMPROVEMENT: the original reset its scan index back to the zero
        boundary after every zero placement, re-comparing already-classified
        elements. A swapped-in element at `mid` is never 0-unprocessed here,
        so each element is examined at most once.
        """
        low, mid, high = 0, 0, len(A) - 1
        while mid <= high:
            if A[mid] == 0:
                self.switch(A, low, mid)
                low += 1
                mid += 1  # The swapped-in element came from A[low:mid], so it is a 1.
            elif A[mid] == 2:
                self.switch(A, mid, high)
                high -= 1  # The swapped-in element is unclassified; re-examine A[mid].
            else:
                mid += 1
if __name__ == '__main__':
    # Demo: sort a few sample inputs and show the results.
    # BUG FIX: `print A` is Python 2-only syntax; the parenthesized form
    # behaves identically on Python 2 and also works on Python 3.
    solution = Solution()
    for A in ([1, 0, 0, 0, 2], [1, 0, 2, 0, 0, 1, 1, 2], [2, 1, 0]):
        solution.sortColors(A)
        print(A)
| {
"content_hash": "cd5cf2abc2bdc75870829a2b356e1a7e",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 165,
"avg_line_length": 29.692307692307693,
"alnum_prop": 0.5375647668393783,
"repo_name": "shub0/algorithm-data-structure",
"id": "2d4e87de369c1948c893c14ede2016553c563fd2",
"size": "1564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/sorted_color.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "166293"
},
{
"name": "Python",
"bytes": "487573"
}
],
"symlink_target": ""
} |
import unittest
import merge_active_shadow
from google.protobuf import descriptor_pb2
from google.protobuf import text_format
class MergeActiveShadowTest(unittest.TestCase):
  """Unit tests for the merge_active_shadow proto-merging helpers."""

  # Poor man's text proto equivalence. Tensorflow has better tools for this,
  # i.e. assertProto2Equal.
  def assertTextProtoEq(self, lhs, rhs):
    # Only leading/trailing whitespace of the whole string is ignored;
    # every line in between must match exactly.
    self.assertMultiLineEqual(lhs.strip(), rhs.strip())

  def testAdjustReservedRange(self):
    """AdjustReservedRange removes specified skip_reserved_numbers."""
    # Input: four reserved ranges; those covering 42 and 43 must be dropped.
    desc_pb_text = """
reserved_range {
  start: 41
  end: 41
}
reserved_range {
  start: 42
  end: 42
}
reserved_range {
  start: 43
  end: 44
}
reserved_range {
  start: 50
  end: 51
}
"""
    desc = descriptor_pb2.DescriptorProto()
    text_format.Merge(desc_pb_text, desc)
    target = descriptor_pb2.DescriptorProto()
    merge_active_shadow.AdjustReservedRange(target, desc.reserved_range, [42, 43])
    # Only the ranges not containing 42 or 43 survive.
    target_pb_text = """
reserved_range {
  start: 41
  end: 41
}
reserved_range {
  start: 50
  end: 51
}
"""
    self.assertTextProtoEq(target_pb_text, str(target))

  def testMergeActiveShadowEnum(self):
    """MergeActiveShadowEnum recovers shadow values."""
    # Active enum reserves number 2 and the name "baz".
    active_pb_text = """
value {
  number: 1
  name: "foo"
}
value {
  number: 0
  name: "DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE"
}
value {
  number: 3
  name: "bar"
}
reserved_name: "baz"
reserved_range {
  start: 2
  end: 3
}
"""
    active_proto = descriptor_pb2.EnumDescriptorProto()
    text_format.Merge(active_pb_text, active_proto)
    # Shadow carries hidden_envoy_deprecated_* values for reserved numbers.
    shadow_pb_text = """
value {
  number: 1
  name: "foo"
}
value {
  number: 0
  name: "wow"
}
value {
  number: 3
  name: "bar"
}
value {
  number: 2
  name: "hidden_envoy_deprecated_baz"
}
value {
  number: 4
  name: "hidden_envoy_deprecated_huh"
}
"""
    shadow_proto = descriptor_pb2.EnumDescriptorProto()
    text_format.Merge(shadow_pb_text, shadow_proto)
    target_proto = descriptor_pb2.EnumDescriptorProto()
    merge_active_shadow.MergeActiveShadowEnum(active_proto, shadow_proto, target_proto)
    # The reserved value "baz" is restored from the shadow; note
    # "hidden_envoy_deprecated_huh" (number 4, not reserved) is not.
    target_pb_text = """
value {
  name: "foo"
  number: 1
}
value {
  name: "wow"
  number: 0
}
value {
  name: "bar"
  number: 3
}
value {
  name: "hidden_envoy_deprecated_baz"
  number: 2
}
"""
    self.assertTextProtoEq(target_pb_text, str(target_proto))

  def testMergeActiveShadowMessage(self):
    """MergeActiveShadowMessage recovers shadow fields with oneofs."""
    # Active message reserves field 2 / name "wow" and has three oneofs.
    active_pb_text = """
field {
  number: 1
  name: "foo"
}
field {
  number: 0
  name: "bar"
  oneof_index: 2
}
field {
  number: 3
  name: "baz"
}
field {
  number: 4
  name: "newbie"
}
reserved_name: "wow"
reserved_range {
  start: 2
  end: 3
}
oneof_decl {
  name: "ign"
}
oneof_decl {
  name: "ign2"
}
oneof_decl {
  name: "some_oneof"
}
"""
    active_proto = descriptor_pb2.DescriptorProto()
    text_format.Merge(active_pb_text, active_proto)
    # In the shadow, the deprecated field belongs to oneof index 0
    # ("some_oneof" in the shadow's own numbering).
    shadow_pb_text = """
field {
  number: 1
  name: "foo"
}
field {
  number: 0
  name: "bar"
}
field {
  number: 3
  name: "baz"
}
field {
  number: 2
  name: "hidden_envoy_deprecated_wow"
  oneof_index: 0
}
oneof_decl {
  name: "some_oneof"
}
"""
    shadow_proto = descriptor_pb2.DescriptorProto()
    text_format.Merge(shadow_pb_text, shadow_proto)
    target_proto = descriptor_pb2.DescriptorProto()
    merge_active_shadow.MergeActiveShadowMessage(active_proto, shadow_proto, target_proto)
    # The recovered field's oneof_index is remapped to the active message's
    # "some_oneof" (index 2).
    target_pb_text = """
field {
  name: "foo"
  number: 1
}
field {
  name: "baz"
  number: 3
}
field {
  name: "newbie"
  number: 4
}
field {
  name: "bar"
  number: 0
  oneof_index: 2
}
field {
  name: "hidden_envoy_deprecated_wow"
  number: 2
  oneof_index: 2
}
oneof_decl {
  name: "ign"
}
oneof_decl {
  name: "ign2"
}
oneof_decl {
  name: "some_oneof"
}
"""
    self.assertTextProtoEq(target_pb_text, str(target_proto))

  def testMergeActiveShadowMessageMissing(self):
    """MergeActiveShadowMessage recovers missing messages from shadow."""
    active_proto = descriptor_pb2.DescriptorProto()
    shadow_proto = descriptor_pb2.DescriptorProto()
    shadow_proto.nested_type.add().name = 'foo'
    target_proto = descriptor_pb2.DescriptorProto()
    merge_active_shadow.MergeActiveShadowMessage(active_proto, shadow_proto, target_proto)
    self.assertEqual(target_proto.nested_type[0].name, 'foo')

  def testMergeActiveShadowFileMissing(self):
    """MergeActiveShadowFile recovers missing messages from shadow."""
    active_proto = descriptor_pb2.FileDescriptorProto()
    shadow_proto = descriptor_pb2.FileDescriptorProto()
    shadow_proto.message_type.add().name = 'foo'
    target_proto = descriptor_pb2.DescriptorProto()
    target_proto = merge_active_shadow.MergeActiveShadowFile(active_proto, shadow_proto)
    self.assertEqual(target_proto.message_type[0].name, 'foo')

  # TODO(htuch): add some test for recursion.
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
  unittest.main()
| {
"content_hash": "838ed362958ad6dbf913eb8f74fc6170",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 90,
"avg_line_length": 20.099585062240664,
"alnum_prop": 0.6765070189925682,
"repo_name": "istio/envoy",
"id": "8f7c98d4fa7b5de057b41836503666e5c261a4fb",
"size": "4844",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/protoxform/merge_active_shadow_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "35685"
},
{
"name": "C++",
"bytes": "19486055"
},
{
"name": "Dockerfile",
"bytes": "245"
},
{
"name": "Emacs Lisp",
"bytes": "966"
},
{
"name": "Go",
"bytes": "695"
},
{
"name": "JavaScript",
"bytes": "1760"
},
{
"name": "Makefile",
"bytes": "1985"
},
{
"name": "PowerShell",
"bytes": "6173"
},
{
"name": "PureBasic",
"bytes": "472"
},
{
"name": "Python",
"bytes": "418501"
},
{
"name": "Rust",
"bytes": "3471"
},
{
"name": "Shell",
"bytes": "120251"
},
{
"name": "Starlark",
"bytes": "1184414"
},
{
"name": "Thrift",
"bytes": "748"
}
],
"symlink_target": ""
} |
"""Save and restore variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os.path
import re
import time
import uuid
import numpy as np
import six
from google.protobuf.any_pb2 import Any
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training_util
from tensorflow.python.training.checkpoint_state_pb2 import CheckpointState
from tensorflow.python.util import compat
def ops_used_by_graph_def(graph_def):
  """Collect the list of ops used by a graph.

  Does not validate that the ops are all registered.

  Args:
    graph_def: A `GraphDef` proto, as from `graph.as_graph_def()`.

  Returns:
    A list of strings, each naming an op used by the graph.
  """
  # Map function names to definitions so op references to functions can be
  # expanded recursively.
  name_to_function = {fun.signature.name: fun for fun in graph_def.library.function}

  used_ops = set()   # Includes both primitive ops and function names.
  pending = []       # Functions whose bodies still need to be scanned.

  def mark_op_as_used(op):
    if op not in used_ops and op in name_to_function:
      pending.append(name_to_function[op])
    used_ops.add(op)

  for node in graph_def.node:
    mark_op_as_used(node.op)
  while pending:
    fun = pending.pop()
    # TODO(josh11b): Eventually remove the legacy `fun.node` fallback.
    nodes = fun.node_def if fun.node_def else fun.node
    for node in nodes:
      mark_op_as_used(node.op)

  # Report only primitive ops; function names are expanded, not returned.
  return [op for op in used_ops if op not in name_to_function]
def stripped_op_list_for_graph(graph_def):
  """Collect the stripped OpDefs for ops used by a graph.

  This function computes the `stripped_op_list` field of `MetaGraphDef` and
  similar protos.  The result can be communicated from the producer to the
  consumer, which can then use the C++ function
  `RemoveNewDefaultAttrsFromGraphDef` to improve forwards compatibility.

  Args:
    graph_def: A `GraphDef` proto, as from `graph.as_graph_def()`.

  Returns:
    An `OpList` of ops used by the graph.

  Raises:
    ValueError: If an unregistered op is used.
  """
  # This is the Python equivalent of StrippedOpListForGraph in C++.
  # Unfortunately, since the Python op registry can differ from that in C++, we
  # can't remove the duplication using swig (at least naively).
  # TODO(irving): Support taking graphs directly.
  used_ops = ops_used_by_graph_def(graph_def)

  # Verify that all used ops are registered.
  registered_ops = op_def_registry.get_registered_ops()
  # These internal ops used by functions are not registered, so we need to
  # whitelist them.  # TODO(irving): Do something better here.
  op_whitelist = ("_Arg", "_Retval", "_ListToArray", "_ArrayToList")
  unregistered = [op for op in used_ops
                  if op not in registered_ops and op not in op_whitelist]
  if unregistered:
    raise ValueError("Op %s is used by the graph, but is not registered" %
                     unregistered[0])

  # Build the stripped op list in sorted order.
  op_defs = [registered_ops[op] for op in sorted(used_ops)
             if op in registered_ops]
  return op_def_pb2.OpList(op=op_defs)
class BaseSaverBuilder(object):
"""Base class for Savers.
Can be extended to create different Ops.
"""
_CHECKPOINT_FORMAT_VERSION = saver_pb2.SaverDef.V1
class SaveSpec(object):
  """Class used to describe tensor slices that need to be saved."""

  def __init__(self, tensor, slice_spec, name):
    """Creates a `SaveSpec` object.

    Args:
      tensor: the tensor to save.
      slice_spec: the slice to be saved. See `Variable.SaveSliceInfo`.
      name: the name to save the tensor under.
    """
    self.tensor = tensor          # Tensor to write to the checkpoint.
    self.slice_spec = slice_spec  # Slice specification (see Variable.SaveSliceInfo).
    self.name = name              # Checkpoint key for this tensor.
class SaveableObject(object):
  """Base class for saving and restoring saveable objects."""

  def __init__(self, op, specs, name):
    """Creates a `SaveableObject` object.

    Args:
      op: the "producer" object that this class wraps; it produces a list of
        tensors to save.  E.g., a "Variable" object saving its backing tensor.
      specs: a list of SaveSpec, each element of which describes one tensor to
        save under this object.
      name: the name to save the object under.
    """
    self.op = op
    self.specs = specs
    self.name = name
    # The device of this saveable. All tensors must be on the same device.
    self.device = specs[0].tensor.device

  def restore(self, restored_tensors, restored_shapes):
    """Restores this object from 'restored_tensors'.

    Subclasses must override this method; the base implementation always
    raises.

    Args:
      restored_tensors: the tensors that were loaded from a checkpoint
      restored_shapes: the shapes this object should conform to after
        restore, or None.

    Returns:
      An operation that restores the state of the object.

    Raises:
      ValueError: If the object cannot be restored using the provided
        parameters.
    """
    # pylint: disable=unused-argument
    raise ValueError("Calling an abstract method.")
class VariableSaveable(SaveableObject):
  """SaveableObject implementation that handles Variables."""

  def __init__(self, var, slice_spec, name):
    # A Variable is saved as a single SaveSpec wrapping its tensor.
    spec = BaseSaverBuilder.SaveSpec(var, slice_spec, name)
    super(BaseSaverBuilder.VariableSaveable, self).__init__(var, [spec], name)

  def restore(self, restored_tensors, restored_shapes):
    restored_tensor = restored_tensors[0]
    if restored_shapes is not None:
      # Reshape to the requested shape before assigning.
      restored_tensor = array_ops.reshape(restored_tensor, restored_shapes[0])
    # Only validate the shape when no reshape was requested and the
    # variable's static shape is fully known.
    return state_ops.assign(
        self.op,
        restored_tensor,
        validate_shape=restored_shapes is None and
        self.op.get_shape().is_fully_defined())
def __init__(self):
  # No builder state to initialize.
  pass
def save_op(self, filename_tensor, saveables):
  """Create an Op to save 'saveables'.

  This is intended to be overridden by subclasses that want to generate
  different Ops.

  Args:
    filename_tensor: String Tensor.
    saveables: A list of BaseSaverBuilder.SaveableObject objects.

  Returns:
    An Operation that save the variables.
  """
  # pylint: disable=protected-access
  # Flatten every SaveSpec of every saveable into one ordered list, then
  # project out the three parallel argument lists the save op expects.
  all_specs = [spec for saveable in saveables for spec in saveable.specs]
  return io_ops._save(
      filename=filename_tensor,
      tensor_names=[spec.name for spec in all_specs],
      tensors=[spec.tensor for spec in all_specs],
      tensor_slices=[spec.slice_spec for spec in all_specs])
def restore_op(self, filename_tensor, saveable, preferred_shard):
  """Create ops to restore 'saveable'.

  This is intended to be overridden by subclasses that want to generate
  different Ops.

  Args:
    filename_tensor: String Tensor.
    saveable: A BaseSaverBuilder.SaveableObject object.
    preferred_shard: Int. Shard to open first when loading a sharded file.

  Returns:
    A list of Tensors resulting from reading 'saveable' from
    'filename'.
  """
  # One restore-slice read per saved spec, in spec order.
  # pylint: disable=protected-access
  return [
      io_ops._restore_slice(
          filename_tensor,
          spec.name,
          spec.slice_spec,
          spec.tensor.dtype,
          preferred_shard=preferred_shard)
      for spec in saveable.specs
  ]
def sharded_filename(self, filename_tensor, shard, num_shards):
  """Append sharding information to a filename.

  Args:
    filename_tensor: A string tensor.
    shard: Integer.  The shard for the filename.
    num_shards: An int Tensor for the number of shards.

  Returns:
    A string tensor.
  """
  # Delegates to the generated op; this produces names of the form
  # <filename>-NNNNN-of-MMMMM.
  # pylint: disable=protected-access
  return gen_io_ops._sharded_filename(filename_tensor, shard, num_shards)
def _AddSaveOps(self, filename_tensor, saveables):
  """Add ops to save variables that are on the same shard.

  Args:
    filename_tensor: String Tensor.
    saveables: A list of SaveableObject objects.

  Returns:
    A tensor with the filename used to save.
  """
  save_node = self.save_op(filename_tensor, saveables)
  # Return the filename tensor itself, gated on the save having run.
  return control_flow_ops.with_dependencies([save_node], filename_tensor)
def _AddShardedSaveOps(self, filename_tensor, per_device):
  """Add ops to save the params per shard.

  Args:
    filename_tensor: a scalar String Tensor.
    per_device: A list of (device, BaseSaverBuilder.SaveableObject) pairs, as
      returned by _GroupByDevices().

  Returns:
    An op to save the variables.
  """
  num_shards_tensor = constant_op.constant(len(per_device), name="num_shards")
  sharded_saves = []
  for shard_index, (device, saveables) in enumerate(per_device):
    # Each shard's save runs on the device that owns its variables.
    with ops.device(device):
      shard_filename = self.sharded_filename(filename_tensor, shard_index,
                                             num_shards_tensor)
      sharded_saves.append(self._AddSaveOps(shard_filename, saveables))
  # Return the sharded name for the save path, gated on every shard save.
  with ops.control_dependencies([save.op for save in sharded_saves]):
    # pylint: disable=protected-access
    return gen_io_ops._sharded_filespec(filename_tensor, num_shards_tensor)
def _AddRestoreOps(self,
                   filename_tensor,
                   saveables,
                   restore_sequentially,
                   reshape,
                   preferred_shard=-1,
                   name="restore_all"):
  """Add operations to restore saveables.

  Args:
    filename_tensor: Tensor for the path of the file to load.
    saveables: A list of SaveableObject objects.
    restore_sequentially: True if we want to restore variables sequentially
      within a shard.
    reshape: True if we want to reshape loaded tensors to the shape of
      the corresponding variable.
    preferred_shard: Shard to open first when loading a sharded file.
    name: Name for the returned op.

  Returns:
    An Operation that restores the variables.
  """
  assign_ops = []
  for saveable in saveables:
    # When restoring sequentially, each restore waits on the previous assign.
    control_inputs = assign_ops[-1:] if restore_sequentially else []
    # Load and optionally reshape on the CPU, as string tensors are not
    # available on the GPU.
    # TODO(touts): Re-enable restore on GPU when we can support annotating
    # string tensors as "HostMemory" inputs.
    restore_device = (
        graph_util.set_cpu0(saveable.device) if saveable.device else None)
    with ops.device(restore_device):
      with ops.control_dependencies(control_inputs):
        tensors = self.restore_op(filename_tensor, saveable, preferred_shard)
        shapes = None
        if reshape:
          # Compute target shapes; the restore op decides if and how to do
          # the reshape.
          shapes = []
          for spec in saveable.specs:
            target_shape = spec.tensor.get_shape()
            if not target_shape.is_fully_defined():
              target_shape = array_ops.shape(spec.tensor)
            shapes.append(target_shape)
        assign_ops.append(saveable.restore(tensors, shapes))
  # A NoOp gated on every assign created above.
  return control_flow_ops.group(*assign_ops, name=name)
def _AddShardedRestoreOps(self, filename_tensor, per_device,
                          restore_sequentially, reshape):
  """Add Ops to restore variables from multiple devices.

  Args:
    filename_tensor: Tensor for the path of the file to load.
    per_device: A list of (device, SaveableObject) pairs, as
      returned by _GroupByDevices().
    restore_sequentially: True if we want to restore variables sequentially
      within a shard.
    reshape: True if we want to reshape loaded tensors to the shape of
      the corresponding variable.

  Returns:
    An Operation that restores the variables.
  """
  shard_restores = []
  for shard_index, (device, saveables) in enumerate(per_device):
    # Restore each shard on the device that owns its variables.
    with ops.device(device):
      shard_restores.append(
          self._AddRestoreOps(
              filename_tensor,
              saveables,
              restore_sequentially,
              reshape,
              preferred_shard=shard_index,
              name="restore_shard"))
  return control_flow_ops.group(*shard_restores, name="restore_all")
@staticmethod
def _IsVariable(v):
  """Returns True iff `v` is a Tensor produced by a (auto-reload) Variable op."""
  if not isinstance(v, ops.Tensor):
    return False
  return v.op.type in ("Variable", "AutoReloadVariable")
def _GroupByDevices(self, saveables):
  """Group Variable tensor slices per device.

  TODO(touts): Make sure that all the devices found are on different
  job/replica/task/cpu|gpu. It would be bad if 2 were on the same device.
  It can happen if the devices are unspecified.

  Args:
    saveables: A list of BaseSaverBuilder.SaveableObject objects.

  Returns:
    A list of tuples: (device_name, BaseSaverBuilder.SaveableObject) tuples.
    The list is sorted by ascending device_name.

  Raises:
    ValueError: If the tensors of a saveable are on different devices.
  """
  # Idiom fix: defaultdict(list) instead of defaultdict(lambda: []).
  per_device = collections.defaultdict(list)
  for saveable in saveables:
    # All specs of one saveable must canonicalize to a single device.
    canonical_device = set(
        pydev.canonical_name(spec.tensor.device) for spec in saveable.specs)
    if len(canonical_device) != 1:
      raise ValueError("All tensors of a saveable object must be "
                       "on the same device: %s" % saveable.name)
    per_device[canonical_device.pop()].append(saveable)
  # Sort by device name for a deterministic shard order.
  return sorted(per_device.items(), key=lambda t: t[0])
@staticmethod
def OpListToDict(op_list):
  """Create a dictionary of names to operation lists.

  Args:
    op_list: A list, tuple, or set of Variables or SaveableObjects.

  Returns:
    A dictionary of names to the operations that must be saved under
    that name.  Variables with save_slice_info are grouped together under the
    same key in no particular order.

  Raises:
    TypeError: If the type of op_list or its elements is not supported.
    ValueError: If at least two saveables share the same name.
  """
  if not isinstance(op_list, (list, tuple, set)):
    raise TypeError("Variables to save should be passed in a dict or a "
                    "list: %s" % op_list)
  op_list = set(op_list)
  names_to_saveables = {}
  # pylint: disable=protected-access
  for var in op_list:
    if isinstance(var, BaseSaverBuilder.SaveableObject):
      # SaveableObjects carry their own checkpoint name.
      names_to_saveables[var.name] = var
      continue
    if isinstance(var, variables.Variable) and var._save_slice_info:
      # A slice of a partitioned variable: group all slices under the
      # partitioned variable's full name.
      full_name = var._save_slice_info.full_name
      group = names_to_saveables.setdefault(full_name, [])
      if not isinstance(group, list):
        raise ValueError("Mixing slices and non-slices with the same name: "
                         "%s" % full_name)
      group.append(var)
      continue
    # Anything else must convert to a Variable tensor.
    tensor = ops.convert_to_tensor(var, as_ref=True)
    if not BaseSaverBuilder._IsVariable(tensor):
      raise TypeError("Variable to save is not a Variable: %s" % tensor)
    op_name = tensor.op.name
    if op_name in names_to_saveables:
      raise ValueError("At least two variables have the same name: %s" %
                       op_name)
    names_to_saveables[op_name] = tensor
  # pylint: enable=protected-access
  return names_to_saveables
def _ValidateAndSliceInputs(self, names_to_saveables):
  """Returns the variables and names that will be used for a Saver.

  Args:
    names_to_saveables: A dict (k, v) where k is the name of an operation and
      v is an operation to save or a BaseSaverBuilder.Saver.

  Returns:
    A list of BaseSaverBuilder.SaveableObject objects.

  Raises:
    TypeError: If any of the keys are not strings or any of the
      values are not one of Tensor or Variable or a checkpointable operation.
    ValueError: If the same operation is given in more than one value
      (this also applies to slices of SlicedVariables).
  """
  if not isinstance(names_to_saveables, dict):
    names_to_saveables = BaseSaverBuilder.OpListToDict(names_to_saveables)

  saveables = []
  seen_ops = set()
  # Deterministic order: sort by checkpoint name.
  for name in sorted(names_to_saveables.keys()):
    if not isinstance(name, six.string_types):
      raise TypeError(
          "names_to_saveables must be a dict mapping string names to "
          "checkpointable operations. Name is not a string: %s" % name)
    op = names_to_saveables[name]
    if isinstance(op, BaseSaverBuilder.SaveableObject):
      self._AddSaveable(saveables, seen_ops, op)
    elif isinstance(op, (list, tuple)):
      # A set of slices of one partitioned variable.
      slice_name = None
      # pylint: disable=protected-access
      for variable in op:
        if not isinstance(variable, variables.Variable):
          raise ValueError("Slices must all be Variables: %s" % variable)
        if not variable._save_slice_info:
          raise ValueError("Slices must all be slices: %s" % variable)
        if slice_name is None:
          slice_name = variable._save_slice_info.full_name
        elif slice_name != variable._save_slice_info.full_name:
          raise ValueError(
              "Slices must all be from the same tensor: %s != %s" %
              (slice_name, variable._save_slice_info.full_name))
        self._AddSaveable(
            saveables, seen_ops,
            BaseSaverBuilder.VariableSaveable(
                variable, variable._save_slice_info.spec, name))
      # pylint: enable=protected-access
    else:
      # A single variable or tensor.
      tensor = ops.convert_to_tensor(op, as_ref=True)
      if not BaseSaverBuilder._IsVariable(tensor):
        raise TypeError("names_to_saveables must be a dict mapping string "
                        "names to Tensors/Variables. Not a variable: %s" %
                        tensor)
      self._AddSaveable(saveables, seen_ops,
                        BaseSaverBuilder.VariableSaveable(tensor, "", name))
  return saveables
def _AddSaveable(self, saveables, seen_ops, saveable):
"""Adds the saveable to the saveables list.
Args:
saveables: List to append the SaveableObject to.
seen_ops: Set of the ops of the saveables already processed. Used to
check that each saveable is only saved once.
saveable: The saveable.
Raises:
ValueError: If the saveable has already been processed.
"""
if saveable.op in seen_ops:
raise ValueError("The same saveable will be restored with two names: %s" %
saveable.name)
saveables.append(saveable)
seen_ops.add(saveable.op)
def build(self,
          names_to_saveables,
          reshape=False,
          sharded=False,
          max_to_keep=5,
          keep_checkpoint_every_n_hours=10000.0,
          name=None,
          restore_sequentially=False,
          filename="model"):
  """Adds save/restore nodes to the graph and creates a SaverDef proto.

  Args:
    names_to_saveables: A dictionary mapping name to a Variable or
      SaveableObject.  Each name will be associated with the
      corresponding variable in the checkpoint.
    reshape: If True, allow restoring parameters from a checkpoint
      where the parameters have a different shape (needed e.g. for
      Dist-Belief checkpoints).
    sharded: If True, shard the checkpoints, one per device that has
      Variable nodes.
    max_to_keep: Maximum number of checkpoints to keep.  As new checkpoints
      are created, old ones are deleted.  If None or 0, no checkpoints are
      deleted from the filesystem but only the last one is kept in the
      `checkpoint` file.  The number is only roughly enforced; after
      restarts more than max_to_keep checkpoints may be kept.
    keep_checkpoint_every_n_hours: How often checkpoints should be kept.
      Defaults to 10,000 hours.
    name: String.  Optional name to use as a prefix when adding operations.
    restore_sequentially: A Bool, which if true, causes restore of different
      variables to happen sequentially within each device.
    filename: If known at graph construction time, filename used for variable
      loading/saving.

  Returns:
    A SaverDef proto.

  Raises:
    TypeError: If 'names_to_saveables' is not a dictionary mapping string
      keys to variable Tensors.
    ValueError: If any of the keys or values in 'names_to_saveables' is not
      unique.
  """
  saveables = self._ValidateAndSliceInputs(names_to_saveables)
  if max_to_keep is None:
    max_to_keep = 0

  with ops.name_scope(name, "save",
                      [saveable.op for saveable in saveables]) as name:
    # Constant string tensor holding the checkpoint filename.
    filename_tensor = constant_op.constant(filename)

    # Save/restore either per-device shards or everything at once.
    if sharded:
      per_device = self._GroupByDevices(saveables)
      save_tensor = self._AddShardedSaveOps(filename_tensor, per_device)
      restore_op = self._AddShardedRestoreOps(filename_tensor, per_device,
                                              restore_sequentially, reshape)
    else:
      save_tensor = self._AddSaveOps(filename_tensor, saveables)
      restore_op = self._AddRestoreOps(filename_tensor, saveables,
                                       restore_sequentially, reshape)

  # In the following use case, it's possible to have restore_ops be called
  # something else:
  # - Build inference graph and export a meta_graph.
  # - Import the inference meta_graph
  # - Extend the inference graph to a train graph.
  # - Export a new meta_graph.
  # Now the second restore_op will be called "restore_all_1".
  # As such, comment out the assert for now until we know whether supporting
  # such usage model makes sense.
  #
  # assert restore_op.name.endswith("restore_all"), restore_op.name

  return saver_pb2.SaverDef(
      filename_tensor_name=filename_tensor.name,
      save_tensor_name=save_tensor.name,
      restore_op_name=restore_op.name,
      max_to_keep=max_to_keep,
      sharded=sharded,
      keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
      version=self._CHECKPOINT_FORMAT_VERSION)
def _GetCheckpointFilename(save_dir, latest_filename):
"""Returns a filename for storing the CheckpointState.
Args:
save_dir: The directory for saving and restoring checkpoints.
latest_filename: Name of the file in 'save_dir' that is used
to store the CheckpointState.
Returns:
The path of the file that contains the CheckpointState proto.
"""
if latest_filename is None:
latest_filename = "checkpoint"
return os.path.join(save_dir, latest_filename)
def generate_checkpoint_state_proto(save_dir,
                                    model_checkpoint_path,
                                    all_model_checkpoint_paths=None):
  """Generates a checkpoint state proto.

  Args:
    save_dir: Directory where the model was saved.
    model_checkpoint_path: The checkpoint file.
    all_model_checkpoint_paths: List of strings.  Paths to all not-yet-deleted
      checkpoints, sorted from oldest to newest.  If this is a non-empty list,
      the last element must be equal to model_checkpoint_path.  These paths
      are also saved in the CheckpointState proto.

  Returns:
    CheckpointState proto with model_checkpoint_path and
    all_model_checkpoint_paths updated to either absolute paths or
    relative paths to the current save_dir.
  """
  if all_model_checkpoint_paths is None:
    all_model_checkpoint_paths = []

  # NOTE: when the newest path is missing we append it in place, mutating the
  # caller's list (preserved historical behavior).
  if (not all_model_checkpoint_paths or
      all_model_checkpoint_paths[-1] != model_checkpoint_path):
    logging.info("%s is not in all_model_checkpoint_paths. Manually adding it.",
                 model_checkpoint_path)
    all_model_checkpoint_paths.append(model_checkpoint_path)

  # Relative paths need to be rewritten to be relative to the "save_dir"
  # if model_checkpoint_path already contains "save_dir".
  if not os.path.isabs(save_dir):
    if not os.path.isabs(model_checkpoint_path):
      model_checkpoint_path = os.path.relpath(model_checkpoint_path, save_dir)
    for i, path in enumerate(all_model_checkpoint_paths):
      if not os.path.isabs(path):
        all_model_checkpoint_paths[i] = os.path.relpath(path, save_dir)

  return CheckpointState(
      model_checkpoint_path=model_checkpoint_path,
      all_model_checkpoint_paths=all_model_checkpoint_paths)
def update_checkpoint_state(save_dir,
                            model_checkpoint_path,
                            all_model_checkpoint_paths=None,
                            latest_filename=None):
  """Updates the content of the 'checkpoint' file.

  This updates the checkpoint file containing a CheckpointState
  proto.

  Args:
    save_dir: Directory where the model was saved.
    model_checkpoint_path: The checkpoint file.
    all_model_checkpoint_paths: List of strings.  Paths to all not-yet-deleted
      checkpoints, sorted from oldest to newest.  If this is a non-empty list,
      the last element must be equal to model_checkpoint_path.  These paths
      are also saved in the CheckpointState proto.
    latest_filename: Optional name of the checkpoint file.  Default to
      'checkpoint'.

  Raises:
    RuntimeError: If the save paths conflict.
  """
  # Writes the "checkpoint" file for the coordinator for later restoration.
  coord_checkpoint_filename = _GetCheckpointFilename(save_dir, latest_filename)
  state = generate_checkpoint_state_proto(
      save_dir,
      model_checkpoint_path,
      all_model_checkpoint_paths=all_model_checkpoint_paths)
  if coord_checkpoint_filename == state.model_checkpoint_path:
    raise RuntimeError("Save path '%s' conflicts with path used for "
                       "checkpoint state.  Please use a different save path." %
                       model_checkpoint_path)

  # Write to a uniquely-named temp file first, then *atomically* rename.
  # This prevents a potential read/write race between this function and
  # get_checkpoint_state().
  temp_pathname = "%s.tmp.%s" % (coord_checkpoint_filename, uuid.uuid4().hex)
  file_io.write_string_to_file(temp_pathname,
                               text_format.MessageToString(state))
  file_io.rename(temp_pathname, coord_checkpoint_filename, overwrite=True)
def get_checkpoint_state(checkpoint_dir, latest_filename=None):
  """Returns CheckpointState proto from the "checkpoint" file.

  If the "checkpoint" file contains a valid CheckpointState
  proto, returns it.

  Args:
    checkpoint_dir: The directory of checkpoints.
    latest_filename: Optional name of the checkpoint file.  Default to
      'checkpoint'.

  Returns:
    A CheckpointState if the state was available, None
    otherwise.

  Raises:
    ValueError: if the checkpoint read doesn't have model_checkpoint_path set.
  """
  ckpt = None
  coord_checkpoint_filename = _GetCheckpointFilename(checkpoint_dir,
                                                     latest_filename)
  try:
    # Check that the file exists before opening it to avoid
    # many lines of errors from colossus in the logs.
    if file_io.file_exists(coord_checkpoint_filename):
      file_content = file_io.read_file_to_string(
          coord_checkpoint_filename).decode("utf-8")
      ckpt = CheckpointState()
      text_format.Merge(file_content, ckpt)
      if not ckpt.model_checkpoint_path:
        # Fix: the directory was previously passed as a second ValueError
        # argument instead of being %-formatted into the message.
        raise ValueError("Invalid checkpoint state loaded from %s" %
                         checkpoint_dir)
      # For relative model_checkpoint_path and all_model_checkpoint_paths,
      # prepend checkpoint_dir.
      if not os.path.isabs(ckpt.model_checkpoint_path):
        ckpt.model_checkpoint_path = os.path.join(checkpoint_dir,
                                                  ckpt.model_checkpoint_path)
      for i, p in enumerate(ckpt.all_model_checkpoint_paths):
        if not os.path.isabs(p):
          ckpt.all_model_checkpoint_paths[i] = os.path.join(checkpoint_dir, p)
  except IOError as e:
    # It's ok if the file cannot be read: treat as "no checkpoint".
    logging.warning(str(e))
    logging.warning("%s: Checkpoint ignored", coord_checkpoint_filename)
    return None
  except text_format.ParseError as e:
    logging.warning(str(e))
    logging.warning("%s: Checkpoint ignored", coord_checkpoint_filename)
    return None
  # Note: the previous version kept a dead `f = None ... finally: f.close()`
  # around this body; `f` was never assigned a file, so it has been removed.
  return ckpt
class Saver(object):
"""Saves and restores variables.
See [Variables](../../how_tos/variables/index.md)
for an overview of variables, saving and restoring.
The `Saver` class adds ops to save and restore variables to and from
*checkpoints*. It also provides convenience methods to run these ops.
Checkpoints are binary files in a proprietary format which map variable names
to tensor values. The best way to examine the contents of a checkpoint is to
load it using a `Saver`.
Savers can automatically number checkpoint filenames with a provided counter.
This lets you keep multiple checkpoints at different steps while training a
model. For example you can number the checkpoint filenames with the training
step number. To avoid filling up disks, savers manage checkpoint files
automatically. For example, they can keep only the N most recent files, or
one checkpoint for every N hours of training.
You number checkpoint filenames by passing a value to the optional
`global_step` argument to `save()`:
```python
saver.save(sess, 'my-model', global_step=0) ==> filename: 'my-model-0'
...
saver.save(sess, 'my-model', global_step=1000) ==> filename: 'my-model-1000'
```
Additionally, optional arguments to the `Saver()` constructor let you control
the proliferation of checkpoint files on disk:
* `max_to_keep` indicates the maximum number of recent checkpoint files to
keep. As new files are created, older files are deleted. If None or 0,
all checkpoint files are kept. Defaults to 5 (that is, the 5 most recent
checkpoint files are kept.)
* `keep_checkpoint_every_n_hours`: In addition to keeping the most recent
`max_to_keep` checkpoint files, you might want to keep one checkpoint file
for every N hours of training. This can be useful if you want to later
analyze how a model progressed during a long training session. For
example, passing `keep_checkpoint_every_n_hours=2` ensures that you keep
one checkpoint file for every 2 hours of training. The default value of
10,000 hours effectively disables the feature.
Note that you still have to call the `save()` method to save the model.
Passing these arguments to the constructor will not save variables
automatically for you.
A training program that saves regularly looks like:
```python
...
# Create a saver.
saver = tf.train.Saver(...variables...)
# Launch the graph and train, saving the model every 1,000 steps.
sess = tf.Session()
for step in xrange(1000000):
sess.run(..training_op..)
if step % 1000 == 0:
# Append the step number to the checkpoint name:
saver.save(sess, 'my-model', global_step=step)
```
In addition to checkpoint files, savers keep a protocol buffer on disk with
the list of recent checkpoints. This is used to manage numbered checkpoint
files and by `latest_checkpoint()`, which makes it easy to discover the path
to the most recent checkpoint. That protocol buffer is stored in a file named
'checkpoint' next to the checkpoint files.
If you create several savers, you can specify a different filename for the
protocol buffer file in the call to `save()`.
@@__init__
@@save
@@restore
Other utility methods.
@@last_checkpoints
@@set_last_checkpoints_with_time
@@recover_last_checkpoints
@@as_saver_def
"""
def __init__(self,
             var_list=None,
             reshape=False,
             sharded=False,
             max_to_keep=5,
             keep_checkpoint_every_n_hours=10000.0,
             name=None,
             restore_sequentially=False,
             saver_def=None,
             builder=None,
             defer_build=False,
             allow_empty=False):
  """Creates a `Saver`.

  The constructor adds ops to save and restore variables.

  `var_list` specifies the variables that will be saved and restored, either
  as a `dict` mapping checkpoint names to variables or as a list of
  variables (keyed by their op names).  For example:

  ```python
  v1 = tf.Variable(..., name='v1')
  v2 = tf.Variable(..., name='v2')
  saver = tf.train.Saver({'v1': v1, 'v2': v2})   # dict form
  saver = tf.train.Saver([v1, v2])               # list form
  saver = tf.train.Saver({v.op.name: v for v in [v1, v2]})  # equivalent
  ```

  Args:
    var_list: A list of `Variable`/`SaveableObject`, or a dictionary mapping
      names to `SaveableObject`s.  If `None`, defaults to the list of all
      saveable objects.
    reshape: If `True`, allows restoring parameters from a checkpoint
      where the variables have a different shape (same element count/type).
    sharded: If `True`, shard the checkpoints, one per device.
    max_to_keep: Maximum number of recent checkpoints to keep.
      Defaults to 5.
    keep_checkpoint_every_n_hours: How often to keep checkpoints.
      Defaults to 10,000 hours.
    name: String.  Optional name to use as a prefix when adding operations.
    restore_sequentially: A `Bool`, which if true, causes restore of different
      variables to happen sequentially within each device.  This can lower
      memory usage when restoring very large models.
    saver_def: Optional `SaverDef` proto to use instead of running the
      builder; should be the proto returned by `as_saver_def()` of the
      `Saver` built for the same `Graph`.
    builder: Optional `SaverBuilder` to use if a `saver_def` was not provided.
      Defaults to `BaseSaverBuilder()`.
    defer_build: If `True`, defer adding the save and restore ops to the
      `build()` call.  In that case `build()` should be called before
      finalizing the graph or using the saver.
    allow_empty: If `False` (default) raise an error if there are no
      variables in the graph.  Otherwise, construct the saver anyway and
      make it a no-op.

  Raises:
    TypeError: If `var_list` is invalid.
    ValueError: If any of the keys or values in `var_list` are not unique.
  """
  # Deferring the build is incompatible with an explicit variable list.
  if defer_build and var_list:
    raise ValueError(
        "If `var_list` is provided then build cannot be deferred. "
        "Either set defer_build=False or var_list=None.")
  # Stash the construction parameters; build() consumes them.
  self._var_list = var_list
  self._reshape = reshape
  self._sharded = sharded
  self._max_to_keep = max_to_keep
  self._keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours
  self._name = name
  self._restore_sequentially = restore_sequentially
  self.saver_def = saver_def
  self._builder = builder
  self._is_built = False
  self._allow_empty = allow_empty
  self._is_empty = None
  if not defer_build:
    self.build()
  if self.saver_def:
    self._check_saver_def()
def build(self):
  """Builds saver_def."""
  if self._is_built:
    return  # Idempotent: only build once.
  self._is_built = True
  if not self.saver_def:
    if self._builder is None:
      self._builder = BaseSaverBuilder()
    if self._var_list is None:
      # pylint: disable=protected-access
      self._var_list = variables._all_saveable_objects()
    if not self._var_list:
      if not self._allow_empty:
        raise ValueError("No variables to save")
      # Empty savers are no-ops.
      self._is_empty = True
      return
    self._is_empty = False
    self.saver_def = self._builder.build(
        self._var_list,
        reshape=self._reshape,
        sharded=self._sharded,
        max_to_keep=self._max_to_keep,
        keep_checkpoint_every_n_hours=self._keep_checkpoint_every_n_hours,
        name=self._name,
        restore_sequentially=self._restore_sequentially)
  self._check_saver_def()
  # Updates next checkpoint time.
  self._next_checkpoint_time = (
      time.time() + self.saver_def.keep_checkpoint_every_n_hours * 3600)
  self._last_checkpoints = []
def _check_saver_def(self):
  """Validates that `self.saver_def` is a usable `SaverDef` proto."""
  if not isinstance(self.saver_def, saver_pb2.SaverDef):
    raise ValueError("saver_def must be a saver_pb2.SaverDef: %s" %
                     self.saver_def)
  # Both op names must be populated for save()/restore() to work.
  for field in ("save_tensor_name", "restore_op_name"):
    if not getattr(self.saver_def, field):
      raise ValueError("saver_def must specify the %s: %s" %
                       (field, str(self.saver_def)))
def _CheckpointFilename(self, p):
"""Returns the checkpoint filename given a `(filename, time)` pair.
Args:
p: (filename, time) pair.
Returns:
Checkpoint file name.
"""
name, _ = p
return name
def _MetaGraphFilename(self, checkpoint_filename, meta_graph_suffix="meta"):
"""Returns the meta graph filename.
Args:
checkpoint_filename: Name of the checkpoint file.
meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.
Returns:
MetaGraph file name.
"""
# If the checkpoint_filename is sharded, the checkpoint_filename could
# be of format model.ckpt-step#-?????-of-shard#. For example,
# model.ckpt-123456-?????-of-00005, or model.ckpt-123456-00001-of-00002.
basename = re.sub(r"-[\d\?]+-of-\d+$", "", checkpoint_filename)
meta_graph_filename = ".".join([basename, meta_graph_suffix])
return meta_graph_filename
def _MaybeDeleteOldCheckpoints(self,
                               latest_save_path,
                               meta_graph_suffix="meta"):
  """Deletes old checkpoints if necessary.

  Always keep the last `max_to_keep` checkpoints.  If
  `keep_checkpoint_every_n_hours` was specified, keep an additional checkpoint
  every `N` hours.  For example, if `N` is 0.5, an additional checkpoint is
  kept for every 0.5 hours of training; if `N` is 10, an additional
  checkpoint is kept for every 10 hours of training.

  Args:
    latest_save_path: Name including path of checkpoint file to save.
    meta_graph_suffix: Suffix for `MetaGraphDef` file.  Defaults to 'meta'.
  """
  if not self.saver_def.max_to_keep:
    return
  # Re-saving under an existing name moves it to the newest slot.
  # Fix: the previous code called list.remove() while iterating the same
  # list, which skips the element after each removal; rebuild instead.
  self._last_checkpoints = [
      p for p in self._last_checkpoints
      if self._CheckpointFilename(p) != latest_save_path
  ]
  # Append new path to list.
  self._last_checkpoints.append((latest_save_path, time.time()))

  # If not over budget, nothing to delete.
  if len(self._last_checkpoints) <= self.saver_def.max_to_keep:
    return
  p = self._last_checkpoints.pop(0)
  # Do not delete the file if keep_checkpoint_every_n_hours is set and we
  # have reached N hours of training since the last kept checkpoint.
  if p[1] > self._next_checkpoint_time:
    self._next_checkpoint_time += (
        self.saver_def.keep_checkpoint_every_n_hours * 3600)
    return
  # Otherwise delete the files.
  try:
    checkpoint_prefix = self._CheckpointFilename(p)
    self._delete_file_if_exists(
        self._MetaGraphFilename(checkpoint_prefix, meta_graph_suffix))
    if self.saver_def.version == saver_pb2.SaverDef.V2:
      # V2 has a metadata file and some data files.
      self._delete_file_if_exists(checkpoint_prefix + ".index")
      self._delete_file_if_exists(checkpoint_prefix + ".data-?????-of-?????")
    else:
      # V1, Legacy.  Exact match on the data file.
      self._delete_file_if_exists(checkpoint_prefix)
  except Exception as e:  # pylint: disable=broad-except
    # Best-effort cleanup: failing to delete must not fail the save.
    logging.warning("Ignoring: %s", str(e))
def _delete_file_if_exists(self, filespec):
  """Deletes every file matching `filespec` (a glob pattern)."""
  for matched_path in file_io.get_matching_files(filespec):
    file_io.delete_file(matched_path)
def as_saver_def(self):
"""Generates a `SaverDef` representation of this saver.
Returns:
A `SaverDef` proto.
"""
return self.saver_def
def to_proto(self):
"""Converts this `Saver` to a `SaverDef` protocol buffer.
Returns:
A `SaverDef` protocol buffer.
"""
return self.saver_def
@staticmethod
def from_proto(saver_def):
  """Returns a `Saver` object created from `saver_def`."""
  # Reconstructs a Saver for a graph that already contains the save/restore
  # ops described by `saver_def` (no builder is run).
  return Saver(saver_def=saver_def)
@property
def last_checkpoints(self):
"""List of not-yet-deleted checkpoint filenames.
You can pass any of the returned values to `restore()`.
Returns:
A list of checkpoint filenames, sorted from oldest to newest.
"""
return list(self._CheckpointFilename(p) for p in self._last_checkpoints)
def set_last_checkpoints(self, last_checkpoints):
"""DEPRECATED: Use set_last_checkpoints_with_time.
Sets the list of old checkpoint filenames.
Args:
last_checkpoints: A list of checkpoint filenames.
Raises:
AssertionError: If last_checkpoints is not a list.
"""
assert isinstance(last_checkpoints, list)
# We use a timestamp of +inf so that this checkpoint will never be
# deleted. This is both safe and backwards compatible to a previous
# version of the code which used s[1] as the "timestamp".
self._last_checkpoints = [(s, np.inf) for s in last_checkpoints]
def set_last_checkpoints_with_time(self, last_checkpoints_with_time):
"""Sets the list of old checkpoint filenames and timestamps.
Args:
last_checkpoints_with_time: A list of tuples of checkpoint filenames and
timestamps.
Raises:
AssertionError: If last_checkpoints_with_time is not a list.
"""
assert isinstance(last_checkpoints_with_time, list)
self._last_checkpoints = last_checkpoints_with_time
def recover_last_checkpoints(self, checkpoint_paths):
  """Recovers the internal saver state after a crash.

  This method is useful for recovering the "self._last_checkpoints" state.

  Globs for the checkpoints pointed to by `checkpoint_paths`.  If the files
  exist, use their mtime as the checkpoint timestamp.

  Args:
    checkpoint_paths: a list of checkpoint paths.
  """
  recovered = []
  for checkpoint_prefix in checkpoint_paths:
    pattern = _prefix_to_checkpoint_path(checkpoint_prefix,
                                         self.saver_def.version)
    matches = file_io.get_matching_files(pattern)
    if matches:
      # mtime_nsec is nanoseconds; convert to whole seconds.
      mtime = int(file_io.stat(matches[0]).mtime_nsec / 1e9)
      recovered.append((checkpoint_prefix, mtime))
  self.set_last_checkpoints_with_time(recovered)
  def save(self,
           sess,
           save_path,
           global_step=None,
           latest_filename=None,
           meta_graph_suffix="meta",
           write_meta_graph=True):
    """Saves variables.

    This method runs the ops added by the constructor for saving variables.
    It requires a session in which the graph was launched.  The variables to
    save must also have been initialized.

    The method returns the path of the newly created checkpoint file.  This
    path can be passed directly to a call to `restore()`.

    Args:
      sess: A Session to use to save the variables.
      save_path: String.  Path to the checkpoint filename.  If the saver is
        `sharded`, this is the prefix of the sharded checkpoint filename.
      global_step: If provided the global step number is appended to
        `save_path` to create the checkpoint filename.  The optional argument
        can be a `Tensor`, a `Tensor` name or an integer.
      latest_filename: Optional name for the protocol buffer file that will
        contains the list of most recent checkpoint filenames.  That file,
        kept in the same directory as the checkpoint files, is automatically
        managed by the saver to keep track of recent checkpoints.  Defaults to
        'checkpoint'.
      meta_graph_suffix: Suffix for `MetaGraphDef` file.  Defaults to 'meta'.
      write_meta_graph: `Boolean` indicating whether or not to write the meta
        graph file.

    Returns:
      A string: path at which the variables were saved.  If the saver is
        sharded, this string ends with: '-?????-of-nnnnn' where 'nnnnn'
        is the number of shards created.
      If the saver is empty, returns None.

    Raises:
      TypeError: If `sess` is not a `Session`.
      ValueError: If `latest_filename` contains path components, or if it
        collides with `save_path`.
      RuntimeError: If save and restore ops weren't built.
    """
    # With defer_build=True the save/restore ops don't exist until build().
    if not self._is_built:
      raise RuntimeError(
          "`build()` should be called before save if defer_build==True")
    if latest_filename is None:
      latest_filename = "checkpoint"
    # The checkpoint state file must live in the checkpoint directory itself.
    if os.path.split(latest_filename)[0]:
      raise ValueError("'latest_filename' must not contain path components")
    if global_step is not None:
      if not isinstance(global_step, compat.integral_types):
        # Resolve a Tensor / tensor name to its current integer value.
        global_step = training_util.global_step(sess, global_step)
      checkpoint_file = "%s-%d" % (save_path, global_step)
    else:
      checkpoint_file = save_path
    if os.path.basename(
        save_path) == latest_filename and not self.saver_def.sharded:
      # Guard against collision between data file and checkpoint state file.
      raise ValueError(
          "'latest_filename' collides with 'save_path': '%s' and '%s'" %
          (latest_filename, save_path))
    if not gfile.IsDirectory(os.path.dirname(save_path)):
      raise ValueError(
          "Parent directory of {} doesn't exist, can't save.".format(save_path))
    # From here on, save_path refers to the checkpoint *directory*.
    save_path = os.path.dirname(save_path)
    if not isinstance(sess, session.SessionInterface):
      raise TypeError("'sess' must be a Session; %s" % sess)
    if not self._is_empty:
      # Running the save op writes the checkpoint and returns its path.
      model_checkpoint_path = sess.run(
          self.saver_def.save_tensor_name,
          {self.saver_def.filename_tensor_name: checkpoint_file})
      model_checkpoint_path = compat.as_str(model_checkpoint_path)
      # Enforce the retention policy, then refresh the 'checkpoint' state file.
      self._MaybeDeleteOldCheckpoints(
          model_checkpoint_path, meta_graph_suffix=meta_graph_suffix)
      update_checkpoint_state(save_path, model_checkpoint_path,
                              self.last_checkpoints, latest_filename)
    if write_meta_graph:
      meta_graph_filename = self._MetaGraphFilename(
          checkpoint_file, meta_graph_suffix=meta_graph_suffix)
      with sess.graph.as_default():
        self.export_meta_graph(meta_graph_filename)
    if self._is_empty:
      # Nothing was saved, so there is no checkpoint path to report.
      return None
    else:
      return model_checkpoint_path
def export_meta_graph(self,
filename=None,
collection_list=None,
as_text=False):
"""Writes `MetaGraphDef` to save_path/filename.
Args:
filename: Optional meta_graph filename including the path.
collection_list: List of string keys to collect.
as_text: If `True`, writes the meta_graph as an ASCII proto.
Returns:
A `MetaGraphDef` proto.
"""
return export_meta_graph(
filename=filename,
graph_def=ops.get_default_graph().as_graph_def(add_shapes=True),
saver_def=self.saver_def,
collection_list=collection_list,
as_text=as_text)
  def restore(self, sess, save_path):
    """Restores previously saved variables.

    This method runs the ops added by the constructor for restoring variables.
    It requires a session in which the graph was launched.  The variables to
    restore do not have to have been initialized, as restoring is itself a way
    to initialize variables.

    The `save_path` argument is typically a value previously returned from a
    `save()` call, or a call to `latest_checkpoint()`.

    Args:
      sess: A `Session` to use to restore the parameters.
      save_path: Path where parameters were previously saved.

    Raises:
      ValueError: If the given `save_path` does not point to a file.
    """
    if self._is_empty:
      # No variables to restore; nothing to do.
      return
    # Performs this check only for V1, as the V2 restore op can read either a
    # V1 ckpt or a V2 ckpt, making this check invalid.
    if self.saver_def.version == saver_pb2.SaverDef.V1:
      file_path = _prefix_to_checkpoint_path(save_path, self.saver_def.version)
      if not file_io.get_matching_files(file_path):
        raise ValueError("Restore called with invalid save path: %r. "
                         "File path is: %r" % (save_path, file_path))
    # Feed the checkpoint prefix to the restore op.
    sess.run(self.saver_def.restore_op_name,
             {self.saver_def.filename_tensor_name: save_path})
@staticmethod
def _add_collection_def(meta_graph_def, key):
"""Adds a collection to MetaGraphDef protocol buffer.
Args:
meta_graph_def: MetaGraphDef protocol buffer.
key: One of the GraphKeys or user-defined string.
"""
_add_collection_def(meta_graph_def, key)
def _prefix_to_checkpoint_path(prefix, format_version=saver_pb2.SaverDef.V1):
  """Returns the pathname of a checkpoint file, given the checkpoint prefix.

  For a V1 checkpoint this is simply the prefix itself (the data file); for
  V2 it is the pathname of the index file.

  Args:
    prefix: a string, the prefix of a checkpoint.
    format_version: the checkpoint format version that corresponds to the
      prefix.

  Returns:
    The pathname of a checkpoint file, taking into account the checkpoint
    format version.
  """
  # Only V2 needs a suffix: the index file is what identifies a checkpoint.
  suffix = ".index" if format_version == saver_pb2.SaverDef.V2 else ""
  return prefix + suffix
def latest_checkpoint(checkpoint_dir, latest_filename=None):
  """Finds the filename of latest saved checkpoint file.

  Args:
    checkpoint_dir: Directory where the variables were saved.
    latest_filename: Optional name for the protocol buffer file that
      contains the list of most recent checkpoint filenames.
      See the corresponding argument to `Saver.save()`.

  Returns:
    The full path to the latest checkpoint or `None` if no checkpoint was
    found.
  """
  # Pick the latest checkpoint based on checkpoint state.
  ckpt = get_checkpoint_state(checkpoint_dir, latest_filename)
  if not (ckpt and ckpt.model_checkpoint_path):
    return None
  # Accept either checkpoint format on disk, preferring V2 over V1.
  for version in (saver_pb2.SaverDef.V2, saver_pb2.SaverDef.V1):
    pathname = _prefix_to_checkpoint_path(ckpt.model_checkpoint_path, version)
    if file_io.get_matching_files(pathname):
      return ckpt.model_checkpoint_path
  logging.error("Couldn't match files for checkpoint %s",
                ckpt.model_checkpoint_path)
  return None
def _get_kind_name(item):
  """Returns the kind name in CollectionDef.

  Args:
    item: A data item.

  Returns:
    The string representation of the kind in CollectionDef.
  """
  # Checked in order; anything that matches none of these is assumed to be a
  # graph element referenced by name (node_list).
  dispatch = (
      ((six.string_types, six.binary_type), "bytes_list"),
      (six.integer_types, "int64_list"),
      (float, "float_list"),
      (Any, "any_list"),
  )
  for classinfo, kind in dispatch:
    if isinstance(item, classinfo):
      return kind
  return "node_list"
def _add_collection_def(meta_graph_def, key):
  """Adds a collection to MetaGraphDef protocol buffer.

  Args:
    meta_graph_def: MetaGraphDef protocol buffer.
    key: One of the GraphKeys or user-defined string.
  """
  # Non-string keys cannot be represented in the proto map; skip with warning.
  if not isinstance(key, six.string_types) and not isinstance(key, bytes):
    logging.warning("Only collections with string type keys will be "
                    "serialized. This key has %s", type(key))
    return
  collection_list = ops.get_collection(key)
  if not collection_list:
    return
  try:
    col_def = meta_graph_def.collection_def[key]
    to_proto = ops.get_to_proto_function(key)
    proto_type = ops.get_collection_proto_type(key)
    if to_proto:
      # A registered converter serializes each item into its proto form.
      kind = "bytes_list"
      for x in collection_list:
        # Additional type check to make sure the returned proto is indeed
        # what we expect.
        proto = to_proto(x)
        assert isinstance(proto, proto_type)
        getattr(col_def, kind).value.append(proto.SerializeToString())
    else:
      # Otherwise infer the CollectionDef field from the first item's type.
      kind = _get_kind_name(collection_list[0])
      if kind == "node_list":
        getattr(col_def, kind).value.extend([x.name for x in collection_list])
      elif kind == "bytes_list":
        # NOTE(opensource): This force conversion is to work around the fact
        # that Python3 distinguishes between bytes and strings.
        getattr(col_def, kind).value.extend(
            [compat.as_bytes(x) for x in collection_list])
      else:
        getattr(col_def, kind).value.extend([x for x in collection_list])
  except Exception as e:  # pylint: disable=broad-except
    # Serialization is best-effort: drop the partially-filled entry rather
    # than fail the whole export.
    logging.warning("Error encountered when serializing %s.\n"
                    "Type is unsupported, or the types of the items don't "
                    "match field type in CollectionDef.\n%s", key, str(e))
    if key in meta_graph_def.collection_def:
      del meta_graph_def.collection_def[key]
    return
def _as_meta_graph_def(meta_info_def=None,
                       graph_def=None,
                       saver_def=None,
                       collection_list=None):
  """Construct and returns a `MetaGraphDef` protocol buffer.

  Args:
    meta_info_def: `MetaInfoDef` protocol buffer.
    graph_def: `GraphDef` protocol buffer.
    saver_def: `SaverDef` protocol buffer.
    collection_list: List of string keys to collect.

  Returns:
    MetaGraphDef protocol buffer.

  Raises:
    TypeError: If the arguments are not of the correct proto buffer type.
  """
  # Type check.
  if meta_info_def and not isinstance(meta_info_def,
                                      meta_graph_pb2.MetaGraphDef.MetaInfoDef):
    raise TypeError("meta_info_def must be of type MetaInfoDef, not %s",
                    type(meta_info_def))
  if graph_def and not isinstance(graph_def, graph_pb2.GraphDef):
    raise TypeError("graph_def must be of type GraphDef, not %s",
                    type(graph_def))
  if saver_def and not isinstance(saver_def, saver_pb2.SaverDef):
    raise TypeError("saver_def must be of type SaverDef, not %s",
                    type(saver_def))
  # Creates a MetaGraphDef proto.
  meta_graph_def = meta_graph_pb2.MetaGraphDef()
  # Adds meta_info_def.
  if meta_info_def:
    meta_graph_def.meta_info_def.MergeFrom(meta_info_def)
  # Adds graph_def or the default (current default graph, with shapes).
  if not graph_def:
    meta_graph_def.graph_def.MergeFrom(ops.get_default_graph().as_graph_def(
        add_shapes=True))
  else:
    meta_graph_def.graph_def.MergeFrom(graph_def)
  # Fills in meta_info_def.stripped_op_list using the ops from graph_def,
  # unless the caller already supplied one via meta_info_def.
  # pylint: disable=g-explicit-length-test
  if len(meta_graph_def.meta_info_def.stripped_op_list.op) == 0:
    meta_graph_def.meta_info_def.stripped_op_list.MergeFrom(
        stripped_op_list_for_graph(meta_graph_def.graph_def))
  # pylint: enable=g-explicit-length-test
  # Adds saver_def.
  if saver_def:
    meta_graph_def.saver_def.MergeFrom(saver_def)
  # Adds collection_list; defaults to every collection in the default graph.
  if collection_list:
    clist = collection_list
  else:
    clist = ops.get_all_collection_keys()
  for ctype in clist:
    _add_collection_def(meta_graph_def, ctype)
  return meta_graph_def
def read_meta_graph_file(filename):
  """Reads a file containing `MetaGraphDef` and returns the protocol buffer.

  Args:
    filename: `meta_graph_def` filename including the path.

  Returns:
    A `MetaGraphDef` protocol buffer.

  Raises:
    IOError: If the file doesn't exist, or cannot be successfully parsed.
  """
  meta_graph_def = meta_graph_pb2.MetaGraphDef()
  if not file_io.file_exists(filename):
    raise IOError("File %s does not exist." % filename)
  file_content = file_io.read_file_to_string(filename)
  # First attempt: the binary wire format.
  try:
    meta_graph_def.ParseFromString(file_content)
    return meta_graph_def
  except Exception:  # pylint: disable=broad-except
    pass
  # Fallback: the text format.
  try:
    text_format.Merge(file_content.decode("utf-8"), meta_graph_def)
  except text_format.ParseError as e:
    raise IOError("Cannot parse file %s: %s." % (filename, str(e)))
  return meta_graph_def
def _import_meta_graph_def(meta_graph_def, clear_devices):
  """Recreates a Graph saved in a `MetaGraphDef` proto.

  This function adds all the nodes from the meta graph def proto to the current
  graph, recreates all the collections, and returns a saver from saver_def.

  Args:
    meta_graph_def: `MetaGraphDef` protocol buffer.
    clear_devices: Boolean which controls whether to clear device information
      from graph_def.

  Returns:
    A saver constructed from `saver_def` in `meta_graph_def` or None.

    A None value is returned if no variables exist in the `meta_graph_def`
    (i.e., no variables to restore).
  """
  # Gathers the list of nodes we are interested in.
  producer_op_list = None
  if meta_graph_def.meta_info_def.HasField("stripped_op_list"):
    producer_op_list = meta_graph_def.meta_info_def.stripped_op_list
  input_graph_def = meta_graph_def.graph_def
  # Remove all the explicit device specifications for this node. This helps to
  # make the graph more portable.
  if clear_devices:
    for node in input_graph_def.node:
      node.device = ""
  # Import the graph nodes into the current default graph.
  importer.import_graph_def(
      input_graph_def, name="", producer_op_list=producer_op_list)
  # Restores all the other collections.
  for key, col_def in meta_graph_def.collection_def.items():
    kind = col_def.WhichOneof("kind")
    if kind is None:
      logging.error("Cannot identify data type for collection %s. Skipping.",
                    key)
      continue
    from_proto = ops.get_from_proto_function(key)
    if from_proto:
      # Registered collections are serialized as protos in a bytes_list;
      # deserialize each item through the registered converter.
      assert kind == "bytes_list"
      proto_type = ops.get_collection_proto_type(key)
      for value in col_def.bytes_list.value:
        proto = proto_type()
        proto.ParseFromString(value)
        ops.add_to_collection(key, from_proto(proto))
    else:
      field = getattr(col_def, kind)
      if kind == "node_list":
        # Node references are stored by name; look them up in the new graph.
        for value in field.value:
          col_op = ops.get_default_graph().as_graph_element(value)
          ops.add_to_collection(key, col_op)
      elif kind == "int64_list":
        # NOTE(opensource): This force conversion is to work around the fact
        # that Python2 distinguishes between int and long, while Python3 has
        # only int.
        for value in field.value:
          ops.add_to_collection(key, int(value))
      else:
        for value in field.value:
          ops.add_to_collection(key, value)
  if meta_graph_def.HasField("saver_def"):
    return Saver(saver_def=meta_graph_def.saver_def)
  else:
    if variables._all_saveable_objects():  # pylint: disable=protected-access
      # Return the default saver instance for all graph variables.
      return Saver()
    else:
      # If no graph variables exist, then a Saver cannot be constructed.
      logging.info("Saver not created because there are no variables in the"
                   " graph to restore")
      return None
def import_meta_graph(meta_graph_or_file, clear_devices=False):
  """Recreates a Graph saved in a `MetaGraphDef` proto.

  Accepts either a `MetaGraphDef` protocol buffer or the path of a file
  containing one (as produced by `export_meta_graph()` or `Saver.save()`).
  All the nodes from the `graph_def` field are added to the current graph,
  all collections are recreated, and a saver is constructed from the
  `saver_def` field.

  In combination with `export_meta_graph()`, this function can be used to

  * Serialize a graph along with other Python objects such as `QueueRunner`,
    `Variable` into a `MetaGraphDef`.

  * Restart training from a saved graph and checkpoints.

  * Run inference from a saved graph and checkpoints.

  ```Python
  with tf.Session() as sess:
    new_saver = tf.train.import_meta_graph('my-save-dir/my-model-10000.meta')
    new_saver.restore(sess, 'my-save-dir/my-model-10000')
    # tf.get_collection() returns a list. In this example we only want the
    # first one.
    train_op = tf.get_collection('train_op')[0]
    for step in xrange(1000000):
      sess.run(train_op)
  ```

  NOTE: Restarting training from saved `meta_graph` only works if the
  device assignments have not changed.

  Args:
    meta_graph_or_file: `MetaGraphDef` protocol buffer or filename (including
      the path) containing a `MetaGraphDef`.
    clear_devices: Boolean which controls whether to clear device information
      from graph_def. Default false.

  Returns:
    A saver constructed from `saver_def` in `MetaGraphDef` or None.

    A None value is returned if no variables exist in the `MetaGraphDef`
    (i.e., there are no variables to restore).
  """
  meta_graph_def = meta_graph_or_file
  if not isinstance(meta_graph_def, meta_graph_pb2.MetaGraphDef):
    # Treat the argument as a filename and parse the proto from disk.
    meta_graph_def = read_meta_graph_file(meta_graph_or_file)
  return _import_meta_graph_def(meta_graph_def, clear_devices)
def export_meta_graph(filename=None,
                      meta_info_def=None,
                      graph_def=None,
                      saver_def=None,
                      collection_list=None,
                      as_text=False):
  """Returns `MetaGraphDef` proto. Optionally writes it to filename.

  This function exports the graph, saver, and collection objects into
  `MetaGraphDef` protocol buffer with the intention of it being imported
  at a later time or location to restart training, run inference, or be
  a subgraph.

  Args:
    filename: Optional filename including the path for writing the
      generated `MetaGraphDef` protocol buffer.
    meta_info_def: `MetaInfoDef` protocol buffer.
    graph_def: `GraphDef` protocol buffer.
    saver_def: `SaverDef` protocol buffer.
    collection_list: List of string keys to collect.
    as_text: If `True`, writes the `MetaGraphDef` as an ASCII proto.

  Returns:
    A `MetaGraphDef` proto.
  """
  meta_graph_def = _as_meta_graph_def(meta_info_def=meta_info_def,
                                      graph_def=graph_def,
                                      saver_def=saver_def,
                                      collection_list=collection_list)
  if filename:
    # write_graph wants the directory and the basename separately.
    dirname, basename = os.path.split(filename)
    training_util.write_graph(meta_graph_def, dirname, basename,
                              as_text=as_text)
  return meta_graph_def
# Make Saver objects round-trippable through MetaGraphDef collections:
# register SaverDef as the proto type for the SAVERS graph collection,
# together with the converters in both directions.
ops.register_proto_function(
    ops.GraphKeys.SAVERS,
    proto_type=saver_pb2.SaverDef,
    to_proto=Saver.to_proto,
    from_proto=Saver.from_proto)
| {
"content_hash": "ba5e79e2ce03e25f69b0189fa56fdd6a",
"timestamp": "",
"source": "github",
"line_count": 1741,
"max_line_length": 80,
"avg_line_length": 37.90522688110281,
"alnum_prop": 0.6643128816692679,
"repo_name": "naturali/tensorflow",
"id": "b1b40061468c1db076e13f81abbb48b7ade86822",
"size": "66714",
"binary": false,
"copies": "1",
"ref": "refs/heads/r0.11",
"path": "tensorflow/python/training/saver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "159351"
},
{
"name": "C++",
"bytes": "9498060"
},
{
"name": "CMake",
"bytes": "29372"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "GCC Machine Description",
"bytes": "1"
},
{
"name": "HTML",
"bytes": "787519"
},
{
"name": "Java",
"bytes": "39229"
},
{
"name": "JavaScript",
"bytes": "10875"
},
{
"name": "Jupyter Notebook",
"bytes": "1773504"
},
{
"name": "Makefile",
"bytes": "12318"
},
{
"name": "Objective-C",
"bytes": "5332"
},
{
"name": "Objective-C++",
"bytes": "45585"
},
{
"name": "Protocol Buffer",
"bytes": "114983"
},
{
"name": "Python",
"bytes": "7015287"
},
{
"name": "Shell",
"bytes": "201064"
},
{
"name": "TypeScript",
"bytes": "414414"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import logging
from geocoder.location import BBox
from geocoder.base import OneResult, MultipleResultsQuery
class KomootResult(OneResult):
    """One result from the Komoot photon geocoder.

    Wraps a single GeoJSON feature: coordinates live under ``geometry``
    and the address fields under ``properties``.
    """

    def __init__(self, json_content):
        # create safe shortcuts into the GeoJSON feature
        self._geometry = json_content.get('geometry', {})
        self._properties = json_content.get('properties', {})

        # proceed with super.__init__
        super(KomootResult, self).__init__(json_content)

    @property
    def lat(self):
        # GeoJSON coordinates are ordered [lng, lat]
        return self._geometry['coordinates'][1]

    @property
    def lng(self):
        return self._geometry['coordinates'][0]

    @property
    def bbox(self):
        """Bounding box dict, or None when no usable extent is present.

        Photon's ``extent`` is ordered [west, north, east, south].
        """
        extent = self._properties.get('extent')
        # BUGFIX: the previous `all(extent)` test wrongly discarded boxes
        # touching the equator or prime meridian, because a legitimate
        # coordinate of 0.0 is falsy.  Check presence and shape instead.
        if extent and len(extent) == 4 and all(c is not None for c in extent):
            west, north, east, south = extent
            return BBox.factory([south, west, north, east]).as_dict

    @property
    def address(self):
        """Best-effort single-line address assembled from the parts."""
        # Ontario, Canada
        address = ', '.join([self.state, self.country])

        # 453 Booth street, Ottawa ON, Canada
        if self.housenumber:
            middle = ', '.join([self.street, self.city])
            address = ' '.join([self.housenumber, middle, address])

        # Booth street, Ottawa ON, Canada
        elif self.street:
            middle = ', '.join([self.street, self.city])
            address = ' '.join([middle, address])

        # Ottawa ON, Canada
        elif self.city:
            address = ' '.join([self.city, address])

        return address

    @property
    def country(self):
        return self._properties.get('country', '')

    @property
    def state(self):
        # For state-level hits, the feature name itself is the state.
        if self.osm_value == 'state':
            return self._properties.get('name', '')
        return self._properties.get('state', '')

    @property
    def city(self):
        # For city-level hits, the feature name itself is the city.
        if self.osm_value == 'city':
            return self._properties.get('name', '')
        return self._properties.get('city', '')

    @property
    def street(self):
        return self._properties.get('street', '')

    @property
    def housenumber(self):
        return self._properties.get('housenumber', '')

    @property
    def postal(self):
        return self._properties.get('postcode', '')

    @property
    def osm_id(self):
        return self._properties.get('osm_id', '')

    @property
    def osm_value(self):
        return self._properties.get('osm_value', '')

    @property
    def osm_key(self):
        return self._properties.get('osm_key', '')

    @property
    def osm_type(self):
        return self._properties.get('osm_type', '')
class KomootQuery(MultipleResultsQuery):
    """
    Komoot REST API
    =======================

    API Reference
    -------------
    http://photon.komoot.de
    """
    provider = 'komoot'
    method = 'geocode'

    _URL = 'http://photon.komoot.de/api'
    _RESULT_CLASS = KomootResult
    _KEY_MANDATORY = False

    def _build_params(self, location, provider_key, **kwargs):
        # photon caps the number of hits via 'limit'; default to one result.
        params = {
            'q': location,
            'limit': kwargs.get('maxRows', 1),
            'lang': 'en',
        }
        return params

    def _adapt_results(self, json_response):
        # The payload is GeoJSON: individual hits live under 'features'.
        return json_response['features']
# Ad-hoc smoke test: geocode a sample query and dump the result details.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    g = KomootQuery('Ottawa Ontario', maxRows=3)
    g.debug()
| {
"content_hash": "c9860469698899adfab147c970b9e01c",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 67,
"avg_line_length": 25.353383458646615,
"alnum_prop": 0.5652431791221827,
"repo_name": "DenisCarriere/geocoder",
"id": "4a647d4309da71d27af13ae37623ed9c13e2d025",
"size": "3406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geocoder/komoot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "415"
},
{
"name": "Python",
"bytes": "283948"
}
],
"symlink_target": ""
} |
'''Autogenerated by get_gl_extensions script, do not edit!'''
from OpenGL import platform as _p
from OpenGL.GL import glget
EXTENSION_NAME = 'GL_NV_multisample_filter_hint'
# Define the extension's constant in this module's namespace and register it
# with glget as a query returning a single value.
_p.unpack_constants( """GL_MULTISAMPLE_FILTER_HINT_NV 0x8534""", globals())
glget.addGLGetConstant( GL_MULTISAMPLE_FILTER_HINT_NV, (1,) )
def glInitMultisampleFilterHintNV():
    '''Return boolean indicating whether this extension is available'''
    # Imported lazily to avoid a circular import at module load time.
    from OpenGL import extensions
    return extensions.hasGLExtension( EXTENSION_NAME )
| {
"content_hash": "28991bc992c08f699c5db9f1cb9f08f4",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 75,
"avg_line_length": 42.583333333333336,
"alnum_prop": 0.761252446183953,
"repo_name": "frederica07/Dragon_Programming_Process",
"id": "10ef9a52ee89d3df260f183d795d85d7fbefac92",
"size": "511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyOpenGL-3.0.2/OpenGL/raw/GL/NV/multisample_filter_hint.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1548"
},
{
"name": "Python",
"bytes": "2558317"
}
],
"symlink_target": ""
} |
__author__ = ['davidharcombe@google.com (David Harcombe)']
"""Discovery Class.
Authenticate and fetch a discoverable API service.
"""
import logging
from typing import Any, Mapping
from apiclient import discovery
from oauth2client.client import AccessTokenCredentials
from classes.secret_manager_credentials import Credentials
from classes.services import Service
def get_service(service: Service,
                credentials: Credentials,
                api_key: str=None) -> discovery.Resource:
  """Fetch a discoverable API service.

  Builds an endpoint for one of the Google services enumerated in
  `Service`. Each usable member carries a `ServiceDefinition` holding every
  argument the Google Discovery API needs; those are expanded as keyword
  arguments to `discovery.build`.

  Not all services require an API key, hence it is optional.

  Args:
    service (Service): the service to construct an endpoint for.
    credentials (Credentials): credentials used to authorize the calls.
    api_key (str, optional): developer API key. Defaults to None.

  Returns:
    discovery.Resource: a service for REST calls

  Raises:
    NotImplementedError: if an invalid service is requested.
  """
  definition = service.definition
  if not definition:
    raise NotImplementedError(f'Unknown service {service}')

  token_credentials = AccessTokenCredentials(credentials.credentials.token,
                                             user_agent='report2bq')
  authorized_http = token_credentials.authorize(discovery.httplib2.Http())
  return discovery.build(http=authorized_http,
                         cache_discovery=False,
                         developerKey=api_key,
                         **definition.to_args)
| {
"content_hash": "4f808ed531ef2ae32e37c6e2d71ee039",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 77,
"avg_line_length": 32.81818181818182,
"alnum_prop": 0.6947368421052632,
"repo_name": "google/report2bq",
"id": "b1b3f5fe348130199f82d7d2d845bb3a6eb4399f",
"size": "2398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "auth-appengine/classes/discovery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "689"
},
{
"name": "HTML",
"bytes": "13362"
},
{
"name": "JavaScript",
"bytes": "375"
},
{
"name": "Python",
"bytes": "435292"
},
{
"name": "Shell",
"bytes": "35343"
}
],
"symlink_target": ""
} |
import logging
from ginga import GingaPlugin
from ginga.gtkw import GtkHelp
import gtk
import pango
class Log(GingaPlugin.GlobalPlugin):
    """Global plugin showing the application log in a scrolling Gtk text
    view, with controls for the log level and the history size.
    """

    def __init__(self, fv):
        # superclass defines some variables for us, like logger
        super(Log, self).__init__(fv)

        # number of lines kept in the text buffer
        self.histlimit = 100
        # upper bound the user may set for the history limit
        self.histmax = 10000
        # (label, level) pairs offered by the log-level combo box
        self.levels = (('Error', logging.ERROR),
                       ('Warn', logging.WARN),
                       ('Info', logging.INFO),
                       ('Debug', logging.DEBUG))

    def build_gui(self, container):
        # Read-only, non-wrapping text view holding the log text.
        self.msgFont = pango.FontDescription("Fixed 10")
        tw = gtk.TextView()
        tw.set_wrap_mode(gtk.WRAP_NONE)
        tw.set_left_margin(4)
        tw.set_right_margin(4)
        tw.set_editable(False)
        # NOTE(review): margins are set twice here — harmless duplication.
        tw.set_left_margin(4)
        tw.set_right_margin(4)
        tw.modify_font(self.msgFont)
        self.tw = tw
        self.buf = self.tw.get_buffer()

        sw = gtk.ScrolledWindow()
        sw.set_border_width(2)
        sw.set_policy(gtk.POLICY_AUTOMATIC,
                      gtk.POLICY_AUTOMATIC)
        sw.add(self.tw)

        container.pack_start(sw, fill=True, expand=True)

        # Controls row: log level, history limit, clear button.
        hbox = gtk.HBox()
        lbl = gtk.Label('Level:')
        hbox.pack_start(lbl, fill=False, expand=False)
        combobox = GtkHelp.combo_box_new_text()
        for (name, level) in self.levels:
            combobox.append_text(name)
        # Default selection is index 1 ('Warn').
        combobox.set_active(1)
        combobox.sconnect('changed', self.set_loglevel_cb)
        hbox.pack_start(combobox, fill=False, expand=False,
                        padding=4)

        lbl = gtk.Label('History:')
        hbox.pack_start(lbl, fill=False, expand=False)
        spinbox = GtkHelp.SpinButton()
        adj = spinbox.get_adjustment()
        adj.configure(self.histlimit, 100, self.histmax, 10, 100, 0)
        spinbox.sconnect('value-changed', self.set_history_cb)
        hbox.pack_start(spinbox, fill=False, expand=False,
                        padding=4)

        btn = gtk.Button("Clear")
        btn.connect('clicked', lambda w: self.clear())
        hbox.pack_end(btn, fill=False, expand=False)
        container.pack_end(hbox, fill=False, expand=False)

    def set_history(self, histlimit):
        # Using assert for validation: note this check disappears under -O.
        assert histlimit <= self.histmax, \
               Exception("Limit exceeds maximum value of %d" % (self.histmax))
        self.histlimit = histlimit
        self.logger.debug("Logging history limit set to %d" % (
            histlimit))
        self.history_housekeeping()

    def set_history_cb(self, rng):
        # Spin button values arrive as floats.
        histlimit = rng.get_value()
        self.set_history(histlimit)

    def history_housekeeping(self):
        # remove some lines to keep us within our history limit
        numlines = self.buf.get_line_count()
        if numlines > self.histlimit:
            rmcount = int(numlines - self.histlimit)
            start = self.buf.get_iter_at_line(0)
            end = self.buf.get_iter_at_line(rmcount)
            self.buf.delete(start, end)

    def set_loglevel_cb(self, w):
        # Map the combo box selection back to a logging level.
        index = w.get_active()
        name, level = self.levels[index]
        self.fv.set_loglevel(level)
        self.logger.info("GUI log level changed to '%s'" % (
            name))

    def log(self, text):
        # Append a line and trim history; scrolling is currently disabled.
        end = self.buf.get_end_iter()
        self.buf.insert(end, text + '\n')
        self.history_housekeeping()
        # scroll window to end of buffer
        end = self.buf.get_end_iter()
        mark = self.buf.get_insert()
        #self.tw.scroll_to_iter(end, 0.5)
        # TODO: this is causing a segfault if the text widget is
        # not mapped yet!
        #self.buf.move_mark(mark, end)
        #res = self.tw.scroll_to_mark(mark, 0.2, True)

    def clear(self):
        # Delete the entire buffer contents.
        start = self.buf.get_start_iter()
        end = self.buf.get_end_iter()
        self.buf.delete(start, end)
        return True

    def __str__(self):
        # Plugin registry name.
        return 'log'
#END
| {
"content_hash": "b1deb3cb0cd3b37f2bb7d2b799dbf2b3",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 78,
"avg_line_length": 33.05785123966942,
"alnum_prop": 0.56575,
"repo_name": "astrofrog/ginga",
"id": "2a301553d0b44f179c8f6b9a8773091205c149e0",
"size": "4246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ginga/gtkw/plugins/Log.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1548520"
}
],
"symlink_target": ""
} |
"""Views for comments app."""
from __future__ import absolute_import
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.decorators import method_decorator
from rest_framework import permissions, status
from rest_framework.decorators import (
api_view,
authentication_classes,
permission_classes,
renderer_classes,
detail_route
)
from rest_framework.exceptions import ParseError
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from sphinx.websupport import WebSupport
from readthedocs.comments.models import (
DocumentComment, DocumentNode, NodeSnapshot, DocumentCommentSerializer,
DocumentNodeSerializer, ModerationActionSerializer)
from readthedocs.projects.models import Project
from readthedocs.restapi.permissions import CommentModeratorOrReadOnly
from .backend import DjangoStorage
from .session import UnsafeSessionAuthentication
storage = DjangoStorage()

# NOTE(review): srcdir/builddir/datadir are hard-coded to a developer's local
# machine ('/Users/eric/...') — presumably they should come from settings;
# confirm before relying on this module in production.
support = WebSupport(
    srcdir='/Users/eric/projects/readthedocs.org/docs',
    builddir='/Users/eric/projects/readthedocs.org/docs/_build/websupport',
    datadir='/Users/eric/projects/readthedocs.org/docs/_build/websupport/data',
    storage=storage,
    docroot='websupport',
)
########
# called by javascript
########
@api_view(['GET'])
@permission_classes([permissions.IsAuthenticatedOrReadOnly])
@renderer_classes((JSONRenderer,))
def get_options(request):  # pylint: disable=unused-argument
    """Return sphinx websupport's base comment options for the JS client.

    Note: this mutates the dict returned by `support.base_comment_opts`,
    overriding the add/get comment URLs to point at our comments API.
    """
    base_opts = support.base_comment_opts
    base_opts['addCommentURL'] = '/api/v2/comments/'
    base_opts['getCommentsURL'] = '/api/v2/comments/'
    return Response(base_opts)
@api_view(['GET'])
@permission_classes([permissions.IsAuthenticatedOrReadOnly])
@renderer_classes((JSONRenderer,))
def get_metadata(request):
    """Return the stored metadata for a document page.

    GET: page -- the document name to look up.
    """
    page_name = request.GET.get('page', '')
    return Response(storage.get_metadata(docname=page_name))
@api_view(['GET', 'POST'])
@permission_classes([permissions.AllowAny])
@authentication_classes([UnsafeSessionAuthentication])
@renderer_classes((JSONRenderer,))
def attach_comment(request):
    """Attach an existing comment to the node identified by a snapshot hash.

    POST: comment -- DocumentComment pk; node -- NodeSnapshot hash.

    NOTE(review): 'GET' is accepted by the decorator but the body reads only
    request.POST, so a GET would look up pk='' and raise — confirm whether
    GET support is intended.  Also, the node assignment below is never
    persisted with comment.save(); presumably it only feeds the serializer —
    verify.
    """
    comment_id = request.POST.get('comment', '')
    comment = DocumentComment.objects.get(pk=comment_id)

    node_id = request.POST.get('node', '')
    snapshot = NodeSnapshot.objects.get(hash=node_id)
    comment.node = snapshot.node

    serialized_comment = DocumentCommentSerializer(comment)
    return Response(serialized_comment.data)
#######
# Normal Views
#######
def build(request):  # pylint: disable=unused-argument
    """Trigger a full sphinx websupport build.

    NOTE(review): as a Django view this returns None, which raises
    "view didn't return an HttpResponse" if it is ever wired to a URL --
    confirm whether it is only meant to be called internally.
    """
    support.build()
def serve_file(request, file):  # pylint: disable=redefined-builtin
    """Render a single websupport document inside the ``doc.html`` template."""
    context = {'document': support.get_document(file)}
    return render_to_response(
        'doc.html', context, context_instance=RequestContext(request))
######
# Called by Builder
######
@api_view(['GET'])
@permission_classes([permissions.IsAuthenticatedOrReadOnly])
def has_node(request):
    """Report whether a node is known to the storage backend.

    GET: node_id -- the node's ID to check
    """
    node_id = request.GET.get('node_id', '')
    return Response({'exists': storage.has_node(node_id)})
@api_view(['GET', 'POST'])
@permission_classes([permissions.AllowAny])
@authentication_classes([UnsafeSessionAuthentication])
@renderer_classes((JSONRenderer,))
def add_node(request):
    """Register a new document node with a project.

    POST: project -- project slug (required)
          document, id, version, commit -- node attributes (default '')
    """
    post_data = request.data
    try:
        # 'project' was the only key read with direct indexing; a missing key
        # used to escape as a KeyError (HTTP 500). Report a client error
        # instead, mirroring update_node().
        project_slug = post_data['project']
    except KeyError:
        return Response(
            "You must include a project in the POST payload to this view.",
            status.HTTP_400_BAD_REQUEST)
    project = Project.objects.get(slug=project_slug)
    page = post_data.get('document', '')
    node_hash = post_data.get('id', '')
    version = post_data.get('version', '')
    commit = post_data.get('commit', '')
    project.add_node(node_hash, page, version=version, commit=commit)
    return Response()
@api_view(['GET', 'POST'])
@permission_classes([permissions.AllowAny])
@authentication_classes([UnsafeSessionAuthentication])
@renderer_classes((JSONRenderer,))
def update_node(request):
    """Move a node to a new content hash (e.g. after a rebuild).

    POST: old_hash, new_hash, commit, project, version, page -- all required.

    Returns the serialized updated node, or HTTP 400 naming the missing key.
    """
    post_data = request.data
    try:
        old_hash = post_data['old_hash']
        new_hash = post_data['new_hash']
        commit = post_data['commit']
        project = post_data['project']
        version = post_data['version']
        page = post_data['page']
    except KeyError as e:
        # The old message mentioned only two of the six required keys; list
        # them all and name the one that is actually missing.
        return Response(
            "You must include old_hash, new_hash, commit, project, version "
            "and page in the POST payload to this view (missing: %s)." % e,
            status.HTTP_400_BAD_REQUEST)
    # Lookup/update moved out of the try so an unrelated KeyError inside the
    # model layer is not masked as a payload error.
    node = DocumentNode.objects.from_hash(
        node_hash=old_hash, project_slug=project, version_slug=version,
        page=page)
    node.update_hash(new_hash, commit)
    return Response(DocumentNodeSerializer(node).data)
class CommentViewSet(ModelViewSet):
    """Viewset for the DocumentComment model.

    Reads are public; moderation requires CommentModeratorOrReadOnly and
    creating a comment requires an authenticated session.
    """
    serializer_class = DocumentCommentSerializer
    permission_classes = [CommentModeratorOrReadOnly, permissions.IsAuthenticatedOrReadOnly]
    def get_queryset(self):
        # Filter precedence: a 'node' query param wins, then 'project',
        # otherwise all comments are returned.
        qp = self.request.query_params
        if qp.get('node'):
            try:
                # Node lookups need the full coordinate set; a missing key is
                # reported to the client as a parse error below.
                node = DocumentNode.objects.from_hash(version_slug=qp['version'],
                                                      page=qp['document_page'],
                                                      node_hash=qp['node'],
                                                      project_slug=qp['project'])
                queryset = DocumentComment.objects.filter(node=node)
            except KeyError:
                raise ParseError(
                    'To get comments by node, you must also provide page, '
                    'version, and project.')
            except DocumentNode.DoesNotExist:
                # Unknown node: empty result rather than an error.
                queryset = DocumentComment.objects.none()
        elif qp.get('project'):
            queryset = DocumentComment.objects.filter(node__project__slug=qp['project'])
        else:
            queryset = DocumentComment.objects.all()
        return queryset
    @method_decorator(login_required)
    def create(self, request, *args, **kwargs):
        # Delegates comment creation to Project.add_comment; all payload keys
        # are required (a missing one raises KeyError -> 500).
        project = Project.objects.get(slug=request.data['project'])
        comment = project.add_comment(version_slug=request.data['version'],
                                      page=request.data['document_page'],
                                      content_hash=request.data['node'],
                                      commit=request.data['commit'],
                                      user=request.user,
                                      text=request.data['text'])
        serializer = self.get_serializer(comment)
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
    @detail_route(methods=['put'])
    def moderate(self, request, pk):  # pylint: disable=unused-argument
        # PUT /comments/<pk>/moderate/ with {'decision': ...}; permission
        # checks are handled by CommentModeratorOrReadOnly.
        comment = self.get_object()
        decision = request.data['decision']
        moderation_action = comment.moderate(request.user, decision)
        return Response(ModerationActionSerializer(moderation_action).data)
| {
"content_hash": "e38c2d29f3bfcd800273d2fec82333b3",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 93,
"avg_line_length": 34.35748792270532,
"alnum_prop": 0.656214848143982,
"repo_name": "pombredanne/readthedocs.org",
"id": "a959924dcf083255e7a87414d8a47d96826aa038",
"size": "7112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "readthedocs/comments/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "66514"
},
{
"name": "HTML",
"bytes": "205587"
},
{
"name": "JavaScript",
"bytes": "444672"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "1175310"
}
],
"symlink_target": ""
} |
from networkx.algorithms.components.strongly_connected import strongly_connected_components
from networkx.classes.digraph import DiGraph
from networkx.classes.function import selfloop_edges
from typing import List, Set, Tuple, Dict
from hwtHls.ssa.basicBlock import SsaBasicBlock
from hwtHls.ssa.transformation.utils.blockAnalysis import collect_all_blocks
from ipCorePackager.constants import DIRECTION
# https://github.com/baharev/sdopt-tearing/blob/master/heap_md.py
# https://github.com/zhenv5/breaking_cycles_in_noisy_hierarchies
Node = int  # type alias: graph nodes are indices into the SsaBasicBlock list
def rm_non_sccs(g: DiGraph):
    """Drop every node of ``g`` that is not part of a non-trivial SCC.

    ``g`` is modified in place.

    :return: tuple (list of SCCs with more than one node,
                    set union of the nodes of those SCCs)
    """
    nontrivial = [comp for comp in strongly_connected_components(g)
                  if len(comp) > 1]
    keep = set()
    for comp in nontrivial:
        keep.update(comp)
    for node in tuple(g.nodes()):
        if node not in keep:
            # removing a node also removes all of its incident edges
            g.remove_node(node)
    return nontrivial, keep
def get_nodes_degree_dict(g: DiGraph, nodes: List[Node]) -> Dict[Node, Tuple[float, DIRECTION]]:
    """Compute a degree-skew score for each node.

    For each node the score is the ratio of the dominant degree to the
    other one: in/out flagged DIRECTION.IN when the in-degree is larger,
    otherwise out/in flagged DIRECTION.OUT (ties fall to OUT).

    NOTE(review): assumes every node has non-zero in and out degree (true
    for members of a non-trivial SCC) -- otherwise this divides by zero.
    """
    ins = g.in_degree(nodes)
    outs = g.out_degree(nodes)
    result: Dict[Node, Tuple[float, DIRECTION]] = {}
    for n in nodes:
        i = ins[n]
        o = outs[n]
        if i > o:
            result[n] = (i / o, DIRECTION.IN)
        else:
            result[n] = (o / i, DIRECTION.OUT)
    return result
def greedy_local_heuristic(g: DiGraph, sccs: List[Set[Node]],
                           degree_dict: Dict[Node, Tuple[float, DIRECTION]],
                           edges_to_be_removed):
    """Greedily break every non-trivial SCC of ``g``.

    For each SCC, pick the node with the most skewed in/out degree ratio
    and cut all of its edges on the dominated side; then recompute the
    SCCs of what remains and repeat until none is left.

    :param g: graph, modified in place (selected edges are removed)
    :param sccs: work list of non-trivial SCCs, consumed by this function
    :param degree_dict: node -> (ratio, DIRECTION), as produced by
        get_nodes_degree_dict()
    :param edges_to_be_removed: output list; every removed edge is appended
    """
    while sccs:
        scc = sccs.pop()
        # node with the largest degree ratio inside this SCC
        (_, direction), max_node = max(((degree_dict[node], node)
                                        for node in scc),
                                       key=lambda x: x[0][0])
        assert isinstance(direction, DIRECTION), direction
        if direction == DIRECTION.IN:
            # in-degree dominates -> cut the (fewer) outgoing edges
            new_edges = [(max_node, o) for o in g.succ[max_node]]
        else:
            # out-degree dominates -> cut the (fewer) incoming edges
            new_edges = [(i, max_node) for i in g.pred[max_node]]
        edges_to_be_removed.extend(new_edges)
        # The original re-removed the whole accumulated edge list on every
        # iteration. Removing only the newly selected edges is equivalent
        # (removing an absent edge is a no-op in networkx) and avoids the
        # quadratic rework.
        g.remove_edges_from(new_edges)
        sccs.extend(c for c in strongly_connected_components(g) if len(c) > 1)
def remove_cycle_edges_by_mfas(g: DiGraph):
    """Compute an approximate minimum feedback arc set of ``g``.

    ``g`` is modified in place: self loops, the greedily selected
    cycle-breaking edges, and all nodes outside non-trivial SCCs are removed.

    :return: list of removed (back) edges, self loops first
    """
    backedges = list(selfloop_edges(g))
    g.remove_edges_from(backedges)
    sccs, sccs_nodes = rm_non_sccs(g)
    degree_dict = get_nodes_degree_dict(g, sccs_nodes)
    greedy_local_heuristic(g, sccs, degree_dict, backedges)
    return backedges
class PipelineExtractor():
    """
    Cut the circuit graph into individual independent pipelines and mark some
    edges as backward in order to linearize the circuit for scheduling.

    This class solves the feedback arc set problem (NP-complete); see:

    * https://doi.org/10.1016/0020-0190(93)90079-O
    * https://www.mat.univie.ac.at/~neum/ms/minimum_feedback_arc_set.pdf
    """

    def collect_pipelines(self, ssa: SsaBasicBlock):
        """
        A pipeline is a DAG of SsaBasicBlocks which should share the
        synchronization, or a single node. The goal is to extract pipelines
        that are as long as possible from the ssa graph and to detect the
        places where pipelines connect to each other and where extra
        synchronization is required.

        Side effect: sets ``self.backward_edges`` to the set of
        (source block, destination block) pairs selected as back edges.

        :return: list of all blocks reachable from ``ssa``
        """
        seen: Set[SsaBasicBlock] = set()
        blocks: List[SsaBasicBlock] = list(collect_all_blocks(ssa, seen))
        index_of = {block: i for i, block in enumerate(blocks)}

        # Mirror the block graph as an integer-labelled networkx digraph.
        dg = DiGraph()
        for src_i, src in enumerate(blocks):
            src: SsaBasicBlock
            for dst in src.successors.iterBlocks():
                dg.add_edge(src_i, index_of[dst])

        # Approximate minimum feedback arc set -> back edges.
        back_edges: Set[Tuple[SsaBasicBlock, SsaBasicBlock]] = set()
        for src_i, dst_i in set(remove_cycle_edges_by_mfas(dg)):
            back_edges.add((blocks[src_i], blocks[dst_i]))

        self.backward_edges = back_edges
        return blocks
| {
"content_hash": "e92dd1201a2b9e586346f38c746a1200",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 116,
"avg_line_length": 41.31609195402299,
"alnum_prop": 0.6000834608429545,
"repo_name": "Nic30/hwtHls",
"id": "f5830c4cd728058d4d171b41f4f29d1590bf1717",
"size": "7189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hwtHls/ssa/translation/toHwtHlsNetlist/pipelineExtractor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1001"
},
{
"name": "C++",
"bytes": "131805"
},
{
"name": "Dockerfile",
"bytes": "1731"
},
{
"name": "LLVM",
"bytes": "74517"
},
{
"name": "Meson",
"bytes": "2683"
},
{
"name": "Python",
"bytes": "739246"
}
],
"symlink_target": ""
} |
"""
Test experiment for parse.
Add cardinality format field after type:
"... {person:Person?} ..." -- CARDINALITY: Zero or one, 0..1 (optional)
"... {persons:Person*} ..." -- CARDINALITY: Zero or more, 0..N (many0)
"... {persons:Person+} ..." -- CARDINALITY: One or more, 1..N (many)
REQUIRES:
parse >= 1.5.3.1 ('pattern' attribute support and further extensions)
STATUS:
IDEA, working prototype with patched parse module, but not accepted.
"""
from __future__ import absolute_import
from .parse_type_test \
import TestCase, parse_number, unittest
from .test_cardinality import CardinalityTypeBuilderTest
from parse_type import Cardinality
from parse_type.cardinality_field \
import CardinalityField, CardinalityFieldTypeBuilder, MissingTypeError
# -------------------------------------------------------------------------
# TEST CASE: TestParseTypeWithCardinalityField
# -------------------------------------------------------------------------
class TestCardinalityField(TestCase):
    """Unit tests for the CardinalityField name parsing helpers.

    Covers the pattern-char tables, type-name matching, split_type()/
    make_type() and the symmetry between the two.
    """
    # Names carrying a cardinality suffix char in the last position.
    VALID_TYPE_NAMES = ["Number?", "Number*", "Number+"]
    # Suffix chars anywhere but the last position must not match.
    INVALID_TYPE_NAMES = ["?Invalid", "Inval*d", "In+valid"]
    def test_pattern_chars(self):
        # Every pattern char must have an entry in the char -> cardinality map.
        for pattern_char in CardinalityField.pattern_chars:
            self.assertIn(pattern_char, CardinalityField.from_char_map)
    def test_to_from_char_map_symmetry(self):
        # to_char_map and from_char_map must be exact inverses of each other.
        for cardinality, char in CardinalityField.to_char_map.items():
            self.assertEqual(cardinality, CardinalityField.from_char_map[char])
        for char, cardinality in CardinalityField.from_char_map.items():
            self.assertEqual(char, CardinalityField.to_char_map[cardinality])
    def test_matches_type_name(self):
        for type_name in self.VALID_TYPE_NAMES:
            self.assertTrue(CardinalityField.matches_type(type_name))
        for type_name in self.INVALID_TYPE_NAMES:
            self.assertFalse(CardinalityField.matches_type(type_name))
    def test_split_type__with_valid_special_names(self):
        # Each suffix maps to a cardinality that is known under two aliases.
        actual = CardinalityField.split_type("Color?")
        self.assertEqual(actual, ("Color", Cardinality.optional))
        self.assertEqual(actual, ("Color", Cardinality.zero_or_one))
        actual = CardinalityField.split_type("Color+")
        self.assertEqual(actual, ("Color", Cardinality.many))
        self.assertEqual(actual, ("Color", Cardinality.one_or_more))
        actual = CardinalityField.split_type("Color*")
        self.assertEqual(actual, ("Color", Cardinality.many0))
        self.assertEqual(actual, ("Color", Cardinality.zero_or_more))
    def test_split_type__with_valid_special_names2(self):
        # Same as above, derived generically from the from_char_map table.
        for type_name in self.VALID_TYPE_NAMES:
            self.assertTrue(CardinalityField.matches_type(type_name))
            cardinality_char = type_name[-1]
            expected_basename = type_name[:-1]
            expected_cardinality = CardinalityField.from_char_map[cardinality_char]
            expected = (expected_basename, expected_cardinality)
            actual = CardinalityField.split_type(type_name)
            self.assertEqual(actual, expected)
    def test_split_type__with_cardinality_one(self):
        # A name without a suffix splits to cardinality one.
        actual = CardinalityField.split_type("Color")
        self.assertEqual(actual, ("Color", Cardinality.one))
    def test_split_type__with_invalid_names(self):
        # Suffix chars in non-final positions are part of the name itself.
        for type_name in self.INVALID_TYPE_NAMES:
            expected = (type_name, Cardinality.one)
            actual = CardinalityField.split_type(type_name)
            self.assertEqual(actual, expected)
            self.assertFalse(CardinalityField.matches_type(type_name))
    def test_make_type__with_cardinality_one(self):
        # Cardinality one adds no suffix at all.
        expected = "Number"
        type_name = CardinalityField.make_type("Number", Cardinality.one)
        self.assertEqual(type_name, expected)
        self.assertFalse(CardinalityField.matches_type(type_name))
    def test_make_type__with_cardinality_optional(self):
        expected = "Number?"
        type_name = CardinalityField.make_type("Number", Cardinality.optional)
        self.assertEqual(type_name, expected)
        self.assertTrue(CardinalityField.matches_type(type_name))
        # The alias cardinality must produce the identical name.
        type_name2 = CardinalityField.make_type("Number", Cardinality.zero_or_one)
        self.assertEqual(type_name2, expected)
        self.assertEqual(type_name2, type_name)
    def test_make_type__with_cardinality_many(self):
        expected = "Number+"
        type_name = CardinalityField.make_type("Number", Cardinality.many)
        self.assertEqual(type_name, expected)
        self.assertTrue(CardinalityField.matches_type(type_name))
        # The alias cardinality must produce the identical name.
        type_name2 = CardinalityField.make_type("Number", Cardinality.one_or_more)
        self.assertEqual(type_name2, expected)
        self.assertEqual(type_name2, type_name)
    def test_make_type__with_cardinality_many0(self):
        expected = "Number*"
        type_name = CardinalityField.make_type("Number", Cardinality.many0)
        self.assertEqual(type_name, expected)
        self.assertTrue(CardinalityField.matches_type(type_name))
        # The alias cardinality must produce the identical name.
        type_name2 = CardinalityField.make_type("Number", Cardinality.zero_or_more)
        self.assertEqual(type_name2, expected)
        self.assertEqual(type_name2, type_name)
    def test_split_type2make_type__symmetry_with_valid_names(self):
        # split_type and make_type are inverse operations for special names.
        for type_name in self.VALID_TYPE_NAMES:
            primary_name, cardinality = CardinalityField.split_type(type_name)
            type_name2 = CardinalityField.make_type(primary_name, cardinality)
            self.assertEqual(type_name, type_name2)
    def test_split_type2make_type__symmetry_with_cardinality_one(self):
        # ... and also for plain names, which round-trip unchanged.
        for type_name in self.INVALID_TYPE_NAMES:
            primary_name, cardinality = CardinalityField.split_type(type_name)
            type_name2 = CardinalityField.make_type(primary_name, cardinality)
            self.assertEqual(type_name, primary_name)
            self.assertEqual(type_name, type_name2)
            self.assertEqual(cardinality, Cardinality.one)
# -------------------------------------------------------------------------
# TEST CASE:
# -------------------------------------------------------------------------
class TestCardinalityFieldTypeBuilder(CardinalityTypeBuilderTest):
    """Unit tests for CardinalityFieldTypeBuilder.

    FIX: two test methods were defined twice with the same name
    (``test_create_type_variant__raises_error_with_invalid_type_name`` and
    ``test_create_missing_type_variants__raises_error_with_missing_primary_type``),
    so the earlier definitions were silently shadowed and never ran. The
    shadowing duplicates are renamed below to match the builder method they
    actually exercise, restoring the lost test coverage.
    """
    # (type_dict, description) pairs that lack the primary "Number" type.
    INVALID_TYPE_DICT_DATA = [
        (dict(), "empty type_dict"),
        (dict(NumberX=parse_number), "non-empty type_dict (wrong name)"),
    ]

    # -- UTILITY METHODS:
    def generate_type_variants(self, type_name):
        """Yield the type name with each cardinality suffix appended."""
        for pattern_char in CardinalityField.pattern_chars:
            special_name = "%s%s" % (type_name.strip(), pattern_char)
            self.assertTrue(CardinalityField.matches_type(special_name))
            yield special_name

    # -- METHOD: CardinalityFieldTypeBuilder.create_type_variant()
    def test_create_type_variant__with_many_and_type_converter(self):
        type_builder = CardinalityFieldTypeBuilder
        parse_candidate = type_builder.create_type_variant("Number+",
                                                           type_converter=parse_number)
        self.check_parse_number_with_many(parse_candidate, "Number+")

    def test_create_type_variant__with_optional_and_type_dict(self):
        type_builder = CardinalityFieldTypeBuilder
        parse_candidate = type_builder.create_type_variant("Number?",
                                                           dict(Number=parse_number))
        self.check_parse_number_with_optional(parse_candidate, "Number?")

    def test_create_type_variant__with_many_and_type_dict(self):
        type_builder = CardinalityFieldTypeBuilder
        parse_candidate = type_builder.create_type_variant("Number+",
                                                           dict(Number=parse_number))
        self.check_parse_number_with_many(parse_candidate, "Number+")

    def test_create_type_variant__with_many0_and_type_dict(self):
        type_builder = CardinalityFieldTypeBuilder
        parse_candidate = type_builder.create_type_variant("Number*",
                                                           dict(Number=parse_number))
        self.check_parse_number_with_many0(parse_candidate, "Number*")

    def test_create_type_variant__can_create_all_variants(self):
        type_builder = CardinalityFieldTypeBuilder
        for special_name in self.generate_type_variants("Number"):
            # -- CASE: type_converter
            parse_candidate = type_builder.create_type_variant(special_name,
                                                               parse_number)
            self.assertTrue(callable(parse_candidate))
            # -- CASE: type_dict
            parse_candidate = type_builder.create_type_variant(special_name,
                                                               dict(Number=parse_number))
            self.assertTrue(callable(parse_candidate))

    def test_create_type_variant__raises_error_with_invalid_type_name(self):
        type_builder = CardinalityFieldTypeBuilder
        for invalid_type_name in TestCardinalityField.INVALID_TYPE_NAMES:
            with self.assertRaises(ValueError):
                type_builder.create_type_variant(invalid_type_name,
                                                 parse_number)

    def test_create_type_variant__raises_error_with_missing_primary_type(self):
        type_builder = CardinalityFieldTypeBuilder
        for special_name in self.generate_type_variants("Number"):
            for type_dict, description in self.INVALID_TYPE_DICT_DATA:
                with self.assertRaises(MissingTypeError):
                    type_builder.create_type_variant(special_name, type_dict)

    # -- METHOD: CardinalityFieldTypeBuilder.create_type_variants()
    def test_create_type_variants__all(self):
        type_builder = CardinalityFieldTypeBuilder
        special_names = ["Number?", "Number+", "Number*"]
        type_dict = dict(Number=parse_number)
        new_types = type_builder.create_type_variants(special_names, type_dict)
        self.assertSequenceEqual(set(new_types.keys()), set(special_names))
        self.assertEqual(len(new_types), 3)
        parse_candidate = new_types["Number?"]
        self.check_parse_number_with_optional(parse_candidate, "Number?")
        parse_candidate = new_types["Number+"]
        self.check_parse_number_with_many(parse_candidate, "Number+")
        parse_candidate = new_types["Number*"]
        self.check_parse_number_with_many0(parse_candidate, "Number*")

    def test_create_type_variants__raises_error_with_invalid_type_name(self):
        type_builder = CardinalityFieldTypeBuilder
        for invalid_type_name in TestCardinalityField.INVALID_TYPE_NAMES:
            type_dict = dict(Number=parse_number)
            with self.assertRaises(ValueError):
                type_names = [invalid_type_name]
                type_builder.create_type_variants(type_names, type_dict)

    # RENAMED (was test_create_missing_type_variants__...): the body calls
    # create_type_variants(), and the old name collided with the real
    # create_missing_type_variants test further below, shadowing this one.
    def test_create_type_variants__raises_error_with_missing_primary_type(self):
        type_builder = CardinalityFieldTypeBuilder
        for special_name in self.generate_type_variants("Number"):
            for type_dict, description in self.INVALID_TYPE_DICT_DATA:
                self.assertNotIn("Number", type_dict)
                with self.assertRaises(MissingTypeError):
                    names = [special_name]
                    type_builder.create_type_variants(names, type_dict)

    # -- METHOD: CardinalityFieldTypeBuilder.create_missing_type_variants()
    def test_create_missing_type_variants__all_missing(self):
        type_builder = CardinalityFieldTypeBuilder
        missing_names = ["Number?", "Number+", "Number*"]
        new_types = type_builder.create_missing_type_variants(missing_names,
                                                              dict(Number=parse_number))
        self.assertSequenceEqual(set(new_types.keys()), set(missing_names))
        self.assertEqual(len(new_types), 3)

    def test_create_missing_type_variants__none_missing(self):
        # -- PREPARE: Create all types and store them in the type_dict.
        type_builder = CardinalityFieldTypeBuilder
        type_names = ["Number?", "Number+", "Number*"]
        all_type_names = ["Number", "Number?", "Number+", "Number*"]
        type_dict = dict(Number=parse_number)
        new_types = type_builder.create_missing_type_variants(type_names,
                                                              type_dict)
        type_dict.update(new_types)
        self.assertSequenceEqual(set(new_types.keys()), set(type_names))
        self.assertSequenceEqual(set(type_dict.keys()), set(all_type_names))
        # -- TEST: All special types are already stored in the type_dict.
        new_types2 = type_builder.create_missing_type_variants(type_names,
                                                               type_dict)
        self.assertEqual(len(new_types2), 0)

    def test_create_missing_type_variants__some_missing(self):
        # -- PREPARE: Create some types and store them in the type_dict.
        type_builder = CardinalityFieldTypeBuilder
        special_names = ["Number?", "Number+", "Number*"]
        type_names1 = ["Number?", "Number*"]
        type_names2 = special_names
        type_dict = dict(Number=parse_number)
        new_types = type_builder.create_missing_type_variants(type_names1,
                                                              type_dict)
        type_dict.update(new_types)
        self.assertSequenceEqual(set(new_types.keys()), set(type_names1))
        self.assertSequenceEqual(set(type_dict.keys()),
                                 set(["Number", "Number?", "Number*"]))
        # -- TEST: All special types are already stored in the type_dict.
        new_types2 = type_builder.create_missing_type_variants(type_names2,
                                                               type_dict)
        self.assertEqual(len(new_types2), 1)
        self.assertSequenceEqual(set(new_types2.keys()), set(["Number+"]))

    # RENAMED (was test_create_type_variant__...): the body calls
    # create_missing_type_variants(), and the old name collided with the
    # create_type_variant test above, shadowing it.
    def test_create_missing_type_variants__raises_error_with_invalid_type_name(self):
        type_builder = CardinalityFieldTypeBuilder
        for invalid_type_name in TestCardinalityField.INVALID_TYPE_NAMES:
            type_dict = dict(Number=parse_number)
            with self.assertRaises(ValueError):
                type_names = [invalid_type_name]
                type_builder.create_missing_type_variants(type_names, type_dict)

    def test_create_missing_type_variants__raises_error_with_missing_primary_type(self):
        type_builder = CardinalityFieldTypeBuilder
        for special_name in self.generate_type_variants("Number"):
            for type_dict, description in self.INVALID_TYPE_DICT_DATA:
                self.assertNotIn("Number", type_dict)
                with self.assertRaises(MissingTypeError):
                    names = [special_name]
                    type_builder.create_missing_type_variants(names, type_dict)
# -----------------------------------------------------------------------------
# MAIN:
# -----------------------------------------------------------------------------
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
# Copyright (c) 2012-2013 by Jens Engel (https://github/jenisys/parse_type)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
| {
"content_hash": "f9757bae9a064c2f86e31b226276804c",
"timestamp": "",
"source": "github",
"line_count": 326,
"max_line_length": 88,
"avg_line_length": 49.8159509202454,
"alnum_prop": 0.6322660098522167,
"repo_name": "benthomasson/parse_type",
"id": "3c59cb0635583f21ca5821eb487854a4dd406550",
"size": "16286",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_cardinality_field.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "268858"
},
{
"name": "Shell",
"bytes": "672"
}
],
"symlink_target": ""
} |
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as pl
# import pylab as pl
# Read one integer per line from stdin and save a 100-bin histogram.
if len(sys.argv) != 2:
    # FIX: the original only printed the message and then fell through,
    # crashing later with a NameError because outputFile was never assigned.
    print("Need a filename for the plot output. Exiting...")
    sys.exit(1)
outputFile = sys.argv[1]

data = []
for line in sys.stdin:
    data.append(line.strip())
# FIX: list() is required on Python 3, where map() returns a lazy iterator
# that matplotlib's hist() cannot consume as a data sequence.
data = list(map(int, data))

pl.hist(data, 100)
pl.savefig(outputFile)
# pl.show()
| {
"content_hash": "fd8cc1626137cdd6073a7bfdb80cfdb3",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 60,
"avg_line_length": 21.555555555555557,
"alnum_prop": 0.6443298969072165,
"repo_name": "kunalghosh/T-61.5060-Algorithmic-Methods-of-Data-Mining",
"id": "68eb8303bd85aa3c3d024558d40b1f684ded040a",
"size": "388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/approximate/hash_collisions/plot_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "21871"
},
{
"name": "Python",
"bytes": "8834"
},
{
"name": "Shell",
"bytes": "5736"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
## GTFS Models
class Agency(models.Model):
    """GTFS agency.txt: a transit agency providing services in the feed."""
    # Primary key comes straight from the GTFS feed.
    agency_id = models.CharField(primary_key=True, blank=False, null=False, max_length=200, unique=True)
    agency_url = models.URLField(blank=False, null=False)
    agency_name = models.CharField(max_length=200, blank=False, null=False)
    # Free-form timezone name; not validated here.
    agency_timezone = models.CharField(blank=False, null=False, max_length=100)
    # Optional two-letter language code.
    agency_lang = models.CharField(blank=True, null=True, max_length=2)
    agency_phone = models.CharField(blank=True, null=True, max_length=12)
    agency_fare_url = models.URLField(blank=True, null=True)
    agency_email = models.EmailField(blank=True, null=True)
    def __str__(self):
        return str(self.agency_name)
class Calendar(models.Model):
    """GTFS calendar.txt: weekly service pattern valid over a date range."""
    service_id = models.CharField(primary_key=True, unique=True, max_length=200)
    # One flag per weekday: True when the service runs on that day.
    monday = models.BooleanField(null=False, blank=False)
    tuesday = models.BooleanField(null=False, blank=False)
    wednesday = models.BooleanField(null=False, blank=False)
    thursday = models.BooleanField(null=False, blank=False)
    friday = models.BooleanField(null=False, blank=False)
    saturday = models.BooleanField(null=False, blank=False)
    sunday = models.BooleanField(null=False, blank=False)
    # Inclusive validity window of this service pattern.
    start_date = models.DateField(null=False, blank=False)
    end_date = models.DateField(null=False, blank=False)
    def __str__(self):
        return str(self.service_id)
class Calendar_date(models.Model):
    """GTFS calendar_dates.txt: a dated exception to a Calendar service."""
    exception_types = (
        (1, 'Added'),
        (2, 'Removed')
    )
    # Plain CharField (not a FK) so exceptions can reference external feeds.
    service_id = models.CharField(max_length=200)
    date = models.DateField(null=False, blank=False)
    exception_type = models.IntegerField(choices=exception_types, null=False, blank=False)
    def __str__(self):
        return "{} for {}".format(self.service_id, str(self.date))
class Fare_Attribute(models.Model):
    """GTFS fare_attributes.txt: fare pricing information."""
    payment_methods = (
        (0, 'On Board'),
        (1, 'Before Boarding')
    )
    transfers_methods = (
        (0, 'No transfers permitted'),
        (1, 'Passenger may transfer once'),
        (2, 'Passenger may transfer twice')
    )
    fare_id = models.CharField(primary_key=True, unique=True, max_length=200)
    price = models.FloatField(null=False, blank=False)
    # Three-letter currency code.
    currency_type = models.CharField(max_length=3, blank=False, null=False)
    payment_method = models.IntegerField(choices=payment_methods, blank=False, null=False)
    transfers = models.IntegerField(choices=transfers_methods, blank=True, null=True)
    # NOTE(review): blank=True with null=False means an empty form value would
    # reach the DB as NULL and fail; null=True was probably intended, but
    # changing it needs a migration, so it is only flagged here.
    transfer_duration = models.FloatField(blank=True, null=False)
    def __str__(self):
        # FIX: this model has no service_id field -- the original raised
        # AttributeError whenever an instance was stringified.
        return str(self.fare_id)
class Route(models.Model):
    """GTFS routes.txt: a transit route operated by an Agency."""
    # Vehicle types as defined by the GTFS specification.
    route_types = (
        (0, 'Tram, Streetcar, Light rail'),
        (1, 'Subway, Metro'),
        (2, 'Rail'),
        (3, 'Bus'),
        (4, 'Ferry'),
        (5, 'Cable Car'),
        (6, 'Gondola, Suspended cable car'),
        (7, 'Funicular')
    )
    route_id = models.CharField(primary_key=True, unique=True, max_length=200)
    agency = models.ForeignKey(Agency, on_delete=models.CASCADE, null=True, blank=True)
    route_short_name = models.CharField(null=False, blank=False, max_length=200)
    route_long_name = models.CharField(null=False, blank=False, max_length=200)
    route_desc = models.CharField(null=True, blank=True, max_length=200)
    route_type = models.IntegerField(choices=route_types, default=0, null=False,blank=False)
    route_url = models.URLField(blank=True, null=True)
    # Hex colors without the leading '#'.
    route_color = models.CharField(blank=True, null=True, max_length=6)
    route_text_color = models.CharField(blank=True, null=True, max_length=6)
    def __str__(self):
        return str(self.route_id+" " + self.route_short_name)
    def GetPatternIdTripDict(self):
        """Return a dictionary that maps pattern_id to a list of Trip objects."""
        # NOTE(review): Trip declares no pattern_id field in this module --
        # presumably it is attached to Trip instances elsewhere; confirm
        # before calling, otherwise this raises AttributeError.
        d = {}
        trips = Trip.objects.filter(route_id=self.route_id)
        for t in trips:
            d.setdefault(t.pattern_id, []).append(t)
        return d
class Trip(models.Model):
    """GTFS trips.txt: a single vehicle journey along a Route.

    The service calendar is linked through a generic foreign key
    (dynamic_key/service_id) so a trip can reference either a Calendar
    or a Calendar_date row.
    """
    choices = (
        (0, 'No Information'),
        (1, 'At least one place'),
        (2, 'No Place'))
    direction_choices = (
        (0, 'Outbound'),
        (1, 'Inbound'))
    trip_id = models.CharField(primary_key=True, unique=True, max_length=200)
    route = models.ForeignKey(Route, on_delete=models.CASCADE)
    dynamic_key = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    service_id = models.CharField(max_length=200)
    content_object = GenericForeignKey('dynamic_key', 'service_id')
    trip_headsign = models.CharField(null=True, blank=True, max_length=200)
    trip_short_name = models.CharField(null=True, blank=True, max_length=200)
    direction_id = models.IntegerField(choices=direction_choices, null=True, blank=True)
    block_id = models.CharField(null=False, blank=False, max_length=200)
    wheelchair_accessible = models.IntegerField(choices=choices, default=0)
    bikes_allowed = models.IntegerField(choices=choices, default=0)
    def __str__(self):
        # FIX: trip_headsign is nullable; the original concatenation raised
        # TypeError for trips without a headsign. Output is unchanged when a
        # headsign is present.
        return str(self.trip_id) + " " + (self.trip_headsign or "")
class Stop(models.Model):
    """GTFS stops.txt: a location where vehicles pick up or drop off riders."""
    location_choices = (
        (0, 'Stop'),
        (1, 'Station'))
    choices = (
        (0, 'No Information'),
        (1, 'At least one place'),
        (2, 'No Place'))
    stop_id = models.CharField(primary_key=True, unique=True, max_length=200)
    stop_code = models.CharField(null=True, blank=True, max_length=200)
    stop_name = models.CharField(null=False, blank=False, max_length=200)
    stop_desc = models.CharField(null=True, blank=True, max_length=200)
    # WGS84 coordinates.
    stop_lat = models.FloatField(null=False, blank=False)
    stop_lon = models.FloatField(null=False, blank=False)
    zone_id = models.CharField(null=True, blank=True, max_length=200)
    stop_url = models.URLField(null=True, blank=True)
    location_type = models.IntegerField(choices=location_choices, blank=True, null=True)
    stop_timezone = models.CharField(null=True, blank=True, max_length=200)
    wheelchair_boarding = models.IntegerField(choices=choices, blank=True, null=True)
    def __str__(self):
        return str(self.stop_id + " " + self.stop_name)
class Stop_time(models.Model):
    """GTFS stop_times.txt record: one scheduled stop within a trip."""
    pickup_choices = (
        (0, 'Regularly scheduled pickup'),
        (1, 'No pickup available'),
        (2, 'Must phone agency to arrange pickup'),
        (3, 'Must coordinate with driver to arrange pickup'))
    drop_off_choices = (
        (0, 'Regularly scheduled drop off'),
        (1, 'No drop off available'),
        (2, 'Must phone agency to arrange drop off'),
        (3, 'Must coordinate with driver to arrange drop off'))
    times_choices = (
        (0, 'Times are considered approximate'),
        (1, 'Times are considered exact'))
    trip = models.ForeignKey(Trip, on_delete=models.CASCADE)
    arrival_time = models.TimeField(null=False, blank=False, help_text="HH:MM:SS")
    departure_time = models.TimeField(null=False, blank=False, help_text="HH:MM:SS")
    stop = models.ForeignKey(Stop, related_name='stops', on_delete=models.CASCADE)
    stop_sequence = models.PositiveIntegerField(null=True, blank=True)
    stop_headsign = models.CharField(max_length=200, null=True, blank=True)
    pickup_type = models.IntegerField(choices=pickup_choices, default=0, null=True, blank=True)
    drop_off_type = models.IntegerField(choices=drop_off_choices, default=0, null=True, blank=True)
    timepoint = models.IntegerField(choices=times_choices, default=1, null=True, blank=True)

    def __str__(self):
        # self.trip_id is the FK column attribute Django derives from 'trip'.
        fields = (self.trip_id, self.stop, self.arrival_time, self.departure_time)
        return " ".join(str(field) for field in fields)
| {
"content_hash": "aedac52a69492d501b48463ec45cbca1",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 104,
"avg_line_length": 40.807291666666664,
"alnum_prop": 0.6703254626675176,
"repo_name": "LiquidGalaxyLAB/FlOYBD",
"id": "03ee9c05c7b0c243b8293cdff8a32c6a72ddc810",
"size": "7835",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Django/mysite/floybd/gtfs_models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18712"
},
{
"name": "HTML",
"bytes": "93263"
},
{
"name": "JavaScript",
"bytes": "507197"
},
{
"name": "PHP",
"bytes": "4606"
},
{
"name": "Python",
"bytes": "385172"
},
{
"name": "Shell",
"bytes": "8966"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import sys, logging
import json
import re
import mechanize
import boto3
# Route mechanize's internal logging through stdout so it is captured by
# the Lambda/CloudWatch log stream; enable verbose output when DEBUG is set.
mechlog = logging.getLogger("mechanize")
mechlog.addHandler(logging.StreamHandler(sys.stdout))
if os.getenv('DEBUG') != None:
    logging.basicConfig(level=logging.DEBUG)
    mechlog.setLevel(logging.DEBUG)
# URL of the ACM approval page embedded in the notification email.
confirm_url = re.compile("https://.*\.certificates.amazon.com/approvals\?[A-Za-z0-9=&-]+")
# Text shown on the page after a successful approval.
approval_text = re.compile("You have approved")
# Scrapers for the fields rendered on the confirmation page.
# NOTE(review): these patterns are not raw strings; '\s'/'\?' rely on Python
# passing unrecognized escapes through unchanged -- consider r"..." literals.
domain_re = re.compile(".*<b>Domain name</b>.*?<td class='right-column'>\s+(.*?)\s.*", re.DOTALL)
accountid_re = re.compile(".*<b>AWS account number</b>.*?<td class='right-column'>\s+(.*?)\s.*", re.DOTALL)
region_re = re.compile(".*<b>AWS Region</b>.*?<td class='right-column'>\s+(.*?)\s.*", re.DOTALL)
certid_re = re.compile(".*<b>Certificate identifier</b>.*?<td class='right-column'>\s+(.*?)\s.*", re.DOTALL)
def panic(msg):
    """Abort processing by raising a generic Exception carrying *msg*."""
    raise Exception(msg)
def validate(event, context):
    """Lambda entry point: auto-approve an ACM certificate validation email.

    event: SNS event whose message payload carries the raw approval email.
    context: Lambda context object (unused).
    Silently returns when the email contains no ACM confirmation URL;
    panics (raises) on any parse/approval failure.
    """
    msg = json.loads(event['Records'][0]['Sns']['Message'])
    match = confirm_url.search(msg['content'])
    # Ignore emails that don't match the certificate confirm URL
    if not match:
        return
    url = match.group(0)
    logging.info("CONFIRMATION URL: %s" % url)
    br = mechanize.Browser()
    # The approval page disallows robots; bypass robots.txt handling.
    br.set_handle_robots(False)
    # Fetch approval page
    logging.debug("OPENING CONFIRMATION URL")
    response = br.open(url)
    logging.debug("OPENED CONFIRMATION URL")
    content = response.get_data()
    # Extract confirmation page details
    # NOTE(review): each regex is evaluated twice (once to test, once for
    # group(1)); panic() raises if any field cannot be scraped.
    domain, account_id, region, cert_id = [regex.match(content).group(1)
        if regex.match(content) else panic("Couldn't parse confirmation page!")
        for regex in (domain_re, accountid_re, region_re, certid_re)]
    # Remove dashes from account_id
    # NOTE(review): str.translate(None, '-') is Python-2-only syntax.
    account_id = account_id.translate(None, '-')
    # Always log what we're confirming
    print("Validation URL: '%s'" % url)
    print("Domain: '%s'" % domain)
    print("Account ID: '%s'" % account_id)
    print("Region: '%s'" % region)
    print("Certificate ID: '%s'" % cert_id)
    # Check if the cert is pending validation
    acm = boto3.client('acm', region_name=region)
    cert = acm.describe_certificate(CertificateArn="arn:aws:acm:%s:%s:certificate/%s"
                                    % (region, account_id, cert_id))
    logging.debug(cert)
    if cert['Certificate']['Status'] != 'PENDING_VALIDATION':
        panic("Confirmation certificate is not pending validation!")
    # It's the first and only form on the page
    # Could we match on action="/approvals"?
    br.select_form(nr=0)
    logging.info("SUBMITTING CONFIRMATION FORM")
    response = br.submit(name='commit')
    logging.info("SUBMITTED CONFIRMATION FORM")
    content = response.get_data()
    match = approval_text.search(content)
    if match:
        print("Certificate for %s approved!" % domain)
    else:
        logging.error(content)
        panic("No confirmation of certificate approval!")
| {
"content_hash": "61f080ff85add966bb02aa81363c451a",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 108,
"avg_line_length": 34.63529411764706,
"alnum_prop": 0.6606657608695652,
"repo_name": "kounta/lambda-acm-validate",
"id": "5b30e0ab57d1e31b8f6c0a435bb906112b14c1bb",
"size": "2944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "acm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "312"
},
{
"name": "Python",
"bytes": "2944"
}
],
"symlink_target": ""
} |
import sqlite3
from cobiv.modules.database.datasources.datasource import Datasource
class Sqliteds(Datasource):
    """Datasource backed by a local SQLite database file."""

    # PRAGMAs applied to every new connection to tune SQLite for this app.
    _CONNECTION_PRAGMAS = (
        'PRAGMA cache_spill = off',
        'PRAGMA temp_store = MEMORY',
        'PRAGMA synchronous = NORMAL',
        'PRAGMA journal_mode = wal',
        'PRAGMA locking_mode = EXCLUSIVE',
    )

    def build_yaml_config(self, config):
        """Register the default database path under this datasource's key."""
        default_url = self.get_app().get_user_path('cobiv.db')
        config[self.get_name()] = {'url': default_url}

    def create_connection(self):
        """Open and tune a SQLite connection (in-memory when unconfigured)."""
        db_url = self.get_config_value('url', ':memory:')
        connection = sqlite3.connect(db_url, check_same_thread=False)
        connection.row_factory = sqlite3.Row
        for pragma in self._CONNECTION_PRAGMAS:
            connection.execute(pragma)
        return connection

    def get_name(self):
        return "sqlite"
| {
"content_hash": "c345417f541506405244d96d8ce149ed",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 83,
"avg_line_length": 33.90909090909091,
"alnum_prop": 0.6434316353887399,
"repo_name": "gokudomatic/cobiv",
"id": "0f5d327b422c28e5c08286c4c7426d7f3be94684",
"size": "746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cobiv/modules/database/datasources/sqlite/sqliteds.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2628"
},
{
"name": "Python",
"bytes": "343723"
}
],
"symlink_target": ""
} |
from FileType import *
class FlatDict(FileType):
    """FileType handler for '.dict' files holding a plain python dict.

    Registration is declarative: the FileType framework inspects the class
    attributes below to decide which files/objects this handler claims.
    read()/write() are not implemented yet (see the commented-out stubs).
    """
    extensions = ['.dict']   ## list of extensions handled by this class
    dataTypes = [dict]    ## list of python types handled by this class
    priority = 50

    ##not implemented yet.
    #@classmethod
    #def write(cls, data, dirHandle, fileName, **args):
        #"""Write data to fileName.
        #Return the file name written (this allows the function to modify the requested file name)
        #"""
        #fileName = cls.addExtension(fileName)
        #return fileName

    #@classmethod
    #def read(cls, fileHandle):
        #"""Read a file, return a data object"""
        #return MA(file=fileHandle.name())
| {
"content_hash": "9a3ec609ce441e70c7b2291b26fb3756",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 98,
"avg_line_length": 28.96,
"alnum_prop": 0.5925414364640884,
"repo_name": "hiuwo/acq4",
"id": "7d7aadc2c9f48914181d3eb98f439515d2650d98",
"size": "748",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "acq4/filetypes/FlatDict.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "18652"
},
{
"name": "C",
"bytes": "1051646"
},
{
"name": "C++",
"bytes": "636100"
},
{
"name": "CSS",
"bytes": "716"
},
{
"name": "Matlab",
"bytes": "1752"
},
{
"name": "Processing",
"bytes": "13403"
},
{
"name": "Python",
"bytes": "4925976"
},
{
"name": "Shell",
"bytes": "64"
}
],
"symlink_target": ""
} |
"""
Copyright 2013 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import syndicate.rg.common as rg_common
import syndicate.rg.request as rg_request
import syndicate.rg.storage as rg_storage
from hashlib import sha256 as HashFunc
from wsgiref.simple_server import make_server
from wsgiref.util import FileWrapper
from cgi import parse_qs, FieldStorage
from StringIO import StringIO
import types
import errno
# Module-level logger shared by every handler in this file.
log = rg_common.get_logger()

#-------------------------
# Names of the multipart form fields carried by POST and DELETE requests.
METADATA_FIELD_NAME = "metadata"
DATA_FIELD_NAME = "data"
#-------------------------
def validate_post( self, post_dict ):
   '''
   Check that post_dict contains every field named in REQUIRED_POST_FIELDS.
   Return 0 on success, 400 (HTTP Bad Request) on failure.

   NOTE(review): 'self' is unused -- this is a module-level function, not a
   method; the parameter is kept for backward compatibility with callers.
   NOTE(review): REQUIRED_POST_FIELDS is not defined in this module -- confirm
   it is provided elsewhere before relying on this function.
   '''
   global REQUIRED_POST_FIELDS

   # sanity check
   try:
      rg_common.validate_fields( post_dict, REQUIRED_POST_FIELDS )
      return 0
   except Exception:
      # BUG FIX: the previous bare 'except:' also swallowed SystemExit and
      # KeyboardInterrupt, which must propagate.
      return 400
#-------------------------
def post_interpret_error( rc ):
   """
   Interpret a (negated) system error code as an HTTP (status, message)
   pair, for purposes of validating a caller's POST request.
   """
   # Map of recognized error codes; anything else is a generic bad request.
   known_errors = {
      -errno.EAGAIN: (503, "Try again later"),
      -errno.ENOENT: (404, "Not found"),
   }
   return known_errors.get( rc, (400, "Invalid Request") )
#-------------------------
def validate_infile( req_info, infile ):
   '''
   Verify the uploaded data stream against the request metadata.

   Recomputes the SHA-256 digest and byte count of infile and compares them
   to req_info.data_hash and req_info.size, then rewinds infile so the
   caller can re-read it from the beginning.

   Return (HTTP status, message): (200, "OK") on success, 400 on mismatch.
   '''
   hasher = HashFunc()
   byte_count = 0

   # consume the stream in 4 KB chunks
   chunk = infile.read( 4096 )
   while chunk:
      hasher.update( chunk )
      byte_count += len( chunk )
      chunk = infile.read( 4096 )

   computed_hash = hasher.hexdigest()

   # leave the stream positioned at the start for the storage layer
   infile.seek(0)

   # check size
   if req_info.size != byte_count:
      log.error("Size mismatch: expected %s, got %s" % (req_info.size, byte_count))
      return (400, "Invalid Request")

   # check hash
   if req_info.data_hash != computed_hash:
      log.error("Hash mismatch: expected '%s', got '%s'" % (req_info.data_hash, computed_hash))
      return (400, "Invalid request")

   return (200, "OK")
#-------------------------
def post( metadata_field, infile ):
   '''
   Process a POST request.  Return (HTTP status code, message).
   Read all data from infile.

   metadata_field: a string containing a serialized ms_gateway_request_info structure
   infile: file-like object which can be read from
   '''
   # parse and cryptographically verify the request metadata
   req_info = None
   try:
      req_info = rg_request.parse_request_info_from_pb( metadata_field )
   except Exception as e:
      # verification failure
      log.exception( e )
      return (403, "Authorization Required")

   if req_info is None:
      log.error("could not parse uploaded request info")
      return (400, "Invalid request")

   log.info("POST %s" % rg_request.req_info_to_string( req_info ) )

   # validate security--the calling gateway must be a UG with CAP_WRITE_DATA
   rc = rg_request.gateway_is_UG( req_info )
   if rc != 0:
      return post_interpret_error( rc )

   rc = rg_request.check_post_caps( req_info )
   if rc != 0:
      return post_interpret_error( rc )

   # validate the input (size and hash must match the signed metadata)
   rc, msg = validate_infile( req_info, infile )
   if rc != 200:
      return (rc, msg)

   # store
   try:
      rc = rg_storage.write_data( req_info, infile )
   except Exception as e:
      log.exception( e )
      # BUG FIX: previously assigned the (500, message) tuple to rc and fell
      # through to 'return (rc, "OK")', producing a malformed nested tuple
      # ((500, "..."), "OK").  Return the error pair directly instead.
      return (500, "Internal server error")

   log.info("write_data rc = %s" % (str(rc)) )
   return (rc, "OK")
#-------------------------
def get( url_path, outfile ):
   '''
   Process a GET request.  Return (HTTP status code, message).
   Write all fetched data to outfile.

   url_path: path of the object to fetch
   outfile: file-like object which can be written to
   '''
   # parse -- the object identity is encoded entirely in the URL path
   req_info = rg_request.parse_request_info_from_url_path( url_path )
   if req_info == None:
      log.error("Invalid URL path '%s'" % url_path )
      return (400, "Invalid request")

   # fetch
   rc = 0
   status = "OK"
   try:
      rc = rg_storage.read_data( req_info, outfile )
   except Exception, e:
      log.exception( e )
      rc = 500
      status = "Internal server error"

   log.info("read_data rc = %s" % str(rc) )
   return (rc, status)
#-------------------------
def delete( metadata_field ):
   '''
   Process a DELETE request.  Return (HTTP status code, message).

   Generate and write out a deletion receipt, if this is a manifest.
   Do this even if the data wasn't found (i.e. the HTTP status code
   indicates the status of the operation, but we should always
   give back a deletion receipt as proof of work).

   metadata_field: uploaded metadata value for the request
   '''
   # parse and cryptographically verify the request metadata
   req_info = None
   try:
      req_info = rg_request.parse_request_info_from_pb( metadata_field )
   except Exception as e:
      # verification failure
      log.exception( e )
      return (403, "Authorization required")

   if req_info is None:
      log.error("could not parse uploaded request info")
      return (400, "Invalid request")

   log.info("DELETE %s" % rg_request.req_info_to_string( req_info ) )

   # validate security--the calling gateway must be a UG with CAP_COORDINATE
   if rg_request.gateway_is_UG( req_info ) != 0:
      return (400, "Invalid Request")

   # BUG FIX: message previously read "Invalid Reqeust" (typo).
   if rg_request.check_delete_caps( req_info ) != 0:
      return (400, "Invalid Request")

   # delete
   try:
      rc = rg_storage.delete_data( req_info )
   except Exception as e:
      log.exception( e )
      # BUG FIX: previously assigned the (500, message) tuple to rc and fell
      # through to 'return (rc, "OK")', producing a malformed nested tuple.
      return (500, "Internal server error")

   log.info("delete_data rc = %s" % str(rc) )
   return (rc, "OK")
#-------------------------
def invalid_request( start_response, status="400 Invalid request", resp="Invalid request\n" ):
   '''
   Send an HTTP error response through the WSGI start_response callable.
   Returns the response body as a one-element list, per the WSGI spec.
   '''
   response_headers = [
      ('Content-Type', 'text/plain'),
      ('Content-Length', str(len(resp))),
   ]
   start_response( status, response_headers )
   return [resp]
#-------------------------
def valid_request( start_response, status="200 OK", resp="OK" ):
   '''
   Send an HTTP success response through the WSGI start_response callable.
   Returns the response body as a one-element list, per the WSGI spec.
   '''
   response_headers = [
      ('Content-Type', 'text/plain'),
      ('Content-Length', str(len(resp))),
   ]
   start_response( status, response_headers )
   return [resp]
#-------------------------
def wsgi_handle_request( environ, start_response ):
   '''
   handle one WSGI request

   Dispatches on REQUEST_METHOD:
     GET    -- stream the requested object back to the caller
     POST   -- store uploaded data (multipart fields: metadata, data)
     DELETE -- delete stored data (multipart field: metadata)

   Returns a WSGI response iterable; status and headers are delivered via
   start_response.
   '''
   global METADATA_FIELD_NAME
   global DATA_FIELD_NAME

   required_post_fields = [METADATA_FIELD_NAME, DATA_FIELD_NAME]
   # NOTE(review): required_delete_fields is never used below; the DELETE
   # branch checks METADATA_FIELD_NAME directly.
   required_delete_fields = [METADATA_FIELD_NAME]

   if environ['REQUEST_METHOD'] == 'GET':
      # GET request
      url_path = environ['PATH_INFO']
      outfile = StringIO()
      rc, msg = get( url_path, outfile )

      if rc == 200:
         # success: stream the fetched object back to the caller
         size = outfile.len
         outfile.seek(0)
         headers = [('Content-Type', 'application/octet-stream'), ('Content-Length', str(size))]
         start_response( '200 %s' % msg, headers )
         return FileWrapper( outfile )
      else:
         return invalid_request( start_response, status="%s %s" % (rc, msg) )

   elif environ['REQUEST_METHOD'] == 'POST':
      # POST request.
      # get POST'ed fields
      post_fields = FieldStorage( fp=environ['wsgi.input'], environ=environ )

      # validate
      for f in required_post_fields:
         if f not in post_fields.keys():
            return invalid_request( start_response )

      metadata_field = post_fields[METADATA_FIELD_NAME].value
      infile = post_fields[DATA_FIELD_NAME].file

      # if no file was given, then make a stringIO wrapper around the given string
      if infile == None:
         infile = StringIO( post_fields[DATA_FIELD_NAME].value )

      rc, msg = post( metadata_field, infile )

      if rc == 200:
         return valid_request( start_response )
      else:
         return invalid_request( start_response, status="%s %s" % (rc, msg), resp="error code %s\n" % str(rc))

   elif environ['REQUEST_METHOD'] == 'DELETE':
      # DELETE request
      post_fields = FieldStorage( fp=environ['wsgi.input'], environ=environ )

      # validate
      # NOTE(review): has_key() is Python-2-only; consistent with the rest of
      # this Python 2 module.
      if not post_fields.has_key(METADATA_FIELD_NAME):
         return invalid_request( start_response )

      metadata_field = post_fields[METADATA_FIELD_NAME].value

      rc, msg = delete( metadata_field )

      if rc == 200:
         return valid_request( start_response )
      else:
         return invalid_request( start_response, status="%s %s" % (rc, msg), resp="error code %s\n" % str(rc))

   else:
      # not supported
      return invalid_request( start_response, status="501 No Such Method", resp="Method not supported\n" )
| {
"content_hash": "69b10abf31480ba090f34cf8216aaf3d",
"timestamp": "",
"source": "github",
"line_count": 332,
"max_line_length": 110,
"avg_line_length": 27.63855421686747,
"alnum_prop": 0.5990627724498693,
"repo_name": "iychoi/syndicate",
"id": "94d90bbf5a01384c6cf676e8a5c050a2119ae0a0",
"size": "9199",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "old/RG-python/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "343449"
},
{
"name": "C++",
"bytes": "3135505"
},
{
"name": "CSS",
"bytes": "321366"
},
{
"name": "Gnuplot",
"bytes": "3596"
},
{
"name": "HTML",
"bytes": "172638"
},
{
"name": "JavaScript",
"bytes": "55112"
},
{
"name": "Makefile",
"bytes": "43170"
},
{
"name": "Perl",
"bytes": "8025"
},
{
"name": "Protocol Buffer",
"bytes": "20793"
},
{
"name": "Python",
"bytes": "3289342"
},
{
"name": "Ruby",
"bytes": "13015"
},
{
"name": "Shell",
"bytes": "63133"
},
{
"name": "TeX",
"bytes": "605910"
},
{
"name": "Thrift",
"bytes": "2996"
}
],
"symlink_target": ""
} |
"""Interface for a USB-connected Monsoon power meter
(http://msoon.com/LabEquipment/PowerMonitor/).
"""
_author_ = 'kens@google.com (Ken Shirriff)'
import fcntl
import os
import select
import signal
import stat
import struct
import sys
import time
import collections
try:
from absl import flags
except ImportError:
import gflags as flags # http://code.google.com/p/python-gflags/
try:
from google3.third_party.py import serial
except ImportError:
import serial # http://pyserial.sourceforge.net/
FLAGS = flags.FLAGS
class Monsoon:
  """
  Provides a simple class to use the power meter, e.g.
    mon = monsoon.Monsoon()
    mon.SetVoltage(3.7)
    mon.StartDataCollection()
    mydata = []
    while len(mydata) < 1000:
      mydata.extend(mon.CollectData())
    mon.StopDataCollection()
  See http://wiki/Main/MonsoonProtocol for information on the protocol.
  """

  # USB passthrough modes accepted by SetUsbPassthrough().
  USB_OFF = 0
  USB_ON = 1
  USB_AUTO = 2

  def __init__(self, device=None, serialno=None, wait=1):
    """
    Establish a connection to a Monsoon.
    By default, opens the first available port, waiting if none are ready.
    A particular port can be specified with "device", or a particular Monsoon
    can be specified with "serialno" (using the number printed on its back).
    With wait=0, IOError is thrown if a device is not immediately available.
    """
    # Calibration state, filled in from type-1/type-2 packets in CollectData().
    self._coarse_ref = self._fine_ref = self._coarse_zero = self._fine_zero = 0
    self._coarse_scale = self._fine_scale = 0
    self._last_seq = 0
    self.start_voltage = 0
    self._usb_passthrough = Monsoon.USB_AUTO
    if device:
      self.ser = serial.Serial(device, timeout=1)
      return
    while 1:  # try all /dev/ttyACM* until we find one we can use
      for dev in os.listdir("/dev"):
        if not dev.startswith("ttyACM"): continue
        tmpname = "/tmp/monsoon.%s.%s" % (os.uname()[0], dev)
        self._tempfile = open(tmpname, "w")
        try:
          # best-effort: let other users share the lockfile
          os.chmod(tmpname, 0666)
        except OSError:
          pass
        try:  # use a lockfile to ensure exclusive access
          fcntl.lockf(self._tempfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError as e:
          print >>sys.stderr, "device %s is in use" % dev
          continue
        try:  # try to open the device
          self.ser = serial.Serial("/dev/%s" % dev, timeout=1)
          self.StopDataCollection()  # just in case
          self._FlushInput()  # discard stale input
          status = self.GetStatus()
        except Exception as e:
          print >>sys.stderr, "error opening device %s: %s" % (dev, e)
          continue
        if not status:
          print >>sys.stderr, "no response from device %s" % dev
        elif serialno and status["serialNumber"] != serialno:
          print >>sys.stderr, ("Note: another device serial #%d seen on %s" %
                               (status["serialNumber"], dev))
        else:
          self.start_voltage = status["voltage1"]
          return
      self._tempfile = None
      if not wait: raise IOError("No device found")
      print >>sys.stderr, "waiting for device..."
      time.sleep(1)

  def GetStatus(self):
    """ Requests and waits for status.  Returns status dictionary. """
    # status packet format
    STATUS_FORMAT = ">BBBhhhHhhhHBBBxBbHBHHHHBbbHHBBBbbbbbbbbbBH"
    STATUS_FIELDS = [
        "packetType", "firmwareVersion", "protocolVersion",
        "mainFineCurrent", "usbFineCurrent", "auxFineCurrent", "voltage1",
        "mainCoarseCurrent", "usbCoarseCurrent", "auxCoarseCurrent", "voltage2",
        "outputVoltageSetting", "temperature", "status", "leds",
        "mainFineResistor", "serialNumber", "sampleRate",
        "dacCalLow", "dacCalHigh",
        "powerUpCurrentLimit", "runTimeCurrentLimit", "powerUpTime",
        "usbFineResistor", "auxFineResistor",
        "initialUsbVoltage", "initialAuxVoltage",
        "hardwareRevision", "temperatureLimit", "usbPassthroughMode",
        "mainCoarseResistor", "usbCoarseResistor", "auxCoarseResistor",
        "defMainFineResistor", "defUsbFineResistor", "defAuxFineResistor",
        "defMainCoarseResistor", "defUsbCoarseResistor", "defAuxCoarseResistor",
        "eventCode", "eventData", ]
    self._SendStruct("BBB", 0x01, 0x00, 0x00)
    while 1:  # Keep reading, discarding non-status packets
      bytes = self._ReadPacket()
      if not bytes: return None
      if len(bytes) != struct.calcsize(STATUS_FORMAT) or bytes[0] != "\x10":
        print >>sys.stderr, "wanted status, dropped type=0x%02x, len=%d" % (
            ord(bytes[0]), len(bytes))
        continue
      status = dict(zip(STATUS_FIELDS, struct.unpack(STATUS_FORMAT, bytes)))
      assert status["packetType"] == 0x10
      # Convert raw register values into engineering units where the
      # conversion is known; current readings need calibration data.
      for k in status.keys():
        if k.endswith("VoltageSetting"):
          status[k] = 2.0 + status[k] * 0.01
        elif k.endswith("FineCurrent"):
          pass  # needs calibration data
        elif k.endswith("CoarseCurrent"):
          pass  # needs calibration data
        elif k.startswith("voltage") or k.endswith("Voltage"):
          status[k] = status[k] * 0.000125
        elif k.endswith("Resistor"):
          status[k] = 0.05 + status[k] * 0.0001
          if k.startswith("aux") or k.startswith("defAux"): status[k] += 0.05
        elif k.endswith("CurrentLimit"):
          status[k] = 8 * (1023 - status[k]) / 1023.0
      return status

  def RampVoltage(self, start, end):
    """ Gradually raise output voltage from start to end in 0.1 V steps,
    to avoid tripping the Monsoon's overvoltage protection. """
    v = start
    if v < 3.0: v = 3.0  # protocol doesn't support lower than this
    while (v < end):
      self.SetVoltage(v)
      v += .1
      time.sleep(.1)
    self.SetVoltage(end)

  def SetVoltage(self, v):
    """ Set the output voltage, 0 to disable. """
    if v == 0:
      self._SendStruct("BBB", 0x01, 0x01, 0x00)
    else:
      # register value encodes volts above 2.0 in 0.01 V units
      self._SendStruct("BBB", 0x01, 0x01, int((v - 2.0) * 100))

  def SetMaxCurrent(self, i):
    """Set the max output current."""
    assert i >= 0 and i <= 8
    # 10-bit register, inverted scale: 1023 = 0 A, 0 = 8 A
    val = 1023 - int((i/8)*1023)
    self._SendStruct("BBB", 0x01, 0x0a, val & 0xff)
    self._SendStruct("BBB", 0x01, 0x0b, val >> 8)

  def SetMaxPowerUpCurrent(self, i):
    """Set the max power up current."""
    assert i >= 0 and i <= 8
    val = 1023 - int((i/8)*1023)
    self._SendStruct("BBB", 0x01, 0x08, val & 0xff)
    self._SendStruct("BBB", 0x01, 0x09, val >> 8)

  def SetUsbPassthrough(self, val):
    """ Set the USB passthrough mode: 0 = off, 1 = on, 2 = auto. """
    # NOTE(review): 'Error' is not defined in this module; this raise would
    # itself fail with NameError.  Confirm the intended exception type.
    if val not in [Monsoon.USB_OFF, Monsoon.USB_ON, Monsoon.USB_AUTO]:
      raise Error("Expected one of: USB_OFF, USB_ON, USB_AUTO")
    self._usb_passthrough = val
    self._SendStruct("BBB", 0x01, 0x10, val)

  def GetUsbPassthrough(self):
    """ Return the USB passthrough mode (USB_OFF, USB_ON, USB_AUTO). """
    return self._usb_passthrough

  def StartDataCollection(self):
    """ Tell the device to start collecting and sending measurement data. """
    self._SendStruct("BBB", 0x01, 0x1b, 0x01)  # Mystery command
    self._SendStruct("BBBBBBB", 0x02, 0xff, 0xff, 0xff, 0xff, 0x03, 0xe8)

  def StopDataCollection(self):
    """ Tell the device to stop collecting measurement data. """
    self._SendStruct("BB", 0x03, 0x00)  # stop

  def CollectData(self):
    """ Return some current samples.  Call StartDataCollection() first.

    Returns a list of current readings from a type-0 data packet, or None
    on timeout.  Type-1/type-2 packets update the zero/reference
    calibration values used to scale subsequent readings.
    """
    while 1:  # loop until we get data or a timeout
      bytes = self._ReadPacket()
      if not bytes: return None
      if len(bytes) < 4 + 8 + 1 or bytes[0] < "\x20" or bytes[0] > "\x2F":
        print >>sys.stderr, "wanted data, dropped type=0x%02x, len=%d" % (
            ord(bytes[0]), len(bytes))
        continue
      seq, type, x, y = struct.unpack("BBBB", bytes[:4])
      # each sample is four big-endian int16s: main, usb, aux, voltage
      data = [struct.unpack(">hhhh", bytes[x:x+8])
              for x in range(4, len(bytes) - 8, 8)]
      if self._last_seq and seq & 0xF != (self._last_seq + 1) & 0xF:
        print >>sys.stderr, "data sequence skipped, lost packet?"
      self._last_seq = seq
      if type == 0:
        if not self._coarse_scale or not self._fine_scale:
          print >>sys.stderr, "waiting for calibration, dropped data packet"
          continue
        out = []
        for main, usb, aux, voltage in data:
          # LSB of the main reading selects coarse vs fine range
          if main & 1:
            out.append(((main & ~1) - self._coarse_zero) * self._coarse_scale)
          else:
            out.append((main - self._fine_zero) * self._fine_scale)
        return out
      elif type == 1:
        # zero calibration packet
        self._fine_zero = data[0][0]
        self._coarse_zero = data[1][0]
        # print >>sys.stderr, "zero calibration: fine 0x%04x, coarse 0x%04x" % (
        #     self._fine_zero, self._coarse_zero)
      elif type == 2:
        # reference calibration packet
        self._fine_ref = data[0][0]
        self._coarse_ref = data[1][0]
        # print >>sys.stderr, "ref calibration: fine 0x%04x, coarse 0x%04x" % (
        #     self._fine_ref, self._coarse_ref)
      else:
        print >>sys.stderr, "discarding data packet type=0x%02x" % type
        continue
      # See http://wiki/Main/MonsoonProtocol for the origin of these values
      if self._coarse_ref != self._coarse_zero:
        self._coarse_scale = 2.88 / (self._coarse_ref - self._coarse_zero)
      if self._fine_ref != self._fine_zero:
        self._fine_scale = 0.0332 / (self._fine_ref - self._fine_zero)

  def _SendStruct(self, fmt, *args):
    """ Pack a struct (without length or checksum) and send it. """
    data = struct.pack(fmt, *args)
    data_len = len(data) + 1
    # checksum covers the length byte plus the payload, modulo 256
    checksum = (data_len + sum(struct.unpack("B" * len(data), data))) % 256
    out = struct.pack("B", data_len) + data + struct.pack("B", checksum)
    self.ser.write(out)

  def _ReadPacket(self):
    """ Read a single data record as a string (without length or checksum). """
    len_char = self.ser.read(1)
    if not len_char:
      print >>sys.stderr, "timeout reading from serial port"
      return None
    # NOTE(review): the struct.unpack result (a tuple) is immediately
    # overwritten by ord() below -- the first assignment is dead code.
    data_len = struct.unpack("B", len_char)
    data_len = ord(len_char)
    if not data_len: return ""
    result = self.ser.read(data_len)
    if len(result) != data_len: return None
    body = result[:-1]
    checksum = (data_len + sum(struct.unpack("B" * len(body), body))) % 256
    if result[-1] != struct.pack("B", checksum):
      print >>sys.stderr, "invalid checksum from serial port"
      return None
    return result[:-1]

  def _FlushInput(self):
    """ Flush all read data until no more available. """
    self.ser.flush()
    flushed = 0
    while True:
      ready_r, ready_w, ready_x = select.select([self.ser], [], [self.ser], 0)
      if len(ready_x) > 0:
        print >>sys.stderr, "exception from serial port"
        return None
      elif len(ready_r) > 0:
        flushed += 1
        self.ser.read(1)  # This may cause underlying buffering.
        self.ser.flush()  # Flush the underlying buffer too.
      else:
        break
    if flushed > 0:
      print >>sys.stderr, "dropped >%d bytes" % flushed
def main(argv):
  """ Simple command-line interface for Monsoon.

  Applies the configuration flags (voltage, current, passthrough), then
  optionally streams averaged current samples to stdout at FLAGS.hz.
  """
  useful_flags = ["voltage", "status", "usbpassthrough", "samples", "current",
                  "startcurrent"]
  # NOTE(review): prints usage when no useful flag is given, but does NOT
  # return -- execution falls through and opens the device anyway.  Confirm
  # whether a 'return' is missing here.
  if not any(
      [FLAGS.voltage, FLAGS.status, FLAGS.usbpassthrough, FLAGS.samples, FLAGS.current, FLAGS.startcurrent]):
    print __doc__.strip()
    print FLAGS.main_module_help()
  if FLAGS.avg and FLAGS.avg < 0:
    print "--avg must be greater than 0"
    return
  mon = Monsoon(device=FLAGS.device, serialno=FLAGS.serialno)
  if FLAGS.voltage is not None:
    if FLAGS.ramp is not None:
      mon.RampVoltage(mon.start_voltage, FLAGS.voltage)
    else:
      mon.SetVoltage(FLAGS.voltage)
  if FLAGS.current is not None:
    mon.SetMaxCurrent(FLAGS.current)
  if FLAGS.status:
    items = sorted(mon.GetStatus().items())
    print "\n".join(["%s: %s" % item for item in items])
  if FLAGS.usbpassthrough:
    if FLAGS.usbpassthrough == 'off':
      mon.SetUsbPassthrough(0)
    elif FLAGS.usbpassthrough == 'on':
      mon.SetUsbPassthrough(1)
    elif FLAGS.usbpassthrough == 'auto':
      mon.SetUsbPassthrough(2)
    else:
      sys.exit('bad passthrough flag: %s' % FLAGS.usbpassthrough)
  if FLAGS.startcurrent is not None:
    mon.SetMaxPowerUpCurrent(FLAGS.startcurrent)
  if FLAGS.samples:
    # Make sure state is normal
    mon.StopDataCollection()
    status = mon.GetStatus()
    native_hz = status["sampleRate"] * 1000
    # Collect and average samples as specified
    mon.StartDataCollection()
    # In case FLAGS.hz doesn't divide native_hz exactly, use this invariant:
    # 'offset' = (consumed samples) * FLAGS.hz - (emitted samples) * native_hz
    # This is the error accumulator in a variation of Bresenham's algorithm.
    emitted = offset = 0
    collected = []
    history_deque = collections.deque()  # past n samples for rolling average
    try:
      last_flush = time.time()
      # FLAGS.samples == -1 means "stream forever"
      while emitted < FLAGS.samples or FLAGS.samples == -1:
        # The number of raw samples to consume before emitting the next output
        need = (native_hz - offset + FLAGS.hz - 1) / FLAGS.hz
        if need > len(collected):  # still need more input samples
          samples = mon.CollectData()
          if not samples: break
          collected.extend(samples)
        else:
          # Have enough data, generate output samples.
          # Adjust for consuming 'need' input samples.
          offset += need * FLAGS.hz
          while offset >= native_hz:  # maybe multiple, if FLAGS.hz > native_hz
            this_sample = sum(collected[:need]) / need
            if FLAGS.timestamp: print int(time.time()),
            if FLAGS.avg:
              history_deque.appendleft(this_sample)
              if len(history_deque) > FLAGS.avg: history_deque.pop()
              print "%f %f" % (this_sample,
                               sum(history_deque) / len(history_deque))
            else:
              print "%f" % this_sample
            sys.stdout.flush()
            offset -= native_hz
            emitted += 1  # adjust for emitting 1 output sample
          collected = collected[need:]
          now = time.time()
          if now - last_flush >= 0.99:  # flush every second
            sys.stdout.flush()
            last_flush = now
    except KeyboardInterrupt:
      print >>sys.stderr, "interrupted"
      mon.StopDataCollection()
if __name__ == '__main__':
  # Define flags here to avoid conflicts with people who use us as a library
  flags.DEFINE_boolean("status", None, "Print power meter status")
  flags.DEFINE_integer("avg", None,
                       "Also report average over last n data points")
  flags.DEFINE_float("voltage", None, "Set output voltage (0 for off)")
  flags.DEFINE_float("current", None, "Set max output current")
  # NOTE(review): "inital" typo in the help string below is user-visible.
  flags.DEFINE_float("startcurrent", None, "Set max power-up/inital current")
  flags.DEFINE_string("usbpassthrough", None, "USB control (on, off, auto)")
  flags.DEFINE_integer("samples", None, "Collect and print this many samples")
  flags.DEFINE_integer("hz", 5000, "Print this many samples/sec")
  flags.DEFINE_string("device", None, "Use this /dev/ttyACM... file")
  flags.DEFINE_integer("serialno", None, "Look for this Monsoon serial number")
  flags.DEFINE_boolean("timestamp", None,
                       "Also print integer (seconds) timestamp on each line")
  flags.DEFINE_boolean("ramp", True, "Gradually increase voltage to prevent "
                       "tripping Monsoon overvoltage")
  # FLAGS(argv) parses the command line before main() runs.
  main(FLAGS(sys.argv))
| {
"content_hash": "84bcf6d7991581a3856968ae4c39abdb",
"timestamp": "",
"source": "github",
"line_count": 428,
"max_line_length": 109,
"avg_line_length": 35.85747663551402,
"alnum_prop": 0.6148432918485698,
"repo_name": "google/synthmark",
"id": "33a69e49505749147cd2a34ba2af1aa680ddc0c4",
"size": "15421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/android/autopower/monsoon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "10372"
},
{
"name": "C++",
"bytes": "289984"
},
{
"name": "CMake",
"bytes": "2497"
},
{
"name": "Java",
"bytes": "52852"
},
{
"name": "Makefile",
"bytes": "1644"
},
{
"name": "Objective-C",
"bytes": "6078"
},
{
"name": "Objective-C++",
"bytes": "29553"
},
{
"name": "Python",
"bytes": "22818"
},
{
"name": "Shell",
"bytes": "1384"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import logging
from datetime import datetime, timedelta
from sqlalchemy import Table, Column, Integer, Float, String, Unicode, Boolean, DateTime
from sqlalchemy.schema import ForeignKey, Index
from sqlalchemy.orm import relation
from flexget import db_schema, plugin
from flexget.db_schema import UpgradeImpossible
from flexget.event import event
from flexget.entry import Entry
from flexget.utils.log import log_once
from flexget.utils.imdb import ImdbSearch, ImdbParser, extract_id, make_url
from flexget.utils.database import with_session
# Bump SCHEMA_VER whenever the table layout below changes; see the upgrade
# hook registered with flexget's db_schema machinery.
SCHEMA_VER = 7

Base = db_schema.versioned_base('imdb_lookup', SCHEMA_VER)

# association tables
# Many-to-many link tables between movies and their genres/actors/directors.
genres_table = Table('imdb_movie_genres', Base.metadata,
                     Column('movie_id', Integer, ForeignKey('imdb_movies.id')),
                     Column('genre_id', Integer, ForeignKey('imdb_genres.id')),
                     Index('ix_imdb_movie_genres', 'movie_id', 'genre_id'))
Base.register_table(genres_table)

actors_table = Table('imdb_movie_actors', Base.metadata,
                     Column('movie_id', Integer, ForeignKey('imdb_movies.id')),
                     Column('actor_id', Integer, ForeignKey('imdb_actors.id')),
                     Index('ix_imdb_movie_actors', 'movie_id', 'actor_id'))
Base.register_table(actors_table)

directors_table = Table('imdb_movie_directors', Base.metadata,
                        Column('movie_id', Integer, ForeignKey('imdb_movies.id')),
                        Column('director_id', Integer, ForeignKey('imdb_directors.id')),
                        Index('ix_imdb_movie_directors', 'movie_id', 'director_id'))
Base.register_table(directors_table)
class Movie(Base):
    """Cached details of a single imdb title."""
    __tablename__ = 'imdb_movies'
    id = Column(Integer, primary_key=True)
    title = Column(Unicode)
    original_title = Column(Unicode)
    url = Column(String, index=True)
    # many-to-many relations
    genres = relation('Genre', secondary=genres_table, backref='movies')
    actors = relation('Actor', secondary=actors_table, backref='movies')
    directors = relation('Director', secondary=directors_table, backref='movies')
    languages = relation('MovieLanguage', order_by='MovieLanguage.prominence')
    score = Column(Float)
    votes = Column(Integer)
    year = Column(Integer)
    plot_outline = Column(Unicode)
    mpaa_rating = Column(String, default='')
    photo = Column(String)
    # updated time, so we can grab new rating counts after 48 hours
    # set a default, so existing data gets updated with a rating
    updated = Column(DateTime)

    @property
    def imdb_id(self):
        """Return the imdb identifier extracted from the cached url."""
        return extract_id(self.url)

    @property
    def expired(self):
        """
        :return: True if movie details are considered to be expired, ie. need of update
        """
        if self.updated is None:
            log.debug('updated is None: %s' % self)
            return True
        # Base refresh interval is 2 days; each year of the movie's age adds
        # 5 more days, since older titles change less often.
        refresh_interval = 2
        if self.year:
            # Make sure age is not negative
            age = max((datetime.now().year - self.year), 0)
            refresh_interval += age * 5
            log.debug('movie `%s` age %i expires in %i days' % (self.title, age, refresh_interval))
        return self.updated < datetime.now() - timedelta(days=refresh_interval)

    def __repr__(self):
        return '<Movie(name=%s,votes=%s,year=%s)>' % (self.title, self.votes, self.year)
class MovieLanguage(Base):
    """Association object linking a Movie to a Language, ordered by prominence."""
    __tablename__ = 'imdb_movie_languages'
    movie_id = Column(Integer, ForeignKey('imdb_movies.id'), primary_key=True)
    language_id = Column(Integer, ForeignKey('imdb_languages.id'), primary_key=True)
    # lower prominence = listed earlier on the imdb page (used for ordering)
    prominence = Column(Integer)
    language = relation('Language')

    def __init__(self, language, prominence=None):
        self.language = language
        self.prominence = prominence
class Language(Base):
    """A spoken language referenced by cached movies."""
    __tablename__ = 'imdb_languages'
    id = Column(Integer, primary_key=True)
    name = Column(Unicode)

    def __init__(self, name):
        self.name = name
class Genre(Base):
    """A genre name referenced by cached movies."""
    __tablename__ = 'imdb_genres'
    id = Column(Integer, primary_key=True)
    name = Column(String)

    def __init__(self, name):
        self.name = name
class Actor(Base):
    """An actor referenced by cached movies, keyed by imdb id."""
    __tablename__ = 'imdb_actors'
    id = Column(Integer, primary_key=True)
    imdb_id = Column(String)
    name = Column(Unicode)

    def __init__(self, imdb_id, name=None):
        self.imdb_id = imdb_id
        self.name = name
class Director(Base):
    """A director referenced by cached movies, keyed by imdb id."""
    __tablename__ = 'imdb_directors'
    id = Column(Integer, primary_key=True)
    imdb_id = Column(String)
    name = Column(Unicode)

    def __init__(self, imdb_id, name=None):
        self.imdb_id = imdb_id
        self.name = name
class SearchResult(Base):
    """Cached result of a title -> imdb url search, including known failures."""
    __tablename__ = 'imdb_search'
    id = Column(Integer, primary_key=True)
    title = Column(Unicode, index=True)
    url = Column(String)
    # True when the search is known to fail; such titles are skipped later
    fails = Column(Boolean, default=False)
    queried = Column(DateTime)

    @property
    def imdb_id(self):
        """Return the imdb identifier extracted from the cached url."""
        return extract_id(self.url)

    def __init__(self, title, url=None):
        self.title = title
        self.url = url
        self.queried = datetime.now()

    def __repr__(self):
        return '<SearchResult(title=%s,url=%s,fails=%s)>' % (self.title, self.url, self.fails)
log = logging.getLogger('imdb_lookup')


@db_schema.upgrade('imdb_lookup')
def upgrade(ver, session):
    """Migrate the imdb_lookup schema to the current version.

    History of forced cache resets:
      v5: bad data cached due to imdb changes (GitHub #697)
      v6: association tables were not cleared on the previous upgrade (GitHub #714)
      v7: another layout change cached bad data (GitHub #729)
    """
    if ver is not None and ver > 6:
        return ver
    # Anything older (or unversioned) may hold bad cached data: start over.
    raise UpgradeImpossible('Resetting imdb_lookup caches because bad data may have been cached.')
class ImdbLookup(object):
    """
    Retrieves imdb information for entries.

    Example:
        imdb_lookup: yes

    Also provides imdb lookup functionality to all other imdb related plugins.
    """

    # Maps an entry field name to either a Movie attribute name or a callable
    # taking the Movie and returning the field's value.
    field_map = {
        'imdb_url': 'url',
        'imdb_id': lambda movie: extract_id(movie.url),
        'imdb_name': 'title',
        'imdb_original_name': 'original_title',
        'imdb_photo': 'photo',
        'imdb_plot_outline': 'plot_outline',
        'imdb_score': 'score',
        'imdb_votes': 'votes',
        'imdb_year': 'year',
        'imdb_genres': lambda movie: [genre.name for genre in movie.genres],
        'imdb_languages': lambda movie: [lang.language.name for lang in movie.languages],
        'imdb_actors': lambda movie: dict((actor.imdb_id, actor.name) for actor in movie.actors),
        'imdb_directors': lambda movie: dict((director.imdb_id, director.name) for director in movie.directors),
        'imdb_mpaa_rating': 'mpaa_rating',
        # Generic fields filled by all movie lookup plugins:
        'movie_name': 'title',
        'movie_year': 'year'}

    # plugin config is a plain on/off switch
    schema = {'type': 'boolean'}

    @plugin.priority(130)
    def on_task_metainfo(self, task, config):
        # nothing to do when the plugin is disabled in the task config
        if not config:
            return
        for entry in task.entries:
            self.register_lazy_fields(entry)

    def register_lazy_fields(self, entry):
        """Attach all imdb fields to the entry lazily; resolved on first access."""
        entry.register_lazy_func(self.lazy_loader, self.field_map)

    def lazy_loader(self, entry):
        """Does the lookup for this entry and populates the entry fields."""
        try:
            self.lookup(entry)
        except plugin.PluginError as e:
            # a failed lookup just leaves the lazy fields unset; log once
            log_once(str(e.value).capitalize(), logger=log)

    @with_session
    def imdb_id_lookup(self, movie_title=None, raw_title=None, session=None):
        """
        Perform faster lookup providing just imdb_id.
        Falls back to using basic lookup if data cannot be found from cache.

        .. note::

           API will be changed, it's dumb to return None on errors AND
           raise PluginError on some else

        :param movie_title: Name of the movie
        :param raw_title: Raw entry title
        :param session: SQLAlchemy session (supplied by @with_session)
        :return: imdb id or None
        :raises PluginError: Failure reason
        """
        if movie_title:
            log.debug('imdb_id_lookup: trying with title: %s' % movie_title)
            movie = session.query(Movie).filter(Movie.title == movie_title).first()
            if movie:
                log.debug('--> success! got %s returning %s' % (movie, movie.imdb_id))
                return movie.imdb_id
        if raw_title:
            log.debug('imdb_id_lookup: trying cache with: %s' % raw_title)
            result = session.query(SearchResult).filter(SearchResult.title == raw_title).first()
            if result:
                # this title is hopeless, give up ..
                if result.fails:
                    return None
                log.debug('--> success! got %s returning %s' % (result, result.imdb_id))
                return result.imdb_id
        if raw_title:
            # last hope with hacky lookup
            fake_entry = Entry(raw_title, '')
            self.lookup(fake_entry)
            return fake_entry['imdb_id']

    @plugin.internet(log)
    @with_session
    def lookup(self, entry, search_allowed=True, session=None):
        """
        Perform imdb lookup for entry.

        :param entry: Entry instance
        :param search_allowed: Allow fallback to search
        :param session: SQLAlchemy session (supplied by @with_session)
        :raises PluginError: Failure reason
        """
        from flexget.manager import manager
        if entry.get('imdb_id', eval_lazy=False):
            log.debug('No title passed. Lookup for %s' % entry['imdb_id'])
        elif entry.get('imdb_url', eval_lazy=False):
            log.debug('No title passed. Lookup for %s' % entry['imdb_url'])
        elif entry.get('title', eval_lazy=False):
            log.debug('lookup for %s' % entry['title'])
        else:
            raise plugin.PluginError('looking up IMDB for entry failed, no title, imdb_url or imdb_id passed.')
        # if imdb_id is included, build the url.
        if entry.get('imdb_id', eval_lazy=False) and not entry.get('imdb_url', eval_lazy=False):
            entry['imdb_url'] = make_url(entry['imdb_id'])
        # make sure imdb url is valid
        if entry.get('imdb_url', eval_lazy=False):
            imdb_id = extract_id(entry['imdb_url'])
            if imdb_id:
                # normalize to the canonical url form for cache lookups
                entry['imdb_url'] = make_url(imdb_id)
            else:
                log.debug('imdb url %s is invalid, removing it' % entry['imdb_url'])
                del(entry['imdb_url'])
        # no imdb_url, check if there is cached result for it or if the
        # search is known to fail
        if not entry.get('imdb_url', eval_lazy=False):
            result = session.query(SearchResult).filter(SearchResult.title == entry['title']).first()
            if result:
                # TODO: 1.2 this should really be checking task.options.retry
                if result.fails and not manager.options.execute.retry:
                    # this movie cannot be found, not worth trying again ...
                    log.debug('%s will fail lookup' % entry['title'])
                    raise plugin.PluginError('IMDB lookup failed for %s' % entry['title'])
                else:
                    if result.url:
                        log.trace('Setting imdb url for %s from db' % entry['title'])
                        entry['imdb_id'] = result.imdb_id
                        entry['imdb_url'] = result.url
        movie = None
        # no imdb url, but information required, try searching
        if not entry.get('imdb_url', eval_lazy=False) and search_allowed:
            log.verbose('Searching from imdb `%s`' % entry['title'])
            search = ImdbSearch()
            search_name = entry.get('movie_name', entry['title'], eval_lazy=False)
            search_result = search.smart_match(search_name)
            if search_result:
                entry['imdb_url'] = search_result['url']
                # store url for this movie, so we don't have to search on every run
                result = SearchResult(entry['title'], entry['imdb_url'])
                session.add(result)
                session.commit()
                log.verbose('Found %s' % (entry['imdb_url']))
            else:
                log_once('IMDB lookup failed for %s' % entry['title'], log, logging.WARN, session=session)
                # store FAIL for this title
                result = SearchResult(entry['title'])
                result.fails = True
                session.add(result)
                session.commit()
                raise plugin.PluginError('Title `%s` lookup failed' % entry['title'])
        # check if this imdb page has been parsed & cached
        movie = session.query(Movie).filter(Movie.url == entry['imdb_url']).first()
        # If we have a movie from cache, we are done
        if movie and not movie.expired:
            entry.update_using_map(self.field_map, movie)
            return
        # Movie was not found in cache, or was expired
        if movie is not None:
            if movie.expired:
                log.verbose('Movie `%s` details expired, refreshing ...' % movie.title)
            # Remove the old movie, we'll store another one later.
            session.query(MovieLanguage).filter(MovieLanguage.movie_id == movie.id).delete()
            session.query(Movie).filter(Movie.url == entry['imdb_url']).delete()
            session.commit()
        # search and store to cache
        if 'title' in entry:
            log.verbose('Parsing imdb for `%s`' % entry['title'])
        else:
            log.verbose('Parsing imdb for `%s`' % entry['imdb_id'])
        try:
            movie = self._parse_new_movie(entry['imdb_url'], session)
        except UnicodeDecodeError:
            log.error('Unable to determine encoding for %s. Installing chardet library may help.' %
                      entry['imdb_url'])
            # store cache so this will not be tried again
            movie = Movie()
            movie.url = entry['imdb_url']
            session.add(movie)
            session.commit()
            raise plugin.PluginError('UnicodeDecodeError')
        except ValueError as e:
            # TODO: might be a little too broad catch, what was this for anyway? ;P
            if manager.options.debug:
                log.exception(e)
            raise plugin.PluginError('Invalid parameter: %s' % entry['imdb_url'], log)
        for att in ['title', 'score', 'votes', 'year', 'genres', 'languages', 'actors', 'directors', 'mpaa_rating']:
            log.trace('movie.%s: %s' % (att, getattr(movie, att)))
        # Update the entry fields
        entry.update_using_map(self.field_map, movie)

    def _parse_new_movie(self, imdb_url, session):
        """
        Get Movie object by parsing imdb page and save movie into the database.

        :param imdb_url: IMDB url
        :param session: Session to be used
        :return: Newly added Movie
        """
        parser = ImdbParser()
        parser.parse(imdb_url)
        # store to database
        movie = Movie()
        movie.photo = parser.photo
        movie.title = parser.name
        movie.original_title = parser.original_name
        movie.score = parser.score
        movie.votes = parser.votes
        movie.year = parser.year
        movie.mpaa_rating = parser.mpaa_rating
        movie.plot_outline = parser.plot_outline
        movie.url = imdb_url
        # reuse existing rows for genres/languages/actors/directors when present
        for name in parser.genres:
            genre = session.query(Genre).filter(Genre.name == name).first()
            if not genre:
                genre = Genre(name)
            movie.genres.append(genre) # pylint:disable=E1101
        for index, name in enumerate(parser.languages):
            language = session.query(Language).filter(Language.name == name).first()
            if not language:
                language = Language(name)
            movie.languages.append(MovieLanguage(language, prominence=index))
        for imdb_id, name in parser.actors.items():
            actor = session.query(Actor).filter(Actor.imdb_id == imdb_id).first()
            if not actor:
                actor = Actor(imdb_id, name)
            movie.actors.append(actor) # pylint:disable=E1101
        for imdb_id, name in parser.directors.items():
            director = session.query(Director).filter(Director.imdb_id == imdb_id).first()
            if not director:
                director = Director(imdb_id, name)
            movie.directors.append(director) # pylint:disable=E1101
        # so that we can track how long since we've updated the info later
        movie.updated = datetime.now()
        session.add(movie)
        return movie
@event('plugin.register')
def register_plugin():
    """Register this plugin with flexget as 'imdb_lookup' (plugin API v2)."""
    plugin.register(ImdbLookup, 'imdb_lookup', api_ver=2)
| {
"content_hash": "35e961846aa1b5dd84cbd943cf2fbfbf",
"timestamp": "",
"source": "github",
"line_count": 447,
"max_line_length": 116,
"avg_line_length": 37.28635346756152,
"alnum_prop": 0.6046679066418672,
"repo_name": "qvazzler/Flexget",
"id": "6a4134bb8fe60d13e1db13d22b8a76fec3f388ef",
"size": "16667",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "flexget/plugins/metainfo/imdb_lookup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5275"
},
{
"name": "HTML",
"bytes": "33930"
},
{
"name": "JavaScript",
"bytes": "58811"
},
{
"name": "Python",
"bytes": "2428468"
}
],
"symlink_target": ""
} |
import os, sys, io
from . import ffiplatform, model
from .error import VerificationError
from .cffi_opcode import *
# Version tags embedded in the generated module.  VERSION_BASE is the default;
# needs_version() raises it to VERSION_EMBEDDED when embedding is used and to
# VERSION_CHAR16CHAR32 when char16_t/char32_t conversions are emitted.
VERSION_BASE = 0x2601
VERSION_EMBEDDED = 0x2701
VERSION_CHAR16CHAR32 = 0x2801
# True except on win32 with Python 3.0-3.4; when False, the generated C source
# defines _CFFI_NO_LIMITED_API (see write_c_source_to_f)
USE_LIMITED_API = (sys.platform != 'win32' or sys.version_info < (3, 0) or
                   sys.version_info >= (3, 5))
class GlobalExpr:
def __init__(self, name, address, type_op, size=0, check_value=0):
self.name = name
self.address = address
self.type_op = type_op
self.size = size
self.check_value = check_value
def as_c_expr(self):
return ' { "%s", (void *)%s, %s, (void *)%s },' % (
self.name, self.address, self.type_op.as_c_expr(), self.size)
def as_python_expr(self):
return "b'%s%s',%d" % (self.type_op.as_python_bytes(), self.name,
self.check_value)
class FieldExpr:
    """Descriptor for one struct/union field in the generated tables."""
    def __init__(self, name, field_offset, field_size, fbitsize, field_type_op):
        self.name = name
        self.field_offset = field_offset
        self.field_size = field_size
        self.fbitsize = fbitsize
        self.field_type_op = field_type_op

    def as_c_expr(self):
        # align the continuation lines under the field name for readability
        spaces = " " * len(self.name)
        return (' { "%s", %s,\n' % (self.name, self.field_offset) +
                ' %s %s,\n' % (spaces, self.field_size) +
                ' %s %s },' % (spaces, self.field_type_op.as_c_expr()))

    def as_python_expr(self):
        # fields are never emitted standalone in the Python form; they are
        # embedded inside their struct's entry via as_field_python_expr()
        raise NotImplementedError

    def as_field_python_expr(self):
        """Return the compact bytes form embedded in the owning struct's entry."""
        if self.field_type_op.op == OP_NOOP:
            size_expr = ''
        elif self.field_type_op.op == OP_BITFIELD:
            # bitfields additionally encode their bit size
            size_expr = format_four_bytes(self.fbitsize)
        else:
            raise NotImplementedError
        return "b'%s%s%s'" % (self.field_type_op.as_python_bytes(),
                              size_expr,
                              self.name)
class StructUnionExpr:
def __init__(self, name, type_index, flags, size, alignment, comment,
first_field_index, c_fields):
self.name = name
self.type_index = type_index
self.flags = flags
self.size = size
self.alignment = alignment
self.comment = comment
self.first_field_index = first_field_index
self.c_fields = c_fields
def as_c_expr(self):
return (' { "%s", %d, %s,' % (self.name, self.type_index, self.flags)
+ '\n %s, %s, ' % (self.size, self.alignment)
+ '%d, %d ' % (self.first_field_index, len(self.c_fields))
+ ('/* %s */ ' % self.comment if self.comment else '')
+ '},')
def as_python_expr(self):
flags = eval(self.flags, G_FLAGS)
fields_expr = [c_field.as_field_python_expr()
for c_field in self.c_fields]
return "(b'%s%s%s',%s)" % (
format_four_bytes(self.type_index),
format_four_bytes(flags),
self.name,
','.join(fields_expr))
class EnumExpr:
    """Descriptor for one entry of the generated '_cffi_enums' table."""
    def __init__(self, name, type_index, size, signed, allenums):
        self.name = name
        self.type_index = type_index
        self.size = size
        self.signed = signed
        self.allenums = allenums

    def as_c_expr(self):
        # the underlying integer type is encoded via the C macro
        # _cffi_prim_int(size, signed)
        return (' { "%s", %d, _cffi_prim_int(%s, %s),\n'
                ' "%s" },' % (self.name, self.type_index,
                              self.size, self.signed, self.allenums))

    def as_python_expr(self):
        # map (byte size, signedness) to the matching primitive type opcode
        prim_index = {
            (1, 0): PRIM_UINT8, (1, 1): PRIM_INT8,
            (2, 0): PRIM_UINT16, (2, 1): PRIM_INT16,
            (4, 0): PRIM_UINT32, (4, 1): PRIM_INT32,
            (8, 0): PRIM_UINT64, (8, 1): PRIM_INT64,
        }[self.size, self.signed]
        # compact form: type index, primitive index, name, NUL, enum values
        return "b'%s%s%s\\x00%s'" % (format_four_bytes(self.type_index),
                                     format_four_bytes(prim_index),
                                     self.name, self.allenums)
class TypenameExpr:
    """Descriptor for one entry of the generated '_cffi_typenames' table."""
    def __init__(self, name, type_index):
        self.name = name
        self.type_index = type_index

    def as_c_expr(self):
        return ' { "%s", %d },' % (self.name, self.type_index)

    def as_python_expr(self):
        # compact form: 4-byte type index followed by the typedef name
        return "b'%s%s'" % (format_four_bytes(self.type_index), self.name)
# ____________________________________________________________
class Recompiler:
_num_externpy = 0
    def __init__(self, ffi, module_name, target_is_python=False):
        """
        :param ffi: FFI instance whose declarations are recompiled
        :param module_name: full dotted name of the module being generated
        :param target_is_python: True to emit a pure Python module instead of C
        """
        self.ffi = ffi
        self.module_name = module_name
        self.target_is_python = target_is_python
        # raised by needs_version() as version-gated features are emitted
        self._version = VERSION_BASE
def needs_version(self, ver):
self._version = max(self._version, ver)
    def collect_type_table(self):
        """Number every collected type and build the opcode table.

        Fills self._typesdict (type -> index into the opcode table),
        self.cffi_types (the table itself, finalized as a tuple), and
        assigns stable indexes to all structs/unions (self._struct_unions)
        and enums (self._enums).
        """
        self._typesdict = {}
        self._generate("collecttype")
        #
        all_decls = sorted(self._typesdict, key=str)
        #
        # prepare all FUNCTION bytecode sequences first
        self.cffi_types = []
        for tp in all_decls:
            if tp.is_raw_function:
                assert self._typesdict[tp] is None
                self._typesdict[tp] = len(self.cffi_types)
                self.cffi_types.append(tp) # placeholder
                for tp1 in tp.args:
                    assert isinstance(tp1, (model.VoidType,
                                            model.BasePrimitiveType,
                                            model.PointerType,
                                            model.StructOrUnionOrEnum,
                                            model.FunctionPtrType))
                    if self._typesdict[tp1] is None:
                        self._typesdict[tp1] = len(self.cffi_types)
                    self.cffi_types.append(tp1) # placeholder
                self.cffi_types.append('END') # placeholder
        #
        # prepare all OTHER bytecode sequences
        for tp in all_decls:
            if not tp.is_raw_function and self._typesdict[tp] is None:
                self._typesdict[tp] = len(self.cffi_types)
                self.cffi_types.append(tp) # placeholder
                if tp.is_array_type and tp.length is not None:
                    self.cffi_types.append('LEN') # placeholder
        assert None not in self._typesdict.values()
        #
        # collect all structs and unions and enums
        self._struct_unions = {}
        self._enums = {}
        for tp in all_decls:
            if isinstance(tp, model.StructOrUnion):
                self._struct_unions[tp] = None
            elif isinstance(tp, model.EnumType):
                self._enums[tp] = None
        # number them in sorted-by-name order so the output is deterministic
        for i, tp in enumerate(sorted(self._struct_unions,
                                      key=lambda tp: tp.name)):
            self._struct_unions[tp] = i
        for i, tp in enumerate(sorted(self._enums,
                                      key=lambda tp: tp.name)):
            self._enums[tp] = i
        #
        # emit all bytecode sequences now
        for tp in all_decls:
            method = getattr(self, '_emit_bytecode_' + tp.__class__.__name__)
            method(tp, self._typesdict[tp])
        #
        # consistency check
        for op in self.cffi_types:
            assert isinstance(op, CffiOp)
        self.cffi_types = tuple(self.cffi_types) # don't change any more
    def _do_collect_type(self, tp):
        """Recursively register 'tp' and its component types in _typesdict."""
        if not isinstance(tp, model.BaseTypeByIdentity):
            # tuples of types (e.g. function args) are walked element-wise
            if isinstance(tp, tuple):
                for x in tp:
                    self._do_collect_type(x)
            return
        if tp not in self._typesdict:
            self._typesdict[tp] = None   # placeholder; indexed later
            if isinstance(tp, model.FunctionPtrType):
                self._do_collect_type(tp.as_raw_function())
            elif isinstance(tp, model.StructOrUnion):
                # only walk fields of structs declared here (not included ones)
                if tp.fldtypes is not None and (
                        tp not in self.ffi._parser._included_declarations):
                    for name1, tp1, _, _ in tp.enumfields():
                        self._do_collect_type(self._field_type(tp, name1, tp1))
            else:
                for _, x in tp._get_items():
                    self._do_collect_type(x)
def _generate(self, step_name):
lst = self.ffi._parser._declarations.items()
for name, (tp, quals) in sorted(lst):
kind, realname = name.split(' ', 1)
try:
method = getattr(self, '_generate_cpy_%s_%s' % (kind,
step_name))
except AttributeError:
raise VerificationError(
"not implemented in recompile(): %r" % name)
try:
self._current_quals = quals
method(tp, realname)
except Exception as e:
model.attach_exception_info(e, name)
raise
# ----------
ALL_STEPS = ["global", "field", "struct_union", "enum", "typename"]
    def collect_step_tables(self):
        """Build the per-step entry lists used to emit the module's tables."""
        # collect the declarations for '_cffi_globals', '_cffi_typenames', etc.
        self._lsts = {}
        for step_name in self.ALL_STEPS:
            self._lsts[step_name] = []
        self._seen_struct_unions = set()
        self._generate("ctx")
        self._add_missing_struct_unions()
        #
        for step_name in self.ALL_STEPS:
            lst = self._lsts[step_name]
            # "field" entries must keep their struct-relative order
            if step_name != "field":
                lst.sort(key=lambda entry: entry.name)
            self._lsts[step_name] = tuple(lst) # don't change any more
        #
        # check for a possible internal inconsistency: _cffi_struct_unions
        # should have been generated with exactly self._struct_unions
        lst = self._lsts["struct_union"]
        for tp, i in self._struct_unions.items():
            assert i < len(lst)
            assert lst[i].name == tp.name
        assert len(lst) == len(self._struct_unions)
        # same with enums
        lst = self._lsts["enum"]
        for tp, i in self._enums.items():
            assert i < len(lst)
            assert lst[i].name == tp.name
        assert len(lst) == len(self._enums)
# ----------
def _prnt(self, what=''):
self._f.write(what + '\n')
def write_source_to_f(self, f, preamble):
if self.target_is_python:
assert preamble is None
self.write_py_source_to_f(f)
else:
assert preamble is not None
self.write_c_source_to_f(f, preamble)
def _rel_readlines(self, filename):
g = open(os.path.join(os.path.dirname(__file__), filename), 'r')
lines = g.readlines()
g.close()
return lines
    def write_c_source_to_f(self, f, preamble):
        """Emit the full C source of the generated extension module to 'f'.

        'preamble' is the user's C source from set_source(); it is pasted
        verbatim between the inlined cffi headers and the generated tables.
        """
        self._f = f
        prnt = self._prnt
        if self.ffi._embedding is not None:
            prnt('#define _CFFI_USE_EMBEDDING')
        if not USE_LIMITED_API:
            prnt('#define _CFFI_NO_LIMITED_API')
        #
        # first the '#include' (actually done by inlining the file's content)
        lines = self._rel_readlines('_cffi_include.h')
        i = lines.index('#include "parse_c_type.h"\n')
        lines[i:i+1] = self._rel_readlines('parse_c_type.h')
        prnt(''.join(lines))
        #
        # if we have ffi._embedding != None, we give it here as a macro
        # and include an extra file
        base_module_name = self.module_name.split('.')[-1]
        if self.ffi._embedding is not None:
            prnt('#define _CFFI_MODULE_NAME "%s"' % (self.module_name,))
            prnt('static const char _CFFI_PYTHON_STARTUP_CODE[] = {')
            self._print_string_literal_in_array(self.ffi._embedding)
            prnt('0 };')
            prnt('#ifdef PYPY_VERSION')
            prnt('# define _CFFI_PYTHON_STARTUP_FUNC _cffi_pypyinit_%s' % (
                base_module_name,))
            prnt('#elif PY_MAJOR_VERSION >= 3')
            prnt('# define _CFFI_PYTHON_STARTUP_FUNC PyInit_%s' % (
                base_module_name,))
            prnt('#else')
            prnt('# define _CFFI_PYTHON_STARTUP_FUNC init%s' % (
                base_module_name,))
            prnt('#endif')
            lines = self._rel_readlines('_embedding.h')
            i = lines.index('#include "_cffi_errors.h"\n')
            lines[i:i+1] = self._rel_readlines('_cffi_errors.h')
            prnt(''.join(lines))
            self.needs_version(VERSION_EMBEDDED)
        #
        # then paste the C source given by the user, verbatim.
        prnt('/************************************************************/')
        prnt()
        prnt(preamble)
        prnt()
        prnt('/************************************************************/')
        prnt()
        #
        # the declaration of '_cffi_types'
        prnt('static void *_cffi_types[] = {')
        typeindex2type = dict([(i, tp) for (tp, i) in self._typesdict.items()])
        for i, op in enumerate(self.cffi_types):
            comment = ''
            if i in typeindex2type:
                comment = ' // ' + typeindex2type[i]._get_c_name()
            prnt('/* %2d */ %s,%s' % (i, op.as_c_expr(), comment))
        if not self.cffi_types:
            prnt(' 0')
        prnt('};')
        prnt()
        #
        # call generate_cpy_xxx_decl(), for every xxx found from
        # ffi._parser._declarations. This generates all the functions.
        self._seen_constants = set()
        self._generate("decl")
        #
        # the declaration of '_cffi_globals' and '_cffi_typenames'
        nums = {}
        for step_name in self.ALL_STEPS:
            lst = self._lsts[step_name]
            nums[step_name] = len(lst)
            if nums[step_name] > 0:
                prnt('static const struct _cffi_%s_s _cffi_%ss[] = {' % (
                    step_name, step_name))
                for entry in lst:
                    prnt(entry.as_c_expr())
                prnt('};')
                prnt()
        #
        # the declaration of '_cffi_includes'
        if self.ffi._included_ffis:
            prnt('static const char * const _cffi_includes[] = {')
            for ffi_to_include in self.ffi._included_ffis:
                try:
                    included_module_name, included_source = (
                        ffi_to_include._assigned_source[:2])
                except AttributeError:
                    raise VerificationError(
                        "ffi object %r includes %r, but the latter has not "
                        "been prepared with set_source()" % (
                            self.ffi, ffi_to_include,))
                if included_source is None:
                    raise VerificationError(
                        "not implemented yet: ffi.include() of a Python-based "
                        "ffi inside a C-based ffi")
                prnt(' "%s",' % (included_module_name,))
            prnt(' NULL')
            prnt('};')
            prnt()
        #
        # the declaration of '_cffi_type_context'
        prnt('static const struct _cffi_type_context_s _cffi_type_context = {')
        prnt(' _cffi_types,')
        for step_name in self.ALL_STEPS:
            if nums[step_name] > 0:
                prnt(' _cffi_%ss,' % step_name)
            else:
                prnt(' NULL, /* no %ss */' % step_name)
        for step_name in self.ALL_STEPS:
            if step_name != "field":
                prnt(' %d, /* num_%ss */' % (nums[step_name], step_name))
        if self.ffi._included_ffis:
            prnt(' _cffi_includes,')
        else:
            prnt(' NULL, /* no includes */')
        prnt(' %d, /* num_types */' % (len(self.cffi_types),))
        flags = 0
        if self._num_externpy:
            flags |= 1 # set to mean that we use extern "Python"
        prnt(' %d, /* flags */' % flags)
        prnt('};')
        prnt()
        #
        # the init function
        prnt('#ifdef __GNUC__')
        prnt('# pragma GCC visibility push(default) /* for -fvisibility= */')
        prnt('#endif')
        prnt()
        prnt('#ifdef PYPY_VERSION')
        prnt('PyMODINIT_FUNC')
        prnt('_cffi_pypyinit_%s(const void *p[])' % (base_module_name,))
        prnt('{')
        if self._num_externpy:
            prnt(' if (((intptr_t)p[0]) >= 0x0A03) {')
            prnt(' _cffi_call_python_org = '
                 '(void(*)(struct _cffi_externpy_s *, char *))p[1];')
            prnt(' }')
        prnt(' p[0] = (const void *)0x%x;' % self._version)
        prnt(' p[1] = &_cffi_type_context;')
        prnt('#if PY_MAJOR_VERSION >= 3')
        prnt(' return NULL;')
        prnt('#endif')
        prnt('}')
        # on Windows, distutils insists on putting init_cffi_xyz in
        # 'export_symbols', so instead of fighting it, just give up and
        # give it one
        prnt('# ifdef _MSC_VER')
        prnt(' PyMODINIT_FUNC')
        prnt('# if PY_MAJOR_VERSION >= 3')
        prnt(' PyInit_%s(void) { return NULL; }' % (base_module_name,))
        prnt('# else')
        prnt(' init%s(void) { }' % (base_module_name,))
        prnt('# endif')
        prnt('# endif')
        prnt('#elif PY_MAJOR_VERSION >= 3')
        prnt('PyMODINIT_FUNC')
        prnt('PyInit_%s(void)' % (base_module_name,))
        prnt('{')
        prnt(' return _cffi_init("%s", 0x%x, &_cffi_type_context);' % (
            self.module_name, self._version))
        prnt('}')
        prnt('#else')
        prnt('PyMODINIT_FUNC')
        prnt('init%s(void)' % (base_module_name,))
        prnt('{')
        prnt(' _cffi_init("%s", 0x%x, &_cffi_type_context);' % (
            self.module_name, self._version))
        prnt('}')
        prnt('#endif')
        prnt()
        prnt('#ifdef __GNUC__')
        prnt('# pragma GCC visibility pop')
        prnt('#endif')
        # version has been emitted; later needs_version() calls would be bugs
        self._version = None
def _to_py(self, x):
if isinstance(x, str):
return "b'%s'" % (x,)
if isinstance(x, (list, tuple)):
rep = [self._to_py(item) for item in x]
if len(rep) == 1:
rep.append('')
return "(%s)" % (','.join(rep),)
return x.as_python_expr() # Py2: unicode unexpected; Py3: bytes unexp.
    def write_py_source_to_f(self, f):
        """Emit the pure Python form of the generated module to 'f'."""
        self._f = f
        prnt = self._prnt
        #
        # header
        prnt("# auto-generated file")
        prnt("import _cffi_backend")
        #
        # the 'import' of the included ffis
        num_includes = len(self.ffi._included_ffis or ())
        for i in range(num_includes):
            ffi_to_include = self.ffi._included_ffis[i]
            try:
                included_module_name, included_source = (
                    ffi_to_include._assigned_source[:2])
            except AttributeError:
                raise VerificationError(
                    "ffi object %r includes %r, but the latter has not "
                    "been prepared with set_source()" % (
                        self.ffi, ffi_to_include,))
            if included_source is not None:
                raise VerificationError(
                    "not implemented yet: ffi.include() of a C-based "
                    "ffi inside a Python-based ffi")
            prnt('from %s import ffi as _ffi%d' % (included_module_name, i))
        prnt()
        prnt("ffi = _cffi_backend.FFI('%s'," % (self.module_name,))
        prnt(" _version = 0x%x," % (self._version,))
        # version emitted; later needs_version() calls would be bugs
        self._version = None
        #
        # the '_types' keyword argument
        self.cffi_types = tuple(self.cffi_types) # don't change any more
        types_lst = [op.as_python_bytes() for op in self.cffi_types]
        prnt(' _types = %s,' % (self._to_py(''.join(types_lst)),))
        # not used below in this method; built for parity with the C writer
        typeindex2type = dict([(i, tp) for (tp, i) in self._typesdict.items()])
        #
        # the keyword arguments from ALL_STEPS
        for step_name in self.ALL_STEPS:
            lst = self._lsts[step_name]
            if len(lst) > 0 and step_name != "field":
                prnt(' _%ss = %s,' % (step_name, self._to_py(lst)))
        #
        # the '_includes' keyword argument
        if num_includes > 0:
            prnt(' _includes = (%s,),' % (
                ', '.join(['_ffi%d' % i for i in range(num_includes)]),))
        #
        # the footer
        prnt(')')
# ----------
def _gettypenum(self, type):
# a KeyError here is a bug. please report it! :-)
return self._typesdict[type]
    def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode):
        """Emit C code converting PyObject* 'fromvar' into C variable 'tovar'.

        'tp' is the target C type; on conversion failure the emitted code
        executes 'errcode' (e.g. 'return NULL').
        """
        extraarg = ''
        if isinstance(tp, model.BasePrimitiveType) and not tp.is_complex_type():
            if tp.is_integer_type() and tp.name != '_Bool':
                converter = '_cffi_to_c_int'
                extraarg = ', %s' % tp.name
            elif isinstance(tp, model.UnknownFloatType):
                # don't check with is_float_type(): it may be a 'long
                # double' here, and _cffi_to_c_double would loose precision
                converter = '(%s)_cffi_to_c_double' % (tp.get_c_name(''),)
            else:
                cname = tp.get_c_name('')
                converter = '(%s)_cffi_to_c_%s' % (cname,
                                                   tp.name.replace(' ', '_'))
                if cname in ('char16_t', 'char32_t'):
                    # these conversion helpers only exist from this version on
                    self.needs_version(VERSION_CHAR16CHAR32)
            errvalue = '-1'
        #
        elif isinstance(tp, model.PointerType):
            # pointers/arrays have their own, more involved conversion path
            self._convert_funcarg_to_c_ptr_or_array(tp, fromvar,
                                                    tovar, errcode)
            return
        #
        elif (isinstance(tp, model.StructOrUnionOrEnum) or
              isinstance(tp, model.BasePrimitiveType)):
            # a struct (not a struct pointer) as a function argument;
            # or, a complex (the same code works)
            self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)'
                       % (tovar, self._gettypenum(tp), fromvar))
            self._prnt(' %s;' % errcode)
            return
        #
        elif isinstance(tp, model.FunctionPtrType):
            converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('')
            extraarg = ', _cffi_type(%d)' % self._gettypenum(tp)
            errvalue = 'NULL'
        #
        else:
            raise NotImplementedError(tp)
        #
        # common tail: call the converter, then detect the error sentinel
        # combined with a pending Python exception
        self._prnt(' %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg))
        self._prnt(' if (%s == (%s)%s && PyErr_Occurred())' % (
            tovar, tp.get_c_name(''), errvalue))
        self._prnt(' %s;' % errcode)
def _extra_local_variables(self, tp, localvars, freelines):
if isinstance(tp, model.PointerType):
localvars.add('Py_ssize_t datasize')
localvars.add('struct _cffi_freeme_s *large_args_free = NULL')
freelines.add('if (large_args_free != NULL)'
' _cffi_free_array_arguments(large_args_free);')
    def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode):
        """Emit C code converting 'fromvar' into the pointer/array 'tovar'.

        The emitted code stack-allocates small arguments (<= 640 bytes) with
        alloca(); anything larger is passed through
        _cffi_convert_array_argument() together with 'large_args_free'
        (released by the cleanup line from _extra_local_variables).
        """
        self._prnt(' datasize = _cffi_prepare_pointer_call_argument(')
        self._prnt(' _cffi_type(%d), %s, (char **)&%s);' % (
            self._gettypenum(tp), fromvar, tovar))
        self._prnt(' if (datasize != 0) {')
        self._prnt(' %s = ((size_t)datasize) <= 640 ? '
                   '(%s)alloca((size_t)datasize) : NULL;' % (
            tovar, tp.get_c_name('')))
        self._prnt(' if (_cffi_convert_array_argument(_cffi_type(%d), %s, '
                   '(char **)&%s,' % (self._gettypenum(tp), fromvar, tovar))
        self._prnt(' datasize, &large_args_free) < 0)')
        self._prnt(' %s;' % errcode)
        self._prnt(' }')
    def _convert_expr_from_c(self, tp, var, context):
        """Return a C expression converting C value 'var' of type 'tp' to a PyObject*.

        'context' is only used in the error message for opaque structs.
        """
        if isinstance(tp, model.BasePrimitiveType):
            if tp.is_integer_type() and tp.name != '_Bool':
                return '_cffi_from_c_int(%s, %s)' % (var, tp.name)
            elif isinstance(tp, model.UnknownFloatType):
                return '_cffi_from_c_double(%s)' % (var,)
            elif tp.name != 'long double' and not tp.is_complex_type():
                cname = tp.name.replace(' ', '_')
                if cname in ('char16_t', 'char32_t'):
                    # these conversion helpers only exist from this version on
                    self.needs_version(VERSION_CHAR16CHAR32)
                return '_cffi_from_c_%s(%s)' % (cname, var)
            else:
                # long double / complex: go through the generic deref path
                return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
                    var, self._gettypenum(tp))
        elif isinstance(tp, (model.PointerType, model.FunctionPtrType)):
            return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
                var, self._gettypenum(tp))
        elif isinstance(tp, model.ArrayType):
            # arrays decay to a pointer to their item type
            return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
                var, self._gettypenum(model.PointerType(tp.item)))
        elif isinstance(tp, model.StructOrUnion):
            if tp.fldnames is None:
                raise TypeError("'%s' is used as %s, but is opaque" % (
                    tp._get_c_name(), context))
            return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % (
                var, self._gettypenum(tp))
        elif isinstance(tp, model.EnumType):
            return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
                var, self._gettypenum(tp))
        else:
            raise NotImplementedError(tp)
# ----------
# typedefs
    def _typedef_type(self, tp, name):
        # resolve the typedef's real type via the C expression '(*(name *)0)'
        return self._global_type(tp, "(*(%s *)0)" % (name,))
    def _generate_cpy_typedef_collecttype(self, tp, name):
        # "collecttype" step: register the typedef's underlying type
        self._do_collect_type(self._typedef_type(tp, name))
    def _generate_cpy_typedef_decl(self, tp, name):
        # "decl" step: typedefs need no C code of their own
        pass
    def _typedef_ctx(self, tp, name):
        # append the typedef to the '_cffi_typenames' table
        type_index = self._typesdict[tp]
        self._lsts["typename"].append(TypenameExpr(name, type_index))
    def _generate_cpy_typedef_ctx(self, tp, name):
        """Emit the typename ctx entry for this typedef; additionally,
        a typedef naming an unknown type, or a named pointer to a
        struct, must also emit the struct/union entry it refers to."""
        tp = self._typedef_type(tp, name)
        self._typedef_ctx(tp, name)
        if getattr(tp, "origin", None) == "unknown_type":
            # 'typedef ... foo_t;' -- the struct has no name of its own
            self._struct_ctx(tp, tp.name, approxname=None)
        elif isinstance(tp, model.NamedPointerType):
            # 'typedef struct s *p_t;' -- name the struct via the typedef
            self._struct_ctx(tp.totype, tp.totype.name, approxname=tp.name,
                             named_ptr=tp)
# ----------
# function declarations
    def _generate_cpy_function_collecttype(self, tp, name):
        """Collect the raw (non-pointer) function type; for varargs
        functions in API mode, also the full function pointer type."""
        self._do_collect_type(tp.as_raw_function())
        if tp.ellipsis and not self.target_is_python:
            self._do_collect_type(tp)
    def _generate_cpy_function_decl(self, tp, name):
        """Emit the C source for one exported function 'name' of type 'tp'.

        Generates:
          * _cffi_d_<name>: a plain direct-call wrapper, used by
            addressof(lib, 'name');
          * on CPython, _cffi_f_<name>: a METH_NOARGS/METH_O/METH_VARARGS
            builtin that converts PyObject arguments, calls the function
            with the GIL released, and converts the result back;
          * on PyPy (the '#else' branch), _cffi_f_<name> passes
            struct/union/complex arguments and results by pointer, or is
            simply #define'd to _cffi_d_<name> when no indirection is
            needed.
        Varargs functions cannot get a wrapper and are emitted as plain
        constant function pointers instead.
        """
        assert not self.target_is_python
        assert isinstance(tp, model.FunctionPtrType)
        if tp.ellipsis:
            # cannot support vararg functions better than this: check for its
            # exact type (including the fixed arguments), and build it as a
            # constant function pointer (no CPython wrapper)
            self._generate_cpy_constant_decl(tp, name)
            return
        prnt = self._prnt
        numargs = len(tp.args)
        # argument name mirrors the METH_* calling convention chosen in
        # _generate_cpy_function_ctx
        if numargs == 0:
            argname = 'noarg'
        elif numargs == 1:
            argname = 'arg0'
        else:
            argname = 'args'
        #
        # ------------------------------
        # the 'd' version of the function, only for addressof(lib, 'func')
        arguments = []
        call_arguments = []
        context = 'argument of %s' % name
        for i, type in enumerate(tp.args):
            arguments.append(type.get_c_name(' x%d' % i, context))
            call_arguments.append('x%d' % i)
        repr_arguments = ', '.join(arguments)
        repr_arguments = repr_arguments or 'void'
        if tp.abi:
            abi = tp.abi + ' '
        else:
            abi = ''
        name_and_arguments = '%s_cffi_d_%s(%s)' % (abi, name, repr_arguments)
        prnt('static %s' % (tp.result.get_c_name(name_and_arguments),))
        prnt('{')
        call_arguments = ', '.join(call_arguments)
        result_code = 'return '
        if isinstance(tp.result, model.VoidType):
            result_code = ''
        prnt('  %s%s(%s);' % (result_code, name, call_arguments))
        prnt('}')
        #
        prnt('#ifndef PYPY_VERSION')        # ------------------------------
        #
        # the CPython wrapper: a METH_* builtin function
        prnt('static PyObject *')
        prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname))
        prnt('{')
        #
        # one local C variable per argument
        context = 'argument of %s' % name
        for i, type in enumerate(tp.args):
            arg = type.get_c_name(' x%d' % i, context)
            prnt('  %s;' % arg)
        #
        # extra locals (e.g. 'datasize') and matching free() lines for
        # array arguments converted via _cffi_convert_array_argument
        localvars = set()
        freelines = set()
        for type in tp.args:
            self._extra_local_variables(type, localvars, freelines)
        for decl in sorted(localvars):
            prnt('  %s;' % (decl,))
        #
        if not isinstance(tp.result, model.VoidType):
            result_code = 'result = '
            context = 'result of %s' % name
            result_decl = '  %s;' % tp.result.get_c_name(' result', context)
            prnt(result_decl)
            prnt('  PyObject *pyresult;')
        else:
            result_decl = None
            result_code = ''
        #
        if len(tp.args) > 1:
            # METH_VARARGS: unpack the argument tuple first
            rng = range(len(tp.args))
            for i in rng:
                prnt('  PyObject *arg%d;' % i)
            prnt()
            prnt('  if (!PyArg_UnpackTuple(args, "%s", %d, %d, %s))' % (
                name, len(rng), len(rng),
                ', '.join(['&arg%d' % i for i in rng])))
            prnt('    return NULL;')
            prnt()
        #
        for i, type in enumerate(tp.args):
            self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i,
                                       'return NULL')
            prnt()
        #
        # call the real function with the GIL released and errno swapped in
        prnt('  Py_BEGIN_ALLOW_THREADS')
        prnt('  _cffi_restore_errno();')
        call_arguments = ['x%d' % i for i in range(len(tp.args))]
        call_arguments = ', '.join(call_arguments)
        prnt('  { %s%s(%s); }' % (result_code, name, call_arguments))
        prnt('  _cffi_save_errno();')
        prnt('  Py_END_ALLOW_THREADS')
        prnt()
        #
        prnt('  (void)self; /* unused */')
        if numargs == 0:
            prnt('  (void)noarg; /* unused */')
        if result_code:
            prnt('  pyresult = %s;' %
                 self._convert_expr_from_c(tp.result, 'result', 'result type'))
            for freeline in freelines:
                prnt('  ' + freeline)
            prnt('  return pyresult;')
        else:
            for freeline in freelines:
                prnt('  ' + freeline)
            prnt('  Py_INCREF(Py_None);')
            prnt('  return Py_None;')
        prnt('}')
        #
        prnt('#else')        # ------------------------------
        #
        # the PyPy version: need to replace struct/union arguments with
        # pointers, and if the result is a struct/union, insert a first
        # arg that is a pointer to the result.  We also do that for
        # complex args and return type.
        def need_indirection(type):
            return (isinstance(type, model.StructOrUnion) or
                    (isinstance(type, model.PrimitiveType) and
                     type.is_complex_type()))
        difference = False
        arguments = []
        call_arguments = []
        context = 'argument of %s' % name
        for i, type in enumerate(tp.args):
            indirection = ''
            if need_indirection(type):
                indirection = '*'
                difference = True
            arg = type.get_c_name(' %sx%d' % (indirection, i), context)
            arguments.append(arg)
            call_arguments.append('%sx%d' % (indirection, i))
        tp_result = tp.result
        if need_indirection(tp_result):
            context = 'result of %s' % name
            arg = tp_result.get_c_name(' *result', context)
            arguments.insert(0, arg)
            tp_result = model.void_type
            result_decl = None
            result_code = '*result = '
            difference = True
        if difference:
            # at least one indirection was added: emit a real wrapper
            repr_arguments = ', '.join(arguments)
            repr_arguments = repr_arguments or 'void'
            name_and_arguments = '%s_cffi_f_%s(%s)' % (abi, name,
                                                       repr_arguments)
            prnt('static %s' % (tp_result.get_c_name(name_and_arguments),))
            prnt('{')
            if result_decl:
                prnt(result_decl)
            call_arguments = ', '.join(call_arguments)
            prnt('  { %s%s(%s); }' % (result_code, name, call_arguments))
            if result_decl:
                prnt('  return result;')
            prnt('}')
        else:
            # no indirection needed: the direct wrapper does the job
            prnt('#  define _cffi_f_%s _cffi_d_%s' % (name, name))
        #
        prnt('#endif')        # ------------------------------
        prnt()
    def _generate_cpy_function_ctx(self, tp, name):
        """Append the "global" ctx entry for function 'name', choosing
        the METH_* calling convention from the argument count."""
        if tp.ellipsis and not self.target_is_python:
            # varargs in API mode were emitted as constants (see
            # _generate_cpy_function_decl)
            self._generate_cpy_constant_ctx(tp, name)
            return
        type_index = self._typesdict[tp.as_raw_function()]
        numargs = len(tp.args)
        if self.target_is_python:
            meth_kind = OP_DLOPEN_FUNC
        elif numargs == 0:
            meth_kind = OP_CPYTHON_BLTN_N   # 'METH_NOARGS'
        elif numargs == 1:
            meth_kind = OP_CPYTHON_BLTN_O   # 'METH_O'
        else:
            meth_kind = OP_CPYTHON_BLTN_V   # 'METH_VARARGS'
        self._lsts["global"].append(
            GlobalExpr(name, '_cffi_f_%s' % name,
                       CffiOp(meth_kind, type_index),
                       size='_cffi_d_%s' % name))
# ----------
# named structs or unions
    def _field_type(self, tp_struct, field_name, tp_field):
        """Return 'tp_field' with any '...' array length replaced by a
        compile-time _cffi_array_len() expression measuring the real
        field; recurses into nested array item types."""
        if isinstance(tp_field, model.ArrayType):
            actual_length = tp_field.length
            if actual_length == '...':
                ptr_struct_name = tp_struct.get_c_name('*')
                actual_length = '_cffi_array_len(((%s)0)->%s)' % (
                    ptr_struct_name, field_name)
            # recurse with 'field[0]' so nested '...' lengths measure
            # the correct sub-array
            tp_item = self._field_type(tp_struct, '%s[0]' % field_name,
                                       tp_field.item)
            tp_field = model.ArrayType(tp_item, actual_length)
        return tp_field
    def _struct_collecttype(self, tp):
        """Collect a struct/union type; in ABI mode, also recursively
        collect nested anonymous struct/union field types."""
        self._do_collect_type(tp)
        if self.target_is_python:
            # also requires nested anon struct/unions in ABI mode, recursively
            for fldtype in tp.anonymous_struct_fields():
                self._struct_collecttype(fldtype)
    def _struct_decl(self, tp, cname, approxname):
        """Emit compile-time checking code for struct/union 'tp':
        a _cffi_checkfld_* function whose body triggers compiler
        warnings/errors if the cdef'd field types disagree with the
        real ones, plus a _cffi_align_* struct used to measure the
        type's alignment.  Opaque types emit nothing."""
        if tp.fldtypes is None:
            return
        prnt = self._prnt
        checkfuncname = '_cffi_checkfld_%s' % (approxname,)
        prnt('_CFFI_UNUSED_FN')
        prnt('static void %s(%s *p)' % (checkfuncname, cname))
        prnt('{')
        prnt('  /* only to generate compile-time warnings or errors */')
        prnt('  (void)p;')
        for fname, ftype, fbitsize, fqual in tp.enumfields():
            try:
                if ftype.is_integer_type() or fbitsize >= 0:
                    # accept all integers, but complain on float or double
                    if fname != '':
                        prnt("  (void)((p->%s) | 0);  /* check that '%s.%s' is "
                             "an integer */" % (fname, cname, fname))
                    continue
                # only accept exactly the type declared, except that '[]'
                # is interpreted as a '*' and so will match any array length.
                # (It would also match '*', but that's harder to detect...)
                while (isinstance(ftype, model.ArrayType)
                       and (ftype.length is None or ftype.length == '...')):
                    ftype = ftype.item
                    fname = fname + '[0]'
                prnt('  { %s = &p->%s; (void)tmp; }' % (
                    ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual),
                    fname))
            except VerificationError as e:
                prnt('  /* %s */' % str(e))   # cannot verify it, ignore
        prnt('}')
        prnt('struct _cffi_align_%s { char x; %s y; };' % (approxname, cname))
        prnt()
    def _struct_ctx(self, tp, cname, approxname, named_ptr=None):
        """Build the StructUnionExpr ctx entry (and its FieldExprs) for
        struct/union 'tp'.

        'cname' is the real C name, or None for the nameless structs
        handled by _add_missing_struct_unions().  'approxname' matches
        the _cffi_align_* struct emitted by _struct_decl().  'named_ptr'
        is a typedef'd pointer type used to reach the struct in C when
        the struct itself has no usable C name.
        """
        type_index = self._typesdict[tp]
        reason_for_not_expanding = None
        flags = []
        if isinstance(tp, model.UnionType):
            flags.append("_CFFI_F_UNION")
        if tp.fldtypes is None:
            flags.append("_CFFI_F_OPAQUE")
            reason_for_not_expanding = "opaque"
        if (tp not in self.ffi._parser._included_declarations and
                (named_ptr is None or
                 named_ptr not in self.ffi._parser._included_declarations)):
            if tp.fldtypes is None:
                pass    # opaque
            elif tp.partial or any(tp.anonymous_struct_fields()):
                pass    # field layout obtained silently from the C compiler
            else:
                flags.append("_CFFI_F_CHECK_FIELDS")
            if tp.packed:
                if tp.packed > 1:
                    raise NotImplementedError(
                        "%r is declared with 'pack=%r'; only 0 or 1 are "
                        "supported in API mode (try to use \"...;\", which "
                        "does not require a 'pack' declaration)" %
                        (tp, tp.packed))
                flags.append("_CFFI_F_PACKED")
        else:
            # the type comes from an ffi.include(): don't expand it here
            flags.append("_CFFI_F_EXTERNAL")
            reason_for_not_expanding = "external"
        flags = '|'.join(flags) or '0'
        c_fields = []
        if reason_for_not_expanding is None:
            # build one FieldExpr per field, with C expressions for the
            # size and offset evaluated at compile time
            expand_anonymous_struct_union = not self.target_is_python
            enumfields = list(tp.enumfields(expand_anonymous_struct_union))
            for fldname, fldtype, fbitsize, fqual in enumfields:
                fldtype = self._field_type(tp, fldname, fldtype)
                self._check_not_opaque(fldtype,
                                       "field '%s.%s'" % (tp.name, fldname))
                # cname is None for _add_missing_struct_unions() only
                op = OP_NOOP
                if fbitsize >= 0:
                    op = OP_BITFIELD
                    size = '%d /* bits */' % fbitsize
                elif cname is None or (
                        isinstance(fldtype, model.ArrayType) and
                        fldtype.length is None):
                    size = '(size_t)-1'
                else:
                    size = 'sizeof(((%s)0)->%s)' % (
                        tp.get_c_name('*') if named_ptr is None
                                           else named_ptr.name,
                        fldname)
                if cname is None or fbitsize >= 0:
                    offset = '(size_t)-1'
                elif named_ptr is not None:
                    offset = '((char *)&((%s)0)->%s) - (char *)0' % (
                        named_ptr.name, fldname)
                else:
                    offset = 'offsetof(%s, %s)' % (tp.get_c_name(''), fldname)
                c_fields.append(
                    FieldExpr(fldname, offset, size, fbitsize,
                              CffiOp(op, self._typesdict[fldtype])))
            first_field_index = len(self._lsts["field"])
            self._lsts["field"].extend(c_fields)
            #
            if cname is None:  # unknown name, for _add_missing_struct_unions
                size = '(size_t)-2'
                align = -2
                comment = "unnamed"
            else:
                if named_ptr is not None:
                    size = 'sizeof(*(%s)0)' % (named_ptr.name,)
                    align = '-1 /* unknown alignment */'
                else:
                    size = 'sizeof(%s)' % (cname,)
                    align = 'offsetof(struct _cffi_align_%s, y)' % (approxname,)
                comment = None
        else:
            # opaque or external: no field list, size/alignment unknown
            size = '(size_t)-1'
            align = -1
            first_field_index = -1
            comment = reason_for_not_expanding
        self._lsts["struct_union"].append(
            StructUnionExpr(tp.name, type_index, flags, size, align, comment,
                            first_field_index, c_fields))
        self._seen_struct_unions.add(tp)
    def _check_not_opaque(self, tp, location):
        """Raise TypeError if 'tp' (after peeling array levels) is an
        opaque struct/union, i.e. declared without fields in the cdef."""
        while isinstance(tp, model.ArrayType):
            tp = tp.item
        if isinstance(tp, model.StructOrUnion) and tp.fldtypes is None:
            raise TypeError(
                "%s is of an opaque type (not declared in cdef())" % location)
    def _add_missing_struct_unions(self):
        """Emit ctx entries for struct/unions collected in the type
        table but never seen by _struct_ctx() (because they have no
        known C name); they are emitted anonymously, in table order."""
        # not very nice, but some struct declarations might be missing
        # because they don't have any known C name.  Check that they are
        # not partial (we can't complete or verify them!) and emit them
        # anonymously.
        lst = list(self._struct_unions.items())
        lst.sort(key=lambda tp_order: tp_order[1])
        for tp, order in lst:
            if tp not in self._seen_struct_unions:
                if tp.partial:
                    raise NotImplementedError("internal inconsistency: %r is "
                                              "partial but was not seen at "
                                              "this point" % (tp,))
                if tp.name.startswith('$') and tp.name[1:].isdigit():
                    # '$123': parser-generated name for an anonymous type
                    approxname = tp.name[1:]
                elif tp.name == '_IO_FILE' and tp.forcename == 'FILE':
                    # special case: glibc's FILE
                    approxname = 'FILE'
                    self._typedef_ctx(tp, 'FILE')
                else:
                    raise NotImplementedError("internal inconsistency: %r" %
                                              (tp,))
                self._struct_ctx(tp, None, approxname)
    def _generate_cpy_struct_collecttype(self, tp, name):
        """Collect a named struct type (unions share the same logic)."""
        self._struct_collecttype(tp)
    _generate_cpy_union_collecttype = _generate_cpy_struct_collecttype
def _struct_names(self, tp):
cname = tp.get_c_name('')
if ' ' in cname:
return cname, cname.replace(' ', '_')
else:
return cname, '_' + cname
    def _generate_cpy_struct_decl(self, tp, name):
        """Emit the field-checking C code for a named struct/union."""
        self._struct_decl(tp, *self._struct_names(tp))
    _generate_cpy_union_decl = _generate_cpy_struct_decl
    def _generate_cpy_struct_ctx(self, tp, name):
        """Emit the ctx entry for a named struct/union."""
        self._struct_ctx(tp, *self._struct_names(tp))
    _generate_cpy_union_ctx = _generate_cpy_struct_ctx
# ----------
# 'anonymous' declarations. These are produced for anonymous structs
# or unions; the 'name' is obtained by a typedef.
    def _generate_cpy_anonymous_collecttype(self, tp, name):
        """Anonymous declarations: dispatch between enums and
        struct/unions reached only through a typedef name."""
        if isinstance(tp, model.EnumType):
            self._generate_cpy_enum_collecttype(tp, name)
        else:
            self._struct_collecttype(tp)
    def _generate_cpy_anonymous_decl(self, tp, name):
        """Emit C declarations for an anonymous enum or struct/union;
        the struct checker is named after the typedef ('typedef_<name>')."""
        if isinstance(tp, model.EnumType):
            self._generate_cpy_enum_decl(tp)
        else:
            self._struct_decl(tp, name, 'typedef_' + name)
    def _generate_cpy_anonymous_ctx(self, tp, name):
        """Emit the ctx entry for an anonymous enum or struct/union."""
        if isinstance(tp, model.EnumType):
            self._enum_ctx(tp, name)
        else:
            self._struct_ctx(tp, name, 'typedef_' + name)
# ----------
# constants, declared with "static const ..."
    def _generate_cpy_const(self, is_int, name, tp=None, category='const',
                            check_value=None):
        """Emit the C helper _cffi_<category>_<name> that fetches the
        value of a constant at run time.

        Integer constants get an 'int fn(unsigned long long *o)' whose
        return value encodes sign (bit 0) and, if 'check_value' is
        given, a mismatch against the cdef'd value (bit 1).  All other
        constants get a 'void fn(char *o)' that stores the value.
        Raises VerificationError on a duplicate (category, name).
        """
        if (category, name) in self._seen_constants:
            raise VerificationError(
                "duplicate declaration of %s '%s'" % (category, name))
        self._seen_constants.add((category, name))
        #
        prnt = self._prnt
        funcname = '_cffi_%s_%s' % (category, name)
        if is_int:
            prnt('static int %s(unsigned long long *o)' % funcname)
            prnt('{')
            prnt('  int n = (%s) <= 0;' % (name,))
            prnt('  *o = (unsigned long long)((%s) | 0);'
                 '  /* check that %s is an integer */' % (name, name))
            if check_value is not None:
                if check_value > 0:
                    check_value = '%dU' % (check_value,)
                prnt('  if (!_cffi_check_int(*o, n, %s))' % (check_value,))
                prnt('    n |= 2;')
            prnt('  return n;')
            prnt('}')
        else:
            assert check_value is None
            prnt('static void %s(char *o)' % funcname)
            prnt('{')
            prnt('  *(%s)o = %s;' % (tp.get_c_name('*'), name))
            prnt('}')
        prnt()
    def _generate_cpy_constant_collecttype(self, tp, name):
        """Collect the constant's type, except for integer constants in
        API mode (which need no type table entry)."""
        is_int = tp.is_integer_type()
        if not is_int or self.target_is_python:
            self._do_collect_type(tp)
def _generate_cpy_constant_decl(self, tp, name):
is_int = tp.is_integer_type()
self._generate_cpy_const(is_int, name, tp)
    def _generate_cpy_constant_ctx(self, tp, name):
        """Append the "global" ctx entry for a constant.  Integer
        constants in API mode use OP_CONSTANT_INT with no type index."""
        if not self.target_is_python and tp.is_integer_type():
            type_op = CffiOp(OP_CONSTANT_INT, -1)
        else:
            if self.target_is_python:
                const_kind = OP_DLOPEN_CONST
            else:
                const_kind = OP_CONSTANT
            type_index = self._typesdict[tp]
            type_op = CffiOp(const_kind, type_index)
        self._lsts["global"].append(
            GlobalExpr(name, '_cffi_const_%s' % name, type_op))
# ----------
# enums
    def _generate_cpy_enum_collecttype(self, tp, name):
        """Collect the enum type itself."""
        self._do_collect_type(tp)
def _generate_cpy_enum_decl(self, tp, name=None):
for enumerator in tp.enumerators:
self._generate_cpy_const(True, enumerator)
    def _enum_ctx(self, tp, cname):
        """Append the ctx entries for enum 'tp': one "global" entry per
        enumerator (checked against the cdef value) plus one "enum"
        entry recording size/signedness."""
        type_index = self._typesdict[tp]
        type_op = CffiOp(OP_ENUM, -1)
        if self.target_is_python:
            # ABI mode cannot ask the compiler: all values must be known
            tp.check_not_partial()
        for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
            self._lsts["global"].append(
                GlobalExpr(enumerator, '_cffi_const_%s' % enumerator, type_op,
                           check_value=enumvalue))
        #
        if cname is not None and '$' not in cname and not self.target_is_python:
            # API mode with a real C name: measure at compile time
            size = "sizeof(%s)" % cname
            signed = "((%s)-1) <= 0" % cname
        else:
            # anonymous enum or ABI mode: compute from the base int type
            basetp = tp.build_baseinttype(self.ffi, [])
            size = self.ffi.sizeof(basetp)
            signed = int(int(self.ffi.cast(basetp, -1)) < 0)
        allenums = ",".join(tp.enumerators)
        self._lsts["enum"].append(
            EnumExpr(tp.name, type_index, size, signed, allenums))
    def _generate_cpy_enum_ctx(self, tp, name):
        """Emit the ctx entries for a named enum."""
        self._enum_ctx(tp, tp._get_c_name())
# ----------
# macros: for now only for integers
def _generate_cpy_macro_collecttype(self, tp, name):
pass
def _generate_cpy_macro_decl(self, tp, name):
if tp == '...':
check_value = None
else:
check_value = tp # an integer
self._generate_cpy_const(True, name, check_value=check_value)
    def _generate_cpy_macro_ctx(self, tp, name):
        """Append the "global" ctx entry for an integer macro; '...'
        macros are rejected in ABI mode (no compiler to ask)."""
        if tp == '...':
            if self.target_is_python:
                raise VerificationError(
                    "cannot use the syntax '...' in '#define %s ...' when "
                    "using the ABI mode" % (name,))
            check_value = None
        else:
            check_value = tp     # an integer
        type_op = CffiOp(OP_CONSTANT_INT, -1)
        self._lsts["global"].append(
            GlobalExpr(name, '_cffi_const_%s' % name, type_op,
                       check_value=check_value))
# ----------
# global variables
    def _global_type(self, tp, global_name):
        """Return 'tp' with any '...' array length replaced by a
        compile-time _cffi_array_len() expression on the real global
        'global_name'; recurses into nested array item types."""
        if isinstance(tp, model.ArrayType):
            actual_length = tp.length
            if actual_length == '...':
                actual_length = '_cffi_array_len(%s)' % (global_name,)
            tp_item = self._global_type(tp.item, '%s[0]' % global_name)
            tp = model.ArrayType(tp_item, actual_length)
        return tp
    def _generate_cpy_variable_collecttype(self, tp, name):
        """Collect the (length-resolved) type of a global variable."""
        self._do_collect_type(self._global_type(tp, name))
    def _generate_cpy_variable_decl(self, tp, name):
        """Emit _cffi_var_<name>(), a tiny C accessor returning the
        address of the global variable (or the array itself, which
        already decays to a pointer for open-ended arrays)."""
        prnt = self._prnt
        tp = self._global_type(tp, name)
        if isinstance(tp, model.ArrayType) and tp.length is None:
            # 'int x[];' -- return the array (decays to item pointer)
            tp = tp.item
            ampersand = ''
        else:
            ampersand = '&'
        # This code assumes that casts from "tp *" to "void *" is a
        # no-op, i.e. a function that returns a "tp *" can be called
        # as if it returned a "void *".  This should be generally true
        # on any modern machine.  The only exception to that rule (on
        # uncommon architectures, and as far as I can tell) might be
        # if 'tp' were a function type, but that is not possible here.
        # (If 'tp' is a function _pointer_ type, then casts from "fn_t
        # **" to "void *" are again no-ops, as far as I can tell.)
        decl = '*_cffi_var_%s(void)' % (name,)
        prnt('static ' + tp.get_c_name(decl, quals=self._current_quals))
        prnt('{')
        prnt('  return %s(%s);' % (ampersand, name))
        prnt('}')
        prnt()
    def _generate_cpy_variable_ctx(self, tp, name):
        """Append the "global" ctx entry for a global variable; in API
        mode the entry points at the _cffi_var_* accessor function."""
        tp = self._global_type(tp, name)
        type_index = self._typesdict[tp]
        if self.target_is_python:
            op = OP_GLOBAL_VAR
        else:
            op = OP_GLOBAL_VAR_F
        self._lsts["global"].append(
            GlobalExpr(name, '_cffi_var_%s' % name, CffiOp(op, type_index)))
# ----------
# extern "Python"
    def _generate_cpy_extern_python_collecttype(self, tp, name):
        """Collect the function pointer type of an extern "Python"
        declaration (same logic for the dllexport/plus_c variants)."""
        assert isinstance(tp, model.FunctionPtrType)
        self._do_collect_type(tp)
    _generate_cpy_dllexport_python_collecttype = \
        _generate_cpy_extern_python_plus_c_collecttype = \
        _generate_cpy_extern_python_collecttype
    def _extern_python_decl(self, tp, name, tag_and_space):
        """Emit the C trampoline for an extern "Python" function.

        The trampoline packs all arguments into a local buffer 'a'
        (8 bytes per slot; struct/union/long-double values are passed
        by pointer), calls _cffi_call_python() with the per-function
        _cffi_externpy_s descriptor, and reads the result back from
        the same buffer.  'tag_and_space' is the linkage prefix
        ('static ', 'CFFI_DLLEXPORT ' or '').
        """
        prnt = self._prnt
        if isinstance(tp.result, model.VoidType):
            size_of_result = '0'
        else:
            context = 'result of %s' % name
            size_of_result = '(int)sizeof(%s)' % (
                tp.result.get_c_name('', context),)
        prnt('static struct _cffi_externpy_s _cffi_externpy__%s =' % name)
        prnt('  { "%s.%s", %s, 0, 0 };' % (
            self.module_name, name, size_of_result))
        prnt()
        #
        arguments = []
        context = 'argument of %s' % name
        for i, type in enumerate(tp.args):
            arg = type.get_c_name(' a%d' % i, context)
            arguments.append(arg)
        #
        repr_arguments = ', '.join(arguments)
        repr_arguments = repr_arguments or 'void'
        name_and_arguments = '%s(%s)' % (name, repr_arguments)
        if tp.abi == "__stdcall":
            name_and_arguments = '_cffi_stdcall ' + name_and_arguments
        #
        def may_need_128_bits(tp):
            # 'long double' may not fit into one 8-byte slot
            return (isinstance(tp, model.PrimitiveType) and
                    tp.name == 'long double')
        #
        # buffer: one 8-byte slot per argument, at least 8 bytes, and
        # big enough for a struct/union result stored in place
        size_of_a = max(len(tp.args)*8, 8)
        if may_need_128_bits(tp.result):
            size_of_a = max(size_of_a, 16)
        if isinstance(tp.result, model.StructOrUnion):
            size_of_a = 'sizeof(%s) > %d ? sizeof(%s) : %d' % (
                tp.result.get_c_name(''), size_of_a,
                tp.result.get_c_name(''), size_of_a)
        prnt('%s%s' % (tag_and_space, tp.result.get_c_name(name_and_arguments)))
        prnt('{')
        prnt('  char a[%s];' % size_of_a)
        prnt('  char *p = a;')
        for i, type in enumerate(tp.args):
            arg = 'a%d' % i
            if (isinstance(type, model.StructOrUnion) or
                    may_need_128_bits(type)):
                # too big for a slot: pass a pointer to it instead
                arg = '&' + arg
                type = model.PointerType(type)
            prnt('  *(%s)(p + %d) = %s;' % (type.get_c_name('*'), i*8, arg))
        prnt('  _cffi_call_python(&_cffi_externpy__%s, p);' % name)
        if not isinstance(tp.result, model.VoidType):
            # the result was written back into the buffer
            prnt('  return *(%s)p;' % (tp.result.get_c_name('*'),))
        prnt('}')
        prnt()
        self._num_externpy += 1
    def _generate_cpy_extern_python_decl(self, tp, name):
        """extern "Python": trampoline with internal linkage."""
        self._extern_python_decl(tp, name, 'static ')
    def _generate_cpy_dllexport_python_decl(self, tp, name):
        """extern "Python" exported from the DLL."""
        self._extern_python_decl(tp, name, 'CFFI_DLLEXPORT ')
    def _generate_cpy_extern_python_plus_c_decl(self, tp, name):
        """extern "Python+C": default (external) linkage, no prefix."""
        self._extern_python_decl(tp, name, '')
    def _generate_cpy_extern_python_ctx(self, tp, name):
        """Append the "global" ctx entry pointing at the per-function
        _cffi_externpy_s descriptor; only valid in API mode and for
        non-vararg functions."""
        if self.target_is_python:
            raise VerificationError(
                "cannot use 'extern \"Python\"' in the ABI mode")
        if tp.ellipsis:
            raise NotImplementedError("a vararg function is extern \"Python\"")
        type_index = self._typesdict[tp]
        type_op = CffiOp(OP_EXTERN_PYTHON, type_index)
        self._lsts["global"].append(
            GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name))
    _generate_cpy_dllexport_python_ctx = \
        _generate_cpy_extern_python_plus_c_ctx = \
        _generate_cpy_extern_python_ctx
    def _print_string_literal_in_array(self, s):
        """Emit string 's' as a comma-separated list of byte values
        (for a C char array initializer), with each source line also
        shown as a '//' comment above its bytes.

        A string literal is not used because MSVC limits their size;
        non-ASCII content gets an explicit utf-8 coding cookie.
        """
        prnt = self._prnt
        prnt('// # NB. this is not a string because of a size limit in MSVC')
        if not isinstance(s, bytes):    # unicode
            s = s.encode('utf-8')       # -> bytes
        else:
            s.decode('utf-8')           # got bytes, check for valid utf-8
        try:
            s.decode('ascii')
        except UnicodeDecodeError:
            s = b'# -*- encoding: utf8 -*-\n' + s
        for line in s.splitlines(True):
            comment = line
            if type('//') is bytes:     # python2
                line = map(ord, line)   #     make a list of integers
            else:                       # python3
                # type(line) is bytes, which enumerates like a list of integers
                comment = ascii(comment)[1:-1]
            prnt(('// ' + comment).rstrip())
            printed_line = ''
            for c in line:
                if len(printed_line) >= 76:
                    # wrap the initializer at roughly 76 columns
                    prnt(printed_line)
                    printed_line = ''
                printed_line += '%d,' % (c,)
            prnt(printed_line)
# ----------
# emitting the opcodes for individual types
    def _emit_bytecode_VoidType(self, tp, index):
        """'void' is the primitive with index PRIM_VOID."""
        self.cffi_types[index] = CffiOp(OP_PRIMITIVE, PRIM_VOID)
    def _emit_bytecode_PrimitiveType(self, tp, index):
        """Known primitive: look up its fixed PRIM_* index by name."""
        prim_index = PRIMITIVE_TO_INDEX[tp.name]
        self.cffi_types[index] = CffiOp(OP_PRIMITIVE, prim_index)
    def _emit_bytecode_UnknownIntegerType(self, tp, index):
        """Integer of unknown size/signedness: emit a C expression that
        computes the right PRIM_* index at compile time."""
        s = ('_cffi_prim_int(sizeof(%s), (\n'
             '           ((%s)-1) | 0 /* check that %s is an integer type */\n'
             '         ) <= 0)' % (tp.name, tp.name, tp.name))
        self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s)
    def _emit_bytecode_UnknownFloatType(self, tp, index):
        """Float of unknown size: emit a C expression computing the
        PRIM_* index; the '(((T)1) / 2) * 2' trick distinguishes an
        integer type (result 0) from a float type (result 1)."""
        s = ('_cffi_prim_float(sizeof(%s) *\n'
             '           (((%s)1) / 2) * 2 /* integer => 0, float => 1 */\n'
             '         )' % (tp.name, tp.name))
        self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s)
    def _emit_bytecode_RawFunctionType(self, tp, index):
        """Emit OP_FUNCTION(result), one opcode per argument (a NOOP
        redirect if the argument type lives elsewhere in the table),
        then OP_FUNCTION_END carrying the ellipsis/abi flags."""
        self.cffi_types[index] = CffiOp(OP_FUNCTION, self._typesdict[tp.result])
        index += 1
        for tp1 in tp.args:
            realindex = self._typesdict[tp1]
            if index != realindex:
                if isinstance(tp1, model.PrimitiveType):
                    # cheap: duplicate the primitive opcode in place
                    self._emit_bytecode_PrimitiveType(tp1, index)
                else:
                    self.cffi_types[index] = CffiOp(OP_NOOP, realindex)
            index += 1
        flags = int(tp.ellipsis)
        if tp.abi is not None:
            if tp.abi == '__stdcall':
                flags |= 2
            else:
                raise NotImplementedError("abi=%r" % (tp.abi,))
        self.cffi_types[index] = CffiOp(OP_FUNCTION_END, flags)
    def _emit_bytecode_PointerType(self, tp, index):
        """Pointer: OP_POINTER to the pointed-to type's index (const
        and named pointers are encoded identically)."""
        self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[tp.totype])
    _emit_bytecode_ConstPointerType = _emit_bytecode_PointerType
    _emit_bytecode_NamedPointerType = _emit_bytecode_PointerType
    def _emit_bytecode_FunctionPtrType(self, tp, index):
        """Function pointer: a pointer to the raw function type."""
        raw = tp.as_raw_function()
        self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[raw])
    def _emit_bytecode_ArrayType(self, tp, index):
        """Array: OP_OPEN_ARRAY for '[]', otherwise OP_ARRAY followed
        by a second slot holding the length (pre-reserved as 'LEN').
        A remaining '...' length here means it was used in a position
        where it cannot be measured, which is an error."""
        item_index = self._typesdict[tp.item]
        if tp.length is None:
            self.cffi_types[index] = CffiOp(OP_OPEN_ARRAY, item_index)
        elif tp.length == '...':
            raise VerificationError(
                "type %s badly placed: the '...' array length can only be "
                "used on global arrays or on fields of structures" % (
                    str(tp).replace('/*...*/', '...'),))
        else:
            assert self.cffi_types[index + 1] == 'LEN'
            self.cffi_types[index] = CffiOp(OP_ARRAY, item_index)
            self.cffi_types[index + 1] = CffiOp(None, str(tp.length))
    def _emit_bytecode_StructType(self, tp, index):
        """Struct/union: OP_STRUCT_UNION with the index into the
        separate struct_union table."""
        struct_index = self._struct_unions[tp]
        self.cffi_types[index] = CffiOp(OP_STRUCT_UNION, struct_index)
    _emit_bytecode_UnionType = _emit_bytecode_StructType
    def _emit_bytecode_EnumType(self, tp, index):
        """Enum: OP_ENUM with the index into the separate enum table."""
        enum_index = self._enums[tp]
        self.cffi_types[index] = CffiOp(OP_ENUM, enum_index)
# NativeIO: an in-memory stream that accepts the native 'str' type of
# the running Python (str on 3.x, str-or-unicode on 2.x).
if sys.version_info >= (3,):
    NativeIO = io.StringIO
else:
    class NativeIO(io.BytesIO):
        # Python 2: also accept unicode by encoding it to ASCII bytes
        def write(self, s):
            if isinstance(s, unicode):
                s = s.encode('ascii')
            super(NativeIO, self).write(s)
def _make_c_or_py_source(ffi, module_name, preamble, target_file, verbose):
    """Generate C (or, if 'preamble' is None, pure-Python) source for
    'ffi' into 'target_file'.

    Returns False and leaves the file untouched if it already has
    exactly the generated content; otherwise writes it atomically via a
    temporary file + rename and returns True.
    """
    if verbose:
        print("generating %s" % (target_file,))
    recompiler = Recompiler(ffi, module_name,
                            target_is_python=(preamble is None))
    recompiler.collect_type_table()
    recompiler.collect_step_tables()
    f = NativeIO()
    recompiler.write_source_to_f(f, preamble)
    output = f.getvalue()
    try:
        # read one extra byte so a longer existing file compares unequal
        with open(target_file, 'r') as f1:
            if f1.read(len(output) + 1) != output:
                raise IOError
        if verbose:
            print("(already up-to-date)")
        return False     # already up-to-date
    except IOError:
        tmp_file = '%s.~%d' % (target_file, os.getpid())
        with open(tmp_file, 'w') as f1:
            f1.write(output)
        try:
            os.rename(tmp_file, target_file)
        except OSError:
            # e.g. Windows: cannot rename over an existing file
            os.unlink(target_file)
            os.rename(tmp_file, target_file)
        return True
def make_c_source(ffi, module_name, preamble, target_c_file, verbose=False):
    """Write the C source for 'ffi' into 'target_c_file'; return True
    if the file was (re)written, False if already up to date."""
    assert preamble is not None
    return _make_c_or_py_source(ffi, module_name, preamble, target_c_file,
                                verbose)
def make_py_source(ffi, module_name, target_py_file, verbose=False):
    """Write the ABI-mode pure-Python source for 'ffi'; return True if
    the file was (re)written, False if already up to date."""
    return _make_c_or_py_source(ffi, module_name, None, target_py_file,
                                verbose)
def _modname_to_file(outputdir, modname, extension):
parts = modname.split('.')
try:
os.makedirs(os.path.join(outputdir, *parts[:-1]))
except OSError:
pass
parts[-1] += extension
return os.path.join(outputdir, *parts), parts
# Aaargh. Distutils is not tested at all for the purpose of compiling
# DLLs that are not extension modules. Here are some hacks to work
# around that, in the _patch_for_*() functions...
def _patch_meth(patchlist, cls, name, new_meth):
old = getattr(cls, name)
patchlist.append((cls, name, old))
setattr(cls, name, new_meth)
return old
def _unpatch_meths(patchlist):
for cls, name, old_meth in reversed(patchlist):
setattr(cls, name, old_meth)
def _patch_for_embedding(patchlist):
    """Patch distutils so the produced binary works as an embedded
    library rather than a CPython extension module; patches are
    recorded in 'patchlist' for later _unpatch_meths()."""
    if sys.platform == 'win32':
        # we must not remove the manifest when building for embedding!
        from distutils.msvc9compiler import MSVCCompiler
        _patch_meth(patchlist, MSVCCompiler, '_remove_visual_c_ref',
                    lambda self, manifest_file: manifest_file)

    if sys.platform == 'darwin':
        # we must not make a '-bundle', but a '-dynamiclib' instead
        from distutils.ccompiler import CCompiler
        def my_link_shared_object(self, *args, **kwds):
            if '-bundle' in self.linker_so:
                self.linker_so = list(self.linker_so)
                i = self.linker_so.index('-bundle')
                self.linker_so[i] = '-dynamiclib'
            return old_link_shared_object(self, *args, **kwds)
        old_link_shared_object = _patch_meth(patchlist, CCompiler,
                                             'link_shared_object',
                                             my_link_shared_object)
def _patch_for_target(patchlist, target):
    """Patch distutils' build_ext so the output file is named exactly
    'target' instead of a name derived from the module name.  A
    trailing '.*' in 'target' is replaced by the platform's shared
    library suffix."""
    from distutils.command.build_ext import build_ext
    # if 'target' is different from '*', we need to patch some internal
    # method to just return this 'target' value, instead of having it
    # built from module_name
    if target.endswith('.*'):
        target = target[:-2]
        if sys.platform == 'win32':
            target += '.dll'
        elif sys.platform == 'darwin':
            target += '.dylib'
        else:
            target += '.so'
    _patch_meth(patchlist, build_ext, 'get_ext_filename',
                lambda self, ext_name: target)
def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True,
              c_file=None, source_extension='.c', extradir=None,
              compiler_verbose=1, target=None, debug=None, **kwds):
    """Generate the source for 'ffi' and optionally compile it.

    With a 'preamble' (API mode): writes the C source and, if
    'call_c_compiler', builds the extension inside 'tmpdir' and returns
    the output filename; otherwise returns (extension_object, updated).
    With preamble=None (ABI mode): writes a pure-Python file and
    returns its path, or (None, updated) if not "compiling".
    Extra **kwds are passed to the distutils Extension.
    """
    if not isinstance(module_name, str):
        module_name = module_name.encode('ascii')
    if ffi._windows_unicode:
        ffi._apply_windows_unicode(kwds)
    if preamble is not None:
        # API mode: generate C and (optionally) compile it
        embedding = (ffi._embedding is not None)
        if embedding:
            ffi._apply_embedding_fix(kwds)
        if c_file is None:
            c_file, parts = _modname_to_file(tmpdir, module_name,
                                             source_extension)
            if extradir:
                parts = [extradir] + parts
            ext_c_file = os.path.join(*parts)
        else:
            ext_c_file = c_file
        #
        if target is None:
            if embedding:
                target = '%s.*' % module_name
            else:
                target = '*'
        #
        ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds)
        updated = make_c_source(ffi, module_name, preamble, c_file,
                                verbose=compiler_verbose)
        if call_c_compiler:
            patchlist = []
            cwd = os.getcwd()
            try:
                if embedding:
                    _patch_for_embedding(patchlist)
                if target != '*':
                    _patch_for_target(patchlist, target)
                if compiler_verbose:
                    if tmpdir == '.':
                        msg = 'the current directory is'
                    else:
                        msg = 'setting the current directory to'
                    print('%s %r' % (msg, os.path.abspath(tmpdir)))
                os.chdir(tmpdir)
                outputfilename = ffiplatform.compile('.', ext,
                                                     compiler_verbose, debug)
            finally:
                # always restore cwd and undo the distutils patches
                os.chdir(cwd)
                _unpatch_meths(patchlist)
            return outputfilename
        else:
            return ext, updated
    else:
        # ABI mode: generate a pure-Python module
        if c_file is None:
            c_file, _ = _modname_to_file(tmpdir, module_name, '.py')
        updated = make_py_source(ffi, module_name, c_file,
                                 verbose=compiler_verbose)
        if call_c_compiler:
            return c_file
        else:
            return None, updated
| {
"content_hash": "097e5a12bf235e7cbd716e9caad5402b",
"timestamp": "",
"source": "github",
"line_count": 1571,
"max_line_length": 80,
"avg_line_length": 40.77721196690006,
"alnum_prop": 0.506283073945146,
"repo_name": "sserrot/champion_relationships",
"id": "1aeae5b92a5fc1927f7e7d24056fb68eb8dbc329",
"size": "64061",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "venv/Lib/site-packages/cffi/recompiler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "128"
},
{
"name": "HTML",
"bytes": "18324224"
},
{
"name": "Jupyter Notebook",
"bytes": "9131072"
},
{
"name": "Python",
"bytes": "10702"
}
],
"symlink_target": ""
} |
from distutils.core import setup
from kafkameta import __version__
# Package metadata for kafkameta; the version is taken from the package
# itself so it is defined in exactly one place.
setup(
    name='kafkameta',
    version=__version__,
    description='Query Kafka metadata via Zookeeper',
    author='Justin Lintz',
    author_email='jlintz@gmail.com',
    license='Apache License 2.0',
    url='https://github.com/chartbeat/kafkameta',
    install_requires=['kazoo'],
    packages=['kafkameta'],
)
| {
"content_hash": "489d9edefd6f6dd9d8aa543dbfc4576c",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 55,
"avg_line_length": 30.923076923076923,
"alnum_prop": 0.654228855721393,
"repo_name": "chartbeat-labs/kafkameta",
"id": "bf147bcbd4fb6635c7dd6dc9abab6c06cf181f8a",
"size": "425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12980"
}
],
"symlink_target": ""
} |
"""
The logger class responsible for writing the loglines to the given
file. We can also set the log level to one of the following levels
WARNING, INFO, VERBOSE and DEBUG
"""
import sys
import datetime
class Logger(object):
''' Logger class'''
def __init__(self, log_config, verbose=False, debug=False):
"""
Init function for Logger class
Args:
log_config - Log config dictionary
verbose - Verbosity flag
debug - Debug flag
Return:
None
Raise:
None
"""
self.verbose = verbose
self.debug = debug
self.log_config = log_config
self.logger_file_handle = None
self.full_log_config_path = None
self.log_levels = ["WARNING", "INFO", "VERBOSE", "DEBUG"]
def get_current_log_level(self):
"""
Fetches the current log level, the level set to INFO
by default if an unknown level given in the config
Args:
None
Return:
Log level
Raise:
None
"""
try:
if (self.log_config['log_level'] and
self.log_config['log_level'] in self.log_levels):
return self.log_levels.index(self.log_config['log_level'])
else:
print ("[WARNING]: Unknown log_level defined in the log_config"
"configuration: {0}. Overriding it to the default"
"level: INFO".format(self.log_config['log_level']))
self.log_config['log_level'] = "INFO"
return self.log_levels.index(self.log_config['log_level'])
except KeyError:
print ("[FATAL]: Logger config is incomplete. Please define "
"log_config-> log_level option. Set it to false if you "
"want to, disable verbose logging")
sys.exit(1)
def is_verbose_logging_enabled(self):
"""
Check if verbose flag is set
Args:
None
Return:
True if verbose flag is set else False
Raise:
None
"""
try:
if self.log_config['verbose']:
return True
else:
return False
except KeyError:
print ("[FATAL]: Logger config is incomplete. Please define "
"log_config-> verbose option. Set it to false if you want"
" to disable verbose logging")
sys.exit(1)
def is_console_logging_enabled(self):
"""
Check if console logging is enabled
Args:
None
Return:
True if console logging is enabled else False
Raise:
None
"""
try:
if self.log_config['console']:
return True
else:
return False
except KeyError:
print ("[FATAL]: Logger config is incomplete. Please define "
"log_config-> console option. Set it to false if you "
"want to disable logging into console")
sys.exit(1)
def is_file_logging_enabled(self):
"""
Check if file logging is enabled
Args:
None
Return:
True if console logging is enabled else False
Raise:
None
"""
try:
if self.log_config['path']:
return True
else:
return False
except KeyError:
print ("[FATAL]: Logger config is incomplete. Please define "
"log_config-> path option. Set it to false if you want to "
"disable it.")
sys.exit(1)
def get_logger_file_path(self):
"""
Fetch the logger file path
Args:
None
Return:
Path to the log file
Raise:
None
"""
try:
if self.log_config['path']:
return self.log_config['path']
else:
return None
except KeyError:
print "[FATAL]: Logger configuration is not defined"
sys.exit(1)
    def get_logger_file_handle(self):
        """
        Returns a file object to the log file, opening it on first use
        and caching it on the instance for subsequent calls.
        Args:
            None
        Return:
            file object to the log file (append mode)
        Raise:
            None (exits via sys.exit(1) if the file cannot be opened)
        """
        if not self.logger_file_handle:
            try:
                # Append mode with buffering=0 (unbuffered) so entries hit
                # disk immediately. NOTE(review): unbuffered text-mode open
                # is only valid on Python 2 — Python 3 rejects it; confirm
                # the target interpreter before porting.
                file_desc = open(self.get_logger_file_path(), "a", 0)
                self.logger_file_handle = file_desc
                return self.logger_file_handle
            except Exception as exc:
                print ("[FATAL]: Could not open file name for logging: {0}.{1}"
                       .format(self.get_logger_file_path(), exc))
                sys.exit(1)
        else:
            return self.logger_file_handle
def logit(self, log_type, message, log_level="INFO"):
"""
Add a message to the log if the message log level is acceptable
Args:
log_type - the log type
message - The log message
log_level - the log level
Return:
None
Raise:
None
"""
if self.get_current_log_level() >= self.log_levels.index(log_level):
if self.is_file_logging_enabled():
self.get_logger_file_handle()\
.write("[{0}] [{1}]: {2}\n".format(datetime.datetime.now(),
log_type, message))
if self.is_console_logging_enabled():
sys.stdout.write("[{0}] [{1}]: {2}\n"
.format(datetime.datetime.now(),
log_type, message))
| {
"content_hash": "78125e5571e23cee170e7b776b6d087d",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 79,
"avg_line_length": 33.369565217391305,
"alnum_prop": 0.47133550488599346,
"repo_name": "samklr/simoorg",
"id": "e0bbc4feb9ccdb2f40a6a07a0138b161d03a73c0",
"size": "6637",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "src/simoorg/Logger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "192506"
},
{
"name": "Shell",
"bytes": "9393"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from operator import attrgetter
from uuid import UUID
from flask import flash, jsonify, redirect, request, session
from sqlalchemy.orm import contains_eager, subqueryload
from werkzeug.exceptions import Forbidden, NotFound
from indico.modules.auth.util import redirect_to_login
from indico.modules.events.controllers.base import RHDisplayEventBase
from indico.modules.events.models.events import EventType
from indico.modules.events.payment import payment_event_settings
from indico.modules.events.registration import registration_settings
from indico.modules.events.registration.controllers import RegistrationEditMixin, RegistrationFormMixin
from indico.modules.events.registration.models.forms import RegistrationForm
from indico.modules.events.registration.models.invitations import InvitationState, RegistrationInvitation
from indico.modules.events.registration.models.items import PersonalDataType
from indico.modules.events.registration.models.registrations import Registration, RegistrationState
from indico.modules.events.registration.util import (check_registration_email, create_registration, generate_ticket,
get_event_regforms_registrations, get_event_section_data,
get_title_uuid, make_registration_form)
from indico.modules.events.registration.views import (WPDisplayRegistrationFormConference,
WPDisplayRegistrationFormSimpleEvent,
WPDisplayRegistrationParticipantList)
from indico.util.fs import secure_filename
from indico.util.i18n import _
from indico.web.flask.util import send_file, url_for
class RHRegistrationFormDisplayBase(RHDisplayEventBase):
    @property
    def view_class(self):
        """Pick the display page: conferences get the full conference WP,
        any other event type gets the simple-event WP."""
        if self.event.type_ == EventType.conference:
            return WPDisplayRegistrationFormConference
        return WPDisplayRegistrationFormSimpleEvent
class RHRegistrationFormBase(RegistrationFormMixin, RHRegistrationFormDisplayBase):
    """Base for RHs that need both the event and a registration form."""

    def _process_args(self):
        # Order matters: resolve the event first, then the regform —
        # presumably the mixin looks the form up within that event
        # (TODO confirm: mixin internals are not visible in this file).
        RHRegistrationFormDisplayBase._process_args(self)
        RegistrationFormMixin._process_args(self)
class RHRegistrationFormRegistrationBase(RHRegistrationFormBase):
    """Base for RHs handling individual registrations"""

    # When True, requests without a resolvable registration are rejected (403)
    REGISTRATION_REQUIRED = True

    def _process_args(self):
        RHRegistrationFormBase._process_args(self)
        self.token = request.args.get('token')
        if self.token:
            # An explicit access token must match an existing registration
            self.registration = self.regform.get_registration(uuid=self.token)
            if not self.registration:
                raise NotFound
        else:
            # Otherwise fall back to the logged-in user's own registration
            self.registration = self.regform.get_registration(user=session.user) if session.user else None
        if self.REGISTRATION_REQUIRED and not self.registration:
            raise Forbidden
class RHRegistrationFormList(RHRegistrationFormDisplayBase):
    """List of all registration forms in the event"""

    def _process(self):
        regforms, registrations = get_event_regforms_registrations(self.event, session.user)
        # With exactly one form there is nothing to choose from, so go
        # straight to that form instead of rendering the list page.
        if len(regforms) == 1:
            return redirect(url_for('.display_regform', regforms[0]))
        return self.view_class.render_template('display/regform_list.html', self.event,
                                               regforms=regforms,
                                               user_registrations=registrations)
class RHParticipantList(RHRegistrationFormDisplayBase):
    """List of all public registrations"""
    view_class = WPDisplayRegistrationParticipantList
    @staticmethod
    def _is_checkin_visible(reg):
        # Check-in status is only shown when the form opted into publishing it
        return reg.registration_form.publish_checkin_enabled and reg.checked_in
    def _merged_participant_list_table(self):
        # Build a single table combining the publishable registrations of
        # all published forms, deduplicated by visible column data.
        def _process_registration(reg, column_names):
            personal_data = reg.get_personal_data()
            columns = [{'text': personal_data.get(column_name, '')} for column_name in column_names]
            return {'checked_in': self._is_checkin_visible(reg), 'columns': columns}
        def _deduplicate_reg_data(reg_data_iter):
            # Drop rows whose visible columns are identical, e.g. the same
            # person registered in several forms
            used = set()
            for reg_data in reg_data_iter:
                reg_data_hash = tuple(tuple(sorted(x.items())) for x in reg_data['columns'])
                if reg_data_hash not in used:
                    used.add(reg_data_hash)
                    yield reg_data
        column_names = registration_settings.get(self.event, 'participant_list_columns')
        headers = [PersonalDataType[column_name].get_title() for column_name in column_names]
        query = (Registration.query.with_parent(self.event)
                 .filter(Registration.is_publishable,
                         RegistrationForm.publish_registrations_enabled,
                         ~RegistrationForm.is_deleted,
                         ~Registration.is_deleted)
                 .join(Registration.registration_form)
                 .options(subqueryload('data').joinedload('field_data'),
                          contains_eager('registration_form')))
        # Sort rows case-insensitively by their visible column values
        registrations = sorted(_deduplicate_reg_data(_process_registration(reg, column_names) for reg in query),
                               key=lambda reg: tuple(x['text'].lower() for x in reg['columns']))
        return {'headers': headers,
                'rows': registrations,
                'show_checkin': any(registration['checked_in'] for registration in registrations)}
    def _participant_list_table(self, regform):
        # Build the per-form table using the form's own configured columns.
        def _process_registration(reg, column_ids, active_fields):
            data_by_field = reg.data_by_field
            def _content(column_id):
                if column_id in data_by_field:
                    return data_by_field[column_id].get_friendly_data(for_humans=True)
                elif (column_id in active_fields and active_fields[column_id].personal_data_type is not None and
                        active_fields[column_id].personal_data_type.column is not None):
                    # some legacy registrations have no data in the firstname/lastname/email field
                    # so we need to get it from the registration object itself
                    return getattr(reg, active_fields[column_id].personal_data_type.column)
                else:
                    # no data available for the field
                    return ''
            def _sort_key_date(column_id):
                # Only date fields get a machine-sortable key; other fields
                # sort by their display text alone
                data = data_by_field.get(column_id)
                if data and data.field_data.field.input_type == 'date':
                    return data.data
                else:
                    return None
            columns = [{'text': _content(column_id), 'sort_key': _sort_key_date(column_id)} for column_id in column_ids]
            return {'checked_in': self._is_checkin_visible(reg), 'columns': columns}
        active_fields = {field.id: field for field in regform.active_fields}
        # Ignore configured columns whose field no longer exists/is inactive
        column_ids = [column_id
                      for column_id in registration_settings.get_participant_list_columns(self.event, regform)
                      if column_id in active_fields]
        headers = [active_fields[column_id].title.title() for column_id in column_ids]
        active_registrations = sorted(regform.active_registrations, key=attrgetter('last_name', 'first_name', 'id'))
        registrations = [_process_registration(reg, column_ids, active_fields) for reg in active_registrations
                         if reg.is_publishable]
        return {'headers': headers,
                'rows': registrations,
                'title': regform.title,
                'show_checkin': any(registration['checked_in'] for registration in registrations)}
    def _process(self):
        regforms = (RegistrationForm.query.with_parent(self.event)
                    .filter(RegistrationForm.publish_registrations_enabled,
                            ~RegistrationForm.is_deleted)
                    .options(subqueryload('registrations').subqueryload('data').joinedload('field_data'))
                    .all())
        if registration_settings.get(self.event, 'merge_registration_forms'):
            tables = [self._merged_participant_list_table()]
        else:
            tables = []
            regforms_dict = {regform.id: regform for regform in regforms if regform.publish_registrations_enabled}
            # First emit tables in the order the user configured...
            for form_id in registration_settings.get_participant_list_form_ids(self.event):
                try:
                    regform = regforms_dict.pop(form_id)
                except KeyError:
                    # The settings might reference forms that are not available
                    # anymore (publishing was disabled, etc.)
                    continue
                tables.append(self._participant_list_table(regform))
            # There might be forms that have not been sorted by the user yet
            tables += map(self._participant_list_table, regforms_dict.viewvalues())
        published = (RegistrationForm.query.with_parent(self.event)
                     .filter(RegistrationForm.publish_registrations_enabled)
                     .has_rows())
        num_participants = sum(len(table['rows']) for table in tables)
        return self.view_class.render_template(
            'display/participant_list.html',
            self.event,
            regforms=regforms,
            tables=tables,
            published=published,
            num_participants=num_participants
        )
class InvitationMixin:
    """Mixin for RHs that accept an invitation token"""

    def _process_args(self):
        self.invitation = None
        token = request.args.get('invitation')
        if token is None:
            # No invitation parameter in the URL: nothing to resolve
            return
        # The token must at least look like a UUID before hitting the DB
        try:
            UUID(hex=token)
        except ValueError:
            flash(_("Your invitation code is not valid."), 'warning')
            return
        self.invitation = RegistrationInvitation.find(uuid=token).with_parent(self.regform).first()
        if self.invitation is None:
            flash(_("This invitation does not exist or has been withdrawn."), 'warning')
class RHRegistrationFormCheckEmail(RHRegistrationFormBase):
    """Checks how an email will affect the registration"""

    def _process(self):
        email = request.args['email'].lower().strip()
        update = request.args.get('update')
        management = request.args.get('management') == '1'
        if not update:
            return jsonify(check_registration_email(self.regform, email, management=management))
        # Updating an existing registration: compare against that registration
        existing = self.regform.get_registration(uuid=update)
        return jsonify(check_registration_email(self.regform, email, existing, management=management))
class RHRegistrationForm(InvitationMixin, RHRegistrationFormRegistrationBase):
    """Display a registration form and registrations, and process submissions"""
    # Viewing the form must work without an existing registration
    REGISTRATION_REQUIRED = False
    normalize_url_spec = {
        'locators': {
            lambda self: self.regform
        }
    }
    def _check_access(self):
        RHRegistrationFormRegistrationBase._check_access(self)
        # Submissions (non-GET) on login-only forms require authentication;
        # plain viewing (GET) is still allowed
        if self.regform.require_login and not session.user and request.method != 'GET':
            raise Forbidden(response=redirect_to_login(reason=_('You are trying to register with a form '
                                                                'that requires you to be logged in')))
    def _process_args(self):
        RHRegistrationFormRegistrationBase._process_args(self)
        InvitationMixin._process_args(self)
        # An invitation that was already accepted and produced a registration
        # goes straight to that registration's page
        if self.invitation and self.invitation.state == InvitationState.accepted and self.invitation.registration:
            return redirect(url_for('.display_regform', self.invitation.registration.locator.registrant))
    def _can_register(self):
        # An invitation lets one register even when the form is not active,
        # but never past the registration limit
        return not self.regform.limit_reached and (self.regform.is_active or self.invitation)
    def _process(self):
        form = make_registration_form(self.regform)()
        if self._can_register() and form.validate_on_submit():
            registration = create_registration(self.regform, form.data, self.invitation)
            return redirect(url_for('.display_regform', registration.locator.registrant))
        elif form.is_submitted():
            # not very pretty but usually this never happens thanks to client-side validation
            for error in form.error_list:
                flash(error, 'error')
        # Pre-fill personal data from the user profile, overridden by the
        # invitation's name/email when one is present
        user_data = {t.name: getattr(session.user, t.name, None) if session.user else '' for t in PersonalDataType}
        if self.invitation:
            user_data.update((attr, getattr(self.invitation, attr)) for attr in ('first_name', 'last_name', 'email'))
        user_data['title'] = get_title_uuid(self.regform, user_data['title'])
        return self.view_class.render_template('display/regform_display.html', self.event,
                                               regform=self.regform,
                                               sections=get_event_section_data(self.regform),
                                               payment_conditions=payment_event_settings.get(self.event, 'conditions'),
                                               payment_enabled=self.event.has_feature('payment'),
                                               user_data=user_data,
                                               invitation=self.invitation,
                                               registration=self.registration,
                                               management=False,
                                               login_required=self.regform.require_login and not session.user)
class RHRegistrationDisplayEdit(RegistrationEditMixin, RHRegistrationFormRegistrationBase):
    """Submit a registration form"""
    template_file = 'display/registration_modify.html'
    management = False
    # Missing registration is handled with a friendly redirect, not a 403
    REGISTRATION_REQUIRED = False
    def _process_args(self):
        RHRegistrationFormRegistrationBase._process_args(self)
        if self.registration is None:
            # Bug fix: both messages read "If have already registered"
            # (missing "you").
            if session.user:
                flash(_("We could not find a registration for you. If you have already registered, please use the "
                        "direct access link from the email you received after registering."), 'warning')
            else:
                flash(_("We could not find a registration for you. If you have already registered, please use the "
                        "direct access link from the email you received after registering or log in to your Indico "
                        "account."), 'warning')
            return redirect(url_for('.display_regform', self.regform))
    @property
    def success_url(self):
        # After a successful edit, return to the registrant's form page
        return url_for('.display_regform', self.registration.locator.registrant)
class RHRegistrationFormDeclineInvitation(InvitationMixin, RHRegistrationFormBase):
    """Decline an invitation to register"""
    def _process_args(self):
        RHRegistrationFormBase._process_args(self)
        InvitationMixin._process_args(self)
    def _process(self):
        # Only a pending invitation can be declined; in any other state the
        # request is a no-op and just redirects back to the event
        if self.invitation.state == InvitationState.pending:
            self.invitation.state = InvitationState.declined
            flash(_("You declined the invitation to register."))
        return redirect(self.event.url)
class RHTicketDownload(RHRegistrationFormRegistrationBase):
    """Generate ticket for a given registration"""

    def _check_access(self):
        RHRegistrationFormRegistrationBase._check_access(self)
        # A ticket is only downloadable for a complete registration on a
        # form with ticketing enabled, and only when the ticket is exposed
        # somewhere (event page, summary page, or to a registration
        # manager) and not explicitly blocked.
        if self.registration.state != RegistrationState.complete:
            raise Forbidden
        if not self.regform.tickets_enabled:
            raise Forbidden
        ticket_visible = (self.regform.ticket_on_event_page or self.regform.ticket_on_summary_page
                          or self.regform.event.can_manage(session.user, 'registration'))
        if not ticket_visible:
            raise Forbidden
        if self.registration.is_ticket_blocked:
            raise Forbidden

    def _process(self):
        # secure_filename falls back to 'ticket.pdf' if the event title
        # cannot produce a safe name
        pdf_name = secure_filename('{}-Ticket.pdf'.format(self.event.title), 'ticket.pdf')
        return send_file(pdf_name, generate_ticket(self.registration), 'application/pdf')
| {
"content_hash": "b1ae23b58a5361f5ebeca6c104c830e8",
"timestamp": "",
"source": "github",
"line_count": 332,
"max_line_length": 120,
"avg_line_length": 48.81325301204819,
"alnum_prop": 0.6326052079476737,
"repo_name": "mic4ael/indico",
"id": "7f22080703255dbe03191e002b096240581cc2ee",
"size": "16420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indico/modules/events/registration/controllers/display.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "553825"
},
{
"name": "HTML",
"bytes": "1375160"
},
{
"name": "JavaScript",
"bytes": "1852830"
},
{
"name": "Mako",
"bytes": "1340"
},
{
"name": "Python",
"bytes": "4612709"
},
{
"name": "Shell",
"bytes": "2665"
},
{
"name": "TeX",
"bytes": "23292"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
from .contexts import *
from .nddata_base import *
from .nddata import *
from .nduncertainty_converter import *
from .nduncertainty_unknown import *
from .nduncertainty_stddev import *
from .nduncertainty_var import *
from .nduncertainty_relstd import *
from .flag_collection import *
from .nddata_collection import *
from . import utils
from . import mixins
from . import meta
from . import exceptions
| {
"content_hash": "2d2563deae050f3a5049c51adb85c9b7",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 38,
"avg_line_length": 26.933333333333334,
"alnum_prop": 0.7797029702970297,
"repo_name": "MSeifert04/nddata",
"id": "ac0928a9e39ff6b9763d4abe304f97ff5a27feb8",
"size": "469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nddata/nddata/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "845124"
}
],
"symlink_target": ""
} |
"""This module is deprecated. Please use `airflow.providers.amazon.aws.hooks.aws_hook`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.amazon.aws.hooks.aws_hook import AwsHook, _parse_s3_config, boto3 # noqa
# Warn at import time so users of this legacy module path see the notice;
# stacklevel=2 attributes the warning to the importing module.
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.amazon.aws.hooks.aws_hook`.",
    DeprecationWarning, stacklevel=2
)
| {
"content_hash": "b27ccdd6ad87b0eda547c16950dfd86d",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 96,
"avg_line_length": 35,
"alnum_prop": 0.7584415584415585,
"repo_name": "spektom/incubator-airflow",
"id": "cb9f4c22153f9e8dd2afb8038c4e48de320a2757",
"size": "1172",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airflow/contrib/hooks/aws_hook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17179"
},
{
"name": "HTML",
"bytes": "148492"
},
{
"name": "JavaScript",
"bytes": "25233"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "9768581"
},
{
"name": "Shell",
"bytes": "221415"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
# WARNING:
# If the output voltage is higher than 36V, ensure the relay is in the off state before you operate with the terminal screws.
# The heatsink can get very hot during use.
import time
import grovepi
# Connect the Grove Solid State Relay to digital port D4
# CTR,NC,VCC,GND
# Digital pin the relay's CTR line is wired to (Grove port D4)
relay = 4
grovepi.pinMode(relay,"OUTPUT")
# Toggle the relay forever: 5 seconds on, 5 seconds off
while True:
    try:
        # switch on for 5 seconds
        grovepi.digitalWrite(relay,1)
        print ("on")
        time.sleep(5)
        # switch off for 5 seconds
        grovepi.digitalWrite(relay,0)
        print ("off")
        time.sleep(5)
    except KeyboardInterrupt:
        # Ctrl-C: de-energize the relay before exiting
        grovepi.digitalWrite(relay,0)
        break
    except IOError:
        # Communication with the GrovePi board failed; report and keep looping
        print ("Error")
| {
"content_hash": "1bfa77414be07f71907d31405b89a547",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 126,
"avg_line_length": 33.70175438596491,
"alnum_prop": 0.7418011452368558,
"repo_name": "lupyuen/RaspberryPiImage",
"id": "8720309b9a7e466520c1a4226facfaa823330935",
"size": "2325",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "home/pi/GrovePi/Software/Python/others/grove_solid_state_relay.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "82308"
},
{
"name": "C",
"bytes": "3197439"
},
{
"name": "C#",
"bytes": "33056"
},
{
"name": "C++",
"bytes": "1020255"
},
{
"name": "CSS",
"bytes": "208338"
},
{
"name": "CoffeeScript",
"bytes": "87200"
},
{
"name": "Eagle",
"bytes": "1632170"
},
{
"name": "Go",
"bytes": "3646"
},
{
"name": "Groff",
"bytes": "286691"
},
{
"name": "HTML",
"bytes": "41527"
},
{
"name": "JavaScript",
"bytes": "403603"
},
{
"name": "Makefile",
"bytes": "33808"
},
{
"name": "Objective-C",
"bytes": "69457"
},
{
"name": "Perl",
"bytes": "96047"
},
{
"name": "Processing",
"bytes": "1304"
},
{
"name": "Python",
"bytes": "13358098"
},
{
"name": "Shell",
"bytes": "68795"
},
{
"name": "TeX",
"bytes": "4317"
}
],
"symlink_target": ""
} |
'''
Train
Train your neural network
Author: Tawn Kramer
'''
from __future__ import print_function
import os
import sys
import glob
import time
import fnmatch
import argparse
import random
import json
import numpy as np
from PIL import Image
from tensorflow import keras
import conf
import models
'''
matplotlib can be a pain to setup. So handle the case where it is absent. When present,
use it to generate a plot of training results.
'''
try:
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
do_plot = True
except:
do_plot = False
def shuffle(samples):
    '''
    Return a new list with the elements of samples in random order.

    Uses random.sample, which is O(n) instead of the previous O(n^2)
    delete-from-the-middle loop, and leaves the input list intact
    (the docstring always promised a new list; the old code emptied
    the caller's list as a side effect).
    '''
    return random.sample(samples, len(samples))
def load_json(filename):
    '''Read the given JSON file and return the parsed object.'''
    with open(filename, "rt") as handle:
        return json.load(handle)
def generator(samples, batch_size=64,):
    '''
    Lazily yield (X, y) training batches from a list of image file paths.

    For each image path, the label comes from a sibling file named
    record_<frame>.json in the same directory, which supplies the
    steering angle ("user/angle") and throttle ("user/throttle").
    The sample list is re-shuffled on every full pass, and the generator
    loops forever as Keras fit_generator expects.

    NOTE(review): an earlier docstring described brightness/contrast
    augmentation and horizontal flipping; no such augmentation exists in
    this body.
    '''
    num_samples = len(samples)
    while 1: # Loop forever so the generator never terminates
        samples = shuffle(samples)
        # Walk the shuffled samples one batch at a time
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset+batch_size]
            images = []
            controls = []
            for fullpath in batch_samples:
                try:
                    # Labels live in record_<frame>.json next to the image;
                    # the frame number is the prefix of the image filename
                    frame_number = os.path.basename(fullpath).split("_")[0]
                    json_filename = os.path.join(os.path.dirname(fullpath), "record_" + frame_number + ".json")
                    data = load_json(json_filename)
                    steering = float(data["user/angle"])
                    throttle = float(data["user/throttle"])
                    try:
                        image = Image.open(fullpath)
                    except:
                        print('failed to open', fullpath)
                        continue
                    #PIL Image as a numpy array
                    image = np.array(image, dtype=np.float32)
                    images.append(image)
                    if conf.num_outputs == 2:
                        controls.append([steering, throttle])
                    elif conf.num_outputs == 1:
                        controls.append([steering])
                    else:
                        print("expected 1 or 2 outputs")
                except Exception as e:
                    print(e)
                    print("we threw an exception on:", fullpath)
                    # NOTE(review): this yields an empty, list-typed batch on
                    # error — Keras will likely reject it; `continue` looks
                    # like the intended behavior. Left as-is.
                    yield [], []
            # final np array to submit to training
            X_train = np.array(images)
            y_train = np.array(controls)
            yield X_train, y_train
def get_files(filemask):
    '''
    Recursively search the directory part of filemask for files whose
    names match its basename pattern; return the list of full paths.
    '''
    filemask = os.path.expanduser(filemask)
    directory, pattern = os.path.split(filemask)
    found = []
    for dirpath, _, names in os.walk(directory):
        found.extend(os.path.join(dirpath, name)
                     for name in fnmatch.filter(names, pattern))
    return found
def train_test_split(lines, test_perc):
    '''
    Randomly partition lines into (train, test). Each element lands in
    the test list with probability test_perc; relative order is kept
    within each list.
    '''
    train, test = [], []
    for item in lines:
        bucket = test if random.uniform(0.0, 1.0) < test_perc else train
        bucket.append(item)
    return train, test
def make_generators(inputs, limit=None, batch_size=64):
    '''
    Gather the image files matching the inputs filemask and build the
    train/validation batch generators.

    Returns (train_generator, validation_generator, n_train, n_val).
    '''
    samples = get_files(inputs)
    print("found %d files" % len(samples))
    if limit is not None:
        samples = samples[:limit]
        print("limiting to %d files" % len(samples))
    # Hold out ~20% of the samples for validation
    train_samples, val_samples = train_test_split(samples, test_perc=0.2)
    print("num train/val", len(train_samples), len(val_samples))
    train_gen = generator(train_samples, batch_size=batch_size)
    val_gen = generator(val_samples, batch_size=batch_size)
    return train_gen, val_gen, len(train_samples), len(val_samples)
def go(model_name, epochs=50, inputs='./log/*.jpg', limit=None):
    '''
    Build the model, train it on the images matching `inputs`, and write
    the best checkpoint to `model_name`.
    Args:
        model_name - path the best model checkpoint is saved to
        epochs - maximum number of training epochs
        inputs - filemask used to gather training images
        limit - optional cap on the number of images used
    '''
    print('working on model', model_name)
    '''
    modify config.json to select the model to train.
    '''
    model = models.get_nvidia_model(conf.num_outputs)
    '''
    display layer summary and weights info
    '''
    #models.show_model_summary(model)
    # Stop early when val_loss stalls; keep only the best checkpoint
    callbacks = [
        keras.callbacks.EarlyStopping(monitor='val_loss', patience=conf.training_patience, verbose=0),
        keras.callbacks.ModelCheckpoint(model_name, monitor='val_loss', save_best_only=True, verbose=0),
    ]
    batch_size = conf.training_batch_size
    #Train on session images
    train_generator, validation_generator, n_train, n_val = make_generators(inputs, limit=limit, batch_size=batch_size)
    if n_train == 0:
        print('no training data found')
        return
    steps_per_epoch = n_train // batch_size
    validation_steps = n_val // batch_size
    print("steps_per_epoch", steps_per_epoch, "validation_steps", validation_steps)
    # NOTE(review): fit_generator is deprecated in newer Keras/TF releases
    # in favor of fit(); kept as-is for the pinned version this targets.
    history = model.fit_generator(train_generator,
        steps_per_epoch = steps_per_epoch,
        validation_data = validation_generator,
        validation_steps = validation_steps,
        epochs=epochs,
        verbose=1,
        callbacks=callbacks)
    try:
        if do_plot:
            # summarize history for loss
            plt.plot(history.history['loss'])
            plt.plot(history.history['val_loss'])
            plt.title('model loss')
            plt.ylabel('loss')
            plt.xlabel('epoch')
            plt.legend(['train', 'test'], loc='upper left')
            plt.savefig('loss.png')
    except:
        print("problems with loss graph")
if __name__ == "__main__":
    # Command-line entry point: parse training options and run go().
    parser = argparse.ArgumentParser(description='train script')
    parser.add_argument('--model', type=str, help='model name')
    parser.add_argument('--epochs', type=int, default=conf.training_default_epochs, help='number of epochs')
    parser.add_argument('--inputs', default='../dataset/log/*.jpg', help='input mask to gather images')
    parser.add_argument('--limit', type=int, default=None, help='max number of images to train with')
    args = parser.parse_args()
    go(args.model, epochs=args.epochs, limit=args.limit, inputs=args.inputs)
    #python train.py ..\outputs\mymodel_aug_90_x4_e200 --epochs=200
| {
"content_hash": "5e4b86de1d4dc55cc0536eca02fd4e1c",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 119,
"avg_line_length": 31.647302904564317,
"alnum_prop": 0.609545037367248,
"repo_name": "tawnkramer/sdsandbox",
"id": "b14b5f87ce181b4b29c2a7f35038a00fa1e605a8",
"size": "7627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/train.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C#",
"bytes": "1117254"
},
{
"name": "HLSL",
"bytes": "216130"
},
{
"name": "OpenSCAD",
"bytes": "357"
},
{
"name": "Python",
"bytes": "33800"
},
{
"name": "ShaderLab",
"bytes": "182929"
}
],
"symlink_target": ""
} |
import operator
import numpy as np
import pytest
from pandas._libs.tslibs import (
IncompatibleFrequency,
Period,
Timestamp,
to_offset,
)
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
PeriodIndex,
Series,
Timedelta,
TimedeltaIndex,
period_range,
)
import pandas._testing as tm
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
from pandas.tests.arithmetic.common import assert_invalid_comparison
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
    # Comparison tests for PeriodDtype vectors fully parametrized over
    # DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
    # tests will eventually end up here.
    def test_compare_zerodim(self, box_with_array):
        # GH#26689 make sure we unbox zero-dimensional arrays
        # Index/array boxes return plain ndarrays from comparisons
        xbox = (
            box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
        )
        pi = period_range("2000", periods=4)
        other = np.array(pi.to_numpy()[0])
        pi = tm.box_expected(pi, box_with_array)
        result = pi <= other
        expected = np.array([True, False, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)
    @pytest.mark.parametrize(
        "scalar", ["foo", Timestamp.now(), Timedelta(days=4), 9, 9.5]
    )
    def test_compare_invalid_scalar(self, box_with_array, scalar):
        # comparison with scalar that cannot be interpreted as a Period
        pi = period_range("2000", periods=4)
        parr = tm.box_expected(pi, box_with_array)
        assert_invalid_comparison(parr, scalar, box_with_array)
    @pytest.mark.parametrize(
        "other",
        [
            pd.date_range("2000", periods=4).array,
            pd.timedelta_range("1D", periods=4).array,
            np.arange(4),
            np.arange(4).astype(np.float64),
            list(range(4)),
        ],
    )
    def test_compare_invalid_listlike(self, box_with_array, other):
        # listlikes of non-Period dtypes must not compare as equal
        pi = period_range("2000", periods=4)
        parr = tm.box_expected(pi, box_with_array)
        assert_invalid_comparison(parr, other, box_with_array)
    @pytest.mark.parametrize("other_box", [list, np.array, lambda x: x.astype(object)])
    def test_compare_object_dtype(self, box_with_array, other_box):
        pi = period_range("2000", periods=5)
        parr = tm.box_expected(pi, box_with_array)
        xbox = np.ndarray if box_with_array in [pd.Index, pd.array] else box_with_array
        # Comparison against an identical sequence: all-equal case
        other = other_box(pi)
        expected = np.array([True, True, True, True, True])
        expected = tm.box_expected(expected, xbox)
        result = parr == other
        tm.assert_equal(result, expected)
        result = parr <= other
        tm.assert_equal(result, expected)
        result = parr >= other
        tm.assert_equal(result, expected)
        result = parr != other
        tm.assert_equal(result, ~expected)
        result = parr < other
        tm.assert_equal(result, ~expected)
        result = parr > other
        tm.assert_equal(result, ~expected)
        # Comparison against the reversed sequence: only the midpoint matches
        other = other_box(pi[::-1])
        expected = np.array([False, False, True, False, False])
        expected = tm.box_expected(expected, xbox)
        result = parr == other
        tm.assert_equal(result, expected)
        expected = np.array([True, True, True, False, False])
        expected = tm.box_expected(expected, xbox)
        result = parr <= other
        tm.assert_equal(result, expected)
        expected = np.array([False, False, True, True, True])
        expected = tm.box_expected(expected, xbox)
        result = parr >= other
        tm.assert_equal(result, expected)
        expected = np.array([True, True, False, True, True])
        expected = tm.box_expected(expected, xbox)
        result = parr != other
        tm.assert_equal(result, expected)
        expected = np.array([True, True, False, False, False])
        expected = tm.box_expected(expected, xbox)
        result = parr < other
        tm.assert_equal(result, expected)
        expected = np.array([False, False, False, True, True])
        expected = tm.box_expected(expected, xbox)
        result = parr > other
        tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
    @pytest.mark.parametrize("other", ["2017", Period("2017", freq="D")])
    def test_eq(self, other):
        # Both a parseable string and a Period scalar compare elementwise
        idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
        expected = np.array([True, True, False])
        result = idx == other
        tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
2017,
[2017, 2017, 2017],
np.array([2017, 2017, 2017]),
np.array([2017, 2017, 2017], dtype=object),
pd.Index([2017, 2017, 2017]),
],
)
def test_eq_integer_disallowed(self, other):
# match Period semantics by not treating integers as Periods
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([False, False, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
msg = "|".join(
[
"not supported between instances of 'Period' and 'int'",
r"Invalid comparison between dtype=period\[D\] and ",
]
)
with pytest.raises(TypeError, match=msg):
idx < other
with pytest.raises(TypeError, match=msg):
idx > other
with pytest.raises(TypeError, match=msg):
idx <= other
with pytest.raises(TypeError, match=msg):
idx >= other
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = (
box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
)
pi = period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array in [pd.Index, pd.array] else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array in [pd.Index, pd.array] else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
# TODO: could also box idx?
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
exp = np.array([False, False, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == idx, exp)
exp = np.array([True, True, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != idx, exp)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > idx, exp)
exp = np.array([True, False, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < idx, exp)
exp = np.array([False, True, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= idx, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi_mismatched_freq(self, freq, box_with_array):
# GH#13200
# different base freq
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
msg = rf"Invalid comparison between dtype=period\[{freq}\] and Period"
with pytest.raises(TypeError, match=msg):
base <= Period("2011", freq="A")
with pytest.raises(TypeError, match=msg):
Period("2011", freq="A") >= base
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = r"Invalid comparison between dtype=period\[A-DEC\] and PeriodArray"
idx_msg = rev_msg if box_with_array in [tm.to_array, pd.array] else msg
with pytest.raises(TypeError, match=idx_msg):
base <= idx
# Different frequency
msg = rf"Invalid comparison between dtype=period\[{freq}\] and Period"
with pytest.raises(TypeError, match=msg):
base <= Period("2011", freq="4M")
with pytest.raises(TypeError, match=msg):
Period("2011", freq="4M") >= base
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Invalid comparison between dtype=period\[4M\] and PeriodArray"
idx_msg = rev_msg if box_with_array in [tm.to_array, pd.array] else msg
with pytest.raises(TypeError, match=idx_msg):
base <= idx
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
result = idx1 > Period("2011-02", freq=freq)
exp = np.array([False, False, False, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("2011-02", freq=freq) < idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 == Period("NaT", freq=freq)
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) == idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 != Period("NaT", freq=freq)
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) != idx1
tm.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
msg = rf"Invalid comparison between dtype=period\[{freq}\] and PeriodArray"
with pytest.raises(TypeError, match=msg):
idx1 > diff
result = idx1 == diff
expected = np.array([False, False, False, False], dtype=bool)
tm.assert_numpy_array_equal(result, expected)
# TODO: De-duplicate with test_pi_cmp_nat
@pytest.mark.parametrize("dtype", [object, None])
def test_comp_nat(self, dtype):
left = PeriodIndex([Period("2011-01-01"), pd.NaT, Period("2011-01-03")])
right = PeriodIndex([pd.NaT, pd.NaT, Period("2011-01-03")])
if dtype is not None:
left = left.astype(dtype)
right = right.astype(dtype)
result = left == right
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = left != right
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == right, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(left != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != left, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > left, expected)
class TestPeriodSeriesComparisons:
    """Comparison operators between two Series of Periods with mixed freqs."""

    def test_cmp_series_period_series_mixed_freq(self):
        # GH#13200
        lhs = Series(
            [
                Period("2011", freq="A"),
                Period("2011-02", freq="M"),
                Period("2013", freq="A"),
                Period("2011-04", freq="M"),
            ]
        )
        rhs = Series(
            [
                Period("2012", freq="A"),
                Period("2011-01", freq="M"),
                Period("2013", freq="A"),
                Period("2011-05", freq="M"),
            ]
        )

        # Each comparison op paired with its expected elementwise result;
        # freqs line up position-by-position, so no op should raise.
        cases = [
            (operator.eq, [False, False, True, False]),
            (operator.ne, [True, True, False, True]),
            (operator.gt, [False, True, False, False]),
            (operator.lt, [True, False, False, True]),
            (operator.ge, [False, True, True, False]),
            (operator.le, [True, False, True, True]),
        ]
        for op, expected in cases:
            tm.assert_series_equal(op(lhs, rhs), Series(expected))
class TestPeriodIndexSeriesComparisonConsistency:
    """Test PeriodIndex and Period Series Ops consistency"""

    # TODO: needs parametrization+de-duplication
    def _check(self, values, func, expected):
        # Test PeriodIndex and Period Series Ops consistency
        # Apply ``func`` to both the Index and a Series wrapping of
        # ``values`` and require identical results.
        idx = PeriodIndex(values)
        result = func(idx)

        # check that we don't pass an unwanted type to tm.assert_equal
        assert isinstance(expected, (pd.Index, np.ndarray))
        tm.assert_equal(result, expected)

        s = Series(values)
        result = func(s)
        exp = Series(expected, name=values.name)
        tm.assert_series_equal(result, exp)

    def test_pi_comp_period(self):
        """Scalar Period comparisons agree between Index and Series."""
        idx = PeriodIndex(
            ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
        )

        f = lambda x: x == Period("2011-03", freq="M")
        exp = np.array([False, False, True, False], dtype=np.bool_)
        self._check(idx, f, exp)
        f = lambda x: Period("2011-03", freq="M") == x
        self._check(idx, f, exp)

        f = lambda x: x != Period("2011-03", freq="M")
        exp = np.array([True, True, False, True], dtype=np.bool_)
        self._check(idx, f, exp)
        f = lambda x: Period("2011-03", freq="M") != x
        self._check(idx, f, exp)

        f = lambda x: Period("2011-03", freq="M") >= x
        exp = np.array([True, True, True, False], dtype=np.bool_)
        self._check(idx, f, exp)

        f = lambda x: x > Period("2011-03", freq="M")
        exp = np.array([False, False, False, True], dtype=np.bool_)
        self._check(idx, f, exp)

        # NOTE(review): this repeats the ``Period >= x`` check from above
        # verbatim — possibly a copy/paste slip for ``x >= Period`` /
        # ``Period <= x``; confirm against upstream before changing.
        f = lambda x: Period("2011-03", freq="M") >= x
        exp = np.array([True, True, True, False], dtype=np.bool_)
        self._check(idx, f, exp)

    def test_pi_comp_period_nat(self):
        """Same as above but with a NaT entry in the index."""
        idx = PeriodIndex(
            ["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
        )

        f = lambda x: x == Period("2011-03", freq="M")
        exp = np.array([False, False, True, False], dtype=np.bool_)
        self._check(idx, f, exp)
        f = lambda x: Period("2011-03", freq="M") == x
        self._check(idx, f, exp)

        # NaT never compares equal, even to NaT itself
        f = lambda x: x == pd.NaT
        exp = np.array([False, False, False, False], dtype=np.bool_)
        self._check(idx, f, exp)
        f = lambda x: pd.NaT == x
        self._check(idx, f, exp)

        f = lambda x: x != Period("2011-03", freq="M")
        exp = np.array([True, True, False, True], dtype=np.bool_)
        self._check(idx, f, exp)
        f = lambda x: Period("2011-03", freq="M") != x
        self._check(idx, f, exp)

        f = lambda x: x != pd.NaT
        exp = np.array([True, True, True, True], dtype=np.bool_)
        self._check(idx, f, exp)
        f = lambda x: pd.NaT != x
        self._check(idx, f, exp)

        f = lambda x: Period("2011-03", freq="M") >= x
        exp = np.array([True, False, True, False], dtype=np.bool_)
        self._check(idx, f, exp)

        f = lambda x: x < Period("2011-03", freq="M")
        exp = np.array([True, False, False, False], dtype=np.bool_)
        self._check(idx, f, exp)

        # Ordering against NaT is always False
        f = lambda x: x > pd.NaT
        exp = np.array([False, False, False, False], dtype=np.bool_)
        self._check(idx, f, exp)

        f = lambda x: pd.NaT >= x
        exp = np.array([False, False, False, False], dtype=np.bool_)
        self._check(idx, f, exp)
# ------------------------------------------------------------------
# Arithmetic
class TestPeriodFrameArithmetic:
    """Arithmetic between DataFrames of Periods and Period scalars."""

    def test_ops_frame_period(self):
        # GH#13043
        frame = pd.DataFrame(
            {
                "A": [Period("2015-01", freq="M"), Period("2015-02", freq="M")],
                "B": [Period("2014-01", freq="M"), Period("2014-02", freq="M")],
            }
        )
        for col in ("A", "B"):
            assert frame[col].dtype == "Period[M]"

        per = Period("2015-03", freq="M")
        step = per.freq

        # Subtracting Periods yields offset objects, so the resulting
        # columns carry object dtype (matching the original frame's dtype).
        diffs = pd.DataFrame(
            {
                "A": np.array([2 * step, 1 * step], dtype=object),
                "B": np.array([14 * step, 13 * step], dtype=object),
            }
        )
        tm.assert_frame_equal(per - frame, diffs)
        tm.assert_frame_equal(frame - per, -1 * diffs)

        other = pd.DataFrame(
            {
                "A": [Period("2015-05", freq="M"), Period("2015-06", freq="M")],
                "B": [Period("2015-05", freq="M"), Period("2015-06", freq="M")],
            }
        )
        for col in ("A", "B"):
            assert other[col].dtype == "Period[M]"

        diffs = pd.DataFrame(
            {
                "A": np.array([4 * step, 4 * step], dtype=object),
                "B": np.array([16 * step, 16 * step], dtype=object),
            }
        )
        tm.assert_frame_equal(other - frame, diffs)
        tm.assert_frame_equal(frame - other, -1 * diffs)
class TestPeriodIndexArithmetic:
# ---------------------------------------------------------------
# __add__/__sub__ with PeriodIndex
# PeriodIndex + other is defined for integers and timedelta-like others
# PeriodIndex - other is defined for integers, timedelta-like others,
# and PeriodIndex (with matching freq)
def test_parr_add_iadd_parr_raises(self, box_with_array):
rng = period_range("1/1/2000", freq="D", periods=5)
other = period_range("1/6/2000", freq="D", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
# An earlier implementation of PeriodIndex addition performed
# a set operation (union). This has since been changed to
# raise a TypeError. See GH#14164 and GH#13077 for historical
# reference.
msg = r"unsupported operand type\(s\) for \+: .* and .*"
with pytest.raises(TypeError, match=msg):
rng + other
with pytest.raises(TypeError, match=msg):
rng += other
def test_pi_sub_isub_pi(self):
# GH#20049
# For historical reference see GH#14164, GH#13077.
# PeriodIndex subtraction originally performed set difference,
# then changed to raise TypeError before being implemented in GH#20049
rng = period_range("1/1/2000", freq="D", periods=5)
other = period_range("1/6/2000", freq="D", periods=5)
off = rng.freq
expected = pd.Index([-5 * off] * 5)
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_sub_pi_with_nat(self):
rng = period_range("1/1/2000", freq="D", periods=5)
other = rng[1:].insert(0, pd.NaT)
assert other[1:].equals(rng[1:])
result = rng - other
off = rng.freq
expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off])
tm.assert_index_equal(result, expected)
def test_parr_sub_pi_mismatched_freq(self, box_with_array):
rng = period_range("1/1/2000", freq="D", periods=5)
other = period_range("1/6/2000", freq="H", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
msg = r"Input has different freq=[HD] from PeriodArray\(freq=[DH]\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_sub_n_gt_1_ticks(self, tick_classes, n):
# GH 23878
p1_d = "19910905"
p2_d = "19920406"
p1 = PeriodIndex([p1_d], freq=tick_classes(n))
p2 = PeriodIndex([p2_d], freq=tick_classes(n))
expected = PeriodIndex([p2_d], freq=p2.freq.base) - PeriodIndex(
[p1_d], freq=p1.freq.base
)
tm.assert_index_equal((p2 - p1), expected)
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@pytest.mark.parametrize(
"offset, kwd_name",
[
(pd.offsets.YearEnd, "month"),
(pd.offsets.QuarterEnd, "startingMonth"),
(pd.offsets.MonthEnd, None),
(pd.offsets.Week, "weekday"),
],
)
def test_sub_n_gt_1_offsets(self, offset, kwd_name, n):
# GH 23878
kwds = {kwd_name: 3} if kwd_name is not None else {}
p1_d = "19910905"
p2_d = "19920406"
freq = offset(n, normalize=False, **kwds)
p1 = PeriodIndex([p1_d], freq=freq)
p2 = PeriodIndex([p2_d], freq=freq)
result = p2 - p1
expected = PeriodIndex([p2_d], freq=freq.base) - PeriodIndex(
[p1_d], freq=freq.base
)
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
def test_parr_add_sub_float_raises(self, op, other, box_with_array):
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")
pi = dti.to_period("D")
pi = tm.box_expected(pi, box_with_array)
msg = (
r"unsupported operand type\(s\) for [+-]: .* and .*|"
"Concatenation operation is not implemented for NumPy arrays"
)
with pytest.raises(TypeError, match=msg):
op(pi, other)
@pytest.mark.parametrize(
"other",
[
# datetime scalars
Timestamp.now(),
Timestamp.now().to_pydatetime(),
Timestamp.now().to_datetime64(),
# datetime-like arrays
pd.date_range("2016-01-01", periods=3, freq="H"),
pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"),
pd.date_range("2016-01-01", periods=3, freq="S")._data,
pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data,
# Miscellaneous invalid types
],
)
def test_parr_add_sub_invalid(self, other, box_with_array):
# GH#23215
rng = period_range("1/1/2000", freq="D", periods=3)
rng = tm.box_expected(rng, box_with_array)
msg = (
r"(:?cannot add PeriodArray and .*)"
r"|(:?cannot subtract .* from (:?a\s)?.*)"
r"|(:?unsupported operand type\(s\) for \+: .* and .*)"
)
with pytest.raises(TypeError, match=msg):
rng + other
with pytest.raises(TypeError, match=msg):
other + rng
with pytest.raises(TypeError, match=msg):
rng - other
with pytest.raises(TypeError, match=msg):
other - rng
# -----------------------------------------------------------------
# __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]
def test_pi_add_sub_td64_array_non_tick_raises(self):
rng = period_range("1/1/2000", freq="Q", periods=3)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
msg = r"Cannot add or subtract timedelta64\[ns\] dtype from period\[Q-DEC\]"
with pytest.raises(TypeError, match=msg):
rng + tdarr
with pytest.raises(TypeError, match=msg):
tdarr + rng
with pytest.raises(TypeError, match=msg):
rng - tdarr
msg = r"cannot subtract PeriodArray from timedelta64\[ns\]"
with pytest.raises(TypeError, match=msg):
tdarr - rng
def test_pi_add_sub_td64_array_tick(self):
# PeriodIndex + Timedelta-like is allowed only with
# tick-like frequencies
rng = period_range("1/1/2000", freq="90D", periods=3)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = period_range("12/31/1999", freq="90D", periods=3)
result = rng + tdi
tm.assert_index_equal(result, expected)
result = rng + tdarr
tm.assert_index_equal(result, expected)
result = tdi + rng
tm.assert_index_equal(result, expected)
result = tdarr + rng
tm.assert_index_equal(result, expected)
expected = period_range("1/2/2000", freq="90D", periods=3)
result = rng - tdi
tm.assert_index_equal(result, expected)
result = rng - tdarr
tm.assert_index_equal(result, expected)
msg = r"cannot subtract .* from .*"
with pytest.raises(TypeError, match=msg):
tdarr - rng
with pytest.raises(TypeError, match=msg):
tdi - rng
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("tdi_freq", [None, "H"])
def test_parr_sub_td64array(self, box_with_array, tdi_freq, pi_freq):
box = box_with_array
xbox = box if box not in [pd.array, tm.to_array] else pd.Index
tdi = TimedeltaIndex(["1 hours", "2 hours"], freq=tdi_freq)
dti = Timestamp("2018-03-07 17:16:40") + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
td64obj = tm.box_expected(tdi, box)
if pi_freq == "H":
result = pi - td64obj
expected = (pi.to_timestamp("S") - tdi).to_period(pi_freq)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
# Subtract from scalar
result = pi[0] - td64obj
expected = (pi[0].to_timestamp("S") - tdi).to_period(pi_freq)
expected = tm.box_expected(expected, box)
tm.assert_equal(result, expected)
elif pi_freq == "D":
# Tick, but non-compatible
msg = "Input has different freq=None from PeriodArray"
with pytest.raises(IncompatibleFrequency, match=msg):
pi - td64obj
with pytest.raises(IncompatibleFrequency, match=msg):
pi[0] - td64obj
else:
# With non-Tick freq, we could not add timedelta64 array regardless
# of what its resolution is
msg = "Cannot add or subtract timedelta64"
with pytest.raises(TypeError, match=msg):
pi - td64obj
with pytest.raises(TypeError, match=msg):
pi[0] - td64obj
# -----------------------------------------------------------------
# operations with array/Index of DateOffset objects
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_add_offset_array(self, box):
# GH#18849
pi = PeriodIndex([Period("2015Q1"), Period("2016Q2")])
offs = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = PeriodIndex([Period("2015Q2"), Period("2015Q4")])
with tm.assert_produces_warning(PerformanceWarning):
res = pi + offs
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = offs + pi
tm.assert_index_equal(res2, expected)
unanchored = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
# addition/subtraction ops with incompatible offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
msg = r"Input cannot be converted to Period\(freq=Q-DEC\)"
with pytest.raises(IncompatibleFrequency, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
pi + unanchored
with pytest.raises(IncompatibleFrequency, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
unanchored + pi
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_sub_offset_array(self, box):
# GH#18824
pi = PeriodIndex([Period("2015Q1"), Period("2016Q2")])
other = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = PeriodIndex([pi[n] - other[n] for n in range(len(pi))])
with tm.assert_produces_warning(PerformanceWarning):
res = pi - other
tm.assert_index_equal(res, expected)
anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
msg = r"Input has different freq=-1M from Period\(freq=Q-DEC\)"
with pytest.raises(IncompatibleFrequency, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
pi - anchored
with pytest.raises(IncompatibleFrequency, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
anchored - pi
def test_pi_add_iadd_int(self, one):
# Variants of `one` for #19012
rng = period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng + one
expected = period_range("2000-01-01 10:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng += one
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_int(self, one):
"""
PeriodIndex.__sub__ and __isub__ with several representations of
the integer 1, e.g. int, np.int64, np.uint8, ...
"""
rng = period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng - one
expected = period_range("2000-01-01 08:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng -= one
tm.assert_index_equal(rng, expected)
@pytest.mark.parametrize("five", [5, np.array(5, dtype=np.int64)])
def test_pi_sub_intlike(self, five):
rng = period_range("2007-01", periods=50)
result = rng - five
exp = rng + (-five)
tm.assert_index_equal(result, exp)
def test_pi_sub_isub_offset(self):
# offset
# DateOffset
rng = period_range("2014", "2024", freq="A")
result = rng - pd.offsets.YearEnd(5)
expected = period_range("2009", "2019", freq="A")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
rng = period_range("2014-01", "2016-12", freq="M")
result = rng - pd.offsets.MonthEnd(5)
expected = period_range("2013-08", "2016-07", freq="M")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
@pytest.mark.parametrize("transpose", [True, False])
def test_pi_add_offset_n_gt1(self, box_with_array, transpose):
# GH#23215
# add offset to PeriodIndex with freq.n > 1
per = Period("2016-01", freq="2M")
pi = PeriodIndex([per])
expected = PeriodIndex(["2016-03"], freq="2M")
pi = tm.box_expected(pi, box_with_array, transpose=transpose)
expected = tm.box_expected(expected, box_with_array, transpose=transpose)
result = pi + per.freq
tm.assert_equal(result, expected)
result = per.freq + pi
tm.assert_equal(result, expected)
def test_pi_add_offset_n_gt1_not_divisible(self, box_with_array):
# GH#23215
# PeriodIndex with freq.n > 1 add offset with offset.n % freq.n != 0
pi = PeriodIndex(["2016-01"], freq="2M")
expected = PeriodIndex(["2016-04"], freq="2M")
pi = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = pi + to_offset("3M")
tm.assert_equal(result, expected)
result = to_offset("3M") + pi
tm.assert_equal(result, expected)
# ---------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_pi_add_intarray(self, int_holder, op):
# GH#19959
pi = PeriodIndex([Period("2015Q1"), Period("NaT")])
other = int_holder([4, -1])
result = op(pi, other)
expected = PeriodIndex([Period("2016Q1"), Period("NaT")])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_pi_sub_intarray(self, int_holder):
# GH#19959
pi = PeriodIndex([Period("2015Q1"), Period("NaT")])
other = int_holder([4, -1])
result = pi - other
expected = PeriodIndex([Period("2014Q1"), Period("NaT")])
tm.assert_index_equal(result, expected)
msg = r"bad operand type for unary -: 'PeriodArray'"
with pytest.raises(TypeError, match=msg):
other - pi
# ---------------------------------------------------------------
# Timedelta-like (timedelta, timedelta64, Timedelta, Tick)
# TODO: Some of these are misnomers because of non-Tick DateOffsets
def test_pi_add_timedeltalike_minute_gt1(self, three_days):
# GH#23031 adding a time-delta-like offset to a PeriodArray that has
# minute frequency with n != 1. A more general case is tested below
# in test_pi_add_timedeltalike_tick_gt1, but here we write out the
# expected result more explicitly.
other = three_days
rng = period_range("2014-05-01", periods=3, freq="2D")
expected = PeriodIndex(["2014-05-04", "2014-05-06", "2014-05-08"], freq="2D")
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
# subtraction
expected = PeriodIndex(["2014-04-28", "2014-04-30", "2014-05-02"], freq="2D")
result = rng - other
tm.assert_index_equal(result, expected)
msg = (
r"(:?bad operand type for unary -: 'PeriodArray')"
r"|(:?cannot subtract PeriodArray from timedelta64\[[hD]\])"
)
with pytest.raises(TypeError, match=msg):
other - rng
@pytest.mark.parametrize("freqstr", ["5ns", "5us", "5ms", "5s", "5T", "5h", "5d"])
def test_pi_add_timedeltalike_tick_gt1(self, three_days, freqstr):
# GH#23031 adding a time-delta-like offset to a PeriodArray that has
# tick-like frequency with n != 1
other = three_days
rng = period_range("2014-05-01", periods=6, freq=freqstr)
expected = period_range(rng[0] + other, periods=6, freq=freqstr)
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
# subtraction
expected = period_range(rng[0] - other, periods=6, freq=freqstr)
result = rng - other
tm.assert_index_equal(result, expected)
msg = (
r"(:?bad operand type for unary -: 'PeriodArray')"
r"|(:?cannot subtract PeriodArray from timedelta64\[[hD]\])"
)
with pytest.raises(TypeError, match=msg):
other - rng
def test_pi_add_iadd_timedeltalike_daily(self, three_days):
# Tick
other = three_days
rng = period_range("2014-05-01", "2014-05-15", freq="D")
expected = period_range("2014-05-04", "2014-05-18", freq="D")
result = rng + other
tm.assert_index_equal(result, expected)
rng += other
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_timedeltalike_daily(self, three_days):
# Tick-like 3 Days
other = three_days
rng = period_range("2014-05-01", "2014-05-15", freq="D")
expected = period_range("2014-04-28", "2014-05-12", freq="D")
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_daily(self, not_daily):
other = not_daily
rng = period_range("2014-05-01", "2014-05-15", freq="D")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=D\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_pi_add_iadd_timedeltalike_hourly(self, two_hours):
other = two_hours
rng = period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
expected = period_range("2014-01-01 12:00", "2014-01-05 12:00", freq="H")
result = rng + other
tm.assert_index_equal(result, expected)
rng += other
tm.assert_index_equal(rng, expected)
def test_pi_add_timedeltalike_mismatched_freq_hourly(self, not_hourly):
other = not_hourly
rng = period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=H\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
def test_pi_sub_isub_timedeltalike_hourly(self, two_hours):
other = two_hours
rng = period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
expected = period_range("2014-01-01 08:00", "2014-01-05 08:00", freq="H")
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_add_iadd_timedeltalike_annual(self):
# offset
# DateOffset
rng = period_range("2014", "2024", freq="A")
result = rng + pd.offsets.YearEnd(5)
expected = period_range("2019", "2029", freq="A")
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_annual(self, mismatched_freq):
other = mismatched_freq
rng = period_range("2014", "2024", freq="A")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=A-DEC\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_pi_add_iadd_timedeltalike_M(self):
rng = period_range("2014-01", "2016-12", freq="M")
expected = period_range("2014-06", "2017-05", freq="M")
result = rng + pd.offsets.MonthEnd(5)
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_monthly(self, mismatched_freq):
other = mismatched_freq
rng = period_range("2014-01", "2016-12", freq="M")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=M\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
@pytest.mark.parametrize("transpose", [True, False])
def test_parr_add_sub_td64_nat(self, box_with_array, transpose):
# GH#23320 special handling for timedelta64("NaT")
pi = period_range("1994-04-01", periods=9, freq="19D")
other = np.timedelta64("NaT")
expected = PeriodIndex(["NaT"] * 9, freq="19D")
obj = tm.box_expected(pi, box_with_array, transpose=transpose)
expected = tm.box_expected(expected, box_with_array, transpose=transpose)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = r"cannot subtract .* from .*"
with pytest.raises(TypeError, match=msg):
other - obj
    @pytest.mark.parametrize(
        "other",
        [
            np.array(["NaT"] * 9, dtype="m8[ns]"),
            TimedeltaArray._from_sequence(["NaT"] * 9),
        ],
    )
    def test_parr_add_sub_tdt64_nat_array(self, box_with_array, other):
        # Array variant of the td64-NaT test: an all-NaT timedelta array
        # (ndarray or TimedeltaArray) propagates NaT elementwise on +/-.
        pi = period_range("1994-04-01", periods=9, freq="19D")
        expected = PeriodIndex(["NaT"] * 9, freq="19D")
        obj = tm.box_expected(pi, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = obj + other
        tm.assert_equal(result, expected)
        result = other + obj
        tm.assert_equal(result, expected)
        result = obj - other
        tm.assert_equal(result, expected)
        msg = r"cannot subtract .* from .*"
        with pytest.raises(TypeError, match=msg):
            other - obj
# ---------------------------------------------------------------
# Unsorted
    def test_parr_add_sub_index(self):
        # Check that PeriodArray defers to Index on arithmetic ops
        # (array - index must equal index - index).
        pi = period_range("2000-12-31", periods=3)
        parr = pi.array
        result = parr - pi
        expected = pi - pi
        tm.assert_index_equal(result, expected)
    def test_parr_add_sub_object_array(self):
        # Mixed object-dtype offsets (Timedelta, DateOffset, plain int)
        # applied elementwise to a PeriodArray; the object-dtype fallback
        # emits a PerformanceWarning.
        pi = period_range("2000-12-31", periods=3, freq="D")
        parr = pi.array
        other = np.array([Timedelta(days=1), pd.offsets.Day(2), 3])
        with tm.assert_produces_warning(PerformanceWarning):
            result = parr + other
        expected = PeriodIndex(
            ["2001-01-01", "2001-01-03", "2001-01-05"], freq="D"
        ).array
        tm.assert_equal(result, expected)
        with tm.assert_produces_warning(PerformanceWarning):
            result = parr - other
        expected = PeriodIndex(["2000-12-30"] * 3, freq="D").array
        tm.assert_equal(result, expected)
class TestPeriodSeriesArithmetic:
    """Arithmetic between Series[Period] and timedelta/Period scalars."""

    def test_ops_series_timedelta(self):
        # GH#13043: a Period[D] Series shifted by a one-day Timedelta or
        # Day offset, in both operand orders, yields the next day's periods.
        ser = Series(
            [Period("2015-01-01", freq="D"), Period("2015-01-02", freq="D")],
            name="xxx",
        )
        assert ser.dtype == "Period[D]"
        expected = Series(
            [Period("2015-01-02", freq="D"), Period("2015-01-03", freq="D")],
            name="xxx",
        )
        result = ser + Timedelta("1 days")
        tm.assert_series_equal(result, expected)
        result = Timedelta("1 days") + ser
        tm.assert_series_equal(result, expected)
        result = ser + pd.tseries.offsets.Day()
        tm.assert_series_equal(result, expected)
        result = pd.tseries.offsets.Day() + ser
        tm.assert_series_equal(result, expected)

    def test_ops_series_period(self):
        # GH#13043: Period - Series[Period] (and Series - Series) produces
        # offset multiples with object dtype.
        ser = Series(
            [Period("2015-01-01", freq="D"), Period("2015-01-02", freq="D")],
            name="xxx",
        )
        assert ser.dtype == "Period[D]"
        per = Period("2015-01-10", freq="D")
        off = per.freq
        # dtype will be object because of original dtype
        expected = Series([9 * off, 8 * off], name="xxx", dtype=object)
        tm.assert_series_equal(per - ser, expected)
        tm.assert_series_equal(ser - per, -1 * expected)
        s2 = Series(
            [Period("2015-01-05", freq="D"), Period("2015-01-04", freq="D")],
            name="xxx",
        )
        assert s2.dtype == "Period[D]"
        expected = Series([4 * off, 2 * off], name="xxx", dtype=object)
        tm.assert_series_equal(s2 - ser, expected)
        tm.assert_series_equal(ser - s2, -1 * expected)
class TestPeriodIndexSeriesMethods:
    """Test PeriodIndex and Period Series Ops consistency"""

    def _check(self, values, func, expected):
        # Apply `func` to both a PeriodIndex and a Series built from the
        # same values; results must match (Series keeps the index's name).
        idx = PeriodIndex(values)
        result = func(idx)
        tm.assert_equal(result, expected)
        ser = Series(values)
        result = func(ser)
        exp = Series(expected, name=values.name)
        tm.assert_series_equal(result, exp)

    def test_pi_ops(self):
        # Integer add/sub shifts by whole periods; subtracting a Period
        # yields offset multiples, direction depending on operand order.
        idx = PeriodIndex(
            ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
        )
        expected = PeriodIndex(
            ["2011-03", "2011-04", "2011-05", "2011-06"], freq="M", name="idx"
        )
        self._check(idx, lambda x: x + 2, expected)
        self._check(idx, lambda x: 2 + x, expected)
        self._check(idx + 2, lambda x: x - 2, idx)
        result = idx - Period("2011-01", freq="M")
        off = idx.freq
        exp = pd.Index([0 * off, 1 * off, 2 * off, 3 * off], name="idx")
        tm.assert_index_equal(result, exp)
        result = Period("2011-01", freq="M") - idx
        exp = pd.Index([0 * off, -1 * off, -2 * off, -3 * off], name="idx")
        tm.assert_index_equal(result, exp)

    # Invalid scalar operands ("str", 1.5) must raise TypeError for every
    # combination of operator (+, -, np.add, np.subtract) and operand order.
    @pytest.mark.parametrize("ng", ["str", 1.5])
    @pytest.mark.parametrize(
        "func",
        [
            lambda obj, ng: obj + ng,
            lambda obj, ng: ng + obj,
            lambda obj, ng: obj - ng,
            lambda obj, ng: ng - obj,
            lambda obj, ng: np.add(obj, ng),
            lambda obj, ng: np.add(ng, obj),
            lambda obj, ng: np.subtract(obj, ng),
            lambda obj, ng: np.subtract(ng, obj),
        ],
    )
    def test_parr_ops_errors(self, ng, func, box_with_array):
        idx = PeriodIndex(
            ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
        )
        obj = tm.box_expected(idx, box_with_array)
        msg = (
            r"unsupported operand type\(s\)|can only concatenate|"
            r"must be str|object to str implicitly"
        )
        with pytest.raises(TypeError, match=msg):
            func(obj, ng)

    def test_pi_ops_nat(self):
        # NaT entries stay NaT through integer shifts; with a multiplied
        # freq ("2M") the integer is in units of the multiplied period.
        idx = PeriodIndex(
            ["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx"
        )
        expected = PeriodIndex(
            ["2011-03", "2011-04", "NaT", "2011-06"], freq="M", name="idx"
        )
        self._check(idx, lambda x: x + 2, expected)
        self._check(idx, lambda x: 2 + x, expected)
        self._check(idx, lambda x: np.add(x, 2), expected)
        self._check(idx + 2, lambda x: x - 2, idx)
        self._check(idx + 2, lambda x: np.subtract(x, 2), idx)
        # freq with mult
        idx = PeriodIndex(
            ["2011-01", "2011-02", "NaT", "2011-04"], freq="2M", name="idx"
        )
        expected = PeriodIndex(
            ["2011-07", "2011-08", "NaT", "2011-10"], freq="2M", name="idx"
        )
        self._check(idx, lambda x: x + 3, expected)
        self._check(idx, lambda x: 3 + x, expected)
        self._check(idx, lambda x: np.add(x, 3), expected)
        self._check(idx + 3, lambda x: x - 3, idx)
        self._check(idx + 3, lambda x: np.subtract(x, 3), idx)

    def test_pi_ops_array_int(self):
        # Elementwise shift by an integer ndarray; NaT is unaffected.
        idx = PeriodIndex(
            ["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx"
        )
        f = lambda x: x + np.array([1, 2, 3, 4])
        exp = PeriodIndex(
            ["2011-02", "2011-04", "NaT", "2011-08"], freq="M", name="idx"
        )
        self._check(idx, f, exp)
        f = lambda x: np.add(x, np.array([4, -1, 1, 2]))
        exp = PeriodIndex(
            ["2011-05", "2011-01", "NaT", "2011-06"], freq="M", name="idx"
        )
        self._check(idx, f, exp)
        f = lambda x: x - np.array([1, 2, 3, 4])
        exp = PeriodIndex(
            ["2010-12", "2010-12", "NaT", "2010-12"], freq="M", name="idx"
        )
        self._check(idx, f, exp)
        f = lambda x: np.subtract(x, np.array([3, 2, 3, -2]))
        exp = PeriodIndex(
            ["2010-10", "2010-12", "NaT", "2011-06"], freq="M", name="idx"
        )
        self._check(idx, f, exp)

    def test_pi_ops_offset(self):
        # Day offsets compatible with a daily index shift elementwise.
        idx = PeriodIndex(
            ["2011-01-01", "2011-02-01", "2011-03-01", "2011-04-01"],
            freq="D",
            name="idx",
        )
        f = lambda x: x + pd.offsets.Day()
        exp = PeriodIndex(
            ["2011-01-02", "2011-02-02", "2011-03-02", "2011-04-02"],
            freq="D",
            name="idx",
        )
        self._check(idx, f, exp)
        f = lambda x: x + pd.offsets.Day(2)
        exp = PeriodIndex(
            ["2011-01-03", "2011-02-03", "2011-03-03", "2011-04-03"],
            freq="D",
            name="idx",
        )
        self._check(idx, f, exp)
        f = lambda x: x - pd.offsets.Day(2)
        exp = PeriodIndex(
            ["2010-12-30", "2011-01-30", "2011-02-27", "2011-03-30"],
            freq="D",
            name="idx",
        )
        self._check(idx, f, exp)

    def test_pi_offset_errors(self):
        # Hour offsets are incompatible with a daily index and must raise
        # IncompatibleFrequency for index and Series alike.
        idx = PeriodIndex(
            ["2011-01-01", "2011-02-01", "2011-03-01", "2011-04-01"],
            freq="D",
            name="idx",
        )
        ser = Series(idx)
        # Series op is applied per Period instance, thus error is raised
        # from Period
        for obj in [idx, ser]:
            msg = r"Input has different freq=2H from Period.*?\(freq=D\)"
            with pytest.raises(IncompatibleFrequency, match=msg):
                obj + pd.offsets.Hour(2)
            with pytest.raises(IncompatibleFrequency, match=msg):
                pd.offsets.Hour(2) + obj
            msg = r"Input has different freq=-2H from Period.*?\(freq=D\)"
            with pytest.raises(IncompatibleFrequency, match=msg):
                obj - pd.offsets.Hour(2)

    def test_pi_sub_period(self):
        # GH#13071
        # Subtracting a Period (either order, operator or ufunc) yields
        # signed offset multiples; subtracting NaT yields an all-NaT
        # TimedeltaIndex with matching freq.
        idx = PeriodIndex(
            ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
        )
        result = idx - Period("2012-01", freq="M")
        off = idx.freq
        exp = pd.Index([-12 * off, -11 * off, -10 * off, -9 * off], name="idx")
        tm.assert_index_equal(result, exp)
        result = np.subtract(idx, Period("2012-01", freq="M"))
        tm.assert_index_equal(result, exp)
        result = Period("2012-01", freq="M") - idx
        exp = pd.Index([12 * off, 11 * off, 10 * off, 9 * off], name="idx")
        tm.assert_index_equal(result, exp)
        result = np.subtract(Period("2012-01", freq="M"), idx)
        tm.assert_index_equal(result, exp)
        exp = TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name="idx")
        result = idx - Period("NaT", freq="M")
        tm.assert_index_equal(result, exp)
        assert result.freq == exp.freq
        result = Period("NaT", freq="M") - idx
        tm.assert_index_equal(result, exp)
        assert result.freq == exp.freq

    def test_pi_sub_pdnat(self):
        # GH#13071
        # pd.NaT on either side of - yields an all-NaT TimedeltaIndex.
        idx = PeriodIndex(
            ["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx"
        )
        exp = TimedeltaIndex([pd.NaT] * 4, name="idx")
        tm.assert_index_equal(pd.NaT - idx, exp)
        tm.assert_index_equal(idx - pd.NaT, exp)

    def test_pi_sub_period_nat(self):
        # GH#13071
        # As test_pi_sub_period but with a NaT entry inside the index.
        idx = PeriodIndex(
            ["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
        )
        result = idx - Period("2012-01", freq="M")
        off = idx.freq
        exp = pd.Index([-12 * off, pd.NaT, -10 * off, -9 * off], name="idx")
        tm.assert_index_equal(result, exp)
        result = Period("2012-01", freq="M") - idx
        exp = pd.Index([12 * off, pd.NaT, 10 * off, 9 * off], name="idx")
        tm.assert_index_equal(result, exp)
        exp = TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name="idx")
        tm.assert_index_equal(idx - Period("NaT", freq="M"), exp)
        tm.assert_index_equal(Period("NaT", freq="M") - idx, exp)

    @pytest.mark.parametrize("scalars", ["a", False, 1, 1.0, None])
    def test_comparison_operations(self, scalars):
        # GH 28980
        # == against a non-Period scalar is elementwise False, not an error.
        expected = Series([False, False])
        s = Series([Period("2019"), Period("2020")], dtype="period[A-DEC]")
        result = s == scalars
        tm.assert_series_equal(result, expected)
| {
"content_hash": "b4afa6bfa0c5ec868deba038321731fc",
"timestamp": "",
"source": "github",
"line_count": 1544,
"max_line_length": 88,
"avg_line_length": 36.31541450777202,
"alnum_prop": 0.5568118991992296,
"repo_name": "gfyoung/pandas",
"id": "5f93442cae4f6f4314f9d7a95df91622586d15d2",
"size": "56197",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pandas/tests/arithmetic/test_period.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4912"
},
{
"name": "C",
"bytes": "404689"
},
{
"name": "C++",
"bytes": "17194"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "574"
},
{
"name": "Python",
"bytes": "14336547"
},
{
"name": "Shell",
"bytes": "29174"
},
{
"name": "Smarty",
"bytes": "2069"
}
],
"symlink_target": ""
} |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="sci_instr",
version="0.0.1",
author="Tim Hellwig",
author_email="tim.hellwig@gmail.com",
description="VISA communication with scientific instruments",
url="https://github.com/timhellwig/sci_instr",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
) | {
"content_hash": "7f579ba7b29b88f6e2f2c2f49b6730d3",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 65,
"avg_line_length": 28.63157894736842,
"alnum_prop": 0.6452205882352942,
"repo_name": "timhellwig/sci_instr",
"id": "e22770af0f719894c9f2625113bddab8a9b74072",
"size": "544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13252"
}
],
"symlink_target": ""
} |
import os
import sys
from db_pool import *
import re
import datetime
import subprocess
from PackageControl.PackageController import *
from MetaControl.MetaController import *
from Sample import *
from Launcher import *
import csv
import json
import string
import random
import re
import logging
import time
import traceback
def get_eset_sig_from_scan(scans):
    """Return the ESET engine's result from a list of AV scan dicts.

    Returns None when *scans* is None, the matching entry's 'result' when
    an ESET engine name is found, and '' when no ESET entry exists.
    """
    if scans is None:
        return None
    eset_engine_names = ('ESET-NOD32', 'NOD32', 'NOD32v2')
    for entry in scans:
        if entry.get('name') in eset_engine_names:
            return entry.get('result')
    return ''
def is_sha1(maybe_sha):
    """Return True if *maybe_sha* looks like a SHA-1 hex digest.

    A valid value is exactly 40 characters that parse as hexadecimal.
    (Cleanup: the parsed integer was previously bound to an unused local.)
    """
    if len(maybe_sha) != 40:
        return False
    try:
        int(maybe_sha, 16)
    except ValueError:
        return False
    return True
def valid_hash(some_hash):
    """Return True for a plausible hash hex digest, False otherwise.

    Accepted lengths are 32 (md5), 40 (sha1), 64 (sha256) and 128 (sha512).
    Returns None (falsy) when *some_hash* is None, preserving the original
    contract. (Cleanup: the parsed integer was bound to an unused local.)
    """
    if some_hash is None:
        return None
    if len(some_hash) not in [32, 40, 64, 128]:
        return False
    try:
        int(some_hash, 16)
    except ValueError:
        return False
    return True
def number_of_jobs_on_queue(queue_name):
    """Return the number of queued rq jobs on *queue_name* (0 on empty/parse error).

    Raises ValueError unless *queue_name* is lowercase letters/underscores
    only, because the name is interpolated into a shell pipeline below.
    """
    if re.match("^[a-z_]+$", queue_name) is None:
        raise ValueError("invalid queue_name")
    command = ["bash", "-c", "rq info --url redis://" + envget('redis.host') + ":" + str(envget(
        'redis.port')) + " --raw | grep -E -e \"^queue " + queue_name + " [0-9]+$\" | sed \"s/queue " + queue_name + " //g\" | tr -d \"\\n\""]
    output = call_with_output(command)
    logging.debug("output=" + str(output))
    # Merged the two previous empty-output checks (empty str / '') into one
    # falsy test, which also covers empty bytes on Python 3.
    if not output:
        return 0
    try:
        return int(output)
    # FIX: was `except ValueError, e` (Python-2-only syntax); `as` works on
    # Python 2.6+ and Python 3.
    except ValueError as e:
        logging.exception("ValueError in int(output): " + str(e))
        return 0
def is_iterable(s):
    """Return True when *s* supports iteration (iter() succeeds)."""
    try:
        iter(s)
        return True
    except TypeError:
        return False
def is_printable(str_value):
    """Return True when every character of *str_value* is in string.printable."""
    return all(ch in string.printable for ch in str_value)
def str_to_hex(str_value):
    """Render *str_value* as space-separated uppercase hex character codes.

    Returns "" for None.
    """
    if str_value is None:
        return ""
    return " ".join("%02X" % ord(ch) for ch in str_value)
def replace_non_printable_with_dot(str_value):
    """Return *str_value* with every non-printable character replaced by '.'.

    A character is printable iff it appears in string.printable (this
    inlines the single-character case of is_printable()).
    """
    return "".join(ch if ch in string.printable else u"." for ch in str_value)
def display_with_hex(str_value):
    """Format *str_value* as 'HEX BYTES [dotted text]'; '' when None."""
    if str_value is None:
        return ""
    dotted = replace_non_printable_with_dot(str_value)
    return "%s [%s]" % (str_to_hex(str_value), dotted)
def clean_tree(s):
    """Recursively convert an arbitrary structure into a printable form.

    dicts and lists are rewritten in place (then returned); non-printable
    strings are rendered via display_with_hex(); ints gain a hex suffix
    like "255 (0xff)"; datetimes become strings. Written for Python 2:
    `unicode` and `long` are py2-only names.
    """
    if type(s) == dict:
        # Re-assigning existing keys only, so iterating s.keys() is safe.
        for child in s.keys():
            s[child] = clean_tree(s[child])
    elif type(s) == list:
        for index, value in enumerate(s):
            s[index] = clean_tree(value)
    elif type(s) == str or type(s) == unicode:
        if(not is_printable(s)):
            return display_with_hex(s)
        else:
            return s
    elif isinstance(s, (int, long, float)):
        if isinstance(s, (int, long)):
            # Integers are annotated with their hex form, e.g. "255 (0xff)".
            return str(s) + " (" + str(hex(s)) + ")"
        else:
            return s
    elif isinstance(s, datetime.datetime):
        return str(s)
    elif s is None:
        return s
    else:
        # Unknown types: hex-dump if iterable-but-unprintable, else str().
        if(is_iterable(s) and not is_printable(s)):
            return display_with_hex(s)
        else:
            return str(s)
    return s
# This function receives a dictionary like
# {"key1": { "something": 1},
# "key2": { "something": 2}}
# and returns
# [ {"name": "key1", "something": 1 },
# {"name": "key2", "something": 2 }]
# This is useful for converting the format
# VirusTotal API sends the AV scans into
# something easily searchable by mongo.
def key_dict_clean(json):
    """Flatten {key: subdict} into a list of subdicts tagged with 'name'.

    Each value dict gains a "name" entry holding its original key (the
    input's value dicts are mutated). Returns None for None input. This
    makes VirusTotal-style AV scan maps searchable by Mongo.
    """
    if json is None:
        return None
    flattened = []
    for name, entry in json.items():
        entry["name"] = name
        flattened.append(entry)
    return flattened
# Replace dot with _
# in dictionaries keys
# in order to save them in mongo
def rec_key_replace(obj):
    """Recursively rewrite dict keys, replacing '.' with '_' (Mongo-safe).

    Non-dict values are returned unchanged.
    """
    if not isinstance(obj, dict):
        return obj
    return {key.replace('.', '_'): rec_key_replace(value)
            for key, value in obj.items()}
# This function receives a dictionary like
# {"key1": ["something1", "something2"],
# "key2": ["something1", "something2"]}
# and returns
# [ {"name": "key1", "values": ["something1, something2"]},
# {"name": "key2", "values": ["something1, something2"]} ]
# This is useful for converting the format
# VirusTotal API sends the imports into
# something easily searchable by mongo.
def key_list_clean(json):
    """Convert {key: values} into [{'name': key, 'values': values}, ...].

    Returns None for None input. Used to make VirusTotal-style import maps
    searchable by Mongo.
    """
    if json is None:
        return None
    return [{"name": key, "values": values} for key, values in json.items()]
def to_bool(string):
    """Parse a string into a bool.

    None and the literal "false" (any case, surrounding whitespace ignored)
    map to False; everything else falls through to bool(), so note that
    "0" is truthy here (preserved historical behavior).
    NOTE: the parameter shadows the `string` module inside this function.
    """
    if string is None:
        return False
    string = string.strip().lower()
    # The original also re-checked `string is None` here; after strip() the
    # value is always a str, so that branch was dead code and is removed.
    if string == "false":
        return False
    return bool(string)
def jsonize(data):
    """Serialize *data* as pretty-printed JSON (4-space indent, keys in insertion order)."""
    return json.dumps(data, indent=4, sort_keys=False)
# Checks if the meta has a date. If it doesn't
# it updates it. If a date is found, the oldest
# date will get saved.
def update_date(file_id, date):
    """Record *date* as first-seen for *file_id* via MetaController.

    No-op when either argument is None. The meta layer keeps the oldest
    date on repeated calls. (Cleanup: the return value of save_first_seen
    was bound to an unused local.)
    """
    if file_id is None or date is None:
        return
    MetaController().save_first_seen(file_id, date)
def log_event(event, file_hash, comments=""):
    """Append one audit row (ISO timestamp, event, hash, comments) to logs.csv."""
    row = [str(datetime.datetime.now().isoformat()),
           str(event), str(file_hash), str(comments)]
    with open("logs.csv", "a+") as logfile:
        csv.writer(logfile).writerow(row)
def change_date_to_str(res):
    """Stringify the known date fields of *res* in place and return it.

    Returns None when *res* is None; fields that are absent or None are
    left untouched. (Cleanup: replaced the `if ... is None: pass / else:`
    shape with a direct positive condition.)
    """
    if res is None:
        return None
    for date_key in ("date", "upload_date", "date_start", "date_end", "date_enqueued"):
        value = res.get(date_key)
        if value is not None:
            res[date_key] = str(value)
    return res
def process_file(file_hash, force=False):
    """Load the stored binary for *file_hash* (sha1) and launch its analysis.

    Returns 0 on success and None when the package store has no such file.
    Raises ValueError for anything that is not a sha1 hex digest. With
    force=True the stored analysis version is reset so modules re-run.
    """
    if not is_sha1(file_hash):
        raise ValueError("process_file only accepts sha1")
    logging.debug("process_file(" + str(file_hash) + ")")
    binary = PackageController().getFile(file_hash)
    if binary is None:
        logging.warning(
            "Error: process_file(" + str(file_hash) + "): pc.getFile returned None")
        return None
    sample = Sample()
    sample.setID(file_hash)
    sample.setBinary(binary)
    if force:
        sample.setStorageVersion({})
    Launcher().launchAnalysisByID(sample)
    log_event("process", str(file_hash))
    return 0
def execute(comand):
    """Run *comand* through the shell (os.popen) and return its stdout."""
    stream = os.popen(comand)
    captured = stream.read()
    stream.close()
    return captured
def recursive_read(object):
    """Return every file path under *object* (file or directory tree).

    A plain file yields a one-element list; a directory is walked
    recursively; anything else logs an error and returns None.
    """
    if os.path.isfile(object):
        return [object]
    if os.path.isdir(object):
        collected = []
        for root, _dirs, names in os.walk(object):
            collected.extend(os.path.join(root, name) for name in names)
        return collected
    logging.error("You must supply a file or directory!")
    return None
def call_with_output(array):
    """Run *array* (an argv list) and return its captured stdout; stderr is discarded."""
    proc = subprocess.Popen(array, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout_data, _stderr_data = proc.communicate()
    return stdout_data
# strip and change to lowercase
def clean_hash(hash):
    """Normalize *hash*: strip, lowercase, keep the first run of hex chars.

    Returns None for None/empty input and "" when no hex run exists.
    """
    if not hash:
        return None
    normalized = hash.strip().lower()
    match = re.search('[a-f0-9]+', normalized)
    return match.group(0) if match else ""
# clean hashes and search in the meta collection.
# returns file_id if it was found. None if it was not.
def get_file_id(hash):
    """Resolve any supported hash (md5/sha1/sha256) to the stored sha1 file_id.

    Returns None when the hash is invalid or not present in meta.
    BUG FIX: valid_hash() also accepts 128-char (sha512-length) digests,
    for which the original fell through every branch and crashed with
    UnboundLocalError on `ret`; those now return None cleanly.
    """
    hash = clean_hash(hash)
    if not valid_hash(hash):
        return None
    lookup_type = {32: "md5", 40: "sha1", 64: "sha2"}.get(len(hash))
    if lookup_type is None:
        # 128-char hashes pass valid_hash() but have no meta index to query.
        return None
    ret = search_by_hash_and_type(hash, lookup_type)
    if ret is not None and ret[0] is not None:
        return ret[0].get('hash').get('sha1')
    return None
# Given a hash and a type (md5 or file_id (sha1))
# it will search in meta collection.
def search_by_hash_and_type(hash, type):
    """Look up *hash* of *type* ('md5' | 'sha1' | 'sha2') in db.meta_container.

    Returns a limit-1 cursor holding the match, or None when nothing is
    found. Raises ValueError for an unsupported *type*.
    BUG FIX: the original compared strings with `is not`, which depends on
    CPython string interning rather than equality (and emits SyntaxWarning
    on modern Pythons); it also had an unreachable `return None` after the
    raise. Both are corrected here.
    """
    if type not in ("md5", "sha1", "sha2"):
        raise ValueError("type is not valid. (search_by_hash_and_type)")
    search = {'$and': [{'hash.' + type: hash}]}
    retrieve = {'hash.sha1': 1}
    coll_meta = db.meta_container
    f1 = coll_meta.find(search, retrieve).limit(1)
    if f1.count() == 0:
        return None
    return f1
# Check the format of
# a textarea hashes.
# (string with \n's)
def check_hashes(hashes):
    """Validate a newline-separated block of hashes (textarea input).

    Returns {'hashes': [cleaned valid hashes], 'errors': [error dicts]}.
    Blank lines are skipped; invalid entries produce error code 5.
    """
    errors = []
    result_hashes = []
    for line in hashes.split("\n"):
        if line is None or not line.strip():
            continue
        cleaned = clean_hash(line)
        if valid_hash(cleaned):
            result_hashes.append(cleaned)
        else:
            errors.append(
                {"error": 5, "error_message": "invalid_hash: " + str(line.strip())})
    return {"hashes": result_hashes, "errors": errors}
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """Return a random identifier of *size* characters sampled (with replacement) from *chars*."""
    picks = (random.choice(chars) for _ in range(size))
    return ''.join(picks)
def add_error(resp_dict, error_code, error_message):
    """Append a {'code', 'message'} entry to resp_dict['errors'] and return it.

    The errors list is created when missing or None. Anything that is not
    exactly a dict (strict type check — subclasses excluded, mirroring the
    original behavior) is returned unchanged.
    """
    if type(resp_dict) != dict:
        return resp_dict
    entry = {"code": error_code, "message": error_message}
    existing = resp_dict.get('errors')
    if existing is None:
        existing = []
        resp_dict["errors"] = existing
    existing.append(entry)
    return resp_dict
def cursor_to_dict(f1, retrieve):
    """Project a Mongo cursor *f1* onto the dotted key paths of *retrieve*.

    For each document, every dotted key (e.g. "hash.sha1") is resolved step
    by step; the final path segment becomes the output key, with "file_id"
    renamed to "sha1". TimeDateStamp / timeDateStamp values are formatted
    as UTC "%Y-%m-%d %H:%M:%S" strings. List values are dropped (None).
    Returns a list of flat dicts, one per document.
    """
    results = []
    for f in f1:
        results.append(f)
    ret = []
    for a in results:
        dic = {}
        for key in retrieve.keys():
            steps = key.split('.')
            partial_res = a
            for step in steps:
                partial_res = partial_res.get(step)
                if partial_res is None:
                    break
                if isinstance(partial_res, list):
                    # Lists are not projected into the flat row; drop them.
                    partial_res = None
                    break
            legend_to_show = key.split('.')[-1]
            if (legend_to_show == "file_id"):
                legend_to_show = "sha1"
            if (legend_to_show == "TimeDateStamp" and partial_res is not None):
                # SECURITY NOTE(review): eval() on a value read from the
                # database — if that field is ever attacker-influenced this
                # is arbitrary code execution; int(partial_res, 16) on the
                # raw string would be the safe equivalent for hex literals.
                partial_res = time.strftime(
                    "%Y-%m-%d %H:%M:%S", time.gmtime(int(eval(partial_res), 16)))
            if (legend_to_show == "timeDateStamp" and partial_res is not None):
                partial_res = time.strftime(
                    "%Y-%m-%d %H:%M:%S", time.gmtime(partial_res))
            dic[legend_to_show] = partial_res
        ret.append(dic)
    return ret
# ****************TEST_CODE******************
# Sentinel argv flag that triggers the manual test harness below.
TEST = "-test_Functions"


def testCode():
    """Ad-hoc manual benchmark of file reading / entropy calculation.

    NOTE(review): this harness appears stale and broken — it references
    `elapsed_entropy`, `res` (used before assignment) and `getEntropy`,
    none of which are defined in this module, so running it raises
    NameError. It also expects test.exe / testComp.exe in the CWD.
    """
    print("*****ENTROPY*****")
    dir = os.getcwd()
    file = dir + "//test.exe"
    start_time = time.time()
    f = open(file, "rb")
    data = f.read()
    elapsed_read = time.time() - start_time
    print("File size: " + str(len(data)))
    print("Read time: " + str(elapsed_read) +
          " --- Time per byte: " + str(elapsed_read / len(data)))
    # NOTE(review): `elapsed_entropy` is never computed anywhere in this
    # function — the timing block that produced it was evidently removed.
    print("Entropy calculation time: " + str(elapsed_entropy) +
          " --- Time per byte: " + str(elapsed_entropy / len(data)))
    print("")
    print("Read time GB: " + str((1073741824 / len(data)) * elapsed_read))
    print("Entropy time GB: " + str((1073741824 / len(data)) * elapsed_entropy))
    print("")
    print("Entropy file: " + str(res))
    file = dir + "//testComp.exe"
    f = open(file, "rb")
    data = f.read()
    res = getEntropy(data)
    print("Entropy file paq: " + str(res))
    pass
# ***********************TEST***************************
# Manual test entry point: `python Functions.py -test_Functions`.
if(len(sys.argv) >= 2):
    if(sys.argv[1] == TEST):
        try:
            print("######## Test of " + str(sys.argv[0]) + " ########")
            testCode()
        # FIX: was `except Exception, e` (Python-2-only syntax, and `e` was
        # unused); the bare form is valid on both Python 2 and 3.
        except Exception:
            print(traceback.format_exc())
            raw_input("Press a key...")
| {
"content_hash": "7da7f05aa9cd9d47fa93a1c7b46c1d09",
"timestamp": "",
"source": "github",
"line_count": 447,
"max_line_length": 142,
"avg_line_length": 27.07606263982103,
"alnum_prop": 0.5710154507146988,
"repo_name": "codexgigassys/codex-backend",
"id": "9532b278dc51934c6c30dda43d2f2fac793fe4ce",
"size": "12263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Utils/Functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2944"
},
{
"name": "Python",
"bytes": "292728"
},
{
"name": "Shell",
"bytes": "664"
}
],
"symlink_target": ""
} |
import collections
TestCase = collections.namedtuple('TestCase', ['url', 'description'])


class TestCaseDecorator:
    """Collects the test cases that make up the suite for one database system.

    Exposes its functionality via the decorator pattern: each call to
    add_test_case() registers a TestCase and returns an identity decorator.

    Attributes:
      url_prefix: The path prefix which hosts all test cases for the given
        database system.
      test_cases: The collected test cases containing the url and pointers
        to where injected values are being used in the query.
    """

    def __init__(self, url_prefix: str):
        if not url_prefix.startswith('/'):
            raise ValueError('The url_prefix must start with a /.')
        self.url_prefix = url_prefix.rstrip('/')
        self.test_cases = list()

    def add_test_case(self, relative_url: str, description: str):
        """Adds a testcase to the internal state.

        Expands the relative URL with the prefix, matching the routes
        configured via the Flask blueprints, and stores it together with
        the description.

        Args:
          relative_url: the route to the testcase in the blueprint,
            including query parameters with default values.
          description: A description of where the provided values will be
            used in the query.

        Returns:
          The identity function, usable as a decorator.
        """
        if not relative_url.startswith('/'):
            raise ValueError(
                'The relative_url provided to the decorator must start with a /.')
        self.test_cases.append(
            TestCase(self.url_prefix + relative_url, description))

        def inner(func):
            return func

        # BUG FIX: the decorator factory previously fell through and
        # returned None, so `@decorator.add_test_case(...)` replaced the
        # decorated function with None. Return the identity decorator as
        # the docstring always promised.
        return inner

    def get_test_cases(self):
        return self.test_cases
| {
"content_hash": "bd3aa2071bc1cbea72ce8797726d28d5",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 100,
"avg_line_length": 31.81132075471698,
"alnum_prop": 0.697508896797153,
"repo_name": "google/security-testbeds",
"id": "b59a422b154bf321c0a80a96fadc68ce00b5e3fe",
"size": "2363",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "archery_range/sqli/test_case_decorator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "7221"
},
{
"name": "Python",
"bytes": "80197"
}
],
"symlink_target": ""
} |
from sys import version_info
# SWIG-generated import shim (machine generated; do not edit by hand):
# on Python >= 2.6, locate the compiled _snowboydetect extension next to
# this file via imp; otherwise fall back to a plain import.
if version_info >= (2, 6, 0):
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_snowboydetect', [dirname(__file__)])
        except ImportError:
            # Not found beside this file; let the normal import machinery try.
            import _snowboydetect
            return _snowboydetect
        if fp is not None:
            try:
                _mod = imp.load_module('_snowboydetect', fp, pathname, description)
            finally:
                fp.close()
            return _mod
    _snowboydetect = swig_import_helper()
    del swig_import_helper
else:
    import _snowboydetect
del version_info
# SWIG-generated attribute-access boilerplate (machine generated; keep in
# sync with the SWIG runtime rather than hand-editing).
try:
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.


def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    # Route attribute writes through the SWIG setter table; 'thisown' and
    # 'this' are the ownership/pointer slots of the underlying SwigPyObject.
    if (name == "thisown"):
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        if _newclass:
            object.__setattr__(self, name, value)
        else:
            self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)


def _swig_setattr(self, class_type, name, value):
    # Non-static variant: unknown attributes are allowed.
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)


def _swig_getattr_nondynamic(self, class_type, name, static=1):
    # Route attribute reads through the SWIG getter table.
    if (name == "thisown"):
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    if (not static):
        return object.__getattr__(self, name)
    else:
        raise AttributeError(name)

def _swig_getattr(self, class_type, name):
    return _swig_getattr_nondynamic(self, class_type, name, 0)


def _swig_repr(self):
    # Best-effort repr including the wrapped C pointer when available.
    try:
        strthis = "proxy of " + self.this.__repr__()
    except:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)

# _newclass flags whether new-style classes (object) are available.
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object:
        pass
    _newclass = 0
class SnowboyDetect(_object):
    """SWIG proxy for the native snowboy hotword detector.

    Every method delegates to the compiled _snowboydetect extension;
    semantics live in the C++ library, not here.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SnowboyDetect, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SnowboyDetect, name)
    __repr__ = _swig_repr

    def __init__(self, resource_filename, model_str):
        """Create a detector from a resource file and a model string (paths/config per snowboy docs)."""
        this = _snowboydetect.new_SnowboyDetect(resource_filename, model_str)
        try:
            self.this.append(this)
        except:
            self.this = this

    def Reset(self):
        return _snowboydetect.SnowboyDetect_Reset(self)

    def RunDetection(self, *args):
        # Forwards audio data to the native detector; see snowboy docs for
        # the accepted argument forms and return codes.
        return _snowboydetect.SnowboyDetect_RunDetection(self, *args)

    def SetSensitivity(self, sensitivity_str):
        return _snowboydetect.SnowboyDetect_SetSensitivity(self, sensitivity_str)

    def GetSensitivity(self):
        return _snowboydetect.SnowboyDetect_GetSensitivity(self)

    def SetAudioGain(self, audio_gain):
        return _snowboydetect.SnowboyDetect_SetAudioGain(self, audio_gain)

    def UpdateModel(self):
        return _snowboydetect.SnowboyDetect_UpdateModel(self)

    def NumHotwords(self):
        return _snowboydetect.SnowboyDetect_NumHotwords(self)

    def SampleRate(self):
        return _snowboydetect.SnowboyDetect_SampleRate(self)

    def NumChannels(self):
        return _snowboydetect.SnowboyDetect_NumChannels(self)

    def BitsPerSample(self):
        return _snowboydetect.SnowboyDetect_BitsPerSample(self)
    __swig_destroy__ = _snowboydetect.delete_SnowboyDetect
    __del__ = lambda self: None
SnowboyDetect_swigregister = _snowboydetect.SnowboyDetect_swigregister
SnowboyDetect_swigregister(SnowboyDetect)
# This file is compatible with both classic and new-style classes.
| {
"content_hash": "5e26c62f7f6e2b59db5abb47acb53ff1",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 94,
"avg_line_length": 30.51127819548872,
"alnum_prop": 0.6320847708230656,
"repo_name": "adam-c-fox/smartMirror",
"id": "b9b9b45ca4d14be3215135a6ce12eaf2095d52e2",
"size": "4265",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "snowboy/snowboydetect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "177858"
},
{
"name": "HTML",
"bytes": "6752"
},
{
"name": "JavaScript",
"bytes": "17812"
},
{
"name": "Python",
"bytes": "14649"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from itertools import islice, product
import logging
import MDAnalysis as md
import math
import random
import numpy as np
import pandas as pd
import plotly
import plotly.graph_objs as go
import subprocess
import scipy
import scipy.stats
import string
import time
import settings
class Atom(object):
    """One atom: id, element/type, position, and per-atom stress tensor."""

    def __init__(self, identifier, **kwargs):
        self.id = identifier
        self.type = kwargs.get('type', None)
        self.element = kwargs.get('element', None)
        # Cartesian coordinates (x, y, z).
        self.xyz = kwargs.get('xyz', None)
        # Cartesian stress as the 6 components (xx, yy, zz, xy, xz, yz).
        self.stress = kwargs.get('stress', None)
        self.normal = kwargs.get('normal', False)
        # The following are filled in later relative to the bubble center.
        self.distance = None
        self.sin_theta = None
        self.cos_theta = None
        self.sin_phi = None
        self.cos_phi = None
        self.spherical_stress = None
        self.voro_volume = 0

    def calc_spherical_stress(self):
        """
        Calculate spherical stress tensor from cartesian one
        ref: http://www.brown.edu/Departments/Engineering/Courses/En221/Notes/Polar_Coords/Polar_Coords.htm
        """
        xx, yy, zz, xy, xz, yz = self.stress
        cart = np.array( [ [xx, xy, xz], [xy, yy, yz], [xz, yz, zz] ] )
        # 1 for theta, the angle between xyz and z axis, 2 for phi,
        # angle between x axis and the projection on xy-plane
        sin1 = self.sin_theta
        cos1 = self.cos_theta
        sin2 = self.sin_phi
        cos2 = self.cos_phi
        # NOTE(review): with the standard spherical unit vectors the phi
        # column would be (-sin2, +cos2, 0); here the y-entry is -cos2.
        # Verify the intended sign convention against the referenced notes.
        conv = np.array( [ [sin1*cos2, cos1*cos2, -sin2],
                           [sin1*sin2, cos1*sin2, -cos2],
                           [cos1, -sin1, 0], ] )
        sphe = np.dot( conv, cart.dot( np.transpose(conv) ) )
        # Of format [ [rr, rTheta, rPhi], [rTheta, thetaTheta, thetaPhi], [rPhi, thetaPhi, phiPhi] ]
        self.spherical_stress = sphe
class Box(object):
PI = 3.1415926
    def __init__(self, timestep=0, radius=None, use_atomic_volume=True, average_on_atom=False, **kwargs):
        """Set up an empty simulation box; atoms and stats are filled later.

        kwargs: bx/by/bz (boundary pairs), center (bubble center coords).
        """
        # Current timestep.
        self.timestep = timestep
        # Maximum bubble radius in box.
        self.radius = radius
        self.count = 0
        # XYZ boundaries.
        self.bx = kwargs.get('bx', None)
        self.by = kwargs.get('by', None)
        self.bz = kwargs.get('bz', None)
        # Bubble center coordinates.
        self._center = kwargs.get('center', None)
        # All atoms.
        self.atoms = []
        # Container of atoms for each element.
        self._elements = {}
        # Container of shell stress for each element.
        self._shell_stress = {}
        self._shell_stress_r = {}
        self._shell_stress_theta = {}
        self._shell_stress_phi = { }
        # Container of shell atom count for each element.
        self.nbins = None
        self._shell_atoms = {}
        self._shell_atom_objs = []
        self._shell_volumes = {}
        # Indicator of stats status.
        self._stats_finished = False
        self._measured = False
        # Dump atom coordinates to calculate voro tessellation volume
        self.voro_file_name = 'atom_coors'
        # Flags controlling how shell averages are computed elsewhere —
        # presumably per-atom Voronoi volume vs shell volume; confirm usage.
        self.use_atomic_volume = use_atomic_volume
        self.average_on_atom = average_on_atom
@property
def measured(self):
"""Returns true if all atoms have a distance (to bubble center)."""
if all([x.distance for x in self.atoms]):
self._measured = True
else:
self._measured = False
return self._measured
    @property
    def center(self):
        """Bubble center coordinates (x, y, z)."""
        return self._center

    @center.setter
    def center(self, coor):
        # Moving the center invalidates cached distances and shell stats.
        self._center = coor
        self._measured = False
        self._stats_finished = False
    def combine_water_atoms(self):
        """
        Combine each O-H-H triplet into a single pseudo-particle:
        stress = S_O + S_H + S_H (component-wise) and coor = center of mass
        (masses O=16, H=1). Assumes that, sorted by id, each molecule's
        atoms appear in O, H, H order (asserted below). The merged particle
        keeps the first (O) atom's id and normal flag, and is stored with
        type=3, element='H'.
        NOTE(review): new_volume is computed but never used — the Atom
        constructor takes no volume argument; verify whether the summed
        voro volume was meant to be carried over.
        """
        self._old_atoms = self.atoms
        self.atoms = []
        self._old_atoms.sort( key=lambda x: x.id )
        water = []
        for atom in self._old_atoms:
            if atom.element not in ['H', 'O']:
                # Non-water atoms pass through untouched.
                self.atoms.append( atom )
            else:
                water.append(atom)
            if len( water ) == 3:
                # need to combine the 3 atoms into 1 now
                assert [ _ele.element for _ele in water ] == ['O', 'H', 'H']
                new_stress = [a+b+c for a, b, c in zip(water[0].stress, water[1].stress, water[2].stress)]
                new_volume = sum( _ele.voro_volume for _ele in water )
                masses = [ 16 if _ele.element == 'O' else 1 for _ele in water ]
                xs = [ _ele.xyz[0] for _ele in water]
                ys = [ _ele.xyz[ 1 ] for _ele in water ]
                zs = [ _ele.xyz[ 2 ] for _ele in water ]
                cx = sum( m*x for m,x in zip(masses, xs) ) / sum(masses)
                cy = sum( m * y for m, y in zip( masses, ys ) ) / sum( masses )
                cz = sum( m * z for m, z in zip( masses, zs ) ) / sum( masses )
                new_xyz = (cx, cy, cz)
                new_id = water[0].id
                normal = water[0].normal
                self.atoms.append( Atom(new_id, type=3, element='H', xyz=new_xyz, stress=new_stress, normal=normal) )
                water = []
def dump_atoms_for_voro( self, length=None ):
'''
Dump atom coordinates so we can calculate Voronoi tessellation using Voro++
from http://math.lbl.gov/voro++/
The input file format for voro++ is
<atom id> <x> <y> <z>
and output file format is
<atom id> <x> <y> <z> <tessellation volume>
'''
logging.info( 'Dump atom coordinates to {}'.format( self.voro_file_name ) )
fmt = '{} {} {} {}\n'
if length:
xmin, xmax = self.center[0] - length, self.center[0] + length
ymin, ymax = self.center[1] - length, self.center[1] + length
zmin, zmax = self.center[2] - length, self.center[2] + length
with open( self.voro_file_name, 'w' ) as output:
for atom in self.atoms:
x, y, z = atom.xyz
if length:
if xmin <= x <= xmax and ymin<= y <= ymax and zmin <= z <= zmax:
output.write( fmt.format( atom.id, x, y, z ) )
else:
output.write( fmt.format( atom.id, x, y, z ) )
def voro_cmd( self, gnuplot=False, length=None ):
'''
CMD to run voro++ in bash
gnuplot=True will also export gnu plot file. Be careful when system is large as
this file will be extremely large
default to use -o to preserve the atom order. This has small memory and performance
impact as the documentation says.
'''
# when have length -o will not work
cmd = 'voro++' if length else 'voro++ -o'
fmt = cmd + ' {opts} {{xmin}} {{xmax}} {{ymin}} {{ymax}} {{zmin}} {{zmax}} {{infile}}'
opts = '-g' if gnuplot else ''
fmt = fmt.format( opts=opts )
if length:
xmin, xmax = self.center[0] - length, self.center[0] + length
ymin, ymax = self.center[1] - length, self.center[1] + length
zmin, zmax = self.center[2] - length, self.center[2] + length
else:
xmin, xmax = self.bx
ymin, ymax = self.by
zmin, zmax = self.bz
return fmt.format( xmin=xmin, xmax=xmax,
ymin=ymin, ymax=ymax,
zmin=zmin, zmax=zmax,
infile=self.voro_file_name)
def run_voro_cmd(self, gnuplot=False, length=None):
    """Run the external voro++ binary; raise if it writes to stderr."""
    logging.info('Calculating voro volumes for atoms')
    cmd = self.voro_cmd(gnuplot=gnuplot, length=length)
    logging.info("Running: {}".format(cmd))
    proc = subprocess.Popen(cmd, shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    _out, err = proc.communicate()
    if err:
        # Any stderr output is treated as a hard failure.
        raise Exception(err)
    logging.info("Finished: {}".format(cmd))
def read_voro_volumes( self ):
voro_out = self.voro_file_name + '.vol'
logging.info( 'Reading voro volumes from {}'.format( voro_out ) )
with open( voro_out, 'r' ) as volumes:
idx = 0
for line in volumes:
atom_id, x, y, z, vol = [ float(ele) for ele in line.split() ]
atom_id = int( atom_id )
atom = self.atoms[ idx ]
try:
assert( atom.id == atom_id )
except Exception as e:
print( atom.id, atom_id )
raise e
atom.voro_volume = vol
idx += 1
def calc_voro_volumes(self, gnuplot=False, length=None):
    """Full Voronoi pipeline: dump coordinates, run voro++, read volumes.

    Volumes are only read back for the full box; with *length* cropping
    the -o row-order guarantee is unavailable, so results are left on
    disk unparsed.
    """
    self.dump_atoms_for_voro(length=length)
    self.run_voro_cmd(gnuplot=gnuplot, length=length)
    if length:
        return
    self.read_voro_volumes()
def adjust_water_vol(self, ratio=(0.5, 0.25)):
""" Adjust volume of H and O in water. For pure water system only """
satoms = sorted( self.atoms, key= lambda x: x.id)
assert( len( satoms ) % 3 == 0 )
assert( ratio[0] + 2 * ratio[1] == 1.0)
for idx in xrange( len(satoms) / 3):
o = satoms[ idx * 3 ]
h1 = satoms[ idx * 3 + 1 ]
h2 = satoms[ idx * 3 + 2 ]
vsum = sum( ele.voro_volume for ele in [o, h1, h2])
vo = ratio[0] * vsum
vh = ratio[1] * vsum
o.adj_vol = vo
h1.adj_vol = vh
h2.adj_vol = vh
def set_boundary(self, bx, by, bz):
"""Set bx by bz together."""
self.bx = bx
self.by = by
self.bz = bz
def add_atom(self, atom):
self.atoms.append(atom)
self.count += 1
# Need to run stats after new atom added.
self._stats_finished = False
if atom.element in self._elements:
self._elements[atom.element].append(atom)
else:
self._elements[atom.element] = [atom]
def measure(self):
    """Measure every atom's distance to the bubble center.

    For atoms flagged with .normal, also precompute the sin/cos of the
    spherical angles theta and phi used later for normal-stress
    projection. Finishes by refreshing Voronoi volumes (shells out to
    the external voro++ binary).
    """
    cx, cy, cz = self.center[0], self.center[1], self.center[2]
    for atom in self.atoms:
        pos = np.array(atom.xyz)
        atom.distance = np.linalg.norm(pos - self.center)
        if not atom.normal:
            continue
        dx = pos[0] - cx
        dy = pos[1] - cy
        dz = pos[2] - cz
        rho = math.sqrt(dx * dx + dy * dy)
        atom.sin_theta = rho / atom.distance
        atom.cos_theta = dz / atom.distance
        # NOTE(review): rho == 0 (atom exactly on the z axis through the
        # center) divides by zero here, as in the original.
        atom.sin_phi = dy / rho
        atom.cos_phi = dx / rho
    self.calc_voro_volumes()
def stats(self, dr, normal):
"""
System stats.
Generate data for atom stats and stress stats for each element.
self._shell_atoms = {}
self._shell_stress = {}
"""
if not self.measured:
raise AtomUnmeasuredError("Some atoms are unmeasuerd")
self.nbins = int(math.ceil(self.radius / float(dr)))
self._shell_atom_objs = [ { } for x in range( self.nbins ) ]
for ele, atoms in self._elements.iteritems():
# Do stats for each element.
for atom in atoms:
if atom.distance < self.radius:
shell_idx = int( atom.distance / dr )
self._shell_atom_objs[ shell_idx ].setdefault(ele, []).append( atom )
if normal:
atom.calc_spherical_stress()
self._stats_finished = True
def atom_stats(self, element, dr):
"""Atom ratio stats inside bubble."""
if not self._stats_finished:
self.stats(dr)
nbins = len(self._shell_atoms[element])
bubble_atoms = {}
# Init bubble atoms by copying shell atoms
for ele, count in self._shell_atoms.iteritems():
bubble_atoms[ele] = [x for x in count]
for i in range(1, nbins):
bubble_atoms[ele][i] += bubble_atoms[ele][i - 1]
bubble_atoms[ele] = np.array(bubble_atoms[ele])
return bubble_atoms[element] / sum(bubble_atoms.values())
def pressure_stats(self, elements, dr):
    """Average pressure stats inside bubble for species in elements.

    Returns {'in': [...], 'out': [...]}: pressure of the cumulative
    sphere of radius (i+1)*dr ('in') and of the complementary outer
    region ('out'), via P = -S / (3 V).

    NOTE(review): this method looks stale — stats() now requires a
    `normal` argument (self.stats(dr) below raises TypeError) and
    populates _shell_atom_objs, while this reads self._shell_stress,
    which nothing in the visible code fills. Confirm before use.
    """
    if not self._stats_finished:
        self.stats(dr)
    nbins = len(self._shell_stress[elements[0]])
    # Calculate stress for all element in elements as whole.
    # Convert numpy.Array to mutable list.
    stress_in = [x for x in sum([self._shell_stress[ele] for ele in elements])]
    stress_out = [x for x in stress_in]
    for i in range(1, nbins):
        # Cumulative stress: inward running sum and outward running sum.
        stress_in[i] += stress_in[i-1]
        stress_out[nbins - 1 - i] += stress_out[nbins - i]
    for i in range(1, nbins):
        # Stress -> pressure: divide by enclosing volume and 3.
        stress_in[i] = 0 - stress_in[i] / self.vol_sphere((i+1)*dr) / 3.0
        stress_out[nbins-1-i] = 0 - stress_out[nbins-1-i] / (self.vol_sphere(self.radius) - self.vol_sphere((nbins-i-1)*dr)) / 3
    # Head and tail bins handled outside the loops above.
    stress_in[0] = 0 - stress_in[0] / self.vol_sphere(dr) / 3
    stress_out[nbins - 1] = 0 - stress_out[nbins - 1] / (self.vol_sphere(self.radius) - self.vol_sphere((nbins - 1)*dr)) / 3
    return {'in': stress_in, 'out': stress_out}
def shell_pressure_stats(self, elements, dr, normal=False):
    """Per-shell average pressure for the species in *elements*.

    Runs stats() first, then reduces each dr-thick shell according to
    the configured volume convention:
      * use_atomic_volume and average_on_atom — mean of per-atom
        pressures, each over the atom's own Voronoi volume;
      * use_atomic_volume only — total stress over total Voronoi volume;
      * otherwise — total stress over the geometric shell volume.

    Returns a list of isotropic pressures (normal=False), or a dict
    {'r': [...], 'theta': [...], 'phi': [...]} of spherical normal
    components (normal=True; requires atom.spherical_stress).

    Fix vs original: the leftover debug print ("NNNNNumber of bins")
    is replaced with a logging call; the three near-identical branch
    triplets are factored into helpers.
    """
    self.stats(dr, normal=normal)
    logging.info('Number of bins: %s', self.nbins)
    if normal:
        return self._normal_shell_pressures(elements, dr)
    return self._isotropic_shell_pressures(elements, dr)

def _shell_members(self, shell_atoms, elements):
    """Flatten one shell's {element: [atoms]} dict down to the atoms of
    the requested elements."""
    members = []
    for element, atoms in shell_atoms.items():
        if element in elements:
            members.extend(atoms)
    return members

def _isotropic_shell_pressures(self, elements, dr):
    """P = -(Sxx + Syy + Szz) / 3 / V for each shell; 0 for empty shells."""
    pressures = []
    for idx, shell_atoms in enumerate(self._shell_atom_objs):
        atoms = self._shell_members(shell_atoms, elements)
        if self.use_atomic_volume and self.average_on_atom:
            per_atom = [-sum(a.stress) / a.voro_volume / 3.0 for a in atoms]
            pressures.append(sum(per_atom) / len(per_atom) if per_atom else 0)
        elif self.use_atomic_volume:
            total_stress = sum(sum(a.stress[:3]) for a in atoms)
            total_volume = sum(a.voro_volume for a in atoms)
            pressures.append(-total_stress / 3.0 / total_volume
                             if total_volume != 0 else 0)
        else:
            total_stress = sum(sum(a.stress[:3]) for a in atoms)
            shell_volume = (self.vol_sphere((idx + 1) * dr)
                            - self.vol_sphere(idx * dr))
            pressures.append(-total_stress / 3.0 / shell_volume)
    return pressures

def _normal_shell_pressures(self, elements, dr):
    """Spherical normal components P_r, P_theta, P_phi per shell.

    Uses the diagonal of atom.spherical_stress (xx -> r, yy -> theta,
    zz -> phi); empty shells yield 0 in every component.
    """
    out = {'r': [], 'theta': [], 'phi': []}
    keys = ('r', 'theta', 'phi')
    for idx, shell_atoms in enumerate(self._shell_atom_objs):
        atoms = self._shell_members(shell_atoms, elements)
        # Diagonal stress components per atom, one list per direction.
        diag = [[a.spherical_stress[i][i] for a in atoms] for i in range(3)]
        if self.use_atomic_volume and self.average_on_atom:
            n_atoms = len(atoms)
            for key, comps in zip(keys, diag):
                if n_atoms:
                    out[key].append(
                        sum(-s / a.voro_volume
                            for s, a in zip(comps, atoms)) / n_atoms)
                else:
                    out[key].append(0)
        else:
            if self.use_atomic_volume:
                volume = sum(a.voro_volume for a in atoms)
            else:
                volume = (self.vol_sphere((idx + 1) * dr)
                          - self.vol_sphere(idx * dr))
            for key, comps in zip(keys, diag):
                if volume != 0:
                    out[key].append(-sum(comps) / volume)
                else:
                    out[key].append(0)
    return out
def pressure_between(self, rlow, rhigh):
"""Return the average pressure and number of atoms between rlow
and rhigh."""
stress = 0
count = 0
for atom in self.atoms:
if atom.distance > rlow and atom.distance <= rhigh:
count += 1
stress += sum(atom.stress)
volume = self.vol_sphere(rhigh) - self.vol_sphere(rlow)
return stress / volume / 3, count
def shell_density(self, elements, mole, dr):
    """Number density of *elements* in each dr-thick shell.

    NA is Avogadro's constant pre-scaled so that counts per Angstrom^3
    convert to mol/cm^3. Returns a list of length self.nbins.

    Fixes vs the original: stats() takes a required `normal` argument
    (the bare self.stats(dr) call raised TypeError), and counts come
    from _shell_atom_objs — the _shell_atoms dict the original read is
    never populated by stats().

    NOTE(review): `mole` is unused (as in the original); kept for
    interface compatibility.
    """
    # Usually the density dr differs from the stats dr, so re-stat.
    self.stats(dr, normal=False)
    # Avogadro constant, scaled to convert Angstrom^3 to cm^3.
    NA = 6.022 / 10
    densities = []
    for i, shell in enumerate(self._shell_atom_objs):
        count = 0
        for ele, atoms in shell.items():
            if ele in elements:
                count += len(atoms)
        # Geometric volume of the i-th shell, in Angstrom^3.
        volume = self.vol_sphere((i + 1) * dr) - self.vol_sphere(i * dr)
        densities.append(count / NA / volume)
    return densities
def bubble_density(self, elements, mole, dr):
    # TODO: not implemented — returns None. The module-level
    # bubble_density() wrapper passes that None to write_density(),
    # which would fail on it.
    pass
def xyz_density(self, elements, mole, dx):
"""Density distribution along x, y, and z inside box."""
# Avogadro constant. Modified by coefficient used to
# convert angstrom^3 to cm^3.
NA = 6.022 / 10
nx = int(math.ceil((self.bx[1] - self.bx[0]) / dx))
ny = int(math.ceil((self.by[1] - self.by[0]) / dx))
nz = int(math.ceil((self.bz[1] - self.bz[0]) / dx))
dist = {}
dist['x'] = [0 for x in range(nx)]
dist['y'] = [0 for y in range(ny)]
dist['z'] = [0 for z in range(nz)]
for ele in elements:
# Count atoms.
for atom in self._elements[ele]:
dist['x'][int(atom.xyz[0] / dx)] += 1
dist['y'][int(atom.xyz[1] / dx)] += 1
dist['z'][int(atom.xyz[2] / dx)] += 1
volx = (self.by[1] - self.by[0]) * (self.bz[1] - self.bz[0]) * dx
voly = (self.bx[1] - self.bx[0]) * (self.bz[1] - self.bz[0]) * dx
volz = (self.by[1] - self.by[0]) * (self.bx[1] - self.bx[0]) * dx
for i in range(nx):
# Calculate density.
dist['x'][i] = dist['x'][i] / NA / volx
dist['y'][i] = dist['y'][i] / NA / voly
dist['z'][i] = dist['z'][i] / NA / volz
return dist
def vol_sphere(self, r):
"""Volume of sphere with radius r."""
return 4.0/3 * Box.PI * (r ** 3)
def volume(self):
""" Box volume """
return (self.bx[1] - self.bx[0]) * (self.by[1] - self.by[0]) * (self.bz[1] - self.bz[0])
class Trajectory( object ):
    '''Gas molecule trajectory wrapper around an MDAnalysis Universe.

    Estimates a bubble radius per frame from a Gaussian KDE of the atom
    density sampled on a regular grid, fits radius-vs-frame growth, and
    counts atoms inside the bubble sphere.
    '''
    def __init__( self, pdbPath, xtcPath ):
        # Topology (pdb) plus trajectory (xtc), loaded through MDAnalysis.
        self.universe = md.Universe( pdbPath, xtcPath )
        self.set_density_params()
    @property
    def n_frames( self ):
        # Total number of frames in the loaded trajectory.
        return self.universe.trajectory.n_frames
    @property
    def frame( self ):
        # Index of the currently loaded frame.
        return self.universe.trajectory.frame
    def set_density_params(self, low=0.4, high=0.5, length=60 ):
        '''
        Configure the density grid used by radius().

        A length x length x length grid is laid over the frame's bounding
        box; grid points whose KDE density lies between low * max_density
        and high * max_density are treated as the bubble shell and used
        for the radius calculation.
        '''
        self.density_low = low
        self.density_high = high
        self.density_grid_length = length
    def set_frame( self, frame ):
        # Indexing the trajectory positions the Universe at that frame.
        self.universe.trajectory[ frame ]
    def radius( self, frame ):
        '''
        Bubble radius at one frame.
        Method:
        1. Load the snapshot at frame
        2. Load x, y, z coordinates
        3. Calculate density grid mesh at grid points
        4. Filter the shell grids with density between low * max density and high * max density
        5. Calculate the average radius

        Returns (center, mean_radius), where center is the mean of the
        shell grid-point coordinates.
        '''
        # NOTE(review): time.clock() was removed in Python 3.8 — this is
        # Python-2-era code (see xrange below); would need perf_counter().
        start = time.clock()
        self.set_frame( frame )
        # Load x, y, z coordinates
        data = pd.DataFrame( list(self.universe.coord), columns=['x','y','z'])
        x = data[ 'x' ].values
        y = data[ 'y' ].values
        z = data[ 'z' ].values
        # Gaussian kernel density estimate over the atom positions.
        xyz = scipy.vstack( [ x, y, z ] )
        kde = scipy.stats.gaussian_kde( xyz )
        xmin, ymin, zmin = x.min(), y.min(), z.min()
        xmax, ymax, zmax = x.max(), y.max(), z.max()
        # Imaginary step makes mgrid produce exactly N points per axis.
        NI = complex( imag=self.density_grid_length)
        xi, yi, zi = scipy.mgrid[ xmin:xmax:NI, ymin:ymax:NI, zmin:zmax:NI ]
        coords = scipy.vstack([item.ravel() for item in [xi, yi, zi]])
        density = kde(coords).reshape(xi.shape)
        # Keep only grid points inside the configured density band.
        density_max = density.max()
        density_low = self.density_low * density_max
        density_high = self.density_high * density_max
        xyzs = []
        N = self.density_grid_length
        for idx, idy, idz in product( xrange(N), xrange(N), xrange(N) ):
            if density_low < density[ idx, idy, idz ] <= density_high:
                xyzs.append( [ xi[ idx, idy, idz ], yi[ idx, idy, idz ], zi[ idx, idy, idz ] ] )
        xyzs = np.array( xyzs )
        # Average distance of the shell points to their centroid.
        center = xyzs.mean( axis=0 )
        rs = []
        for xyz_ele in xyzs:
            rs.append( np.linalg.norm( center - xyz_ele ) )
        duration = time.clock() - start
        print( "Radius for frame {} calculated in {:.2f} seconds".format( frame, duration ) )
        return center, scipy.mean( rs )
    def radius_for_frames( self, start, end, step=1 ):
        # (frame, radius) pairs for frames [start:end:step].
        ret = []
        for frame in xrange( start, end, step ):
            center, radius = self.radius( frame )
            ret.append( [ frame, radius ] )
        return ret
    def all_radius( self ):
        # Radius for every frame in the trajectory.
        return self.radius_for_frames( 0, self.n_frames, 1 )
    def regression( self, radiusList ):
        ''' Input (frame, radius) lists and do linear regression on the data.

        Returns (slope, intercept, r_value, p_value, std_err) from
        scipy.stats.linregress.
        '''
        ts = [ ele[0] for ele in radiusList ]
        rs = [ ele[1] for ele in radiusList ]
        slope, intercept, r_value, p_value, std_err = scipy.stats.linregress( ts, rs )
        return slope, intercept, r_value, p_value, std_err
    def plot_radius( self, rs, notebook=False ):
        ''' Plot the (frame, radius) dots plus the linear-regression line.

        notebook=True uses plotly's inline notebook renderer instead of
        writing a standalone HTML plot.
        '''
        xs = [ ele[0] for ele in rs ]
        ys = [ ele[1] for ele in rs ]
        x_min = min( xs )
        x_max = max( xs )
        # Pad the regression line 5% beyond the data range.
        x_min = x_min - ( x_max - x_min ) * 0.05
        x_max = x_max + ( x_max - x_min ) * 0.05
        slope, intercept, r_value, p_value, std_err = self.regression( rs )
        xs_line = [ x_min ] + xs + [ x_max ]
        ys_line = [ ele * slope + intercept for ele in xs_line ]
        # Scatter plot of the raw radii.
        scatter = go.Scatter(
            x = [ele[0] for ele in rs],
            y = [ele[1] for ele in rs],
            mode = 'markers',
            name = 'Radius'
        )
        reg_line = go.Scatter(
            x = xs_line, y = ys_line,
            mode='lines', name='y={:.4f}x+{:.4f}, p-value={:.2f}, StdErr={:.3f}'.format(slope, intercept, p_value, std_err)
        )
        data = go.Data([scatter, reg_line])
        plot = plotly.offline.iplot if notebook else plotly.offline.plot
        plot( {
            'data': data,
            'layout': go.Layout( title='Radius vs Frame', xaxis={'title':'Frame'}, yaxis={'title':'Radius'} )
        } )
    def flux_info( self, start, end, step=1 ):
        '''
        Flux info for frames [start:end:step]. Info are, for each step,
        nframe, center, radius, n atoms inside sphere
        '''
        info = []
        for nframe in xrange( start, end, step ):
            center, radius = self.radius( nframe )
            # MDAnalysis 'point x y z r' selector: atoms within r of (x,y,z).
            selector = 'point ' + ' '.join( str( ele ) for ele in list( center ) + [ radius ] )
            # radius() moved the trajectory; position it explicitly again.
            self.set_frame( nframe )
            atoms = self.universe.select_atoms( selector )
            natoms = atoms.n_atoms
            info.append( (nframe, center, radius, natoms) )
        return info
#################################################
################# Exceptions ####################
#################################################
class AtomUnmeasuredError(Exception):
    """Raised when shell statistics are requested before Box.measure()
    has assigned every atom a distance to the bubble center."""
    pass
################################################
################## Functions ###################
################################################
def next_n_lines(file_opened, N, strip='right'):
    """Return the next N lines from an open file (fewer at EOF).

    strip selects whitespace stripping per line: 'right', 'left', 'both',
    or a falsy value for the raw lines (newlines kept). An unknown strip
    key raises KeyError, as before.

    Fix: the original dispatched through string.rstrip/lstrip/strip,
    functions that only exist in the Python 2 `string` module; the str
    methods used here behave identically and survive Python 3.
    """
    strippers = {
        'right': lambda s: s.rstrip(),
        'left': lambda s: s.lstrip(),
        'both': lambda s: s.strip(),
    }
    lines = islice(file_opened, N)
    if strip:
        return [strippers[strip](line) for line in lines]
    return list(lines)
def read_stress(stress_file, N=settings.NLINES, normalPressure=False):
    """
    Read a LAMMPS dump file into {frame_index: [Atom, ...]}.

    Each N-line chunk is one frame: 9 header lines followed by rows of
        atom_id atom_type x y z stress_x stress_y stress_z [xy xz yz]
    With normalPressure, six stress components (xx yy zz xy xz yz) are
    kept per atom; otherwise only the three diagonal components.
    """
    frames = {}
    frame_idx = 0
    while True:
        # Drop the 9 dump-header lines of each chunk.
        body = next_n_lines(stress_file, N)[9:]
        if not body:
            break
        frame_atoms = []
        for raw in body:
            fields = raw.strip().split()
            atom_id = int(fields[0])
            atom_type = int(fields[1])
            element = settings.ELEMENTS[atom_type]
            xyz = tuple(float(v) for v in fields[2:5])
            if normalPressure:
                stress = tuple(float(v) for v in fields[5:11])
            else:
                stress = tuple(float(v) for v in fields[5:8])
            frame_atoms.append(Atom(atom_id, type=atom_type, element=element,
                                    xyz=xyz, stress=stress,
                                    normal=normalPressure))
        frames[frame_idx] = frame_atoms
        frame_idx += 1
    return frames
def read_pdb(filename):
    """
    Read a pdb file into (atoms, (x, y, z)).

    atoms is a list of Atom built from the ATOM records; (x, y, z) are
    the box lengths taken from the CRYST record.

    Fix: the original raised an opaque NameError when the file had no
    CRYST record (cryst_line was never bound); now raises ValueError
    with a clear message.
    """
    logging.info( "Reading {}".format(filename) )
    atoms_lines = []
    cryst_line = None
    with open(filename, 'r') as pdbfile:
        for line in pdbfile:
            if line.startswith('CRYST'):
                cryst_line = line
            elif line.startswith('ATOM'):
                atoms_lines.append( line )
    if cryst_line is None:
        raise ValueError("No CRYST record found in {}".format(filename))
    x, y, z = [float(ele) for ele in cryst_line.strip().split()[1:4] ]
    atoms = []
    for line in atoms_lines:
        data = line.strip().split()
        idx = int(data[1])
        # First two characters of the atom-name column as the element.
        element = data[2][:2]
        coor = [ float(ele) for ele in data[5:8] ]
        atoms.append( Atom(identifier=idx, element=element, xyz=coor) )
    return atoms, (x,y,z)
def combine_water(atoms, remove=True):
    """
    Merge each O-H-H water triple into one 'W' pseudo-atom at the
    geometric center; Ne atoms pass through unchanged. With remove=True,
    a random quarter of the merged water particles is kept.

    Returns the Ne atoms followed by the (re-numbered) selected waters.

    Fixes vs original: floor division (//) replaces classic int "/",
    which becomes float division under Python 3 and breaks range() and
    random.sample(); the unused `combined` local is dropped.

    NOTE(review): assumes water atoms appear in O,H,H order and their
    count is a multiple of 3 — trailing partial molecules are dropped
    silently.
    """
    ne = [a for a in atoms if a.element == 'Ne']
    wat = [a for a in atoms if a.element != 'Ne']
    logging.info("Before:: {} Ne, {} Water atoms".format(len(ne), len(wat)))
    idx_wat = len(ne) + 1
    comb_wat = []
    for i in range(len(wat) // 3):
        triple = wat[i * 3:i * 3 + 3]
        # Geometric (unweighted) center of the three atoms.
        coor = (np.array(triple[0].xyz) + np.array(triple[1].xyz)
                + np.array(triple[2].xyz)) / 3.
        comb_wat.append(Atom(identifier=idx_wat, element='W', xyz=coor))
        idx_wat += 1
    if remove:
        selected = random.sample(comb_wat, len(comb_wat) // 4)
    else:
        selected = comb_wat
    # Re-number the kept waters consecutively after the Ne atoms.
    n_ne = len(ne)
    for i in range(len(selected)):
        selected[i].id = i + 1 + n_ne
    logging.info("After:: {} Ne, {} Water atoms".format(len(ne), len(selected)))
    return ne + selected
def write_lammps_data(atoms, xyz, filename):
    """
    Write atoms to a LAMMPS data file.

    atoms: objects with .id, .element ('Ne' or 'W') and .xyz; xyz is the
    (x, y, z) box lengths. Atom rows are
        <atom id> <molecule id> <atom type> <charge> <x> <y> <z>

    Fixes vs original: rows are collected in a list and joined once
    (the += loop was O(n^2)), and the stray debug print of the header
    is removed.
    """
    atom_types = {'Ne': 1, 'W': 2}
    x, y, z = xyz
    lines = [
        "LAMMPS bubble\n\n"
        "{n_atoms} atoms\n\n"
        "{n_types} atom types\n"
        "0 bond types\n"
        "0 angle types\n\n"
        "0 {x} xlo xhi\n0 {y} ylo yhi\n0 {z} zlo zhi\n\n"
        "Atoms\n\n".format(n_atoms=len(atoms), n_types=2, x=x, y=y, z=z)
    ]
    fmt = "{idx} {mol} {atype} {charge} {x} {y} {z}\n"
    for atom in atoms:
        # Each atom is its own molecule; charges are all zero.
        lines.append(fmt.format(idx=atom.id, mol=atom.id,
                                atype=atom_types[atom.element], charge=0,
                                x=atom.xyz[0], y=atom.xyz[1], z=atom.xyz[2]))
    with open(filename, 'w') as output:
        output.write(''.join(lines))
def average_atom_stress(write=True, step=0, *args):
    """Average per-atom stress over several stress dump files.

    args are open dump files; `step` selects which frame to average.
    With write=True the averaged frame is also written next to the
    first input as '<name>_averaged.dat' (9 dummy header lines keep it
    LAMMPS-dump compatible). Returns the list of averaged Atom objects.
    """
    file_count = float(len(args))
    per_file = []
    for handle in args:
        frame = read_stress(handle)[step]
        # Align frames by atom id before column-wise averaging.
        frame.sort(key=lambda a: a.id)
        per_file.append(frame)
    n_atoms = len(per_file[0])
    averaged = []
    for i in range(n_atoms):
        column = [frame[i] for frame in per_file]
        ref = column[0]
        sx = sum(a.stress[0] for a in column) / file_count
        sy = sum(a.stress[1] for a in column) / file_count
        sz = sum(a.stress[2] for a in column) / file_count
        averaged.append(
            Atom(ref.id, type=ref.type, element=ref.element, xyz=ref.xyz,
                 stress=(sx, sy, sz))
        )
    if write:
        out_name = '.'.join(args[0].name.split('.')[:-1]) + '_averaged.dat'
        with open(out_name, 'w') as output:
            # Dummy header lines keep the file LAMMPS-dump compatible.
            output.write('Header line\n' * 9)
            for atom in averaged:
                # Element is deliberately omitted for dump compatibility.
                output.write("{} {} {} {} {} {} {} {}\n".format(
                    atom.id, atom.type,
                    atom.xyz[0], atom.xyz[1], atom.xyz[2],
                    atom.stress[0], atom.stress[1], atom.stress[2]))
        print("Average Stress saved to {}.".format(out_name))
    return averaged
def build_box(atoms, timestep, radius, center, use_atomic_volume, average_on_atom, bx, by, bz):
    """Build a box from a list of atoms.

    Constructs the Box, registers every atom, sets the boundaries and
    runs measure() — which computes per-atom distances and shells out
    to the external voro++ binary for Voronoi volumes.
    """
    box = Box(timestep, radius=radius, center=center, use_atomic_volume=use_atomic_volume, average_on_atom=average_on_atom)
    for atom in atoms:
        box.add_atom(atom)
    box.set_boundary(bx=bx, by=by, bz=bz)
    box.measure()
    return box
def write_density(density, dr, outname, header):
    """Write one density profile (shell or xyz) to *outname*.

    Rows are tab-separated: r_low  r_high  density, after *header*.
    """
    row = '{l:.3f}\t{h:.3f}\t{d:.13f}\n'
    with open(outname, 'w') as output:
        output.write(header)
        for i, value in enumerate(density):
            lower = i * dr
            output.write(row.format(l=lower, h=lower + dr, d=value))
def write_pressure(pressure, dr, outname, header, bubble=False):
    """Write pressure stats (bubble or shell form) to *outname*.

    bubble=True expects {'in': [...], 'out': [...]} and writes rows of
    0.000  r_high  p_in  p_out (r_low is always zero); otherwise
    *pressure* is a flat list and rows are r_low  r_high  p.
    """
    logging.info( "Writing output to {}".format(outname) )
    with open(outname, 'w') as output:
        output.write(header)
        if bubble:
            row = '{l:.3f}\t{h:.3f}\t{pin:.13f}\t{pout:.13f}\n'
            nbins = len(pressure['in'])
            for i in range(nbins):
                # NOTE(review): the out column is shifted by one bin —
                # out[0] is never written; kept exactly as the original.
                pout = pressure['out'][i + 1] if i < nbins - 1 else 0
                output.write(row.format(l=0, h=(i + 1) * dr,
                                        pin=pressure['in'][i], pout=pout))
        else:
            row = '{l:.3f}\t{h:.3f}\t{p:.13f}\n'
            for i, value in enumerate(pressure):
                lower = i * dr
                output.write(row.format(l=lower, h=lower + dr, p=value))
def write_ratio(ratio, dr, outname, header, bubble=True):
    """Write atom-ratio stats to *outname*.

    With bubble=True, r_low is always zero (cumulative spheres);
    otherwise each row covers the shell [i*dr, (i+1)*dr).
    """
    row = '{l:.3f}\t{h:.3f}\t{r:.13f}\n'
    with open(outname, 'w') as output:
        output.write(header)
        for i, value in enumerate(ratio):
            lower = 0 if bubble else i * dr
            output.write(row.format(l=lower, h=(i + 1) * dr, r=value))
def bubble_ratio(box, elements, out_fmt, header, dr, time, container, debug=False):
    """Calculate bubble ratio stats per element group and write to disk.

    With debug=True each output path is appended to *container* (used
    by tests).
    """
    for group in elements:
        label = ''.join(group)
        print('Bubble ratio stats for {e}'.format(e=label))
        # Only the first element of each group feeds atom_stats.
        ratio = box.atom_stats(group[0], dr)
        outname = out_fmt.format(time=time, ele=label)
        write_ratio(ratio, dr, outname, header, bubble=True)
        if debug:
            with open(container, 'a') as cc:
                cc.write(outname + '\n')
def shell_ratio(box, elements, out_fmt, header, dr, time, container, debug=False):
    """Calculate shell ratio stats and write results to disk."""
    # TODO: not implemented — returns None.
    pass
def bubble_pressure(box, elements, out_fmt, header, dr, time, container, debug=False):
    """Calculate bubble pressure per element group and write to disk.

    With debug=True each output path is appended to *container*.
    """
    for group in elements:
        label = ''.join(group)
        print("Bubble pressure stats for {e}\n".format(e=label))
        # Renamed local: the original shadowed this function's own name.
        pressures = box.pressure_stats(group, dr)
        outname = out_fmt.format(time=time, ele=label)
        write_pressure(pressures, dr, outname, header, bubble=True)
        if debug:
            with open(container, 'a') as cc:
                cc.write(outname + '\n')
def shell_pressure(box, elements, out_fmt, header, dr, time, container, normal=False, debug=False):
    """Calculate shell pressure per element group and write to disk.

    normal=False writes one file per group; normal=True writes three
    (_r, _theta, _phi suffixes) with the spherical normal components.
    With debug=True every written path is appended to *container*.
    """
    for group in elements:
        label = ''.join(group)
        print('Shell pressure stats for {e}\n'.format(e=label))
        # The stats call is identical in both branches — hoisted here.
        stats = box.shell_pressure_stats(group, dr, normal=normal)
        base = out_fmt.format(time=time, ele=label)
        if not normal:
            write_pressure(stats, dr, base, header, bubble=False)
            written = [base]
        else:
            written = [base + '_r', base + '_theta', base + '_phi']
            write_pressure(stats['r'], dr, written[0], header, bubble=False)
            write_pressure(stats['theta'], dr, written[1], header, bubble=False)
            write_pressure(stats['phi'], dr, written[2], header, bubble=False)
        if debug:
            with open(container, 'a') as cc:
                for name in written:
                    cc.write(name + '\n')
def bubble_density(box, elements, mole, out_fmt, header, dr, time, container, debug=False):
    """Calculate bubble density per element group and write to disk.

    NOTE(review): Box.bubble_density is an unimplemented stub returning
    None, so write_density would fail on its output — confirm upstream.
    """
    for group in elements:
        label = ''.join(group)
        print('Bubble density stats for {e}\n'.format(e=label))
        densities = box.bubble_density(group, mole, dr)
        outname = out_fmt.format(time=time, ele=label)
        write_density(densities, dr, outname, header)
        if debug:
            with open(container, 'a') as cc:
                cc.write(outname + '\n')
def shell_density(box, elements, mole, out_fmt, header, dr, time, container, debug=False):
    """Calculate shell density per element group and write to disk.

    With debug=True each output path is appended to *container*.
    """
    for group in elements:
        label = ''.join(group)
        print('Shell density stats for {e}\n'.format(e=label))
        densities = box.shell_density(group, mole, dr)
        outname = out_fmt.format(time=time, ele=label)
        write_density(densities, dr, outname, header)
        if debug:
            with open(container, 'a') as cc:
                cc.write(outname + '\n')
def xyz_density(box, elements, mole, out_fmt, header, dr, time, container, debug=False):
    """Calculate x/y/z density profiles per element group and write to disk.

    Produces one file per axis; with debug=True the three paths are
    appended to *container*, newline-terminated.
    """
    axes = ('x', 'y', 'z')
    for group in elements:
        label = ''.join(group)
        print('XYZ density stats for {e}\n'.format(e=label))
        densities = box.xyz_density(group, mole, dr)
        names = {axis: out_fmt.format(time=time, ele=label, xyz=axis)
                 for axis in axes}
        for axis in axes:
            write_density(densities[axis], dr, names[axis], header)
        if debug:
            with open(container, 'a') as cc:
                cc.write('\n'.join([names['x'], names['y'], names['z'], '']))
def get_radius(box, element, dr, n=1, ratio=0.5):
    """Estimate the bubble radius from the element-fraction profile.

    The radius is the r whose cumulative n_element / n_total is closest
    to *ratio*; n radii around that best match are returned as
    (radius, fraction) pairs.

    Fix: explicit floor division — the original `n = n / 2` becomes a
    float under true-division semantics and breaks range().

    NOTE(review): a best-match near index 0 lets `best + offset` go
    negative and silently wrap to the end of the list (original
    behavior); the original's "no radii below 10 angstrom" comment was
    never implemented.
    """
    fractions = box.atom_stats(element, dr)
    deltas = [abs(f - ratio) for f in fractions]
    # nanmin ignores the NaN entries produced by empty spheres.
    best = deltas.index(np.nanmin(deltas))
    half = n // 2
    picked = []
    for offset in range(-half, half + 1):
        index = best + offset
        picked.append((dr * (index + 1), fractions[index]))
    return picked
| {
"content_hash": "5be666535186cc250e720d4c9b18862d",
"timestamp": "",
"source": "github",
"line_count": 1131,
"max_line_length": 141,
"avg_line_length": 39.779840848806366,
"alnum_prop": 0.5237936476184125,
"repo_name": "mikkkee/Bubble",
"id": "53ff78b695acab293155638bf1cce25450d125c6",
"size": "44991",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bubble.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64654"
},
{
"name": "Shell",
"bytes": "640"
}
],
"symlink_target": ""
} |
import re
from trac.config import get_configinfo
from trac.core import *
from trac.loader import get_plugin_info
from trac.perm import IPermissionRequestor
from trac.util.html import tag
from trac.util.translation import _
from trac.web.api import IRequestHandler
from trac.web.chrome import Chrome, INavigationContributor, accesskey
class AboutModule(Component):
    """"About Trac" page provider, showing version information from
    third-party packages, as well as configuration information."""

    required = True

    implements(INavigationContributor, IPermissionRequestor, IRequestHandler)

    # INavigationContributor methods

    def get_active_navigation_item(self, req):
        return 'about'

    def get_navigation_items(self, req):
        yield ('metanav', 'about',
               tag.a(_("About Trac"), href=req.href.about(),
                     accesskey=accesskey(req, 9)))

    # IPermissionRequestor methods

    def get_permission_actions(self):
        return ['CONFIG_VIEW']

    # IRequestHandler methods

    def match_request(self, req):
        return re.match(r'/about(?:_trac)?$', req.path_info)

    def process_request(self, req):
        # Template data keys; each section is filled only when the user
        # holds CONFIG_VIEW in the corresponding resource context.
        data = {'systeminfo': None, 'plugins': None,
                'config': None, 'interface': None}

        if 'CONFIG_VIEW' in req.perm('config', 'systeminfo'):
            # Collect system information.
            # Fix: write to the 'systeminfo' key declared above — the
            # previous code wrote 'system_info', leaving the template's
            # key permanently None.
            data['systeminfo'] = self.env.system_info
            Chrome(self.env).add_jquery_ui(req)

        if 'CONFIG_VIEW' in req.perm('config', 'plugins'):
            # Collect plugin information.
            data['plugins'] = get_plugin_info(self.env)

        if 'CONFIG_VIEW' in req.perm('config', 'interface'):
            data['interface'] = \
                Chrome(self.env).get_interface_customization_files()

        if 'CONFIG_VIEW' in req.perm('config', 'ini'):
            # Collect config information.
            data['config'] = get_configinfo(self.env)

        return 'about.html', data
| {
"content_hash": "5497b9a34282a89064861b7bb27cdacd",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 77,
"avg_line_length": 32.08064516129032,
"alnum_prop": 0.6430367018602313,
"repo_name": "rbaumg/trac",
"id": "469856a58838ca7987ff54b596d5b9c79d078b07",
"size": "2764",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "trac/about.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1085"
},
{
"name": "C#",
"bytes": "114293"
},
{
"name": "CSS",
"bytes": "40666"
},
{
"name": "Groff",
"bytes": "1497"
},
{
"name": "JavaScript",
"bytes": "16747"
},
{
"name": "Python",
"bytes": "1287818"
},
{
"name": "Shell",
"bytes": "481"
},
{
"name": "Smalltalk",
"bytes": "11753"
}
],
"symlink_target": ""
} |
import tornado.web
from . import configuration
#from controllers.legacy import Legacy
#from controllers.robot import RobotController
from .handlers.ws_handler import WSHandler
from .handlers.login_handler import LoginHandler
from .handlers.logout_handler import LogoutHandler
from .handlers.signin_handler import SignInHandler
from .handlers.user_handler import UserHandler
from .handlers.index_handler import IndexHandler
from .handlers.javascript_handler import JavascriptHandler
from .handlers.doc_handler import DocHandler
# Tornado URL routing table mapping request paths to handler classes.
# Entries are matched in declaration order.
url_mapping = [
    #(
    #    r'/js/?',
    #    tornado.web.RedirectHandler,
    #    {'url': '/js/index.html'}
    #),
    #(
    #    r'/js/(.+)',
    #    tornado.web.StaticFileHandler,
    #    {'path': configuration.settings['static_path']}
    #),
    # Authentication / account endpoints.
    (r'/login', LoginHandler),
    (r'/logout', LogoutHandler),
    (r'/signin', SignInHandler),
    (r'/user', UserHandler),
    # WebSocket API endpoint.
    (r'/api', WSHandler),
    #(r'/api', WSController),
    (r'/', IndexHandler),
    # Static assets served from subdirectories of the configured static path.
    (
        r'/js/(.+)',
        tornado.web.StaticFileHandler,
        {'path': configuration.settings['static_path'] + '/js'}
    ),
    (
        r'/css/(.+)',
        tornado.web.StaticFileHandler,
        {'path': configuration.settings['static_path'] + '/css'}
    ),
    (
        r'/img/(.+)',
        tornado.web.StaticFileHandler,
        {'path': configuration.settings['static_path'] + '/img'}
    ),
    (r'/javascript', JavascriptHandler),
    # Documentation pages; the trailing path segment is optional.
    (r'/doc(/.*)?', DocHandler),
]
| {
"content_hash": "74be50f5c62b999c76134a785a91d814",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 64,
"avg_line_length": 30.285714285714285,
"alnum_prop": 0.6273584905660378,
"repo_name": "fernandolopez/xremotebot",
"id": "5cca70d2a0ec01153c20c18d8f887c8df4f54708",
"size": "1484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xremotebot/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1221"
},
{
"name": "HTML",
"bytes": "18688"
},
{
"name": "JavaScript",
"bytes": "87241"
},
{
"name": "Makefile",
"bytes": "40"
},
{
"name": "Python",
"bytes": "55214"
}
],
"symlink_target": ""
} |
import asyncio
import discord
from discord.ext import commands
from random import *
# Joke pools used by the Jokes cog below; a joke is drawn uniformly at
# random from the concatenation of all lists.

# "Yo momma" style jokes.
yoMumma = ["Yo momma is so fat, I took a picture of her last Christmas and it's still printing.",
    'Yo momma is so fat when she got on the scale it said, "I need your weight not your phone number."',
    "Yo momma's so fat, that when she fell, no one was laughing but the ground was cracking up.",
    'Yo mamma is so ugly when she tried to join an ugly contest they said, "Sorry, no professionals."',
    "Yo momma's so fat and old when God said, 'Let there be light,' he asked your mother to move out of the way.",
    "Yo momma is so fat that when she went to the beach a whale swam up and sang, 'We are family, even though you're fatter than me.'",
    "Yo momma is so fat when she sat on WalMart, she lowered the prices.",
    "Your momma is so ugly she made One Direction go another direction.",
    "Yo momma is so fat that Dora can't even explore her!",
    "Yo momma is so stupid when an intruder broke into her house, she ran downstairs, dialed 9-1-1 on the microwave, and couldn't find the 'CALL' button.",
    "Yo momma is so fat her bellybutton gets home 15 minutes before she does.",
    "Yo momma's so stupid, she put two quarters in her ears and thought she was listening to 50 Cent.",
    "Yo mamma is so fat she doesn't need the internet, because she's already world wide.",
    "Yo momma's so dumb, when y'all were driving to Disneyland, she saw a sign that said 'Disneyland left' so she went home.",
    "Yo momma is so fat, when she sat on an iPod, she made the iPad!",
    'Yo momma so stupid she stuck a battery up her ass and said, "I GOT THE POWER!"',
    'Yo Momma is so fat when I told her to touch her toes she said, "What are those"?',
    'Yo momma is so stupid she climbed over a glass wall to see what was on the other side.',
    'Yo momma is so hairy, when she went to the movie theater to see Star Wars, everybody screamed and said, "IT IS CHEWBACCA!"',
    'Yo momma is so stupid she brought a spoon to the super bowl.']

# One-liner "dad jokes".
dadJokes = ["What's the difference between a good joke and a bad joke timing.",
    "I met my wife on the net; we were both bad trapeze artists.",
    "What's the difference between in-laws and outlaws? Outlaws are wanted.",
    "Went to KFC the other day, didn't know Kentucky had a football club.",
    "What's the leading cause of dry skin? Towels",
    "Do I enjoy making courthouse puns? Guilty.",
    "I tell you what often gets overlooked - garden fences.",
    "I saw an ad in a shop window that said “Television for Sale – £1- Volume Stuck On Full”. I thought: “I can’t turn that down”.",
    "My dog used to chase people on a bike a lot. It got so bad, I eventually had to take his bike off him.",
    "What's a marsupial's favourite cocktail? A piña koala.",
    "Shout out to my grandma, that's the only way she can hear.",
    "Today's top fact: 50% of Canada is A",
    "3.14% of sailors are pi-rates."]

# "What do you get when you cross ..." Q&A jokes.
mixJokes = ["Q: What do you get when you cross a cow with a trampoline? A: A milkshake!",
    "Q: What do you get when you cross a ghost and a cat ? A: A scaredy cat!",
    "Q: What do you get when you cross a karate expert with a pig? A: A porkchop.",
    "Q: What do you get when you cross a tiger and a blizzard? A: Frostbite!",
    "Q: What do you get when you cross a cow and a lawnmower? A: A lawnmooer.",
    "Q: What do you get if you cross a kangaroo and a elephant? A: Big holes all over Australia!",
    "Q: What do you get if you cross cat with an elephant? A: A flat cat."]

# Knock-knock jokes.
knockknock = ["Knock, knock. Who’s there? Canoe. Canoe who? Canoe help me with my homework?",
    "Knock, knock. Who’s there? Orange. Orange who? Orange you going to let me in?",
    "Knock, knock. Who’s there? Dozen.Dozen who? Dozen anybody want to let me in?",
    "Knock, knock. Who’s there? Avenue.Avenue who? Avenue knocked on this door before?",
    "Knock, knock. Who’s there? A herd. A herd who? A herd you were home, so I came over!",
    "Knock, knock. Who’s there? Lettuce. Lettuce who? Lettuce in it’s cold out here.",
    "Knock, knock. Who’s there? Dwayne. Dwayne who? Dwayne the bathtub, It’s overflowing!",
    "Knock, knock. Who’s there? Boo. Boo who? Gosh, don’t cry it’s just a knock knock joke.",
    "Knock, knock. Who’s there? Justin. Justin who? Justin time for dinner.",
    "Knock, knock. Who’s there? Luke. Luke who? Luke through the the peep hole and find out."]

# Miscellaneous.
otherJokes = ["Muh vagina"]
class Jokes:
    """The bot's funny side"""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(pass_context=True, description="Tells a joke")
    async def joke(self, ctx):
        """Tells a joke"""
        # Show the typing indicator in the invoking channel while the
        # joke is selected.
        await self.bot.send_typing(ctx.message.channel)

        # Draw uniformly from every joke pool defined at module level.
        all_jokes = dadJokes + mixJokes + knockknock + yoMumma + otherJokes
        await self.bot.say(choice(all_jokes))
| {
"content_hash": "4c86e9dbb399e64803aa11749bab10e0",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 162,
"avg_line_length": 68.94805194805195,
"alnum_prop": 0.6328875494443398,
"repo_name": "ImTheTom/discordBot",
"id": "643e2b6de4d6c0d4fed6a41f691ee47aa08180d0",
"size": "5351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cogs/jokes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "74188"
},
{
"name": "Shell",
"bytes": "179"
}
],
"symlink_target": ""
} |
from .fill_sinks import SinkFiller
# Public API of the sink_fill component package.
__all__ = ['SinkFiller', ]
| {
"content_hash": "67f78863bec7ac78527cf10d19f2db56",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 34,
"avg_line_length": 16,
"alnum_prop": 0.65625,
"repo_name": "Carralex/landlab",
"id": "a9001ab4c0e7e1cdd8c3f66467bbf6cafb8dd85f",
"size": "64",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "landlab/components/sink_fill/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1452"
},
{
"name": "HTML",
"bytes": "99948"
},
{
"name": "Jupyter Notebook",
"bytes": "13921"
},
{
"name": "Makefile",
"bytes": "1765"
},
{
"name": "PowerShell",
"bytes": "7128"
},
{
"name": "Python",
"bytes": "3555237"
},
{
"name": "Shell",
"bytes": "2370"
},
{
"name": "TeX",
"bytes": "64170"
}
],
"symlink_target": ""
} |
"""Script to run the experiments and plot the results."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from absl import app
from absl import flags
import matplotlib.pyplot as plt
import numpy as np
import scipy.io
import sklearn.datasets
from sklearn.model_selection import train_test_split
from agbt import AGBT
from agbt_b import AGBTB
import functional as F
from gbt import GBT
from tree import Dataset
from tensorflow.python.platform import gfile
# Handle to the parsed flag values; populated once app.run() processes argv.
FLAGS = flags.FLAGS

# Command-line flags controlling datasets, losses, and boosting parameters.
flags.DEFINE_string('data_folder', None, 'The directory of datasets.')
flags.DEFINE_enum('dataset_name', 'all_datasets', [
    'all_datasets', 'a1a', 'w1a', 'housing', 'w8a', 'a9a', 'colon', 'Year',
    'rcv1'
], ('The name of instances.'
    '`all_datasets` means all of the instances in the folder.'))
flags.DEFINE_enum('loss', 'L2Loss', ['L2Loss', 'LogisticLoss'],
                  'The loss function.')
flags.DEFINE_integer(
    'early_stopping_rounds', 100000,
    ('Stop the algorithm if the validation loss does not improve after this'
     'number of iterations.'))
flags.DEFINE_float(
    'z_shrinkage_parameter', 0.1,
    'The shrinkage parameter in the z-update in accelerated method.')
flags.DEFINE_string('output_dir', None,
                    'The directory where output will be written.')
flags.DEFINE_integer('max_depth', 4, 'Maximal depth of a tree.')
flags.DEFINE_integer('num_trees', 20, 'Number of boosting iterations.')
flags.DEFINE_float('min_split_gain', 0.01, 'Minimal gain for splitting a leaf.')
flags.DEFINE_float('learning_rate', 0.3, 'Learning rate.')
flags.DEFINE_float('regularizer_const', 1, 'Regularizer constant.')
flags.DEFINE_boolean('use_hessian', False, 'Whether to use Hessian.')

# Train/test split configuration shared by all experiments.
TEST_SIZE = 0.2
RANDOM_STATE = 1
# Mapping from the --loss flag value to the loss implementation.
LOSS = {'L2Loss': F.L2Loss, 'LogisticLoss': F.LogisticLoss}
def set_up_data(data_folder, dataset_name):
  """Load a svmlight-format dataset and split it into train/test sets.

  Args:
    data_folder: str, directory containing `<dataset_name>.txt` files.
    dataset_name: str, base name of the dataset file (without extension).

  Returns:
    Tuple (x_train, x_test, y_train, y_test) as produced by
    sklearn.model_selection.train_test_split with the module-level
    TEST_SIZE and RANDOM_STATE.
  """
  dataset_path = os.path.join(data_folder, dataset_name + '.txt')
  features, labels = sklearn.datasets.load_svmlight_file(
      gfile.Open(dataset_path, mode='rb'))
  # Densify the sparse feature matrix for the tree-building code.
  feature_matrix = np.asarray(features.todense())
  label_vector = np.array(labels)
  return train_test_split(feature_matrix, label_vector, test_size=TEST_SIZE,
                          random_state=RANDOM_STATE)
def save_output(output_dict, name, params):
  """Save the per-method loss histories to a MATLAB .mat file.

  Args:
    output_dict: dict mapping loss-series names to loss-value sequences.
    name: str, dataset name embedded in the output filename.
    params: experiment-parameter namedtuple; learning_rate, min_split_gain,
      num_trees, and max_depth are embedded in the filename.

  Returns:
    0 on success (kept for backward compatibility with callers).
  """
  # Renamed from `dir`, which shadowed the `dir` builtin.
  output_path = os.path.join(FLAGS.output_dir, 'output')
  if not gfile.Exists(output_path):
    gfile.MakeDirs(output_path)
  matfile_path = output_path + '/{:s}_lr_{:s}_min_split_gain_{:s}_num_trees_{:s}_max_depth_{:s}.mat'.format(
      name,
      str(params.learning_rate).replace('.', ''),
      str(params.min_split_gain).replace('.', ''),
      str(params.num_trees).replace('.', ''),
      str(params.max_depth).replace('.', ''),
  )
  scipy.io.savemat(gfile.Open(matfile_path, mode='wb'), mdict=output_dict)
  return 0
def plot_figures(output_dict, name, params):
  """Plot train/test loss curves on linear and log scales.

  Produces four figures in `<output_dir>/figures`: train, test, log_train,
  and log_test, each showing GBT, AGBT-B, and the three AGBT runs.

  Args:
    output_dict: dict of loss-series names to loss-value sequences, as
      assembled in main().
    name: str, dataset name embedded in the figure filenames.
    params: experiment-parameter namedtuple; learning_rate, min_split_gain,
      and num_trees are embedded in the filenames.
  """
  figure_dir = os.path.join(FLAGS.output_dir, 'figures')
  if not gfile.Exists(figure_dir):
    gfile.MakeDirs(figure_dir)

  # Common filename suffix shared by all four figures.
  suffix = '{:s}_lr_{:s}_min_split_gain_{:s}_num_trees_{:s}'.format(
      name,
      str(params.learning_rate).replace('.', ''),
      str(params.min_split_gain).replace('.', ''),
      str(params.num_trees).replace('.', ''))

  def _plot(series, fname, log_scale):
    """Plot the (key, label) series into figure_dir/fname."""
    fig = plt.figure()
    for key, label in series:
      plt.plot(output_dict[key], label=label)
    if log_scale:
      plt.yscale('log')
    plt.legend()
    # BUG FIX: three of the four original paths were concatenated without a
    # path separator, so the figures were written outside figure_dir.
    fig.savefig(gfile.Open(os.path.join(figure_dir, fname), 'wb'))

  train_series = [('gbt_train_losses', 'gbt'),
                  ('agbt_b_train_losses', 'agbt_b'),
                  ('agbt_train_losses_1', 'agbt1'),
                  ('agbt_train_losses_2', 'agbt01'),
                  ('agbt_train_losses_3', 'agbt001')]
  # BUG FIX: the original test figures plotted the *train* losses of the
  # three AGBT runs; plot the corresponding test losses instead.
  test_series = [('gbt_test_losses', 'gbt'),
                 ('agbt_b_test_losses', 'agbt_b'),
                 ('agbt_test_losses_1', 'agbt1'),
                 ('agbt_test_losses_2', 'agbt01'),
                 ('agbt_test_losses_3', 'agbt001')]

  _plot(train_series, 'train_' + suffix, log_scale=False)
  _plot(test_series, 'test_' + suffix, log_scale=False)
  _plot(train_series, 'log_train_' + suffix, log_scale=True)
  _plot(test_series, 'log_test_' + suffix, log_scale=True)
def main(argv):
  """Run GBT, AGBT-B, and three AGBT configurations per dataset, then save
  and plot the resulting loss histories."""
  del argv

  if FLAGS.data_folder is None:
    raise ValueError('Directory with downloaded datasets must be provided.')
  if FLAGS.dataset_name == 'all_datasets':
    names = ['a1a', 'w1a', 'housing']
  else:
    names = [FLAGS.dataset_name]
  if FLAGS.output_dir is None:
    raise ValueError('Output directory must be provided.')

  for name in names:
    # Build train/validation datasets for this instance.
    x_train, x_test, y_train, y_test = set_up_data(FLAGS.data_folder, name)
    train_set = Dataset(x_train, y_train)
    valid_set = Dataset(x_test, y_test)

    # Bundle the flag values into a single parameter record.
    gbt_params = collections.namedtuple('gbt_params', [
        'regularizer_const', 'min_split_gain', 'max_depth', 'learning_rate',
        'num_trees', 'early_stopping_rounds', 'loss', 'use_hessian',
        'z_shrinkage_parameter'
    ])
    params = gbt_params(
        regularizer_const=FLAGS.regularizer_const,
        min_split_gain=FLAGS.min_split_gain,
        max_depth=FLAGS.max_depth,
        learning_rate=FLAGS.learning_rate,
        num_trees=FLAGS.num_trees,
        early_stopping_rounds=FLAGS.early_stopping_rounds,
        loss=FLAGS.loss,
        use_hessian=FLAGS.use_hessian,
        z_shrinkage_parameter=FLAGS.z_shrinkage_parameter)

    # Baselines: vanilla gradient boosting and the AGBT-B variant.
    gbt_train_losses, gbt_test_losses = GBT(params).train(
        train_set, valid_set=valid_set)
    agbt_b_train_losses, agbt_b_test_losses = AGBTB(params).train(
        train_set, valid_set=valid_set)

    # Accelerated method with three z-shrinkage settings.
    agbt_losses = {}
    for run_id, shrinkage in enumerate((0.5, 0.3, 0.1), start=1):
      params = params._replace(z_shrinkage_parameter=shrinkage)
      train_losses, test_losses = AGBT(params).train(
          train_set, valid_set=valid_set)
      agbt_losses['agbt_train_losses_{:d}'.format(run_id)] = train_losses
      agbt_losses['agbt_test_losses_{:d}'.format(run_id)] = test_losses

    # Keys are laid out in the order expected by plot_figures().
    output_dict = {
        'gbt_train_losses': gbt_train_losses,
        'gbt_test_losses': gbt_test_losses,
        'agbt_b_train_losses': agbt_b_train_losses,
        'agbt_b_test_losses': agbt_b_test_losses,
        'agbt_train_losses_1': agbt_losses['agbt_train_losses_1'],
        'agbt_test_losses_1': agbt_losses['agbt_test_losses_1'],
        'agbt_train_losses_2': agbt_losses['agbt_train_losses_2'],
        'agbt_test_losses_2': agbt_losses['agbt_test_losses_2'],
        'agbt_train_losses_3': agbt_losses['agbt_train_losses_3'],
        'agbt_test_losses_3': agbt_losses['agbt_test_losses_3'],
    }
    save_output(output_dict, name, params)
    plot_figures(output_dict, name, params)
# Standard absl entry point: parses flags, then dispatches to main().
if __name__ == '__main__':
  app.run(main)
| {
"content_hash": "df5b504f3dd22f6ada3e47645181d22d",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 100,
"avg_line_length": 35.63025210084034,
"alnum_prop": 0.6326650943396226,
"repo_name": "google-research/accelerated_gbm",
"id": "44fd4b28e5a612c1c1566992e521d1ddc59f27a6",
"size": "9146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solve_and_plot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59616"
}
],
"symlink_target": ""
} |
import socket
import os
HOST = ''
PORT = 10000
servSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM);
print 'Socket created'
try:
servSock.bind((HOST, PORT))
except socket.error, msg:
print "Bind failed, error code: " + str(msg[0]) + ", message: " + msg[1]
sys.exit()
print "socket bind complete"
while 1:
data, addr = servSock.recvfrom(1024)
print "received data: " + data
os.system('/etc/reset_stp')
| {
"content_hash": "2f9af32258adc96be78d249f887fcbe9",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 76,
"avg_line_length": 19.90909090909091,
"alnum_prop": 0.6552511415525114,
"repo_name": "alxshine/stp-tree-generator",
"id": "919a2fd064b987c94e5b9e407ec3bc9a49637501",
"size": "438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_utils/python_utils/serverStop.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "37247"
},
{
"name": "C++",
"bytes": "289264"
},
{
"name": "LOLCODE",
"bytes": "2324"
},
{
"name": "Makefile",
"bytes": "1759"
},
{
"name": "Python",
"bytes": "6101"
},
{
"name": "Shell",
"bytes": "223"
},
{
"name": "TeX",
"bytes": "170238"
}
],
"symlink_target": ""
} |
from lockedash import app
from lockedash.auth.config import AuthConfig
from unittest import TestCase
from unittest.mock import patch
from lockedash.auth.app import AuthApp
from lockedash.auth.models import User
from lockedash.service import Service, Route
import os
class AuthTest(TestCase):
    # Base test case: configures a hermetic in-memory SQLite AuthApp so
    # subclasses never touch a real database or secret.
    def setUp(self):
        os.environ['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
        os.environ['SECRET_KEY'] = 'TEST_SECRET_KEY'
        self.app = AuthApp(AuthConfig())
class TestAuthService(AuthTest):
    """Tests for static module loading and basic user persistence."""

    def test_module_load(self):
        for x in app.configure(app.static_modules()):
            # BUG FIX: the original evaluated `x.name in ['AuthService']`
            # without asserting it, so this test could never fail.
            assert x.name in ['AuthService']

    def test_add_user(self):
        user = User(name='test_user')
        self.app.session.add(user)
        # The persisted row should have an assigned id and the same name.
        found = self.app.session.query(User).filter_by(name=user.name).first()
        self.assertIsNotNone(found.id)
        self.assertEqual(user.name, found.name)
class TestService(AuthTest):
    # Tests for the Service routing helper.
    def setUp(self):
        super().setUp()
        self.service = Service(self.app)

    def test_route(self):
        # Route('/') returns a decorator; applying it and invoking the
        # wrapped function against the service must not raise.
        @Route('/')
        def wrap(self, hello=None):
            pass
        wrap(self.service)
| {
"content_hash": "f6057551bbbe499997b54de83c951a8f",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 78,
"avg_line_length": 24,
"alnum_prop": 0.6519097222222222,
"repo_name": "andrew-plunk/lockedash",
"id": "708fefe789ea775a32c87cd398684fdf047dd2bc",
"size": "1152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lockedash/test/test_auth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6652"
}
],
"symlink_target": ""
} |
"""Tests for data access and related functions in the pysat Instrument object.
Includes:
* data loading
* equality method comparisons
* index tests
* setter and getter functions
* concat
* empty data flags
* variable renaming
Note
----
Base class stored here, but tests inherited by test_instrument.py
"""
import datetime as dt
import logging
import numpy as np
import pandas as pds
import pytest
import xarray as xr
import pysat
logger = pysat.logger
class InstAccessTests(object):
    """Basic tests for `pysat.Instrument` data access.

    Note
    ----
    Inherited by classes in test_instrument.py. Setup and teardown methods,
    which provide `self.testInst`, `self.ref_time`, and `self.ref_doy`, are
    specified there.

    See Also
    --------
    `pysat.tests.test_instrument`

    """
    def eval_successful_load(self, end_date=None):
        """Evaluate successful loading of `self.testInst`.

        Parameters
        ----------
        end_date : dt.datetime or NoneType
            End date for loading data. If None, assumes self.ref_time + 1 day.
            (default=None)

        Note
        ----
        Success of test is tied to `self.ref_time`.

        """
        # Test that the first loaded time matches the first requested time
        assert self.testInst.index[0] == self.ref_time, \
            "First loaded time is incorrect"

        # Test that the Instrument date is set to the requested start date
        self.out = dt.datetime(self.ref_time.year, self.ref_time.month,
                               self.ref_time.day)
        assert self.testInst.date == self.out, \
            "Incorrect Instrument date attribute"

        # Test that the end of the loaded data matches the requested end date;
        # the last sample must fall strictly after the start time and no
        # later than `end_date`.
        if end_date is None:
            end_date = self.ref_time + dt.timedelta(days=1)
        assert self.testInst.index[-1] > self.ref_time, \
            "Last loaded time is not greater than the start time"
        assert self.testInst.index[-1] <= end_date, \
            "Last loaded time is greater than the requested end date"
        return
    # Exercises both the default load path and one with an explicit
    # 'num_samples' keyword passed through to the loader.
    @pytest.mark.parametrize("kwargs", [{}, {'num_samples': 30}])
    def test_basic_instrument_load(self, kwargs):
        """Test that the correct day loads with input year and doy.

        Parameters
        ----------
        kwargs : dict
            Dictionary of keywords and arguments to invoke when loading.

        """
        # Load data by year and day of year
        self.testInst.load(self.ref_time.year, self.ref_doy, **kwargs,
                           use_header=True)

        # Test that the loaded date range is correct
        self.eval_successful_load()
        return
    def test_basic_instrument_load_no_data(self, caplog):
        """Test Instrument load with no data for appropriate log messages."""

        # Get a date that is not covered by an Instrument object.
        no_data_d = self.testInst.files.files.index[0] - dt.timedelta(weeks=10)
        with caplog.at_level(logging.INFO, logger='pysat'):
            # Attempt to load data for a date with no data.
            # Test doesn't check against loading by filename since that
            # produces an error if there is no file. Loading by yr, doy no
            # different than date in this case.
            self.testInst.load(date=no_data_d, use_header=True)

        # Confirm by checking against caplog that metadata was not assigned.
        captured = caplog.text
        assert captured.find("Metadata was not assigned as there") >= 0

        # Generate string to verify proper no data message
        output_str = '{platform} {name} {tag} {inst_id}'
        output_str = output_str.format(platform=self.testInst.platform,
                                       name=self.testInst.name,
                                       tag=self.testInst.tag,
                                       inst_id=self.testInst.inst_id)
        output_str = ''.join(("No ", output_str))

        # Remove any extra spaces. Follows code in _instrument.
        output_str = " ".join(output_str.split())
        assert captured.find(output_str) >= 0
        return
    def test_basic_instrument_load_two_days(self):
        """Test that the correct day loads (checking object date and data)."""

        # Load the reference date plus the following day.
        end_date = self.ref_time + dt.timedelta(days=2)
        end_doy = int(end_date.strftime("%j"))
        self.testInst.load(self.ref_time.year, self.ref_doy, end_date.year,
                           end_doy, use_header=True)

        # Test that the loaded date range is correct
        self.eval_successful_load(end_date=end_date)
        return
def test_basic_instrument_bad_keyword_at_load(self):
"""Check for error when calling load with bad keywords."""
# Test that the correct error is raised
with pytest.raises(TypeError) as terr:
self.testInst.load(date=self.ref_time, unsupported_keyword=True,
use_header=True)
# Evaluate error message
assert str(terr).find("load() got an unexpected keyword") >= 0
return
def test_basic_instrument_load_yr_no_doy(self):
"""Ensure day of year required if year is present."""
# Check that the correct error is raised
with pytest.raises(TypeError) as err:
self.testInst.load(self.ref_time.year, use_header=True)
# Check that the error message is correct
estr = 'Unknown or incomplete input combination.'
assert str(err).find(estr) >= 0
return
@pytest.mark.parametrize('doy', [0, 367, 1000, -1, -10000])
def test_basic_instrument_load_yr_bad_doy(self, doy):
"""Ensure error raised if day of year load argument out of valid range.
Parameters
----------
doy : int
Day of year to create an error when loading.
"""
with pytest.raises(ValueError) as err:
self.testInst.load(self.ref_time.year, doy, use_header=True)
estr = 'Day of year (doy) is only valid between and '
assert str(err).find(estr) >= 0
return
@pytest.mark.parametrize('end_doy', [0, 367, 1000, -1, -10000])
def test_basic_instrument_load_yr_bad_end_doy(self, end_doy):
"""Ensure error raised if `end_doy` keyword out of valid range.
Parameters
----------
end_doy : int
Day of year to create an error when loading by end_date.
"""
with pytest.raises(ValueError) as err:
self.testInst.load(self.ref_time.year, 1, end_yr=self.ref_time.year,
end_doy=end_doy, use_header=True)
estr = 'Day of year (end_doy) is only valid between and '
assert str(err).find(estr) >= 0
return
def test_basic_instrument_load_yr_no_end_doy(self):
"""Ensure `end_doy` required if `end_yr` present."""
with pytest.raises(ValueError) as err:
self.testInst.load(self.ref_time.year, self.ref_doy,
self.ref_time.year, use_header=True)
estr = 'Both end_yr and end_doy must be set'
assert str(err).find(estr) >= 0
return
@pytest.mark.parametrize("kwargs", [{'yr': 2009, 'doy': 1,
'date': dt.datetime(2009, 1, 1)},
{'yr': 2009, 'doy': 1,
'end_date': dt.datetime(2009, 1, 1)},
{'yr': 2009, 'doy': 1,
'fname': 'dummy_str.nofile'},
{'yr': 2009, 'doy': 1,
'stop_fname': 'dummy_str.nofile'},
{'date': dt.datetime(2009, 1, 1),
'fname': 'dummy_str.nofile'},
{'date': dt.datetime(2009, 1, 1),
'stop_fname': 'dummy_str.nofile'},
{'date': dt.datetime(2009, 1, 1),
'fname': 'dummy_str.nofile',
'end_yr': 2009, 'end_doy': 1}])
def test_basic_instrument_load_mixed_inputs(self, kwargs):
"""Ensure mixed load inputs raise ValueError.
Parameters
----------
kwargs : dict
Dictionary of keywords and arguments to produce an error when
instrument is loaded.
"""
with pytest.raises(ValueError) as err:
self.testInst.load(use_header=True, **kwargs)
estr = 'An inconsistent set of inputs have been'
assert str(err).find(estr) >= 0
return
    def test_basic_instrument_load_no_input(self):
        """Test that `.load()` loads all data."""
        self.testInst.load(use_header=True)

        # All data should be present: from the first file's start date up
        # to, at most, one day past the last file's stop date.
        assert (self.testInst.index[0] == self.testInst.files.start_date)
        assert (self.testInst.index[-1] >= self.testInst.files.stop_date)
        assert (self.testInst.index[-1] <= self.testInst.files.stop_date
                + dt.timedelta(days=1))
        return
@pytest.mark.parametrize('load_in,verr',
[('fname', 'have multi_file_day and load by file'),
(None, 'is not supported with multi_file_day')])
def test_instrument_load_errors_with_multifile(self, load_in, verr):
"""Ensure improper use of load with `multi_file_day` raises ValueError.
Parameters
----------
load_in : str or NoneType
If 'fname', load by filename. If None, load without kwargs.
verr : str
Text that should be contained in the error message generated by
the improper load configuration above.
"""
self.testInst.multi_file_day = True
if load_in == 'fname':
load_kwargs = {load_in: self.testInst.files[0]}
else:
load_kwargs = dict()
with pytest.raises(ValueError) as err:
self.testInst.load(use_header=True, **load_kwargs)
assert str(err).find(verr) >= 0
return
    def test_basic_instrument_load_by_date(self):
        """Test loading by date."""
        # Load via the `date` keyword and verify the loaded range.
        self.testInst.load(date=self.ref_time, use_header=True)
        self.eval_successful_load()
        return
    def test_basic_instrument_load_by_dates(self):
        """Test date range loading, `date` and `end_date`."""
        # Request a two-day window via the keyword interface.
        end_date = self.ref_time + dt.timedelta(days=2)
        self.testInst.load(date=self.ref_time, end_date=end_date,
                           use_header=True)
        self.eval_successful_load(end_date=end_date)
        return
    def test_basic_instrument_load_by_date_with_extra_time(self):
        """Ensure `.load(date=date)` only uses date portion of datetime."""

        # Put in a date that has more than year, month, day; the sub-daily
        # part (71 minutes here) should be discarded by load.
        self.testInst.load(date=(self.ref_time + dt.timedelta(minutes=71)),
                           use_header=True)
        self.eval_successful_load()
        return
    def test_basic_instrument_load_data(self):
        """Test that correct day loads (checking down to the sec)."""
        # eval_successful_load compares the first index value exactly.
        self.testInst.load(self.ref_time.year, self.ref_doy, use_header=True)
        self.eval_successful_load()
        return
    def test_basic_instrument_load_leap_year(self):
        """Test if the correct day is being loaded (Leap-Year)."""
        # 2008 is a leap year, so doy 366 corresponds to 31 December.
        self.ref_time = dt.datetime(2008, 12, 31)
        self.ref_doy = 366
        self.testInst.load(self.ref_time.year, self.ref_doy, use_header=True)
        self.eval_successful_load()
        return
    # The parametrized ref_time values correspond to the dates loaded when
    # iteration is first invoked in each direction.
    @pytest.mark.parametrize("operator,ref_time",
                             [('next', dt.datetime(2008, 1, 1)),
                              ('prev', dt.datetime(2010, 12, 31))])
    def test_file_load_default(self, operator, ref_time):
        """Test if correct day loads by default when first invoking iteration.

        Parameters
        ----------
        operator : str
            Name of iterator to use.
        ref_time : dt.datetime
            Expected date to load when iteration is first invoked.

        """
        getattr(self.testInst, operator)()

        # Modify ref time since iterator changes load date.
        self.ref_time = ref_time
        self.eval_successful_load()
        return
@pytest.mark.parametrize("operator", [('next'), ('prev')])
def test_file_load_bad_start_file(self, operator):
"""Test Error when starting iteration on a file not in iteration list.
Parameters
----------
operator : str
Name of iterator to use.
"""
self.testInst.load(fname=self.testInst.files[12], use_header=True)
# Set new bounds that do not include this date.
self.testInst.bounds = (self.testInst.files[9], self.testInst.files[20],
2, 1)
with pytest.raises(StopIteration) as err:
getattr(self.testInst, operator)()
estr = 'Unable to find loaded filename '
assert str(err).find(estr) >= 0
return
@pytest.mark.parametrize("operator", [('next'), ('prev')])
def test_file_load_bad_start_date(self, operator):
"""Test that day iterators raise Error on bad start date.
Parameters
----------
operator : str
Name of iterator to use.
"""
self.testInst.load(date=self.ref_time, use_header=True)
# Set new bounds that do not include this date.
self.testInst.bounds = (self.ref_time + dt.timedelta(days=1),
self.ref_time + dt.timedelta(days=10),
'2D', dt.timedelta(days=1))
with pytest.raises(StopIteration) as err:
getattr(self.testInst, operator)()
estr = 'Unable to find loaded date '
assert str(err).find(estr) >= 0
return
    def test_basic_fname_instrument_load(self):
        """Test loading by filename from attached `.files`."""

        # If mangle_file_date is true, index will not match exactly.
        # Find the closest point instead.
        ind = np.argmin(abs(self.testInst.files.files.index - self.ref_time))
        self.testInst.load(fname=self.testInst.files[ind], use_header=True)
        self.eval_successful_load()
        return
    @pytest.mark.parametrize("operator,direction",
                             [('next', 1),
                              ('prev', -1)])
    def test_fname_load_default(self, operator, direction):
        """Test correct day loads when moving by day, starting with `fname`.

        Parameters
        ----------
        operator : str
            Name of iterator to use.
        direction : int
            Positive if moving forward, negative if moving backward.

        """
        # If mangle_file_date is true, index will not match exactly.
        # Find the closest point.
        ind = np.argmin(abs(self.testInst.files.files.index - self.ref_time))
        self.testInst.load(fname=self.testInst.files[ind], use_header=True)
        getattr(self.testInst, operator)()

        # Modify ref time since the iterator shifts the load date by one
        # day in the direction of iteration.
        self.ref_time = self.ref_time + direction * dt.timedelta(days=1)
        self.eval_successful_load()
        return
def test_filename_load(self):
    """Test if file is loadable by filename with no path."""

    # Filename is constructed from the reference date; no directory parts.
    self.testInst.load(fname=self.ref_time.strftime('%Y-%m-%d.nofile'),
                       use_header=True)
    self.eval_successful_load()
    return
def test_filenames_load(self):
    """Test if files are loadable by filename range."""

    stop_fname = self.ref_time + dt.timedelta(days=1)
    stop_fname = stop_fname.strftime('%Y-%m-%d.nofile')
    self.testInst.load(fname=self.ref_time.strftime('%Y-%m-%d.nofile'),
                       stop_fname=stop_fname, use_header=True)
    # The assertions show data extends into (but not past) the stop file's
    # day, i.e. the stop filename is included in the load.
    assert self.testInst.index[0] == self.ref_time
    assert self.testInst.index[-1] >= self.ref_time + dt.timedelta(days=1)
    assert self.testInst.index[-1] <= self.ref_time + dt.timedelta(days=2)
    return
def test_filenames_load_out_of_order(self):
    """Test error raised if fnames out of temporal order."""

    stop_fname = self.ref_time + dt.timedelta(days=1)
    stop_fname = stop_fname.strftime('%Y-%m-%d.nofile')
    with pytest.raises(ValueError) as err:
        check_fname = self.ref_time.strftime('%Y-%m-%d.nofile')
        # fname is intentionally the later file; stop_fname the earlier one.
        self.testInst.load(fname=stop_fname,
                           stop_fname=check_fname, use_header=True)
    estr = '`stop_fname` must occur at a later date '
    assert str(err).find(estr) >= 0
    return
def test_eq_no_data(self):
    """Test equality when the same object."""

    # Copies of an unloaded Instrument should compare equal.
    inst_copy = self.testInst.copy()
    assert inst_copy == self.testInst
    return
def test_eq_both_with_data(self):
    """Test equality when the same object with loaded data."""

    # Equality must also hold when both objects carry identical data.
    self.testInst.load(date=self.ref_time, use_header=True)
    inst_copy = self.testInst.copy()
    assert inst_copy == self.testInst
    return
def test_eq_one_with_data(self):
    """Test equality when the same objects but only one with loaded data."""

    self.testInst.load(date=self.ref_time, use_header=True)
    inst_copy = self.testInst.copy()
    # Replace the copy's data with the empty container (presumably
    # `_null_data` is the format-appropriate empty object — confirm).
    inst_copy.data = self.testInst._null_data
    assert inst_copy != self.testInst
    return
def test_eq_different_data_type(self):
    """Test equality different data type."""

    self.testInst.load(date=self.ref_time, use_header=True)
    inst_copy = self.testInst.copy()

    # Swap the copy's data format (pandas <-> xarray) with an empty
    # container of the other type; formats must not compare equal.
    if self.testInst.pandas_format:
        inst_copy.pandas_format = False
        inst_copy.data = xr.Dataset()
    else:
        inst_copy.pandas_format = True
        inst_copy.data = pds.DataFrame()
    assert inst_copy != self.testInst

    return
def test_eq_different_type(self):
    """Test equality False when non-Instrument object."""

    # Comparison against an arbitrary non-Instrument must be unequal.
    assert self.testInst != np.array([])
    return
def test_inequality_modified_object(self):
    """Test that equality is false if other missing attributes."""

    self.out = self.testInst.copy()

    # Remove attribute
    del self.out.platform

    assert self.testInst != self.out
    return
def test_inequality_reduced_object(self):
    """Test that equality is false if self missing attributes."""

    self.out = self.testInst.copy()
    # Give the copy an extra attribute so `self.testInst` is the one
    # missing it.
    self.out.hi_there = 'hi'
    assert self.testInst != self.out
    return
@pytest.mark.parametrize("prepend, sort_dim_toggle",
                         [(True, True), (True, False), (False, False)])
def test_concat_data(self, prepend, sort_dim_toggle):
    """Test `pysat.Instrument.data` concatenation.

    Parameters
    ----------
    prepend : bool
        Behavior of `concat_data`.  If True, assign new data before existing
        data; if False append new data.
    sort_dim_toggle : bool
        If True, sort variable names in pandas before concatenation.  If
        False, do not sort for pandas objects.  For xarray objects, rename
        the epoch if True.

    """

    # Load a data set to concatenate
    self.testInst.load(self.ref_time.year, self.ref_doy + 1,
                       use_header=True)
    data2 = self.testInst.data
    len2 = len(self.testInst.index)

    # Load a different data set into the instrument
    self.testInst.load(self.ref_time.year, self.ref_doy, use_header=True)
    len1 = len(self.testInst.index)

    # Set the keyword arguments
    kwargs = {'prepend': prepend}
    if sort_dim_toggle:
        if self.testInst.pandas_format:
            kwargs['sort'] = True
        else:
            # For xarray, exercise concatenation along a renamed epoch.
            kwargs['dim'] = 'Epoch2'
            data2 = data2.rename({self.xarray_epoch_name: 'Epoch2'})
            self.testInst.data = self.testInst.data.rename(
                {self.xarray_epoch_name: 'Epoch2'})

    # Concat together
    self.testInst.concat_data(data2, **kwargs)

    if sort_dim_toggle and not self.testInst.pandas_format:
        # Rename to the standard epoch name
        self.testInst.data = self.testInst.data.rename(
            {'Epoch2': self.xarray_epoch_name})

    # Basic test for concatenation
    self.out = len(self.testInst.index)
    assert (self.out == len1 + len2)

    # Detailed test for concatenation through index.  When prepending, day
    # 2 data sits before day 1 data, so times are descending at the seam.
    if prepend:
        assert np.all(self.testInst.index[:len1]
                      > self.testInst.index[len1:])
    else:
        assert np.all(self.testInst.index[:len1]
                      < self.testInst.index[len1:])

    if self.testInst.pandas_format:
        if sort_dim_toggle:
            assert np.all(self.testInst.data.columns
                          == np.sort(data2.columns))
        else:
            assert np.all(self.testInst.data.columns == data2.columns)
    return
def test_empty_flag_data_empty(self):
    """Test the status of the empty flag for unloaded data."""

    # No load() call has been made, so the Instrument must report empty.
    assert self.testInst.empty
    return
def test_empty_flag_data_not_empty(self):
    """Test the status of the empty flag for loaded data."""

    # After a successful load the empty flag must clear.
    self.testInst.load(date=self.ref_time, use_header=True)
    assert not self.testInst.empty
    return
def test_index_attribute(self):
    """Test the index attribute before and after loading data."""

    # Test that an index is present, even with an empty Instrument
    assert isinstance(self.testInst.index, pds.Index)

    # Test an index is present with data loaded in an Instrument
    self.testInst.load(date=self.ref_time, use_header=True)
    assert isinstance(self.testInst.index, pds.Index)
    return
def test_index_return(self):
    """Test that the index is returned in the proper format."""

    # Load data
    self.testInst.load(self.ref_time.year, self.ref_doy, use_header=True)

    # Ensure we get the index back; the backing store differs by format
    # (DataFrame index vs. the xarray epoch coordinate).
    if self.testInst.pandas_format:
        assert np.all(self.testInst.index == self.testInst.data.index)
    else:
        assert np.all(self.testInst.index
                      == self.testInst.data.indexes[self.xarray_epoch_name])
    return
@pytest.mark.parametrize("labels", [('mlt'),
                                    (['mlt', 'longitude']),
                                    (['longitude', 'mlt'])])
def test_basic_data_access_by_name(self, labels):
    """Check that data can be accessed by name at the instrument level.

    Parameters
    ----------
    labels : list of str
        List of variable names to access.

    """

    self.testInst.load(self.ref_time.year, self.ref_doy, use_header=True)
    # Instrument-level access must match direct access on `.data`.
    assert np.all((self.testInst[labels]
                   == self.testInst.data[labels]).values)
    return
@pytest.mark.parametrize("index", [(0),
                                   ([0, 1, 2, 3]),
                                   (slice(0, 10)),
                                   (np.arange(0, 10))])
def test_data_access_by_indices_and_name(self, index):
    """Check that variables can be accessed by each supported index type.

    Parameters
    ----------
    index : int, list, slice, or np.array
        Indices to retrieve data.

    """

    self.testInst.load(self.ref_time.year, self.ref_doy, use_header=True)
    # (index, name) access must match positional access on the variable.
    assert np.all(self.testInst[index, 'mlt']
                  == self.testInst.data['mlt'][index])
    return
def test_data_access_by_row_slicing_and_name_slicing(self):
    """Check that each variable is downsampled."""

    self.testInst.load(self.ref_time.year, self.ref_doy, use_header=True)
    # Slice all variables over the first ten rows.
    result = self.testInst[0:10, :]
    for variable, array in result.items():
        assert len(array) == len(self.testInst.data[variable].values[0:10])
        assert np.all(array == self.testInst.data[variable].values[0:10])
    return
def test_data_access_by_datetime_and_name(self):
    """Check that datetime can be used to access data."""

    self.testInst.load(self.ref_time.year, self.ref_doy, use_header=True)
    # NOTE(review): hard-coded date — presumably equal to self.ref_time so
    # it selects the first sample; confirm against the fixture.
    self.out = dt.datetime(2009, 1, 1, 0, 0, 0)
    assert np.all(self.testInst[self.out, 'uts']
                  == self.testInst.data['uts'].values[0])
    return
def test_data_access_by_datetime_slicing_and_name(self):
    """Check that a slice of datetimes can be used to access data."""

    self.testInst.load(self.ref_time.year, self.ref_doy, use_header=True)
    # Derive the cadence in seconds from the first two samples
    # (index values are in nanoseconds).
    time_step = (self.testInst.index[1]
                 - self.testInst.index[0]).value / 1.E9
    offset = dt.timedelta(seconds=(10 * time_step))
    start = dt.datetime(2009, 1, 1, 0, 0, 0)
    stop = start + offset
    # Datetime slices are inclusive of the stop time: 11 samples.
    assert np.all(self.testInst[start:stop, 'uts']
                  == self.testInst.data['uts'].values[0:11])
    return
def test_setting_data_by_name(self):
    """Test setting data by name."""

    self.testInst.load(self.ref_time.year, self.ref_doy, use_header=True)
    # Assigning a new variable by name must round-trip through access.
    self.testInst['doubleMLT'] = 2. * self.testInst['mlt']
    assert np.all(self.testInst['doubleMLT'] == 2. * self.testInst['mlt'])
    return
def test_setting_series_data_by_name(self):
    """Test setting series data by name."""

    self.testInst.load(self.ref_time.year, self.ref_doy, use_header=True)
    self.testInst['doubleMLT'] = \
        2. * pds.Series(self.testInst['mlt'].values,
                        index=self.testInst.index)
    assert np.all(self.testInst['doubleMLT'] == 2. * self.testInst['mlt'])

    # An empty Series should broadcast to all-NaN over the loaded index.
    self.testInst['blankMLT'] = pds.Series(None, dtype='float64')
    assert np.all(np.isnan(self.testInst['blankMLT']))
    return
def test_setting_pandas_dataframe_by_names(self):
    """Test setting pandas dataframe by name."""

    self.testInst.load(self.ref_time.year, self.ref_doy, use_header=True)
    # Assign two variables at once from a DataFrame keyed on both names.
    self.testInst[['doubleMLT', 'tripleMLT']] = \
        pds.DataFrame({'doubleMLT': 2. * self.testInst['mlt'].values,
                       'tripleMLT': 3. * self.testInst['mlt'].values},
                      index=self.testInst.index)
    assert np.all(self.testInst['doubleMLT'] == 2. * self.testInst['mlt'])
    assert np.all(self.testInst['tripleMLT'] == 3. * self.testInst['mlt'])
    return
def test_setting_data_by_name_single_element(self):
    """Test setting data by name for a single element."""

    self.testInst.load(self.ref_time.year, self.ref_doy, use_header=True)
    # A scalar should broadcast across the whole loaded index.
    self.testInst['doubleMLT'] = 2.
    assert np.all(self.testInst['doubleMLT'] == 2.)

    self.testInst['nanMLT'] = np.nan
    assert np.all(np.isnan(self.testInst['nanMLT']))
    return
def test_setting_data_by_name_with_meta(self):
    """Test setting data by name with meta."""

    self.testInst.load(self.ref_time.year, self.ref_doy, use_header=True)
    # Dict assignment carries both the values ('data') and metadata keys.
    self.testInst['doubleMLT'] = {'data': 2. * self.testInst['mlt'],
                                  'units': 'hours',
                                  'long_name': 'double trouble'}
    assert np.all(self.testInst['doubleMLT'] == 2. * self.testInst['mlt'])
    assert self.testInst.meta['doubleMLT'].units == 'hours'
    assert self.testInst.meta['doubleMLT'].long_name == 'double trouble'
    return
def test_setting_partial_data(self):
    """Test setting partial data by index."""

    self.testInst.load(self.ref_time.year, self.ref_doy, use_header=True)
    # Keep a reference to compare the untouched remainder against.
    self.out = self.testInst
    if self.testInst.pandas_format:
        self.testInst[0:3] = 0

        # First three values should be changed.
        assert np.all(self.testInst[0:3] == 0)

        # Other data should be unchanged.
        assert np.all(self.testInst[3:] == self.out[3:])
    else:
        pytest.skip("This notation does not make sense for xarray")
    return
@pytest.mark.parametrize("changed,fixed",
                         [(0, slice(1, None)),
                          ([0, 1, 2, 3], slice(4, None)),
                          (slice(0, 10), slice(10, None)),
                          (np.array([0, 1, 2, 3]), slice(4, None)),
                          (dt.datetime(2009, 1, 1), slice(1, None)),
                          (slice(dt.datetime(2009, 1, 1),
                                 dt.datetime(2009, 1, 1, 0, 1)),
                           slice(dt.datetime(2009, 1, 1, 0, 1), None))])
def test_setting_partial_data_by_inputs(self, changed, fixed):
    """Check that data can be set using each supported index type.

    Parameters
    ----------
    changed : index-like parameters
        Index of values that change during the test.
    fixed : index-like parameters
        Index of values that should remain the same during the test.

    """

    self.testInst.load(self.ref_time.year, self.ref_doy, use_header=True)
    self.testInst['doubleMLT'] = 2. * self.testInst['mlt']
    self.testInst[changed, 'doubleMLT'] = 0

    # BUG FIX: the original asserted on the bound method `.all` (always
    # truthy) instead of calling it, so these checks could never fail.
    assert (self.testInst[fixed, 'doubleMLT']
            == 2. * self.testInst[fixed, 'mlt']).all()
    assert (self.testInst[changed, 'doubleMLT'] == 0).all()
    return
def test_modifying_data_inplace(self):
    """Test modification of data inplace."""

    self.testInst.load(self.ref_time.year, self.ref_doy, use_header=True)
    self.testInst['doubleMLT'] = 2. * self.testInst['mlt']
    self.testInst['doubleMLT'] += 100

    # BUG FIX: `.all` without parentheses is a bound method and always
    # truthy; call it so the assertion actually validates the values.
    assert (self.testInst['doubleMLT']
            == 2. * self.testInst['mlt'] + 100).all()
    return
@pytest.mark.parametrize("index", [([0, 1, 2, 3, 4]),
                                   (np.array([0, 1, 2, 3, 4]))])
def test_getting_all_data_by_index(self, index):
    """Test getting all data by index.

    Parameters
    ----------
    index : index-like parameters
        Index of values to retrieve.

    """

    self.testInst.load(self.ref_time.year, self.ref_doy, use_header=True)
    inst_subset = self.testInst[index]
    # Subset length is checked differently per format: row count for
    # pandas, epoch-dimension size for xarray.
    if self.testInst.pandas_format:
        assert len(inst_subset) == len(index)
    else:
        assert inst_subset.sizes[self.xarray_epoch_name] == len(index)
    return
@pytest.mark.parametrize("values", [
    {'help': 'I need somebody'}, {'UTS': 'litte_uts', 'mlt': 'big_mlt'},
    {'utS': 'uts1', 'help': {'me': 'do', 'things': 'well'}}])
def test_unknown_variable_error_renaming(self, values):
    """Test that renaming an unknown variable raises a ValueError.

    Parameters
    ----------
    values : dict
        Variables to be renamed.  A dict where each key is the current
        variable and its value is the new variable name.

    """

    # Check for error for unknown variable name
    self.testInst.load(self.ref_time.year, self.ref_doy, use_header=True)

    # Capture the ValueError and message
    with pytest.raises(ValueError) as verr:
        self.testInst.rename(values)

    # Evaluate the error message text
    assert str(verr).find("cannot rename") >= 0
    return
@pytest.mark.parametrize("lowercase", [True, False])
@pytest.mark.parametrize("mapper", [{'uts': 'UTS1'},
                                    {'uts': 'UTs2', 'mlt': 'Mlt2'},
                                    {'uts': 'Long Change with spaces'},
                                    str.upper])
def test_basic_variable_renaming(self, lowercase, mapper):
    """Test new variable names are converted as desired in meta and data.

    Parameters
    ----------
    lowercase : bool
        Instrument variables will be lowercase if True, as mapped if False
    mapper : dict or func
        Variables to be renamed.  A dict where each key is the current
        variable and its value is the new variable name.

    """

    # Initialize the testing dict; a callable mapper is expanded over all
    # current variables so the checks below can iterate uniformly.
    if isinstance(mapper, dict):
        values = mapper
    else:
        values = {var: mapper(var) for var in self.testInst.variables}

    # Test single variable
    self.testInst.load(self.ref_time.year, self.ref_doy, use_header=True)
    self.testInst.rename(mapper, lowercase_data_labels=lowercase)

    for key in values:
        # Check for new name in the data and metadata.  Data labels are
        # lowercased when requested; meta keeps the mapped case.
        inst_var = values[key].lower() if lowercase else values[key]
        assert inst_var in self.testInst.variables
        assert values[key] in self.testInst.meta.keys()

        # Ensure old name not present
        assert key not in self.testInst.variables
        assert key not in self.testInst.meta.keys()
    return
@pytest.mark.parametrize("mapper", [
    {'profiles': {'density': 'ionization'}},
    {'profiles': {'density': 'mass'},
     'alt_profiles': {'density': 'volume'}},
    str.upper])
def test_ho_pandas_variable_renaming(self, mapper):
    """Test rename of higher order pandas variable.

    Parameters
    ----------
    mapper : dict or function
        A function or dict that maps how the variables will be renamed.

    """

    # TODO(#789): Remove when meta children support is dropped.

    # Initialize the testing dict
    if isinstance(mapper, dict):
        values = mapper
    else:
        values = {var: mapper(var) for var in self.testInst.variables}

    # Check for pysat_testing2d instrument.
    # NOTE(review): when the fixture is a different instrument this test
    # body is skipped silently and the test passes without asserting;
    # the sibling error test below calls pytest.skip() instead.
    if self.testInst.platform == 'pysat':
        if self.testInst.name == 'testing2d':
            self.testInst.load(self.ref_time.year, self.ref_doy,
                               use_header=True)
            self.testInst.rename(mapper)
            for key in values:
                for ikey in values[key]:
                    # Check column name unchanged
                    assert key in self.testInst.data
                    assert key in self.testInst.meta

                    # Check for new name in HO data
                    check_var = self.testInst.meta[key]['children']

                    # A callable mapper renames sub-variables too.
                    if isinstance(values[key], dict):
                        map_val = values[key][ikey]
                    else:
                        map_val = mapper(ikey)

                    assert map_val in self.testInst[0, key]
                    assert map_val in check_var

                    # Ensure old name not present
                    assert ikey not in self.testInst[0, key]
                    if map_val.lower() != ikey:
                        # Meta lookups are case-insensitive, so only check
                        # when the names differ beyond case.
                        assert ikey not in check_var
    return
@pytest.mark.parametrize("values", [{'profiles':
                                     {'help': 'I need somebody'}},
                                    {'fake_profi':
                                     {'help': 'Not just anybody'}},
                                    {'wrong_profile':
                                     {'help': 'You know I need someone'},
                                     'fake_profiles':
                                     {'Beatles': 'help!'},
                                     'profiles':
                                     {'density': 'valid_change'}},
                                    {'fake_profile':
                                     {'density': 'valid HO change'}},
                                    {'Nope_profiles':
                                     {'density': 'valid_HO_change'}}])
def test_ho_pandas_unknown_variable_error_renaming(self, values):
    """Test higher order pandas variable rename raises error if unknown.

    Parameters
    ----------
    values : dict
        Variables to be renamed.  A dict where each key is the current
        variable and its value is the new variable name.

    """

    # TODO(#789): Remove when meta children support is dropped.

    # Check for pysat_testing2d instrument
    if self.testInst.platform == 'pysat':
        if self.testInst.name == 'testing2d':
            self.testInst.load(self.ref_time.year, self.ref_doy,
                               use_header=True)

            # Check for error for unknown column or HO variable name
            with pytest.raises(ValueError) as verr:
                self.testInst.rename(values)
            assert str(verr).find("cannot rename") >= 0
        else:
            pytest.skip("Not implemented for this instrument")
    return
@pytest.mark.parametrize("values", [{'profiles': {'density': 'Ionization'}},
                                    {'profiles': {'density': 'MASa'},
                                     'alt_profiles':
                                     {'density': 'VoLuMe'}}])
def test_ho_pandas_variable_renaming_lowercase(self, values):
    """Test rename higher order pandas variable uses lowercase.

    Parameters
    ----------
    values : dict
        Variables to be renamed.  A dict where each key is the current
        variable and its value is the new variable name.

    """

    # TODO(#789): Remove when meta children support is dropped.

    # Check for pysat_testing2d instrument.
    # NOTE(review): silently passes on other instruments (no pytest.skip).
    if self.testInst.platform == 'pysat':
        if self.testInst.name == 'testing2d':
            self.testInst.load(self.ref_time.year, self.ref_doy,
                               use_header=True)
            self.testInst.rename(values)
            for key in values:
                for ikey in values[key]:
                    # Check column name unchanged
                    assert key in self.testInst.data
                    assert key in self.testInst.meta

                    # Check for new name in HO data
                    test_val = values[key][ikey]
                    assert test_val in self.testInst[0, key]
                    check_var = self.testInst.meta[key]['children']

                    # Case insensitive check
                    assert values[key][ikey] in check_var

                    # Ensure new case in there
                    check_var = check_var[values[key][ikey]].name
                    assert values[key][ikey] == check_var

                    # Ensure old name not present
                    assert ikey not in self.testInst[0, key]
                    check_var = self.testInst.meta[key]['children']
                    assert ikey not in check_var
    return
| {
"content_hash": "329148a3e72dbc013e1c0188212ee718",
"timestamp": "",
"source": "github",
"line_count": 1030,
"max_line_length": 80,
"avg_line_length": 38.23300970873787,
"alnum_prop": 0.5518537328593195,
"repo_name": "rstoneback/pysat",
"id": "93f784c3b0e073a4be3897a193da87543f5b2cb6",
"size": "39380",
"binary": false,
"copies": "1",
"ref": "refs/heads/nomerge/windows_test",
"path": "pysat/tests/classes/cls_instrument_access.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1126202"
}
],
"symlink_target": ""
} |
import pygame
class GameObject(pygame.sprite.Sprite):
    """A positioned sprite with simple semi-implicit Euler motion.

    Exposes the same attributes as before: image, rect, position,
    velocity, parent, acceleration.
    """

    def __init__(self, image, parent, pos=(0, 0), vel=(0, 0)):
        pygame.sprite.Sprite.__init__(self)
        self.image = image
        self.rect = image.get_rect()
        self.rect.center = int(pos[0]), int(pos[1])
        self.position = pos
        self.velocity = vel
        self.parent = parent
        self.acceleration = (0, 0)

    def move(self, delta):
        """Translate by `delta` and keep the rect in sync."""
        dx, dy = delta
        x, y = self.position
        self.position = x + dx, y + dy
        self.rect.center = int(self.position[0]), int(self.position[1])

    def moveTo(self, pos):
        """Jump to `pos` and keep the rect in sync."""
        self.position = pos
        self.rect.center = int(pos[0]), int(pos[1])

    def draw(self, surf):
        """Blit this sprite's image onto `surf` at its current rect."""
        surf.blit(self.image, self.rect)

    def testMaskCollision(self, spriteGroup):
        """Return sprites in `spriteGroup` whose masks overlap this one."""
        if not spriteGroup:
            return []
        return pygame.sprite.spritecollide(self, spriteGroup, 0,
                                           pygame.sprite.collide_mask)

    def update(self, *args):
        """Advance velocity, then position, by one frame (args[1])."""
        frameTime = args[1]
        ax, ay = self.acceleration
        vx, vy = self.velocity
        vx += frameTime * ax
        vy += frameTime * ay
        self.velocity = (vx, vy)
        px, py = self.position
        self.moveTo((px + frameTime * vx, py + frameTime * vy))
| {
"content_hash": "932a568587907fe85e75d007785f23e9",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 76,
"avg_line_length": 33.853658536585364,
"alnum_prop": 0.5770893371757925,
"repo_name": "jceipek/Mind-Rush",
"id": "2b134632d77824250f52b630cef0928467707dc9",
"size": "1544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gameObjects/gameObject.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "15951"
},
{
"name": "Matlab",
"bytes": "266"
},
{
"name": "Objective-C",
"bytes": "2766"
},
{
"name": "Python",
"bytes": "86763"
}
],
"symlink_target": ""
} |
'''SMTP/ESMTP client class.
This should follow RFC 821 (SMTP), RFC 1869 (ESMTP), RFC 2554 (SMTP
Authentication) and RFC 2487 (Secure SMTP over TLS).
Notes:
Please remember, when doing ESMTP, that the names of the SMTP service
extensions are NOT the same thing as the option keywords for the RCPT
and MAIL commands!
Example:
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
>>> print s.help()
This is Sendmail version 8.8.4
Topics:
HELO EHLO MAIL RCPT DATA
RSET NOOP QUIT HELP VRFY
EXPN VERB ETRN DSN
For more info use "HELP <topic>".
To report bugs in the implementation send email to
sendmail-bugs@sendmail.org.
For local information send email to Postmaster at your site.
End of HELP info
>>> s.putcmd("vrfy","someone@here")
>>> s.getreply()
(250, "Somebody OverHere <somebody@here.my.org>")
>>> s.quit()
'''
# Author: The Dragon De Monsyne <dragondm@integral.org>
# ESMTP support, test code and doc fixes added by
# Eric S. Raymond <esr@thyrsus.com>
# Better RFC 821 compliance (MAIL and RCPT, and CRLF in data)
# by Carey Evans <c.evans@clear.net.nz>, for picky mail servers.
# RFC 2554 (authentication) support by Gerhard Haering <gerhard@bigfoot.de>.
#
# This was modified from the Python 1.5 library HTTP lib.
import socket
import re
import email.utils
import base64
import hmac
from email.base64mime import encode as encode_base64
from sys import stderr
__all__ = ["SMTPException", "SMTPServerDisconnected", "SMTPResponseException",
"SMTPSenderRefused", "SMTPRecipientsRefused", "SMTPDataError",
"SMTPConnectError", "SMTPHeloError", "SMTPAuthenticationError",
"quoteaddr", "quotedata", "SMTP"]
SMTP_PORT = 25
SMTP_SSL_PORT = 465
CRLF = "\r\n"
OLDSTYLE_AUTH = re.compile(r"auth=(.*)", re.I)
# Exception classes used by this module.
class SMTPException(Exception):
    """Base class for all exceptions raised by this module."""
class SMTPServerDisconnected(SMTPException):
    """Not connected to any SMTP server.

    This exception is raised when the server unexpectedly disconnects,
    or when an attempt is made to use the SMTP instance before
    connecting it to a server.
    """
class SMTPResponseException(SMTPException):
    """Base class for all exceptions that include an SMTP error code.

    These exceptions are generated in some instances when the SMTP
    server returns an error code.  The error code is stored in the
    `smtp_code' attribute of the error, and the `smtp_error' attribute
    is set to the error message.
    """

    def __init__(self, code, msg):
        # Keep code/msg both as named attributes and in args so str()
        # and unpacking behave like a normal Exception.
        self.smtp_code = code
        self.smtp_error = msg
        self.args = (code, msg)
class SMTPSenderRefused(SMTPResponseException):
    """Sender address refused.

    In addition to the attributes set by on all SMTPResponseException
    exceptions, this sets `sender' to the string that the SMTP refused.
    """

    def __init__(self, code, msg, sender):
        self.smtp_code = code
        self.smtp_error = msg
        self.sender = sender
        self.args = (code, msg, sender)
class SMTPRecipientsRefused(SMTPException):
    """All recipient addresses refused.

    The errors for each recipient are accessible through the attribute
    'recipients', which is a dictionary of exactly the same sort as
    SMTP.sendmail() returns.
    """

    def __init__(self, recipients):
        # Maps recipient address -> (code, error message).
        self.recipients = recipients
        self.args = (recipients,)
class SMTPDataError(SMTPResponseException):
    """The SMTP server didn't accept the data."""
class SMTPConnectError(SMTPResponseException):
    """Error during connection establishment."""
class SMTPHeloError(SMTPResponseException):
    """The server refused our HELO reply."""
class SMTPAuthenticationError(SMTPResponseException):
    """Authentication error.

    Most probably the server didn't accept the username/password
    combination provided.
    """
def quoteaddr(addr):
    """Quote a subset of the email addresses defined by RFC 821.

    Should be able to handle anything rfc822.parseaddr can handle.
    """
    # Sentinel: stays (None, None) only if parseaddr itself is missing
    # (AttributeError); parseaddr returns '' for an unparseable address.
    m = (None, None)
    try:
        m = email.utils.parseaddr(addr)[1]
    except AttributeError:
        pass
    if m == (None, None):  # Indicates parse failure or AttributeError
        # something weird here.. punt -ddm
        return "<%s>" % addr
    elif m is None:
        # the sender wants an empty return address
        # NOTE(review): parseaddr()[1] never returns None here; this
        # branch looks historical/defensive — confirm before removing.
        return "<>"
    else:
        return "<%s>" % m
def quotedata(data):
    """Quote data for email.

    Double leading '.', and change Unix newline '\\n', or Mac '\\r' into
    Internet CRLF end-of-line.
    """
    # First normalize every line ending (\n, bare \r, or \r\n) to CRLF,
    # then dot-stuff lines that begin with a period (RFC 821).
    normalized = re.sub(r'(?:\r\n|\n|\r(?!\n))', "\r\n", data)
    return re.sub(r'(?m)^\.', '..', normalized)
# Optional SSL support: when the ssl module is unavailable the module
# still imports, with _have_ssl left False.
try:
    import ssl
except ImportError:
    _have_ssl = False
else:
    class SSLFakeFile:
        """A fake file like object that really wraps a SSLObject.

        It only supports what is needed in smtplib.
        """
        def __init__(self, sslobj):
            self.sslobj = sslobj

        def readline(self):
            # Read one byte at a time until a newline arrives; SSLObject
            # exposes no readline of its own.  Returns whatever was read
            # if the connection closes mid-line.
            str = ""
            chr = None
            while chr != "\n":
                chr = self.sslobj.read(1)
                if not chr:
                    break
                str += chr
            return str

        def close(self):
            # File-protocol no-op; the socket is closed elsewhere.
            pass

    _have_ssl = True
class SMTP:
"""This class manages a connection to an SMTP or ESMTP server.
SMTP Objects:
SMTP objects have the following attributes:
helo_resp
This is the message given by the server in response to the
most recent HELO command.
ehlo_resp
This is the message given by the server in response to the
most recent EHLO command. This is usually multiline.
does_esmtp
This is a True value _after you do an EHLO command_, if the
server supports ESMTP.
esmtp_features
This is a dictionary, which, if the server supports ESMTP,
will _after you do an EHLO command_, contain the names of the
SMTP service extensions this server supports, and their
parameters (if any).
Note, all extension names are mapped to lower case in the
dictionary.
See each method's docstrings for details. In general, there is a
method of the same name to perform each SMTP command. There is also a
method called 'sendmail' that will do an entire mail transaction.
"""
debuglevel = 0
file = None
helo_resp = None
ehlo_msg = "ehlo"
ehlo_resp = None
does_esmtp = 0
default_port = SMTP_PORT
def __init__(self, host='', port=0, local_hostname=None,
             timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
    """Initialize a new instance.

    If specified, `host' is the name of the remote host to which to
    connect.  If specified, `port' specifies the port to which to connect.
    By default, smtplib.SMTP_PORT is used.  An SMTPConnectError is raised
    if the specified `host' doesn't respond correctly.  If specified,
    `local_hostname` is used as the FQDN of the local host.  By default,
    the local hostname is found using socket.getfqdn().

    """
    self.timeout = timeout
    self.esmtp_features = {}
    if host:
        # A non-220 greeting means the server refused us at connect time.
        (code, msg) = self.connect(host, port)
        if code != 220:
            raise SMTPConnectError(code, msg)
    if local_hostname is not None:
        self.local_hostname = local_hostname
    else:
        # RFC 2821 says we should use the fqdn in the EHLO/HELO verb, and
        # if that can't be calculated, that we should use a domain literal
        # instead (essentially an encoded IP address like [A.B.C.D]).
        fqdn = socket.getfqdn()
        if '.' in fqdn:
            self.local_hostname = fqdn
        else:
            # We can't find an fqdn hostname, so use a domain literal
            addr = '127.0.0.1'
            try:
                addr = socket.gethostbyname(socket.gethostname())
            except socket.gaierror:
                pass
            self.local_hostname = '[%s]' % addr
def set_debuglevel(self, debuglevel):
    """Set the debug output level.

    A non-false value results in debug messages for connection and for all
    messages sent to and received from the server.

    """
    self.debuglevel = debuglevel
def _get_socket(self, host, port, timeout):
    """Open and return a plain TCP connection to (host, port).

    Split out so SMTP_SSL can reuse the connect() logic and only alter
    how the socket itself is created.
    """
    # BUG FIX: parameters were declared (port, host) while connect()
    # passes (host, port) positionally.  create_connection() happened to
    # receive the right values, but the debug line printed them reversed.
    # Renaming the parameters to match the call fixes the debug output
    # without changing behavior (all call sites are positional).
    if self.debuglevel > 0:
        print>>stderr, 'connect:', (host, port)
    return socket.create_connection((host, port), timeout)
def connect(self, host='localhost', port=0):
    """Connect to a host on a given port.

    If the hostname ends with a colon (`:') followed by a number, and
    there is no port specified, that suffix will be stripped off and the
    number interpreted as the port number to use.

    Note: This method is automatically invoked by __init__, if a host is
    specified during instantiation.

    """
    # Only parse a "host:port" suffix when exactly one colon is present
    # (avoids mangling IPv6 literals) and no explicit port was given.
    if not port and (host.find(':') == host.rfind(':')):
        i = host.rfind(':')
        if i >= 0:
            host, port = host[:i], host[i + 1:]
            try:
                port = int(port)
            except ValueError:
                raise socket.error, "nonnumeric port"
    if not port:
        port = self.default_port
    if self.debuglevel > 0:
        print>>stderr, 'connect:', (host, port)
    self.sock = self._get_socket(host, port, self.timeout)
    # Read the server's 220 greeting before returning.
    (code, msg) = self.getreply()
    if self.debuglevel > 0:
        print>>stderr, "connect:", msg
    return (code, msg)
def send(self, str):
    """Send `str' to the server.

    Raises SMTPServerDisconnected if no socket is open or the write
    fails (the socket is closed on failure).
    """
    if self.debuglevel > 0:
        print>>stderr, 'send:', repr(str)
    if hasattr(self, 'sock') and self.sock:
        try:
            self.sock.sendall(str)
        except socket.error:
            # A failed write leaves the session unusable; drop it.
            self.close()
            raise SMTPServerDisconnected('Server not connected')
    else:
        raise SMTPServerDisconnected('please run connect() first')
def putcmd(self, cmd, args=""):
    """Send a command to the server.

    The verb and optional argument string are joined with a single
    space and terminated with CRLF per RFC 821.
    """
    # Renamed the local from `str` to `line` to stop shadowing the
    # builtin; behavior is unchanged.
    if args == "":
        line = '%s%s' % (cmd, CRLF)
    else:
        line = '%s %s%s' % (cmd, args, CRLF)
    self.send(line)
def getreply(self):
    """Get a reply from the server.

    Returns a tuple consisting of:

      - server response code (e.g. '250', or such, if all goes well)
        Note: returns -1 if it can't read response code.

      - server response string corresponding to response code (multiline
        responses are converted to a single, multiline string).

    Raises SMTPServerDisconnected if end-of-file is reached.
    """
    resp = []
    if self.file is None:
        self.file = self.sock.makefile('rb')
    while 1:
        try:
            line = self.file.readline()
        except socket.error:
            line = ''
        if line == '':
            # EOF: the server went away mid-reply.
            self.close()
            raise SMTPServerDisconnected("Connection unexpectedly closed")
        if self.debuglevel > 0:
            print>>stderr, 'reply:', repr(line)
        # Text after the 4-character "NNN " / "NNN-" prefix.
        resp.append(line[4:].strip())
        code = line[:3]
        # Check that the error code is syntactically correct.
        # Don't attempt to read a continuation line if it is broken.
        try:
            errcode = int(code)
        except ValueError:
            errcode = -1
            break
        # Check if multiline response.
        if line[3:4] != "-":
            break

    errmsg = "\n".join(resp)
    if self.debuglevel > 0:
        print>>stderr, 'reply: retcode (%s); Msg: %s' % (errcode, errmsg)
    return errcode, errmsg
def docmd(self, cmd, args=""):
    """Send a command, and return its response code."""
    # Convenience wrapper: putcmd() then getreply().
    self.putcmd(cmd, args)
    return self.getreply()
# std smtp commands
def helo(self, name=''):
    """SMTP 'helo' command.
    Hostname to send for this command defaults to the FQDN of the local
    host.
    """
    self.putcmd("helo", name or self.local_hostname)
    (code, msg) = self.getreply()
    # Remember the raw response for later ehlo_or_helo_if_needed checks.
    self.helo_resp = msg
    return (code, msg)
def ehlo(self, name=''):
    """ SMTP 'ehlo' command.
    Hostname to send for this command defaults to the FQDN of the local
    host.

    On success, populates self.esmtp_features with the advertised
    extensions (keys lowercased) and sets self.does_esmtp.
    """
    self.esmtp_features = {}
    self.putcmd(self.ehlo_msg, name or self.local_hostname)
    (code, msg) = self.getreply()
    # According to RFC1869 some (badly written)
    # MTA's will disconnect on an ehlo. Toss an exception if
    # that happens -ddm
    if code == -1 and len(msg) == 0:
        self.close()
        raise SMTPServerDisconnected("Server not connected")
    self.ehlo_resp = msg
    if code != 250:
        return (code, msg)
    self.does_esmtp = 1
    #parse the ehlo response -ddm
    resp = self.ehlo_resp.split('\n')
    del resp[0]
    for each in resp:
        # To be able to communicate with as many SMTP servers as possible,
        # we have to take the old-style auth advertisement into account,
        # because:
        # 1) Else our SMTP feature parser gets confused.
        # 2) There are some servers that only advertise the auth methods we
        #    support using the old style.
        auth_match = OLDSTYLE_AUTH.match(each)
        if auth_match:
            # This doesn't remove duplicates, but that's no problem
            self.esmtp_features["auth"] = self.esmtp_features.get("auth", "") \
                + " " + auth_match.groups(0)[0]
            continue

        # RFC 1869 requires a space between ehlo keyword and parameters.
        # It's actually stricter, in that only spaces are allowed between
        # parameters, but were not going to check for that here.  Note
        # that the space isn't present if there are no parameters.
        m = re.match(r'(?P<feature>[A-Za-z0-9][A-Za-z0-9\-]*) ?', each)
        if m:
            feature = m.group("feature").lower()
            params = m.string[m.end("feature"):].strip()
            if feature == "auth":
                # Accumulate auth mechanisms from multiple lines/styles.
                self.esmtp_features[feature] = self.esmtp_features.get(feature, "") \
                    + " " + params
            else:
                self.esmtp_features[feature] = params
    return (code, msg)
def has_extn(self, opt):
"""Does the server support a given SMTP service extension?"""
return opt.lower() in self.esmtp_features
def help(self, args=''):
"""SMTP 'help' command.
Returns help text from server."""
self.putcmd("help", args)
return self.getreply()[1]
def rset(self):
"""SMTP 'rset' command -- resets session."""
return self.docmd("rset")
def noop(self):
"""SMTP 'noop' command -- doesn't do anything :>"""
return self.docmd("noop")
def mail(self, sender, options=[]):
"""SMTP 'mail' command -- begins mail xfer session."""
optionlist = ''
if options and self.does_esmtp:
optionlist = ' ' + ' '.join(options)
self.putcmd("mail", "FROM:%s%s" % (quoteaddr(sender), optionlist))
return self.getreply()
def rcpt(self, recip, options=[]):
"""SMTP 'rcpt' command -- indicates 1 recipient for this mail."""
optionlist = ''
if options and self.does_esmtp:
optionlist = ' ' + ' '.join(options)
self.putcmd("rcpt", "TO:%s%s" % (quoteaddr(recip), optionlist))
return self.getreply()
    def data(self, msg):
        """SMTP 'DATA' command -- sends message data to server.
        Automatically quotes lines beginning with a period per rfc821.
        Raises SMTPDataError if there is an unexpected reply to the
        DATA command; the return value from this method is the final
        response code received when the all data is sent.
        """
        self.putcmd("data")
        (code, repl) = self.getreply()
        if self.debuglevel > 0:
            print>>stderr, "data:", (code, repl)
        if code != 354:
            # 354 == 'Start mail input'; anything else aborts the transfer.
            raise SMTPDataError(code, repl)
        else:
            q = quotedata(msg)
            # The message body must end with <CRLF>.<CRLF> per RFC 821.
            if q[-2:] != CRLF:
                q = q + CRLF
            q = q + "." + CRLF
            self.send(q)
            (code, msg) = self.getreply()
            if self.debuglevel > 0:
                print>>stderr, "data:", (code, msg)
            return (code, msg)
    def verify(self, address):
        """SMTP 'verify' command -- checks for address validity.

        Returns the (code, message) server reply.
        """
        self.putcmd("vrfy", quoteaddr(address))
        return self.getreply()
    # a.k.a. -- 'vrfy' is the actual RFC 821 command verb.
    vrfy = verify
def expn(self, address):
"""SMTP 'expn' command -- expands a mailing list."""
self.putcmd("expn", quoteaddr(address))
return self.getreply()
# some useful methods
def ehlo_or_helo_if_needed(self):
"""Call self.ehlo() and/or self.helo() if needed.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
"""
if self.helo_resp is None and self.ehlo_resp is None:
if not (200 <= self.ehlo()[0] <= 299):
(code, resp) = self.helo()
if not (200 <= code <= 299):
raise SMTPHeloError(code, resp)
    def login(self, user, password):
        """Log in on an SMTP server that requires authentication.
        The arguments are:
            - user: The user name to authenticate with.
            - password: The password for the authentication.
        If there has been no previous EHLO or HELO command this session, this
        method tries ESMTP EHLO first.
        This method will return normally if the authentication was successful.
        This method may raise the following exceptions:
            SMTPHeloError            The server didn't reply properly to
                                     the helo greeting.
            SMTPAuthenticationError  The server didn't accept the username/
                                     password combination.
            SMTPException            No suitable authentication method was
                                     found.
        """
        def encode_cram_md5(challenge, user, password):
            # The server challenge arrives base64-encoded; the reply is
            # "user <hex HMAC-MD5 of the challenge keyed by password>".
            challenge = base64.decodestring(challenge)
            response = user + " " + hmac.HMAC(password, challenge).hexdigest()
            return encode_base64(response, eol="")
        def encode_plain(user, password):
            # SASL PLAIN: NUL-separated authzid/authcid/password, base64'd.
            return encode_base64("\0%s\0%s" % (user, password), eol="")
        AUTH_PLAIN = "PLAIN"
        AUTH_CRAM_MD5 = "CRAM-MD5"
        AUTH_LOGIN = "LOGIN"
        self.ehlo_or_helo_if_needed()
        if not self.has_extn("auth"):
            raise SMTPException("SMTP AUTH extension not supported by server.")
        # Authentication methods the server supports:
        authlist = self.esmtp_features["auth"].split()
        # List of authentication methods we support: from preferred to
        # less preferred methods. Except for the purpose of testing the weaker
        # ones, we prefer stronger methods like CRAM-MD5:
        preferred_auths = [AUTH_CRAM_MD5, AUTH_PLAIN, AUTH_LOGIN]
        # Determine the authentication method we'll use
        authmethod = None
        for method in preferred_auths:
            if method in authlist:
                authmethod = method
                break
        if authmethod == AUTH_CRAM_MD5:
            (code, resp) = self.docmd("AUTH", AUTH_CRAM_MD5)
            if code == 503:
                # 503 == 'Error: already authenticated'
                return (code, resp)
            (code, resp) = self.docmd(encode_cram_md5(resp, user, password))
        elif authmethod == AUTH_PLAIN:
            (code, resp) = self.docmd("AUTH",
                AUTH_PLAIN + " " + encode_plain(user, password))
        elif authmethod == AUTH_LOGIN:
            (code, resp) = self.docmd("AUTH",
                "%s %s" % (AUTH_LOGIN, encode_base64(user, eol="")))
            if code != 334:
                # 334 == server expects the next (base64-encoded) argument.
                raise SMTPAuthenticationError(code, resp)
            (code, resp) = self.docmd(encode_base64(password, eol=""))
        elif authmethod is None:
            raise SMTPException("No suitable authentication method found.")
        if code not in (235, 503):
            # 235 == 'Authentication successful'
            # 503 == 'Error: already authenticated'
            raise SMTPAuthenticationError(code, resp)
        return (code, resp)
    def starttls(self, keyfile=None, certfile=None):
        """Puts the connection to the SMTP server into TLS mode.
        If there has been no previous EHLO or HELO command this session, this
        method tries ESMTP EHLO first.
        If the server supports TLS, this will encrypt the rest of the SMTP
        session. If you provide the keyfile and certfile parameters,
        the identity of the SMTP server and client can be checked. This,
        however, depends on whether the socket module really checks the
        certificates.
        This method may raise the following exceptions:
            SMTPHeloError        The server didn't reply properly to
                                 the helo greeting.

        Returns the (resp, reply) pair from the STARTTLS command.
        """
        self.ehlo_or_helo_if_needed()
        if not self.has_extn("starttls"):
            raise SMTPException("STARTTLS extension not supported by server.")
        (resp, reply) = self.docmd("STARTTLS")
        if resp == 220:
            # 220 == server is ready to begin the TLS negotiation.
            if not _have_ssl:
                raise RuntimeError("No SSL support included in this Python")
            self.sock = ssl.wrap_socket(self.sock, keyfile, certfile)
            self.file = SSLFakeFile(self.sock)
            # RFC 3207:
            # The client MUST discard any knowledge obtained from
            # the server, such as the list of SMTP service extensions,
            # which was not obtained from the TLS negotiation itself.
            self.helo_resp = None
            self.ehlo_resp = None
            self.esmtp_features = {}
            self.does_esmtp = 0
        return (resp, reply)
    def sendmail(self, from_addr, to_addrs, msg, mail_options=[],
                 rcpt_options=[]):
        """This command performs an entire mail transaction.
        The arguments are:
            - from_addr    : The address sending this mail.
            - to_addrs     : A list of addresses to send this mail to.  A bare
                             string will be treated as a list with 1 address.
            - msg          : The message to send.
            - mail_options : List of ESMTP options (such as 8bitmime) for the
                             mail command.
            - rcpt_options : List of ESMTP options (such as DSN commands) for
                             all the rcpt commands.
        If there has been no previous EHLO or HELO command this session, this
        method tries ESMTP EHLO first.  If the server does ESMTP, message size
        and each of the specified options will be passed to it.  If EHLO
        fails, HELO will be tried and ESMTP options suppressed.
        This method will return normally if the mail is accepted for at least
        one recipient.  It returns a dictionary, with one entry for each
        recipient that was refused.  Each entry contains a tuple of the SMTP
        error code and the accompanying error message sent by the server.
        This method may raise the following exceptions:
            SMTPHeloError          The server didn't reply properly to
                                   the helo greeting.
            SMTPRecipientsRefused  The server rejected ALL recipients
                                   (no mail was sent).
            SMTPSenderRefused      The server didn't accept the from_addr.
            SMTPDataError          The server replied with an unexpected
                                   error code (other than a refusal of
                                   a recipient).
        Note: the connection will be open even after an exception is raised.
        Example:
         >>> import smtplib
         >>> s=smtplib.SMTP("localhost")
         >>> tolist=["one@one.org","two@two.org","three@three.org","four@four.org"]
         >>> msg = '''\\
         ... From: Me@my.org
         ... Subject: testin'...
         ...
         ... This is a test '''
         >>> s.sendmail("me@my.org",tolist,msg)
         { "three@three.org" : ( 550 ,"User unknown" ) }
         >>> s.quit()
        In the above example, the message was accepted for delivery to three
        of the four addresses, and one was rejected, with the error code
        550.  If all addresses are accepted, then the method will return an
        empty dictionary.
        """
        self.ehlo_or_helo_if_needed()
        esmtp_opts = []
        if self.does_esmtp:
            # Hmmm? what's this? -ddm
            # self.esmtp_features['7bit']=""
            if self.has_extn('size'):
                esmtp_opts.append("size=%d" % len(msg))
            for option in mail_options:
                esmtp_opts.append(option)
        (code, resp) = self.mail(from_addr, esmtp_opts)
        if code != 250:
            # Sender refused: reset the transaction before raising so the
            # connection stays usable.
            self.rset()
            raise SMTPSenderRefused(code, resp, from_addr)
        senderrs = {}
        if isinstance(to_addrs, basestring):
            to_addrs = [to_addrs]
        for each in to_addrs:
            (code, resp) = self.rcpt(each, rcpt_options)
            # 250 and 251 both mean the recipient was accepted.
            if (code != 250) and (code != 251):
                senderrs[each] = (code, resp)
        if len(senderrs) == len(to_addrs):
            # the server refused all our recipients
            self.rset()
            raise SMTPRecipientsRefused(senderrs)
        (code, resp) = self.data(msg)
        if code != 250:
            self.rset()
            raise SMTPDataError(code, resp)
        #if we got here then somebody got our mail
        return senderrs
def close(self):
"""Close the connection to the SMTP server."""
if self.file:
self.file.close()
self.file = None
if self.sock:
self.sock.close()
self.sock = None
def quit(self):
"""Terminate the SMTP session."""
res = self.docmd("quit")
self.close()
return res
if _have_ssl:
    class SMTP_SSL(SMTP):
        """ This is a subclass derived from SMTP that connects over an SSL encrypted
        socket (to use this class you need a socket module that was compiled with SSL
        support). If host is not specified, '' (the local host) is used. If port is
        omitted, the standard SMTP-over-SSL port (465) is used. keyfile and certfile
        are also optional - they can contain a PEM formatted private key and
        certificate chain file for the SSL connection.
        """
        default_port = SMTP_SSL_PORT
        def __init__(self, host='', port=0, local_hostname=None,
                     keyfile=None, certfile=None,
                     timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
            # Store credentials before SMTP.__init__, which connects and
            # therefore calls our _get_socket override.
            self.keyfile = keyfile
            self.certfile = certfile
            SMTP.__init__(self, host, port, local_hostname, timeout)
        def _get_socket(self, host, port, timeout):
            if self.debuglevel > 0:
                print>>stderr, 'connect:', (host, port)
            # Wrap the TCP socket in SSL before any SMTP traffic happens.
            new_socket = socket.create_connection((host, port), timeout)
            new_socket = ssl.wrap_socket(new_socket, self.keyfile, self.certfile)
            self.file = SSLFakeFile(new_socket)
            return new_socket
    __all__.append("SMTP_SSL")
#
# LMTP extension
#
LMTP_PORT = 2003  # default TCP port used by the LMTP class below
class LMTP(SMTP):
    """LMTP - Local Mail Transfer Protocol
    The LMTP protocol, which is very similar to ESMTP, is heavily based
    on the standard SMTP client. It's common to use Unix sockets for LMTP,
    so our connect() method must support that as well as a regular
    host:port server. To specify a Unix socket, you must use an absolute
    path as the host, starting with a '/'.
    Authentication is supported, using the regular SMTP mechanism. When
    using a Unix socket, LMTP generally doesn't support or require any
    authentication, but your mileage might vary."""
    # LMTP uses LHLO instead of EHLO as its greeting command.
    ehlo_msg = "lhlo"
    def __init__(self, host='', port=LMTP_PORT, local_hostname=None):
        """Initialize a new instance."""
        SMTP.__init__(self, host, port, local_hostname)
    def connect(self, host='localhost', port=0):
        """Connect to the LMTP daemon, on either a Unix or a TCP socket."""
        if host[0] != '/':
            # Not an absolute path: treat *host* as a regular TCP endpoint.
            return SMTP.connect(self, host, port)
        # Handle Unix-domain sockets.
        try:
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(host)
        except socket.error, msg:
            if self.debuglevel > 0:
                print>>stderr, 'connect fail:', host
            if self.sock:
                self.sock.close()
            self.sock = None
            raise socket.error, msg
        # Read and return the server's greeting banner.
        (code, msg) = self.getreply()
        if self.debuglevel > 0:
            print>>stderr, "connect:", msg
        return (code, msg)
# Test the sendmail method, which tests most of the others.
# Note: This always sends to localhost.
if __name__ == '__main__':
    import sys
    def prompt(prompt):
        # Ask the user for one line of input, stripped of whitespace.
        sys.stdout.write(prompt + ": ")
        return sys.stdin.readline().strip()
    fromaddr = prompt("From")
    toaddrs = prompt("To").split(',')
    print "Enter message, end with ^D:"
    # Accumulate the message body until stdin reports EOF.
    msg = ''
    while 1:
        line = sys.stdin.readline()
        if not line:
            break
        msg = msg + line
    print "Message length is %d" % len(msg)
    server = SMTP('localhost')
    server.set_debuglevel(1)
    server.sendmail(fromaddr, toaddrs, msg)
    server.quit()
| {
"content_hash": "f0a8c2541e4f8c3a1b03e915e22ea789",
"timestamp": "",
"source": "github",
"line_count": 846,
"max_line_length": 89,
"avg_line_length": 36.26359338061466,
"alnum_prop": 0.5777567717331074,
"repo_name": "Symmetry-Innovations-Pty-Ltd/Python-2.7-for-QNX6.5.0-x86",
"id": "5c487e59b8fff5bd84eea9ea8ef760eb03965beb",
"size": "30705",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "usr/pkg/lib/python2.7/smtplib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5696782"
},
{
"name": "C++",
"bytes": "532950"
},
{
"name": "Objective-C",
"bytes": "183763"
},
{
"name": "Python",
"bytes": "9562647"
},
{
"name": "Shell",
"bytes": "247"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings module before importing the
    # management machinery, then dispatch the command given on argv
    # (runserver, migrate, ...).
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kumbhmela_metadb.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| {
"content_hash": "2b42c91a0ae45a38292afa6c686771f8",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 80,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.7172995780590717,
"repo_name": "louisdijkstra/kumbhmela-metadata",
"id": "f30af29a0065a941f8c7852035fc683ec6fe0b59",
"size": "259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "38915"
},
{
"name": "HTML",
"bytes": "39275"
},
{
"name": "JavaScript",
"bytes": "894394"
},
{
"name": "Python",
"bytes": "33541"
}
],
"symlink_target": ""
} |
"""This module is deprecated. Please use :mod:`airflow.providers.amazon.aws.operators.emr`."""
import warnings
from airflow.providers.amazon.aws.operators.emr import EmrClusterLink, EmrModifyClusterOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.amazon.aws.operators.emr`.",
DeprecationWarning,
stacklevel=2,
)
| {
"content_hash": "f5d0ecf3f640802aad8ca9ef62a7bfa6",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 103,
"avg_line_length": 33.45454545454545,
"alnum_prop": 0.7690217391304348,
"repo_name": "Acehaidrey/incubator-airflow",
"id": "71b44d5364e42c31330b3044210ad2cf680fb6b5",
"size": "1156",
"binary": false,
"copies": "5",
"ref": "refs/heads/main",
"path": "airflow/providers/amazon/aws/operators/emr_modify_cluster.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21727510"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495253"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
} |
"""
Cluster-related objects and collections
"""
import copy
from distutils.version import StrictVersion
import six
import yaml
import sqlalchemy as sa
from nailgun.objects.serializers.cluster import ClusterSerializer
from nailgun.orchestrator import graph_configuration
from nailgun import consts
from nailgun.db import db
from nailgun.db.sqlalchemy import models
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.objects import NailgunCollection
from nailgun.objects import NailgunObject
from nailgun.objects import Release
from nailgun.plugins.manager import PluginManager
from nailgun.settings import settings
from nailgun.utils import AttributesGenerator
from nailgun.utils import dict_merge
from nailgun.utils import traverse
# JSON schema fragment for a Cluster's "ui_settings" field (referenced by
# Cluster.schema below); allowed values are taken from nailgun.consts.
CLUSTER_UI_SETTING = {
    "type": "object",
    "required": ["view_mode", "grouping"],
    "properties": {
        "view_mode": {
            "type": "string",
            "description": "View mode of cluster nodes",
            "enum": list(consts.NODE_VIEW_MODES)
        },
        "grouping": {
            "type": "string",
            "description": "Grouping mode of cluster nodes",
            "enum": list(consts.CLUSTER_GROUPING)
        }
    }
}
class Attributes(NailgunObject):
    """Cluster attributes object
    """
    #: SQLAlchemy model for Cluster attributes
    model = models.Attributes
    @classmethod
    def generate_fields(cls, instance):
        """Generate field values for Cluster attributes using
        generators.
        :param instance: Attributes instance
        :returns: None
        """
        # traverse() walks the "generated" dict, filling values in via
        # AttributesGenerator with the cluster and settings as context.
        instance.generated = traverse(
            instance.generated or {},
            AttributesGenerator,
            {
                'cluster': instance.cluster,
                'settings': settings,
            }
        )
        # TODO(ikalnitsky):
        #
        # Think about traversing "editable" attributes. It might be very
        # useful to generate default values for editable attribute at
        # cluster creation time.
    @classmethod
    def merged_attrs(cls, instance):
        """Generates merged dict which includes generated Cluster
        attributes recursively updated by new values from editable
        attributes.
        :param instance: Attributes instance
        :returns: dict of merged attributes
        """
        return dict_merge(
            instance.generated,
            instance.editable
        )
    @classmethod
    def merged_attrs_values(cls, instance):
        """Transforms raw dict of attributes returned by :func:`merged_attrs`
        into dict of facts for sending to orchestrator.
        :param instance: Attributes instance
        :returns: dict of merged attributes
        """
        attrs = cls.merged_attrs(instance)
        # Flatten {'value': x, ...} leaf dicts down to the bare value.
        for group_attrs in attrs.itervalues():
            for attr, value in group_attrs.iteritems():
                if isinstance(value, dict) and 'value' in value:
                    group_attrs[attr] = value['value']
        # Hoist the "common" group's keys to the top level.
        if 'common' in attrs:
            attrs.update(attrs.pop('common'))
        # Turn each boolean flag in "additional_components" into a
        # top-level {'enabled': flag} entry for that component.
        if 'additional_components' in attrs:
            for comp, enabled in attrs['additional_components'].iteritems():
                if isinstance(enabled, bool):
                    attrs.setdefault(comp, {}).update({
                        "enabled": enabled
                    })
            attrs.pop('additional_components')
        return attrs
class Cluster(NailgunObject):
    """Cluster object
    """
    #: SQLAlchemy model for Cluster
    model = models.Cluster
    #: Serializer for Cluster
    serializer = ClusterSerializer
    #: Cluster JSON schema (draft-04) used for serialized Cluster objects;
    #: enum values come from nailgun.consts.
    schema = {
        "$schema": "http://json-schema.org/draft-04/schema#",
        "title": "Cluster",
        "description": "Serialized Cluster object",
        "type": "object",
        "properties": {
            "id": {"type": "number"},
            "name": {"type": "string"},
            "mode": {
                "type": "string",
                "enum": list(consts.CLUSTER_MODES)
            },
            "status": {
                "type": "string",
                "enum": list(consts.CLUSTER_STATUSES)
            },
            "net_provider": {
                "type": "string",
                "enum": list(consts.CLUSTER_NET_PROVIDERS)
            },
            "ui_settings": CLUSTER_UI_SETTING,
            "release_id": {"type": "number"},
            "pending_release_id": {"type": "number"},
            "replaced_deployment_info": {"type": "object"},
            "replaced_provisioning_info": {"type": "object"},
            "is_customized": {"type": "boolean"},
            "fuel_version": {"type": "string"}
        }
    }
    @classmethod
    def create(cls, data):
        """Create Cluster instance with specified parameters in DB.
        This includes:
        * creating Cluster attributes and generating default values \
        (see :func:`create_attributes`)
        * creating NetworkGroups for Cluster
        * adding default pending changes (see :func:`add_pending_changes`)
        * if "nodes" are specified in data then they are added to Cluster \
        (see :func:`update_nodes`)
        :param data: dictionary of key-value pairs as object fields
        :returns: Cluster instance
        """
        # TODO(enchantner): fix this temporary hack in clients
        if "release_id" not in data:
            release_id = data.pop("release", None)
            data["release_id"] = release_id
        # Node assignment happens after the cluster row exists.
        assign_nodes = data.pop("nodes", [])
        data["fuel_version"] = settings.VERSION["release"]
        new_cluster = super(Cluster, cls).create(data)
        cls.create_default_group(new_cluster)
        cls.create_attributes(new_cluster)
        cls.create_vmware_attributes(new_cluster)
        try:
            cls.get_network_manager(new_cluster).\
                create_network_groups_and_config(new_cluster, data)
            cls.add_pending_changes(new_cluster, "attributes")
            cls.add_pending_changes(new_cluster, "networks")
            cls.add_pending_changes(new_cluster, "vmware_attributes")
            if assign_nodes:
                cls.update_nodes(new_cluster, assign_nodes)
        except (
            errors.OutOfVLANs,
            errors.OutOfIPs,
            errors.NoSuitableCIDR,
            errors.InvalidNetworkPool
        ) as exc:
            # Roll back the half-created cluster if network configuration
            # cannot be generated.
            db().delete(new_cluster)
            raise errors.CannotCreate(exc.message)
        db().flush()
        return new_cluster
@classmethod
def get_default_kernel_params(cls, instance):
kernel_params = instance.attributes.editable.get("kernel_params")
if kernel_params and kernel_params.get("kernel"):
return kernel_params.get("kernel").get("value")
    @classmethod
    def create_attributes(cls, instance):
        """Create attributes for current Cluster instance and
        generate default values for them
        (see :func:`Attributes.generate_fields`)
        :param instance: Cluster instance
        :returns: Attributes instance
        """
        attributes = Attributes.create(
            {
                "editable": cls.get_default_editable_attributes(instance),
                "generated": instance.release.attributes_metadata.get(
                    "generated"
                ),
                "cluster_id": instance.id
            }
        )
        # Fill in generator-produced values for the "generated" section.
        Attributes.generate_fields(attributes)
        db().flush()
        return attributes
    @classmethod
    def get_default_editable_attributes(cls, instance):
        """Get editable attributes from release metadata
        :param instance: Cluster instance
        :returns: Dict object
        """
        editable = instance.release.attributes_metadata.get("editable")
        editable = traverse(editable, AttributesGenerator, {
            'cluster': instance,
            'settings': settings,
        })
        # when attributes created we need to understand whether should plugin
        # be applied for created cluster
        plugin_attrs = PluginManager.get_plugin_attributes(instance)
        # Release-defined attributes win over plugin attributes on key clash.
        editable = dict(plugin_attrs, **editable)
        return editable
@classmethod
def get_attributes(cls, instance):
"""Get attributes for current Cluster instance
:param instance: Cluster instance
:returns: Attributes instance
"""
return db().query(models.Attributes).filter(
models.Attributes.cluster_id == instance.id
).first()
@classmethod
def update_attributes(cls, instance, data):
PluginManager.process_cluster_attributes(instance, data['editable'])
for key, value in data.iteritems():
setattr(instance.attributes, key, value)
cls.add_pending_changes(instance, "attributes")
db().flush()
@classmethod
def patch_attributes(cls, instance, data):
PluginManager.process_cluster_attributes(instance, data['editable'])
instance.attributes.editable = dict_merge(
instance.attributes.editable, data['editable'])
cls.add_pending_changes(instance, "attributes")
db().flush()
@classmethod
def get_editable_attributes(cls, instance):
attrs = cls.get_attributes(instance)
editable = attrs.editable
return {'editable': editable}
@classmethod
def get_updated_editable_attributes(cls, instance, data):
"""Same as get_editable_attributes but also merges given data.
:param instance: Cluster object
:param data: dict
:return: dict
"""
return {'editable': dict_merge(
cls.get_editable_attributes(instance)['editable'],
data.get('editable', {})
)}
    @classmethod
    def get_network_manager(cls, instance=None):
        """Get network manager for Cluster instance.
        If instance is None then the default NetworkManager is returned
        :param instance: Cluster instance
        :returns: NetworkManager/NovaNetworkManager/NeutronManager
        """
        # NOTE(review): managers are imported locally, presumably to avoid
        # circular imports between objects and network modules -- confirm.
        if not instance:
            from nailgun.network.manager import NetworkManager
            return NetworkManager
        if instance.net_provider == 'neutron':
            # Newer environments (>= FUEL_NEUTRON_ONLY) use the 7.0 manager.
            ver = instance.release.environment_version
            if StrictVersion(ver) >= StrictVersion(consts.FUEL_NEUTRON_ONLY):
                from nailgun.network.neutron import NeutronManager70
                return NeutronManager70
            from nailgun.network.neutron import NeutronManager
            return NeutronManager
        else:
            from nailgun.network.nova_network import NovaNetworkManager
            return NovaNetworkManager
    @classmethod
    def add_pending_changes(cls, instance, changes_type, node_id=None):
        """Add pending changes for current Cluster.
        If node_id is specified then links created changes with node.
        Idempotent: does nothing if an identical pending change exists.
        :param instance: Cluster instance
        :param changes_type: name of changes to add
        :param node_id: node id for changes
        :returns: None
        """
        logger.debug(
            u"New pending changes in environment {0}: {1}{2}".format(
                instance.id,
                changes_type,
                u" node_id={0}".format(node_id) if node_id else u""
            )
        )
        # TODO(enchantner): check if node belongs to cluster
        ex_chs = db().query(models.ClusterChanges).filter_by(
            cluster=instance,
            name=changes_type
        )
        if not node_id:
            ex_chs = ex_chs.first()
        else:
            ex_chs = ex_chs.filter_by(node_id=node_id).first()
        # do nothing if changes with the same name already pending
        if ex_chs:
            return
        ch = models.ClusterChanges(
            cluster_id=instance.id,
            name=changes_type
        )
        if node_id:
            ch.node_id = node_id
        db().add(ch)
        db().flush()
@classmethod
def get_nodes_not_for_deletion(cls, cluster):
"""All clusters nodes except nodes for deletion."""
return db().query(models.Node).filter_by(
cluster=cluster, pending_deletion=False).order_by(models.Node.id)
@classmethod
def clear_pending_changes(cls, instance, node_id=None):
"""Clear pending changes for current Cluster.
If node_id is specified then only clears changes connected
to this node.
:param instance: Cluster instance
:param node_id: node id for changes
:returns: None
"""
logger.debug(
u"Removing pending changes in environment {0}{1}".format(
instance.id,
u" where node_id={0}".format(node_id) if node_id else u""
)
)
chs = db().query(models.ClusterChanges).filter_by(
cluster_id=instance.id
)
if node_id:
chs = chs.filter_by(node_id=node_id)
map(db().delete, chs.all())
db().flush()
@classmethod
def update(cls, instance, data):
"""Update Cluster object instance with specified parameters in DB.
If "nodes" are specified in data then they will replace existing ones
(see :func:`update_nodes`)
:param instance: Cluster instance
:param data: dictionary of key-value pairs as object fields
:returns: Cluster instance
"""
# fuel_version cannot be changed
data.pop("fuel_version", None)
nodes = data.pop("nodes", None)
changes = data.pop("changes", None)
super(Cluster, cls).update(instance, data)
if nodes is not None:
cls.update_nodes(instance, nodes)
if changes is not None:
cls.update_changes(instance, changes)
return instance
    @classmethod
    def update_nodes(cls, instance, nodes_ids):
        """Update Cluster nodes by specified node IDs.
        Nodes with specified IDs will replace existing ones in Cluster
        :param instance: Cluster instance
        :param nodes_ids: list of nodes ids
        :returns: None
        """
        # TODO(NAME): separate nodes
        # for deletion and addition by set().
        new_nodes = []
        if nodes_ids:
            new_nodes = db().query(models.Node).filter(
                models.Node.id.in_(nodes_ids)
            )
        # NOTE(review): each "in new_nodes" membership test iterates the
        # SQLAlchemy query (re-running the SELECT) -- confirm acceptable.
        nodes_to_remove = [n for n in instance.nodes
                           if n not in new_nodes]
        nodes_to_add = [n for n in new_nodes
                        if n not in instance.nodes]
        for node in nodes_to_add:
            if not node.online:
                raise errors.NodeOffline(
                    u"Cannot add offline node "
                    u"'{0}' to environment".format(node.id)
                )
        map(instance.nodes.remove, nodes_to_remove)
        map(instance.nodes.append, nodes_to_add)
        net_manager = cls.get_network_manager(instance)
        # Removed nodes lose their network assignments, bond configuration
        # and any custom replaced provisioning/deployment info.
        map(
            net_manager.clear_assigned_networks,
            nodes_to_remove
        )
        map(
            net_manager.clear_bond_configuration,
            nodes_to_remove
        )
        cls.replace_provisioning_info_on_nodes(instance, [], nodes_to_remove)
        cls.replace_deployment_info_on_nodes(instance, [], nodes_to_remove)
        # Newly added nodes get the default network assignment.
        map(
            net_manager.assign_networks_by_default,
            nodes_to_add
        )
        db().flush()
@classmethod
def update_changes(cls, instance, changes):
instance.changes_list = [
models.ClusterChanges(**change) for change in changes
]
db().flush()
    @classmethod
    def get_ifaces_for_network_in_cluster(
            cls, instance, net):
        """Method for receiving node_id:iface pairs for all nodes in
        specific cluster
        :param instance: Cluster instance
        :param net: Nailgun specific network name
        :type net: str
        :returns: query yielding (node_id, iface name) pairs for all nodes
            in the cluster (union of NIC and bond interfaces).
        """
        # Physical NICs carrying the network...
        nics_db = db().query(
            models.NodeNICInterface.node_id,
            models.NodeNICInterface.name
        ).filter(
            models.NodeNICInterface.node.has(cluster_id=instance.id),
            models.NodeNICInterface.assigned_networks_list.any(name=net)
        )
        # ...plus bond interfaces carrying it.
        bonds_db = db().query(
            models.NodeBondInterface.node_id,
            models.NodeBondInterface.name
        ).filter(
            models.NodeBondInterface.node.has(cluster_id=instance.id),
            models.NodeBondInterface.assigned_networks_list.any(name=net)
        )
        return nics_db.union(bonds_db)
@classmethod
def replace_provisioning_info_on_nodes(cls, instance, data, nodes):
for node in nodes:
node_data = next((n for n in data if node.uid == n.get('uid')), {})
node.replaced_provisioning_info = node_data
@classmethod
def replace_deployment_info_on_nodes(cls, instance, data, nodes):
for node in instance.nodes:
node_data = [n for n in data if node.uid == n.get('uid')]
node.replaced_deployment_info = node_data
@classmethod
def replace_provisioning_info(cls, instance, data):
received_nodes = data.pop('nodes', [])
instance.is_customized = True
instance.replaced_provisioning_info = data
cls.replace_provisioning_info_on_nodes(
instance, received_nodes, instance.nodes)
return cls.get_provisioning_info(instance)
@classmethod
def replace_deployment_info(cls, instance, data):
instance.is_customized = True
cls.replace_deployment_info_on_nodes(instance, data, instance.nodes)
return cls.get_deployment_info(instance)
@classmethod
def get_provisioning_info(cls, instance):
data = {}
if instance.replaced_provisioning_info:
data.update(instance.replaced_provisioning_info)
nodes = []
for node in instance.nodes:
if node.replaced_provisioning_info:
nodes.append(node.replaced_provisioning_info)
if data:
data['nodes'] = nodes
return data
@classmethod
def get_deployment_info(cls, instance):
data = []
for node in instance.nodes:
if node.replaced_deployment_info:
data.extend(node.replaced_deployment_info)
return data
@classmethod
def get_creds(cls, instance):
return instance.attributes.editable['access']
@classmethod
def should_assign_public_to_all_nodes(cls, instance):
"""Determine whether Public network is to be assigned to all nodes in
this cluster.
:param instance: cluster instance
:returns: True when Public network is to be assigned to all nodes
"""
if instance.net_provider == \
consts.CLUSTER_NET_PROVIDERS.nova_network:
return True
assignment = instance.attributes.editable.get(
'public_network_assignment')
if not assignment or assignment['assign_to_all_nodes']['value']:
return True
return False
    @classmethod
    def get_roles(cls, instance):
        """Returns a dictionary of node roles available for deployment.
        :param instance: cluster instance
        :returns: a dictionary of roles metadata
        """
        # TODO(ikalnitsky): merge here release roles with plugins one
        return instance.release.roles_metadata
    @classmethod
    def set_primary_role(cls, instance, nodes, role_name):
        """Method for assigning primary attribute for specific role.
        - verify that there is no primary attribute of specific role
        assigned to cluster nodes with this role in role list
        or pending role list, and this node is not marked for deletion
        - if there is no primary role assigned, filter nodes which have current
        role in roles or pending_roles
        - if there is nodes with ready state - they should have higher priority
        - if role was in primary_role_list - change primary attribute
        for that association, same for role_list, this is required
        because deployment_serializer used by cli to generate deployment info
        :param instance: Cluster db objects
        :param nodes: list of Node db objects
        :param role_name: string with known role name
        """
        if role_name not in cls.get_roles(instance):
            logger.warning(
                'Trying to assign primary for non-existing role %s', role_name)
            return
        # NOTE: the name "node" is reused as the loop variable below.
        node = cls.get_primary_node(instance, role_name)
        if not node:
            # get nodes with a given role name which are not going to be
            # removed
            filtered_nodes = []
            for node in nodes:
                if (not node.pending_deletion and (
                        role_name in set(node.roles + node.pending_roles))):
                    filtered_nodes.append(node)
            filtered_nodes = sorted(filtered_nodes, key=lambda node: node.id)
            if filtered_nodes:
                # Prefer the first node already in "ready" state; fall back
                # to the lowest-id candidate.
                primary_node = next((
                    node for node in filtered_nodes
                    if node.status == consts.NODE_STATUSES.ready),
                    filtered_nodes[0])
                # Reassign the list so the ORM notices the mutation.
                primary_node.primary_roles = list(primary_node.primary_roles)
                primary_node.primary_roles.append(role_name)
        db().flush()
@classmethod
def set_primary_roles(cls, instance, nodes):
    """Idempotently assign primary attributes for all roles needing them.

    A role requires a primary node when its release metadata contains
    ``has_primary: true``. No-op for clusters that are not in HA mode.

    :param instance: Cluster db object
    :param nodes: list of Node db objects
    """
    if not instance.is_ha_mode:
        return
    for role_name, metadata in six.iteritems(cls.get_roles(instance)):
        if metadata.get('has_primary'):
            cls.set_primary_role(instance, nodes, role_name)
@classmethod
def get_nodes_by_role(cls, instance, role_name):
    """Return cluster nodes that have ``role_name`` assigned or pending.

    :param instance: cluster db object
    :param role_name: node role name
    :returns: list of Node db objects (empty for an unknown role)
    """
    if role_name not in cls.get_roles(instance):
        logger.warning("%s role doesn't exist", role_name)
        return []
    role_filter = sa.or_(
        models.Node.roles.any(role_name),
        models.Node.pending_roles.any(role_name))
    query = db().query(models.Node).filter_by(cluster_id=instance.id)
    return query.filter(role_filter).all()
@classmethod
def get_primary_node(cls, instance, role_name):
    """Return the primary node for ``role_name``, or None if not found.

    Nodes pending deletion are excluded from the search.

    :param instance: cluster db object
    :param role_name: node role name
    :returns: node db object or None
    """
    logger.debug("Getting primary node for role: %s", role_name)
    if role_name not in cls.get_roles(instance):
        logger.debug("Role not found: %s", role_name)
        return None
    base_query = db().query(models.Node).filter_by(
        pending_deletion=False,
        cluster_id=instance.id)
    primary_node = base_query.filter(
        models.Node.primary_roles.any(role_name)).first()
    if primary_node is None:
        logger.debug("Not found primary node for role: %s", role_name)
    else:
        logger.debug("Found primary node: %s for role: %s",
                     primary_node.id, role_name)
    return primary_node
@classmethod
def get_controllers_group_id(cls, instance):
    """Return the node-group id hosting the cluster's controllers.

    Preference order: a node with 'controller' already assigned, then a
    node with 'controller' pending, then the cluster's default group.
    """
    nodes = db().query(models.Node).filter_by(
        cluster_id=instance.id
    ).filter(
        # SQLAlchemy column expression (not a Python comparison):
        # excludes nodes marked for deletion.
        False == models.Node.pending_deletion
    )
    controller = nodes.filter(models.Node.roles.any('controller')).first()
    if not controller or not controller.group_id:
        controller = nodes.filter(
            models.Node.pending_roles.any('controller')).first()
    if controller and controller.group_id:
        return controller.group_id
    # No controller with a group found: fall back to the default group.
    return cls.get_default_group(instance).id
@classmethod
def get_bond_interfaces_for_all_nodes(cls, instance, networks=None):
    """Return bond interfaces of all cluster nodes.

    When ``networks`` is given, only bonds with any of those
    network-group ids assigned are returned.
    """
    query = db().query(models.NodeBondInterface).join(models.Node).filter(
        models.Node.cluster_id == instance.id)
    if networks:
        query = query.join(
            models.NodeBondInterface.assigned_networks_list,
            aliased=True).filter(models.NetworkGroup.id.in_(networks))
    return query.all()
@classmethod
def get_nic_interfaces_for_all_nodes(cls, instance, networks=None):
    """Return NIC interfaces of all cluster nodes.

    When ``networks`` is given, only NICs with any of those
    network-group ids assigned are returned.
    """
    query = db().query(models.NodeNICInterface).join(models.Node).filter(
        models.Node.cluster_id == instance.id)
    if networks:
        query = query.join(
            models.NodeNICInterface.assigned_networks_list,
            aliased=True).filter(models.NetworkGroup.id.in_(networks))
    return query.all()
@classmethod
def get_default_group(cls, instance):
    """Return the cluster's default node group.

    Raises IndexError when the default group does not exist.
    """
    matching = [group for group in instance.node_groups
                if group.name == consts.NODE_GROUPS.default]
    return matching[0]
@classmethod
def create_default_group(cls, instance):
    """Create, attach and flush the default node group for the cluster.

    Bug fix: the previous code called ``db.add(node_group)`` -- ``add``
    on the session *factory* -- instead of ``db().add(node_group)`` on
    the session itself, as done everywhere else in this module.

    :param instance: cluster db object
    :returns: the newly created NodeGroup db object
    """
    node_group = models.NodeGroup(name=consts.NODE_GROUPS.default)
    instance.node_groups.append(node_group)
    db().add(node_group)
    db().flush()
    return node_group
@classmethod
def get_deployment_tasks(cls, instance):
    """Return the deployment graph for the cluster.

    Priority:
    - user-defined ``deployment_tasks`` stored on the cluster, if any;
    - the custom patching graph when the cluster is assigned for
      patching (``pending_release_id`` is set);
    - otherwise the release's default deployment graph.
    """
    if instance.deployment_tasks:
        return instance.deployment_tasks
    elif instance.pending_release_id:
        # safe_load refuses arbitrary YAML tags; PATCHING is plain-data
        # configuration, so the parsed result is unchanged.
        return yaml.safe_load(graph_configuration.PATCHING)
    else:
        return Release.get_deployment_tasks(instance.release)
@classmethod
def create_vmware_attributes(cls, instance):
    """Create and store a VmwareAttributes record for the cluster.

    Returns None when the release carries no vmware attributes metadata.
    """
    vmware_metadata = instance.release.vmware_attributes_metadata
    if not vmware_metadata:
        return None
    return VmwareAttributes.create({
        "editable": vmware_metadata.get("editable"),
        "cluster_id": instance.id,
    })
@classmethod
def get_vmware_attributes(cls, instance):
    """Fetch the cluster's VmwareAttributes row (1:1 relation) or None."""
    query = db().query(models.VmwareAttributes)
    return query.filter(
        models.VmwareAttributes.cluster_id == instance.id).first()
@classmethod
def get_default_vmware_attributes(cls, instance):
    """Return release vmware metadata with generated (default) values."""
    raw_editable = instance.release.vmware_attributes_metadata.get("editable")
    return traverse(raw_editable, AttributesGenerator, {
        'cluster': instance,
        'settings': settings,
    })
@classmethod
def update_vmware_attributes(cls, instance, data):
    """Update the cluster's vmware attributes.

    Only the 'value' section of 'editable' is taken from ``data``; the
    'metadata' section is preserved from the stored attributes. Returns
    a dict containing only the new 'value' section.
    """
    stored_metadata = instance.vmware_attributes.editable['metadata']
    new_value = data.get('editable', {}).get('value')
    vmware_attr = {
        'metadata': stored_metadata,
        'value': new_value,
    }
    instance.vmware_attributes.editable = vmware_attr
    cls.add_pending_changes(instance, "vmware_attributes")
    db().flush()
    # Strip metadata from the returned payload; only the value matters
    # to the caller.
    vmware_attr.pop('metadata')
    return vmware_attr
@classmethod
def is_vmware_enabled(cls, instance):
    """Check whether the cluster has vCenter support switched on."""
    common_attrs = cls.get_attributes(instance).editable.get('common', {})
    return common_attrs.get('use_vcenter', {}).get('value')
@staticmethod
def adjust_nodes_lists_on_controller_removing(instance, nodes_to_delete,
                                              nodes_to_deploy):
    """Schedule remaining controllers for redeployment on controller removal.

    When any controller is being deleted, every other controller of the
    cluster that is neither deleted nor already scheduled is appended to
    ``nodes_to_deploy`` (in place).

    :param instance: instance of SqlAlchemy cluster
    :param nodes_to_delete: list of nodes to be deleted
    :param nodes_to_deploy: list of nodes to be deployed
    :return:
    """
    if instance is None:
        return
    deleted_controller_ids = {n.id for n in nodes_to_delete
                              if 'controller' in n.all_roles}
    if not deleted_controller_ids:
        return
    scheduled_ids = {n.id for n in nodes_to_deploy}
    remaining_controllers = {
        n for n in instance.nodes
        if 'controller' in n.all_roles
        and n.id not in deleted_controller_ids
        and n.id not in scheduled_ids}
    nodes_to_deploy.extend(remaining_controllers)
@classmethod
def get_repo_urls(cls, instance):
    """Return the unique repository URIs configured for the cluster.

    Fix: the first parameter of this ``classmethod`` was misleadingly
    named ``self``; it receives the class object, so it is now ``cls``.
    """
    repos = instance.attributes.editable['repo_setup']['repos']['value']
    return tuple({repo['uri'] for repo in repos})
@classmethod
def get_nodes_to_spawn_vms(cls, instance):
    """Return kvm nodes that still have at least one VM left to spawn.

    Bug fix: the previous implementation appended the same node once per
    not-yet-created VM, producing duplicate entries when a node had
    several pending VMs. Each node is now reported at most once.
    """
    nodes = []
    for node in cls.get_nodes_by_role(instance,
                                      consts.VIRTUAL_NODE_TYPES.kvm):
        if any(not vm.get('created') for vm in node.attributes.vms_conf):
            nodes.append(node)
    return nodes
@classmethod
def mark_vms_as_created(cls, instance):
    """Flag every VM on the cluster's kvm nodes as created.

    vms_conf is deep-copied and reassigned so the change is tracked as
    a new attribute value.
    """
    for node in cls.get_nodes_by_role(instance,
                                      consts.VIRTUAL_NODE_TYPES.kvm):
        updated_conf = copy.deepcopy(node.attributes.vms_conf)
        for vm in updated_conf:
            if not vm.get('created'):
                vm['created'] = True
        node.attributes.vms_conf = updated_conf
    db().flush()
class ClusterCollection(NailgunCollection):
    """Collection handler for Cluster objects."""

    #: Single Cluster object class
    single = Cluster
class VmwareAttributes(NailgunObject):
    """Nailgun object wrapper for cluster VMware attributes."""

    # SQLAlchemy model backing this object.
    model = models.VmwareAttributes
| {
"content_hash": "424620cc2bd881d4ec3c838edca79ddf",
"timestamp": "",
"source": "github",
"line_count": 893,
"max_line_length": 79,
"avg_line_length": 34.63157894736842,
"alnum_prop": 0.5960033628661967,
"repo_name": "prmtl/fuel-web",
"id": "697996e6184d7f2ad0702ad60654d577f4322daf",
"size": "31561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nailgun/nailgun/objects/cluster.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "67993"
},
{
"name": "HTML",
"bytes": "7268"
},
{
"name": "JavaScript",
"bytes": "774488"
},
{
"name": "Mako",
"bytes": "1449"
},
{
"name": "Puppet",
"bytes": "282"
},
{
"name": "Python",
"bytes": "4031810"
},
{
"name": "Ruby",
"bytes": "36362"
},
{
"name": "Shell",
"bytes": "30651"
}
],
"symlink_target": ""
} |
import logging
import socket
from .deadline import Deadline
from .message import Message
# Module-level logger.
logger = logging.getLogger(__name__)

# Number of bytes requested per socket read.
BLOCK_SIZE = 4096
# Socket timeout in seconds; recv() temporarily overrides it with the
# caller's deadline and restores it afterwards.
DEFAULT_TIMEOUT = 2.0
class Connection(object):
    """A CRLF-delimited message connection over a blocking TCP socket.

    Incoming bytes are accumulated in an internal buffer; `recv` pops one
    complete b"\r\n"-terminated frame at a time and decodes it into a
    `Message`.
    """

    def __init__(self, host: str, port: int):
        self.__buffer: bytearray = bytearray()
        self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.__socket.settimeout(DEFAULT_TIMEOUT)
        self.__socket.connect((host, port))
        logger.debug("Connected to %s:%d!" % (host, port))

    def __read_buffered_msg(self) -> Message:
        """Pop one complete CRLF-terminated message from the buffer, or None."""
        buf = self.__buffer
        pos = buf.find(b"\r\n")
        if pos < 0:
            return None
        result = buf[0:pos]
        # Drop the frame plus its two-byte delimiter.
        del buf[0 : pos + 2]
        return Message.decode(result)

    def recv(self, deadline: Deadline) -> Message:
        """Receive the next message, waiting at most until `deadline`.

        Returns None on timeout, or when the peer closed the connection
        (in which case the local socket is closed too).
        """
        msg = self.__read_buffered_msg()
        try:
            while msg is None:
                self.__socket.settimeout(deadline.remaining(lower_bound=0.001))
                tmp = self.__socket.recv(BLOCK_SIZE)
                if len(tmp) > 0:
                    self.__buffer.extend(tmp)
                    msg = self.__read_buffered_msg()
                    logger.debug("received: %s" % msg)
                else:
                    # recv() returning b'' means orderly shutdown by peer.
                    logger.debug("Connection shutdown by remote peer")
                    self.close()
                    return None
        except socket.timeout:
            logger.debug("readline timed out")
        finally:
            # Restore the default timeout for subsequent operations.
            self.__socket.settimeout(DEFAULT_TIMEOUT)
        return msg

    def send(self, msg: Message):
        """Send a whole encoded message.

        Bug fix: socket.send() may transmit only part of the payload;
        sendall() guarantees the full message is written or raises.
        """
        self.__socket.sendall(msg.encode())
        logger.debug("sent: %s" % msg)

    def close(self):
        """Close the socket, swallowing (but logging) any error."""
        try:
            self.__socket.close()
            logger.debug("closed")
        except Exception:
            logger.debug("Unable to close connection. Dropping it...")
| {
"content_hash": "2d15d9fb8b28ab6c17367df7687549cf",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 79,
"avg_line_length": 31.559322033898304,
"alnum_prop": 0.5461868958109559,
"repo_name": "goodfield/python-maxcube-api",
"id": "5cd398971983909dd84080045ce79f3669bf6ed9",
"size": "1862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maxcube/connection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "72"
},
{
"name": "Python",
"bytes": "31792"
}
],
"symlink_target": ""
} |
from lino.api import _
from lino_xl.lib.lists.models import *
# from lino_xl.lib.coachings.mixins import Coachable
from lino_tera.lib.contacts.models import Partner
class List(List, Partner):
    """A partner that represents a therapeutical group.

    Bug fix: ``Meta`` assigned ``verbose_name`` twice; the second
    (plural) assignment is clearly meant to be ``verbose_name_plural``.
    """

    class Meta(List.Meta):
        app_label = 'lists'
        abstract = dd.is_abstract_model(__name__, 'List')
        verbose_name = _("Therapeutical group")
        verbose_name_plural = _("Therapeutical groups")

    def full_clean(self, *args, **kw):
        """Set the `name` field of this list. This field is visible in the
        Partner's detail but not in the Lists's detail where it is
        filled automatically from the designation in the site's main
        language. and serves for sorting when selecting a List as
        Partner.
        """
        if self.designation:
            self.name = self.designation
        else:
            self.designation = self.name
        super(List, self).full_clean(*args, **kw)
| {
"content_hash": "c99077bde90dd22722b788763def5a3d",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 75,
"avg_line_length": 33.233333333333334,
"alnum_prop": 0.633901705115346,
"repo_name": "lino-framework/tera",
"id": "b6e93daaa358479630716f1653df0252c3b953b9",
"size": "1138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lino_tera/lib/lists/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "612"
},
{
"name": "HTML",
"bytes": "4400"
},
{
"name": "Python",
"bytes": "149487"
}
],
"symlink_target": ""
} |
import unittest
from collections import namedtuple
from common import event
from crawler import prepare, get_items, crawl
import crawler
# Test shims: route add_link straight to crawl (bypassing whatever
# scheduling add_link normally does) and zero the per-site sleep so the
# tests do not wait between requests.
crawler.add_link = lambda *args, **kwargs: crawl(*args, **kwargs)
crawler.website_sleep_time = 0
def html(body):
    """Wrap *body* in a minimal HTML document (empty head).

    Fix: previously a lambda bound to a name (PEP 8 E731); a def gives a
    proper function name in tracebacks.
    """
    return "<html><head></head><body>{}</body></html>".format(body)


# Declarative description of one fixture site used by the crawl tests.
test_site_info = namedtuple(
    'test_site_info',
    'name baseurl urlnosaveregex urlregex html_hints test_html test_answer'
)
# Fixture sites: each entry wires a fake site's pages (test_html) to the
# crawler config (regexes, hints) and the event expected to be extracted.
tests = [
    test_site_info(
        name='test1',
        baseurl='http://test1/',
        test_html={
            'http://test1/': html("<a href='/nosave'></a>"),
            'http://test1/nosave': html("<a href='/event'></a>"),
            'http://test1/event': html("<span>answer</span>"),
        },
        urlnosaveregex='/nosave',
        urlregex='/event',
        html_hints={'name': 'span'},
        test_answer=event(html=html('<span>answer</span>'))
    ),
    test_site_info(
        name='test2',
        baseurl='http://test2/',
        test_html={
            'http://test2/': html("<a href='/nosave'></a>"),
            'http://test2/nosave': html("<a href='http://test2/event'></a>"),
            'http://test2/event': html("<span>answer</span>"),
        },
        urlnosaveregex='/nosave',
        urlregex='/event',
        html_hints={'name': 'span'},
        test_answer=event(html=html('<span>answer</span>'))
    ),
    test_site_info(
        name='ww_test',
        baseurl='http://www.com/portland/e.h',
        test_html={
            'http://www.com/portland/e.h':
                html('<a href="event-169055-miguel_gutierrez_and_the_powerful_people.html"></a>'),
            'http://www.com/portland/event-169055-miguel_gutierrez_and_the_powerful_people.html':
                html("<span>answer</span>"),
        },
        urlnosaveregex='/nosave',
        # Raw string: '\.' is an invalid escape sequence in a plain
        # string literal (the regex value itself is unchanged).
        urlregex=r'/portland/event-[0-9]+-.+\.html',
        html_hints={'name': 'span'},
        test_answer=event(html=html('<span>answer</span>'))
    )
]
class CrawlerTest(unittest.TestCase):
    """End-to-end crawl of each fixture site declared in ``tests``."""

    def test_crawling(self):
        for test in tests:
            # Point the crawler at the in-memory fixture pages.
            crawler.test_html = test.test_html
            prepare(test._asdict(), test=True)
            result = list(get_items())
            self.assertNotEqual([], result)
            # assertEquals is a deprecated alias of assertEqual.
            self.assertEqual(result[0].html, test.test_answer.html)
| {
"content_hash": "ce9ce31683255a2e42c6c3ef063635a7",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 98,
"avg_line_length": 33.83098591549296,
"alnum_prop": 0.5441298917568693,
"repo_name": "andychase/eventizer",
"id": "2935eee8b0ef18418a60e07ff1623563113b1af7",
"size": "2402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grab/crawler_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "102471"
},
{
"name": "Shell",
"bytes": "74"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
from contextlib import contextmanager
@contextmanager
def Switch():
    """Context manager emulating a switch statement via exceptions.

    Yields a (switch, case, default) triple: ``switch(v)`` raises an
    exception class unique to ``v``; ``case(v)`` returns that class so an
    ``except case(v):`` clause matches only the switched value;
    ``default()`` returns the common base class, matching anything.

    Fix: replaced ``dict.has_key`` (removed in Python 3) with the ``in``
    operator, which behaves identically on both Python 2 and 3.
    """
    D = {}

    class _P(Exception):
        pass

    def _mkCase(var):
        # Create (and memoize) a distinct exception class for `var`.
        class _PP(_P):
            V = var

            def __repr__(self):
                return str(self.V)
        D[var] = _PP
        return _PP

    def switch(var):
        if var in D:
            raise D[var]()
        raise _mkCase(var)()

    def case(var):
        if var in D:
            return D[var]
        return _mkCase(var)

    def default():
        return _P

    yield switch, case, default
# NOTE: Python 2 syntax below (print statements) -- manual smoke tests.
if __name__=="__main__":
    def test1():
        # 55 matches none of the declared cases, so `default` fires.
        with Switch() as (switch, case, default):
            try: switch(55)
            except case(1):
                print 1
            except case(6):
                print 6
            except case(5):
                print 5
            except default():
                print 'default..'

    def test2():
        # 'hola' matches its own case exactly (prints 'hola').
        with Switch() as (switch, case, default):
            try:switch('hola')
            except case(1):
                print 1
            except case('holaS'):
                print 'holaS'
            except case('hola'):
                print 'hola'
            except default():
                print 'default..'

    test1()
    test2()
| {
"content_hash": "4ca2912393979cc703e73b278257f49e",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 49,
"avg_line_length": 23.1864406779661,
"alnum_prop": 0.4283625730994152,
"repo_name": "ActiveState/code",
"id": "f7433cbf523d0f5c2705fb35118db345473a32fd",
"size": "1368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/521914_Switchlike_statement/recipe-521914.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
import argparse, os
import multiprocessing, subprocess
from comm import *
__author__ = "Michael Wang"
__version__ = "0.1.0"

# Detect the host platform: console output is decoded as GBK on Windows
# and as UTF-8 everywhere else.
import platform
iswindow = "Window" in platform.platform()
encodestr = "gbk" if iswindow else "utf-8"
# Run the test cases on each connected device, e.g.:
#   python pyappium -d emulator-5554
def StartTestRunner(*args):
    """Spawn a shell command and forward its stdout to our stdout.

    Bug fix: the previous loop stopped at the first *blank* output line,
    because it stripped '\\r\\n' before testing for emptiness. readline()
    only returns an empty bytes object at EOF, so the raw line is tested
    instead, and blank lines in the child's output no longer truncate it.
    """
    subp = subprocess.Popen(args[0], shell=True, stdout=subprocess.PIPE)
    while True:
        raw = subp.stdout.readline()
        if not raw:  # b'' => child closed stdout (EOF)
            break
        # errors='replace' keeps us alive on undecodable console bytes.
        print(raw.decode(encodestr, errors='replace').rstrip('\r\n'))
    subp.wait()
if __name__ == "__main__":
    # Obtain the command-line parameters.
    ap = argparse.ArgumentParser()
    ap.add_argument("-r", "--remove_suite", required = False, help = "Specify removed testsuit")
    ap.add_argument("-a", "--app", required = False, help = "Install app file name")
    args = vars(ap.parse_args())
    appName = args['app']
    remove_suite = args['remove_suite']
    # Enumerate the currently connected Android devices.
    devlst = pyLib.getConnectAndroidDevices()
    if len(devlst) == 0:
        print(u"当前没有设备链接")
        exit(0)
    # Kill all previously running Appium instances (Windows-only: taskkill).
    os.system('taskkill /IM Appium.exe /F')
    # Start the tests: one runner process plus one Appium server per device.
    jobs = []
    appiumPort = 4723
    for dev in devlst:
        argstr = "python pyappium.py -p %d -d %s" % (appiumPort, dev)
        if appName:
            argstr += " -a %s"%appName
        if remove_suite:
            argstr += " -r %s"%remove_suite
        service = multiprocessing.Process(name=dev, target=StartTestRunner, args=(argstr,))
        jobs.append(service)
        service.start()
        # Start the Appium server for this device; each device gets its own
        # port / bootstrap-port pair, hence the += 2 below.
        appiumstr = 'appium -p %d -bp %d -U %s' % (appiumPort, appiumPort+1, dev)
        subp = subprocess.Popen(appiumstr,shell=True,stdout=subprocess.PIPE)
        appiumPort += 2
    # Wait for every device runner to finish.
    for job in jobs:
        job.join()
| {
"content_hash": "b6924661965aff307968b6dacd27abd5",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 93,
"avg_line_length": 26.043478260869566,
"alnum_prop": 0.6750139120756817,
"repo_name": "butyesbutno/pyappium",
"id": "eaf6b5ff790f1fcf1e32b1777d7bd2003d6b4491",
"size": "2523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyMultiDev.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "32"
},
{
"name": "Python",
"bytes": "59644"
}
],
"symlink_target": ""
} |
"""Helper functions for resegmentation.
Resegmentation is local segmentation targeted to specific points in an already
segmented volume. The results of resegmentation can be compared to the original
segments in order to perform object agglomeration.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import logging
import os
import numpy as np
from scipy import ndimage
from scipy.special import expit
from tensorflow import gfile
from . import storage
from .inference_utils import timer_counter
from ..utils import bounding_box
def get_starting_location(dists, exclusion_radius):
    """Pick the voxel with the largest distance value.

    Returns the (z, y, x) index of the maximum of `dists`, and zeroes a
    box of `exclusion_radius` around it *in place* so the same area is
    not selected again.
    """
    flat_idx = np.argmax(dists)
    z, y, x = np.unravel_index(flat_idx, tuple(dists.shape))
    er = exclusion_radius
    z_sel = slice(max(z - er.z, 0), z + er.z + 1)
    y_sel = slice(max(y - er.y, 0), y + er.y + 1)
    x_sel = slice(max(x - er.x, 0), x + er.x + 1)
    # Mark area around the new point as 'excluded' by clearing the
    # distance map around it.
    dists[z_sel, y_sel, x_sel] = 0
    return z, y, x
def get_target_path(request, point_num):
    """Computes the output path for a specific point.

    Args:
        request: ResegmentationRequest proto
        point_num: index of the point of interest within the proto

    Returns:
        path to the output file where resegmentation results will be
        saved, or None when the output already exists
    """
    # Prepare the output directory.
    output_dir = request.output_directory
    id_a = request.points[point_num].id_a
    id_b = request.points[point_num].id_b
    if request.subdir_digits > 1:
        m = hashlib.md5()
        # hashlib requires bytes; feeding a bare str breaks on Python 3
        # (the ASCII digit bytes are identical on Python 2, so digests
        # are unchanged).
        m.update(str(id_a).encode('utf-8'))
        m.update(str(id_b).encode('utf-8'))
        output_dir = os.path.join(output_dir,
                                  m.hexdigest()[:request.subdir_digits])
    gfile.MakeDirs(output_dir)

    # Terminate early if the output already exists.
    dp = request.points[point_num].point
    target_path = os.path.join(output_dir, '%d-%d_at_%d_%d_%d.npz' % (
        id_a, id_b, dp.x, dp.y, dp.z))
    if gfile.Exists(target_path):
        logging.info('Output already exists: %s', target_path)
        return None

    return target_path
def get_canvas(point, radius, runner):
    """Creates an FFN Canvas centered on a decision point.

    Args:
        point: decision point as (z, y, x)
        radius: radius around decision point as (z, y, x)
        runner: inference Runner object

    Returns:
        the (canvas, alignment) pair from runner.make_canvas, or
        (None, None) when the requested subvolume does not fit inside
        the initial segmentation volume.
    """
    center = np.array(point)
    halo = np.array(radius)
    corner = center - halo
    subvol_size = 2 * halo + 1
    end = corner + subvol_size

    seg_shape = runner.init_seg_volume.shape
    if (np.any(corner < 0) or
        seg_shape[1] <= end[0] or
        seg_shape[2] <= end[1] or
        seg_shape[3] <= end[2]):
        logging.error('Not enough context for: %d, %d, %d; corner: %r; end: %r',
                      point[2], point[1], point[0], corner, end)
        return None, None

    return runner.make_canvas(corner, subvol_size, keep_history=True)
def process_point(request, runner, point_num, voxel_size):
    """Runs resegmentation for a specific point.

    Results (quantized probability maps, seed histories, start points and
    counters) are written as a compressed .npz file at the path computed
    by get_target_path; nothing is done when that file already exists.

    Args:
        request: ResegmentationRequest proto
        runner: inference Runner object
        point_num: index of the point of interest within the proto
        voxel_size: (z, y, x) voxel size in physical units
    """
    with timer_counter(runner.counters, 'resegmentation'):
        target_path = get_target_path(request, point_num)
        if target_path is None:
            return

        curr = request.points[point_num]
        # Convert the proto Point to a (z, y, x) tuple.
        point = curr.point
        point = point.z, point.y, point.x
        radius = (request.radius.z, request.radius.y, request.radius.x)
        canvas, alignment = get_canvas(point, radius, runner)
        if canvas is None:
            logging.warning('Could not get a canvas object.')
            return

        def unalign_prob(prob):
            # Map a probability map from aligned canvas space back into
            # the original (unaligned) coordinate system.
            return alignment.align_and_crop(
                canvas.corner_zyx,
                prob,
                alignment.corner,
                alignment.size,
                forward=False)

        is_shift = (canvas.restrictor is not None and
                    np.any(canvas.restrictor.shift_mask))
        # A point without id_b is an endpoint: only one segment to regrow.
        is_endpoint = not curr.HasField('id_b')
        seg_a = canvas.segmentation == canvas.local_id(curr.id_a)
        size_a = np.sum(seg_a)
        if is_endpoint:
            size_b = -1
            todo = [seg_a]
        else:
            seg_b = canvas.segmentation == canvas.local_id(curr.id_b)
            size_b = np.sum(seg_b)
            todo = [seg_a, seg_b]

        if size_a == 0 or size_b == 0:
            logging.warning('Segments (%d, %d) local ids (%d, %d) not found in input '
                            'at %r. Current values are: %r.',
                            curr.id_a, curr.id_b, canvas.local_id(curr.id_a),
                            canvas.local_id(curr.id_b), point,
                            np.unique(canvas.segmentation))
            canvas._deregister_client()  # pylint:disable=protected-access
            return

        if is_endpoint:
            canvas.seg_prob[:] = 0.0
            canvas.segmentation[:] = 0
        else:
            # Clear the two segments in question, but keep everything else as
            # context.
            canvas.segmentation[seg_a] = 0
            canvas.segmentation[seg_b] = 0
            canvas.seg_prob[seg_a] = 0.0
            canvas.seg_prob[seg_b] = 0.0

        # Express the decision point in canvas-local (aligned) coordinates.
        transformed_point = alignment.transform(np.array([point]).T)
        tz, ty, tx = transformed_point[:, 0]
        oz, oy, ox = canvas.corner_zyx
        tz -= oz
        ty -= oy
        tx -= ox

        # First index enumerates the original segments. Second index,
        # when present, enumerates segmentation attempts.
        raw_probs = []
        probs = []
        deletes = []
        histories = []
        start_points = [[], []]

        if request.HasField('analysis_radius'):
            ar = request.analysis_radius
            analysis_box = bounding_box.BoundingBox(
                start=(radius[2] - ar.x,
                       radius[1] - ar.y,
                       radius[0] - ar.z),
                size=(2 * ar.x + 1, 2 * ar.y + 1, 2 * ar.z + 1))
        else:
            analysis_box = bounding_box.BoundingBox(
                (0, 0, 0), canvas.image.shape[::-1])

        options = request.inference.inference_options
        for i, seg in enumerate(todo):
            logging.info('processing object %d', i)
            with timer_counter(canvas.counters, 'edt'):
                # Seeds are placed at distance-transform maxima, i.e. as
                # deep inside the segment as possible.
                dists = ndimage.distance_transform_edt(seg, sampling=voxel_size)

            # Do not seed where not enough context is available.
            dists[:canvas.margin[0], :, :] = 0
            dists[:, :canvas.margin[1], :] = 0
            dists[:, :, :canvas.margin[2]] = 0
            dists[-canvas.margin[0]:, :, :] = 0
            dists[:, -canvas.margin[1]:, :] = 0
            dists[:, :, -canvas.margin[2]:] = 0
            canvas.log_info('EDT computation done')

            # Optionally exclude a region around the decision point from
            # seeding.
            if request.HasField('init_exclusion_radius'):
                ier = request.init_exclusion_radius
                dists[tz - ier.z:tz + ier.z + 1,
                      ty - ier.y:ty + ier.y + 1,
                      tx - ier.x:tx + ier.x + 1] = 0

            seg_prob = None
            recovered = False
            for _ in range(request.max_retry_iters):
                z0, y0, x0 = get_starting_location(dists, request.exclusion_radius)
                if not seg[z0, y0, x0]:
                    continue
                canvas.log_info('.. starting segmentation at (xyz): %d %d %d',
                                x0, y0, z0)
                canvas.segment_at((z0, y0, x0))
                seg_prob = expit(canvas.seed)
                start_points[i].append((x0, y0, z0))

                # Check if we recovered an acceptable fraction of the initial
                # segment in which the seed was located.
                recovered = True
                crop_seg = seg[analysis_box.to_slice()]
                crop_prob = seg_prob[analysis_box.to_slice()]
                start_size = np.sum(crop_seg)
                segmented_voxels = np.sum((crop_prob >= options.segment_threshold) &
                                          crop_seg)
                if request.segment_recovery_fraction > 0:
                    if segmented_voxels / start_size >= request.segment_recovery_fraction:
                        break
                elif segmented_voxels >= options.min_segment_size:
                    break
                recovered = False

            # Store resegmentation results.
            if seg_prob is not None:
                qprob = storage.quantize_probability(seg_prob)
                raw_probs.append(qprob)
                probs.append(unalign_prob(qprob))
                deletes.append(np.array(canvas.history_deleted))
                histories.append(np.array(canvas.history))

            if request.terminate_early:
                # Stop early when the current object failed to recover, or
                # when object 0's regrowth does not also recover enough of
                # object 1 (no point re-running from the second segment).
                if not recovered:
                    break
                if (request.segment_recovery_fraction > 0 and i == 0 and
                        len(todo) > 1):
                    seg2 = todo[1]
                    crop_seg = seg2[analysis_box.to_slice()]
                    size2 = np.sum(crop_seg)
                    segmented_voxels2 = np.sum(
                        (crop_prob >= options.segment_threshold) & crop_seg)
                    if segmented_voxels2 / size2 < request.segment_recovery_fraction:
                        break

        canvas.log_info('saving results to %s', target_path)
        with storage.atomic_file(target_path) as fd:
            np.savez_compressed(fd,
                                probs=np.array(probs),
                                raw_probs=np.array(raw_probs),
                                deletes=np.array(deletes),
                                histories=np.array(histories),
                                start_points=start_points,
                                request=request.SerializeToString(),
                                counters=canvas.counters.dumps(),
                                corner_zyx=canvas.corner_zyx,
                                is_shift=is_shift)
        canvas.log_info('.. save complete')

        # Cannot `del canvas` here in Python 2 -- deleting an object referenced
        # in a nested scope is a syntax error.
        canvas._deregister_client()  # pylint:disable=protected-access
def process(request, runner, voxel_size=(1.0, 1.0, 1.0)):
    """Runs resegmentation for every point in the request.

    Args:
        request: ResegmentationRequest proto
        runner: inference Runner object
        voxel_size: (z, y, x) voxel size in physical units, forwarded to
            process_point. Defaults to isotropic unit voxels (the same
            default scipy's EDT uses when no sampling is given).

    NOTE(review): the original code called
    ``process_point(request, runner, i)``, omitting the required
    ``voxel_size`` argument, so every invocation raised TypeError. The
    new keyword parameter keeps the two-argument call signature working.
    """
    num_points = len(request.points)
    for i in range(num_points):
        logging.info('processing %d/%d', i, num_points)
        process_point(request, runner, i, voxel_size)
| {
"content_hash": "4231df00cdb7ecb8e8f65d5790c97a07",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 80,
"avg_line_length": 32.95189003436426,
"alnum_prop": 0.6092397538846595,
"repo_name": "google/ffn",
"id": "fa919a39abac344eade56ca298a8bd02d7ee0317",
"size": "10246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ffn/inference/resegmentation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "5169"
},
{
"name": "Python",
"bytes": "365810"
}
],
"symlink_target": ""
} |
import os.path
import sys
import imp
PY_VERSION = sys.version_info[:2]
import ctypes
from collections import defaultdict
import math
import random
import timeit
import unittest
from flatbuffers import compat
from flatbuffers import util
from flatbuffers.compat import range_func as compat_range
from flatbuffers.compat import NumpyRequiredForThisFeature
import flatbuffers
from flatbuffers import number_types as N
import MyGame # refers to generated code
import MyGame.Example # refers to generated code
import MyGame.Example.Any # refers to generated code
import MyGame.Example.Color # refers to generated code
import MyGame.Example.Monster # refers to generated code
import MyGame.Example.Test # refers to generated code
import MyGame.Example.Stat # refers to generated code
import MyGame.Example.Vec3 # refers to generated code
import MyGame.MonsterExtra # refers to generated code
import MyGame.Example.ArrayTable # refers to generated code
import MyGame.Example.ArrayStruct # refers to generated code
import MyGame.Example.NestedStruct # refers to generated code
import MyGame.Example.TestEnum # refers to generated code
def assertRaises(test_case, fn, exception_class):
    """Backwards-compatible assertion that fn() raises exception_class."""
    caught = None
    try:
        fn()
    except Exception as err:
        caught = err
    test_case.assertTrue(caught is not None)
    test_case.assertTrue(isinstance(caught, exception_class))
class TestWireFormat(unittest.TestCase):
    """Round-trip tests for the generated Python flatbuffers code."""

    def test_wire_format(self):
        # Verify that using the generated Python code builds a buffer without
        # returning errors, and is interpreted correctly, for size prefixed
        # representation and regular:
        for sizePrefix in [True, False]:
            for file_identifier in [None, b"MONS"]:
                gen_buf, gen_off = make_monster_from_generated_code(sizePrefix=sizePrefix, file_identifier=file_identifier)
                CheckReadBuffer(gen_buf, gen_off, sizePrefix=sizePrefix, file_identifier=file_identifier)

        # Verify that the canonical flatbuffer file is readable by the
        # generated Python code. Note that context managers are not part of
        # Python 2.5, so we use the simpler open/close methods here:
        f = open('monsterdata_test.mon', 'rb')
        canonicalWireData = f.read()
        f.close()
        CheckReadBuffer(bytearray(canonicalWireData), 0, file_identifier=b'MONS')

        # Write the generated buffer out to a file:
        # NOTE(review): gen_buf/gen_off still hold the values from the *last*
        # loop iteration (sizePrefix=False, file_identifier=b"MONS") --
        # confirm this is the variant intended to be written to disk.
        f = open('monsterdata_python_wire.mon', 'wb')
        f.write(gen_buf[gen_off:])
        f.close()
def CheckReadBuffer(buf, offset, sizePrefix=False, file_identifier=None):
    ''' CheckReadBuffer checks that the given buffer is evaluated correctly
    as the example Monster. '''
    def asserter(stmt):
        ''' An assertion helper that is separated from TestCase classes. '''
        if not stmt:
            raise AssertionError('CheckReadBuffer case failed')

    if file_identifier:
        # test prior to removal of size_prefix
        asserter(util.GetBufferIdentifier(buf, offset, size_prefixed=sizePrefix) == file_identifier)
        asserter(util.BufferHasIdentifier(buf, offset, file_identifier=file_identifier, size_prefixed=sizePrefix))
    if sizePrefix:
        # The 4-byte prefix encodes the size of the remaining buffer.
        size = util.GetSizePrefix(buf, offset)
        asserter(size == len(buf[offset:])-4)
        buf, offset = util.RemoveSizePrefix(buf, offset)
    if file_identifier:
        asserter(MyGame.Example.Monster.Monster.MonsterBufferHasIdentifier(buf, offset))
    else:
        asserter(not MyGame.Example.Monster.Monster.MonsterBufferHasIdentifier(buf, offset))
    monster = MyGame.Example.Monster.Monster.GetRootAsMonster(buf, offset)

    # Scalar fields of the example Monster.
    asserter(monster.Hp() == 80)
    asserter(monster.Mana() == 150)
    asserter(monster.Name() == b'MyMonster')

    # initialize a Vec3 from Pos()
    vec = monster.Pos()
    asserter(vec is not None)

    # verify the properties of the Vec3
    asserter(vec.X() == 1.0)
    asserter(vec.Y() == 2.0)
    asserter(vec.Z() == 3.0)
    asserter(vec.Test1() == 3.0)
    asserter(vec.Test2() == 2)

    # initialize a Test from Test3(...)
    t = MyGame.Example.Test.Test()
    t = vec.Test3(t)
    asserter(t is not None)

    # verify the properties of the Test
    asserter(t.A() == 5)
    asserter(t.B() == 6)

    # verify that the enum code matches the enum declaration:
    union_type = MyGame.Example.Any.Any
    asserter(monster.TestType() == union_type.Monster)

    # initialize a Table from a union field Test(...)
    table2 = monster.Test()
    asserter(type(table2) is flatbuffers.table.Table)

    # initialize a Monster from the Table from the union
    monster2 = MyGame.Example.Monster.Monster()
    monster2.Init(table2.Bytes, table2.Pos)
    asserter(monster2.Name() == b"Fred")

    # iterate through the first monster's inventory:
    asserter(monster.InventoryLength() == 5)

    invsum = 0
    for i in compat_range(monster.InventoryLength()):
        v = monster.Inventory(i)
        invsum += int(v)
    asserter(invsum == 10)

    for i in range(5):
        asserter(monster.VectorOfLongs(i) == 10 ** (i * 2))

    asserter(([-1.7976931348623157e+308, 0, 1.7976931348623157e+308]
              == [monster.VectorOfDoubles(i)
                  for i in range(monster.VectorOfDoublesLength())]))

    try:
        # imp.find_module raises ImportError when numpy is unavailable,
        # which routes us to the except branch below.
        imp.find_module('numpy')
        # if numpy exists, then we should be able to get the
        # vector as a numpy array
        import numpy as np

        asserter(monster.InventoryAsNumpy().sum() == 10)
        asserter(monster.InventoryAsNumpy().dtype == np.dtype('uint8'))

        VectorOfLongs = monster.VectorOfLongsAsNumpy()
        asserter(VectorOfLongs.dtype == np.dtype('int64'))
        for i in range(5):
            asserter(VectorOfLongs[i] == 10 ** (i * 2))

        VectorOfDoubles = monster.VectorOfDoublesAsNumpy()
        asserter(VectorOfDoubles.dtype == np.dtype('float64'))
        asserter(VectorOfDoubles[0] == np.finfo('float64').min)
        asserter(VectorOfDoubles[1] == 0.0)
        asserter(VectorOfDoubles[2] == np.finfo('float64').max)
    except ImportError:
        # If numpy does not exist, trying to get vector as numpy
        # array should raise NumpyRequiredForThisFeature. The way
        # assertRaises has been implemented prevents us from
        # asserting this error is raised outside of a test case.
        pass

    asserter(monster.Test4Length() == 2)

    # create a 'Test' object and populate it:
    test0 = monster.Test4(0)
    asserter(type(test0) is MyGame.Example.Test.Test)

    test1 = monster.Test4(1)
    asserter(type(test1) is MyGame.Example.Test.Test)

    # the position of test0 and test1 are swapped in monsterdata_java_wire
    # and monsterdata_test_wire, so ignore ordering
    v0 = test0.A()
    v1 = test0.B()
    v2 = test1.A()
    v3 = test1.B()
    sumtest12 = int(v0) + int(v1) + int(v2) + int(v3)

    asserter(sumtest12 == 100)

    asserter(monster.TestarrayofstringLength() == 2)
    asserter(monster.Testarrayofstring(0) == b"test1")
    asserter(monster.Testarrayofstring(1) == b"test2")

    asserter(monster.TestarrayoftablesLength() == 0)
    asserter(monster.TestnestedflatbufferLength() == 0)
    asserter(monster.Testempty() is None)
class TestFuzz(unittest.TestCase):
    ''' Low level stress/fuzz test: serialize/deserialize a variety of
        different kinds of data in different combinations '''

    # Little-endian byte strings whose values overflow the corresponding
    # signed type (high bit of the most significant byte is set).
    binary_type = compat.binary_types[0]  # this will always exist
    ofInt32Bytes = binary_type([0x83, 0x33, 0x33, 0x33])
    ofInt64Bytes = binary_type([0x84, 0x44, 0x44, 0x44,
                                0x44, 0x44, 0x44, 0x44])
    overflowingInt32Val = flatbuffers.encode.Get(flatbuffers.packer.int32,
                                                 ofInt32Bytes, 0)
    overflowingInt64Val = flatbuffers.encode.Get(flatbuffers.packer.int64,
                                                 ofInt64Bytes, 0)

    # Values we're testing against: chosen to ensure no bits get chopped
    # off anywhere, and also be different from each other.
    boolVal = True
    int8Val = N.Int8Flags.py_type(-127)  # 0x81
    uint8Val = N.Uint8Flags.py_type(0xFF)
    int16Val = N.Int16Flags.py_type(-32222)  # 0x8222
    uint16Val = N.Uint16Flags.py_type(0xFEEE)
    int32Val = N.Int32Flags.py_type(overflowingInt32Val)
    uint32Val = N.Uint32Flags.py_type(0xFDDDDDDD)
    int64Val = N.Int64Flags.py_type(overflowingInt64Val)
    uint64Val = N.Uint64Flags.py_type(0xFCCCCCCCCCCCCCCC)
    # Python uses doubles, so force it here
    float32Val = N.Float32Flags.py_type(ctypes.c_float(3.14159).value)
    float64Val = N.Float64Flags.py_type(3.14159265359)

    def test_fuzz(self):
        """Run one fuzz pass: 100 objects of 11 random-typed fields each."""
        return self.check_once(11, 100)

    def check_once(self, fuzzFields, fuzzObjects):
        """Write `fuzzObjects` tables with `fuzzFields` fields of random
        scalar type each, then read them all back and verify the values.

        Correctness relies on the LCG being re-seeded (`LCG()`) before the
        read phase so that the same pseudo-random type sequence is replayed.
        """
        testValuesMax = 11  # hardcoded to the number of scalar types

        builder = flatbuffers.Builder(0)
        l = LCG()

        objects = [0 for _ in compat_range(fuzzObjects)]

        # Generate fuzzObjects random objects each consisting of
        # fuzzFields fields, each of a random type.
        for i in compat_range(fuzzObjects):
            builder.StartObject(fuzzFields)

            for j in compat_range(fuzzFields):
                choice = int(l.Next()) % testValuesMax
                if choice == 0:
                    builder.PrependBoolSlot(int(j), self.boolVal, False)
                elif choice == 1:
                    builder.PrependInt8Slot(int(j), self.int8Val, 0)
                elif choice == 2:
                    builder.PrependUint8Slot(int(j), self.uint8Val, 0)
                elif choice == 3:
                    builder.PrependInt16Slot(int(j), self.int16Val, 0)
                elif choice == 4:
                    builder.PrependUint16Slot(int(j), self.uint16Val, 0)
                elif choice == 5:
                    builder.PrependInt32Slot(int(j), self.int32Val, 0)
                elif choice == 6:
                    builder.PrependUint32Slot(int(j), self.uint32Val, 0)
                elif choice == 7:
                    builder.PrependInt64Slot(int(j), self.int64Val, 0)
                elif choice == 8:
                    builder.PrependUint64Slot(int(j), self.uint64Val, 0)
                elif choice == 9:
                    builder.PrependFloat32Slot(int(j), self.float32Val, 0)
                elif choice == 10:
                    builder.PrependFloat64Slot(int(j), self.float64Val, 0)
                else:
                    raise RuntimeError('unreachable')

            off = builder.EndObject()

            # store the offset from the end of the builder buffer,
            # since it will keep growing:
            objects[i] = off

        # Do some bookkeeping to generate stats on fuzzes:
        stats = defaultdict(int)

        def check(table, desc, want, got):
            stats[desc] += 1
            self.assertEqual(want, got, "%s != %s, %s" % (want, got, desc))

        l = LCG()  # Reset.

        # Test that all objects we generated are readable and return the
        # expected values. We generate random objects in the same order
        # so this is deterministic.
        for i in compat_range(fuzzObjects):
            table = flatbuffers.table.Table(builder.Bytes,
                                            len(builder.Bytes) - objects[i])

            for j in compat_range(fuzzFields):
                # Recompute the vtable offset for field j.
                field_count = flatbuffers.builder.VtableMetadataFields + j
                f = N.VOffsetTFlags.py_type(field_count *
                                            N.VOffsetTFlags.bytewidth)

                choice = int(l.Next()) % testValuesMax

                if choice == 0:
                    check(table, "bool", self.boolVal,
                          table.GetSlot(f, False, N.BoolFlags))
                elif choice == 1:
                    check(table, "int8", self.int8Val,
                          table.GetSlot(f, 0, N.Int8Flags))
                elif choice == 2:
                    check(table, "uint8", self.uint8Val,
                          table.GetSlot(f, 0, N.Uint8Flags))
                elif choice == 3:
                    check(table, "int16", self.int16Val,
                          table.GetSlot(f, 0, N.Int16Flags))
                elif choice == 4:
                    check(table, "uint16", self.uint16Val,
                          table.GetSlot(f, 0, N.Uint16Flags))
                elif choice == 5:
                    check(table, "int32", self.int32Val,
                          table.GetSlot(f, 0, N.Int32Flags))
                elif choice == 6:
                    check(table, "uint32", self.uint32Val,
                          table.GetSlot(f, 0, N.Uint32Flags))
                elif choice == 7:
                    check(table, "int64", self.int64Val,
                          table.GetSlot(f, 0, N.Int64Flags))
                elif choice == 8:
                    check(table, "uint64", self.uint64Val,
                          table.GetSlot(f, 0, N.Uint64Flags))
                elif choice == 9:
                    check(table, "float32", self.float32Val,
                          table.GetSlot(f, 0, N.Float32Flags))
                elif choice == 10:
                    check(table, "float64", self.float64Val,
                          table.GetSlot(f, 0, N.Float64Flags))
                else:
                    raise RuntimeError('unreachable')

        # If enough checks were made, verify that all scalar types were used:
        self.assertEqual(testValuesMax, len(stats),
                         "fuzzing failed to test all scalar types: %s" % stats)
class TestByteLayout(unittest.TestCase):
    ''' TestByteLayout checks the bytes of a Builder in various scenarios. '''

    def assertBuilderEquals(self, builder, want_chars_or_ints):
        """Assert that the used portion of the builder's buffer equals the
        expected byte sequence.

        `want_chars_or_ints` may mix ints and one-character strings; the
        characters are converted to their ordinals before comparing.
        """
        def integerize(x):
            if isinstance(x, compat.string_types):
                return ord(x)
            return x

        want_ints = list(map(integerize, want_chars_or_ints))
        want = bytearray(want_ints)
        got = builder.Bytes[builder.Head():]  # use the buffer directly
        self.assertEqual(want, got)

    def _numpy_or_none(self):
        """Return the numpy module if available, else None.

        NOTE: previously this file probed with `imp.find_module`, but the
        `imp` module was removed in Python 3.12; importing directly is the
        supported way to test availability.
        """
        try:
            import numpy as np
            return np
        except ImportError:
            return None

    def _assert_numpy_required(self):
        """Verify CreateNumpyVector raises when numpy is not installed."""
        b = flatbuffers.Builder(0)
        x = 0
        assertRaises(
            self,
            lambda: b.CreateNumpyVector(x),
            NumpyRequiredForThisFeature)

    def _assert_numpy_vector_bytes(self, x, want):
        """Serialize numpy array `x` and assert the resulting bytes.

        The array is serialized twice: once as-is (system endianness) and
        once byte-swapped with an opposite-byte-order dtype. Both hold the
        same logical values, so both must produce identical buffer bytes.
        """
        b = flatbuffers.Builder(0)
        b.CreateNumpyVector(x)
        self.assertBuilderEquals(b, want)
        # `ndarray.newbyteorder()` was removed in numpy 2.0; build the
        # opposite-endian view via the dtype instead.
        x_other_endian = x.byteswap().view(x.dtype.newbyteorder())
        b = flatbuffers.Builder(0)
        b.CreateNumpyVector(x_other_endian)
        self.assertBuilderEquals(b, want)

    def test_numbers(self):
        b = flatbuffers.Builder(0)
        self.assertBuilderEquals(b, [])
        b.PrependBool(True)
        self.assertBuilderEquals(b, [1])
        b.PrependInt8(-127)
        self.assertBuilderEquals(b, [129, 1])
        b.PrependUint8(255)
        self.assertBuilderEquals(b, [255, 129, 1])
        b.PrependInt16(-32222)
        self.assertBuilderEquals(b, [0x22, 0x82, 0, 255, 129, 1])  # first pad
        b.PrependUint16(0xFEEE)
        # no pad this time:
        self.assertBuilderEquals(b, [0xEE, 0xFE, 0x22, 0x82, 0, 255, 129, 1])
        b.PrependInt32(-53687092)
        self.assertBuilderEquals(b, [204, 204, 204, 252, 0xEE, 0xFE,
                                     0x22, 0x82, 0, 255, 129, 1])
        b.PrependUint32(0x98765432)
        self.assertBuilderEquals(b, [0x32, 0x54, 0x76, 0x98,
                                     204, 204, 204, 252,
                                     0xEE, 0xFE, 0x22, 0x82,
                                     0, 255, 129, 1])

    def test_numbers64(self):
        b = flatbuffers.Builder(0)
        b.PrependUint64(0x1122334455667788)
        self.assertBuilderEquals(b, [0x88, 0x77, 0x66, 0x55,
                                     0x44, 0x33, 0x22, 0x11])

        b = flatbuffers.Builder(0)
        b.PrependInt64(0x1122334455667788)
        self.assertBuilderEquals(b, [0x88, 0x77, 0x66, 0x55,
                                     0x44, 0x33, 0x22, 0x11])

    def test_1xbyte_vector(self):
        b = flatbuffers.Builder(0)
        self.assertBuilderEquals(b, [])
        b.StartVector(flatbuffers.number_types.Uint8Flags.bytewidth, 1, 1)
        self.assertBuilderEquals(b, [0, 0, 0])  # align to 4bytes
        b.PrependByte(1)
        self.assertBuilderEquals(b, [1, 0, 0, 0])
        b.EndVector(1)
        self.assertBuilderEquals(b, [1, 0, 0, 0, 1, 0, 0, 0])  # padding

    def test_2xbyte_vector(self):
        b = flatbuffers.Builder(0)
        b.StartVector(flatbuffers.number_types.Uint8Flags.bytewidth, 2, 1)
        self.assertBuilderEquals(b, [0, 0])  # align to 4bytes
        b.PrependByte(1)
        self.assertBuilderEquals(b, [1, 0, 0])
        b.PrependByte(2)
        self.assertBuilderEquals(b, [2, 1, 0, 0])
        b.EndVector(2)
        self.assertBuilderEquals(b, [2, 0, 0, 0, 2, 1, 0, 0])  # padding

    def test_1xuint16_vector(self):
        b = flatbuffers.Builder(0)
        b.StartVector(flatbuffers.number_types.Uint16Flags.bytewidth, 1, 1)
        self.assertBuilderEquals(b, [0, 0])  # align to 4bytes
        b.PrependUint16(1)
        self.assertBuilderEquals(b, [1, 0, 0, 0])
        b.EndVector(1)
        self.assertBuilderEquals(b, [1, 0, 0, 0, 1, 0, 0, 0])  # padding

    def test_2xuint16_vector(self):
        b = flatbuffers.Builder(0)
        b.StartVector(flatbuffers.number_types.Uint16Flags.bytewidth, 2, 1)
        self.assertBuilderEquals(b, [])  # align to 4bytes
        b.PrependUint16(0xABCD)
        self.assertBuilderEquals(b, [0xCD, 0xAB])
        b.PrependUint16(0xDCBA)
        self.assertBuilderEquals(b, [0xBA, 0xDC, 0xCD, 0xAB])
        b.EndVector(2)
        self.assertBuilderEquals(b, [2, 0, 0, 0, 0xBA, 0xDC, 0xCD, 0xAB])

    def test_create_ascii_string(self):
        b = flatbuffers.Builder(0)
        b.CreateString(u"foo", encoding='ascii')

        # 0-terminated, no pad:
        self.assertBuilderEquals(b, [3, 0, 0, 0, 'f', 'o', 'o', 0])

        b.CreateString(u"moop", encoding='ascii')
        # 0-terminated, 3-byte pad:
        self.assertBuilderEquals(b, [4, 0, 0, 0, 'm', 'o', 'o', 'p',
                                     0, 0, 0, 0,
                                     3, 0, 0, 0, 'f', 'o', 'o', 0])

    def test_create_utf8_string(self):
        b = flatbuffers.Builder(0)
        b.CreateString(u"Цлїςσδε")
        self.assertBuilderEquals(b, "\x0e\x00\x00\x00\xd0\xa6\xd0\xbb\xd1\x97" \
            "\xcf\x82\xcf\x83\xce\xb4\xce\xb5\x00\x00")

        b.CreateString(u"フムアムカモケモ")
        self.assertBuilderEquals(b, "\x18\x00\x00\x00\xef\xbe\x8c\xef\xbe\x91" \
            "\xef\xbd\xb1\xef\xbe\x91\xef\xbd\xb6\xef\xbe\x93\xef\xbd\xb9\xef" \
            "\xbe\x93\x00\x00\x00\x00\x0e\x00\x00\x00\xd0\xa6\xd0\xbb\xd1\x97" \
            "\xcf\x82\xcf\x83\xce\xb4\xce\xb5\x00\x00")

    def test_create_arbitrary_string(self):
        b = flatbuffers.Builder(0)
        s = "\x01\x02\x03"
        b.CreateString(s)  # Default encoding is utf-8.
        # 0-terminated, no pad:
        self.assertBuilderEquals(b, [3, 0, 0, 0, 1, 2, 3, 0])

        s2 = "\x04\x05\x06\x07"
        b.CreateString(s2)  # Default encoding is utf-8.
        # 0-terminated, 3-byte pad:
        self.assertBuilderEquals(b, [4, 0, 0, 0, 4, 5, 6, 7, 0, 0, 0, 0,
                                     3, 0, 0, 0, 1, 2, 3, 0])

    def test_create_byte_vector(self):
        b = flatbuffers.Builder(0)
        b.CreateByteVector(b"")
        # 0-byte pad:
        self.assertBuilderEquals(b, [0, 0, 0, 0])

        b = flatbuffers.Builder(0)
        b.CreateByteVector(b"\x01\x02\x03")
        # 1-byte pad:
        self.assertBuilderEquals(b, [3, 0, 0, 0, 1, 2, 3, 0])

    def test_create_numpy_vector_int8(self):
        np = self._numpy_or_none()
        if np is None:
            self._assert_numpy_required()
            return
        self._assert_numpy_vector_bytes(
            np.array([1, 2, -3], dtype=np.int8),
            [
                3, 0, 0, 0,       # vector length
                1, 2, 256 - 3, 0  # vector value + padding
            ])

    def test_create_numpy_vector_uint16(self):
        np = self._numpy_or_none()
        if np is None:
            self._assert_numpy_required()
            return
        self._assert_numpy_vector_bytes(
            np.array([1, 2, 312], dtype=np.uint16),
            [
                3, 0, 0, 0,    # vector length
                1, 0,          # 1
                2, 0,          # 2
                312 - 256, 1,  # 312
                0, 0           # padding
            ])

    def test_create_numpy_vector_int64(self):
        np = self._numpy_or_none()
        if np is None:
            self._assert_numpy_required()
            return
        self._assert_numpy_vector_bytes(
            np.array([1, 2, -12], dtype=np.int64),
            [
                3, 0, 0, 0,  # vector length
                1, 0, 0, 0, 0, 0, 0, 0,  # 1
                2, 0, 0, 0, 0, 0, 0, 0,  # 2
                256 - 12, 255, 255, 255, 255, 255, 255, 255  # -12
            ])

    def test_create_numpy_vector_float32(self):
        np = self._numpy_or_none()
        if np is None:
            self._assert_numpy_required()
            return
        self._assert_numpy_vector_bytes(
            np.array([1, 2, -12], dtype=np.float32),
            [
                3, 0, 0, 0,     # vector length
                0, 0, 128, 63,  # 1
                0, 0, 0, 64,    # 2
                0, 0, 64, 193   # -12
            ])

    def test_create_numpy_vector_float64(self):
        np = self._numpy_or_none()
        if np is None:
            self._assert_numpy_required()
            return
        self._assert_numpy_vector_bytes(
            np.array([1, 2, -12], dtype=np.float64),
            [
                3, 0, 0, 0,  # vector length
                0, 0, 0, 0, 0, 0, 240, 63,  # 1
                0, 0, 0, 0, 0, 0, 0, 64,    # 2
                0, 0, 0, 0, 0, 0, 40, 192   # -12
            ])

    def test_create_numpy_vector_bool(self):
        np = self._numpy_or_none()
        if np is None:
            self._assert_numpy_required()
            return
        # `np.bool` was removed from numpy; the builtin `bool` maps to
        # numpy's boolean dtype.
        self._assert_numpy_vector_bytes(
            np.array([True, False, True], dtype=bool),
            [
                3, 0, 0, 0,  # vector length
                1, 0, 1, 0   # vector values + padding
            ])

    def test_create_numpy_vector_reject_strings(self):
        np = self._numpy_or_none()
        if np is None:
            self._assert_numpy_required()
            return
        # String arrays are not serializable as scalar vectors.
        b = flatbuffers.Builder(0)
        x = np.array(["hello", "fb", "testing"])
        assertRaises(
            self,
            lambda: b.CreateNumpyVector(x),
            TypeError)

    def test_create_numpy_vector_reject_object(self):
        np = self._numpy_or_none()
        if np is None:
            self._assert_numpy_required()
            return
        # Object arrays are not serializable as scalar vectors.
        b = flatbuffers.Builder(0)
        x = np.array([{"m": 0}, {"as": -2.1, 'c': 'c'}])
        assertRaises(
            self,
            lambda: b.CreateNumpyVector(x),
            TypeError)

    def test_empty_vtable(self):
        b = flatbuffers.Builder(0)
        b.StartObject(0)
        self.assertBuilderEquals(b, [])
        b.EndObject()
        self.assertBuilderEquals(b, [4, 0, 4, 0, 4, 0, 0, 0])

    def test_vtable_with_one_true_bool(self):
        b = flatbuffers.Builder(0)
        self.assertBuilderEquals(b, [])
        b.StartObject(1)
        self.assertBuilderEquals(b, [])
        b.PrependBoolSlot(0, True, False)
        b.EndObject()
        self.assertBuilderEquals(b, [
            6, 0,  # vtable bytes
            8, 0,  # length of object including vtable offset
            7, 0,  # start of bool value
            6, 0, 0, 0,  # offset for start of vtable (int32)
            0, 0, 0,  # padded to 4 bytes
            1,  # bool value
        ])

    def test_vtable_with_one_default_bool(self):
        b = flatbuffers.Builder(0)
        self.assertBuilderEquals(b, [])
        b.StartObject(1)
        self.assertBuilderEquals(b, [])
        b.PrependBoolSlot(0, False, False)
        b.EndObject()
        self.assertBuilderEquals(b, [
            4, 0,  # vtable bytes
            4, 0,  # end of object from here
            # entry 1 is zero and not stored
            4, 0, 0, 0,  # offset for start of vtable (int32)
        ])

    def test_vtable_with_one_int16(self):
        b = flatbuffers.Builder(0)
        b.StartObject(1)
        b.PrependInt16Slot(0, 0x789A, 0)
        b.EndObject()
        self.assertBuilderEquals(b, [
            6, 0,  # vtable bytes
            8, 0,  # end of object from here
            6, 0,  # offset to value
            6, 0, 0, 0,  # offset for start of vtable (int32)
            0, 0,  # padding to 4 bytes
            0x9A, 0x78,
        ])

    def test_vtable_with_two_int16(self):
        b = flatbuffers.Builder(0)
        b.StartObject(2)
        b.PrependInt16Slot(0, 0x3456, 0)
        b.PrependInt16Slot(1, 0x789A, 0)
        b.EndObject()
        self.assertBuilderEquals(b, [
            8, 0,  # vtable bytes
            8, 0,  # end of object from here
            6, 0,  # offset to value 0
            4, 0,  # offset to value 1
            8, 0, 0, 0,  # offset for start of vtable (int32)
            0x9A, 0x78,  # value 1
            0x56, 0x34,  # value 0
        ])

    def test_vtable_with_int16_and_bool(self):
        b = flatbuffers.Builder(0)
        b.StartObject(2)
        b.PrependInt16Slot(0, 0x3456, 0)
        b.PrependBoolSlot(1, True, False)
        b.EndObject()
        self.assertBuilderEquals(b, [
            8, 0,  # vtable bytes
            8, 0,  # end of object from here
            6, 0,  # offset to value 0
            5, 0,  # offset to value 1
            8, 0, 0, 0,  # offset for start of vtable (int32)
            0,  # padding
            1,  # value 1
            0x56, 0x34,  # value 0
        ])

    def test_vtable_with_empty_vector(self):
        b = flatbuffers.Builder(0)
        b.StartVector(flatbuffers.number_types.Uint8Flags.bytewidth, 0, 1)
        vecend = b.EndVector(0)
        b.StartObject(1)
        b.PrependUOffsetTRelativeSlot(0, vecend, 0)
        b.EndObject()
        self.assertBuilderEquals(b, [
            6, 0,  # vtable bytes
            8, 0,
            4, 0,  # offset to vector offset
            6, 0, 0, 0,  # offset for start of vtable (int32)
            4, 0, 0, 0,
            0, 0, 0, 0,  # length of vector (not in struct)
        ])

    def test_vtable_with_empty_vector_of_byte_and_some_scalars(self):
        b = flatbuffers.Builder(0)
        b.StartVector(flatbuffers.number_types.Uint8Flags.bytewidth, 0, 1)
        vecend = b.EndVector(0)
        b.StartObject(2)
        b.PrependInt16Slot(0, 55, 0)
        b.PrependUOffsetTRelativeSlot(1, vecend, 0)
        b.EndObject()
        self.assertBuilderEquals(b, [
            8, 0,  # vtable bytes
            12, 0,
            10, 0,  # offset to value 0
            4, 0,  # offset to vector offset
            8, 0, 0, 0,  # vtable loc
            8, 0, 0, 0,  # value 1
            0, 0, 55, 0,  # value 0
            0, 0, 0, 0,  # length of vector (not in struct)
        ])

    def test_vtable_with_1_int16_and_2vector_of_int16(self):
        b = flatbuffers.Builder(0)
        b.StartVector(flatbuffers.number_types.Int16Flags.bytewidth, 2, 1)
        b.PrependInt16(0x1234)
        b.PrependInt16(0x5678)
        vecend = b.EndVector(2)
        b.StartObject(2)
        b.PrependUOffsetTRelativeSlot(1, vecend, 0)
        b.PrependInt16Slot(0, 55, 0)
        b.EndObject()
        self.assertBuilderEquals(b, [
            8, 0,  # vtable bytes
            12, 0,  # length of object
            6, 0,  # start of value 0 from end of vtable
            8, 0,  # start of value 1 from end of buffer
            8, 0, 0, 0,  # offset for start of vtable (int32)
            0, 0,  # padding
            55, 0,  # value 0
            4, 0, 0, 0,  # vector position from here
            2, 0, 0, 0,  # length of vector (uint32)
            0x78, 0x56,  # vector value 1
            0x34, 0x12,  # vector value 0
        ])

    def test_vtable_with_1_struct_of_1_int8__1_int16__1_int32(self):
        b = flatbuffers.Builder(0)
        b.StartObject(1)
        b.Prep(4+4+4, 0)
        b.PrependInt8(55)
        b.Pad(3)
        b.PrependInt16(0x1234)
        b.Pad(2)
        b.PrependInt32(0x12345678)
        structStart = b.Offset()
        b.PrependStructSlot(0, structStart, 0)
        b.EndObject()
        self.assertBuilderEquals(b, [
            6, 0,  # vtable bytes
            16, 0,  # end of object from here
            4, 0,  # start of struct from here
            6, 0, 0, 0,  # offset for start of vtable (int32)
            0x78, 0x56, 0x34, 0x12,  # value 2
            0, 0,  # padding
            0x34, 0x12,  # value 1
            0, 0, 0,  # padding
            55,  # value 0
        ])

    def test_vtable_with_1_vector_of_2_struct_of_2_int8(self):
        b = flatbuffers.Builder(0)
        b.StartVector(flatbuffers.number_types.Int8Flags.bytewidth*2, 2, 1)
        b.PrependInt8(33)
        b.PrependInt8(44)
        b.PrependInt8(55)
        b.PrependInt8(66)
        vecend = b.EndVector(2)
        b.StartObject(1)
        b.PrependUOffsetTRelativeSlot(0, vecend, 0)
        b.EndObject()
        self.assertBuilderEquals(b, [
            6, 0,  # vtable bytes
            8, 0,
            4, 0,  # offset of vector offset
            6, 0, 0, 0,  # offset for start of vtable (int32)
            4, 0, 0, 0,  # vector start offset
            2, 0, 0, 0,  # vector length
            66,  # vector value 1,1
            55,  # vector value 1,0
            44,  # vector value 0,1
            33,  # vector value 0,0
        ])

    def test_table_with_some_elements(self):
        b = flatbuffers.Builder(0)
        b.StartObject(2)
        b.PrependInt8Slot(0, 33, 0)
        b.PrependInt16Slot(1, 66, 0)
        off = b.EndObject()
        b.Finish(off)
        self.assertBuilderEquals(b, [
            12, 0, 0, 0,  # root of table: points to vtable offset
            8, 0,  # vtable bytes
            8, 0,  # end of object from here
            7, 0,  # start of value 0
            4, 0,  # start of value 1
            8, 0, 0, 0,  # offset for start of vtable (int32)
            66, 0,  # value 1
            0,  # padding
            33,  # value 0
        ])

    def test__one_unfinished_table_and_one_finished_table(self):
        b = flatbuffers.Builder(0)
        b.StartObject(2)
        b.PrependInt8Slot(0, 33, 0)
        b.PrependInt8Slot(1, 44, 0)
        off = b.EndObject()
        b.Finish(off)

        b.StartObject(3)
        b.PrependInt8Slot(0, 55, 0)
        b.PrependInt8Slot(1, 66, 0)
        b.PrependInt8Slot(2, 77, 0)
        off = b.EndObject()
        b.Finish(off)

        self.assertBuilderEquals(b, [
            16, 0, 0, 0,  # root of table: points to object
            0, 0,  # padding
            10, 0,  # vtable bytes
            8, 0,  # size of object
            7, 0,  # start of value 0
            6, 0,  # start of value 1
            5, 0,  # start of value 2
            10, 0, 0, 0,  # offset for start of vtable (int32)
            0,  # padding
            77,  # value 2
            66,  # value 1
            55,  # value 0
            12, 0, 0, 0,  # root of table: points to object
            8, 0,  # vtable bytes
            8, 0,  # size of object
            7, 0,  # start of value 0
            6, 0,  # start of value 1
            8, 0, 0, 0,  # offset for start of vtable (int32)
            0, 0,  # padding
            44,  # value 1
            33,  # value 0
        ])

    def test_a_bunch_of_bools(self):
        b = flatbuffers.Builder(0)
        b.StartObject(8)
        b.PrependBoolSlot(0, True, False)
        b.PrependBoolSlot(1, True, False)
        b.PrependBoolSlot(2, True, False)
        b.PrependBoolSlot(3, True, False)
        b.PrependBoolSlot(4, True, False)
        b.PrependBoolSlot(5, True, False)
        b.PrependBoolSlot(6, True, False)
        b.PrependBoolSlot(7, True, False)
        off = b.EndObject()
        b.Finish(off)
        self.assertBuilderEquals(b, [
            24, 0, 0, 0,  # root of table: points to vtable offset
            20, 0,  # vtable bytes
            12, 0,  # size of object
            11, 0,  # start of value 0
            10, 0,  # start of value 1
            9, 0,  # start of value 2
            8, 0,  # start of value 3
            7, 0,  # start of value 4
            6, 0,  # start of value 5
            5, 0,  # start of value 6
            4, 0,  # start of value 7
            20, 0, 0, 0,  # vtable offset
            1,  # value 7
            1,  # value 6
            1,  # value 5
            1,  # value 4
            1,  # value 3
            1,  # value 2
            1,  # value 1
            1,  # value 0
        ])

    def test_three_bools(self):
        b = flatbuffers.Builder(0)
        b.StartObject(3)
        b.PrependBoolSlot(0, True, False)
        b.PrependBoolSlot(1, True, False)
        b.PrependBoolSlot(2, True, False)
        off = b.EndObject()
        b.Finish(off)
        self.assertBuilderEquals(b, [
            16, 0, 0, 0,  # root of table: points to vtable offset
            0, 0,  # padding
            10, 0,  # vtable bytes
            8, 0,  # size of object
            7, 0,  # start of value 0
            6, 0,  # start of value 1
            5, 0,  # start of value 2
            10, 0, 0, 0,  # vtable offset from here
            0,  # padding
            1,  # value 2
            1,  # value 1
            1,  # value 0
        ])

    def test_some_floats(self):
        b = flatbuffers.Builder(0)
        b.StartObject(1)
        b.PrependFloat32Slot(0, 1.0, 0.0)
        b.EndObject()  # offset intentionally unused: layout checked directly
        self.assertBuilderEquals(b, [
            6, 0,  # vtable bytes
            8, 0,  # size of object
            4, 0,  # start of value 0
            6, 0, 0, 0,  # vtable offset
            0, 0, 128, 63,  # value 0
        ])
def make_monster_from_generated_code(sizePrefix = False, file_identifier=None):
    ''' Use generated code to build the example Monster.

    Returns (bytes, head) for the finished buffer. When `sizePrefix` is
    True the buffer is finished with a 4-byte size prefix; an optional
    `file_identifier` is embedded either way.

    NOTE: all vectors are built with Prepend*, so elements are pushed in
    reverse of their final order.
    '''

    b = flatbuffers.Builder(0)
    # Strings must be created before the table that references them.
    string = b.CreateString("MyMonster")
    test1 = b.CreateString("test1")
    test2 = b.CreateString("test2")
    fred = b.CreateString("Fred")

    # Inventory vector: reads back as [0, 1, 2, 3, 4].
    MyGame.Example.Monster.MonsterStartInventoryVector(b, 5)
    b.PrependByte(4)
    b.PrependByte(3)
    b.PrependByte(2)
    b.PrependByte(1)
    b.PrependByte(0)
    inv = b.EndVector(5)

    # A nested Monster ("Fred") used as the union `Test` value.
    MyGame.Example.Monster.MonsterStart(b)
    MyGame.Example.Monster.MonsterAddName(b, fred)
    mon2 = MyGame.Example.Monster.MonsterEnd(b)

    # Vector of two inline Test structs.
    MyGame.Example.Monster.MonsterStartTest4Vector(b, 2)
    MyGame.Example.Test.CreateTest(b, 10, 20)
    MyGame.Example.Test.CreateTest(b, 30, 40)
    test4 = b.EndVector(2)

    # Vector of string offsets: reads back as ["test1", "test2"].
    MyGame.Example.Monster.MonsterStartTestarrayofstringVector(b, 2)
    b.PrependUOffsetTRelative(test2)
    b.PrependUOffsetTRelative(test1)
    testArrayOfString = b.EndVector(2)

    # Vector of longs: reads back as [1, 100, 10000, 1000000, 100000000].
    MyGame.Example.Monster.MonsterStartVectorOfLongsVector(b, 5)
    b.PrependInt64(100000000)
    b.PrependInt64(1000000)
    b.PrependInt64(10000)
    b.PrependInt64(100)
    b.PrependInt64(1)
    VectorOfLongs = b.EndVector(5)

    # Vector of doubles: reads back as [-max, 0, +max].
    MyGame.Example.Monster.MonsterStartVectorOfDoublesVector(b, 3)
    b.PrependFloat64(1.7976931348623157e+308)
    b.PrependFloat64(0)
    b.PrependFloat64(-1.7976931348623157e+308)
    VectorOfDoubles = b.EndVector(3)

    # Assemble the root Monster table.
    MyGame.Example.Monster.MonsterStart(b)

    pos = MyGame.Example.Vec3.CreateVec3(b, 1.0, 2.0, 3.0, 3.0, 2, 5, 6)
    MyGame.Example.Monster.MonsterAddPos(b, pos)

    MyGame.Example.Monster.MonsterAddHp(b, 80)
    MyGame.Example.Monster.MonsterAddName(b, string)
    MyGame.Example.Monster.MonsterAddInventory(b, inv)
    MyGame.Example.Monster.MonsterAddTestType(b, 1)  # union discriminator
    MyGame.Example.Monster.MonsterAddTest(b, mon2)
    MyGame.Example.Monster.MonsterAddTest4(b, test4)
    MyGame.Example.Monster.MonsterAddTestarrayofstring(b, testArrayOfString)
    MyGame.Example.Monster.MonsterAddVectorOfLongs(b, VectorOfLongs)
    MyGame.Example.Monster.MonsterAddVectorOfDoubles(b, VectorOfDoubles)
    mon = MyGame.Example.Monster.MonsterEnd(b)

    if sizePrefix:
        b.FinishSizePrefixed(mon, file_identifier)
    else:
        b.Finish(mon, file_identifier)

    return b.Bytes, b.Head()
class TestAllCodePathsOfExampleSchema(unittest.TestCase):
def setUp(self, *args, **kwargs):
    """Build a Monster with every field left at its default for the
    default-value tests below."""
    super(TestAllCodePathsOfExampleSchema, self).setUp(*args, **kwargs)

    b = flatbuffers.Builder(0)
    MyGame.Example.Monster.MonsterStart(b)
    gen_mon = MyGame.Example.Monster.MonsterEnd(b)
    b.Finish(gen_mon)

    self.mon = MyGame.Example.Monster.Monster.GetRootAsMonster(b.Bytes,
                                                               b.Head())
def test_default_monster_pos(self):
    """An unset struct field reads back as None."""
    self.assertTrue(self.mon.Pos() is None)
def test_nondefault_monster_mana(self):
    """An explicitly-set scalar field round-trips its value."""
    b = flatbuffers.Builder(0)
    MyGame.Example.Monster.MonsterStart(b)
    MyGame.Example.Monster.MonsterAddMana(b, 50)
    mon = MyGame.Example.Monster.MonsterEnd(b)
    b.Finish(mon)

    got_mon = MyGame.Example.Monster.Monster.GetRootAsMonster(b.Bytes,
                                                              b.Head())
    self.assertEqual(50, got_mon.Mana())
def test_default_monster_hp(self):
    """Unset hp reads back as its schema default, 100."""
    self.assertEqual(100, self.mon.Hp())
def test_default_monster_name(self):
    """An unset string field reads back as None."""
    self.assertEqual(None, self.mon.Name())
def test_default_monster_inventory_item(self):
    """Indexing an absent vector yields the scalar default 0."""
    self.assertEqual(0, self.mon.Inventory(0))
def test_default_monster_inventory_length(self):
    """An absent vector has length 0."""
    self.assertEqual(0, self.mon.InventoryLength())
def test_default_monster_color(self):
    """Unset color reads back as its schema default, Blue."""
    self.assertEqual(MyGame.Example.Color.Color.Blue, self.mon.Color())
def test_nondefault_monster_color(self):
    """An explicitly-set enum field round-trips its value."""
    b = flatbuffers.Builder(0)
    color = MyGame.Example.Color.Color.Red
    MyGame.Example.Monster.MonsterStart(b)
    MyGame.Example.Monster.MonsterAddColor(b, color)
    mon = MyGame.Example.Monster.MonsterEnd(b)
    b.Finish(mon)

    mon2 = MyGame.Example.Monster.Monster.GetRootAsMonster(b.Bytes,
                                                           b.Head())
    self.assertEqual(MyGame.Example.Color.Color.Red, mon2.Color())
def test_default_monster_testtype(self):
    """An unset union discriminator reads back as 0 (NONE)."""
    self.assertEqual(0, self.mon.TestType())
def test_default_monster_test_field(self):
    """An unset union value reads back as None."""
    self.assertEqual(None, self.mon.Test())
def test_default_monster_test4_item(self):
    """Indexing an absent struct vector yields None."""
    self.assertEqual(None, self.mon.Test4(0))
def test_default_monster_test4_length(self):
    """An absent struct vector has length 0."""
    self.assertEqual(0, self.mon.Test4Length())
def test_default_monster_testarrayofstring(self):
    """Indexing an absent string vector yields the empty string."""
    self.assertEqual("", self.mon.Testarrayofstring(0))
def test_default_monster_testarrayofstring_length(self):
    """An absent string vector has length 0."""
    self.assertEqual(0, self.mon.TestarrayofstringLength())
def test_default_monster_testarrayoftables(self):
    """Indexing an absent table vector yields None."""
    self.assertEqual(None, self.mon.Testarrayoftables(0))
def test_nondefault_monster_testarrayoftables(self):
    """A one-element vector of Monster tables round-trips."""
    b = flatbuffers.Builder(0)

    # make a child Monster within a vector of Monsters:
    MyGame.Example.Monster.MonsterStart(b)
    MyGame.Example.Monster.MonsterAddHp(b, 99)
    sub_monster = MyGame.Example.Monster.MonsterEnd(b)

    # build the vector:
    MyGame.Example.Monster.MonsterStartTestarrayoftablesVector(b, 1)
    b.PrependUOffsetTRelative(sub_monster)
    vec = b.EndVector(1)

    # make the parent monster and include the vector of Monster:
    MyGame.Example.Monster.MonsterStart(b)
    MyGame.Example.Monster.MonsterAddTestarrayoftables(b, vec)
    mon = MyGame.Example.Monster.MonsterEnd(b)
    b.Finish(mon)

    # inspect the resulting data:
    mon2 = MyGame.Example.Monster.Monster.GetRootAsMonster(b.Output(), 0)
    self.assertEqual(99, mon2.Testarrayoftables(0).Hp())
    self.assertEqual(1, mon2.TestarrayoftablesLength())
def test_default_monster_testarrayoftables_length(self):
    """An absent table vector has length 0."""
    self.assertEqual(0, self.mon.TestarrayoftablesLength())
def test_nondefault_monster_enemy(self):
    """A table-typed field referencing another Monster round-trips."""
    b = flatbuffers.Builder(0)

    # make an Enemy object:
    MyGame.Example.Monster.MonsterStart(b)
    MyGame.Example.Monster.MonsterAddHp(b, 88)
    enemy = MyGame.Example.Monster.MonsterEnd(b)
    b.Finish(enemy)

    # make the parent monster and include the vector of Monster:
    MyGame.Example.Monster.MonsterStart(b)
    MyGame.Example.Monster.MonsterAddEnemy(b, enemy)
    mon = MyGame.Example.Monster.MonsterEnd(b)
    b.Finish(mon)

    # inspect the resulting data:
    mon2 = MyGame.Example.Monster.Monster.GetRootAsMonster(b.Bytes,
                                                           b.Head())
    self.assertEqual(88, mon2.Enemy().Hp())
def test_default_monster_testnestedflatbuffer(self):
    """Indexing an absent nested-flatbuffer vector yields 0."""
    self.assertEqual(0, self.mon.Testnestedflatbuffer(0))
def test_default_monster_testnestedflatbuffer_length(self):
    """An absent nested-flatbuffer vector has length 0."""
    self.assertEqual(0, self.mon.TestnestedflatbufferLength())
def test_nondefault_monster_testnestedflatbuffer(self):
    """A byte vector stored as a nested flatbuffer round-trips, both via
    indexed access and (when numpy is installed) via the AsNumpy view."""
    b = flatbuffers.Builder(0)

    # Prepend in reverse so the vector reads back as [0, 2, 4].
    MyGame.Example.Monster.MonsterStartTestnestedflatbufferVector(b, 3)
    b.PrependByte(4)
    b.PrependByte(2)
    b.PrependByte(0)
    sub_buf = b.EndVector(3)

    # make the parent monster and include the vector of Monster:
    MyGame.Example.Monster.MonsterStart(b)
    MyGame.Example.Monster.MonsterAddTestnestedflatbuffer(b, sub_buf)
    mon = MyGame.Example.Monster.MonsterEnd(b)
    b.Finish(mon)

    # inspect the resulting data:
    mon2 = MyGame.Example.Monster.Monster.GetRootAsMonster(b.Bytes,
                                                           b.Head())
    self.assertEqual(3, mon2.TestnestedflatbufferLength())
    self.assertEqual(0, mon2.Testnestedflatbuffer(0))
    self.assertEqual(2, mon2.Testnestedflatbuffer(1))
    self.assertEqual(4, mon2.Testnestedflatbuffer(2))

    # Probe numpy availability by importing it directly: the previously
    # used `imp` module was removed in Python 3.12.
    try:
        import numpy  # noqa: F401
        # if numpy exists, then we should be able to get the
        # vector as a numpy array
        self.assertEqual([0, 2, 4], mon2.TestnestedflatbufferAsNumpy().tolist())
    except ImportError:
        assertRaises(self,
                     lambda: mon2.TestnestedflatbufferAsNumpy(),
                     NumpyRequiredForThisFeature)
def test_nondefault_monster_testempty(self):
    """A Stat table referenced by the `testempty` field round-trips."""
    b = flatbuffers.Builder(0)

    # make a Stat object:
    MyGame.Example.Stat.StatStart(b)
    MyGame.Example.Stat.StatAddVal(b, 123)
    my_stat = MyGame.Example.Stat.StatEnd(b)
    b.Finish(my_stat)

    # include the stat object in a monster:
    MyGame.Example.Monster.MonsterStart(b)
    MyGame.Example.Monster.MonsterAddTestempty(b, my_stat)
    mon = MyGame.Example.Monster.MonsterEnd(b)
    b.Finish(mon)

    # inspect the resulting data:
    mon2 = MyGame.Example.Monster.Monster.GetRootAsMonster(b.Bytes,
                                                           b.Head())
    self.assertEqual(123, mon2.Testempty().Val())
def test_default_monster_testbool(self):
    """Unset bool field reads back as False."""
    self.assertFalse(self.mon.Testbool())
def test_nondefault_monster_testbool(self):
    """A True testbool value survives a serialization round trip."""
    builder = flatbuffers.Builder(0)
    MyGame.Example.Monster.MonsterStart(builder)
    MyGame.Example.Monster.MonsterAddTestbool(builder, True)
    monster_offset = MyGame.Example.Monster.MonsterEnd(builder)
    builder.Finish(monster_offset)
    # Read the buffer back and confirm the flag is set.
    monster = MyGame.Example.Monster.Monster.GetRootAsMonster(
        builder.Bytes, builder.Head())
    self.assertTrue(monster.Testbool())
def test_default_monster_testhashes(self):
    """All eight FNV hash fields read back as their default of 0."""
    for accessor in (self.mon.Testhashs32Fnv1, self.mon.Testhashu32Fnv1,
                     self.mon.Testhashs64Fnv1, self.mon.Testhashu64Fnv1,
                     self.mon.Testhashs32Fnv1a, self.mon.Testhashu32Fnv1a,
                     self.mon.Testhashs64Fnv1a, self.mon.Testhashu64Fnv1a):
        self.assertEqual(0, accessor())
def test_nondefault_monster_testhashes(self):
    """Non-default values in the eight hash fields round-trip intact."""
    # Field name suffixes in the same order as the original write calls;
    # field i (1-based) is written with value i.
    suffixes = ('s32Fnv1', 'u32Fnv1', 's64Fnv1', 'u64Fnv1',
                's32Fnv1a', 'u32Fnv1a', 's64Fnv1a', 'u64Fnv1a')
    builder = flatbuffers.Builder(0)
    MyGame.Example.Monster.MonsterStart(builder)
    for value, suffix in enumerate(suffixes, start=1):
        adder = getattr(MyGame.Example.Monster, 'MonsterAddTesthash' + suffix)
        adder(builder, value)
    monster_offset = MyGame.Example.Monster.MonsterEnd(builder)
    builder.Finish(monster_offset)
    # Read the buffer back and verify each field kept its distinct value.
    monster = MyGame.Example.Monster.Monster.GetRootAsMonster(
        builder.Bytes, builder.Head())
    for expected, suffix in enumerate(suffixes, start=1):
        reader = getattr(monster, 'Testhash' + suffix)
        self.assertEqual(expected, reader())
def test_getrootas_for_nonroot_table(self):
    """GetRootAsStat works even though Stat is not the schema's root type."""
    builder = flatbuffers.Builder(0)
    id_offset = builder.CreateString("MyStat")
    MyGame.Example.Stat.StatStart(builder)
    MyGame.Example.Stat.StatAddId(builder, id_offset)
    MyGame.Example.Stat.StatAddVal(builder, 12345678)
    MyGame.Example.Stat.StatAddCount(builder, 12345)
    stat_offset = MyGame.Example.Stat.StatEnd(builder)
    builder.Finish(stat_offset)
    # Re-read the Stat as a root table and verify every field.
    stat = MyGame.Example.Stat.Stat.GetRootAsStat(builder.Bytes,
                                                  builder.Head())
    self.assertEqual(b"MyStat", stat.Id())
    self.assertEqual(12345678, stat.Val())
    self.assertEqual(12345, stat.Count())
class TestAllCodePathsOfMonsterExtraSchema(unittest.TestCase):
    """Checks the NaN/inf default values of the MonsterExtra schema."""

    def setUp(self, *args, **kwargs):
        super(TestAllCodePathsOfMonsterExtraSchema, self).setUp(*args, **kwargs)
        builder = flatbuffers.Builder(0)
        # Serialize an empty MonsterExtra so every field takes its default.
        MyGame.MonsterExtra.MonsterExtraStart(builder)
        monster_offset = MyGame.MonsterExtra.MonsterExtraEnd(builder)
        builder.Finish(monster_offset)
        self.mon = MyGame.MonsterExtra.MonsterExtra.GetRootAsMonsterExtra(
            builder.Bytes, builder.Head())

    def test_default_nan_inf(self):
        """Float and double fields default to nan, +inf and -inf."""
        for nan_get, inf_get, ninf_get in ((self.mon.F1, self.mon.F2, self.mon.F3),
                                           (self.mon.D1, self.mon.D2, self.mon.D3)):
            self.assertTrue(math.isnan(nan_get()))
            self.assertEqual(inf_get(), float("inf"))
            self.assertEqual(ninf_get(), float("-inf"))
class TestVtableDeduplication(unittest.TestCase):
    ''' TestVtableDeduplication verifies that vtables are deduplicated. '''

    def test_vtable_deduplication(self):
        """Three objects with an identical 4-slot layout share one vtable."""
        b = flatbuffers.Builder(0)

        # First object: slot 0 is written with its default (0), so it is
        # not stored (its vtable offset entry below is `0, 0`); slots 1-3
        # carry non-default values.  This emits the first (and only) vtable.
        b.StartObject(4)
        b.PrependByteSlot(0, 0, 0)
        b.PrependByteSlot(1, 11, 0)
        b.PrependByteSlot(2, 22, 0)
        b.PrependInt16Slot(3, 33, 0)
        obj0 = b.EndObject()

        # Second object, same layout: must reuse obj0's vtable.
        b.StartObject(4)
        b.PrependByteSlot(0, 0, 0)
        b.PrependByteSlot(1, 44, 0)
        b.PrependByteSlot(2, 55, 0)
        b.PrependInt16Slot(3, 66, 0)
        obj1 = b.EndObject()

        # Third object, same layout again: also reuses the vtable.
        b.StartObject(4)
        b.PrependByteSlot(0, 0, 0)
        b.PrependByteSlot(1, 77, 0)
        b.PrependByteSlot(2, 88, 0)
        b.PrependInt16Slot(3, 99, 0)
        obj2 = b.EndObject()

        got = b.Bytes[b.Head():]

        # Expected raw buffer: two later objects point *back* at the one
        # shared vtable via negative soffsets, followed by the vtable and
        # the first object's data.
        want = bytearray([
            240, 255, 255, 255,  # == -12. offset to dedupped vtable.
            99, 0,
            88,
            77,
            248, 255, 255, 255,  # == -8. offset to dedupped vtable.
            66, 0,
            55,
            44,
            12, 0,       # vtable size in bytes
            8, 0,        # object size in bytes
            0, 0,        # slot 0: absent (default value was used)
            7, 0,        # slot 1 offset within the object
            6, 0,        # slot 2 offset
            4, 0,        # slot 3 offset
            12, 0, 0, 0, # obj0's soffset to the vtable
            33, 0,
            22,
            11,
        ])
        self.assertEqual((len(want), want), (len(got), got))

        # Wrap each object as a Table for slot-level inspection.
        table0 = flatbuffers.table.Table(b.Bytes, len(b.Bytes) - obj0)
        table1 = flatbuffers.table.Table(b.Bytes, len(b.Bytes) - obj1)
        table2 = flatbuffers.table.Table(b.Bytes, len(b.Bytes) - obj2)

        def _checkTable(tab, voffsett_value, b, c, d):
            """Assert the shared vtable header and the per-object slot
            values b, c, d (slot 0 stays default via `voffsett_value`).
            Note: parameter `b` shadows the outer builder name here."""
            # vtable size
            got = tab.GetVOffsetTSlot(0, 0)
            self.assertEqual(12, got, 'case 0, 0')
            # object size
            got = tab.GetVOffsetTSlot(2, 0)
            self.assertEqual(8, got, 'case 2, 0')
            # default value
            got = tab.GetVOffsetTSlot(4, 0)
            self.assertEqual(voffsett_value, got, 'case 4, 0')
            got = tab.GetSlot(6, 0, N.Uint8Flags)
            self.assertEqual(b, got, 'case 6, 0')
            val = tab.GetSlot(8, 0, N.Uint8Flags)
            self.assertEqual(c, val, 'failed 8, 0')
            got = tab.GetSlot(10, 0, N.Uint8Flags)
            self.assertEqual(d, got, 'failed 10, 0')

        _checkTable(table0, 0, 11, 22, 33)
        _checkTable(table1, 0, 44, 55, 66)
        _checkTable(table2, 0, 77, 88, 99)
class TestExceptions(unittest.TestCase):
    """Each Builder misuse raises its dedicated exception type."""

    def test_object_is_nested_error(self):
        builder = flatbuffers.Builder(0)
        builder.StartObject(0)
        # Starting a second object while one is still open is an error.
        assertRaises(self, lambda: builder.StartObject(0),
                     flatbuffers.builder.IsNestedError)

    def test_object_is_not_nested_error(self):
        # Ending an object when none was started is an error.
        builder = flatbuffers.Builder(0)
        assertRaises(self, lambda: builder.EndObject(),
                     flatbuffers.builder.IsNotNestedError)

    def test_struct_is_not_inline_error(self):
        builder = flatbuffers.Builder(0)
        builder.StartObject(0)
        assertRaises(self, lambda: builder.PrependStructSlot(0, 1, 0),
                     flatbuffers.builder.StructIsNotInlineError)

    def test_unreachable_error(self):
        builder = flatbuffers.Builder(0)
        assertRaises(self, lambda: builder.PrependUOffsetTRelative(1),
                     flatbuffers.builder.OffsetArithmeticError)

    def test_create_string_is_nested_error(self):
        builder = flatbuffers.Builder(0)
        builder.StartObject(0)
        payload = 'test1'
        assertRaises(self, lambda: builder.CreateString(payload),
                     flatbuffers.builder.IsNestedError)

    def test_create_byte_vector_is_nested_error(self):
        builder = flatbuffers.Builder(0)
        builder.StartObject(0)
        payload = b'test1'
        assertRaises(self, lambda: builder.CreateByteVector(payload),
                     flatbuffers.builder.IsNestedError)

    def test_finished_bytes_error(self):
        # Output() is only valid after Finish() has been called.
        builder = flatbuffers.Builder(0)
        assertRaises(self, lambda: builder.Output(),
                     flatbuffers.builder.BuilderNotFinishedError)
class TestFixedLengthArrays(unittest.TestCase):
    """Round-trips an ArrayStruct (fixed-length array fields) through a table."""

    def test_fixed_length_array(self):
        builder = flatbuffers.Builder(0)
        # Scalar, 15-element int array, scalar, and nested-struct fields:
        a = 0.5
        b = range(0, 15)
        c = 1
        # d: two NestedStructs, each with an int pair (d_a), an enum (d_b)
        # and an enum pair (d_c).
        d_a = [[1, 2], [3, 4]]
        d_b = [MyGame.Example.TestEnum.TestEnum.B, \
               MyGame.Example.TestEnum.TestEnum.C]
        d_c = [[MyGame.Example.TestEnum.TestEnum.A, \
                MyGame.Example.TestEnum.TestEnum.B], \
               [MyGame.Example.TestEnum.TestEnum.C, \
                MyGame.Example.TestEnum.TestEnum.B]]
        arrayOffset = MyGame.Example.ArrayStruct.CreateArrayStruct(builder, \
            a, b, c, d_a, d_b, d_c)

        # Create a table with the ArrayStruct.
        MyGame.Example.ArrayTable.ArrayTableStart(builder)
        MyGame.Example.ArrayTable.ArrayTableAddA(builder, arrayOffset)
        tableOffset = MyGame.Example.ArrayTable.ArrayTableEnd(builder)

        builder.Finish(tableOffset)
        buf = builder.Output()

        table = MyGame.Example.ArrayTable.ArrayTable.GetRootAsArrayTable(buf, 0)

        # Verify structure.  D(nested, i) fills the reusable `nested`
        # accessor object with element i of the struct array.
        nested = MyGame.Example.NestedStruct.NestedStruct()
        self.assertEqual(table.A().A(), 0.5)
        self.assertEqual(table.A().B(), \
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14])
        self.assertEqual(table.A().C(), 1)
        self.assertEqual(table.A().D(nested, 0).A(), [1, 2])
        self.assertEqual(table.A().D(nested, 1).A(), [3, 4])
        self.assertEqual(table.A().D(nested, 0).B(), \
            MyGame.Example.TestEnum.TestEnum.B)
        self.assertEqual(table.A().D(nested, 1).B(), \
            MyGame.Example.TestEnum.TestEnum.C)
        self.assertEqual(table.A().D(nested, 0).C(), \
            [MyGame.Example.TestEnum.TestEnum.A, \
             MyGame.Example.TestEnum.TestEnum.B])
        self.assertEqual(table.A().D(nested, 1).C(), \
            [MyGame.Example.TestEnum.TestEnum.C, \
             MyGame.Example.TestEnum.TestEnum.B])
def CheckAgainstGoldDataGo():
    """Cross-language check against the Go-generated gold file.

    Returns True when 'monsterdata_go_wire.mon' exists, parses with
    CheckReadBuffer, and is byte-for-byte identical to the buffer this
    Python port generates; False otherwise.
    """
    try:
        gen_buf, gen_off = make_monster_from_generated_code()
        fn = 'monsterdata_go_wire.mon'
        if not os.path.exists(fn):
            print('Go-generated data does not exist, failed.')
            return False

        # would like to use a context manager here, but it's less
        # backwards-compatible; use try/finally so the handle is closed
        # even if the read fails:
        f = open(fn, 'rb')
        try:
            go_wire_data = f.read()
        finally:
            f.close()

        CheckReadBuffer(bytearray(go_wire_data), 0)
        if not bytearray(gen_buf[gen_off:]) == bytearray(go_wire_data):
            raise AssertionError('CheckAgainstGoldDataGo failed')
    except Exception as e:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; report the actual failure for diagnosis.
        print('Failed to test against Go-generated test data: %s' % e)
        return False
    print('Can read Go-generated test data, and Python generates bytewise identical data.')
    return True
def CheckAgainstGoldDataJava():
    """Cross-language check: the Java-generated gold file must be readable.

    Returns True when 'monsterdata_java_wire.mon' exists and passes
    CheckReadBuffer; False otherwise.
    """
    try:
        # Generating our own buffer first also validates the generator path.
        gen_buf, gen_off = make_monster_from_generated_code()
        fn = 'monsterdata_java_wire.mon'
        if not os.path.exists(fn):
            print('Java-generated data does not exist, failed.')
            return False
        # try/finally so the handle is closed even if the read fails:
        f = open(fn, 'rb')
        try:
            java_wire_data = f.read()
        finally:
            f.close()
        CheckReadBuffer(bytearray(java_wire_data), 0)
    except Exception as e:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; report the actual failure for diagnosis.
        print('Failed to read Java-generated test data: %s' % e)
        return False
    print('Can read Java-generated test data.')
    return True
class LCG(object):
    """Minimal Park-Miller-style linear congruential generator.

    Used instead of the stdlib `random` so benchmark runs produce the
    identical number sequence on every platform.
    http://en.wikipedia.org/wiki/Park%E2%80%93Miller_random_number_generator
    """
    __slots__ = ['n']

    InitialLCGSeed = 48271

    def __init__(self):
        self.n = self.InitialLCGSeed

    def Reset(self):
        """Rewind the generator to its initial seed."""
        self.n = self.InitialLCGSeed

    def Next(self):
        """Advance the state and return it, masked to 32 bits."""
        advanced = (self.n * 279470273) % 4294967291
        self.n = advanced & 0xFFFFFFFF
        return self.n
def BenchmarkVtableDeduplication(count):
    '''
    BenchmarkVtableDeduplication measures the speed of vtable deduplication
    by creating `prePop` vtables, then populating `count` objects with a
    different single vtable.

    When count is large (as in long benchmarks), memory usage may be high.
    '''
    for prePop in (1, 10, 100, 1000):
        builder = flatbuffers.Builder(0)
        # Number of slots grows slowly with the pre-population size.
        n = 1 + int(math.log(prePop, 1.5))

        # generate some layouts (random half-size slot subsets):
        layouts = set()
        r = list(compat_range(n))
        while len(layouts) < prePop:
            layouts.add(tuple(sorted(random.sample(r, int(max(1, n / 2))))))

        layouts = list(layouts)

        # pre-populate vtables:
        for layout in layouts:
            builder.StartObject(n)
            for j in layout:
                builder.PrependInt16Slot(j, j, 0)
            builder.EndObject()

        # benchmark deduplication of a new vtable:
        def f():
            layout = random.choice(layouts)
            builder.StartObject(n)
            for j in layout:
                builder.PrependInt16Slot(j, j, 0)
            builder.EndObject()

        duration = timeit.timeit(stmt=f, number=count)
        rate = float(count) / duration

        # FIX: `rate` is in operations per second; the label previously
        # printed it with a ' sec' unit, which mislabeled the value.
        print(('vtable deduplication rate (n=%d, vtables=%d): %.2f/sec' % (
            prePop,
            len(builder.vtables),
            rate))
        )
def BenchmarkCheckReadBuffer(count, buf, off):
    '''
    BenchmarkCheckReadBuffer measures the speed of flatbuffer reading
    by re-using the CheckReadBuffer function with the gold data.
    '''
    elapsed = timeit.timeit(stmt=lambda: CheckReadBuffer(buf, off),
                            number=count)
    traversals_per_second = float(count) / elapsed
    megabytes = float(len(buf) * count) / float(1024 * 1024)
    megabytes_per_second = megabytes / float(elapsed)
    print(('traversed %d %d-byte flatbuffers in %.2fsec: %.2f/sec, %.2fMB/sec')
          % (count, len(buf), elapsed, traversals_per_second,
             megabytes_per_second))
def BenchmarkMakeMonsterFromGeneratedCode(count, length):
    '''
    BenchmarkMakeMonsterFromGeneratedCode measures the speed of flatbuffer
    creation by re-using the make_monster_from_generated_code function for
    generating gold data examples.
    '''
    elapsed = timeit.timeit(stmt=make_monster_from_generated_code,
                            number=count)
    builds_per_second = float(count) / elapsed
    megabytes = float(length * count) / float(1024 * 1024)
    megabytes_per_second = megabytes / float(elapsed)
    print(('built %d %d-byte flatbuffers in %.2fsec: %.2f/sec, %.2fMB/sec'
           % (count, length, elapsed, builds_per_second,
              megabytes_per_second)))
def backward_compatible_run_tests(**kwargs):
    """Run the unittest suite on Python 2.6+; return True iff all passed."""
    if PY_VERSION < (2, 6):
        sys.stderr.write("Python version less than 2.6 are not supported")
        sys.stderr.flush()
        return False

    if PY_VERSION == (2, 6):
        # python2.6 has a reduced-functionality unittest.main function:
        # it always exits, so success is signalled via SystemExit's code.
        try:
            unittest.main(**kwargs)
        except SystemExit as e:
            return e.code == 0
        return True

    # python2.7 and above let us not exit once unittest.main is run:
    kwargs['exit'] = False
    kwargs['verbosity'] = 0
    ret = unittest.main(**kwargs)
    return not (ret.result.errors or ret.result.failures)
def main():
    """Entry point: run the test suite, optional cross-language gold-data
    comparisons, then the three benchmarks.

    Command line: <benchmark vtable count> <benchmark read count>
    <benchmark build count>; a count of 0 skips that benchmark.
    """
    import os
    import sys
    if not len(sys.argv) == 4:
        # FIX: the concatenated string fragments were missing separating
        # spaces, printing "count><benchmark" and "checkfor bytewise".
        sys.stderr.write('Usage: %s <benchmark vtable count> '
                         '<benchmark read count> <benchmark build count>\n'
                         % sys.argv[0])
        sys.stderr.write(' Provide COMPARE_GENERATED_TO_GO=1 to check '
                         'for bytewise comparison to Go data.\n')
        sys.stderr.write(' Provide COMPARE_GENERATED_TO_JAVA=1 to check '
                         'for bytewise comparison to Java data.\n')
        sys.stderr.flush()
        sys.exit(1)

    # Strip the three benchmark counts so unittest doesn't see them.
    kwargs = dict(argv=sys.argv[:-3])

    # run tests, and run some language comparison checks if needed:
    success = backward_compatible_run_tests(**kwargs)
    if success and os.environ.get('COMPARE_GENERATED_TO_GO', 0) == "1":
        success = success and CheckAgainstGoldDataGo()
    if success and os.environ.get('COMPARE_GENERATED_TO_JAVA', 0) == "1":
        success = success and CheckAgainstGoldDataJava()

    if not success:
        sys.stderr.write('Tests failed, skipping benchmarks.\n')
        sys.stderr.flush()
        sys.exit(1)

    # run benchmarks (if 0, they will be a noop):
    bench_vtable = int(sys.argv[1])
    bench_traverse = int(sys.argv[2])
    bench_build = int(sys.argv[3])
    if bench_vtable:
        BenchmarkVtableDeduplication(bench_vtable)
    if bench_traverse:
        buf, off = make_monster_from_generated_code()
        BenchmarkCheckReadBuffer(bench_traverse, buf, off)
    if bench_build:
        buf, off = make_monster_from_generated_code()
        BenchmarkMakeMonsterFromGeneratedCode(bench_build, len(buf))


if __name__ == '__main__':
    main()
| {
"content_hash": "2f88e721f3022d305e718fd423d36b3c",
"timestamp": "",
"source": "github",
"line_count": 1802,
"max_line_length": 123,
"avg_line_length": 36.405105438401776,
"alnum_prop": 0.5676961068260113,
"repo_name": "yuryleb/osrm-backend",
"id": "b15227091680aa50ab88006ebcf6d123edd23077",
"size": "66237",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "third_party/flatbuffers/tests/py_test.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6654"
},
{
"name": "C++",
"bytes": "3816221"
},
{
"name": "CMake",
"bytes": "152224"
},
{
"name": "Dockerfile",
"bytes": "2342"
},
{
"name": "Gherkin",
"bytes": "1354551"
},
{
"name": "JavaScript",
"bytes": "367366"
},
{
"name": "Lua",
"bytes": "123150"
},
{
"name": "Makefile",
"bytes": "2887"
},
{
"name": "Python",
"bytes": "22321"
},
{
"name": "Shell",
"bytes": "13775"
}
],
"symlink_target": ""
} |
import unittest
from tempfile import *
from util import *
import socket
import subprocess
class TestAsyncIO(unittest.TestCase):
    """Exercises the AsyncIO wrapper (from `util`) over sockets, files and
    subprocess pipes.

    NOTE(review): test_socket and test_socket_close open a live TCP
    connection to google.com:80, so these tests require network access.
    `AsyncIO`, `BoxedObject` and `MessageLoop` come from the star-import
    of `util`; `os` (used by os.unlink below) is presumably also provided
    by one of the star-imports — confirm.
    """

    def test_socket(self):
        """An HTTP request over AsyncIO delivers data and then closes."""
        s = socket.socket()
        s.connect(('google.com', 80))
        x = AsyncIO(s)
        data_recvd = BoxedObject(False)
        x_closed = BoxedObject(False)
        def do_request():
            x.write("GET / HTTP/1.0\r\n\r\n")
        def on_recv(data):
            data_recvd.set(True)
        def on_close():
            x_closed.set(True)
        x.read.add_listener(on_recv)
        x.closed.add_listener(on_close)
        x.open()
        do_request()
        # Pump the event loop until the peer closes the connection.
        MessageLoop.run_until(lambda: x_closed.get())
        self.assertTrue(data_recvd.get())

    def test_socket_close(self):
        """Closing an open AsyncIO socket marks it closed immediately."""
        s = socket.socket()
        s.connect(('google.com', 80))
        x = AsyncIO(s)
        x_closed = BoxedObject(False)
        def on_close():
            x_closed.set(True)
        x.closed.add_listener(on_close)
        x.open()
        x.close()
        # NOTE(review): x_closed is captured by the listener but never
        # asserted — only the is_closed flag is checked here.
        self.assertTrue(x.is_closed)

    def test_file(self):
        """Reading a temp file through AsyncIO yields its full contents."""
        f = NamedTemporaryFile(delete=False)
        f.write("HelloWorld\n")
        f.close()
        x = AsyncIO(open(f.name,'r'))
        rcvd_data = BoxedObject("")
        did_close = BoxedObject(False)
        def on_recv(b):
            # Accumulate chunks; delivery may be split across callbacks.
            rcvd_data.set(rcvd_data.get() + b)
        def on_close():
            did_close.set(True)
        x.read.add_listener(on_recv)
        x.closed.add_listener(on_close)
        x.open()
        MessageLoop.run_until(lambda: x.is_closed)
        self.assertTrue(did_close.get())
        self.assertEquals(rcvd_data.get(), "HelloWorld\n")
        os.unlink(f.name)

    def test_async_popen(self):
        """A child process's stdout can be consumed through AsyncIO."""
        proc = subprocess.Popen(["/bin/echo", "314159"], stdin=None, stderr=None, stdout=subprocess.PIPE)
        x = AsyncIO(proc.stdout)
        rcvd_data = BoxedObject("")
        did_close = BoxedObject(False)
        def on_recv(b):
            rcvd_data.set(rcvd_data.get() + b)
        def on_close():
            did_close.set(True)
        x.read.add_listener(on_recv)
        x.closed.add_listener(on_close)
        x.open()
        MessageLoop.run_until(lambda: x.is_closed)
        self.assertTrue(did_close.get())
        self.assertEquals(rcvd_data.get(), "314159\n")

    def test_file_close(self):
        """Explicitly closing a file-backed AsyncIO fires the close event."""
        f = NamedTemporaryFile(delete=False)
        f.write("HelloWorld\n")
        f.close()
        x = AsyncIO(open(f.name,'r'))
        # NOTE(review): rcvd_data is created but no read listener is
        # attached in this test, so it is never updated.
        rcvd_data = BoxedObject("")
        did_close = BoxedObject(False)
        def on_close():
            did_close.set(True)
        x.closed.add_listener(on_close)
        x.open()
        x.close()
        self.assertTrue(x.is_closed)
        self.assertTrue(did_close.get())
        os.unlink(f.name)
| {
"content_hash": "b847d9e3970ecdaf109f9e55288d4f6d",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 101,
"avg_line_length": 25.91578947368421,
"alnum_prop": 0.628757108042242,
"repo_name": "natduca/ndbg",
"id": "8da52be19403936e08aeda8736bb1445c7c8debd",
"size": "3038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/util/test_async_io.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "4728"
},
{
"name": "C++",
"bytes": "5787"
},
{
"name": "Emacs Lisp",
"bytes": "5014"
},
{
"name": "JavaScript",
"bytes": "237"
},
{
"name": "Python",
"bytes": "554374"
},
{
"name": "Shell",
"bytes": "781"
},
{
"name": "VimL",
"bytes": "1848"
}
],
"symlink_target": ""
} |
from collections import Sequence  # noqa
import logging

from django.conf import settings  # noqa

from horizon import exceptions

# Names exported to `from ... import *` consumers of this module.
__all__ = ('APIResourceWrapper', 'APIDictWrapper',
           'get_service_from_catalog', 'url_for',)

# Module-level logger shared by the wrapper classes below.
LOG = logging.getLogger(__name__)
class APIVersionManager(object):
    """Stores the supported API versions for one service and resolves which
    version is active, honoring the OPENSTACK_API_VERSIONS Django setting.
    """
    SETTINGS_KEY = "OPENSTACK_API_VERSIONS"

    def __init__(self, service_type, preferred_version=None):
        self.service_type = service_type
        self.preferred = preferred_version
        self._active = None
        self.supported = {}

    @property
    def active(self):
        """Active version key, resolved lazily on first access."""
        if self._active is None:
            self.get_active_version()
        return self._active

    def load_supported_version(self, version, data):
        """Register version-specific data (e.g. a client module)."""
        self.supported[version] = data

    def get_active_version(self):
        """Return the data registered for the active version, resolving it
        from settings (or the preferred default) on first call."""
        if self._active is not None:
            return self.supported[self._active]
        version = getattr(settings, self.SETTINGS_KEY, {}).get(self.service_type)
        if version is None:
            # TODO(gabriel): support API version discovery here; we'll leave
            # the setting in as a way of overriding the latest available
            # version.
            version = self.preferred
        self._active = version
        return self.supported[self._active]
class APIResourceWrapper(object):
    """Simple wrapper for api objects.

    Subclasses declare the attribute names they expose via ``_attrs``;
    the wrapped api object is the constructor's only argument, and only
    the declared attributes are forwarded to it.
    """
    _attrs = []

    def __init__(self, apiresource):
        self._apiresource = apiresource

    def __getattr__(self, attr):
        if attr not in self._attrs:
            # Undeclared attribute: log a colored diagnostic and refuse.
            msg = ('Attempted to access unknown attribute "%s" on '
                   'APIResource object of type "%s" wrapping resource of '
                   'type "%s".') % (attr, self.__class__,
                                    self._apiresource.__class__)
            LOG.debug(exceptions.error_color(msg))
            raise AttributeError(attr)
        # __getattr__ won't find properties
        return self._apiresource.__getattribute__(attr)

    def __repr__(self):
        exposed = dict((attr, getattr(self, attr))
                       for attr in self._attrs
                       if hasattr(self, attr))
        return "<%s: %s>" % (self.__class__.__name__, exposed)
class APIDictWrapper(object):
    """Simple wrapper for api dictionaries.

    Some api calls return dictionaries.  This class provides identical
    behavior as APIResourceWrapper, except that it will also behave as a
    dictionary, in addition to attribute accesses.

    Attribute access is the preferred method of access, to be
    consistent with api resource objects from novaclient.
    """

    def __init__(self, apidict):
        self._apidict = apidict

    def __getattr__(self, attr):
        try:
            return self._apidict[attr]
        except KeyError:
            # Missing key: log a diagnostic, then surface the standard
            # attribute error so normal Python protocols keep working.
            msg = 'Unknown attribute "%(attr)s" on APIResource object ' \
                  'of type "%(cls)s"' % {'attr': attr, 'cls': self.__class__}
            LOG.debug(exceptions.error_color(msg))
            raise AttributeError(msg)

    def __getitem__(self, item):
        try:
            return self.__getattr__(item)
        except AttributeError as e:
            # caller is expecting a KeyError
            raise KeyError(e)

    def items(self):
        return self._apidict.items()

    def get(self, item, default=None):
        try:
            return self.__getattr__(item)
        except AttributeError:
            return default

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self._apidict)
class Quota(object):
    """Wrapper for an individual named limit within a quota set."""

    def __init__(self, name, limit):
        self.name = name
        self.limit = limit

    def __repr__(self):
        return "<Quota: (" + str(self.name) + ", " + str(self.limit) + ")>"
class QuotaSet(Sequence):
    """
    Wrapper for client QuotaSet objects which turns the individual quotas
    into Quota objects for easier handling/iteration.

    `QuotaSet` objects support a mix of `list` and `dict` methods; you can use
    the bracket notiation (`qs["my_quota"] = 0`) to add new quota values, and
    use the `get` method to retrieve a specific quota, but otherwise it
    behaves much like a list or tuple, particularly in supporting iteration.
    """
    def __init__(self, apiresource=None):
        self.items = []
        if apiresource:
            # Client objects keep their raw data in _info; plain dicts are
            # consumed via items().
            if hasattr(apiresource, '_info'):
                items = apiresource._info.items()
            else:
                items = apiresource.items()

            for k, v in items:
                # 'id' identifies the resource, not a quota value.
                if k == 'id':
                    continue
                self[k] = v

    def __setitem__(self, k, v):
        # Coerce numeric strings to int; None (unlimited) is preserved.
        v = int(v) if v is not None else v
        q = Quota(k, v)
        self.items.append(q)

    def __getitem__(self, index):
        # Sequence-style access: index is positional, not a quota name.
        return self.items[index]

    def __add__(self, other):
        '''Merge another QuotaSet into this one. Existing quotas are
        not overriden.'''
        if not isinstance(other, QuotaSet):
            msg = "Can only add QuotaSet to QuotaSet, " \
                  "but received %s instead" % type(other)
            raise ValueError(msg)

        for item in other:
            # get() returns Quota(name, None) for unknown names, so an
            # item is appended when this set either lacks it or holds it
            # with a None limit.  Mutates and returns self (not a copy).
            if self.get(item.name).limit is None:
                self.items.append(item)
        return self

    def __len__(self):
        return len(self.items)

    def __repr__(self):
        return repr(self.items)

    def get(self, key, default=None):
        # Name-based lookup; falls back to a synthetic Quota with the
        # supplied default limit when the name is absent.
        match = [quota for quota in self.items if quota.name == key]
        return match.pop() if len(match) else Quota(key, default)

    def add(self, other):
        return self.__add__(other)
def get_service_from_catalog(catalog, service_type):
    """Return the first catalog entry whose 'type' matches, else None."""
    if not catalog:
        return None
    for service in catalog:
        if service['type'] == service_type:
            return service
    return None
def get_version_from_service(service):
    """Infer the keystone catalog version from one service entry.

    V3 catalogs mark each endpoint with an 'interface' key; anything
    else (including a missing/empty service) is treated as v2.0.
    """
    if not service:
        return 2.0
    first_endpoint = service['endpoints'][0]
    return 3 if 'interface' in first_endpoint else 2.0
# Mapping of V2 Catalog Endpoint_type to V3 Catalog Interfaces
# (used by get_url_for_service below to translate the configured
# endpoint type into a v3 endpoint's 'interface' value).
ENDPOINT_TYPE_TO_INTERFACE = {
    'publicURL': 'public',
    'internalURL': 'internal',
    'adminURL': 'admin',
}
def get_url_for_service(service, region, endpoint_type):
    """Return the URL for ``endpoint_type`` from ``service`` in ``region``.

    Supports both v2 catalogs (endpoint dicts keyed by 'publicURL' etc.)
    and v3 catalogs (endpoint dicts carrying 'interface' and 'url' keys).
    Returns None when no suitable endpoint exists.
    """
    identity_version = get_version_from_service(service)
    for endpoint in service['endpoints']:
        # ignore region for identity
        if service['type'] == 'identity' or region == endpoint['region']:
            try:
                if identity_version < 3:
                    return endpoint[endpoint_type]
                else:
                    interface = \
                        ENDPOINT_TYPE_TO_INTERFACE.get(endpoint_type, '')
                    if endpoint['interface'] == interface:
                        return endpoint['url']
            except (IndexError, KeyError):
                # NOTE(review): this returns None on the *first* matching
                # endpoint that lacks the requested key rather than trying
                # later endpoints — confirm whether that is intentional.
                return None
    return None
def url_for(request, service_type, endpoint_type=None):
    """Look up the endpoint URL for ``service_type`` in the user's catalog.

    Uses OPENSTACK_ENDPOINT_TYPE (default 'publicURL') when no endpoint
    type is given, falls back to SECONDARY_ENDPOINT_TYPE when the primary
    yields nothing, and raises ServiceCatalogException on failure.
    """
    endpoint_type = endpoint_type or getattr(
        settings, 'OPENSTACK_ENDPOINT_TYPE', 'publicURL')
    fallback_endpoint_type = getattr(settings, 'SECONDARY_ENDPOINT_TYPE', None)

    service = get_service_from_catalog(request.user.service_catalog,
                                       service_type)
    if not service:
        raise exceptions.ServiceCatalogException(service_type)

    region = request.user.services_region
    url = get_url_for_service(service, region, endpoint_type)
    if not url and fallback_endpoint_type:
        url = get_url_for_service(service, region, fallback_endpoint_type)
    if url:
        return url
    raise exceptions.ServiceCatalogException(service_type)
def is_service_enabled(request, service_type, service_name=None):
    """Return True when the user's catalog offers ``service_type`` in the
    current region, optionally requiring an exact service name match."""
    service = get_service_from_catalog(request.user.service_catalog,
                                       service_type)
    if not service:
        return False
    region = request.user.services_region
    for endpoint in service['endpoints']:
        # ignore region for identity
        if service['type'] == 'identity' or endpoint['region'] == region:
            if service_name:
                return service['name'] == service_name
            return True
    return False
| {
"content_hash": "940f87a9fcf9fdf09147b36847f9656c",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 79,
"avg_line_length": 32.666666666666664,
"alnum_prop": 0.5629442788351295,
"repo_name": "kaiweifan/horizon",
"id": "0f5f51c4979cbfaceee42c35677f86c2f482db5c",
"size": "9531",
"binary": false,
"copies": "1",
"ref": "refs/heads/vip2",
"path": "openstack_dashboard/api/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "160827"
},
{
"name": "JavaScript",
"bytes": "360901"
},
{
"name": "Python",
"bytes": "2832603"
},
{
"name": "Shell",
"bytes": "12986"
}
],
"symlink_target": ""
} |
import os
import sys

if __name__ == "__main__":
    # Standard Django management entry point for the "recast" project.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "recast.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        # Django itself imports fine — re-raise the original failure.
        raise
    execute_from_command_line(sys.argv)
| {
"content_hash": "5333ff9a25f0060e0d9e8a832edf5c32",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 37.23809523809524,
"alnum_prop": 0.6202046035805626,
"repo_name": "xurble/recast",
"id": "62a132ae0fa171e7a249f8ecaf93a7c2ed4824fb",
"size": "804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3385"
},
{
"name": "HTML",
"bytes": "20676"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "29451"
},
{
"name": "Shell",
"bytes": "196"
}
],
"symlink_target": ""
} |
# Python 2 script (uses `print` statements): splits spectrogram name
# files into train/valid/test partitions by cumulative frame count.
import numpy as np
import argparse
import glob

parser = argparse.ArgumentParser(description='''get the specfiles for training validating and testing''')
parser.add_argument('--specname_dir', metavar='DIR', required=True, help='the path to the specname npy file')
parser.add_argument('--train_valid_test_ratio', metavar='NUMBER STRING', required=True, help='''format '1 2 3' ''')
parser.add_argument('--save', help='save to text')
args = parser.parse_args()

name_dir = args.specname_dir
# Each .npy file holds (spec_name, frame_count) pairs.
spec_files = glob.glob(name_dir + '/Laura_warp_specnames_*.npy')
# Ratio must be three space-separated integers, e.g. '8 1 1'.
tvt_ratio = args.train_valid_test_ratio.strip().split(' ')
tvt_ratio = [int(s) for s in tvt_ratio]
assert len(tvt_ratio) == 3
if args.save:
    # NOTE(review): opened in 'wb' but written str — fine on Python 2
    # only; the handles are never explicitly closed (process exit flushes).
    train_spec = open('train_spec.txt', 'wb')
    valid_spec = open('valid_spec.txt', 'wb')
    test_spec = open('test_spec.txt', 'wb')
for name_path in spec_files:
    # NOTE(review): fin is never closed; np.load reads the whole array.
    fin = open(name_path)
    data = np.load(fin)
    # First pass: total frames, to place the two partition boundaries.
    ttl_frames = 0
    for pair in data:
        ttl_frames += int(pair[1])
    train_valid_break = tvt_ratio[0] * 1.0 / sum(tvt_ratio) * ttl_frames
    valid_test_break = (tvt_ratio[0] + tvt_ratio[1]) * 1.0 / sum(tvt_ratio) * ttl_frames
    # Second pass: assign each spec to a split by its cumulative position.
    ttl_frames = 0
    count = 0
    for pair in data:
        if ttl_frames < train_valid_break:
            print 'train: %s : %s'%tuple(pair)
            if args.save:
                train_spec.write(pair[0].split('.')[0] + '\n')
            count += 1
        elif ttl_frames < valid_test_break:
            print 'valid: %s : %s'%tuple(pair)
            if args.save:
                valid_spec.write(pair[0].split('.')[0] + '\n')
            count += 1
        else:
            print 'test: %s : %s'%tuple(pair)
            if args.save:
                test_spec.write(pair[0].split('.')[0] + '\n')
            count += 1
        ttl_frames += int(pair[1])
    # Sanity check: every entry was assigned to exactly one split.
    assert count == len(data)
| {
"content_hash": "f1992c93ced383dea1e289e779a5f815",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 115,
"avg_line_length": 31.96551724137931,
"alnum_prop": 0.5814455231930961,
"repo_name": "hycis/Pynet",
"id": "9c6a1ac3f1cb3b32bda26fd089c3ce1714661635",
"size": "1856",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/nii/train_valid_test_splits.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "445036"
},
{
"name": "Shell",
"bytes": "38953"
}
],
"symlink_target": ""
} |
import django.db.models.deletion
import django.utils.timezone
from django.db import migrations, models


class Migration(migrations.Migration):
    """Adds LiveBlog.post_date and re-declares the CMSPlugin parent link.

    Auto-generated migration: operations must stay in sync with the
    recorded migration history, so only comments are added here.
    """

    dependencies = [
        ("liveblog", "0002_liveblog_title"),
    ]

    operations = [
        # New editable timestamp; existing rows default to "now".
        migrations.AddField(
            model_name="liveblog",
            name="post_date",
            field=models.DateTimeField(blank=True, default=django.utils.timezone.now, verbose_name="post date"),
        ),
        # Parent link to cms.CMSPlugin with an explicit CASCADE on_delete.
        migrations.AlterField(
            model_name="liveblog",
            name="cmsplugin_ptr",
            field=models.OneToOneField(
                on_delete=django.db.models.deletion.CASCADE,
                parent_link=True,
                primary_key=True,
                related_name="liveblog_liveblog",
                serialize=False,
                to="cms.CMSPlugin",
            ),
        ),
    ]
| {
"content_hash": "266a30a85c0db017461449e4de7efb9c",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 112,
"avg_line_length": 29.166666666666668,
"alnum_prop": 0.5622857142857143,
"repo_name": "nephila/djangocms-blog",
"id": "d58cf6eec1e45efd28e709364f20502206d184f2",
"size": "924",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "djangocms_blog/liveblog/migrations/0003_auto_20160917_0123.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "939"
},
{
"name": "HTML",
"bytes": "16041"
},
{
"name": "JavaScript",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "389126"
}
],
"symlink_target": ""
} |
from TimeseriesDB.MessageFormatting import *
import importlib
import unittest
from pytest import raises
import numpy as np
from TimeseriesDB.tsdb_error import *
from TimeseriesDB import DatabaseServer
from TimeseriesDB.MessageFormatting import * #Deserializer
from Similarity.find_most_similar import find_most_similiar, sanity_check
from TimeseriesDB.simsearch_init import initialize_simsearch_parameters
from socketserver import BaseRequestHandler, ThreadingTCPServer, TCPServer
from timeseries.ArrayTimeSeries import ArrayTimeSeries as ts
import threading
from socket import socket, AF_INET, SOCK_STREAM
import sys
from scipy.stats import norm
import multiprocessing
class Server_Tests(unittest.TestCase):
    """Integration tests for DatabaseServer over a real localhost TCP socket.

    setUp boots a ThreadingTCPServer in daemon threads; each test talks to
    it with a blocking client socket and the project's serialize /
    Deserializer message framing.
    """

    def setUp(self):
        # Allow immediate rebinding of the port between test runs.
        ThreadingTCPServer.allow_reuse_address = True
        self.port = 2000
        try:
            self.serv = ThreadingTCPServer(('', self.port), DatabaseServer)
        except:
            # Port taken: retry once on the next port.
            # NOTE(review): bare except + single retry — a second collision
            # would propagate out of setUp.
            self.port += 1
            self.serv = ThreadingTCPServer(('', self.port), DatabaseServer)
        self.serv.data = initialize_simsearch_parameters()
        self.serv.deserializer = Deserializer()
        # Two daemon threads serving the same server instance.
        self.serv_thread = threading.Thread(target=self.serv.serve_forever)
        self.serv_thread.setDaemon(True)
        self.serv_thread.start()
        self.serv_thread2 = threading.Thread(target=self.serv.serve_forever)
        self.serv_thread2.setDaemon(True)
        self.serv_thread2.start()

    def tearDown(self):
        # Close the listening socket; daemon threads die with the process.
        self.serv.socket.close()
        self.serv.server_close()

    def test_suite(self):
        """Round-trips serialization, then exercises every query op."""
        ###test the serializing and deserializing
        msg = {'op':'TSfromID','id':12,'courtesy':'please'}
        serialized = serialize(json.dumps(msg))
        assert isinstance(serialized, bytes) #check that bytes are passed back
        ds = Deserializer()
        ds.append(serialized)
        ds.ready()
        response = ds.deserialize()
        #check that serializing and then deserializing leaves us with the original message
        assert response == msg

        ###test the different queries
        s = socket(AF_INET, SOCK_STREAM)
        s.connect(('localhost', self.port))

        #query for similarity search with an ID
        d2 = {'op':'simsearch_id','id':12,'n_closest':2,'courtesy':'please'}
        s2 = serialize(json.dumps(d2))
        s.send(s2)
        msg = s.recv(8192)
        ds = Deserializer()
        ds.append(msg)
        ds.ready()
        response = ds.deserialize()
        assert len(response['id']) == 2 #returned back two ids
        assert type(response['id'][0]) == type(response['id'][1]) == int

        #query for similarity search with a new timeseries
        # (noisy gaussian pdf over [0, 1) — 100 points)
        t = np.arange(0.0, 1.0, 0.01)
        v = norm.pdf(t, 100, 100) + 1000*np.random.randn(100)
        ts_test = ts(t, v)
        d2 = {'op':'simsearch_ts','ts':[list(ts_test.times()), list(ts_test.values())],'courtesy':'please'}
        s2 = serialize(json.dumps(d2))
        s.send(s2)
        msg = s.recv(8192)
        ds = Deserializer()
        ds.append(msg)
        ds.ready()
        response = ds.deserialize()
        assert len(response['id']) == 5 #returned back five ids
        assert type(response['id'][0]) == type(response['id'][1]) == int

        #query for timeseries based on id
        d2 = {'op':'TSfromID','id':12,'courtesy':'please'}
        s2 = serialize(json.dumps(d2))
        s.send(s2)
        msg = s.recv(8192)
        ds = Deserializer()
        ds.append(msg)
        ds.ready()
        response = ds.deserialize()
        assert len(response['ts']) == 2 #returned back times and values

        #nonpolite query test, passes back a none operation
        # (request without the 'courtesy' key is rejected with a payload)
        d2_impolite = {'op':'simsearch_id','id':12,'n_closest':2}
        s2_impolite = serialize(json.dumps(d2_impolite))
        s.send(s2_impolite)
        msg_impolite = s.recv(8192)
        ds = Deserializer()
        ds.append(msg_impolite)
        ds.ready()
        response_impolite = ds.deserialize()
        assert 'payload' in response_impolite
        s.close()

        ###test multiple queries
        # Two concurrent client processes hitting the threaded server.
        def query_1():
            #function to compute simsearch
            s = socket(AF_INET, SOCK_STREAM)
            s.connect(('localhost', self.port))
            d2 = {'op':'simsearch_id','id':12,'n_closest':2,'courtesy':'please'}
            s2 = serialize(json.dumps(d2))
            s.send(s2)
            msg = s.recv(8192)
            ds = Deserializer()
            ds.append(msg)
            ds.ready()
            response = ds.deserialize()
            assert type(response['id'][0]) == type(response['id'][1]) == int
            s.close()
            return

        def query_2():
            #function to return timeseries from id
            s = socket(AF_INET, SOCK_STREAM)
            s.connect(('localhost', self.port))
            d2 = {'op':'TSfromID','id':12,'courtesy':'please'}
            s2 = serialize(json.dumps(d2))
            s.send(s2)
            msg = s.recv(8192)
            ds = Deserializer()
            ds.append(msg)
            ds.ready()
            response = ds.deserialize()
            assert len(response['ts']) == 2
            s.close()
            return

        self.p = multiprocessing.Process(target=query_1)
        self.p2 = multiprocessing.Process(target=query_2)
        self.p.start()
        self.p2.start()
        self.p.join()
        self.p2.join()
if __name__=='__main__':
    # unittest.main() exits via SystemExit; re-raise only when the exit
    # payload is the literal True (treated here as a real failure signal).
    try: # pragma: no cover
        unittest.main() # pragma: no cover
    except SystemExit as inst: # pragma: no cover
        if inst.args[0] is True: # pragma: no cover
            raise # pragma: no cover
| {
"content_hash": "6749db160da9b02a2718e494f8b5b4dd",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 107,
"avg_line_length": 36.425925925925924,
"alnum_prop": 0.563972208100322,
"repo_name": "slac207/cs207project",
"id": "ba772fedbe29250bc8c5ad05beadcae4505b7d62",
"size": "5901",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1416"
},
{
"name": "HTML",
"bytes": "4380"
},
{
"name": "JavaScript",
"bytes": "135139"
},
{
"name": "Jupyter Notebook",
"bytes": "59899"
},
{
"name": "Python",
"bytes": "274063"
},
{
"name": "Shell",
"bytes": "10378"
}
],
"symlink_target": ""
} |
import sys
from atom.api import Typed
from enaml.widgets.splitter import ProxySplitter
from .QtCore import Qt, QEvent, Signal
from .QtGui import (
QSplitter, QSplitterHandle, QVBoxLayout, QFrame, QApplication
)
from .qt_constraints_widget import QtConstraintsWidget, size_hint_guard
from .qt_split_item import QtSplitItem
#: Map Enaml orientation strings to the corresponding Qt constants.
ORIENTATION = {
    'horizontal': Qt.Horizontal,
    'vertical': Qt.Vertical,
}
class QWinSplitterHandle(QSplitterHandle):
    """ A QSplitterHandle subclass used on win32 platforms.

    The native Windows style paints the splitter handle in the widget
    background color, which makes it effectively invisible. This class
    overlays a raised QFrame line on the handle to provide a small
    amount of visual feedback.

    """
    def __init__(self, orientation, parent=None):
        """ Initialize the handle and install its overlay frame. """
        super(QWinSplitterHandle, self).__init__(orientation, parent)
        frame = QFrame(self)
        self._frame = frame
        layout = QVBoxLayout()
        layout.addWidget(frame)
        layout.setSpacing(0)
        layout.setContentsMargins(0, 0, 0, 0)
        self.setLayout(layout)
        self.updateFrame()

    def updateFrame(self):
        """ Refresh the overlay frame style for the current orientation. """
        if self.orientation() == Qt.Horizontal:
            line_style = QFrame.VLine
        else:
            line_style = QFrame.HLine
        self._frame.setFrameStyle(line_style | QFrame.Raised)
class QCustomSplitter(QSplitter):
    """ A custom QSplitter which handles children of type QSplitItem.

    """
    #: A signal emitted when a LayoutRequest event is posted to the
    #: splitter widget. This will typically occur when the size hint
    #: of the splitter is no longer valid.
    layoutRequested = Signal()

    def createHandle(self):
        """ A reimplemented virtual method to create splitter handles.

        On win32 platforms, this will return a custom QSplitterHandle
        which works around an issue with the handle not drawing nicely.
        On all other platforms, a normal QSplitterHandle widget.

        """
        if sys.platform == 'win32':
            return QWinSplitterHandle(self.orientation(), self)
        return QSplitterHandle(self.orientation(), self)

    def setOrientation(self, orientation):
        """ Set the orientation of the splitter.

        This overridden method will call the `updateFrame` method of the
        splitter handles when running on win32 platforms. On any other
        platform, this method simply calls the superclass method.

        """
        old = self.orientation()
        if old != orientation:
            super(QCustomSplitter, self).setOrientation(orientation)
            if sys.platform == 'win32':
                # Fix: the original used `xrange`, which does not exist on
                # Python 3. `range` iterates identically on both versions.
                for idx in range(self.count()):
                    handle = self.handle(idx)
                    handle.updateFrame()

    def event(self, event):
        """ A custom event handler which handles LayoutRequest events.

        When a LayoutRequest event is posted to this widget, it will
        emit the `layoutRequested` signal. This allows an external
        consumer of this widget to update their external layout.

        """
        res = super(QCustomSplitter, self).event(event)
        if event.type() == QEvent.LayoutRequest:
            self.layoutRequested.emit()
        return res
class QtSplitter(QtConstraintsWidget, ProxySplitter):
    """ A Qt implementation of an Enaml ProxySplitter.

    """
    #: A reference to the widget created by the proxy.
    widget = Typed(QCustomSplitter)

    #--------------------------------------------------------------------------
    # Initialization API
    #--------------------------------------------------------------------------
    def create_widget(self):
        """ Creates the underlying QSplitter control.

        """
        self.widget = QCustomSplitter(self.parent_widget())

    def init_widget(self):
        """ Initialize the underlying control.

        """
        super(QtSplitter, self).init_widget()
        d = self.declaration
        # No size-hint guard needed at init time: the widget is not yet
        # part of an active layout.
        self.set_orientation(d.orientation, sh_guard=False)
        self.set_live_drag(d.live_drag)

    def init_layout(self):
        """ Handle the layout initialization for the splitter.

        """
        super(QtSplitter, self).init_layout()
        widget = self.widget
        for item in self.split_items():
            widget.addWidget(item)
        widget.layoutRequested.connect(self.on_layout_requested)
        # On Windows, messages are consumed from three different queues,
        # each with a different priority. The lowest priority is the
        # queue which holds WM_PAINT messages. Dragging the splitter bar
        # generates WM_MOUSEMOVE messages which have a higher priority.
        # These messages (dragging the bar) generate size events in Qt
        # which are delivered immediately. This means that if handling
        # the resize event from the drag takes too long (> ~800us) then
        # another size event will arrive before the paint event, since
        # the new WM_MOUSEMOVE will be processed before the WM_PAINT.
        # So on Windows, the `splitterMoved` signal, which is emitted
        # on every drag, is connected to a handler which will force a
        # repaint if opaque resize is turned on. Since paint event are
        # collapsed, the effect of this is to restore the order of event
        # processing.
        if sys.platform == 'win32':
            widget.splitterMoved.connect(self.on_win32_splitter_moved)

    #--------------------------------------------------------------------------
    # Child Events
    #--------------------------------------------------------------------------
    def child_added(self, child):
        """ Handle the child added event for a QtSplitter.

        """
        super(QtSplitter, self).child_added(child)
        if isinstance(child, QtSplitItem):
            # Insert the widget at the index the child occupies among the
            # proxy children, keeping widget order in sync with the tree.
            for index, dchild in enumerate(self.children()):
                if child is dchild:
                    self.widget.insertWidget(index, child.widget)

    # QSplitter automatically removes a widget when it's reparented. The
    # base child_removed event handler will set the parent to None, and
    # that is all that is needed.

    #--------------------------------------------------------------------------
    # Utility Methods
    #--------------------------------------------------------------------------
    def split_items(self):
        """ Get the split items defined for the widget.

        Yields only items whose proxy widget has actually been created.

        """
        for d in self.declaration.split_items():
            w = d.proxy.widget
            if w is not None:
                yield w

    #--------------------------------------------------------------------------
    # Signal Handlers
    #--------------------------------------------------------------------------
    def on_layout_requested(self):
        """ Handle the `layoutRequested` signal from the QSplitter.

        """
        self.size_hint_updated()

    def on_win32_splitter_moved(self):
        """ Handle the 'splitterMoved' signal from the QSplitter.

        This handler is only connected when running on Windows and it
        serves to make sure paint events get processed during heavy
        resize events when opaque resizing is turned on.

        """
        if self.widget.opaqueResize():
            QApplication.sendPostedEvents()

    #--------------------------------------------------------------------------
    # ProxySplitter API
    #--------------------------------------------------------------------------
    def set_orientation(self, orientation, sh_guard=True):
        """ Update the orientation of the QSplitter.

        """
        if sh_guard:
            with size_hint_guard(self):
                self.widget.setOrientation(ORIENTATION[orientation])
        else:
            self.widget.setOrientation(ORIENTATION[orientation])

    def set_live_drag(self, live_drag):
        """ Update the dragging mode of the QSplitter.

        """
        self.widget.setOpaqueResize(live_drag)
| {
"content_hash": "4a98d5865c10bee680d5d55bdb155244",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 79,
"avg_line_length": 36.68493150684932,
"alnum_prop": 0.5848892208115509,
"repo_name": "ContinuumIO/ashiba",
"id": "49d6ee6956ae14876a2d404f06cc7d9c820f3d7a",
"size": "8384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enaml/enaml/qt/qt_splitter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4560"
},
{
"name": "C",
"bytes": "738"
},
{
"name": "C++",
"bytes": "77464"
},
{
"name": "CSS",
"bytes": "2286"
},
{
"name": "Emacs Lisp",
"bytes": "1210"
},
{
"name": "HTML",
"bytes": "4891"
},
{
"name": "JavaScript",
"bytes": "17243"
},
{
"name": "Makefile",
"bytes": "4590"
},
{
"name": "Python",
"bytes": "3241535"
},
{
"name": "Shell",
"bytes": "119"
},
{
"name": "VimL",
"bytes": "1821"
}
],
"symlink_target": ""
} |
from .docker_compose import DockerBaseSettings
class BuildDevSettings(DockerBaseSettings):
    """Settings for build containers: expose no database configuration."""

    # Flag indicating these settings must never touch a database.
    DONT_HIT_DB = True

    @property
    def DATABASES(self):  # noqa
        """Return an empty database configuration."""
        return {}
BuildDevSettings.load_settings(__name__)
| {
"content_hash": "9c9a0253ebb2808979b63672e200d0ae",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 46,
"avg_line_length": 18.75,
"alnum_prop": 0.7066666666666667,
"repo_name": "rtfd/readthedocs.org",
"id": "836352f3580ddf5a6539ccd26ef2d6e403597dce",
"size": "225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dockerfiles/settings/build.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "66552"
},
{
"name": "Dockerfile",
"bytes": "205"
},
{
"name": "HTML",
"bytes": "196998"
},
{
"name": "JavaScript",
"bytes": "431128"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "1821332"
},
{
"name": "Shell",
"bytes": "682"
}
],
"symlink_target": ""
} |
"""Model definition for the Mask-RCNN Model.
Defines model_fn of Mask-RCNN for TF Estimator. The model_fn includes Mask-RCNN
model architecture, loss function, learning rate schedule, and evaluation
procedure.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from REDACTED.tensorflow.compiler.xla.experimental.xla_sharding import xla_sharding
def _rpn_score_loss(score_outputs, score_targets, normalizer=1.0):
  """Computes the RPN objectness (score) loss for a single level."""
  # Targets encode three anchor states: 1 = positive sample,
  # 0 = negative sample, -1 = "don't care" (ignored).
  with tf.name_scope('rpn_score_loss'):
    valid_mask = tf.logical_or(
        tf.equal(score_targets, 1), tf.equal(score_targets, 0))
    # Clamp the -1 ignore labels up to 0; they carry zero weight anyway.
    clamped_targets = tf.maximum(
        score_targets, tf.zeros_like(score_targets))
    # Sum over all non-ignored anchors, then apply the caller's normalizer.
    loss = tf.losses.sigmoid_cross_entropy(
        clamped_targets, score_outputs, weights=valid_mask,
        reduction=tf.losses.Reduction.SUM)
    return loss / normalizer
def _rpn_box_loss(box_outputs, box_targets, normalizer=1.0, delta=1./9):
  """Computes the RPN box-regression (Huber) loss for a single level."""
  # `delta` is chosen near the typical magnitude of the regression targets;
  # e.g. roughly [0.1, 0.1, 0.2, 0.2] for a 512x512 input with 6 anchors
  # on the P2-P6 pyramid.
  with tf.name_scope('rpn_box_loss'):
    # Entries with an exactly-zero target do not contribute to the loss.
    nonzero_mask = tf.not_equal(box_targets, 0.0)
    # SUM_BY_NONZERO_WEIGHTS divides by the count of contributing entries;
    # `normalizer` is an additional caller-provided divisor.
    loss = tf.losses.huber_loss(
        box_targets,
        box_outputs,
        weights=nonzero_mask,
        delta=delta,
        reduction=tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS)
    return loss / normalizer
def rpn_loss(score_outputs, box_outputs, labels, params):
  """Computes total RPN detection loss.

  Computes total RPN detection loss including box and score from all levels.

  Args:
    score_outputs: an OrderDict with keys representing levels and values
      representing scores in [batch_size, height, width, num_anchors].
    box_outputs: an OrderDict with keys representing levels and values
      representing box regression targets in
      [batch_size, height, width, num_anchors * 4].
    labels: the dictionary returned from the dataloader that includes
      ground-truth targets.
    params: the dictionary including training parameters specified in
      default_hparams() in mask_rcnn_params.py.

  Returns:
    total_rpn_loss: a float tensor representing total loss reduced from
      score and box losses from all levels.
    rpn_score_loss: a float tensor representing total score loss.
    rpn_box_loss: a float tensor representing total box regression loss.
  """
  with tf.name_scope('rpn_loss'):
    per_level_score_losses = []
    per_level_box_losses = []
    for level in score_outputs.keys():
      batch, height, width, num_anchors = (
          score_outputs[level].get_shape().as_list())
      # Reshape the flat per-level labels to the prediction layout.
      level_score_targets = tf.reshape(
          labels['score_targets_%d' % level],
          [batch, height, width, num_anchors])
      level_box_targets = tf.reshape(
          labels['box_targets_%d' % level],
          [batch, height, width, num_anchors * 4])
      per_level_score_losses.append(
          _rpn_score_loss(
              score_outputs[level],
              level_score_targets,
              normalizer=tf.to_float(
                  batch * params['rpn_batch_size_per_im'])))
      per_level_box_losses.append(
          _rpn_box_loss(box_outputs[level], level_box_targets))
    # Sum per level losses to total loss.
    rpn_score_loss = tf.add_n(per_level_score_losses)
    rpn_box_loss = params['rpn_box_loss_weight'] * tf.add_n(
        per_level_box_losses)
    return rpn_score_loss + rpn_box_loss, rpn_score_loss, rpn_box_loss
def _fast_rcnn_class_loss(class_outputs, class_targets_one_hot, normalizer=1.0):
  """Computes the Fast-RCNN softmax classification loss."""
  with tf.name_scope('fast_rcnn_class_loss'):
    # SUM_BY_NONZERO_WEIGHTS normalizes by the number of non-zero-weight
    # entries before the caller-supplied `normalizer` is applied.
    loss = tf.losses.softmax_cross_entropy(
        class_targets_one_hot, class_outputs,
        reduction=tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS)
    return loss / normalizer
def _fast_rcnn_box_loss(box_outputs, box_targets, class_targets, normalizer=1.0,
                        delta=1.):
  """Computes the Fast-RCNN box-regression (Huber) loss.

  Only RoIs whose class target is positive (foreground) contribute.
  `delta` is chosen near the typical magnitude of the regression targets.
  """
  with tf.name_scope('fast_rcnn_box_loss'):
    # Broadcast the per-RoI foreground indicator across all 4 coordinates.
    foreground_mask = tf.tile(
        tf.expand_dims(tf.greater(class_targets, 0), axis=2), [1, 1, 4])
    # SUM_BY_NONZERO_WEIGHTS divides by the count of contributing entries;
    # `normalizer` is an additional caller-provided divisor.
    loss = tf.losses.huber_loss(
        box_targets,
        box_outputs,
        weights=foreground_mask,
        delta=delta,
        reduction=tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS)
    return loss / normalizer
def fast_rcnn_loss(class_outputs, box_outputs, class_targets, box_targets,
                   params):
  """Computes the box and class loss (Fast-RCNN branch) of Mask-RCNN.

  This function implements the classification and box regression loss of the
  Fast-RCNN branch in Mask-RCNN. As the `box_outputs` produces `num_classes`
  boxes for each RoI, the reference model expands `box_targets` to match the
  shape of `box_outputs` and selects only the target that the RoI has a maximum
  overlap. (Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/roi_data/fast_rcnn.py)  # pylint: disable=line-too-long
  Instead, this function selects the `box_outputs` by the `class_targets` so
  that it doesn't expand `box_targets`.

  The loss computation has two parts: (1) classification loss is softmax on all
  RoIs. (2) box loss is smooth L1-loss on only positive samples of RoIs.
  Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/modeling/fast_rcnn_heads.py  # pylint: disable=line-too-long

  Args:
    class_outputs: a float tensor representing the class prediction for each box
      with a shape of [batch_size, num_boxes, num_classes].
    box_outputs: a float tensor representing the box prediction for each box
      with a shape of [batch_size, num_boxes, num_classes * 4].
    class_targets: a float tensor representing the class label for each box
      with a shape of [batch_size, num_boxes].
    box_targets: a float tensor representing the box label for each box
      with a shape of [batch_size, num_boxes, 4].
    params: the dictionary including training parameters specified in
      default_hparams function in this file.

  Returns:
    total_loss: a float tensor representing total loss reducing from
      class and box losses from all levels.
    cls_loss: a float tensor representing total class loss.
    box_loss: a float tensor representing total box regression loss.
  """
  with tf.name_scope('fast_rcnn_loss'):
    class_targets = tf.to_int32(class_targets)
    class_targets_one_hot = tf.one_hot(class_targets, params['num_classes'])
    class_loss = _fast_rcnn_class_loss(
        class_outputs, class_targets_one_hot)

    # Selects the box from `box_outputs` based on `class_targets`, with which
    # the box has the maximum overlap.
    batch_size, num_rois, _ = box_outputs.get_shape().as_list()
    box_outputs = tf.reshape(box_outputs,
                             [batch_size, num_rois, params['num_classes'], 4])
    # Build a flat index into [batch * rois * classes]: for RoI r in batch b
    # with target class c, the index is
    #   b * num_rois * num_classes + r * num_classes + c.
    box_indices = tf.reshape(
        class_targets + tf.tile(
            tf.expand_dims(
                tf.range(batch_size) * num_rois * params['num_classes'], 1),
            [1, num_rois]) + tf.tile(
                tf.expand_dims(tf.range(num_rois) * params['num_classes'], 0),
                [batch_size, 1]), [-1])
    # Gather the selected rows via a one-hot matmul (a TPU-friendly gather):
    # each one-hot row picks exactly one 4-vector from the flattened outputs.
    box_outputs = tf.matmul(
        tf.one_hot(
            box_indices,
            batch_size * num_rois * params['num_classes'],
            dtype=box_outputs.dtype), tf.reshape(box_outputs, [-1, 4]))
    box_outputs = tf.reshape(box_outputs, [batch_size, -1, 4])
    box_loss = (params['fast_rcnn_box_loss_weight'] *
                _fast_rcnn_box_loss(box_outputs, box_targets, class_targets))
    total_loss = class_loss + box_loss
    return total_loss, class_loss, box_loss
def mask_rcnn_loss(mask_outputs, mask_targets, select_class_targets, params):
  """Computes the mask loss of Mask-RCNN.

  This function implements the mask loss of Mask-RCNN. As the `mask_outputs`
  produces `num_classes` masks for each RoI, the reference model expands
  `mask_targets` to match the shape of `mask_outputs` and selects only the
  target that the RoI has a maximum overlap. (Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/roi_data/mask_rcnn.py)  # pylint: disable=line-too-long
  Instead, this function selects the `mask_outputs` by the `class_targets` so
  that it doesn't expand `mask_targets`.

  Args:
    mask_outputs: a float tensor representing the class prediction for each mask
      with a shape of
      [batch_size, num_masks, mask_height, mask_width, num_classes].
    mask_targets: a float tensor representing the binary mask of ground truth
      labels for each mask with a shape of
      [batch_size, num_masks, mask_height, mask_width].
    select_class_targets: a tensor with a shape of [batch_size, num_masks],
      representing the foreground mask targets.
    params: the dictionary including training parameters specified in
      default_hparams function in this file.

  Returns:
    mask_loss: a float tensor representing total mask loss.
  """
  with tf.name_scope('mask_loss'):
    # Selects the mask from `mask_outputs` based on `class_targets`, with which
    # the mask has the maximum overlap.
    num_partitions = params['num_cores_per_replica'] if params['use_spmd'] else 1
    if num_partitions is not None and num_partitions > 1:
      # Shard the mask tensors along the num_masks dimension (axis 1) across
      # the SPMD partitions.
      mask_outputs = xla_sharding.split(
          mask_outputs, 1, num_partitions, use_sharding_op=True)
      mask_targets = xla_sharding.split(
          mask_targets, 1, num_partitions, use_sharding_op=True)
      select_class_targets = xla_sharding.split(
          select_class_targets, 1, num_partitions, use_sharding_op=True)
    (batch_size, num_masks, mask_height,
     mask_width) = mask_outputs.get_shape().as_list()
    # Only masks whose class target is positive (foreground) contribute;
    # broadcast the per-mask indicator over the spatial dimensions.
    weights = tf.tile(
        tf.reshape(tf.greater(select_class_targets, 0),
                   [batch_size, num_masks, 1, 1]),
        [1, 1, mask_height, mask_width])
    loss = tf.losses.sigmoid_cross_entropy(
        mask_targets, mask_outputs, weights=weights,
        reduction=tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS)
    return params['mrcnn_weight_loss_mask'] * loss
| {
"content_hash": "a4d94cddb87592df546e7ac5b6733a13",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 180,
"avg_line_length": 45.07786885245902,
"alnum_prop": 0.6833348486226021,
"repo_name": "mlperf/training_results_v0.7",
"id": "7b4f0ba8c46bea5fc80d717dcd3fe6d8454b9f78",
"size": "11688",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Google/benchmarks/maskrcnn/implementations/maskrcnn-research-TF-tpu-v4-512/losses.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Awk",
"bytes": "14530"
},
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "172914"
},
{
"name": "C++",
"bytes": "13037795"
},
{
"name": "CMake",
"bytes": "113458"
},
{
"name": "CSS",
"bytes": "70255"
},
{
"name": "Clojure",
"bytes": "622652"
},
{
"name": "Cuda",
"bytes": "1974745"
},
{
"name": "Dockerfile",
"bytes": "149523"
},
{
"name": "Groovy",
"bytes": "160449"
},
{
"name": "HTML",
"bytes": "171537"
},
{
"name": "Java",
"bytes": "189275"
},
{
"name": "JavaScript",
"bytes": "98224"
},
{
"name": "Julia",
"bytes": "430755"
},
{
"name": "Jupyter Notebook",
"bytes": "11091342"
},
{
"name": "Lua",
"bytes": "17720"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "215967"
},
{
"name": "Perl",
"bytes": "1551186"
},
{
"name": "PowerShell",
"bytes": "13906"
},
{
"name": "Python",
"bytes": "36943114"
},
{
"name": "R",
"bytes": "134921"
},
{
"name": "Raku",
"bytes": "7280"
},
{
"name": "Ruby",
"bytes": "4930"
},
{
"name": "SWIG",
"bytes": "140111"
},
{
"name": "Scala",
"bytes": "1304960"
},
{
"name": "Shell",
"bytes": "1312832"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "Starlark",
"bytes": "69877"
},
{
"name": "TypeScript",
"bytes": "243012"
}
],
"symlink_target": ""
} |
from .culture import *
from .recognizer import *
from .model import *
from .extractor import *
from .parser import *
from .utilities import *
| {
"content_hash": "f134b2d3f52a2a72e06e7cccc71c7c22",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 25,
"avg_line_length": 23.666666666666668,
"alnum_prop": 0.7464788732394366,
"repo_name": "matthewshim-ms/Recognizers-Text",
"id": "bfd07762e1bfbf1a30cb8bba5378ed93125747c0",
"size": "142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/libraries/recognizers-text/recognizers_text/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "120"
},
{
"name": "Batchfile",
"bytes": "15522"
},
{
"name": "C#",
"bytes": "3462115"
},
{
"name": "Dockerfile",
"bytes": "1358"
},
{
"name": "HTML",
"bytes": "6764"
},
{
"name": "Java",
"bytes": "894664"
},
{
"name": "JavaScript",
"bytes": "1801316"
},
{
"name": "PowerShell",
"bytes": "1418"
},
{
"name": "Python",
"bytes": "1564998"
},
{
"name": "Shell",
"bytes": "229"
},
{
"name": "TypeScript",
"bytes": "1484565"
}
],
"symlink_target": ""
} |
from sys import winver
from sys import winver as baz
from sys.fob import winver
from sys.fob import winver as baz
from ...fob import oar
from ....fob import oar
from ......fob import oar
from .......fob import oar
from fob import (fob as oar, baz as quox) | {
"content_hash": "ab25024e6120c6ef673280fed10d18a7",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 41,
"avg_line_length": 28.333333333333332,
"alnum_prop": 0.7294117647058823,
"repo_name": "int19h/PTVS",
"id": "38049da1a1428521fe54c65020b588578494f851",
"size": "255",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Python/Tests/TestData/Grammar/FromImportStmt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP.NET",
"bytes": "109"
},
{
"name": "Batchfile",
"bytes": "7975"
},
{
"name": "C",
"bytes": "21444"
},
{
"name": "C#",
"bytes": "11297254"
},
{
"name": "C++",
"bytes": "175131"
},
{
"name": "CSS",
"bytes": "4109"
},
{
"name": "HTML",
"bytes": "213660"
},
{
"name": "JavaScript",
"bytes": "44401"
},
{
"name": "PowerShell",
"bytes": "18157"
},
{
"name": "Pug",
"bytes": "2807"
},
{
"name": "Python",
"bytes": "620501"
},
{
"name": "Rich Text Format",
"bytes": "260880"
},
{
"name": "Smarty",
"bytes": "3663"
},
{
"name": "Tcl",
"bytes": "24968"
},
{
"name": "Vim Snippet",
"bytes": "17303"
}
],
"symlink_target": ""
} |
import datetime
import logging
from django.conf import settings
from django.db import connections
from django.db import DEFAULT_DB_ALIAS
from django_transaction_barrier import postgresql
from django_transaction_barrier import sqlite3
from django_transaction_barrier.models import TransactionCommitBarrier
log = logging.getLogger(__name__)
class TransactionAborted(Exception):
    """Raised when a watched transaction is known to have rolled back."""
def _db_backend(using):
    """Return the barrier backend module for the Django DB alias *using*.

    Raises KeyError for engines other than postgresql_psycopg2 / sqlite3.
    """
    backends = {
        'django.db.backends.postgresql_psycopg2': postgresql,
        'django.db.backends.sqlite3': sqlite3,
    }
    engine = settings.DATABASES[using]['ENGINE']
    return backends[engine]
def get_debug_info(barrier=None):
    """Return a unicode debug summary for *barrier* (or the default DB).

    Includes the barrier dict, the matching TransactionCommitBarrier row
    if one exists, and backend-specific debug info from the DB cursor.
    """
    info = []
    using = DEFAULT_DB_ALIAS
    if barrier:
        using = barrier['using']
        info.append(u'[Barrier %s]' % unicode(barrier))
        commit_barriers = TransactionCommitBarrier.objects.using(using).filter(
            uuid=barrier['uuid'])
        if commit_barriers:
            info.append(u'[TransactionCommitBarrier: %s]' % (
                unicode(commit_barriers[0])))
    cursor = connections[using].cursor()
    info.append(u'[Backend: %s]' % _db_backend(using).get_debug_info(cursor))
    return u','.join(info)
def new_transaction_barrier(using=None):
    """Create a new transaction barrier and return it.

    The barrier implicitly supports only 1 waiter. In other words, for a given
    barrier, only one process/thread/task should call poll_transaction_barrier.
    If more than one calls poll_transaction_barrier the semantics are undefined.

    The caller should consider the barrier opaque and specific to DB backend
    type. ``using`` is the Django DB alias to use."""
    using = using or DEFAULT_DB_ALIAS
    # First save a commit barrier. (Order matters: the row must be part of
    # the transaction whose metadata is captured below.)
    commit_barrier = TransactionCommitBarrier()
    commit_barrier.save(using=using)
    # Second get the current transaction metadata (e.g., a Postgres transaction
    # ID). Using the metadata the backend determines if the transaction finishes
    # (either aborts of commits).
    cursor = connections[using].cursor()
    metadata = _db_backend(using).get_transaction_metadata(cursor)
    return dict(uuid=commit_barrier.uuid, metadata=metadata, using=using)
def delete_transaction_barrier(barrier):
    """Remove the TransactionCommitBarrier row that backs *barrier*."""
    queryset = TransactionCommitBarrier.objects.using(barrier['using'])
    queryset.get(uuid=barrier['uuid']).delete()
def poll_transaction_barrier(barrier):
    """Poll the transaction barrier.

    Return True if the barrier is satisfied, return False if it's still waiting.
    Raise TransactionAborted if the transaction was aborted.
    """
    # Optimistically poll the DB for the TransactionCommitBarrier. If it exists,
    # we know that the transaction is committed successfully.
    if _poll_transaction_commit_barrier(barrier['uuid'], barrier['using']):
        return True
    # If the transaction is finished according to the DB-specific functions,
    # but we can't find the TransactionCommitBarrier, we know the transaction is
    # aborted.
    cursor = connections[barrier['using']].cursor()
    if _db_backend(barrier['using']).is_transaction_complete(
            cursor, barrier['metadata']):
        # Re-check the barrier row: the commit may have landed between the
        # first poll above and the completeness check.
        if _poll_transaction_commit_barrier(barrier['uuid'], barrier['using']):
            return True
        raise TransactionAborted()
    return False
def reap_commit_barriers(age_in_seconds, using=None):
    """Delete all TransactionCommitBarriers older than ``age_in_seconds``.

    If an applications allocates a barrier with new_transaction_barrier, but
    fails to delete it, metadata might linger in the database.
    reap_commit_barriers is a hack to help remove that metadata. ``using`` is
    the Django DB alias to use.

    If your application uses TransactionBarrierTask and never calls
    new_transaction_barrier directly, you can ignore this function.
    """
    using = using or DEFAULT_DB_ALIAS
    # NOTE(review): datetime.now() is timezone-naive; this assumes
    # creation_date is stored in the same local time -- TODO confirm
    # against the TransactionCommitBarrier model definition.
    time_threshold = (
        datetime.datetime.now() - datetime.timedelta(seconds=age_in_seconds))
    TransactionCommitBarrier.objects.using(using).filter(
        creation_date__lt=time_threshold).delete()
def _poll_transaction_commit_barrier(uuid, using):
    """Return True iff a commit-barrier row with *uuid* exists in *using*."""
    try:
        TransactionCommitBarrier.objects.using(using).get(uuid=uuid)
    except TransactionCommitBarrier.DoesNotExist:
        return False
    return True
| {
"content_hash": "47e4723859a84c0270dfa229236f8359",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 78,
"avg_line_length": 35.559322033898304,
"alnum_prop": 0.7433269780743565,
"repo_name": "godaddy/django_transaction_barrier",
"id": "83024db57387a47e534bceb59eace093964e7a53",
"size": "4196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_transaction_barrier/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19462"
},
{
"name": "Shell",
"bytes": "176"
}
],
"symlink_target": ""
} |
import json
import random
import string
from SoftLayer import SoftLayerAPIError, SshKeyManager
from jumpgate.common.error_handling import bad_request, duplicate, not_found
# Fixed RSA public-key body used as a placeholder when the caller supplies
# no public key (combined with a random comment by generate_random_key).
NULL_KEY = "AAAAB3NzaC1yc2EAAAABIwAAAIEArkwv9X8eTVK4F7pMlSt45pWoiakFk" \
    "ZMwG9BjydOJPGH0RFNAy1QqIWBGWv7vS5K2tr+EEO+F8WL2Y/jK4ZkUoQgoi+n7" \
    "DWQVOHsRijcS3LvtO+50Np4yjXYWJKh29JL6GHcp8o7+YKEyVUMB2CSDOP99eF9g5Q0d+1U" \
    "2WVdBWQM="
class KeypairsV2(object):
    """OpenStack-compatible keypairs collection backed by SoftLayer SSH keys."""

    def on_get(self, req, resp, tenant_id):
        """List all SSH keys on the account in OpenStack keypair format."""
        client = req.env['sl_client']
        mgr = SshKeyManager(client)
        keypairs = mgr.list_keys()
        resp.body = {
            'keypairs': [{
                'keypair': format_keypair(keypair)} for keypair in keypairs]}

    def on_post(self, req, resp, tenant_id):
        """Create a keypair; a placeholder key is generated if none is sent."""
        body = json.loads(req.stream.read().decode())
        try:
            name = body['keypair']['name']
            # NOTE: dict.get evaluates its default eagerly, so
            # generate_random_key() runs even when 'public_key' is present.
            key = body['keypair'].get('public_key', generate_random_key())
        except (KeyError, TypeError):
            return bad_request(resp, 'Not all fields exist to create keypair.')
        validate_result = validate_keypair_name(resp, name)
        if not validate_result:
            # validate_keypair_name already set the error on resp.
            return
        client = req.env['sl_client']
        mgr = SshKeyManager(client)
        # Make sure the key with that label doesn't already exist
        existing_keys = mgr.list_keys(label=name)
        if existing_keys:
            return duplicate(resp, 'Duplicate key by that name')
        try:
            keypair = mgr.add_key(key, name)
            resp.body = {'keypair': format_keypair(keypair)}
        except SoftLayerAPIError as e:
            # The SoftLayer API signals these conditions only through the
            # fault string, so match on its text.
            if 'Unable to generate a fingerprint' in e.faultString:
                return bad_request(resp, e.faultString)
            if 'SSH key already exists' in e.faultString:
                return duplicate(resp, e.faultString)
            raise
class KeypairV2(object):
    """REST resource for a single keypair addressed by its SoftLayer label."""

    def on_get(self, req, resp, tenant_id, keypair_name):
        """Return the keypair whose label matches *keypair_name*."""
        mgr = SshKeyManager(req.env['sl_client'])
        matches = mgr.list_keys(label=keypair_name)
        if not matches:
            return not_found(resp, 'KeyPair not found')
        keypair = mgr.get_key(matches[0]['id'])
        resp.body = {'keypair': format_keypair(keypair)}

    def on_delete(self, req, resp, tenant_id, keypair_name):
        """Delete the keypair whose label matches *keypair_name*."""
        mgr = SshKeyManager(req.env['sl_client'])
        matches = mgr.list_keys(label=keypair_name)
        if not matches:
            return not_found(resp, 'KeyPair not Found')
        mgr.delete_key(matches[0]['id'])
        resp.status = 202
def format_keypair(keypair):
    """Translate a SoftLayer SSH-key record into the Nova keypair shape.

    Expects a mapping with 'fingerprint', 'label' and 'key' entries; 'user'
    has no SoftLayer counterpart here, so it is always None.
    """
    formatted = {
        'name': keypair['label'],
        'public_key': keypair['key'],
        'fingerprint': keypair['fingerprint'],
    }
    formatted['user'] = None
    return formatted
def generate_random_key():
    """Build a placeholder public key: NULL_KEY plus a random comment tag.

    The 8-character random suffix only serves to make the key's comment
    field unique; the key material itself is the fixed NULL_KEY blob.
    """
    chars = string.digits + string.ascii_letters
    suffix = "".join(random.choice(chars) for _ in range(8))
    return "ssh-rsa %s %s@invalid" % (NULL_KEY, suffix)
def validate_keypair_name(resp, key_name):
    """Check that a keypair name is safe and of acceptable length.

    On failure, records a 400 on C{resp} via bad_request and returns False;
    returns True when the name is acceptable.
    """
    allowed = set("_- " + string.digits + string.ascii_letters)
    if any(ch not in allowed for ch in key_name):
        bad_request(
            resp, 'Keypair name contains unsafe characters')
        return False
    # Length must be 1..255 inclusive (an empty name also fails here).
    if not 0 < len(key_name) < 256:
        bad_request(
            resp, 'Keypair name must be between 1 and 255 characters long')
        return False
    return True
| {
"content_hash": "aa84e838cce363f33ed8190ce93ead25",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 79,
"avg_line_length": 32.04587155963303,
"alnum_prop": 0.612081305468079,
"repo_name": "BillArnold/barnoldjg",
"id": "db985ec5d6816033a017b4ef498ae11eac356173",
"size": "3493",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jumpgate/compute/drivers/sl/keypairs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "162307"
},
{
"name": "Shell",
"bytes": "148"
}
],
"symlink_target": ""
} |
"""
Tests for implementations of L{IReactorUDP} and the UDP parts of
L{IReactorSocket}.
"""
from __future__ import division, absolute_import
__metaclass__ = type
import socket
from zope.interface import implementer
from zope.interface.verify import verifyObject
from twisted.python import context
from twisted.python.log import ILogContext, err
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.internet.defer import Deferred, maybeDeferred
from twisted.internet.interfaces import (
ILoggingContext, IListeningPort, IReactorUDP, IReactorSocket)
from twisted.internet.address import IPv4Address, IPv6Address
from twisted.internet.protocol import DatagramProtocol
from twisted.internet.test.connectionmixins import (LogObserverMixin,
findFreePort)
from twisted.internet import defer, error
from twisted.test.test_udp import Server, GoodClient
from twisted.trial.unittest import SkipTest
class DatagramTransportTestsMixin(LogObserverMixin):
    """
    Mixin defining tests which apply to any port/datagram based transport.
    """
    def test_startedListeningLogMessage(self):
        """
        When a port starts, a message including a description of the associated
        protocol is logged.
        """
        # observe() (from LogObserverMixin) captures log events into a list.
        loggedMessages = self.observe()
        reactor = self.buildReactor()

        @implementer(ILoggingContext)
        class SomeProtocol(DatagramProtocol):
            def logPrefix(self):
                return "Crazy Protocol"
        protocol = SomeProtocol()

        p = self.getListeningPort(reactor, protocol)
        expectedMessage = "Crazy Protocol starting on %d" % (p.getHost().port,)
        self.assertEqual((expectedMessage,), loggedMessages[0]['message'])

    def test_connectionLostLogMessage(self):
        """
        When a connection is lost a message is logged containing an
        address identifying the port and the fact that it was closed.
        """
        loggedMessages = self.observe()
        reactor = self.buildReactor()
        p = self.getListeningPort(reactor, DatagramProtocol())
        expectedMessage = "(UDP Port %s Closed)" % (p.getHost().port,)

        def stopReactor(ignored):
            reactor.stop()

        def doStopListening():
            # Discard the startup messages so loggedMessages[0] is the
            # close message emitted by stopListening.
            del loggedMessages[:]
            maybeDeferred(p.stopListening).addCallback(stopReactor)

        reactor.callWhenRunning(doStopListening)
        self.runReactor(reactor)

        self.assertEqual((expectedMessage,), loggedMessages[0]['message'])

    def test_stopProtocolScheduling(self):
        """
        L{DatagramProtocol.stopProtocol} is called asynchronously (ie, not
        re-entrantly) when C{stopListening} is used to stop the datagram
        transport.
        """
        class DisconnectingProtocol(DatagramProtocol):

            started = False
            stopped = False
            inStartProtocol = False
            stoppedInStart = False

            def startProtocol(self):
                self.started = True
                self.inStartProtocol = True
                self.transport.stopListening()
                # If stopProtocol ran re-entrantly it happened between the
                # two lines around this flag flip.
                self.inStartProtocol = False

            def stopProtocol(self):
                self.stopped = True
                self.stoppedInStart = self.inStartProtocol
                reactor.stop()

        reactor = self.buildReactor()
        protocol = DisconnectingProtocol()
        self.getListeningPort(reactor, protocol)
        self.runReactor(reactor)

        self.assertTrue(protocol.started)
        self.assertTrue(protocol.stopped)
        self.assertFalse(protocol.stoppedInStart)
class UDPPortTestsMixin(object):
    """
    Tests for L{IReactorUDP.listenUDP} and
    L{IReactorSocket.adoptDatagramPort}.
    """
    def test_interface(self):
        """
        L{IReactorUDP.listenUDP} returns an object providing L{IListeningPort}.
        """
        reactor = self.buildReactor()
        port = self.getListeningPort(reactor, DatagramProtocol())
        self.assertTrue(verifyObject(IListeningPort, port))

    def test_getHost(self):
        """
        L{IListeningPort.getHost} returns an L{IPv4Address} giving a
        dotted-quad of the IPv4 address the port is listening on as well as
        the port number.
        """
        host, portNumber = findFreePort(type=socket.SOCK_DGRAM)
        reactor = self.buildReactor()
        port = self.getListeningPort(
            reactor, DatagramProtocol(), port=portNumber, interface=host)
        self.assertEqual(
            port.getHost(), IPv4Address('UDP', host, portNumber))

    def test_getHostIPv6(self):
        """
        L{IListeningPort.getHost} returns an L{IPv6Address} when listening on
        an IPv6 interface.
        """
        reactor = self.buildReactor()
        port = self.getListeningPort(
            reactor, DatagramProtocol(), interface='::1')
        addr = port.getHost()
        self.assertEqual(addr.host, "::1")
        self.assertIsInstance(addr, IPv6Address)

    def test_invalidInterface(self):
        """
        An L{InvalidAddressError} is raised when trying to listen on an address
        that isn't a valid IPv4 or IPv6 address.
        """
        reactor = self.buildReactor()
        self.assertRaises(
            error.InvalidAddressError, reactor.listenUDP, DatagramProtocol(),
            0, interface='example.com')

    def test_logPrefix(self):
        """
        Datagram transports implement L{ILoggingContext.logPrefix} to return a
        message reflecting the protocol they are running.
        """
        class CustomLogPrefixDatagramProtocol(DatagramProtocol):
            def __init__(self, prefix):
                self._prefix = prefix
                self.system = Deferred()

            def logPrefix(self):
                return self._prefix

            def datagramReceived(self, bytes, addr):
                # Fire only once; capture the logging system active while
                # the datagram was being delivered.
                if self.system is not None:
                    system = self.system
                    self.system = None
                    system.callback(context.get(ILogContext)["system"])

        reactor = self.buildReactor()
        protocol = CustomLogPrefixDatagramProtocol("Custom Datagrams")
        d = protocol.system
        port = self.getListeningPort(reactor, protocol)
        address = port.getHost()

        def gotSystem(system):
            self.assertEqual("Custom Datagrams (UDP)", system)
        d.addCallback(gotSystem)
        d.addErrback(err)
        d.addCallback(lambda ignored: reactor.stop())

        # Send the port a datagram from itself to trigger datagramReceived.
        port.write(b"some bytes", ('127.0.0.1', address.port))
        self.runReactor(reactor)

    def test_str(self):
        """
        C{str()} on the listening port object includes the port number.
        """
        reactor = self.buildReactor()
        port = self.getListeningPort(reactor, DatagramProtocol())
        self.assertIn(str(port.getHost().port), str(port))

    def test_repr(self):
        """
        C{repr()} on the listening port object includes the port number.
        """
        reactor = self.buildReactor()
        port = self.getListeningPort(reactor, DatagramProtocol())
        self.assertIn(repr(port.getHost().port), str(port))

    def test_writeToIPv6Interface(self):
        """
        Writing to an IPv6 UDP socket on the loopback interface succeeds.
        """
        reactor = self.buildReactor()
        server = Server()
        serverStarted = server.startedDeferred = defer.Deferred()
        self.getListeningPort(reactor, server, interface="::1")
        client = GoodClient()
        clientStarted = client.startedDeferred = defer.Deferred()
        self.getListeningPort(reactor, client, interface="::1")
        cAddr = client.transport.getHost()

        def cbClientStarted(ignored):
            """
            Send a datagram from the client once it's started.

            @param ignored: a list of C{[None, None]}, which is ignored
            @returns: a deferred which fires when the server has received a
                datagram.
            """
            client.transport.write(
                b"spam", ("::1", server.transport.getHost().port))
            serverReceived = server.packetReceived = defer.Deferred()
            return serverReceived

        def cbServerReceived(ignored):
            """
            Stop the reactor after a datagram is received.

            @param ignored: C{None}, which is ignored
            @returns: C{None}
            """
            reactor.stop()

        d = defer.gatherResults([serverStarted, clientStarted])
        d.addCallback(cbClientStarted)
        d.addCallback(cbServerReceived)
        d.addErrback(err)
        self.runReactor(reactor)

        packet = server.packets[0]
        self.assertEqual(packet, (b'spam', (cAddr.host, cAddr.port)))

    def test_connectedWriteToIPv6Interface(self):
        """
        An IPv6 address can be passed as the C{interface} argument to
        L{listenUDP}. The resulting Port accepts IPv6 datagrams.
        """
        reactor = self.buildReactor()
        server = Server()
        serverStarted = server.startedDeferred = defer.Deferred()
        self.getListeningPort(reactor, server, interface="::1")
        client = GoodClient()
        clientStarted = client.startedDeferred = defer.Deferred()
        self.getListeningPort(reactor, client, interface="::1")
        cAddr = client.transport.getHost()

        def cbClientStarted(ignored):
            """
            Send a datagram from the client once it's started.

            @param ignored: a list of C{[None, None]}, which is ignored
            @returns: a deferred which fires when the server has received a
                datagram.
            """
            # Unlike test_writeToIPv6Interface, connect first and write
            # without an explicit destination address.
            client.transport.connect("::1", server.transport.getHost().port)
            client.transport.write(b"spam")
            serverReceived = server.packetReceived = defer.Deferred()
            return serverReceived

        def cbServerReceived(ignored):
            """
            Stop the reactor after a datagram is received.

            @param ignored: C{None}, which is ignored
            @returns: C{None}
            """
            reactor.stop()

        d = defer.gatherResults([serverStarted, clientStarted])
        d.addCallback(cbClientStarted)
        d.addCallback(cbServerReceived)
        d.addErrback(err)
        self.runReactor(reactor)

        packet = server.packets[0]
        self.assertEqual(packet, (b'spam', (cAddr.host, cAddr.port)))

    def test_writingToHostnameRaisesInvalidAddressError(self):
        """
        Writing to a hostname instead of an IP address will raise an
        L{InvalidAddressError}.
        """
        reactor = self.buildReactor()
        port = self.getListeningPort(reactor, DatagramProtocol())
        self.assertRaises(
            error.InvalidAddressError,
            port.write, 'spam', ('example.invalid', 1))

    def test_writingToIPv6OnIPv4RaisesInvalidAddressError(self):
        """
        Writing to an IPv6 address on an IPv4 socket will raise an
        L{InvalidAddressError}.
        """
        reactor = self.buildReactor()
        port = self.getListeningPort(
            reactor, DatagramProtocol(), interface="127.0.0.1")
        self.assertRaises(
            error.InvalidAddressError, port.write, 'spam', ('::1', 1))

    def test_writingToIPv4OnIPv6RaisesInvalidAddressError(self):
        """
        Writing to an IPv4 address on an IPv6 socket will raise an
        L{InvalidAddressError}.
        """
        reactor = self.buildReactor()
        port = self.getListeningPort(
            reactor, DatagramProtocol(), interface="::1")
        self.assertRaises(
            error.InvalidAddressError, port.write, 'spam', ('127.0.0.1', 1))

    def test_connectingToHostnameRaisesInvalidAddressError(self):
        """
        Connecting to a hostname instead of an IP address will raise an
        L{InvalidAddressError}.
        """
        reactor = self.buildReactor()
        port = self.getListeningPort(reactor, DatagramProtocol())
        self.assertRaises(
            error.InvalidAddressError, port.connect, 'example.invalid', 1)

    def test_allowBroadcast(self):
        """
        L{IListeningPort.setBroadcastAllowed} sets broadcast to be allowed
        on the socket.
        """
        reactor = self.buildReactor()
        port = self.getListeningPort(reactor, DatagramProtocol())
        port.setBroadcastAllowed(True)
        self.assertTrue(port.getBroadcastAllowed())
class UDPServerTestsBuilder(ReactorBuilder,
                            UDPPortTestsMixin, DatagramTransportTestsMixin):
    """
    Run the L{UDPPortTestsMixin} suite against ports created with
    L{IReactorUDP.listenUDP} (newly created UDP sockets).
    """
    requiredInterfaces = (IReactorUDP,)

    def getListeningPort(self, reactor, protocol, port=0, interface='',
                         maxPacketSize=8192):
        """
        Ask C{reactor} for a freshly bound UDP listening port.

        @param reactor: A reactor used to build the returned
            L{IListeningPort} provider.
        @type reactor: L{twisted.internet.interfaces.IReactorUDP}

        @see: L{twisted.internet.IReactorUDP.listenUDP} for other
            argument and return types.
        """
        listeningPort = reactor.listenUDP(port, protocol,
                                          interface=interface,
                                          maxPacketSize=maxPacketSize)
        return listeningPort
class UDPFDServerTestsBuilder(ReactorBuilder,
                              UDPPortTestsMixin, DatagramTransportTestsMixin):
    """
    Run L{UDPPortTestsMixin} tests using adopted UDP sockets.
    """
    requiredInterfaces = (IReactorSocket,)

    def getListeningPort(self, reactor, protocol, port=0, interface='',
                         maxPacketSize=8192):
        """
        Get a UDP port from a reactor, wrapping an already-initialized file
        descriptor.

        @param reactor: A reactor used to build the returned
            L{IListeningPort} provider.
        @type reactor: L{twisted.internet.interfaces.IReactorSocket}

        @param port: A port number to which the adopted socket will be
            bound.
        @type port: C{int}

        @param interface: The local IPv4 or IPv6 address to which the
            adopted socket will be bound.  defaults to '', ie all IPv4
            addresses.
        @type interface: C{str}

        @see: L{twisted.internet.IReactorSocket.adoptDatagramPort} for other
            argument and return types.
        """
        if IReactorSocket.providedBy(reactor):
            if ':' in interface:
                # A colon can only appear in an IPv6 literal; resolve to
                # the full sockaddr tuple getaddrinfo reports for it.
                domain = socket.AF_INET6
                address = socket.getaddrinfo(interface, port)[0][4]
            else:
                domain = socket.AF_INET
                address = (interface, port)
            portSock = socket.socket(domain, socket.SOCK_DGRAM)
            portSock.bind(address)
            # The reactor expects a non-blocking descriptor.
            portSock.setblocking(False)
            try:
                return reactor.adoptDatagramPort(
                    portSock.fileno(), portSock.family, protocol,
                    maxPacketSize)
            finally:
                # The socket should still be open; fileno will raise if it is
                # not.
                portSock.fileno()
                # Now clean it up, because the rest of the test does not need
                # it.  (adoptDatagramPort duplicated the descriptor.)
                portSock.close()
        else:
            raise SkipTest("Reactor does not provide IReactorSocket")
# Generate concrete TestCase classes (one per installed reactor) from each
# builder and inject them into this module's namespace so trial collects them.
globals().update(UDPServerTestsBuilder.makeTestCaseClasses())
globals().update(UDPFDServerTestsBuilder.makeTestCaseClasses())
| {
"content_hash": "00ddbc567141e7070ffce7767881d952",
"timestamp": "",
"source": "github",
"line_count": 454,
"max_line_length": 79,
"avg_line_length": 35.301762114537446,
"alnum_prop": 0.6006114681475011,
"repo_name": "hlzz/dotfiles",
"id": "53bb2d399a240a70ae78373f7d74b450b9fcd29d",
"size": "16101",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "graphics/VTK-7.0.0/ThirdParty/Twisted/twisted/internet/test/test_udp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "1240"
},
{
"name": "Arc",
"bytes": "38"
},
{
"name": "Assembly",
"bytes": "449468"
},
{
"name": "Batchfile",
"bytes": "16152"
},
{
"name": "C",
"bytes": "102303195"
},
{
"name": "C++",
"bytes": "155056606"
},
{
"name": "CMake",
"bytes": "7200627"
},
{
"name": "CSS",
"bytes": "179330"
},
{
"name": "Cuda",
"bytes": "30026"
},
{
"name": "D",
"bytes": "2152"
},
{
"name": "Emacs Lisp",
"bytes": "14892"
},
{
"name": "FORTRAN",
"bytes": "5276"
},
{
"name": "Forth",
"bytes": "3637"
},
{
"name": "GAP",
"bytes": "14495"
},
{
"name": "GLSL",
"bytes": "438205"
},
{
"name": "Gnuplot",
"bytes": "327"
},
{
"name": "Groff",
"bytes": "518260"
},
{
"name": "HLSL",
"bytes": "965"
},
{
"name": "HTML",
"bytes": "2003175"
},
{
"name": "Haskell",
"bytes": "10370"
},
{
"name": "IDL",
"bytes": "2466"
},
{
"name": "Java",
"bytes": "219109"
},
{
"name": "JavaScript",
"bytes": "1618007"
},
{
"name": "Lex",
"bytes": "119058"
},
{
"name": "Lua",
"bytes": "23167"
},
{
"name": "M",
"bytes": "1080"
},
{
"name": "M4",
"bytes": "292475"
},
{
"name": "Makefile",
"bytes": "7112810"
},
{
"name": "Matlab",
"bytes": "1582"
},
{
"name": "NSIS",
"bytes": "34176"
},
{
"name": "Objective-C",
"bytes": "65312"
},
{
"name": "Objective-C++",
"bytes": "269995"
},
{
"name": "PAWN",
"bytes": "4107117"
},
{
"name": "PHP",
"bytes": "2690"
},
{
"name": "Pascal",
"bytes": "5054"
},
{
"name": "Perl",
"bytes": "485508"
},
{
"name": "Pike",
"bytes": "1338"
},
{
"name": "Prolog",
"bytes": "5284"
},
{
"name": "Python",
"bytes": "16799659"
},
{
"name": "QMake",
"bytes": "89858"
},
{
"name": "Rebol",
"bytes": "291"
},
{
"name": "Ruby",
"bytes": "21590"
},
{
"name": "Scilab",
"bytes": "120244"
},
{
"name": "Shell",
"bytes": "2266191"
},
{
"name": "Slash",
"bytes": "1536"
},
{
"name": "Smarty",
"bytes": "1368"
},
{
"name": "Swift",
"bytes": "331"
},
{
"name": "Tcl",
"bytes": "1911873"
},
{
"name": "TeX",
"bytes": "11981"
},
{
"name": "Verilog",
"bytes": "3893"
},
{
"name": "VimL",
"bytes": "595114"
},
{
"name": "XSLT",
"bytes": "62675"
},
{
"name": "Yacc",
"bytes": "307000"
},
{
"name": "eC",
"bytes": "366863"
}
],
"symlink_target": ""
} |
"""Script for initializing EMA weights."""
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow import logging
import sys
# Command-line arguments: copy weights from src_cp into the graph restored
# from latest_checkpoint, then seed its EMA shadow variables and save.
src_cp = sys.argv[1]  # the checkpoint with the weights
latest_checkpoint = sys.argv[2]  # the checkpoint with the graph
destination = sys.argv[3]  # the model destination

meta_graph_location = latest_checkpoint + ".meta"

sess = tf.InteractiveSession()
saver = tf.train.import_meta_graph(meta_graph_location)
saver.restore(sess, latest_checkpoint)

# Reader for the source checkpoint; tensors are fetched by variable name
# (the part before the ":0" suffix).
ckpt_reader = pywrap_tensorflow.NewCheckpointReader(src_cp)

# update all variables
for tv in tf.global_variables():
    if ("ExponentialMovingAverage" not in tv.name) and ("layers_keep_probs" not in tv.name):
        srct = ckpt_reader.get_tensor(tv.name.split(":")[0])
        logging.info("Replacing tensor: {} with new values".format(tv.name))
        sess.run(tf.assign(tv, srct))

# initialize the moving average
for tv in tf.global_variables():
    if ("ExponentialMovingAverage" in tv.name):
        logging.info("Placing EMA tensor: {} with new values".format(tv.name))
        # Strip the leading scope component and the trailing
        # "ExponentialMovingAverage" suffix to recover the shadowed
        # variable's name in the source checkpoint.
        # NOTE(review): assumes EMA names follow
        # "<scope>/<var path>/ExponentialMovingAverage" — confirm against
        # the graph that produced src_cp.
        fetch_tensor = ckpt_reader.get_tensor("/".join(tv.name.split(":")[0].split("/")[1:-1]))
        sess.run(tf.assign(tv, fetch_tensor))

# max_to_keep=0 keeps every checkpoint this saver writes.
saver = tf.train.Saver(max_to_keep=0)
saver.save(sess, destination)
"content_hash": "6fe2775bf1cf9a232742929d088d8519",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 95,
"avg_line_length": 37.114285714285714,
"alnum_prop": 0.7128560431100847,
"repo_name": "mpekalski/Y8M",
"id": "ce91bb367b272cdd194d881621616a42f39acf69",
"size": "1875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frame_level_code/generate_EMAmodel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "31269"
},
{
"name": "Makefile",
"bytes": "1455"
},
{
"name": "Python",
"bytes": "593202"
},
{
"name": "Shell",
"bytes": "16226"
}
],
"symlink_target": ""
} |
"""
Contains the DispatcherServer class and Dispatcher XMLRPC client.
The DispatcherServer takes in requests to perform commands and passes
those requests onto available nodes if any.
The DispatcherClient provides and XMLRPC-over-messagebus interface
to the dispatcher for querying the dispatcher status out-of-band.
"""
import os
import signal
from conary.deps import deps
from conary.lib import util
from rmake import errors
from rmake.lib.apiutils import api, api_parameters, api_return, freeze, thaw
from rmake.build import buildjob
from rmake.build import buildtrove
from rmake.lib import apirpc
from rmake.lib import flavorutil
from rmake.lib import logger
from rmake.lib import server
from rmake.server import publish
from rmake.messagebus import busclient
from rmake.multinode import messages
from rmake.multinode import nodeclient
from rmake.multinode import nodetypes
class DispatcherServer(server.Server):
    """
    The Dispatcher is given a list of packages to resolve and build
    and determines where the best location to build them is.
    """
    def __init__(self, cfg, db):
        self.client = DispatcherNodeClient(cfg.getMessageBusHost(),
                                           cfg.messageBusPort, cfg, self)
        server.Server.__init__(self, self.client.getLogger())
        subscriberLog = logger.Logger('subscriber', cfg.getSubscriberLogPath())
        # In multinode rMake, publishing events to external subscribers
        # is done by a process forked from the dispatcher (since the
        # dispatcher gets all events anyway) instead of passing them
        # back to the rMake XMLRPC front end.
        self._publisher = publish._RmakeServerPublisher(subscriberLog,
                                                        db, self._fork)
        # detailed data about the nodes is stored in the NodeList.
        self._nodes = NodeList(db, self._logger)
        # commands that haven't been assigned to a node.
        self._queuedCommands = []
        self._queuedCommandsById = {}
        self.db = db

    def getClient(self):
        """Return the messagebus node client this server communicates with."""
        return self.client

    def _installSignalHandlers(self):
        self.client._installSignalHandlers()
        def _interrupt(*args, **kw):
            import epdb
            if hasattr(epdb, 'serve'):
                epdb.serve()
            else:
                epdb.st()
        # if you kill the dispatcher w/ SIGUSR1 you'll get a breakpoint.
        import signal
        signal.signal(signal.SIGUSR1, _interrupt)

    def listNodes(self):
        """Return the session ids of all known worker nodes."""
        return [ x.sessionId for x in self._nodes.getNodes() ]

    def listQueuedCommands(self):
        """Return the message ids of commands not yet assigned to a node."""
        return [ x.getMessageId() for x in self._queuedCommands ]

    def listAssignedCommands(self):
        """Return (commandId, sessionId) pairs for assigned commands."""
        return self._nodes.getCommandAssignments()

    def getNodeByName(self, nodeName):
        """Look up a node by its configured name; raises RmakeError if absent."""
        try:
            return self._nodes.getNodeByName(nodeName)
        except IndexError:
            raise errors.RmakeError('No such node %s' % nodeName)

    def getNamesByIds(self, idList):
        """Map session ids to node names for the ids that are known."""
        return self._nodes.getNamesByIds(idList)

    def requestCommandAssignment(self, *commands):
        """
        Entry point from message bus to assign commands.  We
        will assign the command if we can.
        """
        for command in commands:
            if isinstance(command, messages.StopCommand):
                # stop commands _always_ get assigned immediately.
                self.handleStopCommand(command)
            else:
                # just queue this command, we'll pull it off the queue
                # ASAP in assignQueuedCommands if possible.
                self._queuedCommands.append(command)
                self._queuedCommandsById[command.getCommandId()] = command
        self._assignQueuedCommands()

    def handleStopCommand(self, command):
        """Route a stop command to the node running its target, or drop the
        target from the queue if it was never assigned."""
        targetId = command.getTargetCommandId()
        node = self._nodes.getNodeForCommand(targetId)
        if node is not None:
            self._nodes.assignCommand(command, node)
            self._sendCommandToNode(command, node)
        elif targetId in self._queuedCommandsById:
            command = self._queuedCommandsById[targetId]
            self._queuedCommands.remove(command)
        else:
            # guess we don't know about this command or its node is gone.
            self.warning('dropped stop command for %s' % targetId)

    def _assignQueuedCommands(self):
        """
        Attempts to assign queued commands if there are available nodes
        """
        if not self._queuedCommands:
            return
        # attempt to assign commands. See what the node manager
        # thinks we can assign.
        assignments = self._nodes.assignCommands(list(self._queuedCommands))
        for (command, node) in assignments:
            self._sendCommandToNode(command, node)
            self._queuedCommands.remove(command)
            self._queuedCommandsById.pop(command.getCommandId())

    def _sendCommandToNode(self, command, node):
        """Send an already-assigned command to its node over the messagebus."""
        self.log('sending %s to node %s' % (command.getCommandId(),
                                            node.sessionId))
        self.client.assignCommand(command, node)

    def nodeRegistered(self, sessionId, node):
        """
        Entry point from messagebus client to alert dispatcher that
        a node connected.
        """
        self.log('Worker node %s connected' % sessionId)
        self._nodes.add(sessionId, node)
        self._assignQueuedCommands()

    def nodeUpdated(self, sessionId, nodeInfo, commandIds):
        """
        Entry point from messagebus client to alert dispatcher that
        a node connected.
        """
        if sessionId in self._nodes:
            self._nodes.updateStatus(sessionId, nodeInfo, commandIds)
            self._assignQueuedCommands()
        else:
            self.log('Discarding heartbeat from unknown %s' % sessionId)

    def nodeDisconnected(self, sessionId):
        """
        Entry point from messagebus client to alert dispatcher that
        a node disconnected.
        """
        if sessionId in self._nodes:
            self.log('Worker node %s disconnected' % sessionId)
            self._nodes.remove(sessionId)
            self._assignQueuedCommands()

    def commandCompleted(self, commandId):
        """
        Entry point from messagebus client.
        """
        command = self._nodes.removeCommand(commandId)
        # Removing the command frees a slot, so try to assign more work.
        self._assignQueuedCommands()

    def commandErrored(self, commandId):
        """
        Entry point from messagebus client.
        """
        command = self._nodes.removeCommand(commandId)
        self._assignQueuedCommands()

    def commandInProgress(self, commandId):
        """
        Entry point from messagebus client.
        """
        pass

    def eventsOccurred(self, sessionId, jobId, (apiVer, eventList)):
        """
        Entry point from messagebus client that new events were sent.
        """
        # send all events to the publisher, which will send them
        # off to connected subscribers.
        self._publisher.addEvent(jobId, eventList)

    def _serveLoopHook(self):
        # Called once per serve loop iteration: flush queued subscriber
        # events and reap finished child processes.
        self._publisher.emitEvents()
        self._collectChildren()

    def _pidDied(self, pid, status, name=None):
        server.Server._pidDied(self, pid, status, name=name)
        if pid == self._publisher._emitPid: # rudimentary locking for emits
            self._publisher._emitPid = 0   # only allow one emitEvent process
                                           # at a time.

    def serve(self):
        self.serve_forever()

    def handleRequestIfReady(self, sleepTime=0.1):
        # Pump the messagebus client and mirror its halt flag onto ours.
        self.client.handleRequestIfReady(sleepTime)
        self._halt = self._halt or self.client._halt

    def log(self, text):
        self.client.getBusClient().logger.info(text)
class DispatcherNodeClient(nodeclient.NodeClient):
    """
    Low level interface between Dispatcher and Message Bus.

    Also provides the XMLRPC-over-messagebus interface.
    """
    sessionClass = 'DSP'  # type information used by messagebus to classify
                          # connections.
    name = 'dispatcher'   # Name used by logging.

    # NodeClient uses this list and automatically subscribes to these
    # message channels.
    subscriptions = ['/register?nodeType=%s' % nodetypes.WorkerNode.nodeType,
                     '/command',
                     '/event',
                     '/internal/nodes',
                     '/nodestatus',
                     '/commandstatus']

    def _signalHandler(self, sigNum, frame):
        if sigNum == signal.SIGINT:
            # SIGINT should be handled by the rMakeServer, which is the parent
            # pid of Dispatcher.
            self.error('SIGINT caught and ignored.')
        else:
            nodeclient.NodeClient._signalHandler(self, sigNum, frame)

    @api(version=1)
    @api_return(1, None)
    def listNodes(self, callData):
        """RPC: session ids of all connected worker nodes."""
        return self.server.listNodes()

    @api(version=1)
    @api_return(1, None)
    def listQueuedCommands(self, callData):
        """RPC: message ids of commands still waiting for a node."""
        return self.server.listQueuedCommands()

    @api(version=1)
    @api_return(1, None)
    def listAssignedCommands(self, callData):
        """RPC: (commandId, sessionId) pairs for assigned commands."""
        return self.server.listAssignedCommands()

    @api(version=1)
    @api_parameters(1, None)
    @api_return(1, None)
    def getNodeByName(self, callData, nodeName):
        """RPC: resolve a node name to its messagebus session id."""
        node = self.server.getNodeByName(nodeName)
        return node.sessionId

    @api(version=1)
    @api_parameters(1, None)
    @api_return(1, None)
    def getNamesByIds(self, callData, idList):
        """RPC: map session ids back to node names."""
        return self.server.getNamesByIds(idList)

    @api(version=1)
    @api_parameters(1, None, None)
    @api_return(1, None)
    def suspendNodes(self, callData, idList, suspend):
        """RPC: suspend (or resume, if C{suspend} is false) job assignment
        for each node in C{idList}."""
        for nodeId in idList:
            if suspend:
                self.server._nodes.suspend(nodeId)
            else:
                self.server._nodes.resume(nodeId)

    def messageReceived(self, m):
        """
        Handles messages from the messagebus.
        """
        nodeclient.NodeClient.messageReceived(self, m)
        if isinstance(m, messages.RegisterNodeMessage):
            self.server.nodeRegistered(m.getSessionId(), m.getNode())
        elif isinstance(m, messages.NodeInfo):
            # Periodic heartbeat carrying load info and active commands.
            self.server.nodeUpdated(m.getSessionId(), m.getNodeInfo(),
                                    m.getCommands())
        elif isinstance(m, messages._Command):
            if m.getTargetNode():
                # we've already assigned this command
                return
            self.server.log('Received Command: %s' % m.getCommandId())
            self.server.requestCommandAssignment(m)
        elif isinstance(m, messages.EventList):
            self.server.eventsOccurred(m.getSessionId(), m.getJobId(),
                                       m.getEventList())
        elif isinstance(m, messages.NodeStatus):
            if m.isDisconnected():
                self.server.nodeDisconnected(m.getStatusId())
        elif isinstance(m, messages.CommandStatus):
            if m.isCompleted():
                self.server.commandCompleted(m.getCommandId())
            elif m.isInProgress():
                self.server.commandInProgress(m.getCommandId())
            elif m.isErrored():
                self.server.commandErrored(m.getCommandId())

    def assignCommand(self, command, node):
        """Stamp C{command} with its target node and publish it."""
        command.setTargetNode(node.sessionId)
        self.getBusClient().sendMessage('/command', command)
class DispatcherRPCClient(object):
    """Thin client that forwards dispatcher queries over the messagebus.

    Every public method simply delegates to a C{SessionProxy} bound to the
    dispatcher's session.
    """

    def __init__(self, client, sessionId):
        self.proxy = busclient.SessionProxy(DispatcherNodeClient, client,
                                            sessionId)

    def listNodes(self):
        """Session ids of all connected worker nodes."""
        return self.proxy.listNodes()

    def listQueuedCommands(self):
        """Message ids of commands still waiting for assignment."""
        return self.proxy.listQueuedCommands()

    def listAssignedCommands(self):
        """(commandId, sessionId) pairs for commands already assigned."""
        return self.proxy.listAssignedCommands()

    def getNodeByName(self, nodeName):
        """Resolve a node name to its session id."""
        return self.proxy.getNodeByName(nodeName)

    def getNamesByIds(self, idList):
        """Map session ids back to node names."""
        return self.proxy.getNamesByIds(idList)

    def suspendNodes(self, idList, suspend=True):
        """Suspend (or resume) job assignment for the given nodes."""
        return self.proxy.suspendNodes(idList, suspend)
class NodeList(object):
    """
    Tracks connected worker nodes, their free capacity (build slots and
    chroot slots), and which commands are assigned to which node.
    """
    def __init__(self, nodeDb, logger=None):
        self._nodes = {}             # sessionId -> node object
        self._assignedCommands = {}  # sessionId -> [command, ...]
        self._commands = {}          # commandId -> (sessionId, command)
        self._openSlots = {}         # sessionId -> free build slots
        self._openChroots = {}       # sessionId -> free chroot slots
        self._commandsByJob = {}     # jobId -> [command, ...]
        self._suspended = set()      # sessionIds not accepting new work
        self.nodeDb = nodeDb
        self.logger = logger

    def add(self, sessionId, node):
        """Register a newly connected node and record it in the node db."""
        node.sessionId = sessionId
        self._nodes[sessionId] = node
        self._assignedCommands[sessionId] = []
        self._openSlots[sessionId] = node.slots
        self._openChroots[sessionId] = node.chrootLimit
        self.nodeDb.addNode(node.name, node.host, node.slots, node.buildFlavors,
                            node.chroots)

    def remove(self, sessionId):
        """Forget a node and all its command assignments.

        A no-op when the sessionId is unknown; previously an unknown id
        caused an AttributeError (``None.name``) because the node was
        popped with a default of None but then dereferenced
        unconditionally.
        """
        node = self._nodes.pop(sessionId, None)
        if node is None:
            return
        self.nodeDb.removeNode(node.name)
        for command in self._assignedCommands.pop(sessionId, []):
            self._commands.pop(command.getCommandId())
        self._openSlots.pop(sessionId, None)
        self._openChroots.pop(sessionId, None)
        self._suspended.discard(sessionId)

    def suspend(self, sessionId):
        """Stop assigning new commands to this node."""
        if sessionId not in self._nodes:
            raise KeyError("Unknown node %r" % (sessionId,))
        self.logger.info("Suspending jobs to node %s", sessionId)
        self._suspended.add(sessionId)

    def resume(self, sessionId):
        """Allow command assignment to this node again."""
        if sessionId not in self._nodes:
            raise KeyError("Unknown node %r" % (sessionId,))
        self.logger.info("Resuming jobs to node %s", sessionId)
        self._suspended.discard(sessionId)

    def getNodeForCommand(self, commandId):
        """Return the node running C{commandId}, or None if unknown/gone."""
        if commandId in self._commands:
            sessionId, command = self._commands[commandId]
            return self._nodes.get(sessionId, None)
        return None

    def getNodeByName(self, name):
        """Return the node with the given name; raises IndexError if absent."""
        return [ x for x in self._nodes.values() if x.name == name ][0]

    def getNamesByIds(self, idList):
        """Map session ids to node names, skipping unknown ids."""
        return dict((x, self._nodes[x].name) for x in idList
                     if x in self._nodes)

    def __contains__(self, sessionId):
        return sessionId in self._nodes

    def _getScore(self, node):
        """Fraction of the node's slots in use (0 when fully idle)."""
        usedSlots = node.slots - self._openSlots[node.sessionId]
        if usedSlots == 0:
            return 0
        else:
            return usedSlots / float(node.slots)

    def rankNodes(self, nodeList):
        """Order nodes by slot usage, then by 1-minute load average."""
        return sorted(nodeList, key = lambda x: (self._getScore(x),
                                                 int(x.nodeInfo.loadavg[0]),
                                                 ))

    def getNodes(self):
        return self._nodes.values()

    def getOpenNodes(self, requiresChroot=False):
        """Return non-suspended nodes with a free slot (and, optionally, a
        free chroot) whose load is under their configured threshold."""
        availNodes = [ self._nodes[x[0]]
                        for x in self._openSlots.iteritems()
                        if x[1] > 0 and x[0] not in self._suspended
                        ]
        if requiresChroot:
            availNodes = [ x for x in availNodes
                           if self._openChroots[x.sessionId] > 0 ]
        # only return nodes whose load average is below their threshold
        return [ x for x in availNodes
                 if x.nodeInfo.getLoadAverage(1) < x.loadThreshold ]

    def getCommandAssignments(self):
        # returns commandId, sessionId pairs
        return [ (x[0], x[1][0]) for x in self._commands.items() ]

    def getNodeForFlavors(self, flavors, requiresChroot=False):
        """Pick the best open node that can build all of C{flavors}.

        A node qualifies when, for every requested flavor, at least one of
        its build flavors strongly satisfies the flavor (filtered against
        that build flavor plus the request's architecture flags).  Returns
        None when no node qualifies.
        """
        nodes = []
        for node in self.getOpenNodes(requiresChroot=requiresChroot):
            if not flavors:
                nodes.append(node)
                continue
            for flavor in flavors:
                found = False
                archFlavor = flavorutil.getArchFlags(flavor, getTarget=False,
                                                     withFlags=False)
                for buildFlavor in node.buildFlavors:
                    filteredFlavor = deps.filterFlavor(flavor, [buildFlavor,
                                                                archFlavor])
                    if buildFlavor.stronglySatisfies(filteredFlavor):
                        found = True
                        break
                if not found:
                    break
            if found:
                nodes.append(node)
        if not nodes:
            return None
        return self.rankNodes(nodes)[0]

    def updateStatus(self, sessionId, nodeInfo, commandIds):
        """Apply a node heartbeat: refresh load info and reconcile the set
        of commands the node reports against our assignments."""
        #self.db.updateNode(sessionId, nodeInfo)
        self._nodes[sessionId].nodeInfo = nodeInfo
        assignedCommandIds = [ x.getCommandId() for x in
                               self._assignedCommands[sessionId] ]
        for commandId in commandIds:
            if commandId not in self._commands:
                # FIXME: need to tell the node to stop working on it
                self.logger.warning('%s working on unknown command %s' % (sessionId, commandId))
                pass
        for commandId in (set(assignedCommandIds) - set(commandIds)):
            self.logger.warning('%s dropped command %s' % (sessionId, commandId))
            # FIXME: this command is no longer being worked on.
            # how did we miss this?
            self.removeCommand(commandId)

    def removeCommand(self, commandId):
        """Drop a command, returning its slot (and chroot) to the node."""
        sessionId, command = self._commands.pop(commandId, (None, None))
        if not sessionId:
            return
        self.logger.info('removing command: %s' % commandId)
        if sessionId in self._openSlots:
            self._openSlots[sessionId] += 1
        if sessionId in self._openChroots and command.requiresChroot():
            self._openChroots[sessionId] += 1
        commandsByJob = self._commandsByJob.get(command.getJobId(), [])
        if command in commandsByJob:
            commandsByJob.remove(command)
        if command in self._assignedCommands[sessionId]:
            self._assignedCommands[sessionId].remove(command)
        return command

    def _logDict(self, title, data):
        """Log a dict, one sorted "key: value" line per entry."""
        self.logger.info('%s:' % title)
        for item in sorted([ '\t%s: %s' % (x, y) for x, y in data.iteritems() ]):
            self.logger.info(item)

    def assignCommand(self, command, node):
        """Record that C{command} now runs on C{node}, consuming a slot."""
        sessionId = node.sessionId
        self._commands[command.getCommandId()] = sessionId, command
        self._commandsByJob.setdefault(command.getJobId(), []).append(
            command)
        self._assignedCommands[sessionId].append(command)
        self._logDict('Current OpenSlots', self._openSlots)
        self._openSlots[sessionId] -= 1
        if command.requiresChroot():
            self._logDict('Current OpenChroots', self._openChroots)
            self._openChroots[sessionId] -= 1
        self.logger.info('assigned %s to %s' % (command.getCommandId(), node.host))

    def assignCommands(self, commands):
        """Assign as many of C{commands} as capacity allows; return the
        (command, node) pairs actually assigned."""
        l = []
        for command in commands:
            flavors = command.getRequiredFlavors()
            node = self.getNodeForFlavors(flavors,
                                          requiresChroot=command.requiresChroot())
            if node is None:
                continue
            self.assignCommand(command, node)
            l.append((command, node))
        return l

    def getCommandsForJob(self, jobId):
        return self._commandsByJob.get(jobId, [])
| {
"content_hash": "6d748b1fe18ad8c8ea430a14e7a7d73a",
"timestamp": "",
"source": "github",
"line_count": 523,
"max_line_length": 96,
"avg_line_length": 37.44168260038241,
"alnum_prop": 0.6009600653661525,
"repo_name": "fedora-conary/rmake-2",
"id": "1a794d99bc997fcb4e7e9b2e68bea908c1160e97",
"size": "20169",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rmake/multinode/server/dispatcher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "35796"
},
{
"name": "C++",
"bytes": "3953"
},
{
"name": "Python",
"bytes": "1682020"
},
{
"name": "Shell",
"bytes": "12415"
}
],
"symlink_target": ""
} |
from .lin_op import LinOp
from ..utils.cuda_codegen import ReverseInOut
import numpy as np
class sum(LinOp):
    """Linear operator that sums its inputs element-wise.

    NOTE: intentionally shadows the builtin ``sum``; this is part of the
    package's public operator API and cannot be renamed.
    """

    def __init__(self, input_nodes, implem=None):
        # Output shape is taken from the first input; all inputs are assumed
        # to share it (not validated here).  ``implem`` is accepted for API
        # symmetry with other ops but ignored.
        shape = input_nodes[0].shape
        super(sum, self).__init__(input_nodes, shape)

    def forward(self, inputs, outputs):
        """The forward operator: outputs[0] = elementwise sum of inputs.

        Reads from inputs and writes to outputs.
        """
        if len(inputs) > 1:
            np.copyto(outputs[0], np.sum(inputs, 0))
        else:
            # Single input: plain copy, avoids the np.sum reduction.
            np.copyto(outputs[0], inputs[0])

    def adjoint(self, inputs, outputs):
        """The adjoint operator: broadcast inputs[0] into every output.

        Reads from inputs and writes to outputs.
        """
        for output in outputs:
            np.copyto(output, inputs[0])

    def forward_cuda_kernel(self, cg, num_tmp_variables, abs_idx, parent):
        # Emits CUDA C that accumulates every input node's value into a fresh
        # temporary.  CAUTION: the "%(res)s"/"%(ivar)s" templates are expanded
        # with ``% locals()``, so the local variable names here are part of
        # the generated code — do not rename them.
        #print("sum:forward:cuda")
        input_nodes = cg.input_nodes(self)
        res = "var_%d" % num_tmp_variables
        num_tmp_variables += 1
        code = "float %(res)s = 0; /*sum/copy*/ \n" % locals()
        for n in input_nodes:
            icode, ivar, num_tmp_variables = n.forward_cuda_kernel(cg, num_tmp_variables, abs_idx, self)
            code += icode
            code += "%(res)s += %(ivar)s;\n" % locals()
        return code, res, num_tmp_variables

    def adjoint_cuda_kernel(self, cg, num_tmp_variables, abs_idx, parent):
        # The adjoint of a sum is a copy: just forward the request to the
        # (single) output node's adjoint kernel.
        #print("sum:adjoint:cuda")
        code, var, num_tmp_variables = cg.output_nodes(self)[0].adjoint_cuda_kernel(cg, num_tmp_variables, abs_idx, self)
        return code, var, num_tmp_variables

    def is_diag(self, freq=False):
        """Is the lin op diagonal (in the frequency domain)?

        A sum is diagonal iff every input is.
        """
        return all([arg.is_diag(freq) for arg in self.input_nodes])

    def get_diag(self, freq=False):
        """Returns the diagonal representation (A^TA)^(1/2).

        Parameters
        ----------
        freq : bool
            Is the diagonal representation in the frequency domain?
        Returns
        -------
        dict of variable to ndarray
            The diagonal operator acting on each variable.
        """
        # Accumulate each input's per-variable diagonal into a zero-initialized
        # dict keyed by variable.
        var_diags = {var: np.zeros(var.size) for var in self.variables()}
        for arg in self.input_nodes:
            arg_diags = arg.get_diag(freq)
            for var, diag in arg_diags.items():
                var_diags[var] = var_diags[var] + diag
        return var_diags

    def norm_bound(self, input_mags):
        """Gives an upper bound on the magnitudes of the outputs given inputs.

        Parameters
        ----------
        input_mags : list
            List of magnitudes of inputs.

        Returns
        -------
        float
            Magnitude of outputs.
        """
        # Triangle inequality: |sum x_i| <= sum |x_i|.
        return np.sum(input_mags)
class copy(sum):
    """Adjoint-flipped variant of ``sum``.

    forward() broadcasts its single input to every output (sum's adjoint)
    and adjoint() accumulates (sum's forward); the CUDA kernels are likewise
    swapped, with the codegen's input/output roles reversed via ReverseInOut.
    NOTE: intentionally shadows the builtin ``copy`` name.
    """

    def __init__(self, arg, implem=None):
        if type(arg) is tuple:
            # Shape-only construction: record the shape without going through
            # LinOp.__init__ (no input nodes to wire up).
            self.shape = arg
            self.input_nodes = []
        elif isinstance(arg, LinOp):
            # Fix: sum.__init__'s second positional parameter is ``implem``,
            # not a shape — previously ``arg.shape`` was passed there (and
            # silently ignored, since sum.__init__ derives the shape from the
            # input node itself). Forward the caller's implem instead.
            super(copy, self).__init__([arg], implem)

    def forward(self, inputs, outputs):
        """The forward operator: broadcast inputs[0] into every output.

        Reads from inputs and writes to outputs.
        """
        super(copy, self).adjoint(inputs, outputs)

    def adjoint(self, inputs, outputs):
        """The adjoint operator: sum the inputs into outputs[0].

        Reads from inputs and writes to outputs.
        """
        super(copy, self).forward(inputs, outputs)

    def forward_cuda_kernel(self, cg, num_tmp_variables, abs_idx, parent):
        # Swap roles: copy's forward is sum's adjoint, with the codegen's
        # input/output node lookups reversed.
        #print("copy:forward:cuda")
        return super(copy, self).adjoint_cuda_kernel(ReverseInOut(cg), num_tmp_variables, abs_idx, parent)

    def adjoint_cuda_kernel(self, cg, num_tmp_variables, abs_idx, parent):
        #print("copy:adjoint:cuda")
        return super(copy, self).forward_cuda_kernel(ReverseInOut(cg), num_tmp_variables, abs_idx, parent)

    def norm_bound(self, input_mags):
        """Gives an upper bound on the magnitudes of the outputs given inputs.

        Parameters
        ----------
        input_mags : list
            List of magnitudes of inputs.

        Returns
        -------
        float
            Magnitude of outputs.
        """
        # A copy preserves magnitude: the bound is that of the single input.
        return input_mags[0]
| {
"content_hash": "0f6995fc6898afd03116daea9324c34f",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 121,
"avg_line_length": 31.68421052631579,
"alnum_prop": 0.5664451827242525,
"repo_name": "timmeinhardt/ProxImaL",
"id": "077cf6df8bb6949e02b0b3f8578a54329740e800",
"size": "4214",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "proximal/lin_ops/sum.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "328"
},
{
"name": "C++",
"bytes": "107632"
},
{
"name": "Python",
"bytes": "409090"
},
{
"name": "Shell",
"bytes": "2582"
}
],
"symlink_target": ""
} |
"""
:Author Patrik Valkovic
:Created 03.08.2017 12:28
:Licence MIT
Part of grammpy
"""
from unittest import TestCase, main
from grammpy.old_api import Grammar
class TerminalAddWhenCreatingTest(TestCase):
    """Verify terminals passed via Grammar(terminals=...) are registered."""

    def test_addOneInArray(self):
        gr = Grammar(terminals=['A'])
        self.assertTrue(gr.have_term('A'))

    def test_addTwoInArray(self):
        gr = Grammar(terminals=['A', 0])
        for term in ('A', 0):
            self.assertTrue(gr.have_term(term))
        self.assertTrue(gr.have_term([0, 'A']))

    def test_addOneSeparate(self):
        gr = Grammar(terminals='A')
        self.assertTrue(gr.have_term('A'))

    def test_addThreeInString(self):
        # A plain string is treated as a sequence of single-char terminals.
        gr = Grammar(terminals='ABC')
        for term in 'ABC':
            self.assertTrue(gr.have_term(term))
        self.assertTrue(gr.have_term(('A', 'B', 'C')))
        self.assertFalse(gr.have_term('D'))

    def test_addThreeInTuple(self):
        gr = Grammar(terminals=('A', 'B', 'C'))
        for term in 'ABC':
            self.assertTrue(gr.have_term(term))
        self.assertTrue(gr.have_term(['A', 'B', 'C']))
        self.assertFalse(gr.have_term('D'))

    def test_addThreeOneDelete(self):
        gr = Grammar(terminals=('A', 'B', 'C'))
        for term in 'ABC':
            self.assertTrue(gr.have_term(term))
        self.assertTrue(gr.have_term(['A', 'B', 'C']))
        self.assertFalse(gr.have_term('D'))
        # Removing one terminal must leave the others intact.
        gr.remove_term('B')
        self.assertTrue(gr.have_term('A'))
        self.assertFalse(gr.have_term('B'))
        self.assertTrue(gr.have_term('C'))
        self.assertTrue(gr.have_term(['A', 'C']))
        self.assertFalse(gr.have_term('D'))
# Allow running this test module directly (instead of via a test runner).
if __name__ == '__main__':
    main()
"content_hash": "fab63011f46fb5758ab35e68c87f6386",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 54,
"avg_line_length": 31.116666666666667,
"alnum_prop": 0.5902517407605785,
"repo_name": "PatrikValkovic/grammpy",
"id": "101b40c7b09f7c070a7766ebe88d80af2989eccc",
"size": "1889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/grammpy_test/oldapi_tests/term-nonterm-grammar-handling_tests/TerminalAddWhenCreatingTest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "604926"
}
],
"symlink_target": ""
} |
from tempest.lib import exceptions
# Re-export under a module-local name so the scenario exception classes below
# can subclass it without each importing tempest.lib directly.
TempestException = exceptions.TempestException
class QoSLimitReached(TempestException):
    """Raised when a measured rate hits the configured QoS limit."""
    message = "Limit reached, limit = %(limit)d"
class SocketConnectionRefused(TempestException):
    """Raised when a TCP connection to host:port is refused."""
    # Fix: the message previously read "...port %(port)d:Connection Refused"
    # (no space after the colon); add the missing space for readability.
    message = "Unable to connect to %(host)s port %(port)d: Connection Refused"
class ConnectionTimeoutException(TempestException):
    """Raised when connecting to host:port exceeds the allowed time."""
    message = "Timeout connecting to %(host)s port %(port)d"
class FileCreationFailedException(TempestException):
    """Raised when an expected file is missing or has an unexpected size."""
    message = "File %(file)s has not been created or has the wrong size"
| {
"content_hash": "9c767e575083ae6d860a89dfd20d5873",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 78,
"avg_line_length": 28.842105263157894,
"alnum_prop": 0.7700729927007299,
"repo_name": "cloudbase/neutron",
"id": "369a85b3b5c94b389ab58265b208b4f77f4402bd",
"size": "1176",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "neutron/tests/tempest/scenario/exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "9942988"
},
{
"name": "Shell",
"bytes": "14325"
}
],
"symlink_target": ""
} |
import os
import shutil
import signal
import netaddr
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants as n_consts
from neutron_lib.utils import runtime
from oslo_log import log as logging
from neutron.agent.l3 import namespaces
from neutron.agent.l3 import router_info as router
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import keepalived
from neutron.common import utils as common_utils
from neutron.extensions import revisions
from neutron.extensions import timestamp
from neutron.ipam import utils as ipam_utils
LOG = logging.getLogger(__name__)

# Name prefix for the HA (VRRP) interface plugged into router namespaces.
HA_DEV_PREFIX = 'ha-'
IP_MONITOR_PROCESS_SERVICE = 'ip_monitor'
# Seconds to wait for the state-change monitor to exit after SIGTERM before
# escalating to SIGKILL.
SIGTERM_TIMEOUT = 10
KEEPALIVED_STATE_CHANGE_MONITOR_SERVICE_NAME = (
    "neutron-keepalived-state-change-monitor")
# TODO(liuyulong): move to neutron-lib?
STATE_CHANGE_PROC_NAME = 'neutron-keepalived-state-change'

# The multiplier is used to compensate execution time of function sending
# SIGHUP to keepalived process. The constant multiplies ha_vrrp_advert_int
# config option and the result is the throttle delay.
THROTTLER_MULTIPLIER = 1.5
class HaRouterNamespace(namespaces.RouterNamespace):
    """Namespace for HA router.

    This namespace sets the ip_nonlocal_bind to 0 for HA router namespaces.
    It does so to prevent sending gratuitous ARPs for interfaces that got VIP
    removed in the middle of processing.
    It also disables ipv6 forwarding by default. Forwarding will be
    enabled during router configuration processing only for the primary node.
    It has to be disabled on all other nodes to avoid sending MLD packets
    which cause lost connectivity to Floating IPs.
    """
    def create(self):
        # ipv6_forwarding=False: only the primary node may forward (see class
        # docstring); enabled later during router processing when primary.
        super(HaRouterNamespace, self).create(ipv6_forwarding=False)
        # HA router namespaces should not have ip_nonlocal_bind enabled
        ip_lib.set_ip_nonlocal_bind_for_namespace(self.name, 0)
        # Linux should not automatically assign link-local addr for HA routers
        # They are managed by keepalived
        ip_wrapper = ip_lib.IPWrapper(namespace=self.name)
        cmd = ['sysctl', '-w', 'net.ipv6.conf.all.addr_gen_mode=1']
        ip_wrapper.netns.execute(cmd, privsep_exec=True)
class HaRouter(router.RouterInfo):
    """RouterInfo specialization for HA routers backed by keepalived (VRRP).

    Keeps the router's addresses/routes as keepalived VIPs and virtual
    routes so that only the VRRP primary node actually configures them.
    """

    def __init__(self, *args, **kwargs):
        super(HaRouter, self).__init__(*args, **kwargs)
        # HA-specific state; populated by initialize().
        self.ha_port = None
        self.keepalived_manager = None
        self._ha_state = None
        self._ha_state_path = None

    def create_router_namespace_object(
            self, router_id, agent_conf, iface_driver, use_ipv6):
        # Use the HA-aware namespace (nonlocal_bind off, ipv6 fwd deferred).
        return HaRouterNamespace(
            router_id, agent_conf, iface_driver, use_ipv6)

    @property
    def ha_state_path(self):
        """Path of the file persisting this router's VRRP state (lazy)."""
        if not self._ha_state_path and self.keepalived_manager:
            self._ha_state_path = (self.keepalived_manager.
                                   get_full_config_file_path('state'))
        return self._ha_state_path

    @property
    def ha_priority(self):
        return self.router.get('priority', keepalived.HA_DEFAULT_PRIORITY)

    @property
    def ha_vr_id(self):
        return self.router.get('ha_vr_id')

    def _check_and_set_real_state(self):
        # When the physical host was down/up, the 'primary' router may still
        # have its original state in the _ha_state_path file. We directly
        # reset it to 'backup'.
        if (not self.keepalived_manager.check_processes() and
                os.path.exists(self.ha_state_path) and
                self.ha_state == 'primary'):
            LOG.debug("Setting ha_state of router %s to: backup",
                      self.router_id)
            self.ha_state = 'backup'

    @property
    def ha_state(self):
        """Cached VRRP state ('primary'/'backup'/...), read from disk once.

        Returns 'unknown' when the state file cannot be read.
        """
        if self._ha_state:
            return self._ha_state
        try:
            with open(self.ha_state_path, 'r') as f:
                # TODO(haleyb): put old code back after a couple releases,
                # Y perhaps, just for backwards-compat
                # self._ha_state = f.read()
                ha_state = f.read()
                # Legacy files may still contain 'master'; map to 'primary'.
                ha_state = 'primary' if ha_state == 'master' else ha_state
                self._ha_state = ha_state
        except (OSError, IOError) as error:
            LOG.debug('Error while reading HA state for %s: %s',
                      self.router_id, error)
        return self._ha_state or 'unknown'

    @ha_state.setter
    def ha_state(self, new_state):
        # Update the cache first; a failed write is only logged, so the
        # in-memory state may be ahead of the on-disk file.
        self._ha_state = new_state
        try:
            with open(self.ha_state_path, 'w') as f:
                f.write(new_state)
        except (OSError, IOError) as error:
            LOG.error('Error while writing HA state for %s: %s',
                      self.router_id, error)

    @property
    def ha_namespace(self):
        return self.ns_name

    def is_router_primary(self):
        """this method is normally called before the ha_router object is fully
        initialized
        """
        if self.router.get('_ha_state') == 'active':
            return True
        else:
            return False

    def initialize(self, process_monitor):
        """Set up HA plumbing: HA port, keepalived manager, state monitor."""
        ha_port = self.router.get(n_consts.HA_INTERFACE_KEY)
        if not ha_port:
            msg = ("Unable to process HA router %s without HA port" %
                   self.router_id)
            LOG.exception(msg)
            raise Exception(msg)
        super(HaRouter, self).initialize(process_monitor)
        self.set_ha_port()
        self._init_keepalived_manager(process_monitor)
        self._check_and_set_real_state()
        self.ha_network_added()
        self.spawn_state_change_monitor(process_monitor)

    def _init_keepalived_manager(self, process_monitor):
        """Build the keepalived manager and its BACKUP-mode VRRP instance."""
        self.keepalived_manager = keepalived.KeepalivedManager(
            self.router['id'],
            keepalived.KeepalivedConf(),
            process_monitor,
            conf_path=self.agent_conf.ha_confs_path,
            namespace=self.ha_namespace,
            throttle_restart_value=(
                self.agent_conf.ha_vrrp_advert_int * THROTTLER_MULTIPLIER))
        # The following call is required to ensure that if the state path does
        # not exist it gets created.
        self.keepalived_manager.get_full_config_file_path('test')
        config = self.keepalived_manager.config
        interface_name = self.get_ha_device_name()
        subnets = self.ha_port.get('subnets', [])
        ha_port_cidrs = [subnet['cidr'] for subnet in subnets]
        # Always start as BACKUP with nopreempt; VRRP election decides the
        # primary at runtime.
        instance = keepalived.KeepalivedInstance(
            'BACKUP',
            interface_name,
            self.ha_vr_id,
            ha_port_cidrs,
            nopreempt=True,
            advert_int=self.agent_conf.ha_vrrp_advert_int,
            priority=self.ha_priority,
            vrrp_health_check_interval=(
                self.agent_conf.ha_vrrp_health_check_interval),
            ha_conf_dir=self.keepalived_manager.get_conf_dir())
        instance.track_interfaces.append(interface_name)
        if self.agent_conf.ha_vrrp_auth_password:
            # TODO(safchain): use oslo.config types when it will be available
            # in order to check the validity of ha_vrrp_auth_type
            instance.set_authentication(self.agent_conf.ha_vrrp_auth_type,
                                        self.agent_conf.ha_vrrp_auth_password)
        config.add_instance(instance)

    def enable_keepalived(self):
        self.keepalived_manager.spawn()

    def disable_keepalived(self):
        """Stop keepalived and remove its config dir (best effort)."""
        if not self.keepalived_manager:
            LOG.debug('Error while disabling keepalived for %s - no manager',
                      self.router_id)
            return
        self.keepalived_manager.disable()
        conf_dir = self.keepalived_manager.get_conf_dir()
        try:
            shutil.rmtree(conf_dir)
        except FileNotFoundError:
            pass

    def _get_keepalived_instance(self):
        return self.keepalived_manager.config.get_instance(self.ha_vr_id)

    def _get_primary_vip(self):
        return self._get_keepalived_instance().get_primary_vip()

    def get_ha_device_name(self):
        # Truncate to the driver's device-name length limit.
        return (HA_DEV_PREFIX + self.ha_port['id'])[:self.driver.DEV_NAME_LEN]

    def ha_network_added(self):
        """Plug the HA port into the namespace and configure its addresses."""
        interface_name = self.get_ha_device_name()
        self.driver.plug(self.ha_port['network_id'],
                         self.ha_port['id'],
                         interface_name,
                         self.ha_port['mac_address'],
                         namespace=self.ha_namespace,
                         prefix=HA_DEV_PREFIX,
                         mtu=self.ha_port.get('mtu'))
        ip_cidrs = common_utils.fixed_ip_cidrs(self.ha_port['fixed_ips'])
        # The primary VIP is managed by keepalived — don't wipe it here.
        self.driver.init_l3(interface_name, ip_cidrs,
                            namespace=self.ha_namespace,
                            preserve_ips=[self._get_primary_vip()])

    def ha_network_removed(self):
        if not self.ha_port:
            LOG.debug('Error while removing HA network for %s - no port',
                      self.router_id)
            return
        self.driver.unplug(self.get_ha_device_name(),
                           namespace=self.ha_namespace,
                           prefix=HA_DEV_PREFIX)
        self.ha_port = None

    def _add_vips(self, port, interface_name):
        # Register every fixed IP of the port as a keepalived VIP.
        for ip_cidr in common_utils.fixed_ip_cidrs(port['fixed_ips']):
            self._add_vip(ip_cidr, interface_name)

    def _add_vip(self, ip_cidr, interface, scope=None):
        instance = self._get_keepalived_instance()
        instance.add_vip(ip_cidr, interface, scope)

    def _remove_vip(self, ip_cidr):
        instance = self._get_keepalived_instance()
        instance.remove_vip_by_ip_address(ip_cidr)

    def _clear_vips(self, interface):
        instance = self._get_keepalived_instance()
        instance.remove_vips_vroutes_by_interface(interface)

    def _get_cidrs_from_keepalived(self, interface_name):
        instance = self._get_keepalived_instance()
        return instance.get_existing_vip_ip_addresses(interface_name)

    def get_router_cidrs(self, device):
        # The keepalived config, not the kernel, is the source of truth here.
        return set(self._get_cidrs_from_keepalived(device.name))

    def routes_updated(self, old_routes, new_routes):
        """Replace extra routes in keepalived config and reload it."""
        instance = self._get_keepalived_instance()
        instance.virtual_routes.extra_routes = [
            keepalived.KeepalivedVirtualRoute(
                route['destination'], route['nexthop'])
            for route in new_routes]
        if self.router.get('distributed', False):
            # DVR+HA routers also need the base-class route handling.
            super(HaRouter, self).routes_updated(old_routes, new_routes)
        self.keepalived_manager.get_process().reload_cfg()

    def _add_default_gw_virtual_route(self, ex_gw_port, interface_name):
        """Install default-gateway virtual routes for the external port."""
        gateway_ips = self._get_external_gw_ips(ex_gw_port)
        default_gw_rts = []
        instance = self._get_keepalived_instance()
        for subnet in ex_gw_port.get('subnets', []):
            # Off-subnet gateway needs an explicit on-link route first.
            is_gateway_not_in_subnet = (subnet['gateway_ip'] and
                                        not ipam_utils.check_subnet_ip(
                                            subnet['cidr'],
                                            subnet['gateway_ip']))
            if is_gateway_not_in_subnet:
                default_gw_rts.append(keepalived.KeepalivedVirtualRoute(
                    subnet['gateway_ip'], None, interface_name, scope='link'))
        for gw_ip in gateway_ips:
            # TODO(Carl) This is repeated everywhere. A method would
            # be nice.
            default_gw = n_consts.IP_ANY[netaddr.IPAddress(gw_ip).version]
            default_gw_rts.append(keepalived.KeepalivedVirtualRoute(
                default_gw, gw_ip, interface_name))
        instance.virtual_routes.gateway_routes = default_gw_rts

    def _add_extra_subnet_onlink_routes(self, ex_gw_port, interface_name):
        # On-link routes for extra subnets reachable via the gateway port.
        extra_subnets = ex_gw_port.get('extra_subnets', [])
        instance = self._get_keepalived_instance()
        onlink_route_cidrs = set(s['cidr'] for s in extra_subnets)
        instance.virtual_routes.extra_subnets = [
            keepalived.KeepalivedVirtualRoute(
                onlink_route_cidr, None, interface_name, scope='link') for
            onlink_route_cidr in onlink_route_cidrs]

    def _should_delete_ipv6_lladdr(self, ipv6_lladdr):
        """Only the primary should have any IP addresses configured.

        Let keepalived manage IPv6 link local addresses, the same way we let
        it manage IPv4 addresses. If the router is not in the primary state,
        we must delete the address first as it is autoconfigured by the kernel.
        """
        manager = self.keepalived_manager
        if manager.get_process().active:
            if self.ha_state != 'primary':
                conf = manager.get_conf_on_disk()
                managed_by_keepalived = conf and ipv6_lladdr in conf
                if managed_by_keepalived:
                    return False
        else:
            # keepalived isn't running yet, nothing owns the address.
            return False
        return True

    def _disable_ipv6_addressing_on_interface(self, interface_name,
                                              namespace=None):
        """Disable IPv6 link local addressing on the device and add it as
        a VIP to keepalived. This means that the IPv6 link local address
        will only be present on the primary.
        """
        namespace = namespace or self.ha_namespace
        device = ip_lib.IPDevice(interface_name, namespace=namespace)
        ipv6_lladdr = ip_lib.get_ipv6_lladdr(device.link.address)
        if self._should_delete_ipv6_lladdr(ipv6_lladdr):
            self.driver.configure_ipv6_ra(namespace, interface_name,
                                          n_consts.ACCEPT_RA_DISABLED)
            device.addr.flush(n_consts.IP_VERSION_6)
        else:
            self.driver.configure_ipv6_ra(
                namespace, interface_name,
                n_consts.ACCEPT_RA_WITHOUT_FORWARDING)
        # Re-register the lladdr as a link-scoped VIP (idempotent).
        self._remove_vip(ipv6_lladdr)
        self._add_vip(ipv6_lladdr, interface_name, scope='link')

    def _add_gateway_vip(self, ex_gw_port, interface_name):
        self._add_vips(ex_gw_port, interface_name)
        self._add_default_gw_virtual_route(ex_gw_port, interface_name)
        self._add_extra_subnet_onlink_routes(ex_gw_port, interface_name)

    def add_floating_ip(self, fip, interface_name, device):
        # FIPs are keepalived VIPs; the primary applies them to the device.
        fip_ip = fip['floating_ip_address']
        ip_cidr = common_utils.ip_to_cidr(fip_ip)
        self._add_vip(ip_cidr, interface_name)
        return n_consts.FLOATINGIP_STATUS_ACTIVE

    def remove_floating_ip(self, device, ip_cidr):
        self._remove_vip(ip_cidr)
        to = common_utils.cidr_to_ip(ip_cidr)
        # Only touch the kernel if the address is actually configured (i.e.
        # this node is/was primary).
        if device.addr.list(to=to):
            super(HaRouter, self).remove_floating_ip(device, ip_cidr)

    def internal_network_updated(self, port):
        """Refresh MTU and re-register the port's addresses as VIPs."""
        interface_name = self.get_internal_device_name(port['id'])
        ip_cidrs = common_utils.fixed_ip_cidrs(port['fixed_ips'])
        mtu = port['mtu']
        self.driver.set_mtu(interface_name, mtu, namespace=self.ns_name,
                            prefix=router.INTERNAL_DEV_PREFIX)
        self._clear_vips(interface_name)
        self._disable_ipv6_addressing_on_interface(interface_name)
        for ip_cidr in ip_cidrs:
            self._add_vip(ip_cidr, interface_name)

    def _plug_ha_router_port(self, port, name_getter, prefix):
        """Plug a port and register its IPs as keepalived-managed VIPs."""
        port_id = port['id']
        interface_name = name_getter(port_id)
        self.driver.plug(port['network_id'],
                         port_id,
                         interface_name,
                         port['mac_address'],
                         namespace=self.ha_namespace,
                         prefix=prefix,
                         mtu=port.get('mtu'))
        self._disable_ipv6_addressing_on_interface(interface_name)
        self._add_vips(port, interface_name)

    def internal_network_added(self, port):
        self._plug_ha_router_port(
            port, self.get_internal_device_name, router.INTERNAL_DEV_PREFIX)

    def internal_network_removed(self, port):
        super(HaRouter, self).internal_network_removed(port)
        interface_name = self.get_internal_device_name(port['id'])
        self._clear_vips(interface_name)

    def _get_state_change_monitor_process_manager(self):
        return external_process.ProcessManager(
            self.agent_conf,
            '%s.monitor' % self.router_id,
            None,
            service=KEEPALIVED_STATE_CHANGE_MONITOR_SERVICE_NAME,
            default_cmd_callback=self._get_state_change_monitor_callback(),
            run_as_root=True)

    def _get_state_change_monitor_callback(self):
        """Return a callback building the state-change monitor command line."""
        ha_device = self.get_ha_device_name()
        ha_cidr = self._get_primary_vip()
        config_dir = self.keepalived_manager.get_conf_dir()
        state_change_log = (
            "%s/neutron-keepalived-state-change.log") % config_dir

        def callback(pid_file):
            cmd = [
                STATE_CHANGE_PROC_NAME,
                '--router_id=%s' % self.router_id,
                '--namespace=%s' % self.ha_namespace,
                '--conf_dir=%s' % config_dir,
                '--log-file=%s' % state_change_log,
                '--monitor_interface=%s' % ha_device,
                '--monitor_cidr=%s' % ha_cidr,
                '--pid_file=%s' % pid_file,
                '--state_path=%s' % self.agent_conf.state_path,
                '--user=%s' % os.geteuid(),
                '--group=%s' % os.getegid()]
            return cmd

        return callback

    def spawn_state_change_monitor(self, process_monitor):
        pm = self._get_state_change_monitor_process_manager()
        pm.enable()
        process_monitor.register(
            self.router_id, IP_MONITOR_PROCESS_SERVICE, pm)
        LOG.debug("Router %(router_id)s %(process)s pid %(pid)d",
                  {"router_id": self.router_id,
                   "process": KEEPALIVED_STATE_CHANGE_MONITOR_SERVICE_NAME,
                   "pid": pm.pid})

    def destroy_state_change_monitor(self, process_monitor):
        """Stop the monitor: SIGTERM first, SIGKILL after SIGTERM_TIMEOUT."""
        if not self.ha_port:
            LOG.debug('Error while destroying state change monitor for %s - '
                      'no port', self.router_id)
            return
        pm = self._get_state_change_monitor_process_manager()
        process_monitor.unregister(
            self.router_id, IP_MONITOR_PROCESS_SERVICE)
        pm.disable(sig=str(int(signal.SIGTERM)))
        try:
            common_utils.wait_until_true(lambda: not pm.active,
                                         timeout=SIGTERM_TIMEOUT)
        except common_utils.WaitTimeout:
            pm.disable(sig=str(int(signal.SIGKILL)))

    @staticmethod
    def _gateway_ports_equal(port1, port2):
        # Compare ports ignoring fields that change without a real update
        # (binding host, timestamps, revision number).
        def _get_filtered_dict(d, ignore):
            return {k: v for k, v in d.items() if k not in ignore}

        keys_to_ignore = set([portbindings.HOST_ID, timestamp.UPDATED,
                              revisions.REVISION])
        port1_filtered = _get_filtered_dict(port1, keys_to_ignore)
        port2_filtered = _get_filtered_dict(port2, keys_to_ignore)
        return port1_filtered == port2_filtered

    def external_gateway_added(self, ex_gw_port, interface_name):
        self._plug_external_gateway(ex_gw_port, interface_name, self.ns_name)
        self._add_gateway_vip(ex_gw_port, interface_name)
        self._disable_ipv6_addressing_on_interface(interface_name)

        # Enable RA and IPv6 forwarding only for primary instances. This will
        # prevent backup routers from sending packets to the upstream switch
        # and disrupt connections.
        enable = self.ha_state == 'primary'
        self._configure_ipv6_params_on_gw(ex_gw_port, self.ns_name,
                                          interface_name, enable)

    def external_gateway_updated(self, ex_gw_port, interface_name):
        self._plug_external_gateway(
            ex_gw_port, interface_name, self.ha_namespace)
        # Drop the VIPs of the old gateway addresses before adding new ones.
        ip_cidrs = common_utils.fixed_ip_cidrs(self.ex_gw_port['fixed_ips'])
        for old_gateway_cidr in ip_cidrs:
            self._remove_vip(old_gateway_cidr)
        self._add_gateway_vip(ex_gw_port, interface_name)

    def external_gateway_removed(self, ex_gw_port, interface_name):
        self._clear_vips(interface_name)

        if self.ha_state == 'primary':
            super(HaRouter, self).external_gateway_removed(ex_gw_port,
                                                           interface_name)
        else:
            # We are not the primary node, so no need to delete ip addresses.
            self.driver.unplug(interface_name,
                               namespace=self.ns_name,
                               prefix=router.EXTERNAL_DEV_PREFIX)

    def delete(self):
        """Tear down monitor, keepalived and HA network, then the router."""
        if self.process_monitor:
            self.destroy_state_change_monitor(self.process_monitor)
        self.disable_keepalived()
        self.ha_network_removed()
        super(HaRouter, self).delete()

    def set_ha_port(self):
        ha_port = self.router.get(n_consts.HA_INTERFACE_KEY)
        if not ha_port:
            return
        # NOTE: once HA port is set, it MUST remain this value no matter what
        # the server return. Because there is race condition between l3-agent
        # side sync router info for processing and server side router deleting.
        # TODO(liuyulong): make sure router HA ports never change.
        if not self.ha_port or (self.ha_port and
                                self.ha_port['status'] != ha_port['status']):
            self.ha_port = ha_port

    def process(self):
        super(HaRouter, self).process()

        self.set_ha_port()
        LOG.debug("Processing HA router with HA port: %s", self.ha_port)
        # keepalived is only started once the HA port is reported ACTIVE.
        if (self.ha_port and
                self.ha_port['status'] == n_consts.PORT_STATUS_ACTIVE):
            self.enable_keepalived()

    @runtime.synchronized('enable_radvd')
    def enable_radvd(self, internal_ports=None):
        # radvd runs only on the primary while keepalived is active.
        if (self.keepalived_manager.get_process().active and
                self.ha_state == 'primary'):
            super(HaRouter, self).enable_radvd(internal_ports)
| {
"content_hash": "b60e3f84fe57f0d8ea98bd6680b3fd99",
"timestamp": "",
"source": "github",
"line_count": 530,
"max_line_length": 79,
"avg_line_length": 41.781132075471696,
"alnum_prop": 0.6013367052023122,
"repo_name": "mahak/neutron",
"id": "222e1d1df36ebd8092b903c35e4991bac4db7ffb",
"size": "22761",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/agent/l3/ha_router.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "2773"
},
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "15942116"
},
{
"name": "Ruby",
"bytes": "1257"
},
{
"name": "Shell",
"bytes": "83270"
}
],
"symlink_target": ""
} |
from coyote_framework.config.abstract_config import ConfigBase
class LogConfig(ConfigBase):
    """Configuration accessor bound to the 'log' configuration section."""

    def __init__(self):
        # ConfigBase is keyed by section name; pin this subclass to 'log'.
        super(LogConfig, self).__init__('log')
"content_hash": "06f7630fa3ca6775980bbdd66f114e5d",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 62,
"avg_line_length": 23.571428571428573,
"alnum_prop": 0.696969696969697,
"repo_name": "Shapeways/coyote_framework",
"id": "2261a9d59e2865c32f17f5ab4fd2d1f9e941bb42",
"size": "165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coyote_framework/config/log_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "737"
},
{
"name": "HTML",
"bytes": "286"
},
{
"name": "JavaScript",
"bytes": "7310"
},
{
"name": "Python",
"bytes": "187655"
}
],
"symlink_target": ""
} |
"""Utility functions for curses-menu."""
import os
import sys
from typing import Callable
def null_input_factory() -> Callable[[int], None]:
    """Create a callable that accepts a single input and does nothing."""

    def _ignore_input(input_: int) -> None:
        return None

    return _ignore_input
def clear_terminal() -> None:
    """
    Call the platform specific function to clear the terminal.

    Cls on windows, reset otherwise.
    """
    command = "cls" if sys.platform.startswith("win") else "reset"
    os.system(command)
def soft_clear_terminal() -> None:
    """Use ANSI control sequences to clear the terminal."""
    if sys.platform.startswith("win"):  # pragma: no cover all
        # enables ANSI escape codes to work properly in bare cmd.exe
        os.system("")
    # ESC[2J clears the screen, ESC[1;1H homes the cursor; emitted without a
    # trailing newline, producing the same byte stream as two end="" prints.
    escape = chr(27)
    print(escape + "[2J" + escape + "[1;1H", end="")
| {
"content_hash": "cdc13f2ef4d3496af9baae4c1657cf6a",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 69,
"avg_line_length": 26.387096774193548,
"alnum_prop": 0.6308068459657702,
"repo_name": "pmbarrett314/curses-menu",
"id": "ae4c9f6b3f9f28265dc1dede267b163e0169d34f",
"size": "818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cursesmenu/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88730"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.