repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
ericholscher/pinax | refs/heads/master | pinax/apps/profiles/forms.py | 2 | from django import forms
from profiles.models import Profile
class ProfileForm(forms.ModelForm):
    """ModelForm for editing a user's Profile.

    Account-level and service-credential fields are managed elsewhere in
    the app, so they are excluded from this form.
    """
    class Meta:
        model = Profile
        exclude = (
            'user',
            'blogrss',
            'timezone',
            'language',
            'twitter_user',
            'twitter_password',
        )
|
opennode/nodeconductor-assembly-waldur | refs/heads/develop | src/waldur_jira/migrations/0017_project_action.py | 2 | # Generated by Django 1.11.7 on 2018-05-17 09:51
from django.db import migrations, models
import waldur_core.core.fields
class Migration(migrations.Migration):
    """Add `action` and `action_details` tracking fields to the JIRA project model."""

    dependencies = [
        ('waldur_jira', '0016_project_template_null'),
    ]

    operations = [
        migrations.AddField(
            model_name='project',
            name='action',
            field=models.CharField(blank=True, max_length=50),
        ),
        migrations.AddField(
            model_name='project',
            name='action_details',
            # Use the `dict` callable rather than the literal `{}`: a mutable
            # literal default is shared between all model instances (Django's
            # system check fields.E010 flags exactly this for JSON fields).
            field=waldur_core.core.fields.JSONField(default=dict),
        ),
    ]
|
fbossy/SickRage | refs/heads/master | tornado/test/log_test.py | 111 | #!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, with_statement
import contextlib
import glob
import logging
import os
import re
import subprocess
import sys
import tempfile
import warnings
from tornado.escape import utf8
from tornado.log import LogFormatter, define_logging_options, enable_pretty_logging
from tornado.options import OptionParser
from tornado.test.util import unittest
from tornado.util import u, basestring_type
@contextlib.contextmanager
def ignore_bytes_warning():
    """Suppress BytesWarning (e.g. from logging byte strings) inside the block."""
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', category=BytesWarning)
        yield
class LogFormatterTest(unittest.TestCase):
    """Tests LogFormatter's handling of unicode and byte log messages.

    Fake color control characters (U+0001/U+0002) are patched onto the
    formatter so encoding issues in the colorized path are exercised even
    though output goes to a plain file.
    """
    # Matches the output of a single logging call (which may be multiple lines
    # if a traceback was included, so we use the DOTALL option)
    LINE_RE = re.compile(b"(?s)\x01\\[E [0-9]{6} [0-9]{2}:[0-9]{2}:[0-9]{2} log_test:[0-9]+\\]\x02 (.*)")
    def setUp(self):
        self.formatter = LogFormatter(color=False)
        # Fake color support. We can't guarantee anything about the $TERM
        # variable when the tests are run, so just patch in some values
        # for testing. (testing with color off fails to expose some potential
        # encoding issues from the control characters)
        self.formatter._colors = {
            logging.ERROR: u("\u0001"),
        }
        self.formatter._normal = u("\u0002")
        # construct a Logger directly to bypass getLogger's caching
        self.logger = logging.Logger('LogFormatterTest')
        self.logger.propagate = False
        self.tempdir = tempfile.mkdtemp()
        self.filename = os.path.join(self.tempdir, 'log.out')
        self.handler = self.make_handler(self.filename)
        self.handler.setFormatter(self.formatter)
        self.logger.addHandler(self.handler)
    def tearDown(self):
        # Close the handler before unlinking so the file is flushed.
        self.handler.close()
        os.unlink(self.filename)
        os.rmdir(self.tempdir)
    def make_handler(self, filename):
        # Base case: default setup without explicit encoding.
        # In python 2, supports arbitrary byte strings and unicode objects
        # that contain only ascii. In python 3, supports ascii-only unicode
        # strings (but byte strings will be repr'd automatically).
        return logging.FileHandler(filename)
    def get_output(self):
        # Read back the single formatted log line and return just the
        # message portion (regex group 1) as bytes.
        with open(self.filename, "rb") as f:
            line = f.read().strip()
            m = LogFormatterTest.LINE_RE.match(line)
            if m:
                return m.group(1)
            else:
                raise Exception("output didn't match regex: %r" % line)
    def test_basic_logging(self):
        # Plain ascii text passes through unchanged.
        self.logger.error("foo")
        self.assertEqual(self.get_output(), b"foo")
    def test_bytes_logging(self):
        with ignore_bytes_warning():
            # This will be "\xe9" on python 2 or "b'\xe9'" on python 3
            self.logger.error(b"\xe9")
            self.assertEqual(self.get_output(), utf8(repr(b"\xe9")))
    def test_utf8_logging(self):
        with ignore_bytes_warning():
            self.logger.error(u("\u00e9").encode("utf8"))
        if issubclass(bytes, basestring_type):
            # on python 2, utf8 byte strings (and by extension ascii byte
            # strings) are passed through as-is.
            self.assertEqual(self.get_output(), utf8(u("\u00e9")))
        else:
            # on python 3, byte strings always get repr'd even if
            # they're ascii-only, so this degenerates into another
            # copy of test_bytes_logging.
            self.assertEqual(self.get_output(), utf8(repr(utf8(u("\u00e9")))))
    def test_bytes_exception_logging(self):
        try:
            raise Exception(b'\xe9')
        except Exception:
            self.logger.exception('caught exception')
        # This will be "Exception: \xe9" on python 2 or
        # "Exception: b'\xe9'" on python 3.
        output = self.get_output()
        self.assertRegexpMatches(output, br'Exception.*\\xe9')
        # The traceback contains newlines, which should not have been escaped.
        self.assertNotIn(br'\n', output)
class UnicodeLogFormatterTest(LogFormatterTest):
    """Repeats LogFormatterTest with a UTF-8-configured FileHandler."""

    def make_handler(self, filename):
        # An explicit encoding lets non-ascii unicode strings through on
        # both python 2 and 3 without changing byte-string behavior.
        return logging.FileHandler(filename, encoding="utf8")

    def test_unicode_logging(self):
        message = u("\u00e9")
        self.logger.error(message)
        self.assertEqual(self.get_output(), utf8(message))
class EnablePrettyLoggingTest(unittest.TestCase):
    """Tests enable_pretty_logging()'s file-based handler configuration."""

    def setUp(self):
        super(EnablePrettyLoggingTest, self).setUp()
        self.options = OptionParser()
        define_logging_options(self.options)
        self.logger = logging.Logger('tornado.test.log_test.EnablePrettyLoggingTest')
        self.logger.propagate = False

    def test_log_file(self):
        tmpdir = tempfile.mkdtemp()
        prefix = tmpdir + '/test_log'
        try:
            self.options.log_file_prefix = prefix
            enable_pretty_logging(options=self.options, logger=self.logger)
            # Exactly one rotating file handler gets installed.
            self.assertEqual(1, len(self.logger.handlers))
            self.logger.error('hello')
            self.logger.handlers[0].flush()
            logs = glob.glob(prefix + '*')
            self.assertEqual(1, len(logs))
            with open(logs[0]) as f:
                self.assertRegexpMatches(f.read(), r'^\[E [^]]*\] hello$')
        finally:
            # Flush/close before removing files so Windows unlink succeeds.
            for handler in self.logger.handlers:
                handler.flush()
                handler.close()
            for leftover in glob.glob(prefix + '*'):
                os.unlink(leftover)
            os.rmdir(tmpdir)
class LoggingOptionTest(unittest.TestCase):
    """Test the ability to enable and disable Tornado's logging hooks."""

    def logs_present(self, statement, args=None):
        # Each test manipulates and/or parses the options and then logs a
        # line at the 'info' level.  That level is off by default in the
        # logging module but turned on by Tornado, so whether the line
        # appears tells us whether tornado's logging hooks ran.
        IMPORT = 'from tornado.options import options, parse_command_line'
        LOG_INFO = 'import logging; logging.info("hello")'
        program = ';'.join([IMPORT, statement, LOG_INFO])
        cmd = [sys.executable, '-c', program] + (args or [])
        child = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        stdout, _ = child.communicate()
        self.assertEqual(child.returncode, 0, 'process failed: %r' % stdout)
        return b'hello' in stdout

    def test_default(self):
        self.assertFalse(self.logs_present('pass'))

    def test_tornado_default(self):
        self.assertTrue(self.logs_present('parse_command_line()'))

    def test_disable_command_line(self):
        self.assertFalse(self.logs_present('parse_command_line()',
                                           ['--logging=none']))

    def test_disable_command_line_case_insensitive(self):
        self.assertFalse(self.logs_present('parse_command_line()',
                                           ['--logging=None']))

    def test_disable_code_string(self):
        self.assertFalse(self.logs_present(
            'options.logging = "none"; parse_command_line()'))

    def test_disable_code_none(self):
        self.assertFalse(self.logs_present(
            'options.logging = None; parse_command_line()'))

    def test_disable_override(self):
        # command line trumps code defaults
        self.assertTrue(self.logs_present(
            'options.logging = None; parse_command_line()',
            ['--logging=info']))
|
mozeq/fros | refs/heads/master | src/pyfros/plugins/screencastrecordmydesktop.py | 1 | ## Copyright (C) 2013 ABRT team <abrt-devel-list@redhat.com>
## Copyright (C) 2013 Red Hat, Inc.
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
from pyfros.screencastbase import ScreencastBase, ScreencastResult
import pyfros.plugins.const as const
from subprocess import Popen, PIPE
import fcntl
import os
import signal
#pylint: disable=E0611
from gi.repository import GLib
import re
from pyfros.froslogging import warn
def getScreencastPluginInstance():
    """Plugin entry point: return a fresh ScreencastRecordMyDesktop instance."""
    return ScreencastRecordMyDesktop()
class ScreencastRecordMyDesktop(ScreencastBase):
    """Screencast plugin driving the ``recordmydesktop`` command-line tool."""

    # Matches the percentage number in recordmydesktop's encoding progress
    # output, e.g. "[42%]".
    r = re.compile(r'.(?P<num>\d+).')
    recorapp = None         # Popen handle of the running recordmydesktop
    enc_completed = None    # last parsed encoding progress (float, 0-100)
    recPid = None           # pid of the recorder process
    screencast_done = None  # callback invoked when encoding has finished

    # pylint: disable=W0613
    def enc_progress(self, source, condition):
        """
        GLib IO-watch callback run when there is data to read from the
        child's stdout.

        It parses the "[X%]"/"[XX%]" progress strings that recordmydesktop
        prints while encoding and forwards the value to progress_update().
        Returns True so the watch stays installed.
        """
        # Popen.poll() returns None while the child is running and its
        # returncode once it has exited.  The original code compared the
        # result against -1 (the os.popen wait-status convention), which
        # never matched: the "process died" branch fired on every call and
        # the read loop below never executed.
        if self.recorapp.poll() is not None:  # process died
            #Gtk.main_quit() # encoding finished, so just close the window
            print()  # print empty line to to align the progress output
            self.screencast_done()
        strstdout = ""
        try:
            c = ""
            # read only until ']' - the string we're trying to read is:
            # "[X%]" or "[XX%]"
            while self.recorapp.poll() is None and c != "]":
                c = self.recorapp.stdout.read(1)
                strstdout += c
        except IOError:
            pass  # fd is non-blocking so we ignore "Resource unavailable"
        except Exception as ex:
            warn(ex)
        # child process prints some escape sequences to control the output on
        # terminal, so we need to filter it only tor ord(c) < 32, otherwise the
        # regexp matching is not reliable
        cleaned_str = ''.join(c for c in strstdout.strip() if ord(c) >= 32)
        num_regexp = self.r.match(cleaned_str)
        num = ""
        if num_regexp:
            num = num_regexp.group("num")
        if not num:
            return True
        self.enc_completed = float(num)
        percentage = self.enc_completed / 100.0
        # Original referenced the nonexistent `self.stdout` here
        # (AttributeError); the intent was to clamp the value once the
        # recorder process has exited.
        if percentage > 1.0 or (self.recorapp.poll() is not None):
            percentage = 1.0
        self.progress_update(self.enc_completed)
        return True

    def __init__(self, *args, **kwargs):
        super(ScreencastRecordMyDesktop, self).__init__(*args, **kwargs)
        # The recording is always written to the current working directory.
        self.output = os.path.join(os.getcwd(), "screencast.ogv")

    def ScreencastArea(self):
        # Area selection is not implemented for this plugin.
        print("ScreencastArea ScreencastRecordMyDesktop")

    def IsSuitable(self):
        """Return this plugin's priority; SUITABLE_DEFAULT (1) is default."""
        return const.SUITABLE_DEFAULT  # 1 is default

    def Screencast(self):
        """Start recordmydesktop and return a ScreencastResult with the output path."""
        args = [
            "recordmydesktop",
            "-o", self.output,  # where to save the screencast
            "--fps", "5",  # 5 fps should be enough
            "--no-sound",  # we don't care about the sound
            "--v_quality", "50",  # 50 seems to be a good quality/size ratio
            "--workdir", "/tmp",
            "-x", str(self.x),  # top left corner coordinates
            "-y", str(self.y),
            "--width", str(self.width),  # low right corner offset
            "--height", str(self.height)
        ]
        self.recorapp = Popen(args, stdin=PIPE, stdout=PIPE, close_fds=True)
        # Make the child's stdout non-blocking so enc_progress() can poll it
        # without stalling the GLib main loop.
        flags = fcntl.fcntl(self.recorapp.stdout, fcntl.F_GETFL)
        fcntl.fcntl(self.recorapp.stdout, fcntl.F_SETFL, flags | os.O_NONBLOCK)
        self.recPid = self.recorapp.pid
        return ScreencastResult(self.recPid is not None, self.output)

    def StopScreencast(self, end_handler):
        """
        Ask the recorder to stop (SIGINT) and watch its stdout for encoding
        progress; `end_handler` is invoked once encoding has finished.
        """
        self.screencast_done = end_handler
        os.kill(self.recPid, signal.SIGINT)
        giochannel = GLib.IOChannel(filedes=self.recorapp.stdout.fileno())
        giochannel.add_watch(GLib.IO_IN | GLib.IO_HUP, self.enc_progress)
|
wyc/django | refs/heads/master | django/contrib/gis/db/models/query.py | 224 | import warnings
from django.contrib.gis.db.models import aggregates
from django.contrib.gis.db.models.fields import (
GeometryField, LineStringField, PointField, get_srid_info,
)
from django.contrib.gis.db.models.lookups import GISLookup
from django.contrib.gis.db.models.sql import (
AreaField, DistanceField, GeomField, GMLField,
)
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Area, Distance
from django.db import connections
from django.db.models.expressions import RawSQL
from django.db.models.fields import Field
from django.db.models.query import QuerySet
from django.utils import six
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango110Warning,
)
class GeoQuerySet(QuerySet):
"The Geographic QuerySet."
# ### GeoQuerySet Methods ###
def area(self, tolerance=0.05, **kwargs):
"""
Returns the area of the geographic field in an `area` attribute on
each element of this GeoQuerySet.
"""
# Performing setup here rather than in `_spatial_attribute` so that
# we can get the units for `AreaField`.
procedure_args, geo_field = self._spatial_setup(
'area', field_name=kwargs.get('field_name'))
s = {'procedure_args': procedure_args,
'geo_field': geo_field,
'setup': False,
}
connection = connections[self.db]
backend = connection.ops
if backend.oracle:
s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s'
s['procedure_args']['tolerance'] = tolerance
s['select_field'] = AreaField('sq_m') # Oracle returns area in units of meters.
elif backend.postgis or backend.spatialite:
if backend.geography:
# Geography fields support area calculation, returns square meters.
s['select_field'] = AreaField('sq_m')
elif not geo_field.geodetic(connection):
# Getting the area units of the geographic field.
s['select_field'] = AreaField(Area.unit_attname(geo_field.units_name(connection)))
else:
# TODO: Do we want to support raw number areas for geodetic fields?
raise Exception('Area on geodetic coordinate systems not supported.')
return self._spatial_attribute('area', s, **kwargs)
def centroid(self, **kwargs):
"""
Returns the centroid of the geographic field in a `centroid`
attribute on each element of this GeoQuerySet.
"""
return self._geom_attribute('centroid', **kwargs)
def collect(self, **kwargs):
"""
Performs an aggregate collect operation on the given geometry field.
This is analogous to a union operation, but much faster because
boundaries are not dissolved.
"""
warnings.warn(
"The collect GeoQuerySet method is deprecated. Use the Collect() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango110Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.Collect, **kwargs)
def difference(self, geom, **kwargs):
"""
Returns the spatial difference of the geographic field in a `difference`
attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('difference', geom, **kwargs)
def distance(self, geom, **kwargs):
"""
Returns the distance from the given geographic field name to the
given geometry in a `distance` attribute on each element of the
GeoQuerySet.
Keyword Arguments:
`spheroid` => If the geometry field is geodetic and PostGIS is
the spatial database, then the more accurate
spheroid calculation will be used instead of the
quicker sphere calculation.
`tolerance` => Used only for Oracle. The tolerance is
in meters -- a default of 5 centimeters (0.05)
is used.
"""
return self._distance_attribute('distance', geom, **kwargs)
def envelope(self, **kwargs):
"""
Returns a Geometry representing the bounding box of the
Geometry field in an `envelope` attribute on each element of
the GeoQuerySet.
"""
return self._geom_attribute('envelope', **kwargs)
def extent(self, **kwargs):
"""
Returns the extent (aggregate) of the features in the GeoQuerySet. The
extent will be returned as a 4-tuple, consisting of (xmin, ymin, xmax, ymax).
"""
warnings.warn(
"The extent GeoQuerySet method is deprecated. Use the Extent() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango110Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.Extent, **kwargs)
def extent3d(self, **kwargs):
"""
Returns the aggregate extent, in 3D, of the features in the
GeoQuerySet. It is returned as a 6-tuple, comprising:
(xmin, ymin, zmin, xmax, ymax, zmax).
"""
warnings.warn(
"The extent3d GeoQuerySet method is deprecated. Use the Extent3D() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango110Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.Extent3D, **kwargs)
def force_rhr(self, **kwargs):
"""
Returns a modified version of the Polygon/MultiPolygon in which
all of the vertices follow the Right-Hand-Rule. By default,
this is attached as the `force_rhr` attribute on each element
of the GeoQuerySet.
"""
return self._geom_attribute('force_rhr', **kwargs)
def geojson(self, precision=8, crs=False, bbox=False, **kwargs):
"""
Returns a GeoJSON representation of the geometry field in a `geojson`
attribute on each element of the GeoQuerySet.
The `crs` and `bbox` keywords may be set to True if the user wants
the coordinate reference system and the bounding box to be included
in the GeoJSON representation of the geometry.
"""
backend = connections[self.db].ops
if not backend.geojson:
raise NotImplementedError('Only PostGIS 1.3.4+ and SpatiaLite 3.0+ '
'support GeoJSON serialization.')
if not isinstance(precision, six.integer_types):
raise TypeError('Precision keyword must be set with an integer.')
options = 0
if crs and bbox:
options = 3
elif bbox:
options = 1
elif crs:
options = 2
s = {'desc': 'GeoJSON',
'procedure_args': {'precision': precision, 'options': options},
'procedure_fmt': '%(geo_col)s,%(precision)s,%(options)s',
}
return self._spatial_attribute('geojson', s, **kwargs)
def geohash(self, precision=20, **kwargs):
"""
Returns a GeoHash representation of the given field in a `geohash`
attribute on each element of the GeoQuerySet.
The `precision` keyword may be used to custom the number of
_characters_ used in the output GeoHash, the default is 20.
"""
s = {'desc': 'GeoHash',
'procedure_args': {'precision': precision},
'procedure_fmt': '%(geo_col)s,%(precision)s',
}
return self._spatial_attribute('geohash', s, **kwargs)
def gml(self, precision=8, version=2, **kwargs):
"""
Returns GML representation of the given field in a `gml` attribute
on each element of the GeoQuerySet.
"""
backend = connections[self.db].ops
s = {'desc': 'GML', 'procedure_args': {'precision': precision}}
if backend.postgis:
s['procedure_fmt'] = '%(version)s,%(geo_col)s,%(precision)s'
s['procedure_args'] = {'precision': precision, 'version': version}
if backend.oracle:
s['select_field'] = GMLField()
return self._spatial_attribute('gml', s, **kwargs)
def intersection(self, geom, **kwargs):
"""
Returns the spatial intersection of the Geometry field in
an `intersection` attribute on each element of this
GeoQuerySet.
"""
return self._geomset_attribute('intersection', geom, **kwargs)
def kml(self, **kwargs):
"""
Returns KML representation of the geometry field in a `kml`
attribute on each element of this GeoQuerySet.
"""
s = {'desc': 'KML',
'procedure_fmt': '%(geo_col)s,%(precision)s',
'procedure_args': {'precision': kwargs.pop('precision', 8)},
}
return self._spatial_attribute('kml', s, **kwargs)
def length(self, **kwargs):
"""
Returns the length of the geometry field as a `Distance` object
stored in a `length` attribute on each element of this GeoQuerySet.
"""
return self._distance_attribute('length', None, **kwargs)
def make_line(self, **kwargs):
"""
Creates a linestring from all of the PointField geometries in the
this GeoQuerySet and returns it. This is a spatial aggregate
method, and thus returns a geometry rather than a GeoQuerySet.
"""
warnings.warn(
"The make_line GeoQuerySet method is deprecated. Use the MakeLine() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango110Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.MakeLine, geo_field_type=PointField, **kwargs)
def mem_size(self, **kwargs):
"""
Returns the memory size (number of bytes) that the geometry field takes
in a `mem_size` attribute on each element of this GeoQuerySet.
"""
return self._spatial_attribute('mem_size', {}, **kwargs)
def num_geom(self, **kwargs):
"""
Returns the number of geometries if the field is a
GeometryCollection or Multi* Field in a `num_geom`
attribute on each element of this GeoQuerySet; otherwise
the sets with None.
"""
return self._spatial_attribute('num_geom', {}, **kwargs)
def num_points(self, **kwargs):
"""
Returns the number of points in the first linestring in the
Geometry field in a `num_points` attribute on each element of
this GeoQuerySet; otherwise sets with None.
"""
return self._spatial_attribute('num_points', {}, **kwargs)
def perimeter(self, **kwargs):
"""
Returns the perimeter of the geometry field as a `Distance` object
stored in a `perimeter` attribute on each element of this GeoQuerySet.
"""
return self._distance_attribute('perimeter', None, **kwargs)
def point_on_surface(self, **kwargs):
"""
Returns a Point geometry guaranteed to lie on the surface of the
Geometry field in a `point_on_surface` attribute on each element
of this GeoQuerySet; otherwise sets with None.
"""
return self._geom_attribute('point_on_surface', **kwargs)
def reverse_geom(self, **kwargs):
"""
Reverses the coordinate order of the geometry, and attaches as a
`reverse` attribute on each element of this GeoQuerySet.
"""
s = {'select_field': GeomField()}
kwargs.setdefault('model_att', 'reverse_geom')
if connections[self.db].ops.oracle:
s['geo_field_type'] = LineStringField
return self._spatial_attribute('reverse', s, **kwargs)
def scale(self, x, y, z=0.0, **kwargs):
"""
Scales the geometry to a new size by multiplying the ordinates
with the given x,y,z scale factors.
"""
if connections[self.db].ops.spatialite:
if z != 0.0:
raise NotImplementedError('SpatiaLite does not support 3D scaling.')
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s',
'procedure_args': {'x': x, 'y': y},
'select_field': GeomField(),
}
else:
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s,%(z)s',
'procedure_args': {'x': x, 'y': y, 'z': z},
'select_field': GeomField(),
}
return self._spatial_attribute('scale', s, **kwargs)
def snap_to_grid(self, *args, **kwargs):
"""
Snap all points of the input geometry to the grid. How the
geometry is snapped to the grid depends on how many arguments
were given:
- 1 argument : A single size to snap both the X and Y grids to.
- 2 arguments: X and Y sizes to snap the grid to.
- 4 arguments: X, Y sizes and the X, Y origins.
"""
if False in [isinstance(arg, (float,) + six.integer_types) for arg in args]:
raise TypeError('Size argument(s) for the grid must be a float or integer values.')
nargs = len(args)
if nargs == 1:
size = args[0]
procedure_fmt = '%(geo_col)s,%(size)s'
procedure_args = {'size': size}
elif nargs == 2:
xsize, ysize = args
procedure_fmt = '%(geo_col)s,%(xsize)s,%(ysize)s'
procedure_args = {'xsize': xsize, 'ysize': ysize}
elif nargs == 4:
xsize, ysize, xorigin, yorigin = args
procedure_fmt = '%(geo_col)s,%(xorigin)s,%(yorigin)s,%(xsize)s,%(ysize)s'
procedure_args = {'xsize': xsize, 'ysize': ysize,
'xorigin': xorigin, 'yorigin': yorigin}
else:
raise ValueError('Must provide 1, 2, or 4 arguments to `snap_to_grid`.')
s = {'procedure_fmt': procedure_fmt,
'procedure_args': procedure_args,
'select_field': GeomField(),
}
return self._spatial_attribute('snap_to_grid', s, **kwargs)
def svg(self, relative=False, precision=8, **kwargs):
"""
Returns SVG representation of the geographic field in a `svg`
attribute on each element of this GeoQuerySet.
Keyword Arguments:
`relative` => If set to True, this will evaluate the path in
terms of relative moves (rather than absolute).
`precision` => May be used to set the maximum number of decimal
digits used in output (defaults to 8).
"""
relative = int(bool(relative))
if not isinstance(precision, six.integer_types):
raise TypeError('SVG precision keyword argument must be an integer.')
s = {
'desc': 'SVG',
'procedure_fmt': '%(geo_col)s,%(rel)s,%(precision)s',
'procedure_args': {
'rel': relative,
'precision': precision,
}
}
return self._spatial_attribute('svg', s, **kwargs)
def sym_difference(self, geom, **kwargs):
"""
Returns the symmetric difference of the geographic field in a
`sym_difference` attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('sym_difference', geom, **kwargs)
def translate(self, x, y, z=0.0, **kwargs):
"""
Translates the geometry to a new location using the given numeric
parameters as offsets.
"""
if connections[self.db].ops.spatialite:
if z != 0.0:
raise NotImplementedError('SpatiaLite does not support 3D translation.')
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s',
'procedure_args': {'x': x, 'y': y},
'select_field': GeomField(),
}
else:
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s,%(z)s',
'procedure_args': {'x': x, 'y': y, 'z': z},
'select_field': GeomField(),
}
return self._spatial_attribute('translate', s, **kwargs)
def transform(self, srid=4326, **kwargs):
"""
Transforms the given geometry field to the given SRID. If no SRID is
provided, the transformation will default to using 4326 (WGS84).
"""
if not isinstance(srid, six.integer_types):
raise TypeError('An integer SRID must be provided.')
field_name = kwargs.get('field_name')
self._spatial_setup('transform', field_name=field_name)
self.query.add_context('transformed_srid', srid)
return self._clone()
def union(self, geom, **kwargs):
"""
Returns the union of the geographic field with the given
Geometry in a `union` attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('union', geom, **kwargs)
def unionagg(self, **kwargs):
"""
Performs an aggregate union on the given geometry field. Returns
None if the GeoQuerySet is empty. The `tolerance` keyword is for
Oracle backends only.
"""
warnings.warn(
"The unionagg GeoQuerySet method is deprecated. Use the Union() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango110Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.Union, **kwargs)
# ### Private API -- Abstracted DRY routines. ###
    def _spatial_setup(self, att, desc=None, field_name=None, geo_field_type=None):
        """
        Perform setup for executing the spatial function `att`.

        Returns a (procedure_args, geo_field) pair where procedure_args maps
        'function' to the backend's SQL function name and 'geo_col' to the
        SQL for the geometry column.  Raises NotImplementedError when the
        backend lacks the function and TypeError when no suitable
        geographic field exists (or its type doesn't match
        `geo_field_type`).
        """
        # Does the spatial backend support this?
        connection = connections[self.db]
        func = getattr(connection.ops, att, False)
        if desc is None:
            desc = att
        if not func:
            raise NotImplementedError('%s stored procedure not available on '
                                      'the %s backend.' %
                                      (desc, connection.ops.name))
        # Initializing the procedure arguments.
        procedure_args = {'function': func}
        # Is there a geographic field in the model to perform this
        # operation on?
        geo_field = self._geo_field(field_name)
        if not geo_field:
            raise TypeError('%s output only available on GeometryFields.' % func)
        # If the `geo_field_type` keyword was used, then enforce that
        # type limitation.
        if geo_field_type is not None and not isinstance(geo_field, geo_field_type):
            raise TypeError('"%s" stored procedures may only be called on %ss.' % (func, geo_field_type.__name__))
        # Setting the procedure args.
        procedure_args['geo_col'] = self._geocol_select(geo_field, field_name)
        return procedure_args, geo_field
def _spatial_aggregate(self, aggregate, field_name=None,
geo_field_type=None, tolerance=0.05):
"""
DRY routine for calling aggregate spatial stored procedures and
returning their result to the caller of the function.
"""
# Getting the field the geographic aggregate will be called on.
geo_field = self._geo_field(field_name)
if not geo_field:
raise TypeError('%s aggregate only available on GeometryFields.' % aggregate.name)
# Checking if there are any geo field type limitations on this
# aggregate (e.g. ST_Makeline only operates on PointFields).
if geo_field_type is not None and not isinstance(geo_field, geo_field_type):
raise TypeError('%s aggregate may only be called on %ss.' % (aggregate.name, geo_field_type.__name__))
# Getting the string expression of the field name, as this is the
# argument taken by `Aggregate` objects.
agg_col = field_name or geo_field.name
# Adding any keyword parameters for the Aggregate object. Oracle backends
# in particular need an additional `tolerance` parameter.
agg_kwargs = {}
if connections[self.db].ops.oracle:
agg_kwargs['tolerance'] = tolerance
# Calling the QuerySet.aggregate, and returning only the value of the aggregate.
return self.aggregate(geoagg=aggregate(agg_col, **agg_kwargs))['geoagg']
    def _spatial_attribute(self, att, settings, field_name=None, model_att=None):
        """
        DRY routine for calling a spatial stored procedure on a geometry column
        and attaching its output as an attribute of the model.

        Emits a RemovedInDjango20Warning and then adds a RawSQL annotation
        built from `settings['procedure_fmt']`/`settings['procedure_args']`.

        Arguments:
         att:
          The name of the spatial attribute that holds the spatial
          SQL function to call.
         settings:
          Dictionary of internal settings to customize for the spatial procedure.
        Public Keyword Arguments:
         field_name:
          The name of the geographic field to call the spatial
          function on. May also be a lookup to a geometry field
          as part of a foreign key relation.
         model_att:
          The name of the model attribute to attach the output of
          the spatial function to.
        """
        warnings.warn(
            "The %s GeoQuerySet method is deprecated. See GeoDjango Functions "
            "documentation to find the expression-based replacement." % att,
            RemovedInDjango20Warning, stacklevel=2
        )
        # Default settings.
        settings.setdefault('desc', None)
        settings.setdefault('geom_args', ())
        settings.setdefault('geom_field', None)
        settings.setdefault('procedure_args', {})
        settings.setdefault('procedure_fmt', '%(geo_col)s')
        settings.setdefault('select_params', [])
        connection = connections[self.db]
        # Performing setup for the spatial column, unless told not to.
        if settings.get('setup', True):
            default_args, geo_field = self._spatial_setup(
                att, desc=settings['desc'], field_name=field_name,
                geo_field_type=settings.get('geo_field_type'))
            for k, v in six.iteritems(default_args):
                settings['procedure_args'].setdefault(k, v)
        else:
            geo_field = settings['geo_field']
        # The attribute to attach to the model.
        if not isinstance(model_att, six.string_types):
            model_att = att
        # Special handling for any argument that is a geometry.
        for name in settings['geom_args']:
            # Using the field's get_placeholder() routine to get any needed
            # transformation SQL.
            geom = geo_field.get_prep_value(settings['procedure_args'][name])
            params = geo_field.get_db_prep_lookup('contains', geom, connection=connection)
            geom_placeholder = geo_field.get_placeholder(geom, None, connection)
            # Replacing the procedure format with that of any needed
            # transformation SQL.
            old_fmt = '%%(%s)s' % name
            new_fmt = geom_placeholder % '%%s'
            settings['procedure_fmt'] = settings['procedure_fmt'].replace(old_fmt, new_fmt)
            settings['select_params'].extend(params)
        # Getting the format for the stored procedure.
        fmt = '%%(function)s(%s)' % settings['procedure_fmt']
        # If the result of this function needs to be converted.
        if settings.get('select_field'):
            select_field = settings['select_field']
            if connection.ops.oracle:
                select_field.empty_strings_allowed = False
        else:
            select_field = Field()
        # Finally, setting the extra selection attribute with
        # the format string expanded with the stored procedure
        # arguments.
        self.query.add_annotation(
            RawSQL(fmt % settings['procedure_args'], settings['select_params'], select_field),
            model_att)
        return self
def _distance_attribute(self, func, geom=None, tolerance=0.05, spheroid=False, **kwargs):
    """
    DRY routine for GeoQuerySet distance attribute routines.

    Builds the spatial-function settings dict for `distance`, `length`, or
    `perimeter` queries and delegates to `_spatial_attribute`.

    func      -- one of 'distance', 'length', 'perimeter'; anything else raises
                 ValueError.
    geom      -- optional geometry parameter (only meaningful for 'distance').
    tolerance -- tolerance value passed through to Oracle's spatial functions.
    spheroid  -- when True, force spheroid-based (rather than sphere-based)
                 geodetic calculation.

    Raises ValueError for an unknown `func`, for linear distance on a geodetic
    system the backend cannot handle, and for non-point spherical distance
    arguments on non-geography PostGIS columns.
    """
    # Setting up the distance procedure arguments.
    procedure_args, geo_field = self._spatial_setup(func, field_name=kwargs.get('field_name'))

    # If geodetic defaulting distance attribute to meters (Oracle and
    # PostGIS spherical distances return meters). Otherwise, use the
    # units of the geometry field.
    connection = connections[self.db]
    geodetic = geo_field.geodetic(connection)
    geography = geo_field.geography

    if geodetic:
        dist_att = 'm'
    else:
        dist_att = Distance.unit_attname(geo_field.units_name(connection))

    # Shortcut booleans for what distance function we're using and
    # whether the geometry field is 3D.
    distance = func == 'distance'
    length = func == 'length'
    perimeter = func == 'perimeter'
    if not (distance or length or perimeter):
        raise ValueError('Unknown distance function: %s' % func)
    geom_3d = geo_field.dim == 3

    # The field's get_db_prep_lookup() is used to get any
    # extra distance parameters.  Here we set up the
    # parameters that will be passed in to field's function.
    # 'POINT (0 0)' is only a dummy geometry so the lookup machinery has a
    # value to prepare; the 0 is a dummy distance.
    lookup_params = [geom or 'POINT (0 0)', 0]

    # Getting the spatial backend operations.
    backend = connection.ops

    # If the spheroid calculation is desired, either by the `spheroid`
    # keyword or when calculating the length of geodetic field, make
    # sure the 'spheroid' distance setting string is passed in so we
    # get the correct spatial stored procedure.
    if spheroid or (backend.postgis and geodetic and
                    (not geography) and length):
        lookup_params.append('spheroid')
    lookup_params = geo_field.get_prep_value(lookup_params)
    params = geo_field.get_db_prep_lookup('distance_lte', lookup_params, connection=connection)

    # The `geom_args` flag is set to true if a geometry parameter was
    # passed in.
    geom_args = bool(geom)

    if backend.oracle:
        if distance:
            procedure_fmt = '%(geo_col)s,%(geom)s,%(tolerance)s'
        elif length or perimeter:
            procedure_fmt = '%(geo_col)s,%(tolerance)s'
        procedure_args['tolerance'] = tolerance
    else:
        # Getting whether this field is in units of degrees since the field may have
        # been transformed via the `transform` GeoQuerySet method.
        srid = self.query.get_context('transformed_srid')
        if srid:
            u, unit_name, s = get_srid_info(srid, connection)
            geodetic = unit_name.lower() in geo_field.geodetic_units

        if geodetic and not connection.features.supports_distance_geodetic:
            raise ValueError(
                'This database does not support linear distance '
                'calculations on geodetic coordinate systems.'
            )

        if distance:
            if srid:
                # Setting the `geom_args` flag to false because we want to handle
                # transformation SQL here, rather than the way done by default
                # (which will transform to the original SRID of the field rather
                # than to what was transformed to).
                geom_args = False
                procedure_fmt = '%s(%%(geo_col)s, %s)' % (backend.transform, srid)
                if geom.srid is None or geom.srid == srid:
                    # If the geom parameter srid is None, it is assumed the coordinates
                    # are in the transformed units.  A placeholder is used for the
                    # geometry parameter.  `GeomFromText` constructor is also needed
                    # to wrap geom placeholder for SpatiaLite.
                    if backend.spatialite:
                        procedure_fmt += ', %s(%%%%s, %s)' % (backend.from_text, srid)
                    else:
                        procedure_fmt += ', %%s'
                else:
                    # We need to transform the geom to the srid specified in `transform()`,
                    # so wrapping the geometry placeholder in transformation SQL.
                    # SpatiaLite also needs geometry placeholder wrapped in `GeomFromText`
                    # constructor.
                    if backend.spatialite:
                        procedure_fmt += (', %s(%s(%%%%s, %s), %s)' % (
                            backend.transform, backend.from_text,
                            geom.srid, srid))
                    else:
                        procedure_fmt += ', %s(%%%%s, %s)' % (backend.transform, srid)
            else:
                # `transform()` was not used on this GeoQuerySet.
                procedure_fmt = '%(geo_col)s,%(geom)s'

            if not geography and geodetic:
                # Spherical distance calculation is needed (because the geographic
                # field is geodetic). However, the PostGIS ST_distance_sphere/spheroid()
                # procedures may only do queries from point columns to point geometries
                # some error checking is required.
                if not backend.geography:
                    if not isinstance(geo_field, PointField):
                        raise ValueError('Spherical distance calculation only supported on PointFields.')
                    if not str(Geometry(six.memoryview(params[0].ewkb)).geom_type) == 'Point':
                        raise ValueError(
                            'Spherical distance calculation only supported with '
                            'Point Geometry parameters'
                        )
                # The `function` procedure argument needs to be set differently for
                # geodetic distance calculations.
                if spheroid:
                    # Call to distance_spheroid() requires spheroid param as well.
                    procedure_fmt += ",'%(spheroid)s'"
                    procedure_args.update({'function': backend.distance_spheroid, 'spheroid': params[1]})
                else:
                    procedure_args.update({'function': backend.distance_sphere})
        elif length or perimeter:
            procedure_fmt = '%(geo_col)s'
            if not geography and geodetic and length:
                # There's no `length_sphere`, and `length_spheroid` also
                # works on 3D geometries.
                procedure_fmt += ",'%(spheroid)s'"
                procedure_args.update({'function': backend.length_spheroid, 'spheroid': params[1]})
            elif geom_3d and connection.features.supports_3d_functions:
                # Use 3D variants of perimeter and length routines on supported backends.
                if perimeter:
                    procedure_args.update({'function': backend.perimeter3d})
                elif length:
                    procedure_args.update({'function': backend.length3d})

    # Setting up the settings for `_spatial_attribute`; `setup` is False
    # because the spatial setup was already performed above.
    s = {'select_field': DistanceField(dist_att),
         'setup': False,
         'geo_field': geo_field,
         'procedure_args': procedure_args,
         'procedure_fmt': procedure_fmt,
         }
    if geom_args:
        s['geom_args'] = ('geom',)
        s['procedure_args']['geom'] = geom
    elif geom:
        # The geometry is passed in as a parameter because we handled
        # transformation conditions in this routine.
        s['select_params'] = [backend.Adapter(geom)]
    return self._spatial_attribute(func, s, **kwargs)
def _geom_attribute(self, func, tolerance=0.05, **kwargs):
    """
    Shared helper for GeoQuerySet methods that attach a Geometry
    attribute to each returned model instance (e.g. `centroid`,
    `point_on_surface`).

    tolerance -- tolerance value forwarded to Oracle spatial functions.
    """
    settings = {'select_field': GeomField()}
    backend_ops = connections[self.db].ops
    if backend_ops.oracle:
        # Oracle's spatial functions take an explicit tolerance argument.
        settings.update({
            'procedure_fmt': '%(geo_col)s,%(tolerance)s',
            'procedure_args': {'tolerance': tolerance},
        })
    return self._spatial_attribute(func, settings, **kwargs)
def _geomset_attribute(self, func, geom, tolerance=0.05, **kwargs):
    """
    Shared helper for geometry set-like operations (e.g. intersection,
    difference, union, sym_difference): attaches a Geometry attribute
    computed from the geographic column and the given `geom` parameter.

    geom      -- the geometry argument of the set operation.
    tolerance -- tolerance value forwarded to Oracle spatial functions.
    """
    settings = {
        'geom_args': ('geom',),
        'select_field': GeomField(),
        'procedure_fmt': '%(geo_col)s,%(geom)s',
        'procedure_args': {'geom': geom},
    }
    backend_ops = connections[self.db].ops
    if backend_ops.oracle:
        # Oracle variants of these functions take a trailing tolerance.
        settings['procedure_fmt'] = settings['procedure_fmt'] + ',%(tolerance)s'
        settings['procedure_args']['tolerance'] = tolerance
    return self._spatial_attribute(func, settings, **kwargs)
def _geocol_select(self, geo_field, field_name):
    """
    Helper routine for constructing the SQL to select the geographic
    column.  Takes into account if the geographic field is in a
    ForeignKey relation to the current model.

    geo_field  -- the GeometryField whose column SQL is wanted.
    field_name -- the lookup string that named the field (may traverse a
                  relation, e.g. 'location__point').

    Returns the quoted SQL string for the column; raises ValueError when a
    related geographic field cannot be found in the compiler's select list.
    """
    compiler = self.query.get_compiler(self.db)
    opts = self.model._meta
    if geo_field not in opts.fields:
        # Is this operation going to be on a related geographic field?
        # If so, it'll have to be added to the select related information
        # (e.g., if 'location__point' was given as the field name).
        # Note: the operation really is defined as "must add select related!"
        self.query.add_select_related([field_name])
        # Call pre_sql_setup() so that compiler.select gets populated.
        compiler.pre_sql_setup()
        # Scan the compiled select list for the column backed by our field.
        for col, _, _ in compiler.select:
            if col.output_field == geo_field:
                return col.as_sql(compiler, compiler.connection)[0]
        raise ValueError("%r not in compiler's related_select_cols" % geo_field)
    elif geo_field not in opts.local_fields:
        # This geographic field is inherited from another model, so we have to
        # use the db table for the _parent_ model instead.
        parent_model = geo_field.model._meta.concrete_model
        return self._field_column(compiler, geo_field, parent_model._meta.db_table)
    else:
        # Plain local field: qualify with this model's own table.
        return self._field_column(compiler, geo_field)
# Private API utilities, subject to change.
def _geo_field(self, field_name=None):
    """
    Returns the first Geometry field encountered or the one specified via
    the `field_name` keyword. The `field_name` may be a string specifying
    the geometry field on this GeoQuerySet's model, or a lookup string
    to a geometry field via a ForeignKey relation.

    Returns False when no name is given and the model has no geometry field.
    """
    if field_name is not None:
        # An explicit name was given -- it may also be a lookup that
        # traverses a relation to a geographic field.
        return GISLookup._check_geo_field(self.model._meta, field_name)
    # No name given: pick the first geometry field declared on the model,
    # or False when there is none.
    return next(
        (model_field for model_field in self.model._meta.fields
         if isinstance(model_field, GeometryField)),
        False,
    )
def _field_column(self, compiler, field, table_alias=None, column=None):
    """
    Helper function that returns the database column for the given field.
    The table and column are returned (quoted) in the proper format, e.g.,
    `"geoapp_city"."point"`.  If `table_alias` is not specified, the
    database table associated with the model of this `GeoQuerySet` will be
    used.  If `column` is specified, it will be used instead of the value
    in `field.column`.
    """
    if table_alias is None:
        # Fall back to this queryset's own model table.
        table_alias = compiler.query.get_meta().db_table
    quoted_table = compiler.quote_name_unless_alias(table_alias)
    quoted_column = compiler.connection.ops.quote_name(column or field.column)
    return "%s.%s" % (quoted_table, quoted_column)
|
unsiloai/syntaxnet-ops-hack | refs/heads/master | tensorflow/python/kernel_tests/priority_queue_test.py | 134 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.PriorityQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import random
import threading
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class PriorityQueueTest(test.TestCase):
  """Tests for data_flow_ops.PriorityQueue.

  The queue dequeues elements in priority order; side values attached to a
  priority must come back paired with that priority, though ties may come
  back in any order (hence the allowed/missed bookkeeping below).
  """

  def testRoundTripInsertReadOnceSorts(self):
    """Single-element enqueues, one dequeue_many: output sorted by priority."""
    with self.test_session() as sess:
      q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (
          (), ()))
      elem = np.random.randint(-5, 5, size=100).astype(np.int64)
      # NOTE(review): .astype(bytes) produces the byte-string form of each
      # float -- presumably just arbitrary distinct payloads; confirm intent.
      side_value_0 = np.random.rand(100).astype(bytes)
      side_value_1 = np.random.rand(100).astype(bytes)
      enq_list = [
          q.enqueue((e, constant_op.constant(v0), constant_op.constant(v1)))
          for e, v0, v1 in zip(elem, side_value_0, side_value_1)
      ]
      for enq in enq_list:
        enq.run()

      deq = q.dequeue_many(100)
      deq_elem, deq_value_0, deq_value_1 = sess.run(deq)

      # Each priority may map to several (v0, v1) pairs; every enqueued pair
      # must come back exactly once, attached to its own priority.
      allowed = {}
      missed = set()
      for e, v0, v1 in zip(elem, side_value_0, side_value_1):
        if e not in allowed:
          allowed[e] = set()
        allowed[e].add((v0, v1))
        missed.add((v0, v1))
      self.assertAllEqual(deq_elem, sorted(elem))
      for e, dv0, dv1 in zip(deq_elem, deq_value_0, deq_value_1):
        self.assertTrue((dv0, dv1) in allowed[e])
        missed.remove((dv0, dv1))
      self.assertEqual(missed, set())

  def testRoundTripInsertMultiThreadedReadOnceSorts(self):
    """Concurrent single-element enqueues, one dequeue_many: still sorted."""
    with self.test_session() as sess:
      q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (
          (), ()))
      elem = np.random.randint(-5, 5, size=100).astype(np.int64)
      side_value_0 = np.random.rand(100).astype(bytes)
      side_value_1 = np.random.rand(100).astype(bytes)

      enqueue_ops = [
          q.enqueue((e, constant_op.constant(v0), constant_op.constant(v1)))
          for e, v0, v1 in zip(elem, side_value_0, side_value_1)
      ]

      # Run one producer thread for each element in elems.
      def enqueue(enqueue_op):
        sess.run(enqueue_op)

      dequeue_op = q.dequeue_many(100)

      enqueue_threads = [
          self.checkedThread(
              target=enqueue, args=(op,)) for op in enqueue_ops
      ]

      for t in enqueue_threads:
        t.start()

      # dequeue_many(100) blocks until all 100 enqueues have landed.
      deq_elem, deq_value_0, deq_value_1 = sess.run(dequeue_op)

      for t in enqueue_threads:
        t.join()

      allowed = {}
      missed = set()
      for e, v0, v1 in zip(elem, side_value_0, side_value_1):
        if e not in allowed:
          allowed[e] = set()
        allowed[e].add((v0, v1))
        missed.add((v0, v1))
      self.assertAllEqual(deq_elem, sorted(elem))
      for e, dv0, dv1 in zip(deq_elem, deq_value_0, deq_value_1):
        self.assertTrue((dv0, dv1) in allowed[e])
        missed.remove((dv0, dv1))
      self.assertEqual(missed, set())

  def testRoundTripFillsCapacityMultiThreadedEnqueueAndDequeue(self):
    """Concurrent producers and consumers on a small-capacity queue."""
    with self.test_session() as sess:
      # Capacity 10 forces enqueues to block until consumers drain the queue.
      q = data_flow_ops.PriorityQueue(10, (dtypes.int64), (()))

      num_threads = 40
      enqueue_counts = np.random.randint(10, size=num_threads)
      enqueue_values = [
          np.random.randint(
              5, size=count) for count in enqueue_counts
      ]
      enqueue_ops = [
          q.enqueue_many((values, values)) for values in enqueue_values
      ]
      shuffled_counts = copy.deepcopy(enqueue_counts)
      random.shuffle(shuffled_counts)
      dequeue_ops = [q.dequeue_many(count) for count in shuffled_counts]
      all_enqueued_values = np.hstack(enqueue_values)

      # Run one producer thread for each element in elems.
      def enqueue(enqueue_op):
        sess.run(enqueue_op)

      dequeued = []

      def dequeue(dequeue_op):
        (dequeue_indices, dequeue_values) = sess.run(dequeue_op)
        # Priorities and values were enqueued as identical tensors.
        self.assertAllEqual(dequeue_indices, dequeue_values)
        dequeued.extend(dequeue_indices)

      enqueue_threads = [
          self.checkedThread(
              target=enqueue, args=(op,)) for op in enqueue_ops
      ]
      dequeue_threads = [
          self.checkedThread(
              target=dequeue, args=(op,)) for op in dequeue_ops
      ]

      # Dequeue and check
      for t in dequeue_threads:
        t.start()
      for t in enqueue_threads:
        t.start()
      for t in enqueue_threads:
        t.join()
      for t in dequeue_threads:
        t.join()

      self.assertAllEqual(sorted(dequeued), sorted(all_enqueued_values))

  def testRoundTripInsertManyMultiThreadedReadManyMultithreadedSorts(self):
    """Concurrent enqueue_many then concurrent dequeue_many: set-equality."""
    with self.test_session() as sess:
      q = data_flow_ops.PriorityQueue(2000, (dtypes.int64), (()))

      num_threads = 40
      enqueue_counts = np.random.randint(10, size=num_threads)
      enqueue_values = [
          np.random.randint(
              5, size=count) for count in enqueue_counts
      ]
      enqueue_ops = [
          q.enqueue_many((values, values)) for values in enqueue_values
      ]
      shuffled_counts = copy.deepcopy(enqueue_counts)
      random.shuffle(shuffled_counts)
      dequeue_ops = [q.dequeue_many(count) for count in shuffled_counts]
      all_enqueued_values = np.hstack(enqueue_values)

      # Guards the shared `dequeued` list against concurrent extension.
      dequeue_wait = threading.Condition()

      # Run one producer thread for each element in elems.
      def enqueue(enqueue_op):
        sess.run(enqueue_op)

      def dequeue(dequeue_op, dequeued):
        (dequeue_indices, dequeue_values) = sess.run(dequeue_op)
        self.assertAllEqual(dequeue_indices, dequeue_values)
        dequeue_wait.acquire()
        dequeued.extend(dequeue_indices)
        dequeue_wait.release()

      dequeued = []
      enqueue_threads = [
          self.checkedThread(
              target=enqueue, args=(op,)) for op in enqueue_ops
      ]
      dequeue_threads = [
          self.checkedThread(
              target=dequeue, args=(op, dequeued)) for op in dequeue_ops
      ]

      for t in enqueue_threads:
        t.start()
      for t in enqueue_threads:
        t.join()
      # Dequeue and check
      for t in dequeue_threads:
        t.start()
      for t in dequeue_threads:
        t.join()

      # We can't guarantee full sorting because we can't guarantee
      # that the dequeued.extend() call runs immediately after the
      # sess.run() call.  Here we're just happy everything came out.
      self.assertAllEqual(set(dequeued), set(all_enqueued_values))

  def testRoundTripInsertManyMultiThreadedReadOnceSorts(self):
    """Concurrent batched enqueues, one dequeue_many: output sorted."""
    with self.test_session() as sess:
      q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (
          (), ()))
      elem = np.random.randint(-5, 5, size=100).astype(np.int64)
      side_value_0 = np.random.rand(100).astype(bytes)
      side_value_1 = np.random.rand(100).astype(bytes)
      batch = 5
      enqueue_ops = [
          q.enqueue_many((elem[i * batch:(i + 1) * batch],
                          side_value_0[i * batch:(i + 1) * batch],
                          side_value_1[i * batch:(i + 1) * batch]))
          for i in range(20)
      ]

      # Run one producer thread for each element in elems.
      def enqueue(enqueue_op):
        sess.run(enqueue_op)

      dequeue_op = q.dequeue_many(100)

      enqueue_threads = [
          self.checkedThread(
              target=enqueue, args=(op,)) for op in enqueue_ops
      ]

      for t in enqueue_threads:
        t.start()

      deq_elem, deq_value_0, deq_value_1 = sess.run(dequeue_op)

      for t in enqueue_threads:
        t.join()

      allowed = {}
      missed = set()
      for e, v0, v1 in zip(elem, side_value_0, side_value_1):
        if e not in allowed:
          allowed[e] = set()
        allowed[e].add((v0, v1))
        missed.add((v0, v1))
      self.assertAllEqual(deq_elem, sorted(elem))
      for e, dv0, dv1 in zip(deq_elem, deq_value_0, deq_value_1):
        self.assertTrue((dv0, dv1) in allowed[e])
        missed.remove((dv0, dv1))
      self.assertEqual(missed, set())

  def testRoundTripInsertOnceReadOnceSorts(self):
    """One big enqueue_many, one dequeue_many: output sorted by priority."""
    with self.test_session() as sess:
      q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (
          (), ()))
      elem = np.random.randint(-100, 100, size=1000).astype(np.int64)
      side_value_0 = np.random.rand(1000).astype(bytes)
      side_value_1 = np.random.rand(1000).astype(bytes)
      q.enqueue_many((elem, side_value_0, side_value_1)).run()
      deq = q.dequeue_many(1000)
      deq_elem, deq_value_0, deq_value_1 = sess.run(deq)

      allowed = {}
      for e, v0, v1 in zip(elem, side_value_0, side_value_1):
        if e not in allowed:
          allowed[e] = set()
        allowed[e].add((v0, v1))
      self.assertAllEqual(deq_elem, sorted(elem))
      for e, dv0, dv1 in zip(deq_elem, deq_value_0, deq_value_1):
        self.assertTrue((dv0, dv1) in allowed[e])

  def testRoundTripInsertOnceReadManySorts(self):
    """One enqueue_many, ten dequeue_many(100): concatenation is sorted."""
    with self.test_session():
      q = data_flow_ops.PriorityQueue(2000, (dtypes.int64), (()))
      elem = np.random.randint(-100, 100, size=1000).astype(np.int64)
      q.enqueue_many((elem, elem)).run()
      # Bug fix: np.hstack requires a sequence of arrays; passing a generator
      # is not supported (deprecated and later an error in NumPy), so
      # materialize the batches in a list first.
      deq_values = np.hstack(
          [q.dequeue_many(100)[0].eval() for _ in range(10)])
      self.assertAllEqual(deq_values, sorted(elem))

  def testRoundTripInsertOnceReadOnceLotsSorts(self):
    """One enqueue_many, 1000 single dequeues: concatenation is sorted."""
    with self.test_session():
      q = data_flow_ops.PriorityQueue(2000, (dtypes.int64), (()))
      elem = np.random.randint(-100, 100, size=1000).astype(np.int64)
      q.enqueue_many((elem, elem)).run()
      dequeue_op = q.dequeue()
      # Bug fix: same generator-to-np.hstack issue as above -- use a list.
      deq_values = np.hstack([dequeue_op[0].eval() for _ in range(1000)])
      self.assertAllEqual(deq_values, sorted(elem))

  def testInsertingNonInt64Fails(self):
    """Priorities must be int64; string priorities raise TypeError."""
    with self.test_session():
      q = data_flow_ops.PriorityQueue(2000, (dtypes.string), (()))
      with self.assertRaises(TypeError):
        q.enqueue_many((["a", "b", "c"], ["a", "b", "c"])).run()

  def testInsertingNonScalarFails(self):
    """Priority components must match the declared scalar shape."""
    with self.test_session() as sess:
      input_priority = array_ops.placeholder(dtypes.int64)
      input_other = array_ops.placeholder(dtypes.string)
      q = data_flow_ops.PriorityQueue(2000, (dtypes.string,), (()))

      with self.assertRaisesRegexp(
          errors_impl.InvalidArgumentError,
          r"Shape mismatch in tuple component 0. Expected \[\], got \[2\]"):
        sess.run([q.enqueue((input_priority, input_other))],
                 feed_dict={
                     input_priority: np.array(
                         [0, 2], dtype=np.int64),
                     input_other: np.random.rand(3, 5).astype(bytes)
                 })

      with self.assertRaisesRegexp(
          errors_impl.InvalidArgumentError,
          r"Shape mismatch in tuple component 0. Expected \[2\], got \[2,2\]"):
        sess.run(
            [q.enqueue_many((input_priority, input_other))],
            feed_dict={
                input_priority: np.array(
                    [[0, 2], [3, 4]], dtype=np.int64),
                input_other: np.random.rand(2, 3).astype(bytes)
            })
# Standard TensorFlow test entry point: runs every test case in this module.
if __name__ == "__main__":
  test.main()
|
bsmrstu-warriors/Moytri---The-Drone-Aider | refs/heads/master | ExtLibs/Mavlink/pymavlink/generator/lib/genxmlif/xmlifElementTree.py | 7 | #
# genxmlif, Release 0.9.0
# file: xmlifElementTree.py
#
# XML interface class to elementtree toolkit by Fredrik Lundh
#
# history:
# 2005-04-25 rl created
# 2007-05-25 rl performance optimization (caching) added, some bugfixes
# 2007-06-29 rl complete re-design, ElementExtension class introduced
# 2008-07-01 rl Limited support of XInclude added
#
# Copyright (c) 2005-2008 by Roland Leuthe. All rights reserved.
#
# --------------------------------------------------------------------
# The generic XML interface is
#
# Copyright (c) 2005-2008 by Roland Leuthe
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
import sys
import string
import urllib
from xml.dom import EMPTY_NAMESPACE, XMLNS_NAMESPACE
from xml.parsers.expat import ExpatError
# from version 2.5 on the elementtree module is part of the standard python distribution
if sys.version_info[:2] >= (2,5):
from xml.etree.ElementTree import ElementTree, _ElementInterface, XMLTreeBuilder, TreeBuilder
from xml.etree import ElementInclude
else:
from elementtree.ElementTree import ElementTree, _ElementInterface, XMLTreeBuilder, TreeBuilder
from elementtree import ElementInclude
from ..genxmlif import XMLIF_ELEMENTTREE, GenXmlIfError
from xmlifUtils import convertToAbsUrl, processWhitespaceAction, collapseString, toClarkQName, splitQName
from xmlifBase import XmlIfBuilderExtensionBase
from xmlifApi import XmlInterfaceBase
#########################################################
# Derived interface class for elementtree toolkit
class XmlInterfaceElementTree (XmlInterfaceBase):
#####################################################
# for description of the interface methods see xmlifbase.py
#####################################################
def __init__ (self, verbose, useCaching, processXInclude):
XmlInterfaceBase.__init__ (self, verbose, useCaching, processXInclude)
self.xmlIfType = XMLIF_ELEMENTTREE
if self.verbose:
print "Using elementtree interface module..."
def createXmlTree (self, namespace, xmlRootTagName, attributeDict={}, publicId=None, systemId=None):
rootNode = ElementExtension(toClarkQName(xmlRootTagName), attributeDict)
rootNode.xmlIfExtSetParentNode(None)
treeWrapper = self.treeWrapperClass(self, ElementTreeExtension(rootNode), self.useCaching)
rootNodeWrapper = self.elementWrapperClass (rootNode, treeWrapper, []) # TODO: namespace handling
return treeWrapper
def parse (self, file, baseUrl="", ownerDoc=None):
absUrl = convertToAbsUrl (file, baseUrl)
fp = urllib.urlopen (absUrl)
try:
tree = ElementTreeExtension()
treeWrapper = self.treeWrapperClass(self, tree, self.useCaching)
parser = ExtXMLTreeBuilder(file, absUrl, self, treeWrapper)
treeWrapper.getTree().parse(fp, parser)
fp.close()
# XInclude support
if self.processXInclude:
loaderInst = ExtXIncludeLoader (self.parse, absUrl, ownerDoc)
try:
ElementInclude.include(treeWrapper.getTree().getroot(), loaderInst.loader)
except IOError, errInst:
raise GenXmlIfError, "%s: IOError: %s" %(file, str(errInst))
except ExpatError, errstr:
fp.close()
raise GenXmlIfError, "%s: ExpatError: %s" %(file, str(errstr))
except ElementInclude.FatalIncludeError, errInst:
fp.close()
raise GenXmlIfError, "%s: XIncludeError: %s" %(file, str(errInst))
return treeWrapper
def parseString (self, text, baseUrl="", ownerDoc=None):
absUrl = convertToAbsUrl ("", baseUrl)
tree = ElementTreeExtension()
treeWrapper = self.treeWrapperClass(self, tree, self.useCaching)
parser = ExtXMLTreeBuilder("", absUrl, self, treeWrapper)
parser.feed(text)
treeWrapper.getTree()._setroot(parser.close())
# XInclude support
if self.processXInclude:
loaderInst = ExtXIncludeLoader (self.parse, absUrl, ownerDoc)
ElementInclude.include(treeWrapper.getTree().getroot(), loaderInst.loader)
return treeWrapper
#########################################################
# Extension (derived) class for ElementTree class
class ElementTreeExtension (ElementTree):
    """ElementTree subclass providing the generic xmlIfExt* tree interface."""

    def xmlIfExtGetRootNode (self):
        """Return the root element of this tree."""
        return self.getroot()

    def xmlIfExtCreateElement (self, nsName, attributeDict, curNs):
        """Create a new, unattached element for the namespace-qualified name."""
        return ElementExtension(toClarkQName(nsName), attributeDict)

    def xmlIfExtCloneTree (self, rootElementCopy):
        """Return a new tree of the same class rooted at `rootElementCopy`."""
        return self.__class__(element=rootElementCopy)
#########################################################
# Wrapper class for Element class
class ElementExtension (_ElementInterface):
    """Extension of the elementtree element class implementing the generic
    xmlIfExt* element interface used by genxmlif.

    Adds an explicit parent link (elementtree elements do not know their
    parent) and namespace-aware attribute handling based on Clark notation
    ("{namespace}localname") qualified names.

    Idiom fixes relative to the original: `dict.has_key()` (removed in
    Python 3) replaced by the `in` operator, `string.find()` by the string
    method, and `reduce`/`filter` by `sum`/list comprehensions -- all
    behave identically on Python 2 as well.
    """

    def __init__ (self, xmlRootTagName, attributeDict):
        _ElementInterface.__init__(self, xmlRootTagName, attributeDict)

    def xmlIfExtUnlink (self):
        """Drop back references so the element can be garbage collected."""
        self.xmlIfExtElementWrapper = None
        self.__xmlIfExtParentElement = None

    def xmlIfExtCloneNode (self):
        """Return a shallow copy (tag, attributes, text, tail; no children)."""
        nodeCopy = self.__class__(self.tag, self.attrib.copy())
        nodeCopy.text = self.text
        nodeCopy.tail = self.tail
        return nodeCopy

    def xmlIfExtGetTagName (self):
        """Return the element's tag (Clark notation)."""
        return self.tag

    def xmlIfExtGetNamespaceURI (self):
        # splitQName on a Clark-notation tag yields (namespace, localName);
        # the first component is returned as the namespace URI.
        namespaceURI, localName = splitQName(self.tag)
        return namespaceURI

    def xmlIfExtGetParentNode (self):
        """Return the parent element, or None for the root."""
        return self.__xmlIfExtParentElement

    def xmlIfExtSetParentNode (self, parentElement):
        self.__xmlIfExtParentElement = parentElement

    def xmlIfExtGetChildren (self, filterTag=None):
        """Return child elements, optionally restricted to `filterTag`."""
        if filterTag is None:
            return self.getchildren()
        return self.findall(toClarkQName(filterTag))

    def xmlIfExtGetFirstChild (self, filterTag=None):
        """Return the first (matching) child element, or None."""
        # replace base method (performance optimized)
        if filterTag is None:
            children = self.getchildren()
            return children[0] if children else None
        return self.find(toClarkQName(filterTag))

    def xmlIfExtGetElementsByTagName (self, filterTag=(None,None)):
        """Return all matching descendant elements (excluding self)."""
        clarkFilterTag = toClarkQName(filterTag)
        descendants = []
        for node in self.xmlIfExtGetChildren():
            descendants.extend(node.getiterator(clarkFilterTag))
        return descendants

    def xmlIfExtGetIterator (self, filterTag=(None,None)):
        """Return an iterator over this element and matching descendants."""
        return self.getiterator(toClarkQName(filterTag))

    def xmlIfExtAppendChild (self, childElement):
        """Append `childElement` and update its parent link."""
        self.append(childElement)
        childElement.xmlIfExtSetParentNode(self)

    def xmlIfExtInsertBefore (self, childElement, refChildElement):
        """Insert `childElement` directly before `refChildElement`."""
        self.insert(self.getchildren().index(refChildElement), childElement)
        childElement.xmlIfExtSetParentNode(self)

    def xmlIfExtRemoveChild (self, childElement):
        self.remove(childElement)

    def xmlIfExtInsertSubtree (self, refChildElement, subTree, insertSubTreeRootNode):
        """Insert `subTree` (its root or only its children, depending on
        `insertSubTreeRootNode`) before `refChildElement` (or at position 0
        when `refChildElement` is None), fixing up parent links.
        """
        if refChildElement is not None:
            insertIndex = self.getchildren().index(refChildElement)
        else:
            insertIndex = 0
        if insertSubTreeRootNode:
            elementList = [subTree.xmlIfExtGetRootNode(),]
        else:
            elementList = subTree.xmlIfExtGetRootNode().xmlIfExtGetChildren()
        # Insert in reverse so repeated insertion at insertIndex preserves order.
        elementList.reverse()
        for element in elementList:
            self.insert(insertIndex, element)
            element.xmlIfExtSetParentNode(self)

    def xmlIfExtGetAttributeDict (self):
        """Return a dict mapping (namespaceURI, localName) tuples to values."""
        attrDict = {}
        for attrName, attrValue in self.attrib.items():
            # Clark notation "{namespace}localname" => split into a tuple key.
            namespaceEndIndex = attrName.find('}')
            if namespaceEndIndex != -1:
                attrName = (attrName[1:namespaceEndIndex], attrName[namespaceEndIndex+1:])
            else:
                attrName = (EMPTY_NAMESPACE, attrName)
            attrDict[attrName] = attrValue
        return attrDict

    def xmlIfExtGetAttribute (self, tupleOrAttrName):
        """Return the attribute value, or None when it is absent."""
        clarkQName = toClarkQName(tupleOrAttrName)
        if clarkQName in self.attrib:
            return self.attrib[clarkQName]
        return None

    def xmlIfExtSetAttribute (self, tupleOrAttrName, attributeValue, curNs):
        self.attrib[toClarkQName(tupleOrAttrName)] = attributeValue

    def xmlIfExtRemoveAttribute (self, tupleOrAttrName):
        """Delete the attribute if present; silently ignore if absent."""
        clarkQName = toClarkQName(tupleOrAttrName)
        if clarkQName in self.attrib:
            del self.attrib[clarkQName]

    def xmlIfExtGetElementValueFragments (self, ignoreEmtpyStringFragments):
        """Return the text fragments of this element (text + children tails),
        optionally dropping whitespace-only fragments.  Always returns at
        least one fragment ("" when there is no text at all).
        """
        elementValueList = []
        if self.text is not None:
            elementValueList.append(self.text)
        for child in self.getchildren():
            if child.tail is not None:
                elementValueList.append(child.tail)
        if ignoreEmtpyStringFragments:
            elementValueList = [s for s in elementValueList if collapseString(s) != ""]
        if elementValueList == []:
            elementValueList = ["",]
        return elementValueList

    def xmlIfExtGetElementText (self):
        """Return the element's text, never None."""
        if self.text is not None:
            return self.text
        return ""

    def xmlIfExtGetElementTailText (self):
        """Return the element's tail text, never None."""
        if self.tail is not None:
            return self.tail
        return ""

    def xmlIfExtSetElementValue (self, elementValue):
        """Replace the whole element value; clears all children tail texts."""
        self.text = elementValue
        for child in self.getchildren():
            child.tail = None

    def xmlIfExtProcessWsElementValue (self, wsAction):
        """Apply the whitespace action to every text fragment; only the first
        fragment is left-stripped and only the last one right-stripped.
        """
        noOfTextFragments = sum(1 for child in self.getchildren() if child.tail is not None)
        noOfTextFragments += (self.text is not None)

        rstrip = 0
        lstrip = 1
        if self.text is not None:
            if noOfTextFragments == 1:
                rstrip = 1
            self.text = processWhitespaceAction (self.text, wsAction, lstrip, rstrip)
            noOfTextFragments -= 1
            lstrip = 0
        for child in self.getchildren():
            if child.tail is not None:
                if noOfTextFragments == 1:
                    rstrip = 1
                child.tail = processWhitespaceAction (child.tail, wsAction, lstrip, rstrip)
                noOfTextFragments -= 1
                lstrip = 0
###################################################
# Element tree builder class derived from XMLTreeBuilder
# extended to store related line numbers in the Element object
class ExtXMLTreeBuilder (XMLTreeBuilder, XmlIfBuilderExtensionBase):
    """Element tree builder derived from XMLTreeBuilder, extended to track
    namespace declarations and to store source line numbers (via
    XmlIfBuilderExtensionBase) on each created element.
    """

    def __init__(self, filePath, absUrl, xmlIf, treeWrapper):
        XMLTreeBuilder.__init__(self, target=TreeBuilder(element_factory=ElementExtension))
        # Hook expat's namespace declaration events to maintain self.namespaces.
        self._parser.StartNamespaceDeclHandler = self._start_ns
        self._parser.EndNamespaceDeclHandler = self._end_ns
        # Stack of (prefix, uri) pairs; most recent declaration first.
        self.namespaces = []
        XmlIfBuilderExtensionBase.__init__(self, filePath, absUrl, treeWrapper, xmlIf.elementWrapperClass)

    def _start(self, tag, attrib_in):
        # NOTE(review): self.start() requires an `attributes` argument, but it
        # is not passed here, so this path would raise TypeError if taken.
        # Presumably expat's ordered_attributes mode makes _start_list() the
        # only path actually used -- confirm before relying on _start().
        elem = XMLTreeBuilder._start(self, tag, attrib_in)
        self.start(elem)

    def _start_list(self, tag, attrib_in):
        # attrib_in is expat's flat ordered-attribute list: [name, value, ...].
        elem = XMLTreeBuilder._start_list(self, tag, attrib_in)
        self.start(elem, attrib_in)

    def _end(self, tag):
        elem = XMLTreeBuilder._end(self, tag)
        self.end(elem)

    def _start_ns(self, prefix, value):
        self.namespaces.insert(0, (prefix, value))

    def _end_ns(self, prefix):
        assert self.namespaces.pop(0)[0] == prefix, "implementation confused"

    def start(self, element, attributes):
        """Record line number/namespace info for a newly started element and
        wire up its parent link (or, for the root, its xmlns attributes).
        `attributes` is a flat [name, value, ...] list; only even indexes
        (names) are inspected below.
        """
        # bugfix for missing start '{'
        for i in range (0, len(attributes), 2):
            attrName = attributes[i]
            namespaceEndIndex = string.find (attrName, '}')
            if namespaceEndIndex != -1 and attrName[0] != "{":
                attributes[i] = '{' + attributes[i]
        # bugfix end

        XmlIfBuilderExtensionBase.startElementHandler (self, element, self._parser.ErrorLineNumber, self.namespaces[:], attributes)
        if len(self._target._elem) > 1:
            # Non-root element: its parent is the next-to-last open element.
            element.xmlIfExtSetParentNode (self._target._elem[-2])
        else:
            # Root element: materialize the active namespace declarations
            # as xmlns attributes on the wrapper.
            for namespace in self.namespaces:
                if namespace[1] != None:
                    element.xmlIfExtElementWrapper.setAttribute((XMLNS_NAMESPACE, namespace[0]), namespace[1])

    def end(self, element):
        """Record the end line number of the element."""
        XmlIfBuilderExtensionBase.endElementHandler (self, element, self._parser.ErrorLineNumber)
###################################################
# XInclude loader
#
class ExtXIncludeLoader:
    """Resolves XInclude references relative to the including document."""

    def __init__(self, parser, baseUrl, ownerDoc):
        self.parser = parser
        self.baseUrl = baseUrl
        self.ownerDoc = ownerDoc

    def loader(self, href, parse, encoding=None):
        """Load the referenced resource: parsed XML root for parse == "xml",
        otherwise the raw (optionally decoded) text content.
        """
        if parse == "xml":
            return self.parser(href, self.baseUrl, self.ownerDoc).getTree().getroot()
        absUrl = convertToAbsUrl(href, self.baseUrl)
        fp = urllib.urlopen(absUrl)
        data = fp.read()
        if encoding:
            data = data.decode(encoding)
        fp.close()
        return data
|
ArcherSys/ArcherSys | refs/heads/master | Lib/site-packages/django/contrib/admin/utils.py | 66 | from __future__ import unicode_literals
import datetime
import decimal
from collections import defaultdict
from django.contrib.auth import get_permission_codename
from django.core.exceptions import FieldDoesNotExist
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.forms.forms import pretty_name
from django.utils import formats, six, timezone
from django.utils.encoding import force_str, force_text, smart_text
from django.utils.html import format_html
from django.utils.text import capfirst
from django.utils.translation import ungettext
def lookup_needs_distinct(opts, lookup_path):
    """
    Returns True if 'distinct()' should be used to query the given lookup path.
    """
    # Only the first segment of the path matters: traversing any
    # many-to-many relation can yield duplicate result rows.
    first_segment = lookup_path.partition('__')[0]
    field = opts.get_field(first_segment)
    if not hasattr(field, 'get_path_info'):
        return False
    return any(step.m2m for step in field.get_path_info())
def prepare_lookup_value(key, value):
    """
    Returns a lookup value prepared to be used in queryset filtering.
    """
    if key.endswith('__in'):
        # Comma-separated parameter becomes a list of individual values.
        value = value.split(',')
    if key.endswith('__isnull'):
        # '', 'false' and '0' mean False; any other string means True.
        value = value.lower() not in ('', 'false', '0')
    return value
def quote(s):
    """
    Ensure that primary key values do not confuse the admin URLs by escaping
    any '/', '_' and ':' and similarly problematic characters.
    Similar to urllib.quote, except that the quoting is slightly different so
    that it doesn't get automatically unquoted by the Web browser.
    """
    if not isinstance(s, six.string_types):
        return s
    # Replace every problematic character by '_' + its two-digit hex code.
    problematic = """:/_#?;@&=+$,"[]<>%\\"""
    return ''.join('_%02X' % ord(ch) if ch in problematic else ch
                   for ch in s)
def unquote(s):
    """
    Undo the effects of quote(). Based heavily on urllib.unquote().
    """
    # Idiom cleanup: the original shadowed the builtin `list` and aliased
    # chr/int as mychr/myatoi for no measurable benefit.
    pieces = s.split('_')
    res = [pieces[0]]
    for item in pieces[1:]:
        # Each chunk after an '_' may start with the two-digit hex code
        # that quote() produced; decode it back to the original character.
        if item[1:2]:
            try:
                res.append(chr(int(item[:2], 16)) + item[2:])
            except ValueError:
                # Not a valid hex escape -- keep the underscore literally.
                res.append('_' + item)
        else:
            res.append('_' + item)
    return "".join(res)
def flatten(fields):
    """Returns a list which is a single level of flattening of the
    original list."""
    result = []
    for entry in fields:
        # Splice nested lists/tuples in place; keep scalars as-is.
        result.extend(entry if isinstance(entry, (list, tuple)) else (entry,))
    return result
def flatten_fieldsets(fieldsets):
    """Returns a list of field names from an admin fieldsets structure."""
    return [field
            for _label, opts in fieldsets
            for field in flatten(opts['fields'])]
def get_deleted_objects(objs, opts, user, admin_site, using):
    """
    Find all objects related to ``objs`` that should also be deleted. ``objs``
    must be a homogeneous iterable of objects (e.g. a QuerySet).
    Returns a nested list of strings suitable for display in the
    template with the ``unordered_list`` filter.
    """
    collector = NestedObjects(using=using)
    collector.collect(objs)
    # Verbose names of models the user lacks delete permission for; filled
    # in as a side effect of format_callback below.
    perms_needed = set()
    def format_callback(obj):
        # Render one object as "Verbose name: str(obj)", linking to its
        # admin change page when one exists and is reversible.
        has_admin = obj.__class__ in admin_site._registry
        opts = obj._meta
        no_edit_link = '%s: %s' % (capfirst(opts.verbose_name),
                                   force_text(obj))
        if has_admin:
            try:
                admin_url = reverse('%s:%s_%s_change'
                                    % (admin_site.name,
                                       opts.app_label,
                                       opts.model_name),
                                    None, (quote(obj._get_pk_val()),))
            except NoReverseMatch:
                # Change url doesn't exist -- don't display link to edit
                return no_edit_link
            p = '%s.%s' % (opts.app_label,
                           get_permission_codename('delete', opts))
            if not user.has_perm(p):
                perms_needed.add(opts.verbose_name)
            # Display a link to the admin page.
            return format_html('{}: <a href="{}">{}</a>',
                               capfirst(opts.verbose_name),
                               admin_url,
                               obj)
        else:
            # Don't display link to edit, because it either has no
            # admin or is edited inline.
            return no_edit_link
    to_delete = collector.nested(format_callback)
    # Objects that cannot be deleted because of on_delete=PROTECT.
    protected = [format_callback(obj) for obj in collector.protected]
    return to_delete, collector.model_count, perms_needed, protected
class NestedObjects(Collector):
    """Deletion collector that additionally records the parent/child edges
    between collected objects so they can be rendered as a nested list."""
    def __init__(self, *args, **kwargs):
        super(NestedObjects, self).__init__(*args, **kwargs)
        self.edges = {}  # {from_instance: [to_instances]}
        # Objects whose deletion is blocked by on_delete=PROTECT.
        self.protected = set()
        # verbose_name_plural -> number of instances collected.
        self.model_count = defaultdict(int)
    def add_edge(self, source, target):
        # source may be None for root (directly selected) objects.
        self.edges.setdefault(source, []).append(target)
    def collect(self, objs, source=None, source_attr=None, **kwargs):
        for obj in objs:
            if source_attr and not source_attr.endswith('+'):
                # Follow the accessor back to the owning instance so the
                # edge points parent -> child.
                related_name = source_attr % {
                    'class': source._meta.model_name,
                    'app_label': source._meta.app_label,
                }
                self.add_edge(getattr(obj, related_name), obj)
            else:
                self.add_edge(None, obj)
            self.model_count[obj._meta.verbose_name_plural] += 1
        try:
            return super(NestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
        except models.ProtectedError as e:
            # Remember protected objects instead of aborting collection.
            self.protected.update(e.protected_objects)
    def related_objects(self, related, objs):
        qs = super(NestedObjects, self).related_objects(related, objs)
        # Pre-fetch the FK target so format callbacks don't issue one
        # query per object.
        return qs.select_related(related.field.name)
    def _nested(self, obj, seen, format_callback):
        # Depth-first expansion of one object and its children; `seen`
        # guards against cycles and duplicates.
        if obj in seen:
            return []
        seen.add(obj)
        children = []
        for child in self.edges.get(obj, ()):
            children.extend(self._nested(child, seen, format_callback))
        if format_callback:
            ret = [format_callback(obj)]
        else:
            ret = [obj]
        if children:
            ret.append(children)
        return ret
    def nested(self, format_callback=None):
        """
        Return the graph as a nested list.
        """
        seen = set()
        roots = []
        for root in self.edges.get(None, ()):
            roots.extend(self._nested(root, seen, format_callback))
        return roots
    def can_fast_delete(self, *args, **kwargs):
        """
        We always want to load the objects into memory so that we can display
        them to the user in confirm page.
        """
        return False
def model_format_dict(obj):
    """
    Return a `dict` with keys 'verbose_name' and 'verbose_name_plural',
    typically for use with string formatting.
    `obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
    """
    # Normalise the argument down to a Meta options object.
    if isinstance(obj, (models.Model, models.base.ModelBase)):
        meta = obj._meta
    elif isinstance(obj, models.query.QuerySet):
        meta = obj.model._meta
    else:
        meta = obj
    return {
        'verbose_name': force_text(meta.verbose_name),
        'verbose_name_plural': force_text(meta.verbose_name_plural),
    }
def model_ngettext(obj, n=None):
    """
    Return the appropriate `verbose_name` or `verbose_name_plural` value for
    `obj` depending on the count `n`.
    `obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
    If `obj` is a `QuerySet` instance, `n` is optional and the length of the
    `QuerySet` is used.
    """
    if isinstance(obj, models.query.QuerySet):
        if n is None:
            n = obj.count()
        obj = obj.model
    names = model_format_dict(obj)
    return ungettext(names["verbose_name"],
                     names["verbose_name_plural"],
                     n or 0)
def lookup_field(name, obj, model_admin=None):
    """Resolve `name` against `obj`, returning (field, attr, value).
    `field` is the model field when `name` is a real (non-GFK) field, else
    None; `attr` is the callable/attribute used otherwise, else None."""
    opts = obj._meta
    try:
        f = _get_non_gfk_field(opts, name)
    except FieldDoesNotExist:
        # For non-field values, the value is either a method, property or
        # returned via a callable.
        if callable(name):
            attr = name
            value = attr(obj)
        elif (model_admin is not None and
                hasattr(model_admin, name) and
                not name == '__str__' and
                not name == '__unicode__'):
            # ModelAdmin attributes win over model attributes, except for
            # the string-representation dunders.
            attr = getattr(model_admin, name)
            value = attr(obj)
        else:
            attr = getattr(obj, name)
            if callable(attr):
                value = attr()
            else:
                value = attr
        f = None
    else:
        attr = None
        value = getattr(obj, name)
    return f, attr, value
def _get_non_gfk_field(opts, name):
"""
For historical reasons, the admin app relies on GenericForeignKeys as being
"not found" by get_field(). This could likely be cleaned up.
"""
field = opts.get_field(name)
if field.is_relation and field.many_to_one and not field.related_model:
raise FieldDoesNotExist()
return field
def label_for_field(name, model, model_admin=None, return_attr=False):
    """
    Returns a sensible label for a field name. The name can be a callable,
    property (but not created with @property decorator) or the name of an
    object's attribute, as well as a genuine fields. If return_attr is
    True, the resolved attribute (which could be a callable) is also returned.
    This will be None if (and only if) the name refers to a field.
    """
    attr = None
    try:
        field = _get_non_gfk_field(model._meta, name)
        try:
            label = field.verbose_name
        except AttributeError:
            # field is likely a ForeignObjectRel
            label = field.related_model._meta.verbose_name
    except FieldDoesNotExist:
        # Not a model field: fall back to dunder names, callables, or
        # attributes on the ModelAdmin / model.
        if name == "__unicode__":
            label = force_text(model._meta.verbose_name)
            attr = six.text_type
        elif name == "__str__":
            label = force_str(model._meta.verbose_name)
            attr = bytes
        else:
            if callable(name):
                attr = name
            elif model_admin is not None and hasattr(model_admin, name):
                attr = getattr(model_admin, name)
            elif hasattr(model, name):
                attr = getattr(model, name)
            else:
                message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
                if model_admin:
                    message += " or %s" % (model_admin.__class__.__name__,)
                raise AttributeError(message)
            # Derive the label: explicit short_description wins, then the
            # callable's name, then a prettified field name.
            if hasattr(attr, "short_description"):
                label = attr.short_description
            elif (isinstance(attr, property) and
                  hasattr(attr, "fget") and
                  hasattr(attr.fget, "short_description")):
                label = attr.fget.short_description
            elif callable(attr):
                if attr.__name__ == "<lambda>":
                    label = "--"
                else:
                    label = pretty_name(attr.__name__)
            else:
                label = pretty_name(name)
    if return_attr:
        return (label, attr)
    else:
        return label
def help_text_for_field(name, model):
    """Return the help_text for field `name` on `model` (empty string when
    the name is not a real field or has no help_text)."""
    try:
        field = _get_non_gfk_field(model._meta, name)
    except FieldDoesNotExist:
        return smart_text("")
    return smart_text(getattr(field, 'help_text', ""))
def display_for_field(value, field):
    """Format `value` for display in the changelist based on `field`'s type."""
    from django.contrib.admin.templatetags.admin_list import _boolean_icon
    from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
    if field.flatchoices:
        # Fields with choices display the human-readable choice label.
        return dict(field.flatchoices).get(value, EMPTY_CHANGELIST_VALUE)
    # NullBooleanField needs special-case null-handling, so it comes
    # before the general null test.
    elif isinstance(field, models.BooleanField) or isinstance(field, models.NullBooleanField):
        return _boolean_icon(value)
    elif value is None:
        return EMPTY_CHANGELIST_VALUE
    elif isinstance(field, models.DateTimeField):
        # Datetimes are shown in the template's active timezone.
        return formats.localize(timezone.template_localtime(value))
    elif isinstance(field, (models.DateField, models.TimeField)):
        return formats.localize(value)
    elif isinstance(field, models.DecimalField):
        return formats.number_format(value, field.decimal_places)
    elif isinstance(field, models.FloatField):
        return formats.number_format(value)
    elif isinstance(field, models.FileField) and value:
        return format_html('<a href="{}">{}</a>', value.url, value)
    else:
        return smart_text(value)
def display_for_value(value, boolean=False):
    """Format a non-field `value` for display in the changelist; `boolean`
    forces rendering as a boolean icon."""
    from django.contrib.admin.templatetags.admin_list import _boolean_icon
    from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
    if boolean:
        return _boolean_icon(value)
    if value is None:
        return EMPTY_CHANGELIST_VALUE
    # datetime must be tested before date (it is a date subclass).
    if isinstance(value, datetime.datetime):
        return formats.localize(timezone.template_localtime(value))
    if isinstance(value, (datetime.date, datetime.time)):
        return formats.localize(value)
    if isinstance(value, six.integer_types + (decimal.Decimal, float)):
        return formats.number_format(value)
    return smart_text(value)
class NotRelationField(Exception):
    """Raised by get_model_from_relation() when a field does not represent
    a relation to another model."""
    pass
def get_model_from_relation(field):
    """Return the model reached by following *field*'s relation path; raise
    NotRelationField for non-relational fields."""
    if not hasattr(field, 'get_path_info'):
        raise NotRelationField
    return field.get_path_info()[-1].to_opts.model
def reverse_field_path(model, path):
    """ Create a reversed field path.
    E.g. Given (Order, "user__groups"),
    return (Group, "user__order").
    Final field must be a related model, not a data field.
    """
    reversed_path = []
    parent = model
    pieces = path.split(LOOKUP_SEP)
    for piece in pieces:
        field = parent._meta.get_field(piece)
        # skip trailing data field if extant:
        if len(reversed_path) == len(pieces) - 1:  # final iteration
            try:
                get_model_from_relation(field)
            except NotRelationField:
                break
        # Field should point to another model
        if field.is_relation and not (field.auto_created and not field.concrete):
            # Forward relation: follow rel.to and record the reverse
            # accessor name.
            related_name = field.related_query_name()
            parent = field.rel.to
        else:
            # Reverse relation (auto-created): follow the defining field.
            related_name = field.field.name
            parent = field.related_model
        reversed_path.insert(0, related_name)
    return (parent, LOOKUP_SEP.join(reversed_path))
def get_fields_from_path(model, path):
    """ Return list of Fields given path relative to model.
    e.g. (ModelX, "user__groups__name") -> [
        <django.db.models.fields.related.ForeignKey object at 0x...>,
        <django.db.models.fields.related.ManyToManyField object at 0x...>,
        <django.db.models.fields.CharField object at 0x...>,
    ]
    """
    fields = []
    for piece in path.split(LOOKUP_SEP):
        # The first piece is looked up on `model`; each later piece on the
        # model reached through the previously resolved field.
        parent = get_model_from_relation(fields[-1]) if fields else model
        fields.append(parent._meta.get_field(piece))
    return fields
def remove_trailing_data_field(fields):
    """ Discard trailing non-relation field if extant. """
    try:
        get_model_from_relation(fields[-1])
        return fields
    except NotRelationField:
        return fields[:-1]
def get_limit_choices_to_from_path(model, path):
    """ Return Q object for limiting choices if applicable.
    If final model in path is linked via a ForeignKey or ManyToManyField which
    has a ``limit_choices_to`` attribute, return it as a Q object.
    """
    fields = remove_trailing_data_field(get_fields_from_path(model, path))
    getter = None
    if fields and hasattr(fields[-1], 'rel'):
        getter = getattr(fields[-1].rel, 'get_limit_choices_to', None)
    if not getter:
        return models.Q()  # empty Q
    limit_choices_to = getter()
    if isinstance(limit_choices_to, models.Q):
        return limit_choices_to  # already a Q
    return models.Q(**limit_choices_to)  # convert dict to Q
|
kapilrastogi/Impala | refs/heads/cdh5-trunk | shell/ext-py/sqlparse-0.1.14/tests/test_filters.py | 45 | '''
Created on 24/03/2012
@author: piranna
'''
import unittest
from sqlparse.filters import StripWhitespace, Tokens2Unicode
from sqlparse.lexer import tokenize
class Test__StripWhitespace(unittest.TestCase):
    """Checks that StripWhitespace collapses insignificant whitespace in
    tokenized SQL and Tokens2Unicode reassembles a compact string."""
    # Fixture: multiple statements with heavy indentation.
    sql = """INSERT INTO dir_entries(type)VALUES(:type);
        INSERT INTO directories(inode)
        VALUES(:inode)
        LIMIT 1"""
    # Fixture: single SELECT with '==' comparisons and bound parameters.
    sql2 = """SELECT child_entry,asdf AS inode, creation
        FROM links
        WHERE parent_dir == :parent_dir AND name == :name
        LIMIT 1"""
    # Fixture: large SELECT with joins, aliases and aggregates.
    sql3 = """SELECT
    0 AS st_dev,
    0 AS st_uid,
    0 AS st_gid,
    dir_entries.type AS st_mode,
    dir_entries.inode AS st_ino,
    COUNT(links.child_entry) AS st_nlink,
    :creation AS st_ctime,
    dir_entries.access AS st_atime,
    dir_entries.modification AS st_mtime,
    COALESCE(files.size,0) AS st_size,
    COALESCE(files.size,0) AS size
FROM dir_entries
    LEFT JOIN files
        ON dir_entries.inode == files.inode
    LEFT JOIN links
        ON dir_entries.inode == links.child_entry
WHERE dir_entries.inode == :inode
GROUP BY dir_entries.inode
LIMIT 1"""
    def test_StripWhitespace1(self):
        # Statement separators survive; indentation/newlines do not.
        self.assertEqual(
            Tokens2Unicode(StripWhitespace(tokenize(self.sql))),
            'INSERT INTO dir_entries(type)VALUES(:type);INSERT INTO '
            'directories(inode)VALUES(:inode)LIMIT 1')
    def test_StripWhitespace2(self):
        # Spaces around '==' and after commas are dropped.
        self.assertEqual(
            Tokens2Unicode(StripWhitespace(tokenize(self.sql2))),
            'SELECT child_entry,asdf AS inode,creation FROM links WHERE '
            'parent_dir==:parent_dir AND name==:name LIMIT 1')
    def test_StripWhitespace3(self):
        # Large query collapses to a single compact line.
        self.assertEqual(
            Tokens2Unicode(StripWhitespace(tokenize(self.sql3))),
            'SELECT 0 AS st_dev,0 AS st_uid,0 AS st_gid,dir_entries.type AS '
            'st_mode,dir_entries.inode AS st_ino,COUNT(links.child_entry)AS '
            'st_nlink,:creation AS st_ctime,dir_entries.access AS st_atime,'
            'dir_entries.modification AS st_mtime,COALESCE(files.size,0)AS '
            'st_size,COALESCE(files.size,0)AS size FROM dir_entries LEFT JOIN'
            ' files ON dir_entries.inode==files.inode LEFT JOIN links ON '
            'dir_entries.inode==links.child_entry WHERE dir_entries.inode=='
            ':inode GROUP BY dir_entries.inode LIMIT 1')
# Allow running this module directly with the standard unittest runner.
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
|
n0max/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/wptserve/wptserve/request.py | 44 | import base64
import cgi
import Cookie
import StringIO
import tempfile
from six.moves.urllib.parse import parse_qsl, urlsplit
from . import stash
from .utils import HTTPException
missing = object()
class Server(object):
    """Data about the server environment
    .. attribute:: config
    Environment configuration information with information about the
    various servers running, their hostnames and ports.
    .. attribute:: stash
    Stash object holding state stored on the server between requests.
    """
    # Class-level default; expected to be set externally to the running
    # server's configuration.
    config = None
    def __init__(self, request):
        self._stash = None
        self._request = request
    @property
    def stash(self):
        # Lazily connect to the shared stash the first time it is used,
        # scoped to this request's path.
        if self._stash is None:
            address, authkey = stash.load_env_config()
            self._stash = stash.Stash(self._request.url_parts.path, address, authkey)
        return self._stash
class InputFile(object):
    # Bodies up to this size are buffered in memory; larger ones spill to a
    # temporary file.
    max_buffer_size = 1024*1024
    def __init__(self, rfile, length):
        """File-like object used to provide a seekable view of request body data"""
        self._file = rfile
        self.length = length
        # How many bytes have been consumed from the underlying rfile so far.
        self._file_position = 0
        if length > self.max_buffer_size:
            # NOTE(review): mode "rw+b" is not a valid io mode on Python 3 --
            # presumably this code targets Python 2; confirm before porting.
            self._buf = tempfile.TemporaryFile(mode="rw+b")
        else:
            self._buf = StringIO.StringIO()
    @property
    def _buf_position(self):
        # Current logical read position; never ahead of what has been
        # pulled from the underlying file into the buffer.
        rv = self._buf.tell()
        assert rv <= self._file_position
        return rv
    def read(self, bytes=-1):
        assert self._buf_position <= self._file_position
        if bytes < 0:
            bytes = self.length - self._buf_position
        bytes_remaining = min(bytes, self.length - self._buf_position)
        if bytes_remaining == 0:
            return ""
        if self._buf_position != self._file_position:
            # Serve previously-buffered data first (after a seek backwards).
            buf_bytes = min(bytes_remaining, self._file_position - self._buf_position)
            old_data = self._buf.read(buf_bytes)
            bytes_remaining -= buf_bytes
        else:
            old_data = ""
        assert self._buf_position == self._file_position, (
            "Before reading buffer position (%i) didn't match file position (%i)" %
            (self._buf_position, self._file_position))
        # Pull any remaining bytes from the socket and mirror them into the
        # buffer so they stay seekable.
        new_data = self._file.read(bytes_remaining)
        self._buf.write(new_data)
        self._file_position += bytes_remaining
        assert self._buf_position == self._file_position, (
            "After reading buffer position (%i) didn't match file position (%i)" %
            (self._buf_position, self._file_position))
        return old_data + new_data
    def tell(self):
        return self._buf_position
    def seek(self, offset):
        # Seeking forward past buffered data reads (and buffers) the gap.
        if offset > self.length or offset < 0:
            raise ValueError
        if offset <= self._file_position:
            self._buf.seek(offset)
        else:
            self.read(offset - self._file_position)
    def readline(self, max_bytes=None):
        if max_bytes is None:
            max_bytes = self.length - self._buf_position
        if self._buf_position < self._file_position:
            # Try to satisfy the readline from already-buffered data.
            data = self._buf.readline(max_bytes)
            if data.endswith("\n") or len(data) == max_bytes:
                return data
        else:
            data = ""
        assert self._buf_position == self._file_position
        initial_position = self._file_position
        found = False
        buf = []
        max_bytes -= len(data)
        while not found:
            # Scan ahead in small chunks looking for the newline.
            readahead = self.read(min(2, max_bytes))
            max_bytes -= len(readahead)
            for i, c in enumerate(readahead):
                if c == "\n":
                    buf.append(readahead[:i+1])
                    found = True
                    break
            if not found:
                buf.append(readahead)
                if not readahead or not max_bytes:
                    break
        new_data = "".join(buf)
        data += new_data
        # Rewind past any bytes read beyond the newline so they can be
        # re-read by the next call.
        self.seek(initial_position + len(new_data))
        return data
    def readlines(self):
        rv = []
        while True:
            data = self.readline()
            if data:
                rv.append(data)
            else:
                break
        return rv
    def next(self):
        # Python 2 iterator protocol: yield one line at a time.
        data = self.readline()
        if data:
            return data
        else:
            raise StopIteration
    def __iter__(self):
        return self
class Request(object):
    """Object representing a HTTP request.
    .. attribute:: doc_root
    The local directory to use as a base when resolving paths
    .. attribute:: route_match
    Regexp match object from matching the request path to the route
    selected for the request.
    .. attribute:: protocol_version
    HTTP version specified in the request.
    .. attribute:: method
    HTTP method in the request.
    .. attribute:: request_path
    Request path as it appears in the HTTP request.
    .. attribute:: url_base
    The prefix part of the path; typically / unless the handler has a url_base set
    .. attribute:: url
    Absolute URL for the request.
    .. attribute:: url_parts
    Parts of the requested URL as obtained by urlparse.urlsplit(path)
    .. attribute:: request_line
    Raw request line
    .. attribute:: headers
    RequestHeaders object providing a dictionary-like representation of
    the request headers.
    .. attribute:: raw_headers.
    Dictionary of non-normalized request headers.
    .. attribute:: body
    Request body as a string
    .. attribute:: raw_input
    File-like object representing the body of the request.
    .. attribute:: GET
    MultiDict representing the parameters supplied with the request.
    Note that these may be present on non-GET requests; the name is
    chosen to be familiar to users of other systems such as PHP.
    .. attribute:: POST
    MultiDict representing the request body parameters. Most parameters
    are present as string values, but file uploads have file-like
    values.
    .. attribute:: cookies
    Cookies object representing cookies sent with the request with a
    dictionary-like interface.
    .. attribute:: auth
    Object with username and password properties representing any
    credentials supplied using HTTP authentication.
    .. attribute:: server
    Server object containing information about the server environment.
    """
    def __init__(self, request_handler):
        self.doc_root = request_handler.server.router.doc_root
        self.route_match = None  # Set by the router
        self.protocol_version = request_handler.protocol_version
        self.method = request_handler.command
        # Prefer the Host header; fall back to the listening address.
        scheme = request_handler.server.scheme
        host = request_handler.headers.get("Host")
        port = request_handler.server.server_address[1]
        if host is None:
            host = request_handler.server.server_address[0]
        else:
            if ":" in host:
                host, port = host.split(":", 1)
        self.request_path = request_handler.path
        self.url_base = "/"
        # Absolute-form request targets are used as-is; otherwise rebuild
        # the full URL from scheme/host/port and the path.
        if self.request_path.startswith(scheme + "://"):
            self.url = request_handler.path
        else:
            self.url = "%s://%s:%s%s" % (scheme,
                                         host,
                                         port,
                                         self.request_path)
        self.url_parts = urlsplit(self.url)
        self.raw_headers = request_handler.headers
        self.request_line = request_handler.raw_requestline
        self._headers = None
        self.raw_input = InputFile(request_handler.rfile,
                                   int(self.headers.get("Content-Length", 0)))
        # Lazily-computed caches for the derived views of the request.
        self._body = None
        self._GET = None
        self._POST = None
        self._cookies = None
        self._auth = None
        self.server = Server(self)
    def __repr__(self):
        return "<Request %s %s>" % (self.method, self.url)
    @property
    def GET(self):
        # Parsed lazily from the query string; blank values are kept.
        if self._GET is None:
            params = parse_qsl(self.url_parts.query, keep_blank_values=True)
            self._GET = MultiDict()
            for key, value in params:
                self._GET.add(key, value)
        return self._GET
    @property
    def POST(self):
        if self._POST is None:
            #Work out the post parameters
            # Parse the body with cgi.FieldStorage, restoring the stream
            # position afterwards so .body still sees the full input.
            pos = self.raw_input.tell()
            self.raw_input.seek(0)
            fs = cgi.FieldStorage(fp=self.raw_input,
                                  environ={"REQUEST_METHOD": self.method},
                                  headers=self.headers,
                                  keep_blank_values=True)
            self._POST = MultiDict.from_field_storage(fs)
            self.raw_input.seek(pos)
        return self._POST
    @property
    def cookies(self):
        # Parsed lazily from the Cookie header.
        if self._cookies is None:
            parser = Cookie.BaseCookie()
            cookie_headers = self.headers.get("cookie", "")
            parser.load(cookie_headers)
            cookies = Cookies()
            for key, value in parser.iteritems():
                cookies[key] = CookieValue(value)
            self._cookies = cookies
        return self._cookies
    @property
    def headers(self):
        if self._headers is None:
            self._headers = RequestHeaders(self.raw_headers)
        return self._headers
    @property
    def body(self):
        # Reads the whole body, restoring the stream position afterwards.
        if self._body is None:
            pos = self.raw_input.tell()
            self.raw_input.seek(0)
            self._body = self.raw_input.read()
            self.raw_input.seek(pos)
        return self._body
    @property
    def auth(self):
        if self._auth is None:
            self._auth = Authentication(self.headers)
        return self._auth
class RequestHeaders(dict):
    """Dictionary-like API for accessing request headers.
    Keys are stored lower-cased; each key maps to the list of raw values
    seen for that header name."""
    def __init__(self, items):
        for key, value in zip(items.keys(), items.values()):
            key = key.lower()
            if key in self:
                # Bugfix: go through dict.__getitem__ to reach the
                # underlying list. The overridden __getitem__ returns a
                # joined *string* when a single value is stored, so
                # appending to it raised AttributeError whenever a header
                # name occurred more than once in the request.
                dict.__getitem__(self, key).append(value)
            else:
                dict.__setitem__(self, key, [value])
    def __getitem__(self, key):
        """Get all headers of a certain (case-insensitive) name. If there is
        more than one, the values are returned comma separated"""
        values = dict.__getitem__(self, key.lower())
        if len(values) == 1:
            return values[0]
        else:
            return ", ".join(values)
    def __setitem__(self, name, value):
        # Headers are read-only once the request has been parsed.
        raise Exception
    def get(self, key, default=None):
        """Get a string representing all headers with a particular value,
        with multiple headers separated by a comma. If no header is found
        return a default value
        :param key: The header name to look up (case-insensitive)
        :param default: The value to return in the case of no match
        """
        try:
            return self[key]
        except KeyError:
            return default
    def get_list(self, key, default=missing):
        """Get all the header values for a particular field name as
        a list"""
        try:
            return dict.__getitem__(self, key.lower())
        except KeyError:
            if default is not missing:
                return default
            else:
                raise
    def __contains__(self, key):
        return dict.__contains__(self, key.lower())
    def iteritems(self):
        for item in self:
            yield item, self[item]
    def itervalues(self):
        for item in self:
            yield self[item]
class CookieValue(object):
    """Read-only representation of a single cookie, wrapping a Morsel.
    Exposes ``key``, ``value`` and the standard cookie attributes
    (``expires``, ``path``, ``comment``, ``domain``, ``max_age``,
    ``secure``, ``version``, ``httponly``). The string form is the
    Morsel's OutputString, captured at construction time, so later
    attribute updates do not change it.
    """
    # Morsel attribute names copied onto the instance ('-' becomes '_').
    _morsel_attrs = ("expires", "path",
                     "comment", "domain", "max-age",
                     "secure", "version", "httponly")
    def __init__(self, morsel):
        self.key = morsel.key
        self.value = morsel.value
        for attr_name in self._morsel_attrs:
            setattr(self, attr_name.replace("-", "_"), morsel[attr_name])
        self._str = morsel.OutputString()
    def __str__(self):
        return self._str
    def __repr__(self):
        return self._str
    def __eq__(self, other):
        """Equality comparison for cookies. Compares to other cookies
        based on value alone and on non-cookies based on the equality
        of self.value with the other object so that a cookie with value
        "ham" compares equal to the string "ham"
        """
        return self.value == getattr(other, "value", other)
class MultiDict(dict):
    """Dictionary type that holds multiple values for each
    key"""
    #TODO: this should perhaps also order the keys
    def __init__(self):
        pass
    def __setitem__(self, name, value):
        # Plain assignment replaces all existing values with a fresh list.
        dict.__setitem__(self, name, [value])
    def add(self, name, value):
        try:
            dict.__getitem__(self, name).append(value)
        except KeyError:
            dict.__setitem__(self, name, [value])
    def __getitem__(self, key):
        """Get the first value with a given key"""
        #TODO: should this instead be the last value?
        return self.first(key)
    def first(self, key, default=missing):
        """Get the first value with a given key
        :param key: The key to lookup
        :param default: The default to return if key is
                        not found (throws if nothing is
                        specified)
        """
        values = dict.get(self, key)
        if values:
            return values[0]
        if default is not missing:
            return default
        raise KeyError
    def last(self, key, default=missing):
        """Get the last value with a given key
        :param key: The key to lookup
        :param default: The default to return if key is
                        not found (throws if nothing is
                        specified)
        """
        values = dict.get(self, key)
        if values:
            return values[-1]
        if default is not missing:
            return default
        raise KeyError
    def get_list(self, key):
        """Get all values with a given key as a list
        :param key: The key to lookup
        """
        return dict.__getitem__(self, key)
    @classmethod
    def from_field_storage(cls, fs):
        # Build a MultiDict from a cgi.FieldStorage; file uploads keep the
        # FieldStorage object itself, plain fields keep just the string.
        self = cls()
        if fs.list is None:
            return self
        for key in fs:
            values = fs[key]
            if not isinstance(values, list):
                values = [values]
            for value in values:
                self.add(key, value if value.filename else value.value)
        return self
class Cookies(MultiDict):
    """MultiDict specialised for Cookie values"""
    def __init__(self):
        pass
    def __getitem__(self, key):
        # When the same cookie name occurs multiple times, the most
        # recently added value wins.
        return self.last(key)
class Authentication(object):
    """Object for dealing with HTTP Authentication
    .. attribute:: username
    The username supplied in the HTTP Authorization
    header, or None
    .. attribute:: password
    The password supplied in the HTTP Authorization
    header, or None
    """
    def __init__(self, headers):
        self.username = None
        self.password = None
        # Scheme name -> decoder; only Basic auth is implemented.
        handlers = {"Basic": self.decode_basic}
        if "authorization" not in headers:
            return
        header = headers.get("authorization")
        auth_type, data = header.split(" ", 1)
        try:
            handler = handlers[auth_type]
        except KeyError:
            raise HTTPException(400, "Unsupported authentication scheme %s" % auth_type)
        self.username, self.password = handler(data)
    def decode_basic(self, data):
        # Base64-decoded credentials are "username:password".
        decoded_data = base64.decodestring(data)
        return decoded_data.split(":", 1)
|
rochacbruno/flask-admin | refs/heads/master | flask_admin/contrib/pymongo/view.py | 12 | import logging
import pymongo
from bson import ObjectId
from bson.errors import InvalidId
from flask import flash
from flask_admin._compat import string_types
from flask_admin.babel import gettext, ngettext, lazy_gettext
from flask_admin.model import BaseModelView
from flask_admin.actions import action
from flask_admin.helpers import get_form_data
from .filters import BasePyMongoFilter
from .tools import parse_like_term
# Set up logger
# (module-level logger shared by the flask-admin pymongo backend)
log = logging.getLogger("flask-admin.pymongo")
class ModelView(BaseModelView):
"""
MongoEngine model scaffolding.
"""
column_filters = None
"""
Collection of the column filters.
Should contain instances of
:class:`flask_admin.contrib.pymongo.filters.BasePyMongoFilter` classes.
Filters will be grouped by name when displayed in the drop-down.
For example::
from flask_admin.contrib.pymongo.filters import BooleanEqualFilter
class MyModelView(BaseModelView):
column_filters = (BooleanEqualFilter(column=User.name, name='Name'),)
or::
from flask_admin.contrib.pymongo.filters import BasePyMongoFilter
class FilterLastNameBrown(BasePyMongoFilter):
def apply(self, query, value):
if value == '1':
return query.filter(self.column == "Brown")
else:
return query.filter(self.column != "Brown")
def operation(self):
return 'is Brown'
class MyModelView(BaseModelView):
column_filters = [
FilterLastNameBrown(
column=User.last_name, name='Last Name',
options=(('1', 'Yes'), ('0', 'No'))
)
]
"""
    def __init__(self, coll,
                 name=None, category=None, endpoint=None, url=None,
                 menu_class_name=None, menu_icon_type=None, menu_icon_value=None):
        """
        Constructor
        :param coll:
            MongoDB collection object
        :param name:
            Display name
        :param category:
            Display category
        :param endpoint:
            Endpoint
        :param url:
            Custom URL
        :param menu_class_name:
            Optional class name for the menu item.
        :param menu_icon_type:
            Optional icon. Possible icon types:
            - `flask_admin.consts.ICON_TYPE_GLYPH` - Bootstrap glyph icon
            - `flask_admin.consts.ICON_TYPE_FONT_AWESOME` - Font Awesome icon
            - `flask_admin.consts.ICON_TYPE_IMAGE` - Image relative to Flask static directory
            - `flask_admin.consts.ICON_TYPE_IMAGE_URL` - Image with full URL
        :param menu_icon_value:
            Icon glyph name or URL, depending on `menu_icon_type` setting
        """
        # Populated later by init_search() from column_searchable_list.
        self._search_fields = []
        # Derive display name and endpoint from the collection name when
        # not supplied explicitly.
        if name is None:
            name = self._prettify_name(coll.name)
        if endpoint is None:
            endpoint = ('%sview' % coll.name).lower()
        super(ModelView, self).__init__(None, name, category, endpoint, url,
                                        menu_class_name=menu_class_name,
                                        menu_icon_type=menu_icon_type,
                                        menu_icon_value=menu_icon_value)
        self.coll = coll
    def scaffold_pk(self):
        # MongoDB documents are always keyed by the '_id' field.
        return '_id'
    def get_pk_value(self, model):
        """
        Return primary key value from the model instance
        :param model:
            Model instance (a pymongo document dict). Returns None when
            the '_id' key is absent.
        """
        return model.get('_id')
    def scaffold_list_columns(self):
        """
        Scaffold list columns.
        Subclasses must override: raw pymongo collections are schemaless,
        so columns cannot be derived automatically.
        """
        raise NotImplementedError()
def scaffold_sortable_columns(self):
"""
Return sortable columns dictionary (name, field)
"""
return []
def init_search(self):
"""
Init search
"""
if self.column_searchable_list:
for p in self.column_searchable_list:
if not isinstance(p, string_types):
raise ValueError('Expected string')
# TODO: Validation?
self._search_fields.append(p)
return bool(self._search_fields)
def scaffold_filters(self, attr):
"""
Return filter object(s) for the field
:param name:
Either field name or field instance
"""
raise NotImplementedError()
def is_valid_filter(self, filter):
"""
Validate if it is valid MongoEngine filter
:param filter:
Filter object
"""
return isinstance(filter, BasePyMongoFilter)
def scaffold_form(self):
raise NotImplementedError()
def _get_field_value(self, model, name):
"""
Get unformatted field value from the model
"""
return model.get(name)
def _search(self, query, search_term):
values = search_term.split(' ')
queries = []
# Construct inner querie
for value in values:
if not value:
continue
regex = parse_like_term(value)
stmt = []
for field in self._search_fields:
stmt.append({field: {'$regex': regex}})
if stmt:
if len(stmt) == 1:
queries.append(stmt[0])
else:
queries.append({'$or': stmt})
# Construct final query
if queries:
if len(queries) == 1:
final = queries[0]
else:
final = {'$and': queries}
if query:
query = {'$and': [query, final]}
else:
query = final
return query
def get_list(self, page, sort_column, sort_desc, search, filters,
execute=True, page_size=None):
"""
Get list of objects from MongoEngine
:param page:
Page number
:param sort_column:
Sort column
:param sort_desc:
Sort descending
:param search:
Search criteria
:param filters:
List of applied fiters
:param execute:
Run query immediately or not
:param page_size:
Number of results. Defaults to ModelView's page_size. Can be
overriden to change the page_size limit. Removing the page_size
limit requires setting page_size to 0 or False.
"""
query = {}
# Filters
if self._filters:
data = []
for flt, flt_name, value in filters:
f = self._filters[flt]
data = f.apply(data, value)
if data:
if len(data) == 1:
query = data[0]
else:
query['$and'] = data
# Search
if self._search_supported and search:
query = self._search(query, search)
# Get count
count = self.coll.find(query).count() if not self.simple_list_pager else None
# Sorting
sort_by = None
if sort_column:
sort_by = [(sort_column, pymongo.DESCENDING if sort_desc else pymongo.ASCENDING)]
else:
order = self._get_default_order()
if order:
sort_by = [(order[0], pymongo.DESCENDING if order[1] else pymongo.ASCENDING)]
# Pagination
if page_size is None:
page_size = self.page_size
skip = 0
if page and page_size:
skip = page * page_size
results = self.coll.find(query, sort=sort_by, skip=skip, limit=page_size)
if execute:
results = list(results)
return count, results
def _get_valid_id(self, id):
try:
return ObjectId(id)
except InvalidId:
return id
def get_one(self, id):
"""
Return single model instance by ID
:param id:
Model ID
"""
return self.coll.find_one({'_id': self._get_valid_id(id)})
def edit_form(self, obj):
"""
Create edit form from the MongoDB document
"""
return self._edit_form_class(get_form_data(), **obj)
def create_model(self, form):
"""
Create model helper
:param form:
Form instance
"""
try:
model = form.data
self._on_model_change(form, model, True)
self.coll.insert(model)
except Exception as ex:
flash(gettext('Failed to create record. %(error)s', error=str(ex)),
'error')
log.exception('Failed to create record.')
return False
else:
self.after_model_change(form, model, True)
return model
def update_model(self, form, model):
"""
Update model helper
:param form:
Form instance
:param model:
Model instance to update
"""
try:
model.update(form.data)
self._on_model_change(form, model, False)
pk = self.get_pk_value(model)
self.coll.update({'_id': pk}, model)
except Exception as ex:
flash(gettext('Failed to update record. %(error)s', error=str(ex)),
'error')
log.exception('Failed to update record.')
return False
else:
self.after_model_change(form, model, False)
return True
def delete_model(self, model):
"""
Delete model helper
:param model:
Model instance
"""
try:
pk = self.get_pk_value(model)
if not pk:
raise ValueError('Document does not have _id')
self.on_model_delete(model)
self.coll.remove({'_id': pk})
except Exception as ex:
flash(gettext('Failed to delete record. %(error)s', error=str(ex)),
'error')
log.exception('Failed to delete record.')
return False
else:
self.after_model_delete(model)
return True
# Default model actions
def is_action_allowed(self, name):
# Check delete action permission
if name == 'delete' and not self.can_delete:
return False
return super(ModelView, self).is_action_allowed(name)
@action('delete',
lazy_gettext('Delete'),
lazy_gettext('Are you sure you want to delete selected records?'))
def action_delete(self, ids):
try:
count = 0
# TODO: Optimize me
for pk in ids:
if self.delete_model(self.get_one(pk)):
count += 1
flash(ngettext('Record was successfully deleted.',
'%(count)s records were successfully deleted.',
count,
count=count), 'success')
except Exception as ex:
flash(gettext('Failed to delete records. %(error)s', error=str(ex)), 'error')
|
cocoatomo/pygments-dmdl | refs/heads/master | dmdl/lexer.py | 1 | # -*- coding: utf-8 -*-
"""
Copyright 2016 cocoatomo
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pygments.lexer import RegexLexer, include, default
from pygments.token import Text, Whitespace, Keyword, Name, Literal, String, Number, Operator, Punctuation, Comment
def list_with_separator(rule_name, element_rule, element_type, separator_rule, separator_type):
sub_rule = '_following-' + rule_name
element_rule_name = '_element-of-' + rule_name
return {
rule_name: [
include('skip'),
(element_rule, element_type, ('#pop', sub_rule)),
],
sub_rule: [
include('skip'),
(separator_rule, separator_type, element_rule_name),
default('#pop'),
],
element_rule_name: [
include('skip'),
(element_rule, element_type, '#pop'),
],
}
class DmdlLexer(RegexLexer):
name = 'Dmdl'
aliases = ['dmdl']
filenames = ['*.dmdl']
import re
flags = re.MULTILINE | re.DOTALL
# regular expressions for tokens
# <name>:
# <first-word>
# <name> '_' <word>
# <first-word>:
# ['a'-'z'] ['a'-'z', '0'-'9']*
# <word>:
# ['a'-'z', '0'-'9']+
NAME = r'[a-z]([a-z0-9])*(_[a-z0-9]+)*'
PSEUDO_ELEMENT = r'<.+?>'
tokens = {
## lexing
'skip': [ # only for include
(r'[ \t\r\n]', Whitespace),
(r'/\*', Comment.Multiline, 'block-comment'),
(r'--.*?$', Comment.Singleline),
(r'//.*?$', Comment.Singleline),
(r'\.\.\.', Punctuation),
],
'block-comment': [
(r'\*/', Comment.Multiline, '#pop'),
(r'.', Comment.Multiline),
],
# <string-literal>:
# '"' <string-char>* '"'
# <string-char>:
# ~['"', '\']
# '\' ['b', 't', 'n', 'f', 'r', '\', '"']
# '\' 'u' ['0'-'9', 'A'-'F', 'a'-'f']{4}
# '\' '0' ['0'-'3']? ['0'-'7']? ['0'-'7']
'string-literal': [
(r'[^"\\]', String.Double),
(r'\\[btnfr\\"]', String.Double),
(r'\\0[0-3]?[0-7]?[0-7]', String.Double),
(r'"', String.Double, '#pop'),
],
# <type>:
# <basic-type>
# <reference-type>
# <collection-type>
'type': [
include('skip'),
(PSEUDO_ELEMENT, Keyword.Type, '#pop'),
include('basic-type'),
include('reference-type'),
include('collection-type'),
],
# <basic-type>:
# 'INT'
# 'LONG'
# 'BYTE'
# 'SHORT'
# 'DECIMAL'
# 'FLOAT'
# 'DOUBLE'
# 'TEXT'
# 'BOOLEAN'
# 'DATE'
# 'DATETIME'
'basic-type': [
(r'INT', Keyword.Type, '#pop'),
(r'LONG', Keyword.Type, '#pop'),
(r'BYTE', Keyword.Type, '#pop'),
(r'SHORT', Keyword.Type, '#pop'),
(r'DECIMAL', Keyword.Type, '#pop'),
(r'FLOAT', Keyword.Type, '#pop'),
(r'DOUBLE', Keyword.Type, '#pop'),
(r'TEXT', Keyword.Type, '#pop'),
(r'BOOLEAN', Keyword.Type, '#pop'),
# avoid a hasty decision
(r'DATETIME', Keyword.Type, '#pop'),
(r'DATE', Keyword.Type, '#pop'),
],
# <reference-type>:
# <name>
'reference-type': [
include('skip'),
(NAME, Keyword.Type, '#pop'),
],
# <collection-type>:
# '{' <type> '}'
# '{' ':' <type> '}'
'collection-type': [
include('skip'),
# '{', ':', '}' should be treated as a part of type?
(r'\{', Punctuation, ('#pop', 'collection-type-array-or-map')),
],
'collection-type-array-or-map': [
include('skip'),
(r':', Punctuation, ('#pop', 'closing-curly-brace', 'type')),
default(('#pop', 'closing-curly-brace', 'type')),
],
# <name>:
# <first-word>
# <name> '_' <word>
# <first-word>:
# ['a'-'z'] ['a'-'z', '0'-'9']*
# <word>:
# ['a'-'z', '0'-'9']+
'name': [
include('skip'),
(NAME, Name, '#pop'),
],
'name-or-pseudo-element': [
include('skip'),
(NAME, Name, '#pop'),
(PSEUDO_ELEMENT, Name, '#pop'),
],
# <literal>:
# <string>
# <integer>
# <decimal>
# <boolean>
'literal': [
include('skip'),
(r'"', String.Double, ('#pop', 'string-literal')),
include('decimal-literal'),
include('integer-literal'),
include('boolean-literal'),
],
# <integer-literal>:
# '0'
# ['1'-'9']['0'-'9']*
'integer-literal': [
(r'0', Number.Integer, '#pop'),
(r'[1-9][0-9]*', Number.Integer, '#pop'),
],
# <decimal-literal>:
# '.' ['0'-'9']+
# '0.' ['0'-'9']*
# ['1'-'9']['0'-'9']* '.' ['0'-'9']*
'decimal-literal': [
(r'\.[0-9]+', Number.Float, '#pop'),
(r'0\.[0-9]*', Number.Float, '#pop'),
(r'[1-9][0-9]*\.[0-9]*', Number.Float, '#pop'),
],
# <boolean-literal>:
# 'TRUE'
# 'FALSE'
'boolean-literal': [
(r'TRUE', Literal, '#pop'),
(r'FALSE', Literal, '#pop'),
],
## parsing
# entry point
# <script>:
# <model-definition>*
# <model-definition>:
# <record-model-definition>
# <projective-model-definition>
# <joined-model-definition>
# <summarized-model-definition>
'root': [
include('skip'),
(r'"', String.Double, ('model-name-bind', 'attribute-list', 'description')),
default(('model-name-bind', 'attribute-list')),
],
# <description>:
# <string>
'description': [
default(('#pop', 'string-literal')),
],
# <attribute-list>:
# <attribute>*
'attribute-list': [
include('skip'),
(r'@', Name.Attribute, 'attribute'),
default('#pop'),
],
# <attribute>:
# '@' <qname>
# '@' <qname> '(' ')'
# '@' <qname> '(' <attribute-element-list> ','? ')'
# rule ','? is processed at following-attribute-element
'attribute': [
include('skip'),
default(('#pop', 'attribute-option-tuple', 'attribute-name')),
],
'attribute-option-tuple': [
include('skip'),
(r'\(', Punctuation, ('#pop', 'attribute-option')),
default('#pop'),
],
'attribute-option': [
include('skip'),
(r'\)', Punctuation, '#pop'),
default(('#pop', 'attribute-element-list')),
],
# <attribute-element-list>:
# <attribute-element-list> ',' <attribute-element>
# <attribute-element>
'attribute-element-list': [
include('skip'),
default(('#pop', 'following-attribute-element', 'attribute-element')),
],
'following-attribute-element': [
include('skip'),
(r'\)', Punctuation, '#pop'),
(r',', Punctuation, 'attribute-element'),
],
# <attribute-element>:
# <name> '=' <attribute-value>
'attribute-element': [
include('skip'),
(r'\)', Punctuation, '#pop:2'),
default(('#pop', 'attribute-value', 'bind', 'name')),
],
'bind': [
include('skip'),
(r'=', Operator, '#pop'),
],
# <attribute-value>:
# <attribute-value-array>
# <attribute-value-map>
# <qname>
# <literal>
'attribute-value': [
include('skip'),
(r'\{', Punctuation, ('#pop', 'attribute-value-array-or-map-1')),
include('literal'),
default(('#pop', 'qualified-name')),
],
# <attribute-value-map>:
# '{' ':' '}'
# '{' <attribute-pair-list> ','? '}'
# rule ','? is processed at following-attribute-pair
'attribute-value-array-or-map-1': [
include('skip'),
# '\}' -> empty array
(r'\}', Punctuation, '#pop'),
# ':' -> empty map
(r':', Punctuation, ('#pop', 'closing-curly-brace')),
# lookahead assertion for literal
(r'(?=[".0-9TF])', Literal, ('#pop', 'attribute-value-array-or-map-2', 'literal')),
# otherwise
default(('#pop', 'attribute-value-array')),
],
'attribute-value-array-or-map-2': [
include('skip'),
# ':' -> map
(r':', Punctuation, ('#pop', 'following-attribute-pair', 'attribute-value')),
# otherwise -> array
default(('#pop', 'following-attribute-value')),
],
'closing-curly-brace': [
include('skip'),
(r'\}', Punctuation, '#pop')
],
# <attribute-value-array>:
# '{' '}'
# '{' <attribute-value-list> ','? '}'
# rule ','? is processed at following-attribute-value
'attribute-value-array': [
include('skip'),
(r'\}', Punctuation, '#pop'),
default(('#pop', 'attribute-value-list')),
],
# <attribute-value-list>:
# <attribute-value-list> ',' <attribute-value>
# <attribute-value>
'attribute-value-list': [
include('skip'),
default(('#pop', 'following-attribute-value', 'attribute-value')),
],
'following-attribute-value': [
include('skip'),
(r'\}', Punctuation, '#pop'),
(r',', Punctuation, 'attribute-value-ext'),
],
'attribute-value-ext': [
include('skip'),
(r'\}', Punctuation, '#pop:2'),
default(('#pop', 'attribute-value')),
],
# <attribute-pair-list>:
# <attribute-pair-list> ',' <attribute-pair>
# <attribute-pair>
'following-attribute-pair': [
include('skip'),
(r'\}', Punctuation, '#pop'),
(r',', Punctuation, 'attribute-pair-ext'),
],
'attribute-pair-ext': [
include('skip'),
(r'\}', Punctuation, '#pop:2'),
default(('#pop', 'attribute-pair')),
],
# <attribute-pair>:
# <literal> ':' <attribute-value>
'attribute-pair': [
include('skip'),
default(('#pop', 'attribute-value', 'colon', 'literal')),
],
'colon': [
include('skip'),
(r':', Punctuation, '#pop'),
],
# <model-definition>:
# <record-model-definition>
# <projective-model-definition>
# <joined-model-definition>
# <summarized-model-definition>
'model-name-bind': [
include('skip'),
# NOTE: this implementation does not allow model names 'projective', 'joined' and 'summarized'
# negative lookahead assertion
(r'projective(?![a-z0-9_])', Keyword.Type, ('#pop', 'record-expression', 'bind', 'model-name')),
(r'joined(?![a-z0-9_])', Keyword.Type, ('#pop', 'join-expression', 'bind', 'model-name')),
(r'summarized(?![a-z0-9_])', Keyword.Type, ('#pop', 'summarize-expression', 'bind', 'model-name')),
default(('#pop', 'record-expression', 'bind', 'model-name')),
],
'model-name': [
include('skip'),
(NAME, Name.Class, '#pop'),
(PSEUDO_ELEMENT, Name.Class, '#pop'),
],
# <record-expression>:
# <record-expression> '+' <record-term>
# <record-term>
'record-expression': [
include('skip'),
default(('#pop', 'following-record-term', 'record-term')),
],
# <record-term>:
# '{' <property-definition>* '}' ← allow empty record-term issue #11
# <model-reference>
'record-term': [
include('skip'),
(PSEUDO_ELEMENT, Name.Variable.Instance, '#pop'),
(r'\{', Punctuation, ('#pop', 'property-definition')),
default(('#pop', 'model-reference')),
],
'following-record-term': [
include('skip'),
(r';', Punctuation, '#pop'),
(r'\+', Operator, 'record-term'),
],
# <property-definition>:
# <description>? <attribute>* <name> ':' <type> ';'
# <description>? <attribute>* <name> '=' <property-expression> ';'
# <description>? <attribute>* <name> ':' <type> '=' <property-expression> ';'
'property-definition': [
include('skip'),
(r'\}', Punctuation, '#pop'),
(r'"', String.Double, ('end-of-declaration', 'property-definition-latter-half', 'name-or-pseudo-element', 'attribute-list', 'description')),
default(('end-of-declaration', 'property-definition-latter-half', 'name-or-pseudo-element', 'attribute-list')),
],
'property-definition-latter-half': [
include('skip'),
(r':', Punctuation, ('#pop', 'property-definition-with-type', 'type')),
(r'=', Operator, ('#pop', 'property-expression')),
],
'property-definition-with-type': [
include('skip'),
(r'=', Operator, ('#pop', 'property-expression')),
default('#pop'),
],
# <property-expression>:
# <property-expression-list> ← <attribute-value-array> と同じ
# <property-expression-map> ← <attribute-value-map> と同じ
# <property-expression-reference> ← <qname> と同じ
'property-expression': [
include('skip'),
(r'\{', Punctuation, ('#pop', 'attribute-value-array-or-map-1')),
default(('#pop', 'qualified-name')),
],
# <model-reference>:
# <name>
'model-reference': [
include('skip'),
(NAME, Name.Class, '#pop'),
(PSEUDO_ELEMENT, Name.Class, '#pop'),
],
# <join-expression>:
# <join-expression> '+' <join-term>
# <join-term>
'join-expression': [
include('skip'),
default(('#pop', 'following-join-term', 'join-term')),
],
# <join-term>:
# <model-reference> <model-mapping>? <grouping>?
'join-term': [
include('skip'),
default(('#pop', 'grouping', 'model-mapping', 'model-reference')),
],
# <model-mapping>:
# '->' '{' <property-mapping>* '}' ← allow empty model-mapping issue #11
'model-mapping': [
include('skip'),
(r'->', Operator, ('#pop', 'model-mapping-body')),
default('#pop'),
],
'model-mapping-body': [
include('skip'),
(r'\{', Punctuation, ('#pop', 'property-mapping')),
(PSEUDO_ELEMENT, Name.Variable.Instance, '#pop'),
],
# <property-mapping>:
# <description>? <attribute>* <name> '->' <name> ';'
'property-mapping': [
include('skip'),
(r'\}', Punctuation, '#pop'),
(r'"', String.Double, ('end-of-declaration', 'name-or-pseudo-element', 'mapping-arrow', 'name-or-pseudo-element', 'attribute-list', 'description')),
default(('end-of-declaration', 'name-or-pseudo-element', 'mapping-arrow', 'name-or-pseudo-element', 'attribute-list')),
],
'mapping-arrow': [
include('skip'),
(r'->', Operator, '#pop'),
],
# <grouping>:
# '%' <property-list>
# <property-list>:
# <property-list> ',' <name>
# <name>
'grouping': [
include('skip'),
(r'%', Operator, ('#pop', 'property-list')),
default('#pop'),
],
'following-join-term': [
include('skip'),
(r';', Punctuation, '#pop'),
(r'\+', Operator, 'join-term'),
],
# <summarize-expression>:
# <summarize-expression> '+' <summarize-term>
# <summarize-term>
'summarize-expression': [
include('skip'),
default(('#pop', 'following-summarize-term', 'summarize-term')),
],
# <summarize-term>:
# <name> <model-folding> <grouping>?
'summarize-term': [
include('skip'),
default(('#pop', 'grouping', 'model-folding', 'model-name')),
],
# similar to model-mapping
# <model-folding>:
# '=>' '{' <property-folding>* '}' ← allow empty model-folding issue #11
'model-folding': [
include('skip'),
(r'=>', Operator, ('#pop', 'model-folding-body')),
],
'model-folding-body': [
include('skip'),
(r'\{', Punctuation, ('#pop', 'property-folding')),
],
# <property-folding>:
# <description>? <attribute>* <aggregator> <name> '->' <name> ';'
'property-folding': [
include('skip'),
(r'\}', Punctuation, '#pop'),
(r'"', String.Double, ('end-of-declaration', 'name-or-pseudo-element', 'mapping-arrow', 'name-or-pseudo-element', 'aggregator', 'attribute-list', 'description')),
default(('end-of-declaration', 'name-or-pseudo-element', 'mapping-arrow', 'name-or-pseudo-element', 'aggregator', 'attribute-list')),
],
# <aggregator>:
# 'any'
# 'sum'
# 'max'
# 'min'
# 'count'
'aggregator': [
include('skip'),
# negative lookahead assertion
(r'any(?![a-z0-9_])', Name.Function, '#pop'),
(r'sum(?![a-z0-9_])', Name.Function, '#pop'),
(r'max(?![a-z0-9_])', Name.Function, '#pop'),
(r'min(?![a-z0-9_])', Name.Function, '#pop'),
(r'count(?![a-z0-9_])', Name.Function, '#pop'),
(PSEUDO_ELEMENT, Name.Function, '#pop'),
],
'following-summarize-term': [
include('skip'),
(r';', Punctuation, '#pop'),
(r'\+', Operator, 'summarize-term'),
],
'end-of-declaration': [
include('skip'),
(r';', Punctuation, '#pop')
],
}
tokens.update(list_with_separator('attribute-name', NAME, Name.Attribute, r'\.', Name.Attribute))
tokens.update(list_with_separator('qualified-name', NAME, Name, r'\.', Name))
tokens.update(list_with_separator('property-list', '|'.join([NAME, PSEUDO_ELEMENT]), Name, r',', Punctuation))
def debug(code):
dl = DmdlLexer()
for t in dl.get_tokens_unprocessed(code):
print(t)
if __name__ == '__main__':
import sys
sys.exit(debug(sys.argv[1]))
|
cloud9ers/gurumate | refs/heads/master | environment/lib/python2.7/site-packages/IPython/nbformat/v3/__init__.py | 6 | """The main API for the v3 notebook format.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from .nbbase import (
NotebookNode,
new_code_cell, new_text_cell, new_notebook, new_output, new_worksheet,
new_metadata, new_author, new_heading_cell, nbformat, nbformat_minor
)
from .nbjson import reads as reads_json, writes as writes_json
from .nbjson import reads as read_json, writes as write_json
from .nbjson import to_notebook as to_notebook_json
from .nbpy import reads as reads_py, writes as writes_py
from .nbpy import reads as read_py, writes as write_py
from .nbpy import to_notebook as to_notebook_py
from .convert import convert_to_this_nbformat
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def parse_filename(fname):
"""Parse a notebook filename.
This function takes a notebook filename and returns the notebook
format (json/py) and the notebook name. This logic can be
summarized as follows:
* notebook.ipynb -> (notebook.ipynb, notebook, json)
* notebook.json -> (notebook.json, notebook, json)
* notebook.py -> (notebook.py, notebook, py)
* notebook -> (notebook.ipynb, notebook, json)
Parameters
----------
fname : unicode
The notebook filename. The filename can use a specific filename
extention (.ipynb, .json, .py) or none, in which case .ipynb will
be assumed.
Returns
-------
(fname, name, format) : (unicode, unicode, unicode)
The filename, notebook name and format.
"""
if fname.endswith(u'.ipynb'):
format = u'json'
elif fname.endswith(u'.json'):
format = u'json'
elif fname.endswith(u'.py'):
format = u'py'
else:
fname = fname + u'.ipynb'
format = u'json'
name = fname.split('.')[0]
return fname, name, format
|
FedoraScientific/salome-hexablock | refs/heads/master | doc/test_doc/make_transformation/make_translation.py | 1 | # -*- coding: latin-1 -*-
# Copyright (C) 2009-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
####### Test make translation ###############
import hexablock
doc = hexablock.addDocument("default")
size_x = 1
size_y = 1
size_z = 2
orig = doc.addVertex(0, 0, 0)
dirVr = doc.addVector(1, 1, 1)
grid = doc.makeCartesian1(orig, dirVr, size_x, size_y, size_z, 0, 0, 0)
##### doc.saveVtk ("cartesian.vtk")
devant = doc.addVector(5, 0, 0)
grid1 = doc.makeTranslation(grid, devant)
##### doc.saveVtk ("translation.vtk")
|
dionyziz/ting | refs/heads/master | API/chat/tests/message/test_get_view.py | 3 | from chat.tests.message.common import *
from django_dynamic_fixture import G
class MessageViewGETTests(ChatTests):
def test_request_messages(self):
"""
When a valid request is sent the view should return
a JSON object containing messages. Each message should be
in the form {message_content: ...,username: ..., datetime: ...}.
The messages should be in chronological order(more recent first).
The number of objects is specified by the lim argument.
"""
lim = 2
timestamp = 10 ** 11
message1 = Message.objects.create(
message_content='Message1',
datetime_start=timestamp_to_datetime(timestamp),
datetime_sent=timestamp_to_datetime(timestamp + 10),
username='vitsalis',
typing=True,
channel=self.channel,
message_type='text'
)
message2 = Message.objects.create(
message_content='Message2',
datetime_start=timestamp_to_datetime(timestamp + 60 * 60),
datetime_sent=timestamp_to_datetime(timestamp + 60 * 60 + 10),
username='pkakelas',
typing=True,
channel=self.channel,
message_type='text'
)
response = self.client.get(
reverse('chat:message', args=('channel', self.channel.name,)),
{'lim': lim}
)
messages = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(messages), 2)
# The order is reverse chronological
self.assertEqual(messages[0]['message_content'], message2.message_content)
self.assertEqual(messages[0]['username'], message2.username)
self.assertEqual(messages[0]['datetime_start'], datetime_to_timestamp(message2.datetime_start))
self.assertTrue(messages[0]['typing'])
self.assertEqual(messages[0]['id'], message2.id)
self.assertEqual(messages[0]['datetime_sent'], datetime_to_timestamp(message2.datetime_sent))
self.assertEqual(messages[1]['message_content'], message1.message_content)
self.assertEqual(messages[1]['username'], message1.username)
self.assertEqual(messages[1]['datetime_start'], datetime_to_timestamp(message1.datetime_start))
self.assertTrue(messages[1]['typing'])
self.assertEqual(messages[1]['id'], message1.id)
self.assertEqual(messages[1]['datetime_sent'], datetime_to_timestamp(message1.datetime_sent))
def test_request_messages_with_bigger_limit_than_messages(self):
"""
When the lim is bigger than the number of the messages
on the database for the channel, the server should return
all the messages for the channel.
"""
lim = 100
timestamp = 10 ** 11
create_message(
message_content='Message1',
timestamp=timestamp,
username='vitsalis',
channel=self.channel,
message_type='text'
)
create_message(
message_content='Message2',
timestamp=timestamp + 60 * 60,
username='pkakelas',
channel=self.channel,
message_type='text'
)
messages = json.loads(self.client.get(
reverse('chat:message', args=('channel', self.channel.name,)),
{'lim': lim}
).content)
self.assertEqual(len(messages), 2)
def test_request_messages_with_smaller_limit_than_messages(self):
"""
When the lim is smaller than the number of the messages
on the database for the channel, the server should return
no more than <lim> messages.
"""
lim = 2
timestamp = 10 ** 11
for i in range(100):
create_message(
message_content='Message' + str(i),
timestamp=timestamp + i,
username='vitsalis',
channel=self.channel,
message_type='text'
)
messages = json.loads(self.client.get(
reverse('chat:message', args=('channel', self.channel.name,)),
{'lim': lim}
).content)
self.assertEqual(len(messages), 2)
self.assertEqual(messages[0]['message_content'], 'Message99')
self.assertEqual(messages[1]['message_content'], 'Message98')
def test_request_messages_without_lim(self):
"""
When the lim is not specified the view should return
100 messages(or less if there are less than 100 messages).
"""
timestamp = 10 ** 11
for i in range(200):
create_message(
message_content='Message' + str(i),
timestamp=timestamp + i,
username='vitsalis',
channel=self.channel,
message_type='text'
)
messages = json.loads(self.client.get(
reverse('chat:message', args=('channel', self.channel.name,)),
).content)
self.assertEqual(len(messages), 100)
def test_request_messages_from_one_channel(self):
"""
The view should return the messages from the
channel specified.
"""
channel1 = G(Channel, name='Channel1')
channel2 = G(Channel, name='Channel2')
timestamp = 10 ** 11
message1 = create_message(
message_content='Message1',
timestamp=timestamp,
username='vitsalis',
channel=channel1,
message_type='text'
)
create_message(
message_content='Message2',
timestamp=timestamp,
username='vitsalis',
channel=channel2,
message_type='text'
)
messages = json.loads(self.client.get(
reverse('chat:message', args=('channel', channel1.name,)),
).content)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0]['message_content'], message1.message_content)
def test_request_messages_with_invalid_channel_name(self):
"""
When the channel with the name <channel_name>
does not exist, a 404(Not Found) response code
should be returned from the view.
"""
timestamp = 10 ** 11
create_message(
message_content='Message1',
timestamp=timestamp,
username='vitsalis',
channel=self.channel,
message_type='text'
)
response = self.client.get(
reverse('chat:message', args=('channel', 'invalid_name',)),
)
self.assertEqual(response.status_code, 404)
|
google-code-export/django-hotclub | refs/heads/master | libs/external_libs/python-openid-2.1.1/openid/test/test_openidyadis.py | 87 | import unittest
from openid.consumer.discover import \
OpenIDServiceEndpoint, OPENID_1_1_TYPE, OPENID_1_0_TYPE
from openid.yadis.services import applyFilter
XRDS_BOILERPLATE = '''\
<?xml version="1.0" encoding="UTF-8"?>
<xrds:XRDS xmlns:xrds="xri://$xrds"
xmlns="xri://$xrd*($v*2.0)"
xmlns:openid="http://openid.net/xmlns/1.0">
<XRD>
%s\
</XRD>
</xrds:XRDS>
'''
def mkXRDS(services):
return XRDS_BOILERPLATE % (services,)
def mkService(uris=None, type_uris=None, local_id=None, dent=' '):
chunks = [dent, '<Service>\n']
dent2 = dent + ' '
if type_uris:
for type_uri in type_uris:
chunks.extend([dent2 + '<Type>', type_uri, '</Type>\n'])
if uris:
for uri in uris:
if type(uri) is tuple:
uri, prio = uri
else:
prio = None
chunks.extend([dent2, '<URI'])
if prio is not None:
chunks.extend([' priority="', str(prio), '"'])
chunks.extend(['>', uri, '</URI>\n'])
if local_id:
chunks.extend(
[dent2, '<openid:Delegate>', local_id, '</openid:Delegate>\n'])
chunks.extend([dent, '</Service>\n'])
return ''.join(chunks)
# Different sets of server URLs for use in the URI tag
server_url_options = [
[], # This case should not generate an endpoint object
['http://server.url/'],
['https://server.url/'],
['https://server.url/', 'http://server.url/'],
['https://server.url/',
'http://server.url/',
'http://example.server.url/'],
]
# Used for generating test data
def subsets(l):
"""Generate all non-empty sublists of a list"""
subsets_list = [[]]
for x in l:
subsets_list += [[x] + t for t in subsets_list]
return subsets_list
# A couple of example extension type URIs. These are not at all
# official, but are just here for testing.
ext_types = [
'http://janrain.com/extension/blah',
'http://openid.net/sreg/1.0',
]
# All valid combinations of Type tags that should produce an OpenID endpoint
type_uri_options = [
exts + ts
# All non-empty sublists of the valid OpenID type URIs
for ts in subsets([OPENID_1_0_TYPE, OPENID_1_1_TYPE])
if ts
# All combinations of extension types (including empty extenstion list)
for exts in subsets(ext_types)
]
# Range of valid Delegate tag values for generating test data
local_id_options = [
None,
'http://vanity.domain/',
'https://somewhere/yadis/',
]
# All combinations of valid URIs, Type URIs and Delegate tags
data = [
(uris, type_uris, local_id)
for uris in server_url_options
for type_uris in type_uri_options
for local_id in local_id_options
]
class OpenIDYadisTest(unittest.TestCase):
def __init__(self, uris, type_uris, local_id):
unittest.TestCase.__init__(self)
self.uris = uris
self.type_uris = type_uris
self.local_id = local_id
def shortDescription(self):
# XXX:
return 'Successful OpenID Yadis parsing case'
def setUp(self):
self.yadis_url = 'http://unit.test/'
# Create an XRDS document to parse
services = mkService(uris=self.uris,
type_uris=self.type_uris,
local_id=self.local_id)
self.xrds = mkXRDS(services)
def runTest(self):
# Parse into endpoint objects that we will check
endpoints = applyFilter(
self.yadis_url, self.xrds, OpenIDServiceEndpoint)
# make sure there are the same number of endpoints as
# URIs. This assumes that the type_uris contains at least one
# OpenID type.
self.failUnlessEqual(len(self.uris), len(endpoints))
# So that we can check equality on the endpoint types
type_uris = list(self.type_uris)
type_uris.sort()
seen_uris = []
for endpoint in endpoints:
seen_uris.append(endpoint.server_url)
# All endpoints will have same yadis_url
self.failUnlessEqual(self.yadis_url, endpoint.claimed_id)
# and local_id
self.failUnlessEqual(self.local_id, endpoint.local_id)
# and types
actual_types = list(endpoint.type_uris)
actual_types.sort()
self.failUnlessEqual(actual_types, type_uris)
# So that they will compare equal, because we don't care what
# order they are in
seen_uris.sort()
uris = list(self.uris)
uris.sort()
# Make sure we saw all URIs, and saw each one once
self.failUnlessEqual(uris, seen_uris)
def pyUnitTests():
    """Build a TestSuite with one OpenIDYadisTest per generated data tuple."""
    return unittest.TestSuite(
        [OpenIDYadisTest(*params) for params in data])
|
per06a/python-adsl | refs/heads/develop | adsl/search.py | 1 |
"""
Various searching, selecting and finding algorithms
"""
##### STDLIB
import sys
##### 3RD PARTY
##### PROJECT
import adsl.common
##### INIT AND DECLARATIONS
if sys.version_info.major >= 3:
xrange = range
##### CLASSES AND FUNCTIONS
def binsearch(array, elem, left=None, right=None, cmp_func=None):
    """Classic binary search algorithm.

    Args:

    array (sequence): the sorted sequence of elements that we are
    searching

    elem (object): the element that we are searching for

    left (int): the lower bound index of the sub-sequence to
    search for the element. Default is None, in which case it will
    start at position 0.

    right (int): the upper bound index of the sub-sequence to
    search for the element. Default is None, in which case it will
    start at len(array) - 1.

    cmp_func (function): function to compare two arbitrary
    elements. Must conform to the "negative for e1 < e2, 0 for e1 ==
    e2, positive for e1 > e2" comparison conventions. Default is None,
    which compares with the natural ordering operators (the Python 2
    ``cmp`` built-in no longer exists in Python 3, so it cannot be the
    default).

    Returns:

    int: If the element is found in the sequence, a position that it
    was found at (not necessarily the first of equal elements).
    Else, None.
    """
    if cmp_func is None:
        def cmp_func(a, b):
            # Portable replacement for Python 2's cmp() built-in; this
            # module already shims xrange for Python 3, so the previous
            # default of ``cmp`` raised NameError there.
            return (a > b) - (a < b)

    res = None

    if left is None:
        left = 0

    if right is None:
        right = len(array) - 1

    while left <= right:
        # Integer midpoint; avoids the float round-trip of int((l+r)/2.0)
        pivot = (left + right) // 2
        pval = array[pivot]
        order = cmp_func(elem, pval)

        if order == 0:
            # This is a position of the element in the array
            res = pivot
            break
        elif order < 0:
            # The element must be in the lower half of the range if it
            # exists
            right = pivot - 1
        else:
            # The element must be in the upper half of the range if it
            # exists
            left = pivot + 1

    return res
def binsearch_bounds(array, elem):
    """Find the (lower, upper) bounds of some element in a sorted list.

    Args:

    array (list): a sorted Python list of elements
    elem (object): the element to search for

    Returns:

    tuple(int, int): the (lower, upper) inclusive 0-based bounds of the
    run of positions where elem occurs in the array. If elem is not
    found, return None.

    Note: the previous implementation advanced toward the RIGHT after
    recording an equal element while hunting for the lower bound (and
    symmetrically for the upper bound), so for inputs such as
    ``[2, 2, 2, 2, 2, 2, 2, 9, 9]`` it could report a lower bound of 1
    instead of 0. This version uses standard lowest/highest-index
    binary searches.

    Todo:

    Support comparison function for element.
    """
    def _extreme_index(lo, hi, keep_lower):
        # Binary search that keeps narrowing toward the lowest (when
        # keep_lower) or highest index holding elem, instead of stopping
        # at the first hit. Returns None when elem is absent.
        best = None
        while lo <= hi:
            mid = (lo + hi) // 2
            val = array[mid]
            if elem == val:
                best = mid
                if keep_lower:
                    hi = mid - 1
                else:
                    lo = mid + 1
            elif elem < val:
                hi = mid - 1
            else:
                lo = mid + 1
        return best

    last = len(array) - 1
    lb = _extreme_index(0, last, True)
    if lb is None:
        return None

    # elem exists, so the upper-bound search cannot fail; restrict it to
    # the positions at or after the lower bound.
    ub = _extreme_index(lb, last, False)
    return (lb, ub)
def quickselect(array, K):
    """Return the element that would occupy index K if the sequence
    were sorted, in expected linear time.

    Combines ideas from binary search and quicksort: repeatedly
    partition the current range, which pins one element at its final
    sorted position, then descend into the single half that can still
    contain index K. Expected O(n), beating the N*lg(N) bound of
    comparison sorts.

    NOTE: destructive -- the sequence is partially sorted in place.
    Returns None when K falls outside the valid index range.
    """
    lo, hi = 0, len(array) - 1
    while lo <= hi:
        p = adsl.common.mo3(lo, hi)
        p = adsl.common.partition(array, lo, hi, p)
        # p is now the fixed, final position of some element: everything
        # <= array[p] sits in the lower half, everything greater above.
        if p == K:
            return array[p]
        if K < p:
            # Index K can only be in the lower range relative to p
            hi = p - 1
        else:
            # Index K can only be in the upper range relative to p
            lo = p + 1
    return None
def hash_substr(string, i, j, prime=31):
    """
    Map a sub-sequence of characters to an integer. This works by
    mapping each individual character to an integer via ord(), then
    multiplying it by a prime number P raised to some power.

    Ex:

    Lets hash the string "cat"

    ord('t') = 116
    ord('a') = 97
    ord('c') = 99

    Let P = prime = 31. Then hash_substr("cat") is:

    ord('c')*pow(P, 2) + ord('a')*pow(P, 1) + ord('t')*pow(P, 0)

    Args:
    string (string): the sequence of characters containing some subsequence we want to hash
    i (int): the starting index of the subsequence to hash
    j (int): the ending index of the subsequence to hash
    prime (int): Optional. The prime number to multiply each ord(character) by

    Returns:
    (int) The integer representation of the character subsequence
    starting at string[i] and ending at string[j]
    """
    if i < 0:
        raise IndexError("i must be >= 0, is {}".format(i))
    if j >= len(string):
        raise IndexError("j must be < len(string), is {}".format(j))
    if i > j:
        raise IndexError("i must be <= j. (i = {}, j = {})".format(i, j))

    # BUG FIX: the previous implementation reassigned ``i`` and always
    # hashed string[0:N] regardless of the requested start index. Walk
    # the requested window from its LAST character back to its first,
    # growing the power of the prime as we go.
    res = 0
    mult = 1

    k = j
    while k >= i:
        res += (mult * ord(string[k]))
        mult *= prime
        k -= 1

    return res
def hash_str(word, N, prime=31):
    """Hash the first N characters of ``word`` via :func:`hash_substr`."""
    return hash_substr(word, 0, N-1, prime=prime)
def find_all_N(string, words, N, res_list=None, P=31):
    """
    Find all words of some fixed length N using Rabin-Karp.

    Args:
    string (str): the text to scan for occurrences
    words (iterable): the words to search for; each must have length N
    N (int): the common length of every word in ``words``
    res_list (list): Optional. An existing list to append matches to; a
    new list is created when omitted.
    P (int): Optional. The prime base used by the rolling hash.

    Returns:
    (list) (word, start, end) tuples such that string[start:end] == word.
    """
    # NOTE: let's be thankful that ord() takes into account Unicode:
    # https://docs.python.org/2/library/functions.html#ord
    for word in words:
        if len(word) != N:
            raise ValueError("{} with length = {} is not of required length = {}".format(word,
                                                                                         len(word),
                                                                                         N))

    if res_list is None:
        res = []
    else:
        res = res_list

    M = len(string)

    # Table of hashes to words.
    # BUG FIX: the table (and the initial window hash below) must be
    # computed with the same base P as the rolling update; previously P
    # was not propagated, so any caller passing P != 31 silently missed
    # every match.
    table = {hash_str(word, N, prime=P): word for word in words}

    max_pow = pow(P, N-1)

    def _verify(start, rhash):
        # A hash hit may be a collision: by the Pigeonhole principle,
        # two different strings can map to the same integer, so confirm
        # the window char-by-char before recording (word, start, end).
        word = table[rhash]
        j = 0
        while j < N:
            if string[start + j] != word[j]:
                return
            j += 1
        res.append((word, start, start + N))

    ln = M - N + 1
    if ln <= 0:
        # Text shorter than the words: nothing can match, and computing
        # the first window hash would run past the end of the string.
        return res

    # Hash the first window directly, then roll: H(i+1) is derived from
    # H(i) in constant time by dropping the leading character's
    # P^(N-1) term and mixing in the new trailing character.
    rhash = hash_substr(string, 0, N - 1, prime=P)
    if rhash in table:
        _verify(0, rhash)

    i = 1
    while i < ln:
        rhash = ((rhash - (ord(string[i-1]) * max_pow)) * P) + ord(string[i+N-1])
        if rhash in table:
            _verify(i, rhash)
        i += 1

    return res
def find_all(string, words):
    """
    Find all matching words in some string by bucket-sorting them by
    size and running all strings of the same length through
    Rabin-Karp.

    Let:

    M = len(string)
    N = the longest length of any word in words
    K = the total number of different word lengths

    The expected/best-case running time of Rabin-Karp is O(M+N). We
    call it at most K times. This gives us an expected running time of
    O(K*(M+N)).

    We can usually treat K as a constant. This reduces the expected
    running time back down to O(C*(M+N)) = O(M+N). For example, for
    the English dictionary located at /usr/share/dict/words, K = 23.
    """
    res = []

    # Bucket the words by their length (word length -> list of words).
    by_length = {}
    for word in words:
        by_length.setdefault(len(word), []).append(word)

    # One Rabin-Karp pass per distinct word length, accumulating every
    # match into the shared result list.
    for N, words_N in by_length.items():
        find_all_N(string, words_N, N, res_list=res)

    return res
|
mozilla/treeherder | refs/heads/master | tests/webapp/api/test_jobs_api.py | 2 | import datetime
import pytest
from dateutil import parser
from django.urls import reverse
from rest_framework.status import HTTP_400_BAD_REQUEST
from treeherder.model.models import Job, TextLogError
@pytest.mark.parametrize(
('offset', 'count', 'expected_num'),
[(None, None, 10), (None, 5, 5), (5, None, 6), (0, 5, 5), (10, 10, 1)],
)
def test_job_list(client, eleven_jobs_stored, test_repository, offset, count, expected_num):
"""
test retrieving a list of json blobs from the jobs-list
endpoint.
"""
url = reverse("jobs-list", kwargs={"project": test_repository.name})
params = '&'.join(
['{}={}'.format(k, v) for k, v in [('offset', offset), ('count', count)] if v]
)
if params:
url += '?{}'.format(params)
resp = client.get(url)
assert resp.status_code == 200
response_dict = resp.json()
jobs = response_dict["results"]
assert isinstance(jobs, list)
assert len(jobs) == expected_num
exp_keys = [
"submit_timestamp",
"start_timestamp",
"push_id",
"result_set_id",
"who",
"option_collection_hash",
"reason",
"id",
"job_guid",
"state",
"result",
"build_platform_id",
"end_timestamp",
"build_platform",
"machine_name",
"job_group_id",
"job_group_symbol",
"job_group_name",
"job_type_id",
"job_type_name",
"job_type_description",
"build_architecture",
"build_system_type",
"job_type_symbol",
"platform",
"job_group_description",
"platform_option",
"machine_platform_os",
"build_os",
"machine_platform_architecture",
"failure_classification_id",
"tier",
"last_modified",
"ref_data_name",
"signature",
"task_id",
"retry_id",
]
for job in jobs:
assert set(job.keys()) == set(exp_keys)
def test_job_list_bad_project(client, transactional_db):
    """A nonexistent project name makes jobs-list respond with HTTP 404."""
    bad_url = reverse("jobs-list", kwargs={"project": "badproject"})
    response = client.get(bad_url)
    assert response.status_code == 404
def test_job_list_equals_filter(client, eleven_jobs_stored, test_repository):
    """Filtering the job list by an exact job_guid returns one job."""
    base_url = reverse("jobs-list", kwargs={"project": test_repository.name})
    resp = client.get(
        "{}?job_guid=f1c75261017c7c5ce3000931dce4c442fe0a1297".format(base_url)
    )
    assert resp.status_code == 200
    results = resp.json()['results']
    assert len(results) == 1
job_filter_values = [
(u'build_architecture', u'x86_64'),
(u'build_os', u'mac'),
(u'build_platform', u'osx-10-7'),
(u'build_platform_id', 3),
(u'build_system_type', u'buildbot'),
(u'end_timestamp', 1384364849),
(u'failure_classification_id', 1),
(u'id', 4),
(u'job_group_id', 2),
(u'job_group_name', u'Mochitest'),
(u'job_group_symbol', u'M'),
(u'job_guid', u'ab952a4bbbc74f1d9fb3cf536073b371029dbd02'),
(u'job_type_id', 2),
(u'job_type_name', u'Mochitest Browser Chrome'),
(u'job_type_symbol', u'bc'),
(u'machine_name', u'talos-r4-lion-011'),
(u'machine_platform_architecture', u'x86_64'),
(u'machine_platform_os', u'mac'),
(u'option_collection_hash', u'32faaecac742100f7753f0c1d0aa0add01b4046b'),
(u'platform', u'osx-10-7'),
(u'reason', u'scheduler'),
(
u'ref_data_name',
u'Rev4 MacOSX Lion 10.7 mozilla-release debug test mochitest-browser-chrome',
),
(u'result', u'success'),
(u'result_set_id', 4),
(u'signature', u'aebe9066ff1c765815ec0513a3389238c80ef166'),
(u'start_timestamp', 1384356880),
(u'state', u'completed'),
(u'submit_timestamp', 1384356854),
(u'tier', 1),
(u'who', u'tests-mozilla-release-lion-debug-unittest'),
]
@pytest.mark.parametrize(('fieldname', 'expected'), job_filter_values)
def test_job_list_filter_fields(client, eleven_jobs_stored, test_repository, fieldname, expected):
"""
test retrieving a job list with a querystring filter.
values chosen above are from the 3rd of the ``eleven_stored_jobs`` so that
we aren't just getting the first one every time.
The field of ``last_modified`` is auto-generated, so just skipping that
to make this test easy.
"""
url = reverse("jobs-list", kwargs={"project": test_repository.name})
final_url = url + "?{}={}".format(fieldname, expected)
resp = client.get(final_url)
assert resp.status_code == 200
first = resp.json()['results'][0]
assert first[fieldname] == expected
def test_job_list_in_filter(client, eleven_jobs_stored, test_repository):
"""
test retrieving a job list with a querystring filter.
"""
url = reverse("jobs-list", kwargs={"project": test_repository.name})
final_url = url + (
"?job_guid__in="
"f1c75261017c7c5ce3000931dce4c442fe0a1297,"
"9abb6f7d54a49d763c584926377f09835c5e1a32"
)
resp = client.get(final_url)
assert resp.status_code == 200
assert len(resp.json()['results']) == 2
def test_job_detail(client, test_job):
    """
    test retrieving a single job from the jobs-detail
    endpoint, including its taskcluster metadata.
    """
    resp = client.get(
        reverse("jobs-detail", kwargs={"project": test_job.repository.name, "pk": test_job.id})
    )
    assert resp.status_code == 200
    data = resp.json()
    assert isinstance(data, dict)
    assert data["id"] == test_job.id
    # The same payload already carries the taskcluster metadata, so the
    # previous second (identical) GET request was redundant.
    assert data["taskcluster_metadata"] == {
        "task_id": 'V3SVuxO8TFy37En_6HcXLs',
        "retry_id": 0,
    }
def test_job_detail_bad_project(client, transactional_db):
    """A nonexistent project name makes jobs-detail respond with HTTP 404."""
    bad_url = reverse("jobs-detail", kwargs={"project": "badproject", "pk": 1})
    assert client.get(bad_url).status_code == 404
def test_job_detail_not_found(client, test_repository):
    """An id with no matching job makes jobs-detail respond with HTTP 404."""
    url = reverse(
        "jobs-detail", kwargs={"project": test_repository.name, "pk": -32767}
    )
    assert client.get(url).status_code == 404
def test_text_log_errors(client, test_job):
TextLogError.objects.create(job=test_job, line='failure 1', line_number=101)
TextLogError.objects.create(job=test_job, line='failure 2', line_number=102)
resp = client.get(
reverse(
"jobs-text-log-errors", kwargs={"project": test_job.repository.name, "pk": test_job.id}
)
)
assert resp.status_code == 200
assert resp.json() == [
{
'id': 1,
'job': 1,
'line': 'failure 1',
'line_number': 101,
'bug_suggestions': {
'search': 'failure 1',
'search_terms': ['failure 1'],
'bugs': {'open_recent': [], 'all_others': []},
'line_number': 101,
},
'metadata': None,
'matches': [],
'classified_failures': [],
},
{
'id': 2,
'job': 1,
'line': 'failure 2',
'line_number': 102,
'bug_suggestions': {
'search': 'failure 2',
'search_terms': ['failure 2'],
'bugs': {'open_recent': [], 'all_others': []},
'line_number': 102,
},
'metadata': None,
'matches': [],
'classified_failures': [],
},
]
@pytest.mark.parametrize(
('offset', 'count', 'expected_num'),
[(None, None, 3), (None, 2, 2), (1, None, 2), (0, 1, 1), (2, 10, 1)],
)
def test_list_similar_jobs(client, eleven_jobs_stored, offset, count, expected_num):
"""
test retrieving similar jobs
"""
job = Job.objects.get(id=1)
url = reverse("jobs-similar-jobs", kwargs={"project": job.repository.name, "pk": job.id})
params = '&'.join(
['{}={}'.format(k, v) for k, v in [('offset', offset), ('count', count)] if v]
)
if params:
url += '?{}'.format(params)
resp = client.get(url)
assert resp.status_code == 200
similar_jobs = resp.json()
assert 'results' in similar_jobs
assert isinstance(similar_jobs['results'], list)
assert len(similar_jobs['results']) == expected_num
@pytest.mark.parametrize(
    'lm_key,lm_value,exp_status, exp_job_count',
    [
        ("last_modified__gt", "2016-07-18T22:16:58.000", 200, 8),
        ("last_modified__lt", "2016-07-18T22:16:58.000", 200, 3),
        ("last_modified__gt", "-Infinity", HTTP_400_BAD_REQUEST, 0),
        ("last_modified__gt", "whatever", HTTP_400_BAD_REQUEST, 0),
    ],
)
def test_last_modified(
    client, eleven_jobs_stored, test_repository, lm_key, lm_value, exp_status, exp_job_count
):
    """Filtering jobs by last_modified returns the expected subset;
    unparseable filter values make the API respond with HTTP 400."""
    try:
        param_date = parser.parse(lm_value)
        newer_date = param_date - datetime.timedelta(minutes=10)

        # modify job last_modified for 3 jobs
        Job.objects.filter(id__in=[j.id for j in Job.objects.all()[:3]]).update(
            last_modified=newer_date
        )
    except ValueError:
        # Expected for the intentionally invalid parameter cases
        # ("-Infinity", "whatever"): skip the fixture tweak and let the
        # API itself reject the value with HTTP 400 below.
        pass

    url = reverse("jobs-list", kwargs={"project": test_repository.name})
    final_url = url + ("?{}={}".format(lm_key, lm_value))

    resp = client.get(final_url)
    assert resp.status_code == exp_status
    if exp_status == 200:
        assert len(resp.json()["results"]) == exp_job_count
|
zeffii/SoundPetal | refs/heads/master | nodes/noise/noise.py | 1 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from FLOW.node_tree import SoundPetalUgen
from FLOW.core.node_factory import make_ugen_class
UgenLFNoise0 = make_ugen_class(
'LFNoise0',
"(freq: 500, mul: 1, add: 0)")
UgenLFNoise1 = make_ugen_class(
'LFNoise1',
"(freq: 500, mul: 1, add: 0)")
UgenLFNoise2 = make_ugen_class(
'LFNoise2',
"(freq: 500, mul: 1, add: 0)")
def register():
    """Register all LFNoise ugen node classes with Blender."""
    for noise_cls in (UgenLFNoise0, UgenLFNoise1, UgenLFNoise2):
        bpy.utils.register_class(noise_cls)
def unregister():
    """Unregister all LFNoise ugen node classes from Blender."""
    for noise_cls in (UgenLFNoise0, UgenLFNoise1, UgenLFNoise2):
        bpy.utils.unregister_class(noise_cls)
|
paplorinc/intellij-community | refs/heads/master | python/testData/copyPaste/IndentTabIncrease.after.py | 75 | print "Line 2"
class Test:
def __init__(self):
print "Line 2"
print "Line 1"
|
Austin503/pyglet | refs/heads/master | pyglet/canvas/win32.py | 26 | #!/usr/bin/python
# $Id:$
from base import Display, Screen, ScreenMode, Canvas
from pyglet.libs.win32 import _kernel32, _user32, types, constants
from pyglet.libs.win32.constants import *
from pyglet.libs.win32.types import *
class Win32Display(Display):
    def get_screens(self):
        # Enumerate all attached monitors via EnumDisplayMonitors, which
        # invokes the callback once per monitor.
        screens = []
        def enum_proc(hMonitor, hdcMonitor, lprcMonitor, dwData):
            # lprcMonitor points at the monitor's RECT in virtual-screen
            # coordinates.
            r = lprcMonitor.contents
            width = r.right - r.left
            height = r.bottom - r.top
            screens.append(
                Win32Screen(self, hMonitor, r.left, r.top, width, height))
            return True  # non-zero return continues the enumeration
        # Wrap in the ctypes callback type; the local keeps the callback
        # object alive for the duration of the enumeration.
        enum_proc_ptr = MONITORENUMPROC(enum_proc)
        _user32.EnumDisplayMonitors(None, None, enum_proc_ptr, 0)
        return screens
class Win32Screen(Screen):
    # Display mode in force before the first set_mode(), so that
    # restore_mode() can put it back.
    _initial_mode = None

    def __init__(self, display, handle, x, y, width, height):
        """Wrap one monitor; ``handle`` is the Win32 HMONITOR."""
        super(Win32Screen, self).__init__(display, x, y, width, height)
        self._handle = handle

    def get_matching_configs(self, template):
        """Return the GL configs matching ``template`` for this screen."""
        # Match against the desktop device context (window handle 0).
        canvas = Win32Canvas(self.display, 0, _user32.GetDC(0))
        configs = template.match(canvas)
        # XXX deprecate config's being screen-specific
        for config in configs:
            config.screen = self
        return configs

    def get_device_name(self):
        """Return the GDI device name of this monitor, as required by
        the EnumDisplaySettings/ChangeDisplaySettings APIs."""
        info = MONITORINFOEX()
        info.cbSize = sizeof(MONITORINFOEX)
        _user32.GetMonitorInfoW(self._handle, byref(info))
        return info.szDevice

    def get_modes(self):
        """Enumerate every display mode supported by this screen."""
        device_name = self.get_device_name()
        i = 0
        modes = []
        while True:
            mode = DEVMODE()
            mode.dmSize = sizeof(DEVMODE)
            # EnumDisplaySettingsW returns 0 once the index runs past
            # the last supported mode.
            r = _user32.EnumDisplaySettingsW(device_name, i, byref(mode))
            if not r:
                break
            modes.append(Win32ScreenMode(self, mode))
            i += 1
        return modes

    def get_mode(self):
        """Return the currently active display mode of this screen."""
        mode = DEVMODE()
        mode.dmSize = sizeof(DEVMODE)
        _user32.EnumDisplaySettingsW(self.get_device_name(),
                                     ENUM_CURRENT_SETTINGS,
                                     byref(mode))
        return Win32ScreenMode(self, mode)

    def set_mode(self, mode):
        """Switch this screen to ``mode`` (a Win32ScreenMode belonging
        to this screen), remembering the original mode the first time so
        restore_mode() can revert."""
        assert mode.screen is self
        if not self._initial_mode:
            self._initial_mode = self.get_mode()

        # CDS_FULLSCREEN marks the change as temporary, so Windows
        # restores the registry mode when the process exits.
        r = _user32.ChangeDisplaySettingsExW(self.get_device_name(),
                                             byref(mode._mode),
                                             None,
                                             CDS_FULLSCREEN,
                                             None)
        if r == DISP_CHANGE_SUCCESSFUL:
            self.width = mode.width
            self.height = mode.height

    def restore_mode(self):
        """Re-apply the mode that was active before the first set_mode()."""
        if self._initial_mode:
            self.set_mode(self._initial_mode)
class Win32ScreenMode(ScreenMode):
    def __init__(self, screen, mode):
        # ``mode`` is a filled-in DEVMODE structure; it is retained on
        # the instance because Win32Screen.set_mode passes it back to
        # ChangeDisplaySettingsExW.
        super(Win32ScreenMode, self).__init__(screen)
        self._mode = mode
        self.width = mode.dmPelsWidth
        self.height = mode.dmPelsHeight
        self.depth = mode.dmBitsPerPel
        self.rate = mode.dmDisplayFrequency
class Win32Canvas(Canvas):
    # A drawable surface identified by a window handle and its device
    # context (hwnd may be 0 when wrapping the desktop DC, as in
    # Win32Screen.get_matching_configs).
    def __init__(self, display, hwnd, hdc):
        super(Win32Canvas, self).__init__(display)
        self.hwnd = hwnd  # Win32 window handle (HWND)
        self.hdc = hdc    # device context handle (HDC)
|
Phelimb/cbg | refs/heads/master | bigsi/tests/bloom/test_create_bloomfilter.py | 1 | from bigsi.bloom import generate_hashes
from bigsi.bloom import BloomFilter
def test_generate_hashes():
    # Argument roles appear to be (kmer, h, m) -- h hash values reduced
    # modulo the filter size m, returned as a set (so collisions may
    # collapse) -- inferred from BloomFilter(m=, h=) in the test below;
    # TODO confirm against bigsi.bloom.
    assert generate_hashes("ATT", 3, 25) == {2, 15, 17}
    assert generate_hashes("ATT", 1, 25) == {15}
    assert generate_hashes("ATT", 2, 50) == {15, 27}
def test_create_bloom():
    # Repeat a few times: construction should be deterministic, so every
    # round must reach the same conclusion (presumably guarding against
    # randomized hashing -- TODO confirm intent).
    for i in range(3):
        kmers1 = ["ATT", "ATC"]
        bloomfilter1 = BloomFilter(m=25, h=3)
        bloomfilter1.update(kmers1)

        # The same kmer twice inserts only one distinct element, so the
        # resulting bit array must differ from the two-element filter's.
        kmers2 = ["ATT", "ATT"]
        bloomfilter2 = BloomFilter(m=25, h=3)
        bloomfilter2.update(kmers2)
        assert bloomfilter1.bitarray != bloomfilter2.bitarray
|
cosino/u-boot-cosino | refs/heads/u-boot-2012.10-cosino | tools/patman/command.py | 73 | # Copyright (c) 2011 The Chromium OS Authors.
#
# See file CREDITS for list of people who contributed to this
# project.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
import os
import subprocess
"""Shell command ease-ups for Python."""
def RunPipe(pipeline, infile=None, outfile=None,
            capture=False, oneline=False, hide_stderr=False):
    """
    Perform a command pipeline, with optional input/output filenames.

    Args:
        pipeline: list of commands, each itself a list of argv strings;
            each command's stdout feeds the next command's stdin.
        infile: optional filename supplying stdin for the first command.
        outfile: optional filename receiving stdout of the last command
            (ignored when capture is True).
        capture: capture and return the final command's stdout.
        oneline: with capture, strip the trailing line ending.
        hide_stderr     Don't allow output of stderr (default False)

    Returns:
        With capture: the output string (single stripped line when
        oneline), or None when there was no output. Without capture:
        True if the last command exited with status 0.
    """
    last_pipe = None
    while pipeline:
        cmd = pipeline.pop(0)
        kwargs = {}
        if last_pipe is not None:
            # Chain this command onto the previous one's stdout.
            kwargs['stdin'] = last_pipe.stdout
        elif infile:
            # NOTE(review): this file object is never explicitly closed;
            # it is only reclaimed by garbage collection.
            kwargs['stdin'] = open(infile, 'rb')
        if pipeline or capture:
            kwargs['stdout'] = subprocess.PIPE
        elif outfile:
            # NOTE(review): also never explicitly closed.
            kwargs['stdout'] = open(outfile, 'wb')
        if hide_stderr:
            # NOTE(review): '/dev/null' is not portable to Windows.
            kwargs['stderr'] = open('/dev/null', 'wb')
        last_pipe = subprocess.Popen(cmd, **kwargs)

    if capture:
        ret = last_pipe.communicate()[0]
        if not ret:
            return None
        elif oneline:
            return ret.rstrip('\r\n')
        else:
            return ret
    else:
        # Wait for the last process directly; a zero wait status means
        # a clean exit.
        return os.waitpid(last_pipe.pid, 0)[1] == 0
def Output(*cmd):
    """Run *cmd* and return its captured stdout (None when empty)."""
    return RunPipe([cmd], capture=True)
def OutputOneLine(*cmd):
    """Run *cmd* and return its stdout with the trailing newline stripped."""
    return RunPipe([cmd], capture=True, oneline=True)
def Run(*cmd, **kwargs):
    """Run *cmd*, forwarding **kwargs to RunPipe (no capture by default)."""
    return RunPipe([cmd], **kwargs)
def RunList(cmd):
    """Run *cmd* (already a list of argv strings) and return its stdout."""
    return RunPipe([cmd], capture=True)
|
kustodian/ansible | refs/heads/devel | lib/ansible/plugins/cliconf/vyos.py | 7 | #
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
author: Ansible Networking Team
cliconf: vyos
short_description: Use vyos cliconf to run command on VyOS platform
description:
- This vyos plugin provides low level abstraction apis for
sending and receiving CLI commands from VyOS network devices.
version_added: "2.4"
"""
import re
import json
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text
from ansible.module_utils.common._collections_compat import Mapping
from ansible.module_utils.network.common.config import NetworkConfig
from ansible.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase
class Cliconf(CliconfBase):
    """Cliconf plugin implementation for VyOS network devices."""

    def get_device_info(self):
        """Return a dict describing the remote device (OS name, version,
        model and hostname), scraped from 'show' command output."""
        device_info = {}

        device_info['network_os'] = 'vyos'
        reply = self.get('show version')
        data = to_text(reply, errors='surrogate_or_strict').strip()

        match = re.search(r'Version:\s*(.*)', data)
        if match:
            device_info['network_os_version'] = match.group(1)

        match = re.search(r'HW model:\s*(\S+)', data)
        if match:
            device_info['network_os_model'] = match.group(1)

        reply = self.get('show host name')
        device_info['network_os_hostname'] = to_text(reply, errors='surrogate_or_strict').strip()

        return device_info

    def get_config(self, flags=None, format=None):
        """Fetch the device configuration.

        format 'text' returns the hierarchical output of
        'show configuration'; any other supported format returns the
        set-style 'show configuration commands' output.
        """
        if format:
            option_values = self.get_option_values()
            if format not in option_values['format']:
                raise ValueError("'format' value %s is invalid. Valid values of format are %s" % (format, ', '.join(option_values['format'])))

        if not flags:
            flags = []

        if format == 'text':
            command = 'show configuration'
        else:
            command = 'show configuration commands'

        # Bug fix: flags used to be appended without a separating space,
        # producing e.g. 'show configuration commandsall'.
        if flags:
            command += ' ' + ' '.join(to_list(flags))
        command = command.strip()

        out = self.send_command(command)
        return out

    def edit_config(self, candidate=None, commit=True, replace=None, comment=None):
        """Push candidate configuration lines in configure mode.

        Returns a dict with the commands sent ('request'), their output
        ('response') and, when the device reports changes, the 'diff'.
        A failed commit discards the session and re-raises.
        """
        resp = {}
        operations = self.get_device_operations()
        self.check_edit_config_capability(operations, candidate, commit, replace, comment)

        results = []
        requests = []
        self.send_command('configure')
        for cmd in to_list(candidate):
            if not isinstance(cmd, Mapping):
                cmd = {'command': cmd}

            results.append(self.send_command(**cmd))
            requests.append(cmd['command'])

        out = self.get('compare')
        out = to_text(out, errors='surrogate_or_strict')
        diff_config = out if not out.startswith('No changes') else None

        if diff_config:
            if commit:
                try:
                    self.commit(comment)
                except AnsibleConnectionFailure as e:
                    msg = 'commit failed: %s' % e.message
                    self.discard_changes()
                    raise AnsibleConnectionFailure(msg)
                else:
                    self.send_command('exit')
            else:
                self.discard_changes()
        else:
            self.send_command('exit')
            # If the prompt still ends with '#' we are stuck in
            # configure mode; leave it cleanly by discarding the
            # (empty) session.
            if to_text(self._connection.get_prompt(), errors='surrogate_or_strict').strip().endswith('#'):
                self.discard_changes()

        if diff_config:
            resp['diff'] = diff_config

        resp['response'] = results
        resp['request'] = requests
        return resp

    def get(self, command=None, prompt=None, answer=None, sendonly=False, output=None, newline=True, check_all=False):
        """Run a single operational-mode command and return its output."""
        if not command:
            raise ValueError('must provide value of command to execute')
        if output:
            raise ValueError("'output' value %s is not supported for get" % output)

        return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)

    def commit(self, comment=None):
        """Commit the pending configuration, optionally with a comment."""
        if comment:
            command = 'commit comment "{0}"'.format(comment)
        else:
            command = 'commit'
        self.send_command(command)

    def discard_changes(self):
        """Abandon any uncommitted changes and leave configure mode."""
        self.send_command('exit discard')

    def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace=None):
        """Compute the set/delete commands required to move the running
        configuration to the candidate configuration.

        candidate may be either hierarchical config (converted to 'set'
        commands) or already in set/delete form. Only diff_match values
        'line' and 'none' are supported; replace/ignore-lines/path are
        rejected.
        """
        diff = {}
        device_operations = self.get_device_operations()
        option_values = self.get_option_values()

        # NOTE(review): 'supports_generate_diff' is False for this
        # platform, so a None candidate is never rejected here and would
        # fail on candidate.startswith() below -- confirm intent.
        if candidate is None and device_operations['supports_generate_diff']:
            raise ValueError("candidate configuration is required to generate diff")

        if diff_match not in option_values['diff_match']:
            raise ValueError("'match' value %s in invalid, valid values are %s" % (diff_match, ', '.join(option_values['diff_match'])))

        if diff_replace:
            raise ValueError("'replace' in diff is not supported")

        if diff_ignore_lines:
            raise ValueError("'diff_ignore_lines' in diff is not supported")

        if path:
            raise ValueError("'path' in diff is not supported")

        set_format = candidate.startswith('set') or candidate.startswith('delete')
        candidate_obj = NetworkConfig(indent=4, contents=candidate)
        if not set_format:
            config = [c.line for c in candidate_obj.items]
            commands = list()
            # this filters out less specific lines
            for item in config:
                for index, entry in enumerate(commands):
                    if item.startswith(entry):
                        del commands[index]
                        break
                commands.append(item)

            candidate_commands = ['set %s' % cmd.replace(' {', '') for cmd in commands]

        else:
            candidate_commands = str(candidate).strip().split('\n')

        if diff_match == 'none':
            diff['config_diff'] = list(candidate_commands)
            return diff

        running_commands = [str(c).replace("'", '') for c in running.splitlines()]

        updates = list()
        visited = set()

        for line in candidate_commands:
            item = str(line).replace("'", '')

            if not item.startswith('set') and not item.startswith('delete'):
                raise ValueError('line must start with either `set` or `delete`')

            elif item.startswith('set') and item not in running_commands:
                updates.append(line)

            elif item.startswith('delete'):
                if not running_commands:
                    updates.append(line)
                else:
                    # Only emit the delete when something matching it is
                    # actually present in the running config.
                    item = re.sub(r'delete', 'set', item)
                    for entry in running_commands:
                        if entry.startswith(item) and line not in visited:
                            updates.append(line)
                            visited.add(line)

        diff['config_diff'] = list(updates)
        return diff

    def run_commands(self, commands=None, check_rc=True):
        """Run a list of commands; with check_rc=False, failures return
        their error text instead of raising."""
        if commands is None:
            raise ValueError("'commands' value is required")

        responses = list()
        for cmd in to_list(commands):
            if not isinstance(cmd, Mapping):
                cmd = {'command': cmd}

            output = cmd.pop('output', None)
            if output:
                raise ValueError("'output' value %s is not supported for run_commands" % output)

            try:
                out = self.send_command(**cmd)
            except AnsibleConnectionFailure as e:
                if check_rc:
                    raise
                out = getattr(e, 'err', e)

            responses.append(out)

        return responses

    def get_device_operations(self):
        """Advertise which configuration operations VyOS supports."""
        return {
            'supports_diff_replace': False,
            'supports_commit': True,
            'supports_rollback': False,
            'supports_defaults': False,
            'supports_onbox_diff': True,
            'supports_commit_comment': True,
            'supports_multiline_delimiter': False,
            'supports_diff_match': True,
            'supports_diff_ignore_lines': False,
            'supports_generate_diff': False,
            'supports_replace': False
        }

    def get_option_values(self):
        """Return the valid values for format/diff options on VyOS."""
        return {
            'format': ['text', 'set'],
            'diff_match': ['line', 'none'],
            'diff_replace': [],
            'output': []
        }

    def get_capabilities(self):
        """Return the JSON capabilities document for this connection."""
        result = super(Cliconf, self).get_capabilities()
        result['rpc'] += ['commit', 'discard_changes', 'get_diff', 'run_commands']
        result['device_operations'] = self.get_device_operations()
        result.update(self.get_option_values())
        return json.dumps(result)

    def set_cli_prompt_context(self):
        """
        Make sure we are in the operational cli mode
        :return: None
        """
        if self._connection.connected:
            self._update_cli_prompt_context(config_context='#', exit_command='exit discard')
|
mcepl/youtube-dl | refs/heads/master | youtube_dl/extractor/amp.py | 27 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_iso8601,
mimetype2ext,
determine_ext,
)
class AMPIE(InfoExtractor):
    """Helper extractor for Akamai Adaptive Media Player (AMP) feeds.

    Other extractors call _extract_feed_info() with a feed URL and get back
    a standard info dict (formats, thumbnails, subtitles, metadata).
    """
    # parse Akamai Adaptive Media Player feed
    def _extract_feed_info(self, url):
        # The feed is JSON shaped like an RSS document: channel -> item.
        item = self._download_json(
            url, None, 'Downloading Akamai AMP feed',
            'Unable to download Akamai AMP feed')['channel']['item']
        video_id = item['guid']

        def get_media_node(name, default=None):
            # A field may live under 'media-group', directly on the item
            # with a 'media-' prefix, or on the item under its bare name.
            media_name = 'media-%s' % name
            media_group = item.get('media-group') or item
            return media_group.get(media_name) or item.get(media_name) or item.get(name, default)

        thumbnails = []
        media_thumbnail = get_media_node('thumbnail')
        if media_thumbnail:
            # A single thumbnail comes through as a dict; normalize to a list.
            if isinstance(media_thumbnail, dict):
                media_thumbnail = [media_thumbnail]
            for thumbnail_data in media_thumbnail:
                thumbnail = thumbnail_data['@attributes']
                thumbnails.append({
                    'url': self._proto_relative_url(thumbnail['url'], 'http:'),
                    'width': int_or_none(thumbnail.get('width')),
                    'height': int_or_none(thumbnail.get('height')),
                })

        subtitles = {}
        media_subtitle = get_media_node('subTitle')
        if media_subtitle:
            if isinstance(media_subtitle, dict):
                media_subtitle = [media_subtitle]
            for subtitle_data in media_subtitle:
                subtitle = subtitle_data['@attributes']
                lang = subtitle.get('lang') or 'en'
                subtitles[lang] = [{'url': subtitle['href']}]

        formats = []
        media_content = get_media_node('content')
        if isinstance(media_content, dict):
            media_content = [media_content]
        for media_data in media_content:
            media = media_data.get('@attributes', {})
            media_url = media.get('url')
            if not media_url:
                continue
            ext = mimetype2ext(media.get('type')) or determine_ext(media_url)
            if ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    media_url + '?hdcore=3.4.0&plugin=aasp-3.4.0.132.124',
                    video_id, f4m_id='hds', fatal=False))
            elif ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    media_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
            else:
                # Progressive download entry.
                formats.append({
                    'format_id': media_data.get('media-category', {}).get('@attributes', {}).get('label'),
                    'url': media['url'],
                    'tbr': int_or_none(media.get('bitrate')),
                    'filesize': int_or_none(media.get('fileSize')),
                    'ext': ext,
                })
        self._sort_formats(formats)

        # 'pubDate' uses a space as the date/time delimiter (second argument).
        timestamp = parse_iso8601(item.get('pubDate'), ' ') or parse_iso8601(item.get('dc-date'))

        return {
            'id': video_id,
            'title': get_media_node('title'),
            'description': get_media_node('description'),
            'thumbnails': thumbnails,
            'timestamp': timestamp,
            'duration': int_or_none(media_content[0].get('@attributes', {}).get('duration')),
            'subtitles': subtitles,
            'formats': formats,
        }
|
CIBC-Internal/itk | refs/heads/master | Modules/ThirdParty/pygccxml/src/pygccxml/declarations/dependencies.py | 2 | # Copyright 2014-2015 Insight Software Consortium.
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
"""
this module contains class that keeps dependency information of some
declaration
"""
from . import cpptypes
class impl_details:
    """Internal helpers for extracting declarations from dependency targets."""

    @staticmethod
    def dig_declarations(depend_on_it):
        """Return the list of declarations referenced by *depend_on_it*.

        *depend_on_it* may itself be a declaration (returned as a singleton
        list) or a type, in which case the underlying declaration(s) are
        extracted; unresolvable types yield an empty list.
        """
        # prevent recursive import
        from pygccxml import declarations

        if isinstance(depend_on_it, declarations.declaration_t):
            return [depend_on_it]
        base_type = declarations.base_type(
            declarations.remove_alias(depend_on_it))
        if isinstance(base_type, cpptypes.declarated_t):
            return [base_type.declaration]
        elif isinstance(base_type, cpptypes.calldef_type_t):
            # A callable type depends on its return type, every argument
            # type and, for member functions, the owning class instance.
            result = []
            result.extend(impl_details.dig_declarations(base_type.return_type))
            for argtype in base_type.arguments_types:
                result.extend(impl_details.dig_declarations(argtype))
            if isinstance(base_type, cpptypes.member_function_type_t):
                result.extend(
                    impl_details.dig_declarations(
                        base_type.class_inst))
            return result
        return []
class dependency_info_t(object):
    """Represents one dependency edge: *declaration* depends on *depend_on_it*."""

    def __init__(self, declaration, depend_on_it, access_type=None, hint=None):
        object.__init__(self)
        # prevent recursive import
        from . import class_declaration

        # Only classes and types are valid dependency targets.
        assert isinstance(
            depend_on_it,
            (class_declaration.class_t,
             cpptypes.type_t))
        self._declaration = declaration
        self._depend_on_it = depend_on_it
        self._access_type = access_type
        self._hint = hint

    @property
    def declaration(self):
        """The declaration that owns this dependency."""
        return self._declaration
    # short name
    decl = declaration

    @property
    def depend_on_it(self):
        """The class or type being depended upon."""
        return self._depend_on_it

    @property
    def access_type(self):
        """Access specifier associated with this dependency (may be None)."""
        return self._access_type

    @access_type.setter
    def access_type(self, access_type):
        self._access_type = access_type

    def __str__(self):
        return 'declaration "%s" depends( %s ) on "%s" ' \
               % (self.declaration, self.access_type, self.depend_on_it)

    @property
    def hint(self):
        """The declaration, that report dependency can put some additional
        information about dependency. It can be used later"""
        return self._hint

    def find_out_depend_on_it_declarations(self):
        """If declaration depends on other declaration and not on some type
        this function will return reference to it. Otherwise None will be
        returned
        """
        return impl_details.dig_declarations(self.depend_on_it)

    @staticmethod
    def i_depend_on_them(decl):
        """Return the set of declarations that *decl* depends on."""
        from . import class_declaration  # prevent cyclic imports
        to_be_included = set()
        for dependency_info in decl.i_depend_on_them():
            for ddecl in dependency_info.find_out_depend_on_it_declarations():
                if ddecl:
                    to_be_included.add(ddecl)
        # A class member additionally depends on its owning class.
        if isinstance(decl.parent, class_declaration.class_t):
            to_be_included.add(decl.parent)
        return to_be_included

    @staticmethod
    def we_depend_on_them(decls):
        """Return the union of dependencies of every declaration in *decls*."""
        to_be_included = set()
        for decl in decls:
            to_be_included.update(dependency_info_t.i_depend_on_them(decl))
        return to_be_included
|
chaowyc/youtube-dl | refs/heads/master | youtube_dl/extractor/tubitv.py | 48 | # coding: utf-8
from __future__ import unicode_literals
import codecs
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request
)
from ..utils import (
ExtractorError,
int_or_none,
)
class TubiTvIE(InfoExtractor):
    """Extractor for tubitv.com videos (optionally behind a login)."""
    _VALID_URL = r'https?://(?:www\.)?tubitv\.com/video\?id=(?P<id>[0-9]+)'
    _LOGIN_URL = 'http://tubitv.com/login'
    _NETRC_MACHINE = 'tubitv'
    _TEST = {
        'url': 'http://tubitv.com/video?id=54411&title=The_Kitchen_Musical_-_EP01',
        'info_dict': {
            'id': '54411',
            'ext': 'mp4',
            'title': 'The Kitchen Musical - EP01',
            'thumbnail': 're:^https?://.*\.png$',
            'description': 'md5:37532716166069b353e8866e71fefae7',
            'duration': 2407,
        },
        'params': {
            'skip_download': 'HLS download',
        },
    }

    def _login(self):
        """POST credentials to the login form; raise on failed login."""
        (username, password) = self._get_login_info()
        if username is None:
            return
        self.report_login()
        form_data = {
            'username': username,
            'password': password,
        }
        payload = compat_urllib_parse.urlencode(form_data).encode('utf-8')
        request = compat_urllib_request.Request(self._LOGIN_URL, payload)
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        login_page = self._download_webpage(
            request, None, False, 'Wrong login info')
        # The logout button only appears when authenticated.
        if not re.search(r'id="tubi-logout"', login_page):
            raise ExtractorError(
                'Login failed (invalid username/password)', expected=True)

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        if re.search(r"<(?:DIV|div) class='login-required-screen'>", webpage):
            raise ExtractorError(
                'This video requires login, use --username and --password '
                'options to provide account credentials.', expected=True)

        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage)
        thumbnail = self._og_search_thumbnail(webpage)
        duration = int_or_none(self._html_search_meta(
            'video:duration', webpage, 'duration'))

        # The playlist URL is obfuscated: ROT13-encoded and reversed.
        apu = self._search_regex(r"apu='([^']+)'", webpage, 'apu')
        m3u8_url = codecs.decode(apu, 'rot_13')[::-1]
        formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4')

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': thumbnail,
            'description': description,
            'duration': duration,
        }
|
rjschof/gem5 | refs/heads/master | src/mem/ruby/system/VIPERCoalescer.py | 21 | # Copyright (c) 2015 Advanced Micro Devices, Inc.
# All rights reserved.
#
# For use for simulation and test purposes only
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Brad Beckmann
from m5.params import *
from m5.proxy import *
from GPUCoalescer import *
class VIPERCoalescer(RubyGPUCoalescer):
    """SimObject parameters for the VIPER-protocol GPU memory coalescer."""
    type = 'VIPERCoalescer'
    cxx_class = 'VIPERCoalescer'
    cxx_header = "mem/ruby/system/VIPERCoalescer.hh"
    # Throttles on how much coherence work may be issued per cycle.
    max_inv_per_cycle = Param.Int(32, "max invalidations per cycle")
    max_wb_per_cycle = Param.Int(32, "max writebacks per cycle")
    # Overrides the RubyGPUCoalescer default; presumably disables the
    # read-for-ownership assumption — confirm against the C++ implementation.
    assume_rfo = False
|
Emergya/icm-openedx-educamadrid-platform-basic | refs/heads/ci | lms/djangoapps/mobile_api/social_facebook/courses/views.py | 42 | """
Views for courses info API
"""
from rest_framework import generics, status
from rest_framework.response import Response
from courseware.access import is_mobile_available_for_user
from student.models import CourseEnrollment
from lms.djangoapps.mobile_api.social_facebook.courses import serializers
from ...users.serializers import CourseEnrollmentSerializer
from ...utils import mobile_view
from ..utils import get_friends_from_facebook, get_linked_edx_accounts, share_with_facebook_friends
@mobile_view()
class CoursesWithFriends(generics.ListAPIView):
    """
    **Use Case**

        API endpoint for retrieving all the courses that a user's friends are in.
        Note that only friends that allow their courses to be shared will be included.

    **Example request**

        GET /api/mobile/v0.5/social/facebook/courses/friends

    **Response Values**

        See UserCourseEnrollmentsList in lms/djangoapps/mobile_api/users for the structure of the response values.
    """
    serializer_class = serializers.CoursesWithFriendsSerializer

    def list(self, request, *args, **kwargs):
        """Build the list of courses visible via the user's Facebook friends."""
        serializer = self.get_serializer(data=request.GET)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        # Get friends from Facebook
        result = get_friends_from_facebook(serializer)
        if not isinstance(result, list):
            # Helper returned an error Response; propagate it unchanged.
            return result
        friends_that_are_edx_users = get_linked_edx_accounts(result)
        # Filter by sharing preferences
        users_with_sharing = [
            friend for friend in friends_that_are_edx_users if share_with_facebook_friends(friend)
        ]
        # Get unique enrollments (excluding course ids already collected).
        enrollments = []
        for friend in users_with_sharing:
            query_set = CourseEnrollment.objects.filter(
                user_id=friend['edX_id']
            ).exclude(course_id__in=[enrollment.course_id for enrollment in enrollments])
            enrollments.extend(query_set)
        # Get course objects, keeping only mobile-available ones.
        courses = [
            enrollment for enrollment in enrollments if enrollment.course
            and is_mobile_available_for_user(self.request.user, enrollment.course)
        ]
        serializer = CourseEnrollmentSerializer(courses, context={'request': request}, many=True)
        return Response(serializer.data)
|
blmoore/blogR | refs/heads/master | blogpy/imdb_getActors.py | 1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import imdb
import sys, time
from itertools import chain
def getActor(name, count):
"""
For a given actor, get their film list from IMDb,
seek budget, gross, release date, then double-check
they are *in* the film, THEN print to stdout.
"""
print >>sys.stderr, "(%d) Trying %s ..." % (count, actor)
a = i.get_person(poi[actor])
_in = 0
_out = 0
movies = [movie['title'] for movie in a['actor']]
# Get the first two hits for movie title. i.e. Arnie
# starred in some cheap TV movie "Lincoln" a while back,
# not in the 2012 Lincoln which is the first hit. Try
# first couple of hits, check at the end if they contain
# the actor you're looking at.
m_obj = [i.search_movie(m)[0:2] for m in movies]
m_obj = list(chain(*m_obj))
for film in m_obj:
i.update(film, ['business', 'release dates'])
try:
budget = ";".join(film['business']['budget'])
except KeyError:
budget = "U"
try:
# This field is awful, split by country and into
# random time periods. Need "worldwide" + date for
# inflation adjustment. Just grab all and clean it
# up later, I guess.
gross = ";;".join(film['business']['gross'])
except KeyError:
gross = "U"
try:
releaseDate = film['release dates'][0].encode("utf-8")
except KeyError:
releaseDate = "U"
# Check for "kind" == film not TV series etc.
# Double-check actos is in film
if film in a:
print actor.encode('utf-8') + "\t" + \
film['smart long imdb canonical title'].encode("utf-8") + \
"\t" + budget.encode('utf-8') + "\t" + gross.encode("utf-8") + \
"\t" + releaseDate
print >>sys.stderr, "\t%s IS in %s" % \
(a['name'], film['smart long imdb canonical title'])
_in += 1
else:
print >>sys.stderr, "\t\t %s not in %s" % \
(a['name'], film['smart long imdb canonical title'])
_out += 1
print >>sys.stderr, "\t... done! (in: %d; not in: %d)" %(_in, _out)
sys.stderr.flush()
if __name__ == "__main__":
i = imdb.IMDb('http')
# Better way, get all, sum action film gross?
# Need to focus on those who almost exclusively
# star in action films, e.g. Jolie, Will Smith
# probably not. Tom Cruise? Idk.
# Not enough data for Bruce Lee
# Actor IMDb ID
poi = {"Bruce Willis": "0000246",
"Arnold Schwarzenegger": "0000216",
"Sylvester Stallone": "0000230",
"Jackie Chan": "0000329",
"Dolph Lundgren": "0000185",
"Chuck Norris": "0001569",
"Steven Seagal": "0000219",
"Jet Li": "0001472",
"Dwayne Johnson": "0425005",
"Vin Diesel": "0004874",
"John Wayne": "0000078",
"Jason Statham": "0005458",
"Steve McQueen": "0000537",
"Clint Eastwood": "0000142",
"Charles Bronson": "0000314"
}
alist = sorted(poi.keys())
# ## n.b. could do this properly w/ argparse or w/e
try:
skip = int(sys.argv[1])
except IndexError:
skip = 0
# You *will* get various errors from time to time, best
# approach is to pipe stdout to seperate files and cat them
# together at the end. Indices are printed with each list
# member to allow restarts -- pass number of next
c = 0 + skip
for actor in alist[skip:]:
getActor(actor, c)
c += 1
time.sleep(5)
|
snasoft/QtCreatorPluginsPack | refs/heads/master | Bin/3rdParty/vera/bin/lib/test/test_cl.py | 93 | #! /usr/bin/env python
"""Whimpy test script for the cl module
Roger E. Masse
"""
from test.test_support import verbose, import_module
cl = import_module('cl')
# Exhaustive list of public attributes the IRIX `cl` module is expected to
# export; test_main() simply touches each one.
clattrs = ['ADDED_ALGORITHM_ERROR', 'ALAW', 'ALGORITHM_ID',
'ALGORITHM_VERSION', 'AUDIO', 'AWARE_ERROR', 'AWARE_MPEG_AUDIO',
'AWARE_MULTIRATE', 'AWCMP_CONST_QUAL', 'AWCMP_FIXED_RATE',
'AWCMP_INDEPENDENT', 'AWCMP_JOINT_STEREO', 'AWCMP_LOSSLESS',
'AWCMP_MPEG_LAYER_I', 'AWCMP_MPEG_LAYER_II', 'AWCMP_STEREO',
'Algorithm', 'AlgorithmNumber', 'AlgorithmType', 'AudioFormatName',
'BAD_ALGORITHM_NAME', 'BAD_ALGORITHM_TYPE', 'BAD_BLOCK_SIZE',
'BAD_BOARD', 'BAD_BUFFERING', 'BAD_BUFFERLENGTH_NEG',
'BAD_BUFFERLENGTH_ODD', 'BAD_BUFFER_EXISTS', 'BAD_BUFFER_HANDLE',
'BAD_BUFFER_POINTER', 'BAD_BUFFER_QUERY_SIZE', 'BAD_BUFFER_SIZE',
'BAD_BUFFER_SIZE_POINTER', 'BAD_BUFFER_TYPE',
'BAD_COMPRESSION_SCHEME', 'BAD_COMPRESSOR_HANDLE',
'BAD_COMPRESSOR_HANDLE_POINTER', 'BAD_FRAME_SIZE',
'BAD_FUNCTIONALITY', 'BAD_FUNCTION_POINTER', 'BAD_HEADER_SIZE',
'BAD_INITIAL_VALUE', 'BAD_INTERNAL_FORMAT', 'BAD_LICENSE',
'BAD_MIN_GT_MAX', 'BAD_NO_BUFFERSPACE', 'BAD_NUMBER_OF_BLOCKS',
'BAD_PARAM', 'BAD_PARAM_ID_POINTER', 'BAD_PARAM_TYPE', 'BAD_POINTER',
'BAD_PVBUFFER', 'BAD_SCHEME_POINTER', 'BAD_STREAM_HEADER',
'BAD_STRING_POINTER', 'BAD_TEXT_STRING_PTR', 'BEST_FIT',
'BIDIRECTIONAL', 'BITRATE_POLICY', 'BITRATE_TARGET',
'BITS_PER_COMPONENT', 'BLENDING', 'BLOCK_SIZE', 'BOTTOM_UP',
'BUFFER_NOT_CREATED', 'BUF_DATA', 'BUF_FRAME', 'BytesPerPixel',
'BytesPerSample', 'CHANNEL_POLICY', 'CHROMA_THRESHOLD', 'CODEC',
'COMPONENTS', 'COMPRESSED_BUFFER_SIZE', 'COMPRESSION_RATIO',
'COMPRESSOR', 'CONTINUOUS_BLOCK', 'CONTINUOUS_NONBLOCK',
'CompressImage', 'DATA', 'DECOMPRESSOR', 'DecompressImage',
'EDGE_THRESHOLD', 'ENABLE_IMAGEINFO', 'END_OF_SEQUENCE', 'ENUM_VALUE',
'EXACT_COMPRESSION_RATIO', 'EXTERNAL_DEVICE', 'FLOATING_ENUM_VALUE',
'FLOATING_RANGE_VALUE', 'FRAME', 'FRAME_BUFFER_SIZE',
'FRAME_BUFFER_SIZE_ZERO', 'FRAME_RATE', 'FRAME_TYPE', 'G711_ALAW',
'G711_ULAW', 'GRAYSCALE', 'GetAlgorithmName', 'HDCC',
'HDCC_SAMPLES_PER_TILE', 'HDCC_TILE_THRESHOLD', 'HEADER_START_CODE',
'IMAGE_HEIGHT', 'IMAGE_WIDTH', 'INTERNAL_FORMAT',
'INTERNAL_IMAGE_HEIGHT', 'INTERNAL_IMAGE_WIDTH', 'INTRA', 'JPEG',
'JPEG_ERROR', 'JPEG_NUM_PARAMS', 'JPEG_QUALITY_FACTOR',
'JPEG_QUANTIZATION_TABLES', 'JPEG_SOFTWARE', 'JPEG_STREAM_HEADERS',
'KEYFRAME', 'LAST_FRAME_INDEX', 'LAYER', 'LUMA_THRESHOLD',
'MAX_NUMBER_OF_AUDIO_ALGORITHMS', 'MAX_NUMBER_OF_ORIGINAL_FORMATS',
'MAX_NUMBER_OF_PARAMS', 'MAX_NUMBER_OF_VIDEO_ALGORITHMS', 'MONO',
'MPEG_VIDEO', 'MVC1', 'MVC2', 'MVC2_BLENDING', 'MVC2_BLENDING_OFF',
'MVC2_BLENDING_ON', 'MVC2_CHROMA_THRESHOLD', 'MVC2_EDGE_THRESHOLD',
'MVC2_ERROR', 'MVC2_LUMA_THRESHOLD', 'NEXT_NOT_AVAILABLE',
'NOISE_MARGIN', 'NONE', 'NUMBER_OF_FRAMES', 'NUMBER_OF_PARAMS',
'ORIENTATION', 'ORIGINAL_FORMAT', 'OpenCompressor',
'OpenDecompressor', 'PARAM_OUT_OF_RANGE', 'PREDICTED', 'PREROLL',
'ParamID', 'ParamNumber', 'ParamType', 'QUALITY_FACTOR',
'QUALITY_LEVEL', 'QueryAlgorithms', 'QueryMaxHeaderSize',
'QueryScheme', 'QuerySchemeFromName', 'RANGE_VALUE', 'RGB', 'RGB332',
'RGB8', 'RGBA', 'RGBX', 'RLE', 'RLE24', 'RTR', 'RTR1',
'RTR_QUALITY_LEVEL', 'SAMPLES_PER_TILE', 'SCHEME_BUSY',
'SCHEME_NOT_AVAILABLE', 'SPEED', 'STEREO_INTERLEAVED',
'STREAM_HEADERS', 'SetDefault', 'SetMax', 'SetMin', 'TILE_THRESHOLD',
'TOP_DOWN', 'ULAW', 'UNCOMPRESSED', 'UNCOMPRESSED_AUDIO',
'UNCOMPRESSED_VIDEO', 'UNKNOWN_SCHEME', 'VIDEO', 'VideoFormatName',
'Y', 'YCbCr', 'YCbCr422', 'YCbCr422DC', 'YCbCr422HC', 'YUV', 'YUV422',
'YUV422DC', 'YUV422HC', '__doc__', '__name__', 'cvt_type', 'error']
# This is a very inobtrusive test for the existence of the cl
# module and all its attributes.
def test_main():
    """Touch every expected cl attribute; AttributeError means the API changed."""
    # touch all the attributes of cl without doing anything
    if verbose:
        print 'Touching cl module attributes...'
    for attr in clattrs:
        if verbose:
            print 'touching: ', attr
        getattr(cl, attr)

if __name__ == '__main__':
    test_main()
|
ryandougherty/mwa-capstone | refs/heads/heroku | MWA_Tools/build/matplotlib/lib/mpl_examples/pylab_examples/date_index_formatter.py | 3 |
"""
When plotting daily data, a frequent request is to plot the data
ignoring skips, e.g. no extra spaces for weekends. This is particularly
common in financial time series, when you may have data for M-F and
not Sat, Sun and you don't want gaps in the x axis. The approach is
to simply use the integer index for the xdata and a custom tick
Formatter to get the appropriate date string for a given index.
"""
import numpy
from matplotlib.mlab import csv2rec
from pylab import figure, show
import matplotlib.cbook as cbook
from matplotlib.ticker import Formatter
# Load the bundled MSFT sample prices and keep only the last 40 rows.
datafile = cbook.get_sample_data('msft.csv', asfileobj=False)
print 'loading', datafile
r = csv2rec(datafile)[-40:]
class MyFormatter(Formatter):
    """Tick formatter that maps integer tick positions to date labels."""

    def __init__(self, dates, fmt='%Y-%m-%d'):
        """Remember the date sequence and the strftime format to use."""
        self.dates = dates
        self.fmt = fmt

    def __call__(self, x, pos=0):
        'Return the label for time x at position pos'
        index = int(round(x))
        # Guard clause: positions outside the data produce empty labels.
        if index < 0 or index >= len(self.dates):
            return ''
        return self.dates[index].strftime(self.fmt)
# Plot close prices against the integer index, but label ticks with dates,
# so non-trading days leave no gaps on the x axis.
formatter = MyFormatter(r.date)

fig = figure()
ax = fig.add_subplot(111)
ax.xaxis.set_major_formatter(formatter)
ax.plot(numpy.arange(len(r)), r.close, 'o-')
fig.autofmt_xdate()
show()
|
GuessWhoSamFoo/pandas | refs/heads/master | pandas/tests/arrays/categorical/test_warnings.py | 2 | # -*- coding: utf-8 -*-
import pytest
import pandas as pd
import pandas.util.testing as tm
class TestCategoricalWarnings(object):
    """Warning-related regression tests for Categorical."""

    def test_tab_complete_warning(self, ip):
        # https://github.com/pandas-dev/pandas/issues/16409
        pytest.importorskip('IPython', minversion="6.0.0")
        from IPython.core.completer import provisionalcompleter

        code = "import pandas as pd; c = Categorical([])"
        ip.run_code(code)
        # Tab-completing on a Categorical must not emit any warnings.
        with tm.assert_produces_warning(None):
            with provisionalcompleter('ignore'):
                list(ip.Completer.completions('c.', 1))
def test_CategoricalAccessor_categorical_deprecation():
    """Accessing ``.cat.categorical`` must raise a FutureWarning."""
    # Bug fix: the function previously took a parameter named ``object``,
    # which shadowed the builtin and which pytest tried (and failed) to
    # resolve as a fixture.
    with tm.assert_produces_warning(FutureWarning):
        pd.Series(['a', 'b'], dtype='category').cat.categorical
def test_CategoricalAccessor_name_deprecation():
    """Accessing ``.cat.name`` must raise a FutureWarning."""
    # Bug fix: removed the mistaken ``object`` parameter (pytest treated it
    # as a fixture name and shadowed the builtin).
    with tm.assert_produces_warning(FutureWarning):
        pd.Series(['a', 'b'], dtype='category').cat.name
def test_CategoricalAccessor_index_deprecation():
    """Accessing ``.cat.index`` must raise a FutureWarning."""
    # Bug fix: removed the mistaken ``object`` parameter (pytest treated it
    # as a fixture name and shadowed the builtin).
    with tm.assert_produces_warning(FutureWarning):
        pd.Series(['a', 'b'], dtype='category').cat.index
|
giganoto/py_gdrive | refs/heads/master | __main__.py | 1 | from oauth2client import client
from oauth2client import tools
from oauth2client import file as g_file
import oauth2client
from apiclient import discovery
import os
import httplib2
from apiclient.http import MediaFileUpload
from mimetypes import types_map as python_dict
import pprint
pp = pprint.PrettyPrinter(indent=4)
#helper code for gdrive library.
# Build the extension -> MIME type table used when uploading to Google Drive.
try:
    mime_dict = python_dict.copy()
    # Overwrite MIME type values so files display properly in Google Drive.
    gdrive_mimes = {
        ".doc": "application/vnd.google-apps.document",
        "gd_folder": "application/vnd.google-apps.folder",
        "fallback": "application/vnd.google-apps.unknown"
    }
    common_mimes = {
        ".apk": "application/vnd.android.package-archive",
        ".ipa": "application/zip"
    }
    mime_dict.update(gdrive_mimes)
    mime_dict.update(common_mimes)
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt still
    # propagate; dict construction failures are swallowed as before.
    # log.e("error occured in mime dictionaries. | google drive module.")
    pass
def get_mime_type(file_ext):
    """Map a file extension to its Google-Drive-friendly MIME type.

    An empty extension is treated as a folder; extensions absent from
    ``mime_dict`` fall back to the generic "fallback" entry.
    """
    if file_ext == "":
        key = "gd_folder"
    else:
        dotted = file_ext if file_ext.startswith(".") else "." + file_ext
        key = dotted if dotted in mime_dict else "fallback"
    return mime_dict[key]
def get_file_details_from_path(file_path):
    """Split *file_path* into (base name without extension, extension).

    When the path does not exist, a warning is written to stderr (a later
    Drive upload of that path is bound to fail) but the split result is
    still returned.
    """
    if not os.path.exists(file_path):
        import sys
        # Bug fix: this message used to be built and then silently dropped
        # (the log call was commented out); now it is actually emitted.
        error_msg = """No such file found, won't be able to upload to google drive.
        please check the path specified: """ + file_path
        sys.stderr.write(error_msg + "\n")
    file_path, file_ext = os.path.splitext(file_path)
    return os.path.split(file_path)[1], file_ext
#core gdrive api
# oauth2client's tools.run_flow() consumes argparse-style flags; when
# argparse is unavailable, None is an accepted fallback.
try:
    import argparse
    flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
    flags = None

# Full read/write Drive scope and OAuth client configuration.
SCOPES = "https://www.googleapis.com/auth/drive"
CLIENT_SECRET_FILE = "google_secrets.json"
APPLICATION_NAME = "build system octro"
def get_credentials():
    """Load cached OAuth2 credentials, running the browser flow if needed.

    Credentials are cached in ~/.google_credentials/credential_file.json;
    when missing or invalid, a new OAuth flow is run using
    CLIENT_SECRET_FILE and the result is stored for subsequent calls.
    """
    #credential_file
    home_dir = os.path.expanduser("~")
    credentials_dir = os.path.join(home_dir,".google_credentials")
    if not os.path.exists(credentials_dir):
        os.mkdir(credentials_dir)
    credentials_file = os.path.join(credentials_dir,"credential_file.json")
    #store
    store = g_file.Storage(credentials_file)
    credentials = store.get()
    if not credentials or credentials.invalid:
        #flow
        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE,SCOPES)
        flow.user_agent = APPLICATION_NAME
        tools.run_flow(flow, store, flags)
    return credentials
def gdrive_command(*args):
    """Authorize against the Drive v2 API and execute a single command.

    args[0]: command name, either "emptyTrash" or "upload".
    For "upload": args[1] is the file-metadata body and, optionally,
    args[2] is a MediaFileUpload media body.

    Raises:
        ValueError: for an unrecognized command. (Previously an unknown
            command fell through to ``x.execute()`` with ``x`` unbound and
            crashed with a NameError.)
    """
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    service = discovery.build("drive","v2",http=http)
    files_object = service.files()
    if args[0] == "emptyTrash":
        request = files_object.emptyTrash()
    elif args[0] == "upload":
        if len(args) == 3:
            request = files_object.insert(body=args[1], media_body=args[2])
        else:
            request = files_object.insert(body=args[1])
    else:
        raise ValueError("unknown or wrong command given to google drive: %r" % (args[0],))
    gd_file = request.execute()
    pp.pprint(gd_file)
#wrappers:
def gd_empty_trash():
    """
    Permanently delete all files from the Google Drive trash.
    """
    gdrive_command("emptyTrash")
def gd_upload_file(src, description=None, title=None, parentID=None):
    """
    Upload any kind of file/folder to Google Drive.

    src: path of the source file.
    description: Drive file description; defaults to the file's base name.
    title: name of the file on Drive; defaults to the file's base name.
    parentID: accepted for interface compatibility; currently unused.
    """
    file_name, file_ext = get_file_details_from_path(src)
    mime_type = get_mime_type(file_ext)
    # Idiom fix: compare to None with `is`, not `==`.
    if title is None:
        title = file_name
    if description is None:
        description = file_name
    body = {
        'title': title,
        'description': description,
        'mimeType': mime_type,
        'iconLink': "http://uploads.webflow.com/5363239db0358ce4250001ad/536324dc6ccf2f8908000159_octro_logo.png"
    }
    # Folders carry no media payload; everything else uploads resumably.
    if mime_type != mime_dict["gd_folder"]:
        media_body = MediaFileUpload(src, mimetype=mime_type, resumable=True)
        gdrive_command("upload", body, media_body)
    else:
        gdrive_command("upload", body)
gd_upload_file("./yoyoyo.txt")
|
florianholzapfel/home-assistant | refs/heads/dev | tests/components/media_player/test_cast.py | 22 | """The tests for the Cast Media player platform."""
# pylint: disable=protected-access
import unittest
from unittest.mock import patch
from homeassistant.components.media_player import cast
class FakeChromeCast(object):
    """Minimal stand-in for a pychromecast Chromecast device."""

    def __init__(self, host, port):
        """Record the fake device's network address."""
        self.host, self.port = host, port
class TestCastMediaPlayer(unittest.TestCase):
    """Test the media_player module."""

    @patch('homeassistant.components.media_player.cast.CastDevice')
    @patch('pychromecast.get_chromecasts')
    def test_filter_duplicates(self, mock_get_chromecasts, mock_device):
        """Test filtering of duplicates."""
        mock_get_chromecasts.return_value = [
            FakeChromeCast('some_host', cast.DEFAULT_PORT)
        ]

        # Test chromecasts as if they were hardcoded in configuration.yaml
        cast.setup_platform(None, {
            'host': 'some_host'
        }, lambda _: _)

        assert mock_device.called

        mock_device.reset_mock()
        assert not mock_device.called

        # Test chromecasts as if they were automatically discovered.
        # The same host was set up above, so no new device may be created.
        cast.setup_platform(None, {}, lambda _: _, ('some_host',
                                                    cast.DEFAULT_PORT))
        assert not mock_device.called
|
vlachoudis/sl4a | refs/heads/master | python/src/Lib/plat-irix5/GET.py | 132 | # Symbols from <gl/get.h>
from warnings import warnpy3k
warnpy3k("the GET module has been removed in Python 3.0", stacklevel=2)
del warnpy3k

# Constant values mirror the IRIX GL <gl/get.h> header (see module header).
# Buffer selectors
BCKBUFFER = 0x1
FRNTBUFFER = 0x2
DRAWZBUFFER = 0x4
# Display modes (DM*)
DMRGB = 0
DMSINGLE = 1
DMDOUBLE = 2
DMRGBDOUBLE = 5
# Monitor / video timing identifiers
HZ30 = 0
HZ60 = 1
NTSC = 2
HDTV = 3
VGA = 4
IRIS3K = 5
PR60 = 6
PAL = 9
HZ30_SG = 11
A343 = 14
STR_RECT = 15
VOF0 = 16
VOF1 = 17
VOF2 = 18
VOF3 = 19
SGI0 = 20
SGI1 = 21
SGI2 = 22
HZ72 = 23
# Video register and its flag bits (GLV_*)
GL_VIDEO_REG = 0x00800000
GLV_GENLOCK = 0x00000001
GLV_UNBLANK = 0x00000002
GLV_SRED = 0x00000004
GLV_SGREEN = 0x00000008
GLV_SBLUE = 0x00000010
GLV_SALPHA = 0x00000020
GLV_TTLGENLOCK = 0x00000080
GLV_TTLSYNC = GLV_TTLGENLOCK
GLV_GREENGENLOCK = 0x0000100
# Clipping-plane bit masks
LEFTPLANE = 0x0001
RIGHTPLANE = 0x0002
BOTTOMPLANE = 0x0004
TOPPLANE = 0x0008
NEARPLANE = 0x0010
FARPLANE = 0x0020
## GETDEF = __GL_GET_H__
NOBUFFER = 0x0
BOTHBUFFERS = 0x3
DMINTENSITYSINGLE = 3
DMINTENSITYDOUBLE = 4
# Additional monitor identifiers
MONSPECIAL = 0x20
HZ50 = 3
MONA = 5
MONB = 6
MONC = 7
MOND = 8
MON_ALL = 12
MON_GEN_ALL = 13
# Colormap modes
CMAPMULTI = 0
CMAPONE = 1
|
AkA84/edx-platform | refs/heads/master | cms/djangoapps/contentstore/management/commands/create_course.py | 163 | """
Django management command to create a course in a specific modulestore
"""
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from xmodule.modulestore import ModuleStoreEnum
from contentstore.views.course import create_new_course_in_store
from contentstore.management.commands.utils import user_from_str
class Command(BaseCommand):
    """
    Create a course in a specific modulestore.
    """

    # can this query modulestore for the list of write accessible stores or does that violate command pattern?
    help = "Create a course in one of {}".format([ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split])
    args = "modulestore user org course run"

    def parse_args(self, *args):
        """
        Return a tuple of passed in values for (modulestore, user, org, course, run).
        """
        if len(args) != 5:
            raise CommandError(
                "create_course requires 5 arguments: "
                "a modulestore, user, org, course, run. Modulestore is one of {}".format(
                    [ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split]
                )
            )

        # The store type must name a writable modulestore backend.
        if args[0] not in [ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split]:
            raise CommandError(
                "Modulestore (first arg) must be one of {}".format(
                    [ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split]
                )
            )
        storetype = args[0]

        # The user may be given as a username/email string; resolve it.
        try:
            user = user_from_str(args[1])
        except User.DoesNotExist:
            raise CommandError(
                "No user {user} found: expected args are {args}".format(
                    user=args[1],
                    args=self.args,
                ),
            )

        org = args[2]
        course = args[3]
        run = args[4]

        return storetype, user, org, course, run

    def handle(self, *args, **options):
        """Create the course and echo its resulting course key."""
        storetype, user, org, course, run = self.parse_args(*args)
        new_course = create_new_course_in_store(storetype, user, org, course, run, {})
        self.stdout.write(u"Created {}".format(unicode(new_course.id)))
|
AndrewSallans/osf.io | refs/heads/develop | website/addons/dropbox/tests/test_models.py | 1 | # -*- coding: utf-8 -*-
import os
import hashlib
from nose.tools import * # noqa (PEP8 asserts)
from framework.auth import Auth
from website.addons.dropbox.model import (
DropboxUserSettings, DropboxNodeSettings, DropboxFile
)
from tests.base import OsfTestCase
from tests.factories import UserFactory, ProjectFactory
from website.addons.dropbox.tests.utils import MockDropbox
from website.addons.dropbox.tests.factories import (
DropboxUserSettingsFactory, DropboxNodeSettingsFactory,
DropboxFileFactory
)
from website.util import web_url_for
class TestUserSettingsModel(OsfTestCase):
    """Unit tests for the DropboxUserSettings model."""

    def setUp(self):
        super(TestUserSettingsModel, self).setUp()
        self.user = UserFactory()

    def test_fields(self):
        user_settings = DropboxUserSettings(
            access_token='12345',
            dropbox_id='abc',
            owner=self.user)
        user_settings.save()
        retrieved = DropboxUserSettings.load(user_settings._primary_key)
        assert_true(retrieved.access_token)
        assert_true(retrieved.dropbox_id)
        assert_true(retrieved.owner)

    def test_has_auth(self):
        # has_auth is derived solely from the presence of an access token.
        user_settings = DropboxUserSettingsFactory(access_token=None)
        assert_false(user_settings.has_auth)
        user_settings.access_token = '12345'
        user_settings.save()
        assert_true(user_settings.has_auth)

    def test_clear_clears_associated_node_settings(self):
        node_settings = DropboxNodeSettingsFactory.build()
        user_settings = DropboxUserSettingsFactory()
        node_settings.user_settings = user_settings
        node_settings.save()

        user_settings.clear()
        user_settings.save()

        # Node settings no longer associated with user settings
        assert_is(node_settings.user_settings, None)
        assert_is(node_settings.folder, None)

    def test_clear(self):
        # clear() drops credentials but does not mark the record deleted.
        node_settings = DropboxNodeSettingsFactory.build()
        user_settings = DropboxUserSettingsFactory(access_token='abcde',
                                                   dropbox_id='abc')
        node_settings.user_settings = user_settings
        node_settings.save()

        assert_true(user_settings.access_token)
        user_settings.clear()
        user_settings.save()
        assert_false(user_settings.access_token)
        assert_false(user_settings.dropbox_id)

    def test_delete(self):
        # delete() clears credentials AND flags the record as deleted.
        user_settings = DropboxUserSettingsFactory()
        assert_true(user_settings.has_auth)
        user_settings.delete()
        user_settings.save()
        assert_false(user_settings.access_token)
        assert_false(user_settings.dropbox_id)
        assert_true(user_settings.deleted)

    def test_delete_clears_associated_node_settings(self):
        node_settings = DropboxNodeSettingsFactory.build()
        user_settings = DropboxUserSettingsFactory()
        node_settings.user_settings = user_settings
        node_settings.save()

        user_settings.delete()
        user_settings.save()

        # Node settings no longer associated with user settings
        assert_is(node_settings.user_settings, None)
        assert_is(node_settings.folder, None)
        assert_false(node_settings.deleted)

    def test_to_json(self):
        user_settings = DropboxUserSettingsFactory()
        result = user_settings.to_json()
        assert_equal(result['has_auth'], user_settings.has_auth)
class TestDropboxNodeSettingsModel(OsfTestCase):
    """Tests for the DropboxNodeSettings node-addon model."""

    def setUp(self):
        super(TestDropboxNodeSettingsModel, self).setUp()
        self.user = UserFactory()
        self.user.add_addon('dropbox')
        self.user.save()
        self.user_settings = self.user.get_addon('dropbox')
        self.project = ProjectFactory()
        self.node_settings = DropboxNodeSettingsFactory(
            user_settings=self.user_settings,
            owner=self.project
        )

    def test_fields(self):
        node_settings = DropboxNodeSettings(user_settings=self.user_settings)
        node_settings.save()
        assert_true(node_settings.user_settings)
        assert_equal(node_settings.user_settings.owner, self.user)
        assert_true(hasattr(node_settings, 'folder'))
        assert_true(hasattr(node_settings, 'registration_data'))

    def test_folder_defaults_to_none(self):
        node_settings = DropboxNodeSettings(user_settings=self.user_settings)
        node_settings.save()
        assert_is_none(node_settings.folder)

    def test_has_auth(self):
        # has_auth is delegated to the linked user settings' access token.
        settings = DropboxNodeSettings(user_settings=self.user_settings)
        settings.save()
        assert_false(settings.has_auth)
        settings.user_settings.access_token = '123abc'
        settings.user_settings.save()
        assert_true(settings.has_auth)

    def test_to_json(self):
        settings = self.node_settings
        user = UserFactory()
        result = settings.to_json(user)
        assert_equal(result['addon_short_name'], 'dropbox')

    def test_delete(self):
        # delete() unlinks user settings/folder but must not add a log entry.
        assert_true(self.node_settings.user_settings)
        assert_true(self.node_settings.folder)
        old_logs = self.project.logs
        self.node_settings.delete()
        self.node_settings.save()
        assert_is(self.node_settings.user_settings, None)
        assert_is(self.node_settings.folder, None)
        assert_true(self.node_settings.deleted)
        assert_equal(self.project.logs, old_logs)

    def test_deauthorize(self):
        # deauthorize() unlinks and records a 'dropbox_node_deauthorized' log.
        assert_true(self.node_settings.user_settings)
        assert_true(self.node_settings.folder)
        self.node_settings.deauthorize(auth=Auth(self.user))
        self.node_settings.save()
        assert_is(self.node_settings.user_settings, None)
        assert_is(self.node_settings.folder, None)
        last_log = self.project.logs[-1]
        assert_equal(last_log.action, 'dropbox_node_deauthorized')
        params = last_log.params
        assert_in('node', params)
        assert_in('project', params)
        assert_in('folder', params)

    def test_set_folder(self):
        folder_name = 'queen/freddie'
        self.node_settings.set_folder(folder_name, auth=Auth(self.user))
        self.node_settings.save()
        # Folder was set
        assert_equal(self.node_settings.folder, folder_name)
        # Log was saved
        last_log = self.project.logs[-1]
        assert_equal(last_log.action, 'dropbox_folder_selected')

    def test_set_user_auth(self):
        node_settings = DropboxNodeSettingsFactory()
        user_settings = DropboxUserSettingsFactory()
        node_settings.set_user_auth(user_settings)
        node_settings.save()
        assert_true(node_settings.has_auth)
        assert_equal(node_settings.user_settings, user_settings)
        # A log was saved
        last_log = node_settings.owner.logs[-1]
        assert_equal(last_log.action, 'dropbox_node_authorized')
        log_params = last_log.params
        assert_equal(log_params['folder'], node_settings.folder)
        assert_equal(log_params['node'], node_settings.owner._primary_key)
        assert_equal(last_log.user, user_settings.owner)
class TestNodeSettingsCallbacks(OsfTestCase):
    """Tests for the node lifecycle callbacks (fork, remove contributor,
    delete) on DropboxNodeSettings."""

    def setUp(self):
        super(TestNodeSettingsCallbacks, self).setUp()
        # Create node settings with auth
        self.user_settings = DropboxUserSettingsFactory(access_token='123abc')
        self.node_settings = DropboxNodeSettingsFactory(
            user_settings=self.user_settings,
            folder='',
        )
        self.project = self.node_settings.owner
        self.user = self.user_settings.owner

    def test_after_fork_by_authorized_dropbox_user(self):
        # Forking by the authorizer keeps the auth on the clone.
        fork = ProjectFactory()
        clone, message = self.node_settings.after_fork(
            node=self.project, fork=fork, user=self.user_settings.owner
        )
        assert_equal(clone.user_settings, self.user_settings)

    def test_after_fork_by_unauthorized_dropbox_user(self):
        # Forking by another user strips the auth from the clone.
        fork = ProjectFactory()
        user = UserFactory()
        clone, message = self.node_settings.after_fork(
            node=self.project, fork=fork, user=user,
            save=True
        )
        # need request context for url_for
        assert_is(clone.user_settings, None)

    def test_before_fork(self):
        node = ProjectFactory()
        message = self.node_settings.before_fork(node, self.user)
        assert_true(message)

    def test_before_remove_contributor_message(self):
        # Warning message should name both the user and the project.
        message = self.node_settings.before_remove_contributor(
            self.project, self.user)
        assert_true(message)
        assert_in(self.user.fullname, message)
        assert_in(self.project.project_or_component, message)

    def test_after_remove_authorized_dropbox_user(self):
        # Removing the authorizing contributor revokes the node's auth.
        message = self.node_settings.after_remove_contributor(
            self.project, self.user_settings.owner)
        self.node_settings.save()
        assert_is_none(self.node_settings.user_settings)
        assert_true(message)

    def test_after_delete(self):
        self.project.remove_node(Auth(user=self.project.creator))
        # Ensure that changes to node settings have been saved
        self.node_settings.reload()
        assert_true(self.node_settings.user_settings is None)
        assert_true(self.node_settings.folder is None)
class TestDropboxGuidFile(OsfTestCase):
    """Tests for DropboxFile URL building and cache-file naming."""

    def test_verbose_url(self):
        # Non-GUID URL routes through the dropbox_view_file view.
        project = ProjectFactory()
        file_obj = DropboxFile(node=project, path='foo.txt')
        file_obj.save()
        file_url = file_obj.url(guid=False)
        url = web_url_for('dropbox_view_file',
                          pid=project._primary_key, path=file_obj.path, rev='')
        assert_equal(url, file_url)

    def test_guid_url(self):
        # GUID URL is /<guid>/ with the revision as a query parameter.
        file_obj = DropboxFileFactory()
        result = file_obj.url(guid=True, rev='123')
        assert_equal(result, '/{guid}/?rev=123'.format(guid=file_obj._primary_key))

    def test_cache_file_name(self):
        # Cache filename is md5(path)_rev.html.
        project = ProjectFactory()
        path = 'My Project/foo.txt'
        file_obj = DropboxFile(node=project, path=path)
        mock_client = MockDropbox()
        file_obj.update_metadata(client=mock_client)
        file_obj.save()
        result = file_obj.get_cache_filename(client=mock_client)
        assert_equal(
            result,
            '{0}_{1}.html'.format(
                hashlib.md5(file_obj.path).hexdigest(),
                file_obj.metadata['rev'],
            )
        )

    def test_cache_file_name_encode(self):
        # Non-ASCII paths must hash the same way as plain ones.
        project = ProjectFactory()
        path = 'à/ é éà'
        file_obj = DropboxFile(node=project, path=path)
        mock_client = MockDropbox()
        file_obj.update_metadata(client=mock_client)
        file_obj.save()
        result = file_obj.get_cache_filename(client=mock_client)
        assert_equal(
            result,
            '{0}_{1}.html'.format(
                hashlib.md5(path).hexdigest(),
                file_obj.metadata['rev'],
            )
        )

    def test_download_url(self):
        file_obj = DropboxFileFactory()
        dl_url = file_obj.download_url(guid=False)
        expected = file_obj.node.web_url_for('dropbox_download', path=file_obj.path,
                                             rev='', _absolute=True)
        assert_equal(dl_url, expected)

    def test_download_url_guid(self):
        file_obj = DropboxFileFactory()
        dl_url = file_obj.download_url(guid=True, rev='123')
        expected = os.path.join('/', file_obj._primary_key, 'download/') + "?rev=123"
        assert_equal(dl_url, expected)

    def test_update_metadata(self):
        # update_metadata caches the client's metadata for the file's path.
        client = MockDropbox()
        file_obj = DropboxFileFactory(metadata=None)
        file_obj.update_metadata(client=client)
        file_obj.save()
        assert_equal(file_obj.metadata, client.metadata(file_obj.path, list=False))
|
risicle/django | refs/heads/master | tests/field_subclassing/fields.py | 170 | from __future__ import unicode_literals
import json
import warnings
from django.db import models
from django.utils import six
from django.utils.deconstruct import deconstructible
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text, python_2_unicode_compatible
# Catch warning about subfieldbase -- remove in Django 1.10
warnings.filterwarnings(
'ignore',
'SubfieldBase has been deprecated. Use Field.from_db_value instead.',
RemovedInDjango110Warning
)
@deconstructible
@python_2_unicode_compatible
class Small(object):
    """
    A simple class to show that non-trivial Python objects can be used as
    attributes.
    """
    def __init__(self, first, second):
        self.first, self.second = first, second

    def __str__(self):
        return '%s%s' % (force_text(self.first), force_text(self.second))

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.first == other.first and self.second == other.second
        return False

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__, so without this
        # `a != b` would compare identities and disagree with `a == b`.
        return not self.__eq__(other)

    def __hash__(self):
        # Keep hashing consistent with __eq__ so equal instances collide in
        # dicts/sets (the default would hash by id()).
        return hash((self.first, self.second))
class SmallField(six.with_metaclass(models.SubfieldBase, models.Field)):
    """
    Turns the "Small" class into a Django field. Because of the similarities
    with normal character fields and the fact that Small.__unicode__ does
    something sensible, we don't need to implement a lot here.
    """
    def __init__(self, *args, **kwargs):
        # A Small value always serializes to exactly two characters.
        kwargs['max_length'] = 2
        super(SmallField, self).__init__(*args, **kwargs)

    def get_internal_type(self):
        # Stored in the database as an ordinary CharField column.
        return 'CharField'

    def to_python(self, value):
        # DB values arrive as 2-char strings; rebuild the Small object
        # from its two characters.
        if isinstance(value, Small):
            return value
        return Small(value[0], value[1])

    def get_db_prep_save(self, value, connection):
        # Serialize via Small.__str__ (the two parts concatenated).
        return six.text_type(value)

    def get_prep_lookup(self, lookup_type, value):
        # Only exact/in/isnull lookups are supported for this field.
        if lookup_type == 'exact':
            return force_text(value)
        if lookup_type == 'in':
            return [force_text(v) for v in value]
        if lookup_type == 'isnull':
            return []
        raise TypeError('Invalid lookup type: %r' % lookup_type)
class SmallerField(SmallField):
    """Trivial subclass of SmallField; adds no behavior of its own."""
    pass
class JSONField(six.with_metaclass(models.SubfieldBase, models.TextField)):
    description = ("JSONField automatically serializes and deserializes values to "
                   "and from JSON.")

    def to_python(self, value):
        # Falsy DB values (None, empty string) deserialize to None.
        if not value:
            return None
        if isinstance(value, six.string_types):
            value = json.loads(value)
        return value

    def get_db_prep_save(self, value, connection):
        # Store the value as a JSON text blob; None maps to SQL NULL.
        if value is None:
            return None
        return json.dumps(value)
class CustomTypedField(models.TextField):
    """TextField whose database column type is overridden to 'custom_field'."""
    def db_type(self, connection):
        return 'custom_field'
|
Gustry/QGIS | refs/heads/master | tests/src/python/test_qgsrastertransparencywidget.py | 32 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsRasterRange.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '07/06/2018'
__copyright__ = 'Copyright 2018, The QGIS Project'
import qgis # NOQA switch sip api
import pathlib
from qgis.gui import QgsRasterTransparencyWidget, QgsMapCanvas
from qgis.core import QgsRasterLayer, QgsRasterRange
from qgis.testing import TestCase, unittest
from qgis.testing.mocked import get_iface
from utilities import unitTestDataPath
class TestQgsRasterTransparencyWidget(TestCase):
    """Verify that QgsRasterTransparencyWidget never alters a layer's
    user-defined "no data" values across init, syncToLayer and apply."""

    @classmethod
    def setUpClass(cls) -> None:
        cls.iface = get_iface()

    @staticmethod
    def no_data_values(layer: QgsRasterLayer):
        # Collect the minimum of each user no-data range on band 1.
        return [n.min() for n in layer.dataProvider().userNoDataValues(1)]

    def test_transparency_widget(self):
        path = pathlib.Path(unitTestDataPath()) / 'landsat_4326.tif'
        self.assertTrue(path.is_file())
        layer = QgsRasterLayer(path.as_posix())
        self.assertTrue(layer.isValid())
        canvas = QgsMapCanvas()
        canvas.setLayers([layer])
        no_data_value = -99
        nd_ref = [no_data_value]
        layer.dataProvider().setUserNoDataValue(1, [QgsRasterRange(no_data_value, no_data_value)])
        nd0 = self.no_data_values(layer)
        self.assertListEqual(nd0, nd_ref)
        w = QgsRasterTransparencyWidget(layer, canvas)
        self.assertIsInstance(w, QgsRasterTransparencyWidget)
        nd1 = self.no_data_values(layer)
        self.assertListEqual(nd1, nd_ref, msg='Widget initialization should not change the "no data value"')
        # syncToLayer and apply are each exercised twice: a second call must
        # be just as side-effect free as the first.
        w.syncToLayer()
        nd2 = self.no_data_values(layer)
        self.assertListEqual(nd2, nd_ref, msg='syncToLayer changed the "no data value"')
        w.syncToLayer()
        nd3 = self.no_data_values(layer)
        self.assertListEqual(nd3, nd_ref, msg='repeated syncToLayer changed the "no data value"')
        w.apply()
        nd4 = self.no_data_values(layer)
        self.assertListEqual(nd4, nd_ref, msg='apply changed the "no data value" but should not')
        w.apply()
        nd5 = self.no_data_values(layer)
        self.assertListEqual(nd5, nd_ref, msg='repeated apply changed the "no data value" but should not')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
Hyperyon/p3-labyrinthe | refs/heads/master | setup.py | 1 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import labyrinth
# Read runtime dependencies, dropping blank lines: the original
# f.read().split('\n') yields an empty string for the trailing newline,
# and setuptools rejects empty requirement specifiers.
with open('requirements.txt') as f:
    requires = [line.strip() for line in f if line.strip()]

# Read the long description inside a context manager so the file handle
# is closed deterministically (the original open(...).read() leaked it).
with open('README.md') as readme:
    long_description = readme.read()

setup(
    name='nz-labyrinth',
    # Version must be a string per setuptools conventions.
    version='3.6',
    packages=find_packages(),
    install_requires=requires,
    author='Nico Zhan',
    author_email='nicozhan@hyperloop.fr',
    description='Help Mc Gyver to leave the maze',
    long_description=long_description,
    # include file from manifest.in
    include_package_data=True,
    url='https://github.com/Hyperyon/p3-labyrinthe',
    classifiers=[
        'Programming Language :: Python',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.5',
    ],
)
SymbiFlow/python-fpga-interchange | refs/heads/master | fpga_interchange/field_cache.py | 1 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
""" Provides a FieldCache which caches data about a capnp struct field.
Because capnp structs are constant, and parsing the dynamic capnp data is
a non-trival operation, the FieldCache converts commonly accessed fields into
namedtuple's to avoid overhead.
"""
from collections import namedtuple
# List of scalar capnp types.
# List of scalar capnp types.
SCALAR_TYPES = [
    'bool',
    'int8',
    'int16',
    'int32',
    'int64',
    'uint8',
    'uint16',
    'uint32',
    'uint64',
    'float32',
    'float64',
    'text',
]

# Flattened cache of one capnp field proto:
#   field_proto     - the raw capnp field proto message
#   ref_annotation  - ReferenceAnnotation if the field carries one, else None
#   imp_annotation  - implementation annotation value, else None
#   hide_field      - True when the implementation annotation hides the field
#   field_type      - the capnp slot type object
#   field_which     - which() of the slot type (e.g. 'list', 'struct')
#   list_which      - element-type which() when field_which == 'list'
#   schema_node_id  - struct type id for struct / list-of-struct fields
FieldProtoData = namedtuple(
    'FieldProtoData',
    'field_proto ref_annotation imp_annotation hide_field field_type field_which list_which schema_node_id'
)
# (type, field, depth) triple describing a reference annotation; depth is
# only meaningful for 'parent' references.
ReferenceAnnotation = namedtuple('ReferenceAnnotation', 'type field depth')


def make_reference_annotation(annotation_value):
    """Build a ReferenceAnnotation from a reference-annotation capnp message.

    `depth` is read from the message only for 'parent' references; for every
    other reference type it is left as None.
    """
    ann_type = annotation_value.type
    depth = annotation_value.depth if ann_type == 'parent' else None
    return ReferenceAnnotation(
        type=ann_type,
        field=annotation_value.field,
        depth=depth,
    )
def make_field_proto(annotation_cache, schema_node_id, field_idx, field_proto):
    """ Convert a field proto message into a FieldProtoData object. """
    field_type = field_proto.slot.type
    field_which = field_type.which()

    # Scan the field's annotations for at most one reference annotation and
    # at most one implementation annotation.
    ref_annotation = None
    imp_annotation = None
    hide_field = False
    for annotation_idx, annotation in enumerate(field_proto.annotations):
        _, annotation_value = annotation_cache.get_annotation_value(
            schema_node_id, field_idx, annotation_idx, annotation)

        if annotation_cache.is_reference_annotation(annotation):
            assert ref_annotation is None
            ref_annotation = make_reference_annotation(annotation_value)

        if annotation_cache.is_implementation_annotation(annotation):
            assert imp_annotation is None
            imp_annotation = annotation_value
            hide_field = imp_annotation.hide

    # For struct fields (and lists of structs), record the struct's schema
    # node id so the field can be resolved later.
    list_which = None
    schema_node_id = None
    if field_which == 'list':
        list_which = field_type.list.elementType.which()
        if list_which == 'struct':
            schema_node_id = field_type.list.elementType.struct.typeId
    elif field_which == 'struct':
        schema_node_id = field_type.struct.typeId

    return FieldProtoData(
        field_proto=field_proto,
        ref_annotation=ref_annotation,
        imp_annotation=imp_annotation,
        hide_field=hide_field,
        field_type=field_type,
        field_which=field_which,
        list_which=list_which,
        schema_node_id=schema_node_id)
class FieldData():
    """ Object to cache data about a field.

    Note: This cannot be a simple flat object in the event where the field
    is a union group.
    """

    def __init__(self, field_cache, field_index, field):
        self.field_cache = field_cache
        self.field_index = field_index
        self.field = field

        field_proto = field.proto
        self.key = field_proto.name
        self.which = field_proto.which()

        if self.which == 'group':
            # Group fields: the inner field protos are resolved lazily in
            # get_group_proto and memoized in group_protos.
            self.field_proto = None
            self.group_protos = {}
        else:
            # Plain slot fields are converted eagerly.
            assert self.which == 'slot', self.which
            self.field_proto = make_field_proto(
                annotation_cache=self.field_cache.annotation_cache,
                schema_node_id=self.field_cache.schema_node_id,
                field_idx=field_index,
                field_proto=field_proto,
            )
            self.group_protos = None

    def get_field_proto(self):
        """ Return field proto data when which == 'slot'. """
        return self.field_proto

    def get_group_proto(self, inner_key):
        """ Return group field proto data when which == 'group'. """
        group_proto = self.group_protos.get(inner_key, None)
        if group_proto is None:
            # First access for this inner field: convert and memoize.
            group_proto = self.field.schema.fields[inner_key].proto
            self.group_protos[inner_key] = make_field_proto(
                annotation_cache=self.field_cache.annotation_cache,
                schema_node_id=self.field_cache.schema_node_id,
                field_idx=self.field_index,
                field_proto=group_proto,
            )
        return self.group_protos[inner_key]
class FieldCache():
    """ Provides field data caching for a specific message schema. """

    def __init__(self, annotation_cache, schema):
        self.annotation_cache = annotation_cache
        self.schema = schema
        self.schema_node_id = schema.node.id
        self.has_union_fields = bool(schema.union_fields)
        self.field_data = {}
        # Field-name sets, split by whether the field belongs to the union.
        self.base_fields = set(schema.non_union_fields)
        self.union_fields = set(schema.union_fields)
        self.fields_list = []
        for idx, field in enumerate(schema.fields_list):
            self.fields_list.append(FieldData(self, idx, field))

    def fields(self, struct_reader):
        """ Return list of fields in specified message reader. """
        if self.has_union_fields:
            # Only the currently-active union member is a real field.
            fields = set(self.base_fields)
            fields.add(struct_reader.which())
            return fields
        else:
            return self.base_fields

    def get_reader_fields(self, input_fields):
        """ Return information to build message from list of input_fields.

        Returns:
            fields - List of all fields in output.
            defered_fields - Map of fields to defer and their implemenation
                annotation.
            union_field - Which field (if any) is a union field that needs
                special handling.
        """
        fields = set(self.base_fields)
        defered_fields = {}
        union_field = None
        # At most one union member may appear in input_fields.
        for field in self.union_fields:
            if field in input_fields:
                assert union_field is None, (field, union_field)
                union_field = field
                fields.add(field)

        # Fields carrying an implementation annotation are deferred.
        for field_data in self.fields_list:
            key = field_data.key
            if key not in fields:
                continue

            which = field_data.which
            if which != 'slot':
                continue

            if field_data.field_proto.imp_annotation is not None:
                defered_fields[key] = field_data.field_proto.imp_annotation

        return fields, defered_fields, union_field
|
srinathv/bokeh | refs/heads/master | examples/plotting/server/simple_stream.py | 43 | # The plot server must be running. To start the server, run
# $ bokeh-server
# If you don't wish to persist the data/objects from this plot,
# then a faster backend is the "memory" backend:
# $ bokeh-server --backend=memory
# Go to http://localhost:5006/bokeh to view this plot
import time
import numpy as np
from bokeh.plotting import cursession, figure, show, output_server
from bokeh.models import GlyphRenderer
# Initial data: 200 samples of sin(x) over two periods.
x = np.linspace(0, 4*np.pi, 200)
y = np.sin(x)

output_server("simple_stream")

p = figure(title="Simple streaming example")
p.line(x,y, color="#2222aa", line_width=2)
show(p)

# Grab the data source backing the line glyph so it can be mutated in place.
ds = p.select({"type": GlyphRenderer})[0].data_source

# Stream forever: append one x step, recompute y, push to the plot server.
while True:
    oldx = ds.data["x"]
    newx = np.hstack([oldx, [oldx[-1] + 4*np.pi/200]])
    ds.data["x"] = newx
    ds.data["y"] = np.sin(newx)
    cursession().store_objects(ds)
    time.sleep(0.5)
|
makerbot/ReplicatorG | refs/heads/master | skein_engines/skeinforge-47/fabmetheus_utilities/geometry/creation/linear_bearing_cage.py | 12 | """
Linear bearing cage.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.creation import extrude
from fabmetheus_utilities.geometry.creation import lineation
from fabmetheus_utilities.geometry.creation import peg
from fabmetheus_utilities.geometry.creation import solid
from fabmetheus_utilities.geometry.geometry_utilities import evaluate
from fabmetheus_utilities.geometry.geometry_utilities import matrix
from fabmetheus_utilities.geometry.manipulation_matrix import translate
from fabmetheus_utilities.geometry.solids import cylinder
from fabmetheus_utilities.geometry.solids import sphere
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import euclidean
import math
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__credits__ = 'Art of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/02/05 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def addAssemblyCage(derivation, negatives, positives):
    'Add assembly linear bearing cage.'
    # Male half: cage with grooves plus positive pegs on both sides of the
    # center line, shifted down by half the separation width.
    addCageGroove(derivation, negatives, positives)
    for pegCenterX in derivation.pegCenterXs:
        addPositivePeg(derivation, positives, pegCenterX, -derivation.pegY)
        addPositivePeg(derivation, positives, pegCenterX, derivation.pegY)
    translate.translateNegativesPositives(negatives, positives, Vector3(0.0, -derivation.halfSeparationWidth))
    # Female half: same cage but with negative peg holes, shifted up so the
    # two halves can be printed side by side and assembled.
    femaleNegatives = []
    femalePositives = []
    addCageGroove(derivation, femaleNegatives, femalePositives)
    for pegCenterX in derivation.pegCenterXs:
        addNegativePeg(derivation, femaleNegatives, pegCenterX, -derivation.pegY)
        addNegativePeg(derivation, femaleNegatives, pegCenterX, derivation.pegY)
    translate.translateNegativesPositives(femaleNegatives, femalePositives, Vector3(0.0, derivation.halfSeparationWidth))
    negatives += femaleNegatives
    positives += femalePositives
def addCage(derivation, height, negatives, positives):
    'Add linear bearing cage.'
    # Extrude a rounded extended rectangle to the given height as the body.
    copyShallow = derivation.elementNode.getCopyShallow()
    copyShallow.attributes['path'] = [Vector3(), Vector3(0.0, 0.0, height)]
    extrudeDerivation = extrude.ExtrudeDerivation(copyShallow)
    roundedExtendedRectangle = getRoundedExtendedRectangle(derivation.demiwidth, derivation.rectangleCenterX, 14)
    outsidePath = euclidean.getVector3Path(roundedExtendedRectangle)
    extrude.addPositives(extrudeDerivation, [outsidePath], positives)
    # Subtract a sphere pocket for each bearing ball.
    for bearingCenterX in derivation.bearingCenterXs:
        addNegativeSphere(derivation, negatives, bearingCenterX)
def addCageGroove(derivation, negatives, positives):
    'Add cage and groove.'
    # Half-height cage body, then the side grooves cut into it.
    addCage(derivation, derivation.demiheight, negatives, positives)
    addGroove(derivation, negatives)
def addGroove(derivation, negatives):
    'Add groove on each side of cage.'
    # Extrude two triangular notch profiles along the cage's full length.
    copyShallow = derivation.elementNode.getCopyShallow()
    extrude.setElementNodeToEndStart(copyShallow, Vector3(-derivation.demilength), Vector3(derivation.demilength))
    extrudeDerivation = extrude.ExtrudeDerivation(copyShallow)
    bottom = derivation.demiheight - 0.5 * derivation.grooveWidth
    outside = derivation.demiwidth
    top = derivation.demiheight
    # Triangle apex points inward to innerDemiwidth at mid-groove height.
    leftGroove = [
        complex(-outside, bottom),
        complex(-derivation.innerDemiwidth, derivation.demiheight),
        complex(-outside, top)]
    rightGroove = [
        complex(outside, top),
        complex(derivation.innerDemiwidth, derivation.demiheight),
        complex(outside, bottom)]
    extrude.addNegatives(extrudeDerivation, negatives, euclidean.getVector3Paths([leftGroove, rightGroove]))
def addNegativePeg(derivation, negatives, x, y):
    'Add negative cylinder at x and y.'
    # Hole radius is the peg radius grown by half the peg clearance.
    negativePegRadius = derivation.pegRadiusArealized + derivation.halfPegClearance
    inradius = complex(negativePegRadius, negativePegRadius)
    copyShallow = derivation.elementNode.getCopyShallow()
    start = Vector3(x, y, derivation.height)
    sides = evaluate.getSidesMinimumThreeBasedOnPrecision(copyShallow, negativePegRadius)
    cylinder.addCylinderOutputByEndStart(0.0, inradius, negatives, sides, start, derivation.topOverBottom)
def addNegativeSphere(derivation, negatives, x):
    'Add negative sphere at x.'
    # Sphere pocket sized to the bearing radius plus cage clearance,
    # centered at mid-height of the cage.
    radius = Vector3(derivation.radiusPlusClearance, derivation.radiusPlusClearance, derivation.radiusPlusClearance)
    sphereOutput = sphere.getGeometryOutput(derivation.elementNode.getCopyShallow(), radius)
    euclidean.translateVector3Path(matrix.getVertexes(sphereOutput), Vector3(x, 0.0, derivation.demiheight))
    negatives.append(sphereOutput)
def addPositivePeg(derivation, positives, x, y):
    'Add positive cylinder at x and y.'
    # Peg radius is shrunk by half the peg clearance so it fits the hole.
    positivePegRadius = derivation.pegRadiusArealized - derivation.halfPegClearance
    radiusArealized = complex(positivePegRadius, positivePegRadius)
    copyShallow = derivation.elementNode.getCopyShallow()
    start = Vector3(x, y, derivation.demiheight)
    endZ = derivation.height
    peg.addPegOutput(derivation.pegBevel, endZ, positives, radiusArealized, derivation.sides, start, derivation.topOverBottom)
def getBearingCenterXs(bearingCenterX, numberOfSteps, stepX):
    'Get the bearing center x list.'
    # numberOfSteps + 1 centers: the start plus one per step. The value is
    # accumulated (not start + i * step) to keep the original float behavior.
    # `range` replaces Python-2-only `xrange`; for iteration the two are
    # interchangeable, so this also runs under Python 3.
    bearingCenterXs = []
    for stepIndex in range(numberOfSteps + 1):
        bearingCenterXs.append(bearingCenterX)
        bearingCenterX += stepX
    return bearingCenterXs
def getGeometryOutput(elementNode):
    'Get vector3 vertexes from attribute dictionary.'
    derivation = LinearBearingCageDerivation(elementNode)
    negatives = []
    positives = []
    # 'a...' selects the two-piece assembly cage; anything else gets the
    # one-piece integral cage.
    if derivation.typeStringFirstCharacter == 'a':
        addAssemblyCage(derivation, negatives, positives)
    else:
        addCage(derivation, derivation.height, negatives, positives)
    return extrude.getGeometryOutputByNegativesPositives(elementNode, negatives, positives)
def getGeometryOutputByArguments(arguments, elementNode):
    'Get vector3 vertexes from attribute dictionary by arguments.'
    # Positional arguments map to the length and radius attributes.
    evaluate.setAttributesByArguments(['length', 'radius'], arguments, elementNode)
    return getGeometryOutput(elementNode)
def getNewDerivation(elementNode):
    'Get new derivation.'
    return LinearBearingCageDerivation(elementNode)
def getPegCenterXs(numberOfSteps, pegCenterX, stepX):
    'Get the peg center x list.'
    # Exactly numberOfSteps centers (one per gap between bearings), values
    # accumulated to preserve the original float behavior. `range` replaces
    # Python-2-only `xrange`; for iteration they are interchangeable, so this
    # also runs under Python 3.
    pegCenterXs = []
    for stepIndex in range(numberOfSteps):
        pegCenterXs.append(pegCenterX)
        pegCenterX += stepX
    return pegCenterXs
def getRoundedExtendedRectangle(radius, rectangleCenterX, sides):
    'Get the rounded extended rectangle.'
    # Builds a stadium-like outline: two semicircular arcs, one around each
    # end center (+/- rectangleCenterX), with the x component stretched by
    # extensionMultiplier so the arc chords still touch the true radius.
    roundedExtendedRectangle = []
    halfSides = int(sides / 2)
    halfSidesPlusOne = abs(halfSides + 1)
    sideAngle = math.pi / float(halfSides)
    extensionMultiplier = 1.0 / math.cos(0.5 * sideAngle)
    # Right end arc, swept from +90 degrees.
    center = complex(rectangleCenterX, 0.0)
    startAngle = 0.5 * math.pi
    for halfSide in xrange(halfSidesPlusOne):
        unitPolar = euclidean.getWiddershinsUnitPolar(startAngle)
        unitPolarExtended = complex(unitPolar.real * extensionMultiplier, unitPolar.imag)
        roundedExtendedRectangle.append(unitPolarExtended * radius + center)
        startAngle += sideAngle
    # Left end arc, swept from -90 degrees.
    center = complex(-rectangleCenterX, 0.0)
    startAngle = -0.5 * math.pi
    for halfSide in xrange(halfSidesPlusOne):
        unitPolar = euclidean.getWiddershinsUnitPolar(startAngle)
        unitPolarExtended = complex(unitPolar.real * extensionMultiplier, unitPolar.imag)
        roundedExtendedRectangle.append(unitPolarExtended * radius + center)
        startAngle += sideAngle
    return roundedExtendedRectangle
def processElementNode(elementNode):
    'Process the xml element.'
    solid.processElementNodeByGeometry(elementNode, getGeometryOutput(elementNode))
class LinearBearingCageDerivation:
    'Class to hold linear bearing cage variables.'

    def __init__(self, elementNode):
        'Set defaults.'
        self.length = evaluate.getEvaluatedFloat(50.0, elementNode, 'length')
        self.demilength = 0.5 * self.length
        self.elementNode = elementNode
        self.radius = lineation.getFloatByPrefixBeginEnd(elementNode, 'radius', 'diameter', 5.0)
        # Each clearance/thickness can be given directly or as a ratio of
        # the bearing radius; the ratio is the default.
        self.cageClearanceOverRadius = evaluate.getEvaluatedFloat(0.05, elementNode, 'cageClearanceOverRadius')
        self.cageClearance = self.cageClearanceOverRadius * self.radius
        self.cageClearance = evaluate.getEvaluatedFloat(self.cageClearance, elementNode, 'cageClearance')
        self.racewayClearanceOverRadius = evaluate.getEvaluatedFloat(0.1, elementNode, 'racewayClearanceOverRadius')
        self.racewayClearance = self.racewayClearanceOverRadius * self.radius
        self.racewayClearance = evaluate.getEvaluatedFloat(self.racewayClearance, elementNode, 'racewayClearance')
        self.typeMenuRadioStrings = 'assembly integral'.split()
        self.typeString = evaluate.getEvaluatedString('assembly', elementNode, 'type')
        self.typeStringFirstCharacter = self.typeString[: 1 ].lower()
        self.wallThicknessOverRadius = evaluate.getEvaluatedFloat(0.5, elementNode, 'wallThicknessOverRadius')
        self.wallThickness = self.wallThicknessOverRadius * self.radius
        self.wallThickness = evaluate.getEvaluatedFloat(self.wallThickness, elementNode, 'wallThickness')
        self.zenithAngle = evaluate.getEvaluatedFloat(45.0, elementNode, 'zenithAngle')
        self.zenithRadian = math.radians(self.zenithAngle)
        # Cage height derived from where the ball sits at the zenith angle,
        # minus the raceway clearance.
        self.demiheight = self.radius * math.cos(self.zenithRadian) - self.racewayClearance
        self.height = self.demiheight + self.demiheight
        self.radiusPlusClearance = self.radius + self.cageClearance
        self.cageRadius = self.radiusPlusClearance + self.wallThickness
        self.demiwidth = self.cageRadius
        self.bearingCenterX = self.cageRadius - self.demilength
        # Spread the bearings evenly along the cage length.
        separation = self.cageRadius + self.radiusPlusClearance
        bearingLength = -self.bearingCenterX - self.bearingCenterX
        self.numberOfSteps = int(math.floor(bearingLength / separation))
        self.stepX = bearingLength / float(self.numberOfSteps)
        self.bearingCenterXs = getBearingCenterXs(self.bearingCenterX, self.numberOfSteps, self.stepX)
        if self.typeStringFirstCharacter == 'a':
            self.setAssemblyCage()
        self.rectangleCenterX = self.demiwidth - self.demilength

    def setAssemblyCage(self):
        'Set two piece assembly parameters.'
        self.grooveDepthOverRadius = evaluate.getEvaluatedFloat(0.15, self.elementNode, 'grooveDepthOverRadius')
        self.grooveDepth = self.grooveDepthOverRadius * self.radius
        self.grooveDepth = evaluate.getEvaluatedFloat(self.grooveDepth, self.elementNode, 'grooveDepth')
        self.grooveWidthOverRadius = evaluate.getEvaluatedFloat(0.6, self.elementNode, 'grooveWidthOverRadius')
        self.grooveWidth = self.grooveWidthOverRadius * self.radius
        self.grooveWidth = evaluate.getEvaluatedFloat(self.grooveWidth, self.elementNode, 'grooveWidth')
        self.pegClearanceOverRadius = evaluate.getEvaluatedFloat(0.0, self.elementNode, 'pegClearanceOverRadius')
        self.pegClearance = self.pegClearanceOverRadius * self.radius
        self.pegClearance = evaluate.getEvaluatedFloat(self.pegClearance, self.elementNode, 'pegClearance')
        self.halfPegClearance = 0.5 * self.pegClearance
        self.pegRadiusOverRadius = evaluate.getEvaluatedFloat(0.5, self.elementNode, 'pegRadiusOverRadius')
        self.pegRadius = self.pegRadiusOverRadius * self.radius
        self.pegRadius = evaluate.getEvaluatedFloat(self.pegRadius, self.elementNode, 'pegRadius')
        self.sides = evaluate.getSidesMinimumThreeBasedOnPrecision(self.elementNode, self.pegRadius)
        # Peg radius adjusted so the polygon's area matches a true circle.
        self.pegRadiusArealized = evaluate.getRadiusArealizedBasedOnAreaRadius(self.elementNode, self.pegRadius, self.sides)
        self.pegBevelOverPegRadius = evaluate.getEvaluatedFloat(0.25, self.elementNode, 'pegBevelOverPegRadius')
        self.pegBevel = self.pegBevelOverPegRadius * self.pegRadiusArealized
        self.pegBevel = evaluate.getEvaluatedFloat(self.pegBevel, self.elementNode, 'pegBevel')
        self.pegMaximumRadius = self.pegRadiusArealized + abs(self.halfPegClearance)
        self.separationOverRadius = evaluate.getEvaluatedFloat(0.5, self.elementNode, 'separationOverRadius')
        self.separation = self.separationOverRadius * self.radius
        self.separation = evaluate.getEvaluatedFloat(self.separation, self.elementNode, 'separation')
        self.topOverBottom = evaluate.getEvaluatedFloat(0.8, self.elementNode, 'topOverBottom')
        peg.setTopOverBottomByRadius(self, 0.0, self.pegRadiusArealized, self.height)
        self.quarterHeight = 0.5 * self.demiheight
        self.pegY = 0.5 * self.wallThickness + self.pegMaximumRadius
        # Push the pegs outward if they would intersect the bearing pockets.
        cagePegRadius = self.cageRadius + self.pegMaximumRadius
        halfStepX = 0.5 * self.stepX
        pegHypotenuse = math.sqrt(self.pegY * self.pegY + halfStepX * halfStepX)
        if cagePegRadius > pegHypotenuse:
            self.pegY = math.sqrt(cagePegRadius * cagePegRadius - halfStepX * halfStepX)
        self.demiwidth = max(self.pegY + self.pegMaximumRadius + self.wallThickness, self.demiwidth)
        self.innerDemiwidth = self.demiwidth
        self.demiwidth += self.grooveDepth
        self.halfSeparationWidth = self.demiwidth + 0.5 * self.separation
        if self.pegRadiusArealized <= 0.0:
            self.pegCenterXs = []
        else:
            self.pegCenterXs = getPegCenterXs(self.numberOfSteps, self.bearingCenterX + halfStepX, self.stepX)
|
camon/Flexget | refs/heads/develop | flexget/utils/log.py | 2 | """Logging utilities"""
from __future__ import unicode_literals, division, absolute_import
import logging
import hashlib
from datetime import datetime, timedelta
from sqlalchemy import Column, Integer, String, DateTime, Index
from flexget import db_schema
from flexget import logger as f_logger
from flexget.utils.sqlalchemy_utils import table_schema
from flexget.manager import Session
from flexget.event import event
log = logging.getLogger('util.log')
Base = db_schema.versioned_base('log_once', 0)
@db_schema.upgrade('log_once')
def upgrade(ver, session):
    """Schema migration hook for the log_once table.

    A ``None`` version means the table predates schema versioning; in that
    case a unique index is created on the md5sum column and version 0 is
    reported. Any other version is already current and is passed through.
    """
    if ver is not None:
        return ver
    log.info('Adding index to md5sum column of log_once table.')
    table = table_schema('log_once', session)
    Index('log_once_md5sum', table.c.md5sum, unique=True).create()
    return 0
class LogMessage(Base):
    """Declarative ORM record storing the md5 hash of a message that has
    already been logged once, plus the time it was first seen."""
    __tablename__ = 'log_once'
    id = Column(Integer, primary_key=True)
    # Hash of the logged message; uniqueness enforced by the index added in
    # the schema upgrade above.
    md5sum = Column(String, unique=True)
    # Bug fix: pass the callable itself, not ``datetime.now()``. Calling it
    # here would freeze the default at module-import time, stamping every
    # row with the same timestamp; SQLAlchemy invokes a callable default at
    # each INSERT instead.
    added = Column(DateTime, default=datetime.now)
    def __init__(self, md5sum):
        self.md5sum = md5sum
    def __repr__(self):
        return "<LogMessage('%s')>" % self.md5sum
@event('manager.db_cleanup')
def purge(session):
    """Delete log_once entries older than one year from the database."""
    cutoff = datetime.now() - timedelta(days=365)
    deleted = (session.query(LogMessage)
               .filter(LogMessage.added < cutoff)
               .delete())
    if deleted:
        log.verbose('Purged %s entries from log_once table.' % deleted)
def log_once(message, logger=logging.getLogger('log_once'), once_level=logging.INFO, suppressed_level=f_logger.VERBOSE):
    """
    Log `message` at most once at `once_level` using the given `logger`.

    Repeated calls with the same message are still logged, but only at
    `suppressed_level`.

    Returns True when the message was logged for the first time, False when
    it was suppressed, and None when the database is unavailable (in which
    case the message is logged unconditionally at `once_level`).
    """
    # If there is no active manager, don't access the db
    from flexget.manager import manager
    if not manager:
        log.warning('DB not initialized. log_once will not work properly.')
        logger.log(once_level, message)
        return
    # Deduplication key is an md5 of the message text; latin1 with 'replace'
    # keeps hashing total for any unicode input.
    digest = hashlib.md5()
    digest.update(message.encode('latin1', 'replace')) # ticket:250
    md5sum = digest.hexdigest()
    session = Session()
    try:
        # abort if this has already been logged
        if session.query(LogMessage).filter_by(md5sum=md5sum).first():
            logger.log(suppressed_level, message)
            return False
        row = LogMessage(md5sum)
        session.add(row)
        session.commit()
    finally:
        # Ensure the session is released on both the suppressed-return and
        # commit paths.
        session.close()
    logger.log(once_level, message)
    return True
|
ratnania/pigasus | refs/heads/master | python/fem/tracelog.py | 1 | # -*- coding: UTF-8 -*-
#! /usr/bin/python
__author__="ARA"
__all__ = ['tracelog']
__date__ ="$Mai 09, 2014 10:34:00 PM$"
from . import common_obj as com
class tracelog:
    """Thin Python-side wrapper around the pigasus Fortran logging routine."""
    def __init__(self):
        # Bridge object exposing the compiled pyfem library.
        self._com = com.common_obj()
    def printlog(self, message, condition=True, level=0):
        """Forward *message* to the Fortran-side logger.

        `condition` and `level` are passed straight through to
        pyfem_printlog; their exact semantics are defined by the Fortran
        side (presumably: only log when `condition` is true, at verbosity
        `level` — confirm against the pyfem sources).
        """
        self._com.pyfem.pyfem_printlog(message, condition, level)
|
ceph/samba | refs/heads/ceph | buildtools/wafsamba/samba_pidl.py | 11 | # waf build tool for building IDL files with pidl
import os
import Build
from TaskGen import feature, before
from samba_utils import SET_TARGET_TYPE, TO_LIST, LOCAL_CACHE
def SAMBA_PIDL(bld, pname, source,
               options='',
               output_dir='.',
               generate_tables=True):
    '''Build a IDL file using pidl.
    This will produce up to 13 output files depending on the options used.

    `pname` is the owning target prefix; the waf target is named
    "<pname>_<BASENAME>". `options` is a whitespace-separated list of pidl
    command-line switches; only switches present in `options_map` below
    contribute output files. When `generate_tables` is true and an
    `ndr_%s.h` header is produced, the header is recorded in the
    PIDL_HEADERS cache for the NDR tables generation step.'''
    bname = source[0:-4]; # strip off the .idl suffix
    bname = os.path.basename(bname)
    name = "%s_%s" % (pname, bname.upper())
    if not SET_TARGET_TYPE(bld, name, 'PIDL'):
        return
    bld.SET_BUILD_GROUP('build_source')
    # the output files depend on the options used. Use this dictionary
    # to map between the options and the resulting file names
    options_map = { '--header' : '%s.h',
                    '--ndr-parser' : 'ndr_%s.c ndr_%s.h',
                    '--samba3-ndr-server' : 'srv_%s.c srv_%s.h',
                    '--samba3-ndr-client' : 'cli_%s.c cli_%s.h',
                    '--server' : 'ndr_%s_s.c',
                    '--client' : 'ndr_%s_c.c ndr_%s_c.h',
                    '--python' : 'py_%s.c',
                    '--tdr-parser' : 'tdr_%s.c tdr_%s.h',
                    '--dcom-proxy' : '%s_p.c',
                    '--com-header' : 'com_%s.h'
                    }
    table_header_idx = None
    out_files = []
    options_list = TO_LIST(options)
    for o in options_list:
        if o in options_map:
            ofiles = TO_LIST(options_map[o])
            for f in ofiles:
                out_files.append(os.path.join(output_dir, f % bname))
                if f == 'ndr_%s.h':
                    # remember this one for the tables generation
                    table_header_idx = len(out_files) - 1
    # depend on the full pidl sources
    source = TO_LIST(source)
    try:
        pidl_src_nodes = bld.pidl_files_cache
    except AttributeError:
        # First PIDL target: glob the pidl implementation once and cache the
        # node list on the build context for all later targets.
        bld.pidl_files_cache = bld.srcnode.ant_glob('pidl/lib/Parse/**/*.pm', flat=False)
        bld.pidl_files_cache.extend(bld.srcnode.ant_glob('pidl', flat=False))
        pidl_src_nodes = bld.pidl_files_cache
    # the cd .. is needed because pidl currently is sensitive to the directory it is run in
    # Forward the configured preprocessor/compiler to pidl via environment
    # assignments prepended to the rule command line.
    cpp = ""
    cc = ""
    if bld.CONFIG_SET("CPP") and bld.CONFIG_GET("CPP") != "":
        if isinstance(bld.CONFIG_GET("CPP"), list):
            cpp = 'CPP="%s"' % " ".join(bld.CONFIG_GET("CPP"))
        else:
            cpp = 'CPP="%s"' % bld.CONFIG_GET("CPP")
        # NOTE(review): xlc_r (AIX compiler) is special-cased here —
        # presumably pidl cannot use it as a preprocessor; confirm.
        if cpp == "CPP=xlc_r":
            cpp = ""
    if bld.CONFIG_SET("CC"):
        if isinstance(bld.CONFIG_GET("CC"), list):
            cc = 'CC="%s"' % " ".join(bld.CONFIG_GET("CC"))
        else:
            cc = 'CC="%s"' % bld.CONFIG_GET("CC")
    t = bld(rule='cd .. && %s %s ${PERL} "${PIDL}" --quiet ${OPTIONS} --outputdir ${OUTPUTDIR} -- "${SRC[0].abspath(env)}"' % (cpp, cc),
            ext_out = '.c',
            before = 'cc',
            update_outputs = True,
            shell = True,
            source = source,
            target = out_files,
            name = name,
            samba_type = 'PIDL')
    # prime the list of nodes we are dependent on with the cached pidl sources
    t.allnodes = pidl_src_nodes
    t.env.PIDL = os.path.join(bld.srcnode.abspath(), 'pidl/pidl')
    t.env.OPTIONS = TO_LIST(options)
    t.env.OUTPUTDIR = bld.bldnode.name + '/' + bld.path.find_dir(output_dir).bldpath(t.env)
    if generate_tables and table_header_idx is not None:
        # Record the generated ndr_*.h so SAMBA_PIDL_TABLES can collect it.
        pidl_headers = LOCAL_CACHE(bld, 'PIDL_HEADERS')
        pidl_headers[name] = [bld.path.find_or_declare(out_files[table_header_idx])]
    t.more_includes = '#' + bld.path.relpath_gen(bld.srcnode)
Build.BuildContext.SAMBA_PIDL = SAMBA_PIDL
def SAMBA_PIDL_LIST(bld, name, source,
                    options='',
                    output_dir='.',
                    generate_tables=True):
    '''A wrapper for building a set of IDL files.

    Each entry in `source` is handed to bld.SAMBA_PIDL with the same
    options, output directory and table-generation setting.'''
    for idl_file in TO_LIST(source):
        bld.SAMBA_PIDL(name,
                       idl_file,
                       options=options,
                       output_dir=output_dir,
                       generate_tables=generate_tables)
Build.BuildContext.SAMBA_PIDL_LIST = SAMBA_PIDL_LIST
#################################################################
# the rule for generating the NDR tables
@feature('collect')
@before('exec_rule')
def collect(self):
    """waf feature hook: gather every generated ndr_*.h recorded in the
    PIDL_HEADERS cache and append it to this task generator's sources,
    so the NDR tables rule sees all headers. Runs before 'exec_rule'."""
    pidl_headers = LOCAL_CACHE(self.bld, 'PIDL_HEADERS')
    for (name, hd) in pidl_headers.items():
        y = self.bld.get_tgen_by_name(name)
        self.bld.ASSERT(y is not None, 'Failed to find PIDL header %s' % name)
        # Force the PIDL task generator to post so its output nodes exist.
        y.post()
        for node in hd:
            self.bld.ASSERT(node is not None, 'Got None as build node generating PIDL table for %s' % name)
            # self.source is a space-separated string here, not a list.
            self.source += " " + node.relpath_gen(self.path)
def SAMBA_PIDL_TABLES(bld, name, target):
    '''generate the pidl NDR tables file

    Runs librpc/tables.pl over all collected ndr_*.h headers (gathered by
    the 'collect' feature above) and writes the combined tables source to
    `target`.'''
    bld.SET_BUILD_GROUP('main')
    # NOTE(review): the rule both passes --output ${TGT} to tables.pl and
    # redirects the pipeline into ${TGT}; presumably tables.pl echoes its
    # output for the sed path-stripping pass — confirm before changing.
    t = bld(
            features = 'collect',
            rule = '${PERL} ${SRC} --output ${TGT} | sed "s|default/||" > ${TGT}',
            ext_out = '.c',
            before = 'cc',
            update_outputs = True,
            shell = True,
            source = '../../librpc/tables.pl',
            target = target,
            name = name)
    t.env.LIBRPC = os.path.join(bld.srcnode.abspath(), 'librpc')
Build.BuildContext.SAMBA_PIDL_TABLES = SAMBA_PIDL_TABLES
|
snahelou/awx | refs/heads/devel | awx/main/notifications/hipchat_backend.py | 1 | # Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.
import logging
import requests
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
from awx.main.notifications.base import AWXBaseEmailBackend
logger = logging.getLogger('awx.main.notifications.hipchat_backend')
class HipChatBackend(AWXBaseEmailBackend):
    """Notification backend that posts AWX notification messages to one or
    more HipChat rooms via the HipChat v2 REST API."""
    init_parameters = {"token": {"label": "Token", "type": "password"},
                       "rooms": {"label": "Destination Rooms", "type": "list"},
                       "color": {"label": "Notification Color", "type": "string"},
                       "api_url": {"label": "API Url (e.g: https://mycompany.hipchat.com)", "type": "string"},
                       "notify": {"label": "Notify room", "type": "bool"},
                       "message_from": {"label": "Label to be shown with notification", "type": "string"}}
    recipient_parameter = "rooms"
    sender_parameter = "message_from"
    def __init__(self, token, color, api_url, notify, fail_silently=False, **kwargs):
        """Store the HipChat auth token, notification color, base API URL
        and room-notify flag; `fail_silently` is handled by the base class."""
        super(HipChatBackend, self).__init__(fail_silently=fail_silently)
        self.token = token
        self.color = color
        self.api_url = api_url
        self.notify = notify
    def send_messages(self, messages):
        """Post each message to each of its recipient rooms.

        Returns the number of successfully delivered (message, room) posts.
        Raises on the first failed post unless fail_silently is set.
        """
        sent_messages = 0
        for m in messages:
            for rcp in m.recipients():
                # NOTE(security): verify=False disables TLS certificate
                # verification (kept for self-hosted servers with private
                # certs); consider making this configurable.
                r = requests.post("{}/v2/room/{}/notification".format(self.api_url, rcp),
                                  params={"auth_token": self.token},
                                  verify=False,
                                  json={"color": self.color,
                                        "message": m.subject,
                                        "notify": self.notify,
                                        "from": m.from_email,
                                        "message_format": "text"})
                # The v2 notification endpoint returns 204 No Content on
                # success.
                if r.status_code != 204:
                    logger.error(smart_text(_("Error sending messages: {}").format(r.text)))
                    if not self.fail_silently:
                        raise Exception(smart_text(_("Error sending message to hipchat: {}").format(r.text)))
                    # Bug fix: a failed post must not be counted as sent
                    # (previously the counter was incremented even when the
                    # request failed under fail_silently).
                    continue
                sent_messages += 1
        return sent_messages
|
sysalexis/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/unittest/test/dummy.py | 1061 | # Empty module for testing the loading of modules
|
lgarren/spack | refs/heads/develop | var/spack/repos/builtin/packages/zoltan/package.py | 3 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import re
import os
import glob
class Zoltan(Package):
    """The Zoltan library is a toolkit of parallel combinatorial algorithms
    for parallel, unstructured, and/or adaptive scientific
    applications. Zoltan's largest component is a suite of dynamic
    load-balancing and paritioning algorithms that increase
    applications' parallel performance by reducing idle time. Zoltan
    also has graph coloring and graph ordering algorithms, which are
    useful in task schedulers and parallel preconditioners.
    """
    homepage = "http://www.cs.sandia.gov/zoltan"
    url = "http://www.cs.sandia.gov/~kddevin/Zoltan_Distributions/zoltan_distrib_v3.83.tar.gz"
    version('3.83', '1ff1bc93f91e12f2c533ddb01f2c095f')
    version('3.8', '9d8fba8a990896881b85351d4327c4a9')
    version('3.6', '9cce794f7241ecd8dbea36c3d7a880f9')
    version('3.3', '5eb8f00bda634b25ceefa0122bd18d65')
    variant('debug', default=False, description='Builds a debug version of the library.')
    variant('shared', default=True, description='Builds a shared version of the library.')
    variant('fortran', default=True, description='Enable Fortran support.')
    variant('mpi', default=True, description='Enable MPI support.')
    depends_on('mpi', when='+mpi')
    def install(self, spec, prefix):
        """Configure, build and install Zoltan.

        Translates the Spack variants into Zoltan's autotools flags, builds
        out-of-source in a 'build' subdirectory, then (for +shared) renames
        the installed .a archives — which were actually linked as shared
        objects via the --with-ar override — to the platform's shared-library
        suffix.
        """
        # FIXME: The older Zoltan versions fail to compile the F90 MPI wrappers
        # because of some complicated generic type problem.
        if spec.satisfies('@:3.6+fortran+mpi'):
            raise RuntimeError(('Cannot build Zoltan v{0} with +fortran and '
                                '+mpi; please disable one of these features '
                                'or upgrade versions.').format(self.version))
        config_args = [
            self.get_config_flag('f90interface', 'fortran'),
            self.get_config_flag('mpi', 'mpi'),
        ]
        config_cflags = [
            '-O0' if '+debug' in spec else '-O3',
            '-g' if '+debug' in spec else '-g0',
        ]
        if '+shared' in spec:
            # Trick the autotools build into producing shared objects: make
            # 'ar' actually invoke the C++ compiler in shared-link mode and
            # neutralize ranlib.
            config_args.append('RANLIB=echo')
            config_args.append('--with-ar=$(CXX) -shared $(LDFLAGS) -o')
            config_cflags.append(self.compiler.pic_flag)
            if spec.satisfies('%gcc'):
                config_args.append('--with-libs={0}'.format('-lgfortran'))
        if '+mpi' in spec:
            config_args.append('CC={0}'.format(spec['mpi'].mpicc))
            config_args.append('CXX={0}'.format(spec['mpi'].mpicxx))
            config_args.append('FC={0}'.format(spec['mpi'].mpifc))
            config_args.append('--with-mpi={0}'.format(spec['mpi'].prefix))
            mpi_libs = self.get_mpi_libs()
            # NOTE: Some external mpi installations may have empty lib
            # directory (e.g. bg-q). In this case we need to explicitly
            # pass empty library name.
            if mpi_libs:
                mpi_libs = ' -l'.join(mpi_libs)
                config_args.append('--with-mpi-libs=-l{0}'.format(mpi_libs))
            else:
                config_args.append('--with-mpi-libs= ')
        # NOTE: Early versions of Zoltan come packaged with a few embedded
        # library packages (e.g. ParMETIS, Scotch), which messes with Spack's
        # ability to descend directly into the package's source directory.
        source_directory = self.stage.source_path
        if spec.satisfies('@:3.6'):
            zoltan_directory = 'Zoltan_v{0}'.format(self.version)
            source_directory = join_path(source_directory, zoltan_directory)
        build_directory = join_path(source_directory, 'build')
        with working_dir(build_directory, create=True):
            config = Executable(join_path(source_directory, 'configure'))
            config(
                '--prefix={0}'.format(prefix),
                '--with-cflags={0}'.format(' '.join(config_cflags)),
                '--with-cxxflags={0}'.format(' '.join(config_cflags)),
                '--with-fcflags={0}'.format(' '.join(config_cflags)),
                *config_args
            )
            # NOTE: Earlier versions of Zoltan cannot be built in parallel
            # because they contain nested Makefile dependency bugs.
            make(parallel=not spec.satisfies('@:3.6+fortran'))
            make('install')
        # NOTE: Unfortunately, Zoltan doesn't provide any configuration
        # options for the extension of the output library files, so this
        # script must change these extensions as a post-processing step.
        if '+shared' in spec:
            for lib_path in glob.glob(join_path(prefix, 'lib', '*.a')):
                lib_static_name = os.path.basename(lib_path)
                lib_shared_name = re.sub(r'\.a$', '.{0}'.format(dso_suffix),
                                         lib_static_name)
                move(lib_path, join_path(prefix, 'lib', lib_shared_name))
    def get_config_flag(self, flag_name, flag_variant):
        """Return '--enable-<flag_name>' when the given variant is active on
        this spec, '--disable-<flag_name>' otherwise."""
        flag_pre = 'en' if '+{0}'.format(flag_variant) in self.spec else 'dis'
        return '--{0}able-{1}'.format(flag_pre, flag_name)
    # NOTE: Zoltan assumes that it's linking against an MPI library that can
    # be found with '-lmpi,' which isn't the case for many MPI packages. This
    # function finds the names of the actual libraries for Zoltan's MPI dep.
    def get_mpi_libs(self):
        """Return the library names (without 'lib' prefix or extension) found
        in the MPI dependency's lib directory that contain 'mpi'."""
        mpi_libs = set()
        for lib_path in glob.glob(join_path(self.spec['mpi'].prefix.lib, '*')):
            mpi_lib_match = re.match(
                r'^(lib)((\w*)mpi(\w*))\.((a)|({0}))$'.format(dso_suffix),
                os.path.basename(lib_path))
            if mpi_lib_match:
                mpi_libs.add(mpi_lib_match.group(2))
        return list(mpi_libs)
|
hsinhuang/yaly | refs/heads/master | yaly/lex.py | 1 | #!/usr/bin/env python
# coding:utf-8
"""lexical analysis"""
class Token:
    """
    A token: a lexeme (string of one or more characters) paired with the
    lexical unit it was classified as and the line it appeared on.
    """
    def __init__(self, lexical_unit, value, lineno, lexer):
        # The lexical-unit name must be a plain string.
        assert type(lexical_unit) is str
        self.__lexical_unit__ = lexical_unit
        self.__raw__ = value
        self.value = value
        self.lineno = lineno
        self.lexer = lexer
        # Token functions may set this to drop the token from the stream.
        self.skip = False
    def __str__(self):
        return "<{0}, {1}, line {2}>".format(
            self.__lexical_unit__, repr(self.value), self.lineno)
    def lexical_unit(self):
        """Return the name of this token's lexical unit."""
        return self.__lexical_unit__
class Lexer:
    """Lexer performs lexical analysis"""
    def __init__(self, tokens, raw_tokens, regex):
        """
        `tokens` is a dict map token name (i.e. lexical unit) to a tuple,
        of which the first position is a compiled regular expression
        (type: pyre.RegEx) and the second one is the function
        `raw_tokens` is an iterable object which contains all tokens name
        and the order in it is the precedence of each token
        `regex` is a compiled RegEx object which accepts all valid
        string
        """
        self.__tokens__ = tokens
        self.__raw_tokens__ = raw_tokens
        # Remaining unconsumed input; None until set_string() is called.
        self.__string__ = None
        self.__re__ = regex
        self.lineno = 0
    def get_next_token(self):
        """
        return a token(type: Token) stream

        Generator: repeatedly takes the longest prefix accepted by the
        combined regex, then classifies it using the first matching token
        in precedence order. Raises SyntaxWarning when no prefix matches,
        AssertionError when a lexeme matches the combined regex but no
        individual token.
        """
        if self.__string__ is None:
            raise UserWarning('having not specify input string')
        while self.__string__:
            # match_prefix returns the length of the longest valid prefix.
            next_idx = self.__re__.match_prefix(self.__string__)
            if not next_idx:
                raise SyntaxWarning("remaining `%s` cannot be parsed" % \
                    self.__string__)
            lexeme = self.__string__[:next_idx]
            self.__string__ = self.__string__[next_idx:]
            found = False
            for token in self.__raw_tokens__:
                assert token in self.__tokens__
                if self.__tokens__[token][0].match(lexeme):
                    found = True
                    # Apply the user's token function; it may transform the
                    # token or mark it as skipped.
                    next_token = self.__tokens__[token][1](
                        Token(token, lexeme, self.lineno, self)
                    )
                    # NOTE(review): this `continue` advances to the NEXT
                    # candidate token for the SAME lexeme rather than
                    # discarding the lexeme outright — a lower-precedence
                    # token matching the lexeme could still emit it.
                    # Confirm this is intended before changing.
                    if next_token.skip:
                        continue
                    yield next_token
                    break
            if not found:
                raise AssertionError("lexeme `%s` is valid " % lexeme + \
                    "but not found the corresponding lexical unit" )
    def set_string(self, string):
        """set input string"""
        self.__string__ = string
        self.lineno = 1
def lex():
    """
    return a Lexer

    Inspects the CALLER's local namespace (via sys._getframe(1)) for a
    `tokens` iterable and one `t_<TOKEN>` entry per token. A `t_<TOKEN>`
    may be either a function whose docstring is the token's regular
    expression, or a bare regex string (in which case an identity token
    function is synthesized). Token precedence is the order of `tokens`.

    NOTE: this module targets Python 2 (see the `except SyntaxError, e`
    syntax below).
    """
    compiled_tokens = {}
    regexs = []
    import sys
    # Frame hack: read the caller's locals, PLY-style.
    all_vars = sys._getframe(1).f_locals
    if 'tokens' not in all_vars:
        raise NotImplementedError(
            'Lex need variable `tokens` but not defined'
        )
    tokens = all_vars['tokens']
    if not hasattr(tokens, '__iter__'):
        raise TypeError(
            'Lex expected variable `tokens` to be iterable'
        )
    for token in tokens:
        if not token.isupper():
            raise SyntaxError(
                'token `%s` is not uppercase' % token
            )
        if tokens.count(token) > 1:
            raise SyntaxWarning(
                'declared token `%s` %d times' % \
                (token, tokens.count(token))
            )
        func_name = 't_' + token
        if func_name not in all_vars:
            raise NotImplementedError(
                'declared token `%s` but not define `%s`' % \
                (token, func_name)
            )
        func = all_vars[func_name]
        if type(func) is str:
            # Bare regex string: replace it with an identity token function
            # carrying the regex as its docstring.
            all_vars[func_name] = lambda t : t
            all_vars[func_name].__doc__ = func
            func = all_vars[func_name]
        import yare
        try:
            compiled_tokens[token] = (yare.compile(func.__doc__), func)
        except SyntaxError, e:
            raise SyntaxError(
                'regular expression `%s` specified' % func.__doc__ + \
                'in function `%s` not valid. Detail: %s' % (func_name, e)
            )
        regexs.append(func.__doc__)
    # The combined lexer regex accepts any lexeme any token accepts.
    return Lexer(compiled_tokens, tokens, yare.compile(yare.select(regexs)))
|
Tao-Ma/gpdb | refs/heads/master | src/test/tinc/ext/unittest2/__init__.py | 155 | """
unittest2
unittest2 is a backport of the new features added to the unittest testing
framework in Python 2.7. It is tested to run on Python 2.4 - 2.6.
To use unittest2 instead of unittest simply replace ``import unittest`` with
``import unittest2``.
Copyright (c) 1999-2003 Steve Purcell
Copyright (c) 2003-2010 Python Software Foundation
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
__all__ = ['TestResult', 'TestCase', 'TestSuite',
'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main',
'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless',
'expectedFailure', 'TextTestResult', '__version__', 'collector']
__version__ = '0.5.1'
# Expose obsolete functions for backwards compatibility
__all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases'])
from unittest2.collector import collector
from unittest2.result import TestResult
from unittest2.case import (
TestCase, FunctionTestCase, SkipTest, skip, skipIf,
skipUnless, expectedFailure
)
from unittest2.suite import BaseTestSuite, TestSuite
from unittest2.loader import (
TestLoader, defaultTestLoader, makeSuite, getTestCaseNames,
findTestCases
)
from unittest2.main import TestProgram, main, main_
from unittest2.runner import TextTestRunner, TextTestResult
try:
from unittest2.signals import (
installHandler, registerResult, removeResult, removeHandler
)
except ImportError:
# Compatibility with platforms that don't have the signal module
pass
else:
__all__.extend(['installHandler', 'registerResult', 'removeResult',
'removeHandler'])
# deprecated
_TextTestResult = TextTestResult
__unittest = True |
ondrokrc/gramps | refs/heads/master | gramps/gen/lib/citation.py | 1 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2010 Michiel D. Nauta
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Citation object for Gramps.
"""
#-------------------------------------------------------------------------
#
# standard python modules
#
#-------------------------------------------------------------------------
import logging
LOG = logging.getLogger(".citation")
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .primaryobj import PrimaryObject
from .mediabase import MediaBase
from .notebase import NoteBase
from .datebase import DateBase
from .tagbase import TagBase
from .attrbase import SrcAttributeBase
from .citationbase import IndirectCitationBase
from .handle import Handle
#-------------------------------------------------------------------------
#
# Citation class
#
#-------------------------------------------------------------------------
class Citation(MediaBase, NoteBase, SrcAttributeBase, IndirectCitationBase,
DateBase, PrimaryObject):
"""
A record of a citation of a source of information.
In GEDCOM this is called a SOURCE_CITATION.
The data provided in the <<SOURCE_CITATION>> structure is source-related
information specific to the data being cited.
"""
# Confidence levels; merge() prefers the lower-priority (more certain)
# of two levels — see level_priority there.
CONF_VERY_HIGH = 4
CONF_HIGH = 3
CONF_NORMAL = 2
CONF_LOW = 1
CONF_VERY_LOW = 0
def __init__(self):
"""Create a new Citation instance."""
PrimaryObject.__init__(self)
# The trailing numbers track each field's index in the serialize()
# tuple; keep them in sync when changing the layout.
MediaBase.__init__(self) # 7
NoteBase.__init__(self) # 6
DateBase.__init__(self) # 2
self.source_handle = None # 5
self.page = "" # 3
self.confidence = Citation.CONF_NORMAL # 4
SrcAttributeBase.__init__(self) # 8
def serialize(self, no_text_date = False):
"""
Convert the object to a serialized tuple of data.
`no_text_date` is forwarded to DateBase.serialize to omit the
date's text representation.
"""
return (self.handle, # 0
self.gramps_id, # 1
DateBase.serialize(self, no_text_date),# 2
str(self.page), # 3
self.confidence, # 4
self.source_handle, # 5
NoteBase.serialize(self), # 6
MediaBase.serialize(self), # 7
SrcAttributeBase.serialize(self), # 8
self.change, # 9
TagBase.serialize(self), # 10
self.private) # 11
def to_struct(self):
"""
Convert the data held in this object to a structure (eg,
struct) that represents all the data elements.
This method is used to recursively convert the object into a
self-documenting form that can easily be used for various
purposes, including diffs and queries.
These structures may be primitive Python types (string,
integer, boolean, etc.) or complex Python types (lists,
tuples, or dicts). If the return type is a dict, then the keys
of the dict match the fieldname of the object. If the return
struct (or value of a dict key) is a list, then it is a list
of structs. Otherwise, the struct is just the value of the
attribute.
:returns: Returns a struct containing the data of the object.
:rtype: dict
"""
return {"_class": "Citation",
"handle": Handle("Citation", self.handle), # 0
"gramps_id": self.gramps_id, # 1
"date": DateBase.to_struct(self), # 2
"page": str(self.page), # 3
"confidence": self.confidence, # 4
"source_handle": Handle("Source", self.source_handle), # 5
"note_list": NoteBase.to_struct(self), # 6
"media_list": MediaBase.to_struct(self), # 7
"srcattr_list": SrcAttributeBase.to_struct(self),# 8
"change": self.change, # 9
"tag_list": TagBase.to_struct(self), # 10
"private": self.private} # 11
@classmethod
def from_struct(cls, struct):
"""
Given a struct data representation, return a serialized object.
:returns: Returns a serialized object
"""
# A default instance supplies fallback values for missing keys.
default = Citation()
return (Handle.from_struct(struct.get("handle", default.handle)),
struct.get("gramps_id", default.gramps_id),
DateBase.from_struct(struct.get("date", {})),
struct.get("page", default.page),
struct.get("confidence", default.confidence),
Handle.from_struct(struct.get("source_handle", default.source_handle)),
NoteBase.from_struct(struct.get("note_list", default.note_list)),
MediaBase.from_struct(struct.get("media_list", default.media_list)),
SrcAttributeBase.from_struct(struct.get("srcattr_list", [])),
struct.get("change", default.change),
TagBase.from_struct(struct.get("tag_list", default.tag_list)),
struct.get("private", default.private))
def unserialize(self, data):
"""
Convert the data held in a tuple created by the serialize method
back into the data in a Citation structure.
"""
# Tuple layout must mirror serialize() exactly (indices 0-11).
(self.handle, # 0
self.gramps_id, # 1
date, # 2
self.page, # 3
self.confidence, # 4
self.source_handle, # 5
note_list, # 6
media_list, # 7
srcattr_list, # 8
self.change, # 9
tag_list, # 10
self.private # 11
) = data
DateBase.unserialize(self, date)
NoteBase.unserialize(self, note_list)
MediaBase.unserialize(self, media_list)
TagBase.unserialize(self, tag_list)
SrcAttributeBase.unserialize(self, srcattr_list)
return self
def _has_handle_reference(self, classname, handle):
"""
Return True if the object has reference to a given handle of given
primary object type.
:param classname: The name of the primary object class.
:type classname: str
:param handle: The handle to be checked.
:type handle: str
:returns: Returns whether the object has reference to this handle of
this object type.
:rtype: bool
"""
if classname == 'Note':
return handle in [ref.ref for ref in self.note_list]
elif classname == 'Media':
return handle in [ref.ref for ref in self.media_list]
elif classname == 'Source':
return handle == self.get_reference_handle()
return False
def _remove_handle_references(self, classname, handle_list):
"""
Remove all references in this object to object handles in the list.
:param classname: The name of the primary object class.
:type classname: str
:param handle_list: The list of handles to be removed.
:type handle_list: str
"""
if classname == 'Source' and \
self.get_reference_handle() in handle_list:
self.set_reference_handle(None)
def _replace_handle_reference(self, classname, old_handle, new_handle):
"""
Replace all references to old handle with those to the new handle.
:param classname: The name of the primary object class.
:type classname: str
:param old_handle: The handle to be replaced.
:type old_handle: str
:param new_handle: The handle to replace the old one with.
:type new_handle: str
"""
if classname == 'Source' and \
self.get_reference_handle() == old_handle:
self.set_reference_handle(new_handle)
def get_citation_child_list(self):
"""
Return the list of child secondary objects that may refer citations.
:returns: Returns the list of child secondary child objects that may
refer citations.
:rtype: list
"""
return self.media_list
def get_text_data_list(self):
"""
Return the list of all textual attributes of the object.
:returns: Returns the list of all textual attributes of the object.
:rtype: list
"""
return [self.page, self.gramps_id]
def get_text_data_child_list(self):
"""
Return the list of child objects that may carry textual data.
:returns: Returns the list of child objects that may carry textual data.
:rtype: list
"""
return self.media_list + self.attribute_list
def get_note_child_list(self):
"""
Return the list of child secondary objects that may refer notes.
:returns: Returns the list of child secondary child objects that may
refer notes.
:rtype: list
"""
return self.media_list
def get_handle_referents(self):
"""
Return the list of child objects which may, directly or through
their children, reference primary objects.
:returns: Returns the list of objects referencing primary objects.
:rtype: list
"""
return self.media_list
def get_referenced_handles(self):
"""
Return the list of (classname, handle) tuples for all directly
referenced primary objects.
:returns: List of (classname, handle) tuples for referenced objects.
:rtype: list
"""
ret = (self.get_referenced_note_handles() +
self.get_referenced_tag_handles())
if self.get_reference_handle():
ret += [('Source', self.get_reference_handle())]
return ret
def merge(self, acquisition):
"""
Merge the content of acquisition into this citation.
:param acquisition: The citation to merge with the present citation.
:type acquisition: Citation
"""
self._merge_privacy(acquisition)
self._merge_note_list(acquisition)
self._merge_media_list(acquisition)
self._merge_tag_list(acquisition)
# merge confidence
# level_priority ranks confidence values from most to least
# preferred (normal, very low, low, very high, high); the merged
# citation keeps whichever of the two levels ranks earlier.
level_priority = [0, 4, 1, 3, 2]
idx = min(level_priority.index(self.confidence),
level_priority.index(acquisition.confidence))
self.confidence = level_priority[idx]
self._merge_attribute_list(acquisition)
# N.B. a Citation can refer to only one 'Source', so the
# 'Source' from acquisition cannot be merged in
def set_confidence_level(self, val):
"""Set the confidence level."""
self.confidence = val
def get_confidence_level(self):
"""Return the confidence level."""
return self.confidence
def set_page(self, page):
"""Set the page indicator of the Citation."""
self.page = page
def get_page(self):
"""Get the page indicator of the Citation."""
return self.page
def set_reference_handle(self, val):
"""Set the handle of the Source this citation refers to."""
self.source_handle = val
def get_reference_handle(self):
"""Return the handle of the referenced Source (or None)."""
return self.source_handle
|
caioserra/apiAdwords | refs/heads/master | examples/adspygoogle/dfp/v201308/audience_segment_service/get_all_audience_segments.py | 2 | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all audience segments.
To create audience segments, run create_audience_segments.py.
"""
__author__ = 'Nicholas Chen'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Maximum page size suggested by the DFP API for paged statements.
SUGGESTED_PAGE_LIMIT = 500
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
# Initialize appropriate service.
audience_segment_service = client.GetService(
    'AudienceSegmentService', version='v201308')
# Get all audience segments.
offset, result_set_size = 0, 0
while True:
  # Create a statement to select audience segments.
  filter_statement = {'query': 'LIMIT %s OFFSET %s' % (
      SUGGESTED_PAGE_LIMIT, offset)}
  response = audience_segment_service.getAudienceSegmentsByStatement(
      filter_statement)[0]
  if 'results' in response:
    segments = response['results']
    result_set_size = len(segments)
    for segment in segments:
      print ('Audience segment with id \'%s\' and name '
             '\'%s\' and type \'%s\' was found.' %
             (segment['id'], segment['name'], segment['type']))
    # Advance past the page just fetched; a short page means we are done.
    offset += result_set_size
    if result_set_size != SUGGESTED_PAGE_LIMIT:
      break
  elif offset == 0:
    print 'No Results Found'
    break
print 'Number of results found: %d' % offset
|
Simclass/EDXD_Analysis | refs/heads/master | pyxe/data_io.py | 1 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 20 17:40:07 2015
@author: casimp
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from six import string_types, binary_type
import h5py
import os
import numpy as np
from pyxpb.detectors import MonoDetector, EnergyDetector
def pyxe_to_hdf5(fname, pyxe, overwrite=False):
    """ Saves pyxe data object - specifically the analyzed data and basic
    detector/experimental setup - to a hdf5 file. Stores progression point
    so analysis can be continued where it was left off.

    Args:
        fname (str): File path.
        pyxe: pyXe data object
        overwrite (bool): Option to overwrite if same filename is specified.
    """
    data_ids = ['ndim', 'd1', 'd2', 'd3', 'T', 'q', 'I', 'phi',
                'peaks', 'peaks_err', 'fwhm', 'fwhm_err',
                'strain', 'strain_err', 'strain_tensor',
                'E', 'v', 'G', 'stress_state', 'analysis_state']
    detector_ids = ['method', '_det_param', '_back', 'materials']
    # 'w-' refuses to clobber an existing file unless overwrite is requested.
    mode = 'w' if overwrite else 'w-'
    with h5py.File(fname, mode) as f:
        for name in data_ids:
            try:
                d = getattr(pyxe, name)
            except AttributeError:
                # Attribute not present at this analysis stage - skip it.
                continue
            if isinstance(d, string_types):
                d = d.encode()
            if d is None:
                continue
            target = 'pyxe_analysis/%s' % name
            # The intensity array is large, so it alone is gzip-compressed.
            kwargs = {'compression': 'gzip'} if name == 'I' else {}
            f.create_dataset(target, data=d, **kwargs)
        for name in detector_ids:
            data = getattr(pyxe.detector, name)
            if isinstance(data, string_types):
                data = data.encode()
            if data is None:
                continue
            base = 'setup/%s' % name
            if name == 'materials':
                # One small dataset per material: [a, b, weight].
                for mat in data:
                    triple = [data[mat][x] for x in ['a', 'b', 'weight']]
                    f.create_dataset('{}/{}'.format(base, mat),
                                     data=np.array(triple))
            elif name == '_det_param':
                # One dataset per detector parameter.
                for param in data:
                    f.create_dataset('{}/{}'.format(base, param),
                                     data=np.array(data[param]))
            else:
                f.create_dataset(base, data=data)
def data_extract(pyxe_h5, variable_id):
    """ Extract the group of values named by *variable_id* from a pyxe
    hdf5 file, returning a list with None for any missing entry."""
    data_ids = {'dims': ['ndim', 'd1', 'd2', 'd3'],
                'raw': ['q', 'I', 'phi', 'T'],
                'peaks': ['peaks', 'peaks_err'],
                'fwhm': ['fwhm', 'fwhm_err'],
                'strain': ['strain', 'strain_err'],
                'tensor': ['strain_tensor'],
                'material': ['E', 'v', 'G'],
                'state': ['stress_state', 'analysis_state']}
    data = []
    for ext in data_ids[variable_id]:
        try:
            stored = pyxe_h5['pyxe_analysis/{}'.format(ext)][()]
        except KeyError:
            # Entry was never saved (analysis not taken that far) - keep
            # the slot so callers can unpack positionally.
            data.append(None)
            continue
        # Strings were stored as bytes; decode them back to text.
        if isinstance(stored, binary_type):
            stored = stored.decode()
        data.append(stored)
    return data
def detector_extract(pyxe_h5):
    """ Rebuild and return the detector object stored in a pyxe hdf5 file."""
    materials = {}
    try:
        mat = pyxe_h5['setup/materials']
        for name in mat:
            a, b, weight = mat[name][()][0], mat[name][()][1], mat[name][()][2]
            materials[name] = {'a': a, 'b': b, 'weight': weight}
    except KeyError:
        # No materials were saved - proceed with an empty mapping.
        pass
    params = pyxe_h5['setup/_det_param']
    det_param = {param: params[param][()] for param in params}
    back = pyxe_h5['setup/_back'][()]
    method = pyxe_h5['setup/method'][()].decode()
    return detector_recreate(method, det_param, materials, back)
def detector_recreate(method, det_params, materials, back):
    """ Recreates detector instance from data extracted from pyxe hdf5 file.

    Args:
        method (str): mono or edxd
        det_params (dict): Dictionary correct parameters for method
        materials (dict): Dictionary containing materials and their params
        back (ndarray): Chebyshev polynomial terms (wrt. az_idx)

    Returns:
        pyxpb.peaks.Peak: pyxpb detector instance
    """
    if method == 'mono':
        detector = MonoDetector(**det_params)
    else:
        detector = EnergyDetector(**det_params)
    detector._back = back
    # Note: only 'b' and 'weight' are passed back; 'a' is recomputed by
    # add_material internally.
    for name in materials:
        detector.add_material(name, b=materials[name]['b'],
                              weight=materials[name]['weight'])
    return detector
def dim_fill(co_array):
    """ Splits ndarray of co-ords into d1, d2 (or None), d3 (or None).

    Args:
        co_array (ndarray): Data co-ordinates

    Returns:
        tuple: co_ords (d1, d2, d3), dims (list of valid dimensions)
    """
    # 1-D input is a single spatial dimension by definition.
    if co_array.ndim == 1:
        return [co_array, None, None], [b'ss2_x']
    labels = (b'ss2_x', b'ss2_y', b'ss2_z')
    co_ords = []
    dims = []
    n_axes = co_array.shape[1]
    for axis, label in enumerate(labels):
        if axis < n_axes:
            co_ords.append(co_array[:, axis])
            dims.append(label)
        else:
            # Missing axis: pad the co-ordinate slot but not the dim list.
            co_ords.append(None)
    return co_ords, dims
def dimension_fill(i12_nxs, dim_id):
    """ Extracts correct spatial array from NeXus file. Returns None if the
    dimension doesn't exist.

    Args:
        i12_nxs: Raw data (hdf5 format)
        dim_id (str): Dimension ID (ss_x, ss2_y or ss2_z)
    """
    key = 'entry1/EDXD_elements/' + dim_id
    try:
        return i12_nxs[key][()]
    except KeyError:
        # Dimension not present in this scan.
        return None
def dimension_fill_pixium10(i12_nxs, dim_id):
    """ Extracts correct spatial array from NeXus file (pixium10 detector
    group). Returns None if the dimension doesn't exist.

    Args:
        i12_nxs: Raw data (hdf5 format)
        dim_id (str): Dimension ID (ss_x, ss2_y or ss2_z)
    """
    key = 'entry1/pixium10_tif/' + dim_id
    try:
        return i12_nxs[key][()]
    except KeyError:
        # Dimension not present in this scan.
        return None
def extract_fnames(folder, f_ext):
    """Return the sorted file names in *folder* that end with *f_ext*."""
    return sorted(name for name in os.listdir(folder) if name.endswith(f_ext))
|
dplbsd/zcaplib | refs/heads/master | head/crypto/heimdal/lib/wind/rfc4518.py | 88 | #!/usr/local/bin/python
# -*- coding: iso-8859-1 -*-
# $Id$
# Copyright (c) 2004, 2008 Kungliga Tekniska Högskolan
# (Royal Institute of Technology, Stockholm, Sweden).
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Institute nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import re
import string
def read():
    """Return a dict of RFC 4518 string-preparation tables.

    Keys:
        "rfc4518-map":   list of "code[-code]; target ; comment" mapping rows
                         (map-to-nothing and map-to-SPACE entries).
        "rfc4518-error": list of prohibited code points.

    The row format mirrors the stringprep table format consumed by the
    surrounding generator scripts.
    """
    ret = {}
    #2.2. Map
    #
    #   SOFT HYPHEN (U+00AD) and MONGOLIAN TODO SOFT HYPHEN (U+1806) code
    #   points are mapped to nothing.  COMBINING GRAPHEME JOINER (U+034F) and
    #   VARIATION SELECTORs (U+180B-180D, FF00-FE0F) code points are also
    #   mapped to nothing.  The OBJECT REPLACEMENT CHARACTER (U+FFFC) is
    #   mapped to nothing.
    t = []
    t.append("   00AD; ; Map to nothing")
    t.append("   1806; ; Map to nothing")
    t.append("   034F; ; Map to nothing")
    t.append("   180B; ; Map to nothing")
    t.append("   180C; ; Map to nothing")
    t.append("   180D; ; Map to nothing")
    t.append("   FE00; ; Map to nothing")
    t.append("   FE01; ; Map to nothing")
    t.append("   FE02; ; Map to nothing")
    t.append("   FE03; ; Map to nothing")
    t.append("   FE04; ; Map to nothing")
    t.append("   FE05; ; Map to nothing")
    t.append("   FE06; ; Map to nothing")
    t.append("   FE07; ; Map to nothing")
    t.append("   FE08; ; Map to nothing")
    t.append("   FE09; ; Map to nothing")
    t.append("   FE0A; ; Map to nothing")
    t.append("   FE0B; ; Map to nothing")
    t.append("   FE0C; ; Map to nothing")
    t.append("   FE0D; ; Map to nothing")
    t.append("   FE0E; ; Map to nothing")
    t.append("   FE0F; ; Map to nothing")
    t.append("   FFFC; ; Map to nothing")

    #   CHARACTER TABULATION (U+0009), LINE FEED (LF) (U+000A), LINE
    #   TABULATION (U+000B), FORM FEED (FF) (U+000C), CARRIAGE RETURN (CR)
    #   (U+000D), and NEXT LINE (NEL) (U+0085) are mapped to SPACE (U+0020).
    t.append("   0009; 0020 ; Map to SPACE")
    t.append("   000A; 0020 ; Map to SPACE")
    t.append("   000B; 0020 ; Map to SPACE")
    t.append("   000C; 0020 ; Map to SPACE")
    t.append("   000D; 0020 ; Map to SPACE")
    t.append("   0085; 0020 ; Map to SPACE")

    #   All other control code (e.g., Cc) points or code points with a
    #   control function (e.g., Cf) are mapped to nothing.  The following is
    #   a complete list of these code points: U+0000-0008, 000E-001F, 007F-
    #   0084, 0086-009F, 06DD, 070F, 180E, 200C-200F, 202A-202E, 2060-2063,
    #   206A-206F, FEFF, FFF9-FFFB, 1D173-1D17A, E0001, E0020-E007F.
    t.append("   0000-0008; ; Map to nothing")
    t.append("   000E-001F; ; Map to nothing")
    t.append("   007F-0084; ; Map to nothing")
    t.append("   0086-009F; ; Map to nothing")
    t.append("   06DD; ; Map to nothing")
    t.append("   070F; ; Map to nothing")
    t.append("   180E; ; Map to nothing")
    t.append("   200C-200F; ; Map to nothing")
    t.append("   202A-202E; ; Map to nothing")
    t.append("   2060-2063; ; Map to nothing")
    t.append("   206A-206F; ; Map to nothing")
    t.append("   FEFF; ; Map to nothing")
    t.append("   FFF9-FFFB; ; Map to nothing")
    t.append("   1D173-1D17A; ; Map to nothing")
    t.append("   E0001; ; Map to nothing")
    t.append("   E0020-E007F; ; Map to nothing")

    #   ZERO WIDTH SPACE (U+200B) is mapped to nothing.  All other code
    #   points with Separator (space, line, or paragraph) property (e.g., Zs,
    #   Zl, or Zp) are mapped to SPACE (U+0020).  The following is a complete
    #   list of these code points: U+0020, 00A0, 1680, 2000-200A, 2028-2029,
    #   202F, 205F, 3000.
    t.append("   200B; ; Map to nothing")
    t.append("   0020; 0020; Map to SPACE")
    t.append("   00A0; 0020; Map to SPACE")
    t.append("   1680; 0020; Map to SPACE")
    t.append("   2000-200A; 0020; Map to SPACE")
    t.append("   2028-2029; 0020; Map to SPACE")
    t.append("   202F; 0020; Map to SPACE")
    t.append("   205F; 0020; Map to SPACE")
    t.append("   3000; 0020; Map to SPACE")

    ret["rfc4518-map"] = t

    #   For case ignore, numeric, and stored prefix string matching rules,
    #   characters are case folded per B.2 of [RFC3454].

    #2.4. Prohibit
    #   The REPLACEMENT CHARACTER (U+FFFD) code point is prohibited.
    t = []
    t.append("   FFFD;")
    ret["rfc4518-error"] = t

    # Fix: removed a trailing dead "t = []" re-assignment that was never
    # used before the return.
    return ret
|
llluiop/bitcoin | refs/heads/master | qa/rpc-tests/zapwallettxes.py | 6 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework import BitcoinTestFramework
from util import *
class ZapWalletTXesTest (BitcoinTestFramework):
    """Regression test for -zapwallettxes: unconfirmed wallet transactions
    must be dropped on restart, while confirmed ones must survive."""
    def setup_chain(self):
        # Start from a completely clean 3-node chain.
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 3)
    def setup_network(self, split=False):
        # Fully connect the three nodes (0-1, 1-2, 0-2).
        self.nodes = start_nodes(3, self.options.tmpdir)
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        self.is_network_split=False
        self.sync_all()
    def run_test (self):
        print "Mining blocks..."
        self.nodes[0].generate(1)
        self.sync_all()
        # 101 blocks so node 0's coinbase matures and is spendable.
        self.nodes[1].generate(101)
        self.sync_all()
        assert_equal(self.nodes[0].getbalance(), 50)
        # Two transactions that will be confirmed by the next block...
        txid0 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
        txid1 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        # ...and two that stay unconfirmed in the mempool.
        txid2 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
        txid3 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
        tx0 = self.nodes[0].gettransaction(txid0)
        assert_equal(tx0['txid'], txid0) #tx0 must be available (confirmed)
        tx1 = self.nodes[0].gettransaction(txid1)
        assert_equal(tx1['txid'], txid1) #tx1 must be available (confirmed)
        tx2 = self.nodes[0].gettransaction(txid2)
        assert_equal(tx2['txid'], txid2) #tx2 must be available (unconfirmed)
        tx3 = self.nodes[0].gettransaction(txid3)
        assert_equal(tx3['txid'], txid3) #tx3 must be available (unconfirmed)
        #restart bitcoind
        self.nodes[0].stop()
        bitcoind_processes[0].wait()
        self.nodes[0] = start_node(0,self.options.tmpdir)
        # A plain restart keeps the unconfirmed wallet transaction.
        tx3 = self.nodes[0].gettransaction(txid3)
        assert_equal(tx3['txid'], txid3) #tx must be available (unconfirmed)
        self.nodes[0].stop()
        bitcoind_processes[0].wait()
        #restart bitcoind with zapwallettxes
        self.nodes[0] = start_node(0,self.options.tmpdir, ["-zapwallettxes=1"])
        aException = False
        try:
            tx3 = self.nodes[0].gettransaction(txid3)
        except JSONRPCException,e:
            print e
            aException = True
        assert_equal(aException, True) #there must be a expection because the unconfirmed wallettx0 must be gone by now
        tx0 = self.nodes[0].gettransaction(txid0)
        assert_equal(tx0['txid'], txid0) #tx0 (confirmed) must still be available because it was confirmed
# Script entry point: run the test when invoked directly.
if __name__ == '__main__':
    ZapWalletTXesTest ().main ()
|
EricNeedham/assignment-1 | refs/heads/master | venv/lib/python2.7/copy_reg.py | 4 | /usr/lib/python2.7/copy_reg.py |
daxxi13/CouchPotatoServer | refs/heads/develop | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/jeuxvideo.py | 25 | # coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
class JeuxVideoIE(InfoExtractor):
    """youtube-dl extractor for jeuxvideo.com video pages."""
    # The final path segment before "-<digits>.htm" is used as a slug/title.
    _VALID_URL = r'http://.*?\.jeuxvideo\.com/.*/(.*?)-\d+\.htm'
    _TEST = {
        'url': 'http://www.jeuxvideo.com/reportages-videos-jeux/0004/00046170/tearaway-playstation-vita-gc-2013-tearaway-nous-presente-ses-papiers-d-identite-00115182.htm',
        'md5': '046e491afb32a8aaac1f44dd4ddd54ee',
        'info_dict': {
            'id': '5182',
            'ext': 'mp4',
            'title': 'GC 2013 : Tearaway nous présente ses papiers d\'identité',
            'description': 'Lorsque les développeurs de LittleBigPlanet proposent un nouveau titre, on ne peut que s\'attendre à un résultat original et fort attrayant.\n',
        },
    }
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        title = mobj.group(1)
        webpage = self._download_webpage(url, title)
        # The Flash player's config URL is embedded in a flashvars param.
        xml_link = self._html_search_regex(
            r'<param name="flashvars" value="config=(.*?)" />',
            webpage, 'config URL')
        # The numeric video id is part of the player XML path.
        video_id = self._search_regex(
            r'http://www\.jeuxvideo\.com/config/\w+/\d+/(.*?)/\d+_player\.xml',
            xml_link, 'video ID')
        config = self._download_xml(
            xml_link, title, 'Downloading XML config')
        # The XML config embeds a JSON payload listing available versions;
        # the first version entry carries the file name we need.
        info_json = config.find('format.json').text
        info = json.loads(info_json)['versions'][0]
        video_url = 'http://video720.jeuxvideo.com/' + info['file']
        return {
            'id': video_id,
            'title': config.find('titre_video').text,
            'ext': 'mp4',
            'url': video_url,
            'description': self._og_search_description(webpage),
            'thumbnail': config.find('image').text,
        }
|
w1ll1am23/home-assistant | refs/heads/dev | tests/components/smarttub/test_sensor.py | 4 | """Test the SmartTub sensor platform."""
import pytest
# Each (entity_suffix, expected_state) pair is exercised against the mocked
# spa fixture; the fixtures `spa`, `setup_entry` and `hass` come from the
# SmartTub test conftest.
@pytest.mark.parametrize(
    "entity_suffix,expected_state",
    [
        ("state", "normal"),
        ("flow_switch", "open"),
        ("ozone", "off"),
        ("uv", "off"),
        ("blowout_cycle", "inactive"),
        ("cleanup_cycle", "inactive"),
    ],
)
async def test_sensor(spa, setup_entry, hass, entity_suffix, expected_state):
    """Test simple sensors."""
    # Entity ids follow the "sensor.<brand>_<model>_<suffix>" convention.
    entity_id = f"sensor.{spa.brand}_{spa.model}_{entity_suffix}"
    state = hass.states.get(entity_id)
    assert state is not None
    assert state.state == expected_state
async def test_primary_filtration(hass, setup_entry, spa):
    """Test the primary filtration cycle sensor."""
    entity_id = f"sensor.{spa.brand}_{spa.model}_primary_filtration_cycle"
    state = hass.states.get(entity_id)
    assert state is not None
    assert state.state == "inactive"
    # Extra attributes exposed by the sensor for the primary cycle.
    assert state.attributes["duration"] == 4
    assert state.attributes["cycle_last_updated"] is not None
    assert state.attributes["mode"] == "normal"
    assert state.attributes["start_hour"] == 2
async def test_secondary_filtration(hass, setup_entry, spa):
    """Test the secondary filtration cycle sensor."""
    entity_id = f"sensor.{spa.brand}_{spa.model}_secondary_filtration_cycle"
    state = hass.states.get(entity_id)
    assert state is not None
    assert state.state == "inactive"
    # The secondary cycle exposes no duration/start_hour attributes.
    assert state.attributes["cycle_last_updated"] is not None
    assert state.attributes["mode"] == "away"
|
ppwwyyxx/tensorflow | refs/heads/master | tensorflow/lite/experimental/micro/tools/make/merge_arduino_zips.py | 23 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resolves non-system C/C++ includes to their full paths to help Arduino."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import shutil
import tempfile
import zipfile
def main(unparsed_args):
  """Merges multiple Arduino zipfiles into a single result.

  The first argument is the output zipfile path; every remaining argument is
  an input zipfile. Inputs are extracted in order into one directory (later
  archives overwrite same-named files from earlier ones) and re-archived.

  Args:
    unparsed_args: [output_zip_path, input_zip_path, ...].
  """
  output_zip_path = unparsed_args[0]
  input_zip_paths = unparsed_args[1:]
  working_dir = tempfile.mkdtemp()
  try:
    for input_zip_path in input_zip_paths:
      with zipfile.ZipFile(input_zip_path, 'r') as input_zip:
        input_zip.extractall(path=working_dir)
    # shutil.make_archive appends '.zip' itself, so strip a trailing '.zip'
    # only (the previous str.replace removed '.zip' anywhere in the path).
    if output_zip_path.endswith('.zip'):
      output_path_without_zip = output_zip_path[:-len('.zip')]
    else:
      output_path_without_zip = output_zip_path
    shutil.make_archive(output_path_without_zip, 'zip', working_dir)
  finally:
    # Fix: the temporary extraction directory used to be leaked.
    shutil.rmtree(working_dir, ignore_errors=True)
def parse_args():
  """Converts the raw arguments into accessible flags."""
  # No flags are defined; all positional arguments fall through as
  # "unparsed" and are forwarded directly to main().
  parser = argparse.ArgumentParser()
  _, unparsed_args = parser.parse_known_args()
  main(unparsed_args)
# Script entry point.
if __name__ == '__main__':
  parse_args()
|
mattesno1/Sick-Beard | refs/heads/master | lib/hachoir_parser/file_system/linux_swap.py | 90 | """
Linux swap file.
Documentation: Linux kernel source code, files:
- mm/swapfile.c
- include/linux/swap.h
Author: Victor Stinner
Creation date: 25 december 2006 (christmas ;-))
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (ParserError, GenericVector,
UInt32, String,
Bytes, NullBytes, RawBytes)
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.tools import humanFilesize
from lib.hachoir_core.bits import str2hex
PAGE_SIZE = 4096
# Definition of MAX_SWAP_BADPAGES in Linux kernel:
# (__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int)
MAX_SWAP_BADPAGES = ((PAGE_SIZE - 10) - 1536) // 4
class Page(RawBytes):
    """A single raw swap page (PAGE_SIZE bytes) within the swap file."""
    # Field size in bits, as required by the hachoir field framework.
    static_size = PAGE_SIZE*8
    def __init__(self, parent, name):
        RawBytes.__init__(self, parent, name, PAGE_SIZE)
class UUID(Bytes):
    """A 16-byte field displayed in the canonical 8-4-4-4-12 UUID form."""
    # Field size in bits (16 bytes).
    static_size = 16*8
    def __init__(self, parent, name):
        Bytes.__init__(self, parent, name, 16)
    def createDisplay(self):
        # Hex-encode the 16 raw bytes, then insert dashes at the standard
        # UUID group boundaries.
        text = str2hex(self.value, format=r"%02x")
        return "%s-%s-%s-%s-%s" % (
            text[:8], text[8:12], text[12:16], text[16:20], text[20:])
class LinuxSwapFile(Parser):
    """Hachoir parser for Linux swap files (v1, v2 and suspend images).

    The magic string sits in the last 10 bytes of the first page, so all
    magic offsets below are expressed as (PAGE_SIZE-10) bytes.
    """
    PARSER_TAGS = {
        "id": "linux_swap",
        "file_ext": ("",),
        "category": "file_system",
        "min_size": PAGE_SIZE*8,
        "description": "Linux swap file",
        "magic": (
            ("SWAP-SPACE", (PAGE_SIZE-10)*8),
            ("SWAPSPACE2", (PAGE_SIZE-10)*8),
            ("S1SUSPEND\0", (PAGE_SIZE-10)*8),
        ),
    }
    endian = LITTLE_ENDIAN
    def validate(self):
        # Check the magic string at the end of the first page, then sanity
        # check the bad-page count against the structural maximum.
        magic = self.stream.readBytes((PAGE_SIZE-10)*8, 10)
        if magic not in ("SWAP-SPACE", "SWAPSPACE2", "S1SUSPEND\0"):
            return "Unknown magic string"
        if MAX_SWAP_BADPAGES < self["nb_badpage"].value:
            return "Invalid number of bad page (%u)" % self["nb_badpage"].value
        return True
    def getPageCount(self):
        """
        Number of pages which can really be used for swapping:
        number of page minus bad pages minus one page (used for the header)
        """
        # -1 because first page is used for the header
        return self["last_page"].value - self["nb_badpage"].value - 1
    def createDescription(self):
        # Human-readable summary based on which magic variant was found.
        if self["magic"].value == "S1SUSPEND\0":
            text = "Suspend swap file version 1"
        elif self["magic"].value == "SWAPSPACE2":
            text = "Linux swap file version 2"
        else:
            text = "Linux swap file version 1"
        nb_page = self.getPageCount()
        return "%s, page size: %s, %s pages" % (
            text, humanFilesize(PAGE_SIZE), nb_page)
    def createFields(self):
        # First kilobyte: boot sectors
        yield RawBytes(self, "boot", 1024, "Space for disklabel etc.")
        # Header
        yield UInt32(self, "version")
        yield UInt32(self, "last_page")
        yield UInt32(self, "nb_badpage")
        yield UUID(self, "sws_uuid")
        yield UUID(self, "sws_volume")
        yield NullBytes(self, "reserved", 117*4)
        # Read bad pages (if any)
        count = self["nb_badpage"].value
        if count:
            if MAX_SWAP_BADPAGES < count:
                raise ParserError("Invalid number of bad page (%u)" % count)
            yield GenericVector(self, "badpages", count, UInt32, "badpage")
        # Read magic
        padding = self.seekByte(PAGE_SIZE - 10, "padding", null=True)
        if padding:
            yield padding
        yield String(self, "magic", 10, charset="ASCII")
        # Read all pages
        yield GenericVector(self, "pages", self["last_page"].value, Page, "page")
        # Padding at the end
        padding = self.seekBit(self.size, "end_padding", null=True)
        if padding:
            yield padding
|
dronefly/dronefly.github.io | refs/heads/master | flask/lib/python2.7/site-packages/coverage/annotate.py | 76 | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Source file annotation for coverage.py."""
import io
import os
import re
from coverage.files import flat_rootname
from coverage.misc import isolate_module
from coverage.report import Reporter
os = isolate_module(os)
class AnnotateReporter(Reporter):
    """Generate annotated source files showing line coverage.
    This reporter creates annotated copies of the measured source files. Each
    .py file is copied as a .py,cover file, with a left-hand margin annotating
    each line::
        > def h(x):
        -     if 0:   #pragma: no cover
        -         pass
        >     if x == 1:
        !         a = 1
        >     else:
        >         a = 2
        > h(2)
    Executed lines use '>', lines not executed use '!', lines excluded from
    consideration use '-'.
    """
    def __init__(self, coverage, config):
        super(AnnotateReporter, self).__init__(coverage, config)
        self.directory = None
    # Matches blank lines / comment-only lines (no marker is emitted).
    blank_re = re.compile(r"\s*(#|$)")
    # Matches lines containing only "else:", which need special treatment
    # because "else" is never a statement of its own in the analysis data.
    else_re = re.compile(r"\s*else\s*:\s*(#|$)")
    def report(self, morfs, directory=None):
        """Run the report.
        See `coverage.report()` for arguments.
        """
        self.report_files(self.annotate_file, morfs, directory)
    def annotate_file(self, fr, analysis):
        """Annotate a single file.
        `fr` is the FileReporter for the file to annotate.
        """
        statements = sorted(analysis.statements)
        missing = sorted(analysis.missing)
        excluded = sorted(analysis.excluded)
        if self.directory:
            dest_file = os.path.join(self.directory, flat_rootname(fr.relative_filename()))
            if dest_file.endswith("_py"):
                dest_file = dest_file[:-3] + ".py"
            dest_file += ",cover"
        else:
            dest_file = fr.filename + ",cover"
        with io.open(dest_file, 'w', encoding='utf8') as dest:
            # i / j are cursors into the sorted statement / missing lists;
            # they only ever move forward as lineno increases.
            i = 0
            j = 0
            covered = True
            source = fr.source()
            for lineno, line in enumerate(source.splitlines(True), start=1):
                while i < len(statements) and statements[i] < lineno:
                    i += 1
                while j < len(missing) and missing[j] < lineno:
                    j += 1
                if i < len(statements) and statements[i] == lineno:
                    # This line is a statement: it is covered unless it is
                    # the next missing line.
                    covered = j >= len(missing) or missing[j] > lineno
                if self.blank_re.match(line):
                    dest.write(u'  ')
                elif self.else_re.match(line):
                    # Special logic for lines containing only 'else:'.
                    if i >= len(statements) and j >= len(missing):
                        dest.write(u'! ')
                    elif i >= len(statements) or j >= len(missing):
                        dest.write(u'> ')
                    elif statements[i] == missing[j]:
                        dest.write(u'! ')
                    else:
                        dest.write(u'> ')
                elif lineno in excluded:
                    dest.write(u'- ')
                elif covered:
                    dest.write(u'> ')
                else:
                    dest.write(u'! ')
                dest.write(line)
|
shoopio/shoop-simple-theme | refs/heads/master | shoop_simple_theme/__init__.py | 1 | # This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
"""
A Bootstrap-powered simple theme for Shoop.
"""
import shoop.apps
import shoop.xtheme
class Theme(shoop.xtheme.Theme):
    """Xtheme declaration for the Shoop Simple Theme."""
    identifier = "shoop_simple_theme"
    name = "Shoop Simple Theme"
    # Directory (under the app's templates/) holding the theme's templates.
    template_dir = "shoop_simple_theme/"
class AppConfig(shoop.apps.AppConfig):
    """Django app config registering this theme with Shoop's xtheme system."""
    name = __name__
    verbose_name = Theme.name
    label = "shoop_simple_theme"
    # Advertise the Theme class through the "xtheme" provides category.
    provides = {
        "xtheme": __name__ + ":Theme"
    }
# Let Django pick up the AppConfig above by default.
default_app_config = __name__ + ".AppConfig"
|
xoner/mate-grid | refs/heads/master | create-shotcuts.py | 1 | from subprocess import call
# Change to match mate-grid installation path
mate_grid_path = '/opt/mate-grid/mate-grid.py'
shortcuts = {
'mate-grid-move-next-mon': {
'name': 'Move window to next Monitor',
'action': 'python {} -cm'.format(mate_grid_path),
'binding': '<Shift><Mod4>Right'
},
'mate-grid-move-prev-mon': {
'name': 'Move window to previous Monitor',
'action': 'python {} -cm'.format(mate_grid_path),
'binding': '<Shift><Mod4>Left'
},
'mate-grid-move-left': {
'name': "Move window to the left half",
'action': 'python {} -ml'.format(mate_grid_path),
'binding': '<Mod4>Left'
},
'mate-grid-move-right': {
'name': "Move window to the left half",
'action': 'python {} -mr'.format(mate_grid_path),
'binding': '<Mod4>Right'
},
'mate-grid-move-top': {
'name': "Move window to the top half",
'action': 'python {} -mt'.format(mate_grid_path),
'binding': '<Ctrl><Mod4>Up'
},
'mate-grid-move-bottom': {
'name': "Move window to the bottom half",
'action': 'python {} -mb'.format(mate_grid_path),
'binding': '<Ctrl><Mod4>Down'
},
'mate-grid-move-top-left': {
'name': "Move window to the top left quarter",
'action': 'python {} -mtl'.format(mate_grid_path),
'binding': '<Shift><Ctrl><Mod4>Up'
},
'mate-grid-move-top-right': {
'name': "Move window to the top right quarter",
'action': 'python {} -mtr'.format(mate_grid_path),
'binding': '<Shift><Ctrl><Mod4>Right'
},
'mate-grid-move-bottom-left': {
'name': "Move window to the bottom left quarter",
'action': 'python {} -mbl'.format(mate_grid_path),
'binding': '<Shift><Ctrl><Mod4>Left'
},
'mate-grid-move-bottom-right': {
'name': "Move window to the bottom right quarter",
'action': 'python {} -mbr'.format(mate_grid_path),
'binding': '<Shift><Ctrl><Mod4>Down'
}
}
for key_container, params in shortcuts.iteritems():
for key, value in params.iteritems():
command = ['gsettings', 'set',
'org.mate.control-center.keybinding:\
/org/mate/desktop/keybindings/{}/'.format(key_container),
key,
'"{}"'.format(value)
]
call(command)
# Debug.
# print " ".join(command)
|
UQ-UQx/edx-platform_lti | refs/heads/master | common/djangoapps/student/management/commands/create_random_users.py | 9 | """
A script to create some dummy users
"""
import uuid
from django.core.management.base import BaseCommand
from student.models import CourseEnrollment
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from student.views import _do_create_account
def get_random_post_override():
    """
    Build registration data for one dummy user, keyed off a fresh
    8-character hex id so every call yields a unique account.
    """
    unique_id = uuid.uuid4().hex[:8]
    return {
        'username': 'user_' + unique_id,
        'email': 'email_' + unique_id + '@example.com',
        'password': '12345',
        'name': 'User ' + unique_id,
    }
def create(num, course_key):
    """Create num users, enrolling them in course_key if it's not None"""
    for _ in range(num):
        user, user_profile, __ = _do_create_account(get_random_post_override())
        if course_key is not None:
            CourseEnrollment.enroll(user, course_key)
class Command(BaseCommand):
    help = """Create N new users, with random parameters.
    Usage: create_random_users.py N [course_id_to_enroll_in].
    Examples:
      create_random_users.py 1
      create_random_users.py 10 MITx/6.002x/2012_Fall
      create_random_users.py 100 HarvardX/CS50x/2012
    """
    def handle(self, *args, **options):
        # Expect 1 or 2 positional args: count, then an optional course id.
        if len(args) < 1 or len(args) > 2:
            print Command.help
            return
        num = int(args[0])
        if len(args) == 2:
            # Accept both the new opaque-key format and the legacy
            # slash-separated course id format.
            try:
                course_key = CourseKey.from_string(args[1])
            except InvalidKeyError:
                course_key = SlashSeparatedCourseKey.from_deprecated_string(args[1])
        else:
            course_key = None
        create(num, course_key)
|
h3llrais3r/Auto-Subliminal | refs/heads/master | autosubliminal/providers/__init__.py | 1054 | # coding=utf-8
|
matthiasdiener/spack | refs/heads/develop | var/spack/repos/builtin/packages/py-guidata/package.py | 5 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyGuidata(PythonPackage):
    """Automatic graphical user interfaces generation for easy dataset editing
    and display"""
    homepage = "https://github.com/PierreRaybaut/guidata"
    url = "https://pypi.io/packages/source/g/guidata/guidata-1.7.5.zip"
    version('1.7.5', '915188c02ad3c89951ee260db65d84a7')
    # Build-time and runtime dependencies of guidata.
    depends_on('py-setuptools', type='build')
    depends_on('py-pyqt@4:', type=('build', 'run'))
    depends_on('py-spyder@2.0:2.9.9', type=('build', 'run'))
    depends_on('py-h5py', type=('build', 'run'))
|
google-code/android-scripting | refs/heads/master | python/src/Lib/test/bad_coding2.py | 181 | #coding: utf8
print '我'
|
timokoola/finnkinotxt | refs/heads/master | requests/packages/urllib3/_collections.py | 201 | from __future__ import absolute_import
from collections import Mapping, MutableMapping
try:
from threading import RLock
except ImportError: # Platform-specific: No threads available
    class RLock:
        """No-op stand-in for threading.RLock on platforms without threads.

        Only the context-manager protocol is needed by this module, so
        __enter__/__exit__ simply do nothing.
        """
        def __enter__(self):
            pass
        def __exit__(self, exc_type, exc_value, traceback):
            pass
try: # Python 2.7+
from collections import OrderedDict
except ImportError:
from .packages.ordered_dict import OrderedDict
from .packages.six import iterkeys, itervalues, PY3
__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']
_Null = object()
class RecentlyUsedContainer(MutableMapping):
    """
    Provides a thread-safe dict-like container which maintains up to
    ``maxsize`` keys while throwing away the least-recently-used keys beyond
    ``maxsize``.
    :param maxsize:
        Maximum number of recent elements to retain.
    :param dispose_func:
        Every time an item is evicted from the container,
        ``dispose_func(value)`` is called. Callback which will get called
    """
    # OrderedDict keeps insertion order, which doubles as recency order here:
    # the first entry is always the least-recently-used one.
    ContainerCls = OrderedDict
    def __init__(self, maxsize=10, dispose_func=None):
        self._maxsize = maxsize
        self.dispose_func = dispose_func
        self._container = self.ContainerCls()
        self.lock = RLock()
    def __getitem__(self, key):
        # Re-insert the item, moving it to the end of the eviction line.
        with self.lock:
            item = self._container.pop(key)
            self._container[key] = item
            return item
    def __setitem__(self, key, value):
        evicted_value = _Null
        with self.lock:
            # Possibly evict the existing value of 'key'
            evicted_value = self._container.get(key, _Null)
            self._container[key] = value
            # If we didn't evict an existing value, we might have to evict the
            # least recently used item from the beginning of the container.
            if len(self._container) > self._maxsize:
                _key, evicted_value = self._container.popitem(last=False)
        # The dispose callback runs outside the lock so user code cannot
        # deadlock or stall other threads while handling the evicted value.
        if self.dispose_func and evicted_value is not _Null:
            self.dispose_func(evicted_value)
    def __delitem__(self, key):
        with self.lock:
            # pop() raises KeyError for a missing key, matching dict semantics.
            value = self._container.pop(key)
        if self.dispose_func:
            self.dispose_func(value)
    def __len__(self):
        with self.lock:
            return len(self._container)
    def __iter__(self):
        raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
    def clear(self):
        with self.lock:
            # Copy pointers to all values, then wipe the mapping
            values = list(itervalues(self._container))
            self._container.clear()
        # Dispose of the removed values after releasing the lock.
        if self.dispose_func:
            for value in values:
                self.dispose_func(value)
    def keys(self):
        with self.lock:
            return list(iterkeys(self._container))
class HTTPHeaderDict(MutableMapping):
    """
    :param headers:
        An iterable of field-value pairs. Must not contain multiple field names
        when compared case-insensitively.
    :param kwargs:
        Additional field-value pairs to pass in to ``dict.update``.
    A ``dict`` like container for storing HTTP Headers.
    Field names are stored and compared case-insensitively in compliance with
    RFC 7230. Iteration provides the first case-sensitive key seen for each
    case-insensitive pair.
    Using ``__setitem__`` syntax overwrites fields that compare equal
    case-insensitively in order to maintain ``dict``'s api. For fields that
    compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
    in a loop.
    If multiple fields that are equal case-insensitively are passed to the
    constructor or ``.update``, the behavior is undefined and some will be
    lost.
    >>> headers = HTTPHeaderDict()
    >>> headers.add('Set-Cookie', 'foo=bar')
    >>> headers.add('set-cookie', 'baz=quxx')
    >>> headers['content-length'] = '7'
    >>> headers['SET-cookie']
    'foo=bar, baz=quxx'
    >>> headers['Content-Length']
    '7'
    """
    def __init__(self, headers=None, **kwargs):
        super(HTTPHeaderDict, self).__init__()
        # Internal storage maps lowercased-name -> (OriginalName, value) tuple,
        # which becomes [OriginalName, value1, value2, ...] once duplicates
        # are added via add()/extend().
        self._container = {}
        if headers is not None:
            if isinstance(headers, HTTPHeaderDict):
                self._copy_from(headers)
            else:
                self.extend(headers)
        if kwargs:
            self.extend(kwargs)
    def __setitem__(self, key, val):
        # Overwrites any existing (possibly multi-valued) entry for this field.
        self._container[key.lower()] = (key, val)
        return self._container[key.lower()]
    def __getitem__(self, key):
        val = self._container[key.lower()]
        # Multiple stored values are folded into one comma-separated string.
        return ', '.join(val[1:])
    def __delitem__(self, key):
        del self._container[key.lower()]
    def __contains__(self, key):
        return key.lower() in self._container
    def __eq__(self, other):
        # Only mapping-like objects can compare equal; comparison is
        # case-insensitive on field names and uses the merged values.
        if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
            return False
        if not isinstance(other, type(self)):
            other = type(self)(other)
        return (dict((k.lower(), v) for k, v in self.itermerged()) ==
                dict((k.lower(), v) for k, v in other.itermerged()))
    def __ne__(self, other):
        return not self.__eq__(other)
    if not PY3:  # Python 2
        iterkeys = MutableMapping.iterkeys
        itervalues = MutableMapping.itervalues
    # Private sentinel so pop() can distinguish "no default given" from
    # an explicit default of None.
    __marker = object()
    def __len__(self):
        return len(self._container)
    def __iter__(self):
        # Only provide the originally cased names
        for vals in self._container.values():
            yield vals[0]
    def pop(self, key, default=__marker):
        '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.
        '''
        # Using the MutableMapping function directly fails due to the private marker.
        # Using ordinary dict.pop would expose the internal structures.
        # So let's reinvent the wheel.
        try:
            value = self[key]
        except KeyError:
            if default is self.__marker:
                raise
            return default
        else:
            del self[key]
            return value
    def discard(self, key):
        # Like __delitem__ but silent when the field is absent.
        try:
            del self[key]
        except KeyError:
            pass
    def add(self, key, val):
        """Adds a (name, value) pair, doesn't overwrite the value if it already
        exists.
        >>> headers = HTTPHeaderDict(foo='bar')
        >>> headers.add('Foo', 'baz')
        >>> headers['foo']
        'bar, baz'
        """
        key_lower = key.lower()
        new_vals = key, val
        # Keep the common case aka no item present as fast as possible
        vals = self._container.setdefault(key_lower, new_vals)
        if new_vals is not vals:
            # new_vals was not inserted, as there was a previous one
            if isinstance(vals, list):
                # If already several items got inserted, we have a list
                vals.append(val)
            else:
                # vals should be a tuple then, i.e. only one item so far
                # Need to convert the tuple to list for further extension
                self._container[key_lower] = [vals[0], vals[1], val]
    def extend(self, *args, **kwargs):
        """Generic import function for any type of header-like object.
        Adapted version of MutableMapping.update in order to insert items
        with self.add instead of self.__setitem__
        """
        if len(args) > 1:
            raise TypeError("extend() takes at most 1 positional "
                            "arguments ({0} given)".format(len(args)))
        other = args[0] if len(args) >= 1 else ()
        if isinstance(other, HTTPHeaderDict):
            # Preserve every duplicate value from the other header dict.
            for key, val in other.iteritems():
                self.add(key, val)
        elif isinstance(other, Mapping):
            for key in other:
                self.add(key, other[key])
        elif hasattr(other, "keys"):
            for key in other.keys():
                self.add(key, other[key])
        else:
            for key, value in other:
                self.add(key, value)
        for key, value in kwargs.items():
            self.add(key, value)
    def getlist(self, key):
        """Returns a list of all the values for the named field. Returns an
        empty list if the key doesn't exist."""
        try:
            vals = self._container[key.lower()]
        except KeyError:
            return []
        else:
            if isinstance(vals, tuple):
                return [vals[1]]
            else:
                return vals[1:]
    # Backwards compatibility for httplib
    getheaders = getlist
    getallmatchingheaders = getlist
    iget = getlist
    def __repr__(self):
        return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
    def _copy_from(self, other):
        # Copy internal state, keeping duplicate values but not sharing the
        # underlying lists with the source object.
        for key in other:
            val = other.getlist(key)
            if isinstance(val, list):
                # Don't need to convert tuples
                val = list(val)
            self._container[key.lower()] = [key] + val
    def copy(self):
        clone = type(self)()
        clone._copy_from(self)
        return clone
    def iteritems(self):
        """Iterate over all header lines, including duplicate ones."""
        for key in self:
            vals = self._container[key.lower()]
            for val in vals[1:]:
                yield vals[0], val
    def itermerged(self):
        """Iterate over all headers, merging duplicate ones together."""
        for key in self:
            val = self._container[key.lower()]
            yield val[0], ', '.join(val[1:])
    def items(self):
        return list(self.iteritems())
    @classmethod
    def from_httplib(cls, message):  # Python 2
        """Read headers from a Python 2 httplib message object."""
        # python2.7 does not expose a proper API for exporting multiheaders
        # efficiently. This function re-reads raw lines from the message
        # object and extracts the multiheaders properly.
        headers = []
        for line in message.headers:
            # A line starting with whitespace is a continuation of the
            # previous header (RFC 822 folding).
            if line.startswith((' ', '\t')):
                key, value = headers[-1]
                headers[-1] = (key, value + '\r\n' + line.rstrip())
                continue
            key, value = line.split(':', 1)
            headers.append((key, value.strip()))
        return cls(headers)
|
ymero/learn-python3 | refs/heads/master | samples/async/coroutine.py | 20 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
def consumer():
    """Generator-based consumer coroutine.

    Prime with ``send(None)`` (yields ``''``), then each value sent in is
    "consumed" (logged) and acknowledged with ``'200 OK'``.  Sending a falsy
    value terminates the generator.
    """
    reply = ''
    while True:
        item = yield reply
        if not item:
            return
        print('[CONSUMER] Consuming %s...' % item)
        reply = '200 OK'
def produce(c):
    """Drive the consumer coroutine *c*: prime it, send 1..5, then close it."""
    c.send(None)  # advance the generator to its first yield
    for n in range(1, 6):
        print('[PRODUCER] Producing %s...' % n)
        reply = c.send(n)
        print('[PRODUCER] Consumer return: %s' % reply)
    c.close()
# Wire the producer and consumer together and run the exchange.
c = consumer()
produce(c)
|
jlward/django-authority | refs/heads/master | example/exampleapp/models.py | 10644 | from django.db import models
# Create your models here.
|
SnappleCap/oh-mainline | refs/heads/master | vendor/packages/python-otp/setup.py | 16 | #!/usr/bin/python
from distutils.core import setup
# Distutils package metadata for python-otp (RFC 2289 one-time passwords).
setup(name="python-otp",
      version = "1.0",
      description = "Python module for one-time passwords",
      author = "Gustavo Niemeyer",
      author_email = "niemeyer@conectiva.com",
      url = "http://moin.conectiva.com.br/PythonOtp",
      license = "GPL",
      long_description =
"""python-otp is a module which implements support for all requirements,
recommendations, and optional features described in RFC2289. This RFC
defines a standard for the implementation of OTP, or one-time passwords.
""",
      packages = ["otp"],
      )
|
monmaks/ckanext-ceon | refs/heads/master | ckanext/ceon/config.py | 3 | #!/usr/bin/python
# vim: set fileencoding=utf-8
from logging import getLogger
from pylons import config
from paste.deploy.converters import asbool
DOI_TEST_PREFIX = '10.5072'
DOI_ENDPOINT = 'https://mds.datacite.org'
DOI_TEST_ENDPOINT = 'https://test.datacite.org/mds'
def get_doi_prefix():
    # In test mode (the default) return the DataCite sandbox prefix 10.5072;
    # otherwise the site's registered prefix from the CKAN configuration.
    return DOI_TEST_PREFIX if asbool(config.get("ckanext.ceon.doi_test_mode", True)) else config.get("ckanext.ceon.doi_prefix")
def get_doi_endpoint():
    # Pick the DataCite MDS test endpoint in test mode (the default),
    # the production endpoint otherwise.
    return DOI_TEST_ENDPOINT if asbool(config.get("ckanext.ceon.doi_test_mode", True)) else DOI_ENDPOINT
def get_site_url():
    """
    Get the site URL.
    Prefer ``ckanext.ceon.doi_site_url``; when unset (or empty) fall back to
    ``ckan.site_url``.
    @return: the configured site URL without a trailing slash
    """
    site_url = config.get("ckanext.ceon.doi_site_url") or config.get('ckan.site_url')
    return site_url.rstrip('/')
|
dwcarder/sdn-ix-demo | refs/heads/master | exabgp-3.4.3/lib/exabgp/protocol/ip/__init__.py | 1 | # encoding: utf-8
"""
ip/__init__.py
Created by Thomas Mangin on 2010-01-15.
Copyright (c) 2009-2013 Exa Networks. All rights reserved.
"""
import socket
from exabgp.protocol.family import AFI
from exabgp.protocol.family import SAFI
# =========================================================================== IP
#
class IP (object):
    """Base class for textual/packed IP addresses (IPv4 and IPv6)."""
    # Registry mapping AFI -> concrete subclass; populated by register().
    _known = dict()
    _UNICAST = SAFI(SAFI.unicast)
    _MULTICAST = SAFI(SAFI.multicast)
    # First-octet values of the IPv4 multicast block (224.0.0.0/4).
    _multicast_range = set(range(224,240)) # 239
    __slots__ = ['ip','packed']
    def __init__ (self):
        # Direct construction is forbidden: the factory picks the subclass.
        raise Exception("You should use IP.create() to use IP")
    def init (self,ip,packed):
        # Two-step initialisation used by subclasses (bypasses __init__).
        self.ip = ip
        self.packed = packed
        return self
    @staticmethod
    def pton (ip):
        # Presentation (text) to network (packed bytes) conversion.
        return socket.inet_pton(IP.toaf(ip),ip)
    @staticmethod
    def ntop (data):
        # Packed bytes to text; 4-byte payloads are IPv4, everything else IPv6.
        return socket.inet_ntop(socket.AF_INET if len(data) == 4 else socket.AF_INET6,data)
    @staticmethod
    def toaf (ip):
        # the orders matters as ::FFFF:<ipv4> is an IPv6 address
        if ':' in ip:
            return socket.AF_INET6
        if '.' in ip:
            return socket.AF_INET
        raise Exception('unrecognised ip address %s' % ip)
    @staticmethod
    def toafi (ip):
        # the orders matters as ::FFFF:<ipv4> is an IPv6 address
        if ':' in ip:
            return AFI.ipv6
        if '.' in ip:
            return AFI.ipv4
        raise Exception('unrecognised ip address %s' % ip)
    @staticmethod
    def tosafi (ip):
        if ':' in ip:
            # XXX: FIXME: I assume that ::FFFF:<ip> must be treated unicast
            # if int(ip.split(':')[-1].split('.')[0]) in IP._multicast_range:
            return SAFI.unicast
        elif '.' in ip:
            # IPv4: first octet in 224..239 means multicast.
            if int(ip.split('.')[0]) in IP._multicast_range:
                return SAFI.multicast
            return SAFI.unicast
        raise Exception('unrecognised ip address %s' % ip)
    def ipv4 (self):
        return True if len(self.packed) == 4 else False
    def ipv6 (self):
        return False if len(self.packed) == 4 else True
    @staticmethod
    def length (afi):
        # Wire length in bytes for the given address family.
        return 4 if afi == AFI.ipv4 else 16
    def pack (self):
        return self.packed
    def __str__ (self):
        return self.ip
    def __repr__ (self):
        return str(self)
    def __cmp__ (self,other):
        # Python 2 rich-comparison fallback: order by packed representation.
        if not isinstance(other, self.__class__):
            return -1
        if self.packed == other.packed:
            return 0
        if self.packed < other.packed:
            return -1
        return 1
    def __hash__ (self):
        # NOTE(review): assumes Python 2 where packed is a str -- verify
        # before porting, as bytes + str concatenation fails on Python 3.
        return hash(str(self.__class__.__name__) + self.packed)
    @classmethod
    def klass (cls,ip):
        # the orders matters as ::FFFF:<ipv4> is an IPv6 address
        if ':' in ip:
            afi = IPv6.afi
        elif '.' in ip:
            afi = IPv4.afi
        else:
            raise Exception('can not decode this ip address : %s' % ip)
        if afi in cls._known:
            return cls._known[afi]
    @classmethod
    def create (cls,ip,data=None,klass=None):
        # Factory: pick the subclass explicitly (klass) or from the text form.
        if klass:
            return klass(ip,data)
        return cls.klass(ip)(ip,data)
    @classmethod
    def register (cls):
        # Subclasses call this once to enter the AFI -> class registry.
        cls._known[cls.afi] = cls
    @classmethod
    def unpack (cls,data,klass=None):
        return cls.create(IP.ntop(data),data,klass)
# ========================================================================= NoIP
#
class _NoIP (object):
packed = ''
def pack (self,data,negotiated=None):
return ''
def __str__ (self):
return 'none'
NoIP = _NoIP()
# ========================================================================= IPv4
#
class IPv4 (IP):
    """Concrete IPv4 address (text + 4-byte packed form)."""
    # lower case to match the class Address API
    afi = AFI.ipv4
    __slots__ = []
    def __init__ (self,ip,packed=None):
        # Accept a pre-packed value to avoid re-encoding the text form.
        self.init(ip,packed if packed else socket.inet_pton(socket.AF_INET,ip))
    def __len__ (self):
        return 4
    def unicast (self):
        return not self.multicast()
    def multicast (self):
        # NOTE(review): ord() on packed[0] assumes Python 2 str-bytes --
        # confirm before porting to Python 3.
        return ord(self.packed[0]) in set(range(224,240)) # 239 is last
    def ipv4 (self):
        return True
    def ipv6 (self):
        return False
    @staticmethod
    def pton (ip):
        return socket.inet_pton(socket.AF_INET,ip)
    @staticmethod
    def ntop (data):
        return socket.inet_ntop(socket.AF_INET,data)
    # klass is a trick for subclasses of IP/IPv4 such as NextHop / OriginatorID
    @classmethod
    def unpack (cls,data,klass=None):
        ip = socket.inet_ntop(socket.AF_INET,data)
        if klass:
            return klass(ip,data)
        return cls(ip,data)
# Enter IPv4 into the AFI registry used by IP.create().
IPv4.register()
# ========================================================================= IPv6
#
class IPv6 (IP):
    """Concrete IPv6 address (text + 16-byte packed form)."""
    # lower case to match the class Address API
    afi = AFI.ipv6
    __slots__ = []
    def __init__ (self,ip,packed=None):
        self.init(ip,packed if packed else socket.inet_pton(socket.AF_INET6,ip))
    def __len__ (self):
        return 16
    def ipv4 (self):
        return False
    def ipv6 (self):
        return True
    def unicast (self):
        # NOTE(review): IPv6 multicast (ff00::/8) is not detected here;
        # every IPv6 address is reported unicast -- confirm this is intended.
        return True
    def multicast (self):
        return False
    @staticmethod
    def pton (ip):
        return socket.inet_pton(socket.AF_INET6,ip)
    @staticmethod
    def ntop (data):
        return socket.inet_ntop(socket.AF_INET6,data)
    @classmethod
    def unpack (cls,data,klass=None):
        ip6 = socket.inet_ntop(socket.AF_INET6,data)
        if klass:
            return klass(ip6)
        return cls(ip6)
# Enter IPv6 into the AFI registry used by IP.create().
IPv6.register()
|
uwosh/uwosh.addcard | refs/heads/master | uwosh/addcard/validators.py | 2 | #from Products.validation.interfaces import ivalidator
from Products.validation.interfaces.IValidator import IValidator
from zope.interface import implements
import xmlrpclib
import socket
webServiceBaseURL = 'http://ws.it.uwosh.edu:8080/ws/'
webService = xmlrpclib.Server(webServiceBaseURL, allow_none=1)
class CourseNumberValidator:
    """Archetypes-style validator that checks a course number against the
    campus XML-RPC catalog service.

    Returns True when the value is a known catalog number for the (currently
    hard-coded) term and subject, otherwise an error string -- the
    Products.validation convention for a failed validation.
    """
    implements(IValidator)
    # __implements__ = (IValidator,)

    def __init__(self, name="CourseNumberValidator"):
        # Validators are registered and looked up by name.
        self.name = name

    def __call__(self, value, *args, **kwargs):
        # NOTE(review): term and subject are hard-coded; confirm whether they
        # should come from the field/instance being validated.
        psterm = 0o660  # original used Python 2 octal literal 0660 -- TODO confirm intended PeopleSoft term code
        subject1 = 'ENGLISH'
        class_info = webService.getCatalogNumbersAndSectionsByTermAndSubjectCX(
            psterm, subject1)
        # class_info maps catalog numbers to their sections; the value is
        # valid when it is one of those catalog numbers.  (The previous
        # implementation left two pdb.set_trace() breakpoints in place and
        # compared against a list containing only the *first* key repeated,
        # returning the error from the first loop iteration and implicitly
        # returning None on success; all fixed here.)
        if value in class_info:
            return True
        return "No way!"
AstroPrint/AstroBox | refs/heads/master | src/ext/sockjs/tornado/transports/rawwebsocket.py | 9 | # -*- coding: utf-8 -*-
"""
sockjs.tornado.transports.rawwebsocket
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Raw websocket transport implementation
"""
import logging
import socket
from sockjs.tornado import websocket, session
from sockjs.tornado.transports import base
LOG = logging.getLogger("tornado.general")
class RawSession(session.BaseSession):
    """Session with no SockJS protocol encoding/decoding.

    Messages pass straight through between the ``SockJSConnection`` and the
    ``RawWebSocketTransport`` handler.
    """
    def send_message(self, msg, stats=True, binary=False):
        # Hand the payload directly to the websocket handler, unframed.
        self.handler.send_pack(msg, binary)
    def on_message(self, msg):
        # Deliver the raw frame straight to the application connection.
        self.conn.on_message(msg)
class RawWebSocketTransport(websocket.SockJSWebSocketHandler, base.BaseTransportMixin):
    """Raw Websocket transport"""
    name = 'rawwebsocket'
    def initialize(self, server):
        # Tornado handler hook: stash the SockJS server and reset state.
        self.server = server
        self.session = None
        self.active = True
    def open(self):
        # Stats
        self.server.stats.on_conn_opened()
        # Disable nagle if needed
        if self.server.settings['disable_nagle']:
            self.stream.socket.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
        # Create and attach to session
        self.session = RawSession(self.server.get_connection_class(), self.server)
        self.session.set_handler(self)
        self.session.verify_state()
    def _detach(self):
        # Break the handler<->session link exactly once (idempotent).
        if self.session is not None:
            self.session.remove_handler(self)
            self.session = None
    def on_message(self, message):
        # SockJS requires that empty messages should be ignored
        if not message or not self.session:
            return
        try:
            self.session.on_message(message)
        except Exception:
            LOG.exception('RawWebSocket')
            # Close running connection
            self.abort_connection()
    def on_close(self):
        # Close session if websocket connection was closed
        if self.session is not None:
            # Stats
            self.server.stats.on_conn_closed()
            # Detach before closing so session.close() cannot re-enter us.
            session = self.session
            self._detach()
            session.close()
    def send_pack(self, message, binary=False):
        # Send message
        try:
            self.write_message(message, binary)
        except IOError:
            # Peer went away mid-write: schedule cleanup on the IO loop.
            self.server.io_loop.add_callback(self.on_close)
    def session_closed(self):
        # Called by the session when it ends; close the socket best-effort.
        try:
            self.close()
        except IOError:
            pass
        finally:
            self._detach()
    # Websocket overrides
    def allow_draft76(self):
        return True
|
mhoffma/micropython | refs/heads/master | tests/basics/async_for2.py | 49 | # test waiting within "async for" aiter/anext functions
import sys
# Compatibility shim: make a plain generator usable with `await`.
if sys.implementation.name == 'micropython':
    # uPy allows normal generators to be awaitables
    coroutine = lambda f: f
else:
    # CPython needs types.coroutine to mark the generator as awaitable.
    import types
    coroutine = types.coroutine
@coroutine
def f(x):
    # Awaitable that yields twice before returning x + 3; the driver below
    # prints each yielded value and the final return value.
    print('f start:', x)
    yield x + 1
    yield x + 2
    return x + 3
class ARange:
    # Async iterable counting 0..high-1; awaiting inside __aiter__/__anext__
    # is exactly the behaviour this test exercises.
    def __init__(self, high):
        print('init')
        self.cur = 0
        self.high = high
    async def __aiter__(self):
        print('aiter')
        # Awaiting here checks that "async for" drives an awaiting __aiter__.
        print('f returned:', await f(10))
        return self
    async def __anext__(self):
        print('anext')
        print('f returned:', await f(20))
        if self.cur < self.high:
            val = self.cur
            self.cur += 1
            return val
        else:
            # Protocol-mandated way to end an "async for" loop.
            raise StopAsyncIteration
async def coro():
    # "async for" drives ARange.__aiter__/__anext__, which themselves await.
    async for x in ARange(4):
        print('x', x)
# Step the coroutine by hand so every yielded value can be printed.
o = coro()
try:
    while True:
        print('coro yielded:', o.send(None))
except StopIteration:
    print('finished')
|
adrienbrault/home-assistant | refs/heads/dev | tests/components/owntracks/test_device_tracker.py | 8 | """The tests for the Owntracks device tracker."""
import json
from unittest.mock import patch
import pytest
from homeassistant.components import owntracks
from homeassistant.const import STATE_NOT_HOME
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry, async_fire_mqtt_message, mock_coro
USER = "greg"
DEVICE = "phone"
LOCATION_TOPIC = f"owntracks/{USER}/{DEVICE}"
EVENT_TOPIC = f"owntracks/{USER}/{DEVICE}/event"
WAYPOINTS_TOPIC = f"owntracks/{USER}/{DEVICE}/waypoints"
WAYPOINT_TOPIC = f"owntracks/{USER}/{DEVICE}/waypoint"
USER_BLACKLIST = "ram"
WAYPOINTS_TOPIC_BLOCKED = f"owntracks/{USER_BLACKLIST}/{DEVICE}/waypoints"
LWT_TOPIC = f"owntracks/{USER}/{DEVICE}/lwt"
BAD_TOPIC = f"owntracks/{USER}/{DEVICE}/unsupported"
DEVICE_TRACKER_STATE = f"device_tracker.{USER}_{DEVICE}"
IBEACON_DEVICE = "keys"
MOBILE_BEACON_FMT = "device_tracker.beacon_{}"
CONF_MAX_GPS_ACCURACY = "max_gps_accuracy"
CONF_WAYPOINT_IMPORT = owntracks.CONF_WAYPOINT_IMPORT
CONF_WAYPOINT_WHITELIST = owntracks.CONF_WAYPOINT_WHITELIST
CONF_SECRET = owntracks.CONF_SECRET
CONF_MQTT_TOPIC = owntracks.CONF_MQTT_TOPIC
CONF_EVENTS_ONLY = owntracks.CONF_EVENTS_ONLY
CONF_REGION_MAPPING = owntracks.CONF_REGION_MAPPING
TEST_ZONE_LAT = 45.0
TEST_ZONE_LON = 90.0
TEST_ZONE_DEG_PER_M = 0.0000127
FIVE_M = TEST_ZONE_DEG_PER_M * 5.0
# Home Assistant Zones
INNER_ZONE = {
"name": "zone",
"latitude": TEST_ZONE_LAT + 0.1,
"longitude": TEST_ZONE_LON + 0.1,
"radius": 50,
}
OUTER_ZONE = {
"name": "zone",
"latitude": TEST_ZONE_LAT,
"longitude": TEST_ZONE_LON,
"radius": 100000,
}
def build_message(test_params, default_params):
    """Return a copy of *default_params* with *test_params* overrides applied."""
    return {**default_params, **test_params}
# Default message parameters
DEFAULT_LOCATION_MESSAGE = {
"_type": "location",
"lon": OUTER_ZONE["longitude"],
"lat": OUTER_ZONE["latitude"],
"acc": 60,
"tid": "user",
"t": "u",
"batt": 92,
"cog": 248,
"alt": 27,
"p": 101.3977584838867,
"vac": 4,
"tst": 1,
"vel": 0,
}
# Owntracks will publish a transition when crossing
# a circular region boundary.
ZONE_EDGE = TEST_ZONE_DEG_PER_M * INNER_ZONE["radius"]
DEFAULT_TRANSITION_MESSAGE = {
"_type": "transition",
"t": "c",
"lon": INNER_ZONE["longitude"],
"lat": INNER_ZONE["latitude"] - ZONE_EDGE,
"acc": 60,
"event": "enter",
"tid": "user",
"desc": "inner",
"wtst": 1,
"tst": 2,
}
# iBeacons that are named the same as an HA zone
# are used to trigger enter and leave updates
# for that zone. In this case the "inner" zone.
#
# iBeacons that do not share an HA zone name
# are treated as mobile tracking devices for
# objects which can't track themselves e.g. keys.
#
# iBeacons are typically configured with the
# default lat/lon 0.0/0.0 and have acc 0.0 but
# regardless the reported location is not trusted.
#
# Owntracks will send both a location message
# for the device and an 'event' message for
# the beacon transition.
DEFAULT_BEACON_TRANSITION_MESSAGE = {
"_type": "transition",
"t": "b",
"lon": 0.0,
"lat": 0.0,
"acc": 0.0,
"event": "enter",
"tid": "user",
"desc": "inner",
"wtst": 1,
"tst": 2,
}
# Location messages
LOCATION_MESSAGE = DEFAULT_LOCATION_MESSAGE
LOCATION_MESSAGE_INACCURATE = build_message(
{
"lat": INNER_ZONE["latitude"] - ZONE_EDGE,
"lon": INNER_ZONE["longitude"] - ZONE_EDGE,
"acc": 2000,
},
LOCATION_MESSAGE,
)
LOCATION_MESSAGE_ZERO_ACCURACY = build_message(
{
"lat": INNER_ZONE["latitude"] - ZONE_EDGE,
"lon": INNER_ZONE["longitude"] - ZONE_EDGE,
"acc": 0,
},
LOCATION_MESSAGE,
)
LOCATION_MESSAGE_NOT_HOME = build_message(
{
"lat": OUTER_ZONE["latitude"] - 2.0,
"lon": INNER_ZONE["longitude"] - 2.0,
"acc": 100,
},
LOCATION_MESSAGE,
)
# Region GPS messages
REGION_GPS_ENTER_MESSAGE = DEFAULT_TRANSITION_MESSAGE
REGION_GPS_LEAVE_MESSAGE = build_message(
{
"lon": INNER_ZONE["longitude"] - ZONE_EDGE * 10,
"lat": INNER_ZONE["latitude"] - ZONE_EDGE * 10,
"event": "leave",
},
DEFAULT_TRANSITION_MESSAGE,
)
REGION_GPS_ENTER_MESSAGE_INACCURATE = build_message(
{"acc": 2000}, REGION_GPS_ENTER_MESSAGE
)
REGION_GPS_LEAVE_MESSAGE_INACCURATE = build_message(
{"acc": 2000}, REGION_GPS_LEAVE_MESSAGE
)
REGION_GPS_ENTER_MESSAGE_ZERO = build_message({"acc": 0}, REGION_GPS_ENTER_MESSAGE)
REGION_GPS_LEAVE_MESSAGE_ZERO = build_message({"acc": 0}, REGION_GPS_LEAVE_MESSAGE)
REGION_GPS_LEAVE_MESSAGE_OUTER = build_message(
{
"lon": OUTER_ZONE["longitude"] - 2.0,
"lat": OUTER_ZONE["latitude"] - 2.0,
"desc": "outer",
"event": "leave",
},
DEFAULT_TRANSITION_MESSAGE,
)
REGION_GPS_ENTER_MESSAGE_OUTER = build_message(
{
"lon": OUTER_ZONE["longitude"],
"lat": OUTER_ZONE["latitude"],
"desc": "outer",
"event": "enter",
},
DEFAULT_TRANSITION_MESSAGE,
)
# Region Beacon messages
REGION_BEACON_ENTER_MESSAGE = DEFAULT_BEACON_TRANSITION_MESSAGE
REGION_BEACON_LEAVE_MESSAGE = build_message(
{"event": "leave"}, DEFAULT_BEACON_TRANSITION_MESSAGE
)
# Mobile Beacon messages
MOBILE_BEACON_ENTER_EVENT_MESSAGE = build_message(
{"desc": IBEACON_DEVICE}, DEFAULT_BEACON_TRANSITION_MESSAGE
)
MOBILE_BEACON_LEAVE_EVENT_MESSAGE = build_message(
{"desc": IBEACON_DEVICE, "event": "leave"}, DEFAULT_BEACON_TRANSITION_MESSAGE
)
# Waypoint messages
WAYPOINTS_EXPORTED_MESSAGE = {
"_type": "waypoints",
"_creator": "test",
"waypoints": [
{
"_type": "waypoint",
"tst": 3,
"lat": 47,
"lon": 9,
"rad": 10,
"desc": "exp_wayp1",
},
{
"_type": "waypoint",
"tst": 4,
"lat": 3,
"lon": 9,
"rad": 500,
"desc": "exp_wayp2",
},
],
}
WAYPOINTS_UPDATED_MESSAGE = {
"_type": "waypoints",
"_creator": "test",
"waypoints": [
{
"_type": "waypoint",
"tst": 4,
"lat": 9,
"lon": 47,
"rad": 50,
"desc": "exp_wayp1",
}
],
}
WAYPOINT_MESSAGE = {
"_type": "waypoint",
"tst": 4,
"lat": 9,
"lon": 47,
"rad": 50,
"desc": "exp_wayp1",
}
WAYPOINT_ENTITY_NAMES = [
"zone.greg_phone_exp_wayp1",
"zone.greg_phone_exp_wayp2",
"zone.ram_phone_exp_wayp1",
"zone.ram_phone_exp_wayp2",
]
LWT_MESSAGE = {"_type": "lwt", "tst": 1}
BAD_MESSAGE = {"_type": "unsupported", "tst": 1}
BAD_JSON_PREFIX = "--$this is bad json#--"
BAD_JSON_SUFFIX = "** and it ends here ^^"
# pylint: disable=invalid-name, len-as-condition, redefined-outer-name
@pytest.fixture
def setup_comp(hass, mock_device_tracker_conf, mqtt_mock):
    """Initialize components."""
    assert hass.loop.run_until_complete(
        async_setup_component(hass, "persistent_notification", {})
    )
    hass.loop.run_until_complete(async_setup_component(hass, "device_tracker", {}))
    # Pre-create the zones the tests expect devices to enter and leave.
    hass.states.async_set("zone.inner", "zoning", INNER_ZONE)
    hass.states.async_set("zone.inner_2", "zoning", INNER_ZONE)
    hass.states.async_set("zone.outer", "zoning", OUTER_ZONE)
    yield
async def setup_owntracks(hass, config, ctx_cls=owntracks.OwnTracksContext):
    """Set up OwnTracks."""
    # Register a config entry so the component initialises with a webhook id
    # and shared secret, as a real onboarding flow would.
    MockConfigEntry(
        domain="owntracks", data={"webhook_id": "owntracks_test", "secret": "abcd"}
    ).add_to_hass(hass)
    # Substitute the context class so tests may intercept construction.
    with patch.object(owntracks, "OwnTracksContext", ctx_cls):
        assert await async_setup_component(hass, "owntracks", {"owntracks": config})
        await hass.async_block_till_done()
@pytest.fixture
def context(hass, setup_comp):
    """Set up the mocked context."""
    orig_context = owntracks.OwnTracksContext
    context = None
    # pylint: disable=no-value-for-parameter
    def store_context(*args):
        """Store the context."""
        # Capture the real context instance so tests can inspect its state.
        nonlocal context
        context = orig_context(*args)
        return context
    hass.loop.run_until_complete(
        setup_owntracks(
            hass,
            {
                CONF_MAX_GPS_ACCURACY: 200,
                CONF_WAYPOINT_IMPORT: True,
                CONF_WAYPOINT_WHITELIST: ["jon", "greg"],
            },
            store_context,
        )
    )
    def get_context():
        """Get the current context."""
        return context
    # Yield an accessor (not the object) because the context is created lazily.
    yield get_context
async def send_message(hass, topic, message, corrupt=False):
    """Test the sending of a message."""
    str_message = json.dumps(message)
    if corrupt:
        # Wrap the JSON payload in junk to simulate a corrupted MQTT message.
        mod_message = BAD_JSON_PREFIX + str_message + BAD_JSON_SUFFIX
    else:
        mod_message = str_message
    async_fire_mqtt_message(hass, topic, mod_message)
    await hass.async_block_till_done()
    # Second pass lets tasks scheduled by the first round settle too.
    await hass.async_block_till_done()
def assert_location_state(hass, location):
    """Assert that the tracked device's state equals *location*."""
    tracker = hass.states.get(DEVICE_TRACKER_STATE)
    assert tracker.state == location
def assert_location_latitude(hass, latitude):
    """Assert that the tracked device reports the given latitude."""
    tracker = hass.states.get(DEVICE_TRACKER_STATE)
    assert tracker.attributes.get("latitude") == latitude
def assert_location_longitude(hass, longitude):
    """Assert that the tracked device reports the given longitude."""
    tracker = hass.states.get(DEVICE_TRACKER_STATE)
    assert tracker.attributes.get("longitude") == longitude
def assert_location_accuracy(hass, accuracy):
    """Assert that the tracked device reports the given GPS accuracy."""
    tracker = hass.states.get(DEVICE_TRACKER_STATE)
    assert tracker.attributes.get("gps_accuracy") == accuracy
def assert_location_source_type(hass, source_type):
    """Assert that the tracked device reports the given source type."""
    tracker = hass.states.get(DEVICE_TRACKER_STATE)
    assert tracker.attributes.get("source_type") == source_type
def assert_mobile_tracker_state(hass, location, beacon=IBEACON_DEVICE):
    """Assert that the mobile beacon tracker's state equals *location*."""
    tracker = hass.states.get(MOBILE_BEACON_FMT.format(beacon))
    assert tracker.state == location
def assert_mobile_tracker_latitude(hass, latitude, beacon=IBEACON_DEVICE):
    """Assert that the mobile beacon tracker reports the given latitude."""
    tracker = hass.states.get(MOBILE_BEACON_FMT.format(beacon))
    assert tracker.attributes.get("latitude") == latitude
def assert_mobile_tracker_accuracy(hass, accuracy, beacon=IBEACON_DEVICE):
    """Assert that the mobile beacon tracker reports the given accuracy."""
    tracker = hass.states.get(MOBILE_BEACON_FMT.format(beacon))
    assert tracker.attributes.get("gps_accuracy") == accuracy
async def test_location_invalid_devid(hass, context):
    """Test the update of a location."""
    # The '-' in the device id is slugified to '_' in the resulting entity id.
    await send_message(hass, "owntracks/paulus/nexus-5x", LOCATION_MESSAGE)
    state = hass.states.get("device_tracker.paulus_nexus_5x")
    assert state.state == "outer"
async def test_location_update(hass, context):
    """Test the update of a location."""
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # Position, accuracy and zone all come straight from the message payload.
    assert_location_source_type(hass, "gps")
    assert_location_latitude(hass, LOCATION_MESSAGE["lat"])
    assert_location_accuracy(hass, LOCATION_MESSAGE["acc"])
    assert_location_state(hass, "outer")
async def test_location_update_no_t_key(hass, context):
    """Test the update of a location when message does not contain 't'."""
    # Drop the trigger key; processing must be unaffected.
    message = LOCATION_MESSAGE.copy()
    message.pop("t")
    await send_message(hass, LOCATION_TOPIC, message)
    assert_location_source_type(hass, "gps")
    assert_location_latitude(hass, LOCATION_MESSAGE["lat"])
    assert_location_accuracy(hass, LOCATION_MESSAGE["acc"])
    assert_location_state(hass, "outer")
async def test_location_inaccurate_gps(hass, context):
    """Test the location for inaccurate GPS information."""
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # Second message exceeds CONF_MAX_GPS_ACCURACY (200) and must be dropped.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_INACCURATE)
    # Ignored inaccurate GPS. Location remains at previous.
    assert_location_latitude(hass, LOCATION_MESSAGE["lat"])
    assert_location_longitude(hass, LOCATION_MESSAGE["lon"])
async def test_location_zero_accuracy_gps(hass, context):
    """Ignore the location for zero accuracy GPS information."""
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # acc == 0 is treated as "no fix" and must not move the tracker.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_ZERO_ACCURACY)
    # Ignored inaccurate GPS. Location remains at previous.
    assert_location_latitude(hass, LOCATION_MESSAGE["lat"])
    assert_location_longitude(hass, LOCATION_MESSAGE["lon"])
# ------------------------------------------------------------------------
# GPS based event entry / exit testing
async def test_event_gps_entry_exit(hass, context):
    """Test the entry event."""
    # Entering the owntracks circular region named "inner"
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    # Enter uses the zone's gps co-ords
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_accuracy(hass, INNER_ZONE["radius"])
    assert_location_state(hass, "inner")
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # Updates ignored when in a zone
    # note that LOCATION_MESSAGE is actually pretty far
    # from INNER_ZONE and has good accuracy. I haven't
    # received a transition message though so I'm still
    # associated with the inner zone regardless of GPS.
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_accuracy(hass, INNER_ZONE["radius"])
    assert_location_state(hass, "inner")
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    # Exit switches back to GPS
    assert_location_latitude(hass, REGION_GPS_LEAVE_MESSAGE["lat"])
    assert_location_accuracy(hass, REGION_GPS_LEAVE_MESSAGE["acc"])
    assert_location_state(hass, "outer")
    # Left clean zone state
    assert not context().regions_entered[USER]
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # Now sending a location update moves me again.
    assert_location_latitude(hass, LOCATION_MESSAGE["lat"])
    assert_location_accuracy(hass, LOCATION_MESSAGE["acc"])
async def test_event_gps_with_spaces(hass, context):
    """Test the entry event."""
    # Region names containing spaces must survive the round trip intact.
    message = build_message({"desc": "inner 2"}, REGION_GPS_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, "inner 2")
    message = build_message({"desc": "inner 2"}, REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    # Left clean zone state
    assert not context().regions_entered[USER]
async def test_event_gps_entry_inaccurate(hass, context):
    """Test that an inaccurate GPS fix still triggers zone entry."""
    # Set location to the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE_INACCURATE)
    # I enter the zone even though the message GPS was inaccurate.
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_accuracy(hass, INNER_ZONE["radius"])
    assert_location_state(hass, "inner")
async def test_event_gps_entry_exit_inaccurate(hass, context):
    """Test that an inaccurate exit keeps zone coords but leaves the region."""
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    # Enter uses the zone's gps co-ords
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_accuracy(hass, INNER_ZONE["radius"])
    assert_location_state(hass, "inner")
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE_INACCURATE)
    # Exit doesn't use inaccurate gps
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_accuracy(hass, INNER_ZONE["radius"])
    assert_location_state(hass, "inner")
    # But does exit region correctly
    assert not context().regions_entered[USER]
async def test_event_gps_entry_exit_zero_accuracy(hass, context):
    """Test entry/exit events whose GPS accuracy is zero.

    Zero accuracy means no usable fix; coordinates stay at the zone
    center but the region bookkeeping is still updated on exit.
    """
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE_ZERO)
    # Enter uses the zone's gps co-ords
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_accuracy(hass, INNER_ZONE["radius"])
    assert_location_state(hass, "inner")
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE_ZERO)
    # Exit doesn't use zero gps
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_accuracy(hass, INNER_ZONE["radius"])
    assert_location_state(hass, "inner")
    # But does exit region correctly
    assert not context().regions_entered[USER]
async def test_event_gps_exit_outside_zone_sets_away(hass, context):
    """Test that exiting far from any zone sets the tracker to not_home."""
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    assert_location_state(hass, "inner")
    # Exit message far away GPS location
    message = build_message({"lon": 90.0, "lat": 90.0}, REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    # Exit forces zone change to away
    assert_location_state(hass, STATE_NOT_HOME)
async def test_event_gps_entry_exit_right_order(hass, context):
    """Test nested region enter/exit events arriving in the correct order.

    inner -> inner_2 -> leave inner_2 (back to inner) -> leave inner
    (back to outer, on GPS coords).
    """
    # Enter inner zone
    # Set location to the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    assert_location_state(hass, "inner")
    # Enter inner2 zone
    message = build_message({"desc": "inner_2"}, REGION_GPS_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, "inner_2")
    # Exit inner_2 - should be in 'inner'
    message = build_message({"desc": "inner_2"}, REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, "inner")
    # Exit inner - should be in 'outer'
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    assert_location_latitude(hass, REGION_GPS_LEAVE_MESSAGE["lat"])
    assert_location_accuracy(hass, REGION_GPS_LEAVE_MESSAGE["acc"])
    assert_location_state(hass, "outer")
async def test_event_gps_entry_exit_wrong_order(hass, context):
    """Test nested region exit events arriving out of order.

    Leaving the outer-nested region first must not change state until
    the innermost region is also left.
    """
    # Enter inner zone
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    assert_location_state(hass, "inner")
    # Enter inner2 zone
    message = build_message({"desc": "inner_2"}, REGION_GPS_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, "inner_2")
    # Exit inner - should still be in 'inner_2'
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    assert_location_state(hass, "inner_2")
    # Exit inner_2 - should be in 'outer'
    message = build_message({"desc": "inner_2"}, REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_latitude(hass, REGION_GPS_LEAVE_MESSAGE["lat"])
    assert_location_accuracy(hass, REGION_GPS_LEAVE_MESSAGE["acc"])
    assert_location_state(hass, "outer")
async def test_event_gps_entry_unknown_zone(hass, context):
    """Test that entering an unknown zone is treated as a location update."""
    # Just treat as location update
    message = build_message({"desc": "unknown"}, REGION_GPS_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_latitude(hass, REGION_GPS_ENTER_MESSAGE["lat"])
    assert_location_state(hass, "inner")
async def test_event_gps_exit_unknown_zone(hass, context):
    """Test that leaving an unknown zone is treated as a location update."""
    # Just treat as location update
    message = build_message({"desc": "unknown"}, REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_latitude(hass, REGION_GPS_LEAVE_MESSAGE["lat"])
    assert_location_state(hass, "outer")
async def test_event_entry_zone_loading_dash(hass, context):
    """Test that a leading '-' in the region name is stripped on entry."""
    # Make sure the leading - is ignored
    # Owntracks uses this to switch on hold
    message = build_message({"desc": "-inner"}, REGION_GPS_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, "inner")
async def test_events_only_on(hass, context):
    """Test that events_only=True suppresses plain location updates."""
    # Sending a location message that is not home
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_NOT_HOME)
    assert_location_state(hass, STATE_NOT_HOME)
    context().events_only = True
    # Enter and Leave messages
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE_OUTER)
    assert_location_state(hass, "outer")
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE_OUTER)
    assert_location_state(hass, STATE_NOT_HOME)
    # Sending a location message that is inside outer zone
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # Ignored location update. Location remains at previous.
    assert_location_state(hass, STATE_NOT_HOME)
async def test_events_only_off(hass, context):
    """Test that events_only=False lets plain location updates through."""
    # Sending a location message that is not home
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_NOT_HOME)
    assert_location_state(hass, STATE_NOT_HOME)
    context().events_only = False
    # Enter and Leave messages
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE_OUTER)
    assert_location_state(hass, "outer")
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE_OUTER)
    assert_location_state(hass, STATE_NOT_HOME)
    # Sending a location message that is inside outer zone
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # Location update processed
    assert_location_state(hass, "outer")
async def test_event_source_type_entry_exit(hass, context):
    """Test that source_type follows the transport of the last event.

    GPS events report source_type 'gps'; beacon events report
    'bluetooth_le', even when inside a GPS zone.
    """
    # Entering the owntracks circular region named "inner"
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    # source_type should be gps when entering using gps.
    assert_location_source_type(hass, "gps")
    # owntracks shouldn't send beacon events with acc = 0
    await send_message(
        hass, EVENT_TOPIC, build_message({"acc": 1}, REGION_BEACON_ENTER_MESSAGE)
    )
    # We should be able to enter a beacon zone even inside a gps zone
    assert_location_source_type(hass, "bluetooth_le")
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    # source_type should be gps when leaving using gps.
    assert_location_source_type(hass, "gps")
    # owntracks shouldn't send beacon events with acc = 0
    await send_message(
        hass, EVENT_TOPIC, build_message({"acc": 1}, REGION_BEACON_LEAVE_MESSAGE)
    )
    assert_location_source_type(hass, "bluetooth_le")
# ------------------------------------------------------------------------
# Region Beacon based event entry / exit testing
async def test_event_region_entry_exit(hass, context):
    """Test region-beacon based enter and exit events.

    Unlike GPS exits, a beacon exit carries no coordinates, so the
    tracker stays at the zone center until a real location update.
    """
    # Seeing a beacon named "inner"
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
    # Enter uses the zone's gps co-ords
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_accuracy(hass, INNER_ZONE["radius"])
    assert_location_state(hass, "inner")
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # Updates ignored when in a zone
    # note that LOCATION_MESSAGE is actually pretty far
    # from INNER_ZONE and has good accuracy. I haven't
    # received a transition message though so I'm still
    # associated with the inner zone regardless of GPS.
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_accuracy(hass, INNER_ZONE["radius"])
    assert_location_state(hass, "inner")
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
    # Exit switches back to GPS but the beacon has no coords
    # so I am still located at the center of the inner region
    # until I receive a location update.
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_accuracy(hass, INNER_ZONE["radius"])
    assert_location_state(hass, "inner")
    # Left clean zone state
    assert not context().regions_entered[USER]
    # Now sending a location update moves me again.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    assert_location_latitude(hass, LOCATION_MESSAGE["lat"])
    assert_location_accuracy(hass, LOCATION_MESSAGE["acc"])
async def test_event_region_with_spaces(hass, context):
    """Test beacon enter/exit events for a region name containing spaces."""
    message = build_message({"desc": "inner 2"}, REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, "inner 2")
    message = build_message({"desc": "inner 2"}, REGION_BEACON_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    # Left clean zone state
    assert not context().regions_entered[USER]
async def test_event_region_entry_exit_right_order(hass, context):
    """Test nested region-beacon enter/exit events in the correct order."""
    # Enter inner zone
    # Set location to the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # See 'inner' region beacon
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
    assert_location_state(hass, "inner")
    # See 'inner_2' region beacon
    message = build_message({"desc": "inner_2"}, REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, "inner_2")
    # Exit inner_2 - should be in 'inner'
    message = build_message({"desc": "inner_2"}, REGION_BEACON_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, "inner")
    # Exit inner - should be in 'outer'
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
    # I have not had an actual location update yet and my
    # coordinates are set to the center of the last region I
    # entered which puts me in the inner zone.
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_accuracy(hass, INNER_ZONE["radius"])
    assert_location_state(hass, "inner")
async def test_event_region_entry_exit_wrong_order(hass, context):
    """Test nested region-beacon exit events arriving out of order."""
    # Enter inner zone
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
    assert_location_state(hass, "inner")
    # Enter inner2 zone
    message = build_message({"desc": "inner_2"}, REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, "inner_2")
    # Exit inner - should still be in 'inner_2'
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
    assert_location_state(hass, "inner_2")
    # Exit inner_2 - should be in 'outer'
    message = build_message({"desc": "inner_2"}, REGION_BEACON_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    # I have not had an actual location update yet and my
    # coordinates are set to the center of the last region I
    # entered which puts me in the inner_2 zone.
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_accuracy(hass, INNER_ZONE["radius"])
    assert_location_state(hass, "inner_2")
async def test_event_beacon_unknown_zone_no_location(hass, context):
    """Test seeing an unknown beacon before any location is known."""
    # A beacon which does not match a HA zone is the
    # definition of a mobile beacon. In this case, "unknown"
    # will be turned into device_tracker.beacon_unknown and
    # that will be tracked at my current location. Except
    # in this case my Device hasn't had a location message
    # yet so it's in an odd state where it has state.state
    # None and no GPS coords to set the beacon to.
    hass.states.async_set(DEVICE_TRACKER_STATE, None)
    message = build_message({"desc": "unknown"}, REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    # My current state is None because I haven't seen a
    # location message or a GPS or Region # Beacon event
    # message. None is the state the test harness set for
    # the Device during test case setup.
    assert_location_state(hass, "None")
    # We have had no location yet, so the beacon status
    # set to unknown.
    assert_mobile_tracker_state(hass, "unknown", "unknown")
async def test_event_beacon_unknown_zone(hass, context):
    """Test that an unknown beacon is tracked at the device's location."""
    # A beacon which does not match a HA zone is the
    # definition of a mobile beacon. In this case, "unknown"
    # will be turned into device_tracker.beacon_unknown and
    # that will be tracked at my current location. First I
    # set my location so that my state is 'outer'
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    assert_location_state(hass, "outer")
    message = build_message({"desc": "unknown"}, REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    # My state is still outer and now the unknown beacon
    # has joined me at outer.
    assert_location_state(hass, "outer")
    assert_mobile_tracker_state(hass, "outer", "unknown")
async def test_event_beacon_entry_zone_loading_dash(hass, context):
    """Test that a leading '-' in a beacon region name is stripped."""
    # Make sure the leading - is ignored
    # Owntracks uses this to switch on hold
    message = build_message({"desc": "-inner"}, REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, "inner")
# ------------------------------------------------------------------------
# Mobile Beacon based event entry / exit testing
async def test_mobile_enter_move_beacon(hass, context):
    """Test that a held mobile beacon follows the device's movement."""
    # I am in the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # I see the 'keys' beacon. I set the location of the
    # beacon_keys tracker to my current device location.
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    assert_mobile_tracker_latitude(hass, LOCATION_MESSAGE["lat"])
    assert_mobile_tracker_state(hass, "outer")
    # Location update to outside of defined zones.
    # I am now 'not home' and neither are my keys.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_NOT_HOME)
    assert_location_state(hass, STATE_NOT_HOME)
    assert_mobile_tracker_state(hass, STATE_NOT_HOME)
    not_home_lat = LOCATION_MESSAGE_NOT_HOME["lat"]
    assert_location_latitude(hass, not_home_lat)
    assert_mobile_tracker_latitude(hass, not_home_lat)
async def test_mobile_enter_exit_region_beacon(hass, context):
    """Test that a held mobile beacon tracks region enter/exit moves."""
    # I am in the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # I see a new mobile beacon
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    assert_mobile_tracker_latitude(hass, OUTER_ZONE["latitude"])
    assert_mobile_tracker_state(hass, "outer")
    # GPS enter message should move beacon
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    assert_mobile_tracker_latitude(hass, INNER_ZONE["latitude"])
    assert_mobile_tracker_state(hass, REGION_GPS_ENTER_MESSAGE["desc"])
    # Exit inner zone should move the beacon to the GPS
    # coordinates carried by the leave message.
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    assert_mobile_tracker_latitude(hass, REGION_GPS_LEAVE_MESSAGE["lat"])
    assert_mobile_tracker_state(hass, "outer")
async def test_mobile_exit_move_beacon(hass, context):
    """Test that a released mobile beacon stays at its last location."""
    # I am in the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # I see a new mobile beacon
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    assert_mobile_tracker_latitude(hass, OUTER_ZONE["latitude"])
    assert_mobile_tracker_state(hass, "outer")
    # Exit mobile beacon, should set location
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    assert_mobile_tracker_latitude(hass, OUTER_ZONE["latitude"])
    assert_mobile_tracker_state(hass, "outer")
    # Move after exit should do nothing
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_NOT_HOME)
    assert_mobile_tracker_latitude(hass, OUTER_ZONE["latitude"])
    assert_mobile_tracker_state(hass, "outer")
async def test_mobile_multiple_async_enter_exit(hass, context):
    """Fire rapid beacon enter/leave/enter bursts to probe for races."""
    enter_payload = json.dumps(MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    leave_payload = json.dumps(MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    # Queue 20 rounds of enter/leave/enter without awaiting in between,
    # exercising concurrent handling of beacon events.
    for _ in range(20):
        for payload in (enter_payload, leave_payload, enter_payload):
            async_fire_mqtt_message(hass, EVENT_TOPIC, payload)
    await hass.async_block_till_done()
    # A final processed leave must drain the active-beacon set.
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    assert not context().mobile_beacons_active["greg_phone"]
async def test_mobile_multiple_enter_exit(hass, context):
    """Test that duplicate beacon enters are cleared by a single leave."""
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    assert len(context().mobile_beacons_active["greg_phone"]) == 0
async def test_complex_movement(hass, context):
    """Test a complex sequence representative of real-world use.

    Walks through GPS zone entry, region-beacon entry, picking up a
    mobile beacon, leaving everything again, and finally moving around
    outside all zones — asserting device and beacon state at each step.
    The message order mirrors what OwnTracks actually sends.
    """
    # I am in the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    assert_location_state(hass, "outer")
    # gps to inner location and event, as actually happens with OwnTracks
    location_message = build_message(
        {
            "lat": REGION_GPS_ENTER_MESSAGE["lat"],
            "lon": REGION_GPS_ENTER_MESSAGE["lon"],
        },
        LOCATION_MESSAGE,
    )
    await send_message(hass, LOCATION_TOPIC, location_message)
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_state(hass, "inner")
    # region beacon enter inner event and location as actually happens
    # with OwnTracks
    location_message = build_message(
        {
            "lat": location_message["lat"] + FIVE_M,
            "lon": location_message["lon"] + FIVE_M,
        },
        LOCATION_MESSAGE,
    )
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_state(hass, "inner")
    # see keys mobile beacon and location message as actually happens
    location_message = build_message(
        {
            "lat": location_message["lat"] + FIVE_M,
            "lon": location_message["lon"] + FIVE_M,
        },
        LOCATION_MESSAGE,
    )
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_mobile_tracker_latitude(hass, INNER_ZONE["latitude"])
    assert_location_state(hass, "inner")
    assert_mobile_tracker_state(hass, "inner")
    # Slightly odd, I leave the location by gps before I lose
    # sight of the region beacon. This is also a little odd in
    # that my GPS coords are now in the 'outer' zone but I did not
    # "enter" that zone when I started up so my location is not
    # the center of OUTER_ZONE, but rather just my GPS location.
    # gps out of inner event and location
    location_message = build_message(
        {
            "lat": REGION_GPS_LEAVE_MESSAGE["lat"],
            "lon": REGION_GPS_LEAVE_MESSAGE["lon"],
        },
        LOCATION_MESSAGE,
    )
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, REGION_GPS_LEAVE_MESSAGE["lat"])
    assert_mobile_tracker_latitude(hass, REGION_GPS_LEAVE_MESSAGE["lat"])
    assert_location_state(hass, "outer")
    assert_mobile_tracker_state(hass, "outer")
    # region beacon leave inner
    location_message = build_message(
        {
            "lat": location_message["lat"] - FIVE_M,
            "lon": location_message["lon"] - FIVE_M,
        },
        LOCATION_MESSAGE,
    )
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, location_message["lat"])
    assert_mobile_tracker_latitude(hass, location_message["lat"])
    assert_location_state(hass, "outer")
    assert_mobile_tracker_state(hass, "outer")
    # lose keys mobile beacon
    lost_keys_location_message = build_message(
        {
            "lat": location_message["lat"] - FIVE_M,
            "lon": location_message["lon"] - FIVE_M,
        },
        LOCATION_MESSAGE,
    )
    await send_message(hass, LOCATION_TOPIC, lost_keys_location_message)
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    assert_location_latitude(hass, lost_keys_location_message["lat"])
    assert_mobile_tracker_latitude(hass, lost_keys_location_message["lat"])
    assert_location_state(hass, "outer")
    assert_mobile_tracker_state(hass, "outer")
    # gps leave outer
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_NOT_HOME)
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE_OUTER)
    assert_location_latitude(hass, LOCATION_MESSAGE_NOT_HOME["lat"])
    # The keys were dropped earlier, so they stay where they were lost.
    assert_mobile_tracker_latitude(hass, lost_keys_location_message["lat"])
    assert_location_state(hass, "not_home")
    assert_mobile_tracker_state(hass, "outer")
    # location move not home
    location_message = build_message(
        {
            "lat": LOCATION_MESSAGE_NOT_HOME["lat"] - FIVE_M,
            "lon": LOCATION_MESSAGE_NOT_HOME["lon"] - FIVE_M,
        },
        LOCATION_MESSAGE_NOT_HOME,
    )
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, location_message["lat"])
    assert_mobile_tracker_latitude(hass, lost_keys_location_message["lat"])
    assert_location_state(hass, "not_home")
    assert_mobile_tracker_state(hass, "outer")
async def test_complex_movement_sticky_keys_beacon(hass, context):
    """Test a complex sequence which was previously broken.

    Regression test: after a mobile beacon 'leave' event, the beacon
    must stop following the phone, even through subsequent region
    beacon enter/leave cycles and repeated key pickups/drops.
    """
    # I am not_home
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    assert_location_state(hass, "outer")
    # gps to inner location and event, as actually happens with OwnTracks
    location_message = build_message(
        {
            "lat": REGION_GPS_ENTER_MESSAGE["lat"],
            "lon": REGION_GPS_ENTER_MESSAGE["lon"],
        },
        LOCATION_MESSAGE,
    )
    await send_message(hass, LOCATION_TOPIC, location_message)
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_state(hass, "inner")
    # see keys mobile beacon and location message as actually happens
    location_message = build_message(
        {
            "lat": location_message["lat"] + FIVE_M,
            "lon": location_message["lon"] + FIVE_M,
        },
        LOCATION_MESSAGE,
    )
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_mobile_tracker_latitude(hass, INNER_ZONE["latitude"])
    assert_location_state(hass, "inner")
    assert_mobile_tracker_state(hass, "inner")
    # region beacon enter inner event and location as actually happens
    # with OwnTracks
    location_message = build_message(
        {
            "lat": location_message["lat"] + FIVE_M,
            "lon": location_message["lon"] + FIVE_M,
        },
        LOCATION_MESSAGE,
    )
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_state(hass, "inner")
    # This sequence of moves would cause keys to follow
    # greg_phone around even after the OwnTracks sent
    # a mobile beacon 'leave' event for the keys.
    # leave keys
    await send_message(hass, LOCATION_TOPIC, location_message)
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_state(hass, "inner")
    assert_mobile_tracker_state(hass, "inner")
    assert_mobile_tracker_latitude(hass, INNER_ZONE["latitude"])
    # leave inner region beacon
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_state(hass, "inner")
    assert_mobile_tracker_state(hass, "inner")
    assert_mobile_tracker_latitude(hass, INNER_ZONE["latitude"])
    # enter inner region beacon
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_state(hass, "inner")
    assert_mobile_tracker_state(hass, "inner")
    assert_mobile_tracker_latitude(hass, INNER_ZONE["latitude"])
    # enter keys
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_state(hass, "inner")
    assert_mobile_tracker_state(hass, "inner")
    assert_mobile_tracker_latitude(hass, INNER_ZONE["latitude"])
    # leave keys
    await send_message(hass, LOCATION_TOPIC, location_message)
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    assert_location_state(hass, "inner")
    assert_mobile_tracker_state(hass, "inner")
    assert_mobile_tracker_latitude(hass, INNER_ZONE["latitude"])
    # leave inner region beacon
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_state(hass, "inner")
    assert_mobile_tracker_state(hass, "inner")
    assert_mobile_tracker_latitude(hass, INNER_ZONE["latitude"])
    # GPS leave inner region, I'm in the 'outer' region now
    # but on GPS coords
    leave_location_message = build_message(
        {
            "lat": REGION_GPS_LEAVE_MESSAGE["lat"],
            "lon": REGION_GPS_LEAVE_MESSAGE["lon"],
        },
        LOCATION_MESSAGE,
    )
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, leave_location_message)
    assert_location_state(hass, "outer")
    # The keys were left behind in 'inner' and must not follow me out.
    assert_mobile_tracker_state(hass, "inner")
    assert_location_latitude(hass, REGION_GPS_LEAVE_MESSAGE["lat"])
    assert_mobile_tracker_latitude(hass, INNER_ZONE["latitude"])
async def test_waypoint_import_simple(hass, context):
    """Import an exported waypoint list and check both zones appear."""
    await send_message(hass, WAYPOINTS_TOPIC, WAYPOINTS_EXPORTED_MESSAGE.copy())
    # Both exported waypoints should now exist as state entities.
    assert hass.states.get(WAYPOINT_ENTITY_NAMES[0]) is not None
    assert hass.states.get(WAYPOINT_ENTITY_NAMES[1]) is not None
async def test_waypoint_import_block(hass, context):
    """Waypoints published by a non-whitelisted user must be ignored."""
    await send_message(hass, WAYPOINTS_TOPIC_BLOCKED, WAYPOINTS_EXPORTED_MESSAGE.copy())
    # Neither waypoint may appear because the sending user is blocked.
    assert hass.states.get(WAYPOINT_ENTITY_NAMES[2]) is None
    assert hass.states.get(WAYPOINT_ENTITY_NAMES[3]) is None
async def test_waypoint_import_no_whitelist(hass, setup_comp):
    """Test that without a whitelist any user's waypoints are imported."""
    # Configure owntracks without CONF_WAYPOINT_WHITELIST.
    await setup_owntracks(
        hass,
        {
            CONF_MAX_GPS_ACCURACY: 200,
            CONF_WAYPOINT_IMPORT: True,
            CONF_MQTT_TOPIC: "owntracks/#",
        },
    )
    waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
    # The "blocked" topic user is accepted because no whitelist is set.
    await send_message(hass, WAYPOINTS_TOPIC_BLOCKED, waypoints_message)
    # Check if it made it into states
    wayp = hass.states.get(WAYPOINT_ENTITY_NAMES[2])
    assert wayp is not None
    wayp = hass.states.get(WAYPOINT_ENTITY_NAMES[3])
    assert wayp is not None
async def test_waypoint_import_bad_json(hass, context):
    """A corrupt JSON waypoint payload must not create any zones."""
    # The final True argument makes send_message corrupt the JSON payload.
    await send_message(hass, WAYPOINTS_TOPIC, WAYPOINTS_EXPORTED_MESSAGE.copy(), True)
    # Nothing should have been imported.
    assert hass.states.get(WAYPOINT_ENTITY_NAMES[2]) is None
    assert hass.states.get(WAYPOINT_ENTITY_NAMES[3]) is None
async def test_waypoint_import_existing(hass, context):
    """Test that re-importing an existing waypoint does not replace it."""
    waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
    await send_message(hass, WAYPOINTS_TOPIC, waypoints_message)
    # Get the first waypoint exported
    wayp = hass.states.get(WAYPOINT_ENTITY_NAMES[0])
    # Send an update
    waypoints_message = WAYPOINTS_UPDATED_MESSAGE.copy()
    await send_message(hass, WAYPOINTS_TOPIC, waypoints_message)
    new_wayp = hass.states.get(WAYPOINT_ENTITY_NAMES[0])
    # The updated import must not have changed the existing zone.
    assert wayp == new_wayp
async def test_single_waypoint_import(hass, context):
    """A single waypoint message creates its zone entity."""
    await send_message(hass, WAYPOINT_TOPIC, WAYPOINT_MESSAGE.copy())
    assert hass.states.get(WAYPOINT_ENTITY_NAMES[0]) is not None
async def test_not_implemented_message(hass, context):
    """Test that a not-implemented message type is rejected.

    The handler is patched to report failure; send_message must then
    return a falsy result for the LWT message.
    """
    # Use the patcher as a context manager so it is always stopped even
    # if the assertion fails; the original start()/stop() pair leaked
    # the patch into subsequent tests on failure.
    with patch(
        "homeassistant.components.owntracks.messages.async_handle_not_impl_msg",
        return_value=mock_coro(False),
    ):
        assert not await send_message(hass, LWT_TOPIC, LWT_MESSAGE)
async def test_unsupported_message(hass, context):
    """Test that an unsupported message type is rejected.

    The handler is patched to report failure; send_message must then
    return a falsy result for the unsupported message.
    """
    # Context manager guarantees the patch is undone even when the
    # assertion fails, unlike the previous bare start()/stop() pair.
    with patch(
        "homeassistant.components.owntracks.messages.async_handle_unsupported_msg",
        return_value=mock_coro(False),
    ):
        assert not await send_message(hass, BAD_TOPIC, BAD_MESSAGE)
def generate_ciphers(secret):
    """Generate test ciphers for the DEFAULT_LOCATION_MESSAGE.

    Returns a tuple (ctxt, mctxt):
    - ctxt: real libsodium SecretBox ciphertext of the JSON-encoded
      message (empty string when PyNaCl is unavailable).
    - mctxt: mock "ciphertext" made by pickling (key, plaintext) and
      base64-encoding it; decoded by mock_cipher() below.
    """
    # PyNaCl ciphertext generation will fail if the module
    # cannot be imported. However, the test for decryption
    # also relies on this library and won't be run without it.
    import base64
    import pickle
    try:
        from nacl.encoding import Base64Encoder
        from nacl.secret import SecretBox
        keylen = SecretBox.KEY_SIZE
        key = secret.encode("utf-8")
        # Truncate or NUL-pad the secret to the exact SecretBox key size.
        key = key[:keylen]
        key = key.ljust(keylen, b"\0")
        msg = json.dumps(DEFAULT_LOCATION_MESSAGE).encode("utf-8")
        ctxt = SecretBox(key).encrypt(msg, encoder=Base64Encoder).decode("utf-8")
    except (ImportError, OSError):
        ctxt = ""
    mctxt = base64.b64encode(
        pickle.dumps(
            (
                secret.encode("utf-8"),
                json.dumps(DEFAULT_LOCATION_MESSAGE).encode("utf-8"),
            )
        )
    ).decode("utf-8")
    return ctxt, mctxt
# Shared secret used by all encryption tests below.
TEST_SECRET_KEY = "s3cretkey"
# Real (libsodium) and mock (pickle-based) ciphertexts for the secret.
# NOTE(review): generate_ciphers encrypts DEFAULT_LOCATION_MESSAGE —
# confirm it matches LOCATION_MESSAGE as the comments below assume.
CIPHERTEXT, MOCK_CIPHERTEXT = generate_ciphers(TEST_SECRET_KEY)
ENCRYPTED_LOCATION_MESSAGE = {
    # Encrypted version of LOCATION_MESSAGE using libsodium and TEST_SECRET_KEY
    "_type": "encrypted",
    "data": CIPHERTEXT,
}
MOCK_ENCRYPTED_LOCATION_MESSAGE = {
    # Mock-encrypted version of LOCATION_MESSAGE using pickle
    "_type": "encrypted",
    "data": MOCK_CIPHERTEXT,
}
def mock_cipher():
    """Return a dummy pickle-based cipher: (key_length, decrypt_fn)."""
    def _unpickle_decrypt(ciphertext, key):
        """Reverse the pickle/base64 mock 'encryption'."""
        import base64
        import pickle
        expected_key, plaintext = pickle.loads(base64.b64decode(ciphertext))
        # Reject the payload unless the supplied key matches the one
        # the payload was "encrypted" with.
        if key != expected_key:
            raise ValueError()
        return plaintext
    return len(TEST_SECRET_KEY), _unpickle_decrypt
@pytest.fixture
def config_context(hass, setup_comp):
    """Set up the mocked context.

    Patches device-tracker config load/save so tests neither read nor
    write a real known_devices.yaml; the patches are removed after the
    test (pytest runs the post-yield code during fixture teardown).
    """
    patch_load = patch(
        "homeassistant.components.device_tracker.async_load_config",
        return_value=mock_coro([]),
    )
    patch_load.start()
    patch_save = patch(
        "homeassistant.components.device_tracker.DeviceTracker.async_update_config"
    )
    patch_save.start()
    yield
    # Teardown: undo both patches.
    patch_load.stop()
    patch_save.stop()
@pytest.fixture(name="not_supports_encryption")
def mock_not_supports_encryption():
    """Mock an unsuccessful nacl import (encryption unavailable)."""
    with patch(
        "homeassistant.components.owntracks.messages.supports_encryption",
        return_value=False,
    ):
        yield
@pytest.fixture(name="get_cipher_error")
def mock_get_cipher_error():
    """Mock get_cipher raising OSError (cipher unavailable)."""
    with patch(
        "homeassistant.components.owntracks.messages.get_cipher", side_effect=OSError()
    ):
        yield
@patch("homeassistant.components.owntracks.messages.get_cipher", mock_cipher)
async def test_encrypted_payload(hass, setup_comp):
    """Test an encrypted payload decrypted with the global secret key."""
    await setup_owntracks(hass, {CONF_SECRET: TEST_SECRET_KEY})
    await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
    # Decryption succeeded, so the location update was applied.
    assert_location_latitude(hass, LOCATION_MESSAGE["lat"])
@patch("homeassistant.components.owntracks.messages.get_cipher", mock_cipher)
async def test_encrypted_payload_topic_key(hass, setup_comp):
    """Test an encrypted payload decrypted with a per-topic key."""
    await setup_owntracks(hass, {CONF_SECRET: {LOCATION_TOPIC: TEST_SECRET_KEY}})
    await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
    # Decryption succeeded, so the location update was applied.
    assert_location_latitude(hass, LOCATION_MESSAGE["lat"])
async def test_encrypted_payload_not_supports_encryption(
    hass, setup_comp, not_supports_encryption
):
    """Test that an encrypted payload is dropped when encryption is unsupported."""
    await setup_owntracks(hass, {CONF_SECRET: TEST_SECRET_KEY})
    await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
    # No tracker entity may be created from an undecodable message.
    assert hass.states.get(DEVICE_TRACKER_STATE) is None
async def test_encrypted_payload_get_cipher_error(hass, setup_comp, get_cipher_error):
    """Test that an encrypted payload is dropped when cipher setup fails."""
    await setup_owntracks(hass, {CONF_SECRET: TEST_SECRET_KEY})
    await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
    assert hass.states.get(DEVICE_TRACKER_STATE) is None
@patch("homeassistant.components.owntracks.messages.get_cipher", mock_cipher)
async def test_encrypted_payload_no_key(hass, setup_comp):
"""Test encrypted payload with no key, ."""
assert hass.states.get(DEVICE_TRACKER_STATE) is None
await setup_owntracks(hass, {CONF_SECRET: {}})
await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
assert hass.states.get(DEVICE_TRACKER_STATE) is None
@patch("homeassistant.components.owntracks.messages.get_cipher", mock_cipher)
async def test_encrypted_payload_wrong_key(hass, setup_comp):
"""Test encrypted payload with wrong key."""
await setup_owntracks(hass, {CONF_SECRET: "wrong key"})
await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
assert hass.states.get(DEVICE_TRACKER_STATE) is None
@patch("homeassistant.components.owntracks.messages.get_cipher", mock_cipher)
async def test_encrypted_payload_wrong_topic_key(hass, setup_comp):
"""Test encrypted payload with wrong topic key."""
await setup_owntracks(hass, {CONF_SECRET: {LOCATION_TOPIC: "wrong key"}})
await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
assert hass.states.get(DEVICE_TRACKER_STATE) is None
@patch("homeassistant.components.owntracks.messages.get_cipher", mock_cipher)
async def test_encrypted_payload_no_topic_key(hass, setup_comp):
"""Test encrypted payload with no topic key."""
await setup_owntracks(
hass, {CONF_SECRET: {"owntracks/{}/{}".format(USER, "otherdevice"): "foobar"}}
)
await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
assert hass.states.get(DEVICE_TRACKER_STATE) is None
async def test_encrypted_payload_libsodium(hass, setup_comp):
    """Test decoding a genuinely encrypted payload via PyNaCl/libsodium."""
    try:
        import nacl  # noqa: F401 pylint: disable=unused-import
    except (ImportError, OSError):
        # pytest.skip() raises Skipped, so no ``return`` is needed after it
        # (the one previously here was unreachable dead code).
        pytest.skip("PyNaCl/libsodium is not installed")
    await setup_owntracks(hass, {CONF_SECRET: TEST_SECRET_KEY})
    await send_message(hass, LOCATION_TOPIC, ENCRYPTED_LOCATION_MESSAGE)
    assert_location_latitude(hass, LOCATION_MESSAGE["lat"])
async def test_customized_mqtt_topic(hass, setup_comp):
    """Test subscribing to a custom mqtt topic."""
    await setup_owntracks(hass, {CONF_MQTT_TOPIC: "mytracks/#"})
    # Publish under the custom prefix instead of the default "owntracks/".
    topic = f"mytracks/{USER}/{DEVICE}"
    await send_message(hass, topic, LOCATION_MESSAGE)
    assert_location_latitude(hass, LOCATION_MESSAGE["lat"])
async def test_region_mapping(hass, setup_comp):
    """Test that a named OwnTracks region is mapped to the configured zone."""
    await setup_owntracks(hass, {CONF_REGION_MAPPING: {"foo": "inner"}})
    hass.states.async_set("zone.inner", "zoning", INNER_ZONE)
    message = build_message({"desc": "foo"}, REGION_GPS_ENTER_MESSAGE)
    # Sanity check that the fixture override took effect before sending.
    assert message["desc"] == "foo"
    await send_message(hass, EVENT_TOPIC, message)
    # Region "foo" must resolve to zone "inner" via CONF_REGION_MAPPING.
    assert_location_state(hass, "inner")
async def test_restore_state(hass, hass_client):
    """Test that tracker state survives a config-entry reload."""
    entry = MockConfigEntry(
        domain="owntracks", data={"webhook_id": "owntracks_test", "secret": "abcd"}
    )
    entry.add_to_hass(hass)
    await hass.config_entries.async_setup(entry.entry_id)
    await hass.async_block_till_done()
    client = await hass_client()
    # Deliver one location fix through the webhook to create the entity.
    resp = await client.post(
        "/api/webhook/owntracks_test",
        json=LOCATION_MESSAGE,
        headers={"X-Limit-u": "Paulus", "X-Limit-d": "Pixel"},
    )
    assert resp.status == 200
    await hass.async_block_till_done()
    state_1 = hass.states.get("device_tracker.paulus_pixel")
    assert state_1 is not None
    # Reload the entry; the entity must be recreated from restored data.
    await hass.config_entries.async_reload(entry.entry_id)
    await hass.async_block_till_done()
    state_2 = hass.states.get("device_tracker.paulus_pixel")
    assert state_2 is not None
    # A new state object must exist, but with identical content.
    assert state_1 is not state_2
    assert state_1.state == state_2.state
    assert state_1.name == state_2.name
    assert state_1.attributes["latitude"] == state_2.attributes["latitude"]
    assert state_1.attributes["longitude"] == state_2.attributes["longitude"]
    assert state_1.attributes["battery_level"] == state_2.attributes["battery_level"]
    assert state_1.attributes["source_type"] == state_2.attributes["source_type"]
async def test_returns_empty_friends(hass, hass_client):
    """Test that an empty list of persons' locations is returned."""
    entry = MockConfigEntry(
        domain="owntracks", data={"webhook_id": "owntracks_test", "secret": "abcd"}
    )
    entry.add_to_hass(hass)
    await hass.config_entries.async_setup(entry.entry_id)
    await hass.async_block_till_done()
    client = await hass_client()
    resp = await client.post(
        "/api/webhook/owntracks_test",
        json=LOCATION_MESSAGE,
        headers={"X-Limit-u": "Paulus", "X-Limit-d": "Pixel"},
    )
    assert resp.status == 200
    # With no persons configured, the webhook responds with an empty JSON list.
    assert await resp.text() == "[]"
async def test_returns_array_friends(hass, hass_client):
    """Test that a list of persons' current locations is returned."""
    otracks = MockConfigEntry(
        domain="owntracks", data={"webhook_id": "owntracks_test", "secret": "abcd"}
    )
    otracks.add_to_hass(hass)
    await hass.config_entries.async_setup(otracks.entry_id)
    await hass.async_block_till_done()
    # Setup device_trackers
    assert await async_setup_component(
        hass,
        "person",
        {
            "person": [
                {
                    "name": "person 1",
                    "id": "person1",
                    "device_trackers": ["device_tracker.person_1_tracker_1"],
                },
                {
                    "name": "person2",
                    "id": "person2",
                    "device_trackers": ["device_tracker.person_2_tracker_1"],
                },
            ]
        },
    )
    # Only person 1's tracker gets a location; person2 has none and therefore
    # must not appear in the response.
    hass.states.async_set(
        "device_tracker.person_1_tracker_1", "home", {"latitude": 10, "longitude": 20}
    )
    client = await hass_client()
    resp = await client.post(
        "/api/webhook/owntracks_test",
        json=LOCATION_MESSAGE,
        headers={"X-Limit-u": "Paulus", "X-Limit-d": "Pixel"},
    )
    assert resp.status == 200
    response_json = json.loads(await resp.text())
    # OwnTracks "friends" format: lat/lon plus a two-letter tracker id.
    assert response_json[0]["lat"] == 10
    assert response_json[0]["lon"] == 20
    assert response_json[0]["tid"] == "p1"
|
ghchinoy/tensorflow | refs/heads/master | tensorflow/python/tpu/topology_test.py | 15 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for topology.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import test
from tensorflow.python.tpu import topology
class TopologyTest(test.TestCase):

  def testSerialization(self):
    """A Topology rebuilt from its serialized form must equal the source."""
    source = topology.Topology(
        mesh_shape=[1, 1, 2],
        device_coordinates=[[[0, 0, 0], [0, 0, 1]]],
    )
    # Round-trip the topology through its wire format.
    restored = topology.Topology(serialized=source.serialized())
    # Both the mesh shape and the per-device coordinates must survive the
    # serialize/deserialize round trip unchanged.
    self.assertAllEqual(source.mesh_shape, restored.mesh_shape)
    self.assertAllEqual(source.device_coordinates, restored.device_coordinates)
if __name__ == "__main__":
test.main()
|
krishauser/pyOptimalMotionPlanning | refs/heads/master | pomp/planners/test.py | 1 | from __future__ import print_function,division
from six import iteritems
from builtins import range
from .profiler import Profiler
import time
def testPlanner(planner,numTrials,maxTime,filename):
print("Testing planner for %d trials, %f seconds"%(numTrials,maxTime))
print("Saving to",filename)
f = open(filename,'w')
f.write("trial,plan iters,plan time,best cost\n")
for trial in range(numTrials):
print()
print("Trial",trial+1)
planner.reset()
curCost = float('inf')
t0 = time.time()
numupdates = 0
iters = 0
hadException = False
while time.time()-t0 < maxTime:
try:
planner.planMore(10)
except Exception as e:
if hadException:
print("Warning, planner raise two exceptions in a row. Quitting")
break
else:
import traceback
traceback.print_exc()
print("Warning, planner raised an exception... soldiering on")
print(e)
hadException = True
continue
iters += 10
if planner.bestPathCost != None and planner.bestPathCost != curCost:
numupdates += 1
curCost = planner.bestPathCost
t1 = time.time()
f.write(str(trial)+","+str(iters)+","+str(t1-t0)+","+str(curCost)+'\n')
if hasattr(planner,'stats'):
print
temp = Profiler()
temp.items["Stats:"] = planner.stats
temp.pretty_print()
print()
print("Final cost:",curCost)
print()
f.write(str(trial)+","+str(iters)+","+str(maxTime)+","+str(curCost)+'\n')
f.close()
|
nrhine1/scikit-learn | refs/heads/master | examples/neighbors/plot_regression.py | 349 | """
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause (C) INRIA
###############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
np.random.seed(0)
# 40 random abscissae in [0, 5), sorted so the curve plots left-to-right.
X = np.sort(5 * np.random.rand(40, 1), axis=0)
# Dense evaluation grid for the fitted prediction curve.
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
# (every 5th of the 40 samples -> 8 perturbed points in [-0.5, 0.5)).
y[::5] += 1 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
n_neighbors = 5
# Compare uniform weighting against inverse-distance weighting, one subplot each.
for i, weights in enumerate(['uniform', 'distance']):
    knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
    y_ = knn.fit(X, y).predict(T)
    plt.subplot(2, 1, i + 1)
    plt.scatter(X, y, c='k', label='data')
    plt.plot(T, y_, c='g', label='prediction')
    plt.axis('tight')
    plt.legend()
    plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
                                                                weights))
plt.show()
|
passiweinberger/nupic | refs/heads/master | tests/integration/nupic/opf/opf_checkpoint_test/experiments/temporal_multi_step/base.py | 62 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2012-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
VERBOSITY = 0
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
    # Type of model that the rest of these parameters apply to.
    'model': "CLA",

    # Version that specifies the format of the config.
    'version': 1,

    # Intermediate variables used to compute fields in modelParams and also
    # referenced from the control section.
    'aggregationInfo': {
        'days': 0,
        'hours': 0,
        'microseconds': 0,
        'milliseconds': 0,
        'minutes': 0,
        'months': 0,
        'seconds': 0,
        'weeks': 0,
        'years': 0,
        'fields': [(u'c1', 'first'), (u'c0', 'first')],
    },

    # When set (a duration dict), clParams['steps'] is recomputed from it
    # after the sub-experiment override step below the dictionary.
    'predictAheadTime': None,

    # Model parameter dictionary.
    'modelParams': {
        # The type of inference that this model will perform
        'inferenceType': 'TemporalMultiStep',

        'sensorParams': {
            # Sensor diagnostic output verbosity control;
            # if > 0: sensor region will print out on screen what it's sensing
            # at each step 0: silent; >=1: some info; >=2: more info;
            # >=3: even more info (see compute() in py/regions/RecordSensor.py)
            'verbosity' : VERBOSITY,

            # Example:
            #   dsEncoderSchema = [
            #     DeferredDictLookup('__field_name_encoder'),
            #   ],
            #
            # (value generated from DS_ENCODER_SCHEMA)
            'encoders': {
                u'c0_timeOfDay': { 'fieldname': u'c0',
                                   'name': u'c0_timeOfDay',
                                   'timeOfDay': (21, 1),
                                   'type': 'DateEncoder'},
                u'c0_dayOfWeek': { 'dayOfWeek': (21, 1),
                                   'fieldname': u'c0',
                                   'name': u'c0_dayOfWeek',
                                   'type': 'DateEncoder'},
                u'c0_weekend': { 'fieldname': u'c0',
                                 'name': u'c0_weekend',
                                 'type': 'DateEncoder',
                                 'weekend': 21},
                u'c1': { 'clipInput': True,
                         'fieldname': u'c1',
                         'n': 100,
                         'name': u'c1',
                         'type': 'AdaptiveScalarEncoder',
                         'w': 21},
            },

            # A dictionary specifying the period for automatically-generated
            # resets from a RecordSensor;
            #
            # None = disable automatically-generated resets (also disabled if
            # all of the specified values evaluate to 0).
            # Valid keys is the desired combination of the following:
            #   days, hours, minutes, seconds, milliseconds, microseconds, weeks
            #
            # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
            #
            # (value generated from SENSOR_AUTO_RESET)
            'sensorAutoReset' : None,
        },

        'spEnable': True,

        'spParams': {
            # SP diagnostic output verbosity control;
            # 0: silent; >=1: some info; >=2: more info;
            'spVerbosity' : VERBOSITY,
            'globalInhibition': 1,
            # Number of cell columns in the cortical region (same number for
            # SP and TP)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,
            'inputWidth': 0,
            # SP inhibition control (absolute value);
            # Maximum number of active columns in the SP region's output (when
            # there are more, the weaker ones are suppressed)
            'numActiveColumnsPerInhArea': 40,
            'seed': 1956,
            # potentialPct
            # What percent of the columns's receptive field is available
            # for potential synapses. At initialization time, we will
            # choose potentialPct * (2*potentialRadius+1)^2
            'potentialPct': 0.5,
            # The default connected threshold. Any synapse whose
            # permanence value is above the connected threshold is
            # a "connected synapse", meaning it can contribute to the
            # cell's firing. Typical value is 0.10. Cells whose activity
            # level before inhibition falls below minDutyCycleBeforeInh
            # will have their own internal synPermConnectedCell
            # threshold set below this default value.
            # (This concept applies to both SP and TP and so 'cells'
            # is correct here as opposed to 'columns')
            'synPermConnected': 0.1,
            'synPermActiveInc': 0.1,
            'synPermInactiveDec': 0.01,
        },

        # Controls whether TP is enabled or disabled;
        # TP is necessary for making temporal predictions, such as predicting
        # the next inputs. Without TP, the model is only capable of
        # reconstructing missing sensor inputs (via SP).
        'tpEnable' : True,

        'tpParams': {
            # TP diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
            'verbosity': 0,
            # Number of cell columns in the cortical region (same number for
            # SP and TP)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,
            # The number of cells (i.e., states), allocated per column.
            'cellsPerColumn': 32,
            'inputWidth': 2048,
            'seed': 1960,
            # Temporal Pooler implementation selector (see _getTPClass in
            # CLARegion.py).
            'temporalImp': 'cpp',
            # New Synapse formation count
            # NOTE: If None, use spNumActivePerInhArea
            #
            # TODO: need better explanation
            'newSynapseCount': 20,
            # Maximum number of synapses per segment
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TP
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSynapsesPerSegment': 32,
            # Maximum number of segments per cell
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TP
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSegmentsPerCell': 128,
            # Initial Permanence
            # TODO: need better explanation
            'initialPerm': 0.21,
            # Permanence Increment
            'permanenceInc': 0.1,
            # Permanence Decrement
            # If set to None, will automatically default to tpPermanenceInc
            # value.
            'permanenceDec' : 0.1,
            'globalDecay': 0.0,
            'maxAge': 0,
            # Minimum number of active synapses for a segment to be considered
            # during search for the best-matching segments.
            # None=use default
            # Replaces: tpMinThreshold
            'minThreshold': 12,
            # Segment activation threshold.
            # A segment is active if it has >= tpSegmentActivationThreshold
            # connected synapses that are active due to infActiveState
            # None=use default
            # Replaces: tpActivationThreshold
            'activationThreshold': 16,
            'outputType': 'normal',
            # "Pay Attention Mode" length. This tells the TP how many new
            # elements to append to the end of a learned sequence at a time.
            # Smaller values are better for datasets with short sequences,
            # higher values are better for datasets with long sequences.
            'pamLength': 1,
        },

        'clParams': {
            'regionName' : 'CLAClassifierRegion',
            # Classifier diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            'clVerbosity' : VERBOSITY,
            # This controls how fast the classifier learns/forgets. Higher values
            # make it adapt faster and forget older patterns faster.
            'alpha': 0.001,
            # This is set after the call to updateConfigFromSubConfig and is
            # computed from the aggregationInfo and predictAheadTime.
            'steps': '24',
        },

        'trainSPNetOnlyIfRequested': False,
    },
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)

# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
    predictionSteps = int(round(aggregationDivide(
        config['predictAheadTime'], config['aggregationInfo'])))
    # Raise a real exception instead of ``assert`` so the sanity check is not
    # stripped when Python runs with optimizations (-O).
    if predictionSteps < 1:
        raise ValueError(
            "predictAheadTime/aggregationInfo must yield at least one "
            "prediction step, got %d" % predictionSteps)
    config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Apply any ValueGetterBase-derived futures (e.g. DeferredDictLookup) to the
# config. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any),
# so the getters resolve against the final, merged dictionary.
applyValueGettersToContainer(config)
# ``os`` was not imported explicitly anywhere in this file and was only
# available (if at all) transitively through the ``import *`` above; import it
# explicitly so this line cannot raise NameError if that module changes.
import os

# Absolute path of the experiment's input data, next to this description file.
dataPath = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                        'data.csv'))
control = {
    # The environment that the current model is being run in
    "environment": 'nupic',

    # Input stream specification per py/nupicengine/cluster/database/StreamDef.json.
    #
    'dataset' : { 'aggregation': config['aggregationInfo'],
                  u'info': u'82b42f21-7f86-47b3-bab4-3738703bf612',
                  u'streams': [ { u'columns': [u'c0', u'c1'],
                                  u'info': u'82b42f21-7f86-47b3-bab4-3738703bf612',
                                  u'source': 'file://%s' % (dataPath),
                                  u'first_record': config['firstRecord'],
                                  u'last_record': config['lastRecord'],
                                  u'types': [u'datetime', u'float']}],
                  u'timeField': u'c0',
                  u'version': 1},

    # Iteration count: maximum number of iterations. Each iteration corresponds
    # to one record from the (possibly aggregated) dataset. The task is
    # terminated when either number of iterations reaches iterationCount or
    # all records in the (possibly aggregated) database have been processed,
    # whichever occurs first.
    #
    # iterationCount of -1 = iterate over the entire dataset
    'iterationCount' : -1,

    # A dictionary containing all the supplementary parameters for inference
    "inferenceArgs":{u'predictedField': u'c1', u'predictionSteps': [24]},

    # Metrics: A list of MetricSpecs that instantiate the metrics that are
    # computed for this experiment
    'metrics':[
        MetricSpec(field=u'c1', metric='multiStep', inferenceElement='multiStepBestPredictions', params={'window': 1000, 'steps': [24], 'errorMetric': 'altMAPE'}),
    ],

    # Logged Metrics: A sequence of regular expressions that specify which of
    # the metrics from the Inference Specifications section MUST be logged for
    # every prediction. The regex's correspond to the automatically generated
    # metric labels. This is similar to the way the optimization metric is
    # specified in permutations.py.
    'loggedMetrics': ['.*'],
}

# The object the OPF experiment runner consumes: the model config plus the
# control (data/metrics) section assembled above.
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
|
lzimann/tgstation | refs/heads/master | tools/expand_filedir_paths.py | 166 | #!/usr/bin/env python
import re, os, sys, fnmatch
# Regex pattern to extract the directory path in a #define FILE_DIR
filedir_pattern = re.compile(r'^#define\s*FILE_DIR\s*"(.*?)"')

# Regex pattern to extract any single quoted piece of text.  This can also
# match single quoted strings inside of double quotes, which is part of a
# regular text string and should not be replaced.  The replacement function
# however will skip any match that doesn't appear to be a filename so these
# extra matches should not be a problem.
rename_pattern = re.compile(r"'(.+?)'")

# Only filenames matching this pattern will have their resources renamed
source_pattern = re.compile(r"^.*?\.(dm|dmm)$")

# Open the .dme file and return a list of all FILE_DIR paths in it
def read_filedirs(filename):
    """Return every FILE_DIR path declared in *filename*, in file order."""
    result = []
    # ``with open(...)`` replaces the legacy ``file()`` builtin and manual
    # close(), guaranteeing the handle is released even if a read fails.
    with open(filename, "rt") as dme_file:
        # Read each line from the file and check for regex pattern match
        for row in dme_file:
            match = filedir_pattern.match(row)
            if match:
                result.append(match.group(1))
    return result
# Search through a list of directories, and build a dictionary which
# maps every file to its full pathname (relative to the .dme file)
# If the same filename appears in more than one directory, the earlier
# directory in the list takes preference.
def index_files(file_dirs):
    """Map each lowercased resource filename to its full relative path.

    Entries without a "." in their name (e.g. subdirectories) are skipped.
    """
    result = {}
    # Reverse the directory list so the earlier directories take precedence
    # by replacing the previously indexed file of the same name
    for directory in reversed(file_dirs):
        for name in os.listdir(directory):
            # Skip entries with no extension (typically subdirectories).
            if "." not in name:
                continue
            # Replace backslash path separators on Windows with forward slash
            # Force "name" to lowercase when used as a key since BYOND resource
            # names are case insensitive, even on Linux.
            result[name.lower()] = directory.replace('\\', '/') + '/' + name
    return result
# Recursively search for every .dm/.dmm file in the .dme file directory. For
# each file, search it for any resource names in single quotes, and replace
# them with the full path previously found by index_files()
def rewrite_sources(resources):
    """Rewrite all .dm/.dmm files under the cwd to use full resource paths."""
    # Closure so the regex replacement callback can see ``resources``, which
    # can't be passed directly through re.sub().
    def replace_func(match):
        # Resource names are case-insensitive; fall back to the original text
        # when the quoted string is not a known resource (e.g. a plain string).
        key = match.group(1).lower()
        replacement = resources.get(key, match.group(1))
        return "'" + replacement + "'"
    # Search recursively for all .dm and .dmm files
    for (dirpath, dirs, files) in os.walk("."):
        for name in files:
            if source_pattern.match(name):
                path = dirpath + '/' + name
                # ``with`` (instead of the legacy ``file()`` builtin) closes
                # both handles even if a read/write fails midway.
                with open(path, "rt") as source_file, \
                     open(path + ".tmp", "wt") as output_file:
                    # Read file one line at a time and perform replacement of
                    # all single quoted resource names with the full path to
                    # that resource file.
                    for row in source_file:
                        output_file.write(rename_pattern.sub(replace_func, row))
                # Delete original source file and replace with the temporary
                # output.  On Windows, an atomic rename() operation is not
                # possible like it is under POSIX.
                os.remove(path)
                os.rename(path + ".tmp", path)
dirs = read_filedirs("tgstation.dme");
resources = index_files(dirs)
rewrite_sources(resources)
|
jrief/djangocms-cascade | refs/heads/master | cmsplugin_cascade/render_template.py | 1 | from django.forms import MediaDefiningClass
from django.forms.fields import ChoiceField
from django.utils.translation import gettext_lazy as _
from django.template.loader import get_template, TemplateDoesNotExist
from entangled.forms import EntangledModelFormMixin
from cmsplugin_cascade import app_settings
class RenderTemplateFormMixin(EntangledModelFormMixin):
    """Form mixin adding a 'render_template' choice persisted in the glossary.

    The choices themselves are filled in per plugin class by
    ``RenderTemplateMixin.get_form`` below.
    """
    render_template = ChoiceField(
        label=_("Render template"),
        help_text=_("Use alternative template for rendering this plugin."),
        required=False,
    )
    class Meta:
        entangled_fields = {'glossary': ['render_template']}
class RenderTemplateMixin(metaclass=MediaDefiningClass):
    """
    If a Cascade plugin is listed in ``settings.CMSPLUGIN_CASCADE['plugins_with_extra_render_templates']``,
    then this ``RenderTemplateMixin`` class is added automatically to its plugin class in order
    to add an additional select box used for choosing an alternative render template.
    """
    @classmethod
    def get_template_choices(cls):
        """Return the configured template choices for this plugin class."""
        return app_settings.CMSPLUGIN_CASCADE['plugins_with_extra_render_templates'][cls.__name__]

    def get_form(self, request, obj=None, **kwargs):
        """Inject the 'render_template' choice field into the plugin's form."""
        form = kwargs.get('form', self.form)
        assert issubclass(form, EntangledModelFormMixin), "Form must inherit from EntangledModelFormMixin"
        choices = self.get_template_choices()
        if isinstance(choices, (list, tuple)):
            # Derive a new form class on the fly so the mixin's field and the
            # per-class choices do not leak into other plugin forms.
            form = type(form.__name__, (RenderTemplateFormMixin, form), {})
            form.base_fields['render_template'].choices = choices
        kwargs['form'] = form
        return super().get_form(request, obj, **kwargs)

    def get_render_template(self, context, instance, placeholder):
        """Return the chosen template, falling back to ``self.render_template``.

        Falls back when the glossary entry is missing, the choice list is
        empty/malformed, or the named template does not exist.
        """
        try:
            template = instance.glossary.get('render_template', self.get_template_choices()[0][0])
            get_template(template)  # check if template exists
        except (KeyError, IndexError, TemplateDoesNotExist, TypeError):
            template = self.render_template
        return template
|
dacjames/mara-lang | refs/heads/develop | bootstrap/mara/test/test_parser.py | 1 | from .. import parser as parser_module
from .. import node as n
from programs import program_name_resolution
import pytest
# pylint: disable=W0621
# pylint: disable=C0330
xfail = pytest.mark.xfail
def maramodule(name, code):
    """Wrap *code* in a ``module <name> ... end`` declaration."""
    template = 'module {n} {c} end'
    return template.format(n=name, c=code)
@pytest.fixture
def parser():
    """Provide a fresh Mara parser instance for each test."""
    return parser_module.Parser()
def test_parse_literals(parser):
    """Integer, real and scientific literals each map to their node type."""
    assert (
        parser.parse(maramodule('test', '''
            10
        ''')) == n.Module(name='test', exprs=[
            n.Int(value='10')
        ])
    )
    assert (
        parser.parse('module test; 10.0 end') ==
        n.Module(name='test', exprs=[
            n.Real(value='10.0')
        ])
    )
    assert (
        parser.parse('module test; 1e10 end') ==
        n.Module(name='test', exprs=[
            n.Sci(value='1e10')
        ])
    )
def test_parse_simple_expr(parser):
    """A binary operation parses into a BinOp with a SymbolId function."""
    given = 'module; x * 1 end'
    # Anonymous modules get a generated name.
    expected = n.Module(
        name='_anon_module_0',
        exprs=[
            n.BinOp(
                func=n.SymbolId('*'),
                args=[
                    n.ValueId('x'),
                    n.Int('1'),
                ],
            ),
        ],
    )
    result = parser.parse(given)
    assert expected == result
def test_parse_binding(parser):
    """``::`` bindings work for every value/type identifier combination."""
    given = maramodule('test', '''
        x::y
        X::y
        x::Y
        X::Y
    ''')
    expected = n.Module(name='test', exprs=[
        n.Binding(left=n.ValueId('x'), right=n.ValueId('y')),
        n.Binding(left=n.TypeId('X'), right=n.ValueId('y')),
        n.Binding(left=n.ValueId('x'), right=n.TypeId('Y')),
        n.Binding(left=n.TypeId('X'), right=n.TypeId('Y')),
    ])
    result = parser.parse(given)
    # Per-expression asserts first so a failure pinpoints the broken case.
    assert expected.exprs[0] == result.exprs[0]
    assert expected.exprs[1] == result.exprs[1]
    assert expected.exprs[2] == result.exprs[2]
    assert expected.exprs[3] == result.exprs[3]
    assert expected == result
def test_parse_function_call(parser):
    """All call syntaxes (paren, juxtaposed, method, with block) normalize.

    Method-call sugar ``x.foo(...)`` folds the receiver into the argument
    tuple, and a trailing ``{ ... }`` attaches as the call's block.
    """
    given = maramodule('test', '''
        foo(10)
        foo 10
        10.foo
        x.foo
        x.foo 10
        x.foo(3,5)
        foo 10 { 2 + 4}
        x.foo {2 + 4 }
        x.foo 10 { 2 + 4 }
        x.foo(3, 5){2 + 4}
    ''')
    # Shared expected block node for every ``{ 2 + 4 }`` trailer above.
    block = n.Block(
        exprs=[n.BinOp(
            func=n.SymbolId('+'),
            args=[n.Int('2'), n.Int('4')],
        )]
    )
    expected = n.Module('test', [
        n.Call(
            func=n.ValueId('foo'),
            arg=n.Tuple([
                n.Int('10'),
            ]),
        ),
        n.Call(
            func=n.ValueId('foo'),
            arg=n.Tuple([
                n.Int('10'),
            ]),
        ),
        n.Call(
            func=n.ValueId('foo'),
            arg=n.Tuple([
                n.Int('10'),
            ]),
        ),
        n.Call(
            func=n.ValueId('foo'),
            arg=n.Tuple([
                n.ValueId('x')
            ]),
        ),
        n.Call(
            func=n.ValueId('foo'),
            arg=n.Tuple(values=[
                n.ValueId('x'),
                n.Int('10'),
            ])
        ),
        n.Call(
            func=n.ValueId('foo'),
            arg=n.Tuple(values=[
                n.ValueId('x'),
                n.Int('3'),
                n.Int('5'),
            ])
        ),
        n.Call(func=n.ValueId('foo'), arg=n.Tuple(values=[n.Int('10')]), block=block),
        n.Call(func=n.ValueId('foo'), arg=n.Tuple(values=[n.ValueId('x')]), block=block),
        n.Call(
            func=n.ValueId('foo'),
            arg=n.Tuple(values=[
                n.ValueId('x'),
                n.Int('10'),
            ]),
            block=block,
        ),
        n.Call(
            func=n.ValueId('foo'),
            arg=n.Tuple(values=[
                n.ValueId('x'),
                n.Int('3'),
                n.Int('5'),
            ]),
            block=block,
        ),
    ])
    result = parser.parse(given)
    # Per-expression asserts first so a failure pinpoints the broken syntax.
    assert expected.exprs[0] == result.exprs[0]
    assert expected.exprs[1] == result.exprs[1]
    assert expected.exprs[2] == result.exprs[2]
    assert expected.exprs[3] == result.exprs[3]
    assert expected.exprs[4] == result.exprs[4]
    assert expected.exprs[5] == result.exprs[5]
    assert expected.exprs[6] == result.exprs[6]
    assert expected == result
def test_function_call_if_regression(parser):
    """Regression: a function call must be usable as an ``if`` predicate."""
    given = maramodule('test', '''
        def bool (x) {
            if x == 0 { true }
            else { false }
        }
        if bool(1) {
            42
        } else {
            99
        }
    ''')
    expected = n.Module(name='test', exprs=[
        n.Def(
            name=n.ValueId(value='bool'),
            param=n.Tuple(values=[n.Param(name=n.ValueId(value='x'), type_=n.InferType())]),
            body=n.Block(exprs=[
                n.If(
                    pred=n.BinOp(args=[n.ValueId(value='x'), n.Int(value='0')], func=n.SymbolId(value='==')),
                    if_body=n.Block(exprs=[n.Bool(value='1')]),
                    else_body=n.Unit(),
                ),
                n.Else(
                    body=n.Block(exprs=[n.Bool(value='0')]),
                    expr=None,
                )
            ]),
            return_type=n.InferType()
        ),
        n.If(
            pred=n.Call(
                arg=n.Tuple(values=[n.Int(value='1')]),
                func=n.ValueId(value='bool'),
            ),
            if_body=n.Block(exprs=[n.Int(value='42')]),
            else_body=n.Block(exprs=[n.Int(value='99')]),
        ),
    ])
    result = parser.parse(given)
    assert expected == result
def test_exprs_parse_assignment(parser):
    """Plain assignment parses to ``Assign``; typed ``val`` parses to ``Val``."""
    given = maramodule('assignment', '''
a = 10
''')
    expected = n.Module('assignment', [
        n.Assign(name=n.ValueId('a'), value=n.Int('10'))
    ])
    result = parser.parse(given)
    assert expected == result
    # Second case: an explicitly typed value declaration on one line.
    given = 'module assignment; val a Real = 1.0 end'
    expected = n.Module('assignment', [
        n.Val(name=n.ValueId('a'), value=n.Real('1.0'), type_=n.TypeId('Real'))
    ])
    result = parser.parse(given)
    assert expected == result
def test_parse_booleans(parser):
    """``true``/``false`` literals parse to Bool('1')/Bool('0') nodes."""
    given = maramodule('test', '''
true
false
''')
    expected = n.Module(
        name='test',
        exprs=[
            n.Bool('1'),
            n.Bool('0'),
        ]
    )
    result = parser.parse(given)
    assert expected == result
def test_parse_unwrapped_if(parser):
    """Postfix ``expr if pred`` without braces parses into an If node."""
    given = 'module simple; x * 2.0 if x > 0 end'
    expected = n.Module(
        name='simple',
        exprs=[
            n.If(
                pred=n.BinOp(
                    func=n.SymbolId('>'),
                    args=[
                        n.ValueId('x'),
                        n.Int(value='0'),
                    ]
                ),
                # The body is the bare expression, not a Block.
                if_body=n.BinOp(
                    func=n.SymbolId('*'),
                    args=[
                        n.ValueId('x'),
                        n.Real(value='2.0')
                    ]
                ),
            ),
        ],
    )
    result = parser.parse(given)
    assert expected.exprs[0].pred == result.exprs[0].pred
    assert expected == result
def test_parse_wrapped_if(parser):
    """Prefix ``if (pred) {body}`` parses with the body wrapped in a Block."""
    given = 'module simple; if (x > 0) {x * 2.0} end'
    expected = n.Module(
        name='simple',
        exprs=[
            n.If(
                pred=n.BinOp(
                    func=n.SymbolId('>'),
                    args=[
                        n.ValueId('x'),
                        n.Int(value='0'),
                    ]
                ),
                # Unlike the postfix form, a braced body becomes a Block.
                if_body=n.Block(
                    exprs=[
                        n.BinOp(
                            func=n.SymbolId('*'),
                            args=[
                                n.ValueId('x'),
                                n.Real(value='2.0')
                            ]
                        ),
                    ]
                ),
            ),
        ],
    )
    result = parser.parse(given)
    assert expected == result
def test_parse_simple_control_flow(parser):
    """A full ``if {…} else {…}`` parses to an If with both Block bodies.

    Note: a leftover debug ``print`` of the parsed expression was removed;
    it only added noise to the test output.
    """
    given = maramodule('control_flow', '''
if 1 < 2 {
10
} else {
20
}
''')
    expected = n.Module(
        name='control_flow',
        exprs=[
            n.If(
                pred=n.BinOp(
                    func=n.SymbolId('<'),
                    args=[
                        n.Int('1'),
                        n.Int('2'),
                    ]
                ),
                if_body=n.Block(
                    exprs=[
                        n.Int('10')
                    ]
                ),
                else_body=n.Block(
                    exprs=[
                        n.Int('20')
                    ]
                ),
            ),
        ]
    )
    result = parser.parse(given)
    # Check the interesting node first for a sharper failure message.
    assert expected.exprs[0] == result.exprs[0]
    assert expected == result
def test_parse_else(parser):
    """``else`` parses both after an ``if`` and as a standalone postfix form."""
    given = maramodule('elses', '''
if (x > 0) { x * 2.0 }
else { 10.0 }
20 else 10
''')
    expected = n.Module(
        name='elses',
        exprs=[
            n.If(
                pred=n.BinOp(
                    func=n.SymbolId('>'),
                    args=[
                        n.ValueId('x'),
                        n.Int(value='0'),
                    ]
                ),
                if_body=n.Block(
                    exprs=[
                        n.BinOp(
                            func=n.SymbolId('*'),
                            args=[
                                n.ValueId('x'),
                                n.Real(value='2.0')
                            ]
                        ),
                    ]
                ),
            ),
            # A free-standing else: no attached expr, only a Block body.
            n.Else(
                expr=None,
                body=n.Block(exprs=[
                    n.Real('10.0')
                ])
            ),
            # Postfix form `20 else 10`: both sides are bare expressions.
            n.Else(
                expr=n.Int('20'),
                body=n.Int('10'),
            ),
        ],
    )
    result = parser.parse(given)
    assert result == expected
def test_parse_postfix_while(parser):
    """Postfix ``while``/``if``/``else`` attach the trailing clause correctly.

    Note: two Python-2-only ``print`` statements (``print expected.exprs[2]``)
    were removed here — they were debug leftovers, produced noise, and are a
    SyntaxError under Python 3 (every other debug print in this file already
    used the call form).
    """
    given = maramodule('test', '''
(x + 2) while (x < 0)
(x + 2) if (x < 0)
(x + 2) else (x < 0)
''')
    expected = n.Module(
        name='test',
        exprs=[
            # `body while pred`
            n.While(
                pred=n.BinOp(
                    func=n.SymbolId('<'),
                    args=[
                        n.ValueId('x'),
                        n.Int(value='0'),
                    ]
                ),
                body=n.BinOp(
                    func=n.SymbolId('+'),
                    args=[
                        n.ValueId('x'),
                        n.Int(value='2')
                    ]
                ),
            ),
            # `body if pred`
            n.If(
                pred=n.BinOp(
                    func=n.SymbolId('<'),
                    args=[
                        n.ValueId('x'),
                        n.Int(value='0'),
                    ]
                ),
                if_body=n.BinOp(
                    func=n.SymbolId('+'),
                    args=[
                        n.ValueId('x'),
                        n.Int(value='2')
                    ]
                ),
            ),
            # `expr else body`
            n.Else(
                expr=n.BinOp(
                    func=n.SymbolId('+'),
                    args=[
                        n.ValueId('x'),
                        n.Int(value='2'),
                    ]
                ),
                body=n.BinOp(
                    func=n.SymbolId('<'),
                    args=[
                        n.ValueId('x'),
                        n.Int(value='0'),
                    ]
                ),
            ),
        ]
    )
    result = parser.parse(given)
    assert expected.exprs[0] == result.exprs[0]
    assert expected.exprs[1] == result.exprs[1]
    assert expected.exprs[2] == result.exprs[2]
    assert expected == result
def test_exprs_and_blocks(parser):
    """Blocks group statements; newlines after `=`/operators and `\\` continue a statement.

    The given string deliberately exercises line continuation: after `=`,
    after a binary operator, and via a literal backslash (the `\\` below is
    consumed by Python, joining the two source lines before parsing).
    """
    given = '''module blocks
block = {
x = 10
y = x
z =
5
t = 10 +
5
t = 10 \
+ 5
}
empty = {}
end'''
    expected = n.Module(
        name='blocks',
        exprs=[
            n.Assign(
                name=n.ValueId('block'),
                value=n.Block(
                    exprs=[
                        n.Assign(name=n.ValueId('x'), value=n.Int('10')),
                        n.Assign(name=n.ValueId('y'), value=n.ValueId('x')),
                        n.Assign(name=n.ValueId('z'), value=n.Int('5')),
                        n.Assign(name=n.ValueId('t'),
                            value=n.BinOp(
                                func=n.SymbolId('+'),
                                args=[n.Int('10'), n.Int('5')]
                            )
                        ),
                        n.Assign(name=n.ValueId('t'),
                            value=n.BinOp(
                                func=n.SymbolId('+'),
                                args=[n.Int('10'), n.Int('5')]
                            )
                        ),
                    ],
                ),
            ),
            n.Assign(
                name=n.ValueId('empty'),
                value=n.Block(
                    exprs=[],
                ),
            ),
        ],
    )  # noqa
    result = parser.parse(given)
    # Compare piecewise first so a failure points at the offending statement.
    assert result.name == expected.name
    assert result.exprs[0].name == expected.exprs[0].name
    assert result.exprs[0].value.exprs[0] == expected.exprs[0].value.exprs[0]
    assert result.exprs[0].value.exprs[1] == expected.exprs[0].value.exprs[1]
    assert result.exprs[0].value.exprs[2] == expected.exprs[0].value.exprs[2]
    assert expected == result
def test_declarations(parser):
    """Untyped ``var``/``val`` declarations parse with an inferred type."""
    given = '''module declarations
var x = 10
val y = 20
end'''
    expected = n.Module(
        name='declarations',
        exprs=[
            n.Var(name=n.ValueId('x'), value=n.Int('10'), type_=n.InferType()),
            n.Val(name=n.ValueId('y'), value=n.Int('20'), type_=n.InferType()),
        ],
    )
    result = parser.parse(given)
    assert expected.exprs[0] == result.exprs[0]
    assert expected.exprs[1] == result.exprs[1]
    assert expected == result
def test_definitions(parser):
    """``def`` forms parse: empty/typed/braced bodies, multiline params, and
    type-bound names (``Int::foo``)."""
    given = maramodule('test', '''
def foo ()
def foo () Int
def foo() {}
def foo(x Int) { 3 + 5 }
def foo(
x,
y
) {
3 +
5
}
def bar(x, y Real) Float { 1000 * 0.9 }
def Int::foo ()
def Int::foo () {}
''')
    expected = n.Module(
        name='test',
        exprs=[
            # Bodyless definition: body is Unit, not an empty Block.
            n.Def(
                name=n.ValueId('foo'),
                param=n.Tuple([]),
                body=n.Unit(),
            ),
            n.Def(
                name=n.ValueId('foo'),
                param=n.Tuple([]),
                body=n.Unit(),
                return_type=n.TypeId('Int'),
            ),
            # Empty braces yield an empty Block.
            n.Def(
                name=n.ValueId('foo'),
                param=n.Tuple([]),
                body=n.Block([]),
            ),
            n.Def(
                name=n.ValueId('foo'),
                param=n.Tuple(values=[
                    n.Param(name=n.ValueId('x'), type_=n.TypeId('Int'))
                ]),
                body=n.Block(exprs=[
                    n.BinOp(func=n.SymbolId('+'), args=[
                        n.Int('3'),
                        n.Int('5'),
                    ])
                ])
            ),
            # Parameters and the body may be split across lines.
            n.Def(
                name=n.ValueId('foo'),
                param=n.Tuple(values=[
                    n.Param(name=n.ValueId('x')),
                    n.Param(name=n.ValueId('y')),
                ]),
                body=n.Block(exprs=[
                    n.BinOp(func=n.SymbolId('+'), args=[
                        n.Int('3'),
                        n.Int('5'),
                    ])
                ])
            ),
            n.Def(
                name=n.ValueId('bar'),
                param=n.Tuple(values=[
                    n.Param(name=n.ValueId('x')),
                    n.Param(name=n.ValueId('y'), type_=n.TypeId('Real')),
                ]),
                return_type=n.TypeId('Float'),
                body=n.Block(exprs=[
                    n.BinOp(func=n.SymbolId('*'), args=[
                        n.Int('1000'),
                        n.Real('0.9'),
                    ]),
                ])
            ),
            # `Int::foo` binds the definition to a type.
            n.Def(
                name=n.Binding(left=n.TypeId('Int'), right=n.ValueId('foo')),
                param=n.Tuple([]),
                body=n.Unit(),
            ),
            n.Def(
                name=n.Binding(left=n.TypeId('Int'), right=n.ValueId('foo')),
                param=n.Tuple([]),
                body=n.Block([]),
            ),
        ],
    )
    result = parser.parse(given)
    assert expected.exprs[0].name == result.exprs[0].name
    assert expected.exprs[0].param == result.exprs[0].param
    assert expected.exprs[0] == result.exprs[0]
    assert expected.exprs[1].name == result.exprs[1].name
    assert expected.exprs[1].param == result.exprs[1].param
    assert expected.exprs[1].body == result.exprs[1].body
    assert expected.exprs[1].return_type == result.exprs[1].return_type
    assert expected == result
def test_parse_tuples(parser):
    """Tuple literals parse: empty, single-element with trailing comma, multi."""
    given = maramodule('tuples', '''
x = ()
x = (1,)
x = (1, 2, 3)
''')
    expected = n.Module(
        name='tuples',
        exprs=[
            n.Assign(
                name=n.ValueId('x'),
                value=n.Tuple(values=[]),
            ),
            n.Assign(
                name=n.ValueId('x'),
                value=n.Tuple(values=[
                    n.Int('1'),
                ]),
            ),
            n.Assign(
                name=n.ValueId('x'),
                value=n.Tuple(values=[
                    n.Int('1'),
                    n.Int('2'),
                    n.Int('3'),
                ]),
            ),
        ]
    )
    # Kept for token-stream debugging, like the sibling tests below.
    stream = parser.simple_stream(given)  # pylint:disable=W0612
    result = parser.parse(given)
    assert expected == result
def test_parse_lists(parser):
    """List literals parse: empty, trailing comma, multi, and multiline forms."""
    given = maramodule('lists', '''
x = []
x = [1, ]
x = [1, 2, 3]
[
(1),
2,
3,
]
''')
    expected = n.Module(
        name='lists',
        exprs=[
            n.Assign(
                name=n.ValueId('x'),
                value=n.List(values=[]),
            ),
            n.Assign(
                name=n.ValueId('x'),
                value=n.List(values=[
                    n.Int('1'),
                ]),
            ),
            n.Assign(
                name=n.ValueId('x'),
                value=n.List(values=[
                    n.Int('1'),
                    n.Int('2'),
                    n.Int('3'),
                ]),
            ),
            # Parenthesized elements and newlines inside brackets are allowed.
            n.List(values=[
                n.Int('1'),
                n.Int('2'),
                n.Int('3'),
            ]),
        ]
    )
    stream = parser.simple_stream(given)  # pylint:disable=W0612
    result = parser.parse(given)
    assert expected == result
def test_parse_kvs(parser):
    """Key/value pairs parse bare, across a line break, and inside lists."""
    given = maramodule('kvs', '''
x : 10
y :
20
[x: 10, y: 20]
[x: 10,
y:
20
]
''')
    expected = n.Module(
        name='kvs',
        exprs=[
            n.KV(key=n.ValueId('x'), value=n.Int('10')),
            # A newline after ':' continues the pair.
            n.KV(key=n.ValueId('y'), value=n.Int('20')),
            n.List(values=[
                n.KV(key=n.ValueId('x'), value=n.Int('10')),
                n.KV(key=n.ValueId('y'), value=n.Int('20')),
            ]),
            n.List(values=[
                n.KV(key=n.ValueId('x'), value=n.Int('10')),
                n.KV(key=n.ValueId('y'), value=n.Int('20')),
            ]),
        ]
    )
    stream = parser.simple_stream(given)  # pylint:disable=W0612
    result = parser.parse(given)
    assert expected == result
def test_parse_comments(parser):
    """`#`, `##`, and `###…###` produce Temp/Doc/Block comment nodes.

    NOTE(review): the BlockComment's expected text must match the whitespace
    of the given string byte-for-byte — keep the two literals in sync.
    """
    given = maramodule('comments', '''
# asdf
## asdf
###
asdf
qwerty
###
''')
    expected = n.Module(name='comments', exprs=[
        n.TempComment(' asdf'),
        n.DocComment(' asdf'),
        n.BlockComment('''
asdf
qwerty
'''),
    ])
    result = parser.parse(given)
    assert expected == result
def test_parse_specifications(parser):
    """``proto``/``trait``/``object`` parse, each with and without a
    parameter list (e.g. ``(T Any)``)."""
    given = maramodule('test', '''
proto Compare {
def foo () {}
def bar () {}
}
proto Compare (T Any) {
def foo () T {}
def bar (t T) {}
}
trait Qua {
def foo () {}
def bar () {}
}
trait Qua (T Any) {
def foo () T {}
def bar (t T) {}
}
object Qua {
val x Int
val y Int
}
object Qua (T Num) {
val x T
val y T
}
''')
    expected = n.Module(name='test',
        exprs=[
            # Unparameterized proto: empty param tuple.
            n.Proto(
                name=n.TypeId(value='Compare'),
                body=n.Block(exprs=[
                    n.Def(
                        name=n.ValueId(value='foo'),
                        param=n.Tuple(values=[]),
                        body=n.Block(exprs=[]),
                        return_type=n.InferType()
                    ),
                    n.Def(
                        name=n.ValueId(value='bar'),
                        param=n.Tuple(values=[]),
                        body=n.Block(exprs=[]),
                        return_type=n.InferType()
                    ),
                ]),
                param=n.Tuple(values=[])
            ),
            # Parameterized proto: type parameter T with bound Any.
            n.Proto(
                name=n.TypeId(value='Compare'),
                body=n.Block(exprs=[
                    n.Def(
                        body=n.Block(exprs=[]),
                        name=n.ValueId(value='foo'),
                        param=n.Tuple(values=[]),
                        return_type=n.TypeId(value='T')
                    ),
                    n.Def(
                        name=n.ValueId(value='bar'),
                        body=n.Block(exprs=[]),
                        param=n.Tuple(values=[
                            n.Param(
                                name=n.ValueId(value='t'),
                                type_=n.TypeId(value='T')
                            ),
                        ]),
                        return_type=n.InferType())
                ]),
                param=n.Tuple(values=[
                    n.Param(
                        name=n.TypeId(value='T'),
                        type_=n.TypeId(value='Any')
                    ),
                ]),
            ),
            n.Trait(
                name=n.TypeId(value='Qua'),
                body=n.Block(exprs=[
                    n.Def(
                        name=n.ValueId(value='foo'),
                        param=n.Tuple(values=[]),
                        body=n.Block(exprs=[]),
                        return_type=n.InferType()
                    ),
                    n.Def(
                        name=n.ValueId(value='bar'),
                        param=n.Tuple(values=[]),
                        body=n.Block(exprs=[]),
                        return_type=n.InferType()
                    ),
                ]),
                param=n.Tuple(values=[])
            ),
            n.Trait(
                name=n.TypeId(value='Qua'),
                body=n.Block(exprs=[
                    n.Def(
                        body=n.Block(exprs=[]),
                        name=n.ValueId(value='foo'),
                        param=n.Tuple(values=[]),
                        return_type=n.TypeId(value='T')
                    ),
                    n.Def(
                        name=n.ValueId(value='bar'),
                        body=n.Block(exprs=[]),
                        param=n.Tuple(values=[
                            n.Param(
                                name=n.ValueId(value='t'),
                                type_=n.TypeId(value='T')
                            ),
                        ]),
                        return_type=n.InferType())
                ]),
                param=n.Tuple(values=[
                    n.Param(
                        name=n.TypeId(value='T'),
                        type_=n.TypeId(value='Any')
                    ),
                ]),
            ),
            # Objects hold uninitialized vals (value is Unit).
            n.Object(
                name=n.TypeId(value='Qua'),
                body=n.Block(exprs=[
                    n.Val(
                        name=n.ValueId(value='x'),
                        value=n.Unit(),
                        type_=n.TypeId('Int'),
                    ),
                    n.Val(
                        name=n.ValueId(value='y'),
                        value=n.Unit(),
                        type_=n.TypeId('Int'),
                    ),
                ]),
                param=n.Tuple(values=[])
            ),
            n.Object(
                name=n.TypeId(value='Qua'),
                body=n.Block(exprs=[
                    n.Val(
                        name=n.ValueId(value='x'),
                        value=n.Unit(),
                        type_=n.TypeId('T'),
                    ),
                    n.Val(
                        name=n.ValueId(value='y'),
                        value=n.Unit(),
                        type_=n.TypeId('T'),
                    ),
                ]),
                param=n.Tuple(values=[
                    n.Param(
                        name=n.TypeId(value='T'),
                        type_=n.TypeId(value='Num')
                    ),
                ]),
            )
        ],
    )
    result = parser.parse(given)
    assert expected == result
def test_name_resolution_program(parser, program_name_resolution):
    """Smoke test: the name-resolution fixture program parses to a non-None AST."""
    assert parser.parse(program_name_resolution) is not None
|
YanjieGao/sparrow | refs/heads/master | deploy/third_party/boto-2.1.1/boto/ec2/elb/instancestate.py | 26 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class InstanceState(object):
    """
    Represents the state of an EC2 Load Balancer Instance
    """

    # SAX element names mapped to the attribute each one populates.
    _ELEMENT_TO_ATTR = {
        'Description': 'description',
        'State': 'state',
        'InstanceId': 'instance_id',
        'ReasonCode': 'reason_code',
    }

    def __init__(self, load_balancer=None, description=None,
                 state=None, instance_id=None, reason_code=None):
        """
        :param load_balancer: the load balancer this instance belongs to
        :param description: human-readable description of the state
        :param state: current state string (e.g. ``InService``)
        :param instance_id: EC2 instance id
        :param reason_code: why the instance is in its current state
        """
        self.load_balancer = load_balancer
        self.description = description
        self.state = state
        self.instance_id = instance_id
        self.reason_code = reason_code

    def __repr__(self):
        return 'InstanceState:(%s,%s)' % (self.instance_id, self.state)

    def startElement(self, name, attrs, connection):
        # No nested elements to hand off to.
        return None

    def endElement(self, name, value, connection):
        # Known elements fill the mapped attribute; anything else is stored
        # verbatim under the element's own name, as the original code did.
        setattr(self, self._ELEMENT_TO_ATTR.get(name, name), value)
|
CanalTP/navitia | refs/heads/dev | source/tyr/tyr/external_service.py | 1 | # coding: utf-8
# Copyright (c) 2001-2021, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, division, unicode_literals
from flask import request
import flask_restful
from flask_restful import marshal_with, marshal, abort
import sqlalchemy
from tyr.fields import external_service_list_fields, external_service_fields
from tyr.formats import external_service_format
from navitiacommon import models, utils
import logging
from tyr.validations import InputJsonValidator
class ExternalService(flask_restful.Resource):
    """REST resource for external services (e.g. free_floatings,
    vehicle_occupancies providers) persisted in the Tyr database."""

    @marshal_with(external_service_list_fields)
    def get(self, id=None, version=0):
        """Return one external service by id, or all of them when no id is given."""
        if id:
            try:
                return {'external_services': [models.ExternalService.find_by_id(id)]}
            except sqlalchemy.orm.exc.NoResultFound:
                # Unknown id: empty list with a 404 status.
                return {'external_services': []}, 404
        else:
            return {'external_services': models.ExternalService.all()}

    @InputJsonValidator(external_service_format)
    def put(self, id=None, version=0):
        """
        Create or update an external service like free_floatings, vehicle_occupancies in db

        Returns 201 on creation, 200 on update; 400 if the commit violates a
        database constraint.
        """
        input_json = request.get_json(force=True, silent=False)
        try:
            service = models.ExternalService.find_by_id(id)
            status = 200
        except sqlalchemy.orm.exc.NoResultFound:
            # No existing row: create a new service with the requested id.
            logging.getLogger(__name__).info("Create a new service {}".format(id))
            service = models.ExternalService(id)
            models.db.session.add(service)
            status = 201
        service.from_json(input_json)
        try:
            models.db.session.commit()
        except sqlalchemy.exc.IntegrityError as ex:
            abort(400, status="error", message=str(ex))
        return {'external_services': [marshal(service, external_service_fields)]}, status

    def delete(self, id=None, version=0):
        """
        Delete an external service in db, i.e. set parameter DISCARDED to TRUE

        Soft delete only: the row is kept but flagged `discarded`.
        """
        if not id:
            abort(400, status="error", message='id is required')
        try:
            provider = models.ExternalService.find_by_id(id)
            provider.discarded = True
            models.db.session.commit()
            return None, 204
        except sqlalchemy.orm.exc.NoResultFound:
            abort(404, status="error", message='object not found')
|
iamahuman/angr | refs/heads/master | angr/misc/hookset.py | 3 | """
These classes perform some python magic that we use to implement the nesting of exploration technique methods.
This process is formalized as a "hooking" of a python method - each exploration technique's methods "hooks" a method of the same name on the simulation manager class.
"""
class HookSet:
    """
    A HookSet is a static class that provides the capability to apply many hooks to an object.
    """

    @staticmethod
    def install_hooks(target, **hooks):
        """
        Given the target `target`, apply the hooks given as keyword arguments to it.
        If any targeted method has already been hooked, the hooks will not be overridden but will instead be pushed
        into a list of pending hooks. The final behavior should be that all hooks call each other in a nested stack.

        :param target:  Any object. Its methods named as keys in `hooks` will be replaced by `HookedMethod` objects.
        :param hooks:   Any keywords will be interpreted as hooks to apply. Each method named will hooked with the
                        coresponding function value.
        """
        for name, hook in hooks.items():
            func = getattr(target, name)
            if not isinstance(func, HookedMethod):
                # First hook on this method: wrap the original callable.
                func = HookedMethod(func)
                setattr(target, name, func)
            func.pending.append(hook)

    @staticmethod
    def remove_hooks(target, **hooks):
        """
        Remove the given hooks from the given target.

        Removing a hook that was never installed is a silent no-op.

        :param target:  The object from which to remove hooks. If all hooks are removed from a given method, the
                        HookedMethod object will be removed and replaced with the original function.
        :param hooks:   Any keywords will be interpreted as hooks to remove. You must provide the exact hook that was
                        applied so that it can it can be identified for removal among any other hooks.
        """
        for name, hook in hooks.items():
            hooked = getattr(target, name)
            # The membership test guarantees remove() cannot raise, so the
            # original try/except ValueError here was unreachable dead code
            # and has been removed; behavior is unchanged.
            if hook in hooked.pending:
                hooked.pending.remove(hook)
                if not hooked.pending:
                    # Last hook gone: restore the original, unhooked function.
                    setattr(target, name, hooked.func)
class HookedMethod:
    """
    A callable wrapper around a method that maintains a stack of nested hooks.

    Calling the wrapper pops the most recently installed hook and invokes it
    with the original method's bound instance. A hook may call this wrapper
    again to delegate further down the stack; when the stack is exhausted the
    original function runs. Each hook is pushed back after its call returns,
    so the stack survives across invocations.

    :param func:    The original bound method providing the base behavior.
    :ivar func:     Same as the eponymous parameter.
    :ivar pending:  Hooks not yet consumed by the current dispatch.
    """

    def __init__(self, func):
        self.func = func
        self.pending = []

    def __repr__(self):
        owner_name = self.func.__self__.__class__.__name__
        return "<HookedMethod(%s.%s, %d pending)>" % \
               (owner_name, self.func.__name__, len(self.pending))

    def __call__(self, *args, **kwargs):
        # Base case: no hooks left, run the wrapped function directly.
        if not self.pending:
            return self.func(*args, **kwargs)
        # Pop-dispatch: run the top hook, then restore it even on error so
        # the hook stack is intact for the next call.
        top_hook = self.pending.pop()
        try:
            return top_hook(self.func.__self__, *args, **kwargs)
        finally:
            self.pending.append(top_hook)
|
pombredanne/scrapy | refs/heads/master | scrapy/core/spidermw.py | 1 | """
Spider Middleware manager
See documentation in docs/topics/spider-middleware.rst
"""
import six
from twisted.python.failure import Failure
from scrapy.middleware import MiddlewareManager
from scrapy.utils.defer import mustbe_deferred
from scrapy.utils.conf import build_component_list
def _isiterable(possible_iterator):
    # True if the object advertises the iterator protocol via __iter__.
    # (Deliberately not iter()-based: on Python 2, str has no __iter__ but
    # is iterable, and middleware results must be real iterables, not text.)
    return hasattr(possible_iterator, '__iter__')
class SpiderMiddlewareManager(MiddlewareManager):
    """Manages the chain of spider middlewares: input/output/exception
    processing around spider callbacks, and start-request processing."""

    component_name = 'spider middleware'

    @classmethod
    def _get_mwlist_from_settings(cls, settings):
        # Middleware classes come from the composite SPIDER_MIDDLEWARES setting.
        return build_component_list(settings._getcomposite('SPIDER_MIDDLEWARES'))

    def _add_middleware(self, mw):
        super(SpiderMiddlewareManager, self)._add_middleware(mw)
        # Input hooks run in declaration order; output/exception/start-request
        # hooks are inserted at the front so they run in reverse order,
        # giving the usual "onion" nesting around the spider.
        if hasattr(mw, 'process_spider_input'):
            self.methods['process_spider_input'].append(mw.process_spider_input)
        if hasattr(mw, 'process_spider_output'):
            self.methods['process_spider_output'].insert(0, mw.process_spider_output)
        if hasattr(mw, 'process_spider_exception'):
            self.methods['process_spider_exception'].insert(0, mw.process_spider_exception)
        if hasattr(mw, 'process_start_requests'):
            self.methods['process_start_requests'].insert(0, mw.process_start_requests)

    def scrape_response(self, scrape_func, response, request, spider):
        """Run the spider-middleware chain around `scrape_func` for one
        response; returns a Deferred firing with the processed output."""
        # Helper producing "ClassName.method_name" for assertion messages.
        fname = lambda f:'%s.%s' % (
            six.get_method_self(f).__class__.__name__,
            six.get_method_function(f).__name__)

        def process_spider_input(response):
            for method in self.methods['process_spider_input']:
                try:
                    result = method(response=response, spider=spider)
                    assert result is None, \
                            'Middleware %s must returns None or ' \
                            'raise an exception, got %s ' \
                            % (fname(method), type(result))
                except:
                    # NOTE(review): bare except is deliberate here — any
                    # middleware failure is routed to the scraper as a
                    # Failure rather than propagating.
                    return scrape_func(Failure(), request, spider)
            return scrape_func(response, request, spider)

        def process_spider_exception(_failure):
            exception = _failure.value
            # First middleware returning a non-None iterable handles the error.
            for method in self.methods['process_spider_exception']:
                result = method(response=response, exception=exception, spider=spider)
                assert result is None or _isiterable(result), \
                    'Middleware %s must returns None, or an iterable object, got %s ' % \
                    (fname(method), type(result))
                if result is not None:
                    return result
            # Unhandled: keep propagating the failure down the errback chain.
            return _failure

        def process_spider_output(result):
            # Each output middleware wraps the (lazy) result iterable in turn.
            for method in self.methods['process_spider_output']:
                result = method(response=response, result=result, spider=spider)
                assert _isiterable(result), \
                    'Middleware %s must returns an iterable object, got %s ' % \
                    (fname(method), type(result))
            return result

        dfd = mustbe_deferred(process_spider_input, response)
        dfd.addErrback(process_spider_exception)
        dfd.addCallback(process_spider_output)
        return dfd

    def process_start_requests(self, start_requests, spider):
        # Chain each middleware's process_start_requests over the iterator.
        return self._process_chain('process_start_requests', start_requests, spider)
|
rversteegen/commandergenius | refs/heads/sdl_android | project/jni/python/src/Lib/uuid.py | 59 | r"""UUID objects (universally unique identifiers) according to RFC 4122.
This module provides immutable UUID objects (class UUID) and the functions
uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5
UUIDs as specified in RFC 4122.
If all you want is a unique ID, you should probably call uuid1() or uuid4().
Note that uuid1() may compromise privacy since it creates a UUID containing
the computer's network address. uuid4() creates a random UUID.
Typical usage:
>>> import uuid
# make a UUID based on the host ID and current time
>>> uuid.uuid1()
UUID('a8098c1a-f86e-11da-bd1a-00112444be1e')
# make a UUID using an MD5 hash of a namespace UUID and a name
>>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org')
UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e')
# make a random UUID
>>> uuid.uuid4()
UUID('16fd2706-8baf-433b-82eb-8c7fada847da')
# make a UUID using a SHA-1 hash of a namespace UUID and a name
>>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org')
UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')
# make a UUID from a string of hex digits (braces and hyphens ignored)
>>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}')
# convert a UUID to a string of hex digits in standard form
>>> str(x)
'00010203-0405-0607-0809-0a0b0c0d0e0f'
# get the raw 16 bytes of the UUID
>>> x.bytes
'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
# make a UUID from a 16-byte string
>>> uuid.UUID(bytes=x.bytes)
UUID('00010203-0405-0607-0809-0a0b0c0d0e0f')
"""
__author__ = 'Ka-Ping Yee <ping@zesty.ca>'

# Human-readable descriptions of the four UUID variant families, returned by
# UUID.variant (see RFC 4122, section 4.1.1).
RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [
    'reserved for NCS compatibility', 'specified in RFC 4122',
    'reserved for Microsoft compatibility', 'reserved for future definition']
class UUID(object):
    """Instances of the UUID class represent UUIDs as specified in RFC 4122.
    UUID objects are immutable, hashable, and usable as dictionary keys.
    Converting a UUID to a string with str() yields something in the form
    '12345678-1234-1234-1234-123456789abc'.  The UUID constructor accepts
    five possible forms: a similar string of hexadecimal digits, or a tuple
    of six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and
    48-bit values respectively) as an argument named 'fields', or a string
    of 16 bytes (with all the integer fields in big-endian order) as an
    argument named 'bytes', or a string of 16 bytes (with the first three
    fields in little-endian order) as an argument named 'bytes_le', or a
    single 128-bit integer as an argument named 'int'.

    UUIDs have these read-only attributes:

        bytes       the UUID as a 16-byte string (containing the six
                    integer fields in big-endian byte order)

        bytes_le    the UUID as a 16-byte string (with time_low, time_mid,
                    and time_hi_version in little-endian byte order)

        fields      a tuple of the six integer fields of the UUID,
                    which are also available as six individual attributes
                    and two derived attributes:

            time_low                the first 32 bits of the UUID
            time_mid                the next 16 bits of the UUID
            time_hi_version         the next 16 bits of the UUID
            clock_seq_hi_variant    the next 8 bits of the UUID
            clock_seq_low           the next 8 bits of the UUID
            node                    the last 48 bits of the UUID

            time                    the 60-bit timestamp
            clock_seq               the 14-bit sequence number

        hex         the UUID as a 32-character hexadecimal string

        int         the UUID as a 128-bit integer

        urn         the UUID as a URN as specified in RFC 4122

        variant     the UUID variant (one of the constants RESERVED_NCS,
                    RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE)

        version     the UUID version number (1 through 5, meaningful only
                    when the variant is RFC_4122)
    """

    def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None,
                       int=None, version=None):
        r"""Create a UUID from either a string of 32 hexadecimal digits,
        a string of 16 bytes as the 'bytes' argument, a string of 16 bytes
        in little-endian order as the 'bytes_le' argument, a tuple of six
        integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version,
        8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as
        the 'fields' argument, or a single 128-bit integer as the 'int'
        argument.  When a string of hex digits is given, curly braces,
        hyphens, and a URN prefix are all optional.  For example, these
        expressions all yield the same UUID:

        UUID('{12345678-1234-5678-1234-567812345678}')
        UUID('12345678123456781234567812345678')
        UUID('urn:uuid:12345678-1234-5678-1234-567812345678')
        UUID(bytes='\x12\x34\x56\x78'*4)
        UUID(bytes_le='\x78\x56\x34\x12\x34\x12\x78\x56' +
                      '\x12\x34\x56\x78\x12\x34\x56\x78')
        UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678))
        UUID(int=0x12345678123456781234567812345678)

        Exactly one of 'hex', 'bytes', 'bytes_le', 'fields', or 'int' must
        be given.  The 'version' argument is optional; if given, the resulting
        UUID will have its variant and version set according to RFC 4122,
        overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'.
        """

        # Exactly one constructor argument may be supplied.
        if [hex, bytes, bytes_le, fields, int].count(None) != 4:
            raise TypeError('need one of hex, bytes, bytes_le, fields, or int')
        if hex is not None:
            # Strip optional URN prefix, braces, and hyphens before parsing.
            hex = hex.replace('urn:', '').replace('uuid:', '')
            hex = hex.strip('{}').replace('-', '')
            if len(hex) != 32:
                raise ValueError('badly formed hexadecimal UUID string')
            int = long(hex, 16)
        if bytes_le is not None:
            if len(bytes_le) != 16:
                raise ValueError('bytes_le is not a 16-char string')
            # Byte-swap the first three fields into big-endian order.
            bytes = (bytes_le[3] + bytes_le[2] + bytes_le[1] + bytes_le[0] +
                     bytes_le[5] + bytes_le[4] + bytes_le[7] + bytes_le[6] +
                     bytes_le[8:])
        if bytes is not None:
            if len(bytes) != 16:
                raise ValueError('bytes is not a 16-char string')
            int = long(('%02x'*16) % tuple(map(ord, bytes)), 16)
        if fields is not None:
            if len(fields) != 6:
                raise ValueError('fields is not a 6-tuple')
            (time_low, time_mid, time_hi_version,
             clock_seq_hi_variant, clock_seq_low, node) = fields
            # Range-check each field against its bit width.
            if not 0 <= time_low < 1<<32L:
                raise ValueError('field 1 out of range (need a 32-bit value)')
            if not 0 <= time_mid < 1<<16L:
                raise ValueError('field 2 out of range (need a 16-bit value)')
            if not 0 <= time_hi_version < 1<<16L:
                raise ValueError('field 3 out of range (need a 16-bit value)')
            if not 0 <= clock_seq_hi_variant < 1<<8L:
                raise ValueError('field 4 out of range (need an 8-bit value)')
            if not 0 <= clock_seq_low < 1<<8L:
                raise ValueError('field 5 out of range (need an 8-bit value)')
            if not 0 <= node < 1<<48L:
                raise ValueError('field 6 out of range (need a 48-bit value)')
            # Pack the six fields into the single 128-bit integer.
            clock_seq = (clock_seq_hi_variant << 8L) | clock_seq_low
            int = ((time_low << 96L) | (time_mid << 80L) |
                   (time_hi_version << 64L) | (clock_seq << 48L) | node)
        if int is not None:
            if not 0 <= int < 1<<128L:
                raise ValueError('int is out of range (need a 128-bit value)')
        if version is not None:
            if not 1 <= version <= 5:
                raise ValueError('illegal version number')
            # Set the variant to RFC 4122.
            int &= ~(0xc000 << 48L)
            int |= 0x8000 << 48L
            # Set the version number.
            int &= ~(0xf000 << 64L)
            int |= version << 76L
        # Bypass __setattr__, which rejects writes to keep the UUID immutable.
        self.__dict__['int'] = int

    def __cmp__(self, other):
        # UUIDs order by their 128-bit integer value.
        if isinstance(other, UUID):
            return cmp(self.int, other.int)
        return NotImplemented

    def __hash__(self):
        return hash(self.int)

    def __int__(self):
        return self.int

    def __repr__(self):
        return 'UUID(%r)' % str(self)

    def __setattr__(self, name, value):
        # All attribute writes are rejected: UUIDs are immutable.
        raise TypeError('UUID objects are immutable')

    def __str__(self):
        hex = '%032x' % self.int
        return '%s-%s-%s-%s-%s' % (
            hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:])

    def get_bytes(self):
        # Big-endian 16-byte representation.
        bytes = ''
        for shift in range(0, 128, 8):
            bytes = chr((self.int >> shift) & 0xff) + bytes
        return bytes

    bytes = property(get_bytes)

    def get_bytes_le(self):
        # Like bytes, but with the first three fields byte-swapped.
        bytes = self.bytes
        return (bytes[3] + bytes[2] + bytes[1] + bytes[0] +
                bytes[5] + bytes[4] + bytes[7] + bytes[6] + bytes[8:])

    bytes_le = property(get_bytes_le)

    def get_fields(self):
        return (self.time_low, self.time_mid, self.time_hi_version,
                self.clock_seq_hi_variant, self.clock_seq_low, self.node)

    fields = property(get_fields)

    def get_time_low(self):
        return self.int >> 96L

    time_low = property(get_time_low)

    def get_time_mid(self):
        return (self.int >> 80L) & 0xffff

    time_mid = property(get_time_mid)

    def get_time_hi_version(self):
        return (self.int >> 64L) & 0xffff

    time_hi_version = property(get_time_hi_version)

    def get_clock_seq_hi_variant(self):
        return (self.int >> 56L) & 0xff

    clock_seq_hi_variant = property(get_clock_seq_hi_variant)

    def get_clock_seq_low(self):
        return (self.int >> 48L) & 0xff

    clock_seq_low = property(get_clock_seq_low)

    def get_time(self):
        # 60-bit timestamp assembled from three fields.
        return (((self.time_hi_version & 0x0fffL) << 48L) |
                (self.time_mid << 32L) | self.time_low)

    time = property(get_time)

    def get_clock_seq(self):
        # 14-bit sequence number (variant bits masked off).
        return (((self.clock_seq_hi_variant & 0x3fL) << 8L) |
                self.clock_seq_low)

    clock_seq = property(get_clock_seq)

    def get_node(self):
        return self.int & 0xffffffffffff

    node = property(get_node)

    def get_hex(self):
        return '%032x' % self.int

    hex = property(get_hex)

    def get_urn(self):
        return 'urn:uuid:' + str(self)

    urn = property(get_urn)

    def get_variant(self):
        # Decode the variant from the high bits of clock_seq_hi_variant.
        if not self.int & (0x8000 << 48L):
            return RESERVED_NCS
        elif not self.int & (0x4000 << 48L):
            return RFC_4122
        elif not self.int & (0x2000 << 48L):
            return RESERVED_MICROSOFT
        else:
            return RESERVED_FUTURE

    variant = property(get_variant)

    def get_version(self):
        # The version bits are only meaningful for RFC 4122 UUIDs.
        if self.variant == RFC_4122:
            return int((self.int >> 76L) & 0xf)

    version = property(get_version)
def _find_mac(command, args, hw_identifiers, get_index):
    """Scan the output of a system command for a hardware (MAC) address.

    Runs `command args` (searched in a few standard directories), lowercases
    and splits each output line, and when a word matches one of
    `hw_identifiers`, parses the word at `get_index(i)` as a hex MAC address.
    Returns the address as an int, or None if nothing was found.
    """
    import os
    for exe_dir in ['', '/sbin/', '/usr/sbin']:
        executable = os.path.join(exe_dir, command)
        if not os.path.exists(executable):
            continue
        try:
            # LC_ALL to get English output, 2>/dev/null to
            # prevent output on stderr
            cmd = 'LC_ALL=C %s %s 2>/dev/null' % (executable, args)
            pipe = os.popen(cmd)
        except IOError:
            continue
        for line in pipe:
            words = line.lower().split()
            for i in range(len(words)):
                if words[i] in hw_identifiers:
                    # Guard the lookup/parse: the identifier can be the last
                    # word on the line (IndexError) or the neighbouring word
                    # may not be a hex address (ValueError) — in either case
                    # keep scanning instead of crashing (CPython issue #1381).
                    try:
                        return int(words[get_index(i)].replace(':', ''), 16)
                    except (ValueError, IndexError):
                        pass
    return None
def _ifconfig_getnode():
    """Get the hardware address on Unix by running ifconfig."""
    # This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes.
    for args in ('', '-a', '-av'):
        mac = _find_mac('ifconfig', args, ['hwaddr', 'ether'], lambda i: i+1)
        if mac:
            return mac
    import socket
    ip_addr = socket.gethostbyname(socket.gethostname())
    # Try getting the MAC addr from arp based on our IP address (Solaris).
    mac = _find_mac('arp', '-an', [ip_addr], lambda i: -1)
    if mac:
        return mac
    # This might work on HP-UX.
    mac = _find_mac('lanscan', '-ai', ['lan0'], lambda i: 0)
    if mac:
        return mac
    # All strategies failed; the caller falls back to other getters.
    return None
def _ipconfig_getnode():
    """Get the hardware address on Windows by running ipconfig.exe."""
    import os, re
    dirs = ['', r'c:\windows\system32', r'c:\winnt\system32']
    try:
        # Prefer the real system directory if ctypes can report it.
        import ctypes
        buffer = ctypes.create_string_buffer(300)
        ctypes.windll.kernel32.GetSystemDirectoryA(buffer, 300)
        dirs.insert(0, buffer.value.decode('mbcs'))
    except:
        pass
    for dir in dirs:
        try:
            pipe = os.popen(os.path.join(dir, 'ipconfig') + ' /all')
        except IOError:
            continue
        # Look for a 'xx-xx-xx-xx-xx-xx' physical-address value; returns
        # None implicitly when no adapter line matches.
        for line in pipe:
            value = line.split(':')[-1].strip().lower()
            if re.match('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value):
                return int(value.replace('-', ''), 16)
def _netbios_getnode():
    """Get the hardware address on Windows using NetBIOS calls.
    See http://support.microsoft.com/kb/118623 for details."""
    # Requires the pywin32 extensions (win32wnet, netbios).
    import win32wnet, netbios
    ncb = netbios.NCB()
    ncb.Command = netbios.NCBENUM
    ncb.Buffer = adapters = netbios.LANA_ENUM()
    adapters._pack()
    if win32wnet.Netbios(ncb) != 0:
        return
    adapters._unpack()
    # Query each LAN adapter in turn; return the first MAC we can read.
    for i in range(adapters.length):
        ncb.Reset()
        ncb.Command = netbios.NCBRESET
        ncb.Lana_num = ord(adapters.lana[i])
        if win32wnet.Netbios(ncb) != 0:
            continue
        ncb.Reset()
        ncb.Command = netbios.NCBASTAT
        ncb.Lana_num = ord(adapters.lana[i])
        ncb.Callname = '*'.ljust(16)
        ncb.Buffer = status = netbios.ADAPTER_STATUS()
        if win32wnet.Netbios(ncb) != 0:
            continue
        status._unpack()
        # Pack the six address bytes into a 48-bit integer.
        bytes = map(ord, status.adapter_address)
        return ((bytes[0]<<40L) + (bytes[1]<<32L) + (bytes[2]<<24L) +
                (bytes[3]<<16L) + (bytes[4]<<8L) + bytes[5])
# Thanks to Thomas Heller for ctypes and for his help with its use here.

# If ctypes is available, use it to find system routines for UUID generation.
_uuid_generate_random = _uuid_generate_time = _UuidCreate = None
try:
    import ctypes, ctypes.util

    # The uuid_generate_* routines are provided by libuuid on at least
    # Linux and FreeBSD, and provided by libc on Mac OS X.
    for libname in ['uuid', 'c']:
        try:
            lib = ctypes.CDLL(ctypes.util.find_library(libname))
        except:
            continue
        if hasattr(lib, 'uuid_generate_random'):
            _uuid_generate_random = lib.uuid_generate_random
        if hasattr(lib, 'uuid_generate_time'):
            _uuid_generate_time = lib.uuid_generate_time

    # On Windows prior to 2000, UuidCreate gives a UUID containing the
    # hardware address.  On Windows 2000 and later, UuidCreate makes a
    # random UUID and UuidCreateSequential gives a UUID containing the
    # hardware address.  These routines are provided by the RPC runtime.
    # NOTE:  at least on Tim's WinXP Pro SP2 desktop box, while the last
    # 6 bytes returned by UuidCreateSequential are fixed, they don't appear
    # to bear any relationship to the MAC address of any network device
    # on the box.
    try:
        lib = ctypes.windll.rpcrt4
    except:
        lib = None
    _UuidCreate = getattr(lib, 'UuidCreateSequential',
                          getattr(lib, 'UuidCreate', None))
except:
    # ctypes itself may be unavailable; the command-line based fallbacks
    # above are used instead.
    pass
def _unixdll_getnode():
    """Get the hardware address on Unix using ctypes."""
    # Generate a time-based UUID via libuuid and extract its node field.
    _buffer = ctypes.create_string_buffer(16)
    _uuid_generate_time(_buffer)
    return UUID(bytes=_buffer.raw).node
def _windll_getnode():
    """Get the hardware address on Windows using ctypes."""
    # _UuidCreate returns 0 (RPC_S_OK) on success.
    _buffer = ctypes.create_string_buffer(16)
    if _UuidCreate(_buffer) == 0:
        return UUID(bytes=_buffer.raw).node
def _random_getnode():
    """Get a random node ID, with eighth bit set as suggested by RFC 4122."""
    # Setting the multicast bit (0x010000000000) guarantees the value can
    # never collide with a real IEEE 802 hardware address.
    import random
    return random.randrange(0, 1<<48L) | 0x010000000000L
# Cached node value; computed at most once per process.
_node = None

def getnode():
    """Get the hardware address as a 48-bit positive integer.

    The first time this runs, it may launch a separate program, which could
    be quite slow.  If all attempts to obtain the hardware address fail, we
    choose a random 48-bit number with its eighth bit set to 1 as recommended
    in RFC 4122.
    """
    global _node
    if _node is not None:
        return _node

    import sys
    if sys.platform == 'win32':
        getters = [_windll_getnode, _netbios_getnode, _ipconfig_getnode]
    else:
        getters = [_unixdll_getnode, _ifconfig_getnode]

    # Try each platform-specific getter in order; _random_getnode is the
    # last resort and always yields a value, so the loop always returns.
    for getter in getters + [_random_getnode]:
        try:
            _node = getter()
        except:
            continue
        if _node is not None:
            return _node
# Last timestamp handed out by uuid1(), used to guarantee monotonicity.
_last_timestamp = None

def uuid1(node=None, clock_seq=None):
    """Generate a UUID from a host ID, sequence number, and the current time.

    If 'node' is not given, getnode() is used to obtain the hardware
    address.  If 'clock_seq' is given, it is used as the sequence number;
    otherwise a random 14-bit sequence number is chosen."""

    # When the system provides a version-1 UUID generator, use it (but don't
    # use UuidCreate here because its UUIDs don't conform to RFC 4122).
    if _uuid_generate_time and node is clock_seq is None:
        _buffer = ctypes.create_string_buffer(16)
        _uuid_generate_time(_buffer)
        return UUID(bytes=_buffer.raw)

    global _last_timestamp
    import time
    nanoseconds = int(time.time() * 1e9)
    # 0x01b21dd213814000 is the number of 100-ns intervals between the
    # UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00.
    timestamp = int(nanoseconds/100) + 0x01b21dd213814000L
    # NOTE(review): on the first call _last_timestamp is None; the
    # int-vs-None comparison below only works on Python 2.
    if timestamp <= _last_timestamp:
        timestamp = _last_timestamp + 1
    _last_timestamp = timestamp
    if clock_seq is None:
        import random
        clock_seq = random.randrange(1<<14L) # instead of stable storage
    # Split the 60-bit timestamp and 14-bit clock sequence into the
    # RFC 4122 field layout.
    time_low = timestamp & 0xffffffffL
    time_mid = (timestamp >> 32L) & 0xffffL
    time_hi_version = (timestamp >> 48L) & 0x0fffL
    clock_seq_low = clock_seq & 0xffL
    clock_seq_hi_variant = (clock_seq >> 8L) & 0x3fL
    if node is None:
        node = getnode()
    return UUID(fields=(time_low, time_mid, time_hi_version,
                        clock_seq_hi_variant, clock_seq_low, node), version=1)
def uuid3(namespace, name):
    """Generate a UUID from the MD5 hash of a namespace UUID and a name."""
    # Python 2 only: namespace.bytes is a str, so it concatenates with a
    # str name directly.
    from hashlib import md5
    hash = md5(namespace.bytes + name).digest()
    return UUID(bytes=hash[:16], version=3)
def uuid4():
    """Generate a random UUID."""

    # When the system provides a version-4 UUID generator, use it.
    if _uuid_generate_random:
        _buffer = ctypes.create_string_buffer(16)
        _uuid_generate_random(_buffer)
        return UUID(bytes=_buffer.raw)

    # Otherwise, get randomness from urandom or the 'random' module.
    try:
        import os
        return UUID(bytes=os.urandom(16), version=4)
    except:
        import random
        # NOTE(review): this passes a *list* of 1-char strings as 'bytes';
        # it relies on the UUID constructor accepting any length-16
        # sequence of characters -- confirm against the constructor.
        bytes = [chr(random.randrange(256)) for i in range(16)]
        return UUID(bytes=bytes, version=4)
def uuid5(namespace, name):
    """Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
    # Only the first 16 of SHA-1's 20 digest bytes are used.
    from hashlib import sha1
    hash = sha1(namespace.bytes + name).digest()
    return UUID(bytes=hash[:16], version=5)
# The following standard UUIDs are for use with uuid3() or uuid5().
# The values are fixed by RFC 4122, Appendix C.
NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8')
|
anryko/ansible | refs/heads/devel | lib/ansible/modules/remote_management/wakeonlan.py | 52 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Dag Wieers <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: wakeonlan
version_added: '2.2'
short_description: Send a magic Wake-on-LAN (WoL) broadcast packet
description:
- The C(wakeonlan) module sends magic Wake-on-LAN (WoL) broadcast packets.
options:
mac:
description:
- MAC address to send Wake-on-LAN broadcast packet for.
required: true
broadcast:
description:
- Network broadcast address to use for broadcasting magic Wake-on-LAN packet.
default: 255.255.255.255
port:
description:
- UDP port to use for magic Wake-on-LAN packet.
default: 7
todo:
- Add arping support to check whether the system is up (before and after)
- Enable check-mode support (when we have arping support)
- Does not have SecureOn password support
notes:
- This module sends a magic packet, without knowing whether it worked
- Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS)
- Some BIOSes have a different (configurable) Wake-on-LAN boot order (i.e. PXE first).
seealso:
- module: win_wakeonlan
author:
- Dag Wieers (@dagwieers)
'''
EXAMPLES = r'''
- name: Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66
wakeonlan:
mac: '00:00:5E:00:53:66'
broadcast: 192.0.2.23
delegate_to: localhost
- wakeonlan:
mac: 00:00:5E:00:53:66
port: 9
delegate_to: localhost
'''
RETURN = r'''
# Default return values
'''
import socket
import struct
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def wakeonlan(module, mac, broadcast, port):
    """ Send a magic Wake-on-LAN packet.

    module: AnsibleModule, used for check mode and error reporting.
    mac: target MAC address, 12 hex digits with or without a single
         separator character (e.g. ':' or '-').
    broadcast: network broadcast address to send the packet to.
    port: UDP port to send the packet on.

    Fails the module on a malformed MAC address or on a socket error;
    returns None on success.  In check mode nothing is sent.
    """
    mac_orig = mac

    # Remove possible separator from MAC address (12 hex digits plus
    # 5 separator characters).
    if len(mac) == 12 + 5:
        mac = mac.replace(mac[2], '')

    # If we don't end up with 12 hexadecimal characters, fail
    if len(mac) != 12:
        module.fail_json(msg="Incorrect MAC address length: %s" % mac_orig)

    # Test if it converts to an integer, otherwise fail
    try:
        int(mac, 16)
    except ValueError:
        module.fail_json(msg="Incorrect MAC address format: %s" % mac_orig)

    # Create payload for magic packet: six 0xff bytes followed by 20
    # repetitions of the target MAC.  Build it with a single join rather
    # than repeated b''.join concatenation, which copied the whole
    # payload on every iteration (quadratic).
    payload_hex = ''.join(['FFFFFFFFFFFF', mac * 20])
    data = b''.join(struct.pack('B', int(payload_hex[i: i + 2], 16))
                    for i in range(0, len(payload_hex), 2))

    # Broadcast payload to network
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    if not module.check_mode:
        try:
            sock.sendto(data, (broadcast, port))
        except socket.error as e:
            sock.close()
            module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    sock.close()
def main():
    """Module entry point: parse arguments and send the magic packet."""
    module = AnsibleModule(
        argument_spec=dict(
            mac=dict(type='str', required=True),
            broadcast=dict(type='str', default='255.255.255.255'),
            port=dict(type='int', default=7),
        ),
        supports_check_mode=True,
    )

    params = module.params
    wakeonlan(module, params['mac'], params['broadcast'], params['port'])

    # The module always reports a change: a magic packet was (or, in
    # check mode, would have been) sent.
    module.exit_json(changed=True)


if __name__ == '__main__':
    main()
|
TangHao1987/intellij-community | refs/heads/master | python/testData/paramInfo/KwdFunction.py | 83 | def foo(a, b, **c):
pass
foo(<arg1>1, <arg2>2, <arg3>x=3, <arg4>**{'y':4}, <arg5>)
|
acshan/odoo | refs/heads/8.0 | addons/website_event_sale/tests/test_ui.py | 338 | import openerp.tests
@openerp.tests.common.at_install(False)
@openerp.tests.common.post_install(True)
class TestUi(openerp.tests.HttpCase):
    """Run the 'event_buy_tickets' website tour as three different users."""

    # Tour invocation and its readiness expression, shared by all tests.
    TOUR = "openerp.Tour.run('event_buy_tickets', 'test')"
    READY = "openerp.Tour.tours.event_buy_tickets"

    def test_admin(self):
        """Buy tickets as the admin user (default login)."""
        self.phantom_js("/", self.TOUR, self.READY)

    def test_demo(self):
        """Buy tickets as the demo user."""
        self.phantom_js("/", self.TOUR, self.READY, login="demo", password="demo")

    def test_public(self):
        """Buy tickets as an anonymous/public visitor."""
        self.phantom_js("/", self.TOUR, self.READY, login=None)
|
morucci/repoxplorer | refs/heads/master | repoxplorer/index/projects.py | 1 | # Copyright 2016, Fabien Boucher
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import logging
from datetime import datetime
from elasticsearch.helpers import bulk
from elasticsearch.helpers import BulkIndexError
from elasticsearch.helpers import scan as scanner
from pecan import conf
from repoxplorer import index
from repoxplorer.index import YAMLDefinition
from repoxplorer.index import add_params
from repoxplorer.index import date2epoch
logger = logging.getLogger(__name__)
project_templates_schema = r"""
$schema: http://json-schema.org/draft-04/schema
definitions:
release:
type: object
additionalProperties: false
required:
- name
- date
properties:
name:
type: string
date:
type: string
type: object
properties:
project-templates:
type: object
additionalProperties: false
patternProperties:
^[a-zA-Z0-9_/\. \-\+]+$:
type: object
additionalProperties: false
required:
- uri
- branches
properties:
uri:
type: string
gitweb:
type: string
branches:
type: array
items:
type: string
minItems: 1
tags:
type: array
items:
type: string
paths:
type: array
items:
type: string
parsers:
type: array
items:
type: string
releases:
type: array
items:
$ref: "#/definitions/release"
index-tags:
type: boolean
"""
project_templates_example = """
templates:
default:
uri: https://github.com/%(name)s
branches:
- master
- stable/mitaka
- stable/newton
- stable/ocata
gitweb: https://github.com/%(name)s/commit/%%(sha)s
parsers:
- .*(blueprint) ([^ .]+).*
releases:
- name: 1.0
date: 2016-12-20
- name: 2.0
date: 2016-12-31
tags:
- openstack
- language:python
- type:cloud
paths:
- project/tests/
index-tags: true
"""
projects_schema = r"""
$schema: http://json-schema.org/draft-04/schema
definitions:
release:
type: object
additionalProperties: false
required:
- name
- date
properties:
name:
type: string
date:
type: string
type: object
properties:
projects:
type: object
additionalProperties: false
patternProperties:
^[a-zA-Z0-9_/\. \-\+]+$:
type: object
additionalProperties: false
properties:
description:
type: string
logo:
type: string
meta-ref:
type: boolean
bots-group:
type: string
releases:
type: array
items:
$ref: "#/definitions/release"
repos:
type: object
additionalProperties: false
patternProperties:
^[a-zA-Z0-9_/\. \-\+]+$:
type: object
additionalProperties: false
required:
- template
properties:
template:
type: string
description:
type: string
paths:
type: array
items:
type: string
tags:
type: array
items:
type: string
forks:
type: integer
stars:
type: integer
watchers:
type: integer
branches:
type: array
items:
type: string
minItems: 1
"""
projects_example = """
projects:
Barbican:
description: The Barbican project
bots-group: openstack-ci-bots
releases:
- name: ocata
date: 2017-02-22
repos:
openstack/barbican:
template: default
openstack/python-barbicanclient:
template: default
description: The barbican client
forks: 10
watchers: 20
stars: 30
tags:
- client
- language:python
paths:
- project/tests/
Swift:
repos:
openstack/swift:
template: default
branches:
- dev
openstack/python-swiftclient:
template: default
"""
class EProjects(object):
    """Low-level Elasticsearch accessor for project definitions.

    Two kinds of documents share the 'projects' doc type: full project
    descriptions (keyed by project id) and small association documents
    mapping a repository "fullrid" to the projects containing it.
    """

    # Static ES mapping shared by both document kinds.
    PROPERTIES = {
        "aname": {"type": "text"},
        "name": {"type": "keyword"},
        "description": {"type": "text"},
        "logo": {"type": "binary"},
        "meta-ref": {"type": "boolean"},
        "bots-group": {"type": "keyword"},
        "index-tags": {"type": "boolean"},
        "project": {"type": "keyword"},
        "releases": {
            "type": "nested",
            "properties": {
                "name": {"type": "keyword"},
                "date": {"type": "keyword"},
            }
        },
        "refs": {
            "type": "nested",
            "properties": {
                "aname": {"type": "text"},
                "name": {"type": "keyword"},
                "description": {"type": "text"},
                "forks": {"type": "integer"},
                "watchers": {"type": "integer"},
                "stars": {"type": "integer"},
                "uri": {"type": "keyword"},
                "gitweb": {"type": "keyword"},
                "branch": {"type": "keyword"},
                "tags": {"type": "keyword"},
                "fullrid": {"type": "keyword"},
                "shortrid": {"type": "keyword"},
                "paths": {"type": "keyword"},
                "parsers": {"type": "keyword"},
                "index-tags": {"type": "boolean"},
                "releases": {
                    "type": "nested",
                    "properties": {
                        "name": {"type": "keyword"},
                        "date": {"type": "keyword"},
                    }
                }
            }
        }
    }

    def __init__(self, connector=None):
        """Bind to an index.Connector and install the mapping if missing."""
        self.es = connector.es
        self.ic = connector.ic
        self.index = connector.index
        self.dbname = 'projects'
        self.mapping = {
            self.dbname: {
                "properties": self.PROPERTIES,
            }
        }
        if not self.ic.exists_type(index=self.index,
                                   doc_type=self.dbname):
            kwargs = add_params(self.es)
            self.ic.put_mapping(
                index=self.index, doc_type=self.dbname, body=self.mapping,
                **kwargs)

    def manage_bulk_err(self, exc):
        """Swallow duplicate-document bulk errors, re-raise anything else.

        exc is expected to be a BulkIndexError (or a compatible
        2-sequence) whose second element is the list of per-document
        error dicts.
        """
        # BulkIndexError exposes the per-document errors as exc.errors
        # (elasticsearch-py); exception indexing (exc[1]) only works on
        # Python 2.  Prefer the attribute when present.
        errors = getattr(exc, 'errors', None)
        if errors is None:
            errors = exc[1]
        errs = [e['create']['error'] for e in errors]
        # Bug fix: the previous check built a list of literal True
        # values ([True for e in errs if ...]), so all() was always
        # true and genuine indexing failures were silently ignored.
        # Compare every error type explicitly instead.
        if not all(e['type'] == 'document_already_exists_exception'
                   for e in errs):
            raise Exception(
                "Unable to create one or more doc: %s" % errs)

    def create(self, docs):
        """Bulk-create (id, body) documents; duplicates are tolerated."""
        def gen():
            for pid, doc in docs:
                d = {}
                d['_index'] = self.index
                d['_type'] = self.dbname
                d['_op_type'] = 'create'
                d['_id'] = pid
                d['_source'] = doc
                yield d
        try:
            bulk(self.es, gen())
        except BulkIndexError as exc:
            self.manage_bulk_err(exc)
        self.es.indices.refresh(index=self.index)

    def delete_all(self):
        """Bulk-delete every document of the 'projects' doc type."""
        def gen(docs):
            for doc in docs:
                d = {}
                d['_index'] = self.index
                d['_type'] = self.dbname
                d['_op_type'] = 'delete'
                d['_id'] = doc['_id']
                yield d
        bulk(self.es,
             gen(self.get_all(source=False)))
        self.es.indices.refresh(index=self.index)

    def load(self, projects, rid2projects):
        """Replace the index content with fresh project definitions."""
        self.delete_all()
        self.create(projects.items())
        self.create(rid2projects.items())

    def get_all(self, source=True, type=None):
        """Scan and yield every document.  `type` is unused (kept for
        backward compatibility)."""
        query = {
            '_source': source,
            'query': {
                'match_all': {}
            }
        }
        return scanner(self.es, query=query, index=self.index)

    def get_by_id(self, id, source=True):
        """Fetch one document by id; log and return None on failure."""
        try:
            res = self.es.get(index=self.index,
                              doc_type=self.dbname,
                              _source=source,
                              id=id)
            return res['_source']
        except Exception as e:
            logger.error('Unable to get the doc. %s' % e)

    def exists(self, id):
        """Return True if a document with this id exists."""
        return self.es.exists(
            index=self.index, doc_type=self.dbname, id=id)

    def get_by_attr_match(self, attribute, value, source=True):
        """Exact-term search on a top-level attribute.

        Returns (took, total_hits, [doc sources]).
        """
        params = {'index': self.index}
        body = {
            "query": {
                'bool': {
                    'must': {'term': {attribute: value}},
                }
            }
        }
        params['body'] = body
        params['_source'] = source
        # TODO(fbo): Improve by doing it by bulk instead
        params['size'] = 10000
        res = self.es.search(**params)
        took = res['took']
        hits = res['hits']['total']
        docs = [r['_source'] for r in res['hits']['hits']]
        return took, hits, docs

    def get_by_nested_attr_match(
            self, attribute, values, source=True,
            inner_source=True, inner_hits_max=100):
        """Term search on a nested 'refs.<attribute>' field (OR over values).

        Returns (took, total_hits, [doc sources], [inner_hits]).
        """
        if not isinstance(values, list):
            values = (values,)
        params = {'index': self.index}
        body = {
            "query": {
                "bool": {
                    "must": {
                        "nested": {
                            "path": "refs",
                            "inner_hits": {
                                "_source": inner_source,
                                "size": inner_hits_max,
                            },
                            "query": {
                                "bool": {
                                    "should": [
                                        {"term":
                                            {"refs.%s" % attribute: value}}
                                        for value in values
                                    ]
                                }
                            }
                        }
                    }
                }
            }
        }
        params['body'] = body
        params['_source'] = source
        # TODO(fbo): Improve by doing it by bulk instead
        params['size'] = 10000
        res = self.es.search(**params)
        inner_hits = [r['inner_hits'] for r in res['hits']['hits']]
        took = res['took']
        hits = res['hits']['total']
        docs = [r['_source'] for r in res['hits']['hits']]
        return took, hits, docs, inner_hits

    def get_projects_by_fullrids(self, fullrids):
        """Multi-get association documents by their fullrid ids; log and
        return None on failure."""
        body = {"ids": fullrids}
        try:
            res = self.es.mget(index=self.index,
                               doc_type=self.dbname,
                               _source=True,
                               body=body)
            return res['docs']
        except Exception as e:
            logger.error('Unable to get projects by fullrids. %s' % e)
class Projects(YAMLDefinition):
    """ This class manages definition of projects

    It merges the YAML definition files, enriches repository entries
    from their templates, flattens them into ES documents and exposes
    read accessors backed by the EProjects index.
    """
    def __init__(self, db_path=None, db_default_file=None, db_cache_path=None,
                 con=None, dump_yaml_in_index=None, vonly=False):
        # vonly=True gives a validation-only instance: no ES connection.
        self.db_path = db_path or conf.get('db_path')
        self.db_default_file = db_default_file or conf.get('db_default_file')
        self.db_cache_path = db_cache_path or conf.get('db_cache_path')
        if vonly:
            return
        # Use a separate index for projects (same as for users) as mapping
        # name/type collision will occured as commits have dynamic mapping
        self.eprojects = EProjects(
            connector=(con or index.Connector(index_suffix='projects')))
        self.el_version = self.eprojects.es.info().get(
            'version', {}).get('number', '')
        if dump_yaml_in_index:
            # Re-read the YAML definitions, validate them and replace the
            # index content with the flattened documents.
            YAMLDefinition.__init__(
                self, self.db_path, self.db_default_file, self.db_cache_path)
            issues = self.validate()
            if issues:
                raise RuntimeError(issues)
            self._enrich_projects()
            projects, rid2projects = self._flatten_projects()
            self.eprojects.load(projects, rid2projects)
    def _merge(self):
        """ Merge self.data and inherites from default_data
        """
        merged_templates = {}
        merged_projects = {}
        for d in self.data:
            templates = d.get('project-templates', {})
            projects = d.get('projects', {})
            merged_templates.update(copy.copy(templates))
            for p, v in projects.items():
                merged_projects.setdefault(p, copy.copy(v))
                merged_projects[p]['repos'].update(copy.copy(v['repos']))
        self.templates = {}
        self.projects = {}
        if self.default_data:
            # Definitions from the default file are the base; per-file
            # definitions override them.
            self.templates = copy.copy(
                self.default_data.get('project-templates', {}))
            self.projects = copy.copy(
                self.default_data.get('projects', {}))
        self.templates.update(merged_templates)
        self.projects.update(merged_projects)
    def _enrich_projects(self):
        """Expand each repo entry from its template and normalize fields."""
        for detail in list(self.projects.values()):
            if 'meta-ref' not in detail:
                detail['meta-ref'] = False
            for rid, repo in list(detail['repos'].items()):
                # Save tags mentioned for a repo
                tags = []
                if 'tags' in repo and repo['tags']:
                    tags = copy.copy(repo['tags'])
                # Save branches mentioned for a repo
                branches = []
                if 'branches' in repo:
                    branches = copy.copy(repo['branches'])
                # Save paths mentioned for a repo
                paths = []
                if 'paths' in repo:
                    paths = copy.copy(repo['paths'])
                # Apply the template
                if 'template' in repo:
                    repo.update(copy.deepcopy(
                        self.templates[repo['template']]))
                    del repo['template']
                # Process uri and gitweb string
                for key in ('uri', 'gitweb'):
                    if key in repo:
                        repo[key] = repo[key] % {'name': rid}
                # Re-apply saved tags
                if 'tags' not in repo:
                    repo['tags'] = []
                repo['tags'].extend(tags)
                repo['tags'] = list(set(repo['tags']))
                # Restore defined branches at repo level
                if branches:
                    repo['branches'] = branches
                # Restore defined paths at repo level
                if paths:
                    repo['paths'] = paths
                # Apply default values
                if 'parsers' not in repo:
                    repo['parsers'] = []
                if 'releases' not in repo:
                    repo['releases'] = []
                if 'index-tags' not in repo:
                    repo['index-tags'] = True
                # Transform date to epoch
                for release in repo['releases']:
                    release['date'] = date2epoch(release['date'])
    def _flatten_projects(self):
        """Build the two ES document sets: one doc per project and one
        association doc per (uri, repo, branch) fullrid."""
        flatten = {}
        rid2projects = {}
        for pid, detail in self.projects.items():
            flatten[pid] = {
                'name': pid,
                'aname': pid,
                'meta-ref': detail.get('meta-ref'),
                'refs': [],
                'description': detail.get('description'),
                'logo': detail.get('logo'),
                'bots-group': detail.get('bots-group'),
                'releases': detail.get('releases', []),
            }
            for release in flatten[pid]['releases']:
                release['date'] = date2epoch(release['date'])
            # One ref document per (repo, branch) pair.
            for rid, repo in detail['repos'].items():
                for branch in repo['branches']:
                    r = {}
                    r.update(copy.deepcopy(repo))
                    r['name'] = rid
                    r['aname'] = rid
                    r['branch'] = branch
                    del r['branches']
                    r['fullrid'] = "%s:%s:%s" % (
                        r['uri'], r['name'], r['branch'])
                    r['shortrid'] = "%s:%s" % (r['uri'], r['name'])
                    flatten[pid]['refs'].append(r)
                    rid2projects.setdefault(r['fullrid'], {'project': []})
                    if pid not in rid2projects[r['fullrid']]['project']:
                        rid2projects[r['fullrid']]['project'].append(pid)
        return flatten, rid2projects
    def _validate_templates(self):
        """ Validate self.data consistencies for templates

        Returns (template_ids, issues).
        """
        ids, issues = self._check_basic('project-templates',
                                        project_templates_schema,
                                        'Project template')
        if issues:
            return ids, issues
        # Check uncovered by the schema validator
        for d in self.data:
            templates = d.get('project-templates', {})
            for tid, templates in templates.items():
                if 'releases' in templates:
                    for r in templates['releases']:
                        try:
                            datetime.strptime(r['date'], "%Y-%m-%d")
                        except Exception:
                            issues.append("Wrong date format %s defined "
                                          "in template %s" % (r['date'], tid))
        return ids, issues
    def _validate_projects(self, tids):
        """ Validate self.data consistencies for projects

        tids: known template ids, used to verify repo references.
        """
        _, issues = self._check_basic('projects',
                                      projects_schema,
                                      'Project')
        if issues:
            return issues
        # Check template dependencies
        for d in self.data:
            projects = d.get('projects', {})
            for pid, detail in projects.items():
                for rid, repo in detail['repos'].items():
                    template = repo['template']
                    if template not in tids:
                        issues.append("Project ID '%s' Repo ID '%s' "
                                      "references an unknown template %s" % (
                                          pid, rid, template))
        return issues
    def validate(self):
        """Run all YAML validations; return the list of issues found."""
        if not hasattr(self, 'data'):
            YAMLDefinition.__init__(
                self, self.db_path, self.db_default_file, self.db_cache_path)
        validation_issues = []
        tids, issues = self._validate_templates()
        validation_issues.extend(issues)
        issues = self._validate_projects(tids)
        validation_issues.extend(issues)
        return validation_issues
    def get_projects(self, source=True):
        """Return {project_name: doc} for every project document."""
        if isinstance(source, list) and 'name' not in source:
            source.append('name')
        projects = {}
        for project in list(self.eprojects.get_all(source)):
            if 'name' not in list(project['_source'].keys()):
                # We skip rid2projects objects as they have
                # only a 'project' key
                continue
            projects[project['_source']['name']] = project['_source']
        return projects
    def get(self, pid, source=True):
        """Fetch a single project document by id."""
        return self.eprojects.get_by_id(pid, source)
    def exists(self, pid):
        """Return True if a project with this id is indexed."""
        return self.eprojects.exists(pid)
    def get_tags(self):
        """Return the deduplicated list of tags across all project refs."""
        projects = self.get_projects(source=['refs'])
        tags = set()
        for project in projects.values():
            for ref in project['refs']:
                for tag in ref.get('tags', []):
                    tags.add(tag)
        return list(tags)
    def get_gitweb_link(self, fullrid):
        """Return the gitweb URL template of the ref matching fullrid,
        or '' when no ref matches."""
        source = 'name'
        inner_source = 'refs.gitweb'
        ret = self.eprojects.get_by_nested_attr_match(
            'fullrid', fullrid, source, inner_source, 1)
        # Get the first inner hit / let's see later if that cause limitations
        if not ret[3]:
            return ''
        ref = ret[3][0]['refs']['hits']['hits'][0]['_source']
        # Elasticsearch 5.x nests the inner-hit source one level deeper.
        if self.el_version.find('5.') == 0:
            return ref.get('refs', {}).get('gitweb', '')
        else:
            return ref.get('gitweb', '')
    def get_projects_from_references(self, fullrids):
        """Return the list of project ids that contain any of the given
        fullrids."""
        if not fullrids:
            return []
        projects = set()
        ret = self.eprojects.get_projects_by_fullrids(fullrids)
        for _projects in [
                d.get('_source', {}).get('project', []) for d in ret]:
            for project in _projects:
                projects.add(project)
        return list(projects)
    def get_references_from_tags(self, tags):
        """Return the ref documents (fullrid/paths/name/branch) whose tags
        match any of the given tags."""
        source = 'name'
        inner_source = [
            'refs.fullrid', 'refs.paths', 'refs.name', 'refs.branch']
        ret = self.eprojects.get_by_nested_attr_match(
            'tags', tags, source, inner_source)
        refs = []
        for hit in ret[3]:
            # Elasticsearch 5.x nests the inner-hit source one level deeper.
            if self.el_version.find('5.') == 0:
                refs.extend([r['_source']['refs'] for r
                             in hit['refs']['hits']['hits']])
            else:
                refs.extend([r['_source'] for r
                             in hit['refs']['hits']['hits']])
        return refs
|
plotly/plotly.py | refs/heads/master | packages/python/plotly/plotly/graph_objs/isosurface/slices/__init__.py | 10 | import sys
if sys.version_info < (3, 7):
    # Python < 3.7: eagerly import the submodule classes.
    from ._x import X
    from ._y import Y
    from ._z import Z
else:
    # Python >= 3.7: lazy-load via module-level __getattr__ (PEP 562).
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._x.X", "._y.Y", "._z.Z"]
    )
|
dostavro/dotfiles | refs/heads/master | sublime2/Packages/Package Control/package_control/downloaders/no_ca_cert_exception.py | 11 | from .downloader_exception import DownloaderException
class NoCaCertException(DownloaderException):

    """
    Raised when no CA certificate is available for a domain name.
    """

    def __init__(self, message, domain):
        # Record the offending domain, then defer to the base class
        # for the message handling.
        super(NoCaCertException, self).__init__(message)
        self.domain = domain
|
stroykova/luigi | refs/heads/master | test/contrib/dataproc_test.py | 7 | """This is an integration test for the Dataproc-luigi binding.
This test requires credentials that can access GCS & access to a bucket below.
Follow the directions in the gcloud tools to set up local credentials.
"""
import unittest

# Importing the Google Cloud stack may fail when the optional
# dependencies are not installed; in that case skip the whole module
# by raising SkipTest at import time.
try:
    import oauth2client
    from luigi.contrib import dataproc
    from googleapiclient import discovery
    # Wire a real Dataproc API client, authenticated with the local
    # application-default credentials, into the luigi binding.
    default_credentials = oauth2client.client.GoogleCredentials.get_application_default()
    default_client = discovery.build('dataproc', 'v1', credentials=default_credentials)
    dataproc.set_dataproc_client(default_client)
except ImportError:
    raise unittest.SkipTest('Unable to load google cloud dependencies')
import luigi
import os
import time
from nose.plugins.attrib import attr
# In order to run this test, you should set these to your GCS project.
# Unfortunately there's no mock
PROJECT_ID = os.environ.get('DATAPROC_TEST_PROJECT_ID', 'your_project_id_here')
CLUSTER_NAME = os.environ.get('DATAPROC_TEST_CLUSTER', 'unit-test-cluster')
REGION = os.environ.get('DATAPROC_REGION', 'global')
# Suffix appended to CLUSTER_NAME by the image-version tests below.
IMAGE_VERSION = '1-0'
class _DataprocBaseTestCase(unittest.TestCase):
    # Shared base for the Dataproc integration tests; the fixture hooks
    # are placeholders kept for future setup/teardown needs.
    def setUp(self):
        pass
    def tearDown(self):
        pass
@attr('gcloud')
class DataprocTaskTest(_DataprocBaseTestCase):
def test_1_create_cluster(self):
success = luigi.run(['--local-scheduler',
'--no-lock',
'CreateDataprocClusterTask',
'--gcloud-project-id=' + PROJECT_ID,
'--dataproc-cluster-name=' + CLUSTER_NAME])
self.assertTrue(success)
def test_2_create_cluster_should_notice_existing_cluster_and_return_immediately(self):
job_start = time.time()
success = luigi.run(['--local-scheduler',
'--no-lock',
'CreateDataprocClusterTask',
'--gcloud-project-id=' + PROJECT_ID,
'--dataproc-cluster-name=' + CLUSTER_NAME])
self.assertTrue(success)
self.assertLess(time.time() - job_start, 3)
def test_3_submit_minimal_job(self):
# The job itself will fail because the job files don't exist
# We don't care, because then we would be testing spark
# We care the job was submitted correctly, so that's what we test
luigi.run(['--local-scheduler',
'--no-lock',
'DataprocSparkTask',
'--gcloud-project-id=' + PROJECT_ID,
'--dataproc-cluster-name=' + CLUSTER_NAME,
'--main-class=my.MinimalMainClass'])
response = dataproc.get_dataproc_client().projects().regions().jobs() \
.list(projectId=PROJECT_ID, region=REGION, clusterName=CLUSTER_NAME).execute()
lastJob = response['jobs'][0]['sparkJob']
self.assertEqual(lastJob['mainClass'], "my.MinimalMainClass")
def test_4_submit_spark_job(self):
# The job itself will fail because the job files don't exist
# We don't care, because then we would be testing spark
# We care the job was submitted correctly, so that's what we test
luigi.run(['--local-scheduler',
'--no-lock',
'DataprocSparkTask',
'--gcloud-project-id=' + PROJECT_ID,
'--dataproc-cluster-name=' + CLUSTER_NAME,
'--main-class=my.MainClass',
'--jars=one.jar,two.jar',
'--job-args=foo,bar'])
response = dataproc.get_dataproc_client().projects().regions().jobs() \
.list(projectId=PROJECT_ID, region=REGION, clusterName=CLUSTER_NAME).execute()
lastJob = response['jobs'][0]['sparkJob']
self.assertEqual(lastJob['mainClass'], "my.MainClass")
self.assertEqual(lastJob['jarFileUris'], ["one.jar", "two.jar"])
self.assertEqual(lastJob['args'], ["foo", "bar"])
def test_5_submit_pyspark_job(self):
# The job itself will fail because the job files don't exist
# We don't care, because then we would be testing pyspark
# We care the job was submitted correctly, so that's what we test
luigi.run(['--local-scheduler',
'--no-lock',
'DataprocPysparkTask',
'--gcloud-project-id=' + PROJECT_ID,
'--dataproc-cluster-name=' + CLUSTER_NAME,
'--job-file=main_job.py',
'--extra-files=extra1.py,extra2.py',
'--job-args=foo,bar'])
response = dataproc.get_dataproc_client().projects().regions().jobs()\
.list(projectId=PROJECT_ID, region=REGION, clusterName=CLUSTER_NAME).execute()
lastJob = response['jobs'][0]['pysparkJob']
self.assertEqual(lastJob['mainPythonFileUri'], "main_job.py")
self.assertEqual(lastJob['pythonFileUris'], ["extra1.py", "extra2.py"])
self.assertEqual(lastJob['args'], ["foo", "bar"])
def test_6_delete_cluster(self):
    """Deleting the cluster created earlier in the suite should succeed."""
    outcome = luigi.run([
        '--local-scheduler',
        '--no-lock',
        'DeleteDataprocClusterTask',
        '--gcloud-project-id=' + PROJECT_ID,
        '--dataproc-cluster-name=' + CLUSTER_NAME,
    ])
    self.assertTrue(outcome)
def test_7_delete_cluster_should_return_immediately_if_no_cluster(self):
    """Deleting an already-absent cluster should be a fast no-op."""
    started_at = time.time()
    outcome = luigi.run([
        '--local-scheduler',
        '--no-lock',
        'DeleteDataprocClusterTask',
        '--gcloud-project-id=' + PROJECT_ID,
        '--dataproc-cluster-name=' + CLUSTER_NAME,
    ])
    self.assertTrue(outcome)
    # The cluster was removed in test_6, so the delete task must not
    # block waiting on a deletion operation.
    self.assertLess(time.time() - started_at, 3)
def test_8_create_cluster_image_version(self):
    """Creating a cluster pinned to an explicit image version should succeed."""
    versioned_name = CLUSTER_NAME + '-' + IMAGE_VERSION
    outcome = luigi.run([
        '--local-scheduler',
        '--no-lock',
        'CreateDataprocClusterTask',
        '--gcloud-project-id=' + PROJECT_ID,
        '--dataproc-cluster-name=' + versioned_name,
        '--image-version=1.0',
    ])
    self.assertTrue(outcome)
def test_9_delete_cluster_image_version(self):
    """Tear down the version-pinned cluster created in test_8."""
    versioned_name = CLUSTER_NAME + '-' + IMAGE_VERSION
    outcome = luigi.run([
        '--local-scheduler',
        '--no-lock',
        'DeleteDataprocClusterTask',
        '--gcloud-project-id=' + PROJECT_ID,
        '--dataproc-cluster-name=' + versioned_name,
    ])
    self.assertTrue(outcome)
|
fnouama/intellij-community | refs/heads/master | python/lib/Lib/site-packages/django/core/management/commands/compilemessages.py | 373 | import codecs
import os
import sys
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
def has_bom(fn):
    """Return True if the file at ``fn`` starts with a UTF-8 or UTF-16 BOM.

    Reads the first 4 bytes, which is enough to cover the 3-byte UTF-8 BOM
    and the 2-byte UTF-16 BOMs.
    """
    # BUG FIX: the file handle was previously never closed (leak), and the
    # file was opened in text mode while the raw BOM bytes were compared —
    # binary mode is required for the byte comparison to be reliable.
    with open(fn, 'rb') as f:
        sample = f.read(4)
    # codecs.BOM_UTF8 is the same byte sequence as the previous hard-coded
    # '\xef\xbb\xbf' literal.
    return (sample.startswith(codecs.BOM_UTF8) or
            sample.startswith(codecs.BOM_UTF16_LE) or
            sample.startswith(codecs.BOM_UTF16_BE))
def compile_messages(stderr, locale=None):
    """Compile every .po file under the known locale directories to .mo.

    ``stderr`` is a writable stream used for progress messages. If
    ``locale`` is given, only that locale's LC_MESSAGES directory is
    processed. Raises CommandError when no locale directory exists, when a
    .po file carries a BOM, or when msgfmt fails.
    """
    basedirs = [os.path.join('conf', 'locale'), 'locale']
    if os.environ.get('DJANGO_SETTINGS_MODULE'):
        # Settings are only importable once a settings module is configured.
        from django.conf import settings
        basedirs.extend(settings.LOCALE_PATHS)

    # Gather existing directories (deduplicated, absolute).
    basedirs = set(map(os.path.abspath, filter(os.path.isdir, basedirs)))

    if not basedirs:
        raise CommandError("This script should be run from the Django SVN tree or your project or app tree, or with the settings module specified.")

    for basedir in basedirs:
        if locale:
            basedir = os.path.join(basedir, locale, 'LC_MESSAGES')
        for dirpath, dirnames, filenames in os.walk(basedir):
            for f in filenames:
                if not f.endswith('.po'):
                    continue
                stderr.write('processing file %s in %s\n' % (f, dirpath))
                fn = os.path.join(dirpath, f)
                if has_bom(fn):
                    raise CommandError("The %s file has a BOM (Byte Order Mark). Django only supports .po files encoded in UTF-8 and without any BOM." % fn)
                pf = os.path.splitext(fn)[0]
                # Store the names of the .mo and .po files in an environment
                # variable, rather than doing a string replacement into the
                # command, so that we can take advantage of shell quoting, to
                # quote any malicious characters/escaping.
                # See http://cyberelk.net/tim/articles/cmdline/ar01s02.html
                os.environ['djangocompilemo'] = pf + '.mo'
                os.environ['djangocompilepo'] = pf + '.po'
                if sys.platform == 'win32':  # Different shell-variable syntax
                    cmd = 'msgfmt --check-format -o "%djangocompilemo%" "%djangocompilepo%"'
                else:
                    cmd = 'msgfmt --check-format -o "$djangocompilemo" "$djangocompilepo"'
                # BUG FIX: the exit status of os.system() was previously
                # ignored, so a missing msgfmt binary or a malformed .po
                # file passed silently. Surface failures to the caller.
                status = os.system(cmd)
                if status != 0:
                    raise CommandError("Error executing msgfmt for %s. Note that msgfmt (from the GNU gettext toolset) is required." % fn)
class Command(BaseCommand):
    """Management command that delegates to compile_messages()."""

    help = 'Compiles .po files to .mo files for use with builtin gettext support.'

    option_list = BaseCommand.option_list + (
        make_option('--locale', '-l', dest='locale',
            help='The locale to process. Default is to process all.'),
    )

    # Compiling catalogs needs neither model validation nor settings access.
    requires_model_validation = False
    can_import_settings = False

    def handle(self, **options):
        # Forward the (possibly absent) locale straight to the worker.
        compile_messages(self.stderr, locale=options.get('locale'))
|
kisna72/django | refs/heads/master | tests/template_tests/filter_tests/test_wordcount.py | 521 | from django.template.defaultfilters import wordcount
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class WordcountTests(SimpleTestCase):
    """Template-level tests for the |wordcount filter.

    The count is the same for plain and mark_safe() input, with or
    without autoescaping.
    """

    @setup({'wordcount01': '{% autoescape off %}{{ a|wordcount }} {{ b|wordcount }}{% endautoescape %}'})
    def test_wordcount01(self):
        rendered = self.engine.render_to_string(
            'wordcount01', {'a': 'a & b', 'b': mark_safe('a & b')})
        self.assertEqual(rendered, '3 3')

    @setup({'wordcount02': '{{ a|wordcount }} {{ b|wordcount }}'})
    def test_wordcount02(self):
        rendered = self.engine.render_to_string(
            'wordcount02', {'a': 'a & b', 'b': mark_safe('a & b')})
        self.assertEqual(rendered, '3 3')
class FunctionTests(SimpleTestCase):
    """Direct unit tests for the wordcount() filter function."""

    def test_empty_string(self):
        """An empty string contains zero words."""
        self.assertEqual(wordcount(''), 0)

    def test_count_one(self):
        """A single token counts as one word."""
        self.assertEqual(wordcount('oneword'), 1)

    def test_count_multiple(self):
        """Whitespace-separated tokens are counted individually."""
        self.assertEqual(wordcount('lots of words'), 3)

    def test_non_string_input(self):
        """A non-string value still counts as a single word."""
        self.assertEqual(wordcount(123), 1)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.