repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
zimeon/iiif | iiif/manipulator_pil.py | Python | gpl-3.0 | 7,627 | 0.000787 | """Implementation of IIIF Image API manipulations using the Python Image Library.
Uses the Python Image Library (PIL) for in-python manipulation:
http://www.pythonware.com/products/pil/index.htm
"""
import re
import os
import os.path
import subprocess
import tempfile
from PIL import Image
from .error import IIIFError
from .request import IIIFRequest
from .manipulator import IIIFManipulator
class IIIFManipulatorPIL(IIIFManipulator):
"""Class to manipulate an image with PIL according to IIIF.
All exceptions are raised as IIIFError objects which directly
determine the HTTP response.
"""
tmpdir = '/tmp'
filecmd = None
pnmdir = None
def __init__(self, **kwargs):
"""Initialize IIIFManipulatorPIL object.
Keyword arguments are passes to superclass initialize method.
"""
super(IIIFManipulatorPIL, self).__init__(**kwargs)
# Does not support jp2 output
self.compliance_level = 2
self.image = None
self.outtmp = None
def set_max_image_pixels(self, pixels):
"""Set PIL limit on pixel size of images to load if non-zero.
WARNING: This is a global setting in PIL, it is
not local to this manipulator instance!
Setting a value here will not only set the given limit but
also convert the PIL "DecompressionBombWarning" into an
error. Thus setting a moderate limit sets a hard limit on
image size loaded, setting a very large limit will have the
effect of disabling the warning.
"""
if (pixels):
Image.MAX_IMAGE_PIXELS = pixels
Image.warnings.simplefilter(
'error', Image.DecompressionBombWarning)
def do_first(self):
"""Create PIL object from input image file.
Image location must be in self.srcfile. Will result in
self.width and self.height being set to the image dimensions.
Will raise an IIIFError on failure to load the image
"""
self.logger.debug("do_first: src=%s" % (self.srcfile))
try:
self.image = Image.open(self.srcfile)
except Image.DecompressionBombWarning as e:
# This exception will be raised only if PIL has been
# configured to raise an error in the case of images
# that exceeed Image.MAX_IMAGE_PIXELS, with
# Image.warnings.simplefilter('error', Image.DecompressionBombWarning)
raise IIIFError(text=("Image size limit exceeded (PIL: %s)" % (str(e))))
except Exception as e:
raise IIIFError(text=("Failed to read image (PIL: %s)" % (str(e))))
(self.width, self.height) = self.image.size
def do_region(self, x, y, w, h):
"""Apply region selection."""
if (x is None):
self.logger.debug("region: full (nop)")
else:
self.logger.debug("region: (%d,%d,%d,%d)" % (x, y, w, h))
self.image = self.image.crop((x, y, x + w, y + h))
self.width = w
self.height = h
def do_size(self, w, h):
"""Apply size scaling."""
if (w is None):
self.logger.debug("size: no scaling (nop)")
else:
self.logger.debug("size: scaling to (%d,%d)" % (w, h))
self.image = self.image.resize((w, h))
self.width = w
self.height = h
def do_rotation(self, mirror, rot):
"""Apply rotation and/or mirroring."""
if (not mirror and rot == 0.0):
self.logger.debug("rotation: no rotation (nop)")
else:
# FIXME - with PIL one can use the transpose() method to do 90deg
# FIXME - rotations as well as mirroring. This would be more efficient
# FIXME - for these cases than mirror _then_ rotate.
if (mirror):
self.logger.debug("rotation: mirror (about vertical axis)")
self.image = self.image.transpose(Image.FLIP_LEFT_RIGHT)
if (rot != 0.0):
self.logger.debug("rotation: by %f degrees clockwise" % (rot))
self.image = self.image.rotate(-rot, expand=True)
def do_quality(self, quality):
"""Apply value of quality parameter.
For PIL docs see
<http://pillow.readthedocs.org/en/latest/reference/Image.html#PIL.Image.Image.convert>
"""
if (quality == 'grey' or quality == 'gray'):
# Checking for 1.1 gray or 20.0 grey elsewhere
self.logger.debug("quality: converting to gray")
self.image = self.image.convert('L')
elif (quality == 'bitonal'):
self.logger.debug("quality: converting to bitonal")
self.image = self.image.convert('1')
else: # color or default/native (which we take as color)
# Deal first with conversions from I;16* formats which Pillow
# appears not to handle properly, resulting in mostly white images
# if we convert directly. See:
# <http://stackoverflow.com/questions/7247371/python-and-16-bit-tiff>
if (self.image.mode.startswith('I;16')):
self.logger.debug("quality: fudged conversion from mode %s to I"
% (self.image.mode))
self.image = self.image.convert('I')
self.image = self.image.point(lambda i: i * (1.0 / 256.0))
if (self.image.mode not in ('1', 'L', 'RGB', 'RGBA')):
# Need to convert from palette etc. in order to write out
self.logger.debug("quality: converting from mode %s to RGB"
% (self.image.mode))
self.image = self.image.convert('RGB')
else:
self.logger.debug("quality: quality (nop)")
def do_format(self, format):
"""Apply format selection.
Assume that for tiling applications we want jpg so return
that unless an ex | plicit format is requested.
"""
fmt = ('jpg' if (format is None) else format)
if (fmt == 'png'):
self.logger.debug("format: png")
self.mime_type = "image/png"
self.output_format = fmt
format = 'png'
elif (fmt == 'jpg'):
self.logger.debug("format: jpg")
self.mime_type = "image/jpeg"
sel | f.output_format = fmt
format = 'jpeg'
elif (fmt == 'webp'):
self.logger.debug("format: webp")
self.mime_type = "image/webp"
self.output_format = fmt
format = 'webp'
else:
raise IIIFError(code=415, parameter='format',
text="Unsupported output file format (%s), only png,jpg,webp are supported." % (fmt))
if (self.outfile is None):
# Create temp
f = tempfile.NamedTemporaryFile(delete=False)
self.outfile = f.name
self.outtmp = f.name
self.image.save(f, format=format)
else:
# Save to specified location
self.image.save(self.outfile, format=format)
def cleanup(self):
"""Cleanup: ensure image closed and remove temporary output file."""
if self.image:
try:
self.image.close()
except Exception:
pass
if (self.outtmp is not None):
try:
os.unlink(self.outtmp)
except OSError as e:
self.logger.warning("Failed to cleanup tmp output file %s"
% (self.outtmp))
|
ianjuma/octopus | manage.py | Python | apache-2.0 | 789 | 0.003802 | import os
f | rom gevent.pywsgi import WSGIServer
from gevent import monkey
monkey.patch_all()
from flask.ext.script import Manager
from flask.ext.migrate import Migrate, MigrateCommand
from app i | mport app
def push_to_server():
# just use git hooks already
pass
def background_task(instance):
# gunicorn -c config_gunicorn.py instance:app
pass
def runserver(instance):
port = int(os.environ.get("PORT", 8001))
instance.config['DEBUG'] = True
instance.config['use_reloader'] = True
instance.config['threaded'] = True
print 'Awesome running at - ' + str(port)
http_server = WSGIServer(('0.0.0.0', port), instance)
http_server.serve_forever()
if __name__ == '__main__':
manager = Manager()
manager.run(commands=None)
# run(app)
|
yunstanford/GraphiteSetup | carbon_relay.py | Python | mit | 1,348 | 0.030415 | import subprocess
import sys
import string
import os
def start_carbon_relay_instance(name):
path = os.path.realpath(__fil | e__)
subprocess.call(["python", "{0}/carbon-relay.py".format(os.path.dirname(pat | h)), "--instance={0}".format(name), "start"])
def stop_carbon_relay_instance(name):
path = os.path.realpath(__file__)
subprocess.call(["python", "{0}/carbon-relay.py".format(os.path.dirname(path)), "--instance={0}".format(name), "stop"])
def usage():
print("carbon_relay [start/stop] [instance name type: letter or number] [number of instances]")
print("instance names should be continuous")
print("For example: 1, 2, 3,... or a, b, c,...")
print("Usage: python carbon_relay start n 5")
def main():
if len(sys.argv) < 4:
print("Too few arguments")
usage()
return
if len(sys.argv) > 4:
print("Too many arguments")
usage()
return
if sys.argv[1] not in ['start', 'stop']:
print("Wrong operation! start or stop only!")
return;
if sys.argv[2] not in ['n', 'l']:
print("Wrong Type! l or n only!")
return
num = int(sys.argv[3])
if sys.argv[1] == 'start':
func = start_carbon_relay_instance
else:
func = stop_carbon_relay_instance
if sys.argv[2] == 'n':
for i in range(num):
func(i + 1)
else:
li = list(string.ascii_lowercase)[:num]
for i in li:
func(i)
if __name__ == '__main__':
main()
|
klmitch/nova | nova/tests/unit/api/openstack/compute/test_server_groups.py | Python | apache-2.0 | 37,885 | 0.002191 | # Copyright (c) 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_utils.fixture import uuidsentinel
from oslo_utils import uuidutils
import webob
from nova.api.openstack import api_version_request as avr
from nova.api.openstack.compute import server_groups as sg_v21
from nova import context
from nova import exception
from nova im | port objects
from nova.policies import server_groups as sg_policies
from nova import test
from nova.tests import fixtures
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import policy_fixture
class AttrDict(dict):
def __getattr__(self, k):
ret | urn self[k]
def server_group_template(**kwargs):
sgroup = kwargs.copy()
sgroup.setdefault('name', 'test')
return sgroup
def server_group_resp_template(**kwargs):
sgroup = kwargs.copy()
sgroup.setdefault('name', 'test')
if 'policy' not in kwargs:
sgroup.setdefault('policies', [])
sgroup.setdefault('members', [])
return sgroup
def server_group_db(sg):
attrs = copy.deepcopy(sg)
if 'id' in attrs:
attrs['uuid'] = attrs.pop('id')
if 'policies' in attrs:
policies = attrs.pop('policies')
attrs['policies'] = policies
else:
attrs['policies'] = []
if 'policy' in attrs:
del attrs['policies']
if 'members' in attrs:
members = attrs.pop('members')
attrs['members'] = members
else:
attrs['members'] = []
attrs['deleted'] = 0
attrs['deleted_at'] = None
attrs['created_at'] = None
attrs['updated_at'] = None
if 'user_id' not in attrs:
attrs['user_id'] = fakes.FAKE_USER_ID
if 'project_id' not in attrs:
attrs['project_id'] = fakes.FAKE_PROJECT_ID
attrs['id'] = 7
return AttrDict(attrs)
class ServerGroupTestV21(test.NoDBTestCase):
USES_DB_SELF = True
validation_error = exception.ValidationError
wsgi_api_version = '2.1'
def setUp(self):
super(ServerGroupTestV21, self).setUp()
self._setup_controller()
self.req = fakes.HTTPRequest.blank('')
self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True)
self.foo_req = fakes.HTTPRequest.blank('', project_id='foo')
self.policy = self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(fixtures.Database(database='api'))
cells = fixtures.CellDatabases()
cells.add_cell_database(uuidsentinel.cell1)
cells.add_cell_database(uuidsentinel.cell2)
self.useFixture(cells)
ctxt = context.get_admin_context()
self.cells = {}
for uuid in (uuidsentinel.cell1, uuidsentinel.cell2):
cm = objects.CellMapping(context=ctxt,
uuid=uuid,
database_connection=uuid,
transport_url=uuid)
cm.create()
self.cells[cm.uuid] = cm
def _setup_controller(self):
self.controller = sg_v21.ServerGroupController()
def test_create_server_group_with_no_policies(self):
sgroup = server_group_template()
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
def _create_server_group_normal(self, policies=None, policy=None,
rules=None):
sgroup = server_group_template()
sgroup['policies'] = policies
res_dict = self.controller.create(self.req,
body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'test')
self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
self.assertEqual(res_dict['server_group']['policies'], policies)
def test_create_server_group_with_new_policy_before_264(self):
req = fakes.HTTPRequest.blank('', version='2.63')
policy = 'anti-affinity'
rules = {'max_server_per_host': 3}
# 'policy' isn't an acceptable request key before 2.64
sgroup = server_group_template(policy=policy)
result = self.assertRaises(
self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
self.assertIn(
"Invalid input for field/attribute server_group",
str(result)
)
# 'rules' isn't an acceptable request key before 2.64
sgroup = server_group_template(rules=rules)
result = self.assertRaises(
self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
self.assertIn(
"Invalid input for field/attribute server_group",
str(result)
)
def test_create_server_group(self):
policies = ['affinity', 'anti-affinity']
for policy in policies:
self._create_server_group_normal(policies=[policy])
def test_create_server_group_rbac_default(self):
sgroup = server_group_template()
sgroup['policies'] = ['affinity']
# test as admin
self.controller.create(self.admin_req, body={'server_group': sgroup})
# test as non-admin
self.controller.create(self.req, body={'server_group': sgroup})
def test_create_server_group_rbac_admin_only(self):
sgroup = server_group_template()
sgroup['policies'] = ['affinity']
# override policy to restrict to admin
rule_name = sg_policies.POLICY_ROOT % 'create'
rules = {rule_name: 'is_admin:True'}
self.policy.set_rules(rules, overwrite=False)
# check for success as admin
self.controller.create(self.admin_req, body={'server_group': sgroup})
# check for failure as non-admin
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.controller.create, self.req,
body={'server_group': sgroup})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def _create_instance(self, ctx, cell):
with context.target_cell(ctx, cell) as cctx:
instance = objects.Instance(context=cctx,
image_ref=uuidsentinel.fake_image_ref,
node='node1', reservation_id='a',
host='host1',
project_id=fakes.FAKE_PROJECT_ID,
vm_state='fake',
system_metadata={'key': 'value'})
instance.create()
im = objects.InstanceMapping(context=ctx,
project_id=ctx.project_id,
user_id=ctx.user_id,
cell_mapping=cell,
instance_uuid=instance.uuid)
im.create()
return instance
def _create_instance_group(self, context, members):
ig = objects.InstanceGroup(context=context, name='fake_name',
user_id='fake_user', project_id=fakes.FAKE_PROJECT_ID,
members=members)
ig.create()
return ig.uuid
def _create_groups_and_instances(self, ctx):
cell1 = self.cells[uuidsentinel.cell1]
cell2 = self.cells[uuidsentinel.cell2]
instances = [self._create_instance(ctx, cell=cell1),
self._create_instance(ctx, cell=cell2 |
bizoru/sqlite-cake | sqlitecake/core/__init__.py | Python | gpl-2.0 | 130 | 0 | from .cakesync | import CakeSync
from .caketable import CakeTable
from .caketypes import CakeTypes
from .cakemodel impor | t CakeModel
|
sjsucohort6/openstack | python/venv/lib/python2.7/site-packages/openstack/network/v2/vpn_service.py | Python | mit | 1,751 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.network import network_service
from openstack import resource
class VPNService(resource.Resource):
resource_key = 'vpnservice'
resources_key = 'vpnservices'
base_path = '/vpn/vpnservices'
service = network_service.NetworkService()
# capabilities
allow_create = True
allow_retrieve = True
allow_update = True
allow_delete = True
allow_list = True
# Properties
#: The administrative state of the vpnservice, which is up ``True`` or
#: down ``False``. *Type: bool*
admin_state_up = resource.prop('admin_state_up', | type=bool)
#: Human-readable descripti | on for the vpnservice.
description = resource.prop('description')
#: The unique ID for the vpnservice.
id = resource.prop('id')
#: The vpnservice name.
name = resource.prop('name')
#: ID of the router into which the VPN service is inserted.
router_id = resource.prop('router_id')
#: The project this vpnservice is associated with.
project_id = resource.prop('tenant_id')
#: The vpnservice status.
status = resource.prop('status')
#: The subnet on which the tenant wants the vpnservice.
subnet_id = resource.prop('subnet_id')
|
ClusterHQ/pyzfs | libzfs_core/_error_translation.py | Python | apache-2.0 | 22,509 | 0.001333 | # Copyright 2015 ClusterHQ. See LICENSE file for details.
"""
Helper routines for converting ``errno`` style error codes from C functions
to Python exceptions defined by `libzfs_core` API.
The conversion heavily depends on the context of the error: the attempted
operation and the input parameters. For this reason, there is a conversion
routine for each `libzfs_core` interface function. The conversion routines
have the return code as a parameter as well as all the parameters of the
corresponding interface functions.
The parameters and exceptions are documented in the `libzfs_core` interfaces.
"""
import errno
import re
import string
from . import exceptions as lzc_exc
from ._constants import MAXNAMELEN
def lzc_create_translate_error(ret, name, ds_type, props):
if ret == 0:
return
if ret == errno.EINVAL:
_validate_fs_name(name)
raise lzc_exc.PropertyInvalid(name)
if ret == errno.EEXIST:
raise lzc_exc.FilesystemExists(name)
if ret == errno.ENOENT:
raise lzc_exc.ParentNotFound(name)
raise _generic_exception(ret, name, "Failed to create filesystem")
def lzc_clone_translate_error(ret, name, origin, props):
if ret == 0:
return
if ret == errno.EINVAL:
_validate_fs_name(name)
_validate_snap_name(origin)
if _pool_name(name) != _pool_name(origin):
raise lzc_exc.PoolsDiffer(name) # see https://www.illumos.org/issues/5824
else:
raise lzc_exc.PropertyInvalid(name)
if ret == errno.EEXIST:
raise lzc_exc.FilesystemExists(name)
if ret == errno.ENOENT:
if not _is_valid_snap_name(origin):
raise lzc_exc.SnapshotNameInvalid(origin)
raise lzc_exc.DatasetNotFound(name)
raise _generic_exception(ret, name, "Failed to create clone")
def lzc_rollback_translate_error(ret, name):
if ret == 0:
return
if ret == errno.EINVAL:
_validate_fs_name(name)
raise lzc_exc.SnapshotNotFound(name)
if ret == errno.ENOENT:
if not _is_valid_fs_name(name):
raise lzc_exc.NameInvalid(name)
else:
raise lzc_exc.FilesystemNotFound(name)
raise _generic_exception(ret, name, "Failed to rollback")
def lzc_snapshot_translate_errors(ret, errlist, snaps, props):
if ret == 0:
return
def _map(ret, name):
if ret == errno.EXDEV:
pool_names = map(_pool_name, snaps)
same_pool = all(x == pool_names[0] for x in pool_names)
if same_pool:
return lzc_exc.DuplicateSnapshots(name)
else:
return lzc_exc.PoolsDiffer(name)
elif ret == errno.EINVAL:
if any(not _is_valid_snap_name(s) for s in snaps):
return lzc_exc.NameInvalid(name)
elif any(len(s) > MAXNAMELEN for s in snaps):
return lzc_exc.NameTooLong(name)
else:
return lzc_exc.PropertyInvalid(name)
if ret == errno.EEXIST:
return lzc_exc.SnapshotExists(name)
if ret == errno.ENOENT:
return lzc_exc.FilesystemNotFound(name)
return _generic_exception(ret, name, "Failed to create snapshot")
_handle_err_list(ret, errlist, snaps, lzc_exc.SnapshotFailure, _map)
def lzc_destroy_snaps_translate_errors(ret, errlist, snaps, defer):
if ret == 0:
return
def _map(ret, name):
if ret == errno.EEXIST:
return lzc_exc.SnapshotIsCloned(name)
if ret == errno.ENOENT:
return lzc_exc.PoolNotFound(name)
if ret == errno.EBUSY:
return lzc_exc.SnapshotIsHeld(name)
return _generic_exception(ret, name, "Failed to destroy snapshot")
_handle_err_list(ret, errlist, snaps, lzc_exc.SnapshotDestructionFailure, _map)
def lzc_bookmark_translate_errors(ret, errlist, bookmarks):
if ret == 0:
return
def _map(ret, name):
if ret == errno.EINVAL:
if name:
snap = bookmarks[name]
pool_names = map(_pool_name, bookmarks.keys())
if not _is_valid_bmark_name(name):
return lzc_exc.BookmarkNameInvalid(name)
elif not _is_valid_snap_name(snap):
return lzc_exc.SnapshotNameInvalid(snap)
elif _fs_name(name) != _fs_name(snap):
return lzc_exc.BookmarkMismatch(name)
elif any(x != _pool_name(name) for x in pool_names):
return lzc_exc.PoolsDiffer(name)
else:
invalid_names = [b for b in bookmarks.keys() if not _is_valid_bmark_name(b)]
if invalid_names:
return lzc_exc.BookmarkNameInvalid(invalid_names[0])
if ret == errno.EEXIST:
return lzc_exc.BookmarkExists(name)
if ret == errno.ENOENT:
return lzc_exc.SnapshotNotFound(name)
if ret == errno.ENOTSUP:
return lzc_exc.BookmarkNotSupported(name)
return _generic_exception(ret, name, "Failed to create bookmark")
_handle_err_list(ret, errlist, bookmarks.keys(), lzc_exc.BookmarkFailure, _map)
def lzc_get_bookmarks_translate_error(ret, fsname, props):
if ret == 0:
return
if ret == errno.ENOENT:
raise lzc_exc.FilesystemNotFound(fsname)
raise _generic_exception(ret, fsname, "Failed to list bookmarks")
def lzc_destroy_bookmarks_translate_errors(ret, errlist, bookmarks):
if ret == 0:
return
def _map(ret, name):
if ret == errno.EINVAL:
return lzc_exc.NameInvalid(name)
return _generic_exception(ret, name, "Failed to destroy bookmark")
_handle_err_list(ret, errlist, bookmarks, lzc_exc.BookmarkDestructionFailure, _map)
def lzc_snaprange_space_translate_error(ret, firstsnap, lastsnap):
if ret == 0:
return
if ret == errno.EXDEV and firstsnap is not None:
if _pool_name(firstsnap) != _pool_name(lastsnap):
raise lzc_exc.PoolsDiffer(lastsnap)
else:
raise lzc_exc.SnapshotMismatch(lastsnap)
if ret == errno.EINVAL:
if not _is_valid_snap_name(firstsnap):
raise lzc_exc.NameInvalid(firstsnap)
elif not _is_valid_snap_name(lastsnap | ):
raise lzc_exc.NameInvalid(lastsnap)
elif len(firstsnap) > MAXNAMELEN:
raise lzc_exc.N | ameTooLong(firstsnap)
elif len(lastsnap) > MAXNAMELEN:
raise lzc_exc.NameTooLong(lastsnap)
elif _pool_name(firstsnap) != _pool_name(lastsnap):
raise lzc_exc.PoolsDiffer(lastsnap)
else:
raise lzc_exc.SnapshotMismatch(lastsnap)
if ret == errno.ENOENT:
raise lzc_exc.SnapshotNotFound(lastsnap)
raise _generic_exception(ret, lastsnap, "Failed to calculate space used by range of snapshots")
def lzc_hold_translate_errors(ret, errlist, holds, fd):
if ret == 0:
return
def _map(ret, name):
if ret == errno.EXDEV:
return lzc_exc.PoolsDiffer(name)
elif ret == errno.EINVAL:
if name:
pool_names = map(_pool_name, holds.keys())
if not _is_valid_snap_name(name):
return lzc_exc.NameInvalid(name)
elif len(name) > MAXNAMELEN:
return lzc_exc.NameTooLong(name)
elif any(x != _pool_name(name) for x in pool_names):
return lzc_exc.PoolsDiffer(name)
else:
invalid_names = [b for b in holds.keys() if not _is_valid_snap_name(b)]
if invalid_names:
return lzc_exc.NameInvalid(invalid_names[0])
fs_name = None
hold_name = None
pool_name = None
if name is not None:
fs_name = _fs_name(name)
pool_name = _pool_name(name)
hold_name = holds[name]
if ret == errno.ENOENT:
return lzc_exc.FilesystemNotFound(fs_name)
if ret == errno.EEXIST:
return lzc_exc.HoldExists(name)
if ret == errno.E2BIG:
return lzc_exc.NameTooLong(hold_name)
|
xiang12835/python_web | py2_web2py/web2py/gluon/packages/dal/pydal/dialects/postgre.py | Python | apache-2.0 | 11,219 | 0.000535 | from ..adapters.postgres import Postgre, PostgreNew, PostgreBoolean
from ..helpers.methods import varquote_aux
from ..objects import Expression
from .base import SQLDialect
from . import dialects, sqltype_for, register_expression
@dialects.register_for(Postgre)
class PostgreDialect(SQLDialect):
true_exp = "TRUE"
false_exp = "FALSE"
@sqltype_for('blob')
def type_blob(self):
return 'BYTEA'
@sqltype_for('bigint')
def type_bigint(self):
return 'BIGINT'
@sqltype_for('double')
def type_double(self):
return 'FLOAT8'
@sqltype_for('id')
def type_id(self):
return 'SERIAL PRIMARY KEY'
@sqltype_for('big-id')
def type_big_id(self):
return 'BIGSERIAL PRIMARY KEY'
@sqltype_for('big-reference')
def type_big_reference(self):
return 'BIGINT REFERENCES %(foreign_key)s ' + \
'ON DELETE %(on_delete_action)s %(null)s %(unique)s'
@sqltype_for('reference TFK')
def type_reference_tfk(self):
return ' CONSTRAINT "FK_%(constraint_name)s_PK" FOREIGN KEY ' + \
'(%(field_name)s) REFERENCES %(foreign_table)s' + \
'(%(foreign_key)s) ON DELETE %(on_delete_action)s'
@sqltype_for('geometry')
def type_geometry(self):
return 'GEOMETRY'
@sqltype_for('geography')
def type_geography(self):
return 'GEOGRAPHY'
def varquote(self, val):
return varquote_aux(val, '"%s"')
def sequence_name(self, tablename):
return self.quote('%s_id_seq' % tablename)
def insert(self, table, fields, values, returning=None):
ret = ''
if returning:
ret = 'RETURNING %s' % returning
return 'INSERT INTO %s(%s) VALUES (%s)%s;' % (
table, fields, values, ret)
@property
def random(self):
return 'RANDOM()'
def add(self, first, second, query_env={}):
t = first.type
if t in ('text', 'string', 'password', 'json', 'jsonb', 'upload', 'blob'):
return '(%s || %s)' % (
self.expand(first, query_env=query_env),
self.expand(second, first.type, query_env=query_env))
else:
return '(%s + %s)' % (
self.expand(first, query_env=query_env),
self.expand(second, first.type, query_env=query_env))
def regexp(self, first, second, query_env={}):
return '(%s ~ %s)' % (
self.expand(first, query_env=query_env),
self.expand(second, 'string', query_env=query_env))
def like(self, first, second, escape=None, query_env={}):
if isinstance(second, Expression):
second = self.expand(second, 'string', query_env=query_env)
else:
second = self.expand(second, 'string', query_env=query_env)
if escape is None:
escape = '\\'
second = second.replace(escape, escape * 2)
if first.type not in ('string', 'text', 'json', 'jsonb'):
return "(%s LIKE %s ESCAPE '%s')" % (
self.cast(self.expand(first, query_env=query_env),
'CHAR(%s)' % first.length), second, escape)
return "(%s LIKE %s ESCAPE '%s')" % (
self.expand(first, query_env=query_env), second, escape)
def ilike(self, first, second, escape=None, query_env={}):
if isinstance(second, Expression):
second = self.expand(second, 'string', query_env=query_env)
else:
second = self.expand(second, 'string', query_env=query_env)
if escape is None:
escape = '\\'
second = second.replace(escape, escape * 2)
if first.type not in ('string', 'text', 'json', 'jsonb', 'list:string'):
return "(%s ILIKE %s ESCAPE '%s')" % (
self.cast(self.expand(first, query_env=query_env),
'CHAR(%s)' % first.length), second, escape)
return "(%s ILIKE %s ESCAPE '%s')" % (
self.expand(first, query_env=query_env), second, escape)
def drop_table(self, table, mode):
if mode not in ['restrict', 'cascade', '']:
raise ValueError('Invalid mode: %s' % mode)
return ['DROP TABLE ' + table._rname + ' ' + mode + ';']
def create_index(self, name, table, expressions, unique=False, where=None):
uniq = ' UNIQUE' if unique else ''
whr = ''
if where:
whr = ' %s' % self.where(where)
with self.adapter.index_expander():
rv = 'CREATE%s INDEX %s ON %s (%s)%s;' % (
uniq, self.quote(name), table._rname, ','.join(
self.expand(field) for field in expressions), whr)
return rv
def st_asgeojson(self, first, second, query_env={}):
return 'ST_AsGeoJSON(%s,%s,%s,%s)' % (
second['version'], self.expand(first, query_env=query_env),
second['precision'], second['options'])
def st_astext(self, first, query_env={}):
return 'ST_AsText(%s)' % self.expand(first, query_env=query_env)
def st_x(self, first, query_env={}):
return 'ST_X(%s)' % (self.expand(first, query_env=query_env))
def st_y(self, first, query_env={}):
return 'ST_Y(%s)' % (self.expand(first, query_env=query_env))
def st_contains(self, first, second, query_env={}):
return 'ST_Contains(%s,%s)' % (
self.expand(first, query_env=query_env),
self.expand(second, first.type, query_env=query_env))
def st_distance(self, first, second, query_env={}):
return 'ST_Distance(%s,%s)' % (
self.expand(first, query_env=query_env),
self.expand(second, first.type, query_env=query_env))
def st_equals(self, first, second, query_env={}):
return 'ST_Equals(%s,%s)' % (
self.expand(first, query_env=query_env),
self.expand(second, first.type, query_env=query_env))
def st_intersects(self, first, second, query_env={}):
return 'ST_Intersects(%s,%s)' % (
self.expand(first, query_env=query_env),
self.expand(second, first.type, query_env=query_env))
def st_overlaps(self, first, second, query_env={}):
return 'ST_Overlaps(%s,%s)' % (
self.expand(first, query_env=query_env),
self.expand(second, first.type, query_env=query_env))
def st_simplify(self, first, second, query_env={}):
return 'ST_Simplify(%s,%s)' % (
self.expand(first, query_env=query_env),
self.expand(second, 'double', query_env=query_env))
def st_simplifypreservetopology(self, first, second, query_env={}):
return 'ST_SimplifyPreserveTopology(%s,%s)' % (
self.expand(first, query_env=query_env),
self.expand(second, 'double', query_env=query_env))
def st_touches(self, first, second, query_env={}):
return 'ST_Touches(%s,%s)' % (
self.expand(first, query_env=query_env),
self.expand(second, first.type, query_env=query_env))
def st_within(self, fi | rst, second, query_env={}):
return 'ST_Within(%s,%s)' % (
self.expand(first, query_env=query_env),
self.expand(second, first.type, query_env=query_env))
def st_dwithin(self, first, tup, query_env={}):
return 'ST_DWithin(%s,%s,%s)' % (
| self.expand(first, query_env=query_env),
self.expand(tup[0], first.type, query_env=query_env),
self.expand(tup[1], 'double', query_env=query_env))
@register_expression('doy')
def extract_doy(self, expr):
return Expression(expr.db, self.extract, expr, 'doy', 'integer')
@register_expression('dow')
def extract_dow(self, expr):
return Expression(expr.db, self.extract, expr, 'dow', 'integer')
@register_expression('isodow')
def extract_isodow(self, expr):
return Expression(expr.db, self.extract, expr, 'isodow', 'integer')
@register_expression('isoyear')
def extract_isoyear(self, expr):
return Expression(expr.db, self.extract, expr, 'isoyear', 'integer')
@register_expression('quarter')
def extract_quarter(self, |
buffalotheory/critter_list | pyplayer/pyplayerd.py | Python | gpl-2.0 | 7,138 | 0.003502 | #!/usr/bin/env python3
# daemon.py
"""
originally from the Python Cookbook (version 3)
11.8. Implementing Remote Procedure Calls
12.2. Determining If a Thread Has Started
12.14. Launching a Daemon Process on Unix
(perhaps more)
"""
import os
import sys
import atexit
import signal
import json
from datetime import datetime, timedelta
from multiprocessing.connection import Listener
from threading import Thread
import pyplayer
from inspect import getmembers, isfunction, ismethod
#debuglog = StringIO()
def TRACE(msg):
print(
"%s: %s"
% (datetime.now().strftime("%Y-%m-%d_%H:%M:%S.%f")[0:-3], msg),
file=sys.stderr)
#debuglog.write("%s\n" % m)
#logging.info(m)
#print('%s' % m, file=sys.stderr)
class RPCHandler:
def __init__(self):
self._functions = { }
self.mplif = pyplayer.MPlayerIF()
def register_function(self, func):
print(
"adding function %s (type = %s) isfunction = %s"
% (str(func.__name__), str(type(func)), str(isfunction(func)))
)
self._functions[func.__name__] = func
def handle_connection(self, connection):
try:
while True:
# Receive a message
func_name, args, kwargs = json.loads(connection.recv())
sys.stdout.write(
"RPCHandler.handle_connection: (%s) called\n"
% (func_name)
)
sys.stdout.flush()
# Run the RPC and send a response
try:
r = self._functions[func_name](*args,**kwargs)
sres = json.dumps(r)
sys.stdout.write(
"RPCHandler.handle_connection: "
"function %s result = %s, str(result) = %s type = %s\n"
% (func_name, str(r), str(sres), str(type(r)))
)
connection.send(sres)
except Exception as e:
connection.send(json.dumps(str(e)))
except EOFError:
pass
# test function #1
def add(x, y):
sys.stdout.write("add(%d, %d)\n" % (x, y))
sys.stdout.flush()
return x + y
# test function #2
def sub(x, y):
sys.stdout.write("sub(%d, %d)\n" % (x, y))
sys.stdout.flush()
return x - y
def daemonize(pidfile, *,
stdin='/dev/null',
stdout='/dev/null',
stderr='/dev/null'):
# TODO: check for existence of stdin, stdout and stderr files and
# the directories containing them
# TODO: verify the directory that contains the PID file, both the existence
# and the writability for the current user
if os.path.exists(pidfile):
raise RuntimeError('Already running')
# First fork (detaches from parent)
try:
if os.fork() > 0:
raise SystemExit(0) # Parent exit
except OSError as e:
raise RuntimeError('fork #1 failed.')
os.chdir('/')
os.umask(0)
os.setsid()
# Second fork (relinquish session leadership)
try:
if os.fork() > 0:
raise SystemExit(0)
except OSError as e:
raise RuntimeError('fork #2 failed.')
TRACE('%s started' % DAEMON)
# Flush I/O buffers
sys.stdout.flush()
sys.stderr.flush()
# Replace file descriptors for stdin, stdout, and stderr
with open(stdin, 'rb', 0) as f:
os.dup2(f.fileno(), sys.stdin.fileno())
with open(stdout, 'ab', 0) as f:
os.dup2(f.fileno(), sys.stdout.fileno())
with open(stderr, 'ab', 0) as f:
os.dup2(f.fileno(), sys.stderr.fileno())
# Write the PID file
with open(pidfile,'w') as f:
print(os.getpid(),file=f)
# Arrange to have the PID file removed on exit/signal
atexit.register(lambda: os.remove(pidfile))
# Signal handler for termination (required)
def sigterm_handler(signo, frame):
raise SystemExit(1)
signal.signal(signal.SIGTERM, sigterm_handler)
def rpc_server(handler, address, authkey):
sock = Listener(address, authkey=authkey)
while True:
client = sock.accept()
t = Thread(target=handler.handle_connection, args=(client,))
t.daemon = True
t.start()
# Some remote functions
def daemon_start():
try:
daemonize(
| PIDFILE,
stdout=pyplayer.config.stdout_file,
stderr=pyplayer.config.stderr_file
)
TRACE('%s started' % PIDFILE)
except RuntimeError as e:
TRACE(e)
raise SystemExit(1)
main()
def daemon_stop():
if os.path.exists(PIDFILE):
with open(PIDFILE) as f:
TRACE('stopping %s...' % DAEMON)
os.kill(int(f.read()), signal.SIGTERM)
# FIXME: | don't exit until the process is confirmed dead
TRACE('%s stopped' % DAEMON)
else:
TRACE('Not running')
raise SystemExit(1)
def daemon_status():
if not os.path.exists(PIDFILE):
TRACE('Not running. %s does not exist.' % PIDFILE)
raise SystemExit(1)
pid = int(open(PIDFILE).read())
# example: cmdlinefile="/projects/6124/cmdline"
cmdlinefile = "/proc/%d/cmdline" % pid
if not os.path.exists(cmdlinefile):
TRACE(
'Not running. cmdline file %s does not exist in /proc tree.'
% cmdlinefile
)
raise SystemExit(1)
s = open(cmdlinefile).readline()
# example: s='python3\x00./pyplayerd.py\x00start\x00'
s2 = s.split("\0")[1]
# example: s2='./pyplayerd.py'
if not s2.endswith(DAEMON):
TRACE(
'Not running. cmdline file %s indicates %s; expected %s'
% (cmdlinefile, s2, DAEMON)
)
raise SystemExit(1)
TRACE('%s is running (pid=%d)' % (DAEMON, pid))
def daemon_restart():
try:
daemon_stop()
except:
pass
daemon_start()
def main():
import time
sys.stdout.write('Daemon started with pid {}\n'.format(os.getpid()))
# Register with a handler
handler = RPCHandler()
handler.register_function(add)
handler.register_function(sub)
for f in getmembers(handler.mplif):
if f[1] is None: continue
print(
"handler.mplif: %s (type = %s)"
% (str(f[1]), str(type(f[1])))
)
if ismethod(f[1]) and f[0][0] != '_':
handler.register_function(f[1])
# Run the server
rpc_server(handler, ('localhost', 17000), authkey=b'super_secret_auth_key__CHANGEME')
if __name__ == '__main__':
PIDFILE = '/run/pyplayer/pyplayer.pid'
DAEMON = sys.argv[0]
if len(sys.argv) != 2:
print(
'Usage: {} [start|stop|restart|status]'.format(sys.argv[0])
)
raise SystemExit(1)
if sys.argv[1] == 'start':
daemon_start()
elif sys.argv[1] == 'stop':
daemon_stop()
elif sys.argv[1] == 'status':
daemon_status()
elif sys.argv[1] == 'restart':
daemon_restart()
else:
TRACE('Unknown command {!r}'.format(sys.argv[1]))
raise SystemExit(1)
|
emgee/formal | formal/test/test_types.py | Python | mit | 6,660 | 0.003904 | from datetime import date, time
try:
from decimal import Decimal
haveDecimal = True
except ImportError:
haveDecimal = False
from twisted.internet import defer
from twisted.trial import unittest
import formal
from formal import validation
class TestValidators(unittest.TestCase):
def testHasValidator(self):
t = formal.String(validators=[validation.LengthValidator(max=10)])
self.assertEquals(t.hasValidator(validation.LengthValidator), True)
def testRequired(self):
t = formal.String(required=True)
self.assertEquals(t.hasValidator(validation.RequiredValidator), True)
self.assertEquals(t.required, True)
class TestCreation(unittest.TestCase):
def test_immutablility(self):
self.assertEquals(formal.String().immutable, False)
self.assertEquals(formal.String(immutable=False).immutable, False)
self.assertEquals(formal.String(immutable=True).immutable, True)
def test_immutablilityOverride(self):
class String(formal.String):
immutable = True
self.assertEquals(String().immutable, True)
self.assertEquals(String(immutable=False).immutable, False)
self.assertEquals(String(immutable=True).immutable, True)
class TestValidate(unittest.TestCase):
@defer.deferredGenerator
def runSuccessTests(self, type, tests):
for test in tests:
d = type(*test[0], **test[1]).validate(test[2])
d = defer.waitForDeferred(d)
yield d
self.assertEquals(d.getResult(), test[3])
@defer.deferredGenerator
def runFailureTests(self, type, tests):
for test in tests:
d = type(*test[0], **test[1]).validate(test[2])
d = defer.waitForDeferred(d)
yield d
self.assertRaises(test[3], d.getResult)
def testStringSuccess(self):
return self.runSuccessTests(formal.String, [
([], {}, None, None),
([], {}, '', None),
([], {}, ' ', ' '),
([], {}, 'foo', 'foo'),
([], {}, u'foo', u'foo'),
([], {'strip': True}, ' ', None),
([], {'strip': True}, ' foo ', 'foo'),
([], {'missing': 'bar'}, 'foo', 'foo'),
([], {'missing': 'bar'}, '', 'bar'),
([], {'strip': True, 'missing': ''}, ' ', ''),
])
def testStringFailure(self):
return self.runFailureTests(formal.String, [
([], {'required': True}, '', formal.FieldValidationError),
([], {'required': True}, None, formal.FieldValidationError),
])
def testIntegerSuccess(self):
return self.runSuccessTests(formal.Integer, [
([], {}, None, None),
([], {}, 0, 0),
([], {}, 1, 1),
([], {}, -1, -1),
([], {'missing': 1}, None, 1),
([], {'missing': 1}, 2, 2),
])
def testIntegerFailure(self):
return self.runFailureTests(formal.Integer, [
([], {'required': True}, None, formal.FieldValidationError),
])
def testFloatSuccess(self):
self.runSuccessTests(formal.Float, [
([], {}, None, None),
([], {}, 0, 0.0),
([], {}, 0.0, 0.0),
([], {}, .1, .1),
([], {}, 1, 1.0),
([], {}, -1, -1.0),
([], {}, -1.86, -1.86),
([], {'missing': 1.0}, None, 1.0),
([], {'missing': 1.0}, 2.0, 2.0),
])
def tes | tFloatFailure(self):
self.runFailureTests(formal.Float, [
([], {'required': True}, None, formal.FieldValidationError),
])
if haveDecimal:
def testDecimalSuccess(self):
return self.runSuccessTests(formal.Decimal | , [
([], {}, None, None),
([], {}, Decimal('0'), Decimal('0')),
([], {}, Decimal('0.0'), Decimal('0.0')),
([], {}, Decimal('.1'), Decimal('.1')),
([], {}, Decimal('1'), Decimal('1')),
([], {}, Decimal('-1'), Decimal('-1')),
([], {}, Decimal('-1.86'), Decimal('-1.86')),
([], {'missing': Decimal('1.0')}, None, Decimal('1.0')),
([], {'missing': Decimal('1.0')}, Decimal('2.0'), Decimal('2.0')),
])
def testDecimalFailure(self):
return self.runFailureTests(formal.Decimal, [
([], {'required': True}, None, formal.FieldValidationError),
])
def testBooleanSuccess(self):
return self.runSuccessTests(formal.Boolean, [
([], {}, None, None),
([], {}, True, True),
([], {}, False, False),
([], {'missing' :True}, None, True),
([], {'missing': True}, False, False)
])
def testDateSuccess(self):
return self.runSuccessTests(formal.Date, [
([], {}, None, None),
([], {}, date(2005, 1, 1), date(2005, 1, 1)),
([], {'missing': date(2005, 1, 2)}, None, date(2005, 1, 2)),
([], {'missing': date(2005, 1, 2)}, date(2005, 1, 1), date(2005, 1, 1)),
])
def testDateFailure(self):
return self.runFailureTests(formal.Date, [
([], {'required': True}, None, formal.FieldValidationError),
])
def testTimeSuccess(self):
self.runSuccessTests(formal.Time, [
([], {}, None, None),
([], {}, time(12, 30, 30), time(12, 30, 30)),
([], {'missing': time(12, 30, 30)}, None, time(12, 30, 30)),
([], {'missing': time(12, 30, 30)}, time(12, 30, 31), time(12, 30, 31)),
])
def testTimeFailure(self):
self.runFailureTests(formal.Time, [
([], {'required': True}, None, formal.FieldValidationError),
])
def testSequenceSuccess(self):
self.runSuccessTests(formal.Sequence, [
([formal.String()], {}, None, None),
([formal.String()], {}, ['foo'], ['foo']),
([formal.String()], {'missing': ['foo']}, None, ['foo']),
([formal.String()], {'missing': ['foo']}, ['bar'], ['bar']),
])
def testSequenceFailure(self):
self.runFailureTests(formal.Sequence, [
([formal.String()], {'required': True}, None, formal.FieldValidationError),
([formal.String()], {'required': True}, [], formal.FieldValidationError),
])
def test_file(self):
pass
test_file.skip = "write tests"
|
AzamYahya/shogun | examples/undocumented/python_modular/graphical/kernel_ridge_regression.py | Python | gpl-3.0 | 969 | 0.026832 | from pylab import figure,pcolor,scatter,contour,colorbar,show,subplot,plot,connect
from numpy import array,meshgrid,reshape,linspace,min,max
from numpy import concatenate,transpose,ravel
from modshogun import *
from modshogun import *
from modshogun import *
import util
util.set_title('KernelRidgeRegression')
width=20
# positive examples
pos=util.get_realdata(True)
plot(pos[0,:], pos[1,:], | "r.")
# negative examples
neg=util.get_realdata(False)
plot(neg[0,:], neg[1,:], "b.")
# train svm
labels = util.get_labels(type='regression')
train = util.get_realfeatures(pos, neg)
gk=GaussianKernel(train, train, width)
krr = KernelRidgeRegression()
krr.set_labels(labels)
krr.set_kernel(gk)
krr.set_tau(1e-3)
krr.train()
# compute output plot iso-lines
x, y, z=util.comput | e_output_plot_isolines(krr, gk, train, regression=True)
pcolor(x, y, z, shading='interp')
contour(x, y, z, linewidths=1, colors='black', hold=True)
connect('key_press_event', util.quit)
show()
|
auto-mat/klub | apps/api/frontend/unknown_user_apply_for_membership_unit.py | Python | gpl-3.0 | 4,109 | 0.000731 | from rest_framework import generics
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from aklub.models import UserProfile, AdministrativeUnit
from oauth2_provider.contrib.rest_framework import TokenHasReadWriteScope
from rest_framework import serializers
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework import status
from interactions.models import Interaction
from interactions.interaction_types import *
from ..serializers import GetOrCreateUserprofile, get_or_create_user_profile_fields
class ApplyForMembershipSerializer(
GetOrCreateUserprofile,
):
administrative_unit = serializers.SlugRelatedField(
required=True,
queryset=AdministrativeUnit.objects.filter(),
slug_field="id",
)
skills = serializers.CharField(required=False, allow_blank=True)
class Meta:
model = UserProfile
fields = get_or_create_user_profile_fields + [
"administrative_unit",
"skills",
]
class ApplyForMembershipView(generics.CreateAPIView):
permission_classes = [TokenHasReadWriteScope | IsAuthenticated]
required_scopes = ["can_create_userprofile_interaction"]
serializer_class = ApplyForMembershipSerializer
def post(self, request, *args, **kwargs):
serializer = ApplyForMembershipSerializer(data=self.request.data)
serializer.is_valid(raise_exception=True)
user, created = serializer.get_or_create_user_profile()
administrative_unit = serializer.validated_data.get("administrative_unit")
user.administrative_units.add(administrative_unit),
interaction_type = membership_application_interaction_type()
Interaction.objects.create(
user=user,
type=interaction_type,
administrative_unit=administrative_unit,
date_from=timezone.now(),
subject=interaction_type.name,
)
return Response(
{"user_id": user.pk},
status=status.HTTP_200_OK,
)
def test_apply_for_membership(administrative_unit_1, app_request):
from rest_framework.reverse import reverse
from freezegun import freeze_time
url = reverse("unknown_user_apply_for_membership")
post_data = {
"first_name": "John",
"last_name": "Dock",
"telephone": "720000000",
"email": "john@rock.com",
"note": | "iam alergic to bees",
"age_group": 2012,
"birth_month": 12,
"birth_day": 12,
"street": "Belmont Avenu | e 2414",
"city": "New York",
"zip_code": "10458",
"administrative_unit": administrative_unit_1.pk,
"skills": "cooking",
}
current_date = timezone.now()
with freeze_time(current_date):
response = app_request.post(url, post_data)
assert response.status_code == 200
new_user = UserProfile.objects.get(profileemail__email=post_data["email"])
assert new_user.pk == response.json()["user_id"]
assert new_user.first_name == post_data["first_name"]
assert new_user.last_name == post_data["last_name"]
assert new_user.age_group == post_data["age_group"]
assert new_user.birth_month == post_data["birth_month"]
assert new_user.birth_day == post_data["birth_day"]
assert new_user.street == post_data["street"]
assert new_user.city == post_data["city"]
assert new_user.zip_code == post_data["zip_code"]
assert new_user.administrative_units.first() == administrative_unit_1
assert new_user.interaction_set.count() == 1
interaction = new_user.interaction_set.first()
assert interaction.administrative_unit == administrative_unit_1
assert interaction.subject == "Žadost o Členství"
assert interaction.date_from == current_date
# second registration => user recognized and only new interaction is created!
post_data["skills"] = "drawing"
response = app_request.post(url, post_data)
assert response.status_code == 200
assert new_user.interaction_set.count() == 2
|
ioam/geoviews | geoviews/element/__init__.py | Python | bsd-3-clause | 3,017 | 0.001989 | from holoviews.element import (
ElementConversion, Points as HvPoints, Polygons as HvPolygons,
Path as HvPath
)
from .geo import (_Element, Feature, Tiles, is_geographic, # noqa (API import)
WMTS, Points, Image, Text, LineContours, RGB,
FilledContours, Path, Polygons, Shape, Dataset,
Contours, TriMesh, Graph, Nodes, EdgePaths, QuadMesh,
VectorField, Labels, HexTiles, Rectangles, Segments)
class GeoConversion(ElementConversion):
"""
GeoConversion is a very simple container object which can
be given an existing Dataset and provides methods to convert
the Dataset into most other Element types. If the requested
key dimensions correspond to geographical coordinates the
conversion interface will automatically use a geographical
Element type while all other plot will use regular HoloViews
Elements.
"""
def __init__(self, cube):
self._element = cube
def __call__(self, *args, **kwargs):
group_type = args[0]
if 'crs' not in kwargs and issubclass(group_type, _Element):
kwargs['crs'] = self._element.crs
is_gpd = self._element.interface.datatype == 'geodataframe'
if is_gpd:
kdims = args[1] if len(args) > 1 else kwargs.get('kdims', None)
if len(args) > 1:
args = (Dataset, [])+args[2:]
else:
args = (Dataset,)
kwargs['kdims'] = []
converted = super(GeoConversion, self).__call__(*args, **kwargs)
if is_gpd:
if kdims is None: kdims = group_type.kdims
converted = converted.map(lambda x: x.clone(kdims=kdims, new_type=group_type), Dataset)
return converted
def linecontours(self, kdims=None, vdims=None, mdims=None, **kwargs):
return self(LineContours, kdims, vdims, mdims, **kwargs)
def filledcontours(self, kdims=None, vdims=None, mdims=None, **kwargs):
return self(FilledContours, kdims, vdims | , mdims, **kwargs)
def image(self, kdims=None, vdims=None, mdims=None, **kwargs):
return self(Image, kdims, vdims, mdims, **kwargs)
def points(self, kdims=None, vdims=None, mdims=None, **kwargs):
if kdims is None: kdims = self._element.kdims
el_type = Points if is_geographic(self._element, kdims) else HvPoints
return self(el_type, kdims, vdims, mdims, **kwargs)
|
def polygons(self, kdims=None, vdims=None, mdims=None, **kwargs):
if kdims is None: kdims = self._element.kdims
el_type = Polygons if is_geographic(self._element, kdims) else HvPolygons
return self(el_type, kdims, vdims, mdims, **kwargs)
def path(self, kdims=None, vdims=None, mdims=None, **kwargs):
if kdims is None: kdims = self._element.kdims
el_type = Path if is_geographic(self._element, kdims) else HvPath
return self(el_type, kdims, vdims, mdims, **kwargs)
Dataset._conversion_interface = GeoConversion
|
UnbDroid/robomagellan | Codigos/Raspberry/desenvolvimentoRos/build/tf2_py/catkin_generated/pkg.installspace.context.pc.py | Python | gpl-3.0 | 395 | 0 | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_IN | CLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "rospy;tf2".replace(';', ' ')
PKG_CO | NFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "tf2_py"
PROJECT_SPACE_DIR = "/home/pi/Documents/desenvolvimentoRos/install"
PROJECT_VERSION = "0.5.13"
|
tommartensen/arion-backend | arionBackend/tests/api/__init__.py | Python | mit | 47 | 0 | """
This module | contains all api tests.
""" | |
nicko96/Chrome-Infra | infra/services/mastermon/test/pollers_test.py | Python | bsd-3-clause | 8,592 | 0.004772 | # Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import tempfile
import unittest
import mock
import requests
from infra_libs import temporary_directory
from infra.services.mastermon import pollers
class FakePoller(pollers.Poller):
endpoint = '/foo'
def __init__(self, base_url):
super(FakePoller, self).__init__(base_url, {})
self.called_with_data = None
def handle_response(self, data):
self.called_with_data = data
@mock.patch('requests.get')
class PollerTest(unittest.TestCase):
def test_requests_url(self, mock_get):
response = mock_get.return_value
response.json.return_value = {'foo': 'bar'}
response.status_code = 200
p = FakePoller('http://foobar')
self.assertTrue(p.poll())
self.assertEquals(1, mock_get.call_count)
self.assertEquals('http://foobar/json/foo', mock_get.call_args[0][0])
def test_strips_trailing_slashes(self, mock_get):
response = mock_get.return_value
response.json.return_value = {'foo': 'bar'}
response.status_code = 200
p = FakePoller('http://foobar////')
self.assertTrue(p.poll())
self.assertEquals(1, mock_get.call_count)
self.assertEquals('http://foobar/json/foo', mock_get.call_args[0][0])
def test_returns_false_for_non_200(self, mock_get):
response = mock_get.return_value
response.status_code = 404
p = FakePoller('http://foobar')
self.assertFalse(p.poll())
def test_returns_false_for_exception(self, mock_get):
mock_get.side_effect = requests.exceptions.ConnectionError
p = FakePoller('http://foobar')
self.assertFalse(p.poll())
def test_calls_handle_response(self, mock_get):
response = mock_get.return_value
response.json.return_value = {'foo': 'bar'}
response.status_code = 200
p = FakePoller('http://foobar')
self.assertTrue(p.poll())
self.assertEqual({'foo': 'bar'}, p.called_with_data)
def test_handles_invalid_json(self, mock_get):
response = mock_get.return_value
response.json.side_effect = ValueError
response.status_code = 200
p = FakePoller('http://foobar')
self.assertFalse(p.poll())
self.assertIsNone(p.called_with_data)
class VarzPollerTest(unittest.TestCase):
def test_response(self):
p = pollers.VarzPoller('', {'x': 'y'})
p.handle_response({
'server_uptime': 123,
'accepting_builds': True,
'builders': {
'foo': {
'connected_slaves': 1,
'current_builds': 2,
'pending_builds': 3,
'state': "offline",
'total_slaves': 4,
'recent_builds_by_status': {
'0': 1,
'2': 2,
'4': 3,
'building': 4,
},
'recent_finished_build_times': [1, 2, 3],
'recent_successful_build_times': [1, 2, 3],
},
'bar': {
'connected_slaves': 5,
'current_builds': 6,
'pending_builds': 7,
'state': "idle",
'total_slaves': 8,
'recent_builds_by_status': {
'0': 1,
'2': 2,
'4': 3,
'building': 4,
},
'recent_finished_build_times': [1, 2, 3],
'recent_successful_build_times': [1, 2, 3],
},
},
})
self.assertEqual(123, p.uptime.get({'x': 'y'}))
self.assertEqual(True, p.accepting_builds.get({'x': 'y'}))
self.assertEqual(1, p.connected.get({'builder': 'foo', 'x': 'y'}))
self.assertEqual(2, p.current_builds.get({'builder': 'foo', 'x': 'y'}))
self.assertEqual(3, p.pending_builds.get({'builder': 'foo', 'x': 'y'}))
self.assertEqual(4, p.total.get({'builder': 'foo', 'x': 'y'}))
self.assertEqual('offline', p.state.get({'builder': 'foo', 'x': 'y'}))
self.assertEqual(5, p.connected.get({'builder': 'bar', 'x': 'y'}))
self.assertEqual(6, p.current_builds.get({'builder': 'bar', 'x': 'y'}))
self.assertEqual(7, p.pending_builds.get({'builder': 'bar', 'x': 'y'}))
self.assertEqual(8, p.total.get({'builder': 'bar', 'x': 'y'}))
self.assertEqual('idle', p.state.get({'builder': 'bar', 'x': 'y'}))
self.assertEqual(1, p.recent_builds.get(
{'builder': 'foo', 'x': 'y', 'status': 'success'}))
self.assertEqual(4, p.recent_builds.get(
{'builder': 'foo', 'x': 'y', 'status': 'building'}))
self.assertIsNotNone(p.recent_finished_build_times.get(
{'builder': 'foo', 'x': 'y'}))
self.assertIsNotNone(p.recent_successful_build_times.get(
{'builder': 'foo', 'x': 'y'}))
def test_response_with_missing_data(self):
p = pollers.VarzPoller('', {'x': 'y'})
p.handle_response({
'server_uptime': 123,
'accepting_builds': True,
'builders': {
'foo': {
'state': "offline",
'total_slaves': 4,
},
'bar': {
'connected_slaves': 5,
'current_builds': 6,
'pending_builds': 7,
},
},
})
self.assertEqual(123, p.uptime.get({'x': 'y'}))
self.assertEqual(True, p.accepting_builds.get({'x': 'y'}))
self.assertEqual(0, p.connected.get({'builder': 'foo', 'x': 'y'}))
self.assertEqual(0, p.current_builds.get({'builder': 'foo', 'x': 'y'}))
self.assertEqual(0, p.pending_builds.get({'builder': 'foo', 'x': 'y'}))
self.assertEqual(4, p.total.get({'builder': 'foo', 'x': 'y'}))
self.assertEqual('offline', p.state.get({'builder': 'foo', 'x': 'y'}))
self.assertEqual(5, p.connected.get({'builder': 'bar', 'x': 'y'}))
self.assertEqual(6, p.current_builds.get({'builder': 'bar', 'x': 'y'}))
self.assertEqual(7, p.pending_builds.get({'builder': 'bar', 'x': 'y'}))
self.assertEqual(0, p.total.get({'builder': 'bar', 'x': 'y'}))
self.assertEqual('unknown', p.state.get({'builder': 'bar', 'x': 'y'}))
class FilePollerTest(unittest.TestCase):
@staticmethod
def create_data_file(dirname, data_list):
with open(os.path.join(dirname, 'ts_mon.log'), 'w') as f:
for data in data_list:
f.write('%s\n' % json.dumps(data))
return f.name
def test_no_file(self):
with temporary_directory(prefix='poller-test-') as tempdir:
filename = os.path.join(tempdir, 'no-such-file')
p = pollers.FilePoller(filename, {})
self.assertTrue(p.poll())
self.assertFalse(os.path.isfile(pollers.rotated_filename(filename)))
@mock.patch('infra_libs.ts_mon.CounterMetric.increment')
@mock.patch('infra_libs.ts_mon.CumulativeDistributionMetric.add')
def test_file_has_data(self, fake_add, fa | ke_increment):
result1 = {'builder': 'b1', 'slave': 's1',
'result': 'r1', 'project_id': 'chromium'}
r | esult2 = {'builder': 'b1', 'slave': 's1',
'result': 'r1', 'project_id': 'unknown'}
# Check that we've listed all the required metric fields.
self.assertEqual(set(result1), set(pollers.FilePoller.field_keys))
self.assertEqual(set(result2), set(pollers.FilePoller.field_keys))
data1 = result1.copy()
data2 = result2.copy()
data1['random'] = 'value' # Extra field, should be ignored.
del data2['project_id'] # Missing field, should become 'unknown'.
data2['duration_s'] = 5
with temporary_directory(prefix='poller-test-') as tempdir:
filename = self.create_data_file(tempdir, [data1, data2])
p = pollers.FilePoller(filename, {})
self.assertTrue(p.poll())
fake_increment.assert_any_call(result1)
fake_increment.assert_any_call(result2)
fake_add.assert_any_call(data2['duration_s'], result2)
self.assertFalse(os.path.isfile(filename))
# Make sure the rotated file is still there - for debugging.
self.assertTrue(os.path.isfile(pollers.rotated_filename(filename)))
def test_file_has_bad_data(self):
"""Mostly a smoke test: don't crash on bad data."""
with temporary_directory(prefix='poller-test-') as tempdir:
filename = self.create_data_file(tempdir, [])
with open(filename, 'a') as f:
f.write('}')
|
akshaybabloo/Car-ND | Term_1/Finding_Lane_Lines_1/2_color_region_masking.py | Python | mit | 2,501 | 0.003599 | import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import sys
try:
image = mpimg.imread('test.jpg')
except FileNotFoundError as e:
print(e)
sys.exit(1)
print('This image is: {}, with dimensions: {}'.format(type(image), image.shape))
ysize = image.shape[0]
xsize = image.shape[1]
color_select = np.copy(image)
line_image = np.copy(image)
red_threshold = 200
green_threshold = 200
blue_threshold = 200
rgb_threshold = [red_threshold, green_threshold, blue_threshold]
# Init a triangle (this part is variable)
left_bottom = [100, 525] # [0, 539]
right_bottom = [850, 550] # [900, 539]
apex = [460, 310] # [475, 320]
# See https://docs.scipy.org/doc/numpy/reference/generated/numpy.polyfit.html for more info
fit_left = np.polyfit((left_bottom[0], apex[0]), (left_bottom[1], apex[1]), 1)
fit_right = np.polyfit((right_bottom[0], apex[0]), (right_bottom[1], apex[1]), 1)
fit_bottom = np.polyfit((left_bottom[0], right_bottom[0]), (left_bottom[1], right_bottom[1]), 1)
color_thresholds = (image[:, :, 0] < rgb_threshold[0]) | (image[:, :, 1] < rgb_threshold[1]) | (
image[:, : | , 2] < rgb_threshold[2])
# See https://docs.scipy.org/doc/numpy/reference/generated/numpy.meshgrid.html for more info
xx, yy = np.meshgrid(np.arange | (0, xsize), np.arange(0, ysize))
region_thresholds = (yy > (xx * fit_left[0] + fit_left[1])) & (yy > (xx * fit_right[0] + fit_right[1])) & (
yy < (xx * fit_bottom[0] + fit_bottom[1]))
# Mask color selection
color_select[color_thresholds | ~region_thresholds] = [0, 0, 0]
# Find where image is both colored right and in the region
line_image[~color_thresholds & region_thresholds] = [255, 0, 0]
# Show figures
f = plt.figure()
x = [left_bottom[0], right_bottom[0], apex[0], left_bottom[0]]
y = [left_bottom[1], right_bottom[1], apex[1], left_bottom[1]]
f.add_subplot(2, 3, 1)
plt.plot(x, y, 'b--', lw=4)
plt.imshow(image) # Image superimposed with plotted lines
plt.title("Step 1: Original image superimposed with plotted lines")
f.add_subplot(2, 3, 2)
plt.imshow(color_thresholds, cmap='gray')
plt.title("Step 2: Get the threshold image")
f.add_subplot(2, 3, 3)
plt.imshow(region_thresholds, cmap='gray')
plt.title("Step 3: Get the regional threshold")
f.add_subplot(2, 3, 4)
plt.imshow(color_select) # bright color line with triangle
plt.title("Step 4: Bright lines with triangle selected")
f.add_subplot(2, 3, 5)
plt.imshow(line_image) # Masked image
plt.title("Final: Original image with masked color")
plt.show()
|
D4wN/brickv | src/build_data/windows/OpenGL/raw/GL/EXT/vertex_array.py | Python | gpl-2.0 | 8,036 | 0.036212 | '''OpenGL extension EXT.vertex_array
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_EXT_vertex_array'
_DEPRECATED = False
GL_VERTEX_ARRAY_EXT = constant.Constant( 'GL_VERTEX_ARRAY_EXT', 0x8074 )
glget.addGLGetConstant( GL_VERTEX_ARRAY_EXT, (1,) )
GL_NORMAL_ARRAY_EXT = constant.Constant( 'GL_NORMAL_ARRAY_EXT', 0x8075 )
glget.addGLGetConstant( GL_NORMAL_ARRAY_EXT, (1,) )
GL_COLOR_ARRAY_EXT = constant.Constant( 'GL_COLOR_ARRAY_EXT', 0x8076 )
GL_INDEX_ARRAY_EXT = constant.Constant( 'GL_INDEX_ARRAY_EXT', 0x8077 )
glget.addGLGetConstant( GL_INDEX_ARRAY_EXT, (1,) )
GL_TEXTURE_COORD_ARRAY_EXT = constant.Constant( 'GL_TEXTURE_COORD_ARRAY_EXT', 0x8078 )
glget.addGLGetConstant( GL_TEXTURE_COORD_ARRAY_EXT, (1,) )
GL_EDGE_FLAG_ARRAY_EXT = constant.Constant( 'GL_EDGE_FLAG_ARRAY_EXT', 0x8079 )
glget.addGLGetConstant( GL_EDGE_FLAG_ARRAY_EXT, (1,) )
GL_VERTEX_ARRAY_SIZE_EXT = constant.Constant( 'GL_VERTEX_ARRAY_SIZE_EXT', 0x807A )
glget.addGLGetConstant( GL_VERTEX_ARRAY_SIZE_EXT, (1,) )
GL_VERTEX_ARRAY_TYPE_EXT = constant.Constant( 'GL_VERTEX_ARRAY_TYPE_EXT', 0x807B )
glget.addGLGetConstant( GL_VERTEX_ARRAY_TYPE_EXT, (1,) )
GL_VERTEX_ARRAY_STRIDE_EXT = constant.Constant( 'GL_VERTEX_ARRAY_STRIDE_EXT', 0x807C )
glget.addGLGetConstant( GL_VERTEX_ARRAY_STRIDE_EXT, (1,) )
GL_VERTEX_ARRAY_COUNT_EXT = constant.Constant( 'GL_VERTEX_ARRAY_COUNT_EXT', 0x807D )
glget.addGLGetConstant( GL_VERTEX_ARRAY_COUNT_EXT, (1,) )
GL_NORMAL_ARRAY_TYPE_EXT = constant.Constant( 'GL_NORMAL_ARRAY_TYPE_EXT', 0x807E )
glget.addGLGetConstant( GL_NORMAL_ARRAY_TYPE_EXT, (1,) )
GL_NORMAL_ARRAY_STRIDE_EXT = constant.Constant( 'GL_NORMAL_ARRAY_STRIDE_EXT', 0x807F )
glget.addGLGetConstant( GL_NORMAL_ARRAY_STRIDE_EXT, (1,) )
GL_NORMAL_ARRAY_COUNT_EXT = constant.Constant( 'GL_NORMAL_ARRAY_COUNT_EXT', 0x8080 )
glget.addGLGetConstant( GL_NORMAL_ARRAY_COUNT_EXT, (1,) )
GL_COLOR_ARRAY_SIZE_EXT = constant.Constant( 'GL_COLOR_ARRAY_SIZE_EXT', 0x8081 )
glget.addGLGetConstant( GL_COLOR_ARRAY_SIZE_EXT, (1,) )
GL_COLOR_ARRAY_TYPE_EXT = constant.Constant( 'GL_COLOR_ARRAY_TYPE_EXT', 0x8082 )
glget.addGLGetConstant( GL_COLOR_ARRAY_TYPE_EXT, (1,) )
GL_COLOR_ARRAY_STRIDE_EXT = constant.Constant( 'GL_COLOR_ARRAY_STRIDE_EXT', 0x8083 )
glget.addGLGetConstant( GL_COLOR_ARRAY_STRIDE_EXT, (1,) )
GL_COLOR_ARRAY_COUNT_EXT = constant.Constant( 'GL_COLOR_ARRAY_COUNT_EXT', 0x8084 )
glget.addGLGetConstant( GL_COLOR_ARRAY_COUNT_EXT, (1,) )
GL_INDEX_ARRAY_TYPE_EXT = constant.Constant( 'GL_INDEX_ARRAY_TYPE_EXT', 0x8085 )
glget.addGLGetConstant( GL_INDEX_ARRAY_TYPE_EXT, (1,) )
GL_INDEX_ARRAY_STRIDE_EXT = constant.Constant( 'GL_INDEX_ARRAY_STRIDE_EXT', 0x8086 )
glget.addGLGetConstant( GL_INDEX_ARRAY_STRIDE_EXT, (1,) )
GL_INDEX_ARRAY_COUNT_EXT = constant.Constant( 'GL_INDEX_ARRAY_COUNT_EXT', 0x8087 )
glget.addGLGetConstant( GL_INDEX_ARRAY_COUNT_EXT, (1,) )
GL_TEXTURE_COORD_ARRAY_SIZE_EXT = constant.Constant( 'GL_TEXTURE_COORD_ARRAY_SIZE_EXT', 0x8088 )
glget.addGLGetConstant( GL_TEXTURE_COORD_ARRAY_SIZE_EXT, (1,) )
GL_TEXTURE_COORD_ARRAY_TYPE_EXT = constant.Constant( 'GL_TEXTURE_COORD_ARRAY_TYPE_EXT', 0x8089 )
glget.addGLGetConstant( GL_TEXTURE_COORD_ARRAY_TYPE_EXT, (1,) )
GL_TEXTURE_COORD_ARRAY_STRIDE_EXT = constant.Constant( 'GL_TEXTURE_COORD_ARRAY_STRIDE_EXT', 0x808A )
glget.addGLGetConstant( GL_TEXTURE_COORD_ARRAY_STRIDE_EXT, (1,) )
GL_TEXTURE_COORD_ARRAY_COUNT_EXT = constant.Constant( 'GL_TEXTURE_COORD_ARRAY_COUNT_EXT', 0x808B )
glget.addGLGetConstant( GL_TEXTURE_COORD_ARRAY_COUNT_EXT, (1,) )
GL_EDGE_FLAG_ARRAY_STRIDE_EXT = constant.Constant( 'GL_EDGE_FLAG_ARRAY_STRIDE_EXT', 0x808C )
glget.addGLGetConstant( GL_EDGE_FLAG_ARRAY_STRIDE_EXT, (1,) )
GL_EDGE_FLAG_ARRAY_COUNT_EXT = constant.Constant( 'GL_EDGE_FLAG_ARRAY_COUNT_EXT', 0x808D )
glget.addGLGetConstant( GL_EDGE_FLAG_ARRAY_COUNT_EXT, (1,) )
GL_VERTEX_ARRAY_POINTER_EXT = constant.Constant( 'GL_VERTEX_ARRAY_POINTER_EXT', 0x808E )
GL_NORMAL_ARRAY_POINTER_EXT = constant.Constant( 'GL_NORMAL_ARRAY_POINTER_EXT', 0x808F )
GL_COLOR_ARRAY_POINTER_EXT = constant.Constant( 'GL_COLOR_ARRAY_POINTER_EXT', 0x8090 )
GL_INDEX_ARRAY_POINTER_EXT = constant.Constant( 'GL_INDEX_ARRAY_POINTER_EXT', 0x8091 )
GL_TEXTURE_COORD_ARRAY_POINTER_EXT = constant.Constant( 'GL_TEXTURE_COORD_ARRAY_POINTER_EXT', 0x8092 )
GL_EDGE_FLAG_ARRAY_POINTER_EXT = constant.Constant( 'GL_EDGE_FLAG_ARRAY_POINTER_EXT', 0x8093 )
glArrayElementEXT = platform.createExtensionFunction(
'glArrayElementEXT',dll=platform.GL,
extension=EXTENSION_N | AME,
resultType=None,
argTypes=(constants.GLint,),
doc='glArrayElementEXT(GLint(i)) -> None',
argNames=('i',),
deprecated=_DEPRECATED,
)
glColorPointerEXT = platform.createExtensionFunction(
'glColorPointerEXT',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLint,constants.GLenum,constants.GLsizei,con | stants.GLsizei,ctypes.c_void_p,),
doc='glColorPointerEXT(GLint(size), GLenum(type), GLsizei(stride), GLsizei(count), c_void_p(pointer)) -> None',
argNames=('size','type','stride','count','pointer',),
deprecated=_DEPRECATED,
)
glDrawArraysEXT = platform.createExtensionFunction(
'glDrawArraysEXT',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,constants.GLint,constants.GLsizei,),
doc='glDrawArraysEXT(GLenum(mode), GLint(first), GLsizei(count)) -> None',
argNames=('mode','first','count',),
deprecated=_DEPRECATED,
)
glEdgeFlagPointerEXT = platform.createExtensionFunction(
'glEdgeFlagPointerEXT',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLsizei,constants.GLsizei,arrays.GLbooleanArray,),
doc='glEdgeFlagPointerEXT(GLsizei(stride), GLsizei(count), GLbooleanArray(pointer)) -> None',
argNames=('stride','count','pointer',),
deprecated=_DEPRECATED,
)
glGetPointervEXT = platform.createExtensionFunction(
'glGetPointervEXT',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,ctypes.POINTER(ctypes.c_void_p),),
doc='glGetPointervEXT(GLenum(pname), POINTER(ctypes.c_void_p)(params)) -> None',
argNames=('pname','params',),
deprecated=_DEPRECATED,
)
glIndexPointerEXT = platform.createExtensionFunction(
'glIndexPointerEXT',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,constants.GLsizei,constants.GLsizei,ctypes.c_void_p,),
doc='glIndexPointerEXT(GLenum(type), GLsizei(stride), GLsizei(count), c_void_p(pointer)) -> None',
argNames=('type','stride','count','pointer',),
deprecated=_DEPRECATED,
)
glNormalPointerEXT = platform.createExtensionFunction(
'glNormalPointerEXT',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,constants.GLsizei,constants.GLsizei,ctypes.c_void_p,),
doc='glNormalPointerEXT(GLenum(type), GLsizei(stride), GLsizei(count), c_void_p(pointer)) -> None',
argNames=('type','stride','count','pointer',),
deprecated=_DEPRECATED,
)
glTexCoordPointerEXT = platform.createExtensionFunction(
'glTexCoordPointerEXT',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLint,constants.GLenum,constants.GLsizei,constants.GLsizei,ctypes.c_void_p,),
doc='glTexCoordPointerEXT(GLint(size), GLenum(type), GLsizei(stride), GLsizei(count), c_void_p(pointer)) -> None',
argNames=('size','type','stride','count','pointer',),
deprecated=_DEPRECATED,
)
glVertexPointerEXT = platform.createExtensionFunction(
'glVertexPointerEXT',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLint,constants.GLenum,constants.GLsizei,constants.GLsizei,ctypes.c_void_p,),
doc='glVertexPointerEXT(GLint(size), GLenum(type), GLsizei(stride), GLsizei(count), c_void_p(pointer)) -> None',
argNames=('size','type','stride','count','pointer',),
deprecated=_DEPRECATED,
)
def glInitVertexArrayEXT():
'''Return boolean indicating whether this extension is available'''
return extensions.hasGLExtension( EXTENSION_NAME )
|
EdDev/vdsm | tests/storage_volume_test.py | Python | gpl-2.0 | 7,900 | 0 | #
# Copyright 2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at you | r option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have receive | d a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
from contextlib import contextmanager
from monkeypatch import MonkeyPatchScope
from storagefakelib import FakeStorageDomainCache
from storagetestlib import FakeSD
from storagetestlib import fake_env
from testlib import expandPermutations, permutations
from testlib import make_uuid
from testlib import recorded
from testlib import VdsmTestCase
from vdsm.storage import constants as sc
from vdsm.storage import exception as se
from vdsm.storage import resourceManager as rm
from storage import sd
from storage import volume
HOST_ID = 1
MB = 1048576
class FakeSDManifest(object):
@recorded
def acquireVolumeLease(self, hostId, imgUUID, volUUID):
pass
@recorded
def releaseVolumeLease(self, imgUUID, volUUID):
pass
@expandPermutations
class VolumeLeaseTest(VdsmTestCase):
def test_properties(self):
a = volume.VolumeLease(HOST_ID, 'dom', 'img', 'vol')
self.assertEqual(sd.getNamespace(sc.VOLUME_LEASE_NAMESPACE, 'dom'),
a.ns)
self.assertEqual('vol', a.name)
self.assertEqual(rm.EXCLUSIVE, a.mode)
@permutations((
(('domA', 'img', 'vol'), ('domB', 'img', 'vol')),
(('dom', 'img', 'volA'), ('dom', 'img', 'volB')),
))
def test_less_than(self, a, b):
b = volume.VolumeLease(HOST_ID, *b)
a = volume.VolumeLease(HOST_ID, *a)
self.assertLess(a, b)
def test_equality(self):
a = volume.VolumeLease(HOST_ID, 'dom', 'img', 'vol')
b = volume.VolumeLease(HOST_ID, 'dom', 'img', 'vol')
self.assertEqual(a, b)
def test_equality_different_image(self):
a = volume.VolumeLease(HOST_ID, 'dom', 'img1', 'vol')
b = volume.VolumeLease(HOST_ID, 'dom', 'img2', 'vol')
self.assertEqual(a, b)
def test_equality_different_host_id(self):
a = volume.VolumeLease(0, 'dom', 'img', 'vol')
b = volume.VolumeLease(1, 'dom', 'img', 'vol')
self.assertEqual(a, b)
def test_acquire_release(self):
sdcache = FakeStorageDomainCache()
manifest = FakeSDManifest()
sdcache.domains['dom'] = FakeSD(manifest)
expected = [('acquireVolumeLease', (HOST_ID, 'img', 'vol'), {}),
('releaseVolumeLease', ('img', 'vol'), {})]
with MonkeyPatchScope([(volume, 'sdCache', sdcache)]):
lock = volume.VolumeLease(HOST_ID, 'dom', 'img', 'vol')
lock.acquire()
self.assertEqual(expected[:1], manifest.__calls__)
lock.release()
self.assertEqual(expected, manifest.__calls__)
def test_repr(self):
lock = volume.VolumeLease(HOST_ID, 'dom', 'img', 'vol')
lock_string = str(lock)
self.assertIn("VolumeLease", lock_string)
self.assertIn("ns=04_lease_dom", lock_string)
self.assertIn("name=vol", lock_string)
self.assertIn("mode=exclusive", lock_string)
self.assertIn("%x" % id(lock), lock_string)
@expandPermutations
class VolumeManifestTest(VdsmTestCase):
@contextmanager
def volume(self):
img_id = make_uuid()
vol_id = make_uuid()
with fake_env('file') as env:
env.make_volume(MB, img_id, vol_id)
vol = env.sd_manifest.produceVolume(img_id, vol_id)
yield vol
def test_operation(self):
with self.volume() as vol:
vol.setMetadata = CountedInstanceMethod(vol.setMetadata)
self.assertEqual(sc.LEGAL_VOL, vol.getLegality())
with vol.operation():
self.assertEqual(sc.ILLEGAL_VOL, vol.getLegality())
self.assertEqual(1, vol.setMetadata.nr_calls)
self.assertEqual(sc.LEGAL_VOL, vol.getLegality())
self.assertEqual(2, vol.setMetadata.nr_calls)
def test_operation_fail_inside_context(self):
with self.volume() as vol:
self.assertEqual(sc.LEGAL_VOL, vol.getLegality())
with self.assertRaises(ValueError):
with vol.operation():
raise ValueError()
self.assertEqual(sc.ILLEGAL_VOL, vol.getLegality())
@permutations(((None, 0), (100, 100)))
def test_get_info_generation_id(self, orig_gen, info_gen):
with self.volume() as vol:
vol.getLeaseStatus = lambda: 'unused'
if orig_gen is not None:
vol.setMetaParam(sc.GENERATION, orig_gen)
self.assertEqual(info_gen, vol.getInfo()['generation'])
def test_operation_valid_generation(self):
generation = 100
with self.volume() as vol:
vol.setMetaParam(sc.GENERATION, generation)
with vol.operation(generation):
pass
self.assertEqual(generation + 1, vol.getMetaParam(sc.GENERATION))
@permutations(((100, 99), (100, 101)))
def test_operation_invalid_generation_raises(self, actual_generation,
requested_generation):
with self.volume() as vol:
vol.setMetaParam(sc.GENERATION, actual_generation)
with self.assertRaises(se.GenerationMismatch):
with vol.operation(requested_generation):
pass
self.assertEqual(actual_generation,
vol.getMetaParam(sc.GENERATION))
@permutations((
(sc.MAX_GENERATION, 0),
(sc.MAX_GENERATION - 1, sc.MAX_GENERATION),
))
def test_generation_wrapping(self, first_gen, next_gen):
with self.volume() as vol:
vol.setMetaParam(sc.GENERATION, first_gen)
with vol.operation(first_gen):
pass
self.assertEqual(next_gen, vol.getMetaParam(sc.GENERATION))
def test_operation_on_illegal_volume(self):
with self.volume() as vol:
# This volume was illegal before the operation
vol.setMetaParam(sc.LEGALITY, sc.ILLEGAL_VOL)
vol.setMetaParam(sc.GENERATION, 0)
with vol.operation(requested_gen=0, set_illegal=False):
# It should remain illegal during the operation
self.assertEqual(sc.ILLEGAL_VOL, vol.getMetaParam(sc.LEGALITY))
pass
self.assertEqual(1, vol.getMetaParam(sc.GENERATION))
# It should remain illegal after the operation
self.assertEqual(sc.ILLEGAL_VOL, vol.getMetaParam(sc.LEGALITY))
def test_operation_modifying_metadata(self):
with self.volume() as vol:
with vol.operation(requested_gen=0, set_illegal=False):
vol.setMetaParam(sc.DESCRIPTION, "description")
# Metadata changes inside the context should not be overriden by
# wirting the new generation.
self.assertEqual("description", vol.getMetaParam(sc.DESCRIPTION))
class CountedInstanceMethod(object):
def __init__(self, method):
self._method = method
self.nr_calls = 0
def __call__(self, *args, **kwargs):
self.nr_calls += 1
return self._method(*args, **kwargs)
|
Happyholic1203/ransomcare | ransomcare/sniffers.py | Python | mit | 5,790 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import subprocess
import json
import logging
import threading
import thread
import time
import signal
import psutil
import eventlet
from . import event
logger = logging.getLogger(__name__)
pid_cwd = {}
def get_absolute_path(event_raw):
'''
Keeps a cache of processes' cwds, in case that their events might come
after they're terminated.
'''
pid = event_raw.get('pid')
path = event_raw.get('path')
if path and path[0] == '/':
return os.path.realpath(path)
cwd = None
logger.debug('%r' % pid_cwd)
try:
process = psutil.Process(pid)
cwd = process.cwd()
pid_cwd[pid] = cwd # cache every pid's cwd
except (psutil.NoSuchProcess, psutil.AccessDenied):
cwd = pid_cwd.get(pid)
if not cwd:
return None
return os.path.realpath(os.path.join(cwd, path))
def to_absolute(pid, fd, path):
if not path:
return None
if path[0] == '/':
return path
try:
process = psutil.Process(pid)
cwd = process.cwd()
pid_cwd[pid] = cwd # cache every pid's cwd
except (psutil.NoSuchProcess, psutil.AccessDenied):
cwd = pid_cwd.get(pid)
if not cwd:
return None
return os.path.realpath(os.path.join(cwd, path))
class DTraceSniffer(object):
'''
Sniffs and gene | rates file events:
EventFileOpen (pid, path)
EventListDir (pid, path)
EventFileRead (pid, path, size)
EventFileWrite (pid, path, size)
EventFileUnlink (pid, path)
EventFileClose (pid, path)
The path generated from dtrace might be relative paths, sniffer is
reponsible for translating them into absolute paths.
'''
def __init__(self):
self._sniffer = None
self._should_stop = False
self.files = {} # files[pid | ][fd] gives the filename
self._sniffer_reader = None # eventlet greenthread
self._sniffer_output = eventlet.Queue()
def read_sniffer(self):
logger.info('Sniffer started')
while True:
try:
line = self._sniffer.stdout.readline()
event_raw = json.loads(line)
self._sniffer_output.put(event_raw)
eventlet.sleep(0) # yield control to engine
except ValueError:
if line != '\n' and line != "''":
logger.warn('Failed to JSON-decode: "%r"' % line)
continue
except IOError:
logger.debug('DTrace exited')
break
def start(self):
logger.info('Starting sniffer...')
logger.debug('Starting dtrace... excluding self pid: %d' % os.getpid())
DEVNULL = open(os.devnull, 'wb')
self._sniffer = subprocess.Popen(
['./ransomcare/sniffer', '-x', str(os.getpid())],
stdout=subprocess.PIPE, stderr=DEVNULL, preexec_fn=os.setsid)
self._sniffer_reader = eventlet.spawn(self.read_sniffer)
while not self._should_stop:
try:
event_raw = self._sniffer_output.get()
except KeyboardInterrupt:
break
action = event_raw.get('action')
pid = event_raw.get('pid')
fd = event_raw.get('fd')
path = event_raw.get('path')
if action == 'stop':
continue
if action == 'open':
path = self.update_path(pid, fd, path)
if not path:
continue
elif action in ('close', 'unlink'):
path = self.remove_path(pid, fd)
if not path:
continue
else:
path = self.get_path(pid, fd)
if not path:
continue
size = event_raw.get('size')
timestamp = event_raw.get('t')
if action == 'open':
event.EventFileOpen(timestamp, pid, path).fire()
elif action == 'listdir':
event.EventListDir(timestamp, pid, path).fire()
elif action == 'read':
event.EventFileRead(timestamp, pid, path, size).fire()
elif action == 'write':
event.EventFileWrite(timestamp, pid, path, size).fire()
elif action == 'close':
event.EventFileClose(timestamp, pid, path).fire()
elif action == 'unlink':
event.EventFileUnlink(timestamp, pid, path).fire()
self._sniffer.terminate()
self._should_stop = True
logger.info('Sniffer stopped')
def stop(self):
if self._should_stop:
return
logger.info('Stopping sniffer...')
self._should_stop = True
self._sniffer_output.put({'action': 'stop'})
def update_path(self, pid, fd, path):
if not path:
return None
self.files.setdefault(pid, {})
if path[0] != '/':
abspath = to_absolute(pid, fd, path)
if not abspath:
return None
else:
abspath = path
self.files[pid][fd] = abspath
return abspath
def remove_path(self, pid, fd):
'''
Removes the file path associated with (pid, fd)
Args:
pid (int)
fd (int)
Returns:
string: absolute path to the file associated with (pid, fd)
'''
path = self.files.get(pid, {}).get(fd, None)
if path:
del self.files[pid][fd]
if len(self.files[pid]) == 0:
del self.files[pid]
return path
def get_path(self, pid, fd):
return self.files.get(pid, {}).get(fd, None)
|
bikeshare/bikeshare-web | bikeshare.py | Python | gpl-2.0 | 4,352 | 0.006893 | import syslog
from flask import Flask, request, render_template, send_from_directory
from flask.ext.bootstrap import Bootstrap
import json
import re
import syslog
import urllib2
from datetime import timedelta
from flask import make_response, request, current_app
from functools import update_wrapper
import sstoreclient
import requests
app = Flask(__name__)
app.config.setdefault('BOOTSTRAP_SERVE_LOCAL',True)
bootstrap = Bootstrap(app)
# Set debug mode.
debug = False
apiurl = 'http://api.bikeshare.cs.pdx.edu'
# ================
# Main app function definitions
# ================
# This is a GET route to display the "view all stations" page
@app.route('/stations')
def view_all_stations():
apiroute = '/REST/1.0/stations/all'
r = requests.get(apiurl + apiroute)
if r.status_code == 200:
data = r.json()
return render_template('stations.html',stations_list=data['stations'],
list_len=len(data))
else:
return render_template('500.html')
# This is a GET route to display the "view all bikes" page
@app.route('/bikes')
def view_all_bikes():
apiroute = '/REST/1.0/bikes/all'
r = requests.get(apiurl + apiroute)
if r.status_code == 200:
data = r.json()
return render_template('bikes.html', bikes=data['bikes'])
else:
return render_template('500.html')
# This is a GET route to | display the "view all riders" page
@app.rou | te('/users')
def view_all_riders():
apiroute = '/REST/1.0/users/all'
r = requests.get(apiurl + apiroute)
if r.status_code == 200:
data = r.json()
return render_template('users.html', users=data['users'])
else:
return render_template('500.html')
# This is a GET route to display the landing page of bikeshare
@app.route('/')
def home():
apiroute = '/REST/1.0/stats'
r = requests.get(apiurl + apiroute)
if r.status_code == 200:
data = r.json()
return render_template('index.html', bikes=data['BIKES'],
active_bikes = data['ACTIVE_BIKES'], stations=data['STATIONS'],
users=data['USERS'], bikes_per_station=data['BIKES_PER_STATION'])
else:
return render_template('index.html')
# This is a GET route to display a single bike page of a given name
@app.route('/bike/<int:bike_id>')
def view_bike(bike_id):
apiroute = '/REST/1.0/bikes/info/' + str(bike_id)
print apiurl + apiroute
r = requests.get(apiurl + apiroute)
if r.status_code == 200:
data = r.json()
print data
return render_template('bike.html',bike_id=bike_id,
user_id=data['USER_ID'], lat=data['LATITUDE'],
lon=data['LONGITUDE'])
else:
return render_template('500.html')
# This is a GET route to display a single station page of a given name
@app.route('/station/<int:station_id>')
def view_station(station_id):
apiroute = '/REST/1.0/stations/info/' + str(station_id)
r = requests.get(apiurl + apiroute)
if r.status_code == 200:
data = r.json()
return render_template('station.html',station_id=station_id,
name=data['STATION_NAME'],addr=data['STREET_ADDRESS'],
lat=data['LATITUDE'],lon=data['LONGITUDE'],
num_bikes=data['CURRENT_BIKES'],num_docks=data['CURRENT_DOCKS'],
discount=data['CURRENT_DISCOUNT'])
else:
return render_template('500.html')
# This is a GET route to display a single user page of a given name
@app.route('/user/<user_name>')
def view_user(user_name):
apiroute = '/REST/1.0/users/info/' + user_name
r = requests.get(apiurl + apiroute)
if r.status_code == 200:
data = r.json ()
return render_template('user.html', user=data)
else:
return render_template('500.html')
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/javascript/<path:path>', methods=['GET','OPTIONS'])
def js_proxy(path):
return send_from_directory(app.root_path + '/javascript/', path)
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
if __name__ == '__main__':
if debug:
app.run(host='127.0.0.1', port=8081, debug=True)
else:
app.run(host='0.0.0.0', port=8081, debug=True)
|
google/rekall | rekall-core/rekall/plugins/response/interpolators.py | Python | gpl-2.0 | 3,264 | 0 | # Rekall Memory Forensics
# Copyright 2016 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""This module defines interpolators for the common OSs.
Globs and Artifacts may expand interpolations from the KnowledgeBase. This
module provides a live, on demand, KnowledgeBase.
"""
from builtins import object
import os
import re
import platform
from rekall import kb
from rekall_lib import registry
class KnowledgeBase(object):
def __init__(self, session):
self.session = session
def expand(self, variable):
return []
class LinuxKnowledgeBase(KnowledgeBase):
@registry.memoize
def _get_users_homedir(self):
homedirs = []
for user in open("/etc/passwd"):
user = user.strip()
homedirs.append(user.split(":")[5])
return homedirs
def expand(self, variable):
if variable == "%%users.homedir%%":
return self._get_users_homedir()
self.session.logging.warn("Unable to interpolate %s", variable)
return []
class WindowsKnowledgeBase(KnowledgeBase):
@registry.memoize
def _get_sids(self):
result = []
for hit in self.session.plugins.glob(
r"HKEY_USERS\*", filesystem="Reg", root="\\",
path_sep="\\").collect():
| path = hit["path"]
m = re.search(
r"(S-(\d+-)+\d+)$", path.filename.name or "", re.I)
if m:
result.append(m.group(1))
return result
@registry.memoize
def _get_homedirs(self):
""" | On windows the homedirs are the paths of the user's profile."""
result = []
for artifact_hit in self.session.plugins.artifact_collector(
"WindowsRegistryProfiles"):
for hit_result in artifact_hit.get("result", []):
profile_path = hit_result.get("value")
if profile_path:
result.append(profile_path)
return result
def expand(self, variable):
if variable == "%%users.sid%%":
return self._get_sids()
if variable == "%%users.homedir%%":
return self._get_homedirs()
if variable == "%%environ_systemroot%%":
return [os.environ["systemroot"]]
return []
class KnowledgeBaseHook(kb.ParameterHook):
name = "knowledge_base"
def calculate(self):
if platform.system() == "Linux":
return LinuxKnowledgeBase(self.session)
elif platform.system() == "Windows":
return WindowsKnowledgeBase(self.session)
|
nuodb/nuodb-django | test/auth/tests/views.py | Python | bsd-3-clause | 22,169 | 0.001443 | import os
import re
from django.conf import global_settings, settings
from django.contrib.sites.models import Site, RequestSite
from django.contrib.auth.models import User
from django.core import mail
from django.core.exceptions import SuspiciousOperation
from django.core.urlresolvers import reverse, NoReverseMatch
from django.http import QueryDict
from django.utils.encoding import force_text
from django.utils.html import escape
from django.utils.http import urlquote
from django.utils._os import upath
from django.test import TestCase
from django.test.utils import override_settings
from django.contrib.auth import SESSION_KEY, REDIRECT_FIELD_NAME
from django.contrib.auth.forms import (AuthenticationForm, PasswordChangeForm,
SetPasswordForm, PasswordResetForm)
from django.contrib.auth.tests.utils import skipIfCustomUser
@override_settings(
LANGUAGES=(
('en', 'English'),
),
LANGUAGE_CODE='en',
TEMPLATE_LOADERS=global_settings.TEMPLATE_LOADERS,
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(upath(__file__)), 'templates'),
),
USE_TZ=False,
PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
)
class AuthViewsTestCase(TestCase):
"""
Helper base class for all the follow test cases.
"""
fixtures = ['authtestdata.json']
urls = 'django.contrib.auth.tests.urls'
def login(self, password='password'):
response = self.client.post('/login/', {
'username': 'testclient',
'password': password,
})
self.assertEqual(response.status_code, 302)
self.assertTrue(response['Location'].endswith(settings.LOGIN_REDIRECT_URL))
self.assertTrue(SESSION_KEY in self.client.session)
def assertContainsEscaped(self, response, text, **kwargs):
return self.assertContains(response, escape(force_text(text)), **kwargs)
@skipIfCustomUser
class AuthViewNamedURLTests(AuthViewsTestCase):
urls = 'django.contrib.auth.urls'
def test_named_urls(self):
"Named URLs should be reversible"
expected_named_urls = [
('login', [], {}),
('logout', [], {}),
('password_change', [], {}),
('password_change_done', [], {}),
('password_reset', [], {}),
('password_reset_done', [], {}),
('password_reset_confirm', [], {
'uidb36': 'aaaaaaa',
'token': '1111-aaaaa',
}),
('password_reset_complete', [], {}),
]
for name, args, kwargs in expected_named_urls:
try:
reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.fail("Reversal of url named '%s' failed with NoReverseMatch" % name)
@skipIfCustomUser
class PasswordResetTest(AuthViewsTestCase):
def test_email_not_found(self):
"Error is raised if the provided email address isn't currently registered"
response = self.client.get('/password_reset/')
self.assertEqual(response.status_code, 200)
response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'})
self.assertContainsEscaped(response, PasswordResetForm.error_messages['unknown'])
self.assertEqual(len(mail.outbox), 0)
def test_email_found(self):
"Email is sent if a valid email address is provided for password reset"
response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertTrue("http://" in mail.outbox[0].body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
def test_email_found_custom_from(self):
"Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
response = self.client.post('/password_reset_from_email/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual("staffmember@example.com", mail.outbox[0].from_email)
@override_settings(ALLOWED_HOSTS=['adminsite.com'])
def test_admin_reset(self):
"If the reset view is marked as being for admin, the HTTP_HOST header is used for a domain override."
response = self.client.post('/admin_password_reset/',
{'email': 'staffmember@example.com'},
HTTP_HOST='adminsite.com'
)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertTrue("http://adminsite.com" in mail.outbox[0].body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
# Skip any 500 handler action (like sending more mail...)
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
def test_poisoned_http_host(self):
"Poisoned HTTP_HOST | headers can't be used for reset emails"
# This attack is based on the way browsers handle URLs. The colon
# should be used to separate the port, but if the URL contains an @,
# the colon is interpreted as part of a username for login purposes,
# making 'evil.com' the request domain. Since HTTP_HOST is used to
# produce a meaningful reset URL, we need to be certain that the
# HTTP_HOST header isn't poisoned. This is done as a check when get_h | ost()
# is invoked, but we check here as a practical consequence.
with self.assertRaises(SuspiciousOperation):
self.client.post('/password_reset/',
{'email': 'staffmember@example.com'},
HTTP_HOST='www.example:dr.frankenstein@evil.tld'
)
self.assertEqual(len(mail.outbox), 0)
# Skip any 500 handler action (like sending more mail...)
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
def test_poisoned_http_host_admin_site(self):
"Poisoned HTTP_HOST headers can't be used for reset emails on admin views"
with self.assertRaises(SuspiciousOperation):
self.client.post('/admin_password_reset/',
{'email': 'staffmember@example.com'},
HTTP_HOST='www.example:dr.frankenstein@evil.tld'
)
self.assertEqual(len(mail.outbox), 0)
def _test_confirm_start(self):
# Start by creating the email
response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
return self._read_signup_email(mail.outbox[0])
def _read_signup_email(self, email):
urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
self.assertTrue(urlmatch is not None, "No URL found in sent email")
return urlmatch.group(), urlmatch.groups()[0]
def test_confirm_valid(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# redirect to a 'complete' page:
self.assertContains(response, "Please enter your new password")
def test_confirm_invalid(self):
url, path = self._test_confirm_start()
# Let's munge the token in the path, but keep the same length,
# in case the URLconf will reject a different length.
path = path[:-5] + ("0" * 4) + path[-1]
response = self.client.get(path)
self.assertContains(response, "The password reset link was invalid")
    def test_confirm_invalid_user(self):
        # Ensure that we get a 200 response for a non-existent user, not a 404
        response = self.client.get('/reset/123456-1-1/')
        self.assertContains(response, "The password reset link was invalid")
    def test_confirm_overflow_user(self):
        # Ensure that we get a 200 response for a base36 user id that overflows int
        # (13 'z' characters exceeds the max int-sized base36 value).
        response = self.client.get('/reset/zzzzzzzzzzzzz-1-1/')
        self.assertContains(response, "The password reset link was invalid")
def test_confirm_invalid_post(self):
# Same as test_confirm_invalid, but |
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/tool_shed/grids/admin_grids.py | Python | gpl-3.0 | 22,408 | 0.025036 | import logging
import os
from galaxy.web.framework.helpers import grids
from galaxy.web.framework.helpers import time_ago
from galaxy.webapps.tool_shed import model
from galaxy.model.orm import and_
import tool_shed.util.shed_util_common as suc
from tool_shed.util import hg_util
from tool_shed.grids.repository_grids import CategoryGrid
from tool_shed.grids.repository_grids import RepositoryGrid
from galaxy import eggs
eggs.require( 'mercurial' )
from mercurial import commands
from mercurial import hg
from mercurial import ui
log = logging.getLogger( __name__ )
class UserGrid( grids.Grid ):
    """Admin grid listing Tool Shed users with login, group, role and status columns.

    Fixes dataset split-marker corruption: the StatusColumn class name was
    split in two and its first ``return`` had lost its indentation.
    """

    class UserLoginColumn( grids.TextColumn ):
        def get_value( self, trans, grid, user ):
            return user.email

    class UserNameColumn( grids.TextColumn ):
        def get_value( self, trans, grid, user ):
            if user.username:
                return user.username
            return 'not set'

    class GroupsColumn( grids.GridColumn ):
        def get_value( self, trans, grid, user ):
            # Number of groups the user belongs to.
            if user.groups:
                return len( user.groups )
            return 0

    class RolesColumn( grids.GridColumn ):
        def get_value( self, trans, grid, user ):
            # Number of roles associated with the user.
            if user.roles:
                return len( user.roles )
            return 0

    class ExternalColumn( grids.GridColumn ):
        def get_value( self, trans, grid, user ):
            if user.external:
                return 'yes'
            return 'no'

    class LastLoginColumn( grids.GridColumn ):
        def get_value( self, trans, grid, user ):
            # galaxy_sessions[0] holds the most recent session.
            if user.galaxy_sessions:
                return self.format( user.galaxy_sessions[ 0 ].update_time )
            return 'never'

    class StatusColumn( grids.GridColumn ):
        def get_value( self, trans, grid, user ):
            if user.purged:
                return "purged"
            elif user.deleted:
                return "deleted"
            return ""

    class EmailColumn( grids.GridColumn ):
        def filter( self, trans, user, query, column_filter ):
            if column_filter == 'All':
                return query
            # NOTE(review): model.Tool looks copied from Galaxy's admin grid --
            # confirm the Tool Shed model actually defines Tool before relying
            # on this filter.
            return query.filter( and_( model.Tool.table.c.user_id == model.User.table.c.id,
                                       model.User.table.c.email == column_filter ) )

    title = "Users"
    model_class = model.User
    template = '/admin/user/grid.mako'
    default_sort_key = "email"
    columns = [
        UserLoginColumn( "Email",
                         key="email",
                         link=( lambda item: dict( operation="information", id=item.id ) ),
                         attach_popup=True,
                         filterable="advanced" ),
        UserNameColumn( "User Name",
                        key="username",
                        attach_popup=False,
                        filterable="advanced" ),
        GroupsColumn( "Groups", attach_popup=False ),
        RolesColumn( "Roles", attach_popup=False ),
        ExternalColumn( "External", attach_popup=False ),
        LastLoginColumn( "Last Login", format=time_ago ),
        StatusColumn( "Status", attach_popup=False ),
        # Columns that are valid for filtering but are not visible.
        EmailColumn( "Email",
                     key="email",
                     visible=False )
    ]
    columns.append( grids.MulticolFilterColumn( "Search",
                                                cols_to_filter=[ columns[0], columns[1] ],
                                                key="free-text-search",
                                                visible=False,
                                                filterable="standard" ) )
    global_actions = [
        grids.GridAction( "Create new user",
                          dict( controller='admin', action='users', operation='create' ) )
    ]
    operations = [
        grids.GridOperation( "Manage Roles and Groups",
                             condition=( lambda item: not item.deleted ),
                             allow_multiple=False,
                             url_args=dict( action="manage_roles_and_groups_for_user" ) ),
        grids.GridOperation( "Reset Password",
                             condition=( lambda item: not item.deleted ),
                             allow_multiple=True,
                             allow_popup=False,
                             url_args=dict( action="reset_user_password" ) )
    ]
    standard_filters = [
        grids.GridColumnFilter( "Active", args=dict( deleted=False ) ),
        grids.GridColumnFilter( "Deleted", args=dict( deleted=True, purged=False ) ),
        grids.GridColumnFilter( "Purged", args=dict( purged=True ) ),
        grids.GridColumnFilter( "All", args=dict( deleted='All' ) )
    ]
    use_paging = False

    def get_current_item( self, trans, **kwargs ):
        # The "current" row in this grid is the logged-in user.
        return trans.user
class RoleGrid( grids.Grid ):
class NameColumn( grids.TextColumn ):
def get_value( self, trans, grid, role ):
return str( role.name )
class DescriptionColumn( grids.TextColumn ):
def get_value( self, trans, grid, role ):
if role.description:
return str( role.description )
return ''
class TypeColumn( grids.TextColumn ):
def get_value( self, trans, grid, role ):
return str( role.type )
class StatusColumn( grids.GridColumn ):
def get_value( self, trans, grid, role ):
if role.deleted:
return "deleted"
return ""
class GroupsColumn( grids.GridColumn ):
def get_value( self, trans, grid, role ):
if role.groups:
return len( role.groups )
return 0
class RepositoriesColumn( grids.GridColumn ):
def get_value( self, trans, grid, role ):
if role.repositories:
return len( role.repositories )
return 0
class UsersColumn( grids.GridColumn ):
def get_value( self, trans, grid, role ):
if role.users:
return len( role.users )
return 0
title = "Roles"
model_class = model.Role
template='/admin/dataset_security/role/grid.mako'
default_sort_key = "name"
columns = [
NameColumn( "Name",
key="name",
link=( lambda item: dict( operation="Manage role associations", id=item.id ) ),
attach_popup=True,
filterable="advanced" ),
DescriptionColumn( "Description",
key='description',
attach_popup=False,
filterable="advanced" ),
GroupsColumn( "Groups", attach_popup=False ),
RepositoriesColumn( "Repositories", attach_popup=False ),
UsersColumn( "Users", attach_popup=False ),
# Columns that are valid for filtering but are not visible.
grids.DeletedColumn( "Deleted",
key="deleted",
visible=False,
filterable="advanced" )
]
columns.append( grids.MulticolFilterColumn( "Search",
cols_to_filter=[ columns[0] ],
key="free-text-search",
visible=False,
filterable="standard" ) )
global_actions = [
grids.GridAction( "Add new role",
dict( controller='admin', action='roles', operation='create' ) )
]
# Repository admin roles currently do not have any operations since they are managed automatically based
# on other events. For example, if a repository is renamed, its associated admin role is automatically
# renamed accordingly and if a repository is deleted its associated admin role is automatically deleted.
operations = [ grids.GridOperation( "Rename",
condition=( lambda item: not item.deleted and not item.is_repository_admin_role ),
|
GoogleCloudPlatform/cloudml-samples | sklearn/notebooks/census_training/train.py | Python | apache-2.0 | 5,507 | 0.002179 | # [START setup]
import datetime
import pandas as pd
import joblib
from google.cloud import storage
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectKBest
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelBinarizer

# TODO: REPLACE 'YOUR_BUCKET_NAME' with your GCS Bucket name.
BUCKET_NAME = 'YOUR_BUCKET_NAME'
# [END setup]

# ---------------------------------------
# 1. Add code to download the data from GCS (in this case, using the publicly hosted data).
# AI Platform will then be able to use the data when training your model.
# ---------------------------------------
# [START download-data]
# Public bucket holding the census data
bucket = storage.Client().bucket('cloud-samples-data')
# Path to the data inside the public bucket
blob = bucket.blob('ai-platform/sklearn/census_data/adult.data')
# Download the data
blob.download_to_filename('adult.data')
# [END download-data]

# ---------------------------------------
# This is where your model code would go. Below is an example model using the census dataset.
# ---------------------------------------
# [START define-and-load-data]
# Define the format of your input data including unused columns (These are the columns from the census data files)
COLUMNS = (
    'age',
    'workclass',
    'fnlwgt',
    'education',
    'education-num',
    'marital-status',
    'occupation',
    'relationship',
    'race',
    'sex',
    'capital-gain',
    'capital-loss',
    'hours-per-week',
    'native-country',
    'income-level'
)
# Categorical columns are columns that need to be turned into a numerical value to be used by scikit-learn
CATEGORICAL_COLUMNS = (
    'workclass',
    'education',
    'marital-status',
    'occupation',
    'relationship',
    'race',
    'sex',
    'native-country'
)
# Load the training census dataset
with open('./adult.data', 'r') as train_data:
    raw_training_data = pd.read_csv(train_data, header=None, names=COLUMNS)
# Remove the column we are trying to predict ('income-level') from our features list
# Convert the Dataframe to a lists of lists
train_features = raw_training_data.drop('income-level', axis=1).values.tolist()
# Create our training labels list, convert the Dataframe to a lists of lists
train_labels = (raw_training_data['income-level'] == ' >50K').values.tolist()
# [END define-and-load-data]

# [START categorical-feature-conversion]
# Since the census data set has categorical features, we need to convert
# them to numerical values. We'll use a list of pipelines to convert each
# categorical column and then use FeatureUnion to combine them before calling
# the RandomForestClassifier.
categorical_pipelines = []
# Each categorical column needs to be extracted individually and converted to a numerical value.
# To do this, each categorical column will use a pipeline that extracts one feature column via
# SelectKBest(k=1) and a LabelBinarizer() to convert the categorical value to a numerical one.
# A scores array (created below) will select and extract the feature column. The scores array is
# created by iterating over the COLUMNS and checking if it is a CATEGORICAL_COLUMN.
for i, col in enumerate(COLUMNS[:-1]):
    if col in CATEGORICAL_COLUMNS:
        # Create a scores array to get the individual categorical column.
        # Example:
        #  data = [39, 'State-gov', 77516, 'Bachelors', 13, 'Never-married', 'Adm-clerical',
        #         'Not-in-family', 'White', 'Male', 2174, 0, 40, 'United-States']
        #  scores = [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        #
        #  Returns: [['State-gov']]
        # Build the scores array.
        scores = [0] * len(COLUMNS[:-1])
        # This column is the categorical column we want to extract.
        scores[i] = 1
        skb = SelectKBest(k=1)
        # NOTE(review): scores_ is assigned manually so transform() selects
        # column i without calling fit() -- this relies on SelectKBest
        # internals; confirm against the installed scikit-learn version.
        skb.scores_ = scores
        # Convert the categorical column to a numerical value
        lbn = LabelBinarizer()
        r = skb.transform(train_features)
        lbn.fit(r)
        # Create the pipeline to extract the categorical feature
        categorical_pipelines.append(
            ('categorical-{}'.format(i), Pipeline([
                ('SKB-{}'.format(i), skb),
                ('LBN-{}'.format(i), lbn)])))
# [END categorical-feature-conversion]

# [START create-pipeline]
# Create pipeline to extract the numerical features
skb = SelectKBest(k=6)
# From COLUMNS use the features that are numerical
skb.scores_ = [1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0]
categorical_pipelines.append(('numerical', skb))
# Combine all the features using FeatureUnion
preprocess = FeatureUnion(categorical_pipelines)
# Create the classifier
classifier = RandomForestClassifier()
# Transform the features and fit them to the classifier
classifier.fit(preprocess.transform(train_features), train_labels)
# Create the overall model as a single pipeline
pipeline = Pipeline([
    ('union', preprocess),
    ('classifier', classifier)
])
# [END create-pipeline]

# ---------------------------------------
# 2. Export and save the model to GCS
# ---------------------------------------
# [START export-to-gcs]
# Export the model to a file
model = 'model.joblib'
joblib.dump(pipeline, model)
# Upload the model to GCS
bucket = storage.Client().bucket(BUCKET_NAME)
blob = bucket.blob('{}/{}'.format(
    datetime.datetime.now().strftime('census_%Y%m%d_%H%M%S'),
    model))
blob.upload_from_filename(model)
# [END export-to-gcs]
|
rackerlabs/lambda-uploader | lambda_uploader/uploader.py | Python | apache-2.0 | 8,157 | 0 | # Copyright 2015-2016 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto3
import logging
from os import path
LOG = logging.getLogger(__name__)
MAX_PACKAGE_SIZE = 50000000
class PackageUploader(object):
    """Uploads a built lambda package and manages the function's config and aliases.

    TODO: Should we decouple the config from the Object Init
    """

    def __init__(self, config, profile_name):
        self._config = config
        self._vpc_config = self._format_vpc_config()
        self._aws_session = boto3.session.Session(region_name=config.region,
                                                  profile_name=profile_name)
        self._lambda_client = self._aws_session.client('lambda')
        # Set by upload()/upload_existing()/upload_new(); required by alias().
        self.version = None

    def upload_existing(self, pkg):
        '''
        Calls the AWS methods to upload an existing package and update
        the function configuration.

        Returns the package version.
        '''
        environment = {'Variables': self._config.variables}
        self._validate_package_size(pkg.zip_file)

        LOG.debug('running update_function_code')
        if self._config.s3_bucket:
            self._upload_s3(pkg.zip_file)
            conf_update_resp = self._lambda_client.update_function_code(
                FunctionName=self._config.name,
                S3Bucket=self._config.s3_bucket,
                S3Key=self._config.s3_package_name(),
                Publish=False,
            )
        else:
            # Only read the archive into memory when it is sent inline.
            with open(pkg.zip_file, "rb") as fil:
                zip_file = fil.read()
            conf_update_resp = self._lambda_client.update_function_code(
                FunctionName=self._config.name,
                ZipFile=zip_file,
                Publish=False,
            )
        LOG.debug("AWS update_function_code response: %s"
                  % conf_update_resp)

        LOG.debug('running update_function_configuration')
        response = self._lambda_client.update_function_configuration(
            FunctionName=self._config.name,
            Handler=self._config.handler,
            Role=self._config.role,
            Description=self._config.description,
            Timeout=self._config.timeout,
            MemorySize=self._config.memory,
            VpcConfig=self._vpc_config,
            Environment=environment,
            TracingConfig=self._config.tracing,
            Runtime=self._config.runtime,
        )
        LOG.debug("AWS update_function_configuration response: %s"
                  % response)

        version = response.get('Version')
        # Publish the version after upload and config update if needed
        if self._config.publish:
            resp = self._lambda_client.publish_version(
                FunctionName=self._config.name,
            )
            LOG.debug("AWS publish_version response: %s" % resp)
            version = resp.get('Version')
        return version

    def upload_new(self, pkg):
        '''
        Creates and uploads a new lambda function.

        Returns the package version.
        '''
        environment = {'Variables': self._config.variables}
        if self._config.s3_bucket:
            code = {'S3Bucket': self._config.s3_bucket,
                    'S3Key': self._config.s3_package_name()}
            self._upload_s3(pkg.zip_file)
        else:
            self._validate_package_size(pkg.zip_file)
            with open(pkg.zip_file, "rb") as fil:
                zip_file = fil.read()
            code = {'ZipFile': zip_file}

        LOG.debug('running create_function_code')
        response = self._lambda_client.create_function(
            FunctionName=self._config.name,
            Runtime=self._config.runtime,
            Handler=self._config.handler,
            Role=self._config.role,
            Code=code,
            Description=self._config.description,
            Timeout=self._config.timeout,
            MemorySize=self._config.memory,
            Publish=self._config.publish,
            VpcConfig=self._vpc_config,
            Environment=environment,
            TracingConfig=self._config.tracing,
        )
        LOG.debug("AWS create_function response: %s" % response)
        return response.get('Version')

    def upload(self, pkg):
        '''
        Auto determines whether the function exists or not and calls
        the appropriate method (upload_existing or upload_new).
        '''
        existing_function = True
        try:
            get_resp = self._lambda_client.get_function_configuration(
                FunctionName=self._config.name)
            LOG.debug("AWS get_function_configuration response: %s" % get_resp)
        except Exception:  # narrowed from bare except; any failure means "create new"
            existing_function = False
            LOG.debug("function not found creating new function")

        if existing_function:
            self.version = self.upload_existing(pkg)
        else:
            self.version = self.upload_new(pkg)

    def alias(self):
        '''
        Create/update an alias to point to the package. Raises an
        exception if the package has not been uploaded.
        '''
        # if self.version is still None raise exception
        if self.version is None:
            raise Exception('Must upload package before applying alias')
        if self._alias_exists():
            self._update_alias()
        else:
            self._create_alias()

    def _alias_exists(self):
        '''
        Pulls down the current list of aliases and checks to see if
        an alias exists.
        '''
        resp = self._lambda_client.list_aliases(
            FunctionName=self._config.name)
        for alias in resp.get('Aliases'):
            if alias.get('Name') == self._config.alias:
                return True
        return False

    def _create_alias(self):
        '''Creates alias'''
        LOG.debug("Creating new alias %s" % self._config.alias)
        resp = self._lambda_client.create_alias(
            FunctionName=self._config.name,
            Name=self._config.alias,
            FunctionVersion=self.version,
            Description=self._config.alias_description,
        )
        LOG.debug("AWS create_alias response: %s" % resp)

    def _update_alias(self):
        '''Update alias'''
        LOG.debug("Updating alias %s" % self._config.alias)
        resp = self._lambda_client.update_alias(
            FunctionName=self._config.name,
            Name=self._config.alias,
            FunctionVersion=self.version,
            Description=self._config.alias_description,
        )
        LOG.debug("AWS update_alias response: %s" % resp)

    def _validate_package_size(self, pkg):
        '''
        Logs a warning if the package size is over the current max package size
        '''
        if path.getsize(pkg) > MAX_PACKAGE_SIZE:
            LOG.warning("Size of your deployment package is larger than 50MB!")

    def _format_vpc_config(self):
        '''
        Returns empty subnet/security-group lists if the VPC config is set
        to None by Config, returns the formatted config otherwise.
        '''
        if self._config.raw['vpc']:
            return {
                'SubnetIds': self._config.raw['vpc']['subnets'],
                'SecurityGroupIds': self._config.raw['vpc']['security_groups']
            }
        else:
            return {
                'SubnetIds': [],
                'SecurityGroupIds': [],
            }

    def _upload_s3(self, zip_file):
        '''
        Uploads the lambda package to s3
        '''
        s3_client = self._aws_session.client('s3')
        transfer = boto3.s3.transfer.S3Transfer(s3_client)
        transfer.upload_file(zip_file, self._config.s3_bucket,
                             self._config.s3_package_name())
|
tmr232/ida-plugins | autostruct/autostruct.py | Python | mit | 1,657 | 0.001207 | import idaapi
import idc
import sark
class AutoStruct(idaapi.plugin_t):
    """IDA plugin that infers struct offsets from register-relative accesses
    in the current selection, creates/updates the struct and applies it."""
    flags = 0
    comment = "AutoStruct struct creator"
    help = "Automagically Create and Apply Structs"
    wanted_name = "AutoStruct"
    wanted_hotkey = "Shift+T"

    def init(self):
        # Remember the last struct name entered, offered as the next default.
        self._prev_struct_name = ""
        return idaapi.PLUGIN_KEEP

    def term(self):
        pass

    def run(self, arg):
        start, end = sark.get_selection()

        struct_name = idc.AskStr(self._prev_struct_name, "Struct Name")
        if not struct_name:
            return
        self._prev_struct_name = struct_name

        # Default to the register most commonly used as a base in the selection.
        common_reg = sark.structure.get_common_register(start, end)
        reg_name = idc.AskStr(common_reg, "Register")
        if not reg_name:
            return

        offsets, operands = sark.structure.infer_struct_offsets(start, end, reg_name)

        try:
            sark.structure.create_struct_from_offsets(struct_name, offsets)
        except sark.exceptions.SarkStructAlreadyExists:
            yes_no_cancel = idc.AskYN(idaapi.ASKBTN_NO,
                                      "Struct already exists. Modify?\n"
                                      "Cancel to avoid applying the struct.")
            if yes_no_cancel == idaapi.ASKBTN_CANCEL:
                return
            elif yes_no_cancel == idaapi.ASKBTN_YES:
                sid = sark.structure.get_struct(struct_name)
                sark.structure.set_struct_offsets(offsets, sid)
            else:  # yes_no_cancel == idaapi.ASKBTN_NO:
                pass

        sark.structure.apply_struct(start, end, reg_name, struct_name)
def PLUGIN_ENTRY():
    # Entry point looked up by IDA's plugin loader; returns the plugin instance.
    return AutoStruct()
JShadowMan/package | python/zdl/error_logger/error_logger/net/sms_notification.py | Python | mit | 187 | 0.005348 | #!/usr/bin/env python
#
# Copyright (C) 2017 DL
#
from error_logger.net import email_notificatio | n
from error_logger.utils import logger
def send_sms(config, to_list, contents):
    """Send an SMS notification (stub -- not yet implemented, currently a no-op).

    :param config: notification configuration (same pattern as email_notification)
    :param to_list: iterable of recipients (presumably phone numbers -- confirm)
    :param contents: message body to send
    """
    pass
epuzanov/ZenPacks.community.HPMon | ZenPacks/community/HPMon/cpqScsiPhyDrv.py | Python | gpl-2.0 | 1,513 | 0.00727 | ################################################################################
#
# This program is part of the HPMon Zenpack for Zenoss.
# Copyright (C) 2008, 2009, 2010, 2011 Egor Puzanov.
#
# This program can be used under the GNU General Public License version 2
# You can find full information here: http://www.zenoss.com/oss
#
################################################################################
__doc__="""cpqScsiPhyDrv
cpqScsiPhyDrv is an abstraction of a HP SCSI Hard Disk.
$Id: cpqScsiPhyDrv.py,v 1.2 2011/01/04 23:27:26 egor Exp $"""
__version__ = "$Revision: 1.2 $"[11:-2]
from HPHardDisk import HPHardDisk
from HPComponent import *
class cpqScsiPhyDrv(HPHardDisk):
    """cpqScsiPhyDrv object

    HP SCSI hard disk component; maps SNMP drive status codes to
    (status dot color, event severity, status text) tuples.
    """
    statusmap = {1: (DOT_GREY, SEV_WARNING, 'other'),
                 2: (DOT_GREEN, SEV_CLEAN, 'Ok'),
                 3: (DOT_RED, SEV_CRITICAL, 'Failed'),
                 4: (DOT_YELLOW, SEV_WARNING, 'Not Configured'),
                 5: (DOT_ORANGE, SEV_ERROR, 'Bad Cable'),
                 6: (DOT_RED, SEV_CRITICAL, 'Missing was Ok'),
                 7: (DOT_RED, SEV_CRITICAL, 'Missing was Failed'),
                 8: (DOT_ORANGE, SEV_ERROR, 'Predictive Failure'),
                 9: (DOT_RED, SEV_CRITICAL, 'Missing was Predictive Failure'),
                 10: (DOT_RED, SEV_CRITICAL, 'Offline'),
                 11: (DOT_RED, SEV_CRITICAL, 'Missing was Offline'),
                 12: (DOT_RED, SEV_CRITICAL, 'Hard Error'),
                 }
Initialize | Class(cpqScsiPhyDrv)
|
viktorRock/myFirstPythonAPI | pythapp/nltk/nltk_constants.py | Python | unlicense | 222 | 0.009009 | DEFAULT_STEMMER = 'snowball'
DEFAULT_TOKENIZER = 'word'
DEFAULT_TAGGER = 'pos'
# Corpus categories usable for training
# (presumably NLTK Brown corpus categories -- confirm against usage).
TRAINERS = ['news', 'editorial', 'reviews', 'religion',
            'learned', 'science_fiction', 'romance', 'humor']
DEFAULT_TRAIN = 'news'
hpeter/Linot | test_temp/Plugins/TestPluginBase.py | Python | bsd-2-clause | 1,918 | 0.001043 | from linot.Plugins.PluginBase import PluginBase
from linot.LinotArgParser import LinotParser
from nose.tools import ok_
from nose.tools import raises
class MockLine:
    """Test double standing in for the Line client: records outgoing messages
    in parallel lists (recipients and message texts) for later inspection."""

    def __init__(self):
        self.msg_recv, self.msg_text = [], []

    def sendMessageToClient(self, recv, msg):
        # Append recipient and text to their respective recording lists.
        for bucket, value in ((self.msg_recv, recv), (self.msg_text, msg)):
            bucket.append(value)
class TestPluginBase:
    """Nose tests for PluginBase start/stop lifecycle and command dispatch.

    Repairs dataset split-marker corruption (a lost indent in test_stop and
    a split 'self.plugin' identifier in test_cmd_process).
    """

    def setUp(self):
        self.plugin = PluginBase('mock_line')

    def test_init(self):
        ok_(self.plugin._line == 'mock_line')

    @raises(NotImplementedError)
    def test_setup(self):
        # Base class setup() must be abstract.
        parser = LinotParser()
        self.plugin.setup(parser)

    @raises(NotImplementedError)
    def test_start(self):
        ok_(self.plugin.is_start() is False)
        self.plugin.start()

    def test_start_normal(self):
        def mock_start():
            pass
        self.plugin._start = mock_start
        self.plugin.start()
        ok_(self.plugin.is_start() is True)
        # Starting an already-started plugin must be a no-op.
        self.plugin = PluginBase('dummy')
        self.plugin._started = True
        self.plugin.start()

    @raises(NotImplementedError)
    def test_stop(self):
        self.plugin._started = True
        self.plugin.stop()

    def test_stop_normal(self):
        def mock_stop():
            pass
        # Stopping a non-started plugin is a no-op.
        self.plugin.stop()
        self.plugin._started = True
        self.plugin._stop = mock_stop
        self.plugin.stop()
        ok_(self.plugin.is_start() is False)

    def test_cmd_process(self):
        self.plugin = PluginBase(MockLine())
        self.plugin._cmd_process(None, 'sender')
        ok_(self.plugin._line.msg_recv[0] == 'sender')
        ok_('Unknown' in self.plugin._line.msg_text[0], self.plugin._line.msg_text[0])
        self.plugin._cmd_process('test', 'sender')
        ok_(self.plugin._line.msg_recv[1] == 'sender')
        ok_('not implemented' in self.plugin._line.msg_text[1], self.plugin._line.msg_text[1])
|
rmelo19/rmelo19-arduino | python/readSerial.py | Python | gpl-3.0 | 430 | 0.025581 | #!/usr/bin/env python
import serial
sr = serial.Serial('/dev/ttyACM0', 115200)
currentLine = sr.readline()
while | currentLine.find('PRESSURES: ') == -1:
currentLine = sr.readline()
while True:
currentLine = sr.readline()
parsedLine = currentLine[currentLine.find('[') | +1:currentLine.find(']')].split(',')
intParsedLine = [int(element) for element in parsedLine]
for element in intParsedLine:
print element
sr.close() |
ToontownUprising/src | toontown/golf/GolfGlobals.py | Python | mit | 12,858 | 0.001555 | from direct.directnotify import DirectNotifyGlobal
import random
# Maximum number of toons on a single golf hole.
MAX_PLAYERS_PER_HOLE = 4
GOLF_BALL_RADIUS = 0.25
# Sphere volume: 4/3 * pi * r**3 (pi hard-coded to five decimal places).
GOLF_BALL_VOLUME = 4.0 / 3.0 * 3.14159 * GOLF_BALL_RADIUS ** 3
GOLF_BALL_MASS = 0.5
GOLF_BALL_DENSITY = GOLF_BALL_MASS / GOLF_BALL_VOLUME
# Physics surface type ids.
GRASS_SURFACE = 0
BALL_SURFACE = 1
HARD_SURFACE = 2
HOLE_SURFACE = 3
SLICK_SURFACE = 4
# Collision ids used to tag geometry/rays.
OOB_RAY_COLLIDE_ID = -1
GRASS_COLLIDE_ID = 2
HARD_COLLIDE_ID = 3
TOON_RAY_COLLIDE_ID = 4
MOVER_COLLIDE_ID = 7
WINDMILL_BASE_COLLIDE_ID = 8
CAMERA_RAY_COLLIDE_ID = 10
BALL_COLLIDE_ID = 42
HOLE_CUP_COLLIDE_ID = 64
SKY_RAY_COLLIDE_ID = 78
SLICK_COLLIDE_ID = 13
# Animation frame where the club contacts the ball; contact time derives
# from it at 24 frames per second.
BALL_CONTACT_FRAME = 9
BALL_CONTACT_TIME = (BALL_CONTACT_FRAME + 1) / 24.0
# Timer durations (seconds) for the aiming and tee-off phases.
AIM_DURATION = 60
TEE_DURATION = 15
RANDOM_HOLES = True
# NOTE(review): presumably the number of swings used in the kick-out
# (AFK) check -- confirm against the consumer of this constant.
KICKOUT_SWINGS = 2
TIME_TIE_BREAKER = True
# Golf course definitions. 'holeIds' entries are either a bare hole id or an
# (id, weight) pair (weight presumably biases random hole selection -- confirm).
CourseInfo = {
    0: {'name': '',
        'numHoles': 3,
        'holeIds': (2, 3, 4, 5, 6, 7, 8, 12, 13, 15, 16)},
    1: {'name': '',
        'numHoles': 6,
        'holeIds': ((0, 5), (1, 5), 2, 3, 4, 5, 6, 7, 8, 9, 10, (11, 5), 12,
                    13, (14, 5), 15, 16, (17, 5), (20, 5), (21, 5), (22, 5),
                    (23, 5), (24, 5), (25, 5), (26, 5), (28, 5), (30, 5),
                    (31, 5), (33, 5), (34, 5))},
    2: {'name': '',
        'numHoles': 9,
        'holeIds': ((1, 5), 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, (14, 5), 15,
                    (17, 5), (18, 20), (19, 20), (20, 20), (21, 5), (22, 5),
                    (23, 20), (24, 20), (25, 20), (26, 20), (27, 20),
                    (28, 20), (29, 20), (30, 5), (31, 20), (32, 20), (33, 5),
                    (34, 20), (35, 20))},
}
HoleInfo = {0: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole18.bam',
'physicsData': 'golfGreen18',
'blockers': (),
'optionalMovers': ()},
1: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole1.bam',
'physicsData': 'golfGreen1',
'blockers': ()},
2: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole2.bam',
'physicsData': 'golfGreen2',
'blockers': ()},
3: {'name': '',
'par': 2,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole3.bam',
'physicsData': 'golfGreen3',
'blockers': ()},
4: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole4.bam',
'physicsData': 'golfGreen4',
'blockers': ()},
5: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole5.bam',
'physicsData': 'golfGreen2',
'blockers': ()},
6: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole6.bam',
'physicsData': 'golfGreen6',
'blockers': ()},
7: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole7.bam',
'physicsData': 'golfGreen7',
'blockers': ()},
8: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole8.bam',
'physicsData': 'golfGreen8',
'blockers': ()},
9: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole9.bam',
'physicsData': 'golfGreen9',
'blockers': 2},
10: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/ho | le10.bam',
'physicsData': 'golfGreen10',
'blockers': ()},
11: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole11.bam',
'physicsData': 'golfGreen11',
'blockers': ()},
12: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole12.bam',
'p | hysicsData': 'golfGreen12',
'blockers': ()},
13: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole13.bam',
'physicsData': 'golfGreen13',
'blockers': ()},
14: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole14.bam',
'physicsData': 'golfGreen14',
'blockers': ()},
15: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole15.bam',
'physicsData': 'golfGreen15',
'blockers': ()},
16: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole16.bam',
'physicsData': 'golfGreen16',
'blockers': ()},
17: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole17.bam',
'physicsData': 'golfGreen17',
'blockers': ()},
18: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole18.bam',
'physicsData': 'golfGreen18',
'blockers': (1, 2),
'optionalMovers': 1},
19: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole1.bam',
'physicsData': 'golfGreen1',
'blockers': (2, 5)},
20: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole2.bam',
'physicsData': 'golfGreen2',
'blockers': (1, 3)},
21: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole3.bam',
'physicsData': 'golfGreen3',
'blockers': (1, 2, 3)},
22: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole4.bam',
'physicsData': 'golfGreen4',
'blockers': 2},
23: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole5.bam',
'physicsData': 'golfGreen5',
'blockers': (3, 4),
'optionalMovers': 1},
24: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole6.bam',
'physicsData': 'golfGreen6',
'blockers': 1,
'optionalMovers': 1},
25: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole7.bam',
'physicsData': 'golfGreen7',
'blockers': 3,
'optionalMovers': 1},
26: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole8.bam',
'physicsData': 'golfGreen8',
'blockers': (),
'optionalMovers': 1},
27: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole9.bam',
'physicsData': 'golfGreen9',
'blockers': (),
'optionalMovers': (1, 2)},
28: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole10.bam',
'physicsData': 'golfGreen10',
'blockers': (),
'optionalMovers': (1, 2)},
29: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole11.bam',
'physicsData': 'golfGreen11',
'blockers': (),
'optionalMovers': 1},
30: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole12.bam',
'physicsData': 'golfGreen12',
'blockers': (1, 2, 3)},
31: {'name': '',
'par': 4,
'maxSwing': 7,
'terrainModel': 'phase_6/models/golf/hole |
MyRobotLab/pyrobotlab | home/steveh110/parser.py | Python | apache-2.0 | 833 | 0.062425 |
def parser(lien,nb) :
if lien == 1:
lien = "http://www.lesbonsnumeros.com/euromillions/rss.xml"
elif lien == 2:
lien = "http://www.20min.ch/rss/rss.tmpl?type=rubrik&get=784&lang=ro"
elif li | en == 3:
lien = "http://www.20min.ch/rss/rss.tmpl?type=rubrik&get=313&lang=ro"
elif lien == 4:
lien = "http://www.20min.ch/rss/rss.tmpl?ty | pe=rubrik&get=526&lang=ro"
else :
lien = "http://www.20min.ch/rss/rss.tmpl?type=rubrik&get=784&lang=ro"
d = feedparser.parse(lien)
i = 0
if nb == "de" :
nb = 2
if nb <= len(d['entries']) and nb != 0 :
nb = nb
else :
nb = len(d['entries'])
while i < nb :
texte = d['entries'][i]['title'] + ". " + d['entries'][i]['description']
texte = re.sub('<[A-Za-z\/][^>]*>', '', texte)
for x in texte.split("\n"):
print x;
mouth.speakBlocking(x)
i += 1
|
stvstnfrd/edx-platform | openedx/core/lib/blockstore_api/models.py | Python | agpl-3.0 | 2,551 | 0.001176 | """
Data models used for Blockstore API Client
"""
from datetime import datetime
from uuid import UUID
import attr
import six
def _convert_to_uuid(value):
if not isinstance(value, UUID):
return UUID(value)
return value
@attr.s(frozen=True)
class Collection(object):
    """
    Metadata about a blockstore collection
    """
    # Collection UUID; string inputs are coerced to uuid.UUID.
    uuid = attr.ib(type=UUID, converter=_convert_to_uuid)
    # Human-readable collection title.
    title = attr.ib(type=six.text_type)
@attr.s(frozen=True)
class Bundle(object):
    """
    Metadata about a blockstore bundle
    """
    # Bundle UUID; string inputs are coerced to uuid.UUID.
    uuid = attr.ib(type=UUID, converter=_convert_to_uuid)
    title = attr.ib(type=six.text_type)
    description = attr.ib(type=six.text_type)
    # Short identifier (slug) for the bundle.
    slug = attr.ib(type=six.text_type)
    drafts = attr.ib(type=dict)  # Dict of drafts, where keys are the draft names and values are draft UUIDs
    # Note that if latest_version is 0, it means that no versions yet exist
    latest_version = attr.ib(type=int, validator=attr.validators.instance_of(int))
@attr.s(frozen=True)
class Draft(object):
    """
    Metadata about a blockstore draft
    """
    # Draft identifier; string inputs are coerced to UUID.
    uuid = attr.ib(type=UUID, converter=_convert_to_uuid)
    # UUID of the bundle this draft belongs to.
    bundle_uuid = attr.ib(type=UUID, converter=_convert_to_uuid)
    # Name of the draft within its bundle.
    name = attr.ib(type=six.text_type)
    # Timestamp of the last modification to the draft.
    updated_at = attr.ib(type=datetime, validator=attr.validators.instance_of(datetime))
    # presumably maps file paths to file metadata — TODO confirm against the API client
    files = attr.ib(type=dict)
    # presumably maps link names to link details — TODO confirm against the API client
    links = attr.ib(type=dict)
@attr.s(frozen=True)
class BundleFile(object):
    """
    Metadata about a file in a blockstore bundle or draft.
    """
    # Path of the file within the bundle.
    path = attr.ib(type=six.text_type)
    # File size in bytes.
    size = attr.ib(type=int)
    # URL from which the file contents can be retrieved.
    url = attr.ib(type=six.text_type)
    # Hash digest of the file contents.
    hash_digest = attr.ib(type=six.text_type)
@attr.s(frozen=True)
class DraftFile(BundleFile):
    """
    Metadata about a file in a blockstore draft.

    Extends ``BundleFile`` with draft-specific modification state.
    """
    modified = attr.ib(type=bool)  # Was this file modified in the draft?
@attr.s(frozen=True)
class LinkReference(object):
    """
    A pointer to a specific BundleVersion
    """
    # UUID of the referenced bundle; string inputs are coerced to UUID.
    bundle_uuid = attr.ib(type=UUID, converter=_convert_to_uuid)
    # Version number of the referenced bundle.
    version = attr.ib(type=int)
    # Digest identifying the referenced snapshot.
    snapshot_digest = attr.ib(type=six.text_type)
@attr.s(frozen=True)
class LinkDetails(object):
    """
    Details about a specific link in a BundleVersion or Draft
    """
    # Name of the link.
    name = attr.ib(type=str)
    # The directly referenced BundleVersion.
    direct = attr.ib(type=LinkReference)
    indirect = attr.ib(type=list)  # List of LinkReference objects
@attr.s(frozen=True)
class DraftLinkDetails(LinkDetails):
    """
    Details about a specific link in a Draft
    """
    # Was this link modified in the draft?
    modified = attr.ib(type=bool)
|
hiorws/ulakbus | ulakbus/services/personel/hitap/hizmet_nufus_sorgula_kaydet.py | Python | gpl-3.0 | 5,170 | 0.003095 | # -*- coding: utf-8 -*-
"""
HITAP HizmetNufusSorgula Zato Servisi
"""
# Copyright (C) 2015 ZetaOps Inc.
#
# This file is licensed under the GNU General Public License v3
# (GPLv3). See LICENSE.txt for details.
from zato.server.service import Service
import os
import urllib2
from pyoko.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
# Pyoko needs its settings module configured before any model import.
os.environ["PYOKO_SETTINGS"] = 'ulakbus.settings'
from ulakbus.models.personel import Personel
# HITAP web-service credentials are taken from the environment.
H_USER = os.environ["HITAP_USER"]
H_PASS = os.environ["HITAP_PASS"]
class HizmetNufusSorgula(Service):
    """Zato service that queries HITAP's HizmetNufusSorgula endpoint by TCKN
    and mirrors the returned civil-registry record onto the local Personel
    model, creating the person when it does not exist yet."""

    # Fields copied verbatim from the HITAP response dict onto the
    # Personel.NufusKayitlari node (names match on both sides).
    NUFUS_FIELDS = (
        'tckn', 'ad', 'soyad', 'ilk_soy_ad', 'dogum_tarihi', 'cinsiyet',
        'emekli_sicil_no', 'memuriyet_baslama_tarihi', 'kurum_sicil',
        'maluliyet_kod', 'yetki_seviyesi', 'aciklama',
        'kuruma_baslama_tarihi', 'emekli_sicil_6495', 'gorev_tarihi_6495',
        'durum', 'sebep')

    def handle(self):
        def pass_nufus_kayitlari(nufus_kayitlari_passed, record_values):
            # Copy every HITAP field onto the model node.
            for field in self.NUFUS_FIELDS:
                setattr(nufus_kayitlari_passed, field, record_values[field])

        tckn = self.request.payload['personel']['tckn']
        conn = self.outgoing.soap['HITAP'].conn
        hitap_dict = {}
        # connects with soap client to the HITAP
        try:
            with conn.client() as client:
                service_bean = client.service.HizmetNufusSorgula(H_USER, H_PASS, tckn)
                self.logger.info("zato service started to work.")
                # collects data from HITAP
                hitap_dict['nufus_sorgula'] = {
                    'tckn': service_bean.tckn,
                    'ad': service_bean.ad,
                    'soyad': service_bean.soyad,
                    'ilk_soy_ad': service_bean.ilkSoyad,
                    'dogum_tarihi': service_bean.dogumTarihi,
                    'cinsiyet': service_bean.cinsiyet,
                    'emekli_sicil_no': service_bean.emekliSicilNo,
                    'memuriyet_baslama_tarihi': service_bean.memuriyetBaslamaTarihi,
                    'kurum_sicil': service_bean.kurumSicili,
                    'maluliyet_kod': service_bean.maluliyetKod,
                    'yetki_seviyesi': service_bean.yetkiSeviyesi,
                    'aciklama': service_bean.aciklama,
                    'kuruma_baslama_tarihi': service_bean.kurumaBaslamaTarihi,
                    'emekli_sicil_6495': service_bean.emekliSicil6495,
                    # HITAP uses "01.01.0001" as a null date; normalize it.
                    'gorev_tarihi_6495': '01.01.1900' if
                    service_bean.gorevTarihi6495 == "01.01.0001" else service_bean.gorevTarihi6495,
                    'durum': service_bean.durum,
                    'sebep': service_bean.sebep
                }
                self.logger.info("hitap_dict created.")
                self.logger.info("TCKN : %s" % hitap_dict['nufus_sorgula']['tckn'])
                try:
                    personel = Personel.objects.filter(nufus_kayitlari__tckn=service_bean.tckn).get()
                    new = False
                except ObjectDoesNotExist:
                    new = True
                if new:
                    self.logger.info("Personel not found in db. New created.")
                    personel = Personel()
                    nufus_kayitlari = personel.NufusKayitlari()
                    pass_nufus_kayitlari(nufus_kayitlari, hitap_dict['nufus_sorgula'])
                    nufus_kayitlari.sync = 1
                    personel.save()
                if not new and personel.NufusKayitlari.sync != 1:
                    self.logger.info("Personel also in db. But not up to date.")
                    # BUG FIX: the original created a brand-new Personel() here,
                    # so the refreshed record was never attached to the existing
                    # person. Create the node on the found personel instead,
                    # mirroring the "new" branch above.
                    nufus_kayitlari = personel.NufusKayitlari()
                    pass_nufus_kayitlari(nufus_kayitlari, hitap_dict['nufus_sorgula'])
                    nufus_kayitlari.sync = 1
                    personel.save()
                if not new and personel.NufusKayitlari.sync == 1:
                    self.logger.info("Nufus kayitlari is up to date also.")
                self.logger.info("Nufus kayitlari successfully saved.")
                self.logger.info("RIAK KEY: %s " % personel.key)
        except AttributeError:
            # SOAP bean missing expected fields, most likely an invalid TCKN.
            self.logger.info("TCKN should be wrong!")
        except urllib2.URLError:
            self.logger.info("No internet connection!")
|
stormandco/aiohttp_debugtoolbar | aiohttp_debugtoolbar/middlewares.py | Python | apache-2.0 | 6,287 | 0 | import asyncio
import sys
import aiohttp_jinja2
from aiohttp import web
from .toolbar import DebugToolbar
from .tbtools.tbtools import get_traceback
from .utils import addr_in, REDIRECT_CODES, APP_KEY, TEMPLATE_KEY, hexlify, \
ContextSwitcher
# Public API of this module.
__all__ = ['toolbar_middleware_factory', 'middleware']
# Response content types into which the toolbar HTML may be injected.
HTML_TYPES = ('text/html', 'application/xhtml+xml')
@asyncio.coroutine
def middleware(app, handler):
    """Old-style aiohttp middleware factory wiring the debug toolbar in.

    Returns ``handler`` unchanged when the toolbar is disabled; otherwise
    returns a wrapping coroutine that records requests/exceptions and
    injects the toolbar into HTML responses.
    """
    if APP_KEY not in app:
        raise RuntimeError('Please setup debug toolbar with '
                           'aiohttp_debugtoolbar.setup method')
    # just create namespace for handler
    settings = app[APP_KEY]['settings']
    request_history = app[APP_KEY]['request_history']
    exc_history = app[APP_KEY]['exc_history']
    intercept_exc = app[APP_KEY]['settings']['intercept_exc']
    if not app[APP_KEY]['settings']['enabled']:
        return handler

    @asyncio.coroutine
    def toolbar_middleware(request):
        """Per-request wrapper: run panels, record history, inject toolbar."""
        panel_classes = settings.get('panels', [])
        global_panel_classes = settings.get('global_panels', [])
        hosts = settings.get('hosts', [])
        show_on_exc_only = settings.get('show_on_exc_only')
        intercept_redirects = settings['intercept_redirects']
        root_url = request.app.router['debugtoolbar.main'].url()
        exclude_prefixes = settings.get('exclude_prefixes')
        exclude = [root_url] + exclude_prefixes
        p = request.path
        # Truthy when the path starts with the toolbar's own URL or any
        # configured excluded prefix.
        starts_with_excluded = list(filter(None, map(p.startswith, exclude)))
        peername = request.transport.get_extra_info('peername')
        remote_host, remote_port = peername[:2]
        last_proxy_addr = remote_host
        # TODO: rethink access policy by host
        if starts_with_excluded or not addr_in(last_proxy_addr, hosts):
            # Excluded path or unauthorized host: bypass the toolbar.
            return (yield from handler(request))
        toolbar = DebugToolbar(request, panel_classes, global_panel_classes)
        _handler = handler
        context_switcher = ContextSwitcher()
        # Let every panel wrap the handler so it can observe the request.
        for panel in toolbar.panels:
            _handler = panel.wrap_handler(_handler, context_switcher)
        try:
            response = yield from context_switcher(_handler(request))
        except (web.HTTPSuccessful, web.HTTPRedirection,
                web.HTTPClientError) as e:
            # TODO: fix dirty hack — aiohttp HTTP exceptions double as
            # responses, so treat them as the response object.
            response = e
        except Exception as e:
            if intercept_exc:
                # Capture the traceback so it can be browsed interactively.
                tb = get_traceback(info=sys.exc_info(),
                                   skip=1,
                                   show_hidden_frames=False,
                                   ignore_system_exceptions=True,
                                   exc=e)
                for frame in tb.frames:
                    exc_history.frames[frame.id] = frame
                exc_history.tracebacks[tb.id] = tb
                request['pdbt_tb'] = tb
                # TODO: port the pyramid-style subrequest that logged the
                # traceback URL (see project history for the original code).
                body = tb.render_full(request).encode('utf-8', 'replace')
                response = web.Response(
                    body=body, status=500,
                    content_type='text/html')
                yield from toolbar.process_response(request, response)
                request['id'] = str(id(request))
                toolbar.status = response.status
                request_history.put(request['id'], toolbar)
                toolbar.inject(request, response)
                return response
            else:
                # logger.exception('Exception at %s' % request.path)
                raise e
        toolbar.status = response.status
        if intercept_redirects:
            # Intercept http redirect codes and display an html page with a
            # link to the target.
            if response.status in REDIRECT_CODES and response.location:
                context = {'redirect_to': response.location,
                           'redirect_code': response.status}
                _response = aiohttp_jinja2.render_template(
                    'redirect.jinja2', request, context,
                    app_key=TEMPLATE_KEY)
                response = _response
        yield from toolbar.process_response(request, response)
        request['id'] = hexlify(id(request))
        # Don't store the favicon.ico request
        # it's requested by the browser automatically
        if not "/favicon.ico" == request.path:
            request_history.put(request['id'], toolbar)
        if not show_on_exc_only and response.content_type in HTML_TYPES:
            toolbar.inject(request, response)
        return response
    return toolbar_middleware
# Deprecated, will drop it in 0.3+
toolbar_middleware_factory = middleware
toolbar_html_template = """\
<script type="text/javascript">
var fileref=document.createElement("link")
fileref.setAttribute("rel", "stylesheet")
fileref.setAttribute("type", "text/css")
fileref.setAttribute("href", "%(css_path)s")
document.getElementsByTagName("head")[0].appendChild(fileref)
</script>
<div id="pDebug">
<div style="display: block; %(button_style)s" id="pDebugToolbarHandle">
<a title="Show Toolbar" id="pShowToolBarButton"
href="%(toolbar_url)s" target="pDebugToolbar">«
FIXME: Debug Toolbar</a>
</div>
</div>
"""
|
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/virtual_network_usage.py | Python | mit | 1,905 | 0 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# -------------------------------------------------- | ------------------------
from msrest.serialization import Model
class VirtualNetworkUsage(Model):
    """Usage details for subnet.

    Every attribute is populated by the server; any values supplied by the
    caller are ignored when a request is sent.

    :ivar current_value: Indicates number of IPs used from the Subnet.
    :vartype current_value: float
    :ivar id: Subnet identifier.
    :vartype id: str
    :ivar limit: Indicates the size of the subnet.
    :vartype limit: float
    :ivar name: The name containing common and localized value for usage.
    :vartype name:
     ~azure.mgmt.network.v2017_09_01.models.VirtualNetworkUsageName
    :ivar unit: Usage units. Returns 'Count'
    :vartype unit: str
    """

    # All five fields are server-populated, hence uniformly read-only.
    _validation = {
        field: {'readonly': True}
        for field in ('current_value', 'id', 'limit', 'name', 'unit')
    }

    _attribute_map = {
        'current_value': {'key': 'currentValue', 'type': 'float'},
        'id': {'key': 'id', 'type': 'str'},
        'limit': {'key': 'limit', 'type': 'float'},
        'name': {'key': 'name', 'type': 'VirtualNetworkUsageName'},
        'unit': {'key': 'unit', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualNetworkUsage, self).__init__(**kwargs)
        # Read-only attributes start out empty; the server fills them in.
        for field in ('current_value', 'id', 'limit', 'name', 'unit'):
            setattr(self, field, None)
|
nlhkabu/connect | connect/accounts/view_utils.py | Python | bsd-3-clause | 2,407 | 0 | from urllib.parse import urlsplit
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.db | impo | rt IntegrityError, transaction
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from connect.accounts.models import UserLink, UserSkill, LinkBrand
def save_paired_items(request, user, formset, Model,
                      item_name, counterpart_name):
    """Persist (item, counterpart) pairs — e.g. skills or links — for a user.

    Builds fresh model instances from the valid, fully-filled forms in
    ``formset``, then atomically replaces the user's existing records with
    them.  On an integrity failure the user is notified and redirected back
    to their profile settings page.
    """
    def build_record(cleaned_data):
        record = Model(user=user)
        setattr(record, item_name, cleaned_data.get(item_name, None))
        setattr(record, counterpart_name, cleaned_data.get(counterpart_name, None))
        return record

    new_records = [
        build_record(form.cleaned_data)
        for form in formset
        if form.is_valid()
        and form.cleaned_data.get(item_name, None)
        and form.cleaned_data.get(counterpart_name, None)
    ]
    # Swap the old pairs for the new inside one transaction so we never end
    # up having deleted the old rows without saving the replacements.
    try:
        with transaction.atomic():
            Model.objects.filter(user=user).delete()
            Model.objects.bulk_create(new_records)
    except IntegrityError:
        messages.error(request, _('There was an error updating your profile.'))
        return redirect(reverse('accounts:profile-settings'))
def save_skills(request, user, formset):
    """Persist the user's paired skills and proficiencies."""
    save_paired_items(request, user, formset, UserSkill,
                      item_name='skill', counterpart_name='proficiency')
def save_links(request, user, formset):
    """Persist the user's paired link anchors and URLs."""
    save_paired_items(request, user, formset, UserLink,
                      item_name='anchor', counterpart_name='url')
def match_link_to_brand(user_links):
    """
    Attempt to match each of the user's links to a recognised LinkBrand.

    The same behaviour exists as a custom save() method on the model;
    use this helper with code paths that create or update links in bulk.
    """
    for user_link in user_links:
        domain = urlsplit(user_link.url).netloc
        try:
            user_link.icon = LinkBrand.objects.get(domain=domain)
        except ObjectDoesNotExist:
            continue  # no registered brand for this domain — leave untouched
        user_link.save()
    return user_links
|
ashutoshvt/psi4 | psi4/share/psi4/databases/A24alt.py | Python | lgpl-3.0 | 29,758 | 0.00746 | #
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
| Database (Hobza) of interaction energies for bimolecular complexes.
| Geometries from <Reference>.
| Reference interaction energies from Rezac and Hobza, JCTC (in press).
- **cp** ``'off'`` <erase this comment and after unless on is a valid option> || ``'on'``
- **rlxd** ``'off'`` <erase this comment and after unless on is valid option> || ``'on'``
- **benchmark**
- ``'<benchmark_name>'`` <Reference>.
- |dl| ``'<default_benchmark_name>'`` |dr| <Reference>.
- **subset**
- ``'small'`` <members_description>
- ``'large'`` <members_description>
- ``'<subset>'`` <members_description>
"""
import re
import qcdb
# <<< A24 Database Module >>>
dbse = 'A24'
# <<< Database Members >>>
# The 24 bimolecular complexes of the A24 set, indexed 1-24.
HRXN = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]
HRXN_SM = []
HRXN_LG = []
# <<< Chemical Systems Involved >>>
RXNM = {} # reaction matrix of reagent contributions per reaction
ACTV = {} # order of active reagents per reaction
ACTV_CP = {} # order of active reagents per counterpoise-corrected reaction
ACTV_SA = {} # order of active reagents for non-supermolecular calculations
for rxn in HRXN:
    RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
                                    '%s-%s-monoA-CP' % (dbse, rxn) : -1,
                                    '%s-%s-monoB-CP' % (dbse, rxn) : -1,
                                    '%s-%s-monoA-unCP' % (dbse, rxn) : -1,
                                    '%s-%s-monoB-unCP' % (dbse, rxn) : -1 }
    ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
    ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
                                      '%s-%s-monoA-CP' % (dbse, rxn),
                                      '%s-%s-monoB-CP' % (dbse, rxn) ]
    ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
                                    '%s-%s-monoA-unCP' % (dbse, rxn),
                                    '%s-%s-monoB-unCP' % (dbse, rxn) ]
    # NOTE(review): this second assignment overwrites the three-reagent ACTV
    # entry above with a dimer-only list — it appears deliberate for the
    # "alt" variant of the A24 database, but confirm against the non-alt
    # A24.py before removing either assignment.
    ACTV['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
# <<< Reference Values [kcal/mol] from Rezac and Hobza dx.doi.org/10.1021/ct400057w >>>
# Benchmark interaction energy per reaction, keyed 'A24-<n>'.
BIND = {}
BIND['%s-%s' % (dbse, 1 )] = -6.524
BIND['%s-%s' % (dbse, 2 )] = -5.014
BIND['%s-%s' % (dbse, 3 )] = -4.749
BIND['%s-%s' % (dbse, 4 )] = -4.572
BIND['%s-%s' % (dbse, 5 )] = -3.157
BIND['%s-%s' % (dbse, 6 )] = -1.679
BIND['%s-%s' % (dbse, 7 )] = -0.779
BIND['%s-%s' % (dbse, 8 )] = -0.672
BIND['%s-%s' % (dbse, 9 )] = -4.474
BIND['%s-%s' % (dbse, 10 )] = -2.578
BIND['%s-%s' % (dbse, 11 )] = -1.629
BIND['%s-%s' % (dbse, 12 )] = -1.537
BIND['%s-%s' % (dbse, 13 )] = -1.389
BIND['%s-%s' % (dbse, 14 )] = -1.110
BIND['%s-%s' % (dbse, 15 )] = -0.514
BIND['%s-%s' % (dbse, 16 )] = -1.518
BIND['%s-%s' % (dbse, 17 )] = -0.837
BIND['%s-%s' % (dbse, 18 )] = -0.615
BIND['%s-%s' % (dbse, 19 )] = -0.538
BIND['%s-%s' % (dbse, 20 )] = -0.408
BIND['%s-%s' % (dbse, 21 )] = -0.370
BIND['%s-%s' % (dbse, 22 )] = 0.784
BIND['%s-%s' % (dbse, 23 )] = 0.897
BIND['%s-%s' % (dbse, 24 )] = 1.075
# <<< Comment Lines >>>
TAGL = {}
TAGL['%s-%s' % (dbse, 1)] = """ water_ammonia_Cs """
TAGL['%s-%s-dimer' % (dbse, 1)] = """Dimer from water_ammonia_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 1)] = """Monomer A water_ammonia_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 1)] = """Monomer B water_ammonia_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 1)] = """Monomer A water_ammonia_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 1)] = """Monomer B water_ammonia_Cs """
TAGL['%s-%s' % (dbse, 2)] = """ water_water_Cs """
TAGL['%s-%s-dimer' % (dbse, 2)] = """Dimer from water_water_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 2)] = """Monomer A from water_water_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 2)] = """Monomer B from water_water_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 2)] = """Monomer A from water_water_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 2)] = """Monomer B from water_water_Cs """
TAGL['%s-%s' % (dbse, 3)] = """ HCN_HCN_Cxv """
TAGL['%s-%s-dimer' % (dbse, 3)] = """Dimer from HCN_HCN_Cxv """
TAGL['%s-%s-monoA-CP' % (dbse, 3)] = """Monomer A from HCN_HCN_Cxv """
TAGL['%s-%s-monoB-CP' % (dbse, 3)] = """Monomer B from HCN_HCN_Cxv """
TAGL['%s-%s-monoA-unCP' % (dbse, 3)] = """Monomer A from HCN_HCN_Cxv """
TAGL['%s-%s-monoB-unCP' % (dbse, 3)] = """Monomer B from HCN_HCN_Cxv """
TAGL['%s-%s' % (dbse, 4)] = """ HF_HF_Cs """
TAGL['%s-%s-dimer' % (dbse, 4)] = """Dimer from HF_HF_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 4)] = """Monomer A from HF_HF_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 4)] = """Monomer B from HF_HF_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 4)] = """Monomer A from HF_HF_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 4)] = """Monomer B from HF_HF_Cs """
TAGL['%s-%s' % (dbse, 5)] = """ ammonia_ammonia_C2h """
TAGL['%s-%s-dimer' % (dbse, 5)] = """Dimer from ammonia_ammonia_C2h """
TAGL['%s-%s-monoA-CP' % (dbse, 5)] = """Monomer A from ammonia_ammonia_C2h """
TAGL['%s-%s-monoB-CP' % (dbse, 5)] = """Monomer B from ammonia_ammonia_C2h """
TAGL['%s-%s-monoA-unCP' % (dbse, 5)] = """Monomer A from ammonia_ammonia_C2h """
TAGL['%s-%s-monoB-unCP' % (dbse, 5)] = """Monomer B from ammonia_ammonia_C2h """
TAGL['%s-%s' % (dbse, 6)] = """ methane_HF_C3v """
TAGL['%s-%s-dimer' % (dbse, 6)] = """Dimer from methane_HF_C3v """
TAGL['%s-%s-monoA-CP' % (dbse, 6)] = """Monomer A from methane_HF_C3v """
TAGL['%s-%s-monoB-CP' % (dbse, 6)] = """Monomer B from methane_HF_C3v """
TAGL['%s-%s-monoA-unCP' % (dbse, 6)] = """Monomer A from methane_HF_C3v """
TAGL['%s-%s-monoB-unCP' % (dbse, 6)] = """Monomer B from methane_HF_C3v """
TAGL['%s-%s' % (dbse, 7)] = """ ammmonia_methane_C3v """
TAGL['%s-%s-dimer' % (dbse, 7)] = """Dimer from ammmonia_methane_C3v """
TAGL['%s-%s-monoA-CP' % (dbse, 7)] = """Monomer A from ammmonia_methane_C3v """
TAGL['%s-%s-monoB-CP' % (dbse, 7)] = """Monomer B from ammmonia_methane_C3v """
TAGL['%s-%s-monoA-unCP' % (dbse, 7)] = """Monomer A from ammmonia_methane_C3v """
TAGL['%s-%s-monoB-unCP' % (dbse, 7)] = """Monomer B from ammmonia_methane_C3v """
TAGL['%s-%s' % (dbse, 8)] = """ methane_water_Cs """
TAGL['%s-%s-dimer' % (dbse, 8)] = """Dimer from methane_water_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 8)] = """Monomer A from methane_water_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 8)] = """Monomer B from methane_water_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 8)] = """Monomer A from methane_water_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 8)] = """Monomer B from methane_water_Cs """
TAGL['%s-%s' % (dbse, 9)] = """ formaldehyde_formaldehyde_Cs """
TAGL['%s-%s-dimer' % (dbse, 9)] = """Dimer from formaldehyde_formaldehyde_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 9)] = """Monomer A from formaldehyde_formaldehyde_Cs """
TAGL[ |
invenfantasy/software-factory | docs/conf.py | Python | apache-2.0 | 8,403 | 0 | # -*- coding: utf-8 -*-
#
# software-factory documentation build configuration file, created by
# sphinx-quickstart on Mon May 5 13:05:13 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.graphviz']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'software-factory'
copyright = u'2014, eNovance'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Read the SF_REL= line from role_configrc. A list comprehension (rather
# than filter()) keeps this working on Python 3, where filter() returns a
# lazy iterator that cannot be indexed; the context manager also closes
# the file handle promptly instead of leaking it.
with open("../role_configrc") as _configrc:
    version = [
        line for line in _configrc if line.startswith("SF_REL=")
    ][0].strip().split('=')[1]
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon | file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static | files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'software-factorydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'software-factory.tex', u'software-factory Documentation',
u'eNovance', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'softwarefactory', u'software-factory Documentation',
[u'eNovance'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'software-factory', u'software-factory Documentation',
u'eNovance', 'software-factory', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texi |
Diacamma2/syndic | diacamma/condominium/printmodel/Owner_0001.py | Python | gpl-3.0 | 28,968 | 0.004108 | # -*- coding: utf-8 -*-
'''
Printmodel django module for condominium
@author: Laurent GAY
@organization: sd-libre.fr
@contact: info@sd-libre.fr
@copyright: 2016 sd-libre.fr
@license: This file is part of Lucterios.
Lucterios is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Lucterios is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Lucterios. If not, see <http://www.gnu.org/licenses/>.
'''
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from diacamma.condominium.models import Owner
# Translated display name of this print template.
name = _("owner")
# Template kind code used by the Lucterios printmodel loader —
# TODO confirm the meaning of 2 against the framework's conventions.
kind = 2
# Fully-qualified model name this print template applies to.
modelname = Owner.get_long_name()
value = """
<model hmargin="10.0" vmargin="10.0" page_width="210.0" page_height="297.0">
<header extent="25.0">
<text height="10.0" width="120.0" top="0.0" left="70.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="center" line_height="20" font_family="sans-serif" font_weight="" font_size="20">
{[b]}#OUR_DETAIL.name{[/b]}
</text>
<text height="10.0" width="120.0" top="10.0" left="70.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="center" line_height="8" font_family="sans-serif" font_weight="" font_size="8">
{[italic]}
#OUR_DETAIL.address - #OUR_DETAIL.postal_code #OUR_DETAIL.city - #OUR_DETAIL.tel1 #OUR_DETAIL.tel2 #OUR_DETAIL.email{[br/]}#OUR_DETAIL.identify_number
{[/italic]}
</text>
<image height="25.0" width="30.0" top="0.0" left="10.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2">
#OUR_DETAIL.image
</image>
</header>
<bottom extent="10.0">
</bottom>
<body>
<text height="8.0" width="190.0" top="0.0" left="0.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="center" line_height="15" font_family="sans-serif" font_weight="" font_size="15">
{[b]}%(title)s{[/b]}
</text>
<text height="8.0" width="190.0" top="8.0" left="0.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="center" line_height="13" font_family="sans-serif" font_weight="" font_size="13">
#date_begin - #date_end
</text>
<text height="20.0" width="100.0" top="25.0" left="80.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="center" line_height="11" font_family="sans-serif" font_weight="" font_size="11">
{[b]}#third.contact.str{[/b]}{[br/]}#third.contact.address{[br/]}#third.contact.postal_code #third.contact.city
</text>
<text height="20.0" width="100.0" top="25.0" left="0.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="center" line_height="11" font_family="sans-serif" font_weight="" font_size="11">
{[b]}%(info)s{[/b]}: #information
</text>
<text height="10.0" width="75.0" top="45.0" left="00.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="left" line_height="11" font_family="sans-serif" font_weight="" font_size="11">
{[b]}%(current)s{[/b]}
</text>
<table height="40.0" width="75.0" top="55.0" left="00.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2">
<columns width="50.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="right" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
</columns>
<columns width="25.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="right" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[i]}%(value)s{[/i]}
</columns>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_current_initial)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_current_initial
</cell>
</rows>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_current_call)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_current_call{[br/]}
</cell>
</rows>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_current_payoff)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_current_payoff
</cell>
</rows>
<rows>
<cell border_color="black" | border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_current_owner)s{[/b]}
</cell>
<cell border_color="black | " border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_current_owner
</cell>
</rows>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_current_regularization)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_current_regularization
</cell>
</rows>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_current_ventilated)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_current_ventilated
</cell>
</rows>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_recoverable_load)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_recoverable_load
</cell>
</rows>
</table>
<text height="10.0" width="75.0" top="45.0" left="100.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="left" line_height="11" font_family="sans-serif" font_weight="" font_size="11">
{[b]}%(exceptional)s{[/b]}
</text>
<table height="40.0" width="75.0" top="55.0" left="100.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2">
<columns width="50.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="right" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
</columns>
<columns width="25.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="right" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[i]}%(value)s{[/i]}
</columns>
<rows>
<cell border_color="bla |
Royce/GammaJS | support/APIgen/generate.py | Python | mit | 4,851 | 0.024531 | ########################
###
### IMPORTS
###
########################
########################
### BUILTIN
########################
from datetime import date
import codecs
import shutil
import time
import sys
import re
import os
########################
### LOCAL
########################
from utility import mkdir
from containers import *
from const import *
########################
### LOGGING
########################
import logging, logging.config
try:
logging.config.fileConfig(os.path.join(sys.path[0], LOGCONFIG))
except:
pass
log = logging.getLogger('parser.generate')
here = os.sep.join(__file__.split(os.sep)[:-1])
########################
###
### TEMPLATES
###
########################
def createPage(filename, templates, context):
from django.template import Context, loader
t = loader.select_template(templates)
f = open(filename, "w")
log.info("Creating page %s" % filename)
f.write(t.render(Context(context)))
########################
###
### GE | NERATOR
###
########################
class Generator(object):
def __init__(self
, outDir = os.path.join(here, "docs")
, tempdir = os.path.join(here, "tmp")
, assetDirs = None
, showPrivate = False
, templateDirs = None
):
self.outDir = os.path.abspath(outDir)
self.tempDir = os.path.abspath(tempdir)
self.assetDirs = []
self.showPrivate = showPriva | te
self.templateDirs = templateDirs
if not self.templateDirs:
self.templateDirs = [os.path.join(here, "templates"), ""]
for new, onKls in [(templateDirs, self.templateDirs), (assetDirs, self.assetDirs)]:
if new:
if type(new) in (str, unicode):
new = (new, )
for directory in new:
directory = os.path.abspath(directory)
if os.path.exists(directory) and directory not in onKls:
onKls.append(directory)
########################
### UTILITY
########################
def createPage(self, information, filename, templates, **context):
context['information'] = information
filename = os.path.join(self.outDir, filename)
if type(templates) in (str, unicode):
templates = (templates, )
createPage(filename, templates, context)
########################
### PROCESS
########################
def process(self, information):
# Setup django for templates
from django.conf import settings
settings.configure(
TEMPLATE_DIRS=self.templateDirs,
INSTALLED_APPS = ('APIgen.tags', )
)
# Reset temp dir
if os.path.exists(self.tempDir):
shutil.rmtree(self.tempDir)
# Make sure we have out and temp directories
mkdir(self.outDir)
mkdir(self.tempDir)
# Copy assets to output
for directory in self.assetDirs:
shutil.copytree(directory, self.tempDir, ignore=shutil.ignore_patterns(IGNORE_PATTERNS))
log.info("\n---------------------GENERATING------------------------\n")
for module in information[MODULES].values():
self.gen_module(information, module)
log.info("\n---------------------DONE------------------------\n")
def gen_module(self, information, module):
moduleName = module[NAME]
self.createPage(
information
, "%s.txt" % moduleName
, [ os.sep.join(['modules', moduleName, 'module.rst'])
, 'module.rst'
]
, module = module
, current = module
, fullname = moduleName
)
moduleDir = os.path.join(self.outDir, moduleName)
mkdir(moduleDir)
for kls in module[CLASS_LIST]:
klsName = kls[NAME]
fullName = "%s.%s" % (moduleName, klsName)
if moduleName == klsName:
fullName = klsName
self.createPage(
information
, os.sep.join([moduleName, "%s.txt" % klsName])
, [ os.sep.join(["classes", "%s.rst" % klsName])
, os.sep.join(["classes", moduleName, "%s.rst" % klsName])
, os.sep.join(["modules", moduleName, "class.rst"])
, os.sep.join(["modules", moduleName, "classes", "%s.rst" % klsName])
, "class.rst"
]
, module = module
, current = kls
, fullname = fullName
)
|
matslindh/codingchallenges | knowit2017/23.py | Python | mit | 2,089 | 0.004787 | import copy
def execute(moves):
players = [
{'name': 'Xena', 'score': 0},
{'name': 'Ophelia', 'score': 0},
]
idx = 0
first_player = 0
draw_count = 0
move_count = 0
init_map = [[False]*3, [False]*3, [False]*3]
map = copy.deepcopy(init_map)
for move | in moves:
| move = int(move)
player_idx = (idx + first_player) % 2
player = players[player_idx]
idx += 1
row = (move - 1) // 3
column = (move - 1) % 3
move_count += 1
map[row][column] = 'X' if player_idx == first_player else 'O'
done = False
if (check_winning(map)):
done = True
draw_count = 0
players[player_idx]['score'] += 1
first_player = 0 if player_idx else 1
print("win " + str(player_idx))
elif move_count == 9:
done = True
draw_count += 1
print("draw")
if draw_count == 3:
print("three draws, resetting")
draw_count = 0
first_player = 0 if first_player else 1
if done:
idx = 0
print_map(map)
move_count = 0
map = copy.deepcopy(init_map)
print(players)
def print_map(map):
for row in map:
for column in row:
print(column if column else '.', end='')
print('')
print('')
def check_winning(map):
if map[1][1] and map[0][0] == map[1][1] == map[2][2]:
print("win diag 1")
return map[0][0]
if map[1][1] and map[0][2] == map[1][1] == map[2][0]:
print("win diag 2")
return map[0][2]
for i in range(0, 3):
if map[i][0] and map[i][0] == map[i][1] == map[i][2]:
print("win vertical " + str(i))
return map[i][0]
if map[0][i] and map[0][i] == map[1][i] == map[2][i]:
print("win horizontal " + str(i))
return map[0][i]
return None
execute(open("input/dec23").read())
|
mozilla/normandy | normandy/studies/urls.py | Python | mpl-2.0 | 242 | 0 | from django.conf.urls import u | rl, include
app_name = "studies"
urlpatterns = [
url(r"^api/v1/", include("normandy.studies.api.v1.urls", namespace="v1")),
url( | r"^api/v3/", include("normandy.studies.api.v3.urls", namespace="v3")),
]
|
sametmax/Django--an-app-at-a-time | ignore_this_directory/django/utils/formats.py | Python | mit | 8,973 | 0.000892 | import datetime
import decimal
import unicodedata
from importlib import import_module
from django.conf import settings
from django.utils import dateformat, datetime_safe, numberformat
from django.utils.functional import lazy
from django.utils.translation import (
check_for_language, get_language, to_locale,
)
# format_cache is a mapping from (format_type, lang) to the format string.
# By using the cache, it is possible to avoid running get_format_modules
# repeatedly.
_format_cache = {}
_format_modules_cache = {}
ISO_INPUT_FORMATS = {
'DATE_INPUT_FORMATS': ['%Y-%m-%d'],
'TIME_INPUT_FORMATS': ['%H:%M:%S', '%H:%M:%S.%f', '%H:%M'],
'DATETIME_INPUT_FORMATS': [
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M',
'%Y-%m-%d'
],
}
FORMAT_SETTINGS = frozenset([
'DECIMAL_SEPARATOR',
'THOUSAND_SEPARATOR',
'NUMBER_GROUPING',
'FIRST_DAY_OF_WEEK',
'MONTH_DAY_FORMAT',
'TIME_FORMAT',
'DATE_FORMAT',
'DATETIME_FORMAT',
'SHORT_DATE_FORMAT',
'SHORT_DATETIME_FORMAT',
'YEAR_MONTH_FORMAT',
'DATE_INPUT_FORMATS',
'TIME_INPUT_FORMATS',
'DATETIME_INPUT_FORMATS',
])
def reset_format_cache():
"""Clear any cached formats.
This method is provided primarily for testing purposes,
so that the effects of cached formats can be removed.
"""
global _format_cache, _format_modules_cache
_format_cache = {}
_format_modules_cache = {}
def iter_format_modules(lang, format_module_path=None):
"""Find format modules."""
if not check_for_language(lang):
return
if format_module_path is None:
format_module_path = settings.FORMAT_MODULE_PATH
format_locations = []
if format_module_path:
if isinstance(format_module_path, str):
format_module_path = [format_module_path]
for path in format_module_path:
format_locations.append(path + '.%s')
format_locations.append('django.conf.locale.%s')
locale = to_locale(lang)
locales = [locale]
if '_' in locale:
locales.append(locale.split('_')[0])
for location in format_locations:
for loc in locales:
try:
yield import_module('%s.formats' % (location % loc))
except ImportError:
pass
def get_format_modules(lang=None, reverse=False):
"""Return a list of the format modules found."""
if lang is None:
lang = get_language()
if lang not in _format_modules_cache:
_format_modules_cache[lang] = list(iter_format_modules(lang, settings.FORMAT_MODULE_PATH))
modules = _format_modules_cache[lang]
if reverse:
return list(reversed(modules))
return modules
def get_format(format_type, lang=None, use_l10n=None):
"""
For a specific format type, return the format for the current
language (locale). Default to the format in the settings.
format_type is the name of the format, e.g. 'DATE_FORMAT'.
If use_l10n is provided and is not None, it forces the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
use_l10n = use_l10n or (use_l10n is None and settings.USE_L10N)
if use_l10n and lang is None:
lang = get_language()
cache_key = (format_type, lang)
try:
return _format_cache[cache_key]
except KeyError:
pass
# The requested format_type has not been cached yet. Try to find it in any
# of the format_modules for the given lang if l10n is enabled. If it's not
# there or if l10n is disabled, fall back to the project settings.
val = None
if use_l10n:
for module in get_format_modules(lang):
val = getattr(module, format_type, None)
if val is not None | :
break
if val is None:
if format_type not in FORMAT_SETTINGS:
return format_type
val = getattr(settings, format_type)
elif format_type in ISO_INPUT_FORMATS:
# If a list of input formats from on | e of the format_modules was
# retrieved, make sure the ISO_INPUT_FORMATS are in this list.
val = list(val)
for iso_input in ISO_INPUT_FORMATS.get(format_type, ()):
if iso_input not in val:
val.append(iso_input)
_format_cache[cache_key] = val
return val
get_format_lazy = lazy(get_format, str, list, tuple)
def date_format(value, format=None, use_l10n=None):
"""
Format a datetime.date or datetime.datetime object using a
localizable format.
If use_l10n is provided and is not None, that will force the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
return dateformat.format(value, get_format(format or 'DATE_FORMAT', use_l10n=use_l10n))
def time_format(value, format=None, use_l10n=None):
"""
Format a datetime.time object using a localizable format.
If use_l10n is provided and is not None, it forces the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
return dateformat.time_format(value, get_format(format or 'TIME_FORMAT', use_l10n=use_l10n))
def number_format(value, decimal_pos=None, use_l10n=None, force_grouping=False):
"""
Format a numeric value using localization settings.
If use_l10n is provided and is not None, it forces the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
if use_l10n or (use_l10n is None and settings.USE_L10N):
lang = get_language()
else:
lang = None
return numberformat.format(
value,
get_format('DECIMAL_SEPARATOR', lang, use_l10n=use_l10n),
decimal_pos,
get_format('NUMBER_GROUPING', lang, use_l10n=use_l10n),
get_format('THOUSAND_SEPARATOR', lang, use_l10n=use_l10n),
force_grouping=force_grouping,
use_l10n=use_l10n,
)
def localize(value, use_l10n=None):
"""
Check if value is a localizable type (date, number...) and return it
formatted as a string using current locale format.
If use_l10n is provided and is not None, it forces the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
if isinstance(value, str): # Handle strings first for performance reasons.
return value
elif isinstance(value, bool): # Make sure booleans don't get treated as numbers
return str(value)
elif isinstance(value, (decimal.Decimal, float, int)):
return number_format(value, use_l10n=use_l10n)
elif isinstance(value, datetime.datetime):
return date_format(value, 'DATETIME_FORMAT', use_l10n=use_l10n)
elif isinstance(value, datetime.date):
return date_format(value, use_l10n=use_l10n)
elif isinstance(value, datetime.time):
return time_format(value, 'TIME_FORMAT', use_l10n=use_l10n)
return value
def localize_input(value, default=None):
"""
Check if an input value is a localizable type and return it
formatted with the appropriate formatting string of the current locale.
"""
if isinstance(value, str): # Handle strings first for performance reasons.
return value
elif isinstance(value, bool): # Don't treat booleans as numbers.
return str(value)
elif isinstance(value, (decimal.Decimal, float, int)):
return number_format(value)
elif isinstance(value, datetime.datetime):
value = datetime_safe.new_datetime(value)
format = default or get_format('DATETIME_INPUT_FORMATS')[0]
return value.strftime(format)
elif isinstance(value, datetime.date):
value = datetime_safe.new_date(value)
format = default or get_format('DATE_INPUT_FORMATS')[0]
return value.strftime(format)
elif isinstance(value, datetime.time):
format = default or get_format('TIME_INPUT_FORMATS')[0]
return value.strftime(format)
return value
def sanitize_separators(value):
"""
Sanitize a value according to the current decimal and
thousand separator setting. Used with form field input.
"""
if isinstance(value, str):
parts = []
|
GNOME/orca | test/keystrokes/firefox/aria_slider_tpg.py | Python | lgpl-2.1 | 8,352 | 0.000239 | #!/usr/bin/python
"""Test of ARIA horizontal sliders using Firefox."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
#sequence.append(WaitForDocLoad())
sequence.append(PauseAction(10000))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"1. Tab to Volume Horizontal Slider",
["BRAILLE LINE: 'Volume 0 % horizontal slider'",
" VISIBLE: 'Volume 0 % horizontal slider', cursor=1",
"SPEECH OUTPUT: 'Volume horizontal slider 0 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Right"))
sequence.append(utils.AssertPresentationAction(
"2. Volume Right Arrow",
["BRAILLE LINE: 'Volume 1 % horizontal slider'",
" VISIBLE: 'Volume 1 % horizontal slider', cursor=1",
"SPEECH OUTPUT: '1 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Right"))
sequence.append(utils.AssertPresentationAction(
"3. Volume Right Arrow",
["BRAILLE LINE: 'Volume 2 % horizontal slider'",
" VISIBLE: 'Volume 2 % horizontal slider', cursor=1",
"SPEECH OUTPUT: '2 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Left"))
sequence.append(utils.AssertPresentationAction(
"4. Volume Left Arrow",
["BRAILLE LINE: 'Volume 1 % horizontal slider'",
" VISIBLE: 'Volume 1 % horizontal slider', cursor=1",
"SPEECH OUTPUT: '1 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Left"))
sequence.append(utils.AssertPresentationAction(
"5. Volume Left Arrow",
["BRAILLE LINE: 'Volume 0 % horizontal slider'",
" VISIBLE: 'Volume 0 % horizontal slider', cursor=1",
"SPEECH OUTPUT: '0 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"6. Volume Up Arrow",
["BRAILLE LINE: 'Volume 1 % horizontal slider'",
" VISIBLE: 'Volume 1 % horizontal slider', cursor=1",
"SPEECH OUTPUT: '1 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"7. Volume Up Arrow",
["BRAILLE LINE: 'Volume 2 % horizontal slider'",
" VISIBLE: 'Volume 2 % horizontal slider', cursor=1",
"SPEECH OUTPUT: '2 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"8. Volume Down Arrow",
["BRAILLE LINE: 'Volume 1 % horizontal slider'",
" VISIBLE: 'Volume 1 % horizontal slider', cursor=1",
"SPEECH OUTPUT: '1 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"9. Volume Down Arrow",
["BRAILLE LINE: 'Volume 0 % horizontal slider'",
" VISIBLE: 'Volume 0 % horizontal slider', cursor=1",
"SPEECH OUTPUT: '0 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Page_Up"))
sequence.append(utils.AssertPresentationAction(
"10. Volume Page Up",
["BRAILLE LIN | E: 'Volume 25 % horizontal slider'",
" VISIBLE: 'Volume 25 % horizontal slider', cursor=1",
"SPEECH OUTPUT: '25 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Page_Down"))
sequence.append(utils.AssertPresentationAction(
"11. Volume Page | Down",
["BRAILLE LINE: 'Volume 0 % horizontal slider'",
" VISIBLE: 'Volume 0 % horizontal slider', cursor=1",
"SPEECH OUTPUT: '0 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("End"))
sequence.append(utils.AssertPresentationAction(
"12. Volume End",
["BRAILLE LINE: 'Volume 100 % horizontal slider'",
" VISIBLE: 'Volume 100 % horizontal slider', cursor=1",
"SPEECH OUTPUT: '100 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Home"))
sequence.append(utils.AssertPresentationAction(
"13. Volume Home",
["BRAILLE LINE: 'Volume 0 % horizontal slider'",
" VISIBLE: 'Volume 0 % horizontal slider', cursor=1",
"SPEECH OUTPUT: '0 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"14. Tab to Food Quality Horizontal Slider",
["KNOWN ISSUE: The double-presentation is because of the authoring, putting the name and value into the description",
"BRAILLE LINE: 'Food Quality terrible horizontal slider'",
" VISIBLE: 'Food Quality terrible horizontal', cursor=1",
"SPEECH OUTPUT: 'Food Quality horizontal slider terrible.'",
"SPEECH OUTPUT: 'Food Quality: terrible (1 of 5)'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Right"))
sequence.append(utils.AssertPresentationAction(
"15. Food Quality Right Arrow",
["BRAILLE LINE: 'Food Quality bad horizontal slider'",
" VISIBLE: 'Food Quality bad horizontal slid', cursor=1",
"SPEECH OUTPUT: 'bad'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Right"))
sequence.append(utils.AssertPresentationAction(
"16. Food Quality Right Arrow",
["BRAILLE LINE: 'Food Quality decent horizontal slider'",
" VISIBLE: 'Food Quality decent horizontal s', cursor=1",
"SPEECH OUTPUT: 'decent'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Left"))
sequence.append(utils.AssertPresentationAction(
"17. Food Quality Left Arrow",
["BRAILLE LINE: 'Food Quality bad horizontal slider'",
" VISIBLE: 'Food Quality bad horizontal slid', cursor=1",
"SPEECH OUTPUT: 'bad'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"18. Food Quality Up Arrow",
["BRAILLE LINE: 'Food Quality decent horizontal slider'",
" VISIBLE: 'Food Quality decent horizontal s', cursor=1",
"SPEECH OUTPUT: 'decent'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"19. Food Quality Down Arrow",
["BRAILLE LINE: 'Food Quality bad horizontal slider'",
" VISIBLE: 'Food Quality bad horizontal slid', cursor=1",
"SPEECH OUTPUT: 'bad'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"20. Food Quality Down Arrow",
["BRAILLE LINE: 'Food Quality terrible horizontal slider'",
" VISIBLE: 'Food Quality terrible horizontal', cursor=1",
"SPEECH OUTPUT: 'terrible'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Page_Up"))
sequence.append(utils.AssertPresentationAction(
"21. Food Quality Page Up",
["BRAILLE LINE: 'Food Quality bad horizontal slider'",
" VISIBLE: 'Food Quality bad horizontal slid', cursor=1",
"SPEECH OUTPUT: 'bad'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Page_Down"))
sequence.append(utils.AssertPresentationAction(
"22. Food Quality Page Down",
["BRAILLE LINE: 'Food Quality terrible horizontal slider'",
" VISIBLE: 'Food Quality terrible horizontal', cursor=1",
"SPEECH OUTPUT: 'terrible'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("End"))
sequence.append(utils.AssertPresentationAction(
"23. Food Quality End",
["BRAILLE LINE: 'Food Quality excellent horizontal slider'",
" VISIBLE: 'Food Quality excellent horizonta', cursor=1",
"SPEECH OUTPUT: 'excellent'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Home"))
sequence.append(utils.AssertPresentationAction(
"24. Food Quality Home",
["BRAILLE LINE: 'Food Quality terrible horizontal slider'" |
lynxis/libavg | src/samples/rotcustompivot.py | Python | lgpl-2.1 | 291 | 0.024055 | #!/usr/bin/env python
# -*- coding: ut | f-8 -*-
from libavg import avg, player
canvas = player.createMainCanvas(size=(160,120))
rootNode = canvas.getRootNode()
avg.ImageNode(pos=(80,30), size=(40,30), href="rgb24-64x64.png", angle=1.570, pivot=(0,0),
par | ent=rootNode)
player.play()
|
asweigart/tortuga | docs/conf.py | Python | bsd-3-clause | 8,154 | 0.006255 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Tortuga documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 9 16:36:12 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Tortuga'
copyright = '2015, Al Sweigart'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Tortugadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Tortuga.tex', 'Tortuga Documentation',
'Al Sweigart', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tortuga', 'Tortuga Documentation',
['Al Sweigart'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Tortuga', 'Tortuga Documentation',
'Al Sweigart', 'Tortuga', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
thijsmie/madmin | gui_lib/container.py | Python | mit | 2,228 | 0.002244 | from widget import Widget
from gui_util import intersect
# Simple container, list of children is not meant to be mutable
class Container(Widget):
    """Widget that groups child widgets at fixed (x, y) positions.

    Children are stored as (x, y, widget) tuples.  The list of children is
    not meant to be mutated from outside; ``remChild`` pops by index, which
    shifts the indices of the children added after it.
    """
    def __init__(self, width, height):
        super(Container, self).__init__(width, height)
        self.children = []   # list of (child_x, child_y, child_widget) tuples
        self.curFocus = 0    # index of the child currently holding focus

    def draw(self, canvas, offsetx, offsety, minx, miny, maxx, maxy):
        """Draw every child, clipped against the (minx, miny, maxx, maxy) region."""
        for (child_x, child_y, child_widget) in self.children:
            (child_width, child_height) = child_widget.size()
            regions = intersect((minx, miny, maxx, maxy),
                                (child_x, child_y, child_x+child_width, child_y+child_height))
            for (cminx, cminy, cmaxx, cmaxy) in regions:
                # Translate the clip rectangle into the child's coordinate space.
                child_widget.draw(canvas, offsetx+child_x, offsety+child_y,
                                  cminx-child_x, cminy-child_y, cmaxx-child_x, cmaxy-child_y)

    def addChild(self, child_x, child_y, child_widget):
        """Append a child at (child_x, child_y); returns its index."""
        self.children.append((child_x, child_y, child_widget))
        return len(self.children)-1

    def getChild(self, index):
        """Return the (x, y, widget) tuple stored at *index*."""
        return self.children[index]

    def remChild(self, index):
        """Remove the child at *index* (later indices shift down by one)."""
        self.children.pop(index)

    def setChildPos(self, index, child_x, child_y):
        # Restored from a corrupted source line ("chi | ld_x"); moves the
        # child at *index* to a new position, keeping the widget itself.
        self.children[index] = (child_x, child_y, self.children[index][2])

    def numChildren(self):
        """Return the number of children in this container."""
        return len(self.children)

    def onFocus(self):
        """Give focus to the first child (from curFocus on) that accepts it."""
        while self.curFocus < len(self.children):
            if self.children[self.curFocus][2].onFocus():
                return True
            self.curFocus += 1
        self.curFocus = 0
        return False

    def changeFocus(self):
        """Advance focus to the next child that accepts it; False when exhausted."""
        # child that has focus gets chance to handle it self first
        if self.children[self.curFocus][2].changeFocus():
            return True
        # Then we handle it
        self.curFocus += 1
        while self.curFocus < len(self.children):
            if self.children[self.curFocus][2].onFocus():
                return True
            self.curFocus += 1
        self.curFocus = 0
        return False

    def offFocus(self):
        """Notify the currently focused child that it lost focus."""
        self.children[self.curFocus][2].offFocus()

    def keyEvent(self, key):
        """Forward a key event to the focused child, if any."""
        if self.curFocus < len(self.children):
            return self.children[self.curFocus][2].keyEvent(key)
|
sjsucohort6/openstack | python/venv/lib/python2.7/site-packages/novaclient/tests/unit/fixture_data/availability_zones.py | Python | mit | 3,264 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from novaclient.tests.unit.fixture_data import base
class V1(base.Fixture):
    """Fixture that registers canned os-availability-zone API responses.

    Stubs the list endpoint (GET <base_url>) and the detail endpoint
    (GET <base_url>/detail) with fixed zone data.
    """
    base_url = 'os-availability-zone'
    # Keys used in the JSON payloads; v1 of the API uses camelCase names.
    zone_info_key = 'availabilityZoneInfo'
    zone_name_key = 'zoneName'
    zone_state_key = 'zoneState'
    def setUp(self):
        """Register the GET stubs for the list and detail endpoints."""
        super(V1, self).setUp()
        # Summary listing: zone names and availability only, no host data.
        get_os_availability_zone = {
            self.zone_info_key: [
                {
                    self.zone_name_key: "zone-1",
                    self.zone_state_key: {"available": True},
                    "hosts": None
                },
                {
                    self.zone_name_key: "zone-2",
                    self.zone_state_key: {"available": False},
                    "hosts": None
                }
            ]
        }
        self.requests.register_uri('GET', self.url(),
                                   json=get_os_availability_zone,
                                   headers=self.json_headers)
        # Detail listing: includes per-host service state for some zones.
        get_os_zone_detail = {
            self.zone_info_key: [
                {
                    self.zone_name_key: "zone-1",
                    self.zone_state_key: {"available": True},
                    "hosts": {
                        "fake_host-1": {
                            "nova-compute": {
                                "active": True,
                                "available": True,
                                "updated_at": '2012-12-26 14:45:25'
                            }
                        }
                    }
                },
                {
                    self.zone_name_key: "internal",
                    self.zone_state_key: {"available": True},
                    "hosts": {
                        "fake_host-1": {
                            "nova-sched": {
                                "active": True,
                                "available": True,
                                "updated_at": '2012-12-26 14:45:25'
                            }
                        },
                        "fake_host-2": {
                            "nova-network": {
                                "active": True,
                                "available": False,
                                "updated_at": '2012-12-26 14:45:24'
                            }
                        }
                    }
                },
                {
                    self.zone_name_key: "zone-2",
                    self.zone_state_key: {"available": False},
                    "hosts": None
                }
            ]
        }
        self.requests.register_uri('GET', self.url('detail'),
                                   json=get_os_zone_detail,
                                   headers=self.json_headers)
|
alex-mitrevski/delta-execution-models | rule_learner/utils/conversion.py | Python | gpl-3.0 | 1,716 | 0.002914 | '''
Copyright 2017 by Alex Mitrevski <aleksandar.mitrevski@h-brs.de>
This file is part of delta-execution-models.
delta-execution-models is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
delta-execution-models is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with delta-execution-models. If not, see <http://www.gnu.org/licenses/>.
'''
class Types(object):
    """Enumeration of return types understood by TypeConverter."""
    BOOL = 0  # return values as booleans
    INT = 1   # return values as integers
class TypeConverter(object):
    '''An interface to various static type conversion methods.

    Author -- Alex Mitrevski
    '''
    @staticmethod
    def bool_to_int(x):
        '''Converts a boolean to an integer value (1 for truthy, 0 for falsy).

        Keyword arguments:
        x -- A boolean variable.
        '''
        return 1 if x else 0

    @staticmethod
    def return_correct_type(x, return_type):
        '''Returns the value of 'x' in the type 'return_type'.

        Doesn't return anything if 'return_type' is not 'Types.BOOL' or 'Types.INT'.

        Keyword arguments:
        x -- A boolean or integer variable.
        return_type -- A value of type 'Types'.
        '''
        if return_type == Types.BOOL:
            return x
        if return_type == Types.INT:
            return TypeConverter.bool_to_int(x)
|
barisariburnu/viaspider | viaspider/items.py | Python | mit | 2,467 | 0.008107 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from viaspider.settings import SUMMARY_LIMIT
class ViaspiderItem(scrapy.Item):
    """Item holding the metadata scraped from a single article page."""
    # define the fields for your item here like:
    url = scrapy.Field()         # canonical article URL
    title = scrapy.Field()       # page title without the blog-name suffix
    summary = scrapy.Field()     # og:description, truncated to SUMMARY_LIMIT
    categories = scrapy.Field()  # article:section values
    tags = scrapy.Field()        # article:tag values (falls back to categories)
    image = scrapy.Field()       # og:image URL
    source = scrapy.Field()      # identifier of the originating site
    created = scrapy.Field()     # article:published_time timestamp
class ItemParser(object):
    """Builds a ViaspiderItem from a scraped page's Open Graph metadata.

    Reads everything from the <head> of ``response``; ``bloginfo`` is the
    blog-name suffix that site engines append to <title> (e.g. " | My Blog").
    NOTE: "seperator" is the original (misspelled) attribute name and is kept
    for interface compatibility.
    """
    def __init__(self, source, response, seperator, bloginfo):
        self.source = source
        self.response = response
        self.seperator = seperator
        # Full suffix as it appears in titles: " <sep> <blog name>".
        self.bloginfo = " " + self.seperator + " " + bloginfo

    @property
    def url(self):
        return self.response.url

    @property
    def title(self):
        result = self.response.xpath('//head/title/text()').extract()[0].encode('utf-8')
        if result.endswith(self.bloginfo):
            # Drop the trailing blog-name segment.  (Restored from a corrupted
            # source line: "re | turn ... self. | seperator".)
            return (self.seperator.join(result.split(self.seperator)[:-1])).strip()
        else:
            return result

    @property
    def summary(self):
        result = self.response.xpath('//head/meta[@property="og:description"]/@content').extract()[0]
        # Bug fix: the original sliced from the *end* of the string
        # (result[:-(SUMMARY_LIMIT + 3)]), which kept an almost-empty prefix
        # for long descriptions.  Keep the first SUMMARY_LIMIT characters
        # and append an ellipsis instead.
        return result[:SUMMARY_LIMIT] + '...' if len(result) > SUMMARY_LIMIT else result

    @property
    def categories(self):
        results = self.response.xpath('//head/meta[@property="article:section"]/@content').extract()
        return results

    @property
    def tags(self):
        results = self.response.xpath('//head/meta[@property="article:tag"]/@content').extract()
        # Fall back to the categories when the page defines no tags.
        return results if len(results) > 0 else self.categories

    @property
    def image(self):
        result = self.response.xpath('//head/meta[@property="og:image"]/@content').extract()[0]
        return result

    @property
    def created(self):
        result = self.response.xpath('//head/meta[@property="article:published_time"]/@content').extract()[0]
        return result

    def parse(self):
        """Assemble and return a populated ViaspiderItem."""
        item = ViaspiderItem()
        item['url'] = self.url
        item['source'] = self.source
        item['title'] = self.title
        item['summary'] = self.summary
        item['categories'] = self.categories
        item['tags'] = self.tags
        item['image'] = self.image
        item['created'] = self.created
        return item
abulte/docker-dbaas | manage.py | Python | mpl-2.0 | 757 | 0.003968 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2013 Alexandre Bulté <alexandre[at]bulte[dot]net>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from flask.ext.script import Manager
from contextlib import closing
import docker_lib as docker
from webapp import app, connect_db
manager = Manager(app)
@manager.command
def init_db():
    """Initialise the application database by executing schema.sql.

    Opens a fresh DB connection, runs the whole schema script and commits.
    (Python 2 file: uses print statements.)
    """
    print 'Starting...'
    # closing() guarantees the connection is released even if the script fails.
    with closing(connect_db()) as db:
        with app.open_resource('schema.sql', mode='r') as f:
            db.cursor().executescript(f.read())
        db.commit()
    print 'Done.'
if __name__ == "__main__":
manager.run()
|
ActiveState/code | recipes/Python/435694_Evaluate_upperletters_variable_name_constants/recipe-435694.py | Python | mit | 7,266 | 0.002477 | ####
# compile.py
#!/usr/bin/env python
import re
import os
import sys
import new
import imp
import time
import struct
import marshal
import compiler
from compiler.ast import Const, AssName, AssTuple
__author__ = 'Shimomura Ikkei'
__date__ = '2005-06-23'
__all__ = ['ConstantCompiler']
# Check the string is valid constant name,
isConstName = re.compile('^[A-Z][A-Z_]+$').match
def ispyfile(filename):
    "ispyfile(filename) ... The file is python source file."
    assert isinstance(filename, str) and filename
    if not filename.endswith('.py'):
        return False
    return os.path.isfile(filename)
def change_extension(name, ext='.pyc'):
    "change_extension(name, ext) ... Rename exstension."
    assert isinstance(name, str) and name
    assert isinstance(ext, str) and ext
    assert ext.startswith('.'), 'File extension must starts with dot.'
    root, _old_ext = os.path.splitext(name)
    return root + ext
class ConstantVisitor:
    """AST visitor (Python 2 ``compiler`` module) that records UPPER_CASE
    constant assignments and folds known constant names into Const nodes."""
    def __init__(self, constants):
        # Shared dict mapping constant name -> value; mutated during the walk.
        self.constants = constants
    def __registerConstant(self, node, assign, const):
        """Remember ``assign.name`` -> value of ``const`` if it looks like a constant."""
        assert isinstance(assign, AssName)
        if isConstName(assign.name):
            if self.constants.has_key(assign.name):
                # Redefinition: keep the first value, just warn.
                print "Warning: %s at line %d: '%s' is already defined." % \
                    (node.filename, node.lineno, assign.name)
            else:
                if isinstance(const, Const):
                    self.constants[assign.name] = const.value
                else:
                    self.constants[assign.name] = None # dummy data
    def visitAssign(self, node):
        """Handle ``X = 1`` and ``X, Y = 1, 2`` style assignments."""
        nodes = node.getChildren()
        if isinstance(nodes[0], AssName):
            name, const = nodes
            self.__registerConstant(node, name, const)
        elif isinstance(nodes[0], AssTuple):
            names, consts = nodes
            names = names.getChildren()
            consts = consts.getChildren()
            assert len(names) == len(consts)
            for name, const in zip(names, consts):
                self.__registerConstant(node, name, const)
    def visitName(self, node):
        """Replace a Name node with a Const node when its value is known."""
        assert isinstance(node, compiler.ast.Name)
        if isConstName(node.name) and self.constants.has_key(node.name):
            value = self.constants.get(node.name)
            # If the value can be constant(int, long, float, str, ...)
            if [True for type in (int, long, float, str) if isinstance(value, type)]:
                node.__class__ = Const
                node.value = value
                del node.name
class ConstantCompiler:
def __init__(self, filename=None):
self.constants = {}
if os.path.isfile(filename) and filename.endswith('.py'):
self.__load_constants(filename)
def __load_constants(self, filename):
assert isinstance(filename, str) and filename.endswith('.py')
assert os.path.isfile(filename) and os.access(filename, os.R_OK)
try:
fh, filename, opts = imp.find_module(os.path.splitext(filename)[0])
mod = imp.load_module("", fh, filename, opts)
for k,v in ((x,getattr(mod,x)) for x in dir(mod) if isConstName(x)):
self.constants[k] = v
except ImportError:
print "Failed to import module '%s'" % filename
def __walk_ast(self, ast):
compiler.walk(ast, ConstantVisitor(self.constants))
def compile(self, filename):
assert isinstance(filename, str) and filename
assert os.path.isfile(filename) and filename.endswith('.py')
# Parse python source -> AST(Abstract Syntax Tree)
src = open(filename, 'r')
ast = compiler.parse(src.read())
src.close()
# Syntax Macro (Expand constant values before compile)
compiler.misc.set_filename(filename, ast)
compiler.syntax.check(ast)
self.__walk_ast(ast)
# Compile AST -> code object.
code = compiler.pycodegen.ModuleCodeGenerator(ast).getCode()
|
return CodeWrapper(filename, code)
class CodeWrapper:
    """An utility class to save a compiled code object as a .pyc file."""
    def __init__(self, src_filename, code):
        "CodeWrapper(src_filename, code) ... wraps a code object for method chaining."
        assert isinstance(src_filename, str) and src_filename
        assert os.path.isfile(src_filename) and src_filename.endswith('.py')
        assert isinstance(code, new.code)
        # Restored from a corrupted source line ("src_ | filename").
        self.src_filename = src_filename
        self.__code = code

    def getCode(self):
        "getCode() ... Returns code object."
        assert isinstance(self.__code, new.code)
        return self.__code

    def __timestamp(self, pyc_filename):
        "__timestamp(pyc_filename) ... Gets timestamp stored in .pyc file."
        assert isinstance(pyc_filename, str) and pyc_filename
        assert pyc_filename.endswith('.pyc')
        assert os.path.isfile(pyc_filename)
        assert os.access(pyc_filename, os.R_OK)
        try:
            pyc = open(pyc_filename, 'rb')
            # The first 4 bytes is a magic number.for pyc file.
            # this checks the python's version.
            if pyc.read(4) == imp.get_magic():
                # The next 4 bytes is the timestamp stored as long,
                # we need this value.
                return struct.unpack("<l", pyc.read(4))[0]
            else:
                # Not .pyc file or wrong version of python.
                # It should be always updated.
                return -1
        finally:
            pyc.close()

    def __modified(self, src, pyc):
        "__modified(src, pyc) ... Returns True if src is newer than the .pyc."
        assert isinstance(src, str) and src and src.endswith('.py')
        assert isinstance(pyc, str) and pyc and pyc.endswith('.pyc')
        assert os.path.isfile(src)
        # If not exists .pyc file then always True.
        if not os.path.isfile(pyc):
            return True
        # Is source's modified time newer than .pyc's timestamp ?
        return os.stat(src)[9] > self.__timestamp(pyc)

    def save_as(self, pyc_filename):
        "save_as(pyc_filename) ... Save current code object to .pyc file."
        assert isinstance(self.__code, new.code)
        assert isinstance(pyc_filename, str) and pyc_filename
        assert pyc_filename.endswith('.pyc')
        # Skip if the file was already updated.
        if self.__modified(self.src_filename, pyc_filename):
            # Output dump the code object to .pyc file.
            pyc = open(pyc_filename, 'wb')
            pyc.write(imp.get_magic())
            pyc.write(struct.pack('<l', time.time()))
            marshal.dump(self.__code, pyc)
            pyc.close()
        assert os.path.isfile(pyc_filename)
        assert os.path.getsize(pyc_filename) > 0
def main(const_file, *argv):
    """Compile each existing source file with constants folded from const_file."""
    const_compiler = ConstantCompiler(const_file)
    for src in argv:
        if not os.path.exists(src):
            continue
        const_compiler.compile(src).save_as(change_extension(src, ext='.pyc'))
if __name__ == '__main__':
main(*sys.argv[1:])
####
# define_constants.py
import math
PI = math.atan(1) * 4.0
DEBUG = 1
####
# test_constants.py
print PI
def foo(num):
if DEBUG:
print "debug foo(%d)" num
print num
for i in range(20): foo(i)
####
# how to run
# python compile.py define_constants.py test_constants.py
|
matiasherranz/keyczar | python/tests/keyczar_tests/util_test.py | Python | apache-2.0 | 7,078 | 0.008336 | #!/usr/bin/python
#
# Copyright 2011 LightKeeper LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Testcases to test behavior of Keyczar utils.
@author: rleftwich@lightkeeper.com (Robert Leftwich)
"""
import unittest
import base64
import StringIO
import random
import os
from keyczar import util
class Base64WSStreamingReadTest(unittest.TestCase):
    """Tests for util.IncrementalBase64WSStreamReader (web-safe base64)."""
    def __readStream(self, stream, size=-1):
        # Drain *stream* in chunks of *size* bytes (-1 means read-all per call).
        result = ''
        read_data = True
        while read_data != '':
            if size >= 0:
                read_data = stream.read(size)
            else:
                read_data = stream.read()
            if read_data:
                result += read_data
        return result

    def __testRead(self, input_data, expected_result):
        # Exercise a spread of chunk sizes, including a single full read.
        for size in [1, 5, 4096, 99999, -1]:
            stream = util.IncrementalBase64WSStreamReader(StringIO.StringIO(input_data))
            self.assertEquals(self.__readStream(stream, size), expected_result)

    def testNoPadRead(self):
        no_pad_data = 'Some inspired test datum'
        b64_data = base64.urlsafe_b64encode(no_pad_data)
        self.assertFalse(b64_data.endswith('='))
        self.__testRead(b64_data, no_pad_data)

    def testSinglePadRead(self):
        single_pad_data = 'Some inspired test data'
        b64_data = base64.urlsafe_b64encode(single_pad_data)
        self.assertFalse(b64_data.endswith('=='))
        self.assertTrue(b64_data.endswith('='))
        self.__testRead(b64_data, single_pad_data)
        self.__testRead(b64_data[:-1], single_pad_data)

    def testDoublePadRead(self):
        double_pad_data = 'All inspired test data'
        b64_data = base64.urlsafe_b64encode(double_pad_data)
        # Restored from a corrupted source line ("self.as | sertTrue").
        self.assertTrue(b64_data.endswith('=='))
        self.__testRead(b64_data, double_pad_data)
        self.__testRead(b64_data[:-1], double_pad_data)
        self.__testRead(b64_data[:-2], double_pad_data)

    def testSimulateDecrypter(self):
        # Read the ciphertext the same way Decrypter does: a couple of small
        # reads followed by large chunked reads.
        enc_data = \
          'AJehaFGwoOrkzpDCnF1zqIi721eCOMYWRmLyRyn3hxyhh_mYwpnDN6jKN057gr5lz' \
          'APFYhq9zoDwFMaGMEipEl__ECOZGeaxWw'
        # Restored from a corrupted source line ("util.Base64WS | Decode").
        expected_result = util.Base64WSDecode(enc_data)
        stream = util.IncrementalBase64WSStreamReader(StringIO.StringIO(enc_data))
        result = stream.read(5)
        result += stream.read(15)
        read_data = True
        while read_data:
            read_data = stream.read(4096)
            result += read_data
        self.assertEquals(result, expected_result)
class ParseX509Test(unittest.TestCase):
    """Tests util.ParseX509 against a known DSA public key blob."""
    def testParseX509(self):
        # Base64-encoded X.509 SubjectPublicKeyInfo structure for a DSA key.
        publickey = 'MIIBtjCCASsGByqGSM44BAEwggEeAoGBAMtPbcgvf2CAHN4djUb+gCPw/e8Xpeyc9GknS9zsJjSC' +\
            'g9vgiKBVlQBceiKAkK8SVVEaA671SS0XO575OK/sAc4j0n2t9QJP1wyGCOhV79WbwhPPEVhscpAH' +\
            'akr9IAW6WdSnwhL/seZLYRKiVGpxXJffwN+sYjH00PulKNxmz2+DAhUAxh9yFSC1uuGk6IR0tnVA' +\
            'fsPUt7cCgYBGfHU40n0HgKIkVe3XtX0G3CbiGbR++qaEjNqnfWynggqeeVkYliLaDlVrR4B0nLrH' +\
            'ZLEcUMO38YKmrwug02acp9P65IcjZ2yaioPBSmV7R6pMGOdJFR3V7Pd5R2+NcUdJd2xSffLfrChM' +\
            '82SKqa7b3DOPHkSoIdp/vJiRgikZrwOBhAACgYAVb/mCnKb7Zl12kPXYTTkCvN4JSvxxhAmb7Nea' +\
            'Xno2JVd5X/4ubp3M5QGQWvf72FXwUnSILRz6T8gRaEYtuSO3/lY4q5vOAOnVQU6KjH97SKMutwHT' +\
            'l9d+zbuoBc4YMASUZa+vKqRZ3a+d15WdlBjtEzB2NbBbnbCJKjfGSmOCbg=='
        params = util.ParseX509(publickey)
        # Expected DSA parameters (p, q, g) and public value y.
        expected = {
            'q': 1131081433714279493125447137485919672696369887159L,
            'p': long(
                '142769326027561200015702846633037171844738546159067892543586' +
                '089819169405771071960306351240015809035099105692642283483274' +
                '608612927770127886695041551320596560058685767748528711711436' +
                '409986081908184259687167758644036767240729572639963928679656' +
                '775238247767249458127174935432419747620377588855197434035039' +
                '449870211L'
            ),
            'y': long(
                '150538549060345519581302552574691577464375345311526809670286' +
                '632129938599078813029403767119263752107859825857360955403210' +
                '483822181224937742908787267712756285866859569379427477824560' +
                '428538873166504142351085994699088158365390257837477138679868' +
                '684991017869376251632455719760930339224531116502262910706269' +
                '32425326L'
            ),
            'g': long(
                '494970673920668377956046733315341969794517742954883725248168' +
                '301122249691271552495885761534156140297218760038024832456713' +
                '075235324022590936023023330095636324540517029462960508640485' +
                '442663739929843398631060563747514973648590099126190306662078' +
                '078963670549708652204163320908486137580862047218028976205111' +
                '50905775L'
            )
        }
        self.assertEquals(len(expected),len(params))
        for key in expected:
            self.assertEquals(expected[key],params[key])
class Base64WSStreamingWriteTest(unittest.TestCase):
    """Tests for util.IncrementalBase64WSStreamWriter (web-safe base64)."""
    def __testWrite(self, input_data):
        # Web-safe base64 drops the trailing '=' padding characters.
        expected_result = base64.urlsafe_b64encode(input_data)
        while expected_result[-1] == '=':
            expected_result = expected_result[:-1]
        # Exercise a spread of chunk sizes, including a single full write.
        for size in [1, 5, 4096, random.randrange(1, 9999), -1]:
            output_stream = StringIO.StringIO()
            stream = util.IncrementalBase64WSStreamWriter(output_stream)
            i = 0
            if size >= 0:
                while (i * size) <= len(input_data):
                    stream.write(input_data[i * size:(i + 1) * size])
                    i += 1
            else:
                stream.write(input_data)
            stream.flush()
            self.assertEquals(output_stream.getvalue(), expected_result)
    def testNoPadWrite(self):
        no_pad_data = 'Some inspired test datum'
        b64_data = base64.urlsafe_b64encode(no_pad_data)
        self.assertFalse(b64_data.endswith('='))
        self.__testWrite(no_pad_data)
    def testSinglePadWrite(self):
        single_pad_data = 'Some inspired test data'
        b64_data = base64.urlsafe_b64encode(single_pad_data)
        self.assertFalse(b64_data.endswith('=='))
        self.assertTrue(b64_data.endswith('='))
        self.__testWrite(single_pad_data)
    def testDoublePadWrite(self):
        double_pad_data = 'All inspired test data'
        b64_data = base64.urlsafe_b64encode(double_pad_data)
        self.assertTrue(b64_data.endswith('=='))
        self.__testWrite(double_pad_data)
    def testRandomLongerWrite(self):
        # Input larger than the internal buffer to force multiple flushes.
        random_input_data = os.urandom(random.randrange(
            util.DEFAULT_STREAM_BUFF_SIZE * 2 + 1,
            50000))
        self.__testWrite(random_input_data)
def suite():
    """Aggregate all util test cases into a single test suite."""
    loader = unittest.TestLoader()
    cases = [
        Base64WSStreamingReadTest,
        Base64WSStreamingWriteTest,
        ParseX509Test,
    ]
    return unittest.TestSuite([loader.loadTestsFromTestCase(c) for c in cases])
if __name__ == "__main__":
unittest.main(defaultTest='suite')
|
krukru/shell-me-up-scotty | scripts/generators.py | Python | apache-2.0 | 1,272 | 0.000786 | import os
from .helpers import get_user
from shutil import copyfile
def enable_vhosts():
    """Uncomment the vhosts Include line in the Homebrew httpd config.

    Reads the whole config, strips the leading '#' from the vhosts Include
    line, then rewrites the file in place.
    """
    tmp = []
    conf_path = "/usr/local/etc/httpd/httpd.conf"
    with open(conf_path, 'r') as input_file:
        for line in input_file.readlines():
            if line == "#Include /usr/local/etc/httpd/extra/httpd-vhosts.conf\n":
                line = line.replace("#", "")
            tmp.append(line)

    # Restored from a corrupted source line ("outp | ut_file").  The explicit
    # close() calls of the original were dropped: 'with' already closes.
    with open(conf_path, 'w') as output_file:
        for line in tmp:
            output_file.write(line)
def generate_hosts():
    """Copy the bundled hosts template over the system hosts file."""
    template_path = os.path.dirname(__file__) + "/../files/hosts"
    system_hosts = "/private/etc/hosts"
    copyfile(template_path, system_hosts)
def generate_vhosts():
    """Render the vhosts template for the current user.

    Currently only prints the rendered lines; the write to the httpd config
    is intentionally left commented out below.
    """
    tmp = []
    data_path = os.path.dirname(__file__) + "/../files/vhosts"
    # Restored from a corrupted source line ("httpd/e | xtra").
    vhosts_path = "/usr/local/etc/httpd/extra/httpd-vhosts.conf"
    user = get_user()
    with open(data_path, 'r') as vhost_template:
        for line in vhost_template.readlines():
            tmp.append(line.replace("{USERNAME}", user))

    print(tmp)
    # with open(vhosts_path, 'w') as output_file:
    #     for line in tmp:
    #         output_file.write(line)
    #
    #     output_file.close()
|
quantmind/pulsar-agile | tests/test_githubapi.py | Python | bsd-3-clause | 2,024 | 0 | import os
import asyncio
import tests
class TestGithubApi(tests.GithubMix, tests.AgileTest):
    """Integration tests for the GitHub repository API wrapper."""
    def test_url(self):
        repo = self.repo
        self.assertTrue(repo.api_url)
        self.assertEqual(repo.api_url,
                         '%s/repos/%s' % (self.github.api_url, tests.REPO))
        self.assertEqual(repo.client, self.github)
        self.assertEqual(repo.repo_path, tests.REPO)

    # COMMITS
    async def test_commits(self):
        repo = self.repo
        commits = await repo.commits.get_list()
        self.assertTrue(commits)
        # The API returns at most one page (100 entries).
        self.assertTrue(len(commits) <= 100)
        commit = commits[0]
        self.assertTrue(commit['sha'])

    # RELEASES
    async def test_releases(self):
        repo = self.repo
        releases = await repo.releases.get_list()
        # Restored from a corrupted source line ("| self.assertTrue").
        self.assertTrue(releases)
        self.assertTrue(len(releases) <= 100)

    # Restored from a corrupted source line ("release( | self)").
    async def test_latest_release(self):
        repo = self.repo
        release = await repo.releases.latest()
        self.assertTrue(release)

    async def test_release_by_tag(self):
        repo = self.repo
        release = await repo.releases.latest()
        self.assertTrue(release)
        bytag = await repo.releases.tag(release['tag_name'])
        self.assertEqual(bytag['id'], release['id'])

    # Disabled (double-underscore prefix): exercises asset upload/delete.
    async def __test_upload_file(self):
        repo = self.repo
        release = await repo.releases.latest()
        assets = await repo.releases.release_assets(release)
        filename = os.path.basename(__file__)
        # Check if the filename is available
        for asset in assets:
            if asset['name'] == filename:
                await repo.releases.assets.delete(asset)
                await asyncio.sleep(1)
                break
        asset = await repo.releases.upload(release, __file__, 'text/plain')
        self.assertTrue(asset)
        self.assertTrue(asset['id'])
        self.assertEqual(asset['content_type'], 'text/plain')
        # Now delete the asset
        await repo.releases.assets.delete(asset)
|
Backflipz/plugin.video.excubed | resources/lib/cache/tests.py | Python | gpl-2.0 | 4,445 | 0 | # coding=utf-8
import random
import time
import threading
import unittest
from lru_cache import LruCache
class TesLruCache(unittest.TestCase):
    """Tests for the LruCache decorator (size- and time-based eviction).

    NOTE(review): the class name looks like a typo for "TestLruCache"; it is
    kept as-is because renaming would change test discovery/import behaviour.
    (Python 2 file: uses xrange.)
    """
    def test_cache_normal(self):
        a = []
        @LruCache(maxsize=2, timeout=1)
        def foo(num):
            a.append(num)
            return num
        foo(1)
        foo(1)
        self.assertEqual(a, [1])

    def test_cache_none(self):
        # A cached None result must still count as a cache hit.
        a = []
        @LruCache(maxsize=2, timeout=1)
        def foo(num):
            a.append(num)
            return None
        foo(1)
        foo(1)
        self.assertEqual(a, [1])

    def test_cache_when_timeout(self):
        a = []
        @LruCache(maxsize=2, timeout=1)
        def foo(num):
            a.append(num)
            return num
        foo(2)
        time.sleep(2)
        foo(2)
        self.assertEqual(a, [2, 2])

    def test_cache_when_cache_is_full(self):
        # With maxsize=2, caching 3 keys evicts the least recently used one.
        a = []
        @LruCache(maxsize=2, timeout=1)
        def foo(num):
            a.append(num)
            return num
        foo(1)
        foo(2)
        foo(3)
        foo(1)
        self.assertEqual(a, [1, 2, 3, 1])

    def test_cache_with_multi_thread(self):
        a = []
        @LruCache(maxsize=10, timeout=1)
        def foo(num):
            a.append(num)
            return num
        for i in xrange(10):
            threading.Thread(target=foo, args=(i, )).start()
        main_thread = threading.currentThread()
        for t in threading.enumerate():
            if t is not main_thread:
                t.join()
        # Every key is now cached, so this call must be a hit.
        foo(random.randint(0, 9))
        self.assertEqual(set(a), set(range(10)))

    def test_cache_with_multi_thread_two_func(self):
        # Two decorated functions must keep independent caches.
        a = []
        @LruCache(maxsize=10, timeout=1)
        def foo(num):
            a.append(num)
            return num
        b = []
        @LruCache(maxsize=10, timeout=1)
        def bar(num):
            b.append(num)
            return num + 1
        for i in xrange(10):
            threading.Thread(target=foo, args=(i, )).start()
            threading.Thread(target=bar, args=(i, )).start()
        main_thread = threading.currentThread()
        for t in threading.enumerate():
            if t is not main_thread:
                t.join()
        feed = random.randint(0, 9)
        self.assertEqual(foo(feed), feed)
        self.assertEqual(bar(feed), feed + 1)
        self.assertEqual(set(a), set(range(10)))
        self.assertEqual(set(b), set(range(10)))

    # Restored from a corrupted source line ("def te | st_cache...").
    def test_cache_when_timeout_and_maxsize_is_none(self):
        a = []
        @LruCache()
        # Restored from a corrupted source line ("| def foo(num):").
        def foo(num):
            a.append(num)
            return num
        foo(1)
        foo(1)
        self.assertEqual(a, [1])

    def test_cache_when_timeout_is_none(self):
        a = []
        @LruCache(maxsize=10)
        def foo(num):
            a.append(num)
            return num
        foo(1)
        foo(1)
        self.assertEqual(a, [1])

    def test_cache_when_only_maxsize_is_none_normal(self):
        a = []
        @LruCache(timeout=2)
        def foo(num):
            a.append(num)
            return num
        foo(1)
        foo(1)
        self.assertEqual(a, [1])

    def test_cache_when_only_maxsize_is_none_timeout(self):
        a = []
        @LruCache(timeout=1)
        def foo(num):
            a.append(num)
            return num
        foo(1)
        time.sleep(2)
        foo(1)
        self.assertEqual(a, [1, 1])

    def test_cache_when_only_maxsize_is_none_normal_method(self):
        # The decorator must also work on instance methods.
        a = []
        class Func(object):
            @LruCache(timeout=2)
            def foo(self, num):
                a.append(num)
                return num
        fun = Func()
        fun.foo(1)
        fun.foo(1)
        self.assertEqual(a, [1])

    def test_cache_when_only_maxsize_is_none_normal_method_timeout(self):
        a = []
        class Func(object):
            @LruCache(timeout=1)
            def foo(self, num):
                a.append(num)
                return num
        fun = Func()
        fun.foo(1)
        time.sleep(2)
        fun.foo(1)
        self.assertEqual(a, [1, 1])

    def test_invalidate(self):
        a = []
        @LruCache()
        def foo(num):
            a.append(num)
            return num
        foo(1)
        foo(1)
        self.assertEqual(a, [1])
        # Explicit invalidation forces the next call to recompute.
        foo.invalidate(1)
        foo(1)
        self.assertEqual(a, [1, 1])
if __name__ == "__main__":
unittest.main()
|
BMeu/Orchard | orchard/system_status/system/memory.py | Python | mit | 1,413 | 0 | # -*- coding: utf-8 -*-
"""
Retrieve information on system memory (RAM).
"""
import psutil
import orchard.extensions
def free() -> int:
    """
    Get the amount of memory available for usage.

    :return: The memory in Bytes that can be used.
    """
    return psutil.virtual_memory().free
@orchard.extensions.cache.memoize()
def total() -> int:
    """
    Get the total amount of memory (memoized: it does not change at runtime).

    :return: The total amount of memory in Bytes.
    """
    return psutil.virtual_memory().total
def used() -> int:
    """
    Get the amount of memory used.

    :return: The amount of memory in Bytes that is being used.
    """
    # Restored from a corrupted source line ("total() | - free()").
    return total() - free()
def swap_available() -> int:
    """
    Get the amount of swap memory available for usage.

    :return: The amount of swap memory in Bytes that can be used.
    """
    return psutil.swap_memory().free
@orchard.extensions.cache.memoize()
def swap_total() -> int:
    """
    Get the total amount of swap memory (memoized).

    :return: The total amount of swap memory in Bytes.
    """
    # Restored from a corrupted source line ("| swap = psutil.swap_memory()").
    swap = psutil.swap_memory()
    return swap.total
def swap_used() -> int:
    """
    Get the amount of swap memory used.

    :return: The amount of swap memory in Bytes that is being used.
    """
    return psutil.swap_memory().used
|
undoware/neutron-drive | google_appengine/google/appengine/ext/mapreduce/output_writers.py | Python | bsd-3-clause | 23,260 | 0.007352 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Output writers for MapReduce."""
from __future__ import with_statement
__ | all__ = [
"BlobstoreOutputWriter",
"BlobstoreOutputWriterBase",
"BlobstoreRecordsOutputWriter",
"FileOutputWriter",
"FileOutputWriterBase",
"FileRecordsOutputWriter",
"KeyValueBlobstoreOutputWriter",
"KeyValueFileOutputWriter",
"COUNTER_IO_ | WRITE_BYTES",
"COUNTER_IO_WRITE_MSEC",
"OutputWriter",
"RecordsPool",
]
import gc
import itertools
import logging
import time
from google.appengine.api import files
from google.appengine.api.files import file_service_pb
from google.appengine.api.files import records
from google.appengine.ext.mapreduce import errors
from google.appengine.ext.mapreduce import model
from google.appengine.ext.mapreduce import operation
COUNTER_IO_WRITE_BYTES = "io-write-bytes"
COUNTER_IO_WRITE_MSEC = "io-write-msec"
class OutputWriter(model.JsonMixin):
"""Abstract base class for output writers.
Output writers process all mapper handler output, which is not
the operation.
OutputWriter's lifecycle is the following:
0) validate called to validate mapper specification.
1) init_job is called to initialize any job-level state.
2) create() is called, which should create a new instance of output
writer for a given shard
3) from_json()/to_json() are used to persist writer's state across
multiple slices.
4) write() method is called to write data.
5) finalize() is called when shard processing is done.
5) finalize_job() is called when job is completed.
"""
@classmethod
def validate(cls, mapper_spec):
"""Validates mapper specification.
Output writer parameters are expected to be passed as "output_writer"
subdictionary of mapper_spec.params. To be compatible with previous
API output writer is advised to check mapper_spec.params and issue
a warning if "output_writer" subdicationary is not present.
_get_params helper method can be used to simplify implementation.
Args:
mapper_spec: an instance of model.MapperSpec to validate.
"""
raise NotImplementedError("validate() not implemented in %s" % cls)
@classmethod
def init_job(cls, mapreduce_state):
"""Initialize job-level writer state.
Args:
mapreduce_state: an instance of model.MapreduceState describing current
job. State can be modified during initialization.
"""
raise NotImplementedError("init_job() not implemented in %s" % cls)
@classmethod
def finalize_job(cls, mapreduce_state):
"""Finalize job-level writer state.
Args:
mapreduce_state: an instance of model.MapreduceState describing current
job. State can be modified during finalization.
"""
raise NotImplementedError("finalize_job() not implemented in %s" % cls)
@classmethod
def from_json(cls, state):
"""Creates an instance of the OutputWriter for the given json state.
Args:
state: The OutputWriter state as a dict-like object.
Returns:
An instance of the OutputWriter configured using the values of json.
"""
raise NotImplementedError("from_json() not implemented in %s" % cls)
def to_json(self):
"""Returns writer state to serialize in json.
Returns:
A json-izable version of the OutputWriter state.
"""
raise NotImplementedError("to_json() not implemented in %s" %
self.__class__)
@classmethod
def create(cls, mapreduce_state, shard_number):
"""Create new writer for a shard.
Args:
mapreduce_state: an instance of model.MapreduceState describing current
job. State can be modified.
shard_number: shard number as integer.
"""
raise NotImplementedError("create() not implemented in %s" % cls)
def write(self, data, ctx):
"""Write data.
Args:
data: actual data yielded from handler. Type is writer-specific.
ctx: an instance of context.Context.
"""
raise NotImplementedError("write() not implemented in %s" %
self.__class__)
def finalize(self, ctx, shard_number):
"""Finalize writer shard-level state.
Args:
ctx: an instance of context.Context.
shard_number: shard number as integer.
"""
raise NotImplementedError("finalize() not implemented in %s" %
self.__class__)
@classmethod
def get_filenames(cls, mapreduce_state):
"""Obtain output filenames from mapreduce state.
Args:
mapreduce_state: an instance of model.MapreduceState
Returns:
list of filenames this writer writes to or None if writer
doesn't write to a file.
"""
raise NotImplementedError("get_filenames() not implemented in %s" % cls)
_FILES_API_FLUSH_SIZE = 128*1024
_FILES_API_MAX_SIZE = 1000*1024
def _get_params(mapper_spec, allowed_keys=None):
"""Obtain output writer parameters.
Utility function for output writer implementation. Fetches parameters
from mapreduce specification giving appropriate usage warnings.
Args:
mapper_spec: The MapperSpec for the job
allowed_keys: set of all allowed keys in parameters as strings. If it is not
None, then parameters are expected to be in a separate "output_writer"
subdictionary of mapper_spec parameters.
Returns:
mapper parameters as dict
Raises:
BadWriterParamsError: if parameters are invalid/missing or not allowed.
"""
if "output_writer" not in mapper_spec.params:
message = (
"Output writer's parameters should be specified in "
"output_writer subdictionary.")
if allowed_keys:
raise errors.BadWriterParamsError(message)
else:
logging.warning(message)
params = mapper_spec.params
params = dict((str(n), v) for n, v in params.iteritems())
else:
if not isinstance(mapper_spec.params.get("output_writer"), dict):
raise BadWriterParamsError(
"Output writer parameters should be a dictionary")
params = mapper_spec.params.get("output_writer")
params = dict((str(n), v) for n, v in params.iteritems())
if allowed_keys:
params_diff = set(params.keys()) - allowed_keys
if params_diff:
raise errors.BadWriterParamsError(
"Invalid output_writer parameters: %s" % ",".join(params_diff))
return params
class _FilePool(object):
"""Pool of file append operations."""
def __init__(self, flush_size_chars=_FILES_API_FLUSH_SIZE, ctx=None):
"""Constructor.
Args:
flush_size_chars: buffer flush size in bytes as int. Internal buffer
will be flushed once this size is reached.
ctx: mapreduce context as context.Context. Can be null.
"""
self._flush_size = flush_size_chars
self._append_buffer = {}
self._size = 0
self._ctx = ctx
def __append(self, filename, data):
"""Append data to the filename's buffer without checks and flushes."""
self._append_buffer[filename] = (
self._append_buffer.get(filename, "") + data)
self._size += len(data)
def append(self, filename, data):
"""Append data to a file.
Args:
filename: the name of the file as string.
data: data as string.
"""
if self._size + len(data) > self._flush_size:
self.flush()
if len(data) > _FILES_API_MAX_SIZE:
raise errors.Error(
"Can't write more than %s bytes in one request: "
"risk of writes interleaving." % _FILES_API_MAX_SIZE)
else:
self.__append(filename, data)
if self._size > self._flush_size:
s |
civisanalytics/civis-python | civis/tests/test_io.py | Python | bsd-3-clause | 61,250 | 0 | import tempfile
import csv
import gzip
import io
import json
import os
from io import StringIO, BytesIO
from unittest import mock
from tempfile import TemporaryDirectory
import zipfile
import pytest
import requests
import vcr
try:
import pandas as pd
has_pandas = True
except ImportError:
has_pandas = False
import civis
from civis.io import _files
from civis.io._tables import _File
from civis.response import Response
from civis.base import CivisAPIError, CivisImportError, EmptyResultError
from civis.resources import API_SPEC
from civis.resources._resources import get_api_spec, generate_classes
from civis.tests.testcase import (CivisVCRTestCase,
cassette_dir,
POLL_INTERVAL)
from civis.tests.mocks import create_client_mock
api_import_str = 'civis.resources._resources.get_api_spec'
class MockAPIError(CivisAPIError):
"""A fake API error with only a status code"""
def __init__(self, sc):
self.status_code = sc
@mock.patch(api_import_str, return_value=API_SPEC)
class ImportTests(CivisVCRTestCase):
# Note that all functions tested here should use a
# `polling_interval=POLL_INTERVAL` input. This lets us use
# sensible polling intervals when recording, but speed through
# the calls in the VCR cassette when testing later.
@pytest.fixture(autouse=True)
def client_mock(self):
self.mock_client = create_client_mock()
@classmethod
def tearDownClass(cls):
get_api_spec.cache_clear()
generate_classes.cache_clear()
@classmethod
@mock.patch(api_import_str, return_value=API_SPEC)
def setUpClass(cls, *mocks):
get_api_spec.cache_clear()
generate_classes.cache_clear()
setup_vcr = vcr.VCR(filter_headers=['Authorization'])
setup_cassette = os.path.join(cassette_dir(), 'io_setup.yml')
with setup_vcr.use_cassette(setup_cassette):
# create a file
buf = StringIO()
buf.write('a,b,c\n1,2,3')
buf.seek(0)
file_id = civis.io.file_to_civis(buf, 'somename')
cls.file_id = file_id
# create the table. assumes this function works.
sql = """
DROP TABLE IF EXISTS scratch.api_client_test_fixture;
CREATE TABLE scratch.api_client_test_fixture (
a int,
b int,
c int
);
INSERT INTO scratch.api_client_test_fixture
VALUES (1,2,3);
"""
res = civis.io.query_civis(sql, 'redshift-general',
polling_interval=POLL_INTERVAL)
res.result() # block
# create an export to check get_url. also tests export_csv
with TemporaryDirectory() as temp_dir:
fname = os.path.join(temp_dir, 'tempfile')
sql = "SELECT * FROM scratch.api_client_test_fixture"
database = 'redshift-general'
result = civis.io.civis_to_csv(fname, sql, database,
polling_interval=POLL_INTERVAL)
result = result.result()
cls.export_url = result['output'][0]['path']
assert result.state == 'succeeded'
cls.export_job_id = result.sql_id
@pytest.mark.file_to_civis
@mock.patch(api_import_str, return_value=API_SPEC)
def test_zip_member_to_civis(self, *mocks):
with TemporaryDirectory() as temp_dir:
fname = os.path.join(temp_dir, 'tempfile')
with zipfile.ZipFile(fname, 'w', zipfile.ZIP_DEFLATED) as zip_file:
archive_name = 'archive_name'
zip_file.writestr(archive_name, 'a,b,c\n1,2,3')
zip_member = zip_file.namelist()[0]
with zip_file.open(zip_member) as zip_member_buf:
result = civis.io.file_to_civis(zip_member_buf, zip_member)
assert isinstance(result, int)
@mock.patch(api_import_str, return_value=API_SPEC)
def test_text_file_to_civis(self, *mocks):
buf = StringIO()
buf.write('a,b,c\n1,2,3')
buf.seek(0)
result = civis.io.file_to_civis(buf, 'somename')
assert isinstance(result, int)
@pytest.mark.file_to_civis
@mock.patch(api_import_str, return_value=API_SPEC)
def test_bytes_file_to_civis(self, *mocks):
buf = BytesIO()
buf.write(b'a,b,c\n1,2,3')
buf.seek(0)
result = civis.io.file_to_civis(buf, 'somename')
assert isinstance(result, int)
@pytest.mark.file_to_civis
@mock.patch(api_import_str, return_value=API_SPEC)
def test_large_file_to_civis(self, *mocks):
curr_size = civis.io._files.MIN_MULTIPART_SIZE
civis.io._files.MIN_MULTIPART_SIZE = 1
with TemporaryDirectory() as temp_dir:
fname = os.path.join(temp_dir, 'tempfile')
with open(fname, 'w+b') as tmp:
tmp.write(b'a,b,c\n1,2,3')
with open(fname, 'r+b') as tmp:
result = civis.io.file_to_civis(tmp, fname)
civis.io._files.MIN_MULTIPART_SIZE = curr_size
assert isinstance(result, int)
@pytest.mark.civis_to_file
@mock.patch(api_import_str, return_value=API_SPEC)
def test_civis_to_file(self, *mocks):
buf = BytesIO()
civis.io.civis_to_file(self.file_id, buf)
buf.seek(0)
assert buf.read() == b'a,b,c\n1,2,3'
@pytest.mark.csv_to_civis
@mock.patch('civis.io._tables.file_to_civis')
@mock.patch('civis.io._tables.civis_file_to_table')
def test_csv_to_civis(self, m_civis_file_to_table, m_file_to_civis,
_m_get_api_spec):
mock_file_id = 42
m_file_to_civis.return_value = mock_file_id
mock_future = mock.create_autospec(civis.futures.CivisFuture,
spec_set=True)
m_civis_file_to_table.return_value = mock_future
table = "scratch.api_client_test_fixture"
database = 'redshift-general'
fname = 'a/tempfile'
with mock.patch.object(civis.io._tables, 'open',
mock.mock_open(read_data='some,test,data'),
create=True) as m_open:
result = civis.io.csv_to_civis(fname, database, table,
client=self.mock_client,
existing_table_rows='truncate')
m_file_to_civis.assert_called_once_with(m_open.return_value,
'tempfile',
client=self.mock_client)
assert result == mock_future
m_civis_file_to_table.assert_called_once_with(
mock_file_id, database, table,
client=self.mock_client,
max_errors=None,
existing_table_rows='truncate',
diststyle=None, distkey=None,
sortkey1=None, sortkey2=None,
table_columns=None,
delimiter=",", headers=None,
primary_keys=None,
last_modified_keys=None,
| escaped=False, execution='imm | ediate',
credential_id=None, polling_interval=None,
hidden=True
)
@pytest.mark.civis_file_to_table
@mock.patch('civis.io._tables._process_cleaning_results')
@mock.patch('civis.io._tables._run_cleaning')
def test_civis_file_to_table_table_exists(self,
m_run_cleaning,
m_process_cleaning_results,
_m_get_api_spec):
table = "scratch.api_client_test_fixture"
database = 'redshift-general'
mock_file_id = 1234
mock_cleaned_file_id = 1235
mock_import_id = 8675309
self.mock_client.imports.post_files_csv.return_value\
.id = mock_import_id
self.mock_client.get_database_id.return_value = 42
self.mock_client |
shaunduncan/helga-wiki-whois | helga_wiki_whois.py | Python | mit | 487 | 0.004107 | from helga import settings
from helga.plugins import com | mand
@command('showme', aliases=['whois', 'whothehellis'],
help="Show a URL for the user's intranet page. Usage: helga (showme|whois|whothehellis) <nick>")
def wiki_whois(client, channel, nick, message, cmd, args): # pragma: no cover
"""
Show the intranet page for a user. Settings must have a WIKI_URL value with formatt | able
substring named {user}
"""
return settings.WIKI_URL.format(user=args[0])
|
Asparagirl/ArchiveBot | uploader/uploader.py | Python | mit | 9,855 | 0.003551 | #!/usr/bin/python3
"""uploader.py: upload WARC files toward the Internet Archive
rsync mode (RSYNC_URL set): upload everything to an rsync endpoint
such as fos.
s3 mode (S3_URL set): upload everything directly to the Internet Archive
"""
from __future__ import print_function
import os
import time
import subprocess
import sys
import re
import datetime
import json
import hashlib
import requests
class Params:
"""Encapsulation of global parameters from environment and derivation
"""
def __init__(self):
if len(sys.argv) > 1:
self.directory = sys.argv[1]
elif os.environ.get('FINISHED_WARCS_DIR') != None:
self.directory = os.environ['FINISHED_WARCS_DIR']
else:
raise RuntimeError('No directory specified (set FINISHED_WARCS_DIR '
'or specify directory on command line)')
self.url = os.environ.get('RSYNC_URL')
if self.url != None:
if '/localhost' in self.url or '/127.' in self.url:
raise RuntimeError('Won\'t let you upload to localhost because I '
'remove files after uploading them, and you '
'might be uploading to the same directory')
self.mode = 'rsync'
if self.url is None:
self.url = os.environ.get('S3_URL')
if self.url is not None:
self.mode = 's3'
if self.url is None:
raise RuntimeError('Neither RSYNC_URL nor S3_URL are set - nowhere to '
'upload to. Hint: use'
'S3_URL=https://s3.us.archive.org')
if self.mode == 's3': #parse IA-S3-specific options
self.ia_collection = os.environ.get('IA_COLLECTION')
if self.ia_collection is None:
raise RuntimeError('Must specify IA_COLLECTION if using IA S3 '
'(hint: ArchiveBot)')
self.ia_item_title = os.environ.get('IA_ITEM_TITLE')
if self.ia_item_title is None:
raise RuntimeError('Must specify IA_ITEM_TITLE if using IA S3 '
'(hint: "Archiveteam: Archivebot $pipeline_name '
'GO Pack")')
self.ia_auth = os.environ.get('IA_AUTH')
if self.ia_auth is None:
raise RuntimeError('Must specify IA_AUTH if using IA S3 '
'(hint: access_key:secret_key)')
self.ia_item_prefix = os.environ.get('IA_ITEM_PREFIX')
if self.ia_auth is | None:
raise RuntimeError('Must specify IA_ITEM_PREFIX if using IA S3 '
'(hint: archiveteam_archivebot_go_$pipeline_name'
'_}')
self.ia_access = os.environ.get('IA_ACCESS')
if self.ia_access is None:
raise RuntimeError('Must specify IA_ACCESS if using IA S3 '
| '(hint: your access key)')
self.wait = os.environ.get('WAIT', 5)
def try_mkdir(path):
try:
os.mkdir(path)
except OSError:
pass
def should_upload(basename):
assert not '/' in basename, basename
return not basename.startswith('.') and \
(basename.endswith('.warc.gz') or basename.endswith('.json') or basename.endswith('.txt'))
def parse_name(basename):
k = re.split(r'(.*)-\w+-(\d{8})-\d{6}-[^.]*\.warc.gz', basename) # extract domain name and date
if len(k) != 4:
return {'dns': 'UNKNOWN', 'date': datetime.datetime.now().strftime("%Y%m%d")}
return {'dns': k[1], 'date': k[2]}
def ia_upload_allowed(s3_url, accesskey, bucket=''):
try:
quota_url = '{}/?check_limit=1&accesskey={}&bucket={}'.format(s3_url, accesskey, bucket)
resp = requests.get(url=quota_url)
data = json.loads(resp.text)
except (requests.RequestException, json.JSONDecodeError) as err:
print('Could not get throttling status - assuming IA is down')
print('Exception: {}'.format(err))
return False
if 'over_limit' in data and data['over_limit'] is not 0:
print('IA S3 API notifies us we are being throttled (over_limit)')
return False
if 'detail' in data and 'rationing_engaged' in data['detail'] \
and data['detail']['rationing_engaged'] is not 0:
quota_our_remaining = data['detail']['accesskey_ration'] - \
data['detail']['accesskey_tasks_queued']
quota_global_remaining = data['detail']['total_global_limit'] - \
data['detail']['total_tasks_queued']
quota_bucket_remaining = data['detail']['bucket_ration'] - \
data['detail']['bucket_tasks_queued']
if quota_our_remaining < 10 or quota_global_remaining < 10 or quota_bucket_remaining < 5:
print('IA S3 API notifies us rationing is engaged with little room for new work!')
print('Our outstanding jobs: {}'.format(data['detail']['accesskey_tasks_queued']))
print('Our remaining quota: {}'.format(quota_our_remaining))
print('Global remaining quota: {}'.format(quota_global_remaining))
print('Limit reason given: {}'.format(data['detail']['limit_reason']))
return False
else:
print('IA S3 API notifies us rationing is engaged but we have '
'room for another job.')
return True
def file_md5(fname):
md5 = hashlib.md5()
with open(fname, "rb") as inputfile:
for block in iter(lambda: inputfile.read(16384), b""):
md5.update(block)
return md5.hexdigest()
def ia_s3_ship(fname, basename, item, params: Params):
bucket_unescaped_name = params.ia_item_prefix + '_' + \
item['dns'][-64:] + '_' + item['date']
ia_upload_bucket = re.sub(r'[^0-9a-zA-Z-]+', '_', bucket_unescaped_name)
if not ia_upload_allowed(params.url, params.ia_access, ia_upload_bucket):
# IA is throttling
# At some point, an ambitious person could try a file belonging
# in a different bucket if ia_upload_allowed denied this one
return 1
size_hint = str(os.stat(fname).st_size)
compat_filename = re.sub(r'[^0-9a-zA-Z-.]+', '_', basename)[-64:]
if compat_filename is '' or compat_filename[0] is '_':
# IA filenames cannot be empty or start with underscore
compat_filename = 'z' + compat_filename[1:]
target = params.url + '/' + ia_upload_bucket + '/' + \
compat_filename
md5sum = file_md5(fname)
return subprocess.call([
"curl", "-v", "--location", "--fail",
"--speed-limit", "1", "--speed-time", "900",
"--header", "Content-MD5: " + md5sum,
"--header", "x-archive-queue-derive:1",
"--header", "x-amz-auto-make-bucket:1",
"--header", "x-archive-meta-sponsor:Internet Archive",
"--header", "x-archive-meta-collection:" + params.ia_collection,
"--header", "x-archive-meta-mediatype:web",
"--header", "x-archive-meta-subject:archivebot",
"--header", "x-archive-meta-title:" + params.ia_item_title +
' ' + item['dns'] + ' ' + item['date'],
"--header", "x-archive-meta-date:" +
item['date'][0:4] + '-' +
item['date'][4:6] + '-' +
item['date'][6:8],
"--header", "x-archive-size-hint:" + size_hint,
"--header", "authorization: LOW " + params.ia_auth,
"-o", "/dev/stdout",
"--upload-file", fname,
target])
def main():
params = Params()
print("CHECK THE UPLOAD TARGET: %s as %s endpoint" % (params.url, params.mode))
print()
print("Upload target must reliably store data")
print("Each local file will removed after upload")
print("Hit CTRL-C immediately if upload target is incorrect")
print()
uploading_dir = os.path.join(params.directory, "_uploading")
try_mkdir(uploading_dir)
need_wait = True
while True:
if need_wait:
print("Waiting {} seconds".format(params.wait))
time.sleep(params.wait)
need |
rajashreer7/autotest-client-tests | linux-tools/nss_softokn/nss_softokn.py | Python | gpl-2.0 | 1,474 | 0.007463 | #!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error
class nss_softokn(test.test):
"""
Autotest module for testing basic functionality
of nss_softokn
@author Anitha MallojiRao amalloji@in.ibm.com ##
"""
version = 1
nfail = 0
path = ''
def initialize(self, test_path=''):
"""
Sets the overall failure counter for | the test.
"""
self.nfail = 0
ret_val = subprocess.Popen(['make', 'PERL=/usr/bin/perl', 'tests'], cwd="%s/nss_softokn/test" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
logging.info('\n Test initialize successfully')
def run_once(self, test_path=''):
| """
Trigger test run
"""
try:
os.environ["LTPBIN"] = "%s/shared" %(test_path)
ret_val = subprocess.Popen(['./nss-softokn.sh'], cwd="%s/nss_softokn" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
except error.CmdError, e:
self.nfail += 1
logging.error("Test Failed: %s", e)
def postprocess(self):
if self.nfail != 0:
logging.info('\n nfails is non-zero')
raise error.TestError('\nTest failed')
else:
logging.info('\n Test completed successfully ')
|
Ziqi-Li/bknqgis | pandas/pandas/io/parsers.py | Python | gpl-2.0 | 119,743 | 0.0001 | """
Module contains tools for processing files into DataFrames or other objects
"""
from __future__ import print_function
from collections import defaultdict
import re
import csv
import sys
import warnings
import datetime
from textwrap import fill
import numpy as np
from pandas import compat
from pandas.compat import (range, lrange, PY3, StringIO, lzip,
zip, string_types, map, u)
from pandas.core.dtypes.common import (
is_integer, _ensure_object,
is_list_like, is_integer_dtype,
is_float, is_dtype_equal,
is_object_dtype, is_string_dtype,
is_scalar, is_categorical_dtype)
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.index import Index, MultiIndex, RangeIndex
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.categorical import Categorical
from pandas.core import algorithms
from pandas.core.common import AbstractMethodError
from pandas.io.date_converters import generic_parser
from pandas.errors import ParserWarning, ParserError, EmptyDataError
from pandas.io.common import (get_filepath_or_buffer, is_file_like,
_validate_header_arg, _get_handle,
UnicodeReader, UTF8Recoder, _NA_VALUES,
BaseIterator, _infer_compression)
from pandas.core.tools import datetimes as tools
from pandas.util._decorators import Appender
import pandas._libs.lib as lib
import pandas._libs.parsers as parsers
# BOM character (byte order mark)
# This exists at the beginning of a file to indicate endianness
# of a file (stream). Unfortunately, this marker screws up parsing,
# so we need to remove it if we see it.
_BOM = u('\ufeff')
_parser_params = """Also supports optionally iterating or breaking of the file
into chunks.
Additional help can be found in the `online docs for IO Tools
<http://pandas.pydata.org/pandas-docs/stable/io.html>`_.
Parameters
----------
filepath_or_buffer : str, pathlib.Path, py._path.local.LocalPath or any \
object with a read() method (such as a file handle or StringIO)
The string could be a URL. Valid URL schemes include http, ftp, s3, and
file. For file URLs, a host is expected. For instance, a local file could
be file ://localhost/path/to/table.csv
%s
delim_whitespace : boolean, default False
Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
used as the sep. Equivalent to setting ``sep='\s+'``. If this option
is set to True, nothing should be passed in for the ``delimiter``
parameter.
.. versionadded:: 0.18.1 support for the Python parser.
header : int or list of ints, default 'infer'
Row number(s) to use as the column names, and the start of the data.
Default behavior is as if set to 0 if no ``names`` passed, otherwise
``None``. Explicitly pass ``header=0`` to be able to replace existing
names. The header can be a list of integers that specify row locations for
a multi-index on the columns e.g. [0,1,3]. Intervening rows that are not
specified will be skipped (e.g. 2 in this example is skipped). Note that
this parameter ignores commented lines and empty lines if
``skip_blank_lines=True``, so header=0 denotes the first line of data
rather than the first line of the file.
names : array-like, default None
List of column names to use. If file contains no header row, then you
should explicitly pass header=None. Duplicates in this list are not
allowed unless mangle_dupe_cols=True, which is the default.
index_col : int or sequence or False, default None
Column to use as the row labels of the DataFrame. If a sequence is given, a
MultiIndex is used. If you have a malformed file with delimiters at the end
of each line, you might consider index_col=False to force pandas to _not_
use the first column as the index (row names)
usecols : array-like or callable, default None
Return a subset of the columns. If array-like, all elements must either
be positional (i.e. integer indices into the document columns) or strings
that correspond to column names provided either by the user in `names` or
inferred from the document header row(s). For example, a valid array-like
`usecols` parameter would be [0, 1, 2] or ['foo', 'bar', 'baz'].
If callable, the callable function will be evaluated against the column
names, returning names whe | re the callable function evaluates to True. An
example of a valid callable argument would be ``lambda x: x.upper() in
['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
parsing time and lower memory usage.
as_recarray : boolean, default False
.. deprecated:: 0.19.0
Please call `pd.read_csv(...).to_records()` instead.
R | eturn a NumPy recarray instead of a DataFrame after parsing the data.
If set to True, this option takes precedence over the `squeeze` parameter.
In addition, as row indices are not available in such a format, the
`index_col` parameter will be ignored.
squeeze : boolean, default False
If the parsed data only contains one column then return a Series
prefix : str, default None
Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ...
mangle_dupe_cols : boolean, default True
Duplicate columns will be specified as 'X.0'...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}
Use `str` or `object` to preserve and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
%s
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can either
be integers or column labels
true_values : list, default None
Values to consider as True
false_values : list, default None
Values to consider as False
skipinitialspace : boolean, default False
Skip spaces after delimiter.
skiprows : list-like or integer or callable, default None
Line numbers to skip (0-indexed) or number of lines to skip (int)
at the start of the file.
If callable, the callable function will be evaluated against the row
indices, returning True if the row should be skipped and False otherwise.
An example of a valid callable argument would be ``lambda x: x in [0, 2]``.
skipfooter : int, default 0
Number of lines at bottom of file to skip (Unsupported with engine='c')
skip_footer : int, default 0
.. deprecated:: 0.19.0
Use the `skipfooter` parameter instead, as they are identical
nrows : int, default None
Number of rows of file to read. Useful for reading pieces of large files
na_values : scalar, str, list-like, or dict, default None
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted as
NaN: '""" + fill("', '".join(sorted(_NA_VALUES)),
70, subsequent_indent=" ") + """'.
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to.
na_filter : boolean, default True
Detect missing value markers (empty strings and the value of na_values). In
data without any NAs, passing na_filter=False can improve the performance
of reading a large file
verbose : boolean, default False
Indicate number of NA values placed in non-numeric columns
skip_blank_lines : boolean, default True
If True, skip over blank lines rather than interpreting as NaN values
parse_dates : boolean or list of ints or names or list of lists or dict, \
default False
* boolean. If True -> try parsing the index.
* list of ints or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column |
codekoala/django-pendulum | pendulum/tests/pause.py | Python | mit | 3,889 | 0.001543 | from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from pendulum.tests.utils import VALID_PASSWORD, VALID_USER, ffd
from pendulum.models import Entry
class PauseTestCase(TestCase):
"""
Check to make sure that entries can be paused and unpaused as expected.
Rules for pausing an entry:
- Must be owned by user
- If paused, unpause it
- Entry must be open
"""
fixtures = ['activities', 'projects', 'users', 'entries']
first_run = True
def setUp(self):
self.client = Client()
if self.first_run:
# try pausing an entry before being logged in
response = self.get_response(2)
self.assertEquals(response.status_code, 302)
# log in
response = self.client.login(username=VALID_USER, password=VALID_PASSWORD)
self.assertTrue(response)
if self.first_run:
# try pausing an entry that doesn't exist
response = self.get_response(1000)
self.assertEquals(response.status_code, 302)
self.first_run = False
def get_response(self, id):
"""
Retrieve the response of a GET request
"""
return self.client.get(reverse('pendulum-toggle-paused', args=[id]))
def testPauseOtherUsersEntry(self):
#--------------------------------------------------
# 1. ENTRY THAT BELONGS TO OTHER USER
id = 1
# check to make sure that log entry isn't paused
entry = Entry.objects.get(pk=id)
self.assertFalse(entry.is_paused)
# try pausing an entry that doesn't belong to the current user
response = self.get_response(id)
self.assertEquals(response.status_code, 302)
# check to make sure that log entry still isn't paused
entry = Entry.objects.get(pk=id)
self.assertFalse(entry.is_paused)
def testAlreadyPausedEntry(self):
#- | -------------------------------------------------
# 2. ENTRY THAT IS ALREADY PAUSED
id = 2
# check to make sure that | log entry is paused
entry = Entry.objects.get(pk=id)
self.assertTrue(entry.is_paused)
# try pausing an already paused entry
response = self.get_response(id)
self.assertEquals(response.status_code, 302)
# check to make sure that log entry is no longer paused
entry = Entry.objects.get(pk=id)
self.assertFalse(entry.is_paused)
def testAlreadyClosedEntry(self):
#--------------------------------------------------
# 3. ENTRY THAT IS ALREADY CLOSED
id = 3
# check to make sure that log entry is closed and not paused
entry = Entry.objects.get(pk=id)
self.assertTrue(entry.is_closed)
self.assertFalse(entry.is_paused)
# try pausing an already closed entry
response = self.get_response(id)
self.assertEquals(response.status_code, 302)
# check to make sure that log entry is still closed and not paused
entry = Entry.objects.get(pk=id)
self.assertTrue(entry.is_closed)
self.assertFalse(entry.is_paused)
def testOpenUnpausedEntry(self):
#--------------------------------------------------
# 4. ENTRY THAT IS OPEN AND NOT PAUSED
id = 4
# check to make sure that log entry is open and not paused
entry = Entry.objects.get(pk=id)
self.assertFalse(entry.is_closed)
self.assertFalse(entry.is_paused)
# try pausing an open entry owned by the user
response = self.get_response(id)
self.assertEquals(response.status_code, 302)
# make sure the entry is still open but now paused
entry = Entry.objects.get(pk=id)
self.assertFalse(entry.is_closed)
self.assertTrue(entry.is_paused)
|
oshepherd/eforge | eforge/views.py | Python | isc | 2,416 | 0.003313 | # -*- coding: utf-8 -*-
# EForge project management system, Copyright © 2010, Element43
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# Create your views here.
from django.http import HttpResponse, Http404
from django.shortcuts import get_object_or_404, render_to_response, redirect
from django.template import RequestContext

from eforge import plugins
from eforge.models import Project
from eforge.decorators import project_page, has_project_perm, user_page, group_page
@project_page
def summary(request, project):
    """Render the project summary page."""
    context = {'project': project}
    return render_to_response('eforge/summary.html', context,
                              context_instance=RequestContext(request))
@project_page
@has_project_perm('eforge.manage')
def manage(request, project):
    """Render the project management page, dispatching to plugin tabs.

    Without a ``pg`` query parameter, lists every registered 'managepg'
    plugin tab.  With ``pg=<name>``, delegates rendering to that tab's
    view; unknown tab names raise Http404.
    """
    tabs = plugins.provider['managepg']
    # Removed leftover debug statement ``print tabs``.
    if 'pg' not in request.GET:
        return render_to_response('eforge/manage.html', {
            'project': project,
            'tabs': tabs.items(),
        }, context_instance=RequestContext(request))
    pg = request.GET['pg']
    if pg not in tabs:
        # Http404 must be imported from django.http for this to work.
        raise Http404()
    return tabs[pg]['view'](request, project)
@user_page
def user(request, user):
    """Render the profile page for *user*."""
    context = {'pguser': user}
    return render_to_response('eforge/user.html', context)
@group_page
def group(request, group):
    """Render the detail page for *group*."""
    context = {'group': group}
    return render_to_response('eforge/group.html', context)
def about(request):
    """Render the "about" page listing plugins and platform versions."""
    import platform
    import django
    import eforge
    versions = {
        'plugins': plugins.plugins,
        'eforgever': eforge.get_version(),
        'djangover': django.get_version(),
        'pyver': platform.python_version(),
    }
    return render_to_response('eforge/about.html', versions,
                              context_instance=RequestContext(request))
|
twatteyne/dustlink_academy | views/web/dustWeb/viz/VizTimeLine.py | Python | bsd-3-clause | 3,559 | 0.007305 | import logging
class NullHandler(logging.Handler):
    """A logging handler that silently discards every record."""

    def emit(self, record):
        # Intentionally do nothing: records routed here are dropped.
        pass
# Module-level logger: errors only, and silenced via NullHandler so that
# applications which never configure logging see no "no handler" warnings.
log = logging.getLogger('VizTimeLine')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import VizMorris
class VizTimeLine(VizMorris.VizMorris):
    """Time-line visualization.

    Supplies HTML/JS template fragments that render a Morris.js line
    chart which periodically polls a JSON resource ({RESOURCE}) and
    redraws itself every {RELOAD_PERIOD} milliseconds.  Placeholders
    ({VIZID}, {RESOURCE}, {RELOAD_PERIOD}) are filled in by the base
    class machinery; literal JS braces are doubled for str.format.
    """

    #======================== header ==========================================

    templateHeader = '''
        <style type="text/css">
        </style>
    '''

    #======================== body ============================================

    templateBody = '''
        <script type='text/javascript'>
            // wait for the page to be loaded, then create the form (once)
            $(document).ready(getData_{VIZID});
            var graph_{VIZID},
                autorefresh_{VIZID};
            graph_{VIZID} = 0;
            autorefresh_{VIZID} = true;
            //======================= get form ========================================
            function getData_{VIZID}() {{
                var statusDivId;
                try {{
                    if (autorefresh_{VIZID}) {{
                        // update the status message
                        statusDivId = 'status_div_{VIZID}';
                        updateStatus(statusDivId,'busy','');
                        // get updated data from the server and execute
                        jQuery.ajax({{
                            type: 'GET',
                            url: '/{RESOURCE}/',
                            timeout: 5*1000,
                            statusCode: {{
                                200: function(response) {{
                                    try {{
                                        drawData_{VIZID}(response);
                                    }} catch(err) {{
                                        throw err;
                                    }}
                                    updateStatus(statusDivId,'success','');
                                }},
                                400: function() {{
                                    updateStatus(statusDivId,'failure','Malformed.');
                                }},
                                401: function() {{
                                    updateStatus(statusDivId,'failure','Access denied.');
                                }},
                                404: function() {{
                                    updateStatus(statusDivId,'failure','Resource not found.');
                                }},
                                500: function() {{
                                    updateStatus(statusDivId,'failure','Internal server error.');
                                }}
                            }},
                            error: function(jqXHR, textStatus, errorThrown) {{
                                if (textStatus=='timeout') {{
                                    updateStatus(statusDivId,'failure','Server unreachable.');
                                }}
                            }}
                        }});
                    }}
                }} catch (err) {{
                    updateStatus(statusDivId,'failure',err.toString());
                }}
            }}
            function drawData_{VIZID}(data) {{
                if (graph_{VIZID}==0) {{
                    graph_{VIZID} = Morris.Line({{
                        element: 'chart_div_{VIZID}',
                        data: data.datapoints,
                        xkey: 'timestamp',
                        ykeys: data.metadata.axis,
                        labels: data.metadata.axis,
                        xLabels: "auto",
                        hideHover: true,
                    }});
                }} else {{
                    graph_{VIZID}.setData(data.datapoints);
                }}
            }}
            setInterval(getData_{VIZID},{RELOAD_PERIOD});
        </script>
    '''
|
idlesign/django-admirarchy | admirarchy/tests/conftest.py | Python | bsd-3-clause | 174 | 0 | from pytest_djangoapp import configure_djangoapp_plugin
# Register the pytest-djangoapp plugin with a minimal Django settings
# override; admin_contrib=True enables the contrib admin for the tests.
pytest_plugins = configure_djangoapp_plugin(
    {
        'LANGUAGE_CODE': 'ru',
    },
    admin_contrib=True,
)
|
byxor/limp | tests/unit/syntax_tree_test.py | Python | gpl-3.0 | 14,784 | 0.000473 | import limp.syntax_tree as SyntaxTree
import limp.tokens as Tokens
from nose.tools import assert_equals
TT = SyntaxTree.Types
data = [
([], None),
# Booleans
('true', (TT.Boolean, 'true')),
('false', (TT.Boolean, 'false')),
# Integers
('100', (TT.Integer, '100')),
('500', (TT.Integer, '500')),
# Positive/Negative Integers
('+100', (TT.UnaryPositive, (TT.Integer, '100'))),
('-500', (TT.UnaryNegative, (TT.Integer, '500'))),
# Floats
('0.123', (TT.Float, '0.123')),
('.99', (TT.Float, '.99')),
# Positive/Negative Floats
('+99.8', (TT.UnaryPositive, (TT.Float, '99.8'))),
('-0.123', (TT.UnaryNegative, (TT.Float, '0.123'))),
# Hexadecimals
('0xDeaDa55', (TT.Hexadecimal, '0xDeaDa55')),
('0xBEEF123aaa', (TT.Hexadecimal, '0xBEEF123aaa')),
# Positive/Negative Hexadecimals
('+0xDeaDa55', (TT.UnaryPositive, (TT.Hexadecimal, '0xDeaDa55'))),
('-0xBEEF123aaa', (TT.UnaryNegative, (TT.Hexadecimal, '0xBEEF123aaa'))),
# Octals
('0o7654321', (TT.Octal, '0o7654321')),
('0o111', (TT.Octal, '0o111')),
# Positive/Negative Octals
('+0o7654321', (TT.UnaryPositive, (TT.Octal, '0o7654321'))),
('-0o111', (TT.UnaryNegative, (TT.Octal, '0o111'))),
# Binaries
('0b101010', (TT.Binary, '0b101010')),
('0b111110', (TT.Binary, '0b111110')),
# Positive/Negative Binaries
('+0b101010', (TT.UnaryPositive, (TT.Binary, '0b101010'))),
('-0b111110', (TT.UnaryNegative, (TT.Binary, '0b111110'))),
# Strings
('"hi()!"', (TT.String, '"hi()!"')),
('"super string ->"', (TT.String, '"super string ->"')),
# Symbols
('name', (TT.Symbol, 'name')),
('my-address', (TT.Symbol, 'my-address')),
# Function Calls
('(fibonacci)', (TT.FunctionCall, (TT.Symbol, 'fibonacci'), [])),
('(destroy-evidence)', (TT.FunctionCall, (TT.Symbol, 'destroy-evidence'), [])),
('(steal-cookies 99)', (TT.FunctionCall, (TT.Symbol, 'steal-cookies'),
[(TT.Integer, '99')])),
('(steal-biscuits 22)', (TT.FunctionCall, (TT.Symbol, 'steal-biscuits'),
[(TT.Integer, '22')])),
('(reverse "foo")', (TT.FunctionCall, (TT.Symbol, 'reverse'),
[(TT.String, '"foo"')])),
('(+ 1 2)', (TT.FunctionCall, (TT.Symbol, '+'),
[(TT.Integer, '1'),
(TT.Integer, '2')])),
('(concatenate "foo" "bar" "baz")', (TT.FunctionCall,
(TT.Symbol, 'concatenate'),
[(TT.String, '"foo"'),
(TT.String, '"bar"'),
(TT.String, '"baz"')])),
('(f (g))', (TT.FunctionCall,
(TT.Symbol, 'f'),
[(TT.FunctionCall,
(TT.Symbol, 'g'),
[])])),
('(+ 10 (- 100 50))', (TT.FunctionCall,
(TT.Symbol, '+'),
[(TT.Integer, '10'),
(TT.FunctionCall,
(TT.Symbol, '-'),
[(TT.Integer, '100'),
(TT.Integer, '50')])])),
('(* (- 10 5) 2)', (TT.FunctionCall, (TT.Symbol, '*'),
[(TT.FunctionCall, (TT.Symbol, '-'),
[(TT.Integer, '10'),
(TT.Integer, '5')]),
(TT.Integer, '2')])),
('(* (- 10 (+ 1 1)) 2)', (TT.FunctionCall, (TT.Symbol, '*'),
[(TT.FunctionCall, (TT.Symbol, '-'),
[(TT.Integer, '10'),
(TT.FunctionCall, (TT.Symbol, '+'),
[(TT.Integer, '1'),
(TT.Integer, '1')])]),
(TT.Integer, '2')])),
('(+ (- 1 2) (/ 3 4))',
(TT.FunctionCall,
(TT.Symbol, '+'),
[(TT.FunctionCall,
(TT.Symbol, '-'),
[(TT.Integer, '1'),
(TT.Integer, '2')]),
(TT.FunctionCall,
(TT.Symbol, '/'),
[(TT.Integer, '3'),
(TT.Integer, '4')])])),
# Lists
('[]', (TT.List, [])),
('[1]', (TT.List, [(TT.Integer, '1')])),
('[(a)]', (TT.List, [(TT.FunctionCall, (TT.Symbol, 'a'), [])])),
('[1 2]', (TT.List, [(TT.Integer, '1'), (TT.Integer, '2')])),
('["uncle" "bob" "rules"]', (TT.List,
[(TT.String, '"uncle"'),
(TT.String, '"bob"'),
(TT.String, '"rules"')])),
('[(+ 1 2)]', (TT.List,
[(TT.FunctionCall,
(TT.Symbol, '+'),
[(TT.Integer, '1'),
(TT.Integer, '2')])])),
('[(+ 1 2) (- 3 4)]', (TT.List,
[(TT.FunctionCall,
(TT.Symbol, '+'),
[(TT.Integer, '1'),
(TT.Integer, '2')]),
(TT.FunctionCall,
(TT.Symbol, '-'),
[(TT.Integer, '3'),
(TT.Integer, '4')])])),
('[[]]', (TT.List, [(TT.List, [])])),
('[[] []]', (TT.List,
[(TT.List, []),
(TT.List, [])])),
('[[1]]', (TT.List, [(TT.List, [(TT.Integer, '1')])])),
('[[1 2]]', (TT.List,
[(TT.List,
[(TT.Integer, '1'),
(TT.Integer, '2')])])),
('[[1] [2]]', (TT.List,
[(TT.List, [(TT.Integer, '1')]),
(TT.List, [(TT.Integer, '2')])])),
('[[1] [2 3]]', (TT.List,
[(TT.List,
[(TT.Integer, '1')]),
(TT.List,
[(TT.Integer, '2'),
(TT.Integer, '3')])])),
('[[(+ 0 1)] [2 3]]', (TT.List,
[(TT.List,
[(TT.FunctionCall,
(TT.Symbol, '+'),
[(TT.Integer, '0'),
(TT.Integer, '1')])]),
(TT.List,
[(TT.Integer, '2'),
(TT.Integer, '3')])])),
# Function Calls with Lists
('(reverse [1 2])', (TT.FunctionCall,
(TT.Symbol, 'reverse'),
[(TT.List,
[(TT.Integer, '1'),
(TT.Integer, '2')])])),
('(reverse [[a] [b]])', (TT.FunctionCall,
(TT.Symbol, | 'reverse'),
[(TT.List,
[(TT.List,
[(TT.Symbol, 'a')]),
(TT.List,
[(TT.Symbol, 'b')])])])),
# Anonymous Funct | ions
('(->10)', (TT.Function, [], (TT.Integer, '10'))),
('(->20)', (TT.Function, [], (TT.Integer, '20'))),
('(->(foo))', (TT.Function, [],
(TT.FunctionCall, (TT.Symbol, 'foo'), []))),
('(->(+ 0x9 0x10))', (TT.Function, [],
(TT.FunctionCall,
(TT.Symbol, '+'),
[(TT.Hexadecimal, '0x9'),
(TT.Hexadecimal, '0x10')]))),
('(n -> n)', (TT.Function, [(TT.Symbol, 'n')], (TT.Symbol, 'n'))),
('(a b -> (+ a b))', (TT.Function,
[(TT.Symbol, 'a'),
(TT.Symbol, 'b')],
(TT.FunctionCall,
(TT.Symbol, '+'),
[(TT.Symbol, 'a'),
(TT.Symbol, 'b')]))),
# Nested Anonymous Functions
('(->(->1))', (TT.Function,
[],
(TT.Function,
[],
(TT.Integer, '1')))),
('(-> (-> (x -> (+ x 2))))', (TT.Function,
|
sxslex/rows | to-do/ods_draft.py | Python | gpl-3.0 | 1,998 | 0.000501 | # coding: utf-8
import HTMLParser
import zipfile
from re import compile as regexp_compile, DOTALL
from unicodedata import normalize
html_parser = HTMLParser.HTMLParser()
# Compiled patterns used to strip markup and pull apart the flat ODS
# content.xml.  DOTALL lets '.' cross newlines, since elements are not
# guaranteed to sit on a single line.  The table/row/cell patterns wrap
# open tag, body and close tag in groups so that re.split()[2::4]
# extracts just the bodies.
regexp_tags = regexp_compile(r'<[ \t]*[a-zA-Z0-9!"./_-]*[^>]*>', flags=DOTALL)
regexp_comment = regexp_compile(r'<!--.*?-->', flags=DOTALL)
regexp_ods_table = regexp_compile(r'(<table:table [^>]*>)(.*?)'
                                  r'(</table:table>)',
                                  flags=DOTALL)
regexp_ods_table_row = regexp_compile(r'(<table:table-row[^>]*>)(.*?)'
                                      r'(</table:table-row>)', flags=DOTALL)
regexp_ods_table_cell = regexp_compile(r'(<table:table-cell[^>]*>)(.*?)'
                                       r'(</table:table-cell>)', flags=DOTALL)
# TODO: encoding?
# TODO: replace &...;
# TODO: name/id of tables
# TODO: re.MULTILINE
# TODO: identify types
# TODO: clear empty rows?
# TODO: clear non-table rows?
def tables_ods(filename, headers=False, strip_xml=True):
    """Read an ODS file and return its tables via ``_tables_ods``."""
    with zipfile.ZipFile(filename) as archive:
        xml = archive.open('content.xml').read()
    return _tables_ods(xml, headers, strip_xml)
def _tables_ods(xml, headers, strip_xml):
    """Parse ODS ``content.xml`` markup into per-table row data.

    With ``headers`` the first row of each table becomes dict keys for
    the remaining rows; with ``strip_xml`` each cell is cleaned through
    ``remove_html`` instead of just being stripped.
    """
    # Choose the per-cell cleanup function once instead of branching
    # inside the row loop.
    if strip_xml:
        clean = remove_html
    else:
        clean = lambda field: field.strip()

    tables = []
    for table_xml in regexp_ods_table.split(xml)[2::4]:
        rows = []
        for row_xml in regexp_ods_table_row.split(table_xml)[2::4]:
            cells = regexp_ods_table_cell.split(row_xml)[2::4]
            rows.append([clean(cell) for cell in cells])
        if headers:
            header = rows[0]
            tables.append([dict(zip(header, row)) for row in rows[1:]])
        else:
            tables.append(rows)
    return tables
|
MrSurly/micropython-esp32 | tests/basics/attrtuple1.py | Python | mit | 364 | 0 | # test attrtuple
# we can't test this type directly so we use sys.implementation object
import sys
t = sys.implementation
# It can be just a normal tuple on small ports
try:
t.name
except AttributeError:
print("SKIP")
raise SystemExit
# test printing of attrtuple
pr | int(str(t).find("versi | on=") > 0)
# test read attr
print(isinstance(t.name, str))
|
NLeSC/MAGMa | web/magmaweb/script.py | Python | apache-2.0 | 5,092 | 0 | #!/usr/bin/env python
"""Manage MAGMa web user accounts
"""
import argparse
import os
import sys
from transaction import commit
from paste.deploy import appconfig
from sqlalchemy import engine_from_config
from magmaweb.user import init_user_db, User, JobMeta
from magmaweb.job import make_job_factory
class MagmaCommand(object):
    """Command-line front-end for managing MAGMa web users and jobs.

    Builds an argparse parser with one sub-command per action; each
    sub-command stores the bound method implementing it in ``func``.
    """

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument(
            '-c', '--config',
            help="magma web config file (default: production.ini)",
            default="production.ini", type=str)
        sp = self.parser.add_subparsers()

        # "add": create a new user account
        sc_add = sp.add_parser(
            "add", help=self.add.__doc__, description=self.add.__doc__)
        sc_add.add_argument(
            'user', help="user id (default: %(default)s)",
            default=None, type=str)
        sc_add.add_argument(
            'name', help="name (default: %(default)s)",
            default=None, type=str)
        sc_add.add_argument(
            'email', help="e-mail address (default: %(default)s)",
            default=None, type=str)
        sc_add.add_argument(
            'password', help="password (default: %(default)s)",
            default=None, type=str)
        sc_add.set_defaults(func=self.add)

        # "update": change fields of an existing user; optional flags use
        # argparse.SUPPRESS so absent flags never appear in the namespace.
        sc_update = sp.add_parser(
            "update", help=self.update.__doc__,
            description=self.update.__doc__)
        sc_update.add_argument(
            '-u', '--user', help="change user id)",
            default=argparse.SUPPRESS, type=str)
        sc_update.add_argument(
            '-n', '--name', help="change display name",
            default=argparse.SUPPRESS, type=str)
        sc_update.add_argument(
            '-e', '--email', help="change email",
            default=argparse.SUPPRESS, type=str)
        sc_update.add_argument(
            '-p', '--password', help="change password",
            default=argparse.SUPPRESS, type=str)
        sc_update.add_argument(
            'userid', help="user id (default: %(default)s)",
            default=None, type=str)
        sc_update.set_defaults(func=self.update)

        # "remove": delete a user and all their jobs
        sc_remove = sp.add_parser(
            "remove", help=self.remove.__doc__,
            description=self.remove.__doc__)
        sc_remove.add_argument(
            'user', help="user id (default: %(default)s)",
            default=None, type=str)
        sc_remove.set_defaults(func=self.remove)

        # "owner": reassign a job to another user
        sc_owner = sp.add_parser(
            "owner", help=self.owner.__doc__, description=self.owner.__doc__)
        sc_owner.add_argument(
            'job', help="job identifier", default=None, type=str)
        sc_owner.add_argument(
            'user', help="user id", default=None, type=str)
        sc_owner.set_defaults(func=self.owner)

        # "importjob": load an existing job result database
        sc_import = sp.add_parser(
            "importjob", help=self.importjob.__doc__,
            description=self.importjob.__doc__)
        sc_import.add_argument(
            'dbfile', help="job sqlite result db file",
            default=None, type=argparse.FileType('r'))
        sc_import.add_argument(
            'owner', help="user id", default=None, type=str)
        sc_import.set_defaults(func=self.importjob)

    def add(self, args):
        "Add new user"
        user = User(args.user, args.name, args.email, args.password)
        User.add(user)
        commit()

    def update(self, args):
        "Update user data"
        user = User.by_id(args.userid)
        # Renaming a user also rewrites the owner field of their jobs.
        if 'user' in args:
            user.userid = args.user
            for job in user.jobs:
                job.owner = args.user
                JobMeta.add(job)
        if 'name' in args:
            user.displayname = args.name
        if 'email' in args:
            user.email = args.email
        if 'password' in args:
            user.password = args.password
        User.add(user)
        commit()

    def remove(self, args):
        "Remove user and his/her jobs"
        user = User.by_id(args.user)
        for jobmeta in user.jobs:
            print(jobmeta.jobid)
            self.job_factory.fromId(jobmeta.jobid).delete()
        User.delete(user)
        commit()

    def owner(self, args):
        """Alter owner of job"""
        job = self.job_factory.fromId(args.job)
        job.meta.owner = args.user
        JobMeta.add(job.meta)
        commit()

    def importjob(self, args):
        """Import job results db"""
        dbfile = args.dbfile
        owner = args.owner
        job = self.job_factory.fromDb(dbfile, owner)
        # Print the new job id so callers can capture it.
        print(job.id)
        commit()

    def configure(self, config_file):
        """Load the paste config, initialise the user db and job factory."""
        config_url = 'config:' + config_file
        cwd = os.getcwd()
        self.config = appconfig(config_url, 'MAGMaWeb', relative_to=cwd)
        engine = engine_from_config(self.config)
        init_user_db(engine)
        self.job_factory = make_job_factory(self.config)

    def run(self, argv):
        """Parse *argv*, configure from the chosen ini file and dispatch."""
        args = self.parser.parse_args(argv)
        self.configure(args.config)
        args.func(args)
def main(argv=None):
    """Command-line entry point: parse *argv* and run the chosen action.

    :param argv: argument list; defaults to ``sys.argv[1:]`` evaluated at
        call time.  The previous ``argv=sys.argv[1:]`` default froze the
        value at import time, ignoring later changes to ``sys.argv``.
    """
    if argv is None:
        argv = sys.argv[1:]
    command = MagmaCommand()
    return command.run(argv)
|
torufuru/oolhackathon | ryu/ofproto/ofproto_v1_2_parser.py | Python | apache-2.0 | 159,337 | 0 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2012 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Decoder/Encoder implementations of OpenFlow 1.2.
"""
import struct
import itertools
from ryu.lib import addrconv
from ryu.lib import mac
from ryu import utils
from ofproto_parser import StringifyMixin, MsgBase, msg_pack_into, msg_str_attr
from . import ether
from . import ofproto_parser
from . import ofproto_v1_2 as ofproto
import logging
LOG = logging.getLogger('ryu.ofproto.ofproto_v1_2_parser')
_MSG_PARSERS = {}
def _set_msg_type(msg_type):
def _set_cls_msg_type(cls):
cls.cls_msg_type = msg_type
return cls
return _set_cls_msg_type
def _register_parser(cls):
    '''class decorator: record ``cls.parser`` in the message-parser table'''
    msg_type = cls.cls_msg_type
    assert msg_type is not None
    assert msg_type not in _MSG_PARSERS
    _MSG_PARSERS[msg_type] = cls.parser
    return cls
@ofproto_parser.register_msg_parser(ofproto.OFP_VERSION)
def msg_parser(datapath, version, msg_type, msg_len, xid, buf):
    """Dispatch a raw OpenFlow 1.2 message to its registered parser."""
    handler = _MSG_PARSERS.get(msg_type)
    return handler(datapath, version, msg_type, msg_len, xid, buf)
@_register_parser
@_set_msg_type(ofproto.OFPT_HELLO)
class OFPHello(MsgBase):
    """OpenFlow HELLO message, exchanged when a connection is started.

    The Ryu framework handles this message itself, so applications
    normally never need to process it.
    """

    def __init__(self, datapath):
        super(OFPHello, self).__init__(datapath)
@_register_parser
@_set_msg_type(ofproto.OFPT_ERROR)
class OFPErrorMsg(MsgBase):
    """
    Error message

    The switch notifies controller of problems by this message.

    ========== =========================================================
    Attribute  Description
    ========== =========================================================
    type       High level type of error
    code       Details depending on the type
    data       Variable length data depending on the type and code
    ========== =========================================================

    ``type`` attribute corresponds to ``type_`` parameter of __init__.

    Types and codes are defined in ``ryu.ofproto.ofproto``.

    ============================= ===========
    Type                          Code
    ============================= ===========
    OFPET_HELLO_FAILED            OFPHFC_*
    OFPET_BAD_REQUEST             OFPBRC_*
    OFPET_BAD_ACTION              OFPBAC_*
    OFPET_BAD_INSTRUCTION         OFPBIC_*
    OFPET_BAD_MATCH               OFPBMC_*
    OFPET_FLOW_MOD_FAILED         OFPFMFC_*
    OFPET_GROUP_MOD_FAILED        OFPGMFC_*
    OFPET_PORT_MOD_FAILED         OFPPMFC_*
    OFPET_TABLE_MOD_FAILED        OFPTMFC_*
    OFPET_QUEUE_OP_FAILED         OFPQOFC_*
    OFPET_SWITCH_CONFIG_FAILED    OFPSCFC_*
    OFPET_ROLE_REQUEST_FAILED     OFPRRFC_*
    OFPET_EXPERIMENTER            N/A
    ============================= ===========

    Example::

        @set_ev_cls(ofp_event.EventOFPErrorMsg,
                    [HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER])
        def error_msg_handler(self, ev):
            msg = ev.msg
            self.logger.debug('OFPErrorMsg received: type=0x%02x code=0x%02x '
                              'message=%s',
                              msg.type, msg.code, utils.hex_array(msg.data))
    """
    def __init__(self, datapath, type_=None, code=None, data=None):
        super(OFPErrorMsg, self).__init__(datapath)
        self.type = type_
        self.code = code
        self.data = data

    @classmethod
    def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
        # Peek at the 16-bit error type first: experimenter errors use a
        # different wire format handled by OFPErrorExperimenterMsg.
        type_, = struct.unpack_from('!H', buffer(buf),
                                    ofproto.OFP_HEADER_SIZE)
        if type_ == ofproto.OFPET_EXPERIMENTER:
            return OFPErrorExperimenterMsg.parser(datapath, version, msg_type,
                                                  msg_len, xid, buf)
        msg = super(OFPErrorMsg, cls).parser(datapath, version, msg_type,
                                             msg_len, xid, buf)
        msg.type, msg.code = struct.unpack_from(
            ofproto.OFP_ERROR_MSG_PACK_STR, msg.buf,
            ofproto.OFP_HEADER_SIZE)
        # Everything past the fixed header is opaque payload.
        msg.data = msg.buf[ofproto.OFP_ERROR_MSG_SIZE:]
        return msg

    def _serialize_body(self):
        assert self.data is not None
        msg_pack_into(ofproto.OFP_ERROR_MSG_PACK_STR, self.buf,
                      ofproto.OFP_HEADER_SIZE, self.type, self.code)
        self.buf += self.data
class OFPErrorExperimenterMsg(MsgBase):
    """Experimenter-class error message (type OFPET_EXPERIMENTER).

    Carries an experimenter id and an experimenter-defined ``exp_type``
    instead of the standard error code.
    """
    def __init__(self, datapath, type_=None, exp_type=None, experimenter=None,
                 data=None):
        super(OFPErrorExperimenterMsg, self).__init__(datapath)
        self.type = ofproto.OFPET_EXPERIMENTER
        self.exp_type = exp_type
        self.experimenter = experimenter
        self.data = data

    @classmethod
    def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
        # NOTE(review): mutates the class attribute, not the instance --
        # presumably intentional so MsgBase.parser sees the right type.
        cls.cls_msg_type = msg_type
        msg = super(OFPErrorExperimenterMsg, cls).parser(
            datapath, version, msg_type, msg_len, xid, buf)
        msg.type, msg.exp_type, msg.experimenter = struct.unpack_from(
            ofproto.OFP_ERROR_EXPERIMENTER_MSG_PACK_STR, msg.buf,
            ofproto.OFP_HEADER_SIZE)
        msg.data = msg.buf[ofproto.OFP_ERROR_EXPERIMENTER_MSG_SIZE:]
        return msg

    def _serialize_body(self):
        assert self.data is not None
        msg_pack_into(ofproto.OFP_ERROR_EXPERIMENTER_MSG_PACK_STR,
                      self.buf, ofproto.OFP_HEADER_SIZE,
                      self.type, self.exp_type, self.experimenter)
        self.buf += self.data
@_register_parser
@_set_msg_type(ofproto.OFPT_ECHO_REQUEST)
class OFPEchoRequest(MsgBase):
    """
    Echo request message

    This message is handled by the Ryu framework, so the Ryu application
    do not need to process this typically.

    ========== =========================================================
    Attribute  Description
    ========== =========================================================
    data       An arbitrary length data
    ========== =========================================================

    Example::

        def send_echo_request(self, datapath, data):
            ofp = datapath.ofproto
            ofp_parser = datapath.ofproto_parser

            req = ofp_parser.OFPEchoRequest(datapath, data)
            datapath.send_msg(req)

        @set_ev_cls(ofp_event.EventOFPEchoRequest,
                    [HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER])
        def echo_request_handler(self, ev):
            self.logger.debug('OFPEchoRequest received: data=%s',
                              utils.hex_array(ev.msg.data))
    """
    def __init__(self, datapath, data=None):
        super(OFPEchoRequest, self).__init__(datapath)
        self.data = data

    @classmethod
    def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
        msg = super(OFPEchoRequest, cls).parser(datapath, version, msg_type,
                                                msg_len, xid, buf)
        # The whole body after the header is the echo payload.
        msg.data = msg.buf[ofproto.OFP_HEADER_SIZE:]
        return msg

    def _serialize_body(self):
        if self.data is not None:
            self.buf += self.data
@_register_parser
@_set_msg_type(ofproto.OFPT_ECHO_REPLY)
class OFPEchoReply(MsgBase):
"""
Echo reply message
This message is handled by the Ryu framework, so the Ryu application
do not need to process this typically.
|
jaor/python | bigml/tests/test_09_ensemble_prediction.py | Python | apache-2.0 | 4,365 | 0.003438 | # -*- coding: utf-8 -*-
#
# Copyright 2015-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Creating ensembles predictions
"""
from .world import world, setup_module, teardown_module
from . import create_source_steps as source_create
from . import create_dataset_steps as dataset_create
from . import create_ensemble_steps as ensemble_create
from . import create_prediction_steps as prediction_create
class TestEnsemblePrediction(object):
    """Scenario tests: creating predictions from an ensemble."""

    def setup(self):
        """
        Debug information
        """
        print("\n-------------------\nTests in: %s\n" % __name__)

    def teardown(self):
        """
        Debug information
        """
        print("\nEnd of tests in: %s\n-------------------\n" % __name__)

    def test_scenario1(self):
        """
        Scenario: Successfully creating a prediction from an ensemble:
            Given I create a data source uploading a "<data>" file
            And I wait until the source is ready less than <time_1> secs
            And I create a dataset
            And I wait until the dataset is ready less than <time_2> secs
            And I create an ensemble of <number_of_models> models and <tlp> tlp
            And I wait until the ensemble is ready less than <time_3> secs
            When I create an ensemble prediction for "<data_input>"
            And I wait until the prediction is ready less than <time_4> secs
            Then the prediction for "<objective>" is "<prediction>"

            Examples:
            | data             | time_1 | time_2 | time_3 | time_4 | number_of_models | tlp | data_input | objective | prediction |
            | ../data/iris.csv | 10 | 10 | 50 | 20 | 5 | 1 | {"petal width": 0.5} | 000004 | Iris-versicolor |
            | ../data/iris_sp_chars.csv | 10 | 10 | 50 | 20 | 5 | 1 | {"pétal&width\\u0000": 0.5} | 000004 | Iris-versicolor |
            | ../data/grades.csv | 10 | 10 | 150 | 20 | 10 | 1 | {"Assignment": 81.22, "Tutorial": 91.95, "Midterm": 79.38, "TakeHome": 105.93} | 000005 | 88.205575 |
            | ../data/grades.csv | 10 | 10 | 150 | 20 | 10 | 1 | {"Assignment": 97.33, "Tutorial": 106.74, "Midterm": 76.88, "TakeHome": 108.89} | 000005 | 84.29401 |
        """
        print(self.test_scenario1.__doc__)
        # Each row: data file, source/dataset/ensemble/prediction timeouts,
        # model count, tlp, input payload, objective field id, expectation.
        examples = [
            ['data/iris.csv', '30', '30', '50', '20', '5', '1', '{"petal width": 0.5}', '000004', 'Iris-versicolor'],
            ['data/iris_sp_chars.csv', '30', '30', '50', '20', '5', '1', '{"pétal&width\\u0000": 0.5}', '000004', 'Iris-versicolor'],
            ['data/grades.csv', '30', '30', '150', '20', '10', '1', '{"Assignment": 81.22, "Tutorial": 91.95, "Midterm": 79.38, "TakeHome": 105.93}', '000005', '84.556'],
            ['data/grades.csv', '30', '30', '150', '20', '10', '1', '{"Assignment": 97.33, "Tutorial": 106.74, "Midterm": 76.88, "TakeHome": 108.89}', '000005', '73.13558']]
        for example in examples:
            print("\nTesting with:\n", example)
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            ensemble_create.i_create_an_ensemble(self, example[5], example[6])
            ensemble_create.the_ensemble_is_finished_in_less_than(self, example[3])
            prediction_create.i_create_an_ensemble_prediction(self, example[7])
            prediction_create.the_prediction_is_finished_in_less_than(self, example[4])
            prediction_create.the_prediction_is(self, example[8], example[9])
|
SeanHayes/swarm-war | swarm_war/teams/models.py | Python | agpl-3.0 | 3,862 | 0.039109 | # -*- coding: utf-8 -*-
#Copyright (C) 2011 Seán Hayes
#Python imports
import logging
import pdb
#Django imports
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models, IntegrityError
from django.db.models.signals import post_save
from django.utils import safestring
#App imports
from exceptions import TeamAlreadyExistsError, TeamNoLongerExistsError, TeamFullError, NotOnATeamError, TeamAlreadyHasALeaderError, NotOnSameTeamError, NotALeaderError
from managers import *
from swarm_war.core.models import FacebookRequest
from swarm_war.core.managers import FacebookRequestManager
logger = logging.getLogger(__name__)
# Create your models here.
MAX_TEAM_SIZE = 10
class Team(models.Model):
    """A named group of players; membership lives on ``TeamProfile``."""
    # Display name; uniqueness enforced at the database level.
    name = models.CharField(unique=True, max_length=100)

    def get_leader(self):
        """Return the ``User`` leading this team, or None if there is none."""
        leader = None
        try:
            #TODO: use filter to better tolerate bugs (e.g. more than one leader) that may creep up
            leader = self.members.get(leader=True).user
        except TeamProfile.DoesNotExist:
            # No member is flagged as leader; fall through and return None.
            pass
        except Exception as e:
            # Best-effort: log anything unexpected and report no leader.
            logger.error(e)
        return leader

    def html(self):
        """Return a safe HTML link to this team's detail page."""
        s = u'<a href="%s">%s</a>' % (reverse('teams_view', args=[self.id]), self.name)
        return safestring.mark_safe(s)

    def __unicode__(self):
        return self.name
class TeamProfile(models.Model):
    """Per-user team membership record (one per ``User``)."""
    user = models.OneToOneField(User)
    team = models.ForeignKey(Team, null=True, blank=True, related_name="members")
    # True when this member leads their current team.
    leader = models.BooleanField(default=False)

    def become_leader(self):
        """Promote this member to leader of their current team.

        Raises NotOnATeamError when teamless, TeamAlreadyHasALeaderError
        when the team already has a leader.
        """
        if self.team is None:
            raise NotOnATeamError(self.user)
        elif self.team.get_leader() is not None:
            raise TeamAlreadyHasALeaderError()
        else:
            self.leader = True
            self.save()

    def create_team(self, name):
        """Create a new team named *name* and join it as its leader.

        Raises TeamAlreadyExistsError when the name is taken.
        """
        try:
            team = Team(name=name)
            team.save()
        except IntegrityError:
            raise TeamAlreadyExistsError()
        self.team = team
        self.leader = True
        self.save()
        return team

    def join_team(self, team):
        """Join *team* as a regular member; raises TeamFullError when full."""
        count = team.members.count()
        if count < MAX_TEAM_SIZE:
            self.team = team
            self.leader = False
            self.save()
        else:
            raise TeamFullError()

    def kick_out(self, user):
        """As team leader, remove *user* from this member's team."""
        if self.team is None:
            raise NotOnATeamError(self.user)
        if not self.leader:
            raise NotALeaderError()
        user_tp = TeamProfile.objects.get(user=user)
        if user_tp.team is None:
            raise NotOnATeamError(user)
        # BUG FIX: compare ids with ``!=`` -- the previous ``is not`` tested
        # object identity and only worked by accident for small cached ints.
        if user_tp.team.id != self.team.id:
            raise NotOnSameTeamError()
        user_tp.leave_team()

    def leave_team(self):
        """Leave the current team, deleting it when it becomes empty."""
        team = self.team
        self.team = None
        self.leader = False
        self.save()
        count = team.members.count()
        if count == 0:
            team.delete()

    def __unicode__(self):
        return u'%s: %s' % (self.__class__.__name__, self.user)
def create_profile(user):
    """Create and persist a TeamProfile for the newly created *user*.

    Invoked from the ``post_save`` signal handler on ``User``.
    """
    TeamProfile(user=user).save()
def user_save_handler(sender, instance, created, **kwargs):
    """``post_save`` receiver: build a profile for each newly created User."""
    if not created:
        return
    create_profile(instance)
post_save.connect(user_save_handler, sender=User)
class TeamFacebookRequest(FacebookRequest):
    """Facebook invite asking a friend to join the sender's team."""
    #has to be nullable so that this doesn't get deleted when a related team gets deleted
    team = models.ForeignKey(Team, null=True)
    objects = FacebookRequestManager()

    def html(self):
        # BUG FIX: ``self.team.html`` is a bound method; it must be called,
        # otherwise the method repr is interpolated instead of the link.
        s = u'%s has invited you to join a Team: %s.' % (self.user.username,
                                                         self.team.html())
        return safestring.mark_safe(s)

    def confirm(self, friend):
        """Accept the invite: add *friend* to the team, then always finish
        the request through the parent class (even when joining fails)."""
        try:
            if self.team is None:
                raise TeamNoLongerExistsError()
            # BUG FIX: ``team.members`` holds TeamProfile rows, so the
            # sender's membership must be checked against their ``user_id``,
            # not the profile primary key.
            member_user_ids = [profile.user_id for profile in self.team.members.all()]
            if self.user.id not in member_user_ids:
                raise Exception('Can\'t join %s because %s isn\'t a member anymore.' % (self.team.name, self.user.username))
            friend.teamprofile.join_team(self.team)
        finally:
            super(TeamFacebookRequest, self).confirm(friend)
|
pombredanne/parakeet | parakeet/builder/array_builder.py | Python | bsd-3-clause | 9,545 | 0.025144 |
from ..ndtypes import (make_slice_type, make_array_type, ptr_type,
ArrayT, TupleT, ScalarT, Type, PtrT, Int64, IntT, Float64)
from ..syntax import (Alloc, AllocArray, ArrayView, Const, Index, Slice, Struct, Var, Select, Expr)
from ..syntax.helpers import (const, zero_i64, wrap_if_constant, slice_none, unwrap_constant)
from arith_builder import ArithBuilder
class ArrayBuilder(ArithBuilder):
"""
Builder for constructing arrays and getting their properties
"""
def elt_type(self, x):
if isinstance(x, Type):
try:
return x.elt_type
except:
return x
elif self.is_array(x):
return x.type.elt_type
else:
return x.type
  def alloc_array(self, elt_t, dims, name = "array",
                  explicit_struct = False,
                  array_view = False,
                  order = "C"):
    """
    Given an element type and sequence of expressions denoting each dimension
    size, generate code to allocate an array and its shape/strides metadata. For
    now I'm assuming that all arrays are in row-major, eventually we should make
    the layout an option.
    """
    assert order == "C", "Only row-major layout supported so far, not %s" % order
    # Accept either a tuple expression of dims or a plain list/single dim.
    if self.is_tuple(dims):
      shape = dims
      dims = self.tuple_elts(shape)
    else:
      if not isinstance(dims, (list, tuple)):
        dims = [dims]
      shape = self.tuple(dims, "shape", explicit_struct = explicit_struct)
    rank = len(dims)
    array_t = make_array_type(elt_t, rank)
    if explicit_struct or array_view:
      # Manually allocate a flat data pointer plus shape/strides metadata.
      nelts = self.prod(dims, name = "nelts")
      ptr_t = ptr_type(elt_t)
      ptr_var = self.assign_name(Alloc(elt_t, nelts, type = ptr_t), "data_ptr")
      stride_elts = [const(1)]
      # assume row-major for now!
      # Build strides right-to-left: innermost stride is 1, each outer
      # stride is the product of the inner dimensions.
      for d in reversed(dims[1:]):
        next_stride = self.mul(stride_elts[0], d, "dim")
        stride_elts = [next_stride] + stride_elts
      strides = self.tuple(stride_elts, "strides", explicit_struct = explicit_struct)
      if explicit_struct:
        array = Struct([ptr_var, shape, strides, zero_i64, nelts], type = array_t)
      else:
        array = ArrayView(data = ptr_var,
                          shape = shape,
                          strides = strides,
                          offset = zero_i64,
                          size = nelts,
                          type = array_t)
    else:
      # Let a later lowering pass expand the high-level AllocArray node.
      array = AllocArray(shape, elt_type = elt_t, type = array_t)
    if name is None:
      return array
    return self.assign_name(array, name)
  def len(self, array):
    """Size of the leading (0th) dimension of 'array'."""
    return self.shape(array, 0)
  def nelts(self, array, explicit_struct = False):
    """Total number of elements: the product of all dimension sizes."""
    shape_elts = self.tuple_elts(self.shape(array), explicit_struct = explicit_struct)
    return self.prod(shape_elts, name = "nelts")
def rank(self, value):
if self.is_array(value):
return value.type.rank
else:
return 0
  def shape(self, array, dim = None, explicit_struct = False, temp = True):
    """Return the shape tuple of 'array', or a single dim when 'dim' is given.
    Non-array values yield an empty tuple. 'dim' may be a constant Expr,
    which is unwrapped to a plain int.
    """
    if array.type.__class__ is ArrayT:
      shape = self.attr(array, "shape", temp = temp)
      if dim is None:
        return shape
      if isinstance(dim, Expr):
        dim = unwrap_constant(dim)
      assert isinstance(dim, (int, long)), "Expected array dimension to be an int, got %s" % dim
      dim_value = self.tuple_proj(shape, dim, explicit_struct = explicit_struct)
      if temp:
        return self.assign_name(dim_value, "dim%d" % dim)
      else:
        return dim_value
    else:
      # Scalars and other non-arrays have an empty shape.
      return self.tuple([])
  def strides(self, array, dim = None, explicit_struct = False):
    """Return the strides tuple of 'array', or the single stride for 'dim'."""
    assert array.type.__class__ is ArrayT
    strides = self.attr(array, "strides")
    if dim is None:
      return strides
    else:
      elt_value = self.tuple_proj(strides, dim, explicit_struct = explicit_struct)
      return self.assign_name(elt_value, "stride%d" % dim)
  def slice_value(self, start, stop, step):
    """Build a Slice expression node from start/stop/step expressions."""
    slice_t = make_slice_type(start.type, stop.type, step.type)
    return Slice(start, stop, step, type = slice_t)
def build_slice_indices(self, rank, axis, idx):
"""
Build index tuple to pull out the 'idx' element along the given axis
""" |
if rank == 1:
assert axis == 0
return idx
indices = []
for i in xrange(rank):
if i == axis:
indices.append(idx)
else:
s = self. | slice_value(self.none, self.none, self.int(1))
indices.append(s)
return self.tuple(indices)
def elts_in_slice(self, start, stop, step):
start_minus_start = self.sub(stop, start, name = "stop_minus_start")
nelts = self.div(self.cast(start_minus_start, Float64), step, name = "nelts")
ceil = self.ceil(nelts)
nelts = self.cast(ceil, Int64)
return self.max(nelts, self.zero(nelts.type))
  def slice_along_axis(self, arr, axis, idx):
    """
    Pull out a slice if the array has the given axis,
    otherwise just return the array
    """
    r = self.rank(arr)
    if isinstance(axis, Expr):
      axis = unwrap_constant(axis)
    idx = wrap_if_constant(idx)
    if r == 1 and (axis is None or axis == 0):
      # 1D case: a plain element index.
      return self.index(arr, idx)
    elif axis is None:
      # No axis: broadcast a scalar index across every dimension.
      if isinstance(idx.type, ScalarT):
        idx = self.tuple((idx,) * r)
      return self.index(arr, idx)
    elif r > axis:
      # Index the chosen axis, full slice on the others.
      index_tuple = self.build_slice_indices(r, axis, idx)
      return self.index(arr, index_tuple)
    else:
      # Array has no such axis: nothing to slice.
      return arr
  def output_slice(self, output, axis, idx):
    """
    Create an expression which acts as an LHS output location
    for a slice through the variable 'output' along the given axis
    """
    r = self.rank(output)
    if r > 1:
      output_indices = self.build_slice_indices(r, axis, idx)
    elif r == 1:
      # 1D output: a slice starting at idx.
      output_idx = self.slice_value(idx, self.none, self.int(1))
      output_indices = self.tuple([output_idx])
    else:
      # Rank 0: a fully-open slice.
      output_idx = self.slice_value(self.none, self.none, self.none)
      output_indices = self.tuple([output_idx])
    return self.index(output, output_indices)
  def size_along_axis(self, value, axis):
    """Dimension size of 'value' along 'axis' (delegates to shape)."""
    return self.shape(value, axis)
  def check_equal_sizes(self, sizes):
    """Hook for size validation; intentionally a no-op in this builder."""
    pass
  def index(self, arr, idx, temp = False, name = None):
    """Index into array or tuple differently depending on the type"""
    # A requested name implies the result must live in a temp variable.
    temp = temp or name is not None
    arr_t = arr.type
    if isinstance(arr_t, ScalarT):
      # even though it's not correct externally, it's
      # often more convenient to treat indexing
      # into scalars as the identity function.
      # Just be sure to catch this as an error in
      # the user's code earlier in the pipeline.
      return arr
    if isinstance(arr_t, TupleT):
      # Tuple indexing requires a compile-time constant position.
      if isinstance(idx, Const):
        idx = idx.value
      assert isinstance(idx, int), \
          "Index into tuple must be an integer, got %s" % idx
      # NOTE(review): this second Const check looks unreachable -- idx was
      # already unwrapped just above.
      if isinstance(idx, Const):
        idx = idx.value
      proj = self.tuple_proj(arr, idx)
      if temp:
        return self.assign_name(proj, "tuple_elt%d" % idx if name is None else name)
      else:
        return proj
    # Normalize the index into a flat tuple of index expressions.
    if self.is_tuple(idx):
      indices = self.tuple_elts(idx)
    elif isinstance(idx, (list,tuple)) or hasattr(idx, '__iter__'):
      indices = tuple(map(wrap_if_constant,idx))
    else:
      indices = (wrap_if_constant(idx),)
    n_required = arr_t.rank
    n_indices = len(indices)
    if n_indices < n_required:
      # all unspecified dimensions are considered fully sliced
      extra = (slice_none,) * (n_required - n_indices)
      indices = indices + extra
    if len(indices) > 1:
      idx = self.tuple(indices, name = name)
    else:
      idx = indices[0]
    t = arr_t.index_type(idx.type)
    idx_expr = Index(arr, idx, type=t)
    if temp:
      return self.assign_name(idx_expr, "array_elt" if name is None else name)
    else:
      return idx_expr
def index_along_axis(self, arr, axis, idx, name=None):
if arr.type.__class__ is not ArrayT:
return arr
assert isinstance(axis, int), \
"Axis must be a known constant int, got: " + str(axis)
indices = []
for i in xrange(arr.type.rank):
if i == axis:
indices.append(wrap_if |
coteyr/home-assistant | homeassistant/components/downloader.py | Python | mit | 4,237 | 0 | """
homeassistant.components.downloader
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides functionality to download files.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/downloader/
"""
import logging
import os
import re
import threading
import requests
from homeassistant.helpers import validate_config
from homeassistant.util import sanitize_filename
DOMAIN = "downloader"
SERVICE_DOWNLOAD_FILE = "download_file"
ATTR_URL = "url"
ATTR_SUBDIR = "subdir"
CONF_DOWNLOAD_DIR = 'download_dir'
# pylint: disable=too-many-branches
def setup(hass, config):
    """Set up the downloader component.

    Registers the ``downloader.download_file`` service which fetches the
    file at ``url`` (optionally into ``subdir``) on a background thread.
    Returns False when config is invalid or the download dir is missing.
    """
    logger = logging.getLogger(__name__)

    if not validate_config(config, {DOMAIN: [CONF_DOWNLOAD_DIR]}, logger):
        return False

    download_path = config[DOMAIN][CONF_DOWNLOAD_DIR]

    # If path is relative, we assume relative to HASS config dir
    if not os.path.isabs(download_path):
        download_path = hass.config.path(download_path)

    if not os.path.isdir(download_path):
        logger.error(
            "Download path %s does not exist. File Downloader not active.",
            download_path)
        return False

    def download_file(service):
        """Start a thread to download the file specified in the service call."""
        if ATTR_URL not in service.data:
            logger.error("Service called but 'url' parameter not specified.")
            return

        def do_download():
            """Download the file (runs in a worker thread)."""
            try:
                url = service.data[ATTR_URL]

                subdir = service.data.get(ATTR_SUBDIR)
                if subdir:
                    subdir = sanitize_filename(subdir)

                final_path = None

                req = requests.get(url, stream=True, timeout=10)

                if req.status_code == 200:
                    filename = None

                    if 'content-disposition' in req.headers:
                        match = re.findall(r"filename=(\S+)",
                                           req.headers['content-disposition'])
                        if len(match) > 0:
                            filename = match[0].strip("'\" ")

                    if not filename:
                        filename = os.path.basename(url).strip()

                    if not filename:
                        filename = "ha_download"

                    # Remove stuff to ruin paths
                    filename = sanitize_filename(filename)

                    # Do we want to download to subdir, create if needed
                    if subdir:
                        subdir_path = os.path.join(download_path, subdir)

                        # Ensure subdir exist
                        if not os.path.isdir(subdir_path):
                            os.makedirs(subdir_path)

                        final_path = os.path.join(subdir_path, filename)
                    else:
                        final_path = os.path.join(download_path, filename)

                    path, ext = os.path.splitext(final_path)

                    # If file exist append a number.
                    # We test filename, filename_2..
                    tries = 1
                    final_path = path + ext
                    while os.path.isfile(final_path):
                        tries += 1
                        # BUGFIX: splitext keeps the leading dot in `ext`,
                        # so the old "{}_{}.{}" produced "file_2..txt".
                        final_path = "{}_{}{}".format(path, tries, ext)

                    logger.info("%s -> %s", url, final_path)

                    with open(final_path, 'wb') as fil:
                        for chunk in req.iter_content(1024):
                            fil.write(chunk)

                    logger.info("Downloading of %s done", url)

            except requests.exceptions.ConnectionError:
                logger.exception("ConnectionError occured for %s", url)

                # Remove file if we started downloading but failed
                if final_path and os.path.isfile(final_path):
                    os.remove(final_path)

        threading.Thread(target=do_download).start()

    hass.services.register(DOMAIN, SERVICE_DOWNLOAD_FILE,
                           download_file)

    return True
|
haim0n/miko | travis_pypi_setup.py | Python | isc | 3,755 | 0.000266 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Update encrypted deploy password in Travis config file
"""
from __future__ import print_function
import base64
import json
import os
from getpass import getpass
import yaml
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
try:
from urllib import urlopen
except:
from urllib.request import urlopen
GITHUB_REPO = 'bregman-arie/miko'
TRAVIS_CONFIG_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '.travis.yml')
def load_key(pubkey):
    """Load public RSA key, with work-around for keys using
    incorrect header/footer format.
    Read more about RSA encryption with cryptography:
    https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/
    """
    try:
        return load_pem_public_key(pubkey.encode(), default_backend())
    except ValueError:
        # workaround for https://github.com/travis-ci/travis-api/issues/196
        # Travis serves "BEGIN RSA PUBLIC KEY" headers on some payloads;
        # strip the "RSA" token and retry the parse.
        pubkey = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END')
        return load_pem_public_key(pubkey.encode(), default_backend())
def encrypt(pubkey, password):
    """Encrypt password using given RSA public key and encode it with base64.
    The encrypted password can only be decrypted by someone with the
    private key (in this case, only Travis).
    NOTE: PKCS1v15 is a legacy padding scheme, but it is what the Travis
    decryption side expects, so it must stay for compatibility.
    """
    key = load_key(pubkey)
    encrypted_password = key.encrypt(password, PKCS1v15())
    return base64.b64encode(encrypted_password)
def fetch_public_key(repo):
    """Download RSA public key Travis will use for this repo.

    Travis API docs: http://docs.travis-ci.com/api/#repository-keys
    """
    url = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
    response = json.loads(urlopen(url).read().decode())
    try:
        return response['key']
    except KeyError:
        errmsg = "Could not find public key for repo: {}.\n".format(repo)
        errmsg += "Have you already added your GitHub repo to Travis?"
        raise ValueError(errmsg)
def prepend_line(filepath, line):
    """Rewrite a file so that *line* becomes its first line."""
    with open(filepath) as src:
        original = src.read()
    with open(filepath, 'w') as dst:
        dst.write(line + original)
def load_yaml_config(filepath):
    """Parse *filepath* as YAML and return the resulting object.

    Uses safe_load: the Travis config holds only plain mappings, and
    yaml.load without an explicit Loader can construct arbitrary Python
    objects from untrusted tags.
    """
    with open(filepath) as f:
        return yaml.safe_load(f)
def save_yaml_config(filepath, config):
    """Serialize *config* to *filepath* as block-style YAML."""
    with open(filepath, 'w') as f:
        yaml.dump(config, f, default_flow_style=False)
def update_travis_deploy_password(encrypted_password):
    """Update the deploy section of the .travis.yml file
    to use the given encrypted password.
    """
    config = load_yaml_config(TRAVIS_CONFIG_FILE)
    config['deploy']['password'] = dict(secure=encrypted_password)
    save_yaml_config(TRAVIS_CONFIG_FILE, config)
    # Re-add a generated-file warning header (the dump output carries none).
    line = ('# This file was autogenerated and will overwrite'
            ' each time you run travis_pypi_setup.py\n')
    prepend_line(TRAVIS_CONFIG_FILE, line)
def main(args):
    """Encrypt the PyPI password and write it into .travis.yml."""
    public_key = fetch_public_key(args.repo)
    # Prompt interactively only when --password was not supplied.
    password = args.password or getpass('PyPI password: ')
    update_travis_deploy_password(encrypt(public_key, password.encode()))
    print("Wrote encrypted password to .travis.yml -- you're ready to deploy")
if '__main__' == __name__:
    import argparse
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--repo', default=GITHUB_REPO,
                        help='GitHub repo (default: %s)' % GITHUB_REPO)
    parser.add_argument('--password',
                        help='PyPI password (will prompt if not provided)')

    args = parser.parse_args()

    main(args)
|
evernym/zeno | plenum/test/replica/test_buffers_cleaning.py | Python | apache-2.0 | 3,313 | 0.001811 | from plenum.common.event_bus import InternalBus
from plenum.common.startable import Mode
from plenum.common.timer import QueueTimer
from plenum.common.util import get_utc_epoch
from plenum.server.consensus.primary_selector import RoundRobinConst | antNodesPrimariesSelector
from plenum.server.database_manager import DatabaseManager
from plenum.server.quorums import Quorums
from plenum.server.replica import Replica
from plenum.test.testing_utils import FakeSomething
def test_ordered_cleaning(tconf):
    """After gc(), ordered must only retain batches from the latest view."""
    global_view_no = 2

    node = FakeSomething(
        name="fake node",
        ledger_ids=[0],
        viewNo=global_view_no,
        utc_epoch=get_utc_epoch,
        get_validators=lambda: [],
        db_manager=DatabaseManager(),
        requests=[],
        mode=Mode.participating,
        timer=QueueTimer(),
        quorums=Quorums(4),
        write_manager=None,
        poolManager=FakeSomething(node_names_ordered_by_rank=lambda: []),
        primaries_selector=RoundRobinConstantNodesPrimariesSelector(["Alpha", "Beta", "Gamma", "Delta"])
    )
    bls_bft_replica = FakeSomething(
        gc=lambda *args: None,
    )
    replica = Replica(node, instId=0, config=tconf, bls_bft_replica=bls_bft_replica)
    replica._consensus_data.view_no = global_view_no

    total = []
    num_requests_per_view = 3
    for viewNo in range(global_view_no + 1):
        for seqNo in range(num_requests_per_view):
            reqId = viewNo, seqNo
            replica._ordering_service._add_to_ordered(*reqId)
            total.append(reqId)

    # gc is called after stable checkpoint, since no request executed
    # in this test starting it manually
    replica._ordering_service.gc(100)

    # Requests with view lower then previous view
    # should not be in ordered
    assert len(replica._ordering_service.ordered) == len(total[num_requests_per_view:])
def test_primary_names_cleaning(tconf):
    """primaryNames must only retain entries for the last two views."""
    node = FakeSomething(
        name="fake node",
        ledger_ids=[0],
        viewNo=0,
        utc_epoch=get_utc_epoch,
        get_validators=lambda: [],
        db_manager=DatabaseManager(),
        requests=[],
        mode=Mode.participating,
        timer=QueueTimer(),
        quorums=Quorums(4),
        write_manager=None,
        poolManager=FakeSomething(node_names_ordered_by_rank=lambda: []),
        primaries_selector=RoundRobinConstantNodesPrimariesSelector(["Alpha", "Beta", "Gamma", "Delta"])
    )
    bls_bft_replica = FakeSomething(
        gc=lambda *args: None,
    )
    replica = Replica(node, instId=0, config=tconf, bls_bft_replica=bls_bft_replica)

    # First primary appointed in the initial view (0).
    replica.primaryName = "Node1:0"
    history = [(0, "Node1:0")]
    assert list(replica.primaryNames.items()) == history

    # Each view change appoints a new primary; only the two most recent
    # views should remain recorded.
    for primary in ["Node2:0", "Node3:0", "Node4:0"]:
        node.viewNo += 1
        replica._consensus_data.view_no = node.viewNo
        replica.primaryName = primary
        history.append((node.viewNo, primary))
        assert list(replica.primaryNames.items()) == history[-2:]
wojas/django-pgrunner | pgrunner/management/commands/pg_psql.py | Python | mit | 539 | 0.003711 | from __future__ import absolute_import, division, print_functio | n, unicode_literals
from django.core.management.base import BaseCommand, CommandError
import subprocess
from pgrunner import bin_path
from pgrunner.commands import get_port
class Command(BaseCommand):
    help = 'Run psql with correct database'

    def run_from_argv(self, argv):
        """Launch psql against the local pgrunner-managed database.

        All command-line arguments after the subcommand name are
        forwarded to psql unchanged.
        """
        port = get_port()
        cmd = [bin_path('psql'), '-p', str(port), '-h', '127.0.0.1', 'django']
        cmd.extend(argv[2:])
        # Echo the full command so the user can see what is executed.
        print(' '.join(cmd))
        subprocess.call(cmd)
|
brettcs/diffoscope | diffoscope/comparators/text.py | Python | gpl-3.0 | 2,721 | 0.002575 | # -*- coding: utf-8 -*-
#
# diffoscope: in-depth comparison of files, archives, and directories
#
# Copyright © 2014-2015 Jérémy Bobbio <lunar@debian.org>
#
# diffoscope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# diffoscope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with diffoscope. If not, see <https://www.gnu.org/licenses/>.
import re
import codecs
from diffoscope.difference import Difference
from .utils.file import File
def order_only_difference(unified_diff):
    """Return True when the diff only reorders lines (same multiset added/removed)."""
    added = []
    removed = []
    for line in unified_diff.splitlines():
        if line.startswith('+'):
            added.append(line[1:])
        elif line.startswith('-'):
            removed.append(line[1:])
    # Faster check: a pure reordering has equally many lines on each side.
    if len(added) != len(removed):
        return False
    # Same multiset of lines, but not the identical sequence.
    return sorted(added) == sorted(removed) and added != removed
class TextFile(File):
    RE_FILE_TYPE = re.compile(r'\btext\b')

    @property
    def encoding(self):
        """Guessed text encoding of this file (cached after first access)."""
        if not hasattr(self, '_encoding'):
            self._encoding = File.guess_encoding(self.path)
        return self._encoding

    def compare(self, other, source=None):
        """Diff two text files, decoding each with its detected encoding.

        Falls back to a byte-level comparison when either file cannot be
        decoded; an encoding mismatch is reported as an extra detail.
        """
        my_encoding = self.encoding or 'utf-8'
        other_encoding = other.encoding or 'utf-8'
        try:
            with codecs.open(self.path, 'r', encoding=my_encoding) as my_content, \
                 codecs.open(other.path, 'r', encoding=other_encoding) as other_content:
                difference = Difference.from_text_readers(my_content, other_content, self.name, other.name, source)
                # Check if difference is only in line order.
                if difference and order_only_difference(difference.unified_diff):
                    difference.add_comment("ordering differences only")
            if my_encoding != other_encoding:
                if difference is None:
                    difference = Difference(None, self.path, other.path, source)
                difference.add_details([Difference.from_text(my_encoding, other_encoding, None, None, source='encoding')])
            return difference
        except (LookupError, UnicodeDecodeError):
            # unknown or misdetected encoding
            return self.compare_bytes(other, source)
|
HybridF5/jacket | jacket/storage/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py | Python | apache-2.0 | 18,157 | 0.000386 | # (c) Copyright 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Script to push the zone configuration to Cisco SAN switches.
"""
import random
import re
from eventlet import greenthread
from oslo_c | oncurrency import processutils
from oslo_log import log as logging
from oslo_utils import excutils
import six
from jacket.storage import exception
from jacket.storage.i18n import _, _LE, _LI, _LW
from jacket.storage import ssh_utils
from jacket.storage import utils
import jacket.storage.zonemanager.drivers.cisco.fc_zone_constants as ZoneConstant
LOG = logging.getLogger(__name__)
class CiscoFCZoneClientCLI(object):
| """Cisco FC zone client cli implementation.
OpenStack Fibre Channel zone client cli connector
to manage FC zoning in Cisco SAN fabrics.
Version history:
1.0 - Initial Cisco FC zone client cli
"""
switch_ip = None
switch_port = '22'
switch_user = 'admin'
switch_pwd = 'none'
    def __init__(self, ipaddress, username, password, port, vsan):
        """Initialize the client with switch credentials and fabric VSAN."""
        self.switch_ip = ipaddress
        self.switch_port = port
        self.switch_user = username
        self.switch_pwd = password
        self.fabric_vsan = vsan
        # SSH connection pool is created lazily on first command execution.
        self.sshpool = None
    def get_active_zone_set(self):
        """Return the active zone configuration.
        Return active zoneset from fabric. When none of the configurations
        are active then it will return empty map.
        :returns: Map -- active zone set map in the following format
        {
            'zones':
            {'openstack50060b0000c26604201900051ee8e329':
            ['50060b0000c26604', '201900051ee8e329']
            },
            'active_zone_config': 'OpenStack_Cfg'
        }
        """
        zone_set = {}
        zone = {}
        zone_member = None
        zone_name = None
        switch_data = None
        zone_set_name = None
        try:
            # ' | no-more' is the NX-OS pipe that disables CLI output paging.
            switch_data = self._get_switch_info(
                [ZoneConstant.GET_ACTIVE_ZONE_CFG, self.fabric_vsan,
                 ' | no-more'])
        except exception.CiscoZoningCliException:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed getting active zone set "
                              "from fabric %s"), self.switch_ip)
        try:
            # Parse the show-command output line by line into a zone map.
            for line in switch_data:
                # Split on non-word characters,
                line_split = re.split('[\s\[\]]+', line)
                if ZoneConstant.CFG_ZONESET in line_split:
                    # zoneset name [name] vsan [vsan]
                    zone_set_name = \
                        line_split[line_split.index(ZoneConstant.CFG_ZONESET)
                                   + 2]
                    continue
                if ZoneConstant.CFG_ZONE in line_split:
                    # zone name [name] vsan [vsan]
                    zone_name = \
                        line_split[line_split.index(ZoneConstant.CFG_ZONE) + 2]
                    zone[zone_name] = list()
                    continue
                if ZoneConstant.CFG_ZONE_MEMBER in line_split:
                    # Examples:
                    # pwwn c0:50:76:05:15:9f:00:12
                    # * fcid 0x1e01c0 [pwwn 50:05:07:68:02:20:48:04] [V7K_N1P2]
                    zone_member = \
                        line_split[
                            line_split.index(ZoneConstant.CFG_ZONE_MEMBER) + 1]
                    zone_member_list = zone.get(zone_name)
                    zone_member_list.append(zone_member)
            zone_set[ZoneConstant.CFG_ZONES] = zone
            zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG] = zone_set_name
        except Exception as ex:
            # In case of parsing error here, it should be malformed cli output.
            msg = _("Malformed zone configuration: (switch=%(switch)s "
                    "zone_config=%(zone_config)s)."
                    ) % {'switch': self.switch_ip,
                         'zone_config': switch_data}
            LOG.error(msg)
            exc_msg = _("Exception: %s") % six.text_type(ex)
            LOG.error(exc_msg)
            raise exception.FCZoneDriverException(reason=msg)
        return zone_set
    def add_zones(self, zones, activate, fabric_vsan, active_zone_set,
                  zone_status):
        """Add zone configuration.
        This method will add the zone configuration passed by user.
        input params:
        zones - zone names mapped to members and VSANs.
        zone members are colon separated but case-insensitive
        { zonename1:[zonememeber1,zonemember2,...],
        zonename2:[zonemember1, zonemember2,...]...}
        e.g: {'openstack50060b0000c26604201900051ee8e329':
        ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']
        }
        activate - True/False
        """
        LOG.debug("Add Zones - Zones passed: %s", zones)
        LOG.debug("Active zone set: %s", active_zone_set)
        zone_list = active_zone_set[ZoneConstant.CFG_ZONES]
        LOG.debug("zone list: %s", zone_list)
        LOG.debug("zone status: %s", zone_status)
        cfg_name = active_zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG]
        zone_cmds = [['conf'],
                     ['zoneset', 'name', cfg_name, 'vsan', fabric_vsan]]
        for zone in zones.keys():
            # if zone exists, its an update. Delete & insert
            LOG.debug("Update call")
            if zone in zone_list:
                # Response from get_active_zone_set strips colons from WWPNs
                current_zone = set(zone_list[zone])
                new_wwpns = map(lambda x: x.lower().replace(':', ''),
                                zones[zone])
                new_zone = set(new_wwpns)
                # Only delete/re-add when membership actually changed.
                if current_zone != new_zone:
                    try:
                        self.delete_zones(zone, activate, fabric_vsan,
                                          active_zone_set, zone_status)
                    except exception.CiscoZoningCliException:
                        with excutils.save_and_reraise_exception():
                            LOG.error(_LE("Deleting zone failed %s"), zone)
                    LOG.debug("Deleted Zone before insert : %s", zone)
            zone_cmds.append(['zone', 'name', zone])
            for member in zones[zone]:
                zone_cmds.append(['member', 'pwwn', member])
        zone_cmds.append(['end'])
        try:
            LOG.debug("Add zones: Config cmd to run: %s", zone_cmds)
            self._ssh_execute(zone_cmds, True, 1)
            if activate:
                self.activate_zoneset(cfg_name, fabric_vsan, zone_status)
            # Persist running-config so zoning survives a switch reboot.
            self._cfg_save()
        except Exception as e:
            msg = _("Creating and activating zone set failed: "
                    "(Zone set=%(zoneset)s error=%(err)s)."
                    ) % {'zoneset': cfg_name, 'err': six.text_type(e)}
            LOG.error(msg)
            raise exception.CiscoZoningCliException(reason=msg)
def activate_zoneset(self, cfgname, fabric_vsan, zone_status):
"""Method to Activate the zone config. Param cfgname - ZonesetName."""
LOG.debug("zone status: %s", zone_status)
cmd_list = [['conf'],
['zoneset', 'activate', 'name', cfgname, 'vsan',
self.fabric_vsan]]
if zone_status['mode'] == 'enhanced':
cmd_list.append(['zone', 'commit', 'vsan', fabric_vsan])
cmd_list.append(['end'])
return self._ssh_execute(cm |
easmetz/inasafe | safe/report/impact_report.py | Python | gpl-3.0 | 24,112 | 0.000249 | # coding=utf-8
"""
Module to generate impact report using QgsComposition.
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'akbargumbira@gmail.com'
__revision__ = '$Format:%H$'
__date__ = '21/03/2014'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import os
import logging
from qgis.core import (
QgsComposition,
QgsRectangle,
QgsMapLayer,
QgsComposerHtml,
QgsComposerFrame)
# Whoaa this is ugly can we get rid of it?
try:
# noinspection PyUnresolvedReferences
# pylint: disable=unused-import
from qgis.core import QgsLayerTreeGroup, QgsMapSettings
# pylint: enable=unused-import
except ImportError:
from qgis.core import QgsMapRenderer
from PyQt4.QtCore import QUrl
from safe.defaults import disclaimer
from safe.common.utilities import temp_dir, unique_filename
from safe.common.version import get_version
from safe.common.exceptions import (
KeywordNotFoundError, TemplateLoadingError)
from safe import messaging as m
from safe.messaging import styles
from safe.utilities.keyword_io import KeywordIO
from safe.utilities.gis import qgis_version
from safe.utilities.utilities import impact_attribution, html_to_file
from safe.utilities.resources import (
html_footer, html_header, resource_url, resources_path)
from safe.utilities.i18n import tr
from safe.defaults import (
white_inasafe_logo_path,
black_inasafe_logo_path,
supporters_logo_path,
default_north_arrow_path)
from safe.report.template_composition import TemplateComposition
from safe.impact_template.utilities import get_report_template
INFO_STYLE = styles.INFO_STYLE
LOGO_ELEMENT = m.Image(
resource_url(
resources_path('img', 'logos', 'inasafe-logo.png')),
'InaSAFE Logo')
LOGGER = logging.getLogger('InaSAFE')
class ImpactReport(object):
"""A class for creating report using QgsComposition."""
def __init__(self, iface, template, layer, extra_layers=[]):
"""Constructor for the Composition Report class.
:param iface: Reference to the QGIS iface object.
:type iface: QgsAppInterface
:param template: The QGIS template path.
:type template: str
"""
LOGGER.debug('InaSAFE Impact Report class initialised')
self._iface = iface
self._template = None
self.template = template
self._layer = layer
self._extra_layers = extra_layers
self._extent = self._iface.mapCanvas().extent()
self._page_dpi = 300.0
self._black_inasafe_logo = black_inasafe_logo_path()
self._white_inasafe_logo = white_inasafe_logo_path()
# User can change this path in preferences
self._organisation_logo = supporters_logo_path()
self._supporters_logo = supporters_logo_path()
self._north_arrow = default_north_arrow_path()
self._disclaimer = disclaimer()
# For QGIS < 2.4 compatibility
# QgsMapSettings is added in 2.4
if qgis_version() < 20400:
map_settings = self._iface.mapCanvas().mapRenderer()
else:
map_settings = self._iface.mapCanvas().mapSettings()
self._template_composition = TemplateComposition(
template_path=self.template,
map_settings=map_settings)
self._keyword_io = KeywordIO()
    @property
    def template(self):
        """Getter to the template path used for report generation."""
        return self._template
    @template.setter
    def template(self, template):
        """Set template that will be used for report generation.
        :param template: Path to composer template
        :type template: str
        """
        # Invalid or missing paths fall back to the bundled A4 template.
        if isinstance(template, basestring) and os.path.exists(template):
            self._template = template
        else:
            self._template = resources_path(
                'qgis-composer-templates', 'a4-portrait-blue.qpt')
        # Also recreate template composition
        self._template_composition = TemplateComposition(
            template_path=self.template,
            map_settings=self._iface.mapCanvas().mapSettings())
    @property
    def layer(self):
        """Getter to layer that will be used for stats, legend, reporting."""
        return self._layer
    @layer.setter
    def layer(self, layer):
        """Set the layer that will be used for stats, legend and reporting.
        :param layer: Layer that will be used for stats, legend and reporting.
        :type layer: QgsMapLayer, QgsRasterLayer, QgsVectorLayer
        """
        self._layer = layer
    @property
    def extra_layers(self):
        """Getter to extra layers
        extra layers will be rendered alongside impact layer
        """
        return self._extra_layers
    @extra_layers.setter
    def extra_layers(self, extra_layers):
        """Set extra layers
        extra layers will be rendered alongside impact layer
        :param extra_layers: List of QgsMapLayer
        :type extra_layers: list(QgsMapLayer)
        """
        self._extra_layers = extra_layers
    @property
    def composition(self):
        """Getter to QgsComposition instance."""
        return self._template_composition.composition
    @property
    def extent(self):
        """Getter to extent for map component in composition."""
        return self._extent
    @extent.setter
    def extent(self, extent):
        """Set the extent that will be used for map component in composition.
        :param extent: The extent.
        :type extent: QgsRectangle
        """
        # Anything that is not a QgsRectangle silently falls back to the
        # current map canvas extent.
        if isinstance(extent, QgsRectangle):
            self._extent = extent
        else:
            self._extent = self._iface.mapCanvas().extent()
    @property
    def page_dpi(self):
        """Getter to page resolution in dots per inch."""
        return self._page_dpi
    @page_dpi.setter
    def page_dpi(self, page_dpi):
        """Set the page resolution in dpi.
        :param page_dpi: The page resolution in dots per inch.
        :type page_dpi: int
        """
        self._page_dpi = page_dpi
    @property
    def north_arrow(self):
        """Getter to north arrow path."""
        return self._north_arrow
    @north_arrow.setter
    def north_arrow(self, north_arrow_path):
        """Set image that will be used as north arrow in reports.
        :param north_arrow_path: Path to the north arrow image.
        :type north_arrow_path: str
        """
        # Invalid paths fall back to the default bundled north arrow.
        if isinstance(north_arrow_path, basestring) and os.path.exists(
                north_arrow_path):
            self._north_arrow = north_arrow_path
        else:
            self._north_arrow = default_north_arrow_path()
    @property
    def inasafe_logo(self):
        """Getter to safe logo path.
        .. versionchanged:: 3.2 - this property is now read only.
        """
        return self._black_inasafe_logo
    @property
    def organisation_logo(self):
        """Getter to organisation logo path."""
        return self._organisation_logo
    @organisation_logo.setter
    def organisation_logo(self, logo):
        """Set image that will be used as organisation logo in reports.
        :param logo: Path to the organisation logo image.
        :type logo: str
        """
        # Invalid paths fall back to the default supporters logo.
        if isinstance(logo, basestring) and os.path.exists(logo):
            self._organisation_logo = logo
        else:
            self._organisation_logo = supporters_logo_path()
    @property
    def supporters_logo(self):
        """Getter to supporters logo path - this is a read only property.
        It always returns the InaSAFE supporters logo unlike the organisation
        logo which is customisable.
        .. versionadded:: 3.2
        """
        return self._supporters_logo
    @property
    def disclaimer(self):
        """Getter to disclaimer text."""
        return self._disclaimer
@disclaimer.setter
def disclaimer(self, text):
"""Set t |
cgstudiomap/cgstudiomap | main/parts/connector-telephony/crm_phone/crm_phone.py | Python | agpl-3.0 | 4,443 | 0 | # -*- encoding: utf-8 -*-
##############################################################################
#
# CRM phone module for Odoo/OpenERP
# Copyright (c) 2012-2014 Akretion (http://www.akretion.com)
# @author: Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, _
class CrmLead(models.Model):
    """Extend crm.lead with phone number normalisation and caller-ID naming."""
    _name = 'crm.lead'
    _inherit = ['crm.lead', 'phone.common']
    # Fields whose values are reformatted by phone.common on create/write.
    _phone_fields = ['phone', 'mobile', 'fax']
    _phone_name_sequence = 20
    _country_field = 'country_id'
    _partner_field = None

    def create(self, cr, uid, vals, context=None):
        # Old-API (cr, uid) signature: normalise phone fields before insert.
        vals_reformated = self._generic_reformat_phonenumbers(
            cr, uid, None, vals, context=context)
        return super(CrmLead, self).create(
            cr, uid, vals_reformated, context=context)

    def write(self, cr, uid, ids, vals, context=None):
        # Normalise phone fields on update as well.
        vals_reformated = self._generic_reformat_phonenumbers(
            cr, uid, ids, vals, context=context)
        return super(CrmLead, self).write(
            cr, uid, ids, vals_reformated, context=context)

    def name_get(self, cr, uid, ids, context=None):
        """Return '<contact> (<company>)' style names when the lookup comes
        from a caller-ID request (context key 'callerid'); otherwise defer
        to the standard name_get.
        """
        if context is None:
            context = {}
        if context.get('callerid'):
            res = []
            # 'long' is Python 2 only; single ids are wrapped into a list.
            if isinstance(ids, (int, long)):
                ids = [ids]
            for lead in self.browse(cr, uid, ids, context=context):
                # Prefer the most informative combination available.
                if lead.partner_name and lead.contact_name:
                    name = u'%s (%s)' % (lead.contact_name, lead.partner_name)
                elif lead.partner_name:
                    name = lead.partner_name
                elif lead.contact_name:
                    name = lead.contact_name
                else:
                    name = lead.name
                res.append((lead.id, name))
            return res
        else:
            return super(CrmLead, self).name_get(
                cr, uid, ids, context=context)
class CrmPhonecall(models.Model):
    """Extend crm.phonecall so partner phone fields are stored normalised."""
    _name = 'crm.phonecall'
    _inherit = ['crm.phonecall', 'phone.common']
    # Fields whose values are reformatted by phone.common on create/write.
    _phone_fields = ['partner_phone', 'partner_mobile']
    _country_field = None
    _partner_field = 'partner_id'

    def create(self, cr, uid, vals, context=None):
        # Normalise phone numbers before the record is created.
        vals_reformated = self._generic_reformat_phonenumbers(
            cr, uid, None, vals, context=context)
        return super(CrmPhonecall, self).create(
            cr, uid, vals_reformated, context=context)

    def write(self, cr, uid, ids, vals, context=None):
        # Normalise phone numbers before the record is updated.
        vals_reformated = self._generic_reformat_phonenumbers(
            cr, uid, ids, vals, context=context)
        return super(CrmPhonecall, self).write(
            cr, uid, ids, vals_reformated, context=context)
class ResUsers(models.Model):
    """Add a per-user preference controlling post-click2dial call creation."""
    _inherit = "res.users"

    # Field name starts with 'context_' to allow modification by the user
    # in his preferences, cf server/openerp/addons/base/res/res_users.py
    # in "def write()" of "class res_users(osv.osv)"
    context_propose_creation_crm_call = fields.Boolean(
        string='Propose to create a call in CRM after a click2dial',
        default=True)
class PhoneCommon(models.AbstractModel):
    """Hook into click2dial to offer creating a CRM phone call afterwards."""
    _inherit = 'phone.common'

    @api.model
    def click2dial(self, erp_number):
        """Dial *erp_number* and, when the user preference is enabled and the
        call was started from a partner or lead, add a follow-up action that
        opens the 'create CRM phonecall' wizard.
        """
        res = super(PhoneCommon, self).click2dial(erp_number)
        if (
                self.env.user.context_propose_creation_crm_call and
                self.env.context.get('click2dial_model')
                in ('res.partner', 'crm.lead')):
            res.update({
                'action_name': _('Create Call in CRM'),
                'action_model': 'wizard.create.crm.phonecall',
            })
        return res
|
def split_camel_case(input):
    """Convert camelCase segments of *input* into lower snake_case.

    Existing underscores are kept as separators; any double underscores in
    the result are collapsed to a single underscore.
    """
    def decamel(segment):
        # Lower-case the segment, inserting '_' before each inner capital.
        if len(segment) <= 0:
            return ""
        chars = [segment[0].lower()]
        for ch in segment[1:]:
            chars.append("_" + ch.lower() if ch.isupper() else ch)
        return "".join(chars)

    pieces = []
    for segment in input.split("_"):
        pieces.append(decamel(segment) if is_camel_case_name(segment) else segment)
    return "_".join(pieces).replace("__", "_")


def is_camel_case_name(input):
    """Return True if *input* is mixed-case and contains no underscores."""
    if '_' in input:
        return False
    return not (input.islower() or input.isupper())
ooici/coi-services | ion/processes/event/notification_sent_scanner.py | Python | bsd-2-clause | 5,398 | 0.005372 | #!/usr/bin/env python
"""
@package ion.processes.event
@file ion/processes/event/notification_sent_scanner.py
@author Brian McKenna <bmckenna@asascience.com>
@brief NotificationSentScanner plugin. An EventPersister plugin scanning for, and keeping state(count) of, NotificationEvent's
"""
import time
from datetime import date, datetime, timedelta
from collections import Counter
from pyon.core import bootstrap
from pyon.core.exception import NotFound
from pyon.event.event import EventPublisher
from pyon.public import log, OT
NOTIFICATION_EVENTS = {OT.NotificationSentEvent}
class NotificationSentScanner(object):
    """EventPersister plugin that counts NotificationSentEvents per user and
    disables a notification once its configured maximum has been reached.

    Counts are kept in memory and periodically persisted to the ObjectStore
    under the 'notification_counts' document; they are flushed once a day.
    """

    def __init__(self, container=None):
        self.container = container or bootstrap.container_instance
        self.object_store = self.container.object_store
        self.resource_registry = self.container.resource_registry
        self.event_publisher = EventPublisher()
        # next_midnight is used to flush the counts (see NOTE in _midnight)
        self.next_midnight = self._midnight(days=1)
        self.persist_interval = 300  # interval in seconds to persist/reload counts TODO: use CFG
        self.time_last_persist = 0
        # initialize volatile counts (memory only, routinely persisted below)
        self._initialize_counts()

    def process_events(self, event_list):
        """Count notification events and disable any notification whose
        per-user count has reached its notification_max."""
        # set() of notifications to disable; _disable_notification can
        # happen >1 time depending on len(event_list)
        notifications = set()
        for e in event_list:
            # skip if not a NotificationEvent
            if e.type_ not in NOTIFICATION_EVENTS:
                continue
            user_id = e.user_id
            notification_id = e.notification_id
            notification_max = e.notification_max  # default value is zero indicating no max
            # initialize user_id if necessary
            if user_id not in self.counts:
                self.counts[user_id] = Counter()
            # increment counts (user_id key to allow ALL to be counted)
            self.counts[user_id]['all'] += 1  # tracks total notifications by user
            self.counts[user_id][notification_id] += 1
            self.counts_updated_since_persist = True
            # disable notification if notification_max reached
            if notification_max:
                if self.counts[user_id][notification_id] >= notification_max:
                    # TODO this could be dict so key could be checked for insertion
                    notifications.add(self._disable_notification(notification_id))
        # update notifications that have been disabled
        if notifications:
            self._update_notifications(notifications)
        # only attempt to persist counts if there was an update
        if self.counts_updated_since_persist:
            if time.time() > (self.time_last_persist + self.persist_interval):
                self._persist_counts()
        # reset counts if reset_interval has elapsed
        if time.time() > self.next_midnight:
            self._reset_counts()

    # NOTE: ObjectStore 'objects' contain '_id' and '_rev'
    def _initialize_counts(self):
        """ initialize the volatile (memory only) counts from ObjectStore if available """
        try:
            self.counts_obj = self.object_store.read_doc('notification_counts')
            # persisted as standard dicts, convert to Counter objects ignoring
            # the ObjectStore '_id' and '_rev'
            self.counts = {k: Counter(v) for k, v in self.counts_obj.items() if not (k == '_id' or k == '_rev')}
        except NotFound:
            self.counts = {}
            self._persist_counts()

    def _persist_counts(self):
        """ persist the counts to ObjectStore """
        try:
            self.counts_obj = self.object_store.read_doc('notification_counts')
        except NotFound:
            # first run: create an empty document, then re-read to get _id/_rev
            self.object_store.create_doc({}, 'notification_counts')
            self.counts_obj = self.object_store.read_doc('notification_counts')
        # Counter objects cannot be persisted, convert to standard dicts
        # (leaves '_id', '_rev' untouched)
        self.counts_obj.update({k: dict(v) for k, v in self.counts.items()})
        self.object_store.update_doc(self.counts_obj)
        self.time_last_persist = time.time()
        self.counts_updated_since_persist = False  # boolean to check if counts should be persisted

    def _reset_counts(self):
        """ clears the persisted counts """
        self.object_store.delete_doc('notification_counts')
        self._initialize_counts()  # NOTE: NotificationRequest boolean disabled_by_system reset by UNS
        self.next_midnight = self._midnight(days=1)

    def _disable_notification(self, notification_id):
        """ set the disabled_by_system boolean to True (not yet saved) """
        notification = self.resource_registry.read(notification_id)
        notification.disabled_by_system = True
        return notification

    def _update_notifications(self, notifications):
        """ updates notifications then publishes ReloadUserInfoEvent """
        for n in notifications:
            self.resource_registry.update(n)
        self.event_publisher.publish_event(event_type=OT.ReloadUserInfoEvent)

    def _midnight(self, days=0):
        """ NOTE: comment says midnight PDT (+0700), but PDT is UTC-7 --
        the +7h offset converts local-naive midnight as if local time were
        UTC-7. TODO(review): confirm intended timezone handling. """
        dt = datetime.combine(date.today(), datetime.min.time()) + timedelta(days=days, hours=7)
        return (dt - datetime.utcfromtimestamp(0)).total_seconds()
|
mapzen/vector-datasource | integration-test/1596-junk-healthcare-values.py | Python | mit | 10,521 | 0 | # -*- encoding: utf-8 -*-
from . import FixtureTest
class HealthcareTest(FixtureTest):
    def test_blood_donation(self):
        """healthcare=blood_donation ways should emit a 'blood_bank' POI."""
        import dsl

        z, x, y = (16, 11205, 26166)

        self.generate_fixtures(
            # https://www.openstreetmap.org/way/227712531
            dsl.way(227712531, dsl.tile_box(z, x, y), {
                'addr:city': u'Los Angeles',
                'addr:housenumber': u'1045',
                'addr:postcode': u'90024',
                'addr:state': u'CA',
                'addr:street': u'Gayley Avenue',
                'blood:plasma': u'yes',
                'blood:platelets': u'yes',
                'blood:whole': u'yes',
                'building': u'yes',
                'ele': u'105.8',
                'elevator': u'yes',
                'healthcare': u'blood_donation',
                'height': u'7.9',
                'lacounty:ain': u'4363025008',
                'lacounty:bld_id': u'426122844901',
                'level': u'1',
                'name': u'UCLA Blood & Platelet Center',
                'office': u'yes',
                'opening_hours': u'Mo-Fr 08:00-17:00',
                'operator': u'UCLA Health',
                'phone': u'(310) 825-0888',
                'source': u'openstreetmap.org',
                'start_date': u'1950',
                'website': u'http://gotblood.ucla.edu/westwood-center',
            }),
        )

        # NOTE: blood_donation is mapped to blood_bank.
        self.assert_has_feature(
            z, x, y, 'pois', {
                'id': 227712531,
                'kind': u'blood_bank',
            })
    def test_hospice(self):
        """healthcare=hospice ways should emit a 'hospice' POI."""
        import dsl

        z, x, y = (16, 10427, 25175)

        self.generate_fixtures(
            # https://www.openstreetmap.org/way/544655321
            dsl.way(544655321, dsl.tile_box(z, x, y), {
                'building': u'yes',
                'healthcare': u'hospice',
                'name': u'Memorial Hospice',
                'source': u'openstreetmap.org',
            }),
        )

        self.assert_has_feature(
            z, x, y, 'pois', {
                'id': 544655321,
                'kind': u'hospice',
            })
    def test_optometrist(self):
        """healthcare=optometrist ways should emit an 'optometrist' POI."""
        import dsl

        z, x, y = (16, 10494, 24676)

        self.generate_fixtures(
            # https://www.openstreetmap.org/way/566181198
            dsl.way(566181198, dsl.tile_box(z, x, y), {
                'building': u'yes',
                'healthcare': u'optometrist',
                'name': u'Shasta Eye Medical Group',
                'source': u'openstreetmap.org',
            }),
        )

        self.assert_has_feature(
            z, x, y, 'pois', {
                'id': 566181198,
                'kind': u'optometrist',
            })
    def test_physiotherapist(self):
        """healthcare=physiotherapist ways should emit a 'physiotherapist' POI."""
        import dsl

        z, x, y = (16, 11428, 26418)

        self.generate_fixtures(
            # https://www.openstreetmap.org/way/31815993
            dsl.way(31815993, dsl.tile_box(z, x, y), {
                'addr:city': u'San Diego',
                'addr:country': u'US',
                'addr:housenumber': u'10803',
                'addr:state': u'CA',
                'building': u'yes',
                'building_type': u'industrial',
                'healthcare': u'physiotherapist',
                'name': u'Function Smart Physical Therapy',
                'sangis:OBJECTID': u'9872',
                'sangis:TYPE': u'Industrial',
                'source': u'openstreetmap.org',
            }),
        )

        self.assert_has_feature(
            z, x, y, 'pois', {
                'id': 31815993,
                'kind': u'physiotherapist',
            })
    def test_psychotherapist(self):
        """healthcare=psychotherapist ways should emit a 'psychotherapist' POI."""
        import dsl

        z, x, y = (16, 11730, 26442)

        self.generate_fixtures(
            # https://www.openstreetmap.org/way/602686729
            dsl.way(602686729, dsl.tile_box(z, x, y), {
                'addr:city': u'El Centro',
                'addr:postcode': u'92243',
                'addr:state': u'CA',
                'building': u'yes',
                'healthcare': u'psychotherapist',
                'name': u'Behavior Health',
                'source': u'openstreetmap.org',
            }),
        )

        self.assert_has_feature(
            z, x, y, 'pois', {
                'id': 602686729,
                'kind': u'psychotherapist',
            })
    def test_rehabilitation(self):
        """healthcare=rehabilitation ways should emit a 'rehabilitation' POI."""
        import dsl

        z, x, y = (16, 10577, 25429)

        self.generate_fixtures(
            # https://www.openstreetmap.org/way/351588744
            dsl.way(351588744, dsl.box_area(z, x, y, 1593), {
                'addr:housenumber': '480',
                'addr:street': 'North 1st Street',
                'amenity': 'healthcare',
                'building': 'yes',
                'building:levels': '2',
                'healthcare': 'rehabilitation',
                'healthcare:speciality': 'brain_injury',
                'name': 'Services for Brain Injury',
                'source': 'openstreetmap.org',
                'website': 'http://legalaidsociety.org/',
            }),
        )

        self.assert_has_feature(
            z, x, y, 'pois', {
                'id': 351588744,
                'kind': 'rehabilitation',
            })
    def test_blood_bank(self):
        """healthcare=blood_bank ways should emit a 'blood_bank' POI."""
        import dsl

        z, x, y = (16, 13332, 26538)

        self.generate_fixtures(
            # https://www.openstreetmap.org/way/123526145
            dsl.way(123526145, dsl.box_area(z, x, y, 601), {
                'addr:housenumber': '1200',
                'addr:postcode': '88011',
                'addr:street': 'Commerce Drive',
                'building': 'yes',
                'healthcare': 'blood_bank',
                'name': 'United Blood Services',
                'phone': '+1-575-527-1322',
                'source': 'openstreetmap.org',
                'website': 'http://unitedbloodservices.org/',
            }),
        )

        self.assert_has_feature(
            z, x, y, 'pois', {
                'id': 123526145,
                'kind': 'blood_bank',
            })
    def test_chiropractor(self):
        """healthcare=chiropractor nodes should emit a 'chiropractor' POI."""
        import dsl

        z, x, y = (16, 13686, 25100)

        self.generate_fixtures(
            # https://www.openstreetmap.org/node/429672737
            dsl.point(429672737, (-104.815915, 38.767464), {
                'healthcare': 'chiropractor',
                'name': 'Cheyenne Mountain Chiropractic',
                'shop': 'chiropractor',
                'source': 'openstreetmap.org',
                'url': 'http://www.cheyennemountainchiro.com/',
            }),
        )

        self.assert_has_feature(
            z, x, y, 'pois', {
                'id': 429672737,
                'kind': 'chiropractor',
            })
    def test_midwife(self):
        """healthcare=midwife nodes should emit a 'midwife' POI."""
        import dsl

        z, x, y = (16, 10572, 25430)

        self.generate_fixtures(
            # https://www.openstreetmap.org/node/4976744222
            dsl.point(4976744222, (-121.921157, 37.342539), {
                'healthcare': 'midwife',
                'name': 'South Bay Homebirth Collective',
                'source': 'openstreetmap.org',
                'website': 'https://www.southbayhomebirthcollective.com/',
            }),
        )

        self.assert_has_feature(
            z, x, y, 'pois', {
                'id': 4976744222,
                'kind': 'midwife',
            })
def test_occupational_therapist(self):
import dsl
z, x, y = (16, 13009, 25019)
self.generate_fixtures(
# https://www.openstreetmap.org/node/5990502872
dsl.point(5990502872, (-108.534998, 39.113415), {
'addr:housenumber': '751',
'addr:street': 'Horizon Court',
'addr:unit': '247',
'healthcare': 'occupational_therapist',
'name': 'OSA Transpersonal Counseling',
'office': 'company',
'source': 'openstreetmap.org',
}),
)
self.assert_has_feature(
z, x, y, 'pois', {
'id': 599 |
apache/allura | Allura/allura/lib/utils.py | Python | apache-2.0 | 27,100 | 0.001144 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import base64
import operator
from contextlib import contextmanager
import time
import string
import hashlib
import binascii
import logging.handlers
import os.path
import datetime
import random
import mimetypes
import re
import magic
from itertools import groupby
import operator as op
import collections
import ming
from six.moves.urllib.parse import urlparse
import six.moves.urllib.request
import six.moves.urllib.parse
import six.moves.urllib.error
import types
import socket
import tg
import emoji
import json
from collections import OrderedDict
from bs4 import BeautifulSoup
from tg import redirect, app_globals as g
from tg.decorators import before_validate
from tg.controllers.util import etag_cache
from paste.deploy.converters import asbool, asint
from markupsafe import Markup
from webob import exc
from pygments.formatters import HtmlFormatter
from setproctitle import getproctitle
import html5lib.filters.sanitizer
from ew import jinja2_ew as ew
from ming.utils import LazyProperty
from ming.odm.odmsession import ODMCursor
from ming.odm import session
import six
MARKDOWN_EXTENSIONS = ['.markdown', '.mdown', '.mkdn', '.mkd', '.md']
def clean_ming_config(config):
    """Strip empty ``replicaSet`` settings out of a Ming config, in place.

    Removes every ``*.replicaSet`` key whose value is falsy, and drops a
    stray ``replicaSet=&`` fragment from any ``mongo_host`` URL.

    :param config: mutable mapping of config keys to values (mutated).
    :return: the same mapping, for convenience.
    """
    for key in list(config.keys()):
        value = config[key]
        # delete replicaSet=''
        if '.replicaSet' in key and not value:
            del config[key]
        elif 'mongo_host' in key and 'replicaSet=&' in value:
            config[key] = value.replace('replicaSet=&', '')
    return config
def configure_ming(conf):
    """Configure Ming (the MongoDB ODM) from a cleaned config mapping."""
    ming.configure(**clean_ming_config(conf))
def permanent_redirect(url):
    """Issue a 301 (permanent) redirect to *url*.

    tg.redirect raises a 302 HTTPFound; intercept it and re-raise as
    HTTPMovedPermanently so clients may cache the new location.
    """
    try:
        tg.redirect(url)
    except exc.HTTPFound as err:
        raise exc.HTTPMovedPermanently(location=err.location)
def guess_mime_type(filename):
    '''Guess MIME type based on filename.

    Applies heuristics, tweaks, and defaults in centralized manner.

    :param filename: file name or path whose extension is examined.
    :return: a MIME type string; 'application/octet-stream' when unknown.
    '''
    # Consider changing to strict=False
    guessed, _encoding = mimetypes.guess_type(filename, strict=True)
    if guessed:
        return guessed
    return 'application/octet-stream'
class ConfigProxy:
    '''Wrapper for loading config values at module scope, so a module can be
    imported before tg.config has been initialized.'''

    def __init__(self, **kw):
        # Map of attribute name -> real config key.
        self._kw = kw

    def __getattr__(self, k):
        return self.get(k)

    def get(self, key, default=None):
        real_key = self._kw.get(key, key)
        return tg.config.get(real_key, default)

    def get_bool(self, key):
        return asbool(self.get(key))
class lazy_logger:
    '''Lazy instantiation of a logger, to ensure that it does not get
    created before logging is configured (which would make it disabled)'''

    def __init__(self, name):
        self._name = name

    @LazyProperty
    def _logger(self):
        # Created on first access; cached by ming's LazyProperty thereafter.
        return logging.getLogger(self._name)

    def __getattr__(self, name):
        # Only proxy public attributes; underscore lookups must fail fast
        # or they would recurse through this proxy before _logger exists.
        if name.startswith('_'):
            raise AttributeError(name)
        return getattr(self._logger, name)
# Module-level logger, instantiated lazily so logging config applies first.
log = lazy_logger(__name__)
class CustomWatchedFileHandler(logging.handlers.WatchedFileHandler):
    """Custom log handler for Allura"""

    def format(self, record):
        """Prepends current process name to ``record.name`` if running in the
        context of a taskd process that is currently processing a task.
        """
        # taskd sets its process title to 'taskd:...' while working a task.
        title = getproctitle()
        if title.startswith('taskd:'):
            record.name = f"{title}:{record.name}"
        return super().format(record)
def chunked_find(cls, query=None, pagesize=1024, sort_key='_id', sort_dir=1):
    '''
    Execute a mongo query against the specified class, yield some results at
    a time (avoids mongo cursor timeouts if the total result set is very large).

    Pass an indexed sort_key for efficient queries. Default _id should work
    in most cases.

    NOTE: mutates the caller's *query* dict when sort_key paging is used.
    NOTE(review): the $gt-based paging assumes ascending order; confirm no
    caller passes sort_dir=-1 together with a sort_key.
    '''
    if query is None:
        query = {}
    page = 0
    max_id = None
    while True:
        if sort_key:
            if max_id:
                # resume after the last value seen on the previous page
                if sort_key not in query:
                    query[sort_key] = {}
                query[sort_key]['$gt'] = max_id
            q = cls.query.find(query).limit(pagesize).sort(sort_key, sort_dir)
        else:
            # skipping requires scanning, even for an indexed query
            q = cls.query.find(query).limit(pagesize).skip(pagesize * page)
        results = (q.all())
        if not results:
            break
        if sort_key:
            max_id = results[-1][sort_key]
        yield results
        page += 1
def chunked_list(l, n):
    """Yield successive n-sized chunks (slices) from sequence *l*."""
    start = 0
    length = len(l)
    while start < length:
        yield l[start:start + n]
        start += n
def chunked_iter(iterable, max_size):
    '''return iterable 'chunks' from the iterable of max size max_size'''
    def chunk_number(pair):
        # Group consecutive items by their index divided by the chunk size.
        return pair[0] // max_size

    for _, group in groupby(enumerate(iterable), key=chunk_number):
        yield (value for _, value in group)
class AntiSpam:
'''Helper class for bot-protecting forms'''
honey_field_template = string.Template('''<p class="$honey_class">
<label for="$fld_id">You seem to have CSS turned off.
Please don't fill out this field.</label><br>
<input id="$fld_id" name="$fld_name" type="text"><br></p>''')
    def __init__(self, request=None, num_honey=2, timestamp=None, spinner=None):
        """Build spam-protection state either for rendering a fresh form
        (GET / no request) or for validating a submitted one (POST).

        :param request: incoming request; None or GET means 'render' mode.
        :param num_honey: number of honeypot fields to emit.
        :param timestamp: optional fixed timestamp (defaults to now).
        :param spinner: optional fixed spinner bytes (defaults to generated).
        """
        self.num_honey = num_honey
        if request is None or request.method == 'GET':
            # Render mode: generate fresh timestamp/spinner for the form.
            self.request = tg.request
            self.timestamp = timestamp if timestamp else int(time.time())
            self.spinner = spinner if spinner else self.make_spinner()
            self.timestamp_text = str(self.timestamp)
            self.spinner_text = six.ensure_text(self._wrap(self.spinner))
        else:
            # Validate mode: recover timestamp/spinner from the submission.
            self.request = request
            self.timestamp_text = request.params['timestamp']
            self.spinner_text = request.params['spinner']
            self.timestamp = int(self.timestamp_text)
            self.spinner = self._unwrap(self.spinner_text)
        trans_fn = int
        self.spinner_ord = list(map(trans_fn, self.spinner))
        self.random_padding = [random.randint(0, 255) for x in self.spinner]
        self.honey_class = self.enc(self.spinner_text, css_safe=True)
        # The counter is to ensure that multiple forms in the same page
        # don't end up with the same id. Instead of doing:
        #
        #     honey0, honey1
        #
        # which just relies on 0..num_honey we include a counter
        # which is incremented every time extra_fields is called:
        #
        #     honey00, honey01, honey10, honey11
        self.counter = 0
@staticmethod
def _wrap(s):
'''Encode bytes to make it HTML id-safe (starts with alpha, includes
only digits, hyphens, underscores, colons, and periods). Luckily, base64
encoding doesn't use hyphens, underscores, colons, nor periods, so we'll
use these characters to replace its plus, slash, equals, and newline.
'''
s = base64.b64encode(six.ensure_binary(s))
s = s.rstrip(b'=\n')
s = s.replace(b'+', b'-').replace(b'/', b'_')
s = b'X' + s
return s
@staticmethod
def _unwrap(s):
s = s[1:]
s = six.ensure_binary(s)
s = s |
cropleyb/pentai | pentai/base/mock.py | Python | mit | 16,157 | 0.006313 | #
# (c) Dave Kirby 2001 - 2005
# mock@thedeveloperscoach.com
#
# Original call interceptor and call assertion code by Phil Dawes (pdawes@users.sourceforge.net)
# Call interceptor code enhanced by Bruce Cropley (cropleyb@yahoo.com.au)
#
# This Python module and associated files are released under the FreeBSD
# license. Essentially, you can do what you like with it except pretend you wrote
# it yourself.
#
#
# Copyright (c) 2005, Dave Kirby
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of this library nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permissi | on.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN | NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# mock@thedeveloperscoach.com
"""
Mock object library for Python. Mock objects can be used when unit testing
to remove a dependency on another production class. They are typically used
when the dependency would either pull in lots of other classes, or
significantly slow down the execution of the test.
They are also used to create exceptional conditions that cannot otherwise
be easily triggered in the class under test.
"""
__version__ = "0.1.0"
# Added in Python 2.1
import inspect
import re
class MockInterfaceError(Exception):
    """Raised when a mocked call does not match the mocked class's real interface."""
    pass
class Mock(object):
    """
    The Mock class emulates any other class for testing purposes.
    All method calls are stored for later examination.

    NOTE(review): this is Python-2-era code -- it relies on ``dict.has_key``
    and ``inspect.getargspec``, both removed in modern Python 3.
    """

    def __init__(self, returnValues=None, realClass=None):
        """
        The Mock class constructor takes a dictionary of method names and
        the values they return.  Methods that are not in the returnValues
        dictionary will return None.
        You may also supply a class whose interface is being mocked.
        All calls will be checked to see if they appear in the original
        interface. Any calls to methods not appearing in the real class
        will raise a MockInterfaceError.  Any calls that would fail due to
        non-matching parameter lists will also raise a MockInterfaceError.
        Both of these help to prevent the Mock class getting out of sync
        with the class it is Mocking.
        """
        self.mockCalledMethods = {}
        self.mockAllCalledMethods = []
        self.mockReturnValues = returnValues or {}
        self.mockExpectations = {}
        self.realClassMethods = None
        if realClass:
            self.realClassMethods = dict(inspect.getmembers(realClass, inspect.isroutine))
            # Validate that every canned return value names a real method.
            for retMethod in self.mockReturnValues.keys():
                if not self.realClassMethods.has_key(retMethod):
                    raise MockInterfaceError("Return value supplied for method '%s' that was not in the original class" % retMethod)
        self._setupSubclassMethodInterceptors()

    def _setupSubclassMethodInterceptors(self):
        # Wrap methods defined on subclasses so their calls are recorded too.
        methods = inspect.getmembers(self.__class__, inspect.isroutine)
        baseMethods = dict(inspect.getmembers(Mock, inspect.ismethod))
        for m in methods:
            name = m[0]
            # Don't record calls to methods of Mock base class.
            if not name in baseMethods:
                self.__dict__[name] = MockCallable(name, self, handcrafted=True)

    def __getattr__(self, name):
        # Any unknown attribute becomes a recordable callable.
        return MockCallable(name, self)

    def mockAddReturnValues(self, **methodReturnValues):
        self.mockReturnValues.update(methodReturnValues)

    def mockSetExpectation(self, name, testFn, after=0, until=0):
        # Expectations are checked for calls numbered in (after, until).
        self.mockExpectations.setdefault(name, []).append((testFn, after, until))

    def _checkInterfaceCall(self, name, callParams, callKwParams):
        """
        Check that a call to a method of the given name to the original
        class with the given parameters would not fail. If it would fail,
        raise a MockInterfaceError.
        Based on the Python 2.3.3 Reference Manual section 5.3.4: Calls.
        """
        if self.realClassMethods == None:
            return
        if not self.realClassMethods.has_key(name):
            raise MockInterfaceError("Calling mock method '%s' that was not found in the original class" % name)

        func = self.realClassMethods[name]
        try:
            args, varargs, varkw, defaults = inspect.getargspec(func)
        except TypeError:
            # func is not a Python function. It is probably a builtin,
            # such as __repr__ or __coerce__. TODO: Checking?
            # For now assume params are OK.
            return

        # callParams doesn't include self; args does include self.
        numPosCallParams = 1 + len(callParams)

        if numPosCallParams > len(args) and not varargs:
            raise MockInterfaceError("Original %s() takes at most %s arguments (%s given)" %
                                     (name, len(args), numPosCallParams))

        # Get the number of positional arguments that appear in the call,
        # also check for duplicate parameters and unknown parameters
        numPosSeen = _getNumPosSeenAndCheck(numPosCallParams, callKwParams, args, varkw)

        lenArgsNoDefaults = len(args) - len(defaults or [])
        if numPosSeen < lenArgsNoDefaults:
            raise MockInterfaceError("Original %s() takes at least %s arguments (%s given)" % (name, lenArgsNoDefaults, numPosSeen))

    def mockGetAllCalls(self):
        """
        Return a list of MockCall objects,
        representing all the methods in the order they were called.
        """
        return self.mockAllCalledMethods
    getAllCalls = mockGetAllCalls  # deprecated - kept for backward compatibility

    def mockGetNamedCalls(self, methodName):
        """
        Return a list of MockCall objects,
        representing all the calls to the named method in the order they were called.
        """
        return self.mockCalledMethods.get(methodName, [])
    getNamedCalls = mockGetNamedCalls  # deprecated - kept for backward compatibility

    def mockCheckCall(self, index, name, *args, **kwargs):
        '''test that the index-th call had the specified name and parameters'''
        call = self.mockAllCalledMethods[index]
        assert name == call.getName(), "%r != %r" % (name, call.getName())
        call.checkArgs(*args, **kwargs)
def _getNumPosSeenAndCheck(numPosCallParams, callKwParams, args, varkw):
"""
Positional arguments can appear as call parameters either named as
a named (keyword) parameter, or just as a value to be matched by
position. Count the positional arguments that are given by either
keyword or position, and check for duplicate specifications.
Also check for arguments specified by keyword that do not appear
in the method's parameter list.
"""
posSeen = {}
for arg in args[:numPosCallParams]:
posSeen[arg] = True
for kwp in callKwParams:
if posSeen. |
elandini/CoMPlEx | GUIs/CoMPlEx_hwConfig_Dialog.py | Python | mit | 17,425 | 0.004706 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'GUIs\CoMPlEx_hwConfig_Dialog.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shims for PyQt4 API variations (generated by pyuic4).
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # API v2 / Python 3: QString is gone, plain str is already unicode.
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Newer PyQt removed UnicodeUTF8; translate() takes no encoding argument.
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_hwConfig_dialog(object):
def setupUi(self, hwConfig_dialog):
hwConfig_dialog.setObjectName(_fromUtf8("hwConfig_dialog"))
hwConfig_dialog.resize(531, 816)
self.verticalLayout = QtGui.QVBoxLayout(hwConfig_dialog)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.groupBox = QtGui.QGroupBox(hwConfig_dialog)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.formLayout = QtGui.QFormLayout(self.groupBox)
self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout.setObjectName(_fromUtf8("formLayout"))
self.label = QtGui.QLabel(self.groupBox)
self.label.setObjectName(_fromUtf8("label"))
self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.label)
self.afmIpLine = QtGui.QLineEdit(self.groupBox)
self.afmIpLine.setObjectName(_fromUtf8("afmIpLine"))
self.formLayout.setWidget(0 | , QtGui.QFormLayout.FieldRole, self.afmIpLine)
self.label_2 = QtGui.QLabel(self.groupBox)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_2)
self.afmSubPortNum = QtGui.QSpinBox(self.groupBox)
self.afm | SubPortNum.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
self.afmSubPortNum.setMaximum(100000000)
self.afmSubPortNum.setObjectName(_fromUtf8("afmSubPortNum"))
self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.afmSubPortNum)
self.label_14 = QtGui.QLabel(self.groupBox)
self.label_14.setObjectName(_fromUtf8("label_14"))
self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_14)
self.afmPubPortNum = QtGui.QSpinBox(self.groupBox)
self.afmPubPortNum.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
self.afmPubPortNum.setMaximum(100000000)
self.afmPubPortNum.setObjectName(_fromUtf8("afmPubPortNum"))
self.formLayout.setWidget(2, QtGui.QFormLayout.FieldRole, self.afmPubPortNum)
self.label_15 = QtGui.QLabel(self.groupBox)
self.label_15.setObjectName(_fromUtf8("label_15"))
self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_15)
self.curveNameLine = QtGui.QLineEdit(self.groupBox)
self.curveNameLine.setObjectName(_fromUtf8("curveNameLine"))
self.formLayout.setWidget(3, QtGui.QFormLayout.FieldRole, self.curveNameLine)
self.label_16 = QtGui.QLabel(self.groupBox)
self.label_16.setObjectName(_fromUtf8("label_16"))
self.formLayout.setWidget(4, QtGui.QFormLayout.LabelRole, self.label_16)
self.monitNameLine = QtGui.QLineEdit(self.groupBox)
self.monitNameLine.setObjectName(_fromUtf8("monitNameLine"))
self.formLayout.setWidget(4, QtGui.QFormLayout.FieldRole, self.monitNameLine)
self.label_3 = QtGui.QLabel(self.groupBox)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.formLayout.setWidget(5, QtGui.QFormLayout.LabelRole, self.label_3)
self.xyCmdTagLine = QtGui.QLineEdit(self.groupBox)
self.xyCmdTagLine.setObjectName(_fromUtf8("xyCmdTagLine"))
self.formLayout.setWidget(5, QtGui.QFormLayout.FieldRole, self.xyCmdTagLine)
self.label_4 = QtGui.QLabel(self.groupBox)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.formLayout.setWidget(6, QtGui.QFormLayout.LabelRole, self.label_4)
self.xyResTagLine = QtGui.QLineEdit(self.groupBox)
self.xyResTagLine.setObjectName(_fromUtf8("xyResTagLine"))
self.formLayout.setWidget(6, QtGui.QFormLayout.FieldRole, self.xyResTagLine)
self.verticalLayout.addWidget(self.groupBox)
self.groupBox_2 = QtGui.QGroupBox(hwConfig_dialog)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.formLayout_2 = QtGui.QFormLayout(self.groupBox_2)
self.formLayout_2.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout_2.setObjectName(_fromUtf8("formLayout_2"))
self.label_5 = QtGui.QLabel(self.groupBox_2)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.LabelRole, self.label_5)
self.maxPiezoVoltNumDbl = QtGui.QDoubleSpinBox(self.groupBox_2)
self.maxPiezoVoltNumDbl.setMinimum(-99.0)
self.maxPiezoVoltNumDbl.setObjectName(_fromUtf8("maxPiezoVoltNumDbl"))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.FieldRole, self.maxPiezoVoltNumDbl)
self.label_6 = QtGui.QLabel(self.groupBox_2)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_6)
self.minPiezoVoltNumDbl = QtGui.QDoubleSpinBox(self.groupBox_2)
self.minPiezoVoltNumDbl.setMinimum(-99.0)
self.minPiezoVoltNumDbl.setObjectName(_fromUtf8("minPiezoVoltNumDbl"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.FieldRole, self.minPiezoVoltNumDbl)
self.label_7 = QtGui.QLabel(self.groupBox_2)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.formLayout_2.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_7)
self.maxPiezoExtNumDbl = QtGui.QDoubleSpinBox(self.groupBox_2)
self.maxPiezoExtNumDbl.setMinimum(-99.0)
self.maxPiezoExtNumDbl.setObjectName(_fromUtf8("maxPiezoExtNumDbl"))
self.formLayout_2.setWidget(2, QtGui.QFormLayout.FieldRole, self.maxPiezoExtNumDbl)
self.label_8 = QtGui.QLabel(self.groupBox_2)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.formLayout_2.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_8)
self.minPiezoExtNumDbl = QtGui.QDoubleSpinBox(self.groupBox_2)
self.minPiezoExtNumDbl.setMinimum(-99.0)
self.minPiezoExtNumDbl.setObjectName(_fromUtf8("minPiezoExtNumDbl"))
self.formLayout_2.setWidget(3, QtGui.QFormLayout.FieldRole, self.minPiezoExtNumDbl)
self.label_9 = QtGui.QLabel(self.groupBox_2)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.formLayout_2.setWidget(4, QtGui.QFormLayout.LabelRole, self.label_9)
self.farNearCmbBox = QtGui.QComboBox(self.groupBox_2)
self.farNearCmbBox.setObjectName(_fromUtf8("farNearCmbBox"))
self.farNearCmbBox.addItem(_fromUtf8(""))
self.farNearCmbBox.addItem(_fromUtf8(""))
self.formLayout_2.setWidget(4, QtGui.QFormLayout.FieldRole, self.farNearCmbBox)
self.label_19 = QtGui.QLabel(self.groupBox_2)
self.label_19.setObjectName(_fromUtf8("label_19"))
self.formLayout_2.setWidget(7, QtGui.QFormLayout.LabelRole, self.label_19)
self.toStartSpeedNumDbl = QtGui.QDoubleSpinBox(self.groupBox_2)
self.toStartSpeedNumDbl.setMinimum(1.0)
self.toStartSpeedNumDbl.setMaximum(20000.0)
self.toStartSpeedNumDbl.setObjectName(_fromUtf8("toStartSpeedNumDbl"))
self.formLayout_2.setWidget(7, QtGui.QFormLayout.FieldRole, self.toStartSpeedNumDbl)
self.maxSpeedNumDbl = QtGui.QDoubleSpinBox(self.groupBox_2)
self.maxSpeedNumDbl.setKeyboardTracking(False)
self.maxSpeedNumDbl.setMinimum(1.0)
self.maxSpeedNumDbl.setMaximum(20000.0)
self.maxSpeedNumDbl.setObjectName(_fromUtf8("maxSpeedNumDbl"))
self.formLayout_2.setWidget(6, QtGui.QFormLayout.FieldRole, self.maxSpeedNumDbl)
self.label_22 = QtGui.QLabel(self.groupBox_2)
|
Kunal57/MIT_6.00.1x | final/problem7.py | Python | mit | 518 | 0.015444 | # Problem 7
# 20.0 points possible (graded)
| # Write a function called general_poly.
def general_poly(L):
""" L, a list of numbers (n0, n1, n2, ... nk)
Returns a function, which when applied to a value x, returns the value
n0 * x^k + | n1 * x^(k-1) + ... nk * x^0
"""
numList = L[::-1]
def apply(number):
value = 0
for i in range(len(numList)):
result = numList[i] * (number ** i)
value += result
return value
return apply
L = [1, 2, 3, 4]
print(general_poly([1, 2, 3, 4])(10)) |
pombredanne/swfshield-cli | swfshield/main.py | Python | bsd-3-clause | 922 | 0.002169 | import logging
import sys
from cliff.app import App
from cliff.commandmanager import CommandMana | ger
class SwfShield(App):
log = logging.getLogger(__name__)
def __init__(self):
super(SwfShield, self).__init__(
description='SwfShield CLI',
version='0.1',
command_manager=CommandManager('swfshield.cli'),
)
def initialize_app(self, argv):
self.log.debug('initialize app')
def prepare_to_run_command(self, cmd):
self.log.debug('prepare to run')
def clean_up(self, cmd, result, err):
self.log.debug | ('clean up')
if err:
self.log.debug('got an error: {!s}'.format(err))
def main(argv=sys.argv[1:]):
app = SwfShield()
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.WARNING)
return app.run(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
ljjt/azure-sdk-for-python | tests/test_tableservice.py | Python | apache-2.0 | 44,444 | 0.000406 | # coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import base64
import time
import unittest
from datetime import datetime
from azure import WindowsAzureError, WindowsAzureBatchOperationError
from azure.storage import (
Entity,
EntityProperty,
StorageServiceProperties,
TableService,
)
from util import (
AzureTestCase,
credentials,
getUniqueName,
set_service_options,
)
#------------------------------------------------------------------------------
MAX_RETRY = 60
#------------------------------------------------------------------------------
class TableServiceTest(AzureTestCase):
def setUp(self):
self.ts = TableService(credentials.getStorageServicesName(),
credentials.getStorageServicesKey())
set_service_options(self.ts)
self.table_name = getUniqueName('uttable')
self.additional_table_names = []
def tearDown(self):
self.cleanup()
return super(TableServiceTest, self).tearDown()
def cleanup(self):
try:
self.ts.delete_table(self.table_name)
except:
pass
for name in self.additional_table_names:
try:
self.ts.delete_table(name)
except:
pass
#--Helpers-----------------------------------------------------------------
def _create_table(self, table_name):
'''
Creates a table with the specified name.
'''
self.ts.create_table(table_name, True)
def _create_table_with_default_entities(self, table_name, entity_count):
'''
Creates a table with the specified name and adds entities with the
default set of values. PartitionKey is set to 'MyPartition' and RowKey
is set to a unique counter value starting at 1 (as a string).
'''
entities = []
self._create_table(table_name)
for i in range(1, entity_count + 1):
entities.append(self.ts.insert_entity(
table_name,
self._create_default_entity_dict('MyPartition', str(i))))
return entities
def _create_default_entity_class(self, partition, row):
'''
Creates a class-based entity with fixed values, using all
of the supported data types.
'''
entity = Entity()
entity.PartitionKey = partition
entity.RowKey = row
entity.age = 39
entity.sex = 'male'
entity.married = True
entity.deceased = False
entity.optional = None
entity.ratio = 3.1
entity.large = 9333111000
entity.Birthday = datetime(1973, 10, 4)
entity.birthday = datetime(1970, 10, 4)
entity.binary = None
entity.other = EntityProperty('Edm.Int64', 20)
entity.clsid = EntityProperty(
'Edm.Guid', 'c9da6455-213d-42c9-9a79-3e9149a57833')
return entity
def | _create | _default_entity_dict(self, partition, row):
'''
Creates a dictionary-based entity with fixed values, using all
of the supported data types.
'''
return {'PartitionKey': partition,
'RowKey': row,
'age': 39,
'sex': 'male',
'married': True,
'deceased': False,
'optional': None,
'ratio': 3.1,
'large': 9333111000,
'Birthday': datetime(1973, 10, 4),
'birthday': datetime(1970, 10, 4),
'other': EntityProperty('Edm.Int64', 20),
'clsid': EntityProperty(
'Edm.Guid',
'c9da6455-213d-42c9-9a79-3e9149a57833')}
def _create_updated_entity_dict(self, partition, row):
'''
Creates a dictionary-based entity with fixed values, with a
different set of values than the default entity. It
adds fields, changes field values, changes field types,
and removes fields when compared to the default entity.
'''
return {'PartitionKey': partition,
'RowKey': row,
'age': 'abc',
'sex': 'female',
'sign': 'aquarius',
'birthday': datetime(1991, 10, 4)}
def _assert_default_entity(self, entity):
'''
Asserts that the entity passed in matches the default entity.
'''
self.assertEqual(entity.age, 39)
self.assertEqual(entity.sex, 'male')
self.assertEqual(entity.married, True)
self.assertEqual(entity.deceased, False)
self.assertFalse(hasattr(entity, "aquarius"))
self.assertEqual(entity.ratio, 3.1)
self.assertEqual(entity.large, 9333111000)
self.assertEqual(entity.Birthday, datetime(1973, 10, 4))
self.assertEqual(entity.birthday, datetime(1970, 10, 4))
self.assertEqual(entity.other, 20)
self.assertIsInstance(entity.clsid, EntityProperty)
self.assertEqual(entity.clsid.type, 'Edm.Guid')
self.assertEqual(entity.clsid.value,
'c9da6455-213d-42c9-9a79-3e9149a57833')
def _assert_updated_entity(self, entity):
'''
Asserts that the entity passed in matches the updated entity.
'''
self.assertEqual(entity.age, 'abc')
self.assertEqual(entity.sex, 'female')
self.assertFalse(hasattr(entity, "married"))
self.assertFalse(hasattr(entity, "deceased"))
self.assertEqual(entity.sign, 'aquarius')
self.assertFalse(hasattr(entity, "optional"))
self.assertFalse(hasattr(entity, "ratio"))
self.assertFalse(hasattr(entity, "large"))
self.assertFalse(hasattr(entity, "Birthday"))
self.assertEqual(entity.birthday, datetime(1991, 10, 4))
self.assertFalse(hasattr(entity, "other"))
self.assertFalse(hasattr(entity, "clsid"))
def _assert_merged_entity(self, entity):
'''
Asserts that the entity passed in matches the default entity
merged with the updated entity.
'''
self.assertEqual(entity.age, 'abc')
self.assertEqual(entity.sex, 'female')
self.assertEqual(entity.sign, 'aquarius')
self.assertEqual(entity.married, True)
self.assertEqual(entity.deceased, False)
self.assertEqual(entity.sign, 'aquarius')
self.assertEqual(entity.ratio, 3.1)
self.assertEqual(entity.large, 9333111000)
self.assertEqual(entity.Birthday, datetime(1973, 10, 4))
self.assertEqual(entity.birthday, datetime(1991, 10, 4))
self.assertEqual(entity.other, 20)
self.assertIsInstance(entity.clsid, EntityProperty)
self.assertEqual(entity.clsid.type, 'Edm.Guid')
self.assertEqual(entity.clsid.value,
'c9da6455-213d-42c9-9a79-3e9149a57833')
#--Test cases for table service -------------------------------------------
def test_get_set_table_service_properties(self):
table_properties = self.ts.get_table_service_properties()
self.ts.set_table_service_properties(table_properties)
tests = [('logging.delete', True),
('logging.delete', False),
('logging.read', True),
('logging.read', False),
('logging.write', True),
('logging.write', False),
]
for path, value in tests:
# |
gencer/sentry | tests/sentry/web/frontend/test_setup_wizard.py | Python | bsd-3-clause | 1,341 | 0 | from __future__ import absolute_import
from django.core.urlresolvers import reverse
from django.core.cache import cache
from sentry.testutils import PermissionTestCase
from sentry.api.endpoints.setup_wizard import SETUP_WIZARD_CACHE_KEY
class SetupWizard(PermissionTestCase):
def test_redirect(self):
user = self.create_user('foo@example.com', is_active=False)
url = reverse('sentry-project-wizard-fetch', kwargs={
'wizard_hash': 'abc'
})
resp = self.client.get(url)
self.login_as(user)
assert resp.status_code == 302
def test_simple(self):
self.create_organization(owner=self.user)
self.login_as(self.user)
key = '%s%s' % (SETUP_WIZARD_CACHE_KEY, 'abc')
cache.set(key, 'test')
url = reverse('sentry-project-wizard-fetch', kwargs={
'wizard_hash': 'abc'
})
resp = self.client.get(url)
assert resp.status_code == 200
self.asse | rtTemplateUsed(resp, 'sentry/setup-wizard.html')
def test_redirect_to_org(self):
self.create_organization(owner=self.user)
self.login_as(self.user)
url = reverse('sentry-project-wizard-fetch', kwargs={
'wizard_hash': 'xyz'
})
| resp = self.client.get(url)
assert resp.status_code == 302
|
nickw444/MediaBrowser | Item.py | Python | mit | 106 | 0 | class | Item(object):
def __init__(self, path, name):
self.path = path
| self.name = name
|
kyleparrott/kubos-sdk | kubos/test/test_licenses.py | Python | apache-2.0 | 1,774 | 0.005073 | # Kubos SDK
# Copyright (C) 2016 Kubos Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
impor | t kubos
import mock
import os
import sys
import unittest
from kubos.test.utils import get_arg_list, KubosTestCase
class KubosLicenseTest(KubosTestCase):
def setUp(self):
super(KubosLicenseTest, self).setUp()
self.test_command = 'licenses'
sys.argv.append(self.test_command)
sys.stdout = sys.stderr = open(os.devnull, 'wb')
def test_license(self):
kubos.main()
arg_list = get_arg_list(kubos.utils.container.pass_through.cal | l_args_list)
self.assertTrue(self.test_command in arg_list)
def tearDown(self):
super(KubosLicenseTest, self).tearDown()
class KubosLicsTest(KubosTestCase):
def setUp(self):
super(KubosLicsTest, self).setUp()
self.test_command = 'lics'
sys.argv.append(self.test_command)
sys.stdout = sys.stderr = open(os.devnull, 'wb')
def test_lics(self):
kubos.main()
arg_list = get_arg_list(kubos.utils.container.pass_through.call_args_list)
self.assertTrue(self.test_command in arg_list)
def tearDown(self):
super(KubosLicsTest, self).tearDown()
if __name__ == '__main__':
unittest.main()
|
gurkslask/alexistheman | tests/factories.py | Python | bsd-3-clause | 612 | 0.003268 | # -*- coding: utf-8 -*-
from factory import Sequence, PostGenerationMethodCall
from factory.alchemy import SQLAlchemyModelFactory
from alexistheman.user.models import User
from alexistheman.database import db
class BaseFactory(SQLAlchemyModelFactory):
cl | ass Meta:
abstract = True
sqlalchemy_session | = db.session
class UserFactory(BaseFactory):
username = Sequence(lambda n: "user{0}".format(n))
email = Sequence(lambda n: "user{0}@example.com".format(n))
password = PostGenerationMethodCall('set_password', 'example')
active = True
class Meta:
model = User
|
michalliu/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/unittest/case.py | Python | gpl-2.0 | 55,456 | 0.001226 | """Test case imp | lementation"""
import sys
import functools
import difflib
import logging
import pprint
import re
import warnings
import collections
import contextlib
import traceback
from . import result
from .util import (strclass, safe_repr, _count_diff_all_purpose,
_count_diff_hashable, _common_shorten_repr)
__unittest = True
DIFF_OMITTED = ('\nDiff is %s characters long. '
'Set self.maxDiff to None to see | it.')
class SkipTest(Exception):
"""
Raise this exception in a test to skip it.
Usually you can use TestCase.skipTest() or one of the skipping decorators
instead of raising this directly.
"""
class _ShouldStop(Exception):
"""
The test should stop.
"""
class _UnexpectedSuccess(Exception):
"""
The test was supposed to fail, but it didn't!
"""
class _Outcome(object):
def __init__(self, result=None):
self.expecting_failure = False
self.result = result
self.result_supports_subtests = hasattr(result, "addSubTest")
self.success = True
self.skipped = []
self.expectedFailure = None
self.errors = []
@contextlib.contextmanager
def testPartExecutor(self, test_case, isTest=False):
old_success = self.success
self.success = True
try:
yield
except KeyboardInterrupt:
raise
except SkipTest as e:
self.success = False
self.skipped.append((test_case, str(e)))
except _ShouldStop:
pass
except:
exc_info = sys.exc_info()
if self.expecting_failure:
self.expectedFailure = exc_info
else:
self.success = False
self.errors.append((test_case, exc_info))
# explicitly break a reference cycle:
# exc_info -> frame -> exc_info
exc_info = None
else:
if self.result_supports_subtests and self.success:
self.errors.append((test_case, None))
finally:
self.success = self.success and old_success
def _id(obj):
return obj
def skip(reason):
"""
Unconditionally skip a test.
"""
def decorator(test_item):
if not isinstance(test_item, type):
@functools.wraps(test_item)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
test_item = skip_wrapper
test_item.__unittest_skip__ = True
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skipIf(condition, reason):
"""
Skip a test if the condition is true.
"""
if condition:
return skip(reason)
return _id
def skipUnless(condition, reason):
"""
Skip a test unless the condition is true.
"""
if not condition:
return skip(reason)
return _id
def expectedFailure(test_item):
test_item.__unittest_expecting_failure__ = True
return test_item
class _BaseTestCaseContext:
def __init__(self, test_case):
self.test_case = test_case
def _raiseFailure(self, standardMsg):
msg = self.test_case._formatMessage(self.msg, standardMsg)
raise self.test_case.failureException(msg)
class _AssertRaisesBaseContext(_BaseTestCaseContext):
def __init__(self, expected, test_case, callable_obj=None,
expected_regex=None):
_BaseTestCaseContext.__init__(self, test_case)
self.expected = expected
self.test_case = test_case
if callable_obj is not None:
try:
self.obj_name = callable_obj.__name__
except AttributeError:
self.obj_name = str(callable_obj)
else:
self.obj_name = None
if expected_regex is not None:
expected_regex = re.compile(expected_regex)
self.expected_regex = expected_regex
self.msg = None
def handle(self, name, callable_obj, args, kwargs):
"""
If callable_obj is None, assertRaises/Warns is being used as a
context manager, so check for a 'msg' kwarg and return self.
If callable_obj is not None, call it passing args and kwargs.
"""
if callable_obj is None:
self.msg = kwargs.pop('msg', None)
return self
with self:
callable_obj(*args, **kwargs)
class _AssertRaisesContext(_AssertRaisesBaseContext):
"""A context manager used to implement TestCase.assertRaises* methods."""
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
if self.obj_name:
self._raiseFailure("{} not raised by {}".format(exc_name,
self.obj_name))
else:
self._raiseFailure("{} not raised".format(exc_name))
else:
traceback.clear_frames(tb)
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
# store exception, without traceback, for later retrieval
self.exception = exc_value.with_traceback(None)
if self.expected_regex is None:
return True
expected_regex = self.expected_regex
if not expected_regex.search(str(exc_value)):
self._raiseFailure('"{}" does not match "{}"'.format(
expected_regex.pattern, str(exc_value)))
return True
class _AssertWarnsContext(_AssertRaisesBaseContext):
"""A context manager used to implement TestCase.assertWarns* methods."""
def __enter__(self):
# The __warningregistry__'s need to be in a pristine state for tests
# to work properly.
for v in sys.modules.values():
if getattr(v, '__warningregistry__', None):
v.__warningregistry__ = {}
self.warnings_manager = warnings.catch_warnings(record=True)
self.warnings = self.warnings_manager.__enter__()
warnings.simplefilter("always", self.expected)
return self
def __exit__(self, exc_type, exc_value, tb):
self.warnings_manager.__exit__(exc_type, exc_value, tb)
if exc_type is not None:
# let unexpected exceptions pass through
return
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
first_matching = None
for m in self.warnings:
w = m.message
if not isinstance(w, self.expected):
continue
if first_matching is None:
first_matching = w
if (self.expected_regex is not None and
not self.expected_regex.search(str(w))):
continue
# store warning for later retrieval
self.warning = w
self.filename = m.filename
self.lineno = m.lineno
return
# Now we simply try to choose a helpful failure message
if first_matching is not None:
self._raiseFailure('"{}" does not match "{}"'.format(
self.expected_regex.pattern, str(first_matching)))
if self.obj_name:
self._raiseFailure("{} not triggered by {}".format(exc_name,
self.obj_name))
else:
self._raiseFailure("{} not triggered".format(exc_name))
_LoggingWatcher = collections.namedtuple("_LoggingWatcher",
["records", "output"])
class _CapturingHandler(logging.Handler):
"""
A logging handler capturing all (raw and formatted) logging output.
"""
def __init__(self):
logging.Handler.__init__(self)
self.watcher = _LoggingWatcher([], [])
def flush(self):
pass
def emit(self, record):
self.watcher |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.