repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
ismailof/mopidy-json-client | mopidy_json_client/methods_2_0/tracklist.py | Python | apache-2.0 | 12,680 | 0.001341 | from ..mopidy_api import MopidyWSController
class TracklistController (MopidyWSController):
def index(self, tl_track=None, tlid=None, **options):
'''The position of the given track in the tracklist.
If neither *tl_track* or *tlid* is given we return the index of
the currently playing track.
:param tl_track: the track to find the index of
:type tl_track: :class:`mopidy.models.TlTrack` or :class:`None`
:param tlid: TLID of the track to find the index of
:type tlid: :class:`int` or :class:`None`
:rtype: :class:`int` or :class:`None`
.. versionadded:: 1.1
The *tlid* parameter
'''
return self.mopidy_request('core.tracklist.index', tl_track=tl_track, tlid=tlid, **options)
def get_consume(self, **options):
'''Get consume mode.
:class:`True`
Tracks are removed from the tracklist when they have been played.
:class:`False`
Tracks are not removed from the tracklist.
'''
return self.mopidy_request('core.tracklist.get_consume', **options)
def shuffle(self, start=None, end=None, **options):
'''Shuffles the entire tracklist. If ``start`` and ``end`` is given only
shuffles the slice ``[start:end]``.
Triggers the :meth:`mopidy.core.CoreListener.tracklist_changed` event.
:param start: position of first track to shuffle
:type start: int or :class:`None`
:param end: position after last track to shuffle
:type end: int or :class:`None`
'''
return self.mopidy_request('core.tracklist.shuffle', start=start, end=end, **options)
def next_track(self, tl_track=None, **options):
'''The track that will be played if calling
:meth:`mopidy.core.PlaybackController.next()`.
For normal playback this is the next track in the tracklist. If repeat
is enabled the next track can loop around the tracklist. When random is
enabled this should be a random track, all tracks should be played once
before the tracklist repeats.
:param tl_track: the reference track
:type tl_track: :class:`mopidy.models.TlTrack` or :class:`None`
:rtype: :class:`mopidy.models.TlTrack` or :class:`None`
'''
return self.mopidy_request('core.tracklist.next_track', tl_track=tl_track, **options)
def get_random(self, **options):
'''Get random mode.
:class:`True`
Tracks are selected at random from the tracklist.
:class:`False`
Tracks are played in the order of the tracklist.
'''
return self.mopidy_request('core.tracklist.get_random', **options)
def get_next_tlid(self, **options):
'''The tlid of the track that will be played if calling
:meth:`mopidy.core.PlaybackController.next()`.
For normal playback this is the next track in the tracklist. If repeat
is enabled the next track can loop around the tracklist. When random is
enabled this should be a random track, all tracks should be played once
before the tracklist repeats.
:rtype: :class:`int` or :class:`None`
.. versionadded:: 1.1
'''
return self.mopidy_request('core.tracklist.get_next_tlid', **options)
def previous_track(self, tl_track=None, **options):
'''Returns the track that will be played if calling
:meth:`mopidy.core.PlaybackController.previous()`.
For normal playback this is the previous track in the tracklist. If
random and/or consume is enabled it should return the current track
instead.
:param tl_track: the reference track
:type tl_track: :class:`mopidy.models.TlTrack` or :class:`None`
:rtype: :class:`mopidy.models.TlTrack` or :class:`None`
'''
return self.mopidy_request('core.tracklist.previous_track', tl_track=tl_track, **options)
# DEPRECATED
def add(self, tracks=None, at_position=None, uri=None, uris=None, **options):
'''Add tracks to the tracklist.
If ``uri`` is given instead of ``tracks``, the URI is looked up in the
library and the resulting tracks are added to the tracklist.
If ``uris`` is given instead of ``uri`` or ``tracks``, the URIs are
looked up in the library and the resulting tracks are added to the
tracklist.
If ``at_position`` is given, the tracks are inserted at the given
position in the tracklist. If ``at_position`` is not given, the tracks
are appended to the end of the tracklist.
Triggers the :meth:`mopidy.core.CoreListener.tracklist_changed` event.
:param tracks: tracks to add
:type tracks: list of :class:`mopidy.models.Track` or :class:`None`
:param at_position: position in tracklist to add tracks
:type at_position: int or :class:`None`
:param uri: URI for tracks to add
:type uri: string or :class:`None`
:param uris: list of URIs for tracks to add
:type uris: list of string or :class:`None`
:rtype: list of :class:`mopidy.models.TlTrack`
.. versionadded:: 1.0
The ``uris`` argument.
.. deprecated:: 1.0
The ``tracks`` and ``uri`` arguments. Use ``uris``.
'''
return self.mopidy_request('core.tracklist.add', tracks=tracks, at_position=at_position, uri=uri, uris=uris, **options)
def get_eot_tlid(self, **options):
'''The TLID of the track that will be played after the current track.
Not necessarily the same TLID as returned by :meth:`get_next_tlid`.
:rtype: :class:`int` or :class:`None`
.. versionadded:: 1.1
'''
return self.mopidy_request('core.tracklist.get_eot_tlid', **options)
def set_random(self, value, **options):
'''Set random mode.
:class:`True`
Tracks are selected at random from the tracklist.
:class:`False`
Tracks are played in the order of the tracklist.
'''
return self.mopidy_request('core.tracklist.set_random', value=value, **options)
def get_tracks(self, **options):
'''Get tracklist as list of :class:`mopidy.models.Track`.
'''
return self.mopidy_request('core.tracklist.get_tracks', **options)
def set_single(self, value, **options):
'''Set single mode.
:class:`True`
Playback is stopped after current song, unless in ``repeat`` mode.
:class:`False`
Playback continues after current song.
'''
return self.mopidy_request('core.tracklist.set_single', value=value, **options)
def slice(self, start, end, **options):
'''Returns a slice of the tracklist, limited by the given start and end
positions.
:param start: position of first track to include in slice
:type start: int
:param end: position after last track to include in slice
:type end: int
:rtype: :class:`mopidy.models.TlTrack`
'''
return self.mopidy_request('core.tracklist.slice', start=start, end=end, **options)
# DEPRECATED
def filter(self, criteria=None, **options):
'''Filter t | he tracklist by the given criterias.
A criteria consists of a model field to check and a list of values to
compare it against. If the model field matches one of the values, it
may be returned.
Only tracks that matches all | the given criterias are returned.
Examples::
# Returns tracks with TLIDs 1, 2, 3, or 4 (tracklist ID)
filter({'tlid': [1, 2, 3, 4]})
# Returns track with URIs 'xyz' or 'abc'
filter({'uri': ['xyz', 'abc']})
# Returns track with a matching TLIDs (1, 3 or 6) and a
# matching URI ('xyz' or 'abc')
filter({'tlid': [1, 3, 6], 'uri': ['xyz', 'abc']})
:param criteria: on or more criteria to match by
:type criteria: dict, of (string, list) pairs
:rtype: list of :class:`mopidy.models.TlTrack`
.. deprecated:: 1.1
Providing the c |
cevaris/pants | contrib/go/src/python/pants/contrib/go/tasks/go_thrift_gen.py | Python | apache-2.0 | 5,677 | 0.008631 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
import subprocess
from pants.backend.codegen.subsystems.thrift_defaults import ThriftDefaults
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.binaries.thrift_binary import ThriftBinary
from pants.task.simple_codegen_task import SimpleCodegenTask
from pants.util.dirutil import safe_mkdir
from pants.util.memo import memoized_property
from twitter.common.collections import OrderedSet
from pants.contrib.go.targets.go_thrift_library import GoThriftGenLibrary, GoThriftLibrary
class GoThriftGen(SimpleCodegenTask):
@classmethod
def register_options(cls, register):
super(GoThriftGen, cls).register_options(register)
register('--strict', default=True, fingerprint=True, type=bool,
help='Run thrift compiler with strict warnings.')
register('--gen-options', advanced=True, fingerprint=True,
help='Use these apache thrift go gen options.')
register('--thrift-import', advanced=True,
help='Use this thrift-import gen option to thrift.')
register('--thrift-import-target', advanced=True,
help='Use this thrift import on symbolic defs.')
@classmethod
def subsystem_dependencies(cls):
return (super(GoThriftGen, cls).subsystem_dependencies() +
(ThriftDefaults, ThriftBinary.Factory.scoped(cls)))
@memoized_property
def _thrift_binary(self):
thrift_binary = ThriftBinary.Factory.scoped_instance(self).create()
return thrift_binary.path
@memoized_property
def _deps(self):
thrift_import_target = self.get_options().thrift_import_t | arget
thrift_imports = self.context.resolve(thrift_import_target)
return thri | ft_imports
@memoized_property
def _service_deps(self):
service_deps = self.get_options().get('service_deps')
return list(self.resolve_deps(service_deps)) if service_deps else self._deps
SERVICE_PARSER = re.compile(r'^\s*service\s+(?:[^\s{]+)')
NAMESPACE_PARSER = re.compile(r'^\s*namespace go\s+([^\s]+)', re.MULTILINE)
def _declares_service(self, source):
with open(source) as thrift:
return any(line for line in thrift if self.SERVICE_PARSER.search(line))
def _get_go_namespace(self, source):
with open(source) as thrift:
namespace = self.NAMESPACE_PARSER.search(thrift.read())
if not namespace:
raise TaskError('Thrift file {} must contain "namespace go "', source)
return namespace.group(1)
def synthetic_target_extra_dependencies(self, target, target_workdir):
for source in target.sources_relative_to_buildroot():
if self._declares_service(os.path.join(get_buildroot(), source)):
return self._service_deps
return self._deps
def synthetic_target_type(self, target):
return GoThriftGenLibrary
def is_gentarget(self, target):
return isinstance(target, GoThriftLibrary)
@memoized_property
def _thrift_cmd(self):
cmd = [self._thrift_binary]
thrift_import = 'thrift_import={}'.format(self.get_options().thrift_import)
gen_options = self.get_options().gen_options
if gen_options:
gen_options += ',' + thrift_import
else:
gen_options = thrift_import
cmd.extend(('--gen', 'go:{}'.format(gen_options)))
if self.get_options().strict:
cmd.append('-strict')
if self.get_options().level == 'debug':
cmd.append('-verbose')
return cmd
def _generate_thrift(self, target, target_workdir):
target_cmd = self._thrift_cmd[:]
bases = OrderedSet(tgt.target_base for tgt in target.closure() if self.is_gentarget(tgt))
for base in bases:
target_cmd.extend(('-I', base))
target_cmd.extend(('-o', target_workdir))
all_sources = list(target.sources_relative_to_buildroot())
if len(all_sources) != 1:
raise TaskError('go_thrift_library only supports a single .thrift source file for {}.', target)
source = all_sources[0]
target_cmd.append(os.path.join(get_buildroot(), source))
with self.context.new_workunit(name=source,
labels=[WorkUnitLabel.TOOL],
cmd=' '.join(target_cmd)) as workunit:
result = subprocess.call(target_cmd,
stdout=workunit.output('stdout'),
stderr=workunit.output('stderr'))
if result != 0:
raise TaskError('{} ... exited non-zero ({})'.format(self._thrift_binary, result))
gen_dir = os.path.join(target_workdir, 'gen-go')
src_dir = os.path.join(target_workdir, 'src')
safe_mkdir(src_dir)
go_dir = os.path.join(target_workdir, 'src', 'go')
os.rename(gen_dir, go_dir)
@classmethod
def product_types(cls):
return ['go']
def execute_codegen(self, target, target_workdir):
self._generate_thrift(target, target_workdir)
@property
def _copy_target_attributes(self):
"""Override `_copy_target_attributes` to exclude `provides`."""
return [a for a in super(GoThriftGen, self)._copy_target_attributes if a != 'provides']
def synthetic_target_dir(self, target, target_workdir):
all_sources = list(target.sources_relative_to_buildroot())
source = all_sources[0]
namespace = self._get_go_namespace(source)
return os.path.join(target_workdir, 'src', 'go', namespace.replace(".", os.path.sep))
|
ruijie/quantum | quantum/tests/unit/test_db_plugin.py | Python | apache-2.0 | 122,027 | 0.000156 | # Copyright (c) 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import copy
import datetime
import logging
import mock
import os
import random
import unittest2
import webob.exc
import quantum
from quantum.api.v2 import attributes
from quantum.api.v2.attributes import ATTR_NOT_SPECIFIED
from quantum.api.v2.router import APIRouter
from quantum.common import config
from quantum.common import exceptions as q_exc
from quantum.common.test_lib import test_config
from quantum import context
from quantum.db import api as db
from quantum.db import db_base_plugin_v2
from quantum.db import models_v2
from quantum.extensions.extensions import PluginAwareExtensionManager
from quantum.manager import QuantumManager
from quantum.openstack.common import cfg
from quantum.openstack.common | import timeutils
from quantum.tests.unit import test_extensions
from quantum.tests.unit.testlib_api import create_request
from quantum.wsgi import Serializer, JSONDeseri | alizer
LOG = logging.getLogger(__name__)
DB_PLUGIN_KLASS = 'quantum.db.db_base_plugin_v2.QuantumDbPluginV2'
ROOTDIR = os.path.dirname(os.path.dirname(__file__))
ETCDIR = os.path.join(ROOTDIR, 'etc')
def etcdir(*p):
return os.path.join(ETCDIR, *p)
class QuantumDbPluginV2TestCase(unittest2.TestCase):
def setUp(self, plugin=None):
super(QuantumDbPluginV2TestCase, self).setUp()
# NOTE(jkoelker) for a 'pluggable' framework, Quantum sure
# doesn't like when the plugin changes ;)
db._ENGINE = None
db._MAKER = None
# Make sure at each test a new instance of the plugin is returned
QuantumManager._instance = None
# Make sure at each test according extensions for the plugin is loaded
PluginAwareExtensionManager._instance = None
# Save the attributes map in case the plugin will alter it
# loading extensions
# Note(salvatore-orlando): shallow copy is not good enough in
# this case, but copy.deepcopy does not seem to work, since it
# causes test failures
self._attribute_map_bk = {}
for item in attributes.RESOURCE_ATTRIBUTE_MAP:
self._attribute_map_bk[item] = (attributes.
RESOURCE_ATTRIBUTE_MAP[item].
copy())
self._tenant_id = 'test-tenant'
json_deserializer = JSONDeserializer()
self._deserializers = {
'application/json': json_deserializer,
}
if not plugin:
plugin = test_config.get('plugin_name_v2', DB_PLUGIN_KLASS)
# Create the default configurations
args = ['--config-file', etcdir('quantum.conf.test')]
# If test_config specifies some config-file, use it, as well
for config_file in test_config.get('config_files', []):
args.extend(['--config-file', config_file])
config.parse(args=args)
# Update the plugin
cfg.CONF.set_override('core_plugin', plugin)
cfg.CONF.set_override('base_mac', "12:34:56:78:90:ab")
cfg.CONF.max_dns_nameservers = 2
cfg.CONF.max_subnet_host_routes = 2
self.api = APIRouter()
def _is_native_bulk_supported():
plugin_obj = QuantumManager.get_plugin()
native_bulk_attr_name = ("_%s__native_bulk_support"
% plugin_obj.__class__.__name__)
return getattr(plugin_obj, native_bulk_attr_name, False)
self._skip_native_bulk = not _is_native_bulk_supported()
ext_mgr = test_config.get('extension_manager', None)
if ext_mgr:
self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
def tearDown(self):
super(QuantumDbPluginV2TestCase, self).tearDown()
# NOTE(jkoelker) for a 'pluggable' framework, Quantum sure
# doesn't like when the plugin changes ;)
db.clear_db()
db._ENGINE = None
db._MAKER = None
cfg.CONF.reset()
# Restore the original attribute map
attributes.RESOURCE_ATTRIBUTE_MAP = self._attribute_map_bk
def _req(self, method, resource, data=None, fmt='json',
id=None, params=None, action=None):
if id and action:
path = '/%(resource)s/%(id)s/%(action)s.%(fmt)s' % locals()
elif id:
path = '/%(resource)s/%(id)s.%(fmt)s' % locals()
else:
path = '/%(resource)s.%(fmt)s' % locals()
content_type = 'application/%s' % fmt
body = None
if data is not None: # empty dict is valid
body = Serializer().serialize(data, content_type)
return create_request(path,
body,
content_type,
method,
query_string=params)
def new_create_request(self, resource, data, fmt='json'):
return self._req('POST', resource, data, fmt)
def new_list_request(self, resource, fmt='json', params=None):
return self._req('GET', resource, None, fmt, params=params)
def new_show_request(self, resource, id, fmt='json'):
return self._req('GET', resource, None, fmt, id=id)
def new_delete_request(self, resource, id, fmt='json'):
return self._req('DELETE', resource, None, fmt, id=id)
def new_update_request(self, resource, data, id, fmt='json'):
return self._req('PUT', resource, data, fmt, id=id)
def new_action_request(self, resource, data, id, action, fmt='json'):
return self._req('PUT', resource, data, fmt, id=id, action=action)
def deserialize(self, content_type, response):
ctype = 'application/%s' % content_type
data = self._deserializers[ctype].deserialize(response.body)['body']
return data
def _create_bulk(self, fmt, number, resource, data, name='test', **kwargs):
""" Creates a bulk request for any kind of resource """
objects = []
collection = "%ss" % resource
for i in range(0, number):
obj = copy.deepcopy(data)
obj[resource]['name'] = "%s_%s" % (name, i)
if 'override' in kwargs and i in kwargs['override']:
obj[resource].update(kwargs['override'][i])
objects.append(obj)
req_data = {collection: objects}
req = self.new_create_request(collection, req_data, fmt)
if ('set_context' in kwargs and
kwargs['set_context'] is True and
'tenant_id' in kwargs):
# create a specific auth context for this request
req.environ['quantum.context'] = context.Context(
'', kwargs['tenant_id'])
elif 'context' in kwargs:
req.environ['quantum.context'] = kwargs['context']
return req.get_response(self.api)
def _create_network(self, fmt, name, admin_status_up,
arg_list=None, **kwargs):
data = {'network': {'name': name,
'admin_state_up': admin_status_up,
'tenant_id': self._tenant_id}}
for arg in (('admin_state_up', 'tenant_id', 'shared') +
(arg_list or ())):
# Arg must be present and not empty
if arg in kwargs and kwargs[arg]:
data['network'][arg] = kwargs[arg]
network_req = self.new_create_request('networks', data, fmt)
if (kwargs.get('set_context') and 'tenant_id' in kwargs):
# create a specific auth context for this request
network_req.environ['quantum.context |
lodevil/cpy | cpy/parser/ast_builder.py | Python | mit | 38,253 | 0.000758 | from . import ast
from .pystates import symbols as syms
from .grammar.sourcefile import SourceFile
import token
import six
import re
class ASTError(Exception):
pass
class ASTMeta(type):
def __new__(cls, name, bases, attrs):
handlers = {}
attrs['handlers'] = handlers
newcls = type.__new__(cls, name, bases, attrs)
for k, v in attrs.items():
if k.startswith('handle_'):
sym = k[len('handle_'):]
handlers[syms[sym]] = getattr(newcls, k)
return newcls
operator_map = {
'+': ast.Add,
'+=': ast.Add,
'-': ast.Sub,
'-=': ast.Sub,
'*': ast.Mult,
'*=': ast.Mult,
'/': ast.Div,
'/=': ast.Div,
'%': ast.Mod,
'%=': ast.Mod,
'**': ast.Pow,
'**=': ast.Pow,
'<<': ast.LShift,
'<<=': ast.LShift,
'>>': ast.RShift,
'>>=': ast.RShift,
'|': ast.BitOr,
'|=': ast.BitOr,
'^': ast.BitXor,
'^=': ast.BitXor,
'&': ast.BitAnd,
'&=': ast.BitAnd,
'//': ast.FloorDiv,
'//=': ast.FloorDiv,
}
compare_map = {
'==': ast.Eq,
'!=': ast.NotEq,
'<': ast.Lt,
'<=': ast.LtE,
'>': ast.Gt,
'>=': ast.GtE,
'is': ast.Is,
'is not': ast.IsNot,
'in': ast.In,
'not in': ast.NotIn,
}
xdigits = re.compile(r'^[0-9a-z]{2}$', re.IGNORECASE)
@six.add_metaclass(ASTMeta)
class ASTBuilder(object):
def __init__(self, src):
if not isinstance(src, SourceFile):
raise Exception('invalid sourcefile')
self.src = src
self.root = src.parse_tree.root
self.ast = self.build()
def syntax_error(self, msg, node):
return SyntaxError(msg, (self.src.name, node.start[0], node.start[1],
self.src.get_line(node.start[0])))
def build(self):
n = self.root
if n == syms.single_input:
# single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
if n[0] == token.NEWLINE:
return ast.Interactive([])
return ast.Interactive(self.handle(n[0]))
elif n == syms.file_input:
# file_input: (NEWLINE | stmt)* ENDMARKER
stmts = []
for stmt in n.filter(syms.stmt):
stmts.extend(self.handle(stmt[0]))
return ast.Module(stmts)
elif n == syms.eval_input:
# eval_input: testlist NEWLINE* ENDMARKER
return ast.Expression(self.handle_testlist(n[0]))
raise ASTError('invalid root node')
def handle(self, node):
handler = self.handlers.get(node.type, None)
if handler is None:
raise ASTError('invalid node: %r', node)
return handler(self, node)
def handle_stmt(self, stmt):
# stmt: simple_stmt | compound_stmt
if stmt[0] == syms.simple_stmt:
return self.handle_simple_stmt(stmt[0])
return [self.handle(stmt[0][0])]
def handle_simple_stmt(self, simple_stmt):
# simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
# small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
# import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
stmts = []
for small_stmt in simple_stmt.filter(syms.small_stmt):
stmts.append(self.handle(small_stmt[0]))
return stmts
def handle_compound_stmt(self, compound_stmt):
# compound_stmt: (if_stmt | while_stmt | for_stmt |
# try_stmt | with_stmt | funcdef)
return [self.handle(compound_stmt[0])]
def handle_testlist(self, testlist):
# testlist: test (',' test)* [',']
if len(testlist) == 1:
return self.handle_test(testlist[0])
exprs = []
for test in testlist.filter(syms.test):
exprs.append(self.handle_test(test))
return ast.Tuple(exprs, ast.Load, *testlist.start)
def handle_test(self, test):
# test: or_test ['if' or_test 'else' test] | lambdef
if len(test) == 1:
if test[0] == syms.lambdef:
return self.handle_lambdef(test[0])
return self.handle_or_test(test[0])
body = self.handle_or_test(test[0])
te = self.handle_or_test(test[2])
orelse = self.handle_test(test[4])
return ast.IfExp(te, body, orelse, *test.start)
def handle_or_test(self, or_test):
# or_test: and_test ('or' and_test)*
if len(or_test) == 1:
return self.handle_and_test(or_test[0])
return ast.BoolOp(ast.Or,
[self.handle_and_test(x) for x in or_test.filter(syms.and_test)],
*or_test.start)
def handle_and_test(self, and_test):
#and_test: not_test ('and' not_test)*
if len(and_test) == 1:
return self.handle_not_test(and_test[0])
return ast.BoolOp(ast.And,
[self.handle_not_test(x) for x in and_test.filter(syms.not_test)],
*and_test.start)
def handle_not_test(self, node):
# not_test: 'not' not_test | comparison
if len(node) == 2:
return ast.UnaryOp(
ast.Not, self.handle_not_test(node[1]), *node.start)
# comparison: expr (comp_op expr)*
# comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
node = node[0]
expr = self.handle_expr(node[0])
if len(node) == 1:
return expr
operators = []
operands = []
for i in range(1, len(node), 2):
if len(node[i]) == 1:
op = node[i][0].val
else:
op = '%s %s' % (node[i][0].val, node[i][1].val)
operators.append(compare_map[op])
operands.append(self.handle_expr(node[i + 1]))
return ast.Compare(expr, operators, operands, *node.start)
def handle_lambdef(self, node):
# lambdef: 'lambda' [varargslist] ':' test
if len(node) == 3:
args = ast.arguments(args=[], vararg=None, varargannotation=None,
kwonlyargs=[], kwarg=None, kwargannotation=None,
defaults=[], kw_defaults=[])
else:
args = self.handle_varargslist(node[1])
return ast.Lambda(args, self.handle_test(node[-1]), *node.start)
def handle_varargslist(self, node):
# typedargslist: (tfpdef ['=' test] (',' tfpdef ['=' test])* [','
# ['*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef]]
# | '*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef)
# tfpdef: NAME [':' test]
# varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [','
# ['*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef]]
# | '*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef)
# vfpdef: NAME
if node[0].val == '**':
kwarg = node[1][0].val
kwargannotation = node[1][2].val if len(node[1]) == 3 else None
return ast.arguments(args=[], vararg=None, varargannotation=None,
kwonlyargs=[], kwarg=kwarg, kwargannotation=kwargannotation,
defaults=[], kw_defaults=[])
elif node[0].val == '*':
vararg, i = node[1][0].val, 3
varargannotation = node[1][2].val if len(node[1]) == 3 else None
kwonlyargs = []
kw_defaults = []
while i < len(node) and node[i].val != '**':
arg = ast.arg(node[i][0].val, None)
if len(node[i]) == 3:
| arg.annotation = node[i][2].val
kwonlyargs.append(arg)
if node[i + 1].val == '=':
kw_defaults.append(self.handle_test(node[i + 2]))
i += 4
else:
i += 2
if i < len(node) and node[i].val == '**':
kwarg = node[i + 1][0] | .val
kwargannotation = node[i + 1][2] if len(node[i + 1]) == 3 else None
else:
kwarg, kwargannotation = None, None
return ast.arguments(args=[], vararg=vararg,
varargannotation=varargannotation |
jrief/django-angular | djng/forms/angular_model.py | Python | mit | 4,211 | 0.004275 | from django.forms.utils import ErrorDict
from django.utils.html import format_html
from djng.forms.angular_base import NgFormBaseMixin, SafeTuple
class NgModelFormMixin(NgFormBaseMixin):
"""
Add this NgModelFormMixin to every class derived from ``forms.Form``, if that custom ``Form``
shall be managed through an Angular controller.
It adds attributes ``ng-model``, and optionally ``ng-change``, ``ng-class`` and ``ng-style``
to each of your input fields.
If form validation fails, the ErrorDict is rewritten in a way, so that the Angular controller
can access the error strings using the same key values as for its models.
"""
add_djng_error = False
def __init__(self, *args, **kwargs):
self.scope_prefix = kwargs.pop('scope_prefix', getattr(self, 'scope_prefix', None))
self.ng_directives = {}
for key in list(kwargs.keys()):
if key.startswith('ng_'):
fmtstr = kwargs.pop(key)
self.ng_directives[key.replace('_', '-')] = fmtstr
if hasattr(self, 'Meta') and hasattr(self.Meta, 'ng_models'):
if not isinstance(getattr(self.Meta, 'ng_models'), list):
raise TypeError('Meta.ng_model is not of type list')
elif 'ng-model' not in self.ng_directives:
self.ng_directives['ng-model'] = '%(model)s'
super(NgModelFormMixin, self).__init__(*args, **kwargs)
self.prefix = kwargs.get('prefix')
if self.prefix and self.data:
if self.data.get(self.prefix):
self.data = {self.add_prefix(name): value for (name, value) in self.data.get(self.prefix).items()}
else:
self.data = {name: value for (name, value) in self.data.items() if name.startswith(self.prefix + '.')}
if self.scope_prefix == self.form_name:
raise ValueError("The form's name may not be identical wi | th its scope_prefix")
def _post_clean(self):
"" | "
Rewrite the error dictionary, so that its keys correspond to the model fields.
"""
super(NgModelFormMixin, self)._post_clean()
if self._errors and self.prefix:
self._errors = ErrorDict((self.add_prefix(name), value) for name, value in self._errors.items())
def get_initial_data(self):
"""
Return a dictionary specifying the defaults for this form. This dictionary can be used to
inject the initial values for an Angular controller using the directive:
``ng-init={{ thisform.get_initial_data|js|safe }}``.
"""
data = {}
ng_models = hasattr(self, 'Meta') and getattr(self.Meta, 'ng_models', []) or []
for name, field in self.fields.items():
if 'ng-model' in self.ng_directives or name in ng_models:
data[name] = self.initial.get(name) if self.initial else field.initial
return data
def get_field_errors(self, field):
errors = super(NgModelFormMixin, self).get_field_errors(field)
if field.is_hidden:
return errors
identifier = format_html('{0}[\'{1}\']', self.form_name, field.html_name)
errors.append(SafeTuple((identifier, self.field_error_css_classes, '$pristine', '$error.rejected', 'invalid', '$message')))
return errors
def non_field_errors(self):
errors = super(NgModelFormMixin, self).non_field_errors()
errors.append(SafeTuple((self.form_name, self.form_error_css_classes, '$pristine', '$error.rejected', 'invalid', '$message')))
return errors
def update_widget_attrs(self, bound_field, attrs):
super(NgModelFormMixin, self).update_widget_attrs(bound_field, attrs)
identifier = self.add_prefix(bound_field.name)
ng = {
'name': bound_field.name,
'identifier': identifier,
'model': ('%s[\'%s\']' % (self.scope_prefix, identifier)) if self.scope_prefix else identifier
}
if hasattr(self, 'Meta') and bound_field.name in getattr(self.Meta, 'ng_models', []):
attrs['ng-model'] = ng['model']
for key, fmtstr in self.ng_directives.items():
attrs[key] = fmtstr % ng
return attrs
|
karlht/services-tools | fxa-l10n/genContentPages.py | Python | mpl-2.0 | 752 | 0.00266 | langs = [ "ca",
"cs",
| "cy",
"da",
"de",
"en-US",
"es",
"es-CL",
"et",
"eu",
"fr",
"fy",
"he",
"hu",
"id",
"it",
"ja",
"ko",
"lt",
"nb-NO",
"nl",
"pa",
"pl",
"pt",
"pt-BR",
"rm",
"ru",
"sk",
"sl",
"sq",
| "sr",
"sr-LATN",
"sv",
"tr",
"zh-CN",
"zh-TW",
"xx"]
print '#!/bin/sh'
print
for lang in langs:
print "node_modules/phantomjs/bin/phantomjs fxa-l18n/page-scrape/loadPage.js %s" % lang
|
KenetJervet/mapensee | python/pedal/pedal/test/test_pedal_syntax.py | Python | gpl-3.0 | 2,077 | 0.000481 | # coding: utf-8
from pedal import (
parse,
transcript,
_pedal
)
import unittest
class PedalParseTest(TestCase):
_sample_code = r'''
title "No trans"
input {
historical_samplings: int
recent_n_min_no_ | trans_func: func
}
tweaks {
sample_days: int
daily_sample_points: int
check_interval: int
hs_u: int
hs_l: int
n_u: int
n_l: int
}
"Historical samplings" {
}
trigger "Alert trigger condition" {
if historical_samplings > hs_u {
N = n_u
} elif historical_samplings > hs_l {
N = n_l - historical_samplings
} else
}
| '''
def test_parse(self):
ast = parse(self._sample_code)
assert len(ast) == 3
# Test hello section
assert ast[0].name == 'hello'
stmts = ast[0].stmts
num_stmt = stmts[0]
assert num_stmt.varname.identifier == 'num'
assert num_stmt.val.val == 123
str_stmt = stmts[1]
assert str_stmt.varname.identifier == 'str'
assert str_stmt.val.val == 'Hello"World!'
if_stmt = stmts[2]
assert isinstance(if_stmt.if_.expr, _pedal.LessThan)
assert if_stmt.if_.expr.left.identifier == 'num'
assert if_stmt.if_.expr.right.val == 250
if_stmt_if_inner = if_stmt.if_.stmts
# Fast forward
assert if_stmt_if_inner[1].if_.stmts[0].val.val == "Yes really hell"
assert if_stmt_if_inner[1].else_.stmts[0].val.val == "Not really"
if_stmt_elif_inner = if_stmt.elif_[0].stmts
assert if_stmt_elif_inner[0].val.val == 1
if_stmt_else_inner = if_stmt.else_.stmts
assert if_stmt_else_inner[0].val.val == 2
# Test world section
# Fast forward
assert ast[1].name == 'world'
assert ast[1].stmts[0].func_name.identifier == 'print'
assert list(map(lambda x: getattr(x, 'val'), ast[1].stmts[0].arguments)) == ['Damn', 'hell']
def test_transcript(self):
transcript(self._sample_code)
|
Dima73/enigma2 | lib/python/Components/Sources/StreamService.py | Python | gpl-2.0 | 2,074 | 0.024108 | from Source import Source
from Components.Element import cached
from Components.SystemInfo import SystemInfo
from enigma import eServiceReference
StreamServiceList = []
class StreamService(Source):
def __init__(self, navcore):
Source.__init__(self)
self.ref = None
self.__service = None
self.navcor | e = navcore
def serviceEvent(self, event):
pass
@cached
def getService(self):
return self.__service
service = property(getService)
def handleCommand(self, cmd):
print "[StreamService] handle command", cmd
self.ref = eServiceReference(cmd)
def recordEvent(se | lf, service, event):
if service is self.__service:
return
print "[StreamService] RECORD event for us:", service
self.changed((self.CHANGED_ALL, ))
def execBegin(self):
if self.ref is None:
print "[StreamService] has no service ref set"
return
print "[StreamService]e execBegin", self.ref.toString()
if SystemInfo["CanNotDoSimultaneousTranscodeAndPIP"]:
from Screens.InfoBar import InfoBar
if InfoBar.instance and hasattr(InfoBar.instance.session, 'pipshown') and InfoBar.instance.session.pipshown:
hasattr(InfoBar.instance, "showPiP") and InfoBar.instance.showPiP()
print "[StreamService] try to disable pip before start stream"
if hasattr(InfoBar.instance.session, 'pip'):
del InfoBar.instance.session.pip
InfoBar.instance.session.pipshown = False
self.__service = self.navcore.recordService(self.ref)
self.navcore.record_event.append(self.recordEvent)
if self.__service is not None:
if self.__service.__deref__() not in StreamServiceList:
StreamServiceList.append(self.__service.__deref__())
self.__service.prepareStreaming()
self.__service.start()
def execEnd(self):
print "[StreamService] execEnd", self.ref.toString()
self.navcore.record_event.remove(self.recordEvent)
if self.__service is not None:
if self.__service.__deref__() in StreamServiceList:
StreamServiceList.remove(self.__service.__deref__())
self.navcore.stopRecordService(self.__service)
self.__service = None
self.ref = None
|
saintpai/sos | sos/plugins/tuned.py | Python | gpl-2.0 | 1,399 | 0 | # Copyright (C) 2014 Red Hat, Inc., Peter Portante <peter.portante@redhat.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin
class Tuned(Plugin, RedHatPlugin):
"""Tuned system tuning daemon
"""
packages = ('tuned',)
profiles = ('system', 'performance')
plugin_name = 'tuned'
def setup(self):
self.add | _cmd_output([
"tuned-adm list",
"t | uned-adm active",
"tuned-adm recommend"
])
self.add_copy_spec([
"/etc/tuned.conf",
"/etc/tune-profiles"
])
self.add_copy_spec([
"/etc/tuned",
"/usr/lib/tuned",
"/var/log/tuned/tuned.log"
])
# vim: et ts=4 sw=4
|
Smart-Torvy/torvy-home-assistant | homeassistant/components/media_player/sonos.py | Python | mit | 13,962 | 0 | """
Support to interface with Sonos players (via SoCo).
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.sonos/
"""
import datetime
import logging
from os import path
import socket
import voluptuous as vol
from homeassistant.components.media_player import (
ATTR_MEDIA_ENQUEUE, DOMAIN, MEDIA_TYPE_MUSIC, SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK,
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_CLEAR_PLAYLIST,
SUPPORT_SELECT_SOURCE, MediaPlayerDevice)
from homeassistant.const import (
STATE_IDLE, STATE_PAUSED, STATE_PLAYING, STATE_UNKNOWN, STATE_OFF,
ATTR_ENTITY_ID)
from homeassistant.config import load_yaml_config_file
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['SoCo==0.11.1']
_LOGGER = logging.getLogger(__name__)
# The soco library is excessively chatty when it comes to logging and
# causes a LOT of spam in the logs due to making a http connection to each
# speaker every 10 seconds. Quiet it down a bit to just actual problems.
_SOCO_LOGGER = logging.getLogger('soco')
_SOCO_LOGGER.setLevel(logging.ERROR)
_REQUESTS_LOGGER = logging.getLogger('requests')
_REQUESTS_LOGGER.setLevel(logging.ERROR)
SUPPORT_SONOS = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE |\
SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_PLAY_MEDIA |\
SUPPORT_SEEK | SUPPORT_CLEAR_PLAYLIST | SUPPORT_SELECT_SOURCE
SERVICE_GROUP_PLAYERS = 'sonos_group_players'
SERVICE_UNJOIN = 'sonos_unjoin'
SERVICE_SNAPSHOT = 'sonos_snapshot'
SERVICE_RESTORE = 'sonos_restore'
SUPPORT_SOURCE_LINEIN = 'Line-in'
SUPPORT_SOURCE_TV = 'TV'
SUPPORT_SOURCE_RADIO = 'Radio'
SONOS_SCHEMA = vol.Schema({
ATTR_ENTITY_ID: cv.entity_ids,
})
# List of devices that have been registered
DEVICES = []
# pylint: disable=unused-argument, too-many-locals
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Sonos platform."""
import soco
global DEVICES
if discovery_info:
player = soco.SoCo(discovery_info)
if player.is_visible:
device = SonosDevice(hass, player)
add_devices([device])
if not DEVICES:
register_services(hass)
DEVICES.append(device)
return True
return False
players = None
hosts = config.get('hosts', None)
if hosts:
# Support retro compatibility with comma separated list of hosts
# from config
hosts = hosts.split(',') if isinstance(hosts, str) else hosts
players = []
for host in hosts:
players.append(soco.SoCo(socket.gethostbyname(host)))
if not players:
players = soco.discover(interface_addr=config.get('interface_addr',
None))
if not players:
_LOGGER.warning('No Sonos speakers found.')
return False
DEVICES = [SonosDevice(hass, p) for p in players]
add_devices(DEVICES)
register_services(hass)
_LOGGER.info('Added %s Sonos speakers', len(players))
return True
def register_services(hass):
"""Register all services for sonos devices."""
descriptions = load_yaml_config_file(
path.join(path.dirname(__file__), 'services.yaml'))
hass.services.register(DOMAIN, SERVICE_GROUP_PLAYERS,
_group_players_service,
descriptions.get(SERVICE_GROUP_PLAYERS),
schema=SONOS_SCHEMA)
hass.services.register(DOMAIN, SERVICE_UNJOIN,
_unjoin_service,
descriptions.get(SERVICE_UNJOIN),
schema=SONOS_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SNAPSHOT,
_snapshot_service,
descriptions.get(SERVICE_SNAPSHOT),
schema=SONOS_SCHEMA)
hass.services.register(DOMAIN, SERVICE_RESTORE,
_restore_service,
descriptions.get(SERVICE_RESTORE),
schema=SONOS_SCHEMA)
def _apply_service(service, service_func, *service_func_args):
"""Internal func for applying a service."""
entity_ids = service.data.get('entity_id')
if entity_ids:
_devices = [device for device in DEVICES
if device.entity_id in entity_ids]
else:
_devices = DEVICES
for device in _devices:
service_func(device, *service_func_args)
device.update_ha_state(True)
def _group_players_service(service):
"""Group media players, use player as coordinator."""
_apply_service(service, SonosDevice.group_players)
def _unjoin_service(service):
"""Unjoin the player from a group."""
_apply_service(service, SonosDevice.unjoin)
def _snapshot_service(service):
"""Take a snapshot."""
_apply_service(service, SonosDevice.snapshot)
def _restore_service(service):
"""Restore a snapshot."""
_apply_service(service, SonosDevice.restore)
def only_if_coordinator(func):
"""Decorator for coordinator.
If used as decorator, avoid calling the decorated method if player is not
a coordinator. If not, a grouped speaker (not in coordinator role) will
throw soco.exceptions.SoCoSlaveException.
Also, partially catch exceptions like:
soco.exceptions.SoCoUPnPException: UPnP Error 701 received:
Transition not available from <player ip address>
"""
def wrapper(*args, **kwargs):
"""Decorator wrapper."""
if args[0].is_coordinator:
from soco.exceptions import SoCoUPnPException
try:
func(*args, **kwargs)
except SoCoUPnPException:
_LOGGER.error('command "%s" for | Sonos device "%s" '
| 'not available in this mode',
func.__name__, args[0].name)
else:
_LOGGER.debug('Ignore command "%s" for Sonos device "%s" (%s)',
func.__name__, args[0].name, 'not coordinator')
return wrapper
# pylint: disable=too-many-instance-attributes, too-many-public-methods
# pylint: disable=abstract-method
class SonosDevice(MediaPlayerDevice):
"""Representation of a Sonos device."""
# pylint: disable=too-many-arguments
def __init__(self, hass, player):
"""Initialize the Sonos device."""
from soco.snapshot import Snapshot
self.hass = hass
self.volume_increment = 5
self._player = player
self._name = None
self.update()
self.soco_snapshot = Snapshot(self._player)
@property
def should_poll(self):
"""Polling needed."""
return True
def update_sonos(self, now):
"""Update state, called by track_utc_time_change."""
self.update_ha_state(True)
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if self._status == 'PAUSED_PLAYBACK':
return STATE_PAUSED
if self._status == 'PLAYING':
return STATE_PLAYING
if self._status == 'STOPPED':
return STATE_IDLE
return STATE_UNKNOWN
@property
def is_coordinator(self):
"""Return true if player is a coordinator."""
return self._player.is_coordinator
def update(self):
"""Retrieve latest state."""
self._name = self._player.get_speaker_info()['zone_name'].replace(
' (R)', '').replace(' (L)', '')
if self.available:
self._status = self._player.get_current_transport_info().get(
'current_transport_state')
self._trackinfo = self._player.get_current_track_info()
else:
self._status = STATE_OFF
self._trackinfo = {}
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._player.volume / 100.0
@proper |
codeif/crimg | crimg/api.py | Python | mit | 2,205 | 0.000454 | # -*- coding: utf-8 -*-
from PIL import Image
def get_target_size(img_size, size, exact_size=False):
assert img_size[0] and img_size[1]
assert size[0] or size[1]
size = list(size)
if not size[0]:
size[0] = size[1] * img_size[0] // img_size[1]
if not size[1]:
size[1] = size[0] * img_size[1] // img_size[0]
if not exact_size:
return min(img_size[0], size[0]), min(img_size[1], size[1])
else:
return tuple(size)
| def crop_by_aspect_ratio(image, aspect_ratio):
"""crop image by scale without aspect rati | o distortion
:param image: a PIL image object
:param aspect_ratio: aspect ratio, as a 2-tuple: (width, height).
:returns: An :py:class:`~PIL.Image.Image` object.
"""
size = image.size
size1 = (size[0], size[0] * aspect_ratio[1] // aspect_ratio[0])
size2 = (size[1] * aspect_ratio[0] // aspect_ratio[1], size[1])
new_size = min(size1, size2)
if new_size == image.size:
return image
# calc left, upper, right, lower
left = (size[0] - new_size[0]) // 2
right = left + new_size[0]
upper = (size[1] - new_size[1]) // 2
lower = upper + new_size[1]
return image.crop((left, upper, right, lower))
def crop_resize(image, size, exact_size=False):
"""Crop out the proportional middle of the image and set to the desired size.
:param image: a PIL image object
:param size: a 2-tuple of (width,height); at least one must be specified
:param exact_size: whether to scale up for smaller images.
Defaults to ``False``.
If the image is bigger than the sizes passed,
this works as expected.
If the image is smaller than the sizes passed,
then behavior is dictated by the ``exact_size`` flag.
If the ``exact_size`` flag is false,
the image will be returned unmodified.
If the ``exact_size`` flag is true,
the image will be scaled up to the required size.
:return: An :py:class:`~PIL.Image.Image` object.
"""
target_size = get_target_size(image.size, size, exact_size)
img2 = crop_by_aspect_ratio(image, target_size)
return img2.resize(target_size, Image.ANTIALIAS)
|
TwistingTwists/sms | data/module_locator.py | Python | apache-2.0 | 418 | 0.014354 | """Locate the data files in the eggs to open"""
"Special thanks to https://github.com/Orko | Hunter/ping | -me/tree/master/ping_me/data"
import os
import sys
def we_are_frozen():
return hasattr(sys, "frozen")
def modeule_path():
encoding = sys.getfilesystemencoding()
if we_are_frozen():
return os.path.dirname(unicode(sys.executable, encoding))
return os.path.dirname(unicode(__file__, encoding)) |
funkring/fdoo | addons-funkring/at_project_sale/wizard/correct_time_wizard.py | Python | agpl-3.0 | 3,859 | 0.022286 | # -*- coding: utf-8 -*-
#############################################################################
#
# Copyright (c) 2007 Martin Reisenhofer <martin.reisenhofer@funkring.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, eit | her version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICUL | AR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv,fields
class correct_time_wizard(osv.osv_memory):
def _sale_line_get(self,cr,uid,task,context):
#correct procurement
procurement = task.procurement_id
if procurement:
sale_line_obj = self.pool.get("sale.order.line")
sale_line_ids = sale_line_obj.search(cr,uid,[("procurement_id","=",procurement.id)])
if len(sale_line_ids)==1 :
sale_line = sale_line_obj.browse(cr,uid,sale_line_ids[0],context)
if sale_line.state in ("draft","confirmed"):
return sale_line
return None
def default_get(self, cr, uid, fields_list, context=None):
res = {}
active_id = context and context.get("active_id") or None
if active_id:
task_obj = self.pool.get("project.task")
task = task_obj.browse(cr,uid,active_id,context)
res["task_id"]=task.id
res["planned_hours"]=task.planned_hours
sale_line=self._sale_line_get(cr, uid, task, context)
if sale_line:
res["offered_hours"]=sale_line.product_uom_qty
res["correct_offered_hours"]=True
else:
res["offered_hours"]=0.0
res["correct_offered_hours"]=False
return res
def do_correction(self, cr, uid, ids, context=None):
for obj in self.browse(cr, uid, ids,context=context):
task = obj.task_id
if task and task.state != "done":
#correct task
task_obj = self.pool.get("project.task")
task_obj.write(cr,uid,task.id,{"planned_hours" : obj.task_hours},context)
#correct sale line
if obj.correct_offered_hours:
sale_line_obj = self.pool.get("sale.order.line")
sale_line = self._sale_line_get(cr, uid, task, context)
if sale_line:
sale_line_obj.write(cr,uid,sale_line.id,{"product_uom_qty" : obj.task_hours },context)
sale_line_obj.write(cr,uid,sale_line.id,{"product_uos_qty" : obj.task_hours },context)
return { "type" : "ir.actions.act_window_close" }
_name = "at_project_sale.correct_time_wizard"
_columns = {
"task_id" : fields.many2one("project.task","Task"),
"task_hours" : fields.float("Task Hours Correction"),
"planned_hours" : fields.float("Planned Hours",readonly=True),
"offered_hours" : fields.float("Offered Hours",readonly=True),
"correct_offered_hours" : fields.boolean("Correct offered Hours")
} |
ruuk/script.web.viewer2 | lib/webviewer/cssutils/__init__.py | Python | gpl-2.0 | 15,021 | 0.002197 | #!/usr/bin/env python
"""cssutils - CSS Cascading Style Sheets library for Python
Copyright (C) 2004-2013 Christof Hoeke
cssutils is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
A Python package to parse and build CSS Cascading Style Sheets. DOM only, not
any rendering facilities!
Based upon and partly implementing the following specifications :
`CSS 2.1 <http://www.w3.org/TR/CSS2/>`__
General CSS rules and properties are defined here
`CSS 2.1 Errata <http://www.w3.org/Style/css2-updates/CR-CSS21-20070719-errata.html>`__
A few errata, mainly the definition of CHARSET_SYM tokens
`CSS3 Module: Syntax <http://www.w3.org/TR/css3-syntax/>`__
Used in parts since cssutils 0.9.4. cssutils tries to use the features from
CSS 2.1 and CSS 3 with preference to CSS3 but as this is not final yet some
parts are from CSS 2.1
`MediaQueries <http://www.w3.org/TR/css3-mediaqueries/>`__
MediaQueries are part of ``stylesheets.MediaList`` since v0.9.4, used in
@import and @media rules.
`Namespaces <http://dev.w3.org/csswg/css3-namespace/>`__
Added in v0.9.1, updated to definition in CSSOM in v0.9.4, updated in 0.9.5
for dev version
`CSS3 Module: Pages Media <http://www.w3.org/TR/css3-page/>`__
Most properties of this spec are implemented including MarginRules
`Selectors <http://www.w3.org/TR/css3-selectors/>`__
The selector syntax defined here (and not in CSS 2.1) should be parsable
with cssutils (*should* mind though ;) )
`DOM Level 2 Style CSS <http://www.w3.org/TR/DOM-Level-2-Style/css.html>`__
DOM for package css. 0.9.8 removes support for CSSValue and related API,
see PropertyValue and Value API for now
`DOM Level 2 Style Stylesheets <http://www.w3.org/TR/DOM-Level-2-Style/stylesheets.html>`__
DOM for package stylesheets
`CSSOM <http://dev.w3.org/csswg/cssom/>`__
A few details (mainly the NamespaceRule DOM) is taken from here. Plan is
to move implementation to the stuff defined here which is newer but still
no REC so might change anytime...
The cssutils tokenizer is a customized implementation of `CSS3 Module: Syntax
(W3C Working Draft 13 August 2003) <http://www.w3.org/TR/css3-syntax/>`__ which
itself is based on the CSS 2.1 tokenizer. It tries to be as compliant as
possible but uses some (helpful) parts of the CSS 2.1 tokenizer.
I guess cssutils is neither CSS 2.1 nor CSS 3 compliant but tries to at least
be able to parse both grammars including some more real world cases (some CSS
hacks are actually parsed and serialized). Both official grammars are not final
nor bugfree but still feasible. cssutils aim is not to be fully compliant to
any CSS specification (the specifications seem to be in a constant flow anyway)
but cssutils *should* be able to read and write as many as possible CSS
stylesheets "in the wild" while at the same time implement the official APIs
which are well documented. Some minor extensions are provided as well.
Please visit http://cthedot.de/cssutils/ for more details.
Tested with Python 2.7.3 and 3.3 on Windows 8 64bit.
This library may be used ``from cssutils import *`` which
import subpackages ``css`` and ``stylesheets``, CSSParser and
CSSSerializer classes only.
Usage may be::
>>> from cssutils import *
>>> parser = CSSParser()
>>> sheet = parser.parseString(u'a { color: red}')
| >>> print sheet.cssText
a {
color: red
}
"""
__all__ = ['css', 'stylesheets', 'CSSParser', 'CSSSerializer']
__docformat__ = 'restructuredtext'
__author__ = | 'Christof Hoeke with contributions by Walter Doerwald'
__date__ = '$LastChangedDate:: $:'
VERSION = '0.9.10'
__version__ = '%s $Id$' % VERSION
import sys
if sys.version_info < (2,6):
bytes = str
import codec
import os.path
import urllib
import urlparse
import xml.dom
# order of imports is important (partly circular)
from . import util
import errorhandler
log = errorhandler.ErrorHandler(raiseExceptions=False)
import css
import stylesheets
from parse import CSSParser
from serialize import CSSSerializer
ser = CSSSerializer()
from profiles import Profiles
profile = Profiles(log=log)
# used by Selector defining namespace prefix '*'
_ANYNS = -1
class DOMImplementationCSS(object):
"""This interface allows the DOM user to create a CSSStyleSheet
outside the context of a document. There is no way to associate
the new CSSStyleSheet with a document in DOM Level 2.
This class is its *own factory*, as it is given to
xml.dom.registerDOMImplementation which simply calls it and receives
an instance of this class then.
"""
_features = [
('css', '1.0'),
('css', '2.0'),
('stylesheets', '1.0'),
('stylesheets', '2.0')
]
def createCSSStyleSheet(self, title, media):
"""
Creates a new CSSStyleSheet.
title of type DOMString
The advisory title. See also the Style Sheet Interfaces
section.
media of type DOMString
The comma-separated list of media associated with the new style
sheet. See also the Style Sheet Interfaces section.
returns
CSSStyleSheet: A new CSS style sheet.
TODO: DOMException
SYNTAX_ERR: Raised if the specified media string value has a
syntax error and is unparsable.
"""
return css.CSSStyleSheet(title=title, media=media)
def createDocument(self, *args):
# not needed to HTML, also not for CSS?
raise NotImplementedError
def createDocumentType(self, *args):
# not needed to HTML, also not for CSS?
raise NotImplementedError
def hasFeature(self, feature, version):
return (feature.lower(), unicode(version)) in self._features
xml.dom.registerDOMImplementation('cssutils', DOMImplementationCSS)
def parseString(*a, **k):
return CSSParser().parseString(*a, **k)
parseString.__doc__ = CSSParser.parseString.__doc__
def parseFile(*a, **k):
return CSSParser().parseFile(*a, **k)
parseFile.__doc__ = CSSParser.parseFile.__doc__
def parseUrl(*a, **k):
return CSSParser().parseUrl(*a, **k)
parseUrl.__doc__ = CSSParser.parseUrl.__doc__
def parseStyle(*a, **k):
return CSSParser().parseStyle(*a, **k)
parseStyle.__doc__ = CSSParser.parseStyle.__doc__
# set "ser", default serializer
def setSerializer(serializer):
"""Set the global serializer used by all class in cssutils."""
global ser
ser = serializer
def getUrls(sheet):
"""Retrieve all ``url(urlstring)`` values (in e.g.
:class:`cssutils.css.CSSImportRule` or :class:`cssutils.css.CSSValue`
objects of given `sheet`.
:param sheet:
:class:`cssutils.css.CSSStyleSheet` object whose URLs are yielded
This function is a generator. The generated URL values exclude ``url(`` and
``)`` and surrounding single or double quotes.
"""
for importrule in (r for r in sheet if r.type == r.IMPORT_RULE):
yield importrule.href
def styleDeclarations(base):
"recursive generator to find all CSSStyleDeclarations"
if hasattr(base, 'cssRules'):
for rule in base.cssRules:
for s in styleDeclarations(rule):
yield s
elif hasattr(base, 'style'):
yield base.style
for style in styleDeclarations(sheet):
for p in style.getProperties(all=True):
for v in p.propertyValue:
if v.type == 'URI':
yield v.uri
def replaceUrls(sheetOrStyle, replacer, ign |
anoopkunchukuttan/transliterator | src/cfilt/transliteration/analysis/analysis_commands.py | Python | gpl-3.0 | 3,540 | 0.031356 | #Copyright Anoop Kunchukuttan 2015 - present
#
#This file is part of the IITB Unsupervised Transliterator
#
#IITB Unsupervised Transliterator is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#IITB Unsupervised Transliterator is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with IITB Unsupervised Transliterator. If not, see <http://www.gnu.org/licenses/>.
from cfilt.transliteration.analysis import transliteration_analysis as ta
import sys, codecs, yaml
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from indicnlp import langinfo
langs=['bn','gu','hi','pa','mr','kK','ml','ta','te']
def monolingual_analysis_all(indir,outdir):
"""
indir: contains monolingual files of the name: 'lang_code.txt'
outdir: contains output files for each language: 'lang_code.pickle'
"""
for lang in langs:
ta.monolingual_analysis('{}/{}.txt'.format(indir,lang),'{}/{}.yaml'.format(outdir,lang),lang)
def compare_character_ratios(mono_datadir):
## read language data
lang_data=[]
for lang in langs:
with codecs.open('{}/{}.yaml'.format(mono_datadir,lang),'r','utf-8') as datafile:
lang_data.append(yaml.load(datafile))
### Plot character ratios
charratio_mat=np.zeros((len(langs),langinfo.COORDINATED_RANGE_E | ND_INCLUSIVE-langinfo.COORDINATED_RANGE_START_INCLUSIVE+1))
for i,lang in enumerate(langs):
for c,v in lang_data[i]['char_proportions'].iteritems():
charratio_mat[i,c]=v
## plot
matplotlib.rc('font', family='Lohit | Hindi')
fig, ax = plt.subplots()
plt.pcolor(charratio_mat,cmap=plt.cm.hot_r,edgecolors='k')
plt.colorbar()
plt.xticks(np.arange(0,charratio_mat.shape[1])+0.5,[ langinfo.offset_to_char(o,'hi') for o in xrange(langinfo.COORDINATED_RANGE_START_INCLUSIVE,langinfo.COORDINATED_RANGE_END_INCLUSIVE+1)])
plt.yticks(np.arange(0,charratio_mat.shape[0])+0.5,xrange(len(langs)))
plt.show()
plt.close()
def compare_kl_divergence(mono_datadir):
## read language data
lang_data=[]
for lang in langs:
with codecs.open('{}/{}.yaml'.format(mono_datadir,lang),'r','utf-8') as datafile:
lang_data.append(yaml.load(datafile))
# compute kl divergence
kl_div_mat=np.zeros((len(langs),len(langs)))
for i,langi in enumerate(langs):
for j,langj in enumerate(langs):
kl_div_mat[i,j]=ta.kl_divergence(lang_data[i]['char_proportions'],lang_data[j]['char_proportions'])
## plot
fig, ax = plt.subplots()
plt.pcolor(kl_div_mat,cmap=plt.cm.hot_r,edgecolors='k')
plt.colorbar()
plt.xticks(np.arange(0,kl_div_mat.shape[1])+0.5,langs)
plt.yticks(np.arange(0,kl_div_mat.shape[0])+0.5,langs)
plt.show()
plt.close()
if __name__=='__main__':
commands={
'monolingual_analysis_all': monolingual_analysis_all,
'compare_character_ratios': compare_character_ratios,
'compare_kl_divergence': compare_kl_divergence,
}
commands[sys.argv[1]](*sys.argv[2:])
|
PaloAltoNetworks/minemeld-core | tests/test_ft_st.py | Python | apache-2.0 | 8,708 | 0.00023 | # Copyright 2015 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FT ST tests
Unit tests for minemeld.ft.st
"""
import unittest
import tempfile
import shutil
import random
import uuid
import time
from nose.plugins.attrib import attr
import minemeld.ft.st
TABLENAME = tempfile.mktemp(prefix='minemeld.ftsttest')
NUM_ELEMENTS = 10000
class MineMeldFTSTTests(unittest.TestCase):
def setUp(self):
try:
shutil.rmtree(TABLENAME)
except:
pass
def te | arDown(self):
try:
shutil.rmtree(TABLENAME)
except:
pass
def test_add_delete(self):
st = minemeld.ft.st.ST(TABLENAME, 8, truncate=True)
sid = uuid.uuid4().bytes
st.put(sid, 1, 5, 1)
st.delete(sid, 1, 5, 1)
st.close()
def test_query_endpoints_forward(self):
st = minemeld.ft.st.ST(TABLENAME, 8, truncate=True)
sid1 = uuid.uuid4().bytes
sid2 = uu | id.uuid4().bytes
st.put(sid1, 1, 70, 1)
st.put(sid2, 50, 100, 1)
eps = [ep[0] for ep in st.query_endpoints(
start=0,
stop=st.max_endpoint,
reverse=False,
include_start=False,
include_stop=False
)]
self.assertEqual(eps, [1, 50, 70, 100])
st.close()
def test_query_endpoints_reverse(self):
st = minemeld.ft.st.ST(TABLENAME, 8, truncate=True)
sid1 = uuid.uuid4().bytes
sid2 = uuid.uuid4().bytes
st.put(sid1, 1, 70, 1)
st.put(sid2, 50, 100, 1)
eps = [ep[0] for ep in st.query_endpoints(
start=0,
stop=st.max_endpoint,
reverse=True,
include_start=False,
include_stop=False
)]
self.assertEqual(eps, [100, 70, 50, 1])
st.close()
def test_basic_cover(self):
st = minemeld.ft.st.ST(TABLENAME, 8, truncate=True)
sid = uuid.uuid4().bytes
st.put(sid, 1, 5, 1)
for i in range(1, 6):
ci = st.cover(i)
interval = next(ci, None)
self.assertEqual(interval[0], sid)
self.assertEqual(interval[1], 1)
self.assertEqual(interval[2], 1)
self.assertEqual(interval[3], 5)
interval2 = next(ci, None)
self.assertEqual(interval2, None)
st.close()
def test_cover_overlap(self):
st = minemeld.ft.st.ST(TABLENAME, 8, truncate=True)
sid1 = uuid.uuid4().bytes
sid2 = uuid.uuid4().bytes
st.put(sid1, 1, 5, 1)
st.put(sid2, 3, 7, 2)
ci = st.cover(1)
interval = next(ci, None)
self.assertEqual(interval[0], sid1)
self.assertEqual(interval[1], 1)
self.assertEqual(interval[2], 1)
self.assertEqual(interval[3], 5)
interval = next(ci, None)
self.assertEqual(interval, None)
ci = st.cover(3)
intervals = [i for i in st.cover(3)]
self.assertEqual(len(intervals), 2)
self.assertEqual(intervals[0][0], sid1)
self.assertEqual(intervals[0][1], 1)
self.assertEqual(intervals[0][2], 1)
self.assertEqual(intervals[0][3], 5)
self.assertEqual(intervals[1][0], sid2)
self.assertEqual(intervals[1][1], 2)
self.assertEqual(intervals[1][2], 3)
self.assertEqual(intervals[1][3], 7)
ci = st.cover(7)
interval = next(ci, None)
self.assertEqual(interval[0], sid2)
self.assertEqual(interval[1], 2)
self.assertEqual(interval[2], 3)
self.assertEqual(interval[3], 7)
interval = next(ci, None)
self.assertEqual(interval, None)
st.close()
def test_cover_overlap2(self):
st = minemeld.ft.st.ST(TABLENAME, 8, truncate=True)
sid1 = uuid.uuid4().bytes
sid2 = uuid.uuid4().bytes
st.put(sid1, 3, 7, 1)
st.put(sid2, 3, 7, 2)
intervals = [i for i in st.cover(3)]
self.assertEqual(len(intervals), 2)
self.assertEqual(intervals[0][0], sid2)
self.assertEqual(intervals[0][1], 2)
self.assertEqual(intervals[0][2], 3)
self.assertEqual(intervals[0][3], 7)
self.assertEqual(intervals[1][0], sid1)
self.assertEqual(intervals[1][1], 1)
self.assertEqual(intervals[1][2], 3)
self.assertEqual(intervals[1][3], 7)
st.close()
def _random_map(self, nbits=10, nintervals=1000):
epmax = (1 << nbits)-1
rmap = [set() for i in xrange(epmax+1)]
st = minemeld.ft.st.ST(TABLENAME, nbits, truncate=True)
for j in xrange(nintervals):
sid = uuid.uuid4().bytes
end = random.randint(0, epmax)
start = random.randint(0, epmax)
if end < start:
start, end = end, start
st.put(sid, start, end, level=1)
for k in xrange(start, end+1):
rmap[k].add(sid)
eps = []
for ep, lvl, t, id_ in st.query_endpoints():
if ep == 0 or ep == epmax:
self.assertTrue(len(rmap[ep]) > 0)
else:
c = len(rmap[ep] ^ rmap[ep-1]) + len(rmap[ep] ^ rmap[ep+1])
self.assertTrue(
c > 0,
msg="no change detected @ep %d: "
"%r %r %r" % (ep, rmap[ep-1], rmap[ep], rmap[ep+1])
)
eps.append(ep)
for e in eps:
intervals = [x[0] for x in st.cover(e)]
intervals.sort()
self.assertListEqual(intervals, sorted(rmap[e]))
st.close()
def test_random_map_fast(self):
self._random_map()
@attr('slow')
def test_random_map_fast2(self):
self._random_map(nintervals=2000)
def test_255(self):
st = minemeld.ft.st.ST(TABLENAME, 32, truncate=True)
sid = uuid.uuid4().bytes
st.put(sid, 0, 0xFF)
self.assertEqual(st.num_segments, 1)
self.assertEqual(st.num_endpoints, 2)
st.close()
@attr('slow')
def test_stress_0(self):
num_intervals = 100000
st = minemeld.ft.st.ST(TABLENAME, 32, truncate=True)
t1 = time.time()
for j in xrange(num_intervals):
end = random.randint(0, 0xFFFFFFFF)
if random.randint(0, 1) == 0:
end = end & 0xFFFFFF00
start = end + 0xFF
else:
start = end
sid = uuid.uuid4().bytes
t2 = time.time()
dt = t2-t1
t1 = time.time()
for j in xrange(num_intervals):
end = random.randint(0, 0xFFFFFFFF)
if random.randint(0, 1) == 0:
start = end & 0xFFFFFF00
end = start + 0xFF
else:
start = end
sid = uuid.uuid4().bytes
st.put(sid, start, end)
t2 = time.time()
print "TIME: Inserted %d intervals in %d" % (num_intervals, (t2-t1-dt))
self.assertEqual(st.num_segments, num_intervals)
self.assertEqual(st.num_endpoints, num_intervals*2)
st.close()
@attr('slow')
def test_stress_1(self):
num_intervals = 100000
st = minemeld.ft.st.ST(TABLENAME, 32, truncate=True)
t1 = time.time()
for j in xrange(num_intervals):
end = random.randint(0, 0xFFFFFFFF)
start = random.randint(0, end)
sid = uuid.uuid4().bytes
t2 = time.time()
dt = t2-t1
t1 = time.time()
for j in xrange(num_intervals):
end = random.randint(0, 0xFFFFFFFF |
tashigaofei/BlogSpider | scrapy/tests/test_http_headers.py | Python | mit | 4,934 | 0.001824 | import unittest
import copy
from scrapy.http import Headers
class HeadersTest(unittest.TestCase):
    """Tests for scrapy.http.Headers: single vs. multi-valued access,
    encoding of unicode keys/values, deletion, copying and defaults."""

    def test_basics(self):
        h = Headers({'Content-Type': 'text/html', 'Content-Length': 1234})
        assert h['Content-Type']
        assert h['Content-Length']
        self.assertRaises(KeyError, h.__getitem__, 'Accept')
        self.assertEqual(h.get('Accept'), None)
        self.assertEqual(h.getlist('Accept'), [])
        self.assertEqual(h.get('Accept', '*/*'), '*/*')
        self.assertEqual(h.getlist('Accept', '*/*'), ['*/*'])
        self.assertEqual(h.getlist('Accept', ['text/html', 'images/jpeg']), ['text/html','images/jpeg'])

    def test_single_value(self):
        h = Headers()
        h['Content-Type'] = 'text/html'
        self.assertEqual(h['Content-Type'], 'text/html')
        self.assertEqual(h.get('Content-Type'), 'text/html')
        self.assertEqual(h.getlist('Content-Type'), ['text/html'])

    def test_multivalue(self):
        h = Headers()
        h['X-Forwarded-For'] = hlist = ['ip1', 'ip2']
        # __getitem__/get return the last stored value; getlist returns a copy.
        self.assertEqual(h['X-Forwarded-For'], 'ip2')
        self.assertEqual(h.get('X-Forwarded-For'), 'ip2')
        self.assertEqual(h.getlist('X-Forwarded-For'), hlist)
        assert h.getlist('X-Forwarded-For') is not hlist

    def test_encode_utf8(self):
        # u'\xa3' (pound sign) must be stored UTF-8 encoded as '\xc2\xa3'.
        h = Headers({u'key': u'\xa3'}, encoding='utf-8')
        key, val = dict(h).items()[0]
        assert isinstance(key, str), key
        assert isinstance(val[0], str), val[0]
        self.assertEqual(val[0], '\xc2\xa3')

    def test_encode_latin1(self):
        h = Headers({u'key': u'\xa3'}, encoding='latin1')
        key, val = dict(h).items()[0]
        self.assertEqual(val[0], '\xa3')

    def test_encode_multiple(self):
        h = Headers({u'key': [u'\xa3']}, encoding='utf-8')
        key, val = dict(h).items()[0]
        self.assertEqual(val[0], '\xc2\xa3')

    def test_delete_and_contains(self):
        h = Headers()
        h['Content-Type'] = 'text/html'
        assert 'Content-Type' in h
        del h['Content-Type']
        assert 'Content-Type' not in h

    def test_setdefault(self):
        h = Headers()
        hlist = ['ip1', 'ip2']
        olist = h.setdefault('X-Forwarded-For', hlist)
        assert h.getlist('X-Forwarded-For') is not hlist
        assert h.getlist('X-Forwarded-For') is olist
        h = Headers()
        olist = h.setdefault('X-Forwarded-For', 'ip1')
        self.assertEqual(h.getlist('X-Forwarded-For'), ['ip1'])
        assert h.getlist('X-Forwarded-For') is olist

    def test_iterables(self):
        idict = {'Content-Type': 'text/html', 'X-Forwarded-For': ['ip1', 'ip2']}
        h = Headers(idict)
        self.assertEqual(dict(h), {'Content-Type': ['text/html'], 'X-Forwarded-For': ['ip1', 'ip2']})
        self.assertEqual(h.keys(), ['X-Forwarded-For', 'Content-Type'])
        self.assertEqual(h.items(), [('X-Forwarded-For', ['ip1', 'ip2']), ('Content-Type', ['text/html'])])
        self.assertEqual(list(h.iteritems()),
                         [('X-Forwarded-For', ['ip1', 'ip2']), ('Content-Type', ['text/html'])])
        self.assertEqual(h.values(), ['ip2', 'text/html'])

    def test_update(self):
        h = Headers()
        h.update({'Content-Type': 'text/html', 'X-Forwarded-For': ['ip1', 'ip2']})
        self.assertEqual(h.getlist('Content-Type'), ['text/html'])
        self.assertEqual(h.getlist('X-Forwarded-For'), ['ip1', 'ip2'])

    def test_copy(self):
        h1 = Headers({'header1': ['value1', 'value2']})
        h2 = copy.copy(h1)
        self.assertEqual(h1, h2)
        # Copies are equal but do not share the underlying value lists.
        self.assertEqual(h1.getlist('header1'), h2.getlist('header1'))
        assert h1.getlist('header1') is not h2.getlist('header1')
        assert isinstance(h2, Headers)

    def test_appendlist(self):
        h1 = Headers({'header1': 'value1'})
        h1.appendlist('header1', 'value3')
        self.assertEqual(h1.getlist('header1'), ['value1', 'value3'])
        h1 = Headers()
        h1.appendlist('header1', 'value1')
        h1.appendlist('header1', 'value3')
        self.assertEqual(h1.getlist('header1'), ['value1', 'value3'])

    def test_setlist(self):
        h1 = Headers({'header1': 'value1'})
        self.assertEqual(h1.getlist('header1'), ['value1'])
        h1.setlist('header1', ['value2', 'value3'])
        self.assertEqual(h1.getlist('header1'), ['value2', 'value3'])

    def test_setlistdefault(self):
        h1 = Headers({'header1': 'value1'})
        h1.setlistdefault('header1', ['value2', 'value3'])
        h1.setlistdefault('header2', ['value2', 'value3'])
        self.assertEqual(h1.getlist('header1'), ['value1'])
        self.assertEqual(h1.getlist('header2'), ['value2', 'value3'])

    def test_none_value(self):
        h1 = Headers()
        h1['foo'] = 'bar'
        # Assigning None clears the stored values for the key.
        h1['foo'] = None
        h1.setdefault('foo', 'bar')
        self.assertEqual(h1.get('foo'), None)
        self.assertEqual(h1.getlist('foo'), [])
|
tangyanhan/homesite | manage_videos/import_videos.py | Python | mit | 11,680 | 0.00214 | # coding=utf8
# encoding: utf-8
import os
import platform
import re
import signal
import sys
import traceback
from subprocess import Popen, PIPE
from threading import Thread, current_thread
from Queue import Queue
from util.log import get_logger, log
from video.models import Video, KeywordVideoId
from django.db.models import Max
from collect_video import G_GEN_IMAGE
# Number of concurrent import worker threads.
MAX_THREAD_NUM = 4
# Per-thread stop flags; worker i polls THREAD_STOP_FLAGS[i] and exits on True.
THREAD_STOP_FLAGS = []
# Output directories / thumbnail geometry for generated artwork.
THUMB_DIR = './static/thumb'
THUMB_SIZE = '180x135'
COVER_DIR = './static/cover'
FLIP_DIR = './static/flip'
# Number of flip (preview) images generated per video.
FLIP_NUM = 10
# Shared queue of ImportTask objects consumed by the worker threads.
task_queue = Queue(maxsize=2000)
def register_int_signal_handler():
    """Install a Ctrl-C (SIGINT) handler that asks all worker threads to stop.

    The handler sets every entry of THREAD_STOP_FLAGS to True; worker
    threads poll these flags and leave their processing loops.
    """
    def stop_thread_handler(signum, frame):
        log.info("Received signal {0}. Will stop all task threads".format(signum))
        for _ in range(len(THREAD_STOP_FLAGS)):
            THREAD_STOP_FLAGS[_] = True

    # BUGFIX: signal.CTRL_C_EVENT is only meaningful as an argument to
    # os.kill() on Windows; passing it to signal.signal() raises ValueError.
    # SIGINT is delivered for Ctrl-C on both Windows and POSIX, so a single
    # unconditional registration is correct on every platform.
    signal.signal(signal.SIGINT, stop_thread_handler)
def next_video_id(current, path):
    """Return (video_id, new_current) for *path*.

    Reuses the id of a video already stored with the same path; otherwise
    allocates the next sequential id and advances the counter.
    """
    matches = Video.objects.filter(path=path)
    if matches:
        return matches[0].video_id, current
    allocated = current + 1
    return allocated, allocated
def create_task_list(path_list):
    """
    Walks path recursively, and create a task list
    :param path_list: a list of (path, rating)
    :return: a list of ImportTask objects
    """
    # Start numbering after the highest video_id already in the database so
    # ids can be handed out without locking (see ImportTask docstring).
    current_video_id = Video.objects.all().aggregate(Max('video_id'))['video_id__max']
    if not current_video_id:
        current_video_id = 0
    task_list = []
    for (path, rating) in path_list:
        base_path = os.path.split(path)[0]
        # A single file may be given directly instead of a directory.
        if os.path.isfile(path):
            file_name = os.path.basename(path)
            if is_valid_video_file(path, file_name):
                video_id, current_video_id = next_video_id(current_video_id, path)
                task_list.append(ImportTask(video_id, base_path, path, rating))
            continue
        for (root, dirs, files) in os.walk(path):
            for file_name in files:
                try:
                    file_path = os.path.join(root, file_name)
                    if os.path.isdir(file_path):
                        continue
                    if is_valid_video_file(file_path, file_name):
                        video_id, current_video_id = next_video_id(current_video_id, file_path)
                        task_list.append(ImportTask(video_id, base_path, file_path, rating))
                except:
                    # NOTE(review): bare except keeps the walk going on any
                    # per-file failure; the traceback is printed below.
                    log.error('#Error while proceeding: {0}'.format(file_name))
                    exc_type, exc_value, exc_traceback = sys.exc_info()
                    traceback.print_exception(exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout)
    return task_list
def start_tasks(task_list):
    """Queue all tasks, spawn worker threads and block until the queue drains."""
    global task_queue
    for task in task_list:
        task_queue.put(task)
    # First call: create one stop flag per worker slot (True == not running).
    if not THREAD_STOP_FLAGS:
        for _ in range(MAX_THREAD_NUM):
            THREAD_STOP_FLAGS.append(True)
    # Make sure the artwork output directories exist.
    if not os.path.isdir(COVER_DIR):
        os.mkdir(COVER_DIR)
    if not os.path.isdir(THUMB_DIR):
        os.mkdir(THUMB_DIR)
    if not os.path.isdir(FLIP_DIR):
        os.mkdir(FLIP_DIR)
    # Only (re)start workers whose slot is currently stopped.
    for _ in range(MAX_THREAD_NUM):
        if THREAD_STOP_FLAGS[_]:
            t = Thread(target=import_worker, kwargs={'thread_index': _})
            t.name = str(_)
            t.daemon = False
            t.start()
    task_queue.join()
def add_keywords_to_db(task_list):
    """Derive keywords from each task's file path and store them in the DB.

    Keywords already recorded for a video are skipped; any per-keyword
    failure is logged and does not abort the remaining work.
    """
    blacklist = load_keyword_blacklist_from_file()
    for task in task_list:
        base_path = task.base_path
        file_path = task.file_path
        video_id = task.video_id
        keywords = get_keywords(base_path, file_path, blacklist)
        # BUGFIX: the format string had no replacement field, so the keyword
        # list was never actually logged.
        log.info('#Keywords:{0}'.format(keywords))
        for key in keywords:
            try:
                if KeywordVideoId.objects.filter(keyword=key, video_id=video_id):
                    log.info("Existing keyword {0} for {1}".format(key, video_id))
                    continue
                keyword_record = KeywordVideoId()
                keyword_record.keyword = key
                keyword_record.video = Video.objects.get(video_id=video_id)
                keyword_record.save()
                log.info('#Added keyword:{0} for video_id: {1}'.format(key, video_id))
            except Exception as e:
                log.error("Error while adding keyword {0} to video {1}: {2}".format(key, video_id, e))
class ImportTask(object):
    """Unit of work consumed by the import worker threads."""
    def __init__(self, video_id, base_path, path, rating=Video.P):
        """
        Create an import task object.
        :param video_id: a pre-allocated video_id in number, so we don't need to lock db in multiple thread.
        :param base_path: path prefix that will be ignored when creating keywords from path.
        :param path: path of the file
        :param rating: rating of the video, highest by default.
        """
        self.video_id = video_id
        self.base_path = base_path
        # Note: stored under a different name than the constructor argument.
        self.file_path = path
        self.rating = rating
def import_worker(thread_index):
    """
    Thread worker that deals with tasks.
    :return:
    """
    # Mark this worker as running; the flag is flipped back to True by the
    # SIGINT handler or once the queue is empty.
    THREAD_STOP_FLAGS[thread_index] = False
    while not (THREAD_STOP_FLAGS[thread_index] or task_queue.empty()):
        task = task_queue.get()
        do_import_video_task(task)
        task_queue.task_done()
    THREAD_STOP_FLAGS[thread_index] = True
def do_import_video_task(task):
    """Import one video: generate cover/thumb/flips and save the DB record."""
    video_id = task.video_id
    file_path = task.file_path
    rating = task.rating
    # Strip the 4-character ".mp4" extension to obtain the title.
    file_name = os.path.basename(file_path)[:-4]
    tlog = get_logger(current_thread().name)
    videos = Video.objects.filter(path=file_path)
    if videos:
        tlog.info("Existing video: {0}".format(task.file_path))
        return
    video = Video()
    video.video_id = video_id
    video.rating = rating
    # get_thumb_path/get_cover_path/gen_cover/gen_thumb/gen_flips are defined
    # elsewhere in this package (not visible in this module).
    thumb_path = get_thumb_path(video.video_id)
    cover_path = get_cover_path(video.video_id)
    if not gen_cover(task.file_path, cover_path):
        tlog.error("Failed to gen cover for {0}".format(file_path))
        return
    success, duration = gen_thumb(file_path, thumb_path)
    if success:
        if not gen_flips(file_path, video.video_id, duration, FLIP_DIR, FLIP_NUM):
            tlog.error("Failed to gen flips for {0}".format(file_path))
    else:
        tlog.error("Failed to gen thumb for {0}".format(file_path))
    # The record is saved even when thumb/flip generation failed above.
    video.title = file_name
    video.path = file_path
    video.duration = duration
    video.save()
    tlog.info('#Video: {0} [{1}] {2}'.format(video.title, video.duration, video.path))
def is_valid_video_file(file_path, file_name):
    """Return True when file_name looks like an importable mp4 video.

    Hidden files and non-.mp4 files are rejected outright; zero-byte files
    are deleted from disk and rejected as well.
    """
    hidden = file_name.startswith('.')
    is_mp4 = file_name.endswith('.mp4')
    if hidden or not is_mp4:
        return False
    if os.path.getsize(file_path) == 0:
        log.info('Remove invalid video file: {0}'.format(file_path))
        os.remove(file_path)
        return False
    return True
def load_keyword_blacklist_from_file():
    """Load the set of ignored keywords from ./keywords.blacklist.

    Empty lines are skipped.  On any read error the problem is only logged
    and whatever was collected so far (possibly an empty set) is returned.
    """
    blacklist = set()
    keyword_file = 'keywords.blacklist'
    try:
        with open(keyword_file, 'r') as kfp:
            for line in kfp:
                line = line.strip('\n')
                if line:
                    blacklist.add(line)
        log.info("Keywords blacklist: {0}".format(blacklist))
    except Exception as e:
        log.error("Error while processing {0}:{1}".format(keyword_file, e))
    return blacklist
def get_keywords(prefix, file_path, blacklist):
"""
Get keywords from file path
:param prefix: Prefix of the dir path, so we can ignore them
:param file_path: full path of the video file
:param blacklist: A set of words/symbols that should be ignored
:return: a list of keywords
"""
file_path = str(file_path).replace(prefix, '') # remove base_dir from file_path
file_path = os.path.splitext(file_path)[0] # Only keep the part without extension
file_path = str(file_path).lower()
for bad_keyword in blacklist:
file_path = file_path.replace(bad_keyword, ' ')
file_path = re.sub(r'\s+', ' ', file_path) # Replace multiple spaces to single one
keywords = file_path.split(' ')
keywords = [k for k in keywords if k]
return |
criteo/biggraphite | biggraphite/cli/import_whisper.py | Python | apache-2.0 | 8,945 | 0.001006 | #!/usr/bin/env python
# Copyright 2016 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A CLI to import whisper data into Cassandra."""
from __future__ import print_function
import argparse
import datetime
import io
import logging
import multiprocessing
import os
import re
import struct
import sys
import time
from multiprocessing import dummy as multiprocessing_dummy
import progressbar
import scandir
import whisper
from biggraphite import accessor_factory as bg_accessor_factory
from biggraphite import metric as bg_metric
from biggraphite import settings as bg_settings
from biggraphite import utils as bg_utils
from biggraphite.cli import command
# Sink for output that should be suppressed in --quiet mode.
_DEV_NULL = open(os.devnull, "w")
# Pre-compiled struct for one whisper point (timestamp, value).
_POINT_STRUCT = struct.Struct(whisper.pointFormat)
# Per-process _Worker instance, created by _setup_process() in each pool worker.
_WORKER = None
log = logging.getLogger(__name__)
def metric_name_from_wsp(root_dir, prefix, wsp_path):
    """Derive a dotted metric name from a whisper file path.

    The path does not have to exist; it only needs to be located (logically)
    under *root_dir*.

    Args:
        root_dir: directory that is parent to all metrics.
        prefix: string prepended verbatim to the resulting name.
        wsp_path: path of a .wsp file below root_dir.

    Returns:
        The metric name, e.g. "prefix.a.b.c" for root_dir/a/b/c.wsp.
    """
    rel = os.path.relpath(wsp_path, root_dir)
    assert ".." not in rel, "%s not a child of %s" % (root_dir, wsp_path)
    stem, _ext = os.path.splitext(rel)
    return prefix + stem.replace(os.path.sep, ".")
class _Worker(object):
    """Per-process importer: reads whisper files and writes to BigGraphite.

    One instance is created in each pool process by _setup_process(); it owns
    a single accessor and caches the import time window as epoch seconds.
    """

    def __init__(self, opts):
        settings = bg_settings.settings_from_args(opts)
        bg_utils.set_log_level(settings)
        self._accessor = bg_accessor_factory.accessor_from_settings(settings)
        self._opts = opts
        # Convert the datetime bounds once; _read_points compares every
        # point's timestamp against them.
        self.time_start = time.mktime(self._opts.time_start.timetuple())
        self.time_end = time.mktime(self._opts.time_end.timetuple())

    @staticmethod
    def _read_metadata(metric_name, path):
        """Build MetricMetadata from a whisper header, or None if unreadable."""
        info = whisper.info(path)
        if not info:
            return None
        retentions = bg_metric.Retention(
            [
                bg_metric.Stage(precision=a["secondsPerPoint"], points=a["points"])
                for a in info["archives"]
            ]
        )
        aggregator = bg_metric.Aggregator.from_carbon_name(info["aggregationMethod"])
        return bg_metric.MetricMetadata.create(
            aggregator=aggregator,
            retention=retentions,
            carbon_xfilesfactor=info["xFilesFactor"],
        )

    def _read_points(self, path):
        """Return (timestamp, value, count, stage) tuples in the time window.

        Archives whose stage is listed in --ignored_stages are skipped, as
        are empty slots (timestamp == 0).  The count component is always 1.
        """
        info = whisper.info(path)
        res = []
        if not info:
            return []
        archives = info["archives"]
        with io.open(path, "rb") as f:
            buf = f.read()
        # Only the first archive is the stage0 (highest resolution) stage.
        stage0 = True
        for archive in archives:
            offset = archive["offset"]
            stage = bg_metric.Stage(
                precision=archive["secondsPerPoint"],
                points=archive["points"],
                stage0=stage0,
            )
            stage0 = False
            if stage in self._opts.ignored_stages:
                continue
            for _ in range(archive["points"]):
                timestamp, value = _POINT_STRUCT.unpack_from(buf, offset)
                offset += whisper.pointSize
                if timestamp == 0:
                    # Unwritten slot in the whisper archive.
                    continue
                elif timestamp >= self.time_start and timestamp <= self.time_end:
                    res.append((timestamp, value, 1, stage))
        return res

    def import_whisper(self, path):
        """Import one whisper file; return the number of points written."""
        if not self._accessor.is_connected:
            self._accessor.connect()
        name = metric_name_from_wsp(self._opts.root_directory, self._opts.prefix, path)
        metadata = self._read_metadata(name, path)
        # BUGFIX: bail out *before* touching metadata.  The old code called
        # metadata.as_string_dict() first and crashed with AttributeError
        # whenever _read_metadata() returned None (unreadable header).
        if not metadata:
            return 0
        log.debug("%s: %s", name, metadata.as_string_dict())
        metric = bg_metric.make_metric_with_defaults(name, metadata)
        if not self._opts.no_metadata:
            self._accessor.create_metric(metric)
        ret = 0
        if not self._opts.no_data:
            points = self._read_points(path)
            self._accessor.insert_downsampled_points(metric, points)
            ret = len(points)
        return ret
def _setup_process(opts):
    """Pool-process initializer: create this process's module-level _Worker."""
    global _WORKER
    _WORKER = _Worker(opts)
def _import_whisper(*args, **kwargs):
    """Module-level wrapper around _WORKER.import_whisper for the pool.

    Exceptions are logged and swallowed so one bad file does not abort the
    whole run; 0 points is reported in that case.
    """
    assert _WORKER is not None, "_setup_process was never called"
    try:
        return _WORKER.import_whisper(*args, **kwargs)
    except Exception as e:
        log.exception(e)
        return 0
def _parse_opts(args):
    """Parse command-line arguments; returns the populated namespace.

    ignored_stages is normalized from its string form into
    bg_metric.Stage objects before returning.
    """
    parser = argparse.ArgumentParser(
        description="Import whisper files into BigGraphite."
    )
    parser.add_argument(
        "root_directory",
        metavar="WHISPER_DIR",
        help="directory in which to find whisper files",
    )
    parser.add_argument(
        "--filter",
        type=str,
        default=r".*\.wsp",
        help="Only import metrics matching this filter",
    )
    parser.add_argument(
        "--prefix",
        metavar="WHISPER_PREFIX",
        default="",
        help="prefix to prepend to metric names",
    )
    parser.add_argument(
        "--quiet",
        action="store_const",
        default=False,
        const=True,
        help="Show no output unless there are problems.",
    )
    parser.add_argument(
        "--process",
        metavar="N",
        type=int,
        help="number of concurrent process",
        default=multiprocessing.cpu_count(),
    )
    parser.add_argument(
        "--no-data", action="store_true", help="Do not import data, only metadata."
    )
    parser.add_argument(
        "--no-metadata", action="store_true", help="Do not import metadata, only data."
    )
    parser.add_argument(
        "--ignored_stages",
        nargs="*",
        help="Do not import data for these stages.",
        default=[],
    )
    parser.add_argument(
        "--time-start",
        action=command.ParseDateTimeArg,
        help="Read points written later than this time.",
        default=datetime.datetime.fromtimestamp(0),
        required=False,
    )
    parser.add_argument(
        "--time-end",
        action=command.ParseDateTimeArg,
        help="Read points written earlier than this time.",
        default=datetime.datetime.now(),
        required=False,
    )
    bg_settings.add_argparse_arguments(parser)
    opts = parser.parse_args(args)
    opts.ignored_stages = [bg_metric.Stage.from_string(s) for s in opts.ignored_stages]
    return opts
# TODO: put that in a thread.
class _Walker:
    """Recursive directory walker yielding paths that match *regexp*.

    `count` tracks how many matching paths have been yielded so far.
    """
    def __init__(self, root_directory, regexp):
        self.count = 0
        self.root_directory = root_directory
        self.regexp = re.compile(regexp)
    def paths(self, root=None):
        """Generator of matching file paths below *root*, depth-first."""
        root = root or self.root_directory
        for entry in scandir.scandir(root):
            if entry.is_dir():
                for filename in self.paths(entry.path):
                    yield filename
            elif self.regexp.match(os.path.join(root, entry.name)):
                self.count += 1
                yield os.path.join(root, entry.name)
def main(args=None):
"""Entry point for the module."""
if not args:
args = sys.argv[1:]
opts = _parse_opts(args)
pool_factory = multiprocessing.Pool
if opts.process == 1:
pool_factory = multiprocessing_dummy.Pool
pool = pool_factory(opts.process, initializer=_setup_process, initargs=(opts,))
out_fd = sys.stderr
if opts.quiet:
out_fd = _DEV_NULL
if "__pypy__" not in sys.builtin_module_names:
print("Running without PyPy, this is about 20 times slower", file=out_fd)
|
ahmedaljazzar/edx-platform | cms/envs/test.py | Python | agpl-3.0 | 11,745 | 0.001703 | # -*- coding: utf-8 -*-
"""
This config file runs the simplest dev environment using sqlite, and db-based
sessions. Assumes structure:
/envroot/
/db # This is where it'll write the database file
/edx-platform # The location of this repo
/log # Where we're going to write log files
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-imp | ort, unused-wildcard-import
from .common import *
import os
from path import Path as path |
from uuid import uuid4
from util.db import NoOpMigrationModules
from openedx.core.lib.derived import derive_settings
# import settings from LMS for consistent behavior with CMS
# pylint: disable=unused-import
from lms.envs.test import (
WIKI_ENABLED,
PLATFORM_NAME,
SITE_NAME,
DEFAULT_FILE_STORAGE,
MEDIA_ROOT,
MEDIA_URL,
COMPREHENSIVE_THEME_DIRS,
JWT_AUTH,
REGISTRATION_EXTRA_FIELDS,
ECOMMERCE_API_URL,
)
# Allow all hosts during tests, we use a lot of different ones all over the codebase.
ALLOWED_HOSTS = [
'*'
]
# mongo connection settings
MONGO_PORT_NUM = int(os.environ.get('EDXAPP_TEST_MONGO_PORT', '27017'))
MONGO_HOST = os.environ.get('EDXAPP_TEST_MONGO_HOST', 'localhost')
THIS_UUID = uuid4().hex[:5]
TEST_ROOT = path('test_root')
# Want static files in the same dir for running on jenkins.
STATIC_ROOT = TEST_ROOT / "staticfiles"
WEBPACK_LOADER['DEFAULT']['STATS_FILE'] = STATIC_ROOT / "webpack-stats.json"
GITHUB_REPO_ROOT = TEST_ROOT / "data"
DATA_DIR = TEST_ROOT / "data"
COMMON_TEST_DATA_ROOT = COMMON_ROOT / "test" / "data"
# For testing "push to lms"
FEATURES['ENABLE_EXPORT_GIT'] = True
GIT_REPO_EXPORT_DIR = TEST_ROOT / "export_course_repos"
# TODO (cpennington): We need to figure out how envs/test.py can inject things into common.py so that we don't have to repeat this sort of thing
STATICFILES_DIRS = [
COMMON_ROOT / "static",
PROJECT_ROOT / "static",
]
STATICFILES_DIRS += [
(course_dir, COMMON_TEST_DATA_ROOT / course_dir)
for course_dir in os.listdir(COMMON_TEST_DATA_ROOT)
if os.path.isdir(COMMON_TEST_DATA_ROOT / course_dir)
]
# Avoid having to run collectstatic before the unit test suite
# If we don't add these settings, then Django templates that can't
# find pipelined assets will raise a ValueError.
# http://stackoverflow.com/questions/12816941/unit-testing-with-django-pipeline
STATICFILES_STORAGE = 'pipeline.storage.NonPackagingPipelineStorage'
STATIC_URL = "/static/"
# Update module store settings per defaults for tests
update_module_store_settings(
MODULESTORE,
module_store_options={
'default_class': 'xmodule.raw_module.RawDescriptor',
'fs_root': TEST_ROOT / "data",
},
doc_store_settings={
'db': 'test_xmodule_{}'.format(THIS_UUID),
'host': MONGO_HOST,
'port': MONGO_PORT_NUM,
'collection': 'test_modulestore',
},
)
CONTENTSTORE = {
'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore',
'DOC_STORE_CONFIG': {
'host': MONGO_HOST,
'db': 'test_xcontent',
'port': MONGO_PORT_NUM,
'collection': 'dont_trip',
},
# allow for additional options that can be keyed on a name, e.g. 'trashcan'
'ADDITIONAL_OPTIONS': {
'trashcan': {
'bucket': 'trash_fs'
}
}
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': TEST_ROOT / "db" / "cms.db",
'ATOMIC_REQUESTS': True,
},
}
if os.environ.get('DISABLE_MIGRATIONS'):
# Create tables directly from apps' models. This can be removed once we upgrade
# to Django 1.9, which allows setting MIGRATION_MODULES to None in order to skip migrations.
MIGRATION_MODULES = NoOpMigrationModules()
LMS_BASE = "localhost:8000"
LMS_ROOT_URL = "http://{}".format(LMS_BASE)
FEATURES['PREVIEW_LMS_BASE'] = "preview.localhost"
CACHES = {
# This is the cache used for most things. Askbot will not work without a
# functioning cache -- it relies on caching to load its settings in places.
# In staging/prod envs, the sessions also live here.
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_loc_mem_cache',
'KEY_FUNCTION': 'util.memcache.safe_key',
},
# The general cache is what you get if you use our util.cache. It's used for
# things like caching the course.xml file for different A/B test groups.
# We set it to be a DummyCache to force reloading of course.xml in dev.
# In staging environments, we would grab VERSION from data uploaded by the
# push process.
'general': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
'KEY_PREFIX': 'general',
'VERSION': 4,
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'mongo_metadata_inheritance': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': os.path.join(tempfile.gettempdir(), 'mongo_metadata_inheritance'),
'TIMEOUT': 300,
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'loc_cache': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_location_mem_cache',
},
'course_structure_cache': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
}
################################# CELERY ######################################
CELERY_ALWAYS_EAGER = True
CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend'
CLEAR_REQUEST_CACHE_ON_TASK_COMPLETION = False
########################### Server Ports ###################################
# These ports are carefully chosen so that if the browser needs to
# access them, they will be available through the SauceLabs SSH tunnel
LETTUCE_SERVER_PORT = 8003
XQUEUE_PORT = 8040
YOUTUBE_PORT = 8031
LTI_PORT = 8765
VIDEO_SOURCE_PORT = 8777
################### Make tests faster
# http://slacy.com/blog/2012/04/make-your-tests-faster-in-django-1-4/
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
]
# No segment key
CMS_SEGMENT_KEY = None
FEATURES['ENABLE_SERVICE_STATUS'] = True
# Toggles embargo on for testing
FEATURES['EMBARGO'] = True
# set up some testing for microsites
FEATURES['USE_MICROSITES'] = True
MICROSITE_ROOT_DIR = COMMON_ROOT / 'test' / 'test_sites'
MICROSITE_CONFIGURATION = {
"test_site": {
"domain_prefix": "test-site",
"university": "test_site",
"platform_name": "Test Site",
"logo_image_url": "test_site/images/header-logo.png",
"email_from_address": "test_site@edx.org",
"payment_support_email": "test_site@edx.org",
"ENABLE_MKTG_SITE": False,
"SITE_NAME": "test_site.localhost",
"course_org_filter": "TestSiteX",
"course_about_show_social_links": False,
"css_overrides_file": "test_site/css/test_site.css",
"show_partners": False,
"show_homepage_promo_video": False,
"course_index_overlay_text": "This is a Test Site Overlay Text.",
"course_index_overlay_logo_file": "test_site/images/header-logo.png",
"homepage_overlay_html": "<h1>This is a Test Site Overlay HTML</h1>",
"ALWAYS_REDIRECT_HOMEPAGE_TO_DASHBOARD_FOR_AUTHENTICATED_USER": False,
"COURSE_CATALOG_VISIBILITY_PERMISSION": "see_in_catalog",
"COURSE_ABOUT_VISIBILITY_PERMISSION": "see_about_page",
"ENABLE_SHOPPING_CART": True,
"ENABLE_PAID_COURSE_REGISTRATION": True,
"SESSION_COOKIE_DOMAIN": "test_site.localhost",
"urls": {
'ABOUT': 'test-site/about',
'PRIVACY': 'test-site/privacy',
'TOS_AND_HONOR': 'test-site/tos-and-honor',
},
},
"site_with_logistration": {
"domain_prefix": "logistration",
"university": "logistration",
"platform_name": "Test logistration",
"logo_image_url": "test_site/images/header-logo.png",
"email_from_address": "test_site@edx.org",
"payment_support_email": "test_site@edx.org",
|
mscuthbert/abjad | abjad/tools/pitchtools/test/test_pitchtools_Registration___init__.py | Python | gpl-3.0 | 1,317 | 0.000759 | # -*- encoding: utf-8 -*-
from abjad import *
def test_pitchtools_Registration___init___01():
    r'''Initialize from items.
    '''
    # NOTE(review): pair semantics (range string, number) are taken from the
    # Registration API -- confirm against abjad's documentation.
    mapping = pitchtools.Registration([('[A0, C4)', 15), ('[C4, C8)', 27)])
    assert isinstance(mapping, pitchtools.Registration)
def test_pitchtools_Registration___init___02():
    r'''Initialize from instance.
    '''
    mapping_1 = pitchtools.Registration([('[A0, C4)', 15), ('[C4, C8)', 27)])
    mapping_2 = pitchtools.Registration(mapping_1)
    assert isinstance(mapping_1, pitchtools.Registration)
    assert isinstance(mapping_2, pitchtools.Registration)
    # Copy-construction must preserve equality with the source registration.
    assert mapping_1 == mapping_2
def test_pitchtools_Registration___init___03():
    r'''Initialize from named instance.
    '''
    mapping_1 = pitchtools.Registration(
        [('[A0, C4)', 15), ('[C4, C8)', 27)],
        )
    mapping_2 = pitchtools.Registration(mapping_1)
    assert isinstance(mapping_1, pitchtools.Registration)
    assert isinstance(mapping_2, pitchtools.Registration)
    # Copy-construction must preserve equality with the source registration.
    assert mapping_1 == mapping_2
def test_pitchtools_Registration___init___04():
    r'''Initialize empty.
    '''
    # Both no-argument and empty-list construction are supported.
    mapping = pitchtools.Registration()
    assert isinstance(mapping, pitchtools.Registration)
    mapping = pitchtools.Registration([])
    assert isinstance(mapping, pitchtools.Registration)
bertptrs/adventofcode | 2019/tests/test_day22.py | Python | mit | 944 | 0.001059 | import pytest
from aoc2019.day22 import shuffle
SAMPLE_INSTRUCTIONS = [
"""deal with increment 7
deal into new stack
deal into new stack""",
"""cut 6
deal with increment 7
deal into new stack""",
"""deal with increment 7
deal with increment 9
cut -2""",
"""deal into new stack
cut -2
deal with increment 7
| cut 8
cut -4
deal with increment 7
cut 3
deal with increment 9
deal with increment 3
cut -1""",
]
CORRECT_SHUFFLES = [
"0 3 6 9 2 5 8 1 4 7",
"3 0 7 4 1 8 5 2 9 6",
"6 3 0 7 4 1 8 5 2 9",
"9 2 5 8 1 4 7 0 3 6",
]
@pytest.mark.parametrize('instructions,correct', zip(SAMPLE_INSTRUCTIONS, CORRECT_SHUFFLES))
def test_shuffle(instructions, correct):
    # Each sample script is applied to a 10-card deck in factory order.
    instructions = [line.strip() for line in instructions.splitlines()]
    correct = [int(i) for i in correct.split(" ")]
    result = shuffle(instructions, 10)
    assert result == correct
|
RomanBelkov/qreal | plugins/tools/visualInterpreter/examples/robotsCodeGeneration/reactionsStorage/Initialization.py | Python | apache-2.0 | 3,823 | 0.022134 | import os
# Highest node id handed out so far by the generator.
max_used_id = -1
cur_node_is_processed = False
# node id -> condition expression text used for generated "if" statements.
conditions = {}
# Pending nodes whose "if" statements still have to be emitted; nodes with
# two branches are additionally listed in if_nodes_with_2_branches.
if_nodes = []
if_nodes_with_2_branches = []
# Where each branch ends in the generated code and which kind of branch it
# was (see ifGeneration()).
branch_end = {}
branch_end_type = {}
branch_type = {}
# Generated program body: a list of line-lists, flattened by save().
code = []
# Fragments substituted into the template's hook placeholders by save().
init_code = []
terminate_code = []
variables_code = []
balancer_code = ''
# node id -> index into `code` where that node's statements start.
id_to_pos_in_code = {}
proce | ssed_loops = {}
processed_ends = []
def ifGeneration():
    """Wrap queued conditional nodes' generated statements in C if/else blocks.

    Consumes if_nodes from the front.  Each node's already-emitted lines are
    located through id_to_pos_in_code and wrapped in "if (<cond>) { ... }".
    For two-branch nodes, branch_end/branch_end_type remember where the first
    branch closed so the matching "else {" can be attached when the second
    branch (branch_type == 2) is processed.
    """
    global if_nodes
    global code
    global id_to_pos_in_code
    global branch_type
    global branch_end
    global branch_end_type
    global if_nodes_with_2_branches
    if len(if_nodes) > 0:
        # Close out all single-branch ifs at the head of the queue.
        while len(if_nodes) > 0 and if_nodes[0] not in if_nodes_with_2_branches:
            cond = conditions[if_nodes[0]]
            code[id_to_pos_in_code[if_nodes[0]]].insert(0, "if (" + cond + ") {\n")
            code.append(["}\n"])
            del if_nodes[0]
        if len(if_nodes) > 0:
            if branch_type[if_nodes[0]] == 1:
                # First branch of a two-branch if: open it and remember where
                # it ends so the else-branch can be attached later.
                cond = conditions[if_nodes[0]]
                code[id_to_pos_in_code[if_nodes[0]]].insert(0, "if (" + cond + ") {\n")
                code.append(["}\n"])
                branch_end[if_nodes[0]] = len(code) - 1
                branch_end_type[if_nodes[0]] = 1
            elif branch_type[if_nodes[0]] == 2:
                # Second branch: flush pending ifs/elses until the next
                # unprocessed two-branch node is reached.
                while len(if_nodes) > 0 and (branch_type[if_nodes[0]] == 2 or if_nodes[0] not in if_nodes_with_2_branches):
                    if if_nodes[0] not in if_nodes_with_2_branches:
                        cond = conditions[if_nodes[0]]
                        code[id_to_pos_in_code[if_nodes[0]]].insert(0, "if (" + cond + ") {\n")
                        code.append(["}\n"])
                    elif branch_end_type[if_nodes[0]] == 1:
                        code[branch_end[if_nodes[0]]].append("else {\n")
                        code.append(["}\n"])
                    del if_nodes[0]
                if len(if_nodes) > 0:
                    cond = conditions[if_nodes[0]]
                    code[id_to_pos_in_code[if_nodes[0]]].insert(0, "if (" + cond + ") {\n")
                    code.append(["}\n"])
                    branch_end[if_nodes[0]] = len(code) - 1
                    branch_end_type[if_nodes[0]] = 1
def indentString(i):
    """Return *i* tab characters; an empty string for i <= 0."""
    # String repetition replaces the original manual while-loop; max() keeps
    # the loop's behavior (empty result) for non-positive counts.
    return '\t' * max(i, 0)
def collectPortNames(ports, port_names):
    """Append the NXT port constant for every port letter present in *ports*.

    Appends in fixed A, B, C order; letters not mentioned are skipped.
    """
    for letter, constant in (('A', 'NXT_PORT_A'),
                             ('B', 'NXT_PORT_B'),
                             ('C', 'NXT_PORT_C')):
        if letter in ports:
            port_names.append(constant)
def convertCondition(sign):
    """Map a Russian comparison word to its C operator; '==' for anything else."""
    operators = {
        "меньше": "<",
        "больше": ">",
        "не меньше": ">=",
        "не больше": "<=",
    }
    return operators.get(sign, "==")
def containsInitCode(code):
    """Return True if any stored init-code entry compares <= *code*.

    NOTE(review): the parameter shadows the module-level `code` list, and
    `c <= code` is a lexicographic string comparison -- this looks like it
    was meant as a prefix/containment test; confirm the intended semantics.
    """
    global init_code
    for c in init_code:
        if c <= code:
            return True
    return False
def save():
    """Flatten the generated code, fill in the C template, write program0.c.

    Indentation is reconstructed by counting '{'/'}' in the emitted lines
    (a literal '{}' on a line is ignored).  Output goes to
    <script dir>/nxt-tools/program0.c, creating the directory if needed.
    """
    global template
    global code
    global script_dir
    final_code = ''
    indent = 1
    for strings in code:
        for s in strings:
            if s:
                if (s.find('}') > -1 and s.find('{}') == -1):
                    indent -= 1
                final_code = final_code + indentString(indent) + s
                if (s.find('{') > -1 and s.find('{}') == -1):
                    indent += 1
    final_init_code = ''
    for s1 in init_code:
        final_init_code = final_init_code + "\t" + s1
    final_terminate_code = ''
    for s2 in terminate_code:
        final_terminate_code = final_terminate_code + "\t" + s2
    final_variables_code = ''
    for s3 in variables_code:
        final_variables_code = final_variables_code + s3
    # Substitute the collected fragments into the program template.
    s = template.replace("@@CODE@@", final_code)
    s = s.replace("@@BALANCER@@", balancer_code)
    s = s.replace("@@VARIABLES@@", final_variables_code)
    s = s.replace("@@INITHOOKS@@", final_init_code)
    s = s.replace("@@TERMINATEHOOKS@@", final_terminate_code)
    # NOTE(review): __script_dir__ is not defined in this module -- it is
    # presumably injected by the visual interpreter runtime; confirm.
    dir_path = os.path.join(__script_dir__, "nxt-tools")
    if not os.path.exists(dir_path):
        os.mkdir(dir_path)
    file_path = os.path.join(dir_path, "program0.c")
    code_file = open(file_path, 'w')
    code_file.write(s)
    code_file.close()
|
AleksNeStu/ggrc-core | test/selenium/bin/run_selenium.py | Python | apache-2.0 | 1,534 | 0.008475 | #!/usr/bin/env python2.7
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
""" Basic selenium test runner
This script is used for running all selenium tests against the server defined
in the configuration yaml file. The script will wait a defined time for the
server to start before running the test. If the server fails to start before
its grace time is up, the script will return with an error code of 3. Error
codes 1 and 2 are reserved by pytest and status 0 is returned only if all the
tests pass.
"""
import os
import sys
import time
import urllib
import pytest  # pylint: disable=import-error
# add src to path so that we can do imports from our src
PROJECT_ROOT_PATH = os.path.dirname(os.path.abspath(__file__)) + "/../"
sys.path.append(PROJECT_ROOT_PATH + "src")
from lib import file_ops # NOQA
from lib import environment # NOQA
def wait_for_server():
  """Wait for the server to return a 200 response.

  Polls ``environment.APP_URL`` once per second for up to
  ``environment.SERVER_WAIT_TIME`` seconds.  Returns True as soon as a
  200 is observed, False on timeout.
  """
  # NOTE(review): "Wating" is a typo in user-facing output (runtime string,
  # left untouched here).
  sys.stdout.write("Wating on server: ")
  for _ in xrange(environment.SERVER_WAIT_TIME):
    try:
      if urllib.urlopen(environment.APP_URL).getcode() == 200:
        print "[Done]"
        return True
    except IOError:
      # Server not accepting connections yet; print a progress dot and retry.
      sys.stdout.write(".")
      sys.stdout.flush()
    time.sleep(1)
  print "[Failed]"
  return False
if __name__ == "__main__":
  # Exit code 3 = server never came up (1 and 2 are reserved by pytest).
  if not wait_for_server():
    sys.exit(3)
  # Start each run from a clean selenium log directory.
  file_ops.create_directory(environment.LOG_PATH)
  file_ops.delete_directory_contents(environment.LOG_PATH)
  # Propagate pytest's own exit status (0 only when all tests pass).
  sys.exit(pytest.main())
|
maxamillion/product-definition-center | pdc/apps/package/filters.py | Python | mit | 4,125 | 0.012848 | #
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from django.conf import settings
from django.forms import SelectMultiple
import django_filters
from pdc.apps.common.filters import MultiValueFilter, NullableCharFilter
from . import models
class RPMFilter(django_filters.FilterSet):
    """Filter set for RPM queries.

    Every field accepts multiple values.  ``compose`` and
    ``linked_release`` traverse relations to composes/releases and use
    ``distinct`` to avoid duplicate rows produced by the joins.
    """
    name = MultiValueFilter()
    version = MultiValueFilter()
    epoch = MultiValueFilter()
    release = MultiValueFilter()
    arch = MultiValueFilter()
    srpm_name = MultiValueFilter()
    # NullableCharFilter -- presumably allows matching NULL srpm_nevra
    # explicitly; see pdc.apps.common.filters (TODO confirm).
    srpm_nevra = NullableCharFilter()
    filename = MultiValueFilter()
    compose = MultiValueFilter(name='composerpm__variant_arch__variant__compose__compose_id',
                               distinct=True)
    linked_release = MultiValueFilter(name='linked_releases__release_id', distinct=True)
    class Meta:
        model = models.RPM
        fields = ('name', 'version', 'epoch', 'release', 'arch', 'srpm_name',
                  'srpm_nevra', 'compose', 'filename', 'linked_release')
class ImageFilter(django_filters.FilterSet):
    """Filter set for Image queries.

    All filters accept multiple values; ``compose`` traverses the
    compose-image relation and deduplicates results with ``distinct``.
    """
    file_name = MultiValueFilter()
    image_format = MultiValueFilter(name='image_format__name')
    image_type = MultiValueFilter(name='image_type__name')
    disc_number = MultiValueFilter()
    disc_count = MultiValueFilter()
    arch = MultiValueFilter()
    mtime = MultiValueFilter()
    size = MultiValueFilter()
    implant_md5 = MultiValueFilter()
    volume_id = MultiValueFilter()
    md5 = MultiValueFilter()
    sha1 = MultiValueFilter()
    sha256 = MultiValueFilter()
    compose = MultiValueFilter(name='composeimage__variant_arch__variant__compose__compose_id',
                               distinct=True)
    class Meta:
        model = models.Image
        # 'bootable' has no explicit filter above, so it uses the default
        # filter generated for the model field.
        fields = ('file_name', 'image_format', 'image_type', 'disc_number',
                  'disc_count', 'arch', 'mtime', 'size', 'bootable',
                  'implant_md5', 'volume_id', 'md5', 'sha1', 'sha256')
class BuildImageFilter(django_filters.FilterSet):
    """Filter set for BuildImage queries.

    ``component_name`` is resolved through the bindings app when
    ``settings.WITH_BINDINGS`` is enabled; otherwise it is a plain filter
    on the related SRPM name.
    """
    if settings.WITH_BINDINGS:
        component_name = django_filters.MethodFilter(action='filter_by_component_name',
                                                     widget=SelectMultiple)
    else:
        component_name = MultiValueFilter(name='rpms__srpm_name', distinct=True)
    rpm_version = MultiValueFilter(name='rpms__version', distinct=True)
    rpm_release = MultiValueFilter(name='rpms__release', distinct=True)
    image_id = MultiValueFilter()
    image_format = MultiValueFilter(name='image_format__name')
    md5 = MultiValueFilter()
    archive_build_nvr = MultiValueFilter(name='archives__build_nvr', distinct=True)
    archive_name = MultiValueFilter(name='archives__name', distinct=True)
    archive_size = MultiValueFilter(name='archives__size', distinct=True)
    archive_md5 = MultiValueFilter(name='archives__md5', distinct=True)
    release_id = MultiValueFilter(name='releases__release_id', distinct=True)

    def filter_by_component_name(self, queryset, value):
        """Filter by release-component names, honouring SRPM name mappings.

        Falls back to treating ``value`` directly as SRPM names when no
        mapping exists for any of the requested components.
        """
        from pdc.apps.bindings import models as binding_models
        if not value:
            # Nothing requested: skip the mapping query entirely.
            return queryset
        # flat=True so the __in lookup below receives plain names instead
        # of 1-tuples (bug fix: values_list without flat yields tuples).
        srpm_names = binding_models.ReleaseComponentSRPMNameMapping.objects.filter(
            release_component__name__in=value).distinct().values_list('srpm_name', flat=True)
        if srpm_names:
            return queryset.filter(rpms__srpm_name__in=srpm_names).distinct()
        return queryset.filter(rpms__srpm_name__in=value).distinct()

    class Meta:
        model = models.BuildImage
        fields = ('component_name', 'rpm_version', 'rpm_release', 'image_id', 'image_format', 'md5',
                  'archive_build_nvr', 'archive_name', 'archive_size', 'archive_md5', 'release_id')
|
twitter/pants | src/python/pants/backend/jvm/tasks/classpath_entry.py | Python | apache-2.0 | 4,098 | 0.009761 | # coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
class ClasspathEntry(object):
  """Represents a java classpath entry.

  :API: public
  """

  def __init__(self, path, directory_digest=None):
    """
    :param string path: the pants-internal path of this entry.
    :param directory_digest: digest of the directory containing the file; may be None.
    :type directory_digest: pants.engine.fs.Digest
    """
    self._path = path
    # Repaired: this assignment was corrupted in the checked-in source.
    self._directory_digest = directory_digest

  @property
  def path(self):
    """Returns the pants internal path of this classpath entry.

    Suitable for use in constructing classpaths for pants executions and pants generated artifacts.

    :API: public

    :rtype: string
    """
    return self._path

  @property
  def directory_digest(self):
    """Returns the directory digest which contains this file. May be None.

    This API is experimental, and subject to change.

    :rtype: pants.engine.fs.Digest
    """
    return self._directory_digest

  def is_excluded_by(self, excludes):
    """Returns `True` if this classpath entry should be excluded given the `excludes` in play.

    :param excludes: The excludes to check this classpath entry against.
    :type excludes: list of :class:`pants.backend.jvm.targets.exclude.Exclude`
    :rtype: bool
    """
    # Plain (internal) entries carry no coordinate, so no exclude can match.
    return False

  def __hash__(self):
    return hash((self.path, self.directory_digest))

  def __eq__(self, other):
    return (
      isinstance(other, ClasspathEntry) and
      self.path == other.path and
      self.directory_digest == other.directory_digest
    )

  def __ne__(self, other):
    return not self == other

  def __repr__(self):
    return 'ClasspathEntry(path={!r}, directory_digest={!r})'.format(
      self.path,
      self.directory_digest,
    )

  @classmethod
  def is_artifact_classpath_entry(cls, classpath_entry):
    """True if the entry was resolved from an external artifact.

    :API: public
    """
    return isinstance(classpath_entry, ArtifactClasspathEntry)

  @classmethod
  def is_internal_classpath_entry(cls, classpath_entry):
    """True if the entry was produced internally by pants.

    :API: public
    """
    return not cls.is_artifact_classpath_entry(classpath_entry)
class ArtifactClasspathEntry(ClasspathEntry):
  """Represents a resolved third party classpath entry.

  :API: public
  """

  def __init__(self, path, coordinate, cache_path, directory_digest=None):
    super(ArtifactClasspathEntry, self).__init__(path, directory_digest)
    self._coordinate = coordinate
    self._cache_path = cache_path

  @property
  def coordinate(self):
    """Returns the maven coordinate that was used to resolve this classpath entry's artifact.

    :rtype: :class:`pants.java.jar.M2Coordinate`
    """
    return self._coordinate

  @property
  def cache_path(self):
    """Returns the external cache path of this classpath entry.

    For example, the `~/.m2/repository` or `~/.ivy2/cache` location of the resolved artifact for
    maven and ivy resolvers respectively.

    Suitable for use in constructing classpaths for external tools that should not be subject to
    potential volatility in pants own internal caches.

    :API: public

    :rtype: string
    """
    return self._cache_path

  def is_excluded_by(self, excludes):
    # Excluded as soon as any exclude matches our coordinate.
    for exclude in excludes:
      if _matches_exclude(self.coordinate, exclude):
        return True
    return False

  def __hash__(self):
    # directory_digest participates in __eq__ below but not in the hash.
    return hash((self.path, self.coordinate, self.cache_path))

  def __eq__(self, other):
    if not isinstance(other, ArtifactClasspathEntry):
      return False
    return ((self.path, self.coordinate, self.cache_path, self.directory_digest) ==
            (other.path, other.coordinate, other.cache_path, other.directory_digest))

  def __ne__(self, other):
    return not self == other

  def __repr__(self):
    return (
      'ArtifactClasspathEntry(path={!r}, coordinate={!r}, cache_path={!r}, directory_digest={!r})'
      .format(self.path, self.coordinate, self.cache_path, self.directory_digest)
    )
def _matches_exclude(coordinate, exclude):
if not coordinate.org == exclude.org:
return False
if not exclude.name:
return True
if coordinate.name == exclude.name:
return True
return False
|
# Training hyper-parameters for the LeNet example.
TRAIN_BATCH_SIZE = 64   # mini-batch size for training
TEST_BATCH_SIZE = 64    # mini-batch size for evaluation
EPOCH = 32              # number of training epochs
CALCULATE_LOSS = 100    # report the loss every N batches
DATA_PATH = '../dataset/'  # dataset root, relative to the package
carragom/modoboa | modoboa/admin/tests/test_domain_alias.py | Python | isc | 1,687 | 0 | # coding: utf-8
from django.core.urlresolvers import reverse
from modoboa.lib.tests import ModoTestCase
from .. import factories
from ..models import Domain, DomainAlias, Alias
class DomainAliasTestCase(ModoTestCase):
    """Tests for the DomainAlias model and the domain edit form."""

    @classmethod
    def setUpTestData(cls):
        """Create test data."""
        # Repaired: the next two lines were corrupted in the checked-in file.
        super(DomainAliasTestCase, cls).setUpTestData()
        factories.populate_database()
        cls.dom = Domain.objects.get(name='test.com')

    def test_model(self):
        """Creating/renaming/deleting an alias keeps the catch-all Alias in sync."""
        dom = Domain.objects.get(name="test.com")
        domal = DomainAlias()
        domal.name = "domalias.net"
        domal.target = dom
        domal.save()
        self.assertEqual(dom.domainalias_count, 1)
        self.assertTrue(
            Alias.objects.filter(
                address="@{}".format(domal.name)).exists())
        domal.name = "domalias.org"
        domal.save()
        domal.delete()
        self.assertFalse(
            Alias.objects.filter(
                address="@{}".format(domal.name)).exists())

    def test_form(self):
        """The domain change form adds and removes domain aliases."""
        dom = Domain.objects.get(name="test.com")
        values = dict(
            name=dom.name, quota=dom.quota, enabled=dom.enabled,
            aliases="domalias.net", aliases_1="domaliasé.com",
            type="domain"
        )
        self.ajax_post(
            reverse("admin:domain_change",
                    args=[dom.id]),
            values
        )
        self.assertEqual(dom.domainalias_set.count(), 2)
        # Dropping one alias field must delete the corresponding alias.
        del values["aliases_1"]
        self.ajax_post(
            reverse("admin:domain_change",
                    args=[dom.id]),
            values
        )
        self.assertEqual(dom.domainalias_set.count(), 1)
|
balazs-bamer/FreeCAD-Surface | src/Mod/Spreadsheet/InitGui.py | Python | lgpl-2.1 | 3,295 | 0.010926 | #***************************************************************************
#* *
#* Copyright (c) 2013 - Yorik van Havre <yorik@uncreated.net> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#*   License along with this program; if not, write to the Free Software   *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
class SpreadsheetWorkbench(Workbench):
    "Spreadsheet workbench object"
    # 16x16 XPM icon embedded as a string; shown in the workbench selector.
    Icon = """
    /* XPM */
    static char * Spreadsheet_xpm[] = {
    "16 16 5 1",
    " c None",
    ". c #151614",
    "+ c #575956",
    "@ c #969895",
    "# c #F7F9F6",
    " ",
    " ",
    " ...............",
    ".@##@+########@.",
    ".+@@+.@@@@@@@@+.",
    "..+++.+++++++++.",
    ".@##@+########@.",
    ".+@@+.@@@@@@@@+.",
    "..+++.+++++++++.",
    ".@##@+########@.",
    ".+@@+.@@@@@@@@+.",
    "..+++.+++++++++.",
    ".@##@+########@.",
    "..+++.+++++++++.",
    " ",
    " "};"""
    MenuText = "Spreadsheet"
    ToolTip = "Spreadsheet workbench"
    def Initialize(self):
        # Runs once when the workbench is first loaded: registers toolbars,
        # menus, icon and translation paths.
        # Local no-op marker so Qt's translation tools can pick up strings.
        def QT_TRANSLATE_NOOP(scope, text): return text
        import Spreadsheet,Spreadsheet_rc
        from DraftTools import translate
        commands = ["Spreadsheet_Create","Spreadsheet_Controller","Spreadsheet_PropertyController"]
        self.appendToolbar(QT_TRANSLATE_NOOP("Workbench","Spreadsheet tools"),commands)
        self.appendMenu(str(translate("Spreadsheet","&Spreadsheet")),commands)
        FreeCADGui.addIconPath(":/icons")
        FreeCADGui.addLanguagePath(":/translations")
        Log ('Loading Spreadsheet module... done\n')
    def Activated(self):
        # Called each time the user switches to this workbench.
        Msg("Spreadsheet workbench activated\n")
    def Deactivated(self):
        Msg("Spreadsheet workbench deactivated\n")
    def GetClassName(self):
        # Name of the C++ class backing this workbench in FreeCAD.
        return "Gui::PythonWorkbench"
FreeCADGui.addWorkbench(SpreadsheetWorkbench)
|
azumimuo/family-xbmc-addon | plugin.video.genesisreborn/resources/lib/sources/watchepisodes.py | Python | gpl-2.0 | 3,918 | 0.025013 | import re
import urllib
import requests
import urlparse
import json
import xbmc
from BeautifulSoup import BeautifulSoup
from resources.lib.modules.common import random_agent
from resources.lib.modules import control
from resources.lib.modules import cleantitle
from schism_commons import quality_tag, google_tag, parseDOM, replaceHTMLCodes ,cleantitle_get, cleantitle_get_2, cleantitle_query, get_size, cleantitle_get_full
class source:
    """Stream-source scraper for watchepisodes4.com (TV episodes only)."""

    def __init__(self):
        self.base_link = control.setting('watchepisodes_base')
        # Fall back to the default host when the setting is empty/unset.
        if not self.base_link:
            self.base_link = 'http://www.watchepisodes4.com'
        self.search_link = '/search/ajax_search?q=%s'

    def tvshow(self, imdb, tvdb, tvshowtitle, year):
        """Encode the show title/year as the url payload used by episode()."""
        try:
            url = {'tvshowtitle': tvshowtitle, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Find episode pages matching SxxExx for the show and stash them on
        self.genesisreborn_url (which is also returned)."""
        self.genesisreborn_url = []
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            data['season'], data['episode'] = season, episode
            headers = {'User-Agent': random_agent()}
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)
            cleaned_title = cleantitle.get(title)
            # Regex is loop-invariant; build it once (e.g. "[sS]03[eE]07").
            episode_check = "[sS]%02d[eE]%02d" % (int(season), int(episode))
            # The ajax search endpoint returns JSON with a 'series' list of
            # {label, seo} entries.  (Corrupted line repaired.)
            html = requests.get(query, headers=headers, timeout=30).json()
            results = html['series']
            for item in results:
                r_title = item['label'].encode('utf-8')
                r_link = item['seo'].encode('utf-8')
                if cleaned_title == cleantitle.get(r_title):
                    r_page = self.base_link + "/" + r_link
                    print("WATCHEPISODES r1", r_title, r_page)
                    # (Corrupted line repaired.)
                    r_html = BeautifulSoup(requests.get(r_page, headers=headers, timeout=30).content)
                    r = r_html.findAll('div', attrs={'class': re.compile('\s*el-item\s*')})
                    for container in r:
                        try:
                            r_href = container.findAll('a')[0]['href'].encode('utf-8')
                            r_title = container.findAll('a')[0]['title'].encode('utf-8')
                            print("WATCHEPISODES r3", r_href, r_title)
                            # Keep pages whose title or href mention SxxExx.
                            if re.search(episode_check, r_title) or re.search(episode_check, r_href):
                                self.genesisreborn_url.append(r_href)
                        except:
                            # Malformed list entry; skip it.
                            pass
            print("WATCHEPISODES LIST", self.genesisreborn_url)
            return self.genesisreborn_url
        except:
            pass

    def sources(self, url, hostDict, hostprDict):
        """Collect hoster links from the episode pages found by episode(),
        capped at roughly 100 candidates."""
        sources = []
        try:
            count = 0
            for url in self.genesisreborn_url:
                headers = {'User-Agent': random_agent()}
                html = BeautifulSoup(requests.get(url, headers=headers, timeout=30).content)
                print("WATCHEPISODES SOURCES", url)
                r = html.findAll('div', attrs={'class': 'site'})
                for container in r:
                    if count > 100: break
                    try:
                        count += 1
                        r_url = container.findAll('a')[0]['data-actuallink'].encode('utf-8')
                        print("WATCHEPISODES r_url", r_url)
                        # Reduce the netloc to its "host.tld" tail.
                        host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(r_url.strip().lower()).netloc)[0]
                        host = host.encode('utf-8')
                        # Only keep hosts the resolver layer knows about.
                        if not host in hostDict: raise Exception()
                        sources.append({'source': host, 'quality': 'SD', 'provider': 'Watchepisodes', 'url': r_url, 'direct': False, 'debridonly': False})
                    except:
                        pass
            return sources
        except:
            # On any page-level failure return whatever was collected so far.
            return sources

    def resolve(self, url):
        """Urls are already final; nothing to resolve."""
        return url
qsnake/gpaw | oldtest/hund.py | Python | gpl-3.0 | 1,082 | 0 | from ase import *
from gpaw im | port *
atoms = Atoms('H')
atoms.center(vacuum=4. | )
params = dict(h=.3, convergence=dict(density=.005, eigenstates=1e-6))
# No magmom, no hund
atoms.set_calculator(GPAW(**params))
E_nom_noh = atoms.get_potential_energy()
assert np.all(atoms.get_magnetic_moments() == 0.)
assert atoms.calc.get_number_of_spins() == 1
# No magmom, hund
atoms.set_calculator(GPAW(hund=True, **params))
E_nom_h = atoms.get_potential_energy()
assert np.all(atoms.get_magnetic_moments() == 1.)
assert atoms.calc.get_number_of_spins() == 2
atoms.set_initial_magnetic_moments([1.])
# magmom, no hund
atoms.set_calculator(GPAW(fixmom=True, **params))
E_m_noh = atoms.get_potential_energy()
assert np.all(atoms.get_magnetic_moments() == 1.)
assert atoms.calc.get_number_of_spins() == 2
# magmom, hund
atoms.set_calculator(GPAW(hund=True, **params))
E_m_h = atoms.get_potential_energy()
assert np.all(atoms.get_magnetic_moments() == 1.)
assert atoms.calc.get_number_of_spins() == 2
print E_nom_noh
print E_nom_h
print E_m_noh
print E_m_h
assert E_m_h == E_m_noh == E_nom_h
|
huard/scipy-work | scipy/fftpack/tests/test_basic.py | Python | bsd-3-clause | 18,018 | 0.036408 | #!/usr/bin/env python
# Created by Pearu Peterson, September 2002
""" Test functions for fftpack.basic module
"""
__usage__ = """
Build fftpack:
python setup_fftpack.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.fftpack.test()'
Run tests if fftpack is not installed:
python tests/test_basic.py
"""
from numpy.testing import *
from scipy.fftpack import ifft,fft,fftn,ifftn,rfft,irfft, fft2
from scipy.fftpack import _fftpack as fftpack
from numpy import arange, add, array, asarray, zeros, dot, exp, pi,\
swapaxes, double, cdouble
import numpy as np
import numpy.fft
from numpy.random import rand
def random(size):
    """Return a uniform random array with the given shape tuple."""
    shape = tuple(size)
    return rand(*shape)
def get_mat(n):
    """Return the n x n matrix whose (i, j) entry is i + j."""
    idx = arange(n)
    return add.outer(idx, idx)
def direct_dft(x):
    """Naive O(n^2) reference forward DFT of a 1-D sequence."""
    x = asarray(x)
    n = len(x)
    w = -arange(n) * (2j * pi / n)
    y = zeros(n, dtype=cdouble)
    for k in range(n):
        y[k] = dot(exp(k * w), x)
    return y
def direct_idft(x):
    """Naive O(n^2) reference inverse DFT of a 1-D sequence."""
    x = asarray(x)
    n = len(x)
    w = arange(n) * (2j * pi / n)
    y = zeros(n, dtype=cdouble)
    for k in range(n):
        y[k] = dot(exp(k * w), x) / n
    return y
def direct_dftn(x):
    """Reference n-D FFT: apply a 1-D FFT along every axis in turn."""
    x = asarray(x)
    for ax in range(x.ndim):
        x = fft(x, axis=ax)
    return x
def direct_idftn(x):
    """Reference n-D inverse FFT: apply a 1-D IFFT along every axis in turn."""
    x = asarray(x)
    for ax in range(x.ndim):
        x = ifft(x, axis=ax)
    return x
def direct_rdft(x):
    """Reference DFT of a real sequence in FFTPACK's packed format.

    Layout: [y0.real, y1.real, y1.imag, y2.real, y2.imag, ...]; for even
    n the Nyquist term's (always zero) imaginary part is omitted.
    """
    x = asarray(x)
    n = len(x)
    w = -arange(n)*(2j*pi/n)
    r = zeros(n, dtype=double)
    # n//2 (floor division) keeps this valid on Python 3, where n/2 would
    # be a float and break range(); Python 2 semantics are unchanged.
    for i in range(n//2+1):
        y = dot(exp(i*w), x)
        if i:
            r[2*i-1] = y.real
            if 2*i < n:
                r[2*i] = y.imag
        else:
            r[0] = y.real
    return r
def direct_irdft(x):
    """Inverse of direct_rdft: unpack FFTPACK's packed real format into a
    full hermitian spectrum, then run the reference inverse DFT and take
    the real part.
    """
    x = asarray(x)
    n = len(x)
    x1 = zeros(n, dtype=cdouble)
    # Floor division (//) for Python 3 compatibility; was n/2.
    for i in range(n//2+1):
        if i:
            if 2*i < n:
                # Interior bin: rebuild the conjugate-symmetric pair.
                x1[i] = x[2*i-1] + 1j* x[2*i]
                x1[n-i] = x[2*i-1] - 1j* x[2*i]
            else:
                # Nyquist bin (even n): purely real.
                x1[i] = x[2*i-1]
        else:
            x1[0] = x[0]
    return direct_idft(x1).real
class _TestFFTBase(TestCase):
    """Shared forward-fft checks; subclasses set self.cdt/self.rdt in setUp
    to select the precision under test."""
    def setUp(self):
        # Overridden by concrete subclasses (complex dtype / real dtype).
        self.cdt = None
        self.rdt = None
    def test_definition(self):
        # fft must match the naive reference DFT and keep the input dtype.
        x = np.array([1,2,3,4+1j,1,2,3,4+2j], dtype = self.cdt)
        y = fft(x)
        self.failUnless(y.dtype == self.cdt,
                "Output dtype is %s, expected %s" % (y.dtype, self.cdt))
        y1 = direct_dft(x)
        assert_array_almost_equal(y,y1)
        x = np.array([1,2,3,4+0j,5], dtype = self.cdt)
        assert_array_almost_equal(fft(x),direct_dft(x))
    def test_n_argument_real(self):
        # fft over a 2-row real input transforms each row independently.
        x1 = np.array([1,2,3,4], dtype=self.rdt)
        x2 = np.array([1,2,3,4], dtype=self.rdt)
        y = fft([x1,x2],n=4)
        self.failUnless(y.dtype == self.cdt,
                "Output dtype is %s, expected %s" % (y.dtype, self.cdt))
        assert_equal(y.shape,(2,4))
        assert_array_almost_equal(y[0],direct_dft(x1))
        assert_array_almost_equal(y[1],direct_dft(x2))
    def _test_n_argument_complex(self):
        # Leading underscore: deliberately not collected as a test.
        x1 = np.array([1,2,3,4+1j], dtype=self.cdt)
        x2 = np.array([1,2,3,4+1j], dtype=self.cdt)
        y = fft([x1,x2],n=4)
        self.failUnless(y.dtype == self.cdt,
                "Output dtype is %s, expected %s" % (y.dtype, self.cdt))
        assert_equal(y.shape,(2,4))
        assert_array_almost_equal(y[0],direct_dft(x1))
        assert_array_almost_equal(y[1],direct_dft(x2))
    def test_djbfft(self):
        # Power-of-two sizes exercise the djbfft code path in _fftpack.
        for i in range(2,14):
            n = 2**i
            x = range(n)
            y = fftpack.zfft(x)
            y2 = numpy.fft.fft(x)
            assert_array_almost_equal(y,y2)
            y = fftpack.zrfft(x)
            assert_array_almost_equal(y,y2)
class TestDoubleFFT(_TestFFTBase):
    # Double-precision instantiation of the shared forward-fft tests.
    def setUp(self):
        self.cdt = np.cdouble
        self.rdt = np.double
class TestSingleFFT(_TestFFTBase):
    # Single-precision instantiation of the shared forward-fft tests.
    def setUp(self):
        self.cdt = np.complex64
        self.rdt = np.float32
class _TestIFFTBase(TestCase):
    """Shared inverse-fft checks; subclasses set self.cdt/self.rdt in setUp
    to select the precision under test."""
    def test_definition(self):
        # ifft must match the naive reference inverse DFT.
        x = np.array([1,2,3,4+1j,1,2,3,4+2j], self.cdt)
        y = ifft(x)
        y1 = direct_idft(x)
        self.failUnless(y.dtype == self.cdt,
                "Output dtype is %s, expected %s" % (y.dtype, self.cdt))
        assert_array_almost_equal(y,y1)
        x = np.array([1,2,3,4+0j,5], self.cdt)
        assert_array_almost_equal(ifft(x),direct_idft(x))
    def test_definition_real(self):
        # Real input must be promoted to the matching complex dtype.
        x = np.array([1,2,3,4,1,2,3,4], self.rdt)
        y = ifft(x)
        self.failUnless(y.dtype == self.cdt,
                "Output dtype is %s, expected %s" % (y.dtype, self.cdt))
        y1 = direct_idft(x)
        assert_array_almost_equal(y,y1)
        x = np.array([1,2,3,4,5], dtype=self.rdt)
        # NOTE(review): 'y' here is still the result for the previous input;
        # the dtype check below re-tests stale data -- looks like a bug in
        # the original test.
        self.failUnless(y.dtype == self.cdt,
                "Output dtype is %s, expected %s" % (y.dtype, self.cdt))
        assert_array_almost_equal(ifft(x),direct_idft(x))
    def test_djbfft(self):
        # Power-of-two sizes exercise the djbfft code path in _fftpack.
        for i in range(2,14):
            n = 2**i
            x = range(n)
            y = fftpack.zfft(x,direction=-1)
            y2 = numpy.fft.ifft(x)
            assert_array_almost_equal(y,y2)
            y = fftpack.zrfft(x,direction=-1)
            assert_array_almost_equal(y,y2)
    def test_random_complex(self):
        # Round-trips ifft(fft(x)) and fft(ifft(x)) must recover x.
        for size in [1,51,111,100,200,64,128,256,1024]:
            x = random([size]).astype(self.cdt)
            x = random([size]).astype(self.cdt) +1j*x
            y1 = ifft(fft(x))
            y2 = fft(ifft(x))
            self.failUnless(y1.dtype == self.cdt,
                    "Output dtype is %s, expected %s" % (y1.dtype, self.cdt))
            self.failUnless(y2.dtype == self.cdt,
                    "Output dtype is %s, expected %s" % (y2.dtype, self.cdt))
            assert_array_almost_equal (y1, x)
            assert_array_almost_equal (y2, x)
    def test_random_real(self):
        # Same round-trips for real inputs (results are complex).
        for size in [1,51,111,100,200,64,128,256,1024]:
            x = random([size]).astype(self.rdt)
            y1 = ifft(fft(x))
            y2 = fft(ifft(x))
            self.failUnless(y1.dtype == self.cdt,
                    "Output dtype is %s, expected %s" % (y1.dtype, self.cdt))
            self.failUnless(y2.dtype == self.cdt,
                    "Output dtype is %s, expected %s" % (y2.dtype, self.cdt))
            assert_array_almost_equal (y1, x)
            assert_array_almost_equal (y2, x)
class TestDoubleIFFT(_TestIFFTBase):
    # Double-precision instantiation of the shared inverse-fft tests.
    def setUp(self):
        self.cdt = np.cdouble
        self.rdt = np.double
class TestSingleIFFT(_TestIFFTBase):
    # Single-precision instantiation of the shared inverse-fft tests.
    def setUp(self):
        self.cdt = np.complex64
        self.rdt = np.float32
class _TestRFFTBase(TestCase):
    """Shared real-fft checks; subclasses set self.rdt/self.cdt in setUp.

    One corrupted source line in test_djbfft was repaired, and n/2 was
    changed to n//2 so range() also works under Python 3 true division
    (identical result on Python 2).
    """
    def test_definition(self):
        # rfft must match the packed-format reference for even and odd n.
        for t in [[1, 2, 3, 4, 1, 2, 3, 4], [1, 2, 3, 4, 1, 2, 3, 4, 5]]:
            x = np.array(t, dtype=self.rdt)
            y = rfft(x)
            y1 = direct_rdft(x)
            assert_array_almost_equal(y,y1)
            self.failUnless(y.dtype == self.rdt,
                    "Output dtype is %s, expected %s" % (y.dtype, self.rdt))
    def test_djbfft(self):
        from numpy.fft import fft as numpy_fft
        for i in range(2,14):
            n = 2**i
            x = range(n)
            # Build the FFTPACK packed layout from numpy's full spectrum.
            y2 = numpy_fft(x)
            y1 = zeros((n,),dtype=double)
            y1[0] = y2[0].real
            y1[-1] = y2[n//2].real
            for k in range(1,n//2):
                y1[2*k-1] = y2[k].real
                y1[2*k] = y2[k].imag
            y = fftpack.drfft(x)
            assert_array_almost_equal(y,y1)
class TestRFFTDouble(_TestRFFTBase):
    # Double-precision instantiation (corrupted class name repaired).
    def setUp(self):
        self.cdt = np.cdouble
        self.rdt = np.double
class TestRFFTSingle(_TestRFFTBase):
    # Single-precision instantiation of the shared rfft tests.
    def setUp(self):
        self.cdt = np.complex64
        self.rdt = np.float32
class _TestIRFFTBase(TestCase):
def test_definition(self):
x1 = [1,2,3,4,1,2,3,4]
x1_1 = [1,2+3j,4+1j,2+3j,4,2-3j,4-1j,2-3j]
x2= [1,2,3,4,1,2,3,4,5]
x2_1 = [1,2+3j,4+1j,2+3j,4+5j,4-5j,2-3j,4-1j,2-3j]
def _test(x, xr):
y = irfft(np.array(x, dtype=self.rdt))
y1 = direct_irdft(x)
self.failUnless(y.dtype == self.rdt,
"Output dtype is %s, expec |
detrout/python-htseq | HTSeq/StepVector.py | Python | gpl-3.0 | 25,079 | 0.020894 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.4
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
# SWIG-generated import bootstrap: locate the compiled _StepVector
# extension next to this module first, falling back to a plain import.
# (Generated code -- regenerate from the SWIG interface file instead of
# editing logic here.)
if version_info >= (2,6,0):
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_StepVector', [dirname(__file__)])
        except ImportError:
            # Not found beside the module; defer to the normal import path.
            import _StepVector
            return _StepVector
        if fp is not None:
            try:
                _mod = imp.load_module('_StepVector', fp, pathname, description)
            finally:
                fp.close()
            return _mod
    _StepVector = swig_import_helper()
    del swig_import_helper
else:
    import _StepVector
del version_info
# SWIG runtime attribute-dispatch helpers (generated code; do not hand-edit
# the logic -- it mirrors what SWIG emits for old/new-style class support).
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    # Route attribute writes through the class's SWIG setter table;
    # 'this'/'thisown' are the wrapped C pointer and ownership flag.
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    # Non-static variant: unknown attributes fall back to the instance dict.
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    # Route attribute reads through the class's SWIG getter table.
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError(name)
def _swig_repr(self):
    # Stable repr even when the underlying C object is already gone.
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
    # New-style classes available (Python >= 2.2).
    _object = object
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
# SWIG proxy for the C++ (index, value) pair with a float value.
# Generated code: two corrupted lines repaired; real changes belong in the
# SWIG interface file.
class _Pair_int_float(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, _Pair_int_float, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, _Pair_int_float, name)
    __repr__ = _swig_repr
    __swig_setmethods__["first"] = _StepVector._Pair_int_float_first_set
    __swig_getmethods__["first"] = _StepVector._Pair_int_float_first_get
    if _newclass:first = _swig_property(_StepVector._Pair_int_float_first_get, _StepVector._Pair_int_float_first_set)
    __swig_setmethods__["second"] = _StepVector._Pair_int_float_second_set
    __swig_getmethods__["second"] = _StepVector._Pair_int_float_second_get
    if _newclass:second = _swig_property(_StepVector._Pair_int_float_second_get, _StepVector._Pair_int_float_second_set)
    def __init__(self, *args):
        this = _StepVector.new__Pair_int_float(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _StepVector.delete__Pair_int_float
    __del__ = lambda self : None;
_Pair_int_float_swigregister = _StepVector._Pair_int_float_swigregister
_Pair_int_float_swigregister(_Pair_int_float)
# SWIG proxy: iterator over (index, float value) steps.
class _StepVector_Iterator_float(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, _StepVector_Iterator_float, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, _StepVector_Iterator_float, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        this = _StepVector.new__StepVector_Iterator_float(*args)
        try: self.this.append(this)
        except: self.this = this
    def next(self): return _StepVector._StepVector_Iterator_float_next(self)
    def __iter__(self): return _StepVector._StepVector_Iterator_float___iter__(self)
    __swig_destroy__ = _StepVector.delete__StepVector_Iterator_float
    __del__ = lambda self : None;
_StepVector_Iterator_float_swigregister = _StepVector._StepVector_Iterator_float_swigregister
_StepVector_Iterator_float_swigregister(_StepVector_Iterator_float)
# SWIG proxy: step vector holding float values.
class _StepVector_float(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, _StepVector_float, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, _StepVector_float, name)
    __repr__ = _swig_repr
    def __init__(self):
        this = _StepVector.new__StepVector_float()
        try: self.this.append(this)
        except: self.this = this
    def set_value(self, *args): return _StepVector._StepVector_float_set_value(self, *args)
    def add_value(self, *args): return _StepVector._StepVector_float_add_value(self, *args)
    def get_all_values_pystyle(self): return _StepVector._StepVector_float_get_all_values_pystyle(self)
    def get_values_pystyle(self, *args): return _StepVector._StepVector_float_get_values_pystyle(self, *args)
    def num_values(self): return _StepVector._StepVector_float_num_values(self)
    __swig_destroy__ = _StepVector.delete__StepVector_float
    __del__ = lambda self : None;
_StepVector_float_swigregister = _StepVector._StepVector_float_swigregister
_StepVector_float_swigregister(_StepVector_float)
# Module-level constants exported from the C extension.
cvar = _StepVector.cvar
_StepVector_float.min_index = _StepVector.cvar._StepVector_float_min_index
_StepVector_float.max_index = _StepVector.cvar._StepVector_float_max_index
# SWIG proxy: (index, int value) pair.
class _Pair_int_int(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, _Pair_int_int, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, _Pair_int_int, name)
    __repr__ = _swig_repr
    __swig_setmethods__["first"] = _StepVector._Pair_int_int_first_set
    __swig_getmethods__["first"] = _StepVector._Pair_int_int_first_get
    if _newclass:first = _swig_property(_StepVector._Pair_int_int_first_get, _StepVector._Pair_int_int_first_set)
    __swig_setmethods__["second"] = _StepVector._Pair_int_int_second_set
    __swig_getmethods__["second"] = _StepVector._Pair_int_int_second_get
    if _newclass:second = _swig_property(_StepVector._Pair_int_int_second_get, _StepVector._Pair_int_int_second_set)
    def __init__(self, *args):
        this = _StepVector.new__Pair_int_int(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _StepVector.delete__Pair_int_int
    __del__ = lambda self : None;
_Pair_int_int_swigregister = _StepVector._Pair_int_int_swigregister
_Pair_int_int_swigregister(_Pair_int_int)
# SWIG proxy: iterator over (index, int value) steps.
class _StepVector_Iterator_int(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, _StepVector_Iterator_int, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, _StepVector_Iterator_int, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        this = _StepVector.new__StepVector_Iterator_int(*args)
        try: self.this.append(this)
        except: self.this = this
    def next(self): return _StepVector._StepVector_Iterator_int_next(self)
    def __iter__(self): return _StepVector._StepVector_Iterator_int___iter__(self)
    __swig_destroy__ = _StepVector.delete__StepVector_Iterator_int
    __del__ = lambda self : None;
_StepVector_Iterator_int_swigregister = _StepVector._StepVector_Iterator_int_swigregister
_StepVector_Iterator_int_swigregister(_StepVector_Iterator_int)
class _StepVector_int(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, _StepVector_int, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, _StepVector_int, name)
__repr__ = _swig_repr
def __init__(self):
this = _StepVector.new__StepVector_int()
try: self.this.append(this)
except: self.this = this
def set_value(self, *args): return _StepVector._StepVector_int_set_value(self, *args)
def add_value(self, *args): return _StepVector._StepVector_int_add_value(s |
tebeka/pythonwise | ansiprint.py | Python | bsd-3-clause | 2,287 | 0.010931 | #!/usr/bin/env python
'''Print message using ANSI terminal codes'''
__author__ = "Miki Tebeka <miki@mikitebeka.com>"
from sys import stdout, stderr
# Text attributes (ANSI SGR codes)
bright = 1
dim = 2
underline = 4
blink = 5
reverse = 7
hidden = 8
# Foreground colors (SGR 30-37)
black = 30
red = 31
green = 32
yellow = 33
blue = 34
magenta = 35
cyan = 36
white = 37
# Background colors (SGR 40-47)
on_black = 40
on_red = 41
on_green = 42
on_yellow = 43
on_blue = 44
on_magenta = 45
on_cyan = 46
on_white = 47
def ansiformat(msg, *args):
    '''Return msg wrapped in ANSI escape codes for the given attributes.

    See http://www.termsys.demon.co.uk/vtansi.htm for the code reference.
    '''
    codes = ";".join("%s" % attr for attr in args)
    return "\033[%sm%s\033[0m" % (codes, msg)
def ansiprint(msg, *args, **kw):
    '''Write the ANSI-formatted message to stdout, or to stderr when the
    keyword argument ``stderr`` is truthy.

    Should work on any ANSI compatible terminal.
    '''
    target = stderr if kw.get("stderr", 0) else stdout
    target.write(ansiformat(msg, *args))
    target.flush()
if __name__ == "__main__":
from sys import argv, exit
from os.path import basename
h = {
"brig | ht" : bright,
"dim" : dim,
"underline" : underline,
"blink" : blink,
"reverse" : reverse,
"hidden | " : hidden,
"black" : black,
"red" : red,
"green" : green,
"yellow" : yellow,
"blue" : blue,
"magenta" : magenta,
"cyan" : cyan,
"white" : white,
"on_black" : on_black,
"on_red" : on_red,
"on_green" : on_green,
"on_yellow" : on_yellow,
"on_blue" : on_blue,
"on_magenta" : on_magenta,
"on_cyan" : on_cyan,
"on_white" : on_white
}
eg = "e.g. ansiprint hello red on_green underline -> %s" % \
ansiformat("hello", red, on_green, underline)
if len(argv) < 2:
print >> stderr, "usage: %s message [format ...]" % basename(argv[0])
print >> stderr, eg
exit(1)
for i in argv[2:]:
if i not in h:
ansiprint("%s: Unknown format\n" % i, red, bright, stderr=True)
print >> stderr, "Formats can be:",
msg = ", ".join([ansiformat(f, h[f]) for f in h.keys()])
print msg
print >> stderr, eg
exit(1)
ansiprint(argv[1], *[h[i] for i in argv[2:]])
print
|
slippers/Flask-Prose | tests/security_models.py | Python | mit | 2,079 | 0.002886 | from flask_security import (
SQLAlchemyUserDatastore,
UserMixin,
RoleMixin,
)
from sqlalchemy.ext.declarative import (
declarative_base,
declared_attr,
as_declarative
)
from sqlalchemy.orm import relationship, backref
from sqlalchemy import (
Table,
Column,
Integer,
String,
Boolean,
ForeignKey,
Text,
DateTime,
SmallInteger,
PrimaryKeyConstraint
)
def SetupModels(db):
    """Define the Flask-Security user/role models on *db* (a flask_sqlalchemy
    instance) and return a SQLAlchemyUserDatastore bound to them.
    Fixed: a stray " | " inside the backref(...) call made this module a
    SyntaxError.
    """
    # db is a flask_sqlalchemy instance
    Base = db.Model
    # Association table linking users to roles (many-to-many).
    class RoleUsers(Base):
        __tablename__ = 'roles_users'
        id = Column(Integer(), primary_key=True)
        user_id = Column(Integer(), ForeignKey('users.id'))
        role_id = Column(Integer(), ForeignKey('roles.id'))
    class Role(Base, RoleMixin):
        __tablename__ = 'roles'
        id = Column(Integer(), primary_key=True)
        name = Column(String(80), unique=True)
        description = Column(String(255))
    class User(Base, UserMixin):
        __tablename__ = 'users'
        id = Column(Integer, primary_key=True)
        email = Column(String(255), unique=True)
        password = Column(String(255))
        active = Column(Boolean())
        confirmed_at = Column(DateTime())
        roles = relationship('Role',
                             secondary='roles_users',
                             backref=backref('users', lazy='dynamic'))
    return SQLAlchemyUserDatastore(db, User, Role)
def SetupUsers(user_datastore):
    """Seed *user_datastore* with the default roles and two demo accounts.
    Idempotent: existing roles and users are left untouched.
    Fixed: a stray " | " inside add_role_to_user made this a SyntaxError.
    """
    user_datastore.find_or_create_role(name='prose_admin', description='prose administrator')
    user_datastore.find_or_create_role(name='reader', description='prose reader')
    if not user_datastore.get_user('reader@example.com'):
        user_datastore.create_user(email='reader@example.com', password='test123')
        user_datastore.add_role_to_user('reader@example.com', 'reader')
    if not user_datastore.get_user('test@example.com'):
        user_datastore.create_user(email='test@example.com', password='test123')
        user_datastore.add_role_to_user('test@example.com', 'prose_admin')
|
Nexenta/s3-tests | virtualenv/lib/python2.7/site-packages/boto/mturk/connection.py | Python | mit | 42,336 | 0.001323 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import xml.sax
import datetime
import itertools
from boto import handler
from boto import config
from boto.mturk.price import Price
import boto.mturk.notification
from boto.connection import AWSQueryConnection
from boto.exception import EC2ResponseError
from boto.resultset import ResultSet
from boto.mturk.question import QuestionForm, ExternalQuestion, HTMLQuestion
class MTurkRequestError(EC2ResponseError):
    """Raised when the Mechanical Turk API returns an error response."""
    # todo: subclass from an abstract parent of EC2ResponseError
class MTurkConnection(AWSQueryConnection):
APIVersion = '2012-03-25'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None,
host=None, debug=0,
https_connection_factory=None, security_token=None,
profile_name=None):
if not host:
if config.has_option('MTurk', 'sandbox') and config.get('MTurk', 'sandbox') == 'True':
host = 'mechanicalturk.sandbox.amazonaws.com'
else:
host = 'mechanicalturk.amazonaws.com'
self.debug = debug
super(MTurkConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass, host, debug,
https_connection_factory,
| security_token=security_token,
profile_name=profile_name)
def _required_auth_capability(self):
return ['mturk']
def get_account_balance(self):
"""
"""
params = {}
return self._process_request('GetAccountBalance', params,
| [('AvailableBalance', Price),
('OnHoldBalance', Price)])
    def register_hit_type(self, title, description, reward, duration,
                          keywords=None, approval_delay=None, qual_req=None):
        """
        Register a new HIT Type
        title, description are strings
        reward is a Price object
        duration can be a timedelta, or an object castable to an int
        keywords is an optional list (or string) of search keywords
        approval_delay (timedelta or seconds) enables automatic approval
        qual_req is an optional qualification-requirements object whose
        parameters are merged into the request as-is
        """
        params = dict(
            Title=title,
            Description=description,
            AssignmentDurationInSeconds=self.duration_as_seconds(duration),
        )
        # get_as_params presumably expands the Price into 'Reward'-prefixed
        # request parameters -- confirm against boto.mturk.price.
        params.update(MTurkConnection.get_price_as_price(reward).get_as_params('Reward'))
        if keywords:
            params['Keywords'] = self.get_keywords_as_string(keywords)
        if approval_delay is not None:
            d = self.duration_as_seconds(approval_delay)
            params['AutoApprovalDelayInSeconds'] = d
        if qual_req is not None:
            params.update(qual_req.get_as_params())
        return self._process_request('RegisterHITType', params,
                                     [('HITTypeId', HITTypeId)])
def set_email_notification(self, hit_type, email, event_types=None):
"""
Performs a SetHITTypeNotification operation to set email
notification for a specified HIT type
"""
return self._set_notification(hit_type, 'Email', email,
'SetHITTypeNotification', event_types)
def set_rest_notification(self, hit_type, url, event_types=None):
"""
Performs a SetHITTypeNotification operation to set REST notification
for a specified HIT type
"""
return self._set_notification(hit_type, 'REST', url,
'SetHITTypeNotification', event_types)
def set_sqs_notification(self, hit_type, queue_url, event_types=None):
"""
Performs a SetHITTypeNotification operation so set SQS notification
for a specified HIT type. Queue URL is of form:
https://queue.amazonaws.com/<CUSTOMER_ID>/<QUEUE_NAME> and can be
found when looking at the details for a Queue in the AWS Console
"""
return self._set_notification(hit_type, "SQS", queue_url,
'SetHITTypeNotification', event_types)
def send_test_event_notification(self, hit_type, url,
event_types=None,
test_event_type='Ping'):
"""
Performs a SendTestEventNotification operation with REST notification
for a specified HIT type
"""
return self._set_notification(hit_type, 'REST', url,
'SendTestEventNotification',
event_types, test_event_type)
    def _set_notification(self, hit_type, transport,
                          destination, request_type,
                          event_types=None, test_event_type=None):
        """
        Common operation to set notification or send a test event
        notification for a specified HIT type
        hit_type: HIT type ID to configure
        transport: 'Email', 'REST' or 'SQS'
        destination: email address, URL, or SQS queue URL for the transport
        request_type: API operation name to invoke
        event_types: optional list of event types to subscribe to
        test_event_type: event type to fire for test notifications
        """
        params = {'HITTypeId': hit_type}
        # from the Developer Guide:
        # The 'Active' parameter is optional. If omitted, the active status of
        # the HIT type's notification specification is unchanged. All HIT types
        # begin with their notification specifications in the "inactive" status.
        notification_params = {'Destination': destination,
                               'Transport': transport,
                               'Version': boto.mturk.notification.NotificationMessage.NOTIFICATION_VERSION,
                               'Active': True,
                               }
        # add specific event types if required
        if event_types:
            self.build_list_params(notification_params, event_types,
                                   'EventType')
        # Set up dict of 'Notification.1.Transport' etc. values
        # Only one notification spec is ever emitted, so num stays at 1.
        notification_rest_params = {}
        num = 1
        for key in notification_params:
            notification_rest_params['Notification.%d.%s' % (num, key)] = notification_params[key]
        # Update main params dict
        params.update(notification_rest_params)
        # If test notification, specify the notification type to be tested
        if test_event_type:
            params.update({'TestEventType': test_event_type})
        # Execute operation
        return self._process_request(request_type, params)
def create_hit(self, hit_type=None, question=None, hit_layout=None,
lifetime=datetime.timedelta(days=7),
max_assignments=1,
title=None, description=None, keywords=None,
reward=None, duration=datetime.timedelta(days=7),
approval_delay=None, annotation=None,
questions=None, qualifications=None,
layout_params=None, response_groups=None):
"""
Creates a new HIT.
Returns a ResultSet
See: |
kit-cel/gr-drm | python/qa_cell_mapping_cc.py | Python | gpl-3.0 | 1,593 | 0.008161 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import drm_swig as drm
class qa_cell_mapping_cc (gr_unittest.TestCase):
    """QA harness for the DRM cell_mapping_cc block.
    Fixed: a stray " | " inside the vlen_msc assignment was a SyntaxError.
    """
    def setUp (self):
        # Build the fixture: transmission parameters plus a cell-mapping
        # block whose vector lengths are derived from those parameters.
        self.tb = gr.top_block ()
        self.tp = drm.transm_params(1, 3, False, 0, 1, 0, 1, 1, 0, False, 24000, "station label", "text message")
        vlen_msc = self.tp.msc().N_MUX() * self.tp.ofdm().M_TF()
        vlen_sdc = self.tp.sdc().N()
        vlen_fac = self.tp.fac().N() * self.tp.ofdm().M_TF()
        self.cell_mapping = drm.cell_mapping_cc(self.tp, (vlen_msc, vlen_sdc, vlen_fac))
    def tearDown (self):
        # Drop the flowgraph so each test starts from a clean slate.
        self.tb = None
    def test_001_t (self):
        # set up fg
        self.tb.run ()
        # check data
if __name__ == '__main__':
    # Execute the suite via GNU Radio's unittest runner (writes XML results).
    gr_unittest.run(qa_cell_mapping_cc, "qa_cell_mapping_cc.xml")
|
googleads/google-ads-python | google/ads/googleads/v9/services/services/account_budget_service/transports/__init__.py | Python | apache-2.0 | 1,063 | 0 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import AccountBudgetServiceTransport
from .grpc import AccountBudgetServiceGrpcTransport
# Compile a registry of transports.
# Maps a transport name (currently only "grpc") to its transport class so
# consumers can look up the concrete implementation by key.
_transport_registry = (
    OrderedDict()
) # type: Dict[str, Type[AccountBudgetServiceTransport]]
_transport_registry["grpc"] = AccountBudgetServiceGrpcTransport
# Public API of this sub-package.
__all__ = (
    "AccountBudgetServiceTransport",
    "AccountBudgetServiceGrpcTransport",
)
|
mmaker/bridgedb | lib/bridgedb/runner.py | Python | bsd-3-clause | 4,313 | 0.000696 | # -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2007-2013, The Tor Project, Inc.
# (c) 2007-2013, all entities within the AUTHORS file
# :license: 3-clause BSD, see included LICENSE for information
"""Classes for running components and servers, as well as daemonisation.
** Module Overview: **
"""
from __future__ import print_function
import sys
def generateDescriptors(count=None, rundir=None):
    """Run a script which creates fake bridge descriptors for testing purposes.

    This will run Leekspin_ to create bridge server descriptors, bridge
    extra-info descriptors, and networkstatus document.

    .. warning: This function can take a very long time to run, especially in
        headless environments where entropy sources are minimal, because it
        creates the keys for each mocked OR, which are embedded in the server
        descriptors, used to calculate the OR fingerprints, and sign the
        descriptors, among other things.

    .. _Leekspin: https://gitweb.torproject.org/user/isis/leekspin.git

    :param integer count: Number of mocked bridges to generate descriptor
        for. (default: 3)
    :type rundir: string or None
    :param rundir: If given, use this directory as the current working
        directory for the bridge descriptor generator script to run in. The
        directory MUST already exist, and the descriptor files will be created
        in it. If None, use the whatever directory we are currently in.
    :returns: The subprocess exit status (0 on success).
    """
    import subprocess
    import os.path
    proc = None
    statuscode = 0
    script = 'leekspin'
    # Bug fix: os.path.isdir(None) raises TypeError, so only probe real
    # paths; anything else falls back to the current working directory.
    rundir = rundir if rundir and os.path.isdir(rundir) else None
    count = count if count else 3
    try:
        proc = subprocess.Popen([script, '-n', str(count)],
                                close_fds=True, cwd=rundir)
    finally:
        if proc is not None:
            proc.wait()
            if proc.returncode:
                print("There was an error generating bridge descriptors.",
                      "(Returncode: %d)" % proc.returncode)
                statuscode = proc.returncode
            else:
                print("Sucessfully generated %s descriptors." % str(count))
    return statuscode
def runTrial(options):
    """Run Twisted trial based unittests, optionally with coverage.
    :type options: :class:`~bridgedb.opt.TestOptions`
    :param options: Parsed options for controlling the twisted.trial test
        run. All unrecognised arguments after the known options will be passed
        along to trial.
    """
    from twisted.scripts import trial
    # Insert 'trial' as the first system cmdline argument:
    sys.argv = ['trial']
    cov = None
    if options['coverage']:
        try:
            from coverage import coverage
        except ImportError as ie:
            # Bug fix: `ie.message` only exists on Python 2 exceptions.
            print(str(ie))
        else:
            cov = coverage()
            cov.start()
            sys.argv.append('--coverage')
            sys.argv.append('--reporter=bwverbose')
    # Pass all arguments along to its options parser:
    if 'test_args' in options:
        for arg in options['test_args']:
            sys.argv.append(arg)
    # Tell trial to test the bridgedb package:
    sys.argv.append('bridgedb.test')
    trial.run()
    # Bug fix: previously this re-checked options['coverage'], so a failed
    # coverage import left `cov` unbound and cov.stop() raised NameError.
    if cov is not None:
        cov.stop()
        cov.html_report('_trial_temp/coverage/')
def runTests(options):
    """Run the old unittest-based test suite in ``bridgedb.Tests``.
    :type options: :class:`~bridgedb.opt.TestOptions`
    :param options: Parsed commandline options (currently unused here).
    """
    # __import__ with an empty fromlist returns the top-level ``bridgedb``
    # package; presumably ``.Tests`` is then reachable as an attribute once
    # the submodule has been imported -- confirm against bridgedb's layout.
    testModule = __import__('bridgedb.Tests', globals(), '', [])
    testModule.Tests.main()
def doDumpBridges(config):
    """Dump bridges by assignment to a file.
    This function handles the commandline '--dump-bridges' option.
    :type config: :class:`bridgedb.Main.Conf`
    :param config: The current configuration.
    """
    # Deferred import keeps Bucket (and its dependencies) out of the normal
    # startup path; it is only needed for this maintenance command.
    import bridgedb.Bucket as bucket
    bucketManager = bucket.BucketManager(config)
    bucketManager.assignBridgesToBuckets()
    bucketManager.dumpBridges()
|
dreieinhalb/canteenie | archive/canteenie_v1.py | Python | apache-2.0 | 3,987 | 0.029678 | #!/usr/bin/env python3
"""canteenie.py: A small python script that prints today's canteen/mensa menu for FAU on console."""
import requests
import datetime
import argparse
from lxml import html
from colorama import Fore, Style
import textwrap
import xmascc
# command line arguments
parser = argparse.ArgumentParser(description='A small python script that prints today\'s canteen/mensa menu for FAU on console.')
parser.add_argument('-m','--mensa', help='for which mensa? (lmpl: Erlangen Langemarckplatz (default), sued: Erlangen Süd, isch: Nürnberg Insel Schütt)', required=False, default="lmpl", choices=['lmpl', 'sued', 'isch'])
parser.add_argument('-l','--lite', help='disable ascii art header and color (lite view)', required=False, default=False, action='store_true')
args = vars(parser.parse_args())
# get html content from webpage
# NOTE(review): the XPath below is tied to the current werkswelt.de page
# layout and will silently yield an empty menu if the markup changes.
page = requests.get('http://www.werkswelt.de/?id=%s' %args['mensa'])
tree = html.fromstring(page.content)
menu = tree.xpath('/html/body/div[3]/div/div[2]/div[2]/text()')
# join to string and tidy up the text
menu_str = ' '.join(menu) # join list to one string
menu_str = menu_str.replace('\xa0', ' ') # remove no break space
menu_str = menu_str.replace('\n', ' ') # remove line feed
menu_str = menu_str.replace('\r', ' ') # remove carriage return
menu_str = " ".join(menu_str.split()) # remove more than one space
# count amount of meals
# "Essen <n>" headings mark regular meals, "Aktionsessen <n>" specials.
meal_count = menu_str.count("Essen")
meal_special_count = menu_str.count("Aktionsessen")
# print header
now = datetime.datetime.now()
if not args['lite']: print(Fore.YELLOW + '', end="")
if args['lite'] == False:
print("\t ")
print("\t __ __ ")
print("\t| \/ | ___ _ __ ___ __ _ ")
print("\t| |\/| |/ _ \ '_ \/ __|/ _` |")
print("\t| | | | __/ | | \__ \ (_| |")
print("\t|_| |_|\___|_| |_|___/\__,_|")
print("\t ")
if not args['lite']: print(Style.RESET_ALL + '', end="")
if not args['lite']: print(Fore.GREEN + '', end="")
if not args['lite']: print("\t", end='')
print("////////", now.strftime("%d.%m.%Y"),"/////////")
if not args['lite']: print(Style.RESET_ALL + '', end="")
print("")
def wrap(meal_string):
    """Print meal_string word-wrapped to 105 columns; continuation lines are
    indented with two tabs to line up under the meal-number column."""
    wrapper = textwrap.TextWrapper(width=105, subsequent_indent="\t\t")
    print(wrapper.fill(meal_string))
# print normal meals
# Each meal is announced in the scraped text as "Essen <i> ... (Gäste)"; the
# slice trims the guest-price suffix (shorter when the price is missing).
i = 1
while i < meal_count +1:
    if "Essen %d" %i in menu_str: # check for missing menu
        slice_amount = -8
        if "- €" in menu_str.split("Essen %d" %i,1)[1].split("(Gäste)",1)[0][:-8]: # check for missing price
            slice_amount = -5
        if not args['lite']: print(Fore.CYAN + '', end="")
        if not args['lite']: print("\t", end='')
        meal_string = "%d\t" %i + menu_str.split("Essen %d" %i,1)[1].split("(Gäste)",1)[0][:slice_amount]
        if not args['lite']:
            wrap(meal_string)
        else:
            print(meal_string)
        if not args['lite']: print(Style.RESET_ALL + '', end="")
        i += 1
    else:
        # Heading "Essen i" absent: widen the loop bound so later meals
        # (e.g. Essen 3 when Essen 2 is missing) are still reached.
        meal_count += 1
        i += 1
# print special meals
# Mirrors the normal-meals loop above for "Aktionsessen <i>" entries.
# Fixed: a stray " | " inside the slice index ([:slic | e_amount]) was a
# SyntaxError.
if meal_special_count != 0:
    print("")
    i = 1
    while i < meal_special_count + 1:
        if "Aktionsessen %d" %i in menu_str: # check for missing menu
            slice_amount = -8
            if "- €" in menu_str.split("Aktionsessen %d" %i,1)[1].split("(Gäste)",1)[0][:-8]: # check for missing price
                slice_amount = -5
            if not args['lite']: print(Fore.BLUE + '', end="")
            if not args['lite']: print("\t", end='')
            meal_special_string= "A%d\t" %i + menu_str.split("Aktionsessen %d" %i,1)[1].split("(Gäste)",1)[0][:slice_amount]
            if not args['lite']:
                wrap(meal_special_string)
            else:
                print(meal_special_string)
            if not args['lite']: print(Style.RESET_ALL + '', end="")
            i += 1
        else:
            # Missing "Aktionsessen i": widen the bound so later specials are found.
            meal_special_count += 1
            i += 1
print("")
if not args['lite']: print("")
#xmascc
#if not args['lite']: print(Fore.MAGENTA + '', end="")
#if not args['lite']: print("\t", end='')
#print(xmascc.get_countdown())
#if not args['lite']: print(Style.RESET_ALL + '', end="")
#print("")
|
circlesabound/matchr | server/Auth.py | Python | apache-2.0 | 4,880 | 0.005123 | # Credit to original post from Amar Birgisson at
# http://tools.cherrypy.org/wiki/AuthenticationAndAccessRestrictions
# Form based authentication for CherryPy. Requires the
# Session tool to be loaded.
import urllib
import urllib.parse

import cherrypy
from jinja2 import Environment, FileSystemLoader

import DB
env = Environment(loader=FileSystemLoader('public/html')) # Jinja2 environment
SESSION_KEY = '_cp_username'
def verify(username, password):
    """Verify *username*/*password* against the matchr.db credentials table.
    Returns the user ID on success.
    Raises ValueError when the credentials are rejected.
    NOTE(review): on a RuntimeError from the DB layer this *returns* the
    string "Could not open database" instead of raising -- callers that
    treat any truthy return as a valid user ID will mis-handle that case;
    confirm intent before relying on the return value.
    """
    try:
        db = DB.DB("matchr.db")
        db.connect()
        userID = db.check_credentials(username, password)
        # db.close() is skipped when check_credentials raises; presumably
        # acceptable here, but worth confirming there is no connection leak.
        db.close()
        if userID:
            return userID
        else:
            raise ValueError("Incorrect username or password")
    except RuntimeError:
        return "Could not open database"
def check_auth(*args, **kwargs):
    """A tool that looks in config for 'auth.require'. If found and it
    is not None, a login is required and the entry is evaluated as a list of
    conditions that the user must fulfill."""
    conditions = cherrypy.request.config.get('auth.require', None)
    # Re-encode the request path so it survives as a query parameter.
    # (Local was previously misnamed 'get_parmas'.)
    get_params = urllib.parse.quote(cherrypy.request.request_line.split()[1])
    if conditions is not None:
        username = cherrypy.session.get(SESSION_KEY)
        if username:
            cherrypy.request.login = username
            for condition in conditions:
                # A condition is just a callable that returns true or false
                if not condition():
                    # Send old page as from_page parameter
                    raise cherrypy.HTTPRedirect("/")
        else:
            # Send old page as from_page parameter
            raise cherrypy.HTTPRedirect("/auth/login?from_page=%s" % get_params)

cherrypy.tools.auth = cherrypy.Tool('before_handler', check_auth)
def require(*conditions):
    """Decorator factory: append access *conditions* to the wrapped
    handler's 'auth.require' config entry, creating the entry (and the
    _cp_config dict) on first use."""
    def decorate(handler):
        if not hasattr(handler, '_cp_config'):
            handler._cp_config = {}
        handler._cp_config.setdefault('auth.require', []).extend(conditions)
        return handler
    return decorate
# Conditions are callables that return True
# if the user fulfills the conditions they define, False otherwise
#
# They can access the current username as cherrypy.request.login
#
# Define those at will however suits the application.
def name_is(reqd_username):
    """Condition: true when the logged-in user is exactly reqd_username."""
    def check():
        return reqd_username == cherrypy.request.login
    return check
# These might be handy
def any_of(*conditions):
    """Combine conditions with OR: the result matches when at least one
    condition callable returns true (short-circuits on the first match)."""
    def check():
        return any(cond() for cond in conditions)
    return check
# By default all conditions are required, but this might still be
# needed if you want to use it inside of an any_of(...) condition
def all_of(*conditions):
    """Combine conditions with AND: the result matches only when every
    condition callable returns true (vacuously true with no conditions)."""
    def check():
        return all(cond() for cond in conditions)
    return check
# Controller to provide login and logout actions
class AuthController(object):
    """Exposed /auth controller providing the login and logout actions."""
    def on_login(self, username):
        """Called on successful login; caches the user's details in the session.
        NOTE(review): the parameter is named ``username`` but actually
        receives the user ID stored under SESSION_KEY.
        """
        try:
            db = DB.DB("matchr.db")
            db.connect()
            userID = cherrypy.session[SESSION_KEY]
            cherrypy.session['user_details'] = db.get_user_details(userID)
        except ValueError:
            raise ValueError("Could not find user details")
        except RuntimeError:
            raise ValueError("Could not open database")
    def on_logout(self, username):
        """Called on logout"""
    def get_loginform(self, username, msg="Enter login information", from_page="/"):
        # Render the Jinja2 login template with the given status message.
        tmpl = env.get_template("login.html")
        return tmpl.render(user = username, message = msg, from_pg = from_page)
    @cherrypy.expose
    def login(self, username=None, password=None, from_page="/"):
        """Show the login form, or verify a submitted username/password."""
        if username is None or password is None:
            return self.get_loginform("", from_page=from_page)
        try:
            userID = verify(username, password)
            # Store the user ID in the session and mark the request logged-in.
            cherrypy.session[SESSION_KEY] = cherrypy.request.login = userID
            self.on_login(userID)
            # HTTPRedirect is not a ValueError, so it escapes the except below.
            raise cherrypy.HTTPRedirect(from_page or "/")
        except ValueError as e:
            return self.get_loginform(username, e.args[0], from_page)
    @cherrypy.expose
    def logout(self, from_page="/"):
        """Clear the session's login marker and redirect to from_page."""
        sess = cherrypy.session
        username = sess.get(SESSION_KEY, None)
        sess[SESSION_KEY] = None
        if username:
            cherrypy.request.login = None
            self.on_logout(username)
        raise cherrypy.HTTPRedirect(from_page or "/")
|
aroth-arsoft/arsoft-meta-packages | grp_java.py | Python | gpl-3.0 | 2,247 | 0.040498 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# kate: space-indent on; indent-width 4; mixedindent off; indent-mode python;
# Package-group definitions for the Java meta-packages. Each dict describes
# one group: its name, description, per-Ubuntu-release package lists, and
# relationships ('depends', 'side-by-side', 'noconflicts').
# Fixed: the 'jre' entry's 'packages-bionic' key was corrupted by a stray
# " | " separator.
java = [
    {'name':'common',
     'mainpackage':True,
     'shortdesc':'Installs the latest version of Java',
     'description':'',
     'packages-trusty':['openjdk-7-jre-lib'],
     'packages-xenial':[],
     'packages-bionic':[],
     'packages-focal':[],
     'packages-groovy':[],
     'side-by-side':['jre-headless', 'jre', 'jdk'],
     },
    {'name':'jre-headless',
     'shortdesc':'Installs the latest version of the Java Runtime Environment',
     'description':'',
     'depends':['common'],
     'packages-trusty':['openjdk-7-jre-headless', 'openjdk-8-jre-headless'],
     'packages-xenial':['openjdk-8-jre-headless'],
     'packages-bionic':['openjdk-8-jre-headless'],
     'packages-focal':['openjdk-11-jre-headless'],
     'packages-groovy':['openjdk-11-jre-headless'],
     },
    {'name':'jre',
     'shortdesc':'Installs the latest version of the Java Runtime Environment',
     'description':'',
     'depends':['jre-headless'],
     'packages-trusty':['openjdk-7-jre', 'openjdk-8-jre'],
     'packages-xenial':['openjdk-8-jre'],
     'packages-bionic':['openjdk-8-jre'],
     'packages-focal':['openjdk-11-jre'],
     'packages-groovy':['openjdk-11-jre'],
     },
    {'name':'jdk',
     'shortdesc':'Installs the latest version of the Java Development Kit',
     'description':'',
     'depends':['jre'],
     'packages-trusty':['openjdk-7-jdk', 'openjdk-8-jdk'],
     'packages-xenial':['openjdk-8-jdk'],
     'packages-bionic':['openjdk-8-jdk'],
     'packages-focal':['openjdk-11-jdk'],
     'packages-groovy':['openjdk-11-jdk'],
     },
    {'name':'jdk-headless',
     'shortdesc':'Installs the latest version of the Java Development Kit',
     'description':'',
     'depends':['jre-headless'],
     'packages-trusty':['openjdk-7-jdk-headless', 'openjdk-8-jdk-headless'],
     'packages-xenial':['openjdk-8-jdk-headless'],
     'packages-bionic':['openjdk-8-jdk-headless'],
     'packages-focal':['openjdk-11-jdk-headless'],
     'packages-groovy':['openjdk-11-jdk-headless'],
     },
    {'name':'none',
     'shortdesc':'Uninstalls all versions of Java',
     'description':'',
     'packages':[],
     'noconflicts':[]
     },
    ]
|
yayoiukai/signalserver | signals/urls.py | Python | mit | 541 | 0 | from django.conf.urls import url
from . import views
app_name = 'signals'
# URL routes for the signals app.
# Fixed: the get_graph_data route's regex was corrupted by a stray " | ".
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^process/$', views.process, name='process'),
    url(r'^file_process_status/$', views.file_process_status,
        name='file_process_status'),
    url(r'^get_graph/$', views.get_graph, name='get_graph'),
    url(r'^api/get_graph_data/$', views.get_graph_data, name='get_graph_data'),
    url(r'^delete_output/(?P<process_pk>[\w.]{0,256})$',
        views.delete_output, name='delete_output'),
]
|
meshy/django-conman | example/example/migrations/0001_initial.py | Python | bsd-2-clause | 1,026 | 0.000975 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-03 07:40
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
class Migration(migrations.Migration):
    """Initial migration: create the Page model, a Route subclass that
    stores a raw HTML payload.
    Fixed: stray " | " separators corrupted 'models.OneToOneField' and the
    options dict (SyntaxError).
    """

    initial = True

    dependencies = [
        ('routes', '0003_add_validators'),
    ]

    operations = [
        migrations.CreateModel(
            name='Page',
            fields=[
                ('route_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='routes.Route')),
                ('raw_html', models.TextField(verbose_name='Raw HTML')),
            ],
            options={
                'abstract': False,
            },
            bases=('routes.route',),
            managers=[
                ('objects', django.db.models.manager.Manager()),
                ('base_objects', django.db.models.manager.Manager()),
            ],
        ),
    ]
|
depp/sglib | script/d3build/msvc/project.py | Python | bsd-2-clause | 11,200 | 0.000982 | # Copyright 2014 Dietrich Epp.
# This file is part of SGLib. SGLib is licensed under the terms of the
# 2-clause BSD license. For more information, see LICENSE.txt.
import uuid as uuid_module
import xml.etree.ElementTree as etree
from ..util import indent_xml
from ..error import ConfigError
import io
import os
Element = etree.Element
SubElement = etree.SubElement
XMLNS = 'http://schemas.microsoft.com/developer/msbuild/2003'
def condition(variant):
    """Return an MSBuild Condition expression matching the given
    'Configuration|Platform' variant string."""
    template = "'$(Configuration)|$(Platform)'=='{}'"
    return template.format(variant)
# Map a source-type key (file-extension style) to (sort index, MSBuild item
# tag). The index preserves the declaration order of the groups below so
# sources can be sorted by category; keys like 'c c++' fan out to one entry
# per space-separated token.
SOURCE_TYPES = {kk: (n, v) for n, (k, v) in enumerate([
    ('c c++', 'ClCompile'),
    ('h h++', 'ClInclude'),
    ('rc', 'ResourceCompile'),
    ('vcxproj', 'ProjectReference'),
]) for kk in k.split()}
def proj_import(root, path):
    """Append an <Import Project="..."/> child importing *path* under *root*."""
    attrib = {'Project': path}
    SubElement(root, 'Import', attrib)
def emit_properties(*, element, props, var=None):
    """Emit one <key>value</key> child of *element* per entry in *props*,
    in sorted key order.
    Values: str is taken verbatim; bool becomes 'true'/'false'; a list is
    joined with ';' and suffixed with '<var>(<key>)' (so *var* must be
    given, e.g. '%' or '$'). Any other type raises TypeError.
    """
    for key in sorted(props):
        value = props[key]
        if isinstance(value, list):
            assert var is not None
            text = '{};{}({})'.format(';'.join(value), var, key)
        elif isinstance(value, bool):
            text = 'true' if value else 'false'
        elif isinstance(value, str):
            text = value
        else:
            raise TypeError('unexpected property type: {}'.format(type(value)))
        SubElement(element, key).text = text
TYPE_CPP = uuid_module.UUID('8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942')
class Project(object):
    """A Visual Studio project (e.g. one read from disk; see UserProject
    for generated projects)."""
    __slots__ = [
        # The project name.
        'name',
        # Path to the project file.
        'path',
        # The project type UUID.
        'type',
        # The project UUID.
        'uuid',
        # Map from solution (config, platform) to project (config, platform).
        'configs',
        # List of project dependencies (other projects).
        'dependencies',
    ]
    @property
    def sourcetype(self):
        """Source-type key used by SOURCE_TYPES ('vcxproj' => ProjectReference)."""
        return 'vcxproj'
    def emit(self):
        """Emit project files if necessary."""
        # Base projects already exist on disk, so there is nothing to write.
class UserProject(Project):
    """A user-generated Visual Studio project."""
    __slots__ = [
        # Contents of the project file.
        '_data_project',
        # Contents of the filter file.
        '_data_filter',
        # Contents of the user file.
        '_data_user',
    ]
    def emit(self):
        """Emit project files if necessary."""
        # Write the three companion files into the current working
        # directory, named after the project.
        with open(self.name + '.vcxproj', 'wb') as fp:
            fp.write(self._data_project)
        with open(self.name + '.vcxproj.filters', 'wb') as fp:
            fp.write(self._data_filter)
        with open(self.name + '.vcxproj.user', 'wb') as fp:
            fp.write(self._data_user)
def read_project(*, path, configs):
    """Read a Visual Studio project.

    path: path to an existing ``.vcxproj`` file.
    configs: map from solution (config, platform) to project
        (config, platform); stored on the returned object as-is.
    Returns a Project describing the file.
    Raises ConfigError if the path or file contents cannot be parsed.
    """
    if os.path.splitext(path)[1] != '.vcxproj':
        # Bug fix: this raised UserError, which is never imported in this
        # module and therefore produced a NameError instead.
        raise ConfigError('invalid Visual Studio project extension')
    with open(path, 'rb') as fp:
        doc = etree.parse(fp)
    def get_uuid():
        # Locate <PropertyGroup><ProjectGuid> and parse it as a UUID.
        gtag = etree.QName(XMLNS, 'PropertyGroup')
        ptag = etree.QName(XMLNS, 'ProjectGuid')
        for gelem in doc.getroot():
            if gelem.tag == gtag:
                for pelem in gelem:
                    if pelem.tag == ptag:
                        return uuid_module.UUID(pelem.text)
        raise ConfigError('could not detect project UUID: {}'.format(path))
    def get_configs():
        # Collect every (Configuration, Platform) pair the project declares.
        # NOTE(review): currently unused -- obj.configs below comes from the
        # caller-supplied mapping instead; confirm whether this is intended.
        gtag = etree.QName(XMLNS, 'ItemGroup')
        itag = etree.QName(XMLNS, 'ProjectConfiguration')
        ctag = etree.QName(XMLNS, 'Configuration')
        ptag = etree.QName(XMLNS, 'Platform')
        configs = []
        for gelem in doc.getroot():
            if (gelem.tag != gtag or
                    gelem.attrib.get('Label') != 'ProjectConfigurations'):
                continue
            for ielem in gelem:
                if ielem.tag != itag:
                    continue
                cfg = None
                plat = None
                for pelem in ielem:
                    if pelem.tag == ctag:
                        cfg = pelem.text
                    elif pelem.tag == ptag:
                        plat = pelem.text
                if cfg is None or plat is None:
                    raise ConfigError(
                        'could not parse project configurations')
                configs.append((cfg, plat))
        return configs
    obj = Project()
    obj.name = os.path.splitext(os.path.basename(path))[0]
    obj.path = path
    obj.type = TYPE_CPP
    obj.uuid = get_uuid()
    obj.configs = configs
    obj.dependencies = []
    return obj
def xml_data(root):
    """Serialize *root* to UTF-8 bytes after pretty-indenting it in place."""
    indent_xml(root)
    return etree.tostring(root, encoding='UTF-8')
def create_project(*, name, sources, uuid, variants, props, arguments):
"""Create a Visual Studio project.
name: the project name.
sources: list of source files in the project.
uuid: the project UUID.
variants: list of "config|arch" variants.
props: map from "config|arch" to map from group to prop dict.
arguments: default arguments for debugging.
"""
def create_project():
root = Element('Project', {
'xmlns': XMLNS,
'ToolsVersion': '12.0',
'DefaultTargets': 'Build',
})
cfgs = SubElement(
root, 'ItemGroup', {'Label': 'ProjectConfigurations'})
for variant in variants:
pc = SubElement(
cfgs, 'ProjectConfiguration', {'Include': variant})
configuration, platform = variant.split('|')
SubElement(pc, 'Configuration').text = configuration
SubElement(pc, 'Platform').text = platform
del cfgs, variant, configuration, platform, pc
pg = SubElement(root, 'PropertyGroup', {'Label': 'Globals'})
SubElement(pg, 'Keyword').text = 'Win32Proj'
SubElement(pg, 'ProjectGuid').text = \
'{{{}}}'.format(str(uuid).upper())
# RootNamespace
del pg
proj_import(root, '$(VCTargetsPath)\\Microsoft.Cpp.Default.props')
for variant in variants:
emit_properties(
element=SubElement(root, 'PropertyGroup', {
'Condition': condition(variant),
'Label': 'Configuration',
}),
props=props[variant]['Config'])
del variant
proj_import(root, '$(VCTargetsPath)\\Microsoft.Cpp.props')
SubElement(root, 'ImportGroup', {'Label': 'ExtensionSettings'})
for variant in variants:
ig = SubElement(root, 'ImportGroup', {
'Label': 'PropertySheets',
'Condition': condition(variant),
})
path = '$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props'
i = SubElement(ig, 'Import', {
'Project': path,
'Condition': "exists('{}')".format(path),
'Label': 'LocalAppDataPlatform',
})
del variant, ig, path, i
SubElement(root, 'PropertyGroup', {'Label': 'UserMacros'})
for variant in variants:
emit_properties(
element=SubElement(root, 'PropertyGroup', {
'Condition': condition(variant),
}),
props=props[variant]['VC'],
var='$')
del variant
for variant in variants:
ig = SubElement(root, 'ItemDefinitionGroup', {
'Condition': condition(variant),
})
for group in ('ClCompile', 'Link'):
emit_properties(
element=SubElement(ig, group),
props=props[variant][group],
var='%')
del variant, ig, group
groups = {}
for source in sources:
try:
index, tag = SOURCE_TYPES[source.sourcetype]
except KeyError:
raise ConfigError(
'cannot add file to executable: {}'.format(source.path))
try:
group = groups[index]
except KeyError:
group = Element('ItemGroup')
groups[index] = group
src = SubElement(group, tag, {'Include': source.path})
if tag == 'ProjectReference':
SubElement(src, 'Projec |
rodrigolucianocosta/ProjectParking | ProjectParking/Parking/django-localflavor-1.1/tests/test_generic.py | Python | mpl-2.0 | 12,114 | 0.001486 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ValidationError, ImproperlyConfigured
from django.test import SimpleTestCase, TestCase
from django.utils import formats
from localflavor.generic.models import BICField, IBANField
from localflavor.generic.validators import BICValidator, IBANValidator
from localflavor.generic.forms import DateField, DateTimeField, SplitDateTimeField, BICFormField, IBANFormField
class DateTimeFieldTestCase(SimpleTestCase):
    """Shared base for localflavor date/datetime field tests.

    Holds the canonical default input-format tuples plus a helper for
    asserting a form field's ``input_formats``.
    """
    # Default formats for localflavor's generic DateField.
    default_date_input_formats = (
        '%Y-%m-%d', '%d/%m/%Y', '%d/%m/%y', '%b %d %Y', '%b %d, %Y',
        '%d %b %Y', '%d %b, %Y', '%B %d %Y', '%B %d, %Y', '%d %B %Y',
        '%d %B, %Y',
    )
    # Default formats for localflavor's generic DateTimeField.
    default_datetime_input_formats = (
        '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d', '%d/%m/%Y %H:%M:%S',
        '%d/%m/%Y %H:%M', '%d/%m/%Y', '%d/%m/%y %H:%M:%S', '%d/%m/%y %H:%M',
        '%d/%m/%y',
    )
    def assertInputFormats(self, field, formats):
        """Assert *field* accepts exactly *formats*, in the same order."""
        self.assertSequenceEqual(field.input_formats, formats)
class DateFieldTests(DateTimeFieldTestCase):
    """DateField must fall back to its default input formats when none
    (or an empty tuple) are supplied, and honour custom ones verbatim."""
    def setUp(self):
        self.default_input_formats = self.default_date_input_formats
    def test_init_no_input_formats(self):
        # Omitting input_formats entirely -> defaults.
        field = DateField()
        self.assertInputFormats(field, self.default_input_formats)
    def test_init_empty_input_formats(self):
        # An explicitly empty tuple also falls back to the defaults.
        field = DateField(input_formats=())
        self.assertInputFormats(field, self.default_input_formats)
    def test_init_custom_input_formats(self):
        # Custom formats are used exactly as given.
        input_formats = ('%m/%d/%Y', '%m/%d/%y')
        field = DateField(input_formats=input_formats)
        self.assertInputFormats(field, input_formats)
class DateTimeFieldTests(DateTimeFieldTestCase):
    """Same fallback behaviour as DateFieldTests, for DateTimeField."""
    def setUp(self):
        self.default_input_formats = self.default_datetime_input_formats
    def test_init_no_input_formats(self):
        # Omitting input_formats entirely -> defaults.
        field = DateTimeField()
        self.assertInputFormats(field, self.default_input_formats)
    def test_init_empty_input_formats(self):
        # An explicitly empty tuple also falls back to the defaults.
        field = DateTimeField(input_formats=())
        self.assertInputFormats(field, self.default_input_formats)
    def test_init_custom_input_formats(self):
        # Custom formats are used exactly as given.
        input_formats = ('%m/%d/%Y %H:%M', '%m/%d/%y %H:%M')
        field = DateTimeField(input_formats=input_formats)
        self.assertInputFormats(field, input_formats)
class SplitDateTimeFieldTests(DateTimeFieldTestCase):
    """SplitDateTimeField wraps a date sub-field and a time sub-field;
    each applies its own default/custom input-format rules."""
    # Time formats come from Django's locale machinery, lazily.
    default_time_input_formats = formats.get_format_lazy('TIME_INPUT_FORMATS')
    def test_init_no_input_formats(self):
        # No formats supplied -> both sub-fields use their defaults.
        field = SplitDateTimeField()
        date_field, time_field = field.fields
        self.assertInputFormats(date_field, self.default_date_input_formats)
        self.assertInputFormats(time_field, self.default_time_input_formats)
    def test_init_empty_input_formats(self):
        # Empty tuples: the date sub-field falls back to its defaults,
        # while the time sub-field keeps the empty tuple.
        field = SplitDateTimeField(input_date_formats=(),
                                   input_time_formats=())
        date_field, time_field = field.fields
        self.assertInputFormats(date_field, self.default_date_input_formats)
        self.assertInputFormats(time_field, ())
    def test_init_custom_input_formats(self):
        # Custom formats are used exactly as given, per sub-field.
        date_input_formats = ('%m/%d/%Y', '%m/%d/%y')
        time_input_formats = ('%H:%M', '%H:%M:%S')
        field = SplitDateTimeField(input_date_formats=date_input_formats,
                                   input_time_formats=time_input_formats)
        date_field, time_field = field.fields
        self.assertInputFormats(date_field, date_input_formats)
        self.assertInputFormats(time_field, time_input_formats)
class IBANTests(TestCase):
def test_iban_validator(self):
valid = [
'GB82WeST12345698765432',
'GB82 WEST 1234 5698 7654 32',
'GR1601101250000000012300695',
'GR16-0110-1250-0000-0001-2300-695',
'GB29NWBK60161331926819',
'GB29N-WB K6016-13319-26819',
'SA0380000000608010167519',
'SA0380 0 0000 06 0 8 0 1 0 1 6 7 519 ',
'CH9300762011623852957',
'IL620108000000099999999',
'EE982200221111099080',
]
invalid = {
'GB82WEST1234569876543': 'GB IBANs must contain 22 characters.',
'CA34CIBC123425345': 'CA is not a valid country code for IBAN.',
'GB29ÉWBK60161331926819': 'is not a valid character for IBAN.',
'SA0380000000608019167519': 'Not a valid IBAN.',
'EE012200221111099080': 'Not a valid IBAN.',
}
for iban in valid:
IBANValidator(iban)
for iban in invalid:
self.assertRaisesMessage(ValidationError, invalid[iban], IBANValidator(), iban)
def test_iban_fields(self):
""" Test the IBAN model and form field. """
valid = {
'NL02ABNA0123456789': 'NL02ABNA0123456789',
'Nl02aBNa0123456789': 'NL02ABNA0123456789',
'NL02 ABNA 0123 4567 89': 'NL02ABNA0123456789',
'NL02-ABNA-0123-4567-89': 'NL02ABNA0123456789',
'NL91ABNA0417164300': 'NL91ABNA0417164300',
'NL91 ABNA 0417 1643 00': 'NL91ABNA0417164300',
'NL91-ABNA-0417-1643-00': 'NL91ABNA0417164300',
'MU17BOMM0101101030300200000MUR': 'MU17BOMM0101101030300200000MUR',
'MU17 BOMM 0101 1010 3030 0200 000M UR': 'MU17BOMM0101101030300200000MUR',
'MU 17BO MM01011010 3030-02 000-00M UR': 'MU17BOMM0101101030300200000MUR',
'BE68539007547034': 'BE68539007547034',
'BE68 5390 0754 7034': 'BE68539007547034',
'BE-685390075470 34': 'BE68539007547034',
}
invalid = {
'NL02ABNA012345678999': ['NL IBANs must contain 18 characters.'],
'NL02 ABNA 0123 4567 8999': ['NL IBANs must contain 18 characters.'],
'NL91ABNB0417164300': ['Not a valid IBAN.'],
'NL91 ABNB 0417 1643 00': ['Not a valid IBAN.'],
'MU17BOMM0101101030300200000MUR12345': [
'MU IBANs must contain 30 characters.',
'Ensure this value has at most 34 characters (it has 35).'],
'MU17 BOMM 0101 1010 3030 0200 000M UR12 345': [
'MU IBANs must contain 30 characters.',
'Ensure this value has at most 34 characters (it has 35).'],
# This IBAN should only be valid only if the Nordea extensions are turned on.
'EG1100006001880800100014553': ['EG is not a valid country code for IBAN.'],
'EG11 0000 6001 8808 0010 0014 553': ['EG is not a valid country code for IBAN.']
}
self.assertFieldOutput(IBANFormField, valid=valid, invalid=invalid)
# Test valid inputs for model field.
iban_model_field = IBANField()
for input, output in valid.items():
self.assertEqual(iban_model_field.clean(input, None), output)
# Invalid inputs for model field.
for input, errors in invalid.items():
with self.assertRaises(ValidationError) as context_manager:
iban_model_field.clean(input, None)
# The error messages for models are in a different order.
errors.reverse()
self.assertEqual(context_manager.exception.messages, errors)
def test_nordea_extensions(self):
""" Test a valid IBAN in the Nordea extensions. """
iban_validator = IBANValidator(use_nordea_extensions=True)
# Run the validator to ensure there are no ValidationErrors raised.
iban_validator('Eg1100006001880800100014553')
def test_form_field_formatting(self):
iban_form_field = IBANFormField()
self.assertEqual(iban_form_field.prepare_value('NL02ABNA0123456789'), 'NL02 ABNA 0123 4567 89')
self.assertEqual(iban_form_field.prepare_value('NL02 ABNA 0123 4567 89'), 'NL02 ABNA 0123 4567 89')
self.assertIsNone(iban_form_field.prepare_value(None))
def test_include_countries(self):
""" Test the IBAN model and form include_countries feature. """
include_countries = ('NL', 'BE', 'LU')
valid = {
'NL02ABNA0123456789': 'NL02ABNA0123456789',
'BE685390075470 |
sjaa/scheduler | sched_core/models.py | Python | gpl-3.0 | 352 | 0.011364 | from django.db import models
class TimeStampedModel(models.Model):
    '''
    An abstract base class model that provides self-updating
    'created' and 'modified' fields.
    '''
    created = models.DateTimeField(auto_now_add=True)   # set once, on INSERT
    modified = models.DateTimeField(auto_now=True)      # refreshed on every save()

    class Meta:
        # Abstract: no table is created for this model itself.
        abstract = True
andycavatorta/oratio | Roles/avl-formant-3/main.py | Python | mit | 7,853 | 0.006494 | import commands
import os
import Queue
import settings
import time
import threading
import wiringpi as wpi
import sys
import traceback
#BASE_PATH = os.path.dirname(os.path.realpath(__file__))
#UPPER_PATH = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0]
#DEVICES_PATH = "%s/Hosts/" % (BASE_PATH )
#THIRTYBIRDS_PATH = "%s/thirtybirds_2_0" % (UPPER_PATH )
#sys.path.append(BASE_PATH)
#sys.path.append(UPPER_PATH)
from thirtybirds_2_0.Network.manager import init as network_init
from thirtybirds_2_0.Adaptors.Sensors import AMT203_expanded_spi
from thirtybirds_2_0.Updates.manager import init as updates_init
# One-time WiringPi initialisation at import: GPIO setup, then SPI channel 0
# at 500 kHz (presumably the amplifier's digital volume pot -- confirm
# against the hardware schematic).
wpi.wiringPiSetup()
wpi.wiringPiSPISetup(0, 500000)
class Network(object):
    """Thin wrapper that joins the thirtybirds pub/sub network as a client.

    The two callbacks are invoked from the network library's own threads,
    not from the owner's thread.
    """
    def __init__(self, hostname, network_message_handler, network_status_handler):
        self.hostname = hostname
        self.thirtybirds = network_init(
            hostname=hostname,
            role="client",
            discovery_multicastGroup=settings.discovery_multicastGroup,
            discovery_multicastPort=settings.discovery_multicastPort,
            discovery_responsePort=settings.discovery_responsePort,
            pubsub_pubPort=settings.pubsub_pubPort,
            message_callback=network_message_handler,
            status_callback=network_status_handler
        )
########################
## UTILS
########################
class Utils(object):
    """Host-level helpers: hostname parsing, git/self-update hooks and
    Raspberry Pi health statistics (shelled out via ``commands``)."""
    def __init__(self, hostname):
        self.hostname = hostname
    def reboot(self):
        """Reboot the Pi (requires passwordless sudo)."""
        os.system("sudo reboot now")
    def get_shelf_id(self):
        # Shelf id is the single character at position 11 of the hostname.
        return self.hostname[11:][:1]
    def get_camera_id(self):
        # Camera id is everything from position 12 onward.
        return self.hostname[12:]
    def create_image_file_name(self, timestamp, light_level, process_type):
        """Build the canonical capture filename:
        <timestamp>_<shelf>_<camera>_<light>_<process>.png"""
        return "{}_{}_{}_{}_{}.png".format(timestamp, self.get_shelf_id() , self.get_camera_id(), light_level, process_type)
    def remote_update_git(self, oratio, thirtybirds, update, upgrade):
        """git-pull the selected checkouts.  *update*/*upgrade* are accepted
        for API compatibility but currently unused."""
        import subprocess  # local import: subprocess is not imported at module level
        if oratio:
            subprocess.call(['sudo', 'git', 'pull'], cwd='/home/pi/oratio')
        if thirtybirds:
            subprocess.call(['sudo', 'git', 'pull'], cwd='/home/pi/thirtybirds_2_0')
        return
    def remote_update_scripts(self):
        """Trigger the thirtybirds update mechanism for the oratio checkout."""
        updates_init("/home/pi/oratio", False, True)
        return
    def get_update_script_version(self):
        (updates, ghStatus, bsStatus) = updates_init("/home/pi/oratio", False, False)
        return updates.read_version_pickle()
    def get_git_timestamp(self):
        """Commit date of HEAD in the oratio checkout."""
        return commands.getstatusoutput("cd /home/pi/oratio/; git log -1 --format=%cd")[1]
    def get_temp(self):
        """SoC temperature string from vcgencmd."""
        return commands.getstatusoutput("/opt/vc/bin/vcgencmd measure_temp")[1]
    def get_cpu(self):
        # Field 12 of `uptime` output -- load average (position is
        # formatting-dependent; fragile by design of the original).
        bash_output = commands.getstatusoutput("uptime")[1]
        split_output = bash_output.split(" ")
        return split_output[12]
    def get_uptime(self):
        # Field 4 of `uptime` output -- elapsed time since boot.
        bash_output = commands.getstatusoutput("uptime")[1]
        split_output = bash_output.split(" ")
        return split_output[4]
    def get_disk(self):
        # stub for now
        return "0"
    def get_client_status(self):
        """Tuple of everything the monitor wants to know about this host."""
        return (self.hostname, self.get_update_script_version(), self.get_git_timestamp(), self.get_temp(), self.get_cpu(), self.get_uptime(), self.get_disk())
# Main handles network send/recv and can see all other classes directly
class Main(threading.Thread):
def __init__(self, hostname):
threading.Thread.__init__(self)
self.network = Network(hostname, self.network_message_handler, self.network_status_handler)
self.queue = Queue.Queue()
self.last_master_volume_level = 0
self.utils = Utils(hostname)
self.network.thirtybirds.subscribe_to_topic("voice_3")
self.network.thirtybirds.subscribe_to_topic("client_monitor_request")
self.network.thirtybirds.subscribe_to_topic("mandala_device_request")
self.status = {
"avl-formant-3":"pass", # because this passes if it can respond. maybe better tests in future
"avl-formant-3-amplifier":"unset"
}
def update_device_status(self, devicename, status):
print "update_device_status 1",devicename, status
if self.status[devicename] != status:
self.status[devicename] = status
msg = [devicename, status]
print "update_device_status 2",devicename, status
self.network.thirtybirds.send("mandala_device_status", msg)
def get_device_status(self):
for devicename in self.status:
msg = [devicename, self.status[devicename]]
self.network.thirtybirds.send("mandala_device_status", msg)
def network_message_handler(self, topic_msg):
# this method runs in the thread of the caller, not the tread of Main
topic, msg = topic_msg # separating just to eval msg. best to do it early. it should be done in TB.
if len(msg) > 0:
msg = eval(msg)
self.add_to_queue(topic, msg)
def network_status_handler(self, topic_msg):
# this method runs in the thread of the caller, not the tread of Main
print "Main.network_status_handler", topic_msg
def add_to_queue(self, topic, msg):
self.queue.put((topic, msg))
def run(self):
master_volume = 0
try:
wpi.wiringPiSPIDataRW(0, chr(0) + chr(0)) # set volume to zero as test of comms
self.update_device_status("avl-formant-3-amplifier", "pass")
except Exception as e:
self.update_device_status("avl-formant-3-amplifier", "fail")
while True:
try:
try:
topic, msg = self.queue.get(False)
if topic == "mandala_device_re | quest":
self.get_device_status()
if topic == "voice_3":
master_volume = msg[1] * 100
master_volume = 0 if master_volume < 10 else master_volume - 10
except Queue.Empty:
pass
#if master_volume != self.last_master_volume_level :
if master_volume > self.last_master_volume_level | :
#print "upside A master_volume=", master_volume, "self.last_master_volume_level", self.last_master_volume_level
self.last_master_volume_level = self.last_master_volume_level + 1
gain = int(102 + (self.last_master_volume_level)) if self.last_master_volume_level > 1 else 0
print "upside B master_volume=", master_volume, "self.last_master_volume_level", self.last_master_volume_level, gain
wpi.wiringPiSPIDataRW(0, chr(gain) + chr(0))
time.sleep(0.001)
continue
if master_volume < self.last_master_volume_level :
#print "downside A master_volume=", master_volume, "self.last_master_volume_level", self.last_master_volume_level
self.last_master_volume_level = self.last_master_volume_level - 1
if self.last_master_volume_level < 0:
self.last_master_volume_level = 0
gain = int(102 + (self.last_master_volume_level)) if self.last_master_volume_level > 1 else 0
print "downside B master_volume=", master_volume, "self.last_master_volume_level", self.last_master_volume_level, gain
wpi.wiringPiSPIDataRW(0, chr(gain) + chr(0))
time.sleep(0.001)
continue
time.sleep(0.01)
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
print e, repr(traceback.format_exception(exc_type, exc_value,exc_traceback))
def init(hostname):
    """Create, daemonize and start the Main worker thread; return it."""
    main = Main(hostname)
    main.daemon = True
    main.start()
    return main
rddim/Notepad-plus-plus | scintilla/scripts/ScintillaData.py | Python | gpl-3.0 | 10,927 | 0.005583 | #!/usr/bin/env python3
# ScintillaData.py - implemented 2013 by Neil Hodgson neilh@scintilla.org
# Released to the public domain.
# Common code used by Scintilla and SciTE for source file regeneration.
# The ScintillaData object exposes information about Scintilla as properties:
# Version properties
# version
# versionDotted
# versionCommad
#
# Date last modified
# dateModified
# yearModified
# mdyModified
# dmyModified
# myModified
#
# Information about lexers and properties defined in lexers
# lexFiles
# sorted list of lexer files
# lexerModules
# sorted list of module names
# lexerProperties
# sorted list of lexer properties
# propertyDocuments
# dictionary of property documentation { name: document string }
# sclexFromName
# dictionary of SCLEX_* IDs { name: SCLEX_ID }
# fileFromSclex
# dictionary of file names { SCLEX_ID: file name }
# This file can be run to see the data it provides.
# Requires Python 3.6 or later
import datetime, pathlib, sys, textwrap
import FileGenerator
def FindModules(lexFile):
    """Extract LexerModule declarations from a lexer source file.

    lexFile is a pathlib.Path.  Each ``LexerModule name(SCLEX_ID, ..., "lang")``
    statement (possibly wrapped across lines up to the closing paren) yields a
    ``[module_name, SCLEX_ID, language]`` triple, in file order.
    """
    found = []
    pending = ""
    with lexFile.open() as src:
        for raw in src:
            line = raw.rstrip()
            # Only interested in LexerModule statements (or their continuations).
            if not pending and not line.startswith("LexerModule"):
                continue
            if ")" not in line:
                # Statement continues on the next line; keep accumulating.
                pending += line
                continue
            statement = pending + line
            for ch in "(),":
                statement = statement.replace(ch, " ")
            fields = statement.split()
            # fields: LexerModule, name, SCLEX_ID, lexer-fn, "lang", ...
            found.append([fields[1], fields[2], fields[4][1:-1]])
            pending = ""
    return found
def FindLexersInXcode(xCodeProject):
    """Map lexer base name -> [buildFileUID, fileRefUID] from an Xcode
    project file, using FileGenerator's section-slicing helpers.

    The parsing is positional (``pieces[0]``, ``pieces[2]``, ``pieces[12]``)
    and therefore tied to the exact pbxproj line layout shown in the
    comments below.
    """
    lines = FileGenerator.ReadFileAsList(xCodeProject)
    uidsOfBuild = {}
    markersPBXBuildFile = ["Begin PBXBuildFile section", "", "End PBXBuildFile section"]
    for buildLine in lines[FileGenerator.FindSectionInList(lines, markersPBXBuildFile)]:
        # Occurs for each file in the build. Find the UIDs used for the file.
        #\t\t[0-9A-F]+ /* [a-zA-Z]+.cxx in sources */ = {isa = PBXBuildFile; fileRef = [0-9A-F]+ /* [a-zA-Z]+ */; };
        pieces = buildLine.split()
        uid1 = pieces[0]
        filename = pieces[2].split(".")[0]
        uid2 = pieces[12]
        uidsOfBuild[filename] = [uid1, uid2]
    lexers = {}
    markersLexers = ["/* Lexers */ =", "children", ");"]
    for lexerLine in lines[FileGenerator.FindSectionInList(lines, markersLexers)]:
        #\t\t\t\t[0-9A-F]+ /* [a-zA-Z]+.cxx */,
        uid, _, rest = lexerLine.partition("/* ")
        uid = uid.strip()
        lexer, _, _ = rest.partition(".")
        lexers[lexer] = uidsOfBuild[lexer]
    return lexers
# Properties that start with lexer. or fold. are automatically found but there are some
# older properties that don't follow this pattern so must be explicitly listed.
knownIrregularProperties = [
    "fold",
    "styling.within.preprocessor",
    "tab.timmy.whinge.level",
    "asp.default.language",
    "html.tags.case.sensitive",
    "ps.level",
    "ps.tokenize",
    "sql.backslash.escapes",
    "nsis.uservars",
    "nsis.ignorecase"
]

def FindProperties(lexFile):
    """Return ``{property_name: 1}`` for every recognised lexer property
    referenced via GetProperty/DefineProperty in *lexFile* (a path).

    A name is recognised when it is lower-case and either listed in
    knownIrregularProperties or prefixed with ``fold.`` / ``lexer.``.
    """
    found = {}
    with open(lexFile) as src:
        for raw in src.readlines():
            line = raw.strip()
            if "GetProperty" not in line and "DefineProperty" not in line:
                continue
            if "\"" not in line or line.startswith("//"):  # skip commented-out lines
                continue
            name = line.split("\"")[1]
            if name != name.lower():
                continue  # only lower-case names denote real properties
            if (name in knownIrregularProperties
                    or name.startswith("fold.")
                    or name.startswith("lexer.")):
                found[name] = 1
    return found
def FindPropertyDocumentation(lexFile):
    """Collect property documentation strings from a lexer source file.

    lexFile is a pathlib.Path.  Two styles are recognised:
    ``// property name`` followed by ``// ...`` comment lines, and
    ``DefineProperty("name", ...)`` followed by quoted C string literals.
    Returns {property_name: documentation}; properties with empty
    documentation are dropped.
    """
    documents = {}
    with lexFile.open() as f:
        name = ""  # property currently being documented ("" = none)
        for l in f.readlines():
            l = l.strip()
            if "// property " in l:
                propertyName = l.split()[2]
                if propertyName.lower() == propertyName:
                    # Only allow lower case property names
                    name = propertyName
                    documents[name] = ""
            elif "DefineProperty" in l and "\"" in l:
                propertyName = l.split("\"")[1]
                if propertyName.lower() == propertyName:
                    # Only allow lower case property names
                    name = propertyName
                    documents[name] = ""
            elif name:
                if l.startswith("//"):
                    # Continuation comment: join with a single space.
                    if documents[name]:
                        documents[name] += " "
                    documents[name] += l[2:].strip()
                elif l.startswith("\""):
                    # C string literal continuation: strip the surrounding
                    # quote/paren/semicolon syntax, keeping only the text.
                    l = l[1:].strip()
                    if l.endswith(";"):
                        l = l[:-1].strip()
                    if l.endswith(")"):
                        l = l[:-1].strip()  # fixed: was garbled "l = l.s | trip()"
                    if l.endswith("\""):
                        l = l[:-1]
                    # Fix escaped double quotes
                    l = l.replace("\\\"", "\"")
                    documents[name] += l
                else:
                    # Any other line ends the documentation block.
                    name = ""
    for name in list(documents.keys()):
        if documents[name] == "":
            del documents[name]
    return documents
def FindCredits(historyFile):
    """Extract the contributor credits from ScintillaHistory.html.

    historyFile is a pathlib.Path.  Scans only the first ``<table>`` in the
    file; each ``<td>`` cell becomes one credit string.  Cells containing a
    link are flattened to "title name url".
    """
    credits = []
    stage = 0  # 0 = before table, 1 = inside table, 2 = done
    with historyFile.open(encoding="utf-8") as f:
        for l in f.readlines():
            l = l.strip()
            if stage == 0 and l == "<table>":
                stage = 1
            elif stage == 1 and l == "</table>":
                stage = 2
            if stage == 1 and l.startswith("<td>"):
                credit = l[4:-5]  # strip <td> ... </td>
                if "<a" in l:
                    title, a, rest = credit.partition("<a href=")
                    urlplus, bracket, end = rest.partition(">")
                    name = end.split("<")[0]
                    url = urlplus[1:-1]  # drop surrounding quotes
                    credit = title.strip()
                    if credit:
                        credit += " "
                    credit += name + " " + url
                credits.append(credit)
    return credits
def ciKey(a):
    """Case-insensitive sort key: the lower-cased string form of *a*."""
    return str(a).lower()

def SortListInsensitive(l):
    """Sort *l* in place, ignoring case (non-strings compare via str())."""
    l.sort(key=ciKey)
class ScintillaData:
def __init__(self, scintillaRoot):
# Discover version information
self.version = (scintillaRoot / "version.txt").read_text().strip()
self.versionDotted = self.version[0] + '.' + self.version[1] + '.' + \
self.version[2]
self.versionCommad = self.versionDotted.replace(".", ", ") + ', 0'
with (scintillaRoot / "doc" / "index.html").open() as f:
self.dateModified = [l for l in f.readlines() if "Date.Modified" in l]\
[0].split('\"')[3]
# 20130602
# index.html, SciTE.html
dtModified = datetime.datetime.strptime(self.dateModified, "%Y%m%d")
self.yearModified = self.dateModified[0:4]
monthModified = dtModified.strftime("%B")
dayModified = "%d" % dtModified.day
self.mdyModified = monthModified + " " + dayModified + " " + self.yearModified
# May 22 2013
# index.html, SciTE.html
self.dmyModified = dayModified + " " + monthModified + " " + self.yearModified
# 22 May 2013
# ScintillaHistory.html -- only first should change
self.myModified = monthModified + " " + self.yearModified
# Find all the lexer source code files
lexFilePaths = list((scintillaRoot / "lexers").glob("Lex*.cxx"))
SortListInsensitive(lexFilePaths)
self.lexFiles = [f.stem for f in lexFilePaths]
|
RudolfCardinal/crate | crate_anon/preprocess/preprocess_pcmis.py | Python | gpl-3.0 | 35,444 | 0 | #!/usr/bin/env python
"""
crate_anon/preprocess/preprocess_pcmis.py
===============================================================================
Copyright (C) 2015-2021 Rudolf Cardinal (rudolf@pobox.com).
This file is part of CRATE.
CRATE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either ver | sion 3 of the License, or
(at your option) any later version.
CRATE is distributed in the hope that it will be useful, |
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CRATE. If not, see <https://www.gnu.org/licenses/>.
===============================================================================
**Preprocesses PCMIS tables for CRATE.**
PCMIS is an EMR for UK IAPT services from the University of York.
**PCMIS table structure**
No proper documentation, but the structure is clear. See
``pcmis_information_schema.ods``.
.. code-block:: none
- PatientDetails
PatientID -- PK; patient-defining field; VARCHAR(100)
FirstName
LastName
NHSNumber -- VARCHAR(100)
...
- Other per-patient things: Patient*
- CasesAll
CaseNumber -- appears to be unique; same #records as ReferralDetails
ReferralDate
- Many other per-case things: Case*
CaseNumber -- FK to CasesAll/ReferralDetails
- Group things: linked to cases via GroupMember
IMPORTANTLY: there are only two, Groups and GroupSession
and neither are identifiable.
- Carers: from PatientCarerDetails (CarerNumber, PatientID)
- Children: from PatientChildDetails (ChildNumber, PatientID)
- ReferralDetails
CaseNumber -- appears to be unique; same #records as CasesAll
PatientID -- not unique
PrimaryDiagnosis (e.g. 'F41.1')
- Non-patient stuff we'll filter out:
pcmis_UserProfiles
Users
- Then a lot of other things are index by ContactNumber, which probably
cross-refers to CaseContacts, having
ContactNumber INT
CaseNumber VARCHAR(100)
**Decisions re database keys and anonymisation**
For RiO, we had integer patient IDs but mangled into a text format. So there
were distinct performance advantages in making an integer version. For PCMIS,
patient IDs look like 'JC000001', 'SB000001' (where the letters are unrelated
to patients' initials; I'm not sure what they refer to). There are numerical
overlaps if you ignore the letters. So there is no neat integer mapping; we'd
be inventing an arbitrary new key if we added one.
So the tradeoff is simplicity (keep textual PK for patients) versus speed
(parallel processing based on an integer operation). It's natural to think of
an integer hash of a string, but this hash has to operate in the SQL
domain, be portable, and produce an integer (so SQL Server's HASHBYTES is of
no use. At present (2017-05-02), our PCMIS copy has ~53,000 patients in, and
there are lots of tables with patients in.
Therefore, DECISION: create an integer PK.
However, we could do this deterministically. Since the length is fixed, and the
numerical part goes up to 999999, and the letters are always upper case -- ah,
no, there are some like <digit><letter>999999. But 0-99 would be fine.
.. code-block:: sql
SELECT (
(ASCII(SUBSTRING(PatientID, 1, 1))) * 100000000 +
(ASCII(SUBSTRING(PatientID, 2, 1))) * 1000000 +
CAST(SUBSTRING(PatientID, 3, 6) AS BIGINT)
) AS patient_id_int
FROM PatientDetails
If we're using SQLAlchemy, then use things like func.substr instead, but it's
a reasonable compromise for now to say that a specific database like PCMIS is
going to be hosted on SQL Server, since PCMIS uses that
=============== ===================
SQL Server SQLAlchemy
=============== ===================
SUBSTR func.substr
ASCII
=============== ===================
What about CaseNumber -- is that identifying? If not, it can remain the
internal key to identify cases. If it is, then we have to replace it.
The first character is 1,9,A-Z except Q, X, Y (n=25).
The second character is 0,A-Z except I, Q, U, X, Z (n=22).
So, pretty spread.
The digits seem to be approximately consecutive.
So it does look more like an internal PK than something identifiable.
Mind you, very often it is identical to CaseNumber. So, do we need a second
hash?
Our PCMIS copy certainly has free text (search the schema for text types).
**Therefore, views and the like**
MAIN SOFTWARE CHANGES
- Support non-integer PIDs/MPIDs.
- Add an AlterMethod that is hash=hash_config_key_name
with e.g.
.. code-block:: ini
[hash_config_key_name]
method = hmacsha256
key = somesecretkey
TABLES
- If a table doesn't have a PK, give it an AUTONUMBER integer PK (e.g.
"crate_pk"). That looks to be true of ?all tables.
VIEWS
- In general, not needed: we can use PatientId and CaseNumber as non-integer
fields.
- We do need the geography views, though.
DATA DICTIONARY AUTOGENERATIO
- PatientId: always the PID.
- NHSNumber: always the MPID.
- CaseNumber: belongs in ddgen_extra_hash_fields, and users should give it the
same hash key as for the PID-to-RID conversion, since it's often the same
code.
"""
import argparse
import logging
from typing import List
from cardinal_pythonlib.argparse_func import RawDescriptionArgumentDefaultsHelpFormatter # noqa
from cardinal_pythonlib.debugging import pdb_run
from cardinal_pythonlib.logs import configure_logger_for_colour
from cardinal_pythonlib.sql.sql_grammar_factory import make_grammar
from cardinal_pythonlib.sqlalchemy.schema import (
get_effective_int_pk_col,
get_pk_colnames,
hack_in_mssql_xml_type,
make_bigint_autoincrement_column,
)
from sqlalchemy import (
create_engine,
MetaData,
)
from sqlalchemy.engine.base import Engine
from sqlalchemy.schema import Table
from crate_anon.anonymise.constants import CHARSET
from crate_anon.common.sql import (
add_columns,
add_indexes,
drop_columns,
drop_indexes,
ensure_columns_present,
get_column_names,
get_table_names,
set_print_not_execute,
ViewMaker,
)
from crate_anon.preprocess.rio_constants import (
DEFAULT_GEOG_COLS,
ONSPD_TABLE_POSTCODE,
)
from crate_anon.preprocess.rio_ddgen import DDHint
log = logging.getLogger(__name__)
# =============================================================================
# Constants
# =============================================================================
# Names CRATE adds to the PCMIS copy:
CRATE_COL_PK = "crate_pk"  # integer autonumber PK added to tables lacking one
CRATE_IDX_PK = "crate_idx_pk" # for any patient table
CRATE_VIEW_SUFFIX = "_crateview"  # suffix for CRATE-generated views
# Column names as they appear in the source PCMIS schema:
PCMIS_COL_CASE_NUMBER = "CaseNumber"
PCMIS_COL_CONTACT_NUMBER = "ContactNumber"
PCMIS_COL_NHS_NUMBER = "NHSNumber"
PCMIS_COL_PATIENT_ID = "PatientID"
PCMIS_COL_POSTCODE = "PostCode"
PCMIS_COL_PREV_POSTCODE = "PreviousPostCode"
# Key PCMIS tables and how they link (see module docstring):
PCMIS_TABLE_CASE_CONTACTS = "CaseContacts" # contacts -> cases
PCMIS_TABLE_CASE_CONTACT_DETAILS = "CaseContactDetails"
PCMIS_TABLE_REFERRAL_DETAILS = "ReferralDetails" # cases -> patients
PCMIS_TABLE_MASTER_PATIENT = "PatientDetails"
# Views joining contact/patient details to geography (postcode) data:
VIEW_CASE_CONTACT_DETAILS_W_GEOG = PCMIS_TABLE_CASE_CONTACT_DETAILS + CRATE_VIEW_SUFFIX  # noqa
VIEW_PT_DETAIL_W_GEOG = PCMIS_TABLE_MASTER_PATIENT + CRATE_VIEW_SUFFIX
# =============================================================================
# Config class
# =============================================================================
class PcmisConfigOptions(object):
"""
Hold configuration options for this program.
"""
def __init__(self,
postcodedb: str,
geogcols: List[str],
print_sql_only: bool,
drop_not_create: bool) -> None:
"""
Args:
postcodedb:
Specify database (schema) name for ONS Postcode Database (as
imported by CRATE) to lin |
werbk/task-5.14 | tests_group/group_lib.py | Python | apache-2.0 | 3,504 | 0.001427 | from sys import maxsize
class Group:
    """Value object for an address-book group in the UI tests.

    *id* mirrors the value attribute of the page's selection checkbox and is
    kept as a string (None for groups not yet persisted).
    """
    def __init__(self, group_name=None, group_header=None, group_footer=None, id=None):
        self.group_name = group_name
        self.group_header = group_header
        self.group_footer = group_footer
        self.id = id
    def __repr__(self):
        return '%s:%s' % (self.id, self.group_name)
    def __eq__(self, other):
        # Equal when names match and at least one id is unknown or both agree.
        return (self.id is None or other.id is None or self.id == other.id) and self.group_name == other.group_name
    def if_or_max(self):
        """Sort key: numeric id, or sys.maxsize when the id is unknown."""
        if self.id:
            return int(self.id)
        else:
            return maxsize
class GroupBase:
    """Page-object helper wrapping Selenium operations on the group page.

    Scraped results are memoised in *group_cache* and invalidated by every
    mutating operation.
    """
    def __init__(self, app):
        self.app = app
    def open_group_page(self):
        """Navigate to the group page unless the browser is already on it."""
        wd = self.app.wd
        if not (wd.current_url.endswith('/group.php') and len(wd.find_elements_by_name('new')) > 0):
            wd.find_element_by_link_text("groups").click()
    def count(self):
        """Number of groups listed (one 'selected[]' checkbox per group)."""
        wd = self.app.wd
        self.open_group_page()
        return len(wd.find_elements_by_name("selected[]"))
    def validation_of_group_exist(self):
        """Ensure at least one group exists, creating a throwaway one if not."""
        if self.count() == 0:
            self.create(Group(group_name='test'))
            self.click_group_page()
    def group_line(self, field, text):
        """Fill form input *field* with *text*; falsy text leaves it untouched."""
        wd = self.app.wd
        if text:
            wd.find_element_by_name(field).click()
            wd.find_element_by_name(field).clear()
            wd.find_element_by_name(field).send_keys(text)
    def create(self, Group):
        """Create a new group from the given Group value object."""
        wd = self.app.wd
        self.open_group_page()
        wd.find_element_by_name("new").click()
        self.group_line('group_name', Group.group_name)
        self.group_line('group_header', Group.group_header)
        self.group_line('group_footer', Group.group_footer)
        wd.find_element_by_name("submit").click()
        self.group_cache = None  # listing is now stale
    def delete_first_group(self):
        self.delete_group_by_index(0)
    def click_group_page(self):
        """Follow the 'group page' link in the post-action message box."""
        wd = self.app.wd
        wd.find_element_by_css_selector("div.msgbox").click()
        wd.find_element_by_link_text("group page").click()
    # Lazily-built cache of Group objects; None means "needs rescrape".
    group_cache = None
    def get_group_list(self):
        """Return the groups as Group objects, scraping only on first use."""
        if self.group_cache is None:
            wd = self.app.wd
            self.open_group_page()
            self.group_cache = []
            for element in wd.find_elements_by_css_selector('span.group'):
                text = element.text
                id = element.find_element_by_name('selected[]').get_attribute('value')
                self.group_cache.append(Group(group_name=text, id=id))
        return list(self.group_cache)  # copy: callers must not mutate the cache
    def select_group_by_index(self, index):
        wd = self.app.wd
        wd.find_elements_by_name("selected[]")[index].click()
    def delete_group_by_index(self, index):
        """Delete the group at *index* and return to the group page."""
        wd = self.app.wd
        self.open_group_page()
        self.select_group_by_index(index)
        wd.find_element_by_name('delete').click()
        self.click_group_page()
        self.group_cache = None
    def edit_group_by_index(self, Group, index):
        """Overwrite the fields of the group at *index* with Group's values."""
        wd = self.app.wd
        self.open_group_page()
        wd.find_elements_by_name("selected[]")[index].click()
        wd.find_element_by_name("edit").click()
        self.group_line('group_name', Group.group_name)
        self.group_line('group_header', Group.group_header)
        self.group_line('group_footer', Group.group_footer)
        wd.find_element_by_name("update").click()
        wd.find_element_by_link_text("groups").click()
        self.group_cache = None
renalreg/radar | tests/test_round_age.py | Python | agpl-3.0 | 257 | 0 | import pytest
from radar.utils import round_age
@pytest.mark.parametrize(['months', 'expected'], [
    (3, 3),  # 3 months
    (60, 60),  # 5 years
    (61, 60),  # 5 years, 1 month
])
def test(months, expected):
    # Per the fixture table: above five years the age in months is rounded
    # down to whole years.  (The decorator and def line were corrupted by
    # extraction artifacts and have been restored.)
    assert round_age(months) == expected
|
DinoCow/airflow | airflow/providers/amazon/aws/operators/cloud_formation.py | Python | apache-2.0 | 3,375 | 0.001481 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains CloudFormation create/delete stack operators."""
from typing import List, Optional
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.cloud_formation import AWSCloudFormationHook
from airflow.utils.decorators import apply_defaults
class CloudFormationCreateStackOperator(BaseOperator):
    """
    An operator that creates a CloudFormation stack.

    :param stack_name: stack name (templated)
    :type stack_name: str
    :param params: parameters to be passed to CloudFormation.

        .. seealso::
            https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudformation.html#CloudFormation.Client.create_stack
    :type params: dict
    :param aws_conn_id: aws connection to uses
    :type aws_conn_id: str
    """

    template_fields: List[str] = ['stack_name']
    template_ext = ()
    ui_color = '#6b9659'

    @apply_defaults
    def __init__(self, *, stack_name: str, params: dict, aws_conn_id: str = 'aws_default', **kwargs):
        super().__init__(**kwargs)
        # Stray '|' extraction artifacts were removed from this assignment and
        # from the log format string in execute() below.
        self.stack_name = stack_name
        self.params = params
        self.aws_conn_id = aws_conn_id

    def execute(self, context):
        self.log.info('Parameters: %s', self.params)

        cloudformation_hook = AWSCloudFormationHook(aws_conn_id=self.aws_conn_id)
        cloudformation_hook.create_stack(self.stack_name, self.params)
class CloudFormationDeleteStackOperator(BaseOperator):
    """
    An operator that deletes a CloudFormation stack.

    :param stack_name: stack name (templated)
    :type stack_name: str
    :param params: parameters to be passed to CloudFormation.

        .. seealso::
            https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudformation.html#CloudFormation.Client.delete_stack
    :type params: dict
    :param aws_conn_id: aws connection to uses
    :type aws_conn_id: str
    """

    template_fields: List[str] = ['stack_name']
    template_ext = ()
    ui_color = '#1d472b'
    ui_fgcolor = '#FFF'

    @apply_defaults
    def __init__(
        self, *, stack_name: str, params: Optional[dict] = None, aws_conn_id: str = 'aws_default', **kwargs
    ):
        super().__init__(**kwargs)
        self.stack_name = stack_name
        self.aws_conn_id = aws_conn_id
        # Fall back to an empty parameter mapping when none was supplied.
        self.params = params if params else {}

    def execute(self, context):
        self.log.info('Parameters: %s', self.params)

        hook = AWSCloudFormationHook(aws_conn_id=self.aws_conn_id)
        hook.delete_stack(self.stack_name, self.params)
|
emersonsoftware/ansiblefork | lib/ansible/galaxy/role.py | Python | gpl-3.0 | 14,352 | 0.003344 | ########################################################################
#
# (C) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import os
import tarfile
import tempfile
import yaml
from distutils.version import LooseVersion
from shutil import rmtree
import ansible.constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.urls import open_url
from ansible.playbook.role.requirement import RoleRequirement
from ansible.galaxy.api import GalaxyAPI
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyRole(object):
SUPPORTED_SCMS = set(['git', 'hg'])
META_MAIN = os.path.join('meta', 'main.yml')
META_INSTALL = os.path.join('meta', '.galaxy_install_info')
ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars','tests')
    def __init__(self, galaxy, name, src=None, version=None, scm=None, path=None):
        """Bind a role to a Galaxy context and resolve its on-disk path.

        When *path* is given the role lives there (with the role name appended
        unless already present); otherwise the first existing match in the
        configured roles paths wins, falling back to the first roles path.
        """
        self._metadata = None
        self._install_info = None
        self._validate_certs = not galaxy.options.ignore_certs
        display.debug('Validate TLS certificates: %s' % self._validate_certs)
        self.options = galaxy.options
        self.galaxy = galaxy
        self.name = name
        self.version = version
        # src defaults to the role name (e.g. a plain Galaxy role spec).
        self.src = src or name
        self.scm = scm
        if path is not None:
            if self.name not in path:
                path = os.path.join(path, self.name)
            self.path = path
        else:
            # Prefer an already-installed copy in any configured roles path.
            for role_path_dir in galaxy.roles_paths:
                role_path = os.path.join(role_path_dir, self.name)
                if os.path.exists(role_path):
                    self.path = role_path
                    break
            else:
                # use the first path by default
                self.path = os.path.join(galaxy.roles_paths[0], self.name)
        # create list of possible paths
        self.paths = [x for x in galaxy.roles_paths]
        self.paths = [os.path.join(x, self.name) for x in self.paths]
def __repr__(self):
"""
Returns "rolename (version)" if version is not null
Returns "rolename" otherwise
"""
if self.version:
return "%s (%s)" % (self.name, self.version)
else:
return self.name
    def __eq__(self, other):
        # Roles are identified by name only; version/src/scm are ignored.
        # NOTE(review): no matching __hash__ or isinstance check is visible here.
        return self.name == other.name
@property
def metadata(self):
"""
Returns role metadata
"""
if self._metadata is None:
meta_path = os.path.join(self.path, self.META_MAIN)
if os.path.isfile(meta_path):
try:
f = open(meta_path, 'r')
self._metadata = yaml.safe_load(f)
except:
| display.vvvvv("Unable to load metadata for %s" % self.name)
return False
finally:
f.close()
return self._metadata
@property
def install_info(self):
"""
Returns role install info
"""
if self._install_info is None:
info_path = os.path.join(self.path, self.META_INSTALL)
if os.path.isfile(info_path):
try:
f = open( | info_path, 'r')
self._install_info = yaml.safe_load(f)
except:
display.vvvvv("Unable to load Galaxy install info for %s" % self.name)
return False
finally:
f.close()
return self._install_info
def _write_galaxy_install_info(self):
"""
Writes a YAML-formatted file to the role's meta/ directory
(named .galaxy_install_info) which contains some information
we can use later for commands like 'list' and 'info'.
"""
info = dict(
version=self.version,
install_date=datetime.datetime.utcnow().strftime("%c"),
)
if not os.path.exists(os.path.join(self.path, 'meta')):
os.makedirs(os.path.join(self.path, 'meta'))
info_path = os.path.join(self.path, self.META_INSTALL)
with open(info_path, 'w+') as f:
try:
self._install_info = yaml.safe_dump(info, f)
except:
return False
return True
def remove(self):
"""
Removes the specified role from the roles path.
There is a sanity check to make sure there's a meta/main.yml file at this
path so the user doesn't blow away random directories.
"""
if self.metadata:
try:
rmtree(self.path)
return True
except:
pass
return False
def fetch(self, role_data):
"""
Downloads the archived role from github to a temp location
"""
if role_data:
# first grab the file and save it to a temp location
if "github_user" in role_data and "github_repo" in role_data:
archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], self.version)
else:
archive_url = self.src
display.display("- downloading role from %s" % archive_url)
try:
url_file = open_url(archive_url, validate_certs=self._validate_certs)
temp_file = tempfile.NamedTemporaryFile(delete=False)
data = url_file.read()
while data:
temp_file.write(data)
data = url_file.read()
temp_file.close()
return temp_file.name
except Exception as e:
display.error("failed to download the file: %s" % str(e))
return False
def install(self):
# the file is a tar, so open it that way and extract it
# to the specified (or default) roles directory
local_file = False
if self.scm:
# create tar file from scm url
tmp_file = RoleRequirement.scm_archive_role(**self.spec)
elif self.src:
if os.path.isfile(self.src):
# installing a local tar.gz
local_file = True
tmp_file = self.src
elif '://' in self.src:
role_data = self.src
tmp_file = self.fetch(role_data)
else:
api = GalaxyAPI(self.galaxy)
role_data = api.lookup_role_by_name(self.src)
if not role_data:
raise AnsibleError("- sorry, %s was not found on %s." % (self.src, api.api_server))
if role_data.get('role_type') == 'CON' and not os.environ.get('ANSIBLE_CONTAINER'):
# Container Enabled, running outside of a container
display.warning("%s is a Container Enabled role and should only be installed using "
"Ansible Container" % self.name)
if role_data.get('role_type') == 'APP':
# Container Role
display.warning("%s is a Container App role and should only be installed using Ansible "
"Container" % self.name)
|
psiq/gdsfactory | pp/components/waveguide.py | Python | mit | 4,434 | 0.001804 | from typing import List, Tuple
import hashlib
import pp
from pp.name import autoname
from pp.components.hline import hline
from pp.component import Component
@autoname
def waveguide(
    length: float = 10.0,
    width: float = 0.5,
    layer: Tuple[int, int] = pp.LAYER.WG,
    layers_cladding: List[Tuple[int, int]] = [pp.LAYER.WGCLAD],
    cladding_offset: float = 3.0,
) -> Component:
    """ straight waveguide

    Args:
        length: in X direction
        width: in Y direction

    .. plot::
      :include-source:

      import pp

      c = pp.c.waveguide(length=10, width=0.5)
      pp.plotgds(c)

    """
    # NOTE(review): the mutable default for layers_cladding is benign here
    # (never mutated) and is kept so @autoname-generated names stay stable.
    c = Component()
    half = width / 2
    # Core rectangle spanning [0, length] x [-half, +half].
    c.add_polygon([(0, -half), (length, -half), (length, half), (0, half)], layer=layer)

    clad_half = half + cladding_offset
    for clad_layer in layers_cladding:
        c.add_polygon(
            [(0, -clad_half), (length, -clad_half), (length, clad_half), (0, clad_half)],
            layer=clad_layer,
        )

    # One optical port on each end, centered on the core.
    c.add_port(name="W0", midpoint=[0, 0], width=width, orientation=180, layer=layer)
    c.add_port(name="E0", midpoint=[length, 0], width=width, orientation=0, layer=layer)
    c.width = width
    c.length = length
    return c
@autoname
def wg_shallow_rib(width=0.5, layer=pp.LAYER.SLAB150, layers_cladding=[], **kwargs):
    """Straight waveguide drawn on the shallow-rib (SLAB150) layer, no cladding."""
    width = pp.bias.width(width)
    return waveguide(
        width=width, layer=layer, layers_cladding=layers_cladding, **kwargs
    )
@autoname
def wg_deep_rib(width=0.5, layer=pp.LAYER.SLAB90, layers_cladding=[], **kwargs):
    """Straight waveguide drawn on the deep-rib (SLAB90) layer, no cladding."""
    width = pp.bias.width(width)
    return waveguide(
        width=width, layer=layer, layers_cladding=layers_cladding, **kwargs
    )
@autoname
def waveguide_biased(width=0.5, **kwargs):
    """Straight waveguide with the process width bias applied before drawing."""
    width = pp.bias.width(width)
    return waveguide(width=width, **kwargs)
def _arbitrary_straight_waveguide(length, windows):
    """Build a straight waveguide from arbitrary stacked layer windows.

    windows: [(y_start, y_stop, layer), ...]

    The component name embeds an md5 of the window list so @autoname-less
    callers still get a deterministic, unique cell name.  (A '|' extraction
    artifact that split 'layer0' in the original has been removed.)
    """
    md5 = hashlib.md5()
    for e in windows:
        md5.update(str(e).encode())

    component = Component()
    component.name = "ARB_SW_L{}_HASH{}".format(length, md5.hexdigest())
    y_min, y_max, layer0 = windows[0]
    y_min, y_max = min(y_min, y_max), max(y_min, y_max)

    # Add one port on each side centered at y=0
    for y_start, y_stop, layer in windows:
        w = abs(y_stop - y_start)
        y = (y_stop + y_start) / 2
        _wg = hline(length=length, width=w, layer=layer).ref()
        _wg.movey(y)
        component.add(_wg)
        component.absorb(_wg)
        # Track the overall vertical extent for the port width.
        y_min = min(y_stop, y_start, y_min)
        y_max = max(y_stop, y_start, y_max)
    width = y_max - y_min

    component.add_port(
        name="W0", midpoint=[0, 0], width=width, orientation=180, layer=layer0
    )
    component.add_port(
        name="E0", midpoint=[length, 0], width=width, orientation=0, layer=layer0
    )

    return component
@autoname
def waveguide_slab(length=10.0, width=0.5, cladding=2.0, slab_layer=pp.LAYER.SLAB90):
    """Straight waveguide with a slab window extending `cladding` past the core."""
    width = pp.bias.width(width)
    ymin = width / 2
    ymax = ymin + cladding
    windows = [(-ymin, ymin, pp.LAYER.WG), (-ymax, ymax, slab_layer)]
    return _arbitrary_straight_waveguide(length=length, windows=windows)
@autoname
def waveguide_trenches(
    length=10.0,
    width=0.5,
    layer=pp.LAYER.WG,
    trench_width=3.0,
    trench_offset=0.2,
    trench_layer=pp.LAYER.SLAB90,
):
    """Straight waveguide with a trench window on each side of the core.

    (The function name was corrupted by an extraction artifact that split
    'waveguide_trenches' in two; restored here.)
    """
    width = pp.bias.width(width)
    w = width / 2
    ww = w + trench_width
    wt = ww + trench_offset
    windows = [(-ww, ww, layer), (-wt, -w, trench_layer), (w, wt, trench_layer)]
    return _arbitrary_straight_waveguide(length=length, windows=windows)
waveguide_ridge = waveguide_slab
@autoname
def waveguide_slot(length=10.0, width=0.5, gap=0.2, layer=pp.LAYER.WG):
    """Slot waveguide: two parallel cores of `width` separated by `gap`."""
    width = pp.bias.width(width)
    gap = pp.bias.gap(gap)
    a = width / 2
    d = a + gap / 2
    windows = [(-a - d, a - d, layer), (-a + d, a + d, layer)]
    return _arbitrary_straight_waveguide(length=length, windows=windows)
def _demo_waveguide():
    """Write a default waveguide to GDS (manual smoke test helper)."""
    c = waveguide()
    pp.write_gds(c)
    return c
if __name__ == "__main__":
    # NOTE(review): waveguide() as defined above accepts no 'pins' argument;
    # presumably the @autoname wrapper consumes extra kwargs -- confirm.
    c = waveguide(length=4, pins=True)
    pp.show(c)
    # print(c.hash_geometry())
    # pp.show(c)
    # print(c.ports)
    # cc = pp.routing.add_fiber_array(c)
    # pp.show(cc)
    # c = waveguide_slab()
    # c = waveguide_trenches()
    # c = waveguide()
    # c = waveguide_slot()
    # c = waveguide_slot(length=11.2, width=0.5)
    # c = waveguide_slot(length=11.2, width=0.5)
    # pp.show(c)
|
nicogid/Projet4Moc1 | api/projet-old/distributor_api/migrations/0011_sensor_id_distributor.py | Python | mit | 605 | 0.001653 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-20 13:13
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    # Restores Sensor.id_distributor (removed in 0010) as a FK to Distributor,
    # using default=1 for existing rows.  Two extraction artifacts were fixed:
    # the split 'unicode_literals' import and the split 'models.ForeignKey'.

    dependencies = [
        ('distributor_api', '0010_remove_sensor_id_distributor'),
    ]

    operations = [
        migrations.AddField(
            model_name='sensor',
            name='id_distributor',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='distributor_api.Distributor'),
        ),
    ]
|
mathiasertl/django-xmpp-server-list | account/migrations/0001_initial.py | Python | gpl-3.0 | 2,915 | 0.004803 | from django.db import models, migrations
import django.utils.timezone
import django.core.validators
class Migration(migrations.Migration):
    # Initial migration creating the custom LocalUser model (a full
    # auth-user clone plus jid/email confirmation fields).  Two extraction
    # artifacts were fixed: the split 'auto_created=True' and the split
    # 'django.utils.timezone.now' default.

    dependencies = [
        ('auth', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='LocalUser',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, max_length=30, verbose_name='username', validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')])),
                ('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
                ('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
                ('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('jid', models.CharField(help_text=b'Required, a confirmation message will be sent to this address.', max_length=128)),
                ('email_confirmed', models.BooleanField(default=False)),
                ('jid_confirmed', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of his/her group.', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
            },
            bases=(models.Model,),
        ),
    ]
|
Fierydemise/ShadowCraft-Engine | shadowcraft/calcs/__init__.py | Python | lgpl-3.0 | 30,637 | 0.00457 | from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import zip
from builtins import str
from builtins import object
import gettext
import builtins
import math
import os
import subprocess
_ = gettext.gettext
from shadowcraft.core import exceptions
from shadowcraft.objects import class_data
from shadowcraft.objects import talents
from shadowcraft.objects import artifact
from shadowcraft.objects import procs
from shadowcraft.objects.procs import InvalidProcException
class DamageCalculator(object):
# This method holds the general interface for a damage calculator - the
# sorts of parameters and calculated values that will be need by many (or
# most) classes if they implement a damage calculator using this framework.
# Not saying that will happen, but I want to leave my options open.
# Any calculations that are specific to a particular class should go in
# calcs.<class>.<Class>DamageCalculator instead - for an example, see
# calcs.rogue.RogueDamageCalculator
# Override this in your class specfic subclass to list appropriate stats
# possible values are agi, str, spi, int, haste, crit, mastery
default_ep_stats = []
# normalize_ep_stat is the stat with value 1 EP, override in your subclass
normalize_ep_stat = None
    def __init__(self, stats, talents, traits, buffs, race, spec, settings=None, level=110, target_level=None, char_class='rogue'):
        """Assemble a calculator from character state (stats/talents/buffs/...).

        target_level defaults to level + 3 when not given explicitly.
        """
        self.WOW_BUILD_TARGET = '7.3.0' # should reflect the game patch being targetted
        self.SHADOWCRAFT_BUILD = self.get_version_string()
        self.tools = class_data.Util()
        self.stats = stats
        self.talents = talents
        self.traits = traits
        self.buffs = buffs
        self.race = race
        self.char_class = char_class
        self.spec = spec
        self.settings = settings
        self.target_level = target_level if target_level else level+3 #assumes 3 levels higher if not explicit
        #racials
        if self.race.race_name == 'undead':
            self.stats.procs.set_proc('touch_of_the_grave')
        if self.race.race_name == 'goblin':
            self.stats.procs.set_proc('rocket_barrage')
        self.level_difference = max(self.target_level - level, 0)
        self.base_one_hand_miss_rate = 0
        self.base_parry_chance = .01 * self.level_difference
        self.base_dodge_chance = 0
        self.dw_miss_penalty = .19
        self._set_constants_for_class()
        # Assigning self.level triggers __setattr__ -> _set_constants_for_level().
        self.level = level
        self.recalculate_hit_constants()
        self.base_block_chance = .03 + .015 * self.level_difference
    def __setattr__(self, name, value):
        # Intercept attribute writes so that changing 'level' automatically
        # re-derives all level-dependent constants.
        object.__setattr__(self, name, value)
        if name == 'level':
            self._set_constants_for_level()
    def __getattr__(self, name):
        # Any status we haven't assigned a value to, we don't have.
        if name == 'calculating_ep':
            return False
        # NOTE(review): __getattr__ only runs after normal lookup fails, so this
        # call re-raises AttributeError; the result is never returned.
        object.__getattribute__(self, name)
    def _set_constants_for_level(self):
        """Propagate the new level to owned objects and re-derive level constants."""
        self.buffs.level = self.level
        self.stats.level = self.level
        self.race.level = self.level
        self.stats.gear_buffs.level = self.level
        # calculate and cache the level-dependent armor mitigation parameter
        self.attacker_k_value = self.tools.get_k_value(self.level)
        # target level dependent constants
        self.target_base_armor = self.tools.get_base_armor(self.target_level)
        #Crit suppression removed in Legion
        #Source: http://blue.mmo-champion.com/topic/409203-theorycrafting-questions/#post274
        self.crit_reduction = 0
    def _set_constants_for_class(self):
        """Cache class-specific constants (currently just the game class)."""
        # These factors are class-specific. Generaly those go in the class module,
        # unless it's basic stuff like combat ratings or base stats that we can
        # datamine for all classes/specs at once.
        self.game_class = self.talents.game_class
def get_version_string(self):
try:
thisdir = os.path.dirname(os.path.abspath(__file__))
build = subprocess.check_output(['git', 'rev-list', '--count', 'HEAD'], cwd=thisdir).strip()
commit = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'], cwd=thisdir).strip()
if build.isdigit() and commit:
return '{0} ({1})'.format(build, commit)
except:
pass
return 'UNKNOWN'
    def recalculate_hit_constants(self):
        """Derive the dual-wield miss rate from the base miss rate + DW penalty."""
        self.base_dw_miss_rate = self.base_one_hand_miss_rate + self.dw_miss_penalty
def get_adv_param(self, type, default_val, min_bound=-10000, max_bound=10000, ignore_bounds=False):
if type in self.settings.adv_params and not ignore_bounds:
return max( min(float(self.settings.adv_params[type]), max_bound), min_bound )
elif type in self.settings.adv_params:
return self.settings.adv_params[type]
else:
return default_val
raise exceptions.InvalidInputException(_('Improperly defined parameter type: '+type))
    def add_exported_data(self, damage_breakdown):
        #used explicitly to highjack data outputs to export additional data.
        # NOTE(review): 'get_version_number' is not defined anywhere visible in
        # this file; given __getattr__ above, the lookup would raise
        # AttributeError -- possibly 'get_version_string' was intended. Confirm.
        if self.get_version_number:
            damage_breakdown['version_' + self.WOW_BUILD_TARGET + '_' + self.SHADOWCRAFT_BUILD] = [.0, 0]
def set_rppm_uptime(self, proc):
#http://iam.yellingontheinternet.com/2013/04/12/theorycraft-201-advanced-rppm/
haste = 1.
if proc.haste_scales:
haste *= self.stats.get_haste_multiplier_from_rating(self.base_stats['haste'] * self.stat_multipliers['haste']) * self.true_haste_mod
if proc.att_spd_scales:
haste *= 1.4
#The 1.1307 is a value that increases the proc rate due to bad luck prevention. It /should/ be constant among all rppm proc styles
if not proc.icd:
if proc.max_stacks <= 1:
proc.uptime = 1.1307 * (1 - math.e ** (-1 * haste * proc.get_rppm_proc_rate(spec=self.spec) * proc.duration / 60))
else:
lambd = haste * proc.get_rppm_proc_rat | e(spec=self.spec) * proc.duration / 60
| e_lambda = math.e ** lambd
e_minus_lambda = math.e ** (-1 * lambd)
proc.uptime = 1.1307 * (e_lambda - 1) * (1 - ((1 - e_minus_lambda) ** proc.max_stacks))
else:
mean_proc_time = 60 / (haste * proc.get_rppm_proc_rate(spec=self.spec)) + proc.icd - min(proc.icd, 10)
proc.uptime = 1.1307 * proc.duration / mean_proc_time
    def set_uptime(self, proc, attacks_per_second, crit_rates):
        """Compute and store proc.uptime for any proc type.

        Real-PPM procs use the RPPM model above; other procs are derived from
        the attack stream and crit rates.
        """
        if proc.is_real_ppm():
            self.set_rppm_uptime(proc)
        else:
            procs_per_second = self.get_procs_per_second(proc, attacks_per_second, crit_rates)

            if proc.icd:
                proc.uptime = proc.duration / (proc.icd + 1 / procs_per_second)
            else:
                if procs_per_second >= 1:
                    self.set_uptime_for_ramping_proc(proc, procs_per_second)
                else:
                    # See http://elitistjerks.com/f31/t20747-advanced_rogue_mechanics_discussion/#post621369
                    # for the derivation of this formula.
                    q = 1 - procs_per_second
                    Q = q ** proc.duration
                    if Q < .0001:
                        self.set_uptime_for_ramping_proc(proc, procs_per_second)
                    else:
                        P = 1 - Q
                        proc.uptime = P * (1 - P ** proc.max_stacks) / Q
def average_damage_breakdowns(self, aps_dict, denom=180):
final_breakdown = {}
#key: phase name
#number: place in tuple... tuple = (phase_length, dps_breakdown)
#entry: DPS skill_name
#denom: total duration (to divide phase duration by it)
for key in aps_dict:
for entry in aps_dict[key][1]:
if entry in final_breakdown:
final_breakdown[entry] += aps_dict[key][1][entry] * (aps_dict[key][0] / denom)
else:
final_breakdown[entry] = aps_dict[key][1][entry] * (aps_dict[key][0] / denom |
iLoop2/ResInsight | ThirdParty/Ert/devel/python/test/ert_tests/ecl/test_grdecl.py | Python | gpl-3.0 | 3,995 | 0.00776 | #!/usr/bin/env python
# Copyright (C) 2011 Statoil ASA, Norway.
#
# The file 'sum_test.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
import os
from ert.ecl import EclKW
from ert.test import ExtendedTestCase
class GRDECLTest(ExtendedTestCase):
    """Round-trip and fseek tests for EclKW GRDECL reading/writing.

    (Two extraction artifacts in test_reload -- a stray '|' and a split
    'write_grdecl' call -- have been repaired.)
    """

    def setUp(self):
        self.src_file = self.createTestPath("Statoil/ECLIPSE/Gurbat/include/example_permx.GRDECL")
        self.file_list = []

    def addFile(self, filename):
        # Register a file for removal in tearDown.
        self.file_list.append(filename)

    def tearDown(self):
        for f in self.file_list:
            if os.path.exists(f):
                os.unlink(f)

    def test_Load(self):
        kw = EclKW.read_grdecl(open(self.src_file, "r"), "PERMX")
        self.assertTrue(kw)

    def test_reload(self):
        # Write the keyword out, read it back, write it again: both files
        # must be identical.
        kw = EclKW.read_grdecl(open(self.src_file, "r"), "PERMX")
        tmp_file1 = "/tmp/permx1.grdecl"
        tmp_file2 = "/tmp/permx2.grdecl"
        self.addFile(tmp_file1)
        self.addFile(tmp_file2)

        fileH = open(tmp_file1, "w")
        kw.write_grdecl(fileH)
        fileH.close()

        kw1 = EclKW.read_grdecl(open(tmp_file1, "r"), "PERMX")

        fileH = open(tmp_file2, "w")
        kw1.write_grdecl(fileH)
        fileH.close()

        self.assertFilesAreEqual(tmp_file1, tmp_file2)

    def test_fseek(self):
        file = open(self.src_file, "r")
        self.assertTrue(EclKW.fseek_grdecl(file, "PERMX"))
        self.assertFalse(EclKW.fseek_grdecl(file, "PERMY"))
        file.close()

        file = open(self.src_file, "r")
        kw1 = EclKW.read_grdecl(file, "PERMX")
        self.assertFalse(EclKW.fseek_grdecl(file, "PERMX"))
        self.assertTrue(EclKW.fseek_grdecl(file, "PERMX", rewind=True))
        file.close()

    def test_fseek2(self):
        test_src = self.createTestPath("local/ECLIPSE/grdecl-test/test.grdecl")
        # Test kw at the the very start
        file = open(test_src, "r")
        self.assertTrue(EclKW.fseek_grdecl(file, "PERMX"))

        # Test commented out kw:
        self.assertFalse(EclKW.fseek_grdecl(file, "PERMY"))
        self.assertFalse(EclKW.fseek_grdecl(file, "PERMZ"))

        # Test ignore not start of line:
        self.assertTrue(EclKW.fseek_grdecl(file, "MARKER"))
        self.assertFalse(EclKW.fseek_grdecl(file, "PERMXYZ"))

        # Test rewind
        self.assertFalse(EclKW.fseek_grdecl(file, "PERMX", rewind=False))
        self.assertTrue(EclKW.fseek_grdecl(file, "PERMX", rewind=True))

        # Test multiline comments + blanks
        self.assertTrue(EclKW.fseek_grdecl(file, "LASTKW"))

    def test_fseek_dos(self):
        test_src = self.createTestPath("local/ECLIPSE/grdecl-test/test.grdecl_dos")  # File formatted with \r\n line endings.
        # Test kw at the the very start
        file = open(test_src, "r")
        self.assertTrue(EclKW.fseek_grdecl(file, "PERMX"))

        # Test commented out kw:
        self.assertFalse(EclKW.fseek_grdecl(file, "PERMY"))
        self.assertFalse(EclKW.fseek_grdecl(file, "PERMZ"))

        # Test ignore not start of line:
        self.assertTrue(EclKW.fseek_grdecl(file, "MARKER"))
        self.assertFalse(EclKW.fseek_grdecl(file, "PERMXYZ"))

        # Test rewind
        self.assertFalse(EclKW.fseek_grdecl(file, "PERMX", rewind=False))
        self.assertTrue(EclKW.fseek_grdecl(file, "PERMX", rewind=True))

        # Test multiline comments + blanks
        self.assertTrue(EclKW.fseek_grdecl(file, "LASTKW"))
|
awildeone/Wusif | pyborg.py | Python | gpl-2.0 | 42,867 | 0.034893 | # -*- coding: utf-8 -*-
#
# PyBorg: The python AI bot.
#
# Copyright (c) 2000, 2006 Tom Morton, Sebastien Dailly
#
#
# This bot was inspired by the PerlBorg, by Eric Bock.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Tom Morton <tom@moretom.net>
# Seb Dailly <seb.dailly@gmail.com>
#
from random import *
import ctypes
import sys
import os
import fileinput
import marshal # buffered marshal is bloody fast. wish i'd found this before :)
import struct
import time
import zipfile
import re
import threading
timers_started = False  # NOTE(review): module-level flag; where it is set/used is outside this chunk
def to_sec(s):
    """Convert a duration string such as "30s", "5m", "2h", "1d" or "1w" to seconds."""
    unit_seconds = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
    number, unit = int(s[:-1]), s[-1]
    return number * unit_seconds[unit]
# This will make the !learn and !teach magic work ;)
def dbread(key):
    """Look up *key* in qdb.dat and return one stored reply, or None.

    Each line of qdb.dat is "question:=:reply[:=:reply...]".  A line matches
    when the key and the stored question contain each other (case-insensitive)
    and the key holds a word of at least two characters; when a line stores
    several replies one is picked at random.
    """
    value = None
    if os.path.isfile("qdb.dat"):
        # Hoisted out of the loop: the pattern and the key never change.
        key_is_searchable = re.search(r'\b.{2,}\b', key, re.IGNORECASE)
        # 'with' guarantees the handle is closed (the original used a manual
        # open/close and shadowed the 'file' builtin).
        with open("qdb.dat") as fh:
            for line in fh.readlines():
                parts = line.split(":=:")
                reps = len(parts) - 1
                data = parts[0]
                if key_is_searchable:
                    if key.lower() in data.lower() or data.lower() in key.lower():
                        if reps > 1:
                            value = parts[randint(1, reps)].strip()
                        else:
                            value = parts[1].strip()
                        break
                else:
                    value = None
                    break
    return value
def dbwrite(key, value):
    # Store *value* as a reply for *key* in qdb.dat: unknown keys are appended
    # as a new "key:=:value" line; for keys that already match an existing
    # question the reply is appended to that line via an in-place rewrite.
    # NOTE(review): Python 2 print statements -- fileinput's inplace mode
    # redirects stdout into the rewritten file.
    if dbread(key) is None:
        file = open("qdb.dat", "a")
        file.write(str(key)+":=:"+str(value)+"\n")
        file.close()
    else:
        for line in fileinput.input("qdb.dat",inplace=1):
            data = line.split(":=:")[0]
            dlen = r'\b.{2,}\b'
            if re.search(dlen, key, re.IGNORECASE):
                if key.lower() in data.lower() or data.lower() in key.lower():
                    # Matching line: emit it with the new reply appended.
                    print str(line.strip())+":=:"+str(value)
                else:
                    # Non-matching line: emit unchanged.
                    print line.strip()
# Some more machic to fix some common issues with the teach system
def teach_filter(message):
    """Escape emoticon sequences containing '|' before storing a taught reply.

    Each sequence is swapped for a private $-code so the '|' character cannot
    be misinterpreted later; unfilter_reply() maps the codes back.
    """
    escapes = (
        ("||", "$C4"),
        ("|-:", "$b7"),
        (":-|", "$b6"),
        (";-|", "$b5"),
        ("|:", "$b4"),
        (";|", "$b3"),
        ("=|", "$b2"),
        (":|", "$b1"),
    )
    for plain, code in escapes:
        message = message.replace(plain, code)
    return message
def unfilter_reply(message):
    """
    This undoes the phrase mangling the central code does
    so the bot sounds more human :P
    """
    # Had to write my own initial capitalizing code *sigh*
    message = "%s%s" % (message[:1].upper(), message[1:])
    # Fixes punctuation
    message = message.replace(" ?", "?")
    message = message.replace(" !", "!")
    message = message.replace(" .", ".")
    message = message.replace(" ,", ",")
    message = message.replace(" : ", ": ")
    message = message.replace(" ; ", "; ")
    # Fixes I and I contractions
    message = message.replace(" i ", " I ")
    message = message.replace("i'", "I'")
    # Fixes the common issues with the teach system.
    # BUGFIX: the $b6/$b5 decodings were swapped relative to teach_filter(),
    # which encodes ":-|" -> "$b6" and ";-|" -> "$b5"; decode symmetrically so
    # taught emoticons round-trip unchanged.
    message = message.replace("$C4", "||")
    message = message.replace("$b7", "|-:")
    message = message.replace("$b6", ":-|")
    message = message.replace("$b5", ";-|")
    message = message.replace("$b4", "|:")
    message = message.replace("$b3", ";|")
    message = message.replace("$b2", "=|")
    message = message.replace("$b1", ":|")
    # Fixes emoticons that don't work in lowercase
    emoticon = re.search("(:|x|;|=|8){1}(-)*(p|x|d){1}", message, re.IGNORECASE)
    if not emoticon == None:
        emoticon = "%s" % emoticon.group()
        message = message.replace(emoticon, emoticon.upper())
    # Fixes the annoying XP capitalization in words...
    message = message.replace("XP", "xp")
    message = message.replace(" xp", " XP")
    message = message.replace("XX", "xx")
    return message
def filter_message(message, bot):
    """
    Filter a message body so it is suitable for learning from and
    replying to. This involves removing confusing characters and
    padding ? ! . , with spaces so they become separate tokens.

    NOTE(review): despite the original description, no lower-casing
    happens in this function -- presumably done elsewhere; confirm.
    The `bot` argument supplies settings (aliases, process_with).
    """
    # remove garbage
    message = message.replace("\"", "") # remove "s
    message = message.replace("\n", " ") # remove newlines
    message = message.replace("\r", " ") # remove carriage returns
    # remove matching brackets (unmatched ones are likely smileys :-) *cough*
    # should except out when not found.
    index = 0
    try:
        # Repeatedly find a "(" and delete it together with the first
        # ")" that follows it; a missing bracket raises ValueError,
        # which terminates the loop.
        while 1:
            index = message.index("(", index)
            # Remove matching ) bracket
            i = message.index(")", index+1)
            message = message[0:i]+message[i+1:]
            # And remove the (
            message = message[0:index]+message[index+1:]
    except ValueError, e:
        pass
    # Strips out mIRC Control codes (bold/underline/reverse/reset and
    # \x03 color codes with optional "NN[,NN]" arguments)
    ccstrip = re.compile("\x1f|\x02|\x12|\x0f|\x16|\x03(?:\d{1,2}(?:,\d{1,2})?)?", re.UNICODE)
    message = ccstrip.sub("", message)
    # Few of my fixes...
    message = message.replace(": ", " : ")
    message = message.replace("; ", " ; ")
    # ^--- because some : and ; might be smileys...
    message = message.replace("`", "'")
    message = message.replace("?", " ? ")
    message = message.replace("!", " ! ")
    message = message.replace(".", " . ")
    message = message.replace(",", " , ")
    # Fixes broken emoticons that the punctuation padding above just
    # split apart (order matters: these must run after the padding).
    message = message.replace("^ . ^", "^.^")
    message = message.replace("- . -", "-.-")
    message = message.replace("0 . o", "0.o")
    message = message.replace("o . o", "o.o")
    message = message.replace("O . O", "O.O")
    message = message.replace("< . <", "<.<")
    message = message.replace("> . >", ">.>")
    message = message.replace("> . <", ">.<")
    message = message.replace(": ?", ":?")
    message = message.replace(":- ?", ":-?")
    message = message.replace(", , l , ,", ",,l,,")
    message = message.replace("@ . @", "@.@")
    words = message.split()
    if bot.settings.process_with == "pyborg":
        # Replace any word matching a configured alias with its
        # canonical form (whole-word match only).
        for x in xrange(0, len(words)):
            #is there aliases ?
            for z in bot.settings.aliases.keys():
                for alias in bot.settings.aliases[z]:
                    pattern = "^%s$" % alias
                    if re.search(pattern, words[x]):
                        words[x] = z
    message = " ".join(words)
    return message
class pyborg:
import re
import cfgfile
ver_string = "PyBorg version 1.1.0"
saves_version = "1.1.0"
# Main command list
commandlist = "Pyborg commands:\n!checkdict, !contexts, !help, !known, !learning, !rebuilddict, !replace, !unlearn, !purge, !version, !words, !limit, !alias, !save, !censor, !uncensor, !learn, !teach, !forget, !find, !responses"
commanddict = {
"help": "Owner command. Usage: !help [command]\nPrints information about using a command, or a list of commands if no command is given",
"version": "Usage: !version\nDisplay what version of Pyborg we are running",
"words": "Usage: !words\nDisplay how many words are known",
"known": "Usage: !known word1 [word2 [...]]\nDisplays if one or more words are known, and how many contexts are known",
"contexts": "Owner command. Usage: !contexts <phrase>\nPrint contexts containing <phrase>",
"unlearn": "Owner command. Usage: !unlearn <expression>\nRemove all occurances of a word or expression from the dictionary. For example '!unlearn of of' would remove all contexts containing double 'of's",
"purge": "Owner command. Usage: !purge [number]\nRemove all occurances of the words that appears in less than <number> contexts",
"replace": "Owner command. Usage: !replace <old> <new>\nReplace all occurances of word <old> in the dictionary with <new>",
"learning": "Owner command. Usage: !learning [on|off]\nToggle bot learning. Without arguments shows the current setting",
"checkdict": "Owner command. Usage: !checkdict\nChecks the dictionary for broken links. Shouldn't happen, but worth trying if | you ge | t KeyError crashes",
"rebuilddict": "Owner command. Usage: !rebuilddict\nRebuilds dictionary links from the lines of k |
branto1/ceph-deploy | ceph_deploy/hosts/centos/install.py | Python | mit | 6,289 | 0.002067 | from ceph_deploy.util import templates
from ceph_deploy.lib import remoto
from ceph_deploy.hosts.common import map_components
from ceph_deploy.util.paths import gpg
NON_SPLIT_PACKAGES = ['ceph-osd', 'ceph-mon', 'ceph-mds']
def rpm_dist(distro):
    """Return the RPM distribution tag (e.g. ``el7``) for *distro*.

    Falls back to ``el6`` for unrecognized distros or for releases
    older than major version 6.
    """
    release = distro.normalized_release
    known_names = ('redhat', 'centos', 'scientific')
    if distro.normalized_name in known_names and release.int_major >= 6:
        return 'el%s' % release.major
    return 'el6'
def repository_url_part(distro):
    """Return the distro-specific part of the ceph.com repository URL.

    Historically everything CentOS, RHEL, and Scientific was mapped to
    ``el6`` URLs; for major release >= 6, RHEL maps to ``rhelN`` while
    CentOS and Scientific Linux map to ``elN``.  Anything else falls
    back to the historical ``el6`` part.
    """
    release = distro.normalized_release
    if release.int_major >= 6:
        # RHEL gets its own URL namespace; the CentOS clones share "el".
        prefix_by_name = {'redhat': 'rhel', 'centos': 'el', 'scientific': 'el'}
        prefix = prefix_by_name.get(distro.normalized_name)
        if prefix is not None:
            return prefix + release.major
    return 'el6'
def install(distro, version_kind, version, adjust_repos, **kw):
    """Install ceph packages on the remote host described by *distro*.

    :param distro: connected distro object (provides conn, packager,
                   release, machine_type)
    :param version_kind: 'stable', 'testing' or 'dev'
    :param version: release name/ref used to build the repo URL
    :param adjust_repos: when True, configure EPEL, GPG keys and the
                         ceph.repo file before installing
    :param kw: may contain 'components' to map onto split packages

    NOTE(review): for a non-dev version_kind other than
    'stable'/'testing', ``url`` would be unbound when the ``rpm -Uvh``
    call below formats it (NameError) -- confirm callers only pass the
    three documented kinds.
    """
    packages = map_components(
        NON_SPLIT_PACKAGES,
        kw.pop('components', [])
    )
    logger = distro.conn.logger
    release = distro.release
    machine = distro.machine_type
    repo_part = repository_url_part(distro)
    dist = rpm_dist(distro)
    distro.packager.clean()
    # Get EPEL installed before we continue:
    if adjust_repos:
        distro.packager.install('epel-release')
        distro.packager.install('yum-plugin-priorities')
        distro.conn.remote_module.enable_yum_priority_obsoletes()
        logger.warning('check_obsoletes has been enabled for Yum priorities plugin')
    # Stable/testing builds are signed with the release key; anything
    # else uses the autobuild key.
    if version_kind in ['stable', 'testing']:
        key = 'release'
    else:
        key = 'autobuild'
    if adjust_repos:
        if version_kind != 'dev':
            distro.packager.add_repo_gpg_key(gpg.url(key))
            if version_kind == 'stable':
                url = 'http://ceph.com/rpm-{version}/{repo}/'.format(
                    version=version,
                    repo=repo_part,
                )
            elif version_kind == 'testing':
                url = 'http://ceph.com/rpm-testing/{repo}/'.format(repo=repo_part)
            # Install the ceph-release RPM, which drops the repo file
            # on the remote host; --replacepkgs makes this idempotent.
            remoto.process.run(
                distro.conn,
                [
                    'rpm',
                    '-Uvh',
                    '--replacepkgs',
                    '{url}noarch/ceph-release-1-0.{dist}.noarch.rpm'.format(url=url, dist=dist),
                ],
            )
        if version_kind == 'dev':
            logger.info('skipping install of ceph-release package')
            logger.info('repo file will be created manually')
            # Dev builds come from gitbuilder; write the repo file
            # directly instead of installing ceph-release.
            mirror_install(
                distro,
                'http://gitbuilder.ceph.com/ceph-rpm-centos{release}-{machine}-basic/ref/{version}/'.format(
                    release=release.split(".", 1)[0],
                    machine=machine,
                    version=version),
                gpg.url(key),
                adjust_repos=True,
                extra_installs=False
            )
        # set the right priority
        logger.warning('ensuring that /etc/yum.repos.d/ceph.repo contains a high priority')
        distro.conn.remote_module.set_repo_priority(['Ceph', 'Ceph-noarch', 'ceph-source'])
        logger.warning('altered ceph.repo priorities to contain: priority=1')
    if packages:
        distro.packager.install(packages)
def mirror_install(distro, repo_url, gpg_url, adjust_repos, extra_installs=True, **kw):
    """Configure a ceph.repo file pointing at a mirror and optionally
    install the ceph packages.

    :param repo_url: base URL of the mirror (trailing slashes stripped)
    :param gpg_url: URL of the GPG key referenced by the repo file
    :param adjust_repos: when True, write the repo file and GPG key
    :param extra_installs: when False, skip the package installation
                           (used by install() for dev builds)
    """
    packages = map_components(
        NON_SPLIT_PACKAGES,
        kw.pop('components', [])
    )
    repo_url = repo_url.strip('/')  # Remove trailing slashes
    distro.packager.clean()
    if adjust_repos:
        distro.packager.add_repo_gpg_key(gpg_url)
        # Render the ceph.repo template and push it to the remote host.
        ceph_repo_content = templates.ceph_repo.format(
            repo_url=repo_url,
            gpg_url=gpg_url
        )
        distro.conn.remote_module.write_yum_repo(ceph_repo_content)
        # set the right priority
        if distro.packager.name == 'yum':
            distro.packager.install('yum-plugin-priorities')
            distro.conn.remote_module.set_repo_priority(['Ceph', 'Ceph-noarch', 'ceph-source'])
            distro.conn.logger.warning('altered ceph.repo priorities to contain: priority=1')
    if extra_installs and packages:
        distro.packager.install(packages)
def repo_install(distro, reponame, baseurl, gpgkey, **kw):
    """Write a custom yum repository file on the remote host.

    :param reponame: repo id; also used for the ``<reponame>.repo``
                     file name
    :param baseurl: repository base URL (trailing slashes stripped)
    :param gpgkey: GPG key URL; falsy disables the key import
    :param kw: optional overrides (name, enabled, gpgcheck, proxy,
               priority, install_ceph, components, ...); unknown keys
               are passed through to the repo template
    """
    packages = map_components(
        NON_SPLIT_PACKAGES,
        kw.pop('components', [])
    )
    logger = distro.conn.logger
    # Get some defaults
    name = kw.pop('name', '%s repo' % reponame)
    enabled = kw.pop('enabled', 1)
    gpgcheck = kw.pop('gpgcheck', 1)
    install_ceph = kw.pop('install_ceph', False)
    proxy = kw.pop('proxy', '') # will get ignored if empty
    _type = 'repo-md'
    baseurl = baseurl.strip('/')  # Remove trailing slashes
    distro.packager.clean()
    if gpgkey:
        distro.packager.add_repo_gpg_key(gpgkey)
    # Render the custom repo file; remaining kw entries become extra
    # key=value lines in the repo section.
    repo_content = templates.custom_repo(
        reponame=reponame,
        name=name,
        baseurl=baseurl,
        enabled=enabled,
        gpgcheck=gpgcheck,
        _type=_type,
        gpgkey=gpgkey,
        proxy=proxy,
        **kw
    )
    distro.conn.remote_module.write_yum_repo(
        repo_content,
        "%s.repo" % reponame
    )
    repo_path = '/etc/yum.repos.d/{reponame}.repo'.format(reponame=reponame)
    # set the right priority
    if kw.get('priority'):
        if distro.packager.name == 'yum':
            distro.packager.install('yum-plugin-priorities')
        distro.conn.remote_module.set_repo_priority([reponame], repo_path)
        logger.warning('altered {reponame}.repo priorities to contain: priority=1'.format(
            reponame=reponame)
        )
    # Some custom repos do not need to install ceph
    if install_ceph and packages:
        distro.packager.install(packages)
|
WilliamQLiu/django-cassandra-prototype | cass-prototype/reddit/management/commands/cassandra_initialize.py | Python | mit | 522 | 0.003831 | from django.core.management.base import BaseCommand
from cassandra.cluster import Cluster, NoHostAvailable
class Command(BaseCommand):
    """Management command that checks connectivity to a local
    Cassandra cluster.

    See http://datastax.github.io/python-driver/getting_started.html
    """

    def handle(self, *args, **options):
        print("Running Cassandra Initialize")
        cluster = Cluster(['127.0.0.1'])
        try:
            cluster.connect()
        except NoHostAvailable:
            print("No Cassandra Host Available: Check Cassandra has started")
        # Always release the cluster's connection resources.
        cluster.shutdown()
|
brsbilgic/django-quick-reports | quick_reports_demo/main/migrations/0001_initial.py | Python | mit | 1,707 | 0.002929 | # -*- codin | g: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settin | gs
class Migration(migrations.Migration):
    """Initial schema for the demo app: Article and Comment models."""

    dependencies = [
        # The user model is swappable, so depend on whatever the
        # project configures as AUTH_USER_MODEL.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=190)),
                ('body', models.TextField()),
                ('slug', models.SlugField(max_length=190)),
                # 0 = Draft, 1 = Published; new articles start as drafts.
                ('status', models.IntegerField(default=0, choices=[(0, b'Draft'), (1, b'Published')])),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now)),
                ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                # Newest articles first by default.
                'ordering': ['-created_at'],
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=190)),
                ('text', models.TextField()),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now)),
                ('article', models.ForeignKey(to='main.Article')),
            ],
            options={
                # Newest comments first by default.
                'ordering': ['-created_at'],
            },
            bases=(models.Model,),
        ),
    ]
|
basti2342/simple-ledstrip | extended-dioder.py | Python | mpl-2.0 | 5,582 | 0.03565 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import colorsys, time, random, signal
from dioder import Dioder, SerialLogic
from threading import Thread
class ExtendedDioder(Dioder, Thread):
    """LED-strip controller thread with a set of animation modes.

    Modes are selected via POSIX realtime signals (see setMode); the
    run() loop keeps executing the current mode until it is cancelled
    by a mode switch or the thread is stopped.

    NOTE(review): assumes Dioder provides setColor/setStripColor/show
    and a ``limits`` (min, max) LED index pair -- defined elsewhere.
    """
    def __init__(self, *args, **kwargs):
        super(ExtendedDioder, self).__init__(*args, **kwargs)
        # thread status
        self.running = True
        # method with the current mode
        self.mode = None
        # should the current mode be cancelled
        self.modeBreak = False
        # last mode (for switching)
        self.savedMode = signal.SIGRTMIN + 1

    # checks, if mode should be cancelled; reading it also clears the
    # pending cancellation flag
    def shouldBreak(self):
        returnVal = not self.running or self.modeBreak
        self.modeBreak = False
        return returnVal

    def setMode(self, signalNum):
        # at startup the mode is None
        if self.mode: self.modeBreak = True
        # signal to method mode mapping
        signals = {
            34: "dark",
            35: "lightUp",
            36: "rainbow",
            37: "wipeRed",
            38: "wipeGreen",
            39: "wipeBlue",
            40: "colorWipeCenter",
            41: "colorWipeCenterReverse",
            42: "colorWipeCenterBounce",
            43: "strobo",
            44: "ambientColorFade",
            45: "orange",
            46: "white"
        }
        # special signal 50 for switching through modes
        if signalNum == 50:
            signalNum = self.savedMode
            self.savedMode += 1
            if self.savedMode > (signal.SIGRTMIN + len(signals) - 1):
                self.savedMode = signal.SIGRTMIN
        self.mode = getattr(self, signals[signalNum])
        print "Running ", signals[signalNum]

    def run(self):
        # main loop
        while self.running:
            # selected mode or "dark" (to make sure all LEDs stay dark)
            if self.mode:
                self.mode()
            else:
                self.dark()

    # all LEDs off
    def dark(self):
        self.showColor(0, 0, 0)

    # all LEDs on
    def white(self):
        self.showColor(255, 255, 255)

    # a nice orange
    def orange(self):
        self.showColor(173, 76, 0)

    # set color to all LEDs; loops until the mode is cancelled
    def showColor(self, color0, color1, color2):
        while not self.shouldBreak():
            self.setStripColor(color0, color1, color2)
            self.show()
            time.sleep(0.5)

    # light up smoothly
    def lightUp(self):
        for color in range(256):
            if self.shouldBreak(): return
            self.setStripColor(color, color, color)
            self.show()
        self.white()

    # a nice rainbow fading through
    def rainbow(self, waitMs=20):
        for j in range(256):
            for i in range(self.limits[1]):
                if self.shouldBreak(): return
                color = self.wheel((i+j) & 255)
                self.setColor(i, *color)
                self.show()
                time.sleep(waitMs*0.001)

    # maps a 0-255 position on the color wheel to an (r, g, b) tuple
    # NOTE(review): first parameter is named "diod" but acts as self
    def wheel(diod, wheelPos):
        if wheelPos < 85:
            return (wheelPos * 3, 255 - wheelPos * 3, 0)
        elif wheelPos < 170:
            wheelPos -= 85;
            return (255 - wheelPos * 3, 0, wheelPos * 3)
        else:
            wheelPos -= 170
            return (0, wheelPos * 3, 255 - wheelPos * 3)

    def wipeRed(self):
        self.colorWipe((255, 0, 0))

    def wipeGreen(self):
        self.colorWipe((0, 255, 0))

    def wipeBlue(self):
        self.colorWipe((0, 0, 255))

    # fill the strip LED by LED with the given color
    def colorWipe(self, color, waitMs=50):
        for i in range(self.limits[1]):
            if self.shouldBreak(): return
            self.setColor(i, *color)
            self.show()
            time.sleep(waitMs*0.001)

    # like colorWipe() but from center
    def colorWipeCenter(self, color=(255, 255, 255), waitMs=50):
        center = int(round(self.limits[1]/2))
        i = center
        j = center
        while i != 0 and j != self.limits[1]:
            if self.shouldBreak(): return
            self.setColor(i, *color)
            self.setColor(j, *color)
            self.show()
            time.sleep(waitMs*0.001)
            i -= 1
            j += 1

    # like colorWipe() but from first and last LED
    def colorWipeCenterReverse(self, color=(0, 255, 0), waitMs=50):
        center = int(round(self.limits[1]/2))
        i = 0
        j = self.limits[1] - 1
        while i < center and j > center:
            if self.shouldBreak(): return
            self.setColor(i, *color)
            self.setColor(j, *color)
            self.show()
            time.sleep(waitMs*0.001)
            i += 1
            j -= 1

    # wipe out from the center, then wipe back to black from the edges
    def colorWipeCenterBounce(self, color=(0, 255, 0), waitMs=50):
        self.colorWipeCenter(color, waitMs)
        self.colorWipeCenterReverse((0, 0, 0), waitMs)

    # strobo color (default: white); one on/off cycle per run() pass
    def strobo(self, color=(255, 255, 255)):
        for c in [color] + [(0, 0, 0)]:
            if self.shouldBreak(): return
            self.setStripColor(*c)
            self.show()

    # a nice fire effect using Gauss
    def ambientColorFade(self, color1=(254, 100, 0), color2=(255, 120, 1), waitMs=50):
        # start from a random color between the two bounds, per channel
        color = [random.randint(color1[x], color2[x]) for x in range(3)]
        self.setStripColor(color[0], color[1], color[2])
        self.show()
        while True:
            # histogram of 5000 Gaussian draws centered on a random LED;
            # LEDs near the mean get the strongest brightness nudges
            gauss = {}
            mean = random.choice(range(self.limits[1]+1))
            for i in range(5000):
                try:
                    rand = int(round(random.gauss(mean, 0.2))) #2
                    if rand >= self.limits[0] and rand <= self.limits[1]:
                        gauss[rand] += 1
                except KeyError:
                    gauss[rand] = 1
            for i, count in gauss.items():
                if self.shouldBreak(): return
                try:
                    hsv = list(colorsys.rgb_to_hsv(color[0]/255.0, color[1]/255.0, color[2]/255.0))
                except ZeroDivisionError:
                    pass
                # nudge the brightness (V channel) up or down randomly
                hsvNew = hsv
                hsvNew[2] += (count*0.000008) * random.choice((1, -1))#signed
                if hsvNew[2] < 0: hsvNew = hsv
                if hsvNew[2] < -1: hsvNew[2] += 1
                newRgb = colorsys.hsv_to_rgb(*hsvNew)
                newRgb = [x*255 for x in newRgb]
                try:
                    self.setColor(i, *newRgb)
                    color = newRgb
                except ValueError:
                    pass
                self.show()
                time.sleep(0.00001)
            time.sleep(waitMs*(0.0001*random.randint(0, 5)))
# start the thread
# FIXME: put this in main()
serialLogic = SerialLogic("/dev/ttyACM0", 57600)
diod0 = ExtendedDioder(serialLogic)
diod0.start()

# Signal handler: forward the received realtime signal number to the
# dioder thread, which maps it to an animation mode.
def wrapMode(signalNum, stackframe):
    diod0.setMode(signalNum)

# Register the handler for the whole realtime signal range, then park
# the main thread waiting for signals.
for i in range(signal.SIGRTMIN, signal.SIGRTMAX+1):
    signal.signal(i, wrapMode)
while True:
    signal.pause()
|
kdeloach/gwlf-e | gwlfe/AnnualMeans.py | Python | apache-2.0 | 5,840 | 0.000342 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
"""
Imported from AnnualMeans.bas
"""
import logging
log = logging.getLogger(__name__)
def CalculateAnnualMeanLoads(z, Y):
    """Fold year *Y*'s monthly results into the running multi-year
    averages stored on the model-state object *z*.

    Each ``Av*`` accumulator receives 1/NYrs of this year's value, so
    after this has been called for every year the accumulators hold
    the mean over the simulation period; the ``Av*Sum`` totals are
    then recomputed from the monthly (or per-landuse) averages.

    NOTE(review): assumes *z* carries the GWLF state arrays indexed
    [year][month] (and [year][landuse]) -- defined elsewhere; mutates
    *z* in place and returns nothing.
    """
    # UPDATE SEPTIC SYSTEM AVERAGES
    z.AvSeptNitr += z.SepticNitr[Y] / z.NYrs
    z.AvSeptPhos += z.SepticPhos[Y] / z.NYrs
    # Add the Stream Bank Erosion to sediment yield (kg -> Mg)
    for i in range(12):
        z.SedYield[Y][i] += z.StreamBankEros[Y][i] / 1000
    z.CalendarYr = z.WxYrBeg + (Y - 1)
    # CALCULATE ANNUAL MEANS FOR STREAM BANK AND TILE DRAINAGE VALUES
    for i in range(12):
        z.AvStreamBankEros[i] += z.StreamBankEros[Y][i] / z.NYrs
        z.AvStreamBankN[i] += z.StreamBankN[Y][i] / z.NYrs
        z.AvStreamBankP[i] += z.StreamBankP[Y][i] / z.NYrs
        # If the Monthly Erosion is < the Sediment Yield
        # recalculate using Sediment Delivery Ratio
        if z.SedDelivRatio > 0 and z.Erosion[Y][i] < z.SedYield[Y][i]:
            z.Erosion[Y][i] = z.SedYield[Y][i] / z.SedDelivRatio
        z.AvPtSrcFlow[i] += z.PtSrcFlow[Y][i] / z.NYrs
        z.AvTileDrain[i] += z.TileDrain[Y][i] / z.NYrs
        z.AvWithdrawal[i] += z.Withdrawal[Y][i] / z.NYrs
        z.AvTileDrainN[i] += z.TileDrainN[Y][i] / z.NYrs
        z.AvTileDrainP[i] += z.TileDrainP[Y][i] / z.NYrs
        z.AvTileDrainSed[i] += z.TileDrainSed[Y][i] / z.NYrs
    # Recalculate the total annual erosion (Erosion may have been
    # adjusted by the delivery-ratio correction above)
    z.ErosSum = 0
    for i in range(12):
        z.ErosSum += z.Erosion[Y][i]
    # COMPUTE ANNUAL MEANS
    for i in range(12):
        z.AvPrecipitation[i] += z.Precipitation[Y][i] / z.NYrs
        z.AvEvapoTrans[i] += z.Evapotrans[Y][i] / z.NYrs
        z.AvGroundWater[i] += z.GroundWatLE[Y][i] / z.NYrs
        # groundwater flow cannot be negative
        if z.AvGroundWater[i] < 0:
            z.AvGroundWater[i] = 0
        z.AvRunoff[i] += z.Runoff[Y][i] / z.NYrs
        z.AvErosion[i] += z.Erosion[Y][i] / z.NYrs
        z.AvSedYield[i] += z.SedYield[Y][i] / z.NYrs
        z.AvDisNitr[i] += z.DisNitr[Y][i] / z.NYrs
        z.AvTotNitr[i] += z.TotNitr[Y][i] / z.NYrs
        z.AvDisPhos[i] += z.DisPhos[Y][i] / z.NYrs
        z.AvTotPhos[i] += z.TotPhos[Y][i] / z.NYrs
        z.AvGroundNitr[i] += z.GroundNitr[Y][i] / z.NYrs
        z.AvGroundPhos[i] += z.GroundPhos[Y][i] / z.NYrs
        z.AvAnimalN[i] += z.AnimalN[Y][i] / z.NYrs
        z.AvAnimalP[i] += z.AnimalP[Y][i] / z.NYrs
        z.AvGRLostBarnN[i] += z.GRLostBarnN[Y][i] / z.NYrs
        z.AvGRLostBarnP[i] += z.GRLostBarnP[Y][i] / z.NYrs
        z.AvGRLostBarnFC[i] += z.GRLostBarnFC[Y][i] / z.NYrs
        z.AvNGLostBarnN[i] += z.NGLostBarnN[Y][i] / z.NYrs
        z.AvNGLostBarnP[i] += z.NGLostBarnP[Y][i] / z.NYrs
        z.AvNGLostBarnFC[i] += z.NGLostBarnFC[Y][i] / z.NYrs
        z.AvNGLostManP[i] += z.NGLostManP[Y][i] / z.NYrs
        # Average pathogen totals
        z.AvAnimalFC[i] += z.AnimalFC[Y][i] / z.NYrs
        z.AvWWOrgs[i] += z.WWOrgs[Y][i] / z.NYrs
        z.AvSSOrgs[i] += z.SSOrgs[Y][i] / z.NYrs
        z.AvUrbOrgs[i] += z.UrbOrgs[Y][i] / z.NYrs
        z.AvWildOrgs[i] += z.WildOrgs[Y][i] / z.NYrs
        z.AvTotalOrgs[i] += z.TotalOrgs[Y][i] / z.NYrs
    # Average loads for each landuse
    # rural land uses (indices [0, NRur)) carry erosion/sediment loads
    for l in range(z.NRur):
        z.AvLuRunoff[l] += z.LuRunoff[Y][l] / z.NYrs
        z.AvLuErosion[l] += z.LuErosion[Y][l] / z.NYrs
        z.AvLuSedYield[l] += z.LuSedYield[Y][l] / z.NYrs
        z.AvLuDisNitr[l] += z.LuDisNitr[Y][l] / z.NYrs
        z.AvLuTotNitr[l] += z.LuTotNitr[Y][l] / z.NYrs
        z.AvLuDisPhos[l] += z.LuDisPhos[Y][l] / z.NYrs
        z.AvLuTotPhos[l] += z.LuTotPhos[Y][l] / z.NYrs
    # urban land uses (indices [NRur, NLU)); no LuErosion accumulation
    for l in range(z.NRur, z.NLU):
        z.AvLuRunoff[l] += z.LuRunoff[Y][l] / z.NYrs
        z.AvLuTotNitr[l] += z.LuTotNitr[Y][l] / z.NYrs
        z.AvLuTotPhos[l] += z.LuTotPhos[Y][l] / z.NYrs
        z.AvLuDisNitr[l] += z.LuDisNitr[Y][l] / z.NYrs
        z.AvLuDisPhos[l] += z.LuDisPhos[Y][l] / z.NYrs
        z.AvLuSedYield[l] += z.LuSedYield[Y][l] / z.NYrs
    # Recompute the grand totals from the (updated) averages
    z.AvStreamBankErosSum = sum(z.AvStreamBankEros)
    z.AvStreamBankNSum = sum(z.AvStreamBankN)
    z.AvStreamBankPSum = sum(z.AvStreamBankP)
    z.AvPtSrcFlowSum = sum(z.AvPtSrcFlow)
    z.AvTileDrainSum = sum(z.AvTileDrain)
    z.AvWithdrawalSum = sum(z.AvWithdrawal)
    z.AvTileDrainNSum = sum(z.AvTileDrainN)
    z.AvTileDrainPSum = sum(z.AvTileDrainP)
    z.AvTileDrainSedSum = sum(z.AvTileDrainSed)
    z.AvPrecipitationSum = sum(z.AvPrecipitation)
    z.AvEvapoTransSum = sum(z.AvEvapoTrans)
    z.AvGroundWaterSum = sum(z.AvGroundWater)
    z.AvRunoffSum = sum(z.AvRunoff)
    z.AvErosionSum = sum(z.AvErosion)
    z.AvSedYieldSum = sum(z.AvSedYield)
    z.AvDisNitrSum = sum(z.AvDisNitr)
    z.AvTotNitrSum = sum(z.AvTotNitr)
    z.AvDisPhosSum = sum(z.AvDisPhos)
    z.AvTotPhosSum = sum(z.AvTotPhos)
    z.AvGroundNitrSum = sum(z.AvGroundNitr)
    z.AvGroundPhosSum = sum(z.AvGroundPhos)
    z.AvAnimalNSum = sum(z.AvAnimalN)
    z.AvAnimalPSum = sum(z.AvAnimalP)
    z.AvGRLostBarnNSum = sum(z.AvGRLostBarnN)
    z.AvGRLostBarnPSum = sum(z.AvGRLostBarnP)
    z.AvGRLostBarnFCSum = sum(z.AvGRLostBarnFC)
    z.AvNGLostBarnNSum = sum(z.AvNGLostBarnN)
    z.AvNGLostBarnPSum = sum(z.AvNGLostBarnP)
    z.AvNGLostBarnFCSum = sum(z.AvNGLostBarnFC)
    z.AvNGLostManPSum = sum(z.AvNGLostManP)
    z.AvAnimalFCSum = sum(z.AvAnimalFC)
    z.AvWWOrgsSum = sum(z.AvWWOrgs)
    z.AvSSOrgsSum = sum(z.AvSSOrgs)
    z.AvUrbOrgsSum = sum(z.AvUrbOrgs)
    z.AvWildOrgsSum = sum(z.AvWildOrgs)
    z.AvTotalOrgsSum = sum(z.AvTotalOrgs)
    z.AvLuRunoffSum = sum(z.AvLuRunoff)
    z.AvLuErosionSum = sum(z.AvLuErosion)
    z.AvLuSedYieldSum = sum(z.AvLuSedYield)
    z.AvLuDisNitrSum = sum(z.AvLuDisNitr)
    z.AvLuTotNitrSum = sum(z.AvLuTotNitr)
    z.AvLuDisPhosSum = sum(z.AvLuDisPhos)
    z.AvLuTotPhosSum = sum(z.AvLuTotPhos)
|
madhat2r/plaid2text | src/python/plaid2text/online_accounts.py | Python | gpl-3.0 | 2,305 | 0.004338 | #! /usr/bin/env python3
from collections import OrderedDict
import datetime
import os
import sys
import textwrap
from plaid import Client
from plaid import errors as plaid_errors
import plaid2text.config_manager as cm
from plaid2text.interact import prompt, clear_screen, NullValidator
from plaid2text.interact import NumberValidator, NumLengthValidator, YesNoValidator, PATH_COMPLETER
class PlaidAccess():
    """Small convenience wrapper around the Plaid API client."""

    def __init__(self, client_id=None, secret=None):
        # Fall back to the credentials stored in the plaid2text config
        # when they are not passed in explicitly.
        if client_id and secret:
            self.client_id = client_id
            self.secret = secret
        else:
            self.client_id, self.secret = cm.get_plaid_config()
        self.client = Client(self.client_id, self.secret, "development", suppress_warnings=True)

    def get_transactions(self,
                         access_token,
                         start_date,
                         end_date,
                         account_ids):
        """Get transaction for a given account for the given dates.

        Pages through the Plaid /transactions endpoint until all
        transactions in the window have been fetched; exits the
        process on an ItemError.
        """
        transactions = []
        total = None
        page = 0
        requested_accounts = [account_ids]
        start = start_date.strftime("%Y-%m-%d")
        end = end_date.strftime("%Y-%m-%d")
        while True:
            page += 1
            if total:
                print("Fetching page %d, already fetched %d/%d transactions" % (page, len(transactions), total))
            else:
                print("Fetching page 1")
            try:
                # len(transactions) doubles as the paging offset.
                response = self.client.Transactions.get(
                    access_token,
                    start,
                    end,
                    account_ids=requested_accounts,
                    offset=len(transactions))
            except plaid_errors.ItemError as ex:
                print("Unable to update plaid account [%s] due to: " % account_ids, file=sys.stderr)
                print(" %s" % ex, file=sys.stderr)
                sys.exit(1)
            total = response['total_transactions']
            transactions.extend(response['transactions'])
            if len(transactions) >= total:
                break
        print("Downloaded %d transactions for %s - %s" % (len(transactions), start, end))
        return transactions
|
swift-lang/swift-e-lab | parsl/configs/comet_ipp_multinode.py | Python | apache-2.0 | 1,364 | 0.002933 | from parsl.channels import SSHChannel
from parsl.providers import SlurmProvider
from parsl.launchers import | SrunLauncher
from parsl.config import Config
from parsl.executors.ipp import IPyParallelExecutor
from parsl.executors.ipp_controller import Con | troller
# This is an example config, make sure to
# replace the specific values below with the literal values
# (e.g., 'USERNAME' -> 'your_username')
# Parsl config: one IPyParallel executor backed by a Slurm allocation
# on SDSC Comet, reached over SSH.
config = Config(
    executors=[
        IPyParallelExecutor(
            label='comet_ipp_multinode',
            provider=SlurmProvider(
                'debug',  # Slurm partition to submit to
                channel=SSHChannel(
                    hostname='comet.sdsc.xsede.org',
                    username='USERNAME',  # Please replace USERNAME with your username
                    script_dir='/home/USERNAME/parsl_scripts',  # Please replace USERNAME with your username
                ),
                launcher=SrunLauncher(),
                scheduler_options='',  # Input your scheduler_options if needed
                worker_init='',  # Input your worker_init if needed
                walltime="00:10:00",
                init_blocks=1,
                max_blocks=1,
                nodes_per_block=2,
            ),
            # The controller must be reachable from the compute nodes.
            controller=Controller(public_ip='PUBLIC_IP'),  # Please replace PUBLIC_IP with your public ip
        )
    ],
)
|
WarrenWeckesser/scipy | scipy/stats/_discrete_distns.py | Python | bsd-3-clause | 50,628 | 0.000316 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from functools import partial
from scipy import special
from scipy.special import entr, logsumexp, betaln, gammaln as gamln, zeta
from scipy._lib._util import _lazywhere, rng_integers
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
from ._distn_infrastructure import (
rv_discrete, _ncx2_pdf, _ncx2_cdf, get_distribution_names,
_check_shape)
import scipy.stats._boost as _boost
from .biasedurn import (_PyFishersNCHypergeometric,
_PyWalleniusNCHypergeometric,
_PyStochasticLib3)
class binom_gen(rv_discrete):
    r"""A binomial discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `binom` is:

    .. math::

       f(k) = \binom{n}{k} p^k (1-p)^{n-k}

    for :math:`k \in \{0, 1, \dots, n\}`, :math:`0 \leq p \leq 1`

    `binom` takes :math:`n` and :math:`p` as shape parameters,
    where :math:`p` is the probability of a single success
    and :math:`1-p` is the probability of a single failure.

    %(after_notes)s

    %(example)s

    See Also
    --------
    hypergeom, nbinom, nhypergeom

    """
    def _rvs(self, n, p, size=None, random_state=None):
        return random_state.binomial(n, p, size)

    def _argcheck(self, n, p):
        return (n >= 0) & (p >= 0) & (p <= 1)

    def _get_support(self, n, p):
        # support is {0, 1, ..., n}; self.a is the class-level lower bound
        return self.a, n

    def _logpmf(self, x, n, p):
        k = floor(x)
        # log C(n, k) via gammaln, then add k*log(p) + (n-k)*log1p(-p);
        # xlogy/xlog1py return 0 for k == 0 even when p is 0 or 1
        combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
        return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)

    # pmf/cdf/sf/ppf/isf/stats all delegate to the compiled
    # scipy.stats._boost (Boost.Math) implementations.
    def _pmf(self, x, n, p):
        # binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
        return _boost._binom_pdf(x, n, p)

    def _cdf(self, x, n, p):
        k = floor(x)
        return _boost._binom_cdf(k, n, p)

    def _sf(self, x, n, p):
        k = floor(x)
        return _boost._binom_sf(k, n, p)

    def _isf(self, x, n, p):
        return _boost._binom_isf(x, n, p)

    def _ppf(self, q, n, p):
        return _boost._binom_ppf(q, n, p)

    def _stats(self, n, p, moments='mv'):
        mu = _boost._binom_mean(n, p)
        var = _boost._binom_variance(n, p)
        g1, g2 = None, None
        # skewness/kurtosis are only computed when requested
        if 's' in moments:
            g1 = _boost._binom_skewness(n, p)
        if 'k' in moments:
            g2 = _boost._binom_kurtosis_excess(n, p)
        return mu, var, g1, g2

    def _entropy(self, n, p):
        # exact entropy by summing -p*log(p) over the finite support
        k = np.r_[0:n + 1]
        vals = self._pmf(k, n, p)
        return np.sum(entr(vals), axis=0)


binom = binom_gen(name='binom')
class bernoulli_gen(binom_gen):
    r"""A Bernoulli discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `bernoulli` is:

    .. math::

       f(k) = \begin{cases}1-p &\text{if } k = 0\\
                           p   &\text{if } k = 1\end{cases}

    for :math:`k` in :math:`\{0, 1\}`, :math:`0 \leq p \leq 1`

    `bernoulli` takes :math:`p` as shape parameter,
    where :math:`p` is the probability of a single success
    and :math:`1-p` is the probability of a single failure.

    %(after_notes)s

    %(example)s

    """
    # A Bernoulli variable is a binomial with n fixed at 1; every hook
    # below delegates to binom with n=1.
    def _rvs(self, p, size=None, random_state=None):
        return binom_gen._rvs(self, 1, p, size=size, random_state=random_state)

    def _argcheck(self, p):
        return (p >= 0) & (p <= 1)

    def _get_support(self, p):
        # Overrides binom_gen._get_support: the support is the fixed
        # interval [self.a, self.b] = [0, 1], not dependent on n.
        return self.a, self.b

    def _logpmf(self, x, p):
        return binom._logpmf(x, 1, p)

    def _pmf(self, x, p):
        # bernoulli.pmf(k) = 1-p if k = 0
        #                  = p   if k = 1
        return binom._pmf(x, 1, p)

    def _cdf(self, x, p):
        return binom._cdf(x, 1, p)

    def _sf(self, x, p):
        return binom._sf(x, 1, p)

    def _isf(self, x, p):
        return binom._isf(x, 1, p)

    def _ppf(self, q, p):
        return binom._ppf(q, 1, p)

    def _stats(self, p):
        return binom._stats(1, p)

    def _entropy(self, p):
        # closed form: H = -p*log(p) - (1-p)*log(1-p)
        return entr(p) + entr(1-p)


bernoulli = bernoulli_gen(b=1, name='bernoulli')
class betabinom_gen(rv_discrete):
    r"""A beta-binomial discrete random variable.

    %(before_notes)s

    Notes
    -----
    The beta-binomial distribution is a binomial distribution with a
    probability of success `p` that follows a beta distribution.

    The probability mass function for `betabinom` is:

    .. math::

       f(k) = \binom{n}{k} \frac{B(k + a, n - k + b)}{B(a, b)}

    for :math:`k \in \{0, 1, \dots, n\}`, :math:`n \geq 0`, :math:`a > 0`,
    :math:`b > 0`, where :math:`B(a, b)` is the beta function.

    `betabinom` takes :math:`n`, :math:`a`, and :math:`b` as shape parameters.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Beta-binomial_distribution

    %(after_notes)s

    .. versionadded:: 1.4.0

    See Also
    --------
    beta, binom

    %(example)s

    """
    # NOTE: the class docstring above is runtime data -- rv_discrete
    # %-interpolates its placeholders at instantiation time.

    def _rvs(self, n, a, b, size=None, random_state=None):
        # Compound sampling: draw p ~ Beta(a, b), then k ~ Binomial(n, p).
        prob = random_state.beta(a, b, size)
        return random_state.binomial(n, prob, size)

    def _get_support(self, n, a, b):
        return 0, n

    def _argcheck(self, n, a, b):
        return (n >= 0) & (a > 0) & (b > 0)

    def _logpmf(self, x, n, a, b):
        k = floor(x)
        # log C(n, k) expressed through the beta function, plus the
        # likelihood ratio log B(k+a, n-k+b) - log B(a, b).
        log_choose = -log(n + 1) - betaln(n - k + 1, k + 1)
        return log_choose + betaln(k + a, n - k + b) - betaln(a, b)

    def _pmf(self, x, n, a, b):
        # Exponentiate the log-pmf for numerical stability.
        return exp(self._logpmf(x, n, a, b))

    def _stats(self, n, a, b, moments='mv'):
        # Mean success probability of the underlying Beta(a, b).
        p = a / (a + b)
        q = 1 - p
        mu = n * p
        var = n * (a + b + n) * p * q / (a + b + 1)
        g1, g2 = None, None
        # Skewness and excess kurtosis only when explicitly requested.
        if 's' in moments:
            g1 = 1.0 / sqrt(var)
            g1 *= (a + b + 2 * n) * (b - a)
            g1 /= (a + b + 2) * (a + b)
        if 'k' in moments:
            g2 = a + b
            g2 *= (a + b - 1 + 6 * n)
            g2 += 3 * a * b * (n - 2)
            g2 += 6 * n ** 2
            g2 -= 3 * p * b * n * (6 - n)
            g2 -= 18 * p * q * n ** 2
            g2 *= (a + b) ** 2 * (1 + a + b)
            g2 /= (n * a * b * (a + b + 2) * (a + b + 3) * (a + b + n))
            g2 -= 3
        return mu, var, g1, g2


betabinom = betabinom_gen(name='betabinom')
class nbinom_gen(rv_discrete):
r"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
Negative binomial distribution describes a sequence of i.i.d. Bernoulli
trials, repeated until a predefined, non-random number of successes occurs.
The probability mass function of the number of failures for `nbinom` is:
.. math::
f(k) = \binom{k+n-1}{n-1} p^n (1-p)^k
for :math:`k \ge 0`, :math:`0 < p \leq 1`
`nbinom` takes :math:`n` and :math:`p` as shape parameters where n is the
number of successes, :math:`p` is the probability of a single success,
and :math:`1-p` is the probability of a single failure.
Another common parameterization of the negative binomial distribution is
in terms of the mean number of failures :math:`\mu` to achieve :math:`n`
successes. The mean :math:`\mu` is related to the probability of success
as
.. math::
p = \frac{n}{n + \mu}
The number of successes :math:`n` may also be specified in terms of a
"dispersion", "heterogeneity", or "aggregation" parameter :math:`\alpha`,
which relates the mean :math:`\mu` to the variance :math:`\sigma^2`,
e.g. :math:`\sigma^2 = \mu + \alpha \mu^2`. Regardless of the convention
used for :math:`\alpha`,
.. math::
p &= \frac{\mu}{\sigma^2} \\
n &= \frac{\mu^2}{\sigma^2 - \mu}
%(after_notes)s
%(example)s
See Also
--------
hypergeom, binom, nhypergeom
"""
def _rvs(self, n, p, size=None, random_state=None):
return random_state.negative_binomial(n, p, size)
def _argcheck(self, n, p):
return (n > 0) & (p > 0) & (p <= 1)
def _pmf(self, x, n, p):
# nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
|
PyCQA/pylint | tests/functional/n/non/non_ascii_name.py | Python | gpl-2.0 | 133 | 0.00813 | """ Tests for non-ascii-name checker. """
áéíóú = 4444 # [non-ascii-name]
def úóíé | á(): # [non-a | scii-name]
"""yo"""
|
appuio/ansible-role-openshift-zabbix-monitoring | vendor/openshift-tools/ansible/roles/lib_openshift_3.2/build/src/oc_version.py | Python | apache-2.0 | 2,732 | 0.001464 | # pylint: skip-file
# pylint: disable=too-many-instance-attributes
class OCVersion(OpenShiftCLI):
    ''' Class to wrap the oc command line tools '''
    # pylint allows 5
    # pylint: disable=too-many-arguments
    def __init__(self,
                 config,
                 debug):
        ''' Constructor for OCVersion '''
        super(OCVersion, self).__init__(None, config)
        self.debug = debug

    @staticmethod
    def openshift_installed():
        ''' check if openshift is installed '''
        import yum
        # An installed atomic-openshift RPM marks an OpenShift host.
        return bool(yum.YumBase().rpmdb.searchNevra(name='atomic-openshift'))

    @staticmethod
    def filter_versions(stdout):
        ''' filter the oc version output '''
        versions = {}
        for line in stdout.strip().split('\n'):
            if not line:
                continue
            for term in ('oc', 'openshift', 'kubernetes'):
                if line.startswith(term):
                    versions[term] = line.split()[-1]
        # horrible hack to get openshift version in Openshift 3.2:
        # "oc version" there does not report an "openshift" line, so
        # fall back to the "oc" client version.
        if "openshift" not in versions:
            versions["openshift"] = versions["oc"]
        return versions

    @staticmethod
    def add_custom_versions(versions):
        ''' create custom versions strings '''
        custom = {}
        for tech, version in versions.items():
            # drop any "-<suffix>" from the version string
            version = version.split("-")[0]
            if version.startswith('v'):
                # "v3.3.0.33+abcdef" -> "3.3.0.33"
                custom[tech + '_numeric'] = version[1:].split('+')[0]
                # "v3.3.0.33" -> "3.3"
                custom[tech + '_short'] = version[1:4]
        return custom

    def get(self):
        '''get and return version information '''
        results = {}
        results["installed"] = OCVersion.openshift_installed()
        if not results["installed"]:
            return results
        version_results = self.openshift_cmd(['version'], output=True, output_type='raw')
        if version_results['returncode'] != 0:
            raise OpenShiftCLIError('Problem detecting openshift version.')
        filtered = OCVersion.filter_versions(version_results['results'])
        results['returncode'] = version_results['returncode']
        results.update(filtered)
        results.update(OCVersion.add_custom_versions(filtered))
        return results
|
aspose-words/Aspose.Words-for-Cloud | Examples/Python/Examples/ReadingAllHyperlinksFromDocument.py | Python | mit | 1,469 | 0.012253 | import asposewordscloud
from asposewordscloud.WordsApi import WordsApi
from asposewordscloud.WordsApi import ApiException
from asposewordscloud.models import ProtectionRequest
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
from asposestoragecloud.StorageApi import ResponseMessage
apiKey = "XXXXX" #sepcify App Key
appSid = "XXXXX" #sepcify App SID
apiServer = "http://api.aspose.com/v1.1"
data_folder = "../../data/"
#Instantiate Aspose Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, T | rue)
storageApi = StorageApi(storage_apiClient)
#Instantiate Aspose Words API SDK
api_client = asposewordscloud.ApiClient.ApiClient(apiKey, appSid, True)
wordsApi = WordsApi(api_client)
#set input file name
filename = "SampleWordDocument.docx"
#upload file to aspose cloud storage
storageApi.PutCreate(Path=filename, file=data_folder + filename)
try:
#invoke Aspose.Wor | ds Cloud SDK API to get all the hyperlinks in a word document
response = wordsApi.GetDocumentHyperlinks(name=filename)
if response.Status == "OK":
#display the hyperlinks info
for hyperlink in response.Hyperlinks.HyperlinkList:
print "Display Text: " + hyperlink.DisplayText + " Value: " + hyperlink.Value + " link: " + hyperlink.link.Href
except ApiException as ex:
print "ApiException:"
print "Code:" + str(ex.code)
print "Message:" + ex.message
|
nagyistoce/devide | modules/vtk_basic/vtkImageHSIToRGB.py | Python | bsd-3-clause | 489 | 0.002045 | # class generated by DeVIDE::createDeVIDEMod | uleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkImageHSIToRGB(SimpleVTKClassModuleBase):
    """DeVIDE module wrapping vtk.vtkImageHSIToRGB.

    Converts HSI image data to RGB; takes one vtkImageData input and
    produces one vtkImageData output.
    """
    def __init__(self, module_manager):
        # (the final kwarg was corrupted to "outputFunction | s=None" by a
        # dataset artifact; restored here)
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkImageHSIToRGB(), 'Processing.',
            ('vtkImageData',), ('vtkImageData',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
|
ruiting/opencog | opencog/python/pln_old/examples/tuffy/smokes/smokes_agent.py | Python | agpl-3.0 | 5,370 | 0.00149 | """
PLN representation of the "smokes" sample from Tuffy Markov Logic Networks
More details on this sample are available here:
https://github.com/opencog/opencog/tree/master/opencog/python/pln_old/examples/tuffy/smokes
https://github.com/cosmoharrigan/tuffy/tree/master/samples/smoke
http://hazy.cs.wisc.edu/hazy/tuffy/doc/tuffy-manual.pdf
Instructions:
Method 1 (preferred) -- Running the example with attention allocation using
a Python control interface
- Follow the instructions here:
https://github.com/opencog/external-tools/tree/master/attention
Method 2 -- Running the example in a standalone Python environment:
- Run smokes_example.py: python smokes_example.py
Method 3 -- Running the example within the cogserver:
- Add the module path to your PYTHON_EXTENSION_DIRS in opencog.conf:
../opencog/python/pln_old/examples/tuffy/smokes
- Run the cogserver
- Load these files into the cogserver:
python/pln_old/examples/tuffy/smokes/smokes.scm,
python/pln_old/examples/tuffy/smokes/extra-data.scm
- Run these commands in the cogserver:
loadpy smokes_agent
agents-start smokes_agent.InferenceAgent
- Use the Scheme shell to monitor the inference progress
"""
from opencog.cogserver import MindAgent
from opencog.atomspace import types
from pln.chainers import Chainer
from pln.rules import *
from opencog.scheme_wrapper import scheme_eval_h, __init__
__author__ = 'Cosmo Harrigan'
TARGET_STIMULUS = 20
class InferenceAgent(MindAgent):
def __init__(self):
self.chainer = None
self.query = None
print "Initializing InferenceAgent."
def create_chainer(self, atomspace, stimulate_atoms=True):
"""
Creates the chainer for the "smokes" example. Optionally, you can
define the target query before calling this method, by defining a
Scheme expression named "query". For example, you can issue the
Scheme expression "(define query hasCancer)" in reference to the
predicate defined in smokes.scm before loading this agent. Once
defined, the target query will receive stimulus at every time step.
Stimulus requires the agent to be running in a CogServer.
For a complete example that incorporates this behavior, see
example.py here:
https://github.com/opencog/external-tools/tree/master/attention
"""
self.chainer = Chainer(atomspace,
agent=self,
stimulateAt | oms=stimulate_atoms,
allow_output_with_variables=False,
preferAttentionalFocus=True,
| delete_temporary_variables=True)
# ModusPonens:
# Implication smokes(x) cancer(X)
# smokes(Anna)
# |= cancer(Anna)
self.chainer.add_rule(
ModusPonensRule(self.chainer, types.ImplicationLink))
# stimulateAtoms is only enabled when the agent is ran inside the
# CogServer, since the functionality requires a CogServer and
# attention allocation
if self.chainer._stimulateAtoms:
self.query = scheme_eval_h(atomspace, "query")
def run(self, atomspace):
if self.chainer is None:
self.create_chainer(atomspace)
print "PLN Chainer created."
return
print "PLN continuing."
if not check_result(atomspace):
result = self.chainer.forward_step()
if self.query is not None:
# Allow the stimulus amount to be set dynamically by setting
# a configuration atom in the atomspace.
stimulus_predicate = atomspace.add_node(types.PredicateNode,
'CONFIG-StimulusAmount')
# Only set TARGET_STIMULUS if this atom has been setup with
# an appropriate NumberNode with the value.
outgoing = stimulus_predicate.out
if len(outgoing) > 0:
list = outgoing[0].incoming # EvaluationLink
list = list[0].out # ListLink
list = list[1].out # NumberNode
value = list[0].name
TARGET_STIMULUS = int(value)
print "Target stimulus amount updated to {0}".\
format(TARGET_STIMULUS)
self.chainer._give_stimulus(atomspace[self.query],
TARGET_STIMULUS)
return result
def check_result(atomspace):
"""
Searches for 4 instances of an EvaluationLink where the first argument is:
PredicateNode "cancer"
and the target of the predicate is a ConceptNode (representing a person)
"""
eval_links = atomspace.get_atoms_by_type(types.EvaluationLink)
num_results = 0
for eval_link in eval_links:
out = [atom for atom in eval_link.out
if atom.is_a(types.PredicateNode) and atom.name == "cancer"]
if out:
list_link = eval_link.out[1]
argument = list_link.out[0]
if argument.is_a(types.ConceptNode):
num_results += 1
result_found = (num_results == 4)
print "Result found? {0}. {1} results satisfy the query.".\
format(result_found, num_results)
return result_found
|
andrei4ka/fuel-web-redhat | nailgun/nailgun/objects/serializers/release.py | Python | apache-2.0 | 1,707 | 0 | # -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by | applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun. | objects.serializers.base import BasicSerializer
class ReleaseSerializer(BasicSerializer):
    """Serializer for Release objects, adding derived deployability and
    orchestrator-data fields on top of the plain attribute dump."""

    fields = (
        "id",
        "name",
        "version",
        "can_update_from_versions",
        "description",
        "operating_system",
        "modes_metadata",
        "roles",
        "roles_metadata",
        "wizard_metadata",
        "state",
        "attributes_metadata"
    )

    @classmethod
    def serialize(cls, instance, fields=None):
        # imported locally to avoid a circular import with nailgun.objects
        from nailgun.objects.release import Release

        serialized = super(ReleaseSerializer, cls).serialize(instance, fields)
        serialized["is_deployable"] = Release.is_deployable(instance)
        # we always want to get orchestrator data even it's a default one
        serialized["orchestrator_data"] = \
            Release.get_orchestrator_data_dict(instance)
        return serialized
class ReleaseOrchestratorDataSerializer(BasicSerializer):
    """Serializer exposing only the orchestrator-related Release fields."""
    fields = (
        "repo_metadata",
        "puppet_manifests_source",
        "puppet_modules_source"
    )
|
grezesf/Research | Reservoirs/Task5-Memory_Tuning/task5.py | Python | mit | 2,640 | 0.007576 | import mdp
import Oger
import numpy
import pylab
import random
### README
# study the memory capacities of same size reservoirs
def main():
    """Drive a 100-node reservoir with random step waves and plot activity.

    NOTE(review): spectral_radius > 1 presumably chosen to lengthen memory
    traces — confirm against the experiment's intent.
    """
    num_waves = 100
    waves = [gen_test_wave(2.0*random.random()-1.0) for x in range(num_waves)]
    print "Shape of waves" , numpy.shape(waves[0])
    ### Create reservoir
    # construct individual nodes,
    reservoir_size = 100
    reservoir = Oger.nodes.ReservoirNode(output_dim=reservoir_size, spectral_radius=1.001)
    # readout = Oger.nodes.RidgeRegressionNode()
    # build network with MDP framework
    # flow = mdp.Flow([reservoir, readout], verbose=1)
    flow = mdp.Flow([reservoir], verbose=1)
    Oger.utils.make_inspectable(Oger.nodes.ReservoirNode)
    # apply waves to flow
    results = [flow(x) for x in waves]
    print "Shape of res", numpy.shape(results[0])
    print "Shape of reservoir.inspect()", numpy.shape(reservoir.inspect())
    # print some results
    # find first step with values all <0.01
    # avg = 0
    # for trace in reservoir.inspect():
    # # print "memory:", measure_memory(trace)-20
    # avg += measure_memory(trace)-20.0
    # print "memory average of this reservoir:", avg/num_waves
    # plotting parameters
    nx = 5+1
    ny = 1
    # #plot the input
    for wave in waves:
        pylab.subplot(nx, ny, 1)
        pylab.plot(wave)
    # plot the activity for first 5 inputs
    for num in range(5):
        pylab.subplot(nx, ny, num+2)
        pylab.plot(reservoir.inspect()[num])
    pylab.show()
    # end of main
    return None
def gen_test_wave(max_value = 1):
    """Generate a 1-D step wave of shape (100, 1).

    The signal is 0 for 10 steps, `max_value` for 11 steps, then 0 for
    79 steps.

    :param max_value: the max value attained by the wave
    """
    # tunable parameters: how long the signal is quiet before, active, and
    # quiet after the step
    startup_time = 10
    cooldown_time = 79
    active_time = 11

    values = ([0] * startup_time
              + [max_value] * active_time
              + [0] * cooldown_time)
    return numpy.array([numpy.array([v]) for v in values])
def measure_memory(signal):
    """Return the index of the first frame (from step 15 on) where every
    activation is strictly inside (-0.01, 0.01), or None if none is found.

    For testing purposes only; step count starts at 0.
    """
    for offset, frame in enumerate(signal[15:]):
        if all(-0.01 < activation < 0.01 for activation in frame):
            # activity has died off at this frame
            return 15 + offset
    # activity never died off
    return None
# Call to main
if __name__=='__main__':
main() |
luci/recipes-py | recipes/engine_tests/proto_properties.py | Python | apache-2.0 | 1,095 | 0.004566 | # Copyright 2019 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
from PB.recipes.recipe_engine.engine_tests import proto_properties
PYTHON_VERSION_COMPATIBILITY = 'PY2+3'
DEPS = [
'assertions',
'properties',
]
PROPERTIES = proto_properties.TestProperties
ENV_PROPERTIES = proto_properties.EnvProperties
def RunSteps(api, properties, env_props):
    """Assert that proto-typed properties and env properties arrive intact."""
    api.assertions.assertEqual(properties.an_int, 100)
    api.assertions.assertEqual(properties.some_string, 'hey there')
    api.assertions.assertEqual(env_props.STR_ENV, "sup")
    api.assertions.assertEqual(env_props.INT_ENV, 9000)
def GenTests(api):
    """Single test case feeding proto properties plus one ignored property.

    (the `an_int` kwarg was corrupted to "an_int= | 100" by a dataset
    artifact; restored here)
    """
    yield (
        api.test('full')
        + api.properties(
            proto_properties.TestProperties(
                an_int=100,
                some_string='hey there',
            ),
            ignored_prop='yo')
        + api.properties.environ(
            proto_properties.EnvProperties(
                STR_ENV="sup",
                INT_ENV=9000,
            ))
        + api.post_process(lambda _check, _steps: {}))
|
mrunge/openstack_horizon | openstack_horizon/dashboards/project/data_processing/cluster_templates/forms.py | Python | apache-2.0 | 2,210 | 0 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE- | 2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.utils.translation imp | ort ugettext_lazy as _
from horizon_lib import exceptions
from horizon_lib import forms
from openstack_horizon.api import sahara as saharaclient
from openstack_horizon.dashboards.project.data_processing. \
utils import workflow_helpers
LOG = logging.getLogger(__name__)
class UploadFileForm(forms.SelfHandlingForm,
                     workflow_helpers.PluginAndVersionMixin):
    """Horizon form that converts an uploaded file into a Sahara cluster
    template for the selected plugin/version."""

    template_name = forms.CharField(max_length=80,
                                    label=_("Cluster Template Name"))

    def __init__(self, request, *args, **kwargs):
        super(UploadFileForm, self).__init__(request, *args, **kwargs)

        sahara_client = saharaclient.client(request)
        self._generate_plugin_version_fields(sahara_client)

        self.fields['template_file'] = forms.FileField(label=_("Template"))

    def handle(self, request, data):
        try:
            # we can set a limit on file size, but should we?
            content = self.files['template_file'].read()
            plugin = data['plugin_name']
            version = data.get(plugin + "_version")
            saharaclient.plugin_convert_to_template(request,
                                                    plugin,
                                                    version,
                                                    data['template_name'],
                                                    content)
            return True
        except Exception:
            exceptions.handle(request,
                              _("Unable to upload cluster template file"))
            return False
|
tomjelinek/pcs | pcs/cli/file/metadata.py | Python | gpl-2.0 | 1,201 | 0 | import os.path
from pcs.common import file_type_codes as code
from pcs.common.file import FileMetadata
# Registry mapping pcs file-type codes to factories building the FileMetadata
# (path, ownership, permissions, binary flag) for that file kind.
_metadata = {
    # Booth config: caller supplies the path; default ownership/permissions.
    code.BOOTH_CONFIG: lambda path: FileMetadata(
        file_type_code=code.BOOTH_CONFIG,
        path=path,
        owner_user_name=None,
        owner_group_name=None,
        permissions=None,
        is_binary=False,
    ),
    # Booth authkey: binary secret, kept private (0600).
    code.BOOTH_KEY: lambda path: FileMetadata(
        file_type_code=code.BOOTH_KEY,
        path=path,
        owner_user_name=None,
        owner_group_name=None,
        permissions=0o600,
        is_binary=True,
    ),
    # Corosync config: world-readable text file.
    code.COROSYNC_CONF: lambda path: FileMetadata(
        file_type_code=code.COROSYNC_CONF,
        path=path,
        owner_user_name=None,
        owner_group_name=None,
        permissions=0o644,
        is_binary=False,
    ),
    # pcs known-hosts: fixed per-user location, private (0600).
    code.PCS_KNOWN_HOSTS: lambda: FileMetadata(
        file_type_code=code.PCS_KNOWN_HOSTS,
        path=os.path.join(os.path.expanduser("~/.pcs"), "known-hosts"),
        owner_user_name=None,
        owner_group_name=None,
        permissions=0o600,
        is_binary=False,
    ),
}
def for_file_type(file_type_code, *args, **kwargs):
    """Build the FileMetadata for *file_type_code*, forwarding extra args
    (e.g. the file path) to the registered factory."""
    factory = _metadata[file_type_code]
    return factory(*args, **kwargs)
|
mahabs/nitro | nssrc/com/citrix/netscaler/nitro/resource/stat/qos/qos_stats.py | Python | apache-2.0 | 18,855 | 0.052453 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class qos_stats(base_resource) :
def __init__(self) :
self._clearstats = ""
self._ipcmessagessent = 0
self._ipcmessagessentrate = 0
self._ipcmessagesfailed = 0
self._ipcmessagesfailedrate = 0
self._ipcmessagesreceived = 0
self._ipcmessagesreceivedrate = 0
self._ipcpe2qossent = 0
self._ipcpe2qossentrate = 0
self._ipcpe2qosfailed = 0
self._ipcpe2qosfailedrate = 0
self._ipcpe2qosreceived = 0
self._ipcpe2qosreceivedrate = 0
self._qosbytesdropped = 0
self._qosbytesdroppedrate = 0
self._qosbytessentnotclassified = 0
self._qosbytessentnotclassifiedrate = 0
self._qosbytesdroppednoconnection = 0
self._qosbytesdroppednoconnectionrate = 0
self._qosinputpackets = 0
self._qosinputpacketsrate = 0
self._qosoutputpackets = 0
self._qosoutputpacketsrate = 0
self._qosdroppackets = 0
self._qosdroppacketsrate = 0
self._qosrewritemacs = 0
self._qosrewritemacsrate = 0
self._qospacketsunclassified = 0
self._qospacketsunclassifiedrate = 0
self._qospacketsclassified = 0
self._qospacketsclassifiedrate = 0
self._qoslearnedmac = 0
self._qoslearnedmacrate = 0
self._qosinputbytes = 0
self._qosinputbytesrate = 0
self._qosoutputbytes = 0
self._qosoutputbytesrate = 0
self._qosfreeheldlist = 0
self._qoslink00sent = 0
self._qoslink00sentrate = 0
self._qoslink00drop = 0
self._qoslink00droprate = 0
self._qoslink01sent = 0
self._qoslink01sentrate = 0
self._qoslink01drop = 0
self._qoslink01droprate = 0
self._qoslink02sent = 0
self._qoslink02sentrate = 0
self._qoslink02drop = 0
self._qoslink02droprate = 0
self._qoslink03sent = 0
self._qoslink03sentrate = 0
self._qoslink03drop = 0
self._qoslink03droprate = 0
self._qoslink04sent = 0
self._qoslink04sentrate = 0
self._qoslink04drop = 0
self._qoslink04droprate = 0
self._qoslink05sent = 0
self._qoslink05sentrate = 0
self._qoslink05drop = 0
self._qoslink05droprate = 0
self._qoslink06sent = 0
self._qoslink06sentrate = 0
self._qoslink06drop = 0
self._qoslink06droprate = 0
self._qoslink07sent = 0
self._qoslink07sentrate = 0
self._qoslink07drop = 0
self._qoslink07droprate = 0
self._qoslink08sent = 0
self._qoslink08sentrate = 0
self._qoslink08drop = 0
self._qoslink08droprate = 0
self._qoslink09sent = 0
self._qoslink09sentrate = 0
self._qoslink09drop = 0
self._qoslink09droprate = 0
self._qoslink10sent = 0
self._qoslink10sentrate = 0
self._qoslink10drop = 0
self._qoslink10droprate = 0
@property
def clearstats(self) :
"""Clear the statsistics / counters.<br/>Possible values = basic, full.
"""
try :
return self._clearstats
except Exception as e:
raise e
@clearstats.setter
def clearstats(self, clearstats) :
"""Clear the statsistics / counters
"""
try :
self._clearstats = clearstats
except Exception as e:
raise e
@property
def qoslink06droprate(self) :
"""Rate (/s) counter for qoslink06drop.
"""
try :
return self._qoslink06droprate
except Exception as e:
raise e
@property
def ipcpe2qosfailed(self) :
"""IPC messages failed to send to qos system.
"""
try :
return self._ipcpe2qosfailed
except Exception as e:
raise e
@property
def qoslink02sent(self) :
"""QoS bytes sent on Link 02.
"""
try :
return self._qoslink02sent
except Exception as e:
raise e
@property
def qoslink10sent(self) :
"""QoS bytes sent on Link 10.
"""
try :
return self._qoslink10sent
except Exception as e:
raise e
@property
de | f ipcmessagessentrate(self) :
"""Rate (/s) counter for ipcmessagessent.
"""
try :
return self._ipcmessages | sentrate
except Exception as e:
raise e
@property
def qoslink02sentrate(self) :
"""Rate (/s) counter for qoslink02sent.
"""
try :
return self._qoslink02sentrate
except Exception as e:
raise e
@property
def qosrewritemacsrate(self) :
"""Rate (/s) counter for qosrewritemacs.
"""
try :
return self._qosrewritemacsrate
except Exception as e:
raise e
@property
def qoslink08sentrate(self) :
"""Rate (/s) counter for qoslink08sent.
"""
try :
return self._qoslink08sentrate
except Exception as e:
raise e
@property
def qoslink04drop(self) :
"""QoS bytes dropped on Link 04.
"""
try :
return self._qoslink04drop
except Exception as e:
raise e
@property
def qoslink01droprate(self) :
"""Rate (/s) counter for qoslink01drop.
"""
try :
return self._qoslink01droprate
except Exception as e:
raise e
@property
def qosoutputbytesrate(self) :
"""Rate (/s) counter for qosoutputbytes.
"""
try :
return self._qosoutputbytesrate
except Exception as e:
raise e
@property
def qoslink08drop(self) :
"""QoS bytes dropped on Link 08.
"""
try :
return self._qoslink08drop
except Exception as e:
raise e
@property
def qoslink05sent(self) :
"""QoS bytes sent on Link 05.
"""
try :
return self._qoslink05sent
except Exception as e:
raise e
@property
def qoslink10droprate(self) :
"""Rate (/s) counter for qoslink10drop.
"""
try :
return self._qoslink10droprate
except Exception as e:
raise e
@property
def qoslink00drop(self) :
"""QoS bytes dropped on Link 00.
"""
try :
return self._qoslink00drop
except Exception as e:
raise e
@property
def ipcmessagesfailedrate(self) :
"""Rate (/s) counter for ipcmessagesfailed.
"""
try :
return self._ipcmessagesfailedrate
except Exception as e:
raise e
@property
def qoslink05droprate(self) :
"""Rate (/s) counter for qoslink05drop.
"""
try :
return self._qoslink05droprate
except Exception as e:
raise e
@property
def qoslink08droprate(self) :
"""Rate (/s) counter for qoslink08drop.
"""
try :
return self._qoslink08droprate
except Exception as e:
raise e
@property
def qosbytessentnotclassifiedrate(self) :
"""Rate (/s) counter for qosbytessentnotclassified.
"""
try :
return self._qosbytessentnotclassifiedrate
except Exception as e:
raise e
@property
def qoslink03droprate(self) :
"""Rate (/s) counter for qoslink03drop.
"""
try :
return self._qoslink03droprate
except Exception as e:
raise e
@property
def qoslink09sent(self) :
"""QoS bytes sent on Link 09.
"""
try :
return self._qoslink09sent
except Exception as e:
raise e
@property
def qospacketsunclassifiedrate(self) :
"""Rate (/s) counter for qospacketsunclassified.
"""
try :
return self._qospacketsunclassifiedrate
except Exception as e:
raise e
@property
def qosfreeheldlist(self) :
"""No. more packets QoS can hold onto.
"""
try :
return self._qosfreeheldlist
except Exception as e:
raise e
@property
def qospacketsclassified(self) :
"""Number of packets with classification.
"""
try :
return self._qospacketsclassified
except Exception as e:
raise e
@property
def qosbytesdroppednoconnectionrate(self) :
"""Rate (/s) counter for qosbytesdroppednoconnection.
"""
try :
return self._qosbytesdroppednoconnectionrate
except Exception as e:
raise e
@property
def qoslink01sentrate(self) :
"""Rate (/s) counter fo |
swindonmakers/axCutHost | backend/filereaders/svg_tag_reader.py | Python | gpl-3.0 | 9,861 | 0.003144 |
__author__ = 'Stefan Hechenberger <stefan@nortd.com>'
import re
import math
import logging
from .utilities import matrixMult, parseFloats
from .svg_attribute_reader import SVGAttributeReader
from .svg_path_reader import SVGPathReader
log = logging.getLogger("svg_reader")
class SVGTagReader:
def __init__(self, svgreader):
# init helper for attribute reading
self._attribReader = SVGAttributeReader(svgreader)
# init helper for path handling
self._pathReader = SVGPathReader(svgreader)
self._handlers = {
'g': self.g,
'path': self.path,
'polygon': self.polygon,
'polyline': self.polyline,
'rect': self.rect,
'line': self.line,
'circle': self.circle,
'ellipse': self.ellipse,
'image': self.image,
'defs': self.defs,
'style': self.style,
'text': True # text is special, see read_tag func
}
self.re_findall_lasertags = re.compile('=pass([0-9]+):([0-9]*)(mm\/min)?:([0-9]*)(%)?(:#[a-fA-F0-9]{6})?(:#[a-fA-F0-9]{6})?(:#[a-fA-F0-9]{6})?(:#[a-fA-F0-9]{6})?(:#[a-fA-F0-9]{6})?(:#[a-fA-F0-9]{6})?=').findall
def read_tag(self, tag, node):
"""Read a tag.
Any tag name that is in self._handlers will be handled.
Similarly any attribute name in self._attribReader._handlers
will be parsed. Both tag and attribute results are stored in
node.
Any path data is ultimately handled by
self._pathReader.add_path(...). For any geometry that is not
already in the 'd' attribute of a 'path' tag this class
converts it first to this format and then delegates it to
add_path(...).
"""
tagName = self._get_tag(tag)
if tagName in self._handlers:
# log.debug("reading tag: " + tagName)
# parse own attributes and overwrite in node
for attr,value in tag.attrib.items():
# log.debug("considering attrib: " + attr)
self._attribReader.read_attrib(node, attr, value)
# accumulate transformations
node['xformToWorld'] = matrixMult(node['xformToWorld'], node['xform'])
# read tag
if (tagName != 'text'):
self._handlers[tagName](node)
else:
self.find_cut_settings_tags(tag, node)
def has_handler(self, tag):
tagName = self._get_tag(tag)
return bool(tagName in self._handlers)
def g(self, node):
# http://www.w3.org/TR/SVG11/struct.html#Groups
# has transform and style attributes
pass
def path(self, node):
# http://www.w3.org/TR/SVG11/paths.html
# has transform and style attributes
if self._has_valid_stroke(node):
d = node.get("d")
self._pathReader.add_path(d, node)
def polygon(self, node):
# http://www.w3.org/TR/SVG11/shapes.html#PolygonElement
# has transform and style attributes
if self._has_valid_stroke(node):
d = ['M'] + node['points'] + ['z']
node['points'] = None
self._pathReader.add_path(d, node)
def polyline(self, node):
# http://www.w3.org/TR/SVG11/shapes.html#PolylineElement
# has transform and style attributes
if self._has_valid_stroke(node):
d = ['M'] + node['points']
node['points'] = None
self._pathReader.add_path(d, node)
def rect(self, node):
# http://www.w3.org/TR/SVG11/shapes.html#RectElement
# has tran | sform and style attributes
if self._has_valid_stroke(node):
w = node.get('width') or 0.0
h = | node.get('height') or 0.0
x = node.get('x') or 0.0
y = node.get('y') or 0.0
rx = node.get('rx')
ry = node.get('ry')
if rx is None and ry is None: # no rounded corners
d = ['M', x, y, 'h', w, 'v', h, 'h', -w, 'z']
self._pathReader.add_path(d, node)
else: # rounded corners
if rx is None:
rx = ry
elif ry is None:
ry = rx
if rx > w/2.0:
rx = w/2.0
if ry > h/2.0:
rx = h/2.0
if rx < 0.0: rx *=-1
if ry < 0.0: ry *=-1
d = ['M', x+rx , y ,
'h', w-2*rx,
'c', rx, 0.0, rx, ry, rx, ry,
'v', h-2*ry,
'c', 0.0, ry, -rx, ry, -rx, ry,
'h', -w+2*rx,
'c', -rx, 0.0, -rx, -ry, -rx, -ry,
'v', -h+2*ry,
'c', 0.0, 0.0, 0.0, -ry, rx, -ry,
'z']
self._pathReader.add_path(d, node)
def line(self, node):
# http://www.w3.org/TR/SVG11/shapes.html#LineElement
# has transform and style attributes
if self._has_valid_stroke(node):
x1 = node.get('x1') or 0.0
y1 = node.get('y1') or 0.0
x2 = node.get('x2') or 0.0
y2 = node.get('y2') or 0.0
d = ['M', x1, y1, 'L', x2, y2]
self._pathReader.add_path(d, node)
def circle(self, node):
# http://www.w3.org/TR/SVG11/shapes.html#CircleElement
# has transform and style attributes
if self._has_valid_stroke(node):
r = node.get('r')
cx = node.get('cx') or 0.0
cy = node.get('cy') or 0.0
if r > 0.0:
d = ['M', cx-r, cy,
'A', r, r, 0.0, 0.0, 0.0, cx, cy+r,
'A', r, r, 0.0, 0.0, 0.0, cx+r, cy,
'A', r, r, 0.0, 0.0, 0.0, cx, cy-r,
'A', r, r, 0.0, 0.0, 0.0, cx-r, cy,
'Z']
self._pathReader.add_path(d, node)
def ellipse(self, node):
# has transform and style attributes
if self._has_valid_stroke(node):
rx = node.get('rx')
ry = node.get('ry')
cx = node.get('cx') or 0.0
cy = node.get('cy') or 0.0
if rx > 0.0 and ry > 0.0:
d = ['M', cx-rx, cy,
'A', rx, ry, 0.0, 0.0, 0.0, cx, cy+ry,
'A', rx, ry, 0.0, 0.0, 0.0, cx+rx, cy,
'A', rx, ry, 0.0, 0.0, 0.0, cx, cy-ry,
'A', rx, ry, 0.0, 0.0, 0.0, cx-rx, cy,
'Z']
self._pathReader.add_path(d, node)
def image(self, node):
# not supported
# has transform and style attributes
log.warn("'image' tag is not supported, ignored")
def defs(self, node):
# not supported
# http://www.w3.org/TR/SVG11/struct.html#Head
# has transform and style attributes
log.warn("'defs' tag is not supported, ignored")
def style(self, node):
# not supported: embedded style sheets
# http://www.w3.org/TR/SVG11/styling.html#StyleElement
# instead presentation attributes and the 'style' attribute
log.warn("'style' tag is not supported, use presentation \
attributes or the style attribute instead")
def find_cut_settings_tags(self, tag, node):
# Parse special text used for setting lasersaur cut
# parameters from within the SVG file.
# Any text in the SVG file within a 'text' tag (and one level deep)
# with the following format gets read.
# =pass1:550mm/min:90%:#ff0000=
# =pass2:550:90:#00ff00:#ffff00:#000000=
# =pass3:1200mm/min:80%:#00000=
# =pass4:1200mm/min:80%=
# =pass5:4000mm/min:100%=
# =pass6:4000:100=
text_accum = [tag.text or '']
# # search one level deep
for child in tag:
text_accum.append(child.text or '')
text_accum = ' '.join(text_accum)
matches = self.re_findall_lasertags(text_accum)
# Something like: =pass12:2550:100 |
mxmzdlv/pybigquery | tests/unit/test_select.py | Python | mit | 15,799 | 0.002469 | # Copyright (c) 2021 The sqlalchemy-bigquery Authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CL | AIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import datetime
from decimal import Decimal
import packaging.version
import pytest |
import sqlalchemy
import sqlalchemy_bigquery
from conftest import (
setup_table,
sqlalchemy_version,
sqlalchemy_1_3_or_higher,
sqlalchemy_1_4_or_higher,
sqlalchemy_before_1_4,
)
def test_labels_not_forced(faux_conn):
    """Selecting one column yields its bare name, not a dialect-forced label."""
    table = setup_table(faux_conn, "t", sqlalchemy.Column("id", sqlalchemy.Integer))
    rows = faux_conn.execute(sqlalchemy.select([table.c.id]))
    assert rows.keys() == ["id"]  # Look! Just the column name!
def dtrepr(v):
    """Render a date/time value as a BigQuery literal, e.g. DATE '2021-02-03'."""
    type_name = type(v).__name__.upper()
    return f"{type_name} {str(v)!r}"
@pytest.mark.parametrize(
"type_,val,btype,vrep",
[
(sqlalchemy.String, "myString", "STRING", repr),
(sqlalchemy.Text, "myText", "STRING", repr),
(sqlalchemy.Unicode, "myUnicode", "STRING", repr),
(sqlalchemy.UnicodeText, "myUnicodeText", "STRING", repr),
(sqlalchemy.Integer, 424242, "INT64", repr),
(sqlalchemy.SmallInteger, 42, "INT64", repr),
(sqlalchemy.BigInteger, 1 << 60, "INT64", repr),
(sqlalchemy.Numeric, Decimal(42), "NUMERIC", str),
(sqlalchemy.Float, 4.2, "FLOAT64", repr),
(
sqlalchemy.DateTime,
datetime.datetime(2021, 2, 3, 4, 5, 6, 123456),
"DATETIME",
dtrepr,
),
(sqlalchemy.Date, datetime.date(2021, 2, 3), "DATE", dtrepr),
(sqlalchemy.Time, datetime.time(4, 5, 6, 123456), "TIME", dtrepr),
(sqlalchemy.Boolean, True, "BOOL", "true"),
(sqlalchemy.REAL, 1.42, "FLOAT64", repr),
(sqlalchemy.FLOAT, 0.42, "FLOAT64", repr),
(sqlalchemy.NUMERIC, Decimal(4.25), "NUMERIC", str),
(sqlalchemy.NUMERIC(39), Decimal(4.25), "BIGNUMERIC(39)", str),
(sqlalchemy.NUMERIC(30, 10), Decimal(4.25), "BIGNUMERIC(30, 10)", str),
(sqlalchemy.NUMERIC(39, 10), Decimal(4.25), "BIGNUMERIC(39, 10)", str),
(sqlalchemy.DECIMAL, Decimal(0.25), "NUMERIC", str),
(sqlalchemy.DECIMAL(39), Decimal(4.25), "BIGNUMERIC(39)", str),
(sqlalchemy.DECIMAL(30, 10), Decimal(4.25), "BIGNUMERIC(30, 10)", str),
(sqlalchemy.DECIMAL(39, 10), Decimal(4.25), "BIGNUMERIC(39, 10)", str),
(sqlalchemy.INTEGER, 434343, "INT64", repr),
(sqlalchemy.INT, 444444, "INT64", repr),
(sqlalchemy.SMALLINT, 43, "INT64", repr),
(sqlalchemy.BIGINT, 1 << 61, "INT64", repr),
(
sqlalchemy.TIMESTAMP,
datetime.datetime(2021, 2, 3, 4, 5, 7, 123456),
"TIMESTAMP",
lambda v: f"TIMESTAMP {repr(str(v))}",
),
(
sqlalchemy.DATETIME,
datetime.datetime(2021, 2, 3, 4, 5, 8, 123456),
"DATETIME",
dtrepr,
),
(sqlalchemy.DATE, datetime.date(2021, 2, 4), "DATE", dtrepr),
(sqlalchemy.TIME, datetime.time(4, 5, 7, 123456), "TIME", dtrepr),
(sqlalchemy.TIME, datetime.time(4, 5, 7), "TIME", dtrepr),
(sqlalchemy.TEXT, "myTEXT", "STRING", repr),
(sqlalchemy.VARCHAR, "myVARCHAR", "STRING", repr),
(sqlalchemy.NVARCHAR, "myNVARCHAR", "STRING", repr),
(sqlalchemy.VARCHAR(42), "myVARCHAR", "STRING(42)", repr),
(sqlalchemy.NVARCHAR(42), "myNVARCHAR", "STRING(42)", repr),
(sqlalchemy.CHAR, "myCHAR", "STRING", repr),
(sqlalchemy.NCHAR, "myNCHAR", "STRING", repr),
(sqlalchemy.BINARY, b"myBINARY", "BYTES", repr),
(sqlalchemy.VARBINARY, b"myVARBINARY", "BYTES", repr),
(sqlalchemy.VARBINARY(42), b"myVARBINARY", "BYTES(42)", repr),
(sqlalchemy.BOOLEAN, False, "BOOL", "false"),
(sqlalchemy.ARRAY(sqlalchemy.Integer), [1, 2, 3], "ARRAY<INT64>", repr),
(
sqlalchemy.ARRAY(sqlalchemy.DATETIME),
[
datetime.datetime(2021, 2, 3, 4, 5, 6),
datetime.datetime(2021, 2, 3, 4, 5, 7, 123456),
datetime.datetime(2021, 2, 3, 4, 5, 8, 123456),
],
"ARRAY<DATETIME>",
lambda a: "[" + ", ".join(dtrepr(v) for v in a) + "]",
),
],
)
def test_typed_parameters(faux_conn, type_, val, btype, vrep):
col_name = "foo"
table = setup_table(faux_conn, "t", sqlalchemy.Column(col_name, type_))
assert faux_conn.test_data["execute"].pop()[0].strip() == (
f"CREATE TABLE `t` (\n" f"\t`{col_name}` {btype}\n" f")"
)
faux_conn.execute(table.insert().values(**{col_name: val}))
ptype = btype[: btype.index("(")] if "(" in btype else btype
assert faux_conn.test_data["execute"][-1] == (
f"INSERT INTO `t` (`{col_name}`) VALUES (%({col_name}:{ptype})s)",
{col_name: val},
)
faux_conn.execute(
table.insert()
.values(**{col_name: sqlalchemy.literal(val, type_)})
.compile(
dialect=sqlalchemy_bigquery.BigQueryDialect(),
compile_kwargs=dict(literal_binds=True),
)
)
if not isinstance(vrep, str):
vrep = vrep(val)
assert faux_conn.test_data["execute"][-1] == (
f"INSERT INTO `t` (`{col_name}`) VALUES ({vrep})",
{},
)
assert list(map(list, faux_conn.execute(sqlalchemy.select([table])))) == [[val]] * 2
assert faux_conn.test_data["execute"][-1][0] == "SELECT `t`.`foo` \nFROM `t`"
assert (
list(
map(
list,
faux_conn.execute(sqlalchemy.select([table.c.foo], use_labels=True)),
)
)
== [[val]] * 2
)
assert faux_conn.test_data["execute"][-1][0] == (
"SELECT `t`.`foo` AS `t_foo` \nFROM `t`"
)
def test_select_struct(faux_conn, metadata):
from sqlalchemy_bigquery import STRUCT
table = sqlalchemy.Table(
"t", metadata, sqlalchemy.Column("x", STRUCT(y=sqlalchemy.Integer)),
)
faux_conn.ex("create table t (x RECORD)")
faux_conn.ex("""insert into t values ('{"y": 1}')""")
row = list(faux_conn.execute(sqlalchemy.select([table])))[0]
# We expect the raw string, because sqlite3, unlike BigQuery
# doesn't deserialize for us.
assert row.x == '{"y": 1}'
def test_select_label_starts_w_digit(faux_conn):
# Make sure label names are legal identifiers
faux_conn.execute(sqlalchemy.select([sqlalchemy.literal(1).label("2foo")]))
assert (
faux_conn.test_data["execute"][-1][0] == "SELECT %(param_1:INT64)s AS `_2foo`"
)
def test_force_quote(faux_conn):
from sqlalchemy.sql.elements import quoted_name
table = setup_table(
faux_conn, "t", sqlalchemy.Column(quoted_name("foo", True), sqlalchemy.Integer),
)
faux_conn.execute(sqlalchemy.select([table]))
assert faux_conn.test_data["execute"][-1][0] == ("SELECT `t`.`foo` \nFROM `t`")
def test_disable_quote(faux_conn):
from sqlalchemy.sql.elements import quoted_name
table = setup_table(
faux_conn,
"t",
sqlalchemy.Column(quoted_name("foo", False), sqlalchemy.Integer),
)
faux_conn.execute(sqlalchemy.select([table]))
assert fa |
elainenaomi/sciwonc-dataflow-examples | dissertation2017/Experiment 2/instances/7_2_wikiflow_1sh_1s_noannot_wmj/init_0/DataStoreInit.py | Python | gpl-3.0 | 1,335 | 0.006742 | #!/usr/bin/env python
from pymongo import MongoClient
import pymongo
HOST = "wfSciwoncWiki:enw1989@172.31.2.76:27001/?authSource=admin"
c = MongoClient('mongodb://'+HOST)
dbname = "wiki"
sessions = "sessions"
contributors = "contributors"
user_sessions = "user_sessions"
top_sessio | ns = "top_sessions"
c[dbname].drop_collection(contributors)
c[dbname].create_collection(contributors)
c[dbname].drop_collection(user_sessions)
c[dbname].create_collection(user_sessions)
c[dbname].drop_collection(top_sessions)
c[dbname]. | create_collection(top_sessions)
db = c[dbname]
sessions_col = db[sessions]
contributors_col = db[contributors]
user_sessions_col = db[user_sessions]
top_sessions_col = db[top_sessions]
sessions_col.create_index([("contributor_username", pymongo.ASCENDING)])
sessions_col.create_index([("timestamp", pymongo.ASCENDING)])
user_sessions_col.create_index([("timestamp", pymongo.ASCENDING)])
#sessions_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
contributors_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
user_sessions_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
top_sessions_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
|
dakrauth/picker | picker/urls/picks.py | Python | mit | 1,495 | 0.00602 | from django.urls import include, re_path
from .. import views
management_urls = [
re_path(r'^$', views.ManagementHome.as_view(), name='picker-manage'),
re_path(r'^game/(\d+)/$', views.ManageGame.as_view(), name='picker-manage-game'),
re_path(r'^(?P<season>\d{4})/', include( | [
re_path(r'^$', views.ManageSeason.as_view(), name='picker-manage-season'),
re_path(r'^(-?\d+)/$', views.ManageWeek.as_view(), name='picker-manage-week'),
])),
]
picks_urls = [
re_path(r'^$', views | .Picks.as_view(), name='picker-picks'),
re_path(r'^(?P<season>\d{4})/', include([
re_path(r'^$', views.PicksBySeason.as_view(), name='picker-season-picks'),
re_path(r'^(-?\d+)/$', views.PicksByGameset.as_view(), name='picker-picks-sequence'),
])),
]
results_urls = [
re_path(r'^$', views.Results.as_view(), name='picker-results'),
re_path(r'^(?P<season>\d{4})/', include([
re_path(r'^$', views.ResultsBySeason.as_view(), name='picker-season-results'),
re_path(r'^(-?\d+)/$', views.ResultsByWeek.as_view(), name='picker-game-sequence'),
])),
]
roster_urls = [
re_path(r'^$', views.RosterRedirect.as_view(), name='picker-roster-base'),
re_path(r'^(\d+)/', include([
re_path(r'^$', views.Roster.as_view(), name='picker-roster'),
re_path(r'^(\d{4})/$', views.Roster.as_view(), name='picker-season-roster'),
re_path(r'^p/(\w+)/$', views.RosterProfile.as_view(), name='picker-roster-profile'),
])),
]
|
gonicus/gosa | backend/src/tests/backend/components/test_jsonrpc_objects.py | Python | lgpl-2.1 | 11,005 | 0.003635 | # This file is part of the GOsa framework.
#
# http://gosa-project.org
#
# Copyright:
# (C) 2016 GONICUS GmbH, Germany, http://www.gonicus.de
#
# See the LICENSE file in the project's top-level directory for details.
from unittest import mock, TestCase
import datetime
from gosa.backend.components.jsonrpc_objects import JSONRPCObjectMapper, ObjectRegistry
from gosa.common.components import PluginRegistry
from tests.GosaTestCase import *
@slow
class JSONRPCObjectMapperTestCase(TestCase):
refs = []
def setUp(self):
super(JSONRPCObjectMapperTestCase, self).setUp()
self.mapper = JSONRPCObjectMapper()
self.mocked_resolver = mock.MagicMock()
self.mocked_resolver.return_value.check.return_value = True
self.mocked_resolver.return_value.isAdmin.return_value = False
self.patcher = mock.patch.dict(PluginRegistry.modules, {'ACLResolver': self.mocked_resolver})
self.patcher.start()
self.refs = []
def tearDown(self):
for ref in self.refs:
try:
self.mapper.closeObject('admin', ref)
except ValueError:
pass
self.patcher.stop()
super(JSONRPCObjectMapperTestCase, self).tearDown()
def test_listObjectOIDs(self):
res = self.mapper.listObjectOIDs()
assert 'object' in res
assert 'workflow' in res
assert len(res) == 2
def openObject(self, *args, **kwargs):
res = self.mapper.openObject(*args, **kwargs)
self.refs.append(res['__jsonclass__'][1][1])
return res
def reloadObject(self, *args, **kwargs):
res = self.mapper.reloadObject(*args, **kwargs)
self.refs.remove(args[1])
self.refs.append(res['__jsonclass__'][1][1])
return res
def closeObject(self, user, ref):
res = self.mapper.closeObject(user, ref)
self.refs.remove(ref)
return res
def test_openObject(self):
res = self.openObject('admin', None, 'object', 'dc=example,dc=net')
assert res['dc'] == "example"
with pytest.raises(Exception):
self.openObject('admin', None, 'object', 'dc=example,dc=net')
def test_closeObject(self):
res = self.openObject('admin', None, 'object', 'dc=example,dc=net')
with pytest.raises(ValueError):
self.closeObject('admin', 'unknown')
with pytest.raises(ValueError):
self.closeObject('someone else', res["__jsonclass__"][1][1])
self.closeObject('admin', res["__jsonclass__"][1][1])
# as a workaround for checking if its not loaded anymore we try to reload it
with pytest.raises(ValueError):
self.reloadObject('admin', res["__jsonclass__"][1][1])
def test_continueObjectEditing(self):
res = self.openObject('admin', 'session-uuid', 'object', 'dc=example,dc=net')
with pytest.raises(ValueError):
self.mapper.continueObjectEditing('admin', 'unknown_ref')
with pytest.raises(ValueError):
self.mapper.continueObjectEditing('other_user', res["__jsonclass__"][1][1])
ref = self.mapper._JSONRPCObjectMapper__get_ref(res["__jsonclass__"][1][1])
before = ref['last_interaction'] if 'last_interaction' in ref else ref['created']
self.mapper.continueObjectEditing('admin', res["__jsonclass__"][1][1])
assert before != ref['last_interaction']
ref['mark_for_deletion'] = datetime.datetime.now() + datetime.timedelta(seconds=29)
self.mapper.continueObjectEditing('admin', res["__jsonclass__"][1][1])
assert 'mark_for_deletion' not in ref
def test_checkObjectRef(self):
res = self.openObject('admin', 'session-uuid', 'object', 'dc=example,dc=net')
ref = self.mapper._JSONRPCObjectMapper__get_ref(res["__jsonclass__"][1][1])
assert self.mapper.checkObjectRef('admin', 'new-session-uuid', res["__jsonclass__"][1][1]) is True
assert ref['session_id'] == "new-session-uuid"
self.closeObject('admin', res["__jsonclass__"][1][1])
assert self.mapper.checkObjectRef('admin', 'new-session-uuid', res["__jsonclass__"][1][1]) is False
def test_getObjectProperty(self):
res = self.openObject('admin', None, 'object', 'dc=example,dc=net')
ref = res["__jsonclass__"][1][1]
with pytest.raises(ValueError):
self.mapper.getObjectProperty('admin', 'unknown', 'prop')
with pytest.raises(ValueError):
self.mapper.getObjectProperty('admin', ref, 'prop')
with pytest.raises(ValueError):
self.mapper.getObjectProperty('someone else', ref, 'description')
assert self.mapper.getObjectProperty('admin', ref, 'description') == "Example"
def test_setObjectProperty(self):
res = self.openObject('admin', "session-uuid", 'object', 'cn=Frank Reich,ou=people,dc=example,dc=net')
ref = res["__jsonclass__"][1][1]
with pytest.raises(ValueError):
self.mapper.setObjectProperty('admin', 'unknown', 'prop', 'val')
with pytest.raises(ValueError):
self.mapper.setObjectProperty('admin', ref, 'prop', 'val')
with pytest.raises(ValueError):
self.mapper.setObjectProperty('someone else', ref, 'description', 'val')
objdesc = self.mapper._JSONRPCObjectMapper__get_ref(res["__jsonclass__"][1][1])
objdesc['mark_for_deletion'] = datetime.datetime.now() + datetime.timedelta(seconds=29)
self.mapper.setObjectProperty('admin', ref, 'uid', 'val')
assert self.mapper.getObjectProperty('admin', ref, 'uid') == "val"
assert 'mark_for_deletion' not in objdesc
# undo
self.mapper.setObjectProperty('admin', ref, 'uid', 'admin')
assert self.mapper.getObjectProperty('admin', ref, 'uid') == "admin"
def test_reloadObjectProperty(self):
res = self.openObject('admin', None, 'object', 'dc=example,dc=net')
uuid = res['uuid']
ref = res["__jsonclass__"][1][1]
with pytest.raises(ValueError):
self.reloadObject('someone else', ref)
res = self.reloadObject('admin', ref)
assert uuid == res['uuid']
assert ref != res["__jsonclass__"][1][1]
def test_dispatchObjectMethod(self):
res = self.openObject('admin', None, 'object', 'cn=Frank Reich,ou=people,dc=example,dc=net')
ref = res["__jsonclass__"][1][1]
with pytest.raises(ValueError):
self.mapper.dispatchObjectMethod('admin', None, 'wrong_ref', 'lock')
with pytest.raises(ValueError):
self.mapper.dispatchObjectMethod('admin', None, ref, 'wrong_method')
with pytest.raises(ValueError):
self.mapper.dispatchObjectMethod('someone_else', None, ref, 'lock')
# mock a method in the object
with mock.patch('gosa.backend.plugins.password.manager.ObjectProxy') as m:
user = m.return_value
user.passwordMethod = "MD5"
self.mapper.dispatchObjectMethod('admin', None, ref, 'changePassword', 'Test')
assert user.userPassword
asse | rt user.commit.called
def test_diffObject(self):
assert self.mapper.diffObject('admin', 'unkown_ref') is None
res = self.openObject('admin', None, 'object', 'cn=Frank Reich,ou=people,dc=example,dc=net')
ref = res["__jsonclass__"][1][1]
with pytest.raises(ValueError):
self.mapper.diffObject('someone_el | se', ref)
self.mapper.setObjectProperty('admin', ref, 'uid', 'val')
delta = self.mapper.diffObject('admin', ref)
assert 'uid' in delta['attributes']['changed']
def test_removeObject(self):
res = self.openObject('admin', None, 'object', 'cn=Frank Reich,ou=people,dc=example,dc=net')
ref = res["__jsonclass__"][1][1]
with pytest.raises(Exception):
self.mapper.removeObject('admin','object', 'cn=Frank Reich,ou=people,dc=example,dc=net')
self.closeObject('admin', ref)
with mock.patch.dict(ObjectRegistry.objects['object'], {'object': mock.MagicMock()}):
mockedObject = ObjectRegistry |
kslundberg/pants | contrib/buildgen/src/python/pants/contrib/buildgen/build_file_manipulator.py | Python | apache-2.0 | 20,160 | 0.007192 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import ast
import logging
import re
import sys
from difflib import unified_diff
from pants.base.address import Address, BuildFileAddress
logger = logging.getLogger(__name__)
class BuildTargetParseError(Exception): pass
class DependencySpec(object):
"""A representation of a single dependency spec, including comments around it.
This is a helper class to aid in deduplicating, sorting, forcing, and formatting
dependency spec | s in a BUILD target's dependencies section.
"""
def __init__(self, spec, comments_above=None, side_comment=None):
self.spec = spec
self.comments_above = comments_above or []
self.side_comment = side_comment
def comments | _above_lines(self):
for line in self.comments_above:
line = line.strip()
if line:
yield '# {line}'.format(line=line)
else:
yield ''
def indented_lines(self, lines, indent=4):
indent_spaces = ' ' * indent
for line in lines:
line = line.strip()
if not line:
yield ''
else:
yield '{indent_spaces}{line}'.format(indent_spaces=indent_spaces, line=line)
def lines(self, indent=4):
spec_line = "'{0}',".format(self.spec)
if self.side_comment is not None:
spec_line = '{spec_line} # {comment}'.format(spec_line=spec_line,
comment=self.side_comment)
comments_above = list(self.comments_above_lines())
lines = comments_above + [spec_line]
return list(self.indented_lines(lines, indent))
def has_comment(self):
# If all of the comments above are whitespace, don't consider this forced,
# but keep the whitespace.
return bool(any(self.comments_above_lines()) or self.side_comment)
def __repr__(self):
return '\n'.join(self.lines())
class BuildFileManipulator(object):
"""A class to load, represent, and change the dependencies of a given target.
Use BuildFileManipulator.load(...) for construction, rather than constructing it directly.
"""
@classmethod
def load(cls, build_file, name, target_aliases):
"""A BuildFileManipulator factory class method.
Note that BuildFileManipulator requires a very strict formatting of target declaration.
In particular, it wants to see a newline after `target_type(`, `dependencies = [`, and
the last param to the target constructor before the trailing `)`. There are further
restrictions as well--see the comments below or check out the example targets in
the tests for this class.
:param build_file: A FilesystemBuildFile instance to operate on.
:param name: The name of the target (without the spec path or colon) to operate on.
:target aliases: The callables injected into the build file context that we should treat
as target declarations.
"""
with open(build_file.full_path, 'r') as f:
source = f.read()
source_lines = source.split('\n')
tree = ast.parse(source)
# Since we're not told what the last line of an expression is, we have
# to figure it out based on the start of the expression after it.
# The interval that we consider occupied by a given expression is
# [expr.lineno, next_expr.lineno). For the last expression in the
# file, its end is the number of lines in the file.
# Also note that lineno is 1-indexed, so we subtract 1 from everything.
intervals = [t.lineno - 1 for t in tree.body]
intervals.append(len(source_lines))
# Candidate target declarations
top_level_exprs = [t for t in tree.body if isinstance(t, ast.Expr)]
top_level_calls = [e.value for e in top_level_exprs if isinstance(e.value, ast.Call)]
# Just in case someone is tricky and assigns the result of a target
# declaration to a variable, though in general this is not useful
assigns = [t for t in tree.body if isinstance(t, ast.Assign)]
assigned_calls = [t.value for t in assigns if isinstance(t.value, ast.Call)]
# Final candidate declarations
calls = top_level_calls + assigned_calls
# Filter out calls that don't have a simple name as the function
# i.e. keep `foo()` but not `(some complex expr)()`
calls = [call for call in calls if isinstance(call.func, ast.Name)]
# Now actually get all of the calls to known aliases for targets
# TODO(pl): Log these
target_calls = [call for call in calls if call.func.id in target_aliases]
# We now have enough information to instantiate a BuildFileTarget for
# any one of these, but we're only interested in the one with name `name`
def name_from_call(call):
for keyword in call.keywords:
if keyword.arg == 'name':
if isinstance(keyword.value, ast.Str):
return keyword.value.s
else:
logger.warn('Saw a non-string-literal name argument to a target while '
'looking through {build_file}. Target type was {target_type}.'
'name value was {name_value}'
.format(build_file=build_file,
target_type=call.func.id,
name_value=keyword.value))
raise BuildTargetParseError('Could not find name parameter to target call'
'with target type {target_type}'
.format(target_type=call.func.id))
calls_by_name = dict((name_from_call(call), call) for call in target_calls)
if name not in calls_by_name:
raise BuildTargetParseError('Could not find target named {name} in {build_file}'
.format(name=name, build_file=build_file))
target_call = calls_by_name[name]
# lineno is 1-indexed
target_interval_index = intervals.index(target_call.lineno - 1)
target_start = intervals[target_interval_index]
target_end = intervals[target_interval_index + 1]
def is_whitespace(line):
return line.strip() == ''
def is_comment(line):
return line.strip().startswith('#')
def is_ignored_line(line):
return is_whitespace(line) or is_comment(line)
# Walk the end back so we don't have any trailing whitespace
while is_ignored_line(source_lines[target_end - 1]):
target_end -= 1
target_source_lines = source_lines[target_start:target_end]
# TODO(pl): This would be good logging
# print(astpp.dump(target_call))
# print("Target source lines")
# for line in target_source_lines:
# print(line)
if target_call.args:
raise BuildTargetParseError('Targets cannot be called with non-keyword args. Target was '
'{name} in {build_file}'
.format(name=name, build_file=build_file))
# TODO(pl): This should probably be an assertion. In order for us to have extracted
# this target_call by name, it must have had at least one kwarg (name)
if not target_call.keywords:
raise BuildTargetParseError('Targets cannot have no kwargs. Target type was '
'{target_type} in {build_file}'
.format(target_type=target_call.func.id, build_file=build_file))
if target_call.lineno == target_call.keywords[0].value.lineno:
raise BuildTargetParseError('Arguments to a target cannot be on the same line as the '
'target type. Target type was {target_type} in {build_file} '
'on line number {lineno}.'
.format(target_type=target_call.func.id,
build_file=build_file,
lineno=target_call.lineno))
for keyword in target_call.keywords:
kw_str = keyword.arg
kw_start_line = keyword.value.lineno
source_line = source_lines[kw_start_line - 1]
|
aqualid/aqualid | make/aql_linker.py | Python | mit | 11,736 | 0 | import re
import os.path
import datetime
import base64
import aql
# ==============================================================================
info = aql.get_aql_info()
HEADER = """#!/usr/bin/env python
#
# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT!
#
# Copyright (c) 2011-{year} of the {name} project, site: {url}
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be inclu | ded
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE |
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
""".format(year=datetime.date.today().year,
name=info.name, url=info.url)
# ==============================================================================
AQL_DATE = '_AQL_VERSION_INFO.date = "{date}"'.format(
date=datetime.date.today().isoformat())
# ==============================================================================
MAIN = """
if __name__ == '__main__':
aql_module_globals = globals().copy()
aql_module_name = "aql"
aql_module = imp.new_module(aql_module_name)
aql_module_globals.update( aql_module.__dict__)
aql_module.__dict__.update(aql_module_globals)
sys.modules[aql_module_name] = aql_module
{embedded_tools}
sys.exit(main())
"""
# ==============================================================================
EMBEDDED_TOOLS = '\n _EMBEDDED_TOOLS.append(b"""\n%s""")\n'
# ==============================================================================
class AqlPreprocess (aql.FileBuilder):
split = aql.FileBuilder.split_single
# ----------------------------------------------------------
def get_trace_name(self, source_entities, brief):
return "Preprocess file"
# ----------------------------------------------------------
def get_trace_targets(self, target_entities, brief):
return None
# -----------------------------------------------------------
def build(self, source_entities, targets):
src_file = source_entities[0].get()
empty_re = re.compile(r'^\s*\r*\n', re.MULTILINE)
slash_re = re.compile(r'\\\r*\n', re.MULTILINE)
comments_re = re.compile(r"^\s*#.*$", re.MULTILINE)
all_stmt_re = re.compile(
r"^__all__\s*=\s*\(.+?\)", re.MULTILINE | re.DOTALL)
content = aql.read_text_file(src_file)
content = slash_re.sub("", content)
content = comments_re.sub("", content)
content = all_stmt_re.sub("", content)
# -----------------------------------------------------------
import_re = re.compile(r"^import\s+(.+)$", re.MULTILINE)
std_imports = set()
def import_handler(match, _std_imports=std_imports):
module_name = match.group(1)
_std_imports.add(module_name)
return ""
content = import_re.sub(import_handler, content)
# -----------------------------------------------------------
aql_import_re = re.compile(r"^\s*from\s+(\.?aql.+)\s+import\s+.+$",
re.MULTILINE)
aql_imports = set()
def aql_import_handler(match, _aql_imports=aql_imports):
module_name = match.group(1)
if module_name.startswith('.'):
module_name = os.sep + module_name[1:] + '.py'
else:
module_name = os.sep + \
module_name.replace('.', os.sep) + os.sep
_aql_imports.add(module_name)
return ""
content = aql_import_re.sub(aql_import_handler, content)
# -----------------------------------------------------------
content = empty_re.sub("", content)
target = aql.SimpleEntity(name=src_file,
data=(std_imports, aql_imports, content))
targets.add_target_entity(target)
# ==============================================================================
class AqlLinkCore (aql.FileBuilder):
def __init__(self, options, target):
self.target = self.get_target_path(target, ext='.py')
def get_trace_name(self, source_entities, brief):
return "Link AQL Module"
# ----------------------------------------------------------
def get_target_entities(self, source_entities):
return self.target
# ----------------------------------------------------------
def get_trace_sources(self, source_entities, brief):
return (os.path.basename(src.name) for src in source_entities)
# -----------------------------------------------------------
def replace(self, options, source_entities):
finder = aql.FindFilesBuilder(options,
mask='*.py',
exclude_mask="__init__.py")
core_files = aql.Node(finder, source_entities)
return aql.Node(AqlPreprocess(options), core_files)
# -----------------------------------------------------------
@staticmethod
def _mod_to_files(file2deps, modules):
mod2files = {}
for mod in modules:
files = set()
for file in file2deps:
if file.find(mod) != -1:
files.add(file)
mod2files[mod] = files
return mod2files
# -----------------------------------------------------------
@staticmethod
def _get_dep_to_files(file2deps, mod2files):
dep2files = {}
tmp_file2deps = {}
for file, mods in file2deps.items():
for mod in mods:
files = mod2files[mod]
tmp_file2deps.setdefault(file, set()).update(files)
for f in files:
dep2files.setdefault(f, set()).add(file)
return dep2files, tmp_file2deps
# -----------------------------------------------------------
@staticmethod
def _get_content(files_content, dep2files, file2deps, tails):
content = ""
while tails:
tail = tails.pop(0)
content += files_content[tail]
files = dep2files.pop(tail, [])
for file in files:
deps = file2deps[file]
deps.remove(tail)
if not deps:
tails.append(file)
del file2deps[file]
return content
# -----------------------------------------------------------
def build(self, source_entities, targets):
file2deps = {}
files_content = {}
modules = set()
tails = []
std_modules = set()
for entity in source_entities:
file_name = entity.name
mod_std_imports, mod_deps, mod_content = entity.data
if not mod_content:
continue
if not mod_deps:
tails.append(file_name)
files_content[file_name] = mod_content
file2deps[file_name] = mod_deps
std_modules.update(mod_std_imports)
modules.update(mod_deps)
mod2files = self._mod_to_files(file2deps, modules)
dep2files, file2deps = self._get_dep_to_files(file2deps, mod2files)
content = self._get_content(files_content, dep2files, file2deps, tails)
imports_content = '\n'.join(
"import %s" % module for module in sorted(std_modules))
|
cornelinux/django-linotp-auth | django_linotp/linotp_auth.py | Python | gpl-3.0 | 2,749 | 0.036741 | '''
Add the following to your project/settings.py
AUTHENTICATION_BACKENDS = ('django_linotp.linotp_auth.LinOTP', )
LINOTP = { 'url' : 'https://puckel/validate/check',
'timeout' : 5,
'ssl_verify' : False,
'host_verify' : False,
'create_user' : False,
}
'create_user': if set to True, the user in the django DB will be created, if LinOTP returns a successful authentication
'''
from django.conf import settings
from django.contrib.auth.models import User, check_password
import sys
import pycurl
import logging
import traceback
from urllib import urlencode
import json
logger = logging.getLogger(__name__)
class Test:
def __init__(self):
self.contents = ''
def body_callback(self, buf):
self.contents = self.contents + buf
class LinOTP(object):
def __init__(self):
self.url = 'https://localhost/validate/check'
self.timeout = 5
self.ssl_verify = False
self.host_verify = False
self.create_user = False
if settings.LINOTP:
self.url = settin | gs.LINOTP.get('url', self.url)
self.timeout = settings.LINOTP.get('timeout', self.timeout)
self.ssl_verify = settings.LINOTP.get('ssl_verify', self.ssl_verify)
self.host_verify = settings.LINOTP.get('host_verify', self.host_verify)
self.create_user = settings.LINOTP.get('create_user', self.create_user)
def authenticate(self, username=None, password=None):
user = None
try:
t = Test()
c = | pycurl.Curl()
params = { 'user' : username, 'pass' : password }
url = str("%s?%s" % (self.url, urlencode(params)))
print "Connecting to %s" % url
c.setopt(c.URL, url)
c.setopt(c.WRITEFUNCTION, t.body_callback)
c.setopt(c.HEADER, False)
c.setopt(c.SSL_VERIFYPEER, self.ssl_verify)
c.setopt(c.SSL_VERIFYHOST, self.host_verify)
c.setopt(c.CONNECTTIMEOUT, self.timeout)
c.perform()
c.close()
print t.contents
res = json.loads(t.contents)
if (res.get('result',{}).get('status') == True and
res.get('result',{}).get('value') == True):
user = User.objects.get(username=username)
except User.DoesNotExist:
# The user was authenticated by LinOTP but does not exist!
print "User authenticated but does not exist"
if self.create_user:
print "creating user"
# FIXME: For any reason does not work at the moment
user = User(username=username, password="supersecret")
user.is_staff = True
user.is_superuser = False
user.save
except Exception as e:
print traceback.format_exc()
print e
return user
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
|
victorpoluceno/webrtc-sample-client | app/__init__.py | Python | mit | 314 | 0.025478 | from flask import | Flask
from flask import render_template, request
app = Flask(__name__)
@app.route("/")
def main():
    """Serve the watch page when a ``room`` query parameter is present,
    otherwise serve the landing page."""
    room = request.args.get('room', '')
    if room:
        return render_template('watch.html')
    return render_template('index.html')
if __name__ == "__main__":
    # Bind to all interfaces so the dev server is reachable from other
    # hosts. NOTE(review): debug=True must never ship to production.
    app.run(host='0.0.0.0', debug=True)
|
daisychainme/daisychain | daisychain/config/settings_local.py | Python | mit | 1,754 | 0.00057 | from .settings_base import *
from config.keys import keys
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = keys['DJANGO']['LOCAL']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# Local development uses a file-backed SQLite database next to the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Print outgoing mail to the console instead of sending it.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse',
        },
        'require_debug_true': {
            '()': 'django.utils.log.RequireDebugTrue',
        },
    },
    'formatters': {
        'simple': {
            'format': '[%(asctime)s] %(levelname)s %(message)s',
            'datefmt': '%Y-%m-%d %H:%M:%S'
        },
        'verbose': {
            'format': '[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s',
            'datefmt': '%Y-%m-%d %H:%M:%S'
        },
    },
    'handlers': {
        # Console-only handler, active only while DEBUG is on.
        'console': {
            'level': 'DEBUG',
            'filters': ['require_debug_true'],
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
    },
    'loggers': {
        'channel': {
            'handlers': ['console'],
            'level': 'DEBUG'
        },
        'database': {
            'handlers': ['console'],
            'level': 'INFO'
        },
        'django': {
            'handlers': ['console'],
        },
        'py.warnings': {
            'handlers': ['console'],
        },
    }
}
|
digibyte/digibyte | test/functional/wallet_accounts.py | Python | mit | 8,572 | 0.001633 | #!/usr/bin/env python3
# Copyright (c) 2016-2017 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test account RPCs.
RPCs tested are:
- getaccountaddress
- getaddressesbyaccount
- listaddressgroupings
- setaccount
- sendfrom (with account arguments)
- move (with account arguments)
"""
from test_framework.test_framework import DigiByteTestFramework
from test_framework.util import assert_equal
class WalletAccountsTest(DigiByteTestFramework):
    """Functional test of the account RPCs on a single clean-chain node."""
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [[]]
    def run_test(self):
        node = self.nodes[0]
        # Check that there's no UTXO on any of the nodes
        assert_equal(len(node.listunspent()), 0)
        # Note each time we call generate, all generated coins go into
        # the same address, so we call twice to get two addresses w/50 each
        node.generate(1)
        node.generate(101)
        assert_equal(node.getbalance(), 100)
        # there should be 2 address groups
        # each with 1 address with a balance of 50 DigiBytes
        address_groups = node.listaddressgroupings()
        assert_equal(len(address_groups), 2)
        # the addresses aren't linked now, but will be after we send to the
        # common address
        linked_addresses = set()
        for address_group in address_groups:
            assert_equal(len(address_group), 1)
            assert_equal(len(address_group[0]), 2)
            assert_equal(address_group[0][1], 50)
            linked_addresses.add(address_group[0][0])
        # send 50 from each address to a third address not in this wallet
        # There's some fee that will come back to us when the miner reward
        # matures.
        common_address = "msf4WtN1YQKXvNtvdFYt9JBnUD2FB41kjr"
        txid = node.sendmany(
            fromaccount="",
            amounts={common_address: 100},
            subtractfeefrom=[common_address],
            minconf=1,
        )
        tx_details = node.gettransaction(txid)
        fee = -tx_details['details'][0]['fee']
        # there should be 1 address group, with the previously
        # unlinked addresses now linked (they both have 0 balance)
        address_groups = node.listaddressgroupings()
        assert_equal(len(address_groups), 1)
        assert_equal(len(address_groups[0]), 2)
        assert_equal(set([a[0] for a in address_groups[0]]), linked_addresses)
        assert_equal([a[1] for a in address_groups[0]], [0, 0])
        node.generate(1)
        # we want to reset so that the "" account has what's expected.
        # otherwise we're off by exactly the fee amount as that's mined
        # and matures in the next 100 blocks
        node.sendfrom("", common_address, fee)
        amount_to_send = 1.0
        # Create accounts and make sure subsequent account API calls
        # recognize the account/address associations.
        accounts = [Account(name) for name in ("a", "b", "c", "d", "e")]
        for account in accounts:
            account.add_receive_address(node.getaccountaddress(account.name))
            account.verify(node)
        # Send a transaction to each account, and make sure this forces
        # getaccountaddress to generate a new receiving address.
        for account in accounts:
            node.sendtoaddress(account.receive_address, amount_to_send)
            account.add_receive_address(node.getaccountaddress(account.name))
            account.verify(node)
        # Check the amounts received.
        node.generate(1)
        for account in accounts:
            assert_equal(
                node.getreceivedbyaddress(account.addresses[0]), amount_to_send)
            assert_equal(node.getreceivedbyaccount(account.name), amount_to_send)
        # Check that sendfrom account reduces listaccounts balances.
        for i, account in enumerate(accounts):
            to_account = accounts[(i+1) % len(accounts)]
            node.sendfrom(account.name, to_account.receive_address, amount_to_send)
        node.generate(1)
        for account in accounts:
            account.add_receive_address(node.getaccountaddress(account.name))
            account.verify(node)
            assert_equal(node.getreceivedbyaccount(account.name), 2)
            node.move(account.name, "", node.getbalance(account.name))
            account.verify(node)
        node.generate(101)
        expected_account_balances = {"": 5200}
        for account in accounts:
            expected_account_balances[account.name] = 0
        assert_equal(node.listaccounts(), expected_account_balances)
        assert_equal(node.getbalance(""), 5200)
        # Check that setaccount can assign an account to a new unused address.
        for account in accounts:
            address = node.getaccountaddress("")
            node.setaccount(address, account.name)
            account.add_address(address)
            account.verify(node)
            assert(address not in node.getaddressesbyaccount(""))
        # Check that addmultisigaddress can assign accounts.
        for account in accounts:
            addresses = []
            for x in range(10):
                addresses.append(node.getnewaddress())
            multisig_address = node.addmultisigaddress(5, addresses, account.name)['address']
            account.add_address(multisig_address)
            account.verify(node)
            node.sendfrom("", multisig_address, 50)
        node.generate(101)
        for account in accounts:
            assert_equal(node.getbalance(account.name), 50)
        # Check that setaccount can change the account of an address from a
        # different account.
        change_account(node, accounts[0].addresses[0], accounts[0], accounts[1])
        # Check that setaccount can change the account of an address which
        # is the receiving address of a different account.
        change_account(node, accounts[0].receive_address, accounts[0], accounts[1])
        # Check that setaccount can set the account of an address already
        # in the account. This is a no-op.
        change_account(node, accounts[2].addresses[0], accounts[2], accounts[2])
        # Check that setaccount can set the account of an address which is
        # already the receiving address of the account. It would probably make
        # sense for this to be a no-op, but right now it resets the receiving
        # address, causing getaccountaddress to return a brand new address.
        change_account(node, accounts[2].receive_address, accounts[2], accounts[2])
class Account:
    """Tracks the addresses the test has associated with one wallet
    account so RPC results can be cross-checked against expected state."""
    def __init__(self, name):
        # Account name.
        self.name = name
        # Current receiving address associated with this account.
        self.receive_address = None
        # Every address ever assigned to this account.
        self.addresses = []
    def add_address(self, address):
        """Record *address*; it must not be tracked already."""
        assert_equal(address not in self.addresses, True)
        self.addresses.append(address)
    def add_receive_address(self, address):
        """Record *address* and mark it as the current receiving address."""
        self.add_address(address)
        self.receive_address = address
    def verify(self, node):
        """Cross-check this object's bookkeeping against the node's RPC view."""
        if self.receive_address is not None:
            assert self.receive_address in self.addresses
            assert_equal(node.getaccountaddress(self.name), self.receive_address)
        for tracked in self.addresses:
            assert_equal(node.getaccount(tracked), self.name)
        expected = set(self.addresses)
        assert_equal(set(node.getaddressesbyaccount(self.name)), expected)
def change_account(node, address, old_account, new_account):
assert_equal(address in old_account.addresses, True)
node.setaccount(address, new_account.name)
old_account.addresses.remove(address)
new_account.add_address(address)
# Calling setaccount on an address which was previously the receiving
# address of a different account should reset the receiving address of
# the old account, causing getaccountaddress to return a brand new
# address.
if address == old_account |
jelmer/xandikos | xandikos/store/config.py | Python | gpl-3.0 | 4,820 | 0 | # Xandikos
# Copyright (C) 2016-2017 Jelmer Vernooij <jelmer@jelmer.uk>, et al.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 3
# of the License or (at your option) any later version of
# the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""Collection configuration file.
"""
import configparser
FILENAME = ".xandikos"
class CollectionMetadata(object):
    """Metadata for a configuration."""
    # Abstract interface: every accessor raises NotImplementedError and is
    # overridden by concrete stores (e.g. the file-backed one below).
    def get_color(self) -> str:
        """Get the color for this collection."""
        raise NotImplementedError(self.get_color)
    def set_color(self, color: str) -> None:
        """Change the color of this collection."""
        raise NotImplementedError(self.set_color)
    def get_source_url(self) -> str:
        """Get the source URL for this collection."""
        raise NotImplementedError(self.get_source_url)
    def set_source_url(self, url: str) -> None:
        """Set the source URL for this collection."""
        raise NotImplementedError(self.set_source_url)
    def get_comment(self) -> str:
        """Get the free-form comment for this collection."""
        raise NotImplementedError(self.get_comment)
    def get_displayname(self) -> str:
        """Get the human-readable display name."""
        raise NotImplementedError(self.get_displayname)
    def get_description(self) -> str:
        """Get the longer description of this collection."""
        raise NotImplementedError(self.get_description)
    def get_order(self) -> str:
        """Get the calendar ordering hint."""
        raise NotImplementedError(self.get_order)
    def set_order(self, order: str) -> None:
        """Set the calendar ordering hint."""
        raise NotImplementedError(self.set_order)
class FileBasedCollectionMetadata(CollectionMetadata):
    """Metadata for a configuration, backed by a configparser file.
    Every setter updates the in-memory ConfigParser and then invokes the
    optional save callback with a short commit message.
    """
    def __init__(self, cp=None, save=None):
        # cp: backing ConfigParser (a fresh one is created when omitted).
        # save: callable(cp, message) that persists the parser, or None.
        if cp is None:
            cp = configparser.ConfigParser()
        self._configparser = cp
        self._save_cb = save
    def _save(self, message):
        """Persist via the save callback, if one was supplied."""
        if self._save_cb is None:
            return
        self._save_cb(self._configparser, message)
    @classmethod
    def from_file(cls, f):
        """Load metadata from an open file object."""
        cp = configparser.ConfigParser()
        cp.read_file(f)
        return cls(cp)
    def get_source_url(self):
        return self._configparser["DEFAULT"]["source"]
    def set_source_url(self, url):
        # Passing None removes the setting entirely.
        if url is not None:
            self._configparser["DEFAULT"]["source"] = url
        else:
            del self._configparser["DEFAULT"]["source"]
        self._save("Set source URL.")
    def get_color(self):
        return self._configparser["DEFAULT"]["color"]
    def get_comment(self):
        return self._configparser["DEFAULT"]["comment"]
    def get_displayname(self):
        return self._configparser["DEFAULT"]["displayname"]
    def get_description(self):
        return self._configparser["DEFAULT"]["description"]
    def set_color(self, color):
        if color is not None:
            self._configparser["DEFAULT"]["color"] = color
        else:
            del self._configparser["DEFAULT"]["color"]
        self._save("Set color.")
    def set_displayname(self, displayname):
        if displayname is not None:
            self._configparser["DEFAULT"]["displayname"] = displayname
        else:
            del self._configparser["DEFAULT"]["displayname"]
        self._save("Set display name.")
    def set_description(self, description):
        if description is not None:
            self._configparser["DEFAULT"]["description"] = description
        else:
            del self._configparser["DEFAULT"]["description"]
        self._save("Set description.")
    def set_comment(self, comment):
        if comment is not None:
            self._configparser["DEFAULT"]["comment"] = comment
        else:
            del self._configparser["DEFAULT"]["comment"]
        self._save("Set comment.")
    def set_type(self, store_type):
        self._configparser["DEFAULT"]["type"] = store_type
        self._save("Set collection type.")
    def get_type(self):
        return self._configparser["DEFAULT"]["type"]
    def get_order(self):
        return self._configparser["calendar"]["order"]
    def set_order(self, order):
        # The calendar section may not exist yet; create it on demand.
        try:
            self._configparser.add_section("calendar")
        except configparser.DuplicateSectionError:
            pass
        if order is None:
            del self._configparser["calendar"]["order"]
        else:
            self._configparser["calendar"]["order"] = order
|
redvasily/django-emailauth | emailauth/tests.py | Python | bsd-3-clause | 14,812 | 0.003983 | import re
from datetime import datetime, timedelta
from django.test.client import Client
from django.test.testcases import TestCase
from django.core import mail
from django.contrib.auth.models import User
from django.conf import settings
from emailauth.models import UserEmail
from emailauth.utils import email_verification_days
class Status:
    """HTTP status codes used by assertions throughout this module."""
    OK = 200
    REDIRECT = 302
    NOT_FOUND = 404
class BaseTestCase(TestCase):
    """Shared helpers for the emailauth test cases."""
    def assertStatusCode(self, response, status_code=200):
        """Assert that *response* carries the expected HTTP status."""
        self.assertEqual(response.status_code, status_code)
    def checkSimplePage(self, path, params={}):
        """GET *path* with *params* and expect a plain 200 page."""
        # Shared default dict is safe here: it is read, never mutated.
        response = Client().get(path, params)
        self.assertStatusCode(response)
    def createActiveUser(self, username='username', email='user@example.com',
        password='password'):
        """Create and persist an active user plus its verified default email.
        Returns the (user, user_email) pair."""
        user = User(username=username, email=email, is_active=True)
        user.first_name = 'John'
        user.set_password(password)
        user.save()
        email_record = UserEmail(user=user, email=email, verified=True,
            default=True, verification_key=UserEmail.VERIFIED)
        email_record.save()
        return user, email_record
    def getLoggedInClient(self, email='user@example.com', password='password'):
        """Return a test client that is already authenticated as *email*."""
        http = Client()
        http.login(username=email, password=password)
        return http
class RegisterTest(BaseTestCase):
    """End-to-end tests for registration: form post, verification mail,
    verification link, and subsequent login."""
    def testRegisterGet(self):
        """The registration page renders."""
        self.checkSimplePage('/register/')
    def testRegisterPost(self):
        """Registering sends a verification mail whose link activates the
        account; afterwards the user can log in with the chosen password."""
        client = Client()
        response = client.post('/register/', {
            'email': 'user@example.com',
            'first_name': 'John',
            'password1': 'password',
            'password2': 'password',
        })
        self.assertRedirects(response, '/register/continue/user%40example.com/')
        self.assertEqual(len(mail.outbox), 1)
        email = mail.outbox[0]
        # Pull the first path-like URL out of the mail body.
        addr_re = re.compile(r'.*http://.*?(/\S*/)', re.UNICODE | re.MULTILINE)
        verification_url = addr_re.search(email.body).groups()[0]
        response = client.get(verification_url)
        self.assertRedirects(response, '/account/')
        response = client.post('/login/', {
            'email': 'user@example.com',
            'password': 'password',
        })
        self.assertRedirects(response, '/account/')
        user = User.objects.get(email='user@example.com')
        self.assertEqual(user.first_name, 'John')
    def testRegisterSame(self):
        """Registering an address that is already taken (verified or still
        pending verification) is rejected with a form error."""
        user, user_email = self.createActiveUser()
        client = Client()
        response = client.post('/register/', {
            'email': user_email.email,
            'first_name': 'John',
            'password1': 'password',
            'password2': 'password',
        })
        self.assertContains(response, 'This email is already taken')
        email_obj = UserEmail.objects.create_unverified_email(
            'user@example.org', user)
        email_obj.save()
        response = client.post('/register/', {
            'email': 'user@example.org',
            'first_name': 'John',
            'password1': 'password',
            'password2': 'password',
        })
        self.assertContains(response, 'This email is already taken')
class LoginTest(BaseTestCase):
    """Tests for the login view."""
    def testLoginGet(self):
        """The login page renders."""
        self.checkSimplePage('/login/')
    def testLoginFail(self):
        """A wrong password re-renders the form (200) instead of redirecting."""
        self.createActiveUser()
        response = Client().post('/login/', {
            'email': 'user@example.com',
            'password': 'wrongpassword',
        })
        self.assertStatusCode(response, Status.OK)
class PasswordResetTest(BaseTestCase):
    """Tests for the password-reset flow and the expiry of reset links."""
    def prepare(self):
        """Request a reset mail and return (reset_url, user_email)."""
        user, user_email = self.createActiveUser()
        client = Client()
        response = client.post('/resetpassword/', {
            'email': user_email.email,
        })
        self.assertRedirects(response,
            '/resetpassword/continue/user%40example.com/')
        email = mail.outbox[0]
        # Pull the first path-like URL out of the mail body.
        addr_re = re.compile(r'.*http://.*?(/\S*/)', re.UNICODE | re.MULTILINE)
        reset_url = addr_re.search(email.body).groups()[0]
        return reset_url, user_email
    def testPasswordReset(self):
        """The link lets the user set a new password, then stops working."""
        reset_url, user_email = self.prepare()
        client = Client()
        self.checkSimplePage(reset_url)
        response = client.post(reset_url, {
            'password1': 'newpassword',
            'password2': 'newpassword',
        })
        self.assertRedirects(response, '/account/')
        user = User.objects.get(email=user_email.email)
        self.assertTrue(user.check_password('newpassword'))
        response = client.get(reset_url)
        self.assertStatusCode(response, Status.NOT_FOUND)
    def testPasswordResetFail(self):
        """A consumed/cleared verification key invalidates the link."""
        reset_url, user_email = self.prepare()
        client = Client()
        user_email.verification_key = UserEmail.VERIFIED
        user_email.save()
        response = client.get(reset_url)
        self.assertStatusCode(response, Status.NOT_FOUND)
    def testPasswordResetFail2(self):
        """An expired code (older than the verification window) 404s."""
        reset_url, user_email = self.prepare()
        client = Client()
        user_email.code_creation_date = (datetime.now() -
            timedelta(days=email_verification_days() + 1))
        user_email.save()
        response = client.get(reset_url)
        self.assertStatusCode(response, Status.NOT_FOUND)
class TestAddEmail(BaseTestCase):
    """Tests for adding an extra email address to an existing account."""
    def setUp(self):
        self.user, self.user_email = self.createActiveUser()
        self.client = self.getLoggedInClient()
    def testAddEmailGet(self):
        """The add-email form renders for a logged-in user."""
        response = self.client.get('/account/addemail/')
        self.assertStatusCode(response, Status.OK)
    def testAddEmail(self):
        """Adding an address sends a verification mail; following its link
        makes the new address usable for login."""
        response = self.client.post('/account/addemail/', {
            'email': 'user@example.org',
        })
        self.assertRedirects(response, '/account/addemail/continue/user%40example.org/')
        self.assertEqual(len(mail.outbox), 1)
        email = mail.outbox[0]
        # Pull the first path-like URL out of the mail body.
        addr_re = re.compile(r'.*http://.*?(/\S*/)', re.UNICODE | re.MULTILINE)
        verification_url = addr_re.search(email.body).groups()[0]
        response = self.client.get(verification_url)
        self.assertRedirects(response, '/account/')
        client = Client()
        response = client.post('/login/', {
            'email': 'user@example.org',
            'password': 'password',
        })
        self.assertRedirects(response, '/account/')
    def testAddSameEmail(self):
        """Adding an address twice (or one already owned) is rejected."""
        response = self.client.post('/account/addemail/', {
            'email': 'user@example.com',
        })
        self.assertStatusCode(response, Status.OK)
        response = self.client.post('/account/addemail/', {
            'email': 'user@example.org',
        })
        self.assertRedirects(response,
            '/account/addemail/continue/user%40example.org/')
        response = self.client.post('/account/addemail/', {
            'email': 'user@example.org',
        })
        self.assertStatusCode(response, Status.OK)
class TestDeleteEmail(BaseTestCase):
    """Tests for removing an email address from an account."""
    def setUp(self):
        self.user, self.user_email = self.createActiveUser()
        self.client = self.getLoggedInClient()
    def testDeleteEmail(self):
        """A secondary address can be deleted; the last remaining (default)
        address cannot."""
        user = self.user
        user_email = UserEmail(user=user, email='email@example.org', verified=True,
            default=False, verification_key=UserEmail.VERIFIED)
        user_email.save()
        response = self.client.post('/account/deleteemail/%s/' % user_email.id, {
            'yes': 'yes',
        })
        self.assertRedirects(response, '/account/')
        user_emails = UserEmail.objects.filter(user=self.user)
        self.assertEqual(len(user_emails), 1)
        # Deleting the only remaining address must fail (page re-renders).
        response = self.client.post('/account/deleteemail/%s/' % user_emails[0].id, {
            'yes': 'yes',
        })
        self.assertStatusCode(response, Status.OK)
class TestSetDefaultEmail(BaseTestCase):
def setUp(self):
self.user, self.user_email = self.createActiveUser()
self.client = self.getLoggedInClient()
def testSetDefaultEmailGet(self):
response = self.client.get('/account/setdefaultemail/%s/' %
|
ahmadpriatama/Flask-Simple-Ecommerce | appname/assets.py | Python | bsd-2-clause | 490 | 0 | from | flask_assets import Bundle
# Bundled + minified stylesheet served as public/css/common.css.
common_css = Bundle(
    'css/vendor/bootstrap.min.css',
    'css/vendor/helper.css',
    'selectize/dist/css/selectize.bootstrap3.css',
    # 'css/main.css',
    filters='cssmin',
    output='public/css/common.css'
)
# Bundled script served as public/js/common.js; only the nested bundle
# (our own main.js) is minified - the vendor files already are.
common_js = Bundle(
    'js/vendor/jquery.min.js',
    'js/vendor/bootstrap.min.js',
    'selectize/dist/js/standalone/selectize.min.js',
    Bundle(
        'js/main.js',
        filters='jsmin'
    ),
    output='public/js/common.js'
)
|
cpennington/edx-platform | openedx/core/djangoapps/schedules/management/commands/__init__.py | Python | agpl-3.0 | 2,311 | 0.001731 | """
Base management command for sending emails
"""
import datetime
import pytz
from six.moves import range
from django.contrib.sites.models import Site
from django.core.management.base import BaseCommand
from openedx.core.djangoapps.schedules.utils import PrefixedDebugLoggerMixin
class SendEmailBaseCommand(PrefixedDebugLoggerMixin, BaseCommand):
    """Base management command for sending schedule emails.
    Subclasses assign ``async_send_task``; one task is enqueued per entry
    in ``offsets``.
    """
    async_send_task = None  # define in subclass
    # An iterable of day offsets (e.g. -7, -14, -21, -28, ...) that defines the days for
    # which emails are sent out, relative to the 'date' parameter
    offsets = range(-7, -77, -7)
    def add_arguments(self, parser):
        parser.add_argument(
            '--date',
            default=datetime.datetime.utcnow().date().isoformat(),
            help='The date to compute weekly messages relative to, in YYYY-MM-DD format',
        )
        parser.add_argument(
            '--override-recipient-email',
            help='Send all emails to this address instead of the actual recipient'
        )
        parser.add_argument('site_domain_name')
        parser.add_argument(
            '--weeks',
            type=int,
            help='Number of weekly emails to be sent',
        )
    def handle(self, *args, **options):
        """Resolve the site and date, then fan out one task per offset."""
        self.log_debug('Args = %r', options)
        num_weeks = options.get('weeks')
        if num_weeks:
            # +1 so range(-7, -num_days, -7) yields exactly num_weeks offsets.
            num_days = (7 * num_weeks) + 1
            self.offsets = range(-7, -num_days, -7)
        current_date = datetime.datetime(
            *[int(x) for x in options['date'].split('-')],
            tzinfo=pytz.UTC
        )
        self.log_debug(u'Current date = %s', current_date.isoformat())
        site = Site.objects.get(domain__iexact=options['site_domain_name'])
        self.log_debug(u'Running for site %s', site.domain)
        override_recipient_email = options.get('override_recipient_email')
        self.send_emails(site, current_date, override_recipient_email)
    def enqueue(self, day_offset, site, current_date, override_recipient_email=None):
        """Schedule the subclass's async task for a single day offset."""
        self.async_send_task.enqueue(
            site,
            current_date,
            day_offset,
            override_recipient_email,
        )
    def send_emails(self, *args, **kwargs):
        """Enqueue one send task for every configured day offset."""
        for offset in self.offsets:
            self.enqueue(offset, *args, **kwargs)
|
asedunov/intellij-community | python/testData/deprecation/deprecatedProperty.py | Python | apache-2.0 | 202 | 0.029703 | class Foo:
@property
def bar(self):
import warnings
warnings.warn("this is deprecated", DeprecationWarni | ng, 2)
foo = Foo()
foo.<warning descr=" | this is deprecated">bar</warning>
|
konstantint/eio-userdb | tests/conftest.py | Python | mit | 273 | 0.003663 | from | pytest import fixture
from eio_userdb.main import app
from eio_userdb.model import init_db
@fixture
def client():
    """Flask test client wired to a fresh in-memory SQLite database."""
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
    app.config['TESTING'] = True
    client = app.test_client()
    init_db()
    return client
|
bamos/dotfiles | .xmonad/xmobar.py | Python | mit | 655 | 0 | #!/usr/bin/env python3
from datetime import datetime
import psutil
import sys
import argparse

# Emit one xmobar status segment, chosen by the positional argument.
parser = argparse.ArgumentParser()
parser.add_argument('direction', type=str,
                    choices=['left', 'right'])
args = parser.parse_args()

status = ''
if args.direction == 'left':
    status = '<fc=#B27AEB><fn=2>❤</fn></fc>'
elif args.direction == 'right':
    battery = psutil.sensors_battery()
    # Only show battery percentage while discharging.
    if not battery.power_plugged:
        status += f'<fc=#D43737><fn=1></fn>{int(battery.percent)}%</fc> '
    now = datetime.now()
    time_str = now.strftime('%Y.%m.%d %-I:%M%p')
    status += f'<fc=#ABABAB>{time_str}</fc>'
print(status)
|
iLoveTux/unitils | test/test_ls.py | Python | gpl-3.0 | 3,856 | 0.002075 | import unittest
import unitils
from io import StringIO
try:
from unittest import mock
except ImportError:
import mock
# Canned `ls` output: three names, three times each, pre-sorted.
return_value = (
    'that', 'that', 'that',
    'the other', 'the other', 'the other',
    'this', 'this', 'this'
)
# Canned directory listing used to exercise column layout (16 entries).
column_test_return_value = (
    "appveyor.yml",
    "cover",
    "docs",
    "Makefile",
    "requirements.txt",
    "setup.py",
    "test",
    "unitils",
    "codecov.yml",
    "dist",
    "LICENSE",
    "README.rst",
    "setup.cfg",
    "stats.dat",
    "test-data",
    "unitils.egg-info",
)
class TestLsCLI(unittest.TestCase):
    """Tests for the `ls` command-line front-end."""
    @mock.patch("unitils.ls", return_value=return_value)
    def test_can_be_called_without_arguments(self, mock_ls):
        """With no args the CLI lists '.' without hidden entries."""
        args = []
        unitils.cli.ls(args)
        mock_ls.assert_called_with(path=".", _all=False, almost_all=False)
    @mock.patch("unitils.ls", return_value=column_test_return_value)
    @mock.patch("unitils.cli.get_terminal_size", return_value=(104, 25))
    def test_columns(self, mock_term_size, mock_ls):
        """Entries are laid out in columns sized for a 104-char terminal."""
        args = []
        out = StringIO()
        unitils.cli.ls(args, out=out)
        out.seek(0)
        results = out.read()
        expected = """appveyor.yml  cover  docs     Makefile    requirements.txt  setup.py   test       unitils
codecov.yml   dist   LICENSE  README.rst  setup.cfg         stats.dat  test-data  unitils.egg-info
"""
        self.assertEqual(expected, results)
# Unsorted fixture listing: the same three entries repeated three times.
directory_listing = 3 * ["this", "that", "the other"]
class TestLS(unittest.TestCase):
    """Tests for the `ls` library function, with os.listdir mocked out."""
    @mock.patch("os.listdir", return_value=directory_listing)
    def test_ls_sorts_and_iterates_through_directory_listing(self, mock_listdir):
        """ls should be an iterator yielding the sorted contents of the directory
        """
        expected = [
            'that',
            'that',
            'that',
            'the other',
            'the other',
            'the other',
            'this',
            'this',
            'this'
        ]
        results = list(unitils.ls())
        self.assertEqual(expected, results)
    @mock.patch("os.listdir", return_value=directory_listing+[".hidden"])
    def test_ls_ignores_dot_files(self, mock_listdir):
        """By default, ls should not yield any items starting with "."
        """
        expected = [
            'that',
            'that',
            'that',
            'the other',
            'the other',
            'the other',
            'this',
            'this',
            'this'
        ]
        results = list(unitils.ls())
        self.assertEqual(expected, results)
    @mock.patch("os.listdir", return_value=directory_listing+[".hidden"])
    def test_ls_accepts_all(self, mock_listdir):
        """If _all=True, ls should yield the "dot files" as well
        as "." and "..". Note that the param is "_all" that is because
        all is reserved in Python
        """
        expected = [
            '.',
            '..',
            '.hidden',
            'that',
            'that',
            'that',
            'the other',
            'the other',
            'the other',
            'this',
            'this',
            'this'
        ]
        results = list(unitils.ls(_all=True))
        self.assertEqual(expected, results)
    @mock.patch("os.listdir", return_value=directory_listing+[".hidden"])
    def test_ls_accepts_almost_all(self, mock_listdir):
        """If almost_all=True, ls should yield the "dot files" but not
        "." and "..".
        """
        expected = [
            '.hidden',
            'that',
            'that',
            'that',
            'the other',
            'the other',
            'the other',
            'this',
            'this',
            'this'
        ]
        results = list(unitils.ls(almost_all=True))
        self.assertEqual(expected, results)
|
ashepelev/TopologyWeigher | source/migrate_versions/243_topology_tables.py | Python | apache-2.0 | 2,616 | 0.019495 | from migrate.changeset import UniqueConstraint
from migrate import ForeignKeyConstraint
from sqlalchemy import Boolean, BigInteger, Column, DateTime, Enum, Float
from sqlalchemy import dialects
from sqlalchemy import ForeignKey, Index, Integer, MetaData, String, Table
from sqlalchemy import Text
from sqlalchemy.types import NullType
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
    """Create the node_info and edge_info topology tables."""
    meta = MetaData()
    meta.bind = migrate_engine
    # One row per compute node that participates in the topology graph.
    node_info = Table('node_info',meta,
            Column('created_at', DateTime),
            Column('updated_at', DateTime),
            Column('deleted_at', DateTime),
            Column('deleted', Integer),
            Column('id', Integer, primary_key=True, nullable=False),
            Column('node_id',Integer,nullable=False),
            Column('name',String(length=30),nullable=False),
            Column('ip_addr',String(length=20)),
            Column('hostname',String(length=255)),
            mysql_engine='InnoDB',
            mysql_charset='utf8'
    )
    # One row per directed edge between two node_info rows.
    edge_info = Table('edge_info',meta,
            Column('created_at', DateTime),
            Column('updated_at', DateTime),
            Column('deleted_at', DateTime),
            Column('deleted', Integer),
            Column('id', Integer, primary_key=True, nullable=False),
            Column('start',Integer,nullable=False),
            Column('end',Integer,nullable=False),
            mysql_engine='InnoDB',
            mysql_charset='utf8'
    )
    try:
        node_info.create()
    except Exception:
        LOG.info(repr(node_info))
        LOG.exception(_('Exception while creating table node_info.'))
        raise
    try:
        edge_info.create()
    except Exception:
        LOG.info(repr(edge_info))
        LOG.exception(_('Exception while creating table edge_info.'))
        raise
    # TO DO
    # Create indicies
def downgrade(migrate_engine):
    """Drop the node_info and edge_info tables (best effort)."""
    meta = MetaData()
    meta.bind = migrate_engine
    node_info = Table('node_info',meta)
    try:
        node_info.drop()
    except Exception:
        # Table may never have been created (e.g. upgrade failed half-way).
        LOG.info("Table node_info doesn't exist")
        #LOG.info(repr(node_info))
        #LOG.exception(_('Exception while deleting table node_info.'))
    edge_info = Table('edge_info',meta)
    try:
        edge_info.drop()
    except Exception:
        LOG.info("Table edge_info doesn't exist")
        #LOG.info(repr(edge_info))
        #LOG.exception(_('Exception while deleting table edge_info.'))
|
unnikrishnankgs/va | venv/lib/python3.5/site-packages/IPython/core/magics/script.py | Python | bsd-2-clause | 8,835 | 0.004754 | """Magic functions for running cells in various scripts."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import errno
import os
import sys
import signal
import time
from subprocess import Popen, PIPE
import atexit
from IPython.core import magic_arguments
from IPython.core.magic import (
Magics, magics_class, line_magic, cell_magic
)
from IPython.lib.backgroundjobs import BackgroundJobManager
from IPython.utils import py3compat
from IPython.utils.process import arg_split
from traitlets import List, Dict, default
#-----------------------------------------------------------------------------
# Magic implementation classes
#-----------------------------------------------------------------------------
def script_args(f):
    """Attach the shared --out/--err/--bg/--proc options to a cell magic."""
    decorators = (
        magic_arguments.argument(
            '--out', type=str,
            help="""The variable in which to store stdout from the script.
            If the script is backgrounded, this will be the stdout *pipe*,
            instead of the stderr text itself.
            """
        ),
        magic_arguments.argument(
            '--err', type=str,
            help="""The variable in which to store stderr from the script.
            If the script is backgrounded, this will be the stderr *pipe*,
            instead of the stderr text itself.
            """
        ),
        magic_arguments.argument(
            '--bg', action="store_true",
            help="""Whether to run the script in the background.
            If given, the only way to see the output of the command is
            with --out/err.
            """
        ),
        magic_arguments.argument(
            '--proc', type=str,
            help="""The variable in which to store Popen instance.
            This is used only when --bg option is given.
            """
        ),
    )
    # Apply the decorators in the same order the original list traversal did.
    for decorate in decorators:
        f = decorate(f)
    return f
@magics_class
class ScriptMagics(Magics):
"""Magics for talking to scripts
This defines a base `%%script` cell magic for running a cell
with a program in a subprocess, and registers a few top-level
magics that call %%script with common interpreters.
"""
script_magics = List(
help="""Extra s | cript cell magics to define
This generates simple wrappers of `%%script foo` as `%%foo`.
If you want to add script magics that aren't on your path,
specify them in scri | pt_paths
""",
).tag(config=True)
@default('script_magics')
def _script_magics_default(self):
"""default to a common list of programs"""
defaults = [
'sh',
'bash',
'perl',
'ruby',
'python',
'python2',
'python3',
'pypy',
]
if os.name == 'nt':
defaults.extend([
'cmd',
])
return defaults
script_paths = Dict(
help="""Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
Only necessary for items in script_magics where the default path will not
find the right interpreter.
"""
).tag(config=True)
def __init__(self, shell=None):
super(ScriptMagics, self).__init__(shell=shell)
self._generate_script_magics()
self.job_manager = BackgroundJobManager()
self.bg_processes = []
atexit.register(self.kill_bg_processes)
def __del__(self):
self.kill_bg_processes()
def _generate_script_magics(self):
cell_magics = self.magics['cell']
for name in self.script_magics:
cell_magics[name] = self._make_script_magic(name)
def _make_script_magic(self, name):
"""make a named magic, that calls %%script with a particular program"""
# expand to explicit path if necessary:
script = self.script_paths.get(name, name)
@magic_arguments.magic_arguments()
@script_args
def named_script_magic(line, cell):
# if line, add it as cl-flags
if line:
line = "%s %s" % (script, line)
else:
line = script
return self.shebang(line, cell)
# write a basic docstring:
named_script_magic.__doc__ = \
"""%%{name} script magic
Run cells with {script} in a subprocess.
This is a shortcut for `%%script {script}`
""".format(**locals())
return named_script_magic
    @magic_arguments.magic_arguments()
    @script_args
    @cell_magic("script")
    def shebang(self, line, cell):
        """Run a cell via a shell command

        The `%%script` line is like the #! line of script,
        specifying a program (bash, perl, ruby, etc.) with which to run.

        The rest of the cell is run by that program.

        Examples
        --------
        ::

            In [1]: %%script bash
               ...: for i in 1 2 3; do
               ...:   echo $i
               ...: done
            1
            2
            3
        """
        # Split the magic line shell-style; on Windows, backslashes in paths
        # mean POSIX quoting rules must be disabled.
        argv = arg_split(line, posix = not sys.platform.startswith('win'))
        # parse_known_args separates our own flags (args) from the command
        # and its flags (cmd), which are passed through to the subprocess.
        args, cmd = self.shebang.parser.parse_known_args(argv)
        try:
            p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)
        except OSError as e:
            # Missing executable: report it nicely instead of a traceback.
            if e.errno == errno.ENOENT:
                print("Couldn't find program: %r" % cmd[0])
                return
            else:
                raise
        # Ensure the payload ends with a newline, then encode; the subprocess
        # stdin pipe needs bytes, and undecodable characters are replaced
        # rather than raising.
        if not cell.endswith('\n'):
            cell += '\n'
        cell = cell.encode('utf8', 'replace')
        if args.bg:
            # Background mode: track the process and hand the *raw pipe
            # objects* (not decoded text) to the requested user_ns names.
            self.bg_processes.append(p)
            self._gc_bg_processes()
            if args.out:
                self.shell.user_ns[args.out] = p.stdout
            if args.err:
                self.shell.user_ns[args.err] = p.stderr
            # _run_script feeds the cell to stdin and waits, in a daemon job.
            self.job_manager.new(self._run_script, p, cell, daemon=True)
            if args.proc:
                self.shell.user_ns[args.proc] = p
            return
        # Foreground mode: block until the subprocess finishes.
        try:
            out, err = p.communicate(cell)
        except KeyboardInterrupt:
            # Escalating shutdown: SIGINT, then terminate(), then kill(),
            # giving the child 0.1s to exit after each step.
            try:
                p.send_signal(signal.SIGINT)
                time.sleep(0.1)
                if p.poll() is not None:
                    print("Process is interrupted.")
                    return
                p.terminate()
                time.sleep(0.1)
                if p.poll() is not None:
                    print("Process is terminated.")
                    return
                p.kill()
                print("Process is killed.")
            except OSError:
                # Process likely already gone; nothing more to do.
                pass
            except Exception as e:
                print("Error while terminating subprocess (pid=%i): %s" \
                    % (p.pid, e))
            return
        # Decode captured output, then either store it in user_ns (if --out
        # / --err were given) or mirror it to this process's own streams.
        out = py3compat.bytes_to_str(out)
        err = py3compat.bytes_to_str(err)
        if args.out:
            self.shell.user_ns[args.out] = out
        else:
            sys.stdout.write(out)
            sys.stdout.flush()
        if args.err:
            self.shell.user_ns[args.err] = err
        else:
            sys.stderr.write(err)
            sys.stderr.flush()
def _run_script(self, p, cell):
"""callback for running the script in the background"""
p.stdin.write(cell)
p.stdin.close()
p.wait()
@line_magic("killbgscripts")
def killbgscripts(self, _nouse_=''):
"""Kill all BG processes started by %%script and its family."""
self.kill_bg_processes()
print("All background processes were killed.")
def kill_bg_processes(self):
"""Kill all BG processes which are still running."""
if not self.bg_processes:
return
for p in self.bg_processes:
if p.poll() is None:
try:
p.send_signal(signal.SIGINT)
except:
pass
time.sleep(0.1)
self._gc_bg_proc |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.